path: root/tools/perf/scripts/python/bin
author    Eliad Peller <eliad@wizery.com>    2014-05-26 18:44:35 +0300
committer Emmanuel Grumbach <emmanuel.grumbach@intel.com>    2014-06-24 14:30:56 +0300
commit    89879413eba4d9f57b69f84a32fd085d54057df3 (patch)
tree      63b691bca840f94d12ced961903e657f8d8a2e91 /tools/perf/scripts/python/bin
parent    7171511eaec5bf23fb06078f59784a3a0626b38f (diff)
iwlwifi: mvm: rework sched scan channel configuration
The current sched scan channel configuration code configures all the supported channels for scanning. However, this can result in a SYSASSERT in some cases, when a configured channel is disabled.

Instead, configure only the channels given in the req struct, and set the channel_count field appropriately. While at it, change the code to use channel->hw_value instead of recalculating the channel number.

Signed-off-by: Eliad Peller <eliadx.peller@intel.com>
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
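The rework described in the message boils down to a short loop. Below is a minimal C sketch of that logic; the patch body is not shown on this page, so struct iwl_scan_channel_cfg and its fields are hypothetical stand-ins for the firmware command layout. struct cfg80211_sched_scan_request, its n_channels/channels members, and channel->hw_value are the real cfg80211 types the message refers to.

#include <net/cfg80211.h>

/* Hypothetical firmware command layout, for illustration only. */
struct iwl_scan_channel_cfg {
	__le32 channel_count;
	__le16 channel_number[64];
};

static void sched_scan_fill_channels(struct iwl_scan_channel_cfg *cfg,
				     struct cfg80211_sched_scan_request *req)
{
	int i;

	/* Configure only the channels given in the req struct, not every
	 * supported channel: a disabled channel would trip a SYSASSERT. */
	for (i = 0; i < req->n_channels; i++)
		cfg->channel_number[i] =
			cpu_to_le16(req->channels[i]->hw_value);

	/* Set channel_count to match what was actually configured,
	 * using hw_value rather than recalculating the channel number. */
	cfg->channel_count = cpu_to_le32(req->n_channels);
}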
Diffstat (limited to 'tools/perf/scripts/python/bin')
0 files changed, 0 insertions(+), 0 deletions(-)
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/8250_pci.h1
-rw-r--r--include/linux/a.out.h17
-rw-r--r--include/linux/acct.h5
-rw-r--r--include/linux/acpi.h719
-rw-r--r--include/linux/acpi_agdi.h13
-rw-r--r--include/linux/acpi_apmt.h19
-rw-r--r--include/linux/acpi_dma.h5
-rw-r--r--include/linux/acpi_iort.h64
-rw-r--r--include/linux/acpi_mdio.h33
-rw-r--r--include/linux/acpi_pmtmr.h1
-rw-r--r--include/linux/acpi_viot.h21
-rw-r--r--include/linux/adb.h1
-rw-r--r--include/linux/adfs_fs.h1
-rw-r--r--include/linux/adreno-smmu-priv.h72
-rw-r--r--include/linux/adxl.h13
-rw-r--r--include/linux/aer.h20
-rw-r--r--include/linux/agpgart.h2
-rw-r--r--include/linux/ahci-remap.h1
-rw-r--r--include/linux/ahci_platform.h22
-rw-r--r--include/linux/aio.h7
-rw-r--r--include/linux/alarmtimer.h8
-rw-r--r--include/linux/alcor_pci.h280
-rw-r--r--include/linux/align.h15
-rw-r--r--include/linux/altera_jtaguart.h1
-rw-r--r--include/linux/altera_uart.h1
-rw-r--r--include/linux/amba/bus.h118
-rw-r--r--include/linux/amba/clcd-regs.h1
-rw-r--r--include/linux/amba/clcd.h31
-rw-r--r--include/linux/amba/kmi.h16
-rw-r--r--include/linux/amba/mmci.h18
-rw-r--r--include/linux/amba/pl022.h21
-rw-r--r--include/linux/amba/pl080.h5
-rw-r--r--include/linux/amba/pl08x.h5
-rw-r--r--include/linux/amba/pl093.h80
-rw-r--r--include/linux/amba/serial.h15
-rw-r--r--include/linux/amd-iommu.h68
-rw-r--r--include/linux/amd-pstate.h111
-rw-r--r--include/linux/amifd.h62
-rw-r--r--include/linux/amifdreg.h81
-rw-r--r--include/linux/anon_inodes.h10
-rw-r--r--include/linux/aperture.h56
-rw-r--r--include/linux/apm-emulation.h3
-rw-r--r--include/linux/apm_bios.h11
-rw-r--r--include/linux/apple-gmux.h158
-rw-r--r--include/linux/apple-mailbox.h19
-rw-r--r--include/linux/apple_bl.h26
-rw-r--r--include/linux/arch_topology.h86
-rw-r--r--include/linux/arm-cci.h15
-rw-r--r--include/linux/arm-smccc.h476
-rw-r--r--include/linux/arm_ffa.h367
-rw-r--r--include/linux/arm_sdei.h84
-rw-r--r--include/linux/armada-37xx-rwtm-mailbox.h23
-rw-r--r--include/linux/ascii85.h39
-rw-r--r--include/linux/asn1.h6
-rw-r--r--include/linux/asn1_ber_bytecode.h6
-rw-r--r--include/linux/asn1_decoder.h6
-rw-r--r--include/linux/asn1_encoder.h32
-rw-r--r--include/linux/assoc_array.h8
-rw-r--r--include/linux/assoc_array_priv.h8
-rw-r--r--include/linux/async.h89
-rw-r--r--include/linux/async_tx.h40
-rw-r--r--include/linux/ata.h174
-rw-r--r--include/linux/ata_platform.h4
-rw-r--r--include/linux/atalk.h32
-rw-r--r--include/linux/atm.h1
-rw-r--r--include/linux/atm_suni.h12
-rw-r--r--include/linux/atm_tcp.h3
-rw-r--r--include/linux/atmdev.h30
-rw-r--r--include/linux/atmel-isc-media.h58
-rw-r--r--include/linux/atmel-mci.h1
-rw-r--r--include/linux/atmel-ssc.h1
-rw-r--r--include/linux/atmel_pdc.h6
-rw-r--r--include/linux/atmel_tc.h270
-rw-r--r--include/linux/atomic.h1054
-rw-r--r--include/linux/atomic/atomic-arch-fallback.h2671
-rw-r--r--include/linux/atomic/atomic-instrumented.h2170
-rw-r--r--include/linux/atomic/atomic-long.h1050
-rw-r--r--include/linux/attribute_container.h10
-rw-r--r--include/linux/audit.h343
-rw-r--r--include/linux/audit_arch.h24
-rw-r--r--include/linux/auto_dev-ioctl.h5
-rw-r--r--include/linux/auto_fs.h5
-rw-r--r--include/linux/auxiliary_bus.h251
-rw-r--r--include/linux/auxvec.h3
-rw-r--r--include/linux/average.h11
-rw-r--r--include/linux/avf/virtchnl.h1072
-rw-r--r--include/linux/b1pcmcia.h21
-rw-r--r--include/linux/backing-dev-defs.h131
-rw-r--r--include/linux/backing-dev.h287
-rw-r--r--include/linux/backlight.h446
-rw-r--r--include/linux/badblocks.h1
-rw-r--r--include/linux/balloon_compaction.h107
-rw-r--r--include/linux/base64.h16
-rw-r--r--include/linux/bcd.h5
-rw-r--r--include/linux/bch.h25
-rw-r--r--include/linux/bcm47xx_nvram.h11
-rw-r--r--include/linux/bcm47xx_sprom.h15
-rw-r--r--include/linux/bcm47xx_wdt.h1
-rw-r--r--include/linux/bcm963xx_nvram.h1
-rw-r--r--include/linux/bcm963xx_tag.h3
-rw-r--r--include/linux/bcma/bcma.h12
-rw-r--r--include/linux/bcma/bcma_driver_arm_c9.h1
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h9
-rw-r--r--include/linux/bcma/bcma_driver_gmac_cmn.h1
-rw-r--r--include/linux/bcma/bcma_driver_mips.h1
-rw-r--r--include/linux/bcma/bcma_driver_pci.h1
-rw-r--r--include/linux/bcma/bcma_driver_pcie2.h1
-rw-r--r--include/linux/bcma/bcma_regs.h1
-rw-r--r--include/linux/bcma/bcma_soc.h2
-rw-r--r--include/linux/bfin_mac.h30
-rw-r--r--include/linux/binfmts.h88
-rw-r--r--include/linux/bio.h525
-rw-r--r--include/linux/bit_spinlock.h1
-rw-r--r--include/linux/bitfield.h127
-rw-r--r--include/linux/bitmap.h519
-rw-r--r--include/linux/bitops.h274
-rw-r--r--include/linux/bitrev.h56
-rw-r--r--include/linux/bits.h45
-rw-r--r--include/linux/blk-cgroup.h764
-rw-r--r--include/linux/blk-crypto-profile.h154
-rw-r--r--include/linux/blk-crypto.h135
-rw-r--r--include/linux/blk-integrity.h184
-rw-r--r--include/linux/blk-mq-pci.h6
-rw-r--r--include/linux/blk-mq-virtio.h5
-rw-r--r--include/linux/blk-mq.h1112
-rw-r--r--include/linux/blk-pm.h24
-rw-r--r--include/linux/blk_types.h499
-rw-r--r--include/linux/blkdev.h2194
-rw-r--r--include/linux/blkpg.h1
-rw-r--r--include/linux/blktrace_api.h60
-rw-r--r--include/linux/blockgroup_lock.h1
-rw-r--r--include/linux/bma150.h19
-rw-r--r--include/linux/bootconfig.h302
-rw-r--r--include/linux/bootmem.h374
-rw-r--r--include/linux/bootmem_info.h66
-rw-r--r--include/linux/bottom_half.h10
-rw-r--r--include/linux/bpf-cgroup-defs.h79
-rw-r--r--include/linux/bpf-cgroup.h480
-rw-r--r--include/linux/bpf-netns.h62
-rw-r--r--include/linux/bpf.h2925
-rw-r--r--include/linux/bpf_lirc.h30
-rw-r--r--include/linux/bpf_local_storage.h176
-rw-r--r--include/linux/bpf_lsm.h83
-rw-r--r--include/linux/bpf_mem_alloc.h37
-rw-r--r--include/linux/bpf_trace.h2
-rw-r--r--include/linux/bpf_types.h141
-rw-r--r--include/linux/bpf_verifier.h695
-rw-r--r--include/linux/bpfilter.h25
-rw-r--r--include/linux/bpfptr.h88
-rw-r--r--include/linux/brcmphy.h132
-rw-r--r--include/linux/bsearch.h27
-rw-r--r--include/linux/bsg-lib.h35
-rw-r--r--include/linux/bsg.h40
-rw-r--r--include/linux/btf.h571
-rw-r--r--include/linux/btf_ids.h271
-rw-r--r--include/linux/btree-128.h1
-rw-r--r--include/linux/btree-type.h1
-rw-r--r--include/linux/btree.h3
-rw-r--r--include/linux/btrfs.h1
-rw-r--r--include/linux/buffer_head.h166
-rw-r--r--include/linux/bug.h21
-rw-r--r--include/linux/build-salt.h20
-rw-r--r--include/linux/build_bug.h67
-rw-r--r--include/linux/buildid.h20
-rw-r--r--include/linux/bvec.h271
-rw-r--r--include/linux/byteorder/big_endian.h5
-rw-r--r--include/linux/byteorder/generic.h34
-rw-r--r--include/linux/byteorder/little_endian.h5
-rw-r--r--include/linux/c2port.h9
-rw-r--r--include/linux/cache.h26
-rw-r--r--include/linux/cacheflush.h20
-rw-r--r--include/linux/cacheinfo.h82
-rw-r--r--include/linux/can/bittiming.h165
-rw-r--r--include/linux/can/can-ml.h80
-rw-r--r--include/linux/can/core.h20
-rw-r--r--include/linux/can/dev.h177
-rw-r--r--include/linux/can/dev/peak_canfd.h16
-rw-r--r--include/linux/can/led.h54
-rw-r--r--include/linux/can/length.h174
-rw-r--r--include/linux/can/platform/cc770.h1
-rw-r--r--include/linux/can/platform/flexcan.h23
-rw-r--r--include/linux/can/platform/mcp251x.h21
-rw-r--r--include/linux/can/platform/rcar_can.h17
-rw-r--r--include/linux/can/platform/sja1000.h3
-rw-r--r--include/linux/can/rx-offload.h45
-rw-r--r--include/linux/can/skb.h111
-rw-r--r--include/linux/capability.h170
-rw-r--r--include/linux/cb710.h11
-rw-r--r--include/linux/cc_platform.h117
-rw-r--r--include/linux/cciss_ioctl.h1
-rw-r--r--include/linux/ccp.h21
-rw-r--r--include/linux/cdev.h3
-rw-r--r--include/linux/cdrom.h21
-rw-r--r--include/linux/cdx/cdx_bus.h174
-rw-r--r--include/linux/ceph/auth.h81
-rw-r--r--include/linux/ceph/buffer.h4
-rw-r--r--include/linux/ceph/ceph_debug.h1
-rw-r--r--include/linux/ceph/ceph_features.h41
-rw-r--r--include/linux/ceph/ceph_frag.h1
-rw-r--r--include/linux/ceph/ceph_fs.h110
-rw-r--r--include/linux/ceph/ceph_hash.h1
-rw-r--r--include/linux/ceph/cls_lock_client.h4
-rw-r--r--include/linux/ceph/debugfs.h19
-rw-r--r--include/linux/ceph/decode.h40
-rw-r--r--include/linux/ceph/libceph.h88
-rw-r--r--include/linux/ceph/mdsmap.h15
-rw-r--r--include/linux/ceph/messenger.h429
-rw-r--r--include/linux/ceph/mon_client.h11
-rw-r--r--include/linux/ceph/msgpool.h12
-rw-r--r--include/linux/ceph/msgr.h69
-rw-r--r--include/linux/ceph/osd_client.h113
-rw-r--r--include/linux/ceph/osdmap.h71
-rw-r--r--include/linux/ceph/pagelist.h14
-rw-r--r--include/linux/ceph/rados.h58
-rw-r--r--include/linux/ceph/string_table.h1
-rw-r--r--include/linux/ceph/striper.h71
-rw-r--r--include/linux/ceph/types.h2
-rw-r--r--include/linux/cfag12864b.h18
-rw-r--r--include/linux/cfi.h39
-rw-r--r--include/linux/cfi_types.h45
-rw-r--r--include/linux/cgroup-defs.h388
-rw-r--r--include/linux/cgroup.h362
-rw-r--r--include/linux/cgroup_api.h1
-rw-r--r--include/linux/cgroup_rdma.h7
-rw-r--r--include/linux/cgroup_refcnt.h96
-rw-r--r--include/linux/cgroup_subsys.h5
-rw-r--r--include/linux/circ_buf.h3
-rw-r--r--include/linux/cleancache.h123
-rw-r--r--include/linux/clk-provider.h1060
-rw-r--r--include/linux/clk.h505
-rw-r--r--include/linux/clk/analogbits-wrpll-cln28hpc.h79
-rw-r--r--include/linux/clk/at91_pmc.h118
-rw-r--r--include/linux/clk/bcm2835.h24
-rw-r--r--include/linux/clk/clk-conf.h10
-rw-r--r--include/linux/clk/davinci.h23
-rw-r--r--include/linux/clk/imx.h15
-rw-r--r--include/linux/clk/mmp.h1
-rw-r--r--include/linux/clk/mxs.h5
-rw-r--r--include/linux/clk/pxa.h16
-rw-r--r--include/linux/clk/renesas.h8
-rw-r--r--include/linux/clk/samsung.h24
-rw-r--r--include/linux/clk/spear.h37
-rw-r--r--include/linux/clk/sunxi-ng.h14
-rw-r--r--include/linux/clk/tegra.h173
-rw-r--r--include/linux/clk/ti.h48
-rw-r--r--include/linux/clk/zynq.h15
-rw-r--r--include/linux/clkdev.h19
-rw-r--r--include/linux/clock_cooling.h65
-rw-r--r--include/linux/clockchips.h5
-rw-r--r--include/linux/clocksource.h119
-rw-r--r--include/linux/clocksource_ids.h12
-rw-r--r--include/linux/cm4000_cs.h10
-rw-r--r--include/linux/cma.h36
-rw-r--r--include/linux/cmdline-parser.h45
-rw-r--r--include/linux/cnt32_to_63.h5
-rw-r--r--include/linux/coda.h3
-rw-r--r--include/linux/coda_psdev.h71
-rw-r--r--include/linux/comedi/comedi_8254.h134
-rw-r--r--include/linux/comedi/comedi_8255.h42
-rw-r--r--include/linux/comedi/comedi_isadma.h114
-rw-r--r--include/linux/comedi/comedi_pci.h56
-rw-r--r--include/linux/comedi/comedi_pcmcia.h48
-rw-r--r--include/linux/comedi/comedi_usb.h41
-rw-r--r--include/linux/comedi/comedidev.h1053
-rw-r--r--include/linux/comedi/comedilib.h26
-rw-r--r--include/linux/compaction.h67
-rw-r--r--include/linux/compat.h1146
-rw-r--r--include/linux/compiler-clang.h121
-rw-r--r--include/linux/compiler-gcc.h321
-rw-r--r--include/linux/compiler-intel.h45
-rw-r--r--include/linux/compiler-version.h14
-rw-r--r--include/linux/compiler.h583
-rw-r--r--include/linux/compiler_attributes.h369
-rw-r--r--include/linux/compiler_types.h426
-rw-r--r--include/linux/completion.h22
-rw-r--r--include/linux/component.h95
-rw-r--r--include/linux/concap.h112
-rw-r--r--include/linux/configfs.h33
-rw-r--r--include/linux/connector.h91
-rw-r--r--include/linux/console.h323
-rw-r--r--include/linux/console_struct.h115
-rw-r--r--include/linux/consolemap.h60
-rw-r--r--include/linux/const.h14
-rw-r--r--include/linux/container.h10
-rw-r--r--include/linux/container_of.h38
-rw-r--r--include/linux/context_tracking.h186
-rw-r--r--include/linux/context_tracking_irq.h21
-rw-r--r--include/linux/context_tracking_state.h135
-rw-r--r--include/linux/cookie.h51
-rw-r--r--include/linux/cordic.h9
-rw-r--r--include/linux/coredump.h42
-rw-r--r--include/linux/coresight-pmu.h77
-rw-r--r--include/linux/coresight-stm.h1
-rw-r--r--include/linux/coresight.h513
-rw-r--r--include/linux/count_zeros.h6
-rw-r--r--include/linux/counter.h632
-rw-r--r--include/linux/cper.h524
-rw-r--r--include/linux/cpu.h143
-rw-r--r--include/linux/cpu_cooling.h92
-rw-r--r--include/linux/cpu_pm.h11
-rw-r--r--include/linux/cpu_rmap.h11
-rw-r--r--include/linux/cpufeature.h7
-rw-r--r--include/linux/cpufreq.h623
-rw-r--r--include/linux/cpuhotplug.h229
-rw-r--r--include/linux/cpuidle.h195
-rw-r--r--include/linux/cpuidle_haltpoll.h16
-rw-r--r--include/linux/cpumask.h687
-rw-r--r--include/linux/cpumask_api.h1
-rw-r--r--include/linux/cpuset.h86
-rw-r--r--include/linux/crash_core.h14
-rw-r--r--include/linux/crash_dump.h80
-rw-r--r--include/linux/crc-ccitt.h8
-rw-r--r--include/linux/crc-itu-t.h6
-rw-r--r--include/linux/crc-t10dif.h2
-rw-r--r--include/linux/crc16.h4
-rw-r--r--include/linux/crc32c.h1
-rw-r--r--include/linux/crc32poly.h20
-rw-r--r--include/linux/crc4.h1
-rw-r--r--include/linux/crc64.h18
-rw-r--r--include/linux/crc7.h1
-rw-r--r--include/linux/crc8.h2
-rw-r--r--include/linux/cred.h73
-rw-r--r--include/linux/crush/crush.h22
-rw-r--r--include/linux/crush/hash.h1
-rw-r--r--include/linux/crush/mapper.h1
-rw-r--r--include/linux/crypto.h1329
-rw-r--r--include/linux/cryptohash.h13
-rw-r--r--include/linux/cs5535.h5
-rw-r--r--include/linux/ctype.h16
-rw-r--r--include/linux/cuda.h7
-rw-r--r--include/linux/cyclades.h360
-rw-r--r--include/linux/damon.h638
-rw-r--r--include/linux/dasd_mod.h11
-rw-r--r--include/linux/davinci_emac.h1
-rw-r--r--include/linux/dax.h298
-rw-r--r--include/linux/dca.h18
-rw-r--r--include/linux/dcache.h88
-rw-r--r--include/linux/dccp.h9
-rw-r--r--include/linux/dcookies.h68
-rw-r--r--include/linux/debug_locks.h14
-rw-r--r--include/linux/debugfs.h324
-rw-r--r--include/linux/debugobjects.h35
-rw-r--r--include/linux/decompress/bunzip2.h1
-rw-r--r--include/linux/decompress/generic.h1
-rw-r--r--include/linux/decompress/inflate.h1
-rw-r--r--include/linux/decompress/mm.h13
-rw-r--r--include/linux/decompress/unlz4.h1
-rw-r--r--include/linux/decompress/unlzma.h1
-rw-r--r--include/linux/decompress/unlzo.h1
-rw-r--r--include/linux/decompress/unzstd.h11
-rw-r--r--include/linux/delay.h31
-rw-r--r--include/linux/delayacct.h214
-rw-r--r--include/linux/delayed_call.h1
-rw-r--r--include/linux/dell-led.h6
-rw-r--r--include/linux/dev_printk.h277
-rw-r--r--include/linux/devcoredump.h17
-rw-r--r--include/linux/devfreq-event.h25
-rw-r--r--include/linux/devfreq.h258
-rw-r--r--include/linux/devfreq_cooling.h38
-rw-r--r--include/linux/device-mapper.h352
-rw-r--r--include/linux/device.h1411
-rw-r--r--include/linux/device/bus.h282
-rw-r--r--include/linux/device/class.h232
-rw-r--r--include/linux/device/driver.h291
-rw-r--r--include/linux/device_cgroup.h58
-rw-r--r--include/linux/devm-helpers.h79
-rw-r--r--include/linux/devpts_fs.h15
-rw-r--r--include/linux/dfl.h95
-rw-r--r--include/linux/digsig.h10
-rw-r--r--include/linux/dim.h337
-rw-r--r--include/linux/dio.h6
-rw-r--r--include/linux/dirent.h3
-rw-r--r--include/linux/dlm.h12
-rw-r--r--include/linux/dlm_plock.h5
-rw-r--r--include/linux/dm-bufio.h162
-rw-r--r--include/linux/dm-dirty-log.h9
-rw-r--r--include/linux/dm-io.h13
-rw-r--r--include/linux/dm-kcopyd.h28
-rw-r--r--include/linux/dm-region-hash.h9
-rw-r--r--include/linux/dm-verity-loadpin.h27
-rw-r--r--include/linux/dm9000.h6
-rw-r--r--include/linux/dma-buf.h409
-rw-r--r--include/linux/dma-contiguous.h164
-rw-r--r--include/linux/dma-debug.h213
-rw-r--r--include/linux/dma-direct.h127
-rw-r--r--include/linux/dma-direction.h15
-rw-r--r--include/linux/dma-fence-array.h48
-rw-r--r--include/linux/dma-fence-chain.h129
-rw-r--r--include/linux/dma-fence-unwrap.h75
-rw-r--r--include/linux/dma-fence.h416
-rw-r--r--include/linux/dma-heap.h68
-rw-r--r--include/linux/dma-iommu.h112
-rw-r--r--include/linux/dma-map-ops.h451
-rw-r--r--include/linux/dma-mapping.h903
-rw-r--r--include/linux/dma-resv.h487
-rw-r--r--include/linux/dma/amd_xdma.h16
-rw-r--r--include/linux/dma/dw.h9
-rw-r--r--include/linux/dma/edma.h119
-rw-r--r--include/linux/dma/hsu.h11
-rw-r--r--include/linux/dma/idma64.h14
-rw-r--r--include/linux/dma/imx-dma.h (renamed from include/linux/platform_data/dma-imx.h)45
-rw-r--r--include/linux/dma/ipu-dma.h5
-rw-r--r--include/linux/dma/k3-event-router.h16
-rw-r--r--include/linux/dma/k3-psil.h86
-rw-r--r--include/linux/dma/k3-udma-glue.h146
-rw-r--r--include/linux/dma/mmp-pdma.h15
-rw-r--r--include/linux/dma/mxs-dma.h24
-rw-r--r--include/linux/dma/pxa-dma.h21
-rw-r--r--include/linux/dma/qcom-gpi-dma.h83
-rw-r--r--include/linux/dma/qcom_adm.h12
-rw-r--r--include/linux/dma/qcom_bam_dma.h71
-rw-r--r--include/linux/dma/sprd-dma.h190
-rw-r--r--include/linux/dma/ti-cppi5.h1060
-rw-r--r--include/linux/dma/xilinx_dma.h22
-rw-r--r--include/linux/dma/xilinx_dpdma.h11
-rw-r--r--include/linux/dma_remapping.h57
-rw-r--r--include/linux/dmaengine.h418
-rw-r--r--include/linux/dmapool.h30
-rw-r--r--include/linux/dmar.h80
-rw-r--r--include/linux/dmi.h17
-rw-r--r--include/linux/dnotify.h4
-rw-r--r--include/linux/dns_resolver.h10
-rw-r--r--include/linux/dqblk_qtree.h1
-rw-r--r--include/linux/dqblk_v1.h1
-rw-r--r--include/linux/dqblk_v2.h1
-rw-r--r--include/linux/drbd.h21
-rw-r--r--include/linux/drbd_config.h16
-rw-r--r--include/linux/drbd_genl.h4
-rw-r--r--include/linux/drbd_genl_api.h3
-rw-r--r--include/linux/drbd_limits.h207
-rw-r--r--include/linux/ds2782_battery.h1
-rw-r--r--include/linux/dsa/8021q.h31
-rw-r--r--include/linux/dsa/brcm.h16
-rw-r--r--include/linux/dsa/ksz_common.h53
-rw-r--r--include/linux/dsa/lan9303.h39
-rw-r--r--include/linux/dsa/loop.h42
-rw-r--r--include/linux/dsa/mv88e6xxx.h13
-rw-r--r--include/linux/dsa/ocelot.h276
-rw-r--r--include/linux/dsa/sja1105.h79
-rw-r--r--include/linux/dsa/tag_qca.h87
-rw-r--r--include/linux/dtlk.h1
-rw-r--r--include/linux/dtpm.h73
-rw-r--r--include/linux/dw_apb_timer.h6
-rw-r--r--include/linux/dynamic_debug.h299
-rw-r--r--include/linux/dynamic_queue_limits.h7
-rw-r--r--include/linux/earlycpio.h1
-rw-r--r--include/linux/ecryptfs.h1
-rw-r--r--include/linux/edac.h208
-rw-r--r--include/linux/edd.h11
-rw-r--r--include/linux/edma.h29
-rw-r--r--include/linux/eeprom_93cx6.h15
-rw-r--r--include/linux/eeprom_93xx46.h9
-rw-r--r--include/linux/efi-bgrt.h1
-rw-r--r--include/linux/efi.h1420
-rw-r--r--include/linux/efi_embedded_fw.h41
-rw-r--r--include/linux/efs_vh.h1
-rw-r--r--include/linux/eisa.h1
-rw-r--r--include/linux/elevator.h270
-rw-r--r--include/linux/elf-fdpic.h6
-rw-r--r--include/linux/elf-randomize.h1
-rw-r--r--include/linux/elf.h54
-rw-r--r--include/linux/elfcore-compat.h33
-rw-r--r--include/linux/elfcore.h125
-rw-r--r--include/linux/elfnote-lto.h14
-rw-r--r--include/linux/elfnote.h5
-rw-r--r--include/linux/enclosure.h15
-rw-r--r--include/linux/energy_model.h349
-rw-r--r--include/linux/entry-common.h468
-rw-r--r--include/linux/entry-kvm.h99
-rw-r--r--include/linux/err.h4
-rw-r--r--include/linux/errname.h16
-rw-r--r--include/linux/errno.h3
-rw-r--r--include/linux/error-injection.h28
-rw-r--r--include/linux/errqueue.h1
-rw-r--r--include/linux/errseq.h15
-rw-r--r--include/linux/etherdevice.h134
-rw-r--r--include/linux/ethtool.h769
-rw-r--r--include/linux/ethtool_netlink.h122
-rw-r--r--include/linux/eventfd.h40
-rw-r--r--include/linux/eventpoll.h47
-rw-r--r--include/linux/evm.h90
-rw-r--r--include/linux/export-internal.h19
-rw-r--r--include/linux/export.h196
-rw-r--r--include/linux/exportfs.h26
-rw-r--r--include/linux/ext2_fs.h1
-rw-r--r--include/linux/extable.h13
-rw-r--r--include/linux/extcon-provider.h134
-rw-r--r--include/linux/extcon.h247
-rw-r--r--include/linux/extcon/extcon-adc-jack.h6
-rw-r--r--include/linux/extcon/extcon-gpio.h47
-rw-r--r--include/linux/f2fs_fs.h186
-rw-r--r--include/linux/falloc.h25
-rw-r--r--include/linux/fanotify.h134
-rw-r--r--include/linux/fault-inject-usercopy.h22
-rw-r--r--include/linux/fault-inject.h37
-rw-r--r--include/linux/fb.h164
-rw-r--r--include/linux/fbcon.h46
-rw-r--r--include/linux/fcdevice.h7
-rw-r--r--include/linux/fcntl.h17
-rw-r--r--include/linux/fd.h1
-rw-r--r--include/linux/fddidevice.h6
-rw-r--r--include/linux/fdtable.h51
-rw-r--r--include/linux/fec.h5
-rw-r--r--include/linux/fiemap.h21
-rw-r--r--include/linux/file.h29
-rw-r--r--include/linux/fileattr.h59
-rw-r--r--include/linux/filelock.h439
-rw-r--r--include/linux/filter.h917
-rw-r--r--include/linux/find.h670
-rw-r--r--include/linux/fips.h8
-rw-r--r--include/linux/firewire.h35
-rw-r--r--include/linux/firmware-map.h11
-rw-r--r--include/linux/firmware.h150
-rw-r--r--include/linux/firmware/broadcom/tee_bnxt_fw.h14
-rw-r--r--include/linux/firmware/cirrus/cs_dsp.h329
-rw-r--r--include/linux/firmware/cirrus/wmfw.h203
-rw-r--r--include/linux/firmware/imx/dsp.h77
-rw-r--r--include/linux/firmware/imx/ipc.h71
-rw-r--r--include/linux/firmware/imx/s4.h20
-rw-r--r--include/linux/firmware/imx/sci.h51
-rw-r--r--include/linux/firmware/imx/svc/misc.h77
-rw-r--r--include/linux/firmware/imx/svc/pm.h85
-rw-r--r--include/linux/firmware/imx/svc/rm.h74
-rw-r--r--include/linux/firmware/intel/stratix10-smc.h598
-rw-r--r--include/linux/firmware/intel/stratix10-svc-client.h283
-rw-r--r--include/linux/firmware/mediatek/mtk-adsp-ipc.h65
-rw-r--r--include/linux/firmware/meson/meson_sm.h26
-rw-r--r--include/linux/firmware/qcom/qcom_scm.h125
-rw-r--r--include/linux/firmware/trusted_foundations.h92
-rw-r--r--include/linux/firmware/xlnx-event-manager.h36
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h889
-rw-r--r--include/linux/fixp-arith.h34
-rw-r--r--include/linux/flat.h59
-rw-r--r--include/linux/flex_array.h148
-rw-r--r--include/linux/flex_proportions.h10
-rw-r--r--include/linux/fmc-sdb.h38
-rw-r--r--include/linux/fmc.h237
-rw-r--r--include/linux/font.h22
-rw-r--r--include/linux/fortify-string.h755
-rw-r--r--include/linux/fpga/adi-axi-common.h23
-rw-r--r--include/linux/fpga/altera-pr-ip-core.h14
-rw-r--r--include/linux/fpga/fpga-bridge.h42
-rw-r--r--include/linux/fpga/fpga-mgr.h158
-rw-r--r--include/linux/fpga/fpga-region.h69
-rw-r--r--include/linux/fprobe.h111
-rw-r--r--include/linux/frame.h23
-rw-r--r--include/linux/freelist.h129
-rw-r--r--include/linux/freezer.h233
-rw-r--r--include/linux/frontswap.h29
-rw-r--r--include/linux/fs.h2404
-rw-r--r--include/linux/fs_api.h1
-rw-r--r--include/linux/fs_context.h235
-rw-r--r--include/linux/fs_parser.h135
-rw-r--r--include/linux/fs_pin.h2
-rw-r--r--include/linux/fs_stack.h1
-rw-r--r--include/linux/fs_struct.h5
-rw-r--r--include/linux/fs_types.h75
-rw-r--r--include/linux/fscache-cache.h607
-rw-r--r--include/linux/fscache.h1043
-rw-r--r--include/linux/fscrypt.h1054
-rw-r--r--include/linux/fscrypt_common.h141
-rw-r--r--include/linux/fscrypt_notsupp.h177
-rw-r--r--include/linux/fscrypt_supp.h145
-rw-r--r--include/linux/fsi-occ.h27
-rw-r--r--include/linux/fsi-sbefifo.h25
-rw-r--r--include/linux/fsi.h22
-rw-r--r--include/linux/fsl-diu-fb.h7
-rw-r--r--include/linux/fsl/bestcomm/bestcomm.h2
-rw-r--r--include/linux/fsl/bestcomm/gen_bd.h8
-rw-r--r--include/linux/fsl/edac.h1
-rw-r--r--include/linux/fsl/enetc_mdio.h67
-rw-r--r--include/linux/fsl/ftm.h88
-rw-r--r--include/linux/fsl/guts.h13
-rw-r--r--include/linux/fsl/mc.h683
-rw-r--r--include/linux/fsl/ptp_qoriq.h208
-rw-r--r--include/linux/fsl_devices.h14
-rw-r--r--include/linux/fsl_ifc.h23
-rw-r--r--include/linux/fsldma.h5
-rw-r--r--include/linux/fsnotify.h331
-rw-r--r--include/linux/fsnotify_backend.h642
-rw-r--r--include/linux/fsverity.h318
-rw-r--r--include/linux/ftrace.h633
-rw-r--r--include/linux/ftrace_irq.h25
-rw-r--r--include/linux/futex.h68
-rw-r--r--include/linux/fwnode.h180
-rw-r--r--include/linux/fwnode_mdio.h35
-rw-r--r--include/linux/gameport.h7
-rw-r--r--include/linux/gcd.h1
-rw-r--r--include/linux/genalloc.h93
-rw-r--r--include/linux/generic-radix-tree.h232
-rw-r--r--include/linux/genetlink.h25
-rw-r--r--include/linux/genhd.h735
-rw-r--r--include/linux/genl_magic_func.h21
-rw-r--r--include/linux/genl_magic_struct.h9
-rw-r--r--include/linux/getcpu.h1
-rw-r--r--include/linux/gfp.h452
-rw-r--r--include/linux/gfp_api.h1
-rw-r--r--include/linux/gfp_types.h340
-rw-r--r--include/linux/glob.h1
-rw-r--r--include/linux/gnss.h76
-rw-r--r--include/linux/goldfish.h22
-rw-r--r--include/linux/gpio-fan.h36
-rw-r--r--include/linux/gpio-pxa.h1
-rw-r--r--include/linux/gpio.h175
-rw-r--r--include/linux/gpio/aspeed.h19
-rw-r--r--include/linux/gpio/consumer.h384
-rw-r--r--include/linux/gpio/driver.h793
-rw-r--r--include/linux/gpio/gpio-reg.h7
-rw-r--r--include/linux/gpio/legacy-of-mm-gpiochip.h36
-rw-r--r--include/linux/gpio/machine.h94
-rw-r--r--include/linux/gpio/property.h11
-rw-r--r--include/linux/gpio/regmap.h94
-rw-r--r--include/linux/gpio_keys.h5
-rw-r--r--include/linux/gpio_mouse.h61
-rw-r--r--include/linux/greybus.h152
-rw-r--r--include/linux/greybus/bundle.h92
-rw-r--r--include/linux/greybus/connection.h131
-rw-r--r--include/linux/greybus/control.h60
-rw-r--r--include/linux/greybus/greybus_id.h27
-rw-r--r--include/linux/greybus/greybus_manifest.h181
-rw-r--r--include/linux/greybus/greybus_protocols.h2178
-rw-r--r--include/linux/greybus/hd.h85
-rw-r--r--include/linux/greybus/interface.h85
-rw-r--r--include/linux/greybus/manifest.h17
-rw-r--r--include/linux/greybus/module.h36
-rw-r--r--include/linux/greybus/operation.h229
-rw-r--r--include/linux/greybus/svc.h106
-rw-r--r--include/linux/group_cpus.h14
-rw-r--r--include/linux/hardirq.h119
-rw-r--r--include/linux/hash.h5
-rw-r--r--include/linux/hashtable.h9
-rw-r--r--include/linux/hashtable_api.h1
-rw-r--r--include/linux/hdlc.h9
-rw-r--r--include/linux/hdlcdrv.h3
-rw-r--r--include/linux/hdmi.h120
-rw-r--r--include/linux/hex.h35
-rw-r--r--include/linux/hid-debug.h24
-rw-r--r--include/linux/hid-roccat.h5
-rw-r--r--include/linux/hid-sensor-hub.h38
-rw-r--r--include/linux/hid-sensor-ids.h31
-rw-r--r--include/linux/hid.h468
-rw-r--r--include/linux/hid_bpf.h170
-rw-r--r--include/linux/hidden.h19
-rw-r--r--include/linux/hiddev.h14
-rw-r--r--include/linux/hidraw.h10
-rw-r--r--include/linux/highmem-internal.h290
-rw-r--r--include/linux/highmem.h568
-rw-r--r--include/linux/highuid.h1
-rw-r--r--include/linux/hil_mlc.h8
-rw-r--r--include/linux/hippidevice.h10
-rw-r--r--include/linux/hisi_acc_qm.h536
-rw-r--r--include/linux/hmm.h116
-rw-r--r--include/linux/host1x.h289
-rw-r--r--include/linux/host1x_context_bus.h15
-rw-r--r--include/linux/hp_sdc.h4
-rw-r--r--include/linux/hpet.h3
-rw-r--r--include/linux/hrtimer.h211
-rw-r--r--include/linux/hrtimer_api.h1
-rw-r--r--include/linux/hrtimer_defs.h27
-rw-r--r--include/linux/hsi/hsi.h15
-rw-r--r--include/linux/hsi/ssi_protocol.h15
-rw-r--r--include/linux/htcpld.h24
-rw-r--r--include/linux/hte.h271
-rw-r--r--include/linux/htirq.h38
-rw-r--r--include/linux/huge_mm.h307
-rw-r--r--include/linux/hugetlb.h1022
-rw-r--r--include/linux/hugetlb_cgroup.h233
-rw-r--r--include/linux/hugetlb_inline.h1
-rw-r--r--include/linux/hw_breakpoint.h27
-rw-r--r--include/linux/hw_random.h10
-rw-r--r--include/linux/hwmon-sysfs.h55
-rw-r--r--include/linux/hwmon-vid.h14
-rw-r--r--include/linux/hwmon.h120
-rw-r--r--include/linux/hwspinlock.h164
-rw-r--r--include/linux/hyperv.h828
-rw-r--r--include/linux/hypervisor.h34
-rw-r--r--include/linux/i2c-algo-bit.h34
-rw-r--r--include/linux/i2c-algo-pca.h16
-rw-r--r--include/linux/i2c-algo-pcf.h16
-rw-r--r--include/linux/i2c-dev.h15
-rw-r--r--include/linux/i2c-mux-pinctrl.h41
-rw-r--r--include/linux/i2c-mux.h18
-rw-r--r--include/linux/i2c-pnx.h38
-rw-r--r--include/linux/i2c-pxa.h17
-rw-r--r--include/linux/i2c-smbus.h47
-rw-r--r--include/linux/i2c.h649
-rw-r--r--include/linux/i2c/bfin_twi.h145
-rw-r--r--include/linux/i2c/dm355evm_msp.h79
-rw-r--r--include/linux/i2c/mlxcpld.h52
-rw-r--r--include/linux/i2c/pca954x.h48
-rw-r--r--include/linux/i2c/pxa-i2c.h85
-rw-r--r--include/linux/i2c/tc35876x.h11
-rw-r--r--include/linux/i3c/ccc.h385
-rw-r--r--include/linux/i3c/device.h346
-rw-r--r--include/linux/i3c/master.h655
-rw-r--r--include/linux/i7300_idle.h83
-rw-r--r--include/linux/i8042.h6
-rw-r--r--include/linux/i8253.h1
-rw-r--r--include/linux/icmp.h26
-rw-r--r--include/linux/icmpv6.h74
-rw-r--r--include/linux/ide.h1617
-rw-r--r--include/linux/idle_inject.h36
-rw-r--r--include/linux/idr.h239
-rw-r--r--include/linux/ieee80211.h2116
-rw-r--r--include/linux/ieee802154.h122
-rw-r--r--include/linux/if_arp.h11
-rw-r--r--include/linux/if_bridge.h138
-rw-r--r--include/linux/if_eql.h2
-rw-r--r--include/linux/if_ether.h14
-rw-r--r--include/linux/if_fddi.h6
-rw-r--r--include/linux/if_frad.h95
-rw-r--r--include/linux/if_hsr.h47
-rw-r--r--include/linux/if_link.h3
-rw-r--r--include/linux/if_ltalk.h1
-rw-r--r--include/linux/if_macvlan.h62
-rw-r--r--include/linux/if_phonet.h3
-rw-r--r--include/linux/if_pppol2tp.h8
-rw-r--r--include/linux/if_pppox.h11
-rw-r--r--include/linux/if_rmnet.h74
-rw-r--r--include/linux/if_tap.h27
-rw-r--r--include/linux/if_team.h44
-rw-r--r--include/linux/if_tun.h59
-rw-r--r--include/linux/if_tunnel.h1
-rw-r--r--include/linux/if_vlan.h281
-rw-r--r--include/linux/igmp.h43
-rw-r--r--include/linux/ihex.h32
-rw-r--r--include/linux/iio/accel/kxcjk_1013.h13
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h110
-rw-r--r--include/linux/iio/adc/adi-axi-adc.h64
-rw-r--r--include/linux/iio/adc/qcom-vadc-common.h167
-rw-r--r--include/linux/iio/adc/stm32-dfsdm-adc.h20
-rw-r--r--include/linux/iio/afe/rescale.h36
-rw-r--r--include/linux/iio/buffer-dma.h12
-rw-r--r--include/linux/iio/buffer-dmaengine.h11
-rw-r--r--include/linux/iio/buffer.h21
-rw-r--r--include/linux/iio/buffer_impl.h56
-rw-r--r--include/linux/iio/common/cros_ec_sensors_core.h130
-rw-r--r--include/linux/iio/common/ssp_sensors.h12
-rw-r--r--include/linux/iio/common/st_sensors.h162
-rw-r--r--include/linux/iio/common/st_sensors_i2c.h27
-rw-r--r--include/linux/iio/common/st_sensors_spi.h7
-rw-r--r--include/linux/iio/configfs.h5
-rw-r--r--include/linux/iio/consumer.h113
-rw-r--r--include/linux/iio/dac/ad5421.h1
-rw-r--r--include/linux/iio/dac/ad5504.h3
-rw-r--r--include/linux/iio/dac/ad5791.h3
-rw-r--r--include/linux/iio/dac/max517.h3
-rw-r--r--include/linux/iio/dac/mcp4725.h5
-rw-r--r--include/linux/iio/driver.h20
-rw-r--r--include/linux/iio/events.h5
-rw-r--r--include/linux/iio/frequency/ad9523.h11
-rw-r--r--include/linux/iio/frequency/adf4350.h7
-rw-r--r--include/linux/iio/gyro/itg3200.h8
-rw-r--r--include/linux/iio/hw-consumer.h20
-rw-r--r--include/linux/iio/iio-gts-helper.h206
-rw-r--r--include/linux/iio/iio-opaque.h82
-rw-r--r--include/linux/iio/iio.h307
-rw-r--r--include/linux/iio/imu/adis.h373
-rw-r--r--include/linux/iio/kfifo_buf.h13
-rw-r--r--include/linux/iio/machine.h12
-rw-r--r--include/linux/iio/magnetometer/ak8975.h16
-rw-r--r--include/linux/iio/sw_device.h7
-rw-r--r--include/linux/iio/sw_trigger.h7
-rw-r--r--include/linux/iio/sysfs.h19
-rw-r--r--include/linux/iio/timer/stm32-lptim-trigger.h29
-rw-r--r--include/linux/iio/timer/stm32-timer-trigger.h28
-rw-r--r--include/linux/iio/trigger.h51
-rw-r--r--include/linux/iio/trigger_consumer.h14
-rw-r--r--include/linux/iio/triggered_buffer.h33
-rw-r--r--include/linux/iio/triggered_event.h1
-rw-r--r--include/linux/iio/types.h42
-rw-r--r--include/linux/ima.h172
-rw-r--r--include/linux/imx-media.h6
-rw-r--r--include/linux/in.h13
-rw-r--r--include/linux/in6.h6
-rw-r--r--include/linux/indirect_call_wrapper.h71
-rw-r--r--include/linux/inet.h7
-rw-r--r--include/linux/inet_diag.h59
-rw-r--r--include/linux/inetdevice.h66
-rw-r--r--include/linux/init.h178
-rw-r--r--include/linux/init_ohci1394_dma.h1
-rw-r--r--include/linux/init_syscalls.h19
-rw-r--r--include/linux/init_task.h285
-rw-r--r--include/linux/initrd.h28
-rw-r--r--include/linux/inotify.h6
-rw-r--r--include/linux/input-polldev.h61
-rw-r--r--include/linux/input.h56
-rw-r--r--include/linux/input/ad714x.h3
-rw-r--r--include/linux/input/adp5589.h10
-rw-r--r--include/linux/input/adxl34x.h3
-rw-r--r--include/linux/input/as5011.h5
-rw-r--r--include/linux/input/auo-pixcir-ts.h54
-rw-r--r--include/linux/input/bu21013.h34
-rw-r--r--include/linux/input/cma3000.h13
-rw-r--r--include/linux/input/cy8ctmg110_pdata.h10
-rw-r--r--include/linux/input/cyttsp.h43
-rw-r--r--include/linux/input/elan-i2c-ids.h80
-rw-r--r--include/linux/input/gp2ap002a00f.h22
-rw-r--r--include/linux/input/gpio_tilt.h73
-rw-r--r--include/linux/input/ili210x.h10
-rw-r--r--include/linux/input/kxtj9.h15
-rw-r--r--include/linux/input/lm8333.h2
-rw-r--r--include/linux/input/matrix_keypad.h6
-rw-r--r--include/linux/input/mt.h12
-rw-r--r--include/linux/input/navpoint.h5
-rw-r--r--include/linux/input/samsung-keypad.h6
-rw-r--r--include/linux/input/sh_keysc.h1
-rw-r--r--include/linux/input/sparse-keymap.h6
-rw-r--r--include/linux/input/touchscreen.h5
-rw-r--r--include/linux/input/vivaldi-fmap.h27
-rw-r--r--include/linux/instruction_pointer.h8
-rw-r--r--include/linux/instrumentation.h61
-rw-r--r--include/linux/instrumented.h181
-rw-r--r--include/linux/integrity.h20
-rw-r--r--include/linux/intel-iommu.h494
-rw-r--r--include/linux/intel-ish-client-if.h121
-rw-r--r--include/linux/intel-svm.h141
-rw-r--r--include/linux/intel_rapl.h163
-rw-r--r--include/linux/intel_tcc.h18
-rw-r--r--include/linux/intel_th.h79
-rw-r--r--include/linux/intel_tpmi.h30
-rw-r--r--include/linux/interconnect-provider.h181
-rw-r--r--include/linux/interconnect.h146
-rw-r--r--include/linux/interrupt.h361
-rw-r--r--include/linux/interval_tree.h67
-rw-r--r--include/linux/interval_tree_generic.h84
-rw-r--r--include/linux/io-64-nonatomic-hi-lo.h65
-rw-r--r--include/linux/io-64-nonatomic-lo-hi.h65
-rw-r--r--include/linux/io-mapping.h89
-rw-r--r--include/linux/io-pgtable.h262
-rw-r--r--include/linux/io.h49
-rw-r--r--include/linux/io_uring.h107
-rw-r--r--include/linux/io_uring_types.h595
-rw-r--r--include/linux/ioam6.h13
-rw-r--r--include/linux/ioam6_genl.h13
-rw-r--r--include/linux/ioam6_iptunnel.h13
-rw-r--r--include/linux/ioc3.h93
-rw-r--r--include/linux/ioc4.h184
-rw-r--r--include/linux/iocontext.h57
-rw-r--r--include/linux/iomap.h338
-rw-r--r--include/linux/iommu-common.h52
-rw-r--r--include/linux/iommu-helper.h18
-rw-r--r--include/linux/iommu.h916
-rw-r--r--include/linux/iommufd.h105
-rw-r--r--include/linux/iopoll.h117
-rw-r--r--include/linux/ioport.h89
-rw-r--r--include/linux/ioprio.h72
-rw-r--r--include/linux/iosys-map.h516
-rw-r--r--include/linux/iova.h72
-rw-r--r--include/linux/iova_bitmap.h26
-rw-r--r--include/linux/ip.h32
-rw-r--r--include/linux/ipack.h5
-rw-r--r--include/linux/ipc.h11
-rw-r--r--include/linux/ipc_namespace.h72
-rw-r--r--include/linux/ipmi-fru.h135
-rw-r--r--include/linux/ipmi.h194
-rw-r--r--include/linux/ipmi_smi.h269
-rw-r--r--include/linux/ipv6.h74
-rw-r--r--include/linux/ipv6_route.h6
-rw-r--r--include/linux/irq.h312
-rw-r--r--include/linux/irq_cpustat.h31
-rw-r--r--include/linux/irq_poll.h1
-rw-r--r--include/linux/irq_sim.h26
-rw-r--r--include/linux/irq_work.h49
-rw-r--r--include/linux/irqbypass.h5
-rw-r--r--include/linux/irqchip.h49
-rw-r--r--include/linux/irqchip/arm-gic-common.h33
-rw-r--r--include/linux/irqchip/arm-gic-v3.h316
-rw-r--r--include/linux/irqchip/arm-gic-v4.h150
-rw-r--r--include/linux/irqchip/arm-gic.h32
-rw-r--r--include/linux/irqchip/arm-vgic-info.h45
-rw-r--r--include/linux/irqchip/arm-vic.h26
-rw-r--r--include/linux/irqchip/chained_irq.h13
-rw-r--r--include/linux/irqchip/ingenic.h23
-rw-r--r--include/linux/irqchip/irq-bcm2836.h61
-rw-r--r--include/linux/irqchip/irq-davinci-aintc.h27
-rw-r--r--include/linux/irqchip/irq-davinci-cp-intc.h25
-rw-r--r--include/linux/irqchip/irq-madera.h132
-rw-r--r--include/linux/irqchip/irq-omap-intc.h14
-rw-r--r--include/linux/irqchip/irq-partition-percpu.h18
-rw-r--r--include/linux/irqchip/irq-sa11x0.h5
-rw-r--r--include/linux/irqchip/metag-ext.h33
-rw-r--r--include/linux/irqchip/metag.h24
-rw-r--r--include/linux/irqchip/mips-gic.h297
-rw-r--r--include/linux/irqchip/mmp.h4
-rw-r--r--include/linux/irqchip/mxs.h5
-rw-r--r--include/linux/irqchip/versatile-fpga.h13
-rw-r--r--include/linux/irqdesc.h90
-rw-r--r--include/linux/irqdomain.h315
-rw-r--r--include/linux/irqdomain_defs.h31
-rw-r--r--include/linux/irqflags.h203
-rw-r--r--include/linux/irqhandler.h2
-rw-r--r--include/linux/irqnr.h1
-rw-r--r--include/linux/irqreturn.h9
-rw-r--r--include/linux/isa-dma.h14
-rw-r--r--include/linux/isa.h55
-rw-r--r--include/linux/isapnp.h23
-rw-r--r--include/linux/iscsi_boot_sysfs.h10
-rw-r--r--include/linux/iscsi_ibft.h28
-rw-r--r--include/linux/isdn.h473
-rw-r--r--include/linux/isdn/capilli.h20
-rw-r--r--include/linux/isdn/capiutil.h456
-rw-r--r--include/linux/isdn/hdlc.h82
-rw-r--r--include/linux/isdn_divertif.h35
-rw-r--r--include/linux/isdn_ppp.h194
-rw-r--r--include/linux/isdnif.h505
-rw-r--r--include/linux/isicom.h84
-rw-r--r--include/linux/ism.h98
-rw-r--r--include/linux/iversion.h300
-rw-r--r--include/linux/jbd2.h846
-rw-r--r--include/linux/jhash.h35
-rw-r--r--include/linux/jiffies.h39
-rw-r--r--include/linux/journal-head.h22
-rw-r--r--include/linux/joystick.h18
-rw-r--r--include/linux/jump_label.h230
-rw-r--r--include/linux/jump_label_ratelimit.h80
-rw-r--r--include/linux/jz4740-adc.h1
-rw-r--r--include/linux/jz4780-nemc.h6
-rw-r--r--include/linux/kallsyms.h120
-rw-r--r--include/linux/kasan-checks.h48
-rw-r--r--include/linux/kasan-enabled.h35
-rw-r--r--include/linux/kasan-tags.h15
-rw-r--r--include/linux/kasan.h485
-rw-r--r--include/linux/kbd_diacr.h1
-rw-r--r--include/linux/kbd_kern.h14
-rw-r--r--include/linux/kbuild.h1
-rw-r--r--include/linux/kconfig.h13
-rw-r--r--include/linux/kcore.h13
-rw-r--r--include/linux/kcov.h72
-rw-r--r--include/linux/kcsan-checks.h533
-rw-r--r--include/linux/kcsan.h75
-rw-r--r--include/linux/kd.h7
-rw-r--r--include/linux/kdb.h32
-rw-r--r--include/linux/kdebug.h1
-rw-r--r--include/linux/kdev_t.h23
-rw-r--r--include/linux/kern_levels.h3
-rw-r--r--include/linux/kernel-page-flags.h3
-rw-r--r--include/linux/kernel.h684
-rw-r--r--include/linux/kernel_read_file.h55
-rw-r--r--include/linux/kernel_stat.h42
-rw-r--r--include/linux/kernelcapi.h76
-rw-r--r--include/linux/kernfs.h255
-rw-r--r--include/linux/kexec.h265
-rw-r--r--include/linux/key-type.h49
-rw-r--r--include/linux/key.h222
-rw-r--r--include/linux/keyboard.h1
-rw-r--r--include/linux/keyctl.h42
-rw-r--r--include/linux/kfence.h250
-rw-r--r--include/linux/kfifo.h108
-rw-r--r--include/linux/kgdb.h80
-rw-r--r--include/linux/khugepaged.h73
-rw-r--r--include/linux/klist.h4
-rw-r--r--include/linux/kmemcheck.h171
-rw-r--r--include/linux/kmemleak.h30
-rw-r--r--include/linux/kmod.h75
-rw-r--r--include/linux/kmsan-checks.h83
-rw-r--r--include/linux/kmsan.h334
-rw-r--r--include/linux/kmsan_string.h21
-rw-r--r--include/linux/kmsan_types.h35
-rw-r--r--include/linux/kmsg_dump.h59
-rw-r--r--include/linux/kobj_map.h1
-rw-r--r--include/linux/kobject.h91
-rw-r--r--include/linux/kobject_api.h1
-rw-r--r--include/linux/kobject_ns.h9
-rw-r--r--include/linux/kprobes.h371
-rw-r--r--include/linux/kref.h9
-rw-r--r--include/linux/kref_api.h1
-rw-r--r--include/linux/ks0108.h18
-rw-r--r--include/linux/ks8842.h14
-rw-r--r--include/linux/ks8851_mll.h14
-rw-r--r--include/linux/ksm.h73
-rw-r--r--include/linux/kstrtox.h155
-rw-r--r--include/linux/kthread.h87
-rw-r--r--include/linux/ktime.h54
-rw-r--r--include/linux/ktime_api.h1
-rw-r--r--include/linux/kvm_dirty_ring.h101
-rw-r--r--include/linux/kvm_host.h1474
-rw-r--r--include/linux/kvm_irqfd.h13
-rw-r--r--include/linux/kvm_para.h6
-rw-r--r--include/linux/kvm_types.h92
-rw-r--r--include/linux/l2tp.h1
-rw-r--r--include/linux/lantiq.h23
-rw-r--r--include/linux/lapb.h6
-rw-r--r--include/linux/latencytop.h8
-rw-r--r--include/linux/lcd.h11
-rw-r--r--include/linux/lcm.h1
-rw-r--r--include/linux/led-class-flash.h80
-rw-r--r--include/linux/led-class-multicolor.h109
-rw-r--r--include/linux/led-lm3530.h3
-rw-r--r--include/linux/leds-bd2802.h7
-rw-r--r--include/linux/leds-lp3944.h6
-rw-r--r--include/linux/leds-lp3952.h6
-rw-r--r--include/linux/leds-pca9532.h6
-rw-r--r--include/linux/leds-regulator.h6
-rw-r--r--include/linux/leds-tca6507.h34
-rw-r--r--include/linux/leds-ti-lmu-common.h47
-rw-r--r--include/linux/leds.h383
-rw-r--r--include/linux/leds_pwm.h21
-rw-r--r--include/linux/lguest.h73
-rw-r--r--include/linux/lguest_launcher.h44
-rw-r--r--include/linux/libata.h603
-rw-r--r--include/linux/libfdt.h2
-rw-r--r--include/linux/libfdt_env.h11
-rw-r--r--include/linux/libgcc.h37
-rw-r--r--include/linux/libnvdimm.h242
-rw-r--r--include/linux/libps2.h43
-rw-r--r--include/linux/license.h1
-rw-r--r--include/linux/lightnvm.h510
-rw-r--r--include/linux/limits.h27
-rw-r--r--include/linux/linear_range.h61
-rw-r--r--include/linux/linkage.h284
-rw-r--r--include/linux/linkmode.h98
-rw-r--r--include/linux/linux_logo.h4
-rw-r--r--include/linux/lis3lv02d.h1
-rw-r--r--include/linux/list.h396
-rw-r--r--include/linux/list_bl.h27
-rw-r--r--include/linux/list_lru.h61
-rw-r--r--include/linux/list_nulls.h31
-rw-r--r--include/linux/list_sort.h9
-rw-r--r--include/linux/litex.h83
-rw-r--r--include/linux/livepatch.h140
-rw-r--r--include/linux/livepatch_sched.h29
-rw-r--r--include/linux/llist.h64
-rw-r--r--include/linux/llist_api.h1
-rw-r--r--include/linux/local_lock.h54
-rw-r--r--include/linux/local_lock_internal.h141
-rw-r--r--include/linux/lockd/bind.h7
-rw-r--r--include/linux/lockd/debug.h5
-rw-r--r--include/linux/lockd/lockd.h68
-rw-r--r--include/linux/lockd/nlm.h1
-rw-r--r--include/linux/lockd/share.h1
-rw-r--r--include/linux/lockd/xdr.h37
-rw-r--r--include/linux/lockd/xdr4.h34
-rw-r--r--include/linux/lockdep.h547
-rw-r--r--include/linux/lockdep_api.h1
-rw-r--r--include/linux/lockdep_types.h208
-rw-r--r--include/linux/lockref.h5
-rw-r--r--include/linux/log2.h120
-rw-r--r--include/linux/logic_iomem.h62
-rw-r--r--include/linux/logic_pio.h124
-rw-r--r--include/linux/lp.h1
-rw-r--r--include/linux/lru_cache.h25
-rw-r--r--include/linux/lsm_audit.h18
-rw-r--r--include/linux/lsm_hook_defs.h419
-rw-r--r--include/linux/lsm_hooks.h1982
-rw-r--r--include/linux/lz4.h18
-rw-r--r--include/linux/lzo.h7
-rw-r--r--include/linux/mISDNdsp.h1
-rw-r--r--include/linux/mISDNhw.h11
-rw-r--r--include/linux/mISDNif.h3
-rw-r--r--include/linux/mailbox/arm_mhuv2_message.h20
-rw-r--r--include/linux/mailbox/brcm-message.h5
-rw-r--r--include/linux/mailbox/mtk-cmdq-mailbox.h83
-rw-r--r--include/linux/mailbox/zynqmp-ipi-message.h20
-rw-r--r--include/linux/mailbox_client.h7
-rw-r--r--include/linux/mailbox_controller.h16
-rw-r--r--include/linux/map_benchmark.h31
-rw-r--r--include/linux/maple.h1
-rw-r--r--include/linux/maple_tree.h697
-rw-r--r--include/linux/marvell_phy.h18
-rw-r--r--include/linux/math.h189
-rw-r--r--include/linux/math64.h135
-rw-r--r--include/linux/max17040_battery.h19
-rw-r--r--include/linux/mbcache.h43
-rw-r--r--include/linux/mbus.h4
-rw-r--r--include/linux/mc146818rtc.h8
-rw-r--r--include/linux/mc6821.h1
-rw-r--r--include/linux/mcb.h14
-rw-r--r--include/linux/mdev.h163
-rw-r--r--include/linux/mdio-bitbang.h11
-rw-r--r--include/linux/mdio-gpio.h9
-rw-r--r--include/linux/mdio-mux.h9
-rw-r--r--include/linux/mdio.h326
-rw-r--r--include/linux/mdio/mdio-i2c.h24
-rw-r--r--include/linux/mdio/mdio-mscc-miim.h19
-rw-r--r--include/linux/mdio/mdio-xgene.h134
-rw-r--r--include/linux/mei_aux.h31
-rw-r--r--include/linux/mei_cl_bus.h28
-rw-r--r--include/linux/mem_encrypt.h36
-rw-r--r--include/linux/memblock.h445
-rw-r--r--include/linux/memcontrol.h1590
-rw-r--r--include/linux/memfd.h16
-rw-r--r--include/linux/memory-tiers.h101
-rw-r--r--include/linux/memory.h153
-rw-r--r--include/linux/memory_hotplug.h402
-rw-r--r--include/linux/mempolicy.h62
-rw-r--r--include/linux/mempool.h40
-rw-r--r--include/linux/memregion.h63
-rw-r--r--include/linux/memremap.h244
-rw-r--r--include/linux/memstick.h9
-rw-r--r--include/linux/mfd/88pm80x.h5
-rw-r--r--include/linux/mfd/88pm860x.h5
-rw-r--r--include/linux/mfd/aat2870.h16
-rw-r--r--include/linux/mfd/ab3100.h129
-rw-r--r--include/linux/mfd/abx500.h279
-rw-r--r--include/linux/mfd/abx500/ab8500-bm.h478
-rw-r--r--include/linux/mfd/abx500/ab8500-codec.h5
-rw-r--r--include/linux/mfd/abx500/ab8500-gpadc.h75
-rw-r--r--include/linux/mfd/abx500/ab8500-sysctrl.h2
-rw-r--r--include/linux/mfd/abx500/ab8500.h5
-rw-r--r--include/linux/mfd/abx500/ux500_chargalg.h55
-rw-r--r--include/linux/mfd/ac100.h5
-rw-r--r--include/linux/mfd/adp5520.h3
-rw-r--r--include/linux/mfd/altera-a10sr.h13
-rw-r--r--include/linux/mfd/altera-sysmgr.h29
-rw-r--r--include/linux/mfd/arizona/core.h5
-rw-r--r--include/linux/mfd/arizona/pdata.h11
-rw-r--r--include/linux/mfd/arizona/registers.h12
-rw-r--r--include/linux/mfd/as3711.h9
-rw-r--r--include/linux/mfd/as3722.h19
-rw-r--r--include/linux/mfd/asic3.h316
-rw-r--r--include/linux/mfd/atc260x/atc2603c.h281
-rw-r--r--include/linux/mfd/atc260x/atc2609a.h308
-rw-r--r--include/linux/mfd/atc260x/core.h58
-rw-r--r--include/linux/mfd/atmel-hlcdc.h13
-rw-r--r--include/linux/mfd/axp20x.h161
-rw-r--r--include/linux/mfd/bcm2835-pm.h15
-rw-r--r--include/linux/mfd/bcm590xx.h7
-rw-r--r--include/linux/mfd/bd9571mwv.h109
-rw-r--r--include/linux/mfd/core.h101
-rw-r--r--include/linux/mfd/cros_ec.h343
-rw-r--r--include/linux/mfd/cros_ec_commands.h2946
-rw-r--r--include/linux/mfd/cros_ec_lpc_mec.h90
-rw-r--r--include/linux/mfd/cros_ec_lpc_reg.h61
-rw-r--r--include/linux/mfd/da8xx-cfgchip.h11
-rw-r--r--include/linux/mfd/da903x.h1
-rw-r--r--include/linux/mfd/da9052/da9052.h22
-rw-r--r--include/linux/mfd/da9052/pdata.h16
-rw-r--r--include/linux/mfd/da9052/reg.h27
-rw-r--r--include/linux/mfd/da9055/core.h16
-rw-r--r--include/linux/mfd/da9055/pdata.h14
-rw-r--r--include/linux/mfd/da9055/reg.h16
-rw-r--r--include/linux/mfd/da9062/core.h11
-rw-r--r--include/linux/mfd/da9062/registers.h14
-rw-r--r--include/linux/mfd/da9063/core.h24
-rw-r--r--include/linux/mfd/da9063/pdata.h112
-rw-r--r--include/linux/mfd/da9063/registers.h54
-rw-r--r--include/linux/mfd/da9150/core.h6
-rw-r--r--include/linux/mfd/da9150/registers.h6
-rw-r--r--include/linux/mfd/davinci_voicecodec.h15
-rw-r--r--include/linux/mfd/db8500-prcmu.h26
-rw-r--r--include/linux/mfd/dbx500-prcmu.h65
-rw-r--r--include/linux/mfd/dln2.h1
-rw-r--r--include/linux/mfd/ds1wm.h30
-rw-r--r--include/linux/mfd/ezx-pcap.h1
-rw-r--r--include/linux/mfd/gsc.h76
-rw-r--r--include/linux/mfd/hi6421-pmic.h12
-rw-r--r--include/linux/mfd/hi655x-pmic.h11
-rw-r--r--include/linux/mfd/htc-pasic3.h54
-rw-r--r--include/linux/mfd/idt82p33_reg.h115
-rw-r--r--include/linux/mfd/idt8a340_reg.h768
-rw-r--r--include/linux/mfd/imx25-tsadc.h1
-rw-r--r--include/linux/mfd/ingenic-tcu.h56
-rw-r--r--include/linux/mfd/intel-m10-bmc.h291
-rw-r--r--include/linux/mfd/intel_msic.h456
-rw-r--r--include/linux/mfd/intel_pmc_bxt.h53
-rw-r--r--include/linux/mfd/intel_soc_pmic.h40
-rw-r--r--include/linux/mfd/intel_soc_pmic_bxtwc.h10
-rw-r--r--include/linux/mfd/intel_soc_pmic_mrfld.h81
-rw-r--r--include/linux/mfd/ipaq-micro.h5
-rw-r--r--include/linux/mfd/iqs62x.h143
-rw-r--r--include/linux/mfd/janz.h6
-rw-r--r--include/linux/mfd/kempld.h5
-rw-r--r--include/linux/mfd/khadas-mcu.h91
-rw-r--r--include/linux/mfd/lm3533.h6
-rw-r--r--include/linux/mfd/lochnagar.h55
-rw-r--r--include/linux/mfd/lochnagar1_regs.h157
-rw-r--r--include/linux/mfd/lochnagar2_regs.h291
-rw-r--r--include/linux/mfd/lp3943.h6
-rw-r--r--include/linux/mfd/lp873x.h12
-rw-r--r--include/linux/mfd/lp87565.h51
-rw-r--r--include/linux/mfd/lp8788-isink.h6
-rw-r--r--include/linux/mfd/lp8788.h22
-rw-r--r--include/linux/mfd/lpc_ich.h16
-rw-r--r--include/linux/mfd/madera/core.h210
-rw-r--r--include/linux/mfd/madera/pdata.h58
-rw-r--r--include/linux/mfd/madera/registers.h3449
-rw-r--r--include/linux/mfd/max14577-private.h11
-rw-r--r--include/linux/mfd/max14577.h11
-rw-r--r--include/linux/mfd/max597x.h96
-rw-r--r--include/linux/mfd/max77620.h11
-rw-r--r--include/linux/mfd/max77650.h59
-rw-r--r--include/linux/mfd/max77686-private.h43
-rw-r--r--include/linux/mfd/max77686.h15
-rw-r--r--include/linux/mfd/max77693-common.h6
-rw-r--r--include/linux/mfd/max77693-private.h17
-rw-r--r--include/linux/mfd/max77693.h15
-rw-r--r--include/linux/mfd/max77714.h60
-rw-r--r--include/linux/mfd/max77843-private.h11
-rw-r--r--include/linux/mfd/max8907.h5
-rw-r--r--include/linux/mfd/max8925.h5
-rw-r--r--include/linux/mfd/max8997-private.h15
-rw-r--r--include/linux/mfd/max8997.h27
-rw-r--r--include/linux/mfd/max8998-private.h15
-rw-r--r--include/linux/mfd/max8998.h16
-rw-r--r--include/linux/mfd/mc13783.h5
-rw-r--r--include/linux/mfd/mc13892.h5
-rw-r--r--include/linux/mfd/mc13xxx.h8
-rw-r--r--include/linux/mfd/mcp.h5
-rw-r--r--include/linux/mfd/menelaus.h1
-rw-r--r--include/linux/mfd/motorola-cpcap.h5
-rw-r--r--include/linux/mfd/mp2629.h26
-rw-r--r--include/linux/mfd/mt6323/core.h5
-rw-r--r--include/linux/mfd/mt6323/registers.h5
-rw-r--r--include/linux/mfd/mt6331/core.h40
-rw-r--r--include/linux/mfd/mt6331/registers.h584
-rw-r--r--include/linux/mfd/mt6332/core.h65
-rw-r--r--include/linux/mfd/mt6332/registers.h642
-rw-r--r--include/linux/mfd/mt6357/core.h119
-rw-r--r--include/linux/mfd/mt6357/registers.h1574
-rw-r--r--include/linux/mfd/mt6358/core.h156
-rw-r--r--include/linux/mfd/mt6358/registers.h291
-rw-r--r--include/linux/mfd/mt6359/core.h133
-rw-r--r--include/linux/mfd/mt6359/registers.h531
-rw-r--r--include/linux/mfd/mt6359p/registers.h249
-rw-r--r--include/linux/mfd/mt6397/core.h31
-rw-r--r--include/linux/mfd/mt6397/registers.h10
-rw-r--r--include/linux/mfd/mt6397/rtc.h86
-rw-r--r--include/linux/mfd/mxs-lradc.h11
-rw-r--r--include/linux/mfd/ntxec.h38
-rw-r--r--include/linux/mfd/ocelot.h62
-rw-r--r--include/linux/mfd/palmas.h22
-rw-r--r--include/linux/mfd/pcf50633/adc.h6
-rw-r--r--include/linux/mfd/pcf50633/backlight.h11
-rw-r--r--include/linux/mfd/pcf50633/core.h12
-rw-r--r--include/linux/mfd/pcf50633/gpio.h6
-rw-r--r--include/linux/mfd/pcf50633/mbc.h6
-rw-r--r--include/linux/mfd/pcf50633/pmic.h1
-rw-r--r--include/linux/mfd/qcom_rpm.h1
-rw-r--r--include/linux/mfd/rave-sp.h62
-rw-r--r--include/linux/mfd/rc5t583.h14
-rw-r--r--include/linux/mfd/rdc321x.h1
-rw-r--r--include/linux/mfd/rk808.h481
-rw-r--r--include/linux/mfd/rn5t618.h50
-rw-r--r--include/linux/mfd/rohm-bd71815.h562
-rw-r--r--include/linux/mfd/rohm-bd71828.h426
-rw-r--r--include/linux/mfd/rohm-bd718x7.h313
-rw-r--r--include/linux/mfd/rohm-bd957x.h140
-rw-r--r--include/linux/mfd/rohm-generic.h86
-rw-r--r--include/linux/mfd/rohm-shared.h21
-rw-r--r--include/linux/mfd/rsmu.h39
-rw-r--r--include/linux/mfd/rt5033-private.h50
-rw-r--r--include/linux/mfd/rt5033.h12
-rw-r--r--include/linux/mfd/rz-mtu3.h257
-rw-r--r--include/linux/mfd/samsung/core.h51
-rw-r--r--include/linux/mfd/samsung/irq.h60
-rw-r--r--include/linux/mfd/samsung/rtc.h26
-rw-r--r--include/linux/mfd/samsung/s2mpa01.h7
-rw-r--r--include/linux/mfd/samsung/s2mps11.h18
-rw-r--r--include/linux/mfd/samsung/s2mps13.h14
-rw-r--r--include/linux/mfd/samsung/s2mps14.h14
-rw-r--r--include/linux/mfd/samsung/s2mps15.h11
-rw-r--r--include/linux/mfd/samsung/s2mpu02.h14
-rw-r--r--include/linux/mfd/samsung/s5m8763.h96
-rw-r--r--include/linux/mfd/samsung/s5m8767.h10
-rw-r--r--include/linux/mfd/sc27xx-pmic.h7
-rw-r--r--include/linux/mfd/si476x-core.h13
-rw-r--r--include/linux/mfd/si476x-platform.h11
-rw-r--r--include/linux/mfd/si476x-reports.h11
-rw-r--r--include/linux/mfd/sky81452.h15
-rw-r--r--include/linux/mfd/smsc.h109
-rw-r--r--include/linux/mfd/sta2x11-mfd.h14
-rw-r--r--include/linux/mfd/stm32-lptimer.h68
-rw-r--r--include/linux/mfd/stm32-timers.h91
-rw-r--r--include/linux/mfd/stmfx.h122
-rw-r--r--include/linux/mfd/stmpe.h23
-rw-r--r--include/linux/mfd/stpmic1.h212
-rw-r--r--include/linux/mfd/stw481x.h3
-rw-r--r--include/linux/mfd/sun4i-gpadc.h5
-rw-r--r--include/linux/mfd/sy7636a.h34
-rw-r--r--include/linux/mfd/syscon.h39
-rw-r--r--include/linux/mfd/syscon/atmel-matrix.h7
-rw-r--r--include/linux/mfd/syscon/atmel-mc.h6
-rw-r--r--include/linux/mfd/syscon/atmel-smc.h37
-rw-r--r--include/linux/mfd/syscon/atmel-st.h6
-rw-r--r--include/linux/mfd/syscon/clps711x.h6
-rw-r--r--include/linux/mfd/syscon/exynos4-pmu.h21
-rw-r--r--include/linux/mfd/syscon/exynos5-pmu.h19
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h26
-rw-r--r--include/linux/mfd/syscon/imx7-iomuxc-gpr.h5
-rw-r--r--include/linux/mfd/syscon/xlnx-vcu.h39
-rw-r--r--include/linux/mfd/t7l66xb.h34
-rw-r--r--include/linux/mfd/tc3589x.h9
-rw-r--r--include/linux/mfd/tc6387xb.h20
-rw-r--r--include/linux/mfd/tc6393xb.h59
-rw-r--r--include/linux/mfd/ti-lmu-register.h106
-rw-r--r--include/linux/mfd/ti-lmu.h14
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h116
-rw-r--r--include/linux/mfd/tmio.h46
-rw-r--r--include/linux/mfd/tps6105x.h3
-rw-r--r--include/linux/mfd/tps65010.h (renamed from include/linux/i2c/tps65010.h)2
-rw-r--r--include/linux/mfd/tps65086.h12
-rw-r--r--include/linux/mfd/tps65090.h24
-rw-r--r--include/linux/mfd/tps65217.h18
-rw-r--r--include/linux/mfd/tps65218.h44
-rw-r--r--include/linux/mfd/tps65219.h345
-rw-r--r--  include/linux/mfd/tps6586x.h | 2
-rw-r--r--  include/linux/mfd/tps65910.h | 47
-rw-r--r--  include/linux/mfd/tps65912.h | 14
-rw-r--r--  include/linux/mfd/tps68470.h | 97
-rw-r--r--  include/linux/mfd/tps80031.h | 637
-rw-r--r--  include/linux/mfd/twl.h (renamed from include/linux/i2c/twl.h) | 89
-rw-r--r--  include/linux/mfd/twl4030-audio.h | 16
-rw-r--r--  include/linux/mfd/twl6040.h | 48
-rw-r--r--  include/linux/mfd/ucb1x00.h | 6
-rw-r--r--  include/linux/mfd/viperboard.h | 7
-rw-r--r--  include/linux/mfd/wcd934x/registers.h | 588
-rw-r--r--  include/linux/mfd/wcd934x/wcd934x.h | 31
-rw-r--r--  include/linux/mfd/wl1273-core.h | 15
-rw-r--r--  include/linux/mfd/wm831x/auxadc.h | 7
-rw-r--r--  include/linux/mfd/wm831x/core.h | 8
-rw-r--r--  include/linux/mfd/wm831x/gpio.h | 7
-rw-r--r--  include/linux/mfd/wm831x/irq.h | 7
-rw-r--r--  include/linux/mfd/wm831x/otp.h | 7
-rw-r--r--  include/linux/mfd/wm831x/pdata.h | 9
-rw-r--r--  include/linux/mfd/wm831x/pmu.h | 7
-rw-r--r--  include/linux/mfd/wm831x/regulator.h | 9
-rw-r--r--  include/linux/mfd/wm831x/status.h | 7
-rw-r--r--  include/linux/mfd/wm831x/watchdog.h | 7
-rw-r--r--  include/linux/mfd/wm8350/audio.h | 10
-rw-r--r--  include/linux/mfd/wm8350/comparator.h | 6
-rw-r--r--  include/linux/mfd/wm8350/core.h | 8
-rw-r--r--  include/linux/mfd/wm8350/gpio.h | 7
-rw-r--r--  include/linux/mfd/wm8350/pmic.h | 7
-rw-r--r--  include/linux/mfd/wm8350/rtc.h | 6
-rw-r--r--  include/linux/mfd/wm8350/supply.h | 7
-rw-r--r--  include/linux/mfd/wm8350/wdt.h | 6
-rw-r--r--  include/linux/mfd/wm8400-audio.h | 15
-rw-r--r--  include/linux/mfd/wm8400-private.h | 23
-rw-r--r--  include/linux/mfd/wm8400.h | 15
-rw-r--r--  include/linux/mfd/wm8994/core.h | 7
-rw-r--r--  include/linux/mfd/wm8994/gpio.h | 7
-rw-r--r--  include/linux/mfd/wm8994/pdata.h | 18
-rw-r--r--  include/linux/mfd/wm8994/registers.h | 7
-rw-r--r--  include/linux/mfd/wm97xx.h | 21
-rw-r--r--  include/linux/mhi.h | 810
-rw-r--r--  include/linux/mhi_ep.h | 277
-rw-r--r--  include/linux/mic_bus.h | 111
-rw-r--r--  include/linux/micrel_phy.h | 31
-rw-r--r--  include/linux/microchipphy.h | 25
-rw-r--r--  include/linux/migrate.h | 257
-rw-r--r--  include/linux/migrate_mode.h | 19
-rw-r--r--  include/linux/mii.h | 258
-rw-r--r--  include/linux/mii_timestamper.h | 121
-rw-r--r--  include/linux/min_heap.h | 134
-rw-r--r--  include/linux/minmax.h | 169
-rw-r--r--  include/linux/misc_cgroup.h | 136
-rw-r--r--  include/linux/miscdevice.h | 30
-rw-r--r--  include/linux/mlx4/cq.h | 8
-rw-r--r--  include/linux/mlx4/device.h | 92
-rw-r--r--  include/linux/mlx4/driver.h | 22
-rw-r--r--  include/linux/mlx4/qp.h | 4
-rw-r--r--  include/linux/mlx5/cmd.h | 51
-rw-r--r--  include/linux/mlx5/cq.h | 51
-rw-r--r--  include/linux/mlx5/device.h | 551
-rw-r--r--  include/linux/mlx5/doorbell.h | 39
-rw-r--r--  include/linux/mlx5/driver.h | 1103
-rw-r--r--  include/linux/mlx5/eq.h | 63
-rw-r--r--  include/linux/mlx5/eswitch.h | 210
-rw-r--r--  include/linux/mlx5/fs.h | 211
-rw-r--r--  include/linux/mlx5/fs_helpers.h | 94
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 4730
-rw-r--r--  include/linux/mlx5/mlx5_ifc_fpga.h | 71
-rw-r--r--  include/linux/mlx5/mlx5_ifc_vdpa.h | 215
-rw-r--r--  include/linux/mlx5/mpfs.h | 18
-rw-r--r--  include/linux/mlx5/port.h | 97
-rw-r--r--  include/linux/mlx5/qp.h | 167
-rw-r--r--  include/linux/mlx5/rsc_dump.h | 51
-rw-r--r--  include/linux/mlx5/srq.h | 66
-rw-r--r--  include/linux/mlx5/transobj.h | 55
-rw-r--r--  include/linux/mlx5/vport.h | 47
-rw-r--r--  include/linux/mm-arch-hooks.h | 25
-rw-r--r--  include/linux/mm.h | 2852
-rw-r--r--  include/linux/mm_api.h | 1
-rw-r--r--  include/linux/mm_inline.h | 606
-rw-r--r--  include/linux/mm_types.h | 1282
-rw-r--r--  include/linux/mm_types_task.h | 26
-rw-r--r--  include/linux/mman.h | 114
-rw-r--r--  include/linux/mmap_lock.h | 183
-rw-r--r--  include/linux/mmc/card.h | 56
-rw-r--r--  include/linux/mmc/core.h | 39
-rw-r--r--  include/linux/mmc/host.h | 236
-rw-r--r--  include/linux/mmc/mmc.h | 21
-rw-r--r--  include/linux/mmc/pm.h | 5
-rw-r--r--  include/linux/mmc/sd.h | 16
-rw-r--r--  include/linux/mmc/sdhci-pci-data.h | 20
-rw-r--r--  include/linux/mmc/sdio.h | 13
-rw-r--r--  include/linux/mmc/sdio_func.h | 28
-rw-r--r--  include/linux/mmc/sdio_ids.h | 114
-rw-r--r--  include/linux/mmc/slot-gpio.h | 18
-rw-r--r--  include/linux/mmdebug.h | 61
-rw-r--r--  include/linux/mmiotrace.h | 1
-rw-r--r--  include/linux/mmu_context.h | 39
-rw-r--r--  include/linux/mmu_notifier.h | 441
-rw-r--r--  include/linux/mmzone.h | 1477
-rw-r--r--  include/linux/mnt_idmapping.h | 247
-rw-r--r--  include/linux/mnt_namespace.h | 3
-rw-r--r--  include/linux/mod_devicetable.h | 273
-rw-r--r--  include/linux/module.h | 577
-rw-r--r--  include/linux/module_signature.h | 46
-rw-r--r--  include/linux/module_symbol.h | 17
-rw-r--r--  include/linux/moduleloader.h | 34
-rw-r--r--  include/linux/moduleparam.h | 136
-rw-r--r--  include/linux/most.h | 337
-rw-r--r--  include/linux/mount.h | 59
-rw-r--r--  include/linux/moxtet.h | 109
-rw-r--r--  include/linux/mpage.h | 9
-rw-r--r--  include/linux/mpi.h | 259
-rw-r--r--  include/linux/mpls.h | 1
-rw-r--r--  include/linux/mpls_iptunnel.h | 1
-rw-r--r--  include/linux/mroute.h | 99
-rw-r--r--  include/linux/mroute6.h | 83
-rw-r--r--  include/linux/mroute_base.h | 477
-rw-r--r--  include/linux/msdos_fs.h | 1
-rw-r--r--  include/linux/msdos_partition.h | 50
-rw-r--r--  include/linux/msg.h | 26
-rw-r--r--  include/linux/msi.h | 687
-rw-r--r--  include/linux/msi_api.h | 73
-rw-r--r--  include/linux/mtd/bbm.h | 34
-rw-r--r--  include/linux/mtd/blktrans.h | 32
-rw-r--r--  include/linux/mtd/cfi.h | 31
-rw-r--r--  include/linux/mtd/cfi_endian.h | 16
-rw-r--r--  include/linux/mtd/concat.h | 16
-rw-r--r--  include/linux/mtd/doc2000.h | 16
-rw-r--r--  include/linux/mtd/flashchip.h | 19
-rw-r--r--  include/linux/mtd/gen_probe.h | 16
-rw-r--r--  include/linux/mtd/hyperbus.h | 95
-rw-r--r--  include/linux/mtd/inftl.h | 1
-rw-r--r--  include/linux/mtd/jedec.h | 91
-rw-r--r--  include/linux/mtd/latch-addr-flash.h | 29
-rw-r--r--  include/linux/mtd/lpc32xx_mlc.h | 5
-rw-r--r--  include/linux/mtd/lpc32xx_slc.h | 5
-rw-r--r--  include/linux/mtd/map.h | 146
-rw-r--r--  include/linux/mtd/mtd.h | 254
-rw-r--r--  include/linux/mtd/mtdram.h | 1
-rw-r--r--  include/linux/mtd/nand-ecc-mtk.h | 47
-rw-r--r--  include/linux/mtd/nand-ecc-mxic.h | 49
-rw-r--r--  include/linux/mtd/nand-ecc-sw-bch.h | 71
-rw-r--r--  include/linux/mtd/nand-ecc-sw-hamming.h | 89
-rw-r--r--  include/linux/mtd/nand-gpio.h | 6
-rw-r--r--  include/linux/mtd/nand.h | 1065
-rw-r--r--  include/linux/mtd/nand_bch.h | 68
-rw-r--r--  include/linux/mtd/nand_ecc.h | 42
-rw-r--r--  include/linux/mtd/ndfc.h | 8
-rw-r--r--  include/linux/mtd/nftl.h | 16
-rw-r--r--  include/linux/mtd/onenand.h | 8
-rw-r--r--  include/linux/mtd/onenand_regs.h | 6
-rw-r--r--  include/linux/mtd/onfi.h | 189
-rw-r--r--  include/linux/mtd/partitions.h | 4
-rw-r--r--  include/linux/mtd/pfow.h | 36
-rw-r--r--  include/linux/mtd/physmap.h | 7
-rw-r--r--  include/linux/mtd/pismo.h | 5
-rw-r--r--  include/linux/mtd/plat-ram.h | 6
-rw-r--r--  include/linux/mtd/platnand.h | 74
-rw-r--r--  include/linux/mtd/qinfo.h | 3
-rw-r--r--  include/linux/mtd/rawnand.h | 1856
-rw-r--r--  include/linux/mtd/sh_flctl.h | 16
-rw-r--r--  include/linux/mtd/sharpsl.h | 11
-rw-r--r--  include/linux/mtd/spi-nor.h | 374
-rw-r--r--  include/linux/mtd/spinand.h | 521
-rw-r--r--  include/linux/mtd/super.h | 12
-rw-r--r--  include/linux/mtd/ubi.h | 16
-rw-r--r--  include/linux/mtd/xip.h | 15
-rw-r--r--  include/linux/mtio.h | 60
-rw-r--r--  include/linux/mutex.h | 130
-rw-r--r--  include/linux/mutex_api.h | 1
-rw-r--r--  include/linux/mux/consumer.h | 48
-rw-r--r--  include/linux/mux/driver.h | 9
-rw-r--r--  include/linux/mv643xx.h | 60
-rw-r--r--  include/linux/mv643xx_eth.h | 3
-rw-r--r--  include/linux/mv643xx_i2c.h | 5
-rw-r--r--  include/linux/mxm-wmi.h | 15
-rw-r--r--  include/linux/n_r3964.h | 175
-rw-r--r--  include/linux/namei.h | 100
-rw-r--r--  include/linux/nd.h | 101
-rw-r--r--  include/linux/ndctl.h | 22
-rw-r--r--  include/linux/net.h | 113
-rw-r--r--  include/linux/net/intel/i40e_client.h | 191
-rw-r--r--  include/linux/net/intel/iidc.h | 107
-rw-r--r--  include/linux/net_tstamp.h | 33
-rw-r--r--  include/linux/netdev_features.h | 81
-rw-r--r--  include/linux/netdevice.h | 2887
-rw-r--r--  include/linux/netfilter.h | 325
-rw-r--r--  include/linux/netfilter/ipset/ip_set.h | 170
-rw-r--r--  include/linux/netfilter/ipset/ip_set_bitmap.h | 15
-rw-r--r--  include/linux/netfilter/ipset/ip_set_comment.h | 76
-rw-r--r--  include/linux/netfilter/ipset/ip_set_counter.h | 75
-rw-r--r--  include/linux/netfilter/ipset/ip_set_getport.h | 10
-rw-r--r--  include/linux/netfilter/ipset/ip_set_hash.h | 1
-rw-r--r--  include/linux/netfilter/ipset/ip_set_list.h | 1
-rw-r--r--  include/linux/netfilter/ipset/ip_set_skbinfo.h | 46
-rw-r--r--  include/linux/netfilter/ipset/ip_set_timeout.h | 73
-rw-r--r--  include/linux/netfilter/ipset/pfxlen.h | 1
-rw-r--r--  include/linux/netfilter/nf_conntrack_amanda.h | 5
-rw-r--r--  include/linux/netfilter/nf_conntrack_common.h | 27
-rw-r--r--  include/linux/netfilter/nf_conntrack_dccp.h | 4
-rw-r--r--  include/linux/netfilter/nf_conntrack_ftp.h | 9
-rw-r--r--  include/linux/netfilter/nf_conntrack_h323.h | 121
-rw-r--r--  include/linux/netfilter/nf_conntrack_h323_asn1.h | 9
-rw-r--r--  include/linux/netfilter/nf_conntrack_h323_types.h | 8
-rw-r--r--  include/linux/netfilter/nf_conntrack_irc.h | 6
-rw-r--r--  include/linux/netfilter/nf_conntrack_pptp.h | 55
-rw-r--r--  include/linux/netfilter/nf_conntrack_proto_gre.h | 9
-rw-r--r--  include/linux/netfilter/nf_conntrack_sane.h | 5
-rw-r--r--  include/linux/netfilter/nf_conntrack_sctp.h | 3
-rw-r--r--  include/linux/netfilter/nf_conntrack_sip.h | 9
-rw-r--r--  include/linux/netfilter/nf_conntrack_snmp.h | 4
-rw-r--r--  include/linux/netfilter/nf_conntrack_tcp.h | 1
-rw-r--r--  include/linux/netfilter/nf_conntrack_tftp.h | 6
-rw-r--r--  include/linux/netfilter/nf_conntrack_zones_common.h | 1
-rw-r--r--  include/linux/netfilter/nfnetlink.h | 90
-rw-r--r--  include/linux/netfilter/nfnetlink_acct.h | 4
-rw-r--r--  include/linux/netfilter/nfnetlink_osf.h | 38
-rw-r--r--  include/linux/netfilter/x_tables.h | 55
-rw-r--r--  include/linux/netfilter/xt_hashlimit.h | 9
-rw-r--r--  include/linux/netfilter/xt_physdev.h | 7
-rw-r--r--  include/linux/netfilter_arp/arp_tables.h | 16
-rw-r--r--  include/linux/netfilter_bridge.h | 48
-rw-r--r--  include/linux/netfilter_bridge/ebt_802_3.h | 11
-rw-r--r--  include/linux/netfilter_bridge/ebtables.h | 30
-rw-r--r--  include/linux/netfilter_defs.h | 7
-rw-r--r--  include/linux/netfilter_ingress.h | 57
-rw-r--r--  include/linux/netfilter_ipv4.h | 31
-rw-r--r--  include/linux/netfilter_ipv4/ip_tables.h | 23
-rw-r--r--  include/linux/netfilter_ipv6.h | 181
-rw-r--r--  include/linux/netfilter_ipv6/ip6_tables.h | 34
-rw-r--r--  include/linux/netfilter_netdev.h | 150
-rw-r--r--  include/linux/netfs.h | 365
-rw-r--r--  include/linux/netlink.h | 195
-rw-r--r--  include/linux/netpoll.h | 24
-rw-r--r--  include/linux/nfs.h | 29
-rw-r--r--  include/linux/nfs3.h | 1
-rw-r--r--  include/linux/nfs4.h | 101
-rw-r--r--  include/linux/nfs_fs.h | 338
-rw-r--r--  include/linux/nfs_fs_i.h | 1
-rw-r--r--  include/linux/nfs_fs_sb.h | 77
-rw-r--r--  include/linux/nfs_iostat.h | 13
-rw-r--r--  include/linux/nfs_page.h | 123
-rw-r--r--  include/linux/nfs_ssc.h | 81
-rw-r--r--  include/linux/nfs_xdr.h | 333
-rw-r--r--  include/linux/nfsacl.h | 7
-rw-r--r--  include/linux/nitro_enclaves.h | 11
-rw-r--r--  include/linux/nl802154.h | 13
-rw-r--r--  include/linux/nls.h | 1
-rw-r--r--  include/linux/nmi.h | 161
-rw-r--r--  include/linux/node.h | 129
-rw-r--r--  include/linux/nodemask.h | 113
-rw-r--r--  include/linux/nospec.h | 74
-rw-r--r--  include/linux/notifier.h | 59
-rw-r--r--  include/linux/ns_common.h | 4
-rw-r--r--  include/linux/nsc_gpio.h | 1
-rw-r--r--  include/linux/nsproxy.h | 28
-rw-r--r--  include/linux/ntb.h | 274
-rw-r--r--  include/linux/nubus.h | 190
-rw-r--r--  include/linux/numa.h | 52
-rw-r--r--  include/linux/nvme-auth.h | 41
-rw-r--r--  include/linux/nvme-fc-driver.h | 455
-rw-r--r--  include/linux/nvme-fc.h | 216
-rw-r--r--  include/linux/nvme-rdma.h | 14
-rw-r--r--  include/linux/nvme-tcp.h | 191
-rw-r--r--  include/linux/nvme.h | 999
-rw-r--r--  include/linux/nvmem-consumer.h | 164
-rw-r--r--  include/linux/nvmem-provider.h | 206
-rw-r--r--  include/linux/nvram.h | 134
-rw-r--r--  include/linux/objagg.h | 63
-rw-r--r--  include/linux/objtool.h | 173
-rw-r--r--  include/linux/objtool_types.h | 57
-rw-r--r--  include/linux/of.h | 821
-rw-r--r--  include/linux/of_address.h | 131
-rw-r--r--  include/linux/of_clk.h | 33
-rw-r--r--  include/linux/of_device.h | 74
-rw-r--r--  include/linux/of_dma.h | 5
-rw-r--r--  include/linux/of_fdt.h | 43
-rw-r--r--  include/linux/of_gpio.h | 141
-rw-r--r--  include/linux/of_graph.h | 11
-rw-r--r--  include/linux/of_iommu.h | 37
-rw-r--r--  include/linux/of_irq.h | 35
-rw-r--r--  include/linux/of_mdio.h | 83
-rw-r--r--  include/linux/of_net.h | 30
-rw-r--r--  include/linux/of_pci.h | 65
-rw-r--r--  include/linux/of_pdt.h | 8
-rw-r--r--  include/linux/of_platform.h | 46
-rw-r--r--  include/linux/of_reserved_mem.h | 39
-rw-r--r--  include/linux/oid_registry.h | 60
-rw-r--r--  include/linux/olpc-ec.h | 38
-rw-r--r--  include/linux/omap-dma.h | 56
-rw-r--r--  include/linux/omap-dmaengine.h | 21
-rw-r--r--  include/linux/omap-gpmc.h | 43
-rw-r--r--  include/linux/omap-iommu.h | 25
-rw-r--r--  include/linux/omap-mailbox.h | 9
-rw-r--r--  include/linux/omapfb.h | 15
-rw-r--r--  include/linux/once.h | 45
-rw-r--r--  include/linux/once_lite.h | 36
-rw-r--r--  include/linux/oom.h | 43
-rw-r--r--  include/linux/openvswitch.h | 20
-rw-r--r--  include/linux/oprofile.h | 209
-rw-r--r--  include/linux/osq_lock.h | 1
-rw-r--r--  include/linux/overflow.h | 296
-rw-r--r--  include/linux/oxu210hp.h | 7
-rw-r--r--  include/linux/packing.h | 49
-rw-r--r--  include/linux/padata.h | 158
-rw-r--r--  include/linux/page-flags-layout.h | 59
-rw-r--r--  include/linux/page-flags.h | 736
-rw-r--r--  include/linux/page-isolation.h | 23
-rw-r--r--  include/linux/page_counter.h | 50
-rw-r--r--  include/linux/page_ext.h | 70
-rw-r--r--  include/linux/page_idle.h | 127
-rw-r--r--  include/linux/page_owner.h | 33
-rw-r--r--  include/linux/page_ref.h | 215
-rw-r--r--  include/linux/page_reporting.h | 29
-rw-r--r--  include/linux/page_table_check.h | 166
-rw-r--r--  include/linux/pageblock-flags.h | 67
-rw-r--r--  include/linux/pagemap.h | 1331
-rw-r--r--  include/linux/pagevec.h | 100
-rw-r--r--  include/linux/pagewalk.h | 122
-rw-r--r--  include/linux/panic.h | 94
-rw-r--r--  include/linux/panic_notifier.h | 12
-rw-r--r--  include/linux/parport.h | 66
-rw-r--r--  include/linux/parport_pc.h | 4
-rw-r--r--  include/linux/parser.h | 7
-rw-r--r--  include/linux/part_stat.h | 82
-rw-r--r--  include/linux/patchkey.h | 1
-rw-r--r--  include/linux/path.h | 9
-rw-r--r--  include/linux/pch_dma.h | 14
-rw-r--r--  include/linux/pci-acpi.h | 37
-rw-r--r--  include/linux/pci-aspm.h | 65
-rw-r--r--  include/linux/pci-ats.h | 81
-rw-r--r--  include/linux/pci-dma-compat.h | 147
-rw-r--r--  include/linux/pci-dma.h | 11
-rw-r--r--  include/linux/pci-doe.h | 25
-rw-r--r--  include/linux/pci-ecam.h | 69
-rw-r--r--  include/linux/pci-ep-cfs.h | 11
-rw-r--r--  include/linux/pci-epc.h | 184
-rw-r--r--  include/linux/pci-epf.h | 105
-rw-r--r--  include/linux/pci-p2pdma.h | 107
-rw-r--r--  include/linux/pci.h | 1487
-rw-r--r--  include/linux/pci_hotplug.h | 142
-rw-r--r--  include/linux/pci_ids.h | 222
-rw-r--r--  include/linux/pcieport_if.h | 70
-rw-r--r--  include/linux/pcs-altera-tse.h | 17
-rw-r--r--  include/linux/pcs-lynx.h | 18
-rw-r--r--  include/linux/pcs-rzn1-miic.h | 18
-rw-r--r--  include/linux/pcs/pcs-mtk-lynxi.h | 13
-rw-r--r--  include/linux/pcs/pcs-xpcs.h | 42
-rw-r--r--  include/linux/pda_power.h | 42
-rw-r--r--  include/linux/pds/pds_adminq.h | 647
-rw-r--r--  include/linux/pds/pds_auxbus.h | 20
-rw-r--r--  include/linux/pds/pds_common.h | 68
-rw-r--r--  include/linux/pds/pds_core_if.h | 571
-rw-r--r--  include/linux/pds/pds_intr.h | 163
-rw-r--r--  include/linux/pe.h | 52
-rw-r--r--  include/linux/peci-cpu.h | 40
-rw-r--r--  include/linux/peci.h | 112
-rw-r--r--  include/linux/percpu-defs.h | 27
-rw-r--r--  include/linux/percpu-refcount.h | 161
-rw-r--r--  include/linux/percpu-rwsem.h | 124
-rw-r--r--  include/linux/percpu.h | 48
-rw-r--r--  include/linux/percpu_counter.h | 54
-rw-r--r--  include/linux/percpu_ida.h | 82
-rw-r--r--  include/linux/perf/arm_pmu.h | 59
-rw-r--r--  include/linux/perf/arm_pmuv3.h | 303
-rw-r--r--  include/linux/perf/riscv_pmu.h | 84
-rw-r--r--  include/linux/perf_event.h | 899
-rw-r--r--  include/linux/perf_event_api.h | 1
-rw-r--r--  include/linux/perf_regs.h | 15
-rw-r--r--  include/linux/personality.h | 1
-rw-r--r--  include/linux/pfn.h | 1
-rw-r--r--  include/linux/pfn_t.h | 29
-rw-r--r--  include/linux/pgtable.h | 1712
-rw-r--r--  include/linux/pgtable_api.h | 1
-rw-r--r--  include/linux/phonet.h | 15
-rw-r--r--  include/linux/phy.h | 1704
-rw-r--r--  include/linux/phy/omap_control_phy.h | 12
-rw-r--r--  include/linux/phy/omap_usb.h | 81
-rw-r--r--  include/linux/phy/pcie.h | 12
-rw-r--r--  include/linux/phy/phy-dp.h | 95
-rw-r--r--  include/linux/phy/phy-lvds.h | 32
-rw-r--r--  include/linux/phy/phy-mipi-dphy.h | 287
-rw-r--r--  include/linux/phy/phy-qcom-ufs.h | 41
-rw-r--r--  include/linux/phy/phy-sun4i-usb.h | 10
-rw-r--r--  include/linux/phy/phy.h | 199
-rw-r--r--  include/linux/phy/tegra/xusb.h | 26
-rw-r--r--  include/linux/phy/ulpi_phy.h | 1
-rw-r--r--  include/linux/phy_fixed.h | 38
-rw-r--r--  include/linux/phy_led_triggers.h | 15
-rw-r--r--  include/linux/phylink.h | 663
-rw-r--r--  include/linux/pid.h | 44
-rw-r--r--  include/linux/pid_namespace.h | 63
-rw-r--r--  include/linux/pim.h | 1
-rw-r--r--  include/linux/pinctrl/consumer.h | 54
-rw-r--r--  include/linux/pinctrl/devinfo.h | 22
-rw-r--r--  include/linux/pinctrl/machine.h | 20
-rw-r--r--  include/linux/pinctrl/pinconf-generic.h | 81
-rw-r--r--  include/linux/pinctrl/pinconf.h | 16
-rw-r--r--  include/linux/pinctrl/pinctrl-state.h | 6
-rw-r--r--  include/linux/pinctrl/pinctrl.h | 85
-rw-r--r--  include/linux/pinctrl/pinmux.h | 12
-rw-r--r--  include/linux/pipe_fs_i.h | 176
-rw-r--r--  include/linux/pkeys.h | 14
-rw-r--r--  include/linux/pktcdvd.h | 14
-rw-r--r--  include/linux/pl320-ipc.h | 12
-rw-r--r--  include/linux/platform_data/ad5449.h | 3
-rw-r--r--  include/linux/platform_data/ad5755.h | 103
-rw-r--r--  include/linux/platform_data/ad5761.h | 5
-rw-r--r--  include/linux/platform_data/ad7266.h | 6
-rw-r--r--  include/linux/platform_data/ad7291.h | 12
-rw-r--r--  include/linux/platform_data/ad7298.h | 20
-rw-r--r--  include/linux/platform_data/ad7303.h | 21
-rw-r--r--  include/linux/platform_data/ad7791.h | 1
-rw-r--r--  include/linux/platform_data/ad7793.h | 5
-rw-r--r--  include/linux/platform_data/ad7879.h | 41
-rw-r--r--  include/linux/platform_data/ad7887.h | 7
-rw-r--r--  include/linux/platform_data/adau17x1.h | 3
-rw-r--r--  include/linux/platform_data/adau1977.h | 45
-rw-r--r--  include/linux/platform_data/adp5588.h | 172
-rw-r--r--  include/linux/platform_data/adp8860.h | 3
-rw-r--r--  include/linux/platform_data/adp8870.h | 3
-rw-r--r--  include/linux/platform_data/ads1015.h | 36
-rw-r--r--  include/linux/platform_data/ads7828.h | 7
-rw-r--r--  include/linux/platform_data/amd_xdma.h | 34
-rw-r--r--  include/linux/platform_data/ams-delta-fiq.h | 58
-rw-r--r--  include/linux/platform_data/apds990x.h | 16
-rw-r--r--  include/linux/platform_data/arm-ux500-pm.h | 3
-rw-r--r--  include/linux/platform_data/asoc-imx-ssi.h | 1
-rw-r--r--  include/linux/platform_data/asoc-kirkwood.h | 1
-rw-r--r--  include/linux/platform_data/asoc-mx27vis.h | 11
-rw-r--r--  include/linux/platform_data/asoc-palm27x.h | 8
-rw-r--r--  include/linux/platform_data/asoc-pxa.h | 31
-rw-r--r--  include/linux/platform_data/asoc-s3c.h | 5
-rw-r--r--  include/linux/platform_data/asoc-s3c24xx_simtec.h | 33
-rw-r--r--  include/linux/platform_data/asoc-ti-mcbsp.h | 28
-rw-r--r--  include/linux/platform_data/asoc-ux500-msp.h | 20
-rw-r--r--  include/linux/platform_data/at24.h | 58
-rw-r--r--  include/linux/platform_data/at91_adc.h | 50
-rw-r--r--  include/linux/platform_data/ata-pxa.h | 15
-rw-r--r--  include/linux/platform_data/ata-samsung_cf.h | 34
-rw-r--r--  include/linux/platform_data/atmel.h | 15
-rw-r--r--  include/linux/platform_data/atmel_mxt_ts.h | 31
-rw-r--r--  include/linux/platform_data/b53.h | 6
-rw-r--r--  include/linux/platform_data/bcm7038_wdt.h | 8
-rw-r--r--  include/linux/platform_data/bcmgenet.h | 1
-rw-r--r--  include/linux/platform_data/bd6107.h | 6
-rw-r--r--  include/linux/platform_data/bfin_rotary.h | 117
-rw-r--r--  include/linux/platform_data/bh1770glc.h | 16
-rw-r--r--  include/linux/platform_data/brcmfmac.h | 4
-rw-r--r--  include/linux/platform_data/brcmnand.h | 12
-rw-r--r--  include/linux/platform_data/bt-nokia-h4p.h | 38
-rw-r--r--  include/linux/platform_data/clk-da8xx-cfgchip.h | 21
-rw-r--r--  include/linux/platform_data/clk-davinci-pll.h | 21
-rw-r--r--  include/linux/platform_data/clk-fch.h | 18
-rw-r--r--  include/linux/platform_data/clk-integrator.h | 2
-rw-r--r--  include/linux/platform_data/clk-u300.h | 1
-rw-r--r--  include/linux/platform_data/cpuidle-exynos.h | 5
-rw-r--r--  include/linux/platform_data/cros_ec_chardev.h | 38
-rw-r--r--  include/linux/platform_data/cros_ec_commands.h | 6397
-rw-r--r--  include/linux/platform_data/cros_ec_proto.h | 277
-rw-r--r--  include/linux/platform_data/cros_ec_sensorhub.h | 194
-rw-r--r--  include/linux/platform_data/cros_usbpd_notify.h | 17
-rw-r--r--  include/linux/platform_data/crypto-atmel.h | 22
-rw-r--r--  include/linux/platform_data/crypto-ux500.h | 2
-rw-r--r--  include/linux/platform_data/cyttsp4.h | 16
-rw-r--r--  include/linux/platform_data/davinci-cpufreq.h | 19
-rw-r--r--  include/linux/platform_data/davinci_asp.h | 14
-rw-r--r--  include/linux/platform_data/db8500_thermal.h | 38
-rw-r--r--  include/linux/platform_data/dma-atmel.h | 65
-rw-r--r--  include/linux/platform_data/dma-coh901318.h | 72
-rw-r--r--  include/linux/platform_data/dma-dw.h | 49
-rw-r--r--  include/linux/platform_data/dma-ep93xx.h | 3
-rw-r--r--  include/linux/platform_data/dma-hsu.h | 7
-rw-r--r--  include/linux/platform_data/dma-imx-sdma.h | 67
-rw-r--r--  include/linux/platform_data/dma-iop32x.h | 110
-rw-r--r--  include/linux/platform_data/dma-mcf-edma.h | 38
-rw-r--r--  include/linux/platform_data/dma-mmp_tdma.h | 40
-rw-r--r--  include/linux/platform_data/dma-mv_xor.h | 1
-rw-r--r--  include/linux/platform_data/dma-s3c24xx.h | 52
-rw-r--r--  include/linux/platform_data/dma-ste-dma40.h | 2
-rw-r--r--  include/linux/platform_data/dmtimer-omap.h | 51
-rw-r--r--  include/linux/platform_data/ds620.h | 3
-rw-r--r--  include/linux/platform_data/dsa.h | 68
-rw-r--r--  include/linux/platform_data/dwc3-omap.h | 43
-rw-r--r--  include/linux/platform_data/edma.h | 6
-rw-r--r--  include/linux/platform_data/efm32-spi.h | 14
-rw-r--r--  include/linux/platform_data/efm32-uart.h | 18
-rw-r--r--  include/linux/platform_data/ehci-sh.h | 28
-rw-r--r--  include/linux/platform_data/elm.h | 16
-rw-r--r--  include/linux/platform_data/emc2305.h | 22
-rw-r--r--  include/linux/platform_data/emif_plat.h | 5
-rw-r--r--  include/linux/platform_data/eth-ep93xx.h | 10
-rw-r--r--  include/linux/platform_data/eth-netx.h | 25
-rw-r--r--  include/linux/platform_data/fsa9480.h | 27
-rw-r--r--  include/linux/platform_data/g762.h | 15
-rw-r--r--  include/linux/platform_data/gpio-ath79.h | 5
-rw-r--r--  include/linux/platform_data/gpio-davinci.h | 47
-rw-r--r--  include/linux/platform_data/gpio-dwapb.h | 31
-rw-r--r--  include/linux/platform_data/gpio-htc-egpio.h | 6
-rw-r--r--  include/linux/platform_data/gpio-omap.h | 42
-rw-r--r--  include/linux/platform_data/gpio-ts5500.h | 27
-rw-r--r--  include/linux/platform_data/gpio/gpio-amd-fch.h | 46
-rw-r--r--  include/linux/platform_data/gpio_backlight.h | 9
-rw-r--r--  include/linux/platform_data/gpmc-omap.h | 15
-rw-r--r--  include/linux/platform_data/gsc_hwmon.h | 45
-rw-r--r--  include/linux/platform_data/hirschmann-hellcreek.h | 24
-rw-r--r--  include/linux/platform_data/hsmmc-omap.h | 22
-rw-r--r--  include/linux/platform_data/hwmon-s3c.h | 5
-rw-r--r--  include/linux/platform_data/i2c-cbus-gpio.h | 27
-rw-r--r--  include/linux/platform_data/i2c-davinci.h | 5
-rw-r--r--  include/linux/platform_data/i2c-designware.h | 21
-rw-r--r--  include/linux/platform_data/i2c-gpio.h (renamed from include/linux/i2c-gpio.h) | 18
-rw-r--r--  include/linux/platform_data/i2c-hid.h | 42
-rw-r--r--  include/linux/platform_data/i2c-imx.h | 3
-rw-r--r--  include/linux/platform_data/i2c-mux-gpio.h (renamed from include/linux/i2c-mux-gpio.h) | 12
-rw-r--r--  include/linux/platform_data/i2c-mux-reg.h | 6
-rw-r--r--  include/linux/platform_data/i2c-nuc900.h | 9
-rw-r--r--  include/linux/platform_data/i2c-ocores.h (renamed from include/linux/i2c-ocores.h) | 8
-rw-r--r--  include/linux/platform_data/i2c-omap.h (renamed from include/linux/i2c-omap.h) | 1
-rw-r--r--  include/linux/platform_data/i2c-pca-platform.h (renamed from include/linux/i2c-pca-platform.h) | 4
-rw-r--r--  include/linux/platform_data/i2c-pxa.h | 18
-rw-r--r--  include/linux/platform_data/i2c-s3c2410.h | 5
-rw-r--r--  include/linux/platform_data/i2c-xiic.h (renamed from include/linux/i2c-xiic.h) | 14
-rw-r--r--  include/linux/platform_data/ina2xx.h | 9
-rw-r--r--  include/linux/platform_data/intel-mid_wdt.h | 5
-rw-r--r--  include/linux/platform_data/invensense_mpu6050.h | 12
-rw-r--r--  include/linux/platform_data/iommu-omap.h | 9
-rw-r--r--  include/linux/platform_data/irda-pxaficp.h | 25
-rw-r--r--  include/linux/platform_data/irda-sa11x0.h | 20
-rw-r--r--  include/linux/platform_data/isl9305.h | 6
-rw-r--r--  include/linux/platform_data/itco_wdt.h | 12
-rw-r--r--  include/linux/platform_data/keyboard-pxa930_rotary.h | 20
-rw-r--r--  include/linux/platform_data/keypad-ep93xx.h | 5
-rw-r--r--  include/linux/platform_data/keypad-nomadik-ske.h | 2
-rw-r--r--  include/linux/platform_data/keypad-omap.h | 5
-rw-r--r--  include/linux/platform_data/keypad-pxa27x.h | 1
-rw-r--r--  include/linux/platform_data/keypad-w90p910.h | 15
-rw-r--r--  include/linux/platform_data/keyscan-davinci.h | 15
-rw-r--r--  include/linux/platform_data/lcd-mipid.h | 1
-rw-r--r--  include/linux/platform_data/leds-kirkwood-netxbig.h | 54
-rw-r--r--  include/linux/platform_data/leds-kirkwood-ns2.h | 38
-rw-r--r--  include/linux/platform_data/leds-lm355x.h | 3
-rw-r--r--  include/linux/platform_data/leds-lm3642.h | 3
-rw-r--r--  include/linux/platform_data/leds-lp55xx.h | 18
-rw-r--r--  include/linux/platform_data/leds-omap.h | 22
-rw-r--r--  include/linux/platform_data/leds-pca963x.h | 48
-rw-r--r--  include/linux/platform_data/leds-s3c24xx.h | 27
-rw-r--r--  include/linux/platform_data/lm3630a_bl.h | 10
-rw-r--r--  include/linux/platform_data/lm3639_bl.h | 6
-rw-r--r--  include/linux/platform_data/lm8323.h | 14
-rw-r--r--  include/linux/platform_data/lp855x.h | 6
-rw-r--r--  include/linux/platform_data/lp8727.h | 5
-rw-r--r--  include/linux/platform_data/lp8755.h | 6
-rw-r--r--  include/linux/platform_data/ltc4245.h | 6
-rw-r--r--  include/linux/platform_data/lv5207lp.h | 5
-rw-r--r--  include/linux/platform_data/macb.h | 32
-rw-r--r--  include/linux/platform_data/max197.h | 7
-rw-r--r--  include/linux/platform_data/max3421-hcd.h | 1
-rw-r--r--  include/linux/platform_data/max6639.h | 1
-rw-r--r--  include/linux/platform_data/max6697.h | 5
-rw-r--r--  include/linux/platform_data/max732x.h | 13
-rw-r--r--  include/linux/platform_data/mcs.h | 7
-rw-r--r--  include/linux/platform_data/mdio-bcm-unimac.h | 13
-rw-r--r--  include/linux/platform_data/mdio-gpio.h | 29
-rw-r--r--  include/linux/platform_data/media/camera-mx2.h | 44
-rw-r--r--  include/linux/platform_data/media/camera-mx3.h | 52
-rw-r--r--  include/linux/platform_data/media/camera-pxa.h | 14
-rw-r--r--  include/linux/platform_data/media/coda.h | 18
-rw-r--r--  include/linux/platform_data/media/gpio-ir-recv.h | 23
-rw-r--r--  include/linux/platform_data/media/ir-rx51.h | 8
-rw-r--r--  include/linux/platform_data/media/mmp-camera.h | 22
-rw-r--r--  include/linux/platform_data/media/omap1_camera.h | 35
-rw-r--r--  include/linux/platform_data/media/omap4iss.h | 1
-rw-r--r--  include/linux/platform_data/media/s5p_hdmi.h | 36
-rw-r--r--  include/linux/platform_data/media/si4713.h | 4
-rw-r--r--  include/linux/platform_data/media/sii9234.h | 24
-rw-r--r--  include/linux/platform_data/media/soc_camera_platform.h | 83
-rw-r--r--  include/linux/platform_data/media/timb_radio.h | 14
-rw-r--r--  include/linux/platform_data/media/timb_video.h | 14
-rw-r--r--  include/linux/platform_data/mfd-mcp-sa11x0.h | 5
-rw-r--r--  include/linux/platform_data/microchip-ksz.h | 2
-rw-r--r--  include/linux/platform_data/mlxcpld-hotplug.h | 99
-rw-r--r--  include/linux/platform_data/mlxcpld.h | 31
-rw-r--r--  include/linux/platform_data/mlxreg.h | 239
-rw-r--r--  include/linux/platform_data/mmc-davinci.h | 1
-rw-r--r--  include/linux/platform_data/mmc-esdhc-imx.h | 51
-rw-r--r--  include/linux/platform_data/mmc-esdhc-mcf.h | 17
-rw-r--r--  include/linux/platform_data/mmc-mxcmmc.h | 1
-rw-r--r--  include/linux/platform_data/mmc-omap.h | 11
-rw-r--r--  include/linux/platform_data/mmc-pxamci.h | 5
-rw-r--r--  include/linux/platform_data/mmc-s3cmci.h | 52
-rw-r--r--  include/linux/platform_data/mmc-sdhci-s3c.h | 1
-rw-r--r--  include/linux/platform_data/mmp_audio.h | 22
-rw-r--r--  include/linux/platform_data/mmp_dma.h | 10
-rw-r--r--  include/linux/platform_data/mms114.h | 24
-rw-r--r--  include/linux/platform_data/mouse-pxa930_trkball.h | 10
-rw-r--r--  include/linux/platform_data/mtd-davinci-aemif.h | 3
-rw-r--r--  include/linux/platform_data/mtd-davinci.h | 34
-rw-r--r--  include/linux/platform_data/mtd-mxc_nand.h | 32
-rw-r--r--  include/linux/platform_data/mtd-nand-omap2.h | 30
-rw-r--r--  include/linux/platform_data/mtd-nand-pxa3xx.h | 44
-rw-r--r--  include/linux/platform_data/mtd-nand-s3c2410.h | 7
-rw-r--r--  include/linux/platform_data/mtd-onenand-omap2.h | 34
-rw-r--r--  include/linux/platform_data/mtd-orion_nand.h | 1
-rw-r--r--  include/linux/platform_data/mv88e6xxx.h | 19
-rw-r--r--  include/linux/platform_data/mv_usb.h | 15
-rw-r--r--  include/linux/platform_data/net-cw1200.h | 2
-rw-r--r--  include/linux/platform_data/nfcmrvl.h | 48
-rw-r--r--  include/linux/platform_data/ntc_thermistor.h | 62
-rw-r--r--  include/linux/platform_data/nxp-nci.h | 27
-rw-r--r--  include/linux/platform_data/omap-twl4030.h | 17
-rw-r--r--  include/linux/platform_data/omap-wd-timer.h | 6
-rw-r--r--  include/linux/platform_data/omap1_bl.h | 1
-rw-r--r--  include/linux/platform_data/omap_drm.h | 53
-rw-r--r--  include/linux/platform_data/omapdss.h | 6
-rw-r--r--  include/linux/platform_data/pca953x.h | 3
-rw-r--r--  include/linux/platform_data/pcf857x.h | 44
-rw-r--r--  include/linux/platform_data/pcmcia-pxa2xx_viper.h | 11
-rw-r--r--  include/linux/platform_data/phy-da8xx-usb.h | 21
-rw-r--r--  include/linux/platform_data/pinctrl-adi2.h | 40
-rw-r--r--  include/linux/platform_data/pinctrl-single.h | 7
-rw-r--r--  include/linux/platform_data/pixcir_i2c_ts.h | 63
-rw-r--r--  include/linux/platform_data/pm33xx.h | 75
-rw-r--r--  include/linux/platform_data/pwm_omap_dmtimer.h | 90
-rw-r--r--  include/linux/platform_data/pxa2xx_udc.h | 1
-rw-r--r--  include/linux/platform_data/pxa_sdhci.h | 9
-rw-r--r--  include/linux/platform_data/regulator-haptic.h | 5
-rw-r--r--  include/linux/platform_data/remoteproc-omap.h | 59
-rw-r--r--  include/linux/platform_data/rtc-v3020.h | 41
-rw-r--r--  include/linux/platform_data/s3c-hsotg.h | 5
-rw-r--r--  include/linux/platform_data/s3c-hsudc.h | 34
-rw-r--r--  include/linux/platform_data/sa11x0-serial.h | 1
-rw-r--r--  include/linux/platform_data/sc18is602.h | 7
-rw-r--r--  include/linux/platform_data/sdhci-pic32.h | 10
-rw-r--r--  include/linux/platform_data/serial-imx.h | 28
-rw-r--r--  include/linux/platform_data/serial-omap.h | 6
-rw-r--r--  include/linux/platform_data/serial-sccnxp.h | 6
-rw-r--r--  include/linux/platform_data/sgi-w1.h | 13
-rw-r--r--  include/linux/platform_data/sh_ipmmu.h | 18
-rw-r--r--  include/linux/platform_data/sh_mmcif.h (renamed from include/linux/mmc/sh_mmcif.h) | 8
-rw-r--r--  include/linux/platform_data/shmob_drm.h | 12
-rw-r--r--  include/linux/platform_data/sht15.h | 38
-rw-r--r--  include/linux/platform_data/sht3x.h | 12
-rw-r--r--  include/linux/platform_data/shtc1.h | 11
-rw-r--r--  include/linux/platform_data/si5351.h | 3
-rw-r--r--  include/linux/platform_data/simplefb.h | 10
-rw-r--r--  include/linux/platform_data/sky81452-backlight.h | 46
-rw-r--r--  include/linux/platform_data/spi-clps711x.h | 21
-rw-r--r--  include/linux/platform_data/spi-davinci.h | 19
-rw-r--r--  include/linux/platform_data/spi-ep93xx.h | 5
-rw-r--r--  include/linux/platform_data/spi-imx.h | 27
-rw-r--r--  include/linux/platform_data/spi-mt65xx.h | 9
-rw-r--r--  include/linux/platform_data/spi-nuc900.h | 33
-rw-r--r--  include/linux/platform_data/spi-omap2-mcspi.h | 10
-rw-r--r--  include/linux/platform_data/spi-s3c64xx.h | 21
-rw-r--r--  include/linux/platform_data/ssm2518.h | 22
-rw-r--r--  include/linux/platform_data/st1232_pdata.h | 13
-rw-r--r--  include/linux/platform_data/st33zp24.h | 28
-rw-r--r--  include/linux/platform_data/st_sensors_pdata.h | 12
-rw-r--r--  include/linux/platform_data/syscon.h | 1
-rw-r--r--  include/linux/platform_data/tda9950.h | 16
-rw-r--r--  include/linux/platform_data/ti-aemif.h | 30
-rw-r--r--  include/linux/platform_data/ti-prm.h | 21
-rw-r--r--  include/linux/platform_data/ti-sysc.h | 172
-rw-r--r--  include/linux/platform_data/touchscreen-s3c2410.h | 5
-rw-r--r--  include/linux/platform_data/tps68470.h | 40
-rw-r--r--  include/linux/platform_data/tsc2007.h | 1
-rw-r--r--  include/linux/platform_data/tsl2563.h | 8
-rw-r--r--  include/linux/platform_data/tsl2772.h | 101
-rw-r--r--  include/linux/platform_data/txx9/ndfmc.h | 28
-rw-r--r--  include/linux/platform_data/uio_dmem_genirq.h | 10
-rw-r--r--  include/linux/platform_data/uio_pruss.h | 12
-rw-r--r--  include/linux/platform_data/usb-davinci.h | 14
-rw-r--r--  include/linux/platform_data/usb-ehci-mxc.h | 13
-rw-r--r--  include/linux/platform_data/usb-musb-ux500.h | 2
-rw-r--r--  include/linux/platform_data/usb-mx2.h | 38
-rw-r--r--  include/linux/platform_data/usb-ohci-pxa27x.h | 1
-rw-r--r--  include/linux/platform_data/usb-ohci-s3c2410.h | 5
-rw-r--r--  include/linux/platform_data/usb-omap.h | 18
-rw-r--r--  include/linux/platform_data/usb-omap1.h | 4
-rw-r--r--  include/linux/platform_data/usb-pxa3xx-ulpi.h | 35
-rw-r--r--  include/linux/platform_data/usb-s3c2410_udc.h | 44
-rw-r--r--  include/linux/platform_data/usb3503.h | 5
-rw-r--r--  include/linux/platform_data/ux500_wdt.h | 19
-rw-r--r--  include/linux/platform_data/video-clcd-versatile.h | 27
-rw-r--r--  include/linux/platform_data/video-ep93xx.h | 1
-rw-r--r--  include/linux/platform_data/video-imxfb.h | 69
-rw-r--r--  include/linux/platform_data/video-mx3fb.h | 5
-rw-r--r--  include/linux/platform_data/video-nuc900fb.h | 83
-rw-r--r--  include/linux/platform_data/video-pxafb.h | 27
-rw-r--r--  include/linux/platform_data/video_s3c.h | 1
-rw-r--r--  include/linux/platform_data/voltage-omap.h | 6
-rw-r--r--  include/linux/platform_data/wilco-ec.h | 225
-rw-r--r--  include/linux/platform_data/wiznet.h | 3
-rw-r--r--  include/linux/platform_data/wkup_m3.h | 10
-rw-r--r--  include/linux/platform_data/x86/apple.h | 13
-rw-r--r--  include/linux/platform_data/x86/asus-wmi.h | 133
-rw-r--r--  include/linux/platform_data/x86/clk-lpss.h (renamed from include/linux/platform_data/clk-lpss.h) | 7
-rw-r--r--  include/linux/platform_data/x86/clk-pmc-atom.h | 13
-rw-r--r--  include/linux/platform_data/x86/nvidia-wmi-ec-backlight.h | 76
-rw-r--r--  include/linux/platform_data/x86/p2sb.h | 28
-rw-r--r--  include/linux/platform_data/x86/pmc_atom.h | 24
-rw-r--r--  include/linux/platform_data/x86/pwm-lpss.h | 33
-rw-r--r--  include/linux/platform_data/x86/simatic-ipc-base.h | 28
-rw-r--r--  include/linux/platform_data/x86/simatic-ipc.h | 75
-rw-r--r--  include/linux/platform_data/x86/soc.h | 70
-rw-r--r--  include/linux/platform_data/x86/spi-intel.h (renamed from include/linux/platform_data/intel-spi.h) | 18
-rw-r--r--  include/linux/platform_data/xilinx-ll-temac.h | 33
-rw-r--r--  include/linux/platform_data/xtalk-bridge.h | 22
-rw-r--r--  include/linux/platform_data/zforce_ts.h | 10
-rw-r--r--  include/linux/platform_device.h | 132
-rw-r--r--  include/linux/platform_profile.h | 41
-rw-r--r--  include/linux/pldmfw.h | 165
-rw-r--r--  include/linux/plist.h | 13
-rw-r--r--  include/linux/pm-trace.h | 1
-rw-r--r--  include/linux/pm.h | 199
-rw-r--r--  include/linux/pm2301_charger.h | 61
-rw-r--r--  include/linux/pm_clock.h | 8
-rw-r--r--  include/linux/pm_domain.h | 301
-rw-r--r--  include/linux/pm_opp.h | 449
-rw-r--r--  include/linux/pm_qos.h | 235
-rw-r--r--  include/linux/pm_runtime.h | 349
-rw-r--r--  include/linux/pm_wakeirq.h | 23
-rw-r--r--  include/linux/pm_wakeup.h | 102
-rw-r--r--  include/linux/pmbus.h | 65
-rw-r--r--  include/linux/pmu.h | 7
-rw-r--r--  include/linux/pnfs_osd_xdr.h | 317
-rw-r--r--  include/linux/pnp.h | 32
-rw-r--r--  include/linux/poison.h | 28
-rw-r--r--  include/linux/poll.h | 58
-rw-r--r--  include/linux/polynomial.h | 35
-rw-r--r--  include/linux/posix-clock.h | 38
-rw-r--r--  include/linux/posix-timers.h | 195
-rw-r--r--  include/linux/posix_acl.h | 71
-rw-r--r--  include/linux/posix_acl_xattr.h | 41
-rw-r--r--  include/linux/power/ab8500.h | 16
-rw-r--r--  include/linux/power/bq2415x_charger.h | 21
-rw-r--r--  include/linux/power/bq24190_charger.h | 15
-rw-r--r--  include/linux/power/bq24735-charger.h | 15
-rw-r--r--  include/linux/power/bq25890_charger.h | 15
-rw-r--r--  include/linux/power/bq27xxx_battery.h | 38
-rw-r--r--  include/linux/power/charger-manager.h | 48
-rw-r--r--  include/linux/power/generic-adc-battery.h | 29
-rw-r--r--  include/linux/power/gpio-charger.h | 17
-rw-r--r--  include/linux/power/isp1704_charger.h | 30
-rw-r--r--  include/linux/power/jz4740-battery.h | 11
-rw-r--r--  include/linux/power/max17042_battery.h | 73
-rw-r--r--  include/linux/power/max8903_charger.h | 57
-rw-r--r--  include/linux/power/sbs-battery.h | 15
-rw-r--r--  include/linux/power/smartreflex.h | 28
-rw-r--r--  include/linux/power/smb347-charger.h | 117
-rw-r--r--  include/linux/power/twl4030_madc_battery.h | 11
-rw-r--r--  include/linux/power_supply.h | 600
-rw-r--r--  include/linux/powercap.h | 25
-rw-r--r--  include/linux/ppp-comp.h | 7
-rw-r--r--  include/linux/ppp_channel.h | 11
-rw-r--r--  include/linux/ppp_defs.h | 19
-rw-r--r--  include/linux/pps-gpio.h | 32
-rw-r--r--  include/linux/pps_kernel.h | 31
-rw-r--r--  include/linux/pr.h | 1
-rw-r--r--  include/linux/prandom.h | 56
-rw-r--r--  include/linux/preempt.h | 189
-rw-r--r--  include/linux/prefetch.h | 9
-rw-r--r--  include/linux/prime_numbers.h | 1
-rw-r--r--  include/linux/printk.h | 435
-rw-r--r--  include/linux/prmt.h | 7
-rw-r--r--  include/linux/proc_fs.h | 180
-rw-r--r--  include/linux/proc_ns.h | 23
-rw-r--r--  include/linux/processor.h | 10
-rw-r--r--  include/linux/profile.h | 49
-rw-r--r--  include/linux/projid.h | 1
-rw-r--r--  include/linux/property.h | 462
-rw-r--r--  include/linux/pruss_driver.h | 54
-rw-r--r--  include/linux/psci.h | 29
-rw-r--r--  include/linux/pse-pd/pse.h | 129
-rw-r--r--  include/linux/pseudo_fs.h | 16
-rw-r--r--  include/linux/psi.h | 69
-rw-r--r--  include/linux/psi_types.h | 215
-rw-r--r--  include/linux/psp-platform-access.h | 65
-rw-r--r--  include/linux/psp-sev.h | 667
-rw-r--r--  include/linux/psp-tee.h | 91
-rw-r--r--  include/linux/psp.h | 29
-rw-r--r--  include/linux/pstore.h | 62
-rw-r--r--  include/linux/pstore_blk.h | 55
-rw-r--r--  include/linux/pstore_ram.h | 70
-rw-r--r--  include/linux/pstore_zone.h | 60
-rw-r--r--  include/linux/ptdump.h | 23
-rw-r--r--  include/linux/pti.h | 55
-rw-r--r--  include/linux/ptp_classify.h | 194
-rw-r--r--  include/linux/ptp_clock_kernel.h | 276
-rw-r--r--  include/linux/ptp_kvm.h | 22
-rw-r--r--  include/linux/ptp_pch.h | 26
-rw-r--r--  include/linux/ptr_ring.h | 113
-rw-r--r--  include/linux/ptrace.h | 158
-rw-r--r--  include/linux/ptrace_api.h | 1
-rw-r--r--  include/linux/purgatory.h | 3
-rw-r--r--  include/linux/pvclock_gtod.h | 1
-rw-r--r--  include/linux/pwm.h | 201
-rw-r--r--  include/linux/pwm_backlight.h | 6
-rw-r--r--  include/linux/pxa168_eth.h | 1
-rw-r--r--  include/linux/pxa2xx_ssp.h | 235
-rw-r--r--  include/linux/qcom_scm.h | 77
-rw-r--r--  include/linux/qed/common_hsi.h | 1423
-rw-r--r--  include/linux/qed/eth_common.h | 479
-rw-r--r--  include/linux/qed/fcoe_common.h | 928
-rw-r--r--  include/linux/qed/iscsi_common.h | 1635
-rw-r--r--  include/linux/qed/iwarp_common.h | 47
-rw-r--r--  include/linux/qed/nvmetcp_common.h | 531
-rw-r--r--  include/linux/qed/qed_chain.h | 518
-rw-r--r--  include/linux/qed/qed_eth_if.h | 101
-rw-r--r--  include/linux/qed/qed_fcoe_if.h | 7
-rw-r--r--  include/linux/qed/qed_if.h | 960
-rw-r--r--  include/linux/qed/qed_iov_if.h | 30
-rw-r--r--  include/linux/qed/qed_iscsi_if.h | 38
-rw-r--r--  include/linux/qed/qed_ll2_if.h | 100
-rw-r--r--  include/linux/qed/qed_nvmetcp_if.h | 257
-rw-r--r--  include/linux/qed/qed_rdma_if.h | 90
-rw-r--r--  include/linux/qed/qede_rdma.h | 45
-rw-r--r--  include/linux/qed/rdma_common.h | 58
-rw-r--r--  include/linux/qed/roce_common.h | 49
-rw-r--r--  include/linux/qed/storage_common.h | 124
-rw-r--r--  include/linux/qed/tcp_common.h | 195
-rw-r--r--  include/linux/qnx6_fs.h | 1
-rw-r--r--  include/linux/quicklist.h | 93
-rw-r--r--  include/linux/quota.h | 59
-rw-r--r--  include/linux/quotaops.h | 35
-rw-r--r--  include/linux/radix-tree.h | 237
-rw-r--r--  include/linux/raid/detect.h | 11
-rw-r--r--  include/linux/raid/md_u.h | 20
-rw-r--r--  include/linux/raid/pq.h | 38
-rw-r--r--  include/linux/raid/xor.h | 22
-rw-r--r--  include/linux/raid_class.h | 6
-rw-r--r--  include/linux/ramfs.h | 10
-rw-r--r--  include/linux/random.h | 254
-rw-r--r--  include/linux/randomize_kstack.h | 92
-rw-r--r--  include/linux/range.h | 12
-rw-r--r--  include/linux/ras.h | 6
-rw-r--r--  include/linux/ratelimit.h | 37
-rw-r--r--  include/linux/ratelimit_types.h | 47
-rw-r--r--  include/linux/rational.h | 1
-rw-r--r--  include/linux/rbtree.h | 262
-rw-r--r--  include/linux/rbtree_augmented.h | 139
-rw-r--r--  include/linux/rbtree_latch.h | 8
-rw-r--r--  include/linux/rbtree_types.h | 34
-rw-r--r--  include/linux/rcu_node_tree.h | 19
-rw-r--r--  include/linux/rcu_segcblist.h | 168
-rw-r--r--  include/linux/rcu_sync.h | 53
-rw-r--r--  include/linux/rculist.h | 245
-rw-r--r--  include/linux/rculist_bl.h | 29
-rw-r--r--  include/linux/rculist_nulls.h | 43
-rw-r--r--  include/linux/rcupdate.h | 657
-rw-r--r--  include/linux/rcupdate_trace.h | 100
-rw-r--r--  include/linux/rcupdate_wait.h | 19
-rw-r--r--  include/linux/rcuref.h | 155
-rw-r--r--  include/linux/rcutiny.h | 155
-rw-r--r--  include/linux/rcutree.h | 109
-rw-r--r--  include/linux/rcuwait.h | 63
-rw-r--r--  include/linux/rcuwait_api.h | 1
-rw-r--r--  include/linux/reboot-mode.h | 1
-rw-r--r--  include/linux/reboot.h | 111
-rw-r--r--  include/linux/reciprocal_div.h | 69
-rw-r--r--  include/linux/ref_tracker.h | 79
-rw-r--r--  include/linux/refcount.h | 330
-rw-r--r--  include/linux/refcount_api.h | 1
-rw-r--r--  include/linux/regmap.h | 1017
-rw-r--r--  include/linux/regset.h | 227
-rw-r--r--  include/linux/regulator/ab8500.h | 325
-rw-r--r--  include/linux/regulator/act8865.h | 10
-rw-r--r--  include/linux/regulator/arizona-ldo1.h | 8
-rw-r--r--  include/linux/regulator/arizona-micsupp.h | 5
-rw-r--r--  include/linux/regulator/consumer.h | 156
-rw-r--r--  include/linux/regulator/coupler.h | 100
-rw-r--r--  include/linux/regulator/da9121.h | 36
-rw-r--r--  include/linux/regulator/da9211.h | 20
-rw-r--r--  include/linux/regulator/db8500-prcmu.h | 3
-rw-r--r--  include/linux/regulator/driver.h | 381
-rw-r--r--  include/linux/regulator/fan53555.h | 6
-rw-r--r--  include/linux/regulator/fixed.h | 20
-rw-r--r--  include/linux/regulator/gpio-regulator.h | 26
-rw-r--r--  include/linux/regulator/lp3971.h | 15
-rw-r--r--  include/linux/regulator/lp3972.h | 15
-rw-r--r--  include/linux/regulator/lp872x.h | 23
-rw-r--r--  include/linux/regulator/machine.h | 76
-rw-r--r--  include/linux/regulator/max1586.h | 15
-rw-r--r--  include/linux/regulator/max8649.h | 5
-rw-r--r--  include/linux/regulator/max8660.h | 14
-rw-r--r--  include/linux/regulator/max8952.h | 19
-rw-r--r--  include/linux/regulator/max8973-regulator.h | 16
-rw-r--r--  include/linux/regulator/mt6311.h | 10
-rw-r--r--  include/linux/regulator/mt6315-regulator.h | 44
-rw-r--r--  include/linux/regulator/mt6323-regulator.h | 10
-rw-r--r--  include/linux/regulator/mt6331-regulator.h | 46
-rw-r--r--  include/linux/regulator/mt6332-regulator.h | 27
-rw-r--r--  include/linux/regulator/mt6357-regulator.h | 51
-rw-r--r--  include/linux/regulator/mt6358-regulator.h | 101
-rw-r--r--  include/linux/regulator/mt6359-regulator.h | 59
-rw-r--r--  include/linux/regulator/mt6380-regulator.h | 24
-rw-r--r--  include/linux/regulator/mt6397-regulator.h | 10
-rw-r--r--  include/linux/regulator/of_regulator.h | 1
-rw-r--r--  include/linux/regulator/pca9450.h | 236
-rw-r--r--  include/linux/regulator/pfuze100.h | 33
-rw-r--r--  include/linux/regulator/tps51632-regulator.h | 16
-rw-r--r--  include/linux/regulator/tps62360.h | 22
-rw-r--r--  include/linux/regulator/tps6507x.h | 14
-rw-r--r--  include/linux/regulator/userspace-consumer.h | 2
-rw-r--r--  include/linux/relay.h | 34
-rw-r--r--  include/linux/remoteproc.h | 298
-rw-r--r--  include/linux/remoteproc/mtk_scp.h | 68
-rw-r--r--  include/linux/remoteproc/pruss.h | 83
-rw-r--r--  include/linux/remoteproc/qcom_rproc.h | 48
-rw-r--r--  include/linux/remoteproc/st_slim_rproc.h | 6
-rw-r--r--  include/linux/resctrl.h | 267
-rw-r--r--  include/linux/reservation.h | 264
-rw-r--r--  include/linux/reset-controller.h | 58
-rw-r--r--  include/linux/reset.h | 639
-rw-r--r--  include/linux/reset/bcm63xx_pmb.h | 10
-rw-r--r--  include/linux/reset/reset-simple.h | 48
-rw-r--r--  include/linux/reset/socfpga.h | 7
-rw-r--r--  include/linux/reset/sunxi.h | 7
-rw-r--r--  include/linux/resource.h | 3
-rw-r--r--  include/linux/resource_ext.h | 22
-rw-r--r--  include/linux/restart_block.h | 13
-rw-r--r--  include/linux/resume_user_mode.h | 64
-rw-r--r--  include/linux/rethook.h | 100
-rw-r--r--  include/linux/rfkill.h | 56
-rw-r--r--  include/linux/rhashtable-types.h | 135
-rw-r--r--  include/linux/rhashtable.h | 636
-rw-r--r--  include/linux/ring_buffer.h | 135
-rw-r--r--  include/linux/rio.h | 10
-rw-r--r--  include/linux/rio_drv.h | 9
-rw-r--r--  include/linux/rio_ids.h | 19
-rw-r--r--  include/linux/rio_regs.h | 6
-rw-r--r--  include/linux/rmap.h | 310
-rw-r--r--  include/linux/rmi.h | 20
-rw-r--r--  include/linux/rndis.h | 1
-rw-r--r--  include/linux/rodata_test.h | 6
-rw-r--r--  include/linux/root_dev.h | 2
-rw-r--r--  include/linux/rpmsg.h | 123
-rw-r--r--  include/linux/rpmsg/byteorder.h | 67
-rw-r--r--  include/linux/rpmsg/mtk_rpmsg.h | 38
-rw-r--r--  include/linux/rpmsg/ns.h | 45
-rw-r--r--  include/linux/rpmsg/qcom_glink.h | 34
-rw-r--r--  include/linux/rpmsg/qcom_smd.h | 6
-rw-r--r--  include/linux/rslib.h | 74
-rw-r--r--  include/linux/rtc.h | 160
-rw-r--r--  include/linux/rtc/ds1685.h | 20
-rw-r--r--  include/linux/rtc/m48t59.h | 5
-rw-r--r--  include/linux/rtc/rtc-omap.h | 7
-rw-r--r--  include/linux/rtc/sirfsoc_rtciobrg.h | 22
-rw-r--r--  include/linux/rtmutex.h | 120
-rw-r--r--  include/linux/rtnetlink.h | 52
-rw-r--r--  include/linux/rtsx_common.h (renamed from include/linux/mfd/rtsx_common.h) | 14
-rw-r--r--  include/linux/rtsx_pci.h (renamed from include/linux/mfd/rtsx_pci.h) | 396
-rw-r--r--  include/linux/rtsx_usb.h (renamed from include/linux/mfd/rtsx_usb.h) | 15
-rw-r--r--  include/linux/rv.h | 70
-rw-r--r--  include/linux/rwbase_rt.h | 39
-rw-r--r--  include/linux/rwlock.h | 24
-rw-r--r--  include/linux/rwlock_api_smp.h | 32
-rw-r--r--  include/linux/rwlock_rt.h | 150
-rw-r--r--  include/linux/rwlock_types.h | 52
-rw-r--r--  include/linux/rwsem-spinlock.h | 45
-rw-r--r--  include/linux/rwsem.h | 138
-rw-r--r--  include/linux/rxrpc.h | 79
-rw-r--r--  include/linux/s3c_adc_battery.h | 41
-rw-r--r--  include/linux/sa11x0-dma.h | 24
-rw-r--r--  include/linux/sbitmap.h | 367
-rw-r--r--  include/linux/scatterlist.h | 336
-rw-r--r--  include/linux/scc.h | 1
-rw-r--r--  include/linux/sched.h | 1365
-rw-r--r--  include/linux/sched/affinity.h | 1
-rw-r--r--  include/linux/sched/autogroup.h | 1
-rw-r--r--  include/linux/sched/clock.h | 9
-rw-r--r--  include/linux/sched/cond_resched.h | 1
-rw-r--r--  include/linux/sched/coredump.h | 21
-rw-r--r--  include/linux/sched/cpufreq.h | 21
-rw-r--r--  include/linux/sched/cputime.h | 30
-rw-r--r--  include/linux/sched/deadline.h | 15
-rw-r--r--  include/linux/sched/debug.h | 12
-rw-r--r--  include/linux/sched/hotplug.h | 3
-rw-r--r--  include/linux/sched/idle.h | 45
-rw-r--r--  include/linux/sched/init.h | 1
-rw-r--r--  include/linux/sched/isolation.h | 73
-rw-r--r--  include/linux/sched/jobctl.h | 11
-rw-r--r--  include/linux/sched/loadavg.h | 27
-rw-r--r--  include/linux/sched/mm.h | 332
-rw-r--r--  include/linux/sched/nohz.h | 17
-rw-r--r--  include/linux/sched/numa_balancing.h | 5
-rw-r--r--  include/linux/sched/posix-timers.h | 1
-rw-r--r--  include/linux/sched/prio.h | 19
-rw-r--r--  include/linux/sched/rseq_api.h | 1
-rw-r--r--  include/linux/sched/rt.h | 20
-rw-r--r--  include/linux/sched/sd_flags.h | 166
-rw-r--r--  include/linux/sched/signal.h | 350
-rw-r--r--  include/linux/sched/smt.h | 20
-rw-r--r--  include/linux/sched/stat.h | 20
-rw-r--r--  include/linux/sched/sysctl.h | 71
-rw-r--r--  include/linux/sched/task.h | 111
-rw-r--r--  include/linux/sched/task_flags.h | 1
-rw-r--r--  include/linux/sched/task_stack.h | 15
-rw-r--r--  include/linux/sched/thread_info_api.h | 1
-rw-r--r--  include/linux/sched/topology.h | 125
-rw-r--r--  include/linux/sched/types.h | 23
-rw-r--r--  include/linux/sched/user.h | 37
-rw-r--r--  include/linux/sched/vhost_task.h | 23
-rw-r--r--  include/linux/sched/wake_q.h | 23
-rw-r--r--  include/linux/sched/xacct.h | 1
-rw-r--r--  include/linux/sched_clock.h | 40
-rw-r--r--  include/linux/scif.h | 1339
-rw-r--r--  include/linux/scmi_protocol.h | 968
-rw-r--r--  include/linux/scpi_protocol.h | 27
-rw-r--r--  include/linux/screen_info.h | 1
-rw-r--r--  include/linux/scs.h | 86
-rw-r--r--  include/linux/sctp.h | 326
-rw-r--r--  include/linux/scx200.h | 1
-rw-r--r--  include/linux/scx200_gpio.h | 1
-rw-r--r--  include/linux/sdb.h | 159
-rw-r--r--  include/linux/sdla.h | 244
-rw-r--r--  include/linux/seccomp.h | 46
-rw-r--r--  include/linux/secretmem.h | 54
-rw-r--r--  include/linux/securebits.h | 1
-rw-r--r--  include/linux/security.h | 778
-rw-r--r--  include/linux/sed-opal.h | 20
-rw-r--r--  include/linux/seg6.h | 1
-rw-r--r--  include/linux/seg6_genl.h | 1
-rw-r--r--  include/linux/seg6_hmac.h | 1
-rw-r--r--  include/linux/seg6_iptunnel.h | 1
-rw-r--r--  include/linux/seg6_local.h | 6
-rw-r--r--  include/linux/selection.h | 32
-rw-r--r--  include/linux/selinux.h | 35
-rw-r--r--  include/linux/sem.h | 40
-rw-r--r--  include/linux/semaphore.h | 15
-rw-r--r--  include/linux/seq_buf.h | 33
-rw-r--r--  include/linux/seq_file.h | 90
-rw-r--r--  include/linux/seq_file_net.h | 24
-rw-r--r--  include/linux/seqlock.h | 1189
-rw-r--r--  include/linux/seqlock_api.h | 1
-rw-r--r--  include/linux/seqno-fence.h | 117
-rw-r--r--  include/linux/serdev.h | 69
-rw-r--r--  include/linux/serial.h | 27
-rw-r--r--  include/linux/serial_8250.h | 81
-rw-r--r--  include/linux/serial_bcm63xx.h | 1
-rw-r--r--  include/linux/serial_core.h | 722
-rw-r--r--  include/linux/serial_max3100.h | 6
-rw-r--r--  include/linux/serial_pnx8xxx.h | 80
-rw-r--r--  include/linux/serial_s3c.h | 40
-rw-r--r--  include/linux/serial_sci.h | 2
-rw-r--r--  include/linux/serio.h | 5
-rw-r--r--  include/linux/set_memory.h | 69
-rw-r--r--  include/linux/sfi.h | 210
-rw-r--r--  include/linux/sfi_acpi.h | 93
-rw-r--r--  include/linux/sfp.h | 639
-rw-r--r--  include/linux/sh_clk.h | 1
-rw-r--r--  include/linux/sh_dma.h | 5
-rw-r--r--  include/linux/sh_eth.h | 5
-rw-r--r--  include/linux/sh_intc.h | 6
-rw-r--r--  include/linux/sh_timer.h | 1
-rw-r--r--  include/linux/shdma-base.h | 7
-rw-r--r--  include/linux/shm.h | 40
-rw-r--r--  include/linux/shmem_fs.h | 110
-rw-r--r--  include/linux/shrinker.h | 80
-rw-r--r--  include/linux/signal.h | 124
-rw-r--r--  include/linux/signal_types.h | 28
-rw-r--r--  include/linux/signalfd.h | 1
-rw-r--r--  include/linux/siox.h | 84
-rw-r--r--  include/linux/siphash.h | 54
-rw-r--r--  include/linux/sirfsoc_dma.h | 6
-rw-r--r--  include/linux/sizes.h | 15
-rw-r--r--  include/linux/skb_array.h | 21
-rw-r--r--  include/linux/skbuff.h | 2257
-rw-r--r--  include/linux/skmsg.h | 559
-rw-r--r--  include/linux/slab.h | 729
-rw-r--r--  include/linux/slab_def.h | 52
-rw-r--r--  include/linux/slimbus.h | 212
-rw-r--r--  include/linux/slub_def.h | 110
-rw-r--r--  include/linux/sm501-regs.h | 5
-rw-r--r--  include/linux/sm501.h | 14
-rw-r--r--  include/linux/smc911x.h | 13
-rw-r--r--  include/linux/smc91x.h | 1
-rw-r--r--  include/linux/smp.h | 153
-rw-r--r--  include/linux/smp_types.h | 69
-rw-r--r--  include/linux/smpboot.h | 18
-rw-r--r--  include/linux/smsc911x.h | 15
-rw-r--r--  include/linux/smscphy.h | 11
-rw-r--r--  include/linux/soc/amlogic/meson-canvas.h | 66
-rw-r--r--  include/linux/soc/apple/rtkit.h | 193
-rw-r--r--  include/linux/soc/apple/sart.h | 53
-rw-r--r--  include/linux/soc/brcmstb/brcmstb.h | 34
-rw-r--r--  include/linux/soc/cirrus/ep93xx.h | 37
-rw-r--r--  include/linux/soc/dove/pmu.h | 1
-rw-r--r--  include/linux/soc/ixp4xx/cpu.h | 120
-rw-r--r--  include/linux/soc/ixp4xx/npe.h | 40
-rw-r--r--  include/linux/soc/ixp4xx/qmgr.h | 88
-rw-r--r--  include/linux/soc/marvell/octeontx2/asm.h | 57
-rw-r--r--  include/linux/soc/mediatek/infracfg.h | 389
-rw-r--r--  include/linux/soc/mediatek/mtk-cmdq.h | 394
-rw-r--r--  include/linux/soc/mediatek/mtk-mmsys.h | 108
-rw-r--r--  include/linux/soc/mediatek/mtk-mutex.h | 90
-rw-r--r--  include/linux/soc/mediatek/mtk_sip_svc.h | 28
-rw-r--r--  include/linux/soc/mediatek/mtk_wed.h | 266
-rw-r--r--  include/linux/soc/mmp/cputype.h | 65
-rw-r--r--  include/linux/soc/nxp/lpc32xx-misc.h | 33
-rw-r--r--  include/linux/soc/pxa/cpu.h | 252
-rw-r--r--  include/linux/soc/pxa/mfp.h | 470
-rw-r--r--  include/linux/soc/pxa/smemc.h | 13
-rw-r--r--  include/linux/soc/qcom/apr.h | 197
-rw-r--r--  include/linux/soc/qcom/geni-se.h | 512
-rw-r--r--  include/linux/soc/qcom/irq.h | 34
-rw-r--r--  include/linux/soc/qcom/llcc-qcom.h | 217
-rw-r--r--  include/linux/soc/qcom/mdt_loader.h | 58
-rw-r--r--  include/linux/soc/qcom/pdr.h | 29
-rw-r--r--  include/linux/soc/qcom/pmic_glink.h | 32
-rw-r--r--  include/linux/soc/qcom/qcom_aoss.h | 38
-rw-r--r--  include/linux/soc/qcom/qmi.h | 272
-rw-r--r--  include/linux/soc/qcom/smd-rpm.h | 17
-rw-r--r--  include/linux/soc/qcom/smem.h | 3
-rw-r--r--  include/linux/soc/qcom/smem_state.h | 9
-rw-r--r--  include/linux/soc/qcom/wcnss_ctrl.h | 1
-rw-r--r--  include/linux/soc/renesas/r9a06g032-sysctrl.h | 11
-rw-r--r--  include/linux/soc/renesas/rcar-rst.h | 3
-rw-r--r--  include/linux/soc/renesas/rcar-sysc.h | 14
-rw-r--r--  include/linux/soc/samsung/exynos-chipid.h | 50
-rw-r--r--  include/linux/soc/samsung/exynos-pmu.h | 7
-rw-r--r--  include/linux/soc/samsung/exynos-regs-pmu.h | 36
-rw-r--r--  include/linux/soc/samsung/s3c-pm.h | 36
-rw-r--r--  include/linux/soc/sunxi/sunxi_sram.h | 2
-rw-r--r--  include/linux/soc/ti/k3-ringacc.h | 270
-rw-r--r--  include/linux/soc/ti/knav_dma.h | 24
-rw-r--r--  include/linux/soc/ti/knav_qmss.h | 13
-rw-r--r--  include/linux/soc/ti/omap1-io.h | 143
-rw-r--r--  include/linux/soc/ti/omap1-mux.h | 311
-rw-r--r--  include/linux/soc/ti/omap1-soc.h | 163
-rw-r--r--  include/linux/soc/ti/omap1-usb.h | 116
-rw-r--r--  include/linux/soc/ti/ti-msgmgr.h | 18
-rw-r--r--  include/linux/soc/ti/ti_sci_inta_msi.h | 21
-rw-r--r--  include/linux/soc/ti/ti_sci_protocol.h | 453
-rw-r--r--  include/linux/sock_diag.h | 15
-rw-r--r--  include/linux/socket.h | 145
-rw-r--r--  include/linux/sockptr.h | 118
-rw-r--r--  include/linux/softirq.h | 1
-rw-r--r--  include/linux/sonet.h | 1
-rw-r--r--  include/linux/sony-laptop.h | 5
-rw-r--r--  include/linux/sonypi.h | 16
-rw-r--r--  include/linux/sort.h | 10
-rw-r--r--  include/linux/sound.h | 3
-rw-r--r--  include/linux/soundwire/sdw.h | 1145
-rw-r--r--  include/linux/soundwire/sdw_amd.h | 109
-rw-r--r--  include/linux/soundwire/sdw_intel.h | 348
-rw-r--r--  include/linux/soundwire/sdw_registers.h | 344
-rw-r--r--  include/linux/soundwire/sdw_type.h | 37
-rw-r--r--  include/linux/spi/ad7877.h | 1
-rw-r--r--  include/linux/spi/adi_spi3.h | 254
-rw-r--r--  include/linux/spi/ads7846.h | 16
-rw-r--r--  include/linux/spi/altera.h | 50
-rw-r--r--  include/linux/spi/at73c213.h | 1
-rw-r--r--  include/linux/spi/at86rf230.h | 28
-rw-r--r--  include/linux/spi/cc2520.h | 26
-rw-r--r--  include/linux/spi/corgi_lcd.h | 4
-rw-r--r--  include/linux/spi/ds1305.h | 1
-rw-r--r--  include/linux/spi/eeprom.h | 3
-rw-r--r--  include/linux/spi/flash.h | 1
-rw-r--r--  include/linux/spi/ifx_modem.h | 19
-rw-r--r--  include/linux/spi/l4f00242t03.h | 25
-rw-r--r--  include/linux/spi/libertas_spi.h | 6
-rw-r--r--  include/linux/spi/lms283gf05.h | 24
-rw-r--r--  include/linux/spi/max7301.h | 5
-rw-r--r--  include/linux/spi/mc33880.h | 1
-rw-r--r--  include/linux/spi/mcp23s08.h | 17
-rw-r--r--  include/linux/spi/mmc_spi.h | 25
-rw-r--r--  include/linux/spi/mxs-spi.h | 11
-rw-r--r--  include/linux/spi/pxa2xx_spi.h | 44
-rw-r--r--  include/linux/spi/rspi.h | 10
-rw-r--r--  include/linux/spi/s3c24xx.h | 28
-rw-r--r--  include/linux/spi/sh_hspi.h | 10
-rw-r--r--  include/linux/spi/sh_msiof.h | 1
-rw-r--r--  include/linux/spi/spi-fsl-dspi.h | 23
-rw-r--r--  include/linux/spi/spi-mem.h | 404
-rw-r--r--  include/linux/spi/spi.h | 662
-rw-r--r--  include/linux/spi/spi_bitbang.h | 7
-rw-r--r--  include/linux/spi/spi_gpio.h | 50
-rw-r--r--  include/linux/spi/spi_oc_tiny.h | 5
-rw-r--r--  include/linux/spi/tdo24m.h | 1
-rw-r--r--  include/linux/spi/tle62x0.h | 10
-rw-r--r--  include/linux/spi/xilinx_spi.h | 2
-rw-r--r--  include/linux/spinlock.h | 229
-rw-r--r--  include/linux/spinlock_api.h | 1
-rw-r--r--  include/linux/spinlock_api_smp.h | 24
-rw-r--r--  include/linux/spinlock_api_up.h | 3
-rw-r--r--  include/linux/spinlock_rt.h | 159
-rw-r--r--  include/linux/spinlock_types.h | 92
-rw-r--r--  include/linux/spinlock_types_raw.h | 73
-rw-r--r--  include/linux/spinlock_types_up.h | 2
-rw-r--r--  include/linux/spinlock_up.h | 20
-rw-r--r--  include/linux/splice.h | 7
-rw-r--r--  include/linux/spmi.h | 14
-rw-r--r--  include/linux/sram.h | 14
-rw-r--r--  include/linux/srcu.h | 245
-rw-r--r--  include/linux/srcutiny.h | 57
-rw-r--r--  include/linux/srcutree.h | 143
-rw-r--r--  include/linux/ssb/ssb.h | 7
-rw-r--r--  include/linux/ssb/ssb_driver_chipcommon.h | 3
-rw-r--r--  include/linux/ssb/ssb_driver_extif.h | 5
-rw-r--r--  include/linux/ssb/ssb_driver_gige.h | 17
-rw-r--r--  include/linux/ssb/ssb_driver_mips.h | 1
-rw-r--r--  include/linux/ssb/ssb_driver_pci.h | 1
-rw-r--r--  include/linux/ssb/ssb_embedded.h | 1
-rw-r--r--  include/linux/ssb/ssb_regs.h | 1
-rw-r--r--  include/linux/ssbi.h | 10
-rw-r--r--  include/linux/stackdepot.h | 165
-rw-r--r--  include/linux/stackleak.h | 83
-rw-r--r--  include/linux/stackprotector.h | 22
-rw-r--r--  include/linux/stacktrace.h | 102
-rw-r--r--  include/linux/start_kernel.h | 5
-rw-r--r--  include/linux/stat.h | 30
-rw-r--r--  include/linux/statfs.h | 18
-rw-r--r--  include/linux/static_call.h | 346
-rw-r--r--  include/linux/static_call_types.h | 103
-rw-r--r--  include/linux/stdarg.h | 11
-rw-r--r--  include/linux/stddef.h | 80
-rw-r--r--  include/linux/ste_modem_shm.h | 56
-rw-r--r--  include/linux/stm.h | 12
-rw-r--r--  include/linux/stmmac.h | 151
-rw-r--r--  include/linux/stmp3xxx_rtc_wdt.h | 3
-rw-r--r--  include/linux/stmp_device.h | 6
-rw-r--r--  include/linux/stop_machine.h | 48
-rw-r--r--  include/linux/string.h | 385
-rw-r--r--  include/linux/string_helpers.h | 84
-rw-r--r--  include/linux/stringhash.h | 5
-rw-r--r--  include/linux/sudmac.h | 52
-rw-r--r--  include/linux/sungem_phy.h | 3
-rw-r--r--  include/linux/sunrpc/addr.h | 1
-rw-r--r--  include/linux/sunrpc/auth.h | 122
-rw-r--r--  include/linux/sunrpc/auth_gss.h | 4
-rw-r--r--  include/linux/sunrpc/bc_xprt.h | 43
-rw-r--r--  include/linux/sunrpc/cache.h | 101
-rw-r--r--  include/linux/sunrpc/clnt.h | 58
-rw-r--r--  include/linux/sunrpc/debug.h | 1
-rw-r--r--  include/linux/sunrpc/gss_api.h | 14
-rw-r--r--  include/linux/sunrpc/gss_err.h | 3
-rw-r--r--  include/linux/sunrpc/gss_krb5.h | 209
-rw-r--r--  include/linux/sunrpc/gss_krb5_enctypes.h | 4
-rw-r--r--  include/linux/sunrpc/metrics.h | 12
-rw-r--r--  include/linux/sunrpc/msg_prot.h | 15
-rw-r--r--  include/linux/sunrpc/rpc_pipe_fs.h | 11
-rw-r--r--  include/linux/sunrpc/rpc_rdma.h | 138
-rw-r--r--  include/linux/sunrpc/rpc_rdma_cid.h | 24
-rw-r--r--  include/linux/sunrpc/sched.h | 74
-rw-r--r--  include/linux/sunrpc/stats.h | 5
-rw-r--r--  include/linux/sunrpc/svc.h | 362
-rw-r--r--  include/linux/sunrpc/svc_rdma.h | 183
-rw-r--r--  include/linux/sunrpc/svc_rdma_pcl.h | 128
-rw-r--r--  include/linux/sunrpc/svc_xprt.h | 38
-rw-r--r--  include/linux/sunrpc/svcauth.h | 13
-rw-r--r--  include/linux/sunrpc/svcauth_gss.h | 6
-rw-r--r--  include/linux/sunrpc/svcsock.h | 16
-rw-r--r--  include/linux/sunrpc/timer.h | 1
-rw-r--r--  include/linux/sunrpc/types.h | 1
-rw-r--r--  include/linux/sunrpc/xdr.h | 455
-rw-r--r--  include/linux/sunrpc/xprt.h | 121
-rw-r--r--  include/linux/sunrpc/xprtmultipath.h | 17
-rw-r--r--  include/linux/sunrpc/xprtrdma.h | 7
-rw-r--r--  include/linux/sunrpc/xprtsock.h | 50
-rw-r--r--  include/linux/sunserialcore.h | 1
-rw-r--r--  include/linux/sunxi-rsb.h | 2
-rw-r--r--  include/linux/surface_acpi_notify.h | 39
-rw-r--r--  include/linux/surface_aggregator/controller.h | 994
-rw-r--r--  include/linux/surface_aggregator/device.h | 637
-rw-r--r--  include/linux/surface_aggregator/serial_hub.h | 691
-rw-r--r--  include/linux/suspend.h | 265
-rw-r--r--  include/linux/svga.h | 1
-rw-r--r--  include/linux/sw842.h | 1
-rw-r--r--  include/linux/swab.h | 27
-rw-r--r--  include/linux/swait.h | 167
-rw-r--r--  include/linux/swait_api.h | 1
-rw-r--r--  include/linux/swap.h | 468
-rw-r--r--  include/linux/swap_cgroup.h | 5
-rw-r--r--  include/linux/swap_slots.h | 5
-rw-r--r--  include/linux/swapfile.h | 11
-rw-r--r--  include/linux/swapops.h | 497
-rw-r--r--  include/linux/swiotlb.h | 229
-rw-r--r--  include/linux/switchtec.h | 525
-rw-r--r--  include/linux/sxgbe_platform.h | 11
-rw-r--r--  include/linux/sync_core.h | 21
-rw-r--r--  include/linux/sync_file.h | 7
-rw-r--r--  include/linux/sys.h | 1
-rw-r--r--  include/linux/sys_soc.h | 4
-rw-r--r--  include/linux/syscall_user_dispatch.h | 58
-rw-r--r--  include/linux/syscalls.h | 1611
-rw-r--r--  include/linux/syscalls_api.h | 1
-rw-r--r--  include/linux/syscore_ops.h | 3
-rw-r--r--  include/linux/sysctl.h | 159
-rw-r--r--  include/linux/sysfb.h | 111
-rw-r--r--  include/linux/sysfs.h | 175
-rw-r--r--  include/linux/syslog.h | 27
-rw-r--r--  include/linux/sysrq.h | 26
-rw-r--r--  include/linux/sysv_fs.h | 1
-rw-r--r--  include/linux/t10-pi.h | 43
-rw-r--r--  include/linux/task_io_accounting.h | 1
-rw-r--r--  include/linux/task_io_accounting_ops.h | 1
-rw-r--r--  include/linux/task_work.h | 19
-rw-r--r--  include/linux/taskstats_kern.h | 1
-rw-r--r--  include/linux/tboot.h | 27
-rw-r--r--  include/linux/tc.h | 1
-rw-r--r--  include/linux/tca6416_keypad.h | 5
-rw-r--r--  include/linux/tcp.h | 194
-rw-r--r--  include/linux/tee_drv.h | 369
-rw-r--r--  include/linux/termios_internal.h | 49
-rw-r--r--  include/linux/textsearch.h | 7
-rw-r--r--  include/linux/textsearch_fsm.h | 1
-rw-r--r--  include/linux/tfrc.h | 6
-rw-r--r--  include/linux/thermal.h | 380
-rw-r--r--  include/linux/thinkpad_acpi.h | 15
-rw-r--r--  include/linux/thread_info.h | 130
-rw-r--r--  include/linux/threads.h | 5
-rw-r--r--  include/linux/thunderbolt.h | 664
-rw-r--r--  include/linux/ti-emif-sram.h | 139
-rw-r--r--  include/linux/ti_wilink_st.h | 21
-rw-r--r--  include/linux/tick.h | 128
-rw-r--r--  include/linux/tifm.h | 8
-rw-r--r--  include/linux/timb_dma.h | 14
-rw-r--r--  include/linux/timb_gpio.h | 14
-rw-r--r--  include/linux/time.h | 260
-rw-r--r--  include/linux/time32.h | 72
-rw-r--r--  include/linux/time64.h | 128
-rw-r--r--  include/linux/time_namespace.h | 166
-rw-r--r--  include/linux/timecounter.h | 13
-rw-r--r--  include/linux/timekeeper_internal.h | 21
-rw-r--r--  include/linux/timekeeping.h | 286
-rw-r--r--  include/linux/timer.h | 199
-rw-r--r--  include/linux/timerfd.h | 1
-rw-r--r--  include/linux/timeriomem-rng.h | 10
-rw-r--r--  include/linux/timerqueue.h | 24
-rw-r--r--  include/linux/timex.h | 17
-rw-r--r--  include/linux/tnum.h | 117
-rw-r--r--  include/linux/topology.h | 108
-rw-r--r--  include/linux/torture.h | 71
-rw-r--r--  include/linux/toshiba.h | 12
-rw-r--r--  include/linux/tpm.h | 444
-rw-r--r--  include/linux/tpm_command.h | 1
-rw-r--r--  include/linux/tpm_eventlog.h | 294
-rw-r--r--  include/linux/trace.h | 73
-rw-r--r--  include/linux/trace_clock.h | 1
-rw-r--r--  include/linux/trace_events.h | 543
-rw-r--r--  include/linux/trace_recursion.h | 228
-rw-r--r--  include/linux/trace_seq.h | 17
-rw-r--r--  include/linux/tracefs.h | 7
-rw-r--r--  include/linux/tracehook.h | 196
-rw-r--r--  include/linux/tracepoint-defs.h | 53
-rw-r--r--  include/linux/tracepoint.h | 276
-rw-r--r--  include/linux/transport_class.h | 15
-rw-r--r--  include/linux/ts-nbus.h | 18
-rw-r--r--  include/linux/tsacct_kern.h | 1
-rw-r--r--  include/linux/tty.h | 724
-rw-r--r--  include/linux/tty_buffer.h | 57
-rw-r--r--  include/linux/tty_driver.h | 632
-rw-r--r--  include/linux/tty_flip.h | 37
-rw-r--r--  include/linux/tty_ldisc.h | 356
-rw-r--r--  include/linux/tty_port.h | 256
-rw-r--r--  include/linux/typecheck.h | 10
-rw-r--r--  include/linux/types.h | 80
-rw-r--r--  include/linux/u64_stats_sync.h | 207
-rw-r--r--  include/linux/u64_stats_sync_api.h | 1
-rw-r--r--  include/linux/uacce.h | 161
-rw-r--r--  include/linux/uaccess.h | 298
-rw-r--r--  include/linux/ubsan.h | 9
-rw-r--r--  include/linux/ucb1400.h | 165
-rw-r--r--  include/linux/ucs2_string.h | 1
-rw-r--r--  include/linux/udp.h | 72
-rw-r--r--  include/linux/uidgid.h | 1
-rw-r--r--  include/linux/uinput.h | 81
-rw-r--r--  include/linux/uio.h | 377
-rw-r--r--  include/linux/uio_driver.h | 57
-rw-r--r--  include/linux/ulpi/driver.h | 1
-rw-r--r--  include/linux/ulpi/interface.h | 1
-rw-r--r--  include/linux/ulpi/regs.h | 1
-rw-r--r--  include/linux/umh.h | 70
-rw-r--r--  include/linux/unaligned/access_ok.h | 67
-rw-r--r--  include/linux/unaligned/be_byteshift.h | 70
-rw-r--r--  include/linux/unaligned/be_memmove.h | 36
-rw-r--r--  include/linux/unaligned/be_struct.h | 36
-rw-r--r--  include/linux/unaligned/generic.h | 68
-rw-r--r--  include/linux/unaligned/le_byteshift.h | 70
-rw-r--r--  include/linux/unaligned/le_memmove.h | 36
-rw-r--r--  include/linux/unaligned/le_struct.h | 36
-rw-r--r--  include/linux/unaligned/memmove.h | 45
-rw-r--r--  include/linux/unaligned/packed_struct.h | 2
-rw-r--r--  include/linux/unicode.h | 79
-rw-r--r--  include/linux/units.h | 111
-rw-r--r--  include/linux/uprobes.h | 27
-rw-r--r--  include/linux/usb.h | 212
-rw-r--r--  include/linux/usb/association.h | 150
-rw-r--r--  include/linux/usb/atmel_usba_udc.h | 23
-rw-r--r--  include/linux/usb/audio-v2.h | 63
-rw-r--r--  include/linux/usb/audio-v3.h | 454
-rw-r--r--  include/linux/usb/audio.h | 4
-rw-r--r--  include/linux/usb/c67x00.h | 16
-rw-r--r--  include/linux/usb/ccid.h | 39
-rw-r--r--  include/linux/usb/cdc-wdm.h | 8
-rw-r--r--  include/linux/usb/cdc.h | 5
-rw-r--r--  include/linux/usb/cdc_ncm.h | 21
-rw-r--r--  include/linux/usb/ch9.h | 53
-rw-r--r--  include/linux/usb/chipidea.h | 22
-rw-r--r--  include/linux/usb/composite.h | 53
-rw-r--r--  include/linux/usb/ehci-dbgp.h | 1
-rw-r--r--  include/linux/usb/ehci_def.h | 50
-rw-r--r--  include/linux/usb/ehci_pdriver.h | 16
-rw-r--r--  include/linux/usb/ezusb.h | 1
-rw-r--r--  include/linux/usb/functionfs.h | 1
-rw-r--r--  include/linux/usb/g_hid.h | 15
-rw-r--r--  include/linux/usb/gadget.h | 151
-rw-r--r--  include/linux/usb/gadget_configfs.h | 1
-rw-r--r--  include/linux/usb/gpio_vbus.h | 32
-rw-r--r--  include/linux/usb/hcd.h | 96
-rw-r--r--  include/linux/usb/input.h | 5
-rw-r--r--  include/linux/usb/iowarrior.h | 1
-rw-r--r--  include/linux/usb/irda.h | 14
-rw-r--r--  include/linux/usb/isp116x.h | 1
-rw-r--r--  include/linux/usb/isp1301.h | 11
-rw-r--r--  include/linux/usb/isp1362.h | 1
-rw-r--r--  include/linux/usb/isp1760.h | 18
-rw-r--r--  include/linux/usb/m66592.h | 15
-rw-r--r--  include/linux/usb/msm_hsusb_hw.h | 77
-rw-r--r--  include/linux/usb/musb-ux500.h | 11
-rw-r--r--  include/linux/usb/musb.h | 25
-rw-r--r--  include/linux/usb/net2280.h | 15
-rw-r--r--  include/linux/usb/of.h | 24
-rw-r--r--  include/linux/usb/ohci_pdriver.h | 15
-rw-r--r--  include/linux/usb/onboard_hub.h | 18
-rw-r--r--  include/linux/usb/otg-fsm.h | 25
-rw-r--r--  include/linux/usb/otg.h | 4
-rw-r--r--  include/linux/usb/pd.h | 536
-rw-r--r--  include/linux/usb/pd_ado.h | 42
-rw-r--r--  include/linux/usb/pd_bdo.h | 22
-rw-r--r--  include/linux/usb/pd_ext_sdb.h | 27
-rw-r--r--  include/linux/usb/pd_vdo.h | 519
-rw-r--r--  include/linux/usb/phy.h | 85
-rw-r--r--  include/linux/usb/phy_companion.h | 13
-rw-r--r--  include/linux/usb/quirks.h | 18
-rw-r--r--  include/linux/usb/r8152.h | 37
-rw-r--r--  include/linux/usb/r8a66597.h | 15
-rw-r--r--  include/linux/usb/renesas_usbhs.h | 48
-rw-r--r--  include/linux/usb/rndis_host.h | 16
-rw-r--r--  include/linux/usb/role.h | 126
-rw-r--r--  include/linux/usb/rzv2m_usb3drd.h | 20
-rw-r--r--  include/linux/usb/samsung_usb_phy.h | 16
-rw-r--r--  include/linux/usb/serial.h | 137
-rw-r--r--  include/linux/usb/sl811.h | 1
-rw-r--r--  include/linux/usb/storage.h | 3
-rw-r--r--  include/linux/usb/tcpci.h | 239
-rw-r--r--  include/linux/usb/tcpm.h | 177
-rw-r--r--  include/linux/usb/tegra_usb_phy.h | 24
-rw-r--r--  include/linux/usb/tilegx.h | 34
-rw-r--r--  include/linux/usb/typec.h | 203
-rw-r--r--  include/linux/usb/typec_altmode.h | 191
-rw-r--r--  include/linux/usb/typec_dp.h | 109
-rw-r--r--  include/linux/usb/typec_mux.h | 111
-rw-r--r--  include/linux/usb/typec_retimer.h | 45
-rw-r--r--  include/linux/usb/typec_tbt.h | 57
-rw-r--r--  include/linux/usb/uas.h | 1
-rw-r--r--  include/linux/usb/ulpi.h | 16
-rw-r--r--  include/linux/usb/usb338x.h | 47
-rw-r--r--  include/linux/usb/usb_phy_generic.h | 13
-rw-r--r--  include/linux/usb/usbnet.h | 42
-rw-r--r--  include/linux/usb/uvc.h | 158
-rw-r--r--include/linux/usb/webusb.h80
-rw-r--r--include/linux/usb/wusb-wa.h303
-rw-r--r--include/linux/usb/wusb.h377
-rw-r--r--include/linux/usb/xhci-dbgp.h7
-rw-r--r--include/linux/usb_usual.h7
-rw-r--r--include/linux/usbdevice_fs.h3
-rw-r--r--include/linux/user-return-notifier.h1
-rw-r--r--include/linux/user_events.h83
-rw-r--r--include/linux/user_namespace.h102
-rw-r--r--include/linux/userfaultfd_k.h243
-rw-r--r--include/linux/usermode_driver.h19
-rw-r--r--include/linux/util_macros.h15
-rw-r--r--include/linux/uts.h1
-rw-r--r--include/linux/utsname.h19
-rw-r--r--include/linux/uuid.h67
-rw-r--r--include/linux/uwb.h831
-rw-r--r--include/linux/uwb/debug-cmd.h68
-rw-r--r--include/linux/uwb/spec.h781
-rw-r--r--include/linux/uwb/umc.h193
-rw-r--r--include/linux/uwb/whci.h117
-rw-r--r--include/linux/vbox_utils.h59
-rw-r--r--include/linux/vdpa.h569
-rw-r--r--include/linux/verification.h33
-rw-r--r--include/linux/vermagic.h27
-rw-r--r--include/linux/vexpress.h39
-rw-r--r--include/linux/vfio.h353
-rw-r--r--include/linux/vfio_pci_core.h133
-rw-r--r--include/linux/vfs.h1
-rw-r--r--include/linux/vga_switcheroo.h17
-rw-r--r--include/linux/vgaarb.h120
-rw-r--r--include/linux/vhost_iotlb.h52
-rw-r--r--include/linux/via-core.h19
-rw-r--r--include/linux/via-gpio.h2
-rw-r--r--include/linux/via.h1
-rw-r--r--include/linux/via_i2c.h16
-rw-r--r--include/linux/videodev2.h1
-rw-r--r--include/linux/virtio.h62
-rw-r--r--include/linux/virtio_anchor.h19
-rw-r--r--include/linux/virtio_byteorder.h1
-rw-r--r--include/linux/virtio_caif.h6
-rw-r--r--include/linux/virtio_config.h315
-rw-r--r--include/linux/virtio_dma_buf.h37
-rw-r--r--include/linux/virtio_net.h146
-rw-r--r--include/linux/virtio_pci_legacy.h40
-rw-r--r--include/linux/virtio_pci_modern.h118
-rw-r--r--include/linux/virtio_ring.h53
-rw-r--r--include/linux/virtio_vsock.h159
-rw-r--r--include/linux/vlynq.h15
-rw-r--r--include/linux/vm_event_item.h70
-rw-r--r--include/linux/vm_sockets.h23
-rw-r--r--include/linux/vmacache.h38
-rw-r--r--include/linux/vmalloc.h215
-rw-r--r--include/linux/vme.h189
-rw-r--r--include/linux/vmpressure.h5
-rw-r--r--include/linux/vmstat.h366
-rw-r--r--include/linux/vmw_vmci_api.h12
-rw-r--r--include/linux/vmw_vmci_defs.h181
-rw-r--r--include/linux/vringh.h109
-rw-r--r--include/linux/vt.h1
-rw-r--r--include/linux/vt_buffer.h16
-rw-r--r--include/linux/vt_kern.h85
-rw-r--r--include/linux/vtime.h168
-rw-r--r--include/linux/w1-gpio.h14
-rw-r--r--include/linux/w1.h28
-rw-r--r--include/linux/wait.h321
-rw-r--r--include/linux/wait_api.h1
-rw-r--r--include/linux/wait_bit.h131
-rw-r--r--include/linux/wanrouter.h10
-rw-r--r--include/linux/watch_queue.h134
-rw-r--r--include/linux/watchdog.h20
-rw-r--r--include/linux/wimax/debug.h526
-rw-r--r--include/linux/win_minmax.h1
-rw-r--r--include/linux/wireless.h11
-rw-r--r--include/linux/wkup_m3_ipc.h34
-rw-r--r--include/linux/wl12xx.h58
-rw-r--r--include/linux/wm97xx.h6
-rw-r--r--include/linux/wmi.h33
-rw-r--r--include/linux/workqueue.h191
-rw-r--r--include/linux/workqueue_api.h1
-rw-r--r--include/linux/writeback.h155
-rw-r--r--include/linux/ww_mutex.h133
-rw-r--r--include/linux/wwan.h199
-rw-r--r--include/linux/xarray.h1868
-rw-r--r--include/linux/xattr.h101
-rw-r--r--include/linux/xxhash.h259
-rw-r--r--include/linux/xz.h110
-rw-r--r--include/linux/yam.h17
-rw-r--r--include/linux/z2_battery.h17
-rw-r--r--include/linux/zbud.h22
-rw-r--r--include/linux/zlib.h8
-rw-r--r--include/linux/zorro.h13
-rw-r--r--include/linux/zpool.h9
-rw-r--r--include/linux/zsmalloc.h7
-rw-r--r--include/linux/zstd.h447
-rw-r--r--include/linux/zstd_errors.h77
-rw-r--r--include/linux/zstd_lib.h2551
2855 files changed, 237880 insertions, 95049 deletions
diff --git a/include/linux/8250_pci.h b/include/linux/8250_pci.h
index b24ff086a662..9c777d2c98f5 100644
--- a/include/linux/8250_pci.h
+++ b/include/linux/8250_pci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for PCI support.
*/
diff --git a/include/linux/a.out.h b/include/linux/a.out.h
deleted file mode 100644
index ee884168989f..000000000000
--- a/include/linux/a.out.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef __A_OUT_GNU_H__
-#define __A_OUT_GNU_H__
-
-#include <uapi/linux/a.out.h>
-
-#ifndef __ASSEMBLY__
-#ifdef linux
-#include <asm/page.h>
-#if defined(__i386__) || defined(__mc68000__)
-#else
-#ifndef SEGMENT_SIZE
-#define SEGMENT_SIZE PAGE_SIZE
-#endif
-#endif
-#endif
-#endif /*__ASSEMBLY__ */
-#endif /* __A_OUT_GNU_H__ */
diff --git a/include/linux/acct.h b/include/linux/acct.h
index dccc2d4fe7de..2718c4854815 100644
--- a/include/linux/acct.h
+++ b/include/linux/acct.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* BSD Process Accounting for Linux - Definitions
*
@@ -19,11 +20,7 @@
#ifdef CONFIG_BSD_PROCESS_ACCT
-struct vfsmount;
-struct super_block;
-struct pacct_struct;
struct pid_namespace;
-extern int acct_parm[]; /* for sysctl */
extern void acct_collect(long exitcode, int group_dead);
extern void acct_process(void);
extern void acct_exit_ns(struct pid_namespace *);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index c749eef1daa1..7b71dd74baeb 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* acpi.h - ACPI Interface
*
* Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#ifndef _LINUX_ACPI_H
@@ -25,9 +12,13 @@
#include <linux/ioport.h> /* for struct resource */
#include <linux/resource_ext.h>
#include <linux/device.h>
+#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/uuid.h>
+struct irq_domain;
+struct irq_domain_ops;
+
#ifndef _LINUX
#define _LINUX
#endif
@@ -36,7 +27,6 @@
#ifdef CONFIG_ACPI
#include <linux/list.h>
-#include <linux/mod_devicetable.h>
#include <linux/dynamic_debug.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -56,9 +46,8 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
#define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \
acpi_fwnode_handle(adev) : NULL)
#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))
-
-
-extern const struct fwnode_operations acpi_fwnode_ops;
+#define ACPI_HANDLE_FWNODE(fwnode) \
+ acpi_device_handle(to_acpi_device_node(fwnode))
static inline struct fwnode_handle *acpi_alloc_fwnode_static(void)
{
@@ -68,15 +57,14 @@ static inline struct fwnode_handle *acpi_alloc_fwnode_static(void)
if (!fwnode)
return NULL;
- fwnode->type = FWNODE_ACPI_STATIC;
- fwnode->ops = &acpi_fwnode_ops;
+ fwnode_init(fwnode, &acpi_static_fwnode_ops);
return fwnode;
}
static inline void acpi_free_fwnode_static(struct fwnode_handle *fwnode)
{
- if (WARN_ON(!fwnode || fwnode->type != FWNODE_ACPI_STATIC))
+ if (WARN_ON(!is_acpi_static_node(fwnode)))
return;
kfree(fwnode);
@@ -103,7 +91,7 @@ static inline bool has_acpi_companion(struct device *dev)
static inline void acpi_preset_companion(struct device *dev,
struct acpi_device *parent, u64 addr)
{
- ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, NULL));
+ ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, false));
}
static inline const char *acpi_dev_name(struct acpi_device *adev)
@@ -119,6 +107,7 @@ enum acpi_irq_model_id {
ACPI_IRQ_MODEL_IOSAPIC,
ACPI_IRQ_MODEL_PLATFORM,
ACPI_IRQ_MODEL_GIC,
+ ACPI_IRQ_MODEL_LPIC,
ACPI_IRQ_MODEL_COUNT
};
@@ -143,12 +132,21 @@ enum acpi_address_range_id {
/* Table Handlers */
+union acpi_subtable_headers {
+ struct acpi_subtable_header common;
+ struct acpi_hmat_structure hmat;
+ struct acpi_prmt_module_header prmt;
+ struct acpi_cedt_header cedt;
+};
typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table);
-typedef int (*acpi_tbl_entry_handler)(struct acpi_subtable_header *header,
+typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header,
const unsigned long end);
+typedef int (*acpi_tbl_entry_handler_arg)(union acpi_subtable_headers *header,
+ void *arg, const unsigned long end);
+
/* Debugger support */
struct acpi_debugger_ops {
@@ -225,36 +223,56 @@ static inline int acpi_debugger_notify_command_complete(void)
struct acpi_subtable_proc {
int id;
acpi_tbl_entry_handler handler;
+ acpi_tbl_entry_handler_arg handler_arg;
+ void *arg;
int count;
};
-char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
-void __acpi_unmap_table(char *map, unsigned long size);
+void __iomem *__acpi_map_table(unsigned long phys, unsigned long size);
+void __acpi_unmap_table(void __iomem *map, unsigned long size);
int early_acpi_boot_init(void);
int acpi_boot_init (void);
+void acpi_boot_table_prepare (void);
void acpi_boot_table_init (void);
int acpi_mps_check (void);
int acpi_numa_init (void);
+int acpi_locate_initial_tables (void);
+void acpi_reserve_initial_tables (void);
+void acpi_table_init_complete (void);
int acpi_table_init (void);
+
+#ifdef CONFIG_ACPI_TABLE_LIB
+#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI)
+#define __init_or_acpilib
+#define __initdata_or_acpilib
+#else
+#define EXPORT_SYMBOL_ACPI_LIB(x)
+#define __init_or_acpilib __init
+#define __initdata_or_acpilib __initdata
+#endif
+
int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
-int __init acpi_table_parse_entries(char *id, unsigned long table_size,
- int entry_id,
- acpi_tbl_entry_handler handler,
- unsigned int max_entries);
-int __init acpi_table_parse_entries_array(char *id, unsigned long table_size,
- struct acpi_subtable_proc *proc, int proc_num,
- unsigned int max_entries);
+int __init_or_acpilib acpi_table_parse_entries(char *id,
+ unsigned long table_size, int entry_id,
+ acpi_tbl_entry_handler handler, unsigned int max_entries);
+int __init_or_acpilib acpi_table_parse_entries_array(char *id,
+ unsigned long table_size, struct acpi_subtable_proc *proc,
+ int proc_num, unsigned int max_entries);
int acpi_table_parse_madt(enum acpi_madt_type id,
acpi_tbl_entry_handler handler,
unsigned int max_entries);
+int __init_or_acpilib
+acpi_table_parse_cedt(enum acpi_cedt_type id,
+ acpi_tbl_entry_handler_arg handler_arg, void *arg);
+
int acpi_parse_mcfg (struct acpi_table_header *header);
void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
/* the following numa functions are architecture-dependent */
void acpi_numa_slit_init (struct acpi_table_slit *slit);
-#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_LOONGARCH)
void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
#else
static inline void
@@ -263,6 +281,12 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { }
void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
+#if defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH)
+void acpi_arch_dma_setup(struct device *dev);
+#else
+static inline void acpi_arch_dma_setup(struct device *dev) { }
+#endif
+
#ifdef CONFIG_ARM64
void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa);
#else
@@ -289,6 +313,21 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
/* Validate the processor object's proc_id */
bool acpi_duplicate_processor_id(int proc_id);
+/* Processor _CST control */
+struct acpi_processor_power;
+
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+bool acpi_processor_claim_cst_control(void);
+int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
+ struct acpi_processor_power *info);
+#else
+static inline bool acpi_processor_claim_cst_control(void) { return false; }
+static inline int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
+ struct acpi_processor_power *info)
+{
+ return -ENODEV;
+}
+#endif
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
@@ -323,12 +362,22 @@ int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi);
void acpi_set_irq_model(enum acpi_irq_model_id model,
- struct fwnode_handle *fwnode);
+ struct fwnode_handle *(*)(u32));
+void acpi_set_gsi_to_irq_fallback(u32 (*)(u32));
+
+struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags,
+ unsigned int size,
+ struct fwnode_handle *fwnode,
+ const struct irq_domain_ops *ops,
+ void *host_data);
#ifdef CONFIG_X86_IO_APIC
extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
#else
-#define acpi_get_override_irq(gsi, trigger, polarity) (-1)
+static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
+{
+ return -1;
+}
#endif
/*
* This function undoes the effect of one call to acpi_register_gsi().
@@ -342,7 +391,14 @@ struct pci_dev;
int acpi_pci_irq_enable (struct pci_dev *dev);
void acpi_penalize_isa_irq(int irq, int active);
bool acpi_isa_irq_available(int irq);
+#ifdef CONFIG_PCI
void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
+#else
+static inline void acpi_penalize_sci_irq(int irq, int trigger,
+ int polarity)
+{
+}
+#endif
void acpi_pci_irq_disable (struct pci_dev *dev);
extern int ec_read(u8 addr, u8 *val);
@@ -371,6 +427,7 @@ extern acpi_status wmi_install_notify_handler(const char *guid,
extern acpi_status wmi_remove_notify_handler(const char *guid);
extern acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out);
extern bool wmi_has_guid(const char *guid);
+extern char *wmi_get_acpi_device_uid(const char *guid);
#endif /* CONFIG_ACPI_WMI */
@@ -394,10 +451,35 @@ extern void acpi_osi_setup(char *str);
extern bool acpi_osi_is_win8(void);
#ifdef CONFIG_ACPI_NUMA
-int acpi_map_pxm_to_online_node(int pxm);
+int acpi_map_pxm_to_node(int pxm);
int acpi_get_node(acpi_handle handle);
+
+/**
+ * pxm_to_online_node - Map proximity ID to online node
+ * @pxm: ACPI proximity ID
+ *
+ * This is similar to pxm_to_node(), but always returns an online
+ * node. When the mapped node from a given proximity ID is offline, it
+ * looks up the node distance table and returns the nearest online node.
+ *
+ * ACPI device drivers, which are called after the NUMA initialization has
+ * completed in the kernel, can call this interface to obtain their device
+ * NUMA topology from ACPI tables. Such drivers do not have to deal with
+ * offline nodes. A node may be offline when its SRAT memory entry does not
+ * exist, or when NUMA is disabled, e.g. "numa=off" on x86.
+ */
+static inline int pxm_to_online_node(int pxm)
+{
+ int node = pxm_to_node(pxm);
+
+ return numa_map_to_online_node(node);
+}
#else
-static inline int acpi_map_pxm_to_online_node(int pxm)
+static inline int pxm_to_online_node(int pxm)
+{
+ return 0;
+}
+static inline int acpi_map_pxm_to_node(int pxm)
{
return 0;
}
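
As a caller-side illustration (not part of this patch; the helper name below is hypothetical), the new pxm_to_online_node() pairs naturally with kmalloc_node(), since it never hands back an offline node id:

#include <linux/acpi.h>
#include <linux/slab.h>

/* Hypothetical sketch: allocate memory near a device's proximity domain. */
static void *alloc_near_pxm(int pxm, size_t size)
{
        /* Always resolves to an online node, falling back to the
         * nearest one when the pxm's own node is offline.
         */
        int node = pxm_to_online_node(pxm);

        return kmalloc_node(size, GFP_KERNEL, node);
}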
@@ -418,7 +500,7 @@ bool acpi_dev_resource_address_space(struct acpi_resource *ares,
struct resource_win *win);
bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares,
struct resource_win *win);
-unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable);
+unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable, u8 wake_capable);
unsigned int acpi_dev_get_irq_type(int triggering, int polarity);
bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
struct resource *res);
@@ -427,6 +509,9 @@ void acpi_dev_free_resource_list(struct list_head *list);
int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
int (*preproc)(struct acpi_resource *, void *),
void *preproc_data);
+int acpi_dev_get_dma_resources(struct acpi_device *adev,
+ struct list_head *list);
+int acpi_dev_get_memory_resources(struct acpi_device *adev, struct list_head *list);
int acpi_dev_filter_resource_type(struct acpi_resource *ares,
unsigned long types);
@@ -446,15 +531,21 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
int acpi_resources_are_enforced(void);
#ifdef CONFIG_HIBERNATION
-void __init acpi_no_s4_hw_signature(void);
+extern int acpi_check_s4_hw_signature;
#endif
#ifdef CONFIG_PM_SLEEP
void __init acpi_old_suspend_ordering(void);
void __init acpi_nvs_nosave(void);
void __init acpi_nvs_nosave_s3(void);
+void __init acpi_sleep_no_blacklist(void);
#endif /* CONFIG_PM_SLEEP */
+int acpi_register_wakeup_handler(
+ int wake_irq, bool (*wakeup)(void *context), void *context);
+void acpi_unregister_wakeup_handler(
+ bool (*wakeup)(void *context), void *context);
+
struct acpi_osc_context {
char *uuid_str; /* UUID string */
int rev;
@@ -464,10 +555,16 @@ struct acpi_osc_context {
acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
-/* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */
+/* Number of _OSC capability DWORDS depends on bridge type */
+#define OSC_PCI_CAPABILITY_DWORDS 3
+#define OSC_CXL_CAPABILITY_DWORDS 5
+
+/* Indexes into _OSC Capabilities Buffer (DWORDs 2 to 5 are device-specific) */
#define OSC_QUERY_DWORD 0 /* DWORD 1 */
#define OSC_SUPPORT_DWORD 1 /* DWORD 2 */
#define OSC_CONTROL_DWORD 2 /* DWORD 3 */
+#define OSC_EXT_SUPPORT_DWORD 3 /* DWORD 4 */
+#define OSC_EXT_CONTROL_DWORD 4 /* DWORD 5 */
/* _OSC Capabilities DWORD 1: Query/Control and Error Returns (generic) */
#define OSC_QUERY_ENABLE 0x00000001 /* input */
@@ -487,9 +584,25 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_PCLPI_SUPPORT 0x00000080
#define OSC_SB_OSLPI_SUPPORT 0x00000100
#define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000
+#define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00002000
+#define OSC_SB_CPC_FLEXIBLE_ADR_SPACE 0x00004000
+#define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000
+#define OSC_SB_PRM_SUPPORT 0x00200000
+#define OSC_SB_FFH_OPR_SUPPORT 0x00400000
extern bool osc_sb_apei_support_acked;
extern bool osc_pc_lpi_support_confirmed;
+extern bool osc_sb_native_usb4_support_confirmed;
+extern bool osc_sb_cppc2_support_acked;
+extern bool osc_cpc_flexible_adr_space_confirmed;
+
+/* USB4 Capabilities */
+#define OSC_USB_USB3_TUNNELING 0x00000001
+#define OSC_USB_DP_TUNNELING 0x00000002
+#define OSC_USB_PCIE_TUNNELING 0x00000004
+#define OSC_USB_XDOMAIN 0x00000008
+
+extern u32 osc_sb_native_usb4_control;
/* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */
#define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001
@@ -497,7 +610,8 @@ extern bool osc_pc_lpi_support_confirmed;
#define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004
#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008
#define OSC_PCI_MSI_SUPPORT 0x00000010
-#define OSC_PCI_SUPPORT_MASKS 0x0000001f
+#define OSC_PCI_EDR_SUPPORT 0x00000080
+#define OSC_PCI_HPX_TYPE_3_SUPPORT 0x00000100
/* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */
#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001
@@ -505,7 +619,31 @@ extern bool osc_pc_lpi_support_confirmed;
#define OSC_PCI_EXPRESS_PME_CONTROL 0x00000004
#define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008
#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010
-#define OSC_PCI_CONTROL_MASKS 0x0000001f
+#define OSC_PCI_EXPRESS_LTR_CONTROL 0x00000020
+#define OSC_PCI_EXPRESS_DPC_CONTROL 0x00000080
+
+/* CXL _OSC: Capabilities DWORD 4: Support Field */
+#define OSC_CXL_1_1_PORT_REG_ACCESS_SUPPORT 0x00000001
+#define OSC_CXL_2_0_PORT_DEV_REG_ACCESS_SUPPORT 0x00000002
+#define OSC_CXL_PROTOCOL_ERR_REPORTING_SUPPORT 0x00000004
+#define OSC_CXL_NATIVE_HP_SUPPORT 0x00000008
+
+/* CXL _OSC: Capabilities DWORD 5: Control Field */
+#define OSC_CXL_ERROR_REPORTING_CONTROL 0x00000001
+
+static inline u32 acpi_osc_ctx_get_pci_control(struct acpi_osc_context *context)
+{
+ u32 *ret = context->ret.pointer;
+
+ return ret[OSC_CONTROL_DWORD];
+}
+
+static inline u32 acpi_osc_ctx_get_cxl_control(struct acpi_osc_context *context)
+{
+ u32 *ret = context->ret.pointer;
+
+ return ret[OSC_EXT_CONTROL_DWORD];
+}
#define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002
#define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004
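
For illustration only (assumed caller code with a hypothetical function name), the new accessors read the granted control bits out of the _OSC return buffer after a successful negotiation:

#include <linux/acpi.h>

/* Hypothetical sketch: run _OSC and return the granted PCI controls.
 * @ctx must already have uuid_str/rev/cap filled in by the caller; on
 * success the caller also owns (and must kfree) ctx->ret.pointer, per
 * the existing acpi_run_osc() contract.
 */
static u32 my_granted_pci_controls(acpi_handle handle,
                                   struct acpi_osc_context *ctx)
{
        acpi_status status = acpi_run_osc(handle, ctx);

        if (ACPI_FAILURE(status))
                return 0;

        return acpi_osc_ctx_get_pci_control(ctx);
}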
@@ -518,9 +656,6 @@ extern bool osc_pc_lpi_support_confirmed;
#define ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES 0x0000000E
#define ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS 0x0000000F
-extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
- u32 *mask, u32 req);
-
/* Enable _OST when all relevant hotplug operations are enabled */
#if defined(CONFIG_ACPI_HOTPLUG_CPU) && \
defined(CONFIG_ACPI_HOTPLUG_MEMORY) && \
@@ -556,8 +691,28 @@ extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
#define ACPI_OST_SC_DRIVER_LOAD_FAILURE 0x81
#define ACPI_OST_SC_INSERT_NOT_SUPPORTED 0x82
+enum acpi_predicate {
+ all_versions,
+ less_than_or_equal,
+ equal,
+ greater_than_or_equal,
+};
+
+/* Table must be terminated by a NULL entry */
+struct acpi_platform_list {
+ char oem_id[ACPI_OEM_ID_SIZE+1];
+ char oem_table_id[ACPI_OEM_TABLE_ID_SIZE+1];
+ u32 oem_revision;
+ char *table;
+ enum acpi_predicate pred;
+ char *reason;
+ u32 data;
+};
+int acpi_match_platform_list(const struct acpi_platform_list *plat);
+
extern void acpi_early_init(void);
extern void acpi_subsystem_init(void);
+extern void arch_post_acpi_subsys_init(void);
extern int acpi_nvs_register(__u64 start, __u64 size);
@@ -567,14 +722,14 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
const struct device *dev);
+const void *acpi_device_get_match_data(const struct device *dev);
extern bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv);
-int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
+int acpi_device_uevent_modalias(const struct device *, struct kobj_uevent_env *);
int acpi_device_modalias(struct device *, char *, int);
-void acpi_walk_dep_device_list(acpi_handle handle);
struct platform_device *acpi_create_platform_device(struct acpi_device *,
- struct property_entry *);
+ const struct property_entry *);
#define ACPI_PTR(_ptr) (_ptr)
static inline void acpi_device_set_enumerated(struct acpi_device *adev)
@@ -602,6 +757,22 @@ bool acpi_gtdt_c3stop(int type);
int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count);
#endif
+#ifndef ACPI_HAVE_ARCH_SET_ROOT_POINTER
+static inline void acpi_arch_set_root_pointer(u64 addr)
+{
+}
+#endif
+
+#ifndef ACPI_HAVE_ARCH_GET_ROOT_POINTER
+static inline u64 acpi_arch_get_root_pointer(void)
+{
+ return 0;
+}
+#endif
+
+int acpi_get_local_address(acpi_handle handle, u32 *addr);
+const char *acpi_get_subsystem_id(acpi_handle handle);
+
#else /* !CONFIG_ACPI */
#define acpi_disabled 1
@@ -609,8 +780,11 @@ int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count)
#define ACPI_COMPANION(dev) (NULL)
#define ACPI_COMPANION_SET(dev, adev) do { } while (0)
#define ACPI_HANDLE(dev) (NULL)
+#define ACPI_HANDLE_FWNODE(fwnode) (NULL)
#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0),
+#include <acpi/acpi_numa.h>
+
struct fwnode_handle;
static inline bool acpi_dev_found(const char *hid)
@@ -623,32 +797,58 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
return false;
}
-static inline bool is_acpi_node(struct fwnode_handle *fwnode)
+struct acpi_device;
+
+static inline bool
+acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2)
+{
+ return false;
+}
+
+static inline int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer)
+{
+ return -ENODEV;
+}
+
+static inline struct acpi_device *
+acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
+{
+ return NULL;
+}
+
+static inline bool acpi_reduced_hardware(void)
+{
+ return false;
+}
+
+static inline void acpi_dev_put(struct acpi_device *adev) {}
+
+static inline bool is_acpi_node(const struct fwnode_handle *fwnode)
{
return false;
}
-static inline bool is_acpi_device_node(struct fwnode_handle *fwnode)
+static inline bool is_acpi_device_node(const struct fwnode_handle *fwnode)
{
return false;
}
-static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode)
+static inline struct acpi_device *to_acpi_device_node(const struct fwnode_handle *fwnode)
{
return NULL;
}
-static inline bool is_acpi_data_node(struct fwnode_handle *fwnode)
+static inline bool is_acpi_data_node(const struct fwnode_handle *fwnode)
{
return false;
}
-static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwnode)
+static inline struct acpi_data_node *to_acpi_data_node(const struct fwnode_handle *fwnode)
{
return NULL;
}
-static inline bool acpi_data_node_match(struct fwnode_handle *fwnode,
+static inline bool acpi_data_node_match(const struct fwnode_handle *fwnode,
const char *name)
{
return false;
@@ -691,9 +891,12 @@ static inline int acpi_boot_init(void)
return 0;
}
+static inline void acpi_boot_table_prepare(void)
+{
+}
+
static inline void acpi_boot_table_init(void)
{
- return;
}
static inline int acpi_mps_check(void)
@@ -738,21 +941,41 @@ static inline const struct acpi_device_id *acpi_match_device(
return NULL;
}
+static inline const void *acpi_device_get_match_data(const struct device *dev)
+{
+ return NULL;
+}
+
static inline bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv)
{
return false;
}
+static inline bool acpi_check_dsm(acpi_handle handle, const guid_t *guid,
+ u64 rev, u64 funcs)
+{
+ return false;
+}
+
static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle,
const guid_t *guid,
- int rev, int func,
+ u64 rev, u64 func,
union acpi_object *argv4)
{
return NULL;
}
-static inline int acpi_device_uevent_modalias(struct device *dev,
+static inline union acpi_object *acpi_evaluate_dsm_typed(acpi_handle handle,
+ const guid_t *guid,
+ u64 rev, u64 func,
+ union acpi_object *argv4,
+ acpi_object_type type)
+{
+ return NULL;
+}
+
+static inline int acpi_device_uevent_modalias(const struct device *dev,
struct kobj_uevent_env *env)
{
return -ENODEV;
@@ -764,7 +987,14 @@ static inline int acpi_device_modalias(struct device *dev,
return -ENODEV;
}
-static inline bool acpi_dma_supported(struct acpi_device *adev)
+static inline struct platform_device *
+acpi_create_platform_device(struct acpi_device *adev,
+ const struct property_entry *properties)
+{
+ return NULL;
+}
+
+static inline bool acpi_dma_supported(const struct acpi_device *adev)
{
return false;
}
@@ -774,13 +1004,23 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
return DEV_DMA_NOT_SUPPORTED;
}
+static inline int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map)
+{
+ return -ENODEV;
+}
+
static inline int acpi_dma_configure(struct device *dev,
enum dev_dma_attr attr)
{
return 0;
}
-static inline void acpi_dma_deconfigure(struct device *dev) { }
+static inline int acpi_dma_configure_id(struct device *dev,
+ enum dev_dma_attr attr,
+ const u32 *input_id)
+{
+ return 0;
+}
#define ACPI_PTR(_ptr) (NULL)
@@ -807,6 +1047,41 @@ static inline struct acpi_device *acpi_resource_consumer(struct resource *res)
return NULL;
}
+static inline int acpi_get_local_address(acpi_handle handle, u32 *addr)
+{
+ return -ENODEV;
+}
+
+static inline const char *acpi_get_subsystem_id(acpi_handle handle)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int acpi_register_wakeup_handler(int wake_irq,
+ bool (*wakeup)(void *context), void *context)
+{
+ return -ENXIO;
+}
+
+static inline void acpi_unregister_wakeup_handler(
+ bool (*wakeup)(void *context), void *context) { }
+
+struct acpi_osc_context;
+static inline u32 acpi_osc_ctx_get_pci_control(struct acpi_osc_context *context)
+{
+ return 0;
+}
+
+static inline u32 acpi_osc_ctx_get_cxl_control(struct acpi_osc_context *context)
+{
+ return 0;
+}
+
+static inline bool acpi_sleep_state_supported(u8 sleep_state)
+{
+ return false;
+}
+
#endif /* !CONFIG_ACPI */
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
@@ -827,8 +1102,17 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
u32 val_a, u32 val_b);
-
#ifdef CONFIG_X86
+struct acpi_s2idle_dev_ops {
+ struct list_head list_node;
+ void (*prepare)(void);
+ void (*check)(void);
+ void (*restore)(void);
+};
+int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg);
+void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg);
+#endif /* CONFIG_X86 */
+#ifndef CONFIG_IA64
void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
#else
static inline void arch_reserve_mem_area(acpi_physical_address addr,
@@ -841,62 +1125,71 @@ static inline void arch_reserve_mem_area(acpi_physical_address addr,
#endif
#if defined(CONFIG_ACPI) && defined(CONFIG_PM)
-int acpi_dev_runtime_suspend(struct device *dev);
-int acpi_dev_runtime_resume(struct device *dev);
+int acpi_dev_suspend(struct device *dev, bool wakeup);
+int acpi_dev_resume(struct device *dev);
int acpi_subsys_runtime_suspend(struct device *dev);
int acpi_subsys_runtime_resume(struct device *dev);
-struct acpi_device *acpi_dev_pm_get_node(struct device *dev);
int acpi_dev_pm_attach(struct device *dev, bool power_on);
+bool acpi_storage_d3(struct device *dev);
+bool acpi_dev_state_d0(struct device *dev);
#else
-static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; }
-static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
-static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
+static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
- return NULL;
+ return 0;
}
-static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
+static inline bool acpi_storage_d3(struct device *dev)
{
- return -ENODEV;
+ return false;
+}
+static inline bool acpi_dev_state_d0(struct device *dev)
+{
+ return true;
}
#endif
#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
-int acpi_dev_suspend_late(struct device *dev);
-int acpi_dev_resume_early(struct device *dev);
int acpi_subsys_prepare(struct device *dev);
void acpi_subsys_complete(struct device *dev);
int acpi_subsys_suspend_late(struct device *dev);
-int acpi_subsys_resume_early(struct device *dev);
+int acpi_subsys_suspend_noirq(struct device *dev);
int acpi_subsys_suspend(struct device *dev);
int acpi_subsys_freeze(struct device *dev);
+int acpi_subsys_poweroff(struct device *dev);
+void acpi_ec_mark_gpe_for_wake(void);
+void acpi_ec_set_gpe_wake_mask(u8 action);
+int acpi_subsys_restore_early(struct device *dev);
#else
-static inline int acpi_dev_suspend_late(struct device *dev) { return 0; }
-static inline int acpi_dev_resume_early(struct device *dev) { return 0; }
static inline int acpi_subsys_prepare(struct device *dev) { return 0; }
static inline void acpi_subsys_complete(struct device *dev) {}
static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; }
-static inline int acpi_subsys_resume_early(struct device *dev) { return 0; }
+static inline int acpi_subsys_suspend_noirq(struct device *dev) { return 0; }
static inline int acpi_subsys_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
+static inline int acpi_subsys_poweroff(struct device *dev) { return 0; }
+static inline int acpi_subsys_restore_early(struct device *dev) { return 0; }
+static inline void acpi_ec_mark_gpe_for_wake(void) {}
+static inline void acpi_ec_set_gpe_wake_mask(u8 action) {}
#endif
#ifdef CONFIG_ACPI
__printf(3, 4)
void acpi_handle_printk(const char *level, acpi_handle handle,
const char *fmt, ...);
+void acpi_evaluation_failure_warn(acpi_handle handle, const char *name,
+ acpi_status status);
#else /* !CONFIG_ACPI */
static inline __printf(3, 4) void
acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {}
+static inline void acpi_evaluation_failure_warn(acpi_handle handle,
+ const char *name,
+ acpi_status status) {}
#endif /* !CONFIG_ACPI */
#if defined(CONFIG_ACPI) && defined(CONFIG_DYNAMIC_DEBUG)
__printf(3, 4)
void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const char *fmt, ...);
-#else
-#define __acpi_handle_debug(descriptor, handle, fmt, ...) \
- acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__);
#endif
/*
@@ -926,12 +1219,8 @@ void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const c
#else
#if defined(CONFIG_DYNAMIC_DEBUG)
#define acpi_handle_debug(handle, fmt, ...) \
-do { \
- DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
- __acpi_handle_debug(&descriptor, handle, pr_fmt(fmt), \
- ##__VA_ARGS__); \
-} while (0)
+ _dynamic_func_call(fmt, __acpi_handle_debug, \
+ handle, pr_fmt(fmt), ##__VA_ARGS__)
#else
#define acpi_handle_debug(handle, fmt, ...) \
({ \
@@ -942,104 +1231,80 @@ do { \
#endif
#endif
-struct acpi_gpio_params {
- unsigned int crs_entry_index;
- unsigned int line_index;
- bool active_low;
-};
-
-struct acpi_gpio_mapping {
- const char *name;
- const struct acpi_gpio_params *data;
- unsigned int size;
-};
-
#if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB)
-int acpi_dev_add_driver_gpios(struct acpi_device *adev,
- const struct acpi_gpio_mapping *gpios);
-
-static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
-{
- if (adev)
- adev->driver_gpios = NULL;
-}
-
-int devm_acpi_dev_add_driver_gpios(struct device *dev,
- const struct acpi_gpio_mapping *gpios);
-void devm_acpi_dev_remove_driver_gpios(struct device *dev);
-
bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
struct acpi_resource_gpio **agpio);
-int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index);
+bool acpi_gpio_get_io_resource(struct acpi_resource *ares,
+ struct acpi_resource_gpio **agpio);
+int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, int index,
+ bool *wake_capable);
#else
-static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
- const struct acpi_gpio_mapping *gpios)
+static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
+ struct acpi_resource_gpio **agpio)
+{
+ return false;
+}
+static inline bool acpi_gpio_get_io_resource(struct acpi_resource *ares,
+ struct acpi_resource_gpio **agpio)
+{
+ return false;
+}
+static inline int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name,
+ int index, bool *wake_capable)
{
return -ENXIO;
}
-static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {}
+#endif
-static inline int devm_acpi_dev_add_driver_gpios(struct device *dev,
- const struct acpi_gpio_mapping *gpios)
+static inline int acpi_dev_gpio_irq_wake_get(struct acpi_device *adev, int index,
+ bool *wake_capable)
{
- return -ENXIO;
+ return acpi_dev_gpio_irq_wake_get_by(adev, NULL, index, wake_capable);
}
-static inline void devm_acpi_dev_remove_driver_gpios(struct device *dev) {}
-static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
- struct acpi_resource_gpio **agpio)
+static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name,
+ int index)
{
- return false;
+ return acpi_dev_gpio_irq_wake_get_by(adev, name, index, NULL);
}
+
static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
{
- return -ENXIO;
+ return acpi_dev_gpio_irq_wake_get_by(adev, NULL, index, NULL);
}
-#endif
/* Device properties */
-#define MAX_ACPI_REFERENCE_ARGS 8
-struct acpi_reference_args {
- struct acpi_device *adev;
- size_t nargs;
- u64 args[MAX_ACPI_REFERENCE_ARGS];
-};
-
#ifdef CONFIG_ACPI
-int acpi_dev_get_property(struct acpi_device *adev, const char *name,
+int acpi_dev_get_property(const struct acpi_device *adev, const char *name,
acpi_object_type type, const union acpi_object **obj);
-int __acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *name, size_t index, size_t num_args,
- struct acpi_reference_args *args);
+ struct fwnode_reference_args *args);
-static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+static inline int acpi_node_get_property_reference(
+ const struct fwnode_handle *fwnode,
const char *name, size_t index,
- struct acpi_reference_args *args)
+ struct fwnode_reference_args *args)
{
return __acpi_node_get_property_reference(fwnode, name, index,
- MAX_ACPI_REFERENCE_ARGS, args);
+ NR_FWNODE_REFERENCE_ARGS, args);
}
-int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname,
+static inline bool acpi_dev_has_props(const struct acpi_device *adev)
+{
+ return !list_empty(&adev->data.properties);
+}
+
+struct acpi_device_properties *
+acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
+ union acpi_object *properties);
+
+int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
void **valptr);
-int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname,
- enum dev_prop_type proptype, void *val);
-int acpi_node_prop_read(struct fwnode_handle *fwnode, const char *propname,
- enum dev_prop_type proptype, void *val, size_t nval);
-int acpi_dev_prop_read(struct acpi_device *adev, const char *propname,
- enum dev_prop_type proptype, void *val, size_t nval);
-
-struct fwnode_handle *acpi_get_next_subnode(struct fwnode_handle *fwnode,
- struct fwnode_handle *child);
-struct fwnode_handle *acpi_node_get_parent(struct fwnode_handle *fwnode);
-struct fwnode_handle *acpi_graph_get_next_endpoint(struct fwnode_handle *fwnode,
- struct fwnode_handle *prev);
-int acpi_graph_get_remote_endpoint(struct fwnode_handle *fwnode,
- struct fwnode_handle **remote,
- struct fwnode_handle **port,
- struct fwnode_handle **endpoint);
+struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *child);
struct acpi_probe_entry;
typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *,
@@ -1071,16 +1336,27 @@ struct acpi_probe_entry {
kernel_ulong_t driver_data;
};
-#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \
+#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, \
+ valid, data, fn) \
+ static const struct acpi_probe_entry __acpi_probe_##name \
+ __used __section("__" #table "_acpi_probe_table") = { \
+ .id = table_id, \
+ .type = subtable, \
+ .subtable_valid = valid, \
+ .probe_table = fn, \
+ .driver_data = data, \
+ }
+
+#define ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(table, name, table_id, \
+ subtable, valid, data, fn) \
static const struct acpi_probe_entry __acpi_probe_##name \
- __used __section(__##table##_acpi_probe_table) \
- = { \
+ __used __section("__" #table "_acpi_probe_table") = { \
.id = table_id, \
.type = subtable, \
.subtable_valid = valid, \
- .probe_table = (acpi_tbl_table_handler)fn, \
- .driver_data = data, \
- }
+ .probe_subtbl = fn, \
+ .driver_data = data, \
+ }
#define ACPI_PROBE_TABLE(name) __##name##_acpi_probe_table
#define ACPI_PROBE_TABLE_END(name) __##name##_acpi_probe_table_end
@@ -1104,79 +1380,44 @@ static inline int acpi_dev_get_property(struct acpi_device *adev,
}
static inline int
-__acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+__acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *name, size_t index, size_t num_args,
- struct acpi_reference_args *args)
+ struct fwnode_reference_args *args)
{
return -ENXIO;
}
-static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
- const char *name, size_t index,
- struct acpi_reference_args *args)
+static inline int
+acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
+ const char *name, size_t index,
+ struct fwnode_reference_args *args)
{
return -ENXIO;
}
-static inline int acpi_node_prop_get(struct fwnode_handle *fwnode,
+static inline int acpi_node_prop_get(const struct fwnode_handle *fwnode,
const char *propname,
void **valptr)
{
return -ENXIO;
}
-static inline int acpi_dev_prop_get(struct acpi_device *adev,
- const char *propname,
- void **valptr)
-{
- return -ENXIO;
-}
-
-static inline int acpi_dev_prop_read_single(struct acpi_device *adev,
- const char *propname,
- enum dev_prop_type proptype,
- void *val)
-{
- return -ENXIO;
-}
-
-static inline int acpi_node_prop_read(struct fwnode_handle *fwnode,
- const char *propname,
- enum dev_prop_type proptype,
- void *val, size_t nval)
-{
- return -ENXIO;
-}
-
-static inline int acpi_dev_prop_read(struct acpi_device *adev,
- const char *propname,
- enum dev_prop_type proptype,
- void *val, size_t nval)
-{
- return -ENXIO;
-}
-
-static inline struct fwnode_handle *
-acpi_get_next_subnode(struct fwnode_handle *fwnode, struct fwnode_handle *child)
-{
- return NULL;
-}
-
static inline struct fwnode_handle *
-acpi_node_get_parent(struct fwnode_handle *fwnode)
+acpi_get_next_subnode(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *child)
{
return NULL;
}
static inline struct fwnode_handle *
-acpi_graph_get_next_endpoint(struct fwnode_handle *fwnode,
+acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev)
{
return ERR_PTR(-ENXIO);
}
static inline int
-acpi_graph_get_remote_endpoint(struct fwnode_handle *fwnode,
+acpi_graph_get_remote_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle **remote,
struct fwnode_handle **port,
struct fwnode_handle **endpoint)
@@ -1209,9 +1450,13 @@ static inline bool acpi_has_watchdog(void) { return false; }
#endif
#ifdef CONFIG_ACPI_SPCR_TABLE
-int parse_spcr(bool earlycon);
+extern bool qdf2400_e44_present;
+int acpi_parse_spcr(bool enable_earlycon, bool enable_console);
#else
-static inline int parse_spcr(bool earlycon) { return 0; }
+static inline int acpi_parse_spcr(bool enable_earlycon, bool enable_console)
+{
+ return 0;
+}
#endif
#if IS_ENABLED(CONFIG_ACPI_GENERIC_GSI)
@@ -1224,4 +1469,66 @@ int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res)
}
#endif
+#ifdef CONFIG_ACPI_LPIT
+int lpit_read_residency_count_address(u64 *address);
+#else
+static inline int lpit_read_residency_count_address(u64 *address)
+{
+ return -EINVAL;
+}
+#endif
+
+#ifdef CONFIG_ACPI_PPTT
+int acpi_pptt_cpu_is_thread(unsigned int cpu);
+int find_acpi_cpu_topology(unsigned int cpu, int level);
+int find_acpi_cpu_topology_cluster(unsigned int cpu);
+int find_acpi_cpu_topology_package(unsigned int cpu);
+int find_acpi_cpu_topology_hetero_id(unsigned int cpu);
+#else
+static inline int acpi_pptt_cpu_is_thread(unsigned int cpu)
+{
+ return -EINVAL;
+}
+static inline int find_acpi_cpu_topology(unsigned int cpu, int level)
+{
+ return -EINVAL;
+}
+static inline int find_acpi_cpu_topology_cluster(unsigned int cpu)
+{
+ return -EINVAL;
+}
+static inline int find_acpi_cpu_topology_package(unsigned int cpu)
+{
+ return -EINVAL;
+}
+static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
+{
+ return -EINVAL;
+}
+#endif
+
+#ifdef CONFIG_ACPI_PCC
+void acpi_init_pcc(void);
+#else
+static inline void acpi_init_pcc(void) { }
+#endif
+
+#ifdef CONFIG_ACPI_FFH
+void acpi_init_ffh(void);
+extern int acpi_ffh_address_space_arch_setup(void *handler_ctxt,
+ void **region_ctxt);
+extern int acpi_ffh_address_space_arch_handler(acpi_integer *value,
+ void *region_context);
+#else
+static inline void acpi_init_ffh(void) { }
+#endif
+
+#ifdef CONFIG_ACPI
+extern void acpi_device_notify(struct device *dev);
+extern void acpi_device_notify_remove(struct device *dev);
+#else
+static inline void acpi_device_notify(struct device *dev) { }
+static inline void acpi_device_notify_remove(struct device *dev) { }
+#endif
+
#endif /*_LINUX_ACPI_H*/
diff --git a/include/linux/acpi_agdi.h b/include/linux/acpi_agdi.h
new file mode 100644
index 000000000000..f477f0b452fa
--- /dev/null
+++ b/include/linux/acpi_agdi.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ACPI_AGDI_H__
+#define __ACPI_AGDI_H__
+
+#include <linux/acpi.h>
+
+#ifdef CONFIG_ACPI_AGDI
+void __init acpi_agdi_init(void);
+#else
+static inline void acpi_agdi_init(void) {}
+#endif
+#endif /* __ACPI_AGDI_H__ */
diff --git a/include/linux/acpi_apmt.h b/include/linux/acpi_apmt.h
new file mode 100644
index 000000000000..40bd634d082f
--- /dev/null
+++ b/include/linux/acpi_apmt.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * ARM CoreSight PMU driver.
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES.
+ *
+ */
+
+#ifndef __ACPI_APMT_H__
+#define __ACPI_APMT_H__
+
+#include <linux/acpi.h>
+
+#ifdef CONFIG_ACPI_APMT
+void acpi_apmt_init(void);
+#else
+static inline void acpi_apmt_init(void) { }
+#endif /* CONFIG_ACPI_APMT */
+
+#endif /* __ACPI_APMT_H__ */
diff --git a/include/linux/acpi_dma.h b/include/linux/acpi_dma.h
index 329436d38e66..72cedb916a9c 100644
--- a/include/linux/acpi_dma.h
+++ b/include/linux/acpi_dma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ACPI helpers for DMA request / controller
*
@@ -5,10 +6,6 @@
*
* Copyright (C) 2013, Intel Corporation
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_ACPI_DMA_H
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 8379d406ad2e..b43be0987b19 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016, Semihalf
* Author: Tomasz Nowicki <tn@semihalf.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
*/
#ifndef __ACPI_IORT_H__
@@ -26,31 +14,57 @@
#define IORT_IRQ_MASK(irq) (irq & 0xffffffffULL)
#define IORT_IRQ_TRIGGER_MASK(irq) ((irq >> 32) & 0xffffffffULL)
-int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node);
+/*
+ * PMCG model identifiers for use in smmu pmu driver. Please note
+ * that this is purely for the use of software and has nothing to
+ * do with hardware or with IORT specification.
+ */
+#define IORT_SMMU_V3_PMCG_GENERIC 0x00000000 /* Generic SMMUv3 PMCG */
+#define IORT_SMMU_V3_PMCG_HISI_HIP08 0x00000001 /* HiSilicon HIP08 PMCG */
+
+int iort_register_domain_token(int trans_id, phys_addr_t base,
+ struct fwnode_handle *fw_node);
void iort_deregister_domain_token(int trans_id);
struct fwnode_handle *iort_find_domain_token(int trans_id);
#ifdef CONFIG_ACPI_IORT
void acpi_iort_init(void);
-u32 iort_msi_map_rid(struct device *dev, u32 req_id);
-struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id);
+u32 iort_msi_map_id(struct device *dev, u32 id);
+struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
+ enum irq_domain_bus_token bus_token);
void acpi_configure_pmsi_domain(struct device *dev);
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
+void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
+ struct list_head *head);
+void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
+ struct list_head *head);
/* IOMMU interface */
-void iort_set_dma_mask(struct device *dev);
-const struct iommu_ops *iort_iommu_configure(struct device *dev);
+int iort_dma_get_ranges(struct device *dev, u64 *size);
+int iort_iommu_configure_id(struct device *dev, const u32 *id_in);
+void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head);
+phys_addr_t acpi_iort_dma_get_max_cpu_address(void);
#else
static inline void acpi_iort_init(void) { }
-static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id)
-{ return req_id; }
-static inline struct irq_domain *iort_get_device_domain(struct device *dev,
- u32 req_id)
+static inline u32 iort_msi_map_id(struct device *dev, u32 id)
+{ return id; }
+static inline struct irq_domain *iort_get_device_domain(
+ struct device *dev, u32 id, enum irq_domain_bus_token bus_token)
{ return NULL; }
static inline void acpi_configure_pmsi_domain(struct device *dev) { }
+static inline
+void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { }
+static inline
+void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { }
/* IOMMU interface */
-static inline void iort_set_dma_mask(struct device *dev) { }
+static inline int iort_dma_get_ranges(struct device *dev, u64 *size)
+{ return -ENODEV; }
+static inline int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
+{ return -ENODEV; }
static inline
-const struct iommu_ops *iort_iommu_configure(struct device *dev)
-{ return NULL; }
+void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
+{ }
+
+static inline phys_addr_t acpi_iort_dma_get_max_cpu_address(void)
+{ return PHYS_ADDR_MAX; }
#endif
#endif /* __ACPI_IORT_H__ */
diff --git a/include/linux/acpi_mdio.h b/include/linux/acpi_mdio.h
new file mode 100644
index 000000000000..8e2eefa9fbc0
--- /dev/null
+++ b/include/linux/acpi_mdio.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ACPI helper for the MDIO (Ethernet PHY) API
+ */
+
+#ifndef __LINUX_ACPI_MDIO_H
+#define __LINUX_ACPI_MDIO_H
+
+#include <linux/phy.h>
+
+#if IS_ENABLED(CONFIG_ACPI_MDIO)
+int __acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode,
+ struct module *owner);
+
+static inline int
+acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *handle)
+{
+ return __acpi_mdiobus_register(mdio, handle, THIS_MODULE);
+}
+#else /* CONFIG_ACPI_MDIO */
+static inline int
+acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
+{
+ /*
+ * Fall back to plain mdiobus_register() to register the bus.
+ * This way, we don't have to keep compat bits around in drivers.
+ */
+
+ return mdiobus_register(mdio);
+}
+#endif
+
+#endif /* __LINUX_ACPI_MDIO_H */
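
A minimal caller-side sketch (assumed driver code, not from this patch): the same call works with or without CONFIG_ACPI_MDIO, since the stub version falls back to plain mdiobus_register():

#include <linux/acpi_mdio.h>
#include <linux/phy.h>

/* Hypothetical probe fragment for an MDIO controller driver. */
static int my_mdio_register(struct mii_bus *bus, struct fwnode_handle *fwnode)
{
        /* With ACPI_MDIO enabled this also enumerates the PHY children
         * of @fwnode; otherwise it degrades to mdiobus_register().
         */
        return acpi_mdiobus_register(bus, fwnode);
}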
diff --git a/include/linux/acpi_pmtmr.h b/include/linux/acpi_pmtmr.h
index 1d0ef1ae8036..50d88bf1498d 100644
--- a/include/linux/acpi_pmtmr.h
+++ b/include/linux/acpi_pmtmr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ACPI_PMTMR_H_
#define _ACPI_PMTMR_H_
diff --git a/include/linux/acpi_viot.h b/include/linux/acpi_viot.h
new file mode 100644
index 000000000000..a5a122431563
--- /dev/null
+++ b/include/linux/acpi_viot.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ACPI_VIOT_H__
+#define __ACPI_VIOT_H__
+
+#include <linux/acpi.h>
+
+#ifdef CONFIG_ACPI_VIOT
+void __init acpi_viot_early_init(void);
+void __init acpi_viot_init(void);
+int viot_iommu_configure(struct device *dev);
+#else
+static inline void acpi_viot_early_init(void) {}
+static inline void acpi_viot_init(void) {}
+static inline int viot_iommu_configure(struct device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __ACPI_VIOT_H__ */
diff --git a/include/linux/adb.h b/include/linux/adb.h
index cde41300c7ad..f6306fc86015 100644
--- a/include/linux/adb.h
+++ b/include/linux/adb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for ADB (Apple Desktop Bus) support.
*/
diff --git a/include/linux/adfs_fs.h b/include/linux/adfs_fs.h
index 0d991071a9d4..4836e382ad52 100644
--- a/include/linux/adfs_fs.h
+++ b/include/linux/adfs_fs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ADFS_FS_H
#define _ADFS_FS_H
diff --git a/include/linux/adreno-smmu-priv.h b/include/linux/adreno-smmu-priv.h
new file mode 100644
index 000000000000..c637e0997f6d
--- /dev/null
+++ b/include/linux/adreno-smmu-priv.h
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Google, Inc
+ */
+
+#ifndef __ADRENO_SMMU_PRIV_H
+#define __ADRENO_SMMU_PRIV_H
+
+#include <linux/io-pgtable.h>
+
+/**
+ * struct adreno_smmu_fault_info - container for key fault information
+ *
+ * @far: The faulting IOVA from ARM_SMMU_CB_FAR
+ * @ttbr0: The current TTBR0 pagetable from ARM_SMMU_CB_TTBR0
+ * @contextidr: The value of ARM_SMMU_CB_CONTEXTIDR
+ * @fsr: The fault status from ARM_SMMU_CB_FSR
+ * @fsynr0: The value of FSYNR0 from ARM_SMMU_CB_FSYNR0
+ * @fsynr1: The value of FSYNR1 from ARM_SMMU_CB_FSYNR1
+ * @cbfrsynra: The value of CBFRSYNRA from ARM_SMMU_GR1_CBFRSYNRA(idx)
+ *
+ * This struct passes back key page fault information to the GPU driver
+ * through the get_fault_info function pointer.
+ * The GPU driver can use this information to print informative
+ * log messages and provide deeper GPU-specific insight into the fault.
+ */
+struct adreno_smmu_fault_info {
+ u64 far;
+ u64 ttbr0;
+ u32 contextidr;
+ u32 fsr;
+ u32 fsynr0;
+ u32 fsynr1;
+ u32 cbfrsynra;
+};
+
+/**
+ * struct adreno_smmu_priv - private interface between adreno-smmu and GPU
+ *
+ * @cookie: An opaque token provided by adreno-smmu and passed
+ * back into the callbacks
+ * @get_ttbr1_cfg: Get the TTBR1 config for the GPU's context bank
+ * @set_ttbr0_cfg: Set the TTBR0 config for the GPU's context bank. A
+ * NULL config disables TTBR0 translation, otherwise
+ * TTBR0 translation is enabled with the specified cfg
+ * @get_fault_info: Called by the GPU fault handler to get information about
+ * the fault
+ * @set_stall: Configure whether stall on fault (CFCFG) is enabled. Call
+ * before set_ttbr0_cfg(). If stalling on fault is enabled,
+ * the GPU driver must call resume_translation()
+ * @resume_translation: Resume translation after a fault
+ *
+ *
+ * The GPU driver (drm/msm) and adreno-smmu work together for controlling
+ * the GPU's SMMU instance. This is by necessity, as the GPU is directly
+ * updating the SMMU for context switches, while on the other hand we do
+ * not want to duplicate all of the initial setup logic from arm-smmu.
+ *
+ * This private interface is used for the two drivers to coordinate. The
+ * cookie and callback functions are populated when the GPU driver attaches
+ * its domain.
+ */
+struct adreno_smmu_priv {
+ const void *cookie;
+ const struct io_pgtable_cfg *(*get_ttbr1_cfg)(const void *cookie);
+ int (*set_ttbr0_cfg)(const void *cookie, const struct io_pgtable_cfg *cfg);
+ void (*get_fault_info)(const void *cookie, struct adreno_smmu_fault_info *info);
+ void (*set_stall)(const void *cookie, bool enabled);
+ void (*resume_translation)(const void *cookie, bool terminate);
+};
+
+#endif /* __ADRENO_SMMU_PRIV_H */
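To illustrate the callback flow, a minimal sketch of a GPU-side fault path using this interface; all example_* names are hypothetical, and the cookie and callbacks are assumed to have been populated when the GPU driver attached its domain.

static void example_gpu_fault_worker(struct adreno_smmu_priv *priv)
{
	struct adreno_smmu_fault_info info;

	if (priv->get_fault_info)
		priv->get_fault_info(priv->cookie, &info);

	/* e.g. log info.far (the faulting IOVA) and info.fsr here */

	/* if set_stall() enabled stalling, translation must be resumed */
	if (priv->resume_translation)
		priv->resume_translation(priv->cookie, false);
}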
diff --git a/include/linux/adxl.h b/include/linux/adxl.h
new file mode 100644
index 000000000000..2a629acb4c3f
--- /dev/null
+++ b/include/linux/adxl.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Address translation interface via ACPI DSM.
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#ifndef _LINUX_ADXL_H
+#define _LINUX_ADXL_H
+
+const char * const *adxl_get_component_names(void);
+int adxl_decode(u64 addr, u64 component_values[]);
+
+#endif /* _LINUX_ADXL_H */
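A sketch of how the two declarations above pair up. The array size and the NULL termination of the name list are assumptions of this example; the array must cover every name returned by adxl_get_component_names().

#include <linux/adxl.h>
#include <linux/printk.h>

static void example_translate(u64 addr)
{
	const char * const *names = adxl_get_component_names();
	u64 values[16];		/* assumed upper bound on component count */
	int i;

	if (adxl_decode(addr, values))
		return;		/* translation failed */

	for (i = 0; names[i]; i++)	/* assumed NULL-terminated */
		pr_info("%s = 0x%llx\n", names[i], values[i]);
}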
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 04602cbe85dc..97f64ba1b34a 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2006 Intel Corp.
* Tom Long Nguyen (tom.l.nguyen@intel.com)
@@ -13,6 +14,7 @@
#define AER_NONFATAL 0
#define AER_FATAL 1
#define AER_CORRECTABLE 2
+#define DPC_FATAL 3
struct pci_dev;
@@ -39,11 +41,12 @@ struct aer_capability_regs {
};
#if defined(CONFIG_PCIEAER)
-/* pci-e port driver needs this function to enable aer */
+/* PCIe port driver needs this function to enable AER */
int pci_enable_pcie_error_reporting(struct pci_dev *dev);
int pci_disable_pcie_error_reporting(struct pci_dev *dev);
-int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);
-int pci_cleanup_aer_error_status_regs(struct pci_dev *dev);
+int pci_aer_clear_nonfatal_status(struct pci_dev *dev);
+void pci_save_aer_state(struct pci_dev *dev);
+void pci_restore_aer_state(struct pci_dev *dev);
#else
static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
@@ -53,21 +56,18 @@ static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
return -EINVAL;
}
-static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
-{
- return -EINVAL;
-}
-static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
+static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
return -EINVAL;
}
+static inline void pci_save_aer_state(struct pci_dev *dev) {}
+static inline void pci_restore_aer_state(struct pci_dev *dev) {}
#endif
void cper_print_aer(struct pci_dev *dev, int aer_severity,
struct aer_capability_regs *aer);
int cper_severity_to_aer(int cper_severity);
void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
- int severity,
- struct aer_capability_regs *aer_regs);
+ int severity, struct aer_capability_regs *aer_regs);
#endif //_AER_H_
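A sketch of typical driver usage under the renamed API; on !CONFIG_PCIEAER kernels both calls fall through to the stubs above. The probe function is hypothetical.

#include <linux/pci.h>

static int example_pcie_probe(struct pci_dev *pdev)
{
	/* returns -EINVAL when AER support is compiled out */
	if (pci_enable_pcie_error_reporting(pdev))
		dev_info(&pdev->dev, "AER reporting not enabled\n");

	return 0;
}

/* later, in an error-recovery path, after servicing a non-fatal error:
 *	pci_aer_clear_nonfatal_status(pdev);
 */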
diff --git a/include/linux/agpgart.h b/include/linux/agpgart.h
index c6b61ca97053..21b34a96cfd8 100644
--- a/include/linux/agpgart.h
+++ b/include/linux/agpgart.h
@@ -30,8 +30,6 @@
#include <linux/agp_backend.h>
#include <uapi/linux/agpgart.h>
-#define AGPGART_MINOR 175
-
struct agp_info {
struct agp_version version; /* version of the driver */
u32 bridge_id; /* bridge vendor/device */
diff --git a/include/linux/ahci-remap.h b/include/linux/ahci-remap.h
index 62be3a40239d..230c871ba084 100644
--- a/include/linux/ahci-remap.h
+++ b/include/linux/ahci-remap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_AHCI_REMAP_H
#define _LINUX_AHCI_REMAP_H
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index a270f25ee7c7..fe0760ce34c8 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* AHCI SATA platform driver
*
@@ -5,11 +6,6 @@
* Jeff Garzik <jgarzik@pobox.com>
* Copyright 2010 MontaVista Software, LLC.
* Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
*/
#ifndef _AHCI_PLATFORM_H
@@ -17,28 +13,40 @@
#include <linux/compiler.h>
+struct clk;
struct device;
struct ata_port_info;
struct ahci_host_priv;
struct platform_device;
struct scsi_host_template;
+int ahci_platform_enable_phys(struct ahci_host_priv *hpriv);
+void ahci_platform_disable_phys(struct ahci_host_priv *hpriv);
+struct clk *ahci_platform_find_clk(struct ahci_host_priv *hpriv,
+ const char *con_id);
int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
+int ahci_platform_deassert_rsts(struct ahci_host_priv *hpriv);
+int ahci_platform_assert_rsts(struct ahci_host_priv *hpriv);
int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);
void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv);
int ahci_platform_enable_resources(struct ahci_host_priv *hpriv);
void ahci_platform_disable_resources(struct ahci_host_priv *hpriv);
struct ahci_host_priv *ahci_platform_get_resources(
- struct platform_device *pdev);
+ struct platform_device *pdev, unsigned int flags);
int ahci_platform_init_host(struct platform_device *pdev,
struct ahci_host_priv *hpriv,
const struct ata_port_info *pi_template,
- struct scsi_host_template *sht);
+ const struct scsi_host_template *sht);
+
+void ahci_platform_shutdown(struct platform_device *pdev);
int ahci_platform_suspend_host(struct device *dev);
int ahci_platform_resume_host(struct device *dev);
int ahci_platform_suspend(struct device *dev);
int ahci_platform_resume(struct device *dev);
+#define AHCI_PLATFORM_GET_RESETS BIT(0)
+#define AHCI_PLATFORM_RST_TRIGGER BIT(1)
+
#endif /* _AHCI_PLATFORM_H */
diff --git a/include/linux/aio.h b/include/linux/aio.h
index fdd0a343f455..86892a4fe7c8 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX__AIO_H
#define __LINUX__AIO_H
@@ -7,8 +8,6 @@ struct kioctx;
struct kiocb;
struct mm_struct;
-#define KIOCB_KEY 0
-
typedef int (kiocb_cancel_fn)(struct kiocb *);
/* prototypes */
@@ -21,8 +20,4 @@ static inline void kiocb_set_cancel_fn(struct kiocb *req,
kiocb_cancel_fn *cancel) { }
#endif /* CONFIG_AIO */
-/* for sysctl: */
-extern unsigned long aio_nr;
-extern unsigned long aio_max_nr;
-
#endif /* __LINUX__AIO_H */
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index c70aac13244a..05e758b8b894 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -1,10 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ALARMTIMER_H
#define _LINUX_ALARMTIMER_H
#include <linux/time.h>
#include <linux/hrtimer.h>
#include <linux/timerqueue.h>
-#include <linux/rtc.h>
+
+struct rtc_device;
enum alarmtimer_type {
ALARM_REALTIME,
@@ -58,7 +60,11 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval);
u64 alarm_forward_now(struct alarm *alarm, ktime_t interval);
ktime_t alarm_expires_remaining(const struct alarm *alarm);
+#ifdef CONFIG_RTC_CLASS
/* Provide way to access the rtc device being used by alarmtimers */
struct rtc_device *alarmtimer_get_rtcdev(void);
+#else
+static inline struct rtc_device *alarmtimer_get_rtcdev(void) { return NULL; }
+#endif
#endif
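With the new stub, callers no longer need CONFIG_RTC_CLASS ifdefs to check for RTC backing; a minimal hypothetical helper:

static bool example_alarms_rtc_backed(void)
{
	/* NULL on !CONFIG_RTC_CLASS kernels, or before any RTC registers */
	return alarmtimer_get_rtcdev() != NULL;
}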
diff --git a/include/linux/alcor_pci.h b/include/linux/alcor_pci.h
new file mode 100644
index 000000000000..c4a0b23846d8
--- /dev/null
+++ b/include/linux/alcor_pci.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 Oleksij Rempel <linux@rempel-privat.de>
+ *
+ * Driver for Alcor Micro AU6601 and AU6621 controllers
+ */
+
+#ifndef __ALCOR_PCI_H
+#define __ALCOR_PCI_H
+
+#define ALCOR_SD_CARD 0
+#define ALCOR_MS_CARD 1
+
+#define DRV_NAME_ALCOR_PCI_SDMMC "alcor_sdmmc"
+#define DRV_NAME_ALCOR_PCI_MS "alcor_ms"
+
+#define PCI_ID_ALCOR_MICRO 0x1AEA
+#define PCI_ID_AU6601 0x6601
+#define PCI_ID_AU6621 0x6621
+#define PCI_ID_AU6625 0x6625
+
+#define MHZ_TO_HZ(freq) ((freq) * 1000 * 1000)
+
+#define AU6601_BASE_CLOCK 31000000
+#define AU6601_MIN_CLOCK 150000
+#define AU6601_MAX_CLOCK 208000000
+#define AU6601_MAX_DMA_SEGMENTS 64
+#define AU6601_MAX_PIO_SEGMENTS 1
+#define AU6601_MAX_DMA_BLOCK_SIZE 0x1000
+#define AU6601_MAX_PIO_BLOCK_SIZE 0x200
+#define AU6601_MAX_DMA_BLOCKS 1
+#define AU6601_DMA_LOCAL_SEGMENTS 1
+
+/* registers spotted by reverse engineering, but still
+ * with unknown functionality:
+ * 0x10 - ADMA phy address. AU6621 only?
+ * 0x51 - LED ctrl?
+ * 0x52 - unknown
+ * 0x61 - LED related? BIT(0) is always toggled
+ * 0x63 - Same as 0x61?
+ * 0x77 - unknown
+ */
+
+/* SDMA phy address. Higher than 0x0800.0000?
+ * The au6601 and au6621 have different DMA engines with different issues.
+ * For example, the au6621 engine is triggered by an address change; no other
+ * interaction is needed. This means that if we get two buffers with the same
+ * address, the engine will stall.
+ */
+#define AU6601_REG_SDMA_ADDR 0x00
+#define AU6601_SDMA_MASK 0xffffffff
+
+#define AU6601_DMA_BOUNDARY 0x05
+#define AU6621_DMA_PAGE_CNT 0x05
+/* PIO */
+#define AU6601_REG_BUFFER 0x08
+/* ADMA ctrl? AU6621 only. */
+#define AU6621_DMA_CTRL 0x0c
+#define AU6621_DMA_ENABLE BIT(0)
+/* CMD index */
+#define AU6601_REG_CMD_OPCODE 0x23
+/* CMD parameter */
+#define AU6601_REG_CMD_ARG 0x24
+/* CMD response 4x4 Bytes */
+#define AU6601_REG_CMD_RSP0 0x30
+#define AU6601_REG_CMD_RSP1 0x34
+#define AU6601_REG_CMD_RSP2 0x38
+#define AU6601_REG_CMD_RSP3 0x3C
+/* default timeout set to 125: 125 * 40ms = 5 sec
+ * how exactly is it calculated?
+ */
+#define AU6601_TIME_OUT_CTRL 0x69
+/* Block size for SDMA or PIO */
+#define AU6601_REG_BLOCK_SIZE 0x6c
+/* Some power related reg, used together with AU6601_OUTPUT_ENABLE */
+#define AU6601_POWER_CONTROL 0x70
+
+/* PLL ctrl */
+#define AU6601_CLK_SELECT 0x72
+#define AU6601_CLK_OVER_CLK 0x80
+#define AU6601_CLK_384_MHZ 0x30
+#define AU6601_CLK_125_MHZ 0x20
+#define AU6601_CLK_48_MHZ 0x10
+#define AU6601_CLK_EXT_PLL 0x04
+#define AU6601_CLK_X2_MODE 0x02
+#define AU6601_CLK_ENABLE 0x01
+#define AU6601_CLK_31_25_MHZ 0x00
+
+#define AU6601_CLK_DIVIDER 0x73
+
+#define AU6601_INTERFACE_MODE_CTRL 0x74
+#define AU6601_DLINK_MODE 0x80
+#define AU6601_INTERRUPT_DELAY_TIME 0x40
+#define AU6601_SIGNAL_REQ_CTRL 0x30
+#define AU6601_MS_CARD_WP BIT(3)
+#define AU6601_SD_CARD_WP BIT(0)
+
+/* same register values are used for:
+ * - AU6601_OUTPUT_ENABLE
+ * - AU6601_POWER_CONTROL
+ */
+#define AU6601_ACTIVE_CTRL 0x75
+#define AU6601_XD_CARD BIT(4)
+/* AU6601_MS_CARD_ACTIVE - will activate the MS card section? */
+#define AU6601_MS_CARD BIT(3)
+#define AU6601_SD_CARD BIT(0)
+
+/* card slot state. It should automatically detect the type of
+ * the card
+ */
+#define AU6601_DETECT_STATUS 0x76
+#define AU6601_DETECT_EN BIT(7)
+#define AU6601_MS_DETECTED BIT(3)
+#define AU6601_SD_DETECTED BIT(0)
+#define AU6601_DETECT_STATUS_M 0xf
+
+#define AU6601_REG_SW_RESET 0x79
+#define AU6601_BUF_CTRL_RESET BIT(7)
+#define AU6601_RESET_DATA BIT(3)
+#define AU6601_RESET_CMD BIT(0)
+
+#define AU6601_OUTPUT_ENABLE 0x7a
+
+#define AU6601_PAD_DRIVE0 0x7b
+#define AU6601_PAD_DRIVE1 0x7c
+#define AU6601_PAD_DRIVE2 0x7d
+/* read EEPROM? */
+#define AU6601_FUNCTION 0x7f
+
+#define AU6601_CMD_XFER_CTRL 0x81
+#define AU6601_CMD_17_BYTE_CRC 0xc0
+#define AU6601_CMD_6_BYTE_WO_CRC 0x80
+#define AU6601_CMD_6_BYTE_CRC 0x40
+#define AU6601_CMD_START_XFER 0x20
+#define AU6601_CMD_STOP_WAIT_RDY 0x10
+#define AU6601_CMD_NO_RESP 0x00
+
+#define AU6601_REG_BUS_CTRL 0x82
+#define AU6601_BUS_WIDTH_4BIT 0x20
+#define AU6601_BUS_WIDTH_8BIT 0x10
+#define AU6601_BUS_WIDTH_1BIT 0x00
+
+#define AU6601_DATA_XFER_CTRL 0x83
+#define AU6601_DATA_WRITE BIT(7)
+#define AU6601_DATA_DMA_MODE BIT(6)
+#define AU6601_DATA_START_XFER BIT(0)
+
+#define AU6601_DATA_PIN_STATE 0x84
+#define AU6601_BUS_STAT_CMD BIT(15)
+/* BIT(4) - BIT(7) are permanently 1.
+ * May be reserved, or DAT4-DAT7 are simply not attached.
+ */
+#define AU6601_BUS_STAT_DAT3 BIT(3)
+#define AU6601_BUS_STAT_DAT2 BIT(2)
+#define AU6601_BUS_STAT_DAT1 BIT(1)
+#define AU6601_BUS_STAT_DAT0 BIT(0)
+#define AU6601_BUS_STAT_DAT_MASK 0xf
+
+#define AU6601_OPT 0x85
+#define AU6601_OPT_CMD_LINE_LEVEL 0x80
+#define AU6601_OPT_NCRC_16_CLK BIT(4)
+#define AU6601_OPT_CMD_NWT BIT(3)
+#define AU6601_OPT_STOP_CLK BIT(2)
+#define AU6601_OPT_DDR_MODE BIT(1)
+#define AU6601_OPT_SD_18V BIT(0)
+
+#define AU6601_CLK_DELAY 0x86
+#define AU6601_CLK_DATA_POSITIVE_EDGE 0x80
+#define AU6601_CLK_CMD_POSITIVE_EDGE 0x40
+#define AU6601_CLK_POSITIVE_EDGE_ALL (AU6601_CLK_CMD_POSITIVE_EDGE \
+ | AU6601_CLK_DATA_POSITIVE_EDGE)
+
+
+#define AU6601_REG_INT_STATUS 0x90
+#define AU6601_REG_INT_ENABLE 0x94
+#define AU6601_INT_DATA_END_BIT_ERR BIT(22)
+#define AU6601_INT_DATA_CRC_ERR BIT(21)
+#define AU6601_INT_DATA_TIMEOUT_ERR BIT(20)
+#define AU6601_INT_CMD_INDEX_ERR BIT(19)
+#define AU6601_INT_CMD_END_BIT_ERR BIT(18)
+#define AU6601_INT_CMD_CRC_ERR BIT(17)
+#define AU6601_INT_CMD_TIMEOUT_ERR BIT(16)
+#define AU6601_INT_ERROR BIT(15)
+#define AU6601_INT_OVER_CURRENT_ERR BIT(8)
+#define AU6601_INT_CARD_INSERT BIT(7)
+#define AU6601_INT_CARD_REMOVE BIT(6)
+#define AU6601_INT_READ_BUF_RDY BIT(5)
+#define AU6601_INT_WRITE_BUF_RDY BIT(4)
+#define AU6601_INT_DMA_END BIT(3)
+#define AU6601_INT_DATA_END BIT(1)
+#define AU6601_INT_CMD_END BIT(0)
+
+#define AU6601_INT_NORMAL_MASK 0x00007FFF
+#define AU6601_INT_ERROR_MASK 0xFFFF8000
+
+#define AU6601_INT_CMD_MASK (AU6601_INT_CMD_END | \
+ AU6601_INT_CMD_TIMEOUT_ERR | AU6601_INT_CMD_CRC_ERR | \
+ AU6601_INT_CMD_END_BIT_ERR | AU6601_INT_CMD_INDEX_ERR)
+#define AU6601_INT_DATA_MASK (AU6601_INT_DATA_END | AU6601_INT_DMA_END | \
+ AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY | \
+ AU6601_INT_DATA_TIMEOUT_ERR | AU6601_INT_DATA_CRC_ERR | \
+ AU6601_INT_DATA_END_BIT_ERR)
+#define AU6601_INT_ALL_MASK ((u32)-1)
+
+/* MS_CARD mode registers */
+
+#define AU6601_MS_STATUS 0xa0
+
+#define AU6601_MS_BUS_MODE_CTRL 0xa1
+#define AU6601_MS_BUS_8BIT_MODE 0x03
+#define AU6601_MS_BUS_4BIT_MODE 0x01
+#define AU6601_MS_BUS_1BIT_MODE 0x00
+
+#define AU6601_MS_TPC_CMD 0xa2
+#define AU6601_MS_TPC_READ_PAGE_DATA 0x02
+#define AU6601_MS_TPC_READ_REG 0x04
+#define AU6601_MS_TPC_GET_INT 0x07
+#define AU6601_MS_TPC_WRITE_PAGE_DATA 0x0D
+#define AU6601_MS_TPC_WRITE_REG 0x0B
+#define AU6601_MS_TPC_SET_RW_REG_ADRS 0x08
+#define AU6601_MS_TPC_SET_CMD 0x0E
+#define AU6601_MS_TPC_EX_SET_CMD 0x09
+#define AU6601_MS_TPC_READ_SHORT_DATA 0x03
+#define AU6601_MS_TPC_WRITE_SHORT_DATA 0x0C
+
+#define AU6601_MS_TRANSFER_MODE 0xa3
+#define AU6601_MS_XFER_INT_TIMEOUT_CHK BIT(2)
+#define AU6601_MS_XFER_DMA_ENABLE BIT(1)
+#define AU6601_MS_XFER_START BIT(0)
+
+#define AU6601_MS_DATA_PIN_STATE 0xa4
+
+#define AU6601_MS_INT_STATUS 0xb0
+#define AU6601_MS_INT_ENABLE 0xb4
+#define AU6601_MS_INT_OVER_CURRENT_ERROR BIT(23)
+#define AU6601_MS_INT_DATA_CRC_ERROR BIT(21)
+#define AU6601_MS_INT_INT_TIMEOUT BIT(20)
+#define AU6601_MS_INT_INT_RESP_ERROR BIT(19)
+#define AU6601_MS_INT_CED_ERROR BIT(18)
+#define AU6601_MS_INT_TPC_TIMEOUT BIT(16)
+#define AU6601_MS_INT_ERROR BIT(15)
+#define AU6601_MS_INT_CARD_INSERT BIT(7)
+#define AU6601_MS_INT_CARD_REMOVE BIT(6)
+#define AU6601_MS_INT_BUF_READ_RDY BIT(5)
+#define AU6601_MS_INT_BUF_WRITE_RDY BIT(4)
+#define AU6601_MS_INT_DMA_END BIT(3)
+#define AU6601_MS_INT_TPC_END BIT(1)
+
+#define AU6601_MS_INT_DATA_MASK 0x00000038
+#define AU6601_MS_INT_TPC_MASK 0x003d8002
+#define AU6601_MS_INT_TPC_ERROR 0x003d0000
+
+#define ALCOR_PCIE_LINK_CTRL_OFFSET 0x10
+#define ALCOR_PCIE_LINK_CAP_OFFSET 0x0c
+#define ALCOR_CAP_START_OFFSET 0x34
+
+struct alcor_dev_cfg {
+ u8 dma;
+};
+
+struct alcor_pci_priv {
+ struct pci_dev *pdev;
+ struct pci_dev *parent_pdev;
+ struct device *dev;
+ void __iomem *iobase;
+ unsigned int irq;
+
+ unsigned long id; /* idr id */
+
+ struct alcor_dev_cfg *cfg;
+};
+
+void alcor_write8(struct alcor_pci_priv *priv, u8 val, unsigned int addr);
+void alcor_write16(struct alcor_pci_priv *priv, u16 val, unsigned int addr);
+void alcor_write32(struct alcor_pci_priv *priv, u32 val, unsigned int addr);
+void alcor_write32be(struct alcor_pci_priv *priv, u32 val, unsigned int addr);
+u8 alcor_read8(struct alcor_pci_priv *priv, unsigned int addr);
+u32 alcor_read32(struct alcor_pci_priv *priv, unsigned int addr);
+u32 alcor_read32be(struct alcor_pci_priv *priv, unsigned int addr);
+#endif
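A hedged sketch of the exported accessors, roughly as the alcor_sdmmc child driver might use them; the helper and the exact programming sequence are illustrative, not taken from the patch.

static u32 example_start_sdma(struct alcor_pci_priv *priv, u32 dma_addr)
{
	/* note the argument order: value first, then register offset */
	alcor_write32(priv, dma_addr, AU6601_REG_SDMA_ADDR);
	alcor_write8(priv, AU6601_DATA_DMA_MODE | AU6601_DATA_START_XFER,
		     AU6601_DATA_XFER_CTRL);

	return alcor_read32(priv, AU6601_REG_INT_STATUS);
}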
diff --git a/include/linux/align.h b/include/linux/align.h
new file mode 100644
index 000000000000..2b4acec7b95a
--- /dev/null
+++ b/include/linux/align.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ALIGN_H
+#define _LINUX_ALIGN_H
+
+#include <linux/const.h>
+
+/* @a is a power of 2 value */
+#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
+#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
+#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
+#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
+#define PTR_ALIGN_DOWN(p, a) ((typeof(p))ALIGN_DOWN((unsigned long)(p), (a)))
+#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
+
+#endif /* _LINUX_ALIGN_H */
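Worked examples of the helpers (each assumes the power-of-two @a noted above):

/*
 * ALIGN(5, 4)       == 8	round up to the next multiple of 4
 * ALIGN_DOWN(5, 4)  == 4	round down to the previous multiple
 * IS_ALIGNED(8, 4)  == 1	already a multiple of 4
 * PTR_ALIGN(p, 64)		p rounded up to a 64-byte boundary
 */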
diff --git a/include/linux/altera_jtaguart.h b/include/linux/altera_jtaguart.h
index 953b178a1650..527a142cd530 100644
--- a/include/linux/altera_jtaguart.h
+++ b/include/linux/altera_jtaguart.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* altera_jtaguart.h -- Altera JTAG UART driver defines.
*/
diff --git a/include/linux/altera_uart.h b/include/linux/altera_uart.h
index c022c82db7ca..3eb73b8c49c8 100644
--- a/include/linux/altera_uart.h
+++ b/include/linux/altera_uart.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* altera_uart.h -- Altera UART driver defines.
*/
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index d143c13bed26..5001e14c5c06 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/amba/bus.h
*
@@ -6,10 +7,6 @@
* region or that is derived from a PrimeCell.
*
* Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef ASMARM_AMBA_H
#define ASMARM_AMBA_H
@@ -25,23 +22,76 @@
#define AMBA_CID 0xb105f00d
#define CORESIGHT_CID 0xb105900d
+/*
+ * CoreSight Architecture specification updates the ID specification
+ * for components on the AMBA bus. (ARM IHI 0029E)
+ *
+ * Bits 15:12 of the CID are the device class.
+ *
+ * Class 0xF remains for PrimeCell and legacy components. (AMBA_CID above)
+ * Class 0x9 defines the component as CoreSight (CORESIGHT_CID above)
+ * Class 0x0, 0x1, 0xB, 0xE define components that do not have driver support
+ * at present.
+ * Class 0x2-0x8, 0xA and 0xC-0xD are presently reserved.
+ *
+ * Remaining CID bits stay as 0xb105-00d
+ */
+
+/**
+ * Class 0x9 components use additional values to form a Unique Component
+ * Identifier (UCI), where peripheral ID values are identical for different
+ * components. Passed to the amba bus code from the component driver via
+ * the amba_id->data pointer.
+ * @devarch : coresight devarch register value
+ * @devarch_mask: mask bits used for matching. 0 indicates UCI not used.
+ * @devtype : coresight device type value
+ * @data : additional driver data. As we have usurped the original
+ *		pointer, some devices may still need additional data.
+ */
+struct amba_cs_uci_id {
+ unsigned int devarch;
+ unsigned int devarch_mask;
+ unsigned int devtype;
+ void *data;
+};
+
+/* define offsets for registers used by UCI */
+#define UCI_REG_DEVTYPE_OFFSET 0xFCC
+#define UCI_REG_DEVARCH_OFFSET 0xFBC
+
struct clk;
struct amba_device {
struct device dev;
struct resource res;
struct clk *pclk;
+ struct device_dma_parameters dma_parms;
unsigned int periphid;
+ struct mutex periphid_lock;
+ unsigned int cid;
+ struct amba_cs_uci_id uci;
unsigned int irq[AMBA_NR_IRQS];
- char *driver_override;
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
};
struct amba_driver {
struct device_driver drv;
int (*probe)(struct amba_device *, const struct amba_id *);
- int (*remove)(struct amba_device *);
+ void (*remove)(struct amba_device *);
void (*shutdown)(struct amba_device *);
const struct amba_id *id_table;
+ /*
+ * For most device drivers, no need to care about this flag as long as
+ * all DMAs are handled through the kernel DMA API. For some special
+ * ones, for example VFIO drivers, they know how to manage the DMA
+ * themselves and set this flag so that the IOMMU layer will allow them
+ * to setup and manage their own I/O address space.
+ */
+ bool driver_managed_dma;
};
/*
@@ -53,14 +103,8 @@ enum amba_vendor {
AMBA_VENDOR_ST = 0x80,
AMBA_VENDOR_QCOM = 0x51,
AMBA_VENDOR_LSI = 0xb6,
- AMBA_VENDOR_LINUX = 0xfe, /* This value is not official */
};
-/* This is used to generate pseudo-ID for AMBA device */
-#define AMBA_LINUX_ID(conf, rev, part) \
- (((conf) & 0xff) << 24 | ((rev) & 0xf) << 20 | \
- AMBA_VENDOR_LINUX << 12 | ((part) & 0xfff))
-
extern struct bus_type amba_bustype;
#define to_amba_device(d) container_of(d, struct amba_device, dev)
@@ -68,55 +112,27 @@ extern struct bus_type amba_bustype;
#define amba_get_drvdata(d) dev_get_drvdata(&d->dev)
#define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p)
+#ifdef CONFIG_ARM_AMBA
int amba_driver_register(struct amba_driver *);
void amba_driver_unregister(struct amba_driver *);
+#else
+static inline int amba_driver_register(struct amba_driver *drv)
+{
+ return -EINVAL;
+}
+static inline void amba_driver_unregister(struct amba_driver *drv)
+{
+}
+#endif
+
struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t);
void amba_device_put(struct amba_device *);
int amba_device_add(struct amba_device *, struct resource *);
int amba_device_register(struct amba_device *, struct resource *);
-struct amba_device *amba_apb_device_add(struct device *parent, const char *name,
- resource_size_t base, size_t size,
- int irq1, int irq2, void *pdata,
- unsigned int periphid);
-struct amba_device *amba_ahb_device_add(struct device *parent, const char *name,
- resource_size_t base, size_t size,
- int irq1, int irq2, void *pdata,
- unsigned int periphid);
-struct amba_device *
-amba_apb_device_add_res(struct device *parent, const char *name,
- resource_size_t base, size_t size, int irq1,
- int irq2, void *pdata, unsigned int periphid,
- struct resource *resbase);
-struct amba_device *
-amba_ahb_device_add_res(struct device *parent, const char *name,
- resource_size_t base, size_t size, int irq1,
- int irq2, void *pdata, unsigned int periphid,
- struct resource *resbase);
void amba_device_unregister(struct amba_device *);
-struct amba_device *amba_find_device(const char *, struct device *, unsigned int, unsigned int);
int amba_request_regions(struct amba_device *, const char *);
void amba_release_regions(struct amba_device *);
-static inline int amba_pclk_enable(struct amba_device *dev)
-{
- return clk_enable(dev->pclk);
-}
-
-static inline void amba_pclk_disable(struct amba_device *dev)
-{
- clk_disable(dev->pclk);
-}
-
-static inline int amba_pclk_prepare(struct amba_device *dev)
-{
- return clk_prepare(dev->pclk);
-}
-
-static inline void amba_pclk_unprepare(struct amba_device *dev)
-{
- clk_unprepare(dev->pclk);
-}
-
/* Some drivers don't use the struct amba_device */
#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff)
#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f)
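A skeleton of a driver adapted to the reworked interface — remove() now returns void, and registration fails cleanly when CONFIG_ARM_AMBA is off. All example_* names are hypothetical and the id table is elided.

static int example_probe(struct amba_device *adev, const struct amba_id *id)
{
	return 0;
}

static void example_remove(struct amba_device *adev)
{
	/* no return value: removal can no longer report failure */
}

static struct amba_driver example_driver = {
	.drv		= { .name = "example-amba" },
	.probe		= example_probe,
	.remove		= example_remove,
	/* .id_table	= ..., elided */
};
/* module init would then call amba_driver_register(&example_driver) */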
diff --git a/include/linux/amba/clcd-regs.h b/include/linux/amba/clcd-regs.h
index 516a6fda83c5..421b0fa90d6a 100644
--- a/include/linux/amba/clcd-regs.h
+++ b/include/linux/amba/clcd-regs.h
@@ -42,6 +42,7 @@
#define TIM2_PCD_LO_MASK GENMASK(4, 0)
#define TIM2_PCD_LO_BITS 5
#define TIM2_CLKSEL (1 << 5)
+#define TIM2_ACB_MASK GENMASK(10, 6)
#define TIM2_IVS (1 << 11)
#define TIM2_IHS (1 << 12)
#define TIM2_IPC (1 << 13)
diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h
index d0c3be77c18e..b6e0cbeaf533 100644
--- a/include/linux/amba/clcd.h
+++ b/include/linux/amba/clcd.h
@@ -124,38 +124,11 @@ struct clcd_board {
struct amba_device;
struct clk;
-/**
- * struct clcd_vendor_data - holds hardware (IP-block) vendor-specific
- * variant information
- *
- * @clock_timregs: the CLCD needs to be clocked when accessing the
- * timer registers, or the hardware will hang.
- * @packed_24_bit_pixels: this variant supports 24bit packed pixel data,
- * so that RGB accesses 3 bytes at a time, not just on even 32bit
- * boundaries, packing the pixel data in memory. ST Microelectronics
- * have this.
- * @st_bitmux_control: ST Microelectronics have implemented output
- * bit line multiplexing into the CLCD control register. This indicates
- * that we need to use this.
- * @init_board: custom board init function for this variant
- * @init_panel: custom panel init function for this variant
- */
-struct clcd_vendor_data {
- bool clock_timregs;
- bool packed_24_bit_pixels;
- bool st_bitmux_control;
- int (*init_board)(struct amba_device *adev,
- struct clcd_board *board);
- int (*init_panel)(struct clcd_fb *fb,
- struct device_node *panel);
-};
-
/* this data structure describes each frame buffer device we find */
struct clcd_fb {
struct fb_info fb;
struct amba_device *dev;
struct clk *clk;
- struct clcd_vendor_data *vendor;
struct clcd_panel *panel;
struct clcd_board *board;
void *board_data;
@@ -257,10 +230,6 @@ static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
else
val |= CNTL_LCDBPP16_444;
break;
- case 24:
- /* Modified variant supporting 24 bit packed pixels */
- val |= CNTL_ST_LCDBPP24_PACKED;
- break;
case 32:
val |= CNTL_LCDBPP24;
break;
diff --git a/include/linux/amba/kmi.h b/include/linux/amba/kmi.h
index a39e5be751b3..94dd727f1aea 100644
--- a/include/linux/amba/kmi.h
+++ b/include/linux/amba/kmi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* linux/include/asm-arm/hardware/amba_kmi.h
*
@@ -5,21 +6,6 @@
*
* Copyright (C) 2000 Deep Blue Solutions Ltd.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *
* ---------------------------------------------------------------------------
* From ARM PrimeCell(tm) PS2 Keyboard/Mouse Interface (PL050) Technical
* Reference Manual - ARM DDI 0143B - see http://www.arm.com/
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
index 8c98113069ce..6f96dc2209c0 100644
--- a/include/linux/amba/mmci.h
+++ b/include/linux/amba/mmci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/amba/mmci.h
*/
@@ -12,25 +13,12 @@
* @ocr_mask: available voltages on the 4 pins from the block, this
* is ignored if a regulator is used, see the MMC_VDD_* masks in
* mmc/host.h
- * @ios_handler: a callback function to act on specfic ios changes,
- * used for example to control a levelshifter
- * mask into a value to be binary (or set some other custom bits
- * in MMCIPWR) or:ed and written into the MMCIPWR register of the
- * block. May also control external power based on the power_mode.
- * @status: if no GPIO read function was given to the block in
- * gpio_wp (below) this function will be called to determine
- * whether a card is present in the MMC slot or not
- * @gpio_wp: read this GPIO pin to see if the card is write protected
- * @gpio_cd: read this GPIO pin to detect card insertion
- * @cd_invert: true if the gpio_cd pin value is active low
+ * @status: if no GPIO line was given to the block, this function will
+ *	be called to determine whether a card is present in the MMC slot or not
*/
struct mmci_platform_data {
unsigned int ocr_mask;
- int (*ios_handler)(struct device *, struct mmc_ios *);
unsigned int (*status)(struct device *);
- int gpio_wp;
- int gpio_cd;
- bool cd_invert;
};
#endif
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h
index 854b7294f6c6..9bf58aac0df2 100644
--- a/include/linux/amba/pl022.h
+++ b/include/linux/amba/pl022.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/amba/pl022.h
*
@@ -10,16 +11,6 @@
* linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
* Initial adoption to PL022 by:
* Sachin Verma <sachin.verma@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _SSP_PL022_H
@@ -232,10 +223,6 @@ struct dma_chan;
/**
* struct pl022_ssp_master - device.platform_data for SPI controller devices.
* @bus_id: identifier for this bus
- * @num_chipselect: chipselects are used to distinguish individual
- * SPI slaves, and are numbered from zero to num_chipselects - 1.
- * each slave has a chipselect signal, but it's common that not
- * every chipselect is connected to a slave.
* @enable_dma: if true enables DMA driven transfers.
* @dma_rx_param: parameter to locate an RX DMA channel.
* @dma_tx_param: parameter to locate a TX DMA channel.
@@ -244,18 +231,15 @@ struct dma_chan;
* indicates no delay and the device will be suspended immediately.
* @rt: indicates the controller should run the message pump with realtime
* priority to minimise the transfer latency on the bus.
- * @chipselects: list of <num_chipselects> chip select gpios
*/
struct pl022_ssp_controller {
u16 bus_id;
- u8 num_chipselect;
u8 enable_dma:1;
bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
void *dma_rx_param;
void *dma_tx_param;
int autosuspend_delay;
bool rt;
- int *chipselects;
};
/**
@@ -274,8 +258,6 @@ struct pl022_ssp_controller {
* @duplex: Microwire interface: Full/Half duplex
 * @clkdelay: on the PL023 variant, the delay in feedback clock cycles
* before sampling the incoming line
- * @cs_control: function pointer to board-specific function to
- * assert/deassert I/O port to control HW generation of devices chip-select.
*/
struct pl022_config_chip {
enum ssp_interface iface;
@@ -289,7 +271,6 @@ struct pl022_config_chip {
enum ssp_microwire_wait_state wait_state;
enum ssp_duplex duplex;
enum ssp_clkdelay clkdelay;
- void (*cs_control) (u32 control);
};
#endif /* _SSP_PL022_H */
diff --git a/include/linux/amba/pl080.h b/include/linux/amba/pl080.h
index ab036b6b1804..e192d546639b 100644
--- a/include/linux/amba/pl080.h
+++ b/include/linux/amba/pl080.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* include/linux/amba/pl080.h
*
* Copyright 2008 Openmoko, Inc.
@@ -6,10 +7,6 @@
* Ben Dooks <ben@simtec.co.uk>
*
* ARM PrimeCell PL080 DMA controller
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/* Note, there are some Samsung updates to this controller block which
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 79d1bcee738d..3100e0debcdd 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/amba/pl08x.h - ARM PrimeCell DMA Controller driver
*
* Copyright (C) 2005 ARM Ltd
* Copyright (C) 2010 ST-Ericsson SA
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* pl08x information required by platform code
*
* Please credit ARM.com
diff --git a/include/linux/amba/pl093.h b/include/linux/amba/pl093.h
deleted file mode 100644
index 2983e3671adb..000000000000
--- a/include/linux/amba/pl093.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/* linux/amba/pl093.h
- *
- * Copyright (c) 2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * AMBA PL093 SSMC (synchronous static memory controller)
- * See DDI0236.pdf (r0p4) for more details
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define SMB_BANK(x) ((x) * 0x20) /* each bank control set is 0x20 apart */
-
-/* Offsets for SMBxxxxRy registers */
-
-#define SMBIDCYR (0x00)
-#define SMBWSTRDR (0x04)
-#define SMBWSTWRR (0x08)
-#define SMBWSTOENR (0x0C)
-#define SMBWSTWENR (0x10)
-#define SMBCR (0x14)
-#define SMBSR (0x18)
-#define SMBWSTBRDR (0x1C)
-
-/* Masks for SMB registers */
-#define IDCY_MASK (0xf)
-#define WSTRD_MASK (0xf)
-#define WSTWR_MASK (0xf)
-#define WSTOEN_MASK (0xf)
-#define WSTWEN_MASK (0xf)
-
-/* Notes from datasheet:
- * WSTOEN <= WSTRD
- * WSTWEN <= WSTWR
- *
- * WSTOEN is not used with nWAIT
- */
-
-/* SMBCR bit definitions */
-#define SMBCR_BIWRITEEN (1 << 21)
-#define SMBCR_ADDRVALIDWRITEEN (1 << 20)
-#define SMBCR_SYNCWRITE (1 << 17)
-#define SMBCR_BMWRITE (1 << 16)
-#define SMBCR_WRAPREAD (1 << 14)
-#define SMBCR_BIREADEN (1 << 13)
-#define SMBCR_ADDRVALIDREADEN (1 << 12)
-#define SMBCR_SYNCREAD (1 << 9)
-#define SMBCR_BMREAD (1 << 8)
-#define SMBCR_SMBLSPOL (1 << 6)
-#define SMBCR_WP (1 << 3)
-#define SMBCR_WAITEN (1 << 2)
-#define SMBCR_WAITPOL (1 << 1)
-#define SMBCR_RBLE (1 << 0)
-
-#define SMBCR_BURSTLENWRITE_MASK (3 << 18)
-#define SMBCR_BURSTLENWRITE_4 (0 << 18)
-#define SMBCR_BURSTLENWRITE_8 (1 << 18)
-#define SMBCR_BURSTLENWRITE_RESERVED (2 << 18)
-#define SMBCR_BURSTLENWRITE_CONTINUOUS (3 << 18)
-
-#define SMBCR_BURSTLENREAD_MASK (3 << 10)
-#define SMBCR_BURSTLENREAD_4 (0 << 10)
-#define SMBCR_BURSTLENREAD_8 (1 << 10)
-#define SMBCR_BURSTLENREAD_16 (2 << 10)
-#define SMBCR_BURSTLENREAD_CONTINUOUS (3 << 10)
-
-#define SMBCR_MW_MASK (3 << 4)
-#define SMBCR_MW_8BIT (0 << 4)
-#define SMBCR_MW_16BIT (1 << 4)
-#define SMBCR_MW_M32BIT (2 << 4)
-
-/* SSMC status registers */
-#define SSMCCSR (0x200)
-#define SSMCCR (0x204)
-#define SSMCITCR (0x208)
-#define SSMCITIP (0x20C)
-#define SSMCITIOP (0x210)
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index ad0965e21a5e..a1307b58cc2c 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* linux/include/asm-arm/hardware/serial_amba.h
*
@@ -5,20 +6,6 @@
*
* Copyright (C) ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef ASM_ARM_HARDWARE_SERIAL_AMBA_H
#define ASM_ARM_HARDWARE_SERIAL_AMBA_H
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index 09751d349963..953e6f12fa1c 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <joerg.roedel@amd.com>
* Leo Duran <leo.duran@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_X86_AMD_IOMMU_H
@@ -22,6 +10,8 @@
#include <linux/types.h>
+struct amd_iommu;
+
/*
* This is mainly used to communicate information back-and-forth
* between SVM and IOMMU for setting up and tearing down posted
@@ -45,24 +35,6 @@ extern int amd_iommu_detect(void);
extern int amd_iommu_init_hardware(void);
/**
- * amd_iommu_enable_device_erratum() - Enable erratum workaround for device
- * in the IOMMUv2 driver
- * @pdev: The PCI device the workaround is necessary for
- * @erratum: The erratum workaround to enable
- *
- * The function needs to be called before amd_iommu_init_device().
- * Possible values for the erratum number are for now:
- * - AMD_PRI_DEV_ERRATUM_ENABLE_RESET - Reset PRI capability when PRI
- * is enabled
- * - AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE - Limit number of outstanding PRI
- * requests to one
- */
-#define AMD_PRI_DEV_ERRATUM_ENABLE_RESET 0
-#define AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE 1
-
-extern void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum);
-
-/**
* amd_iommu_init_device() - Init device for use with IOMMUv2 driver
* @pdev: The PCI device to initialize
* @pasids: Number of PASIDs to support for this device
@@ -88,7 +60,7 @@ extern void amd_iommu_free_device(struct pci_dev *pdev);
*
* The function returns 0 on success or a negative value on error.
*/
-extern int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
+extern int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
struct task_struct *task);
/**
@@ -100,7 +72,7 @@ extern int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
* When this function returns the device is no longer using the PASID
* and the PASID is no longer bound to its task.
*/
-extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid);
+extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid);
/**
* amd_iommu_set_invalid_ppr_cb() - Register a call-back for failed
@@ -126,7 +98,7 @@ extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid);
#define AMD_IOMMU_INV_PRI_RSP_FAIL 2
typedef int (*amd_iommu_invalid_ppr_cb)(struct pci_dev *pdev,
- int pasid,
+ u32 pasid,
unsigned long address,
u16);
@@ -178,7 +150,7 @@ extern int amd_iommu_device_info(struct pci_dev *pdev,
* @cb: The call-back function
*/
-typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, int pasid);
+typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, u32 pasid);
extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
amd_iommu_invalidate_ctx cb);
@@ -196,6 +168,9 @@ extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32));
extern int
amd_iommu_update_ga(int cpu, bool is_run, void *data);
+extern int amd_iommu_activate_guest_mode(void *data);
+extern int amd_iommu_deactivate_guest_mode(void *data);
+
#else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
static inline int
@@ -210,6 +185,29 @@ amd_iommu_update_ga(int cpu, bool is_run, void *data)
return 0;
}
+static inline int amd_iommu_activate_guest_mode(void *data)
+{
+ return 0;
+}
+
+static inline int amd_iommu_deactivate_guest_mode(void *data)
+{
+ return 0;
+}
#endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
+int amd_iommu_get_num_iommus(void);
+bool amd_iommu_pc_supported(void);
+u8 amd_iommu_pc_get_max_banks(unsigned int idx);
+u8 amd_iommu_pc_get_max_counters(unsigned int idx);
+int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn,
+ u64 *value);
+int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn,
+ u64 *value);
+struct amd_iommu *get_amd_iommu(unsigned int idx);
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+int amd_iommu_snp_enable(void);
+#endif
+
#endif /* _ASM_X86_AMD_IOMMU_H */
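A sketch of the performance-counter interface declared above, as a perf driver might consume it; the index, bank, counter and function values are placeholders.

static int example_read_iommu_counter(u64 *value)
{
	struct amd_iommu *iommu;

	if (!amd_iommu_pc_supported())
		return -ENODEV;

	iommu = get_amd_iommu(0);	/* first IOMMU instance */
	if (!iommu)
		return -ENODEV;

	return amd_iommu_pc_get_reg(iommu, 0, 0, 0, value);
}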
diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h
new file mode 100644
index 000000000000..c10ebf8c42e6
--- /dev/null
+++ b/include/linux/amd-pstate.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/include/linux/amd-pstate.h
+ *
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ *
+ * Author: Meng Li <li.meng@amd.com>
+ */
+
+#ifndef _LINUX_AMD_PSTATE_H
+#define _LINUX_AMD_PSTATE_H
+
+#include <linux/pm_qos.h>
+
+#define AMD_CPPC_EPP_PERFORMANCE 0x00
+#define AMD_CPPC_EPP_BALANCE_PERFORMANCE 0x80
+#define AMD_CPPC_EPP_BALANCE_POWERSAVE 0xBF
+#define AMD_CPPC_EPP_POWERSAVE 0xFF
+
+/*********************************************************************
+ * AMD P-state INTERFACE *
+ *********************************************************************/
+/**
+ * struct amd_aperf_mperf
+ * @aperf: actual performance frequency clock count
+ * @mperf: maximum performance frequency clock count
+ * @tsc: time stamp counter
+ */
+struct amd_aperf_mperf {
+ u64 aperf;
+ u64 mperf;
+ u64 tsc;
+};
+
+/**
+ * struct amd_cpudata - private CPU data for AMD P-State
+ * @cpu: CPU number
+ * @req: constraint request to apply
+ * @cppc_req_cached: cached performance request hints
+ * @highest_perf: the maximum performance an individual processor may reach,
+ * assuming ideal conditions
+ * @nominal_perf: the maximum sustained performance level of the processor,
+ * assuming ideal operating conditions
+ * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
+ * savings are achieved
+ * @lowest_perf: the absolute lowest performance level of the processor
+ * @max_freq: the frequency that maps to highest_perf
+ * @min_freq: the frequency that maps to lowest_perf
+ * @nominal_freq: the frequency that maps to nominal_perf
+ * @lowest_nonlinear_freq: the frequency that maps to lowest_nonlinear_perf
+ * @cur: Difference of Aperf/Mperf/tsc count between last and current sample
+ * @prev: Last Aperf/Mperf/tsc count value read from register
+ * @freq: current cpu frequency value
+ * @boost_supported: check whether the Processor or SBIOS supports boost mode
+ * @epp_policy: Last saved policy used to set energy-performance preference
+ * @epp_cached: Cached CPPC energy-performance preference value
+ * @policy: Cpufreq policy value
+ * @cppc_cap1_cached: Cached MSR_AMD_CPPC_CAP1 register value
+ *
+ * The amd_cpudata is key private data for each CPU thread in AMD P-State, and
+ * represents all the attributes and goals that AMD P-State requests at runtime.
+ */
+struct amd_cpudata {
+ int cpu;
+
+ struct freq_qos_request req[2];
+ u64 cppc_req_cached;
+
+ u32 highest_perf;
+ u32 nominal_perf;
+ u32 lowest_nonlinear_perf;
+ u32 lowest_perf;
+
+ u32 max_freq;
+ u32 min_freq;
+ u32 nominal_freq;
+ u32 lowest_nonlinear_freq;
+
+ struct amd_aperf_mperf cur;
+ struct amd_aperf_mperf prev;
+
+ u64 freq;
+ bool boost_supported;
+
+	/* EPP feature related attributes */
+ s16 epp_policy;
+ s16 epp_cached;
+ u32 policy;
+ u64 cppc_cap1_cached;
+ bool suspended;
+};
+
+/*
+ * enum amd_pstate_mode - driver working mode of amd pstate
+ */
+enum amd_pstate_mode {
+ AMD_PSTATE_DISABLE = 0,
+ AMD_PSTATE_PASSIVE,
+ AMD_PSTATE_ACTIVE,
+ AMD_PSTATE_GUIDED,
+ AMD_PSTATE_MAX,
+};
+
+static const char * const amd_pstate_mode_string[] = {
+ [AMD_PSTATE_DISABLE] = "disable",
+ [AMD_PSTATE_PASSIVE] = "passive",
+ [AMD_PSTATE_ACTIVE] = "active",
+ [AMD_PSTATE_GUIDED] = "guided",
+ NULL,
+};
+#endif /* _LINUX_AMD_PSTATE_H */
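An illustrative helper (not part of the header) mapping a sysfs string back onto enum amd_pstate_mode via the table above:

#include <linux/string.h>

static int example_mode_from_string(const char *buf)
{
	int i;

	for (i = 0; i < AMD_PSTATE_MAX; i++)
		if (sysfs_streq(buf, amd_pstate_mode_string[i]))
			return i;

	return -EINVAL;
}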
diff --git a/include/linux/amifd.h b/include/linux/amifd.h
deleted file mode 100644
index 346993268b45..000000000000
--- a/include/linux/amifd.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef _AMIFD_H
-#define _AMIFD_H
-
-/* Definitions for the Amiga floppy driver */
-
-#include <linux/fd.h>
-
-#define FD_MAX_UNITS 4 /* Max. Number of drives */
-#define FLOPPY_MAX_SECTORS 22 /* Max. Number of sectors per track */
-
-#ifndef ASSEMBLER
-
-struct fd_data_type {
- char *name; /* description of data type */
- int sects; /* sectors per track */
-#ifdef __STDC__
- int (*read_fkt)(int);
- void (*write_fkt)(int);
-#else
- int (*read_fkt)(); /* read whole track */
- void (*write_fkt)(); /* write whole track */
-#endif
-};
-
-/*
-** Floppy type descriptions
-*/
-
-struct fd_drive_type {
- unsigned long code; /* code returned from drive */
- char *name; /* description of drive */
- unsigned int tracks; /* number of tracks */
- unsigned int heads; /* number of heads */
- unsigned int read_size; /* raw read size for one track */
- unsigned int write_size; /* raw write size for one track */
- unsigned int sect_mult; /* sectors and gap multiplier (HD = 2) */
- unsigned int precomp1; /* start track for precomp 1 */
- unsigned int precomp2; /* start track for precomp 2 */
- unsigned int step_delay; /* time (in ms) for delay after step */
- unsigned int settle_time; /* time to settle after dir change */
- unsigned int side_time; /* time needed to change sides */
-};
-
-struct amiga_floppy_struct {
- struct fd_drive_type *type; /* type of floppy for this unit */
- struct fd_data_type *dtype; /* type of floppy for this unit */
- int track; /* current track (-1 == unknown) */
- unsigned char *trackbuf; /* current track (kmaloc()'d */
-
- int blocks; /* total # blocks on disk */
-
- int changed; /* true when not known */
- int disk; /* disk in drive (-1 == unknown) */
- int motor; /* true when motor is at speed */
- int busy; /* true when drive is active */
- int dirty; /* true when trackbuf is not on disk */
- int status; /* current error code for unit */
- struct gendisk *gendisk;
-};
-#endif
-
-#endif
diff --git a/include/linux/amifdreg.h b/include/linux/amifdreg.h
deleted file mode 100644
index 76188bf48d3b..000000000000
--- a/include/linux/amifdreg.h
+++ /dev/null
@@ -1,81 +0,0 @@
-#ifndef _LINUX_AMIFDREG_H
-#define _LINUX_AMIFDREG_H
-
-/*
-** CIAAPRA bits (read only)
-*/
-
-#define DSKRDY (0x1<<5) /* disk ready when low */
-#define DSKTRACK0 (0x1<<4) /* head at track zero when low */
-#define DSKPROT (0x1<<3) /* disk protected when low */
-#define DSKCHANGE (0x1<<2) /* low when disk removed */
-
-/*
-** CIAAPRB bits (read/write)
-*/
-
-#define DSKMOTOR (0x1<<7) /* motor on when low */
-#define DSKSEL3 (0x1<<6) /* select drive 3 when low */
-#define DSKSEL2 (0x1<<5) /* select drive 2 when low */
-#define DSKSEL1 (0x1<<4) /* select drive 1 when low */
-#define DSKSEL0 (0x1<<3) /* select drive 0 when low */
-#define DSKSIDE (0x1<<2) /* side selection: 0 = upper, 1 = lower */
-#define DSKDIREC (0x1<<1) /* step direction: 0=in, 1=out (to trk 0) */
-#define DSKSTEP (0x1) /* pulse low to step head 1 track */
-
-/*
-** DSKBYTR bits (read only)
-*/
-
-#define DSKBYT (1<<15) /* register contains valid byte when set */
-#define DMAON (1<<14) /* disk DMA enabled */
-#define DISKWRITE (1<<13) /* disk write bit in DSKLEN enabled */
-#define WORDEQUAL (1<<12) /* DSKSYNC register match when true */
-/* bits 7-0 are data */
-
-/*
-** ADKCON/ADKCONR bits
-*/
-
-#ifndef SETCLR
-#define ADK_SETCLR (1<<15) /* control bit */
-#endif
-#define ADK_PRECOMP1 (1<<14) /* precompensation selection */
-#define ADK_PRECOMP0 (1<<13) /* 00=none, 01=140ns, 10=280ns, 11=500ns */
-#define ADK_MFMPREC (1<<12) /* 0=GCR precomp., 1=MFM precomp. */
-#define ADK_WORDSYNC (1<<10) /* enable DSKSYNC auto DMA */
-#define ADK_MSBSYNC (1<<9) /* when 1, enable sync on MSbit (for GCR) */
-#define ADK_FAST (1<<8) /* bit cell: 0=2us (GCR), 1=1us (MFM) */
-
-/*
-** DSKLEN bits
-*/
-
-#define DSKLEN_DMAEN (1<<15)
-#define DSKLEN_WRITE (1<<14)
-
-/*
-** INTENA/INTREQ bits
-*/
-
-#define DSKINDEX (0x1<<4) /* DSKINDEX bit */
-
-/*
-** Misc
-*/
-
-#define MFM_SYNC 0x4489 /* standard MFM sync value */
-
-/* Values for FD_COMMAND */
-#define FD_RECALIBRATE 0x07 /* move to track 0 */
-#define FD_SEEK 0x0F /* seek track */
-#define FD_READ 0xE6 /* read with MT, MFM, SKip deleted */
-#define FD_WRITE 0xC5 /* write with MT, MFM */
-#define FD_SENSEI 0x08 /* Sense Interrupt Status */
-#define FD_SPECIFY 0x03 /* specify HUT etc */
-#define FD_FORMAT 0x4D /* format one track */
-#define FD_VERSION 0x10 /* get version code */
-#define FD_CONFIGURE 0x13 /* configure FIFO operation */
-#define FD_PERPENDICULAR 0x12 /* perpendicular r/w mode */
-
-#endif /* _LINUX_AMIFDREG_H */
diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h
index 8013a45242fe..5deaddbd7927 100644
--- a/include/linux/anon_inodes.h
+++ b/include/linux/anon_inodes.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/anon_inodes.h
*
@@ -9,12 +10,21 @@
#define _LINUX_ANON_INODES_H
struct file_operations;
+struct inode;
struct file *anon_inode_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags);
+struct file *anon_inode_getfile_secure(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags,
+ const struct inode *context_inode);
int anon_inode_getfd(const char *name, const struct file_operations *fops,
void *priv, int flags);
+int anon_inode_getfd_secure(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags,
+ const struct inode *context_inode);
#endif /* _LINUX_ANON_INODES_H */
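A minimal sketch of the classic (non-secure) entry point, in the style of eventfd-like interfaces; example_fops and the bracketed name are hypothetical.

#include <linux/anon_inodes.h>
#include <linux/fcntl.h>

static int example_create_fd(const struct file_operations *example_fops,
			     void *priv)
{
	return anon_inode_getfd("[example]", example_fops, priv,
				O_RDWR | O_CLOEXEC);
}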
diff --git a/include/linux/aperture.h b/include/linux/aperture.h
new file mode 100644
index 000000000000..442f15a57cad
--- /dev/null
+++ b/include/linux/aperture.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef _LINUX_APERTURE_H_
+#define _LINUX_APERTURE_H_
+
+#include <linux/types.h>
+
+struct pci_dev;
+struct platform_device;
+
+#if defined(CONFIG_APERTURE_HELPERS)
+int devm_aperture_acquire_for_platform_device(struct platform_device *pdev,
+ resource_size_t base,
+ resource_size_t size);
+
+int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size,
+ bool primary, const char *name);
+
+int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name);
+#else
+static inline int devm_aperture_acquire_for_platform_device(struct platform_device *pdev,
+ resource_size_t base,
+ resource_size_t size)
+{
+ return 0;
+}
+
+static inline int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size,
+ bool primary, const char *name)
+{
+ return 0;
+}
+
+static inline int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name)
+{
+ return 0;
+}
+#endif
+
+/**
+ * aperture_remove_all_conflicting_devices - remove all existing framebuffers
+ * @primary: also kick vga16fb if present; only relevant for VGA devices
+ * @name: a descriptive name of the requesting driver
+ *
+ * This function removes all graphics device drivers. Use this function on systems
+ * that can have their framebuffer located anywhere in memory.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise
+ */
+static inline int aperture_remove_all_conflicting_devices(bool primary, const char *name)
+{
+ return aperture_remove_conflicting_devices(0, (resource_size_t)-1, primary, name);
+}
+
+#endif
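A sketch of the intended call site: a PCI graphics driver evicting firmware framebuffers before taking ownership of the device; the driver name and probe function are hypothetical.

static int example_drm_probe(struct pci_dev *pdev)
{
	int ret;

	/* a no-op returning 0 when CONFIG_APERTURE_HELPERS is off */
	ret = aperture_remove_conflicting_pci_devices(pdev, "exampledrm");
	if (ret)
		return ret;

	/* ... regular device setup continues here ... */
	return 0;
}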
diff --git a/include/linux/apm-emulation.h b/include/linux/apm-emulation.h
index e6d800358dd6..94c036957948 100644
--- a/include/linux/apm-emulation.h
+++ b/include/linux/apm-emulation.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* -*- linux-c -*-
*
* (C) 2003 zecke@handhelds.org
*
- * GPL version 2
- *
* based on arch/arm/kernel/apm.c
* factor out the information needed by architectures to provide
* apm status
diff --git a/include/linux/apm_bios.h b/include/linux/apm_bios.h
index 9c3a87184f48..7554192c3ae3 100644
--- a/include/linux/apm_bios.h
+++ b/include/linux/apm_bios.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Include file for the interface to an APM BIOS
* Copyright 1994-2001 Stephen Rothwell (sfr@canb.auug.org.au)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef _LINUX_APM_H
#define _LINUX_APM_H
diff --git a/include/linux/apple-gmux.h b/include/linux/apple-gmux.h
index 714186de8c36..206d97ffda79 100644
--- a/include/linux/apple-gmux.h
+++ b/include/linux/apple-gmux.h
@@ -1,36 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* apple-gmux.h - microcontroller built into dual GPU MacBook Pro & Mac Pro
* Copyright (C) 2015 Lukas Wunner <lukas@wunner.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef LINUX_APPLE_GMUX_H
#define LINUX_APPLE_GMUX_H
#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/pnp.h>
#define GMUX_ACPI_HID "APP000B"
+/*
+ * gmux port offsets. Many of these are not yet used, but may be in the
+ * future, and it's useful to have them documented here anyhow.
+ */
+#define GMUX_PORT_VERSION_MAJOR 0x04
+#define GMUX_PORT_VERSION_MINOR 0x05
+#define GMUX_PORT_VERSION_RELEASE 0x06
+#define GMUX_PORT_SWITCH_DISPLAY 0x10
+#define GMUX_PORT_SWITCH_GET_DISPLAY 0x11
+#define GMUX_PORT_INTERRUPT_ENABLE 0x14
+#define GMUX_PORT_INTERRUPT_STATUS 0x16
+#define GMUX_PORT_SWITCH_DDC 0x28
+#define GMUX_PORT_SWITCH_EXTERNAL 0x40
+#define GMUX_PORT_SWITCH_GET_EXTERNAL 0x41
+#define GMUX_PORT_DISCRETE_POWER 0x50
+#define GMUX_PORT_MAX_BRIGHTNESS 0x70
+#define GMUX_PORT_BRIGHTNESS 0x74
+#define GMUX_PORT_VALUE 0xc2
+#define GMUX_PORT_READ 0xd0
+#define GMUX_PORT_WRITE 0xd4
+
+#define GMUX_MMIO_PORT_SELECT 0x0e
+#define GMUX_MMIO_COMMAND_SEND 0x0f
+
+#define GMUX_MMIO_READ 0x00
+#define GMUX_MMIO_WRITE 0x40
+
+#define GMUX_MIN_IO_LEN (GMUX_PORT_BRIGHTNESS + 4)
+
+enum apple_gmux_type {
+ APPLE_GMUX_TYPE_PIO,
+ APPLE_GMUX_TYPE_INDEXED,
+ APPLE_GMUX_TYPE_MMIO,
+};
+
#if IS_ENABLED(CONFIG_APPLE_GMUX)
+static inline bool apple_gmux_is_indexed(unsigned long iostart)
+{
+ u16 val;
+
+ outb(0xaa, iostart + 0xcc);
+ outb(0x55, iostart + 0xcd);
+ outb(0x00, iostart + 0xce);
+
+ val = inb(iostart + 0xcc) | (inb(iostart + 0xcd) << 8);
+ if (val == 0x55aa)
+ return true;
+
+ return false;
+}
+
+static inline bool apple_gmux_is_mmio(unsigned long iostart)
+{
+ u8 __iomem *iomem_base = ioremap(iostart, 16);
+ u8 val;
+
+ if (!iomem_base)
+ return false;
+
+ /*
+ * If this is 0xff, then gmux must not be present, as the gmux would
+ * reset it to 0x00, or it would be one of 0x1, 0x4, 0x41, 0x44 if a
+ * command is currently being processed.
+ */
+ val = ioread8(iomem_base + GMUX_MMIO_COMMAND_SEND);
+ iounmap(iomem_base);
+ return (val != 0xff);
+}
/**
- * apple_gmux_present() - detect if gmux is built into the machine
+ * apple_gmux_detect() - detect if gmux is built into the machine
+ *
+ * @pnp_dev: Device to probe or NULL to use the first matching device
+ * @type_ret: Returns (by reference) the apple_gmux_type of the device
+ *
+ * Detect if a supported gmux device is present by actually probing it.
+ * This avoids the false positives returned on some models by
+ * apple_gmux_present().
+ *
+ * Return: %true if a supported gmux ACPI device is detected and the kernel
+ * was configured with CONFIG_APPLE_GMUX, %false otherwise.
+ */
+static inline bool apple_gmux_detect(struct pnp_dev *pnp_dev, enum apple_gmux_type *type_ret)
+{
+ u8 ver_major, ver_minor, ver_release;
+ struct device *dev = NULL;
+ struct acpi_device *adev;
+ struct resource *res;
+ enum apple_gmux_type type = APPLE_GMUX_TYPE_PIO;
+ bool ret = false;
+
+ if (!pnp_dev) {
+ adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1);
+ if (!adev)
+ return false;
+
+ dev = get_device(acpi_get_first_physical_node(adev));
+ acpi_dev_put(adev);
+ if (!dev)
+ return false;
+
+ pnp_dev = to_pnp_dev(dev);
+ }
+
+ res = pnp_get_resource(pnp_dev, IORESOURCE_IO, 0);
+ if (res && resource_size(res) >= GMUX_MIN_IO_LEN) {
+ /*
+ * Invalid version information may indicate either that the gmux
+ * device isn't present or that it's a new one that uses indexed io.
+ */
+ ver_major = inb(res->start + GMUX_PORT_VERSION_MAJOR);
+ ver_minor = inb(res->start + GMUX_PORT_VERSION_MINOR);
+ ver_release = inb(res->start + GMUX_PORT_VERSION_RELEASE);
+ if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) {
+ if (apple_gmux_is_indexed(res->start))
+ type = APPLE_GMUX_TYPE_INDEXED;
+ else
+ goto out;
+ }
+ } else {
+ res = pnp_get_resource(pnp_dev, IORESOURCE_MEM, 0);
+ if (res && apple_gmux_is_mmio(res->start))
+ type = APPLE_GMUX_TYPE_MMIO;
+ else
+ goto out;
+ }
+
+ if (type_ret)
+ *type_ret = type;
+
+ ret = true;
+out:
+ put_device(dev);
+ return ret;
+}
+
+/**
+ * apple_gmux_present() - check if gmux ACPI device is present
*
* Drivers may use this to activate quirks specific to dual GPU MacBook Pros
* and Mac Pros, e.g. for deferred probing, runtime pm and backlight.
*
- * Return: %true if gmux is present and the kernel was configured
+ * Return: %true if gmux ACPI device is present and the kernel was configured
* with CONFIG_APPLE_GMUX, %false otherwise.
*/
static inline bool apple_gmux_present(void)
@@ -45,6 +170,11 @@ static inline bool apple_gmux_present(void)
return false;
}
+static inline bool apple_gmux_detect(struct pnp_dev *pnp_dev, enum apple_gmux_type *type_ret)
+{
+ return false;
+}
+
#endif /* !CONFIG_APPLE_GMUX */
#endif /* LINUX_APPLE_GMUX_H */
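As a usage sketch (not part of this patch), a driver might call the new probing helper from its PNP probe path like this; the gmux_probe() name and pnp_driver wiring are hypothetical, only apple_gmux_detect() and the apple_gmux_type values come from the header above:

#include <linux/apple-gmux.h>
#include <linux/pnp.h>

/* Hypothetical probe callback: bail out unless a real gmux responds. */
static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
{
	enum apple_gmux_type type;

	if (!apple_gmux_detect(pnp, &type))
		return -ENODEV;

	switch (type) {
	case APPLE_GMUX_TYPE_PIO:
	case APPLE_GMUX_TYPE_INDEXED:
	case APPLE_GMUX_TYPE_MMIO:
		/* set up register access to match the detected variant */
		break;
	}
	return 0;
}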
diff --git a/include/linux/apple-mailbox.h b/include/linux/apple-mailbox.h
new file mode 100644
index 000000000000..720fbb70294a
--- /dev/null
+++ b/include/linux/apple-mailbox.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple mailbox message format
+ *
+ * Copyright (C) 2021 The Asahi Linux Contributors
+ */
+
+#ifndef _LINUX_APPLE_MAILBOX_H_
+#define _LINUX_APPLE_MAILBOX_H_
+
+#include <linux/types.h>
+
+/* encodes a single 96bit message sent over the single channel */
+struct apple_mbox_msg {
+ u64 msg0;
+ u32 msg1;
+};
+
+#endif
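For illustration only, the 96-bit message carried by this struct might be assembled as below; the split between msg0 and msg1 is just what the layout implies, and the meaning of the payload bits is firmware-specific and not defined here:

#include <linux/apple-mailbox.h>

static struct apple_mbox_msg example_msg(u64 payload, u32 extra)
{
	struct apple_mbox_msg msg = {
		.msg0 = payload,	/* low 64 bits of the 96-bit message */
		.msg1 = extra,		/* high 32 bits */
	};
	return msg;
}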
diff --git a/include/linux/apple_bl.h b/include/linux/apple_bl.h
deleted file mode 100644
index 0a95e730fcea..000000000000
--- a/include/linux/apple_bl.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * apple_bl exported symbols
- */
-
-#ifndef _LINUX_APPLE_BL_H
-#define _LINUX_APPLE_BL_H
-
-#if defined(CONFIG_BACKLIGHT_APPLE) || defined(CONFIG_BACKLIGHT_APPLE_MODULE)
-
-extern int apple_bl_register(void);
-extern void apple_bl_unregister(void);
-
-#else /* !CONFIG_BACKLIGHT_APPLE */
-
-static inline int apple_bl_register(void)
-{
- return 0;
-}
-
-static inline void apple_bl_unregister(void)
-{
-}
-
-#endif /* !CONFIG_BACKLIGHT_APPLE */
-
-#endif /* _LINUX_APPLE_BL_H */
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 9af3c174c03a..a07b510e7dc5 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -1,17 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/arch_topology.h - arch specific cpu topology information
*/
#ifndef _LINUX_ARCH_TOPOLOGY_H_
#define _LINUX_ARCH_TOPOLOGY_H_
+#include <linux/types.h>
+#include <linux/percpu.h>
+
void topology_normalize_cpu_scale(void);
+int topology_update_cpu_topology(void);
+
+#ifdef CONFIG_ACPI_CPPC_LIB
+void topology_init_cpu_capacity_cppc(void);
+#endif
struct device_node;
-int topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
+bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
-struct sched_domain;
-unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu);
+DECLARE_PER_CPU(unsigned long, cpu_scale);
+
+static inline unsigned long topology_get_cpu_scale(int cpu)
+{
+ return per_cpu(cpu_scale, cpu);
+}
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
+DECLARE_PER_CPU(unsigned long, arch_freq_scale);
+
+static inline unsigned long topology_get_freq_scale(int cpu)
+{
+ return per_cpu(arch_freq_scale, cpu);
+}
+
+void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
+ unsigned long max_freq);
+bool topology_scale_freq_invariant(void);
+
+enum scale_freq_source {
+ SCALE_FREQ_SOURCE_CPUFREQ = 0,
+ SCALE_FREQ_SOURCE_ARCH,
+ SCALE_FREQ_SOURCE_CPPC,
+};
+
+struct scale_freq_data {
+ enum scale_freq_source source;
+ void (*set_freq_scale)(void);
+};
+
+void topology_scale_freq_tick(void);
+void topology_set_scale_freq_source(struct scale_freq_data *data, const struct cpumask *cpus);
+void topology_clear_scale_freq_source(enum scale_freq_source source, const struct cpumask *cpus);
+
+DECLARE_PER_CPU(unsigned long, thermal_pressure);
+
+static inline unsigned long topology_get_thermal_pressure(int cpu)
+{
+ return per_cpu(thermal_pressure, cpu);
+}
+
+void topology_update_thermal_pressure(const struct cpumask *cpus,
+ unsigned long capped_freq);
+
+struct cpu_topology {
+ int thread_id;
+ int core_id;
+ int cluster_id;
+ int package_id;
+ cpumask_t thread_sibling;
+ cpumask_t core_sibling;
+ cpumask_t cluster_sibling;
+ cpumask_t llc_sibling;
+};
+
+#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
+extern struct cpu_topology cpu_topology[NR_CPUS];
+
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id)
+#define topology_cluster_id(cpu) (cpu_topology[cpu].cluster_id)
+#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
+#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
+#define topology_cluster_cpumask(cpu) (&cpu_topology[cpu].cluster_sibling)
+#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling)
+void init_cpu_topology(void);
+void store_cpu_topology(unsigned int cpuid);
+const struct cpumask *cpu_coregroup_mask(int cpu);
+const struct cpumask *cpu_clustergroup_mask(int cpu);
+void update_siblings_masks(unsigned int cpu);
+void remove_cpu_topology(unsigned int cpuid);
+void reset_cpu_topology(void);
+int parse_acpi_topology(void);
+#endif
+
#endif /* _LINUX_ARCH_TOPOLOGY_H_ */
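A sketch of how the per-CPU accessors above might be consumed, e.g. to log each CPU's capacity and current frequency scale; the loop and pr_info() formatting are illustrative, not from the patch:

#include <linux/arch_topology.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

static void dump_cpu_scales(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		/* both values are relative to SCHED_CAPACITY_SCALE (1024) */
		pr_info("cpu%d: capacity=%lu freq_scale=%lu\n", cpu,
			topology_get_cpu_scale(cpu),
			topology_get_freq_scale(cpu));
	}
}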
diff --git a/include/linux/arm-cci.h b/include/linux/arm-cci.h
index 521ec1f2e6bc..d0e44201d855 100644
--- a/include/linux/arm-cci.h
+++ b/include/linux/arm-cci.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* CCI cache coherent interconnect support
*
* Copyright (C) 2013 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_ARM_CCI_H
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 4c5bca38c653..f196c19f8e55 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -1,27 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015, Linaro Limited
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_ARM_SMCCC_H
#define __LINUX_ARM_SMCCC_H
+#include <linux/init.h>
+#include <uapi/linux/const.h>
+
/*
* This file provides common defines for ARM SMC Calling Convention as
* specified in
- * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
+ * https://developer.arm.com/docs/den0028/latest
+ *
+ * This code is up-to-date with version DEN 0028 C
*/
-#define ARM_SMCCC_STD_CALL 0
-#define ARM_SMCCC_FAST_CALL 1
+#define ARM_SMCCC_STD_CALL _AC(0,U)
+#define ARM_SMCCC_FAST_CALL _AC(1,U)
#define ARM_SMCCC_TYPE_SHIFT 31
#define ARM_SMCCC_SMC_32 0
@@ -52,18 +48,202 @@
#define ARM_SMCCC_OWNER_SIP 2
#define ARM_SMCCC_OWNER_OEM 3
#define ARM_SMCCC_OWNER_STANDARD 4
+#define ARM_SMCCC_OWNER_STANDARD_HYP 5
+#define ARM_SMCCC_OWNER_VENDOR_HYP 6
#define ARM_SMCCC_OWNER_TRUSTED_APP 48
#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49
#define ARM_SMCCC_OWNER_TRUSTED_OS 50
#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63
+#define ARM_SMCCC_FUNC_QUERY_CALL_UID 0xff01
+
#define ARM_SMCCC_QUIRK_NONE 0
#define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */
+#define ARM_SMCCC_VERSION_1_0 0x10000
+#define ARM_SMCCC_VERSION_1_1 0x10001
+#define ARM_SMCCC_VERSION_1_2 0x10002
+#define ARM_SMCCC_VERSION_1_3 0x10003
+
+#define ARM_SMCCC_1_3_SVE_HINT 0x10000
+
+#define ARM_SMCCC_VERSION_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0)
+
+#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 1)
+
+#define ARM_SMCCC_ARCH_SOC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 2)
+
+#define ARM_SMCCC_ARCH_WORKAROUND_1 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0x8000)
+
+#define ARM_SMCCC_ARCH_WORKAROUND_2 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0x7fff)
+
+#define ARM_SMCCC_ARCH_WORKAROUND_3 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0x3fff)
+
+#define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_FUNC_QUERY_CALL_UID)
+
+/* KVM UID value: 28b46fb6-2ec5-11e9-a9ca-4b564d003a74 */
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 0xb66fb428U
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 0xe911c52eU
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 0x564bcaa9U
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3 0x743a004dU
+
+/* KVM "vendor specific" services */
+#define ARM_SMCCC_KVM_FUNC_FEATURES 0
+#define ARM_SMCCC_KVM_FUNC_PTP 1
+#define ARM_SMCCC_KVM_FUNC_FEATURES_2 127
+#define ARM_SMCCC_KVM_NUM_FUNCS 128
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_FEATURES)
+
+#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1
+
+/*
+ * ptp_kvm is a feature used for time sync between vm and host.
+ * ptp_kvm module in guest kernel will get service from host using
+ * this hypercall ID.
+ */
+#define ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_PTP)
+
+/* ptp_kvm counter type ID */
+#define KVM_PTP_VIRT_COUNTER 0
+#define KVM_PTP_PHYS_COUNTER 1
+
+/* Paravirtualised time calls (defined by ARM DEN0057A) */
+#define ARM_SMCCC_HV_PV_TIME_FEATURES \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD_HYP, \
+ 0x20)
+
+#define ARM_SMCCC_HV_PV_TIME_ST \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD_HYP, \
+ 0x21)
+
+/* TRNG entropy source calls (defined by ARM DEN0098) */
+#define ARM_SMCCC_TRNG_VERSION \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x50)
+
+#define ARM_SMCCC_TRNG_FEATURES \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x51)
+
+#define ARM_SMCCC_TRNG_GET_UUID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x52)
+
+#define ARM_SMCCC_TRNG_RND32 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x53)
+
+#define ARM_SMCCC_TRNG_RND64 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x53)
+
+/*
+ * Return codes defined in ARM DEN 0070A
+ * ARM DEN 0070A is now merged/consolidated into ARM DEN 0028 C
+ */
+#define SMCCC_RET_SUCCESS 0
+#define SMCCC_RET_NOT_SUPPORTED -1
+#define SMCCC_RET_NOT_REQUIRED -2
+#define SMCCC_RET_INVALID_PARAMETER -3
+
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
#include <linux/types.h>
+
+enum arm_smccc_conduit {
+ SMCCC_CONDUIT_NONE,
+ SMCCC_CONDUIT_SMC,
+ SMCCC_CONDUIT_HVC,
+};
+
+/**
+ * arm_smccc_1_1_get_conduit()
+ *
+ * Returns the conduit to be used for SMCCCv1.1 or later.
+ *
+ * When SMCCCv1.1 is not present, returns SMCCC_CONDUIT_NONE.
+ */
+enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void);
+
+/**
+ * arm_smccc_get_version()
+ *
+ * Returns the version to be used for SMCCCv1.1 or later.
+ *
+ * When SMCCCv1.1 or above is not present, returns SMCCCv1.0, but this
+ * does not imply the presence of firmware or a valid conduit. Caller
+ * handling SMCCCv1.0 must determine the conduit by other means.
+ */
+u32 arm_smccc_get_version(void);
+
+void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit);
+
+extern u64 smccc_has_sve_hint;
+
+/**
+ * arm_smccc_get_soc_id_version()
+ *
+ * Returns the SOC ID version.
+ *
+ * When ARM_SMCCC_ARCH_SOC_ID is not present, returns SMCCC_RET_NOT_SUPPORTED.
+ */
+s32 arm_smccc_get_soc_id_version(void);
+
+/**
+ * arm_smccc_get_soc_id_revision()
+ *
+ * Returns the SOC ID revision.
+ *
+ * When ARM_SMCCC_ARCH_SOC_ID is not present, returns SMCCC_RET_NOT_SUPPORTED.
+ */
+s32 arm_smccc_get_soc_id_revision(void);
+
/**
* struct arm_smccc_res - Result from SMC/HVC call
* @a0-a3 result values from registers 0 to 3
@@ -75,6 +255,61 @@ struct arm_smccc_res {
unsigned long a3;
};
+#ifdef CONFIG_ARM64
+/**
+ * struct arm_smccc_1_2_regs - Arguments for or Results from SMC/HVC call
+ * @a0-a17 argument values from registers 0 to 17
+ */
+struct arm_smccc_1_2_regs {
+ unsigned long a0;
+ unsigned long a1;
+ unsigned long a2;
+ unsigned long a3;
+ unsigned long a4;
+ unsigned long a5;
+ unsigned long a6;
+ unsigned long a7;
+ unsigned long a8;
+ unsigned long a9;
+ unsigned long a10;
+ unsigned long a11;
+ unsigned long a12;
+ unsigned long a13;
+ unsigned long a14;
+ unsigned long a15;
+ unsigned long a16;
+ unsigned long a17;
+};
+
+/**
+ * arm_smccc_1_2_hvc() - make HVC calls
+ * @args: arguments passed via struct arm_smccc_1_2_regs
+ * @res: result values via struct arm_smccc_1_2_regs
+ *
+ * This function is used to make HVC calls following SMC Calling Convention
+ * v1.2 or above. The contents of the supplied parameters are copied from the
+ * structure to registers prior to the HVC instruction. The return values
+ * are updated with the contents of the registers on return from the HVC
+ * instruction.
+ */
+asmlinkage void arm_smccc_1_2_hvc(const struct arm_smccc_1_2_regs *args,
+ struct arm_smccc_1_2_regs *res);
+
+/**
+ * arm_smccc_1_2_smc() - make SMC calls
+ * @args: arguments passed via struct arm_smccc_1_2_regs
+ * @res: result values via struct arm_smccc_1_2_regs
+ *
+ * This function is used to make SMC calls following SMC Calling Convention
+ * v1.2 or above. The contents of the supplied parameters are copied from the
+ * structure to registers prior to the SMC instruction. The return values
+ * are updated with the contents of the registers on return from the SMC
+ * instruction.
+ */
+asmlinkage void arm_smccc_1_2_smc(const struct arm_smccc_1_2_regs *args,
+ struct arm_smccc_1_2_regs *res);
+#endif
+
/**
* struct arm_smccc_quirk - Contains quirk information
* @id: quirk identification
@@ -89,6 +324,15 @@ struct arm_smccc_quirk {
};
/**
+ * __arm_smccc_sve_check() - Set the SVE hint bit when doing SMC calls
+ *
+ * Sets the SMCCC hint bit to indicate if there is live state in the SVE
+ * registers. This modifies x0 in place and should never be called from C
+ * code.
+ */
+asmlinkage unsigned long __arm_smccc_sve_check(unsigned long x0);
+
+/**
* __arm_smccc_smc() - make SMC calls
* @a0-a7: arguments passed in registers 0 to 7
* @res: result values from registers 0 to 3
@@ -100,10 +344,20 @@ struct arm_smccc_quirk {
* from register 0 to 3 on return from the SMC instruction. An optional
* quirk structure provides vendor specific behavior.
*/
+#ifdef CONFIG_HAVE_ARM_SMCCC
asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, unsigned long a6, unsigned long a7,
struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
+#else
+static inline void __arm_smccc_smc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3, unsigned long a4,
+ unsigned long a5, unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res, struct arm_smccc_quirk *quirk)
+{
+ *res = (struct arm_smccc_res){};
+}
+#endif
/**
* __arm_smccc_hvc() - make HVC calls
@@ -130,5 +384,201 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
#define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__)
+/* SMCCC v1.1 implementation madness follows */
+#ifdef CONFIG_ARM64
+
+#define SMCCC_SMC_INST "smc #0"
+#define SMCCC_HVC_INST "hvc #0"
+
+#elif defined(CONFIG_ARM)
+#include <asm/opcodes-sec.h>
+#include <asm/opcodes-virt.h>
+
+#define SMCCC_SMC_INST __SMC(0)
+#define SMCCC_HVC_INST __HVC(0)
+
+#endif
+
+/* nVHE hypervisor doesn't have a current thread so needs separate checks */
+#if defined(CONFIG_ARM64_SVE) && !defined(__KVM_NVHE_HYPERVISOR__)
+
+#define SMCCC_SVE_CHECK ALTERNATIVE("nop \n", "bl __arm_smccc_sve_check \n", \
+ ARM64_SVE)
+#define smccc_sve_clobbers "x16", "x30", "cc",
+
+#else
+
+#define SMCCC_SVE_CHECK
+#define smccc_sve_clobbers
+
+#endif
+
+#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
+
+#define __count_args(...) \
+ ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)
+
+#define __constraint_read_0 "r" (arg0)
+#define __constraint_read_1 __constraint_read_0, "r" (arg1)
+#define __constraint_read_2 __constraint_read_1, "r" (arg2)
+#define __constraint_read_3 __constraint_read_2, "r" (arg3)
+#define __constraint_read_4 __constraint_read_3, "r" (arg4)
+#define __constraint_read_5 __constraint_read_4, "r" (arg5)
+#define __constraint_read_6 __constraint_read_5, "r" (arg6)
+#define __constraint_read_7 __constraint_read_6, "r" (arg7)
+
+#define __declare_arg_0(a0, res) \
+ struct arm_smccc_res *___res = res; \
+ register unsigned long arg0 asm("r0") = (u32)a0
+
+#define __declare_arg_1(a0, a1, res) \
+ typeof(a1) __a1 = a1; \
+ struct arm_smccc_res *___res = res; \
+ register unsigned long arg0 asm("r0") = (u32)a0; \
+ register typeof(a1) arg1 asm("r1") = __a1
+
+#define __declare_arg_2(a0, a1, a2, res) \
+ typeof(a1) __a1 = a1; \
+ typeof(a2) __a2 = a2; \
+ struct arm_smccc_res *___res = res; \
+ register unsigned long arg0 asm("r0") = (u32)a0; \
+ register typeof(a1) arg1 asm("r1") = __a1; \
+ register typeof(a2) arg2 asm("r2") = __a2
+
+#define __declare_arg_3(a0, a1, a2, a3, res) \
+ typeof(a1) __a1 = a1; \
+ typeof(a2) __a2 = a2; \
+ typeof(a3) __a3 = a3; \
+ struct arm_smccc_res *___res = res; \
+ register unsigned long arg0 asm("r0") = (u32)a0; \
+ register typeof(a1) arg1 asm("r1") = __a1; \
+ register typeof(a2) arg2 asm("r2") = __a2; \
+ register typeof(a3) arg3 asm("r3") = __a3
+
+#define __declare_arg_4(a0, a1, a2, a3, a4, res) \
+ typeof(a4) __a4 = a4; \
+ __declare_arg_3(a0, a1, a2, a3, res); \
+ register typeof(a4) arg4 asm("r4") = __a4
+
+#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \
+ typeof(a5) __a5 = a5; \
+ __declare_arg_4(a0, a1, a2, a3, a4, res); \
+ register typeof(a5) arg5 asm("r5") = __a5
+
+#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \
+ typeof(a6) __a6 = a6; \
+ __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \
+ register typeof(a6) arg6 asm("r6") = __a6
+
+#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \
+ typeof(a7) __a7 = a7; \
+ __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \
+ register typeof(a7) arg7 asm("r7") = __a7
+
+#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
+#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__)
+
+#define ___constraints(count) \
+ : __constraint_read_ ## count \
+ : smccc_sve_clobbers "memory"
+#define __constraints(count) ___constraints(count)
+
+/*
+ * We have an output list that is not necessarily used, and GCC feels
+ * entitled to optimise the whole sequence away. "volatile" is what
+ * makes it stick.
+ */
+#define __arm_smccc_1_1(inst, ...) \
+ do { \
+ register unsigned long r0 asm("r0"); \
+ register unsigned long r1 asm("r1"); \
+ register unsigned long r2 asm("r2"); \
+ register unsigned long r3 asm("r3"); \
+ __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
+ asm volatile(SMCCC_SVE_CHECK \
+ inst "\n" : \
+ "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) \
+ __constraints(__count_args(__VA_ARGS__))); \
+ if (___res) \
+ *___res = (typeof(*___res)){r0, r1, r2, r3}; \
+ } while (0)
+
+/*
+ * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call
+ *
+ * This is a variadic macro taking one to eight source arguments, and
+ * an optional return structure.
+ *
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This macro is used to make SMC calls following SMC Calling Convention v1.1.
+ * The contents of the supplied parameters are copied to registers 0 to 7 prior
+ * to the SMC instruction. The return values are updated with the contents
+ * of registers 0 to 3 on return from the SMC instruction if @res is not NULL.
+ */
+#define arm_smccc_1_1_smc(...) __arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__)
+
+/*
+ * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call
+ *
+ * This is a variadic macro taking one to eight source arguments, and
+ * an optional return structure.
+ *
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This macro is used to make HVC calls following SMC Calling Convention v1.1.
+ * The contents of the supplied parameters are copied to registers 0 to 7 prior
+ * to the HVC instruction. The return values are updated with the contents
+ * of registers 0 to 3 on return from the HVC instruction if @res is not NULL.
+ */
+#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
+
+/*
+ * Like arm_smccc_1_1* but always returns SMCCC_RET_NOT_SUPPORTED.
+ * Used when the SMCCC conduit is not defined. The empty asm statement
+ * avoids compiler warnings about unused variables.
+ */
+#define __fail_smccc_1_1(...) \
+ do { \
+ __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
+ asm ("" : __constraints(__count_args(__VA_ARGS__))); \
+ if (___res) \
+ ___res->a0 = SMCCC_RET_NOT_SUPPORTED; \
+ } while (0)
+
+/*
+ * arm_smccc_1_1_invoke() - make an SMCCC v1.1 compliant call
+ *
+ * This is a variadic macro taking one to eight source arguments, and
+ * an optional return structure.
+ *
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This macro will make either an HVC call or an SMC call depending on the
+ * current SMCCC conduit. If no valid conduit is available then -1
+ * (SMCCC_RET_NOT_SUPPORTED) is returned in @res.a0 (if supplied).
+ *
+ * The return value also provides the conduit that was used.
+ */
+#define arm_smccc_1_1_invoke(...) ({ \
+ int method = arm_smccc_1_1_get_conduit(); \
+ switch (method) { \
+ case SMCCC_CONDUIT_HVC: \
+ arm_smccc_1_1_hvc(__VA_ARGS__); \
+ break; \
+ case SMCCC_CONDUIT_SMC: \
+ arm_smccc_1_1_smc(__VA_ARGS__); \
+ break; \
+ default: \
+ __fail_smccc_1_1(__VA_ARGS__); \
+ method = SMCCC_CONDUIT_NONE; \
+ break; \
+ } \
+ method; \
+ })
+
#endif /*__ASSEMBLY__*/
#endif /*__LINUX_ARM_SMCCC_H*/
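As a usage sketch (not from this patch), querying the SMCCC version through whichever conduit the firmware advertises could look like the following; per the macro documentation above, arm_smccc_1_1_invoke() leaves SMCCC_RET_NOT_SUPPORTED in res.a0 when no conduit is available:

#include <linux/arm-smccc.h>
#include <linux/printk.h>

static void report_smccc_version(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_VERSION_FUNC_ID, &res);
	if ((long)res.a0 == SMCCC_RET_NOT_SUPPORTED)
		pr_info("SMCCC: no conduit or v1.1 not implemented\n");
	else
		/* major version in bits [31:16], minor in [15:0] */
		pr_info("SMCCC version %lu.%lu\n",
			res.a0 >> 16, res.a0 & 0xffff);
}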
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
new file mode 100644
index 000000000000..c87aeecaa9b2
--- /dev/null
+++ b/include/linux/arm_ffa.h
@@ -0,0 +1,367 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+#ifndef _LINUX_ARM_FFA_H
+#define _LINUX_ARM_FFA_H
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+
+#define FFA_SMC(calling_convention, func_num) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, (calling_convention), \
+ ARM_SMCCC_OWNER_STANDARD, (func_num))
+
+#define FFA_SMC_32(func_num) FFA_SMC(ARM_SMCCC_SMC_32, (func_num))
+#define FFA_SMC_64(func_num) FFA_SMC(ARM_SMCCC_SMC_64, (func_num))
+
+#define FFA_ERROR FFA_SMC_32(0x60)
+#define FFA_SUCCESS FFA_SMC_32(0x61)
+#define FFA_INTERRUPT FFA_SMC_32(0x62)
+#define FFA_VERSION FFA_SMC_32(0x63)
+#define FFA_FEATURES FFA_SMC_32(0x64)
+#define FFA_RX_RELEASE FFA_SMC_32(0x65)
+#define FFA_RXTX_MAP FFA_SMC_32(0x66)
+#define FFA_FN64_RXTX_MAP FFA_SMC_64(0x66)
+#define FFA_RXTX_UNMAP FFA_SMC_32(0x67)
+#define FFA_PARTITION_INFO_GET FFA_SMC_32(0x68)
+#define FFA_ID_GET FFA_SMC_32(0x69)
+#define FFA_MSG_POLL FFA_SMC_32(0x6A)
+#define FFA_MSG_WAIT FFA_SMC_32(0x6B)
+#define FFA_YIELD FFA_SMC_32(0x6C)
+#define FFA_RUN FFA_SMC_32(0x6D)
+#define FFA_MSG_SEND FFA_SMC_32(0x6E)
+#define FFA_MSG_SEND_DIRECT_REQ FFA_SMC_32(0x6F)
+#define FFA_FN64_MSG_SEND_DIRECT_REQ FFA_SMC_64(0x6F)
+#define FFA_MSG_SEND_DIRECT_RESP FFA_SMC_32(0x70)
+#define FFA_FN64_MSG_SEND_DIRECT_RESP FFA_SMC_64(0x70)
+#define FFA_MEM_DONATE FFA_SMC_32(0x71)
+#define FFA_FN64_MEM_DONATE FFA_SMC_64(0x71)
+#define FFA_MEM_LEND FFA_SMC_32(0x72)
+#define FFA_FN64_MEM_LEND FFA_SMC_64(0x72)
+#define FFA_MEM_SHARE FFA_SMC_32(0x73)
+#define FFA_FN64_MEM_SHARE FFA_SMC_64(0x73)
+#define FFA_MEM_RETRIEVE_REQ FFA_SMC_32(0x74)
+#define FFA_FN64_MEM_RETRIEVE_REQ FFA_SMC_64(0x74)
+#define FFA_MEM_RETRIEVE_RESP FFA_SMC_32(0x75)
+#define FFA_MEM_RELINQUISH FFA_SMC_32(0x76)
+#define FFA_MEM_RECLAIM FFA_SMC_32(0x77)
+#define FFA_MEM_OP_PAUSE FFA_SMC_32(0x78)
+#define FFA_MEM_OP_RESUME FFA_SMC_32(0x79)
+#define FFA_MEM_FRAG_RX FFA_SMC_32(0x7A)
+#define FFA_MEM_FRAG_TX FFA_SMC_32(0x7B)
+#define FFA_NORMAL_WORLD_RESUME FFA_SMC_32(0x7C)
+
+/*
+ * For some calls it is necessary to use SMC64 to pass or return 64-bit values.
+ * For such calls FFA_FN_NATIVE(name) will choose the appropriate
+ * (native-width) function ID.
+ */
+#ifdef CONFIG_64BIT
+#define FFA_FN_NATIVE(name) FFA_FN64_##name
+#else
+#define FFA_FN_NATIVE(name) FFA_##name
+#endif
+
+/* FFA error codes. */
+#define FFA_RET_SUCCESS (0)
+#define FFA_RET_NOT_SUPPORTED (-1)
+#define FFA_RET_INVALID_PARAMETERS (-2)
+#define FFA_RET_NO_MEMORY (-3)
+#define FFA_RET_BUSY (-4)
+#define FFA_RET_INTERRUPTED (-5)
+#define FFA_RET_DENIED (-6)
+#define FFA_RET_RETRY (-7)
+#define FFA_RET_ABORTED (-8)
+
+/* FFA version encoding */
+#define FFA_MAJOR_VERSION_MASK GENMASK(30, 16)
+#define FFA_MINOR_VERSION_MASK GENMASK(15, 0)
+#define FFA_MAJOR_VERSION(x) ((u16)(FIELD_GET(FFA_MAJOR_VERSION_MASK, (x))))
+#define FFA_MINOR_VERSION(x) ((u16)(FIELD_GET(FFA_MINOR_VERSION_MASK, (x))))
+#define FFA_PACK_VERSION_INFO(major, minor) \
+ (FIELD_PREP(FFA_MAJOR_VERSION_MASK, (major)) | \
+ FIELD_PREP(FFA_MINOR_VERSION_MASK, (minor)))
+#define FFA_VERSION_1_0 FFA_PACK_VERSION_INFO(1, 0)
+
+/*
+ * The FF-A specification explicitly refers to '4K pages'. This should
+ * not be confused with the kernel PAGE_SIZE, which is the translation
+ * granule the kernel is configured with and may be one of 4K, 16K and 64K.
+ */
+#define FFA_PAGE_SIZE SZ_4K
+
+/* FFA Bus/Device/Driver related */
+struct ffa_device {
+ int vm_id;
+ bool mode_32bit;
+ uuid_t uuid;
+ struct device dev;
+ const struct ffa_ops *ops;
+};
+
+#define to_ffa_dev(d) container_of(d, struct ffa_device, dev)
+
+struct ffa_device_id {
+ uuid_t uuid;
+};
+
+struct ffa_driver {
+ const char *name;
+ int (*probe)(struct ffa_device *sdev);
+ void (*remove)(struct ffa_device *sdev);
+ const struct ffa_device_id *id_table;
+
+ struct device_driver driver;
+};
+
+#define to_ffa_driver(d) container_of(d, struct ffa_driver, driver)
+
+static inline void ffa_dev_set_drvdata(struct ffa_device *fdev, void *data)
+{
+ dev_set_drvdata(&fdev->dev, data);
+}
+
+static inline void *ffa_dev_get_drvdata(struct ffa_device *fdev)
+{
+ return dev_get_drvdata(&fdev->dev);
+}
+
+#if IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT)
+struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
+ const struct ffa_ops *ops);
+void ffa_device_unregister(struct ffa_device *ffa_dev);
+int ffa_driver_register(struct ffa_driver *driver, struct module *owner,
+ const char *mod_name);
+void ffa_driver_unregister(struct ffa_driver *driver);
+bool ffa_device_is_valid(struct ffa_device *ffa_dev);
+
+#else
+static inline
+struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
+ const struct ffa_ops *ops)
+{
+ return NULL;
+}
+
+static inline void ffa_device_unregister(struct ffa_device *dev) {}
+
+static inline int
+ffa_driver_register(struct ffa_driver *driver, struct module *owner,
+ const char *mod_name)
+{
+ return -EINVAL;
+}
+
+static inline void ffa_driver_unregister(struct ffa_driver *driver) {}
+
+static inline
+bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; }
+
+#endif /* CONFIG_ARM_FFA_TRANSPORT */
+
+#define ffa_register(driver) \
+ ffa_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+#define ffa_unregister(driver) \
+ ffa_driver_unregister(driver)
+
+/**
+ * module_ffa_driver() - Helper macro for registering an FF-A driver
+ * @__ffa_driver: ffa_driver structure
+ *
+ * Helper macro for FF-A drivers to set up proper module init / exit
+ * functions. Replaces module_init() and module_exit() and keeps people from
+ * printing pointless things to the kernel log when their driver is loaded.
+ */
+#define module_ffa_driver(__ffa_driver) \
+ module_driver(__ffa_driver, ffa_register, ffa_unregister)
+
+/* FFA transport related */
+struct ffa_partition_info {
+ u16 id;
+ u16 exec_ctxt;
+/* partition supports receipt of direct requests */
+#define FFA_PARTITION_DIRECT_RECV BIT(0)
+/* partition can send direct requests. */
+#define FFA_PARTITION_DIRECT_SEND BIT(1)
+/* partition can send and receive indirect messages. */
+#define FFA_PARTITION_INDIRECT_MSG BIT(2)
+/* partition runs in the AArch64 execution state. */
+#define FFA_PARTITION_AARCH64_EXEC BIT(8)
+ u32 properties;
+ u32 uuid[4];
+};
+
+/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP} which pass data via registers */
+struct ffa_send_direct_data {
+ unsigned long data0; /* w3/x3 */
+ unsigned long data1; /* w4/x4 */
+ unsigned long data2; /* w5/x5 */
+ unsigned long data3; /* w6/x6 */
+ unsigned long data4; /* w7/x7 */
+};
+
+struct ffa_mem_region_addr_range {
+ /* The base IPA of the constituent memory region, aligned to 4 kiB */
+ u64 address;
+ /* The number of 4 kiB pages in the constituent memory region. */
+ u32 pg_cnt;
+ u32 reserved;
+};
+
+struct ffa_composite_mem_region {
+ /*
+ * The total number of 4 kiB pages included in this memory region. This
+ * must be equal to the sum of page counts specified in each
+ * `struct ffa_mem_region_addr_range`.
+ */
+ u32 total_pg_cnt;
+ /* The number of constituents included in this memory region range */
+ u32 addr_range_cnt;
+ u64 reserved;
+ /** An array of `addr_range_cnt` memory region constituents. */
+ struct ffa_mem_region_addr_range constituents[];
+};
+
+struct ffa_mem_region_attributes {
+ /* The ID of the VM to which the memory is being given or shared. */
+ u16 receiver;
+ /*
+ * The permissions with which the memory region should be mapped in the
+ * receiver's page table.
+ */
+#define FFA_MEM_EXEC BIT(3)
+#define FFA_MEM_NO_EXEC BIT(2)
+#define FFA_MEM_RW BIT(1)
+#define FFA_MEM_RO BIT(0)
+ u8 attrs;
+ /*
+ * Flags used during FFA_MEM_RETRIEVE_REQ and FFA_MEM_RETRIEVE_RESP
+ * for memory regions with multiple borrowers.
+ */
+#define FFA_MEM_RETRIEVE_SELF_BORROWER BIT(0)
+ u8 flag;
+ /*
+ * Offset in bytes from the start of the outer `ffa_memory_region` to
+ * a `struct ffa_composite_mem_region`.
+ */
+ u32 composite_off;
+ u64 reserved;
+};
+
+struct ffa_mem_region {
+ /* The ID of the VM/owner which originally sent the memory region */
+ u16 sender_id;
+#define FFA_MEM_NORMAL BIT(5)
+#define FFA_MEM_DEVICE BIT(4)
+
+#define FFA_MEM_WRITE_BACK (3 << 2)
+#define FFA_MEM_NON_CACHEABLE (1 << 2)
+
+#define FFA_DEV_nGnRnE (0 << 2)
+#define FFA_DEV_nGnRE (1 << 2)
+#define FFA_DEV_nGRE (2 << 2)
+#define FFA_DEV_GRE (3 << 2)
+
+#define FFA_MEM_NON_SHAREABLE (0)
+#define FFA_MEM_OUTER_SHAREABLE (2)
+#define FFA_MEM_INNER_SHAREABLE (3)
+ u8 attributes;
+ u8 reserved_0;
+/*
+ * Clear memory region contents after unmapping it from the sender and
+ * before mapping it for any receiver.
+ */
+#define FFA_MEM_CLEAR BIT(0)
+/*
+ * Whether the hypervisor may time slice the memory sharing or retrieval
+ * operation.
+ */
+#define FFA_TIME_SLICE_ENABLE BIT(1)
+
+#define FFA_MEM_RETRIEVE_TYPE_IN_RESP (0 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_SHARE (1 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_LEND (2 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_DONATE (3 << 3)
+
+#define FFA_MEM_RETRIEVE_ADDR_ALIGN_HINT BIT(9)
+#define FFA_MEM_RETRIEVE_ADDR_ALIGN(x) ((x) << 5)
+ /* Flags to control behaviour of the transaction. */
+ u32 flags;
+#define HANDLE_LOW_MASK GENMASK_ULL(31, 0)
+#define HANDLE_HIGH_MASK GENMASK_ULL(63, 32)
+#define HANDLE_LOW(x) ((u32)(FIELD_GET(HANDLE_LOW_MASK, (x))))
+#define HANDLE_HIGH(x) ((u32)(FIELD_GET(HANDLE_HIGH_MASK, (x))))
+
+#define PACK_HANDLE(l, h) \
+ (FIELD_PREP(HANDLE_LOW_MASK, (l)) | FIELD_PREP(HANDLE_HIGH_MASK, (h)))
+ /*
+ * A globally-unique ID assigned by the hypervisor for a region
+ * of memory being sent between VMs.
+ */
+ u64 handle;
+ /*
+ * An implementation defined value associated with the receiver and the
+ * memory region.
+ */
+ u64 tag;
+ u32 reserved_1;
+ /*
+ * The number of `ffa_mem_region_attributes` entries included in this
+ * transaction.
+ */
+ u32 ep_count;
+ /*
+ * An array of endpoint memory access descriptors.
+ * Each one specifies a memory region offset, an endpoint and the
+ * attributes with which this memory region should be mapped in that
+ * endpoint's page table.
+ */
+ struct ffa_mem_region_attributes ep_mem_access[];
+};
+
+#define COMPOSITE_OFFSET(x) \
+ (offsetof(struct ffa_mem_region, ep_mem_access[x]))
+#define CONSTITUENTS_OFFSET(x) \
+ (offsetof(struct ffa_composite_mem_region, constituents[x]))
+#define COMPOSITE_CONSTITUENTS_OFFSET(x, y) \
+ (COMPOSITE_OFFSET(x) + CONSTITUENTS_OFFSET(y))
+
+struct ffa_mem_ops_args {
+ bool use_txbuf;
+ u32 nattrs;
+ u32 flags;
+ u64 tag;
+ u64 g_handle;
+ struct scatterlist *sg;
+ struct ffa_mem_region_attributes *attrs;
+};
+
+struct ffa_info_ops {
+ u32 (*api_version_get)(void);
+ int (*partition_info_get)(const char *uuid_str,
+ struct ffa_partition_info *buffer);
+};
+
+struct ffa_msg_ops {
+ void (*mode_32bit_set)(struct ffa_device *dev);
+ int (*sync_send_receive)(struct ffa_device *dev,
+ struct ffa_send_direct_data *data);
+};
+
+struct ffa_mem_ops {
+ int (*memory_reclaim)(u64 g_handle, u32 flags);
+ int (*memory_share)(struct ffa_mem_ops_args *args);
+ int (*memory_lend)(struct ffa_mem_ops_args *args);
+};
+
+struct ffa_ops {
+ const struct ffa_info_ops *info_ops;
+ const struct ffa_msg_ops *msg_ops;
+ const struct ffa_mem_ops *mem_ops;
+};
+
+#endif /* _LINUX_ARM_FFA_H */
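A skeletal FF-A driver built on the bus interface above; the UUID bytes and the probe body are placeholders, and only the registration plumbing (module_ffa_driver(), ffa_device_id, the ffa_ops accessors) comes from this header:

#include <linux/arm_ffa.h>

static int example_ffa_probe(struct ffa_device *ffa_dev)
{
	struct ffa_send_direct_data data = { .data0 = 0x1 };

	/* hypothetical: ask the partition to echo one register back */
	return ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &data);
}

static void example_ffa_remove(struct ffa_device *ffa_dev)
{
}

static const struct ffa_device_id example_ffa_ids[] = {
	/* placeholder UUID, replace with the partition's real one */
	{ UUID_INIT(0x12345678, 0x1234, 0x1234,
		    0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x12, 0x34) },
	{}
};

static struct ffa_driver example_ffa_driver = {
	.name = "example_ffa",
	.probe = example_ffa_probe,
	.remove = example_ffa_remove,
	.id_table = example_ffa_ids,
};
module_ffa_driver(example_ffa_driver);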
diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h
new file mode 100644
index 000000000000..14dc461b0e82
--- /dev/null
+++ b/include/linux/arm_sdei.h
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2017 Arm Ltd.
+#ifndef __LINUX_ARM_SDEI_H
+#define __LINUX_ARM_SDEI_H
+
+#include <uapi/linux/arm_sdei.h>
+
+#include <acpi/ghes.h>
+
+#ifdef CONFIG_ARM_SDE_INTERFACE
+#include <asm/sdei.h>
+#endif
+
+/* Arch code should override this to set the entry point from firmware... */
+#ifndef sdei_arch_get_entry_point
+#define sdei_arch_get_entry_point(conduit) (0)
+#endif
+
+/*
+ * When an event occurs sdei_event_handler() will call a user-provided callback
+ * like this in NMI context on the CPU that received the event.
+ */
+typedef int (sdei_event_callback)(u32 event, struct pt_regs *regs, void *arg);
+
+/*
+ * Register your callback to claim an event. The event must be described
+ * by firmware.
+ */
+int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg);
+
+/*
+ * Calls to sdei_event_unregister() may return EINPROGRESS. Keep calling
+ * it until it succeeds.
+ */
+int sdei_event_unregister(u32 event_num);
+
+int sdei_event_enable(u32 event_num);
+int sdei_event_disable(u32 event_num);
+
+/* GHES register/unregister helpers */
+int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
+ sdei_event_callback *critical_cb);
+int sdei_unregister_ghes(struct ghes *ghes);
+
+#ifdef CONFIG_ARM_SDE_INTERFACE
+/* For use by arch code when CPU hotplug notifiers are not appropriate. */
+int sdei_mask_local_cpu(void);
+int sdei_unmask_local_cpu(void);
+void __init sdei_init(void);
+#else
+static inline int sdei_mask_local_cpu(void) { return 0; }
+static inline int sdei_unmask_local_cpu(void) { return 0; }
+static inline void sdei_init(void) { }
+#endif /* CONFIG_ARM_SDE_INTERFACE */
+
+
+/*
+ * This struct represents an event that has been registered. The driver
+ * maintains a list of all events, and which ones are registered. (Private
+ * events have one entry in the list, but are registered on each CPU).
+ * A pointer to this struct is passed to firmware, and back to the event
+ * handler. The event handler can then use this to invoke the registered
+ * callback, without having to walk the list.
+ *
+ * For CPU private events, this structure is per-cpu.
+ */
+struct sdei_registered_event {
+ /* For use by arch code: */
+ struct pt_regs interrupted_regs;
+
+ sdei_event_callback *callback;
+ void *callback_arg;
+ u32 event_num;
+ u8 priority;
+};
+
+/* The arch code entry point should then call this when an event arrives. */
+int notrace sdei_event_handler(struct pt_regs *regs,
+ struct sdei_registered_event *arg);
+
+/* arch code may use this to retrieve the extra registers. */
+int sdei_api_event_context(u32 query, u64 *result);
+
+#endif /* __LINUX_ARM_SDEI_H */
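A sketch of the register/enable flow described in the comments above; the event number and handler body are hypothetical, and the callback runs in NMI context as the typedef's comment notes:

#include <linux/arm_sdei.h>

/* Runs in NMI context on the CPU that took the event. */
static int example_sdei_cb(u32 event, struct pt_regs *regs, void *arg)
{
	/* NMI-safe work only: no sleeping, no regular locks */
	return 0;
}

static int example_sdei_setup(void)
{
	u32 event_num = 0x40000000;	/* hypothetical firmware event */
	int err;

	err = sdei_event_register(event_num, example_sdei_cb, NULL);
	if (err)
		return err;

	return sdei_event_enable(event_num);
}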
diff --git a/include/linux/armada-37xx-rwtm-mailbox.h b/include/linux/armada-37xx-rwtm-mailbox.h
new file mode 100644
index 000000000000..ef4bd705eb65
--- /dev/null
+++ b/include/linux/armada-37xx-rwtm-mailbox.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * rWTM BIU Mailbox driver for Armada 37xx
+ *
+ * Author: Marek Behún <kabel@kernel.org>
+ */
+
+#ifndef _LINUX_ARMADA_37XX_RWTM_MAILBOX_H_
+#define _LINUX_ARMADA_37XX_RWTM_MAILBOX_H_
+
+#include <linux/types.h>
+
+struct armada_37xx_rwtm_tx_msg {
+ u16 command;
+ u32 args[16];
+};
+
+struct armada_37xx_rwtm_rx_msg {
+ u32 retval;
+ u32 status[16];
+};
+
+#endif /* _LINUX_ARMADA_37XX_RWTM_MAILBOX_H_ */
diff --git a/include/linux/ascii85.h b/include/linux/ascii85.h
new file mode 100644
index 000000000000..83ad775ad0aa
--- /dev/null
+++ b/include/linux/ascii85.h
@@ -0,0 +1,39 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2008 Intel Corporation
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ASCII85_H_
+#define _ASCII85_H_
+
+#include <linux/math.h>
+#include <linux/types.h>
+
+#define ASCII85_BUFSZ 6
+
+static inline long
+ascii85_encode_len(long len)
+{
+ return DIV_ROUND_UP(len, 4);
+}
+
+static inline const char *
+ascii85_encode(u32 in, char *out)
+{
+ int i;
+
+ if (in == 0)
+ return "z";
+
+ out[5] = '\0';
+ for (i = 5; i--; ) {
+ out[i] = '!' + in % 85;
+ in /= 85;
+ }
+
+ return out;
+}
+
+#endif
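Sketch of encoding a word stream with the helpers above. Note that ascii85_encode() returns either the caller's buffer or the literal string "z" (the all-zero shorthand), so the return value, not the buffer, is what must be printed:

#include <linux/ascii85.h>
#include <linux/printk.h>

static void print_ascii85(const u32 *words, long nwords)
{
	char out[ASCII85_BUFSZ];
	long i;

	for (i = 0; i < nwords; i++)
		pr_cont("%s", ascii85_encode(words[i], out));
	pr_cont("\n");
}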
diff --git a/include/linux/asn1.h b/include/linux/asn1.h
index eed6982860ba..a4d0bdd10711 100644
--- a/include/linux/asn1.h
+++ b/include/linux/asn1.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* ASN.1 BER/DER/CER encoding definitions
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
*/
#ifndef _LINUX_ASN1_H
diff --git a/include/linux/asn1_ber_bytecode.h b/include/linux/asn1_ber_bytecode.h
index ab3a6c002f7b..b38361953a48 100644
--- a/include/linux/asn1_ber_bytecode.h
+++ b/include/linux/asn1_ber_bytecode.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* ASN.1 BER/DER/CER parsing state machine internal definitions
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
*/
#ifndef _LINUX_ASN1_BER_BYTECODE_H
diff --git a/include/linux/asn1_decoder.h b/include/linux/asn1_decoder.h
index fa2ff5bc0483..83f9c6e1e5e9 100644
--- a/include/linux/asn1_decoder.h
+++ b/include/linux/asn1_decoder.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* ASN.1 decoder
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
*/
#ifndef _LINUX_ASN1_DECODER_H
diff --git a/include/linux/asn1_encoder.h b/include/linux/asn1_encoder.h
new file mode 100644
index 000000000000..08cd0c2ad34f
--- /dev/null
+++ b/include/linux/asn1_encoder.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _LINUX_ASN1_ENCODER_H
+#define _LINUX_ASN1_ENCODER_H
+
+#include <linux/types.h>
+#include <linux/asn1.h>
+#include <linux/asn1_ber_bytecode.h>
+#include <linux/bug.h>
+
+#define asn1_oid_len(oid) (sizeof(oid)/sizeof(u32))
+unsigned char *
+asn1_encode_integer(unsigned char *data, const unsigned char *end_data,
+ s64 integer);
+unsigned char *
+asn1_encode_oid(unsigned char *data, const unsigned char *end_data,
+ u32 oid[], int oid_len);
+unsigned char *
+asn1_encode_tag(unsigned char *data, const unsigned char *end_data,
+ u32 tag, const unsigned char *string, int len);
+unsigned char *
+asn1_encode_octet_string(unsigned char *data,
+ const unsigned char *end_data,
+ const unsigned char *string, u32 len);
+unsigned char *
+asn1_encode_sequence(unsigned char *data, const unsigned char *end_data,
+ const unsigned char *seq, int len);
+unsigned char *
+asn1_encode_boolean(unsigned char *data, const unsigned char *end_data,
+ bool val);
+
+#endif
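The encoder functions return the new write position on success and, as in the library implementation, an ERR_PTR() on overflow or invalid input, so calls chain naturally. A hedged sketch wrapping an integer in a SEQUENCE (buffer sizes chosen arbitrarily):

#include <linux/asn1_encoder.h>
#include <linux/err.h>

static int encode_wrapped_integer(unsigned char *buf, size_t buflen, s64 val)
{
	unsigned char seq[16];
	unsigned char *p;

	/* encode the INTEGER first, then wrap it in a SEQUENCE */
	p = asn1_encode_integer(seq, seq + sizeof(seq), val);
	if (IS_ERR(p))
		return PTR_ERR(p);

	p = asn1_encode_sequence(buf, buf + buflen, seq, p - seq);
	return IS_ERR(p) ? PTR_ERR(p) : 0;
}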
diff --git a/include/linux/assoc_array.h b/include/linux/assoc_array.h
index a89df3be1686..8b3f230ce894 100644
--- a/include/linux/assoc_array.h
+++ b/include/linux/assoc_array.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Generic associative array implementation.
*
- * See Documentation/assoc_array.txt for information.
+ * See Documentation/core-api/assoc_array.rst for information.
*
* Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
*/
#ifndef _LINUX_ASSOC_ARRAY_H
diff --git a/include/linux/assoc_array_priv.h b/include/linux/assoc_array_priv.h
index 711275e6681c..dca733ef6750 100644
--- a/include/linux/assoc_array_priv.h
+++ b/include/linux/assoc_array_priv.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Private definitions for the generic associative array implementation.
*
- * See Documentation/assoc_array.txt for information.
+ * See Documentation/core-api/assoc_array.rst for information.
*
* Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
*/
#ifndef _LINUX_ASSOC_ARRAY_PRIV_H
diff --git a/include/linux/async.h b/include/linux/async.h
index 6b0226bdaadc..cce4ad31e8fc 100644
--- a/include/linux/async.h
+++ b/include/linux/async.h
@@ -1,19 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* async.h: Asynchronous function calls for boot performance
*
* (C) Copyright 2009 Intel Corporation
* Author: Arjan van de Ven <arjan@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
*/
#ifndef __ASYNC_H__
#define __ASYNC_H__
#include <linux/types.h>
#include <linux/list.h>
+#include <linux/numa.h>
+#include <linux/device.h>
typedef u64 async_cookie_t;
typedef void (*async_func_t) (void *data, async_cookie_t cookie);
@@ -37,10 +35,83 @@ struct async_domain {
struct async_domain _name = { .pending = LIST_HEAD_INIT(_name.pending), \
.registered = 0 }
-extern async_cookie_t async_schedule(async_func_t func, void *data);
-extern async_cookie_t async_schedule_domain(async_func_t func, void *data,
- struct async_domain *domain);
-void async_unregister_domain(struct async_domain *domain);
+async_cookie_t async_schedule_node(async_func_t func, void *data,
+ int node);
+async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
+ int node,
+ struct async_domain *domain);
+
+/**
+ * async_schedule - schedule a function for asynchronous execution
+ * @func: function to execute asynchronously
+ * @data: data pointer to pass to the function
+ *
+ * Returns an async_cookie_t that may be used for checkpointing later.
+ * Note: This function may be called from atomic or non-atomic contexts.
+ */
+static inline async_cookie_t async_schedule(async_func_t func, void *data)
+{
+ return async_schedule_node(func, data, NUMA_NO_NODE);
+}
+
+/**
+ * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
+ * @func: function to execute asynchronously
+ * @data: data pointer to pass to the function
+ * @domain: the domain
+ *
+ * Returns an async_cookie_t that may be used for checkpointing later.
+ * @domain may be used in the async_synchronize_*_domain() functions to
+ * wait within a certain synchronization domain rather than globally.
+ * Note: This function may be called from atomic or non-atomic contexts.
+ */
+static inline async_cookie_t
+async_schedule_domain(async_func_t func, void *data,
+ struct async_domain *domain)
+{
+ return async_schedule_node_domain(func, data, NUMA_NO_NODE, domain);
+}
+
+/**
+ * async_schedule_dev - A device specific version of async_schedule
+ * @func: function to execute asynchronously
+ * @dev: device argument to be passed to function
+ *
+ * Returns an async_cookie_t that may be used for checkpointing later.
+ * @dev is used as both the argument for the function and to provide NUMA
+ * context for where to run the function. By doing this we can try to
+ * provide for the best possible outcome by operating on the device on the
+ * CPUs closest to the device.
+ * Note: This function may be called from atomic or non-atomic contexts.
+ */
+static inline async_cookie_t
+async_schedule_dev(async_func_t func, struct device *dev)
+{
+ return async_schedule_node(func, dev, dev_to_node(dev));
+}
+
+/**
+ * async_schedule_dev_domain - A device specific version of async_schedule_domain
+ * @func: function to execute asynchronously
+ * @dev: device argument to be passed to function
+ * @domain: the domain
+ *
+ * Returns an async_cookie_t that may be used for checkpointing later.
+ * @dev is used as both the argument for the function and to provide NUMA
+ * context for where to run the function. By doing this we can try to
+ * provide for the best possible outcome by operating on the device on the
+ * CPUs closest to the device.
+ * @domain may be used in the async_synchronize_*_domain() functions to
+ * wait within a certain synchronization domain rather than globally.
+ * Note: This function may be called from atomic or non-atomic contexts.
+ */
+static inline async_cookie_t
+async_schedule_dev_domain(async_func_t func, struct device *dev,
+ struct async_domain *domain)
+{
+ return async_schedule_node_domain(func, dev, dev_to_node(dev), domain);
+}
+
extern void async_synchronize_full(void);
extern void async_synchronize_full_domain(struct async_domain *domain);
extern void async_synchronize_cookie(async_cookie_t cookie);
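The checkpointing idiom the kerneldoc above alludes to, sketched with a made-up work function: schedule several independent calls, then wait on the cookie (which covers everything scheduled before it) or on everything outstanding:

#include <linux/async.h>

static void load_firmware_part(void *data, async_cookie_t cookie)
{
	/* slow, independent work runs here */
}

static void boot_example(void)
{
	async_cookie_t cookie;
	int i;

	for (i = 0; i < 4; i++)
		cookie = async_schedule(load_firmware_part, NULL);

	/* wait for all work scheduled before (not including) @cookie */
	async_synchronize_cookie(cookie);
	/* or wait for absolutely everything: */
	async_synchronize_full();
}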
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 28e3cf1465ab..5cc73d7e5b52 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -1,19 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright © 2006, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
#ifndef _ASYNC_TX_H_
#define _ASYNC_TX_H_
@@ -49,7 +36,7 @@ struct dma_chan_ref {
/**
* async_tx_flags - modifiers for the async_* calls
* @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where the
- * the destination address is not a source. The asynchronous case handles this
+ * destination address is not a source. The asynchronous case handles this
* implicitly, the synchronous case needs to zero the destination block.
* @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address is
* also one of the source addresses. In the synchronous case the destination
@@ -176,11 +163,22 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
int src_cnt, size_t len, struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
+async_xor_offs(struct page *dest, unsigned int offset,
+ struct page **src_list, unsigned int *src_offset,
+ int src_cnt, size_t len, struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
int src_cnt, size_t len, enum sum_check_flags *result,
struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
+async_xor_val_offs(struct page *dest, unsigned int offset,
+ struct page **src_list, unsigned int *src_offset,
+ int src_cnt, size_t len, enum sum_check_flags *result,
+ struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
unsigned int src_offset, size_t len,
struct async_submit_ctl *submit);
@@ -188,21 +186,23 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
-async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt,
+async_gen_syndrome(struct page **blocks, unsigned int *offsets, int src_cnt,
size_t len, struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
-async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt,
+async_syndrome_val(struct page **blocks, unsigned int *offsets, int src_cnt,
size_t len, enum sum_check_flags *pqres, struct page *spare,
- struct async_submit_ctl *submit);
+ unsigned int s_off, struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb,
- struct page **ptrs, struct async_submit_ctl *submit);
+ struct page **ptrs, unsigned int *offs,
+ struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
async_raid6_datap_recov(int src_num, size_t bytes, int faila,
- struct page **ptrs, struct async_submit_ctl *submit);
+ struct page **ptrs, unsigned int *offs,
+ struct async_submit_ctl *submit);
void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
#endif /* _ASYNC_TX_H_ */
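For context, a minimal (hypothetical) xor of two source pages into a destination using the pre-existing API in this header; init_async_submit() and the flags are from async_tx.h, the flag choice and synchronous wait are illustrative:

#include <linux/async_tx.h>

static void xor_two_pages(struct page *dest, struct page *a, struct page *b)
{
	struct page *srcs[2] = { a, b };
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;

	/* dest is not among the sources, so ask for it to be zeroed first */
	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK,
			  NULL, NULL, NULL, NULL);
	tx = async_xor(dest, srcs, 0, 2, PAGE_SIZE, &submit);
	async_tx_quiesce(&tx);	/* wait for the operation to complete */
}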
diff --git a/include/linux/ata.h b/include/linux/ata.h
index e65ae4b2ed48..c224dbddb9b2 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -1,38 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2003-2004 Red Hat, Inc. All rights reserved.
* Copyright 2003-2004 Jeff Garzik
*
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
*
* Hardware documentation available from http://www.t13.org/
- *
*/
#ifndef __LINUX_ATA_H__
#define __LINUX_ATA_H__
-#include <linux/kernel.h>
+#include <linux/bits.h>
#include <linux/string.h>
#include <linux/types.h>
-#include <asm/byteorder.h>
/* defines only for the constants which don't work well as enums */
#define ATA_DMA_BOUNDARY 0xffffUL
@@ -60,7 +43,8 @@ enum {
ATA_ID_FW_REV = 23,
ATA_ID_PROD = 27,
ATA_ID_MAX_MULTSECT = 47,
- ATA_ID_DWORD_IO = 48,
+ ATA_ID_DWORD_IO = 48, /* before ATA-8 */
+ ATA_ID_TRUSTED = 48, /* ATA-8 and later */
ATA_ID_CAPABILITY = 49,
ATA_ID_OLD_PIO_MODES = 51,
ATA_ID_OLD_DMA_MODES = 52,
@@ -339,6 +323,7 @@ enum {
ATA_LOG_NCQ_NON_DATA = 0x12,
ATA_LOG_NCQ_SEND_RECV = 0x13,
ATA_LOG_IDENTIFY_DEVICE = 0x30,
+ ATA_LOG_CONCURRENT_POSITIONING_RANGES = 0x47,
/* Identify device log pages: */
ATA_LOG_SECURITY = 0x06,
@@ -447,6 +432,8 @@ enum {
ATA_SET_MAX_LOCK = 0x02,
ATA_SET_MAX_UNLOCK = 0x03,
ATA_SET_MAX_FREEZE_LOCK = 0x04,
+ ATA_SET_MAX_PASSWD_DMA = 0x05,
+ ATA_SET_MAX_UNLOCK_DMA = 0x06,
/* feature values for DEVICE CONFIGURATION OVERLAY */
ATA_DCO_RESTORE = 0xC0,
@@ -578,6 +565,18 @@ struct ata_bmdma_prd {
((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
((id)[ATA_ID_FEATURE_SUPP] & (1 << 2)))
+#define ata_id_has_devslp(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8)))
+#define ata_id_has_ncq_autosense(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7)))
+#define ata_id_has_dipm(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 3)))
#define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10))
#define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11))
#define ata_id_u32(id,n) \
@@ -590,9 +589,6 @@ struct ata_bmdma_prd {
#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
-#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
-#define ata_id_has_ncq_autosense(id) \
- ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
static inline bool ata_id_has_hipm(const u16 *id)
{
@@ -604,17 +600,6 @@ static inline bool ata_id_has_hipm(const u16 *id)
return val & (1 << 9);
}
-static inline bool ata_id_has_dipm(const u16 *id)
-{
- u16 val = id[ATA_ID_FEATURE_SUPP];
-
- if (val == 0 || val == 0xffff)
- return false;
-
- return val & (1 << 3);
-}
-
-
static inline bool ata_id_has_fua(const u16 *id)
{
if ((id[ATA_ID_CFSSE] & 0xC000) != 0x4000)
@@ -629,15 +614,6 @@ static inline bool ata_id_has_flush(const u16 *id)
return id[ATA_ID_COMMAND_SET_2] & (1 << 12);
}
-static inline bool ata_id_flush_enabled(const u16 *id)
-{
- if (ata_id_has_flush(id) == 0)
- return false;
- if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
- return false;
- return id[ATA_ID_CFS_ENABLE_2] & (1 << 12);
-}
-
static inline bool ata_id_has_flush_ext(const u16 *id)
{
if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000)
@@ -645,19 +621,6 @@ static inline bool ata_id_has_flush_ext(const u16 *id)
return id[ATA_ID_COMMAND_SET_2] & (1 << 13);
}
-static inline bool ata_id_flush_ext_enabled(const u16 *id)
-{
- if (ata_id_has_flush_ext(id) == 0)
- return false;
- if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
- return false;
- /*
- * some Maxtor disks have bit 13 defined incorrectly
- * so check bit 10 too
- */
- return (id[ATA_ID_CFS_ENABLE_2] & 0x2400) == 0x2400;
-}
-
static inline u32 ata_id_logical_sector_size(const u16 *id)
{
/* T13/1699-D Revision 6a, Sep 6, 2008. Page 128.
@@ -712,15 +675,6 @@ static inline bool ata_id_has_lba48(const u16 *id)
return id[ATA_ID_COMMAND_SET_2] & (1 << 10);
}
-static inline bool ata_id_lba48_enabled(const u16 *id)
-{
- if (ata_id_has_lba48(id) == 0)
- return false;
- if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
- return false;
- return id[ATA_ID_CFS_ENABLE_2] & (1 << 10);
-}
-
static inline bool ata_id_hpa_enabled(const u16 *id)
{
/* Yes children, word 83 valid bits cover word 82 data */
@@ -783,16 +737,21 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
static inline bool ata_id_has_sense_reporting(const u16 *id)
{
- if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+ if (!(id[ATA_ID_CFS_ENABLE_2] & BIT(15)))
+ return false;
+ if ((id[ATA_ID_COMMAND_SET_3] & (BIT(15) | BIT(14))) != BIT(14))
return false;
- return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
+ return id[ATA_ID_COMMAND_SET_3] & BIT(6);
}
static inline bool ata_id_sense_reporting_enabled(const u16 *id)
{
- if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+ if (!ata_id_has_sense_reporting(id))
return false;
- return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
+ /* ata_id_has_sense_reporting() == true, word 86 must have bit 15 set */
+ if ((id[ATA_ID_COMMAND_SET_4] & (BIT(15) | BIT(14))) != BIT(14))
+ return false;
+ return id[ATA_ID_COMMAND_SET_4] & BIT(6);
}
/**
@@ -889,6 +848,13 @@ static inline bool ata_id_has_dword_io(const u16 *id)
return id[ATA_ID_DWORD_IO] & (1 << 0);
}
+static inline bool ata_id_has_trusted(const u16 *id)
+{
+ if (ata_id_major_version(id) <= 7)
+ return false;
+ return id[ATA_ID_TRUSTED] & (1 << 0);
+}
+
static inline bool ata_id_has_unload(const u16 *id)
{
if (ata_id_major_version(id) >= 7 &&
@@ -1050,76 +1016,6 @@ static inline bool atapi_id_dmadir(const u16 *dev_id)
return ata_id_major_version(dev_id) >= 7 && (dev_id[62] & 0x8000);
}
-/*
- * ata_id_is_lba_capacity_ok() performs a sanity check on
- * the claimed LBA capacity value for the device.
- *
- * Returns 1 if LBA capacity looks sensible, 0 otherwise.
- *
- * It is called only once for each device.
- */
-static inline bool ata_id_is_lba_capacity_ok(u16 *id)
-{
- unsigned long lba_sects, chs_sects, head, tail;
-
- /* No non-LBA info .. so valid! */
- if (id[ATA_ID_CYLS] == 0)
- return true;
-
- lba_sects = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
-
- /*
- * The ATA spec tells large drives to return
- * C/H/S = 16383/16/63 independent of their size.
- * Some drives can be jumpered to use 15 heads instead of 16.
- * Some drives can be jumpered to use 4092 cyls instead of 16383.
- */
- if ((id[ATA_ID_CYLS] == 16383 ||
- (id[ATA_ID_CYLS] == 4092 && id[ATA_ID_CUR_CYLS] == 16383)) &&
- id[ATA_ID_SECTORS] == 63 &&
- (id[ATA_ID_HEADS] == 15 || id[ATA_ID_HEADS] == 16) &&
- (lba_sects >= 16383 * 63 * id[ATA_ID_HEADS]))
- return true;
-
- chs_sects = id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * id[ATA_ID_SECTORS];
-
- /* perform a rough sanity check on lba_sects: within 10% is OK */
- if (lba_sects - chs_sects < chs_sects/10)
- return true;
-
- /* some drives have the word order reversed */
- head = (lba_sects >> 16) & 0xffff;
- tail = lba_sects & 0xffff;
- lba_sects = head | (tail << 16);
-
- if (lba_sects - chs_sects < chs_sects/10) {
- *(__le32 *)&id[ATA_ID_LBA_CAPACITY] = __cpu_to_le32(lba_sects);
- return true; /* LBA capacity is (now) good */
- }
-
- return false; /* LBA capacity value may be bad */
-}
-
-static inline void ata_id_to_hd_driveid(u16 *id)
-{
-#ifdef __BIG_ENDIAN
- /* accessed in struct hd_driveid as 8-bit values */
- id[ATA_ID_MAX_MULTSECT] = __cpu_to_le16(id[ATA_ID_MAX_MULTSECT]);
- id[ATA_ID_CAPABILITY] = __cpu_to_le16(id[ATA_ID_CAPABILITY]);
- id[ATA_ID_OLD_PIO_MODES] = __cpu_to_le16(id[ATA_ID_OLD_PIO_MODES]);
- id[ATA_ID_OLD_DMA_MODES] = __cpu_to_le16(id[ATA_ID_OLD_DMA_MODES]);
- id[ATA_ID_MULTSECT] = __cpu_to_le16(id[ATA_ID_MULTSECT]);
-
- /* as 32-bit values */
- *(u32 *)&id[ATA_ID_LBA_CAPACITY] = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
- *(u32 *)&id[ATA_ID_SPG] = ata_id_u32(id, ATA_ID_SPG);
-
- /* as 64-bit value */
- *(u64 *)&id[ATA_ID_LBA_CAPACITY_2] =
- ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
-#endif
-}
-
static inline bool ata_ok(u8 status)
{
return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))
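A recurring pattern in the reworked ata.h helpers above is the IDENTIFY-data validity rule: a feature word is trusted only when it is neither 0x0000 nor 0xffff, or, for the words tested with BIT(15) | BIT(14), only when those two bits read 01b. A hedged distillation of that second test (the helper name is illustrative):

/* Illustrative only: mirrors the bits-15:14 validity test used above. */
static inline bool ata_word_bits_valid(u16 word)
{
	/* per the ATA spec pattern, bits 15:14 == 01b mark the word as valid */
	return (word & (BIT(15) | BIT(14))) == BIT(14);
}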
diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h
index 619d9e78e644..b9745cc08e38 100644
--- a/include/linux/ata_platform.h
+++ b/include/linux/ata_platform.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_ATA_PLATFORM_H
#define __LINUX_ATA_PLATFORM_H
@@ -18,7 +19,8 @@ extern int __pata_platform_probe(struct device *dev,
struct resource *irq_res,
unsigned int ioport_shift,
int __pio_mask,
- struct scsi_host_template *sht);
+ const struct scsi_host_template *sht,
+ bool use16bit);
/*
* Marvell SATA private data
diff --git a/include/linux/atalk.h b/include/linux/atalk.h
index 73fd8b7e9534..a55bfc6567d0 100644
--- a/include/linux/atalk.h
+++ b/include/linux/atalk.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_ATALK_H__
#define __LINUX_ATALK_H__
@@ -107,15 +108,17 @@ static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb)
#define AARP_RESOLVE_TIME (10 * HZ)
extern struct datalink_proto *ddp_dl, *aarp_dl;
-extern void aarp_proto_init(void);
+extern int aarp_proto_init(void);
/* Inter module exports */
/* Give a device find its atif control structure */
+#if IS_ENABLED(CONFIG_ATALK)
static inline struct atalk_iface *atalk_find_dev(struct net_device *dev)
{
return dev->atalk_ptr;
}
+#endif
extern struct atalk_addr *atalk_find_dev_addr(struct net_device *dev);
extern struct net_device *atrtr_get_dev(struct atalk_addr *sa);
@@ -142,7 +145,12 @@ extern rwlock_t atalk_interfaces_lock;
extern struct atalk_route atrtr_default;
-extern const struct file_operations atalk_seq_arp_fops;
+struct aarp_iter_state {
+ int bucket;
+ struct aarp_entry **table;
+};
+
+extern const struct seq_operations aarp_seq_ops;
extern int sysctl_aarp_expiry_time;
extern int sysctl_aarp_tick_time;
@@ -150,19 +158,29 @@ extern int sysctl_aarp_retransmit_limit;
extern int sysctl_aarp_resolve_time;
#ifdef CONFIG_SYSCTL
-extern void atalk_register_sysctl(void);
+extern int atalk_register_sysctl(void);
extern void atalk_unregister_sysctl(void);
#else
-#define atalk_register_sysctl() do { } while(0)
-#define atalk_unregister_sysctl() do { } while(0)
+static inline int atalk_register_sysctl(void)
+{
+ return 0;
+}
+static inline void atalk_unregister_sysctl(void)
+{
+}
#endif
#ifdef CONFIG_PROC_FS
extern int atalk_proc_init(void);
extern void atalk_proc_exit(void);
#else
-#define atalk_proc_init() ({ 0; })
-#define atalk_proc_exit() do { } while(0)
+static inline int atalk_proc_init(void)
+{
+ return 0;
+}
+static inline void atalk_proc_exit(void)
+{
+}
#endif /* CONFIG_PROC_FS */
#endif /* __LINUX_ATALK_H__ */
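Replacing the do { } while (0) fallback macros with static inline stubs keeps the !CONFIG_SYSCTL and !CONFIG_PROC_FS builds type-checked and lets call sites stay identical in both configurations. A hedged sketch of such a call site (the function name is made up):

/* Illustrative caller; compiles unchanged whether or not PROC_FS is set. */
static int __init appletalk_bringup_sketch(void)
{
	int rc = atalk_proc_init();	/* inline stub simply returns 0 */

	if (rc)
		return rc;
	rc = atalk_register_sysctl();	/* likewise stubbed to return 0 */
	if (rc)
		atalk_proc_exit();	/* a real no-op function, not a macro */
	return rc;
}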
diff --git a/include/linux/atm.h b/include/linux/atm.h
index 30006c435951..4b50fd0a6eab 100644
--- a/include/linux/atm.h
+++ b/include/linux/atm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* atm.h - general ATM declarations */
#ifndef _LINUX_ATM_H
#define _LINUX_ATM_H
diff --git a/include/linux/atm_suni.h b/include/linux/atm_suni.h
deleted file mode 100644
index 84f3aab54468..000000000000
--- a/include/linux/atm_suni.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* atm_suni.h - Driver-specific declarations of the SUNI driver (for use by
- driver-specific utilities) */
-
-/* Written 1998,2000 by Werner Almesberger, EPFL ICA */
-
-
-#ifndef LINUX_ATM_SUNI_H
-#define LINUX_ATM_SUNI_H
-
-/* everything obsoleted */
-
-#endif
diff --git a/include/linux/atm_tcp.h b/include/linux/atm_tcp.h
index db6b65fc0aec..2558439d849b 100644
--- a/include/linux/atm_tcp.h
+++ b/include/linux/atm_tcp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* atm_tcp.h - Driver-specific declarations of the ATMTCP driver (for use by
driver-specific utilities) */
@@ -8,6 +9,8 @@
#include <uapi/linux/atm_tcp.h>
+struct atm_vcc;
+struct module;
struct atm_tcp_ops {
int (*attach)(struct atm_vcc *vcc,int itf);
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 0ec9bdb1cc9f..9b02961d65ee 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* atmdev.h - ATM device driver declarations and various related items */
#ifndef LINUX_ATMDEV_H
#define LINUX_ATMDEV_H
@@ -150,7 +151,7 @@ struct atm_dev {
const char *type; /* device type name */
int number; /* device index */
void *dev_data; /* per-device data */
- void *phy_data; /* private PHY date */
+ void *phy_data; /* private PHY data */
unsigned long flags; /* device flags (ATM_DF_*) */
struct list_head local; /* local ATM addresses */
struct list_head lecs; /* LECS ATM addresses learned via ILMI */
@@ -175,11 +176,6 @@ struct atm_dev {
#define ATM_OF_IMMED 1 /* Attempt immediate delivery */
#define ATM_OF_INRATE 2 /* Attempt in-rate delivery */
-
-/*
- * ioctl, getsockopt, and setsockopt are optional and can be set to NULL.
- */
-
struct atmdev_ops { /* only send is required */
void (*dev_close)(struct atm_dev *dev);
int (*open)(struct atm_vcc *vcc);
@@ -189,11 +185,8 @@ struct atmdev_ops { /* only send is required */
int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd,
void __user *arg);
#endif
- int (*getsockopt)(struct atm_vcc *vcc,int level,int optname,
- void __user *optval,int optlen);
- int (*setsockopt)(struct atm_vcc *vcc,int level,int optname,
- void __user *optval,unsigned int optlen);
int (*send)(struct atm_vcc *vcc,struct sk_buff *skb);
+ int (*send_bh)(struct atm_vcc *vcc, struct sk_buff *skb);
int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags);
void (*phy_put)(struct atm_dev *dev,unsigned char value,
unsigned long addr);
@@ -213,7 +206,8 @@ struct atmphy_ops {
struct atm_skb_data {
struct atm_vcc *vcc; /* ATM VCC */
unsigned long atm_options; /* ATM layer options */
-};
+ unsigned int acct_truesize; /* truesize accounted to vcc */
+} __packed;
#define VCC_HTABLE_SIZE 32
@@ -240,6 +234,20 @@ void vcc_insert_socket(struct sock *sk);
void atm_dev_release_vccs(struct atm_dev *dev);
+static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ /*
+ * Because ATM skbs may not belong to a sock (and we don't
+ * necessarily want them to), skb->truesize may be adjusted,
+ * escaping the hack in pskb_expand_head() which avoids
+ * doing so for some cases. So stash the value of truesize
+ * at the time we accounted it, and atm_pop_raw() can use
+ * that value later, in case it changes.
+ */
+ refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+ ATM_SKB(skb)->acct_truesize = skb->truesize;
+ ATM_SKB(skb)->atm_options = vcc->atm_options;
+}
static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
{
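The new acct_truesize field lets the pop path release exactly the amount atm_account_tx() charged, even if skb->truesize changes in between (e.g. via pskb_expand_head()). A hedged sketch of the pairing; the driver-side names are made up, and the pop body shows what a raw-VCC pop would be expected to do:

static int my_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	atm_account_tx(vcc, skb);	/* charge sk_wmem_alloc, stash truesize */
	/* ... queue skb to the hardware ... */
	return 0;
}

static void my_atm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
	/* uncharge what was accounted, not the possibly-changed truesize */
	WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize,
				      &sk_atm(vcc)->sk_wmem_alloc));
	dev_kfree_skb_any(skb);
}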
diff --git a/include/linux/atmel-isc-media.h b/include/linux/atmel-isc-media.h
new file mode 100644
index 000000000000..79a320fb724e
--- /dev/null
+++ b/include/linux/atmel-isc-media.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Eugen Hristev <eugen.hristev@microchip.com>
+ */
+
+#ifndef __LINUX_ATMEL_ISC_MEDIA_H__
+#define __LINUX_ATMEL_ISC_MEDIA_H__
+
+/*
+ * There are 8 controls available:
+ * 4 gain controls, sliders, for each of the BAYER components: R, B, GR, GB.
+ * These gains are multipliers for each component, in format unsigned 0:4:9 with
+ * a default value of 512 (1.0 multiplier).
+ * 4 offset controls, sliders, for each of the BAYER components: R, B, GR, GB.
+ * These offsets are added/subtracted from each component, in format signed
+ * 1:12:0 with a default value of 0 (+/- 0).
+ *
+ * To expose this to userspace, 8 custom controls were added, grouped in an
+ * auto cluster.
+ *
+ * To summarize the functionality:
+ * The auto cluster switch is the auto white balance control, and it works
+ * like this:
+ * AWB == 1: auto white balance is on, the do_white_balance button is inactive,
+ * and the gains/offsets are inactive, but volatile and readable.
+ * Thus, the results of the white balance algorithm are available to userspace
+ * to read at any time.
+ * AWB == 0: auto white balance is off and the cluster is in manual mode, so the
+ * user can configure the gains/offsets directly.
+ * In addition, if the do_white_balance button is pressed, the driver performs a
+ * one-time adjustment (preferably with a color checker card), and userspace can
+ * read the new values again.
+ *
+ * With this feature, userspace can save the coefficients and reinstall them,
+ * for example after a reboot or after reprobing the driver.
+ */
+
+enum atmel_isc_ctrl_id {
+ /* Red component gain control */
+ ISC_CID_R_GAIN = (V4L2_CID_USER_ATMEL_ISC_BASE + 0),
+ /* Blue component gain control */
+ ISC_CID_B_GAIN,
+ /* Green Red component gain control */
+ ISC_CID_GR_GAIN,
+ /* Green Blue gain control */
+ ISC_CID_GB_GAIN,
+ /* Red component offset control */
+ ISC_CID_R_OFFSET,
+ /* Blue component offset control */
+ ISC_CID_B_OFFSET,
+ /* Green Red component offset control */
+ ISC_CID_GR_OFFSET,
+ /* Green Blue component offset control */
+ ISC_CID_GB_OFFSET,
+};
+
+#endif
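For illustration, a userspace application could read one of these gains through the V4L2 extended-controls API roughly as follows; the device path is an assumption, and the control ID is re-derived locally since this header is not exported to userspace:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* mirrors ISC_CID_R_GAIN from the header above */
#define MY_ISC_CID_R_GAIN (V4L2_CID_USER_ATMEL_ISC_BASE + 0)

static int read_red_gain(const char *dev)	/* e.g. "/dev/video0", assumed */
{
	struct v4l2_ext_control ctrl = { .id = MY_ISC_CID_R_GAIN };
	struct v4l2_ext_controls ctrls;
	int fd = open(dev, O_RDWR);
	int val = -1;

	if (fd < 0)
		return -1;
	memset(&ctrls, 0, sizeof(ctrls));
	ctrls.which = V4L2_CTRL_WHICH_CUR_VAL;
	ctrls.count = 1;
	ctrls.controls = &ctrl;
	if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls) == 0)
		val = ctrl.value;	/* 512 corresponds to a 1.0 multiplier */
	close(fd);
	return val;
}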
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
index 42a9e1884842..1491af38cc6e 100644
--- a/include/linux/atmel-mci.h
+++ b/include/linux/atmel-mci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_ATMEL_MCI_H
#define __LINUX_ATMEL_MCI_H
diff --git a/include/linux/atmel-ssc.h b/include/linux/atmel-ssc.h
index fdb545101ede..6091d2abc1eb 100644
--- a/include/linux/atmel-ssc.h
+++ b/include/linux/atmel-ssc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __INCLUDE_ATMEL_SSC_H
#define __INCLUDE_ATMEL_SSC_H
diff --git a/include/linux/atmel_pdc.h b/include/linux/atmel_pdc.h
index 63499ce806ea..00a766b5ee96 100644
--- a/include/linux/atmel_pdc.h
+++ b/include/linux/atmel_pdc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/atmel_pdc.h
*
@@ -6,11 +7,6 @@
*
* Peripheral Data Controller (PDC) registers.
* Based on AT91RM9200 datasheet revision E.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef ATMEL_PDC_H
diff --git a/include/linux/atmel_tc.h b/include/linux/atmel_tc.h
deleted file mode 100644
index 468fdfa643f0..000000000000
--- a/include/linux/atmel_tc.h
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Timer/Counter Unit (TC) registers.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef ATMEL_TC_H
-#define ATMEL_TC_H
-
-#include <linux/compiler.h>
-#include <linux/list.h>
-
-/*
- * Many 32-bit Atmel SOCs include one or more TC blocks, each of which holds
- * three general-purpose 16-bit timers. These timers share one register bank.
- * Depending on the SOC, each timer may have its own clock and IRQ, or those
- * may be shared by the whole TC block.
- *
- * These TC blocks may have up to nine external pins: TCLK0..2 signals for
- * clocks or clock gates, and per-timer TIOA and TIOB signals used for PWM
- * or triggering. Those pins need to be set up for use with the TC block,
- * else they will be used as GPIOs or for a different controller.
- *
- * Although we expect each TC block to have a platform_device node, those
- * nodes are not what drivers bind to. Instead, they ask for a specific
- * TC block, by number ... which is a common approach on systems with many
- * timers. Then they use clk_get() and platform_get_irq() to get clock and
- * IRQ resources.
- */
-
-struct clk;
-
-/**
- * struct atmel_tcb_config - SoC data for a Timer/Counter Block
- * @counter_width: size in bits of a timer counter register
- */
-struct atmel_tcb_config {
- size_t counter_width;
-};
-
-/**
- * struct atmel_tc - information about a Timer/Counter Block
- * @pdev: physical device
- * @regs: mapping through which the I/O registers can be accessed
- * @id: block id
- * @tcb_config: configuration data from SoC
- * @irq: irq for each of the three channels
- * @clk: internal clock source for each of the three channels
- * @node: list node, for tclib internal use
- * @allocated: if already used, for tclib internal use
- *
- * On some platforms, each TC channel has its own clocks and IRQs,
- * while on others, all TC channels share the same clock and IRQ.
- * Drivers should clk_enable() all the clocks they need even though
- * all the entries in @clk may point to the same physical clock.
- * Likewise, drivers should request irqs independently for each
- * channel, but they must use IRQF_SHARED in case some of the entries
- * in @irq are actually the same IRQ.
- */
-struct atmel_tc {
- struct platform_device *pdev;
- void __iomem *regs;
- int id;
- const struct atmel_tcb_config *tcb_config;
- int irq[3];
- struct clk *clk[3];
- struct clk *slow_clk;
- struct list_head node;
- bool allocated;
-};
-
-extern struct atmel_tc *atmel_tc_alloc(unsigned block);
-extern void atmel_tc_free(struct atmel_tc *tc);
-
-/* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */
-extern const u8 atmel_tc_divisors[5];
-
-
-/*
- * Two registers have block-wide controls. These are: configuring the three
- * "external" clocks (or event sources) used by the timer channels; and
- * synchronizing the timers by resetting them all at once.
- *
- * "External" can mean "external to chip" using the TCLK0, TCLK1, or TCLK2
- * signals. Or, it can mean "external to timer", using the TIOA output from
- * one of the other two timers that's being run in waveform mode.
- */
-
-#define ATMEL_TC_BCR 0xc0 /* TC Block Control Register */
-#define ATMEL_TC_SYNC (1 << 0) /* synchronize timers */
-
-#define ATMEL_TC_BMR 0xc4 /* TC Block Mode Register */
-#define ATMEL_TC_TC0XC0S (3 << 0) /* external clock 0 source */
-#define ATMEL_TC_TC0XC0S_TCLK0 (0 << 0)
-#define ATMEL_TC_TC0XC0S_NONE (1 << 0)
-#define ATMEL_TC_TC0XC0S_TIOA1 (2 << 0)
-#define ATMEL_TC_TC0XC0S_TIOA2 (3 << 0)
-#define ATMEL_TC_TC1XC1S (3 << 2) /* external clock 1 source */
-#define ATMEL_TC_TC1XC1S_TCLK1 (0 << 2)
-#define ATMEL_TC_TC1XC1S_NONE (1 << 2)
-#define ATMEL_TC_TC1XC1S_TIOA0 (2 << 2)
-#define ATMEL_TC_TC1XC1S_TIOA2 (3 << 2)
-#define ATMEL_TC_TC2XC2S (3 << 4) /* external clock 2 source */
-#define ATMEL_TC_TC2XC2S_TCLK2 (0 << 4)
-#define ATMEL_TC_TC2XC2S_NONE (1 << 4)
-#define ATMEL_TC_TC2XC2S_TIOA0 (2 << 4)
-#define ATMEL_TC_TC2XC2S_TIOA1 (3 << 4)
-
-
-/*
- * Each TC block has three "channels", each with one counter and controls.
- *
- * Note that the semantics of ATMEL_TC_TIMER_CLOCKx (input clock selection
- * when it's not "external") is silicon-specific. AT91 platforms use one
- * set of definitions; AVR32 platforms use a different set. Don't hard-wire
- * such knowledge into your code, use the global "atmel_tc_divisors" ...
- * where index N is the divisor for clock N+1, else zero to indicate it uses
- * the 32 KiHz clock.
- *
- * The timers can be chained in various ways, and operated in "waveform"
- * generation mode (including PWM) or "capture" mode (to time events). In
- * both modes, behavior can be configured in many ways.
- *
- * Each timer has two I/O pins, TIOA and TIOB. Waveform mode uses TIOA as a
- * PWM output, and TIOB as either another PWM or as a trigger. Capture mode
- * uses them only as inputs.
- */
-#define ATMEL_TC_CHAN(idx) ((idx)*0x40)
-#define ATMEL_TC_REG(idx, reg) (ATMEL_TC_CHAN(idx) + ATMEL_TC_ ## reg)
-
-#define ATMEL_TC_CCR 0x00 /* Channel Control Register */
-#define ATMEL_TC_CLKEN (1 << 0) /* clock enable */
-#define ATMEL_TC_CLKDIS (1 << 1) /* clock disable */
-#define ATMEL_TC_SWTRG (1 << 2) /* software trigger */
-
-#define ATMEL_TC_CMR 0x04 /* Channel Mode Register */
-
-/* Both modes share some CMR bits */
-#define ATMEL_TC_TCCLKS (7 << 0) /* clock source */
-#define ATMEL_TC_TIMER_CLOCK1 (0 << 0)
-#define ATMEL_TC_TIMER_CLOCK2 (1 << 0)
-#define ATMEL_TC_TIMER_CLOCK3 (2 << 0)
-#define ATMEL_TC_TIMER_CLOCK4 (3 << 0)
-#define ATMEL_TC_TIMER_CLOCK5 (4 << 0)
-#define ATMEL_TC_XC0 (5 << 0)
-#define ATMEL_TC_XC1 (6 << 0)
-#define ATMEL_TC_XC2 (7 << 0)
-#define ATMEL_TC_CLKI (1 << 3) /* clock invert */
-#define ATMEL_TC_BURST (3 << 4) /* clock gating */
-#define ATMEL_TC_GATE_NONE (0 << 4)
-#define ATMEL_TC_GATE_XC0 (1 << 4)
-#define ATMEL_TC_GATE_XC1 (2 << 4)
-#define ATMEL_TC_GATE_XC2 (3 << 4)
-#define ATMEL_TC_WAVE (1 << 15) /* true = Waveform mode */
-
-/* CAPTURE mode CMR bits */
-#define ATMEL_TC_LDBSTOP (1 << 6) /* counter stops on RB load */
-#define ATMEL_TC_LDBDIS (1 << 7) /* counter disable on RB load */
-#define ATMEL_TC_ETRGEDG (3 << 8) /* external trigger edge */
-#define ATMEL_TC_ETRGEDG_NONE (0 << 8)
-#define ATMEL_TC_ETRGEDG_RISING (1 << 8)
-#define ATMEL_TC_ETRGEDG_FALLING (2 << 8)
-#define ATMEL_TC_ETRGEDG_BOTH (3 << 8)
-#define ATMEL_TC_ABETRG (1 << 10) /* external trigger is TIOA? */
-#define ATMEL_TC_CPCTRG (1 << 14) /* RC compare trigger enable */
-#define ATMEL_TC_LDRA (3 << 16) /* RA loading edge (of TIOA) */
-#define ATMEL_TC_LDRA_NONE (0 << 16)
-#define ATMEL_TC_LDRA_RISING (1 << 16)
-#define ATMEL_TC_LDRA_FALLING (2 << 16)
-#define ATMEL_TC_LDRA_BOTH (3 << 16)
-#define ATMEL_TC_LDRB (3 << 18) /* RB loading edge (of TIOA) */
-#define ATMEL_TC_LDRB_NONE (0 << 18)
-#define ATMEL_TC_LDRB_RISING (1 << 18)
-#define ATMEL_TC_LDRB_FALLING (2 << 18)
-#define ATMEL_TC_LDRB_BOTH (3 << 18)
-
-/* WAVEFORM mode CMR bits */
-#define ATMEL_TC_CPCSTOP (1 << 6) /* RC compare stops counter */
-#define ATMEL_TC_CPCDIS (1 << 7) /* RC compare disables counter */
-#define ATMEL_TC_EEVTEDG (3 << 8) /* external event edge */
-#define ATMEL_TC_EEVTEDG_NONE (0 << 8)
-#define ATMEL_TC_EEVTEDG_RISING (1 << 8)
-#define ATMEL_TC_EEVTEDG_FALLING (2 << 8)
-#define ATMEL_TC_EEVTEDG_BOTH (3 << 8)
-#define ATMEL_TC_EEVT (3 << 10) /* external event source */
-#define ATMEL_TC_EEVT_TIOB (0 << 10)
-#define ATMEL_TC_EEVT_XC0 (1 << 10)
-#define ATMEL_TC_EEVT_XC1 (2 << 10)
-#define ATMEL_TC_EEVT_XC2 (3 << 10)
-#define ATMEL_TC_ENETRG (1 << 12) /* external event is trigger */
-#define ATMEL_TC_WAVESEL (3 << 13) /* waveform type */
-#define ATMEL_TC_WAVESEL_UP (0 << 13)
-#define ATMEL_TC_WAVESEL_UPDOWN (1 << 13)
-#define ATMEL_TC_WAVESEL_UP_AUTO (2 << 13)
-#define ATMEL_TC_WAVESEL_UPDOWN_AUTO (3 << 13)
-#define ATMEL_TC_ACPA (3 << 16) /* RA compare changes TIOA */
-#define ATMEL_TC_ACPA_NONE (0 << 16)
-#define ATMEL_TC_ACPA_SET (1 << 16)
-#define ATMEL_TC_ACPA_CLEAR (2 << 16)
-#define ATMEL_TC_ACPA_TOGGLE (3 << 16)
-#define ATMEL_TC_ACPC (3 << 18) /* RC compare changes TIOA */
-#define ATMEL_TC_ACPC_NONE (0 << 18)
-#define ATMEL_TC_ACPC_SET (1 << 18)
-#define ATMEL_TC_ACPC_CLEAR (2 << 18)
-#define ATMEL_TC_ACPC_TOGGLE (3 << 18)
-#define ATMEL_TC_AEEVT (3 << 20) /* external event changes TIOA */
-#define ATMEL_TC_AEEVT_NONE (0 << 20)
-#define ATMEL_TC_AEEVT_SET (1 << 20)
-#define ATMEL_TC_AEEVT_CLEAR (2 << 20)
-#define ATMEL_TC_AEEVT_TOGGLE (3 << 20)
-#define ATMEL_TC_ASWTRG (3 << 22) /* software trigger changes TIOA */
-#define ATMEL_TC_ASWTRG_NONE (0 << 22)
-#define ATMEL_TC_ASWTRG_SET (1 << 22)
-#define ATMEL_TC_ASWTRG_CLEAR (2 << 22)
-#define ATMEL_TC_ASWTRG_TOGGLE (3 << 22)
-#define ATMEL_TC_BCPB (3 << 24) /* RB compare changes TIOB */
-#define ATMEL_TC_BCPB_NONE (0 << 24)
-#define ATMEL_TC_BCPB_SET (1 << 24)
-#define ATMEL_TC_BCPB_CLEAR (2 << 24)
-#define ATMEL_TC_BCPB_TOGGLE (3 << 24)
-#define ATMEL_TC_BCPC (3 << 26) /* RC compare changes TIOB */
-#define ATMEL_TC_BCPC_NONE (0 << 26)
-#define ATMEL_TC_BCPC_SET (1 << 26)
-#define ATMEL_TC_BCPC_CLEAR (2 << 26)
-#define ATMEL_TC_BCPC_TOGGLE (3 << 26)
-#define ATMEL_TC_BEEVT (3 << 28) /* external event changes TIOB */
-#define ATMEL_TC_BEEVT_NONE (0 << 28)
-#define ATMEL_TC_BEEVT_SET (1 << 28)
-#define ATMEL_TC_BEEVT_CLEAR (2 << 28)
-#define ATMEL_TC_BEEVT_TOGGLE (3 << 28)
-#define ATMEL_TC_BSWTRG (3 << 30) /* software trigger changes TIOB */
-#define ATMEL_TC_BSWTRG_NONE (0 << 30)
-#define ATMEL_TC_BSWTRG_SET (1 << 30)
-#define ATMEL_TC_BSWTRG_CLEAR (2 << 30)
-#define ATMEL_TC_BSWTRG_TOGGLE (3 << 30)
-
-#define ATMEL_TC_CV 0x10 /* counter Value */
-#define ATMEL_TC_RA 0x14 /* register A */
-#define ATMEL_TC_RB 0x18 /* register B */
-#define ATMEL_TC_RC 0x1c /* register C */
-
-#define ATMEL_TC_SR 0x20 /* status (read-only) */
-/* Status-only flags */
-#define ATMEL_TC_CLKSTA (1 << 16) /* clock enabled */
-#define ATMEL_TC_MTIOA (1 << 17) /* TIOA mirror */
-#define ATMEL_TC_MTIOB (1 << 18) /* TIOB mirror */
-
-#define ATMEL_TC_IER 0x24 /* interrupt enable (write-only) */
-#define ATMEL_TC_IDR 0x28 /* interrupt disable (write-only) */
-#define ATMEL_TC_IMR 0x2c /* interrupt mask (read-only) */
-
-/* Status and IRQ flags */
-#define ATMEL_TC_COVFS (1 << 0) /* counter overflow */
-#define ATMEL_TC_LOVRS (1 << 1) /* load overrun */
-#define ATMEL_TC_CPAS (1 << 2) /* RA compare */
-#define ATMEL_TC_CPBS (1 << 3) /* RB compare */
-#define ATMEL_TC_CPCS (1 << 4) /* RC compare */
-#define ATMEL_TC_LDRAS (1 << 5) /* RA loading */
-#define ATMEL_TC_LDRBS (1 << 6) /* RB loading */
-#define ATMEL_TC_ETRGS (1 << 7) /* external trigger */
-#define ATMEL_TC_ALL_IRQ (ATMEL_TC_COVFS | ATMEL_TC_LOVRS | \
- ATMEL_TC_CPAS | ATMEL_TC_CPBS | \
- ATMEL_TC_CPCS | ATMEL_TC_LDRAS | \
- ATMEL_TC_LDRBS | ATMEL_TC_ETRGS) \
- /* all IRQs */
-
-#endif
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index c56be7410130..8dd57c3a99e9 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -1,6 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
+#include <linux/types.h>
+
#include <asm/atomic.h>
#include <asm/barrier.h>
@@ -22,1053 +25,60 @@
* See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
*/
-#ifndef atomic_read_acquire
-#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter)
-#endif
+#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
-#ifndef atomic_set_release
-#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i))
-#endif
+#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
/*
* The idea here is to build acquire/release variants by adding explicit
* barriers on top of the relaxed variant. In the case where the relaxed
* variant is already fully ordered, no additional barriers are needed.
*
- * Besides, if an arch has a special barrier for acquire/release, it could
- * implement its own __atomic_op_* and use the same framework for building
- * variants
+ * If an architecture overrides __atomic_acquire_fence() it will probably
+ * want to define smp_mb__after_spinlock().
*/
-#ifndef __atomic_op_acquire
+#ifndef __atomic_acquire_fence
+#define __atomic_acquire_fence smp_mb__after_atomic
+#endif
+
+#ifndef __atomic_release_fence
+#define __atomic_release_fence smp_mb__before_atomic
+#endif
+
+#ifndef __atomic_pre_full_fence
+#define __atomic_pre_full_fence smp_mb__before_atomic
+#endif
+
+#ifndef __atomic_post_full_fence
+#define __atomic_post_full_fence smp_mb__after_atomic
+#endif
+
#define __atomic_op_acquire(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
- smp_mb__after_atomic(); \
+ __atomic_acquire_fence(); \
__ret; \
})
-#endif
-#ifndef __atomic_op_release
#define __atomic_op_release(op, args...) \
({ \
- smp_mb__before_atomic(); \
+ __atomic_release_fence(); \
op##_relaxed(args); \
})
-#endif
-#ifndef __atomic_op_fence
#define __atomic_op_fence(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret; \
- smp_mb__before_atomic(); \
+ __atomic_pre_full_fence(); \
__ret = op##_relaxed(args); \
- smp_mb__after_atomic(); \
+ __atomic_post_full_fence(); \
__ret; \
})
-#endif
-
-/* atomic_add_return_relaxed */
-#ifndef atomic_add_return_relaxed
-#define atomic_add_return_relaxed atomic_add_return
-#define atomic_add_return_acquire atomic_add_return
-#define atomic_add_return_release atomic_add_return
-
-#else /* atomic_add_return_relaxed */
-
-#ifndef atomic_add_return_acquire
-#define atomic_add_return_acquire(...) \
- __atomic_op_acquire(atomic_add_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_add_return_release
-#define atomic_add_return_release(...) \
- __atomic_op_release(atomic_add_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_add_return
-#define atomic_add_return(...) \
- __atomic_op_fence(atomic_add_return, __VA_ARGS__)
-#endif
-#endif /* atomic_add_return_relaxed */
-
-/* atomic_inc_return_relaxed */
-#ifndef atomic_inc_return_relaxed
-#define atomic_inc_return_relaxed atomic_inc_return
-#define atomic_inc_return_acquire atomic_inc_return
-#define atomic_inc_return_release atomic_inc_return
-
-#else /* atomic_inc_return_relaxed */
-
-#ifndef atomic_inc_return_acquire
-#define atomic_inc_return_acquire(...) \
- __atomic_op_acquire(atomic_inc_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_inc_return_release
-#define atomic_inc_return_release(...) \
- __atomic_op_release(atomic_inc_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_inc_return
-#define atomic_inc_return(...) \
- __atomic_op_fence(atomic_inc_return, __VA_ARGS__)
-#endif
-#endif /* atomic_inc_return_relaxed */
-
-/* atomic_sub_return_relaxed */
-#ifndef atomic_sub_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return
-#define atomic_sub_return_acquire atomic_sub_return
-#define atomic_sub_return_release atomic_sub_return
-
-#else /* atomic_sub_return_relaxed */
-
-#ifndef atomic_sub_return_acquire
-#define atomic_sub_return_acquire(...) \
- __atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_sub_return_release
-#define atomic_sub_return_release(...) \
- __atomic_op_release(atomic_sub_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_sub_return
-#define atomic_sub_return(...) \
- __atomic_op_fence(atomic_sub_return, __VA_ARGS__)
-#endif
-#endif /* atomic_sub_return_relaxed */
-
-/* atomic_dec_return_relaxed */
-#ifndef atomic_dec_return_relaxed
-#define atomic_dec_return_relaxed atomic_dec_return
-#define atomic_dec_return_acquire atomic_dec_return
-#define atomic_dec_return_release atomic_dec_return
-
-#else /* atomic_dec_return_relaxed */
-
-#ifndef atomic_dec_return_acquire
-#define atomic_dec_return_acquire(...) \
- __atomic_op_acquire(atomic_dec_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_dec_return_release
-#define atomic_dec_return_release(...) \
- __atomic_op_release(atomic_dec_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_dec_return
-#define atomic_dec_return(...) \
- __atomic_op_fence(atomic_dec_return, __VA_ARGS__)
-#endif
-#endif /* atomic_dec_return_relaxed */
-
-
-/* atomic_fetch_add_relaxed */
-#ifndef atomic_fetch_add_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add
-#define atomic_fetch_add_acquire atomic_fetch_add
-#define atomic_fetch_add_release atomic_fetch_add
-
-#else /* atomic_fetch_add_relaxed */
-
-#ifndef atomic_fetch_add_acquire
-#define atomic_fetch_add_acquire(...) \
- __atomic_op_acquire(atomic_fetch_add, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_add_release
-#define atomic_fetch_add_release(...) \
- __atomic_op_release(atomic_fetch_add, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_add
-#define atomic_fetch_add(...) \
- __atomic_op_fence(atomic_fetch_add, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_add_relaxed */
-
-/* atomic_fetch_inc_relaxed */
-#ifndef atomic_fetch_inc_relaxed
-
-#ifndef atomic_fetch_inc
-#define atomic_fetch_inc(v) atomic_fetch_add(1, (v))
-#define atomic_fetch_inc_relaxed(v) atomic_fetch_add_relaxed(1, (v))
-#define atomic_fetch_inc_acquire(v) atomic_fetch_add_acquire(1, (v))
-#define atomic_fetch_inc_release(v) atomic_fetch_add_release(1, (v))
-#else /* atomic_fetch_inc */
-#define atomic_fetch_inc_relaxed atomic_fetch_inc
-#define atomic_fetch_inc_acquire atomic_fetch_inc
-#define atomic_fetch_inc_release atomic_fetch_inc
-#endif /* atomic_fetch_inc */
-
-#else /* atomic_fetch_inc_relaxed */
-
-#ifndef atomic_fetch_inc_acquire
-#define atomic_fetch_inc_acquire(...) \
- __atomic_op_acquire(atomic_fetch_inc, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_inc_release
-#define atomic_fetch_inc_release(...) \
- __atomic_op_release(atomic_fetch_inc, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_inc
-#define atomic_fetch_inc(...) \
- __atomic_op_fence(atomic_fetch_inc, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_inc_relaxed */
-
-/* atomic_fetch_sub_relaxed */
-#ifndef atomic_fetch_sub_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub
-#define atomic_fetch_sub_acquire atomic_fetch_sub
-#define atomic_fetch_sub_release atomic_fetch_sub
-
-#else /* atomic_fetch_sub_relaxed */
-
-#ifndef atomic_fetch_sub_acquire
-#define atomic_fetch_sub_acquire(...) \
- __atomic_op_acquire(atomic_fetch_sub, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_sub_release
-#define atomic_fetch_sub_release(...) \
- __atomic_op_release(atomic_fetch_sub, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_sub
-#define atomic_fetch_sub(...) \
- __atomic_op_fence(atomic_fetch_sub, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_sub_relaxed */
-
-/* atomic_fetch_dec_relaxed */
-#ifndef atomic_fetch_dec_relaxed
-
-#ifndef atomic_fetch_dec
-#define atomic_fetch_dec(v) atomic_fetch_sub(1, (v))
-#define atomic_fetch_dec_relaxed(v) atomic_fetch_sub_relaxed(1, (v))
-#define atomic_fetch_dec_acquire(v) atomic_fetch_sub_acquire(1, (v))
-#define atomic_fetch_dec_release(v) atomic_fetch_sub_release(1, (v))
-#else /* atomic_fetch_dec */
-#define atomic_fetch_dec_relaxed atomic_fetch_dec
-#define atomic_fetch_dec_acquire atomic_fetch_dec
-#define atomic_fetch_dec_release atomic_fetch_dec
-#endif /* atomic_fetch_dec */
-
-#else /* atomic_fetch_dec_relaxed */
-
-#ifndef atomic_fetch_dec_acquire
-#define atomic_fetch_dec_acquire(...) \
- __atomic_op_acquire(atomic_fetch_dec, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_dec_release
-#define atomic_fetch_dec_release(...) \
- __atomic_op_release(atomic_fetch_dec, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_dec
-#define atomic_fetch_dec(...) \
- __atomic_op_fence(atomic_fetch_dec, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_dec_relaxed */
-
-/* atomic_fetch_or_relaxed */
-#ifndef atomic_fetch_or_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or
-#define atomic_fetch_or_acquire atomic_fetch_or
-#define atomic_fetch_or_release atomic_fetch_or
-
-#else /* atomic_fetch_or_relaxed */
-
-#ifndef atomic_fetch_or_acquire
-#define atomic_fetch_or_acquire(...) \
- __atomic_op_acquire(atomic_fetch_or, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_or_release
-#define atomic_fetch_or_release(...) \
- __atomic_op_release(atomic_fetch_or, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_or
-#define atomic_fetch_or(...) \
- __atomic_op_fence(atomic_fetch_or, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_or_relaxed */
-
-/* atomic_fetch_and_relaxed */
-#ifndef atomic_fetch_and_relaxed
-#define atomic_fetch_and_relaxed atomic_fetch_and
-#define atomic_fetch_and_acquire atomic_fetch_and
-#define atomic_fetch_and_release atomic_fetch_and
-
-#else /* atomic_fetch_and_relaxed */
-
-#ifndef atomic_fetch_and_acquire
-#define atomic_fetch_and_acquire(...) \
- __atomic_op_acquire(atomic_fetch_and, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_and_release
-#define atomic_fetch_and_release(...) \
- __atomic_op_release(atomic_fetch_and, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_and
-#define atomic_fetch_and(...) \
- __atomic_op_fence(atomic_fetch_and, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_and_relaxed */
-
-#ifdef atomic_andnot
-/* atomic_fetch_andnot_relaxed */
-#ifndef atomic_fetch_andnot_relaxed
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot
-#define atomic_fetch_andnot_release atomic_fetch_andnot
-
-#else /* atomic_fetch_andnot_relaxed */
-
-#ifndef atomic_fetch_andnot_acquire
-#define atomic_fetch_andnot_acquire(...) \
- __atomic_op_acquire(atomic_fetch_andnot, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_andnot_release
-#define atomic_fetch_andnot_release(...) \
- __atomic_op_release(atomic_fetch_andnot, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_andnot
-#define atomic_fetch_andnot(...) \
- __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_andnot_relaxed */
-#endif /* atomic_andnot */
-
-/* atomic_fetch_xor_relaxed */
-#ifndef atomic_fetch_xor_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor
-#define atomic_fetch_xor_acquire atomic_fetch_xor
-#define atomic_fetch_xor_release atomic_fetch_xor
-
-#else /* atomic_fetch_xor_relaxed */
-
-#ifndef atomic_fetch_xor_acquire
-#define atomic_fetch_xor_acquire(...) \
- __atomic_op_acquire(atomic_fetch_xor, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_xor_release
-#define atomic_fetch_xor_release(...) \
- __atomic_op_release(atomic_fetch_xor, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_xor
-#define atomic_fetch_xor(...) \
- __atomic_op_fence(atomic_fetch_xor, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_xor_relaxed */
-
-
-/* atomic_xchg_relaxed */
-#ifndef atomic_xchg_relaxed
-#define atomic_xchg_relaxed atomic_xchg
-#define atomic_xchg_acquire atomic_xchg
-#define atomic_xchg_release atomic_xchg
-
-#else /* atomic_xchg_relaxed */
-
-#ifndef atomic_xchg_acquire
-#define atomic_xchg_acquire(...) \
- __atomic_op_acquire(atomic_xchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic_xchg_release
-#define atomic_xchg_release(...) \
- __atomic_op_release(atomic_xchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic_xchg
-#define atomic_xchg(...) \
- __atomic_op_fence(atomic_xchg, __VA_ARGS__)
-#endif
-#endif /* atomic_xchg_relaxed */
-
-/* atomic_cmpxchg_relaxed */
-#ifndef atomic_cmpxchg_relaxed
-#define atomic_cmpxchg_relaxed atomic_cmpxchg
-#define atomic_cmpxchg_acquire atomic_cmpxchg
-#define atomic_cmpxchg_release atomic_cmpxchg
-
-#else /* atomic_cmpxchg_relaxed */
-
-#ifndef atomic_cmpxchg_acquire
-#define atomic_cmpxchg_acquire(...) \
- __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic_cmpxchg_release
-#define atomic_cmpxchg_release(...) \
- __atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic_cmpxchg
-#define atomic_cmpxchg(...) \
- __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
-#endif
-#endif /* atomic_cmpxchg_relaxed */
-
-#ifndef atomic_try_cmpxchg
-
-#define __atomic_try_cmpxchg(type, _p, _po, _n) \
-({ \
- typeof(_po) __po = (_po); \
- typeof(*(_po)) __r, __o = *__po; \
- __r = atomic_cmpxchg##type((_p), __o, (_n)); \
- if (unlikely(__r != __o)) \
- *__po = __r; \
- likely(__r == __o); \
-})
-
-#define atomic_try_cmpxchg(_p, _po, _n) __atomic_try_cmpxchg(, _p, _po, _n)
-#define atomic_try_cmpxchg_relaxed(_p, _po, _n) __atomic_try_cmpxchg(_relaxed, _p, _po, _n)
-#define atomic_try_cmpxchg_acquire(_p, _po, _n) __atomic_try_cmpxchg(_acquire, _p, _po, _n)
-#define atomic_try_cmpxchg_release(_p, _po, _n) __atomic_try_cmpxchg(_release, _p, _po, _n)
-
-#else /* atomic_try_cmpxchg */
-#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg
-#endif /* atomic_try_cmpxchg */
-
-/* cmpxchg_relaxed */
-#ifndef cmpxchg_relaxed
-#define cmpxchg_relaxed cmpxchg
-#define cmpxchg_acquire cmpxchg
-#define cmpxchg_release cmpxchg
-
-#else /* cmpxchg_relaxed */
-
-#ifndef cmpxchg_acquire
-#define cmpxchg_acquire(...) \
- __atomic_op_acquire(cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg_release
-#define cmpxchg_release(...) \
- __atomic_op_release(cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg
-#define cmpxchg(...) \
- __atomic_op_fence(cmpxchg, __VA_ARGS__)
-#endif
-#endif /* cmpxchg_relaxed */
-
-/* cmpxchg64_relaxed */
-#ifndef cmpxchg64_relaxed
-#define cmpxchg64_relaxed cmpxchg64
-#define cmpxchg64_acquire cmpxchg64
-#define cmpxchg64_release cmpxchg64
-
-#else /* cmpxchg64_relaxed */
-
-#ifndef cmpxchg64_acquire
-#define cmpxchg64_acquire(...) \
- __atomic_op_acquire(cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg64_release
-#define cmpxchg64_release(...) \
- __atomic_op_release(cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg64
-#define cmpxchg64(...) \
- __atomic_op_fence(cmpxchg64, __VA_ARGS__)
-#endif
-#endif /* cmpxchg64_relaxed */
-
-/* xchg_relaxed */
-#ifndef xchg_relaxed
-#define xchg_relaxed xchg
-#define xchg_acquire xchg
-#define xchg_release xchg
-
-#else /* xchg_relaxed */
-
-#ifndef xchg_acquire
-#define xchg_acquire(...) __atomic_op_acquire(xchg, __VA_ARGS__)
-#endif
-
-#ifndef xchg_release
-#define xchg_release(...) __atomic_op_release(xchg, __VA_ARGS__)
-#endif
-
-#ifndef xchg
-#define xchg(...) __atomic_op_fence(xchg, __VA_ARGS__)
-#endif
-#endif /* xchg_relaxed */
-
-/**
- * atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
- */
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
-{
- return __atomic_add_unless(v, a, u) != u;
-}
-
-/**
- * atomic_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1, so long as @v is non-zero.
- * Returns non-zero if @v was non-zero, and zero otherwise.
- */
-#ifndef atomic_inc_not_zero
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-#endif
-
-#ifndef atomic_andnot
-static inline void atomic_andnot(int i, atomic_t *v)
-{
- atomic_and(~i, v);
-}
-
-static inline int atomic_fetch_andnot(int i, atomic_t *v)
-{
- return atomic_fetch_and(~i, v);
-}
-
-static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v)
-{
- return atomic_fetch_and_relaxed(~i, v);
-}
-
-static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
- return atomic_fetch_and_acquire(~i, v);
-}
-
-static inline int atomic_fetch_andnot_release(int i, atomic_t *v)
-{
- return atomic_fetch_and_release(~i, v);
-}
-#endif
-
-/**
- * atomic_inc_not_zero_hint - increment if not null
- * @v: pointer of type atomic_t
- * @hint: probable value of the atomic before the increment
- *
- * This version of atomic_inc_not_zero() gives a hint of probable
- * value of the atomic. This helps processor to not read the memory
- * before doing the atomic read/modify/write cycle, lowering
- * number of bus transactions on some arches.
- *
- * Returns: 0 if increment was not done, 1 otherwise.
- */
-#ifndef atomic_inc_not_zero_hint
-static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
-{
- int val, c = hint;
-
- /* sanity test, should be removed by compiler if hint is a constant */
- if (!hint)
- return atomic_inc_not_zero(v);
-
- do {
- val = atomic_cmpxchg(v, c, c + 1);
- if (val == c)
- return 1;
- c = val;
- } while (c);
-
- return 0;
-}
-#endif
-
-#ifndef atomic_inc_unless_negative
-static inline int atomic_inc_unless_negative(atomic_t *p)
-{
- int v, v1;
- for (v = 0; v >= 0; v = v1) {
- v1 = atomic_cmpxchg(p, v, v + 1);
- if (likely(v1 == v))
- return 1;
- }
- return 0;
-}
-#endif
-
-#ifndef atomic_dec_unless_positive
-static inline int atomic_dec_unless_positive(atomic_t *p)
-{
- int v, v1;
- for (v = 0; v <= 0; v = v1) {
- v1 = atomic_cmpxchg(p, v, v - 1);
- if (likely(v1 == v))
- return 1;
- }
- return 0;
-}
-#endif
-
-/*
- * atomic_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic variable, v, was not decremented.
- */
-#ifndef atomic_dec_if_positive
-static inline int atomic_dec_if_positive(atomic_t *v)
-{
- int c, old, dec;
- c = atomic_read(v);
- for (;;) {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- old = atomic_cmpxchg((v), c, dec);
- if (likely(old == c))
- break;
- c = old;
- }
- return dec;
-}
-#endif
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#include <asm-generic/atomic64.h>
-#endif
-
-#ifndef atomic64_read_acquire
-#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter)
-#endif
-
-#ifndef atomic64_set_release
-#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i))
-#endif
-
-/* atomic64_add_return_relaxed */
-#ifndef atomic64_add_return_relaxed
-#define atomic64_add_return_relaxed atomic64_add_return
-#define atomic64_add_return_acquire atomic64_add_return
-#define atomic64_add_return_release atomic64_add_return
-
-#else /* atomic64_add_return_relaxed */
-
-#ifndef atomic64_add_return_acquire
-#define atomic64_add_return_acquire(...) \
- __atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_add_return_release
-#define atomic64_add_return_release(...) \
- __atomic_op_release(atomic64_add_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_add_return
-#define atomic64_add_return(...) \
- __atomic_op_fence(atomic64_add_return, __VA_ARGS__)
-#endif
-#endif /* atomic64_add_return_relaxed */
-
-/* atomic64_inc_return_relaxed */
-#ifndef atomic64_inc_return_relaxed
-#define atomic64_inc_return_relaxed atomic64_inc_return
-#define atomic64_inc_return_acquire atomic64_inc_return
-#define atomic64_inc_return_release atomic64_inc_return
-
-#else /* atomic64_inc_return_relaxed */
-
-#ifndef atomic64_inc_return_acquire
-#define atomic64_inc_return_acquire(...) \
- __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_inc_return_release
-#define atomic64_inc_return_release(...) \
- __atomic_op_release(atomic64_inc_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_inc_return
-#define atomic64_inc_return(...) \
- __atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
-#endif
-#endif /* atomic64_inc_return_relaxed */
-
-
-/* atomic64_sub_return_relaxed */
-#ifndef atomic64_sub_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return
-#define atomic64_sub_return_acquire atomic64_sub_return
-#define atomic64_sub_return_release atomic64_sub_return
-
-#else /* atomic64_sub_return_relaxed */
-
-#ifndef atomic64_sub_return_acquire
-#define atomic64_sub_return_acquire(...) \
- __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_sub_return_release
-#define atomic64_sub_return_release(...) \
- __atomic_op_release(atomic64_sub_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_sub_return
-#define atomic64_sub_return(...) \
- __atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
-#endif
-#endif /* atomic64_sub_return_relaxed */
-
-/* atomic64_dec_return_relaxed */
-#ifndef atomic64_dec_return_relaxed
-#define atomic64_dec_return_relaxed atomic64_dec_return
-#define atomic64_dec_return_acquire atomic64_dec_return
-#define atomic64_dec_return_release atomic64_dec_return
-
-#else /* atomic64_dec_return_relaxed */
-
-#ifndef atomic64_dec_return_acquire
-#define atomic64_dec_return_acquire(...) \
- __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_dec_return_release
-#define atomic64_dec_return_release(...) \
- __atomic_op_release(atomic64_dec_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_dec_return
-#define atomic64_dec_return(...) \
- __atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
-#endif
-#endif /* atomic64_dec_return_relaxed */
-
-
-/* atomic64_fetch_add_relaxed */
-#ifndef atomic64_fetch_add_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add
-#define atomic64_fetch_add_acquire atomic64_fetch_add
-#define atomic64_fetch_add_release atomic64_fetch_add
-
-#else /* atomic64_fetch_add_relaxed */
-
-#ifndef atomic64_fetch_add_acquire
-#define atomic64_fetch_add_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_add, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_add_release
-#define atomic64_fetch_add_release(...) \
- __atomic_op_release(atomic64_fetch_add, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_add
-#define atomic64_fetch_add(...) \
- __atomic_op_fence(atomic64_fetch_add, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_add_relaxed */
-
-/* atomic64_fetch_inc_relaxed */
-#ifndef atomic64_fetch_inc_relaxed
-
-#ifndef atomic64_fetch_inc
-#define atomic64_fetch_inc(v) atomic64_fetch_add(1, (v))
-#define atomic64_fetch_inc_relaxed(v) atomic64_fetch_add_relaxed(1, (v))
-#define atomic64_fetch_inc_acquire(v) atomic64_fetch_add_acquire(1, (v))
-#define atomic64_fetch_inc_release(v) atomic64_fetch_add_release(1, (v))
-#else /* atomic64_fetch_inc */
-#define atomic64_fetch_inc_relaxed atomic64_fetch_inc
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc
-#define atomic64_fetch_inc_release atomic64_fetch_inc
-#endif /* atomic64_fetch_inc */
-
-#else /* atomic64_fetch_inc_relaxed */
-
-#ifndef atomic64_fetch_inc_acquire
-#define atomic64_fetch_inc_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_inc, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_inc_release
-#define atomic64_fetch_inc_release(...) \
- __atomic_op_release(atomic64_fetch_inc, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_inc
-#define atomic64_fetch_inc(...) \
- __atomic_op_fence(atomic64_fetch_inc, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_inc_relaxed */
-
-/* atomic64_fetch_sub_relaxed */
-#ifndef atomic64_fetch_sub_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub
-#define atomic64_fetch_sub_acquire atomic64_fetch_sub
-#define atomic64_fetch_sub_release atomic64_fetch_sub
-
-#else /* atomic64_fetch_sub_relaxed */
-
-#ifndef atomic64_fetch_sub_acquire
-#define atomic64_fetch_sub_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_sub, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_sub_release
-#define atomic64_fetch_sub_release(...) \
- __atomic_op_release(atomic64_fetch_sub, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_sub
-#define atomic64_fetch_sub(...) \
- __atomic_op_fence(atomic64_fetch_sub, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_sub_relaxed */
-
-/* atomic64_fetch_dec_relaxed */
-#ifndef atomic64_fetch_dec_relaxed
-
-#ifndef atomic64_fetch_dec
-#define atomic64_fetch_dec(v) atomic64_fetch_sub(1, (v))
-#define atomic64_fetch_dec_relaxed(v) atomic64_fetch_sub_relaxed(1, (v))
-#define atomic64_fetch_dec_acquire(v) atomic64_fetch_sub_acquire(1, (v))
-#define atomic64_fetch_dec_release(v) atomic64_fetch_sub_release(1, (v))
-#else /* atomic64_fetch_dec */
-#define atomic64_fetch_dec_relaxed atomic64_fetch_dec
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec
-#define atomic64_fetch_dec_release atomic64_fetch_dec
-#endif /* atomic64_fetch_dec */
-
-#else /* atomic64_fetch_dec_relaxed */
-
-#ifndef atomic64_fetch_dec_acquire
-#define atomic64_fetch_dec_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_dec, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_dec_release
-#define atomic64_fetch_dec_release(...) \
- __atomic_op_release(atomic64_fetch_dec, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_dec
-#define atomic64_fetch_dec(...) \
- __atomic_op_fence(atomic64_fetch_dec, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_dec_relaxed */
-
-/* atomic64_fetch_or_relaxed */
-#ifndef atomic64_fetch_or_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or
-#define atomic64_fetch_or_acquire atomic64_fetch_or
-#define atomic64_fetch_or_release atomic64_fetch_or
-
-#else /* atomic64_fetch_or_relaxed */
-
-#ifndef atomic64_fetch_or_acquire
-#define atomic64_fetch_or_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_or, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_or_release
-#define atomic64_fetch_or_release(...) \
- __atomic_op_release(atomic64_fetch_or, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_or
-#define atomic64_fetch_or(...) \
- __atomic_op_fence(atomic64_fetch_or, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_or_relaxed */
-
-/* atomic64_fetch_and_relaxed */
-#ifndef atomic64_fetch_and_relaxed
-#define atomic64_fetch_and_relaxed atomic64_fetch_and
-#define atomic64_fetch_and_acquire atomic64_fetch_and
-#define atomic64_fetch_and_release atomic64_fetch_and
-
-#else /* atomic64_fetch_and_relaxed */
-
-#ifndef atomic64_fetch_and_acquire
-#define atomic64_fetch_and_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_and, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_and_release
-#define atomic64_fetch_and_release(...) \
- __atomic_op_release(atomic64_fetch_and, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_and
-#define atomic64_fetch_and(...) \
- __atomic_op_fence(atomic64_fetch_and, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_and_relaxed */
-
-#ifdef atomic64_andnot
-/* atomic64_fetch_andnot_relaxed */
-#ifndef atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot
-
-#else /* atomic64_fetch_andnot_relaxed */
-
-#ifndef atomic64_fetch_andnot_acquire
-#define atomic64_fetch_andnot_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_andnot, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_andnot_release
-#define atomic64_fetch_andnot_release(...) \
- __atomic_op_release(atomic64_fetch_andnot, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_andnot
-#define atomic64_fetch_andnot(...) \
- __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_andnot_relaxed */
-#endif /* atomic64_andnot */
-
-/* atomic64_fetch_xor_relaxed */
-#ifndef atomic64_fetch_xor_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor
-#define atomic64_fetch_xor_acquire atomic64_fetch_xor
-#define atomic64_fetch_xor_release atomic64_fetch_xor
-
-#else /* atomic64_fetch_xor_relaxed */
-
-#ifndef atomic64_fetch_xor_acquire
-#define atomic64_fetch_xor_acquire(...) \
- __atomic_op_acquire(atomic64_fetch_xor, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_xor_release
-#define atomic64_fetch_xor_release(...) \
- __atomic_op_release(atomic64_fetch_xor, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_xor
-#define atomic64_fetch_xor(...) \
- __atomic_op_fence(atomic64_fetch_xor, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_xor_relaxed */
-
-
-/* atomic64_xchg_relaxed */
-#ifndef atomic64_xchg_relaxed
-#define atomic64_xchg_relaxed atomic64_xchg
-#define atomic64_xchg_acquire atomic64_xchg
-#define atomic64_xchg_release atomic64_xchg
-
-#else /* atomic64_xchg_relaxed */
-
-#ifndef atomic64_xchg_acquire
-#define atomic64_xchg_acquire(...) \
- __atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_xchg_release
-#define atomic64_xchg_release(...) \
- __atomic_op_release(atomic64_xchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_xchg
-#define atomic64_xchg(...) \
- __atomic_op_fence(atomic64_xchg, __VA_ARGS__)
-#endif
-#endif /* atomic64_xchg_relaxed */
-
-/* atomic64_cmpxchg_relaxed */
-#ifndef atomic64_cmpxchg_relaxed
-#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
-#define atomic64_cmpxchg_acquire atomic64_cmpxchg
-#define atomic64_cmpxchg_release atomic64_cmpxchg
-
-#else /* atomic64_cmpxchg_relaxed */
-
-#ifndef atomic64_cmpxchg_acquire
-#define atomic64_cmpxchg_acquire(...) \
- __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_cmpxchg_release
-#define atomic64_cmpxchg_release(...) \
- __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_cmpxchg
-#define atomic64_cmpxchg(...) \
- __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
-#endif
-#endif /* atomic64_cmpxchg_relaxed */
-
-#ifndef atomic64_try_cmpxchg
-
-#define __atomic64_try_cmpxchg(type, _p, _po, _n) \
-({ \
- typeof(_po) __po = (_po); \
- typeof(*(_po)) __r, __o = *__po; \
- __r = atomic64_cmpxchg##type((_p), __o, (_n)); \
- if (unlikely(__r != __o)) \
- *__po = __r; \
- likely(__r == __o); \
-})
-
-#define atomic64_try_cmpxchg(_p, _po, _n) __atomic64_try_cmpxchg(, _p, _po, _n)
-#define atomic64_try_cmpxchg_relaxed(_p, _po, _n) __atomic64_try_cmpxchg(_relaxed, _p, _po, _n)
-#define atomic64_try_cmpxchg_acquire(_p, _po, _n) __atomic64_try_cmpxchg(_acquire, _p, _po, _n)
-#define atomic64_try_cmpxchg_release(_p, _po, _n) __atomic64_try_cmpxchg(_release, _p, _po, _n)
-
-#else /* atomic64_try_cmpxchg */
-#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg
-#endif /* atomic64_try_cmpxchg */
-
-#ifndef atomic64_andnot
-static inline void atomic64_andnot(long long i, atomic64_t *v)
-{
- atomic64_and(~i, v);
-}
-
-static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v)
-{
- return atomic64_fetch_and(~i, v);
-}
-
-static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v)
-{
- return atomic64_fetch_and_relaxed(~i, v);
-}
-
-static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v)
-{
- return atomic64_fetch_and_acquire(~i, v);
-}
-
-static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v)
-{
- return atomic64_fetch_and_release(~i, v);
-}
-#endif
-#include <asm-generic/atomic-long.h>
+#include <linux/atomic/atomic-arch-fallback.h>
+#include <linux/atomic/atomic-long.h>
+#include <linux/atomic/atomic-instrumented.h>
#endif /* _LINUX_ATOMIC_H */
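Note: with the old open-coded fallbacks above deleted, <linux/atomic.h> becomes a thin umbrella over three generated headers: the arch-level ordering fallbacks (atomic-arch-fallback.h), the atomic_long_t layer (atomic-long.h), and the KASAN/KCSAN instrumentation wrappers (atomic-instrumented.h). Callers are unaffected; a minimal, hypothetical user looks the same before and after:

    #include <linux/atomic.h>

    static atomic_t nr_users = ATOMIC_INIT(0);

    static void get_user_ref(void)
    {
            /* Resolves to the instrumented wrapper, which calls the
             * arch op or one of the generated fallbacks below. */
            atomic_inc(&nr_users);
    }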
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
new file mode 100644
index 000000000000..a6e4437c5f36
--- /dev/null
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -0,0 +1,2671 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-fallback.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_FALLBACK_H
+#define _LINUX_ATOMIC_FALLBACK_H
+
+#include <linux/compiler.h>
+
+#ifndef arch_xchg_relaxed
+#define arch_xchg_acquire arch_xchg
+#define arch_xchg_release arch_xchg
+#define arch_xchg_relaxed arch_xchg
+#else /* arch_xchg_relaxed */
+
+#ifndef arch_xchg_acquire
+#define arch_xchg_acquire(...) \
+ __atomic_op_acquire(arch_xchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_xchg_release
+#define arch_xchg_release(...) \
+ __atomic_op_release(arch_xchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_xchg
+#define arch_xchg(...) \
+ __atomic_op_fence(arch_xchg, __VA_ARGS__)
+#endif
+
+#endif /* arch_xchg_relaxed */
+
+#ifndef arch_cmpxchg_relaxed
+#define arch_cmpxchg_acquire arch_cmpxchg
+#define arch_cmpxchg_release arch_cmpxchg
+#define arch_cmpxchg_relaxed arch_cmpxchg
+#else /* arch_cmpxchg_relaxed */
+
+#ifndef arch_cmpxchg_acquire
+#define arch_cmpxchg_acquire(...) \
+ __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg_release
+#define arch_cmpxchg_release(...) \
+ __atomic_op_release(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg
+#define arch_cmpxchg(...) \
+ __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#endif /* arch_cmpxchg_relaxed */
+
+#ifndef arch_cmpxchg64_relaxed
+#define arch_cmpxchg64_acquire arch_cmpxchg64
+#define arch_cmpxchg64_release arch_cmpxchg64
+#define arch_cmpxchg64_relaxed arch_cmpxchg64
+#else /* arch_cmpxchg64_relaxed */
+
+#ifndef arch_cmpxchg64_acquire
+#define arch_cmpxchg64_acquire(...) \
+ __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg64_release
+#define arch_cmpxchg64_release(...) \
+ __atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg64
+#define arch_cmpxchg64(...) \
+ __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
+#endif
+
+#endif /* arch_cmpxchg64_relaxed */
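Each op family above keys off the _relaxed symbol. If the architecture defines only the fully ordered op (no _relaxed), all four orderings alias it; if it defines the _relaxed form, the stronger orderings are synthesized by the __atomic_op_* wrappers. For reference, those wrappers are defined in <linux/atomic.h> roughly as:

    #define __atomic_op_acquire(op, args...)                          \
    ({                                                                \
            typeof(op##_relaxed(args)) __ret = op##_relaxed(args);    \
            __atomic_acquire_fence();                                 \
            __ret;                                                    \
    })

i.e. acquire places a fence after the relaxed op, release places one before it, and __atomic_op_fence brackets it on both sides.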
+
+#ifndef arch_try_cmpxchg_relaxed
+#ifdef arch_try_cmpxchg
+#define arch_try_cmpxchg_acquire arch_try_cmpxchg
+#define arch_try_cmpxchg_release arch_try_cmpxchg
+#define arch_try_cmpxchg_relaxed arch_try_cmpxchg
+#endif /* arch_try_cmpxchg */
+
+#ifndef arch_try_cmpxchg
+#define arch_try_cmpxchg(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = arch_cmpxchg((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg */
+
+#ifndef arch_try_cmpxchg_acquire
+#define arch_try_cmpxchg_acquire(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = arch_cmpxchg_acquire((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg_acquire */
+
+#ifndef arch_try_cmpxchg_release
+#define arch_try_cmpxchg_release(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = arch_cmpxchg_release((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg_release */
+
+#ifndef arch_try_cmpxchg_relaxed
+#define arch_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = arch_cmpxchg_relaxed((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg_relaxed */
+
+#else /* arch_try_cmpxchg_relaxed */
+
+#ifndef arch_try_cmpxchg_acquire
+#define arch_try_cmpxchg_acquire(...) \
+ __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_try_cmpxchg_release
+#define arch_try_cmpxchg_release(...) \
+ __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_try_cmpxchg
+#define arch_try_cmpxchg(...) \
+ __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
+#endif
+
+#endif /* arch_try_cmpxchg_relaxed */
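The try_cmpxchg() form folds the common compare-exchange retry idiom: it returns true on success and, on failure, writes the value it actually observed back through the old-value pointer, so the caller can loop without re-reading. The _local variants further down follow the same shape. A minimal sketch of a typical caller, here using the atomic_t-level wrapper and a hypothetical cap policy:

    static int add_capped(atomic_t *v, int a, int cap)
    {
            int old = atomic_read(v);

            do {
                    if (old + a > cap)
                            return old;     /* refuse, leave unchanged */
            } while (!atomic_try_cmpxchg(v, &old, old + a));

            return old + a;
    }

On failure, atomic_try_cmpxchg() has already refreshed `old`, which is why the loop body needs no explicit re-read.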
+
+#ifndef arch_try_cmpxchg64_relaxed
+#ifdef arch_try_cmpxchg64
+#define arch_try_cmpxchg64_acquire arch_try_cmpxchg64
+#define arch_try_cmpxchg64_release arch_try_cmpxchg64
+#define arch_try_cmpxchg64_relaxed arch_try_cmpxchg64
+#endif /* arch_try_cmpxchg64 */
+
+#ifndef arch_try_cmpxchg64
+#define arch_try_cmpxchg64(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = arch_cmpxchg64((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg64 */
+
+#ifndef arch_try_cmpxchg64_acquire
+#define arch_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = arch_cmpxchg64_acquire((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg64_acquire */
+
+#ifndef arch_try_cmpxchg64_release
+#define arch_try_cmpxchg64_release(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = arch_cmpxchg64_release((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg64_release */
+
+#ifndef arch_try_cmpxchg64_relaxed
+#define arch_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = arch_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg64_relaxed */
+
+#else /* arch_try_cmpxchg64_relaxed */
+
+#ifndef arch_try_cmpxchg64_acquire
+#define arch_try_cmpxchg64_acquire(...) \
+ __atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef arch_try_cmpxchg64_release
+#define arch_try_cmpxchg64_release(...) \
+ __atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef arch_try_cmpxchg64
+#define arch_try_cmpxchg64(...) \
+ __atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
+#endif
+
+#endif /* arch_try_cmpxchg64_relaxed */
+
+#ifndef arch_try_cmpxchg_local
+#define arch_try_cmpxchg_local(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = arch_cmpxchg_local((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg_local */
+
+#ifndef arch_try_cmpxchg64_local
+#define arch_try_cmpxchg64_local(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = arch_cmpxchg64_local((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg64_local */
+
+#ifndef arch_atomic_read_acquire
+static __always_inline int
+arch_atomic_read_acquire(const atomic_t *v)
+{
+ int ret;
+
+ if (__native_word(atomic_t)) {
+ ret = smp_load_acquire(&(v)->counter);
+ } else {
+ ret = arch_atomic_read(v);
+ __atomic_acquire_fence();
+ }
+
+ return ret;
+}
+#define arch_atomic_read_acquire arch_atomic_read_acquire
+#endif
+
+#ifndef arch_atomic_set_release
+static __always_inline void
+arch_atomic_set_release(atomic_t *v, int i)
+{
+ if (__native_word(atomic_t)) {
+ smp_store_release(&(v)->counter, i);
+ } else {
+ __atomic_release_fence();
+ arch_atomic_set(v, i);
+ }
+}
+#define arch_atomic_set_release arch_atomic_set_release
+#endif
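For types that are a native machine word, read_acquire/set_release degrade to smp_load_acquire()/smp_store_release(); otherwise an explicit fence brackets the plain access. The pair implements the standard publish/consume pattern. A minimal sketch, with `data`, `ready` and use() hypothetical:

    /* writer */
    data = 42;                         /* plain store            */
    atomic_set_release(&ready, 1);     /* orders the store above */

    /* reader */
    if (atomic_read_acquire(&ready))
            use(data);                 /* guaranteed to see 42   */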
+
+#ifndef arch_atomic_add_return_relaxed
+#define arch_atomic_add_return_acquire arch_atomic_add_return
+#define arch_atomic_add_return_release arch_atomic_add_return
+#define arch_atomic_add_return_relaxed arch_atomic_add_return
+#else /* arch_atomic_add_return_relaxed */
+
+#ifndef arch_atomic_add_return_acquire
+static __always_inline int
+arch_atomic_add_return_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_add_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
+#endif
+
+#ifndef arch_atomic_add_return_release
+static __always_inline int
+arch_atomic_add_return_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_add_return_relaxed(i, v);
+}
+#define arch_atomic_add_return_release arch_atomic_add_return_release
+#endif
+
+#ifndef arch_atomic_add_return
+static __always_inline int
+arch_atomic_add_return(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_add_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_add_return arch_atomic_add_return
+#endif
+
+#endif /* arch_atomic_add_return_relaxed */
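This block shows the RMW composition rule used throughout the file: _acquire is the relaxed op followed by __atomic_acquire_fence(), _release is __atomic_release_fence() followed by the relaxed op, and the fully ordered form wraps the relaxed op in pre/post full fences. An architecture therefore only needs to supply the relaxed primitive. A sketch of a hypothetical arch header (the LL/SC helper is an assumption, not real API):

    /* Hypothetical arch: only the relaxed primitive is implemented. */
    static __always_inline int arch_atomic_add_return_relaxed(int i, atomic_t *v)
    {
            return __hypothetical_llsc_add_return(i, v);
    }
    #define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
    /* The fallback above derives _acquire, _release and the fully
     * ordered arch_atomic_add_return from this single definition. */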
+
+#ifndef arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add
+#else /* arch_atomic_fetch_add_relaxed */
+
+#ifndef arch_atomic_fetch_add_acquire
+static __always_inline int
+arch_atomic_fetch_add_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_add_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire
+#endif
+
+#ifndef arch_atomic_fetch_add_release
+static __always_inline int
+arch_atomic_fetch_add_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_add_relaxed(i, v);
+}
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release
+#endif
+
+#ifndef arch_atomic_fetch_add
+static __always_inline int
+arch_atomic_fetch_add(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_add_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#endif
+
+#endif /* arch_atomic_fetch_add_relaxed */
+
+#ifndef arch_atomic_sub_return_relaxed
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return
+#define arch_atomic_sub_return_release arch_atomic_sub_return
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return
+#else /* arch_atomic_sub_return_relaxed */
+
+#ifndef arch_atomic_sub_return_acquire
+static __always_inline int
+arch_atomic_sub_return_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_sub_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire
+#endif
+
+#ifndef arch_atomic_sub_return_release
+static __always_inline int
+arch_atomic_sub_return_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_sub_return_relaxed(i, v);
+}
+#define arch_atomic_sub_return_release arch_atomic_sub_return_release
+#endif
+
+#ifndef arch_atomic_sub_return
+static __always_inline int
+arch_atomic_sub_return(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_sub_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_sub_return arch_atomic_sub_return
+#endif
+
+#endif /* arch_atomic_sub_return_relaxed */
+
+#ifndef arch_atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub
+#else /* arch_atomic_fetch_sub_relaxed */
+
+#ifndef arch_atomic_fetch_sub_acquire
+static __always_inline int
+arch_atomic_fetch_sub_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_sub_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire
+#endif
+
+#ifndef arch_atomic_fetch_sub_release
+static __always_inline int
+arch_atomic_fetch_sub_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_sub_relaxed(i, v);
+}
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release
+#endif
+
+#ifndef arch_atomic_fetch_sub
+static __always_inline int
+arch_atomic_fetch_sub(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_sub_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#endif
+
+#endif /* arch_atomic_fetch_sub_relaxed */
+
+#ifndef arch_atomic_inc
+static __always_inline void
+arch_atomic_inc(atomic_t *v)
+{
+ arch_atomic_add(1, v);
+}
+#define arch_atomic_inc arch_atomic_inc
+#endif
+
+#ifndef arch_atomic_inc_return_relaxed
+#ifdef arch_atomic_inc_return
+#define arch_atomic_inc_return_acquire arch_atomic_inc_return
+#define arch_atomic_inc_return_release arch_atomic_inc_return
+#define arch_atomic_inc_return_relaxed arch_atomic_inc_return
+#endif /* arch_atomic_inc_return */
+
+#ifndef arch_atomic_inc_return
+static __always_inline int
+arch_atomic_inc_return(atomic_t *v)
+{
+ return arch_atomic_add_return(1, v);
+}
+#define arch_atomic_inc_return arch_atomic_inc_return
+#endif
+
+#ifndef arch_atomic_inc_return_acquire
+static __always_inline int
+arch_atomic_inc_return_acquire(atomic_t *v)
+{
+ return arch_atomic_add_return_acquire(1, v);
+}
+#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
+#endif
+
+#ifndef arch_atomic_inc_return_release
+static __always_inline int
+arch_atomic_inc_return_release(atomic_t *v)
+{
+ return arch_atomic_add_return_release(1, v);
+}
+#define arch_atomic_inc_return_release arch_atomic_inc_return_release
+#endif
+
+#ifndef arch_atomic_inc_return_relaxed
+static __always_inline int
+arch_atomic_inc_return_relaxed(atomic_t *v)
+{
+ return arch_atomic_add_return_relaxed(1, v);
+}
+#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
+#endif
+
+#else /* arch_atomic_inc_return_relaxed */
+
+#ifndef arch_atomic_inc_return_acquire
+static __always_inline int
+arch_atomic_inc_return_acquire(atomic_t *v)
+{
+ int ret = arch_atomic_inc_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
+#endif
+
+#ifndef arch_atomic_inc_return_release
+static __always_inline int
+arch_atomic_inc_return_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_inc_return_relaxed(v);
+}
+#define arch_atomic_inc_return_release arch_atomic_inc_return_release
+#endif
+
+#ifndef arch_atomic_inc_return
+static __always_inline int
+arch_atomic_inc_return(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_inc_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_inc_return arch_atomic_inc_return
+#endif
+
+#endif /* arch_atomic_inc_return_relaxed */
+
+#ifndef arch_atomic_fetch_inc_relaxed
+#ifdef arch_atomic_fetch_inc
+#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc
+#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc
+#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc
+#endif /* arch_atomic_fetch_inc */
+
+#ifndef arch_atomic_fetch_inc
+static __always_inline int
+arch_atomic_fetch_inc(atomic_t *v)
+{
+ return arch_atomic_fetch_add(1, v);
+}
+#define arch_atomic_fetch_inc arch_atomic_fetch_inc
+#endif
+
+#ifndef arch_atomic_fetch_inc_acquire
+static __always_inline int
+arch_atomic_fetch_inc_acquire(atomic_t *v)
+{
+ return arch_atomic_fetch_add_acquire(1, v);
+}
+#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic_fetch_inc_release
+static __always_inline int
+arch_atomic_fetch_inc_release(atomic_t *v)
+{
+ return arch_atomic_fetch_add_release(1, v);
+}
+#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
+#endif
+
+#ifndef arch_atomic_fetch_inc_relaxed
+static __always_inline int
+arch_atomic_fetch_inc_relaxed(atomic_t *v)
+{
+ return arch_atomic_fetch_add_relaxed(1, v);
+}
+#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc_relaxed
+#endif
+
+#else /* arch_atomic_fetch_inc_relaxed */
+
+#ifndef arch_atomic_fetch_inc_acquire
+static __always_inline int
+arch_atomic_fetch_inc_acquire(atomic_t *v)
+{
+ int ret = arch_atomic_fetch_inc_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic_fetch_inc_release
+static __always_inline int
+arch_atomic_fetch_inc_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_inc_relaxed(v);
+}
+#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
+#endif
+
+#ifndef arch_atomic_fetch_inc
+static __always_inline int
+arch_atomic_fetch_inc(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_inc_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_inc arch_atomic_fetch_inc
+#endif
+
+#endif /* arch_atomic_fetch_inc_relaxed */
+
+#ifndef arch_atomic_dec
+static __always_inline void
+arch_atomic_dec(atomic_t *v)
+{
+ arch_atomic_sub(1, v);
+}
+#define arch_atomic_dec arch_atomic_dec
+#endif
+
+#ifndef arch_atomic_dec_return_relaxed
+#ifdef arch_atomic_dec_return
+#define arch_atomic_dec_return_acquire arch_atomic_dec_return
+#define arch_atomic_dec_return_release arch_atomic_dec_return
+#define arch_atomic_dec_return_relaxed arch_atomic_dec_return
+#endif /* arch_atomic_dec_return */
+
+#ifndef arch_atomic_dec_return
+static __always_inline int
+arch_atomic_dec_return(atomic_t *v)
+{
+ return arch_atomic_sub_return(1, v);
+}
+#define arch_atomic_dec_return arch_atomic_dec_return
+#endif
+
+#ifndef arch_atomic_dec_return_acquire
+static __always_inline int
+arch_atomic_dec_return_acquire(atomic_t *v)
+{
+ return arch_atomic_sub_return_acquire(1, v);
+}
+#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
+#endif
+
+#ifndef arch_atomic_dec_return_release
+static __always_inline int
+arch_atomic_dec_return_release(atomic_t *v)
+{
+ return arch_atomic_sub_return_release(1, v);
+}
+#define arch_atomic_dec_return_release arch_atomic_dec_return_release
+#endif
+
+#ifndef arch_atomic_dec_return_relaxed
+static __always_inline int
+arch_atomic_dec_return_relaxed(atomic_t *v)
+{
+ return arch_atomic_sub_return_relaxed(1, v);
+}
+#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
+#endif
+
+#else /* arch_atomic_dec_return_relaxed */
+
+#ifndef arch_atomic_dec_return_acquire
+static __always_inline int
+arch_atomic_dec_return_acquire(atomic_t *v)
+{
+ int ret = arch_atomic_dec_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
+#endif
+
+#ifndef arch_atomic_dec_return_release
+static __always_inline int
+arch_atomic_dec_return_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_dec_return_relaxed(v);
+}
+#define arch_atomic_dec_return_release arch_atomic_dec_return_release
+#endif
+
+#ifndef arch_atomic_dec_return
+static __always_inline int
+arch_atomic_dec_return(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_dec_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_dec_return arch_atomic_dec_return
+#endif
+
+#endif /* arch_atomic_dec_return_relaxed */
+
+#ifndef arch_atomic_fetch_dec_relaxed
+#ifdef arch_atomic_fetch_dec
+#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec
+#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec
+#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec
+#endif /* arch_atomic_fetch_dec */
+
+#ifndef arch_atomic_fetch_dec
+static __always_inline int
+arch_atomic_fetch_dec(atomic_t *v)
+{
+ return arch_atomic_fetch_sub(1, v);
+}
+#define arch_atomic_fetch_dec arch_atomic_fetch_dec
+#endif
+
+#ifndef arch_atomic_fetch_dec_acquire
+static __always_inline int
+arch_atomic_fetch_dec_acquire(atomic_t *v)
+{
+ return arch_atomic_fetch_sub_acquire(1, v);
+}
+#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic_fetch_dec_release
+static __always_inline int
+arch_atomic_fetch_dec_release(atomic_t *v)
+{
+ return arch_atomic_fetch_sub_release(1, v);
+}
+#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
+#endif
+
+#ifndef arch_atomic_fetch_dec_relaxed
+static __always_inline int
+arch_atomic_fetch_dec_relaxed(atomic_t *v)
+{
+ return arch_atomic_fetch_sub_relaxed(1, v);
+}
+#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec_relaxed
+#endif
+
+#else /* arch_atomic_fetch_dec_relaxed */
+
+#ifndef arch_atomic_fetch_dec_acquire
+static __always_inline int
+arch_atomic_fetch_dec_acquire(atomic_t *v)
+{
+ int ret = arch_atomic_fetch_dec_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic_fetch_dec_release
+static __always_inline int
+arch_atomic_fetch_dec_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_dec_relaxed(v);
+}
+#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
+#endif
+
+#ifndef arch_atomic_fetch_dec
+static __always_inline int
+arch_atomic_fetch_dec(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_dec_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_dec arch_atomic_fetch_dec
+#endif
+
+#endif /* arch_atomic_fetch_dec_relaxed */
+
+#ifndef arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and
+#else /* arch_atomic_fetch_and_relaxed */
+
+#ifndef arch_atomic_fetch_and_acquire
+static __always_inline int
+arch_atomic_fetch_and_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_and_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire
+#endif
+
+#ifndef arch_atomic_fetch_and_release
+static __always_inline int
+arch_atomic_fetch_and_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_and_relaxed(i, v);
+}
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release
+#endif
+
+#ifndef arch_atomic_fetch_and
+static __always_inline int
+arch_atomic_fetch_and(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_and_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#endif
+
+#endif /* arch_atomic_fetch_and_relaxed */
+
+#ifndef arch_atomic_andnot
+static __always_inline void
+arch_atomic_andnot(int i, atomic_t *v)
+{
+ arch_atomic_and(~i, v);
+}
+#define arch_atomic_andnot arch_atomic_andnot
+#endif
+
+#ifndef arch_atomic_fetch_andnot_relaxed
+#ifdef arch_atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot
+#endif /* arch_atomic_fetch_andnot */
+
+#ifndef arch_atomic_fetch_andnot
+static __always_inline int
+arch_atomic_fetch_andnot(int i, atomic_t *v)
+{
+ return arch_atomic_fetch_and(~i, v);
+}
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#endif
+
+#ifndef arch_atomic_fetch_andnot_acquire
+static __always_inline int
+arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+ return arch_atomic_fetch_and_acquire(~i, v);
+}
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic_fetch_andnot_release
+static __always_inline int
+arch_atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+ return arch_atomic_fetch_and_release(~i, v);
+}
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic_fetch_andnot_relaxed
+static __always_inline int
+arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+ return arch_atomic_fetch_and_relaxed(~i, v);
+}
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#endif
+
+#else /* arch_atomic_fetch_andnot_relaxed */
+
+#ifndef arch_atomic_fetch_andnot_acquire
+static __always_inline int
+arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_andnot_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic_fetch_andnot_release
+static __always_inline int
+arch_atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_andnot_relaxed(i, v);
+}
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic_fetch_andnot
+static __always_inline int
+arch_atomic_fetch_andnot(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_andnot_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#endif
+
+#endif /* arch_atomic_fetch_andnot_relaxed */
+
+#ifndef arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or
+#else /* arch_atomic_fetch_or_relaxed */
+
+#ifndef arch_atomic_fetch_or_acquire
+static __always_inline int
+arch_atomic_fetch_or_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_or_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire
+#endif
+
+#ifndef arch_atomic_fetch_or_release
+static __always_inline int
+arch_atomic_fetch_or_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_or_relaxed(i, v);
+}
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release
+#endif
+
+#ifndef arch_atomic_fetch_or
+static __always_inline int
+arch_atomic_fetch_or(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_or_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#endif
+
+#endif /* arch_atomic_fetch_or_relaxed */
+
+#ifndef arch_atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor
+#else /* arch_atomic_fetch_xor_relaxed */
+
+#ifndef arch_atomic_fetch_xor_acquire
+static __always_inline int
+arch_atomic_fetch_xor_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_xor_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire
+#endif
+
+#ifndef arch_atomic_fetch_xor_release
+static __always_inline int
+arch_atomic_fetch_xor_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_xor_relaxed(i, v);
+}
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release
+#endif
+
+#ifndef arch_atomic_fetch_xor
+static __always_inline int
+arch_atomic_fetch_xor(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_xor_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+#endif
+
+#endif /* arch_atomic_fetch_xor_relaxed */
+
+#ifndef arch_atomic_xchg_relaxed
+#define arch_atomic_xchg_acquire arch_atomic_xchg
+#define arch_atomic_xchg_release arch_atomic_xchg
+#define arch_atomic_xchg_relaxed arch_atomic_xchg
+#else /* arch_atomic_xchg_relaxed */
+
+#ifndef arch_atomic_xchg_acquire
+static __always_inline int
+arch_atomic_xchg_acquire(atomic_t *v, int i)
+{
+ int ret = arch_atomic_xchg_relaxed(v, i);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
+#endif
+
+#ifndef arch_atomic_xchg_release
+static __always_inline int
+arch_atomic_xchg_release(atomic_t *v, int i)
+{
+ __atomic_release_fence();
+ return arch_atomic_xchg_relaxed(v, i);
+}
+#define arch_atomic_xchg_release arch_atomic_xchg_release
+#endif
+
+#ifndef arch_atomic_xchg
+static __always_inline int
+arch_atomic_xchg(atomic_t *v, int i)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_xchg_relaxed(v, i);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_xchg arch_atomic_xchg
+#endif
+
+#endif /* arch_atomic_xchg_relaxed */
+
+#ifndef arch_atomic_cmpxchg_relaxed
+#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg
+#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg
+#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg
+#else /* arch_atomic_cmpxchg_relaxed */
+
+#ifndef arch_atomic_cmpxchg_acquire
+static __always_inline int
+arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+ int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic_cmpxchg_release
+static __always_inline int
+arch_atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+ __atomic_release_fence();
+ return arch_atomic_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
+#endif
+
+#ifndef arch_atomic_cmpxchg
+static __always_inline int
+arch_atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
+#endif
+
+#endif /* arch_atomic_cmpxchg_relaxed */
+
+#ifndef arch_atomic_try_cmpxchg_relaxed
+#ifdef arch_atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg
+#endif /* arch_atomic_try_cmpxchg */
+
+#ifndef arch_atomic_try_cmpxchg
+static __always_inline bool
+arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = arch_atomic_cmpxchg(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = arch_atomic_cmpxchg_acquire(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_release
+static __always_inline bool
+arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = arch_atomic_cmpxchg_release(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_relaxed
+static __always_inline bool
+arch_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = arch_atomic_cmpxchg_relaxed(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg_relaxed
+#endif
+
+#else /* arch_atomic_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+ bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_release
+static __always_inline bool
+arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+ __atomic_release_fence();
+ return arch_atomic_try_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic_try_cmpxchg
+static __always_inline bool
+arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
+#endif
+
+#endif /* arch_atomic_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic_sub_and_test
+/**
+ * arch_atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic_sub_and_test(int i, atomic_t *v)
+{
+ return arch_atomic_sub_return(i, v) == 0;
+}
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
+#endif
+
+#ifndef arch_atomic_dec_and_test
+/**
+ * arch_atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static __always_inline bool
+arch_atomic_dec_and_test(atomic_t *v)
+{
+ return arch_atomic_dec_return(v) == 0;
+}
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
+#endif
+
+#ifndef arch_atomic_inc_and_test
+/**
+ * arch_atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic_inc_and_test(atomic_t *v)
+{
+ return arch_atomic_inc_return(v) == 0;
+}
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
+#endif
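The *_and_test family combines the RMW with a zero test on the result, which is the classic reference-drop idiom. A minimal sketch with a hypothetical struct (real refcounting code should prefer refcount_t for its saturation semantics):

    static void obj_put(struct obj *o)
    {
            if (atomic_dec_and_test(&o->refs))
                    kfree(o);       /* last reference gone */
    }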
+
+#ifndef arch_atomic_add_negative_relaxed
+#ifdef arch_atomic_add_negative
+#define arch_atomic_add_negative_acquire arch_atomic_add_negative
+#define arch_atomic_add_negative_release arch_atomic_add_negative
+#define arch_atomic_add_negative_relaxed arch_atomic_add_negative
+#endif /* arch_atomic_add_negative */
+
+#ifndef arch_atomic_add_negative
+/**
+ * arch_atomic_add_negative - Add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns true if the result is negative,
+ * or false when the result is greater than or equal to zero.
+ */
+static __always_inline bool
+arch_atomic_add_negative(int i, atomic_t *v)
+{
+ return arch_atomic_add_return(i, v) < 0;
+}
+#define arch_atomic_add_negative arch_atomic_add_negative
+#endif
+
+#ifndef arch_atomic_add_negative_acquire
+/**
+ * arch_atomic_add_negative_acquire - Add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns true if the result is negative,
+ * or false when the result is greater than or equal to zero.
+ */
+static __always_inline bool
+arch_atomic_add_negative_acquire(int i, atomic_t *v)
+{
+ return arch_atomic_add_return_acquire(i, v) < 0;
+}
+#define arch_atomic_add_negative_acquire arch_atomic_add_negative_acquire
+#endif
+
+#ifndef arch_atomic_add_negative_release
+/**
+ * arch_atomic_add_negative_release - Add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns true if the result is negative,
+ * or false when the result is greater than or equal to zero.
+ */
+static __always_inline bool
+arch_atomic_add_negative_release(int i, atomic_t *v)
+{
+ return arch_atomic_add_return_release(i, v) < 0;
+}
+#define arch_atomic_add_negative_release arch_atomic_add_negative_release
+#endif
+
+#ifndef arch_atomic_add_negative_relaxed
+/**
+ * arch_atomic_add_negative_relaxed - Add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns true if the result is negative,
+ * or false when the result is greater than or equal to zero.
+ */
+static __always_inline bool
+arch_atomic_add_negative_relaxed(int i, atomic_t *v)
+{
+ return arch_atomic_add_return_relaxed(i, v) < 0;
+}
+#define arch_atomic_add_negative_relaxed arch_atomic_add_negative_relaxed
+#endif
+
+#else /* arch_atomic_add_negative_relaxed */
+
+#ifndef arch_atomic_add_negative_acquire
+static __always_inline bool
+arch_atomic_add_negative_acquire(int i, atomic_t *v)
+{
+ bool ret = arch_atomic_add_negative_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_add_negative_acquire arch_atomic_add_negative_acquire
+#endif
+
+#ifndef arch_atomic_add_negative_release
+static __always_inline bool
+arch_atomic_add_negative_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_add_negative_relaxed(i, v);
+}
+#define arch_atomic_add_negative_release arch_atomic_add_negative_release
+#endif
+
+#ifndef arch_atomic_add_negative
+static __always_inline bool
+arch_atomic_add_negative(int i, atomic_t *v)
+{
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_add_negative_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_add_negative arch_atomic_add_negative
+#endif
+
+#endif /* arch_atomic_add_negative_relaxed */
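add_negative() tests the sign rather than zero, useful for counters where crossing below zero is the interesting event. A hypothetical sketch (credits and throttle() are assumptions for illustration):

    /* Consume nr credits; going negative means we overcommitted. */
    if (atomic_add_negative(-nr, &credits))
            throttle();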
+
+#ifndef arch_atomic_fetch_add_unless
+/**
+ * arch_atomic_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns the original value of @v.
+ */
+static __always_inline int
+arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ int c = arch_atomic_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!arch_atomic_try_cmpxchg(v, &c, c + a));
+
+ return c;
+}
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
+#endif
+
+#ifndef arch_atomic_add_unless
+/**
+ * arch_atomic_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static __always_inline bool
+arch_atomic_add_unless(atomic_t *v, int a, int u)
+{
+ return arch_atomic_fetch_add_unless(v, a, u) != u;
+}
+#define arch_atomic_add_unless arch_atomic_add_unless
+#endif
+
+#ifndef arch_atomic_inc_not_zero
+/**
+ * arch_atomic_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+static __always_inline bool
+arch_atomic_inc_not_zero(atomic_t *v)
+{
+ return arch_atomic_add_unless(v, 1, 0);
+}
+#define arch_atomic_inc_not_zero arch_atomic_inc_not_zero
+#endif
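fetch_add_unless() is the conditional-add primitive underneath add_unless() and inc_not_zero(); the latter is the classic lookup idiom of taking a reference only while the object is still live (refcount != 0), e.g. under RCU. A minimal sketch with a hypothetical struct:

    static bool obj_tryget(struct obj *o)
    {
            /* Fails if refs already hit zero, i.e. object is dying. */
            return atomic_inc_not_zero(&o->refs);
    }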
+
+#ifndef arch_atomic_inc_unless_negative
+static __always_inline bool
+arch_atomic_inc_unless_negative(atomic_t *v)
+{
+ int c = arch_atomic_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!arch_atomic_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+}
+#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
+#endif
+
+#ifndef arch_atomic_dec_unless_positive
+static __always_inline bool
+arch_atomic_dec_unless_positive(atomic_t *v)
+{
+ int c = arch_atomic_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!arch_atomic_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+}
+#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
+#endif
+
+#ifndef arch_atomic_dec_if_positive
+static __always_inline int
+arch_atomic_dec_if_positive(atomic_t *v)
+{
+ int dec, c = arch_atomic_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!arch_atomic_try_cmpxchg(v, &c, dec));
+
+ return dec;
+}
+#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
+#endif
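dec_if_positive() decrements only when the result stays non-negative and returns the would-be new value either way; together with inc_unless_negative()/dec_unless_positive() above it covers the bounded-counter cases. A minimal semaphore-style trydown sketch:

    static bool sem_trydown(atomic_t *count)
    {
            /* >= 0 means the decrement actually happened. */
            return atomic_dec_if_positive(count) >= 0;
    }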
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
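The atomic64_t fallbacks below mirror the atomic_t ones exactly, with s64 in place of int. On 32-bit architectures lacking native 64-bit atomics, CONFIG_GENERIC_ATOMIC64 pulls in asm-generic/atomic64.h, which implements the base ops via a hashed spinlock array (lib/atomic64.c); the ordering variants are then derived the same way. Usage is unchanged, e.g. a hypothetical stats counter:

    static atomic64_t bytes_rx = ATOMIC64_INIT(0);

    static void account_rx(size_t len)
    {
            atomic64_add(len, &bytes_rx);   /* may take a lock on 32-bit */
    }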
+
+#ifndef arch_atomic64_read_acquire
+static __always_inline s64
+arch_atomic64_read_acquire(const atomic64_t *v)
+{
+ s64 ret;
+
+ if (__native_word(atomic64_t)) {
+ ret = smp_load_acquire(&(v)->counter);
+ } else {
+ ret = arch_atomic64_read(v);
+ __atomic_acquire_fence();
+ }
+
+ return ret;
+}
+#define arch_atomic64_read_acquire arch_atomic64_read_acquire
+#endif
+
+#ifndef arch_atomic64_set_release
+static __always_inline void
+arch_atomic64_set_release(atomic64_t *v, s64 i)
+{
+ if (__native_word(atomic64_t)) {
+ smp_store_release(&(v)->counter, i);
+ } else {
+ __atomic_release_fence();
+ arch_atomic64_set(v, i);
+ }
+}
+#define arch_atomic64_set_release arch_atomic64_set_release
+#endif
+
+#ifndef arch_atomic64_add_return_relaxed
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return
+#define arch_atomic64_add_return_release arch_atomic64_add_return
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return
+#else /* arch_atomic64_add_return_relaxed */
+
+#ifndef arch_atomic64_add_return_acquire
+static __always_inline s64
+arch_atomic64_add_return_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_add_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire
+#endif
+
+#ifndef arch_atomic64_add_return_release
+static __always_inline s64
+arch_atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_add_return_relaxed(i, v);
+}
+#define arch_atomic64_add_return_release arch_atomic64_add_return_release
+#endif
+
+#ifndef arch_atomic64_add_return
+static __always_inline s64
+arch_atomic64_add_return(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_add_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_add_return arch_atomic64_add_return
+#endif
+
+#endif /* arch_atomic64_add_return_relaxed */
+
+#ifndef arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add
+#else /* arch_atomic64_fetch_add_relaxed */
+
+#ifndef arch_atomic64_fetch_add_acquire
+static __always_inline s64
+arch_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_add_release
+static __always_inline s64
+arch_atomic64_fetch_add_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_add_relaxed(i, v);
+}
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release
+#endif
+
+#ifndef arch_atomic64_fetch_add
+static __always_inline s64
+arch_atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_add_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#endif
+
+#endif /* arch_atomic64_fetch_add_relaxed */
+
+#ifndef arch_atomic64_sub_return_relaxed
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return
+#else /* arch_atomic64_sub_return_relaxed */
+
+#ifndef arch_atomic64_sub_return_acquire
+static __always_inline s64
+arch_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_sub_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire
+#endif
+
+#ifndef arch_atomic64_sub_return_release
+static __always_inline s64
+arch_atomic64_sub_return_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_sub_return_relaxed(i, v);
+}
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release
+#endif
+
+#ifndef arch_atomic64_sub_return
+static __always_inline s64
+arch_atomic64_sub_return(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_sub_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+#endif
+
+#endif /* arch_atomic64_sub_return_relaxed */
+
+#ifndef arch_atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub
+#else /* arch_atomic64_fetch_sub_relaxed */
+
+#ifndef arch_atomic64_fetch_sub_acquire
+static __always_inline s64
+arch_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_sub_release
+static __always_inline s64
+arch_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_sub_relaxed(i, v);
+}
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release
+#endif
+
+#ifndef arch_atomic64_fetch_sub
+static __always_inline s64
+arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_sub_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+#endif
+
+#endif /* arch_atomic64_fetch_sub_relaxed */
+
+#ifndef arch_atomic64_inc
+static __always_inline void
+arch_atomic64_inc(atomic64_t *v)
+{
+ arch_atomic64_add(1, v);
+}
+#define arch_atomic64_inc arch_atomic64_inc
+#endif
+
+#ifndef arch_atomic64_inc_return_relaxed
+#ifdef arch_atomic64_inc_return
+#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return
+#define arch_atomic64_inc_return_release arch_atomic64_inc_return
+#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return
+#endif /* arch_atomic64_inc_return */
+
+#ifndef arch_atomic64_inc_return
+static __always_inline s64
+arch_atomic64_inc_return(atomic64_t *v)
+{
+ return arch_atomic64_add_return(1, v);
+}
+#define arch_atomic64_inc_return arch_atomic64_inc_return
+#endif
+
+#ifndef arch_atomic64_inc_return_acquire
+static __always_inline s64
+arch_atomic64_inc_return_acquire(atomic64_t *v)
+{
+ return arch_atomic64_add_return_acquire(1, v);
+}
+#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
+#endif
+
+#ifndef arch_atomic64_inc_return_release
+static __always_inline s64
+arch_atomic64_inc_return_release(atomic64_t *v)
+{
+ return arch_atomic64_add_return_release(1, v);
+}
+#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
+#endif
+
+#ifndef arch_atomic64_inc_return_relaxed
+static __always_inline s64
+arch_atomic64_inc_return_relaxed(atomic64_t *v)
+{
+ return arch_atomic64_add_return_relaxed(1, v);
+}
+#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
+#endif
+
+#else /* arch_atomic64_inc_return_relaxed */
+
+#ifndef arch_atomic64_inc_return_acquire
+static __always_inline s64
+arch_atomic64_inc_return_acquire(atomic64_t *v)
+{
+ s64 ret = arch_atomic64_inc_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
+#endif
+
+#ifndef arch_atomic64_inc_return_release
+static __always_inline s64
+arch_atomic64_inc_return_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_inc_return_relaxed(v);
+}
+#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
+#endif
+
+#ifndef arch_atomic64_inc_return
+static __always_inline s64
+arch_atomic64_inc_return(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_inc_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_inc_return arch_atomic64_inc_return
+#endif
+
+#endif /* arch_atomic64_inc_return_relaxed */
+
+#ifndef arch_atomic64_fetch_inc_relaxed
+#ifdef arch_atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc
+#endif /* arch_atomic64_fetch_inc */
+
+#ifndef arch_atomic64_fetch_inc
+static __always_inline s64
+arch_atomic64_fetch_inc(atomic64_t *v)
+{
+ return arch_atomic64_fetch_add(1, v);
+}
+#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
+#endif
+
+#ifndef arch_atomic64_fetch_inc_acquire
+static __always_inline s64
+arch_atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+ return arch_atomic64_fetch_add_acquire(1, v);
+}
+#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_inc_release
+static __always_inline s64
+arch_atomic64_fetch_inc_release(atomic64_t *v)
+{
+ return arch_atomic64_fetch_add_release(1, v);
+}
+#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
+#endif
+
+#ifndef arch_atomic64_fetch_inc_relaxed
+static __always_inline s64
+arch_atomic64_fetch_inc_relaxed(atomic64_t *v)
+{
+ return arch_atomic64_fetch_add_relaxed(1, v);
+}
+#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc_relaxed
+#endif
+
+#else /* arch_atomic64_fetch_inc_relaxed */
+
+#ifndef arch_atomic64_fetch_inc_acquire
+static __always_inline s64
+arch_atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_inc_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_inc_release
+static __always_inline s64
+arch_atomic64_fetch_inc_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_inc_relaxed(v);
+}
+#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
+#endif
+
+#ifndef arch_atomic64_fetch_inc
+static __always_inline s64
+arch_atomic64_fetch_inc(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_inc_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
+#endif
+
+#endif /* arch_atomic64_fetch_inc_relaxed */
+
+#ifndef arch_atomic64_dec
+static __always_inline void
+arch_atomic64_dec(atomic64_t *v)
+{
+ arch_atomic64_sub(1, v);
+}
+#define arch_atomic64_dec arch_atomic64_dec
+#endif
+
+#ifndef arch_atomic64_dec_return_relaxed
+#ifdef arch_atomic64_dec_return
+#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return
+#define arch_atomic64_dec_return_release arch_atomic64_dec_return
+#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return
+#endif /* arch_atomic64_dec_return */
+
+#ifndef arch_atomic64_dec_return
+static __always_inline s64
+arch_atomic64_dec_return(atomic64_t *v)
+{
+ return arch_atomic64_sub_return(1, v);
+}
+#define arch_atomic64_dec_return arch_atomic64_dec_return
+#endif
+
+#ifndef arch_atomic64_dec_return_acquire
+static __always_inline s64
+arch_atomic64_dec_return_acquire(atomic64_t *v)
+{
+ return arch_atomic64_sub_return_acquire(1, v);
+}
+#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
+#endif
+
+#ifndef arch_atomic64_dec_return_release
+static __always_inline s64
+arch_atomic64_dec_return_release(atomic64_t *v)
+{
+ return arch_atomic64_sub_return_release(1, v);
+}
+#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
+#endif
+
+#ifndef arch_atomic64_dec_return_relaxed
+static __always_inline s64
+arch_atomic64_dec_return_relaxed(atomic64_t *v)
+{
+ return arch_atomic64_sub_return_relaxed(1, v);
+}
+#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
+#endif
+
+#else /* arch_atomic64_dec_return_relaxed */
+
+#ifndef arch_atomic64_dec_return_acquire
+static __always_inline s64
+arch_atomic64_dec_return_acquire(atomic64_t *v)
+{
+ s64 ret = arch_atomic64_dec_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
+#endif
+
+#ifndef arch_atomic64_dec_return_release
+static __always_inline s64
+arch_atomic64_dec_return_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_dec_return_relaxed(v);
+}
+#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
+#endif
+
+#ifndef arch_atomic64_dec_return
+static __always_inline s64
+arch_atomic64_dec_return(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_dec_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_dec_return arch_atomic64_dec_return
+#endif
+
+#endif /* arch_atomic64_dec_return_relaxed */
+
+#ifndef arch_atomic64_fetch_dec_relaxed
+#ifdef arch_atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec
+#endif /* arch_atomic64_fetch_dec */
+
+#ifndef arch_atomic64_fetch_dec
+static __always_inline s64
+arch_atomic64_fetch_dec(atomic64_t *v)
+{
+ return arch_atomic64_fetch_sub(1, v);
+}
+#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
+#endif
+
+#ifndef arch_atomic64_fetch_dec_acquire
+static __always_inline s64
+arch_atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+ return arch_atomic64_fetch_sub_acquire(1, v);
+}
+#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_dec_release
+static __always_inline s64
+arch_atomic64_fetch_dec_release(atomic64_t *v)
+{
+ return arch_atomic64_fetch_sub_release(1, v);
+}
+#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
+#endif
+
+#ifndef arch_atomic64_fetch_dec_relaxed
+static __always_inline s64
+arch_atomic64_fetch_dec_relaxed(atomic64_t *v)
+{
+ return arch_atomic64_fetch_sub_relaxed(1, v);
+}
+#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec_relaxed
+#endif
+
+#else /* arch_atomic64_fetch_dec_relaxed */
+
+#ifndef arch_atomic64_fetch_dec_acquire
+static __always_inline s64
+arch_atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_dec_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_dec_release
+static __always_inline s64
+arch_atomic64_fetch_dec_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_dec_relaxed(v);
+}
+#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
+#endif
+
+#ifndef arch_atomic64_fetch_dec
+static __always_inline s64
+arch_atomic64_fetch_dec(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_dec_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
+#endif
+
+#endif /* arch_atomic64_fetch_dec_relaxed */
+
+#ifndef arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and
+#else /* arch_atomic64_fetch_and_relaxed */
+
+#ifndef arch_atomic64_fetch_and_acquire
+static __always_inline s64
+arch_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_and_release
+static __always_inline s64
+arch_atomic64_fetch_and_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_and_relaxed(i, v);
+}
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release
+#endif
+
+#ifndef arch_atomic64_fetch_and
+static __always_inline s64
+arch_atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_and_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#endif
+
+#endif /* arch_atomic64_fetch_and_relaxed */
+
+#ifndef arch_atomic64_andnot
+static __always_inline void
+arch_atomic64_andnot(s64 i, atomic64_t *v)
+{
+ arch_atomic64_and(~i, v);
+}
+#define arch_atomic64_andnot arch_atomic64_andnot
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_relaxed
+#ifdef arch_atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot
+#endif /* arch_atomic64_fetch_andnot */
+
+#ifndef arch_atomic64_fetch_andnot
+static __always_inline s64
+arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_fetch_and(~i, v);
+}
+#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_acquire
+static __always_inline s64
+arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_fetch_and_acquire(~i, v);
+}
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_release
+static __always_inline s64
+arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_fetch_and_release(~i, v);
+}
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_relaxed
+static __always_inline s64
+arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_fetch_and_relaxed(~i, v);
+}
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#endif
+
+#else /* arch_atomic64_fetch_andnot_relaxed */
+
+#ifndef arch_atomic64_fetch_andnot_acquire
+static __always_inline s64
+arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_release
+static __always_inline s64
+arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_andnot_relaxed(i, v);
+}
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic64_fetch_andnot
+static __always_inline s64
+arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_andnot_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
+#endif
+
+#endif /* arch_atomic64_fetch_andnot_relaxed */
+
+#ifndef arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or
+#else /* arch_atomic64_fetch_or_relaxed */
+
+#ifndef arch_atomic64_fetch_or_acquire
+static __always_inline s64
+arch_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_or_release
+static __always_inline s64
+arch_atomic64_fetch_or_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_or_relaxed(i, v);
+}
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release
+#endif
+
+#ifndef arch_atomic64_fetch_or
+static __always_inline s64
+arch_atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_or_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#endif
+
+#endif /* arch_atomic64_fetch_or_relaxed */
+
+#ifndef arch_atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor
+#else /* arch_atomic64_fetch_xor_relaxed */
+
+#ifndef arch_atomic64_fetch_xor_acquire
+static __always_inline s64
+arch_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_xor_release
+static __always_inline s64
+arch_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_xor_relaxed(i, v);
+}
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release
+#endif
+
+#ifndef arch_atomic64_fetch_xor
+static __always_inline s64
+arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_xor_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+#endif
+
+#endif /* arch_atomic64_fetch_xor_relaxed */
+
+#ifndef arch_atomic64_xchg_relaxed
+#define arch_atomic64_xchg_acquire arch_atomic64_xchg
+#define arch_atomic64_xchg_release arch_atomic64_xchg
+#define arch_atomic64_xchg_relaxed arch_atomic64_xchg
+#else /* arch_atomic64_xchg_relaxed */
+
+#ifndef arch_atomic64_xchg_acquire
+static __always_inline s64
+arch_atomic64_xchg_acquire(atomic64_t *v, s64 i)
+{
+ s64 ret = arch_atomic64_xchg_relaxed(v, i);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire
+#endif
+
+#ifndef arch_atomic64_xchg_release
+static __always_inline s64
+arch_atomic64_xchg_release(atomic64_t *v, s64 i)
+{
+ __atomic_release_fence();
+ return arch_atomic64_xchg_relaxed(v, i);
+}
+#define arch_atomic64_xchg_release arch_atomic64_xchg_release
+#endif
+
+#ifndef arch_atomic64_xchg
+static __always_inline s64
+arch_atomic64_xchg(atomic64_t *v, s64 i)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_xchg_relaxed(v, i);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_xchg arch_atomic64_xchg
+#endif
+
+#endif /* arch_atomic64_xchg_relaxed */
+
+#ifndef arch_atomic64_cmpxchg_relaxed
+#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg
+#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg
+#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg
+#else /* arch_atomic64_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_cmpxchg_acquire
+static __always_inline s64
+arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+ s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic64_cmpxchg_release
+static __always_inline s64
+arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+{
+ __atomic_release_fence();
+ return arch_atomic64_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release
+#endif
+
+#ifndef arch_atomic64_cmpxchg
+static __always_inline s64
+arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
+#endif
+
+#endif /* arch_atomic64_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_try_cmpxchg_relaxed
+#ifdef arch_atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg
+#endif /* arch_atomic64_try_cmpxchg */
+
+#ifndef arch_atomic64_try_cmpxchg
+static __always_inline bool
+arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = arch_atomic64_cmpxchg(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = arch_atomic64_cmpxchg_acquire(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_release
+static __always_inline bool
+arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = arch_atomic64_cmpxchg_release(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_relaxed
+static __always_inline bool
+arch_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = arch_atomic64_cmpxchg_relaxed(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg_relaxed
+#endif
+
+#else /* arch_atomic64_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+ bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_release
+static __always_inline bool
+arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+ __atomic_release_fence();
+ return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg
+static __always_inline bool
+arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
+#endif
+
+#endif /* arch_atomic64_try_cmpxchg_relaxed */
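For context, a caller-side sketch of the try_cmpxchg() idiom that the fallbacks below rely on (the function and its bound are hypothetical, not part of this patch). On failure, try_cmpxchg() writes the current value back into *old, so the loop can recompute the desired new value without an extra read:

	/* Hypothetical: add @a to @v unless the result would exceed @max. */
	static __always_inline s64
	example_atomic64_add_below(atomic64_t *v, s64 a, s64 max)
	{
		s64 c = arch_atomic64_read(v);

		do {
			if (c + a > max)
				break;		/* leave @v untouched */
		} while (!arch_atomic64_try_cmpxchg(v, &c, c + a));

		return c;			/* old value, as with the fetch ops */
	}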
+
+#ifndef arch_atomic64_sub_and_test
+/**
+ * arch_atomic64_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns true if the result
+ * is zero, and false otherwise.
+ */
+static __always_inline bool
+arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_sub_return(i, v) == 0;
+}
+#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
+#endif
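As a usage illustration only (the object type and its allocation are assumptions, not from this patch), sub_and_test() is the usual way to drop several references at once and free on the final one:

	struct my_obj {
		atomic64_t refcount;	/* hypothetical refcounted object */
	};

	static void put_obj_refs(struct my_obj *obj, s64 nr)
	{
		/* True only when the count reaches exactly zero. */
		if (arch_atomic64_sub_and_test(nr, &obj->refcount))
			kfree(obj);	/* assumes a kmalloc()'d object, <linux/slab.h> */
	}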
+
+#ifndef arch_atomic64_dec_and_test
+/**
+ * arch_atomic64_dec_and_test - decrement and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically decrements @v by 1 and returns true if the result
+ * is 0, and false otherwise.
+ */
+static __always_inline bool
+arch_atomic64_dec_and_test(atomic64_t *v)
+{
+ return arch_atomic64_dec_return(v) == 0;
+}
+#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
+#endif
+
+#ifndef arch_atomic64_inc_and_test
+/**
+ * arch_atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1 and returns true if the result
+ * is zero, and false otherwise.
+ */
+static __always_inline bool
+arch_atomic64_inc_and_test(atomic64_t *v)
+{
+ return arch_atomic64_inc_return(v) == 0;
+}
+#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
+#endif
+
+#ifndef arch_atomic64_add_negative_relaxed
+#ifdef arch_atomic64_add_negative
+#define arch_atomic64_add_negative_acquire arch_atomic64_add_negative
+#define arch_atomic64_add_negative_release arch_atomic64_add_negative
+#define arch_atomic64_add_negative_relaxed arch_atomic64_add_negative
+#endif /* arch_atomic64_add_negative */
+
+#ifndef arch_atomic64_add_negative
+/**
+ * arch_atomic64_add_negative - Add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true if the result is negative,
+ * or false when the result is greater than or equal to zero.
+ */
+static __always_inline bool
+arch_atomic64_add_negative(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_add_return(i, v) < 0;
+}
+#define arch_atomic64_add_negative arch_atomic64_add_negative
+#endif
+
+#ifndef arch_atomic64_add_negative_acquire
+/**
+ * arch_atomic64_add_negative_acquire - Add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true if the result is negative,
+ * or false when the result is greater than or equal to zero.
+ */
+static __always_inline bool
+arch_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_add_return_acquire(i, v) < 0;
+}
+#define arch_atomic64_add_negative_acquire arch_atomic64_add_negative_acquire
+#endif
+
+#ifndef arch_atomic64_add_negative_release
+/**
+ * arch_atomic64_add_negative_release - Add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true if the result is negative,
+ * or false when the result is greater than or equal to zero.
+ */
+static __always_inline bool
+arch_atomic64_add_negative_release(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_add_return_release(i, v) < 0;
+}
+#define arch_atomic64_add_negative_release arch_atomic64_add_negative_release
+#endif
+
+#ifndef arch_atomic64_add_negative_relaxed
+/**
+ * arch_atomic64_add_negative_relaxed - Add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true if the result is negative,
+ * or false when the result is greater than or equal to zero.
+ */
+static __always_inline bool
+arch_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_add_return_relaxed(i, v) < 0;
+}
+#define arch_atomic64_add_negative_relaxed arch_atomic64_add_negative_relaxed
+#endif
+
+#else /* arch_atomic64_add_negative_relaxed */
+
+#ifndef arch_atomic64_add_negative_acquire
+static __always_inline bool
+arch_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
+{
+ bool ret = arch_atomic64_add_negative_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_add_negative_acquire arch_atomic64_add_negative_acquire
+#endif
+
+#ifndef arch_atomic64_add_negative_release
+static __always_inline bool
+arch_atomic64_add_negative_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_add_negative_relaxed(i, v);
+}
+#define arch_atomic64_add_negative_release arch_atomic64_add_negative_release
+#endif
+
+#ifndef arch_atomic64_add_negative
+static __always_inline bool
+arch_atomic64_add_negative(s64 i, atomic64_t *v)
+{
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_add_negative_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_add_negative arch_atomic64_add_negative
+#endif
+
+#endif /* arch_atomic64_add_negative_relaxed */
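A hedged sketch of where add_negative() fits (names invented): treating the atomic as a signed balance, it reports in one atomic step whether a charge pushed the balance into debt:

	/* Hypothetical: charge @cost and report whether we went negative. */
	static bool charge_goes_negative(atomic64_t *credit, s64 cost)
	{
		return arch_atomic64_add_negative(-cost, credit);
	}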
+
+#ifndef arch_atomic64_fetch_add_unless
+/**
+ * arch_atomic64_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns the original value of @v.
+ */
+static __always_inline s64
+arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ s64 c = arch_atomic64_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
+
+ return c;
+}
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
+#endif
+
+#ifndef arch_atomic64_add_unless
+/**
+ * arch_atomic64_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static __always_inline bool
+arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ return arch_atomic64_fetch_add_unless(v, a, u) != u;
+}
+#define arch_atomic64_add_unless arch_atomic64_add_unless
+#endif
+
+#ifndef arch_atomic64_inc_not_zero
+/**
+ * arch_atomic64_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+static __always_inline bool
+arch_atomic64_inc_not_zero(atomic64_t *v)
+{
+ return arch_atomic64_add_unless(v, 1, 0);
+}
+#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
+#endif
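A matching acquire-side sketch for the hypothetical struct my_obj above: inc_not_zero() lets a lookup take a new reference only while the object is still live, and fails cleanly once a concurrent release has already dropped the count to zero:

	static bool try_get_obj(struct my_obj *obj)
	{
		/* Increments only if the refcount was non-zero. */
		return arch_atomic64_inc_not_zero(&obj->refcount);
	}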
+
+#ifndef arch_atomic64_inc_unless_negative
+static __always_inline bool
+arch_atomic64_inc_unless_negative(atomic64_t *v)
+{
+ s64 c = arch_atomic64_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!arch_atomic64_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+}
+#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative
+#endif
+
+#ifndef arch_atomic64_dec_unless_positive
+static __always_inline bool
+arch_atomic64_dec_unless_positive(atomic64_t *v)
+{
+ s64 c = arch_atomic64_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!arch_atomic64_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+}
+#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive
+#endif
+
+#ifndef arch_atomic64_dec_if_positive
+static __always_inline s64
+arch_atomic64_dec_if_positive(atomic64_t *v)
+{
+ s64 dec, c = arch_atomic64_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!arch_atomic64_try_cmpxchg(v, &c, dec));
+
+ return dec;
+}
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
+#endif
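One more hedged usage sketch (the counter name is invented): because dec_if_positive() returns the would-be decremented value and refuses to go negative, it maps directly onto a trylock-style resource counter:

	static bool try_take_slot(atomic64_t *slots)
	{
		/* Decrements only while the result stays >= 0. */
		return arch_atomic64_dec_if_positive(slots) >= 0;
	}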
+
+#endif /* _LINUX_ATOMIC_FALLBACK_H */
+// ad2e2b4d168dbc60a73922616047a9bfa446af36
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
new file mode 100644
index 000000000000..03a232a1fa57
--- /dev/null
+++ b/include/linux/atomic/atomic-instrumented.h
@@ -0,0 +1,2170 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-instrumented.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+/*
+ * This file provides wrappers with KASAN instrumentation for atomic operations.
+ * To use this functionality, an arch's atomic.h file needs to define all
+ * atomic operations with an arch_ prefix (e.g. arch_atomic_read()) and include
+ * this file at the end. This file then provides atomic_read(), which forwards
+ * to arch_atomic_read() for the actual atomic operation.
+ * Note: if an arch atomic operation is implemented in terms of other atomic
+ * operations (e.g. an atomic_read()/atomic_cmpxchg() loop), it needs to use
+ * the arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
+ * double instrumentation.
+ */
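To make the contract above concrete, a minimal, hypothetical arch-side sketch (assuming an architecture where a plain READ_ONCE()/WRITE_ONCE() of the 32-bit counter is atomic; real arch headers also implement the read-modify-write ops, usually in assembly):

	/* Hypothetical excerpt from an arch's asm/atomic.h, illustration only. */
	static __always_inline int arch_atomic_read(const atomic_t *v)
	{
		return READ_ONCE(v->counter);
	}

	static __always_inline void arch_atomic_set(atomic_t *v, int i)
	{
		WRITE_ONCE(v->counter, i);
	}

	/* ... remaining arch_atomic*() definitions ... */

	#include <linux/atomic/atomic-instrumented.h>	/* wrappers, included last */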
+#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
+#define _LINUX_ATOMIC_INSTRUMENTED_H
+
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/instrumented.h>
+
+static __always_inline int
+atomic_read(const atomic_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return arch_atomic_read(v);
+}
+
+static __always_inline int
+atomic_read_acquire(const atomic_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return arch_atomic_read_acquire(v);
+}
+
+static __always_inline void
+atomic_set(atomic_t *v, int i)
+{
+ instrument_atomic_write(v, sizeof(*v));
+ arch_atomic_set(v, i);
+}
+
+static __always_inline void
+atomic_set_release(atomic_t *v, int i)
+{
+ kcsan_release();
+ instrument_atomic_write(v, sizeof(*v));
+ arch_atomic_set_release(v, i);
+}
+
+static __always_inline void
+atomic_add(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_add(i, v);
+}
+
+static __always_inline int
+atomic_add_return(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_add_return(i, v);
+}
+
+static __always_inline int
+atomic_add_return_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_add_return_acquire(i, v);
+}
+
+static __always_inline int
+atomic_add_return_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_add_return_release(i, v);
+}
+
+static __always_inline int
+atomic_add_return_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_add_return_relaxed(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_add(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_add_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_add_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_add_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_sub(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_sub(i, v);
+}
+
+static __always_inline int
+atomic_sub_return(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_sub_return(i, v);
+}
+
+static __always_inline int
+atomic_sub_return_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_sub_return_acquire(i, v);
+}
+
+static __always_inline int
+atomic_sub_return_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_sub_return_release(i, v);
+}
+
+static __always_inline int
+atomic_sub_return_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_sub_return_relaxed(i, v);
+}
+
+static __always_inline int
+atomic_fetch_sub(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_sub(i, v);
+}
+
+static __always_inline int
+atomic_fetch_sub_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_sub_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_sub_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_sub_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_sub_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_sub_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_inc(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_inc(v);
+}
+
+static __always_inline int
+atomic_inc_return(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_inc_return(v);
+}
+
+static __always_inline int
+atomic_inc_return_acquire(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_inc_return_acquire(v);
+}
+
+static __always_inline int
+atomic_inc_return_release(atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_inc_return_release(v);
+}
+
+static __always_inline int
+atomic_inc_return_relaxed(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_inc_return_relaxed(v);
+}
+
+static __always_inline int
+atomic_fetch_inc(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_inc(v);
+}
+
+static __always_inline int
+atomic_fetch_inc_acquire(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_inc_acquire(v);
+}
+
+static __always_inline int
+atomic_fetch_inc_release(atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_inc_release(v);
+}
+
+static __always_inline int
+atomic_fetch_inc_relaxed(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_inc_relaxed(v);
+}
+
+static __always_inline void
+atomic_dec(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_dec(v);
+}
+
+static __always_inline int
+atomic_dec_return(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_dec_return(v);
+}
+
+static __always_inline int
+atomic_dec_return_acquire(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_dec_return_acquire(v);
+}
+
+static __always_inline int
+atomic_dec_return_release(atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_dec_return_release(v);
+}
+
+static __always_inline int
+atomic_dec_return_relaxed(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_dec_return_relaxed(v);
+}
+
+static __always_inline int
+atomic_fetch_dec(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_dec(v);
+}
+
+static __always_inline int
+atomic_fetch_dec_acquire(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_dec_acquire(v);
+}
+
+static __always_inline int
+atomic_fetch_dec_release(atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_dec_release(v);
+}
+
+static __always_inline int
+atomic_fetch_dec_relaxed(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_dec_relaxed(v);
+}
+
+static __always_inline void
+atomic_and(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_and(i, v);
+}
+
+static __always_inline int
+atomic_fetch_and(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_and(i, v);
+}
+
+static __always_inline int
+atomic_fetch_and_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_and_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_and_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_and_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_and_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_and_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_andnot(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_andnot(i, v);
+}
+
+static __always_inline int
+atomic_fetch_andnot(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_andnot(i, v);
+}
+
+static __always_inline int
+atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_andnot_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_andnot_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_andnot_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_or(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_or(i, v);
+}
+
+static __always_inline int
+atomic_fetch_or(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_or(i, v);
+}
+
+static __always_inline int
+atomic_fetch_or_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_or_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_or_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_or_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_or_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_or_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_xor(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_xor(i, v);
+}
+
+static __always_inline int
+atomic_fetch_xor(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_xor(i, v);
+}
+
+static __always_inline int
+atomic_fetch_xor_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_xor_acquire(i, v);
+}
+
+static __always_inline int
+atomic_fetch_xor_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_xor_release(i, v);
+}
+
+static __always_inline int
+atomic_fetch_xor_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_xor_relaxed(i, v);
+}
+
+static __always_inline int
+atomic_xchg(atomic_t *v, int i)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_xchg(v, i);
+}
+
+static __always_inline int
+atomic_xchg_acquire(atomic_t *v, int i)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_xchg_acquire(v, i);
+}
+
+static __always_inline int
+atomic_xchg_release(atomic_t *v, int i)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_xchg_release(v, i);
+}
+
+static __always_inline int
+atomic_xchg_relaxed(atomic_t *v, int i)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_xchg_relaxed(v, i);
+}
+
+static __always_inline int
+atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_cmpxchg(v, old, new);
+}
+
+static __always_inline int
+atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline int
+atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_cmpxchg_release(v, old, new);
+}
+
+static __always_inline int
+atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic_try_cmpxchg(v, old, new);
+}
+
+static __always_inline bool
+atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic_try_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline bool
+atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic_try_cmpxchg_release(v, old, new);
+}
+
+static __always_inline bool
+atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic_try_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic_sub_and_test(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_sub_and_test(i, v);
+}
+
+static __always_inline bool
+atomic_dec_and_test(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_dec_and_test(v);
+}
+
+static __always_inline bool
+atomic_inc_and_test(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_inc_and_test(v);
+}
+
+static __always_inline bool
+atomic_add_negative(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_add_negative(i, v);
+}
+
+static __always_inline bool
+atomic_add_negative_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_add_negative_acquire(i, v);
+}
+
+static __always_inline bool
+atomic_add_negative_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_add_negative_release(i, v);
+}
+
+static __always_inline bool
+atomic_add_negative_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_add_negative_relaxed(i, v);
+}
+
+static __always_inline int
+atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_fetch_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic_add_unless(atomic_t *v, int a, int u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic_inc_not_zero(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_inc_not_zero(v);
+}
+
+static __always_inline bool
+atomic_inc_unless_negative(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_inc_unless_negative(v);
+}
+
+static __always_inline bool
+atomic_dec_unless_positive(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_dec_unless_positive(v);
+}
+
+static __always_inline int
+atomic_dec_if_positive(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_dec_if_positive(v);
+}
+
+static __always_inline s64
+atomic64_read(const atomic64_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return arch_atomic64_read(v);
+}
+
+static __always_inline s64
+atomic64_read_acquire(const atomic64_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return arch_atomic64_read_acquire(v);
+}
+
+static __always_inline void
+atomic64_set(atomic64_t *v, s64 i)
+{
+ instrument_atomic_write(v, sizeof(*v));
+ arch_atomic64_set(v, i);
+}
+
+static __always_inline void
+atomic64_set_release(atomic64_t *v, s64 i)
+{
+ kcsan_release();
+ instrument_atomic_write(v, sizeof(*v));
+ arch_atomic64_set_release(v, i);
+}
+
+static __always_inline void
+atomic64_add(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic64_add(i, v);
+}
+
+static __always_inline s64
+atomic64_add_return(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_add_return(i, v);
+}
+
+static __always_inline s64
+atomic64_add_return_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_add_return_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_add_return_release(i, v);
+}
+
+static __always_inline s64
+atomic64_add_return_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_add_return_relaxed(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_add(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_add_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_add_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_add_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_add_relaxed(i, v);
+}
+
+static __always_inline void
+atomic64_sub(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic64_sub(i, v);
+}
+
+static __always_inline s64
+atomic64_sub_return(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_sub_return(i, v);
+}
+
+static __always_inline s64
+atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_sub_return_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_sub_return_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_sub_return_release(i, v);
+}
+
+static __always_inline s64
+atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_sub_return_relaxed(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_sub(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_sub_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_sub_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_sub_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_sub_relaxed(i, v);
+}
+
+static __always_inline void
+atomic64_inc(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic64_inc(v);
+}
+
+static __always_inline s64
+atomic64_inc_return(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_inc_return(v);
+}
+
+static __always_inline s64
+atomic64_inc_return_acquire(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_inc_return_acquire(v);
+}
+
+static __always_inline s64
+atomic64_inc_return_release(atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_inc_return_release(v);
+}
+
+static __always_inline s64
+atomic64_inc_return_relaxed(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_inc_return_relaxed(v);
+}
+
+static __always_inline s64
+atomic64_fetch_inc(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_inc(v);
+}
+
+static __always_inline s64
+atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_inc_acquire(v);
+}
+
+static __always_inline s64
+atomic64_fetch_inc_release(atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_inc_release(v);
+}
+
+static __always_inline s64
+atomic64_fetch_inc_relaxed(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_inc_relaxed(v);
+}
+
+static __always_inline void
+atomic64_dec(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic64_dec(v);
+}
+
+static __always_inline s64
+atomic64_dec_return(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_dec_return(v);
+}
+
+static __always_inline s64
+atomic64_dec_return_acquire(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_dec_return_acquire(v);
+}
+
+static __always_inline s64
+atomic64_dec_return_release(atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_dec_return_release(v);
+}
+
+static __always_inline s64
+atomic64_dec_return_relaxed(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_dec_return_relaxed(v);
+}
+
+static __always_inline s64
+atomic64_fetch_dec(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_dec(v);
+}
+
+static __always_inline s64
+atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_dec_acquire(v);
+}
+
+static __always_inline s64
+atomic64_fetch_dec_release(atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_dec_release(v);
+}
+
+static __always_inline s64
+atomic64_fetch_dec_relaxed(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_dec_relaxed(v);
+}
+
+static __always_inline void
+atomic64_and(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic64_and(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_and(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_and_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_and_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_and_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_and_relaxed(i, v);
+}
+
+static __always_inline void
+atomic64_andnot(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic64_andnot(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_andnot(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_andnot_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_andnot_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_andnot_relaxed(i, v);
+}
+
+static __always_inline void
+atomic64_or(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic64_or(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_or(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_or_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_or_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_or_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_or_relaxed(i, v);
+}
+
+static __always_inline void
+atomic64_xor(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic64_xor(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_xor(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_xor_acquire(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_xor_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_xor_release(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_xor_relaxed(i, v);
+}
+
+static __always_inline s64
+atomic64_xchg(atomic64_t *v, s64 i)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_xchg(v, i);
+}
+
+static __always_inline s64
+atomic64_xchg_acquire(atomic64_t *v, s64 i)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_xchg_acquire(v, i);
+}
+
+static __always_inline s64
+atomic64_xchg_release(atomic64_t *v, s64 i)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_xchg_release(v, i);
+}
+
+static __always_inline s64
+atomic64_xchg_relaxed(atomic64_t *v, s64 i)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_xchg_relaxed(v, i);
+}
+
+static __always_inline s64
+atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_cmpxchg(v, old, new);
+}
+
+static __always_inline s64
+atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline s64
+atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_cmpxchg_release(v, old, new);
+}
+
+static __always_inline s64
+atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic64_try_cmpxchg(v, old, new);
+}
+
+static __always_inline bool
+atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic64_try_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline bool
+atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic64_try_cmpxchg_release(v, old, new);
+}
+
+static __always_inline bool
+atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_sub_and_test(i, v);
+}
+
+static __always_inline bool
+atomic64_dec_and_test(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_dec_and_test(v);
+}
+
+static __always_inline bool
+atomic64_inc_and_test(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_inc_and_test(v);
+}
+
+static __always_inline bool
+atomic64_add_negative(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_add_negative(i, v);
+}
+
+static __always_inline bool
+atomic64_add_negative_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_add_negative_acquire(i, v);
+}
+
+static __always_inline bool
+atomic64_add_negative_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_add_negative_release(i, v);
+}
+
+static __always_inline bool
+atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_add_negative_relaxed(i, v);
+}
+
+static __always_inline s64
+atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_fetch_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic64_inc_not_zero(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_inc_not_zero(v);
+}
+
+static __always_inline bool
+atomic64_inc_unless_negative(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_inc_unless_negative(v);
+}
+
+static __always_inline bool
+atomic64_dec_unless_positive(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_dec_unless_positive(v);
+}
+
+static __always_inline s64
+atomic64_dec_if_positive(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic64_dec_if_positive(v);
+}
+
+static __always_inline long
+atomic_long_read(const atomic_long_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return arch_atomic_long_read(v);
+}
+
+static __always_inline long
+atomic_long_read_acquire(const atomic_long_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return arch_atomic_long_read_acquire(v);
+}
+
+static __always_inline void
+atomic_long_set(atomic_long_t *v, long i)
+{
+ instrument_atomic_write(v, sizeof(*v));
+ arch_atomic_long_set(v, i);
+}
+
+static __always_inline void
+atomic_long_set_release(atomic_long_t *v, long i)
+{
+ kcsan_release();
+ instrument_atomic_write(v, sizeof(*v));
+ arch_atomic_long_set_release(v, i);
+}
+
+static __always_inline void
+atomic_long_add(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_long_add(i, v);
+}
+
+static __always_inline long
+atomic_long_add_return(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_add_return(i, v);
+}
+
+static __always_inline long
+atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_add_return_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_add_return_release(i, v);
+}
+
+static __always_inline long
+atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_add_return_relaxed(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_add(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_add_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_add_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_add_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_long_sub(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_long_sub(i, v);
+}
+
+static __always_inline long
+atomic_long_sub_return(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_sub_return(i, v);
+}
+
+static __always_inline long
+atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_sub_return_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_sub_return_release(i, v);
+}
+
+static __always_inline long
+atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_sub_return_relaxed(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_sub(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_sub_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_sub_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_sub_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_long_inc(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_long_inc(v);
+}
+
+static __always_inline long
+atomic_long_inc_return(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_inc_return(v);
+}
+
+static __always_inline long
+atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_inc_return_acquire(v);
+}
+
+static __always_inline long
+atomic_long_inc_return_release(atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_inc_return_release(v);
+}
+
+static __always_inline long
+atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_inc_return_relaxed(v);
+}
+
+static __always_inline long
+atomic_long_fetch_inc(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_inc(v);
+}
+
+static __always_inline long
+atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_inc_acquire(v);
+}
+
+static __always_inline long
+atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_inc_release(v);
+}
+
+static __always_inline long
+atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_inc_relaxed(v);
+}
+
+static __always_inline void
+atomic_long_dec(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_long_dec(v);
+}
+
+static __always_inline long
+atomic_long_dec_return(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_dec_return(v);
+}
+
+static __always_inline long
+atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_dec_return_acquire(v);
+}
+
+static __always_inline long
+atomic_long_dec_return_release(atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_dec_return_release(v);
+}
+
+static __always_inline long
+atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_dec_return_relaxed(v);
+}
+
+static __always_inline long
+atomic_long_fetch_dec(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_dec(v);
+}
+
+static __always_inline long
+atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_dec_acquire(v);
+}
+
+static __always_inline long
+atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_dec_release(v);
+}
+
+static __always_inline long
+atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_dec_relaxed(v);
+}
+
+static __always_inline void
+atomic_long_and(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_long_and(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_and(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_and_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_and_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_and_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_long_andnot(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_long_andnot(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_andnot(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_andnot_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_andnot_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_andnot_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_long_or(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_long_or(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_or(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_or_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_or_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_or_relaxed(i, v);
+}
+
+static __always_inline void
+atomic_long_xor(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ arch_atomic_long_xor(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_xor(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_xor_acquire(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_xor_release(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_xor_relaxed(i, v);
+}
+
+static __always_inline long
+atomic_long_xchg(atomic_long_t *v, long i)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_xchg(v, i);
+}
+
+static __always_inline long
+atomic_long_xchg_acquire(atomic_long_t *v, long i)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_xchg_acquire(v, i);
+}
+
+static __always_inline long
+atomic_long_xchg_release(atomic_long_t *v, long i)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_xchg_release(v, i);
+}
+
+static __always_inline long
+atomic_long_xchg_relaxed(atomic_long_t *v, long i)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_xchg_relaxed(v, i);
+}
+
+static __always_inline long
+atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_cmpxchg(v, old, new);
+}
+
+static __always_inline long
+atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline long
+atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_cmpxchg_release(v, old, new);
+}
+
+static __always_inline long
+atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic_long_try_cmpxchg(v, old, new);
+}
+
+static __always_inline bool
+atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic_long_try_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline bool
+atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic_long_try_cmpxchg_release(v, old, new);
+}
+
+static __always_inline bool
+atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_atomic_read_write(old, sizeof(*old));
+ return arch_atomic_long_try_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_sub_and_test(i, v);
+}
+
+static __always_inline bool
+atomic_long_dec_and_test(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_dec_and_test(v);
+}
+
+static __always_inline bool
+atomic_long_inc_and_test(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_inc_and_test(v);
+}
+
+static __always_inline bool
+atomic_long_add_negative(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_add_negative(i, v);
+}
+
+static __always_inline bool
+atomic_long_add_negative_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_add_negative_acquire(i, v);
+}
+
+static __always_inline bool
+atomic_long_add_negative_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_add_negative_release(i, v);
+}
+
+static __always_inline bool
+atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_add_negative_relaxed(i, v);
+}
+
+static __always_inline long
+atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_fetch_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_add_unless(v, a, u);
+}
+
+static __always_inline bool
+atomic_long_inc_not_zero(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_inc_not_zero(v);
+}
+
+static __always_inline bool
+atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_inc_unless_negative(v);
+}
+
+static __always_inline bool
+atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_dec_unless_positive(v);
+}
+
+static __always_inline long
+atomic_long_dec_if_positive(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return arch_atomic_long_dec_if_positive(v);
+}
+
+#define xchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_xchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define xchg_acquire(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define xchg_release(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_xchg_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define xchg_relaxed(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_acquire(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_release(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_relaxed(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_acquire(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_release(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_relaxed(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define try_cmpxchg(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ arch_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_acquire(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ arch_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_release(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ arch_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_relaxed(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ arch_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ arch_try_cmpxchg64(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64_acquire(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ arch_try_cmpxchg64_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64_release(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ arch_try_cmpxchg64_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64_relaxed(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ arch_try_cmpxchg64_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define cmpxchg_local(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_local(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#define sync_cmpxchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_local(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ arch_try_cmpxchg_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64_local(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ arch_try_cmpxchg64_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define cmpxchg_double(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+ arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \
+})
+
+
+#define cmpxchg_double_local(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+ arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
+// 6b513a42e1a1b5962532a019b7fc91eaa044ad5e
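Every wrapper in atomic-instrumented.h follows the same three-step shape: annotate the implied ordering for KCSAN (kcsan_mb() on fully ordered ops, kcsan_release() on _release variants, nothing on _acquire/_relaxed), report the access to the sanitizers via instrument_atomic_*(), then fall through to the arch_ primitive. As an illustrative sketch only (not part of this patch; struct obj and its helpers are hypothetical), a refcount-style caller picks up both the sanitizer coverage and the documented ordering automatically:

/*
 * Hypothetical example: with KCSAN/KASAN enabled, both accesses below
 * are visible to the sanitizers through the wrappers above.
 */
struct obj {
	atomic_long_t refs;
};

static void obj_get(struct obj *o)
{
	atomic_long_inc(&o->refs);	/* unordered RMW, no kcsan_mb() */
}

static bool obj_put(struct obj *o)
{
	/* fully ordered: the wrapper issues kcsan_mb() before the RMW */
	return atomic_long_dec_and_test(&o->refs);
}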
diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h
new file mode 100644
index 000000000000..2fc51ba66beb
--- /dev/null
+++ b/include/linux/atomic/atomic-long.h
@@ -0,0 +1,1050 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-long.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_LONG_H
+#define _LINUX_ATOMIC_LONG_H
+
+#include <linux/compiler.h>
+#include <asm/types.h>
+
+#ifdef CONFIG_64BIT
+typedef atomic64_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
+#define atomic_long_cond_read_acquire atomic64_cond_read_acquire
+#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed
+#else
+typedef atomic_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
+#define atomic_long_cond_read_acquire atomic_cond_read_acquire
+#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed
+#endif
+
+#ifdef CONFIG_64BIT
+
+static __always_inline long
+arch_atomic_long_read(const atomic_long_t *v)
+{
+ return arch_atomic64_read(v);
+}
+
+static __always_inline long
+arch_atomic_long_read_acquire(const atomic_long_t *v)
+{
+ return arch_atomic64_read_acquire(v);
+}
+
+static __always_inline void
+arch_atomic_long_set(atomic_long_t *v, long i)
+{
+ arch_atomic64_set(v, i);
+}
+
+static __always_inline void
+arch_atomic_long_set_release(atomic_long_t *v, long i)
+{
+ arch_atomic64_set_release(v, i);
+}
+
+static __always_inline void
+arch_atomic_long_add(long i, atomic_long_t *v)
+{
+ arch_atomic64_add(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return(long i, atomic_long_t *v)
+{
+ return arch_atomic64_add_return(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic64_add_return_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+ return arch_atomic64_add_return_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic64_add_return_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_add(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_add_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_add_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_add_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_sub(long i, atomic_long_t *v)
+{
+ arch_atomic64_sub(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return(long i, atomic_long_t *v)
+{
+ return arch_atomic64_sub_return(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic64_sub_return_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+ return arch_atomic64_sub_return_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic64_sub_return_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_sub(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_sub_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_sub_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_sub_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_inc(atomic_long_t *v)
+{
+ arch_atomic64_inc(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return(atomic_long_t *v)
+{
+ return arch_atomic64_inc_return(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+ return arch_atomic64_inc_return_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_release(atomic_long_t *v)
+{
+ return arch_atomic64_inc_return_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+ return arch_atomic64_inc_return_relaxed(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc(atomic_long_t *v)
+{
+ return arch_atomic64_fetch_inc(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+ return arch_atomic64_fetch_inc_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+ return arch_atomic64_fetch_inc_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+ return arch_atomic64_fetch_inc_relaxed(v);
+}
+
+static __always_inline void
+arch_atomic_long_dec(atomic_long_t *v)
+{
+ arch_atomic64_dec(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return(atomic_long_t *v)
+{
+ return arch_atomic64_dec_return(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+ return arch_atomic64_dec_return_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_release(atomic_long_t *v)
+{
+ return arch_atomic64_dec_return_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+ return arch_atomic64_dec_return_relaxed(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec(atomic_long_t *v)
+{
+ return arch_atomic64_fetch_dec(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+ return arch_atomic64_fetch_dec_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+ return arch_atomic64_fetch_dec_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+ return arch_atomic64_fetch_dec_relaxed(v);
+}
+
+static __always_inline void
+arch_atomic_long_and(long i, atomic_long_t *v)
+{
+ arch_atomic64_and(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_and(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_and_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_and_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_and_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_andnot(long i, atomic_long_t *v)
+{
+ arch_atomic64_andnot(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_andnot(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_andnot_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_andnot_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_andnot_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_or(long i, atomic_long_t *v)
+{
+ arch_atomic64_or(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_or(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_or_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_or_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_or_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_xor(long i, atomic_long_t *v)
+{
+ arch_atomic64_xor(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_xor(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_xor_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_xor_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic64_fetch_xor_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_xchg(atomic_long_t *v, long i)
+{
+ return arch_atomic64_xchg(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_acquire(atomic_long_t *v, long i)
+{
+ return arch_atomic64_xchg_acquire(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_release(atomic_long_t *v, long i)
+{
+ return arch_atomic64_xchg_release(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i)
+{
+ return arch_atomic64_xchg_relaxed(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+ return arch_atomic64_cmpxchg(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+{
+ return arch_atomic64_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+ return arch_atomic64_cmpxchg_release(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+{
+ return arch_atomic64_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+ return arch_atomic64_try_cmpxchg(v, (s64 *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+ return arch_atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+ return arch_atomic64_try_cmpxchg_release(v, (s64 *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+ return arch_atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+ return arch_atomic64_sub_and_test(i, v);
+}
+
+static __always_inline bool
+arch_atomic_long_dec_and_test(atomic_long_t *v)
+{
+ return arch_atomic64_dec_and_test(v);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_and_test(atomic_long_t *v)
+{
+ return arch_atomic64_inc_and_test(v);
+}
+
+static __always_inline bool
+arch_atomic_long_add_negative(long i, atomic_long_t *v)
+{
+ return arch_atomic64_add_negative(i, v);
+}
+
+static __always_inline bool
+arch_atomic_long_add_negative_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic64_add_negative_acquire(i, v);
+}
+
+static __always_inline bool
+arch_atomic_long_add_negative_release(long i, atomic_long_t *v)
+{
+ return arch_atomic64_add_negative_release(i, v);
+}
+
+static __always_inline bool
+arch_atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic64_add_negative_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+ return arch_atomic64_fetch_add_unless(v, a, u);
+}
+
+static __always_inline bool
+arch_atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+ return arch_atomic64_add_unless(v, a, u);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_not_zero(atomic_long_t *v)
+{
+ return arch_atomic64_inc_not_zero(v);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+ return arch_atomic64_inc_unless_negative(v);
+}
+
+static __always_inline bool
+arch_atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+ return arch_atomic64_dec_unless_positive(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_if_positive(atomic_long_t *v)
+{
+ return arch_atomic64_dec_if_positive(v);
+}
+
+#else /* CONFIG_64BIT */
+
+static __always_inline long
+arch_atomic_long_read(const atomic_long_t *v)
+{
+ return arch_atomic_read(v);
+}
+
+static __always_inline long
+arch_atomic_long_read_acquire(const atomic_long_t *v)
+{
+ return arch_atomic_read_acquire(v);
+}
+
+static __always_inline void
+arch_atomic_long_set(atomic_long_t *v, long i)
+{
+ arch_atomic_set(v, i);
+}
+
+static __always_inline void
+arch_atomic_long_set_release(atomic_long_t *v, long i)
+{
+ arch_atomic_set_release(v, i);
+}
+
+static __always_inline void
+arch_atomic_long_add(long i, atomic_long_t *v)
+{
+ arch_atomic_add(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return(long i, atomic_long_t *v)
+{
+ return arch_atomic_add_return(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic_add_return_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+ return arch_atomic_add_return_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic_add_return_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_add(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_add_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_add_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_add_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_sub(long i, atomic_long_t *v)
+{
+ arch_atomic_sub(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return(long i, atomic_long_t *v)
+{
+ return arch_atomic_sub_return(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic_sub_return_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+ return arch_atomic_sub_return_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic_sub_return_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_sub(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_sub_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_sub_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_sub_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_inc(atomic_long_t *v)
+{
+ arch_atomic_inc(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return(atomic_long_t *v)
+{
+ return arch_atomic_inc_return(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+ return arch_atomic_inc_return_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_release(atomic_long_t *v)
+{
+ return arch_atomic_inc_return_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+ return arch_atomic_inc_return_relaxed(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc(atomic_long_t *v)
+{
+ return arch_atomic_fetch_inc(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+ return arch_atomic_fetch_inc_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+ return arch_atomic_fetch_inc_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+ return arch_atomic_fetch_inc_relaxed(v);
+}
+
+static __always_inline void
+arch_atomic_long_dec(atomic_long_t *v)
+{
+ arch_atomic_dec(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return(atomic_long_t *v)
+{
+ return arch_atomic_dec_return(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+ return arch_atomic_dec_return_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_release(atomic_long_t *v)
+{
+ return arch_atomic_dec_return_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+ return arch_atomic_dec_return_relaxed(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec(atomic_long_t *v)
+{
+ return arch_atomic_fetch_dec(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+ return arch_atomic_fetch_dec_acquire(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+ return arch_atomic_fetch_dec_release(v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+ return arch_atomic_fetch_dec_relaxed(v);
+}
+
+static __always_inline void
+arch_atomic_long_and(long i, atomic_long_t *v)
+{
+ arch_atomic_and(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_and(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_and_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_and_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_and_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_andnot(long i, atomic_long_t *v)
+{
+ arch_atomic_andnot(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_andnot(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_andnot_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_andnot_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_andnot_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_or(long i, atomic_long_t *v)
+{
+ arch_atomic_or(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_or(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_or_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_or_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_or_relaxed(i, v);
+}
+
+static __always_inline void
+arch_atomic_long_xor(long i, atomic_long_t *v)
+{
+ arch_atomic_xor(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_xor(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_xor_acquire(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_xor_release(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic_fetch_xor_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_xchg(atomic_long_t *v, long i)
+{
+ return arch_atomic_xchg(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_acquire(atomic_long_t *v, long i)
+{
+ return arch_atomic_xchg_acquire(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_release(atomic_long_t *v, long i)
+{
+ return arch_atomic_xchg_release(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i)
+{
+ return arch_atomic_xchg_relaxed(v, i);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+ return arch_atomic_cmpxchg(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+{
+ return arch_atomic_cmpxchg_acquire(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+ return arch_atomic_cmpxchg_release(v, old, new);
+}
+
+static __always_inline long
+arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+{
+ return arch_atomic_cmpxchg_relaxed(v, old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+ return arch_atomic_try_cmpxchg(v, (int *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+ return arch_atomic_try_cmpxchg_acquire(v, (int *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+ return arch_atomic_try_cmpxchg_release(v, (int *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+ return arch_atomic_try_cmpxchg_relaxed(v, (int *)old, new);
+}
+
+static __always_inline bool
+arch_atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+ return arch_atomic_sub_and_test(i, v);
+}
+
+static __always_inline bool
+arch_atomic_long_dec_and_test(atomic_long_t *v)
+{
+ return arch_atomic_dec_and_test(v);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_and_test(atomic_long_t *v)
+{
+ return arch_atomic_inc_and_test(v);
+}
+
+static __always_inline bool
+arch_atomic_long_add_negative(long i, atomic_long_t *v)
+{
+ return arch_atomic_add_negative(i, v);
+}
+
+static __always_inline bool
+arch_atomic_long_add_negative_acquire(long i, atomic_long_t *v)
+{
+ return arch_atomic_add_negative_acquire(i, v);
+}
+
+static __always_inline bool
+arch_atomic_long_add_negative_release(long i, atomic_long_t *v)
+{
+ return arch_atomic_add_negative_release(i, v);
+}
+
+static __always_inline bool
+arch_atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
+{
+ return arch_atomic_add_negative_relaxed(i, v);
+}
+
+static __always_inline long
+arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+ return arch_atomic_fetch_add_unless(v, a, u);
+}
+
+static __always_inline bool
+arch_atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+ return arch_atomic_add_unless(v, a, u);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_not_zero(atomic_long_t *v)
+{
+ return arch_atomic_inc_not_zero(v);
+}
+
+static __always_inline bool
+arch_atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+ return arch_atomic_inc_unless_negative(v);
+}
+
+static __always_inline bool
+arch_atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+ return arch_atomic_dec_unless_positive(v);
+}
+
+static __always_inline long
+arch_atomic_long_dec_if_positive(atomic_long_t *v)
+{
+ return arch_atomic_dec_if_positive(v);
+}
+
+#endif /* CONFIG_64BIT */
+#endif /* _LINUX_ATOMIC_LONG_H */
+// a194c07d7d2f4b0e178d3c118c919775d5d65f50
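atomic-long.h resolves atomic_long_t to atomic64_t on 64-bit kernels and to atomic_t on 32-bit ones, so long-sized atomics always operate at native word width without a separate implementation. The try_cmpxchg variants cast the caller's long *old to s64 * or int * accordingly, which is safe precisely because long matches the machine word on both configurations. A hedged usage sketch (hypothetical helper, not from this patch) of the try_cmpxchg() form:

/*
 * Hypothetical example: a saturating increment. atomic_long_read()
 * comes from the instrumented layer; atomic_long_try_cmpxchg()
 * updates 'old' on failure, so the loop re-reads for free.
 */
static bool counter_inc_below(atomic_long_t *v, long max)
{
	long old = atomic_long_read(v);

	do {
		if (old >= max)
			return false;
	} while (!atomic_long_try_cmpxchg(v, &old, old + 1));

	return true;
}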
diff --git a/include/linux/attribute_container.h b/include/linux/attribute_container.h
index 896c6892f327..e4004d1e6725 100644
--- a/include/linux/attribute_container.h
+++ b/include/linux/attribute_container.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* attribute_container.h - a generic container for all classes
*
* Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
- *
- * This file is licensed under GPLv2
*/
#ifndef _ATTRIBUTE_CONTAINER_H_
@@ -55,6 +54,13 @@ void attribute_container_device_trigger(struct device *dev,
int (*fn)(struct attribute_container *,
struct device *,
struct device *));
+int attribute_container_device_trigger_safe(struct device *dev,
+ int (*fn)(struct attribute_container *,
+ struct device *,
+ struct device *),
+ int (*undo)(struct attribute_container *,
+ struct device *,
+ struct device *));
void attribute_container_trigger(struct device *dev,
int (*fn)(struct attribute_container *,
struct device *));
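The new attribute_container_device_trigger_safe() pairs the usual fn() callback with an undo() callback: fn() is invoked for each matching container, and if any invocation fails, undo() is run on the containers already handled so the device is not left half-configured. A hedged caller sketch follows (my_setup()/my_teardown() are hypothetical stand-ins for per-classdev work that can fail):

static int my_add(struct attribute_container *cont,
		  struct device *dev, struct device *classdev)
{
	return my_setup(classdev);	/* may return -ENOMEM etc. */
}

static int my_remove(struct attribute_container *cont,
		     struct device *dev, struct device *classdev)
{
	my_teardown(classdev);
	return 0;
}

/* err = attribute_container_device_trigger_safe(dev, my_add, my_remove); */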
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 2150bdccfbab..31086a72e32a 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -1,31 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* audit.h -- Auditing support
*
* Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
* All Rights Reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* Written by Rickard E. (Rik) Faith <faith@redhat.com>
- *
*/
#ifndef _LINUX_AUDIT_H_
#define _LINUX_AUDIT_H_
#include <linux/sched.h>
#include <linux/ptrace.h>
+#include <linux/audit_arch.h>
#include <uapi/linux/audit.h>
+#include <uapi/linux/netfilter/nf_tables.h>
+#include <uapi/linux/fanotify.h>
#define AUDIT_INO_UNSET ((unsigned long)-1)
#define AUDIT_DEV_UNSET ((dev_t)-1)
@@ -33,7 +22,7 @@
struct audit_sig_info {
uid_t uid;
pid_t pid;
- char ctx[0];
+ char ctx[];
};
struct audit_buffer;
@@ -85,7 +74,51 @@ struct audit_field {
u32 op;
};
-extern int is_audit_feature_set(int which);
+enum audit_ntp_type {
+ AUDIT_NTP_OFFSET,
+ AUDIT_NTP_FREQ,
+ AUDIT_NTP_STATUS,
+ AUDIT_NTP_TAI,
+ AUDIT_NTP_TICK,
+ AUDIT_NTP_ADJUST,
+
+ AUDIT_NTP_NVALS /* count */
+};
+
+#ifdef CONFIG_AUDITSYSCALL
+struct audit_ntp_val {
+ long long oldval, newval;
+};
+
+struct audit_ntp_data {
+ struct audit_ntp_val vals[AUDIT_NTP_NVALS];
+};
+#else
+struct audit_ntp_data {};
+#endif
+
+enum audit_nfcfgop {
+ AUDIT_XT_OP_REGISTER,
+ AUDIT_XT_OP_REPLACE,
+ AUDIT_XT_OP_UNREGISTER,
+ AUDIT_NFT_OP_TABLE_REGISTER,
+ AUDIT_NFT_OP_TABLE_UNREGISTER,
+ AUDIT_NFT_OP_CHAIN_REGISTER,
+ AUDIT_NFT_OP_CHAIN_UNREGISTER,
+ AUDIT_NFT_OP_RULE_REGISTER,
+ AUDIT_NFT_OP_RULE_UNREGISTER,
+ AUDIT_NFT_OP_SET_REGISTER,
+ AUDIT_NFT_OP_SET_UNREGISTER,
+ AUDIT_NFT_OP_SETELEM_REGISTER,
+ AUDIT_NFT_OP_SETELEM_UNREGISTER,
+ AUDIT_NFT_OP_GEN_REGISTER,
+ AUDIT_NFT_OP_OBJ_REGISTER,
+ AUDIT_NFT_OP_OBJ_UNREGISTER,
+ AUDIT_NFT_OP_OBJ_RESET,
+ AUDIT_NFT_OP_FLOWTABLE_REGISTER,
+ AUDIT_NFT_OP_FLOWTABLE_UNREGISTER,
+ AUDIT_NFT_OP_INVALID,
+};
extern int __init audit_register_class(int class, unsigned *list);
extern int audit_classify_syscall(int abi, unsigned syscall);
@@ -115,8 +148,9 @@ extern int audit_classify_compat_syscall(int abi, unsigned syscall);
struct filename;
-extern void audit_log_session_info(struct audit_buffer *ab);
-
+#define AUDIT_OFF 0
+#define AUDIT_ON 1
+#define AUDIT_LOCKED 2
#ifdef CONFIG_AUDIT
/* These are defined in audit.c */
/* Public API */
@@ -146,19 +180,12 @@ extern void audit_log_d_path(struct audit_buffer *ab,
const struct path *path);
extern void audit_log_key(struct audit_buffer *ab,
char *key);
-extern void audit_log_link_denied(const char *operation,
- const struct path *link);
+extern void audit_log_path_denied(int type,
+ const char *operation);
extern void audit_log_lost(const char *message);
-#ifdef CONFIG_SECURITY
-extern void audit_log_secctx(struct audit_buffer *ab, u32 secid);
-#else
-static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid)
-{ }
-#endif
extern int audit_log_task_context(struct audit_buffer *ab);
-extern void audit_log_task_info(struct audit_buffer *ab,
- struct task_struct *tsk);
+extern void audit_log_task_info(struct audit_buffer *ab);
extern int audit_update_lsm_rules(void);
@@ -166,7 +193,22 @@ extern int audit_update_lsm_rules(void);
extern int audit_rule_change(int type, int seq, void *data, size_t datasz);
extern int audit_list_rules_send(struct sk_buff *request_skb, int seq);
+extern int audit_set_loginuid(kuid_t loginuid);
+
+static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
+{
+ return tsk->loginuid;
+}
+
+static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
+{
+ return tsk->sessionid;
+}
+
extern u32 audit_enabled;
+
+extern int audit_signal_info(int sig, struct task_struct *t);
+
#else /* CONFIG_AUDIT */
static inline __printf(4, 5)
void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
@@ -200,19 +242,32 @@ static inline void audit_log_d_path(struct audit_buffer *ab,
{ }
static inline void audit_log_key(struct audit_buffer *ab, char *key)
{ }
-static inline void audit_log_link_denied(const char *string,
- const struct path *link)
-{ }
-static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid)
+static inline void audit_log_path_denied(int type, const char *operation)
{ }
static inline int audit_log_task_context(struct audit_buffer *ab)
{
return 0;
}
-static inline void audit_log_task_info(struct audit_buffer *ab,
- struct task_struct *tsk)
+static inline void audit_log_task_info(struct audit_buffer *ab)
{ }
-#define audit_enabled 0
+
+static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
+{
+ return INVALID_UID;
+}
+
+static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
+{
+ return AUDIT_SID_UNSET;
+}
+
+#define audit_enabled AUDIT_OFF
+
+static inline int audit_signal_info(int sig, struct task_struct *t)
+{
+ return 0;
+}
+
#endif /* CONFIG_AUDIT */
#ifdef CONFIG_AUDIT_COMPAT_GENERIC
@@ -221,6 +276,10 @@ static inline void audit_log_task_info(struct audit_buffer *ab,
#define audit_is_compat(arch) false
#endif
+#define AUDIT_INODE_PARENT 1 /* dentry represents the parent */
+#define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */
+#define AUDIT_INODE_NOEVAL 4 /* audit record incomplete */
+
#ifdef CONFIG_AUDITSYSCALL
#include <asm/syscall.h> /* for syscall_get_arch() */
@@ -228,26 +287,37 @@ static inline void audit_log_task_info(struct audit_buffer *ab,
/* Public API */
extern int audit_alloc(struct task_struct *task);
extern void __audit_free(struct task_struct *task);
+extern void __audit_uring_entry(u8 op);
+extern void __audit_uring_exit(int success, long code);
extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3);
extern void __audit_syscall_exit(int ret_success, long ret_value);
extern struct filename *__audit_reusename(const __user char *uptr);
extern void __audit_getname(struct filename *name);
-
-#define AUDIT_INODE_PARENT 1 /* dentry represents the parent */
-#define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */
extern void __audit_inode(struct filename *name, const struct dentry *dentry,
unsigned int flags);
extern void __audit_file(const struct file *);
extern void __audit_inode_child(struct inode *parent,
const struct dentry *dentry,
const unsigned char type);
-extern void __audit_seccomp(unsigned long syscall, long signr, int code);
+extern void audit_seccomp(unsigned long syscall, long signr, int code);
+extern void audit_seccomp_actions_logged(const char *names,
+ const char *old_names, int res);
extern void __audit_ptrace(struct task_struct *t);
+static inline void audit_set_context(struct task_struct *task, struct audit_context *ctx)
+{
+ task->audit_context = ctx;
+}
+
+static inline struct audit_context *audit_context(void)
+{
+ return current->audit_context;
+}
+
static inline bool audit_dummy_context(void)
{
- void *p = current->audit_context;
+ void *p = audit_context();
return !p || *(int *)p;
}
static inline void audit_free(struct task_struct *task)
@@ -255,16 +325,31 @@ static inline void audit_free(struct task_struct *task)
if (unlikely(task->audit_context))
__audit_free(task);
}
+static inline void audit_uring_entry(u8 op)
+{
+ /*
+ * We intentionally check audit_context() before audit_enabled as most
+ * Linux systems (as of ~2021) rely on systemd which forces audit to
+ * be enabled regardless of the user's audit configuration.
+ */
+ if (unlikely(audit_context() && audit_enabled))
+ __audit_uring_entry(op);
+}
+static inline void audit_uring_exit(int success, long code)
+{
+ if (unlikely(audit_context()))
+ __audit_uring_exit(success, code);
+}
static inline void audit_syscall_entry(int major, unsigned long a0,
unsigned long a1, unsigned long a2,
unsigned long a3)
{
- if (unlikely(current->audit_context))
+ if (unlikely(audit_context()))
__audit_syscall_entry(major, a0, a1, a2, a3);
}
static inline void audit_syscall_exit(void *pt_regs)
{
- if (unlikely(current->audit_context)) {
+ if (unlikely(audit_context())) {
int success = is_syscall_success(pt_regs);
long return_code = regs_return_value(pt_regs);
@@ -284,13 +369,9 @@ static inline void audit_getname(struct filename *name)
}
static inline void audit_inode(struct filename *name,
const struct dentry *dentry,
- unsigned int parent) {
- if (unlikely(!audit_dummy_context())) {
- unsigned int flags = 0;
- if (parent)
- flags |= AUDIT_INODE_PARENT;
- __audit_inode(name, dentry, flags);
- }
+ unsigned int aflags) {
+ if (unlikely(!audit_dummy_context()))
+ __audit_inode(name, dentry, aflags);
}
static inline void audit_file(struct file *file)
{
@@ -312,16 +393,6 @@ static inline void audit_inode_child(struct inode *parent,
}
void audit_core_dumps(long signr);
-static inline void audit_seccomp(unsigned long syscall, long signr, int code)
-{
- if (!audit_enabled)
- return;
-
- /* Force a record to be reported if a signal was delivered. */
- if (signr || unlikely(!audit_dummy_context()))
- __audit_seccomp(syscall, signr, code);
-}
-
static inline void audit_ptrace(struct task_struct *t)
{
if (unlikely(!audit_dummy_context()))
@@ -329,21 +400,6 @@ static inline void audit_ptrace(struct task_struct *t)
}
/* Private API (for audit.c only) */
-extern unsigned int audit_serial(void);
-extern int auditsc_get_stamp(struct audit_context *ctx,
- struct timespec64 *t, unsigned int *serial);
-extern int audit_set_loginuid(kuid_t loginuid);
-
-static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
-{
- return tsk->loginuid;
-}
-
-static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
-{
- return tsk->sessionid;
-}
-
extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
extern void __audit_bprm(struct linux_binprm *bprm);
@@ -351,7 +407,7 @@ extern int __audit_socketcall(int nargs, unsigned long *args);
extern int __audit_sockaddr(int len, void *addr);
extern void __audit_fd_pair(int fd1, int fd2);
extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr);
-extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout);
+extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec64 *abs_timeout);
extern void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification);
extern void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat);
extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
@@ -359,7 +415,13 @@ extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
const struct cred *old);
extern void __audit_log_capset(const struct cred *new, const struct cred *old);
extern void __audit_mmap_fd(int fd, int flags);
+extern void __audit_openat2_how(struct open_how *how);
extern void __audit_log_kern_module(char *name);
+extern void __audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar);
+extern void __audit_tk_injoffset(struct timespec64 offset);
+extern void __audit_ntp_log(const struct audit_ntp_data *ad);
+extern void __audit_log_nfcfg(const char *name, u8 af, unsigned int nentries,
+ enum audit_nfcfgop op, gfp_t gfp);
static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
{
@@ -412,7 +474,7 @@ static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr)
if (unlikely(!audit_dummy_context()))
__audit_mq_open(oflag, mode, attr);
}
-static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout)
+static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec64 *abs_timeout)
{
if (unlikely(!audit_dummy_context()))
__audit_mq_sendrecv(mqdes, msg_len, msg_prio, abs_timeout);
@@ -450,12 +512,65 @@ static inline void audit_mmap_fd(int fd, int flags)
__audit_mmap_fd(fd, flags);
}
+static inline void audit_openat2_how(struct open_how *how)
+{
+ if (unlikely(!audit_dummy_context()))
+ __audit_openat2_how(how);
+}
+
static inline void audit_log_kern_module(char *name)
{
if (!audit_dummy_context())
__audit_log_kern_module(name);
}
+static inline void audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar)
+{
+ if (!audit_dummy_context())
+ __audit_fanotify(response, friar);
+}
+
+static inline void audit_tk_injoffset(struct timespec64 offset)
+{
+ /* ignore no-op events */
+ if (offset.tv_sec == 0 && offset.tv_nsec == 0)
+ return;
+
+ if (!audit_dummy_context())
+ __audit_tk_injoffset(offset);
+}
+
+static inline void audit_ntp_init(struct audit_ntp_data *ad)
+{
+ memset(ad, 0, sizeof(*ad));
+}
+
+static inline void audit_ntp_set_old(struct audit_ntp_data *ad,
+ enum audit_ntp_type type, long long val)
+{
+ ad->vals[type].oldval = val;
+}
+
+static inline void audit_ntp_set_new(struct audit_ntp_data *ad,
+ enum audit_ntp_type type, long long val)
+{
+ ad->vals[type].newval = val;
+}
+
+static inline void audit_ntp_log(const struct audit_ntp_data *ad)
+{
+ if (!audit_dummy_context())
+ __audit_ntp_log(ad);
+}
+
+static inline void audit_log_nfcfg(const char *name, u8 af,
+ unsigned int nentries,
+ enum audit_nfcfgop op, gfp_t gfp)
+{
+ if (audit_enabled)
+ __audit_log_nfcfg(name, af, nentries, op, gfp);
+}
+
extern int audit_n_rules;
extern int audit_signals;
#else /* CONFIG_AUDITSYSCALL */
@@ -465,6 +580,10 @@ static inline int audit_alloc(struct task_struct *task)
}
static inline void audit_free(struct task_struct *task)
{ }
+static inline void audit_uring_entry(u8 op)
+{ }
+static inline void audit_uring_exit(int success, long code)
+{ }
static inline void audit_syscall_entry(int major, unsigned long a0,
unsigned long a1, unsigned long a2,
unsigned long a3)
@@ -475,23 +594,21 @@ static inline bool audit_dummy_context(void)
{
return true;
}
+static inline void audit_set_context(struct task_struct *task, struct audit_context *ctx)
+{ }
+static inline struct audit_context *audit_context(void)
+{
+ return NULL;
+}
static inline struct filename *audit_reusename(const __user char *name)
{
return NULL;
}
static inline void audit_getname(struct filename *name)
{ }
-static inline void __audit_inode(struct filename *name,
- const struct dentry *dentry,
- unsigned int flags)
-{ }
-static inline void __audit_inode_child(struct inode *parent,
- const struct dentry *dentry,
- const unsigned char type)
-{ }
static inline void audit_inode(struct filename *name,
const struct dentry *dentry,
- unsigned int parent)
+ unsigned int aflags)
{ }
static inline void audit_file(struct file *file)
{
@@ -505,23 +622,11 @@ static inline void audit_inode_child(struct inode *parent,
{ }
static inline void audit_core_dumps(long signr)
{ }
-static inline void __audit_seccomp(unsigned long syscall, long signr, int code)
-{ }
static inline void audit_seccomp(unsigned long syscall, long signr, int code)
{ }
-static inline int auditsc_get_stamp(struct audit_context *ctx,
- struct timespec64 *t, unsigned int *serial)
-{
- return 0;
-}
-static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
-{
- return INVALID_UID;
-}
-static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
-{
- return -1;
-}
+static inline void audit_seccomp_actions_logged(const char *names,
+ const char *old_names, int res)
+{ }
static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
{ }
static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
@@ -549,7 +654,7 @@ static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr)
{ }
static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len,
unsigned int msg_prio,
- const struct timespec *abs_timeout)
+ const struct timespec64 *abs_timeout)
{ }
static inline void audit_mq_notify(mqd_t mqdes,
const struct sigevent *notification)
@@ -568,12 +673,41 @@ static inline void audit_log_capset(const struct cred *new,
static inline void audit_mmap_fd(int fd, int flags)
{ }
+static inline void audit_openat2_how(struct open_how *how)
+{ }
+
static inline void audit_log_kern_module(char *name)
{
}
+static inline void audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar)
+{ }
+
+static inline void audit_tk_injoffset(struct timespec64 offset)
+{ }
+
+static inline void audit_ntp_init(struct audit_ntp_data *ad)
+{ }
+
+static inline void audit_ntp_set_old(struct audit_ntp_data *ad,
+ enum audit_ntp_type type, long long val)
+{ }
+
+static inline void audit_ntp_set_new(struct audit_ntp_data *ad,
+ enum audit_ntp_type type, long long val)
+{ }
+
+static inline void audit_ntp_log(const struct audit_ntp_data *ad)
+{ }
+
static inline void audit_ptrace(struct task_struct *t)
{ }
+
+static inline void audit_log_nfcfg(const char *name, u8 af,
+ unsigned int nentries,
+ enum audit_nfcfgop op, gfp_t gfp)
+{ }
+
#define audit_n_rules 0
#define audit_signals 0
#endif /* CONFIG_AUDITSYSCALL */
@@ -583,9 +717,4 @@ static inline bool audit_loginuid_set(struct task_struct *tsk)
return uid_valid(audit_get_loginuid(tsk));
}
-static inline void audit_log_string(struct audit_buffer *ab, const char *buf)
-{
- audit_log_n_string(ab, buf, strlen(buf));
-}
-
#endif
diff --git a/include/linux/audit_arch.h b/include/linux/audit_arch.h
new file mode 100644
index 000000000000..8fdb1afe251a
--- /dev/null
+++ b/include/linux/audit_arch.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* audit_arch.h -- Arch layer specific support for audit
+ *
+ * Copyright 2021 Red Hat Inc., Durham, North Carolina.
+ * All Rights Reserved.
+ *
+ * Author: Richard Guy Briggs <rgb@redhat.com>
+ */
+#ifndef _LINUX_AUDIT_ARCH_H_
+#define _LINUX_AUDIT_ARCH_H_
+
+enum auditsc_class_t {
+ AUDITSC_NATIVE = 0,
+ AUDITSC_COMPAT,
+ AUDITSC_OPEN,
+ AUDITSC_OPENAT,
+ AUDITSC_SOCKETCALL,
+ AUDITSC_EXECVE,
+ AUDITSC_OPENAT2,
+
+ AUDITSC_NVALS /* count */
+};
+
+#endif
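For context, arch code maps raw syscall numbers onto these classes; below is a minimal sketch modeled on the generic classifier in lib/audit.c, not part of this patch. Which __NR_* constants exist depends on the architecture:

    /* Sketch of an arch syscall classifier returning auditsc_class_t values */
    int audit_classify_syscall(int abi, unsigned int syscall)
    {
    	switch (syscall) {
    #ifdef __NR_open
    	case __NR_open:
    		return AUDITSC_OPEN;
    #endif
    #ifdef __NR_openat
    	case __NR_openat:
    		return AUDITSC_OPENAT;
    #endif
    #ifdef __NR_openat2
    	case __NR_openat2:
    		return AUDITSC_OPENAT2;
    #endif
    	case __NR_execve:
    		return AUDITSC_EXECVE;
    	default:
    		return AUDITSC_NATIVE;
    	}
    }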
diff --git a/include/linux/auto_dev-ioctl.h b/include/linux/auto_dev-ioctl.h
index 28c15050ebe6..6e1ca6f95f80 100644
--- a/include/linux/auto_dev-ioctl.h
+++ b/include/linux/auto_dev-ioctl.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2008 Red Hat, Inc. All rights reserved.
* Copyright 2008 Ian Kent <raven@themaw.net>
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
*/
#ifndef _LINUX_AUTO_DEV_IOCTL_H
diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h
index b8f814c95cf5..893f952ca40c 100644
--- a/include/linux/auto_fs.h
+++ b/include/linux/auto_fs.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 1997 Transmeta Corporation - All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
*/
#ifndef _LINUX_AUTO_FS_H
diff --git a/include/linux/auxiliary_bus.h b/include/linux/auxiliary_bus.h
new file mode 100644
index 000000000000..de21d9d24a95
--- /dev/null
+++ b/include/linux/auxiliary_bus.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020 Intel Corporation
+ *
+ * Please see Documentation/driver-api/auxiliary_bus.rst for more information.
+ */
+
+#ifndef _AUXILIARY_BUS_H_
+#define _AUXILIARY_BUS_H_
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+/**
+ * DOC: DEVICE_LIFESPAN
+ *
+ * The registering driver is the entity that allocates memory for the
+ * auxiliary_device and registers it on the auxiliary bus. It is important to
+ * note that, as opposed to the platform bus, the registering driver is wholly
+ * responsible for the management of the memory used for the device object.
+ *
+ * To be clear, the memory for the auxiliary_device is freed in the release()
+ * callback defined by the registering driver. The registering driver should
+ * only call auxiliary_device_delete() and then auxiliary_device_uninit() when
+ * it is done with the device. The release() function is then automatically
+ * called if and when other code releases its reference to the device.
+ *
+ * A parent object, defined in the shared header file, contains the
+ * auxiliary_device. It also contains a pointer to the shared object(s), which
+ * are also defined in the shared header. Both the parent object and the shared
+ * object(s) are allocated by the registering driver. This layout allows the
+ * auxiliary_driver's registering module to perform a container_of() call to go
+ * from the pointer to the auxiliary_device, that is passed during the call to
+ * the auxiliary_driver's probe function, up to the parent object, and then
+ * have access to the shared object(s).
+ *
+ * The memory for the shared object(s) must have a lifespan equal to, or
+ * greater than, the lifespan of the memory for the auxiliary_device. The
+ * auxiliary_driver should only consider that the shared object is valid as
+ * long as the auxiliary_device is still registered on the auxiliary bus. It
+ * is up to the registering driver to manage (e.g. free or keep available) the
+ * memory for the shared object beyond the life of the auxiliary_device.
+ *
+ * The registering driver must unregister all auxiliary devices before its own
+ * driver.remove() is completed. An easy way to ensure this is to use the
+ * devm_add_action_or_reset() call to register a function against the parent
+ * device which unregisters the auxiliary device object(s).
+ *
+ * Finally, any operations which operate on the auxiliary devices must continue
+ * to function (if only to return an error) after the registering driver
+ * unregisters the auxiliary device.
+ */
+
+/**
+ * struct auxiliary_device - auxiliary device object.
+ * @dev: Device.
+ *       The release and parent fields of the device structure must be filled
+ *       in.
+ * @name: Match name found by the auxiliary device driver.
+ * @id: unique identifier if multiple devices of the same name are exported.
+ *
+ * An auxiliary_device represents a part of its parent device's functionality.
+ * It is given a name that, combined with the registering driver's
+ * KBUILD_MODNAME, creates a match_name that is used for driver binding, and an
+ * id that, combined with the match_name, provides a unique name to register with
+ * the bus subsystem. For example, a driver registering an auxiliary device is
+ * named 'foo_mod.ko' and the subdevice is named 'foo_dev'. The match name is
+ * therefore 'foo_mod.foo_dev'.
+ *
+ * Registering an auxiliary_device is a three-step process.
+ *
+ * First, a 'struct auxiliary_device' needs to be defined or allocated for each
+ * sub-device desired. The name, id, dev.release, and dev.parent fields of
+ * this structure must be filled in as follows.
+ *
+ * The 'name' field is to be given a name that is recognized by the auxiliary
+ * driver. If two auxiliary_devices with the same match_name, e.g.
+ * "foo_mod.foo_dev", are registered onto the bus, they must have unique id
+ * values (e.g. "x" and "y") so that the registered devices' names are
+ * "foo_mod.foo_dev.x" and "foo_mod.foo_dev.y". If match_name + id are not
+ * unique, then the device_add fails and generates an error message.
+ *
+ * The auxiliary_device.dev.type.release or auxiliary_device.dev.release must
+ * be populated with a non-NULL pointer to successfully register the
+ * auxiliary_device. This release call is where resources associated with the
+ * auxiliary device must be freed, because once the device is placed on the
+ * bus the parent driver cannot tell what other code may still hold a
+ * reference to this data.
+ *
+ * The auxiliary_device.dev.parent should also be set, typically to the
+ * registering driver's device.
+ *
+ * Second, call auxiliary_device_init(), which checks several aspects of the
+ * auxiliary_device struct and performs a device_initialize(). After this step
+ * completes, any error state must have a call to auxiliary_device_uninit() in
+ * its resolution path.
+ *
+ * The third and final step in registering an auxiliary_device is to perform a
+ * call to auxiliary_device_add(), which sets the name of the device and adds
+ * the device to the bus.
+ *
+ * .. code-block:: c
+ *
+ * #define MY_DEVICE_NAME "foo_dev"
+ *
+ * ...
+ *
+ * struct auxiliary_device *my_aux_dev = my_aux_dev_alloc(xxx);
+ *
+ * // Step 1:
+ * my_aux_dev->name = MY_DEVICE_NAME;
+ * my_aux_dev->id = my_unique_id_alloc(xxx);
+ * my_aux_dev->dev.release = my_aux_dev_release;
+ * my_aux_dev->dev.parent = my_dev;
+ *
+ * // Step 2:
+ * if (auxiliary_device_init(my_aux_dev))
+ * goto fail;
+ *
+ * // Step 3:
+ * if (auxiliary_device_add(my_aux_dev)) {
+ * auxiliary_device_uninit(my_aux_dev);
+ * goto fail;
+ * }
+ *
+ * ...
+ *
+ *
+ * Unregistering an auxiliary_device is a two-step process to mirror the
+ * register process. First call auxiliary_device_delete(), then call
+ * auxiliary_device_uninit().
+ *
+ * .. code-block:: c
+ *
+ * auxiliary_device_delete(my_dev->my_aux_dev);
+ * auxiliary_device_uninit(my_dev->my_aux_dev);
+ */
+struct auxiliary_device {
+ struct device dev;
+ const char *name;
+ u32 id;
+};
+
+/**
+ * struct auxiliary_driver - Definition of an auxiliary bus driver
+ * @probe: Called when a matching device is added to the bus.
+ * @remove: Called when device is removed from the bus.
+ * @shutdown: Called at shut-down time to quiesce the device.
+ * @suspend: Called to put the device to sleep mode. Usually to a power state.
+ * @resume: Called to bring a device from sleep mode.
+ * @name: Driver name.
+ * @driver: Core driver structure.
+ * @id_table: Table of devices this driver should match on the bus.
+ *
+ * Auxiliary drivers follow the standard driver model convention, where
+ * discovery/enumeration is handled by the core, and drivers provide probe()
+ * and remove() methods. They support power management and shutdown
+ * notifications using the standard conventions.
+ *
+ * Auxiliary drivers register themselves with the bus by calling
+ * auxiliary_driver_register(). The id_table contains the match_names of
+ * auxiliary devices that a driver can bind with.
+ *
+ * .. code-block:: c
+ *
+ * static const struct auxiliary_device_id my_auxiliary_id_table[] = {
+ * { .name = "foo_mod.foo_dev" },
+ * {},
+ * };
+ *
+ * MODULE_DEVICE_TABLE(auxiliary, my_auxiliary_id_table);
+ *
+ * struct auxiliary_driver my_drv = {
+ * .name = "myauxiliarydrv",
+ * .id_table = my_auxiliary_id_table,
+ * .probe = my_drv_probe,
+ * .remove = my_drv_remove
+ * };
+ */
+struct auxiliary_driver {
+ int (*probe)(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id);
+ void (*remove)(struct auxiliary_device *auxdev);
+ void (*shutdown)(struct auxiliary_device *auxdev);
+ int (*suspend)(struct auxiliary_device *auxdev, pm_message_t state);
+ int (*resume)(struct auxiliary_device *auxdev);
+ const char *name;
+ struct device_driver driver;
+ const struct auxiliary_device_id *id_table;
+};
+
+static inline void *auxiliary_get_drvdata(struct auxiliary_device *auxdev)
+{
+ return dev_get_drvdata(&auxdev->dev);
+}
+
+static inline void auxiliary_set_drvdata(struct auxiliary_device *auxdev, void *data)
+{
+ dev_set_drvdata(&auxdev->dev, data);
+}
+
+static inline struct auxiliary_device *to_auxiliary_dev(struct device *dev)
+{
+ return container_of(dev, struct auxiliary_device, dev);
+}
+
+static inline struct auxiliary_driver *to_auxiliary_drv(struct device_driver *drv)
+{
+ return container_of(drv, struct auxiliary_driver, driver);
+}
+
+int auxiliary_device_init(struct auxiliary_device *auxdev);
+int __auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname);
+#define auxiliary_device_add(auxdev) __auxiliary_device_add(auxdev, KBUILD_MODNAME)
+
+static inline void auxiliary_device_uninit(struct auxiliary_device *auxdev)
+{
+ put_device(&auxdev->dev);
+}
+
+static inline void auxiliary_device_delete(struct auxiliary_device *auxdev)
+{
+ device_del(&auxdev->dev);
+}
+
+int __auxiliary_driver_register(struct auxiliary_driver *auxdrv, struct module *owner,
+ const char *modname);
+#define auxiliary_driver_register(auxdrv) \
+ __auxiliary_driver_register(auxdrv, THIS_MODULE, KBUILD_MODNAME)
+
+void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv);
+
+/**
+ * module_auxiliary_driver() - Helper macro for registering an auxiliary driver
+ * @__auxiliary_driver: auxiliary driver struct
+ *
+ * Helper macro for auxiliary drivers which do not do anything special in
+ * module init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit().
+ *
+ * .. code-block:: c
+ *
+ * module_auxiliary_driver(my_drv);
+ */
+#define module_auxiliary_driver(__auxiliary_driver) \
+ module_driver(__auxiliary_driver, auxiliary_driver_register, auxiliary_driver_unregister)
+
+struct auxiliary_device *auxiliary_find_device(struct device *start,
+ const void *data,
+ int (*match)(struct device *dev, const void *data));
+
+#endif /* _AUXILIARY_BUS_H_ */
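The DOC block above recommends devm_add_action_or_reset() so the auxiliary device is always unregistered before the parent driver's remove() completes. A minimal sketch of that pattern follows; the my_* names are hypothetical:

    static void my_aux_dev_unregister(void *data)
    {
    	struct auxiliary_device *auxdev = data;

    	/* mirror the two-step registration: delete, then uninit */
    	auxiliary_device_delete(auxdev);
    	auxiliary_device_uninit(auxdev);
    }

    /* in the registering driver's probe(), after auxiliary_device_add() */
    ret = devm_add_action_or_reset(parent_dev, my_aux_dev_unregister, my_aux_dev);
    if (ret)
    	return ret;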
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
index 3e0fbe441763..407f7005e6d6 100644
--- a/include/linux/auxvec.h
+++ b/include/linux/auxvec.h
@@ -1,8 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_AUXVEC_H
#define _LINUX_AUXVEC_H
#include <uapi/linux/auxvec.h>
-#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
+#define AT_VECTOR_SIZE_BASE 22 /* NEW_AUX_ENT entries in auxiliary table */
/* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
#endif /* _LINUX_AUXVEC_H */
diff --git a/include/linux/average.h b/include/linux/average.h
index 7ddaf340d2ac..a1a8f09631ce 100644
--- a/include/linux/average.h
+++ b/include/linux/average.h
@@ -1,6 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_AVERAGE_H
#define _LINUX_AVERAGE_H
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/log2.h>
+
/*
* Exponentially weighted moving average (EWMA)
*
@@ -48,7 +53,7 @@
static inline void ewma_##name##_add(struct ewma_##name *e, \
unsigned long val) \
{ \
- unsigned long internal = ACCESS_ONCE(e->internal); \
+ unsigned long internal = READ_ONCE(e->internal); \
unsigned long weight_rcp = ilog2(_weight_rcp); \
unsigned long precision = _precision; \
\
@@ -57,10 +62,10 @@
BUILD_BUG_ON((_precision) > 30); \
BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
\
- ACCESS_ONCE(e->internal) = internal ? \
+ WRITE_ONCE(e->internal, internal ? \
(((internal << weight_rcp) - internal) + \
(val << precision)) >> weight_rcp : \
- (val << precision); \
+ (val << precision)); \
}
#endif /* _LINUX_AVERAGE_H */
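For reference, typical use of the DECLARE_EWMA() interface these READ_ONCE()/WRITE_ONCE() conversions touch, here with 10 bits of fractional precision and a weight of 1/8 (the "rssi" name is illustrative):

    DECLARE_EWMA(rssi, 10, 8)	/* generates struct ewma_rssi + init/add/read helpers */

    static struct ewma_rssi avg;

    ewma_rssi_init(&avg);
    ewma_rssi_add(&avg, 42);
    ewma_rssi_add(&avg, 40);
    pr_info("smoothed rssi: %lu\n", ewma_rssi_read(&avg));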
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index c893b9520a67..c15221dcb75e 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -1,35 +1,13 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2013-2022, Intel Corporation. */
#ifndef _VIRTCHNL_H_
#define _VIRTCHNL_H_
/* Description:
- * This header file describes the VF-PF communication protocol used
- * by the drivers for all devices starting from our 40G product line
+ * This header file describes the Virtual Function (VF) - Physical Function
+ * (PF) communication protocol used by the drivers for all devices starting
+ * from our 40G product line
*
* Admin queue buffer usage:
* desc->opcode is always aqc_opc_send_msg_to_pf
@@ -43,8 +21,8 @@
* have a maximum of sixteen queues for all of its VSIs.
*
* The PF is required to return a status code in v_retval for all messages
- * except RESET_VF, which does not require any response. The return value
- * is of status_code type, defined in the shared type.h.
+ * except RESET_VF, which does not require any response. The returned value
+ * is of virtchnl_status_code type, defined here.
*
* In general, VF driver initialization should roughly follow the order of
* these opcodes. The VF driver must first validate the API version of the
@@ -62,19 +40,27 @@
/* Error Codes */
enum virtchnl_status_code {
VIRTCHNL_STATUS_SUCCESS = 0,
- VIRTCHNL_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
- VIRTCHNL_STATUS_NOT_SUPPORTED = -64,
+ VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
};
+/* Backward compatibility */
+#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
+#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
+
+#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0
#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
+#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7
enum virtchnl_link_speed {
VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
@@ -84,6 +70,8 @@ enum virtchnl_link_speed {
VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
};
/* for hsplit_0 field of Rx HMC context */
@@ -126,37 +114,51 @@ enum virtchnl_ops {
VIRTCHNL_OP_GET_STATS = 15,
VIRTCHNL_OP_RSVD = 16,
VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ /* opcode 19 is reserved */
VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
+ VIRTCHNL_OP_RDMA = VIRTCHNL_OP_IWARP,
VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
+ VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP = VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
+ VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP = VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
VIRTCHNL_OP_SET_RSS_HENA = 26,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
+ VIRTCHNL_OP_REQUEST_QUEUES = 29,
+ VIRTCHNL_OP_ENABLE_CHANNELS = 30,
+ VIRTCHNL_OP_DISABLE_CHANNELS = 31,
+ VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
+ VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
+ /* opcode 34 - 43 are reserved */
+ VIRTCHNL_OP_GET_SUPPORTED_RXDIDS = 44,
+ VIRTCHNL_OP_ADD_RSS_CFG = 45,
+ VIRTCHNL_OP_DEL_RSS_CFG = 46,
+ VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
+ VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
+ VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51,
+ VIRTCHNL_OP_ADD_VLAN_V2 = 52,
+ VIRTCHNL_OP_DEL_VLAN_V2 = 53,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,
+ VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,
+ VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
+ VIRTCHNL_OP_MAX,
};
-/* This macro is used to generate a compilation error if a structure
+/* These macros are used to generate compilation errors if a structure/union
* is not exactly the correct length. It gives a divide by zero error if the
- * structure is not of the correct size, otherwise it creates an enum that is
- * never used.
+ * structure/union is not of the correct size, otherwise it creates an enum
+ * that is never used.
*/
#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
{ virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+ { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
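As a quick illustration of how these checks fire: the division only compiles when the size matches, so a wrong length breaks the build instead of slipping through:

    /* compiles: virtchnl_ether_addr really is 8 bytes */
    VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);

    /* would fail at build time with a division by zero in the enum
     * initializer:
     * VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr);
     */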
-/* Virtual channel message descriptor. This overlays the admin queue
- * descriptor. All other data is passed in external buffers.
- */
-
-struct virtchnl_msg {
- u8 pad[8]; /* AQ flags/opcode/len/retval fields */
- enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
- enum virtchnl_status_code v_retval; /* ditto for desc->retval */
- u32 vfid; /* used by PF when sending to VF */
-};
-
-VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
-
-/* Message descriptions and data structures.*/
+/* Message descriptions and data structures. */
/* VIRTCHNL_OP_VERSION
* VF posts its version number to the PF. PF responds with its version number
@@ -216,30 +218,41 @@ enum virtchnl_vsi_type {
struct virtchnl_vsi_resource {
u16 vsi_id;
u16 num_queue_pairs;
- enum virtchnl_vsi_type vsi_type;
+
+ /* see enum virtchnl_vsi_type */
+ s32 vsi_type;
u16 qset_handle;
u8 default_mac_addr[ETH_ALEN];
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
-/* VF offload flags
+/* VF capability flags
* VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
* TX/RX Checksum offloading and TSO for non-tunnelled packets.
*/
-#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
-#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
-#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
-#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
-#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
-#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
-#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
-#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
-#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
-#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
-#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
-#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
+#define VIRTCHNL_VF_OFFLOAD_L2 BIT(0)
+#define VIRTCHNL_VF_OFFLOAD_RDMA BIT(1)
+#define VIRTCHNL_VF_CAP_RDMA VIRTCHNL_VF_OFFLOAD_RDMA
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ BIT(3)
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG BIT(4)
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR BIT(5)
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES BIT(6)
+/* used to negotiate communicating link speeds in Mbps */
+#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7)
+#define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15)
+#define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16)
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17)
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 BIT(18)
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF BIT(19)
+#define VIRTCHNL_VF_OFFLOAD_ENCAP BIT(20)
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM BIT(21)
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM BIT(22)
+#define VIRTCHNL_VF_OFFLOAD_ADQ BIT(23)
+#define VIRTCHNL_VF_OFFLOAD_USO BIT(25)
+#define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC BIT(26)
+#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27)
+#define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28)
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
VIRTCHNL_VF_OFFLOAD_VLAN | \
@@ -251,7 +264,7 @@ struct virtchnl_vf_resource {
u16 max_vectors;
u16 max_mtu;
- u32 vf_offload_flags;
+ u32 vf_cap_flags;
u32 rss_key_size;
u32 rss_lut_size;
@@ -293,9 +306,13 @@ struct virtchnl_rxq_info {
u16 splithdr_enabled; /* deprecated with AVF 1.0 */
u32 databuffer_size;
u32 max_pkt_size;
- u32 pad1;
+ u8 pad0;
+ u8 rxdid;
+ u8 pad1[2];
u64 dma_ring_addr;
- enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+
+ /* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
+ s32 rx_split_pos;
u32 pad2;
};
@@ -307,6 +324,9 @@ VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
* PF configures queues and returns status.
* If the number of queues specified is greater than the number of queues
* associated with the VSI, an error is returned and no queues are configured.
+ * NOTE: The VF is not required to configure all queues in a single request.
+ * It may send multiple messages. PF drivers must correctly handle all VF
+ * requests.
*/
struct virtchnl_queue_pair_info {
/* NOTE: vsi_id and queue_id should be identical for both queues. */
@@ -325,12 +345,32 @@ struct virtchnl_vsi_queue_config_info {
VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
+/* VIRTCHNL_OP_REQUEST_QUEUES
+ * VF sends this message to request the PF to allocate additional queues to
+ * this VF. Each VF gets a guaranteed number of queues on init but asking for
+ * additional queues must be negotiated. This is a best effort request as it
+ * is possible the PF does not have enough queues left to support the request.
+ * If the PF cannot support the number requested it will respond with the
+ * maximum number it is able to support. If the request is successful, PF will
+ * then reset the VF to institute required changes.
+ */
+
+/* VF resource request */
+struct virtchnl_vf_res_request {
+ u16 num_queue_pairs;
+};
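A sketch of the payload a VF might send with VIRTCHNL_OP_REQUEST_QUEUES; the count shown is illustrative and, per the note above, the PF may grant fewer:

    struct virtchnl_vf_res_request req = {
    	.num_queue_pairs = 8,	/* best-effort request */
    };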
+
/* VIRTCHNL_OP_CONFIG_IRQ_MAP
* VF uses this message to map vectors to queues.
* The rxq_map and txq_map fields are bitmaps used to indicate which queues
* are to be associated with the specified vector.
- * The "other" causes are always mapped to vector 0.
+ * The "other" causes are always mapped to vector 0. The VF may not request
+ * that vector 0 be used for traffic.
* PF configures interrupt mapping and returns status.
+ * NOTE: due to hardware requirements, all active queues (both TX and RX)
+ * should be mapped to interrupts, even if the driver intends to operate
+ * only in polling mode. In this case the interrupt may be disabled, but
+ * the ITR timer will still run to trigger writebacks.
*/
struct virtchnl_vector_map {
u16 vsi_id;
@@ -357,6 +397,9 @@ VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
* (Currently, we only support 16 queues per VF, but we make the field
* u32 to allow for expansion.)
* PF performs requested action and returns status.
+ * NOTE: The VF is not required to enable/disable all queues in a single
+ * request. It may send multiple messages.
+ * PF drivers must correctly handle all VF requests.
*/
struct virtchnl_queue_select {
u16 vsi_id;
@@ -379,9 +422,36 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
* PF removes the filters and returns status.
*/
+/* VIRTCHNL_ETHER_ADDR_LEGACY
+ * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad
+ * bytes. Moving forward all VF drivers should not set type to
+ * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy
+ * behavior. The control plane function (i.e. PF) can use a best effort method
+ * of tracking the primary/device unicast in this case, but there is no
+ * guarantee and functionality depends on the implementation of the PF.
+ */
+
+/* VIRTCHNL_ETHER_ADDR_PRIMARY
+ * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the
+ * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and
+ * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane
+ * function (i.e. PF) to accurately track and use this MAC address for
+ * displaying on the host and for VM/function reset.
+ */
+
+/* VIRTCHNL_ETHER_ADDR_EXTRA
+ * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra
+ * unicast and/or multicast filters that are being added/deleted via
+ * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively.
+ */
struct virtchnl_ether_addr {
u8 addr[ETH_ALEN];
- u8 pad[2];
+ u8 type;
+#define VIRTCHNL_ETHER_ADDR_LEGACY 0
+#define VIRTCHNL_ETHER_ADDR_PRIMARY 1
+#define VIRTCHNL_ETHER_ADDR_EXTRA 2
+#define VIRTCHNL_ETHER_ADDR_TYPE_MASK 3 /* first two bits of type are valid */
+ u8 pad;
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
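Following the @type rules above, a sketch of how a VF driver would describe its primary MAC filter; the netdev source shown is an assumption:

    struct virtchnl_ether_addr primary = {};

    ether_addr_copy(primary.addr, netdev->dev_addr);	/* hypothetical source */
    primary.type = VIRTCHNL_ETHER_ADDR_PRIMARY;		/* device unicast MAC */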
@@ -416,6 +486,351 @@ struct virtchnl_vlan_filter_list {
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
+/* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related
+ * structures and opcodes.
+ *
+ * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver
+ * populates it the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype.
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.
+ * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
+ * by the PF concurrently. For example, if the PF can support
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it
+ * would OR the following bits:
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * The VF would interpret this as VLAN filtering can be supported on both 0x8100
+ * and 0x88A8 VLAN ethertypes.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR - Used when only a single ethertype can be supported
+ * by the PF concurrently. For example if the PF can support
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
+ * offload it would OR the following bits:
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * The VF would interpret this as VLAN stripping can be supported on either
+ * 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping via
+ * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will override
+ * the previously set value.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
+ * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
+ * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
+ * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
+ *
+ * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
+ * VLAN filtering if the underlying PF supports it.
+ *
+ * VIRTCHNL_VLAN_TOGGLE - This field is used to say whether a
+ * certain VLAN capability can be toggled. For example if the underlying PF/CP
+ * allows the VF to toggle VLAN filtering, stripping, and/or insertion it should
+ * set this bit along with the supported ethertypes.
+ */
+enum virtchnl_vlan_support {
+ VIRTCHNL_VLAN_UNSUPPORTED = 0,
+ VIRTCHNL_VLAN_ETHERTYPE_8100 = BIT(0),
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 = BIT(1),
+ VIRTCHNL_VLAN_ETHERTYPE_9100 = BIT(2),
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 = BIT(8),
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 = BIT(9),
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 = BIT(10),
+ VIRTCHNL_VLAN_PRIO = BIT(24),
+ VIRTCHNL_VLAN_FILTER_MASK = BIT(28),
+ VIRTCHNL_VLAN_ETHERTYPE_AND = BIT(29),
+ VIRTCHNL_VLAN_ETHERTYPE_XOR = BIT(30),
+ VIRTCHNL_VLAN_TOGGLE = BIT(31),
+};
+
+/* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
+ * for filtering, insertion, and stripping capabilities.
+ *
+ * If only outer capabilities are supported (for filtering, insertion, and/or
+ * stripping) then this refers to the outer most or single VLAN from the VF's
+ * perspective.
+ *
+ * If only inner capabilities are supported (for filtering, insertion, and/or
+ * stripping) then this refers to the outer most or single VLAN from the VF's
+ * perspective. Functionally this is the same as if only outer capabilities are
+ * supported. The VF driver is just forced to use the inner fields when
+ * adding/deleting filters and enabling/disabling offloads (if supported).
+ *
+ * If both outer and inner capabilities are supported (for filtering, insertion,
+ * and/or stripping) then outer refers to the outer most or single VLAN and
+ * inner refers to the second VLAN, if it exists, in the packet.
+ *
+ * There is no support for tunneled VLAN offloads, so outer or inner are never
+ * referring to a tunneled packet from the VF's perspective.
+ */
+struct virtchnl_vlan_supported_caps {
+ u32 outer;
+ u32 inner;
+};
+
+/* The PF populates these fields based on the supported VLAN filtering. If a
+ * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
+ * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
+ * the unsupported fields.
+ *
+ * Also, a VF is only allowed to toggle its VLAN filtering setting if the
+ * VIRTCHNL_VLAN_TOGGLE bit is set.
+ *
+ * The ethertype(s) specified in the ethertype_init field are the ethertypes
+ * enabled for VLAN filtering. VLAN filtering in this case refers to the outer
+ * most VLAN from the VF's perspective. If both inner and outer filtering are
+ * allowed then ethertype_init only refers to the outer most VLAN, as the only
+ * VLAN ethertype supported for inner VLAN filtering is
+ * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
+ * when both inner and outer filtering are allowed.
+ *
+ * The max_filters field tells the VF how many VLAN filters it's allowed to have
+ * at any one time. If it exceeds this amount and tries to add another filter,
+ * then the request will be rejected by the PF. To prevent failures, the VF
+ * should keep track of how many VLAN filters it has added and not attempt to
+ * add more than max_filters.
+ */
+struct virtchnl_vlan_filtering_caps {
+ struct virtchnl_vlan_supported_caps filtering_support;
+ u32 ethertype_init;
+ u16 max_filters;
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);
+
+/* This enum is used for the virtchnl_vlan_offload_caps structure to specify
+ * if the PF supports a different ethertype for stripping and insertion.
+ *
+ * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
+ * for stripping affect the ethertype(s) specified for insertion and vice versa
+ * as well. If the VF tries to configure VLAN stripping via
+ * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
+ * that will be the ethertype for both stripping and insertion.
+ *
+ * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
+ * stripping do not affect the ethertype(s) specified for insertion and vice
+ * versa.
+ */
+enum virtchnl_vlan_ethertype_match {
+ VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
+ VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
+};
+
+/* The PF populates these fields based on the supported VLAN offloads. If a
+ * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
+ * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
+ * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
+ *
+ * Also, a VF is only allowed to toggle its VLAN offload setting if the
+ * VIRTCHNL_VLAN_TOGGLE bit is set.
+ *
+ * The VF driver needs to be aware of how the tags are stripped by hardware and
+ * inserted by the VF driver based on the level of offload support. The PF will
+ * populate these fields based on where the VLAN tags are expected to be
+ * offloaded via the VIRTCHNL_VLAN_TAG_LOCATION_* bits. The VF will need to
+ * interpret these fields. See the definition of the
+ * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support
+ * enumeration.
+ */
+struct virtchnl_vlan_offload_caps {
+ struct virtchnl_vlan_supported_caps stripping_support;
+ struct virtchnl_vlan_supported_caps insertion_support;
+ u32 ethertype_init;
+ u8 ethertype_match;
+ u8 pad[3];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);
+
+/* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
+ * VF sends this message to determine its VLAN capabilities.
+ *
+ * PF will mark which capabilities it supports based on hardware support and
+ * current configuration. For example, if a port VLAN is configured the PF will
+ * not allow outer VLAN filtering, stripping, or insertion to be configured so
+ * it will block these features from the VF.
+ *
+ * The VF will need to cross reference its capabilities with the PFs
+ * capabilities in the response message from the PF to determine the VLAN
+ * support.
+ */
+struct virtchnl_vlan_caps {
+ struct virtchnl_vlan_filtering_caps filtering;
+ struct virtchnl_vlan_offload_caps offloads;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);
+
+struct virtchnl_vlan {
+ u16 tci; /* tci[15:13] = PCP and tci[11:0] = VID */
+ u16 tci_mask; /* only valid if VIRTCHNL_VLAN_FILTER_MASK set in
+ * filtering caps
+ */
+ u16 tpid; /* 0x8100, 0x88a8, etc. and only type(s) set in
+ * filtering caps. Note that tpid here does not refer to
+ * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the
+ * actual 2-byte VLAN TPID
+ */
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);
+
+struct virtchnl_vlan_filter {
+ struct virtchnl_vlan inner;
+ struct virtchnl_vlan outer;
+ u8 pad[16];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);
+
+/* VIRTCHNL_OP_ADD_VLAN_V2
+ * VIRTCHNL_OP_DEL_VLAN_V2
+ *
+ * VF sends these messages to add/del one or more VLAN tag filters for Rx
+ * traffic.
+ *
+ * The PF attempts to add the filters and returns status.
+ *
+ * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the
+ * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.
+ */
+struct virtchnl_vlan_filter_list_v2 {
+ u16 vport_id;
+ u16 num_elements;
+ u8 pad[4];
+ struct virtchnl_vlan_filter filters[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_filter_list_v2);
+
+/* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
+ * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
+ *
+ * VF sends this message to enable or disable VLAN stripping or insertion. It
+ * also needs to specify an ethertype. The VF knows which VLAN ethertypes are
+ * allowed and whether or not it's allowed to enable/disable the specific
+ * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
+ * parse the virtchnl_vlan_caps.offloads fields to determine which offload
+ * messages are allowed.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
+ * following manner the VF will be allowed to enable and/or disable 0x8100 inner
+ * VLAN insertion and/or stripping via the opcodes listed above. Inner in this
+ * case means the outer most or single VLAN from the VF's perspective. This is
+ * because no outer offloads are supported. See the comments above the
+ * virtchnl_vlan_supported_caps structure for more details.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.inner =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.inner =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * In order to enable inner (again note that in this case inner is the outer
+ * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
+ * VLANs, the VF would populate the virtchnl_vlan_setting structure in the
+ * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
+ *
+ * virtchnl_vlan_setting.inner_ethertype_setting =
+ * VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * The reason that VLAN TPID(s) are not being used for the
+ * outer_ethertype_setting and inner_ethertype_setting fields is because it's
+ * possible a device could support VLAN insertion and/or stripping offload on
+ * multiple ethertypes concurrently, so this method allows a VF to request
+ * multiple ethertypes in one message using the virtchnl_vlan_support
+ * enumeration.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
+ * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
+ * VLAN insertion and stripping simultaneously. The
+ * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
+ * populated based on what the PF can support.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.outer =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.outer =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
+ * would populate the virtchnl_vlan_setting structure in the following manner
+ * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting =
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * There is also the case where a PF and the underlying hardware can support
+ * VLAN offloads on multiple ethertypes, but not concurrently. For example, if
+ * the PF populates the virtchnl_vlan_caps.offloads in the following manner the
+ * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
+ * offloads. The ethertypes must match for stripping and insertion.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.outer =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.outer =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * virtchnl_vlan_caps.offloads.ethertype_match =
+ * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+ *
+ * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
+ * populate the virtchnl_vlan_setting structure in the following manner and send
+ * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2. Also, this will change the
+ * ethertype for VLAN insertion if it's enabled. So, for completeness, a
+ * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ */
+struct virtchnl_vlan_setting {
+ u32 outer_ethertype_setting;
+ u32 inner_ethertype_setting;
+ u16 vport_id;
+ u8 pad[6];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);
+
/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
* VF sends VSI id and flags.
* PF returns status code in retval.
@@ -459,7 +874,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
struct virtchnl_rss_lut {
u16 vsi_id;
u16 lut_entries;
- u8 lut[1]; /* RSS lookup table*/
+ u8 lut[1]; /* RSS lookup table */
};
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
@@ -477,6 +892,95 @@ struct virtchnl_rss_hena {
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+/* VIRTCHNL_OP_ENABLE_CHANNELS
+ * VIRTCHNL_OP_DISABLE_CHANNELS
+ * VF sends these messages to enable or disable channels based on
+ * the user specified queue count and queue offset for each traffic class.
+ * This struct encompasses all the information that the PF needs from
+ * VF to create a channel.
+ */
+struct virtchnl_channel_info {
+ u16 count; /* number of queues in a channel */
+ u16 offset; /* queues in a channel start from 'offset' */
+ u32 pad;
+ u64 max_tx_rate;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
+
+struct virtchnl_tc_info {
+ u32 num_tc;
+ u32 pad;
+ struct virtchnl_channel_info list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
+
+/* VIRTCHNL_ADD_CLOUD_FILTER
+ * VIRTCHNL_DEL_CLOUD_FILTER
+ * VF sends these messages to add or delete a cloud filter based on the
+ * user specified match and action filters. These structures encompass
+ * all the information that the PF needs from the VF to add/delete a
+ * cloud filter.
+ */
+
+struct virtchnl_l4_spec {
+ u8 src_mac[ETH_ALEN];
+ u8 dst_mac[ETH_ALEN];
+ __be16 vlan_id;
+ __be16 pad; /* reserved for future use */
+ __be32 src_ip[4];
+ __be32 dst_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
+
+union virtchnl_flow_spec {
+ struct virtchnl_l4_spec tcp_spec;
+ u8 buffer[128]; /* reserved for future use */
+};
+
+VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
+
+enum virtchnl_action {
+ /* action types */
+ VIRTCHNL_ACTION_DROP = 0,
+ VIRTCHNL_ACTION_TC_REDIRECT,
+ VIRTCHNL_ACTION_PASSTHRU,
+ VIRTCHNL_ACTION_QUEUE,
+ VIRTCHNL_ACTION_Q_REGION,
+ VIRTCHNL_ACTION_MARK,
+ VIRTCHNL_ACTION_COUNT,
+};
+
+enum virtchnl_flow_type {
+ /* flow types */
+ VIRTCHNL_TCP_V4_FLOW = 0,
+ VIRTCHNL_TCP_V6_FLOW,
+};
+
+struct virtchnl_filter {
+ union virtchnl_flow_spec data;
+ union virtchnl_flow_spec mask;
+
+ /* see enum virtchnl_flow_type */
+ s32 flow_type;
+
+ /* see enum virtchnl_action */
+ s32 action;
+ u32 action_meta;
+ u8 field_flags;
+ u8 pad[3];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
+
+struct virtchnl_supported_rxdids {
+ u64 supported_rxdids;
+};
+
/* VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
@@ -493,46 +997,65 @@ enum virtchnl_event_codes {
#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
struct virtchnl_pf_event {
- enum virtchnl_event_codes event;
+ /* see enum virtchnl_event_codes */
+ s32 event;
union {
+ /* If the PF driver does not support the new speed reporting
+ * capabilities then use link_event; otherwise use link_event_adv to
+ * get the speed and link information. The ability to understand
+ * new speeds is indicated by setting the capability flag
+ * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter
+ * in virtchnl_vf_resource struct and can be used to determine
+ * which link event struct to use below.
+ */
struct {
enum virtchnl_link_speed link_speed;
bool link_status;
+ u8 pad[3];
} link_event;
+ struct {
+ /* link_speed provided in Mbps */
+ u32 link_speed;
+ u8 link_status;
+ u8 pad[3];
+ } link_event_adv;
} event_data;
- int severity;
+ s32 severity;
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
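A sketch of the capability-gated decode described in the comment above; 'caps' stands for the vf_cap_flags value the VF saved from its virtchnl_vf_resource reply, and the function name is hypothetical:

    static void handle_link_event(u32 caps, const struct virtchnl_pf_event *event)
    {
    	if (caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
    		/* speed reported directly in Mbps */
    		pr_info("link %s, %u Mbps\n",
    			event->event_data.link_event_adv.link_status ? "up" : "down",
    			event->event_data.link_event_adv.link_speed);
    	else
    		/* legacy bitmap speed, see enum virtchnl_link_speed */
    		pr_info("link %s, speed bitmap 0x%x\n",
    			event->event_data.link_event.link_status ? "up" : "down",
    			event->event_data.link_event.link_speed);
    }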
-/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
- * VF uses this message to request PF to map IWARP vectors to IWARP queues.
- * The request for this originates from the VF IWARP driver through
- * a client interface between VF LAN and VF IWARP driver.
+/* used to specify if a ceq_idx or aeq_idx is invalid */
+#define VIRTCHNL_RDMA_INVALID_QUEUE_IDX 0xFFFF
+/* VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP
+ * VF uses this message to request PF to map RDMA vectors to RDMA queues.
+ * The request for this originates from the VF RDMA driver through
+ * a client interface between VF LAN and VF RDMA driver.
* A vector could have an AEQ and CEQ attached to it although
- * there is a single AEQ per VF IWARP instance in which case
- * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
- * There will never be a case where there will be multiple CEQs attached
- * to a single vector.
+ * there is a single AEQ per VF RDMA instance in which case
+ * most vectors will have an VIRTCHNL_RDMA_INVALID_QUEUE_IDX for aeq and valid
+ * idx for ceqs There will never be a case where there will be multiple CEQs
+ * attached to a single vector.
* PF configures interrupt mapping and returns status.
*/
-struct virtchnl_iwarp_qv_info {
+struct virtchnl_rdma_qv_info {
u32 v_idx; /* msix_vector */
- u16 ceq_idx;
- u16 aeq_idx;
+ u16 ceq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
+ u16 aeq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
u8 itr_idx;
+ u8 pad[3];
};
-VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_rdma_qv_info);
-struct virtchnl_iwarp_qvlist_info {
+struct virtchnl_rdma_qvlist_info {
u32 num_vectors;
- struct virtchnl_iwarp_qv_info qv_info[1];
+ struct virtchnl_rdma_qv_info qv_info[1];
};
-VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_rdma_qvlist_info);
/* VF reset states - these are written into the RSTAT register:
* VFGEN_RSTAT on the VF
@@ -551,6 +1074,299 @@ enum virtchnl_vfr_states {
VIRTCHNL_VFR_VFACTIVE,
};
+/* Type of RSS algorithm */
+enum virtchnl_rss_algorithm {
+ VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
+ VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1,
+ VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
+ VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
+};
+
+#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
+#define PROTO_HDR_SHIFT 5
+#define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
+#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
+
+/* The VF uses these macros to configure each protocol header.
+ * They specify which protocol headers and protocol header fields to match,
+ * based on virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
+ * @param hdr: pointer to a struct virtchnl_proto_hdr
+ * @param hdr_type: ETH/IPV4/TCP, etc.
+ * @param field: SRC/DST/TEID/SPI, etc.
+ */
+#define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
+ ((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
+ ((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
+ ((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr) ((hdr)->field_selector)
+
+#define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
+ (VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
+ VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
+#define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
+ (VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
+ VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
+
+#define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
+ ((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
+#define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
+ (((hdr)->type) >> PROTO_HDR_SHIFT)
+#define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
+ ((hdr)->type == ((s32)((val) >> PROTO_HDR_SHIFT)))
+#define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
+ (VIRTCHNL_TEST_PROTO_HDR_TYPE((hdr), (val)) && \
+ VIRTCHNL_TEST_PROTO_HDR_FIELD((hdr), (val)))
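Usage of the helpers, selecting the IPv4 source and destination address fields of a header descriptor; 'hdr' is assumed to point at a zeroed struct virtchnl_proto_hdr:

    VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
    VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);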
+
+/* Protocol header type within a packet segment. A segment consists of one or
+ * more protocol headers that make up a logical group of protocol headers. Each
+ * logical group of protocol headers encapsulates, or is encapsulated by,
+ * tunneling or encapsulation protocols for network virtualization.
+ */
+enum virtchnl_proto_hdr_type {
+ VIRTCHNL_PROTO_HDR_NONE,
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_S_VLAN,
+ VIRTCHNL_PROTO_HDR_C_VLAN,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_TCP,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_SCTP,
+ VIRTCHNL_PROTO_HDR_GTPU_IP,
+ VIRTCHNL_PROTO_HDR_GTPU_EH,
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
+ VIRTCHNL_PROTO_HDR_PPPOE,
+ VIRTCHNL_PROTO_HDR_L2TPV3,
+ VIRTCHNL_PROTO_HDR_ESP,
+ VIRTCHNL_PROTO_HDR_AH,
+ VIRTCHNL_PROTO_HDR_PFCP,
+};
+
+/* Protocol header field within a protocol header. */
+enum virtchnl_proto_hdr_field {
+ /* ETHER */
+ VIRTCHNL_PROTO_HDR_ETH_SRC =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),
+ VIRTCHNL_PROTO_HDR_ETH_DST,
+ VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
+ /* S-VLAN */
+ VIRTCHNL_PROTO_HDR_S_VLAN_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),
+ /* C-VLAN */
+ VIRTCHNL_PROTO_HDR_C_VLAN_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),
+ /* IPV4 */
+ VIRTCHNL_PROTO_HDR_IPV4_SRC =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),
+ VIRTCHNL_PROTO_HDR_IPV4_DST,
+ VIRTCHNL_PROTO_HDR_IPV4_DSCP,
+ VIRTCHNL_PROTO_HDR_IPV4_TTL,
+ VIRTCHNL_PROTO_HDR_IPV4_PROT,
+ /* IPV6 */
+ VIRTCHNL_PROTO_HDR_IPV6_SRC =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
+ VIRTCHNL_PROTO_HDR_IPV6_DST,
+ VIRTCHNL_PROTO_HDR_IPV6_TC,
+ VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
+ VIRTCHNL_PROTO_HDR_IPV6_PROT,
+ /* TCP */
+ VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
+ VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
+ /* UDP */
+ VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
+ VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
+ /* SCTP */
+ VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
+ VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
+ /* GTPU_IP */
+ VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
+ /* GTPU_EH */
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),
+ VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
+ /* PPPOE */
+ VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
+ /* L2TPV3 */
+ VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
+ /* ESP */
+ VIRTCHNL_PROTO_HDR_ESP_SPI =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
+ /* AH */
+ VIRTCHNL_PROTO_HDR_AH_SPI =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
+ /* PFCP */
+ VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
+ VIRTCHNL_PROTO_HDR_PFCP_SEID,
+};
+
+struct virtchnl_proto_hdr {
+	/* see enum virtchnl_proto_hdr_type */
+	s32 type;
+	u32 field_selector; /* a bit mask to select field for header type */
+	/**
+	 * binary buffer in network order for the specific header type.
+	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
+	 * header is expected to be copied into the buffer.
+	 */
+	u8 buffer[64];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
+
+struct virtchnl_proto_hdrs {
+	/**
+	 * specifies where the protocol headers start from:
+	 * 0 - from the outer layer
+	 * 1 - from the first inner layer
+	 * 2 - from the second inner layer
+	 * ....
+	 */
+	u8 tunnel_level;
+	u8 pad[3];
+	/* number of proto layers, must be less than VIRTCHNL_MAX_NUM_PROTO_HDRS */
+	int count;
+	struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
+
+struct virtchnl_rss_cfg {
+ struct virtchnl_proto_hdrs proto_hdrs; /* protocol headers */
+
+ /* see enum virtchnl_rss_algorithm; rss algorithm type */
+ s32 rss_algorithm;
+	u8 reserved[128]; /* reserved for future use */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
+
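As a hedged sketch (not lifted from any driver), a VF could describe symmetric Toeplitz hashing over the outer IPv4 address pair like this; in real code the ~2.4 KB struct would normally be heap-allocated:

	struct virtchnl_rss_cfg cfg = {};
	struct virtchnl_proto_hdr *hdr = &cfg.proto_hdrs.proto_hdr[0];

	cfg.proto_hdrs.tunnel_level = 0;	/* match from the outer layer */
	cfg.proto_hdrs.count = 1;
	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
	cfg.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;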
+/* action configuration for FDIR */
+struct virtchnl_filter_action {
+ /* see enum virtchnl_action type */
+ s32 type;
+ union {
+ /* used for queue and qgroup action */
+ struct {
+ u16 index;
+ u8 region;
+ } queue;
+ /* used for count action */
+ struct {
+ /* share counter ID with other flow rules */
+ u8 shared;
+ u32 id; /* counter ID */
+ } count;
+ /* used for mark action */
+ u32 mark_id;
+ u8 reserve[32];
+ } act_conf;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
+
+#define VIRTCHNL_MAX_NUM_ACTIONS 8
+
+struct virtchnl_filter_action_set {
+	/* action number must be less than VIRTCHNL_MAX_NUM_ACTIONS */
+ int count;
+ struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);
+
+/* pattern and action for FDIR rule */
+struct virtchnl_fdir_rule {
+ struct virtchnl_proto_hdrs proto_hdrs;
+ struct virtchnl_filter_action_set action_set;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
+
+/* Status returned to VF after VF requests FDIR commands
+ * VIRTCHNL_FDIR_SUCCESS
+ * VF FDIR related request was successfully processed by PF.
+ * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
+ * OP_ADD_FDIR_FILTER request failed due to lack of hardware resources.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
+ * OP_ADD_FDIR_FILTER request failed because the rule already exists.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
+ * OP_ADD_FDIR_FILTER request failed due to a conflict with an existing rule.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
+ * OP_DEL_FDIR_FILTER request failed because the rule doesn't exist.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
+ * OP_ADD_FDIR_FILTER request failed because parameter validation failed
+ * or the HW doesn't support the rule.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
+ * OP_ADD/DEL_FDIR_FILTER request failed because rule programming
+ * timed out.
+ *
+ * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
+ * OP_QUERY_FDIR_FILTER request failed due to parameter validation,
+ * for example, the VF queried the counter of a rule that has no
+ * counter action.
+ */
+enum virtchnl_fdir_prgm_status {
+ VIRTCHNL_FDIR_SUCCESS = 0,
+ VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
+ VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
+ VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
+ VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
+ VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
+ VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
+ VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
+};
+
+/* VIRTCHNL_OP_ADD_FDIR_FILTER
+ * VF sends this request to PF by filling out vsi_id,
+ * validate_only and rule_cfg. If the request succeeds, PF
+ * returns flow_id along with the status.
+ */
+struct virtchnl_fdir_add {
+	u16 vsi_id;  /* INPUT */
+	/*
+	 * 1 for validating an FDIR rule, 0 for creating an FDIR rule.
+	 * Validate and create share one opcode: VIRTCHNL_OP_ADD_FDIR_FILTER.
+	 */
+ u16 validate_only; /* INPUT */
+ u32 flow_id; /* OUTPUT */
+ struct virtchnl_fdir_rule rule_cfg; /* INPUT */
+
+ /* see enum virtchnl_fdir_prgm_status; OUTPUT */
+ s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
+
+/* VIRTCHNL_OP_DEL_FDIR_FILTER
+ * VF sends this request to PF by filling out vsi_id
+ * and flow_id. PF returns the status to VF.
+ */
+ */
+struct virtchnl_fdir_del {
+ u16 vsi_id; /* INPUT */
+ u16 pad;
+ u32 flow_id; /* INPUT */
+
+ /* see enum virtchnl_fdir_prgm_status; OUTPUT */
+ s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
+
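Tying the pieces together, a hedged sketch of the add/validate flow; VIRTCHNL_ACTION_QUEUE comes from enum virtchnl_action defined elsewhere in this header, and the mailbox send is left as a comment because the transport is driver-specific:

	static int example_add_fdir(u16 vsi_id)
	{
		struct virtchnl_fdir_add *req;
		struct virtchnl_proto_hdr *hdr;

		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		hdr = &req->rule_cfg.proto_hdrs.proto_hdr[0];
		req->vsi_id = vsi_id;
		req->validate_only = 1;		/* dry-run before creating */
		req->rule_cfg.proto_hdrs.count = 1;
		VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
		req->rule_cfg.action_set.count = 1;
		req->rule_cfg.action_set.actions[0].type = VIRTCHNL_ACTION_QUEUE;
		req->rule_cfg.action_set.actions[0].act_conf.queue.index = 3;

		/* ...send req over the VF->PF mailbox; on success the PF
		 * fills in req->flow_id and req->status...
		 */
		kfree(req);
		return 0;
	}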
/**
* virtchnl_vc_validate_vf_msg
* @ver: Virtchnl version info
@@ -565,7 +1381,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
u8 *msg, u16 msglen)
{
bool err_msg_format = false;
- int valid_len = 0;
+ u32 valid_len = 0;
/* Validate message length. */
switch (v_opcode) {
@@ -640,7 +1456,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_GET_STATS:
valid_len = sizeof(struct virtchnl_queue_select);
break;
- case VIRTCHNL_OP_IWARP:
+ case VIRTCHNL_OP_RDMA:
/* These messages are opaque to us and will be validated in
* the RDMA client code. We just need to check for nonzero
* length. The firmware will enforce max length restrictions.
@@ -650,19 +1466,16 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
else
err_msg_format = true;
break;
- case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
break;
- case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
- valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
+ case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_rdma_qvlist_info);
if (msglen >= valid_len) {
- struct virtchnl_iwarp_qvlist_info *qv =
- (struct virtchnl_iwarp_qvlist_info *)msg;
- if (qv->num_vectors == 0) {
- err_msg_format = true;
- break;
- }
+ struct virtchnl_rdma_qvlist_info *qv =
+ (struct virtchnl_rdma_qvlist_info *)msg;
+
valid_len += ((qv->num_vectors - 1) *
- sizeof(struct virtchnl_iwarp_qv_info));
+ sizeof(struct virtchnl_rdma_qv_info));
}
break;
case VIRTCHNL_OP_CONFIG_RSS_KEY:
@@ -686,14 +1499,73 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_SET_RSS_HENA:
valid_len = sizeof(struct virtchnl_rss_hena);
break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ valid_len = sizeof(struct virtchnl_vf_res_request);
+ break;
+ case VIRTCHNL_OP_ENABLE_CHANNELS:
+ valid_len = sizeof(struct virtchnl_tc_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_tc_info *vti =
+ (struct virtchnl_tc_info *)msg;
+ valid_len += (vti->num_tc - 1) *
+ sizeof(struct virtchnl_channel_info);
+ if (vti->num_tc == 0)
+ err_msg_format = true;
+ }
+ break;
+ case VIRTCHNL_OP_DISABLE_CHANNELS:
+ break;
+ case VIRTCHNL_OP_ADD_CLOUD_FILTER:
+ case VIRTCHNL_OP_DEL_CLOUD_FILTER:
+ valid_len = sizeof(struct virtchnl_filter);
+ break;
+ case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+ break;
+ case VIRTCHNL_OP_ADD_RSS_CFG:
+ case VIRTCHNL_OP_DEL_RSS_CFG:
+ valid_len = sizeof(struct virtchnl_rss_cfg);
+ break;
+ case VIRTCHNL_OP_ADD_FDIR_FILTER:
+ valid_len = sizeof(struct virtchnl_fdir_add);
+ break;
+ case VIRTCHNL_OP_DEL_FDIR_FILTER:
+ valid_len = sizeof(struct virtchnl_fdir_del);
+ break;
+ case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
+ break;
+ case VIRTCHNL_OP_ADD_VLAN_V2:
+ case VIRTCHNL_OP_DEL_VLAN_V2:
+ valid_len = sizeof(struct virtchnl_vlan_filter_list_v2);
+ if (msglen >= valid_len) {
+ struct virtchnl_vlan_filter_list_v2 *vfl =
+ (struct virtchnl_vlan_filter_list_v2 *)msg;
+
+ valid_len += (vfl->num_elements - 1) *
+ sizeof(struct virtchnl_vlan_filter);
+
+ if (vfl->num_elements == 0) {
+ err_msg_format = true;
+ break;
+ }
+ }
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
+ case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
+ case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
+ valid_len = sizeof(struct virtchnl_vlan_setting);
+ break;
/* These are always errors coming from the VF. */
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
default:
- return VIRTCHNL_ERR_PARAM;
+ return VIRTCHNL_STATUS_ERR_PARAM;
}
/* few more checks */
- if ((valid_len != msglen) || (err_msg_format))
+ if (err_msg_format || valid_len != msglen)
return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
return 0;
diff --git a/include/linux/b1pcmcia.h b/include/linux/b1pcmcia.h
deleted file mode 100644
index 12a867c6061e..000000000000
--- a/include/linux/b1pcmcia.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* $Id: b1pcmcia.h,v 1.1.8.2 2001/09/23 22:25:05 kai Exp $
- *
- * Exported functions of module b1pcmcia to be called by
- * avm_cs card services module.
- *
- * Copyright 1999 by Carsten Paeth (calle@calle.in-berlin.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef _B1PCMCIA_H_
-#define _B1PCMCIA_H_
-
-int b1pcmcia_addcard_b1(unsigned int port, unsigned irq);
-int b1pcmcia_addcard_m1(unsigned int port, unsigned irq);
-int b1pcmcia_addcard_m2(unsigned int port, unsigned irq);
-int b1pcmcia_delcard(unsigned int port, unsigned irq);
-
-#endif /* _B1PCMCIA_H_ */
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 866c433e7d32..ae12696ec492 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H
@@ -11,6 +12,7 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kref.h>
+#include <linux/refcount.h>
struct page;
struct device;
@@ -21,18 +23,11 @@ struct dentry;
*/
enum wb_state {
WB_registered, /* bdi_register() was done */
- WB_shutting_down, /* wb_shutdown() in progress */
WB_writeback_running, /* Writeback is in progress */
WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */
+ WB_start_all, /* nr_pages == 0 (all) work pending */
};
-enum wb_congested_state {
- WB_async_congested, /* The async (write) queue is getting full */
- WB_sync_congested, /* The sync queue is getting full */
-};
-
-typedef int (congested_fn)(void *, int);
-
enum wb_stat_item {
WB_RECLAIMABLE,
WB_WRITEBACK,
@@ -44,25 +39,47 @@ enum wb_stat_item {
#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
/*
- * For cgroup writeback, multiple wb's may map to the same blkcg. Those
- * wb's can operate mostly independently but should share the congested
- * state. To facilitate such sharing, the congested state is tracked using
- * the following struct which is created on demand, indexed by blkcg ID on
- * its bdi, and refcounted.
+ * why some writeback work was initiated
*/
-struct bdi_writeback_congested {
- unsigned long state; /* WB_[a]sync_congested flags */
- atomic_t refcnt; /* nr of attached wb's and blkg */
+enum wb_reason {
+ WB_REASON_BACKGROUND,
+ WB_REASON_VMSCAN,
+ WB_REASON_SYNC,
+ WB_REASON_PERIODIC,
+ WB_REASON_LAPTOP_TIMER,
+ WB_REASON_FS_FREE_SPACE,
+	/*
+	 * There is no bdi forker thread any more and works are done
+	 * by an emergency worker; however, these tracepoints are
+	 * userland visible and we'll be exposing exactly the same
+	 * information, so the mismatched name is kept.
+	 */
+ WB_REASON_FORKER_THREAD,
+ WB_REASON_FOREIGN_FLUSH,
-#ifdef CONFIG_CGROUP_WRITEBACK
- struct backing_dev_info *__bdi; /* the associated bdi, set to NULL
- * on bdi unregistration. For memcg-wb
- * internal use only! */
- int blkcg_id; /* ID of the associated blkcg */
- struct rb_node rb_node; /* on bdi->cgwb_congestion_tree */
-#endif
+ WB_REASON_MAX,
};
+struct wb_completion {
+ atomic_t cnt;
+ wait_queue_head_t *waitq;
+};
+
+#define __WB_COMPLETION_INIT(_waitq) \
+ (struct wb_completion){ .cnt = ATOMIC_INIT(1), .waitq = (_waitq) }
+
+/*
+ * If one wants to wait for one or more wb_writeback_works, each work's
+ * ->done should be set to a wb_completion defined using the following
+ * macro. Once all work items are issued with wb_queue_work(), the caller
+ * can wait for the completion of all using wb_wait_for_completion(). Work
+ * items which are waited upon aren't freed automatically on completion.
+ */
+#define WB_COMPLETION_INIT(bdi) __WB_COMPLETION_INIT(&(bdi)->wb_waitq)
+
+#define DEFINE_WB_COMPLETION(cmpl, bdi) \
+ struct wb_completion cmpl = WB_COMPLETION_INIT(bdi)
+
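A hedged sketch of that pattern; struct wb_writeback_work and wb_queue_work() live in fs/fs-writeback.c, so the calls below are illustrative rather than part of this header's API:

	static void example_flush_and_wait(struct backing_dev_info *bdi)
	{
		DEFINE_WB_COMPLETION(done, bdi);
		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.reason		= WB_REASON_SYNC,
			.done		= &done,	/* several works may share one completion */
		};

		wb_queue_work(&bdi->wb, &work);
		wb_wait_for_completion(&done);	/* the waited-upon work is not freed for us */
	}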
/*
* Each wb (bdi_writeback) can perform writeback operations, is measured
* and throttled, independently. Without cgroup writeback, each bdi
@@ -81,6 +98,9 @@ struct bdi_writeback_congested {
* change as blkcg is disabled and enabled higher up in the hierarchy, a wb
* is tested for blkcg after lookup and removed from index on mismatch so
* that a new wb for the combination can be created.
+ *
+ * Each bdi_writeback that is not embedded into the backing_dev_info must hold
+ * a reference to the parent backing_dev_info. See cgwb_create() for details.
*/
struct bdi_writeback {
struct backing_dev_info *bdi; /* our parent bdi */
@@ -94,10 +114,9 @@ struct bdi_writeback {
struct list_head b_dirty_time; /* time stamps are dirty */
spinlock_t list_lock; /* protects the b_* lists */
+ atomic_t writeback_inodes; /* number of inodes under writeback */
struct percpu_counter stat[NR_WB_STAT_ITEMS];
- struct bdi_writeback_congested *congested;
-
unsigned long bw_time_stamp; /* last time write bw is updated */
unsigned long dirtied_stamp;
unsigned long written_stamp; /* pages written at bw_time_stamp */
@@ -115,10 +134,12 @@ struct bdi_writeback {
struct fprop_local_percpu completions;
int dirty_exceeded;
+ enum wb_reason start_all_reason;
spinlock_t work_lock; /* protects work_list & dwork scheduling */
struct list_head work_list;
struct delayed_work dwork; /* work item used for writeback */
+ struct delayed_work bw_dwork; /* work item used for bandwidth estimate */
unsigned long dirty_sleep; /* last wait */
@@ -131,6 +152,8 @@ struct bdi_writeback {
struct cgroup_subsys_state *blkcg_css; /* and blkcg */
struct list_head memcg_node; /* anchored at memcg->cgwb_list */
struct list_head blkcg_node; /* anchored at blkcg->cgwb_list */
+ struct list_head b_attached; /* attached inodes, protected by list_lock */
+ struct list_head offline_node; /* anchored at offline_cgwbs */
union {
struct work_struct release_work;
@@ -140,13 +163,11 @@ struct bdi_writeback {
};
struct backing_dev_info {
+ u64 id;
+ struct rb_node rb_node; /* keyed by ->id */
struct list_head bdi_list;
unsigned long ra_pages; /* max readahead in PAGE_SIZE units */
unsigned long io_pages; /* max allowed IO size */
- congested_fn *congested_fn; /* Function pointer if device is md/dm */
- void *congested_data; /* Pointer to aux data for congested func */
-
- const char *name;
struct kref refcnt; /* Reference counter for the structure */
unsigned int capabilities; /* Device capabilities */
@@ -163,41 +184,27 @@ struct backing_dev_info {
struct list_head wb_list; /* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
- struct rb_root cgwb_congested_tree; /* their congested states */
-#else
- struct bdi_writeback_congested *wb_congested;
+ struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */
+ struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
#endif
wait_queue_head_t wb_waitq;
struct device *dev;
+ char dev_name[64];
struct device *owner;
struct timer_list laptop_mode_wb_timer;
#ifdef CONFIG_DEBUG_FS
struct dentry *debug_dir;
- struct dentry *debug_stats;
#endif
};
-enum {
- BLK_RW_ASYNC = 0,
- BLK_RW_SYNC = 1,
+struct wb_lock_cookie {
+ bool locked;
+ unsigned long flags;
};
-void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
-void set_wb_congested(struct bdi_writeback_congested *congested, int sync);
-
-static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
-{
- clear_wb_congested(bdi->wb.congested, sync);
-}
-
-static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
-{
- set_wb_congested(bdi->wb.congested, sync);
-}
-
#ifdef CONFIG_CGROUP_WRITEBACK
/**
@@ -224,11 +231,29 @@ static inline void wb_get(struct bdi_writeback *wb)
/**
- * wb_put - decrement a wb's refcount
+ * wb_put_many - decrement a wb's refcount
* @wb: bdi_writeback to put
+ * @nr: number of references to put
*/
-static inline void wb_put(struct bdi_writeback *wb)
+static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr)
{
+ if (WARN_ON_ONCE(!wb->bdi)) {
+ /*
+ * A driver bug might cause a file to be removed before bdi was
+ * initialized.
+ */
+ return;
+ }
+
if (wb != &wb->bdi->wb)
- percpu_ref_put(&wb->refcnt);
+ percpu_ref_put_many(&wb->refcnt, nr);
+}
+
+/**
+ * wb_put - decrement a wb's refcount
+ * @wb: bdi_writeback to put
+ */
+static inline void wb_put(struct bdi_writeback *wb)
+{
+ wb_put_many(wb, 1);
}
/**
@@ -257,6 +282,10 @@ static inline void wb_put(struct bdi_writeback *wb)
{
}
+static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr)
+{
+}
+
static inline bool wb_dying(struct bdi_writeback *wb)
{
return false;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 854e1bdd0b2a..fbad4fcd408e 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/backing-dev.h
*
@@ -11,9 +12,8 @@
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
-#include <linux/blkdev.h>
+#include <linux/device.h>
#include <linux/writeback.h>
-#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>
@@ -23,31 +23,30 @@ static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
return bdi;
}
+struct backing_dev_info *bdi_get_by_id(u64 id);
void bdi_put(struct backing_dev_info *bdi);
__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
+__printf(2, 0)
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
va_list args);
-int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
+void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);
-struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
-static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask)
-{
- return bdi_alloc_node(gfp_mask, NUMA_NO_NODE);
-}
+struct backing_dev_info *bdi_alloc(int node_id);
-void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
- bool range_cyclic, enum wb_reason reason);
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);
+void wb_wait_for_completion(struct wb_completion *done);
+
extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
extern struct workqueue_struct *bdi_wq;
+extern struct workqueue_struct *bdi_async_bio_wq;
static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
@@ -63,7 +62,7 @@ static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
return atomic_long_read(&bdi->tot_write_bandwidth);
}
-static inline void __add_wb_stat(struct bdi_writeback *wb,
+static inline void wb_stat_mod(struct bdi_writeback *wb,
enum wb_stat_item item, s64 amount)
{
percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
@@ -71,12 +70,12 @@ static inline void __add_wb_stat(struct bdi_writeback *wb,
static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
- __add_wb_stat(wb, item, 1);
+ wb_stat_mod(wb, item, 1);
}
static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
- __add_wb_stat(wb, item, -1);
+ wb_stat_mod(wb, item, -1);
}
static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
@@ -94,7 +93,7 @@ extern void wb_writeout_inc(struct bdi_writeback *wb);
/*
* maximal error of a stat counter.
*/
-static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
+static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
return nr_cpu_ids * WB_STAT_BATCH;
@@ -103,39 +102,35 @@ static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
#endif
}
+/* BDI ratio is expressed as parts per 1000000 for finer granularity. */
+#define BDI_RATIO_SCALE 10000
+
+u64 bdi_get_min_bytes(struct backing_dev_info *bdi);
+u64 bdi_get_max_bytes(struct backing_dev_info *bdi);
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
+int bdi_set_min_ratio_no_scale(struct backing_dev_info *bdi, unsigned int min_ratio);
+int bdi_set_max_ratio_no_scale(struct backing_dev_info *bdi, unsigned int max_ratio);
+int bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes);
+int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes);
+int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit);
/*
* Flags in backing_dev_info::capability
*
- * The first three flags control whether dirty pages will contribute to the
- * VM's accounting and whether writepages() should be called for dirty pages
- * (something that would not, for example, be appropriate for ramfs)
- *
- * WARNING: these flags are closely related and should not normally be
- * used separately. The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
- * three flags into a single convenience macro.
- *
- * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
- * BDI_CAP_NO_WRITEBACK: Don't write pages back
- * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages
- * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
- *
- * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
+ * BDI_CAP_WRITEBACK: Supports dirty page writeback, and dirty pages
+ * should contribute to accounting
+ * BDI_CAP_WRITEBACK_ACCT: Automatically account writeback pages
+ * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold
*/
-#define BDI_CAP_NO_ACCT_DIRTY 0x00000001
-#define BDI_CAP_NO_WRITEBACK 0x00000002
-#define BDI_CAP_NO_ACCT_WB 0x00000004
-#define BDI_CAP_STABLE_WRITES 0x00000008
-#define BDI_CAP_STRICTLIMIT 0x00000010
-#define BDI_CAP_CGROUP_WRITEBACK 0x00000020
-
-#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
- (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
+#define BDI_CAP_WRITEBACK (1 << 0)
+#define BDI_CAP_WRITEBACK_ACCT (1 << 1)
+#define BDI_CAP_STRICTLIMIT (1 << 2)
extern struct backing_dev_info noop_backing_dev_info;
+int bdi_init(struct backing_dev_info *bdi);
+
/**
* writeback_in_progress - determine whether there is writeback in progress
* @wb: bdi_writeback of interest
@@ -148,92 +143,30 @@ static inline bool writeback_in_progress(struct bdi_writeback *wb)
return test_bit(WB_writeback_running, &wb->state);
}
-static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
-{
- struct super_block *sb;
-
- if (!inode)
- return &noop_backing_dev_info;
-
- sb = inode->i_sb;
-#ifdef CONFIG_BLOCK
- if (sb_is_blkdev_sb(sb))
- return I_BDEV(inode)->bd_bdi;
-#endif
- return sb->s_bdi;
-}
-
-static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
-{
- struct backing_dev_info *bdi = wb->bdi;
-
- if (bdi->congested_fn)
- return bdi->congested_fn(bdi->congested_data, cong_bits);
- return wb->congested->state & cong_bits;
-}
-
-long congestion_wait(int sync, long timeout);
-long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout);
-int pdflush_proc_obsolete(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
-
-static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
-{
- return bdi->capabilities & BDI_CAP_STABLE_WRITES;
-}
+struct backing_dev_info *inode_to_bdi(struct inode *inode);
-static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
+static inline bool mapping_can_writeback(struct address_space *mapping)
{
- return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
-}
-
-static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
-{
- return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
-}
-
-static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
-{
- /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
- return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
- BDI_CAP_NO_WRITEBACK));
-}
-
-static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
-{
- return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
-}
-
-static inline bool mapping_cap_account_dirty(struct address_space *mapping)
-{
- return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
-}
-
-static inline int bdi_sched_wait(void *word)
-{
- schedule();
- return 0;
+ return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
}
#ifdef CONFIG_CGROUP_WRITEBACK
-struct bdi_writeback_congested *
-wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
-void wb_congested_put(struct bdi_writeback_congested *congested);
+struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
+ struct cgroup_subsys_state *memcg_css);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
struct cgroup_subsys_state *memcg_css,
gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
-void wb_blkcg_offline(struct blkcg *blkcg);
-int inode_congested(struct inode *inode, int cong_bits);
+void wb_blkcg_offline(struct cgroup_subsys_state *css);
/**
* inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
* @inode: inode of interest
*
- * cgroup writeback requires support from both the bdi and filesystem.
- * Also, both memcg and iocg have to be on the default hierarchy. Test
- * whether all conditions are met.
+ * Cgroup writeback requires support from the filesystem. Also, both memcg and
+ * iocg have to be on the default hierarchy. Test whether all conditions are
+ * met.
*
* Note that the test result may change dynamically on the same inode
* depending on how memcg and iocg are configured.
@@ -244,8 +177,7 @@ static inline bool inode_cgwb_enabled(struct inode *inode)
return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
cgroup_subsys_on_dfl(io_cgrp_subsys) &&
- bdi_cap_account_dirty(bdi) &&
- (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
+ (bdi->capabilities & BDI_CAP_WRITEBACK) &&
(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}
@@ -308,68 +240,67 @@ wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
}
/**
- * inode_to_wb_is_valid - test whether an inode has a wb associated
- * @inode: inode of interest
- *
- * Returns %true if @inode has a wb associated. May be called without any
- * locking.
- */
-static inline bool inode_to_wb_is_valid(struct inode *inode)
-{
- return inode->i_wb;
-}
-
-/**
* inode_to_wb - determine the wb of an inode
* @inode: inode of interest
*
* Returns the wb @inode is currently associated with. The caller must be
- * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
+ * holding either @inode->i_lock, the i_pages lock, or the
* associated wb's list_lock.
*/
-static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
+static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
WARN_ON_ONCE(debug_locks &&
(!lockdep_is_held(&inode->i_lock) &&
- !lockdep_is_held(&inode->i_mapping->tree_lock) &&
+ !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
!lockdep_is_held(&inode->i_wb->list_lock)));
#endif
return inode->i_wb;
}
+static inline struct bdi_writeback *inode_to_wb_wbc(
+ struct inode *inode,
+ struct writeback_control *wbc)
+{
+ /*
+ * If wbc does not have inode attached, it means cgroup writeback was
+ * disabled when wbc started. Just use the default wb in that case.
+ */
+ return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb;
+}
+
/**
* unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
* @inode: target inode
- * @lockedp: temp bool output param, to be passed to the end function
+ * @cookie: output param, to be passed to the end function
*
* The caller wants to access the wb associated with @inode but isn't
- * holding inode->i_lock, mapping->tree_lock or wb->list_lock. This
+ * holding inode->i_lock, the i_pages lock or wb->list_lock. This
* function determines the wb associated with @inode and ensures that the
* association doesn't change until the transaction is finished with
* unlocked_inode_to_wb_end().
*
- * The caller must call unlocked_inode_to_wb_end() with *@lockdep
- * afterwards and can't sleep during transaction. IRQ may or may not be
- * disabled on return.
+ * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
+ * can't sleep during the transaction. IRQs may or may not be disabled on
+ * return.
*/
static inline struct bdi_writeback *
-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
rcu_read_lock();
/*
- * Paired with store_release in inode_switch_wb_work_fn() and
+ * Paired with store_release in inode_switch_wbs_work_fn() and
* ensures that we see the new wb if we see cleared I_WB_SWITCH.
*/
- *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
+ cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
- if (unlikely(*lockedp))
- spin_lock_irq(&inode->i_mapping->tree_lock);
+ if (unlikely(cookie->locked))
+ xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);
/*
- * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
- * inode_to_wb() will bark. Deref directly.
+ * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
+ * lock. inode_to_wb() will bark. Deref directly.
*/
return inode->i_wb;
}
@@ -377,12 +308,13 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
/**
* unlocked_inode_to_wb_end - end inode wb access transaction
* @inode: target inode
- * @locked: *@lockedp from unlocked_inode_to_wb_begin()
+ * @cookie: @cookie from unlocked_inode_to_wb_begin()
*/
-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
+static inline void unlocked_inode_to_wb_end(struct inode *inode,
+ struct wb_lock_cookie *cookie)
{
- if (unlikely(locked))
- spin_unlock_irq(&inode->i_mapping->tree_lock);
+ if (unlikely(cookie->locked))
+ xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);
rcu_read_unlock();
}
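A minimal sketch of the begin/end transaction, assuming the caller holds none of the locks named above:

	static void example_touch_wb(struct inode *inode)
	{
		struct wb_lock_cookie cookie = {};
		struct bdi_writeback *wb;

		wb = unlocked_inode_to_wb_begin(inode, &cookie);
		/* ... access wb members here; sleeping is not allowed ... */
		unlocked_inode_to_wb_end(inode, &cookie);
	}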
@@ -394,19 +326,6 @@ static inline bool inode_cgwb_enabled(struct inode *inode)
return false;
}
-static inline struct bdi_writeback_congested *
-wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
-{
- atomic_inc(&bdi->wb_congested->refcnt);
- return bdi->wb_congested;
-}
-
-static inline void wb_congested_put(struct bdi_writeback_congested *congested)
-{
- if (atomic_dec_and_test(&congested->refcnt))
- kfree(congested);
-}
-
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
return &bdi->wb;
@@ -418,76 +337,40 @@ wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
return &bdi->wb;
}
-static inline bool inode_to_wb_is_valid(struct inode *inode)
-{
- return true;
-}
-
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
return &inode_to_bdi(inode)->wb;
}
-static inline struct bdi_writeback *
-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+static inline struct bdi_writeback *inode_to_wb_wbc(
+ struct inode *inode,
+ struct writeback_control *wbc)
{
return inode_to_wb(inode);
}
-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
-{
-}
-static inline void wb_memcg_offline(struct mem_cgroup *memcg)
-{
-}
-
-static inline void wb_blkcg_offline(struct blkcg *blkcg)
-{
-}
-
-static inline int inode_congested(struct inode *inode, int cong_bits)
-{
- return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
-}
-
-#endif /* CONFIG_CGROUP_WRITEBACK */
-
-static inline int inode_read_congested(struct inode *inode)
-{
- return inode_congested(inode, 1 << WB_sync_congested);
-}
-
-static inline int inode_write_congested(struct inode *inode)
+static inline struct bdi_writeback *
+unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
- return inode_congested(inode, 1 << WB_async_congested);
+ return inode_to_wb(inode);
}
-static inline int inode_rw_congested(struct inode *inode)
+static inline void unlocked_inode_to_wb_end(struct inode *inode,
+ struct wb_lock_cookie *cookie)
{
- return inode_congested(inode, (1 << WB_sync_congested) |
- (1 << WB_async_congested));
}
-static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
+static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
- return wb_congested(&bdi->wb, cong_bits);
}
-static inline int bdi_read_congested(struct backing_dev_info *bdi)
+static inline void wb_blkcg_offline(struct cgroup_subsys_state *css)
{
- return bdi_congested(bdi, 1 << WB_sync_congested);
}
-static inline int bdi_write_congested(struct backing_dev_info *bdi)
-{
- return bdi_congested(bdi, 1 << WB_async_congested);
-}
+#endif /* CONFIG_CGROUP_WRITEBACK */
-static inline int bdi_rw_congested(struct backing_dev_info *bdi)
-{
- return bdi_congested(bdi, (1 << WB_sync_congested) |
- (1 << WB_async_congested));
-}
+const char *bdi_dev_name(struct backing_dev_info *bdi);
#endif /* _LINUX_BACKING_DEV_H */
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 5f2fd61ef4fb..614653e07e3a 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Backlight Lowlevel Control Abstraction
*
@@ -13,110 +14,336 @@
#include <linux/mutex.h>
#include <linux/notifier.h>
-/* Notes on locking:
+/**
+ * enum backlight_update_reason - what method was used to update backlight
*
- * backlight_device->ops_lock is an internal backlight lock protecting the
- * ops pointer and no code outside the core should need to touch it.
- *
- * Access to update_status() is serialised by the update_lock mutex since
- * most drivers seem to need this and historically get it wrong.
- *
- * Most drivers don't need locking on their get_brightness() method.
- * If yours does, you need to implement it in the driver. You can use the
- * update_lock mutex if appropriate.
- *
- * Any other use of the locks below is probably wrong.
+ * A driver indicates the method (reason) used for updating the backlight
+ * when calling backlight_force_update().
*/
-
enum backlight_update_reason {
+ /**
+ * @BACKLIGHT_UPDATE_HOTKEY: The backlight was updated using a hot-key.
+ */
BACKLIGHT_UPDATE_HOTKEY,
+
+ /**
+ * @BACKLIGHT_UPDATE_SYSFS: The backlight was updated using sysfs.
+ */
BACKLIGHT_UPDATE_SYSFS,
};
+/**
+ * enum backlight_type - the type of backlight control
+ *
+ * The type of interface used to control the backlight.
+ */
enum backlight_type {
+ /**
+ * @BACKLIGHT_RAW:
+ *
+ * The backlight is controlled using hardware registers.
+ */
BACKLIGHT_RAW = 1,
+
+ /**
+ * @BACKLIGHT_PLATFORM:
+ *
+ * The backlight is controlled using a platform-specific interface.
+ */
BACKLIGHT_PLATFORM,
+
+ /**
+ * @BACKLIGHT_FIRMWARE:
+ *
+ * The backlight is controlled using a standard firmware interface.
+ */
BACKLIGHT_FIRMWARE,
+
+ /**
+ * @BACKLIGHT_TYPE_MAX: Number of entries.
+ */
BACKLIGHT_TYPE_MAX,
};
+/**
+ * enum backlight_notification - the type of notification
+ *
+ * The type of notification sent to receivers that registered for
+ * notifications using backlight_register_notifier().
+ */
enum backlight_notification {
+ /**
+ * @BACKLIGHT_REGISTERED: The backlight device is registered.
+ */
BACKLIGHT_REGISTERED,
+
+ /**
+	 * @BACKLIGHT_UNREGISTERED: The backlight device is unregistered.
+ */
BACKLIGHT_UNREGISTERED,
};
+/**
+ * enum backlight_scale - the type of scale used for brightness values
+ *
+ * The type of scale used for brightness values.
+ */
+enum backlight_scale {
+ /**
+ * @BACKLIGHT_SCALE_UNKNOWN: The scale is unknown.
+ */
+ BACKLIGHT_SCALE_UNKNOWN = 0,
+
+ /**
+ * @BACKLIGHT_SCALE_LINEAR: The scale is linear.
+ *
+ * The linear scale will increase brightness the same for each step.
+ */
+ BACKLIGHT_SCALE_LINEAR,
+
+ /**
+ * @BACKLIGHT_SCALE_NON_LINEAR: The scale is not linear.
+ *
+	 * This is often used when the brightness values try to match the
+	 * relative perception of the eye, which demands a non-linear scale.
+ */
+ BACKLIGHT_SCALE_NON_LINEAR,
+};
+
struct backlight_device;
struct fb_info;
+/**
+ * struct backlight_ops - backlight operations
+ *
+ * The backlight operations are specified when the backlight device is registered.
+ */
struct backlight_ops {
+ /**
+ * @options: Configure how operations are called from the core.
+ *
+ * The options parameter is used to adjust the behaviour of the core.
+ * Set BL_CORE_SUSPENDRESUME to get the update_status() operation called
+ * upon suspend and resume.
+ */
unsigned int options;
#define BL_CORE_SUSPENDRESUME (1 << 0)
- /* Notify the backlight driver some property has changed */
+ /**
+ * @update_status: Operation called when properties have changed.
+ *
+ * Notify the backlight driver some property has changed.
+ * The update_status operation is protected by the update_lock.
+ *
+ * The backlight driver is expected to use backlight_is_blank()
+ * to check if the display is blanked and set brightness accordingly.
+ * update_status() is called when any of the properties has changed.
+ *
+ * RETURNS:
+ *
+ * 0 on success, negative error code if any failure occurred.
+ */
int (*update_status)(struct backlight_device *);
- /* Return the current backlight brightness (accounting for power,
- fb_blank etc.) */
+
+ /**
+ * @get_brightness: Return the current backlight brightness.
+ *
+ * The driver may implement this as a readback from the HW.
+ * This operation is optional and if not present then the current
+ * brightness property value is used.
+ *
+ * RETURNS:
+ *
+ * A brightness value which is 0 or a positive number.
+ * On failure a negative error code is returned.
+ */
int (*get_brightness)(struct backlight_device *);
- /* Check if given framebuffer device is the one bound to this backlight;
- return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
- int (*check_fb)(struct backlight_device *, struct fb_info *);
+
+ /**
+ * @check_fb: Check the framebuffer device.
+ *
+ * Check if given framebuffer device is the one bound to this backlight.
+ * This operation is optional and if not implemented it is assumed that the
+ * fbdev is always the one bound to the backlight.
+ *
+ * RETURNS:
+ *
+ * If info is NULL or the info matches the fbdev bound to the backlight return true.
+ * If info does not match the fbdev bound to the backlight return false.
+ */
+ int (*check_fb)(struct backlight_device *bd, struct fb_info *info);
};
-/* This structure defines all the properties of a backlight */
+/**
+ * struct backlight_properties - backlight properties
+ *
+ * This structure defines all the properties of a backlight.
+ */
struct backlight_properties {
- /* Current User requested brightness (0 - max_brightness) */
+ /**
+ * @brightness: The current brightness requested by the user.
+ *
+ * The backlight core makes sure the range is (0 to max_brightness)
+ * when the brightness is set via the sysfs attribute:
+ * /sys/class/backlight/<backlight>/brightness.
+ *
+ * This value can be set in the backlight_properties passed
+ * to devm_backlight_device_register() to set a default brightness
+ * value.
+ */
int brightness;
- /* Maximal value for brightness (read-only) */
+
+ /**
+ * @max_brightness: The maximum brightness value.
+ *
+ * This value must be set in the backlight_properties passed to
+ * devm_backlight_device_register() and shall not be modified by the
+ * driver after registration.
+ */
int max_brightness;
- /* Current FB Power mode (0: full on, 1..3: power saving
- modes; 4: full off), see FB_BLANK_XXX */
+
+ /**
+ * @power: The current power mode.
+ *
+ * User space can configure the power mode using the sysfs
+ * attribute: /sys/class/backlight/<backlight>/bl_power
+ * When the power property is updated update_status() is called.
+ *
+ * The possible values are: (0: full on, 1 to 3: power saving
+ * modes; 4: full off), see FB_BLANK_XXX.
+ *
+ * When the backlight device is enabled @power is set
+ * to FB_BLANK_UNBLANK. When the backlight device is disabled
+ * @power is set to FB_BLANK_POWERDOWN.
+ */
int power;
- /* FB Blanking active? (values as for power) */
- /* Due to be removed, please use (state & BL_CORE_FBBLANK) */
+
+ /**
+ * @fb_blank: The power state from the FBIOBLANK ioctl.
+ *
+ * When the FBIOBLANK ioctl is called @fb_blank is set to the
+ * blank parameter and the update_status() operation is called.
+ *
+ * When the backlight device is enabled @fb_blank is set
+ * to FB_BLANK_UNBLANK. When the backlight device is disabled
+ * @fb_blank is set to FB_BLANK_POWERDOWN.
+ *
+ * Backlight drivers should avoid using this property. It has been
+	 * replaced by state & BL_CORE_FBBLANK (although most drivers should
+ * use backlight_is_blank() as the preferred means to get the blank
+ * state).
+ *
+ * fb_blank is deprecated and will be removed.
+ */
int fb_blank;
- /* Backlight type */
+
+ /**
+ * @type: The type of backlight supported.
+ *
+ * The backlight type allows userspace to make appropriate
+ * policy decisions based on the backlight type.
+ *
+ * This value must be set in the backlight_properties
+ * passed to devm_backlight_device_register().
+ */
enum backlight_type type;
- /* Flags used to signal drivers of state changes */
- /* Upper 4 bits are reserved for driver internal use */
+
+ /**
+ * @state: The state of the backlight core.
+ *
+ * The state is a bitmask. BL_CORE_FBBLANK is set when the display
+ * is expected to be blank. BL_CORE_SUSPENDED is set when the
+ * driver is suspended.
+ *
+ * backlight drivers are expected to use backlight_is_blank()
+ * in their update_status() operation rather than reading the
+ * state property.
+ *
+ * The state is maintained by the core and drivers may not modify it.
+ */
unsigned int state;
#define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */
#define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */
-#define BL_CORE_DRIVER4 (1 << 28) /* reserved for driver specific use */
-#define BL_CORE_DRIVER3 (1 << 29) /* reserved for driver specific use */
-#define BL_CORE_DRIVER2 (1 << 30) /* reserved for driver specific use */
-#define BL_CORE_DRIVER1 (1 << 31) /* reserved for driver specific use */
+ /**
+ * @scale: The type of the brightness scale.
+ */
+ enum backlight_scale scale;
};
+/**
+ * struct backlight_device - backlight device data
+ *
+ * This structure holds all data required by a backlight device.
+ */
struct backlight_device {
- /* Backlight properties */
+ /**
+ * @props: Backlight properties
+ */
struct backlight_properties props;
- /* Serialise access to update_status method */
+ /**
+ * @update_lock: The lock used when calling the update_status() operation.
+ *
+	 * update_lock is an internal backlight lock that serialises access
+ * to the update_status() operation. The backlight core holds the update_lock
+ * when calling the update_status() operation. The update_lock shall not
+ * be used by backlight drivers.
+ */
struct mutex update_lock;
- /* This protects the 'ops' field. If 'ops' is NULL, the driver that
- registered this device has been unloaded, and if class_get_devdata()
- points to something in the body of that driver, it is also invalid. */
+ /**
+ * @ops_lock: The lock used around everything related to backlight_ops.
+ *
+ * ops_lock is an internal backlight lock that protects the ops pointer
+ * and is used around all accesses to ops and when the operations are
+ * invoked. The ops_lock shall not be used by backlight drivers.
+ */
struct mutex ops_lock;
+
+ /**
+ * @ops: Pointer to the backlight operations.
+ *
+ * If ops is NULL, the driver that registered this device has been unloaded,
+ * and if class_get_devdata() points to something in the body of that driver,
+ * it is also invalid.
+ */
const struct backlight_ops *ops;
- /* The framebuffer notifier block */
+ /**
+ * @fb_notif: The framebuffer notifier block
+ */
struct notifier_block fb_notif;
- /* list entry of all registered backlight devices */
+ /**
+ * @entry: List entry of all registered backlight devices
+ */
struct list_head entry;
+ /**
+ * @dev: Parent device.
+ */
struct device dev;
- /* Multiple framebuffers may share one backlight device */
+ /**
+ * @fb_bl_on: The state of individual fbdev's.
+	 * @fb_bl_on: The state of individual fbdevs.
+	 *
+	 * Multiple fbdevs may share one backlight device. The fb_bl_on
+ */
bool fb_bl_on[FB_MAX];
+ /**
+ * @use_count: The number of uses of fb_bl_on.
+ */
int use_count;
};
+/**
+ * backlight_update_status - force an update of the backlight device status
+ * @bd: the backlight device
+ */
static inline int backlight_update_status(struct backlight_device *bd)
{
int ret = -ENOENT;
@@ -129,39 +356,116 @@ static inline int backlight_update_status(struct backlight_device *bd)
return ret;
}
-extern struct backlight_device *backlight_device_register(const char *name,
- struct device *dev, void *devdata, const struct backlight_ops *ops,
- const struct backlight_properties *props);
-extern struct backlight_device *devm_backlight_device_register(
- struct device *dev, const char *name, struct device *parent,
- void *devdata, const struct backlight_ops *ops,
- const struct backlight_properties *props);
-extern void backlight_device_unregister(struct backlight_device *bd);
-extern void devm_backlight_device_unregister(struct device *dev,
- struct backlight_device *bd);
-extern void backlight_force_update(struct backlight_device *bd,
- enum backlight_update_reason reason);
-extern int backlight_register_notifier(struct notifier_block *nb);
-extern int backlight_unregister_notifier(struct notifier_block *nb);
-extern struct backlight_device *backlight_device_get_by_type(enum backlight_type type);
-extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned long brightness);
+/**
+ * backlight_enable - Enable backlight
+ * @bd: the backlight device to enable
+ */
+static inline int backlight_enable(struct backlight_device *bd)
+{
+ if (!bd)
+ return 0;
+
+ bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.fb_blank = FB_BLANK_UNBLANK;
+ bd->props.state &= ~BL_CORE_FBBLANK;
+
+ return backlight_update_status(bd);
+}
+
+/**
+ * backlight_disable - Disable backlight
+ * @bd: the backlight device to disable
+ */
+static inline int backlight_disable(struct backlight_device *bd)
+{
+ if (!bd)
+ return 0;
+
+ bd->props.power = FB_BLANK_POWERDOWN;
+ bd->props.fb_blank = FB_BLANK_POWERDOWN;
+ bd->props.state |= BL_CORE_FBBLANK;
+
+ return backlight_update_status(bd);
+}
+
+/**
+ * backlight_is_blank - Return true if display is expected to be blank
+ * @bd: the backlight device
+ *
+ * Display is expected to be blank if any of these is true::
+ *
+ * 1) if power is not UNBLANK
+ * 2) if fb_blank is not UNBLANK
+ * 3) if state indicates BLANK or SUSPENDED
+ *
+ * Returns true if display is expected to be blank, false otherwise.
+ */
+static inline bool backlight_is_blank(const struct backlight_device *bd)
+{
+ return bd->props.power != FB_BLANK_UNBLANK ||
+ bd->props.fb_blank != FB_BLANK_UNBLANK ||
+ bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK);
+}
+
+/**
+ * backlight_get_brightness - Returns the current brightness value
+ * @bd: the backlight device
+ *
+ * Returns the current brightness value, taking into consideration the
+ * current state. If backlight_is_blank() returns true then return 0 as
+ * the brightness; otherwise return the current brightness property value.
+ *
+ * Backlight drivers are expected to use this function in their update_status()
+ * operation to get the brightness value.
+ */
+static inline int backlight_get_brightness(const struct backlight_device *bd)
+{
+ if (backlight_is_blank(bd))
+ return 0;
+ else
+ return bd->props.brightness;
+}
+
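As a hedged sketch, a driver's update_status() built on these helpers could look like the following; my_chip and my_chip_set_level() are made-up driver internals:

	static int my_update_status(struct backlight_device *bd)
	{
		struct my_chip *chip = bl_get_data(bd);	/* driver devdata */
		int brightness = backlight_get_brightness(bd);

		/* brightness is already forced to 0 while blanked/suspended */
		return my_chip_set_level(chip, brightness);
	}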
+struct backlight_device *
+backlight_device_register(const char *name, struct device *dev, void *devdata,
+ const struct backlight_ops *ops,
+ const struct backlight_properties *props);
+struct backlight_device *
+devm_backlight_device_register(struct device *dev, const char *name,
+ struct device *parent, void *devdata,
+ const struct backlight_ops *ops,
+ const struct backlight_properties *props);
+void backlight_device_unregister(struct backlight_device *bd);
+void devm_backlight_device_unregister(struct device *dev,
+ struct backlight_device *bd);
+void backlight_force_update(struct backlight_device *bd,
+ enum backlight_update_reason reason);
+int backlight_register_notifier(struct notifier_block *nb);
+int backlight_unregister_notifier(struct notifier_block *nb);
+struct backlight_device *backlight_device_get_by_name(const char *name);
+struct backlight_device *backlight_device_get_by_type(enum backlight_type type);
+int backlight_device_set_brightness(struct backlight_device *bd,
+ unsigned long brightness);
#define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
+/**
+ * bl_get_data - access devdata
+ * @bl_dev: pointer to backlight device
+ *
+ * When a backlight device is registered the driver may supply a
+ * void * devdata. bl_get_data() returns a pointer to that
+ * devdata.
+ *
+ * RETURNS:
+ *
+ * pointer to devdata stored while registering the backlight device.
+ */
static inline void * bl_get_data(struct backlight_device *bl_dev)
{
return dev_get_drvdata(&bl_dev->dev);
}
-struct generic_bl_info {
- const char *name;
- int max_intensity;
- int default_intensity;
- int limit_mask;
- void (*set_bl_intensity)(int intensity);
- void (*kick_battery)(void);
-};
-
#ifdef CONFIG_OF
struct backlight_device *of_find_backlight_by_node(struct device_node *node);
#else
@@ -172,4 +476,14 @@ of_find_backlight_by_node(struct device_node *node)
}
#endif
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
+struct backlight_device *devm_of_find_backlight(struct device *dev);
+#else
+static inline struct backlight_device *
+devm_of_find_backlight(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
#endif
diff --git a/include/linux/badblocks.h b/include/linux/badblocks.h
index c3bdf8c59480..2426276b9bd3 100644
--- a/include/linux/badblocks.h
+++ b/include/linux/badblocks.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BADBLOCKS_H
#define _LINUX_BADBLOCKS_H
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index 79542b2698ec..5ca2d5699620 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -1,17 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/balloon_compaction.h
*
* Common interface definitions for making balloon pages movable by compaction.
*
- * Despite being perfectly possible to perform ballooned pages migration, they
- * make a special corner case to compaction scans because balloon pages are not
- * enlisted at any LRU list like the other pages we do compact / migrate.
+ * Balloon page migration makes use of the general non-lru movable page
+ * feature.
+ *
+ * page->private is used to reference the responsible balloon device.
+ * page->mapping is used in context of non-lru page migration to reference
+ * the address space operations for page isolation/migration/compaction.
*
 * As the page isolation scan a compaction thread performs is a lockless
* procedure (from a page standpoint), it might bring some racy situations while
* performing balloon page compaction. In order to sort out these racy scenarios
* and safely perform balloon's page compaction and migration we must, always,
- * ensure following these three simple rules:
+ * ensure following these simple rules:
*
* i. when updating a balloon's page ->mapping element, strictly do it under
* the following lock order, independently of the far superior
@@ -20,19 +24,8 @@
* +--spin_lock_irq(&b_dev_info->pages_lock);
* ... page->mapping updates here ...
*
- * ii. before isolating or dequeueing a balloon page from the balloon device
- * pages list, the page reference counter must be raised by one and the
- * extra refcount must be dropped when the page is enqueued back into
- * the balloon device page list, thus a balloon page keeps its reference
- * counter raised only while it is under our special handling;
- *
- * iii. after the lockless scan step have selected a potential balloon page for
- * isolation, re-test the PageBalloon mark and the PagePrivate flag
- * under the proper page lock, to ensure isolating a valid balloon page
- * (not yet isolated, nor under release procedure)
- *
- * iv. isolation or dequeueing procedure must clear PagePrivate flag under
- * page lock together with removing page from balloon device page list.
+ * ii. isolation or dequeueing procedure must remove the page from balloon
+ * device page list under b_dev_info->pages_lock.
*
* The functions provided by this interface are placed to help on coping with
* the aforementioned balloon page corner case, as well as to ensure the simple
@@ -49,6 +42,7 @@
#include <linux/gfp.h>
#include <linux/err.h>
#include <linux/fs.h>
+#include <linux/list.h>
/*
* Balloon device information descriptor.
@@ -63,11 +57,16 @@ struct balloon_dev_info {
struct list_head pages; /* Pages enqueued & handled to Host */
int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
struct page *page, enum migrate_mode mode);
- struct inode *inode;
};
-extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
+extern struct page *balloon_page_alloc(void);
+extern void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
+ struct page *page);
extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
+extern size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
+ struct list_head *pages);
+extern size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
+ struct list_head *pages, size_t n_req_pages);
static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
{
@@ -75,17 +74,10 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
spin_lock_init(&balloon->pages_lock);
INIT_LIST_HEAD(&balloon->pages);
balloon->migratepage = NULL;
- balloon->inode = NULL;
}
#ifdef CONFIG_BALLOON_COMPACTION
-extern const struct address_space_operations balloon_aops;
-extern bool balloon_page_isolate(struct page *page,
- isolate_mode_t mode);
-extern void balloon_page_putback(struct page *page);
-extern int balloon_page_migrate(struct address_space *mapping,
- struct page *newpage,
- struct page *page, enum migrate_mode mode);
+extern const struct movable_operations balloon_mops;
/*
* balloon_page_insert - insert a page into the balloon's page list and make
@@ -99,8 +91,8 @@ extern int balloon_page_migrate(struct address_space *mapping,
static inline void balloon_page_insert(struct balloon_dev_info *balloon,
struct page *page)
{
- __SetPageBalloon(page);
- __SetPageMovable(page, balloon->inode->i_mapping);
+ __SetPageOffline(page);
+ __SetPageMovable(page, &balloon_mops);
set_page_private(page, (unsigned long)balloon);
list_add(&page->lru, &balloon->pages);
}
@@ -115,7 +107,7 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon,
*/
static inline void balloon_page_delete(struct page *page)
{
- __ClearPageBalloon(page);
+ __ClearPageOffline(page);
__ClearPageMovable(page);
set_page_private(page, 0);
/*
@@ -145,51 +137,50 @@ static inline gfp_t balloon_mapping_gfp_mask(void)
static inline void balloon_page_insert(struct balloon_dev_info *balloon,
struct page *page)
{
- __SetPageBalloon(page);
+ __SetPageOffline(page);
list_add(&page->lru, &balloon->pages);
}
static inline void balloon_page_delete(struct page *page)
{
- __ClearPageBalloon(page);
+ __ClearPageOffline(page);
list_del(&page->lru);
}
-static inline bool __is_movable_balloon_page(struct page *page)
-{
- return false;
-}
-
-static inline bool balloon_page_movable(struct page *page)
+static inline gfp_t balloon_mapping_gfp_mask(void)
{
- return false;
+ return GFP_HIGHUSER;
}
-static inline bool isolated_balloon_page(struct page *page)
-{
- return false;
-}
+#endif /* CONFIG_BALLOON_COMPACTION */
-static inline bool balloon_page_isolate(struct page *page)
+/*
+ * balloon_page_push - insert a page into a page list.
+ * @pages : pointer to list
+ * @page : page to be added
+ *
+ * Caller must ensure the page is private and protect the list.
+ */
+static inline void balloon_page_push(struct list_head *pages, struct page *page)
{
- return false;
+ list_add(&page->lru, pages);
}
-static inline void balloon_page_putback(struct page *page)
+/*
+ * balloon_page_pop - remove a page from a page list.
+ * @pages : pointer to list
+ *
+ * Caller must ensure the page is private and protect the list.
+ */
+static inline struct page *balloon_page_pop(struct list_head *pages)
{
- return;
-}
+ struct page *page = list_first_entry_or_null(pages, struct page, lru);
-static inline int balloon_page_migrate(struct page *newpage,
- struct page *page, enum migrate_mode mode)
-{
- return 0;
-}
+ if (!page)
+ return NULL;
-static inline gfp_t balloon_mapping_gfp_mask(void)
-{
- return GFP_HIGHUSER;
+ list_del(&page->lru);
+ return page;
}
-
-#endif /* CONFIG_BALLOON_COMPACTION */
#endif /* _LINUX_BALLOON_COMPACTION_H */
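With the push/pop helpers above, a driver can build up a private page list
without locking and then hand it to the balloon device in one locked call.
A minimal usage sketch (illustrative only, not part of the patch; error
handling elided):

#include <linux/balloon_compaction.h>
#include <linux/list.h>

/* Inflate by nr_pages: allocate onto a private list, then enqueue in bulk. */
static size_t example_inflate(struct balloon_dev_info *b_dev_info,
			      size_t nr_pages)
{
	LIST_HEAD(pages);	/* private list: no pages_lock needed yet */
	size_t i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = balloon_page_alloc();

		if (!page)
			break;
		balloon_page_push(&pages, page);
	}

	/* Takes b_dev_info->pages_lock internally; returns pages enqueued. */
	return balloon_page_list_enqueue(b_dev_info, &pages);
}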
diff --git a/include/linux/base64.h b/include/linux/base64.h
new file mode 100644
index 000000000000..660d4cb1ef31
--- /dev/null
+++ b/include/linux/base64.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * base64 encoding, lifted from fs/crypto/fname.c.
+ */
+
+#ifndef _LINUX_BASE64_H
+#define _LINUX_BASE64_H
+
+#include <linux/types.h>
+
+#define BASE64_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3)
+
+int base64_encode(const u8 *src, int len, char *dst);
+int base64_decode(const char *src, int len, u8 *dst);
+
+#endif /* _LINUX_BASE64_H */
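A hypothetical caller sketch, assuming (as in the fscrypt original this is
lifted from) that base64_encode() returns the number of characters written
and does not NUL-terminate:

#include <linux/base64.h>
#include <linux/kernel.h>	/* DIV_ROUND_UP, used by BASE64_CHARS() */

static void example_b64(void)
{
	static const u8 blob[8] = { 0xde, 0xad, 0xbe, 0xef, 1, 2, 3, 4 };
	char out[BASE64_CHARS(sizeof(blob)) + 1];	/* 8 bytes -> 11 chars */
	int n;

	n = base64_encode(blob, sizeof(blob), out);
	out[n] = '\0';	/* terminate before printing/logging */
}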
diff --git a/include/linux/bcd.h b/include/linux/bcd.h
index 18fff11fb3ea..abbc8149178e 100644
--- a/include/linux/bcd.h
+++ b/include/linux/bcd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCD_H
#define _BCD_H
@@ -13,8 +14,12 @@
const_bin2bcd(x) : \
_bin2bcd(x))
+#define bcd_is_valid(x) \
+ const_bcd_is_valid(x)
+
#define const_bcd2bin(x) (((x) & 0x0f) + ((x) >> 4) * 10)
#define const_bin2bcd(x) ((((x) / 10) << 4) + (x) % 10)
+#define const_bcd_is_valid(x) (((x) & 0x0f) < 10 && ((x) >> 4) < 10)
unsigned _bcd2bin(unsigned char val) __attribute_const__;
unsigned char _bin2bcd(unsigned val) __attribute_const__;
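The new validity check slots naturally in front of a conversion, e.g. when
reading a BCD register from an RTC (sketch):

#include <linux/bcd.h>
#include <linux/errno.h>

static int example_rtc_minutes(unsigned char reg)
{
	if (!bcd_is_valid(reg))		/* e.g. 0x5a: low nibble >= 10 */
		return -EINVAL;
	return bcd2bin(reg);		/* 0x59 -> 59 */
}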
diff --git a/include/linux/bch.h b/include/linux/bch.h
index 295b4ef153bb..85fdce83d4e2 100644
--- a/include/linux/bch.h
+++ b/include/linux/bch.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Generic binary BCH encoding/decoding library
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 51
- * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
* Copyright © 2011 Parrot S.A.
*
* Author: Ivan Djelic <ivan.djelic@parrot.com>
@@ -45,6 +33,7 @@
* @cache: log-based polynomial representation buffer
* @elp: error locator polynomial
* @poly_2t: temporary polynomials of degree 2t
+ * @swap_bits: swap bits within data and syndrome bytes
*/
struct bch_control {
unsigned int m;
@@ -63,16 +52,18 @@ struct bch_control {
int *cache;
struct gf_poly *elp;
struct gf_poly *poly_2t[4];
+ bool swap_bits;
};
-struct bch_control *init_bch(int m, int t, unsigned int prim_poly);
+struct bch_control *bch_init(int m, int t, unsigned int prim_poly,
+ bool swap_bits);
-void free_bch(struct bch_control *bch);
+void bch_free(struct bch_control *bch);
-void encode_bch(struct bch_control *bch, const uint8_t *data,
+void bch_encode(struct bch_control *bch, const uint8_t *data,
unsigned int len, uint8_t *ecc);
-int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len,
+int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len,
const uint8_t *recv_ecc, const uint8_t *calc_ecc,
const unsigned int *syn, unsigned int *errloc);
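A rough sketch of the renamed API: build a BCH(m=13, t=4) codec, encode,
then correct a received copy. Buffer sizes are illustrative (13*4 = 52
parity bits -> 7 ecc bytes); a real caller would size them from
bch->ecc_bytes:

#include <linux/bch.h>
#include <linux/errno.h>

static int example_bch(u8 *data, unsigned int len, u8 *recv, u8 *recv_ecc)
{
	struct bch_control *bch;
	u8 ecc[7] = {};			/* must start zeroed for a fresh encode */
	unsigned int errloc[4];		/* up to t error locations */
	int nerr, i;

	bch = bch_init(13, 4, 0, false);	/* prim_poly 0: use default */
	if (!bch)
		return -ENOMEM;

	bch_encode(bch, data, len, ecc);

	/* NULL calc_ecc/syn: decode from received data + received ecc. */
	nerr = bch_decode(bch, recv, len, recv_ecc, NULL, NULL, errloc);
	for (i = 0; i < nerr; i++)
		if (errloc[i] < 8 * len)	/* error in data, not ecc */
			recv[errloc[i] / 8] ^= 1 << (errloc[i] % 8);

	bch_free(bch);
	return nerr;	/* negative if uncorrectable */
}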
diff --git a/include/linux/bcm47xx_nvram.h b/include/linux/bcm47xx_nvram.h
index a414a2b53e41..7615f8d7b1ed 100644
--- a/include/linux/bcm47xx_nvram.h
+++ b/include/linux/bcm47xx_nvram.h
@@ -1,8 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __BCM47XX_NVRAM_H
@@ -14,6 +11,7 @@
#include <linux/vmalloc.h>
#ifdef CONFIG_BCM47XX_NVRAM
+int bcm47xx_nvram_init_from_iomem(void __iomem *nvram_start, size_t res_size);
int bcm47xx_nvram_init_from_mem(u32 base, u32 lim);
int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len);
int bcm47xx_nvram_gpio_pin(const char *name);
@@ -23,6 +21,11 @@ static inline void bcm47xx_nvram_release_contents(char *nvram)
vfree(nvram);
};
#else
+static inline int bcm47xx_nvram_init_from_iomem(void __iomem *nvram_start,
+ size_t res_size)
+{
+ return -ENOTSUPP;
+}
static inline int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
{
return -ENOTSUPP;
diff --git a/include/linux/bcm47xx_sprom.h b/include/linux/bcm47xx_sprom.h
index c06b47c84e1a..f8254fd53e15 100644
--- a/include/linux/bcm47xx_sprom.h
+++ b/include/linux/bcm47xx_sprom.h
@@ -1,8 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __BCM47XX_SPROM_H
@@ -12,9 +9,19 @@
#include <linux/kernel.h>
#include <linux/vmalloc.h>
+struct ssb_sprom;
+
#ifdef CONFIG_BCM47XX_SPROM
+void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix,
+ bool fallback);
int bcm47xx_sprom_register_fallbacks(void);
#else
+static inline void bcm47xx_fill_sprom(struct ssb_sprom *sprom,
+ const char *prefix,
+ bool fallback)
+{
+}
+
static inline int bcm47xx_sprom_register_fallbacks(void)
{
return -ENOTSUPP;
diff --git a/include/linux/bcm47xx_wdt.h b/include/linux/bcm47xx_wdt.h
index 8d9d07ec22a5..fc9dcdb4b979 100644
--- a/include/linux/bcm47xx_wdt.h
+++ b/include/linux/bcm47xx_wdt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_BCM47XX_WDT_H_
#define LINUX_BCM47XX_WDT_H_
diff --git a/include/linux/bcm963xx_nvram.h b/include/linux/bcm963xx_nvram.h
index 290c231b8cf1..c8c7f01159fe 100644
--- a/include/linux/bcm963xx_nvram.h
+++ b/include/linux/bcm963xx_nvram.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BCM963XX_NVRAM_H__
#define __LINUX_BCM963XX_NVRAM_H__
diff --git a/include/linux/bcm963xx_tag.h b/include/linux/bcm963xx_tag.h
index 161c7b37a77b..7edb809a2586 100644
--- a/include/linux/bcm963xx_tag.h
+++ b/include/linux/bcm963xx_tag.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BCM963XX_TAG_H__
#define __LINUX_BCM963XX_TAG_H__
@@ -83,7 +84,7 @@ struct bcm_tag {
char flash_layout_ver[FLASHLAYOUTVER_LEN];
/* 196-199: kernel+rootfs CRC32 */
__u32 fskernel_crc;
- /* 200-215: Unused except on Alice Gate where is is information */
+ /* 200-215: Unused except on Alice Gate where it is information */
char information2[TAGINFO2_LEN];
/* 216-219: CRC32 of image less imagetag (kernel for Alice Gate) */
__u32 image_crc;
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 8eeedb2db924..60b94b944e9f 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_BCMA_H_
#define LINUX_BCMA_H_
@@ -331,6 +332,8 @@ extern int bcma_arch_register_fallback_sprom(
struct ssb_sprom *out));
struct bcma_bus {
+ struct device *dev;
+
/* The MMIO area. */
void __iomem *mmio;
@@ -338,14 +341,7 @@ struct bcma_bus {
enum bcma_hosttype hosttype;
bool host_is_pcie2; /* Used for BCMA_HOSTTYPE_PCI only */
- union {
- /* Pointer to the PCI bus (only for BCMA_HOSTTYPE_PCI) */
- struct pci_dev *host_pci;
- /* Pointer to the SDIO device (only for BCMA_HOSTTYPE_SDIO) */
- struct sdio_func *host_sdio;
- /* Pointer to platform device (only for BCMA_HOSTTYPE_SOC) */
- struct platform_device *host_pdev;
- };
+ struct pci_dev *host_pci; /* PCI bus pointer (BCMA_HOSTTYPE_PCI only) */
struct bcma_chipinfo chipinfo;
diff --git a/include/linux/bcma/bcma_driver_arm_c9.h b/include/linux/bcma/bcma_driver_arm_c9.h
index 93bd73d670d5..688cf590c99b 100644
--- a/include/linux/bcma/bcma_driver_arm_c9.h
+++ b/include/linux/bcma/bcma_driver_arm_c9.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_BCMA_DRIVER_ARM_C9_H_
#define LINUX_BCMA_DRIVER_ARM_C9_H_
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 2f1c690a3e66..0cb6638b55e5 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -1,8 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_BCMA_DRIVER_CC_H_
#define LINUX_BCMA_DRIVER_CC_H_
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/platform_data/brcmnand.h>
+#include <linux/gpio/driver.h>
/** ChipCommon core registers. **/
#define BCMA_CC_ID 0x0000
@@ -269,6 +271,7 @@
#define BCMA_CC_SROM_CONTROL_OP_WRDIS 0x40000000
#define BCMA_CC_SROM_CONTROL_OP_WREN 0x60000000
#define BCMA_CC_SROM_CONTROL_OTPSEL 0x00000010
+#define BCMA_CC_SROM_CONTROL_OTP_PRESENT 0x00000020
#define BCMA_CC_SROM_CONTROL_LOCK 0x00000008
#define BCMA_CC_SROM_CONTROL_SIZE_MASK 0x00000006
#define BCMA_CC_SROM_CONTROL_SIZE_1K 0x00000000
@@ -598,6 +601,10 @@ struct bcma_sflash {
#ifdef CONFIG_BCMA_NFLASH
struct bcma_nflash {
+	/* Must be the first member for the brcmnand driver to
+	 * dereference that structure.
+ */
+ struct brcmnand_platform_data brcmnand_info;
bool present;
bool boot; /* This is the flash the SoC boots from */
};
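The first-member requirement exists so that a pointer to the embedded
brcmnand_platform_data and a pointer to the containing bcma_nflash are
interchangeable. A short illustration (assumes CONFIG_BCMA_NFLASH):

#include <linux/bcma/bcma.h>
#include <linux/container_of.h>

static struct bcma_nflash *example_to_nflash(struct brcmnand_platform_data *pd)
{
	/* With brcmnand_info first, this is a plain pointer identity. */
	return container_of(pd, struct bcma_nflash, brcmnand_info);
}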
diff --git a/include/linux/bcma/bcma_driver_gmac_cmn.h b/include/linux/bcma/bcma_driver_gmac_cmn.h
index 4354d4ea6713..420e222d7a22 100644
--- a/include/linux/bcma/bcma_driver_gmac_cmn.h
+++ b/include/linux/bcma/bcma_driver_gmac_cmn.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_BCMA_DRIVER_GMAC_CMN_H_
#define LINUX_BCMA_DRIVER_GMAC_CMN_H_
diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h
index 8eea7f9e33b4..798013fab54f 100644
--- a/include/linux/bcma/bcma_driver_mips.h
+++ b/include/linux/bcma/bcma_driver_mips.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_BCMA_DRIVER_MIPS_H_
#define LINUX_BCMA_DRIVER_MIPS_H_
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index bca6a5e4ca3d..68da8dba5162 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_BCMA_DRIVER_PCI_H_
#define LINUX_BCMA_DRIVER_PCI_H_
diff --git a/include/linux/bcma/bcma_driver_pcie2.h b/include/linux/bcma/bcma_driver_pcie2.h
index 31e6d17ab798..91ce515e3a77 100644
--- a/include/linux/bcma/bcma_driver_pcie2.h
+++ b/include/linux/bcma/bcma_driver_pcie2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_BCMA_DRIVER_PCIE2_H_
#define LINUX_BCMA_DRIVER_PCIE2_H_
diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h
index 9986f8288d01..944105cbd671 100644
--- a/include/linux/bcma/bcma_regs.h
+++ b/include/linux/bcma/bcma_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_BCMA_REGS_H_
#define LINUX_BCMA_REGS_H_
diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h
index 1b5fc0c3b1b5..f3c43519baa7 100644
--- a/include/linux/bcma/bcma_soc.h
+++ b/include/linux/bcma/bcma_soc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_BCMA_SOC_H_
#define LINUX_BCMA_SOC_H_
@@ -5,6 +6,7 @@
struct bcma_soc {
struct bcma_bus bus;
+ struct device *dev;
};
int __init bcma_host_soc_register(struct bcma_soc *soc);
diff --git a/include/linux/bfin_mac.h b/include/linux/bfin_mac.h
deleted file mode 100644
index a69554ef8476..000000000000
--- a/include/linux/bfin_mac.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Blackfin On-Chip MAC Driver
- *
- * Copyright 2004-2010 Analog Devices Inc.
- *
- * Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef _LINUX_BFIN_MAC_H_
-#define _LINUX_BFIN_MAC_H_
-
-#include <linux/phy.h>
-
-struct bfin_phydev_platform_data {
- unsigned short addr;
- int irq;
-};
-
-struct bfin_mii_bus_platform_data {
- int phydev_number;
- struct bfin_phydev_platform_data *phydev_data;
- const unsigned short *mac_peripherals;
- int phy_mode;
- unsigned int phy_mask;
- unsigned short vlan1_mask, vlan2_mask;
-};
-
-#endif
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 05488da3aee9..8d51f69f9f5e 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BINFMTS_H
#define _LINUX_BINFMTS_H
@@ -7,6 +8,7 @@
#include <uapi/linux/binfmts.h>
struct filename;
+struct coredump_params;
#define CORENAME_MAX_SIZE 128
@@ -14,7 +16,6 @@ struct filename;
* This structure is used to hold the arguments that are used when loading binaries.
*/
struct linux_binprm {
- char buf[BINPRM_BUF_SIZE];
#ifdef CONFIG_MMU
struct vm_area_struct *vma;
unsigned long vma_pages;
@@ -24,51 +25,55 @@ struct linux_binprm {
#endif
struct mm_struct *mm;
unsigned long p; /* current top of mem */
+ unsigned long argmin; /* rlimit marker for copy_strings() */
unsigned int
- cred_prepared:1,/* true if creds already prepared (multiple
- * preps happen for interpreters) */
- cap_effective:1;/* true if has elevated effective capabilities,
- * false if not; except for init which inherits
- * its parent's caps anyway */
-#ifdef __alpha__
- unsigned int taso:1;
-#endif
- unsigned int recursion_depth; /* only for search_binary_handler() */
- struct file * file;
+ /* Should an execfd be passed to userspace? */
+ have_execfd:1,
+
+ /* Use the creds of a script (see binfmt_misc) */
+ execfd_creds:1,
+ /*
+ * Set by bprm_creds_for_exec hook to indicate a
+ * privilege-gaining exec has happened. Used to set
+ * AT_SECURE auxv for glibc.
+ */
+ secureexec:1,
+ /*
+ * Set when errors can no longer be returned to the
+ * original userspace.
+ */
+ point_of_no_return:1;
+ struct file *executable; /* Executable to pass to the interpreter */
+ struct file *interpreter;
+ struct file *file;
struct cred *cred; /* new credentials */
int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */
unsigned int per_clear; /* bits to clear in current->personality */
int argc, envc;
- const char * filename; /* Name of binary as seen by procps */
- const char * interp; /* Name of the binary really executed. Most
+ const char *filename; /* Name of binary as seen by procps */
+ const char *interp; /* Name of the binary really executed. Most
of the time same as filename, but could be
different for binfmt_{misc,script} */
+ const char *fdpath; /* generated filename for execveat */
unsigned interp_flags;
- unsigned interp_data;
+ int execfd; /* File descriptor of the executable */
unsigned long loader, exec;
-};
+
+ struct rlimit rlim_stack; /* Saved RLIMIT_STACK used during exec. */
+
+ char buf[BINPRM_BUF_SIZE];
+} __randomize_layout;
#define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
#define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
-/* fd of the binary should be passed to the interpreter */
-#define BINPRM_FLAGS_EXECFD_BIT 1
-#define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT)
-
/* filename of the binary will be inaccessible after exec */
#define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2
#define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT)
-/* Function parameter for binfmt->coredump */
-struct coredump_params {
- const siginfo_t *siginfo;
- struct pt_regs *regs;
- struct file *file;
- unsigned long limit;
- unsigned long mm_flags;
- loff_t written;
- loff_t pos;
-};
+/* preserve argv0 for the interpreter */
+#define BINPRM_FLAGS_PRESERVE_ARGV0_BIT 3
+#define BINPRM_FLAGS_PRESERVE_ARGV0 (1 << BINPRM_FLAGS_PRESERVE_ARGV0_BIT)
/*
* This structure defines the functions that are used to load the binary formats that
@@ -79,9 +84,11 @@ struct linux_binfmt {
struct module *module;
int (*load_binary)(struct linux_binprm *);
int (*load_shlib)(struct file *);
+#ifdef CONFIG_COREDUMP
int (*core_dump)(struct coredump_params *cprm);
unsigned long min_coredump; /* minimal dump size */
-};
+#endif
+} __randomize_layout;
extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
@@ -98,11 +105,10 @@ static inline void insert_binfmt(struct linux_binfmt *fmt)
extern void unregister_binfmt(struct linux_binfmt *);
-extern int prepare_binprm(struct linux_binprm *);
extern int __must_check remove_arg_zero(struct linux_binprm *);
-extern int search_binary_handler(struct linux_binprm *);
-extern int flush_old_exec(struct linux_binprm * bprm);
+extern int begin_new_exec(struct linux_binprm * bprm);
extern void setup_new_exec(struct linux_binprm * bprm);
+extern void finalize_exec(struct linux_binprm *bprm);
extern void would_dump(struct linux_binprm *, struct file *);
extern int suid_dumpable;
@@ -117,20 +123,12 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
int executable_stack);
extern int transfer_args_to_stack(struct linux_binprm *bprm,
unsigned long *sp_location);
-extern int bprm_change_interp(char *interp, struct linux_binprm *bprm);
-extern int copy_strings_kernel(int argc, const char *const *argv,
- struct linux_binprm *bprm);
-extern int prepare_bprm_creds(struct linux_binprm *bprm);
-extern void install_exec_creds(struct linux_binprm *bprm);
+extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm);
+int copy_string_kernel(const char *arg, struct linux_binprm *bprm);
extern void set_binfmt(struct linux_binfmt *new);
extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t);
-extern int do_execve(struct filename *,
- const char __user * const __user *,
- const char __user * const __user *);
-extern int do_execveat(int, struct filename *,
- const char __user * const __user *,
- const char __user * const __user *,
- int);
+int kernel_execve(const char *filename,
+ const char *const *argv, const char *const *envp);
#endif /* _LINUX_BINFMTS_H */
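Skeleton of a format handler against the reworked interface (a sketch, not
from the patch): the handler inspects bprm->buf, the first BINPRM_BUF_SIZE
bytes of the file, and returns -ENOEXEC to let the next registered format
try:

#include <linux/binfmts.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>

static int example_load_binary(struct linux_binprm *bprm)
{
	if (memcmp(bprm->buf, "XMPL", 4) != 0)
		return -ENOEXEC;	/* not ours; try the next handler */
	/* ... call begin_new_exec()/setup_new_exec(), map segments ... */
	return 0;
}

static struct linux_binfmt example_format = {
	.module      = THIS_MODULE,
	.load_binary = example_load_binary,
};

static int __init example_init(void)
{
	register_binfmt(&example_format);
	return 0;
}
core_initcall(example_init);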
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 7b1cf4ba0902..b3e7529ff55e 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -1,44 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2001 Jens Axboe <axboe@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public Licens
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
*/
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H
-#include <linux/highmem.h>
#include <linux/mempool.h>
-#include <linux/ioprio.h>
-#include <linux/bug.h>
-
-#ifdef CONFIG_BLOCK
-
-#include <asm/io.h>
-
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
+#include <linux/uio.h>
-#define BIO_DEBUG
+#define BIO_MAX_VECS 256U
-#ifdef BIO_DEBUG
-#define BIO_BUG_ON BUG_ON
-#else
-#define BIO_BUG_ON
-#endif
+struct queue_limits;
-#define BIO_MAX_PAGES 256
+static inline unsigned int bio_max_segs(unsigned int nr_segs)
+{
+ return min(nr_segs, BIO_MAX_VECS);
+}
#define bio_prio(bio) (bio)->bi_ioprio
#define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio)
@@ -57,10 +36,11 @@
#define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter)
-#define bio_multiple_segments(bio) \
- ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
-#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)
-#define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio)))
+#define bvec_iter_sectors(iter) ((iter).bi_size >> 9)
+#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))
+
+#define bio_sectors(bio) bvec_iter_sectors((bio)->bi_iter)
+#define bio_end_sector(bio) bvec_iter_end_sector((bio)->bi_iter)
/*
* Return the data direction, READ or WRITE.
@@ -83,30 +63,13 @@ static inline bool bio_has_data(struct bio *bio)
return false;
}
-static inline bool bio_no_advance_iter(struct bio *bio)
+static inline bool bio_no_advance_iter(const struct bio *bio)
{
return bio_op(bio) == REQ_OP_DISCARD ||
bio_op(bio) == REQ_OP_SECURE_ERASE ||
- bio_op(bio) == REQ_OP_WRITE_SAME ||
bio_op(bio) == REQ_OP_WRITE_ZEROES;
}
-static inline bool bio_mergeable(struct bio *bio)
-{
- if (bio->bi_opf & REQ_NOMERGE_FLAGS)
- return false;
-
- return true;
-}
-
-static inline unsigned int bio_cur_bytes(struct bio *bio)
-{
- if (bio_has_data(bio))
- return bio_iovec(bio).bv_len;
- else /* dataless requests such as discard */
- return bio->bi_iter.bi_size;
-}
-
static inline void *bio_data(struct bio *bio)
{
if (bio_has_data(bio))
@@ -115,88 +78,97 @@ static inline void *bio_data(struct bio *bio)
return NULL;
}
-/*
- * will die
- */
-#define bvec_to_phys(bv) (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
-
-/*
- * queues that have highmem support enabled may still need to revert to
- * PIO transfers occasionally and thus map high pages temporarily. For
- * permanent PIO fall back, user is probably better off disabling highmem
- * I/O completely on that queue (see ide-dma for example)
- */
-#define __bio_kmap_atomic(bio, iter) \
- (kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) + \
- bio_iter_iovec((bio), (iter)).bv_offset)
-
-#define __bio_kunmap_atomic(addr) kunmap_atomic(addr)
-
-/*
- * merge helpers etc
- */
-
-/* Default implementation of BIOVEC_PHYS_MERGEABLE */
-#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
- ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
-
-/*
- * allow arch override, for eg virtualized architectures (put in asm/io.h)
- */
-#ifndef BIOVEC_PHYS_MERGEABLE
-#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
- __BIOVEC_PHYS_MERGEABLE(vec1, vec2)
-#endif
+static inline bool bio_next_segment(const struct bio *bio,
+ struct bvec_iter_all *iter)
+{
+ if (iter->idx >= bio->bi_vcnt)
+ return false;
-#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
- (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
-#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
- __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
+ bvec_advance(&bio->bi_io_vec[iter->idx], iter);
+ return true;
+}
/*
* drivers should _never_ use the all version - the bio may have been split
* before it got to the driver and the driver won't own all of it
*/
-#define bio_for_each_segment_all(bvl, bio, i) \
- for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)
+#define bio_for_each_segment_all(bvl, bio, iter) \
+ for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )
-static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
- unsigned bytes)
+static inline void bio_advance_iter(const struct bio *bio,
+ struct bvec_iter *iter, unsigned int bytes)
{
iter->bi_sector += bytes >> 9;
- if (bio_no_advance_iter(bio)) {
+ if (bio_no_advance_iter(bio))
iter->bi_size -= bytes;
- iter->bi_done += bytes;
- } else {
+ else
bvec_iter_advance(bio->bi_io_vec, iter, bytes);
/* TODO: It is reasonable to complete bio with error here. */
- }
}
-static inline bool bio_rewind_iter(struct bio *bio, struct bvec_iter *iter,
- unsigned int bytes)
+/* @bytes should be less than or equal to bvec[i->bi_idx].bv_len */
+static inline void bio_advance_iter_single(const struct bio *bio,
+ struct bvec_iter *iter,
+ unsigned int bytes)
{
- iter->bi_sector -= bytes >> 9;
+ iter->bi_sector += bytes >> 9;
- if (bio_no_advance_iter(bio)) {
- iter->bi_size += bytes;
- iter->bi_done -= bytes;
- return true;
- }
+ if (bio_no_advance_iter(bio))
+ iter->bi_size -= bytes;
+ else
+ bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
+}
+
+void __bio_advance(struct bio *, unsigned bytes);
- return bvec_iter_rewind(bio->bi_io_vec, iter, bytes);
+/**
+ * bio_advance - increment/complete a bio by some number of bytes
+ * @bio: bio to advance
+ * @nbytes: number of bytes to complete
+ *
+ * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
+ * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
+ * be updated on the last bvec as well.
+ *
+ * @bio will then represent the remaining, uncompleted portion of the io.
+ */
+static inline void bio_advance(struct bio *bio, unsigned int nbytes)
+{
+ if (nbytes == bio->bi_iter.bi_size) {
+ bio->bi_iter.bi_size = 0;
+ return;
+ }
+ __bio_advance(bio, nbytes);
}
#define __bio_for_each_segment(bvl, bio, iter, start) \
for (iter = (start); \
(iter).bi_size && \
((bvl = bio_iter_iovec((bio), (iter))), 1); \
- bio_advance_iter((bio), &(iter), (bvl).bv_len))
+ bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
#define bio_for_each_segment(bvl, bio, iter) \
__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
+#define __bio_for_each_bvec(bvl, bio, iter, start) \
+ for (iter = (start); \
+ (iter).bi_size && \
+ ((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
+ bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
+
+/* iterate over multi-page bvec */
+#define bio_for_each_bvec(bvl, bio, iter) \
+ __bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)
+
+/*
+ * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the
+ * same reasons as bio_for_each_segment_all().
+ */
+#define bio_for_each_bvec_all(bvl, bio, i) \
+ for (i = 0, bvl = bio_first_bvec_all(bio); \
+ i < (bio)->bi_vcnt; i++, bvl++)
+
#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
static inline unsigned bio_segments(struct bio *bio)
@@ -215,8 +187,6 @@ static inline unsigned bio_segments(struct bio *bio)
case REQ_OP_SECURE_ERASE:
case REQ_OP_WRITE_ZEROES:
return 0;
- case REQ_OP_WRITE_SAME:
- return 1;
default:
break;
}
@@ -252,7 +222,7 @@ static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
if (count != 1) {
bio->bi_flags |= (1 << BIO_REFFED);
- smp_mb__before_atomic();
+ smp_mb();
}
atomic_set(&bio->__bi_cnt, count);
}
@@ -272,38 +242,77 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
bio->bi_flags &= ~(1U << bit);
}
-static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
+static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
- *bv = bio_iovec(bio);
+ WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+ return bio->bi_io_vec;
}
-static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
+static inline struct page *bio_first_page_all(struct bio *bio)
{
- struct bvec_iter iter = bio->bi_iter;
- int idx;
+ return bio_first_bvec_all(bio)->bv_page;
+}
- if (unlikely(!bio_multiple_segments(bio))) {
- *bv = bio_iovec(bio);
- return;
- }
+static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
+{
+ WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+ return &bio->bi_io_vec[bio->bi_vcnt - 1];
+}
- bio_advance_iter(bio, &iter, iter.bi_size);
+/**
+ * struct folio_iter - State for iterating all folios in a bio.
+ * @folio: The current folio we're iterating. NULL after the last folio.
+ * @offset: The byte offset within the current folio.
+ * @length: The number of bytes in this iteration (will not cross folio
+ * boundary).
+ */
+struct folio_iter {
+ struct folio *folio;
+ size_t offset;
+ size_t length;
+ /* private: for use by the iterator */
+ struct folio *_next;
+ size_t _seg_count;
+ int _i;
+};
- if (!iter.bi_bvec_done)
- idx = iter.bi_idx - 1;
- else /* in the middle of bvec */
- idx = iter.bi_idx;
+static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
+ int i)
+{
+ struct bio_vec *bvec = bio_first_bvec_all(bio) + i;
- *bv = bio->bi_io_vec[idx];
+ fi->folio = page_folio(bvec->bv_page);
+ fi->offset = bvec->bv_offset +
+ PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
+ fi->_seg_count = bvec->bv_len;
+ fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
+ fi->_next = folio_next(fi->folio);
+ fi->_i = i;
+}
- /*
- * iter.bi_bvec_done records actual length of the last bvec
- * if this bio ends in the middle of one io vector
- */
- if (iter.bi_bvec_done)
- bv->bv_len = iter.bi_bvec_done;
+static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
+{
+ fi->_seg_count -= fi->length;
+ if (fi->_seg_count) {
+ fi->folio = fi->_next;
+ fi->offset = 0;
+ fi->length = min(folio_size(fi->folio), fi->_seg_count);
+ fi->_next = folio_next(fi->folio);
+ } else if (fi->_i + 1 < bio->bi_vcnt) {
+ bio_first_folio(fi, bio, fi->_i + 1);
+ } else {
+ fi->folio = NULL;
+ }
}
+/**
+ * bio_for_each_folio_all - Iterate over each folio in a bio.
+ * @fi: struct folio_iter which is updated for each folio.
+ * @bio: struct bio to iterate over.
+ */
+#define bio_for_each_folio_all(fi, bio) \
+ for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
+
enum bip_flags {
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
@@ -320,15 +329,16 @@ struct bio_integrity_payload {
struct bvec_iter bip_iter;
- unsigned short bip_slab; /* slab the bip came from */
unsigned short bip_vcnt; /* # of integrity bio_vecs */
unsigned short bip_max_vcnt; /* integrity bio_vec slots */
unsigned short bip_flags; /* control flags */
+ struct bvec_iter bio_iter; /* for rewinding parent bio */
+
struct work_struct bip_work; /* I/O completion */
struct bio_vec *bip_vec;
- struct bio_vec bip_inline_vecs[0];/* embedded bvec array */
+ struct bio_vec bip_inline_vecs[];/* embedded bvec array */
};
#if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -364,9 +374,11 @@ static inline void bip_set_seed(struct bio_integrity_payload *bip,
#endif /* CONFIG_BLK_DEV_INTEGRITY */
-extern void bio_trim(struct bio *bio, int offset, int size);
+void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
gfp_t gfp, struct bio_set *bs);
+struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
+ unsigned *segs, struct bio_set *bs, unsigned max_bytes);
/**
* bio_next_split - get next @sectors from a bio, splitting if necessary
@@ -375,7 +387,7 @@ extern struct bio *bio_split(struct bio *bio, int sectors,
* @gfp: gfp mask
* @bs: bio set to allocate from
*
- * Returns a bio representing the next @sectors of @bio - if the bio is smaller
+ * Return: a bio representing the next @sectors of @bio - if the bio is smaller
* than @sectors, returns the original bio unchanged.
*/
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
@@ -387,40 +399,35 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors,
return bio_split(bio, sectors, gfp, bs);
}
-extern struct bio_set *bioset_create(unsigned int, unsigned int, int flags);
enum {
BIOSET_NEED_BVECS = BIT(0),
BIOSET_NEED_RESCUER = BIT(1),
+ BIOSET_PERCPU_CACHE = BIT(2),
};
-extern void bioset_free(struct bio_set *);
-extern mempool_t *biovec_create_pool(int pool_entries);
-
-extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
+extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
+extern void bioset_exit(struct bio_set *);
+extern int biovec_init_pool(mempool_t *pool, int pool_entries);
+
+struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
+ blk_opf_t opf, gfp_t gfp_mask,
+ struct bio_set *bs);
+struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
extern void bio_put(struct bio *);
-extern void __bio_clone_fast(struct bio *, struct bio *);
-extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
-extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
+struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
+ gfp_t gfp, struct bio_set *bs);
+int bio_init_clone(struct block_device *bdev, struct bio *bio,
+ struct bio *bio_src, gfp_t gfp);
-extern struct bio_set *fs_bio_set;
+extern struct bio_set fs_bio_set;
-static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
+static inline struct bio *bio_alloc(struct block_device *bdev,
+ unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)
{
- return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
+ return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
}
-static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
- return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
-}
-
-static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
-{
- return bio_clone_bioset(bio, gfp_mask, NULL);
-
-}
-
-extern blk_qc_t submit_bio(struct bio *);
+void submit_bio(struct bio *bio);
extern void bio_endio(struct bio *);
@@ -432,132 +439,89 @@ static inline void bio_io_error(struct bio *bio)
static inline void bio_wouldblock_error(struct bio *bio)
{
+ bio_set_flag(bio, BIO_QUIET);
bio->bi_status = BLK_STS_AGAIN;
bio_endio(bio);
}
+/*
+ * Calculate the number of bvec segments that should be allocated to fit
+ * the data pointed to by @iter. If @iter is backed by a bvec, that bvec
+ * is reused instead of allocating a new one.
+ */
+static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
+{
+ if (iov_iter_is_bvec(iter))
+ return 0;
+ return iov_iter_npages(iter, max_segs);
+}
+
struct request_queue;
-extern int bio_phys_segments(struct request_queue *, struct bio *);
extern int submit_bio_wait(struct bio *bio);
-extern void bio_advance(struct bio *, unsigned);
-
-extern void bio_init(struct bio *bio, struct bio_vec *table,
- unsigned short max_vecs);
+void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
+ unsigned short max_vecs, blk_opf_t opf);
extern void bio_uninit(struct bio *);
-extern void bio_reset(struct bio *);
+void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);
-extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
+int bio_add_page(struct bio *, struct page *, unsigned len, unsigned off);
+bool bio_add_folio(struct bio *, struct folio *, size_t len, size_t off);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
+int bio_add_zone_append_page(struct bio *bio, struct page *page,
+ unsigned int len, unsigned int offset);
+void __bio_add_page(struct bio *bio, struct page *page,
+ unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
-struct rq_map_data;
-extern struct bio *bio_map_user_iov(struct request_queue *,
- const struct iov_iter *, gfp_t);
-extern void bio_unmap_user(struct bio *);
-extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
- gfp_t);
-extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
- gfp_t, int);
+void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter);
+void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
-void generic_start_io_acct(int rw, unsigned long sectors,
- struct hd_struct *part);
-void generic_end_io_acct(int rw, struct hd_struct *part,
- unsigned long start_time);
-
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
-#endif
-#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-extern void bio_flush_dcache_pages(struct bio *bi);
-#else
-static inline void bio_flush_dcache_pages(struct bio *bi)
-{
-}
-#endif
-
+extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
+ struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
-extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
extern void bio_free_pages(struct bio *bio);
-
-extern struct bio *bio_copy_user_iov(struct request_queue *,
- struct rq_map_data *,
- const struct iov_iter *,
- gfp_t);
-extern int bio_uncopy_user(struct bio *);
+void guard_bio_eod(struct bio *bio);
void zero_fill_bio(struct bio *bio);
-extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
-extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
-extern unsigned int bvec_nr_vecs(unsigned short idx);
-
-#ifdef CONFIG_BLK_CGROUP
-int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
-int bio_associate_current(struct bio *bio);
-void bio_disassociate_task(struct bio *bio);
-void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
-#else /* CONFIG_BLK_CGROUP */
-static inline int bio_associate_blkcg(struct bio *bio,
- struct cgroup_subsys_state *blkcg_css) { return 0; }
-static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
-static inline void bio_disassociate_task(struct bio *bio) { }
-static inline void bio_clone_blkcg_association(struct bio *dst,
- struct bio *src) { }
-#endif /* CONFIG_BLK_CGROUP */
-
-#ifdef CONFIG_HIGHMEM
-/*
- * remember never ever reenable interrupts between a bvec_kmap_irq and
- * bvec_kunmap_irq!
- */
-static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
-{
- unsigned long addr;
-
- /*
- * might not be a highmem page, but the preempt/irq count
- * balancing is a lot nicer this way
- */
- local_irq_save(*flags);
- addr = (unsigned long) kmap_atomic(bvec->bv_page);
-
- BUG_ON(addr & ~PAGE_MASK);
-
- return (char *) addr + bvec->bv_offset;
-}
-static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
+static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
{
- unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
-
- kunmap_atomic((void *) ptr);
- local_irq_restore(*flags);
+ if (!bio_flagged(bio, BIO_NO_PAGE_REF))
+ __bio_release_pages(bio, mark_dirty);
}
-#else
-static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
-{
- return page_address(bvec->bv_page) + bvec->bv_offset;
-}
+#define bio_dev(bio) \
+ disk_devt((bio)->bi_bdev->bd_disk)
-static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
+#ifdef CONFIG_BLK_CGROUP
+void bio_associate_blkg(struct bio *bio);
+void bio_associate_blkg_from_css(struct bio *bio,
+ struct cgroup_subsys_state *css);
+void bio_clone_blkg_association(struct bio *dst, struct bio *src);
+void blkcg_punt_bio_submit(struct bio *bio);
+#else /* CONFIG_BLK_CGROUP */
+static inline void bio_associate_blkg(struct bio *bio) { }
+static inline void bio_associate_blkg_from_css(struct bio *bio,
+ struct cgroup_subsys_state *css)
+{ }
+static inline void bio_clone_blkg_association(struct bio *dst,
+ struct bio *src) { }
+static inline void blkcg_punt_bio_submit(struct bio *bio)
{
- *flags = 0;
+ submit_bio(bio);
}
-#endif
+#endif /* CONFIG_BLK_CGROUP */
-static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
- unsigned long *flags)
+static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
{
- return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
+ bio_clear_flag(bio, BIO_REMAPPED);
+ if (bio->bi_bdev != bdev)
+ bio_clear_flag(bio, BIO_BPS_THROTTLED);
+ bio->bi_bdev = bdev;
+ bio_associate_blkg(bio);
}
-#define __bio_kunmap_irq(buf, flags) bvec_kunmap_irq(buf, flags)
-
-#define bio_kmap_irq(bio, flags) \
- __bio_kmap_irq((bio), (bio)->bi_iter, (flags))
-#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags)
/*
* BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
@@ -698,13 +662,19 @@ struct bio_set {
struct kmem_cache *bio_slab;
unsigned int front_pad;
- mempool_t *bio_pool;
- mempool_t *bvec_pool;
+ /*
+ * per-cpu bio alloc cache
+ */
+ struct bio_alloc_cache __percpu *cache;
+
+ mempool_t bio_pool;
+ mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
- mempool_t *bio_integrity_pool;
- mempool_t *bvec_integrity_pool;
+ mempool_t bio_integrity_pool;
+ mempool_t bvec_integrity_pool;
#endif
+ unsigned int back_pad;
/*
* Deadlock avoidance for stacking block drivers: see comments in
* bio_alloc_bioset() for details
@@ -713,19 +683,17 @@ struct bio_set {
struct bio_list rescue_list;
struct work_struct rescue_work;
struct workqueue_struct *rescue_workqueue;
-};
-struct biovec_slab {
- int nr_vecs;
- char *name;
- struct kmem_cache *slab;
+ /*
+	 * CPU hot-unplug notifier for the per-cpu cache, if used
+ */
+ struct hlist_node cpuhp_dead;
};
-/*
- * a small number of entries is fine, not going to be performance critical.
- * basically we just need to survive
- */
-#define BIO_SPLIT_ENTRIES 2
+static inline bool bioset_initialized(struct bio_set *bs)
+{
+ return bs->bio_slab != NULL;
+}
#if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -809,5 +777,26 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
#endif /* CONFIG_BLK_DEV_INTEGRITY */
-#endif /* CONFIG_BLOCK */
+/*
+ * Mark a bio as polled. Note that for async polled IO, the caller must
+ * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
+ * We cannot block waiting for requests on polled IO, as those completions
+ * must be found by the caller. This is different than IRQ driven IO, where
+ * it's safe to wait for IO to complete.
+ */
+static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
+{
+ bio->bi_opf |= REQ_POLLED;
+ if (!is_sync_kiocb(kiocb))
+ bio->bi_opf |= REQ_NOWAIT;
+}
+
+static inline void bio_clear_polled(struct bio *bio)
+{
+ bio->bi_opf &= ~REQ_POLLED;
+}
+
+struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
+ unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);
+
#endif /* __LINUX_BIO_H */
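Two of the reworked interfaces side by side (a sketch under the new
signatures): allocation now names the target device and operation up
front, and per-segment iteration goes through a bvec_iter:

#include <linux/bio.h>
#include <linux/blkdev.h>

static void example_write_page(struct block_device *bdev, struct page *page,
			       sector_t sector)
{
	/* GFP_NOIO includes direct reclaim, so allocation cannot fail here. */
	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_WRITE, GFP_NOIO);

	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, PAGE_SIZE, 0);	/* slot reserved above */
	submit_bio(bio);	/* completion arrives via bio->bi_end_io */
}

static unsigned int example_count_bytes(struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int bytes = 0;

	bio_for_each_segment(bv, bio, iter)
		bytes += bv.bv_len;
	return bytes;
}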
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
index 3b5bafce4337..bbc4730a6505 100644
--- a/include/linux/bit_spinlock.h
+++ b/include/linux/bit_spinlock.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BIT_SPINLOCK_H
#define __LINUX_BIT_SPINLOCK_H
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 8b9d6fff002d..ebfa12f69501 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -1,21 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2014 Felix Fietkau <nbd@nbd.name>
* Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _LINUX_BITFIELD_H
#define _LINUX_BITFIELD_H
-#include <linux/bug.h>
+#include <linux/build_bug.h>
+#include <asm/byteorder.h>
/*
* Bitfield access macros
@@ -26,6 +19,9 @@
*
* Example:
*
+ * #include <linux/bitfield.h>
+ * #include <linux/bits.h>
+ *
* #define REG_FIELD_A GENMASK(6, 0)
* #define REG_FIELD_B BIT(7)
* #define REG_FIELD_C GENMASK(15, 8)
@@ -48,21 +44,51 @@
#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+#define __scalar_type_to_unsigned_cases(type) \
+ unsigned type: (unsigned type)0, \
+ signed type: (unsigned type)0
+
+#define __unsigned_scalar_typeof(x) typeof( \
+ _Generic((x), \
+ char: (unsigned char)0, \
+ __scalar_type_to_unsigned_cases(char), \
+ __scalar_type_to_unsigned_cases(short), \
+ __scalar_type_to_unsigned_cases(int), \
+ __scalar_type_to_unsigned_cases(long), \
+ __scalar_type_to_unsigned_cases(long long), \
+ default: (x)))
+
+#define __bf_cast_unsigned(type, x) ((__unsigned_scalar_typeof(type))(x))
+
#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \
({ \
BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \
_pfx "mask is not constant"); \
- BUILD_BUG_ON_MSG(!(_mask), _pfx "mask is zero"); \
+ BUILD_BUG_ON_MSG((_mask) == 0, _pfx "mask is zero"); \
BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \
~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
_pfx "value too large for the field"); \
- BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull, \
+ BUILD_BUG_ON_MSG(__bf_cast_unsigned(_mask, _mask) > \
+ __bf_cast_unsigned(_reg, ~0ull), \
_pfx "type of reg too small for mask"); \
__BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \
(1ULL << __bf_shf(_mask))); \
})
/**
+ * FIELD_MAX() - produce the maximum value representable by a field
+ * @_mask: shifted mask defining the field's length and position
+ *
+ * FIELD_MAX() returns the maximum value that can be held in the field
+ * specified by @_mask.
+ */
+#define FIELD_MAX(_mask) \
+ ({ \
+ __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_MAX: "); \
+ (typeof(_mask))((_mask) >> __bf_shf(_mask)); \
+ })
+
+/**
* FIELD_FIT() - check if value fits in the field
* @_mask: shifted mask defining the field's length and position
* @_val: value to test against the field
@@ -71,7 +97,7 @@
*/
#define FIELD_FIT(_mask, _val) \
({ \
- __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \
+ __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \
!((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
})
@@ -89,10 +115,36 @@
((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \
})
+#define __BF_CHECK_POW2(n) BUILD_BUG_ON_ZERO(((n) & ((n) - 1)) != 0)
+
+/**
+ * FIELD_PREP_CONST() - prepare a constant bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to put in the field
+ *
+ * FIELD_PREP_CONST() masks and shifts up the value. The result should
+ * be combined with other fields of the bitfield using logical OR.
+ *
+ * Unlike FIELD_PREP() this is a constant expression and can therefore
+ * be used in initializers. Error checking is more limited in this
+ * version, and non-constant masks cannot be used.
+ */
+#define FIELD_PREP_CONST(_mask, _val) \
+ ( \
+ /* mask must be non-zero */ \
+ BUILD_BUG_ON_ZERO((_mask) == 0) + \
+ /* check if value fits */ \
+ BUILD_BUG_ON_ZERO(~((_mask) >> __bf_shf(_mask)) & (_val)) + \
+ /* check if mask is contiguous */ \
+ __BF_CHECK_POW2((_mask) + (1ULL << __bf_shf(_mask))) + \
+ /* and create the value */ \
+ (((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask)) \
+ )
+
/**
* FIELD_GET() - extract a bitfield element
* @_mask: shifted mask defining the field's length and position
- * @_reg: 32bit value of entire bitfield
+ * @_reg: value of entire bitfield
*
* FIELD_GET() extracts the field specified by @_mask from the
* bitfield passed in as @_reg by masking and shifting it down.
@@ -103,4 +155,51 @@
(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
})
+extern void __compiletime_error("value doesn't fit into mask")
+__field_overflow(void);
+extern void __compiletime_error("bad bitfield mask")
+__bad_mask(void);
+static __always_inline u64 field_multiplier(u64 field)
+{
+ if ((field | (field - 1)) & ((field | (field - 1)) + 1))
+ __bad_mask();
+ return field & -field;
+}
+static __always_inline u64 field_mask(u64 field)
+{
+ return field / field_multiplier(field);
+}
+#define field_max(field) ((typeof(field))field_mask(field))
+#define ____MAKE_OP(type,base,to,from) \
+static __always_inline __##type type##_encode_bits(base v, base field) \
+{ \
+ if (__builtin_constant_p(v) && (v & ~field_mask(field))) \
+ __field_overflow(); \
+ return to((v & field_mask(field)) * field_multiplier(field)); \
+} \
+static __always_inline __##type type##_replace_bits(__##type old, \
+ base val, base field) \
+{ \
+ return (old & ~to(field)) | type##_encode_bits(val, field); \
+} \
+static __always_inline void type##p_replace_bits(__##type *p, \
+ base val, base field) \
+{ \
+ *p = (*p & ~to(field)) | type##_encode_bits(val, field); \
+} \
+static __always_inline base type##_get_bits(__##type v, base field) \
+{ \
+ return (from(v) & field)/field_multiplier(field); \
+}
+#define __MAKE_OP(size) \
+ ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \
+ ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \
+ ____MAKE_OP(u##size,u##size,,)
+____MAKE_OP(u8,u8,,)
+__MAKE_OP(16)
+__MAKE_OP(32)
+__MAKE_OP(64)
+#undef __MAKE_OP
+#undef ____MAKE_OP
+
#endif
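The header's own example register layout, carried through to a working
pair of pack/unpack helpers (illustrative field definitions):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define REG_FIELD_A	GENMASK(6, 0)
#define REG_FIELD_B	BIT(7)
#define REG_FIELD_C	GENMASK(15, 8)

static u32 example_pack(u32 a, u32 c)
{
	u32 reg = 0;

	reg |= FIELD_PREP(REG_FIELD_A, a);	/* a lands in bits 6:0 */
	reg |= FIELD_PREP(REG_FIELD_B, 1);	/* set bit 7 */
	reg |= FIELD_PREP(REG_FIELD_C, c);	/* c lands in bits 15:8 */
	return reg;
}

static u32 example_unpack_c(u32 reg)
{
	/* Shifted back down; at most FIELD_MAX(REG_FIELD_C) == 255. */
	return FIELD_GET(REG_FIELD_C, reg);
}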
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 5797ca6fdfe2..7d6d73b78147 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -1,12 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BITMAP_H
#define __LINUX_BITMAP_H
#ifndef __ASSEMBLY__
-#include <linux/types.h>
+#include <linux/align.h>
#include <linux/bitops.h>
+#include <linux/find.h>
+#include <linux/limits.h>
#include <linux/string.h>
-#include <linux/kernel.h>
+#include <linux/types.h>
+
+struct device;
/*
* bitmaps provide bit arrays that consume one or more unsigned
@@ -21,106 +26,156 @@
* See lib/bitmap.c for more details.
*/
-/*
+/**
+ * DOC: bitmap overview
+ *
* The available bitmap operations and their rough meaning in the
* case that the bitmap is a single unsigned long are thus:
*
- * Note that nbits should be always a compile time evaluable constant.
- * Otherwise many inlines will generate horrible code.
+ * The generated code is more efficient when nbits is known at
+ * compile-time and at most BITS_PER_LONG.
*
- * bitmap_zero(dst, nbits) *dst = 0UL
- * bitmap_fill(dst, nbits) *dst = ~0UL
- * bitmap_copy(dst, src, nbits) *dst = *src
- * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
- * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
- * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
- * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
- * bitmap_complement(dst, src, nbits) *dst = ~(*src)
- * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
- * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap?
- * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2?
- * bitmap_empty(src, nbits) Are all bits zero in *src?
- * bitmap_full(src, nbits) Are all bits set in *src?
- * bitmap_weight(src, nbits) Hamming Weight: number set bits
- * bitmap_set(dst, pos, nbits) Set specified bit area
- * bitmap_clear(dst, pos, nbits) Clear specified bit area
- * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
- * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above
- * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
- * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
- * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
- * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
- * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
- * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
- * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
- * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
- * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf
- * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf
- * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
- * bitmap_release_region(bitmap, pos, order) Free specified bit region
- * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
- * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words)
- * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words)
+ * ::
+ *
+ * bitmap_zero(dst, nbits) *dst = 0UL
+ * bitmap_fill(dst, nbits) *dst = ~0UL
+ * bitmap_copy(dst, src, nbits) *dst = *src
+ * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
+ * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
+ * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
+ * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
+ * bitmap_complement(dst, src, nbits) *dst = ~(*src)
+ * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
+ * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap?
+ * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2?
+ * bitmap_empty(src, nbits) Are all bits zero in *src?
+ * bitmap_full(src, nbits) Are all bits set in *src?
+ * bitmap_weight(src, nbits) Hamming Weight: number set bits
+ * bitmap_weight_and(src1, src2, nbits) Hamming Weight of and'ed bitmap
+ * bitmap_set(dst, pos, nbits) Set specified bit area
+ * bitmap_clear(dst, pos, nbits) Clear specified bit area
+ * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
+ * bitmap_find_next_zero_area_off(buf, len, pos, n, mask, mask_off) as above
+ * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
+ * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
+ * bitmap_cut(dst, src, first, n, nbits) Cut n bits from first, copy rest
+ * bitmap_replace(dst, old, new, mask, nbits) *dst = (*old & ~(*mask)) | (*new & *mask)
+ * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
+ * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
+ * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
+ * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
+ * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
+ * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
+ * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf
+ * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf
+ * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
+ * bitmap_release_region(bitmap, pos, order) Free specified bit region
+ * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
+ * bitmap_from_arr32(dst, buf, nbits) Copy nbits from u32[] buf to dst
+ * bitmap_from_arr64(dst, buf, nbits) Copy nbits from u64[] buf to dst
+ * bitmap_to_arr32(buf, src, nbits) Copy nbits from buf to u32[] dst
+ * bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst
+ * bitmap_get_value8(map, start) Get 8bit value from map at start
+ * bitmap_set_value8(map, value, start) Set 8bit value to map at start
+ *
+ * Note, bitmap_zero() and bitmap_fill() operate over the whole region of
+ * unsigned longs, that is, bits past the end of the bitmap up to the
+ * unsigned long boundary will be zeroed or filled as well. Consider using
+ * bitmap_clear() or bitmap_set() for explicit zeroing or filling,
+ * respectively.
*/
-/*
- * Also the following operations in asm/bitops.h apply to bitmaps.
+/**
+ * DOC: bitmap bitops
+ *
+ * Also the following operations in asm/bitops.h apply to bitmaps.::
+ *
+ * set_bit(bit, addr) *addr |= bit
+ * clear_bit(bit, addr) *addr &= ~bit
+ * change_bit(bit, addr) *addr ^= bit
+ * test_bit(bit, addr) Is bit set in *addr?
+ * test_and_set_bit(bit, addr) Set bit and return old value
+ * test_and_clear_bit(bit, addr) Clear bit and return old value
+ * test_and_change_bit(bit, addr) Change bit and return old value
+ * find_first_zero_bit(addr, nbits) Position first zero bit in *addr
+ * find_first_bit(addr, nbits) Position first set bit in *addr
+ * find_next_zero_bit(addr, nbits, bit)
+ * Position next zero bit in *addr >= bit
+ * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
+ * find_next_and_bit(addr1, addr2, nbits, bit)
+ * Same as find_next_bit, but in
+ * (*addr1 & *addr2)
*
- * set_bit(bit, addr) *addr |= bit
- * clear_bit(bit, addr) *addr &= ~bit
- * change_bit(bit, addr) *addr ^= bit
- * test_bit(bit, addr) Is bit set in *addr?
- * test_and_set_bit(bit, addr) Set bit and return old value
- * test_and_clear_bit(bit, addr) Clear bit and return old value
- * test_and_change_bit(bit, addr) Change bit and return old value
- * find_first_zero_bit(addr, nbits) Position first zero bit in *addr
- * find_first_bit(addr, nbits) Position first set bit in *addr
- * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
- * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
*/
-/*
+/**
+ * DOC: declare bitmap
* The DECLARE_BITMAP(name,bits) macro, in linux/types.h, can be used
* to declare an array named 'name' of just enough unsigned longs to
* contain all bit positions from 0 to 'bits' - 1.
*/
/*
+ * Allocation and deallocation of bitmap.
+ * Provided in lib/bitmap.c to avoid circular dependency.
+ */
+unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags);
+unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags);
+unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node);
+unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node);
+void bitmap_free(const unsigned long *bitmap);
+
+/* Managed variants of the above. */
+unsigned long *devm_bitmap_alloc(struct device *dev,
+ unsigned int nbits, gfp_t flags);
+unsigned long *devm_bitmap_zalloc(struct device *dev,
+ unsigned int nbits, gfp_t flags);
+
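A hedged sketch of the allocation API (error handling shortened). The managed devm_ variants tie the bitmap's lifetime to the device, so no explicit bitmap_free() is needed for those:

    unsigned long *map;

    map = bitmap_zalloc(nbits, GFP_KERNEL);
    if (!map)
            return -ENOMEM;

    /* ... operate on map with the helpers above ... */

    bitmap_free(map);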
+/*
* lib/bitmap.c provides these functions:
*/
-extern int __bitmap_empty(const unsigned long *bitmap, unsigned int nbits);
-extern int __bitmap_full(const unsigned long *bitmap, unsigned int nbits);
-extern int __bitmap_equal(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
- unsigned int nbits);
-extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
- unsigned int shift, unsigned int nbits);
-extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
- unsigned int shift, unsigned int nbits);
-extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern int __bitmap_intersects(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern int __bitmap_subset(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
-extern void __bitmap_set(unsigned long *map, unsigned int start, int len);
-extern void __bitmap_clear(unsigned long *map, unsigned int start, int len);
-
-extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
- unsigned long size,
- unsigned long start,
- unsigned int nr,
- unsigned long align_mask,
- unsigned long align_offset);
+bool __bitmap_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+bool __pure __bitmap_or_equal(const unsigned long *src1,
+ const unsigned long *src2,
+ const unsigned long *src3,
+ unsigned int nbits);
+void __bitmap_complement(unsigned long *dst, const unsigned long *src,
+ unsigned int nbits);
+void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits);
+void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits);
+void bitmap_cut(unsigned long *dst, const unsigned long *src,
+ unsigned int first, unsigned int cut, unsigned int nbits);
+bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+void __bitmap_replace(unsigned long *dst,
+ const unsigned long *old, const unsigned long *new,
+ const unsigned long *mask, unsigned int nbits);
+bool __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+bool __bitmap_subset(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
+unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+void __bitmap_set(unsigned long *map, unsigned int start, int len);
+void __bitmap_clear(unsigned long *map, unsigned int start, int len);
+
+unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask,
+ unsigned long align_offset);
/**
* bitmap_find_next_zero_area - find a contiguous aligned zero area
@@ -145,80 +200,124 @@ bitmap_find_next_zero_area(unsigned long *map,
align_mask, 0);
}
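The _off variant threads an extra alignment offset through; the plain wrapper is the common case. A sketch of a tiny slot allocator built on it, assuming count is a power of two (alloc_slots is an invented name):

    static int alloc_slots(unsigned long *map, unsigned long size,
                           unsigned int count)
    {
            unsigned long pos;

            pos = bitmap_find_next_zero_area(map, size, 0, count, count - 1);
            if (pos >= size)
                    return -ENOSPC;

            bitmap_set(map, pos, count);
            return pos;
    }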
-extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
+int bitmap_parse(const char *buf, unsigned int buflen,
unsigned long *dst, int nbits);
-extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
+int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
-extern int bitmap_parselist(const char *buf, unsigned long *maskp,
+int bitmap_parselist(const char *buf, unsigned long *maskp,
int nmaskbits);
-extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
+int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
-extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
+void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new, unsigned int nbits);
-extern int bitmap_bitremap(int oldbit,
+int bitmap_bitremap(int oldbit,
const unsigned long *old, const unsigned long *new, int bits);
-extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
+void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, unsigned int bits);
-extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
+void bitmap_fold(unsigned long *dst, const unsigned long *orig,
unsigned int sz, unsigned int nbits);
-extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
-extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
-extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
-extern unsigned int bitmap_from_u32array(unsigned long *bitmap,
- unsigned int nbits,
- const u32 *buf,
- unsigned int nwords);
-extern unsigned int bitmap_to_u32array(u32 *buf,
- unsigned int nwords,
- const unsigned long *bitmap,
- unsigned int nbits);
+int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
+void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
+int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
+
#ifdef __BIG_ENDIAN
-extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
+void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
#else
#define bitmap_copy_le bitmap_copy
#endif
-extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits);
-extern int bitmap_print_to_pagebuf(bool list, char *buf,
+int bitmap_print_to_pagebuf(bool list, char *buf,
const unsigned long *maskp, int nmaskbits);
+extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count);
+
+extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count);
+
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
-#define small_const_nbits(nbits) \
- (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
-
static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
+ unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+
if (small_const_nbits(nbits))
- *dst = 0UL;
- else {
- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ *dst = 0;
+ else
memset(dst, 0, len);
- }
}
static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
- unsigned int nlongs = BITS_TO_LONGS(nbits);
- if (!small_const_nbits(nbits)) {
- unsigned int len = (nlongs - 1) * sizeof(unsigned long);
- memset(dst, 0xff, len);
- }
- dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
+ unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+
+ if (small_const_nbits(nbits))
+ *dst = ~0UL;
+ else
+ memset(dst, 0xff, len);
}
static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
unsigned int nbits)
{
+ unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+
if (small_const_nbits(nbits))
*dst = *src;
- else {
- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ else
memcpy(dst, src, len);
- }
}
-static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
+/*
+ * Copy bitmap and clear tail bits in last word.
+ */
+static inline void bitmap_copy_clear_tail(unsigned long *dst,
+ const unsigned long *src, unsigned int nbits)
+{
+ bitmap_copy(dst, src, nbits);
+ if (nbits % BITS_PER_LONG)
+ dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits);
+}
+
+/*
+ * On 32-bit systems bitmaps are represented as u32 arrays internally. On LE64
+ * machines the order of the hi and lo parts of numbers matches the bitmap
+ * structure. In both cases conversion is not needed when copying data from/to
+ * arrays of u32. But in the LE64 case, the typecast in bitmap_copy_clear_tail()
+ * may lead to out-of-bounds access. To avoid that, both LE and BE variants of
+ * 64-bit architectures do not use bitmap_copy_clear_tail().
+ */
+#if BITS_PER_LONG == 64
+void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
+ unsigned int nbits);
+void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
+ unsigned int nbits);
+#else
+#define bitmap_from_arr32(bitmap, buf, nbits) \
+ bitmap_copy_clear_tail((unsigned long *) (bitmap), \
+ (const unsigned long *) (buf), (nbits))
+#define bitmap_to_arr32(buf, bitmap, nbits) \
+ bitmap_copy_clear_tail((unsigned long *) (buf), \
+ (const unsigned long *) (bitmap), (nbits))
+#endif
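For instance, a 40-bit map can be filled from two u32 words and written back; on 32-bit kernels the conversion is a plain copy via bitmap_copy_clear_tail(), while 64-bit kernels use the out-of-line helpers. A self-contained sketch:

    u32 words[2] = { 0xdeadbeef, 0x000000ff };
    DECLARE_BITMAP(map, 40);

    bitmap_from_arr32(map, words, 40);  /* bits 32..39 come from words[1] */
    bitmap_to_arr32(words, map, 40);    /* round-trip; tail bits are cleared */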
+
+/*
+ * On 64-bit systems bitmaps are represented as u64 arrays internally. On LE32
+ * machines the order of the hi and lo parts of numbers matches the bitmap
+ * structure. In both cases conversion is not needed when copying data from/to
+ * arrays of u64.
+ */
+#if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
+void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits);
+void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits);
+#else
+#define bitmap_from_arr64(bitmap, buf, nbits) \
+ bitmap_copy_clear_tail((unsigned long *)(bitmap), (const unsigned long *)(buf), (nbits))
+#define bitmap_to_arr64(buf, bitmap, nbits) \
+ bitmap_copy_clear_tail((unsigned long *)(buf), (const unsigned long *)(bitmap), (nbits))
+#endif
+
+static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
@@ -244,7 +343,7 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
__bitmap_xor(dst, src1, src2, nbits);
}
-static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
+static inline bool bitmap_andnot(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
@@ -261,18 +360,47 @@ static inline void bitmap_complement(unsigned long *dst, const unsigned long *sr
__bitmap_complement(dst, src, nbits);
}
-static inline int bitmap_equal(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+#ifdef __LITTLE_ENDIAN
+#define BITMAP_MEM_ALIGNMENT 8
+#else
+#define BITMAP_MEM_ALIGNMENT (8 * sizeof(unsigned long))
+#endif
+#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
+
+static inline bool bitmap_equal(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
- if (__builtin_constant_p(nbits & 7) && IS_ALIGNED(nbits, 8))
+ if (__builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
+ IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
return !memcmp(src1, src2, nbits / 8);
return __bitmap_equal(src1, src2, nbits);
}
-static inline int bitmap_intersects(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+/**
+ * bitmap_or_equal - Check whether the or of two bitmaps is equal to a third
+ * @src1: Pointer to bitmap 1
+ * @src2: Pointer to bitmap 2 will be or'ed with bitmap 1
+ * @src3: Pointer to bitmap 3. Compare to the result of *@src1 | *@src2
+ * @nbits: number of bits in each of these bitmaps
+ *
+ * Returns: True if (*@src1 | *@src2) == *@src3, false otherwise
+ */
+static inline bool bitmap_or_equal(const unsigned long *src1,
+ const unsigned long *src2,
+ const unsigned long *src3,
+ unsigned int nbits)
+{
+ if (!small_const_nbits(nbits))
+ return __bitmap_or_equal(src1, src2, src3, nbits);
+
+ return !(((*src1 | *src2) ^ *src3) & BITMAP_LAST_WORD_MASK(nbits));
+}
+
+static inline bool bitmap_intersects(const unsigned long *src1,
+ const unsigned long *src2,
+ unsigned int nbits)
{
if (small_const_nbits(nbits))
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
@@ -280,8 +408,8 @@ static inline int bitmap_intersects(const unsigned long *src1,
return __bitmap_intersects(src1, src2, nbits);
}
-static inline int bitmap_subset(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static inline bool bitmap_subset(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
@@ -289,7 +417,7 @@ static inline int bitmap_subset(const unsigned long *src1,
return __bitmap_subset(src1, src2, nbits);
}
-static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
+static inline bool bitmap_empty(const unsigned long *src, unsigned nbits)
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
@@ -297,7 +425,7 @@ static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
return find_first_bit(src, nbits) == nbits;
}
-static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
+static inline bool bitmap_full(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
@@ -305,20 +433,34 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
return find_first_zero_bit(src, nbits) == nbits;
}
-static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
+static __always_inline
+unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
return __bitmap_weight(src, nbits);
}
+static __always_inline
+unsigned long bitmap_weight_and(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return hweight_long(*src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits));
+ return __bitmap_weight_and(src1, src2, nbits);
+}
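bitmap_weight_and() counts the intersection without materializing a temporary AND bitmap. A small sketch:

    DECLARE_BITMAP(a, 64);
    DECLARE_BITMAP(b, 64);

    bitmap_fill(a, 64);
    bitmap_zero(b, 64);
    bitmap_set(b, 8, 16);

    /* 16 bits are set in both maps */
    WARN_ON(bitmap_weight_and(a, b, 64) != 16);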
+
static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
__set_bit(start, map);
- else if (__builtin_constant_p(start & 7) && IS_ALIGNED(start, 8) &&
- __builtin_constant_p(nbits & 7) && IS_ALIGNED(nbits, 8))
+ else if (small_const_nbits(start + nbits))
+ *map |= GENMASK(start + nbits - 1, start);
+ else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
+ IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
+ __builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
+ IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
memset((char *)map + start / 8, 0xff, nbits / 8);
else
__bitmap_set(map, start, nbits);
@@ -329,15 +471,19 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
{
if (__builtin_constant_p(nbits) && nbits == 1)
__clear_bit(start, map);
- else if (__builtin_constant_p(start & 7) && IS_ALIGNED(start, 8) &&
- __builtin_constant_p(nbits & 7) && IS_ALIGNED(nbits, 8))
+ else if (small_const_nbits(start + nbits))
+ *map &= ~GENMASK(start + nbits - 1, start);
+ else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
+ IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
+ __builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
+ IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
memset((char *)map + start / 8, 0, nbits / 8);
else
__bitmap_clear(map, start, nbits);
}
static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
- unsigned int shift, int nbits)
+ unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
@@ -354,28 +500,105 @@ static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *sr
__bitmap_shift_left(dst, src, shift, nbits);
}
-static inline int bitmap_parse(const char *buf, unsigned int buflen,
- unsigned long *maskp, int nmaskbits)
+static inline void bitmap_replace(unsigned long *dst,
+ const unsigned long *old,
+ const unsigned long *new,
+ const unsigned long *mask,
+ unsigned int nbits)
{
- return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
+ if (small_const_nbits(nbits))
+ *dst = (*old & ~(*mask)) | (*new & *mask);
+ else
+ __bitmap_replace(dst, old, new, mask, nbits);
}
-/*
+static inline void bitmap_next_set_region(unsigned long *bitmap,
+ unsigned int *rs, unsigned int *re,
+ unsigned int end)
+{
+ *rs = find_next_bit(bitmap, end, *rs);
+ *re = find_next_zero_bit(bitmap, end, *rs + 1);
+}
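bitmap_next_set_region() advances a (start, end) cursor to the next contiguous run of set bits: *rs lands on the first set bit at or after the old *rs, and *re on the zero bit terminating the run. A sketch of walking all runs, with pr_info() only for illustration:

    unsigned int rs = 0, re;

    bitmap_next_set_region(map, &rs, &re, nbits);
    while (rs < re) {
            pr_info("run of set bits: %u..%u\n", rs, re - 1);
            rs = re + 1;
            bitmap_next_set_region(map, &rs, &re, nbits);
    }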
+
+/**
+ * BITMAP_FROM_U64() - Represent a u64 value in a format suitable for bitmaps.
+ * @n: u64 value
+ *
+ * Linux bitmaps are internally arrays of unsigned longs, i.e. 32-bit
+ * integers in a 32-bit environment, and 64-bit integers in a 64-bit one.
+ *
+ * There are four combinations of endianness and word length in Linux
+ * ABIs: LE64, BE64, LE32 and BE32.
+ *
+ * On 64-bit kernels 64-bit LE and BE numbers are naturally ordered in
+ * bitmaps and therefore don't require any special handling.
+ *
+ * On 32-bit kernels the 32-bit LE ABI orders the lo word of a 64-bit number
+ * in memory prior to the hi word, and 32-bit BE orders the hi word prior to
+ * the lo one. The bitmap, on the other hand, is represented as an array of
+ * 32-bit words, and the position of bit N may therefore be calculated as:
+ * word #(N/32) and bit #(N%32) in that word. For example, bit #42 is located
+ * at the 10th position of the 2nd word. This matches the 32-bit LE ABI, and
+ * we can simply let the compiler store 64-bit values in memory as it usually
+ * does. But for BE we need to swap the hi and lo words manually.
+ *
+ * With all that, the macro BITMAP_FROM_U64() does explicit reordering of the
+ * hi and lo parts of a u64. For LE32 it does nothing, and for BE environments
+ * it swaps the hi and lo words, as is expected by the bitmap layout.
+ */
+#if __BITS_PER_LONG == 64
+#define BITMAP_FROM_U64(n) (n)
+#else
+#define BITMAP_FROM_U64(n) ((unsigned long) ((u64)(n) & ULONG_MAX)), \
+ ((unsigned long) ((u64)(n) >> 32))
+#endif
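This is typically used in static initializers, where on 32-bit kernels the macro expands to two correctly ordered array elements (doorbell_mask is an invented name):

    static const unsigned long doorbell_mask[] = {
            BITMAP_FROM_U64(0x000000ffffe00000ULL),
    };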
+
+/**
* bitmap_from_u64 - Check and swap words within u64.
* @mask: source bitmap
* @dst: destination bitmap
*
- * In 32-bit Big Endian kernel, when using (u32 *)(&val)[*]
+ * In 32-bit Big Endian kernel, when using ``(u32 *)(&val)[*]``
* to read u64 mask, we will get the wrong word.
- * That is "(u32 *)(&val)[0]" gets the upper 32 bits,
+ * That is ``(u32 *)(&val)[0]`` gets the upper 32 bits,
* but we expect the lower 32-bits of u64.
*/
static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
{
- dst[0] = mask & ULONG_MAX;
+ bitmap_from_arr64(dst, &mask, 64);
+}
+
+/**
+ * bitmap_get_value8 - get an 8-bit value within a memory region
+ * @map: address to the bitmap memory region
+ * @start: bit offset of the 8-bit value; must be a multiple of 8
+ *
+ * Returns the 8-bit value located at the @start bit offset within the @map
+ * memory region.
+ */
+static inline unsigned long bitmap_get_value8(const unsigned long *map,
+ unsigned long start)
+{
+ const size_t index = BIT_WORD(start);
+ const unsigned long offset = start % BITS_PER_LONG;
+
+ return (map[index] >> offset) & 0xFF;
+}
+
+/**
+ * bitmap_set_value8 - set an 8-bit value within a memory region
+ * @map: address to the bitmap memory region
+ * @value: the 8-bit value; values wider than 8 bits may clobber the bitmap
+ * @start: bit offset of the 8-bit value; must be a multiple of 8
+ */
+static inline void bitmap_set_value8(unsigned long *map, unsigned long value,
+ unsigned long start)
+{
+ const size_t index = BIT_WORD(start);
+ const unsigned long offset = start % BITS_PER_LONG;
- if (sizeof(mask) > sizeof(unsigned long))
- dst[1] = mask >> 32;
+ map[index] &= ~(0xFFUL << offset);
+ map[index] |= value << offset;
}
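A short sketch of the pair; @start must stay byte-aligned, as documented:

    DECLARE_BITMAP(map, 64);

    bitmap_zero(map, 64);
    bitmap_set_value8(map, 0xA5, 8);            /* write the second byte */
    WARN_ON(bitmap_get_value8(map, 8) != 0xA5);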
#endif /* __ASSEMBLY__ */
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index a83c822c35c2..2ba557e067fe 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -1,28 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
+
#include <asm/types.h>
+#include <linux/bits.h>
+#include <linux/typecheck.h>
-#ifdef __KERNEL__
-#define BIT(nr) (1UL << (nr))
-#define BIT_ULL(nr) (1ULL << (nr))
-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
-#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
-#define BITS_PER_BYTE 8
-#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
-#endif
+#include <uapi/linux/kernel.h>
-/*
- * Create a contiguous bitmask starting at bit position @l and ending at
- * position @h. For example
- * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
- */
-#define GENMASK(h, l) \
- (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+/* Set bits in the first 'n' bytes when loaded from memory */
+#ifdef __LITTLE_ENDIAN
+# define aligned_byte_mask(n) ((1UL << 8*(n))-1)
+#else
+# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
+#endif
-#define GENMASK_ULL(h, l) \
- (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#define BITS_TO_LONGS(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
+#define BITS_TO_U64(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
+#define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
+#define BITS_TO_BYTES(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
@@ -30,32 +27,61 @@ extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);
/*
+ * Defined here because those may be needed by architecture-specific static
+ * inlines.
+ */
+
+#include <asm-generic/bitops/generic-non-atomic.h>
+
+/*
+ * Many architecture-specific non-atomic bitops contain inline asm code and,
+ * because of that, the compiler can't optimize them to compile-time
+ * expressions or constants. By contrast, generic_*() helpers are defined in
+ * pure C and compilers optimize them just fine.
+ * Therefore, to make `unsigned long foo = 0; __set_bit(BAR, &foo)` effectively
+ * equal to `unsigned long foo = BIT(BAR)`, pick the generic C alternative when
+ * the arguments can be resolved at compile time. That expression itself is a
+ * constant and doesn't change the behavior in the remaining cases.
+ * The casts to `uintptr_t` are needed to mitigate `-Waddress` warnings when
+ * passing a bitmap from .bss or .data (-> `!!addr` is always true).
+ */
+#define bitop(op, nr, addr) \
+ ((__builtin_constant_p(nr) && \
+ __builtin_constant_p((uintptr_t)(addr) != (uintptr_t)NULL) && \
+ (uintptr_t)(addr) != (uintptr_t)NULL && \
+ __builtin_constant_p(*(const unsigned long *)(addr))) ? \
+ const##op(nr, addr) : op(nr, addr))
+
+#define __set_bit(nr, addr) bitop(___set_bit, nr, addr)
+#define __clear_bit(nr, addr) bitop(___clear_bit, nr, addr)
+#define __change_bit(nr, addr) bitop(___change_bit, nr, addr)
+#define __test_and_set_bit(nr, addr) bitop(___test_and_set_bit, nr, addr)
+#define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr)
+#define __test_and_change_bit(nr, addr) bitop(___test_and_change_bit, nr, addr)
+#define test_bit(nr, addr) bitop(_test_bit, nr, addr)
+#define test_bit_acquire(nr, addr) bitop(_test_bit_acquire, nr, addr)
+
+/*
* Include this here because some architectures need generic_ffs/fls in
* scope
*/
#include <asm/bitops.h>
-#define for_each_set_bit(bit, addr, size) \
- for ((bit) = find_first_bit((addr), (size)); \
- (bit) < (size); \
- (bit) = find_next_bit((addr), (size), (bit) + 1))
-
-/* same as for_each_set_bit() but use bit as value to start with */
-#define for_each_set_bit_from(bit, addr, size) \
- for ((bit) = find_next_bit((addr), (size), (bit)); \
- (bit) < (size); \
- (bit) = find_next_bit((addr), (size), (bit) + 1))
+/* Check that the bitops prototypes are sane */
+#define __check_bitop_pr(name) \
+ static_assert(__same_type(arch_##name, generic_##name) && \
+ __same_type(const_##name, generic_##name) && \
+ __same_type(_##name, generic_##name))
-#define for_each_clear_bit(bit, addr, size) \
- for ((bit) = find_first_zero_bit((addr), (size)); \
- (bit) < (size); \
- (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+__check_bitop_pr(__set_bit);
+__check_bitop_pr(__clear_bit);
+__check_bitop_pr(__change_bit);
+__check_bitop_pr(__test_and_set_bit);
+__check_bitop_pr(__test_and_clear_bit);
+__check_bitop_pr(__test_and_change_bit);
+__check_bitop_pr(test_bit);
-/* same as for_each_clear_bit() but use bit as value to start with */
-#define for_each_clear_bit_from(bit, addr, size) \
- for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
- (bit) < (size); \
- (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+#undef __check_bitop_pr
static inline int get_bitmask_order(unsigned int count)
{
@@ -67,7 +93,7 @@ static inline int get_bitmask_order(unsigned int count)
static __always_inline unsigned long hweight_long(unsigned long w)
{
- return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
+ return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w);
}
/**
@@ -77,7 +103,7 @@ static __always_inline unsigned long hweight_long(unsigned long w)
*/
static inline __u64 rol64(__u64 word, unsigned int shift)
{
- return (word << shift) | (word >> (64 - shift));
+ return (word << (shift & 63)) | (word >> ((-shift) & 63));
}
/**
@@ -87,7 +113,7 @@ static inline __u64 rol64(__u64 word, unsigned int shift)
*/
static inline __u64 ror64(__u64 word, unsigned int shift)
{
- return (word >> shift) | (word << (64 - shift));
+ return (word >> (shift & 63)) | (word << ((-shift) & 63));
}
/**
@@ -97,7 +123,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
*/
static inline __u32 rol32(__u32 word, unsigned int shift)
{
- return (word << shift) | (word >> ((-shift) & 31));
+ return (word << (shift & 31)) | (word >> ((-shift) & 31));
}
/**
@@ -107,7 +133,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
*/
static inline __u32 ror32(__u32 word, unsigned int shift)
{
- return (word >> shift) | (word << (32 - shift));
+ return (word >> (shift & 31)) | (word << ((-shift) & 31));
}
/**
@@ -117,7 +143,7 @@ static inline __u32 ror32(__u32 word, unsigned int shift)
*/
static inline __u16 rol16(__u16 word, unsigned int shift)
{
- return (word << shift) | (word >> (16 - shift));
+ return (word << (shift & 15)) | (word >> ((-shift) & 15));
}
/**
@@ -127,7 +153,7 @@ static inline __u16 rol16(__u16 word, unsigned int shift)
*/
static inline __u16 ror16(__u16 word, unsigned int shift)
{
- return (word >> shift) | (word << (16 - shift));
+ return (word >> (shift & 15)) | (word << ((-shift) & 15));
}
/**
@@ -137,7 +163,7 @@ static inline __u16 ror16(__u16 word, unsigned int shift)
*/
static inline __u8 rol8(__u8 word, unsigned int shift)
{
- return (word << shift) | (word >> (8 - shift));
+ return (word << (shift & 7)) | (word >> ((-shift) & 7));
}
/**
@@ -147,7 +173,7 @@ static inline __u8 rol8(__u8 word, unsigned int shift)
*/
static inline __u8 ror8(__u8 word, unsigned int shift)
{
- return (word >> shift) | (word << (8 - shift));
+ return (word >> (shift & 7)) | (word << ((-shift) & 7));
}
/**
@@ -157,7 +183,7 @@ static inline __u8 ror8(__u8 word, unsigned int shift)
*
* This is safe to use for 16- and 8-bit types as well.
*/
-static inline __s32 sign_extend32(__u32 value, int index)
+static __always_inline __s32 sign_extend32(__u32 value, int index)
{
__u8 shift = 31 - index;
return (__s32)(value << shift) >> shift;
@@ -168,7 +194,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
* @value: value to sign extend
* @index: 0 based bit index (0<=index<64) to sign bit
*/
-static inline __s64 sign_extend64(__u64 value, int index)
+static __always_inline __s64 sign_extend64(__u64 value, int index)
{
__u8 shift = 63 - index;
return (__s64)(value << shift) >> shift;
@@ -183,12 +209,10 @@ static inline unsigned fls_long(unsigned long l)
static inline int get_count_order(unsigned int count)
{
- int order;
+ if (count == 0)
+ return -1;
- order = fls(count) - 1;
- if (count & (count - 1))
- order++;
- return order;
+ return fls(--count);
}
/**
@@ -201,17 +225,14 @@ static inline int get_count_order_long(unsigned long l)
{
if (l == 0UL)
return -1;
- else if (l & (l - 1UL))
- return (int)fls_long(l);
- else
- return (int)fls_long(l) - 1;
+ return (int)fls_long(--l);
}
/**
* __ffs64 - find first set bit in a 64 bit word
* @word: The 64 bit word
*
- * On 64 bit arches this is a synomyn for __ffs
+ * On 64 bit arches this is a synonym for __ffs
* The result is not defined if no bits are set, so check that @word
* is non-zero before calling this.
*/
@@ -226,50 +247,131 @@ static inline unsigned long __ffs64(u64 word)
return __ffs((unsigned long)word);
}
+/**
+ * fns - find N'th set bit in a word
+ * @word: The word to search
+ * @n: Bit to find
+ */
+static inline unsigned long fns(unsigned long word, unsigned int n)
+{
+ unsigned int bit;
+
+ while (word) {
+ bit = __ffs(word);
+ if (n-- == 0)
+ return bit;
+ __clear_bit(bit, &word);
+ }
+
+ return BITS_PER_LONG;
+}
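@n is zero-based, and BITS_PER_LONG is returned when fewer than n + 1 bits are set. For example:

    unsigned long w = 0xb4;     /* bits 2, 4, 5 and 7 are set */

    WARN_ON(fns(w, 0) != 2);                /* first set bit */
    WARN_ON(fns(w, 2) != 5);                /* third set bit */
    WARN_ON(fns(w, 4) != BITS_PER_LONG);    /* only four bits are set */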
+
+/**
+ * assign_bit - Assign value to a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ * @value: the value to assign
+ */
+static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
+ bool value)
+{
+ if (value)
+ set_bit(nr, addr);
+ else
+ clear_bit(nr, addr);
+}
+
+static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
+ bool value)
+{
+ if (value)
+ __set_bit(nr, addr);
+ else
+ __clear_bit(nr, addr);
+}
+
+/**
+ * __ptr_set_bit - Set bit in a pointer's value
+ * @nr: the bit to set
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ * void *p = foo();
+ * __ptr_set_bit(bit, &p);
+ */
+#define __ptr_set_bit(nr, addr) \
+ ({ \
+ typecheck_pointer(*(addr)); \
+ __set_bit(nr, (unsigned long *)(addr)); \
+ })
+
+/**
+ * __ptr_clear_bit - Clear bit in a pointer's value
+ * @nr: the bit to clear
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ * void *p = foo();
+ * __ptr_clear_bit(bit, &p);
+ */
+#define __ptr_clear_bit(nr, addr) \
+ ({ \
+ typecheck_pointer(*(addr)); \
+ __clear_bit(nr, (unsigned long *)(addr)); \
+ })
+
+/**
+ * __ptr_test_bit - Test bit in a pointer's value
+ * @nr: the bit to test
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ * void *p = foo();
+ * if (__ptr_test_bit(bit, &p)) {
+ * ...
+ * } else {
+ * ...
+ * }
+ */
+#define __ptr_test_bit(nr, addr) \
+ ({ \
+ typecheck_pointer(*(addr)); \
+ test_bit(nr, (unsigned long *)(addr)); \
+ })
+
#ifdef __KERNEL__
#ifndef set_mask_bits
-#define set_mask_bits(ptr, _mask, _bits) \
+#define set_mask_bits(ptr, mask, bits) \
({ \
- const typeof(*ptr) mask = (_mask), bits = (_bits); \
- typeof(*ptr) old, new; \
+ const typeof(*(ptr)) mask__ = (mask), bits__ = (bits); \
+ typeof(*(ptr)) old__, new__; \
\
+ old__ = READ_ONCE(*(ptr)); \
do { \
- old = ACCESS_ONCE(*ptr); \
- new = (old & ~mask) | bits; \
- } while (cmpxchg(ptr, old, new) != old); \
+ new__ = (old__ & ~mask__) | bits__; \
+ } while (!try_cmpxchg(ptr, &old__, new__)); \
\
- new; \
+ old__; \
})
#endif
#ifndef bit_clear_unless
-#define bit_clear_unless(ptr, _clear, _test) \
+#define bit_clear_unless(ptr, clear, test) \
({ \
- const typeof(*ptr) clear = (_clear), test = (_test); \
- typeof(*ptr) old, new; \
+ const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
+ typeof(*(ptr)) old__, new__; \
\
+ old__ = READ_ONCE(*(ptr)); \
do { \
- old = ACCESS_ONCE(*ptr); \
- new = old & ~clear; \
- } while (!(old & test) && \
- cmpxchg(ptr, old, new) != old); \
+ if (old__ & test__) \
+ break; \
+ new__ = old__ & ~clear__; \
+ } while (!try_cmpxchg(ptr, &old__, new__)); \
\
- !(old & test); \
+ !(old__ & test__); \
})
#endif
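Note that set_mask_bits() now evaluates to the old value of *ptr rather than the new one. A hedged usage sketch (STATE_MASK, STATE_SHIFT and new_state are invented for the example):

    #define STATE_SHIFT     4
    #define STATE_MASK      GENMASK(7, 4)

    unsigned long flags = 0, old, new_state = 2;

    /* Atomically splice the new state field in; 'old' holds the prior word. */
    old = set_mask_bits(&flags, STATE_MASK,
                        (new_state << STATE_SHIFT) & STATE_MASK);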
-#ifndef find_last_bit
-/**
- * find_last_bit - find the last set bit in a memory region
- * @addr: The address to start the search at
- * @size: The number of bits to search
- *
- * Returns the bit number of the last set bit, or size.
- */
-extern unsigned long find_last_bit(const unsigned long *addr,
- unsigned long size);
-#endif
-
#endif /* __KERNEL__ */
#endif
diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h
index fb790b8449c1..d35b8ec1c485 100644
--- a/include/linux/bitrev.h
+++ b/include/linux/bitrev.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITREV_H
#define _LINUX_BITREV_H
@@ -29,34 +30,45 @@ static inline u32 __bitrev32(u32 x)
#endif /* CONFIG_HAVE_ARCH_BITREVERSE */
+#define __bitrev8x4(x) (__bitrev32(swab32(x)))
+
#define __constant_bitrev32(x) \
({ \
- u32 __x = x; \
- __x = (__x >> 16) | (__x << 16); \
- __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \
- __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
- __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
- __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
- __x; \
+ u32 ___x = x; \
+ ___x = (___x >> 16) | (___x << 16); \
+ ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8); \
+ ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \
+ ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \
+ ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \
+ ___x; \
})
#define __constant_bitrev16(x) \
({ \
- u16 __x = x; \
- __x = (__x >> 8) | (__x << 8); \
- __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \
- __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \
- __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \
- __x; \
+ u16 ___x = x; \
+ ___x = (___x >> 8) | (___x << 8); \
+ ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4); \
+ ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2); \
+ ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1); \
+ ___x; \
+})
+
+#define __constant_bitrev8x4(x) \
+({ \
+ u32 ___x = x; \
+ ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \
+ ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \
+ ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \
+ ___x; \
})
#define __constant_bitrev8(x) \
({ \
- u8 __x = x; \
- __x = (__x >> 4) | (__x << 4); \
- __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \
- __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \
- __x; \
+ u8 ___x = x; \
+ ___x = (___x >> 4) | (___x << 4); \
+ ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2); \
+ ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1); \
+ ___x; \
})
#define bitrev32(x) \
@@ -75,6 +87,14 @@ static inline u32 __bitrev32(u32 x)
__bitrev16(__x); \
})
+#define bitrev8x4(x) \
+({ \
+ u32 __x = x; \
+ __builtin_constant_p(__x) ? \
+ __constant_bitrev8x4(__x) : \
+ __bitrev8x4(__x); \
+ })
+
#define bitrev8(x) \
({ \
u8 __x = x; \
diff --git a/include/linux/bits.h b/include/linux/bits.h
new file mode 100644
index 000000000000..7c0cf5031abe
--- /dev/null
+++ b/include/linux/bits.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITS_H
+#define __LINUX_BITS_H
+
+#include <linux/const.h>
+#include <vdso/bits.h>
+#include <asm/bitsperlong.h>
+
+#define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr) (ULL(1) << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
+#define BITS_PER_BYTE 8
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example:
+ * GENMASK_ULL(39, 21) gives us the 64-bit vector 0x000000ffffe00000.
+ */
+#if !defined(__ASSEMBLY__)
+#include <linux/build_bug.h>
+#define GENMASK_INPUT_CHECK(h, l) \
+ (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
+ __is_constexpr((l) > (h)), (l) > (h), 0)))
+#else
+/*
+ * BUILD_BUG_ON_ZERO is not available in h files included from asm files,
+ * disable the input check if that is the case.
+ */
+#define GENMASK_INPUT_CHECK(h, l) 0
+#endif
+
+#define __GENMASK(h, l) \
+ (((~UL(0)) - (UL(1) << (l)) + 1) & \
+ (~UL(0) >> (BITS_PER_LONG - 1 - (h))))
+#define GENMASK(h, l) \
+ (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))
+
+#define __GENMASK_ULL(h, l) \
+ (((~ULL(0)) - (ULL(1) << (l)) + 1) & \
+ (~ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h))))
+#define GENMASK_ULL(h, l) \
+ (GENMASK_INPUT_CHECK(h, l) + __GENMASK_ULL(h, l))
+
+#endif /* __LINUX_BITS_H */
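Swapped arguments (l > h) now trip GENMASK_INPUT_CHECK() at compile time whenever they are compile-time constants. Typical use is describing register fields (CTRL_FREQ_MASK and ctrl_to_freq are invented names):

    #define CTRL_FREQ_MASK  GENMASK(23, 16) /* bits 23..16 of a register */

    static inline u32 ctrl_to_freq(u32 ctrl)
    {
            return (ctrl & CTRL_FREQ_MASK) >> 16;
    }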
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 7104bea8dab1..dd5841a42c33 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
@@ -13,765 +14,38 @@
* Nauman Rafique <nauman@google.com>
*/
-#include <linux/cgroup.h>
-#include <linux/percpu_counter.h>
-#include <linux/seq_file.h>
-#include <linux/radix-tree.h>
-#include <linux/blkdev.h>
-#include <linux/atomic.h>
+#include <linux/types.h>
-/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
-#define BLKG_STAT_CPU_BATCH (INT_MAX / 2)
+struct bio;
+struct cgroup_subsys_state;
+struct gendisk;
-/* Max limits for throttle policy */
-#define THROTL_IOPS_MAX UINT_MAX
+#define FC_APPID_LEN 129
#ifdef CONFIG_BLK_CGROUP
-
-enum blkg_rwstat_type {
- BLKG_RWSTAT_READ,
- BLKG_RWSTAT_WRITE,
- BLKG_RWSTAT_SYNC,
- BLKG_RWSTAT_ASYNC,
-
- BLKG_RWSTAT_NR,
- BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
-};
-
-struct blkcg_gq;
-
-struct blkcg {
- struct cgroup_subsys_state css;
- spinlock_t lock;
-
- struct radix_tree_root blkg_tree;
- struct blkcg_gq __rcu *blkg_hint;
- struct hlist_head blkg_list;
-
- struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];
-
- struct list_head all_blkcgs_node;
-#ifdef CONFIG_CGROUP_WRITEBACK
- struct list_head cgwb_list;
-#endif
-};
-
-/*
- * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
- * recursive. Used to carry stats of dead children, and, for blkg_rwstat,
- * to carry result values from read and sum operations.
- */
-struct blkg_stat {
- struct percpu_counter cpu_cnt;
- atomic64_t aux_cnt;
-};
-
-struct blkg_rwstat {
- struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR];
- atomic64_t aux_cnt[BLKG_RWSTAT_NR];
-};
-
-/*
- * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a
- * request_queue (q). This is used by blkcg policies which need to track
- * information per blkcg - q pair.
- *
- * There can be multiple active blkcg policies and each blkg:policy pair is
- * represented by a blkg_policy_data which is allocated and freed by each
- * policy's pd_alloc/free_fn() methods. A policy can allocate private data
- * area by allocating larger data structure which embeds blkg_policy_data
- * at the beginning.
- */
-struct blkg_policy_data {
- /* the blkg and policy id this per-policy data belongs to */
- struct blkcg_gq *blkg;
- int plid;
-};
-
-/*
- * Policies that need to keep per-blkcg data which is independent from any
- * request_queue associated to it should implement cpd_alloc/free_fn()
- * methods. A policy can allocate private data area by allocating larger
- * data structure which embeds blkcg_policy_data at the beginning.
- * cpd_init() is invoked to let each policy handle per-blkcg data.
- */
-struct blkcg_policy_data {
- /* the blkcg and policy id this per-policy data belongs to */
- struct blkcg *blkcg;
- int plid;
-};
-
-/* association between a blk cgroup and a request queue */
-struct blkcg_gq {
- /* Pointer to the associated request_queue */
- struct request_queue *q;
- struct list_head q_node;
- struct hlist_node blkcg_node;
- struct blkcg *blkcg;
-
- /*
- * Each blkg gets congested separately and the congestion state is
- * propagated to the matching bdi_writeback_congested.
- */
- struct bdi_writeback_congested *wb_congested;
-
- /* all non-root blkcg_gq's are guaranteed to have access to parent */
- struct blkcg_gq *parent;
-
- /* request allocation list for this blkcg-q pair */
- struct request_list rl;
-
- /* reference count */
- atomic_t refcnt;
-
- /* is this blkg online? protected by both blkcg and q locks */
- bool online;
-
- struct blkg_rwstat stat_bytes;
- struct blkg_rwstat stat_ios;
-
- struct blkg_policy_data *pd[BLKCG_MAX_POLS];
-
- struct rcu_head rcu_head;
-};
-
-typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
-typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
-typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
-typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
-typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
-typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
-typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
-typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
-typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
-typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
-
-struct blkcg_policy {
- int plid;
- /* cgroup files for the policy */
- struct cftype *dfl_cftypes;
- struct cftype *legacy_cftypes;
-
- /* operations */
- blkcg_pol_alloc_cpd_fn *cpd_alloc_fn;
- blkcg_pol_init_cpd_fn *cpd_init_fn;
- blkcg_pol_free_cpd_fn *cpd_free_fn;
- blkcg_pol_bind_cpd_fn *cpd_bind_fn;
-
- blkcg_pol_alloc_pd_fn *pd_alloc_fn;
- blkcg_pol_init_pd_fn *pd_init_fn;
- blkcg_pol_online_pd_fn *pd_online_fn;
- blkcg_pol_offline_pd_fn *pd_offline_fn;
- blkcg_pol_free_pd_fn *pd_free_fn;
- blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
-};
-
-extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
-struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
- struct request_queue *q, bool update_hint);
-struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
- struct request_queue *q);
-int blkcg_init_queue(struct request_queue *q);
-void blkcg_drain_queue(struct request_queue *q);
-void blkcg_exit_queue(struct request_queue *q);
-
-/* Blkio controller policy registration */
-int blkcg_policy_register(struct blkcg_policy *pol);
-void blkcg_policy_unregister(struct blkcg_policy *pol);
-int blkcg_activate_policy(struct request_queue *q,
- const struct blkcg_policy *pol);
-void blkcg_deactivate_policy(struct request_queue *q,
- const struct blkcg_policy *pol);
-
-const char *blkg_dev_name(struct blkcg_gq *blkg);
-void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
- u64 (*prfill)(struct seq_file *,
- struct blkg_policy_data *, int),
- const struct blkcg_policy *pol, int data,
- bool show_total);
-u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
-u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
- const struct blkg_rwstat *rwstat);
-u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
-u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
- int off);
-int blkg_print_stat_bytes(struct seq_file *sf, void *v);
-int blkg_print_stat_ios(struct seq_file *sf, void *v);
-int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
-int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);
-
-u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
- struct blkcg_policy *pol, int off);
-struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
- struct blkcg_policy *pol, int off);
-
-struct blkg_conf_ctx {
- struct gendisk *disk;
- struct blkcg_gq *blkg;
- char *body;
-};
-
-int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
- char *input, struct blkg_conf_ctx *ctx);
-void blkg_conf_finish(struct blkg_conf_ctx *ctx);
-
-
-static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
-{
- return css ? container_of(css, struct blkcg, css) : NULL;
-}
-
-static inline struct blkcg *task_blkcg(struct task_struct *tsk)
-{
- return css_to_blkcg(task_css(tsk, io_cgrp_id));
-}
-
-static inline struct blkcg *bio_blkcg(struct bio *bio)
-{
- if (bio && bio->bi_css)
- return css_to_blkcg(bio->bi_css);
- return task_blkcg(current);
-}
-
-static inline struct cgroup_subsys_state *
-task_get_blkcg_css(struct task_struct *task)
-{
- return task_get_css(task, io_cgrp_id);
-}
-
-/**
- * blkcg_parent - get the parent of a blkcg
- * @blkcg: blkcg of interest
- *
- * Return the parent blkcg of @blkcg. Can be called anytime.
- */
-static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
-{
- return css_to_blkcg(blkcg->css.parent);
-}
-
-/**
- * __blkg_lookup - internal version of blkg_lookup()
- * @blkcg: blkcg of interest
- * @q: request_queue of interest
- * @update_hint: whether to update lookup hint with the result or not
- *
- * This is internal version and shouldn't be used by policy
- * implementations. Looks up blkgs for the @blkcg - @q pair regardless of
- * @q's bypass state. If @update_hint is %true, the caller should be
- * holding @q->queue_lock and lookup hint is updated on success.
- */
-static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
- struct request_queue *q,
- bool update_hint)
-{
- struct blkcg_gq *blkg;
-
- if (blkcg == &blkcg_root)
- return q->root_blkg;
-
- blkg = rcu_dereference(blkcg->blkg_hint);
- if (blkg && blkg->q == q)
- return blkg;
-
- return blkg_lookup_slowpath(blkcg, q, update_hint);
-}
-
-/**
- * blkg_lookup - lookup blkg for the specified blkcg - q pair
- * @blkcg: blkcg of interest
- * @q: request_queue of interest
- *
- * Lookup blkg for the @blkcg - @q pair. This function should be called
- * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
- * - see blk_queue_bypass_start() for details.
- */
-static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
- struct request_queue *q)
-{
- WARN_ON_ONCE(!rcu_read_lock_held());
-
- if (unlikely(blk_queue_bypass(q)))
- return NULL;
- return __blkg_lookup(blkcg, q, false);
-}
-
-/**
- * blkg_to_pdata - get policy private data
- * @blkg: blkg of interest
- * @pol: policy of interest
- *
- * Return pointer to private data associated with the @blkg-@pol pair.
- */
-static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
- struct blkcg_policy *pol)
-{
- return blkg ? blkg->pd[pol->plid] : NULL;
-}
-
-static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
- struct blkcg_policy *pol)
-{
- return blkcg ? blkcg->cpd[pol->plid] : NULL;
-}
-
-/**
- * pdata_to_blkg - get blkg associated with policy private data
- * @pd: policy private data of interest
- *
- * @pd is policy private data. Determine the blkg it's associated with.
- */
-static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
-{
- return pd ? pd->blkg : NULL;
-}
-
-static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
-{
- return cpd ? cpd->blkcg : NULL;
-}
-
-/**
- * blkg_path - format cgroup path of blkg
- * @blkg: blkg of interest
- * @buf: target buffer
- * @buflen: target buffer length
- *
- * Format the path of the cgroup of @blkg into @buf.
- */
-static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
-{
- return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
-}
-
-/**
- * blkg_get - get a blkg reference
- * @blkg: blkg to get
- *
- * The caller should be holding an existing reference.
- */
-static inline void blkg_get(struct blkcg_gq *blkg)
-{
- WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
- atomic_inc(&blkg->refcnt);
-}
-
-void __blkg_release_rcu(struct rcu_head *rcu);
-
-/**
- * blkg_put - put a blkg reference
- * @blkg: blkg to put
- */
-static inline void blkg_put(struct blkcg_gq *blkg)
-{
- WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
- if (atomic_dec_and_test(&blkg->refcnt))
- call_rcu(&blkg->rcu_head, __blkg_release_rcu);
-}
-
-/**
- * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
- * @d_blkg: loop cursor pointing to the current descendant
- * @pos_css: used for iteration
- * @p_blkg: target blkg to walk descendants of
- *
- * Walk @c_blkg through the descendants of @p_blkg. Must be used with RCU
- * read locked. If called under either blkcg or queue lock, the iteration
- * is guaranteed to include all and only online blkgs. The caller may
- * update @pos_css by calling css_rightmost_descendant() to skip subtree.
- * @p_blkg is included in the iteration and the first node to be visited.
- */
-#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \
- css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \
- if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
- (p_blkg)->q, false)))
-
-/**
- * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
- * @d_blkg: loop cursor pointing to the current descendant
- * @pos_css: used for iteration
- * @p_blkg: target blkg to walk descendants of
- *
- * Similar to blkg_for_each_descendant_pre() but performs post-order
- * traversal instead. Synchronization rules are the same. @p_blkg is
- * included in the iteration and the last node to be visited.
- */
-#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \
- css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
- if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
- (p_blkg)->q, false)))
-
-/**
- * blk_get_rl - get request_list to use
- * @q: request_queue of interest
- * @bio: bio which will be attached to the allocated request (may be %NULL)
- *
- * The caller wants to allocate a request from @q to use for @bio. Find
- * the request_list to use and obtain a reference on it. Should be called
- * under queue_lock. This function is guaranteed to return non-%NULL
- * request_list.
- */
-static inline struct request_list *blk_get_rl(struct request_queue *q,
- struct bio *bio)
-{
- struct blkcg *blkcg;
- struct blkcg_gq *blkg;
-
- rcu_read_lock();
-
- blkcg = bio_blkcg(bio);
-
- /* bypass blkg lookup and use @q->root_rl directly for root */
- if (blkcg == &blkcg_root)
- goto root_rl;
-
- /*
- * Try to use blkg->rl. blkg lookup may fail under memory pressure
- * or if either the blkcg or queue is going away. Fall back to
- * root_rl in such cases.
- */
- blkg = blkg_lookup(blkcg, q);
- if (unlikely(!blkg))
- goto root_rl;
-
- blkg_get(blkg);
- rcu_read_unlock();
- return &blkg->rl;
-root_rl:
- rcu_read_unlock();
- return &q->root_rl;
-}
-
-/**
- * blk_put_rl - put request_list
- * @rl: request_list to put
- *
- * Put the reference acquired by blk_get_rl(). Should be called under
- * queue_lock.
- */
-static inline void blk_put_rl(struct request_list *rl)
-{
- if (rl->blkg->blkcg != &blkcg_root)
- blkg_put(rl->blkg);
-}
-
-/**
- * blk_rq_set_rl - associate a request with a request_list
- * @rq: request of interest
- * @rl: target request_list
- *
- * Associate @rq with @rl so that accounting and freeing can know the
- * request_list @rq came from.
- */
-static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
-{
- rq->rl = rl;
-}
-
-/**
- * blk_rq_rl - return the request_list a request came from
- * @rq: request of interest
- *
- * Return the request_list @rq is allocated from.
- */
-static inline struct request_list *blk_rq_rl(struct request *rq)
-{
- return rq->rl;
-}
-
-struct request_list *__blk_queue_next_rl(struct request_list *rl,
- struct request_queue *q);
-/**
- * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
- *
- * Should be used under queue_lock.
- */
-#define blk_queue_for_each_rl(rl, q) \
- for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
-
-static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
-{
- int ret;
-
- ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
- if (ret)
- return ret;
-
- atomic64_set(&stat->aux_cnt, 0);
- return 0;
-}
-
-static inline void blkg_stat_exit(struct blkg_stat *stat)
-{
- percpu_counter_destroy(&stat->cpu_cnt);
-}
-
-/**
- * blkg_stat_add - add a value to a blkg_stat
- * @stat: target blkg_stat
- * @val: value to add
- *
- * Add @val to @stat. The caller must ensure that IRQ on the same CPU
- * don't re-enter this function for the same counter.
- */
-static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
-{
- percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
-}
-
-/**
- * blkg_stat_read - read the current value of a blkg_stat
- * @stat: blkg_stat to read
- */
-static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
-{
- return percpu_counter_sum_positive(&stat->cpu_cnt);
-}
-
-/**
- * blkg_stat_reset - reset a blkg_stat
- * @stat: blkg_stat to reset
- */
-static inline void blkg_stat_reset(struct blkg_stat *stat)
-{
- percpu_counter_set(&stat->cpu_cnt, 0);
- atomic64_set(&stat->aux_cnt, 0);
-}
-
-/**
- * blkg_stat_add_aux - add a blkg_stat into another's aux count
- * @to: the destination blkg_stat
- * @from: the source
- *
- * Add @from's count including the aux one to @to's aux count.
- */
-static inline void blkg_stat_add_aux(struct blkg_stat *to,
- struct blkg_stat *from)
-{
- atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
- &to->aux_cnt);
-}
-
-static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
-{
- int i, ret;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++) {
- ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
- if (ret) {
- while (--i >= 0)
- percpu_counter_destroy(&rwstat->cpu_cnt[i]);
- return ret;
- }
- atomic64_set(&rwstat->aux_cnt[i], 0);
- }
- return 0;
-}
-
-static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
-{
- int i;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- percpu_counter_destroy(&rwstat->cpu_cnt[i]);
-}
-
-/**
- * blkg_rwstat_add - add a value to a blkg_rwstat
- * @rwstat: target blkg_rwstat
- * @op: REQ_OP and flags
- * @val: value to add
- *
- * Add @val to @rwstat. The counters are chosen according to @rw. The
- * caller is responsible for synchronizing calls to this function.
- */
-static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
- unsigned int op, uint64_t val)
-{
- struct percpu_counter *cnt;
-
- if (op_is_write(op))
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
- else
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
-
- percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
-
- if (op_is_sync(op))
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
- else
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
-
- percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
-}
-
-/**
- * blkg_rwstat_read - read the current values of a blkg_rwstat
- * @rwstat: blkg_rwstat to read
- *
- * Read the current snapshot of @rwstat and return it in the aux counts.
- */
-static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
-{
- struct blkg_rwstat result;
- int i;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- atomic64_set(&result.aux_cnt[i],
- percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
- return result;
-}
-
-/**
- * blkg_rwstat_total - read the total count of a blkg_rwstat
- * @rwstat: blkg_rwstat to read
- *
- * Return the total count of @rwstat regardless of the IO direction. This
- * function can be called without synchronization and takes care of u64
- * atomicity.
- */
-static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
-{
- struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
-
- return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
- atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
-}
-
-/**
- * blkg_rwstat_reset - reset a blkg_rwstat
- * @rwstat: blkg_rwstat to reset
- */
-static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
-{
- int i;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++) {
- percpu_counter_set(&rwstat->cpu_cnt[i], 0);
- atomic64_set(&rwstat->aux_cnt[i], 0);
- }
-}
-
-/**
- * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
- * @to: the destination blkg_rwstat
- * @from: the source
- *
- * Add @from's count including the aux one to @to's aux count.
- */
-static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
- struct blkg_rwstat *from)
-{
- struct blkg_rwstat v = blkg_rwstat_read(from);
- int i;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- atomic64_add(atomic64_read(&v.aux_cnt[i]) +
- atomic64_read(&from->aux_cnt[i]),
- &to->aux_cnt[i]);
-}
-
-#ifdef CONFIG_BLK_DEV_THROTTLING
-extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
- struct bio *bio);
-#else
-static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
- struct bio *bio) { return false; }
-#endif
-
-static inline bool blkcg_bio_issue_check(struct request_queue *q,
- struct bio *bio)
-{
- struct blkcg *blkcg;
- struct blkcg_gq *blkg;
- bool throtl = false;
-
- rcu_read_lock();
- blkcg = bio_blkcg(bio);
-
- blkg = blkg_lookup(blkcg, q);
- if (unlikely(!blkg)) {
- spin_lock_irq(q->queue_lock);
- blkg = blkg_lookup_create(blkcg, q);
- if (IS_ERR(blkg))
- blkg = NULL;
- spin_unlock_irq(q->queue_lock);
- }
-
- throtl = blk_throtl_bio(q, blkg, bio);
-
- if (!throtl) {
- blkg = blkg ?: q->root_blkg;
- blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
- bio->bi_iter.bi_size);
- blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
- }
-
- rcu_read_unlock();
- return !throtl;
-}
+void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay);
+void blkcg_maybe_throttle_current(void);
+bool blk_cgroup_congested(void);
+void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css);
+void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css);
+struct list_head *blkcg_get_cgwb_list(struct cgroup_subsys_state *css);
+struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio);
#else /* CONFIG_BLK_CGROUP */
-struct blkcg {
-};
-
-struct blkg_policy_data {
-};
-
-struct blkcg_policy_data {
-};
-
-struct blkcg_gq {
-};
-
-struct blkcg_policy {
-};
-
#define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
-static inline struct cgroup_subsys_state *
-task_get_blkcg_css(struct task_struct *task)
+static inline void blkcg_maybe_throttle_current(void) { }
+static inline bool blk_cgroup_congested(void) { return false; }
+static inline struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
{
return NULL;
}
+#endif /* CONFIG_BLK_CGROUP */
-#ifdef CONFIG_BLOCK
-
-static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
-static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
-static inline void blkcg_drain_queue(struct request_queue *q) { }
-static inline void blkcg_exit_queue(struct request_queue *q) { }
-static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
-static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
-static inline int blkcg_activate_policy(struct request_queue *q,
- const struct blkcg_policy *pol) { return 0; }
-static inline void blkcg_deactivate_policy(struct request_queue *q,
- const struct blkcg_policy *pol) { }
-
-static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
-
-static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
- struct blkcg_policy *pol) { return NULL; }
-static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
-static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
-static inline void blkg_get(struct blkcg_gq *blkg) { }
-static inline void blkg_put(struct blkcg_gq *blkg) { }
-
-static inline struct request_list *blk_get_rl(struct request_queue *q,
- struct bio *bio) { return &q->root_rl; }
-static inline void blk_put_rl(struct request_list *rl) { }
-static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
-static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
-
-static inline bool blkcg_bio_issue_check(struct request_queue *q,
- struct bio *bio) { return true; }
-
-#define blk_queue_for_each_rl(rl, q) \
- for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
+int blkcg_set_fc_appid(char *app_id, u64 cgrp_id, size_t app_id_len);
+char *blkcg_get_fc_appid(struct bio *bio);
-#endif /* CONFIG_BLOCK */
-#endif /* CONFIG_BLK_CGROUP */
#endif /* _BLK_CGROUP_H */
diff --git a/include/linux/blk-crypto-profile.h b/include/linux/blk-crypto-profile.h
new file mode 100644
index 000000000000..e6802b69cdd6
--- /dev/null
+++ b/include/linux/blk-crypto-profile.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC
+ */
+
+#ifndef __LINUX_BLK_CRYPTO_PROFILE_H
+#define __LINUX_BLK_CRYPTO_PROFILE_H
+
+#include <linux/bio.h>
+#include <linux/blk-crypto.h>
+
+struct blk_crypto_profile;
+
+/**
+ * struct blk_crypto_ll_ops - functions to control inline encryption hardware
+ *
+ * Low-level operations for controlling inline encryption hardware. This
+ * interface must be implemented by storage drivers that support inline
+ * encryption. All functions may sleep, are serialized by profile->lock, and
+ * are never called while profile->dev (if set) is runtime-suspended.
+ */
+struct blk_crypto_ll_ops {
+
+ /**
+ * @keyslot_program: Program a key into the inline encryption hardware.
+ *
+ * Program @key into the specified @slot in the inline encryption
+ * hardware, overwriting any key that the keyslot may already contain.
+ * The keyslot is guaranteed to not be in-use by any I/O.
+ *
+ * This is required if the device has keyslots. Otherwise (i.e. if the
+ * device is a layered device, or if the device is real hardware that
+ * simply doesn't have the concept of keyslots) it is never called.
+ *
+ * Must return 0 on success, or -errno on failure.
+ */
+ int (*keyslot_program)(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot);
+
+ /**
+ * @keyslot_evict: Evict a key from the inline encryption hardware.
+ *
+ * If the device has keyslots, this function must evict the key from the
+ * specified @slot. The slot will contain @key, but there should be no
+ * need for the @key argument to be used as @slot should be sufficient.
+ * The keyslot is guaranteed to not be in-use by any I/O.
+ *
+ * If the device doesn't have keyslots itself, this function must evict
+ * @key from any underlying devices. @slot won't be valid in this case.
+ *
+ * If there are no keyslots and no underlying devices, this function
+ * isn't required.
+ *
+ * Must return 0 on success, or -errno on failure.
+ */
+ int (*keyslot_evict)(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot);
+};
+
+/**
+ * struct blk_crypto_profile - inline encryption profile for a device
+ *
+ * This struct contains a storage device's inline encryption capabilities (e.g.
+ * the supported crypto algorithms), driver-provided functions to control the
+ * inline encryption hardware (e.g. programming and evicting keys), and optional
+ * device-independent keyslot management data.
+ */
+struct blk_crypto_profile {
+
+ /* public: Drivers must initialize the following fields. */
+
+ /**
+ * @ll_ops: Driver-provided functions to control the inline encryption
+ * hardware, e.g. program and evict keys.
+ */
+ struct blk_crypto_ll_ops ll_ops;
+
+ /**
+ * @max_dun_bytes_supported: The maximum number of bytes supported for
+ * specifying the data unit number (DUN). Specifically, the range of
+ * supported DUNs is 0 through (1 << (8 * max_dun_bytes_supported)) - 1.
+ */
+ unsigned int max_dun_bytes_supported;
+
+ /**
+ * @modes_supported: Array of bitmasks that specifies whether each
+ * combination of crypto mode and data unit size is supported.
+ * Specifically, the i'th bit of modes_supported[crypto_mode] is set if
+ * crypto_mode can be used with a data unit size of (1 << i). Note that
+ * only data unit sizes that are powers of 2 can be supported.
+ */
+ unsigned int modes_supported[BLK_ENCRYPTION_MODE_MAX];
+
+ /**
+ * @dev: An optional device for runtime power management. If the driver
+ * provides this device, it will be runtime-resumed before any function
+ * in @ll_ops is called and will remain resumed during the call.
+ */
+ struct device *dev;
+
+ /* private: The following fields shouldn't be accessed by drivers. */
+
+ /* Number of keyslots, or 0 if not applicable */
+ unsigned int num_slots;
+
+ /*
+ * Serializes all calls to functions in @ll_ops as well as all changes
+ * to @slot_hashtable. This can also be taken in read mode to look up
+ * keyslots while ensuring that they can't be changed concurrently.
+ */
+ struct rw_semaphore lock;
+
+ /* List of idle slots, with least recently used slot at front */
+ wait_queue_head_t idle_slots_wait_queue;
+ struct list_head idle_slots;
+ spinlock_t idle_slots_lock;
+
+ /*
+	 * Hash table which maps struct blk_crypto_key pointers to
+	 * keyslots, so that we
+ * can find a key's keyslot in O(1) time rather than O(num_slots).
+ * Protected by 'lock'.
+ */
+ struct hlist_head *slot_hashtable;
+ unsigned int log_slot_ht_size;
+
+ /* Per-keyslot data */
+ struct blk_crypto_keyslot *slots;
+};
+
+int blk_crypto_profile_init(struct blk_crypto_profile *profile,
+ unsigned int num_slots);
+
+int devm_blk_crypto_profile_init(struct device *dev,
+ struct blk_crypto_profile *profile,
+ unsigned int num_slots);
+
+unsigned int blk_crypto_keyslot_index(struct blk_crypto_keyslot *slot);
+
+void blk_crypto_reprogram_all_keys(struct blk_crypto_profile *profile);
+
+void blk_crypto_profile_destroy(struct blk_crypto_profile *profile);
+
+void blk_crypto_intersect_capabilities(struct blk_crypto_profile *parent,
+ const struct blk_crypto_profile *child);
+
+bool blk_crypto_has_capabilities(const struct blk_crypto_profile *target,
+ const struct blk_crypto_profile *reference);
+
+void blk_crypto_update_capabilities(struct blk_crypto_profile *dst,
+ const struct blk_crypto_profile *src);
+
+#endif /* __LINUX_BLK_CRYPTO_PROFILE_H */
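
/*
 * A minimal sketch of the driver side of the profile API above. The my_*
 * names and MY_NUM_KEYSLOTS are hypothetical; only the blk_crypto_profile
 * fields and devm_blk_crypto_profile_init() come from this header.
 */
#define MY_NUM_KEYSLOTS	32	/* hypothetical hardware keyslot count */

static int my_keyslot_program(struct blk_crypto_profile *profile,
			      const struct blk_crypto_key *key,
			      unsigned int slot)
{
	/* program key->raw (key->size bytes) into hardware keyslot @slot */
	return 0;
}

static int my_keyslot_evict(struct blk_crypto_profile *profile,
			    const struct blk_crypto_key *key,
			    unsigned int slot)
{
	/* zero out hardware keyslot @slot */
	return 0;
}

static int my_init_crypto(struct device *dev,
			  struct blk_crypto_profile *profile)
{
	int err = devm_blk_crypto_profile_init(dev, profile, MY_NUM_KEYSLOTS);

	if (err)
		return err;

	profile->ll_ops.keyslot_program = my_keyslot_program;
	profile->ll_ops.keyslot_evict = my_keyslot_evict;
	profile->max_dun_bytes_supported = 8;
	/* bit 12 set: AES-256-XTS with 4096-byte data units */
	profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |= 4096;
	profile->dev = dev;
	return 0;
}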
diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h
new file mode 100644
index 000000000000..5e5822c18ee4
--- /dev/null
+++ b/include/linux/blk-crypto.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC
+ */
+
+#ifndef __LINUX_BLK_CRYPTO_H
+#define __LINUX_BLK_CRYPTO_H
+
+#include <linux/types.h>
+
+enum blk_crypto_mode_num {
+ BLK_ENCRYPTION_MODE_INVALID,
+ BLK_ENCRYPTION_MODE_AES_256_XTS,
+ BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV,
+ BLK_ENCRYPTION_MODE_ADIANTUM,
+ BLK_ENCRYPTION_MODE_SM4_XTS,
+ BLK_ENCRYPTION_MODE_MAX,
+};
+
+#define BLK_CRYPTO_MAX_KEY_SIZE 64
+/**
+ * struct blk_crypto_config - an inline encryption key's crypto configuration
+ * @crypto_mode: encryption algorithm this key is for
+ * @data_unit_size: the data unit size for all encryption/decryptions with this
+ * key. This is the size in bytes of each individual plaintext and
+ * ciphertext. This is always a power of 2. It might be e.g. the
+ * filesystem block size or the disk sector size.
+ * @dun_bytes: the maximum number of bytes of DUN used when using this key
+ */
+struct blk_crypto_config {
+ enum blk_crypto_mode_num crypto_mode;
+ unsigned int data_unit_size;
+ unsigned int dun_bytes;
+};
+
+/**
+ * struct blk_crypto_key - an inline encryption key
+ * @crypto_cfg: the crypto configuration (like crypto_mode, key size) for this
+ * key
+ * @data_unit_size_bits: log2 of data_unit_size
+ * @size: size of this key in bytes (determined by @crypto_cfg.crypto_mode)
+ * @raw: the raw bytes of this key. Only the first @size bytes are used.
+ *
+ * A blk_crypto_key is immutable once created, and many bios can reference it at
+ * the same time. It must not be freed until all bios using it have completed
+ * and it has been evicted from all devices on which it may have been used.
+ */
+struct blk_crypto_key {
+ struct blk_crypto_config crypto_cfg;
+ unsigned int data_unit_size_bits;
+ unsigned int size;
+ u8 raw[BLK_CRYPTO_MAX_KEY_SIZE];
+};
+
+#define BLK_CRYPTO_MAX_IV_SIZE 32
+#define BLK_CRYPTO_DUN_ARRAY_SIZE (BLK_CRYPTO_MAX_IV_SIZE / sizeof(u64))
+
+/**
+ * struct bio_crypt_ctx - an inline encryption context
+ * @bc_key: the key, algorithm, and data unit size to use
+ * @bc_dun: the data unit number (starting IV) to use
+ *
+ * A bio_crypt_ctx specifies that the contents of the bio will be encrypted (for
+ * write requests) or decrypted (for read requests) inline by the storage device
+ * or controller, or by the crypto API fallback.
+ */
+struct bio_crypt_ctx {
+ const struct blk_crypto_key *bc_key;
+ u64 bc_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
+};
+
+#include <linux/blk_types.h>
+#include <linux/blkdev.h>
+
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+
+static inline bool bio_has_crypt_ctx(struct bio *bio)
+{
+ return bio->bi_crypt_context;
+}
+
+void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
+ const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
+ gfp_t gfp_mask);
+
+bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
+ unsigned int bytes,
+ const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]);
+
+int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
+ enum blk_crypto_mode_num crypto_mode,
+ unsigned int dun_bytes,
+ unsigned int data_unit_size);
+
+int blk_crypto_start_using_key(struct block_device *bdev,
+ const struct blk_crypto_key *key);
+
+void blk_crypto_evict_key(struct block_device *bdev,
+ const struct blk_crypto_key *key);
+
+bool blk_crypto_config_supported_natively(struct block_device *bdev,
+ const struct blk_crypto_config *cfg);
+bool blk_crypto_config_supported(struct block_device *bdev,
+ const struct blk_crypto_config *cfg);
+
+#else /* CONFIG_BLK_INLINE_ENCRYPTION */
+
+static inline bool bio_has_crypt_ctx(struct bio *bio)
+{
+ return false;
+}
+
+#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
+
+int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
+/**
+ * bio_crypt_clone - clone bio encryption context
+ * @dst: destination bio
+ * @src: source bio
+ * @gfp_mask: memory allocation flags
+ *
+ * If @src has an encryption context, clone it to @dst.
+ *
+ * Return: 0 on success, -ENOMEM if out of memory. -ENOMEM is only possible if
+ * @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
+ */
+static inline int bio_crypt_clone(struct bio *dst, struct bio *src,
+ gfp_t gfp_mask)
+{
+ if (bio_has_crypt_ctx(src))
+ return __bio_crypt_clone(dst, src, gfp_mask);
+ return 0;
+}
+
+#endif /* __LINUX_BLK_CRYPTO_H */
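
/*
 * A sketch of key setup and encrypted submission with the declarations
 * above. my_key and the 4096-byte data unit size are illustrative choices;
 * the key must outlive every bio that references it.
 */
static struct blk_crypto_key my_key;

static int my_crypto_setup(struct block_device *bdev, const u8 *raw_key)
{
	int err;

	/* AES-256-XTS, 8-byte DUNs, 4096-byte data units */
	err = blk_crypto_init_key(&my_key, raw_key,
				  BLK_ENCRYPTION_MODE_AES_256_XTS, 8, 4096);
	if (err)
		return err;
	return blk_crypto_start_using_key(bdev, &my_key);
}

static void my_submit_encrypted(struct bio *bio, u64 first_dun)
{
	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { first_dun };

	bio_crypt_set_ctx(bio, &my_key, dun, GFP_NOIO);
	submit_bio(bio);
}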
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
new file mode 100644
index 000000000000..378b2459efe2
--- /dev/null
+++ b/include/linux/blk-integrity.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BLK_INTEGRITY_H
+#define _LINUX_BLK_INTEGRITY_H
+
+#include <linux/blk-mq.h>
+
+struct request;
+
+enum blk_integrity_flags {
+ BLK_INTEGRITY_VERIFY = 1 << 0,
+ BLK_INTEGRITY_GENERATE = 1 << 1,
+ BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
+ BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
+};
+
+struct blk_integrity_iter {
+ void *prot_buf;
+ void *data_buf;
+ sector_t seed;
+ unsigned int data_size;
+ unsigned short interval;
+ unsigned char tuple_size;
+ const char *disk_name;
+};
+
+typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
+typedef void (integrity_prepare_fn) (struct request *);
+typedef void (integrity_complete_fn) (struct request *, unsigned int);
+
+struct blk_integrity_profile {
+ integrity_processing_fn *generate_fn;
+ integrity_processing_fn *verify_fn;
+ integrity_prepare_fn *prepare_fn;
+ integrity_complete_fn *complete_fn;
+ const char *name;
+};
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+void blk_integrity_register(struct gendisk *, struct blk_integrity *);
+void blk_integrity_unregister(struct gendisk *);
+int blk_integrity_compare(struct gendisk *, struct gendisk *);
+int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
+ struct scatterlist *);
+int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
+
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+{
+ struct blk_integrity *bi = &disk->queue->integrity;
+
+ if (!bi->profile)
+ return NULL;
+
+ return bi;
+}
+
+static inline struct blk_integrity *
+bdev_get_integrity(struct block_device *bdev)
+{
+ return blk_get_integrity(bdev->bd_disk);
+}
+
+static inline bool
+blk_integrity_queue_supports_integrity(struct request_queue *q)
+{
+ return q->integrity.profile;
+}
+
+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+ unsigned int segs)
+{
+ q->limits.max_integrity_segments = segs;
+}
+
+static inline unsigned short
+queue_max_integrity_segments(const struct request_queue *q)
+{
+ return q->limits.max_integrity_segments;
+}
+
+/**
+ * bio_integrity_intervals - Return number of integrity intervals for a bio
+ * @bi: blk_integrity profile for device
+ * @sectors: Size of the bio in 512-byte sectors
+ *
+ * Description: The block layer calculates everything in 512 byte
+ * sectors but integrity metadata is done in terms of the data integrity
+ * interval size of the storage device. Convert the block layer sectors
+ * to the appropriate number of integrity intervals.
+ */
+static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return sectors >> (bi->interval_exp - 9);
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
+}
+
+static inline bool blk_integrity_rq(struct request *rq)
+{
+ return rq->cmd_flags & REQ_INTEGRITY;
+}
+
+/*
+ * Return the first bvec that contains integrity data. Only drivers that are
+ * limited to a single integrity segment should use this helper.
+ */
+static inline struct bio_vec *rq_integrity_vec(struct request *rq)
+{
+ if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
+ return NULL;
+ return rq->bio->bi_integrity->bip_vec;
+}
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+static inline int blk_rq_count_integrity_sg(struct request_queue *q,
+ struct bio *b)
+{
+ return 0;
+}
+static inline int blk_rq_map_integrity_sg(struct request_queue *q,
+ struct bio *b,
+ struct scatterlist *s)
+{
+ return 0;
+}
+static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
+{
+ return NULL;
+}
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+{
+ return NULL;
+}
+static inline bool
+blk_integrity_queue_supports_integrity(struct request_queue *q)
+{
+ return false;
+}
+static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
+{
+ return 0;
+}
+static inline void blk_integrity_register(struct gendisk *d,
+ struct blk_integrity *b)
+{
+}
+static inline void blk_integrity_unregister(struct gendisk *d)
+{
+}
+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+ unsigned int segs)
+{
+}
+static inline unsigned short
+queue_max_integrity_segments(const struct request_queue *q)
+{
+ return 0;
+}
+
+static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return 0;
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return 0;
+}
+static inline int blk_integrity_rq(struct request *rq)
+{
+ return 0;
+}
+
+static inline struct bio_vec *rq_integrity_vec(struct request *rq)
+{
+ return NULL;
+}
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+#endif /* _LINUX_BLK_INTEGRITY_H */
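
/*
 * A sketch of registering an integrity profile for a gendisk. my_generate()
 * and my_verify() are hypothetical; the blk_integrity field names (profile,
 * tuple_size, interval_exp, flags) follow struct blk_integrity as used by
 * blk_get_integrity() and bio_integrity_intervals() above.
 */
static blk_status_t my_generate(struct blk_integrity_iter *iter)
{
	/* compute protection information into iter->prot_buf */
	return BLK_STS_OK;
}

static blk_status_t my_verify(struct blk_integrity_iter *iter)
{
	/* check iter->prot_buf against iter->data_buf */
	return BLK_STS_OK;
}

static const struct blk_integrity_profile my_integrity_profile = {
	.name		= "MY-DIF-TYPE1",
	.generate_fn	= my_generate,
	.verify_fn	= my_verify,
};

static void my_register_integrity(struct gendisk *disk)
{
	struct blk_integrity bi = {
		.profile	= &my_integrity_profile,
		.tuple_size	= 8,	/* bytes of PI per interval */
		.interval_exp	= 12,	/* 4096-byte integrity intervals */
		.flags		= BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE,
	};

	blk_integrity_register(disk, &bi);
}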
diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h
index 6ab595259112..ca544e1d3508 100644
--- a/include/linux/blk-mq-pci.h
+++ b/include/linux/blk-mq-pci.h
@@ -1,9 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BLK_MQ_PCI_H
#define _LINUX_BLK_MQ_PCI_H
-struct blk_mq_tag_set;
+struct blk_mq_queue_map;
struct pci_dev;
-int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev);
+void blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
+ int offset);
#endif /* _LINUX_BLK_MQ_PCI_H */
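
/*
 * A sketch of a PCI driver's ->map_queues() using the new signature above.
 * my_set_to_pdev() is hypothetical; the offset of 1 assumes one
 * pre-allocated admin interrupt vector, as NVMe-style drivers do.
 */
static void my_map_queues(struct blk_mq_tag_set *set)
{
	struct pci_dev *pdev = my_set_to_pdev(set);

	blk_mq_pci_map_queues(&set->map[HCTX_TYPE_DEFAULT], pdev, 1);
}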
diff --git a/include/linux/blk-mq-virtio.h b/include/linux/blk-mq-virtio.h
index b1ef6e14744f..13226e9b22dd 100644
--- a/include/linux/blk-mq-virtio.h
+++ b/include/linux/blk-mq-virtio.h
@@ -1,10 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BLK_MQ_VIRTIO_H
#define _LINUX_BLK_MQ_VIRTIO_H
-struct blk_mq_tag_set;
+struct blk_mq_queue_map;
struct virtio_device;
-int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
+void blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
struct virtio_device *vdev, int first_vec);
#endif /* _LINUX_BLK_MQ_VIRTIO_H */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 14542308d25b..06caacd77ed6 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -1,157 +1,659 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H
#include <linux/blkdev.h>
#include <linux/sbitmap.h>
+#include <linux/lockdep.h>
+#include <linux/scatterlist.h>
+#include <linux/prefetch.h>
#include <linux/srcu.h>
struct blk_mq_tags;
struct blk_flush_queue;
+#define BLKDEV_MIN_RQ 4
+#define BLKDEV_DEFAULT_RQ 128
+
+enum rq_end_io_ret {
+ RQ_END_IO_NONE,
+ RQ_END_IO_FREE,
+};
+
+typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
+
+/* request flags */
+typedef __u32 __bitwise req_flags_t;
+
+/* drive already may have started this one */
+#define RQF_STARTED ((__force req_flags_t)(1 << 1))
+/* may not be passed by ioscheduler */
+#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3))
+/* request for flush sequence */
+#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4))
+/* merge of different types, fail separately */
+#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5))
+/* track inflight for MQ */
+#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6))
+/* don't call prep for this one */
+#define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
+/* vaguely specified driver internal error. Ignored by the block layer */
+#define RQF_FAILED ((__force req_flags_t)(1 << 10))
+/* don't warn about errors */
+#define RQF_QUIET ((__force req_flags_t)(1 << 11))
+/* elevator private data attached */
+#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
+/* account into disk and partition IO statistics */
+#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
+/* runtime pm request */
+#define RQF_PM ((__force req_flags_t)(1 << 15))
+/* on IO scheduler merge hash */
+#define RQF_HASHED ((__force req_flags_t)(1 << 16))
+/* track IO completion time */
+#define RQF_STATS ((__force req_flags_t)(1 << 17))
+/*
+ * Look at ->special_vec for the actual data payload instead of the
+ * bio chain.
+ */
+#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
+/* The per-zone write lock is held for this request */
+#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
+/* ->timeout has been called, don't expire again */
+#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))
+/* queue has elevator attached */
+#define RQF_ELV ((__force req_flags_t)(1 << 22))
+#define RQF_RESV ((__force req_flags_t)(1 << 23))
+
+/* flags that prevent us from merging requests: */
+#define RQF_NOMERGE_FLAGS \
+ (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
+
+enum mq_rq_state {
+ MQ_RQ_IDLE = 0,
+ MQ_RQ_IN_FLIGHT = 1,
+ MQ_RQ_COMPLETE = 2,
+};
+
+/*
+ * Try to put the fields that are referenced together in the same cacheline.
+ *
+ * If you modify this structure, make sure to update blk_rq_init() and
+ * especially blk_mq_rq_ctx_init() to take care of the added fields.
+ */
+struct request {
+ struct request_queue *q;
+ struct blk_mq_ctx *mq_ctx;
+ struct blk_mq_hw_ctx *mq_hctx;
+
+ blk_opf_t cmd_flags; /* op and common flags */
+ req_flags_t rq_flags;
+
+ int tag;
+ int internal_tag;
+
+ unsigned int timeout;
+
+ /* the following two fields are internal, NEVER access directly */
+ unsigned int __data_len; /* total data len */
+ sector_t __sector; /* sector cursor */
+
+ struct bio *bio;
+ struct bio *biotail;
+
+ union {
+ struct list_head queuelist;
+ struct request *rq_next;
+ };
+
+ struct block_device *part;
+#ifdef CONFIG_BLK_RQ_ALLOC_TIME
+ /* Time that the first bio started allocating this request. */
+ u64 alloc_time_ns;
+#endif
+ /* Time that this request was allocated for this IO. */
+ u64 start_time_ns;
+ /* Time that I/O was submitted to the device. */
+ u64 io_start_time_ns;
+
+#ifdef CONFIG_BLK_WBT
+ unsigned short wbt_flags;
+#endif
+ /*
+	 * rq sectors used for blk stats. It has the same value as
+	 * blk_rq_sectors(rq), except that it is never zeroed by
+	 * completion.
+ */
+ unsigned short stats_sectors;
+
+ /*
+ * Number of scatter-gather DMA addr+len pairs after
+ * physical address coalescing is performed.
+ */
+ unsigned short nr_phys_segments;
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ unsigned short nr_integrity_segments;
+#endif
+
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ struct bio_crypt_ctx *crypt_ctx;
+ struct blk_crypto_keyslot *crypt_keyslot;
+#endif
+
+ unsigned short ioprio;
+
+ enum mq_rq_state state;
+ atomic_t ref;
+
+ unsigned long deadline;
+
+ /*
+ * The hash is used inside the scheduler, and killed once the
+ * request reaches the dispatch list. The ipi_list is only used
+ * to queue the request for softirq completion, which is long
+ * after the request has been unhashed (and even removed from
+ * the dispatch list).
+ */
+ union {
+ struct hlist_node hash; /* merge hash */
+ struct llist_node ipi_list;
+ };
+
+ /*
+ * The rb_node is only used inside the io scheduler, requests
+ * are pruned when moved to the dispatch queue. So let the
+ * completion_data share space with the rb_node.
+ */
+ union {
+ struct rb_node rb_node; /* sort/lookup */
+ struct bio_vec special_vec;
+ void *completion_data;
+ };
+
+ /*
+ * Three pointers are available for the IO schedulers, if they need
+ * more they have to dynamically allocate it. Flush requests are
+ * never put on the IO scheduler. So let the flush fields share
+ * space with the elevator data.
+ */
+ union {
+ struct {
+ struct io_cq *icq;
+ void *priv[2];
+ } elv;
+
+ struct {
+ unsigned int seq;
+ struct list_head list;
+ rq_end_io_fn *saved_end_io;
+ } flush;
+ };
+
+ union {
+ struct __call_single_data csd;
+ u64 fifo_time;
+ };
+
+ /*
+ * completion callback.
+ */
+ rq_end_io_fn *end_io;
+ void *end_io_data;
+};
+
+static inline enum req_op req_op(const struct request *req)
+{
+ return req->cmd_flags & REQ_OP_MASK;
+}
+
+static inline bool blk_rq_is_passthrough(struct request *rq)
+{
+ return blk_op_is_passthrough(req_op(rq));
+}
+
+static inline unsigned short req_get_ioprio(struct request *req)
+{
+ return req->ioprio;
+}
+
+#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
+
+#define rq_dma_dir(rq) \
+ (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
+#define rq_list_add(listptr, rq) do { \
+ (rq)->rq_next = *(listptr); \
+ *(listptr) = rq; \
+} while (0)
+
+#define rq_list_add_tail(lastpptr, rq) do { \
+ (rq)->rq_next = NULL; \
+ **(lastpptr) = rq; \
+ *(lastpptr) = &rq->rq_next; \
+} while (0)
+
+#define rq_list_pop(listptr) \
+({ \
+ struct request *__req = NULL; \
+ if ((listptr) && *(listptr)) { \
+ __req = *(listptr); \
+ *(listptr) = __req->rq_next; \
+ } \
+ __req; \
+})
+
+#define rq_list_peek(listptr) \
+({ \
+ struct request *__req = NULL; \
+ if ((listptr) && *(listptr)) \
+ __req = *(listptr); \
+ __req; \
+})
+
+#define rq_list_for_each(listptr, pos) \
+ for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))
+
+#define rq_list_for_each_safe(listptr, pos, nxt) \
+ for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos); \
+ pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)
+
+#define rq_list_next(rq) (rq)->rq_next
+#define rq_list_empty(list) ((list) == (struct request *) NULL)
+
+/**
+ * rq_list_move() - move a struct request from one list to another
+ * @src: The source list @rq is currently in
+ * @dst: The destination list that @rq will be appended to
+ * @rq: The request to move
+ * @prev: The request preceding @rq in @src (NULL if @rq is the head)
+ */
+static inline void rq_list_move(struct request **src, struct request **dst,
+ struct request *rq, struct request *prev)
+{
+ if (prev)
+ prev->rq_next = rq->rq_next;
+ else
+ *src = rq->rq_next;
+ rq_list_add(dst, rq);
+}
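
/*
 * A sketch of draining an rq_list with the helpers above, as a driver's
 * ->queue_rqs() might; my_issue() is hypothetical.
 */
static void my_queue_rqs(struct request **rqlist)
{
	struct request *rq;

	while ((rq = rq_list_pop(rqlist)))
		my_issue(rq);	/* hand each request to the hardware */
}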
+
+/**
+ * enum blk_eh_timer_return - How the timeout handler should proceed
+ * @BLK_EH_DONE: The block driver completed the command or will complete it at
+ * a later time.
+ * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
+ * request to complete.
+ */
+enum blk_eh_timer_return {
+ BLK_EH_DONE,
+ BLK_EH_RESET_TIMER,
+};
+
+#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
+#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
+
+/**
+ * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
+ * block device
+ */
struct blk_mq_hw_ctx {
struct {
+ /** @lock: Protects the dispatch list. */
spinlock_t lock;
+ /**
+ * @dispatch: Used for requests that are ready to be
+ * dispatched to the hardware but for some reason (e.g. lack of
+ * resources) could not be sent to the hardware. As soon as the
+		 * driver can send new requests, requests on this list will
+		 * be sent first, for a fairer dispatch.
+ */
struct list_head dispatch;
- unsigned long state; /* BLK_MQ_S_* flags */
+ /**
+ * @state: BLK_MQ_S_* flags. Defines the state of the hw
+ * queue (active, scheduled to restart, stopped).
+ */
+ unsigned long state;
} ____cacheline_aligned_in_smp;
+ /**
+ * @run_work: Used for scheduling a hardware queue run at a later time.
+ */
struct delayed_work run_work;
+ /** @cpumask: Map of available CPUs where this hctx can run. */
cpumask_var_t cpumask;
+ /**
+ * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
+ * selection from @cpumask.
+ */
int next_cpu;
+ /**
+	 * @next_cpu_batch: Counter of how many queue runs are left in the
+	 * batch before changing to the next CPU.
+ */
int next_cpu_batch;
- unsigned long flags; /* BLK_MQ_F_* flags */
+ /** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
+ unsigned long flags;
+ /**
+ * @sched_data: Pointer owned by the IO scheduler attached to a request
+ * queue. It's up to the IO scheduler how to use this pointer.
+ */
void *sched_data;
+ /**
+ * @queue: Pointer to the request queue that owns this hardware context.
+ */
struct request_queue *queue;
+ /** @fq: Queue of requests that need to perform a flush operation. */
struct blk_flush_queue *fq;
+ /**
+ * @driver_data: Pointer to data owned by the block driver that created
+ * this hctx
+ */
void *driver_data;
+ /**
+ * @ctx_map: Bitmap for each software queue. If bit is on, there is a
+ * pending request in that software queue.
+ */
struct sbitmap ctx_map;
+ /**
+ * @dispatch_from: Software queue to be used when no scheduler was
+ * selected.
+ */
+ struct blk_mq_ctx *dispatch_from;
+ /**
+ * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
+ * decide if the hw_queue is busy using Exponential Weighted Moving
+	 * decide if the hw_queue is busy, using an Exponential Weighted
+	 * Moving Average algorithm.
+ unsigned int dispatch_busy;
+
+ /** @type: HCTX_TYPE_* flags. Type of hardware queue. */
+ unsigned short type;
+ /** @nr_ctx: Number of software queues. */
+ unsigned short nr_ctx;
+ /** @ctxs: Array of software queues. */
struct blk_mq_ctx **ctxs;
- unsigned int nr_ctx;
- wait_queue_entry_t dispatch_wait;
+ /** @dispatch_wait_lock: Lock for dispatch_wait queue. */
+ spinlock_t dispatch_wait_lock;
+ /**
+	 * @dispatch_wait: Waitqueue on which to put requests when there is no
+	 * tag available at the moment, to wait for another try in the future.
+ */
+ wait_queue_entry_t dispatch_wait;
+
+ /**
+ * @wait_index: Index of next available dispatch_wait queue to insert
+ * requests.
+ */
atomic_t wait_index;
+ /**
+ * @tags: Tags owned by the block driver. A tag at this set is only
+	 * @tags: Tags owned by the block driver. A tag in this set is only
+ */
struct blk_mq_tags *tags;
+ /**
+ * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
+ * scheduler associated with a request queue, a tag is assigned when
+	 * that request is allocated. Otherwise, this member is not used.
+ */
struct blk_mq_tags *sched_tags;
+ /** @queued: Number of queued requests. */
unsigned long queued;
+ /** @run: Number of dispatched requests. */
unsigned long run;
-#define BLK_MQ_MAX_DISPATCH_ORDER 7
- unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
+ /** @numa_node: NUMA node the storage adapter has been connected to. */
unsigned int numa_node;
+ /** @queue_num: Index of this hardware queue. */
unsigned int queue_num;
+ /**
+ * @nr_active: Number of active requests. Only used when a tag set is
+ * shared across request queues.
+ */
atomic_t nr_active;
+	/** @cpuhp_online: List to store requests when a CPU goes offline. */
+	struct hlist_node cpuhp_online;
+	/** @cpuhp_dead: List to store requests when a CPU dies. */
struct hlist_node cpuhp_dead;
+ /** @kobj: Kernel object for sysfs. */
struct kobject kobj;
- unsigned long poll_considered;
- unsigned long poll_invoked;
- unsigned long poll_success;
-
#ifdef CONFIG_BLK_DEBUG_FS
+ /**
+ * @debugfs_dir: debugfs directory for this hardware queue. Named
+ * as cpu<cpu_number>.
+ */
struct dentry *debugfs_dir;
+ /** @sched_debugfs_dir: debugfs directory for the scheduler. */
struct dentry *sched_debugfs_dir;
#endif
- /* Must be the last member - see also blk_mq_hw_ctx_size(). */
- struct srcu_struct queue_rq_srcu[0];
+ /**
+ * @hctx_list: if this hctx is not in use, this is an entry in
+ * q->unused_hctx_list.
+ */
+ struct list_head hctx_list;
+};
+
+/**
+ * struct blk_mq_queue_map - Map software queues to hardware queues
+ * @mq_map: CPU ID to hardware queue index map. This is an array
+ * with nr_cpu_ids elements. Each element has a value in the range
+ * [@queue_offset, @queue_offset + @nr_queues).
+ * @nr_queues: Number of hardware queues to map CPU IDs onto.
+ * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
+ * driver to map each hardware queue type (enum hctx_type) onto a distinct
+ * set of hardware queues.
+ */
+struct blk_mq_queue_map {
+ unsigned int *mq_map;
+ unsigned int nr_queues;
+ unsigned int queue_offset;
+};
+
+/**
+ * enum hctx_type - Type of hardware queue
+ * @HCTX_TYPE_DEFAULT: All I/O not otherwise accounted for.
+ * @HCTX_TYPE_READ: Just for READ I/O.
+ * @HCTX_TYPE_POLL: Polled I/O of any kind.
+ * @HCTX_MAX_TYPES: Number of types of hctx.
+ */
+enum hctx_type {
+ HCTX_TYPE_DEFAULT,
+ HCTX_TYPE_READ,
+ HCTX_TYPE_POLL,
+
+ HCTX_MAX_TYPES,
};
+/**
+ * struct blk_mq_tag_set - tag set that can be shared between request queues
+ * @ops: Pointers to functions that implement block driver behavior.
+ * @map: One or more ctx -> hctx mappings. One map exists for each
+ * hardware queue type (enum hctx_type) that the driver wishes
+ * to support. There are no restrictions on maps being of the
+ * same size, and it's perfectly legal to share maps between
+ * types.
+ * @nr_maps: Number of elements in the @map array. A number in the range
+ * [1, HCTX_MAX_TYPES].
+ * @nr_hw_queues: Number of hardware queues supported by the block driver that
+ * owns this data structure.
+ * @queue_depth: Number of tags per hardware queue, reserved tags included.
+ * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
+ * allocations.
+ * @cmd_size: Number of additional bytes to allocate per request. The block
+ * driver owns these additional bytes.
+ * @numa_node: NUMA node the storage adapter has been connected to.
+ * @timeout: Request processing timeout in jiffies.
+ * @flags: Zero or more BLK_MQ_F_* flags.
+ * @driver_data: Pointer to data owned by the block driver that created this
+ * tag set.
+ * @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues
+ * elements.
+ * @shared_tags:
+ * Shared set of tags. Has @nr_hw_queues elements. If set,
+ * shared by all @tags.
+ * @tag_list_lock: Serializes tag_list accesses.
+ * @tag_list: List of the request queues that use this tag set. See also
+ * request_queue.tag_set_list.
+ * @srcu:	Used as a lock when the request queue is of the blocking type
+ *		(BLK_MQ_F_BLOCKING).
+ */
struct blk_mq_tag_set {
- unsigned int *mq_map;
const struct blk_mq_ops *ops;
+ struct blk_mq_queue_map map[HCTX_MAX_TYPES];
+ unsigned int nr_maps;
unsigned int nr_hw_queues;
- unsigned int queue_depth; /* max hw supported */
+ unsigned int queue_depth;
unsigned int reserved_tags;
- unsigned int cmd_size; /* per-request extra data */
+ unsigned int cmd_size;
int numa_node;
unsigned int timeout;
- unsigned int flags; /* BLK_MQ_F_* */
+ unsigned int flags;
void *driver_data;
struct blk_mq_tags **tags;
+ struct blk_mq_tags *shared_tags;
+
struct mutex tag_list_lock;
struct list_head tag_list;
+ struct srcu_struct *srcu;
};
+/**
+ * struct blk_mq_queue_data - Data about a request inserted in a queue
+ *
+ * @rq: Request pointer.
+ * @last: If it is the last request in the queue.
+ */
struct blk_mq_queue_data {
struct request *rq;
bool last;
};
-typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
- const struct blk_mq_queue_data *);
-typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
-typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
-typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
-typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
- unsigned int, unsigned int);
-typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
- unsigned int);
-typedef int (reinit_request_fn)(void *, struct request *);
+typedef bool (busy_tag_iter_fn)(struct request *, void *);
-typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
- bool);
-typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
-typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
-typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
+/**
+ * struct blk_mq_ops - Callback functions that implement block driver
+ * behaviour.
+ */
+struct blk_mq_ops {
+ /**
+ * @queue_rq: Queue a new request from block IO.
+ */
+ blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
+ const struct blk_mq_queue_data *);
+ /**
+ * @commit_rqs: If a driver uses bd->last to judge when to submit
+ * requests to hardware, it must define this function. In case of errors
+ * that make us stop issuing further requests, this hook serves the
+ * purpose of kicking the hardware (which the last request otherwise
+ * would have done).
+ */
+ void (*commit_rqs)(struct blk_mq_hw_ctx *);
-struct blk_mq_ops {
- /*
- * Queue request
+ /**
+	 * @queue_rqs: Queue a list of new requests. The driver is guaranteed
+ * that each request belongs to the same queue. If the driver doesn't
+ * empty the @rqlist completely, then the rest will be queued
+ * individually by the block layer upon return.
*/
- queue_rq_fn *queue_rq;
+ void (*queue_rqs)(struct request **rqlist);
- /*
- * Called on request timeout
+ /**
+	 * @get_budget: Reserve a budget before queueing a request. Once
+	 *	.queue_rq has run, it is the driver's responsibility to
+	 *	release the reserved budget. The failure case of .get_budget
+	 *	must also be handled to avoid I/O deadlock.
*/
- timeout_fn *timeout;
+ int (*get_budget)(struct request_queue *);
- /*
- * Called to poll for completion of a specific tag.
+ /**
+ * @put_budget: Release the reserved budget.
*/
- poll_fn *poll;
+ void (*put_budget)(struct request_queue *, int);
- softirq_done_fn *complete;
+ /**
+ * @set_rq_budget_token: store rq's budget token
+ */
+ void (*set_rq_budget_token)(struct request *, int);
+ /**
+ * @get_rq_budget_token: retrieve rq's budget token
+ */
+ int (*get_rq_budget_token)(struct request *);
- /*
- * Called when the block layer side of a hardware queue has been
- * set up, allowing the driver to allocate/init matching structures.
- * Ditto for exit/teardown.
+ /**
+ * @timeout: Called on request timeout.
*/
- init_hctx_fn *init_hctx;
- exit_hctx_fn *exit_hctx;
+ enum blk_eh_timer_return (*timeout)(struct request *);
- /*
- * Called for every command allocated by the block layer to allow
- * the driver to set up driver specific data.
+ /**
+ * @poll: Called to poll for completion of a specific tag.
+ */
+ int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);
+
+ /**
+ * @complete: Mark the request as complete.
+ */
+ void (*complete)(struct request *);
+
+ /**
+ * @init_hctx: Called when the block layer side of a hardware queue has
+ * been set up, allowing the driver to allocate/init matching
+ * structures.
+ */
+ int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
+ /**
+ * @exit_hctx: Ditto for exit/teardown.
+ */
+ void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
+
+ /**
+ * @init_request: Called for every command allocated by the block layer
+ * to allow the driver to set up driver specific data.
*
* Tag greater than or equal to queue_depth is for setting up
* flush request.
- *
- * Ditto for exit/teardown.
*/
- init_request_fn *init_request;
- exit_request_fn *exit_request;
- reinit_request_fn *reinit_request;
- /* Called from inside blk_get_request() */
- void (*initialize_rq_fn)(struct request *rq);
+ int (*init_request)(struct blk_mq_tag_set *set, struct request *,
+ unsigned int, unsigned int);
+ /**
+ * @exit_request: Ditto for exit/teardown.
+ */
+ void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
+ unsigned int);
- map_queues_fn *map_queues;
+ /**
+ * @cleanup_rq: Called before freeing one request which isn't completed
+ * yet, and usually for freeing the driver private data.
+ */
+ void (*cleanup_rq)(struct request *);
+
+ /**
+ * @busy: If set, returns whether or not this queue currently is busy.
+ */
+ bool (*busy)(struct request_queue *);
+
+ /**
+	 * @map_queues: This allows drivers to specify their own queue mapping
+	 * by overriding the setup-time function that builds the mq_map.
+ */
+ void (*map_queues)(struct blk_mq_tag_set *set);
#ifdef CONFIG_BLK_DEBUG_FS
- /*
- * Used by the debugfs implementation to show driver-specific
+ /**
+ * @show_rq: Used by the debugfs implementation to show driver-specific
* information about a request.
*/
void (*show_rq)(struct seq_file *m, struct request *rq);
@@ -160,18 +662,30 @@ struct blk_mq_ops {
enum {
BLK_MQ_F_SHOULD_MERGE = 1 << 0,
- BLK_MQ_F_TAG_SHARED = 1 << 1,
- BLK_MQ_F_SG_MERGE = 1 << 2,
+ BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
+ /*
+ * Set when this device requires underlying blk-mq device for
+ * completing IO:
+ */
+ BLK_MQ_F_STACKING = 1 << 2,
+ BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
BLK_MQ_F_BLOCKING = 1 << 5,
+ /* Do not allow an I/O scheduler to be configured. */
BLK_MQ_F_NO_SCHED = 1 << 6,
+ /*
+ * Select 'none' during queue registration in case of a single hwq
+ * or shared hwqs instead of 'mq-deadline'.
+ */
+ BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 7,
BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
BLK_MQ_F_ALLOC_POLICY_BITS = 1,
BLK_MQ_S_STOPPED = 0,
BLK_MQ_S_TAG_ACTIVE = 1,
BLK_MQ_S_SCHED_RESTART = 2,
- BLK_MQ_S_TAG_WAITING = 3,
- BLK_MQ_S_START_ON_RUN = 4,
+
+ /* hw queue is inactive after all its CPUs become offline */
+ BLK_MQ_S_INACTIVE = 3,
BLK_MQ_MAX_DEPTH = 10240,
@@ -184,31 +698,81 @@ enum {
((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
+#define BLK_MQ_NO_HCTX_IDX (-1U)
+
+struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
+ struct lock_class_key *lkclass);
+#define blk_mq_alloc_disk(set, queuedata) \
+({ \
+ static struct lock_class_key __key; \
+ \
+ __blk_mq_alloc_disk(set, queuedata, &__key); \
+})
+struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
+ struct lock_class_key *lkclass);
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
-struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
- struct request_queue *q);
-int blk_mq_register_dev(struct device *, struct request_queue *);
-void blk_mq_unregister_dev(struct device *, struct request_queue *);
+int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ struct request_queue *q);
+void blk_mq_destroy_queue(struct request_queue *);
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
+int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int queue_depth,
+ unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
-void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
-
void blk_mq_free_request(struct request *rq);
-bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
+
+bool blk_mq_queue_inflight(struct request_queue *q);
enum {
- BLK_MQ_REQ_NOWAIT = (1 << 0), /* return when out of requests */
- BLK_MQ_REQ_RESERVED = (1 << 1), /* allocate from reserved pool */
- BLK_MQ_REQ_INTERNAL = (1 << 2), /* allocate internal/sched tag */
+ /* return when out of requests */
+ BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0),
+ /* allocate from reserved pool */
+ BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1),
+ /* set RQF_PM */
+ BLK_MQ_REQ_PM = (__force blk_mq_req_flags_t)(1 << 2),
};
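
/*
 * A sketch of the smallest useful blk_mq_ops, wiring ->queue_rq() from the
 * callback documentation above to a hypothetical my_issue(). Returning an
 * error status here completes the request with that status.
 */
static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);
	if (my_issue(rq))
		return BLK_STS_IOERR;
	return BLK_STS_OK;
}

static const struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,
};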
-struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
- unsigned int flags);
+struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
+ blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
- unsigned int op, unsigned int flags, unsigned int hctx_idx);
-struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
+ blk_opf_t opf, blk_mq_req_flags_t flags,
+ unsigned int hctx_idx);
+
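
/*
 * A sketch of a probe path built on the single-queue helpers declared
 * above, reusing my_mq_ops from the previous sketch; struct my_data is
 * hypothetical.
 */
struct my_data {
	struct blk_mq_tag_set tag_set;
	struct gendisk *disk;
};

static int my_probe(struct my_data *d)
{
	struct gendisk *disk;
	int err;

	err = blk_mq_alloc_sq_tag_set(&d->tag_set, &my_mq_ops,
				      BLKDEV_DEFAULT_RQ,
				      BLK_MQ_F_SHOULD_MERGE);
	if (err)
		return err;

	disk = blk_mq_alloc_disk(&d->tag_set, d);
	if (IS_ERR(disk)) {
		blk_mq_free_tag_set(&d->tag_set);
		return PTR_ERR(disk);
	}
	d->disk = disk;
	return 0;
}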
+/*
+ * Tag address space map.
+ */
+struct blk_mq_tags {
+ unsigned int nr_tags;
+ unsigned int nr_reserved_tags;
+
+ atomic_t active_queues;
+
+ struct sbitmap_queue bitmap_tags;
+ struct sbitmap_queue breserved_tags;
+
+ struct request **rqs;
+ struct request **static_rqs;
+ struct list_head page_list;
+
+ /*
+ * used to clear request reference in rqs[] before freeing one
+ * request pool
+ */
+ spinlock_t lock;
+};
+
+static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
+ unsigned int tag)
+{
+ if (tag < tags->nr_tags) {
+ prefetch(tags->rqs[tag]);
+ return tags->rqs[tag];
+ }
+
+ return NULL;
+}
enum {
BLK_MQ_UNIQUE_TAG_BITS = 16,
@@ -227,20 +791,93 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
+/**
+ * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
+ * @rq: target request.
+ */
+static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
+{
+ return READ_ONCE(rq->state);
+}
+
+static inline int blk_mq_request_started(struct request *rq)
+{
+ return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
+}
+
+static inline int blk_mq_request_completed(struct request *rq)
+{
+ return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
+}
+
+/*
+ * Set the state to complete when completing a request from inside ->queue_rq.
+ * This is used by drivers that want to ensure special complete actions that
+ * need access to the request are called on failure, e.g. by nvme for
+ * multipathing.
+ */
+static inline void blk_mq_set_request_complete(struct request *rq)
+{
+ WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+}
+
+/*
+ * Complete the request directly instead of deferring it to softirq or
+ * completing it on another CPU. Useful in preemptible context rather than
+ * in an interrupt.
+ */
+static inline void blk_mq_complete_request_direct(struct request *rq,
+ void (*complete)(struct request *rq))
+{
+ WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+ complete(rq);
+}
-int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
+void blk_mq_end_request_batch(struct io_comp_batch *ib);
+
+/*
+ * Only need start/end time stamping if we have iostat or
+ * blk stats enabled, or using an IO scheduler.
+ */
+static inline bool blk_mq_need_time_stamp(struct request *rq)
+{
+ return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV));
+}
+
+static inline bool blk_mq_is_reserved_rq(struct request *rq)
+{
+ return rq->rq_flags & RQF_RESV;
+}
+
+/*
+ * Batched completions only work when there is no I/O error and no special
+ * ->end_io handler.
+ */
+static inline bool blk_mq_add_to_batch(struct request *req,
+ struct io_comp_batch *iob, int ioerror,
+ void (*complete)(struct io_comp_batch *))
+{
+ if (!iob || (req->rq_flags & RQF_ELV) || ioerror ||
+ (req->end_io && !blk_rq_is_passthrough(req)))
+ return false;
+
+ if (!iob->complete)
+ iob->complete = complete;
+ else if (iob->complete != complete)
+ return false;
+ iob->need_ts |= blk_mq_need_time_stamp(req);
+ rq_list_add(&iob->req_list, req);
+ return true;
+}
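
/*
 * A sketch of a driver ->poll() built on blk_mq_add_to_batch();
 * my_reap_completed() and my_complete_batch() are hypothetical.
 */
static int my_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct request *rq;
	int found = 0;

	while ((rq = my_reap_completed(hctx))) {
		found++;
		/* fall back to one-by-one completion if batching refuses */
		if (!blk_mq_add_to_batch(rq, iob, 0, my_complete_batch))
			blk_mq_complete_request(rq);
	}
	return found;
}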
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
- bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
-
-bool blk_mq_queue_stopped(struct request_queue *q);
+bool blk_mq_complete_request_remote(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
@@ -248,45 +885,336 @@ void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
+void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set);
+void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
+void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
-void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
+void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv);
+void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
-int blk_mq_reinit_tagset(struct blk_mq_tag_set *set);
-int blk_mq_map_queues(struct blk_mq_tag_set *set);
+void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
void blk_mq_quiesce_queue_nowait(struct request_queue *q);
-/*
+unsigned int blk_mq_rq_cpu(struct request *rq);
+
+bool __blk_should_fake_timeout(struct request_queue *q);
+static inline bool blk_should_fake_timeout(struct request_queue *q)
+{
+ if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
+ test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
+ return __blk_should_fake_timeout(q);
+ return false;
+}
+
+/**
+ * blk_mq_rq_from_pdu - cast a PDU to a request
+ * @pdu: the PDU (Protocol Data Unit) to be cast
+ *
+ * Return: request
+ *
* Driver command data is immediately after the request. So subtract request
- * size to get back to the original request, add request size to get the PDU.
+ * size to get back to the original request.
*/
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
return pdu - sizeof(struct request);
}
+
+/**
+ * blk_mq_rq_to_pdu - cast a request to a PDU
+ * @rq: the request to be cast
+ *
+ * Return: pointer to the PDU
+ *
+ * Driver command data is immediately after the request. So add request to get
+ * the PDU.
+ */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
return rq + 1;
}
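
/*
 * A sketch of per-request driver data ("PDU") via the two casts above;
 * struct my_cmd is hypothetical and would be sized via set->cmd_size.
 */
struct my_cmd {
	u32 opcode;
};

static void my_init_cmd(struct request *rq)
{
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->opcode = 0;
	WARN_ON_ONCE(blk_mq_rq_from_pdu(cmd) != rq);	/* inverse casts */
}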
#define queue_for_each_hw_ctx(q, hctx, i) \
- for ((i) = 0; (i) < (q)->nr_hw_queues && \
- ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
+ xa_for_each(&(q)->hctx_table, (i), (hctx))
#define hctx_for_each_ctx(hctx, ctx, i) \
for ((i) = 0; (i) < (hctx)->nr_ctx && \
({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
-#endif
+static inline void blk_mq_cleanup_rq(struct request *rq)
+{
+ if (rq->q->mq_ops->cleanup_rq)
+ rq->q->mq_ops->cleanup_rq(rq);
+}
+
+static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
+ unsigned int nr_segs)
+{
+ rq->nr_phys_segments = nr_segs;
+ rq->__data_len = bio->bi_iter.bi_size;
+ rq->bio = rq->biotail = bio;
+ rq->ioprio = bio_prio(bio);
+}
+
+void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
+ struct lock_class_key *key);
+
+static inline bool rq_is_sync(struct request *rq)
+{
+ return op_is_sync(rq->cmd_flags);
+}
+
+void blk_rq_init(struct request_queue *q, struct request *rq);
+int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+ struct bio_set *bs, gfp_t gfp_mask,
+ int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
+void blk_rq_unprep_clone(struct request *rq);
+blk_status_t blk_insert_cloned_request(struct request *rq);
+
+struct rq_map_data {
+ struct page **pages;
+ unsigned long offset;
+ unsigned short page_order;
+ unsigned short nr_entries;
+ bool null_mapped;
+ bool from_user;
+};
+
+int blk_rq_map_user(struct request_queue *, struct request *,
+ struct rq_map_data *, void __user *, unsigned long, gfp_t);
+int blk_rq_map_user_io(struct request *, struct rq_map_data *,
+ void __user *, unsigned long, gfp_t, bool, int, bool, int);
+int blk_rq_map_user_iov(struct request_queue *, struct request *,
+ struct rq_map_data *, const struct iov_iter *, gfp_t);
+int blk_rq_unmap_user(struct bio *);
+int blk_rq_map_kern(struct request_queue *, struct request *, void *,
+ unsigned int, gfp_t);
+int blk_rq_append_bio(struct request *rq, struct bio *bio);
+void blk_execute_rq_nowait(struct request *rq, bool at_head);
+blk_status_t blk_execute_rq(struct request *rq, bool at_head);
+bool blk_rq_is_poll(struct request *rq);
+
+struct req_iterator {
+ struct bvec_iter iter;
+ struct bio *bio;
+};
+
+#define __rq_for_each_bio(_bio, rq) \
+ if ((rq->bio)) \
+ for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
+
+#define rq_for_each_segment(bvl, _rq, _iter) \
+ __rq_for_each_bio(_iter.bio, _rq) \
+ bio_for_each_segment(bvl, _iter.bio, _iter.iter)
+
+#define rq_for_each_bvec(bvl, _rq, _iter) \
+ __rq_for_each_bio(_iter.bio, _rq) \
+ bio_for_each_bvec(bvl, _iter.bio, _iter.iter)
+
+#define rq_iter_last(bvec, _iter) \
+ (_iter.bio->bi_next == NULL && \
+ bio_iter_last(bvec, _iter.iter))
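
/*
 * A sketch of walking a request's data with the iterators above.
 */
static unsigned int my_count_data_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned int bytes = 0;

	rq_for_each_segment(bvec, rq, iter)
		bytes += bvec.bv_len;

	return bytes;	/* matches blk_rq_bytes(rq) for data requests */
}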
+
+/*
+ * blk_rq_pos() : the current sector
+ * blk_rq_bytes() : bytes left in the entire request
+ * blk_rq_cur_bytes() : bytes left in the current segment
+ * blk_rq_sectors() : sectors left in the entire request
+ * blk_rq_cur_sectors() : sectors left in the current segment
+ * blk_rq_stats_sectors() : sectors of the entire request used for stats
+ */
+static inline sector_t blk_rq_pos(const struct request *rq)
+{
+ return rq->__sector;
+}
+
+static inline unsigned int blk_rq_bytes(const struct request *rq)
+{
+ return rq->__data_len;
+}
+
+static inline int blk_rq_cur_bytes(const struct request *rq)
+{
+ if (!rq->bio)
+ return 0;
+ if (!bio_has_data(rq->bio)) /* dataless requests such as discard */
+ return rq->bio->bi_iter.bi_size;
+ return bio_iovec(rq->bio).bv_len;
+}
+
+static inline unsigned int blk_rq_sectors(const struct request *rq)
+{
+ return blk_rq_bytes(rq) >> SECTOR_SHIFT;
+}
+
+static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+{
+ return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
+}
+
+static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
+{
+ return rq->stats_sectors;
+}
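
All of the sector helpers above reduce to one shift by SECTOR_SHIFT. A minimal userspace sketch of that conversion (standalone C with SECTOR_SHIFT restated locally; not kernel code):

    #include <assert.h>

    #define SECTOR_SHIFT 9          /* 512-byte sectors, as in blk_types.h */

    /* Userspace mirror of blk_rq_sectors(): bytes -> whole sectors. */
    static unsigned int bytes_to_sectors(unsigned int bytes)
    {
            return bytes >> SECTOR_SHIFT;
    }

    int main(void)
    {
            assert(bytes_to_sectors(4096) == 8);
            /* The shift truncates: 513 bytes still report a single sector. */
            assert(bytes_to_sectors(513) == 1);
            return 0;
    }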
+
+/*
+ * Some commands like WRITE SAME have a payload or data transfer size which
+ * is different from the size of the request. Any driver that supports such
+ * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
+ * calculate the data transfer size.
+ */
+static inline unsigned int blk_rq_payload_bytes(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return rq->special_vec.bv_len;
+ return blk_rq_bytes(rq);
+}
+
+/*
+ * Return the first full biovec in the request. The caller must ensure that
+ * the request contains at least one bvec before calling this helper.
+ */
+static inline struct bio_vec req_bvec(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return rq->special_vec;
+ return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
+}
+
+static inline unsigned int blk_rq_count_bios(struct request *rq)
+{
+ unsigned int nr_bios = 0;
+ struct bio *bio;
+
+ __rq_for_each_bio(bio, rq)
+ nr_bios++;
+
+ return nr_bios;
+}
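
blk_rq_count_bios() is just a walk of the singly linked bi_next chain hanging off rq->bio. The same traversal in a self-contained sketch (struct fake_bio is an illustrative stand-in, not the kernel's struct bio):

    #include <assert.h>
    #include <stddef.h>

    struct fake_bio {                       /* stand-in for struct bio's bi_next link */
            struct fake_bio *bi_next;
    };

    /* Same shape as __rq_for_each_bio(): follow bi_next until NULL. */
    static unsigned int count_bios(struct fake_bio *head)
    {
            unsigned int nr = 0;
            struct fake_bio *bio;

            for (bio = head; bio; bio = bio->bi_next)
                    nr++;
            return nr;
    }

    int main(void)
    {
            struct fake_bio c = { NULL }, b = { &c }, a = { &b };

            assert(count_bios(&a) == 3);
            assert(count_bios(NULL) == 0);
            return 0;
    }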
+
+void blk_steal_bios(struct bio_list *list, struct request *rq);
+
+/*
+ * Request completion related functions.
+ *
+ * blk_update_request() completes the given number of bytes and updates
+ * the request without completing it.
+ */
+bool blk_update_request(struct request *rq, blk_status_t error,
+ unsigned int nr_bytes);
+void blk_abort_request(struct request *);
+
+/*
+ * Number of physical segments as sent to the device.
+ *
+ * Normally this is the number of discontiguous data segments sent by the
+ * submitter. But for data-less commands like discard we might have no
+ * actual data segments submitted, but the driver might have to add its
+ * own special payload. In that case we still return 1 here so that this
+ * special payload will be mapped.
+ */
+static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return 1;
+ return rq->nr_phys_segments;
+}
+
+/*
+ * Number of discard segments (or ranges) the driver needs to fill in.
+ * Each discard bio merged into a request is counted as one segment.
+ */
+static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
+{
+ return max_t(unsigned short, rq->nr_phys_segments, 1);
+}
+
+int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
+ struct scatterlist *sglist, struct scatterlist **last_sg);
+static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+ struct scatterlist *sglist)
+{
+ struct scatterlist *last_sg = NULL;
+
+ return __blk_rq_map_sg(q, rq, sglist, &last_sg);
+}
+void blk_dump_rq_flags(struct request *, char *);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+static inline unsigned int blk_rq_zone_no(struct request *rq)
+{
+ return disk_zone_no(rq->q->disk, blk_rq_pos(rq));
+}
+
+static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
+{
+ return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
+}
+
+bool blk_req_needs_zone_write_lock(struct request *rq);
+bool blk_req_zone_write_trylock(struct request *rq);
+void __blk_req_zone_write_lock(struct request *rq);
+void __blk_req_zone_write_unlock(struct request *rq);
+
+static inline void blk_req_zone_write_lock(struct request *rq)
+{
+ if (blk_req_needs_zone_write_lock(rq))
+ __blk_req_zone_write_lock(rq);
+}
+
+static inline void blk_req_zone_write_unlock(struct request *rq)
+{
+ if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
+ __blk_req_zone_write_unlock(rq);
+}
+
+static inline bool blk_req_zone_is_write_locked(struct request *rq)
+{
+ return rq->q->disk->seq_zones_wlock &&
+ test_bit(blk_rq_zone_no(rq), rq->q->disk->seq_zones_wlock);
+}
+
+static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
+{
+ if (!blk_req_needs_zone_write_lock(rq))
+ return true;
+ return !blk_req_zone_is_write_locked(rq);
+}
+#else /* CONFIG_BLK_DEV_ZONED */
+static inline bool blk_req_needs_zone_write_lock(struct request *rq)
+{
+ return false;
+}
+
+static inline void blk_req_zone_write_lock(struct request *rq)
+{
+}
+
+static inline void blk_req_zone_write_unlock(struct request *rq)
+{
+}
+static inline bool blk_req_zone_is_write_locked(struct request *rq)
+{
+ return false;
+}
+
+static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
+{
+ return true;
+}
+#endif /* CONFIG_BLK_DEV_ZONED */
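
The zone write lock boils down to one bit per zone in seq_zones_wlock, held for the lifetime of a dispatched write. A hedged userspace sketch of the try-lock discipline (plain, non-atomic bit operations for brevity; the kernel uses atomic bitops plus the RQF_ZONE_WRITE_LOCKED request flag):

    #include <assert.h>
    #include <stdbool.h>

    static unsigned long long zone_wlock;   /* one bit per zone, zones 0..63 */

    static bool zone_write_trylock(unsigned int zno)
    {
            unsigned long long mask = 1ULL << zno;

            if (zone_wlock & mask)
                    return false;           /* zone busy: hold back dispatch */
            zone_wlock |= mask;
            return true;
    }

    static void zone_write_unlock(unsigned int zno)
    {
            zone_wlock &= ~(1ULL << zno);
    }

    int main(void)
    {
            assert(zone_write_trylock(3));
            assert(!zone_write_trylock(3)); /* second write must wait */
            zone_write_unlock(3);
            assert(zone_write_trylock(3));
            return 0;
    }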
+
+#endif /* BLK_MQ_H */
diff --git a/include/linux/blk-pm.h b/include/linux/blk-pm.h
new file mode 100644
index 000000000000..2580e05a8ab6
--- /dev/null
+++ b/include/linux/blk-pm.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _BLK_PM_H_
+#define _BLK_PM_H_
+
+struct device;
+struct request_queue;
+
+/*
+ * block layer runtime pm functions
+ */
+#ifdef CONFIG_PM
+extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
+extern int blk_pre_runtime_suspend(struct request_queue *q);
+extern void blk_post_runtime_suspend(struct request_queue *q, int err);
+extern void blk_pre_runtime_resume(struct request_queue *q);
+extern void blk_post_runtime_resume(struct request_queue *q);
+extern void blk_set_runtime_active(struct request_queue *q);
+#else
+static inline void blk_pm_runtime_init(struct request_queue *q,
+ struct device *dev) {}
+#endif
+
+#endif /* _BLK_PM_H_ */
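
Note the asymmetry in the stubs above: only blk_pm_runtime_init() has a !CONFIG_PM fallback, so the suspend/resume entry points are meant to be called only from code that is itself conditional on CONFIG_PM. The idiom, reduced to a standalone sketch (HAVE_PM is a made-up stand-in for CONFIG_PM):

    #include <stdio.h>

    /* #define HAVE_PM */                   /* toggle to mimic CONFIG_PM=y */

    #ifdef HAVE_PM
    static void pm_runtime_init(void) { puts("runtime PM armed"); }
    static int pre_runtime_suspend(void) { return 0; }
    #else
    /* Unconditional callers get a no-op; PM-only callers compile away. */
    static void pm_runtime_init(void) { }
    #endif

    int main(void)
    {
            pm_runtime_init();              /* always safe to call */
    #ifdef HAVE_PM
            (void)pre_runtime_suspend();    /* only exists with HAVE_PM */
    #endif
            return 0;
    }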
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index d2eb87c84d82..740afe80f297 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Block data types and constants. Directly include this file only to
* break include dependency loop.
@@ -7,20 +8,93 @@
#include <linux/types.h>
#include <linux/bvec.h>
+#include <linux/device.h>
+#include <linux/ktime.h>
struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
-struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
+struct bio_crypt_ctx;
+
+/*
+ * The basic unit of block I/O is a sector. It is used in a number of contexts
+ * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
+ * bytes. Variables of type sector_t represent an offset or size that is a
+ * multiple of 512 bytes. Hence these two constants.
+ */
+#ifndef SECTOR_SHIFT
+#define SECTOR_SHIFT 9
+#endif
+#ifndef SECTOR_SIZE
+#define SECTOR_SIZE (1 << SECTOR_SHIFT)
+#endif
+
+#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
+#define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT)
+#define SECTOR_MASK (PAGE_SECTORS - 1)
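
On a system with 4 KiB pages these constants come out to 8 sectors per page with a 3-bit in-page sector offset. A quick standalone check (PAGE_SHIFT hardcoded to 12 here purely for illustration):

    #include <assert.h>

    #define SECTOR_SHIFT            9
    #define PAGE_SHIFT              12      /* assumed 4 KiB pages */
    #define PAGE_SECTORS_SHIFT      (PAGE_SHIFT - SECTOR_SHIFT)
    #define PAGE_SECTORS            (1 << PAGE_SECTORS_SHIFT)
    #define SECTOR_MASK             (PAGE_SECTORS - 1)

    int main(void)
    {
            assert(PAGE_SECTORS == 8);
            assert(SECTOR_MASK == 7);       /* sector index within a page */
            return 0;
    }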
+
+struct block_device {
+ sector_t bd_start_sect;
+ sector_t bd_nr_sectors;
+ struct gendisk * bd_disk;
+ struct request_queue * bd_queue;
+ struct disk_stats __percpu *bd_stats;
+ unsigned long bd_stamp;
+ bool bd_read_only; /* read-only policy */
+ u8 bd_partno;
+ bool bd_write_holder;
+ bool bd_has_submit_bio;
+ dev_t bd_dev;
+ atomic_t bd_openers;
+ spinlock_t bd_size_lock; /* for bd_inode->i_size updates */
+ struct inode * bd_inode; /* will die */
+ struct super_block * bd_super;
+ void * bd_claiming;
+ void * bd_holder;
+ /* The counter of freeze processes */
+ int bd_fsfreeze_count;
+ int bd_holders;
+ struct kobject *bd_holder_dir;
+
+ /* Mutex for freeze */
+ struct mutex bd_fsfreeze_mutex;
+ struct super_block *bd_fsfreeze_sb;
+
+ struct partition_meta_info *bd_meta_info;
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+ bool bd_make_it_fail;
+#endif
+ /*
+ * keep this out-of-line as it's both big and not needed in the fast
+ * path
+ */
+ struct device bd_device;
+} __randomize_layout;
+
+#define bdev_whole(_bdev) \
+ ((_bdev)->bd_disk->part0)
+
+#define dev_to_bdev(device) \
+ container_of((device), struct block_device, bd_device)
+
+#define bdev_kobj(_bdev) \
+ (&((_bdev)->bd_device.kobj))
/*
* Block error status values. See block/blk-core:blk_errors for the details.
+ * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
*/
+#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
+typedef u32 __bitwise blk_status_t;
+typedef u32 blk_short_t;
+#else
typedef u8 __bitwise blk_status_t;
+typedef u16 blk_short_t;
+#endif
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP ((__force blk_status_t)1)
#define BLK_STS_TIMEOUT ((__force blk_status_t)2)
@@ -36,12 +110,146 @@ typedef u8 __bitwise blk_status_t;
/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE ((__force blk_status_t)11)
+/*
+ * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
+ * and the bio would block (cf bio_wouldblock_error())
+ */
#define BLK_STS_AGAIN ((__force blk_status_t)12)
-struct blk_issue_stat {
- u64 stat;
+/*
+ * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
+ * device related resources are unavailable, but the driver can guarantee
+ * that the queue will be rerun in the future once resources become
+ * available again. This is typically the case for device specific
+ * resources that are consumed for IO. If the driver fails allocating these
+ * resources, we know that inflight (or pending) IO will free these
+ * resources upon completion.
+ *
+ * This is different from BLK_STS_RESOURCE in that it explicitly references
+ * a device specific resource. For resources of wider scope, allocation
+ * failure can happen without having pending IO. This means that we can't
+ * rely on request completions freeing these resources, as IO may not be in
+ * flight. Examples of that are kernel memory allocations, DMA mappings, or
+ * any other system wide resources.
+ */
+#define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13)
+
+/*
+ * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
+ * related resources are unavailable, but the driver can guarantee the queue
+ * will be rerun in the future once the resources become available again.
+ *
+ * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
+ * a zone specific resource and IO to a different zone on the same device could
+ * still be served. An example is a write-locked zone, where a read to the
+ * same zone can still be served.
+ */
+#define BLK_STS_ZONE_RESOURCE ((__force blk_status_t)14)
+
+/*
+ * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
+ * path if the device returns a status indicating that too many zone resources
+ * are currently open. The same command should be successful if resubmitted
+ * after the number of open zones decreases below the device's limit, which is
+ * reported in the request_queue's max_open_zones.
+ */
+#define BLK_STS_ZONE_OPEN_RESOURCE ((__force blk_status_t)15)
+
+/*
+ * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
+ * path if the device returns a status indicating that too many zone resources
+ * are currently active. The same command should be successful if resubmitted
+ * after the number of active zones decreases below the device's limit, which
+ * is reported in the request_queue's max_active_zones.
+ */
+#define BLK_STS_ZONE_ACTIVE_RESOURCE ((__force blk_status_t)16)
+
+/*
+ * BLK_STS_OFFLINE is returned from the driver when the target device is offline
+ * or is being taken offline. This could help differentiate the case where a
+ * device is intentionally being shut down from a real I/O error.
+ */
+#define BLK_STS_OFFLINE ((__force blk_status_t)17)
+
+/**
+ * blk_path_error - returns true if error may be path related
+ * @error: status the request was completed with
+ *
+ * Description:
+ * This classifies block error status into non-retryable errors and ones
+ * that may be successful if retried on a failover path.
+ *
+ * Return:
+ * %false - retrying failover path will not help
+ * %true - may succeed if retried
+ */
+static inline bool blk_path_error(blk_status_t error)
+{
+ switch (error) {
+ case BLK_STS_NOTSUPP:
+ case BLK_STS_NOSPC:
+ case BLK_STS_TARGET:
+ case BLK_STS_NEXUS:
+ case BLK_STS_MEDIUM:
+ case BLK_STS_PROTECTION:
+ return false;
+ }
+
+ /* Anything else could be a path failure, so should be retried */
+ return true;
+}
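
The classification errs on the side of retrying: everything not on the explicit not-retryable list is treated as potentially path-related. The same switch in a self-contained sketch (status codes restated as a plain enum; only a subset is shown):

    #include <assert.h>
    #include <stdbool.h>

    enum blk_sts { STS_OK, STS_NOTSUPP, STS_TIMEOUT, STS_NOSPC, STS_MEDIUM };

    static bool path_error(enum blk_sts error)
    {
            switch (error) {
            case STS_NOTSUPP:
            case STS_NOSPC:
            case STS_MEDIUM:
                    return false;           /* same outcome on any path */
            default:
                    return true;            /* a failover path may succeed */
            }
    }

    int main(void)
    {
            assert(!path_error(STS_NOTSUPP));
            assert(path_error(STS_TIMEOUT));
            return 0;
    }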
+
+/*
+ * From most significant bit:
+ * 1 bit: reserved for other usage, see below
+ * 12 bits: original size of bio
+ * 51 bits: issue time of bio
+ */
+#define BIO_ISSUE_RES_BITS 1
+#define BIO_ISSUE_SIZE_BITS 12
+#define BIO_ISSUE_RES_SHIFT (64 - BIO_ISSUE_RES_BITS)
+#define BIO_ISSUE_SIZE_SHIFT (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
+#define BIO_ISSUE_TIME_MASK ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
+#define BIO_ISSUE_SIZE_MASK \
+ (((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
+#define BIO_ISSUE_RES_MASK (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))
+
+/* Reserved bit for blk-throtl */
+#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)
+
+struct bio_issue {
+ u64 value;
};
+static inline u64 __bio_issue_time(u64 time)
+{
+ return time & BIO_ISSUE_TIME_MASK;
+}
+
+static inline u64 bio_issue_time(struct bio_issue *issue)
+{
+ return __bio_issue_time(issue->value);
+}
+
+static inline sector_t bio_issue_size(struct bio_issue *issue)
+{
+ return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
+}
+
+static inline void bio_issue_init(struct bio_issue *issue,
+ sector_t size)
+{
+ size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
+ issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
+ (ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
+ ((u64)size << BIO_ISSUE_SIZE_SHIFT));
+}
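
The packing above squeezes three fields into one u64: a reserved top bit, 12 bits of (truncated) size, and 51 bits of issue time. A self-contained round-trip check of that layout (ktime_get_ns() replaced by a fixed test value):

    #include <assert.h>
    #include <stdint.h>

    #define RES_BITS        1
    #define SIZE_BITS       12
    #define RES_SHIFT       (64 - RES_BITS)
    #define SIZE_SHIFT      (RES_SHIFT - SIZE_BITS) /* 51 */
    #define TIME_MASK       ((1ULL << SIZE_SHIFT) - 1)
    #define SIZE_MASK       (((1ULL << SIZE_BITS) - 1) << SIZE_SHIFT)
    #define RES_MASK        (~((1ULL << RES_SHIFT) - 1))

    static uint64_t issue_init(uint64_t old, uint64_t size, uint64_t now_ns)
    {
            size &= (1ULL << SIZE_BITS) - 1;        /* wider sizes truncate */
            return (old & RES_MASK) | (now_ns & TIME_MASK) | (size << SIZE_SHIFT);
    }

    int main(void)
    {
            uint64_t v = issue_init(0, 0x123, 0xabcdefULL);

            assert(((v & SIZE_MASK) >> SIZE_SHIFT) == 0x123);  /* bio_issue_size() */
            assert((v & TIME_MASK) == 0xabcdefULL);            /* bio_issue_time() */
            assert((v & RES_MASK) == 0);                       /* reserved bit clear */
            return 0;
    }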
+
+typedef __u32 __bitwise blk_opf_t;
+
+typedef unsigned int blk_qc_t;
+#define BLK_QC_T_NONE -1U
+
/*
* main unit of I/O for the block layer and lower layers (ie drivers and
* stacking drivers)
@@ -49,46 +257,37 @@ struct blk_issue_stat {
struct bio {
struct bio *bi_next; /* request queue link */
struct block_device *bi_bdev;
- blk_status_t bi_status;
- unsigned int bi_opf; /* bottom bits req flags,
- * top bits REQ_OP. Use
- * accessors.
+ blk_opf_t bi_opf; /* bottom bits REQ_OP, top bits
+ * req_flags.
*/
- unsigned short bi_flags; /* status, etc and bvec pool number */
+ unsigned short bi_flags; /* BIO_* below */
unsigned short bi_ioprio;
- unsigned short bi_write_hint;
+ blk_status_t bi_status;
+ atomic_t __bi_remaining;
struct bvec_iter bi_iter;
- /* Number of segments in this BIO after
- * physical address coalescing is performed.
- */
- unsigned int bi_phys_segments;
-
- /*
- * To keep track of the max segment size, we account for the
- * sizes of the first and last mergeable segments in this bio.
- */
- unsigned int bi_seg_front_size;
- unsigned int bi_seg_back_size;
-
- atomic_t __bi_remaining;
-
+ blk_qc_t bi_cookie;
bio_end_io_t *bi_end_io;
-
void *bi_private;
#ifdef CONFIG_BLK_CGROUP
/*
- * Optional ioc and css associated with this bio. Put on bio
- * release. Read comment on top of bio_associate_current().
+ * Represents the association of the css and request_queue for the bio.
+ * If a bio goes direct to device, it will not have a blkg as it will
+ * not have a request_queue associated with it. The reference is put
+ * on release of the bio.
*/
- struct io_context *bi_ioc;
- struct cgroup_subsys_state *bi_css;
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
- void *bi_cg_private;
- struct blk_issue_stat bi_issue_stat;
+ struct blkcg_gq *bi_blkg;
+ struct bio_issue bi_issue;
+#ifdef CONFIG_BLK_CGROUP_IOCOST
+ u64 bi_iocost_cost;
#endif
#endif
+
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ struct bio_crypt_ctx *bi_crypt_context;
+#endif
+
union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
struct bio_integrity_payload *bi_integrity; /* data integrity */
@@ -114,55 +313,42 @@ struct bio {
* double allocations for a small number of bio_vecs. This member
* MUST obviously be kept at the very end of the bio.
*/
- struct bio_vec bi_inline_vecs[0];
+ struct bio_vec bi_inline_vecs[];
};
#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
+#define BIO_MAX_SECTORS (UINT_MAX >> SECTOR_SHIFT)
/*
* bio flags
*/
-#define BIO_SEG_VALID 1 /* bi_phys_segments valid */
-#define BIO_CLONED 2 /* doesn't own data */
-#define BIO_BOUNCED 3 /* bio is a bounce bio */
-#define BIO_USER_MAPPED 4 /* contains user pages */
-#define BIO_NULL_MAPPED 5 /* contains invalid user pages */
-#define BIO_QUIET 6 /* Make BIO Quiet */
-#define BIO_CHAIN 7 /* chained bio, ->bi_remaining in effect */
-#define BIO_REFFED 8 /* bio has elevated ->bi_cnt */
-#define BIO_THROTTLED 9 /* This bio has already been subjected to
+enum {
+	BIO_NO_PAGE_REF,	/* don't put/release bvec pages */
+ BIO_CLONED, /* doesn't own data */
+ BIO_BOUNCED, /* bio is a bounce bio */
+ BIO_QUIET, /* Make BIO Quiet */
+ BIO_CHAIN, /* chained bio, ->bi_remaining in effect */
+ BIO_REFFED, /* bio has elevated ->bi_cnt */
+ BIO_BPS_THROTTLED, /* This bio has already been subjected to
* throttling rules. Don't do it again. */
-#define BIO_TRACE_COMPLETION 10 /* bio_endio() should trace the final completion
+ BIO_TRACE_COMPLETION, /* bio_endio() should trace the final completion
* of this bio. */
-/* See BVEC_POOL_OFFSET below before adding new flags */
-
-/*
- * We support 6 different bvec pools, the last one is magic in that it
- * is backed by a mempool.
- */
-#define BVEC_POOL_NR 6
-#define BVEC_POOL_MAX (BVEC_POOL_NR - 1)
+ BIO_CGROUP_ACCT, /* has been accounted to a cgroup */
+ BIO_QOS_THROTTLED, /* bio went through rq_qos throttle path */
+ BIO_QOS_MERGED, /* but went through rq_qos merge path */
+ BIO_REMAPPED,
+ BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */
+ BIO_FLAG_LAST
+};
-/*
- * Top 3 bits of bio flags indicate the pool the bvecs came from. We add
- * 1 to the actual index so that 0 indicates that there are no bvecs to be
- * freed.
- */
-#define BVEC_POOL_BITS (3)
-#define BVEC_POOL_OFFSET (16 - BVEC_POOL_BITS)
-#define BVEC_POOL_IDX(bio) ((bio)->bi_flags >> BVEC_POOL_OFFSET)
-#if (1<< BVEC_POOL_BITS) < (BVEC_POOL_NR+1)
-# error "BVEC_POOL_BITS is too small"
-#endif
+typedef __u32 __bitwise blk_mq_req_flags_t;
-/*
- * Flags starting here get preserved by bio_reset() - this includes
- * only BVEC_POOL_IDX()
- */
-#define BIO_RESET_BITS BVEC_POOL_OFFSET
+#define REQ_OP_BITS 8
+#define REQ_OP_MASK (__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
+#define REQ_FLAG_BITS 24
-/*
- * Operations and flags common to the bio and request structures.
+/**
+ * enum req_op - Operations common to the bio and request structures.
* We use 8 bits for encoding the operation, and the remaining 24 for flags.
*
* The least significant bit of the operation number indicates the data
@@ -174,38 +360,37 @@ struct bio {
 * If an operation does not transfer data the least significant bit has no
* meaning.
*/
-#define REQ_OP_BITS 8
-#define REQ_OP_MASK ((1 << REQ_OP_BITS) - 1)
-#define REQ_FLAG_BITS 24
-
-enum req_opf {
+enum req_op {
/* read sectors from the device */
- REQ_OP_READ = 0,
+ REQ_OP_READ = (__force blk_opf_t)0,
/* write sectors to the device */
- REQ_OP_WRITE = 1,
+ REQ_OP_WRITE = (__force blk_opf_t)1,
/* flush the volatile write cache */
- REQ_OP_FLUSH = 2,
+ REQ_OP_FLUSH = (__force blk_opf_t)2,
/* discard sectors */
- REQ_OP_DISCARD = 3,
- /* get zone information */
- REQ_OP_ZONE_REPORT = 4,
+ REQ_OP_DISCARD = (__force blk_opf_t)3,
/* securely erase sectors */
- REQ_OP_SECURE_ERASE = 5,
- /* seset a zone write pointer */
- REQ_OP_ZONE_RESET = 6,
- /* write the same sector many times */
- REQ_OP_WRITE_SAME = 7,
+ REQ_OP_SECURE_ERASE = (__force blk_opf_t)5,
/* write the zero filled sector many times */
- REQ_OP_WRITE_ZEROES = 9,
+ REQ_OP_WRITE_ZEROES = (__force blk_opf_t)9,
+ /* Open a zone */
+ REQ_OP_ZONE_OPEN = (__force blk_opf_t)10,
+ /* Close a zone */
+ REQ_OP_ZONE_CLOSE = (__force blk_opf_t)11,
+ /* Transition a zone to full */
+ REQ_OP_ZONE_FINISH = (__force blk_opf_t)12,
+ /* write data at the current zone write pointer */
+ REQ_OP_ZONE_APPEND = (__force blk_opf_t)13,
+ /* reset a zone write pointer */
+ REQ_OP_ZONE_RESET = (__force blk_opf_t)15,
+ /* reset all the zone present on the device */
+ REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)17,
- /* SCSI passthrough using struct scsi_request */
- REQ_OP_SCSI_IN = 32,
- REQ_OP_SCSI_OUT = 33,
/* Driver private requests */
- REQ_OP_DRV_IN = 34,
- REQ_OP_DRV_OUT = 35,
+ REQ_OP_DRV_IN = (__force blk_opf_t)34,
+ REQ_OP_DRV_OUT = (__force blk_opf_t)35,
- REQ_OP_LAST,
+ REQ_OP_LAST = (__force blk_opf_t)36,
};
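
The numbering is deliberate: the low bit of each opcode is the data direction, which is why op_is_write() below can be a single AND. A standalone restatement of that property (opcodes copied from the enum above):

    #include <assert.h>
    #include <stdbool.h>

    enum op { OP_READ = 0, OP_WRITE = 1, OP_FLUSH = 2, OP_ZONE_APPEND = 13 };

    /* Mirror of op_is_write(): bit 0 of the opcode is the direction. */
    static bool is_write(unsigned int op)
    {
            return op & 1;
    }

    int main(void)
    {
            assert(!is_write(OP_READ));
            assert(is_write(OP_WRITE));
            assert(is_write(OP_ZONE_APPEND));   /* writes at the zone pointer */
            /* FLUSH moves no data, so its low bit carries no meaning. */
            assert(!is_write(OP_FLUSH));
            return 0;
    }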
enum req_flag_bits {
@@ -223,30 +408,46 @@ enum req_flag_bits {
__REQ_PREFLUSH, /* request for cache flush */
__REQ_RAHEAD, /* read ahead, can fail anytime */
__REQ_BACKGROUND, /* background IO */
+ __REQ_NOWAIT, /* Don't wait if request will block */
+ __REQ_POLLED, /* caller polls for completion using bio_poll */
+ __REQ_ALLOC_CACHE, /* allocate IO from cache if available */
+ __REQ_SWAP, /* swap I/O */
+ __REQ_DRV, /* for driver use */
+ __REQ_FS_PRIVATE, /* for file system (submitter) use */
- /* command specific flags for REQ_OP_WRITE_ZEROES: */
+ /*
+ * Command specific flags, keep last:
+ */
+ /* for REQ_OP_WRITE_ZEROES: */
__REQ_NOUNMAP, /* do not free blocks when zeroing */
- __REQ_NOWAIT, /* Don't wait if request will block */
__REQ_NR_BITS, /* stops here */
};
-#define REQ_FAILFAST_DEV (1ULL << __REQ_FAILFAST_DEV)
-#define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT)
-#define REQ_FAILFAST_DRIVER (1ULL << __REQ_FAILFAST_DRIVER)
-#define REQ_SYNC (1ULL << __REQ_SYNC)
-#define REQ_META (1ULL << __REQ_META)
-#define REQ_PRIO (1ULL << __REQ_PRIO)
-#define REQ_NOMERGE (1ULL << __REQ_NOMERGE)
-#define REQ_IDLE (1ULL << __REQ_IDLE)
-#define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY)
-#define REQ_FUA (1ULL << __REQ_FUA)
-#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH)
-#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
-#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
-
-#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
-#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)
+#define REQ_FAILFAST_DEV \
+ (__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
+#define REQ_FAILFAST_TRANSPORT \
+ (__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
+#define REQ_FAILFAST_DRIVER \
+ (__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
+#define REQ_SYNC (__force blk_opf_t)(1ULL << __REQ_SYNC)
+#define REQ_META (__force blk_opf_t)(1ULL << __REQ_META)
+#define REQ_PRIO (__force blk_opf_t)(1ULL << __REQ_PRIO)
+#define REQ_NOMERGE (__force blk_opf_t)(1ULL << __REQ_NOMERGE)
+#define REQ_IDLE (__force blk_opf_t)(1ULL << __REQ_IDLE)
+#define REQ_INTEGRITY (__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
+#define REQ_FUA (__force blk_opf_t)(1ULL << __REQ_FUA)
+#define REQ_PREFLUSH (__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
+#define REQ_RAHEAD (__force blk_opf_t)(1ULL << __REQ_RAHEAD)
+#define REQ_BACKGROUND (__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
+#define REQ_NOWAIT (__force blk_opf_t)(1ULL << __REQ_NOWAIT)
+#define REQ_POLLED (__force blk_opf_t)(1ULL << __REQ_POLLED)
+#define REQ_ALLOC_CACHE (__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)
+#define REQ_SWAP (__force blk_opf_t)(1ULL << __REQ_SWAP)
+#define REQ_DRV (__force blk_opf_t)(1ULL << __REQ_DRV)
+#define REQ_FS_PRIVATE (__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)
+
+#define REQ_NOUNMAP (__force blk_opf_t)(1ULL << __REQ_NOUNMAP)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
@@ -254,28 +455,30 @@ enum req_flag_bits {
#define REQ_NOMERGE_FLAGS \
(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
-#define bio_op(bio) \
- ((bio)->bi_opf & REQ_OP_MASK)
-#define req_op(req) \
- ((req)->cmd_flags & REQ_OP_MASK)
+enum stat_group {
+ STAT_READ,
+ STAT_WRITE,
+ STAT_DISCARD,
+ STAT_FLUSH,
+
+ NR_STAT_GROUPS
+};
-/* obsolete, don't use in new code */
-static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
- unsigned op_flags)
+static inline enum req_op bio_op(const struct bio *bio)
{
- bio->bi_opf = op | op_flags;
+ return bio->bi_opf & REQ_OP_MASK;
}
-static inline bool op_is_write(unsigned int op)
+static inline bool op_is_write(blk_opf_t op)
{
- return (op & 1);
+ return !!(op & (__force blk_opf_t)1);
}
/*
* Check if the bio or request is one that needs special treatment in the
* flush state machine.
*/
-static inline bool op_is_flush(unsigned int op)
+static inline bool op_is_flush(blk_opf_t op)
{
return op & (REQ_FUA | REQ_PREFLUSH);
}
@@ -285,54 +488,48 @@ static inline bool op_is_flush(unsigned int op)
* PREFLUSH flag. Other operations may be marked as synchronous using the
* REQ_SYNC flag.
*/
-static inline bool op_is_sync(unsigned int op)
+static inline bool op_is_sync(blk_opf_t op)
{
return (op & REQ_OP_MASK) == REQ_OP_READ ||
(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
-typedef unsigned int blk_qc_t;
-#define BLK_QC_T_NONE -1U
-#define BLK_QC_T_SHIFT 16
-#define BLK_QC_T_INTERNAL (1U << 31)
-
-static inline bool blk_qc_t_valid(blk_qc_t cookie)
+static inline bool op_is_discard(blk_opf_t op)
{
- return cookie != BLK_QC_T_NONE;
+ return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}
-static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num,
- bool internal)
-{
- blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT);
-
- if (internal)
- ret |= BLK_QC_T_INTERNAL;
-
- return ret;
-}
-
-static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
-{
- return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
-}
-
-static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
+/*
+ * Check if a bio or request operation is a zone management operation, with
+ * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
+ * due to its different handling in the block layer and device response in
+ * case of command failure.
+ */
+static inline bool op_is_zone_mgmt(enum req_op op)
{
- return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
+ switch (op & REQ_OP_MASK) {
+ case REQ_OP_ZONE_RESET:
+ case REQ_OP_ZONE_OPEN:
+ case REQ_OP_ZONE_CLOSE:
+ case REQ_OP_ZONE_FINISH:
+ return true;
+ default:
+ return false;
+ }
}
-static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
+static inline int op_stat_group(enum req_op op)
{
- return (cookie & BLK_QC_T_INTERNAL) != 0;
+ if (op_is_discard(op))
+ return STAT_DISCARD;
+ return op_is_write(op);
}
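
op_stat_group() leans on the same direction-bit trick: STAT_READ is 0 and STAT_WRITE is 1, so returning op_is_write(op) doubles as the bucket index, with discard split out first. A standalone restatement:

    #include <assert.h>

    enum { STAT_READ, STAT_WRITE, STAT_DISCARD, STAT_FLUSH, NR_STAT_GROUPS };
    enum { OP_READ = 0, OP_WRITE = 1, OP_DISCARD = 3 };

    static int stat_group(unsigned int op)
    {
            if (op == OP_DISCARD)
                    return STAT_DISCARD;    /* discards get their own bucket */
            return op & 1;                  /* 0 -> STAT_READ, 1 -> STAT_WRITE */
    }

    int main(void)
    {
            assert(stat_group(OP_READ) == STAT_READ);
            assert(stat_group(OP_WRITE) == STAT_WRITE);
            assert(stat_group(OP_DISCARD) == STAT_DISCARD);
            return 0;
    }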
struct blk_rq_stat {
- s64 mean;
+ u64 mean;
u64 min;
u64 max;
- s32 nr_samples;
- s32 nr_batch;
+ u32 nr_samples;
u64 batch;
};
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 25f6a0cb27d3..b441e633f4dd 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1,321 +1,277 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions Copyright (C) 1992 Drew Eckhardt
+ */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H
-#include <linux/sched.h>
-#include <linux/sched/clock.h>
-
-#ifdef CONFIG_BLOCK
-
-#include <linux/major.h>
-#include <linux/genhd.h>
+#include <linux/types.h>
+#include <linux/blk_types.h>
+#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
+#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
-#include <linux/pagemap.h>
-#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
-#include <linux/mempool.h>
-#include <linux/pfn.h>
#include <linux/bio.h>
-#include <linux/stringify.h>
#include <linux/gfp.h>
-#include <linux/bsg.h>
-#include <linux/smp.h>
+#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
-#include <linux/scatterlist.h>
#include <linux/blkzoned.h>
+#include <linux/sched.h>
+#include <linux/sbitmap.h>
+#include <linux/uuid.h>
+#include <linux/xarray.h>
struct module;
-struct scsi_ioctl_command;
-
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
-struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
+struct kiocb;
struct pr_ops;
-struct rq_wb;
+struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
+struct blk_crypto_profile;
-#define BLKDEV_MIN_RQ 4
-#define BLKDEV_MAX_RQ 128 /* Default maximum */
-
-/* Must be consisitent with blk_mq_poll_stats_bkt() */
-#define BLK_MQ_POLL_STATS_BKTS 16
+extern const struct device_type disk_type;
+extern struct device_type part_type;
+extern struct class block_class;
/*
* Maximum number of blkcg policies allowed to be registered concurrently.
* Defined here to simplify include dependency.
*/
-#define BLKCG_MAX_POLS 3
+#define BLKCG_MAX_POLS 6
-typedef void (rq_end_io_fn)(struct request *, blk_status_t);
+#define DISK_MAX_PARTS 256
+#define DISK_NAME_LEN 32
-#define BLK_RL_SYNCFULL (1U << 0)
-#define BLK_RL_ASYNCFULL (1U << 1)
+#define PARTITION_META_INFO_VOLNAMELTH 64
+/*
+ * Enough for the string representation of any kind of UUID plus a NUL terminator.
+ * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
+ */
+#define PARTITION_META_INFO_UUIDLTH (UUID_STRING_LEN + 1)
-struct request_list {
- struct request_queue *q; /* the queue this rl belongs to */
-#ifdef CONFIG_BLK_CGROUP
- struct blkcg_gq *blkg; /* blkg this request pool belongs to */
-#endif
- /*
- * count[], starved[], and wait[] are indexed by
- * BLK_RW_SYNC/BLK_RW_ASYNC
- */
- int count[2];
- int starved[2];
- mempool_t *rq_pool;
- wait_queue_head_t wait[2];
- unsigned int flags;
+struct partition_meta_info {
+ char uuid[PARTITION_META_INFO_UUIDLTH];
+ u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};
-/*
- * request flags */
-typedef __u32 __bitwise req_flags_t;
-
-/* elevator knows about this request */
-#define RQF_SORTED ((__force req_flags_t)(1 << 0))
-/* drive already may have started this one */
-#define RQF_STARTED ((__force req_flags_t)(1 << 1))
-/* uses tagged queueing */
-#define RQF_QUEUED ((__force req_flags_t)(1 << 2))
-/* may not be passed by ioscheduler */
-#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3))
-/* request for flush sequence */
-#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4))
-/* merge of different types, fail separately */
-#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5))
-/* track inflight for MQ */
-#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6))
-/* don't call prep for this one */
-#define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
-/* set for "ide_preempt" requests and also for requests for which the SCSI
- "quiesce" state must be ignored. */
-#define RQF_PREEMPT ((__force req_flags_t)(1 << 8))
-/* contains copies of user pages */
-#define RQF_COPY_USER ((__force req_flags_t)(1 << 9))
-/* vaguely specified driver internal error. Ignored by the block layer */
-#define RQF_FAILED ((__force req_flags_t)(1 << 10))
-/* don't warn about errors */
-#define RQF_QUIET ((__force req_flags_t)(1 << 11))
-/* elevator private data attached */
-#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
-/* account I/O stat */
-#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
-/* request came from our alloc pool */
-#define RQF_ALLOCED ((__force req_flags_t)(1 << 14))
-/* runtime pm request */
-#define RQF_PM ((__force req_flags_t)(1 << 15))
-/* on IO scheduler merge hash */
-#define RQF_HASHED ((__force req_flags_t)(1 << 16))
-/* IO stats tracking on */
-#define RQF_STATS ((__force req_flags_t)(1 << 17))
-/* Look at ->special_vec for the actual data payload instead of the
- bio chain. */
-#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
-
-/* flags that prevent us from merging requests: */
-#define RQF_NOMERGE_FLAGS \
- (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
-
-/*
- * Try to put the fields that are referenced together in the same cacheline.
+/**
+ * DOC: genhd capability flags
+ *
+ * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
+ * removable media. When set, the device remains present even when media is not
+ * inserted. Shall not be set for devices which are removed entirely when the
+ * media is removed.
+ *
+ * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
+ * doesn't appear in sysfs, and can't be opened from userspace or using
+ * blkdev_get*. Used for the underlying components of multipath devices.
+ *
+ * ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not
+ * scan for partitions from add_disk, and users can't add partitions manually.
*
- * If you modify this structure, make sure to update blk_rq_init() and
- * especially blk_mq_rq_ctx_init() to take care of the added fields.
*/
-struct request {
- struct list_head queuelist;
- union {
- struct call_single_data csd;
- u64 fifo_time;
- };
-
- struct request_queue *q;
- struct blk_mq_ctx *mq_ctx;
-
- int cpu;
- unsigned int cmd_flags; /* op and common flags */
- req_flags_t rq_flags;
+enum {
+ GENHD_FL_REMOVABLE = 1 << 0,
+ GENHD_FL_HIDDEN = 1 << 1,
+ GENHD_FL_NO_PART = 1 << 2,
+};
- int internal_tag;
+enum {
+ DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
+ DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
+};
- unsigned long atomic_flags;
+enum {
+ /* Poll even if events_poll_msecs is unset */
+ DISK_EVENT_FLAG_POLL = 1 << 0,
+ /* Forward events to udev */
+ DISK_EVENT_FLAG_UEVENT = 1 << 1,
+ /* Block event polling when open for exclusive write */
+ DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE = 1 << 2,
+};
- /* the following two fields are internal, NEVER access directly */
- unsigned int __data_len; /* total data len */
- int tag;
- sector_t __sector; /* sector cursor */
+struct disk_events;
+struct badblocks;
- struct bio *bio;
- struct bio *biotail;
+struct blk_integrity {
+ const struct blk_integrity_profile *profile;
+ unsigned char flags;
+ unsigned char tuple_size;
+ unsigned char interval_exp;
+ unsigned char tag_size;
+};
+struct gendisk {
/*
- * The hash is used inside the scheduler, and killed once the
- * request reaches the dispatch list. The ipi_list is only used
- * to queue the request for softirq completion, which is long
- * after the request has been unhashed (and even removed from
- * the dispatch list).
+ * major/first_minor/minors should not be set by any new driver; the
+ * block core will take care of allocating them automatically.
*/
- union {
- struct hlist_node hash; /* merge hash */
- struct list_head ipi_list;
- };
+ int major;
+ int first_minor;
+ int minors;
+
+ char disk_name[DISK_NAME_LEN]; /* name of major driver */
+
+ unsigned short events; /* supported events */
+ unsigned short event_flags; /* flags related to event processing */
+
+ struct xarray part_tbl;
+ struct block_device *part0;
+
+ const struct block_device_operations *fops;
+ struct request_queue *queue;
+ void *private_data;
+
+ struct bio_set bio_split;
+
+ int flags;
+ unsigned long state;
+#define GD_NEED_PART_SCAN 0
+#define GD_READ_ONLY 1
+#define GD_DEAD 2
+#define GD_NATIVE_CAPACITY 3
+#define GD_ADDED 4
+#define GD_SUPPRESS_PART_SCAN 5
+#define GD_OWNS_QUEUE 6
+
+ struct mutex open_mutex; /* open/close mutex */
+ unsigned open_partitions; /* number of open partitions */
+
+ struct backing_dev_info *bdi;
+ struct kobject queue_kobj; /* the queue/ directory */
+ struct kobject *slave_dir;
+#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
+ struct list_head slave_bdevs;
+#endif
+ struct timer_rand_state *random;
+ atomic_t sync_io; /* RAID */
+ struct disk_events *ev;
+#ifdef CONFIG_BLK_DEV_ZONED
/*
- * The rb_node is only used inside the io scheduler, requests
- * are pruned when moved to the dispatch queue. So let the
- * completion_data share space with the rb_node.
+ * Zoned block device information for request dispatch control.
+ * nr_zones is the total number of zones of the device. This is always
+ * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
+ * bits which indicates if a zone is conventional (bit set) or
+ * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
+ * bits which indicates if a zone is write locked, that is, if a write
+ * request targeting the zone was dispatched.
+ *
+ * Reads of this information must be protected with blk_queue_enter() /
+ * blk_queue_exit(). Modifying this information is only allowed while
+ * no requests are being processed. See also blk_mq_freeze_queue() and
+ * blk_mq_unfreeze_queue().
*/
- union {
- struct rb_node rb_node; /* sort/lookup */
- struct bio_vec special_vec;
- void *completion_data;
- int error_count; /* for legacy drivers, don't use */
- };
+ unsigned int nr_zones;
+ unsigned int max_open_zones;
+ unsigned int max_active_zones;
+ unsigned long *conv_zones_bitmap;
+ unsigned long *seq_zones_wlock;
+#endif /* CONFIG_BLK_DEV_ZONED */
- /*
- * Three pointers are available for the IO schedulers, if they need
- * more they have to dynamically allocate it. Flush requests are
- * never put on the IO scheduler. So let the flush fields share
- * space with the elevator data.
- */
- union {
- struct {
- struct io_cq *icq;
- void *priv[2];
- } elv;
-
- struct {
- unsigned int seq;
- struct list_head list;
- rq_end_io_fn *saved_end_io;
- } flush;
- };
-
- struct gendisk *rq_disk;
- struct hd_struct *part;
- unsigned long start_time;
- struct blk_issue_stat issue_stat;
-#ifdef CONFIG_BLK_CGROUP
- struct request_list *rl; /* rl this rq is alloced from */
- unsigned long long start_time_ns;
- unsigned long long io_start_time_ns; /* when passed to hardware */
+#if IS_ENABLED(CONFIG_CDROM)
+ struct cdrom_device_info *cdi;
#endif
- /* Number of scatter-gather DMA addr+len pairs after
- * physical address coalescing is performed.
- */
- unsigned short nr_phys_segments;
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
- unsigned short nr_integrity_segments;
-#endif
-
- unsigned short ioprio;
-
- unsigned int timeout;
-
- void *special; /* opaque pointer available for LLD use */
-
- unsigned int extra_len; /* length of alignment and padding */
-
- unsigned short write_hint;
-
- unsigned long deadline;
- struct list_head timeout_list;
+ int node_id;
+ struct badblocks *bb;
+ struct lockdep_map lockdep_map;
+ u64 diskseq;
/*
- * completion callback.
+ * Independent sector access ranges. This is always NULL for
+ * devices that do not have multiple independent access ranges.
*/
- rq_end_io_fn *end_io;
- void *end_io_data;
-
- /* for bidi */
- struct request *next_rq;
+ struct blk_independent_access_ranges *ia_ranges;
};
-static inline bool blk_rq_is_scsi(struct request *rq)
+static inline bool disk_live(struct gendisk *disk)
{
- return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT;
+ return !inode_unhashed(disk->part0->bd_inode);
}
-static inline bool blk_rq_is_private(struct request *rq)
+/**
+ * disk_openers - returns how many openers there are for a disk
+ * @disk: disk to check
+ *
+ * This returns the number of openers for a disk. Note that this value is only
+ * stable if disk->open_mutex is held.
+ *
+ * Note: Due to a quirk in the block layer open code, each open partition is
+ * only counted once even if there are multiple openers.
+ */
+static inline unsigned int disk_openers(struct gendisk *disk)
{
- return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT;
+ return atomic_read(&disk->part0->bd_openers);
}
-static inline bool blk_rq_is_passthrough(struct request *rq)
-{
- return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
-}
+/*
+ * The gendisk is refcounted by the part0 block_device, and the bd_device
+ * therein is also used for device model presentation in sysfs.
+ */
+#define dev_to_disk(device) \
+ (dev_to_bdev(device)->bd_disk)
+#define disk_to_dev(disk) \
+ (&((disk)->part0->bd_device))
+
+#if IS_REACHABLE(CONFIG_CDROM)
+#define disk_to_cdi(disk) ((disk)->cdi)
+#else
+#define disk_to_cdi(disk) NULL
+#endif
-static inline unsigned short req_get_ioprio(struct request *req)
+static inline dev_t disk_devt(struct gendisk *disk)
{
- return req->ioprio;
+ return MKDEV(disk->major, disk->first_minor);
}
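
disk_devt() packs major and first_minor into a dev_t. Assuming the kernel's usual 12/20 bit split (MINORBITS is restated here, not taken from this diff), the encoding round-trips like this:

    #include <assert.h>
    #include <stdint.h>

    #define MINORBITS 20            /* assumed dev_t layout: 12-bit major, 20-bit minor */
    #define MKDEV(ma, mi)   (((uint32_t)(ma) << MINORBITS) | (mi))
    #define MAJOR(dev)      ((dev) >> MINORBITS)
    #define MINOR(dev)      ((dev) & ((1U << MINORBITS) - 1))

    int main(void)
    {
            uint32_t dev = MKDEV(8, 16);    /* e.g. the second sd disk */

            assert(MAJOR(dev) == 8);
            assert(MINOR(dev) == 16);
            return 0;
    }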
-#include <linux/elevator.h>
-
-struct blk_queue_ctx;
-
-typedef void (request_fn_proc) (struct request_queue *q);
-typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
-typedef int (prep_rq_fn) (struct request_queue *, struct request *);
-typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
-
-struct bio_vec;
-typedef void (softirq_done_fn)(struct request *);
-typedef int (dma_drain_needed_fn)(struct request *);
-typedef int (lld_busy_fn) (struct request_queue *q);
-typedef int (bsg_job_fn) (struct bsg_job *);
-typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t);
-typedef void (exit_rq_fn)(struct request_queue *, struct request *);
-
-enum blk_eh_timer_return {
- BLK_EH_NOT_HANDLED,
- BLK_EH_HANDLED,
- BLK_EH_RESET_TIMER,
-};
-
-typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
-
-enum blk_queue_state {
- Queue_down,
- Queue_up,
-};
+static inline int blk_validate_block_size(unsigned long bsize)
+{
+ if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
+ return -EINVAL;
-struct blk_queue_tag {
- struct request **tag_index; /* map of busy tags */
- unsigned long *tag_map; /* bit map of free/busy tags */
- int max_depth; /* what we will send to device */
- int real_max_depth; /* what the array can hold */
- atomic_t refcnt; /* map can be shared */
- int alloc_policy; /* tag allocation policy */
- int next_tag; /* next tag */
-};
-#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
-#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
+ return 0;
+}
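
So a valid logical block size is any power of two from 512 bytes up to the page size. A standalone mirror of the check (PAGE_SIZE assumed to be 4096 for the sketch; the kernel returns -EINVAL on failure):

    #include <assert.h>
    #include <stdbool.h>

    #define PAGE_SIZE 4096                  /* assumed for the sketch */

    static bool is_power_of_2(unsigned long n)
    {
            return n != 0 && (n & (n - 1)) == 0;
    }

    static int validate_block_size(unsigned long bsize)
    {
            if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
                    return -1;              /* stands in for -EINVAL */
            return 0;
    }

    int main(void)
    {
            assert(validate_block_size(512) == 0);
            assert(validate_block_size(4096) == 0);
            assert(validate_block_size(1000) != 0); /* not a power of two */
            assert(validate_block_size(8192) != 0); /* above PAGE_SIZE */
            return 0;
    }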
-#define BLK_SCSI_MAX_CMDS (256)
-#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
+static inline bool blk_op_is_passthrough(blk_opf_t op)
+{
+ op &= REQ_OP_MASK;
+ return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
+}
/*
* Zoned block device models (zoned limit).
+ *
+ * Note: This needs to be ordered from the least to the most severe
+ * restrictions for the inheritance in blk_stack_limits() to work.
*/
enum blk_zoned_model {
- BLK_ZONED_NONE, /* Regular block device */
- BLK_ZONED_HA, /* Host-aware zoned block device */
- BLK_ZONED_HM, /* Host-managed zoned block device */
+ BLK_ZONED_NONE = 0, /* Regular block device */
+ BLK_ZONED_HA, /* Host-aware zoned block device */
+ BLK_ZONED_HM, /* Host-managed zoned block device */
+};
+
+/*
+ * BLK_BOUNCE_NONE: never bounce (default)
+ * BLK_BOUNCE_HIGH: bounce all highmem pages
+ */
+enum blk_bounce {
+ BLK_BOUNCE_NONE,
+ BLK_BOUNCE_HIGH,
};
struct queue_limits {
- unsigned long bounce_pfn;
+ enum blk_bounce bounce;
unsigned long seg_boundary_mask;
unsigned long virt_boundary_mask;
@@ -323,50 +279,68 @@ struct queue_limits {
unsigned int max_dev_sectors;
unsigned int chunk_sectors;
unsigned int max_sectors;
+ unsigned int max_user_sectors;
unsigned int max_segment_size;
unsigned int physical_block_size;
+ unsigned int logical_block_size;
unsigned int alignment_offset;
unsigned int io_min;
unsigned int io_opt;
unsigned int max_discard_sectors;
unsigned int max_hw_discard_sectors;
- unsigned int max_write_same_sectors;
+ unsigned int max_secure_erase_sectors;
unsigned int max_write_zeroes_sectors;
+ unsigned int max_zone_append_sectors;
unsigned int discard_granularity;
unsigned int discard_alignment;
+ unsigned int zone_write_granularity;
- unsigned short logical_block_size;
unsigned short max_segments;
unsigned short max_integrity_segments;
unsigned short max_discard_segments;
unsigned char misaligned;
unsigned char discard_misaligned;
- unsigned char cluster;
unsigned char raid_partial_stripes_expensive;
enum blk_zoned_model zoned;
+
+ /*
+ * Drivers that set dma_alignment to less than 511 must be prepared to
+ * handle individual bvecs that are not a multiple of SECTOR_SIZE
+ * due to possible offsets.
+ */
+ unsigned int dma_alignment;
};
-#ifdef CONFIG_BLK_DEV_ZONED
+typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
+ void *data);
-struct blk_zone_report_hdr {
- unsigned int nr_zones;
- u8 padding[60];
-};
+void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model);
+
+#ifdef CONFIG_BLK_DEV_ZONED
-extern int blkdev_report_zones(struct block_device *bdev,
- sector_t sector, struct blk_zone *zones,
- unsigned int *nr_zones, gfp_t gfp_mask);
-extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
- sector_t nr_sectors, gfp_t gfp_mask);
+#define BLK_ALL_ZONES ((unsigned int)-1)
+int blkdev_report_zones(struct block_device *bdev, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
+unsigned int bdev_nr_zones(struct block_device *bdev);
+extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
+ sector_t sectors, sector_t nr_sectors,
+ gfp_t gfp_mask);
+int blk_revalidate_disk_zones(struct gendisk *disk,
+ void (*update_driver_data)(struct gendisk *disk));
extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
-extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg);
+extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
+static inline unsigned int bdev_nr_zones(struct block_device *bdev)
+{
+ return 0;
+}
+
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
fmode_t mode, unsigned int cmd,
unsigned long arg)
@@ -374,81 +348,63 @@ static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
return -ENOTTY;
}
-static inline int blkdev_reset_zones_ioctl(struct block_device *bdev,
- fmode_t mode, unsigned int cmd,
- unsigned long arg)
+static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
+ fmode_t mode, unsigned int cmd,
+ unsigned long arg)
{
return -ENOTTY;
}
#endif /* CONFIG_BLK_DEV_ZONED */
+/*
+ * Independent access ranges: struct blk_independent_access_range describes
+ * a range of contiguous sectors that can be accessed using device command
+ * execution resources that are independent from the resources used for
+ * other access ranges. This is typically found with single-LUN multi-actuator
+ * HDDs where each access range is served by a different set of heads.
+ * The set of independent ranges supported by the device is defined using
+ * struct blk_independent_access_ranges. The independent ranges must not overlap
+ * and must include all sectors within the disk capacity (no sector holes
+ * allowed).
+ * For a device with multiple ranges, requests targeting sectors in different
+ * ranges can be executed in parallel. A request can straddle an access range
+ * boundary.
+ */
+struct blk_independent_access_range {
+ struct kobject kobj;
+ sector_t sector;
+ sector_t nr_sectors;
+};
+
+struct blk_independent_access_ranges {
+ struct kobject kobj;
+ bool sysfs_registered;
+ unsigned int nr_ia_ranges;
+ struct blk_independent_access_range ia_range[];
+};
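
Because the ranges must tile the whole capacity with no holes, mapping a sector to its serving range is a simple interval lookup. A hedged sketch (illustrative struct and linear scan; real consumers could binary-search a sorted ia_range[]):

    #include <assert.h>
    #include <stdint.h>

    struct ia_range {                       /* echoes blk_independent_access_range */
            uint64_t sector;
            uint64_t nr_sectors;
    };

    /* Ranges cover all sectors, so any valid LBA hits exactly one range. */
    static int range_of(const struct ia_range *r, int nr, uint64_t sector)
    {
            int i;

            for (i = 0; i < nr; i++)
                    if (sector >= r[i].sector &&
                        sector < r[i].sector + r[i].nr_sectors)
                            return i;
            return -1;
    }

    int main(void)
    {
            /* e.g. a dual-actuator disk split into two halves */
            const struct ia_range r[] = { { 0, 1000 }, { 1000, 1000 } };

            assert(range_of(r, 2, 999) == 0);
            assert(range_of(r, 2, 1000) == 1);
            return 0;
    }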
+
struct request_queue {
- /*
- * Together with queue_head for cacheline sharing
- */
- struct list_head queue_head;
struct request *last_merge;
struct elevator_queue *elevator;
- int nr_rqs[2]; /* # allocated [a]sync rqs */
- int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
- atomic_t shared_hctx_restart;
+ struct percpu_ref q_usage_counter;
struct blk_queue_stats *stats;
- struct rq_wb *rq_wb;
-
- /*
- * If blkcg is not used, @q->root_rl serves all requests. If blkcg
- * is used, root blkg allocates from @q->root_rl and all other
- * blkgs from their own blkg->rl. Which one to use should be
- * determined using bio_request_list().
- */
- struct request_list root_rl;
-
- request_fn_proc *request_fn;
- make_request_fn *make_request_fn;
- prep_rq_fn *prep_rq_fn;
- unprep_rq_fn *unprep_rq_fn;
- softirq_done_fn *softirq_done_fn;
- rq_timed_out_fn *rq_timed_out_fn;
- dma_drain_needed_fn *dma_drain_needed;
- lld_busy_fn *lld_busy_fn;
- /* Called just after a request is allocated */
- init_rq_fn *init_rq_fn;
- /* Called just before a request is freed */
- exit_rq_fn *exit_rq_fn;
- /* Called from inside blk_get_request() */
- void (*initialize_rq_fn)(struct request *rq);
+ struct rq_qos *rq_qos;
const struct blk_mq_ops *mq_ops;
- unsigned int *mq_map;
-
/* sw queues */
struct blk_mq_ctx __percpu *queue_ctx;
- unsigned int nr_queues;
unsigned int queue_depth;
/* hw dispatch queues */
- struct blk_mq_hw_ctx **queue_hw_ctx;
+ struct xarray hctx_table;
unsigned int nr_hw_queues;
/*
- * Dispatch queue sorting
- */
- sector_t end_sector;
- struct request *boundary_rq;
-
- /*
- * Delayed queue handling
- */
- struct delayed_work delay_work;
-
- struct backing_dev_info *backing_dev_info;
-
- /*
* The queue owner gets to use this for whatever they like.
* ll_rw_blk doesn't touch it.
*/
@@ -458,6 +414,11 @@ struct request_queue {
* various queue flags, see QUEUE_* below
*/
unsigned long queue_flags;
+ /*
+ * Number of contexts that have called blk_set_pm_only(). If this
+ * counter is above zero then only RQF_PM requests are processed.
+ */
+ atomic_t pm_only;
/*
* ida allocated id for this queue. Used to index queues from
@@ -465,28 +426,16 @@ struct request_queue {
*/
int id;
- /*
- * queue needs bounce pages for pages above this limit
- */
- gfp_t bounce_gfp;
+ spinlock_t queue_lock;
- /*
- * protects queue structures from reentrancy. ->__queue_lock should
- * _never_ be used directly, it is queue private. always use
- * ->queue_lock.
- */
- spinlock_t __queue_lock;
- spinlock_t *queue_lock;
+ struct gendisk *disk;
- /*
- * queue kobject
- */
- struct kobject kobj;
+ refcount_t refs;
/*
* mq queue kobject
*/
- struct kobject mq_kobj;
+ struct kobject *mq_kobj;
#ifdef CONFIG_BLK_DEV_INTEGRITY
struct blk_integrity integrity;
@@ -494,63 +443,45 @@ struct request_queue {
#ifdef CONFIG_PM
struct device *dev;
- int rpm_status;
- unsigned int nr_pending;
+ enum rpm_status rpm_status;
#endif
/*
* queue settings
*/
unsigned long nr_requests; /* Max # of requests */
- unsigned int nr_congestion_on;
- unsigned int nr_congestion_off;
- unsigned int nr_batching;
- unsigned int dma_drain_size;
- void *dma_drain_buffer;
unsigned int dma_pad_mask;
- unsigned int dma_alignment;
-
- struct blk_queue_tag *queue_tags;
- struct list_head tag_busy_list;
-
- unsigned int nr_sorted;
- unsigned int in_flight[2];
- /*
- * Number of active block driver functions for which blk_drain_queue()
- * must wait. Must be incremented around functions that unlock the
- * queue_lock internally, e.g. scsi_request_fn().
- */
- unsigned int request_fn_active;
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ struct blk_crypto_profile *crypto_profile;
+ struct kobject *crypto_kobject;
+#endif
unsigned int rq_timeout;
- int poll_nsec;
-
- struct blk_stat_callback *poll_cb;
- struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS];
struct timer_list timeout;
struct work_struct timeout_work;
- struct list_head timeout_list;
+
+ atomic_t nr_active_requests_shared_tags;
+
+ struct blk_mq_tags *sched_shared_tags;
struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
+ struct mutex blkcg_mutex;
#endif
struct queue_limits limits;
- /*
- * sg stuff
- */
- unsigned int sg_timeout;
- unsigned int sg_reserved_size;
+ unsigned int required_elevator_features;
+
int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
- struct blk_trace *blk_trace;
+ struct blk_trace __rcu *blk_trace;
#endif
/*
* for flush operations
@@ -562,15 +493,16 @@ struct request_queue {
struct delayed_work requeue_work;
struct mutex sysfs_lock;
+ struct mutex sysfs_dir_lock;
- int bypass_depth;
- atomic_t mq_freeze_depth;
+ /*
+ * for reusing dead hctx instance in case of updating
+ * nr_hw_queues
+ */
+ struct list_head unused_hctx_list;
+ spinlock_t unused_hctx_lock;
-#if defined(CONFIG_BLK_DEV_BSG)
- bsg_job_fn *bsg_job_fn;
- int bsg_job_size;
- struct bsg_class_device bsg_dev;
-#endif
+ int mq_freeze_depth;
#ifdef CONFIG_BLK_DEV_THROTTLING
/* Throttle data */
@@ -578,198 +510,131 @@ struct request_queue {
#endif
struct rcu_head rcu_head;
wait_queue_head_t mq_freeze_wq;
- struct percpu_ref q_usage_counter;
- struct list_head all_q_node;
+ /*
+ * Protect concurrent access to q_usage_counter by
+ * percpu_ref_kill() and percpu_ref_reinit().
+ */
+ struct mutex mq_freeze_lock;
+
+ int quiesce_depth;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
- struct bio_set *bio_split;
-#ifdef CONFIG_BLK_DEBUG_FS
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
-#endif
+ struct dentry *rqos_debugfs_dir;
+ /*
+ * Serializes all debugfs metadata operations using the above dentries.
+ */
+ struct mutex debugfs_mutex;
bool mq_sysfs_init_done;
-
- size_t cmd_size;
- void *rq_alloc_data;
-
- struct work_struct release_work;
-
-#define BLK_MAX_WRITE_HINTS 5
- u64 write_hints[BLK_MAX_WRITE_HINTS];
};
-#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
-#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
-#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
-#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
-#define QUEUE_FLAG_DYING 5 /* queue being torn down */
-#define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */
-#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */
-#define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */
-#define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */
-#define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */
-#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT 13 /* do IO stats */
-#define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
-#define QUEUE_FLAG_SECERASE 17 /* supports secure erase */
-#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */
-#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
-#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
-#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/
-#define QUEUE_FLAG_POLL 22 /* IO polling enabled if set */
-#define QUEUE_FLAG_WC 23 /* Write back caching */
-#define QUEUE_FLAG_FUA 24 /* device supports FUA writes */
-#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */
-#define QUEUE_FLAG_DAX 26 /* device supports DAX */
-#define QUEUE_FLAG_STATS 27 /* track rq completion times */
-#define QUEUE_FLAG_POLL_STATS 28 /* collecting stats for hybrid polling */
-#define QUEUE_FLAG_REGISTERED 29 /* queue has been registered to a disk */
-#define QUEUE_FLAG_SCSI_PASSTHROUGH 30 /* queue supports SCSI commands */
-#define QUEUE_FLAG_QUIESCED 31 /* queue has been quiesced */
-
-#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_STACKABLE) | \
- (1 << QUEUE_FLAG_SAME_COMP) | \
- (1 << QUEUE_FLAG_ADD_RANDOM))
-
-#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_STACKABLE) | \
- (1 << QUEUE_FLAG_SAME_COMP) | \
- (1 << QUEUE_FLAG_POLL))
+/* Keep blk_queue_flag_name[] in sync with the definitions below */
+#define QUEUE_FLAG_STOPPED 0 /* queue is stopped */
+#define QUEUE_FLAG_DYING 1 /* queue being torn down */
+#define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */
+#define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */
+#define QUEUE_FLAG_NONROT 6 /* non-rotational device (SSD) */
+#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
+#define QUEUE_FLAG_IO_STAT 7 /* do disk/partitions IO accounting */
+#define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */
+#define QUEUE_FLAG_SYNCHRONOUS 11 /* always completes in submit context */
+#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
+#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
+#define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */
+#define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
+#define QUEUE_FLAG_WC 17 /* Write back caching */
+#define QUEUE_FLAG_FUA 18 /* device supports FUA writes */
+#define QUEUE_FLAG_DAX 19 /* device supports DAX */
+#define QUEUE_FLAG_STATS 20 /* track IO start and completion times */
+#define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */
+#define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */
+#define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */
+#define QUEUE_FLAG_ZONE_RESETALL 26 /* supports Zone Reset All */
+#define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */
+#define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */
+#define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */
+#define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */
+#define QUEUE_FLAG_SKIP_TAGSET_QUIESCE	31 /* quiesce_tagset skips the queue */
+
+#define QUEUE_FLAG_MQ_DEFAULT ((1UL << QUEUE_FLAG_IO_STAT) | \
+ (1UL << QUEUE_FLAG_SAME_COMP) | \
+ (1UL << QUEUE_FLAG_NOWAIT))
+
+void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
+void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
+bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
-/*
- * @q->queue_lock is set while a queue is being initialized. Since we know
- * that no other threads access the queue object before @q->queue_lock has
- * been set, it is safe to manipulate queue flags without holding the
- * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
- * blk_init_allocated_queue().
- */
-static inline void queue_lockdep_assert_held(struct request_queue *q)
-{
- if (q->queue_lock)
- lockdep_assert_held(q->queue_lock);
-}
-
-static inline void queue_flag_set_unlocked(unsigned int flag,
- struct request_queue *q)
-{
- __set_bit(flag, &q->queue_flags);
-}
-
-static inline int queue_flag_test_and_clear(unsigned int flag,
- struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
-
- if (test_bit(flag, &q->queue_flags)) {
- __clear_bit(flag, &q->queue_flags);
- return 1;
- }
-
- return 0;
-}
-
-static inline int queue_flag_test_and_set(unsigned int flag,
- struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
-
- if (!test_bit(flag, &q->queue_flags)) {
- __set_bit(flag, &q->queue_flags);
- return 0;
- }
-
- return 1;
-}
-
-static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
- __set_bit(flag, &q->queue_flags);
-}
-
-static inline void queue_flag_clear_unlocked(unsigned int flag,
- struct request_queue *q)
-{
- __clear_bit(flag, &q->queue_flags);
-}
-
-static inline int queue_in_flight(struct request_queue *q)
-{
- return q->in_flight[0] + q->in_flight[1];
-}
-
-static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
- __clear_bit(flag, &q->queue_flags);
-}
-
-#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
-#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
-#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
+#define blk_queue_stable_writes(q) \
+ test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
-#define blk_queue_stackable(q) \
- test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
-#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
-#define blk_queue_secure_erase(q) \
- (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
+#define blk_queue_zone_resetall(q) \
+ test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
-#define blk_queue_scsi_passthrough(q) \
- test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
+#define blk_queue_pci_p2pdma(q) \
+ test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
+#ifdef CONFIG_BLK_RQ_ALLOC_TIME
+#define blk_queue_rq_alloc_time(q) \
+ test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
+#else
+#define blk_queue_rq_alloc_time(q) false
+#endif
#define blk_noretry_request(rq) \
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
+#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
+#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
+#define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
+#define blk_queue_skip_tagset_quiesce(q) \
+ test_bit(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, &(q)->queue_flags)
-static inline bool blk_account_rq(struct request *rq)
-{
- return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
-}
-
-#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
-#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
-/* rq->queuelist of dequeued request must be list_empty() */
-#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist))
+extern void blk_set_pm_only(struct request_queue *q);
+extern void blk_clear_pm_only(struct request_queue *q);
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
-#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
+#define dma_map_bvec(dev, bv, dir, attrs) \
+ dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
+ (dir), (attrs))
-/*
- * Driver can handle struct request, if it either has an old style
- * request_fn defined, or is blk-mq based.
- */
-static inline bool queue_is_rq_based(struct request_queue *q)
+static inline bool queue_is_mq(struct request_queue *q)
{
- return q->request_fn || q->mq_ops;
+ return q->mq_ops;
}
-static inline unsigned int blk_queue_cluster(struct request_queue *q)
+#ifdef CONFIG_PM
+static inline enum rpm_status queue_rpm_status(struct request_queue *q)
+{
+ return q->rpm_status;
+}
+#else
+static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
- return q->limits.cluster;
+ return RPM_ACTIVE;
}
+#endif
static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
- return q->limits.zoned;
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
+ return q->limits.zoned;
+ return BLK_ZONED_NONE;
}
static inline bool blk_queue_is_zoned(struct request_queue *q)
@@ -783,64 +648,73 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
}
}
-static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
+#ifdef CONFIG_BLK_DEV_ZONED
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
- return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
+ return blk_queue_is_zoned(disk->queue) ? disk->nr_zones : 0;
}
-static inline bool rq_is_sync(struct request *rq)
+static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
- return op_is_sync(rq->cmd_flags);
+ if (!blk_queue_is_zoned(disk->queue))
+ return 0;
+ return sector >> ilog2(disk->queue->limits.chunk_sectors);
}
-static inline bool blk_rl_full(struct request_list *rl, bool sync)
+static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
{
- unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
-
- return rl->flags & flag;
+ if (!blk_queue_is_zoned(disk->queue))
+ return false;
+ if (!disk->conv_zones_bitmap)
+ return true;
+ return !test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
}
-static inline void blk_set_rl_full(struct request_list *rl, bool sync)
+static inline void disk_set_max_open_zones(struct gendisk *disk,
+ unsigned int max_open_zones)
{
- unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
-
- rl->flags |= flag;
+ disk->max_open_zones = max_open_zones;
}
-static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
+static inline void disk_set_max_active_zones(struct gendisk *disk,
+ unsigned int max_active_zones)
{
- unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
-
- rl->flags &= ~flag;
+ disk->max_active_zones = max_active_zones;
}
-static inline bool rq_mergeable(struct request *rq)
+static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
- if (blk_rq_is_passthrough(rq))
- return false;
-
- if (req_op(rq) == REQ_OP_FLUSH)
- return false;
-
- if (req_op(rq) == REQ_OP_WRITE_ZEROES)
- return false;
-
- if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
- return false;
- if (rq->rq_flags & RQF_NOMERGE_FLAGS)
- return false;
-
- return true;
+ return bdev->bd_disk->max_open_zones;
}
-static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
+static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
- if (bio_page(a) == bio_page(b) &&
- bio_offset(a) == bio_offset(b))
- return true;
+ return bdev->bd_disk->max_active_zones;
+}
+#else /* CONFIG_BLK_DEV_ZONED */
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
+{
+ return 0;
+}
+static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
+{
return false;
}
+static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
+{
+ return 0;
+}
+static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
+{
+ return 0;
+}
+
+static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
+{
+ return 0;
+}
+#endif /* CONFIG_BLK_DEV_ZONED */
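As a rough sketch of how the helpers above compose (the counting function is hypothetical; like disk_zone_no(), it assumes chunk_sectors is the zone size):

/* Sketch: count the sequential-write-required zones of a zoned disk. */
static unsigned int count_seq_zones(struct gendisk *disk)	/* hypothetical */
{
	sector_t zone_sectors = disk->queue->limits.chunk_sectors;
	unsigned int i, nr_seq = 0;

	for (i = 0; i < disk_nr_zones(disk); i++)
		if (disk_zone_is_seq(disk, i * zone_sectors))
			nr_seq++;
	return nr_seq;
}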
static inline unsigned int blk_queue_depth(struct request_queue *q)
{
@@ -851,411 +725,236 @@ static inline unsigned int blk_queue_depth(struct request_queue *q)
}
/*
- * q->prep_rq_fn return values
- */
-enum {
- BLKPREP_OK, /* serve it */
- BLKPREP_KILL, /* fatal error, kill, return -EIO */
- BLKPREP_DEFER, /* leave on queue */
- BLKPREP_INVALID, /* invalid command, kill, return -EREMOTEIO */
-};
-
-extern unsigned long blk_max_low_pfn, blk_max_pfn;
-
-/*
- * standard bounce addresses:
- *
- * BLK_BOUNCE_HIGH : bounce all highmem pages
- * BLK_BOUNCE_ANY : don't bounce anything
- * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary
- */
-
-#if BITS_PER_LONG == 32
-#define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT)
-#else
-#define BLK_BOUNCE_HIGH -1ULL
-#endif
-#define BLK_BOUNCE_ANY (-1ULL)
-#define BLK_BOUNCE_ISA (DMA_BIT_MASK(24))
-
-/*
* default timeout for SG_IO if none specified
*/
#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
#define BLK_MIN_SG_TIMEOUT (7 * HZ)
-struct rq_map_data {
- struct page **pages;
- int page_order;
- int nr_entries;
- unsigned long offset;
- int null_mapped;
- int from_user;
-};
-
-struct req_iterator {
- struct bvec_iter iter;
- struct bio *bio;
-};
-
/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio) \
for (; _bio; _bio = _bio->bi_next)
-#define __rq_for_each_bio(_bio, rq) \
- if ((rq->bio)) \
- for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
-
-#define rq_for_each_segment(bvl, _rq, _iter) \
- __rq_for_each_bio(_iter.bio, _rq) \
- bio_for_each_segment(bvl, _iter.bio, _iter.iter)
-#define rq_iter_last(bvec, _iter) \
- (_iter.bio->bi_next == NULL && \
- bio_iter_last(bvec, _iter.iter))
-
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
-#endif
-#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-extern void rq_flush_dcache_pages(struct request *rq);
-#else
-static inline void rq_flush_dcache_pages(struct request *rq)
+int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups);
+static inline int __must_check add_disk(struct gendisk *disk)
{
+ return device_add_disk(NULL, disk, NULL);
}
-#endif
+void del_gendisk(struct gendisk *gp);
+void invalidate_disk(struct gendisk *disk);
+void set_disk_ro(struct gendisk *disk, bool read_only);
+void disk_uevent(struct gendisk *disk, enum kobject_action action);
-#ifdef CONFIG_PRINTK
-#define vfs_msg(sb, level, fmt, ...) \
- __vfs_msg(sb, level, fmt, ##__VA_ARGS__)
-#else
-#define vfs_msg(sb, level, fmt, ...) \
-do { \
- no_printk(fmt, ##__VA_ARGS__); \
- __vfs_msg(sb, "", " "); \
-} while (0)
-#endif
+static inline int get_disk_ro(struct gendisk *disk)
+{
+ return disk->part0->bd_read_only ||
+ test_bit(GD_READ_ONLY, &disk->state);
+}
-extern int blk_register_queue(struct gendisk *disk);
-extern void blk_unregister_queue(struct gendisk *disk);
-extern blk_qc_t generic_make_request(struct bio *bio);
-extern void blk_rq_init(struct request_queue *q, struct request *rq);
-extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
-extern void blk_put_request(struct request *);
-extern void __blk_put_request(struct request_queue *, struct request *);
-extern struct request *blk_get_request(struct request_queue *, unsigned int op,
- gfp_t gfp_mask);
-extern void blk_requeue_request(struct request_queue *, struct request *);
-extern int blk_lld_busy(struct request_queue *q);
-extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
- struct bio_set *bs, gfp_t gfp_mask,
- int (*bio_ctr)(struct bio *, struct bio *, void *),
- void *data);
-extern void blk_rq_unprep_clone(struct request *rq);
-extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
- struct request *rq);
-extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
-extern void blk_delay_queue(struct request_queue *, unsigned long);
-extern void blk_queue_split(struct request_queue *, struct bio **);
-extern void blk_recount_segments(struct request_queue *, struct bio *);
-extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
-extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
- unsigned int, void __user *);
-extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
- unsigned int, void __user *);
-extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
- struct scsi_ioctl_command __user *);
-
-extern int blk_queue_enter(struct request_queue *q, bool nowait);
-extern void blk_queue_exit(struct request_queue *q);
-extern void blk_start_queue(struct request_queue *q);
-extern void blk_start_queue_async(struct request_queue *q);
-extern void blk_stop_queue(struct request_queue *q);
-extern void blk_sync_queue(struct request_queue *q);
-extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *q);
-extern void __blk_run_queue_uncond(struct request_queue *q);
-extern void blk_run_queue(struct request_queue *);
-extern void blk_run_queue_async(struct request_queue *q);
-extern int blk_rq_map_user(struct request_queue *, struct request *,
- struct rq_map_data *, void __user *, unsigned long,
- gfp_t);
-extern int blk_rq_unmap_user(struct bio *);
-extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
-extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, const struct iov_iter *,
- gfp_t);
-extern void blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
-extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
- struct request *, int, rq_end_io_fn *);
+static inline int bdev_read_only(struct block_device *bdev)
+{
+ return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
+}
-int blk_status_to_errno(blk_status_t status);
-blk_status_t errno_to_blk_status(int errno);
+bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
+bool disk_force_media_change(struct gendisk *disk, unsigned int events);
-bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
+void add_disk_randomness(struct gendisk *disk) __latent_entropy;
+void rand_initialize_disk(struct gendisk *disk);
-static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
+static inline sector_t get_start_sect(struct block_device *bdev)
{
- return bdev->bd_disk->queue; /* this is never NULL */
+ return bdev->bd_start_sect;
}
-/*
- * blk_rq_pos() : the current sector
- * blk_rq_bytes() : bytes left in the entire request
- * blk_rq_cur_bytes() : bytes left in the current segment
- * blk_rq_err_bytes() : bytes left till the next error boundary
- * blk_rq_sectors() : sectors left in the entire request
- * blk_rq_cur_sectors() : sectors left in the current segment
- */
-static inline sector_t blk_rq_pos(const struct request *rq)
+static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
- return rq->__sector;
+ return bdev->bd_nr_sectors;
}
-static inline unsigned int blk_rq_bytes(const struct request *rq)
+static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
- return rq->__data_len;
+ return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}
-static inline int blk_rq_cur_bytes(const struct request *rq)
+static inline sector_t get_capacity(struct gendisk *disk)
{
- return rq->bio ? bio_cur_bytes(rq->bio) : 0;
+ return bdev_nr_sectors(disk->part0);
}
-extern unsigned int blk_rq_err_bytes(const struct request *rq);
-
-static inline unsigned int blk_rq_sectors(const struct request *rq)
+static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
- return blk_rq_bytes(rq) >> 9;
+ return bdev_nr_sectors(sb->s_bdev) >>
+ (sb->s_blocksize_bits - SECTOR_SHIFT);
}
-static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
-{
- return blk_rq_cur_bytes(rq) >> 9;
-}
+int bdev_disk_changed(struct gendisk *disk, bool invalidate);
-/*
- * Some commands like WRITE SAME have a payload or data transfer size which
- * is different from the size of the request. Any driver that supports such
- * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
- * calculate the data transfer size.
+void put_disk(struct gendisk *disk);
+struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);
+
+/**
+ * blk_alloc_disk - allocate a gendisk structure
+ * @node_id: numa node to allocate on
+ *
+ * Allocate and pre-initialize a gendisk structure for use with BIO based
+ * drivers.
+ *
+ * Context: can sleep
*/
-static inline unsigned int blk_rq_payload_bytes(struct request *rq)
+#define blk_alloc_disk(node_id) \
+({ \
+ static struct lock_class_key __key; \
+ \
+ __blk_alloc_disk(node_id, &__key); \
+})
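Combined with add_disk() and set_capacity() above, a minimal bio-based driver skeleton might look as follows; all "myblk" names and the capacity are illustrative, and error unwinding is trimmed:

static void myblk_submit_bio(struct bio *bio)		/* hypothetical */
{
	/* ... service the bio ... */
	bio_endio(bio);
}

static const struct block_device_operations myblk_fops = {
	.owner		= THIS_MODULE,
	.submit_bio	= myblk_submit_bio,
};

static int myblk_create(void)
{
	struct gendisk *disk = blk_alloc_disk(NUMA_NO_NODE);

	if (!disk)
		return -ENOMEM;
	disk->fops = &myblk_fops;
	snprintf(disk->disk_name, sizeof(disk->disk_name), "myblk0");
	set_capacity(disk, 2048);	/* 1 MiB in 512-byte sectors */
	return add_disk(disk);		/* undo with del_gendisk() + put_disk() */
}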
+
+int __register_blkdev(unsigned int major, const char *name,
+ void (*probe)(dev_t devt));
+#define register_blkdev(major, name) \
+ __register_blkdev(major, name, NULL)
+void unregister_blkdev(unsigned int major, const char *name);
+
+bool bdev_check_media_change(struct block_device *bdev);
+int __invalidate_device(struct block_device *bdev, bool kill_dirty);
+void set_capacity(struct gendisk *disk, sector_t size);
+
+#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
+int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
+void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
+#else
+static inline int bd_link_disk_holder(struct block_device *bdev,
+ struct gendisk *disk)
{
- if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
- return rq->special_vec.bv_len;
- return blk_rq_bytes(rq);
+ return 0;
}
-
-static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
- int op)
+static inline void bd_unlink_disk_holder(struct block_device *bdev,
+ struct gendisk *disk)
{
- if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
- return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-
- if (unlikely(op == REQ_OP_WRITE_SAME))
- return q->limits.max_write_same_sectors;
-
- if (unlikely(op == REQ_OP_WRITE_ZEROES))
- return q->limits.max_write_zeroes_sectors;
-
- return q->limits.max_sectors;
}
+#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */
-/*
- * Return maximum size of a request at given offset. Only valid for
- * file system requests.
- */
-static inline unsigned int blk_max_size_offset(struct request_queue *q,
- sector_t offset)
-{
- if (!q->limits.chunk_sectors)
- return q->limits.max_sectors;
+dev_t part_devt(struct gendisk *disk, u8 partno);
+void inc_diskseq(struct gendisk *disk);
+dev_t blk_lookup_devt(const char *name, int partno);
+void blk_request_module(dev_t devt);
- return q->limits.chunk_sectors -
- (offset & (q->limits.chunk_sectors - 1));
-}
+extern int blk_register_queue(struct gendisk *disk);
+extern void blk_unregister_queue(struct gendisk *disk);
+void submit_bio_noacct(struct bio *bio);
+struct bio *bio_split_to_limits(struct bio *bio);
-static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
- sector_t offset)
-{
- struct request_queue *q = rq->q;
+extern int blk_lld_busy(struct request_queue *q);
+extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
+extern void blk_queue_exit(struct request_queue *q);
+extern void blk_sync_queue(struct request_queue *q);
- if (blk_rq_is_passthrough(rq))
- return q->limits.max_hw_sectors;
+/* Helper to convert REQ_OP_XXX to its string format XXX */
+extern const char *blk_op_str(enum req_op op);
- if (!q->limits.chunk_sectors ||
- req_op(rq) == REQ_OP_DISCARD ||
- req_op(rq) == REQ_OP_SECURE_ERASE)
- return blk_queue_get_max_sectors(q, req_op(rq));
+int blk_status_to_errno(blk_status_t status);
+blk_status_t errno_to_blk_status(int errno);
- return min(blk_max_size_offset(q, offset),
- blk_queue_get_max_sectors(q, req_op(rq)));
-}
+/* only poll the hardware once, don't continue until a completion was found */
+#define BLK_POLL_ONESHOT (1 << 0)
+int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
+int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
+ unsigned int flags);
-static inline unsigned int blk_rq_count_bios(struct request *rq)
+static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
- unsigned int nr_bios = 0;
- struct bio *bio;
+ return bdev->bd_queue; /* this is never NULL */
+}
- __rq_for_each_bio(bio, rq)
- nr_bios++;
+/* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */
+const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);
- return nr_bios;
+static inline unsigned int bio_zone_no(struct bio *bio)
+{
+ return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}
-/*
- * Request issue related functions.
- */
-extern struct request *blk_peek_request(struct request_queue *q);
-extern void blk_start_request(struct request *rq);
-extern struct request *blk_fetch_request(struct request_queue *q);
+static inline unsigned int bio_zone_is_seq(struct bio *bio)
+{
+ return disk_zone_is_seq(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
+}
/*
- * Request completion related functions.
- *
- * blk_update_request() completes given number of bytes and updates
- * the request without completing it.
- *
- * blk_end_request() and friends. __blk_end_request() must be called
- * with the request queue spinlock acquired.
- *
- * Several drivers define their own end_request and call
- * blk_end_request() for parts of the original function.
- * This prevents code duplication in drivers.
+ * Return how much of the chunk is left to be used for I/O at a given offset.
*/
-extern bool blk_update_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes);
-extern void blk_finish_request(struct request *rq, blk_status_t error);
-extern bool blk_end_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes);
-extern void blk_end_request_all(struct request *rq, blk_status_t error);
-extern bool __blk_end_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes);
-extern void __blk_end_request_all(struct request *rq, blk_status_t error);
-extern bool __blk_end_request_cur(struct request *rq, blk_status_t error);
-
-extern void blk_complete_request(struct request *);
-extern void __blk_complete_request(struct request *);
-extern void blk_abort_request(struct request *);
-extern void blk_unprep_request(struct request *);
+static inline unsigned int blk_chunk_sectors_left(sector_t offset,
+ unsigned int chunk_sectors)
+{
+ if (unlikely(!is_power_of_2(chunk_sectors)))
+ return chunk_sectors - sector_div(offset, chunk_sectors);
+ return chunk_sectors - (offset & (chunk_sectors - 1));
+}
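For example, with chunk_sectors = 256 and offset = 1000, the power-of-2 path gives 256 - (1000 & 255) = 256 - 232 = 24 sectors left in the chunk, while a non-power-of-2 chunk of 192 gives 192 - (1000 % 192) = 192 - 40 = 152.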
/*
* Access functions for manipulating queue properties
*/
-extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
- spinlock_t *lock, int node_id);
-extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
-extern int blk_init_allocated_queue(struct request_queue *);
-extern void blk_cleanup_queue(struct request_queue *);
-extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
-extern void blk_queue_bounce_limit(struct request_queue *, u64);
+void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_discard_segments(struct request_queue *,
unsigned short);
+void blk_queue_max_secure_erase_sectors(struct request_queue *q,
+ unsigned int max_sectors);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
unsigned int max_discard_sectors);
-extern void blk_queue_max_write_same_sectors(struct request_queue *q,
- unsigned int max_write_same_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
unsigned int max_write_zeroes_sectors);
-extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
+extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
+ unsigned int max_zone_append_sectors);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
+void blk_queue_zone_write_granularity(struct request_queue *q,
+ unsigned int size);
extern void blk_queue_alignment_offset(struct request_queue *q,
unsigned int alignment);
+void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
-extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t offset);
-extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
- sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
sector_t offset);
-extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
-extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
-extern int blk_queue_dma_drain(struct request_queue *q,
- dma_drain_needed_fn *dma_drain_needed,
- void *buf, unsigned int size);
-extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
-extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
-extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
-extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
-extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
-extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
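A sketch of how a driver probe path might apply these setters; the values are purely illustrative and the function name is hypothetical:

/* Sketch: typical queue-limits setup at probe time. */
static void mydrv_config_queue(struct request_queue *q)	/* hypothetical */
{
	blk_queue_logical_block_size(q, 4096);
	blk_queue_physical_block_size(q, 4096);
	blk_queue_max_hw_sectors(q, 2048);	/* 1 MiB per request */
	blk_queue_max_segments(q, 128);
	blk_queue_write_cache(q, true, true);	/* volatile cache, FUA capable */
}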
-/*
- * Number of physical segments as sent to the device.
- *
- * Normally this is the number of discontiguous data segments sent by the
- * submitter. But for data-less commands like discard we might have no
- * actual data segments submitted, but the driver might have to add its
- * own special payload. In that case we still return 1 here so that this
- * special payload will be mapped.
- */
-static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
-{
- if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
- return 1;
- return rq->nr_phys_segments;
-}
+struct blk_independent_access_ranges *
+disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
+void disk_set_independent_access_ranges(struct gendisk *disk,
+ struct blk_independent_access_ranges *iars);
/*
- * Number of discard segments (or ranges) the driver needs to fill in.
- * Each discard bio merged into a request is counted as one segment.
+ * Elevator features for blk_queue_required_elevator_features:
*/
-static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
-{
- return max_t(unsigned short, rq->nr_phys_segments, 1);
-}
+/* Supports zoned block devices sequential write constraint */
+#define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0)
-extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
-extern void blk_dump_rq_flags(struct request *, char *);
-extern long nr_blockdev_pages(void);
+extern void blk_queue_required_elevator_features(struct request_queue *q,
+ unsigned int features);
+extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
+ struct device *dev);
bool __must_check blk_get_queue(struct request_queue *);
-struct request_queue *blk_alloc_queue(gfp_t);
-struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
-extern void blk_set_queue_dying(struct request_queue *);
-/*
- * block layer runtime pm functions
- */
-#ifdef CONFIG_PM
-extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
-extern int blk_pre_runtime_suspend(struct request_queue *q);
-extern void blk_post_runtime_suspend(struct request_queue *q, int err);
-extern void blk_pre_runtime_resume(struct request_queue *q);
-extern void blk_post_runtime_resume(struct request_queue *q, int err);
-extern void blk_set_runtime_active(struct request_queue *q);
-#else
-static inline void blk_pm_runtime_init(struct request_queue *q,
- struct device *dev) {}
-static inline int blk_pre_runtime_suspend(struct request_queue *q)
-{
- return -ENOSYS;
-}
-static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
-static inline void blk_pre_runtime_resume(struct request_queue *q) {}
-static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
-static inline void blk_set_runtime_active(struct request_queue *q) {}
-#endif
+void blk_mark_disk_dead(struct gendisk *disk);
+#ifdef CONFIG_BLOCK
/*
* blk_plug permits building a queue of related requests by holding the I/O
* fragments for a short period. This allows merging of sequential requests
@@ -1264,17 +963,24 @@ static inline void blk_set_runtime_active(struct request_queue *q) {}
* as the lock contention for request_queue lock is reduced.
*
* It is ok not to disable preemption when adding the request to the plug list
- * or when attempting a merge, because blk_schedule_flush_list() will only flush
- * the plug list when the task sleeps by itself. For details, please see
- * schedule() where blk_schedule_flush_plug() is called.
+ * or when attempting a merge. For details, please see schedule() where
+ * blk_flush_plug() is called.
*/
struct blk_plug {
- struct list_head list; /* requests */
- struct list_head mq_list; /* blk-mq requests */
+ struct request *mq_list; /* blk-mq requests */
+
+ /* if ios_left is > 1, we can batch tag/rq allocations */
+ struct request *cached_rq;
+ unsigned short nr_ios;
+
+ unsigned short rq_count;
+
+ bool multiple_queues;
+ bool has_elevator;
+ bool nowait;
+
struct list_head cb_list; /* md requires an unplug callback */
};
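The canonical use is to bracket a burst of submissions so the block layer can merge and batch them; a sketch using submit_bio() from the bio layer, with a hypothetical helper name:

/* Sketch: submit a batch of bios under one plug. */
static void submit_batch(struct bio **bios, unsigned int nr)	/* hypothetical */
{
	struct blk_plug plug;
	unsigned int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);		/* flush the plugged requests */
}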
-#define BLK_MAX_REQUEST_COUNT 16
-#define BLK_PLUG_FLUSH_SIZE (128 * 1024)
struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
@@ -1286,67 +992,58 @@ struct blk_plug_cb {
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
void *data, int size);
extern void blk_start_plug(struct blk_plug *);
+extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);
-extern void blk_flush_plug_list(struct blk_plug *, bool);
-static inline void blk_flush_plug(struct task_struct *tsk)
+void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
+static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
- struct blk_plug *plug = tsk->plug;
-
if (plug)
- blk_flush_plug_list(plug, false);
+ __blk_flush_plug(plug, async);
}
-static inline void blk_schedule_flush_plug(struct task_struct *tsk)
-{
- struct blk_plug *plug = tsk->plug;
+int blkdev_issue_flush(struct block_device *bdev);
+long nr_blockdev_pages(void);
+#else /* CONFIG_BLOCK */
+struct blk_plug {
+};
- if (plug)
- blk_flush_plug_list(plug, true);
+static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
+ unsigned short nr_ios)
+{
}
-static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+static inline void blk_start_plug(struct blk_plug *plug)
{
- struct blk_plug *plug = tsk->plug;
+}
- return plug &&
- (!list_empty(&plug->list) ||
- !list_empty(&plug->mq_list) ||
- !list_empty(&plug->cb_list));
+static inline void blk_finish_plug(struct blk_plug *plug)
+{
}
-/*
- * tag stuff
- */
-extern int blk_queue_start_tag(struct request_queue *, struct request *);
-extern struct request *blk_queue_find_tag(struct request_queue *, int);
-extern void blk_queue_end_tag(struct request_queue *, struct request *);
-extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
-extern void blk_queue_free_tags(struct request_queue *);
-extern int blk_queue_resize_tags(struct request_queue *, int);
-extern void blk_queue_invalidate_tags(struct request_queue *);
-extern struct blk_queue_tag *blk_init_tags(int, int);
-extern void blk_free_tags(struct blk_queue_tag *);
+static inline void blk_flush_plug(struct blk_plug *plug, bool async)
+{
+}
-static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
- int tag)
+static inline int blkdev_issue_flush(struct block_device *bdev)
{
- if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
- return NULL;
- return bqt->tag_index[tag];
+ return 0;
}
-extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
-extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, struct page *page);
+static inline long nr_blockdev_pages(void)
+{
+ return 0;
+}
+#endif /* CONFIG_BLOCK */
-#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */
+extern void blk_io_schedule(void);
-extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
-extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, int flags,
- struct bio **biop);
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask);
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
+int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp);
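A filesystem caller might drive the discard path like this (a sketch; the helper name is hypothetical and the range is assumed to come from the caller):

/* Sketch: discard a sector range if the device supports it. */
static int try_discard(struct block_device *bdev, sector_t sector,
		       sector_t nr_sects)
{
	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;
	return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL);
}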
#define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */
@@ -1360,67 +1057,98 @@ extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
- return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
- nr_blocks << (sb->s_blocksize_bits - 9),
- gfp_mask, flags);
+ return blkdev_issue_discard(sb->s_bdev,
+ block << (sb->s_blocksize_bits -
+ SECTOR_SHIFT),
+ nr_blocks << (sb->s_blocksize_bits -
+ SECTOR_SHIFT),
+ gfp_mask);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask)
{
return blkdev_issue_zeroout(sb->s_bdev,
- block << (sb->s_blocksize_bits - 9),
- nr_blocks << (sb->s_blocksize_bits - 9),
+ block << (sb->s_blocksize_bits -
+ SECTOR_SHIFT),
+ nr_blocks << (sb->s_blocksize_bits -
+ SECTOR_SHIFT),
gfp_mask, 0);
}
-extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
+static inline bool bdev_is_partition(struct block_device *bdev)
+{
+ return bdev->bd_partno;
+}
enum blk_default_limits {
BLK_MAX_SEGMENTS = 128,
BLK_SAFE_MAX_SECTORS = 255,
- BLK_DEF_MAX_SECTORS = 2560,
BLK_MAX_SEGMENT_SIZE = 65536,
BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
};
-#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
+#define BLK_DEF_MAX_SECTORS 2560u
-static inline unsigned long queue_segment_boundary(struct request_queue *q)
+static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
return q->limits.seg_boundary_mask;
}
-static inline unsigned long queue_virt_boundary(struct request_queue *q)
+static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{
return q->limits.virt_boundary_mask;
}
-static inline unsigned int queue_max_sectors(struct request_queue *q)
+static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
return q->limits.max_sectors;
}
-static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
+static inline unsigned int queue_max_bytes(struct request_queue *q)
+{
+ return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
+}
+
+static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
return q->limits.max_hw_sectors;
}
-static inline unsigned short queue_max_segments(struct request_queue *q)
+static inline unsigned short queue_max_segments(const struct request_queue *q)
{
return q->limits.max_segments;
}
-static inline unsigned short queue_max_discard_segments(struct request_queue *q)
+static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{
return q->limits.max_discard_segments;
}
-static inline unsigned int queue_max_segment_size(struct request_queue *q)
+static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
return q->limits.max_segment_size;
}
-static inline unsigned short queue_logical_block_size(struct request_queue *q)
+static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
+{
+ const struct queue_limits *l = &q->limits;
+
+ return min(l->max_zone_append_sectors, l->max_sectors);
+}
+
+static inline unsigned int
+bdev_max_zone_append_sectors(struct block_device *bdev)
+{
+ return queue_max_zone_append_sectors(bdev_get_queue(bdev));
+}
+
+static inline unsigned int bdev_max_segments(struct block_device *bdev)
+{
+ return queue_max_segments(bdev_get_queue(bdev));
+}
+
+static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
int retval = 512;
@@ -1430,12 +1158,12 @@ static inline unsigned short queue_logical_block_size(struct request_queue *q)
return retval;
}
-static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
+static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
return queue_logical_block_size(bdev_get_queue(bdev));
}
-static inline unsigned int queue_physical_block_size(struct request_queue *q)
+static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{
return q->limits.physical_block_size;
}
@@ -1445,7 +1173,7 @@ static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
return queue_physical_block_size(bdev_get_queue(bdev));
}
-static inline unsigned int queue_io_min(struct request_queue *q)
+static inline unsigned int queue_io_min(const struct request_queue *q)
{
return q->limits.io_min;
}
@@ -1455,7 +1183,7 @@ static inline int bdev_io_min(struct block_device *bdev)
return queue_io_min(bdev_get_queue(bdev));
}
-static inline unsigned int queue_io_opt(struct request_queue *q)
+static inline unsigned int queue_io_opt(const struct request_queue *q)
{
return q->limits.io_opt;
}
@@ -1465,84 +1193,35 @@ static inline int bdev_io_opt(struct block_device *bdev)
return queue_io_opt(bdev_get_queue(bdev));
}
-static inline int queue_alignment_offset(struct request_queue *q)
+static inline unsigned int
+queue_zone_write_granularity(const struct request_queue *q)
{
- if (q->limits.misaligned)
- return -1;
-
- return q->limits.alignment_offset;
+ return q->limits.zone_write_granularity;
}
-static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
+static inline unsigned int
+bdev_zone_write_granularity(struct block_device *bdev)
{
- unsigned int granularity = max(lim->physical_block_size, lim->io_min);
- unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
-
- return (granularity + lim->alignment_offset - alignment) % granularity;
-}
-
-static inline int bdev_alignment_offset(struct block_device *bdev)
-{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q->limits.misaligned)
- return -1;
-
- if (bdev != bdev->bd_contains)
- return bdev->bd_part->alignment_offset;
-
- return q->limits.alignment_offset;
+ return queue_zone_write_granularity(bdev_get_queue(bdev));
}
-static inline int queue_discard_alignment(struct request_queue *q)
-{
- if (q->limits.discard_misaligned)
- return -1;
+int bdev_alignment_offset(struct block_device *bdev);
+unsigned int bdev_discard_alignment(struct block_device *bdev);
- return q->limits.discard_alignment;
-}
-
-static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
+static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
{
- unsigned int alignment, granularity, offset;
-
- if (!lim->max_discard_sectors)
- return 0;
-
- /* Why are these in bytes, not sectors? */
- alignment = lim->discard_alignment >> 9;
- granularity = lim->discard_granularity >> 9;
- if (!granularity)
- return 0;
-
- /* Offset of the partition start in 'granularity' sectors */
- offset = sector_div(sector, granularity);
-
- /* And why do we do this modulus *again* in blkdev_issue_discard()? */
- offset = (granularity + alignment - offset) % granularity;
-
- /* Turn it back into bytes, gaah */
- return offset << 9;
+ return bdev_get_queue(bdev)->limits.max_discard_sectors;
}
-static inline int bdev_discard_alignment(struct block_device *bdev)
+static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (bdev != bdev->bd_contains)
- return bdev->bd_part->discard_alignment;
-
- return q->limits.discard_alignment;
+ return bdev_get_queue(bdev)->limits.discard_granularity;
}
-static inline unsigned int bdev_write_same(struct block_device *bdev)
+static inline unsigned int
+bdev_max_secure_erase_sectors(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return q->limits.max_write_same_sectors;
-
- return 0;
+ return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
}
static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
@@ -1555,453 +1234,296 @@ static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
return 0;
}
-static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
+static inline bool bdev_nonrot(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return blk_queue_zoned_model(q);
-
- return BLK_ZONED_NONE;
+ return blk_queue_nonrot(bdev_get_queue(bdev));
}
-static inline bool bdev_is_zoned(struct block_device *bdev)
+static inline bool bdev_synchronous(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return blk_queue_is_zoned(q);
-
- return false;
+ return test_bit(QUEUE_FLAG_SYNCHRONOUS,
+ &bdev_get_queue(bdev)->queue_flags);
}
-static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
+static inline bool bdev_stable_writes(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return blk_queue_zone_sectors(q);
-
- return 0;
+ return test_bit(QUEUE_FLAG_STABLE_WRITES,
+ &bdev_get_queue(bdev)->queue_flags);
}
-static inline int queue_dma_alignment(struct request_queue *q)
+static inline bool bdev_write_cache(struct block_device *bdev)
{
- return q ? q->dma_alignment : 511;
+ return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
}
-static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
- unsigned int len)
+static inline bool bdev_fua(struct block_device *bdev)
{
- unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
- return !(addr & alignment) && !(len & alignment);
+ return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
}
-/* assumes size > 256 */
-static inline unsigned int blksize_bits(unsigned int size)
+static inline bool bdev_nowait(struct block_device *bdev)
{
- unsigned int bits = 8;
- do {
- bits++;
- size >>= 1;
- } while (size > 256);
- return bits;
+ return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
}
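A mount-time capability probe built on these predicates might read as follows (a sketch; the function name is hypothetical):

/* Sketch: log the I/O characteristics of a backing device. */
static void probe_bdev_features(struct block_device *bdev)
{
	if (bdev_nonrot(bdev))
		pr_debug("%pg: non-rotational\n", bdev);
	if (bdev_write_cache(bdev))
		pr_debug("%pg: volatile write cache%s\n", bdev,
			 bdev_fua(bdev) ? ", FUA supported" : "");
	if (bdev_nowait(bdev))
		pr_debug("%pg: supports REQ_NOWAIT\n", bdev);
}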
-static inline unsigned int block_size(struct block_device *bdev)
-{
- return bdev->bd_block_size;
-}
-
-static inline bool queue_flush_queueable(struct request_queue *q)
+static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
{
- return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+ return blk_queue_zoned_model(bdev_get_queue(bdev));
}
-typedef struct {struct page *v;} Sector;
-
-unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
-
-static inline void put_dev_sector(Sector p)
+static inline bool bdev_is_zoned(struct block_device *bdev)
{
- put_page(p.v);
+ return blk_queue_is_zoned(bdev_get_queue(bdev));
}
-static inline bool __bvec_gap_to_prev(struct request_queue *q,
- struct bio_vec *bprv, unsigned int offset)
+static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
{
- return offset ||
- ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+ return disk_zone_no(bdev->bd_disk, sec);
}
-/*
- * Check if adding a bio_vec after bprv with offset would create a gap in
- * the SG list. Most drivers don't care about this, but some do.
- */
-static inline bool bvec_gap_to_prev(struct request_queue *q,
- struct bio_vec *bprv, unsigned int offset)
+static inline bool bdev_op_is_zoned_write(struct block_device *bdev,
+ blk_opf_t op)
{
- if (!queue_virt_boundary(q))
+ if (!bdev_is_zoned(bdev))
return false;
- return __bvec_gap_to_prev(q, bprv, offset);
-}
-
-/*
- * Check if the two bvecs from two bios can be merged to one segment.
- * If yes, no need to check gap between the two bios since the 1st bio
- * and the 1st bvec in the 2nd bio can be handled in one segment.
- */
-static inline bool bios_segs_mergeable(struct request_queue *q,
- struct bio *prev, struct bio_vec *prev_last_bv,
- struct bio_vec *next_first_bv)
-{
- if (!BIOVEC_PHYS_MERGEABLE(prev_last_bv, next_first_bv))
- return false;
- if (!BIOVEC_SEG_BOUNDARY(q, prev_last_bv, next_first_bv))
- return false;
- if (prev->bi_seg_back_size + next_first_bv->bv_len >
- queue_max_segment_size(q))
- return false;
- return true;
-}
-static inline bool bio_will_gap(struct request_queue *q,
- struct request *prev_rq,
- struct bio *prev,
- struct bio *next)
-{
- if (bio_has_data(prev) && queue_virt_boundary(q)) {
- struct bio_vec pb, nb;
-
- /*
- * don't merge if the 1st bio starts with non-zero
- * offset, otherwise it is quite difficult to respect
- * sg gap limit. We work hard to merge a huge number of small
- * single bios in case of mkfs.
- */
- if (prev_rq)
- bio_get_first_bvec(prev_rq->bio, &pb);
- else
- bio_get_first_bvec(prev, &pb);
- if (pb.bv_offset)
- return true;
-
- /*
- * We don't need to worry about the situation that the
- * merged segment ends in unaligned virt boundary:
- *
- * - if 'pb' ends aligned, the merged segment ends aligned
- * - if 'pb' ends unaligned, the next bio must include
- * one single bvec of 'nb', otherwise the 'nb' can't
- * merge with 'pb'
- */
- bio_get_last_bvec(prev, &pb);
- bio_get_first_bvec(next, &nb);
-
- if (!bios_segs_mergeable(q, prev, &pb, &nb))
- return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
- }
-
- return false;
+ return op == REQ_OP_WRITE || op == REQ_OP_WRITE_ZEROES;
}
-static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
+static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
- return bio_will_gap(req->q, req, req->biotail, bio);
-}
+ struct request_queue *q = bdev_get_queue(bdev);
-static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
-{
- return bio_will_gap(req->q, NULL, bio, req->bio);
+ if (!blk_queue_is_zoned(q))
+ return 0;
+ return q->limits.chunk_sectors;
}
-int kblockd_schedule_work(struct work_struct *work);
-int kblockd_schedule_work_on(int cpu, struct work_struct *work);
-int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
-int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
-int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
-
-#ifdef CONFIG_BLK_CGROUP
-/*
- * This should not be using sched_clock(). A real patch is in progress
- * to fix this up, until that is in place we need to disable preemption
- * around sched_clock() in this function and set_io_start_time_ns().
- */
-static inline void set_start_time_ns(struct request *req)
+static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
+ sector_t sector)
{
- preempt_disable();
- req->start_time_ns = sched_clock();
- preempt_enable();
+ return sector & (bdev_zone_sectors(bdev) - 1);
}
-static inline void set_io_start_time_ns(struct request *req)
+static inline bool bdev_is_zone_start(struct block_device *bdev,
+ sector_t sector)
{
- preempt_disable();
- req->io_start_time_ns = sched_clock();
- preempt_enable();
+ return bdev_offset_from_zone_start(bdev, sector) == 0;
}
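Zoned-aware code can combine these to validate that an operation starts on a zone boundary; a sketch with a hypothetical helper:

/* Sketch: sanity-check a zone-granular operation's start sector. */
static int check_zone_aligned(struct block_device *bdev, sector_t sector)
{
	if (!bdev_is_zoned(bdev))
		return -EOPNOTSUPP;
	if (!bdev_is_zone_start(bdev, sector))
		return -EINVAL;
	return 0;
}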
-static inline uint64_t rq_start_time_ns(struct request *req)
+static inline int queue_dma_alignment(const struct request_queue *q)
{
- return req->start_time_ns;
+ return q ? q->limits.dma_alignment : 511;
}
-static inline uint64_t rq_io_start_time_ns(struct request *req)
-{
- return req->io_start_time_ns;
-}
-#else
-static inline void set_start_time_ns(struct request *req) {}
-static inline void set_io_start_time_ns(struct request *req) {}
-static inline uint64_t rq_start_time_ns(struct request *req)
-{
- return 0;
-}
-static inline uint64_t rq_io_start_time_ns(struct request *req)
+static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
{
- return 0;
+ return queue_dma_alignment(bdev_get_queue(bdev));
}
-#endif
-
-#define MODULE_ALIAS_BLOCKDEV(major,minor) \
- MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
-#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
- MODULE_ALIAS("block-major-" __stringify(major) "-*")
-
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-enum blk_integrity_flags {
- BLK_INTEGRITY_VERIFY = 1 << 0,
- BLK_INTEGRITY_GENERATE = 1 << 1,
- BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
- BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
-};
-
-struct blk_integrity_iter {
- void *prot_buf;
- void *data_buf;
- sector_t seed;
- unsigned int data_size;
- unsigned short interval;
- const char *disk_name;
-};
-
-typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
-
-struct blk_integrity_profile {
- integrity_processing_fn *generate_fn;
- integrity_processing_fn *verify_fn;
- const char *name;
-};
-
-extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
-extern void blk_integrity_unregister(struct gendisk *);
-extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
-extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
- struct scatterlist *);
-extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
-extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
- struct request *);
-extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
- struct bio *);
-
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+static inline bool bdev_iter_is_aligned(struct block_device *bdev,
+ struct iov_iter *iter)
{
- struct blk_integrity *bi = &disk->queue->integrity;
-
- if (!bi->profile)
- return NULL;
-
- return bi;
+ return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
+ bdev_logical_block_size(bdev) - 1);
}
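A direct-I/O path can use this to reject user buffers the device cannot address; a sketch with a hypothetical helper:

/* Sketch: alignment gate at the head of a dio submission path. */
static int dio_check_alignment(struct block_device *bdev,
			       struct iov_iter *iter)
{
	if (!bdev_iter_is_aligned(bdev, iter))
		return -EINVAL;	/* violates the device's DMA alignment rules */
	return 0;
}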
-static inline
-struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
-{
- return blk_get_integrity(bdev->bd_disk);
-}
-
-static inline bool blk_integrity_rq(struct request *rq)
+static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
+ unsigned int len)
{
- return rq->cmd_flags & REQ_INTEGRITY;
+ unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+ return !(addr & alignment) && !(len & alignment);
}
-static inline void blk_queue_max_integrity_segments(struct request_queue *q,
- unsigned int segs)
+/* assumes size > 256 */
+static inline unsigned int blksize_bits(unsigned int size)
{
- q->limits.max_integrity_segments = segs;
+ return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT;
}
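For instance, blksize_bits(512) = order_base_2(512 >> 9) + 9 = 0 + 9 = 9, and blksize_bits(4096) = order_base_2(8) + 9 = 12, matching what the removed loop produced for power-of-2 block sizes, without iterating.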
-static inline unsigned short
-queue_max_integrity_segments(struct request_queue *q)
+static inline unsigned int block_size(struct block_device *bdev)
{
- return q->limits.max_integrity_segments;
+ return 1 << bdev->bd_inode->i_blkbits;
}
-static inline bool integrity_req_gap_back_merge(struct request *req,
- struct bio *next)
-{
- struct bio_integrity_payload *bip = bio_integrity(req->bio);
- struct bio_integrity_payload *bip_next = bio_integrity(next);
-
- return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
- bip_next->bip_vec[0].bv_offset);
-}
+int kblockd_schedule_work(struct work_struct *work);
+int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
-static inline bool integrity_req_gap_front_merge(struct request *req,
- struct bio *bio)
-{
- struct bio_integrity_payload *bip = bio_integrity(bio);
- struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
+#define MODULE_ALIAS_BLOCKDEV(major,minor) \
+ MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
+#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
+ MODULE_ALIAS("block-major-" __stringify(major) "-*")
- return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
- bip_next->bip_vec[0].bv_offset);
-}
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
-#else /* CONFIG_BLK_DEV_INTEGRITY */
+bool blk_crypto_register(struct blk_crypto_profile *profile,
+ struct request_queue *q);
-struct bio;
-struct block_device;
-struct gendisk;
-struct blk_integrity;
+#else /* CONFIG_BLK_INLINE_ENCRYPTION */
-static inline int blk_integrity_rq(struct request *rq)
-{
- return 0;
-}
-static inline int blk_rq_count_integrity_sg(struct request_queue *q,
- struct bio *b)
-{
- return 0;
-}
-static inline int blk_rq_map_integrity_sg(struct request_queue *q,
- struct bio *b,
- struct scatterlist *s)
-{
- return 0;
-}
-static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
-{
- return NULL;
-}
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
-{
- return NULL;
-}
-static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
-{
- return 0;
-}
-static inline void blk_integrity_register(struct gendisk *d,
- struct blk_integrity *b)
-{
-}
-static inline void blk_integrity_unregister(struct gendisk *d)
-{
-}
-static inline void blk_queue_max_integrity_segments(struct request_queue *q,
- unsigned int segs)
-{
-}
-static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
-{
- return 0;
-}
-static inline bool blk_integrity_merge_rq(struct request_queue *rq,
- struct request *r1,
- struct request *r2)
-{
- return true;
-}
-static inline bool blk_integrity_merge_bio(struct request_queue *rq,
- struct request *r,
- struct bio *b)
+static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
+ struct request_queue *q)
{
return true;
}
-static inline bool integrity_req_gap_back_merge(struct request *req,
- struct bio *next)
-{
- return false;
-}
-static inline bool integrity_req_gap_front_merge(struct request *req,
- struct bio *bio)
-{
- return false;
-}
+#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
+
+enum blk_unique_id {
+ /* these match the Designator Types specified in SPC */
+ BLK_UID_T10 = 1,
+ BLK_UID_EUI64 = 2,
+ BLK_UID_NAA = 3,
+};
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
+#define NFL4_UFLG_MASK 0x0000003F
struct block_device_operations {
+ void (*submit_bio)(struct bio *bio);
+ int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
+ unsigned int flags);
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
- int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
- /* ->media_changed() is DEPRECATED, use ->check_events() instead */
- int (*media_changed) (struct gendisk *);
void (*unlock_native_capacity) (struct gendisk *);
- int (*revalidate_disk) (struct gendisk *);
int (*getgeo)(struct block_device *, struct hd_geometry *);
+ int (*set_read_only)(struct block_device *bdev, bool ro);
+ void (*free_disk)(struct gendisk *disk);
/* this callback is called with swap_lock and sometimes page table lock held */
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
+ int (*report_zones)(struct gendisk *, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
+ char *(*devnode)(struct gendisk *disk, umode_t *mode);
+ /* returns the length of the identifier or a negative errno: */
+ int (*get_unique_id)(struct gendisk *disk, u8 id[16],
+ enum blk_unique_id id_type);
struct module *owner;
const struct pr_ops *pr_ops;
+
+ /*
+	 * Special callback for probing the GPT entry at a given sector.
+	 * Needed by Android devices; used by the GPT scanner and the
+	 * MMC block driver.
+ */
+ int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};
-extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
- unsigned long);
-extern int bdev_read_page(struct block_device *, sector_t, struct page *);
-extern int bdev_write_page(struct block_device *, sector_t, struct page *,
- struct writeback_control *);
-#else /* CONFIG_BLOCK */
+#ifdef CONFIG_COMPAT
+extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
+ unsigned int, unsigned long);
+#else
+#define blkdev_compat_ptr_ioctl NULL
+#endif
-struct block_device;
+static inline void blk_wake_io_task(struct task_struct *waiter)
+{
+ /*
+ * If we're polling, the task itself is doing the completions. For
+ * that case, we don't need to signal a wakeup, it's enough to just
+ * mark us as RUNNING.
+ */
+ if (waiter == current)
+ __set_current_state(TASK_RUNNING);
+ else
+ wake_up_process(waiter);
+}
-/*
- * stubs for when the block layer is configured out
- */
-#define buffer_heads_over_limit 0
+unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
+ unsigned long start_time);
+void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
+ unsigned int sectors, unsigned long start_time);
-static inline long nr_blockdev_pages(void)
+unsigned long bio_start_io_acct(struct bio *bio);
+void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
+ struct block_device *orig_bdev);
+
+/**
+ * bio_end_io_acct - end I/O accounting for bio based drivers
+ * @bio: bio to end account for
+ * @start_time: start time returned by bio_start_io_acct()
+ */
+static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
- return 0;
+ return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}
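
For context, a bio-based driver is expected to bracket its processing with these accounting helpers. A minimal sketch, assuming a hypothetical driver whose ->submit_bio completes its work synchronously:

/* Sketch: disk-stats accounting in a bio-based driver's submit path. */
static void sketch_submit_bio(struct bio *bio)
{
	unsigned long start = bio_start_io_acct(bio);

	/* ... service the bio here (hypothetical driver work) ... */

	bio_end_io_acct(bio, start);	/* accounts against bio->bi_bdev */
	bio_endio(bio);
}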
-struct blk_plug {
-};
+int bdev_read_only(struct block_device *bdev);
+int set_blocksize(struct block_device *bdev, int size);
-static inline void blk_start_plug(struct blk_plug *plug)
+int lookup_bdev(const char *pathname, dev_t *dev);
+
+void blkdev_show(struct seq_file *seqf, off_t offset);
+
+#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
+#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
+#ifdef CONFIG_BLOCK
+#define BLKDEV_MAJOR_MAX 512
+#else
+#define BLKDEV_MAJOR_MAX 0
+#endif
+
+struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
+ void *holder);
+struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
+int bd_prepare_to_claim(struct block_device *bdev, void *holder);
+void bd_abort_claiming(struct block_device *bdev, void *holder);
+void blkdev_put(struct block_device *bdev, fmode_t mode);
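
As a usage note (not part of this patch), the get/put pair is symmetric in mode; a sketch with a hypothetical device path and THIS_MODULE as the claiming holder:

/* Sketch: claim a block device by path, use it, then release it. */
static int sketch_use_bdev(void)
{
	const fmode_t mode = FMODE_READ | FMODE_WRITE;
	struct block_device *bdev;

	bdev = blkdev_get_by_path("/dev/vdb", mode, THIS_MODULE /* holder */);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	/* ... read/write through bdev ... */
	blkdev_put(bdev, mode);
	return 0;
}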
+
+/* just for blk-cgroup, don't use elsewhere */
+struct block_device *blkdev_get_no_open(dev_t dev);
+void blkdev_put_no_open(struct block_device *bdev);
+
+struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
+void bdev_add(struct block_device *bdev, dev_t dev);
+struct block_device *I_BDEV(struct inode *inode);
+int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
+ loff_t lend);
+
+#ifdef CONFIG_BLOCK
+void invalidate_bdev(struct block_device *bdev);
+int sync_blockdev(struct block_device *bdev);
+int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
+int sync_blockdev_nowait(struct block_device *bdev);
+void sync_bdevs(bool wait);
+void bdev_statx_dioalign(struct inode *inode, struct kstat *stat);
+void printk_all_partitions(void);
+#else
+static inline void invalidate_bdev(struct block_device *bdev)
{
}
-
-static inline void blk_finish_plug(struct blk_plug *plug)
+static inline int sync_blockdev(struct block_device *bdev)
{
+ return 0;
}
-
-static inline void blk_flush_plug(struct task_struct *task)
+static inline int sync_blockdev_nowait(struct block_device *bdev)
{
+ return 0;
}
-
-static inline void blk_schedule_flush_plug(struct task_struct *task)
+static inline void sync_bdevs(bool wait)
{
}
-
-
-static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+static inline void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
{
- return false;
}
-
-static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
- sector_t *error_sector)
+static inline void printk_all_partitions(void)
{
- return 0;
}
-
#endif /* CONFIG_BLOCK */
-#endif
+int fsync_bdev(struct block_device *bdev);
+
+int freeze_bdev(struct block_device *bdev);
+int thaw_bdev(struct block_device *bdev);
+
+struct io_comp_batch {
+ struct request *req_list;
+ bool need_ts;
+ void (*complete)(struct io_comp_batch *);
+};
+
+#define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { }
+
+#endif /* _LINUX_BLKDEV_H */
diff --git a/include/linux/blkpg.h b/include/linux/blkpg.h
index bef124fde61e..1c91753c3c28 100644
--- a/include/linux/blkpg.h
+++ b/include/linux/blkpg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BLKPG_H
#define _LINUX_BLKPG_H
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index d2e908586e3d..cfbda114348c 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -1,11 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLKTRACE_H
#define BLKTRACE_H
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/relay.h>
#include <linux/compat.h>
#include <uapi/linux/blktrace_api.h>
#include <linux/list.h>
+#include <linux/blk_types.h>
#if defined(CONFIG_BLK_DEV_IO_TRACE)
@@ -22,16 +24,14 @@ struct blk_trace {
u32 pid;
u32 dev;
struct dentry *dir;
- struct dentry *dropped_file;
- struct dentry *msg_file;
struct list_head running_list;
atomic_t dropped;
};
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
-extern __printf(2, 3)
-void __trace_note_message(struct blk_trace *, const char *fmt, ...);
+__printf(3, 4) void __blk_trace_note_message(struct blk_trace *bt,
+ struct cgroup_subsys_state *css, const char *fmt, ...);
/**
* blk_add_trace_msg - Add a (simple) message to the blktrace stream
@@ -46,49 +46,49 @@ void __trace_note_message(struct blk_trace *, const char *fmt, ...);
 * NOTE: Cannot use 'static inline' due to the presence of varargs...
*
**/
-#define blk_add_trace_msg(q, fmt, ...) \
+#define blk_add_cgroup_trace_msg(q, css, fmt, ...) \
do { \
- struct blk_trace *bt = (q)->blk_trace; \
+ struct blk_trace *bt; \
+ \
+ rcu_read_lock(); \
+ bt = rcu_dereference((q)->blk_trace); \
if (unlikely(bt)) \
- __trace_note_message(bt, fmt, ##__VA_ARGS__); \
+ __blk_trace_note_message(bt, css, fmt, ##__VA_ARGS__);\
+ rcu_read_unlock(); \
} while (0)
+#define blk_add_trace_msg(q, fmt, ...) \
+ blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__)
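
Illustrative call (not from this patch): because the macro takes printf-style varargs, a driver can annotate the trace stream directly, e.g.:

/* Sketch: drop a note into the blktrace stream of request queue q. */
blk_add_trace_msg(q, "requeueing tag %d after timeout", tag);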
#define BLK_TN_MAX_MSG 128
static inline bool blk_trace_note_message_enabled(struct request_queue *q)
{
- struct blk_trace *bt = q->blk_trace;
- if (likely(!bt))
- return false;
- return bt->act_mask & BLK_TC_NOTIFY;
+ struct blk_trace *bt;
+ bool ret;
+
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
+ ret = bt && (bt->act_mask & BLK_TC_NOTIFY);
+ rcu_read_unlock();
+ return ret;
}
-extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
- void *data, size_t len);
+extern void blk_add_driver_data(struct request *rq, void *data, size_t len);
extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
struct block_device *bdev,
char __user *arg);
extern int blk_trace_startstop(struct request_queue *q, int start);
extern int blk_trace_remove(struct request_queue *q);
-extern void blk_trace_remove_sysfs(struct device *dev);
-extern int blk_trace_init_sysfs(struct device *dev);
-
-extern struct attribute_group blk_trace_attr_group;
#else /* !CONFIG_BLK_DEV_IO_TRACE */
# define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
# define blk_trace_shutdown(q) do { } while (0)
-# define blk_add_driver_data(q, rq, data, len) do {} while (0)
+# define blk_add_driver_data(rq, data, len) do {} while (0)
# define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY)
# define blk_trace_startstop(q, start) (-ENOTTY)
# define blk_trace_remove(q) (-ENOTTY)
# define blk_add_trace_msg(q, fmt, ...) do { } while (0)
-# define blk_trace_remove_sysfs(dev) do { } while (0)
+# define blk_add_cgroup_trace_msg(q, cg, fmt, ...) do { } while (0)
# define blk_trace_note_message_enabled(q) (false)
-static inline int blk_trace_init_sysfs(struct device *dev)
-{
- return 0;
-}
-
#endif /* CONFIG_BLK_DEV_IO_TRACE */
#ifdef CONFIG_COMPAT
@@ -106,11 +106,17 @@ struct compat_blk_user_trace_setup {
#endif
-extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);
+void blk_fill_rwbs(char *rwbs, blk_opf_t opf);
static inline sector_t blk_rq_trace_sector(struct request *rq)
{
- return blk_rq_is_passthrough(rq) ? 0 : blk_rq_pos(rq);
+ /*
+	 * Tracing should ignore the starting sector for passthrough requests
+	 * and for requests where the starting sector was never set.
+ */
+ if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1)
+ return 0;
+ return blk_rq_pos(rq);
}
static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)
diff --git a/include/linux/blockgroup_lock.h b/include/linux/blockgroup_lock.h
index 225bdb7daec7..511ab123a822 100644
--- a/include/linux/blockgroup_lock.h
+++ b/include/linux/blockgroup_lock.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BLOCKGROUP_LOCK_H
#define _LINUX_BLOCKGROUP_LOCK_H
/*
diff --git a/include/linux/bma150.h b/include/linux/bma150.h
index 97ade7cdc870..4d4a62d49341 100644
--- a/include/linux/bma150.h
+++ b/include/linux/bma150.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 2011 Bosch Sensortec GmbH
* Copyright (c) 2011 Unixphere
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _BMA150_H_
@@ -46,8 +33,8 @@ struct bma150_cfg {
 	unsigned char lg_hyst;		/* Low-G hysteresis */
unsigned char lg_dur; /* Low-G duration */
unsigned char lg_thres; /* Low-G threshold */
- unsigned char range; /* one of BMA0150_RANGE_xxx */
- unsigned char bandwidth; /* one of BMA0150_BW_xxx */
+ unsigned char range; /* one of BMA150_RANGE_xxx */
+ unsigned char bandwidth; /* one of BMA150_BW_xxx */
};
struct bma150_platform_data {
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
new file mode 100644
index 000000000000..ca73940e26df
--- /dev/null
+++ b/include/linux/bootconfig.h
@@ -0,0 +1,302 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_XBC_H
+#define _LINUX_XBC_H
+/*
+ * Extra Boot Config
+ * Copyright (C) 2019 Linaro Ltd.
+ * Author: Masami Hiramatsu <mhiramat@kernel.org>
+ */
+
+#ifdef __KERNEL__
+#include <linux/kernel.h>
+#include <linux/types.h>
+#else /* !__KERNEL__ */
+/*
+ * NOTE: This is only for tools/bootconfig, because tools/bootconfig will
+ * run the parser sanity test.
+ * This does NOT mean linux/bootconfig.h is available in user space.
+ * However, if you change this file, please make sure tools/bootconfig
+ * still builds and runs without issue.
+ */
+#endif
+
+#define BOOTCONFIG_MAGIC "#BOOTCONFIG\n"
+#define BOOTCONFIG_MAGIC_LEN 12
+#define BOOTCONFIG_ALIGN_SHIFT 2
+#define BOOTCONFIG_ALIGN (1 << BOOTCONFIG_ALIGN_SHIFT)
+#define BOOTCONFIG_ALIGN_MASK (BOOTCONFIG_ALIGN - 1)
+
+/**
+ * xbc_calc_checksum() - Calculate checksum of bootconfig
+ * @data: Bootconfig data.
+ * @size: The size of the bootconfig data.
+ *
+ * Calculate the checksum value of the bootconfig data.
+ * The checksum will be used with the BOOTCONFIG_MAGIC and the size for
+ * embedding the bootconfig in the initrd image.
+ */
+static inline __init uint32_t xbc_calc_checksum(void *data, uint32_t size)
+{
+ unsigned char *p = data;
+ uint32_t ret = 0;
+
+ while (size--)
+ ret += *p++;
+
+ return ret;
+}
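
To show how the checksum, size and magic fit together when a bootconfig is appended to an initrd, here is a sketch; the trailer layout assumed here (data, then 32-bit size, then 32-bit checksum, then the magic) mirrors the tools/bootconfig convention, and the buffer names are hypothetical:

/* Sketch: append a bootconfig trailer after the initrd payload at *p. */
static unsigned char * __init sketch_append_trailer(unsigned char *p,
						    const void *bootconfig_data,
						    uint32_t size)
{
	uint32_t csum = xbc_calc_checksum((void *)bootconfig_data, size);

	memcpy(p, bootconfig_data, size);	p += size;
	memcpy(p, &size, sizeof(size));		p += sizeof(size);
	memcpy(p, &csum, sizeof(csum));		p += sizeof(csum);
	memcpy(p, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN);	/* "#BOOTCONFIG\n" */
	return p + BOOTCONFIG_MAGIC_LEN;
}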
+
+/* XBC tree node */
+struct xbc_node {
+ uint16_t next;
+ uint16_t child;
+ uint16_t parent;
+ uint16_t data;
+} __attribute__ ((__packed__));
+
+#define XBC_KEY 0
+#define XBC_VALUE (1 << 15)
+/* Maximum size of boot config is 32KB - 1 */
+#define XBC_DATA_MAX (XBC_VALUE - 1)
+
+#define XBC_NODE_MAX 8192
+#define XBC_KEYLEN_MAX 256
+#define XBC_DEPTH_MAX 16
+
+/* Node tree access raw APIs */
+struct xbc_node * __init xbc_root_node(void);
+int __init xbc_node_index(struct xbc_node *node);
+struct xbc_node * __init xbc_node_get_parent(struct xbc_node *node);
+struct xbc_node * __init xbc_node_get_child(struct xbc_node *node);
+struct xbc_node * __init xbc_node_get_next(struct xbc_node *node);
+const char * __init xbc_node_get_data(struct xbc_node *node);
+
+/**
+ * xbc_node_is_value() - Test whether the node is a value node
+ * @node: An XBC node.
+ *
+ * Test whether @node is a value node. Returns true if it is, false if not.
+ */
+static inline __init bool xbc_node_is_value(struct xbc_node *node)
+{
+ return node->data & XBC_VALUE;
+}
+
+/**
+ * xbc_node_is_key() - Test whether the node is a key node
+ * @node: An XBC node.
+ *
+ * Test whether @node is a key node. Returns true if it is, false if not.
+ */
+static inline __init bool xbc_node_is_key(struct xbc_node *node)
+{
+ return !xbc_node_is_value(node);
+}
+
+/**
+ * xbc_node_is_array() - Test whether the node is an array value node
+ * @node: An XBC node.
+ *
+ * Test whether @node is an array value node.
+ */
+static inline __init bool xbc_node_is_array(struct xbc_node *node)
+{
+ return xbc_node_is_value(node) && node->child != 0;
+}
+
+/**
+ * xbc_node_is_leaf() - Test whether the node is a leaf key node
+ * @node: An XBC node.
+ *
+ * Test whether @node is a leaf key node, i.e. a key node that has a value
+ * node or no child. Returns true if it is a leaf node, false if not.
+ * Note that a leaf node can have subkey nodes in addition to the
+ * value node.
+ */
+static inline __init bool xbc_node_is_leaf(struct xbc_node *node)
+{
+ return xbc_node_is_key(node) &&
+ (!node->child || xbc_node_is_value(xbc_node_get_child(node)));
+}
+
+/* Tree-based key-value access APIs */
+struct xbc_node * __init xbc_node_find_subkey(struct xbc_node *parent,
+ const char *key);
+
+const char * __init xbc_node_find_value(struct xbc_node *parent,
+ const char *key,
+ struct xbc_node **vnode);
+
+struct xbc_node * __init xbc_node_find_next_leaf(struct xbc_node *root,
+ struct xbc_node *leaf);
+
+const char * __init xbc_node_find_next_key_value(struct xbc_node *root,
+ struct xbc_node **leaf);
+
+/**
+ * xbc_find_value() - Find a value which matches the key
+ * @key: Search key
+ * @vnode: A container pointer of XBC value node.
+ *
+ * Search the whole XBC tree for a value whose key matches @key and return
+ * the value if found. The found value node is stored in *@vnode.
+ * Note that this can return a 0-length string and store NULL in *@vnode
+ * for a key-only (non-value) entry.
+ */
+static inline const char * __init
+xbc_find_value(const char *key, struct xbc_node **vnode)
+{
+ return xbc_node_find_value(NULL, key, vnode);
+}
+
+/**
+ * xbc_find_node() - Find a node which matches the key
+ * @key: Search key
+ *
+ * Search the whole XBC tree for a (key) node whose key matches @key and
+ * return the node if found. If not found, returns NULL.
+ */
+static inline struct xbc_node * __init xbc_find_node(const char *key)
+{
+ return xbc_node_find_subkey(NULL, key);
+}
+
+/**
+ * xbc_node_get_subkey() - Return the first subkey node if exists
+ * @node: Parent node
+ *
+ * Return the first subkey node of @node. If @node has no child, or only
+ * a value node, this will return NULL.
+ */
+static inline struct xbc_node * __init xbc_node_get_subkey(struct xbc_node *node)
+{
+ struct xbc_node *child = xbc_node_get_child(node);
+
+ if (child && xbc_node_is_value(child))
+ return xbc_node_get_next(child);
+ else
+ return child;
+}
+
+/**
+ * xbc_array_for_each_value() - Iterate value nodes on an array
+ * @anode: An XBC array value node
+ * @value: A value
+ *
+ * Iterate array value nodes and values starting from @anode. This is
+ * expected to be used with xbc_find_value() and xbc_node_find_value(),
+ * so that the user can process each array entry node.
+ */
+#define xbc_array_for_each_value(anode, value) \
+ for (value = xbc_node_get_data(anode); anode != NULL ; \
+ anode = xbc_node_get_child(anode), \
+ value = anode ? xbc_node_get_data(anode) : NULL)
+
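Combining this iterator with xbc_find_value() above, a caller can walk every entry of an array-valued key; a sketch with a hypothetical key name:

/* Sketch: print each value stored under the array key "kernel.console". */
static void __init sketch_dump_consoles(void)
{
	struct xbc_node *anode;
	const char *val;

	val = xbc_find_value("kernel.console", &anode);
	if (val && anode)
		xbc_array_for_each_value(anode, val)
			pr_info("console: %s\n", val);
}
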
+/**
+ * xbc_node_for_each_child() - Iterate child nodes
+ * @parent: An XBC node.
+ * @child: Iterated XBC node.
+ *
+ * Iterate the child nodes of @parent. Each child node is stored in @child.
+ * The @child can be a mixture of a value node and subkey nodes.
+ */
+#define xbc_node_for_each_child(parent, child) \
+ for (child = xbc_node_get_child(parent); child != NULL ; \
+ child = xbc_node_get_next(child))
+
+/**
+ * xbc_node_for_each_subkey() - Iterate child subkey nodes
+ * @parent: An XBC node.
+ * @child: Iterated XBC node.
+ *
+ * Iterate the subkey nodes of @parent. Each subkey node is stored in
+ * @child; value nodes are skipped.
+ */
+#define xbc_node_for_each_subkey(parent, child) \
+ for (child = xbc_node_get_subkey(parent); child != NULL ; \
+ child = xbc_node_get_next(child))
+
+/**
+ * xbc_node_for_each_array_value() - Iterate array entries of given key
+ * @node: An XBC node.
+ * @key: A key string searched under @node
+ * @anode: Iterated XBC node of array entry.
+ * @value: Iterated value of array entry.
+ *
+ * Iterate the array entries of the given @key under @node. Each array entry
+ * node and its value are stored in @anode and @value. If @node does not have
+ * a @key node, this does nothing.
+ * Note that even if the found key node has only one value (not an array),
+ * this executes the block once. However, if the found key node has no value
+ * (a key-only node), this does nothing. So don't use this to test for the
+ * existence of a key-value pair.
+ */
+#define xbc_node_for_each_array_value(node, key, anode, value) \
+ for (value = xbc_node_find_value(node, key, &anode); value != NULL; \
+ anode = xbc_node_get_child(anode), \
+ value = anode ? xbc_node_get_data(anode) : NULL)
+
+/**
+ * xbc_node_for_each_key_value() - Iterate key-value pairs under a node
+ * @node: An XBC node.
+ * @knode: Iterated key node
+ * @value: Iterated value string
+ *
+ * Iterate key-value pairs under @node. Each key node and value string are
+ * stored in @knode and @value respectively.
+ */
+#define xbc_node_for_each_key_value(node, knode, value) \
+ for (knode = NULL, value = xbc_node_find_next_key_value(node, &knode);\
+ knode != NULL; value = xbc_node_find_next_key_value(node, &knode))
+
+/**
+ * xbc_for_each_key_value() - Iterate key-value pairs
+ * @knode: Iterated key node
+ * @value: Iterated value string
+ *
+ * Iterate key-value pairs in whole XBC tree. Each key node and value string
+ * are stored in @knode and @value respectively.
+ */
+#define xbc_for_each_key_value(knode, value) \
+ xbc_node_for_each_key_value(NULL, knode, value)
+
+/* Compose partial key */
+int __init xbc_node_compose_key_after(struct xbc_node *root,
+ struct xbc_node *node, char *buf, size_t size);
+
+/**
+ * xbc_node_compose_key() - Compose full key string of the XBC node
+ * @node: An XBC node.
+ * @buf: A buffer to store the key.
+ * @size: The size of the @buf.
+ *
+ * Compose the full-length key of @node into @buf. Returns the total
+ * length of the key stored in @buf, -EINVAL if @node is NULL, or
+ * -ERANGE if the key depth exceeds the maximum depth.
+ */
+static inline int __init xbc_node_compose_key(struct xbc_node *node,
+ char *buf, size_t size)
+{
+ return xbc_node_compose_key_after(NULL, node, buf, size);
+}
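
For instance, recovering the full dotted key of a leaf found during iteration; a sketch in which `leaf` is assumed to come from one of the iterators above:

/* Sketch: compose and print the full key string of a leaf node. */
static void __init sketch_print_key(struct xbc_node *leaf)
{
	char key[XBC_KEYLEN_MAX];

	if (xbc_node_compose_key(leaf, key, sizeof(key)) >= 0)
		pr_info("bootconfig key: %s\n", key);
}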
+
+/* XBC node initializer */
+int __init xbc_init(const char *buf, size_t size, const char **emsg, int *epos);
+
+/* XBC node and size information */
+int __init xbc_get_info(int *node_size, size_t *data_size);
+
+/* XBC cleanup data structures */
+void __init xbc_exit(void);
+
+/* XBC embedded bootconfig data in kernel */
+#ifdef CONFIG_BOOT_CONFIG_EMBED
+const char * __init xbc_get_embedded_bootconfig(size_t *size);
+#else
+static inline const char *xbc_get_embedded_bootconfig(size_t *size)
+{
+ return NULL;
+}
+#endif
+
+#endif
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
deleted file mode 100644
index e223d91b6439..000000000000
--- a/include/linux/bootmem.h
+++ /dev/null
@@ -1,374 +0,0 @@
-/*
- * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
- */
-#ifndef _LINUX_BOOTMEM_H
-#define _LINUX_BOOTMEM_H
-
-#include <linux/mmzone.h>
-#include <linux/mm_types.h>
-#include <asm/dma.h>
-#include <asm/processor.h>
-
-/*
- * simple boot-time physical memory area allocator.
- */
-
-extern unsigned long max_low_pfn;
-extern unsigned long min_low_pfn;
-
-/*
- * highest page
- */
-extern unsigned long max_pfn;
-/*
- * highest possible page
- */
-extern unsigned long long max_possible_pfn;
-
-#ifndef CONFIG_NO_BOOTMEM
-/*
- * node_bootmem_map is a map pointer - the bits represent all physical
- * memory pages (including holes) on the node.
- */
-typedef struct bootmem_data {
- unsigned long node_min_pfn;
- unsigned long node_low_pfn;
- void *node_bootmem_map;
- unsigned long last_end_off;
- unsigned long hint_idx;
- struct list_head list;
-} bootmem_data_t;
-
-extern bootmem_data_t bootmem_node_data[];
-#endif
-
-extern unsigned long bootmem_bootmap_pages(unsigned long);
-
-extern unsigned long init_bootmem_node(pg_data_t *pgdat,
- unsigned long freepfn,
- unsigned long startpfn,
- unsigned long endpfn);
-extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
-
-extern unsigned long free_all_bootmem(void);
-extern void reset_node_managed_pages(pg_data_t *pgdat);
-extern void reset_all_zones_managed_pages(void);
-
-extern void free_bootmem_node(pg_data_t *pgdat,
- unsigned long addr,
- unsigned long size);
-extern void free_bootmem(unsigned long physaddr, unsigned long size);
-extern void free_bootmem_late(unsigned long physaddr, unsigned long size);
-
-/*
- * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
- * the architecture-specific code should honor this).
- *
- * If flags is BOOTMEM_DEFAULT, then the return value is always 0 (success).
- * If flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the memory
- * already was reserved.
- */
-#define BOOTMEM_DEFAULT 0
-#define BOOTMEM_EXCLUSIVE (1<<0)
-
-extern int reserve_bootmem(unsigned long addr,
- unsigned long size,
- int flags);
-extern int reserve_bootmem_node(pg_data_t *pgdat,
- unsigned long physaddr,
- unsigned long size,
- int flags);
-
-extern void *__alloc_bootmem(unsigned long size,
- unsigned long align,
- unsigned long goal);
-extern void *__alloc_bootmem_nopanic(unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-extern void *__alloc_bootmem_node(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-void *__alloc_bootmem_node_high(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal,
- unsigned long limit) __malloc;
-extern void *__alloc_bootmem_low(unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-void *__alloc_bootmem_low_nopanic(unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-
-#ifdef CONFIG_NO_BOOTMEM
-/* We are using top down, so it is safe to use 0 here */
-#define BOOTMEM_LOW_LIMIT 0
-#else
-#define BOOTMEM_LOW_LIMIT __pa(MAX_DMA_ADDRESS)
-#endif
-
-#ifndef ARCH_LOW_ADDRESS_LIMIT
-#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
-#endif
-
-#define alloc_bootmem(x) \
- __alloc_bootmem(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_align(x, align) \
- __alloc_bootmem(x, align, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_nopanic(x) \
- __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages(x) \
- __alloc_bootmem(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages_nopanic(x) \
- __alloc_bootmem_nopanic(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_node(pgdat, x) \
- __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_node_nopanic(pgdat, x) \
- __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages_node(pgdat, x) \
- __alloc_bootmem_node(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
- __alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-
-#define alloc_bootmem_low(x) \
- __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
-#define alloc_bootmem_low_pages_nopanic(x) \
- __alloc_bootmem_low_nopanic(x, PAGE_SIZE, 0)
-#define alloc_bootmem_low_pages(x) \
- __alloc_bootmem_low(x, PAGE_SIZE, 0)
-#define alloc_bootmem_low_pages_node(pgdat, x) \
- __alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
-
-
-#if defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM)
-
-/* FIXME: use MEMBLOCK_ALLOC_* variants here */
-#define BOOTMEM_ALLOC_ACCESSIBLE 0
-#define BOOTMEM_ALLOC_ANYWHERE (~(phys_addr_t)0)
-
-/* FIXME: Move to memblock.h at a point where we remove nobootmem.c */
-void *memblock_virt_alloc_try_nid_nopanic(phys_addr_t size,
- phys_addr_t align, phys_addr_t min_addr,
- phys_addr_t max_addr, int nid);
-void *memblock_virt_alloc_try_nid(phys_addr_t size, phys_addr_t align,
- phys_addr_t min_addr, phys_addr_t max_addr, int nid);
-void __memblock_free_early(phys_addr_t base, phys_addr_t size);
-void __memblock_free_late(phys_addr_t base, phys_addr_t size);
-
-static inline void * __init memblock_virt_alloc(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid(size, align, BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_nopanic(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid_nopanic(size, align,
- BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_low(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid(size, align,
- BOOTMEM_LOW_LIMIT,
- ARCH_LOW_ADDRESS_LIMIT,
- NUMA_NO_NODE);
-}
-static inline void * __init memblock_virt_alloc_low_nopanic(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid_nopanic(size, align,
- BOOTMEM_LOW_LIMIT,
- ARCH_LOW_ADDRESS_LIMIT,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_from_nopanic(
- phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
-{
- return memblock_virt_alloc_try_nid_nopanic(size, align, min_addr,
- BOOTMEM_ALLOC_ACCESSIBLE,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_node(
- phys_addr_t size, int nid)
-{
- return memblock_virt_alloc_try_nid(size, 0, BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE, nid);
-}
-
-static inline void * __init memblock_virt_alloc_node_nopanic(
- phys_addr_t size, int nid)
-{
- return memblock_virt_alloc_try_nid_nopanic(size, 0, BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE,
- nid);
-}
-
-static inline void __init memblock_free_early(
- phys_addr_t base, phys_addr_t size)
-{
- __memblock_free_early(base, size);
-}
-
-static inline void __init memblock_free_early_nid(
- phys_addr_t base, phys_addr_t size, int nid)
-{
- __memblock_free_early(base, size);
-}
-
-static inline void __init memblock_free_late(
- phys_addr_t base, phys_addr_t size)
-{
- __memblock_free_late(base, size);
-}
-
-#else
-
-#define BOOTMEM_ALLOC_ACCESSIBLE 0
-
-
-/* Fall back to all the existing bootmem APIs */
-static inline void * __init memblock_virt_alloc(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem(size, align, BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_nopanic(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_low(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem_low(size, align, 0);
-}
-
-static inline void * __init memblock_virt_alloc_low_nopanic(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem_low_nopanic(size, align, 0);
-}
-
-static inline void * __init memblock_virt_alloc_from_nopanic(
- phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
-{
- return __alloc_bootmem_nopanic(size, align, min_addr);
-}
-
-static inline void * __init memblock_virt_alloc_node(
- phys_addr_t size, int nid)
-{
- return __alloc_bootmem_node(NODE_DATA(nid), size, SMP_CACHE_BYTES,
- BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_node_nopanic(
- phys_addr_t size, int nid)
-{
- return __alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
- SMP_CACHE_BYTES,
- BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_try_nid(phys_addr_t size,
- phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid)
-{
- return __alloc_bootmem_node_high(NODE_DATA(nid), size, align,
- min_addr);
-}
-
-static inline void * __init memblock_virt_alloc_try_nid_nopanic(
- phys_addr_t size, phys_addr_t align,
- phys_addr_t min_addr, phys_addr_t max_addr, int nid)
-{
- return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align,
- min_addr, max_addr);
-}
-
-static inline void __init memblock_free_early(
- phys_addr_t base, phys_addr_t size)
-{
- free_bootmem(base, size);
-}
-
-static inline void __init memblock_free_early_nid(
- phys_addr_t base, phys_addr_t size, int nid)
-{
- free_bootmem_node(NODE_DATA(nid), base, size);
-}
-
-static inline void __init memblock_free_late(
- phys_addr_t base, phys_addr_t size)
-{
- free_bootmem_late(base, size);
-}
-#endif /* defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM) */
-
-#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
-extern void *alloc_remap(int nid, unsigned long size);
-#else
-static inline void *alloc_remap(int nid, unsigned long size)
-{
- return NULL;
-}
-#endif /* CONFIG_HAVE_ARCH_ALLOC_REMAP */
-
-extern void *alloc_large_system_hash(const char *tablename,
- unsigned long bucketsize,
- unsigned long numentries,
- int scale,
- int flags,
- unsigned int *_hash_shift,
- unsigned int *_hash_mask,
- unsigned long low_limit,
- unsigned long high_limit);
-
-#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
-#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min
- * shift passed via *_hash_shift */
-#define HASH_ZERO 0x00000004 /* Zero allocated hash table */
-
-/* Only NUMA needs hash distribution. 64bit NUMA architectures have
- * sufficient vmalloc space.
- */
-#ifdef CONFIG_NUMA
-#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
-extern int hashdist; /* Distribute hashes across NUMA nodes? */
-#else
-#define hashdist (0)
-#endif
-
-
-#endif /* _LINUX_BOOTMEM_H */
diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h
new file mode 100644
index 000000000000..cc35d010fa94
--- /dev/null
+++ b/include/linux/bootmem_info.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BOOTMEM_INFO_H
+#define __LINUX_BOOTMEM_INFO_H
+
+#include <linux/mm.h>
+
+/*
+ * Types for free bootmem stored in page->lru.next. These have to be in
+ * some random range in unsigned long space for debugging purposes.
+ */
+enum {
+ MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
+ SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
+ MIX_SECTION_INFO,
+ NODE_INFO,
+ MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
+};
+
+#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
+void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
+
+void get_page_bootmem(unsigned long info, struct page *page,
+ unsigned long type);
+void put_page_bootmem(struct page *page);
+
+/*
+ * Any memory allocated via the memblock allocator and not via the
+ * buddy allocator will already be marked reserved in the memmap. For
+ * those pages, we can call this function to free them to the buddy
+ * allocator.
+ */
+static inline void free_bootmem_page(struct page *page)
+{
+ unsigned long magic = page->index;
+
+ /*
+	 * reserve_bootmem_region() sets the reserved flag on bootmem
+	 * pages.
+ */
+ VM_BUG_ON_PAGE(page_ref_count(page) != 2, page);
+
+ if (magic == SECTION_INFO || magic == MIX_SECTION_INFO)
+ put_page_bootmem(page);
+ else
+ VM_BUG_ON_PAGE(1, page);
+}
+#else
+static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
+{
+}
+
+static inline void put_page_bootmem(struct page *page)
+{
+}
+
+static inline void get_page_bootmem(unsigned long info, struct page *page,
+ unsigned long type)
+{
+}
+
+static inline void free_bootmem_page(struct page *page)
+{
+ free_reserved_page(page);
+}
+#endif
+
+#endif /* __LINUX_BOOTMEM_INFO_H */
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 8fdcb783197d..fc53e0ad56d9 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -1,9 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BH_H
#define _LINUX_BH_H
+#include <linux/instruction_pointer.h>
#include <linux/preempt.h>
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_TRACE_IRQFLAGS)
extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
#else
static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
@@ -31,4 +33,10 @@ static inline void local_bh_enable(void)
__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
}
+#ifdef CONFIG_PREEMPT_RT
+extern bool local_bh_blocked(void);
+#else
+static inline bool local_bh_blocked(void) { return false; }
+#endif
+
#endif /* _LINUX_BH_H */
diff --git a/include/linux/bpf-cgroup-defs.h b/include/linux/bpf-cgroup-defs.h
new file mode 100644
index 000000000000..7b121bd780eb
--- /dev/null
+++ b/include/linux/bpf-cgroup-defs.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BPF_CGROUP_DEFS_H
+#define _BPF_CGROUP_DEFS_H
+
+#ifdef CONFIG_CGROUP_BPF
+
+#include <linux/list.h>
+#include <linux/percpu-refcount.h>
+#include <linux/workqueue.h>
+
+struct bpf_prog_array;
+
+#ifdef CONFIG_BPF_LSM
+/* Maximum number of concurrently attachable per-cgroup LSM hooks. */
+#define CGROUP_LSM_NUM 10
+#else
+#define CGROUP_LSM_NUM 0
+#endif
+
+enum cgroup_bpf_attach_type {
+ CGROUP_BPF_ATTACH_TYPE_INVALID = -1,
+ CGROUP_INET_INGRESS = 0,
+ CGROUP_INET_EGRESS,
+ CGROUP_INET_SOCK_CREATE,
+ CGROUP_SOCK_OPS,
+ CGROUP_DEVICE,
+ CGROUP_INET4_BIND,
+ CGROUP_INET6_BIND,
+ CGROUP_INET4_CONNECT,
+ CGROUP_INET6_CONNECT,
+ CGROUP_INET4_POST_BIND,
+ CGROUP_INET6_POST_BIND,
+ CGROUP_UDP4_SENDMSG,
+ CGROUP_UDP6_SENDMSG,
+ CGROUP_SYSCTL,
+ CGROUP_UDP4_RECVMSG,
+ CGROUP_UDP6_RECVMSG,
+ CGROUP_GETSOCKOPT,
+ CGROUP_SETSOCKOPT,
+ CGROUP_INET4_GETPEERNAME,
+ CGROUP_INET6_GETPEERNAME,
+ CGROUP_INET4_GETSOCKNAME,
+ CGROUP_INET6_GETSOCKNAME,
+ CGROUP_INET_SOCK_RELEASE,
+ CGROUP_LSM_START,
+ CGROUP_LSM_END = CGROUP_LSM_START + CGROUP_LSM_NUM - 1,
+ MAX_CGROUP_BPF_ATTACH_TYPE
+};
+
+struct cgroup_bpf {
+ /* array of effective progs in this cgroup */
+ struct bpf_prog_array __rcu *effective[MAX_CGROUP_BPF_ATTACH_TYPE];
+
+ /* attached progs to this cgroup and attach flags
+ * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
+ * have either zero or one element
+ * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
+ */
+ struct hlist_head progs[MAX_CGROUP_BPF_ATTACH_TYPE];
+ u8 flags[MAX_CGROUP_BPF_ATTACH_TYPE];
+
+ /* list of cgroup shared storages */
+ struct list_head storages;
+
+ /* temp storage for effective prog array used by prog_attach/detach */
+ struct bpf_prog_array *inactive;
+
+ /* reference counter used to detach bpf programs after cgroup removal */
+ struct percpu_ref refcnt;
+
+ /* cgroup_bpf is released using a work queue */
+ struct work_struct release_work;
+};
+
+#else /* CONFIG_CGROUP_BPF */
+struct cgroup_bpf {};
+#endif /* CONFIG_CGROUP_BPF */
+
+#endif
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 360c082e885c..57e9e109257e 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -1,59 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H
+#include <linux/bpf.h>
+#include <linux/bpf-cgroup-defs.h>
+#include <linux/errno.h>
#include <linux/jump_label.h>
+#include <linux/percpu.h>
+#include <linux/rbtree.h>
+#include <net/sock.h>
#include <uapi/linux/bpf.h>
struct sock;
+struct sockaddr;
struct cgroup;
struct sk_buff;
+struct bpf_map;
+struct bpf_prog;
struct bpf_sock_ops_kern;
+struct bpf_cgroup_storage;
+struct ctl_table;
+struct ctl_table_header;
+struct task_struct;
+
+unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
+ const struct bpf_insn *insn);
+unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
+ const struct bpf_insn *insn);
+unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
+ const struct bpf_insn *insn);
#ifdef CONFIG_CGROUP_BPF
-extern struct static_key_false cgroup_bpf_enabled_key;
-#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
-
-struct cgroup_bpf {
- /*
- * Store two sets of bpf_prog pointers, one for programs that are
- * pinned directly to this cgroup, and one for those that are effective
- * when this cgroup is accessed.
- */
- struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
- struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE];
- bool disallow_override[MAX_BPF_ATTACH_TYPE];
+#define CGROUP_ATYPE(type) \
+ case BPF_##type: return type
+
+static inline enum cgroup_bpf_attach_type
+to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
+{
+ switch (attach_type) {
+ CGROUP_ATYPE(CGROUP_INET_INGRESS);
+ CGROUP_ATYPE(CGROUP_INET_EGRESS);
+ CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
+ CGROUP_ATYPE(CGROUP_SOCK_OPS);
+ CGROUP_ATYPE(CGROUP_DEVICE);
+ CGROUP_ATYPE(CGROUP_INET4_BIND);
+ CGROUP_ATYPE(CGROUP_INET6_BIND);
+ CGROUP_ATYPE(CGROUP_INET4_CONNECT);
+ CGROUP_ATYPE(CGROUP_INET6_CONNECT);
+ CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
+ CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
+ CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
+ CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
+ CGROUP_ATYPE(CGROUP_SYSCTL);
+ CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
+ CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
+ CGROUP_ATYPE(CGROUP_GETSOCKOPT);
+ CGROUP_ATYPE(CGROUP_SETSOCKOPT);
+ CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
+ CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
+ CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
+ CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
+ CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
+ default:
+ return CGROUP_BPF_ATTACH_TYPE_INVALID;
+ }
+}
+
+#undef CGROUP_ATYPE
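
To make the token pasting explicit, each CGROUP_ATYPE(X) case above expands as shown here (illustration only, not part of the patch):

/*
 * CGROUP_ATYPE(CGROUP_INET_INGRESS) expands to:
 *
 *	case BPF_CGROUP_INET_INGRESS: return CGROUP_INET_INGRESS;
 *
 * i.e. the UAPI bpf_attach_type value is mapped onto the internal
 * cgroup_bpf_attach_type enumerator of the same name.
 */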
+
+extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
+#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])
+
+#define for_each_cgroup_storage_type(stype) \
+ for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
+
+struct bpf_cgroup_storage_map;
+
+struct bpf_storage_buffer {
+ struct rcu_head rcu;
+ char data[];
+};
+
+struct bpf_cgroup_storage {
+ union {
+ struct bpf_storage_buffer *buf;
+ void __percpu *percpu_buf;
+ };
+ struct bpf_cgroup_storage_map *map;
+ struct bpf_cgroup_storage_key key;
+ struct list_head list_map;
+ struct list_head list_cg;
+ struct rb_node node;
+ struct rcu_head rcu;
};
-void cgroup_bpf_put(struct cgroup *cgrp);
-void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);
+struct bpf_cgroup_link {
+ struct bpf_link link;
+ struct cgroup *cgroup;
+ enum bpf_attach_type type;
+};
-int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
- struct bpf_prog *prog, enum bpf_attach_type type,
- bool overridable);
+struct bpf_prog_list {
+ struct hlist_node node;
+ struct bpf_prog *prog;
+ struct bpf_cgroup_link *link;
+ struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
+};
-/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
-int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
- enum bpf_attach_type type, bool overridable);
+int cgroup_bpf_inherit(struct cgroup *cgrp);
+void cgroup_bpf_offline(struct cgroup *cgrp);
int __cgroup_bpf_run_filter_skb(struct sock *sk,
struct sk_buff *skb,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_sk(struct sock *sk,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
+
+int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
+ struct sockaddr *uaddr,
+ enum cgroup_bpf_attach_type atype,
+ void *t_ctx,
+ u32 *flags);
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
struct bpf_sock_ops_kern *sock_ops,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
+
+int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
+ short access, enum cgroup_bpf_attach_type atype);
+
+int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
+ struct ctl_table *table, int write,
+ char **buf, size_t *pcount, loff_t *ppos,
+ enum cgroup_bpf_attach_type atype);
+
+int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
+ int *optname, char __user *optval,
+ int *optlen, char **kernel_optval);
+int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
+ int optname, char __user *optval,
+ int __user *optlen, int max_optlen,
+ int retval);
+
+int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
+ int optname, void *optval,
+ int *optlen, int retval);
+
+static inline enum bpf_cgroup_storage_type cgroup_storage_type(
+ struct bpf_map *map)
+{
+ if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
+ return BPF_CGROUP_STORAGE_PERCPU;
+
+ return BPF_CGROUP_STORAGE_SHARED;
+}
+
+struct bpf_cgroup_storage *
+cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
+ void *key, bool locked);
+struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
+ enum bpf_cgroup_storage_type stype);
+void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
+void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
+ struct cgroup *cgroup,
+ enum bpf_attach_type type);
+void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
+int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
+
+int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
+ void *value, u64 flags);
+
+/* Opportunistic check to see whether we have any BPF program attached */
+static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
+ enum cgroup_bpf_attach_type type)
+{
+ struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+ struct bpf_prog_array *array;
+
+ array = rcu_access_pointer(cgrp->bpf.effective[type]);
+ return array != &bpf_empty_prog_array.hdr;
+}
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) && \
+ cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS)) \
__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
- BPF_CGROUP_INET_INGRESS); \
+ CGROUP_INET_INGRESS); \
\
__ret; \
})
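
Call sites consume the statement-expression value as an errno-style verdict; a sketch of how the network stack would gate ingress delivery (not taken from this patch):

/* Sketch: reject an skb if the cgroup ingress program says so. */
static int sketch_ingress_filter(struct sock *sk, struct sk_buff *skb)
{
	int ret = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);

	if (ret)
		return ret;	/* delivery blocked by the cgroup BPF program */
	/* ... continue the normal receive path ... */
	return 0;
}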
@@ -61,48 +199,314 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
+ if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
typeof(sk) __sk = sk_to_full_sk(sk); \
- if (sk_fullsock(__sk)) \
+ if (sk_fullsock(__sk) && \
+ cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS)) \
__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
- BPF_CGROUP_INET_EGRESS); \
+ CGROUP_INET_EGRESS); \
+ } \
+ __ret; \
+})
+
+#define BPF_CGROUP_RUN_SK_PROG(sk, atype) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(atype)) { \
+ __ret = __cgroup_bpf_run_filter_sk(sk, atype); \
} \
__ret; \
})
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)
+
+#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)
+
+#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)
+
+#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)
+
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled && sk) { \
- __ret = __cgroup_bpf_run_filter_sk(sk, \
- BPF_CGROUP_INET_SOCK_CREATE); \
+ if (cgroup_bpf_enabled(atype)) \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
+ NULL, NULL); \
+ __ret; \
+})
+
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(atype)) { \
+ lock_sock(sk); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
+ t_ctx, NULL); \
+ release_sock(sk); \
} \
__ret; \
})
+/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags
+ * via the upper bits of the return code. The only flag currently supported
+ * (at bit position 0) indicates that the CAP_NET_BIND_SERVICE capability
+ * check should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
+ */
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags) \
+({ \
+ u32 __flags = 0; \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(atype)) { \
+ lock_sock(sk); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
+ NULL, &__flags); \
+ release_sock(sk); \
+ if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \
+ *bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE; \
+ } \
+ __ret; \
+})
+
+#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \
+ ((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) || \
+ cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) && \
+ (sk)->sk_prot->pre_connect)
+
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT)
+
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT)
+
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL)
+
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL)
+
+#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx)
+
+#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx)
+
+#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL)
+
+#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL)
+
+/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
+ * fullsock and its parent fullsock cannot be traced by
+ * sk_to_full_sk().
+ *
+ * e.g. sock_ops->sk is a request_sock under syncookie mode, whose
+ * listener-sk is not attached to the rsk_listener.
+ * In this case, the caller holds the listener-sk (unlocked),
+ * sets its sock_ops->sk to the req_sk, and calls this SOCK_OPS"_SK"
+ * macro with the listener-sk so that the cgroup-bpf-progs of the
+ * listener-sk will be run.
+ *
+ * Regardless of syncookie mode, calling bpf_setsockopt on the
+ * listener-sk does not make sense anyway, so passing
+ * 'sock_ops->sk == req_sk' to the bpf prog is appropriate here.
+ */
+#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(CGROUP_SOCK_OPS)) \
+ __ret = __cgroup_bpf_run_filter_sock_ops(sk, \
+ sock_ops, \
+ CGROUP_SOCK_OPS); \
+ __ret; \
+})
+
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled && (sock_ops)->sk) { \
+ if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) { \
typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
- if (sk_fullsock(__sk)) \
+ if (__sk && sk_fullsock(__sk)) \
__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
sock_ops, \
- BPF_CGROUP_SOCK_OPS); \
+ CGROUP_SOCK_OPS); \
} \
__ret; \
})
+
+#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(CGROUP_DEVICE)) \
+ __ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
+ access, \
+ CGROUP_DEVICE); \
+ \
+ __ret; \
+})
+
+
+#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(CGROUP_SYSCTL)) \
+ __ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
+ buf, count, pos, \
+ CGROUP_SYSCTL); \
+ __ret; \
+})
+
+#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
+ kernel_optval) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) && \
+ cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT)) \
+ __ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
+ optname, optval, \
+ optlen, \
+ kernel_optval); \
+ __ret; \
+})
+
+#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
+ get_user(__ret, optlen); \
+ __ret; \
+})
+
+#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \
+ max_optlen, retval) \
+({ \
+ int __ret = retval; \
+ if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) && \
+ cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT)) \
+ if (!(sock)->sk_prot->bpf_bypass_getsockopt || \
+ !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
+ tcp_bpf_bypass_getsockopt, \
+ level, optname)) \
+ __ret = __cgroup_bpf_run_filter_getsockopt( \
+ sock, level, optname, optval, optlen, \
+ max_optlen, retval); \
+ __ret; \
+})
+
+#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
+ optlen, retval) \
+({ \
+ int __ret = retval; \
+ if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
+ __ret = __cgroup_bpf_run_filter_getsockopt_kern( \
+ sock, level, optname, optval, optlen, retval); \
+ __ret; \
+})
+
+int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype, struct bpf_prog *prog);
+int cgroup_bpf_prog_detach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype);
+int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+int cgroup_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+
+const struct bpf_func_proto *
+cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
+const struct bpf_func_proto *
+cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
#else
-struct cgroup_bpf {};
-static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
-static inline void cgroup_bpf_inherit(struct cgroup *cgrp,
- struct cgroup *parent) {}
+static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
+static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}
+
+static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype)
+{
+ return -EINVAL;
+}
+
+static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EINVAL;
+}
+
+static inline const struct bpf_func_proto *
+cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ return NULL;
+}
+
+static inline const struct bpf_func_proto *
+cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ return NULL;
+}
+
+static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
+ struct bpf_map *map) { return 0; }
+static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
+ struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
+static inline void bpf_cgroup_storage_free(
+ struct bpf_cgroup_storage *storage) {}
+static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
+ void *value) {
+ return 0;
+}
+static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
+ void *key, void *value, u64 flags) {
+ return 0;
+}
+
+#define cgroup_bpf_enabled(atype) (0)
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) ({ 0; })
+#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
+#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
+ optlen, max_optlen, retval) ({ retval; })
+#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
+ optlen, retval) ({ retval; })
+#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
+ kernel_optval) ({ 0; })
+
+#define for_each_cgroup_storage_type(stype) for (; false; )
#endif /* CONFIG_CGROUP_BPF */
diff --git a/include/linux/bpf-netns.h b/include/linux/bpf-netns.h
new file mode 100644
index 000000000000..413cfa5e4b07
--- /dev/null
+++ b/include/linux/bpf-netns.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BPF_NETNS_H
+#define _BPF_NETNS_H
+
+#include <linux/mutex.h>
+#include <net/netns/bpf.h>
+#include <uapi/linux/bpf.h>
+
+static inline enum netns_bpf_attach_type
+to_netns_bpf_attach_type(enum bpf_attach_type attach_type)
+{
+ switch (attach_type) {
+ case BPF_FLOW_DISSECTOR:
+ return NETNS_BPF_FLOW_DISSECTOR;
+ case BPF_SK_LOOKUP:
+ return NETNS_BPF_SK_LOOKUP;
+ default:
+ return NETNS_BPF_INVALID;
+ }
+}
+
+/* Protects updates to netns_bpf */
+extern struct mutex netns_bpf_mutex;
+
+union bpf_attr;
+struct bpf_prog;
+
+#ifdef CONFIG_NET
+int netns_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+int netns_bpf_prog_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog);
+int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
+int netns_bpf_link_create(const union bpf_attr *attr,
+ struct bpf_prog *prog);
+#else
+static inline int netns_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int netns_bpf_prog_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int netns_bpf_prog_detach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int netns_bpf_link_create(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif /* _BPF_NETNS_H */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index b69e7a5869ff..e53ceee1df37 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1,60 +1,668 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
*/
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1
#include <uapi/linux/bpf.h>
+#include <uapi/linux/filter.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
+#include <linux/numa.h>
+#include <linux/mm_types.h>
+#include <linux/wait.h>
+#include <linux/refcount.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/capability.h>
+#include <linux/sched/mm.h>
+#include <linux/slab.h>
+#include <linux/percpu-refcount.h>
+#include <linux/stddef.h>
+#include <linux/bpfptr.h>
+#include <linux/btf.h>
+#include <linux/rcupdate_trace.h>
+#include <linux/static_call.h>
+#include <linux/memcontrol.h>
+struct bpf_verifier_env;
+struct bpf_verifier_log;
struct perf_event;
+struct bpf_prog;
+struct bpf_prog_aux;
struct bpf_map;
+struct sock;
+struct seq_file;
+struct btf;
+struct btf_type;
+struct exception_table_entry;
+struct seq_operations;
+struct bpf_iter_aux_info;
+struct bpf_local_storage;
+struct bpf_local_storage_map;
+struct kobject;
+struct mem_cgroup;
+struct module;
+struct bpf_func_state;
+struct ftrace_ops;
+struct cgroup;
-/* map is generic key/value storage optionally accesible by eBPF programs */
+extern struct idr btf_idr;
+extern spinlock_t btf_idr_lock;
+extern struct kobject *btf_kobj;
+extern struct bpf_mem_alloc bpf_global_ma;
+extern bool bpf_global_ma_set;
+
+typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
+typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
+ struct bpf_iter_aux_info *aux);
+typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
+typedef unsigned int (*bpf_func_t)(const void *,
+ const struct bpf_insn *);
+struct bpf_iter_seq_info {
+ const struct seq_operations *seq_ops;
+ bpf_iter_init_seq_priv_t init_seq_private;
+ bpf_iter_fini_seq_priv_t fini_seq_private;
+ u32 seq_priv_size;
+};
+
+/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
/* funcs callable from userspace (via syscall) */
+ int (*map_alloc_check)(union bpf_attr *attr);
struct bpf_map *(*map_alloc)(union bpf_attr *attr);
void (*map_release)(struct bpf_map *map, struct file *map_file);
void (*map_free)(struct bpf_map *map);
int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
+ void (*map_release_uref)(struct bpf_map *map);
+ void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
+ int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+ int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
+ void *value, u64 flags);
+ int (*map_lookup_and_delete_batch)(struct bpf_map *map,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+ int (*map_update_batch)(struct bpf_map *map, struct file *map_file,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+ int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
/* funcs callable from userspace and from eBPF programs */
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
- int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
- int (*map_delete_elem)(struct bpf_map *map, void *key);
+ long (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
+ long (*map_delete_elem)(struct bpf_map *map, void *key);
+ long (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
+ long (*map_pop_elem)(struct bpf_map *map, void *value);
+ long (*map_peek_elem)(struct bpf_map *map, void *value);
+ void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);
/* funcs called by prog_array and perf_event_array map */
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
int fd);
void (*map_fd_put_ptr)(void *ptr);
- u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
+ int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
u32 (*map_fd_sys_lookup_elem)(void *ptr);
+ void (*map_seq_show_elem)(struct bpf_map *map, void *key,
+ struct seq_file *m);
+ int (*map_check_btf)(const struct bpf_map *map,
+ const struct btf *btf,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type);
+
+ /* Prog poke tracking helpers. */
+ int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
+ void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
+ void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
+ struct bpf_prog *new);
+
+ /* Direct value access helpers. */
+ int (*map_direct_value_addr)(const struct bpf_map *map,
+ u64 *imm, u32 off);
+ int (*map_direct_value_meta)(const struct bpf_map *map,
+ u64 imm, u32 *off);
+ int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
+ __poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
+ struct poll_table_struct *pts);
+
+ /* Functions called by bpf_local_storage maps */
+ int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
+ void *owner, u32 size);
+ void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
+ void *owner, u32 size);
+ struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);
+
+ /* Misc helpers. */
+ long (*map_redirect)(struct bpf_map *map, u64 key, u64 flags);
+
+ /* map_meta_equal must be implemented for maps that can be
+ * used as an inner map. It is a runtime check to ensure
+ * an inner map can be inserted into an outer map.
+ *
+ * Some properties of the inner map have already been used by the
+ * verifier. When inserting an inner map at runtime, map_meta_equal
+ * has to ensure the map being inserted has the same properties
+ * that the verifier relied on earlier.
+ */
+ bool (*map_meta_equal)(const struct bpf_map *meta0,
+ const struct bpf_map *meta1);
+
+ int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
+ struct bpf_func_state *caller,
+ struct bpf_func_state *callee);
+ long (*map_for_each_callback)(struct bpf_map *map,
+ bpf_callback_t callback_fn,
+ void *callback_ctx, u64 flags);
+
+ u64 (*map_mem_usage)(const struct bpf_map *map);
+
+ /* BTF id of struct allocated by map_alloc */
+ int *map_btf_id;
+
+ /* bpf_iter info used to open a seq_file */
+ const struct bpf_iter_seq_info *iter_seq_info;
+};
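
[Editor's note: to make the map_meta_equal contract described above concrete, here is a minimal user-space sketch of the comparison such a callback performs. The struct and values are illustrative only; the kernel's actual bpf_map_meta_equal lives in kernel/bpf/map_in_map.c and compares the real struct bpf_map fields.]

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the struct bpf_map properties an inner-map
 * equality check typically compares. */
struct map_meta {
	int map_type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int map_flags;
};

/* An inner map may only be inserted if it matches the meta map the
 * verifier already checked against. */
static bool map_meta_equal(const struct map_meta *a, const struct map_meta *b)
{
	return a->map_type == b->map_type &&
	       a->key_size == b->key_size &&
	       a->value_size == b->value_size &&
	       a->map_flags == b->map_flags;
}

int main(void)
{
	struct map_meta m0 = { .map_type = 2, .key_size = 4, .value_size = 8 };
	struct map_meta m1 = { .map_type = 2, .key_size = 4, .value_size = 16 };

	printf("equal: %d\n", map_meta_equal(&m0, &m1));	/* prints 0 */
	return 0;
}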
+
+enum {
+ /* Support at most 10 fields in a BTF type */
+ BTF_FIELDS_MAX = 10,
+};
+
+enum btf_field_type {
+ BPF_SPIN_LOCK = (1 << 0),
+ BPF_TIMER = (1 << 1),
+ BPF_KPTR_UNREF = (1 << 2),
+ BPF_KPTR_REF = (1 << 3),
+ BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF,
+ BPF_LIST_HEAD = (1 << 4),
+ BPF_LIST_NODE = (1 << 5),
+ BPF_RB_ROOT = (1 << 6),
+ BPF_RB_NODE = (1 << 7),
+ BPF_GRAPH_NODE_OR_ROOT = BPF_LIST_NODE | BPF_LIST_HEAD |
+ BPF_RB_NODE | BPF_RB_ROOT,
+ BPF_REFCOUNT = (1 << 8),
+};
+
+typedef void (*btf_dtor_kfunc_t)(void *);
+
+struct btf_field_kptr {
+ struct btf *btf;
+ struct module *module;
+ /* dtor used if btf_is_kernel(btf), otherwise the type is
+ * program-allocated, dtor is NULL, and __bpf_obj_drop_impl is used
+ */
+ btf_dtor_kfunc_t dtor;
+ u32 btf_id;
+};
+
+struct btf_field_graph_root {
+ struct btf *btf;
+ u32 value_btf_id;
+ u32 node_offset;
+ struct btf_record *value_rec;
+};
+
+struct btf_field {
+ u32 offset;
+ u32 size;
+ enum btf_field_type type;
+ union {
+ struct btf_field_kptr kptr;
+ struct btf_field_graph_root graph_root;
+ };
+};
+
+struct btf_record {
+ u32 cnt;
+ u32 field_mask;
+ int spin_lock_off;
+ int timer_off;
+ int refcount_off;
+ struct btf_field fields[];
};
struct bpf_map {
- atomic_t refcnt;
+ /* The first two cachelines hold read-mostly members, some of which
+ * are also accessed in the fast path (e.g. ops, max_entries).
+ */
+ const struct bpf_map_ops *ops ____cacheline_aligned;
+ struct bpf_map *inner_map_meta;
+#ifdef CONFIG_SECURITY
+ void *security;
+#endif
enum bpf_map_type map_type;
u32 key_size;
u32 value_size;
u32 max_entries;
+ u64 map_extra; /* any per-map-type extra fields */
u32 map_flags;
- u32 pages;
u32 id;
- struct user_struct *user;
- const struct bpf_map_ops *ops;
+ struct btf_record *record;
+ int numa_node;
+ u32 btf_key_type_id;
+ u32 btf_value_type_id;
+ u32 btf_vmlinux_value_type_id;
+ struct btf *btf;
+#ifdef CONFIG_MEMCG_KMEM
+ struct obj_cgroup *objcg;
+#endif
+ char name[BPF_OBJ_NAME_LEN];
+ /* The 3rd and 4th cachelines hold misc members, kept separate to avoid
+ * false sharing, particularly with the refcounting.
+ */
+ atomic64_t refcnt ____cacheline_aligned;
+ atomic64_t usercnt;
struct work_struct work;
- atomic_t usercnt;
- struct bpf_map *inner_map_meta;
+ struct mutex freeze_mutex;
+ atomic64_t writecnt;
+ /* 'Ownership' of a program-containing map is claimed by the first
+ * program that is going to use this map or by the first program whose
+ * FD is stored in the map, to make sure that all callers and callees
+ * have the same prog type, JITed flag and xdp_has_frags flag.
+ */
+ struct {
+ spinlock_t lock;
+ enum bpf_prog_type type;
+ bool jited;
+ bool xdp_has_frags;
+ } owner;
+ bool bypass_spec_v1;
+ bool frozen; /* write-once; write-protected by freeze_mutex */
+};
+
+static inline const char *btf_field_type_name(enum btf_field_type type)
+{
+ switch (type) {
+ case BPF_SPIN_LOCK:
+ return "bpf_spin_lock";
+ case BPF_TIMER:
+ return "bpf_timer";
+ case BPF_KPTR_UNREF:
+ case BPF_KPTR_REF:
+ return "kptr";
+ case BPF_LIST_HEAD:
+ return "bpf_list_head";
+ case BPF_LIST_NODE:
+ return "bpf_list_node";
+ case BPF_RB_ROOT:
+ return "bpf_rb_root";
+ case BPF_RB_NODE:
+ return "bpf_rb_node";
+ case BPF_REFCOUNT:
+ return "bpf_refcount";
+ default:
+ WARN_ON_ONCE(1);
+ return "unknown";
+ }
+}
+
+static inline u32 btf_field_type_size(enum btf_field_type type)
+{
+ switch (type) {
+ case BPF_SPIN_LOCK:
+ return sizeof(struct bpf_spin_lock);
+ case BPF_TIMER:
+ return sizeof(struct bpf_timer);
+ case BPF_KPTR_UNREF:
+ case BPF_KPTR_REF:
+ return sizeof(u64);
+ case BPF_LIST_HEAD:
+ return sizeof(struct bpf_list_head);
+ case BPF_LIST_NODE:
+ return sizeof(struct bpf_list_node);
+ case BPF_RB_ROOT:
+ return sizeof(struct bpf_rb_root);
+ case BPF_RB_NODE:
+ return sizeof(struct bpf_rb_node);
+ case BPF_REFCOUNT:
+ return sizeof(struct bpf_refcount);
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
+static inline u32 btf_field_type_align(enum btf_field_type type)
+{
+ switch (type) {
+ case BPF_SPIN_LOCK:
+ return __alignof__(struct bpf_spin_lock);
+ case BPF_TIMER:
+ return __alignof__(struct bpf_timer);
+ case BPF_KPTR_UNREF:
+ case BPF_KPTR_REF:
+ return __alignof__(u64);
+ case BPF_LIST_HEAD:
+ return __alignof__(struct bpf_list_head);
+ case BPF_LIST_NODE:
+ return __alignof__(struct bpf_list_node);
+ case BPF_RB_ROOT:
+ return __alignof__(struct bpf_rb_root);
+ case BPF_RB_NODE:
+ return __alignof__(struct bpf_rb_node);
+ case BPF_REFCOUNT:
+ return __alignof__(struct bpf_refcount);
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
+static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
+{
+ memset(addr, 0, field->size);
+
+ switch (field->type) {
+ case BPF_REFCOUNT:
+ refcount_set((refcount_t *)addr, 1);
+ break;
+ case BPF_RB_NODE:
+ RB_CLEAR_NODE((struct rb_node *)addr);
+ break;
+ case BPF_LIST_HEAD:
+ case BPF_LIST_NODE:
+ INIT_LIST_HEAD((struct list_head *)addr);
+ break;
+ case BPF_RB_ROOT:
+ /* RB_ROOT_CACHED 0-inits, no need to do anything after memset */
+ case BPF_SPIN_LOCK:
+ case BPF_TIMER:
+ case BPF_KPTR_UNREF:
+ case BPF_KPTR_REF:
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+}
+
+static inline bool btf_record_has_field(const struct btf_record *rec, enum btf_field_type type)
+{
+ if (IS_ERR_OR_NULL(rec))
+ return false;
+ return rec->field_mask & type;
+}
+
+static inline void bpf_obj_init(const struct btf_record *rec, void *obj)
+{
+ int i;
+
+ if (IS_ERR_OR_NULL(rec))
+ return;
+ for (i = 0; i < rec->cnt; i++)
+ bpf_obj_init_field(&rec->fields[i], obj + rec->fields[i].offset);
+}
+
+/* 'dst' must be a temporary buffer and should not point to memory that is being
+ * used in parallel by a bpf program or bpf syscall, otherwise the access from
+ * the bpf program or bpf syscall may be corrupted by the reinitialization,
+ * leading to weird problems. Even if 'dst' is newly allocated by the bpf memory
+ * allocator, it is still possible for 'dst' to be used in parallel by a bpf
+ * program or bpf syscall.
+ */
+static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
+{
+ bpf_obj_init(map->record, dst);
+}
+
+/* memcpy that is used with 8-byte aligned pointers and a size that is a
+ * multiple of 8, forced to use 'long' reads/writes to try to atomically
+ * copy long counters. Best-effort only. No barriers here, since it _will_
+ * race with concurrent updates from BPF programs. Called from the bpf
+ * syscall and mostly used with size 8 or 16 bytes, so ask the compiler to
+ * inline it.
+ */
+static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
+{
+ const long *lsrc = src;
+ long *ldst = dst;
+
+ size /= sizeof(long);
+ while (size--)
+ *ldst++ = *lsrc++;
+}
+
+/* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
+static inline void bpf_obj_memcpy(struct btf_record *rec,
+ void *dst, void *src, u32 size,
+ bool long_memcpy)
+{
+ u32 curr_off = 0;
+ int i;
+
+ if (IS_ERR_OR_NULL(rec)) {
+ if (long_memcpy)
+ bpf_long_memcpy(dst, src, round_up(size, 8));
+ else
+ memcpy(dst, src, size);
+ return;
+ }
+
+ for (i = 0; i < rec->cnt; i++) {
+ u32 next_off = rec->fields[i].offset;
+ u32 sz = next_off - curr_off;
+
+ memcpy(dst + curr_off, src + curr_off, sz);
+ curr_off += rec->fields[i].size + sz;
+ }
+ memcpy(dst + curr_off, src + curr_off, size - curr_off);
+}
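
[Editor's note: the offset arithmetic in bpf_obj_memcpy above is easy to misread, so here is a self-contained user-space model of the same hole-skipping copy. The 32-byte value with a single 8-byte special field at offset 16 is made up for illustration.]

#include <stdio.h>
#include <string.h>

struct field { unsigned int offset, size; };

/* Copy everything except the given fields, which stay untouched in dst.
 * Mirrors the loop above: copy up to each field, then jump past it. */
static void obj_memcpy(char *dst, const char *src, unsigned int size,
		       const struct field *fields, unsigned int cnt)
{
	unsigned int curr_off = 0;

	for (unsigned int i = 0; i < cnt; i++) {
		unsigned int sz = fields[i].offset - curr_off;

		memcpy(dst + curr_off, src + curr_off, sz);
		curr_off += fields[i].size + sz;	/* skip the field */
	}
	memcpy(dst + curr_off, src + curr_off, size - curr_off);
}

int main(void)
{
	struct field f = { .offset = 16, .size = 8 };
	char src[32], dst[32];

	memset(src, 0xab, sizeof(src));
	memset(dst, 0x00, sizeof(dst));
	obj_memcpy(dst, src, sizeof(dst), &f, 1);

	/* Bytes [16, 24) stay zero; everything else became 0xab. */
	printf("dst[15]=%02x dst[16]=%02x dst[24]=%02x\n",
	       (unsigned char)dst[15], (unsigned char)dst[16],
	       (unsigned char)dst[24]);
	return 0;
}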
+
+static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
+{
+ bpf_obj_memcpy(map->record, dst, src, map->value_size, false);
+}
+
+static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src)
+{
+ bpf_obj_memcpy(map->record, dst, src, map->value_size, true);
+}
+
+static inline void bpf_obj_memzero(struct btf_record *rec, void *dst, u32 size)
+{
+ u32 curr_off = 0;
+ int i;
+
+ if (IS_ERR_OR_NULL(rec)) {
+ memset(dst, 0, size);
+ return;
+ }
+
+ for (i = 0; i < rec->cnt; i++) {
+ u32 next_off = rec->fields[i].offset;
+ u32 sz = next_off - curr_off;
+
+ memset(dst + curr_off, 0, sz);
+ curr_off += rec->fields[i].size + sz;
+ }
+ memset(dst + curr_off, 0, size - curr_off);
+}
+
+static inline void zero_map_value(struct bpf_map *map, void *dst)
+{
+ bpf_obj_memzero(map->record, dst, map->value_size);
+}
+
+void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
+ bool lock_src);
+void bpf_timer_cancel_and_free(void *timer);
+void bpf_list_head_free(const struct btf_field *field, void *list_head,
+ struct bpf_spin_lock *spin_lock);
+void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
+ struct bpf_spin_lock *spin_lock);
+
+
+int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
+
+struct bpf_offload_dev;
+struct bpf_offloaded_map;
+
+struct bpf_map_dev_ops {
+ int (*map_get_next_key)(struct bpf_offloaded_map *map,
+ void *key, void *next_key);
+ int (*map_lookup_elem)(struct bpf_offloaded_map *map,
+ void *key, void *value);
+ int (*map_update_elem)(struct bpf_offloaded_map *map,
+ void *key, void *value, u64 flags);
+ int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
+};
+
+struct bpf_offloaded_map {
+ struct bpf_map map;
+ struct net_device *netdev;
+ const struct bpf_map_dev_ops *dev_ops;
+ void *dev_priv;
+ struct list_head offloads;
+};
+
+static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
+{
+ return container_of(map, struct bpf_offloaded_map, map);
+}
+
+static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
+{
+ return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
+}
+
+static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
+{
+ return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
+ map->ops->map_seq_show_elem;
+}
+
+int map_check_no_btf(const struct bpf_map *map,
+ const struct btf *btf,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type);
+
+bool bpf_map_meta_equal(const struct bpf_map *meta0,
+ const struct bpf_map *meta1);
+
+extern const struct bpf_map_ops bpf_map_offload_ops;
+
+/* bpf_type_flag contains a set of flags that are applicable to the values of
+ * arg_type, ret_type and reg_type. For example, a pointer value may be null,
+ * or a memory region may be read-only. We classify types into two categories:
+ * base types and extended types. Extended types are base types combined with
+ * a type flag.
+ *
+ * Currently there are no more than 32 base types in arg_type, ret_type and
+ * reg_type.
+ */
+#define BPF_BASE_TYPE_BITS 8
+
+enum bpf_type_flag {
+ /* PTR may be NULL. */
+ PTR_MAYBE_NULL = BIT(0 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is read-only. When applied on bpf_arg, it indicates the arg is
+ * compatible with both mutable and immutable memory.
+ */
+ MEM_RDONLY = BIT(1 + BPF_BASE_TYPE_BITS),
+
+ /* MEM points to BPF ring buffer reservation. */
+ MEM_RINGBUF = BIT(2 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is in user address space. */
+ MEM_USER = BIT(3 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is a percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged
+ * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In
+ * order to drop this tag, it must be passed into bpf_per_cpu_ptr()
+ * or bpf_this_cpu_ptr(), which will return the pointer corresponding
+ * to the specified cpu.
+ */
+ MEM_PERCPU = BIT(4 + BPF_BASE_TYPE_BITS),
+
+ /* Indicates that the argument will be released. */
+ OBJ_RELEASE = BIT(5 + BPF_BASE_TYPE_BITS),
+
+ /* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark
+ * unreferenced and referenced kptr loaded from map value using a load
+ * instruction, so that they can only be dereferenced but not escape the
+ * BPF program into the kernel (i.e. cannot be passed as arguments to
+ * kfunc or bpf helpers).
+ */
+ PTR_UNTRUSTED = BIT(6 + BPF_BASE_TYPE_BITS),
+
+ MEM_UNINIT = BIT(7 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to memory local to the bpf program. */
+ DYNPTR_TYPE_LOCAL = BIT(8 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to a kernel-produced ringbuf record. */
+ DYNPTR_TYPE_RINGBUF = BIT(9 + BPF_BASE_TYPE_BITS),
+
+ /* Size is known at compile time. */
+ MEM_FIXED_SIZE = BIT(10 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is of an allocated object of type in program BTF. This is used to
+ * tag PTR_TO_BTF_ID allocated using bpf_obj_new.
+ */
+ MEM_ALLOC = BIT(11 + BPF_BASE_TYPE_BITS),
+
+ /* PTR was passed from the kernel in a trusted context, and may be
+ * passed to KF_TRUSTED_ARGS kfuncs or BPF helper functions.
+ * Confusingly, this is _not_ the opposite of PTR_UNTRUSTED above.
+ * PTR_UNTRUSTED refers to a kptr that was read directly from a map
+ * without invoking bpf_kptr_xchg(). What we really need to know is
+ * whether a pointer is safe to pass to a kfunc or BPF helper function.
+ * While PTR_UNTRUSTED pointers are unsafe to pass to kfuncs and BPF
+ * helpers, they do not cover all possible instances of unsafe
+ * pointers. For example, a pointer that was obtained from walking a
+ * struct will _not_ get the PTR_UNTRUSTED type modifier, despite the
+ * fact that it may be NULL, invalid, etc. This is due to backwards
+ * compatibility requirements, as this was the behavior that was first
+ * introduced when kptrs were added. The behavior is now considered
+ * deprecated, and PTR_UNTRUSTED will eventually be removed.
+ *
+ * PTR_TRUSTED, on the other hand, is a pointer that the kernel
+ * guarantees to be valid and safe to pass to kfuncs and BPF helpers.
+ * For example, pointers passed to tracepoint arguments are considered
+ * PTR_TRUSTED, as are pointers that are passed to struct_ops
+ * callbacks. As alluded to above, pointers that are obtained from
+ * walking PTR_TRUSTED pointers are _not_ trusted. For example, if a
+ * struct task_struct *task is PTR_TRUSTED, then accessing
+ * task->last_wakee will lose the PTR_TRUSTED modifier when it's stored
+ * in a BPF register. Similarly, pointers passed to certain programs
+ * types such as kretprobes are not guaranteed to be valid, as they may
+ * for example contain an object that was recently freed.
+ */
+ PTR_TRUSTED = BIT(12 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is tagged with rcu and memory access needs rcu_read_lock protection. */
+ MEM_RCU = BIT(13 + BPF_BASE_TYPE_BITS),
+
+ /* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning.
+ * Currently only valid for linked-list and rbtree nodes.
+ */
+ NON_OWN_REF = BIT(14 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to sk_buff */
+ DYNPTR_TYPE_SKB = BIT(15 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to xdp_buff */
+ DYNPTR_TYPE_XDP = BIT(16 + BPF_BASE_TYPE_BITS),
+
+ __BPF_TYPE_FLAG_MAX,
+ __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
};
+#define DYNPTR_TYPE_FLAG_MASK (DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF | DYNPTR_TYPE_SKB \
+ | DYNPTR_TYPE_XDP)
+
+/* Max number of base types. */
+#define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS)
+
+/* Max number of all types. */
+#define BPF_TYPE_LIMIT (__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))
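
[Editor's note: a flagged type decomposes back into its base type and flag bits by masking against BPF_BASE_TYPE_BITS. The kernel keeps equivalent helpers (base_type()/type_flag()) in include/linux/bpf_verifier.h; the sketch below reproduces the idea in standalone C with illustrative values.]

#include <stdio.h>

#define BPF_BASE_TYPE_BITS	8
#define BPF_BASE_TYPE_MASK	((1u << BPF_BASE_TYPE_BITS) - 1)
#define PTR_MAYBE_NULL		(1u << (0 + BPF_BASE_TYPE_BITS))

/* Low 8 bits carry the base type; everything above is a flag bitmask. */
static unsigned int base_type(unsigned int type)
{
	return type & BPF_BASE_TYPE_MASK;
}

static unsigned int type_flag(unsigned int type)
{
	return type & ~BPF_BASE_TYPE_MASK;
}

int main(void)
{
	unsigned int ret_ptr_to_map_value = 2;	/* illustrative base value */
	unsigned int t = PTR_MAYBE_NULL | ret_ptr_to_map_value;

	/* prints "base=2 flags=0x100" */
	printf("base=%u flags=%#x\n", base_type(t), type_flag(t));
	return 0;
}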
+
/* function argument constraints */
enum bpf_arg_type {
ARG_DONTCARE = 0, /* unused argument in helper function */
@@ -66,28 +674,84 @@ enum bpf_arg_type {
ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
- /* the following constraints used to prototype bpf_memcmp() and other
- * functions that access data on eBPF program stack
+ /* Used to prototype bpf_memcmp() and other functions that access data
+ * on the eBPF program stack
*/
ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */
- ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized,
- * helper function must fill all bytes or clear
- * them in error case.
- */
ARG_CONST_SIZE, /* number of bytes accessed from memory */
ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */
ARG_PTR_TO_CTX, /* pointer to context */
ARG_ANYTHING, /* any (initialized) argument is ok */
+ ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
+ ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
+ ARG_PTR_TO_INT, /* pointer to int */
+ ARG_PTR_TO_LONG, /* pointer to long */
+ ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */
+ ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */
+ ARG_PTR_TO_RINGBUF_MEM, /* pointer to dynamically reserved ringbuf memory */
+ ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */
+ ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
+ ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */
+ ARG_PTR_TO_FUNC, /* pointer to a bpf program function */
+ ARG_PTR_TO_STACK, /* pointer to stack */
+ ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */
+ ARG_PTR_TO_TIMER, /* pointer to bpf_timer */
+ ARG_PTR_TO_KPTR, /* pointer to referenced kptr */
+ ARG_PTR_TO_DYNPTR, /* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */
+ __BPF_ARG_TYPE_MAX,
+
+ /* Extended arg_types. */
+ ARG_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
+ ARG_PTR_TO_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
+ ARG_PTR_TO_CTX_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
+ ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
+ ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
+ ARG_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
+ /* pointer to memory does not need to be initialized, helper function must fill
+ * all bytes or clear them in error case.
+ */
+ ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | ARG_PTR_TO_MEM,
+ /* Pointer to valid memory of size known at compile time. */
+ ARG_PTR_TO_FIXED_SIZE_MEM = MEM_FIXED_SIZE | ARG_PTR_TO_MEM,
+
+ /* This must be the last entry. Its purpose is to ensure the enum is
+ * wide enough to hold the higher bits reserved for bpf_type_flag.
+ */
+ __BPF_ARG_TYPE_LIMIT = BPF_TYPE_LIMIT,
};
+static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* type of values returned from helper functions */
enum bpf_return_type {
RET_INTEGER, /* function returns integer */
RET_VOID, /* function doesn't return anything */
- RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
+ RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
+ RET_PTR_TO_SOCKET, /* returns a pointer to a socket */
+ RET_PTR_TO_TCP_SOCK, /* returns a pointer to a tcp_sock */
+ RET_PTR_TO_SOCK_COMMON, /* returns a pointer to a sock_common */
+ RET_PTR_TO_MEM, /* returns a pointer to memory */
+ RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */
+ RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */
+ __BPF_RET_TYPE_MAX,
+
+ /* Extended ret_types. */
+ RET_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
+ RET_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
+ RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
+ RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
+ RET_PTR_TO_RINGBUF_MEM_OR_NULL = PTR_MAYBE_NULL | MEM_RINGBUF | RET_PTR_TO_MEM,
+ RET_PTR_TO_DYNPTR_MEM_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MEM,
+ RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,
+ RET_PTR_TO_BTF_ID_TRUSTED = PTR_TRUSTED | RET_PTR_TO_BTF_ID,
+
+ /* This must be the last entry. Its purpose is to ensure the enum is
+ * wide enough to hold the higher bits reserved for bpf_type_flag.
+ */
+ __BPF_RET_TYPE_LIMIT = BPF_TYPE_LIMIT,
};
+static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
* to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
@@ -97,12 +761,38 @@ struct bpf_func_proto {
u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool gpl_only;
bool pkt_access;
+ bool might_sleep;
enum bpf_return_type ret_type;
- enum bpf_arg_type arg1_type;
- enum bpf_arg_type arg2_type;
- enum bpf_arg_type arg3_type;
- enum bpf_arg_type arg4_type;
- enum bpf_arg_type arg5_type;
+ union {
+ struct {
+ enum bpf_arg_type arg1_type;
+ enum bpf_arg_type arg2_type;
+ enum bpf_arg_type arg3_type;
+ enum bpf_arg_type arg4_type;
+ enum bpf_arg_type arg5_type;
+ };
+ enum bpf_arg_type arg_type[5];
+ };
+ union {
+ struct {
+ u32 *arg1_btf_id;
+ u32 *arg2_btf_id;
+ u32 *arg3_btf_id;
+ u32 *arg4_btf_id;
+ u32 *arg5_btf_id;
+ };
+ u32 *arg_btf_id[5];
+ struct {
+ size_t arg1_size;
+ size_t arg2_size;
+ size_t arg3_size;
+ size_t arg4_size;
+ size_t arg5_size;
+ };
+ size_t arg_size[5];
+ };
+ int *ret_btf_id; /* return value btf_id */
+ bool (*allowed)(const struct bpf_prog *prog);
};
/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
@@ -117,45 +807,80 @@ enum bpf_access_type {
};
/* types of values stored in eBPF registers */
+/* Pointer types represent:
+ * pointer
+ * pointer + imm
+ * pointer + (u16) var
+ * pointer + (u16) var + imm
+ * if (range > 0) then [ptr, ptr + range - off) is safe to access
+ * if (id > 0) means that some 'var' was added
+ * if (off > 0) means that 'imm' was added
+ */
enum bpf_reg_type {
NOT_INIT = 0, /* nothing was written into register */
- UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */
+ SCALAR_VALUE, /* reg doesn't contain a valid pointer */
PTR_TO_CTX, /* reg points to bpf_context */
CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
PTR_TO_MAP_VALUE, /* reg points to map element value */
- PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
- FRAME_PTR, /* reg == frame_pointer */
- PTR_TO_STACK, /* reg == frame_pointer + imm */
- CONST_IMM, /* constant integer value */
-
- /* PTR_TO_PACKET represents:
- * skb->data
- * skb->data + imm
- * skb->data + (u16) var
- * skb->data + (u16) var + imm
- * if (range > 0) then [ptr, ptr + range - off) is safe to access
- * if (id > 0) means that some 'var' was added
- * if (off > 0) menas that 'imm' was added
- */
- PTR_TO_PACKET,
+ PTR_TO_MAP_KEY, /* reg points to a map element key */
+ PTR_TO_STACK, /* reg == frame_pointer + offset */
+ PTR_TO_PACKET_META, /* skb->data - meta_len */
+ PTR_TO_PACKET, /* reg points to skb->data */
PTR_TO_PACKET_END, /* skb->data + headlen */
+ PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */
+ PTR_TO_SOCKET, /* reg points to struct bpf_sock */
+ PTR_TO_SOCK_COMMON, /* reg points to sock_common */
+ PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */
+ PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */
+ PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */
+ /* PTR_TO_BTF_ID points to a kernel struct that does not need
+ * to be null checked by the BPF program. This does not imply the
+ * pointer is _not_ null and in practice this can easily be a null
+ * pointer when reading pointer chains. The assumption is that the
+ * program context will handle a null pointer dereference, typically via
+ * fault handling. The verifier must keep this in mind and can make no
+ * assumptions about null or non-null when doing branch analysis.
+ * Further, when passed into helpers, the helpers cannot, without
+ * additional context, assume the value is non-null.
+ */
+ PTR_TO_BTF_ID,
+ /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
+ * been checked for null. Used primarily to inform the verifier
+ * an explicit null check is required for this struct.
+ */
+ PTR_TO_MEM, /* reg points to valid memory region */
+ PTR_TO_BUF, /* reg points to a read/write buffer */
+ PTR_TO_FUNC, /* reg points to a bpf program function */
+ CONST_PTR_TO_DYNPTR, /* reg points to a const struct bpf_dynptr */
+ __BPF_REG_TYPE_MAX,
+
+ /* Extended reg_types. */
+ PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
+ PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET,
+ PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
+ PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
+ PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID,
- /* PTR_TO_MAP_VALUE_ADJ is used for doing pointer math inside of a map
- * elem value. We only allow this if we can statically verify that
- * access from this register are going to fall within the size of the
- * map element.
+ /* This must be the last entry. Its purpose is to ensure the enum is
+ * wide enough to hold the higher bits reserved for bpf_type_flag.
*/
- PTR_TO_MAP_VALUE_ADJ,
+ __BPF_REG_TYPE_LIMIT = BPF_TYPE_LIMIT,
};
-
-struct bpf_prog;
+static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* The information passed from prog-specific *_is_valid_access
* back to the verifier.
*/
struct bpf_insn_access_aux {
enum bpf_reg_type reg_type;
- int ctx_field_size;
+ union {
+ int ctx_field_size;
+ struct {
+ struct btf *btf;
+ u32 btf_id;
+ };
+ };
+ struct bpf_verifier_log *log; /* for verbose logs */
};
static inline void
@@ -164,61 +889,820 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
aux->ctx_field_size = size;
}
+static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
+ insn->src_reg == BPF_PSEUDO_FUNC;
+}
+
+struct bpf_prog_ops {
+ int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+};
+
+struct bpf_reg_state;
struct bpf_verifier_ops {
/* return eBPF function prototype for verification */
- const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id);
+ const struct bpf_func_proto *
+ (*get_func_proto)(enum bpf_func_id func_id,
+ const struct bpf_prog *prog);
/* return true if 'size' wide access at offset 'off' within bpf_context
* with 'type' (read or write) is allowed
*/
bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
+ const struct bpf_prog *prog,
struct bpf_insn_access_aux *info);
int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
const struct bpf_prog *prog);
+ int (*gen_ld_abs)(const struct bpf_insn *orig,
+ struct bpf_insn *insn_buf);
u32 (*convert_ctx_access)(enum bpf_access_type type,
const struct bpf_insn *src,
struct bpf_insn *dst,
struct bpf_prog *prog, u32 *target_size);
- int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
- union bpf_attr __user *uattr);
+ int (*btf_struct_access)(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ int off, int size);
+};
+
+struct bpf_prog_offload_ops {
+ /* verifier basic callbacks */
+ int (*insn_hook)(struct bpf_verifier_env *env,
+ int insn_idx, int prev_insn_idx);
+ int (*finalize)(struct bpf_verifier_env *env);
+ /* verifier optimization callbacks (called after .finalize) */
+ int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
+ struct bpf_insn *insn);
+ int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
+ /* program management callbacks */
+ int (*prepare)(struct bpf_prog *prog);
+ int (*translate)(struct bpf_prog *prog);
+ void (*destroy)(struct bpf_prog *prog);
+};
+
+struct bpf_prog_offload {
+ struct bpf_prog *prog;
+ struct net_device *netdev;
+ struct bpf_offload_dev *offdev;
+ void *dev_priv;
+ struct list_head offloads;
+ bool dev_state;
+ bool opt_failed;
+ void *jited_image;
+ u32 jited_len;
+};
+
+enum bpf_cgroup_storage_type {
+ BPF_CGROUP_STORAGE_SHARED,
+ BPF_CGROUP_STORAGE_PERCPU,
+ __BPF_CGROUP_STORAGE_MAX
+};
+
+#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
+
+/* The longest tracepoint has 12 args.
+ * See include/trace/bpf_probe.h
+ */
+#define MAX_BPF_FUNC_ARGS 12
+
+/* The maximum number of arguments passed through registers
+ * a single function may have.
+ */
+#define MAX_BPF_FUNC_REG_ARGS 5
+
+/* The argument is a structure. */
+#define BTF_FMODEL_STRUCT_ARG BIT(0)
+
+/* The argument is signed. */
+#define BTF_FMODEL_SIGNED_ARG BIT(1)
+
+struct btf_func_model {
+ u8 ret_size;
+ u8 ret_flags;
+ u8 nr_args;
+ u8 arg_size[MAX_BPF_FUNC_ARGS];
+ u8 arg_flags[MAX_BPF_FUNC_ARGS];
+};
+
+/* Restore arguments before returning from trampoline to let original function
+ * continue executing. This flag is used for fentry progs when there are no
+ * fexit progs.
+ */
+#define BPF_TRAMP_F_RESTORE_REGS BIT(0)
+/* Call original function after fentry progs, but before fexit progs.
+ * Makes sense for fentry/fexit, normal calls and indirect calls.
+ */
+#define BPF_TRAMP_F_CALL_ORIG BIT(1)
+/* Skip current frame and return to parent. Makes sense for fentry/fexit
+ * programs only. Should not be used with normal calls and indirect calls.
+ */
+#define BPF_TRAMP_F_SKIP_FRAME BIT(2)
+/* Store IP address of the caller on the trampoline stack,
+ * so it's available for trampoline's programs.
+ */
+#define BPF_TRAMP_F_IP_ARG BIT(3)
+/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
+#define BPF_TRAMP_F_RET_FENTRY_RET BIT(4)
+
+/* Get original function from stack instead of from provided direct address.
+ * Makes sense for trampolines with fexit or fmod_ret programs.
+ */
+#define BPF_TRAMP_F_ORIG_STACK BIT(5)
+
+/* This trampoline is on a function with another ftrace_ops with IPMODIFY,
+ * e.g., a live patch. This flag is set and cleared by ftrace callbacks.
+ */
+#define BPF_TRAMP_F_SHARE_IPMODIFY BIT(6)
+
+/* Each "call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit"
+ * sequence is ~50 bytes on x86.
+ */
+enum {
+#if defined(__s390x__)
+ BPF_MAX_TRAMP_LINKS = 27,
+#else
+ BPF_MAX_TRAMP_LINKS = 38,
+#endif
+};
+
+struct bpf_tramp_links {
+ struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
+ int nr_links;
+};
+
+struct bpf_tramp_run_ctx;
+
+/* Different use cases for BPF trampoline:
+ * 1. replace nop at the function entry (kprobe equivalent)
+ * flags = BPF_TRAMP_F_RESTORE_REGS
+ * fentry = a set of programs to run before returning from trampoline
+ *
+ * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
+ * flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
+ * orig_call = fentry_ip + MCOUNT_INSN_SIZE
+ * fentry = a set of programs to run before calling the original function
+ * fexit = a set of programs to run after the original function
+ *
+ * 3. replace direct call instruction anywhere in the function body
+ * or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
+ * With flags = 0
+ * fentry = a set of programs to run before returning from trampoline
+ * With flags = BPF_TRAMP_F_CALL_ORIG
+ * orig_call = original callback addr or direct function addr
+ * fentry = a set of programs to run before calling the original function
+ * fexit = a set of programs to run after the original function
+ */
+struct bpf_tramp_image;
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
+ const struct btf_func_model *m, u32 flags,
+ struct bpf_tramp_links *tlinks,
+ void *orig_call);
+u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
+ struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
+ struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
+void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
+typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog,
+ struct bpf_tramp_run_ctx *run_ctx);
+typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start,
+ struct bpf_tramp_run_ctx *run_ctx);
+bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog);
+bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog);
+
+struct bpf_ksym {
+ unsigned long start;
+ unsigned long end;
+ char name[KSYM_NAME_LEN];
+ struct list_head lnode;
+ struct latch_tree_node tnode;
+ bool prog;
+};
+
+enum bpf_tramp_prog_type {
+ BPF_TRAMP_FENTRY,
+ BPF_TRAMP_FEXIT,
+ BPF_TRAMP_MODIFY_RETURN,
+ BPF_TRAMP_MAX,
+ BPF_TRAMP_REPLACE, /* more than MAX */
+};
+
+struct bpf_tramp_image {
+ void *image;
+ struct bpf_ksym ksym;
+ struct percpu_ref pcref;
+ void *ip_after_call;
+ void *ip_epilogue;
+ union {
+ struct rcu_head rcu;
+ struct work_struct work;
+ };
+};
+
+struct bpf_trampoline {
+ /* hlist for trampoline_table */
+ struct hlist_node hlist;
+ struct ftrace_ops *fops;
+ /* serializes access to fields of this trampoline */
+ struct mutex mutex;
+ refcount_t refcnt;
+ u32 flags;
+ u64 key;
+ struct {
+ struct btf_func_model model;
+ void *addr;
+ bool ftrace_managed;
+ } func;
+ /* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
+ * program by replacing one of its functions. func.addr is the address
+ * of the function it replaced.
+ */
+ struct bpf_prog *extension_prog;
+ /* list of BPF programs using this trampoline */
+ struct hlist_head progs_hlist[BPF_TRAMP_MAX];
+ /* Number of attached programs. A counter per kind. */
+ int progs_cnt[BPF_TRAMP_MAX];
+ /* Executable image of trampoline */
+ struct bpf_tramp_image *cur_image;
+ u64 selector;
+ struct module *mod;
};
+struct bpf_attach_target_info {
+ struct btf_func_model fmodel;
+ long tgt_addr;
+ struct module *tgt_mod;
+ const char *tgt_name;
+ const struct btf_type *tgt_type;
+};
+
+#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */
+
+struct bpf_dispatcher_prog {
+ struct bpf_prog *prog;
+ refcount_t users;
+};
+
+struct bpf_dispatcher {
+ /* dispatcher mutex */
+ struct mutex mutex;
+ void *func;
+ struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
+ int num_progs;
+ void *image;
+ void *rw_image;
+ u32 image_off;
+ struct bpf_ksym ksym;
+#ifdef CONFIG_HAVE_STATIC_CALL
+ struct static_call_key *sc_key;
+ void *sc_tramp;
+#endif
+};
+
+static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
+ const void *ctx,
+ const struct bpf_insn *insnsi,
+ bpf_func_t bpf_func)
+{
+ return bpf_func(ctx, insnsi);
+}
+
+/* the implementation of the opaque uapi struct bpf_dynptr */
+struct bpf_dynptr_kern {
+ void *data;
+ /* Size represents the number of usable bytes of dynptr data.
+ * If, for example, the offset is at 4 for a local dynptr whose data is
+ * of type u64, the number of usable bytes is 4.
+ *
+ * The upper 8 bits are reserved. The layout is as follows:
+ * Bits 0 - 23 = size
+ * Bits 24 - 30 = dynptr type
+ * Bit 31 = whether the dynptr is read-only
+ */
+ u32 size;
+ u32 offset;
+} __aligned(8);
+
+enum bpf_dynptr_type {
+ BPF_DYNPTR_TYPE_INVALID,
+ /* Points to memory that is local to the bpf program */
+ BPF_DYNPTR_TYPE_LOCAL,
+ /* Underlying data is a ringbuf record */
+ BPF_DYNPTR_TYPE_RINGBUF,
+ /* Underlying data is a sk_buff */
+ BPF_DYNPTR_TYPE_SKB,
+ /* Underlying data is a xdp_buff */
+ BPF_DYNPTR_TYPE_XDP,
+};
+
+int bpf_dynptr_check_size(u32 size);
+u32 bpf_dynptr_get_size(const struct bpf_dynptr_kern *ptr);
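
[Editor's note: a small standalone decoder for the packed 'size' word, following the bit layout documented in struct bpf_dynptr_kern above (bits 0-23 size, 24-30 type, 31 read-only). The mask and shift names are this sketch's own; the kernel's actual constants live in kernel/bpf/helpers.c and may be laid out differently within the reserved byte.]

#include <stdint.h>
#include <stdio.h>

#define DYNPTR_SIZE_MASK	0x00ffffffu
#define DYNPTR_TYPE_SHIFT	24
#define DYNPTR_TYPE_MASK	0x7fu
#define DYNPTR_RDONLY_BIT	(1u << 31)

int main(void)
{
	/* a read-only dynptr of type 2 with 4096 usable bytes */
	uint32_t word = DYNPTR_RDONLY_BIT | (2u << DYNPTR_TYPE_SHIFT) | 4096;

	/* prints "size=4096 type=2 rdonly=1" */
	printf("size=%u type=%u rdonly=%u\n",
	       word & DYNPTR_SIZE_MASK,
	       (word >> DYNPTR_TYPE_SHIFT) & DYNPTR_TYPE_MASK,
	       !!(word & DYNPTR_RDONLY_BIT));
	return 0;
}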
+
+#ifdef CONFIG_BPF_JIT
+int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
+int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
+struct bpf_trampoline *bpf_trampoline_get(u64 key,
+ struct bpf_attach_target_info *tgt_info);
+void bpf_trampoline_put(struct bpf_trampoline *tr);
+int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
+
+/*
+ * When the architecture supports STATIC_CALL, replace the bpf_dispatcher_fn
+ * indirection with a direct call to the bpf program. If the architecture does
+ * not have STATIC_CALL, avoid a double indirection.
+ */
+#ifdef CONFIG_HAVE_STATIC_CALL
+
+#define __BPF_DISPATCHER_SC_INIT(_name) \
+ .sc_key = &STATIC_CALL_KEY(_name), \
+ .sc_tramp = STATIC_CALL_TRAMP_ADDR(_name),
+
+#define __BPF_DISPATCHER_SC(name) \
+ DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func)
+
+#define __BPF_DISPATCHER_CALL(name) \
+ static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func)
+
+#define __BPF_DISPATCHER_UPDATE(_d, _new) \
+ __static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new))
+
+#else
+#define __BPF_DISPATCHER_SC_INIT(name)
+#define __BPF_DISPATCHER_SC(name)
+#define __BPF_DISPATCHER_CALL(name) bpf_func(ctx, insnsi)
+#define __BPF_DISPATCHER_UPDATE(_d, _new)
+#endif
+
+#define BPF_DISPATCHER_INIT(_name) { \
+ .mutex = __MUTEX_INITIALIZER(_name.mutex), \
+ .func = &_name##_func, \
+ .progs = {}, \
+ .num_progs = 0, \
+ .image = NULL, \
+ .image_off = 0, \
+ .ksym = { \
+ .name = #_name, \
+ .lnode = LIST_HEAD_INIT(_name.ksym.lnode), \
+ }, \
+ __BPF_DISPATCHER_SC_INIT(_name##_call) \
+}
+
+#define DEFINE_BPF_DISPATCHER(name) \
+ __BPF_DISPATCHER_SC(name); \
+ noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \
+ const void *ctx, \
+ const struct bpf_insn *insnsi, \
+ bpf_func_t bpf_func) \
+ { \
+ return __BPF_DISPATCHER_CALL(name); \
+ } \
+ EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \
+ struct bpf_dispatcher bpf_dispatcher_##name = \
+ BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
+
+#define DECLARE_BPF_DISPATCHER(name) \
+ unsigned int bpf_dispatcher_##name##_func( \
+ const void *ctx, \
+ const struct bpf_insn *insnsi, \
+ bpf_func_t bpf_func); \
+ extern struct bpf_dispatcher bpf_dispatcher_##name;
+
+#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
+#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
+void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
+ struct bpf_prog *to);
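
[Editor's note: a typical pairing of the dispatcher macros above, modeled on the XDP dispatcher in net/core/filter.c; 'my_dispatcher' and 'my_change_prog' are hypothetical names used only for illustration.]

/* In one compilation unit: */
DEFINE_BPF_DISPATCHER(my_dispatcher)

/* In a header visible to callers: */
DECLARE_BPF_DISPATCHER(my_dispatcher)

/* Swapping the dispatched program ('prev' may be NULL on first attach): */
static void my_change_prog(struct bpf_prog *prev, struct bpf_prog *prog)
{
	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(my_dispatcher),
				   prev, prog);
}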
+/* Called only from JIT-enabled code, so there's no need for stubs. */
+void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
+void bpf_image_ksym_del(struct bpf_ksym *ksym);
+void bpf_ksym_add(struct bpf_ksym *ksym);
+void bpf_ksym_del(struct bpf_ksym *ksym);
+int bpf_jit_charge_modmem(u32 size);
+void bpf_jit_uncharge_modmem(u32 size);
+bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
+#else
+static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
+ struct bpf_trampoline *tr)
+{
+ return -ENOTSUPP;
+}
+static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
+ struct bpf_trampoline *tr)
+{
+ return -ENOTSUPP;
+}
+static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
+ struct bpf_attach_target_info *tgt_info)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
+#define DEFINE_BPF_DISPATCHER(name)
+#define DECLARE_BPF_DISPATCHER(name)
+#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
+#define BPF_DISPATCHER_PTR(name) NULL
+static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
+ struct bpf_prog *from,
+ struct bpf_prog *to) {}
+static inline bool is_bpf_image_address(unsigned long address)
+{
+ return false;
+}
+static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
+{
+ return false;
+}
+#endif
+
+struct bpf_func_info_aux {
+ u16 linkage;
+ bool unreliable;
+};
+
+enum bpf_jit_poke_reason {
+ BPF_POKE_REASON_TAIL_CALL,
+};
+
+/* Descriptor of pokes pointing /into/ the JITed image. */
+struct bpf_jit_poke_descriptor {
+ void *tailcall_target;
+ void *tailcall_bypass;
+ void *bypass_addr;
+ void *aux;
+ union {
+ struct {
+ struct bpf_map *map;
+ u32 key;
+ } tail_call;
+ };
+ bool tailcall_target_stable;
+ u8 adj_off;
+ u16 reason;
+ u32 insn_idx;
+};
+
+/* reg_type info for ctx arguments */
+struct bpf_ctx_arg_aux {
+ u32 offset;
+ enum bpf_reg_type reg_type;
+ u32 btf_id;
+};
+
+struct btf_mod_pair {
+ struct btf *btf;
+ struct module *module;
+};
+
+struct bpf_kfunc_desc_tab;
+
struct bpf_prog_aux {
- atomic_t refcnt;
+ atomic64_t refcnt;
u32 used_map_cnt;
+ u32 used_btf_cnt;
u32 max_ctx_offset;
+ u32 max_pkt_offset;
+ u32 max_tp_access;
u32 stack_depth;
u32 id;
- struct latch_tree_node ksym_tnode;
- struct list_head ksym_lnode;
- const struct bpf_verifier_ops *ops;
+ u32 func_cnt; /* used by non-func prog as the number of func progs */
+ u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
+ u32 attach_btf_id; /* in-kernel BTF type id to attach to */
+ u32 ctx_arg_info_size;
+ u32 max_rdonly_access;
+ u32 max_rdwr_access;
+ struct btf *attach_btf;
+ const struct bpf_ctx_arg_aux *ctx_arg_info;
+ struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
+ struct bpf_prog *dst_prog;
+ struct bpf_trampoline *dst_trampoline;
+ enum bpf_prog_type saved_dst_prog_type;
+ enum bpf_attach_type saved_dst_attach_type;
+ bool verifier_zext; /* Zero extensions has been inserted by verifier. */
+ bool dev_bound; /* Program is bound to the netdev. */
+ bool offload_requested; /* Program is bound and offloaded to the netdev. */
+ bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
+ bool func_proto_unreliable;
+ bool sleepable;
+ bool tail_call_reachable;
+ bool xdp_has_frags;
+ /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
+ const struct btf_type *attach_func_proto;
+ /* function name for valid attach_btf_id */
+ const char *attach_func_name;
+ struct bpf_prog **func;
+ void *jit_data; /* JIT specific data. arch dependent */
+ struct bpf_jit_poke_descriptor *poke_tab;
+ struct bpf_kfunc_desc_tab *kfunc_tab;
+ struct bpf_kfunc_btf_tab *kfunc_btf_tab;
+ u32 size_poke_tab;
+ struct bpf_ksym ksym;
+ const struct bpf_prog_ops *ops;
struct bpf_map **used_maps;
+ struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
+ struct btf_mod_pair *used_btfs;
struct bpf_prog *prog;
struct user_struct *user;
+ u64 load_time; /* ns since boottime */
+ u32 verified_insns;
+ int cgroup_atype; /* enum cgroup_bpf_attach_type */
+ struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
+ char name[BPF_OBJ_NAME_LEN];
+#ifdef CONFIG_SECURITY
+ void *security;
+#endif
+ struct bpf_prog_offload *offload;
+ struct btf *btf;
+ struct bpf_func_info *func_info;
+ struct bpf_func_info_aux *func_info_aux;
+ /* bpf_line_info loaded from userspace. linfo->insn_off
+ * has the xlated insn offset.
+ * Both the main and sub prog share the same linfo.
+ * The subprog can access its first linfo by
+ * using the linfo_idx.
+ */
+ struct bpf_line_info *linfo;
+ /* jited_linfo is the jited addr of the linfo. It has a
+ * one to one mapping to linfo:
+ * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
+ * Both the main and sub prog share the same jited_linfo.
+ * The subprog can access its first jited_linfo by
+ * using the linfo_idx.
+ */
+ void **jited_linfo;
+ u32 func_info_cnt;
+ u32 nr_linfo;
+ /* subprog can use linfo_idx to access its first linfo and
+ * jited_linfo.
+ * main prog always has linfo_idx == 0
+ */
+ u32 linfo_idx;
+ struct module *mod;
+ u32 num_exentries;
+ struct exception_table_entry *extable;
union {
struct work_struct work;
struct rcu_head rcu;
};
};
+struct bpf_prog {
+ u16 pages; /* Number of allocated pages */
+ u16 jited:1, /* Is our filter JIT'ed? */
+ jit_requested:1,/* archs need to JIT the prog */
+ gpl_compatible:1, /* Is filter GPL compatible? */
+ cb_access:1, /* Is control block accessed? */
+ dst_needed:1, /* Do we need dst entry? */
+ blinding_requested:1, /* needs constant blinding */
+ blinded:1, /* Was blinded */
+ is_func:1, /* program is a bpf function */
+ kprobe_override:1, /* Do we override a kprobe? */
+ has_callchain_buf:1, /* callchain buffer allocated? */
+ enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
+ call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
+ call_get_func_ip:1, /* Do we call get_func_ip() */
+ tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */
+ enum bpf_prog_type type; /* Type of BPF program */
+ enum bpf_attach_type expected_attach_type; /* For some prog types */
+ u32 len; /* Number of filter blocks */
+ u32 jited_len; /* Size of jited insns in bytes */
+ u8 tag[BPF_TAG_SIZE];
+ struct bpf_prog_stats __percpu *stats;
+ int __percpu *active;
+ unsigned int (*bpf_func)(const void *ctx,
+ const struct bpf_insn *insn);
+ struct bpf_prog_aux *aux; /* Auxiliary fields */
+ struct sock_fprog_kern *orig_prog; /* Original BPF program */
+ /* Instructions for interpreter */
+ union {
+ DECLARE_FLEX_ARRAY(struct sock_filter, insns);
+ DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi);
+ };
+};
+
+struct bpf_array_aux {
+ /* Programs with direct jumps into programs part of this array. */
+ struct list_head poke_progs;
+ struct bpf_map *map;
+ struct mutex poke_mutex;
+ struct work_struct work;
+};
+
+struct bpf_link {
+ atomic64_t refcnt;
+ u32 id;
+ enum bpf_link_type type;
+ const struct bpf_link_ops *ops;
+ struct bpf_prog *prog;
+ struct work_struct work;
+};
+
+struct bpf_link_ops {
+ void (*release)(struct bpf_link *link);
+ void (*dealloc)(struct bpf_link *link);
+ int (*detach)(struct bpf_link *link);
+ int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
+ struct bpf_prog *old_prog);
+ void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
+ int (*fill_link_info)(const struct bpf_link *link,
+ struct bpf_link_info *info);
+ int (*update_map)(struct bpf_link *link, struct bpf_map *new_map,
+ struct bpf_map *old_map);
+};
+
+struct bpf_tramp_link {
+ struct bpf_link link;
+ struct hlist_node tramp_hlist;
+ u64 cookie;
+};
+
+struct bpf_shim_tramp_link {
+ struct bpf_tramp_link link;
+ struct bpf_trampoline *trampoline;
+};
+
+struct bpf_tracing_link {
+ struct bpf_tramp_link link;
+ enum bpf_attach_type attach_type;
+ struct bpf_trampoline *trampoline;
+ struct bpf_prog *tgt_prog;
+};
+
+struct bpf_link_primer {
+ struct bpf_link *link;
+ struct file *file;
+ int fd;
+ u32 id;
+};
+
+struct bpf_struct_ops_value;
+struct btf_member;
+
+#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
+struct bpf_struct_ops {
+ const struct bpf_verifier_ops *verifier_ops;
+ int (*init)(struct btf *btf);
+ int (*check_member)(const struct btf_type *t,
+ const struct btf_member *member,
+ const struct bpf_prog *prog);
+ int (*init_member)(const struct btf_type *t,
+ const struct btf_member *member,
+ void *kdata, const void *udata);
+ int (*reg)(void *kdata);
+ void (*unreg)(void *kdata);
+ int (*update)(void *kdata, void *old_kdata);
+ int (*validate)(void *kdata);
+ const struct btf_type *type;
+ const struct btf_type *value_type;
+ const char *name;
+ struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
+ u32 type_id;
+ u32 value_id;
+};
+
+#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
+#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
+const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
+void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
+bool bpf_struct_ops_get(const void *kdata);
+void bpf_struct_ops_put(const void *kdata);
+int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
+ void *value);
+int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
+ struct bpf_tramp_link *link,
+ const struct btf_func_model *model,
+ void *image, void *image_end);
+static inline bool bpf_try_module_get(const void *data, struct module *owner)
+{
+ if (owner == BPF_MODULE_OWNER)
+ return bpf_struct_ops_get(data);
+ else
+ return try_module_get(owner);
+}
+static inline void bpf_module_put(const void *data, struct module *owner)
+{
+ if (owner == BPF_MODULE_OWNER)
+ bpf_struct_ops_put(data);
+ else
+ module_put(owner);
+}
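
/* [Editor's note: illustrative call pattern for the two shims above,
 * modeled on how tcp_congestion_ops pins its ops; the names 'ops' and
 * 'ops_owner' are hypothetical:
 *
 *	if (!bpf_try_module_get(ops, ops_owner))
 *		return -EBUSY;
 *	...use ops...
 *	bpf_module_put(ops, ops_owner);
 *
 * BPF_MODULE_OWNER lets module-backed and struct_ops-backed
 * implementations share one refcounting interface.]
 */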
+int bpf_struct_ops_link_create(union bpf_attr *attr);
+
+#ifdef CONFIG_NET
+/* Defined here to avoid the use of a forward declaration */
+struct bpf_dummy_ops_state {
+ int val;
+};
+
+struct bpf_dummy_ops {
+ int (*test_1)(struct bpf_dummy_ops_state *cb);
+ int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
+ char a3, unsigned long a4);
+ int (*test_sleepable)(struct bpf_dummy_ops_state *cb);
+};
+
+int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+#endif
+#else
+static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
+{
+ return NULL;
+}
+static inline void bpf_struct_ops_init(struct btf *btf,
+ struct bpf_verifier_log *log)
+{
+}
+static inline bool bpf_try_module_get(const void *data, struct module *owner)
+{
+ return try_module_get(owner);
+}
+static inline void bpf_module_put(const void *data, struct module *owner)
+{
+ module_put(owner);
+}
+static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
+ void *key,
+ void *value)
+{
+ return -EINVAL;
+}
+static inline int bpf_struct_ops_link_create(union bpf_attr *attr)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
+
+#if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
+int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
+ int cgroup_atype);
+void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog);
+#else
+static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
+ int cgroup_atype)
+{
+ return -EOPNOTSUPP;
+}
+static inline void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
+{
+}
+#endif
+
struct bpf_array {
struct bpf_map map;
u32 elem_size;
- /* 'ownership' of prog_array is claimed by the first program that
- * is going to use this map or by the first program which FD is stored
- * in the map to make sure that all callers and callees have the same
- * prog_type and JITed flag
- */
- enum bpf_prog_type owner_prog_type;
- bool owner_jited;
+ u32 index_mask;
+ struct bpf_array_aux *aux;
union {
- char value[0] __aligned(8);
- void *ptrs[0] __aligned(8);
- void __percpu *pptrs[0] __aligned(8);
+ DECLARE_FLEX_ARRAY(char, value) __aligned(8);
+ DECLARE_FLEX_ARRAY(void *, ptrs) __aligned(8);
+ DECLARE_FLEX_ARRAY(void __percpu *, pptrs) __aligned(8);
};
};
-#define MAX_TAIL_CALL_CNT 32
+#define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */
+#define MAX_TAIL_CALL_CNT 33
+
+/* Maximum number of loops for bpf_loop and bpf_iter_num.
+ * It's an enum to expose it (and thus make it discoverable) through BTF.
+ */
+enum {
+ BPF_MAX_LOOPS = 8 * 1024 * 1024,
+};
+
+#define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \
+ BPF_F_RDONLY_PROG | \
+ BPF_F_WRONLY | \
+ BPF_F_WRONLY_PROG)
+
+#define BPF_MAP_CAN_READ BIT(0)
+#define BPF_MAP_CAN_WRITE BIT(1)
+
+/* Maximum number of user-producer ring buffer samples that can be drained in
+ * a call to bpf_user_ringbuf_drain().
+ */
+#define BPF_MAX_USER_RINGBUF_SAMPLES (128 * 1024)
+
+static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
+{
+ u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
+
+ /* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
+ * not possible.
+ */
+ if (access_flags & BPF_F_RDONLY_PROG)
+ return BPF_MAP_CAN_READ;
+ else if (access_flags & BPF_F_WRONLY_PROG)
+ return BPF_MAP_CAN_WRITE;
+ else
+ return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
+}
+
+static inline bool bpf_map_flags_access_ok(u32 access_flags)
+{
+ return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
+ (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
+}
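
Together, these two helpers gate program-side map access. A hedged sketch of how a verifier-style check might combine them; the function name is illustrative, not a kernel API:

/* Sketch: reject a program-side write on a map created with
 * BPF_F_RDONLY_PROG. 'map' is assumed to be a valid bpf_map.
 */
static bool example_prog_may_write(struct bpf_map *map)
{
	if (!bpf_map_flags_access_ok(map->map_flags))
		return false;	/* RDONLY_PROG + WRONLY_PROG is invalid */

	return bpf_map_flags_to_cap(map) & BPF_MAP_CAN_WRITE;
}
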
struct bpf_event_entry {
struct perf_event *event;
@@ -227,61 +1711,478 @@ struct bpf_event_entry {
struct rcu_head rcu;
};
-u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
-u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+static inline bool map_type_contains_progs(struct bpf_map *map)
+{
+ return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
+ map->map_type == BPF_MAP_TYPE_DEVMAP ||
+ map->map_type == BPF_MAP_TYPE_CPUMAP;
+}
-bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
+bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
+const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);
typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
unsigned long off, unsigned long len);
+typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
+ const struct bpf_insn *src,
+ struct bpf_insn *dst,
+ struct bpf_prog *prog,
+ u32 *target_size);
u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
-int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
- union bpf_attr __user *uattr);
-int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
- union bpf_attr __user *uattr);
+/* An array of programs to be executed under the RCU read lock.
+ *
+ * Typical usage:
+ * ret = bpf_prog_run_array(rcu_dereference(bpf_prog_array), ctx, bpf_prog_run);
+ *
+ * The structure returned by bpf_prog_array_alloc() should be populated
+ * with program pointers, and the last pointer must be NULL.
+ * The user has to keep a refcnt on each program and make sure the program
+ * is removed from the array before bpf_prog_put().
+ * The 'struct bpf_prog_array *' should only be replaced with xchg()
+ * since other cpus are walking the array of pointers in parallel.
+ */
+struct bpf_prog_array_item {
+ struct bpf_prog *prog;
+ union {
+ struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
+ u64 bpf_cookie;
+ };
+};
+
+struct bpf_prog_array {
+ struct rcu_head rcu;
+ struct bpf_prog_array_item items[];
+};
+
+struct bpf_empty_prog_array {
+ struct bpf_prog_array hdr;
+ struct bpf_prog *null_prog;
+};
+
+/* To avoid allocating an empty bpf_prog_array for cgroups that
+ * don't have a bpf program attached, use the single global
+ * 'bpf_empty_prog_array'. It will not be modified by the caller of
+ * bpf_prog_array_alloc() (since the caller requested prog_cnt == 0);
+ * the pointer should still be 'freed' by bpf_prog_array_free().
+ */
+extern struct bpf_empty_prog_array bpf_empty_prog_array;
+
+struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
+void bpf_prog_array_free(struct bpf_prog_array *progs);
+/* Use when traversal over the bpf_prog_array uses tasks_trace rcu */
+void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs);
+int bpf_prog_array_length(struct bpf_prog_array *progs);
+bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
+int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
+ __u32 __user *prog_ids, u32 cnt);
+
+void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
+ struct bpf_prog *old_prog);
+int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
+int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
+ struct bpf_prog *prog);
+int bpf_prog_array_copy_info(struct bpf_prog_array *array,
+ u32 *prog_ids, u32 request_cnt,
+ u32 *prog_cnt);
+int bpf_prog_array_copy(struct bpf_prog_array *old_array,
+ struct bpf_prog *exclude_prog,
+ struct bpf_prog *include_prog,
+ u64 bpf_cookie,
+ struct bpf_prog_array **new_array);
+
+struct bpf_run_ctx {};
+
+struct bpf_cg_run_ctx {
+ struct bpf_run_ctx run_ctx;
+ const struct bpf_prog_array_item *prog_item;
+ int retval;
+};
+
+struct bpf_trace_run_ctx {
+ struct bpf_run_ctx run_ctx;
+ u64 bpf_cookie;
+};
+
+struct bpf_tramp_run_ctx {
+ struct bpf_run_ctx run_ctx;
+ u64 bpf_cookie;
+ struct bpf_run_ctx *saved_run_ctx;
+};
+
+static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
+{
+ struct bpf_run_ctx *old_ctx = NULL;
+
+#ifdef CONFIG_BPF_SYSCALL
+ old_ctx = current->bpf_ctx;
+ current->bpf_ctx = new_ctx;
+#endif
+ return old_ctx;
+}
+
+static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
+{
+#ifdef CONFIG_BPF_SYSCALL
+ current->bpf_ctx = old_ctx;
+#endif
+}
+
+/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
+#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0)
+/* BPF program asks to set CN on the packet. */
+#define BPF_RET_SET_CN (1 << 0)
+
+typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
+
+static __always_inline u32
+bpf_prog_run_array(const struct bpf_prog_array *array,
+ const void *ctx, bpf_prog_run_fn run_prog)
+{
+ const struct bpf_prog_array_item *item;
+ const struct bpf_prog *prog;
+ struct bpf_run_ctx *old_run_ctx;
+ struct bpf_trace_run_ctx run_ctx;
+ u32 ret = 1;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
+
+ if (unlikely(!array))
+ return ret;
+
+ migrate_disable();
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+ item = &array->items[0];
+ while ((prog = READ_ONCE(item->prog))) {
+ run_ctx.bpf_cookie = item->bpf_cookie;
+ ret &= run_prog(prog, ctx);
+ item++;
+ }
+ bpf_reset_run_ctx(old_run_ctx);
+ migrate_enable();
+ return ret;
+}
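
A fuller sketch of the "typical usage" mentioned in the comment above; 'my_prog_array' is a hypothetical 'struct bpf_prog_array __rcu *' replaced elsewhere with xchg(), and the explicit rcu_read_lock() is what the RCU_LOCKDEP_WARN() in the helper expects:

/* Sketch of a caller; 'my_prog_array' is a placeholder name. */
static u32 example_run_progs(void *ctx)
{
	u32 ret;

	rcu_read_lock();
	ret = bpf_prog_run_array(rcu_dereference(my_prog_array),
				 ctx, bpf_prog_run);
	rcu_read_unlock();
	return ret;	/* 0 if any program in the array returned 0 */
}
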
+
+/* Notes on RCU design for bpf_prog_arrays containing sleepable programs:
+ *
+ * We use the tasks_trace rcu flavor read section to protect the bpf_prog_array
+ * overall. As a result, we must use bpf_prog_array_free_sleepable() so that
+ * the array is freed only after a tasks_trace rcu grace period.
+ *
+ * When a non-sleepable program is inside the array, we take the rcu read
+ * section and disable preemption for that program alone, so it can access
+ * rcu-protected dynamically sized maps.
+ */
+static __always_inline u32
+bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu,
+ const void *ctx, bpf_prog_run_fn run_prog)
+{
+ const struct bpf_prog_array_item *item;
+ const struct bpf_prog *prog;
+ const struct bpf_prog_array *array;
+ struct bpf_run_ctx *old_run_ctx;
+ struct bpf_trace_run_ctx run_ctx;
+ u32 ret = 1;
+
+ might_fault();
+
+ rcu_read_lock_trace();
+ migrate_disable();
+
+ array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held());
+ if (unlikely(!array))
+ goto out;
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+ item = &array->items[0];
+ while ((prog = READ_ONCE(item->prog))) {
+ if (!prog->aux->sleepable)
+ rcu_read_lock();
+
+ run_ctx.bpf_cookie = item->bpf_cookie;
+ ret &= run_prog(prog, ctx);
+ item++;
+
+ if (!prog->aux->sleepable)
+ rcu_read_unlock();
+ }
+ bpf_reset_run_ctx(old_run_ctx);
+out:
+ migrate_enable();
+ rcu_read_unlock_trace();
+ return ret;
+}
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
+extern struct mutex bpf_stats_enabled_mutex;
+
+/*
+ * Block execution of BPF programs attached to instrumentation (perf,
+ * kprobes, tracepoints) to prevent deadlocks on map operations as any of
+ * these events can happen inside a region which holds a map bucket lock
+ * and can deadlock on it.
+ */
+static inline void bpf_disable_instrumentation(void)
+{
+ migrate_disable();
+ this_cpu_inc(bpf_prog_active);
+}
+
+static inline void bpf_enable_instrumentation(void)
+{
+ this_cpu_dec(bpf_prog_active);
+ migrate_enable();
+}
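
A minimal sketch of the intended pairing, assuming a hypothetical map operation that holds a bucket lock; the guard keeps kprobe/tracepoint programs from re-entering map code on this CPU:

/* Sketch: fence out instrumentation progs around the critical
 * section so a tracing prog cannot deadlock on the bucket lock.
 */
static void example_map_update_locked(void)
{
	bpf_disable_instrumentation();
	/* ... take bucket lock, update element, drop lock ... */
	bpf_enable_instrumentation();
}
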
-#define BPF_PROG_TYPE(_id, _ops) \
- extern const struct bpf_verifier_ops _ops;
+extern const struct file_operations bpf_map_fops;
+extern const struct file_operations bpf_prog_fops;
+extern const struct file_operations bpf_iter_fops;
+
+#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
+ extern const struct bpf_prog_ops _name ## _prog_ops; \
+ extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
extern const struct bpf_map_ops _ops;
+#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
+#undef BPF_LINK_TYPE
+
+extern const struct bpf_prog_ops bpf_offload_prog_ops;
+extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
+extern const struct bpf_verifier_ops xdp_analyzer_ops;
struct bpf_prog *bpf_prog_get(u32 ufd);
-struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
-struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
+struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
+ bool attach_drv);
+void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
-struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
+void bpf_prog_inc(struct bpf_prog *prog);
+struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
-int __bpf_prog_charge(struct user_struct *user, u32 pages);
-void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
+void bpf_prog_free_id(struct bpf_prog *prog);
+void bpf_map_free_id(struct bpf_map *map);
+
+struct btf_field *btf_record_find(const struct btf_record *rec,
+ u32 offset, u32 field_mask);
+void btf_record_free(struct btf_record *rec);
+void bpf_map_free_record(struct bpf_map *map);
+struct btf_record *btf_record_dup(const struct btf_record *rec);
+bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
+void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
+void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
+
+struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
-struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
+void bpf_map_inc(struct bpf_map *map);
+void bpf_map_inc_with_uref(struct bpf_map *map);
+struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);
+struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
-int bpf_map_precharge_memlock(u32 pages);
-void *bpf_map_area_alloc(size_t size);
+void *bpf_map_area_alloc(u64 size, int numa_node);
+void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
+bool bpf_map_write_active(const struct bpf_map *map);
+void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
+int generic_map_lookup_batch(struct bpf_map *map,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+int generic_map_delete_batch(struct bpf_map *map,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
+struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
+
+#ifdef CONFIG_MEMCG_KMEM
+void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
+ int node);
+void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
+void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
+ gfp_t flags);
+void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
+ size_t align, gfp_t flags);
+#else
+static inline void *
+bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
+ int node)
+{
+ return kmalloc_node(size, flags, node);
+}
+
+static inline void *
+bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
+{
+ return kzalloc(size, flags);
+}
+
+static inline void *
+bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, gfp_t flags)
+{
+ return kvcalloc(n, size, flags);
+}
+
+static inline void __percpu *
+bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
+ gfp_t flags)
+{
+ return __alloc_percpu_gfp(size, align, flags);
+}
+#endif
extern int sysctl_unprivileged_bpf_disabled;
-int bpf_map_new_fd(struct bpf_map *map);
+static inline bool bpf_allow_ptr_leaks(void)
+{
+ return perfmon_capable();
+}
+
+static inline bool bpf_allow_uninit_stack(void)
+{
+ return perfmon_capable();
+}
+
+static inline bool bpf_bypass_spec_v1(void)
+{
+ return perfmon_capable();
+}
+
+static inline bool bpf_bypass_spec_v4(void)
+{
+ return perfmon_capable();
+}
+
+int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);
+void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
+ const struct bpf_link_ops *ops, struct bpf_prog *prog);
+int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
+int bpf_link_settle(struct bpf_link_primer *primer);
+void bpf_link_cleanup(struct bpf_link_primer *primer);
+void bpf_link_inc(struct bpf_link *link);
+void bpf_link_put(struct bpf_link *link);
+int bpf_link_new_fd(struct bpf_link *link);
+struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
+struct bpf_link *bpf_link_get_from_fd(u32 ufd);
+struct bpf_link *bpf_link_get_curr_or_next(u32 *id);
+
int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
-int bpf_obj_get_user(const char __user *pathname);
+int bpf_obj_get_user(const char __user *pathname, int flags);
+
+#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
+#define DEFINE_BPF_ITER_FUNC(target, args...) \
+ extern int bpf_iter_ ## target(args); \
+ int __init bpf_iter_ ## target(args) { return 0; }
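
For reference, a hedged expansion of the macro for a hypothetical 'foo' target: the generated __init function is an empty anchor whose only job is to give BTF a typed signature for the iterator's context arguments.

/* DEFINE_BPF_ITER_FUNC(foo, struct bpf_iter_meta *meta, int x)
 * expands roughly to:
 */
extern int bpf_iter_foo(struct bpf_iter_meta *meta, int x);
int __init bpf_iter_foo(struct bpf_iter_meta *meta, int x) { return 0; }
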
+
+/*
+ * The task type of iterators.
+ *
+ * BPF task iterators can be parameterized to visit only a subset
+ * of tasks.
+ *
+ * BPF_TASK_ITER_ALL (default)
+ * Iterate over resources of every task.
+ *
+ * BPF_TASK_ITER_TID
+ * Iterate over resources of a single task/tid.
+ *
+ * BPF_TASK_ITER_TGID
+ * Iterate over resources of every task of a process / task group.
+ */
+enum bpf_iter_task_type {
+ BPF_TASK_ITER_ALL = 0,
+ BPF_TASK_ITER_TID,
+ BPF_TASK_ITER_TGID,
+};
+
+struct bpf_iter_aux_info {
+ /* for map_elem iter */
+ struct bpf_map *map;
+
+ /* for cgroup iter */
+ struct {
+ struct cgroup *start; /* starting cgroup */
+ enum bpf_cgroup_iter_order order;
+ } cgroup;
+ struct {
+ enum bpf_iter_task_type type;
+ u32 pid;
+ } task;
+};
+
+typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
+ union bpf_iter_link_info *linfo,
+ struct bpf_iter_aux_info *aux);
+typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
+typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
+ struct seq_file *seq);
+typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
+ struct bpf_link_info *info);
+typedef const struct bpf_func_proto *
+(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
+ const struct bpf_prog *prog);
+
+enum bpf_iter_feature {
+ BPF_ITER_RESCHED = BIT(0),
+};
+
+#define BPF_ITER_CTX_ARG_MAX 2
+struct bpf_iter_reg {
+ const char *target;
+ bpf_iter_attach_target_t attach_target;
+ bpf_iter_detach_target_t detach_target;
+ bpf_iter_show_fdinfo_t show_fdinfo;
+ bpf_iter_fill_link_info_t fill_link_info;
+ bpf_iter_get_func_proto_t get_func_proto;
+ u32 ctx_arg_info_size;
+ u32 feature;
+ struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
+ const struct bpf_iter_seq_info *seq_info;
+};
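
A hedged sketch of how a target could fill this registration structure; the 'foo' callbacks are hypothetical placeholders, and a real target would also point seq_info at its seq_file implementation:

/* Illustrative registration for a hypothetical 'foo' target;
 * foo_iter_attach/foo_iter_detach do not exist in the kernel.
 */
static const struct bpf_iter_reg foo_reg_info = {
	.target		= "foo",
	.attach_target	= foo_iter_attach,
	.detach_target	= foo_iter_detach,
	.feature	= BPF_ITER_RESCHED,
};

/* registered once at boot, e.g.: bpf_iter_reg_target(&foo_reg_info); */
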
+
+struct bpf_iter_meta {
+ __bpf_md_ptr(struct seq_file *, seq);
+ u64 session_id;
+ u64 seq_num;
+};
+
+struct bpf_iter__bpf_map_elem {
+ __bpf_md_ptr(struct bpf_iter_meta *, meta);
+ __bpf_md_ptr(struct bpf_map *, map);
+ __bpf_md_ptr(void *, key);
+ __bpf_md_ptr(void *, value);
+};
+
+int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
+void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
+bool bpf_iter_prog_supported(struct bpf_prog *prog);
+const struct bpf_func_proto *
+bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
+int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
+int bpf_iter_new_fd(struct bpf_link *link);
+bool bpf_link_is_iter(struct bpf_link *link);
+struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
+int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
+void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
+ struct seq_file *seq);
+int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
+ struct bpf_link_info *info);
+
+int map_set_for_each_callback_args(struct bpf_verifier_env *env,
+ struct bpf_func_state *caller,
+ struct bpf_func_state *callee);
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
@@ -295,44 +2196,193 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
-void bpf_fd_array_map_clear(struct bpf_map *map);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
-/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
- * forced to use 'long' read/writes to try to atomically copy long counters.
- * Best-effort only. No barriers here, since it _will_ race with concurrent
- * updates from BPF programs. Called from bpf syscall and mostly used with
- * size 8 or 16 bytes, so ask compiler to inline it.
- */
-static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
+int bpf_get_file_flag(int flags);
+int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
+ size_t actual_size);
+
+/* verify correctness of eBPF program */
+int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size);
+
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
+#endif
+
+struct btf *bpf_get_btf_vmlinux(void);
+
+/* Map specifics */
+struct xdp_frame;
+struct sk_buff;
+struct bpf_dtab_netdev;
+struct bpf_cpu_map_entry;
+
+void __dev_flush(void);
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+ struct net_device *dev_rx);
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
+ struct net_device *dev_rx);
+int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
+ struct bpf_map *map, bool exclude_ingress);
+int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+ struct bpf_prog *xdp_prog);
+int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
+ struct bpf_prog *xdp_prog, struct bpf_map *map,
+ bool exclude_ingress);
+
+void __cpu_map_flush(void);
+int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
+ struct net_device *dev_rx);
+int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
+ struct sk_buff *skb);
+
+/* Return the map's numa node as specified by userspace */
+static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
- const long *lsrc = src;
- long *ldst = dst;
+ return (attr->map_flags & BPF_F_NUMA_NODE) ?
+ attr->numa_node : NUMA_NO_NODE;
+}
- size /= sizeof(long);
- while (size--)
- *ldst++ = *lsrc++;
+struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
+int array_map_alloc_check(union bpf_attr *attr);
+
+int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+int bpf_prog_test_run_tracing(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+int bpf_prog_test_run_nf(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+bool btf_ctx_access(int off, int size, enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info);
+
+static inline bool bpf_tracing_ctx_access(int off, int size,
+ enum bpf_access_type type)
+{
+ if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
+ return false;
+ if (type != BPF_READ)
+ return false;
+ if (off % size != 0)
+ return false;
+ return true;
}
-/* verify correctness of eBPF program */
-int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
-#else
+static inline bool bpf_tracing_btf_ctx_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ if (!bpf_tracing_ctx_access(off, size, type))
+ return false;
+ return btf_ctx_access(off, size, type, prog, info);
+}
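
To show where these checks slot in, a hedged sketch of a tracing-style prog type's is_valid_access verifier callback delegating to the helper above; the function name is illustrative:

/* Sketch of a verifier callback; real prog types may apply
 * extra restrictions before falling through to the BTF check.
 */
static bool example_is_valid_access(int off, int size,
				    enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}
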
+
+int btf_struct_access(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ int off, int size, enum bpf_access_type atype,
+ u32 *next_btf_id, enum bpf_type_flag *flag, const char **field_name);
+bool btf_struct_ids_match(struct bpf_verifier_log *log,
+ const struct btf *btf, u32 id, int off,
+ const struct btf *need_btf, u32 need_type_id,
+ bool strict);
+
+int btf_distill_func_proto(struct bpf_verifier_log *log,
+ struct btf *btf,
+ const struct btf_type *func_proto,
+ const char *func_name,
+ struct btf_func_model *m);
+
+struct bpf_reg_state;
+int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
+ struct bpf_reg_state *regs);
+int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
+ struct bpf_reg_state *regs);
+int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
+ struct bpf_reg_state *reg);
+int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
+ struct btf *btf, const struct btf_type *t);
+
+struct bpf_prog *bpf_prog_by_id(u32 id);
+struct bpf_link *bpf_link_by_id(u32 id);
+
+const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
+void bpf_task_storage_free(struct task_struct *task);
+void bpf_cgrp_storage_free(struct cgroup *cgroup);
+bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
+const struct btf_func_model *
+bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
+ const struct bpf_insn *insn);
+int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
+ u16 btf_fd_idx, u8 **func_addr);
+
+struct bpf_core_ctx {
+ struct bpf_verifier_log *log;
+ const struct btf *btf;
+};
+
+bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ const char *field_name, u32 btf_id, const char *suffix);
+
+bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
+ const struct btf *reg_btf, u32 reg_id,
+ const struct btf *arg_btf, u32 arg_id);
+
+int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
+ int relo_idx, void *insn);
+
+static inline bool unprivileged_ebpf_enabled(void)
+{
+ return !sysctl_unprivileged_bpf_disabled;
+}
+
+/* Not all bpf prog types have the bpf_ctx.
+ * For a prog type that has initialized the bpf_ctx,
+ * this function can be used to decide whether a kernel function
+ * is being called by a bpf program.
+ */
+static inline bool has_current_bpf_ctx(void)
+{
+ return !!current->bpf_ctx;
+}
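
A minimal sketch of the intended use, under the assumption that the calling prog type initializes bpf_ctx: a kernel function can branch on it to avoid a path that is unsafe from BPF context.

/* Sketch: refuse a sleeping path when called from a bpf prog. */
static int example_maybe_sleep(void)
{
	if (has_current_bpf_ctx())
		return -EAGAIN;	/* called from bpf, cannot sleep */

	/* ... sleeping slow path ... */
	return 0;
}
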
+
+void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog);
+
+void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
+ enum bpf_dynptr_type type, u32 offset, u32 size);
+void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
+void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr);
+#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
- enum bpf_prog_type type)
+static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
+ enum bpf_prog_type type,
+ bool attach_drv)
{
return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog,
- int i)
+
+static inline void bpf_prog_add(struct bpf_prog *prog, int i)
{
- return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
@@ -343,40 +2393,689 @@ static inline void bpf_prog_put(struct bpf_prog *prog)
{
}
-static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
+static inline void bpf_prog_inc(struct bpf_prog *prog)
+{
+}
+
+static inline struct bpf_prog *__must_check
+bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
return ERR_PTR(-EOPNOTSUPP);
}
-static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
+static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
+ const struct bpf_link_ops *ops,
+ struct bpf_prog *prog)
+{
+}
+
+static inline int bpf_link_prime(struct bpf_link *link,
+ struct bpf_link_primer *primer)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int bpf_link_settle(struct bpf_link_primer *primer)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
+{
+}
+
+static inline void bpf_link_inc(struct bpf_link *link)
+{
+}
+
+static inline void bpf_link_put(struct bpf_link *link)
+{
+}
+
+static inline int bpf_obj_get_user(const char __user *pathname, int flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void __dev_flush(void)
+{
+}
+
+struct xdp_frame;
+struct bpf_dtab_netdev;
+struct bpf_cpu_map_entry;
+
+static inline
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+ struct net_device *dev_rx)
{
return 0;
}
-static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
+static inline
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
+ struct net_device *dev_rx)
+{
+ return 0;
+}
+
+static inline
+int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
+ struct bpf_map *map, bool exclude_ingress)
+{
+ return 0;
+}
+
+struct sk_buff;
+
+static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
+ struct sk_buff *skb,
+ struct bpf_prog *xdp_prog)
+{
+ return 0;
+}
+
+static inline
+int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
+ struct bpf_prog *xdp_prog, struct bpf_map *map,
+ bool exclude_ingress)
+{
+ return 0;
+}
+
+static inline void __cpu_map_flush(void)
+{
+}
+
+static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
+ struct xdp_frame *xdpf,
+ struct net_device *dev_rx)
+{
+ return 0;
+}
+
+static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
+ struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
+ enum bpf_prog_type type)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ return -ENOTSUPP;
+}
+
+static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ return -ENOTSUPP;
+}
+
+static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ return -ENOTSUPP;
+}
+
+static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ return -ENOTSUPP;
+}
+
+static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ return -ENOTSUPP;
+}
+
+static inline void bpf_map_put(struct bpf_map *map)
+{
+}
+
+static inline struct bpf_prog *bpf_prog_by_id(u32 id)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
+static inline int btf_struct_access(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ int off, int size, enum bpf_access_type atype,
+ u32 *next_btf_id, enum bpf_type_flag *flag,
+ const char **field_name)
+{
+ return -EACCES;
+}
+
+static inline const struct bpf_func_proto *
+bpf_base_func_proto(enum bpf_func_id func_id)
+{
+ return NULL;
+}
+
+static inline void bpf_task_storage_free(struct task_struct *task)
+{
+}
+
+static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
+{
+ return false;
+}
+
+static inline const struct btf_func_model *
+bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
+ const struct bpf_insn *insn)
+{
+ return NULL;
+}
+
+static inline int
+bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
+ u16 btf_fd_idx, u8 **func_addr)
+{
+ return -ENOTSUPP;
+}
+
+static inline bool unprivileged_ebpf_enabled(void)
+{
+ return false;
+}
+
+static inline bool has_current_bpf_ctx(void)
+{
+ return false;
+}
+
+static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog)
+{
+}
+
+static inline void bpf_cgrp_storage_free(struct cgroup *cgroup)
+{
+}
+
+static inline void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
+ enum bpf_dynptr_type type, u32 offset, u32 size)
+{
+}
+
+static inline void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
+{
+}
+
+static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
{
}
#endif /* CONFIG_BPF_SYSCALL */
+void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
+ struct btf_mod_pair *used_btfs, u32 len);
+
+static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
+ enum bpf_prog_type type)
+{
+ return bpf_prog_get_type_dev(ufd, type, false);
+}
+
+void __bpf_free_used_maps(struct bpf_prog_aux *aux,
+ struct bpf_map **used_maps, u32 len);
+
+bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
+
+int bpf_prog_offload_compile(struct bpf_prog *prog);
+void bpf_prog_dev_bound_destroy(struct bpf_prog *prog);
+int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
+ struct bpf_prog *prog);
+
+int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
+
+int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
+int bpf_map_offload_update_elem(struct bpf_map *map,
+ void *key, void *value, u64 flags);
+int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
+int bpf_map_offload_get_next_key(struct bpf_map *map,
+ void *key, void *next_key);
+
+bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
+
+struct bpf_offload_dev *
+bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
+void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
+void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
+int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
+ struct net_device *netdev);
+void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
+ struct net_device *netdev);
+bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
+
+void unpriv_ebpf_notify(int new_state);
+
+#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
+int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
+ struct bpf_prog_aux *prog_aux);
+void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id);
+int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr);
+int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog);
+void bpf_dev_bound_netdev_unregister(struct net_device *dev);
+
+static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
+{
+ return aux->dev_bound;
+}
+
+static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux)
+{
+ return aux->offload_requested;
+}
+
+bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs);
+
+static inline bool bpf_map_is_offloaded(struct bpf_map *map)
+{
+ return unlikely(map->ops == &bpf_map_offload_ops);
+}
+
+struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
+void bpf_map_offload_map_free(struct bpf_map *map);
+u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map);
+int bpf_prog_test_run_syscall(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+
+int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
+int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
+int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
+int sock_map_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+
+void sock_map_unhash(struct sock *sk);
+void sock_map_destroy(struct sock *sk);
+void sock_map_close(struct sock *sk, long timeout);
+#else
+static inline int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
+ struct bpf_prog_aux *prog_aux)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog,
+ u32 func_id)
+{
+ return NULL;
+}
+
+static inline int bpf_prog_dev_bound_init(struct bpf_prog *prog,
+ union bpf_attr *attr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog,
+ struct bpf_prog *old_prog)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void bpf_dev_bound_netdev_unregister(struct net_device *dev)
+{
+}
+
+static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
+{
+ return false;
+}
+
+static inline bool bpf_prog_is_offloaded(struct bpf_prog_aux *aux)
+{
+ return false;
+}
+
+static inline bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs)
+{
+ return false;
+}
+
+static inline bool bpf_map_is_offloaded(struct bpf_map *map)
+{
+ return false;
+}
+
+static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void bpf_map_offload_map_free(struct bpf_map *map)
+{
+}
+
+static inline u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
+{
+ return 0;
+}
+
+static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ return -ENOTSUPP;
+}
+
+#ifdef CONFIG_BPF_SYSCALL
+static inline int sock_map_get_from_fd(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int sock_map_prog_detach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
+ u64 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_BPF_SYSCALL */
+#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
+
+#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
+void bpf_sk_reuseport_detach(struct sock *sk);
+int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
+ void *value);
+int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 map_flags);
+#else
+static inline void bpf_sk_reuseport_detach(struct sock *sk)
+{
+}
+
+#ifdef CONFIG_BPF_SYSCALL
+static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
+ void *key, void *value)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
+ void *key, void *value,
+ u64 map_flags)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_BPF_SYSCALL */
+#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
+
/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
+extern const struct bpf_func_proto bpf_map_push_elem_proto;
+extern const struct bpf_func_proto bpf_map_pop_elem_proto;
+extern const struct bpf_func_proto bpf_map_peek_elem_proto;
+extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto;
extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
+extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
+extern const struct bpf_func_proto bpf_ktime_get_tai_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
-extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
-extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
+extern const struct bpf_func_proto bpf_get_stack_proto;
+extern const struct bpf_func_proto bpf_get_task_stack_proto;
+extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
+extern const struct bpf_func_proto bpf_get_stack_proto_pe;
+extern const struct bpf_func_proto bpf_sock_map_update_proto;
+extern const struct bpf_func_proto bpf_sock_hash_update_proto;
+extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
+extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
+extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto;
+extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
+extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
+extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
+extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
+extern const struct bpf_func_proto bpf_spin_lock_proto;
+extern const struct bpf_func_proto bpf_spin_unlock_proto;
+extern const struct bpf_func_proto bpf_get_local_storage_proto;
+extern const struct bpf_func_proto bpf_strtol_proto;
+extern const struct bpf_func_proto bpf_strtoul_proto;
+extern const struct bpf_func_proto bpf_tcp_sock_proto;
+extern const struct bpf_func_proto bpf_jiffies64_proto;
+extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
+extern const struct bpf_func_proto bpf_event_output_data_proto;
+extern const struct bpf_func_proto bpf_ringbuf_output_proto;
+extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
+extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
+extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
+extern const struct bpf_func_proto bpf_ringbuf_query_proto;
+extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto;
+extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto;
+extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto;
+extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto;
+extern const struct bpf_func_proto bpf_copy_from_user_proto;
+extern const struct bpf_func_proto bpf_snprintf_btf_proto;
+extern const struct bpf_func_proto bpf_snprintf_proto;
+extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
+extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
+extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
+extern const struct bpf_func_proto bpf_sock_from_file_proto;
+extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
+extern const struct bpf_func_proto bpf_task_storage_get_recur_proto;
+extern const struct bpf_func_proto bpf_task_storage_get_proto;
+extern const struct bpf_func_proto bpf_task_storage_delete_recur_proto;
+extern const struct bpf_func_proto bpf_task_storage_delete_proto;
+extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
+extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
+extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
+extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
+extern const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto;
+extern const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto;
+extern const struct bpf_func_proto bpf_find_vma_proto;
+extern const struct bpf_func_proto bpf_loop_proto;
+extern const struct bpf_func_proto bpf_copy_from_user_task_proto;
+extern const struct bpf_func_proto bpf_set_retval_proto;
+extern const struct bpf_func_proto bpf_get_retval_proto;
+extern const struct bpf_func_proto bpf_user_ringbuf_drain_proto;
+extern const struct bpf_func_proto bpf_cgrp_storage_get_proto;
+extern const struct bpf_func_proto bpf_cgrp_storage_delete_proto;
+
+const struct bpf_func_proto *tracing_prog_func_proto(
+ enum bpf_func_id func_id, const struct bpf_prog *prog);
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+
+#if defined(CONFIG_NET)
+bool bpf_sock_common_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ struct bpf_insn_access_aux *info);
+bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+ struct bpf_insn_access_aux *info);
+u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size);
+int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
+ struct bpf_dynptr_kern *ptr);
+#else
+static inline bool bpf_sock_common_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ struct bpf_insn_access_aux *info)
+{
+ return false;
+}
+static inline bool bpf_sock_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ struct bpf_insn_access_aux *info)
+{
+ return false;
+}
+static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size)
+{
+ return 0;
+}
+static inline int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
+ struct bpf_dynptr_kern *ptr)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#ifdef CONFIG_INET
+struct sk_reuseport_kern {
+ struct sk_buff *skb;
+ struct sock *sk;
+ struct sock *selected_sk;
+ struct sock *migrating_sk;
+ void *data_end;
+ u32 hash;
+ u32 reuseport_id;
+ bool bind_inany;
+};
+bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+ struct bpf_insn_access_aux *info);
+
+u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size);
+
+bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+ struct bpf_insn_access_aux *info);
+
+u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size);
+#else
+static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ struct bpf_insn_access_aux *info)
+{
+ return false;
+}
+
+static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size)
+{
+ return 0;
+}
+static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ struct bpf_insn_access_aux *info)
+{
+ return false;
+}
+
+static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size)
+{
+ return 0;
+}
+#endif /* CONFIG_INET */
+
+enum bpf_text_poke_type {
+ BPF_MOD_CALL,
+ BPF_MOD_JUMP,
+};
+
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
+ void *addr1, void *addr2);
+
+void *bpf_arch_text_copy(void *dst, void *src, size_t len);
+int bpf_arch_text_invalidate(void *dst, size_t len);
+
+struct btf_id_set;
+bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
+
+#define MAX_BPRINTF_VARARGS 12
+#define MAX_BPRINTF_BUF 1024
+
+struct bpf_bprintf_data {
+ u32 *bin_args;
+ char *buf;
+ bool get_bin_args;
+ bool get_buf;
+};
+
+int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
+ u32 num_args, struct bpf_bprintf_data *data);
+void bpf_bprintf_cleanup(struct bpf_bprintf_data *data);
+
+#ifdef CONFIG_BPF_LSM
+void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype);
+void bpf_cgroup_atype_put(int cgroup_atype);
+#else
+static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {}
+static inline void bpf_cgroup_atype_put(int cgroup_atype) {}
+#endif /* CONFIG_BPF_LSM */
+
+struct key;
+
+#ifdef CONFIG_KEYS
+struct bpf_key {
+ struct key *key;
+ bool has_ref;
+};
+#endif /* CONFIG_KEYS */
+
+static inline bool type_is_alloc(u32 type)
+{
+ return type & MEM_ALLOC;
+}
+
+static inline gfp_t bpf_memcg_flags(gfp_t flags)
+{
+ if (memcg_bpf_enabled())
+ return flags | __GFP_ACCOUNT;
+ return flags;
+}
#endif /* _LINUX_BPF_H */
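
Tying together the bprintf pieces declared above (MAX_BPRINTF_VARARGS, struct bpf_bprintf_data and the prepare/cleanup pair), a hedged sketch of the expected call pattern; 'fmt', 'raw_args' and 'num_args' are placeholder inputs:

/* Sketch: validate a format string and materialize its args. */
static int example_bprintf(char *fmt, u32 fmt_size,
			   const u64 *raw_args, u32 num_args)
{
	struct bpf_bprintf_data data = { .get_bin_args = true };
	int err;

	err = bpf_bprintf_prepare(fmt, fmt_size, raw_args, num_args, &data);
	if (err)
		return err;

	/* ... hand data.bin_args to the snprintf-style consumer ... */

	bpf_bprintf_cleanup(&data);
	return 0;
}
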
diff --git a/include/linux/bpf_lirc.h b/include/linux/bpf_lirc.h
new file mode 100644
index 000000000000..9d9ff755ec29
--- /dev/null
+++ b/include/linux/bpf_lirc.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BPF_LIRC_H
+#define _BPF_LIRC_H
+
+#include <uapi/linux/bpf.h>
+
+#ifdef CONFIG_BPF_LIRC_MODE2
+int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+int lirc_prog_detach(const union bpf_attr *attr);
+int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr);
+#else
+static inline int lirc_prog_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int lirc_prog_detach(const union bpf_attr *attr)
+{
+ return -EINVAL;
+}
+
+static inline int lirc_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* _BPF_LIRC_H */
diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
new file mode 100644
index 000000000000..173ec7f43ed1
--- /dev/null
+++ b/include/linux/bpf_local_storage.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 Facebook
+ * Copyright 2020 Google LLC.
+ */
+
+#ifndef _BPF_LOCAL_STORAGE_H
+#define _BPF_LOCAL_STORAGE_H
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/rculist.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <linux/types.h>
+#include <linux/bpf_mem_alloc.h>
+#include <uapi/linux/btf.h>
+
+#define BPF_LOCAL_STORAGE_CACHE_SIZE 16
+
+#define bpf_rcu_lock_held() \
+ (rcu_read_lock_held() || rcu_read_lock_trace_held() || \
+ rcu_read_lock_bh_held())
+struct bpf_local_storage_map_bucket {
+ struct hlist_head list;
+ raw_spinlock_t lock;
+};
+
+/* The map is not the primary owner of a bpf_local_storage_elem.
+ * Instead, the container object (e.g. sk->sk_bpf_storage) is.
+ *
+ * The map (bpf_local_storage_map) serves two purposes:
+ * 1. Define the size of the "local storage". It is
+ *    the map's value_size.
+ *
+ * 2. Maintain a list to keep track of all elems such
+ *    that they can be cleaned up during the map destruction.
+ *
+ * When a bpf local storage is being looked up for a
+ * particular object, the "bpf_map" pointer is actually used
+ * as the "key" to search in the list of elems in
+ * the respective bpf_local_storage owned by the object.
+ *
+ * e.g. sk->sk_bpf_storage is the mini-map with the "bpf_map" pointer
+ * as the searching key.
+ */
+struct bpf_local_storage_map {
+ struct bpf_map map;
+	/* Lookup of an elem does not require accessing the map.
+	 *
+	 * Updating/deleting requires a bucket lock to
+	 * link/unlink the elem from the map. Multiple
+	 * buckets are used to reduce contention.
+	 */
+ struct bpf_local_storage_map_bucket *buckets;
+ u32 bucket_log;
+ u16 elem_size;
+ u16 cache_idx;
+ struct bpf_mem_alloc selem_ma;
+ struct bpf_mem_alloc storage_ma;
+ bool bpf_ma;
+};
+
+struct bpf_local_storage_data {
+ /* smap is used as the searching key when looking up
+ * from the object's bpf_local_storage.
+ *
+ * Put it in the same cacheline as the data to minimize
+ * the number of cachelines accessed during the cache hit case.
+ */
+ struct bpf_local_storage_map __rcu *smap;
+ u8 data[] __aligned(8);
+};
+
+/* Linked to bpf_local_storage and bpf_local_storage_map */
+struct bpf_local_storage_elem {
+ struct hlist_node map_node; /* Linked to bpf_local_storage_map */
+ struct hlist_node snode; /* Linked to bpf_local_storage */
+ struct bpf_local_storage __rcu *local_storage;
+ struct rcu_head rcu;
+ /* 8 bytes hole */
+	/* 8-byte hole */
+	/* The data is stored in another cacheline to minimize
+	 * the number of cacheline accesses during a cache hit.
+	 */
+ struct bpf_local_storage_data sdata ____cacheline_aligned;
+};
+
+struct bpf_local_storage {
+ struct bpf_local_storage_data __rcu *cache[BPF_LOCAL_STORAGE_CACHE_SIZE];
+ struct bpf_local_storage_map __rcu *smap;
+ struct hlist_head list; /* List of bpf_local_storage_elem */
+ void *owner; /* The object that owns the above "list" of
+ * bpf_local_storage_elem.
+ */
+ struct rcu_head rcu;
+ raw_spinlock_t lock; /* Protect adding/removing from the "list" */
+};
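
To make the "map pointer as key" design concrete, a hedged sketch of the uncached lookup walk over an object's elem list; the real bpf_local_storage_lookup() additionally consults and refills the per-storage cache array:

/* Sketch only: linear search keyed by the smap pointer. */
static struct bpf_local_storage_data *
example_uncached_lookup(struct bpf_local_storage *local_storage,
			struct bpf_local_storage_map *smap)
{
	struct bpf_local_storage_elem *selem;

	hlist_for_each_entry_rcu(selem, &local_storage->list, snode)
		if (rcu_access_pointer(selem->sdata.smap) == smap)
			return &selem->sdata;
	return NULL;
}
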
+
+/* U16_MAX is much more than enough for sk local storage
+ * considering a tcp_sock is ~2k.
+ */
+#define BPF_LOCAL_STORAGE_MAX_VALUE_SIZE \
+ min_t(u32, \
+ (KMALLOC_MAX_SIZE - MAX_BPF_STACK - \
+ sizeof(struct bpf_local_storage_elem)), \
+ (U16_MAX - sizeof(struct bpf_local_storage_elem)))
+
+#define SELEM(_SDATA) \
+ container_of((_SDATA), struct bpf_local_storage_elem, sdata)
+#define SDATA(_SELEM) (&(_SELEM)->sdata)
+
+struct bpf_local_storage_cache {
+ spinlock_t idx_lock;
+ u64 idx_usage_counts[BPF_LOCAL_STORAGE_CACHE_SIZE];
+};
+
+#define DEFINE_BPF_STORAGE_CACHE(name) \
+static struct bpf_local_storage_cache name = { \
+ .idx_lock = __SPIN_LOCK_UNLOCKED(name.idx_lock), \
+}
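
For illustration, instantiating the macro for a hypothetical 'sk_cache' yields a statically initialized cache object:

/* DEFINE_BPF_STORAGE_CACHE(sk_cache); expands to: */
static struct bpf_local_storage_cache sk_cache = {
	.idx_lock = __SPIN_LOCK_UNLOCKED(sk_cache.idx_lock),
};
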
+
+/* Helper functions for bpf_local_storage */
+int bpf_local_storage_map_alloc_check(union bpf_attr *attr);
+
+struct bpf_map *
+bpf_local_storage_map_alloc(union bpf_attr *attr,
+ struct bpf_local_storage_cache *cache,
+ bool bpf_ma);
+
+struct bpf_local_storage_data *
+bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_map *smap,
+ bool cacheit_lockit);
+
+void bpf_local_storage_destroy(struct bpf_local_storage *local_storage);
+
+void bpf_local_storage_map_free(struct bpf_map *map,
+ struct bpf_local_storage_cache *cache,
+ int __percpu *busy_counter);
+
+int bpf_local_storage_map_check_btf(const struct bpf_map *map,
+ const struct btf *btf,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type);
+
+void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_elem *selem);
+
+void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now);
+
+void bpf_selem_link_map(struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *selem);
+
+struct bpf_local_storage_elem *
+bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value,
+ bool charge_mem, gfp_t gfp_flags);
+
+void bpf_selem_free(struct bpf_local_storage_elem *selem,
+ struct bpf_local_storage_map *smap,
+ bool reuse_now);
+
+int
+bpf_local_storage_alloc(void *owner,
+ struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *first_selem,
+ gfp_t gfp_flags);
+
+struct bpf_local_storage_data *
+bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
+ void *value, u64 map_flags, gfp_t gfp_flags);
+
+u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map);
+
+#endif /* _BPF_LOCAL_STORAGE_H */
diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h
new file mode 100644
index 000000000000..1de7ece5d36d
--- /dev/null
+++ b/include/linux/bpf_lsm.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2020 Google LLC.
+ */
+
+#ifndef _LINUX_BPF_LSM_H
+#define _LINUX_BPF_LSM_H
+
+#include <linux/sched.h>
+#include <linux/bpf.h>
+#include <linux/lsm_hooks.h>
+
+#ifdef CONFIG_BPF_LSM
+
+#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
+ RET bpf_lsm_##NAME(__VA_ARGS__);
+#include <linux/lsm_hook_defs.h>
+#undef LSM_HOOK
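
Concretely, every LSM_HOOK() line in lsm_hook_defs.h becomes one bpf_lsm_*() prototype here; for example, assuming the usual file_open hook signature, the expansion would be roughly:

/* LSM_HOOK(int, 0, file_open, struct file *file) becomes: */
int bpf_lsm_file_open(struct file *file);
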
+
+struct bpf_storage_blob {
+ struct bpf_local_storage __rcu *storage;
+};
+
+extern struct lsm_blob_sizes bpf_lsm_blob_sizes;
+
+int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
+ const struct bpf_prog *prog);
+
+bool bpf_lsm_is_sleepable_hook(u32 btf_id);
+bool bpf_lsm_is_trusted(const struct bpf_prog *prog);
+
+static inline struct bpf_storage_blob *bpf_inode(
+ const struct inode *inode)
+{
+ if (unlikely(!inode->i_security))
+ return NULL;
+
+ return inode->i_security + bpf_lsm_blob_sizes.lbs_inode;
+}
+
+extern const struct bpf_func_proto bpf_inode_storage_get_proto;
+extern const struct bpf_func_proto bpf_inode_storage_delete_proto;
+void bpf_inode_storage_free(struct inode *inode);
+
+void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func);
+
+#else /* !CONFIG_BPF_LSM */
+
+static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id)
+{
+ return false;
+}
+
+static inline bool bpf_lsm_is_trusted(const struct bpf_prog *prog)
+{
+ return false;
+}
+
+static inline int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
+ const struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct bpf_storage_blob *bpf_inode(
+ const struct inode *inode)
+{
+ return NULL;
+}
+
+static inline void bpf_inode_storage_free(struct inode *inode)
+{
+}
+
+static inline void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
+ bpf_func_t *bpf_func)
+{
+}
+
+#endif /* CONFIG_BPF_LSM */
+
+#endif /* _LINUX_BPF_LSM_H */
diff --git a/include/linux/bpf_mem_alloc.h b/include/linux/bpf_mem_alloc.h
new file mode 100644
index 000000000000..3929be5743f4
--- /dev/null
+++ b/include/linux/bpf_mem_alloc.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#ifndef _BPF_MEM_ALLOC_H
+#define _BPF_MEM_ALLOC_H
+#include <linux/compiler_types.h>
+#include <linux/workqueue.h>
+
+struct bpf_mem_cache;
+struct bpf_mem_caches;
+
+struct bpf_mem_alloc {
+ struct bpf_mem_caches __percpu *caches;
+ struct bpf_mem_cache __percpu *cache;
+ struct work_struct work;
+};
+
+/* 'size != 0' is for a bpf_mem_alloc that manages a single fixed object size.
+ * Alloc and free are done with bpf_mem_cache_{alloc,free}().
+ *
+ * 'size = 0' is for a bpf_mem_alloc that manages caches for many object sizes.
+ * Alloc and free are done with bpf_mem_{alloc,free}() and the size of
+ * the returned object is given by the size argument of bpf_mem_alloc().
+ */
+int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu);
+void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);
+
+/* kmalloc/kfree equivalent: */
+void *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size);
+void bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr);
+
+/* kmem_cache_alloc/free equivalent: */
+void *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma);
+void bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr);
+void bpf_mem_cache_raw_free(void *ptr);
+void *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags);
+
+#endif /* _BPF_MEM_ALLOC_H */
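
As a hedged illustration of the two modes documented above: a non-zero init size selects the fixed-size cache API, while size 0 defers the object size to each allocation.

/* Sketch: both modes of bpf_mem_alloc; error handling trimmed. */
static int example_mem_alloc_modes(void)
{
	struct bpf_mem_alloc fixed_ma, any_ma;
	void *a, *b;

	bpf_mem_alloc_init(&fixed_ma, 64, false);  /* fixed 64-byte objects */
	a = bpf_mem_cache_alloc(&fixed_ma);
	bpf_mem_cache_free(&fixed_ma, a);
	bpf_mem_alloc_destroy(&fixed_ma);

	bpf_mem_alloc_init(&any_ma, 0, false);     /* size chosen per call */
	b = bpf_mem_alloc(&any_ma, 192);
	bpf_mem_free(&any_ma, b);
	bpf_mem_alloc_destroy(&any_ma);
	return 0;
}
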
diff --git a/include/linux/bpf_trace.h b/include/linux/bpf_trace.h
index b22efbdd2eb4..ddf896abcfb6 100644
--- a/include/linux/bpf_trace.h
+++ b/include/linux/bpf_trace.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BPF_TRACE_H__
#define __LINUX_BPF_TRACE_H__
-#include <trace/events/bpf.h>
#include <trace/events/xdp.h>
#endif /* __LINUX_BPF_TRACE_H__ */
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 3d137c33d664..fc0d6f32c687 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -1,21 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* internal file - do not include directly */
#ifdef CONFIG_NET
-BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_inout_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_inout_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp,
+ struct xdp_md, struct xdp_buff)
+#ifdef CONFIG_CGROUP_BPF
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock,
+ struct bpf_sock, struct sock)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr,
+ struct bpf_sock_addr, struct bpf_sock_addr_kern)
+#endif
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_in,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_out,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops,
+ struct bpf_sock_ops, struct bpf_sock_ops_kern)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg,
+ struct sk_msg_md, struct sk_msg)
+BPF_PROG_TYPE(BPF_PROG_TYPE_FLOW_DISSECTOR, flow_dissector,
+ struct __sk_buff, struct bpf_flow_dissector)
#endif
#ifdef CONFIG_BPF_EVENTS
-BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint_prog_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe,
+ bpf_user_pt_regs_t, struct pt_regs)
+BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint,
+ __u64, u64)
+BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event,
+ struct bpf_perf_event_data, struct bpf_perf_event_data_kern)
+BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint,
+ struct bpf_raw_tracepoint_args, u64)
+BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable,
+ struct bpf_raw_tracepoint_args, u64)
+BPF_PROG_TYPE(BPF_PROG_TYPE_TRACING, tracing,
+ void *, void *)
+#endif
+#ifdef CONFIG_CGROUP_BPF
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev,
+ struct bpf_cgroup_dev_ctx, struct bpf_cgroup_dev_ctx)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SYSCTL, cg_sysctl,
+ struct bpf_sysctl, struct bpf_sysctl_kern)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCKOPT, cg_sockopt,
+ struct bpf_sockopt, struct bpf_sockopt_kern)
+#endif
+#ifdef CONFIG_BPF_LIRC_MODE2
+BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2,
+ __u32, u32)
+#endif
+#ifdef CONFIG_INET
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport,
+ struct sk_reuseport_md, struct sk_reuseport_kern)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_LOOKUP, sk_lookup,
+ struct bpf_sk_lookup, struct bpf_sk_lookup_kern)
+#endif
+#if defined(CONFIG_BPF_JIT)
+BPF_PROG_TYPE(BPF_PROG_TYPE_STRUCT_OPS, bpf_struct_ops,
+ void *, void *)
+BPF_PROG_TYPE(BPF_PROG_TYPE_EXT, bpf_extension,
+ void *, void *)
+#ifdef CONFIG_BPF_LSM
+BPF_PROG_TYPE(BPF_PROG_TYPE_LSM, lsm,
+ void *, void *)
+#endif /* CONFIG_BPF_LSM */
+#endif
+BPF_PROG_TYPE(BPF_PROG_TYPE_SYSCALL, bpf_syscall,
+ void *, void *)
+#ifdef CONFIG_NETFILTER_BPF_LINK
+BPF_PROG_TYPE(BPF_PROG_TYPE_NETFILTER, netfilter,
+ struct bpf_nf_ctx, struct bpf_nf_ctx)
#endif
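+
+/* Illustrative consumer of this x-macro table (sketch, simplified from the
+ * way bpf.h stamps out declarations before including this file):
+ *
+ *   #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
+ *           extern const struct bpf_prog_ops _name ## _prog_ops; \
+ *           extern const struct bpf_verifier_ops _name ## _verifier_ops;
+ *   #include <linux/bpf_types.h>
+ *   #undef BPF_PROG_TYPE
+ */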
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
@@ -24,6 +90,11 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_PROG_ARRAY, prog_array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, perf_event_array_map_ops)
#ifdef CONFIG_CGROUPS
BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_CGRP_STORAGE, cgrp_storage_map_ops)
+#endif
+#ifdef CONFIG_CGROUP_BPF
+BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, cgroup_storage_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops)
@@ -31,7 +102,49 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, htab_lru_percpu_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_LPM_TRIE, trie_map_ops)
#ifdef CONFIG_PERF_EVENTS
-BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_trace_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
+#ifdef CONFIG_BPF_LSM
+BPF_MAP_TYPE(BPF_MAP_TYPE_INODE_STORAGE, inode_storage_map_ops)
+#endif
+BPF_MAP_TYPE(BPF_MAP_TYPE_TASK_STORAGE, task_storage_map_ops)
+#ifdef CONFIG_NET
+BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
+#if defined(CONFIG_XDP_SOCKETS)
+BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
+#endif
+#ifdef CONFIG_INET
+BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops)
+#endif
+#endif
+BPF_MAP_TYPE(BPF_MAP_TYPE_QUEUE, queue_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops)
+#if defined(CONFIG_BPF_JIT)
+BPF_MAP_TYPE(BPF_MAP_TYPE_STRUCT_OPS, bpf_struct_ops_map_ops)
+#endif
+BPF_MAP_TYPE(BPF_MAP_TYPE_RINGBUF, ringbuf_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_BLOOM_FILTER, bloom_filter_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_USER_RINGBUF, user_ringbuf_map_ops)
+
+BPF_LINK_TYPE(BPF_LINK_TYPE_RAW_TRACEPOINT, raw_tracepoint)
+BPF_LINK_TYPE(BPF_LINK_TYPE_TRACING, tracing)
+#ifdef CONFIG_CGROUP_BPF
+BPF_LINK_TYPE(BPF_LINK_TYPE_CGROUP, cgroup)
+#endif
+BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter)
+#ifdef CONFIG_NET
+BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns)
+BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
+#endif
+#ifdef CONFIG_PERF_EVENTS
+BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf)
+#endif
+BPF_LINK_TYPE(BPF_LINK_TYPE_KPROBE_MULTI, kprobe_multi)
+BPF_LINK_TYPE(BPF_LINK_TYPE_STRUCT_OPS, struct_ops)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 621076f56251..3dd29a53b711 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -1,112 +1,723 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
*/
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1
#include <linux/bpf.h> /* for enum bpf_reg_type */
+#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
+#include <linux/tnum.h>
- /* Just some arbitrary values so we can safely do math without overflowing and
- * are obviously wrong for any sort of memory access.
- */
-#define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024)
-#define BPF_REGISTER_MIN_RANGE -1
+/* Maximum variable offset umax_value permitted when resolving memory accesses.
+ * In practice this is far bigger than any realistic pointer offset; this limit
+ * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
+ */
+#define BPF_MAX_VAR_OFF (1 << 29)
+/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
+ * that converting umax_value to int cannot overflow.
+ */
+#define BPF_MAX_VAR_SIZ (1 << 29)
+/* size of type_str_buf in bpf_verifier. */
+#define TYPE_STR_BUF_LEN 128
+
+/* Liveness marks, used for registers and spilled-regs (in stack slots).
+ * Read marks propagate upwards until they find a write mark; they record that
+ * "one of this state's descendants read this reg" (and therefore the reg is
+ * relevant for states_equal() checks).
+ * Write marks collect downwards and do not propagate; they record that "the
+ * straight-line code that reached this state (from its parent) wrote this reg"
+ * (and therefore that reads propagated from this state or its descendants
+ * should not propagate to its parent).
+ * A state with a write mark can receive read marks; it just won't propagate
+ * them to its parent, since the write mark is a property, not of the state,
+ * but of the link between it and its parent. See mark_reg_read() and
+ * mark_stack_slot_read() in kernel/bpf/verifier.c.
+ */
+enum bpf_reg_liveness {
+ REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
+ REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
+ REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
+ REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
+ REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
+ REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
+};
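+
+/* Worked example (illustrative): in a state whose straight-line code does
+ *
+ *   r1 = 0;            // r1 becomes REG_LIVE_WRITTEN in this state
+ *   if r2 > 5 goto +4; // r2 becomes REG_LIVE_READ64; the mark propagates up
+ *
+ * r2's read mark walks the parentage chain until it meets a state that wrote
+ * r2, while r1's write mark screens r1's parents from reads of r1 made by
+ * this state's descendants.
+ */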
+
+/* For every reg representing a map value or allocated object pointer, we
+ * consider the (ptr, id) tuple to be unique within the verifier context,
+ * and consider such regs not to alias each other for the purposes of
+ * tracking lock state.
+ */
+struct bpf_active_lock {
+ /* This can either be reg->map_ptr or reg->btf. If ptr is NULL,
+ * there's no active lock held, and the other fields have no
+ * meaning. If non-NULL, it indicates that a lock is held, and
+ * the id member holds the reg->id of that register (>= 0).
+ */
+ void *ptr;
+ /* This will be reg->id */
+ u32 id;
+};
+
+#define ITER_PREFIX "bpf_iter_"
+
+enum bpf_iter_state {
+ BPF_ITER_STATE_INVALID, /* for non-first slot */
+ BPF_ITER_STATE_ACTIVE,
+ BPF_ITER_STATE_DRAINED,
+};
struct bpf_reg_state {
+ /* Ordering of fields matters. See states_equal() */
enum bpf_reg_type type;
+ /* Fixed part of pointer offset, pointer types only */
+ s32 off;
union {
- /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
- s64 imm;
-
- /* valid when type == PTR_TO_PACKET* */
- struct {
- u16 off;
- u16 range;
- };
+ /* valid when type == PTR_TO_PACKET */
+ int range;
/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
* PTR_TO_MAP_VALUE_OR_NULL
*/
- struct bpf_map *map_ptr;
+ struct {
+ struct bpf_map *map_ptr;
+ /* To distinguish map lookups from the outer map,
+ * the map_uid is non-zero for registers
+ * pointing to inner maps.
+ */
+ u32 map_uid;
+ };
+
+ /* for PTR_TO_BTF_ID */
+ struct {
+ struct btf *btf;
+ u32 btf_id;
+ };
+
+ struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
+ u32 mem_size;
+ u32 dynptr_id; /* for dynptr slices */
+ };
+
+ /* For dynptr stack slots */
+ struct {
+ enum bpf_dynptr_type type;
+ /* A dynptr is 16 bytes so it takes up 2 stack slots.
+ * We need to track which slot is the first slot
+ * to protect against cases where the user may try to
+ * pass in an address starting at the second slot of the
+ * dynptr.
+ */
+ bool first_slot;
+ } dynptr;
+
+ /* For bpf_iter stack slots */
+ struct {
+ /* BTF container and BTF type ID describing
+ * struct bpf_iter_<type> of an iterator state
+ */
+ struct btf *btf;
+ u32 btf_id;
+ /* packing following two fields to fit iter state into 16 bytes */
+ enum bpf_iter_state state:2;
+ int depth:30;
+ } iter;
+
+ /* Max size from any of the above. */
+ struct {
+ unsigned long raw1;
+ unsigned long raw2;
+ } raw;
+
+ u32 subprogno; /* for PTR_TO_FUNC */
};
- u32 id;
+ /* For scalar types (SCALAR_VALUE), this represents our knowledge of
+ * the actual value.
+ * For pointer types, this represents the variable part of the offset
+ * from the pointed-to object, and is shared with all bpf_reg_states
+ * with the same id as us.
+ */
+ struct tnum var_off;
/* Used to determine if any memory access using this register will
- * result in a bad access. These two fields must be last.
- * See states_equal()
+ * result in a bad access.
+ * These refer to the same value as var_off, not necessarily the actual
+ * contents of the register.
+ */
+ s64 smin_value; /* minimum possible (s64)value */
+ s64 smax_value; /* maximum possible (s64)value */
+ u64 umin_value; /* minimum possible (u64)value */
+ u64 umax_value; /* maximum possible (u64)value */
+ s32 s32_min_value; /* minimum possible (s32)value */
+ s32 s32_max_value; /* maximum possible (s32)value */
+ u32 u32_min_value; /* minimum possible (u32)value */
+ u32 u32_max_value; /* maximum possible (u32)value */
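+ /* Example (illustrative): after "r1 &= 0xff" on a fully unknown scalar,
+ * var_off becomes (value=0, mask=0xff), so umin_value = 0 and
+ * umax_value = 255; the signed and 32-bit bounds follow.
+ */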
+ /* For PTR_TO_PACKET, used to find other pointers with the same variable
+ * offset, so they can share range knowledge.
+ * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
+ * came from, when one is tested for != NULL.
+ * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
+ * for the purpose of tracking that it's freed.
+ * For PTR_TO_SOCKET this is used to share which pointers retain the
+ * same reference to the socket, to determine proper reference freeing.
+ * For stack slots that are dynptrs, this is used to track references to
+ * the dynptr to determine proper reference freeing.
+ * Similarly to dynptrs, we use ID to track "belonging" of a reference
+ * to a specific instance of bpf_iter.
*/
- s64 min_value;
- u64 max_value;
- u32 min_align;
- u32 aux_off;
- u32 aux_off_align;
+ u32 id;
+ /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be ptrs returned
+ * from the pointer-cast helpers bpf_sk_fullsock() and
+ * bpf_tcp_sock().
+ *
+ * Consider the following where "sk" is a reference counted
+ * pointer returned from "sk = bpf_sk_lookup_tcp();":
+ *
+ * 1: sk = bpf_sk_lookup_tcp();
+ * 2: if (!sk) { return 0; }
+ * 3: fullsock = bpf_sk_fullsock(sk);
+ * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
+ * 5: tp = bpf_tcp_sock(fullsock);
+ * 6: if (!tp) { bpf_sk_release(sk); return 0; }
+ * 7: bpf_sk_release(sk);
+ * 8: snd_cwnd = tp->snd_cwnd; // verifier will complain
+ *
+ * After bpf_sk_release(sk) at line 7, both "fullsock" ptr and
+ * "tp" ptr should be invalidated also. In order to do that,
+ * the reg holding "fullsock" and "sk" need to remember
+ * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
+ * such that the verifier can reset all regs which have
+ * ref_obj_id matching the sk_reg->id.
+ *
+ * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
+ * sk_reg->id will be kept for NULL-marking purposes only.
+ * After NULL-marking is done, sk_reg->id can be reset to 0.
+ *
+ * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
+ * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
+ *
+ * After "tp = bpf_tcp_sock(fullsock);" at line 5,
+ * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
+ * which is the same as sk_reg->ref_obj_id.
+ *
+ * From the verifier perspective, if sk, fullsock and tp
+ * are not NULL, they are the same ptr with different
+ * reg->type. In particular, bpf_sk_release(tp) is also
+ * allowed and has the same effect as bpf_sk_release(sk).
+ */
+ u32 ref_obj_id;
+ /* parentage chain for liveness checking */
+ struct bpf_reg_state *parent;
+ /* Inside the callee two registers can be both PTR_TO_STACK like
+ * R1=fp-8 and R2=fp-8, but one of them points to this function stack
+ * while another to the caller's stack. To differentiate them 'frameno'
+ * is used which is an index in bpf_verifier_state->frame[] array
+ * pointing to bpf_func_state.
+ */
+ u32 frameno;
+ /* Tracks subreg definition. The stored value is the insn_idx of the
+ * writing insn. This is safe because subreg_def is used before any insn
+ * patching, which only happens after main verification has finished.
+ */
+ s32 subreg_def;
+ enum bpf_reg_liveness live;
+ /* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
+ bool precise;
};
enum bpf_stack_slot_type {
STACK_INVALID, /* nothing was stored in this stack slot */
STACK_SPILL, /* register spilled into stack */
- STACK_MISC /* BPF program wrote some data into this slot */
+ STACK_MISC, /* BPF program wrote some data into this slot */
+ STACK_ZERO, /* BPF program wrote constant zero */
+ /* A dynptr is stored in this stack slot. The type of dynptr
+ * is stored in bpf_stack_state->spilled_ptr.dynptr.type
+ */
+ STACK_DYNPTR,
+ STACK_ITER,
};
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
+#define BPF_DYNPTR_SIZE sizeof(struct bpf_dynptr_kern)
+#define BPF_DYNPTR_NR_SLOTS (BPF_DYNPTR_SIZE / BPF_REG_SIZE)
+
+struct bpf_stack_state {
+ struct bpf_reg_state spilled_ptr;
+ u8 slot_type[BPF_REG_SIZE];
+};
+
+struct bpf_reference_state {
+ /* Track each reference created with a unique id, even if the same
+ * instruction creates the reference multiple times (eg, via CALL).
+ */
+ int id;
+ /* Instruction where the allocation of this reference occurred. This
+ * is used purely to inform the user of a reference leak.
+ */
+ int insn_idx;
+ /* There can be a case like:
+ * main (frame 0)
+ *  cb (frame 1)
+ *   func (frame 2)
+ *    cb (frame 3)
+ * Hence for frame 3, if callback_ref just stored a boolean, it would be
+ * impossible to distinguish nested callback refs. Hence store the
+ * frameno and compare that to callback_ref in check_reference_leak when
+ * exiting a callback function.
+ */
+ int callback_ref;
+};
+
/* state of the program:
* type of all registers and stack info
*/
-struct bpf_verifier_state {
+struct bpf_func_state {
struct bpf_reg_state regs[MAX_BPF_REG];
- u8 stack_slot_type[MAX_BPF_STACK];
- struct bpf_reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
+ /* index of call instruction that called into this func */
+ int callsite;
+ /* stack frame number of this function state from pov of
+ * enclosing bpf_verifier_state.
+ * 0 = main function, 1 = first callee.
+ */
+ u32 frameno;
+ /* subprog number == index within subprog_info
+ * zero == main subprog
+ */
+ u32 subprogno;
+ /* Every bpf_timer_start will increment async_entry_cnt.
+ * It's used to distinguish:
+ * void foo(void) { for(;;); }
+ * void foo(void) { bpf_timer_set_callback(,foo); }
+ */
+ u32 async_entry_cnt;
+ bool in_callback_fn;
+ struct tnum callback_ret_range;
+ bool in_async_callback_fn;
+
+ /* The following fields should be last. See copy_func_state() */
+ int acquired_refs;
+ struct bpf_reference_state *refs;
+ int allocated_stack;
+ struct bpf_stack_state *stack;
};
+struct bpf_idx_pair {
+ u32 prev_idx;
+ u32 idx;
+};
+
+struct bpf_id_pair {
+ u32 old;
+ u32 cur;
+};
+
+#define MAX_CALL_FRAMES 8
+/* Maximum number of register states that can exist at once */
+#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
+struct bpf_verifier_state {
+ /* call stack tracking */
+ struct bpf_func_state *frame[MAX_CALL_FRAMES];
+ struct bpf_verifier_state *parent;
+ /*
+ * 'branches' field is the number of branches left to explore:
+ * 0 - all possible paths from this state reached bpf_exit or
+ * were safely pruned
+ * 1 - at least one path is being explored.
+ * This state hasn't reached bpf_exit
+ * 2 - at least two paths are being explored.
+ * This state is an immediate parent of two children.
+ * One is fallthrough branch with branches==1 and another
+ * state is pushed into stack (to be explored later) also with
+ * branches==1. The parent of this state has branches==1.
+ * The verifier state tree connected via 'parent' pointer looks like:
+ * 1
+ * 1
+ * 2 -> 1 (first 'if' pushed into stack)
+ * 1
+ * 2 -> 1 (second 'if' pushed into stack)
+ * 1
+ * 1
+ * 1 bpf_exit.
+ *
+ * Once do_check() reaches bpf_exit, it calls update_branch_counts()
+ * and the verifier state tree will look like:
+ * 1
+ * 1
+ * 2 -> 1 (first 'if' pushed into stack)
+ * 1
+ * 1 -> 1 (second 'if' pushed into stack)
+ * 0
+ * 0
+ * 0 bpf_exit.
+ * After pop_stack() the do_check() will resume at second 'if'.
+ *
+ * If is_state_visited() sees a state with branches > 0 it means
+ * there is a loop. If such state is exactly equal to the current state
+ * it's an infinite loop. Note states_equal() checks for states
+ * equivalency, so two states being 'states_equal' does not mean
+ * infinite loop. The exact comparison is provided by
+ * states_maybe_looping() function. It's a stronger pre-check and
+ * much faster than states_equal().
+ *
+ * This algorithm may not find all possible infinite loops or
+ * loop iteration count may be too high.
+ * In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
+ */
+ u32 branches;
+ u32 insn_idx;
+ u32 curframe;
+
+ struct bpf_active_lock active_lock;
+ bool speculative;
+ bool active_rcu_lock;
+
+ /* first and last insn idx of this verifier state */
+ u32 first_insn_idx;
+ u32 last_insn_idx;
+ /* jmp history recorded from first to last.
+ * backtracking uses it to go from last to first.
+ * For most states jmp_history_cnt is [0-3];
+ * for loops it can go up to ~40.
+ */
+ struct bpf_idx_pair *jmp_history;
+ u32 jmp_history_cnt;
+};
+
+#define bpf_get_spilled_reg(slot, frame) \
+ (((slot < frame->allocated_stack / BPF_REG_SIZE) && \
+ (frame->stack[slot].slot_type[0] == STACK_SPILL)) \
+ ? &frame->stack[slot].spilled_ptr : NULL)
+
+/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
+#define bpf_for_each_spilled_reg(iter, frame, reg) \
+ for (iter = 0, reg = bpf_get_spilled_reg(iter, frame); \
+ iter < frame->allocated_stack / BPF_REG_SIZE; \
+ iter++, reg = bpf_get_spilled_reg(iter, frame))
+
+/* Invoke __expr over registers in __vst, setting __state and __reg */
+#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \
+ ({ \
+ struct bpf_verifier_state *___vstate = __vst; \
+ int ___i, ___j; \
+ for (___i = 0; ___i <= ___vstate->curframe; ___i++) { \
+ struct bpf_reg_state *___regs; \
+ __state = ___vstate->frame[___i]; \
+ ___regs = __state->regs; \
+ for (___j = 0; ___j < MAX_BPF_REG; ___j++) { \
+ __reg = &___regs[___j]; \
+ (void)(__expr); \
+ } \
+ bpf_for_each_spilled_reg(___j, __state, __reg) { \
+ if (!__reg) \
+ continue; \
+ (void)(__expr); \
+ } \
+ } \
+ })
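+
+/* Illustrative usage (sketch of the pattern in kernel/bpf/verifier.c):
+ * invalidate every register sharing a given ref_obj_id:
+ *
+ *   struct bpf_func_state *state;
+ *   struct bpf_reg_state *reg;
+ *
+ *   bpf_for_each_reg_in_vstate(vstate, state, reg, ({
+ *           if (reg->ref_obj_id == ref_obj_id)
+ *                   __mark_reg_unknown(env, reg);
+ *   }));
+ */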
+
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
struct bpf_verifier_state state;
struct bpf_verifier_state_list *next;
+ int miss_cnt, hit_cnt;
+};
+
+struct bpf_loop_inline_state {
+ unsigned int initialized:1; /* set to true upon first entry */
+ unsigned int fit_for_inline:1; /* true if callback function is the same
+ * at each call and flags are always zero
+ */
+ u32 callback_subprogno; /* valid when fit_for_inline is true */
};
+/* Possible states for alu_state member. */
+#define BPF_ALU_SANITIZE_SRC (1U << 0)
+#define BPF_ALU_SANITIZE_DST (1U << 1)
+#define BPF_ALU_NEG_VALUE (1U << 2)
+#define BPF_ALU_NON_POINTER (1U << 3)
+#define BPF_ALU_IMMEDIATE (1U << 4)
+#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
+ BPF_ALU_SANITIZE_DST)
+
struct bpf_insn_aux_data {
union {
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
- struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
+ unsigned long map_ptr_state; /* pointer/poison value for maps */
+ s32 call_imm; /* saved imm field of call insn */
+ u32 alu_limit; /* limit for add/sub register with pointer */
+ struct {
+ u32 map_index; /* index into used_maps[] */
+ u32 map_off; /* offset from value base address */
+ };
+ struct {
+ enum bpf_reg_type reg_type; /* type of pseudo_btf_id */
+ union {
+ struct {
+ struct btf *btf;
+ u32 btf_id; /* btf_id for struct typed var */
+ };
+ u32 mem_size; /* mem_size for non-struct typed var */
+ };
+ } btf_var;
+ /* if instruction is a call to bpf_loop this field tracks
+ * the state of the relevant registers to make a decision about inlining
+ */
+ struct bpf_loop_inline_state loop_inline_state;
+ };
+ union {
+ /* remember the size of type passed to bpf_obj_new to rewrite R1 */
+ u64 obj_new_size;
+ /* remember the offset of node field within type to rewrite */
+ u64 insert_off;
};
+ struct btf_struct_meta *kptr_struct_meta;
+ u64 map_key_state; /* constant (32 bit) key tracking for maps */
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
- int converted_op_size; /* the valid value width after perceived conversion */
+ u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
+ bool sanitize_stack_spill; /* subject to Spectre v4 sanitization */
+ bool zext_dst; /* this insn zero extends dst reg */
+ bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
+ bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
+ u8 alu_state; /* used in combination with alu_limit */
+
+ /* below fields are initialized once */
+ unsigned int orig_idx; /* original instruction index */
+ bool jmp_point;
+ bool prune_point;
+ /* ensure we check state equivalence and save a state checkpoint at
+ * this instruction, regardless of any heuristics
+ */
+ bool force_checkpoint;
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
+#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */
+
+#define BPF_VERIFIER_TMP_LOG_SIZE 1024
-struct bpf_verifier_env;
-struct bpf_ext_analyzer_ops {
- int (*insn_hook)(struct bpf_verifier_env *env,
- int insn_idx, int prev_insn_idx);
+struct bpf_verifier_log {
+ /* Logical start and end positions of a "log window" of the verifier log.
+ * start_pos == 0 means we haven't truncated anything.
+ * Once truncation starts to happen, start_pos + len_total == end_pos,
+ * except during log reset situations, in which (end_pos - start_pos)
+ * might get smaller than len_total (see bpf_vlog_reset()).
+ * Generally, (end_pos - start_pos) gives the amount of useful data in
+ * the user log buffer.
+ */
+ u64 start_pos;
+ u64 end_pos;
+ char __user *ubuf;
+ u32 level;
+ u32 len_total;
+ u32 len_max;
+ char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
+};
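+
+/* Worked example (illustrative): with len_total = 8 in rotating mode, after
+ * 12 bytes have been written, start_pos = 4 and end_pos = 12 (start_pos +
+ * len_total == end_pos), i.e. the user buffer retains only the last 8 bytes
+ * of the logical log.
+ */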
+
+#define BPF_LOG_LEVEL1 1
+#define BPF_LOG_LEVEL2 2
+#define BPF_LOG_STATS 4
+#define BPF_LOG_FIXED 8
+#define BPF_LOG_LEVEL (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
+#define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS | BPF_LOG_FIXED)
+#define BPF_LOG_KERNEL (BPF_LOG_MASK + 1) /* kernel internal flag */
+#define BPF_LOG_MIN_ALIGNMENT 8U
+#define BPF_LOG_ALIGNMENT 40U
+
+static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
+{
+ return log && log->level;
+}
+
+#define BPF_MAX_SUBPROGS 256
+
+struct bpf_subprog_info {
+ /* 'start' has to be the first field otherwise find_subprog() won't work */
+ u32 start; /* insn idx of function entry point */
+ u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
+ u16 stack_depth; /* max. stack depth used by this function */
+ bool has_tail_call;
+ bool tail_call_reachable;
+ bool has_ld_abs;
+ bool is_async_cb;
};
/* single container for all structs
* one verifier_env per bpf_check() call
*/
struct bpf_verifier_env {
+ u32 insn_idx;
+ u32 prev_insn_idx;
struct bpf_prog *prog; /* eBPF program being verified */
+ const struct bpf_verifier_ops *ops;
struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
int stack_size; /* number of states to be processed */
bool strict_alignment; /* perform strict pointer alignment checks */
- struct bpf_verifier_state cur_state; /* current verifier state */
+ bool test_state_freq; /* test verifier with different pruning frequency */
+ struct bpf_verifier_state *cur_state; /* current verifier state */
struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
- const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */
- void *analyzer_priv; /* pointer to external analyzer's private data */
+ struct bpf_verifier_state_list *free_list;
struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
+ struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTF's used by BPF program */
u32 used_map_cnt; /* number of used maps */
+ u32 used_btf_cnt; /* number of used BTF objects */
u32 id_gen; /* used to generate unique reg IDs */
+ bool explore_alu_limits;
bool allow_ptr_leaks;
+ bool allow_uninit_stack;
+ bool bpf_capable;
+ bool bypass_spec_v1;
+ bool bypass_spec_v4;
bool seen_direct_write;
- bool varlen_map_value_access;
struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
+ const struct bpf_line_info *prev_linfo;
+ struct bpf_verifier_log log;
+ struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
+ struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE];
+ struct {
+ int *insn_state;
+ int *insn_stack;
+ int cur_stack;
+ } cfg;
+ u32 pass_cnt; /* number of times do_check() was called */
+ u32 subprog_cnt;
+ /* number of instructions analyzed by the verifier */
+ u32 prev_insn_processed, insn_processed;
+ /* number of jmps, calls, exits analyzed so far */
+ u32 prev_jmps_processed, jmps_processed;
+ /* total verification time */
+ u64 verification_time;
+ /* maximum number of verifier states kept in 'branching' instructions */
+ u32 max_states_per_insn;
+ /* total number of allocated verifier states */
+ u32 total_states;
+ /* some states are freed during program analysis.
+ * this is the peak number of states; it dominates kernel
+ * memory consumption during verification
+ */
+ u32 peak_states;
+ /* longest register parentage chain walked for liveness marking */
+ u32 longest_mark_read_walk;
+ bpfptr_t fd_array;
+
+ /* bit mask to keep track of whether a register has been accessed
+ * since the last time the function state was printed
+ */
+ u32 scratched_regs;
+ /* Same as scratched_regs but for stack slots */
+ u64 scratched_stack_slots;
+ u64 prev_log_pos, prev_insn_print_pos;
+ /* buffer used in reg_type_str() to generate reg_type string */
+ char type_str_buf[TYPE_STR_BUF_LEN];
};
-int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
- void *priv);
+__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
+ const char *fmt, va_list args);
+__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
+ const char *fmt, ...);
+__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
+ const char *fmt, ...);
+int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
+ char __user *log_buf, u32 log_size);
+void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos);
+int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual);
+
+static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
+{
+ struct bpf_verifier_state *cur = env->cur_state;
+
+ return cur->frame[cur->curframe];
+}
+
+static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
+{
+ return cur_func(env)->regs;
+}
+
+int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
+int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
+ int insn_idx, int prev_insn_idx);
+int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
+void
+bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
+ struct bpf_insn *insn);
+void
+bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
+
+int check_ptr_off_reg(struct bpf_verifier_env *env,
+ const struct bpf_reg_state *reg, int regno);
+int check_func_arg_reg_off(struct bpf_verifier_env *env,
+ const struct bpf_reg_state *reg, int regno,
+ enum bpf_arg_type arg_type);
+int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+ u32 regno, u32 mem_size);
+
+/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
+static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
+ struct btf *btf, u32 btf_id)
+{
+ if (tgt_prog)
+ return ((u64)tgt_prog->aux->id << 32) | btf_id;
+ else
+ return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
+}
+
+/* unpack the IDs from the key as constructed above */
+static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id)
+{
+ if (obj_id)
+ *obj_id = key >> 32;
+ if (btf_id)
+ *btf_id = key & 0x7FFFFFFF;
+}
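+
+/* Example (illustrative): for tgt_prog->aux->id == 5 and btf_id == 7 the key
+ * is 0x0000000500000007; without a target prog, bit 31 is set instead, so
+ * the two key spaces cannot collide.
+ */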
+
+int bpf_check_attach_target(struct bpf_verifier_log *log,
+ const struct bpf_prog *prog,
+ const struct bpf_prog *tgt_prog,
+ u32 btf_id,
+ struct bpf_attach_target_info *tgt_info);
+void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);
+
+int mark_chain_precision(struct bpf_verifier_env *env, int regno);
+
+#define BPF_BASE_TYPE_MASK GENMASK(BPF_BASE_TYPE_BITS - 1, 0)
+
+/* extract base type from bpf_{arg, return, reg}_type. */
+static inline u32 base_type(u32 type)
+{
+ return type & BPF_BASE_TYPE_MASK;
+}
+
+/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
+static inline u32 type_flag(u32 type)
+{
+ return type & ~BPF_BASE_TYPE_MASK;
+}
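+
+/* Example (illustrative): for a register typed PTR_TO_MEM | MEM_RDONLY,
+ * base_type() yields PTR_TO_MEM and type_flag() yields MEM_RDONLY.
+ */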
+
+/* only use after check_attach_btf_id() */
+static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
+{
+ return prog->type == BPF_PROG_TYPE_EXT ?
+ prog->aux->dst_prog->type : prog->type;
+}
+
+static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
+{
+ switch (resolve_prog_type(prog)) {
+ case BPF_PROG_TYPE_TRACING:
+ return prog->expected_attach_type != BPF_TRACE_ITER;
+ case BPF_PROG_TYPE_STRUCT_OPS:
+ case BPF_PROG_TYPE_LSM:
+ return false;
+ default:
+ return true;
+ }
+}
+
+#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED)
+
+static inline bool bpf_type_has_unsafe_modifiers(u32 type)
+{
+ return type_flag(type) & ~BPF_REG_TRUSTED_MODIFIERS;
+}
#endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h
new file mode 100644
index 000000000000..2ae3c8e1d83c
--- /dev/null
+++ b/include/linux/bpfilter.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BPFILTER_H
+#define _LINUX_BPFILTER_H
+
+#include <uapi/linux/bpfilter.h>
+#include <linux/usermode_driver.h>
+#include <linux/sockptr.h>
+
+struct sock;
+int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval,
+ unsigned int optlen);
+int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
+ int __user *optlen);
+void bpfilter_umh_cleanup(struct umd_info *info);
+
+struct bpfilter_umh_ops {
+ struct umd_info info;
+ /* since ip_getsockopt() can run in parallel, serialize access to umh */
+ struct mutex lock;
+ int (*sockopt)(struct sock *sk, int optname, sockptr_t optval,
+ unsigned int optlen, bool is_set);
+ int (*start)(void);
+};
+extern struct bpfilter_umh_ops bpfilter_ops;
+#endif
diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h
new file mode 100644
index 000000000000..79b2f78eec1a
--- /dev/null
+++ b/include/linux/bpfptr.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* A pointer that can point to either kernel or userspace memory. */
+#ifndef _LINUX_BPFPTR_H
+#define _LINUX_BPFPTR_H
+
+#include <linux/mm.h>
+#include <linux/sockptr.h>
+
+typedef sockptr_t bpfptr_t;
+
+static inline bool bpfptr_is_kernel(bpfptr_t bpfptr)
+{
+ return bpfptr.is_kernel;
+}
+
+static inline bpfptr_t KERNEL_BPFPTR(void *p)
+{
+ return (bpfptr_t) { .kernel = p, .is_kernel = true };
+}
+
+static inline bpfptr_t USER_BPFPTR(void __user *p)
+{
+ return (bpfptr_t) { .user = p };
+}
+
+static inline bpfptr_t make_bpfptr(u64 addr, bool is_kernel)
+{
+ if (is_kernel)
+ return KERNEL_BPFPTR((void*) (uintptr_t) addr);
+ else
+ return USER_BPFPTR(u64_to_user_ptr(addr));
+}
+
+static inline bool bpfptr_is_null(bpfptr_t bpfptr)
+{
+ if (bpfptr_is_kernel(bpfptr))
+ return !bpfptr.kernel;
+ return !bpfptr.user;
+}
+
+static inline void bpfptr_add(bpfptr_t *bpfptr, size_t val)
+{
+ if (bpfptr_is_kernel(*bpfptr))
+ bpfptr->kernel += val;
+ else
+ bpfptr->user += val;
+}
+
+static inline int copy_from_bpfptr_offset(void *dst, bpfptr_t src,
+ size_t offset, size_t size)
+{
+ if (!bpfptr_is_kernel(src))
+ return copy_from_user(dst, src.user + offset, size);
+ return copy_from_kernel_nofault(dst, src.kernel + offset, size);
+}
+
+static inline int copy_from_bpfptr(void *dst, bpfptr_t src, size_t size)
+{
+ return copy_from_bpfptr_offset(dst, src, 0, size);
+}
+
+static inline int copy_to_bpfptr_offset(bpfptr_t dst, size_t offset,
+ const void *src, size_t size)
+{
+ return copy_to_sockptr_offset((sockptr_t) dst, offset, src, size);
+}
+
+static inline void *kvmemdup_bpfptr(bpfptr_t src, size_t len)
+{
+ void *p = kvmalloc(len, GFP_USER | __GFP_NOWARN);
+
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+ if (copy_from_bpfptr(p, src, len)) {
+ kvfree(p);
+ return ERR_PTR(-EFAULT);
+ }
+ return p;
+}
+
+static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count)
+{
+ if (bpfptr_is_kernel(src))
+ return strncpy_from_kernel_nofault(dst, src.kernel, count);
+ return strncpy_from_user(dst, src.user, count);
+}
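+
+/* Illustrative only (hypothetical helper, not part of this header): a
+ * syscall-style routine that works for either origin, mirroring how the BPF
+ * syscall code reads attributes passed from user space or from bpf_sys_bpf()
+ * with the same helpers.
+ */
+static inline int example_fetch_u64(u64 *dst, bpfptr_t src)
+{
+	if (copy_from_bpfptr(dst, src, sizeof(*dst)))
+		return -EFAULT;
+	return 0;
+}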
+
+#endif /* _LINUX_BPFPTR_H */
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index abcda9b458ab..9e77165f3ef6 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BRCMPHY_H
#define _LINUX_BRCMPHY_H
@@ -13,7 +14,11 @@
#define PHY_ID_BCM5241 0x0143bc30
#define PHY_ID_BCMAC131 0x0143bc70
#define PHY_ID_BCM5481 0x0143bca0
+#define PHY_ID_BCM5395 0x0143bcf0
+#define PHY_ID_BCM53125 0x03625f20
+#define PHY_ID_BCM53128 0x03625e10
#define PHY_ID_BCM54810 0x03625d00
+#define PHY_ID_BCM54811 0x03625cc0
#define PHY_ID_BCM5482 0x0143bcb0
#define PHY_ID_BCM5411 0x00206070
#define PHY_ID_BCM5421 0x002060e0
@@ -22,9 +27,15 @@
#define PHY_ID_BCM5461 0x002060c0
#define PHY_ID_BCM54612E 0x03625e60
#define PHY_ID_BCM54616S 0x03625d10
+#define PHY_ID_BCM54140 0xae025009
#define PHY_ID_BCM57780 0x03625d90
+#define PHY_ID_BCM89610 0x03625cd0
+#define PHY_ID_BCM72113 0x35905310
+#define PHY_ID_BCM72116 0x35905350
+#define PHY_ID_BCM72165 0x35905340
#define PHY_ID_BCM7250 0xae025280
+#define PHY_ID_BCM7255 0xae025120
#define PHY_ID_BCM7260 0xae025190
#define PHY_ID_BCM7268 0xae025090
#define PHY_ID_BCM7271 0xae0253b0
@@ -40,8 +51,10 @@
#define PHY_ID_BCM7439 0x600d8480
#define PHY_ID_BCM7439_2 0xae025080
#define PHY_ID_BCM7445 0x600d8510
+#define PHY_ID_BCM7712 0x35905330
#define PHY_ID_BCM_CYGNUS 0xae025200
+#define PHY_ID_BCM_OMEGA 0xae025100
#define PHY_BCM_OUI_MASK 0xfffffc00
#define PHY_BCM_OUI_1 0x00206000
@@ -51,18 +64,12 @@
#define PHY_BCM_OUI_5 0x03625e00
#define PHY_BCM_OUI_6 0xae025000
-#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001
-#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002
-#define PHY_BCM_FLAGS_INTF_SGMII 0x00000010
-#define PHY_BCM_FLAGS_INTF_XAUI 0x00000020
-#define PHY_BRCM_WIRESPEED_ENABLE 0x00000100
-#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000200
-#define PHY_BRCM_RX_REFCLK_UNUSED 0x00000400
-#define PHY_BRCM_STD_IBND_DISABLE 0x00000800
-#define PHY_BRCM_EXT_IBND_RX_ENABLE 0x00001000
-#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000
-#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000
-#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000
+#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000001
+#define PHY_BRCM_RX_REFCLK_UNUSED 0x00000002
+#define PHY_BRCM_CLEAR_RGMII_MODE 0x00000004
+#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00000008
+#define PHY_BRCM_EN_MASTER_MODE 0x00000010
+#define PHY_BRCM_IDDQ_SUSPEND 0x00000020
/* Broadcom BCM7xxx specific workarounds */
#define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff)
@@ -73,14 +80,17 @@
#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */
#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */
#define MII_BCM54XX_ECR_IF 0x0800 /* Interrupt force */
+#define MII_BCM54XX_ECR_FIFOE 0x0001 /* FIFO elasticity */
#define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */
#define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */
#define MII_BCM54XX_EXP_DATA 0x15 /* Expansion register data */
#define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */
+#define MII_BCM54XX_EXP_SEL_TOP 0x0d00 /* TOP_MISC expansion register select */
#define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */
#define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */
+#define MII_BCM54XX_EXP_SEL_ETC 0x0d00 /* Expansion register spare + 2k mem */
#define MII_BCM54XX_AUX_CTL 0x18 /* Auxiliary control register */
#define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */
@@ -106,15 +116,25 @@
#define MII_BCM54XX_SHD_VAL(x) ((x & 0x1f) << 10)
#define MII_BCM54XX_SHD_DATA(x) ((x & 0x3ff) << 0)
+#define MII_BCM54XX_RDB_ADDR 0x1e
+#define MII_BCM54XX_RDB_DATA 0x1f
+
+/* legacy access control via rdb/expansion register */
+#define BCM54XX_RDB_REG0087 0x0087
+#define BCM54XX_EXP_REG7E (MII_BCM54XX_EXP_SEL_ER + 0x7E)
+#define BCM54XX_ACCESS_MODE_LEGACY_EN BIT(15)
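+
+/* Illustrative sketch (hypothetical sequence): a driver can enable legacy
+ * (expansion) register access through the RDB registers above, e.g.:
+ *
+ *   phy_write(phydev, MII_BCM54XX_RDB_ADDR, BCM54XX_RDB_REG0087);
+ *   phy_write(phydev, MII_BCM54XX_RDB_DATA, BCM54XX_ACCESS_MODE_LEGACY_EN);
+ */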
+
/*
* AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18)
*/
#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x00
#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400
#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800
+#define MII_BCM54XX_AUXCTL_ACTL_EXT_PKT_LEN 0x4000
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_EN 0x0080
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN 0x0100
#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
@@ -141,6 +161,22 @@
#define BCM_LED_SRC_OFF 0xe /* Tied high */
#define BCM_LED_SRC_ON 0xf /* Tied low */
+/*
+ * Broadcom Multicolor LED configurations (expansion register 4)
+ */
+#define BCM_EXP_MULTICOLOR (MII_BCM54XX_EXP_SEL_ER + 0x04)
+#define BCM_LED_MULTICOLOR_IN_PHASE BIT(8)
+#define BCM_LED_MULTICOLOR_LINK_ACT 0x0
+#define BCM_LED_MULTICOLOR_SPEED 0x1
+#define BCM_LED_MULTICOLOR_ACT_FLASH 0x2
+#define BCM_LED_MULTICOLOR_FDX 0x3
+#define BCM_LED_MULTICOLOR_OFF 0x4
+#define BCM_LED_MULTICOLOR_ON 0x5
+#define BCM_LED_MULTICOLOR_ALT 0x6
+#define BCM_LED_MULTICOLOR_FLASH 0x7
+#define BCM_LED_MULTICOLOR_LINK 0x8
+#define BCM_LED_MULTICOLOR_ACT 0x9
+#define BCM_LED_MULTICOLOR_PROGRAM 0xa
/*
* BCM5482: Shadow registers
@@ -160,6 +196,7 @@
#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001
#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002
#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004
+#define BCM54XX_SHD_SCR3_RXCTXC_DIS 0x0100
/* 01010: Auto Power-Down */
#define BCM54XX_SHD_APD 0x0a
@@ -177,9 +214,18 @@
#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */
#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */
#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */
-#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */
-#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */
+/* 10011: SerDes 100-FX Control Register */
+#define BCM54616S_SHD_100FX_CTRL 0x13
+#define BCM54616S_100FX_MODE BIT(0) /* 100-FX SerDes Enable */
+
+/* 11111: Mode Control Register */
+#define BCM54XX_SHD_MODE 0x1f
+#define BCM54XX_SHD_INTF_SEL_MASK GENMASK(2, 1) /* INTERF_SEL[1:0] */
+#define BCM54XX_SHD_INTF_SEL_RGMII 0x02
+#define BCM54XX_SHD_INTF_SEL_SGMII 0x04
+#define BCM54XX_SHD_INTF_SEL_GBIC 0x06
+#define BCM54XX_SHD_MODE_1000BX BIT(0) /* Enable 1000-X registers */
/*
* EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17)
@@ -192,6 +238,7 @@
#define MII_BCM54XX_EXP_EXP08 0x0F08
#define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001
#define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200
+#define MII_BCM54XX_EXP_EXP08_FORCE_DAC_WAKE 0x0100
#define MII_BCM54XX_EXP_EXP75 0x0f75
#define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c
#define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001
@@ -200,6 +247,12 @@
#define MII_BCM54XX_EXP_EXP97 0x0f97
#define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c
+/* Top-MISC expansion registers */
+#define BCM54XX_TOP_MISC_IDDQ_CTRL (MII_BCM54XX_EXP_SEL_TOP + 0x06)
+#define BCM54XX_TOP_MISC_IDDQ_LP (1 << 0)
+#define BCM54XX_TOP_MISC_IDDQ_SD (1 << 2)
+#define BCM54XX_TOP_MISC_IDDQ_SR (1 << 3)
+
/*
* BCM5482: Secondary SerDes registers
*/
@@ -215,6 +268,9 @@
#define BCM54810_SHD_CLK_CTL 0x3
#define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9)
+/* BCM54612E Registers */
+#define BCM54612E_EXP_SPARE0 (MII_BCM54XX_EXP_SEL_ETC + 0x34)
+#define BCM54612E_LED4_CLK125OUT_EN (1 << 1)
/*****************************************************************************/
/* Fast Ethernet Transceiver definitions. */
@@ -237,6 +293,7 @@
#define MII_BRCM_FET_SHDW_MC_FAME 0x4000 /* Force Auto MDIX enable */
#define MII_BRCM_FET_SHDW_AUXMODE4 0x1a /* Auxiliary mode 4 */
+#define MII_BRCM_FET_SHDW_AM4_STANDBY 0x0008 /* Standby enable */
#define MII_BRCM_FET_SHDW_AM4_LED_MASK 0x0003
#define MII_BRCM_FET_SHDW_AM4_LED_MODE1 0x0001
@@ -255,4 +312,51 @@
#define MII_BRCM_CORE_EXPB0 0xB0
#define MII_BRCM_CORE_EXPB1 0xB1
+/* Enhanced Cable Diagnostics */
+#define BCM54XX_RDB_ECD_CTRL 0x2a0
+#define BCM54XX_EXP_ECD_CTRL (MII_BCM54XX_EXP_SEL_ER + 0xc0)
+
+#define BCM54XX_ECD_CTRL_CABLE_TYPE_CAT3 1 /* CAT3 or worse */
+#define BCM54XX_ECD_CTRL_CABLE_TYPE_CAT5 0 /* CAT5 or better */
+#define BCM54XX_ECD_CTRL_CABLE_TYPE_MASK BIT(0) /* cable type */
+#define BCM54XX_ECD_CTRL_INVALID BIT(3) /* invalid result */
+#define BCM54XX_ECD_CTRL_UNIT_CM 0 /* centimeters */
+#define BCM54XX_ECD_CTRL_UNIT_M 1 /* meters */
+#define BCM54XX_ECD_CTRL_UNIT_MASK BIT(10) /* cable length unit */
+#define BCM54XX_ECD_CTRL_IN_PROGRESS BIT(11) /* test in progress */
+#define BCM54XX_ECD_CTRL_BREAK_LINK BIT(12) /* disconnect link
+ * during test
+ */
+#define BCM54XX_ECD_CTRL_CROSS_SHORT_DIS BIT(13) /* disable inter-pair
+ * short check
+ */
+#define BCM54XX_ECD_CTRL_RUN BIT(15) /* run immediate */
+
+#define BCM54XX_RDB_ECD_FAULT_TYPE 0x2a1
+#define BCM54XX_EXP_ECD_FAULT_TYPE (MII_BCM54XX_EXP_SEL_ER + 0xc1)
+#define BCM54XX_ECD_FAULT_TYPE_INVALID 0x0
+#define BCM54XX_ECD_FAULT_TYPE_OK 0x1
+#define BCM54XX_ECD_FAULT_TYPE_OPEN 0x2
+#define BCM54XX_ECD_FAULT_TYPE_SAME_SHORT 0x3 /* short same pair */
+#define BCM54XX_ECD_FAULT_TYPE_CROSS_SHORT 0x4 /* short different pairs */
+#define BCM54XX_ECD_FAULT_TYPE_BUSY 0x9
+#define BCM54XX_ECD_FAULT_TYPE_PAIR_D_MASK GENMASK(3, 0)
+#define BCM54XX_ECD_FAULT_TYPE_PAIR_C_MASK GENMASK(7, 4)
+#define BCM54XX_ECD_FAULT_TYPE_PAIR_B_MASK GENMASK(11, 8)
+#define BCM54XX_ECD_FAULT_TYPE_PAIR_A_MASK GENMASK(15, 12)
+
+#define BCM54XX_RDB_ECD_PAIR_A_LENGTH_RESULTS 0x2a2
+#define BCM54XX_EXP_ECD_PAIR_A_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc2)
+#define BCM54XX_RDB_ECD_PAIR_B_LENGTH_RESULTS 0x2a3
+#define BCM54XX_EXP_ECD_PAIR_B_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc3)
+#define BCM54XX_RDB_ECD_PAIR_C_LENGTH_RESULTS 0x2a4
+#define BCM54XX_EXP_ECD_PAIR_C_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc4)
+#define BCM54XX_RDB_ECD_PAIR_D_LENGTH_RESULTS 0x2a5
+#define BCM54XX_EXP_ECD_PAIR_D_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc5)
+#define BCM54XX_ECD_LENGTH_RESULTS_INVALID 0xffff
+
#endif /* _LINUX_BRCMPHY_H */
diff --git a/include/linux/bsearch.h b/include/linux/bsearch.h
index 90b1aa867224..e66b711d091e 100644
--- a/include/linux/bsearch.h
+++ b/include/linux/bsearch.h
@@ -1,9 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BSEARCH_H
#define _LINUX_BSEARCH_H
#include <linux/types.h>
-void *bsearch(const void *key, const void *base, size_t num, size_t size,
- int (*cmp)(const void *key, const void *elt));
+static __always_inline
+void *__inline_bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp)
+{
+ const char *pivot;
+ int result;
+
+ while (num > 0) {
+ pivot = base + (num >> 1) * size;
+ result = cmp(key, pivot);
+
+ if (result == 0)
+ return (void *)pivot;
+
+ if (result > 0) {
+ base = pivot + size;
+ num--;
+ }
+ num >>= 1;
+ }
+
+ return NULL;
+}
+
+extern void *bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp);
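+
+/* Illustrative usage (hypothetical, not part of this header):
+ *
+ *   static int cmp_int(const void *key, const void *elt)
+ *   {
+ *           int a = *(const int *)key, b = *(const int *)elt;
+ *
+ *           return (a > b) - (a < b);
+ *   }
+ *
+ *   int sorted[] = { 1, 3, 5, 9 };
+ *   int key = 5;
+ *   int *hit = __inline_bsearch(&key, sorted, ARRAY_SIZE(sorted),
+ *                               sizeof(sorted[0]), cmp_int);
+ */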
#endif /* _LINUX_BSEARCH_H */
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index e34dde2da0ef..9e97ced2896d 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -1,35 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* BSG helper library
*
* Copyright (C) 2008 James Smart, Emulex Corporation
* Copyright (C) 2011 Red Hat, Inc. All rights reserved.
* Copyright (C) 2011 Mike Christie
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _BLK_BSG_
#define _BLK_BSG_
#include <linux/blkdev.h>
+struct bsg_job;
struct request;
struct device;
struct scatterlist;
struct request_queue;
+typedef int (bsg_job_fn) (struct bsg_job *);
+typedef enum blk_eh_timer_return (bsg_timeout_fn)(struct request *);
+
struct bsg_buffer {
unsigned int payload_len;
int sg_cnt;
@@ -38,10 +28,11 @@ struct bsg_buffer {
struct bsg_job {
struct device *dev;
- struct request *req;
struct kref kref;
+ unsigned int timeout;
+
/* Transport/driver specific request/reply structs */
void *request;
void *reply;
@@ -61,13 +52,21 @@ struct bsg_job {
struct bsg_buffer request_payload;
struct bsg_buffer reply_payload;
+ int result;
+ unsigned int reply_payload_rcv_len;
+
+ /* BIDI support */
+ struct request *bidi_rq;
+ struct bio *bidi_bio;
+
void *dd_data; /* Used for driver-specific storage */
};
void bsg_job_done(struct bsg_job *job, int result,
unsigned int reply_payload_rcv_len);
-struct request_queue *bsg_setup_queue(struct device *dev, char *name,
- bsg_job_fn *job_fn, int dd_job_size);
+struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
+ bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size);
+void bsg_remove_queue(struct request_queue *q);
void bsg_job_put(struct bsg_job *job);
int __must_check bsg_job_get(struct bsg_job *job);
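+
+/* Illustrative transport-driver usage (hypothetical names): register a bsg
+ * queue with the timeout callback added above.
+ *
+ *   struct request_queue *q;
+ *
+ *   q = bsg_setup_queue(dev, "example_bsg", example_job_fn,
+ *                       example_timeout_fn, sizeof(struct example_dd_data));
+ *   if (IS_ERR(q))
+ *           return PTR_ERR(q);
+ *   ...
+ *   bsg_remove_queue(q);
+ */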
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
index 7173f6e9d2dd..1ac81c809da9 100644
--- a/include/linux/bsg.h
+++ b/include/linux/bsg.h
@@ -1,33 +1,19 @@
-#ifndef BSG_H
-#define BSG_H
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BSG_H
+#define _LINUX_BSG_H
#include <uapi/linux/bsg.h>
+struct bsg_device;
+struct device;
+struct request_queue;
-#if defined(CONFIG_BLK_DEV_BSG)
-struct bsg_class_device {
- struct device *class_dev;
- struct device *parent;
- int minor;
- struct request_queue *queue;
- struct kref ref;
- void (*release)(struct device *);
-};
+typedef int (bsg_sg_io_fn)(struct request_queue *, struct sg_io_v4 *hdr,
+ fmode_t mode, unsigned int timeout);
-extern int bsg_register_queue(struct request_queue *q,
- struct device *parent, const char *name,
- void (*release)(struct device *));
-extern void bsg_unregister_queue(struct request_queue *);
-#else
-static inline int bsg_register_queue(struct request_queue *q,
- struct device *parent, const char *name,
- void (*release)(struct device *))
-{
- return 0;
-}
-static inline void bsg_unregister_queue(struct request_queue *q)
-{
-}
-#endif
+struct bsg_device *bsg_register_queue(struct request_queue *q,
+ struct device *parent, const char *name,
+ bsg_sg_io_fn *sg_io_fn);
+void bsg_unregister_queue(struct bsg_device *bcd);
-#endif
+#endif /* _LINUX_BSG_H */
diff --git a/include/linux/btf.h b/include/linux/btf.h
new file mode 100644
index 000000000000..508199e38415
--- /dev/null
+++ b/include/linux/btf.h
@@ -0,0 +1,571 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Facebook */
+
+#ifndef _LINUX_BTF_H
+#define _LINUX_BTF_H 1
+
+#include <linux/types.h>
+#include <linux/bpfptr.h>
+#include <linux/bsearch.h>
+#include <linux/btf_ids.h>
+#include <uapi/linux/btf.h>
+#include <uapi/linux/bpf.h>
+
+#define BTF_TYPE_EMIT(type) ((void)(type *)0)
+#define BTF_TYPE_EMIT_ENUM(enum_val) ((void)enum_val)
+
+/* These need to be macros, as the expressions are used in assembler input */
+#define KF_ACQUIRE (1 << 0) /* kfunc is an acquire function */
+#define KF_RELEASE (1 << 1) /* kfunc is a release function */
+#define KF_RET_NULL (1 << 2) /* kfunc returns a pointer that may be NULL */
+/* Trusted arguments are those which are guaranteed to be valid when passed to
+ * the kfunc. It is used to enforce that pointers obtained from either acquire
+ * kfuncs, or from the main kernel on a tracepoint or struct_ops callback
+ * invocation, remain unmodified when being passed to helpers taking trusted
+ * args.
+ *
+ * Consider, for example, the following new task tracepoint:
+ *
+ * SEC("tp_btf/task_newtask")
+ * int BPF_PROG(new_task_tp, struct task_struct *task, u64 clone_flags)
+ * {
+ * ...
+ * }
+ *
+ * And the following kfunc:
+ *
+ * BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
+ *
+ * All invocations to the kfunc must pass the unmodified, unwalked task:
+ *
+ * bpf_task_acquire(task); // Allowed
+ * bpf_task_acquire(task->last_wakee); // Rejected, walked task
+ *
+ * Programs may also pass referenced tasks directly to the kfunc:
+ *
+ * struct task_struct *acquired;
+ *
+ * acquired = bpf_task_acquire(task); // Allowed, same as above
+ * bpf_task_acquire(acquired); // Allowed
+ * bpf_task_acquire(task); // Allowed
+ * bpf_task_acquire(acquired->last_wakee); // Rejected, walked task
+ *
+ * Programs may _not_, however, pass a task from an arbitrary fentry/fexit, or
+ * kprobe/kretprobe to the kfunc, as BPF cannot guarantee that all of these
+ * pointers are guaranteed to be safe. For example, the following BPF program
+ * would be rejected:
+ *
+ * SEC("kretprobe/free_task")
+ * int BPF_PROG(free_task_probe, struct task_struct *tsk)
+ * {
+ * struct task_struct *acquired;
+ *
+ * acquired = bpf_task_acquire(tsk); // Rejected, not a trusted pointer
+ * bpf_task_release(acquired);
+ *
+ * return 0;
+ * }
+ */
+#define KF_TRUSTED_ARGS (1 << 4) /* kfunc only takes trusted pointer arguments */
+#define KF_SLEEPABLE (1 << 5) /* kfunc may sleep */
+#define KF_DESTRUCTIVE (1 << 6) /* kfunc performs destructive actions */
+#define KF_RCU (1 << 7) /* kfunc takes either rcu or trusted pointer arguments */
+/* only one of KF_ITER_{NEW,NEXT,DESTROY} could be specified per kfunc */
+#define KF_ITER_NEW (1 << 8) /* kfunc implements BPF iter constructor */
+#define KF_ITER_NEXT (1 << 9) /* kfunc implements BPF iter next method */
+#define KF_ITER_DESTROY (1 << 10) /* kfunc implements BPF iter destructor */
+
+/*
+ * Tag marking a kernel function as a kfunc. This is meant to minimize the
+ * amount of copy-paste that kfunc authors have to include for correctness so
+ * as to avoid issues such as the compiler inlining or eliding either a static
+ * kfunc, or a global kfunc in an LTO build.
+ */
+#define __bpf_kfunc __used noinline
+
+/*
+ * Return the name of the passed struct, if exists, or halt the build if for
+ * example the structure gets renamed. In this way, developers have to revisit
+ * the code using that structure name, and update it accordingly.
+ */
+#define stringify_struct(x) \
+ ({ BUILD_BUG_ON(sizeof(struct x) < 0); \
+ __stringify(x); })
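+
+/* Example (illustrative): stringify_struct(task_struct) evaluates to the
+ * string "task_struct", and fails the build if struct task_struct is ever
+ * renamed.
+ */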
+
+struct btf;
+struct btf_member;
+struct btf_type;
+union bpf_attr;
+struct btf_show;
+struct btf_id_set;
+
+struct btf_kfunc_id_set {
+ struct module *owner;
+ struct btf_id_set8 *set;
+};
+
+struct btf_id_dtor_kfunc {
+ u32 btf_id;
+ u32 kfunc_btf_id;
+};
+
+struct btf_struct_meta {
+ u32 btf_id;
+ struct btf_record *record;
+};
+
+struct btf_struct_metas {
+ u32 cnt;
+ struct btf_struct_meta types[];
+};
+
+extern const struct file_operations btf_fops;
+
+void btf_get(struct btf *btf);
+void btf_put(struct btf *btf);
+int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_sz);
+struct btf *btf_get_by_fd(int fd);
+int btf_get_info_by_fd(const struct btf *btf,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+/* Figure out the size of a type_id. If type_id is a modifier
+ * (e.g. const), it will be resolved to find out the type with size.
+ *
+ * For example:
+ * In describing "const void *", type_id is "const" and "const"
+ * refers to "void *". The return type will be "void *".
+ *
+ * If type_id is a simple "int", then return type will be "int".
+ *
+ * @btf: struct btf object
+ * @type_id: Find out the size of type_id. The type_id of the return
+ * type is set to *type_id.
+ * @ret_size: It can be NULL. If not NULL, the size of the return
+ * type is set to *ret_size.
+ * Return: The btf_type (resolved to another type with size info if needed).
+ * NULL is returned if type_id itself does not have size info
+ * (e.g. void) or it cannot be resolved to another type that
+ * has size info.
+ * *type_id and *ret_size will not be changed in the
+ * NULL return case.
+ */
+const struct btf_type *btf_type_id_size(const struct btf *btf,
+ u32 *type_id,
+ u32 *ret_size);
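A short usage sketch of the contract described above, assuming `btf`, `type_id` and the caller's error path; the error code is illustrative:

	const struct btf_type *t;
	u32 id = type_id, size;

	t = btf_type_id_size(btf, &id, &size);
	if (!t)
		return -EINVAL;	/* no size info (e.g. void); id/size untouched */
	/* on success, `id` names the size-carrying type, `size` its byte size */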
+
+/*
+ * Options to control show behaviour.
+ * - BTF_SHOW_COMPACT: no formatting around type information
+ * - BTF_SHOW_NONAME: no struct/union member names/types
+ * - BTF_SHOW_PTR_RAW: show raw (unobfuscated) pointer values;
+ * equivalent to %px.
+ * - BTF_SHOW_ZERO: show zero-valued struct/union members; they
+ * are not displayed by default
+ * - BTF_SHOW_UNSAFE: skip use of bpf_probe_read() to safely read
+ * data before displaying it.
+ */
+#define BTF_SHOW_COMPACT BTF_F_COMPACT
+#define BTF_SHOW_NONAME BTF_F_NONAME
+#define BTF_SHOW_PTR_RAW BTF_F_PTR_RAW
+#define BTF_SHOW_ZERO BTF_F_ZERO
+#define BTF_SHOW_UNSAFE (1ULL << 4)
+
+void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
+ struct seq_file *m);
+int btf_type_seq_show_flags(const struct btf *btf, u32 type_id, void *obj,
+ struct seq_file *m, u64 flags);
+
+/*
+ * Copy len bytes of string representation of obj of BTF type_id into buf.
+ *
+ * @btf: struct btf object
+ * @type_id: type id of type obj points to
+ * @obj: pointer to typed data
+ * @buf: buffer to write to
+ * @len: maximum length to write to buf
+ * @flags: show options (see above)
+ *
+ * Return: length that would have been/was copied as per snprintf, or
+ * negative error.
+ */
+int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
+ char *buf, int len, u64 flags);
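As a hedged sketch, a caller that already holds `btf`, `type_id` and `obj` could dump the object like this; the buffer size and flag choice are illustrative:

	char buf[256];
	int ret;

	ret = btf_type_snprintf_show(btf, type_id, obj, buf, sizeof(buf),
				     BTF_SHOW_COMPACT | BTF_SHOW_ZERO);
	if (ret < 0)
		return ret;
	if (ret >= (int)sizeof(buf))
		/* output was truncated, as with snprintf */;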
+
+int btf_get_fd_by_id(u32 id);
+u32 btf_obj_id(const struct btf *btf);
+bool btf_is_kernel(const struct btf *btf);
+bool btf_is_module(const struct btf *btf);
+struct module *btf_try_get_module(const struct btf *btf);
+u32 btf_nr_types(const struct btf *btf);
+bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
+ const struct btf_member *m,
+ u32 expected_offset, u32 expected_size);
+int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t);
+int btf_find_timer(const struct btf *btf, const struct btf_type *t);
+struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t,
+ u32 field_mask, u32 value_size);
+int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec);
+bool btf_type_is_void(const struct btf_type *t);
+s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind);
+const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
+ u32 id, u32 *res_id);
+const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
+ u32 id, u32 *res_id);
+const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
+ u32 id, u32 *res_id);
+const struct btf_type *
+btf_resolve_size(const struct btf *btf, const struct btf_type *type,
+ u32 *type_size);
+const char *btf_type_str(const struct btf_type *t);
+
+#define for_each_member(i, struct_type, member) \
+ for (i = 0, member = btf_type_member(struct_type); \
+ i < btf_type_vlen(struct_type); \
+ i++, member++)
+
+#define for_each_vsi(i, datasec_type, member) \
+ for (i = 0, member = btf_type_var_secinfo(datasec_type); \
+ i < btf_type_vlen(datasec_type); \
+ i++, member++)
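A sketch of the intended use, assuming `t` was fetched with btf_type_by_id() (declared further down) and is a BTF_KIND_STRUCT; the loop body is illustrative:

	const struct btf_member *member;
	int i;

	for_each_member(i, t, member) {
		const char *name = btf_name_by_offset(btf, member->name_off);
		u32 bit_off = __btf_member_bit_offset(t, member);

		pr_debug("member %s at bit offset %u\n", name, bit_off);
	}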
+
+static inline bool btf_type_is_ptr(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_PTR;
+}
+
+static inline bool btf_type_is_int(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
+}
+
+static inline bool btf_type_is_small_int(const struct btf_type *t)
+{
+ return btf_type_is_int(t) && t->size <= sizeof(u64);
+}
+
+static inline u8 btf_int_encoding(const struct btf_type *t)
+{
+ return BTF_INT_ENCODING(*(u32 *)(t + 1));
+}
+
+static inline bool btf_type_is_signed_int(const struct btf_type *t)
+{
+ return btf_type_is_int(t) && (btf_int_encoding(t) & BTF_INT_SIGNED);
+}
+
+static inline bool btf_type_is_enum(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM;
+}
+
+static inline bool btf_is_any_enum(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM ||
+ BTF_INFO_KIND(t->info) == BTF_KIND_ENUM64;
+}
+
+static inline bool btf_kind_core_compat(const struct btf_type *t1,
+ const struct btf_type *t2)
+{
+ return BTF_INFO_KIND(t1->info) == BTF_INFO_KIND(t2->info) ||
+ (btf_is_any_enum(t1) && btf_is_any_enum(t2));
+}
+
+static inline bool str_is_empty(const char *s)
+{
+ return !s || !s[0];
+}
+
+static inline u16 btf_kind(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info);
+}
+
+static inline bool btf_is_enum(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_ENUM;
+}
+
+static inline bool btf_is_enum64(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_ENUM64;
+}
+
+static inline u64 btf_enum64_value(const struct btf_enum64 *e)
+{
+ return ((u64)e->val_hi32 << 32) | e->val_lo32;
+}
+
+static inline bool btf_is_composite(const struct btf_type *t)
+{
+ u16 kind = btf_kind(t);
+
+ return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
+}
+
+static inline bool btf_is_array(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_ARRAY;
+}
+
+static inline bool btf_is_int(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_INT;
+}
+
+static inline bool btf_is_ptr(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_PTR;
+}
+
+static inline u8 btf_int_offset(const struct btf_type *t)
+{
+ return BTF_INT_OFFSET(*(u32 *)(t + 1));
+}
+
+static inline bool btf_type_is_scalar(const struct btf_type *t)
+{
+ return btf_type_is_int(t) || btf_type_is_enum(t);
+}
+
+static inline bool btf_type_is_typedef(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF;
+}
+
+static inline bool btf_type_is_volatile(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_VOLATILE;
+}
+
+static inline bool btf_type_is_func(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC;
+}
+
+static inline bool btf_type_is_func_proto(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO;
+}
+
+static inline bool btf_type_is_var(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
+}
+
+static inline bool btf_type_is_type_tag(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG;
+}
+
+/* union is only a special case of struct:
+ * all its offsetof(member) == 0
+ */
+static inline bool btf_type_is_struct(const struct btf_type *t)
+{
+ u8 kind = BTF_INFO_KIND(t->info);
+
+ return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
+}
+
+static inline bool __btf_type_is_struct(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
+}
+
+static inline bool btf_type_is_array(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
+}
+
+static inline u16 btf_type_vlen(const struct btf_type *t)
+{
+ return BTF_INFO_VLEN(t->info);
+}
+
+static inline u16 btf_vlen(const struct btf_type *t)
+{
+ return btf_type_vlen(t);
+}
+
+static inline u16 btf_func_linkage(const struct btf_type *t)
+{
+ return BTF_INFO_VLEN(t->info);
+}
+
+static inline bool btf_type_kflag(const struct btf_type *t)
+{
+ return BTF_INFO_KFLAG(t->info);
+}
+
+static inline u32 __btf_member_bit_offset(const struct btf_type *struct_type,
+ const struct btf_member *member)
+{
+ return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset)
+ : member->offset;
+}
+
+static inline u32 __btf_member_bitfield_size(const struct btf_type *struct_type,
+ const struct btf_member *member)
+{
+ return btf_type_kflag(struct_type) ? BTF_MEMBER_BITFIELD_SIZE(member->offset)
+ : 0;
+}
+
+static inline struct btf_member *btf_members(const struct btf_type *t)
+{
+ return (struct btf_member *)(t + 1);
+}
+
+static inline u32 btf_member_bit_offset(const struct btf_type *t, u32 member_idx)
+{
+ const struct btf_member *m = btf_members(t) + member_idx;
+
+ return __btf_member_bit_offset(t, m);
+}
+
+static inline u32 btf_member_bitfield_size(const struct btf_type *t, u32 member_idx)
+{
+ const struct btf_member *m = btf_members(t) + member_idx;
+
+ return __btf_member_bitfield_size(t, m);
+}
+
+static inline const struct btf_member *btf_type_member(const struct btf_type *t)
+{
+ return (const struct btf_member *)(t + 1);
+}
+
+static inline struct btf_array *btf_array(const struct btf_type *t)
+{
+ return (struct btf_array *)(t + 1);
+}
+
+static inline struct btf_enum *btf_enum(const struct btf_type *t)
+{
+ return (struct btf_enum *)(t + 1);
+}
+
+static inline struct btf_enum64 *btf_enum64(const struct btf_type *t)
+{
+ return (struct btf_enum64 *)(t + 1);
+}
+
+static inline const struct btf_var_secinfo *btf_type_var_secinfo(
+ const struct btf_type *t)
+{
+ return (const struct btf_var_secinfo *)(t + 1);
+}
+
+static inline struct btf_param *btf_params(const struct btf_type *t)
+{
+ return (struct btf_param *)(t + 1);
+}
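Likewise for function prototypes, a sketch assuming `proto` is a BTF_KIND_FUNC_PROTO type, whose vlen is its parameter count:

	const struct btf_param *p = btf_params(proto);
	u16 i;

	for (i = 0; i < btf_vlen(proto); i++, p++)
		pr_debug("arg %u: name_off %u, type id %u\n",
			 i, p->name_off, p->type);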
+
+static inline int btf_id_cmp_func(const void *a, const void *b)
+{
+ const int *pa = a, *pb = b;
+
+ return *pa - *pb;
+}
+
+static inline bool btf_id_set_contains(const struct btf_id_set *set, u32 id)
+{
+ return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL;
+}
+
+static inline void *btf_id_set8_contains(const struct btf_id_set8 *set, u32 id)
+{
+ return bsearch(&id, set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func);
+}
+
+struct bpf_prog;
+struct bpf_verifier_log;
+
+#ifdef CONFIG_BPF_SYSCALL
+const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
+const char *btf_name_by_offset(const struct btf *btf, u32 offset);
+struct btf *btf_parse_vmlinux(void);
+struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog);
+u32 *btf_kfunc_id_set_contains(const struct btf *btf,
+ enum bpf_prog_type prog_type,
+ u32 kfunc_btf_id);
+u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id);
+int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
+ const struct btf_kfunc_id_set *s);
+int register_btf_fmodret_id_set(const struct btf_kfunc_id_set *kset);
+s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id);
+int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
+ struct module *owner);
+struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id);
+const struct btf_member *
+btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
+ const struct btf_type *t, enum bpf_prog_type prog_type,
+ int arg);
+int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type);
+bool btf_types_are_same(const struct btf *btf1, u32 id1,
+ const struct btf *btf2, u32 id2);
+#else
+static inline const struct btf_type *btf_type_by_id(const struct btf *btf,
+ u32 type_id)
+{
+ return NULL;
+}
+static inline const char *btf_name_by_offset(const struct btf *btf,
+ u32 offset)
+{
+ return NULL;
+}
+static inline u32 *btf_kfunc_id_set_contains(const struct btf *btf,
+ enum bpf_prog_type prog_type,
+ u32 kfunc_btf_id)
+{
+ return NULL;
+}
+static inline int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
+ const struct btf_kfunc_id_set *s)
+{
+ return 0;
+}
+static inline s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id)
+{
+ return -ENOENT;
+}
+static inline int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors,
+ u32 add_cnt, struct module *owner)
+{
+ return 0;
+}
+static inline struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id)
+{
+ return NULL;
+}
+static inline const struct btf_member *
+btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
+ const struct btf_type *t, enum bpf_prog_type prog_type,
+ int arg)
+{
+ return NULL;
+}
+static inline int get_kern_ctx_btf_id(struct bpf_verifier_log *log,
+ enum bpf_prog_type prog_type) {
+ return -EINVAL;
+}
+static inline bool btf_types_are_same(const struct btf *btf1, u32 id1,
+ const struct btf *btf2, u32 id2)
+{
+ return false;
+}
+#endif
+
+static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t)
+{
+ if (!btf_type_is_ptr(t))
+ return false;
+
+ t = btf_type_skip_modifiers(btf, t->type, NULL);
+
+ return btf_type_is_struct(t);
+}
+
+#endif
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
new file mode 100644
index 000000000000..00950cc03bff
--- /dev/null
+++ b/include/linux/btf_ids.h
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_BTF_IDS_H
+#define _LINUX_BTF_IDS_H
+
+struct btf_id_set {
+ u32 cnt;
+ u32 ids[];
+};
+
+struct btf_id_set8 {
+ u32 cnt;
+ u32 flags;
+ struct {
+ u32 id;
+ u32 flags;
+ } pairs[];
+};
+
+#ifdef CONFIG_DEBUG_INFO_BTF
+
+#include <linux/compiler.h> /* for __PASTE */
+#include <linux/compiler_attributes.h> /* for __maybe_unused */
+
+/*
+ * Following macros help to define lists of BTF IDs placed
+ * in .BTF_ids section. They are initially filled with zeros
+ * (during compilation) and resolved later during the
+ * linking phase by resolve_btfids tool.
+ *
+ * Any change in list layout must be reflected in resolve_btfids
+ * tool logic.
+ */
+
+#define BTF_IDS_SECTION ".BTF_ids"
+
+#define ____BTF_ID(symbol, word) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".local " #symbol " ; \n" \
+".type " #symbol ", STT_OBJECT; \n" \
+".size " #symbol ", 4; \n" \
+#symbol ": \n" \
+".zero 4 \n" \
+word \
+".popsection; \n");
+
+#define __BTF_ID(symbol, word) \
+ ____BTF_ID(symbol, word)
+
+#define __ID(prefix) \
+ __PASTE(prefix, __COUNTER__)
+
+/*
+ * The BTF_ID macro defines a unique symbol for each ID, pointing
+ * to 4 zero bytes.
+ */
+#define BTF_ID(prefix, name) \
+ __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__), "")
+
+#define ____BTF_ID_FLAGS(prefix, name, flags) \
+ __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__), ".long " #flags "\n")
+#define __BTF_ID_FLAGS(prefix, name, flags, ...) \
+ ____BTF_ID_FLAGS(prefix, name, flags)
+#define BTF_ID_FLAGS(prefix, name, ...) \
+ __BTF_ID_FLAGS(prefix, name, ##__VA_ARGS__, 0)
+
+/*
+ * The BTF_ID_LIST macro defines a pure (unsorted) list
+ * of BTF IDs, with the following layout:
+ *
+ * BTF_ID_LIST(list1)
+ * BTF_ID(type1, name1)
+ * BTF_ID(type2, name2)
+ *
+ * list1:
+ * __BTF_ID__type1__name1__1:
+ * .zero 4
+ * __BTF_ID__type2__name2__2:
+ * .zero 4
+ *
+ */
+#define __BTF_ID_LIST(name, scope) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+"." #scope " " #name "; \n" \
+#name ":; \n" \
+".popsection; \n");
+
+#define BTF_ID_LIST(name) \
+__BTF_ID_LIST(name, local) \
+extern u32 name[];
+
+#define BTF_ID_LIST_GLOBAL(name, n) \
+__BTF_ID_LIST(name, globl)
+
+/* The BTF_ID_LIST_SINGLE macro defines a BTF_ID_LIST with
+ * a single entry.
+ */
+#define BTF_ID_LIST_SINGLE(name, prefix, typename) \
+ BTF_ID_LIST(name) \
+ BTF_ID(prefix, typename)
+#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) \
+ BTF_ID_LIST_GLOBAL(name, 1) \
+ BTF_ID(prefix, typename)
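For instance, a one-element list whose single slot resolve_btfids fills with the BTF ID of struct task_struct at link time; the list name is illustrative:

	BTF_ID_LIST_SINGLE(example_task_struct_ids, struct, task_struct)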
+
+/*
+ * The BTF_ID_UNUSED macro defines 4 zero bytes.
+ * It's used when we want to define an 'unused' entry
+ * in a BTF_ID_LIST, like:
+ *
+ * BTF_ID_LIST(bpf_skb_output_btf_ids)
+ * BTF_ID(struct, sk_buff)
+ * BTF_ID_UNUSED
+ * BTF_ID(struct, task_struct)
+ */
+
+#define BTF_ID_UNUSED \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".zero 4 \n" \
+".popsection; \n");
+
+/*
+ * The BTF_SET_START/END macro pair defines a sorted list of
+ * BTF IDs plus its member count, with the following layout:
+ *
+ * BTF_SET_START(list)
+ * BTF_ID(type1, name1)
+ * BTF_ID(type2, name2)
+ * BTF_SET_END(list)
+ *
+ * __BTF_ID__set__list:
+ * .zero 4
+ * list:
+ * __BTF_ID__type1__name1__3:
+ * .zero 4
+ * __BTF_ID__type2__name2__4:
+ * .zero 4
+ *
+ */
+#define __BTF_SET_START(name, scope) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+"." #scope " __BTF_ID__set__" #name "; \n" \
+"__BTF_ID__set__" #name ":; \n" \
+".zero 4 \n" \
+".popsection; \n");
+
+#define BTF_SET_START(name) \
+__BTF_ID_LIST(name, local) \
+__BTF_SET_START(name, local)
+
+#define BTF_SET_START_GLOBAL(name) \
+__BTF_ID_LIST(name, globl) \
+__BTF_SET_START(name, globl)
+
+#define BTF_SET_END(name) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".size __BTF_ID__set__" #name ", .-" #name " \n" \
+".popsection; \n"); \
+extern struct btf_id_set name;
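A sketch of a set used as a runtime allow-list; resolve_btfids both fills and sorts the IDs at link time, which is what lets btf_id_set_contains() from btf.h bsearch it. All names here are illustrative:

	BTF_SET_START(example_allowlist)
	BTF_ID(func, vfs_read)
	BTF_ID(func, vfs_write)
	BTF_SET_END(example_allowlist)

	static bool example_func_allowed(u32 btf_id)
	{
		return btf_id_set_contains(&example_allowlist, btf_id);
	}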
+
+/*
+ * The BTF_SET8_START/END macro pair defines a sorted list of
+ * BTF IDs and their flags plus its member count, with the
+ * following layout:
+ *
+ * BTF_SET8_START(list)
+ * BTF_ID_FLAGS(type1, name1, flags)
+ * BTF_ID_FLAGS(type2, name2, flags)
+ * BTF_SET8_END(list)
+ *
+ * __BTF_ID__set8__list:
+ * .zero 8
+ * list:
+ * __BTF_ID__type1__name1__3:
+ * .zero 4
+ * .word (1 << 0) | (1 << 2)
+ * __BTF_ID__type2__name2__5:
+ * .zero 4
+ * .word (1 << 3) | (1 << 1) | (1 << 2)
+ *
+ */
+#define __BTF_SET8_START(name, scope) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+"." #scope " __BTF_ID__set8__" #name "; \n" \
+"__BTF_ID__set8__" #name ":; \n" \
+".zero 8 \n" \
+".popsection; \n");
+
+#define BTF_SET8_START(name) \
+__BTF_ID_LIST(name, local) \
+__BTF_SET8_START(name, local)
+
+#define BTF_SET8_END(name) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".size __BTF_ID__set8__" #name ", .-" #name " \n" \
+".popsection; \n"); \
+extern struct btf_id_set8 name;
+
+#else
+
+#define BTF_ID_LIST(name) static u32 __maybe_unused name[64];
+#define BTF_ID(prefix, name)
+#define BTF_ID_FLAGS(prefix, name, ...)
+#define BTF_ID_UNUSED
+#define BTF_ID_LIST_GLOBAL(name, n) u32 __maybe_unused name[n];
+#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 __maybe_unused name[1];
+#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 __maybe_unused name[1];
+#define BTF_SET_START(name) static struct btf_id_set __maybe_unused name = { 0 };
+#define BTF_SET_START_GLOBAL(name) static struct btf_id_set __maybe_unused name = { 0 };
+#define BTF_SET_END(name)
+#define BTF_SET8_START(name) static struct btf_id_set8 __maybe_unused name = { 0 };
+#define BTF_SET8_END(name)
+
+#endif /* CONFIG_DEBUG_INFO_BTF */
+
+#ifdef CONFIG_NET
+/* Define a list of socket types which can be the argument for
+ * skc_to_*_sock() helpers. All these sockets should have
+ * sock_common as the first member in their memory layout.
+ */
+#define BTF_SOCK_TYPE_xxx \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, inet_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, inet_connection_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, inet_request_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, inet_timewait_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, request_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, sock_common) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, tcp_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, tcp_request_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_MPTCP, mptcp_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCKET, socket)
+
+enum {
+#define BTF_SOCK_TYPE(name, str) name,
+BTF_SOCK_TYPE_xxx
+#undef BTF_SOCK_TYPE
+MAX_BTF_SOCK_TYPE,
+};
+
+extern u32 btf_sock_ids[];
+#endif
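Each enumerator above indexes a slot in btf_sock_ids[] that resolve_btfids fills with the matching type's BTF ID, so for example:

	u32 tcp_sock_btf_id = btf_sock_ids[BTF_SOCK_TYPE_TCP];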
+
+#define BTF_TRACING_TYPE_xxx \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_TASK, task_struct) \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_FILE, file) \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_VMA, vm_area_struct)
+
+enum {
+#define BTF_TRACING_TYPE(name, type) name,
+BTF_TRACING_TYPE_xxx
+#undef BTF_TRACING_TYPE
+MAX_BTF_TRACING_TYPE,
+};
+
+extern u32 btf_tracing_ids[];
+extern u32 bpf_cgroup_btf_id[];
+extern u32 bpf_local_storage_map_btf_id[];
+
+#endif
diff --git a/include/linux/btree-128.h b/include/linux/btree-128.h
index 0b3414c4c928..22c09f5c3c39 100644
--- a/include/linux/btree-128.h
+++ b/include/linux/btree-128.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
extern struct btree_geo btree_geo128;
struct btree_head128 { struct btree_head h; };
diff --git a/include/linux/btree-type.h b/include/linux/btree-type.h
index 9a1147ef8563..fb34a52c788b 100644
--- a/include/linux/btree-type.h
+++ b/include/linux/btree-type.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define __BTREE_TP(pfx, type, sfx) pfx ## type ## sfx
#define _BTREE_TP(pfx, type, sfx) __BTREE_TP(pfx, type, sfx)
#define BTREE_TP(pfx) _BTREE_TP(pfx, BTREE_TYPE_SUFFIX,)
diff --git a/include/linux/btree.h b/include/linux/btree.h
index 65b5bb058324..243ee544397a 100644
--- a/include/linux/btree.h
+++ b/include/linux/btree.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BTREE_H
#define BTREE_H
@@ -9,7 +10,7 @@
*
* A B+Tree is a data structure for looking up arbitrary (currently allowing
* unsigned long, u32, u64 and 2 * u64) keys into pointers. The data structure
- * is described at http://en.wikipedia.org/wiki/B-tree, we currently do not
+ * is described at https://en.wikipedia.org/wiki/B-tree, we currently do not
* use binary search to find the key on lookups.
*
* Each B+Tree consists of a head, that contains bookkeeping information and
diff --git a/include/linux/btrfs.h b/include/linux/btrfs.h
index 22d799147db2..9a37a45ec801 100644
--- a/include/linux/btrfs.h
+++ b/include/linux/btrfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BTRFS_H
#define _LINUX_BTRFS_H
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index c8dae555eccf..1520793c72da 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/buffer_head.h
*
@@ -8,6 +9,7 @@
#define _LINUX_BUFFER_HEAD_H
#include <linux/types.h>
+#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
@@ -21,9 +23,6 @@ enum bh_state_bits {
BH_Dirty, /* Is dirty */
BH_Lock, /* Is locked */
BH_Req, /* Has been submitted for I/O */
- BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
- * IO completion of other buffers in the page
- */
BH_Mapped, /* Has a disk mapping */
BH_New, /* Disk mapping was newly created by get_block */
@@ -62,7 +61,10 @@ typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
struct buffer_head {
unsigned long b_state; /* buffer state bitmap (see above) */
struct buffer_head *b_this_page;/* circular list of page's buffers */
- struct page *b_page; /* the page this bh is mapped to */
+ union {
+ struct page *b_page; /* the page this bh is mapped to */
+ struct folio *b_folio; /* the folio this bh is mapped to */
+ };
sector_t b_blocknr; /* start block number */
size_t b_size; /* size of mapping */
@@ -75,16 +77,22 @@ struct buffer_head {
struct address_space *b_assoc_map; /* mapping this buffer is
associated with */
atomic_t b_count; /* users using this buffer_head */
+ spinlock_t b_uptodate_lock; /* Used by the first bh in a page, to
+ * serialise IO completion of other
+ * buffers in the page */
};
/*
* macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
* and buffer_foo() functions.
+ * To avoid resetting buffer flags that are already set, because that causes
+ * a costly cache line transition, check the flag first.
*/
#define BUFFER_FNS(bit, name) \
static __always_inline void set_buffer_##name(struct buffer_head *bh) \
{ \
- set_bit(BH_##bit, &(bh)->b_state); \
+ if (!test_bit(BH_##bit, &(bh)->b_state)) \
+ set_bit(BH_##bit, &(bh)->b_state); \
} \
static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{ \
@@ -113,7 +121,6 @@ static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
* of the form "mark_buffer_foo()". These are higher-level functions which
* do something in addition to setting a b_state bit.
*/
-BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
@@ -131,6 +138,41 @@ BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)
+static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
+{
+ /*
+ * If somebody else already set this uptodate, they will
+ * have done the memory barrier, and a reader will thus
+ * see *some* valid buffer state.
+ *
+ * Any other serialization (with IO errors or whatever that
+ * might clear the bit) has to come from other state (eg BH_Lock).
+ */
+ if (test_bit(BH_Uptodate, &bh->b_state))
+ return;
+
+ /*
+ * make it consistent with folio_mark_uptodate
+ * pairs with smp_load_acquire in buffer_uptodate
+ */
+ smp_mb__before_atomic();
+ set_bit(BH_Uptodate, &bh->b_state);
+}
+
+static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
+{
+ clear_bit(BH_Uptodate, &bh->b_state);
+}
+
+static __always_inline int buffer_uptodate(const struct buffer_head *bh)
+{
+ /*
+ * make it consistent with folio_test_uptodate
+ * pairs with smp_mb__before_atomic in set_buffer_uptodate
+ */
+ return test_bit_acquire(BH_Uptodate, &bh->b_state);
+}
+
#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
/* If we *know* page->private refers to buffer_heads */
@@ -140,8 +182,9 @@ BUFFER_FNS(Defer_Completion, defer_completion)
((struct buffer_head *)page_private(page)); \
})
#define page_has_buffers(page) PagePrivate(page)
+#define folio_buffers(folio) folio_get_private(folio)
-void buffer_check_dirty_writeback(struct page *page,
+void buffer_check_dirty_writeback(struct folio *folio,
bool *dirty, bool *writeback);
/*
@@ -150,15 +193,20 @@ void buffer_check_dirty_writeback(struct page *page,
void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
-void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
struct page *page, unsigned long offset);
-int try_to_free_buffers(struct page *);
+void folio_set_bh(struct buffer_head *bh, struct folio *folio,
+ unsigned long offset);
+bool try_to_free_buffers(struct folio *);
+struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
+ bool retry);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
- int retry);
+ bool retry);
void create_empty_buffers(struct page *, unsigned long,
unsigned long b_state);
+void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize,
+ unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);
@@ -189,21 +237,22 @@ void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
+void invalidate_bh_lrus_cpu(void);
+bool has_bh_in_lru(int cpu, void *dummy);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
-void ll_rw_block(int, int, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
-int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
-void write_dirty_buffer(struct buffer_head *bh, int op_flags);
-int submit_bh(int, int, struct buffer_head *);
+int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
+void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
+void submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
-int bh_submit_read(struct buffer_head *bh);
-loff_t page_cache_seek_hole_data(struct inode *inode, loff_t offset,
- loff_t length, int whence);
+int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
+void __bh_read_batch(int nr, struct buffer_head *bhs[],
+ blk_opf_t op_flags, bool force_lock);
extern int buffer_heads_over_limit;
@@ -211,18 +260,16 @@ extern int buffer_heads_over_limit;
* Generic address_space_operations implementations for buffer_head-backed
* address_spaces.
*/
-void block_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length);
+void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int block_write_full_page(struct page *page, get_block_t *get_block,
struct writeback_control *wbc);
int __block_write_full_page(struct inode *inode, struct page *page,
get_block_t *get_block, struct writeback_control *wbc,
bh_end_io_t *handler);
-int block_read_full_page(struct page*, get_block_t*);
-int block_is_partially_uptodate(struct page *page, unsigned long from,
- unsigned long count);
+int block_read_full_folio(struct folio *, get_block_t *);
+bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
- unsigned flags, struct page **pagep, get_block_t *get_block);
+ struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
@@ -232,15 +279,16 @@ int generic_write_end(struct file *, struct address_space *,
loff_t, unsigned, unsigned,
struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
+void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
- unsigned, unsigned, struct page **, void **,
+ unsigned, struct page **, void **,
get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
-static inline int block_page_mkwrite_return(int err)
+static inline vm_fault_t block_page_mkwrite_return(int err)
{
if (err == 0)
return VM_FAULT_LOCKED;
@@ -253,14 +301,16 @@ static inline int block_page_mkwrite_return(int err)
}
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
-int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
- struct page **, void **, get_block_t*);
-int nobh_write_end(struct file *, struct address_space *,
- loff_t, unsigned, unsigned,
- struct page *, void *);
-int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
-int nobh_writepage(struct page *page, get_block_t *get_block,
- struct writeback_control *wbc);
+
+#ifdef CONFIG_MIGRATION
+extern int buffer_migrate_folio(struct address_space *,
+ struct folio *dst, struct folio *src, enum migrate_mode);
+extern int buffer_migrate_folio_norefs(struct address_space *,
+ struct folio *dst, struct folio *src, enum migrate_mode);
+#else
+#define buffer_migrate_folio NULL
+#define buffer_migrate_folio_norefs NULL
+#endif
void buffer_init(void);
@@ -268,14 +318,6 @@ void buffer_init(void);
* inline definitions
*/
-static inline void attach_page_buffers(struct page *page,
- struct buffer_head *head)
-{
- get_page(page);
- SetPagePrivate(page);
- set_page_private(page, (unsigned long)head);
-}
-
static inline void get_bh(struct buffer_head *bh)
{
atomic_inc(&bh->b_count);
@@ -378,6 +420,41 @@ static inline struct buffer_head *__getblk(struct block_device *bdev,
return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}
+static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
+{
+ if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
+ if (!buffer_uptodate(bh))
+ __bh_read(bh, op_flags, false);
+ else
+ unlock_buffer(bh);
+ }
+}
+
+static inline void bh_read_nowait(struct buffer_head *bh, blk_opf_t op_flags)
+{
+ if (!bh_uptodate_or_lock(bh))
+ __bh_read(bh, op_flags, false);
+}
+
+/* Returns 1 if the buffer was already uptodate, 0 on success, and -EIO on error. */
+static inline int bh_read(struct buffer_head *bh, blk_opf_t op_flags)
+{
+ if (bh_uptodate_or_lock(bh))
+ return 1;
+ return __bh_read(bh, op_flags, true);
+}
+
+static inline void bh_read_batch(int nr, struct buffer_head *bhs[])
+{
+ __bh_read_batch(nr, bhs, 0, true);
+}
+
+static inline void bh_readahead_batch(int nr, struct buffer_head *bhs[],
+ blk_opf_t op_flags)
+{
+ __bh_read_batch(nr, bhs, op_flags, false);
+}
+
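A sketch of the synchronous helper in use; the device, block and size parameters are assumptions of the example:

	static int example_read_block(struct block_device *bdev, sector_t block,
				      unsigned int size)
	{
		struct buffer_head *bh = __getblk(bdev, block, size);
		int err;

		if (!bh)
			return -ENOMEM;
		err = bh_read(bh, 0);	/* 1: already uptodate, 0: read OK, <0: -EIO */
		brelse(bh);
		return err < 0 ? err : 0;
	}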
/**
* __bread() - reads a specified block and returns the bh
* @bdev: the block_device to read from
@@ -394,16 +471,19 @@ __bread(struct block_device *bdev, sector_t block, unsigned size)
return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}
-extern int __set_page_dirty_buffers(struct page *page);
+bool block_dirty_folio(struct address_space *mapping, struct folio *folio);
#else /* CONFIG_BLOCK */
static inline void buffer_init(void) {}
-static inline int try_to_free_buffers(struct page *page) { return 1; }
+static inline bool try_to_free_buffers(struct folio *folio) { return true; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
+static inline void invalidate_bh_lrus_cpu(void) {}
+static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
+#define buffer_heads_over_limit 0
#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 5d5554c874fd..348acf2558f3 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BUG_H
#define _LINUX_BUG_H
@@ -35,6 +36,9 @@ static inline int is_warning_bug(const struct bug_entry *bug)
return bug->flags & BUGFLAG_WARNING;
}
+void bug_get_file_line(struct bug_entry *bug, const char **file,
+ unsigned int *line);
+
struct bug_entry *find_bug(unsigned long bugaddr);
enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
@@ -42,14 +46,31 @@ enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
/* These are defined by the architecture */
int is_valid_bugaddr(unsigned long addr);
+void generic_bug_clear_once(void);
+
#else /* !CONFIG_GENERIC_BUG */
+static inline void *find_bug(unsigned long bugaddr)
+{
+ return NULL;
+}
+
static inline enum bug_trap_type report_bug(unsigned long bug_addr,
struct pt_regs *regs)
{
return BUG_TRAP_TYPE_BUG;
}
+struct bug_entry;
+static inline void bug_get_file_line(struct bug_entry *bug, const char **file,
+ unsigned int *line)
+{
+ *file = NULL;
+ *line = 0;
+}
+
+static inline void generic_bug_clear_once(void) {}
+
#endif /* CONFIG_GENERIC_BUG */
/*
diff --git a/include/linux/build-salt.h b/include/linux/build-salt.h
new file mode 100644
index 000000000000..bb007bd05e7a
--- /dev/null
+++ b/include/linux/build-salt.h
@@ -0,0 +1,20 @@
+#ifndef __BUILD_SALT_H
+#define __BUILD_SALT_H
+
+#include <linux/elfnote.h>
+
+#define LINUX_ELFNOTE_BUILD_SALT 0x100
+
+#ifdef __ASSEMBLER__
+
+#define BUILD_SALT \
+ ELFNOTE(Linux, LINUX_ELFNOTE_BUILD_SALT, .asciz CONFIG_BUILD_SALT)
+
+#else
+
+#define BUILD_SALT \
+ ELFNOTE32("Linux", LINUX_ELFNOTE_BUILD_SALT, CONFIG_BUILD_SALT)
+
+#endif
+
+#endif /* __BUILD_SALT_H */
diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h
index b7d22d60008a..3aa3640f8c18 100644
--- a/include/linux/build_bug.h
+++ b/include/linux/build_bug.h
@@ -1,18 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BUILD_BUG_H
#define _LINUX_BUILD_BUG_H
#include <linux/compiler.h>
#ifdef __CHECKER__
-#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
-#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
#define BUILD_BUG_ON_ZERO(e) (0)
-#define BUILD_BUG_ON_NULL(e) ((void *)0)
-#define BUILD_BUG_ON_INVALID(e) (0)
-#define BUILD_BUG_ON_MSG(cond, msg) (0)
-#define BUILD_BUG_ON(condition) (0)
-#define BUILD_BUG() (0)
#else /* __CHECKER__ */
+/*
+ * Force a compilation error if condition is true, but also produce a
+ * result (of value 0 and type int), so the expression can be used
+ * e.g. in a structure initializer (or where-ever else comma expressions
+ * aren't permitted).
+ */
+#define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))
+#endif /* __CHECKER__ */
/* Force a compilation error if a constant expression is not a power of 2 */
#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \
@@ -21,15 +23,6 @@
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
/*
- * Force a compilation error if condition is true, but also produce a
- * result (of value 0 and type size_t), so the expression can be used
- * e.g. in a structure initializer (or where-ever else comma expressions
- * aren't permitted).
- */
-#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); }))
-#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:(-!!(e)); }))
-
-/*
* BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the
* expression but avoids the generation of any code, even if that expression
* has side-effects.
@@ -52,23 +45,9 @@
* If you have some code which relies on certain constants being equal, or
* some other compile-time-evaluated condition, you should use BUILD_BUG_ON to
* detect if someone changes it.
- *
- * The implementation uses gcc's reluctance to create a negative array, but gcc
- * (as of 4.4) only emits that error for obvious cases (e.g. not arguments to
- * inline functions). Luckily, in 4.3 they added the "error" function
- * attribute just for this type of case. Thus, we use a negative sized array
- * (should always create an error on gcc versions older than 4.4) and then call
- * an undefined function with the error attribute (should always create an
- * error on gcc 4.3 and later). If for some reason, neither creates a
- * compile-time error, we'll still have a link-time error, which is harder to
- * track down.
*/
-#ifndef __OPTIMIZE__
-#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
-#else
#define BUILD_BUG_ON(condition) \
BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition)
-#endif
/**
* BUILD_BUG - break compile if used.
@@ -79,6 +58,32 @@
*/
#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed")
-#endif /* __CHECKER__ */
+/**
+ * static_assert - check integer constant expression at build time
+ *
+ * static_assert() is a wrapper for the C11 _Static_assert, with a
+ * little macro magic to make the message optional (defaulting to the
+ * stringification of the tested expression).
+ *
+ * Contrary to BUILD_BUG_ON(), static_assert() can be used at global
+ * scope, but requires the expression to be an integer constant
+ * expression (i.e., it is not enough that __builtin_constant_p() is
+ * true for expr).
+ *
+ * Also note that BUILD_BUG_ON() fails the build if the condition is
+ * true, while static_assert() fails the build if the expression is
+ * false.
+ */
+#define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr)
+#define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
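For example, at file scope; struct foo and the expected size are hypothetical:

	static_assert(sizeof(u64) == 8);
	static_assert(sizeof(struct foo) == 16, "struct foo ABI changed");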
+
+
+/*
+ * Compile time check that field has an expected offset
+ */
+#define ASSERT_STRUCT_OFFSET(type, field, expected_offset) \
+ BUILD_BUG_ON_MSG(offsetof(type, field) != (expected_offset), \
+ "Offset of " #field " in " #type " has changed.")
+
#endif /* _LINUX_BUILD_BUG_H */
diff --git a/include/linux/buildid.h b/include/linux/buildid.h
new file mode 100644
index 000000000000..3b7a0ff4642f
--- /dev/null
+++ b/include/linux/buildid.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BUILDID_H
+#define _LINUX_BUILDID_H
+
+#include <linux/mm_types.h>
+
+#define BUILD_ID_SIZE_MAX 20
+
+int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
+ __u32 *size);
+int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size);
+
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_CRASH_CORE)
+extern unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX];
+void init_vmlinux_build_id(void);
+#else
+static inline void init_vmlinux_build_id(void) { }
+#endif
+
+#endif
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index ec8a4d7af6bd..555aae5448ae 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -1,31 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* bvec iterator
*
* Copyright (C) 2001 Ming Lei <ming.lei@canonical.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public Licens
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
*/
-#ifndef __LINUX_BVEC_ITER_H
-#define __LINUX_BVEC_ITER_H
+#ifndef __LINUX_BVEC_H
+#define __LINUX_BVEC_H
-#include <linux/kernel.h>
+#include <linux/highmem.h>
#include <linux/bug.h>
#include <linux/errno.h>
+#include <linux/limits.h>
+#include <linux/minmax.h>
+#include <linux/types.h>
-/*
- * was unsigned short, but we might as well be ready for > 64kB I/O pages
+struct page;
+
+/**
+ * struct bio_vec - a contiguous range of physical memory addresses
+ * @bv_page: First page associated with the address range.
+ * @bv_len: Number of bytes in the address range.
+ * @bv_offset: Start of the address range relative to the start of @bv_page.
+ *
+ * The following holds for a bvec if n * PAGE_SIZE < bv_offset + bv_len:
+ *
+ * nth_page(@bv_page, n) == @bv_page + n
+ *
+ * This holds because page_is_mergeable() checks the above property.
*/
struct bio_vec {
struct page *bv_page;
@@ -33,6 +34,46 @@ struct bio_vec {
unsigned int bv_offset;
};
+/**
+ * bvec_set_page - initialize a bvec based off a struct page
+ * @bv: bvec to initialize
+ * @page: page the bvec should point to
+ * @len: length of the bvec
+ * @offset: offset into the page
+ */
+static inline void bvec_set_page(struct bio_vec *bv, struct page *page,
+ unsigned int len, unsigned int offset)
+{
+ bv->bv_page = page;
+ bv->bv_len = len;
+ bv->bv_offset = offset;
+}
+
+/**
+ * bvec_set_folio - initialize a bvec based off a struct folio
+ * @bv: bvec to initialize
+ * @folio: folio the bvec should point to
+ * @len: length of the bvec
+ * @offset: offset into the folio
+ */
+static inline void bvec_set_folio(struct bio_vec *bv, struct folio *folio,
+ unsigned int len, unsigned int offset)
+{
+ bvec_set_page(bv, &folio->page, len, offset);
+}
+
+/**
+ * bvec_set_virt - initialize a bvec based on a virtual address
+ * @bv: bvec to initialize
+ * @vaddr: virtual address to set the bvec to
+ * @len: length of the bvec
+ */
+static inline void bvec_set_virt(struct bio_vec *bv, void *vaddr,
+ unsigned int len)
+{
+ bvec_set_page(bv, virt_to_page(vaddr), len, offset_in_page(vaddr));
+}
+
struct bvec_iter {
sector_t bi_sector; /* device address in 512 byte
sectors */
@@ -40,10 +81,14 @@ struct bvec_iter {
unsigned int bi_idx; /* current index into bvl_vec */
- unsigned int bi_done; /* number of bytes completed */
-
unsigned int bi_bvec_done; /* number of bytes completed in
current bvec */
+} __packed;
+
+struct bvec_iter_all {
+ struct bio_vec bv;
+ int idx;
+ unsigned done;
};
/*
@@ -52,16 +97,39 @@ struct bvec_iter {
*/
#define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx])
-#define bvec_iter_page(bvec, iter) \
+/* multi-page (mp_bvec) helpers */
+#define mp_bvec_iter_page(bvec, iter) \
(__bvec_iter_bvec((bvec), (iter))->bv_page)
-#define bvec_iter_len(bvec, iter) \
+#define mp_bvec_iter_len(bvec, iter) \
min((iter).bi_size, \
__bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
-#define bvec_iter_offset(bvec, iter) \
+#define mp_bvec_iter_offset(bvec, iter) \
(__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
+#define mp_bvec_iter_page_idx(bvec, iter) \
+ (mp_bvec_iter_offset((bvec), (iter)) / PAGE_SIZE)
+
+#define mp_bvec_iter_bvec(bvec, iter) \
+((struct bio_vec) { \
+ .bv_page = mp_bvec_iter_page((bvec), (iter)), \
+ .bv_len = mp_bvec_iter_len((bvec), (iter)), \
+ .bv_offset = mp_bvec_iter_offset((bvec), (iter)), \
+})
+
+/* For building single-page bvec in flight */
+#define bvec_iter_offset(bvec, iter)				\
+	(mp_bvec_iter_offset((bvec), (iter)) % PAGE_SIZE)
+
+#define bvec_iter_len(bvec, iter) \
+ min_t(unsigned, mp_bvec_iter_len((bvec), (iter)), \
+ PAGE_SIZE - bvec_iter_offset((bvec), (iter)))
+
+#define bvec_iter_page(bvec, iter) \
+ (mp_bvec_iter_page((bvec), (iter)) + \
+ mp_bvec_iter_page_idx((bvec), (iter)))
+
#define bvec_iter_bvec(bvec, iter) \
((struct bio_vec) { \
.bv_page = bvec_iter_page((bvec), (iter)), \
@@ -72,57 +140,144 @@ struct bvec_iter {
static inline bool bvec_iter_advance(const struct bio_vec *bv,
struct bvec_iter *iter, unsigned bytes)
{
+ unsigned int idx = iter->bi_idx;
+
if (WARN_ONCE(bytes > iter->bi_size,
"Attempted to advance past end of bvec iter\n")) {
iter->bi_size = 0;
return false;
}
- while (bytes) {
- unsigned iter_len = bvec_iter_len(bv, *iter);
- unsigned len = min(bytes, iter_len);
+ iter->bi_size -= bytes;
+ bytes += iter->bi_bvec_done;
- bytes -= len;
- iter->bi_size -= len;
- iter->bi_bvec_done += len;
- iter->bi_done += len;
-
- if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
- iter->bi_bvec_done = 0;
- iter->bi_idx++;
- }
+ while (bytes && bytes >= bv[idx].bv_len) {
+ bytes -= bv[idx].bv_len;
+ idx++;
}
+
+ iter->bi_idx = idx;
+ iter->bi_bvec_done = bytes;
return true;
}
-static inline bool bvec_iter_rewind(const struct bio_vec *bv,
- struct bvec_iter *iter,
- unsigned int bytes)
+/*
+ * A simpler version of bvec_iter_advance(): @bytes must not span
+ * multiple bvec entries, i.e. bytes <= bv[i->bi_idx].bv_len
+ */
+static inline void bvec_iter_advance_single(const struct bio_vec *bv,
+ struct bvec_iter *iter, unsigned int bytes)
{
- while (bytes) {
- unsigned len = min(bytes, iter->bi_bvec_done);
-
- if (iter->bi_bvec_done == 0) {
- if (WARN_ONCE(iter->bi_idx == 0,
- "Attempted to rewind iter beyond "
- "bvec's boundaries\n")) {
- return false;
- }
- iter->bi_idx--;
- iter->bi_bvec_done = __bvec_iter_bvec(bv, *iter)->bv_len;
- continue;
- }
- bytes -= len;
- iter->bi_size += len;
- iter->bi_bvec_done -= len;
+ unsigned int done = iter->bi_bvec_done + bytes;
+
+ if (done == bv[iter->bi_idx].bv_len) {
+ done = 0;
+ iter->bi_idx++;
}
- return true;
+ iter->bi_bvec_done = done;
+ iter->bi_size -= bytes;
}
#define for_each_bvec(bvl, bio_vec, iter, start) \
for (iter = (start); \
(iter).bi_size && \
((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
- bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
+ bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))
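A sketch of the iterator in use; `bvecs` and `start` are assumed to come from the caller, and each `bvl` produced is a single-page bvec:

	struct bio_vec bvl;
	struct bvec_iter iter;

	for_each_bvec(bvl, bvecs, iter, start)
		memzero_bvec(&bvl);	/* memzero_bvec() is added later in this file */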
+
+/* for iterating one bio from start to end */
+#define BVEC_ITER_ALL_INIT (struct bvec_iter) \
+{ \
+ .bi_sector = 0, \
+ .bi_size = UINT_MAX, \
+ .bi_idx = 0, \
+ .bi_bvec_done = 0, \
+}
+
+static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all)
+{
+ iter_all->done = 0;
+ iter_all->idx = 0;
+
+ return &iter_all->bv;
+}
+
+static inline void bvec_advance(const struct bio_vec *bvec,
+ struct bvec_iter_all *iter_all)
+{
+ struct bio_vec *bv = &iter_all->bv;
+
+ if (iter_all->done) {
+ bv->bv_page++;
+ bv->bv_offset = 0;
+ } else {
+ bv->bv_page = bvec->bv_page + (bvec->bv_offset >> PAGE_SHIFT);
+ bv->bv_offset = bvec->bv_offset & ~PAGE_MASK;
+ }
+ bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset,
+ bvec->bv_len - iter_all->done);
+ iter_all->done += bv->bv_len;
+
+ if (iter_all->done == bvec->bv_len) {
+ iter_all->idx++;
+ iter_all->done = 0;
+ }
+}
+
+/**
+ * bvec_kmap_local - map a bvec into the kernel virtual address space
+ * @bvec: bvec to map
+ *
+ * Must be called on single-page bvecs only. Call kunmap_local on the returned
+ * address to unmap.
+ */
+static inline void *bvec_kmap_local(struct bio_vec *bvec)
+{
+ return kmap_local_page(bvec->bv_page) + bvec->bv_offset;
+}
+
+/**
+ * memcpy_from_bvec - copy data from a bvec
+ * @bvec: bvec to copy from
+ *
+ * Must be called on single-page bvecs only.
+ */
+static inline void memcpy_from_bvec(char *to, struct bio_vec *bvec)
+{
+ memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, bvec->bv_len);
+}
+
+/**
+ * memcpy_to_bvec - copy data to a bvec
+ * @bvec: bvec to copy to
+ *
+ * Must be called on single-page bvecs only.
+ */
+static inline void memcpy_to_bvec(struct bio_vec *bvec, const char *from)
+{
+ memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, bvec->bv_len);
+}
+
+/**
+ * memzero_bvec - zero all data in a bvec
+ * @bvec: bvec to zero
+ *
+ * Must be called on single-page bvecs only.
+ */
+static inline void memzero_bvec(struct bio_vec *bvec)
+{
+ memzero_page(bvec->bv_page, bvec->bv_offset, bvec->bv_len);
+}
+
+/**
+ * bvec_virt - return the virtual address for a bvec
+ * @bvec: bvec to return the virtual address for
+ *
+ * Note: the caller must ensure that @bvec->bv_page is not a highmem page.
+ */
+static inline void *bvec_virt(struct bio_vec *bvec)
+{
+ WARN_ON_ONCE(PageHighMem(bvec->bv_page));
+ return page_address(bvec->bv_page) + bvec->bv_offset;
+}
-#endif /* __LINUX_BVEC_ITER_H */
+#endif /* __LINUX_BVEC_H */
diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h
index 392041475c72..d64a524d3cfb 100644
--- a/include/linux/byteorder/big_endian.h
+++ b/include/linux/byteorder/big_endian.h
@@ -1,7 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BYTEORDER_BIG_ENDIAN_H
#define _LINUX_BYTEORDER_BIG_ENDIAN_H
#include <uapi/linux/byteorder/big_endian.h>
+#ifndef CONFIG_CPU_BIG_ENDIAN
+#warning inconsistent configuration, needs CONFIG_CPU_BIG_ENDIAN
+#endif
+
#include <linux/byteorder/generic.h>
#endif /* _LINUX_BYTEORDER_BIG_ENDIAN_H */
diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h
index 89f67c1c3160..c9a4c96c9943 100644
--- a/include/linux/byteorder/generic.h
+++ b/include/linux/byteorder/generic.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BYTEORDER_GENERIC_H
#define _LINUX_BYTEORDER_GENERIC_H
@@ -155,6 +156,23 @@ static inline void le64_add_cpu(__le64 *var, u64 val)
*var = cpu_to_le64(le64_to_cpu(*var) + val);
}
+/* XXX: this stuff can be optimized */
+static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
+{
+ while (words--) {
+ __le32_to_cpus(buf);
+ buf++;
+ }
+}
+
+static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
+{
+ while (words--) {
+ __cpu_to_le32s(buf);
+ buf++;
+ }
+}
+
static inline void be16_add_cpu(__be16 *var, u16 val)
{
*var = cpu_to_be16(be16_to_cpu(*var) + val);
@@ -170,4 +188,20 @@ static inline void be64_add_cpu(__be64 *var, u64 val)
*var = cpu_to_be64(be64_to_cpu(*var) + val);
}
+static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
+{
+ size_t i;
+
+ for (i = 0; i < len; i++)
+ dst[i] = cpu_to_be32(src[i]);
+}
+
+static inline void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len)
+{
+ size_t i;
+
+ for (i = 0; i < len; i++)
+ dst[i] = be32_to_cpu(src[i]);
+}
+
#endif /* _LINUX_BYTEORDER_GENERIC_H */
diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
index 08057377aa23..1ec650ff76cb 100644
--- a/include/linux/byteorder/little_endian.h
+++ b/include/linux/byteorder/little_endian.h
@@ -1,7 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BYTEORDER_LITTLE_ENDIAN_H
#define _LINUX_BYTEORDER_LITTLE_ENDIAN_H
#include <uapi/linux/byteorder/little_endian.h>
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#warning inconsistent configuration, CONFIG_CPU_BIG_ENDIAN is set
+#endif
+
#include <linux/byteorder/generic.h>
#endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */
diff --git a/include/linux/c2port.h b/include/linux/c2port.h
index 4efabcb51347..4e93bc63c27a 100644
--- a/include/linux/c2port.h
+++ b/include/linux/c2port.h
@@ -1,16 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Silicon Labs C2 port Linux support
*
* Copyright (c) 2007 Rodolfo Giometti <giometti@linux.it>
* Copyright (c) 2007 Eurotech S.p.A. <info@eurotech.it>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation
*/
-#include <linux/kmemcheck.h>
-
#define C2PORT_NAME_LEN 32
struct device;
@@ -22,10 +17,8 @@ struct device;
/* Main struct */
struct c2port_ops;
struct c2port_device {
- kmemcheck_bitfield_begin(flags);
unsigned int access:1;
unsigned int flash_access:1;
- kmemcheck_bitfield_end(flags);
int id;
char name[C2PORT_NAME_LEN];
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 1be04f8c563a..5da1bbd96154 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_CACHE_H
#define __LINUX_CACHE_H
@@ -14,8 +15,14 @@
/*
* __read_mostly is used to keep rarely changing variables out of frequently
- * updated cachelines. If an architecture doesn't support it, ignore the
- * hint.
+ * updated cachelines. Its use should be reserved for data that is used
+ * frequently in hot paths. Performance traces can help decide when to use
+ * this. You want __read_mostly data to be tightly packed, so that in the
+ * best case multiple frequently read variables for a hot path will be next
+ * to each other in order to reduce the number of cachelines needed to
+ * execute a critical path. We should be mindful and selective of its use.
+ * i.e. if you're going to use it, please supply a *good* justification in
+ * your commit log.
*/
#ifndef __read_mostly
#define __read_mostly
@@ -27,7 +34,7 @@
* but may get written to during init, so can't live in .rodata (via "const").
*/
#ifndef __ro_after_init
-#define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
+#define __ro_after_init __section(".data..ro_after_init")
#endif
#ifndef ____cacheline_aligned
@@ -78,4 +85,17 @@
#define cache_line_size() L1_CACHE_BYTES
#endif
+/*
+ * Helper to add padding within a struct to ensure data falls into separate
+ * cachelines.
+ */
+#if defined(CONFIG_SMP)
+struct cacheline_padding {
+ char x[0];
+} ____cacheline_internodealigned_in_smp;
+#define CACHELINE_PADDING(name) struct cacheline_padding name
+#else
+#define CACHELINE_PADDING(name)
+#endif
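A sketch of the helper separating fields with different access patterns; the struct and field names are illustrative, and under !SMP the padding compiles away:

	struct example_stats {
		unsigned long	config_flags;		/* read-mostly */
		CACHELINE_PADDING(_pad1_);
		atomic_long_t	hits;			/* written from many CPUs */
	};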
+
#endif /* __LINUX_CACHE_H */
diff --git a/include/linux/cacheflush.h b/include/linux/cacheflush.h
new file mode 100644
index 000000000000..a6189d21f2ba
--- /dev/null
+++ b/include/linux/cacheflush.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CACHEFLUSH_H
+#define _LINUX_CACHEFLUSH_H
+
+#include <asm/cacheflush.h>
+
+struct folio;
+
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
+void flush_dcache_folio(struct folio *folio);
+#endif
+#else
+static inline void flush_dcache_folio(struct folio *folio)
+{
+}
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO 0
+#endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */
+
+#endif /* _LINUX_CACHEFLUSH_H */
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 6a524bf6a06d..a5cfd44fab45 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CACHEINFO_H
#define _LINUX_CACHEINFO_H
@@ -16,6 +17,8 @@ enum cache_type {
CACHE_TYPE_UNIFIED = BIT(2),
};
+extern unsigned int coherency_max_size;
+
/**
* struct cacheinfo - represent a cache leaf node
* @id: This cache's id. It is unique among caches with the same (type, level).
@@ -33,9 +36,8 @@ enum cache_type {
* @shared_cpu_map: logical cpumask representing all the cpus sharing
* this cache node
* @attributes: bitfield representing various cache attributes
- * @of_node: if devicetree is used, this represents either the cpu node in
- * case there's no explicit cache node or the cache node itself in the
- * device tree
+ * @fw_token: Unique value used to determine if different cacheinfo
+ * structures represent a single hardware cache instance.
* @disable_sysfs: indicates whether this node is visible to the user via
* sysfs or not
* @priv: pointer to any private data structure specific to particular
@@ -64,8 +66,7 @@ struct cacheinfo {
#define CACHE_ALLOCATE_POLICY_MASK \
(CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE)
#define CACHE_ID BIT(4)
-
- struct device_node *of_node;
+ void *fw_token;
bool disable_sysfs;
void *priv;
};
@@ -75,30 +76,65 @@ struct cpu_cacheinfo {
unsigned int num_levels;
unsigned int num_leaves;
bool cpu_map_populated;
+ bool early_ci_levels;
};
-/*
- * Helpers to make sure "func" is executed on the cpu whose cache
- * attributes are being detected
- */
-#define DEFINE_SMP_CALL_CACHE_FUNCTION(func) \
-static inline void _##func(void *ret) \
-{ \
- int cpu = smp_processor_id(); \
- *(int *)ret = __##func(cpu); \
-} \
- \
-int func(unsigned int cpu) \
-{ \
- int ret; \
- smp_call_function_single(cpu, _##func, &ret, true); \
- return ret; \
-}
-
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
+int early_cache_level(unsigned int cpu);
int init_cache_level(unsigned int cpu);
+int init_of_cache_level(unsigned int cpu);
int populate_cache_leaves(unsigned int cpu);
+int cache_setup_acpi(unsigned int cpu);
+bool last_level_cache_is_valid(unsigned int cpu);
+bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y);
+int fetch_cache_info(unsigned int cpu);
+int detect_cache_attributes(unsigned int cpu);
+#ifndef CONFIG_ACPI_PPTT
+/*
+ * acpi_get_cache_info() is only called on ACPI-enabled
+ * platforms using the PPTT for topology. This means that if
+ * the platform supports other firmware configuration methods,
+ * we need to stub out the call when ACPI is disabled.
+ * ACPI-enabled platforms not using PPTT won't be making calls
+ * to this function, so we need not worry about them.
+ */
+static inline
+int acpi_get_cache_info(unsigned int cpu,
+ unsigned int *levels, unsigned int *split_levels)
+{
+ return -ENOENT;
+}
+#else
+int acpi_get_cache_info(unsigned int cpu,
+ unsigned int *levels, unsigned int *split_levels);
+#endif
const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);
+/*
+ * Get the id of the cache associated with @cpu at level @level.
+ * cpuhp lock must be held.
+ */
+static inline int get_cpu_cacheinfo_id(int cpu, int level)
+{
+ struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
+ int i;
+
+ for (i = 0; i < ci->num_leaves; i++) {
+ if (ci->info_list[i].level == level) {
+ if (ci->info_list[i].attributes & CACHE_ID)
+ return ci->info_list[i].id;
+ return -1;
+ }
+ }
+
+ return -1;
+}
+
+#ifdef CONFIG_ARM64
+#define use_arch_cache_info() (true)
+#else
+#define use_arch_cache_info() (false)
+#endif
+
#endif /* _LINUX_CACHEINFO_H */
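/*
 * Usage sketch (hypothetical caller): group CPUs by their shared L3 cache
 * using get_cpu_cacheinfo_id(). Assumes <linux/cpu.h> for the cpuhp lock.
 */
static int hypothetical_llc_id(int cpu)
{
	int id;

	cpus_read_lock();			/* cpuhp lock must be held */
	id = get_cpu_cacheinfo_id(cpu, 3);	/* level 3: typical LLC */
	cpus_read_unlock();

	return id;				/* -1 if no CACHE_ID attribute */
}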
diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h
new file mode 100644
index 000000000000..9b8a9c39614b
--- /dev/null
+++ b/include/linux/can/bittiming.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2020 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ */
+
+#ifndef _CAN_BITTIMING_H
+#define _CAN_BITTIMING_H
+
+#include <linux/netdevice.h>
+#include <linux/can/netlink.h>
+
+#define CAN_SYNC_SEG 1
+
+#define CAN_BITRATE_UNSET 0
+#define CAN_BITRATE_UNKNOWN (-1U)
+
+#define CAN_CTRLMODE_TDC_MASK \
+ (CAN_CTRLMODE_TDC_AUTO | CAN_CTRLMODE_TDC_MANUAL)
+
+/*
+ * struct can_tdc - CAN FD Transmission Delay Compensation parameters
+ *
+ * At high bit rates, the propagation delay from the TX pin to the RX
+ * pin of the transceiver causes measurement errors: the sample point
+ * on the RX pin might occur on the previous bit.
+ *
+ * To solve this issue, ISO 11898-1 introduces in section 11.3.3
+ * "Transmitter delay compensation" a SSP (Secondary Sample Point)
+ * equal to the distance from the start of the bit time on the TX pin
+ * to the actual measurement on the RX pin.
+ *
+ * This structure contains the parameters to calculate that SSP.
+ *
+ * -+----------- one bit ----------+-- TX pin
+ * |<--- Sample Point --->|
+ *
+ * --+----------- one bit ----------+-- RX pin
+ * |<-------- TDCV -------->|
+ * |<------- TDCO ------->|
+ * |<----------- Secondary Sample Point ---------->|
+ *
+ * To increase precision, contrary to the other bittiming parameters
+ * which are measured in time quanta, the TDC parameters are measured
+ * in clock periods (also referred to as "minimum time quantum" in ISO
+ * 11898-1).
+ *
+ * @tdcv: Transmitter Delay Compensation Value. The time needed for
+ * the signal to propagate, i.e. the distance, in clock periods,
+ * from the start of the bit on the TX pin to when it is received
+ * on the RX pin. @tdcv depends on the controller modes:
+ *
+ * CAN_CTRLMODE_TDC_AUTO is set: The transceiver dynamically
+ * measures @tdcv for each transmitted CAN FD frame and the
+ * value provided here should be ignored.
+ *
+ * CAN_CTRLMODE_TDC_MANUAL is set: use the fixed provided @tdcv
+ * value.
+ *
+ * N.B. CAN_CTRLMODE_TDC_AUTO and CAN_CTRLMODE_TDC_MANUAL are
+ * mutually exclusive. Only one can be set at a time. If both
+ *	CAN_CTRLMODE_TDC_AUTO and CAN_CTRLMODE_TDC_MANUAL are unset,
+ * TDC is disabled and all the values of this structure should be
+ * ignored.
+ *
+ * @tdco: Transmitter Delay Compensation Offset. Offset value, in
+ * clock periods, defining the distance between the start of the
+ * bit reception on the RX pin of the transceiver and the SSP
+ * position such that SSP = @tdcv + @tdco.
+ *
+ * @tdcf: Transmitter Delay Compensation Filter window. Defines the
+ * minimum value for the SSP position in clock periods. If the
+ * SSP position is less than @tdcf, then no delay compensations
+ * occur and the normal sampling point is used instead. The
+ * feature is enabled if and only if @tdcv is set to zero
+ * (automatic mode) and @tdcf is configured to a value greater
+ * than @tdco.
+ */
+struct can_tdc {
+ u32 tdcv;
+ u32 tdco;
+ u32 tdcf;
+};
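/*
 * Worked example (assumed values, not from this patch): with tdcv = 18
 * and tdco = 14 clock periods, the secondary sample point lands at
 * SSP = TDCV + TDCO = 18 + 14 = 32 clock periods after the start of the
 * bit on the TX pin.
 */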
+
+/*
+ * struct can_tdc_const - CAN hardware-dependent constant for
+ * Transmission Delay Compensation
+ *
+ * @tdcv_min: Transmitter Delay Compensation Value minimum value. If
+ * the controller does not support manual mode for tdcv
+ *	(cf. flag CAN_CTRLMODE_TDC_MANUAL), then this value is
+ * ignored.
+ * @tdcv_max: Transmitter Delay Compensation Value maximum value. If
+ * the controller does not support manual mode for tdcv
+ *	(cf. flag CAN_CTRLMODE_TDC_MANUAL), then this value is
+ * ignored.
+ *
+ * @tdco_min: Transmitter Delay Compensation Offset minimum value.
+ * @tdco_max: Transmitter Delay Compensation Offset maximum value.
+ * Should not be zero. If the controller does not support TDC,
+ * then the pointer to this structure should be NULL.
+ *
+ * @tdcf_min: Transmitter Delay Compensation Filter window minimum
+ * value. If @tdcf_max is zero, this value is ignored.
+ * @tdcf_max: Transmitter Delay Compensation Filter window maximum
+ * value. Should be set to zero if the controller does not
+ * support this feature.
+ */
+struct can_tdc_const {
+ u32 tdcv_min;
+ u32 tdcv_max;
+ u32 tdco_min;
+ u32 tdco_max;
+ u32 tdcf_min;
+ u32 tdcf_max;
+};
+
+#ifdef CONFIG_CAN_CALC_BITTIMING
+int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack);
+
+void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
+ const struct can_bittiming *dbt,
+ u32 *ctrlmode, u32 ctrlmode_supported);
+#else /* !CONFIG_CAN_CALC_BITTIMING */
+static inline int
+can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack)
+{
+ netdev_err(dev, "bit-timing calculation not available\n");
+ return -EINVAL;
+}
+
+static inline void
+can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
+ const struct can_bittiming *dbt,
+ u32 *ctrlmode, u32 ctrlmode_supported)
+{
+}
+#endif /* CONFIG_CAN_CALC_BITTIMING */
+
+void can_sjw_set_default(struct can_bittiming *bt);
+
+int can_sjw_check(const struct net_device *dev, const struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack);
+
+int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc,
+ const u32 *bitrate_const,
+ const unsigned int bitrate_const_cnt,
+ struct netlink_ext_ack *extack);
+
+/*
+ * can_bit_time() - Duration of one bit
+ *
+ * Please refer to ISO 11898-1:2015, section 11.3.1.1 "Bit time" for
+ * additional information.
+ *
+ * Return: the number of time quanta in one bit.
+ */
+static inline unsigned int can_bit_time(const struct can_bittiming *bt)
+{
+ return CAN_SYNC_SEG + bt->prop_seg + bt->phase_seg1 + bt->phase_seg2;
+}
+
+#endif /* !_CAN_BITTIMING_H */
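/*
 * Worked example (assumed bit-timing values): prop_seg = 6, phase_seg1 = 7
 * and phase_seg2 = 2 give can_bit_time() = 1 + 6 + 7 + 2 = 16 time quanta.
 * With an 8 MHz CAN clock and brp = 1, one tq is 125 ns, so the bit time
 * is 2 us, i.e. a 500 kbit/s nominal bitrate.
 */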
diff --git a/include/linux/can/can-ml.h b/include/linux/can/can-ml.h
new file mode 100644
index 000000000000..8afa92d15a66
--- /dev/null
+++ b/include/linux/can/can-ml.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
+ * Copyright (c) 2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#ifndef CAN_ML_H
+#define CAN_ML_H
+
+#include <linux/can.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+
+#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
+#define CAN_EFF_RCV_HASH_BITS 10
+#define CAN_EFF_RCV_ARRAY_SZ (1 << CAN_EFF_RCV_HASH_BITS)
+
+enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_MAX };
+
+struct can_dev_rcv_lists {
+ struct hlist_head rx[RX_MAX];
+ struct hlist_head rx_sff[CAN_SFF_RCV_ARRAY_SZ];
+ struct hlist_head rx_eff[CAN_EFF_RCV_ARRAY_SZ];
+ int entries;
+};
+
+struct can_ml_priv {
+ struct can_dev_rcv_lists dev_rcv_lists;
+#ifdef CAN_J1939
+ struct j1939_priv *j1939_priv;
+#endif
+};
+
+static inline struct can_ml_priv *can_get_ml_priv(struct net_device *dev)
+{
+ return netdev_get_ml_priv(dev, ML_PRIV_CAN);
+}
+
+static inline void can_set_ml_priv(struct net_device *dev,
+ struct can_ml_priv *ml_priv)
+{
+ netdev_set_ml_priv(dev, ml_priv, ML_PRIV_CAN);
+}
+
+#endif /* CAN_ML_H */
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index c9a17bb1221c..5fb8d0e3f9c1 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -1,7 +1,8 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* linux/can/core.h
*
- * Protoypes and definitions for CAN protocol modules using the PF_CAN core
+ * Prototypes and definitions for CAN protocol modules using the PF_CAN core
*
* Authors: Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
* Urs Thuermann <urs.thuermann@volkswagen.de>
@@ -17,13 +18,6 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#define CAN_VERSION "20170425"
-
-/* increment this number each time you change some user-space interface */
-#define CAN_ABI_VERSION "9"
-
-#define CAN_VERSION_STRING "rev " CAN_VERSION " abi " CAN_ABI_VERSION
-
#define DNAME(dev) ((dev) ? (dev)->name : "any")
/**
@@ -40,6 +34,14 @@ struct can_proto {
struct proto *prot;
};
+/* CAN_REQUIRED_SIZE
+ * macro to find the minimum size of a struct
+ * that includes a requested member
+ */
+#define CAN_REQUIRED_SIZE(struct_type, member) \
+ (offsetof(typeof(struct_type), member) + \
+ sizeof(((typeof(struct_type) *)(NULL))->member))
+
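/*
 * Worked example (illustrative): for struct can_frame from <linux/can.h>,
 * CAN_REQUIRED_SIZE(struct can_frame, len) evaluates to
 * offsetof(struct can_frame, len) + sizeof(__u8) == 4 + 1 == 5, i.e. a
 * buffer must be at least 5 bytes to contain everything up to @len.
 */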
/* function prototypes for the CAN networklayer core (af_can.c) */
extern int can_proto_register(const struct can_proto *cp);
@@ -56,6 +58,6 @@ extern void can_rx_unregister(struct net *net, struct net_device *dev,
void *data);
extern int can_send(struct sk_buff *skb, int loop);
-extern int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+void can_sock_destruct(struct sock *sk);
#endif /* !_CAN_CORE_H */
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 141b05aade81..982ba245eb41 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/can/dev.h
*
@@ -14,9 +15,12 @@
#define _CAN_DEV_H
#include <linux/can.h>
+#include <linux/can/bittiming.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
+#include <linux/can/length.h>
#include <linux/can/netlink.h>
+#include <linux/can/skb.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
/*
@@ -28,6 +32,12 @@ enum can_mode {
CAN_MODE_SLEEP
};
+enum can_termination_gpio {
+ CAN_TERMINATION_GPIO_DISABLED = 0,
+ CAN_TERMINATION_GPIO_ENABLED,
+ CAN_TERMINATION_GPIO_MAX,
+};
+
/*
* CAN common private data
*/
@@ -35,24 +45,33 @@ struct can_priv {
struct net_device *dev;
struct can_device_stats can_stats;
- struct can_bittiming bittiming, data_bittiming;
const struct can_bittiming_const *bittiming_const,
*data_bittiming_const;
- const u16 *termination_const;
- unsigned int termination_const_cnt;
- u16 termination;
- const u32 *bitrate_const;
+ struct can_bittiming bittiming, data_bittiming;
+ const struct can_tdc_const *tdc_const;
+ struct can_tdc tdc;
+
unsigned int bitrate_const_cnt;
+ const u32 *bitrate_const;
const u32 *data_bitrate_const;
unsigned int data_bitrate_const_cnt;
+ u32 bitrate_max;
struct can_clock clock;
+ unsigned int termination_const_cnt;
+ const u16 *termination_const;
+ u16 termination;
+ struct gpio_desc *termination_gpio;
+ u16 termination_gpio_ohms[CAN_TERMINATION_GPIO_MAX];
+
+ unsigned int echo_skb_max;
+ struct sk_buff **echo_skb;
+
enum can_state state;
/* CAN controller features - see include/uapi/linux/can/netlink.h */
u32 ctrlmode; /* current options setting */
u32 ctrlmode_supported; /* options that can be modified by netlink */
- u32 ctrlmode_static; /* static enabled options for driver/hardware */
int restart_ms;
struct delayed_work restart_work;
@@ -65,83 +84,98 @@ struct can_priv {
enum can_state *state);
int (*do_get_berr_counter)(const struct net_device *dev,
struct can_berr_counter *bec);
-
- unsigned int echo_skb_max;
- struct sk_buff **echo_skb;
-
-#ifdef CONFIG_CAN_LEDS
- struct led_trigger *tx_led_trig;
- char tx_led_trig_name[CAN_LED_NAME_SZ];
- struct led_trigger *rx_led_trig;
- char rx_led_trig_name[CAN_LED_NAME_SZ];
- struct led_trigger *rxtx_led_trig;
- char rxtx_led_trig_name[CAN_LED_NAME_SZ];
-#endif
+ int (*do_get_auto_tdcv)(const struct net_device *dev, u32 *tdcv);
};
+static inline bool can_tdc_is_enabled(const struct can_priv *priv)
+{
+ return !!(priv->ctrlmode & CAN_CTRLMODE_TDC_MASK);
+}
+
/*
- * get_can_dlc(value) - helper macro to cast a given data length code (dlc)
- * to __u8 and ensure the dlc value to be max. 8 bytes.
+ * can_get_relative_tdco() - TDCO relative to the sample point
+ *
+ * struct can_tdc::tdco represents the absolute offset from TDCV. Some
+ * controllers instead use an offset relative to the Sample Point (SP)
+ * such that:
+ *
+ * SSP = TDCV + absolute TDCO
+ * = TDCV + SP + relative TDCO
+ *
+ * -+----------- one bit ----------+-- TX pin
+ * |<--- Sample Point --->|
*
- * To be used in the CAN netdriver receive path to ensure conformance with
- * ISO 11898-1 Chapter 8.4.2.3 (DLC field)
+ * --+----------- one bit ----------+-- RX pin
+ * |<-------- TDCV -------->|
+ * |<------------------------>| absolute TDCO
+ * |<--- Sample Point --->|
+ * | |<->| relative TDCO
+ * |<------------- Secondary Sample Point ------------>|
*/
-#define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC))
-#define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC))
-
-/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
-static inline bool can_dropped_invalid_skb(struct net_device *dev,
- struct sk_buff *skb)
+static inline s32 can_get_relative_tdco(const struct can_priv *priv)
{
- const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
-
- if (skb->protocol == htons(ETH_P_CAN)) {
- if (unlikely(skb->len != CAN_MTU ||
- cfd->len > CAN_MAX_DLEN))
- goto inval_skb;
- } else if (skb->protocol == htons(ETH_P_CANFD)) {
- if (unlikely(skb->len != CANFD_MTU ||
- cfd->len > CANFD_MAX_DLEN))
- goto inval_skb;
- } else
- goto inval_skb;
-
- return false;
-
-inval_skb:
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- return true;
-}
+ const struct can_bittiming *dbt = &priv->data_bittiming;
+ s32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
+ dbt->phase_seg1) * dbt->brp;
-static inline bool can_is_canfd_skb(const struct sk_buff *skb)
-{
- /* the CAN specific type of skb is identified by its data length */
- return skb->len == CANFD_MTU;
+ return (s32)priv->tdc.tdco - sample_point_in_tc;
}
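/*
 * Worked example (assumed values): with dbt->prop_seg = 6,
 * dbt->phase_seg1 = 7 and dbt->brp = 2, the sample point sits at
 * (1 + 6 + 7) * 2 = 28 minimum time quanta; an absolute tdco of 20
 * then yields a relative TDCO of 20 - 28 = -8.
 */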
/* helper to define static CAN controller features at device creation time */
-static inline void can_set_static_ctrlmode(struct net_device *dev,
- u32 static_mode)
+static inline int __must_check can_set_static_ctrlmode(struct net_device *dev,
+ u32 static_mode)
{
struct can_priv *priv = netdev_priv(dev);
/* alloc_candev() succeeded => netdev_priv() is valid at this point */
+ if (priv->ctrlmode_supported & static_mode) {
+ netdev_warn(dev,
+			    "Controller features cannot be supported and static at the same time\n");
+ return -EINVAL;
+ }
priv->ctrlmode = static_mode;
- priv->ctrlmode_static = static_mode;
/* override MTU which was set by default in can_setup()? */
if (static_mode & CAN_CTRLMODE_FD)
dev->mtu = CANFD_MTU;
+
+ return 0;
+}
+
+static inline u32 can_get_static_ctrlmode(struct can_priv *priv)
+{
+ return priv->ctrlmode & ~priv->ctrlmode_supported;
}
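/*
 * Probe-time sketch (hypothetical driver): declare a feature that is
 * permanently enabled in hardware. The call now fails if the same bit is
 * also advertised as user-switchable in ctrlmode_supported.
 */
static int hypothetical_probe_setup(struct net_device *dev)
{
	return can_set_static_ctrlmode(dev, CAN_CTRLMODE_BERR_REPORTING);
}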
-/* get data length from can_dlc with sanitized can_dlc */
-u8 can_dlc2len(u8 can_dlc);
+static inline bool can_is_canxl_dev_mtu(unsigned int mtu)
+{
+ return (mtu >= CANXL_MIN_MTU && mtu <= CANXL_MAX_MTU);
+}
-/* map the sanitized data length to an appropriate data length code */
-u8 can_len2dlc(u8 len);
+/* drop skb if it does not contain a valid CAN frame for sending */
+static inline bool can_dev_dropped_skb(struct net_device *dev, struct sk_buff *skb)
+{
+ struct can_priv *priv = netdev_priv(dev);
-struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max);
+ if (priv->ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+ netdev_info_once(dev,
+ "interface in listen only mode, dropping skb\n");
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return true;
+ }
+
+ return can_dropped_invalid_skb(dev, skb);
+}
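/*
 * Call-site sketch (hypothetical driver): drop unfit skbs at the top of
 * ndo_start_xmit() before touching the hardware.
 */
static netdev_tx_t hypothetical_start_xmit(struct sk_buff *skb,
					   struct net_device *dev)
{
	if (can_dev_dropped_skb(dev, skb))
		return NETDEV_TX_OK;	/* skb already freed and accounted */

	/* ... hand the frame to the controller ... */
	return NETDEV_TX_OK;
}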
+
+void can_setup(struct net_device *dev);
+
+struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
+ unsigned int txqs, unsigned int rxqs);
+#define alloc_candev(sizeof_priv, echo_skb_max) \
+ alloc_candev_mqs(sizeof_priv, echo_skb_max, 1, 1)
+#define alloc_candev_mq(sizeof_priv, echo_skb_max, count) \
+ alloc_candev_mqs(sizeof_priv, echo_skb_max, count, count)
void free_candev(struct net_device *dev);
/* a candev safe wrapper around netdev_priv */
@@ -150,6 +184,9 @@ struct can_priv *safe_candev_priv(struct net_device *dev);
int open_candev(struct net_device *dev);
void close_candev(struct net_device *dev);
int can_change_mtu(struct net_device *dev, int new_mtu);
+int can_eth_ioctl_hwts(struct net_device *netdev, struct ifreq *ifr, int cmd);
+int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
+ struct ethtool_ts_info *info);
int register_candev(struct net_device *dev);
void unregister_candev(struct net_device *dev);
@@ -157,18 +194,18 @@ void unregister_candev(struct net_device *dev);
int can_restart_now(struct net_device *dev);
void can_bus_off(struct net_device *dev);
+const char *can_get_state_str(const enum can_state state);
void can_change_state(struct net_device *dev, struct can_frame *cf,
enum can_state tx_state, enum can_state rx_state);
-void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
- unsigned int idx);
-unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
-void can_free_echo_skb(struct net_device *dev, unsigned int idx);
+#ifdef CONFIG_OF
+void of_can_transceiver(struct net_device *dev);
+#else
+static inline void of_can_transceiver(struct net_device *dev) { }
+#endif
-struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
-struct sk_buff *alloc_canfd_skb(struct net_device *dev,
- struct canfd_frame **cfd);
-struct sk_buff *alloc_can_err_skb(struct net_device *dev,
- struct can_frame **cf);
+extern struct rtnl_link_ops can_link_ops;
+int can_netlink_register(void);
+void can_netlink_unregister(void);
#endif /* !_CAN_DEV_H */
diff --git a/include/linux/can/dev/peak_canfd.h b/include/linux/can/dev/peak_canfd.h
index 46dceef2cfa6..f38772fd0c07 100644
--- a/include/linux/can/dev/peak_canfd.h
+++ b/include/linux/can/dev/peak_canfd.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* CAN driver for PEAK System micro-CAN based adapters
*
* Copyright (C) 2003-2011 PEAK System-Technik GmbH
* Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published
- * by the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef PUCAN_H
#define PUCAN_H
@@ -197,7 +189,7 @@ struct __packed pucan_rx_msg {
u8 client;
__le16 flags;
__le32 can_id;
- u8 d[0];
+ u8 d[];
};
/* uCAN error types */
@@ -274,7 +266,7 @@ struct __packed pucan_tx_msg {
u8 client;
__le16 flags;
__le32 can_id;
- u8 d[0];
+ u8 d[];
};
/* build the cmd opcode_channel field with respect to the correct endianness */
@@ -290,7 +282,7 @@ static inline int pucan_msg_get_channel(const struct pucan_rx_msg *msg)
}
/* return the dlc value from any received message channel_dlc field */
-static inline int pucan_msg_get_dlc(const struct pucan_rx_msg *msg)
+static inline u8 pucan_msg_get_dlc(const struct pucan_rx_msg *msg)
{
return msg->channel_dlc >> 4;
}
diff --git a/include/linux/can/led.h b/include/linux/can/led.h
deleted file mode 100644
index 2746f7c2f87d..000000000000
--- a/include/linux/can/led.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright 2012, Fabio Baltieri <fabio.baltieri@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _CAN_LED_H
-#define _CAN_LED_H
-
-#include <linux/if.h>
-#include <linux/leds.h>
-#include <linux/netdevice.h>
-
-enum can_led_event {
- CAN_LED_EVENT_OPEN,
- CAN_LED_EVENT_STOP,
- CAN_LED_EVENT_TX,
- CAN_LED_EVENT_RX,
-};
-
-#ifdef CONFIG_CAN_LEDS
-
-/* keep space for interface name + "-tx"/"-rx"/"-rxtx"
- * suffix and null terminator
- */
-#define CAN_LED_NAME_SZ (IFNAMSIZ + 6)
-
-void can_led_event(struct net_device *netdev, enum can_led_event event);
-void devm_can_led_init(struct net_device *netdev);
-int __init can_led_notifier_init(void);
-void __exit can_led_notifier_exit(void);
-
-#else
-
-static inline void can_led_event(struct net_device *netdev,
- enum can_led_event event)
-{
-}
-static inline void devm_can_led_init(struct net_device *netdev)
-{
-}
-static inline int can_led_notifier_init(void)
-{
- return 0;
-}
-static inline void can_led_notifier_exit(void)
-{
-}
-
-#endif
-
-#endif /* !_CAN_LED_H */
diff --git a/include/linux/can/length.h b/include/linux/can/length.h
new file mode 100644
index 000000000000..6995092b774e
--- /dev/null
+++ b/include/linux/can/length.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Oliver Hartkopp <socketcan@hartkopp.net>
+ * Copyright (C) 2020 Marc Kleine-Budde <kernel@pengutronix.de>
+ */
+
+#ifndef _CAN_LENGTH_H
+#define _CAN_LENGTH_H
+
+/*
+ * Size of a Classical CAN Standard Frame
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start-of-frame 1
+ * Identifier 11
+ * Remote transmission request (RTR) 1
+ * Identifier extension bit (IDE) 1
+ * Reserved bit (r0) 1
+ * Data length code (DLC) 4
+ * Data field 0...64
+ * CRC 15
+ * CRC delimiter 1
+ * ACK slot 1
+ * ACK delimiter 1
+ * End-of-frame (EOF) 7
+ * Inter frame spacing 3
+ *
+ * rounded up and ignoring bitstuffing
+ */
+#define CAN_FRAME_OVERHEAD_SFF DIV_ROUND_UP(47, 8)
+
+/*
+ * Size of a Classical CAN Extended Frame
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start-of-frame 1
+ * Identifier A 11
+ * Substitute remote request (SRR) 1
+ * Identifier extension bit (IDE) 1
+ * Identifier B 18
+ * Remote transmission request (RTR) 1
+ * Reserved bits (r1, r0) 2
+ * Data length code (DLC) 4
+ * Data field 0...64
+ * CRC 15
+ * CRC delimiter 1
+ * ACK slot 1
+ * ACK delimiter 1
+ * End-of-frame (EOF) 7
+ * Inter frame spacing 3
+ *
+ * rounded up and ignoring bitstuffing
+ */
+#define CAN_FRAME_OVERHEAD_EFF DIV_ROUND_UP(67, 8)
+
+/*
+ * Size of a CAN-FD Standard Frame
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start-of-frame 1
+ * Identifier 11
+ * Reserved bit (r1) 1
+ * Identifier extension bit (IDE) 1
+ * Flexible data rate format (FDF) 1
+ * Reserved bit (r0) 1
+ * Bit Rate Switch (BRS) 1
+ * Error Status Indicator (ESI) 1
+ * Data length code (DLC) 4
+ * Data field 0...512
+ * Stuff Bit Count (SBC) 0...16: 4 20...64: 5
+ * CRC 0...16: 17 20...64: 21
+ * CRC delimiter (CD) 1
+ * ACK slot (AS) 1
+ * ACK delimiter (AD) 1
+ * End-of-frame (EOF) 7
+ * Inter frame spacing 3
+ *
+ * assuming CRC21, rounded up and ignoring bitstuffing
+ */
+#define CANFD_FRAME_OVERHEAD_SFF DIV_ROUND_UP(61, 8)
+
+/*
+ * Size of a CAN-FD Extended Frame
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start-of-frame 1
+ * Identifier A 11
+ * Substitute remote request (SRR) 1
+ * Identifier extension bit (IDE) 1
+ * Identifier B 18
+ * Reserved bit (r1) 1
+ * Flexible data rate format (FDF) 1
+ * Reserved bit (r0) 1
+ * Bit Rate Switch (BRS) 1
+ * Error Status Indicator (ESI) 1
+ * Data length code (DLC) 4
+ * Data field 0...512
+ * Stuff Bit Count (SBC) 0...16: 4 20...64: 5
+ * CRC 0...16: 17 20...64: 21
+ * CRC delimiter (CD) 1
+ * ACK slot (AS) 1
+ * ACK delimiter (AD) 1
+ * End-of-frame (EOF) 7
+ * Inter frame spacing 3
+ *
+ * assuming CRC21, rounded up and ignoring bitstuffing
+ */
+#define CANFD_FRAME_OVERHEAD_EFF DIV_ROUND_UP(80, 8)
+
+/*
+ * Maximum size of a Classical CAN frame
+ * (rounded up and ignoring bitstuffing)
+ */
+#define CAN_FRAME_LEN_MAX (CAN_FRAME_OVERHEAD_EFF + CAN_MAX_DLEN)
+
+/*
+ * Maximum size of a CAN-FD frame
+ * (rounded up and ignoring bitstuffing)
+ */
+#define CANFD_FRAME_LEN_MAX (CANFD_FRAME_OVERHEAD_EFF + CANFD_MAX_DLEN)
+
+/*
+ * can_cc_dlc2len(value) - convert a given data length code (dlc) of a
+ * Classical CAN frame into a valid data length of max. 8 bytes.
+ *
+ * To be used in the CAN netdriver receive path to ensure conformance with
+ * ISO 11898-1 Chapter 8.4.2.3 (DLC field)
+ */
+#define can_cc_dlc2len(dlc) (min_t(u8, (dlc), CAN_MAX_DLEN))
+
+/* helper to get the data length code (DLC) for Classical CAN raw DLC access */
+static inline u8 can_get_cc_dlc(const struct can_frame *cf, const u32 ctrlmode)
+{
+ /* return len8_dlc as dlc value only if all conditions apply */
+ if ((ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC) &&
+ (cf->len == CAN_MAX_DLEN) &&
+ (cf->len8_dlc > CAN_MAX_DLEN && cf->len8_dlc <= CAN_MAX_RAW_DLC))
+ return cf->len8_dlc;
+
+ /* return the payload length as dlc value */
+ return cf->len;
+}
+
+/* helper to set len and len8_dlc value for Classical CAN raw DLC access */
+static inline void can_frame_set_cc_len(struct can_frame *cf, const u8 dlc,
+ const u32 ctrlmode)
+{
+ /* the caller already ensured that dlc is a value from 0 .. 15 */
+ if (ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC && dlc > CAN_MAX_DLEN)
+ cf->len8_dlc = dlc;
+
+ /* limit the payload length 'len' to CAN_MAX_DLEN */
+ cf->len = can_cc_dlc2len(dlc);
+}
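/*
 * Worked example (assumed raw DLC 0xf with CAN_CTRLMODE_CC_LEN8_DLC set):
 * can_frame_set_cc_len() stores len = 8 and len8_dlc = 15, so a later
 * can_get_cc_dlc() returns the original raw DLC of 15 instead of 8.
 */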
+
+/* get data length from raw data length code (DLC) */
+u8 can_fd_dlc2len(u8 dlc);
+
+/* map the sanitized data length to an appropriate data length code */
+u8 can_fd_len2dlc(u8 len);
+
+/* calculate the CAN Frame length in bytes of a given skb */
+unsigned int can_skb_get_frame_len(const struct sk_buff *skb);
+
+/* map the data length to an appropriate data link layer length */
+static inline u8 canfd_sanitize_len(u8 len)
+{
+ return can_fd_dlc2len(can_fd_len2dlc(len));
+}
+
+#endif /* !_CAN_LENGTH_H */
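/*
 * Worked example: CAN FD only encodes payload lengths {0..8, 12, 16, 20,
 * 24, 32, 48, 64}, so canfd_sanitize_len(17) rounds up via DLC 11 to 20.
 */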
diff --git a/include/linux/can/platform/cc770.h b/include/linux/can/platform/cc770.h
index 78b2d44f04cf..9587d6882906 100644
--- a/include/linux/can/platform/cc770.h
+++ b/include/linux/can/platform/cc770.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _CAN_PLATFORM_CC770_H
#define _CAN_PLATFORM_CC770_H
diff --git a/include/linux/can/platform/flexcan.h b/include/linux/can/platform/flexcan.h
new file mode 100644
index 000000000000..1b536fb999de
--- /dev/null
+++ b/include/linux/can/platform/flexcan.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Angelo Dureghello <angelo@kernel-space.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAN_PLATFORM_FLEXCAN_H
+#define _CAN_PLATFORM_FLEXCAN_H
+
+struct flexcan_platform_data {
+ u32 clock_frequency;
+ u8 clk_src;
+};
+
+#endif /* _CAN_PLATFORM_FLEXCAN_H */
diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h
deleted file mode 100644
index d44fcae274ff..000000000000
--- a/include/linux/can/platform/mcp251x.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef _CAN_PLATFORM_MCP251X_H
-#define _CAN_PLATFORM_MCP251X_H
-
-/*
- *
- * CAN bus driver for Microchip 251x CAN Controller with SPI Interface
- *
- */
-
-#include <linux/spi/spi.h>
-
-/*
- * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data
- * @oscillator_frequency: - oscillator frequency in Hz
- */
-
-struct mcp251x_platform_data {
- unsigned long oscillator_frequency;
-};
-
-#endif /* !_CAN_PLATFORM_MCP251X_H */
diff --git a/include/linux/can/platform/rcar_can.h b/include/linux/can/platform/rcar_can.h
deleted file mode 100644
index 0f4a2f3df504..000000000000
--- a/include/linux/can/platform/rcar_can.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _CAN_PLATFORM_RCAR_CAN_H_
-#define _CAN_PLATFORM_RCAR_CAN_H_
-
-#include <linux/types.h>
-
-/* Clock Select Register settings */
-enum CLKR {
- CLKR_CLKP1 = 0, /* Peripheral clock (clkp1) */
- CLKR_CLKP2 = 1, /* Peripheral clock (clkp2) */
- CLKR_CLKEXT = 3 /* Externally input clock */
-};
-
-struct rcar_can_platform_data {
- enum CLKR clock_select; /* Clock source select */
-};
-
-#endif /* !_CAN_PLATFORM_RCAR_CAN_H_ */
diff --git a/include/linux/can/platform/sja1000.h b/include/linux/can/platform/sja1000.h
index 93570b61ec6c..6a869682c120 100644
--- a/include/linux/can/platform/sja1000.h
+++ b/include/linux/can/platform/sja1000.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _CAN_PLATFORM_SJA1000_H
#define _CAN_PLATFORM_SJA1000_H
@@ -13,7 +14,7 @@
#define OCR_MODE_TEST 0x01
#define OCR_MODE_NORMAL 0x02
#define OCR_MODE_CLOCK 0x03
-#define OCR_MODE_MASK 0x07
+#define OCR_MODE_MASK 0x03
#define OCR_TX0_INVERT 0x04
#define OCR_TX0_PULLDOWN 0x08
#define OCR_TX0_PULLUP 0x10
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index cb31683bbe15..c205c51d79c9 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/can/rx-offload.h
*
* Copyright (c) 2014 David Jander, Protonic Holland
* Copyright (c) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the version 2 of the GNU General Public License
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _CAN_RX_OFFLOAD_H
@@ -23,10 +15,12 @@
struct can_rx_offload {
struct net_device *dev;
- unsigned int (*mailbox_read)(struct can_rx_offload *offload, struct can_frame *cf,
- u32 *timestamp, unsigned int mb);
+ struct sk_buff *(*mailbox_read)(struct can_rx_offload *offload,
+ unsigned int mb, u32 *timestamp,
+ bool drop);
struct sk_buff_head skb_queue;
+ struct sk_buff_head skb_irq_queue;
u32 skb_queue_len_max;
unsigned int mb_first;
@@ -37,20 +31,29 @@ struct can_rx_offload {
bool inc;
};
-int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload);
-int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight);
-int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg);
+int can_rx_offload_add_timestamp(struct net_device *dev,
+ struct can_rx_offload *offload);
+int can_rx_offload_add_fifo(struct net_device *dev,
+ struct can_rx_offload *offload,
+ unsigned int weight);
+int can_rx_offload_add_manual(struct net_device *dev,
+ struct can_rx_offload *offload,
+ unsigned int weight);
+int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
+ u64 reg);
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
-int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb);
-void can_rx_offload_reset(struct can_rx_offload *offload);
+int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
+ struct sk_buff *skb, u32 timestamp);
+unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
+ unsigned int idx, u32 timestamp,
+ unsigned int *frame_len_ptr);
+int can_rx_offload_queue_tail(struct can_rx_offload *offload,
+ struct sk_buff *skb);
+void can_rx_offload_irq_finish(struct can_rx_offload *offload);
+void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload);
void can_rx_offload_del(struct can_rx_offload *offload);
void can_rx_offload_enable(struct can_rx_offload *offload);
-static inline void can_rx_offload_schedule(struct can_rx_offload *offload)
-{
- napi_schedule(&offload->napi);
-}
-
static inline void can_rx_offload_disable(struct can_rx_offload *offload)
{
napi_disable(&offload->napi);
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index 51bb6532785c..1abc25a8d144 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* linux/can/skb.h
*
@@ -15,6 +16,27 @@
#include <linux/can.h>
#include <net/sock.h>
+void can_flush_echo_skb(struct net_device *dev);
+int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ unsigned int idx, unsigned int frame_len);
+struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *len_ptr,
+ unsigned int *frame_len_ptr);
+unsigned int __must_check can_get_echo_skb(struct net_device *dev,
+ unsigned int idx,
+ unsigned int *frame_len_ptr);
+void can_free_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *frame_len_ptr);
+struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
+struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ struct canfd_frame **cfd);
+struct sk_buff *alloc_canxl_skb(struct net_device *dev,
+ struct canxl_frame **cxl,
+ unsigned int data_len);
+struct sk_buff *alloc_can_err_skb(struct net_device *dev,
+ struct can_frame **cf);
+bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb);
+
/*
* The struct can_skb_priv is used to transport additional information along
* with the stored struct can(fd)_frame that can not be contained in existing
@@ -28,12 +50,14 @@
* struct can_skb_priv - private additional data inside CAN sk_buffs
* @ifindex: ifindex of the first interface the CAN frame appeared on
 * @skbcnt: atomic counter to have a unique id together with skb pointer
+ * @frame_len: length of CAN frame in data link layer
* @cf: align to the following CAN frame at skb->data
*/
struct can_skb_priv {
int ifindex;
int skbcnt;
- struct can_frame cf[0];
+ unsigned int frame_len;
+ struct can_frame cf[];
};
static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb)
@@ -48,8 +72,12 @@ static inline void can_skb_reserve(struct sk_buff *skb)
static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
{
- if (sk) {
- sock_hold(sk);
+ /* If the socket has already been closed by user space, the
+ * refcount may already be 0 (and the socket will be freed
+ * after the last TX skb has been freed). So only increase
+ * socket refcount if the refcount is > 0.
+ */
+ if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
skb->destructor = sock_efree;
skb->sk = sk;
}
@@ -60,21 +88,72 @@ static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
*/
static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
{
- if (skb_shared(skb)) {
- struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
-
- if (likely(nskb)) {
- can_skb_set_owner(nskb, skb->sk);
- consume_skb(skb);
- return nskb;
- } else {
- kfree_skb(skb);
- return NULL;
- }
+ struct sk_buff *nskb;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!nskb)) {
+ kfree_skb(skb);
+ return NULL;
}
- /* we can assume to have an unshared skb with proper owner */
- return skb;
+ can_skb_set_owner(nskb, skb->sk);
+ consume_skb(skb);
+ return nskb;
+}
+
+static inline bool can_is_can_skb(const struct sk_buff *skb)
+{
+ struct can_frame *cf = (struct can_frame *)skb->data;
+
+ /* the CAN specific type of skb is identified by its data length */
+ return (skb->len == CAN_MTU && cf->len <= CAN_MAX_DLEN);
+}
+
+static inline bool can_is_canfd_skb(const struct sk_buff *skb)
+{
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ /* the CAN specific type of skb is identified by its data length */
+ return (skb->len == CANFD_MTU && cfd->len <= CANFD_MAX_DLEN);
+}
+
+static inline bool can_is_canxl_skb(const struct sk_buff *skb)
+{
+ const struct canxl_frame *cxl = (struct canxl_frame *)skb->data;
+
+ if (skb->len < CANXL_HDR_SIZE + CANXL_MIN_DLEN || skb->len > CANXL_MTU)
+ return false;
+
+ /* this also checks valid CAN XL data length boundaries */
+ if (skb->len != CANXL_HDR_SIZE + cxl->len)
+ return false;
+
+ return cxl->flags & CANXL_XLF;
+}
+
+/* get length element value from can[|fd|xl]_frame structure */
+static inline unsigned int can_skb_get_len_val(struct sk_buff *skb)
+{
+ const struct canxl_frame *cxl = (struct canxl_frame *)skb->data;
+ const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ if (can_is_canxl_skb(skb))
+ return cxl->len;
+
+ return cfd->len;
+}
+
+/* get needed data length inside CAN frame for all frame types (RTR aware) */
+static inline unsigned int can_skb_get_data_len(struct sk_buff *skb)
+{
+ unsigned int len = can_skb_get_len_val(skb);
+ const struct can_frame *cf = (struct can_frame *)skb->data;
+
+ /* RTR frames have an actual length of zero */
+ if (can_is_can_skb(skb) && cf->can_id & CAN_RTR_FLAG)
+ return 0;
+
+ return len;
}
#endif /* !_CAN_SKB_H */
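/*
 * Note on RTR handling (worked example): a classical CAN remote frame may
 * carry a non-zero len (the requested DLC) but transports no payload, so
 * can_skb_get_len_val() returns that length while can_skb_get_data_len()
 * returns 0.
 */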
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 6ffb67e10c06..0c356a517991 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This is <linux/capability.h>
*
@@ -13,20 +14,19 @@
#define _LINUX_CAPABILITY_H
#include <uapi/linux/capability.h>
-
+#include <linux/uidgid.h>
+#include <linux/bits.h>
#define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3
-#define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3
extern int file_caps_enabled;
-typedef struct kernel_cap_struct {
- __u32 cap[_KERNEL_CAPABILITY_U32S];
-} kernel_cap_t;
+typedef struct { u64 val; } kernel_cap_t;
-/* exact same as vfs_cap_data but in cpu endian and always filled completely */
+/* same as vfs_ns_cap_data but in cpu endian and always filled completely */
struct cpu_vfs_cap_data {
__u32 magic_etc;
+ kuid_t rootid;
kernel_cap_t permitted;
kernel_cap_t inheritable;
};
@@ -34,22 +34,12 @@ struct cpu_vfs_cap_data {
#define _USER_CAP_HEADER_SIZE (sizeof(struct __user_cap_header_struct))
#define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t))
-
struct file;
struct inode;
struct dentry;
struct task_struct;
struct user_namespace;
-
-extern const kernel_cap_t __cap_empty_set;
-extern const kernel_cap_t __cap_init_eff_set;
-
-/*
- * Internal kernel functions only
- */
-
-#define CAP_FOR_EACH_U32(__capi) \
- for (__capi = 0; __capi < _KERNEL_CAPABILITY_U32S; ++__capi)
+struct mnt_idmap;
/*
* CAP_FS_MASK and CAP_NFSD_MASKS:
@@ -64,94 +54,52 @@ extern const kernel_cap_t __cap_init_eff_set;
* 2. The security.* and trusted.* xattrs are fs-related MAC permissions
*/
-# define CAP_FS_MASK_B0 (CAP_TO_MASK(CAP_CHOWN) \
- | CAP_TO_MASK(CAP_MKNOD) \
- | CAP_TO_MASK(CAP_DAC_OVERRIDE) \
- | CAP_TO_MASK(CAP_DAC_READ_SEARCH) \
- | CAP_TO_MASK(CAP_FOWNER) \
- | CAP_TO_MASK(CAP_FSETID))
-
-# define CAP_FS_MASK_B1 (CAP_TO_MASK(CAP_MAC_OVERRIDE))
-
-#if _KERNEL_CAPABILITY_U32S != 2
-# error Fix up hand-coded capability macro initializers
-#else /* HAND-CODED capability initializers */
-
-#define CAP_LAST_U32 ((_KERNEL_CAPABILITY_U32S) - 1)
-#define CAP_LAST_U32_VALID_MASK (CAP_TO_MASK(CAP_LAST_CAP + 1) -1)
-
-# define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }})
-# define CAP_FULL_SET ((kernel_cap_t){{ ~0, CAP_LAST_U32_VALID_MASK }})
-# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
- | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
- CAP_FS_MASK_B1 } })
-# define CAP_NFSD_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
- | CAP_TO_MASK(CAP_SYS_RESOURCE), \
- CAP_FS_MASK_B1 } })
-
-#endif /* _KERNEL_CAPABILITY_U32S != 2 */
-
-# define cap_clear(c) do { (c) = __cap_empty_set; } while (0)
-
-#define cap_raise(c, flag) ((c).cap[CAP_TO_INDEX(flag)] |= CAP_TO_MASK(flag))
-#define cap_lower(c, flag) ((c).cap[CAP_TO_INDEX(flag)] &= ~CAP_TO_MASK(flag))
-#define cap_raised(c, flag) ((c).cap[CAP_TO_INDEX(flag)] & CAP_TO_MASK(flag))
-
-#define CAP_BOP_ALL(c, a, b, OP) \
-do { \
- unsigned __capi; \
- CAP_FOR_EACH_U32(__capi) { \
- c.cap[__capi] = a.cap[__capi] OP b.cap[__capi]; \
- } \
-} while (0)
-
-#define CAP_UOP_ALL(c, a, OP) \
-do { \
- unsigned __capi; \
- CAP_FOR_EACH_U32(__capi) { \
- c.cap[__capi] = OP a.cap[__capi]; \
- } \
-} while (0)
+# define CAP_FS_MASK (BIT_ULL(CAP_CHOWN) \
+ | BIT_ULL(CAP_MKNOD) \
+ | BIT_ULL(CAP_DAC_OVERRIDE) \
+ | BIT_ULL(CAP_DAC_READ_SEARCH) \
+ | BIT_ULL(CAP_FOWNER) \
+ | BIT_ULL(CAP_FSETID) \
+ | BIT_ULL(CAP_MAC_OVERRIDE))
+#define CAP_VALID_MASK (BIT_ULL(CAP_LAST_CAP+1)-1)
+
+# define CAP_EMPTY_SET ((kernel_cap_t) { 0 })
+# define CAP_FULL_SET ((kernel_cap_t) { CAP_VALID_MASK })
+# define CAP_FS_SET ((kernel_cap_t) { CAP_FS_MASK | BIT_ULL(CAP_LINUX_IMMUTABLE) })
+# define CAP_NFSD_SET ((kernel_cap_t) { CAP_FS_MASK | BIT_ULL(CAP_SYS_RESOURCE) })
+
+# define cap_clear(c) do { (c).val = 0; } while (0)
+
+#define cap_raise(c, flag) ((c).val |= BIT_ULL(flag))
+#define cap_lower(c, flag) ((c).val &= ~BIT_ULL(flag))
+#define cap_raised(c, flag) (((c).val & BIT_ULL(flag)) != 0)
static inline kernel_cap_t cap_combine(const kernel_cap_t a,
const kernel_cap_t b)
{
- kernel_cap_t dest;
- CAP_BOP_ALL(dest, a, b, |);
- return dest;
+ return (kernel_cap_t) { a.val | b.val };
}
static inline kernel_cap_t cap_intersect(const kernel_cap_t a,
const kernel_cap_t b)
{
- kernel_cap_t dest;
- CAP_BOP_ALL(dest, a, b, &);
- return dest;
+ return (kernel_cap_t) { a.val & b.val };
}
static inline kernel_cap_t cap_drop(const kernel_cap_t a,
const kernel_cap_t drop)
{
- kernel_cap_t dest;
- CAP_BOP_ALL(dest, a, drop, &~);
- return dest;
+ return (kernel_cap_t) { a.val &~ drop.val };
}
-static inline kernel_cap_t cap_invert(const kernel_cap_t c)
+static inline bool cap_isclear(const kernel_cap_t a)
{
- kernel_cap_t dest;
- CAP_UOP_ALL(dest, c, ~);
- return dest;
+ return !a.val;
}
-static inline bool cap_isclear(const kernel_cap_t a)
+static inline bool cap_isidentical(const kernel_cap_t a, const kernel_cap_t b)
{
- unsigned __capi;
- CAP_FOR_EACH_U32(__capi) {
- if (a.cap[__capi] != 0)
- return false;
- }
- return true;
+ return a.val == b.val;
}
/*
@@ -163,39 +111,31 @@ static inline bool cap_isclear(const kernel_cap_t a)
*/
static inline bool cap_issubset(const kernel_cap_t a, const kernel_cap_t set)
{
- kernel_cap_t dest;
- dest = cap_drop(a, set);
- return cap_isclear(dest);
+ return !(a.val & ~set.val);
}
/* Used to decide between falling back on the old suser() or fsuser(). */
static inline kernel_cap_t cap_drop_fs_set(const kernel_cap_t a)
{
- const kernel_cap_t __cap_fs_set = CAP_FS_SET;
- return cap_drop(a, __cap_fs_set);
+ return cap_drop(a, CAP_FS_SET);
}
static inline kernel_cap_t cap_raise_fs_set(const kernel_cap_t a,
const kernel_cap_t permitted)
{
- const kernel_cap_t __cap_fs_set = CAP_FS_SET;
- return cap_combine(a,
- cap_intersect(permitted, __cap_fs_set));
+ return cap_combine(a, cap_intersect(permitted, CAP_FS_SET));
}
static inline kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a)
{
- const kernel_cap_t __cap_fs_set = CAP_NFSD_SET;
- return cap_drop(a, __cap_fs_set);
+ return cap_drop(a, CAP_NFSD_SET);
}
static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
const kernel_cap_t permitted)
{
- const kernel_cap_t __cap_nfsd_set = CAP_NFSD_SET;
- return cap_combine(a,
- cap_intersect(permitted, __cap_nfsd_set));
+ return cap_combine(a, cap_intersect(permitted, CAP_NFSD_SET));
}
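/*
 * Sketch (illustrative only): with the u64 representation, capability set
 * algebra is plain bit arithmetic.
 */
static inline bool hypothetical_cap_demo(void)
{
	kernel_cap_t a = CAP_EMPTY_SET;

	cap_raise(a, CAP_CHOWN);		/* a.val |= BIT_ULL(CAP_CHOWN) */
	return cap_issubset(a, CAP_FS_SET);	/* true: CAP_CHOWN is in CAP_FS_MASK */
}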
#ifdef CONFIG_MULTIUSER
@@ -208,6 +148,7 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
extern bool capable(int cap);
extern bool ns_capable(struct user_namespace *ns, int cap);
extern bool ns_capable_noaudit(struct user_namespace *ns, int cap);
+extern bool ns_capable_setid(struct user_namespace *ns, int cap);
#else
static inline bool has_capability(struct task_struct *t, int cap)
{
@@ -239,13 +180,40 @@ static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap)
{
return true;
}
+static inline bool ns_capable_setid(struct user_namespace *ns, int cap)
+{
+ return true;
+}
#endif /* CONFIG_MULTIUSER */
-extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode);
-extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
+bool privileged_wrt_inode_uidgid(struct user_namespace *ns,
+ struct mnt_idmap *idmap,
+ const struct inode *inode);
+bool capable_wrt_inode_uidgid(struct mnt_idmap *idmap,
+ const struct inode *inode, int cap);
extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns);
+static inline bool perfmon_capable(void)
+{
+ return capable(CAP_PERFMON) || capable(CAP_SYS_ADMIN);
+}
+
+static inline bool bpf_capable(void)
+{
+ return capable(CAP_BPF) || capable(CAP_SYS_ADMIN);
+}
+
+static inline bool checkpoint_restore_ns_capable(struct user_namespace *ns)
+{
+ return ns_capable(ns, CAP_CHECKPOINT_RESTORE) ||
+ ns_capable(ns, CAP_SYS_ADMIN);
+}
/* audit system wants to get cap info from files as well */
-extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
+int get_vfs_caps_from_disk(struct mnt_idmap *idmap,
+ const struct dentry *dentry,
+ struct cpu_vfs_cap_data *cpu_caps);
+
+int cap_convert_nscap(struct mnt_idmap *idmap, struct dentry *dentry,
+ const void **ivalue, size_t size);
#endif /* !_LINUX_CAPABILITY_H */
diff --git a/include/linux/cb710.h b/include/linux/cb710.h
index 8cc10411bab2..405657a9a0d5 100644
--- a/include/linux/cb710.h
+++ b/include/linux/cb710.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* cb710/cb710.h
*
* Copyright by Michał Mirosław, 2008-2009
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef LINUX_CB710_DRIVER_H
#define LINUX_CB710_DRIVER_H
@@ -39,7 +36,7 @@ struct cb710_chip {
unsigned slot_mask;
unsigned slots;
spinlock_t irq_lock;
- struct cb710_slot slot[0];
+ struct cb710_slot slot[];
};
/* NOTE: cb710_chip.slots is modified only during device init/exit and
@@ -129,10 +126,6 @@ void cb710_dump_regs(struct cb710_chip *chip, unsigned dump);
* cb710/sgbuf2.h
*
* Copyright by Michał Mirosław, 2008-2009
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef LINUX_CB710_SG_H
#define LINUX_CB710_SG_H
diff --git a/include/linux/cc_platform.h b/include/linux/cc_platform.h
new file mode 100644
index 000000000000..cb0d6cd1c12f
--- /dev/null
+++ b/include/linux/cc_platform.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Confidential Computing Platform Capability checks
+ *
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ */
+
+#ifndef _LINUX_CC_PLATFORM_H
+#define _LINUX_CC_PLATFORM_H
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+
+/**
+ * enum cc_attr - Confidential computing attributes
+ *
+ * These attributes represent confidential computing features that are
+ * currently active.
+ */
+enum cc_attr {
+ /**
+ * @CC_ATTR_MEM_ENCRYPT: Memory encryption is active
+ *
+ * The platform/OS is running with active memory encryption. This
+ * includes running either as a bare-metal system or a hypervisor
+ * and actively using memory encryption or as a guest/virtual machine
+ * and actively using memory encryption.
+ *
+ * Examples include SME, SEV and SEV-ES.
+ */
+ CC_ATTR_MEM_ENCRYPT,
+
+ /**
+ * @CC_ATTR_HOST_MEM_ENCRYPT: Host memory encryption is active
+ *
+ * The platform/OS is running as a bare-metal system or a hypervisor
+ * and actively using memory encryption.
+ *
+ * Examples include SME.
+ */
+ CC_ATTR_HOST_MEM_ENCRYPT,
+
+ /**
+ * @CC_ATTR_GUEST_MEM_ENCRYPT: Guest memory encryption is active
+ *
+ * The platform/OS is running as a guest/virtual machine and actively
+ * using memory encryption.
+ *
+ * Examples include SEV and SEV-ES.
+ */
+ CC_ATTR_GUEST_MEM_ENCRYPT,
+
+ /**
+ * @CC_ATTR_GUEST_STATE_ENCRYPT: Guest state encryption is active
+ *
+ * The platform/OS is running as a guest/virtual machine and actively
+ * using memory encryption and register state encryption.
+ *
+ * Examples include SEV-ES.
+ */
+ CC_ATTR_GUEST_STATE_ENCRYPT,
+
+ /**
+ * @CC_ATTR_GUEST_UNROLL_STRING_IO: String I/O is implemented with
+ * IN/OUT instructions
+ *
+ * The platform/OS is running as a guest/virtual machine and uses
+ * IN/OUT instructions in place of string I/O.
+ *
+ * Examples include TDX guest & SEV.
+ */
+ CC_ATTR_GUEST_UNROLL_STRING_IO,
+
+ /**
+	 * @CC_ATTR_GUEST_SEV_SNP: Guest SEV-SNP is active.
+ *
+ * The platform/OS is running as a guest/virtual machine and actively
+ * using AMD SEV-SNP features.
+ */
+ CC_ATTR_GUEST_SEV_SNP,
+
+ /**
+ * @CC_ATTR_HOTPLUG_DISABLED: Hotplug is not supported or disabled.
+ *
+	 * The platform/OS is running as a guest/virtual machine that does
+	 * not support the CPU hotplug feature.
+ *
+ * Examples include TDX Guest.
+ */
+ CC_ATTR_HOTPLUG_DISABLED,
+};
+
+#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
+
+/**
+ * cc_platform_has() - Checks if the specified cc_attr attribute is active
+ * @attr: Confidential computing attribute to check
+ *
+ * The cc_platform_has() function will return an indicator as to whether the
+ * specified Confidential Computing attribute is currently active.
+ *
+ * Context: Any context
+ * Return:
+ * * TRUE - Specified Confidential Computing attribute is active
+ * * FALSE - Specified Confidential Computing attribute is not active
+ */
+bool cc_platform_has(enum cc_attr attr);
+
+#else /* !CONFIG_ARCH_HAS_CC_PLATFORM */
+
+static inline bool cc_platform_has(enum cc_attr attr) { return false; }
+
+#endif /* CONFIG_ARCH_HAS_CC_PLATFORM */
+
+#endif /* _LINUX_CC_PLATFORM_H */
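/*
 * Usage sketch (hypothetical caller): decide whether DMA must bounce
 * through shared (decrypted) buffers when running as an encrypted guest.
 */
static bool hypothetical_force_dma_bounce(void)
{
	return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
}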
diff --git a/include/linux/cciss_ioctl.h b/include/linux/cciss_ioctl.h
index 84b6e2d0f44d..1d5229200a71 100644
--- a/include/linux/cciss_ioctl.h
+++ b/include/linux/cciss_ioctl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef CCISS_IOCTLH
#define CCISS_IOCTLH
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index 3285c944194a..868924dec5a1 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -1,14 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __CCP_H__
@@ -18,14 +15,13 @@
#include <linux/workqueue.h>
#include <linux/list.h>
#include <crypto/aes.h>
-#include <crypto/sha.h>
-
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
struct ccp_device;
struct ccp_cmd;
-#if defined(CONFIG_CRYPTO_DEV_CCP_DD) || \
- defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE)
+#if defined(CONFIG_CRYPTO_DEV_SP_CCP)
/**
* ccp_present - check if a CCP device is present
@@ -71,7 +67,7 @@ unsigned int ccp_version(void);
*/
int ccp_enqueue_cmd(struct ccp_cmd *cmd);
-#else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */
+#else /* CONFIG_CRYPTO_DEV_CCP_SP_DEV is not enabled */
static inline int ccp_present(void)
{
@@ -88,7 +84,7 @@ static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd)
return -ENODEV;
}
-#endif /* CONFIG_CRYPTO_DEV_CCP_DD */
+#endif /* CONFIG_CRYPTO_DEV_SP_CCP */
/***** AES engine *****/
@@ -175,6 +171,8 @@ struct ccp_aes_engine {
enum ccp_aes_mode mode;
enum ccp_aes_action action;
+ u32 authsize;
+
struct scatterlist *key;
u32 key_len; /* In bytes */
@@ -231,6 +229,7 @@ enum ccp_xts_aes_unit_size {
* AES operation the new IV overwrites the old IV.
*/
struct ccp_xts_aes_engine {
+ enum ccp_aes_type type;
enum ccp_aes_action action;
enum ccp_xts_aes_unit_size unit_size;
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index 408bc09ce497..0e8cd6293deb 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CDEV_H
#define _LINUX_CDEV_H
@@ -17,7 +18,7 @@ struct cdev {
struct list_head list;
dev_t dev;
unsigned int count;
-};
+} __randomize_layout;
void cdev_init(struct cdev *, const struct file_operations *);
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index 6e8f209a6dff..67caa909e3e6 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* -- <linux/cdrom.h>
* General header file for linux CD-ROM drivers
@@ -12,6 +13,7 @@
#include <linux/fs.h> /* not really needed, later.. */
#include <linux/list.h>
+#include <scsi/scsi_common.h>
#include <uapi/linux/cdrom.h>
struct packet_command
@@ -20,7 +22,7 @@ struct packet_command
unsigned char *buffer;
unsigned int buflen;
int stat;
- struct request_sense *sense;
+ struct scsi_sense_hdr *sshdr;
unsigned char data_direction;
int quiet;
int timeout;
@@ -62,6 +64,7 @@ struct cdrom_device_info {
int for_data;
int (*exit)(struct cdrom_device_info *);
int mrw_mode_page;
+ __s64 last_media_change_ms;
};
struct cdrom_device_ops {
@@ -71,11 +74,9 @@ struct cdrom_device_ops {
int (*drive_status) (struct cdrom_device_info *, int);
unsigned int (*check_events) (struct cdrom_device_info *cdi,
unsigned int clearing, int slot);
- int (*media_changed) (struct cdrom_device_info *, int);
int (*tray_move) (struct cdrom_device_info *, int);
int (*lock_door) (struct cdrom_device_info *, int);
int (*select_speed) (struct cdrom_device_info *, int);
- int (*select_disc) (struct cdrom_device_info *, int);
int (*get_last_session) (struct cdrom_device_info *,
struct cdrom_multisession *);
int (*get_mcn) (struct cdrom_device_info *,
@@ -85,13 +86,20 @@ struct cdrom_device_ops {
/* play stuff */
int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *);
-/* driver specifications */
- const int capability; /* capability flags */
/* handle uniform packets for scsi type devices (scsi,atapi) */
int (*generic_packet) (struct cdrom_device_info *,
struct packet_command *);
+ int (*read_cdda_bpc)(struct cdrom_device_info *cdi, void __user *ubuf,
+ u32 lba, u32 nframes, u8 *last_sense);
+/* driver specifications */
+ const int capability; /* capability flags */
};
+int cdrom_multisession(struct cdrom_device_info *cdi,
+ struct cdrom_multisession *info);
+int cdrom_read_tocentry(struct cdrom_device_info *cdi,
+ struct cdrom_tocentry *entry);
+
/* the general block_device operations structure: */
extern int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
fmode_t mode);
@@ -100,9 +108,8 @@ extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
fmode_t mode, unsigned int cmd, unsigned long arg);
extern unsigned int cdrom_check_events(struct cdrom_device_info *cdi,
unsigned int clearing);
-extern int cdrom_media_changed(struct cdrom_device_info *);
-extern int register_cdrom(struct cdrom_device_info *cdi);
+extern int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi);
extern void unregister_cdrom(struct cdrom_device_info *cdi);
typedef struct {
diff --git a/include/linux/cdx/cdx_bus.h b/include/linux/cdx/cdx_bus.h
new file mode 100644
index 000000000000..35ef41d8a61a
--- /dev/null
+++ b/include/linux/cdx/cdx_bus.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * CDX bus public interface
+ *
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ *
+ */
+
+#ifndef _CDX_BUS_H_
+#define _CDX_BUS_H_
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+
+#define MAX_CDX_DEV_RESOURCES 4
+#define CDX_ANY_ID (0xFFFF)
+#define CDX_CONTROLLER_ID_SHIFT 4
+#define CDX_BUS_NUM_MASK 0xF
+
+/* Forward declaration for CDX controller */
+struct cdx_controller;
+
+enum {
+ CDX_DEV_RESET_CONF,
+};
+
+struct cdx_device_config {
+ u8 type;
+};
+
+typedef int (*cdx_scan_cb)(struct cdx_controller *cdx);
+
+typedef int (*cdx_dev_configure_cb)(struct cdx_controller *cdx,
+ u8 bus_num, u8 dev_num,
+ struct cdx_device_config *dev_config);
+
+/**
+ * CDX_DEVICE_DRIVER_OVERRIDE - macro used to describe a CDX device with
+ * override_only flags.
+ * @vend: the 16 bit CDX Vendor ID
+ * @dev: the 16 bit CDX Device ID
+ * @driver_override: the 32 bit CDX Device override_only
+ *
+ * This macro is used to create a struct cdx_device_id that matches only a
+ * driver_override device.
+ */
+#define CDX_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
+ .vendor = (vend), .device = (dev), .override_only = (driver_override)
+
+/**
+ * struct cdx_ops - Callbacks supported by CDX controller.
+ * @scan: scan the devices on the controller
+ * @dev_configure: configuration like reset, master_enable,
+ * msi_config etc for a CDX device
+ */
+struct cdx_ops {
+ cdx_scan_cb scan;
+ cdx_dev_configure_cb dev_configure;
+};
+
+/**
+ * struct cdx_controller: CDX controller object
+ * @dev: Linux device associated with the CDX controller.
+ * @priv: private data
+ * @id: Controller ID
+ * @ops: CDX controller ops
+ */
+struct cdx_controller {
+ struct device *dev;
+ void *priv;
+ u32 id;
+ struct cdx_ops *ops;
+};
+
+/**
+ * struct cdx_device - CDX device object
+ * @dev: Linux driver model device object
+ * @cdx: CDX controller associated with the device
+ * @vendor: Vendor ID for CDX device
+ * @device: Device ID for CDX device
+ * @bus_num: Bus number for this CDX device
+ * @dev_num: Device number for this device
+ * @res: array of MMIO region entries
+ * @res_attr: resource binary attribute
+ * @res_count: number of valid MMIO regions
+ * @dma_mask: Default DMA mask
+ * @flags: CDX device flags
+ * @req_id: Requestor ID associated with CDX device
+ * @driver_override: driver name to force a match; do not set directly,
+ * because core frees it; use driver_set_override() to
+ * set or clear it.
+ */
+struct cdx_device {
+ struct device dev;
+ struct cdx_controller *cdx;
+ u16 vendor;
+ u16 device;
+ u8 bus_num;
+ u8 dev_num;
+ struct resource res[MAX_CDX_DEV_RESOURCES];
+ u8 res_count;
+ u64 dma_mask;
+ u16 flags;
+ u32 req_id;
+ const char *driver_override;
+};
+
+#define to_cdx_device(_dev) \
+ container_of(_dev, struct cdx_device, dev)
+
+/**
+ * struct cdx_driver - CDX device driver
+ * @driver: Generic device driver
+ * @match_id_table: table of supported device matching IDs
+ * @probe: Function called when a device is added
+ * @remove: Function called when a device is removed
+ * @shutdown: Function called at shutdown time to quiesce the device
+ * @reset_prepare: Function called before the device is reset to notify the driver
+ * @reset_done: Function called after reset is complete to notify driver
+ * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
+ *		Most device drivers need not care about this flag as long
+ *		as all DMA is handled through the kernel DMA API. Some
+ *		special drivers, for example VFIO drivers, know how to
+ *		manage DMA themselves and set this flag so that the IOMMU
+ *		layer will allow them to set up and manage their own I/O
+ *		address space.
+ */
+struct cdx_driver {
+ struct device_driver driver;
+ const struct cdx_device_id *match_id_table;
+ int (*probe)(struct cdx_device *dev);
+ int (*remove)(struct cdx_device *dev);
+ void (*shutdown)(struct cdx_device *dev);
+ void (*reset_prepare)(struct cdx_device *dev);
+ void (*reset_done)(struct cdx_device *dev);
+ bool driver_managed_dma;
+};
+
+#define to_cdx_driver(_drv) \
+ container_of(_drv, struct cdx_driver, driver)
+
+/* Macro to avoid include chaining to get THIS_MODULE */
+#define cdx_driver_register(drv) \
+ __cdx_driver_register(drv, THIS_MODULE)
+
+/**
+ * __cdx_driver_register - registers a CDX device driver
+ * @cdx_driver: CDX driver to register
+ * @owner: module owner
+ *
+ * Return: -errno on failure, 0 on success.
+ */
+int __must_check __cdx_driver_register(struct cdx_driver *cdx_driver,
+ struct module *owner);
+
+/**
+ * cdx_driver_unregister - unregisters a device driver from the
+ * CDX bus.
+ * @cdx_driver: CDX driver to register
+ */
+void cdx_driver_unregister(struct cdx_driver *cdx_driver);
+
+extern struct bus_type cdx_bus_type;
+
+/**
+ * cdx_dev_reset - Reset CDX device
+ * @dev: device pointer
+ *
+ * Return: 0 for success, -errno on failure
+ */
+int cdx_dev_reset(struct device *dev);
+
+#endif /* _CDX_BUS_H_ */
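
A skeleton consumer of this new bus interface (not from the patch); the
vendor ID, driver name and callback bodies are hypothetical placeholders:

    #include <linux/cdx/cdx_bus.h>
    #include <linux/mod_devicetable.h>
    #include <linux/module.h>

    static const struct cdx_device_id my_cdx_ids[] = {
    	{ .vendor = 0x10ee, .device = CDX_ANY_ID },	/* hypothetical IDs */
    	{ },
    };

    static int my_cdx_probe(struct cdx_device *cdx_dev)
    {
    	/* MMIO regions live in cdx_dev->res[0..res_count-1] */
    	return 0;
    }

    static int my_cdx_remove(struct cdx_device *cdx_dev)
    {
    	return 0;
    }

    static struct cdx_driver my_cdx_driver = {
    	.driver		= { .name = "my_cdx" },
    	.match_id_table	= my_cdx_ids,
    	.probe		= my_cdx_probe,
    	.remove		= my_cdx_remove,
    };

    module_driver(my_cdx_driver, cdx_driver_register, cdx_driver_unregister);
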
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index a6747789fe5c..6b138fa97db8 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_AUTH_H
#define _FS_CEPH_AUTH_H
@@ -31,8 +32,6 @@ struct ceph_auth_handshake {
};
struct ceph_auth_client_ops {
- const char *name;
-
/*
* true if we are authenticated and can connect to
* services.
@@ -51,8 +50,10 @@ struct ceph_auth_client_ops {
* another request.
*/
int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end);
- int (*handle_reply)(struct ceph_auth_client *ac, int result,
- void *buf, void *end);
+ int (*handle_reply)(struct ceph_auth_client *ac, u64 global_id,
+ void *buf, void *end, u8 *session_key,
+ int *session_key_len, u8 *con_secret,
+ int *con_secret_len);
/*
* Create authorizer for connecting to a service, and verify
@@ -63,8 +64,15 @@ struct ceph_auth_client_ops {
/* ensure that an existing authorizer is up to date */
int (*update_authorizer)(struct ceph_auth_client *ac, int peer_type,
struct ceph_auth_handshake *auth);
+ int (*add_authorizer_challenge)(struct ceph_auth_client *ac,
+ struct ceph_authorizer *a,
+ void *challenge_buf,
+ int challenge_buf_len);
int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
- struct ceph_authorizer *a);
+ struct ceph_authorizer *a,
+ void *reply, int reply_len,
+ u8 *session_key, int *session_key_len,
+ u8 *con_secret, int *con_secret_len);
void (*invalidate_authorizer)(struct ceph_auth_client *ac,
int peer_type);
@@ -90,11 +98,17 @@ struct ceph_auth_client {
const struct ceph_crypto_key *key; /* our secret key */
unsigned want_keys; /* which services we want */
+ int preferred_mode; /* CEPH_CON_MODE_* */
+ int fallback_mode; /* ditto */
+
struct mutex mutex;
};
-extern struct ceph_auth_client *ceph_auth_init(const char *name,
- const struct ceph_crypto_key *key);
+void ceph_auth_set_global_id(struct ceph_auth_client *ac, u64 global_id);
+
+struct ceph_auth_client *ceph_auth_init(const char *name,
+ const struct ceph_crypto_key *key,
+ const int *con_modes);
extern void ceph_auth_destroy(struct ceph_auth_client *ac);
extern void ceph_auth_reset(struct ceph_auth_client *ac);
@@ -108,17 +122,22 @@ int ceph_auth_entity_name_encode(const char *name, void **p, void *end);
extern int ceph_build_auth(struct ceph_auth_client *ac,
void *msg_buf, size_t msg_len);
-
extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac);
-extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
- int peer_type,
- struct ceph_auth_handshake *auth);
+
+int __ceph_auth_get_authorizer(struct ceph_auth_client *ac,
+ struct ceph_auth_handshake *auth,
+ int peer_type, bool force_new,
+ int *proto, int *pref_mode, int *fallb_mode);
void ceph_auth_destroy_authorizer(struct ceph_authorizer *a);
-extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
- int peer_type,
- struct ceph_auth_handshake *a);
-extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
- struct ceph_authorizer *a);
+int ceph_auth_add_authorizer_challenge(struct ceph_auth_client *ac,
+ struct ceph_authorizer *a,
+ void *challenge_buf,
+ int challenge_buf_len);
+int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
+ struct ceph_authorizer *a,
+ void *reply, int reply_len,
+ u8 *session_key, int *session_key_len,
+ u8 *con_secret, int *con_secret_len);
extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac,
int peer_type);
@@ -138,4 +157,34 @@ int ceph_auth_check_message_signature(struct ceph_auth_handshake *auth,
return auth->check_message_signature(auth, msg);
return 0;
}
+
+int ceph_auth_get_request(struct ceph_auth_client *ac, void *buf, int buf_len);
+int ceph_auth_handle_reply_more(struct ceph_auth_client *ac, void *reply,
+ int reply_len, void *buf, int buf_len);
+int ceph_auth_handle_reply_done(struct ceph_auth_client *ac,
+ u64 global_id, void *reply, int reply_len,
+ u8 *session_key, int *session_key_len,
+ u8 *con_secret, int *con_secret_len);
+bool ceph_auth_handle_bad_method(struct ceph_auth_client *ac,
+ int used_proto, int result,
+ const int *allowed_protos, int proto_cnt,
+ const int *allowed_modes, int mode_cnt);
+
+int ceph_auth_get_authorizer(struct ceph_auth_client *ac,
+ struct ceph_auth_handshake *auth,
+ int peer_type, void *buf, int *buf_len);
+int ceph_auth_handle_svc_reply_more(struct ceph_auth_client *ac,
+ struct ceph_auth_handshake *auth,
+ void *reply, int reply_len,
+ void *buf, int *buf_len);
+int ceph_auth_handle_svc_reply_done(struct ceph_auth_client *ac,
+ struct ceph_auth_handshake *auth,
+ void *reply, int reply_len,
+ u8 *session_key, int *session_key_len,
+ u8 *con_secret, int *con_secret_len);
+bool ceph_auth_handle_bad_authorizer(struct ceph_auth_client *ac,
+ int peer_type, int used_proto, int result,
+ const int *allowed_protos, int proto_cnt,
+ const int *allowed_modes, int mode_cnt);
+
#endif
diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h
index 07ca15e76100..11cdc7c60480 100644
--- a/include/linux/ceph/buffer.h
+++ b/include/linux/ceph/buffer.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __FS_CEPH_BUFFER_H
#define __FS_CEPH_BUFFER_H
@@ -29,7 +30,8 @@ static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
static inline void ceph_buffer_put(struct ceph_buffer *b)
{
- kref_put(&b->kref, ceph_buffer_release);
+ if (b)
+ kref_put(&b->kref, ceph_buffer_release);
}
extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
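
The NULL check added above makes error paths simpler for callers; a small
sketch of the pattern it enables (function name hypothetical):

    #include <linux/ceph/buffer.h>

    static void destroy_xattrs(struct ceph_buffer *blob)
    {
    	/* safe even if the buffer was never allocated */
    	ceph_buffer_put(blob);
    }
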
diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h
index 51c5bd64bd00..d5a5da838caf 100644
--- a/include/linux/ceph/ceph_debug.h
+++ b/include/linux/ceph/ceph_debug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_DEBUG_H
#define _FS_CEPH_DEBUG_H
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index f0f6c537b64c..3a47acd9cc14 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CEPH_FEATURES
#define __CEPH_FEATURES
@@ -7,17 +8,18 @@
* feature. Base case is 1 (first use).
*/
#define CEPH_FEATURE_INCARNATION_1 (0ull)
-#define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // CEPH_FEATURE_SERVER_JEWEL
+#define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // SERVER_JEWEL
+#define CEPH_FEATURE_INCARNATION_3 ((1ull<<57)|(1ull<<28)) // SERVER_MIMIC
#define DEFINE_CEPH_FEATURE(bit, incarnation, name) \
- const static uint64_t CEPH_FEATURE_##name = (1ULL<<bit); \
- const static uint64_t CEPH_FEATUREMASK_##name = \
+ static const uint64_t __maybe_unused CEPH_FEATURE_##name = (1ULL<<bit); \
+ static const uint64_t __maybe_unused CEPH_FEATUREMASK_##name = \
(1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation);
/* this bit is ignored but still advertised by release *when* */
#define DEFINE_CEPH_FEATURE_DEPRECATED(bit, incarnation, name, when) \
- const static uint64_t DEPRECATED_CEPH_FEATURE_##name = (1ULL<<bit); \
- const static uint64_t DEPRECATED_CEPH_FEATUREMASK_##name = \
+ static const uint64_t __maybe_unused DEPRECATED_CEPH_FEATURE_##name = (1ULL<<bit); \
+ static const uint64_t __maybe_unused DEPRECATED_CEPH_FEATUREMASK_##name = \
(1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation);
/*
@@ -57,7 +59,7 @@
* because 10.2.z (jewel) did not care if its peers advertised this
* feature bit.
*
- * - In the second phase we stop advertising the the bit and call it
+ * - In the second phase we stop advertising the bit and call it
* RETIRED. This can normally be done in the *next* major release
* following the one in which we marked the feature DEPRECATED. In
* the above example, for 12.0.z (luminous) we can say:
@@ -74,7 +76,7 @@
DEFINE_CEPH_FEATURE( 0, 1, UID)
DEFINE_CEPH_FEATURE( 1, 1, NOSRCADDR)
DEFINE_CEPH_FEATURE_RETIRED( 2, 1, MONCLOCKCHECK, JEWEL, LUMINOUS)
-
+DEFINE_CEPH_FEATURE( 2, 3, SERVER_NAUTILUS)
DEFINE_CEPH_FEATURE( 3, 1, FLOCK)
DEFINE_CEPH_FEATURE( 4, 1, SUBSCRIBE2)
DEFINE_CEPH_FEATURE( 5, 1, MONNAMES)
@@ -113,7 +115,7 @@ DEFINE_CEPH_FEATURE(25, 1, CRUSH_TUNABLES2)
DEFINE_CEPH_FEATURE(26, 1, CREATEPOOLID)
DEFINE_CEPH_FEATURE(27, 1, REPLY_CREATE_INODE)
DEFINE_CEPH_FEATURE_RETIRED(28, 1, OSD_HBMSGS, HAMMER, JEWEL)
-DEFINE_CEPH_FEATURE(28, 2, SERVER_M)
+DEFINE_CEPH_FEATURE(28, 2, SERVER_MIMIC)
DEFINE_CEPH_FEATURE(29, 1, MDSENC)
DEFINE_CEPH_FEATURE(30, 1, OSDHASHPSPOOL)
DEFINE_CEPH_FEATURE(31, 1, MON_SINGLE_PAXOS) // deprecate me
@@ -164,9 +166,9 @@ DEFINE_CEPH_FEATURE(58, 1, FS_FILE_LAYOUT_V2) // overlap
DEFINE_CEPH_FEATURE(59, 1, FS_BTIME)
DEFINE_CEPH_FEATURE(59, 1, FS_CHANGE_ATTR) // overlap
DEFINE_CEPH_FEATURE(59, 1, MSG_ADDR2) // overlap
-DEFINE_CEPH_FEATURE(60, 1, BLKIN_TRACING) // *do not share this bit*
+DEFINE_CEPH_FEATURE(60, 1, OSD_RECOVERY_DELETES) // *do not share this bit*
+DEFINE_CEPH_FEATURE(61, 1, CEPHX_V2) // *do not share this bit*
-DEFINE_CEPH_FEATURE(61, 1, RESERVED2) // unused, but slow down!
DEFINE_CEPH_FEATURE(62, 1, RESERVED) // do not use; used as a sentinel
DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facing
@@ -176,13 +178,16 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
*/
#define CEPH_FEATURES_SUPPORTED_DEFAULT \
(CEPH_FEATURE_NOSRCADDR | \
+ CEPH_FEATURE_SERVER_NAUTILUS | \
CEPH_FEATURE_FLOCK | \
CEPH_FEATURE_SUBSCRIBE2 | \
+ CEPH_FEATURE_MONNAMES | \
CEPH_FEATURE_RECONNECT_SEQ | \
CEPH_FEATURE_DIRLAYOUTHASH | \
CEPH_FEATURE_PGID64 | \
CEPH_FEATURE_PGPOOL3 | \
CEPH_FEATURE_OSDENC | \
+ CEPH_FEATURE_MONENC | \
CEPH_FEATURE_CRUSH_TUNABLES | \
CEPH_FEATURE_SERVER_LUMINOUS | \
CEPH_FEATURE_RESEND_ON_SPLIT | \
@@ -192,6 +197,7 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
CEPH_FEATURE_MSG_AUTH | \
CEPH_FEATURE_CRUSH_TUNABLES2 | \
CEPH_FEATURE_REPLY_CREATE_INODE | \
+ CEPH_FEATURE_SERVER_MIMIC | \
CEPH_FEATURE_MDSENC | \
CEPH_FEATURE_OSDHASHPSPOOL | \
CEPH_FEATURE_OSD_CACHEPOOL | \
@@ -203,19 +209,16 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \
CEPH_FEATURE_MSGR_KEEPALIVE2 | \
CEPH_FEATURE_OSD_POOLRESEND | \
+ CEPH_FEATURE_MDS_QUOTA | \
CEPH_FEATURE_CRUSH_V4 | \
CEPH_FEATURE_NEW_OSDOP_ENCODING | \
CEPH_FEATURE_SERVER_JEWEL | \
CEPH_FEATURE_MON_STATEFUL_SUB | \
CEPH_FEATURE_CRUSH_TUNABLES5 | \
- CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING)
-
-#define CEPH_FEATURES_REQUIRED_DEFAULT \
- (CEPH_FEATURE_NOSRCADDR | \
- CEPH_FEATURE_SUBSCRIBE2 | \
- CEPH_FEATURE_RECONNECT_SEQ | \
- CEPH_FEATURE_PGID64 | \
- CEPH_FEATURE_PGPOOL3 | \
- CEPH_FEATURE_OSDENC)
+ CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \
+ CEPH_FEATURE_MSG_ADDR2 | \
+ CEPH_FEATURE_CEPHX_V2)
+
+#define CEPH_FEATURES_REQUIRED_DEFAULT 0
#endif
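
A sketch (not from the patch) of why the incarnation-qualified feature masks
matter once a retired bit is reused, as SERVER_NAUTILUS reuses bit 2 above;
the function name is hypothetical:

    #include <linux/ceph/ceph_features.h>
    #include <linux/types.h>

    static bool peer_is_nautilus(u64 peer_features)
    {
    	/*
    	 * Bit 2 was retired (MONCLOCKCHECK) and later reused for
    	 * SERVER_NAUTILUS, so compare against the full feature mask
    	 * (bit plus incarnation bits), never the bare bit.
    	 */
    	return (peer_features & CEPH_FEATUREMASK_SERVER_NAUTILUS) ==
    	       CEPH_FEATUREMASK_SERVER_NAUTILUS;
    }
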
diff --git a/include/linux/ceph/ceph_frag.h b/include/linux/ceph/ceph_frag.h
index 146507df8650..97bab0adc58a 100644
--- a/include/linux/ceph/ceph_frag.h
+++ b/include/linux/ceph/ceph_frag.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef FS_CEPH_FRAG_H
#define FS_CEPH_FRAG_H
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index edf5b04b918a..49586ff26152 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ceph_fs.h - Ceph constants and data types to share between kernel and
* user space.
@@ -27,8 +28,8 @@
#define CEPH_INO_ROOT 1
-#define CEPH_INO_CEPH 2 /* hidden .ceph dir */
-#define CEPH_INO_DOTDOT 3 /* used by ceph fuse for parent (..) */
+#define CEPH_INO_CEPH 2 /* hidden .ceph dir */
+#define CEPH_INO_GLOBAL_SNAPREALM 3 /* global dummy snaprealm */
/* arbitrary limit on max # of monitors (cluster of 3 is typical) */
#define CEPH_MAX_MON 31
@@ -92,8 +93,19 @@ struct ceph_dir_layout {
#define CEPH_AUTH_NONE 0x1
#define CEPH_AUTH_CEPHX 0x2
+#define CEPH_AUTH_MODE_NONE 0
+#define CEPH_AUTH_MODE_AUTHORIZER 1
+#define CEPH_AUTH_MODE_MON 10
+
+/* msgr2 protocol modes */
+#define CEPH_CON_MODE_UNKNOWN 0x0
+#define CEPH_CON_MODE_CRC 0x1
+#define CEPH_CON_MODE_SECURE 0x2
+
#define CEPH_AUTH_UID_DEFAULT ((__u64) -1)
+const char *ceph_auth_proto_name(int proto);
+const char *ceph_con_mode_name(int mode);
/*********************************************
* message layer
@@ -129,10 +141,12 @@ struct ceph_dir_layout {
#define CEPH_MSG_CLIENT_REQUEST 24
#define CEPH_MSG_CLIENT_REQUEST_FORWARD 25
#define CEPH_MSG_CLIENT_REPLY 26
+#define CEPH_MSG_CLIENT_METRICS 29
#define CEPH_MSG_CLIENT_CAPS 0x310
#define CEPH_MSG_CLIENT_LEASE 0x311
#define CEPH_MSG_CLIENT_SNAP 0x312
#define CEPH_MSG_CLIENT_CAPRELEASE 0x313
+#define CEPH_MSG_CLIENT_QUOTA 0x314
/* pool ops */
#define CEPH_MSG_POOLOP_REPLY 48
@@ -167,6 +181,8 @@ struct ceph_mon_request_header {
struct ceph_mon_statfs {
struct ceph_mon_request_header monhdr;
struct ceph_fsid fsid;
+ __u8 contains_data_pool;
+ __le64 data_pool;
} __attribute__ ((packed));
struct ceph_statfs {
@@ -283,8 +299,11 @@ enum {
CEPH_SESSION_FLUSHMSG_ACK,
CEPH_SESSION_FORCE_RO,
CEPH_SESSION_REJECT,
+ CEPH_SESSION_REQUEST_FLUSH_MDLOG,
};
+#define CEPH_SESSION_BLOCKLISTED (1 << 0) /* session blocklisted */
+
extern const char *ceph_session_op_name(int op);
struct ceph_mds_session_head {
@@ -309,6 +328,7 @@ enum {
CEPH_MDS_OP_LOOKUPPARENT = 0x00103,
CEPH_MDS_OP_LOOKUPINO = 0x00104,
CEPH_MDS_OP_LOOKUPNAME = 0x00105,
+ CEPH_MDS_OP_GETVXATTR = 0x00106,
CEPH_MDS_OP_SETXATTR = 0x01105,
CEPH_MDS_OP_RMXATTR = 0x01106,
@@ -413,12 +433,13 @@ union ceph_mds_request_args {
__le32 stripe_unit; /* layout for newly created file */
__le32 stripe_count; /* ... */
__le32 object_size;
- __le32 file_replication;
- __le32 mask; /* CEPH_CAP_* */
- __le32 old_size;
+ __le32 pool;
+ __le32 mask; /* CEPH_CAP_* */
+ __le64 old_size;
} __attribute__ ((packed)) open;
struct {
__le32 flags;
+ __le32 osdmap_epoch; /* used for setting file/dir layouts */
} __attribute__ ((packed)) setxattr;
struct {
struct ceph_file_layout_legacy layout;
@@ -432,12 +453,33 @@ union ceph_mds_request_args {
__le64 length; /* num bytes to lock from start */
__u8 wait; /* will caller wait for lock to become available? */
} __attribute__ ((packed)) filelock_change;
+ struct {
+ __le32 mask; /* CEPH_CAP_* */
+ __le64 snapid;
+ __le64 parent;
+ __le32 hash;
+ } __attribute__ ((packed)) lookupino;
} __attribute__ ((packed));
-#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */
-#define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */
+union ceph_mds_request_args_ext {
+ union ceph_mds_request_args old;
+ struct {
+ __le32 mode;
+ __le32 uid;
+ __le32 gid;
+ struct ceph_timespec mtime;
+ struct ceph_timespec atime;
+ __le64 size, old_size; /* old_size needed by truncate */
+ __le32 mask; /* CEPH_SETATTR_* */
+ struct ceph_timespec btime;
+ } __attribute__ ((packed)) setattr_ext;
+};
-struct ceph_mds_request_head {
+#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */
+#define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */
+#define CEPH_MDS_FLAG_ASYNC 4 /* request is asynchronous */
+
+struct ceph_mds_request_head_old {
__le64 oldest_client_tid;
__le32 mdsmap_epoch; /* on client */
__le32 flags; /* CEPH_MDS_FLAG_* */
@@ -450,6 +492,22 @@ struct ceph_mds_request_head {
union ceph_mds_request_args args;
} __attribute__ ((packed));
+#define CEPH_MDS_REQUEST_HEAD_VERSION 1
+
+struct ceph_mds_request_head {
+ __le16 version; /* struct version */
+ __le64 oldest_client_tid;
+ __le32 mdsmap_epoch; /* on client */
+ __le32 flags; /* CEPH_MDS_FLAG_* */
+ __u8 num_retry, num_fwd; /* count retry, fwd attempts */
+ __le16 num_releases; /* # include cap/lease release records */
+ __le32 op; /* mds op code */
+ __le32 caller_uid, caller_gid;
+ __le64 ino; /* use this ino for openc, mkdir, mknod,
+ etc. (if replaying) */
+ union ceph_mds_request_args_ext args;
+} __attribute__ ((packed));
+
/* cap/lease release record */
struct ceph_mds_request_release {
__le64 ino, cap_id; /* ino and unique cap id */
@@ -520,6 +578,9 @@ struct ceph_mds_reply_lease {
__le32 seq;
} __attribute__ ((packed));
+#define CEPH_LEASE_VALID (1 | 2) /* old and new bit values */
+#define CEPH_LEASE_PRIMARY_LINK 4 /* primary linkage */
+
struct ceph_mds_reply_dirfrag {
__le32 frag; /* fragment */
__le32 auth; /* auth mds, if this is a delegation point */
@@ -554,6 +615,7 @@ struct ceph_filelock {
#define CEPH_FILE_MODE_RDWR 3 /* RD | WR */
#define CEPH_FILE_MODE_LAZY 4 /* lazy io */
#define CEPH_FILE_MODE_BITS 4
+#define CEPH_FILE_MODE_MASK ((1 << CEPH_FILE_MODE_BITS) - 1)
int ceph_flags_to_mode(int flags);
@@ -624,6 +686,7 @@ int ceph_flags_to_mode(int flags);
CEPH_CAP_XATTR_SHARED)
#define CEPH_STAT_CAP_INLINE_DATA (CEPH_CAP_FILE_SHARED | \
CEPH_CAP_FILE_RD)
+#define CEPH_STAT_RSTAT CEPH_CAP_FILE_WREXTEND
#define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \
CEPH_CAP_LINK_SHARED | \
@@ -644,10 +707,19 @@ int ceph_flags_to_mode(int flags);
#define CEPH_CAP_ANY (CEPH_CAP_ANY_RD | CEPH_CAP_ANY_EXCL | \
CEPH_CAP_ANY_FILE_WR | CEPH_CAP_FILE_LAZYIO | \
CEPH_CAP_PIN)
+#define CEPH_CAP_ALL_FILE (CEPH_CAP_PIN | CEPH_CAP_ANY_SHARED | \
+ CEPH_CAP_AUTH_EXCL | CEPH_CAP_XATTR_EXCL | \
+ CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR)
#define CEPH_CAP_LOCKS (CEPH_LOCK_IFILE | CEPH_LOCK_IAUTH | CEPH_LOCK_ILINK | \
CEPH_LOCK_IXATTR)
+/* cap masks async dir operations */
+#define CEPH_CAP_DIR_CREATE CEPH_CAP_FILE_CACHE
+#define CEPH_CAP_DIR_UNLINK CEPH_CAP_FILE_RD
+#define CEPH_CAP_ANY_DIR_OPS (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD | \
+ CEPH_CAP_FILE_WREXTEND | CEPH_CAP_FILE_LAZYIO)
+
int ceph_caps_for_mode(int mode);
enum {
@@ -669,7 +741,9 @@ enum {
extern const char *ceph_cap_op_name(int op);
/* flags field in client cap messages (version >= 10) */
-#define CEPH_CLIENT_CAPS_SYNC (0x1)
+#define CEPH_CLIENT_CAPS_SYNC (1<<0)
+#define CEPH_CLIENT_CAPS_NO_CAPSNAP (1<<1)
+#define CEPH_CLIENT_CAPS_PENDING_CAPSNAP (1<<2)
/*
* caps message, used for capability callbacks, acks, requests, etc.
@@ -694,7 +768,7 @@ struct ceph_mds_caps {
__le32 xattr_len;
__le64 xattr_version;
- /* filelock */
+ /* a union of non-export and export bodies. */
__le64 size, max_size, truncate_size;
__le32 truncate_seq;
struct ceph_timespec mtime, atime, ctime;
@@ -802,4 +876,20 @@ struct ceph_mds_snap_realm {
} __attribute__ ((packed));
/* followed by my snap list, then prior parent snap list */
+/*
+ * quotas
+ */
+struct ceph_mds_quota {
+ __le64 ino; /* ino */
+ struct ceph_timespec rctime;
+ __le64 rbytes; /* dir stats */
+ __le64 rfiles;
+ __le64 rsubdirs;
+ __u8 struct_v; /* compat */
+ __u8 struct_compat;
+ __le32 struct_len;
+ __le64 max_bytes; /* quota max. bytes */
+ __le64 max_files; /* quota max. files */
+} __attribute__ ((packed));
+
#endif
diff --git a/include/linux/ceph/ceph_hash.h b/include/linux/ceph/ceph_hash.h
index d099c3f90236..fda474c7a5d6 100644
--- a/include/linux/ceph/ceph_hash.h
+++ b/include/linux/ceph/ceph_hash.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef FS_CEPH_HASH_H
#define FS_CEPH_HASH_H
diff --git a/include/linux/ceph/cls_lock_client.h b/include/linux/ceph/cls_lock_client.h
index 0594d3bba774..17bc7584d1fe 100644
--- a/include/linux/ceph/cls_lock_client.h
+++ b/include/linux/ceph/cls_lock_client.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CEPH_CLS_LOCK_CLIENT_H
#define _LINUX_CEPH_CLS_LOCK_CLIENT_H
@@ -51,4 +52,7 @@ int ceph_cls_lock_info(struct ceph_osd_client *osdc,
char *lock_name, u8 *type, char **tag,
struct ceph_locker **lockers, u32 *num_lockers);
+int ceph_cls_assert_locked(struct ceph_osd_request *req, int which,
+ char *lock_name, u8 type, char *cookie, char *tag);
+
#endif
diff --git a/include/linux/ceph/debugfs.h b/include/linux/ceph/debugfs.h
index 29cf897cc5cd..8b3a1a7a953a 100644
--- a/include/linux/ceph/debugfs.h
+++ b/include/linux/ceph/debugfs.h
@@ -1,26 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_DEBUGFS_H
#define _FS_CEPH_DEBUGFS_H
-#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/types.h>
-#define CEPH_DEFINE_SHOW_FUNC(name) \
-static int name##_open(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, name, inode->i_private); \
-} \
- \
-static const struct file_operations name##_fops = { \
- .open = name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
-};
-
/* debugfs.c */
-extern int ceph_debugfs_init(void);
+extern void ceph_debugfs_init(void);
extern void ceph_debugfs_cleanup(void);
-extern int ceph_debugfs_client_init(struct ceph_client *client);
+extern void ceph_debugfs_client_init(struct ceph_client *client);
extern void ceph_debugfs_client_cleanup(struct ceph_client *client);
#endif
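
The removed CEPH_DEFINE_SHOW_FUNC boilerplate is covered by the generic
DEFINE_SHOW_ATTRIBUTE() helper from <linux/seq_file.h>; a sketch of the
replacement pattern, with a hypothetical show function:

    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    static int monc_show(struct seq_file *s, void *p)
    {
    	/* dump monitor client state into @s */
    	return 0;
    }
    DEFINE_SHOW_ATTRIBUTE(monc);	/* generates monc_fops from monc_show() */

    /* usage: debugfs_create_file("monc", 0400, dir, client, &monc_fops); */
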
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
index 14af9b70d301..04f3ace5787b 100644
--- a/include/linux/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CEPH_DECODE_H
#define __CEPH_DECODE_H
@@ -193,16 +194,22 @@ ceph_decode_skip_n(p, end, sizeof(u8), bad)
} while (0)
/*
- * struct ceph_timespec <-> struct timespec
+ * struct ceph_timespec <-> struct timespec64
*/
-static inline void ceph_decode_timespec(struct timespec *ts,
- const struct ceph_timespec *tv)
+static inline void ceph_decode_timespec64(struct timespec64 *ts,
+ const struct ceph_timespec *tv)
{
- ts->tv_sec = (__kernel_time_t)le32_to_cpu(tv->tv_sec);
+ /*
+ * This will still overflow in year 2106. We could extend
+ * the protocol to steal two more bits from tv_nsec to
+ * add three more 136 year epochs after that the way ext4
+ * does if necessary.
+ */
+ ts->tv_sec = (time64_t)le32_to_cpu(tv->tv_sec);
ts->tv_nsec = (long)le32_to_cpu(tv->tv_nsec);
}
-static inline void ceph_encode_timespec(struct ceph_timespec *tv,
- const struct timespec *ts)
+static inline void ceph_encode_timespec64(struct ceph_timespec *tv,
+ const struct timespec64 *ts)
{
tv->tv_sec = cpu_to_le32((u32)ts->tv_sec);
tv->tv_nsec = cpu_to_le32((u32)ts->tv_nsec);
@@ -211,18 +218,35 @@ static inline void ceph_encode_timespec(struct ceph_timespec *tv,
/*
* sockaddr_storage <-> ceph_sockaddr
*/
-static inline void ceph_encode_addr(struct ceph_entity_addr *a)
+#define CEPH_ENTITY_ADDR_TYPE_NONE 0
+#define CEPH_ENTITY_ADDR_TYPE_LEGACY __cpu_to_le32(1)
+#define CEPH_ENTITY_ADDR_TYPE_MSGR2 __cpu_to_le32(2)
+#define CEPH_ENTITY_ADDR_TYPE_ANY __cpu_to_le32(3)
+
+static inline void ceph_encode_banner_addr(struct ceph_entity_addr *a)
{
__be16 ss_family = htons(a->in_addr.ss_family);
a->in_addr.ss_family = *(__u16 *)&ss_family;
+
+ /* Banner addresses require TYPE_NONE */
+ a->type = CEPH_ENTITY_ADDR_TYPE_NONE;
}
-static inline void ceph_decode_addr(struct ceph_entity_addr *a)
+static inline void ceph_decode_banner_addr(struct ceph_entity_addr *a)
{
__be16 ss_family = *(__be16 *)&a->in_addr.ss_family;
a->in_addr.ss_family = ntohs(ss_family);
WARN_ON(a->in_addr.ss_family == 512);
+ a->type = CEPH_ENTITY_ADDR_TYPE_LEGACY;
}
+extern int ceph_decode_entity_addr(void **p, void *end,
+ struct ceph_entity_addr *addr);
+int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
+ struct ceph_entity_addr *addr);
+
+int ceph_entity_addr_encoding_len(const struct ceph_entity_addr *addr);
+void ceph_encode_entity_addr(void **p, const struct ceph_entity_addr *addr);
+
/*
* encoders
*/
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 8a79587e1317..4497d0a6772c 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_LIBCEPH_H
#define _FS_CEPH_LIBCEPH_H
@@ -30,10 +31,11 @@
#define CEPH_OPT_FSID (1<<0)
#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */
#define CEPH_OPT_MYIP (1<<2) /* specified my ip */
-#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */
-#define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */
-#define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */
-#define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */
+#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes (msgr1) */
+#define CEPH_OPT_TCP_NODELAY (1<<4) /* TCP_NODELAY on TCP sockets */
+#define CEPH_OPT_NOMSGSIGN (1<<5) /* don't sign msgs (msgr1) */
+#define CEPH_OPT_ABORT_ON_FULL (1<<6) /* abort w/ ENOSPC when full */
+#define CEPH_OPT_RXBOUNCE (1<<7) /* double-buffer read data */
#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY)
@@ -50,9 +52,11 @@ struct ceph_options {
unsigned long osd_idle_ttl; /* jiffies */
unsigned long osd_keepalive_timeout; /* jiffies */
unsigned long osd_request_timeout; /* jiffies */
+ u32 read_from_replica; /* CEPH_OSD_FLAG_BALANCE/LOCALIZE_READS */
+ int con_modes[2]; /* CEPH_CON_MODE_* */
/*
- * any type that can't be simply compared or doesn't need need
+ * any type that can't be simply compared or doesn't need
* to be compared should go beyond this point,
* ceph_compare_options() should be updated accordingly
*/
@@ -62,6 +66,7 @@ struct ceph_options {
int num_mon;
char *name;
struct ceph_crypto_key *key;
+ struct rb_root crush_locs;
};
/*
@@ -71,6 +76,7 @@ struct ceph_options {
#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000)
#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000)
#define CEPH_OSD_REQUEST_TIMEOUT_DEFAULT 0 /* no timeout */
+#define CEPH_READ_FROM_REPLICA_DEFAULT 0 /* read from primary */
#define CEPH_MONC_HUNT_INTERVAL msecs_to_jiffies(3 * 1000)
#define CEPH_MONC_PING_INTERVAL msecs_to_jiffies(10 * 1000)
@@ -78,31 +84,20 @@ struct ceph_options {
#define CEPH_MONC_HUNT_BACKOFF 2
#define CEPH_MONC_HUNT_MAX_MULT 10
+#define CEPH_MSG_MAX_CONTROL_LEN (16*1024*1024)
#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
-#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024)
-
-#define CEPH_AUTH_NAME_DEFAULT "guest"
/*
- * Delay telling the MDS we no longer want caps, in case we reopen
- * the file. Delay a minimum amount of time, even if we send a cap
- * message for some other reason. Otherwise, take the oppotunity to
- * update the mds to avoid sending another message later.
+ * The largest possible rbd data object is 32M.
+ * The largest possible rbd object map object is 64M.
+ *
+ * There is no limit on the size of cephfs objects, but it has to obey
+ * rsize and wsize mount options anyway.
*/
-#define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT 5 /* cap release delay */
-#define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT 60 /* cap release delay */
-
-#define CEPH_CAP_RELEASE_SAFETY_DEFAULT (CEPH_CAPS_PER_RELEASE * 4)
-
-/* mount state */
-enum {
- CEPH_MOUNT_MOUNTING,
- CEPH_MOUNT_MOUNTED,
- CEPH_MOUNT_UNMOUNTING,
- CEPH_MOUNT_UNMOUNTED,
- CEPH_MOUNT_SHUTDOWN,
-};
+#define CEPH_MSG_MAX_DATA_LEN (64*1024*1024)
+
+#define CEPH_AUTH_NAME_DEFAULT "guest"
static inline unsigned long ceph_timeout_jiffies(unsigned long timeout)
{
@@ -148,6 +143,10 @@ struct ceph_client {
#define from_msgr(ms) container_of(ms, struct ceph_client, msgr)
+static inline bool ceph_msgr2(struct ceph_client *client)
+{
+ return client->options->con_modes[0] != CEPH_CON_MODE_UNKNOWN;
+}
/*
* snapshots
@@ -189,7 +188,7 @@ static inline int calc_pages_for(u64 off, u64 len)
#define RB_CMP3WAY(a, b) ((a) < (b) ? -1 : (a) > (b))
#define DEFINE_RB_INSDEL_FUNCS2(name, type, keyfld, cmpexp, keyexp, nodefld) \
-static void insert_##name(struct rb_root *root, type *t) \
+static bool __insert_##name(struct rb_root *root, type *t) \
{ \
struct rb_node **n = &root->rb_node; \
struct rb_node *parent = NULL; \
@@ -207,11 +206,17 @@ static void insert_##name(struct rb_root *root, type *t) \
else if (cmp > 0) \
n = &(*n)->rb_right; \
else \
- BUG(); \
+ return false; \
} \
\
rb_link_node(&t->nodefld, parent, n); \
rb_insert_color(&t->nodefld, root); \
+ return true; \
+} \
+static void __maybe_unused insert_##name(struct rb_root *root, type *t) \
+{ \
+ if (!__insert_##name(root, t)) \
+ BUG(); \
} \
static void erase_##name(struct rb_root *root, type *t) \
{ \
@@ -269,22 +274,30 @@ DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld)
extern struct kmem_cache *ceph_inode_cachep;
extern struct kmem_cache *ceph_cap_cachep;
+extern struct kmem_cache *ceph_cap_snap_cachep;
extern struct kmem_cache *ceph_cap_flush_cachep;
extern struct kmem_cache *ceph_dentry_cachep;
extern struct kmem_cache *ceph_file_cachep;
+extern struct kmem_cache *ceph_dir_file_cachep;
+extern struct kmem_cache *ceph_mds_request_cachep;
+extern mempool_t *ceph_wb_pagevec_pool;
/* ceph_common.c */
extern bool libceph_compatible(void *data);
extern const char *ceph_msg_type_name(int type);
extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
-extern void *ceph_kvmalloc(size_t size, gfp_t flags);
-
-extern struct ceph_options *ceph_parse_options(char *options,
- const char *dev_name, const char *dev_name_end,
- int (*parse_extra_token)(char *c, void *private),
- void *private);
-int ceph_print_client_options(struct seq_file *m, struct ceph_client *client);
+extern int ceph_parse_fsid(const char *str, struct ceph_fsid *fsid);
+
+struct fs_parameter;
+struct fc_log;
+struct ceph_options *ceph_alloc_options(void);
+int ceph_parse_mon_ips(const char *buf, size_t len, struct ceph_options *opt,
+ struct fc_log *l, char delim);
+int ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt,
+ struct fc_log *l);
+int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
+ bool show_all);
extern void ceph_destroy_options(struct ceph_options *opt);
extern int ceph_compare_options(struct ceph_options *new_opt,
struct ceph_client *client);
@@ -292,16 +305,15 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private);
struct ceph_entity_addr *ceph_client_addr(struct ceph_client *client);
u64 ceph_client_gid(struct ceph_client *client);
extern void ceph_destroy_client(struct ceph_client *client);
+extern void ceph_reset_client_addr(struct ceph_client *client);
extern int __ceph_open_session(struct ceph_client *client,
unsigned long started);
extern int ceph_open_session(struct ceph_client *client);
+int ceph_wait_for_latest_osdmap(struct ceph_client *client,
+ unsigned long timeout);
/* pagevec.c */
extern void ceph_release_page_vector(struct page **pages, int num_pages);
-
-extern struct page **ceph_get_direct_page_vector(const void __user *data,
- int num_pages,
- bool write_page);
extern void ceph_put_page_vector(struct page **pages, int num_pages,
bool dirty);
extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
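
A sketch of how the reworked rbtree macro pair above is consumed; the item
type and key are hypothetical, and RB_BYVAL is assumed to be the identity
key accessor defined alongside RB_CMP3WAY in this header:

    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct my_item {
    	u64 id;			/* hypothetical lookup key */
    	struct rb_node node;
    };

    /*
     * Generates __insert_my_item() (returns false on a duplicate key),
     * insert_my_item() (BUGs on a duplicate) and erase_my_item().
     */
    DEFINE_RB_INSDEL_FUNCS2(my_item, struct my_item, id, RB_CMP3WAY,
    			RB_BYVAL, node)
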
diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
index d5f783f3226a..4c3e0648dc27 100644
--- a/include/linux/ceph/mdsmap.h
+++ b/include/linux/ceph/mdsmap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_MDSMAP_H
#define _FS_CEPH_MDSMAP_H
@@ -24,8 +25,10 @@ struct ceph_mdsmap {
u32 m_session_timeout; /* seconds */
u32 m_session_autoclose; /* seconds */
u64 m_max_file_size;
- u32 m_max_mds; /* size of m_addr, m_state arrays */
- int m_num_mds;
+ u64 m_max_xattr_size; /* maximum size for xattrs blob */
+ u32 m_max_mds; /* expected up:active mds number */
+ u32 m_num_active_mds; /* actual up:active mds number */
+ u32 possible_max_rank; /* possible max rank index */
struct ceph_mds_info *m_info;
/* which object pools file data can be stored in */
@@ -41,7 +44,7 @@ struct ceph_mdsmap {
static inline struct ceph_entity_addr *
ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
{
- if (w >= m->m_num_mds)
+ if (w >= m->possible_max_rank)
return NULL;
return &m->m_info[w].addr;
}
@@ -49,20 +52,20 @@ ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w)
{
BUG_ON(w < 0);
- if (w >= m->m_num_mds)
+ if (w >= m->possible_max_rank)
return CEPH_MDS_STATE_DNE;
return m->m_info[w].state;
}
static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
{
- if (w >= 0 && w < m->m_num_mds)
+ if (w >= 0 && w < m->possible_max_rank)
return m->m_info[w].laggy;
return false;
}
extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
-extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end);
+struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2);
extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);
extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m);
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index fbd94d9fa5dd..99c1726be6ee 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -1,7 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __FS_CEPH_MESSENGER_H
#define __FS_CEPH_MESSENGER_H
#include <linux/bvec.h>
+#include <linux/crypto.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/net.h>
@@ -30,6 +32,9 @@ struct ceph_connection_operations {
struct ceph_auth_handshake *(*get_authorizer) (
struct ceph_connection *con,
int *proto, int force_new);
+ int (*add_authorizer_challenge)(struct ceph_connection *con,
+ void *challenge_buf,
+ int challenge_buf_len);
int (*verify_authorizer_reply) (struct ceph_connection *con);
int (*invalidate_authorizer)(struct ceph_connection *con);
@@ -48,9 +53,26 @@ struct ceph_connection_operations {
int (*sign_message) (struct ceph_msg *msg);
int (*check_message_signature) (struct ceph_msg *msg);
+
+ /* msgr2 authentication exchange */
+ int (*get_auth_request)(struct ceph_connection *con,
+ void *buf, int *buf_len,
+ void **authorizer, int *authorizer_len);
+ int (*handle_auth_reply_more)(struct ceph_connection *con,
+ void *reply, int reply_len,
+ void *buf, int *buf_len,
+ void **authorizer, int *authorizer_len);
+ int (*handle_auth_done)(struct ceph_connection *con,
+ u64 global_id, void *reply, int reply_len,
+ u8 *session_key, int *session_key_len,
+ u8 *con_secret, int *con_secret_len);
+ int (*handle_auth_bad_method)(struct ceph_connection *con,
+ int used_proto, int result,
+ const int *allowed_protos, int proto_cnt,
+ const int *allowed_modes, int mode_cnt);
};
-/* use format string %s%d */
+/* use format string %s%lld */
#define ENTITY_NAME(n) ceph_entity_type_name((n).type), le64_to_cpu((n).num)
struct ceph_messenger {
@@ -75,37 +97,106 @@ enum ceph_msg_data_type {
#ifdef CONFIG_BLOCK
CEPH_MSG_DATA_BIO, /* data source/destination is a bio list */
#endif /* CONFIG_BLOCK */
+ CEPH_MSG_DATA_BVECS, /* data source/destination is a bio_vec array */
};
-static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type)
-{
- switch (type) {
- case CEPH_MSG_DATA_NONE:
- case CEPH_MSG_DATA_PAGES:
- case CEPH_MSG_DATA_PAGELIST:
#ifdef CONFIG_BLOCK
- case CEPH_MSG_DATA_BIO:
+
+struct ceph_bio_iter {
+ struct bio *bio;
+ struct bvec_iter iter;
+};
+
+#define __ceph_bio_iter_advance_step(it, n, STEP) do { \
+ unsigned int __n = (n), __cur_n; \
+ \
+ while (__n) { \
+ BUG_ON(!(it)->iter.bi_size); \
+ __cur_n = min((it)->iter.bi_size, __n); \
+ (void)(STEP); \
+ bio_advance_iter((it)->bio, &(it)->iter, __cur_n); \
+ if (!(it)->iter.bi_size && (it)->bio->bi_next) { \
+ dout("__ceph_bio_iter_advance_step next bio\n"); \
+ (it)->bio = (it)->bio->bi_next; \
+ (it)->iter = (it)->bio->bi_iter; \
+ } \
+ __n -= __cur_n; \
+ } \
+} while (0)
+
+/*
+ * Advance @it by @n bytes.
+ */
+#define ceph_bio_iter_advance(it, n) \
+ __ceph_bio_iter_advance_step(it, n, 0)
+
+/*
+ * Advance @it by @n bytes, executing BVEC_STEP for each bio_vec.
+ */
+#define ceph_bio_iter_advance_step(it, n, BVEC_STEP) \
+ __ceph_bio_iter_advance_step(it, n, ({ \
+ struct bio_vec bv; \
+ struct bvec_iter __cur_iter; \
+ \
+ __cur_iter = (it)->iter; \
+ __cur_iter.bi_size = __cur_n; \
+ __bio_for_each_segment(bv, (it)->bio, __cur_iter, __cur_iter) \
+ (void)(BVEC_STEP); \
+ }))
+
#endif /* CONFIG_BLOCK */
- return true;
- default:
- return false;
- }
-}
+
+struct ceph_bvec_iter {
+ struct bio_vec *bvecs;
+ struct bvec_iter iter;
+};
+
+#define __ceph_bvec_iter_advance_step(it, n, STEP) do { \
+ BUG_ON((n) > (it)->iter.bi_size); \
+ (void)(STEP); \
+ bvec_iter_advance((it)->bvecs, &(it)->iter, (n)); \
+} while (0)
+
+/*
+ * Advance @it by @n bytes.
+ */
+#define ceph_bvec_iter_advance(it, n) \
+ __ceph_bvec_iter_advance_step(it, n, 0)
+
+/*
+ * Advance @it by @n bytes, executing BVEC_STEP for each bio_vec.
+ */
+#define ceph_bvec_iter_advance_step(it, n, BVEC_STEP) \
+ __ceph_bvec_iter_advance_step(it, n, ({ \
+ struct bio_vec bv; \
+ struct bvec_iter __cur_iter; \
+ \
+ __cur_iter = (it)->iter; \
+ __cur_iter.bi_size = (n); \
+ for_each_bvec(bv, (it)->bvecs, __cur_iter, __cur_iter) \
+ (void)(BVEC_STEP); \
+ }))
+
+#define ceph_bvec_iter_shorten(it, n) do { \
+ BUG_ON((n) > (it)->iter.bi_size); \
+ (it)->iter.bi_size = (n); \
+} while (0)
struct ceph_msg_data {
- struct list_head links; /* ceph_msg->data */
enum ceph_msg_data_type type;
union {
#ifdef CONFIG_BLOCK
struct {
- struct bio *bio;
- size_t bio_length;
+ struct ceph_bio_iter bio_pos;
+ u32 bio_length;
};
#endif /* CONFIG_BLOCK */
+ struct ceph_bvec_iter bvec_pos;
struct {
- struct page **pages; /* NOT OWNER. */
+ struct page **pages;
size_t length; /* total # bytes */
unsigned int alignment; /* first page */
+ bool own_pages;
};
struct ceph_pagelist *pagelist;
};
@@ -113,19 +204,15 @@ struct ceph_msg_data {
struct ceph_msg_data_cursor {
size_t total_resid; /* across all data items */
- struct list_head *data_head; /* = &ceph_msg->data */
struct ceph_msg_data *data; /* current data item */
size_t resid; /* bytes not yet consumed */
- bool last_piece; /* current is last piece */
bool need_crc; /* crc update needed */
union {
#ifdef CONFIG_BLOCK
- struct { /* bio */
- struct bio *bio; /* bio from list */
- struct bvec_iter bvec_iter;
- };
+ struct ceph_bio_iter bio_iter;
#endif /* CONFIG_BLOCK */
+ struct bvec_iter bvec_iter;
struct { /* pages */
unsigned int page_offset; /* offset in page */
unsigned short page_index; /* index in array */
@@ -153,7 +240,9 @@ struct ceph_msg {
struct ceph_buffer *middle;
size_t data_length;
- struct list_head data;
+ struct ceph_msg_data *data;
+ int num_data_items;
+ int max_data_items;
struct ceph_msg_data_cursor cursor;
struct ceph_connection *con;
@@ -163,14 +252,175 @@ struct ceph_msg {
bool more_to_follow;
bool needs_out_seq;
int front_alloc_len;
- unsigned long ack_stamp; /* tx: when we were acked */
struct ceph_msgpool *pool;
};
+/*
+ * connection states
+ */
+#define CEPH_CON_S_CLOSED 1
+#define CEPH_CON_S_PREOPEN 2
+#define CEPH_CON_S_V1_BANNER 3
+#define CEPH_CON_S_V1_CONNECT_MSG 4
+#define CEPH_CON_S_V2_BANNER_PREFIX 5
+#define CEPH_CON_S_V2_BANNER_PAYLOAD 6
+#define CEPH_CON_S_V2_HELLO 7
+#define CEPH_CON_S_V2_AUTH 8
+#define CEPH_CON_S_V2_AUTH_SIGNATURE 9
+#define CEPH_CON_S_V2_SESSION_CONNECT 10
+#define CEPH_CON_S_V2_SESSION_RECONNECT 11
+#define CEPH_CON_S_OPEN 12
+#define CEPH_CON_S_STANDBY 13
+
+/*
+ * ceph_connection flag bits
+ */
+#define CEPH_CON_F_LOSSYTX 0 /* we can close channel or drop
+ messages on errors */
+#define CEPH_CON_F_KEEPALIVE_PENDING 1 /* we need to send a keepalive */
+#define CEPH_CON_F_WRITE_PENDING 2 /* we have data ready to send */
+#define CEPH_CON_F_SOCK_CLOSED 3 /* socket state changed to closed */
+#define CEPH_CON_F_BACKOFF 4 /* need to retry queuing delayed
+ work */
+
/* ceph connection fault delay defaults, for exponential backoff */
-#define BASE_DELAY_INTERVAL (HZ/2)
-#define MAX_DELAY_INTERVAL (5 * 60 * HZ)
+#define BASE_DELAY_INTERVAL (HZ / 4)
+#define MAX_DELAY_INTERVAL (15 * HZ)
+
+struct ceph_connection_v1_info {
+ struct kvec out_kvec[8], /* sending header/footer data */
+ *out_kvec_cur;
+ int out_kvec_left; /* kvec's left in out_kvec */
+ int out_skip; /* skip this many bytes */
+ int out_kvec_bytes; /* total bytes left */
+ bool out_more; /* there is more data after the kvecs */
+ bool out_msg_done;
+
+ struct ceph_auth_handshake *auth;
+ int auth_retry; /* true if we need a newer authorizer */
+
+ /* connection negotiation temps */
+ u8 in_banner[CEPH_BANNER_MAX_LEN];
+ struct ceph_entity_addr actual_peer_addr;
+ struct ceph_entity_addr peer_addr_for_me;
+ struct ceph_msg_connect out_connect;
+ struct ceph_msg_connect_reply in_reply;
+
+ int in_base_pos; /* bytes read */
+
+ /* message in temps */
+ u8 in_tag; /* protocol control byte */
+ struct ceph_msg_header in_hdr;
+ __le64 in_temp_ack; /* for reading an ack */
+
+ /* message out temps */
+ struct ceph_msg_header out_hdr;
+ __le64 out_temp_ack; /* for writing an ack */
+ struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
+ stamp */
+
+ u32 connect_seq; /* identify the most recent connection
+ attempt for this session */
+ u32 peer_global_seq; /* peer's global seq for this connection */
+};
+
+#define CEPH_CRC_LEN 4
+#define CEPH_GCM_KEY_LEN 16
+#define CEPH_GCM_IV_LEN sizeof(struct ceph_gcm_nonce)
+#define CEPH_GCM_BLOCK_LEN 16
+#define CEPH_GCM_TAG_LEN 16
+
+#define CEPH_PREAMBLE_LEN 32
+#define CEPH_PREAMBLE_INLINE_LEN 48
+#define CEPH_PREAMBLE_PLAIN_LEN CEPH_PREAMBLE_LEN
+#define CEPH_PREAMBLE_SECURE_LEN (CEPH_PREAMBLE_LEN + \
+ CEPH_PREAMBLE_INLINE_LEN + \
+ CEPH_GCM_TAG_LEN)
+#define CEPH_EPILOGUE_PLAIN_LEN (1 + 3 * CEPH_CRC_LEN)
+#define CEPH_EPILOGUE_SECURE_LEN (CEPH_GCM_BLOCK_LEN + CEPH_GCM_TAG_LEN)
+
+#define CEPH_FRAME_MAX_SEGMENT_COUNT 4
+
+struct ceph_frame_desc {
+ int fd_tag; /* FRAME_TAG_* */
+ int fd_seg_cnt;
+ int fd_lens[CEPH_FRAME_MAX_SEGMENT_COUNT]; /* logical */
+ int fd_aligns[CEPH_FRAME_MAX_SEGMENT_COUNT];
+};
+
+struct ceph_gcm_nonce {
+ __le32 fixed;
+ __le64 counter __packed;
+};
+
+struct ceph_connection_v2_info {
+ struct iov_iter in_iter;
+ struct kvec in_kvecs[5]; /* recvmsg */
+ struct bio_vec in_bvec; /* recvmsg (in_cursor) */
+ int in_kvec_cnt;
+ int in_state; /* IN_S_* */
+
+ struct iov_iter out_iter;
+ struct kvec out_kvecs[8]; /* sendmsg */
+ struct bio_vec out_bvec; /* sendpage (out_cursor, out_zero),
+ sendmsg (out_enc_pages) */
+ int out_kvec_cnt;
+ int out_state; /* OUT_S_* */
+
+ int out_zero; /* # of zero bytes to send */
+ bool out_iter_sendpage; /* use sendpage if possible */
+
+ struct ceph_frame_desc in_desc;
+ struct ceph_msg_data_cursor in_cursor;
+ struct ceph_msg_data_cursor out_cursor;
+
+ struct crypto_shash *hmac_tfm; /* post-auth signature */
+ struct crypto_aead *gcm_tfm; /* on-wire encryption */
+ struct aead_request *gcm_req;
+ struct crypto_wait gcm_wait;
+ struct ceph_gcm_nonce in_gcm_nonce;
+ struct ceph_gcm_nonce out_gcm_nonce;
+
+ struct page **in_enc_pages;
+ int in_enc_page_cnt;
+ int in_enc_resid;
+ int in_enc_i;
+ struct page **out_enc_pages;
+ int out_enc_page_cnt;
+ int out_enc_resid;
+ int out_enc_i;
+
+ int con_mode; /* CEPH_CON_MODE_* */
+
+ void *conn_bufs[16];
+ int conn_buf_cnt;
+
+ struct kvec in_sign_kvecs[8];
+ struct kvec out_sign_kvecs[8];
+ int in_sign_kvec_cnt;
+ int out_sign_kvec_cnt;
+
+ u64 client_cookie;
+ u64 server_cookie;
+ u64 global_seq;
+ u64 connect_seq;
+ u64 peer_global_seq;
+
+ u8 in_buf[CEPH_PREAMBLE_SECURE_LEN];
+ u8 out_buf[CEPH_PREAMBLE_SECURE_LEN];
+ struct {
+ u8 late_status; /* FRAME_LATE_STATUS_* */
+ union {
+ struct {
+ u32 front_crc;
+ u32 middle_crc;
+ u32 data_crc;
+ } __packed;
+ u8 pad[CEPH_GCM_BLOCK_LEN - 1];
+ };
+ } out_epil;
+};
/*
* A single connection with another host.
@@ -186,25 +436,16 @@ struct ceph_connection {
struct ceph_messenger *msgr;
+ int state; /* CEPH_CON_S_* */
atomic_t sock_state;
struct socket *sock;
- struct ceph_entity_addr peer_addr; /* peer address */
- struct ceph_entity_addr peer_addr_for_me;
- unsigned long flags;
- unsigned long state;
+ unsigned long flags; /* CEPH_CON_F_* */
const char *error_msg; /* error message, if any */
struct ceph_entity_name peer_name; /* peer name */
-
+ struct ceph_entity_addr peer_addr; /* peer address */
u64 peer_features;
- u32 connect_seq; /* identify the most recent connection
- attempt for this connection, client */
- u32 peer_global_seq; /* peer's global seq for this connection */
-
- int auth_retry; /* true if we need a newer authorizer */
- void *auth_reply_buf; /* where to put the authorizer reply */
- int auth_reply_buf_len;
struct mutex mutex;
@@ -215,49 +456,86 @@ struct ceph_connection {
u64 in_seq, in_seq_acked; /* last message received, acked */
- /* connection negotiation temps */
- char in_banner[CEPH_BANNER_MAX_LEN];
- struct ceph_msg_connect out_connect;
- struct ceph_msg_connect_reply in_reply;
- struct ceph_entity_addr actual_peer_addr;
-
- /* message out temps */
- struct ceph_msg_header out_hdr;
+ struct ceph_msg *in_msg;
struct ceph_msg *out_msg; /* sending message (== tail of
out_sent) */
- bool out_msg_done;
- struct kvec out_kvec[8], /* sending header/footer data */
- *out_kvec_cur;
- int out_kvec_left; /* kvec's left in out_kvec */
- int out_skip; /* skip this many bytes */
- int out_kvec_bytes; /* total bytes left */
- int out_more; /* there is more data after the kvecs */
- __le64 out_temp_ack; /* for writing an ack */
- struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
- stamp */
-
- /* message in temps */
- struct ceph_msg_header in_hdr;
- struct ceph_msg *in_msg;
+ struct page *bounce_page;
u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */
- char in_tag; /* protocol control byte */
- int in_base_pos; /* bytes read */
- __le64 in_temp_ack; /* for reading an ack */
-
- struct timespec last_keepalive_ack; /* keepalive2 ack stamp */
+ struct timespec64 last_keepalive_ack; /* keepalive2 ack stamp */
struct delayed_work work; /* send|recv work */
unsigned long delay; /* current delay interval */
+
+ union {
+ struct ceph_connection_v1_info v1;
+ struct ceph_connection_v2_info v2;
+ };
};
+extern struct page *ceph_zero_page;
+
+void ceph_con_flag_clear(struct ceph_connection *con, unsigned long con_flag);
+void ceph_con_flag_set(struct ceph_connection *con, unsigned long con_flag);
+bool ceph_con_flag_test(struct ceph_connection *con, unsigned long con_flag);
+bool ceph_con_flag_test_and_clear(struct ceph_connection *con,
+ unsigned long con_flag);
+bool ceph_con_flag_test_and_set(struct ceph_connection *con,
+ unsigned long con_flag);
+
+void ceph_encode_my_addr(struct ceph_messenger *msgr);
+
+int ceph_tcp_connect(struct ceph_connection *con);
+int ceph_con_close_socket(struct ceph_connection *con);
+void ceph_con_reset_session(struct ceph_connection *con);
+
+u32 ceph_get_global_seq(struct ceph_messenger *msgr, u32 gt);
+void ceph_con_discard_sent(struct ceph_connection *con, u64 ack_seq);
+void ceph_con_discard_requeued(struct ceph_connection *con, u64 reconnect_seq);
+
+void ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor,
+ struct ceph_msg *msg, size_t length);
+struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
+ size_t *page_offset, size_t *length);
+void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, size_t bytes);
+
+u32 ceph_crc32c_page(u32 crc, struct page *page, unsigned int page_offset,
+ unsigned int length);
+
+bool ceph_addr_is_blank(const struct ceph_entity_addr *addr);
+int ceph_addr_port(const struct ceph_entity_addr *addr);
+void ceph_addr_set_port(struct ceph_entity_addr *addr, int p);
+
+void ceph_con_process_message(struct ceph_connection *con);
+int ceph_con_in_msg_alloc(struct ceph_connection *con,
+ struct ceph_msg_header *hdr, int *skip);
+void ceph_con_get_out_msg(struct ceph_connection *con);
+
+/* messenger_v1.c */
+int ceph_con_v1_try_read(struct ceph_connection *con);
+int ceph_con_v1_try_write(struct ceph_connection *con);
+void ceph_con_v1_revoke(struct ceph_connection *con);
+void ceph_con_v1_revoke_incoming(struct ceph_connection *con);
+bool ceph_con_v1_opened(struct ceph_connection *con);
+void ceph_con_v1_reset_session(struct ceph_connection *con);
+void ceph_con_v1_reset_protocol(struct ceph_connection *con);
+
+/* messenger_v2.c */
+int ceph_con_v2_try_read(struct ceph_connection *con);
+int ceph_con_v2_try_write(struct ceph_connection *con);
+void ceph_con_v2_revoke(struct ceph_connection *con);
+void ceph_con_v2_revoke_incoming(struct ceph_connection *con);
+bool ceph_con_v2_opened(struct ceph_connection *con);
+void ceph_con_v2_reset_session(struct ceph_connection *con);
+void ceph_con_v2_reset_protocol(struct ceph_connection *con);
+
+
+extern const char *ceph_pr_addr(const struct ceph_entity_addr *addr);
-extern const char *ceph_pr_addr(const struct sockaddr_storage *ss);
extern int ceph_parse_ips(const char *c, const char *end,
struct ceph_entity_addr *addr,
- int max_count, int *count);
-
+ int max_count, int *count, char delim);
extern int ceph_msgr_init(void);
extern void ceph_msgr_exit(void);
@@ -266,6 +544,7 @@ extern void ceph_msgr_flush(void);
extern void ceph_messenger_init(struct ceph_messenger *msgr,
struct ceph_entity_addr *myaddr);
extern void ceph_messenger_fini(struct ceph_messenger *msgr);
+extern void ceph_messenger_reset_nonce(struct ceph_messenger *msgr);
extern void ceph_con_init(struct ceph_connection *con, void *private,
const struct ceph_connection_operations *ops,
@@ -284,15 +563,19 @@ extern void ceph_con_keepalive(struct ceph_connection *con);
extern bool ceph_con_keepalive_expired(struct ceph_connection *con,
unsigned long interval);
-extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
- size_t length, size_t alignment);
+void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
+ size_t length, size_t alignment, bool own_pages);
extern void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
struct ceph_pagelist *pagelist);
#ifdef CONFIG_BLOCK
-extern void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio,
- size_t length);
+void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
+ u32 length);
#endif /* CONFIG_BLOCK */
+void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
+ struct ceph_bvec_iter *bvec_pos);
+struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
+ gfp_t flags, bool can_fail);
extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
bool can_fail);
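
A minimal sketch of the data cursor API declared above, assuming msg and
data_len are an already-built message and its data length (illustrative
names) and relying on the cursor's total_resid residual counter:

	struct ceph_msg_data_cursor cursor;
	struct page *page;
	size_t off, len;

	ceph_msg_data_cursor_init(&cursor, msg, data_len);
	while (cursor.total_resid) {
		/* current page plus how much of it the cursor maps */
		page = ceph_msg_data_next(&cursor, &off, &len);
		/* ... consume up to len bytes at offset off ... */
		ceph_msg_data_advance(&cursor, len);
	}
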
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index d5a3ecea578d..b658961156a0 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_MON_CLIENT_H
#define _FS_CEPH_MON_CLIENT_H
@@ -18,7 +19,7 @@ struct ceph_monmap {
struct ceph_fsid fsid;
u32 epoch;
u32 num_mon;
- struct ceph_entity_inst mon_inst[0];
+ struct ceph_entity_inst mon_inst[];
};
struct ceph_mon_client;
@@ -103,12 +104,12 @@ struct ceph_mon_client {
#endif
};
-extern struct ceph_monmap *ceph_monmap_decode(void *p, void *end);
extern int ceph_monmap_contains(struct ceph_monmap *m,
struct ceph_entity_addr *addr);
extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl);
extern void ceph_monc_stop(struct ceph_mon_client *monc);
+extern void ceph_monc_reopen_session(struct ceph_mon_client *monc);
enum {
CEPH_SUB_MONMAP = 0,
@@ -133,15 +134,15 @@ void ceph_monc_renew_subs(struct ceph_mon_client *monc);
extern int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
unsigned long timeout);
-extern int ceph_monc_do_statfs(struct ceph_mon_client *monc,
- struct ceph_statfs *buf);
+int ceph_monc_do_statfs(struct ceph_mon_client *monc, u64 data_pool,
+ struct ceph_statfs *buf);
int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
u64 *newest);
int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what,
ceph_monc_callback_t cb, u64 private_data);
-int ceph_monc_blacklist_add(struct ceph_mon_client *monc,
+int ceph_monc_blocklist_add(struct ceph_mon_client *monc,
struct ceph_entity_addr *client_addr);
extern int ceph_monc_open_session(struct ceph_mon_client *monc);
diff --git a/include/linux/ceph/msgpool.h b/include/linux/ceph/msgpool.h
index ddd0d48d0384..729cdf700eae 100644
--- a/include/linux/ceph/msgpool.h
+++ b/include/linux/ceph/msgpool.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_MSGPOOL
#define _FS_CEPH_MSGPOOL
@@ -12,14 +13,15 @@ struct ceph_msgpool {
mempool_t *pool;
int type; /* preallocated message type */
int front_len; /* preallocated payload size */
+ int max_data_items;
};
-extern int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
- int front_len, int size, bool blocking,
- const char *name);
+int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
+ int front_len, int max_data_items, int size,
+ const char *name);
extern void ceph_msgpool_destroy(struct ceph_msgpool *pool);
-extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *,
- int front_len);
+struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len,
+ int max_data_items);
extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *);
#endif
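
ceph_msgpool_init() and ceph_msgpool_get() now carry a max_data_items
bound so pooled messages come preallocated with data-item slots. A hedged
sketch (message type, sizes, and name are illustrative; error handling
elided):

	struct ceph_msgpool pool;
	struct ceph_msg *msg;

	ceph_msgpool_init(&pool, CEPH_MSG_OSD_OPREPLY, front_len,
			  1 /* max_data_items */, 10 /* size */, "example");
	msg = ceph_msgpool_get(&pool, front_len, 1);
	/* ... fill and send msg ... */
	ceph_msgpool_put(&pool, msg);
	ceph_msgpool_destroy(&pool);
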
diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h
index 0fe2656ac415..3989dcb94d3d 100644
--- a/include/linux/ceph/msgr.h
+++ b/include/linux/ceph/msgr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef CEPH_MSGR_H
#define CEPH_MSGR_H
@@ -8,24 +9,45 @@
#define CEPH_MON_PORT 6789 /* default monitor port */
/*
- * client-side processes will try to bind to ports in this
- * range, simply for the benefit of tools like nmap or wireshark
- * that would like to identify the protocol.
- */
-#define CEPH_PORT_FIRST 6789
-#define CEPH_PORT_START 6800 /* non-monitors start here */
-#define CEPH_PORT_LAST 6900
-
-/*
* tcp connection banner. include a protocol version. and adjust
* whenever the wire protocol changes. try to keep this string length
* constant.
*/
#define CEPH_BANNER "ceph v027"
+#define CEPH_BANNER_LEN 9
#define CEPH_BANNER_MAX_LEN 30
/*
+ * messenger V2 connection banner prefix.
+ * The full banner string should have the form: "ceph v2\n<le16>",
+ * where the 2 bytes are the length of the remaining banner.
+ */
+#define CEPH_BANNER_V2 "ceph v2\n"
+#define CEPH_BANNER_V2_LEN 8
+#define CEPH_BANNER_V2_PREFIX_LEN (CEPH_BANNER_V2_LEN + sizeof(__le16))
+
+/*
+ * messenger V2 features
+ */
+#define CEPH_MSGR2_INCARNATION_1 (0ull)
+
+#define DEFINE_MSGR2_FEATURE(bit, incarnation, name) \
+ static const uint64_t __maybe_unused CEPH_MSGR2_FEATURE_##name = (1ULL << bit); \
+ static const uint64_t __maybe_unused CEPH_MSGR2_FEATUREMASK_##name = \
+ (1ULL << bit | CEPH_MSGR2_INCARNATION_##incarnation);
+
+#define HAVE_MSGR2_FEATURE(x, name) \
+ (((x) & (CEPH_MSGR2_FEATUREMASK_##name)) == (CEPH_MSGR2_FEATUREMASK_##name))
+
+DEFINE_MSGR2_FEATURE( 0, 1, REVISION_1) // msgr2.1
+
+#define CEPH_MSGR2_SUPPORTED_FEATURES (CEPH_MSGR2_FEATURE_REVISION_1)
+
+#define CEPH_MSGR2_REQUIRED_FEATURES (CEPH_MSGR2_FEATURE_REVISION_1)
+
+
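
A sketch of how these macros compose; peer_features stands in for the
feature word learned during the v2 handshake (illustrative name):

	/* CEPH_MSGR2_FEATUREMASK_REVISION_1 == (1ULL << 0 | 0ull), so this
	 * checks the feature bit together with its incarnation bits. */
	if (HAVE_MSGR2_FEATURE(peer_features, REVISION_1)) {
		/* peer speaks msgr2.1 framing */
	}
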
+/*
* Rollover-safe type and comparator for 32-bit sequence numbers.
* Comparator returns -1, 0, or 1.
*/
@@ -60,11 +82,18 @@ extern const char *ceph_entity_type_name(int type);
* entity_addr -- network address
*/
struct ceph_entity_addr {
- __le32 type;
+ __le32 type; /* CEPH_ENTITY_ADDR_TYPE_* */
__le32 nonce; /* unique id for process (e.g. pid) */
struct sockaddr_storage in_addr;
} __attribute__ ((packed));
+static inline bool ceph_addr_equal_no_type(const struct ceph_entity_addr *lhs,
+ const struct ceph_entity_addr *rhs)
+{
+ return !memcmp(&lhs->in_addr, &rhs->in_addr, sizeof(lhs->in_addr)) &&
+ lhs->nonce == rhs->nonce;
+}
+
struct ceph_entity_inst {
struct ceph_entity_name name;
struct ceph_entity_addr addr;
@@ -90,7 +119,7 @@ struct ceph_entity_inst {
#define CEPH_MSGR_TAG_SEQ 13 /* 64-bit int follows with seen seq number */
#define CEPH_MSGR_TAG_KEEPALIVE2 14 /* keepalive2 byte + ceph_timespec */
#define CEPH_MSGR_TAG_KEEPALIVE2_ACK 15 /* keepalive2 reply */
-
+#define CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER 16 /* cephx v2 doing server challenge */
/*
* connection negotiation
@@ -159,6 +188,24 @@ struct ceph_msg_header {
__le32 crc; /* header crc32c */
} __attribute__ ((packed));
+struct ceph_msg_header2 {
+ __le64 seq; /* message seq# for this session */
+ __le64 tid; /* transaction id */
+ __le16 type; /* message type */
+ __le16 priority; /* priority. higher value == higher priority */
+ __le16 version; /* version of message encoding */
+
+ __le32 data_pre_padding_len;
+ __le16 data_off; /* sender: include full offset;
+ receiver: mask against ~PAGE_MASK */
+
+ __le64 ack_seq;
+ __u8 flags;
+ /* oldest code we think can decode this. unknown if zero. */
+ __le16 compat_version;
+ __le16 reserved;
+} __attribute__ ((packed));
+
#define CEPH_MSG_PRIO_LOW 64
#define CEPH_MSG_PRIO_DEFAULT 127
#define CEPH_MSG_PRIO_HIGH 196
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index c6d96a5f46fd..fb6be72104df 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_OSD_CLIENT_H
#define _FS_CEPH_OSD_CLIENT_H
@@ -7,6 +8,7 @@
#include <linux/mempool.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
+#include <linux/ktime.h>
#include <linux/ceph/types.h>
#include <linux/ceph/osdmap.h>
@@ -56,6 +58,7 @@ enum ceph_osd_data_type {
#ifdef CONFIG_BLOCK
CEPH_OSD_DATA_TYPE_BIO,
#endif /* CONFIG_BLOCK */
+ CEPH_OSD_DATA_TYPE_BVECS,
};
struct ceph_osd_data {
@@ -71,10 +74,14 @@ struct ceph_osd_data {
struct ceph_pagelist *pagelist;
#ifdef CONFIG_BLOCK
struct {
- struct bio *bio; /* list of bios */
- size_t bio_length; /* total in list */
+ struct ceph_bio_iter bio_pos;
+ u32 bio_length;
};
#endif /* CONFIG_BLOCK */
+ struct {
+ struct ceph_bvec_iter bvec_pos;
+ u32 num_bvecs;
+ };
};
};
@@ -129,7 +136,15 @@ struct ceph_osd_req_op {
struct {
u64 expected_object_size;
u64 expected_write_size;
+ u32 flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
} alloc_hint;
+ struct {
+ u64 snapid;
+ u64 src_version;
+ u8 flags;
+ u32 src_fadvise_flags;
+ struct ceph_osd_data osd_data;
+ } copy_from;
};
};
@@ -148,8 +163,10 @@ struct ceph_osd_request_target {
int size;
int min_size;
bool sort_bitwise;
+ bool recovery_deletes;
unsigned int flags; /* CEPH_OSD_FLAG_* */
+ bool used_replica;
bool paused;
u32 epoch;
@@ -163,6 +180,7 @@ struct ceph_osd_request {
u64 r_tid; /* unique for this client */
struct rb_node r_node;
struct rb_node r_mc_node; /* map check */
+ struct work_struct r_complete_work;
struct ceph_osd *r_osd;
struct ceph_osd_request_target r_t;
@@ -183,22 +201,23 @@ struct ceph_osd_request {
bool r_mempool;
struct completion r_completion; /* private to osd_client.c */
ceph_osdc_callback_t r_callback;
- struct list_head r_unsafe_item;
struct inode *r_inode; /* for use by callbacks */
+ struct list_head r_private_item; /* ditto */
void *r_priv; /* ditto */
/* set by submitter */
u64 r_snapid; /* for reads, CEPH_NOSNAP o/w */
struct ceph_snap_context *r_snapc; /* for writes */
- struct timespec r_mtime; /* ditto */
+ struct timespec64 r_mtime; /* ditto */
u64 r_data_offset; /* ditto */
bool r_linger; /* don't resend on failure */
- bool r_abort_on_full; /* return ENOSPC when full */
/* internal */
unsigned long r_stamp; /* jiffies, send or check time */
unsigned long r_start_stamp; /* jiffies */
+ ktime_t r_start_latency; /* ktime_t */
+ ktime_t r_end_latency; /* ktime_t */
int r_attempts;
u32 r_map_dne_bound;
@@ -246,7 +265,7 @@ struct ceph_osd_linger_request {
struct ceph_osd_request_target t;
u32 map_dne_bound;
- struct timespec mtime;
+ struct timespec64 mtime;
struct kref kref;
struct mutex lock;
@@ -268,6 +287,9 @@ struct ceph_osd_linger_request {
rados_watcherrcb_t errcb;
void *data;
+ struct ceph_pagelist *request_pl;
+ struct page **notify_id_pages;
+
struct page ***preply_pages;
size_t *preply_len;
};
@@ -340,6 +362,7 @@ struct ceph_osd_client {
struct rb_root linger_map_checks;
atomic_t num_requests;
atomic_t num_homeless;
+ int abort_err;
struct delayed_work timeout_work;
struct delayed_work osds_timeout_work;
#ifdef CONFIG_DEBUG_FS
@@ -352,6 +375,7 @@ struct ceph_osd_client {
struct ceph_msgpool msgpool_op_reply;
struct workqueue_struct *notify_wq;
+ struct workqueue_struct *completion_wq;
};
static inline bool ceph_osdmap_flag(struct ceph_osd_client *osdc, int flag)
@@ -365,14 +389,25 @@ extern void ceph_osdc_cleanup(void);
extern int ceph_osdc_init(struct ceph_osd_client *osdc,
struct ceph_client *client);
extern void ceph_osdc_stop(struct ceph_osd_client *osdc);
+extern void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc);
extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb);
-
-extern void osd_req_op_init(struct ceph_osd_request *osd_req,
+void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err);
+void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc);
+
+#define osd_req_op_data(oreq, whch, typ, fld) \
+({ \
+ struct ceph_osd_request *__oreq = (oreq); \
+ unsigned int __whch = (whch); \
+ BUG_ON(__whch >= __oreq->r_num_ops); \
+ &__oreq->r_ops[__whch].typ.fld; \
+})
+
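
osd_req_op_data() evaluates to &req->r_ops[which].<type>.<field> after a
BUG_ON() bounds check on which. A hedged example, assuming op 0 is an
extent op as in the existing osd_client users:

	struct ceph_osd_data *osd_data =
		osd_req_op_data(req, 0, extent, osd_data);
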
+struct ceph_osd_req_op *osd_req_op_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode, u32 flags);
extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *,
@@ -403,10 +438,18 @@ extern void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *,
unsigned int which,
struct ceph_pagelist *pagelist);
#ifdef CONFIG_BLOCK
-extern void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *,
- unsigned int which,
- struct bio *bio, size_t bio_length);
+void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
+ unsigned int which,
+ struct ceph_bio_iter *bio_pos,
+ u32 bio_length);
#endif /* CONFIG_BLOCK */
+void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
+ unsigned int which,
+ struct bio_vec *bvecs, u32 num_bvecs,
+ u32 bytes);
+void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
+ unsigned int which,
+ struct ceph_bvec_iter *bvec_pos);
extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *,
unsigned int which,
@@ -416,21 +459,33 @@ extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *,
struct page **pages, u64 length,
u32 alignment, bool pages_from_pool,
bool own_pages);
+void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
+ unsigned int which,
+ struct bio_vec *bvecs, u32 num_bvecs,
+ u32 bytes);
extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
unsigned int which,
struct page **pages, u64 length,
u32 alignment, bool pages_from_pool,
bool own_pages);
-extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req,
- unsigned int which, u16 opcode,
- const char *class, const char *method);
+int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
+ const char *class, const char *method);
extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
u16 opcode, const char *name, const void *value,
size_t size, u8 cmp_op, u8 cmp_mode);
extern void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
unsigned int which,
u64 expected_object_size,
- u64 expected_write_size);
+ u64 expected_write_size,
+ u32 flags);
+extern int osd_req_op_copy_from_init(struct ceph_osd_request *req,
+ u64 src_snapid, u64 src_version,
+ struct ceph_object_id *src_oid,
+ struct ceph_object_locator *src_oloc,
+ u32 src_fadvise_flags,
+ u32 dst_fadvise_flags,
+ u32 truncate_seq, u64 truncate_size,
+ u8 copy_from_flags);
extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
struct ceph_snap_context *snapc,
@@ -452,9 +507,8 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
extern void ceph_osdc_get_request(struct ceph_osd_request *req);
extern void ceph_osdc_put_request(struct ceph_osd_request *req);
-extern int ceph_osdc_start_request(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req,
- bool nofail);
+void ceph_osdc_start_request(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req);
extern void ceph_osdc_cancel_request(struct ceph_osd_request *req);
extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req);
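
ceph_osdc_start_request() no longer takes a nofail flag or returns an
error; submission failures now surface through the request's completion
path (or via ceph_osdc_abort_requests()). The resulting submit-and-wait
pattern, sketched:

	ceph_osdc_start_request(osdc, req);
	ret = ceph_osdc_wait_request(osdc, req);	/* < 0 on failure */
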
@@ -469,24 +523,7 @@ int ceph_osdc_call(struct ceph_osd_client *osdc,
const char *class, const char *method,
unsigned int flags,
struct page *req_page, size_t req_len,
- struct page *resp_page, size_t *resp_len);
-
-extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
- struct ceph_vino vino,
- struct ceph_file_layout *layout,
- u64 off, u64 *plen,
- u32 truncate_seq, u64 truncate_size,
- struct page **pages, int nr_pages,
- int page_align);
-
-extern int ceph_osdc_writepages(struct ceph_osd_client *osdc,
- struct ceph_vino vino,
- struct ceph_file_layout *layout,
- struct ceph_snap_context *sc,
- u64 off, u64 len,
- u32 truncate_seq, u64 truncate_size,
- struct timespec *mtime,
- struct page **pages, int nr_pages);
+ struct page **resp_pages, size_t *resp_len);
/* watch/notify */
struct ceph_osd_linger_request *
@@ -505,12 +542,12 @@ int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
u64 notify_id,
u64 cookie,
void *payload,
- size_t payload_len);
+ u32 payload_len);
int ceph_osdc_notify(struct ceph_osd_client *osdc,
struct ceph_object_id *oid,
struct ceph_object_locator *oloc,
void *payload,
- size_t payload_len,
+ u32 payload_len,
u32 timeout,
struct page ***preply_pages,
size_t *preply_len);
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index a0996cb9faed..5553019c3f07 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -1,10 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_OSDMAP_H
#define _FS_CEPH_OSDMAP_H
#include <linux/rbtree.h>
#include <linux/ceph/types.h>
#include <linux/ceph/decode.h>
-#include <linux/ceph/ceph_fs.h>
#include <linux/crush/crush.h>
/*
@@ -37,6 +37,9 @@ int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs);
#define CEPH_POOL_FLAG_HASHPSPOOL (1ULL << 0) /* hash pg seed and pool id
together */
#define CEPH_POOL_FLAG_FULL (1ULL << 1) /* pool is full */
+#define CEPH_POOL_FLAG_FULL_QUOTA (1ULL << 10) /* pool ran out of quota,
+ will set FULL too */
+#define CEPH_POOL_FLAG_NEARFULL (1ULL << 11) /* pool is nearfull */
struct ceph_pg_pool_info {
struct rb_node node;
@@ -110,17 +113,16 @@ struct ceph_object_id {
int name_len;
};
+#define __CEPH_OID_INITIALIZER(oid) { .name = (oid).inline_name }
+
+#define CEPH_DEFINE_OID_ONSTACK(oid) \
+ struct ceph_object_id oid = __CEPH_OID_INITIALIZER(oid)
+
static inline void ceph_oid_init(struct ceph_object_id *oid)
{
- oid->name = oid->inline_name;
- oid->name_len = 0;
+ *oid = (struct ceph_object_id) __CEPH_OID_INITIALIZER(*oid);
}
-#define CEPH_OID_INIT_ONSTACK(oid) \
- ({ ceph_oid_init(&oid); oid; })
-#define CEPH_DEFINE_OID_ONSTACK(oid) \
- struct ceph_object_id oid = CEPH_OID_INIT_ONSTACK(oid)
-
static inline bool ceph_oid_empty(const struct ceph_object_id *oid)
{
return oid->name == oid->inline_name && !oid->name_len;
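
A sketch of the on-stack pattern these macros support, assuming
ceph_oid_printf() as declared alongside ceph_oid_aprintf() below (the
object name is illustrative):

	CEPH_DEFINE_OID_ONSTACK(oid);

	ceph_oid_printf(&oid, "rbd_header.%s", image_id);
	/* ... use &oid ... */
	ceph_oid_destroy(&oid);
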
@@ -135,6 +137,17 @@ int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
const char *fmt, ...);
void ceph_oid_destroy(struct ceph_object_id *oid);
+struct workspace_manager {
+ struct list_head idle_ws;
+ spinlock_t ws_lock;
+ /* Number of free workspaces */
+ int free_ws;
+ /* Total number of allocated workspaces */
+ atomic_t total_ws;
+ /* Waiters for a free workspace */
+ wait_queue_head_t ws_wait;
+};
+
struct ceph_pg_mapping {
struct rb_node node;
struct ceph_pg pgid;
@@ -182,8 +195,7 @@ struct ceph_osdmap {
* the list of osds that store+replicate them. */
struct crush_map *crush;
- struct mutex crush_workspace_mutex;
- void *crush_workspace;
+ struct workspace_manager crush_wsm;
};
static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd)
@@ -239,8 +251,8 @@ static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid)
}
struct ceph_osdmap *ceph_osdmap_alloc(void);
-extern struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end);
-struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
+struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end, bool msgr2);
+struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, bool msgr2,
struct ceph_osdmap *map);
extern void ceph_osdmap_destroy(struct ceph_osdmap *map);
@@ -272,20 +284,17 @@ bool ceph_is_new_interval(const struct ceph_osds *old_acting,
u32 new_pg_num,
bool old_sort_bitwise,
bool new_sort_bitwise,
+ bool old_recovery_deletes,
+ bool new_recovery_deletes,
const struct ceph_pg *pgid);
bool ceph_osds_changed(const struct ceph_osds *old_acting,
const struct ceph_osds *new_acting,
bool any_change);
-/* calculate mapping of a file extent to an object */
-extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
- u64 off, u64 len,
- u64 *bno, u64 *oxoff, u64 *oxlen);
-
-int __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi,
- const struct ceph_object_id *oid,
- const struct ceph_object_locator *oloc,
- struct ceph_pg *raw_pgid);
+void __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi,
+ const struct ceph_object_id *oid,
+ const struct ceph_object_locator *oloc,
+ struct ceph_pg *raw_pgid);
int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
const struct ceph_object_id *oid,
const struct ceph_object_locator *oloc,
@@ -303,10 +312,28 @@ bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap,
int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
const struct ceph_pg *raw_pgid);
+struct crush_loc {
+ char *cl_type_name;
+ char *cl_name;
+};
+
+struct crush_loc_node {
+ struct rb_node cl_node;
+ struct crush_loc cl_loc; /* pointers into cl_data */
+ char cl_data[];
+};
+
+int ceph_parse_crush_location(char *crush_location, struct rb_root *locs);
+int ceph_compare_crush_locs(struct rb_root *locs1, struct rb_root *locs2);
+void ceph_clear_crush_locs(struct rb_root *locs);
+
+int ceph_get_crush_locality(struct ceph_osdmap *osdmap, int id,
+ struct rb_root *locs);
+
extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map,
u64 id);
-
extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id);
extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);
+u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id);
#endif
diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h
index 75a7db21457d..5dead8486fd8 100644
--- a/include/linux/ceph/pagelist.h
+++ b/include/linux/ceph/pagelist.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __FS_CEPH_PAGELIST_H
#define __FS_CEPH_PAGELIST_H
@@ -22,16 +23,7 @@ struct ceph_pagelist_cursor {
size_t room; /* room remaining to reset to */
};
-static inline void ceph_pagelist_init(struct ceph_pagelist *pl)
-{
- INIT_LIST_HEAD(&pl->head);
- pl->mapped_tail = NULL;
- pl->length = 0;
- pl->room = 0;
- INIT_LIST_HEAD(&pl->free_list);
- pl->num_pages_free = 0;
- refcount_set(&pl->refcnt, 1);
-}
+struct ceph_pagelist *ceph_pagelist_alloc(gfp_t gfp_flags);
extern void ceph_pagelist_release(struct ceph_pagelist *pl);
@@ -67,7 +59,7 @@ static inline int ceph_pagelist_encode_8(struct ceph_pagelist *pl, u8 v)
return ceph_pagelist_append(pl, &v, 1);
}
static inline int ceph_pagelist_encode_string(struct ceph_pagelist *pl,
- char *s, size_t len)
+ char *s, u32 len)
{
int ret = ceph_pagelist_encode_32(pl, len);
if (ret)
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index 385db08bb8b2..43a7a1573b51 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef CEPH_RADOS_H
#define CEPH_RADOS_H
@@ -142,8 +143,10 @@ extern const char *ceph_osd_state_name(int s);
/*
* osd map flag bits
*/
-#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC) */
-#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC) */
+#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC),
+ not set since ~luminous */
+#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC),
+ not set since ~luminous */
#define CEPH_OSDMAP_PAUSERD (1<<2) /* pause all reads */
#define CEPH_OSDMAP_PAUSEWR (1<<3) /* pause all writes */
#define CEPH_OSDMAP_PAUSEREC (1<<4) /* pause recovery */
@@ -158,6 +161,10 @@ extern const char *ceph_osd_state_name(int s);
#define CEPH_OSDMAP_NOTIERAGENT (1<<13) /* disable tiering agent */
#define CEPH_OSDMAP_NOREBALANCE (1<<14) /* block osd backfill unless pg is degraded */
#define CEPH_OSDMAP_SORTBITWISE (1<<15) /* use bitwise hobject_t sort */
+#define CEPH_OSDMAP_REQUIRE_JEWEL (1<<16) /* require jewel for booting osds */
+#define CEPH_OSDMAP_REQUIRE_KRAKEN (1<<17) /* require kraken for booting osds */
+#define CEPH_OSDMAP_REQUIRE_LUMINOUS (1<<18) /* require l for booting osds */
+#define CEPH_OSDMAP_RECOVERY_DELETES (1<<19) /* deletes performed during recovery instead of peering */
/*
* The error code to return when an OSD can't handle a write
@@ -226,7 +233,6 @@ extern const char *ceph_osd_state_name(int s);
\
/* fancy write */ \
f(APPEND, __CEPH_OSD_OP(WR, DATA, 6), "append") \
- f(STARTSYNC, __CEPH_OSD_OP(WR, DATA, 7), "startsync") \
f(SETTRUNC, __CEPH_OSD_OP(WR, DATA, 8), "settrunc") \
f(TRIMTRUNC, __CEPH_OSD_OP(WR, DATA, 9), "trimtrunc") \
\
@@ -252,6 +258,7 @@ extern const char *ceph_osd_state_name(int s);
\
/* tiering */ \
f(COPY_FROM, __CEPH_OSD_OP(WR, DATA, 26), "copy-from") \
+ f(COPY_FROM2, __CEPH_OSD_OP(WR, DATA, 45), "copy-from2") \
f(COPY_GET_CLASSIC, __CEPH_OSD_OP(RD, DATA, 27), "copy-get-classic") \
f(UNDIRTY, __CEPH_OSD_OP(WR, DATA, 28), "undirty") \
f(ISDIRTY, __CEPH_OSD_OP(RD, DATA, 29), "isdirty") \
@@ -406,10 +413,18 @@ enum {
enum {
CEPH_OSD_OP_FLAG_EXCL = 1, /* EXCL object create */
CEPH_OSD_OP_FLAG_FAILOK = 2, /* continue despite failure */
+ CEPH_OSD_OP_FLAG_FADVISE_RANDOM = 0x4, /* the op is random */
+ CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL = 0x8, /* the op is sequential */
+ CEPH_OSD_OP_FLAG_FADVISE_WILLNEED = 0x10,/* data will be accessed in
+ the near future */
+ CEPH_OSD_OP_FLAG_FADVISE_DONTNEED = 0x20,/* data will not be accessed
+ in the near future */
+ CEPH_OSD_OP_FLAG_FADVISE_NOCACHE = 0x40,/* data will be accessed only
+ once by this client */
};
#define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/
-#define EBLACKLISTED ESHUTDOWN /* blacklisted */
+#define EBLOCKLISTED ESHUTDOWN /* blocklisted */
/* xattr comparison */
enum {
@@ -428,6 +443,16 @@ enum {
};
enum {
+ CEPH_OSD_COPY_FROM_FLAG_FLUSH = 1, /* part of a flush operation */
+ CEPH_OSD_COPY_FROM_FLAG_IGNORE_OVERLAY = 2, /* ignore pool overlay */
+ CEPH_OSD_COPY_FROM_FLAG_IGNORE_CACHE = 4, /* ignore osd cache logic */
+ CEPH_OSD_COPY_FROM_FLAG_MAP_SNAP_CLONE = 8, /* map snap direct to
+ * cloneid */
+ CEPH_OSD_COPY_FROM_FLAG_RWORDERED = 16, /* order with write */
+ CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ = 32, /* send truncate_{seq,size} */
+};
+
+enum {
CEPH_OSD_WATCH_OP_UNWATCH = 0,
CEPH_OSD_WATCH_OP_LEGACY_WATCH = 1,
/* note: use only ODD ids to prevent pre-giant code from
@@ -440,6 +465,19 @@ enum {
const char *ceph_osd_watch_op_name(int o);
enum {
+ CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_WRITE = 1,
+ CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_WRITE = 2,
+ CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_READ = 4,
+ CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_READ = 8,
+ CEPH_OSD_ALLOC_HINT_FLAG_APPEND_ONLY = 16,
+ CEPH_OSD_ALLOC_HINT_FLAG_IMMUTABLE = 32,
+ CEPH_OSD_ALLOC_HINT_FLAG_SHORTLIVED = 64,
+ CEPH_OSD_ALLOC_HINT_FLAG_LONGLIVED = 128,
+ CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE = 256,
+ CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE = 512,
+};
+
+enum {
CEPH_OSD_BACKOFF_OP_BLOCK = 1,
CEPH_OSD_BACKOFF_OP_ACK_BLOCK = 2,
CEPH_OSD_BACKOFF_OP_UNBLOCK = 3,
@@ -492,7 +530,19 @@ struct ceph_osd_op {
struct {
__le64 expected_object_size;
__le64 expected_write_size;
+ __le32 flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
} __attribute__ ((packed)) alloc_hint;
+ struct {
+ __le64 snapid;
+ __le64 src_version;
+ __u8 flags; /* CEPH_OSD_COPY_FROM_FLAG_* */
+ /*
+ * CEPH_OSD_OP_FLAG_FADVISE_*: fadvise flags
+ * for src object, flags for dest object are in
+ * ceph_osd_op::flags.
+ */
+ __le32 src_fadvise_flags;
+ } __attribute__ ((packed)) copy_from;
};
__le32 payload_len;
} __attribute__ ((packed));
diff --git a/include/linux/ceph/string_table.h b/include/linux/ceph/string_table.h
index 1b02c96daf75..a4a9962d1e14 100644
--- a/include/linux/ceph/string_table.h
+++ b/include/linux/ceph/string_table.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_STRING_TABLE_H
#define _FS_CEPH_STRING_TABLE_H
diff --git a/include/linux/ceph/striper.h b/include/linux/ceph/striper.h
new file mode 100644
index 000000000000..3486636c0e6e
--- /dev/null
+++ b/include/linux/ceph/striper.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CEPH_STRIPER_H
+#define _LINUX_CEPH_STRIPER_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+
+struct ceph_file_layout;
+
+void ceph_calc_file_object_mapping(struct ceph_file_layout *l,
+ u64 off, u64 len,
+ u64 *objno, u64 *objoff, u32 *xlen);
+
+struct ceph_object_extent {
+ struct list_head oe_item;
+ u64 oe_objno;
+ u64 oe_off;
+ u64 oe_len;
+};
+
+static inline void ceph_object_extent_init(struct ceph_object_extent *ex)
+{
+ INIT_LIST_HEAD(&ex->oe_item);
+}
+
+/*
+ * Called for each mapped stripe unit.
+ *
+ * @bytes: number of bytes mapped, i.e. the minimum of the full length
+ *	   requested (file extent length) and the remainder of the stripe
+ * unit within an object
+ */
+typedef void (*ceph_object_extent_fn_t)(struct ceph_object_extent *ex,
+ u32 bytes, void *arg);
+
+int ceph_file_to_extents(struct ceph_file_layout *l, u64 off, u64 len,
+ struct list_head *object_extents,
+ struct ceph_object_extent *alloc_fn(void *arg),
+ void *alloc_arg,
+ ceph_object_extent_fn_t action_fn,
+ void *action_arg);
+int ceph_iterate_extents(struct ceph_file_layout *l, u64 off, u64 len,
+ struct list_head *object_extents,
+ ceph_object_extent_fn_t action_fn,
+ void *action_arg);
+
+struct ceph_file_extent {
+ u64 fe_off;
+ u64 fe_len;
+};
+
+static inline u64 ceph_file_extents_bytes(struct ceph_file_extent *file_extents,
+ u32 num_file_extents)
+{
+ u64 bytes = 0;
+ u32 i;
+
+ for (i = 0; i < num_file_extents; i++)
+ bytes += file_extents[i].fe_len;
+
+ return bytes;
+}
+
+int ceph_extent_to_file(struct ceph_file_layout *l,
+ u64 objno, u64 objoff, u64 objlen,
+ struct ceph_file_extent **file_extents,
+ u32 *num_file_extents);
+
+u64 ceph_get_num_objects(struct ceph_file_layout *l, u64 size);
+
+#endif
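
A hedged sketch of walking a file range with
ceph_calc_file_object_mapping(); layout, off and len come from the caller:

	u64 objno, objoff;
	u32 xlen;

	while (len) {
		/* xlen is capped at the remainder of one stripe unit */
		ceph_calc_file_object_mapping(layout, off, len,
					      &objno, &objoff, &xlen);
		/* ... issue xlen bytes of I/O to object objno at objoff ... */
		off += xlen;
		len -= xlen;
	}
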
diff --git a/include/linux/ceph/types.h b/include/linux/ceph/types.h
index d3ff1cf2d27e..bd3d532902d7 100644
--- a/include/linux/ceph/types.h
+++ b/include/linux/ceph/types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_TYPES_H
#define _FS_CEPH_TYPES_H
@@ -23,6 +24,7 @@ struct ceph_vino {
/* context for the caps reservation mechanism */
struct ceph_cap_reservation {
int count;
+ int used;
};
diff --git a/include/linux/cfag12864b.h b/include/linux/cfag12864b.h
index b454dfce60d9..6617d9c68d86 100644
--- a/include/linux/cfag12864b.h
+++ b/include/linux/cfag12864b.h
@@ -1,25 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Filename: cfag12864b.h
* Version: 0.1.0
* Description: cfag12864b LCD driver header
- * License: GPLv2
*
- * Author: Copyright (C) Miguel Ojeda Sandonis
+ * Author: Copyright (C) Miguel Ojeda <ojeda@kernel.org>
* Date: 2006-10-12
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _CFAG12864B_H_
diff --git a/include/linux/cfi.h b/include/linux/cfi.h
new file mode 100644
index 000000000000..5e134f4ce8b7
--- /dev/null
+++ b/include/linux/cfi.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Clang Control Flow Integrity (CFI) support.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+#ifndef _LINUX_CFI_H
+#define _LINUX_CFI_H
+
+#include <linux/bug.h>
+#include <linux/module.h>
+
+#ifdef CONFIG_CFI_CLANG
+enum bug_trap_type report_cfi_failure(struct pt_regs *regs, unsigned long addr,
+ unsigned long *target, u32 type);
+
+static inline enum bug_trap_type report_cfi_failure_noaddr(struct pt_regs *regs,
+ unsigned long addr)
+{
+ return report_cfi_failure(regs, addr, NULL, 0);
+}
+
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+bool is_cfi_trap(unsigned long addr);
+#endif
+#endif /* CONFIG_CFI_CLANG */
+
+#ifdef CONFIG_MODULES
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+void module_cfi_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *mod);
+#else
+static inline void module_cfi_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *mod) {}
+#endif /* CONFIG_ARCH_USES_CFI_TRAPS */
+#endif /* CONFIG_MODULES */
+
+#endif /* _LINUX_CFI_H */
diff --git a/include/linux/cfi_types.h b/include/linux/cfi_types.h
new file mode 100644
index 000000000000..6b8713675765
--- /dev/null
+++ b/include/linux/cfi_types.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Clang Control Flow Integrity (CFI) type definitions.
+ */
+#ifndef _LINUX_CFI_TYPES_H
+#define _LINUX_CFI_TYPES_H
+
+#ifdef __ASSEMBLY__
+#include <linux/linkage.h>
+
+#ifdef CONFIG_CFI_CLANG
+/*
+ * Use the __kcfi_typeid_<function> type identifier symbol to
+ * annotate indirectly called assembly functions. The compiler emits
+ * these symbols for all address-taken function declarations in C
+ * code.
+ */
+#ifndef __CFI_TYPE
+#define __CFI_TYPE(name) \
+ .4byte __kcfi_typeid_##name
+#endif
+
+#define SYM_TYPED_ENTRY(name, linkage, align...) \
+ linkage(name) ASM_NL \
+ align ASM_NL \
+ __CFI_TYPE(name) ASM_NL \
+ name:
+
+#define SYM_TYPED_START(name, linkage, align...) \
+ SYM_TYPED_ENTRY(name, linkage, align)
+
+#else /* CONFIG_CFI_CLANG */
+
+#define SYM_TYPED_START(name, linkage, align...) \
+ SYM_START(name, linkage, align)
+
+#endif /* CONFIG_CFI_CLANG */
+
+#ifndef SYM_TYPED_FUNC_START
+#define SYM_TYPED_FUNC_START(name) \
+ SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* _LINUX_CFI_TYPES_H */
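
In assembly, SYM_TYPED_FUNC_START() stands in for SYM_FUNC_START() on
functions that C code calls indirectly, so the KCFI type hash lands just
before the symbol. A sketch (my_asm_func is hypothetical and needs an
address-taken C declaration so the compiler emits
__kcfi_typeid_my_asm_func; the return instruction is arch-specific):

	SYM_TYPED_FUNC_START(my_asm_func)
		/* body ... */
		ret
	SYM_FUNC_END(my_asm_func)
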
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 09f4c7df1478..8a0d5466c7be 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/cgroup-defs.h - basic definitions for cgroup
*
@@ -16,8 +17,10 @@
#include <linux/refcount.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
+#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
-#include <linux/bpf-cgroup.h>
+#include <linux/bpf-cgroup-defs.h>
+#include <linux/psi_types.h>
#ifdef CONFIG_CGROUPS
@@ -29,6 +32,7 @@ struct kernfs_node;
struct kernfs_ops;
struct kernfs_open_file;
struct seq_file;
+struct poll_table_struct;
#define MAX_CGROUP_TYPE_NAMELEN 32
#define MAX_CGROUP_ROOT_NAMELEN 64
@@ -61,6 +65,15 @@ enum {
* specified at mount time and thus is implemented here.
*/
CGRP_CPUSET_CLONE_CHILDREN,
+
+ /* Control group has to be frozen. */
+ CGRP_FREEZE,
+
+ /* Cgroup is frozen. */
+ CGRP_FROZEN,
+
+ /* Control group has to be killed. */
+ CGRP_KILL,
};
/* cgroup_root->flags */
@@ -74,6 +87,34 @@ enum {
* aren't writeable from inside the namespace.
*/
CGRP_ROOT_NS_DELEGATE = (1 << 3),
+
+ /*
+ * Reduce latencies on dynamic cgroup modifications such as task
+ * migrations and controller on/offs by disabling percpu operation on
+	 * cgroup_threadgroup_rwsem. This pushes hot path operations such as
+	 * forks and exits into the slow path, making them more expensive.
+ *
+ * The static usage pattern of creating a cgroup, enabling controllers,
+ * and then seeding it with CLONE_INTO_CGROUP doesn't require write
+ * locking cgroup_threadgroup_rwsem and thus doesn't benefit from
+ * favordynmod.
+ */
+ CGRP_ROOT_FAVOR_DYNMODS = (1 << 4),
+
+ /*
+ * Enable cpuset controller in v1 cgroup to use v2 behavior.
+ */
+ CGRP_ROOT_CPUSET_V2_MODE = (1 << 16),
+
+ /*
+ * Enable legacy local memory.events.
+ */
+ CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 17),
+
+ /*
+ * Enable recursive subtree protection
+ */
+ CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 18),
};
/* cftype->flags */
@@ -84,10 +125,12 @@ enum {
CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */
+ CFTYPE_DEBUG = (1 << 5), /* create when cgroup_debug */
/* internal flags, do not use outside cgroup core proper */
__CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
__CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */
+ __CFTYPE_ADDED = (1 << 18),
};
/*
@@ -98,6 +141,8 @@ enum {
struct cgroup_file {
/* do not access any fields from outside cgroup core */
struct kernfs_node *kn;
+ unsigned long notified_at;
+ struct timer_list notify_timer;
};
/*
@@ -121,6 +166,9 @@ struct cgroup_subsys_state {
struct list_head sibling;
struct list_head children;
+ /* flush target list anchored at cgrp->rstat_css_list */
+ struct list_head rstat_css_node;
+
/*
* PI: Subsys-unique ID. 0 is unused and root is always 1. The
* matching css can be looked up using css_from_id().
@@ -144,8 +192,8 @@ struct cgroup_subsys_state {
atomic_t online_cnt;
/* percpu_ref killing and RCU release */
- struct rcu_head rcu_head;
struct work_struct destroy_work;
+ struct rcu_work destroy_rwork;
/*
* PI: the parent css. Placed here for cache proximity to following
@@ -172,6 +220,14 @@ struct css_set {
/* reference count */
refcount_t refcount;
+ /*
+ * For a domain cgroup, the following points to self. If threaded,
+ * to the matching cset of the nearest domain ancestor. The
+ * dom_cset provides access to the domain cgroup and its csses to
+ * which domain level resource consumptions should be charged.
+ */
+ struct css_set *dom_cset;
+
/* the default cgroup associated with this css_set */
struct cgroup *dfl_cgrp;
@@ -187,12 +243,13 @@ struct css_set {
*/
struct list_head tasks;
struct list_head mg_tasks;
+ struct list_head dying_tasks;
/* all css_task_iters currently walking this cset */
struct list_head task_iters;
/*
- * On the default hierarhcy, ->subsys[ssid] may point to a css
+ * On the default hierarchy, ->subsys[ssid] may point to a css
* attached to an ancestor instead of the cgroup this css_set is
* associated with. The following node is anchored at
* ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
@@ -200,6 +257,10 @@ struct css_set {
*/
struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
+ /* all threaded csets whose ->dom_cset points to this cset */
+ struct list_head threaded_csets;
+ struct list_head threaded_csets_node;
+
/*
* List running through all cgroup groups in the same hash
* slot. Protected by css_set_lock
@@ -216,7 +277,8 @@ struct css_set {
* List of csets participating in the on-going migration either as
* source or destination. Protected by cgroup_mutex.
*/
- struct list_head mg_preload_node;
+ struct list_head mg_src_preload_node;
+ struct list_head mg_dst_preload_node;
struct list_head mg_node;
/*
@@ -237,46 +299,142 @@ struct css_set {
struct rcu_head rcu_head;
};
-struct cgroup {
- /* self css with NULL ->ss, points back to this cgroup */
- struct cgroup_subsys_state self;
+struct cgroup_base_stat {
+ struct task_cputime cputime;
- unsigned long flags; /* "unsigned long" so bitops work */
+#ifdef CONFIG_SCHED_CORE
+ u64 forceidle_sum;
+#endif
+};
+
+/*
+ * rstat - cgroup scalable recursive statistics. Accounting is done
+ * per-cpu in cgroup_rstat_cpu which is then lazily propagated up the
+ * hierarchy on reads.
+ *
+ * When a stat gets updated, the cgroup_rstat_cpu and its ancestors are
+ * linked into the updated tree. On the following read, propagation only
+ * considers and consumes the updated tree. This makes reading O(the
+ * number of descendants which have been active since last read) instead of
+ * O(the total number of descendants).
+ *
+ * This is important because there can be a lot of (draining) cgroups which
+ * aren't active and stat may be read frequently. The combination can
+ * become very expensive. By propagating selectively, increasing reading
+ * frequency decreases the cost of each read.
+ *
+ * This struct hosts both the fields which implement the above -
+ * updated_children and updated_next - and the fields which track basic
+ * resource statistics on top of it - bsync, bstat and last_bstat.
+ */
+struct cgroup_rstat_cpu {
+ /*
+ * ->bsync protects ->bstat. These are the only fields which get
+ * updated in the hot path.
+ */
+ struct u64_stats_sync bsync;
+ struct cgroup_base_stat bstat;
+
+ /*
+ * Snapshots at the last reading. These are used to calculate the
+ * deltas to propagate to the global counters.
+ */
+ struct cgroup_base_stat last_bstat;
/*
- * idr allocated in-hierarchy ID.
+ * Child cgroups with stat updates on this cpu since the last read
+ * are linked on the parent's ->updated_children through
+ * ->updated_next.
*
- * ID 0 is not used, the ID of the root cgroup is always 1, and a
- * new cgroup will be assigned with a smallest available ID.
+	 * In addition to being more compact, a singly-linked list pointing
+	 * to the cgroup makes it unnecessary for each per-cpu struct to
+ * point back to the associated cgroup.
*
- * Allocating/Removing ID must be protected by cgroup_mutex.
+ * Protected by per-cpu cgroup_rstat_cpu_lock.
*/
- int id;
+ struct cgroup *updated_children; /* terminated by self cgroup */
+ struct cgroup *updated_next; /* NULL iff not on the list */
+};
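
A rough sketch of the consume-on-read idea, not the kernel's actual walker
(cgroup_rstat_cpu() stands for the per-cpu accessor): only cgroups on the
updated tree are visited, so a flush costs O(descendants active since the
last read):

	/* for one cpu: pop each updated child and fold its delta upward */
	for (pos = rstatc->updated_children; pos != cgrp; pos = next) {
		next = cgroup_rstat_cpu(pos, cpu)->updated_next;
		/* ... propagate pos's bstat delta to its parent ... */
	}
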
+
+struct cgroup_freezer_state {
+	/* Should the cgroup and its descendants be frozen? */
+ bool freeze;
+
+ /* Should the cgroup actually be frozen? */
+ int e_freeze;
+
+ /* Fields below are protected by css_set_lock */
+
+ /* Number of frozen descendant cgroups */
+ int nr_frozen_descendants;
+
+ /*
+ * Number of tasks, which are counted as frozen:
+	 * Number of tasks that are counted as frozen:
+ */
+ int nr_frozen_tasks;
+};
+
+struct cgroup {
+ /* self css with NULL ->ss, points back to this cgroup */
+ struct cgroup_subsys_state self;
+
+ unsigned long flags; /* "unsigned long" so bitops work */
/*
* The depth this cgroup is at. The root is at depth zero and each
* step down the hierarchy increments the level. This along with
- * ancestor_ids[] can determine whether a given cgroup is a
+ * ancestors[] can determine whether a given cgroup is a
* descendant of another without traversing the hierarchy.
*/
int level;
+ /* Maximum allowed descent tree depth */
+ int max_depth;
+
+ /*
+	 * Keep track of the total numbers of visible and dying descendant
+	 * cgroups. Dying cgroups are cgroups which were deleted by a user
+	 * but still exist because someone else is holding a reference.
+	 * max_descendants is the maximum allowed number of descendant cgroups.
+ *
+ * nr_descendants and nr_dying_descendants are protected
+ * by cgroup_mutex and css_set_lock. It's fine to read them holding
+ * any of cgroup_mutex and css_set_lock; for writing both locks
+ * should be held.
+ */
+ int nr_descendants;
+ int nr_dying_descendants;
+ int max_descendants;
+
/*
* Each non-empty css_set associated with this cgroup contributes
- * one to populated_cnt. All children with non-zero popuplated_cnt
- * of their own contribute one. The count is zero iff there's no
- * task in this cgroup or its subtree.
+ * one to nr_populated_csets. The counter is zero iff this cgroup
+ * doesn't have any tasks.
+ *
+ * All children which have non-zero nr_populated_csets and/or
+ * nr_populated_children of their own contribute one to either
+ * nr_populated_domain_children or nr_populated_threaded_children
+ * depending on their type. Each counter is zero iff all cgroups
+ * of the type in the subtree proper don't have any tasks.
*/
- int populated_cnt;
+ int nr_populated_csets;
+ int nr_populated_domain_children;
+ int nr_populated_threaded_children;
+
+ int nr_threaded_children; /* # of live threaded child cgroups */
struct kernfs_node *kn; /* cgroup kernfs entry */
struct cgroup_file procs_file; /* handle for "cgroup.procs" */
struct cgroup_file events_file; /* handle for "cgroup.events" */
+ /* handles for "{cpu,memory,io,irq}.pressure" */
+ struct cgroup_file psi_files[NR_PSI_RESOURCES];
+
/*
* The bitmask of subsystems enabled on the child cgroups.
* ->subtree_control is the one configured through
- * "cgroup.subtree_control" while ->child_ss_mask is the effective
+ * "cgroup.subtree_control" while ->subtree_ss_mask is the effective
* one which may have more subsystems enabled. Controller knobs
* are made available iff it's enabled in ->subtree_control.
*/
@@ -306,6 +464,25 @@ struct cgroup {
struct list_head e_csets[CGROUP_SUBSYS_COUNT];
/*
+ * If !threaded, self. If threaded, it points to the nearest
+ * domain ancestor. Inside a threaded subtree, cgroups are exempt
+ * from process granularity and no-internal-task constraint.
+ * Domain level resource consumptions which aren't tied to a
+ * specific task are charged to the dom_cgrp.
+ */
+ struct cgroup *dom_cgrp;
+ struct cgroup *old_dom_cgrp; /* used while enabling threaded */
+
+ /* per-cpu recursive resource statistics */
+ struct cgroup_rstat_cpu __percpu *rstat_cpu;
+ struct list_head rstat_css_list;
+
+ /* cgroup basic resource statistics */
+ struct cgroup_base_stat last_bstat;
+ struct cgroup_base_stat bstat;
+ struct prev_cputime prev_cputime; /* for printing out cputime */
+
+ /*
* list of pidlists, up to two for each namespace (one for procs, one
* for tasks); created on demand.
*/
@@ -318,11 +495,24 @@ struct cgroup {
/* used to schedule release agent */
struct work_struct release_agent_work;
+ /* used to track pressure stalls */
+ struct psi_group *psi;
+
/* used to store eBPF programs */
struct cgroup_bpf bpf;
- /* ids of the ancestors at each level including self */
- int ancestor_ids[];
+ /* If there is block congestion on this cgroup. */
+ atomic_t congestion_count;
+
+ /* Used to store internal freezer state */
+ struct cgroup_freezer_state freezer;
+
+#ifdef CONFIG_BPF_SYSCALL
+ struct bpf_local_storage __rcu *bpf_cgrp_storage;
+#endif
+
+ /* All ancestors including self */
+ struct cgroup *ancestors[];
};
/*
@@ -339,11 +529,15 @@ struct cgroup_root {
/* Unique id for this hierarchy. */
int hierarchy_id;
- /* The root cgroup. Root is destroyed on its release. */
+ /*
+ * The root cgroup. The containing cgroup_root will be destroyed on its
+	 * release. cgrp->ancestors[0] overflows into the following
+	 * field; cgrp_ancestor_storage must immediately follow.
+ */
struct cgroup cgrp;
- /* for cgrp->ancestor_ids[0] */
- int cgrp_ancestor_id_storage;
+ /* must follow cgrp for cgrp->ancestors[0], see above */
+ struct cgroup *cgrp_ancestor_storage;
/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
atomic_t nr_cgrps;
@@ -354,9 +548,6 @@ struct cgroup_root {
/* Hierarchy-specific flags */
unsigned int flags;
- /* IDs for cgroups in this hierarchy */
- struct idr cgroup_idr;
-
/* The path to use for release notifications. */
char release_agent_path[PATH_MAX];
@@ -448,6 +639,9 @@ struct cftype {
ssize_t (*write)(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off);
+ __poll_t (*poll)(struct kernfs_open_file *of,
+ struct poll_table_struct *pt);
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lock_class_key lockdep_key;
#endif
@@ -455,7 +649,7 @@ struct cftype {
/*
* Control Group subsystem type.
- * See Documentation/cgroups/cgroups.txt for details
+ * See Documentation/admin-guide/cgroup-v1/cgroups.rst for details
*/
struct cgroup_subsys {
struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
@@ -464,16 +658,20 @@ struct cgroup_subsys {
void (*css_released)(struct cgroup_subsys_state *css);
void (*css_free)(struct cgroup_subsys_state *css);
void (*css_reset)(struct cgroup_subsys_state *css);
+ void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
+ int (*css_extra_stat_show)(struct seq_file *seq,
+ struct cgroup_subsys_state *css);
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
void (*attach)(struct cgroup_taskset *tset);
void (*post_attach)(void);
- int (*can_fork)(struct task_struct *task);
- void (*cancel_fork)(struct task_struct *task);
+ int (*can_fork)(struct task_struct *task,
+ struct css_set *cset);
+ void (*cancel_fork)(struct task_struct *task, struct css_set *cset);
void (*fork)(struct task_struct *task);
void (*exit)(struct task_struct *task);
- void (*free)(struct task_struct *task);
+ void (*release)(struct task_struct *task);
void (*bind)(struct cgroup_subsys_state *root_css);
bool early_init:1;
@@ -492,21 +690,18 @@ struct cgroup_subsys {
bool implicit_on_dfl:1;
/*
- * If %false, this subsystem is properly hierarchical -
- * configuration, resource accounting and restriction on a parent
- * cgroup cover those of its children. If %true, hierarchy support
- * is broken in some ways - some subsystems ignore hierarchy
- * completely while others are only implemented half-way.
+	 * If %true, the controller supports threaded mode on the default
+	 * hierarchy. In a threaded subtree, both the process granularity and
+	 * the no-internal-process constraint are ignored and a threaded
+	 * controller should be able to handle that.
*
- * It's now disallowed to create nested cgroups if the subsystem is
- * broken and cgroup core will emit a warning message on such
- * cases. Eventually, all subsystems will be made properly
- * hierarchical and this will go away.
+ * Note that as an implicit controller is automatically enabled on
+ * all cgroups on the default hierarchy, it should also be
+ * threaded. implicit && !threaded is not supported.
*/
- bool broken_hierarchy:1;
- bool warned_broken_hierarchy:1;
+ bool threaded:1;
- /* the following two fields are initialized automtically during boot */
+ /* the following two fields are initialized automatically during boot */
int id;
const char *name;
@@ -586,103 +781,54 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
* sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
* per-socket cgroup information except for memcg association.
*
- * On legacy hierarchies, net_prio and net_cls controllers directly set
- * attributes on each sock which can then be tested by the network layer.
- * On the default hierarchy, each sock is associated with the cgroup it was
- * created in and the networking layer can match the cgroup directly.
- *
- * To avoid carrying all three cgroup related fields separately in sock,
- * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
- * On boot, sock_cgroup_data records the cgroup that the sock was created
- * in so that cgroup2 matches can be made; however, once either net_prio or
- * net_cls starts being used, the area is overriden to carry prioidx and/or
- * classid. The two modes are distinguished by whether the lowest bit is
- * set. Clear bit indicates cgroup pointer while set bit prioidx and
- * classid.
- *
- * While userland may start using net_prio or net_cls at any time, once
- * either is used, cgroup2 matching no longer works. There is no reason to
- * mix the two and this is in line with how legacy and v2 compatibility is
- * handled. On mode switch, cgroup references which are already being
- * pointed to by socks may be leaked. While this can be remedied by adding
- * synchronization around sock_cgroup_data, given that the number of leaked
- * cgroups is bound and highly unlikely to be high, this seems to be the
- * better trade-off.
+ * On legacy hierarchies, net_prio and net_cls controllers directly
+ * set attributes on each sock which can then be tested by the network
+ * layer. On the default hierarchy, each sock is associated with the
+ * cgroup it was created in and the networking layer can match the
+ * cgroup directly.
*/
struct sock_cgroup_data {
- union {
-#ifdef __LITTLE_ENDIAN
- struct {
- u8 is_data;
- u8 padding;
- u16 prioidx;
- u32 classid;
- } __packed;
-#else
- struct {
- u32 classid;
- u16 prioidx;
- u8 padding;
- u8 is_data;
- } __packed;
+ struct cgroup *cgroup; /* v2 */
+#ifdef CONFIG_CGROUP_NET_CLASSID
+ u32 classid; /* v1 */
+#endif
+#ifdef CONFIG_CGROUP_NET_PRIO
+ u16 prioidx; /* v1 */
#endif
- u64 val;
- };
};
-/*
- * There's a theoretical window where the following accessors race with
- * updaters and return part of the previous pointer as the prioidx or
- * classid. Such races are short-lived and the result isn't critical.
- */
-static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd)
+static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
- /* fallback to 1 which is always the ID of the root cgroup */
- return (skcd->is_data & 1) ? skcd->prioidx : 1;
+#ifdef CONFIG_CGROUP_NET_PRIO
+ return READ_ONCE(skcd->prioidx);
+#else
+ return 1;
+#endif
}
-static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd)
+static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
- /* fallback to 0 which is the unconfigured default classid */
- return (skcd->is_data & 1) ? skcd->classid : 0;
+#ifdef CONFIG_CGROUP_NET_CLASSID
+ return READ_ONCE(skcd->classid);
+#else
+ return 0;
+#endif
}
-/*
- * If invoked concurrently, the updaters may clobber each other. The
- * caller is responsible for synchronization.
- */
static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
u16 prioidx)
{
- struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
-
- if (sock_cgroup_prioidx(&skcd_buf) == prioidx)
- return;
-
- if (!(skcd_buf.is_data & 1)) {
- skcd_buf.val = 0;
- skcd_buf.is_data = 1;
- }
-
- skcd_buf.prioidx = prioidx;
- WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
+#ifdef CONFIG_CGROUP_NET_PRIO
+ WRITE_ONCE(skcd->prioidx, prioidx);
+#endif
}
static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
u32 classid)
{
- struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
-
- if (sock_cgroup_classid(&skcd_buf) == classid)
- return;
-
- if (!(skcd_buf.is_data & 1)) {
- skcd_buf.val = 0;
- skcd_buf.is_data = 1;
- }
-
- skcd_buf.classid = classid;
- WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
+#ifdef CONFIG_CGROUP_NET_CLASSID
+ WRITE_ONCE(skcd->classid, classid);
+#endif
}
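
With the union gone, the v1 fields are plain READ_ONCE()/WRITE_ONCE()
slots and v2 matching through ->cgroup keeps working alongside them.
Illustrative use on a socket (sk and classid supplied by the caller):

	sock_cgroup_set_classid(&sk->sk_cgrp_data, classid);
	if (sock_cgroup_classid(&sk->sk_cgrp_data) == classid) {
		/* net_cls match path */
	}
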
#else /* CONFIG_SOCK_CGROUP_DATA */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 710a005c6b7a..885f5395fcd0 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
@@ -22,13 +23,16 @@
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
+#include <linux/kernel_stat.h>
#include <linux/cgroup-defs.h>
+struct kernel_clone_args;
+
#ifdef CONFIG_CGROUPS
/*
- * All weight knobs on the default hierarhcy should use the following min,
+ * All weight knobs on the default hierarchy should use the following min,
* default and max values. The default value is the logarithmic center of
* MIN and MAX and allows 100x to be expressed in both directions.
*/
@@ -36,22 +40,35 @@
#define CGROUP_WEIGHT_DFL 100
#define CGROUP_WEIGHT_MAX 10000
+/* walk only threadgroup leaders */
+#define CSS_TASK_ITER_PROCS (1U << 0)
+/* walk all threaded css_sets in the domain */
+#define CSS_TASK_ITER_THREADED (1U << 1)
+
+/* internal flags */
+#define CSS_TASK_ITER_SKIPPED (1U << 16)
+
/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
struct cgroup_subsys *ss;
+ unsigned int flags;
struct list_head *cset_pos;
struct list_head *cset_head;
+ struct list_head *tcset_pos;
+ struct list_head *tcset_head;
+
struct list_head *task_pos;
- struct list_head *tasks_head;
- struct list_head *mg_tasks_head;
+ struct list_head *cur_tasks_head;
struct css_set *cur_cset;
+ struct css_set *cur_dcset;
struct task_struct *cur_task;
struct list_head iters_node; /* css_set->task_iters */
};
+extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
@@ -81,6 +98,8 @@ extern struct css_set init_css_set;
bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
+struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
+ struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
@@ -88,6 +107,7 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);
+struct cgroup *cgroup_v1v2_get_from_fd(int fd);
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
@@ -96,6 +116,7 @@ int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);
+void cgroup_file_show(struct cgroup_file *cfile, bool show);
int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
@@ -103,15 +124,21 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk);
void cgroup_fork(struct task_struct *p);
-extern int cgroup_can_fork(struct task_struct *p);
-extern void cgroup_cancel_fork(struct task_struct *p);
-extern void cgroup_post_fork(struct task_struct *p);
+extern int cgroup_can_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs);
+extern void cgroup_cancel_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs);
+extern void cgroup_post_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs);
void cgroup_exit(struct task_struct *p);
+void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);
int cgroup_init_early(void);
int cgroup_init(void);
+int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);
+
/*
* Iteration helpers and macros.
*/
@@ -129,7 +156,7 @@ struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
struct cgroup_subsys_state **dst_cssp);
-void css_task_iter_start(struct cgroup_subsys_state *css,
+void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
@@ -283,64 +310,22 @@ void css_task_iter_end(struct css_task_iter *it);
* Inline functions.
*/
-/**
- * css_get - obtain a reference on the specified css
- * @css: target css
- *
- * The caller must already have a reference.
- */
-static inline void css_get(struct cgroup_subsys_state *css)
-{
- if (!(css->flags & CSS_NO_REF))
- percpu_ref_get(&css->refcnt);
-}
-
-/**
- * css_get_many - obtain references on the specified css
- * @css: target css
- * @n: number of references to get
- *
- * The caller must already have a reference.
- */
-static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
-{
- if (!(css->flags & CSS_NO_REF))
- percpu_ref_get_many(&css->refcnt, n);
-}
+#ifdef CONFIG_DEBUG_CGROUP_REF
+void css_get(struct cgroup_subsys_state *css);
+void css_get_many(struct cgroup_subsys_state *css, unsigned int n);
+bool css_tryget(struct cgroup_subsys_state *css);
+bool css_tryget_online(struct cgroup_subsys_state *css);
+void css_put(struct cgroup_subsys_state *css);
+void css_put_many(struct cgroup_subsys_state *css, unsigned int n);
+#else
+#define CGROUP_REF_FN_ATTRS static inline
+#define CGROUP_REF_EXPORT(fn)
+#include <linux/cgroup_refcnt.h>
+#endif
-/**
- * css_tryget - try to obtain a reference on the specified css
- * @css: target css
- *
- * Obtain a reference on @css unless it already has reached zero and is
- * being released. This function doesn't care whether @css is on or
- * offline. The caller naturally needs to ensure that @css is accessible
- * but doesn't have to be holding a reference on it - IOW, RCU protected
- * access is good enough for this function. Returns %true if a reference
- * count was successfully obtained; %false otherwise.
- */
-static inline bool css_tryget(struct cgroup_subsys_state *css)
+static inline u64 cgroup_id(const struct cgroup *cgrp)
{
- if (!(css->flags & CSS_NO_REF))
- return percpu_ref_tryget(&css->refcnt);
- return true;
-}
-
-/**
- * css_tryget_online - try to obtain a reference on the specified css if online
- * @css: target css
- *
- * Obtain a reference on @css if it's online. The caller naturally needs
- * to ensure that @css is accessible but doesn't have to be holding a
- * reference on it - IOW, RCU protected access is good enough for this
- * function. Returns %true if a reference count was successfully obtained;
- * %false otherwise.
- */
-static inline bool css_tryget_online(struct cgroup_subsys_state *css)
-{
- if (!(css->flags & CSS_NO_REF))
- return percpu_ref_tryget_live(&css->refcnt);
- return true;
+ return cgrp->kn->id;
}
/**
@@ -363,29 +348,14 @@ static inline bool css_is_dying(struct cgroup_subsys_state *css)
return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}
-/**
- * css_put - put a css reference
- * @css: target css
- *
- * Put a reference obtained via css_get() and css_tryget_online().
- */
-static inline void css_put(struct cgroup_subsys_state *css)
+static inline void cgroup_get(struct cgroup *cgrp)
{
- if (!(css->flags & CSS_NO_REF))
- percpu_ref_put(&css->refcnt);
+ css_get(&cgrp->self);
}
-/**
- * css_put_many - put css references
- * @css: target css
- * @n: number of references to put
- *
- * Put references obtained via css_get() and css_tryget_online().
- */
-static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
+static inline bool cgroup_tryget(struct cgroup *cgrp)
{
- if (!(css->flags & CSS_NO_REF))
- percpu_ref_put_many(&css->refcnt, n);
+ return css_tryget(&cgrp->self);
}
static inline void cgroup_put(struct cgroup *cgrp)
@@ -393,6 +363,18 @@ static inline void cgroup_put(struct cgroup *cgrp)
css_put(&cgrp->self);
}
+extern struct mutex cgroup_mutex;
+
+static inline void cgroup_lock(void)
+{
+ mutex_lock(&cgroup_mutex);
+}
+
+static inline void cgroup_unlock(void)
+{
+ mutex_unlock(&cgroup_mutex);
+}
+
/**
* task_css_set_check - obtain a task's css_set with extra access conditions
* @task: the task to obtain css_set for
@@ -407,10 +389,10 @@ static inline void cgroup_put(struct cgroup *cgrp)
* as locks used during the cgroup_subsys::attach() methods.
*/
#ifdef CONFIG_PROVE_RCU
-extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c) \
rcu_dereference_check((task)->cgroups, \
+ rcu_read_lock_sched_held() || \
lockdep_is_held(&cgroup_mutex) || \
lockdep_is_held(&css_set_lock) || \
((task)->flags & PF_EXITING) || (__c))
@@ -462,7 +444,7 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
*
* Find the css for the (@task, @subsys_id) combination, increment a
* reference on and return it. This function is guaranteed to return a
- * valid css.
+ * valid css. The returned css may already have been offlined.
*/
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
@@ -472,7 +454,13 @@ task_get_css(struct task_struct *task, int subsys_id)
rcu_read_lock();
while (true) {
css = task_css(task, subsys_id);
- if (likely(css_tryget_online(css)))
+ /*
+ * Can't use css_tryget_online() here. A task which has
+ * PF_EXITING set may stay associated with an offline css.
+ * If such task calls this function, css_tryget_online()
+ * will keep failing.
+ */
+ if (likely(css_tryget(css)))
break;
cpu_relax();
}
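
To make the retry semantics above concrete, here is a minimal sketch of a caller; my_scan_task() and the choice of controller are illustrative, not part of this patch:

	static void my_scan_task(struct task_struct *task)
	{
		struct cgroup_subsys_state *css;

		/* Never fails: task_get_css() loops on css_tryget(), which
		 * also succeeds on the offline css an exiting task may
		 * still carry. */
		css = task_get_css(task, memory_cgrp_id);

		/* ... inspect css->cgroup ... */

		css_put(css);	/* pairs with the reference taken above */
	}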
@@ -500,6 +488,20 @@ static inline struct cgroup *task_cgroup(struct task_struct *task,
return task_css(task, subsys_id)->cgroup;
}
+static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
+{
+ return task_css_set(task)->dfl_cgrp;
+}
+
+static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
+{
+ struct cgroup_subsys_state *parent_css = cgrp->self.parent;
+
+ if (parent_css)
+ return container_of(parent_css, struct cgroup, self);
+ return NULL;
+}
+
/**
* cgroup_is_descendant - test ancestry
* @cgrp: the cgroup to be tested
@@ -514,7 +516,26 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
{
if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
return false;
- return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
+ return cgrp->ancestors[ancestor->level] == ancestor;
+}
+
+/**
+ * cgroup_ancestor - find ancestor of cgroup
+ * @cgrp: cgroup to find ancestor of
+ * @ancestor_level: level of ancestor to find starting from root
+ *
+ * Find ancestor of cgroup at specified level starting from root if it exists
+ * and return pointer to it. Return NULL if @cgrp doesn't have ancestor at
+ * @ancestor_level.
+ *
+ * This function is safe to call as long as @cgrp is accessible.
+ */
+static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
+ int ancestor_level)
+{
+ if (ancestor_level < 0 || ancestor_level > cgrp->level)
+ return NULL;
+ return cgrp->ancestors[ancestor_level];
}
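
A hedged reading of the two helpers, assuming a chain root (level 0) -> A (level 1) -> B (level 2):

	/* Both checks are O(1) thanks to the ->ancestors[] array: */
	cgroup_ancestor(B, 1) == A;	/* true */
	cgroup_ancestor(B, 3) == NULL;	/* level beyond B->level */
	cgroup_is_descendant(B, A);	/* true; also true for (B, B) */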
/**
@@ -537,13 +558,14 @@ static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
- return cgrp->populated_cnt;
+ return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
+ cgrp->nr_populated_threaded_children;
}
/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
- return cgrp->kn->ino;
+ return kernfs_ino(cgrp->kn);
}
/* cft/css accessors for cftype->write() operation */
@@ -590,6 +612,8 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
pr_cont_kernfs_path(cgrp->kn);
}
+bool cgroup_psi_enabled(void);
+
static inline void cgroup_init_kthreadd(void)
{
/*
@@ -609,22 +633,32 @@ static inline void cgroup_kthread_ready(void)
current->no_cgroup_migration = 0;
}
+void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
+struct cgroup *cgroup_get_from_id(u64 id);
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
struct cgroup;
+static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
+static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
+static inline void cgroup_lock(void) {}
+static inline void cgroup_unlock(void) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
struct dentry *dentry) { return -EINVAL; }
static inline void cgroup_fork(struct task_struct *p) {}
-static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
-static inline void cgroup_cancel_fork(struct task_struct *p) {}
-static inline void cgroup_post_fork(struct task_struct *p) {}
+static inline int cgroup_can_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs) { return 0; }
+static inline void cgroup_cancel_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs) {}
+static inline void cgroup_post_fork(struct task_struct *p,
+ struct kernel_clone_args *kargs) {}
static inline void cgroup_exit(struct task_struct *p) {}
+static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}
static inline int cgroup_init_early(void) { return 0; }
@@ -632,56 +666,111 @@ static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}
+static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
+{
+ return NULL;
+}
+
+static inline bool cgroup_psi_enabled(void)
+{
+ return false;
+}
+
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
struct cgroup *ancestor)
{
return true;
}
+
+static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
+{}
#endif /* !CONFIG_CGROUPS */
+#ifdef CONFIG_CGROUPS
+/*
+ * cgroup scalable recursive statistics.
+ */
+void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
+void cgroup_rstat_flush(struct cgroup *cgrp);
+void cgroup_rstat_flush_atomic(struct cgroup *cgrp);
+void cgroup_rstat_flush_hold(struct cgroup *cgrp);
+void cgroup_rstat_flush_release(void);
+
+/*
+ * Basic resource stats.
+ */
+#ifdef CONFIG_CGROUP_CPUACCT
+void cpuacct_charge(struct task_struct *tsk, u64 cputime);
+void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
+#else
+static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
+static inline void cpuacct_account_field(struct task_struct *tsk, int index,
+ u64 val) {}
+#endif
+
+void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
+void __cgroup_account_cputime_field(struct cgroup *cgrp,
+ enum cpu_usage_stat index, u64 delta_exec);
+
+static inline void cgroup_account_cputime(struct task_struct *task,
+ u64 delta_exec)
+{
+ struct cgroup *cgrp;
+
+ cpuacct_charge(task, delta_exec);
+
+ cgrp = task_dfl_cgroup(task);
+ if (cgroup_parent(cgrp))
+ __cgroup_account_cputime(cgrp, delta_exec);
+}
+
+static inline void cgroup_account_cputime_field(struct task_struct *task,
+ enum cpu_usage_stat index,
+ u64 delta_exec)
+{
+ struct cgroup *cgrp;
+
+ cpuacct_account_field(task, index, delta_exec);
+
+ cgrp = task_dfl_cgroup(task);
+ if (cgroup_parent(cgrp))
+ __cgroup_account_cputime_field(cgrp, index, delta_exec);
+}
+
+#else /* CONFIG_CGROUPS */
+
+static inline void cgroup_account_cputime(struct task_struct *task,
+ u64 delta_exec) {}
+static inline void cgroup_account_cputime_field(struct task_struct *task,
+ enum cpu_usage_stat index,
+ u64 delta_exec) {}
+
+#endif /* CONFIG_CGROUPS */
+
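
For orientation, a hedged sketch of the intended call sites; both hook names are hypothetical, only the cgroup_account_*() helpers and the CPUTIME_SYSTEM bucket (from the enum cpu_usage_stat in kernel_stat.h included above) come from the headers:

	/* Scheduler side: charge @delta_exec ns of runtime. */
	static void my_update_curr(struct task_struct *curr, u64 delta_exec)
	{
		cgroup_account_cputime(curr, delta_exec); /* cpuacct + rstat */
	}

	/* Cputime-classification side: attribute time to one bucket. */
	static void my_account_system_time(struct task_struct *p, u64 cputime)
	{
		cgroup_account_cputime_field(p, CPUTIME_SYSTEM, cputime);
	}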
/*
* sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
* definition in cgroup-defs.h.
*/
#ifdef CONFIG_SOCK_CGROUP_DATA
-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
-extern spinlock_t cgroup_sk_update_lock;
-#endif
-
-void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
+void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);
static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
- unsigned long v;
-
- /*
- * @skcd->val is 64bit but the following is safe on 32bit too as we
- * just need the lower ulong to be written and read atomically.
- */
- v = READ_ONCE(skcd->val);
-
- if (v & 1)
- return &cgrp_dfl_root.cgrp;
-
- return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
-#else
- return (struct cgroup *)(unsigned long)skcd->val;
-#endif
+ return skcd->cgroup;
}
#else /* CONFIG_SOCK_CGROUP_DATA */
static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
+static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
#endif /* CONFIG_SOCK_CGROUP_DATA */
struct cgroup_namespace {
- refcount_t count;
struct ns_common ns;
struct user_namespace *user_ns;
struct ucounts *ucounts;
@@ -716,13 +805,56 @@ copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
if (ns)
- refcount_inc(&ns->count);
+ refcount_inc(&ns->ns.count);
}
static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
- if (ns && refcount_dec_and_test(&ns->count))
+ if (ns && refcount_dec_and_test(&ns->ns.count))
free_cgroup_ns(ns);
}
+#ifdef CONFIG_CGROUPS
+
+void cgroup_enter_frozen(void);
+void cgroup_leave_frozen(bool always_leave);
+void cgroup_update_frozen(struct cgroup *cgrp);
+void cgroup_freeze(struct cgroup *cgrp, bool freeze);
+void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
+ struct cgroup *dst);
+
+static inline bool cgroup_task_frozen(struct task_struct *task)
+{
+ return task->frozen;
+}
+
+#else /* !CONFIG_CGROUPS */
+
+static inline void cgroup_enter_frozen(void) { }
+static inline void cgroup_leave_frozen(bool always_leave) { }
+static inline bool cgroup_task_frozen(struct task_struct *task)
+{
+ return false;
+}
+
+#endif /* !CONFIG_CGROUPS */
+
+#ifdef CONFIG_CGROUP_BPF
+static inline void cgroup_bpf_get(struct cgroup *cgrp)
+{
+ percpu_ref_get(&cgrp->bpf.refcnt);
+}
+
+static inline void cgroup_bpf_put(struct cgroup *cgrp)
+{
+ percpu_ref_put(&cgrp->bpf.refcnt);
+}
+
+#else /* CONFIG_CGROUP_BPF */
+
+static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
+static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
+
+#endif /* CONFIG_CGROUP_BPF */
+
#endif /* _LINUX_CGROUP_H */
diff --git a/include/linux/cgroup_api.h b/include/linux/cgroup_api.h
new file mode 100644
index 000000000000..d0cfe8025111
--- /dev/null
+++ b/include/linux/cgroup_api.h
@@ -0,0 +1 @@
+#include <linux/cgroup.h>
diff --git a/include/linux/cgroup_rdma.h b/include/linux/cgroup_rdma.h
index e94290b29e99..80edae03c313 100644
--- a/include/linux/cgroup_rdma.h
+++ b/include/linux/cgroup_rdma.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016 Parav Pandit <pandit.parav@gmail.com>
- *
- * This file is subject to the terms and conditions of version 2 of the GNU
- * General Public License. See the file COPYING in the main directory of the
- * Linux distribution for more details.
*/
#ifndef _CGROUP_RDMA_H
@@ -39,7 +36,7 @@ struct rdmacg_device {
* APIs for RDMA/IB stack to publish when a device wants to
* participate in resource accounting
*/
-int rdmacg_register_device(struct rdmacg_device *device);
+void rdmacg_register_device(struct rdmacg_device *device);
void rdmacg_unregister_device(struct rdmacg_device *device);
/* APIs for RDMA/IB stack to charge/uncharge pool specific resources */
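
With the return type dropped, the driver-side call presumably reduces to an unchecked registration; the device below is illustrative:

	static struct rdmacg_device my_rdmacg_dev = {
		.name = "my_hca0",
	};

	static void my_ib_device_init(void)
	{
		/* void return: registration can no longer fail */
		rdmacg_register_device(&my_rdmacg_dev);
	}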
diff --git a/include/linux/cgroup_refcnt.h b/include/linux/cgroup_refcnt.h
new file mode 100644
index 000000000000..2eea0a69ecfc
--- /dev/null
+++ b/include/linux/cgroup_refcnt.h
@@ -0,0 +1,96 @@
+/**
+ * css_get - obtain a reference on the specified css
+ * @css: target css
+ *
+ * The caller must already have a reference.
+ */
+CGROUP_REF_FN_ATTRS
+void css_get(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_get(&css->refcnt);
+}
+CGROUP_REF_EXPORT(css_get)
+
+/**
+ * css_get_many - obtain references on the specified css
+ * @css: target css
+ * @n: number of references to get
+ *
+ * The caller must already have a reference.
+ */
+CGROUP_REF_FN_ATTRS
+void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_get_many(&css->refcnt, n);
+}
+CGROUP_REF_EXPORT(css_get_many)
+
+/**
+ * css_tryget - try to obtain a reference on the specified css
+ * @css: target css
+ *
+ * Obtain a reference on @css unless it already has reached zero and is
+ * being released. This function doesn't care whether @css is on or
+ * offline. The caller naturally needs to ensure that @css is accessible
+ * but doesn't have to be holding a reference on it - IOW, RCU protected
+ * access is good enough for this function. Returns %true if a reference
+ * count was successfully obtained; %false otherwise.
+ */
+CGROUP_REF_FN_ATTRS
+bool css_tryget(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ return percpu_ref_tryget(&css->refcnt);
+ return true;
+}
+CGROUP_REF_EXPORT(css_tryget)
+
+/**
+ * css_tryget_online - try to obtain a reference on the specified css if online
+ * @css: target css
+ *
+ * Obtain a reference on @css if it's online. The caller naturally needs
+ * to ensure that @css is accessible but doesn't have to be holding a
+ * reference on it - IOW, RCU protected access is good enough for this
+ * function. Returns %true if a reference count was successfully obtained;
+ * %false otherwise.
+ */
+CGROUP_REF_FN_ATTRS
+bool css_tryget_online(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ return percpu_ref_tryget_live(&css->refcnt);
+ return true;
+}
+CGROUP_REF_EXPORT(css_tryget_online)
+
+/**
+ * css_put - put a css reference
+ * @css: target css
+ *
+ * Put a reference obtained via css_get() and css_tryget_online().
+ */
+CGROUP_REF_FN_ATTRS
+void css_put(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_put(&css->refcnt);
+}
+CGROUP_REF_EXPORT(css_put)
+
+/**
+ * css_put_many - put css references
+ * @css: target css
+ * @n: number of references to put
+ *
+ * Put references obtained via css_get() and css_tryget_online().
+ */
+CGROUP_REF_FN_ATTRS
+void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_put_many(&css->refcnt, n);
+}
+CGROUP_REF_EXPORT(css_put_many)
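
The point of this template header is that the same bodies expand two ways; a hedged sketch of the two expansions (the CONFIG_DEBUG_CGROUP_REF=y side is an assumption about the kernel/cgroup/cgroup.c consumer):

	/* CONFIG_DEBUG_CGROUP_REF=n, in <linux/cgroup.h>: plain inlines. */
	#define CGROUP_REF_FN_ATTRS	static inline
	#define CGROUP_REF_EXPORT(fn)		/* nothing */
	#include <linux/cgroup_refcnt.h>

	/* CONFIG_DEBUG_CGROUP_REF=y, in the cgroup core: real, traceable,
	 * exported functions built from the very same source text. */
	#define CGROUP_REF_FN_ATTRS	noinline
	#define CGROUP_REF_EXPORT(fn)	EXPORT_SYMBOL_GPL(fn);
	#include <linux/cgroup_refcnt.h>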
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index d0e597c44585..445235487230 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* List of cgroup subsystems.
*
@@ -60,6 +61,10 @@ SUBSYS(pids)
SUBSYS(rdma)
#endif
+#if IS_ENABLED(CONFIG_CGROUP_MISC)
+SUBSYS(misc)
+#endif
+
/*
* The following subsystems are not supported on the default hierarchy.
*/
diff --git a/include/linux/circ_buf.h b/include/linux/circ_buf.h
index 90f2471dc6f2..b3233e8202f9 100644
--- a/include/linux/circ_buf.h
+++ b/include/linux/circ_buf.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * See Documentation/circular-buffers.txt for more information.
+ * See Documentation/core-api/circular-buffers.rst for more information.
*/
#ifndef _LINUX_CIRC_BUF_H
diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
deleted file mode 100644
index bbb3712dd892..000000000000
--- a/include/linux/cleancache.h
+++ /dev/null
@@ -1,123 +0,0 @@
-#ifndef _LINUX_CLEANCACHE_H
-#define _LINUX_CLEANCACHE_H
-
-#include <linux/fs.h>
-#include <linux/exportfs.h>
-#include <linux/mm.h>
-
-#define CLEANCACHE_NO_POOL -1
-#define CLEANCACHE_NO_BACKEND -2
-#define CLEANCACHE_NO_BACKEND_SHARED -3
-
-#define CLEANCACHE_KEY_MAX 6
-
-/*
- * cleancache requires every file with a page in cleancache to have a
- * unique key unless/until the file is removed/truncated. For some
- * filesystems, the inode number is unique, but for "modern" filesystems
- * an exportable filehandle is required (see exportfs.h)
- */
-struct cleancache_filekey {
- union {
- ino_t ino;
- __u32 fh[CLEANCACHE_KEY_MAX];
- u32 key[CLEANCACHE_KEY_MAX];
- } u;
-};
-
-struct cleancache_ops {
- int (*init_fs)(size_t);
- int (*init_shared_fs)(uuid_t *uuid, size_t);
- int (*get_page)(int, struct cleancache_filekey,
- pgoff_t, struct page *);
- void (*put_page)(int, struct cleancache_filekey,
- pgoff_t, struct page *);
- void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
- void (*invalidate_inode)(int, struct cleancache_filekey);
- void (*invalidate_fs)(int);
-};
-
-extern int cleancache_register_ops(const struct cleancache_ops *ops);
-extern void __cleancache_init_fs(struct super_block *);
-extern void __cleancache_init_shared_fs(struct super_block *);
-extern int __cleancache_get_page(struct page *);
-extern void __cleancache_put_page(struct page *);
-extern void __cleancache_invalidate_page(struct address_space *, struct page *);
-extern void __cleancache_invalidate_inode(struct address_space *);
-extern void __cleancache_invalidate_fs(struct super_block *);
-
-#ifdef CONFIG_CLEANCACHE
-#define cleancache_enabled (1)
-static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping)
-{
- return mapping->host->i_sb->cleancache_poolid >= 0;
-}
-static inline bool cleancache_fs_enabled(struct page *page)
-{
- return cleancache_fs_enabled_mapping(page->mapping);
-}
-#else
-#define cleancache_enabled (0)
-#define cleancache_fs_enabled(_page) (0)
-#define cleancache_fs_enabled_mapping(_page) (0)
-#endif
-
-/*
- * The shim layer provided by these inline functions allows the compiler
- * to reduce all cleancache hooks to nothingness if CONFIG_CLEANCACHE
- * is disabled, to a single global variable check if CONFIG_CLEANCACHE
- * is enabled but no cleancache "backend" has dynamically enabled it,
- * and, for the most frequent cleancache ops, to a single global variable
- * check plus a superblock element comparison if CONFIG_CLEANCACHE is enabled
- * and a cleancache backend has dynamically enabled cleancache, but the
- * filesystem referenced by that cleancache op has not enabled cleancache.
- * As a result, CONFIG_CLEANCACHE can be enabled by default with essentially
- * no measurable performance impact.
- */
-
-static inline void cleancache_init_fs(struct super_block *sb)
-{
- if (cleancache_enabled)
- __cleancache_init_fs(sb);
-}
-
-static inline void cleancache_init_shared_fs(struct super_block *sb)
-{
- if (cleancache_enabled)
- __cleancache_init_shared_fs(sb);
-}
-
-static inline int cleancache_get_page(struct page *page)
-{
- if (cleancache_enabled && cleancache_fs_enabled(page))
- return __cleancache_get_page(page);
- return -1;
-}
-
-static inline void cleancache_put_page(struct page *page)
-{
- if (cleancache_enabled && cleancache_fs_enabled(page))
- __cleancache_put_page(page);
-}
-
-static inline void cleancache_invalidate_page(struct address_space *mapping,
- struct page *page)
-{
- /* careful... page->mapping is NULL sometimes when this is called */
- if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
- __cleancache_invalidate_page(mapping, page);
-}
-
-static inline void cleancache_invalidate_inode(struct address_space *mapping)
-{
- if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
- __cleancache_invalidate_inode(mapping);
-}
-
-static inline void cleancache_invalidate_fs(struct super_block *sb)
-{
- if (cleancache_enabled)
- __cleancache_invalidate_fs(sb);
-}
-
-#endif /* _LINUX_CLEANCACHE_H */
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index c59c62571e4f..28ff6f1a6ada 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -1,32 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * linux/include/linux/clk-provider.h
- *
* Copyright (c) 2010-2011 Jeremy Kerr <jeremy.kerr@canonical.com>
* Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_CLK_PROVIDER_H
#define __LINUX_CLK_PROVIDER_H
-#include <linux/io.h>
#include <linux/of.h>
-
-#ifdef CONFIG_COMMON_CLK
+#include <linux/of_clk.h>
/*
* flags used across common struct clk. these flags should only affect the
* top-level framework. custom flags for dealing with hardware specifics
* belong in struct clk_foo
+ *
+ * Please update clk_flags[] in drivers/clk/clk.c when making changes here!
*/
#define CLK_SET_RATE_GATE BIT(0) /* must be gated across rate change */
#define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */
#define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */
#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */
/* unused */
-#define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */
+ /* unused */
#define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */
#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
#define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
@@ -35,6 +30,8 @@
#define CLK_IS_CRITICAL BIT(11) /* do not gate, ever */
/* parents need enable during gate/ungate, set rate and re-parent */
#define CLK_OPS_PARENT_ENABLE BIT(12)
+/* duty cycle call may be forwarded to the parent clock */
+#define CLK_DUTY_CYCLE_PARENT BIT(13)
struct clk;
struct clk_hw;
@@ -45,6 +42,9 @@ struct dentry;
* struct clk_rate_request - Structure encoding the clk constraints that
* a clock user might require.
*
+ * Should be initialized by calling clk_hw_init_rate_request().
+ *
+ * @core: Pointer to the struct clk_core affected by this request
* @rate: Requested clock rate. This field will be adjusted by
* clock drivers according to hardware capabilities.
* @min_rate: Minimum rate imposed by clk users.
@@ -56,6 +56,7 @@ struct dentry;
*
*/
struct clk_rate_request {
+ struct clk_core *core;
unsigned long rate;
unsigned long min_rate;
unsigned long max_rate;
@@ -63,6 +64,26 @@ struct clk_rate_request {
struct clk_hw *best_parent_hw;
};
+void clk_hw_init_rate_request(const struct clk_hw *hw,
+ struct clk_rate_request *req,
+ unsigned long rate);
+void clk_hw_forward_rate_request(const struct clk_hw *core,
+ const struct clk_rate_request *old_req,
+ const struct clk_hw *parent,
+ struct clk_rate_request *req,
+ unsigned long parent_rate);
+
+/**
+ * struct clk_duty - Structure encoding the duty cycle ratio of a clock
+ *
+ * @num: Numerator of the duty cycle ratio
+ * @den: Denominator of the duty cycle ratio
+ */
+struct clk_duty {
+ unsigned int num;
+ unsigned int den;
+};
+
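+
A minimal sketch of a provider filling this in; struct my_clk and the register layout are assumptions, not part of the framework:

	struct my_clk {
		struct clk_hw hw;
		void __iomem *reg; /* duty: bits 7:0 on-count, 15:8 period */
	};

	static int my_clk_get_duty_cycle(struct clk_hw *hw,
					 struct clk_duty *duty)
	{
		struct my_clk *clk = container_of(hw, struct my_clk, hw);
		u32 val = readl(clk->reg);

		duty->num = val & 0xff;
		duty->den = (val >> 8) & 0xff;
		/* den must be non-zero and >= num, per the ops contract */
		return (duty->den && duty->den >= duty->num) ? 0 : -EINVAL;
	}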
/**
* struct clk_ops - Callback operations for hardware clocks; these are to
* be provided by the clock implementation, and will be called by drivers
@@ -103,10 +124,16 @@ struct clk_rate_request {
* Called with enable_lock held. This function must not
* sleep.
*
+ * @save_context: Save the context of the clock in preparation for poweroff.
+ *
+ * @restore_context: Restore the context of the clock after a restoration
+ * of power.
+ *
* @recalc_rate Recalculate the rate of this clock, by querying hardware. The
* parent rate is an input parameter. It is up to the caller to
- * ensure that the prepare_mutex is held across this call.
- * Returns the calculated rate. Optional, but recommended - if
+ * ensure that the prepare_mutex is held across this call. If the
+ * driver cannot figure out a rate for this clock, it must return
+ * 0. Returns the calculated rate. Optional, but recommended - if
* this op is not set then clock rate will be initialized to 0.
*
* @round_rate: Given a target rate as input, returns the closest rate actually
@@ -166,10 +193,25 @@ struct clk_rate_request {
* by the second argument. Valid values for degrees are
* 0-359. Return 0 on success, otherwise -EERROR.
*
+ * @get_duty_cycle: Queries the hardware to get the current duty cycle ratio
+ *		of a clock. The returned ratio's denominator cannot be 0 and
+ *		must be greater than or equal to the numerator.
+ *
+ * @set_duty_cycle: Apply the duty cycle ratio to this clock signal specified by
+ *		the numerator (2nd argument) and denominator (3rd argument).
+ *		The argument must be a valid ratio (denominator > 0
+ *		and >= numerator). Return 0 on success, otherwise -EERROR.
+ *
* @init: Perform platform-specific initialization magic.
- * This is not not used by any of the basic clock types.
- * Please consider other ways of solving initialization problems
- * before using this callback, as its use is discouraged.
+ * This is not used by any of the basic clock types.
+ *		This callback exists for HW which needs to perform some
+ *		initialisation magic for CCF to get an accurate view of the
+ *		clock. It may also be used when dynamic resource allocation is
+ *		required. It shall not be used to deal with clock parameters,
+ * such as rate or parents.
+ * Returns 0 on success, -EERROR otherwise.
+ *
+ * @terminate: Free any resource allocated by init.
*
* @debug_init: Set up type-specific debugfs entries for this clock. This
* is called once, after the debugfs directory entry for this
@@ -198,6 +240,8 @@ struct clk_ops {
void (*disable)(struct clk_hw *hw);
int (*is_enabled)(struct clk_hw *hw);
void (*disable_unused)(struct clk_hw *hw);
+ int (*save_context)(struct clk_hw *hw);
+ void (*restore_context)(struct clk_hw *hw);
unsigned long (*recalc_rate)(struct clk_hw *hw,
unsigned long parent_rate);
long (*round_rate)(struct clk_hw *hw, unsigned long rate,
@@ -215,8 +259,27 @@ struct clk_ops {
unsigned long parent_accuracy);
int (*get_phase)(struct clk_hw *hw);
int (*set_phase)(struct clk_hw *hw, int degrees);
- void (*init)(struct clk_hw *hw);
- int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
+ int (*get_duty_cycle)(struct clk_hw *hw,
+ struct clk_duty *duty);
+ int (*set_duty_cycle)(struct clk_hw *hw,
+ struct clk_duty *duty);
+ int (*init)(struct clk_hw *hw);
+ void (*terminate)(struct clk_hw *hw);
+ void (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
+};
+
+/**
+ * struct clk_parent_data - clk parent information
+ * @hw: parent clk_hw pointer (used for clk providers with internal clks)
+ * @fw_name: parent name local to provider registering clk
+ * @name: globally unique parent name (used as a fallback)
+ * @index: parent index local to provider registering clk (if @fw_name absent)
+ */
+struct clk_parent_data {
+ const struct clk_hw *hw;
+ const char *fw_name;
+ const char *name;
+ int index;
};
/**
@@ -226,13 +289,20 @@ struct clk_ops {
* @name: clock name
* @ops: operations this clock supports
* @parent_names: array of string names for all possible parents
+ * @parent_data: array of parent data for all possible parents (when some
+ * parents are external to the clk controller)
+ * @parent_hws: array of pointers to all possible parents (when all parents
+ * are internal to the clk controller)
* @num_parents: number of possible parents
* @flags: framework-level hints and quirks
*/
struct clk_init_data {
const char *name;
const struct clk_ops *ops;
+ /* Only one of the following three should be assigned */
const char * const *parent_names;
+ const struct clk_parent_data *parent_data;
+ const struct clk_hw **parent_hws;
u8 num_parents;
unsigned long flags;
};
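
A hedged example of the "only one of the three" rule; every name below is illustrative:

	static const struct clk_parent_data my_mux_parents[] = {
		/* DT "clock-names" entry first, global name as fallback */
		{ .fw_name = "xtal", .name = "osc24M" },
		{ .fw_name = "pll",  .name = "pll-periph" },
	};

	static const struct clk_init_data my_mux_init = {
		.name		= "my-mux",
		.ops		= &clk_mux_ops,
		.parent_data	= my_mux_parents, /* so no parent_names/hws */
		.num_parents	= ARRAY_SIZE(my_mux_parents),
	};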
@@ -250,7 +320,8 @@ struct clk_init_data {
* into the clk API
*
* @init: pointer to struct clk_init_data that contains the init data shared
- * with the common clock framework.
+ * with the common clock framework. This pointer will be set to NULL once
+ * a clk_register() variant is called on this clk_hw pointer.
*/
struct clk_hw {
struct clk_core *core;
@@ -271,30 +342,146 @@ struct clk_hw {
* struct clk_fixed_rate - fixed-rate clock
* @hw: handle between common and hardware-specific interfaces
* @fixed_rate: constant frequency of clock
+ * @fixed_accuracy: constant accuracy of clock in ppb (parts per billion)
+ * @flags: hardware specific flags
+ *
+ * Flags:
+ * * CLK_FIXED_RATE_PARENT_ACCURACY - Use the accuracy of the parent clk
+ * instead of what's set in @fixed_accuracy.
*/
struct clk_fixed_rate {
struct clk_hw hw;
unsigned long fixed_rate;
unsigned long fixed_accuracy;
- u8 flags;
+ unsigned long flags;
};
-#define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw)
+#define CLK_FIXED_RATE_PARENT_ACCURACY BIT(0)
extern const struct clk_ops clk_fixed_rate_ops;
+struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data, unsigned long flags,
+ unsigned long fixed_rate, unsigned long fixed_accuracy,
+ unsigned long clk_fixed_flags, bool devm);
struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned long fixed_rate);
-struct clk_hw *clk_hw_register_fixed_rate(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
- unsigned long fixed_rate);
-struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev,
- const char *name, const char *parent_name, unsigned long flags,
- unsigned long fixed_rate, unsigned long fixed_accuracy);
+/**
+ * clk_hw_register_fixed_rate - register fixed-rate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define clk_hw_register_fixed_rate(dev, name, parent_name, flags, fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (fixed_rate), 0, 0, false)
+
+/**
+ * devm_clk_hw_register_fixed_rate - register fixed-rate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define devm_clk_hw_register_fixed_rate(dev, name, parent_name, flags, fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (fixed_rate), 0, 0, true)
+/**
+ * clk_hw_register_fixed_rate_parent_hw - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define clk_hw_register_fixed_rate_parent_hw(dev, name, parent_hw, flags, \
+ fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw), \
+ NULL, (flags), (fixed_rate), 0, 0, false)
+/**
+ * clk_hw_register_fixed_rate_parent_data - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define clk_hw_register_fixed_rate_parent_data(dev, name, parent_data, flags, \
+ fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (fixed_rate), 0, \
+ 0, false)
+/**
+ * clk_hw_register_fixed_rate_with_accuracy - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ * @fixed_accuracy: non-adjustable clock accuracy
+ */
+#define clk_hw_register_fixed_rate_with_accuracy(dev, name, parent_name, \
+ flags, fixed_rate, \
+ fixed_accuracy) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), \
+ NULL, NULL, (flags), (fixed_rate), \
+ (fixed_accuracy), 0, false)
+/**
+ * clk_hw_register_fixed_rate_with_accuracy_parent_hw - register fixed-rate
+ * clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ * @fixed_accuracy: non-adjustable clock accuracy
+ */
+#define clk_hw_register_fixed_rate_with_accuracy_parent_hw(dev, name, \
+ parent_hw, flags, fixed_rate, fixed_accuracy) \
+	__clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw), \
+				     NULL, (flags), (fixed_rate), \
+ (fixed_accuracy), 0, false)
+/**
+ * clk_hw_register_fixed_rate_with_accuracy_parent_data - register fixed-rate
+ * clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ * @fixed_accuracy: non-adjustable clock accuracy
+ */
+#define clk_hw_register_fixed_rate_with_accuracy_parent_data(dev, name, \
+ parent_data, flags, fixed_rate, fixed_accuracy) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
+				     (parent_data), (flags), \
+ (fixed_rate), (fixed_accuracy), 0, false)
+/**
+ * clk_hw_register_fixed_rate_parent_accuracy - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define clk_hw_register_fixed_rate_parent_accuracy(dev, name, parent_data, \
+ flags, fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (fixed_rate), 0, \
+ CLK_FIXED_RATE_PARENT_ACCURACY, false)
+
void clk_unregister_fixed_rate(struct clk *clk);
-struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev,
- const char *name, const char *parent_name, unsigned long flags,
- unsigned long fixed_rate, unsigned long fixed_accuracy);
void clk_hw_unregister_fixed_rate(struct clk_hw *hw);
void of_fixed_clk_setup(struct device_node *np);
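
Registration through the new front ends then becomes a one-liner in probe; a sketch (clock name and rate are illustrative):

	static int my_probe(struct platform_device *pdev)
	{
		struct clk_hw *hw;

		/* root clock: no parent, 32.768 kHz, devres-managed */
		hw = devm_clk_hw_register_fixed_rate(&pdev->dev,
						     "my-slow-clk",
						     NULL, 0, 32768);
		return PTR_ERR_OR_ZERO(hw);
	}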
@@ -318,6 +505,9 @@ void of_fixed_clk_setup(struct device_node *np);
* of this register, and mask of gate bits are in higher 16-bit of this
* register. While setting the gate bits, higher 16-bit should also be
* updated to indicate changing gate bits.
+ * CLK_GATE_BIG_ENDIAN - By default little endian register accesses are used for
+ * the gate register. Setting this flag makes the register accesses big
+ * endian.
*/
struct clk_gate {
struct clk_hw hw;
@@ -331,18 +521,115 @@ struct clk_gate {
#define CLK_GATE_SET_TO_DISABLE BIT(0)
#define CLK_GATE_HIWORD_MASK BIT(1)
+#define CLK_GATE_BIG_ENDIAN BIT(2)
extern const struct clk_ops clk_gate_ops;
-struct clk *clk_register_gate(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
+struct clk_hw *__clk_hw_register_gate(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data,
+ unsigned long flags,
+ void __iomem *reg, u8 bit_idx,
+ u8 clk_gate_flags, spinlock_t *lock);
+struct clk_hw *__devm_clk_hw_register_gate(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data,
+ unsigned long flags,
void __iomem *reg, u8 bit_idx,
u8 clk_gate_flags, spinlock_t *lock);
-struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
+struct clk *clk_register_gate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 bit_idx,
u8 clk_gate_flags, spinlock_t *lock);
+/**
+ * clk_hw_register_gate - register a gate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of this clock's parent
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_gate(dev, name, parent_name, flags, reg, bit_idx, \
+ clk_gate_flags, lock) \
+ __clk_hw_register_gate((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (reg), (bit_idx), \
+ (clk_gate_flags), (lock))
+/**
+ * clk_hw_register_gate_parent_hw - register a gate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_gate_parent_hw(dev, name, parent_hw, flags, reg, \
+ bit_idx, clk_gate_flags, lock) \
+ __clk_hw_register_gate((dev), NULL, (name), NULL, (parent_hw), \
+ NULL, (flags), (reg), (bit_idx), \
+ (clk_gate_flags), (lock))
+/**
+ * clk_hw_register_gate_parent_data - register a gate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_gate_parent_data(dev, name, parent_data, flags, reg, \
+ bit_idx, clk_gate_flags, lock) \
+ __clk_hw_register_gate((dev), NULL, (name), NULL, NULL, (parent_data), \
+ (flags), (reg), (bit_idx), \
+ (clk_gate_flags), (lock))
+/**
+ * devm_clk_hw_register_gate - register a gate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of this clock's parent
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_gate(dev, name, parent_name, flags, reg, bit_idx,\
+ clk_gate_flags, lock) \
+ __devm_clk_hw_register_gate((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (reg), (bit_idx), \
+ (clk_gate_flags), (lock))
+/**
+ * devm_clk_hw_register_gate_parent_data - register a gate clock with the
+ * clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_gate_parent_data(dev, name, parent_data, flags, \
+ reg, bit_idx, clk_gate_flags, \
+ lock) \
+ __devm_clk_hw_register_gate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (reg), (bit_idx), \
+ (clk_gate_flags), (lock))
+
void clk_unregister_gate(struct clk *clk);
void clk_hw_unregister_gate(struct clk_hw *hw);
+int clk_gate_is_enabled(struct clk_hw *hw);
struct clk_div_table {
unsigned int val;
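
Usage of the gate front ends follows the same pattern; a sketch with an assumed register block and names:

	static DEFINE_SPINLOCK(my_gate_lock);

	static const struct clk_parent_data my_gate_parent = {
		.fw_name = "bus", .name = "ahb",
	};

	static struct clk_hw *my_register_gate(struct device *dev,
					       void __iomem *base)
	{
		/* gate lives in bit 3 of @base + 0x10, devres-managed */
		return devm_clk_hw_register_gate_parent_data(dev, "my-gate",
				&my_gate_parent, 0, base + 0x10, 3, 0,
				&my_gate_lock);
	}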
@@ -385,6 +672,9 @@ struct clk_div_table {
* CLK_DIVIDER_MAX_AT_ZERO - For dividers which are like CLK_DIVIDER_ONE_BASED
* except when the value read from the register is zero, the divisor is
* 2^width of the field.
+ * CLK_DIVIDER_BIG_ENDIAN - By default little endian register accesses are used
+ * for the divider register. Setting this flag makes the register accesses
+ * big endian.
*/
struct clk_divider {
struct clk_hw hw;
@@ -396,6 +686,7 @@ struct clk_divider {
spinlock_t *lock;
};
+#define clk_div_mask(width) ((1 << (width)) - 1)
#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
#define CLK_DIVIDER_ONE_BASED BIT(0)
@@ -405,39 +696,242 @@ struct clk_divider {
#define CLK_DIVIDER_ROUND_CLOSEST BIT(4)
#define CLK_DIVIDER_READ_ONLY BIT(5)
#define CLK_DIVIDER_MAX_AT_ZERO BIT(6)
+#define CLK_DIVIDER_BIG_ENDIAN BIT(7)
extern const struct clk_ops clk_divider_ops;
extern const struct clk_ops clk_divider_ro_ops;
unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
unsigned int val, const struct clk_div_table *table,
- unsigned long flags);
+ unsigned long flags, unsigned long width);
long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
unsigned long rate, unsigned long *prate,
const struct clk_div_table *table,
u8 width, unsigned long flags);
+long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
+ unsigned long rate, unsigned long *prate,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags, unsigned int val);
+int divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags);
+int divider_ro_determine_rate(struct clk_hw *hw, struct clk_rate_request *req,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags, unsigned int val);
int divider_get_val(unsigned long rate, unsigned long parent_rate,
const struct clk_div_table *table, u8 width,
unsigned long flags);
-struct clk *clk_register_divider(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width,
- u8 clk_divider_flags, spinlock_t *lock);
-struct clk_hw *clk_hw_register_divider(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width,
- u8 clk_divider_flags, spinlock_t *lock);
+struct clk_hw *__clk_hw_register_divider(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
+ const struct clk_div_table *table, spinlock_t *lock);
+struct clk_hw *__devm_clk_hw_register_divider(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
+ const struct clk_div_table *table, spinlock_t *lock);
struct clk *clk_register_divider_table(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_divider_flags, const struct clk_div_table *table,
spinlock_t *lock);
-struct clk_hw *clk_hw_register_divider_table(struct device *dev,
- const char *name, const char *parent_name, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width,
- u8 clk_divider_flags, const struct clk_div_table *table,
- spinlock_t *lock);
+/**
+ * clk_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_register_divider(dev, name, parent_name, flags, reg, shift, width, \
+ clk_divider_flags, lock) \
+ clk_register_divider_table((dev), (name), (parent_name), (flags), \
+ (reg), (shift), (width), \
+ (clk_divider_flags), NULL, (lock))
+/**
+ * clk_hw_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider(dev, name, parent_name, flags, reg, shift, \
+ width, clk_divider_flags, lock) \
+ __clk_hw_register_divider((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (reg), (shift), (width), \
+ (clk_divider_flags), NULL, (lock))
+/**
+ * clk_hw_register_divider_parent_hw - register a divider clock with the clock
+ * framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_parent_hw(dev, name, parent_hw, flags, reg, \
+ shift, width, clk_divider_flags, \
+ lock) \
+ __clk_hw_register_divider((dev), NULL, (name), NULL, (parent_hw), \
+ NULL, (flags), (reg), (shift), (width), \
+ (clk_divider_flags), NULL, (lock))
+/**
+ * clk_hw_register_divider_parent_data - register a divider clock with the clock
+ * framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_parent_data(dev, name, parent_data, flags, \
+ reg, shift, width, \
+ clk_divider_flags, lock) \
+ __clk_hw_register_divider((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (reg), (shift), \
+ (width), (clk_divider_flags), NULL, (lock))
+/**
+ * clk_hw_register_divider_table - register a table based divider clock with
+ * the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @table: array of divider/value pairs ending with a div set to 0
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_table(dev, name, parent_name, flags, reg, \
+ shift, width, clk_divider_flags, table, \
+ lock) \
+ __clk_hw_register_divider((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (reg), (shift), (width), \
+ (clk_divider_flags), (table), (lock))
+/**
+ * clk_hw_register_divider_table_parent_hw - register a table based divider
+ * clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @table: array of divider/value pairs ending with a div set to 0
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_table_parent_hw(dev, name, parent_hw, flags, \
+ reg, shift, width, \
+ clk_divider_flags, table, \
+ lock) \
+ __clk_hw_register_divider((dev), NULL, (name), NULL, (parent_hw), \
+ NULL, (flags), (reg), (shift), (width), \
+ (clk_divider_flags), (table), (lock))
+/**
+ * clk_hw_register_divider_table_parent_data - register a table based divider
+ * clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @table: array of divider/value pairs ending with a div set to 0
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_table_parent_data(dev, name, parent_data, \
+ flags, reg, shift, width, \
+ clk_divider_flags, table, \
+ lock) \
+ __clk_hw_register_divider((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (reg), (shift), \
+ (width), (clk_divider_flags), (table), \
+ (lock))
+/**
+ * devm_clk_hw_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_divider(dev, name, parent_name, flags, reg, shift, \
+ width, clk_divider_flags, lock) \
+ __devm_clk_hw_register_divider((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (reg), (shift), (width), \
+ (clk_divider_flags), NULL, (lock))
+/**
+ * devm_clk_hw_register_divider_parent_hw - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_divider_parent_hw(dev, name, parent_hw, flags, \
+ reg, shift, width, \
+ clk_divider_flags, lock) \
+ __devm_clk_hw_register_divider((dev), NULL, (name), NULL, \
+ (parent_hw), NULL, (flags), (reg), \
+ (shift), (width), (clk_divider_flags), \
+ NULL, (lock))
+/**
+ * devm_clk_hw_register_divider_table - register a table based divider clock
+ * with the clock framework (devres variant)
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @table: array of divider/value pairs ending with a div set to 0
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_divider_table(dev, name, parent_name, flags, \
+ reg, shift, width, \
+ clk_divider_flags, table, lock) \
+ __devm_clk_hw_register_divider((dev), NULL, (name), (parent_name), \
+ NULL, NULL, (flags), (reg), (shift), \
+ (width), (clk_divider_flags), (table), \
+ (lock))
+
void clk_unregister_divider(struct clk *clk);
void clk_hw_unregister_divider(struct clk_hw *hw);
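
And a sketch of the table-based divider front end, with a hypothetical power-of-two table (note the div == 0 sentinel the kernel-doc above asks for):

	static const struct clk_div_table my_div_table[] = {
		{ .val = 0, .div = 1 },
		{ .val = 1, .div = 2 },
		{ .val = 2, .div = 4 },
		{ .val = 3, .div = 8 },
		{ /* sentinel */ }
	};

	static DEFINE_SPINLOCK(my_div_lock);

	static struct clk_hw *my_register_div(struct device *dev,
					      void __iomem *base)
	{
		/* 2-bit field at bit 0 of @base + 0x14 */
		return devm_clk_hw_register_divider_table(dev, "my-div",
				"my-gate", 0, base + 0x14, 0, 2, 0,
				my_div_table, &my_div_lock);
	}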
@@ -446,8 +940,9 @@ void clk_hw_unregister_divider(struct clk_hw *hw);
*
* @hw: handle between common and hardware-specific interfaces
* @reg: register controlling multiplexer
+ * @table: array of register values corresponding to the parent index
* @shift: shift to multiplexer bit field
- * @width: width of mutliplexer bit field
+ * @mask: mask of multiplexer bit field
* @flags: hardware-specific flags
* @lock: register lock
*
@@ -461,13 +956,18 @@ void clk_hw_unregister_divider(struct clk_hw *hw);
* register, and mask of mux bits are in higher 16-bit of this register.
* While setting the mux bits, higher 16-bit should also be updated to
* indicate changing mux bits.
+ * CLK_MUX_READ_ONLY - The mux registers can't be written, only read in the
+ * .get_parent clk_op.
* CLK_MUX_ROUND_CLOSEST - Use the parent rate that is closest to the desired
* frequency.
+ * CLK_MUX_BIG_ENDIAN - By default little endian register accesses are used for
+ * the mux register. Setting this flag makes the register accesses big
+ * endian.
*/
struct clk_mux {
struct clk_hw hw;
void __iomem *reg;
- u32 *table;
+ const u32 *table;
u32 mask;
u8 shift;
u8 flags;
@@ -481,31 +981,97 @@ struct clk_mux {
#define CLK_MUX_HIWORD_MASK BIT(2)
#define CLK_MUX_READ_ONLY BIT(3) /* mux can't be changed */
#define CLK_MUX_ROUND_CLOSEST BIT(4)
+#define CLK_MUX_BIG_ENDIAN BIT(5)
extern const struct clk_ops clk_mux_ops;
extern const struct clk_ops clk_mux_ro_ops;
-struct clk *clk_register_mux(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents,
- unsigned long flags,
- void __iomem *reg, u8 shift, u8 width,
- u8 clk_mux_flags, spinlock_t *lock);
-struct clk_hw *clk_hw_register_mux(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents,
- unsigned long flags,
- void __iomem *reg, u8 shift, u8 width,
- u8 clk_mux_flags, spinlock_t *lock);
-
+struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
+ const char *name, u8 num_parents,
+ const char * const *parent_names,
+ const struct clk_hw **parent_hws,
+ const struct clk_parent_data *parent_data,
+ unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
+ u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
+struct clk_hw *__devm_clk_hw_register_mux(struct device *dev, struct device_node *np,
+ const char *name, u8 num_parents,
+ const char * const *parent_names,
+ const struct clk_hw **parent_hws,
+ const struct clk_parent_data *parent_data,
+ unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
+ u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
struct clk *clk_register_mux_table(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents,
- unsigned long flags,
- void __iomem *reg, u8 shift, u32 mask,
- u8 clk_mux_flags, u32 *table, spinlock_t *lock);
-struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents,
- unsigned long flags,
- void __iomem *reg, u8 shift, u32 mask,
- u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+ unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
+ u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
+
+#define clk_register_mux(dev, name, parent_names, num_parents, flags, reg, \
+ shift, width, clk_mux_flags, lock) \
+ clk_register_mux_table((dev), (name), (parent_names), (num_parents), \
+ (flags), (reg), (shift), BIT((width)) - 1, \
+ (clk_mux_flags), NULL, (lock))
+#define clk_hw_register_mux_table(dev, name, parent_names, num_parents, \
+ flags, reg, shift, mask, clk_mux_flags, \
+ table, lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), \
+ (parent_names), NULL, NULL, (flags), (reg), \
+ (shift), (mask), (clk_mux_flags), (table), \
+ (lock))
+#define clk_hw_register_mux_table_parent_data(dev, name, parent_data, \
+ num_parents, flags, reg, shift, mask, \
+ clk_mux_flags, table, lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), \
+ NULL, NULL, (parent_data), (flags), (reg), \
+ (shift), (mask), (clk_mux_flags), (table), \
+ (lock))
+#define clk_hw_register_mux(dev, name, parent_names, num_parents, flags, reg, \
+ shift, width, clk_mux_flags, lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), \
+ (parent_names), NULL, NULL, (flags), (reg), \
+ (shift), BIT((width)) - 1, (clk_mux_flags), \
+ NULL, (lock))
+#define clk_hw_register_mux_hws(dev, name, parent_hws, num_parents, flags, \
+ reg, shift, width, clk_mux_flags, lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, \
+ (parent_hws), NULL, (flags), (reg), (shift), \
+ BIT((width)) - 1, (clk_mux_flags), NULL, (lock))
+#define clk_hw_register_mux_parent_data(dev, name, parent_data, num_parents, \
+ flags, reg, shift, width, \
+ clk_mux_flags, lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, NULL, \
+ (parent_data), (flags), (reg), (shift), \
+ BIT((width)) - 1, (clk_mux_flags), NULL, (lock))
+#define clk_hw_register_mux_parent_data_table(dev, name, parent_data, \
+ num_parents, flags, reg, shift, \
+ width, clk_mux_flags, table, \
+ lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, NULL, \
+ (parent_data), (flags), (reg), (shift), \
+ BIT((width)) - 1, (clk_mux_flags), table, (lock))
+#define devm_clk_hw_register_mux(dev, name, parent_names, num_parents, flags, reg, \
+ shift, width, clk_mux_flags, lock) \
+ __devm_clk_hw_register_mux((dev), NULL, (name), (num_parents), \
+ (parent_names), NULL, NULL, (flags), (reg), \
+ (shift), BIT((width)) - 1, (clk_mux_flags), \
+ NULL, (lock))
+#define devm_clk_hw_register_mux_parent_hws(dev, name, parent_hws, \
+ num_parents, flags, reg, shift, \
+ width, clk_mux_flags, lock) \
+ __devm_clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, \
+ (parent_hws), NULL, (flags), (reg), \
+ (shift), BIT((width)) - 1, \
+ (clk_mux_flags), NULL, (lock))
+#define devm_clk_hw_register_mux_parent_data_table(dev, name, parent_data, \
+ num_parents, flags, reg, shift, \
+ width, clk_mux_flags, table, \
+ lock) \
+ __devm_clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, \
+ NULL, (parent_data), (flags), (reg), (shift), \
+ BIT((width)) - 1, (clk_mux_flags), table, (lock))
+
+int clk_mux_val_to_index(struct clk_hw *hw, const u32 *table, unsigned int flags,
+ unsigned int val);
+unsigned int clk_mux_index_to_val(const u32 *table, unsigned int flags, u8 index);
void clk_unregister_mux(struct clk *clk);
void clk_hw_unregister_mux(struct clk_hw *hw);
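An illustrative sketch of the most common wrapper, clk_hw_register_mux(); the
parent names and register layout are hypothetical:

	static DEFINE_SPINLOCK(foo_mux_lock);

	static const char * const foo_mux_parents[] = { "osc24m", "pll1" };

	static struct clk_hw *foo_register_mux(struct device *dev,
					       void __iomem *base)
	{
		/* one select bit at position 4 is enough for two parents */
		return clk_hw_register_mux(dev, "foo-mux", foo_mux_parents,
					   ARRAY_SIZE(foo_mux_parents), 0,
					   base + 0x04, 4, 1, 0,
					   &foo_mux_lock);
	}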
@@ -541,7 +1107,20 @@ struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div);
void clk_hw_unregister_fixed_factor(struct clk_hw *hw);
+struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ unsigned int mult, unsigned int div);
+struct clk_hw *devm_clk_hw_register_fixed_factor_index(struct device *dev,
+ const char *name, unsigned int index, unsigned long flags,
+ unsigned int mult, unsigned int div);
+struct clk_hw *devm_clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+ const char *name, const struct clk_hw *parent_hw,
+ unsigned long flags, unsigned int mult, unsigned int div);
+
+struct clk_hw *clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+ const char *name, const struct clk_hw *parent_hw,
+ unsigned long flags, unsigned int mult, unsigned int div);
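For example (an illustrative sketch with hypothetical clock names), a bus
clock fixed at a quarter of its parent's rate could be registered with the
new devres variant:

	struct clk_hw *hw;

	/* mult = 1, div = 4: "pclk" always runs at hclk / 4 */
	hw = devm_clk_hw_register_fixed_factor(dev, "pclk", "hclk", 0, 1, 4);
	if (IS_ERR(hw))
		return PTR_ERR(hw);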
/**
* struct clk_fractional_divider - adjustable fractional divider clock
*
@@ -554,23 +1133,42 @@ void clk_hw_unregister_fixed_factor(struct clk_hw *hw);
* @lock: register lock
*
* Clock with adjustable fractional divider affecting its output frequency.
+ *
+ * Flags:
+ * CLK_FRAC_DIVIDER_ZERO_BASED - by default the numerator and denominator
+ * are the values read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED
+ * is set then the numerator and denominator are both the value read
+ * plus one.
+ * CLK_FRAC_DIVIDER_BIG_ENDIAN - By default little endian register accesses are
+ * used for the divider register. Setting this flag makes the register
+ * accesses big endian.
+ * CLK_FRAC_DIVIDER_POWER_OF_TWO_PS - By default the resulting fraction might
+ * be saturated, leaving the caller quite far from a good enough
+ * approximation. By setting this flag the caller requests that the rate
+ * be shifted left by a few bits when it is too small to satisfy the
+ * desired range of the denominator. This assumes that a power-of-two
+ * capable prescaler exists on the caller's side.
*/
struct clk_fractional_divider {
struct clk_hw hw;
void __iomem *reg;
u8 mshift;
u8 mwidth;
- u32 mmask;
u8 nshift;
u8 nwidth;
- u32 nmask;
u8 flags;
+ void (*approximation)(struct clk_hw *hw,
+ unsigned long rate, unsigned long *parent_rate,
+ unsigned long *m, unsigned long *n);
spinlock_t *lock;
};
#define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw)
-extern const struct clk_ops clk_fractional_divider_ops;
+#define CLK_FRAC_DIVIDER_ZERO_BASED BIT(0)
+#define CLK_FRAC_DIVIDER_BIG_ENDIAN BIT(1)
+#define CLK_FRAC_DIVIDER_POWER_OF_TWO_PS BIT(2)
+
struct clk *clk_register_fractional_divider(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
@@ -601,6 +1199,9 @@ void clk_hw_unregister_fractional_divider(struct clk_hw *hw);
* leaving the parent rate unmodified.
* CLK_MULTIPLIER_ROUND_CLOSEST - Makes the best calculated divider to be
* rounded to the closest integer instead of the down one.
+ * CLK_MULTIPLIER_BIG_ENDIAN - By default little endian register accesses are
+ * used for the multiplier register. Setting this flag makes the register
+ * accesses big endian.
*/
struct clk_multiplier {
struct clk_hw hw;
@@ -613,8 +1214,9 @@ struct clk_multiplier {
#define to_clk_multiplier(_hw) container_of(_hw, struct clk_multiplier, hw)
-#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0)
+#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0)
#define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1)
+#define CLK_MULTIPLIER_BIG_ENDIAN BIT(2)
extern const struct clk_ops clk_multiplier_ops;
@@ -650,6 +1252,12 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags);
+struct clk *clk_register_composite_pdata(struct device *dev, const char *name,
+ const struct clk_parent_data *parent_data, int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+ unsigned long flags);
void clk_unregister_composite(struct clk *clk);
struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
const char * const *parent_names, int num_parents,
@@ -657,89 +1265,63 @@ struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags);
-void clk_hw_unregister_composite(struct clk_hw *hw);
-
-/***
- * struct clk_gpio_gate - gpio gated clock
- *
- * @hw: handle between common and hardware-specific interfaces
- * @gpiod: gpio descriptor
- *
- * Clock with a gpio control for enabling and disabling the parent clock.
- * Implements .enable, .disable and .is_enabled
- */
-
-struct clk_gpio {
- struct clk_hw hw;
- struct gpio_desc *gpiod;
-};
-
-#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw)
-
-extern const struct clk_ops clk_gpio_gate_ops;
-struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
- const char *parent_name, unsigned gpio, bool active_low,
+struct clk_hw *clk_hw_register_composite_pdata(struct device *dev,
+ const char *name,
+ const struct clk_parent_data *parent_data, int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags);
-struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name,
- const char *parent_name, unsigned gpio, bool active_low,
+struct clk_hw *devm_clk_hw_register_composite_pdata(struct device *dev,
+ const char *name, const struct clk_parent_data *parent_data,
+ int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags);
-void clk_hw_unregister_gpio_gate(struct clk_hw *hw);
-
-/**
- * struct clk_gpio_mux - gpio controlled clock multiplexer
- *
- * @hw: see struct clk_gpio
- * @gpiod: gpio descriptor to select the parent of this clock multiplexer
- *
- * Clock with a gpio control for selecting the parent clock.
- * Implements .get_parent, .set_parent and .determine_rate
- */
-
-extern const struct clk_ops clk_gpio_mux_ops;
-struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents, unsigned gpio,
- bool active_low, unsigned long flags);
-struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents, unsigned gpio,
- bool active_low, unsigned long flags);
-void clk_hw_unregister_gpio_mux(struct clk_hw *hw);
+void clk_hw_unregister_composite(struct clk_hw *hw);
-/**
- * clk_register - allocate a new clock, register it and return an opaque cookie
- * @dev: device that is registering this clock
- * @hw: link to hardware-specific clock data
- *
- * clk_register is the primary interface for populating the clock tree with new
- * clock nodes. It returns a pointer to the newly allocated struct clk which
- * cannot be dereferenced by driver code but may be used in conjuction with the
- * rest of the clock API. In the event of an error clk_register will return an
- * error code; drivers must test for an error code after calling clk_register.
- */
struct clk *clk_register(struct device *dev, struct clk_hw *hw);
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw);
int __must_check clk_hw_register(struct device *dev, struct clk_hw *hw);
int __must_check devm_clk_hw_register(struct device *dev, struct clk_hw *hw);
+int __must_check of_clk_hw_register(struct device_node *node, struct clk_hw *hw);
void clk_unregister(struct clk *clk);
-void devm_clk_unregister(struct device *dev, struct clk *clk);
void clk_hw_unregister(struct clk_hw *hw);
-void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw);
/* helper functions */
const char *__clk_get_name(const struct clk *clk);
const char *clk_hw_get_name(const struct clk_hw *hw);
+#ifdef CONFIG_COMMON_CLK
struct clk_hw *__clk_get_hw(struct clk *clk);
+#else
+static inline struct clk_hw *__clk_get_hw(struct clk *clk)
+{
+ return (struct clk_hw *)clk;
+}
+#endif
+
+struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id);
+struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw,
+ const char *con_id);
+
unsigned int clk_hw_get_num_parents(const struct clk_hw *hw);
struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw);
struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw,
unsigned int index);
+int clk_hw_get_parent_index(struct clk_hw *hw);
+int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *new_parent);
unsigned int __clk_get_enable_count(struct clk *clk);
unsigned long clk_hw_get_rate(const struct clk_hw *hw);
-unsigned long __clk_get_flags(struct clk *clk);
unsigned long clk_hw_get_flags(const struct clk_hw *hw);
+#define clk_hw_can_set_rate_parent(hw) \
+ (clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT)
+
bool clk_hw_is_prepared(const struct clk_hw *hw);
+bool clk_hw_rate_is_protected(const struct clk_hw *hw);
bool clk_hw_is_enabled(const struct clk_hw *hw);
bool __clk_is_enabled(struct clk *clk);
struct clk *__clk_lookup(const char *name);
@@ -748,7 +1330,12 @@ int __clk_mux_determine_rate(struct clk_hw *hw,
int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req);
int __clk_mux_determine_rate_closest(struct clk_hw *hw,
struct clk_rate_request *req);
+int clk_mux_determine_rate_flags(struct clk_hw *hw,
+ struct clk_rate_request *req,
+ unsigned long flags);
void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
+void clk_hw_get_rate_range(struct clk_hw *hw, unsigned long *min_rate,
+ unsigned long *max_rate);
void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
unsigned long max_rate);
@@ -767,15 +1354,22 @@ static inline long divider_round_rate(struct clk_hw *hw, unsigned long rate,
rate, prate, table, width, flags);
}
+static inline long divider_ro_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate,
+ const struct clk_div_table *table,
+ u8 width, unsigned long flags,
+ unsigned int val)
+{
+ return divider_ro_round_rate_parent(hw, clk_hw_get_parent(hw),
+ rate, prate, table, width, flags,
+ val);
+}
+
/*
* FIXME clock api without lock protection
*/
unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate);
-struct of_device_id;
-
-typedef void (*of_clk_init_cb_t)(struct device_node *);
-
struct clk_onecell_data {
struct clk **clks;
unsigned int clk_num;
@@ -786,9 +1380,13 @@ struct clk_hw_onecell_data {
struct clk_hw *hws[];
};
-extern struct of_device_id __clk_of_table;
-
-#define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn)
+#define CLK_OF_DECLARE(name, compat, fn) \
+ static void __init __##name##_of_clk_init_declare(struct device_node *np) \
+ { \
+ fn(np); \
+ fwnode_dev_initialized(of_fwnode_handle(np), true); \
+ } \
+ OF_DECLARE_1(clk, name, compat, __##name##_of_clk_init_declare)
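Typical usage is a sketch like the following, where the compatible string
and the init callback are hypothetical:

	static void __init foo_clk_init(struct device_node *np)
	{
		/* parse @np and register the clocks it describes */
	}
	CLK_OF_DECLARE(foo_clk, "vendor,foo-clk", foo_clk_init);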
/*
* Use this macro when you have a driver that requires two initialization
@@ -802,6 +1400,133 @@ extern struct of_device_id __clk_of_table;
} \
OF_DECLARE_1(clk, name, compat, name##_of_clk_init_driver)
+#define CLK_HW_INIT(_name, _parent, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_names = (const char *[]) { _parent }, \
+ .num_parents = 1, \
+ .ops = _ops, \
+ })
+
+#define CLK_HW_INIT_HW(_name, _parent, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_hws = (const struct clk_hw*[]) { _parent }, \
+ .num_parents = 1, \
+ .ops = _ops, \
+ })
+
+/*
+ * This macro is intended for drivers to be able to share the otherwise
+ * individual struct clk_hw[] compound literals created by the compiler
+ * when using CLK_HW_INIT_HW. It does NOT support multiple parents.
+ */
+#define CLK_HW_INIT_HWS(_name, _parent, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_hws = _parent, \
+ .num_parents = 1, \
+ .ops = _ops, \
+ })
+
+#define CLK_HW_INIT_FW_NAME(_name, _parent, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_data = (const struct clk_parent_data[]) { \
+ { .fw_name = _parent }, \
+ }, \
+ .num_parents = 1, \
+ .ops = _ops, \
+ })
+
+#define CLK_HW_INIT_PARENTS(_name, _parents, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_names = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .ops = _ops, \
+ })
+
+#define CLK_HW_INIT_PARENTS_HW(_name, _parents, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_hws = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .ops = _ops, \
+ })
+
+#define CLK_HW_INIT_PARENTS_DATA(_name, _parents, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_data = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .ops = _ops, \
+ })
+
+#define CLK_HW_INIT_NO_PARENT(_name, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_names = NULL, \
+ .num_parents = 0, \
+ .ops = _ops, \
+ })
+
+#define CLK_FIXED_FACTOR(_struct, _name, _parent, \
+ _div, _mult, _flags) \
+ struct clk_fixed_factor _struct = { \
+ .div = _div, \
+ .mult = _mult, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &clk_fixed_factor_ops, \
+ _flags), \
+ }
+
+#define CLK_FIXED_FACTOR_HW(_struct, _name, _parent, \
+ _div, _mult, _flags) \
+ struct clk_fixed_factor _struct = { \
+ .div = _div, \
+ .mult = _mult, \
+ .hw.init = CLK_HW_INIT_HW(_name, \
+ _parent, \
+ &clk_fixed_factor_ops, \
+ _flags), \
+ }
+
+/*
+ * This macro allows the driver to reuse the _parent array for multiple
+ * fixed factor clk declarations.
+ */
+#define CLK_FIXED_FACTOR_HWS(_struct, _name, _parent, \
+ _div, _mult, _flags) \
+ struct clk_fixed_factor _struct = { \
+ .div = _div, \
+ .mult = _mult, \
+ .hw.init = CLK_HW_INIT_HWS(_name, \
+ _parent, \
+ &clk_fixed_factor_ops, \
+ _flags), \
+ }
+
+#define CLK_FIXED_FACTOR_FW_NAME(_struct, _name, _parent, \
+ _div, _mult, _flags) \
+ struct clk_fixed_factor _struct = { \
+ .div = _div, \
+ .mult = _mult, \
+ .hw.init = CLK_HW_INIT_FW_NAME(_name, \
+ _parent, \
+ &clk_fixed_factor_ops, \
+ _flags), \
+ }
+
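A build-time declaration might look like this sketch (clock names are
hypothetical); the resulting hw is registered later, e.g. with
clk_hw_register():

	static CLK_FIXED_FACTOR(pll_div2, "pll-div2", "pll", 2, 1, 0);

	/* at init time */
	ret = clk_hw_register(NULL, &pll_div2.hw);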
#ifdef CONFIG_OF
int of_clk_add_provider(struct device_node *np,
struct clk *(*clk_src_get)(struct of_phandle_args *args,
@@ -811,7 +1536,12 @@ int of_clk_add_hw_provider(struct device_node *np,
struct clk_hw *(*get)(struct of_phandle_args *clkspec,
void *data),
void *data);
+int devm_of_clk_add_hw_provider(struct device *dev,
+ struct clk_hw *(*get)(struct of_phandle_args *clkspec,
+ void *data),
+ void *data);
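A provider driver could pair this with of_clk_hw_onecell_get() as in the
following sketch, assuming a hypothetical count of nclks registered clocks:

	struct clk_hw_onecell_data *data;

	data = devm_kzalloc(dev, struct_size(data, hws, nclks), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->num = nclks;
	/* fill data->hws[0..nclks-1] with the registered struct clk_hw pointers */
	return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, data);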
void of_clk_del_provider(struct device_node *np);
+
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
void *data);
struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec,
@@ -819,13 +1549,10 @@ struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec,
struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data);
struct clk_hw *of_clk_hw_onecell_get(struct of_phandle_args *clkspec,
void *data);
-unsigned int of_clk_get_parent_count(struct device_node *np);
int of_clk_parent_fill(struct device_node *np, const char **parents,
unsigned int size);
-const char *of_clk_get_parent_name(struct device_node *np, int index);
int of_clk_detect_critical(struct device_node *np, int index,
unsigned long *flags);
-void of_clk_init(const struct of_device_id *matches);
#else /* !CONFIG_OF */
@@ -843,7 +1570,15 @@ static inline int of_clk_add_hw_provider(struct device_node *np,
{
return 0;
}
+static inline int devm_of_clk_add_hw_provider(struct device *dev,
+ struct clk_hw *(*get)(struct of_phandle_args *clkspec,
+ void *data),
+ void *data)
+{
+ return 0;
+}
static inline void of_clk_del_provider(struct device_node *np) {}
+
static inline struct clk *of_clk_src_simple_get(
struct of_phandle_args *clkspec, void *data)
{
@@ -864,63 +1599,18 @@ of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
{
return ERR_PTR(-ENOENT);
}
-static inline unsigned int of_clk_get_parent_count(struct device_node *np)
-{
- return 0;
-}
static inline int of_clk_parent_fill(struct device_node *np,
const char **parents, unsigned int size)
{
return 0;
}
-static inline const char *of_clk_get_parent_name(struct device_node *np,
- int index)
-{
- return NULL;
-}
static inline int of_clk_detect_critical(struct device_node *np, int index,
unsigned long *flags)
{
return 0;
}
-static inline void of_clk_init(const struct of_device_id *matches) {}
#endif /* CONFIG_OF */
-/*
- * wrap access to peripherals in accessor routines
- * for improved portability across platforms
- */
-
-#if IS_ENABLED(CONFIG_PPC)
-
-static inline u32 clk_readl(u32 __iomem *reg)
-{
- return ioread32be(reg);
-}
-
-static inline void clk_writel(u32 val, u32 __iomem *reg)
-{
- iowrite32be(val, reg);
-}
-
-#else /* platform dependent I/O accessors */
-
-static inline u32 clk_readl(u32 __iomem *reg)
-{
- return readl(reg);
-}
-
-static inline void clk_writel(u32 val, u32 __iomem *reg)
-{
- writel(val, reg);
-}
-
-#endif /* platform dependent I/O accessors */
-
-#ifdef CONFIG_DEBUG_FS
-struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
- void *data, const struct file_operations *fops);
-#endif
+void clk_gate_restore_context(struct clk_hw *hw);
-#endif /* CONFIG_COMMON_CLK */
#endif /* CLK_PROVIDER_H */
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 12c96d94d1fa..1ef013324237 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/linux/clk.h
*
* Copyright (C) 2004 ARM Limited.
* Written by Deep Blue Solutions Limited.
* Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_CLK_H
#define __LINUX_CLK_H
@@ -95,7 +92,7 @@ struct clk_bulk_data {
#ifdef CONFIG_COMMON_CLK
/**
- * clk_notifier_register: register a clock rate-change notifier callback
+ * clk_notifier_register - register a clock rate-change notifier callback
* @clk: clock whose rate we are interested in
* @nb: notifier block with callback function pointer
*
@@ -106,13 +103,24 @@ struct clk_bulk_data {
int clk_notifier_register(struct clk *clk, struct notifier_block *nb);
/**
- * clk_notifier_unregister: unregister a clock rate-change notifier callback
+ * clk_notifier_unregister - unregister a clock rate-change notifier callback
* @clk: clock whose rate we are no longer interested in
* @nb: notifier block which will be unregistered
*/
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
/**
+ * devm_clk_notifier_register - register a managed rate-change notifier callback
+ * @dev: device for clock "consumer"
+ * @clk: clock whose rate we are interested in
+ * @nb: notifier block with callback function pointer
+ *
+ * Returns 0 on success, or a negative errno otherwise
+ */
+int devm_clk_notifier_register(struct device *dev, struct clk *clk,
+ struct notifier_block *nb);
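As an illustration, a consumer could veto unwanted rate changes with a
sketch like this (the names and the 100 MHz limit are hypothetical):

	static int foo_clk_notify(struct notifier_block *nb,
				  unsigned long event, void *data)
	{
		struct clk_notifier_data *ndata = data;

		/* reject any attempt to raise the rate above 100 MHz */
		if (event == PRE_RATE_CHANGE && ndata->new_rate > 100000000)
			return NOTIFY_BAD;
		return NOTIFY_OK;
	}

	static struct notifier_block foo_nb = {
		.notifier_call = foo_clk_notify,
	};

	/* in probe: unregistered automatically when the device is unbound */
	ret = devm_clk_notifier_register(dev, clk, &foo_nb);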
+
+/**
* clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion)
* for a clock source.
* @clk: clock source
@@ -142,6 +150,27 @@ int clk_set_phase(struct clk *clk, int degrees);
int clk_get_phase(struct clk *clk);
/**
+ * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
+ * @clk: clock signal source
+ * @num: numerator of the duty cycle ratio to be applied
+ * @den: denominator of the duty cycle ratio to be applied
+ *
+ * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on
+ * success, or a negative errno otherwise.
+ */
+int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);
+
+/**
+ * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
+ * @clk: clock signal source
+ * @scale: scaling factor to be applied to represent the ratio as an integer
+ *
+ * Returns the duty cycle ratio multiplied by the scale provided, otherwise
+ * returns a negative errno.
+ */
+int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
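For instance (illustrative only), requesting a one-third duty cycle and
reading it back as a percentage:

	int ret, pct;

	ret = clk_set_duty_cycle(clk, 1, 3);	/* num = 1, den = 3 */
	if (!ret)
		pct = clk_get_scaled_duty_cycle(clk, 100);	/* ~33 */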
+
+/**
* clk_is_match - check if two clk's point to the same hardware clock
* @p: clk compared against q
* @q: clk compared against p
@@ -168,6 +197,13 @@ static inline int clk_notifier_unregister(struct clk *clk,
return -ENOTSUPP;
}
+static inline int devm_clk_notifier_register(struct device *dev,
+ struct clk *clk,
+ struct notifier_block *nb)
+{
+ return -ENOTSUPP;
+}
+
static inline long clk_get_accuracy(struct clk *clk)
{
return -ENOTSUPP;
@@ -183,6 +219,18 @@ static inline long clk_get_phase(struct clk *clk)
return -ENOTSUPP;
}
+static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
+ unsigned int den)
+{
+ return -ENOTSUPP;
+}
+
+static inline int clk_get_scaled_duty_cycle(struct clk *clk,
+					    unsigned int scale)
+{
+ return 0;
+}
+
static inline bool clk_is_match(const struct clk *p, const struct clk *q)
{
return p == q;
@@ -190,6 +238,7 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q)
#endif
+#ifdef CONFIG_HAVE_CLK_PREPARE
/**
* clk_prepare - prepare a clock source
* @clk: clock source
@@ -198,10 +247,26 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q)
*
* Must not be called from within atomic context.
*/
-#ifdef CONFIG_HAVE_CLK_PREPARE
int clk_prepare(struct clk *clk);
int __must_check clk_bulk_prepare(int num_clks,
const struct clk_bulk_data *clks);
+
+/**
+ * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it.
+ * @clk: clock source
+ *
+ * Returns true if clk_prepare() implicitly enables the clock, effectively
+ * making clk_enable()/clk_disable() no-ops, false otherwise.
+ *
+ * This is of interest mainly to the power management code where actually
+ * disabling the clock also requires unpreparing it to have any material
+ * effect.
+ *
+ * Regardless of the value returned here, the caller must always invoke
+ * clk_enable() or clk_prepare_enable() and counterparts for usage counts
+ * to be right.
+ */
+bool clk_is_enabled_when_prepared(struct clk *clk);
#else
static inline int clk_prepare(struct clk *clk)
{
@@ -209,11 +274,17 @@ static inline int clk_prepare(struct clk *clk)
return 0;
}
-static inline int clk_bulk_prepare(int num_clks, struct clk_bulk_data *clks)
+static inline int __must_check
+clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks)
{
might_sleep();
return 0;
}
+
+static inline bool clk_is_enabled_when_prepared(struct clk *clk)
+{
+ return false;
+}
#endif
/**
@@ -233,7 +304,8 @@ static inline void clk_unprepare(struct clk *clk)
{
might_sleep();
}
-static inline void clk_bulk_unprepare(int num_clks, struct clk_bulk_data *clks)
+static inline void clk_bulk_unprepare(int num_clks,
+ const struct clk_bulk_data *clks)
{
might_sleep();
}
@@ -279,8 +351,40 @@ struct clk *clk_get(struct device *dev, const char *id);
*/
int __must_check clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks);
+/**
+ * clk_bulk_get_all - lookup and obtain all available references to clock
+ * producer.
+ * @dev: device for clock "consumer"
+ * @clks: pointer to the clk_bulk_data table of consumer
+ *
+ * This helper function allows drivers to get all clock consumers in one
+ * operation. If any of the clocks cannot be acquired, any clocks that were
+ * already obtained will be freed before returning to the caller.
+ *
+ * Returns a positive value for the number of clocks obtained, with the
+ * clock references stored in the clk_bulk_data table at @clks.
+ * Returns 0 if there are none and a negative value if something failed.
+ *
+ * Drivers must assume that the clock source is not enabled.
+ *
+ * clk_bulk_get should not be called from within interrupt context.
+ */
+int __must_check clk_bulk_get_all(struct device *dev,
+ struct clk_bulk_data **clks);
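An illustrative consumer-side sketch of the bulk-get-all pattern (pdev is
the driver's platform device):

	struct clk_bulk_data *clks;
	int num, ret;

	num = clk_bulk_get_all(&pdev->dev, &clks);
	if (num < 0)
		return num;

	ret = clk_bulk_prepare_enable(num, clks);
	if (ret)
		clk_bulk_put_all(num, clks);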
/**
+ * clk_bulk_get_optional - lookup and obtain a number of references to clock producer
+ * @dev: device for clock "consumer"
+ * @num_clks: the number of clk_bulk_data
+ * @clks: the clk_bulk_data table of consumer
+ *
+ * Behaves the same as clk_bulk_get() except where there is no clock producer.
+ * In this case, instead of returning -ENOENT, the function returns 0, with
+ * NULL stored for any clk whose producer could not be determined.
+ */
+int __must_check clk_bulk_get_optional(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks);
+/**
* devm_clk_bulk_get - managed get multiple clk consumers
* @dev: device for clock "consumer"
* @num_clks: the number of clk_bulk_data
@@ -294,21 +398,61 @@ int __must_check clk_bulk_get(struct device *dev, int num_clks,
*/
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks);
+/**
+ * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks
+ * @dev: device for clock "consumer"
+ * @num_clks: the number of clk_bulk_data
+ * @clks: pointer to the clk_bulk_data table of consumer
+ *
+ * Behaves the same as devm_clk_bulk_get() except where there is no clock
+ * producer. In this case, instead of returning -ENOENT, the function returns
+ * NULL for the given clk. All clocks in the clk_bulk_data table are assumed
+ * to be optional.
+ *
+ * Returns 0 if all clocks specified in clk_bulk_data table are obtained
+ * successfully or for any clk there was no clk provider available, otherwise
+ * returns valid IS_ERR() condition containing errno.
+ * The implementation uses @dev and @clk_bulk_data.id to determine the
+ * clock consumer, and thereby the clock producer.
+ * The clock returned is stored in each @clk_bulk_data.clk field.
+ *
+ * Drivers must assume that the clock source is not enabled.
+ *
+ * clk_bulk_get should not be called from within interrupt context.
+ */
+int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks);
+/**
+ * devm_clk_bulk_get_all - managed get multiple clk consumers
+ * @dev: device for clock "consumer"
+ * @clks: pointer to the clk_bulk_data table of consumer
+ *
+ * Returns a positive value for the number of clocks obtained, with the
+ * clock references stored in the clk_bulk_data table at @clks.
+ * Returns 0 if there are none and a negative value if something failed.
+ *
+ * This helper function allows drivers to get several clock consumers in
+ * one operation with management; the clocks will automatically be freed
+ * when the device is unbound.
+ */
+int __must_check devm_clk_bulk_get_all(struct device *dev,
+ struct clk_bulk_data **clks);
/**
* devm_clk_get - lookup and obtain a managed reference to a clock producer.
* @dev: device for clock "consumer"
* @id: clock consumer ID
*
- * Returns a struct clk corresponding to the clock producer, or
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
* valid IS_ERR() condition containing errno. The implementation
* uses @dev and @id to determine the clock consumer, and thereby
* the clock producer. (IOW, @id may be identical strings, but
* clk_get may return different clock producers depending on @dev.)
*
- * Drivers must assume that the clock source is not enabled.
- *
- * devm_clk_get should not be called from within interrupt context.
+ * Drivers must assume that the clock source is neither prepared nor
+ * enabled.
*
* The clock will automatically be freed when the device is unbound
* from the bus.
@@ -316,6 +460,114 @@ int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
struct clk *devm_clk_get(struct device *dev, const char *id);
/**
+ * devm_clk_get_prepared - devm_clk_get() + clk_prepare()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. (IOW, @id may be identical strings, but
+ * clk_get may return different clock producers depending on @dev.)
+ *
+ * The returned clk (if valid) is prepared. Drivers must however assume
+ * that the clock is not enabled.
+ *
+ * The clock will automatically be unprepared and freed when the device
+ * is unbound from the bus.
+ */
+struct clk *devm_clk_get_prepared(struct device *dev, const char *id);
+
+/**
+ * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. (IOW, @id may be identical strings, but
+ * clk_get may return different clock producers depending on @dev.)
+ *
+ * The returned clk (if valid) is prepared and enabled.
+ *
+ * The clock will automatically be disabled, unprepared and freed
+ * when the device is unbound from the bus.
+ */
+struct clk *devm_clk_get_enabled(struct device *dev, const char *id);
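A typical probe-time pattern (the consumer ID "bus" is hypothetical):

	clk = devm_clk_get_enabled(&pdev->dev, "bus");
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	/* no explicit clk_disable_unprepare() is needed on remove */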
+
+/**
+ * devm_clk_get_optional - lookup and obtain a managed reference to an optional
+ * clock producer.
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get().
+ *
+ * Drivers must assume that the clock source is neither prepared nor
+ * enabled.
+ *
+ * The clock will automatically be freed when the device is unbound
+ * from the bus.
+ */
+struct clk *devm_clk_get_optional(struct device *dev, const char *id);
+
+/**
+ * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get_prepared().
+ *
+ * The returned clk (if valid) is prepared. Drivers must however
+ * assume that the clock is not enabled.
+ *
+ * The clock will automatically be unprepared and freed when the
+ * device is unbound from the bus.
+ */
+struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id);
+
+/**
+ * devm_clk_get_optional_enabled - devm_clk_get_optional() +
+ * clk_prepare_enable()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get_enabled().
+ *
+ * The returned clk (if valid) is prepared and enabled.
+ *
+ * The clock will automatically be disabled, unprepared and freed
+ * when the device is unbound from the bus.
+ */
+struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);
+
+/**
* devm_get_clk_from_child - lookup and obtain a managed reference to a
* clock producer from child node.
* @dev: device for clock "consumer"
@@ -331,6 +583,38 @@ struct clk *devm_clk_get(struct device *dev, const char *id);
*/
struct clk *devm_get_clk_from_child(struct device *dev,
struct device_node *np, const char *con_id);
+/**
+ * clk_rate_exclusive_get - get exclusivity over the rate control of a
+ * producer
+ * @clk: clock source
+ *
+ * This function allows drivers to get exclusive control over the rate of a
+ * provider. It prevents any other consumer from executing, even indirectly,
+ * an operation which could alter the rate of the provider or cause glitches.
+ *
+ * If exclusivity is claimed more than once on a clock, even by the same
+ * driver, the rate effectively gets locked, as exclusivity can't be preempted.
+ *
+ * Must not be called from within atomic context.
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_rate_exclusive_get(struct clk *clk);
+
+/**
+ * clk_rate_exclusive_put - release exclusivity over the rate control of a
+ * producer
+ * @clk: clock source
+ *
+ * This function allows drivers to release the exclusivity they previously
+ * got from clk_rate_exclusive_get().
+ *
+ * The caller must balance the number of clk_rate_exclusive_get() and
+ * clk_rate_exclusive_put() calls.
+ *
+ * Must not be called from within atomic context.
+ */
+void clk_rate_exclusive_put(struct clk *clk);
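An illustrative balanced sequence (the 19.2 MHz rate is hypothetical):

	int ret;

	ret = clk_rate_exclusive_get(clk);
	if (ret)
		return ret;

	ret = clk_set_rate(clk, 19200000);
	/* ... use the clock at the protected rate ... */
	clk_rate_exclusive_put(clk);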
/**
* clk_enable - inform the system when the clock source should be running.
@@ -423,6 +707,19 @@ void clk_put(struct clk *clk);
void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);
/**
+ * clk_bulk_put_all - "free" all the clock sources
+ * @num_clks: the number of clk_bulk_data
+ * @clks: the clk_bulk_data table of consumer
+ *
+ * Note: drivers must ensure that all clk_bulk_enable calls made on this
+ * clock source are balanced by clk_bulk_disable calls prior to calling
+ * this function.
+ *
+ * clk_bulk_put_all should not be called from within interrupt context.
+ */
+void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks);
+
+/**
* devm_clk_put - "free" a managed clock source
* @dev: device used to acquire the clock
* @clk: clock source acquired with devm_clk_get()
@@ -468,11 +765,31 @@ long clk_round_rate(struct clk *clk, unsigned long rate);
* @clk: clock source
* @rate: desired clock rate in Hz
*
+ * Updating the rate starts at the top-most affected clock and then
+ * walks the tree down to the bottom-most clock that needs updating.
+ *
* Returns success (0) or negative errno.
*/
int clk_set_rate(struct clk *clk, unsigned long rate);
/**
+ * clk_set_rate_exclusive - set the clock rate and claim exclusivity over
+ * the clock source
+ * @clk: clock source
+ * @rate: desired clock rate in Hz
+ *
+ * This helper function allows drivers to atomically set the rate of a producer
+ * and claim exclusivity over the rate control of the producer.
+ *
+ * It is essentially a combination of clk_set_rate() and
+ * clk_rate_exclusite_get(). Caller must balance this call with a call to
+ * clk_rate_exclusive_put()
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);
+
+/**
* clk_has_parent - check if a clock is a possible parent for another
* @clk: clock source
* @parent: parent clock source
@@ -482,7 +799,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate);
*
* Returns true if @parent is a possible parent for @clk, false otherwise.
*/
-bool clk_has_parent(struct clk *clk, struct clk *parent);
+bool clk_has_parent(const struct clk *clk, const struct clk *parent);
/**
* clk_set_rate_range - set a rate range for a clock source
@@ -547,6 +864,23 @@ struct clk *clk_get_parent(struct clk *clk);
*/
struct clk *clk_get_sys(const char *dev_id, const char *con_id);
+/**
+ * clk_save_context - save clock context for poweroff
+ *
+ * Saves the context of the clock registers for power states in which their
+ * contents will be lost. Occurs deep within the suspend
+ * code so locking is not necessary.
+ */
+int clk_save_context(void);
+
+/**
+ * clk_restore_context - restore clock context after poweroff
+ *
+ * This occurs with all clocks enabled. Occurs deep within the resume code
+ * so locking is not necessary.
+ */
+void clk_restore_context(void);
+
#else /* !CONFIG_HAVE_CLK */
static inline struct clk *clk_get(struct device *dev, const char *id)
@@ -554,8 +888,20 @@ static inline struct clk *clk_get(struct device *dev, const char *id)
return NULL;
}
-static inline int clk_bulk_get(struct device *dev, int num_clks,
- struct clk_bulk_data *clks)
+static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks)
+{
+ return 0;
+}
+
+static inline int __must_check clk_bulk_get_optional(struct device *dev,
+ int num_clks, struct clk_bulk_data *clks)
+{
+ return 0;
+}
+
+static inline int __must_check clk_bulk_get_all(struct device *dev,
+ struct clk_bulk_data **clks)
{
return 0;
}
@@ -565,12 +911,55 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id)
return NULL;
}
-static inline int devm_clk_bulk_get(struct device *dev, int num_clks,
- struct clk_bulk_data *clks)
+static inline struct clk *devm_clk_get_prepared(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
+static inline struct clk *devm_clk_get_enabled(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
+static inline struct clk *devm_clk_get_optional(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
+static inline struct clk *devm_clk_get_optional_prepared(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
+static inline struct clk *devm_clk_get_optional_enabled(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
+static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks)
{
return 0;
}
+static inline int __must_check devm_clk_bulk_get_optional(struct device *dev,
+ int num_clks, struct clk_bulk_data *clks)
+{
+ return 0;
+}
+
+static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
+ struct clk_bulk_data **clks)
+{
+ return 0;
+}
+
static inline struct clk *devm_get_clk_from_child(struct device *dev,
struct device_node *np, const char *con_id)
{
@@ -581,14 +970,25 @@ static inline void clk_put(struct clk *clk) {}
static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}
+static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}
+
static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
+
+static inline int clk_rate_exclusive_get(struct clk *clk)
+{
+ return 0;
+}
+
+static inline void clk_rate_exclusive_put(struct clk *clk) {}
+
static inline int clk_enable(struct clk *clk)
{
return 0;
}
-static inline int clk_bulk_enable(int num_clks, struct clk_bulk_data *clks)
+static inline int __must_check clk_bulk_enable(int num_clks,
+ const struct clk_bulk_data *clks)
{
return 0;
}
@@ -597,7 +997,7 @@ static inline void clk_disable(struct clk *clk) {}
static inline void clk_bulk_disable(int num_clks,
- struct clk_bulk_data *clks) {}
+ const struct clk_bulk_data *clks) {}
static inline unsigned long clk_get_rate(struct clk *clk)
{
@@ -609,6 +1009,11 @@ static inline int clk_set_rate(struct clk *clk, unsigned long rate)
return 0;
}
+static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
+{
+ return 0;
+}
+
static inline long clk_round_rate(struct clk *clk, unsigned long rate)
{
return 0;
@@ -619,6 +1024,22 @@ static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
return true;
}
+static inline int clk_set_rate_range(struct clk *clk, unsigned long min,
+ unsigned long max)
+{
+ return 0;
+}
+
+static inline int clk_set_min_rate(struct clk *clk, unsigned long rate)
+{
+ return 0;
+}
+
+static inline int clk_set_max_rate(struct clk *clk, unsigned long rate)
+{
+ return 0;
+}
+
static inline int clk_set_parent(struct clk *clk, struct clk *parent)
{
return 0;
@@ -633,6 +1054,14 @@ static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
return NULL;
}
+
+static inline int clk_save_context(void)
+{
+ return 0;
+}
+
+static inline void clk_restore_context(void) {}
+
#endif
/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
@@ -657,8 +1086,8 @@ static inline void clk_disable_unprepare(struct clk *clk)
clk_unprepare(clk);
}
-static inline int clk_bulk_prepare_enable(int num_clks,
- struct clk_bulk_data *clks)
+static inline int __must_check
+clk_bulk_prepare_enable(int num_clks, const struct clk_bulk_data *clks)
{
int ret;
@@ -673,12 +1102,42 @@ static inline int clk_bulk_prepare_enable(int num_clks,
}
static inline void clk_bulk_disable_unprepare(int num_clks,
- struct clk_bulk_data *clks)
+ const struct clk_bulk_data *clks)
{
clk_bulk_disable(num_clks, clks);
clk_bulk_unprepare(num_clks, clks);
}
+/**
+ * clk_drop_range - Reset any range set on that clock
+ * @clk: clock source
+ *
+ * Returns success (0) or negative errno.
+ */
+static inline int clk_drop_range(struct clk *clk)
+{
+ return clk_set_rate_range(clk, 0, ULONG_MAX);
+}
+
+/**
+ * clk_get_optional - lookup and obtain a reference to an optional clock
+ * producer.
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Behaves the same as clk_get() except where there is no clock producer. In
+ * this case, instead of returning -ENOENT, the function returns NULL.
+ */
+static inline struct clk *clk_get_optional(struct device *dev, const char *id)
+{
+ struct clk *clk = clk_get(dev, id);
+
+ if (clk == ERR_PTR(-ENOENT))
+ return NULL;
+
+ return clk;
+}
+
#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
struct clk *of_clk_get(struct device_node *np, int index);
struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
diff --git a/include/linux/clk/analogbits-wrpll-cln28hpc.h b/include/linux/clk/analogbits-wrpll-cln28hpc.h
new file mode 100644
index 000000000000..03279097e138
--- /dev/null
+++ b/include/linux/clk/analogbits-wrpll-cln28hpc.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ * Wesley Terpstra
+ * Paul Walmsley
+ */
+
+#ifndef __LINUX_CLK_ANALOGBITS_WRPLL_CLN28HPC_H
+#define __LINUX_CLK_ANALOGBITS_WRPLL_CLN28HPC_H
+
+#include <linux/types.h>
+
+/* DIVQ_VALUES: number of valid DIVQ values */
+#define DIVQ_VALUES 6
+
+/*
+ * Bit definitions for struct wrpll_cfg.flags
+ *
+ * WRPLL_FLAGS_BYPASS_FLAG: if set, the PLL is either in bypass, or should be
+ * programmed to enter bypass
+ * WRPLL_FLAGS_RESET_FLAG: if set, the PLL is in reset
+ * WRPLL_FLAGS_INT_FEEDBACK_FLAG: if set, the PLL is configured for internal
+ * feedback mode
+ * WRPLL_FLAGS_EXT_FEEDBACK_FLAG: if set, the PLL is configured for external
+ * feedback mode (not yet supported by this driver)
+ */
+#define WRPLL_FLAGS_BYPASS_SHIFT 0
+#define WRPLL_FLAGS_BYPASS_MASK BIT(WRPLL_FLAGS_BYPASS_SHIFT)
+#define WRPLL_FLAGS_RESET_SHIFT 1
+#define WRPLL_FLAGS_RESET_MASK BIT(WRPLL_FLAGS_RESET_SHIFT)
+#define WRPLL_FLAGS_INT_FEEDBACK_SHIFT 2
+#define WRPLL_FLAGS_INT_FEEDBACK_MASK BIT(WRPLL_FLAGS_INT_FEEDBACK_SHIFT)
+#define WRPLL_FLAGS_EXT_FEEDBACK_SHIFT 3
+#define WRPLL_FLAGS_EXT_FEEDBACK_MASK BIT(WRPLL_FLAGS_EXT_FEEDBACK_SHIFT)
+
+/**
+ * struct wrpll_cfg - WRPLL configuration values
+ * @divr: reference divider value (6 bits), as presented to the PLL signals
+ * @divf: feedback divider value (9 bits), as presented to the PLL signals
+ * @divq: output divider value (3 bits), as presented to the PLL signals
+ * @flags: PLL configuration flags. See above for more information
+ * @range: PLL loop filter range. See below for more information
+ * @output_rate_cache: cached output rates, swept across DIVQ
+ * @parent_rate: PLL refclk rate for which values are valid
+ * @max_r: maximum possible R divider value, given @parent_rate
+ * @init_r: initial R divider value to start the search from
+ *
+ * @divr, @divf, @divq, @range represent what the PLL expects to see
+ * on its input signals. Thus @divr and @divf are the actual divisors
+ * minus one. @divq is a power-of-two divider; for example, 1 =
+ * divide-by-2 and 6 = divide-by-64. 0 is an invalid @divq value.
+ *
+ * When initially passing a struct wrpll_cfg record, the
+ * record should be zero-initialized with the exception of the @flags
+ * field. The only flag bits that need to be set are either
+ * WRPLL_FLAGS_INT_FEEDBACK or WRPLL_FLAGS_EXT_FEEDBACK.
+ */
+struct wrpll_cfg {
+ u8 divr;
+ u8 divq;
+ u8 range;
+ u8 flags;
+ u16 divf;
+/* private: */
+ u32 output_rate_cache[DIVQ_VALUES];
+ unsigned long parent_rate;
+ u8 max_r;
+ u8 init_r;
+};
+
+int wrpll_configure_for_rate(struct wrpll_cfg *c, u32 target_rate,
+ unsigned long parent_rate);
+
+unsigned int wrpll_calc_max_lock_us(const struct wrpll_cfg *c);
+
+unsigned long wrpll_calc_output_rate(const struct wrpll_cfg *c,
+ unsigned long parent_rate);
+
+#endif /* __LINUX_CLK_ANALOGBITS_WRPLL_CLN28HPC_H */
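An illustrative calling sequence for this API (the 1 GHz target and
33.33 MHz reference rates are hypothetical):

	struct wrpll_cfg cfg = {
		/* zero-initialized apart from selecting internal feedback */
		.flags = WRPLL_FLAGS_INT_FEEDBACK_MASK,
	};
	unsigned long rate;
	int ret;

	ret = wrpll_configure_for_rate(&cfg, 1000000000, 33330000);
	if (!ret)
		rate = wrpll_calc_output_rate(&cfg, 33330000);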
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index 17f413bbbedf..7af499bdbecb 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/clk/at91_pmc.h
*
@@ -6,16 +7,16 @@
*
* Power Management Controller (PMC) - System peripherals registers.
* Based on AT91RM9200 datasheet revision E.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef AT91_PMC_H
#define AT91_PMC_H
+#include <linux/bits.h>
+
+#define AT91_PMC_V1 (1) /* PMC version 1 */
+#define AT91_PMC_V2 (2) /* PMC version 2 [SAM9X60] */
+
#define AT91_PMC_SCER 0x00 /* System Clock Enable Register */
#define AT91_PMC_SCDR 0x04 /* System Clock Disable Register */
@@ -34,21 +35,42 @@
#define AT91_PMC_HCK0 (1 << 16) /* AHB Clock (USB host) [AT91SAM9261 only] */
#define AT91_PMC_HCK1 (1 << 17) /* AHB Clock (LCD) [AT91SAM9261 only] */
+#define AT91_PMC_PLL_CTRL0 0x0C /* PLL Control Register 0 [for SAM9X60] */
+#define AT91_PMC_PLL_CTRL0_ENPLL (1 << 28) /* Enable PLL */
+#define AT91_PMC_PLL_CTRL0_ENPLLCK (1 << 29) /* Enable PLL clock for PMC */
+#define AT91_PMC_PLL_CTRL0_ENLOCK (1 << 31) /* Enable PLL lock */
+
+#define AT91_PMC_PLL_CTRL1 0x10 /* PLL Control Register 1 [for SAM9X60] */
+
#define AT91_PMC_PCER 0x10 /* Peripheral Clock Enable Register */
#define AT91_PMC_PCDR 0x14 /* Peripheral Clock Disable Register */
#define AT91_PMC_PCSR 0x18 /* Peripheral Clock Status Register */
+#define AT91_PMC_PLL_ACR 0x18 /* PLL Analog Control Register [for SAM9X60] */
+#define AT91_PMC_PLL_ACR_DEFAULT_UPLL UL(0x12020010) /* Default PLL ACR value for UPLL */
+#define AT91_PMC_PLL_ACR_DEFAULT_PLLA UL(0x00020010) /* Default PLL ACR value for PLLA */
+#define AT91_PMC_PLL_ACR_UTMIVR (1 << 12) /* UPLL Voltage regulator Control */
+#define AT91_PMC_PLL_ACR_UTMIBG (1 << 13) /* UPLL Bandgap Control */
+
#define AT91_CKGR_UCKR 0x1C /* UTMI Clock Register [some SAM9] */
#define AT91_PMC_UPLLEN (1 << 16) /* UTMI PLL Enable */
#define AT91_PMC_UPLLCOUNT (0xf << 20) /* UTMI PLL Start-up Time */
#define AT91_PMC_BIASEN (1 << 24) /* UTMI BIAS Enable */
#define AT91_PMC_BIASCOUNT (0xf << 28) /* UTMI BIAS Start-up Time */
+#define AT91_PMC_PLL_UPDT 0x1C /* PMC PLL update register [for SAM9X60] */
+#define AT91_PMC_PLL_UPDT_UPDATE (1 << 8) /* Update PLL settings */
+#define AT91_PMC_PLL_UPDT_ID (1 << 0) /* PLL ID */
+#define AT91_PMC_PLL_UPDT_ID_MSK (0xf) /* PLL ID mask */
+#define AT91_PMC_PLL_UPDT_STUPTIM (0xff << 16) /* Startup time */
+
#define AT91_CKGR_MOR 0x20 /* Main Oscillator Register [not on SAM9RL] */
#define AT91_PMC_MOSCEN (1 << 0) /* Main Oscillator Enable */
#define AT91_PMC_OSCBYPASS (1 << 1) /* Oscillator Bypass */
+#define AT91_PMC_WAITMODE (1 << 2) /* Wait Mode Command */
#define AT91_PMC_MOSCRCEN (1 << 3) /* Main On-Chip RC Oscillator Enable [some SAM9] */
#define AT91_PMC_OSCOUNT (0xff << 8) /* Main Oscillator Start-up Time */
+#define AT91_PMC_KEY_MASK (0xff << 16)
#define AT91_PMC_KEY (0x37 << 16) /* MOR Writing Key */
#define AT91_PMC_MOSCSEL (1 << 24) /* Main Oscillator Selection [some SAM9] */
#define AT91_PMC_CFDEN (1 << 25) /* Clock Failure Detector Enable [some SAM9] */
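For instance (a sketch assuming a regmap handle for the PMC), any MOR write
must carry the key or the hardware ignores it:

	/* enable the main oscillator; the write key must accompany the data */
	regmap_update_bits(regmap, AT91_CKGR_MOR,
			   AT91_PMC_KEY_MASK | AT91_PMC_MOSCEN,
			   AT91_PMC_KEY | AT91_PMC_MOSCEN);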
@@ -58,6 +80,10 @@
#define AT91_PMC_MAINRDY (1 << 16) /* Main Clock Ready */
#define AT91_CKGR_PLLAR 0x28 /* PLL A Register */
+
+#define AT91_PMC_RATIO 0x2c /* Processor clock ratio register [SAMA7G5 only] */
+#define AT91_PMC_RATIO_RATIO (0xf) /* CPU clock ratio. */
+
#define AT91_CKGR_PLLBR 0x2c /* PLL B Register */
#define AT91_PMC_DIV (0xff << 0) /* Divider */
#define AT91_PMC_PLLCOUNT (0x3f << 8) /* PLL Counter */
@@ -72,6 +98,8 @@
#define AT91_PMC_USBDIV_4 (2 << 28)
#define AT91_PMC_USB96M (1 << 28) /* Divider by 2 Enable (PLLB only) */
+#define AT91_PMC_CPU_CKR 0x28 /* CPU Clock Register */
+
#define AT91_PMC_MCKR 0x30 /* Master Clock Register */
#define AT91_PMC_CSS (3 << 0) /* Master Clock Selection */
#define AT91_PMC_CSS_SLOW (0 << 0)
@@ -115,6 +143,34 @@
#define AT91_PMC_PLLADIV2_ON (1 << 12)
#define AT91_PMC_H32MXDIV BIT(24)
+#define AT91_PMC_MCR_V2 0x30 /* Master Clock Register [SAMA7G5 only] */
+#define AT91_PMC_MCR_V2_ID_MSK (0xF)
+#define AT91_PMC_MCR_V2_ID(_id) ((_id) & AT91_PMC_MCR_V2_ID_MSK)
+#define AT91_PMC_MCR_V2_CMD (1 << 7)
+#define AT91_PMC_MCR_V2_DIV (7 << 8)
+#define AT91_PMC_MCR_V2_DIV1 (0 << 8)
+#define AT91_PMC_MCR_V2_DIV2 (1 << 8)
+#define AT91_PMC_MCR_V2_DIV4 (2 << 8)
+#define AT91_PMC_MCR_V2_DIV8 (3 << 8)
+#define AT91_PMC_MCR_V2_DIV16 (4 << 8)
+#define AT91_PMC_MCR_V2_DIV32 (5 << 8)
+#define AT91_PMC_MCR_V2_DIV64 (6 << 8)
+#define AT91_PMC_MCR_V2_DIV3 (7 << 8)
+#define AT91_PMC_MCR_V2_CSS (0x1F << 16)
+#define AT91_PMC_MCR_V2_CSS_MD_SLCK (0 << 16)
+#define AT91_PMC_MCR_V2_CSS_TD_SLCK (1 << 16)
+#define AT91_PMC_MCR_V2_CSS_MAINCK (2 << 16)
+#define AT91_PMC_MCR_V2_CSS_MCK0 (3 << 16)
+#define AT91_PMC_MCR_V2_CSS_SYSPLL (5 << 16)
+#define AT91_PMC_MCR_V2_CSS_DDRPLL (6 << 16)
+#define AT91_PMC_MCR_V2_CSS_IMGPLL (7 << 16)
+#define AT91_PMC_MCR_V2_CSS_BAUDPLL (8 << 16)
+#define AT91_PMC_MCR_V2_CSS_AUDIOPLL (9 << 16)
+#define AT91_PMC_MCR_V2_CSS_ETHPLL (10 << 16)
+#define AT91_PMC_MCR_V2_EN (1 << 28)
+
+#define AT91_PMC_XTALF 0x34 /* Main XTAL Frequency Register [SAMA7G5 only] */
+
#define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */
#define AT91_PMC_USBS (0x1 << 0) /* USB OHCI Input clock selection */
#define AT91_PMC_USBS_PLLA (0 << 0)
@@ -153,8 +209,23 @@
#define AT91_PMC_MOSCRCS (1 << 17) /* Main On-Chip RC [some SAM9] */
#define AT91_PMC_CFDEV (1 << 18) /* Clock Failure Detector Event [some SAM9] */
#define AT91_PMC_GCKRDY (1 << 24) /* Generated Clocks */
+#define AT91_PMC_MCKXRDY (1 << 26) /* Master Clock x [x=1..4] Ready Status */
#define AT91_PMC_IMR 0x6c /* Interrupt Mask Register */
+#define AT91_PMC_FSMR 0x70 /* Fast Startup Mode Register */
+#define AT91_PMC_FSTT(n) BIT(n)
+#define AT91_PMC_RTTAL BIT(16) /* RTT Alarm Enable */
+#define AT91_PMC_RTCAL BIT(17) /* RTC Alarm Enable */
+#define AT91_PMC_USBAL BIT(18) /* USB Resume Enable */
+#define AT91_PMC_SDMMC_CD BIT(19) /* SDMMC Card Detect Enable */
+#define AT91_PMC_LPM BIT(20) /* Low-power Mode */
+#define AT91_PMC_RXLP_MCE BIT(24) /* Backup UART Receive Enable */
+#define AT91_PMC_ACC_CE BIT(25) /* ACC Enable */
+
+#define AT91_PMC_FSPR 0x74 /* Fast Startup Polarity Register */
+
+#define AT91_PMC_FS_INPUT_MASK 0x7ff
+
#define AT91_PMC_PLLICPR 0x80 /* PLL Charge Pump Current Register */
#define AT91_PMC_PROT 0xe4 /* Write Protect Mode Register [some SAM9] */
@@ -166,23 +237,42 @@
#define AT91_PMC_WPVS (0x1 << 0) /* Write Protect Violation Status */
#define AT91_PMC_WPVSRC (0xffff << 8) /* Write Protect Violation Source */
+#define AT91_PMC_PLL_ISR0 0xEC /* PLL Interrupt Status Register 0 [SAM9X60 only] */
+
#define AT91_PMC_PCER1 0x100 /* Peripheral Clock Enable Register 1 [SAMA5 only]*/
#define AT91_PMC_PCDR1 0x104 /* Peripheral Clock Enable Register 1 */
#define AT91_PMC_PCSR1 0x108 /* Peripheral Clock Enable Register 1 */
#define AT91_PMC_PCR 0x10c /* Peripheral Control Register [some SAM9 and SAMA5] */
#define AT91_PMC_PCR_PID_MASK 0x3f
-#define AT91_PMC_PCR_GCKCSS_OFFSET 8
-#define AT91_PMC_PCR_GCKCSS_MASK (0x7 << AT91_PMC_PCR_GCKCSS_OFFSET)
-#define AT91_PMC_PCR_GCKCSS(n) ((n) << AT91_PMC_PCR_GCKCSS_OFFSET) /* GCK Clock Source Selection */
#define AT91_PMC_PCR_CMD (0x1 << 12) /* Command (read=0, write=1) */
-#define AT91_PMC_PCR_DIV_OFFSET 16
-#define AT91_PMC_PCR_DIV_MASK (0x3 << AT91_PMC_PCR_DIV_OFFSET)
-#define AT91_PMC_PCR_DIV(n) ((n) << AT91_PMC_PCR_DIV_OFFSET) /* Divisor Value */
-#define AT91_PMC_PCR_GCKDIV_OFFSET 20
-#define AT91_PMC_PCR_GCKDIV_MASK (0xff << AT91_PMC_PCR_GCKDIV_OFFSET)
-#define AT91_PMC_PCR_GCKDIV(n) ((n) << AT91_PMC_PCR_GCKDIV_OFFSET) /* Generated Clock Divisor Value */
+#define AT91_PMC_PCR_GCKDIV_MASK GENMASK(27, 20)
#define AT91_PMC_PCR_EN (0x1 << 28) /* Enable */
#define AT91_PMC_PCR_GCKEN (0x1 << 29) /* GCK Enable */
+#define AT91_PMC_AUDIO_PLL0 0x14c
+#define AT91_PMC_AUDIO_PLL_PLLEN (1 << 0)
+#define AT91_PMC_AUDIO_PLL_PADEN (1 << 1)
+#define AT91_PMC_AUDIO_PLL_PMCEN (1 << 2)
+#define AT91_PMC_AUDIO_PLL_RESETN (1 << 3)
+#define AT91_PMC_AUDIO_PLL_ND_OFFSET 8
+#define AT91_PMC_AUDIO_PLL_ND_MASK (0x7f << AT91_PMC_AUDIO_PLL_ND_OFFSET)
+#define AT91_PMC_AUDIO_PLL_ND(n) ((n) << AT91_PMC_AUDIO_PLL_ND_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPMC_OFFSET 16
+#define AT91_PMC_AUDIO_PLL_QDPMC_MASK (0x7f << AT91_PMC_AUDIO_PLL_QDPMC_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPMC(n) ((n) << AT91_PMC_AUDIO_PLL_QDPMC_OFFSET)
+
+#define AT91_PMC_AUDIO_PLL1 0x150
+#define AT91_PMC_AUDIO_PLL_FRACR_MASK 0x3fffff
+#define AT91_PMC_AUDIO_PLL_QDPAD_OFFSET 24
+#define AT91_PMC_AUDIO_PLL_QDPAD_MASK (0x7f << AT91_PMC_AUDIO_PLL_QDPAD_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPAD(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPAD_DIV_OFFSET AT91_PMC_AUDIO_PLL_QDPAD_OFFSET
+#define AT91_PMC_AUDIO_PLL_QDPAD_DIV_MASK (0x3 << AT91_PMC_AUDIO_PLL_QDPAD_DIV_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPAD_DIV(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_DIV_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_OFFSET 26
+#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MAX 0x1f
+#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MASK (AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MAX << AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_OFFSET)
+
#endif
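
A quick illustration of the ID/CMD handshake the SAMA7G5 MCR_V2 layout implies: software selects one of the master clocks by writing its ID with CMD clear, reads the current settings back, then rewrites them with CMD set to commit. This is a hedged sketch only; pmc_base and mck_set_div() are illustrative, not kernel symbols, while the register and field names come from the defines above.

#include <linux/io.h>

/* Sketch: update the divider of one SAMA7G5 master clock (MCKx). */
static void mck_set_div(void __iomem *pmc_base, u8 id, u32 div_field)
{
	u32 val;

	/* Select the clock: write its ID with CMD clear, then read back. */
	writel(AT91_PMC_MCR_V2_ID(id), pmc_base + AT91_PMC_MCR_V2);
	val = readl(pmc_base + AT91_PMC_MCR_V2);

	/* Rewrite with the new divider and CMD set to commit the change. */
	val &= ~AT91_PMC_MCR_V2_DIV;
	val |= div_field & AT91_PMC_MCR_V2_DIV;
	writel(val | AT91_PMC_MCR_V2_CMD | AT91_PMC_MCR_V2_ID(id),
	       pmc_base + AT91_PMC_MCR_V2);
}
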
diff --git a/include/linux/clk/bcm2835.h b/include/linux/clk/bcm2835.h
deleted file mode 100644
index aa937f6c17da..000000000000
--- a/include/linux/clk/bcm2835.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (C) 2010 Broadcom
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __LINUX_CLK_BCM2835_H_
-#define __LINUX_CLK_BCM2835_H_
-
-void __init bcm2835_init_clocks(void);
-
-#endif
diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h
index e0c362363c38..eae9652c70cd 100644
--- a/include/linux/clk/clk-conf.h
+++ b/include/linux/clk/clk-conf.h
@@ -1,12 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2014 Samsung Electronics Co., Ltd.
* Sylwester Nawrocki <s.nawrocki@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
+#ifndef __CLK_CONF_H
+#define __CLK_CONF_H
+
#include <linux/types.h>
struct device_node;
@@ -20,3 +20,5 @@ static inline int of_clk_set_defaults(struct device_node *node,
return 0;
}
#endif
+
+#endif /* __CLK_CONF_H */
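
For orientation, bus code typically calls of_clk_set_defaults() just before probing a device so that assigned-clocks/assigned-clock-parents/assigned-clock-rates properties from the device tree take effect. The probe function below is a hypothetical sketch; only of_clk_set_defaults() is the API declared here.

#include <linux/clk/clk-conf.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* false: this device is a clock consumer, not a clock provider */
	return of_clk_set_defaults(pdev->dev.of_node, false);
}
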
diff --git a/include/linux/clk/davinci.h b/include/linux/clk/davinci.h
new file mode 100644
index 000000000000..e1d37451e03f
--- /dev/null
+++ b/include/linux/clk/davinci.h
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Clock drivers for TI DaVinci PLL and PSC controllers
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#ifndef __LINUX_CLK_DAVINCI_PLL_H___
+#define __LINUX_CLK_DAVINCI_PLL_H___
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+/* function for registering clocks in early boot */
+
+#ifdef CONFIG_ARCH_DAVINCI_DA830
+int da830_pll_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
+#endif
+#ifdef CONFIG_ARCH_DAVINCI_DA850
+int da850_pll0_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
+#endif
+
+#endif /* __LINUX_CLK_DAVINCI_PLL_H___ */
diff --git a/include/linux/clk/imx.h b/include/linux/clk/imx.h
new file mode 100644
index 000000000000..75a0d9696552
--- /dev/null
+++ b/include/linux/clk/imx.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Freescale Semiconductor, Inc.
+ *
+ * Author: Lee Jones <lee.jones@linaro.org>
+ */
+
+#ifndef __LINUX_CLK_IMX_H
+#define __LINUX_CLK_IMX_H
+
+#include <linux/types.h>
+
+void imx6sl_set_wait_clk(bool enter);
+
+#endif
diff --git a/include/linux/clk/mmp.h b/include/linux/clk/mmp.h
index 607321fa2c2b..445130460380 100644
--- a/include/linux/clk/mmp.h
+++ b/include/linux/clk/mmp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CLK_MMP_H
#define __CLK_MMP_H
diff --git a/include/linux/clk/mxs.h b/include/linux/clk/mxs.h
index 5138a90e018c..2674e607ffb1 100644
--- a/include/linux/clk/mxs.h
+++ b/include/linux/clk/mxs.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_CLK_MXS_H
diff --git a/include/linux/clk/pxa.h b/include/linux/clk/pxa.h
new file mode 100644
index 000000000000..736b8bb91bd7
--- /dev/null
+++ b/include/linux/clk/pxa.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+extern int pxa25x_clocks_init(void __iomem *regs);
+extern int pxa27x_clocks_init(void __iomem *regs);
+extern int pxa3xx_clocks_init(void __iomem *regs, void __iomem *oscc_reg);
+
+#ifdef CONFIG_PXA3xx
+extern unsigned pxa3xx_get_clk_frequency_khz(int);
+extern void pxa3xx_clk_update_accr(u32 disable, u32 enable, u32 xclkcfg, u32 mask);
+#else
+#define pxa3xx_get_clk_frequency_khz(x) (0)
+#define pxa3xx_clk_update_accr(disable, enable, xclkcfg, mask) do { } while (0)
+#endif
diff --git a/include/linux/clk/renesas.h b/include/linux/clk/renesas.h
index 9ebf1f8243bb..0ebbe2f0b45e 100644
--- a/include/linux/clk/renesas.h
+++ b/include/linux/clk/renesas.h
@@ -1,14 +1,10 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0+
+ *
* Copyright 2013 Ideas On Board SPRL
* Copyright 2013, 2014 Horms Solutions Ltd.
*
* Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
* Contact: Simon Horman <horms@verge.net.au>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_CLK_RENESAS_H_
diff --git a/include/linux/clk/samsung.h b/include/linux/clk/samsung.h
new file mode 100644
index 000000000000..0cf7aac83439
--- /dev/null
+++ b/include/linux/clk/samsung.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Krzysztof Kozlowski <krzk@kernel.org>
+ */
+
+#ifndef __LINUX_CLK_SAMSUNG_H_
+#define __LINUX_CLK_SAMSUNG_H_
+
+#include <linux/compiler_types.h>
+
+struct device_node;
+
+#ifdef CONFIG_S3C64XX_COMMON_CLK
+void s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
+ unsigned long xusbxti_f, bool s3c6400,
+ void __iomem *base);
+#else
+static inline void s3c64xx_clk_init(struct device_node *np,
+ unsigned long xtal_f,
+ unsigned long xusbxti_f,
+ bool s3c6400, void __iomem *base) { }
+#endif /* CONFIG_S3C64XX_COMMON_CLK */
+
+#endif /* __LINUX_CLK_SAMSUNG_H_ */
diff --git a/include/linux/clk/spear.h b/include/linux/clk/spear.h
new file mode 100644
index 000000000000..eaf95ca656f8
--- /dev/null
+++ b/include/linux/clk/spear.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020 STMicroelectronics - All Rights Reserved
+ *
+ * Author: Lee Jones <lee.jones@linaro.org>
+ */
+
+#ifndef __LINUX_CLK_SPEAR_H
+#define __LINUX_CLK_SPEAR_H
+
+#ifdef CONFIG_ARCH_SPEAR3XX
+void __init spear3xx_clk_init(void __iomem *misc_base,
+ void __iomem *soc_config_base);
+#else
+static inline void __init spear3xx_clk_init(void __iomem *misc_base,
+ void __iomem *soc_config_base) {}
+#endif
+
+#ifdef CONFIG_ARCH_SPEAR6XX
+void __init spear6xx_clk_init(void __iomem *misc_base);
+#else
+static inline void __init spear6xx_clk_init(void __iomem *misc_base) {}
+#endif
+
+#ifdef CONFIG_MACH_SPEAR1310
+void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base);
+#else
+static inline void spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base) {}
+#endif
+
+#ifdef CONFIG_MACH_SPEAR1340
+void __init spear1340_clk_init(void __iomem *misc_base);
+#else
+static inline void spear1340_clk_init(void __iomem *misc_base) {}
+#endif
+
+#endif
diff --git a/include/linux/clk/sunxi-ng.h b/include/linux/clk/sunxi-ng.h
new file mode 100644
index 000000000000..57c8ec44ab4e
--- /dev/null
+++ b/include/linux/clk/sunxi-ng.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017 Chen-Yu Tsai. All rights reserved.
+ */
+
+#ifndef _LINUX_CLK_SUNXI_NG_H_
+#define _LINUX_CLK_SUNXI_NG_H_
+
+struct clk;
+struct device;
+
+int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode);
+int sunxi_ccu_get_mmc_timing_mode(struct clk *clk);
+
+int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg);
+
+#endif
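
A short usage sketch, assuming an MMC host driver that holds a struct clk for its module clock; the wrapper below is illustrative, and the two sunxi_ccu_*() calls are the API declared above.

#include <linux/clk.h>
#include <linux/clk/sunxi-ng.h>

static int example_enable_new_timing(struct clk *mmc_clk)
{
	int ret;

	ret = sunxi_ccu_set_mmc_timing_mode(mmc_clk, true);
	if (ret)
		return ret;	/* e.g. the clock has no timing-mode bit */

	/* Returns 1 for new mode, 0 for old mode, negative on error. */
	return sunxi_ccu_get_mmc_timing_mode(mmc_clk);
}
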
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index d23c9cf26993..3650e926e93f 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (c) 2012-2020, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef __LINUX_CLK_TEGRA_H_
@@ -53,6 +42,7 @@ struct tegra_cpu_car_ops {
#endif
};
+#ifdef CONFIG_ARCH_TEGRA
extern struct tegra_cpu_car_ops *tegra_cpu_car_ops;
static inline void tegra_wait_cpu_in_reset(u32 cpu)
@@ -94,8 +84,29 @@ static inline void tegra_disable_cpu_clock(u32 cpu)
tegra_cpu_car_ops->disable_clock(cpu);
}
+#else
+static inline void tegra_wait_cpu_in_reset(u32 cpu)
+{
+}
-#ifdef CONFIG_PM_SLEEP
+static inline void tegra_put_cpu_in_reset(u32 cpu)
+{
+}
+
+static inline void tegra_cpu_out_of_reset(u32 cpu)
+{
+}
+
+static inline void tegra_enable_cpu_clock(u32 cpu)
+{
+}
+
+static inline void tegra_disable_cpu_clock(u32 cpu)
+{
+}
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA) && defined(CONFIG_PM_SLEEP)
static inline bool tegra_cpu_rail_off_ready(void)
{
if (WARN_ON(!tegra_cpu_car_ops->rail_off_ready))
@@ -119,14 +130,134 @@ static inline void tegra_cpu_clock_resume(void)
tegra_cpu_car_ops->resume();
}
+#else
+static inline bool tegra_cpu_rail_off_ready(void)
+{
+ return false;
+}
+
+static inline void tegra_cpu_clock_suspend(void)
+{
+}
+
+static inline void tegra_cpu_clock_resume(void)
+{
+}
#endif
-extern void tegra210_xusb_pll_hw_control_enable(void);
-extern void tegra210_xusb_pll_hw_sequence_start(void);
-extern void tegra210_sata_pll_hw_control_enable(void);
-extern void tegra210_sata_pll_hw_sequence_start(void);
-extern void tegra210_set_sata_pll_seq_sw(bool state);
-extern void tegra210_put_utmipll_in_iddq(void);
-extern void tegra210_put_utmipll_out_iddq(void);
+struct clk;
+struct tegra_emc;
+
+typedef long (tegra20_clk_emc_round_cb)(unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
+ void *arg);
+typedef int (tegra124_emc_prepare_timing_change_cb)(struct tegra_emc *emc,
+ unsigned long rate);
+typedef void (tegra124_emc_complete_timing_change_cb)(struct tegra_emc *emc,
+ unsigned long rate);
+
+struct tegra210_clk_emc_config {
+ unsigned long rate;
+ bool same_freq;
+ u32 value;
+
+ unsigned long parent_rate;
+ u8 parent;
+};
+
+struct tegra210_clk_emc_provider {
+ struct module *owner;
+ struct device *dev;
+
+ struct tegra210_clk_emc_config *configs;
+ unsigned int num_configs;
+
+ int (*set_rate)(struct device *dev,
+ const struct tegra210_clk_emc_config *config);
+};
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
+void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb,
+ void *cb_arg);
+int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same);
+#else
+static inline void
+tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb,
+ void *cb_arg)
+{
+}
+
+static inline int
+tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_TEGRA124_CLK_EMC
+void tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb,
+ tegra124_emc_complete_timing_change_cb *complete_cb);
+#else
+static inline void
+tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb,
+ tegra124_emc_complete_timing_change_cb *complete_cb)
+{
+}
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+int tegra210_plle_hw_sequence_start(void);
+bool tegra210_plle_hw_sequence_is_enabled(void);
+void tegra210_xusb_pll_hw_control_enable(void);
+void tegra210_xusb_pll_hw_sequence_start(void);
+void tegra210_sata_pll_hw_control_enable(void);
+void tegra210_sata_pll_hw_sequence_start(void);
+void tegra210_set_sata_pll_seq_sw(bool state);
+void tegra210_put_utmipll_in_iddq(void);
+void tegra210_put_utmipll_out_iddq(void);
+int tegra210_clk_handle_mbist_war(unsigned int id);
+void tegra210_clk_emc_dll_enable(bool flag);
+void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value);
+void tegra210_clk_emc_update_setting(u32 emc_src_value);
+
+int tegra210_clk_emc_attach(struct clk *clk,
+ struct tegra210_clk_emc_provider *provider);
+void tegra210_clk_emc_detach(struct clk *clk);
+#else
+static inline int tegra210_plle_hw_sequence_start(void)
+{
+ return 0;
+}
+
+static inline bool tegra210_plle_hw_sequence_is_enabled(void)
+{
+ return false;
+}
+
+static inline int tegra210_clk_handle_mbist_war(unsigned int id)
+{
+ return 0;
+}
+
+static inline int
+tegra210_clk_emc_attach(struct clk *clk,
+ struct tegra210_clk_emc_provider *provider)
+{
+ return 0;
+}
+
+static inline void tegra210_xusb_pll_hw_control_enable(void) {}
+static inline void tegra210_xusb_pll_hw_sequence_start(void) {}
+static inline void tegra210_sata_pll_hw_control_enable(void) {}
+static inline void tegra210_sata_pll_hw_sequence_start(void) {}
+static inline void tegra210_set_sata_pll_seq_sw(bool state) {}
+static inline void tegra210_put_utmipll_in_iddq(void) {}
+static inline void tegra210_put_utmipll_out_iddq(void) {}
+static inline void tegra210_clk_emc_dll_enable(bool flag) {}
+static inline void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value) {}
+static inline void tegra210_clk_emc_update_setting(u32 emc_src_value) {}
+static inline void tegra210_clk_emc_detach(struct clk *clk) {}
+#endif
#endif /* __LINUX_CLK_TEGRA_H_ */
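
To make the provider interface concrete, here is a hedged sketch of an EMC driver handing its frequency table to the clock driver. Everything prefixed example_ is illustrative; the struct layout and tegra210_clk_emc_attach() are the API above.

#include <linux/clk/tegra.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int example_emc_set_rate(struct device *dev,
				const struct tegra210_clk_emc_config *config)
{
	/* Program the EMC timing registers for config->rate here. */
	return 0;
}

/* Normally filled from the board's DVFS/timing table. */
static struct tegra210_clk_emc_config example_configs[2];

static struct tegra210_clk_emc_provider example_provider = {
	.owner		= THIS_MODULE,
	.configs	= example_configs,
	.num_configs	= ARRAY_SIZE(example_configs),
	.set_rate	= example_emc_set_rate,
};

/* In probe: tegra210_clk_emc_attach(emc_clk, &example_provider); */
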
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index d18da839b810..cbfcbf186ce3 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI clock drivers support
*
* Copyright (C) 2013 Texas Instruments, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_CLK_TI_H__
#define __LINUX_CLK_TI_H__
@@ -63,6 +55,17 @@ struct clk_omap_reg {
* @auto_recal_bit: bitshift of the driftguard enable bit in @control_reg
* @recal_en_bit: bitshift of the PRM_IRQENABLE_* bit for recalibration IRQs
* @recal_st_bit: bitshift of the PRM_IRQSTATUS_* bit for recalibration IRQs
+ * @ssc_deltam_reg: register containing the DPLL SSC frequency spreading
+ * @ssc_modfreq_reg: register containing the DPLL SSC modulation frequency
+ * @ssc_modfreq_mant_mask: mask of the mantissa component in @ssc_modfreq_reg
+ * @ssc_modfreq_exp_mask: mask of the exponent component in @ssc_modfreq_reg
+ * @ssc_enable_mask: mask of the DPLL SSC enable bit in @control_reg
+ * @ssc_downspread_mask: mask of the DPLL SSC low frequency only bit in
+ * @control_reg
+ * @ssc_modfreq: the DPLL SSC frequency modulation in kHz
+ * @ssc_deltam: the DPLL SSC frequency spreading in permille (tenths of a percent)
+ * @ssc_downspread: true to restrict the DPLL to downward (low frequency only)
+ * spread in SSC mode
* @flags: DPLL type/features (see below)
*
* Possible values for @flags:
@@ -110,6 +113,17 @@ struct dpll_data {
u8 auto_recal_bit;
u8 recal_en_bit;
u8 recal_st_bit;
+ struct clk_omap_reg ssc_deltam_reg;
+ struct clk_omap_reg ssc_modfreq_reg;
+ u32 ssc_deltam_int_mask;
+ u32 ssc_deltam_frac_mask;
+ u32 ssc_modfreq_mant_mask;
+ u32 ssc_modfreq_exp_mask;
+ u32 ssc_enable_mask;
+ u32 ssc_downspread_mask;
+ u32 ssc_modfreq;
+ u32 ssc_deltam;
+ bool ssc_downspread;
u8 flags;
};
@@ -153,12 +167,14 @@ struct clk_hw_omap {
u8 fixed_div;
struct clk_omap_reg enable_reg;
u8 enable_bit;
- u8 flags;
+ unsigned long flags;
struct clk_omap_reg clksel_reg;
struct dpll_data *dpll_data;
const char *clkdm_name;
struct clockdomain *clkdm;
const struct clk_hw_omap_ops *ops;
+ u32 context;
+ int autoidle_count;
};
/*
@@ -203,6 +219,7 @@ enum {
TI_CLKM_PRM,
TI_CLKM_SCRM,
TI_CLKM_CTRL,
+ TI_CLKM_CTRL_AUX,
TI_CLKM_PLLSS,
CLK_MAX_MEMMAPS
};
@@ -211,6 +228,7 @@ enum {
* struct ti_clk_ll_ops - low-level ops for clocks
* @clk_readl: pointer to register read function
* @clk_writel: pointer to register write function
+ * @clk_rmw: pointer to register read-modify-write function
* @clkdm_clk_enable: pointer to clockdomain enable function
* @clkdm_clk_disable: pointer to clockdomain disable function
* @clkdm_lookup: pointer to clockdomain lookup function
@@ -226,6 +244,7 @@ enum {
struct ti_clk_ll_ops {
u32 (*clk_readl)(const struct clk_omap_reg *reg);
void (*clk_writel)(u32 val, const struct clk_omap_reg *reg);
+ void (*clk_rmw)(u32 val, u32 mask, const struct clk_omap_reg *reg);
int (*clkdm_clk_enable)(struct clockdomain *clkdm, struct clk *clk);
int (*clkdm_clk_disable)(struct clockdomain *clkdm,
struct clk *clk);
@@ -238,6 +257,7 @@ struct ti_clk_ll_ops {
#define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw)
+bool omap2_clk_is_hw_omap(struct clk_hw *hw);
int omap2_clk_disable_autoidle_all(void);
int omap2_clk_enable_autoidle_all(void);
int omap2_clk_allow_idle(struct clk *clk);
@@ -287,9 +307,17 @@ struct ti_clk_features {
#define TI_CLK_DPLL4_DENY_REPROGRAM BIT(1)
#define TI_CLK_DISABLE_CLKDM_CONTROL BIT(2)
#define TI_CLK_ERRATA_I810 BIT(3)
+#define TI_CLK_CLKCTRL_COMPAT BIT(4)
+#define TI_CLK_DEVICE_TYPE_GP BIT(5)
void ti_clk_setup_features(struct ti_clk_features *features);
const struct ti_clk_features *ti_clk_get_features(void);
+bool ti_clk_is_in_standby(struct clk *clk);
+int omap3_noncore_dpll_save_context(struct clk_hw *hw);
+void omap3_noncore_dpll_restore_context(struct clk_hw *hw);
+
+int omap3_core_dpll_save_context(struct clk_hw *hw);
+void omap3_core_dpll_restore_context(struct clk_hw *hw);
extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll;
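
As an illustration of the new clk_rmw hook, a platform can implement it on top of its existing read/write accessors. The single flat example_clk_base mapping below is an assumption made for the sketch; real platforms index several register spaces via the clk_omap_reg index field.

#include <linux/io.h>

static void __iomem *example_clk_base;	/* hypothetical single mapping */

static u32 example_clk_readl(const struct clk_omap_reg *reg)
{
	return readl_relaxed(example_clk_base + reg->offset);
}

static void example_clk_writel(u32 val, const struct clk_omap_reg *reg)
{
	writel_relaxed(val, example_clk_base + reg->offset);
}

static void example_clk_rmw(u32 val, u32 mask, const struct clk_omap_reg *reg)
{
	u32 v = example_clk_readl(reg);

	v &= ~mask;
	v |= val & mask;
	example_clk_writel(v, reg);
}

static struct ti_clk_ll_ops example_ll_ops = {
	.clk_readl	= example_clk_readl,
	.clk_writel	= example_clk_writel,
	.clk_rmw	= example_clk_rmw,
};
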
diff --git a/include/linux/clk/zynq.h b/include/linux/clk/zynq.h
index 7a5633b71533..a198dd9255a4 100644
--- a/include/linux/clk/zynq.h
+++ b/include/linux/clk/zynq.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2013 Xilinx Inc.
* Copyright (C) 2012 National Instruments
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_CLK_ZYNQ_H_
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index 2eabc862abdb..45570bc21a43 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -1,18 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/clkdev.h
*
* Copyright (C) 2008 Russell King.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Helper for the clk API to assist looking up a struct clk.
*/
#ifndef __CLKDEV_H
#define __CLKDEV_H
-#include <asm/clkdev.h>
+#include <linux/slab.h>
struct clk;
struct clk_hw;
@@ -33,11 +30,6 @@ struct clk_lookup {
.clk = c, \
}
-struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
- const char *dev_fmt, ...) __printf(3, 4);
-struct clk_lookup *clkdev_hw_alloc(struct clk_hw *hw, const char *con_id,
- const char *dev_fmt, ...) __printf(3, 4);
-
void clkdev_add(struct clk_lookup *cl);
void clkdev_drop(struct clk_lookup *cl);
@@ -52,9 +44,6 @@ int clk_add_alias(const char *, const char *, const char *, struct device *);
int clk_register_clkdev(struct clk *, const char *, const char *);
int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *);
-#ifdef CONFIG_COMMON_CLK
-int __clk_get(struct clk *clk);
-void __clk_put(struct clk *clk);
-#endif
-
+int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw,
+ const char *con_id, const char *dev_id);
#endif
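
The new device-managed variant removes the need for manual clkdev teardown in error and remove paths. A hedged usage sketch; the hw pointer and the lookup names are illustrative:

#include <linux/clkdev.h>
#include <linux/device.h>

static int example_add_lookup(struct device *dev, struct clk_hw *hw)
{
	/* Consumers may now clk_get(dev, "baud"); dropped with the device. */
	return devm_clk_hw_register_clkdev(dev, hw, "baud", dev_name(dev));
}
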
diff --git a/include/linux/clock_cooling.h b/include/linux/clock_cooling.h
deleted file mode 100644
index 4d1019d56f7f..000000000000
--- a/include/linux/clock_cooling.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * linux/include/linux/clock_cooling.h
- *
- * Copyright (C) 2014 Eduardo Valentin <edubezval@gmail.com>
- *
- * Copyright (C) 2013 Texas Instruments Inc.
- * Contact: Eduardo Valentin <eduardo.valentin@ti.com>
- *
- * Highly based on cpu_cooling.c.
- * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
- * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-#ifndef __CPU_COOLING_H__
-#define __CPU_COOLING_H__
-
-#include <linux/of.h>
-#include <linux/thermal.h>
-#include <linux/cpumask.h>
-
-#ifdef CONFIG_CLOCK_THERMAL
-/**
- * clock_cooling_register - function to create clock cooling device.
- * @dev: struct device pointer to the device used as clock cooling device.
- * @clock_name: string containing the clock used as cooling mechanism.
- */
-struct thermal_cooling_device *
-clock_cooling_register(struct device *dev, const char *clock_name);
-
-/**
- * clock_cooling_unregister - function to remove clock cooling device.
- * @cdev: thermal cooling device pointer.
- */
-void clock_cooling_unregister(struct thermal_cooling_device *cdev);
-
-unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev,
- unsigned long freq);
-#else /* !CONFIG_CLOCK_THERMAL */
-static inline struct thermal_cooling_device *
-clock_cooling_register(struct device *dev, const char *clock_name)
-{
- return NULL;
-}
-static inline
-void clock_cooling_unregister(struct thermal_cooling_device *cdev)
-{
-}
-static inline
-unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev,
- unsigned long freq)
-{
- return THERMAL_CSTATE_INVALID;
-}
-#endif /* CONFIG_CLOCK_THERMAL */
-
-#endif /* __CPU_COOLING_H__ */
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index a116926598fd..9aac31d856f3 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* linux/include/linux/clockchips.h
*
* This file contains the structure definitions for clockchips.
@@ -210,7 +211,7 @@ extern int tick_receive_broadcast(void);
extern void tick_setup_hrtimer_broadcast(void);
extern int tick_check_broadcast_expired(void);
# else
-static inline int tick_check_broadcast_expired(void) { return 0; }
+static __always_inline int tick_check_broadcast_expired(void) { return 0; }
static inline void tick_setup_hrtimer_broadcast(void) { }
# endif
@@ -218,7 +219,7 @@ static inline void tick_setup_hrtimer_broadcast(void) { }
static inline void clockevents_suspend(void) { }
static inline void clockevents_resume(void) { }
-static inline int tick_check_broadcast_expired(void) { return 0; }
+static __always_inline int tick_check_broadcast_expired(void) { return 0; }
static inline void tick_setup_hrtimer_broadcast(void) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index a78cb1848e65..1d42d4b17327 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* linux/include/linux/clocksource.h
*
* This file contains the structure definitions for clocksources.
@@ -16,24 +17,40 @@
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/clocksource_ids.h>
#include <asm/div64.h>
#include <asm/io.h>
struct clocksource;
struct module;
-#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
+#if defined(CONFIG_ARCH_CLOCKSOURCE_DATA) || \
+ defined(CONFIG_GENERIC_GETTIMEOFDAY)
#include <asm/clocksource.h>
#endif
+#include <vdso/clocksource.h>
+
/**
* struct clocksource - hardware abstraction for a free running counter
* Provides mostly state-free accessors to the underlying hardware.
* This is the structure used for system time.
*
- * @name: ptr to clocksource name
- * @list: list head for registration
- * @rating: rating value for selection (higher is better)
+ * @read: Returns a cycle value, passes clocksource as argument
+ * @mask: Bitmask for two's complement
+ * subtraction of non 64 bit counters
+ * @mult: Cycle to nanosecond multiplier
+ * @shift: Cycle to nanosecond divisor (power of two)
+ * @max_idle_ns: Maximum idle time permitted by the clocksource (nsecs)
+ * @maxadj: Maximum adjustment value to mult (~11%)
+ * @uncertainty_margin: Maximum uncertainty in nanoseconds per half second.
+ * Zero says to use default WATCHDOG_THRESHOLD.
+ * @archdata: Optional arch-specific data
+ * @max_cycles: Maximum safe cycle value which won't overflow on
+ * multiplication
+ * @name: Pointer to clocksource name
+ * @list: List head for registration (internal)
+ * @rating: Rating value for selection (higher is better)
* To avoid rating inflation the following
* list should give you a guide as to how
* to assign your clocksource a rating
@@ -48,27 +65,27 @@ struct module;
* 400-499: Perfect
* The ideal clocksource. A must-use where
* available.
- * @read: returns a cycle value, passes clocksource as argument
- * @enable: optional function to enable the clocksource
- * @disable: optional function to disable the clocksource
- * @mask: bitmask for two's complement
- * subtraction of non 64 bit counters
- * @mult: cycle to nanosecond multiplier
- * @shift: cycle to nanosecond divisor (power of two)
- * @max_idle_ns: max idle time permitted by the clocksource (nsecs)
- * @maxadj: maximum adjustment value to mult (~11%)
- * @max_cycles: maximum safe cycle value which won't overflow on multiplication
- * @flags: flags describing special properties
- * @archdata: arch-specific data
- * @suspend: suspend function for the clocksource, if necessary
- * @resume: resume function for the clocksource, if necessary
+ * @id: Defaults to CSID_GENERIC. The id value is captured
+ * in certain snapshot functions to allow callers to
+ * validate the clocksource from which the snapshot was
+ * taken.
+ * @flags: Flags describing special properties
+ * @enable: Optional function to enable the clocksource
+ * @disable: Optional function to disable the clocksource
+ * @suspend: Optional suspend function for the clocksource
+ * @resume: Optional resume function for the clocksource
* @mark_unstable: Optional function to inform the clocksource driver that
* the watchdog marked the clocksource unstable
- * @owner: module reference, must be set by clocksource in modules
+ * @tick_stable: Optional function called periodically from the watchdog
+ * code to provide stable synchronization points
+ * @wd_list: List head to enqueue into the watchdog list (internal)
+ * @cs_last: Last clocksource value for clocksource watchdog
+ * @wd_last: Last watchdog value corresponding to @cs_last
+ * @owner: Module reference, must be set by clocksource in modules
*
* Note: This struct is not used in hot paths of the timekeeping code
* because the timekeeper caches the hot path fields in its own data
- * structure, so no line cache alignment is required,
+ * structure, so no cache line alignment is required,
*
* The pointer to the clocksource itself is handed to the read
* callback. If you need extra information there you can wrap struct
@@ -77,35 +94,39 @@ struct module;
* structure.
*/
struct clocksource {
- u64 (*read)(struct clocksource *cs);
- u64 mask;
- u32 mult;
- u32 shift;
- u64 max_idle_ns;
- u32 maxadj;
+ u64 (*read)(struct clocksource *cs);
+ u64 mask;
+ u32 mult;
+ u32 shift;
+ u64 max_idle_ns;
+ u32 maxadj;
+ u32 uncertainty_margin;
#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
struct arch_clocksource_data archdata;
#endif
- u64 max_cycles;
- const char *name;
- struct list_head list;
- int rating;
- int (*enable)(struct clocksource *cs);
- void (*disable)(struct clocksource *cs);
- unsigned long flags;
- void (*suspend)(struct clocksource *cs);
- void (*resume)(struct clocksource *cs);
- void (*mark_unstable)(struct clocksource *cs);
- void (*tick_stable)(struct clocksource *cs);
+ u64 max_cycles;
+ const char *name;
+ struct list_head list;
+ int rating;
+ enum clocksource_ids id;
+ enum vdso_clock_mode vdso_clock_mode;
+ unsigned long flags;
+
+ int (*enable)(struct clocksource *cs);
+ void (*disable)(struct clocksource *cs);
+ void (*suspend)(struct clocksource *cs);
+ void (*resume)(struct clocksource *cs);
+ void (*mark_unstable)(struct clocksource *cs);
+ void (*tick_stable)(struct clocksource *cs);
/* private: */
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
/* Watchdog related data, used by the framework */
- struct list_head wd_list;
- u64 cs_last;
- u64 wd_last;
+ struct list_head wd_list;
+ u64 cs_last;
+ u64 wd_last;
#endif
- struct module *owner;
+ struct module *owner;
};
/*
@@ -119,7 +140,7 @@ struct clocksource {
#define CLOCK_SOURCE_UNSTABLE 0x40
#define CLOCK_SOURCE_SUSPEND_NONSTOP 0x80
#define CLOCK_SOURCE_RESELECT 0x100
-
+#define CLOCK_SOURCE_VERIFY_PERCPU 0x200
/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)
@@ -193,6 +214,9 @@ extern void clocksource_suspend(void);
extern void clocksource_resume(void);
extern struct clocksource * __init clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);
+extern void
+clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles);
+extern u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 now);
extern u64
clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles);
@@ -237,6 +261,11 @@ static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz
__clocksource_update_freq_scale(cs, 1000, khz);
}
+#ifdef CONFIG_ARCH_CLOCKSOURCE_INIT
+extern void clocksource_arch_init(struct clocksource *cs);
+#else
+static inline void clocksource_arch_init(struct clocksource *cs) { }
+#endif
extern int timekeeping_notify(struct clocksource *clock);
@@ -253,9 +282,6 @@ extern int clocksource_i8253_init(void);
#define TIMER_OF_DECLARE(name, compat, fn) \
OF_DECLARE_1_RET(timer, name, compat, fn)
-#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \
- TIMER_OF_DECLARE(name, compat, fn)
-
#ifdef CONFIG_TIMER_PROBE
extern void timer_probe(void);
#else
@@ -265,4 +291,7 @@ static inline void timer_probe(void) {}
#define TIMER_ACPI_DECLARE(name, table_id, fn) \
ACPI_DECLARE_PROBE_ENTRY(timer, name, table_id, 0, NULL, 0, fn)
+extern ulong max_cswd_read_retries;
+void clocksource_verify_percpu(struct clocksource *cs);
+
#endif /* _LINUX_CLOCKSOURCE_H */
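
Tying the reworked struct layout together, a minimal driver-defined clocksource only needs to fill the hot fields; mult and shift are computed at registration time. This is a sketch under stated assumptions: read_hw_counter() stands in for a real 32-bit hardware counter read, and the 24 MHz rate is arbitrary.

#include <linux/clocksource.h>

extern u32 read_hw_counter(void);	/* hypothetical MMIO counter read */

static u64 example_cs_read(struct clocksource *cs)
{
	return (u64)read_hw_counter();
}

static struct clocksource example_cs = {
	.name	= "example",
	.rating	= 300,			/* "desired" on the scale above */
	.read	= example_cs_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

/* In init code: clocksource_register_hz(&example_cs, 24000000); */
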
diff --git a/include/linux/clocksource_ids.h b/include/linux/clocksource_ids.h
new file mode 100644
index 000000000000..16775d7d8f8d
--- /dev/null
+++ b/include/linux/clocksource_ids.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CLOCKSOURCE_IDS_H
+#define _LINUX_CLOCKSOURCE_IDS_H
+
+/* Enum to give clocksources a unique identifier */
+enum clocksource_ids {
+ CSID_GENERIC = 0,
+ CSID_ARM_ARCH_COUNTER,
+ CSID_MAX,
+};
+
+#endif
diff --git a/include/linux/cm4000_cs.h b/include/linux/cm4000_cs.h
deleted file mode 100644
index 88bee3a33090..000000000000
--- a/include/linux/cm4000_cs.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _CM4000_H_
-#define _CM4000_H_
-
-#include <uapi/linux/cm4000_cs.h>
-
-
-#define DEVICE_NAME "cmm"
-#define MODULE_NAME "cm4000_cs"
-
-#endif /* _CM4000_H_ */
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 3e8fbf5a5c73..63873b93deaa 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -1,8 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CMA_H__
#define __CMA_H__
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/numa.h>
/*
* There is always at least global CMA area and a few optional
@@ -10,11 +12,17 @@
*/
#ifdef CONFIG_CMA_AREAS
#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
+#endif
-#else
-#define MAX_CMA_AREAS (0)
+#define CMA_MAX_NAME 64
-#endif
+/*
+ * Since the buddy allocator -- especially pageblock merging and
+ * alloc_contig_range() -- can deal with only some pageblocks of a
+ * higher-order page being MIGRATE_CMA, pageblock_nr_pages is the
+ * minimum usable CMA alignment.
+ */
+#define CMA_MIN_ALIGNMENT_PAGES pageblock_nr_pages
+#define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)
struct cma;
@@ -23,17 +31,29 @@ extern phys_addr_t cma_get_base(const struct cma *cma);
extern unsigned long cma_get_size(const struct cma *cma);
extern const char *cma_get_name(const struct cma *cma);
-extern int __init cma_declare_contiguous(phys_addr_t base,
+extern int __init cma_declare_contiguous_nid(phys_addr_t base,
phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
- bool fixed, const char *name, struct cma **res_cma);
+ bool fixed, const char *name, struct cma **res_cma,
+ int nid);
+static inline int __init cma_declare_contiguous(phys_addr_t base,
+ phys_addr_t size, phys_addr_t limit,
+ phys_addr_t alignment, unsigned int order_per_bit,
+ bool fixed, const char *name, struct cma **res_cma)
+{
+ return cma_declare_contiguous_nid(base, size, limit, alignment,
+ order_per_bit, fixed, name, res_cma, NUMA_NO_NODE);
+}
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit,
const char *name,
struct cma **res_cma);
-extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
- gfp_t gfp_mask);
-extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
+extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
+ bool no_warn);
+extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
+extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
+
+extern void cma_reserve_pages_on_error(struct cma *cma);
#endif
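
The count arguments are now unsigned long and cma_alloc() takes an explicit no_warn flag, so callers look like this hedged sketch; the area pointer is assumed to come from an earlier cma_declare_contiguous() or cma_init_reserved_mem():

#include <linux/cma.h>
#include <linux/printk.h>

static struct page *example_cma_get(struct cma *area, unsigned long npages)
{
	/* align order 0; true suppresses allocation-failure warnings */
	return cma_alloc(area, npages, 0, true);
}

static void example_cma_put(struct cma *area, struct page *pages,
			    unsigned long npages)
{
	if (!cma_release(area, pages, npages))
		pr_warn("pages were not part of this CMA area\n");
}
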
diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h
deleted file mode 100644
index 2e6dce6e5c2a..000000000000
--- a/include/linux/cmdline-parser.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Parsing command line, get the partitions information.
- *
- * Written by Cai Zhiyong <caizhiyong@huawei.com>
- *
- */
-#ifndef CMDLINEPARSEH
-#define CMDLINEPARSEH
-
-#include <linux/blkdev.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-
-/* partition flags */
-#define PF_RDONLY 0x01 /* Device is read only */
-#define PF_POWERUP_LOCK 0x02 /* Always locked after reset */
-
-struct cmdline_subpart {
- char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */
- sector_t from;
- sector_t size;
- int flags;
- struct cmdline_subpart *next_subpart;
-};
-
-struct cmdline_parts {
- char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */
- unsigned int nr_subparts;
- struct cmdline_subpart *subpart;
- struct cmdline_parts *next_parts;
-};
-
-void cmdline_parts_free(struct cmdline_parts **parts);
-
-int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline);
-
-struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
- const char *bdev);
-
-int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
- int slot,
- int (*add_part)(int, struct cmdline_subpart *, void *),
- void *param);
-
-#endif /* CMDLINEPARSEH */
diff --git a/include/linux/cnt32_to_63.h b/include/linux/cnt32_to_63.h
index aa629bce9033..064428479f2d 100644
--- a/include/linux/cnt32_to_63.h
+++ b/include/linux/cnt32_to_63.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Extend a 32-bit counter to 63 bits
*
* Author: Nicolas Pitre
* Created: December 3, 2006
* Copyright: MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
*/
#ifndef __LINUX_CNT32_TO_63_H__
diff --git a/include/linux/coda.h b/include/linux/coda.h
index d30209b9cef8..0ca0c83fdb1c 100644
--- a/include/linux/coda.h
+++ b/include/linux/coda.h
@@ -58,8 +58,7 @@ Mellon the rights to redistribute these changes without encumbrance.
#ifndef _CODA_HEADER_
#define _CODA_HEADER_
-#if defined(__linux__)
typedef unsigned long long u_quad_t;
-#endif
+
#include <uapi/linux/coda.h>
#endif
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
deleted file mode 100644
index 31e4e1f1547c..000000000000
--- a/include/linux/coda_psdev.h
+++ /dev/null
@@ -1,71 +0,0 @@
-#ifndef __CODA_PSDEV_H
-#define __CODA_PSDEV_H
-
-#include <linux/backing-dev.h>
-#include <linux/mutex.h>
-#include <uapi/linux/coda_psdev.h>
-
-struct kstatfs;
-
-/* communication pending/processing queues */
-struct venus_comm {
- u_long vc_seq;
- wait_queue_head_t vc_waitq; /* Venus wait queue */
- struct list_head vc_pending;
- struct list_head vc_processing;
- int vc_inuse;
- struct super_block *vc_sb;
- struct mutex vc_mutex;
-};
-
-
-static inline struct venus_comm *coda_vcp(struct super_block *sb)
-{
- return (struct venus_comm *)((sb)->s_fs_info);
-}
-
-/* upcalls */
-int venus_rootfid(struct super_block *sb, struct CodaFid *fidp);
-int venus_getattr(struct super_block *sb, struct CodaFid *fid,
- struct coda_vattr *attr);
-int venus_setattr(struct super_block *, struct CodaFid *, struct coda_vattr *);
-int venus_lookup(struct super_block *sb, struct CodaFid *fid,
- const char *name, int length, int *type,
- struct CodaFid *resfid);
-int venus_close(struct super_block *sb, struct CodaFid *fid, int flags,
- kuid_t uid);
-int venus_open(struct super_block *sb, struct CodaFid *fid, int flags,
- struct file **f);
-int venus_mkdir(struct super_block *sb, struct CodaFid *dirfid,
- const char *name, int length,
- struct CodaFid *newfid, struct coda_vattr *attrs);
-int venus_create(struct super_block *sb, struct CodaFid *dirfid,
- const char *name, int length, int excl, int mode,
- struct CodaFid *newfid, struct coda_vattr *attrs) ;
-int venus_rmdir(struct super_block *sb, struct CodaFid *dirfid,
- const char *name, int length);
-int venus_remove(struct super_block *sb, struct CodaFid *dirfid,
- const char *name, int length);
-int venus_readlink(struct super_block *sb, struct CodaFid *fid,
- char *buffer, int *length);
-int venus_rename(struct super_block *, struct CodaFid *new_fid,
- struct CodaFid *old_fid, size_t old_length,
- size_t new_length, const char *old_name,
- const char *new_name);
-int venus_link(struct super_block *sb, struct CodaFid *fid,
- struct CodaFid *dirfid, const char *name, int len );
-int venus_symlink(struct super_block *sb, struct CodaFid *fid,
- const char *name, int len, const char *symname, int symlen);
-int venus_access(struct super_block *sb, struct CodaFid *fid, int mask);
-int venus_pioctl(struct super_block *sb, struct CodaFid *fid,
- unsigned int cmd, struct PioctlData *data);
-int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out);
-int venus_fsync(struct super_block *sb, struct CodaFid *fid);
-int venus_statfs(struct dentry *dentry, struct kstatfs *sfs);
-
-/*
- * Statistics
- */
-
-extern struct venus_comm coda_comms[];
-#endif
diff --git a/include/linux/comedi/comedi_8254.h b/include/linux/comedi/comedi_8254.h
new file mode 100644
index 000000000000..d8264417e53c
--- /dev/null
+++ b/include/linux/comedi/comedi_8254.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedi_8254.h
+ * Generic 8254 timer/counter support
+ * Copyright (C) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDI_8254_H
+#define _COMEDI_8254_H
+
+#include <linux/types.h>
+
+struct comedi_device;
+struct comedi_insn;
+struct comedi_subdevice;
+
+/*
+ * Common oscillator base values in nanoseconds
+ */
+#define I8254_OSC_BASE_10MHZ 100
+#define I8254_OSC_BASE_5MHZ 200
+#define I8254_OSC_BASE_4MHZ 250
+#define I8254_OSC_BASE_2MHZ 500
+#define I8254_OSC_BASE_1MHZ 1000
+#define I8254_OSC_BASE_100KHZ 10000
+#define I8254_OSC_BASE_10KHZ 100000
+#define I8254_OSC_BASE_1KHZ 1000000
+
+/*
+ * I/O access size used to read/write registers
+ */
+#define I8254_IO8 1
+#define I8254_IO16 2
+#define I8254_IO32 4
+
+/*
+ * Register map for generic 8254 timer (I8254_IO8 with 0 regshift)
+ */
+#define I8254_COUNTER0_REG 0x00
+#define I8254_COUNTER1_REG 0x01
+#define I8254_COUNTER2_REG 0x02
+#define I8254_CTRL_REG 0x03
+#define I8254_CTRL_SEL_CTR(x) ((x) << 6)
+#define I8254_CTRL_READBACK(x) (I8254_CTRL_SEL_CTR(3) | BIT(x))
+#define I8254_CTRL_READBACK_COUNT I8254_CTRL_READBACK(4)
+#define I8254_CTRL_READBACK_STATUS I8254_CTRL_READBACK(5)
+#define I8254_CTRL_READBACK_SEL_CTR(x) (2 << (x))
+#define I8254_CTRL_RW(x) (((x) & 0x3) << 4)
+#define I8254_CTRL_LATCH I8254_CTRL_RW(0)
+#define I8254_CTRL_LSB_ONLY I8254_CTRL_RW(1)
+#define I8254_CTRL_MSB_ONLY I8254_CTRL_RW(2)
+#define I8254_CTRL_LSB_MSB I8254_CTRL_RW(3)
+
+/* counter maps zero to 0x10000 */
+#define I8254_MAX_COUNT 0x10000
+
+/**
+ * struct comedi_8254 - private data used by this module
+ * @iobase: PIO base address of the registers (in/out)
+ * @mmio: MMIO base address of the registers (read/write)
+ * @iosize: I/O size used to access the registers (b/w/l)
+ * @regshift: register gap shift
+ * @osc_base: cascaded oscillator speed in ns
+ * @divisor: divisor for single counter
+ * @divisor1: divisor loaded into first cascaded counter
+ * @divisor2: divisor loaded into second cascaded counter
+ * @next_div: next divisor for single counter
+ * @next_div1: next divisor to use for first cascaded counter
+ * @next_div2: next divisor to use for second cascaded counter
+ * @clock_src: current clock source for each counter (driver specific)
+ * @gate_src: current gate source for each counter (driver specific)
+ * @busy: flags used to indicate that a counter is "busy"
+ * @insn_config: driver specific (*insn_config) callback
+ */
+struct comedi_8254 {
+ unsigned long iobase;
+ void __iomem *mmio;
+ unsigned int iosize;
+ unsigned int regshift;
+ unsigned int osc_base;
+ unsigned int divisor;
+ unsigned int divisor1;
+ unsigned int divisor2;
+ unsigned int next_div;
+ unsigned int next_div1;
+ unsigned int next_div2;
+ unsigned int clock_src[3];
+ unsigned int gate_src[3];
+ bool busy[3];
+
+ int (*insn_config)(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+};
+
+unsigned int comedi_8254_status(struct comedi_8254 *i8254,
+ unsigned int counter);
+unsigned int comedi_8254_read(struct comedi_8254 *i8254, unsigned int counter);
+void comedi_8254_write(struct comedi_8254 *i8254,
+ unsigned int counter, unsigned int val);
+
+int comedi_8254_set_mode(struct comedi_8254 *i8254,
+ unsigned int counter, unsigned int mode);
+int comedi_8254_load(struct comedi_8254 *i8254,
+ unsigned int counter, unsigned int val, unsigned int mode);
+
+void comedi_8254_pacer_enable(struct comedi_8254 *i8254,
+ unsigned int counter1, unsigned int counter2,
+ bool enable);
+void comedi_8254_update_divisors(struct comedi_8254 *i8254);
+void comedi_8254_cascade_ns_to_timer(struct comedi_8254 *i8254,
+ unsigned int *nanosec, unsigned int flags);
+void comedi_8254_ns_to_timer(struct comedi_8254 *i8254,
+ unsigned int *nanosec, unsigned int flags);
+
+void comedi_8254_set_busy(struct comedi_8254 *i8254,
+ unsigned int counter, bool busy);
+
+void comedi_8254_subdevice_init(struct comedi_subdevice *s,
+ struct comedi_8254 *i8254);
+
+struct comedi_8254 *comedi_8254_init(unsigned long iobase,
+ unsigned int osc_base,
+ unsigned int iosize,
+ unsigned int regshift);
+struct comedi_8254 *comedi_8254_mm_init(void __iomem *mmio,
+ unsigned int osc_base,
+ unsigned int iosize,
+ unsigned int regshift);
+
+#endif /* _COMEDI_8254_H */
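
The usual pacer sequence with this API is: convert the requested period into cascaded divisors, commit them, then gate the two counters on. A hedged sketch following the declarations above; the period and flags would come from a comedi command in a real driver:

static void example_start_pacer(struct comedi_8254 *pacer,
				unsigned int *period_ns, unsigned int flags)
{
	/* Pick next_div1/next_div2 for the period (rounded per flags). */
	comedi_8254_cascade_ns_to_timer(pacer, period_ns, flags);
	/* Commit the computed values as the active divisors. */
	comedi_8254_update_divisors(pacer);
	/* Set the counter modes, load the divisors and mark them busy. */
	comedi_8254_pacer_enable(pacer, 1, 2, true);
}
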
diff --git a/include/linux/comedi/comedi_8255.h b/include/linux/comedi/comedi_8255.h
new file mode 100644
index 000000000000..b2a5bc6b3a49
--- /dev/null
+++ b/include/linux/comedi/comedi_8255.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedi_8255.h
+ * Generic 8255 digital I/O subdevice support
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDI_8255_H
+#define _COMEDI_8255_H
+
+#define I8255_SIZE 0x04
+
+#define I8255_DATA_A_REG 0x00
+#define I8255_DATA_B_REG 0x01
+#define I8255_DATA_C_REG 0x02
+#define I8255_CTRL_REG 0x03
+#define I8255_CTRL_C_LO_IO BIT(0)
+#define I8255_CTRL_B_IO BIT(1)
+#define I8255_CTRL_B_MODE BIT(2)
+#define I8255_CTRL_C_HI_IO BIT(3)
+#define I8255_CTRL_A_IO BIT(4)
+#define I8255_CTRL_A_MODE(x) ((x) << 5)
+#define I8255_CTRL_CW BIT(7)
+
+struct comedi_device;
+struct comedi_subdevice;
+
+int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s,
+ int (*io)(struct comedi_device *dev, int dir, int port,
+ int data, unsigned long regbase),
+ unsigned long regbase);
+
+int subdev_8255_mm_init(struct comedi_device *dev, struct comedi_subdevice *s,
+ int (*io)(struct comedi_device *dev, int dir, int port,
+ int data, unsigned long regbase),
+ unsigned long regbase);
+
+unsigned long subdev_8255_regbase(struct comedi_subdevice *s);
+
+#endif
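
In many drivers the io callback is NULL, which by convention lets the comedi core fall back to default port I/O relative to regbase. A hedged sketch of wiring one DIO subdevice during attach; dev, s and the offset are assumed to come from the caller:

static int example_attach_dio(struct comedi_device *dev,
			      struct comedi_subdevice *s,
			      unsigned long offset_from_iobase)
{
	/* NULL io: core accesses the 8255 at dev->iobase + regbase. */
	return subdev_8255_init(dev, s, NULL, offset_from_iobase);
}
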
diff --git a/include/linux/comedi/comedi_isadma.h b/include/linux/comedi/comedi_isadma.h
new file mode 100644
index 000000000000..9d2b12db7e6e
--- /dev/null
+++ b/include/linux/comedi/comedi_isadma.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * COMEDI ISA DMA support functions
+ * Copyright (c) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
+ */
+
+#ifndef _COMEDI_ISADMA_H
+#define _COMEDI_ISADMA_H
+
+#include <linux/types.h>
+
+struct comedi_device;
+struct device;
+
+/*
+ * These are used to avoid issues when <asm/dma.h> and the DMA_MODE_
+ * defines are not available.
+ */
+#define COMEDI_ISADMA_READ 0
+#define COMEDI_ISADMA_WRITE 1
+
+/**
+ * struct comedi_isadma_desc - cookie for ISA DMA
+ * @virt_addr: virtual address of buffer
+ * @hw_addr: hardware (bus) address of buffer
+ * @chan: DMA channel
+ * @maxsize: allocated size of buffer (in bytes)
+ * @size: transfer size (in bytes)
+ * @mode: DMA_MODE_READ or DMA_MODE_WRITE
+ */
+struct comedi_isadma_desc {
+ void *virt_addr;
+ dma_addr_t hw_addr;
+ unsigned int chan;
+ unsigned int maxsize;
+ unsigned int size;
+ char mode;
+};
+
+/**
+ * struct comedi_isadma - ISA DMA data
+ * @dev: device to allocate non-coherent memory for
+ * @desc: cookie for each DMA buffer
+ * @n_desc: the number of cookies
+ * @cur_dma: the current cookie in use
+ * @chan: the first DMA channel requested
+ * @chan2: the second DMA channel requested
+ */
+struct comedi_isadma {
+ struct device *dev;
+ struct comedi_isadma_desc *desc;
+ int n_desc;
+ int cur_dma;
+ unsigned int chan;
+ unsigned int chan2;
+};
+
+#if IS_ENABLED(CONFIG_ISA_DMA_API)
+
+void comedi_isadma_program(struct comedi_isadma_desc *desc);
+unsigned int comedi_isadma_disable(unsigned int dma_chan);
+unsigned int comedi_isadma_disable_on_sample(unsigned int dma_chan,
+ unsigned int size);
+unsigned int comedi_isadma_poll(struct comedi_isadma *dma);
+void comedi_isadma_set_mode(struct comedi_isadma_desc *desc, char dma_dir);
+
+struct comedi_isadma *comedi_isadma_alloc(struct comedi_device *dev,
+ int n_desc, unsigned int dma_chan1,
+ unsigned int dma_chan2,
+ unsigned int maxsize, char dma_dir);
+void comedi_isadma_free(struct comedi_isadma *dma);
+
+#else /* !IS_ENABLED(CONFIG_ISA_DMA_API) */
+
+static inline void comedi_isadma_program(struct comedi_isadma_desc *desc)
+{
+}
+
+static inline unsigned int comedi_isadma_disable(unsigned int dma_chan)
+{
+ return 0;
+}
+
+static inline unsigned int
+comedi_isadma_disable_on_sample(unsigned int dma_chan, unsigned int size)
+{
+ return 0;
+}
+
+static inline unsigned int comedi_isadma_poll(struct comedi_isadma *dma)
+{
+ return 0;
+}
+
+static inline void comedi_isadma_set_mode(struct comedi_isadma_desc *desc,
+ char dma_dir)
+{
+}
+
+static inline struct comedi_isadma *
+comedi_isadma_alloc(struct comedi_device *dev, int n_desc,
+ unsigned int dma_chan1, unsigned int dma_chan2,
+ unsigned int maxsize, char dma_dir)
+{
+ return NULL;
+}
+
+static inline void comedi_isadma_free(struct comedi_isadma *dma)
+{
+}
+
+#endif /* !IS_ENABLED(CONFIG_ISA_DMA_API) */
+
+#endif /* #ifndef _COMEDI_ISADMA_H */
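
Allocation is typically done once at attach time. Here is a hedged sketch for a board that streams acquisition data over a single DMA channel; passing the same channel twice requests two buffers on one channel, mirroring common driver usage:

static struct comedi_isadma *example_setup_dma(struct comedi_device *dev,
					       unsigned int dma_chan)
{
	/* Two PAGE_SIZE buffers, device-to-memory direction. */
	return comedi_isadma_alloc(dev, 2, dma_chan, dma_chan,
				   PAGE_SIZE, COMEDI_ISADMA_READ);
}
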
diff --git a/include/linux/comedi/comedi_pci.h b/include/linux/comedi/comedi_pci.h
new file mode 100644
index 000000000000..2fb50663e3ed
--- /dev/null
+++ b/include/linux/comedi/comedi_pci.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedi_pci.h
+ * header file for Comedi PCI drivers
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDI_PCI_H
+#define _COMEDI_PCI_H
+
+#include <linux/pci.h>
+#include <linux/comedi/comedidev.h>
+
+/*
+ * PCI Vendor IDs not in <linux/pci_ids.h>
+ */
+#define PCI_VENDOR_ID_KOLTER 0x1001
+#define PCI_VENDOR_ID_ICP 0x104c
+#define PCI_VENDOR_ID_DT 0x1116
+#define PCI_VENDOR_ID_IOTECH 0x1616
+#define PCI_VENDOR_ID_CONTEC 0x1221
+#define PCI_VENDOR_ID_RTD 0x1435
+#define PCI_VENDOR_ID_HUMUSOFT 0x186c
+
+struct pci_dev *comedi_to_pci_dev(struct comedi_device *dev);
+
+int comedi_pci_enable(struct comedi_device *dev);
+void comedi_pci_disable(struct comedi_device *dev);
+void comedi_pci_detach(struct comedi_device *dev);
+
+int comedi_pci_auto_config(struct pci_dev *pcidev, struct comedi_driver *driver,
+ unsigned long context);
+void comedi_pci_auto_unconfig(struct pci_dev *pcidev);
+
+int comedi_pci_driver_register(struct comedi_driver *comedi_driver,
+ struct pci_driver *pci_driver);
+void comedi_pci_driver_unregister(struct comedi_driver *comedi_driver,
+ struct pci_driver *pci_driver);
+
+/**
+ * module_comedi_pci_driver() - Helper macro for registering a comedi PCI driver
+ * @__comedi_driver: comedi_driver struct
+ * @__pci_driver: pci_driver struct
+ *
+ * Helper macro for comedi PCI drivers which do not do anything special
+ * in module init/exit. This eliminates a lot of boilerplate. Each
+ * module may only use this macro once, and calling it replaces
+ * module_init() and module_exit().
+ */
+#define module_comedi_pci_driver(__comedi_driver, __pci_driver) \
+ module_driver(__comedi_driver, comedi_pci_driver_register, \
+ comedi_pci_driver_unregister, &(__pci_driver))
+
+#endif /* _COMEDI_PCI_H */
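
Putting the pieces together, a skeleton PCI driver pairs a comedi_driver with a pci_driver whose probe funnels into comedi_pci_auto_config(). All example_* names and the PCI device ID are illustrative; the same shape applies to the PCMCIA and USB helper macros below.

#include <linux/module.h>

static int example_auto_attach(struct comedi_device *dev,
			       unsigned long context)
{
	return 0;	/* allocate and set up subdevices here */
}

static struct comedi_driver example_driver = {
	.driver_name	= "example_pci",
	.module		= THIS_MODULE,
	.auto_attach	= example_auto_attach,
	.detach		= comedi_pci_detach,
};

static int example_pci_probe(struct pci_dev *pcidev,
			     const struct pci_device_id *id)
{
	return comedi_pci_auto_config(pcidev, &example_driver,
				      id->driver_data);
}

static const struct pci_device_id example_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_RTD, 0x1234) },	/* hypothetical ID */
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, example_pci_table);

static struct pci_driver example_pci_driver = {
	.name		= "example_pci",
	.id_table	= example_pci_table,
	.probe		= example_pci_probe,
	.remove		= comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(example_driver, example_pci_driver);
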
diff --git a/include/linux/comedi/comedi_pcmcia.h b/include/linux/comedi/comedi_pcmcia.h
new file mode 100644
index 000000000000..a33dfb65b869
--- /dev/null
+++ b/include/linux/comedi/comedi_pcmcia.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedi_pcmcia.h
+ * header file for Comedi PCMCIA drivers
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDI_PCMCIA_H
+#define _COMEDI_PCMCIA_H
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+#include <linux/comedi/comedidev.h>
+
+struct pcmcia_device *comedi_to_pcmcia_dev(struct comedi_device *dev);
+
+int comedi_pcmcia_enable(struct comedi_device *dev,
+ int (*conf_check)(struct pcmcia_device *p_dev,
+ void *priv_data));
+void comedi_pcmcia_disable(struct comedi_device *dev);
+
+int comedi_pcmcia_auto_config(struct pcmcia_device *link,
+ struct comedi_driver *driver);
+void comedi_pcmcia_auto_unconfig(struct pcmcia_device *link);
+
+int comedi_pcmcia_driver_register(struct comedi_driver *comedi_driver,
+ struct pcmcia_driver *pcmcia_driver);
+void comedi_pcmcia_driver_unregister(struct comedi_driver *comedi_driver,
+ struct pcmcia_driver *pcmcia_driver);
+
+/**
+ * module_comedi_pcmcia_driver() - Helper macro for registering a comedi
+ * PCMCIA driver
+ * @__comedi_driver: comedi_driver struct
+ * @__pcmcia_driver: pcmcia_driver struct
+ *
+ * Helper macro for comedi PCMCIA drivers which do not do anything special
+ * in module init/exit. This eliminates a lot of boilerplate. Each
+ * module may only use this macro once, and calling it replaces
+ * module_init() and module_exit().
+ */
+#define module_comedi_pcmcia_driver(__comedi_driver, __pcmcia_driver) \
+ module_driver(__comedi_driver, comedi_pcmcia_driver_register, \
+ comedi_pcmcia_driver_unregister, &(__pcmcia_driver))
+
+#endif /* _COMEDI_PCMCIA_H */
diff --git a/include/linux/comedi/comedi_usb.h b/include/linux/comedi/comedi_usb.h
new file mode 100644
index 000000000000..5d17dd425bd2
--- /dev/null
+++ b/include/linux/comedi/comedi_usb.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedi_usb.h
+ * header file for USB Comedi drivers
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDI_USB_H
+#define _COMEDI_USB_H
+
+#include <linux/usb.h>
+#include <linux/comedi/comedidev.h>
+
+struct usb_interface *comedi_to_usb_interface(struct comedi_device *dev);
+struct usb_device *comedi_to_usb_dev(struct comedi_device *dev);
+
+int comedi_usb_auto_config(struct usb_interface *intf,
+ struct comedi_driver *driver, unsigned long context);
+void comedi_usb_auto_unconfig(struct usb_interface *intf);
+
+int comedi_usb_driver_register(struct comedi_driver *comedi_driver,
+ struct usb_driver *usb_driver);
+void comedi_usb_driver_unregister(struct comedi_driver *comedi_driver,
+ struct usb_driver *usb_driver);
+
+/**
+ * module_comedi_usb_driver() - Helper macro for registering a comedi USB driver
+ * @__comedi_driver: comedi_driver struct
+ * @__usb_driver: usb_driver struct
+ *
+ * Helper macro for comedi USB drivers which do not do anything special
+ * in module init/exit. This eliminates a lot of boilerplate. Each
+ * module may only use this macro once, and calling it replaces
+ * module_init() and module_exit().
+ */
+#define module_comedi_usb_driver(__comedi_driver, __usb_driver) \
+ module_driver(__comedi_driver, comedi_usb_driver_register, \
+ comedi_usb_driver_unregister, &(__usb_driver))
+
+#endif /* _COMEDI_USB_H */
diff --git a/include/linux/comedi/comedidev.h b/include/linux/comedi/comedidev.h
new file mode 100644
index 000000000000..0a1150900ef3
--- /dev/null
+++ b/include/linux/comedi/comedidev.h
@@ -0,0 +1,1053 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedidev.h
+ * header file for kernel-only structures, variables, and constants
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDIDEV_H
+#define _COMEDIDEV_H
+
+#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <linux/spinlock_types.h>
+#include <linux/rwsem.h>
+#include <linux/kref.h>
+#include <linux/comedi.h>
+
+#define COMEDI_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#define COMEDI_VERSION_CODE COMEDI_VERSION(COMEDI_MAJORVERSION, \
+ COMEDI_MINORVERSION, COMEDI_MICROVERSION)
+#define COMEDI_RELEASE VERSION
+
+#define COMEDI_NUM_BOARD_MINORS 0x30
+
+/**
+ * struct comedi_subdevice - Working data for a COMEDI subdevice
+ * @device: COMEDI device to which this subdevice belongs. (Initialized by
+ * comedi_alloc_subdevices().)
+ * @index: Index of this subdevice within device's array of subdevices.
+ * (Initialized by comedi_alloc_subdevices().)
+ * @type: Type of subdevice from &enum comedi_subdevice_type. (Initialized by
+ * the low-level driver.)
+ * @n_chan: Number of channels the subdevice supports. (Initialized by the
+ * low-level driver.)
+ * @subdev_flags: Various "SDF" flags indicating aspects of the subdevice to
+ * the COMEDI core and user application. (Initialized by the low-level
+ * driver.)
+ * @len_chanlist: Maximum length of a channel list if the subdevice supports
+ * asynchronous acquisition commands. (Optionally initialized by the
+ * low-level driver, or changed from 0 to 1 during post-configuration.)
+ * @private: Private data pointer which is either set by the low-level driver
+ * itself, or by a call to comedi_alloc_spriv() which allocates storage.
+ * In the latter case, the storage is automatically freed after the
+ * low-level driver's "detach" handler is called for the device.
+ * (Initialized by the low-level driver.)
+ * @async: Pointer to &struct comedi_async if the subdevice supports
+ * asynchronous acquisition commands. (Allocated and initialized during
+ * post-configuration if needed.)
+ * @lock: Pointer to a file object that performed a %COMEDI_LOCK ioctl on the
+ * subdevice. (Initially NULL.)
+ * @busy: Pointer to a file object that is performing an asynchronous
+ * acquisition command on the subdevice. (Initially NULL.)
+ * @runflags: Internal flags for use by COMEDI core, mostly indicating whether
+ * an asynchronous acquisition command is running.
+ * @spin_lock: Generic spin-lock for use by the COMEDI core and the low-level
+ * driver. (Initialized by comedi_alloc_subdevices().)
+ * @io_bits: Bit-mask indicating the channel directions for a DIO subdevice
+ * with no more than 32 channels. A '1' at a bit position indicates the
+ * corresponding channel is configured as an output. (Initialized by the
+ * low-level driver for a DIO subdevice. Forced to all-outputs during
+ * post-configuration for a digital output subdevice.)
+ * @maxdata: If non-zero, this is the maximum raw data value of each channel.
+ * If zero, the maximum data value is channel-specific. (Initialized by
+ * the low-level driver.)
+ * @maxdata_list: If the maximum data value is channel-specific, this points
+ * to an array of maximum data values indexed by channel index.
+ * (Initialized by the low-level driver.)
+ * @range_table: If non-NULL, this points to a COMEDI range table for the
+ * subdevice. If NULL, the range table is channel-specific. (Initialized
+ * by the low-level driver, will be set to an "invalid" range table during
+ * post-configuration if @range_table and @range_table_list are both
+ * NULL.)
+ * @range_table_list: If the COMEDI range table is channel-specific, this
+ * points to an array of pointers to COMEDI range tables indexed by
+ * channel number. (Initialized by the low-level driver.)
+ * @chanlist: Not used.
+ * @insn_read: Optional pointer to a handler for the %INSN_READ instruction.
+ * (Initialized by the low-level driver, or set to a default handler
+ * during post-configuration.)
+ * @insn_write: Optional pointer to a handler for the %INSN_WRITE instruction.
+ * (Initialized by the low-level driver, or set to a default handler
+ * during post-configuration.)
+ * @insn_bits: Optional pointer to a handler for the %INSN_BITS instruction
+ * for a digital input, digital output or digital input/output subdevice.
+ * (Initialized by the low-level driver, or set to a default handler
+ * during post-configuration.)
+ * @insn_config: Optional pointer to a handler for the %INSN_CONFIG
+ * instruction. (Initialized by the low-level driver, or set to a default
+ * handler during post-configuration.)
+ * @do_cmd: If the subdevice supports asynchronous acquisition commands, this
+ * points to a handler to set it up in hardware. (Initialized by the
+ * low-level driver.)
+ * @do_cmdtest: If the subdevice supports asynchronous acquisition commands,
+ * this points to a handler used to check and possibly tweak a prospective
+ * acquisition command without setting it up in hardware. (Initialized by
+ * the low-level driver.)
+ * @poll: If the subdevice supports asynchronous acquisition commands, this
+ * is an optional pointer to a handler for the %COMEDI_POLL ioctl which
+ * instructs the low-level driver to synchronize buffers. (Initialized by
+ * the low-level driver if needed.)
+ * @cancel: If the subdevice supports asynchronous acquisition commands, this
+ * points to a handler used to terminate a running command. (Initialized
+ * by the low-level driver.)
+ * @buf_change: If the subdevice supports asynchronous acquisition commands,
+ * this is an optional pointer to a handler that is called when the data
+ * buffer for handling asynchronous commands is allocated or reallocated.
+ * (Initialized by the low-level driver if needed.)
+ * @munge: If the subdevice supports asynchronous acquisition commands and
+ * uses DMA to transfer data from the hardware to the acquisition buffer,
+ * this points to a function used to "munge" the data values from the
+ * hardware into the format expected by COMEDI. (Initialized by the
+ * low-level driver if needed.)
+ * @async_dma_dir: If the subdevice supports asynchronous acquisition commands
+ * and uses DMA to transfer data from the hardware to the acquisition
+ * buffer, this sets the DMA direction for the buffer. (initialized to
+ * %DMA_NONE by comedi_alloc_subdevices() and changed by the low-level
+ * driver if necessary.)
+ * @state: Handy bit-mask indicating the output states for a DIO or digital
+ * output subdevice with no more than 32 channels. (Initialized by the
+ * low-level driver.)
+ * @class_dev: If the subdevice supports asynchronous acquisition commands,
+ * this points to a sysfs comediX_subdY device where X is the minor device
+ * number of the COMEDI device and Y is the subdevice number. The minor
+ * device number for the sysfs device is allocated dynamically in the
+ * range 48 to 255. This is used to allow the COMEDI device to be opened
+ * with a different default read or write subdevice. (Allocated during
+ * post-configuration if needed.)
+ * @minor: If @class_dev is set, this is its dynamically allocated minor
+ * device number. (Set during post-configuration if necessary.)
+ * @readback: Optional pointer to memory allocated by
+ * comedi_alloc_subdev_readback() used to hold the values written to
+ * analog output channels so they can be read back. The storage is
+ * automatically freed after the low-level driver's "detach" handler is
+ * called for the device. (Initialized by the low-level driver.)
+ *
+ * This is the main control structure for a COMEDI subdevice. If the subdevice
+ * supports asynchronous acquisition commands, additional information is stored
+ * in the &struct comedi_async pointed to by @async.
+ *
+ * Most of the subdevice is initialized by the low-level driver's "attach" or
+ * "auto_attach" handlers but parts of it are initialized by
+ * comedi_alloc_subdevices(), and other parts are initialized during
+ * post-configuration on return from that handler.
+ *
+ * A low-level driver that sets @insn_bits for a digital input, digital output,
+ * or DIO subdevice may leave @insn_read and @insn_write uninitialized, in
+ * which case they will be set to a default handler during post-configuration
+ * that uses @insn_bits to emulate the %INSN_READ and %INSN_WRITE instructions.
+ */
+struct comedi_subdevice {
+ struct comedi_device *device;
+ int index;
+ int type;
+ int n_chan;
+ int subdev_flags;
+ int len_chanlist; /* maximum length of channel/gain list */
+
+ void *private;
+
+ struct comedi_async *async;
+
+ void *lock;
+ void *busy;
+ unsigned int runflags;
+ spinlock_t spin_lock; /* generic spin-lock for COMEDI and drivers */
+
+ unsigned int io_bits;
+
+ unsigned int maxdata; /* if maxdata==0, use list */
+ const unsigned int *maxdata_list; /* list is channel specific */
+
+ const struct comedi_lrange *range_table;
+ const struct comedi_lrange *const *range_table_list;
+
+ unsigned int *chanlist; /* driver-owned chanlist (not used) */
+
+ int (*insn_read)(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+ int (*insn_write)(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+ int (*insn_bits)(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+ int (*insn_config)(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data);
+
+ int (*do_cmd)(struct comedi_device *dev, struct comedi_subdevice *s);
+ int (*do_cmdtest)(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd);
+ int (*poll)(struct comedi_device *dev, struct comedi_subdevice *s);
+ int (*cancel)(struct comedi_device *dev, struct comedi_subdevice *s);
+
+ /* called when the buffer changes */
+ int (*buf_change)(struct comedi_device *dev,
+ struct comedi_subdevice *s);
+
+ void (*munge)(struct comedi_device *dev, struct comedi_subdevice *s,
+ void *data, unsigned int num_bytes,
+ unsigned int start_chan_index);
+ enum dma_data_direction async_dma_dir;
+
+ unsigned int state;
+
+ struct device *class_dev;
+ int minor;
+
+ unsigned int *readback;
+};
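
As a hedged sketch of the initialization described above, a simple DIO
subdevice might be set up like this inside an attach or auto_attach handler,
once comedi_alloc_subdevices() has succeeded (the insn_bits handler name is
invented):

	struct comedi_subdevice *s = &dev->subdevices[0];

	s->type		= COMEDI_SUBD_DIO;
	s->subdev_flags	= SDF_READABLE | SDF_WRITABLE;
	s->n_chan	= 16;
	s->maxdata	= 1;
	s->range_table	= &range_digital;
	s->insn_bits	= mydaq_dio_insn_bits;	/* hypothetical handler */
	/* insn_read/insn_write may stay NULL; post-configuration installs
	 * defaults that emulate them via insn_bits, as noted above. */
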
+
+/**
+ * struct comedi_buf_page - Describe a page of a COMEDI buffer
+ * @virt_addr: Kernel address of page.
+ * @dma_addr: DMA address of page if in DMA coherent memory.
+ */
+struct comedi_buf_page {
+ void *virt_addr;
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct comedi_buf_map - Describe pages in a COMEDI buffer
+ * @dma_hw_dev: Low-level hardware &struct device pointer copied from the
+ * COMEDI device's hw_dev member.
+ * @page_list: Pointer to array of &struct comedi_buf_page, one for each
+ * page in the buffer.
+ * @n_pages: Number of pages in the buffer.
+ * @dma_dir: DMA direction used to allocate pages of DMA coherent memory,
+ * or %DMA_NONE if pages allocated from regular memory.
+ * @refcount: &struct kref reference counter used to free the buffer.
+ *
+ * A COMEDI data buffer is allocated as individual pages, either in
+ * conventional memory or DMA coherent memory, depending on the attached,
+ * low-level hardware device. (The buffer pages also get mapped into the
+ * kernel's contiguous virtual address space pointed to by the 'prealloc_buf'
+ * member of &struct comedi_async.)
+ *
+ * The buffer is normally freed when the COMEDI device is detached from the
+ * low-level driver (which may happen due to device removal), but if it happens
+ * to be mmapped at the time, the pages cannot be freed until the buffer has
+ * been munmapped. That is what the reference counter is for. (The virtual
+ * address space pointed by 'prealloc_buf' is freed when the COMEDI device is
+ * detached.)
+ */
+struct comedi_buf_map {
+ struct device *dma_hw_dev;
+ struct comedi_buf_page *page_list;
+ unsigned int n_pages;
+ enum dma_data_direction dma_dir;
+ struct kref refcount;
+};
+
+/**
+ * struct comedi_async - Control data for asynchronous COMEDI commands
+ * @prealloc_buf: Kernel virtual address of allocated acquisition buffer.
+ * @prealloc_bufsz: Buffer size (in bytes).
+ * @buf_map: Map of buffer pages.
+ * @max_bufsize: Maximum allowed buffer size (in bytes).
+ * @buf_write_count: "Write completed" count (in bytes, modulo 2**32).
+ * @buf_write_alloc_count: "Allocated for writing" count (in bytes,
+ * modulo 2**32).
+ * @buf_read_count: "Read completed" count (in bytes, modulo 2**32).
+ * @buf_read_alloc_count: "Allocated for reading" count (in bytes,
+ * modulo 2**32).
+ * @buf_write_ptr: Buffer position for writer.
+ * @buf_read_ptr: Buffer position for reader.
+ * @cur_chan: Current position in chanlist for scan (for those drivers that
+ * use it).
+ * @scans_done: The number of scans completed.
+ * @scan_progress: Amount received or sent for current scan (in bytes).
+ * @munge_chan: Current position in chanlist for "munging".
+ * @munge_count: "Munge" count (in bytes, modulo 2**32).
+ * @munge_ptr: Buffer position for "munging".
+ * @events: Bit-vector of events that have occurred.
+ * @cmd: Details of comedi command in progress.
+ * @wait_head: Task wait queue for file reader or writer.
+ * @cb_mask: Bit-vector of events that should wake waiting tasks.
+ * @inttrig: Software trigger function for command, or NULL.
+ *
+ * Note about the ..._count and ..._ptr members:
+ *
+ * Think of the _Count values being integers of unlimited size, indexing
+ * into a buffer of infinite length (though only an advancing portion
+ * of the buffer of fixed length prealloc_bufsz is accessible at any
+ * time). Then:
+ *
+ * Buf_Read_Count <= Buf_Read_Alloc_Count <= Munge_Count <=
+ * Buf_Write_Count <= Buf_Write_Alloc_Count <=
+ * (Buf_Read_Count + prealloc_bufsz)
+ *
+ * (Those aren't the actual members, apart from prealloc_bufsz.) When the
+ * buffer is reset, those _Count values start at 0 and only increase in value,
+ * maintaining the above inequalities until the next time the buffer is
+ * reset. The buffer is divided into the following regions by the inequalities:
+ *
+ * [0, Buf_Read_Count):
+ * old region no longer accessible
+ *
+ * [Buf_Read_Count, Buf_Read_Alloc_Count):
+ * filled and munged region allocated for reading but not yet read
+ *
+ * [Buf_Read_Alloc_Count, Munge_Count):
+ * filled and munged region not yet allocated for reading
+ *
+ * [Munge_Count, Buf_Write_Count):
+ * filled region not yet munged
+ *
+ * [Buf_Write_Count, Buf_Write_Alloc_Count):
+ * unfilled region allocated for writing but not yet written
+ *
+ * [Buf_Write_Alloc_Count, Buf_Read_Count + prealloc_bufsz):
+ * unfilled region not yet allocated for writing
+ *
+ * [Buf_Read_Count + prealloc_bufsz, infinity):
+ * unfilled region not yet accessible
+ *
+ * Data needs to be written into the buffer before it can be read out,
+ * and may need to be converted (or "munged") between the two
+ * operations. Extra unfilled buffer space may need to be allocated for
+ * writing (advancing Buf_Write_Alloc_Count) before new data is written.
+ * After writing new data, the newly filled space needs to be released
+ * (advancing Buf_Write_Count). This also results in the new data being
+ * "munged" (advancing Munge_Count). Before data is read out of the
+ * buffer, extra space may need to be allocated for reading (advancing
+ * Buf_Read_Alloc_Count). After the data has been read out, the space
+ * needs to be released (advancing Buf_Read_Count).
+ *
+ * The actual members, buf_read_count, buf_read_alloc_count,
+ * munge_count, buf_write_count, and buf_write_alloc_count take the
+ * value of the corresponding capitalized _Count values modulo 2^32
+ * (UINT_MAX+1). Subtracting a "higher" _count value from a "lower"
+ * _count value gives the same answer as subtracting a "higher" _Count
+ * value from a lower _Count value because prealloc_bufsz < UINT_MAX+1.
+ * The modulo operation is done implicitly.
+ *
+ * The buf_read_ptr, munge_ptr, and buf_write_ptr members take the value
+ * of the corresponding capitalized _Count values modulo prealloc_bufsz.
+ * These correspond to byte indices in the physical buffer. The modulo
+ * operation is done by subtracting prealloc_bufsz when the value
+ * exceeds prealloc_bufsz (assuming prealloc_bufsz plus the increment is
+ * less than or equal to UINT_MAX).
+ */
+struct comedi_async {
+ void *prealloc_buf;
+ unsigned int prealloc_bufsz;
+ struct comedi_buf_map *buf_map;
+ unsigned int max_bufsize;
+ unsigned int buf_write_count;
+ unsigned int buf_write_alloc_count;
+ unsigned int buf_read_count;
+ unsigned int buf_read_alloc_count;
+ unsigned int buf_write_ptr;
+ unsigned int buf_read_ptr;
+ unsigned int cur_chan;
+ unsigned int scans_done;
+ unsigned int scan_progress;
+ unsigned int munge_chan;
+ unsigned int munge_count;
+ unsigned int munge_ptr;
+ unsigned int events;
+ struct comedi_cmd cmd;
+ wait_queue_head_t wait_head;
+ unsigned int cb_mask;
+ int (*inttrig)(struct comedi_device *dev, struct comedi_subdevice *s,
+ unsigned int x);
+};
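
A small illustration of the modulo-2**32 arithmetic described above: because
the distance between any two counters never exceeds prealloc_bufsz, plain
unsigned subtraction stays correct even after a counter wraps past UINT_MAX.

	/* buf_write_count has wrapped past 2**32; buf_read_count has not. */
	unsigned int buf_write_count = 0x00000010;
	unsigned int buf_read_count  = 0xfffffff0;
	unsigned int unread = buf_write_count - buf_read_count;	/* 0x20 */
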
+
+/**
+ * enum comedi_cb - &struct comedi_async callback "events"
+ * @COMEDI_CB_EOS: end-of-scan
+ * @COMEDI_CB_EOA: end-of-acquisition/output
+ * @COMEDI_CB_BLOCK: data has arrived, wakes up read() / write()
+ * @COMEDI_CB_EOBUF: DEPRECATED: end of buffer
+ * @COMEDI_CB_ERROR: card error during acquisition
+ * @COMEDI_CB_OVERFLOW: buffer overflow/underflow
+ * @COMEDI_CB_ERROR_MASK: events that indicate an error has occurred
+ * @COMEDI_CB_CANCEL_MASK: events that will cancel an async command
+ */
+enum comedi_cb {
+ COMEDI_CB_EOS = BIT(0),
+ COMEDI_CB_EOA = BIT(1),
+ COMEDI_CB_BLOCK = BIT(2),
+ COMEDI_CB_EOBUF = BIT(3),
+ COMEDI_CB_ERROR = BIT(4),
+ COMEDI_CB_OVERFLOW = BIT(5),
+ /* masks */
+ COMEDI_CB_ERROR_MASK = (COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW),
+ COMEDI_CB_CANCEL_MASK = (COMEDI_CB_EOA | COMEDI_CB_ERROR_MASK)
+};
+
+/**
+ * struct comedi_driver - COMEDI driver registration
+ * @driver_name: Name of driver.
+ * @module: Owning module.
+ * @attach: The optional "attach" handler for manually configured COMEDI
+ * devices.
+ * @detach: The "detach" handler for deconfiguring COMEDI devices.
+ * @auto_attach: The optional "auto_attach" handler for automatically
+ * configured COMEDI devices.
+ * @num_names: Optional number of "board names" supported.
+ * @board_name: Optional pointer to a pointer to a board name. The pointer
+ * to a board name is embedded in an element of a driver-defined array
+ * of static, read-only board type information.
+ * @offset: Optional size of each element of the driver-defined array of
+ * static, read-only board type information, i.e. the offset between each
+ * pointer to a board name.
+ *
+ * This is used with comedi_driver_register() and comedi_driver_unregister() to
+ * register and unregister a low-level COMEDI driver with the COMEDI core.
+ *
+ * If @num_names is non-zero, @board_name should be non-NULL, and @offset
+ * should be at least sizeof(*board_name). These are used by the handler for
+ * the %COMEDI_DEVCONFIG ioctl to match a hardware device and its driver by
+ * board name. If @num_names is zero, the %COMEDI_DEVCONFIG ioctl matches a
+ * hardware device and its driver by driver name. This is only useful if the
+ * @attach handler is set. If @num_names is non-zero, the driver's @attach
+ * handler will be called with the COMEDI device structure's board_ptr member
+ * pointing to the matched pointer to a board name within the driver's private
+ * array of static, read-only board type information.
+ *
+ * The @detach handler has two roles. If a COMEDI device was successfully
+ * configured by the @attach or @auto_attach handler, it is called when the
+ * device is being deconfigured (by the %COMEDI_DEVCONFIG ioctl, or due to
+ * unloading of the driver, or due to device removal). It is also called when
+ * the @attach or @auto_attach handler returns an error. Therefore, the
+ * @attach or @auto_attach handlers can defer clean-up on error until the
+ * @detach handler is called. If the @attach or @auto_attach handlers free
+ * any resources themselves, they must prevent the @detach handler from
+ * freeing the same resources. The @detach handler must not assume that all
+ * resources requested by the @attach or @auto_attach handler were
+ * successfully allocated.
+ */
+struct comedi_driver {
+ /* private: */
+ struct comedi_driver *next; /* Next in list of COMEDI drivers. */
+ /* public: */
+ const char *driver_name;
+ struct module *module;
+ int (*attach)(struct comedi_device *dev, struct comedi_devconfig *it);
+ void (*detach)(struct comedi_device *dev);
+ int (*auto_attach)(struct comedi_device *dev, unsigned long context);
+ unsigned int num_names;
+ const char *const *board_name;
+ int offset;
+};
+
+/**
+ * struct comedi_device - Working data for a COMEDI device
+ * @use_count: Number of open file objects.
+ * @driver: Low-level COMEDI driver attached to this COMEDI device.
+ * @pacer: Optional pointer to a dynamically allocated acquisition pacer
+ * control. It is freed automatically after the COMEDI device is
+ * detached from the low-level driver.
+ * @private: Optional pointer to private data allocated by the low-level
+ * driver. It is freed automatically after the COMEDI device is
+ * detached from the low-level driver.
+ * @class_dev: Sysfs comediX device.
+ * @minor: Minor device number of COMEDI char device (0-47).
+ * @detach_count: Counter incremented every time the COMEDI device is detached.
+ * Used for checking a previous attachment is still valid.
+ * @hw_dev: Optional pointer to the low-level hardware &struct device. It is
+ * required for automatically configured COMEDI devices and optional for
+ * COMEDI devices configured by the %COMEDI_DEVCONFIG ioctl, although
+ * the bus-specific COMEDI functions only work if it is set correctly.
+ * It is also passed to dma_alloc_coherent() for COMEDI subdevices that
+ * have their 'async_dma_dir' member set to something other than
+ * %DMA_NONE.
+ * @board_name: Pointer to a COMEDI board name or a COMEDI driver name. When
+ * the low-level driver's "attach" handler is called by the handler for
+ * the %COMEDI_DEVCONFIG ioctl, it either points to a matched board name
+ * string if the 'num_names' member of the &struct comedi_driver is
+ * non-zero, otherwise it points to the low-level driver name string.
+ * When the low-level driver's "auto_attach" handler is called for an
+ * automatically configured COMEDI device, it points to the low-level
+ * driver name string. The low-level driver is free to change it in its
+ * "attach" or "auto_attach" handler if it wishes.
+ * @board_ptr: Optional pointer to private, read-only board type information in
+ * the low-level driver. If the 'num_names' member of the &struct
+ * comedi_driver is non-zero, the handler for the %COMEDI_DEVCONFIG ioctl
+ * will point it to a pointer to a matched board name string within the
+ * driver's private array of static, read-only board type information when
+ * calling the driver's "attach" handler. The low-level driver is free to
+ * change it.
+ * @attached: Flag indicating that the COMEDI device is attached to a low-level
+ * driver.
+ * @ioenabled: Flag used to indicate that a PCI device has been enabled and
+ * its regions requested.
+ * @spinlock: Generic spin-lock for use by the low-level driver.
+ * @mutex: Generic mutex for use by the COMEDI core module.
+ * @attach_lock: &struct rw_semaphore used to guard against the COMEDI device
+ * being detached while an operation is in progress. The down_write()
+ * operation is only allowed while @mutex is held and is used when
+ * changing @attached and @detach_count and calling the low-level driver's
+ * "detach" handler. The down_read() operation is generally used without
+ * holding @mutex.
+ * @refcount: &struct kref reference counter for freeing COMEDI device.
+ * @n_subdevices: Number of COMEDI subdevices allocated by the low-level
+ * driver for this device.
+ * @subdevices: Dynamically allocated array of COMEDI subdevices.
+ * @mmio: Optional pointer to a remapped MMIO region set by the low-level
+ * driver.
+ * @iobase: Optional base of an I/O port region requested by the low-level
+ * driver.
+ * @iolen: Length of I/O port region requested at @iobase.
+ * @irq: Optional IRQ number requested by the low-level driver.
+ * @read_subdev: Optional pointer to a default COMEDI subdevice operated on by
+ * the read() file operation. Set by the low-level driver.
+ * @write_subdev: Optional pointer to a default COMEDI subdevice operated on by
+ * the write() file operation. Set by the low-level driver.
+ * @async_queue: Storage for fasync_helper().
+ * @open: Optional pointer to a function set by the low-level driver to be
+ * called when @use_count changes from 0 to 1.
+ * @close: Optional pointer to a function set by the low-level driver to be
+ * called when @use_count changed from 1 to 0.
+ * @insn_device_config: Optional pointer to a handler for all sub-instructions
+ * except %INSN_DEVICE_CONFIG_GET_ROUTES of the %INSN_DEVICE_CONFIG
+ * instruction. If this is not initialized by the low-level driver, a
+ * default handler will be set during post-configuration.
+ * @get_valid_routes: Optional pointer to a handler for the
+ * %INSN_DEVICE_CONFIG_GET_ROUTES sub-instruction of the
+ * %INSN_DEVICE_CONFIG instruction set. If this is not initialized by the
+ * low-level driver, a default handler that copies zero routes back to the
+ * user will be used.
+ *
+ * This is the main control data structure for a COMEDI device (as far as the
+ * COMEDI core is concerned). There are two groups of COMEDI devices -
+ * "legacy" devices that are configured by the handler for the
+ * %COMEDI_DEVCONFIG ioctl, and automatically configured devices resulting
+ * from a call to comedi_auto_config() as a result of a bus driver probe in
+ * a low-level COMEDI driver. The "legacy" COMEDI devices are allocated
+ * during module initialization if the "comedi_num_legacy_minors" module
+ * parameter is non-zero and use minor device numbers from 0 to
+ * comedi_num_legacy_minors minus one. The automatically configured COMEDI
+ * devices are allocated on demand and use minor device numbers from
+ * comedi_num_legacy_minors to 47.
+ */
+struct comedi_device {
+ int use_count;
+ struct comedi_driver *driver;
+ struct comedi_8254 *pacer;
+ void *private;
+
+ struct device *class_dev;
+ int minor;
+ unsigned int detach_count;
+ struct device *hw_dev;
+
+ const char *board_name;
+ const void *board_ptr;
+ unsigned int attached:1;
+ unsigned int ioenabled:1;
+ spinlock_t spinlock; /* generic spin-lock for low-level driver */
+ struct mutex mutex; /* generic mutex for COMEDI core */
+ struct rw_semaphore attach_lock;
+ struct kref refcount;
+
+ int n_subdevices;
+ struct comedi_subdevice *subdevices;
+
+ /* dumb */
+ void __iomem *mmio;
+ unsigned long iobase;
+ unsigned long iolen;
+ unsigned int irq;
+
+ struct comedi_subdevice *read_subdev;
+ struct comedi_subdevice *write_subdev;
+
+ struct fasync_struct *async_queue;
+
+ int (*open)(struct comedi_device *dev);
+ void (*close)(struct comedi_device *dev);
+ int (*insn_device_config)(struct comedi_device *dev,
+ struct comedi_insn *insn, unsigned int *data);
+ unsigned int (*get_valid_routes)(struct comedi_device *dev,
+ unsigned int n_pairs,
+ unsigned int *pair_data);
+};
+
+/*
+ * function prototypes
+ */
+
+void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s);
+
+struct comedi_device *comedi_dev_get_from_minor(unsigned int minor);
+int comedi_dev_put(struct comedi_device *dev);
+
+bool comedi_is_subdevice_running(struct comedi_subdevice *s);
+
+void *comedi_alloc_spriv(struct comedi_subdevice *s, size_t size);
+void comedi_set_spriv_auto_free(struct comedi_subdevice *s);
+
+int comedi_check_chanlist(struct comedi_subdevice *s,
+ int n,
+ unsigned int *chanlist);
+
+/* range stuff */
+
+#define RANGE(a, b) {(a) * 1e6, (b) * 1e6, 0}
+#define RANGE_ext(a, b) {(a) * 1e6, (b) * 1e6, RF_EXTERNAL}
+#define RANGE_mA(a, b) {(a) * 1e6, (b) * 1e6, UNIT_mA}
+#define RANGE_unitless(a, b) {(a) * 1e6, (b) * 1e6, 0}
+#define BIP_RANGE(a) {-(a) * 1e6, (a) * 1e6, 0}
+#define UNI_RANGE(a) {0, (a) * 1e6, 0}
+
+extern const struct comedi_lrange range_bipolar10;
+extern const struct comedi_lrange range_bipolar5;
+extern const struct comedi_lrange range_bipolar2_5;
+extern const struct comedi_lrange range_unipolar10;
+extern const struct comedi_lrange range_unipolar5;
+extern const struct comedi_lrange range_unipolar2_5;
+extern const struct comedi_lrange range_0_20mA;
+extern const struct comedi_lrange range_4_20mA;
+extern const struct comedi_lrange range_0_32mA;
+extern const struct comedi_lrange range_unknown;
+
+#define range_digital range_unipolar5
+
+/**
+ * struct comedi_lrange - Describes a COMEDI range table
+ * @length: Number of entries in the range table.
+ * @range: Array of &struct comedi_krange, one for each range.
+ *
+ * Each element of @range[] describes the minimum and maximum physical range
+ * and the type of units. Typically, the type of unit is %UNIT_volt
+ * (i.e. volts) and the minimum and maximum are in millionths of a volt.
+ * There may also be a flag that indicates the minimum and maximum are merely
+ * scale factors for an unknown, external reference.
+ */
+struct comedi_lrange {
+ int length;
+ struct comedi_krange range[];
+};
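
Using the RANGE macros above, a driver-defined range table is typically
declared like this hedged example (table name and range choices invented):

	static const struct comedi_lrange mydaq_ai_ranges = {
		4, {
			BIP_RANGE(10),		/* -10 V to +10 V */
			BIP_RANGE(5),		/* -5 V to +5 V */
			UNI_RANGE(10),		/* 0 V to +10 V */
			RANGE_mA(0, 20)		/* 0 mA to 20 mA */
		}
	};
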
+
+/**
+ * comedi_range_is_bipolar() - Test if subdevice range is bipolar
+ * @s: COMEDI subdevice.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is bipolar by checking whether its minimum value
+ * is negative.
+ *
+ * Assumes @range is valid. Does not work for subdevices using a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is bipolar.
+ * %false if the range is unipolar.
+ */
+static inline bool comedi_range_is_bipolar(struct comedi_subdevice *s,
+ unsigned int range)
+{
+ return s->range_table->range[range].min < 0;
+}
+
+/**
+ * comedi_range_is_unipolar() - Test if subdevice range is unipolar
+ * @s: COMEDI subdevice.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is unipolar by checking whether its minimum value
+ * is at least 0.
+ *
+ * Assumes @range is valid. Does not work for subdevices using a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is unipolar.
+ * %false if the range is bipolar.
+ */
+static inline bool comedi_range_is_unipolar(struct comedi_subdevice *s,
+ unsigned int range)
+{
+ return s->range_table->range[range].min >= 0;
+}
+
+/**
+ * comedi_range_is_external() - Test if subdevice range is external
+ * @s: COMEDI subdevice.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is externally referenced by checking whether its
+ * %RF_EXTERNAL flag is set.
+ *
+ * Assumes @range is valid. Does not work for subdevices using a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is external.
+ * %false if the range is internal.
+ */
+static inline bool comedi_range_is_external(struct comedi_subdevice *s,
+ unsigned int range)
+{
+ return !!(s->range_table->range[range].flags & RF_EXTERNAL);
+}
+
+/**
+ * comedi_chan_range_is_bipolar() - Test if channel-specific range is bipolar
+ * @s: COMEDI subdevice.
+ * @chan: The channel number.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is bipolar by checking whether its minimum value
+ * is negative.
+ *
+ * Assumes @chan and @range are valid. Only works for subdevices with a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is bipolar.
+ * %false if the range is unipolar.
+ */
+static inline bool comedi_chan_range_is_bipolar(struct comedi_subdevice *s,
+ unsigned int chan,
+ unsigned int range)
+{
+ return s->range_table_list[chan]->range[range].min < 0;
+}
+
+/**
+ * comedi_chan_range_is_unipolar() - Test if channel-specific range is unipolar
+ * @s: COMEDI subdevice.
+ * @chan: The channel number.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is unipolar by checking whether its minimum value
+ * is at least 0.
+ *
+ * Assumes @chan and @range are valid. Only works for subdevices with a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is unipolar.
+ * %false if the range is bipolar.
+ */
+static inline bool comedi_chan_range_is_unipolar(struct comedi_subdevice *s,
+ unsigned int chan,
+ unsigned int range)
+{
+ return s->range_table_list[chan]->range[range].min >= 0;
+}
+
+/**
+ * comedi_chan_range_is_external() - Test if channel-specific range is external
+ * @s: COMEDI subdevice.
+ * @chan: The channel number.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is externally referenced by checking whether its
+ * %RF_EXTERNAL flag is set.
+ *
+ * Assumes @chan and @range are valid. Only works for subdevices with a
+ * channel-specific range table list.
+ *
+ * Return:
+ *	%true if the range is external.
+ *	%false if the range is internal.
+ */
+static inline bool comedi_chan_range_is_external(struct comedi_subdevice *s,
+ unsigned int chan,
+ unsigned int range)
+{
+ return !!(s->range_table_list[chan]->range[range].flags & RF_EXTERNAL);
+}
+
+/**
+ * comedi_offset_munge() - Convert between offset binary and 2's complement
+ * @s: COMEDI subdevice.
+ * @val: Value to be converted.
+ *
+ * Toggles the highest bit of a sample value to toggle between offset binary
+ * and 2's complement. Assumes that @s->maxdata is a power of 2 minus 1.
+ *
+ * Return: The converted value.
+ */
+static inline unsigned int comedi_offset_munge(struct comedi_subdevice *s,
+ unsigned int val)
+{
+ return val ^ s->maxdata ^ (s->maxdata >> 1);
+}
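
Worked example: for a 16-bit subdevice (s->maxdata == 0xffff), the expression
reduces to val ^ 0x8000, toggling only the top bit:

	comedi_offset_munge(s, 0x0000);	/* == 0x8000: offset-binary min */
	comedi_offset_munge(s, 0x8000);	/* == 0x0000: mid-scale -> zero */
	comedi_offset_munge(s, 0xffff);	/* == 0x7fff: offset-binary max */
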
+
+/**
+ * comedi_bytes_per_sample() - Determine subdevice sample size
+ * @s: COMEDI subdevice.
+ *
+ * The sample size will be 4 (sizeof int) or 2 (sizeof short) depending on
+ * whether the %SDF_LSAMPL subdevice flag is set or not.
+ *
+ * Return: The subdevice sample size.
+ */
+static inline unsigned int comedi_bytes_per_sample(struct comedi_subdevice *s)
+{
+ return s->subdev_flags & SDF_LSAMPL ? sizeof(int) : sizeof(short);
+}
+
+/**
+ * comedi_sample_shift() - Determine log2 of subdevice sample size
+ * @s: COMEDI subdevice.
+ *
+ * The sample size will be 4 (sizeof int) or 2 (sizeof short) depending on
+ * whether the %SDF_LSAMPL subdevice flag is set or not. The log2 of the
+ * sample size will be 2 or 1 and can be used as the right operand of a
+ * bit-shift operator to multiply or divide something by the sample size.
+ *
+ * Return: log2 of the subdevice sample size.
+ */
+static inline unsigned int comedi_sample_shift(struct comedi_subdevice *s)
+{
+ return s->subdev_flags & SDF_LSAMPL ? 2 : 1;
+}
+
+/**
+ * comedi_bytes_to_samples() - Convert a number of bytes to a number of samples
+ * @s: COMEDI subdevice.
+ * @nbytes: Number of bytes
+ *
+ * Return: The number of bytes divided by the subdevice sample size.
+ */
+static inline unsigned int comedi_bytes_to_samples(struct comedi_subdevice *s,
+ unsigned int nbytes)
+{
+ return nbytes >> comedi_sample_shift(s);
+}
+
+/**
+ * comedi_samples_to_bytes() - Convert a number of samples to a number of bytes
+ * @s: COMEDI subdevice.
+ * @nsamples: Number of samples.
+ *
+ * Return: The number of samples multiplied by the subdevice sample size.
+ * (Does not check for arithmetic overflow.)
+ */
+static inline unsigned int comedi_samples_to_bytes(struct comedi_subdevice *s,
+ unsigned int nsamples)
+{
+ return nsamples << comedi_sample_shift(s);
+}
+
+/**
+ * comedi_check_trigger_src() - Trivially validate a comedi_cmd trigger source
+ * @src: Pointer to the trigger source to validate.
+ * @flags: Bitmask of valid %TRIG_* for the trigger.
+ *
+ * This is used in "step 1" of the do_cmdtest functions of comedi drivers
+ * to validate the comedi_cmd triggers. The mask of the @src against the
+ * @flags allows the userspace comedilib to pass all the comedi_cmd
+ * triggers as %TRIG_ANY and get back a bitmask of the valid trigger sources.
+ *
+ * Return:
+ * 0 if trigger sources in *@src are all supported.
+ * -EINVAL if any trigger source in *@src is unsupported.
+ */
+static inline int comedi_check_trigger_src(unsigned int *src,
+ unsigned int flags)
+{
+ unsigned int orig_src = *src;
+
+ *src = orig_src & flags;
+ if (*src == TRIG_INVALID || *src != orig_src)
+ return -EINVAL;
+ return 0;
+}
+
+/**
+ * comedi_check_trigger_is_unique() - Make sure a trigger source is unique
+ * @src: The trigger source to check.
+ *
+ * Return:
+ * 0 if no more than one trigger source is set.
+ * -EINVAL if more than one trigger source is set.
+ */
+static inline int comedi_check_trigger_is_unique(unsigned int src)
+{
+ /* this test is true if more than one _src bit is set */
+ if ((src & (src - 1)) != 0)
+ return -EINVAL;
+ return 0;
+}
+
+/**
+ * comedi_check_trigger_arg_is() - Trivially validate a trigger argument
+ * @arg: Pointer to the trigger arg to validate.
+ * @val: The value the argument should be.
+ *
+ * Forces *@arg to be @val.
+ *
+ * Return:
+ * 0 if *@arg was already @val.
+ * -EINVAL if *@arg differed from @val.
+ */
+static inline int comedi_check_trigger_arg_is(unsigned int *arg,
+ unsigned int val)
+{
+ if (*arg != val) {
+ *arg = val;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * comedi_check_trigger_arg_min() - Trivially validate a trigger argument min
+ * @arg: Pointer to the trigger arg to validate.
+ * @val: The minimum value the argument should be.
+ *
+ * Forces *@arg to be at least @val, setting it to @val if necessary.
+ *
+ * Return:
+ * 0 if *@arg was already at least @val.
+ * -EINVAL if *@arg was less than @val.
+ */
+static inline int comedi_check_trigger_arg_min(unsigned int *arg,
+ unsigned int val)
+{
+ if (*arg < val) {
+ *arg = val;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * comedi_check_trigger_arg_max() - Trivially validate a trigger argument max
+ * @arg: Pointer to the trigger arg to validate.
+ * @val: The maximum value the argument should be.
+ *
+ * Forces *@arg to be no more than @val, setting it to @val if necessary.
+ *
+ * Return:
+ *	0 if *@arg was already no more than @val.
+ * -EINVAL if *@arg was greater than @val.
+ */
+static inline int comedi_check_trigger_arg_max(unsigned int *arg,
+ unsigned int val)
+{
+ if (*arg > val) {
+ *arg = val;
+ return -EINVAL;
+ }
+ return 0;
+}
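
Taken together, the validators above form the early steps of a do_cmdtest
handler. A hedged sketch for a hypothetical analog-input subdevice (the
trigger choices and the 10 us minimum are invented):

	static int mydaq_ai_cmdtest(struct comedi_device *dev,
				    struct comedi_subdevice *s,
				    struct comedi_cmd *cmd)
	{
		int err = 0;

		/* Step 1 : check if triggers are trivially valid */
		err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW);
		err |= comedi_check_trigger_src(&cmd->scan_begin_src,
						TRIG_TIMER);
		err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
		err |= comedi_check_trigger_src(&cmd->scan_end_src,
						TRIG_COUNT);
		err |= comedi_check_trigger_src(&cmd->stop_src,
						TRIG_COUNT | TRIG_NONE);
		if (err)
			return 1;

		/* Step 2 : make sure trigger sources are unique */
		err |= comedi_check_trigger_is_unique(cmd->stop_src);
		if (err)
			return 2;

		/* Step 3 : check if arguments are trivially valid */
		err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
		err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
						    10000);	/* 10 us */
		err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
						   cmd->chanlist_len);
		if (err)
			return 3;

		return 0;
	}
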
+
+/*
+ * Must set dev->hw_dev if you wish to dma directly into comedi's buffer.
+ * Also useful for retrieving a previously configured hardware device of
+ * known bus type. Set automatically for auto-configured devices.
+ * Automatically set to NULL when detaching hardware device.
+ */
+int comedi_set_hw_dev(struct comedi_device *dev, struct device *hw_dev);
+
+/**
+ * comedi_buf_n_bytes_ready - Determine amount of unread data in buffer
+ * @s: COMEDI subdevice.
+ *
+ * Determines the number of bytes of unread data in the asynchronous
+ * acquisition data buffer for a subdevice. The data in question might not
+ * have been fully "munged" yet.
+ *
+ * Return: The amount of unread data in bytes.
+ */
+static inline unsigned int comedi_buf_n_bytes_ready(struct comedi_subdevice *s)
+{
+ return s->async->buf_write_count - s->async->buf_read_count;
+}
+
+unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s, unsigned int n);
+unsigned int comedi_buf_write_free(struct comedi_subdevice *s, unsigned int n);
+
+unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s);
+unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s, unsigned int n);
+unsigned int comedi_buf_read_free(struct comedi_subdevice *s, unsigned int n);
+
+unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
+ const void *data, unsigned int nsamples);
+unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
+ void *data, unsigned int nsamples);
+
+/* drivers.c - general comedi driver functions */
+
+#define COMEDI_TIMEOUT_MS 1000
+
+int comedi_timeout(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ int (*cb)(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned long context),
+ unsigned long context);
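
comedi_timeout() is typically paired with a small end-of-conversion callback;
in this hedged sketch the status register offset and ready bit are invented:

	static int mydaq_ai_eoc(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn,
				unsigned long context)
	{
		unsigned int status = inb(dev->iobase + 0x08);

		/* 0 means "done"; comedi_timeout() retries on -EBUSY until
		 * COMEDI_TIMEOUT_MS elapses. */
		return (status & 0x80) ? 0 : -EBUSY;
	}

	/* ...then, inside an insn_read handler (ret is an int): */
	ret = comedi_timeout(dev, s, insn, mydaq_ai_eoc, 0);
	if (ret)
		return ret;
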
+
+unsigned int comedi_handle_events(struct comedi_device *dev,
+ struct comedi_subdevice *s);
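
comedi_buf_write_samples() and comedi_handle_events() combine in the usual
interrupt-handler flow for an input command. A hedged sketch, assuming
<linux/interrupt.h> and an invented FIFO register:

	static irqreturn_t mydaq_interrupt(int irq, void *d)
	{
		struct comedi_device *dev = d;
		struct comedi_subdevice *s = dev->read_subdev;
		unsigned short sample;

		sample = inw(dev->iobase + 0x00);	/* invented register */
		comedi_buf_write_samples(s, &sample, 1);

		if (s->async->cmd.stop_src == TRIG_COUNT &&
		    s->async->scans_done >= s->async->cmd.stop_arg)
			s->async->events |= COMEDI_CB_EOA;

		/* Wake readers and/or cancel the command as the event
		 * flags dictate. */
		comedi_handle_events(dev, s);
		return IRQ_HANDLED;
	}
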
+
+int comedi_dio_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data,
+ unsigned int mask);
+unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
+ unsigned int *data);
+unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
+ struct comedi_cmd *cmd);
+unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
+unsigned int comedi_nscans_left(struct comedi_subdevice *s,
+ unsigned int nscans);
+unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
+ unsigned int nsamples);
+void comedi_inc_scan_progress(struct comedi_subdevice *s,
+ unsigned int num_bytes);
+
+void *comedi_alloc_devpriv(struct comedi_device *dev, size_t size);
+int comedi_alloc_subdevices(struct comedi_device *dev, int num_subdevices);
+int comedi_alloc_subdev_readback(struct comedi_subdevice *s);
+
+int comedi_readback_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+
+int comedi_load_firmware(struct comedi_device *dev, struct device *hw_dev,
+ const char *name,
+ int (*cb)(struct comedi_device *dev,
+ const u8 *data, size_t size,
+ unsigned long context),
+ unsigned long context);
+
+int __comedi_request_region(struct comedi_device *dev,
+ unsigned long start, unsigned long len);
+int comedi_request_region(struct comedi_device *dev,
+ unsigned long start, unsigned long len);
+void comedi_legacy_detach(struct comedi_device *dev);
+
+int comedi_auto_config(struct device *hardware_device,
+ struct comedi_driver *driver, unsigned long context);
+void comedi_auto_unconfig(struct device *hardware_device);
+
+int comedi_driver_register(struct comedi_driver *driver);
+void comedi_driver_unregister(struct comedi_driver *driver);
+
+/**
+ * module_comedi_driver() - Helper macro for registering a comedi driver
+ * @__comedi_driver: comedi_driver struct
+ *
+ * Helper macro for comedi drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only use
+ * this macro once, and calling it replaces module_init() and module_exit().
+ */
+#define module_comedi_driver(__comedi_driver) \
+ module_driver(__comedi_driver, comedi_driver_register, \
+ comedi_driver_unregister)
+
+#endif /* _COMEDIDEV_H */
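
Tying the declarations above together, a hedged sketch of a minimal legacy
(manually configured) driver; everything named mydaq_* is invented:

	static int mydaq_attach(struct comedi_device *dev,
				struct comedi_devconfig *it)
	{
		struct comedi_subdevice *s;
		int ret;

		/* options[0] carries the base I/O address passed in by
		 * the COMEDI_DEVCONFIG ioctl. */
		ret = comedi_request_region(dev, it->options[0], 0x10);
		if (ret)
			return ret;

		ret = comedi_alloc_subdevices(dev, 1);
		if (ret)
			return ret;

		s = &dev->subdevices[0];
		/* ...initialize the subdevice as sketched earlier... */

		return 0;
	}

	static struct comedi_driver mydaq_driver = {
		.driver_name	= "mydaq",
		.module		= THIS_MODULE,
		.attach		= mydaq_attach,
		.detach		= comedi_legacy_detach,
	};
	module_comedi_driver(mydaq_driver);
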
diff --git a/include/linux/comedi/comedilib.h b/include/linux/comedi/comedilib.h
new file mode 100644
index 000000000000..0223c9cd9215
--- /dev/null
+++ b/include/linux/comedi/comedilib.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedilib.h
+ * Header file for kcomedilib
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998-2001 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _LINUX_COMEDILIB_H
+#define _LINUX_COMEDILIB_H
+
+struct comedi_device *comedi_open(const char *path);
+int comedi_close(struct comedi_device *dev);
+int comedi_dio_get_config(struct comedi_device *dev, unsigned int subdev,
+ unsigned int chan, unsigned int *io);
+int comedi_dio_config(struct comedi_device *dev, unsigned int subdev,
+ unsigned int chan, unsigned int io);
+int comedi_dio_bitfield2(struct comedi_device *dev, unsigned int subdev,
+ unsigned int mask, unsigned int *bits,
+ unsigned int base_channel);
+int comedi_find_subdevice_by_type(struct comedi_device *dev, int type,
+ unsigned int subd);
+int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice);
+
+#endif
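
A hedged sketch of kcomedilib use from another kernel module (the device
path, subdevice, and channel are example values):

	struct comedi_device *dev;
	unsigned int bits = 1;

	dev = comedi_open("/dev/comedi0");
	if (dev) {
		/* Make channel 3 of subdevice 0 an output, drive it high. */
		comedi_dio_config(dev, 0, 3, COMEDI_OUTPUT);
		comedi_dio_bitfield2(dev, 0, 1, &bits, 3);
		comedi_close(dev);
	}
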
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 0d8415820fc3..a6e512cfb670 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H
@@ -28,21 +29,18 @@ enum compact_result {
/* compaction didn't start as it was deferred due to past failures */
COMPACT_DEFERRED,
- /* compaction not active last round */
- COMPACT_INACTIVE = COMPACT_DEFERRED,
-
/* For more detailed tracepoint output - internal to compaction */
COMPACT_NO_SUITABLE_PAGE,
/* compaction should continue to another pageblock */
COMPACT_CONTINUE,
/*
- * The full zone was compacted scanned but wasn't successfull to compact
+ * The full zone was compacted scanned but wasn't successful to compact
* suitable pages.
*/
COMPACT_COMPLETE,
/*
- * direct compaction has scanned part of the zone but wasn't successfull
+ * direct compaction has scanned part of the zone but wasn't successful
* to compact suitable pages.
*/
COMPACT_PARTIAL_SKIPPED,
@@ -83,27 +81,19 @@ static inline unsigned long compact_gap(unsigned int order)
}
#ifdef CONFIG_COMPACTION
-extern int sysctl_compact_memory;
-extern int sysctl_compaction_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *length, loff_t *ppos);
-extern int sysctl_extfrag_threshold;
-extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *length, loff_t *ppos);
-extern int sysctl_compact_unevictable_allowed;
+extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
unsigned int order, unsigned int alloc_flags,
- const struct alloc_context *ac, enum compact_priority prio);
+ const struct alloc_context *ac, enum compact_priority prio,
+ struct page **page);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern enum compact_result compaction_suitable(struct zone *zone, int order,
- unsigned int alloc_flags, int classzone_idx);
+ unsigned int alloc_flags, int highest_zoneidx);
-extern void defer_compaction(struct zone *zone, int order);
-extern bool compaction_deferred(struct zone *zone, int order);
extern void compaction_defer_reset(struct zone *zone, int order,
bool alloc_success);
-extern bool compaction_restarting(struct zone *zone, int order);
/* Compaction has made some progress and retrying makes sense */
static inline bool compaction_made_progress(enum compact_result result)
@@ -129,11 +119,8 @@ static inline bool compaction_failed(enum compact_result result)
return false;
}
-/*
- * Compaction has backed off for some reason. It might be throttling or
- * lock contention. Retrying is still worthwhile.
- */
-static inline bool compaction_withdrawn(enum compact_result result)
+/* Compaction needs reclaim to be performed first, so it can continue. */
+static inline bool compaction_needs_reclaim(enum compact_result result)
{
/*
* Compaction backed off due to watermark checks for order-0
@@ -142,6 +129,16 @@ static inline bool compaction_withdrawn(enum compact_result result)
if (result == COMPACT_SKIPPED)
return true;
+ return false;
+}
+
+/*
+ * Compaction has backed off for some reason after doing some work or none
+ * at all. It might be throttling or lock contention. Retrying might still be
+ * worthwhile, but with a higher priority if allowed.
+ */
+static inline bool compaction_withdrawn(enum compact_result result)
+{
/*
* If compaction is deferred for high-order allocations, it is
* because sync compaction recently failed. If this is the case
@@ -173,9 +170,9 @@ static inline bool compaction_withdrawn(enum compact_result result)
bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
int alloc_flags);
-extern int kcompactd_run(int nid);
+extern void kcompactd_run(int nid);
extern void kcompactd_stop(int nid);
-extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
+extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);
#else
static inline void reset_isolation_suitable(pg_data_t *pgdat)
@@ -183,26 +180,22 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat)
}
static inline enum compact_result compaction_suitable(struct zone *zone, int order,
- int alloc_flags, int classzone_idx)
+ int alloc_flags, int highest_zoneidx)
{
return COMPACT_SKIPPED;
}
-static inline void defer_compaction(struct zone *zone, int order)
-{
-}
-
-static inline bool compaction_deferred(struct zone *zone, int order)
+static inline bool compaction_made_progress(enum compact_result result)
{
- return true;
+ return false;
}
-static inline bool compaction_made_progress(enum compact_result result)
+static inline bool compaction_failed(enum compact_result result)
{
return false;
}
-static inline bool compaction_failed(enum compact_result result)
+static inline bool compaction_needs_reclaim(enum compact_result result)
{
return false;
}
@@ -212,22 +205,22 @@ static inline bool compaction_withdrawn(enum compact_result result)
return true;
}
-static inline int kcompactd_run(int nid)
+static inline void kcompactd_run(int nid)
{
- return 0;
}
static inline void kcompactd_stop(int nid)
{
}
-static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
+static inline void wakeup_kcompactd(pg_data_t *pgdat,
+ int order, int highest_zoneidx)
{
}
#endif /* CONFIG_COMPACTION */
-#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
struct node;
+#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 5a6a109b4a50..44b1736c95b5 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_COMPAT_H
#define _LINUX_COMPAT_H
/*
@@ -6,8 +7,7 @@
*/
#include <linux/types.h>
-
-#ifdef CONFIG_COMPAT
+#include <linux/time.h>
#include <linux/stat.h>
#include <linux/param.h> /* for HZ */
@@ -16,22 +16,38 @@
#include <linux/if.h>
#include <linux/fs.h>
#include <linux/aio_abi.h> /* for aio_context_t */
+#include <linux/uaccess.h>
#include <linux/unistd.h>
#include <asm/compat.h>
#include <asm/siginfo.h>
#include <asm/signal.h>
+#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
+/*
+ * It may be useful for an architecture to override the definitions of the
+ * COMPAT_SYSCALL_DEFINE0 and COMPAT_SYSCALL_DEFINEx() macros, in particular
+ * to use a different calling convention for syscalls. To allow for that,
+ * the prototypes for the compat_sys_*() functions below will *not* be included
+ * if CONFIG_ARCH_HAS_SYSCALL_WRAPPER is enabled.
+ */
+#include <asm/syscall_wrapper.h>
+#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
+
#ifndef COMPAT_USE_64BIT_TIME
#define COMPAT_USE_64BIT_TIME 0
#endif
#ifndef __SC_DELOUSE
-#define __SC_DELOUSE(t,v) ((t)(unsigned long)(v))
+#define __SC_DELOUSE(t,v) ((__force t)(unsigned long)(v))
#endif
+#ifndef COMPAT_SYSCALL_DEFINE0
#define COMPAT_SYSCALL_DEFINE0(name) \
+ asmlinkage long compat_sys_##name(void); \
+ ALLOW_ERROR_INJECTION(compat_sys_##name, ERRNO); \
asmlinkage long compat_sys_##name(void)
+#endif /* COMPAT_SYSCALL_DEFINE0 */
#define COMPAT_SYSCALL_DEFINE1(name, ...) \
COMPAT_SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
@@ -46,16 +62,35 @@
#define COMPAT_SYSCALL_DEFINE6(name, ...) \
COMPAT_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__)
-#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
- asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))\
- __attribute__((alias(__stringify(compat_SyS##name)))); \
- static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
- asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__));\
- asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__))\
- { \
- return C_SYSC##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \
- } \
- static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+/*
+ * The asmlinkage stub is aliased to a function named __se_compat_sys_*() which
+ * sign-extends 32-bit ints to longs whenever needed. The actual work is
+ * done within __do_compat_sys_*().
+ */
+#ifndef COMPAT_SYSCALL_DEFINEx
+#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
+ __diag_push(); \
+ __diag_ignore(GCC, 8, "-Wattribute-alias", \
+ "Type aliasing is used to sanitize syscall arguments");\
+ asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
+ __attribute__((alias(__stringify(__se_compat_sys##name)))); \
+ ALLOW_ERROR_INJECTION(compat_sys##name, ERRNO); \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
+ asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ long ret = __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ return ret; \
+ } \
+ __diag_pop(); \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+#endif /* COMPAT_SYSCALL_DEFINEx */
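
To make the machinery concrete, a simplified illustration (not the literal
preprocessor output; __SC_TEST and the diagnostic pragmas are omitted) of
what COMPAT_SYSCALL_DEFINE1(foo, int, x) expands to under the generic
definitions above:

	asmlinkage long compat_sys_foo(int x)
		__attribute__((alias("__se_compat_sys_foo")));
	asmlinkage long __se_compat_sys_foo(long x)
	{
		/* __SC_DELOUSE narrows the register-width argument back
		 * to the declared 32-bit type. */
		long ret = __do_compat_sys_foo((int)(unsigned long)x);

		return ret;
	}
	static inline long __do_compat_sys_foo(int x)
	{ /* body supplied at the definition site */ }
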
+
+struct compat_iovec {
+ compat_uptr_t iov_base;
+ compat_size_t iov_len;
+};
#ifndef compat_user_stack_pointer
#define compat_user_stack_pointer() current_user_stack_pointer()
@@ -67,6 +102,9 @@ typedef struct compat_sigaltstack {
compat_size_t ss_size;
} compat_stack_t;
#endif
+#ifndef COMPAT_MINSIGSTKSZ
+#define COMPAT_MINSIGSTKSZ MINSIGSTKSZ
+#endif
#define compat_jiffies_to_clock_t(x) \
(((unsigned long)(x) * COMPAT_USER_HZ) / HZ)
@@ -74,29 +112,10 @@ typedef struct compat_sigaltstack {
typedef __compat_uid32_t compat_uid_t;
typedef __compat_gid32_t compat_gid_t;
-typedef compat_ulong_t compat_aio_context_t;
-
struct compat_sel_arg_struct;
struct rusage;
-struct compat_itimerspec {
- struct compat_timespec it_interval;
- struct compat_timespec it_value;
-};
-
-struct compat_utimbuf {
- compat_time_t actime;
- compat_time_t modtime;
-};
-
-struct compat_itimerval {
- struct compat_timeval it_interval;
- struct compat_timeval it_value;
-};
-
-struct itimerval;
-int get_compat_itimerval(struct itimerval *, const struct compat_itimerval __user *);
-int put_compat_itimerval(struct compat_itimerval __user *, const struct itimerval *);
+struct old_itimerval32;
struct compat_tms {
compat_clock_t tms_utime;
@@ -105,43 +124,15 @@ struct compat_tms {
compat_clock_t tms_cstime;
};
-struct compat_timex {
- compat_uint_t modes;
- compat_long_t offset;
- compat_long_t freq;
- compat_long_t maxerror;
- compat_long_t esterror;
- compat_int_t status;
- compat_long_t constant;
- compat_long_t precision;
- compat_long_t tolerance;
- struct compat_timeval time;
- compat_long_t tick;
- compat_long_t ppsfreq;
- compat_long_t jitter;
- compat_int_t shift;
- compat_long_t stabil;
- compat_long_t jitcnt;
- compat_long_t calcnt;
- compat_long_t errcnt;
- compat_long_t stbcnt;
- compat_int_t tai;
-
- compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32;
- compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32;
- compat_int_t:32; compat_int_t:32; compat_int_t:32;
-};
-
-struct timex;
-int compat_get_timex(struct timex *, const struct compat_timex __user *);
-int compat_put_timex(struct compat_timex __user *, const struct timex *);
-
#define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW)
typedef struct {
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
} compat_sigset_t;
+int set_compat_user_sigmask(const compat_sigset_t __user *umask,
+ size_t sigsetsize);
+
struct compat_sigaction {
#ifndef __ARCH_HAS_IRIX_SIGACTION
compat_uptr_t sa_handler;
@@ -156,43 +147,150 @@ struct compat_sigaction {
compat_sigset_t sa_mask __packed;
};
-/*
- * These functions operate on 32- or 64-bit specs depending on
- * COMPAT_USE_64BIT_TIME, hence the void user pointer arguments.
- */
-extern int compat_get_timespec(struct timespec *, const void __user *);
-extern int compat_put_timespec(const struct timespec *, void __user *);
-extern int compat_get_timeval(struct timeval *, const void __user *);
-extern int compat_put_timeval(const struct timeval *, void __user *);
-extern int compat_get_timespec64(struct timespec64 *, const void __user *);
-extern int compat_put_timespec64(const struct timespec64 *, void __user *);
-extern int get_compat_itimerspec64(struct itimerspec64 *its,
- const struct compat_itimerspec __user *uits);
-extern int put_compat_itimerspec64(const struct itimerspec64 *its,
- struct compat_itimerspec __user *uits);
+typedef union compat_sigval {
+ compat_int_t sival_int;
+ compat_uptr_t sival_ptr;
+} compat_sigval_t;
-/*
- * This function convert a timespec if necessary and returns a *user
- * space* pointer. If no conversion is necessary, it returns the
- * initial pointer. NULL is a legitimate argument and will always
- * output NULL.
- */
-extern int compat_convert_timespec(struct timespec __user **,
- const void __user *);
+typedef struct compat_siginfo {
+ int si_signo;
+#ifndef __ARCH_HAS_SWAPPED_SIGINFO
+ int si_errno;
+ int si_code;
+#else
+ int si_code;
+ int si_errno;
+#endif
-struct compat_iovec {
- compat_uptr_t iov_base;
- compat_size_t iov_len;
-};
+ union {
+ int _pad[128/sizeof(int) - 3];
+
+ /* kill() */
+ struct {
+ compat_pid_t _pid; /* sender's pid */
+ __compat_uid32_t _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ compat_timer_t _tid; /* timer id */
+ int _overrun; /* overrun count */
+ compat_sigval_t _sigval; /* same as below */
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ compat_pid_t _pid; /* sender's pid */
+ __compat_uid32_t _uid; /* sender's uid */
+ compat_sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ compat_pid_t _pid; /* which child */
+ __compat_uid32_t _uid; /* sender's uid */
+ int _status; /* exit code */
+ compat_clock_t _utime;
+ compat_clock_t _stime;
+ } _sigchld;
+
+#ifdef CONFIG_X86_X32_ABI
+ /* SIGCHLD (x32 version) */
+ struct {
+ compat_pid_t _pid; /* which child */
+ __compat_uid32_t _uid; /* sender's uid */
+ int _status; /* exit code */
+ compat_s64 _utime;
+ compat_s64 _stime;
+ } _sigchld_x32;
+#endif
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
+ struct {
+ compat_uptr_t _addr; /* faulting insn/memory ref. */
+#define __COMPAT_ADDR_BND_PKEY_PAD (__alignof__(compat_uptr_t) < sizeof(short) ? \
+ sizeof(short) : __alignof__(compat_uptr_t))
+ union {
+ /* used on alpha and sparc */
+ int _trapno; /* TRAP # which caused the signal */
+ /*
+ * used when si_code is BUS_MCEERR_AR
+ * or BUS_MCEERR_AO
+ */
+ short int _addr_lsb; /* Valid LSB of the reported address. */
+ /* used when si_code=SEGV_BNDERR */
+ struct {
+ char _dummy_bnd[__COMPAT_ADDR_BND_PKEY_PAD];
+ compat_uptr_t _lower;
+ compat_uptr_t _upper;
+ } _addr_bnd;
+ /* used when si_code=SEGV_PKUERR */
+ struct {
+ char _dummy_pkey[__COMPAT_ADDR_BND_PKEY_PAD];
+ u32 _pkey;
+ } _addr_pkey;
+ /* used when si_code=TRAP_PERF */
+ struct {
+ compat_ulong_t _data;
+ u32 _type;
+ u32 _flags;
+ } _perf;
+ };
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
+
+ struct {
+ compat_uptr_t _call_addr; /* calling user insn */
+ int _syscall; /* triggering system call number */
+ unsigned int _arch; /* AUDIT_ARCH_* of syscall */
+ } _sigsys;
+ } _sifields;
+} compat_siginfo_t;
struct compat_rlimit {
compat_ulong_t rlim_cur;
compat_ulong_t rlim_max;
};
+#ifdef __ARCH_NEED_COMPAT_FLOCK64_PACKED
+#define __ARCH_COMPAT_FLOCK64_PACK __attribute__((packed))
+#else
+#define __ARCH_COMPAT_FLOCK64_PACK
+#endif
+
+struct compat_flock {
+ short l_type;
+ short l_whence;
+ compat_off_t l_start;
+ compat_off_t l_len;
+#ifdef __ARCH_COMPAT_FLOCK_EXTRA_SYSID
+ __ARCH_COMPAT_FLOCK_EXTRA_SYSID
+#endif
+ compat_pid_t l_pid;
+#ifdef __ARCH_COMPAT_FLOCK_PAD
+ __ARCH_COMPAT_FLOCK_PAD
+#endif
+};
+
+struct compat_flock64 {
+ short l_type;
+ short l_whence;
+ compat_loff_t l_start;
+ compat_loff_t l_len;
+ compat_pid_t l_pid;
+#ifdef __ARCH_COMPAT_FLOCK64_PAD
+ __ARCH_COMPAT_FLOCK64_PAD
+#endif
+} __ARCH_COMPAT_FLOCK64_PACK;
+
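+/*
+ * Arch sketch (illustrative): a 32-bit ABI that aligns 64-bit members
+ * on 4 bytes would request the packed layout from its asm/compat.h so
+ * compat_flock64 matches what user space actually passes in.
+ */
+#if 0 /* example only */
+#define __ARCH_NEED_COMPAT_FLOCK64_PACKED
+#endif
+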
struct compat_rusage {
- struct compat_timeval ru_utime;
- struct compat_timeval ru_stime;
+ struct old_timeval32 ru_utime;
+ struct old_timeval32 ru_stime;
compat_long_t ru_maxrss;
compat_long_t ru_ixrss;
compat_long_t ru_idrss;
@@ -213,10 +311,7 @@ extern int put_compat_rusage(const struct rusage *,
struct compat_rusage __user *);
struct compat_siginfo;
-
-extern asmlinkage long compat_sys_waitid(int, compat_pid_t,
- struct compat_siginfo __user *, int,
- struct compat_rusage __user *);
+struct __compat_aio_sigset;
struct compat_dirent {
u32 d_ino;
@@ -316,6 +411,7 @@ struct compat_keyctl_kdf_params {
__u32 __spare[8];
};
+struct compat_stat;
struct compat_statfs;
struct compat_statfs64;
struct compat_old_linux_dirent;
@@ -329,135 +425,112 @@ struct compat_kexec_segment;
struct compat_mq_attr;
struct compat_msgbuf;
-extern void compat_exit_robust_list(struct task_struct *curr);
-
-asmlinkage long
-compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
- compat_size_t len);
-asmlinkage long
-compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
- compat_size_t __user *len_ptr);
-
-asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
-asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
-asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
- compat_ssize_t msgsz, int msgflg);
-asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
- compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg);
-long compat_sys_msgctl(int first, int second, void __user *uptr);
-long compat_sys_shmctl(int first, int second, void __user *uptr);
-long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
- unsigned nsems, const struct compat_timespec __user *timeout);
-asmlinkage long compat_sys_keyctl(u32 option,
- u32 arg2, u32 arg3, u32 arg4, u32 arg5);
-asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
-
-asmlinkage ssize_t compat_sys_readv(compat_ulong_t fd,
- const struct compat_iovec __user *vec, compat_ulong_t vlen);
-asmlinkage ssize_t compat_sys_writev(compat_ulong_t fd,
- const struct compat_iovec __user *vec, compat_ulong_t vlen);
-asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
- const struct compat_iovec __user *vec,
- compat_ulong_t vlen, u32 pos_low, u32 pos_high);
-asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
- const struct compat_iovec __user *vec,
- compat_ulong_t vlen, u32 pos_low, u32 pos_high);
-asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd,
- const struct compat_iovec __user *vec,
- compat_ulong_t vlen, u32 pos_low, u32 pos_high, int flags);
-asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd,
- const struct compat_iovec __user *vec,
- compat_ulong_t vlen, u32 pos_low, u32 pos_high, int flags);
-
-#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
-asmlinkage long compat_sys_preadv64(unsigned long fd,
- const struct compat_iovec __user *vec,
- unsigned long vlen, loff_t pos);
-#endif
-
-#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64
-asmlinkage long compat_sys_pwritev64(unsigned long fd,
- const struct compat_iovec __user *vec,
- unsigned long vlen, loff_t pos);
+void copy_siginfo_to_external32(struct compat_siginfo *to,
+ const struct kernel_siginfo *from);
+int copy_siginfo_from_user32(kernel_siginfo_t *to,
+ const struct compat_siginfo __user *from);
+int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
+ const kernel_siginfo_t *from);
+#ifndef copy_siginfo_to_user32
+#define copy_siginfo_to_user32 __copy_siginfo_to_user32
#endif
-
-asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
-
-asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
- const compat_uptr_t __user *envp);
-asmlinkage long compat_sys_execveat(int dfd, const char __user *filename,
- const compat_uptr_t __user *argv,
- const compat_uptr_t __user *envp, int flags);
-
-asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
- compat_ulong_t __user *outp, compat_ulong_t __user *exp,
- struct compat_timeval __user *tvp);
-
-asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg);
-
-asmlinkage long compat_sys_wait4(compat_pid_t pid,
- compat_uint_t __user *stat_addr, int options,
- struct compat_rusage __user *ru);
-
-#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
-
-#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)
-
-long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
- unsigned long bitmap_size);
-long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
- unsigned long bitmap_size);
-int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from);
-int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from);
int get_compat_sigevent(struct sigevent *event,
const struct compat_sigevent __user *u_event);
-long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig,
- struct compat_siginfo __user *uinfo);
-#ifdef CONFIG_COMPAT_OLD_SIGACTION
-asmlinkage long compat_sys_sigaction(int sig,
- const struct compat_old_sigaction __user *act,
- struct compat_old_sigaction __user *oact);
-#endif
-static inline int compat_timeval_compare(struct compat_timeval *lhs,
- struct compat_timeval *rhs)
-{
- if (lhs->tv_sec < rhs->tv_sec)
- return -1;
- if (lhs->tv_sec > rhs->tv_sec)
- return 1;
- return lhs->tv_usec - rhs->tv_usec;
-}
+extern int get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat);
-static inline int compat_timespec_compare(struct compat_timespec *lhs,
- struct compat_timespec *rhs)
+/*
+ * Defined inline such that size can be compile time constant, which avoids
+ * CONFIG_HARDENED_USERCOPY complaining about copies from task_struct
+ */
+static inline int
+put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
+ unsigned int size)
{
- if (lhs->tv_sec < rhs->tv_sec)
- return -1;
- if (lhs->tv_sec > rhs->tv_sec)
- return 1;
- return lhs->tv_nsec - rhs->tv_nsec;
+ /* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */
+#if defined(__BIG_ENDIAN) && defined(CONFIG_64BIT)
+ compat_sigset_t v;
+ switch (_NSIG_WORDS) {
+ case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
+ fallthrough;
+ case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
+ fallthrough;
+ case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
+ fallthrough;
+ case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
+ }
+ return copy_to_user(compat, &v, size) ? -EFAULT : 0;
+#else
+ return copy_to_user(compat, set, size) ? -EFAULT : 0;
+#endif
}
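+
+/*
+ * Caller sketch (illustrative): handing a saved mask back to a 32-bit
+ * task, roughly as a compat rt_sigprocmask-style path might. The
+ * helper name is hypothetical.
+ */
+#if 0 /* example only */
+static long example_report_sigmask(compat_sigset_t __user *oset,
+				   const sigset_t *kset)
+{
+	/* constant size, so HARDENED_USERCOPY sees a compile-time bound */
+	return put_compat_sigset(oset, kset, sizeof(compat_sigset_t));
+}
+#endif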
-extern int get_compat_itimerspec(struct itimerspec *dst,
- const struct compat_itimerspec __user *src);
-extern int put_compat_itimerspec(struct compat_itimerspec __user *dst,
- const struct itimerspec *src);
-
-asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
- struct timezone __user *tz);
-asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
- struct timezone __user *tz);
-
-asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
-
-extern void sigset_from_compat(sigset_t *set, const compat_sigset_t *compat);
-extern void sigset_to_compat(compat_sigset_t *compat, const sigset_t *set);
-
-asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
- compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes,
- const compat_ulong_t __user *new_nodes);
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define unsafe_put_compat_sigset(compat, set, label) do { \
+ compat_sigset_t __user *__c = compat; \
+ const sigset_t *__s = set; \
+ \
+ switch (_NSIG_WORDS) { \
+ case 4: \
+ unsafe_put_user(__s->sig[3] >> 32, &__c->sig[7], label); \
+ unsafe_put_user(__s->sig[3], &__c->sig[6], label); \
+ fallthrough; \
+ case 3: \
+ unsafe_put_user(__s->sig[2] >> 32, &__c->sig[5], label); \
+ unsafe_put_user(__s->sig[2], &__c->sig[4], label); \
+ fallthrough; \
+ case 2: \
+ unsafe_put_user(__s->sig[1] >> 32, &__c->sig[3], label); \
+ unsafe_put_user(__s->sig[1], &__c->sig[2], label); \
+ fallthrough; \
+ case 1: \
+ unsafe_put_user(__s->sig[0] >> 32, &__c->sig[1], label); \
+ unsafe_put_user(__s->sig[0], &__c->sig[0], label); \
+ } \
+} while (0)
+
+#define unsafe_get_compat_sigset(set, compat, label) do { \
+ const compat_sigset_t __user *__c = compat; \
+ compat_sigset_word hi, lo; \
+ sigset_t *__s = set; \
+ \
+ switch (_NSIG_WORDS) { \
+ case 4: \
+ unsafe_get_user(lo, &__c->sig[7], label); \
+ unsafe_get_user(hi, &__c->sig[6], label); \
+ __s->sig[3] = hi | (((long)lo) << 32); \
+ fallthrough; \
+ case 3: \
+ unsafe_get_user(lo, &__c->sig[5], label); \
+ unsafe_get_user(hi, &__c->sig[4], label); \
+ __s->sig[2] = hi | (((long)lo) << 32); \
+ fallthrough; \
+ case 2: \
+ unsafe_get_user(lo, &__c->sig[3], label); \
+ unsafe_get_user(hi, &__c->sig[2], label); \
+ __s->sig[1] = hi | (((long)lo) << 32); \
+ fallthrough; \
+ case 1: \
+ unsafe_get_user(lo, &__c->sig[1], label); \
+ unsafe_get_user(hi, &__c->sig[0], label); \
+ __s->sig[0] = hi | (((long)lo) << 32); \
+ } \
+} while (0)
+#else
+#define unsafe_put_compat_sigset(compat, set, label) do { \
+ compat_sigset_t __user *__c = compat; \
+ const sigset_t *__s = set; \
+ \
+ unsafe_copy_to_user(__c, __s, sizeof(*__c), label); \
+} while (0)
+
+#define unsafe_get_compat_sigset(set, compat, label) do { \
+ const compat_sigset_t __user *__c = compat; \
+ sigset_t *__s = set; \
+ \
+ unsafe_copy_from_user(__s, __c, sizeof(*__c), label); \
+} while (0)
+#endif
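+
+/*
+ * Usage sketch (illustrative): the unsafe_*() variants must run inside
+ * a user_access_begin()/user_access_end() window and jump to the given
+ * label on a fault. The wrapper below is hypothetical.
+ */
+#if 0 /* example only */
+static int example_put_sigset(compat_sigset_t __user *uset,
+			      const sigset_t *set)
+{
+	if (!user_access_begin(uset, sizeof(*uset)))
+		return -EFAULT;
+	unsafe_put_compat_sigset(uset, set, Efault);
+	user_access_end();
+	return 0;
+Efault:
+	user_access_end();
+	return -EFAULT;
+}
+#endif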
extern int compat_ptrace_request(struct task_struct *child,
compat_long_t request,
@@ -465,252 +538,376 @@ extern int compat_ptrace_request(struct task_struct *child,
extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t addr, compat_ulong_t data);
-asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
- compat_long_t addr, compat_long_t data);
-asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
+struct epoll_event; /* fortunately, this one is fixed-layout */
+
+int compat_restore_altstack(const compat_stack_t __user *uss);
+int __compat_save_altstack(compat_stack_t __user *, unsigned long);
+#define unsafe_compat_save_altstack(uss, sp, label) do { \
+ compat_stack_t __user *__uss = uss; \
+ struct task_struct *t = current; \
+ unsafe_put_user(ptr_to_compat((void __user *)t->sas_ss_sp), \
+ &__uss->ss_sp, label); \
+ unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \
+ unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \
+} while (0);
+
/*
- * epoll (fs/eventpoll.c) compat bits follow ...
+ * These syscall function prototypes are kept in the same order as
+ * include/uapi/asm-generic/unistd.h. Deprecated or obsolete system calls
+ * go below.
+ *
+ * Please note that these prototypes here are only provided for information
+ * purposes, for static analysis, and for linking from the syscall table.
+ * These functions should not be called elsewhere from kernel code.
+ *
+ * As the syscall calling convention may be different from the default
+ * for architectures overriding the syscall calling convention, do not
+ * include the prototypes if CONFIG_ARCH_HAS_SYSCALL_WRAPPER is enabled.
*/
-struct epoll_event; /* fortunately, this one is fixed-layout */
+#ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
+asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p);
+asmlinkage long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr,
+ u32 __user *iocb);
+asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id,
+ compat_long_t min_nr,
+ compat_long_t nr,
+ struct io_event __user *events,
+ struct old_timespec32 __user *timeout,
+ const struct __compat_aio_sigset __user *usig);
+asmlinkage long compat_sys_io_pgetevents_time64(compat_aio_context_t ctx_id,
+ compat_long_t min_nr,
+ compat_long_t nr,
+ struct io_event __user *events,
+ struct __kernel_timespec __user *timeout,
+ const struct __compat_aio_sigset __user *usig);
+
+/* fs/cookies.c */
+asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
+
+/* fs/eventpoll.c */
asmlinkage long compat_sys_epoll_pwait(int epfd,
struct epoll_event __user *events,
int maxevents, int timeout,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
+asmlinkage long compat_sys_epoll_pwait2(int epfd,
+ struct epoll_event __user *events,
+ int maxevents,
+ const struct __kernel_timespec __user *timeout,
+ const compat_sigset_t __user *sigmask,
+ compat_size_t sigsetsize);
-asmlinkage long compat_sys_utime(const char __user *filename,
- struct compat_utimbuf __user *t);
-asmlinkage long compat_sys_utimensat(unsigned int dfd,
- const char __user *filename,
- struct compat_timespec __user *t,
- int flags);
+/* fs/fcntl.c */
+asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd,
+ compat_ulong_t arg);
+asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
+ compat_ulong_t arg);
-asmlinkage long compat_sys_time(compat_time_t __user *tloc);
-asmlinkage long compat_sys_stime(compat_time_t __user *tptr);
-asmlinkage long compat_sys_signalfd(int ufd,
- const compat_sigset_t __user *sigmask,
- compat_size_t sigsetsize);
-asmlinkage long compat_sys_timerfd_settime(int ufd, int flags,
- const struct compat_itimerspec __user *utmr,
- struct compat_itimerspec __user *otmr);
-asmlinkage long compat_sys_timerfd_gettime(int ufd,
- struct compat_itimerspec __user *otmr);
-
-asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages,
- __u32 __user *pages,
- const int __user *nodes,
- int __user *status,
- int flags);
-asmlinkage long compat_sys_futimesat(unsigned int dfd,
- const char __user *filename,
- struct compat_timeval __user *t);
-asmlinkage long compat_sys_utimes(const char __user *filename,
- struct compat_timeval __user *t);
-asmlinkage long compat_sys_newstat(const char __user *filename,
- struct compat_stat __user *statbuf);
-asmlinkage long compat_sys_newlstat(const char __user *filename,
- struct compat_stat __user *statbuf);
-asmlinkage long compat_sys_newfstatat(unsigned int dfd,
- const char __user *filename,
- struct compat_stat __user *statbuf,
- int flag);
-asmlinkage long compat_sys_newfstat(unsigned int fd,
- struct compat_stat __user *statbuf);
+/* fs/ioctl.c */
+asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
+ compat_ulong_t arg);
+
+/* fs/open.c */
asmlinkage long compat_sys_statfs(const char __user *pathname,
struct compat_statfs __user *buf);
-asmlinkage long compat_sys_fstatfs(unsigned int fd,
- struct compat_statfs __user *buf);
asmlinkage long compat_sys_statfs64(const char __user *pathname,
compat_size_t sz,
struct compat_statfs64 __user *buf);
+asmlinkage long compat_sys_fstatfs(unsigned int fd,
+ struct compat_statfs __user *buf);
asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
struct compat_statfs64 __user *buf);
-asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
- compat_ulong_t arg);
-asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd,
- compat_ulong_t arg);
-asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p);
-asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id,
- compat_long_t min_nr,
- compat_long_t nr,
- struct io_event __user *events,
- struct compat_timespec __user *timeout);
-asmlinkage long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr,
- u32 __user *iocb);
-asmlinkage long compat_sys_mount(const char __user *dev_name,
- const char __user *dir_name,
- const char __user *type, compat_ulong_t flags,
- const void __user *data);
-asmlinkage long compat_sys_old_readdir(unsigned int fd,
- struct compat_old_linux_dirent __user *,
- unsigned int count);
+asmlinkage long compat_sys_truncate(const char __user *, compat_off_t);
+asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t);
+/* No generic prototype for truncate64, ftruncate64, fallocate */
+asmlinkage long compat_sys_openat(int dfd, const char __user *filename,
+ int flags, umode_t mode);
+
+/* fs/readdir.c */
asmlinkage long compat_sys_getdents(unsigned int fd,
struct compat_linux_dirent __user *dirent,
unsigned int count);
-asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *,
- unsigned int nr_segs, unsigned int flags);
-asmlinkage long compat_sys_open(const char __user *filename, int flags,
- umode_t mode);
-asmlinkage long compat_sys_openat(int dfd, const char __user *filename,
- int flags, umode_t mode);
-asmlinkage long compat_sys_open_by_handle_at(int mountdirfd,
- struct file_handle __user *handle,
- int flags);
-asmlinkage long compat_sys_truncate(const char __user *, compat_off_t);
-asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t);
-asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
+
+/* fs/read_write.c */
+asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
+/* No generic prototype for pread64 and pwrite64 */
+asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
+ const struct iovec __user *vec,
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high);
+asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
+ const struct iovec __user *vec,
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high);
+#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
+asmlinkage long compat_sys_preadv64(unsigned long fd,
+ const struct iovec __user *vec,
+ unsigned long vlen, loff_t pos);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64
+asmlinkage long compat_sys_pwritev64(unsigned long fd,
+ const struct iovec __user *vec,
+ unsigned long vlen, loff_t pos);
+#endif
+
+/* fs/sendfile.c */
+asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
+ compat_off_t __user *offset, compat_size_t count);
+asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
+ compat_loff_t __user *offset, compat_size_t count);
+
+/* fs/select.c */
+asmlinkage long compat_sys_pselect6_time32(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp,
compat_ulong_t __user *exp,
- struct compat_timespec __user *tsp,
+ struct old_timespec32 __user *tsp,
void __user *sig);
-asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
+asmlinkage long compat_sys_pselect6_time64(int n, compat_ulong_t __user *inp,
+ compat_ulong_t __user *outp,
+ compat_ulong_t __user *exp,
+ struct __kernel_timespec __user *tsp,
+ void __user *sig);
+asmlinkage long compat_sys_ppoll_time32(struct pollfd __user *ufds,
+ unsigned int nfds,
+ struct old_timespec32 __user *tsp,
+ const compat_sigset_t __user *sigmask,
+ compat_size_t sigsetsize);
+asmlinkage long compat_sys_ppoll_time64(struct pollfd __user *ufds,
unsigned int nfds,
- struct compat_timespec __user *tsp,
+ struct __kernel_timespec __user *tsp,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
+
+/* fs/signalfd.c */
asmlinkage long compat_sys_signalfd4(int ufd,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize, int flags);
-asmlinkage long compat_sys_get_mempolicy(int __user *policy,
- compat_ulong_t __user *nmask,
- compat_ulong_t maxnode,
- compat_ulong_t addr,
- compat_ulong_t flags);
-asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
- compat_ulong_t maxnode);
-asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
- compat_ulong_t mode,
- compat_ulong_t __user *nmask,
- compat_ulong_t maxnode, compat_ulong_t flags);
-
-asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
- char __user *optval, unsigned int optlen);
-asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg,
- unsigned flags);
-asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
- unsigned vlen, unsigned int flags);
-asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg,
- unsigned int flags);
-asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
- unsigned flags);
-asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len,
- unsigned flags, struct sockaddr __user *addr,
- int __user *addrlen);
-asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
- unsigned vlen, unsigned int flags,
- struct compat_timespec __user *timeout);
-asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
- struct compat_timespec __user *rmtp);
+
+/* fs/stat.c */
+asmlinkage long compat_sys_newfstatat(unsigned int dfd,
+ const char __user *filename,
+ struct compat_stat __user *statbuf,
+ int flag);
+asmlinkage long compat_sys_newfstat(unsigned int fd,
+ struct compat_stat __user *statbuf);
+
+/* fs/sync.c: No generic prototype for sync_file_range and sync_file_range2 */
+
+/* kernel/exit.c */
+asmlinkage long compat_sys_waitid(int, compat_pid_t,
+ struct compat_siginfo __user *, int,
+ struct compat_rusage __user *);
+
+/* kernel/futex.c */
+asmlinkage long
+compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
+ compat_size_t len);
+asmlinkage long
+compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
+ compat_size_t __user *len_ptr);
+
+/* kernel/itimer.c */
asmlinkage long compat_sys_getitimer(int which,
- struct compat_itimerval __user *it);
+ struct old_itimerval32 __user *it);
asmlinkage long compat_sys_setitimer(int which,
- struct compat_itimerval __user *in,
- struct compat_itimerval __user *out);
-asmlinkage long compat_sys_times(struct compat_tms __user *tbuf);
-asmlinkage long compat_sys_setrlimit(unsigned int resource,
- struct compat_rlimit __user *rlim);
-asmlinkage long compat_sys_getrlimit(unsigned int resource,
- struct compat_rlimit __user *rlim);
-asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru);
+ struct old_itimerval32 __user *in,
+ struct old_itimerval32 __user *out);
+
+/* kernel/kexec.c */
+asmlinkage long compat_sys_kexec_load(compat_ulong_t entry,
+ compat_ulong_t nr_segments,
+ struct compat_kexec_segment __user *,
+ compat_ulong_t flags);
+
+/* kernel/posix-timers.c */
+asmlinkage long compat_sys_timer_create(clockid_t which_clock,
+ struct compat_sigevent __user *timer_event_spec,
+ timer_t __user *created_timer_id);
+
+/* kernel/ptrace.c */
+asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+ compat_long_t addr, compat_long_t data);
+
+/* kernel/sched/core.c */
asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
unsigned int len,
compat_ulong_t __user *user_mask_ptr);
asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid,
unsigned int len,
compat_ulong_t __user *user_mask_ptr);
-asmlinkage long compat_sys_timer_create(clockid_t which_clock,
- struct compat_sigevent __user *timer_event_spec,
- timer_t __user *created_timer_id);
-asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags,
- struct compat_itimerspec __user *new,
- struct compat_itimerspec __user *old);
-asmlinkage long compat_sys_timer_gettime(timer_t timer_id,
- struct compat_itimerspec __user *setting);
-asmlinkage long compat_sys_clock_settime(clockid_t which_clock,
- struct compat_timespec __user *tp);
-asmlinkage long compat_sys_clock_gettime(clockid_t which_clock,
- struct compat_timespec __user *tp);
-asmlinkage long compat_sys_clock_adjtime(clockid_t which_clock,
- struct compat_timex __user *tp);
-asmlinkage long compat_sys_clock_getres(clockid_t which_clock,
- struct compat_timespec __user *tp);
-asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
- struct compat_timespec __user *rqtp,
- struct compat_timespec __user *rmtp);
-asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese,
- struct compat_siginfo __user *uinfo,
- struct compat_timespec __user *uts, compat_size_t sigsetsize);
+
+/* kernel/signal.c */
+asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
+ compat_stack_t __user *uoss_ptr);
asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset,
compat_size_t sigsetsize);
-asmlinkage long compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set,
- compat_sigset_t __user *oset,
- compat_size_t sigsetsize);
-asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset,
- compat_size_t sigsetsize);
#ifndef CONFIG_ODD_RT_SIGACTION
asmlinkage long compat_sys_rt_sigaction(int,
const struct compat_sigaction __user *,
struct compat_sigaction __user *,
compat_size_t);
#endif
+asmlinkage long compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set,
+ compat_sigset_t __user *oset,
+ compat_size_t sigsetsize);
+asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset,
+ compat_size_t sigsetsize);
+asmlinkage long compat_sys_rt_sigtimedwait_time32(compat_sigset_t __user *uthese,
+ struct compat_siginfo __user *uinfo,
+ struct old_timespec32 __user *uts, compat_size_t sigsetsize);
+asmlinkage long compat_sys_rt_sigtimedwait_time64(compat_sigset_t __user *uthese,
+ struct compat_siginfo __user *uinfo,
+ struct __kernel_timespec __user *uts, compat_size_t sigsetsize);
asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
+/* No generic prototype for rt_sigreturn */
+
+/* kernel/sys.c */
+asmlinkage long compat_sys_times(struct compat_tms __user *tbuf);
+asmlinkage long compat_sys_getrlimit(unsigned int resource,
+ struct compat_rlimit __user *rlim);
+asmlinkage long compat_sys_setrlimit(unsigned int resource,
+ struct compat_rlimit __user *rlim);
+asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru);
+
+/* kernel/time.c */
+asmlinkage long compat_sys_gettimeofday(struct old_timeval32 __user *tv,
+ struct timezone __user *tz);
+asmlinkage long compat_sys_settimeofday(struct old_timeval32 __user *tv,
+ struct timezone __user *tz);
+
+/* kernel/timer.c */
asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info);
-asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
- compat_ulong_t arg);
-asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
- struct compat_timespec __user *utime, u32 __user *uaddr2,
- u32 val3);
-asmlinkage long compat_sys_getsockopt(int fd, int level, int optname,
- char __user *optval, int __user *optlen);
-asmlinkage long compat_sys_kexec_load(compat_ulong_t entry,
- compat_ulong_t nr_segments,
- struct compat_kexec_segment __user *,
- compat_ulong_t flags);
-asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
- const struct compat_mq_attr __user *u_mqstat,
- struct compat_mq_attr __user *u_omqstat);
-asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
- const struct compat_sigevent __user *u_notification);
+
+/* ipc/mqueue.c */
asmlinkage long compat_sys_mq_open(const char __user *u_name,
int oflag, compat_mode_t mode,
struct compat_mq_attr __user *u_attr);
-asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes,
- const char __user *u_msg_ptr,
- compat_size_t msg_len, unsigned int msg_prio,
- const struct compat_timespec __user *u_abs_timeout);
-asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes,
- char __user *u_msg_ptr,
- compat_size_t msg_len, unsigned int __user *u_msg_prio,
- const struct compat_timespec __user *u_abs_timeout);
-asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
-asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args);
+asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
+ const struct compat_sigevent __user *u_notification);
+asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
+ const struct compat_mq_attr __user *u_mqstat,
+ struct compat_mq_attr __user *u_omqstat);
-extern ssize_t compat_rw_copy_check_uvector(int type,
- const struct compat_iovec __user *uvector,
- unsigned long nr_segs,
- unsigned long fast_segs, struct iovec *fast_pointer,
- struct iovec **ret_pointer);
+/* ipc/msg.c */
+asmlinkage long compat_sys_msgctl(int first, int second, void __user *uptr);
+asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
+ compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg);
+asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
+ compat_ssize_t msgsz, int msgflg);
-extern void __user *compat_alloc_user_space(unsigned long len);
+/* ipc/sem.c */
+asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
-asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid,
- const struct compat_iovec __user *lvec,
- compat_ulong_t liovcnt, const struct compat_iovec __user *rvec,
- compat_ulong_t riovcnt, compat_ulong_t flags);
-asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
- const struct compat_iovec __user *lvec,
- compat_ulong_t liovcnt, const struct compat_iovec __user *rvec,
- compat_ulong_t riovcnt, compat_ulong_t flags);
+/* ipc/shm.c */
+asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr);
+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
-asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
- compat_off_t __user *offset, compat_size_t count);
-asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
- compat_loff_t __user *offset, compat_size_t count);
-asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
- compat_stack_t __user *uoss_ptr);
+/* net/socket.c */
+asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len,
+ unsigned flags, struct sockaddr __user *addr,
+ int __user *addrlen);
+asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg,
+ unsigned flags);
+asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg,
+ unsigned int flags);
+/* mm/filemap.c: No generic prototype for readahead */
+
+/* security/keys/keyctl.c */
+asmlinkage long compat_sys_keyctl(u32 option,
+ u32 arg2, u32 arg3, u32 arg4, u32 arg5);
+
+/* arch/example/kernel/sys_example.c */
+asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
+ const compat_uptr_t __user *envp);
+
+/* mm/fadvise.c: No generic prototype for fadvise64_64 */
+
+/* mm/, CONFIG_MMU only */
+asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid,
+ compat_pid_t pid, int sig,
+ struct compat_siginfo __user *uinfo);
+asmlinkage long compat_sys_recvmmsg_time64(int fd, struct compat_mmsghdr __user *mmsg,
+ unsigned vlen, unsigned int flags,
+ struct __kernel_timespec __user *timeout);
+asmlinkage long compat_sys_recvmmsg_time32(int fd, struct compat_mmsghdr __user *mmsg,
+ unsigned vlen, unsigned int flags,
+ struct old_timespec32 __user *timeout);
+asmlinkage long compat_sys_wait4(compat_pid_t pid,
+ compat_uint_t __user *stat_addr, int options,
+ struct compat_rusage __user *ru);
+asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
+ int, const char __user *);
+asmlinkage long compat_sys_open_by_handle_at(int mountdirfd,
+ struct file_handle __user *handle,
+ int flags);
+asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
+ unsigned vlen, unsigned int flags);
+asmlinkage long compat_sys_execveat(int dfd, const char __user *filename,
+ const compat_uptr_t __user *argv,
+ const compat_uptr_t __user *envp, int flags);
+asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd,
+ const struct iovec __user *vec,
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
+asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd,
+ const struct iovec __user *vec,
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
+#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2
+asmlinkage long compat_sys_preadv64v2(unsigned long fd,
+ const struct iovec __user *vec,
+ unsigned long vlen, loff_t pos, rwf_t flags);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
+asmlinkage long compat_sys_pwritev64v2(unsigned long fd,
+ const struct iovec __user *vec,
+ unsigned long vlen, loff_t pos, rwf_t flags);
+#endif
+
+/*
+ * Deprecated system calls which are still defined in
+ * include/uapi/asm-generic/unistd.h and wanted by >= 1 arch
+ */
+
+/* __ARCH_WANT_SYSCALL_NO_AT */
+asmlinkage long compat_sys_open(const char __user *filename, int flags,
+ umode_t mode);
+
+/* __ARCH_WANT_SYSCALL_NO_FLAGS */
+asmlinkage long compat_sys_signalfd(int ufd,
+ const compat_sigset_t __user *sigmask,
+ compat_size_t sigsetsize);
+
+/* __ARCH_WANT_SYSCALL_OFF_T */
+asmlinkage long compat_sys_newstat(const char __user *filename,
+ struct compat_stat __user *statbuf);
+asmlinkage long compat_sys_newlstat(const char __user *filename,
+ struct compat_stat __user *statbuf);
+
+/* __ARCH_WANT_SYSCALL_DEPRECATED */
+asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
+ compat_ulong_t __user *outp, compat_ulong_t __user *exp,
+ struct old_timeval32 __user *tvp);
+asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
+asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
+ unsigned flags);
+
+/* obsolete: fs/readdir.c */
+asmlinkage long compat_sys_old_readdir(unsigned int fd,
+ struct compat_old_linux_dirent __user *,
+ unsigned int count);
+
+/* obsolete: fs/select.c */
+asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg);
+
+/* obsolete: ipc */
+asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
+
+/* obsolete: kernel/signal.c */
#ifdef __ARCH_WANT_SYS_SIGPENDING
asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set);
#endif
@@ -719,60 +916,137 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set);
asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *nset,
compat_old_sigset_t __user *oset);
#endif
+#ifdef CONFIG_COMPAT_OLD_SIGACTION
+asmlinkage long compat_sys_sigaction(int sig,
+ const struct compat_old_sigaction __user *act,
+ struct compat_old_sigaction __user *oact);
+#endif
-int compat_restore_altstack(const compat_stack_t __user *uss);
-int __compat_save_altstack(compat_stack_t __user *, unsigned long);
-#define compat_save_altstack_ex(uss, sp) do { \
- compat_stack_t __user *__uss = uss; \
- struct task_struct *t = current; \
- put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \
- put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \
- put_user_ex(t->sas_ss_size, &__uss->ss_size); \
- if (t->sas_ss_flags & SS_AUTODISARM) \
- sas_ss_reset(t); \
-} while (0);
+/* obsolete: net/socket.c */
+asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
-asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
- struct compat_timespec __user *interval);
+#ifdef __ARCH_WANT_COMPAT_TRUNCATE64
+asmlinkage long compat_sys_truncate64(const char __user *pathname, compat_arg_u64(len));
+#endif
-asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
- int, const char __user *);
+#ifdef __ARCH_WANT_COMPAT_FTRUNCATE64
+asmlinkage long compat_sys_ftruncate64(unsigned int fd, compat_arg_u64(len));
+#endif
-asmlinkage long compat_sys_arch_prctl(int option, unsigned long arg2);
+#ifdef __ARCH_WANT_COMPAT_FALLOCATE
+asmlinkage long compat_sys_fallocate(int fd, int mode, compat_arg_u64(offset),
+ compat_arg_u64(len));
+#endif
-/*
- * For most but not all architectures, "am I in a compat syscall?" and
- * "am I a compat task?" are the same question. For architectures on which
- * they aren't the same question, arch code can override in_compat_syscall.
- */
+#ifdef __ARCH_WANT_COMPAT_PREAD64
+asmlinkage long compat_sys_pread64(unsigned int fd, char __user *buf, size_t count,
+ compat_arg_u64(pos));
+#endif
-#ifndef in_compat_syscall
-static inline bool in_compat_syscall(void) { return is_compat_task(); }
+#ifdef __ARCH_WANT_COMPAT_PWRITE64
+asmlinkage long compat_sys_pwrite64(unsigned int fd, const char __user *buf, size_t count,
+ compat_arg_u64(pos));
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_SYNC_FILE_RANGE
+asmlinkage long compat_sys_sync_file_range(int fd, compat_arg_u64(pos),
+ compat_arg_u64(nbytes), unsigned int flags);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_FADVISE64_64
+asmlinkage long compat_sys_fadvise64_64(int fd, compat_arg_u64(pos),
+ compat_arg_u64(len), int advice);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_READAHEAD
+asmlinkage long compat_sys_readahead(int fd, compat_arg_u64(offset), size_t count);
#endif
+#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
+
/**
- * ns_to_compat_timeval - Compat version of ns_to_timeval
+ * ns_to_old_timeval32 - Compat version of ns_to_timeval
* @nsec: the nanoseconds value to be converted
*
- * Returns the compat_timeval representation of the nsec parameter.
+ * Returns the old_timeval32 representation of the nsec parameter.
*/
-static inline struct compat_timeval ns_to_compat_timeval(s64 nsec)
+static inline struct old_timeval32 ns_to_old_timeval32(s64 nsec)
{
- struct timeval tv;
- struct compat_timeval ctv;
+ struct __kernel_old_timeval tv;
+ struct old_timeval32 ctv;
- tv = ns_to_timeval(nsec);
+ tv = ns_to_kernel_old_timeval(nsec);
ctv.tv_sec = tv.tv_sec;
ctv.tv_usec = tv.tv_usec;
return ctv;
}
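+
+/*
+ * Usage sketch (illustrative): publishing a nanosecond timestamp to a
+ * 32-bit caller through the helper above; names are hypothetical.
+ */
+#if 0 /* example only */
+static int example_put_tv32(struct old_timeval32 __user *utv, s64 nsec)
+{
+	struct old_timeval32 ctv = ns_to_old_timeval32(nsec);
+
+	return copy_to_user(utv, &ctv, sizeof(ctv)) ? -EFAULT : 0;
+}
+#endif
+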
+/*
+ * Kernel code should not call compat syscalls (i.e., compat_sys_xyzyyz())
+ * directly. Instead, use one of the functions which work equivalently, such
+ * as the kcompat_sys_xyzyyz() functions prototyped below.
+ */
+
+int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz,
+ struct compat_statfs64 __user * buf);
+int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
+ struct compat_statfs64 __user * buf);
+
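+/*
+ * Caller sketch (illustrative): in-kernel code wanting compat statfs64
+ * semantics goes through the kcompat_*() helper, never through
+ * compat_sys_statfs64() itself. The wrapper name is hypothetical.
+ */
+#if 0 /* example only */
+static long example_statfs64(const char __user *path,
+			     struct compat_statfs64 __user *buf)
+{
+	return kcompat_sys_statfs64(path, sizeof(*buf), buf);
+}
+#endif
+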
+#ifdef CONFIG_COMPAT
+
+/*
+ * For most but not all architectures, "am I in a compat syscall?" and
+ * "am I a compat task?" are the same question. For architectures on which
+ * they aren't the same question, arch code can override in_compat_syscall.
+ */
+#ifndef in_compat_syscall
+static inline bool in_compat_syscall(void) { return is_compat_task(); }
+#endif
+
#else /* !CONFIG_COMPAT */
#define is_compat_task() (0)
+/* Ensure no one redefines in_compat_syscall() under !CONFIG_COMPAT */
+#define in_compat_syscall in_compat_syscall
static inline bool in_compat_syscall(void) { return false; }
#endif /* CONFIG_COMPAT */
+#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
+
+#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)
+
+long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
+ unsigned long bitmap_size);
+long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
+ unsigned long bitmap_size);
+
+/*
+ * Some legacy ABIs like the i386 one use less than natural alignment for 64-bit
+ * types, and will need special compat treatment for that. Most architectures
+ * don't need that special handling even for compat syscalls.
+ */
+#ifndef compat_need_64bit_alignment_fixup
+#define compat_need_64bit_alignment_fixup() false
+#endif
+
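+/*
+ * Usage sketch (illustrative): an ioctl argument with a __u64 field
+ * that old 32-bit ABIs pack on a 4-byte boundary can be copied through
+ * a packed mirror when the fixup applies. Both structs are
+ * hypothetical.
+ */
+#if 0 /* example only */
+struct example_args {
+	__u32 flags;
+	__u64 offset;			/* naturally 8-byte aligned */
+};
+
+struct example_args_compat {
+	__u32 flags;
+	__u64 offset;
+} __packed;				/* matches the legacy layout */
+
+static int example_get_args(struct example_args *a, const void __user *uarg)
+{
+	if (compat_need_64bit_alignment_fixup()) {
+		struct example_args_compat c;
+
+		if (copy_from_user(&c, uarg, sizeof(c)))
+			return -EFAULT;
+		a->flags = c.flags;
+		a->offset = c.offset;
+		return 0;
+	}
+	return copy_from_user(a, uarg, sizeof(*a)) ? -EFAULT : 0;
+}
+#endif
+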
+/*
+ * A pointer passed in from user mode. This should not
+ * be used for syscall parameters, just declare them
+ * as pointers because the syscall entry code will have
+ * appropriately converted them already.
+ */
+#ifndef compat_ptr
+static inline void __user *compat_ptr(compat_uptr_t uptr)
+{
+ return (void __user *)(unsigned long)uptr;
+}
+#endif
+
+static inline compat_uptr_t ptr_to_compat(void __user *uptr)
+{
+ return (u32)(unsigned long)uptr;
+}
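+
+/*
+ * Usage sketch (illustrative): widening a 32-bit pointer carried in a
+ * compat structure before use; ptr_to_compat() performs the inverse.
+ */
+#if 0 /* example only */
+static void __user *example_iov_base(const struct compat_iovec *civ)
+{
+	return compat_ptr(civ->iov_base);
+}
+#endif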
+
#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index de179993e039..6cfd6902bd5b 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -1,17 +1,120 @@
-#ifndef __LINUX_COMPILER_H
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COMPILER_TYPES_H
#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
#endif
-/* Some compiler specific definitions are overwritten here
- * for Clang compiler
- */
-
-#ifdef uninitialized_var
-#undef uninitialized_var
-#define uninitialized_var(x) x = *(&(x))
-#endif
+/* Compiler specific definitions for Clang compiler */
/* same as gcc, this was present in clang-2.6 so we can assume it works
* with any version that can compile the kernel
*/
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+/* all clang versions usable with the kernel support KASAN ABI version 5 */
+#define KASAN_ABI_VERSION 5
+
+/*
+ * Note: Checking __has_feature(*_sanitizer) is only true if the feature is
+ * enabled. Therefore it is not required to additionally check defined(CONFIG_*)
+ * to avoid adding redundant attributes in other configurations.
+ */
+
+#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
+/* Emulate GCC's __SANITIZE_ADDRESS__ flag */
+#define __SANITIZE_ADDRESS__
+#define __no_sanitize_address \
+ __attribute__((no_sanitize("address", "hwaddress")))
+#else
+#define __no_sanitize_address
+#endif
+
+#if __has_feature(thread_sanitizer)
+/* emulate gcc's __SANITIZE_THREAD__ flag */
+#define __SANITIZE_THREAD__
+#define __no_sanitize_thread \
+ __attribute__((no_sanitize("thread")))
+#else
+#define __no_sanitize_thread
+#endif
+
+#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP)
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#define __HAVE_BUILTIN_BSWAP16__
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#if __has_feature(undefined_behavior_sanitizer)
+/* GCC does not have __SANITIZE_UNDEFINED__ */
+#define __no_sanitize_undefined \
+ __attribute__((no_sanitize("undefined")))
+#else
+#define __no_sanitize_undefined
+#endif
+
+#if __has_feature(memory_sanitizer)
+#define __SANITIZE_MEMORY__
+/*
+ * Unlike other sanitizers, KMSAN still inserts code into functions marked with
+ * no_sanitize("kernel-memory"). Using disable_sanitizer_instrumentation
+ * provides the behavior consistent with other __no_sanitize_ attributes,
+ * guaranteeing that __no_sanitize_memory functions remain uninstrumented.
+ */
+#define __no_sanitize_memory __disable_sanitizer_instrumentation
+
+/*
+ * The __no_kmsan_checks attribute ensures that a function does not produce
+ * false positive reports by:
+ * - initializing all local variables and memory stores in this function;
+ * - skipping all shadow checks;
+ * - passing initialized arguments to this function's callees.
+ */
+#define __no_kmsan_checks __attribute__((no_sanitize("kernel-memory")))
+#else
+#define __no_sanitize_memory
+#define __no_kmsan_checks
+#endif
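+
+/*
+ * Usage sketch (illustrative): annotating a helper that reads memory
+ * KMSAN cannot see initialized (e.g. DMA'd by hardware); the function
+ * name is hypothetical.
+ */
+#if 0 /* example only */
+static __no_kmsan_checks int example_read_hw_word(const int *buf)
+{
+	return buf[0];	/* no shadow checks issued in here */
+}
+#endif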
+
+/*
+ * Support for __has_feature(coverage_sanitizer) was added in Clang 13 together
+ * with no_sanitize("coverage"). Prior versions of Clang support coverage
+ * instrumentation, but cannot be queried for support by the preprocessor.
+ */
+#if __has_feature(coverage_sanitizer)
+#define __no_sanitize_coverage __attribute__((no_sanitize("coverage")))
+#else
+#define __no_sanitize_coverage
+#endif
+
+#if __has_feature(shadow_call_stack)
+# define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
+#endif
+
+#if __has_feature(kcfi)
+/* Disable CFI checking inside a function. */
+#define __nocfi __attribute__((__no_sanitize__("kcfi")))
+#endif
+
+/*
+ * Turn individual warnings and errors on and off locally, depending
+ * on version.
+ */
+#define __diag_clang(version, severity, s) \
+ __diag_clang_ ## version(__diag_clang_ ## severity s)
+
+/* Severity used in pragma directives */
+#define __diag_clang_ignore ignored
+#define __diag_clang_warn warning
+#define __diag_clang_error error
+
+#define __diag_str1(s) #s
+#define __diag_str(s) __diag_str1(s)
+#define __diag(s) _Pragma(__diag_str(clang diagnostic s))
+
+#if CONFIG_CLANG_VERSION >= 110000
+#define __diag_clang_11(s) __diag(s)
+#else
+#define __diag_clang_11(s)
+#endif
+
+#define __diag_ignore_all(option, comment) \
+ __diag_clang(11, ignore, option)
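+
+/*
+ * Usage sketch (illustrative): suppressing one diagnostic around a
+ * block of code, on compilers new enough to know the option.
+ */
+#if 0 /* example only */
+__diag_push();
+__diag_ignore_all("-Wmissing-prototypes",
+		  "example: prototypes are generated elsewhere");
+int example_exported_helper(void) { return 0; }
+__diag_pop();
+#endif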
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index cd4bbe8242bd..7af9e34ec261 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -1,4 +1,5 @@
-#ifndef __LINUX_COMPILER_H
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COMPILER_TYPES_H
#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
#endif
@@ -9,25 +10,6 @@
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
-/* Optimization barrier */
-
-/* The "volatile" is due to gcc bugs */
-#define barrier() __asm__ __volatile__("": : :"memory")
-/*
- * This version is i.e. to prevent dead stores elimination on @ptr
- * where gcc and llvm may behave differently when otherwise using
- * normal barrier(): while gcc behavior gets along with a normal
- * barrier(), llvm needs an explicit input variable to be assumed
- * clobbered. The issue is as follows: while the inline asm might
- * access any memory it wants, the compiler could have fit all of
- * @ptr into memory registers instead, and since @ptr never escaped
- * from that, it proved that the inline asm wasn't touching any of
- * it. This version works well with both compilers, i.e. we're telling
- * the compiler that the inline asm absolutely may see the contents
- * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
- */
-#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
-
/*
* This macro obfuscates arithmetic on a variable address so that gcc
* shouldn't recognize the original var, and make assumptions about it.
@@ -53,276 +35,115 @@
(typeof(ptr)) (__ptr + (off)); \
})
-/* Make the optimizer believe the variable can be manipulated arbitrarily. */
-#define OPTIMIZER_HIDE_VAR(var) \
- __asm__ ("" : "=r" (var) : "0" (var))
-
-#ifdef __CHECKER__
-#define __must_be_array(a) 0
-#else
-/* &a[0] degrades to a pointer: a different type from an array */
-#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
-#endif
-
-/*
- * Force always-inline if the user requests it so via the .config,
- * or if gcc is too old.
- * GCC does not warn about unused static inline functions for
- * -Wunused-function. This turns out to avoid the need for complex #ifdef
- * directives. Suppress the warning in clang as well by using "unused"
- * function attribute, which is redundant but not harmful for gcc.
- */
-#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
- !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-#define inline inline __attribute__((always_inline,unused)) notrace
-#define __inline__ __inline__ __attribute__((always_inline,unused)) notrace
-#define __inline __inline __attribute__((always_inline,unused)) notrace
-#else
-/* A lot of inline functions can cause havoc with function tracing */
-#define inline inline __attribute__((unused)) notrace
-#define __inline__ __inline__ __attribute__((unused)) notrace
-#define __inline __inline __attribute__((unused)) notrace
+#ifdef CONFIG_RETPOLINE
+#define __noretpoline __attribute__((__indirect_branch__("keep")))
#endif
-#define __always_inline inline __attribute__((always_inline))
-#define noinline __attribute__((noinline))
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-#define __deprecated __attribute__((deprecated))
-#define __packed __attribute__((packed))
-#define __weak __attribute__((weak))
-#define __alias(symbol) __attribute__((alias(#symbol)))
+#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
+#define __latent_entropy __attribute__((latent_entropy))
+#endif
/*
- * it doesn't make sense on ARM (currently the only user of __naked)
- * to trace naked functions because then mcount is called without
- * stack and frame pointer being set up and there is no chance to
- * restore the lr register to the value before mcount was called.
+ * calling noreturn functions, __builtin_unreachable() and __builtin_trap()
+ * confuse the stack allocation in gcc, leading to overly large stack
+ * frames, see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365
*
- * The asm() bodies of naked functions often depend on standard calling
- * conventions, therefore they must be noinline and noclone.
- *
- * GCC 4.[56] currently fail to enforce this, so we must do so ourselves.
- * See GCC PR44290.
+ * Adding an empty inline assembly before it works around the problem
*/
-#define __naked __attribute__((naked)) noinline __noclone notrace
-
-#define __noreturn __attribute__((noreturn))
+#define barrier_before_unreachable() asm volatile("")
/*
- * From the GCC manual:
- *
- * Many functions have no effects except the return value and their
- * return value depends only on the parameters and/or global
- * variables. Such a function can be subject to common subexpression
- * elimination and loop optimization just as an arithmetic operator
- * would be.
- * [...]
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
*/
-#define __pure __attribute__((pure))
-#define __aligned(x) __attribute__((aligned(x)))
-#define __aligned_largest __attribute__((aligned))
-#define __printf(a, b) __attribute__((format(printf, a, b)))
-#define __scanf(a, b) __attribute__((format(scanf, a, b)))
-#define __attribute_const__ __attribute__((__const__))
-#define __maybe_unused __attribute__((unused))
-#define __always_unused __attribute__((unused))
-#define __mode(x) __attribute__((mode(x)))
-
-/* gcc version specific checks */
+#define unreachable() \
+ do { \
+ annotate_unreachable(); \
+ barrier_before_unreachable(); \
+ __builtin_unreachable(); \
+ } while (0)
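+
+/*
+ * Usage sketch (illustrative): a function whose final asm never
+ * returns; unreachable() keeps gcc from warning about falling off the
+ * end. The asm body is hypothetical.
+ */
+#if 0 /* example only */
+static __noreturn void example_firmware_reset(void)
+{
+	asm volatile("ud2");	/* trap, never returns */
+	unreachable();
+}
+#endif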
-#if GCC_VERSION < 30200
-# error Sorry, your compiler is too old - please upgrade it.
-#endif
+#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP)
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#define __HAVE_BUILTIN_BSWAP16__
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
-#if GCC_VERSION < 30300
-# define __used __attribute__((__unused__))
+#if GCC_VERSION >= 70000
+#define KASAN_ABI_VERSION 5
#else
-# define __used __attribute__((__used__))
-#endif
-
-#ifdef CONFIG_GCOV_KERNEL
-# if GCC_VERSION < 30400
-# error "GCOV profiling support for gcc versions below 3.4 not included"
-# endif /* __GNUC_MINOR__ */
-#endif /* CONFIG_GCOV_KERNEL */
-
-#if GCC_VERSION >= 30400
-#define __must_check __attribute__((warn_unused_result))
-#define __malloc __attribute__((__malloc__))
-#endif
-
-#if GCC_VERSION >= 40000
-
-/* GCC 4.1.[01] miscompiles __weak */
-#ifdef __KERNEL__
-# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
-# error Your version of gcc miscompiles the __weak directive
-# endif
+#define KASAN_ABI_VERSION 4
#endif
-#define __used __attribute__((__used__))
-#define __compiler_offsetof(a, b) \
- __builtin_offsetof(a, b)
-
-#if GCC_VERSION >= 40100
-# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
+#ifdef CONFIG_SHADOW_CALL_STACK
+#define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
#endif
-#if GCC_VERSION >= 40300
-/* Mark functions as cold. gcc will assume any path leading to a call
- * to them will be unlikely. This means a lot of manual unlikely()s
- * are unnecessary now for any paths leading to the usual suspects
- * like BUG(), printk(), panic() etc. [but let's keep them for now for
- * older compilers]
- *
- * Early snapshots of gcc 4.3 don't support this and we can't detect this
- * in the preprocessor, but we can live with this because they're unreleased.
- * Maketime probing would be overkill here.
- *
- * gcc also has a __attribute__((__hot__)) to move hot functions into
- * a special section, but I don't see any sense in this right now in
- * the kernel context
- */
-#define __cold __attribute__((__cold__))
-
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-
-#ifndef __CHECKER__
-# define __compiletime_warning(message) __attribute__((warning(message)))
-# define __compiletime_error(message) __attribute__((error(message)))
-#endif /* __CHECKER__ */
-#endif /* GCC_VERSION >= 40300 */
-
-#if GCC_VERSION >= 40500
+#define __no_sanitize_address __attribute__((__no_sanitize_address__))
-#ifndef __CHECKER__
-#ifdef LATENT_ENTROPY_PLUGIN
-#define __latent_entropy __attribute__((latent_entropy))
-#endif
-#endif
-
-#ifdef CONFIG_STACK_VALIDATION
-#define annotate_unreachable() ({ \
- asm("%c0:\t\n" \
- ".pushsection .discard.unreachable\t\n" \
- ".long %c0b - .\t\n" \
- ".popsection\t\n" : : "i" (__LINE__)); \
-})
+#if defined(__SANITIZE_THREAD__)
+#define __no_sanitize_thread __attribute__((__no_sanitize_thread__))
#else
-#define annotate_unreachable()
+#define __no_sanitize_thread
#endif
-/*
- * Mark a position in code as unreachable. This can be used to
- * suppress control flow warnings after asm blocks that transfer
- * control elsewhere.
- *
- * Early snapshots of gcc 4.5 don't support this and we can't detect
- * this in the preprocessor, but we can live with this because they're
- * unreleased. Really, we need to have autoconf for the kernel.
- */
-#define unreachable() \
- do { annotate_unreachable(); __builtin_unreachable(); } while (0)
-
-/* Mark a function definition as prohibited from being cloned. */
-#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
-
-#ifdef RANDSTRUCT_PLUGIN
-#define __randomize_layout __attribute__((randomize_layout))
-#define __no_randomize_layout __attribute__((no_randomize_layout))
-#endif
-
-#endif /* GCC_VERSION >= 40500 */
+#define __no_sanitize_undefined __attribute__((__no_sanitize_undefined__))
-#if GCC_VERSION >= 40600
/*
- * When used with Link Time Optimization, gcc can optimize away C functions or
- * variables which are referenced only from assembly code. __visible tells the
- * optimizer that something else uses this function or variable, thus preventing
- * this.
+ * Only supported since gcc >= 12
*/
-#define __visible __attribute__((externally_visible))
+#if defined(CONFIG_KCOV) && __has_attribute(__no_sanitize_coverage__)
+#define __no_sanitize_coverage __attribute__((__no_sanitize_coverage__))
+#else
+#define __no_sanitize_coverage
#endif
-
-#if GCC_VERSION >= 40900 && !defined(__CHECKER__)
/*
- * __assume_aligned(n, k): Tell the optimizer that the returned
- * pointer can be assumed to be k modulo n. The second argument is
- * optional (default 0), so we use a variadic macro to make the
- * shorthand.
- *
- * Beware: Do not apply this to functions which may return
- * ERR_PTRs. Also, it is probably unwise to apply it to functions
- * returning extra information in the low bits (but in that case the
- * compiler should see some alignment anyway, when the return value is
- * massaged by 'flags = ptr & 3; ptr &= ~3;').
+ * Treat __SANITIZE_HWADDRESS__ the same as __SANITIZE_ADDRESS__ in the kernel,
+ * matching the defines used by Clang.
*/
-#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
+#ifdef __SANITIZE_HWADDRESS__
+#define __SANITIZE_ADDRESS__
#endif
/*
- * GCC 'asm goto' miscompiles certain code sequences:
- *
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
- *
- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- *
- * (asm goto is automatically volatile - the naming reflects this.)
- */
-#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-
-/*
- * sparse (__CHECKER__) pretends to be gcc, but can't do constant
- * folding in __builtin_bswap*() (yet), so don't set these for it.
+ * GCC does not support KMSAN.
*/
-#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__)
-#if GCC_VERSION >= 40400
-#define __HAVE_BUILTIN_BSWAP32__
-#define __HAVE_BUILTIN_BSWAP64__
-#endif
-#if GCC_VERSION >= 40800
-#define __HAVE_BUILTIN_BSWAP16__
-#endif
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
+#define __no_sanitize_memory
+#define __no_kmsan_checks
-#if GCC_VERSION >= 70000
-#define KASAN_ABI_VERSION 5
-#elif GCC_VERSION >= 50000
-#define KASAN_ABI_VERSION 4
-#elif GCC_VERSION >= 40902
-#define KASAN_ABI_VERSION 3
-#endif
-
-#if GCC_VERSION >= 40902
/*
- * Tell the compiler that address safety instrumentation (KASAN)
- * should not be applied to that function.
- * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ * Turn individual warnings and errors on and off locally, depending
+ * on version.
*/
-#define __no_sanitize_address __attribute__((no_sanitize_address))
-#endif
+#define __diag_GCC(version, severity, s) \
+ __diag_GCC_ ## version(__diag_GCC_ ## severity s)
-#if GCC_VERSION >= 50100
-/*
- * Mark structures as requiring designated initializers.
- * https://gcc.gnu.org/onlinedocs/gcc/Designated-Inits.html
- */
-#define __designated_init __attribute__((designated_init))
-#endif
+/* Severity used in pragma directives */
+#define __diag_GCC_ignore ignored
+#define __diag_GCC_warn warning
+#define __diag_GCC_error error
-#endif /* gcc version >= 40000 specific checks */
+#define __diag_str1(s) #s
+#define __diag_str(s) __diag_str1(s)
+#define __diag(s) _Pragma(__diag_str(GCC diagnostic s))
-#if !defined(__noclone)
-#define __noclone /* not needed */
+#if GCC_VERSION >= 80000
+#define __diag_GCC_8(s) __diag(s)
+#else
+#define __diag_GCC_8(s)
#endif
-#if !defined(__no_sanitize_address)
-#define __no_sanitize_address
-#endif
+#define __diag_ignore_all(option, comment) \
+ __diag_GCC(8, ignore, option)
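A sketch of the expected usage pattern, assuming the __diag_push()/__diag_pop() wrappers defined later in compiler_types.h; the warning option and justification string are illustrative:

    __diag_push();
    __diag_ignore(GCC, 8, "-Wattribute-alias",
                  "deliberate type aliasing in this stub");
    /* ... definition that would otherwise trip the warning ... */
    __diag_pop();
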
/*
- * A trick to suppress uninitialized variable warning without generating any
- * code
+ * Prior to 9.1, -Wno-alloc-size-larger-than (and therefore the "alloc_size"
+ * attribute) does not work, and must be disabled.
*/
-#define uninitialized_var(x) x = x
+#if GCC_VERSION < 90100
+#undef __alloc_size__
+#endif
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
deleted file mode 100644
index d4c71132d07f..000000000000
--- a/include/linux/compiler-intel.h
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef __LINUX_COMPILER_H
-#error "Please don't include <linux/compiler-intel.h> directly, include <linux/compiler.h> instead."
-#endif
-
-#ifdef __ECC
-
-/* Some compiler specific definitions are overwritten here
- * for Intel ECC compiler
- */
-
-#include <asm/intrinsics.h>
-
-/* Intel ECC compiler doesn't support gcc specific asm stmts.
- * It uses intrinsics to do the equivalent things.
- */
-#undef barrier
-#undef barrier_data
-#undef RELOC_HIDE
-#undef OPTIMIZER_HIDE_VAR
-
-#define barrier() __memory_barrier()
-#define barrier_data(ptr) barrier()
-
-#define RELOC_HIDE(ptr, off) \
- ({ unsigned long __ptr; \
- __ptr = (unsigned long) (ptr); \
- (typeof(ptr)) (__ptr + (off)); })
-
-/* This should act as an optimization barrier on var.
- * Given that this compiler does not have inline assembly, a compiler barrier
- * is the best we can do.
- */
-#define OPTIMIZER_HIDE_VAR(var) barrier()
-
-/* Intel ECC compiler doesn't support __builtin_types_compatible_p() */
-#define __must_be_array(a) 0
-
-#endif
-
-#ifndef __HAVE_BUILTIN_BSWAP16__
-/* icc has this, but it's called _bswap16 */
-#define __HAVE_BUILTIN_BSWAP16__
-#define __builtin_bswap16 _bswap16
-#endif
-
diff --git a/include/linux/compiler-version.h b/include/linux/compiler-version.h
new file mode 100644
index 000000000000..573fa85b6c0c
--- /dev/null
+++ b/include/linux/compiler-version.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifdef __LINUX_COMPILER_VERSION_H
+#error "Please do not include <linux/compiler-version.h>. This is done by the build system."
+#endif
+#define __LINUX_COMPILER_VERSION_H
+
+/*
+ * This header exists to force full rebuild when the compiler is upgraded.
+ *
+ * When fixdep scans this, it will find this string "CONFIG_CC_VERSION_TEXT"
+ * and add a dependency on include/config/CC_VERSION_TEXT, which is touched
+ * by Kconfig when the version string from the compiler changes.
+ */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 219f82f3ec1a..947a60b801db 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -1,111 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H
-#ifndef __ASSEMBLY__
+#include <linux/compiler_types.h>
-#ifdef __CHECKER__
-# define __user __attribute__((noderef, address_space(1)))
-# define __kernel __attribute__((address_space(0)))
-# define __safe __attribute__((safe))
-# define __force __attribute__((force))
-# define __nocast __attribute__((nocast))
-# define __iomem __attribute__((noderef, address_space(2)))
-# define __must_hold(x) __attribute__((context(x,1,1)))
-# define __acquires(x) __attribute__((context(x,0,1)))
-# define __releases(x) __attribute__((context(x,1,0)))
-# define __acquire(x) __context__(x,1)
-# define __release(x) __context__(x,-1)
-# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
-# define __percpu __attribute__((noderef, address_space(3)))
-# define __rcu __attribute__((noderef, address_space(4)))
-# define __private __attribute__((noderef))
-extern void __chk_user_ptr(const volatile void __user *);
-extern void __chk_io_ptr(const volatile void __iomem *);
-# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
-#else /* __CHECKER__ */
-# ifdef STRUCTLEAK_PLUGIN
-# define __user __attribute__((user))
-# else
-# define __user
-# endif
-# define __kernel
-# define __safe
-# define __force
-# define __nocast
-# define __iomem
-# define __chk_user_ptr(x) (void)0
-# define __chk_io_ptr(x) (void)0
-# define __builtin_warning(x, y...) (1)
-# define __must_hold(x)
-# define __acquires(x)
-# define __releases(x)
-# define __acquire(x) (void)0
-# define __release(x) (void)0
-# define __cond_lock(x,c) (c)
-# define __percpu
-# define __rcu
-# define __private
-# define ACCESS_PRIVATE(p, member) ((p)->member)
-#endif /* __CHECKER__ */
-
-/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
-#define ___PASTE(a,b) a##b
-#define __PASTE(a,b) ___PASTE(a,b)
+#ifndef __ASSEMBLY__
#ifdef __KERNEL__
-#ifdef __GNUC__
-#include <linux/compiler-gcc.h>
-#endif
-
-#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
-#define notrace __attribute__((hotpatch(0,0)))
-#else
-#define notrace __attribute__((no_instrument_function))
-#endif
-
-/* Intel compiler defines __GNUC__. So we will overwrite implementations
- * coming from above header files here
- */
-#ifdef __INTEL_COMPILER
-# include <linux/compiler-intel.h>
-#endif
-
-/* Clang compiler defines __GNUC__. So we will overwrite implementations
- * coming from above header files here
- */
-#ifdef __clang__
-#include <linux/compiler-clang.h>
-#endif
-
-/*
- * Generic compiler-dependent macros required for kernel
- * build go below this comment. Actual compiler/compiler version
- * specific implementations come from the above header files
- */
-
-struct ftrace_branch_data {
- const char *func;
- const char *file;
- unsigned line;
- union {
- struct {
- unsigned long correct;
- unsigned long incorrect;
- };
- struct {
- unsigned long miss;
- unsigned long hit;
- };
- unsigned long miss_hit[2];
- };
-};
-
-struct ftrace_likely_data {
- struct ftrace_branch_data data;
- unsigned long constant;
-};
-
/*
* Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
* to disable branch tracing on a per file basis.
@@ -119,10 +21,10 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
#define __branch_check__(x, expect, is_constant) ({ \
- int ______r; \
+ long ______r; \
static struct ftrace_likely_data \
- __attribute__((__aligned__(4))) \
- __attribute__((section("_ftrace_annotated_branch"))) \
+ __aligned(4) \
+ __section("_ftrace_annotated_branch") \
______f = { \
.data.func = __func__, \
.data.file = __FILE__, \
@@ -151,42 +53,91 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
* "Define 'is'", Bill Clinton
* "Define 'if'", Steven Rostedt
*/
-#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
-#define __trace_if(cond) \
- if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
- ({ \
- int ______r; \
- static struct ftrace_branch_data \
- __attribute__((__aligned__(4))) \
- __attribute__((section("_ftrace_branch"))) \
- ______f = { \
- .func = __func__, \
- .file = __FILE__, \
- .line = __LINE__, \
- }; \
- ______r = !!(cond); \
- ______f.miss_hit[______r]++; \
- ______r; \
- }))
+#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )
+
+#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))
+
+#define __trace_if_value(cond) ({ \
+ static struct ftrace_branch_data \
+ __aligned(4) \
+ __section("_ftrace_branch") \
+ __if_trace = { \
+ .func = __func__, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ }; \
+ (cond) ? \
+ (__if_trace.miss_hit[1]++,1) : \
+ (__if_trace.miss_hit[0]++,0); \
+})
+
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
#else
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
+# define likely_notrace(x) likely(x)
+# define unlikely_notrace(x) unlikely(x)
#endif
/* Optimization barrier */
#ifndef barrier
-# define barrier() __memory_barrier()
+/* The "volatile" is due to gcc bugs */
+# define barrier() __asm__ __volatile__("": : :"memory")
#endif
#ifndef barrier_data
-# define barrier_data(ptr) barrier()
+/*
+ * This version is used, e.g., to prevent dead-store elimination on @ptr,
+ * where gcc and llvm may behave differently when otherwise using a
+ * normal barrier(): while gcc gets along with a normal
+ * barrier(), llvm needs an explicit input variable to be assumed
+ * clobbered. The issue is as follows: while the inline asm might
+ * access any memory it wants, the compiler could have fit all of
+ * @ptr into memory registers instead, and since @ptr never escaped
+ * from that, it proved that the inline asm wasn't touching any of
+ * it. This version works well with both compilers, i.e. we're telling
+ * the compiler that the inline asm absolutely may see the contents
+ * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
+ */
+# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
+#endif
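A short sketch, modeled on the kernel's memzero_explicit(): without the "r"(ptr) input operand, LLVM could prove a dying buffer unread and elide the clearing stores (memset() assumed from linux/string.h).

    static void wipe_secret(void *buf, size_t len)
    {
            memset(buf, 0, len);
            barrier_data(buf);      /* keep the stores to 'buf' alive */
    }
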
+
+/* workaround for GCC PR82365 if needed */
+#ifndef barrier_before_unreachable
+# define barrier_before_unreachable() do { } while (0)
#endif
/* Unreachable code */
+#ifdef CONFIG_OBJTOOL
+/*
+ * These macros help objtool understand GCC code flow for unreachable code.
+ * The __COUNTER__ based labels are a hack to make each instance of the macros
+ * unique, to convince GCC not to merge duplicate inline asm statements.
+ */
+#define __stringify_label(n) #n
+
+#define __annotate_unreachable(c) ({ \
+ asm volatile(__stringify_label(c) ":\n\t" \
+ ".pushsection .discard.unreachable\n\t" \
+ ".long " __stringify_label(c) "b - .\n\t" \
+ ".popsection\n\t" : : "i" (c)); \
+})
+#define annotate_unreachable() __annotate_unreachable(__COUNTER__)
+
+/* Annotate a C jump table to allow objtool to follow the code flow */
+#define __annotate_jump_table __section(".rodata..c_jump_table")
+
+#else /* !CONFIG_OBJTOOL */
+#define annotate_unreachable()
+#define __annotate_jump_table
+#endif /* CONFIG_OBJTOOL */
+
#ifndef unreachable
-# define unreachable() do { } while (1)
+# define unreachable() do { \
+ annotate_unreachable(); \
+ __builtin_unreachable(); \
+} while (0)
#endif
/*
@@ -208,7 +159,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
extern typeof(sym) sym; \
static const unsigned long __kentry_##sym \
__used \
- __attribute__((section("___kentry" "+" #sym ), used)) \
+ __attribute__((__section__("___kentry+" #sym))) \
= (unsigned long)&sym;
#endif
@@ -219,8 +170,12 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
(typeof(ptr)) (__ptr + (off)); })
#endif
+#define absolute_pointer(val) RELOC_HIDE((void *)(val), 0)
+
#ifndef OPTIMIZER_HIDE_VAR
-#define OPTIMIZER_HIDE_VAR(var) barrier()
+/* Make the optimizer believe the variable can be manipulated arbitrarily. */
+#define OPTIMIZER_HIDE_VAR(var) \
+ __asm__ ("" : "=r" (var) : "0" (var))
#endif
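A hedged illustration (the variable and helper names are invented): laundering a value through the empty asm stops the optimizer from constant-folding it or proving anything about its value.

    unsigned int reps = 16;
    OPTIMIZER_HIDE_VAR(reps);       /* 'reps' is now opaque to the optimizer */
    while (reps--)
            poll_hw();              /* hypothetical; loop cannot be folded away */
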
/* Not-quite-unique ID. */
@@ -228,361 +183,67 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
-#include <uapi/linux/types.h>
-
-#define __READ_ONCE_SIZE \
-({ \
- switch (size) { \
- case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
- case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
- case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
- case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
- default: \
- barrier(); \
- __builtin_memcpy((void *)res, (const void *)p, size); \
- barrier(); \
- } \
-})
-
-static __always_inline
-void __read_once_size(const volatile void *p, void *res, int size)
-{
- __READ_ONCE_SIZE;
-}
-
-#ifdef CONFIG_KASAN
-/*
- * This function is not 'inline' because __no_sanitize_address confilcts
- * with inlining. Attempt to inline it may cause a build failure.
- * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
- * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
- */
-static __no_sanitize_address __maybe_unused
-void __read_once_size_nocheck(const volatile void *p, void *res, int size)
-{
- __READ_ONCE_SIZE;
-}
-#else
-static __always_inline
-void __read_once_size_nocheck(const volatile void *p, void *res, int size)
-{
- __READ_ONCE_SIZE;
-}
-#endif
-
-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
-{
- switch (size) {
- case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
- case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
- case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
- case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
- default:
- barrier();
- __builtin_memcpy((void *)p, (const void *)res, size);
- barrier();
- }
-}
-
-/*
- * Prevent the compiler from merging or refetching reads or writes. The
- * compiler is also forbidden from reordering successive instances of
- * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
- * compiler is aware of some particular ordering. One way to make the
- * compiler aware of ordering is to put the two invocations of READ_ONCE,
- * WRITE_ONCE or ACCESS_ONCE() in different C statements.
+/**
+ * data_race - mark an expression as containing intentional data races
*
- * In contrast to ACCESS_ONCE these two macros will also work on aggregate
- * data types like structs or unions. If the size of the accessed data
- * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
- * least two memcpy()s: one for the __builtin_memcpy() and then one for
- * the macro doing the copy of variable - '__u' allocated on the stack.
+ * This data_race() macro is useful for situations in which data races
+ * should be forgiven. One example is diagnostic code that accesses
+ * shared variables but is not a part of the core synchronization design.
*
- * Their two major use cases are: (1) Mediating communication between
- * process-level code and irq/NMI handlers, all running on the same CPU,
- * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
- * mutilate accesses that either do not require ordering or that interact
- * with an explicit memory barrier or atomic instruction that provides the
- * required ordering.
+ * This macro *does not* affect normal code generation, but is a hint
+ * to tooling that data races here are to be ignored.
*/
-
-#define __READ_ONCE(x, check) \
+#define data_race(expr) \
({ \
- union { typeof(x) __val; char __c[1]; } __u; \
- if (check) \
- __read_once_size(&(x), __u.__c, sizeof(x)); \
- else \
- __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
- __u.__val; \
+ __unqual_scalar_typeof(({ expr; })) __v = ({ \
+ __kcsan_disable_current(); \
+ expr; \
+ }); \
+ __kcsan_enable_current(); \
+ __v; \
})
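For example (a sketch; the 'q->pending' field is invented), a purely diagnostic read of a normally lock-protected counter can be wrapped so KCSAN does not flag it:

    pr_debug("backlog: %lu\n", data_race(q->pending));
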
-#define READ_ONCE(x) __READ_ONCE(x, 1)
-
-/*
- * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
- * to hide memory access from KASAN.
- */
-#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
-
-#define WRITE_ONCE(x, val) \
-({ \
- union { typeof(x) __val; char __c[1]; } __u = \
- { .__val = (__force typeof(x)) (val) }; \
- __write_once_size(&(x), __u.__c, sizeof(x)); \
- __u.__val; \
-})
-
-#endif /* __KERNEL__ */
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef __KERNEL__
-/*
- * Allow us to mark functions as 'deprecated' and have gcc emit a nice
- * warning for each use, in hopes of speeding the functions removal.
- * Usage is:
- * int __deprecated foo(void)
- */
-#ifndef __deprecated
-# define __deprecated /* unimplemented */
-#endif
-
-#ifdef MODULE
-#define __deprecated_for_modules __deprecated
-#else
-#define __deprecated_for_modules
-#endif
-
-#ifndef __must_check
-#define __must_check
-#endif
-
-#ifndef CONFIG_ENABLE_MUST_CHECK
-#undef __must_check
-#define __must_check
-#endif
-#ifndef CONFIG_ENABLE_WARN_DEPRECATED
-#undef __deprecated
-#undef __deprecated_for_modules
-#define __deprecated
-#define __deprecated_for_modules
-#endif
-
-#ifndef __malloc
-#define __malloc
-#endif
-
-/*
- * Allow us to avoid 'defined but not used' warnings on functions and data,
- * as well as force them to be emitted to the assembly file.
- *
- * As of gcc 3.4, static functions that are not marked with attribute((used))
- * may be elided from the assembly file. As of gcc 3.4, static data not so
- * marked will not be elided, but this may change in a future gcc version.
- *
- * NOTE: Because distributions shipped with a backported unit-at-a-time
- * compiler in gcc 3.3, we must define __used to be __attribute__((used))
- * for gcc >=3.3 instead of 3.4.
- *
- * In prior versions of gcc, such functions and data would be emitted, but
- * would be warned about except with attribute((unused)).
- *
- * Mark functions that are referenced only in inline assembly as __used so
- * the code is emitted even though it appears to be unreferenced.
- */
-#ifndef __used
-# define __used /* unimplemented */
-#endif
-
-#ifndef __maybe_unused
-# define __maybe_unused /* unimplemented */
-#endif
-
-#ifndef __always_unused
-# define __always_unused /* unimplemented */
-#endif
-
-#ifndef noinline
-#define noinline
-#endif
-
-/*
- * Rather then using noinline to prevent stack consumption, use
- * noinline_for_stack instead. For documentation reasons.
- */
-#define noinline_for_stack noinline
-
-#ifndef __always_inline
-#define __always_inline inline
-#endif
#endif /* __KERNEL__ */
/*
- * From the GCC manual:
- *
- * Many functions do not examine any values except their arguments,
- * and have no effects except the return value. Basically this is
- * just slightly more strict class than the `pure' attribute above,
- * since function is not allowed to read global memory.
- *
- * Note that a function that has pointer arguments and examines the
- * data pointed to must _not_ be declared `const'. Likewise, a
- * function that calls a non-`const' function usually must not be
- * `const'. It does not make sense for a `const' function to return
- * `void'.
+ * Force the compiler to emit 'sym' as a symbol, so that we can reference
+ * it from inline assembler. Necessary in case 'sym' could be inlined
+ * otherwise, or eliminated entirely due to lack of references that are
+ * visible to the compiler.
*/
-#ifndef __attribute_const__
-# define __attribute_const__ /* unimplemented */
-#endif
-
-#ifndef __designated_init
-# define __designated_init
-#endif
-
-#ifndef __latent_entropy
-# define __latent_entropy
-#endif
-
-#ifndef __randomize_layout
-# define __randomize_layout __designated_init
-#endif
+#define ___ADDRESSABLE(sym, __attrs) \
+ static void * __used __attrs \
+ __UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym;
+#define __ADDRESSABLE(sym) \
+ ___ADDRESSABLE(sym, __section(".discard.addressable"))
-#ifndef __no_randomize_layout
-# define __no_randomize_layout
-#endif
-
-/*
- * Tell gcc if a function is cold. The compiler will assume any path
- * directly leading to the call is unlikely.
- */
-
-#ifndef __cold
-#define __cold
-#endif
-
-/* Simple shorthand for a section definition */
-#ifndef __section
-# define __section(S) __attribute__ ((__section__(#S)))
-#endif
-
-#ifndef __visible
-#define __visible
-#endif
-
-/*
- * Assume alignment of return value.
+/**
+ * offset_to_ptr - convert a relative memory offset to an absolute pointer
+ * @off: the address of the 32-bit offset value
*/
-#ifndef __assume_aligned
-#define __assume_aligned(a, ...)
-#endif
-
+static inline void *offset_to_ptr(const int *off)
+{
+ return (void *)((unsigned long)off + *off);
+}
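A sketch of the intended use: position-independent tables store 'target - slot' in each 32-bit entry, and offset_to_ptr() recovers the absolute address (the names below are illustrative).

    extern const int handler_offsets[];     /* each entry: target - &entry */
    void *handler = offset_to_ptr(&handler_offsets[idx]);
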
-/* Are two types/vars the same type (ignoring qualifiers)? */
-#ifndef __same_type
-# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
-#endif
+#endif /* __ASSEMBLY__ */
-/* Is this type a native word size -- useful for atomic operations */
-#ifndef __native_word
-# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
-#endif
+/* &a[0] degrades to a pointer: a different type from an array */
+#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
-/* Compile time object size, -1 for unknown */
-#ifndef __compiletime_object_size
-# define __compiletime_object_size(obj) -1
-#endif
-#ifndef __compiletime_warning
-# define __compiletime_warning(message)
-#endif
-#ifndef __compiletime_error
-# define __compiletime_error(message)
/*
- * Sparse complains of variable sized arrays due to the temporary variable in
- * __compiletime_assert. Unfortunately we can't just expand it out to make
- * sparse see a constant array size without breaking compiletime_assert on old
- * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
+ * Whether 'type' is a signed type or an unsigned type. Supports scalar types,
+ * bool and also pointer types.
*/
-# ifndef __CHECKER__
-# define __compiletime_error_fallback(condition) \
- do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
-# endif
-#endif
-#ifndef __compiletime_error_fallback
-# define __compiletime_error_fallback(condition) do { } while (0)
-#endif
-
-#define __compiletime_assert(condition, msg, prefix, suffix) \
- do { \
- bool __cond = !(condition); \
- extern void prefix ## suffix(void) __compiletime_error(msg); \
- if (__cond) \
- prefix ## suffix(); \
- __compiletime_error_fallback(__cond); \
- } while (0)
-
-#define _compiletime_assert(condition, msg, prefix, suffix) \
- __compiletime_assert(condition, msg, prefix, suffix)
-
-/**
- * compiletime_assert - break build and emit msg if condition is false
- * @condition: a compile-time constant condition to check
- * @msg: a message to emit if condition is false
- *
- * In tradition of POSIX assert, this macro will break the build if the
- * supplied condition is *false*, emitting the supplied error message if the
- * compiler has support to do so.
- */
-#define compiletime_assert(condition, msg) \
- _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
-
-#define compiletime_assert_atomic_type(t) \
- compiletime_assert(__native_word(t), \
- "Need native word sized stores/loads for atomicity.")
+#define is_signed_type(type) (((type)(-1)) < (__force type)1)
+#define is_unsigned_type(type) (!is_signed_type(type))
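A few illustrative results, following from the definition above (pointers compare as unsigned because (type)(-1) converts to the highest address):

    is_signed_type(int)             /* 1 */
    is_signed_type(unsigned char)   /* 0 */
    is_signed_type(bool)            /* 0: (bool)-1 is 'true', which is not < 1 */
    is_unsigned_type(void *)        /* 1 */
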
/*
- * Prevent the compiler from merging or refetching accesses. The compiler
- * is also forbidden from reordering successive instances of ACCESS_ONCE(),
- * but only when the compiler is aware of some particular ordering. One way
- * to make the compiler aware of ordering is to put the two invocations of
- * ACCESS_ONCE() in different C statements.
- *
- * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
- * on a union member will work as long as the size of the member matches the
- * size of the union and the size is smaller than word size.
- *
- * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
- * between process-level code and irq/NMI handlers, all running on the same CPU,
- * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
- * mutilate accesses that either do not require ordering or that interact
- * with an explicit memory barrier or atomic instruction that provides the
- * required ordering.
- *
- * If possible use READ_ONCE()/WRITE_ONCE() instead.
+ * This is needed in functions which generate the stack canary, see
+ * arch/x86/kernel/smpboot.c::start_secondary() for an example.
*/
-#define __ACCESS_ONCE(x) ({ \
- __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
- (volatile typeof(x) *)&(x); })
-#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
+#define prevent_tail_call_optimization() mb()
-/**
- * lockless_dereference() - safely load a pointer for later dereference
- * @p: The pointer to load
- *
- * Similar to rcu_dereference(), but for situations where the pointed-to
- * object's lifetime is managed by something other than RCU. That
- * "something other" might be reference counting or simple immortality.
- *
- * The seemingly unused variable ___typecheck_p validates that @p is
- * indeed a pointer type by using a pointer to typeof(*p) as the type.
- * Taking a pointer to typeof(*p) again is needed in case p is void *.
- */
-#define lockless_dereference(p) \
-({ \
- typeof(p) _________p1 = READ_ONCE(p); \
- typeof(*(p)) *___typecheck_p __maybe_unused; \
- smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
- (_________p1); \
-})
+#include <asm/rwonce.h>
#endif /* __LINUX_COMPILER_H */
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
new file mode 100644
index 000000000000..e659cb6fded3
--- /dev/null
+++ b/include/linux/compiler_attributes.h
@@ -0,0 +1,369 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COMPILER_ATTRIBUTES_H
+#define __LINUX_COMPILER_ATTRIBUTES_H
+
+/*
+ * The attributes in this file are unconditionally defined and they directly
+ * map to compiler attribute(s), unless one of the compilers does not support
+ * the attribute. In that case, __has_attribute is used to check for support
+ * and the reason is stated in its comment ("Optional: ...").
+ *
+ * Any other "attributes" (i.e. those that depend on a configuration option,
+ * on a compiler, on an architecture, on plugins, on other attributes...)
+ * should be defined elsewhere (e.g. compiler_types.h or compiler-*.h).
+ * The intention is to keep this file as simple as possible, as well as
+ * compiler- and version-agnostic (e.g. avoiding GCC_VERSION checks).
+ *
+ * This file is meant to be sorted (by actual attribute name,
+ * not by #define identifier). Use the __attribute__((__name__)) syntax
+ * (i.e. with underscores) to avoid future collisions with other macros.
+ * Provide links to the documentation of each supported compiler, if it exists.
+ */
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alias-function-attribute
+ */
+#define __alias(symbol) __attribute__((__alias__(#symbol)))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-aligned-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-aligned-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-aligned-variable-attribute
+ */
+#define __aligned(x) __attribute__((__aligned__(x)))
+#define __aligned_largest __attribute__((__aligned__))
+
+/*
+ * Note: do not use this directly. Instead, use __alloc_size() since it is conditionally
+ * available and includes other attributes. For GCC < 9.1, __alloc_size__ gets undefined
+ * in compiler-gcc.h, due to misbehaviors.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alloc_005fsize-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#alloc-size
+ */
+#define __alloc_size__(x, ...) __attribute__((__alloc_size__(x, ## __VA_ARGS__)))
+
+/*
+ * Note: users of __always_inline currently do not write "inline" themselves,
+ * which seems to be required by gcc to apply the attribute according
+ * to its docs (and also "warning: always_inline function might not be
+ * inlinable [-Wattributes]" is emitted).
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-always_005finline-function-attribute
+ * clang: mentioned
+ */
+#define __always_inline inline __attribute__((__always_inline__))
+
+/*
+ * The second argument is optional (default 0), so we use a variadic macro
+ * to make the shorthand.
+ *
+ * Beware: Do not apply this to functions which may return
+ * ERR_PTRs. Also, it is probably unwise to apply it to functions
+ * returning extra information in the low bits (but in that case the
+ * compiler should see some alignment anyway, when the return value is
+ * massaged by 'flags = ptr & 3; ptr &= ~3;').
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-assume_005faligned-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#assume-aligned
+ */
+#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
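A hedged sketch of declarations using it (the allocators below are hypothetical); the attribute promises callers a 64-byte-aligned result, optionally at a fixed misalignment:

    void *grab_dma_buf(size_t len) __assume_aligned(64);
    void *grab_after_hdr(size_t len) __assume_aligned(64, 8);
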
+
+/*
+ * Note the long name.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-const-function-attribute
+ */
+#define __attribute_const__ __attribute__((__const__))
+
+/*
+ * Optional: only supported since gcc >= 9
+ * Optional: not supported by clang
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-copy-function-attribute
+ */
+#if __has_attribute(__copy__)
+# define __copy(symbol) __attribute__((__copy__(symbol)))
+#else
+# define __copy(symbol)
+#endif
+
+/*
+ * Optional: not supported by gcc
+ * Optional: only supported since clang >= 14.0
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#diagnose_as_builtin
+ */
+#if __has_attribute(__diagnose_as_builtin__)
+# define __diagnose_as(builtin...) __attribute__((__diagnose_as_builtin__(builtin)))
+#else
+# define __diagnose_as(builtin...)
+#endif
+
+/*
+ * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated'
+ * attribute warnings entirely and for good") for more information.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-deprecated-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-deprecated-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-deprecated-variable-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Enumerator-Attributes.html#index-deprecated-enumerator-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#deprecated
+ */
+#define __deprecated
+
+/*
+ * Optional: not supported by clang
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-designated_005finit-type-attribute
+ */
+#if __has_attribute(__designated_init__)
+# define __designated_init __attribute__((__designated_init__))
+#else
+# define __designated_init
+#endif
+
+/*
+ * Optional: only supported since clang >= 14.0
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-error-function-attribute
+ */
+#if __has_attribute(__error__)
+# define __compiletime_error(msg) __attribute__((__error__(msg)))
+#else
+# define __compiletime_error(msg)
+#endif
+
+/*
+ * Optional: not supported by clang
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-externally_005fvisible-function-attribute
+ */
+#if __has_attribute(__externally_visible__)
+# define __visible __attribute__((__externally_visible__))
+#else
+# define __visible
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-format-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#format
+ */
+#define __printf(a, b) __attribute__((__format__(printf, a, b)))
+#define __scanf(a, b) __attribute__((__format__(scanf, a, b)))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-gnu_005finline-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#gnu-inline
+ */
+#define __gnu_inline __attribute__((__gnu_inline__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-malloc-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#malloc
+ */
+#define __malloc __attribute__((__malloc__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-mode-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-mode-variable-attribute
+ */
+#define __mode(x) __attribute__((__mode__(x)))
+
+/*
+ * Optional: only supported since gcc >= 7
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/x86-Function-Attributes.html#index-no_005fcaller_005fsaved_005fregisters-function-attribute_002c-x86
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#no-caller-saved-registers
+ */
+#if __has_attribute(__no_caller_saved_registers__)
+# define __no_caller_saved_registers __attribute__((__no_caller_saved_registers__))
+#else
+# define __no_caller_saved_registers
+#endif
+
+/*
+ * Optional: not supported by clang
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noclone-function-attribute
+ */
+#if __has_attribute(__noclone__)
+# define __noclone __attribute__((__noclone__))
+#else
+# define __noclone
+#endif
+
+/*
+ * Add the pseudo keyword 'fallthrough' so case statement blocks
+ * must end with any of these keywords:
+ * break;
+ * fallthrough;
+ * continue;
+ * goto <label>;
+ * return [expression];
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Statement-Attributes.html#Statement-Attributes
+ */
+#if __has_attribute(__fallthrough__)
+# define fallthrough __attribute__((__fallthrough__))
+#else
+# define fallthrough do {} while (0) /* fallthrough */
+#endif
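A minimal usage sketch (the states and helpers are invented): the pseudo keyword marks a deliberate fall-through so -Wimplicit-fallthrough stays quiet.

    switch (state) {
    case STATE_INIT:
            setup();
            fallthrough;
    case STATE_RUN:
            run();
            break;
    }
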
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#Common-Function-Attributes
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#flatten
+ */
+#define __flatten __attribute__((__flatten__))
+
+/*
+ * Note the missing underscores.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noinline-function-attribute
+ * clang: mentioned
+ */
+#define noinline __attribute__((__noinline__))
+
+/*
+ * Optional: only supported since gcc >= 8
+ * Optional: not supported by clang
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-nonstring-variable-attribute
+ */
+#if __has_attribute(__nonstring__)
+# define __nonstring __attribute__((__nonstring__))
+#else
+# define __nonstring
+#endif
+
+/*
+ * Optional: only supported since GCC >= 7.1, clang >= 13.0.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-no_005fprofile_005finstrument_005ffunction-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#no-profile-instrument-function
+ */
+#if __has_attribute(__no_profile_instrument_function__)
+# define __no_profile __attribute__((__no_profile_instrument_function__))
+#else
+# define __no_profile
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noreturn-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#noreturn
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#id1
+ */
+#define __noreturn __attribute__((__noreturn__))
+
+/*
+ * Optional: not supported by gcc.
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#overloadable
+ */
+#if __has_attribute(__overloadable__)
+# define __overloadable __attribute__((__overloadable__))
+#else
+# define __overloadable
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-packed-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-packed-variable-attribute
+ */
+#define __packed __attribute__((__packed__))
+
+/*
+ * Note: the "type" argument should match any __builtin_object_size(p, type) usage.
+ *
+ * Optional: not supported by gcc.
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#pass-object-size-pass-dynamic-object-size
+ */
+#if __has_attribute(__pass_dynamic_object_size__)
+# define __pass_dynamic_object_size(type) __attribute__((__pass_dynamic_object_size__(type)))
+#else
+# define __pass_dynamic_object_size(type)
+#endif
+#if __has_attribute(__pass_object_size__)
+# define __pass_object_size(type) __attribute__((__pass_object_size__(type)))
+#else
+# define __pass_object_size(type)
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-pure-function-attribute
+ */
+#define __pure __attribute__((__pure__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-section-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-section-variable-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#section-declspec-allocate
+ */
+#define __section(section) __attribute__((__section__(section)))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-unused-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-unused-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-unused-variable-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-unused-label-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#maybe-unused-unused
+ */
+#define __always_unused __attribute__((__unused__))
+#define __maybe_unused __attribute__((__unused__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-used-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-used-variable-attribute
+ */
+#define __used __attribute__((__used__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warn_005funused_005fresult-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#nodiscard-warn-unused-result
+ */
+#define __must_check __attribute__((__warn_unused_result__))
+
+/*
+ * Optional: only supported since clang >= 14.0
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warning-function-attribute
+ */
+#if __has_attribute(__warning__)
+# define __compiletime_warning(msg) __attribute__((__warning__(msg)))
+#else
+# define __compiletime_warning(msg)
+#endif
+
+/*
+ * Optional: only supported since clang >= 14.0
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#disable-sanitizer-instrumentation
+ *
+ * disable_sanitizer_instrumentation is not always similar to
+ * no_sanitize((<sanitizer-name>)): the latter may still let specific sanitizers
+ * insert code into functions to prevent false positives. Unlike that,
+ * disable_sanitizer_instrumentation prevents all kinds of instrumentation to
+ * functions with the attribute.
+ */
+#if __has_attribute(disable_sanitizer_instrumentation)
+# define __disable_sanitizer_instrumentation \
+ __attribute__((disable_sanitizer_instrumentation))
+#else
+# define __disable_sanitizer_instrumentation
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-weak-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-weak-variable-attribute
+ */
+#define __weak __attribute__((__weak__))
+
+/*
+ * Used by functions that use '__builtin_return_address'. These functions
+ * must not be split or inlined, since that can make
+ * '__builtin_return_address' return an unexpected address.
+ */
+#define __fix_address noinline __noclone
+
+#endif /* __LINUX_COMPILER_ATTRIBUTES_H */
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
new file mode 100644
index 000000000000..547ea1ff806e
--- /dev/null
+++ b/include/linux/compiler_types.h
@@ -0,0 +1,426 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COMPILER_TYPES_H
+#define __LINUX_COMPILER_TYPES_H
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Skipped when running bindgen due to a libclang issue;
+ * see https://github.com/rust-lang/rust-bindgen/issues/2244.
+ */
+#if defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG) && \
+ __has_attribute(btf_type_tag) && !defined(__BINDGEN__)
+# define BTF_TYPE_TAG(value) __attribute__((btf_type_tag(#value)))
+#else
+# define BTF_TYPE_TAG(value) /* nothing */
+#endif
+
+/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */
+#ifdef __CHECKER__
+/* address spaces */
+# define __kernel __attribute__((address_space(0)))
+# define __user __attribute__((noderef, address_space(__user)))
+# define __iomem __attribute__((noderef, address_space(__iomem)))
+# define __percpu __attribute__((noderef, address_space(__percpu)))
+# define __rcu __attribute__((noderef, address_space(__rcu)))
+static inline void __chk_user_ptr(const volatile void __user *ptr) { }
+static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
+/* context/locking */
+# define __must_hold(x) __attribute__((context(x,1,1)))
+# define __acquires(x) __attribute__((context(x,0,1)))
+# define __cond_acquires(x) __attribute__((context(x,0,-1)))
+# define __releases(x) __attribute__((context(x,1,0)))
+# define __acquire(x) __context__(x,1)
+# define __release(x) __context__(x,-1)
+# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
+/* other */
+# define __force __attribute__((force))
+# define __nocast __attribute__((nocast))
+# define __safe __attribute__((safe))
+# define __private __attribute__((noderef))
+# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
+#else /* __CHECKER__ */
+/* address spaces */
+# define __kernel
+# ifdef STRUCTLEAK_PLUGIN
+# define __user __attribute__((user))
+# else
+# define __user BTF_TYPE_TAG(user)
+# endif
+# define __iomem
+# define __percpu BTF_TYPE_TAG(percpu)
+# define __rcu BTF_TYPE_TAG(rcu)
+
+# define __chk_user_ptr(x) (void)0
+# define __chk_io_ptr(x) (void)0
+/* context/locking */
+# define __must_hold(x)
+# define __acquires(x)
+# define __cond_acquires(x)
+# define __releases(x)
+# define __acquire(x) (void)0
+# define __release(x) (void)0
+# define __cond_lock(x,c) (c)
+/* other */
+# define __force
+# define __nocast
+# define __safe
+# define __private
+# define ACCESS_PRIVATE(p, member) ((p)->member)
+# define __builtin_warning(x, y...) (1)
+#endif /* __CHECKER__ */
+
+/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
+#define ___PASTE(a,b) a##b
+#define __PASTE(a,b) ___PASTE(a,b)
+
+#ifdef __KERNEL__
+
+/* Attributes */
+#include <linux/compiler_attributes.h>
+
+#if CONFIG_FUNCTION_ALIGNMENT > 0
+#define __function_aligned __aligned(CONFIG_FUNCTION_ALIGNMENT)
+#else
+#define __function_aligned
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute
+ *
+ * When -falign-functions=N is in use, we must avoid the cold attribute as
+ * contemporary versions of GCC drop the alignment for cold functions. Worse,
+ * GCC can implicitly mark callees of cold functions as cold themselves, so
+ * it's not sufficient to add __function_aligned here as that will not ensure
+ * that callees are correctly aligned.
+ *
+ * See:
+ *
+ * https://lore.kernel.org/lkml/Y77%2FqVgvaJidFpYt@FVFF77S0Q05N
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345#c9
+ */
+#if !defined(CONFIG_CC_IS_GCC) || (CONFIG_FUNCTION_ALIGNMENT == 0)
+#define __cold __attribute__((__cold__))
+#else
+#define __cold
+#endif
+
+/* Builtins */
+
+/*
+ * __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21.
+ * In the meantime, to support gcc < 10, we implement __has_builtin
+ * by hand.
+ */
+#ifndef __has_builtin
+#define __has_builtin(x) (0)
+#endif
+
+/* Compiler specific macros. */
+#ifdef __clang__
+#include <linux/compiler-clang.h>
+#elif defined(__GNUC__)
+/* The above compilers also define __GNUC__, so order is important here. */
+#include <linux/compiler-gcc.h>
+#else
+#error "Unknown compiler"
+#endif
+
+/*
+ * Some architectures need to provide custom definitions of macros provided
+ * by linux/compiler-*.h, and can do so using asm/compiler.h. We include that
+ * conditionally rather than using an asm-generic wrapper in order to avoid
+ * build failures if any C compilation, which will include this file via an
+ * -include argument in c_flags, occurs prior to the asm-generic wrappers being
+ * generated.
+ */
+#ifdef CONFIG_HAVE_ARCH_COMPILER_H
+#include <asm/compiler.h>
+#endif
+
+struct ftrace_branch_data {
+ const char *func;
+ const char *file;
+ unsigned line;
+ union {
+ struct {
+ unsigned long correct;
+ unsigned long incorrect;
+ };
+ struct {
+ unsigned long miss;
+ unsigned long hit;
+ };
+ unsigned long miss_hit[2];
+ };
+};
+
+struct ftrace_likely_data {
+ struct ftrace_branch_data data;
+ unsigned long constant;
+};
+
+#if defined(CC_USING_HOTPATCH)
+#define notrace __attribute__((hotpatch(0, 0)))
+#elif defined(CC_USING_PATCHABLE_FUNCTION_ENTRY)
+#define notrace __attribute__((patchable_function_entry(0, 0)))
+#else
+#define notrace __attribute__((__no_instrument_function__))
+#endif
+
+/*
+ * It doesn't make sense on ARM (currently the only user of __naked)
+ * to trace naked functions because then mcount is called without
+ * stack and frame pointer being set up and there is no chance to
+ * restore the lr register to the value before mcount was called.
+ */
+#define __naked __attribute__((__naked__)) notrace
+
+/*
+ * Prefer gnu_inline, so that extern inline functions do not emit an
+ * externally visible function. This makes extern inline behave as per gnu89
+ * semantics rather than c99. This prevents multiple symbol definition errors
+ * of extern inline functions at link time.
+ * A lot of inline functions can cause havoc with function tracing.
+ */
+#define inline inline __gnu_inline __inline_maybe_unused notrace
+
+/*
+ * gcc provides both __inline__ and __inline as alternate spellings of
+ * the inline keyword, though the latter is undocumented. New kernel
+ * code should only use the inline spelling, but some existing code
+ * uses __inline__. Since we #define inline above, to ensure
+ * __inline__ has the same semantics, we need this #define.
+ *
+ * However, the spelling __inline is strictly reserved for referring
+ * to the bare keyword.
+ */
+#define __inline__ inline
+
+/*
+ * GCC does not warn about unused static inline functions for -Wunused-function.
+ * Suppress the warning in clang as well by using __maybe_unused, but enable it
+ * for W=1 builds. This will allow clang to find unused functions. Remove the
+ * __inline_maybe_unused entirely after fixing most of -Wunused-function warnings.
+ */
+#ifdef KBUILD_EXTRA_WARN1
+#define __inline_maybe_unused
+#else
+#define __inline_maybe_unused __maybe_unused
+#endif
+
+/*
+ * Rather than using noinline to prevent stack consumption, use
+ * noinline_for_stack instead. For documentation reasons.
+ */
+#define noinline_for_stack noinline
+
+/*
+ * Sanitizer helper attributes: Because using __always_inline and
+ * __no_sanitize_* conflict, provide helper attributes that will either expand
+ * to __no_sanitize_* in compilation units where instrumentation is enabled
+ * (__SANITIZE_*__), or __always_inline in compilation units without
+ * instrumentation (__SANITIZE_*__ undefined).
+ */
+#ifdef __SANITIZE_ADDRESS__
+/*
+ * We can't declare the function 'inline' because __no_sanitize_address
+ * conflicts with inlining. Attempting to inline it may cause a build failure.
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
+ */
+# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
+# define __no_sanitize_or_inline __no_kasan_or_inline
+#else
+# define __no_kasan_or_inline __always_inline
+#endif
+
+#ifdef __SANITIZE_THREAD__
+/*
+ * Clang still emits instrumentation for __tsan_func_{entry,exit}() and builtin
+ * atomics even with __no_sanitize_thread (to avoid false positives in userspace
+ * ThreadSanitizer). The kernel's requirements are stricter and we really do not
+ * want any instrumentation with __no_kcsan.
+ *
+ * Therefore we add __disable_sanitizer_instrumentation where available to
+ * disable all instrumentation. See Kconfig.kcsan where this is mandatory.
+ */
+# define __no_kcsan __no_sanitize_thread __disable_sanitizer_instrumentation
+# define __no_sanitize_or_inline __no_kcsan notrace __maybe_unused
+#else
+# define __no_kcsan
+#endif
+
+#ifndef __no_sanitize_or_inline
+#define __no_sanitize_or_inline __always_inline
+#endif
+
+/* Section for code which can't be instrumented at all */
+#define __noinstr_section(section) \
+ noinline notrace __attribute((__section__(section))) \
+ __no_kcsan __no_sanitize_address __no_profile __no_sanitize_coverage \
+ __no_sanitize_memory
+
+#define noinstr __noinstr_section(".noinstr.text")
+
+/*
+ * The __cpuidle section is used twofold:
+ *
+ * 1) the original use -- identifying if a CPU is 'stuck' in idle state based
+ * on its instruction pointer. See cpu_in_idle().
+ *
+ * 2) suppressing instrumentation around where cpuidle disables RCU; where the
+ * function isn't strictly required for #1, this is interchangeable with
+ * noinstr.
+ */
+#define __cpuidle __noinstr_section(".cpuidle.text")
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * The below symbols may be defined for one or more, but not ALL, of the above
+ * compilers. We don't consider that to be an error, so set them to nothing.
+ * For example, some of them are for compiler-specific plugins.
+ */
+#ifndef __latent_entropy
+# define __latent_entropy
+#endif
+
+#if defined(RANDSTRUCT) && !defined(__CHECKER__)
+# define __randomize_layout __designated_init __attribute__((randomize_layout))
+# define __no_randomize_layout __attribute__((no_randomize_layout))
+/* This anon struct can add padding, so only enable it under randstruct. */
+# define randomized_struct_fields_start struct {
+# define randomized_struct_fields_end } __randomize_layout;
+#else
+# define __randomize_layout __designated_init
+# define __no_randomize_layout
+# define randomized_struct_fields_start
+# define randomized_struct_fields_end
+#endif
+
+#ifndef __noscs
+# define __noscs
+#endif
+
+#ifndef __nocfi
+# define __nocfi
+#endif
+
+/*
+ * Any place that could be marked with the "alloc_size" attribute is also
+ * a place to be marked with the "malloc" attribute, except those that may
+ * be performing a _reallocation_, as that may alias the existing pointer.
+ * For these, use __realloc_size().
+ */
+#ifdef __alloc_size__
+# define __alloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__) __malloc
+# define __realloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__)
+#else
+# define __alloc_size(x, ...) __malloc
+# define __realloc_size(x, ...)
+#endif
+
+#ifndef asm_volatile_goto
+#define asm_volatile_goto(x...) asm goto(x)
+#endif
+
+#ifdef CONFIG_CC_HAS_ASM_INLINE
+#define asm_inline asm __inline
+#else
+#define asm_inline asm
+#endif
+
+/* Are two types/vars the same type (ignoring qualifiers)? */
+#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+
+/*
+ * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
+ * non-scalar types unchanged.
+ */
+/*
+ * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
+ * is not type-compatible with 'signed char', and we define a separate case.
+ */
+#define __scalar_type_to_expr_cases(type) \
+ unsigned type: (unsigned type)0, \
+ signed type: (signed type)0
+
+#define __unqual_scalar_typeof(x) typeof( \
+ _Generic((x), \
+ char: (char)0, \
+ __scalar_type_to_expr_cases(char), \
+ __scalar_type_to_expr_cases(short), \
+ __scalar_type_to_expr_cases(int), \
+ __scalar_type_to_expr_cases(long), \
+ __scalar_type_to_expr_cases(long long), \
+ default: (x)))
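
A short example of the effect, assuming the declarations below (get_reg() and
get_foo() are hypothetical sources): scalars lose their qualifiers while
aggregates pass through unchanged, which is what READ_ONCE() needs for its
local copy:

	const volatile int *p = get_reg();
	__unqual_scalar_typeof(*p) v = *p;	/* 'v' has plain type 'int' */

	const struct foo *q = get_foo();
	__unqual_scalar_typeof(*q) s = *q;	/* stays 'const struct foo' */
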
+
+/* Is this type a native word size -- useful for atomic operations */
+#define __native_word(t) \
+ (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
+ sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+
+#ifdef __OPTIMIZE__
+# define __compiletime_assert(condition, msg, prefix, suffix) \
+ do { \
+ /* \
+ * __noreturn is needed to give the compiler enough \
+ * information to avoid certain possibly-uninitialized \
+ * warnings (regardless of the build failing). \
+ */ \
+ __noreturn extern void prefix ## suffix(void) \
+ __compiletime_error(msg); \
+ if (!(condition)) \
+ prefix ## suffix(); \
+ } while (0)
+#else
+# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
+#endif
+
+#define _compiletime_assert(condition, msg, prefix, suffix) \
+ __compiletime_assert(condition, msg, prefix, suffix)
+
+/**
+ * compiletime_assert - break build and emit msg if condition is false
+ * @condition: a compile-time constant condition to check
+ * @msg: a message to emit if condition is false
+ *
+ * In the tradition of POSIX assert, this macro will break the build if the
+ * supplied condition is *false*, emitting the supplied error message if the
+ * compiler has support to do so.
+ */
+#define compiletime_assert(condition, msg) \
+ _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
+
+#define compiletime_assert_atomic_type(t) \
+ compiletime_assert(__native_word(t), \
+ "Need native word sized stores/loads for atomicity.")
+
+/* Helpers for emitting diagnostics in pragmas. */
+#ifndef __diag
+#define __diag(string)
+#endif
+
+#ifndef __diag_GCC
+#define __diag_GCC(version, severity, string)
+#endif
+
+#define __diag_push() __diag(push)
+#define __diag_pop() __diag(pop)
+
+#define __diag_ignore(compiler, version, option, comment) \
+ __diag_ ## compiler(version, ignore, option)
+#define __diag_warn(compiler, version, option, comment) \
+ __diag_ ## compiler(version, warn, option)
+#define __diag_error(compiler, version, option, comment) \
+ __diag_ ## compiler(version, error, option)
+
+#ifndef __diag_ignore_all
+#define __diag_ignore_all(option, comment)
+#endif
+
+#endif /* __LINUX_COMPILER_TYPES_H */
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 5d5aaae3af43..62b32b19e0a8 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPLETION_H
#define __LINUX_COMPLETION_H
@@ -8,7 +9,7 @@
* See kernel/sched/completion.c for details.
*/
-#include <linux/wait.h>
+#include <linux/swait.h>
/*
* struct completion - structure used to maintain state for a "completion"
@@ -24,14 +25,21 @@
*/
struct completion {
unsigned int done;
- wait_queue_head_t wait;
+ struct swait_queue_head wait;
};
+#define init_completion_map(x, m) init_completion(x)
+static inline void complete_acquire(struct completion *x) {}
+static inline void complete_release(struct completion *x) {}
+
#define COMPLETION_INITIALIZER(work) \
- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+ { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+
+#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
+ (*({ init_completion_map(&(work), &(map)); &(work); }))
#define COMPLETION_INITIALIZER_ONSTACK(work) \
- ({ init_completion(&work); work; })
+ (*({ init_completion(&work); &work; }))
/**
* DECLARE_COMPLETION - declare and initialize a completion structure
@@ -59,8 +67,11 @@ struct completion {
#ifdef CONFIG_LOCKDEP
# define DECLARE_COMPLETION_ONSTACK(work) \
struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
+# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) \
+ struct completion work = COMPLETION_INITIALIZER_ONSTACK_MAP(work, map)
#else
# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
+# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) DECLARE_COMPLETION(work)
#endif
/**
@@ -73,7 +84,7 @@ struct completion {
static inline void init_completion(struct completion *x)
{
x->done = 0;
- init_waitqueue_head(&x->wait);
+ init_swait_queue_head(&x->wait);
}
/**
@@ -92,6 +103,7 @@ extern void wait_for_completion(struct completion *);
extern void wait_for_completion_io(struct completion *);
extern int wait_for_completion_interruptible(struct completion *x);
extern int wait_for_completion_killable(struct completion *x);
+extern int wait_for_completion_state(struct completion *x, unsigned int state);
extern unsigned long wait_for_completion_timeout(struct completion *x,
unsigned long timeout);
extern unsigned long wait_for_completion_io_timeout(struct completion *x,
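
The usage pattern is unchanged by the switch to swait; a minimal sketch with
a hypothetical producer:

	static int start_and_wait(void)
	{
		DECLARE_COMPLETION_ONSTACK(done);

		kick_off_async_work(&done);	/* hypothetical producer */
		wait_for_completion(&done);	/* sleeps on the swait queue */
		return 0;
	}

	/* The producer side may signal from any context, including IRQ: */
	static void async_work_finished(struct completion *done)
	{
		complete(done);
	}
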
diff --git a/include/linux/component.h b/include/linux/component.h
index a559eebc0e0f..df4aa75c9e7c 100644
--- a/include/linux/component.h
+++ b/include/linux/component.h
@@ -1,30 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef COMPONENT_H
#define COMPONENT_H
#include <linux/stddef.h>
+
struct device;
+/**
+ * struct component_ops - callbacks for component drivers
+ *
+ * Components are registered with component_add() and unregistered with
+ * component_del().
+ */
struct component_ops {
+ /**
+ * @bind:
+ *
+ * Called through component_bind_all() when the aggregate driver is
+ * ready to bind the overall driver.
+ */
int (*bind)(struct device *comp, struct device *master,
void *master_data);
+ /**
+ * @unbind:
+ *
+ * Called through component_unbind_all() when the aggregate driver is
+ * ready to unbind the overall driver, or when component_bind_all() fails
+ * part-ways through and needs to unbind some already bound components.
+ */
void (*unbind)(struct device *comp, struct device *master,
void *master_data);
};
int component_add(struct device *, const struct component_ops *);
+int component_add_typed(struct device *dev, const struct component_ops *ops,
+ int subcomponent);
void component_del(struct device *, const struct component_ops *);
-int component_bind_all(struct device *master, void *master_data);
-void component_unbind_all(struct device *master, void *master_data);
+int component_bind_all(struct device *parent, void *data);
+void component_unbind_all(struct device *parent, void *data);
-struct master;
+struct aggregate_device;
+/**
+ * struct component_master_ops - callback for the aggregate driver
+ *
+ * Aggregate drivers are registered with component_master_add_with_match() and
+ * unregistered with component_master_del().
+ */
struct component_master_ops {
+ /**
+ * @bind:
+ *
+ * Called when all components of the aggregate driver, as specified in
+ * the match list passed to component_master_add_with_match(), are
+ * ready. Usually there are 3 steps to bind an aggregate driver:
+ *
+ * 1. Allocate a structure for the aggregate driver.
+ *
+ * 2. Bind all components to the aggregate driver by calling
+ * component_bind_all() with the aggregate driver structure as opaque
+ * pointer data.
+ *
+ * 3. Register the aggregate driver with the subsystem to publish its
+ * interfaces.
+ *
+ * Note that the lifetime of the aggregate driver does not align with
+ * any of the underlying &struct device instances. Therefore devm cannot
+ * be used and all resources acquired or allocated in this callback must
+ * be explicitly released in the @unbind callback.
+ */
int (*bind)(struct device *master);
+ /**
+ * @unbind:
+ *
+ * Called when either the aggregate driver, using
+ * component_master_del(), or one of its components, using
+ * component_del(), is unregistered.
+ */
void (*unbind)(struct device *master);
};
+/* A set of helper functions for component compare/release */
+int component_compare_of(struct device *dev, void *data);
+void component_release_of(struct device *dev, void *data);
+int component_compare_dev(struct device *dev, void *data);
+int component_compare_dev_name(struct device *dev, void *data);
+
void component_master_del(struct device *,
const struct component_master_ops *);
@@ -32,16 +95,36 @@ struct component_match;
int component_master_add_with_match(struct device *,
const struct component_master_ops *, struct component_match *);
-void component_match_add_release(struct device *master,
+void component_match_add_release(struct device *parent,
struct component_match **matchptr,
void (*release)(struct device *, void *),
int (*compare)(struct device *, void *), void *compare_data);
+void component_match_add_typed(struct device *parent,
+ struct component_match **matchptr,
+ int (*compare_typed)(struct device *, int, void *), void *compare_data);
-static inline void component_match_add(struct device *master,
+/**
+ * component_match_add - add a component match entry
+ * @parent: device with the aggregate driver
+ * @matchptr: pointer to the list of component matches
+ * @compare: compare function to match against all components
+ * @compare_data: opaque pointer passed to the @compare function
+ *
+ * Adds a new component match to the list stored in @matchptr, which the @parent
+ * aggregate driver needs to function. The list of component matches pointed to
+ * by @matchptr must be initialized to NULL before adding the first match. This
+ * only matches against components added with component_add().
+ *
+ * The allocated match list in @matchptr is automatically released using devm
+ * actions.
+ *
+ * See also component_match_add_release() and component_match_add_typed().
+ */
+static inline void component_match_add(struct device *parent,
struct component_match **matchptr,
int (*compare)(struct device *, void *), void *compare_data)
{
- component_match_add_release(master, matchptr, NULL, compare,
+ component_match_add_release(parent, matchptr, NULL, compare,
compare_data);
}
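
A condensed sketch of how an aggregate driver ties these pieces together
(error handling and the component drivers themselves are omitted; all names
below are illustrative):

	static int agg_bind(struct device *dev)
	{
		/* step 2 of the @bind sequence described above */
		return component_bind_all(dev, dev_get_drvdata(dev));
	}

	static void agg_unbind(struct device *dev)
	{
		component_unbind_all(dev, dev_get_drvdata(dev));
	}

	static const struct component_master_ops agg_ops = {
		.bind	= agg_bind,
		.unbind	= agg_unbind,
	};

	static int agg_probe(struct platform_device *pdev)
	{
		struct component_match *match = NULL;

		component_match_add(&pdev->dev, &match,
				    component_compare_dev_name,
				    (void *)"some-component");
		return component_master_add_with_match(&pdev->dev, &agg_ops,
						       match);
	}
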
diff --git a/include/linux/concap.h b/include/linux/concap.h
deleted file mode 100644
index 977acb3d1fb2..000000000000
--- a/include/linux/concap.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/* $Id: concap.h,v 1.3.2.2 2004/01/12 23:08:35 keil Exp $
- *
- * Copyright 1997 by Henner Eisen <eis@baty.hanse.de>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#ifndef _LINUX_CONCAP_H
-#define _LINUX_CONCAP_H
-
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-
-/* Stuff to support encapsulation protocols genericly. The encapsulation
- protocol is processed at the uppermost layer of the network interface.
-
- Based on a ideas developed in a 'synchronous device' thread in the
- linux-x25 mailing list contributed by Alan Cox, Thomasz Motylewski
- and Jonathan Naylor.
-
- For more documetation on this refer to Documentation/isdn/README.concap
-*/
-
-struct concap_proto_ops;
-struct concap_device_ops;
-
-/* this manages all data needed by the encapsulation protocol
- */
-struct concap_proto{
- struct net_device *net_dev; /* net device using our service */
- struct concap_device_ops *dops; /* callbacks provided by device */
- struct concap_proto_ops *pops; /* callbacks provided by us */
- spinlock_t lock;
- int flags;
- void *proto_data; /* protocol specific private data, to
- be accessed via *pops methods only*/
- /*
- :
- whatever
- :
- */
-};
-
-/* Operations to be supported by the net device. Called by the encapsulation
- * protocol entity. No receive method is offered because the encapsulation
- * protocol directly calls netif_rx().
- */
-struct concap_device_ops{
-
- /* to request data is submitted by device*/
- int (*data_req)(struct concap_proto *, struct sk_buff *);
-
- /* Control methods must be set to NULL by devices which do not
- support connection control.*/
- /* to request a connection is set up */
- int (*connect_req)(struct concap_proto *);
-
- /* to request a connection is released */
- int (*disconn_req)(struct concap_proto *);
-};
-
-/* Operations to be supported by the encapsulation protocol. Called by
- * device driver.
- */
-struct concap_proto_ops{
-
- /* create a new encapsulation protocol instance of same type */
- struct concap_proto * (*proto_new) (void);
-
- /* delete encapsulation protocol instance and free all its resources.
- cprot may no loger be referenced after calling this */
- void (*proto_del)(struct concap_proto *cprot);
-
- /* initialize the protocol's data. To be called at interface startup
- or when the device driver resets the interface. All services of the
- encapsulation protocol may be used after this*/
- int (*restart)(struct concap_proto *cprot,
- struct net_device *ndev,
- struct concap_device_ops *dops);
-
- /* inactivate an encapsulation protocol instance. The encapsulation
- protocol may not call any *dops methods after this. */
- int (*close)(struct concap_proto *cprot);
-
- /* process a frame handed down to us by upper layer */
- int (*encap_and_xmit)(struct concap_proto *cprot, struct sk_buff *skb);
-
- /* to be called for each data entity received from lower layer*/
- int (*data_ind)(struct concap_proto *cprot, struct sk_buff *skb);
-
- /* to be called when a connection was set up/down.
- Protocols that don't process these primitives might fill in
- dummy methods here */
- int (*connect_ind)(struct concap_proto *cprot);
- int (*disconn_ind)(struct concap_proto *cprot);
- /*
- Some network device support functions, like net_header(), rebuild_header(),
- and others, that depend solely on the encapsulation protocol, might
- be provided here, too. The net device would just fill them in its
- corresponding fields when it is opened.
- */
-};
-
-/* dummy restart/close/connect/reset/disconn methods
- */
-extern int concap_nop(struct concap_proto *cprot);
-
-/* dummy submit method
- */
-extern int concap_drop_skb(struct concap_proto *cprot, struct sk_buff *skb);
-#endif
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index c96709049683..2606711adb18 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -1,23 +1,7 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
* configfs.h - definitions for the device driver filesystem
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- *
* Based on sysfs:
* sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
*
@@ -27,7 +11,7 @@
*
* configfs Copyright (C) 2005 Oracle. All rights reserved.
*
- * Please read Documentation/filesystems/configfs/configfs.txt before using
+ * Please read Documentation/filesystems/configfs.rst before using
* the configfs interface, ESPECIALLY the parts about reference counts and
* item destructors.
*/
@@ -58,7 +42,7 @@ struct config_item {
struct list_head ci_entry;
struct config_item *ci_parent;
struct config_group *ci_group;
- struct config_item_type *ci_type;
+ const struct config_item_type *ci_type;
struct dentry *ci_dentry;
};
@@ -72,7 +56,7 @@ static inline char *config_item_name(struct config_item * item)
extern void config_item_init_type_name(struct config_item *item,
const char *name,
- struct config_item_type *type);
+ const struct config_item_type *type);
extern struct config_item *config_item_get(struct config_item *);
extern struct config_item *config_item_get_unless_zero(struct config_item *);
@@ -101,7 +85,7 @@ struct config_group {
extern void config_group_init(struct config_group *group);
extern void config_group_init_type_name(struct config_group *group,
const char *name,
- struct config_item_type *type);
+ const struct config_item_type *type);
static inline struct config_group *to_config_group(struct config_item *item)
{
@@ -220,8 +204,6 @@ static struct configfs_bin_attribute _pfx##attr_##_name = { \
* group children. default_groups may coexist alongsize make_group() or
* make_item(), but if the group wishes to have only default_groups
* children (disallowing mkdir(2)), it need not provide either function.
- * If the group has commit(), it supports pending and committed (active)
- * items.
*/
struct configfs_item_operations {
void (*release)(struct config_item *);
@@ -232,7 +214,6 @@ struct configfs_item_operations {
struct configfs_group_operations {
struct config_item *(*make_item)(struct config_group *group, const char *name);
struct config_group *(*make_group)(struct config_group *group, const char *name);
- int (*commit_item)(struct config_item *item);
void (*disconnect_notify)(struct config_group *group, struct config_item *item);
void (*drop_item)(struct config_group *group, struct config_item *item);
};
@@ -261,7 +242,7 @@ void configfs_remove_default_groups(struct config_group *group);
struct config_group *
configfs_register_default_group(struct config_group *parent_group,
const char *name,
- struct config_item_type *item_type);
+ const struct config_item_type *item_type);
void configfs_unregister_default_group(struct config_group *group);
/* These functions can sleep and can alloc with GFP_KERNEL */
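
With the constification above, an item type is now declared const; a minimal
sketch (the group operations are hypothetical):

	static const struct config_item_type my_group_type = {
		.ct_owner	= THIS_MODULE,
		.ct_group_ops	= &my_group_ops,
	};

	static int my_register(struct config_group *parent)
	{
		struct config_group *grp;

		grp = configfs_register_default_group(parent, "my_group",
						      &my_group_type);
		return PTR_ERR_OR_ZERO(grp);
	}
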
diff --git a/include/linux/connector.h b/include/linux/connector.h
index f8fe8637d771..487350bb19c3 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -1,28 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* connector.h
*
* 2004-2005 Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __CONNECTOR_H
#define __CONNECTOR_H
-#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/list.h>
#include <linux/workqueue.h>
@@ -49,7 +36,7 @@ struct cn_callback_id {
struct cn_callback_entry {
struct list_head callback_entry;
- atomic_t refcnt;
+ refcount_t refcnt;
struct cn_queue_dev *pdev;
struct cn_callback_id id;
@@ -63,26 +50,86 @@ struct cn_dev {
u32 seq, groups;
struct sock *nls;
- void (*input) (struct sk_buff *skb);
struct cn_queue_dev *cbdev;
};
-int cn_add_callback(struct cb_id *id, const char *name,
+/**
+ * cn_add_callback() - Registers a new callback with the connector core.
+ *
+ * @id: unique connector's user identifier.
+ * It must be registered in connector.h for legal
+ * in-kernel users.
+ * @name: connector's callback symbolic name.
+ * @callback: connector's callback.
+ * parameters are %cn_msg and the sender's credentials
+ */
+int cn_add_callback(const struct cb_id *id, const char *name,
void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
-void cn_del_callback(struct cb_id *);
+/**
+ * cn_del_callback() - Unregisters a callback from the connector core.
+ *
+ * @id: unique connector's user identifier.
+ */
+void cn_del_callback(const struct cb_id *id);
+
+
+/**
+ * cn_netlink_send_mult - Sends messages to the specified groups.
+ *
+ * @msg: message header (with attached data).
+ * @len: number of @msg entries to be sent.
+ * @portid: destination port.
+ * If non-zero, the message will be sent to the given port,
+ * which should be set to the original sender.
+ * @group: destination group.
+ * If @portid and @group are both zero, the appropriate group will
+ * be searched through all registered connector users, and the
+ * message will be delivered to the group which was created
+ * for the user with the same ID as in @msg.
+ * If @group is not zero, the message will be delivered
+ * to the specified group.
+ * @gfp_mask: GFP mask.
+ *
+ * It can be safely called from softirq context, but may silently
+ * fail under strong memory pressure.
+ *
+ * If there are no listeners for the given group, %-ESRCH can be returned.
+ */
int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask);
+
+/**
+ * cn_netlink_send - Sends a message to the specified groups.
+ *
+ * @msg: message header (with attached data).
+ * @portid: destination port.
+ * If non-zero, the message will be sent to the given port,
+ * which should be set to the original sender.
+ * @group: destination group.
+ * If @portid and @group are both zero, the appropriate group will
+ * be searched through all registered connector users, and the
+ * message will be delivered to the group which was created
+ * for the user with the same ID as in @msg.
+ * If @group is not zero, the message will be delivered
+ * to the specified group.
+ * @gfp_mask: GFP mask.
+ *
+ * It can be safely called from softirq context, but may silently
+ * fail under strong memory pressure.
+ *
+ * If there are no listeners for the given group, %-ESRCH can be returned.
+ */
int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask);
int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
- struct cb_id *id,
+ const struct cb_id *id,
void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
-void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
+void cn_queue_del_callback(struct cn_queue_dev *dev, const struct cb_id *id);
void cn_queue_release_callback(struct cn_callback_entry *);
struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *);
void cn_queue_free_dev(struct cn_queue_dev *dev);
-int cn_cb_equal(struct cb_id *, struct cb_id *);
+int cn_cb_equal(const struct cb_id *, const struct cb_id *);
#endif /* __CONNECTOR_H */
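
A sketch of a minimal in-kernel user of the const-qualified API (the id pair
and names below are illustrative):

	static const struct cb_id my_cn_id = { .idx = CN_NETLINK_USERS + 1,
					       .val = 0x1 };

	static void my_cn_callback(struct cn_msg *msg,
				   struct netlink_skb_parms *nsp)
	{
		pr_info("connector: seq=%u len=%u\n", msg->seq, msg->len);
	}

	static int __init my_cn_init(void)
	{
		return cn_add_callback(&my_cn_id, "my_cn", my_cn_callback);
	}
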
diff --git a/include/linux/console.h b/include/linux/console.h
index b8920a031a3e..d3195664baa5 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -14,6 +14,9 @@
#ifndef _LINUX_CONSOLE_H_
#define _LINUX_CONSOLE_H_ 1
+#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/rculist.h>
#include <linux/types.h>
struct vc_data;
@@ -21,18 +24,15 @@ struct console_font_op;
struct console_font;
struct module;
struct tty_struct;
-
-/*
- * this is what the terminal answers to a ESC-Z or csi0c query.
- */
-#define VT100ID "\033[?1;2c"
-#define VT102ID "\033[?6c"
+struct notifier_block;
enum con_scroll {
SM_UP,
SM_DOWN,
};
+enum vc_intensity;
+
/**
* struct consw - callbacks for consoles
*
@@ -46,46 +46,53 @@ enum con_scroll {
struct consw {
struct module *owner;
const char *(*con_startup)(void);
- void (*con_init)(struct vc_data *, int);
- void (*con_deinit)(struct vc_data *);
- void (*con_clear)(struct vc_data *, int, int, int, int);
- void (*con_putc)(struct vc_data *, int, int, int);
- void (*con_putcs)(struct vc_data *, const unsigned short *, int, int, int);
- void (*con_cursor)(struct vc_data *, int);
- bool (*con_scroll)(struct vc_data *, unsigned int top,
+ void (*con_init)(struct vc_data *vc, int init);
+ void (*con_deinit)(struct vc_data *vc);
+ void (*con_clear)(struct vc_data *vc, int sy, int sx, int height,
+ int width);
+ void (*con_putc)(struct vc_data *vc, int c, int ypos, int xpos);
+ void (*con_putcs)(struct vc_data *vc, const unsigned short *s,
+ int count, int ypos, int xpos);
+ void (*con_cursor)(struct vc_data *vc, int mode);
+ bool (*con_scroll)(struct vc_data *vc, unsigned int top,
unsigned int bottom, enum con_scroll dir,
unsigned int lines);
- int (*con_switch)(struct vc_data *);
- int (*con_blank)(struct vc_data *, int, int);
- int (*con_font_set)(struct vc_data *, struct console_font *, unsigned);
- int (*con_font_get)(struct vc_data *, struct console_font *);
- int (*con_font_default)(struct vc_data *, struct console_font *, char *);
- int (*con_font_copy)(struct vc_data *, int);
- int (*con_resize)(struct vc_data *, unsigned int, unsigned int,
- unsigned int);
- void (*con_set_palette)(struct vc_data *,
+ int (*con_switch)(struct vc_data *vc);
+ int (*con_blank)(struct vc_data *vc, int blank, int mode_switch);
+ int (*con_font_set)(struct vc_data *vc, struct console_font *font,
+ unsigned int vpitch, unsigned int flags);
+ int (*con_font_get)(struct vc_data *vc, struct console_font *font,
+ unsigned int vpitch);
+ int (*con_font_default)(struct vc_data *vc,
+ struct console_font *font, char *name);
+ int (*con_resize)(struct vc_data *vc, unsigned int width,
+ unsigned int height, unsigned int user);
+ void (*con_set_palette)(struct vc_data *vc,
const unsigned char *table);
- void (*con_scrolldelta)(struct vc_data *, int lines);
- int (*con_set_origin)(struct vc_data *);
- void (*con_save_screen)(struct vc_data *);
- u8 (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8, u8);
- void (*con_invert_region)(struct vc_data *, u16 *, int);
- u16 *(*con_screen_pos)(struct vc_data *, int);
- unsigned long (*con_getxy)(struct vc_data *, unsigned long, int *, int *);
+ void (*con_scrolldelta)(struct vc_data *vc, int lines);
+ int (*con_set_origin)(struct vc_data *vc);
+ void (*con_save_screen)(struct vc_data *vc);
+ u8 (*con_build_attr)(struct vc_data *vc, u8 color,
+ enum vc_intensity intensity,
+ bool blink, bool underline, bool reverse, bool italic);
+ void (*con_invert_region)(struct vc_data *vc, u16 *p, int count);
+ u16 *(*con_screen_pos)(const struct vc_data *vc, int offset);
+ unsigned long (*con_getxy)(struct vc_data *vc, unsigned long position,
+ int *px, int *py);
/*
* Flush the video console driver's scrollback buffer
*/
- void (*con_flush_scrollback)(struct vc_data *);
+ void (*con_flush_scrollback)(struct vc_data *vc);
/*
* Prepare the console for the debugger. This includes, but is not
* limited to, unblanking the console, loading an appropriate
* palette, and allowing debugger generated output.
*/
- int (*con_debug_enter)(struct vc_data *);
+ int (*con_debug_enter)(struct vc_data *vc);
/*
* Restore the console to its pre-debug state as closely as possible.
*/
- int (*con_debug_leave)(struct vc_data *);
+ int (*con_debug_leave)(struct vc_data *vc);
};
extern const struct consw *conswitchp;
@@ -93,7 +100,6 @@ extern const struct consw *conswitchp;
extern const struct consw dummy_con; /* dummy console buffer */
extern const struct consw vga_con; /* VGA text console */
extern const struct consw newport_con; /* SGI Newport console */
-extern const struct consw prom_con; /* SPARC PROM console */
int con_is_bound(const struct consw *csw);
int do_unregister_con_driver(const struct consw *csw);
@@ -121,53 +127,226 @@ static inline int con_debug_leave(void)
/*
* The interface for a console, or any other device that wants to capture
* console messages (printer driver?)
- *
- * If a console driver is marked CON_BOOT then it will be auto-unregistered
- * when the first real console is registered. This is for early-printk drivers.
*/
-#define CON_PRINTBUFFER (1)
-#define CON_CONSDEV (2) /* Last on the command line */
-#define CON_ENABLED (4)
-#define CON_BOOT (8)
-#define CON_ANYTIME (16) /* Safe to call when cpu is offline */
-#define CON_BRL (32) /* Used for a braille device */
-#define CON_EXTENDED (64) /* Use the extended output format a la /dev/kmsg */
+/**
+ * cons_flags - General console flags
+ * @CON_PRINTBUFFER: Used by newly registered consoles to avoid duplicate
+ * output of messages that were already shown by boot
+ * consoles or read by userspace via syslog() syscall.
+ * @CON_CONSDEV: Indicates that the console driver is backing
+ * /dev/console.
+ * @CON_ENABLED: Indicates if a console is allowed to print records. If
+ * false, the console also will not advance to later
+ * records.
+ * @CON_BOOT: Marks the console driver as early console driver which
+ * is used during boot before the real driver becomes
+ * available. It will be automatically unregistered
+ * when the real console driver is registered unless
+ * "keep_bootcon" parameter is used.
+ * @CON_ANYTIME: A misnomed historical flag which tells the core code
+ * that the legacy @console::write callback can be invoked
+ * on a CPU which is marked OFFLINE. That is misleading as
+ * it suggests that there is no contextual limit for
+ * invoking the callback. The original motivation was
+ * readiness of the per-CPU areas.
+ * @CON_BRL: Indicates a braille device which is exempt from
+ * receiving the printk spam for obvious reasons.
+ * @CON_EXTENDED: The console supports the extended output format of
+ * /dev/kmsg which requires a larger output buffer.
+ */
+enum cons_flags {
+ CON_PRINTBUFFER = BIT(0),
+ CON_CONSDEV = BIT(1),
+ CON_ENABLED = BIT(2),
+ CON_BOOT = BIT(3),
+ CON_ANYTIME = BIT(4),
+ CON_BRL = BIT(5),
+ CON_EXTENDED = BIT(6),
+};
+/**
+ * struct console - The console descriptor structure
+ * @name: The name of the console driver
+ * @write: Write callback to output messages (Optional)
+ * @read: Read callback for console input (Optional)
+ * @device: The underlying TTY device driver (Optional)
+ * @unblank: Callback to unblank the console (Optional)
+ * @setup: Callback for initializing the console (Optional)
+ * @exit: Callback for teardown of the console (Optional)
+ * @match: Callback for matching a console (Optional)
+ * @flags: Console flags. See enum cons_flags
+ * @index: Console index, e.g. port number
+ * @cflag: TTY control mode flags
+ * @ispeed: TTY input speed
+ * @ospeed: TTY output speed
+ * @seq: Sequence number of the next ringbuffer record to print
+ * @dropped: Number of unreported dropped ringbuffer records
+ * @data: Driver private data
+ * @node: hlist node for the console list
+ */
struct console {
- char name[16];
- void (*write)(struct console *, const char *, unsigned);
- int (*read)(struct console *, char *, unsigned);
- struct tty_driver *(*device)(struct console *, int *);
- void (*unblank)(void);
- int (*setup)(struct console *, char *);
- int (*match)(struct console *, char *name, int idx, char *options);
- short flags;
- short index;
- int cflag;
- void *data;
- struct console *next;
+ char name[16];
+ void (*write)(struct console *co, const char *s, unsigned int count);
+ int (*read)(struct console *co, char *s, unsigned int count);
+ struct tty_driver *(*device)(struct console *co, int *index);
+ void (*unblank)(void);
+ int (*setup)(struct console *co, char *options);
+ int (*exit)(struct console *co);
+ int (*match)(struct console *co, char *name, int idx, char *options);
+ short flags;
+ short index;
+ int cflag;
+ uint ispeed;
+ uint ospeed;
+ u64 seq;
+ unsigned long dropped;
+ void *data;
+ struct hlist_node node;
};
+#ifdef CONFIG_LOCKDEP
+extern void lockdep_assert_console_list_lock_held(void);
+#else
+static inline void lockdep_assert_console_list_lock_held(void)
+{
+}
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern bool console_srcu_read_lock_is_held(void);
+#else
+static inline bool console_srcu_read_lock_is_held(void)
+{
+ return 1;
+}
+#endif
+
+extern int console_srcu_read_lock(void);
+extern void console_srcu_read_unlock(int cookie);
+
+extern void console_list_lock(void) __acquires(console_mutex);
+extern void console_list_unlock(void) __releases(console_mutex);
+
+extern struct hlist_head console_list;
+
+/**
+ * console_srcu_read_flags - Locklessly read the console flags
+ * @con: struct console pointer of console to read flags from
+ *
+ * This function provides the necessary READ_ONCE() and data_race()
+ * notation for locklessly reading the console flags. The READ_ONCE()
+ * in this function matches the WRITE_ONCE() when @flags are modified
+ * for registered consoles with console_srcu_write_flags().
+ *
+ * Only use this function to read console flags when locklessly
+ * iterating the console list via srcu.
+ *
+ * Context: Any context.
+ */
+static inline short console_srcu_read_flags(const struct console *con)
+{
+ WARN_ON_ONCE(!console_srcu_read_lock_is_held());
+
+ /*
+ * Locklessly reading console->flags provides a consistent
+ * read value because there is at most one CPU modifying
+ * console->flags and that CPU is using only read-modify-write
+ * operations to do so.
+ */
+ return data_race(READ_ONCE(con->flags));
+}
+
+/**
+ * console_srcu_write_flags - Write flags for a registered console
+ * @con: struct console pointer of console to write flags to
+ * @flags: new flags value to write
+ *
+ * Only use this function to write flags for registered consoles. It
+ * requires holding the console_list_lock.
+ *
+ * Context: Any context.
+ */
+static inline void console_srcu_write_flags(struct console *con, short flags)
+{
+ lockdep_assert_console_list_lock_held();
+
+ /* This matches the READ_ONCE() in console_srcu_read_flags(). */
+ WRITE_ONCE(con->flags, flags);
+}
+
+/* Variant of console_is_registered() when the console_list_lock is held. */
+static inline bool console_is_registered_locked(const struct console *con)
+{
+ lockdep_assert_console_list_lock_held();
+ return !hlist_unhashed(&con->node);
+}
+
/*
- * for_each_console() allows you to iterate on each console
+ * console_is_registered - Check if the console is registered
+ * @con: struct console pointer of console to check
+ *
+ * Context: Process context. May sleep while acquiring the console list lock.
+ * Return: true if the console is in the console list, otherwise false.
+ *
+ * If false is returned for a console that was previously registered, it
+ * can be assumed that the console's unregistration is fully completed,
+ * including the exit() callback after console list removal.
*/
-#define for_each_console(con) \
- for (con = console_drivers; con != NULL; con = con->next)
+static inline bool console_is_registered(const struct console *con)
+{
+ bool ret;
+
+ console_list_lock();
+ ret = console_is_registered_locked(con);
+ console_list_unlock();
+ return ret;
+}
+
+/**
+ * for_each_console_srcu() - Iterator over registered consoles
+ * @con: struct console pointer used as loop cursor
+ *
+ * Although SRCU guarantees the console list will be consistent, the
+ * struct console fields may be updated by other CPUs while iterating.
+ *
+ * Requires console_srcu_read_lock to be held. Can be invoked from
+ * any context.
+ */
+#define for_each_console_srcu(con) \
+ hlist_for_each_entry_srcu(con, &console_list, node, \
+ console_srcu_read_lock_is_held())
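
Putting the SRCU pieces together, lockless iteration follows this pattern
(a sketch):

	static void walk_consoles(void)
	{
		struct console *con;
		int cookie;

		cookie = console_srcu_read_lock();
		for_each_console_srcu(con) {
			if (!(console_srcu_read_flags(con) & CON_ENABLED))
				continue;
			/* @con is safe to dereference here, but its
			 * fields may still change concurrently */
		}
		console_srcu_read_unlock(cookie);
	}
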
+
+/**
+ * for_each_console() - Iterator over registered consoles
+ * @con: struct console pointer used as loop cursor
+ *
+ * The console list and the console->flags are immutable while iterating.
+ *
+ * Requires console_list_lock to be held.
+ */
+#define for_each_console(con) \
+ lockdep_assert_console_list_lock_held(); \
+ hlist_for_each_entry(con, &console_list, node)
extern int console_set_on_cmdline;
extern struct console *early_console;
+enum con_flush_mode {
+ CONSOLE_FLUSH_PENDING,
+ CONSOLE_REPLAY_ALL,
+};
+
extern int add_preferred_console(char *name, int idx, char *options);
+extern void console_force_preferred_locked(struct console *con);
extern void register_console(struct console *);
extern int unregister_console(struct console *);
-extern struct console *console_drivers;
extern void console_lock(void);
extern int console_trylock(void);
extern void console_unlock(void);
extern void console_conditional_schedule(void);
extern void console_unblank(void);
-extern void console_flush_on_panic(void);
+extern void console_flush_on_panic(enum con_flush_mode mode);
extern struct tty_driver *console_device(int *);
extern void console_stop(struct console *);
extern void console_start(struct console *);
@@ -188,17 +367,19 @@ extern void suspend_console(void);
extern void resume_console(void);
int mda_console_init(void);
-void prom_con_init(void);
void vcs_make_sysfs(int index);
void vcs_remove_sysfs(int index);
/* Some debug stub to catch some of the obvious races in the VT code */
-#if 1
-#define WARN_CONSOLE_UNLOCKED() WARN_ON(!is_console_locked() && !oops_in_progress)
-#else
-#define WARN_CONSOLE_UNLOCKED()
-#endif
+#define WARN_CONSOLE_UNLOCKED() \
+ WARN_ON(!atomic_read(&ignore_console_lock_warning) && \
+ !is_console_locked() && !oops_in_progress)
+/*
+ * Increment ignore_console_lock_warning if you need to quiet
+ * WARN_CONSOLE_UNLOCKED() for debugging purposes.
+ */
+extern atomic_t ignore_console_lock_warning;
/* VESA Blanking Levels */
#define VESA_NO_BLANKING 0
@@ -206,12 +387,10 @@ void vcs_remove_sysfs(int index);
#define VESA_HSYNC_SUSPEND 2
#define VESA_POWERDOWN 3
-#ifdef CONFIG_VGA_CONSOLE
-extern bool vgacon_text_force(void);
-#else
-static inline bool vgacon_text_force(void) { return false; }
-#endif
-
extern void console_init(void);
+/* For deferred console takeover */
+void dummycon_register_output_notifier(struct notifier_block *nb);
+void dummycon_unregister_output_notifier(struct notifier_block *nb);
+
#endif /* _LINUX_CONSOLE_H */
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index 6fd3c908a340..539f1cd45309 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* console_struct.h
*
@@ -16,10 +17,46 @@
#include <linux/vt.h>
#include <linux/workqueue.h>
-struct vt_struct;
-struct uni_pagedir;
+struct uni_pagedict;
#define NPAR 16
+#define VC_TABSTOPS_COUNT 256U
+
+enum vc_intensity {
+ VCI_HALF_BRIGHT,
+ VCI_NORMAL,
+ VCI_BOLD,
+ VCI_MASK = 0x3,
+};
+
+/**
+ * struct vc_state -- state of a VC
+ * @x: cursor's x-position
+ * @y: cursor's y-position
+ * @color: foreground & background colors
+ * @Gx_charset: what the G0/G1 slots are set to (like GRAF_MAP, LAT1_MAP)
+ * @charset: what character set to use (0=G0 or 1=G1)
+ * @intensity: see enum vc_intensity for values
+ * @reverse: reversed foreground/background colors
+ *
+ * These members are defined separately from struct vc_data as we save &
+ * restore them at times.
+ */
+struct vc_state {
+ unsigned int x, y;
+
+ unsigned char color;
+
+ unsigned char Gx_charset[2];
+ unsigned int charset : 1;
+
+ /* attribute flags */
+ enum vc_intensity intensity;
+ bool italic;
+ bool underline;
+ bool blink;
+ bool reverse;
+};
/*
* Example: vc_data of a console that was scrolled 3 lines down.
@@ -56,11 +93,14 @@ struct uni_pagedir;
struct vc_data {
struct tty_port port; /* Upper level data */
+ struct vc_state state, saved_state;
+
unsigned short vc_num; /* Console number */
unsigned int vc_cols; /* [#] Console size */
unsigned int vc_rows;
unsigned int vc_size_row; /* Bytes per row */
unsigned int vc_scan_lines; /* # of scan lines */
+ unsigned int vc_cell_height; /* CRTC character cell height */
unsigned long vc_origin; /* [!] Start of real screen */
unsigned long vc_scr_end; /* [!] End of real screen */
unsigned long vc_visible_origin; /* [!] Top of visible window */
@@ -72,8 +112,6 @@ struct vc_data {
/* attributes for all characters on screen */
unsigned char vc_attr; /* Current attributes */
unsigned char vc_def_color; /* Default colors */
- unsigned char vc_color; /* Foreground & background */
- unsigned char vc_s_color; /* Saved foreground & background */
unsigned char vc_ulcolor; /* Color for underline mode */
unsigned char vc_itcolor;
unsigned char vc_halfcolor; /* Color for half intensity mode */
@@ -81,8 +119,6 @@ struct vc_data {
unsigned int vc_cursor_type;
unsigned short vc_complement_mask; /* [#] Xor mask for mouse pointer */
unsigned short vc_s_complement_mask; /* Saved mouse pointer mask */
- unsigned int vc_x, vc_y; /* Cursor position */
- unsigned int vc_saved_x, vc_saved_y;
unsigned long vc_pos; /* Cursor address */
/* fonts */
unsigned short vc_hi_font_mask; /* [#] Attribute set for upper 256 chars of font or 0 if not supported */
@@ -97,8 +133,6 @@ struct vc_data {
int vt_newvt;
wait_queue_head_t paste_wait;
/* mode flags */
- unsigned int vc_charset : 1; /* Character set G0 / G1 */
- unsigned int vc_s_charset : 1; /* Saved character set */
unsigned int vc_disp_ctrl : 1; /* Display chars < 32? */
unsigned int vc_toggle_meta : 1; /* Toggle high bit? */
unsigned int vc_decscnm : 1; /* Screen Mode */
@@ -106,40 +140,25 @@ struct vc_data {
unsigned int vc_decawm : 1; /* Autowrap Mode */
unsigned int vc_deccm : 1; /* Cursor Visible */
unsigned int vc_decim : 1; /* Insert Mode */
- /* attribute flags */
- unsigned int vc_intensity : 2; /* 0=half-bright, 1=normal, 2=bold */
- unsigned int vc_italic:1;
- unsigned int vc_underline : 1;
- unsigned int vc_blink : 1;
- unsigned int vc_reverse : 1;
- unsigned int vc_s_intensity : 2; /* saved rendition */
- unsigned int vc_s_italic:1;
- unsigned int vc_s_underline : 1;
- unsigned int vc_s_blink : 1;
- unsigned int vc_s_reverse : 1;
/* misc */
- unsigned int vc_ques : 1;
+ unsigned int vc_priv : 3;
unsigned int vc_need_wrap : 1;
unsigned int vc_can_do_color : 1;
unsigned int vc_report_mouse : 2;
unsigned char vc_utf : 1; /* Unicode UTF-8 encoding */
unsigned char vc_utf_count;
int vc_utf_char;
- unsigned int vc_tab_stop[8]; /* Tab stops. 256 columns. */
+ DECLARE_BITMAP(vc_tab_stop, VC_TABSTOPS_COUNT); /* Tab stops. 256 columns. */
unsigned char vc_palette[16*3]; /* Colour palette for VGA+ */
unsigned short * vc_translate;
- unsigned char vc_G0_charset;
- unsigned char vc_G1_charset;
- unsigned char vc_saved_G0;
- unsigned char vc_saved_G1;
unsigned int vc_resize_user; /* resize request from user */
unsigned int vc_bell_pitch; /* Console bell pitch */
unsigned int vc_bell_duration; /* Console bell duration */
unsigned short vc_cur_blink_ms; /* Cursor blink duration */
struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */
- struct uni_pagedir *vc_uni_pagedir;
- struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
- bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
+ struct uni_pagedict *uni_pagedict;
+ struct uni_pagedict **uni_pagedict_loc; /* [!] Location of uni_pagedict variable for this console */
+ u32 **vc_uni_lines; /* unicode screen content */
/* additional information is in vt_kern.h */
};
@@ -147,29 +166,31 @@ struct vc {
struct vc_data *d;
struct work_struct SAK_work;
- /* might add scrmem, vt_struct, kbd at some time,
- to have everything in one place - the disadvantage
- would be that vc_cons etc can no longer be static */
+ /* might add scrmem, kbd at some time,
+ to have everything in one place */
};
extern struct vc vc_cons [MAX_NR_CONSOLES];
extern void vc_SAK(struct work_struct *work);
-#define CUR_DEF 0
-#define CUR_NONE 1
-#define CUR_UNDERLINE 2
-#define CUR_LOWER_THIRD 3
-#define CUR_LOWER_HALF 4
-#define CUR_TWO_THIRDS 5
-#define CUR_BLOCK 6
-#define CUR_HWMASK 0x0f
-#define CUR_SWMASK 0xfff0
-
-#define CUR_DEFAULT CUR_UNDERLINE
-
-static inline bool con_is_visible(const struct vc_data *vc)
-{
- return *vc->vc_display_fg == vc;
-}
+#define CUR_MAKE(size, change, set) ((size) | ((change) << 8) | \
+ ((set) << 16))
+#define CUR_SIZE(c) ((c) & 0x00000f)
+# define CUR_DEF 0
+# define CUR_NONE 1
+# define CUR_UNDERLINE 2
+# define CUR_LOWER_THIRD 3
+# define CUR_LOWER_HALF 4
+# define CUR_TWO_THIRDS 5
+# define CUR_BLOCK 6
+#define CUR_SW 0x000010
+#define CUR_ALWAYS_BG 0x000020
+#define CUR_INVERT_FG_BG 0x000040
+#define CUR_FG 0x000700
+#define CUR_BG 0x007000
+#define CUR_CHANGE(c) ((c) & 0x00ff00)
+#define CUR_SET(c) (((c) & 0xff0000) >> 8)
+
+bool con_is_visible(const struct vc_data *vc);
#endif /* _LINUX_CONSOLE_STRUCT_H */
diff --git a/include/linux/consolemap.h b/include/linux/consolemap.h
index c4811da1338b..c35db4896c37 100644
--- a/include/linux/consolemap.h
+++ b/include/linux/consolemap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* consolemap.h
*
@@ -6,29 +7,56 @@
#ifndef __LINUX_CONSOLEMAP_H__
#define __LINUX_CONSOLEMAP_H__
-#define LAT1_MAP 0
-#define GRAF_MAP 1
-#define IBMPC_MAP 2
-#define USER_MAP 3
+enum translation_map {
+ LAT1_MAP,
+ GRAF_MAP,
+ IBMPC_MAP,
+ USER_MAP,
+
+ FIRST_MAP = LAT1_MAP,
+ LAST_MAP = USER_MAP,
+};
#include <linux/types.h>
-#ifdef CONFIG_CONSOLE_TRANSLATIONS
struct vc_data;
-extern u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode);
-extern unsigned short *set_translate(int m, struct vc_data *vc);
-extern int conv_uni_to_pc(struct vc_data *conp, long ucs);
-extern u32 conv_8bit_to_uni(unsigned char c);
-extern int conv_uni_to_8bit(u32 uni);
+#ifdef CONFIG_CONSOLE_TRANSLATIONS
+u16 inverse_translate(const struct vc_data *conp, u16 glyph, bool use_unicode);
+unsigned short *set_translate(enum translation_map m, struct vc_data *vc);
+int conv_uni_to_pc(struct vc_data *conp, long ucs);
+u32 conv_8bit_to_uni(unsigned char c);
+int conv_uni_to_8bit(u32 uni);
void console_map_init(void);
#else
-#define inverse_translate(conp, glyph, uni) ((uint16_t)glyph)
-#define set_translate(m, vc) ((unsigned short *)NULL)
-#define conv_uni_to_pc(conp, ucs) ((int) (ucs > 0xff ? -1: ucs))
-#define conv_8bit_to_uni(c) ((uint32_t)(c))
-#define conv_uni_to_8bit(c) ((int) ((c) & 0xff))
-#define console_map_init(c) do { ; } while (0)
+static inline u16 inverse_translate(const struct vc_data *conp, u16 glyph,
+ bool use_unicode)
+{
+ return glyph;
+}
+
+static inline unsigned short *set_translate(enum translation_map m,
+ struct vc_data *vc)
+{
+ return NULL;
+}
+
+static inline int conv_uni_to_pc(struct vc_data *conp, long ucs)
+{
+ return ucs > 0xff ? -1 : ucs;
+}
+
+static inline u32 conv_8bit_to_uni(unsigned char c)
+{
+ return c;
+}
+
+static inline int conv_uni_to_8bit(u32 uni)
+{
+ return uni & 0xff;
+}
+
+static inline void console_map_init(void) { }
#endif /* CONFIG_CONSOLE_TRANSLATIONS */
#endif /* __LINUX_CONSOLEMAP_H__ */
diff --git a/include/linux/const.h b/include/linux/const.h
new file mode 100644
index 000000000000..435ddd72d2c4
--- /dev/null
+++ b/include/linux/const.h
@@ -0,0 +1,14 @@
+#ifndef _LINUX_CONST_H
+#define _LINUX_CONST_H
+
+#include <vdso/const.h>
+
+/*
+ * This returns a constant expression while determining if an argument is
+ * a constant expression, most importantly without evaluating the argument.
+ * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
+ */
+#define __is_constexpr(x) \
+ (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
+
+#endif /* _LINUX_CONST_H */
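
The trick: (void *)((long)(x) * 0l) is a null pointer constant exactly when
x is an integer constant expression. In that case the conditional operator
takes the type of the other arm, int *, and the sizeof comparison holds;
otherwise the composite type is void * and it does not. For example, inside
a function:

	int n = 5;

	BUILD_BUG_ON(!__is_constexpr(5));		/* constant: 1 */
	BUILD_BUG_ON(__is_constexpr(n));		/* variable: 0 */
	BUILD_BUG_ON(!__is_constexpr(sizeof(n)));	/* sizeof is constant */
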
diff --git a/include/linux/container.h b/include/linux/container.h
index 3c03e6fd2035..2566a1baa736 100644
--- a/include/linux/container.h
+++ b/include/linux/container.h
@@ -1,14 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Definitions for container bus type.
*
* Copyright (C) 2013, Intel Corporation
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
+#ifndef _LINUX_CONTAINER_H
+#define _LINUX_CONTAINER_H
+
#include <linux/device.h>
/* drivers/base/power/container.c */
@@ -23,3 +23,5 @@ static inline struct container_dev *to_container_dev(struct device *dev)
{
return container_of(dev, struct container_dev, dev);
}
+
+#endif /* _LINUX_CONTAINER_H */
diff --git a/include/linux/container_of.h b/include/linux/container_of.h
new file mode 100644
index 000000000000..713890c867be
--- /dev/null
+++ b/include/linux/container_of.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CONTAINER_OF_H
+#define _LINUX_CONTAINER_OF_H
+
+#include <linux/build_bug.h>
+#include <linux/stddef.h>
+
+#define typeof_member(T, m) typeof(((T*)0)->m)
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ * WARNING: any const qualifier of @ptr is lost.
+ */
+#define container_of(ptr, type, member) ({ \
+ void *__mptr = (void *)(ptr); \
+ static_assert(__same_type(*(ptr), ((type *)0)->member) || \
+ __same_type(*(ptr), void), \
+ "pointer type mismatch in container_of()"); \
+ ((type *)(__mptr - offsetof(type, member))); })
+
+/**
+ * container_of_const - cast a member of a structure out to the containing
+ * structure and preserve the const-ness of the pointer
+ * @ptr: the pointer to the member
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ */
+#define container_of_const(ptr, type, member) \
+ _Generic(ptr, \
+ const typeof(*(ptr)) *: ((const type *)container_of(ptr, type, member)),\
+ default: ((type *)container_of(ptr, type, member)) \
+ )
+
+#endif /* _LINUX_CONTAINER_OF_H */
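
A short example of the const-preserving variant (the structures are
illustrative):

	struct inner { int x; };
	struct outer { int a; struct inner in; };

	static const struct outer *outer_from_inner(const struct inner *p)
	{
		/* result is 'const struct outer *': @p's const-ness is kept */
		return container_of_const(p, struct outer, in);
	}
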
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index c78fc27418f2..d3cbb6c16bab 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -1,174 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CONTEXT_TRACKING_H
#define _LINUX_CONTEXT_TRACKING_H
#include <linux/sched.h>
#include <linux/vtime.h>
#include <linux/context_tracking_state.h>
+#include <linux/instrumentation.h>
+
#include <asm/ptrace.h>
-#ifdef CONFIG_CONTEXT_TRACKING
-extern void context_tracking_cpu_set(int cpu);
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+extern void ct_cpu_track_user(int cpu);
/* Called with interrupts disabled. */
-extern void __context_tracking_enter(enum ctx_state state);
-extern void __context_tracking_exit(enum ctx_state state);
+extern void __ct_user_enter(enum ctx_state state);
+extern void __ct_user_exit(enum ctx_state state);
+
+extern void ct_user_enter(enum ctx_state state);
+extern void ct_user_exit(enum ctx_state state);
-extern void context_tracking_enter(enum ctx_state state);
-extern void context_tracking_exit(enum ctx_state state);
-extern void context_tracking_user_enter(void);
-extern void context_tracking_user_exit(void);
+extern void user_enter_callable(void);
+extern void user_exit_callable(void);
static inline void user_enter(void)
{
- if (context_tracking_is_enabled())
- context_tracking_enter(CONTEXT_USER);
+ if (context_tracking_enabled())
+ ct_user_enter(CONTEXT_USER);
}
static inline void user_exit(void)
{
- if (context_tracking_is_enabled())
- context_tracking_exit(CONTEXT_USER);
+ if (context_tracking_enabled())
+ ct_user_exit(CONTEXT_USER);
}
/* Called with interrupts disabled. */
-static inline void user_enter_irqoff(void)
+static __always_inline void user_enter_irqoff(void)
{
- if (context_tracking_is_enabled())
- __context_tracking_enter(CONTEXT_USER);
+ if (context_tracking_enabled())
+ __ct_user_enter(CONTEXT_USER);
}
-static inline void user_exit_irqoff(void)
+static __always_inline void user_exit_irqoff(void)
{
- if (context_tracking_is_enabled())
- __context_tracking_exit(CONTEXT_USER);
+ if (context_tracking_enabled())
+ __ct_user_exit(CONTEXT_USER);
}
static inline enum ctx_state exception_enter(void)
{
enum ctx_state prev_ctx;
- if (!context_tracking_is_enabled())
+ if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) ||
+ !context_tracking_enabled())
return 0;
- prev_ctx = this_cpu_read(context_tracking.state);
+ prev_ctx = __ct_state();
if (prev_ctx != CONTEXT_KERNEL)
- context_tracking_exit(prev_ctx);
+ ct_user_exit(prev_ctx);
return prev_ctx;
}
static inline void exception_exit(enum ctx_state prev_ctx)
{
- if (context_tracking_is_enabled()) {
+ if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) &&
+ context_tracking_enabled()) {
if (prev_ctx != CONTEXT_KERNEL)
- context_tracking_enter(prev_ctx);
+ ct_user_enter(prev_ctx);
}
}
+static __always_inline bool context_tracking_guest_enter(void)
+{
+ if (context_tracking_enabled())
+ __ct_user_enter(CONTEXT_GUEST);
-/**
- * ct_state() - return the current context tracking state if known
- *
- * Returns the current cpu's context tracking state if context tracking
- * is enabled. If context tracking is disabled, returns
- * CONTEXT_DISABLED. This should be used primarily for debugging.
- */
-static inline enum ctx_state ct_state(void)
+ return context_tracking_enabled_this_cpu();
+}
+
+static __always_inline void context_tracking_guest_exit(void)
{
- return context_tracking_is_enabled() ?
- this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
+ if (context_tracking_enabled())
+ __ct_user_exit(CONTEXT_GUEST);
}
+
+#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
+
#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { }
-static inline enum ctx_state exception_enter(void) { return 0; }
+static inline int exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
-static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
-#endif /* !CONFIG_CONTEXT_TRACKING */
-
-#define CT_WARN_ON(cond) WARN_ON(context_tracking_is_enabled() && (cond))
-
-#ifdef CONFIG_CONTEXT_TRACKING_FORCE
+static inline int ct_state(void) { return -1; }
+static inline int __ct_state(void) { return -1; }
+static __always_inline bool context_tracking_guest_enter(void) { return false; }
+static __always_inline void context_tracking_guest_exit(void) { }
+#define CT_WARN_ON(cond) do { } while (0)
+#endif /* !CONFIG_CONTEXT_TRACKING_USER */
+
+#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
-#endif /* CONFIG_CONTEXT_TRACKING_FORCE */
+#endif /* CONFIG_CONTEXT_TRACKING_USER_FORCE */
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+extern void ct_idle_enter(void);
+extern void ct_idle_exit(void);
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-/* must be called with irqs disabled */
-static inline void guest_enter_irqoff(void)
+/*
+ * Is the current CPU in an extended quiescent state?
+ *
+ * No ordering, as we are sampling CPU-local information.
+ */
+static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
{
- if (vtime_accounting_cpu_enabled())
- vtime_guest_enter(current);
- else
- current->flags |= PF_VCPU;
-
- if (context_tracking_is_enabled())
- __context_tracking_enter(CONTEXT_GUEST);
-
- /* KVM does not hold any references to rcu protected data when it
- * switches CPU into a guest mode. In fact switching to a guest mode
- * is very similar to exiting to userspace from rcu point of view. In
- * addition CPU may stay in a guest mode for quite a long time (up to
- * one time slice). Lets treat guest mode as quiescent state, just like
- * we do with user-mode execution.
- */
- if (!context_tracking_cpu_is_enabled())
- rcu_virt_note_context_switch(smp_processor_id());
+ return !(arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
}
-static inline void guest_exit_irqoff(void)
+/*
+ * Increment the current CPU's context_tracking structure's ->state field
+ * with ordering. Return the new value.
+ */
+static __always_inline unsigned long ct_state_inc(int incby)
{
- if (context_tracking_is_enabled())
- __context_tracking_exit(CONTEXT_GUEST);
-
- if (vtime_accounting_cpu_enabled())
- vtime_guest_exit(current);
- else
- current->flags &= ~PF_VCPU;
+ return arch_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
}
-#else
-static inline void guest_enter_irqoff(void)
+static __always_inline bool warn_rcu_enter(void)
{
+ bool ret = false;
+
/*
- * This is running in ioctl context so its safe
- * to assume that it's the stime pending cputime
- * to flush.
+ * Horrible hack to shut up recursive RCU isn't watching fail since
+ * lots of the actual reporting also relies on RCU.
*/
- vtime_account_system(current);
- current->flags |= PF_VCPU;
- rcu_virt_note_context_switch(smp_processor_id());
-}
+ preempt_disable_notrace();
+ if (rcu_dynticks_curr_cpu_in_eqs()) {
+ ret = true;
+ ct_state_inc(RCU_DYNTICKS_IDX);
+ }
-static inline void guest_exit_irqoff(void)
-{
- /* Flush the guest cputime we spent on the guest */
- vtime_account_system(current);
- current->flags &= ~PF_VCPU;
+ return ret;
}
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
-static inline void guest_enter(void)
+static __always_inline void warn_rcu_exit(bool rcu)
{
- unsigned long flags;
-
- local_irq_save(flags);
- guest_enter_irqoff();
- local_irq_restore(flags);
+ if (rcu)
+ ct_state_inc(RCU_DYNTICKS_IDX);
+ preempt_enable_notrace();
}
-static inline void guest_exit(void)
-{
- unsigned long flags;
+#else
+static inline void ct_idle_enter(void) { }
+static inline void ct_idle_exit(void) { }
- local_irq_save(flags);
- guest_exit_irqoff();
- local_irq_restore(flags);
-}
+static __always_inline bool warn_rcu_enter(void) { return false; }
+static __always_inline void warn_rcu_exit(bool rcu) { }
+#endif /* !CONFIG_CONTEXT_TRACKING_IDLE */
#endif
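For illustration, a hedged sketch of how these helpers pair up at a call site that must report even from an extended quiescent state; my_report_from_eqs() is an invented name, not a kernel API:

    static void my_report_from_eqs(const char *msg)
    {
    	bool rcu = warn_rcu_enter();	/* force RCU to watch if it wasn't */

    	WARN(1, "context tracking: %s\n", msg);	/* reporting relies on RCU */

    	warn_rcu_exit(rcu);	/* undo only what warn_rcu_enter() did */
    }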
diff --git a/include/linux/context_tracking_irq.h b/include/linux/context_tracking_irq.h
new file mode 100644
index 000000000000..c50b5670c4a5
--- /dev/null
+++ b/include/linux/context_tracking_irq.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CONTEXT_TRACKING_IRQ_H
+#define _LINUX_CONTEXT_TRACKING_IRQ_H
+
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+void ct_irq_enter(void);
+void ct_irq_exit(void);
+void ct_irq_enter_irqson(void);
+void ct_irq_exit_irqson(void);
+void ct_nmi_enter(void);
+void ct_nmi_exit(void);
+#else
+static inline void ct_irq_enter(void) { }
+static inline void ct_irq_exit(void) { }
+static inline void ct_irq_enter_irqson(void) { }
+static inline void ct_irq_exit_irqson(void) { }
+static inline void ct_nmi_enter(void) { }
+static inline void ct_nmi_exit(void) { }
+#endif
+
+#endif
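As a sketch of the intended call pattern: my_arch_handle_irq() and my_dispatch_irq() below are invented stand-ins for an architecture's interrupt entry path, not real kernel symbols:

    void my_arch_handle_irq(struct pt_regs *regs)
    {
    	ct_irq_enter();		/* leave any idle EQS before handling */
    	my_dispatch_irq(regs);	/* hypothetical interrupt dispatch */
    	ct_irq_exit();		/* may re-enter the idle EQS */
    }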
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 1d34fe68f48a..fdd537ea513f 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -1,10 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CONTEXT_TRACKING_STATE_H
#define _LINUX_CONTEXT_TRACKING_STATE_H
#include <linux/percpu.h>
#include <linux/static_key.h>
+#include <linux/context_tracking_irq.h>
+
+/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
+#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
+
+enum ctx_state {
+ CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */
+ CONTEXT_KERNEL = 0,
+ CONTEXT_IDLE = 1,
+ CONTEXT_USER = 2,
+ CONTEXT_GUEST = 3,
+ CONTEXT_MAX = 4,
+};
+
+/* Even value for idle, else odd. */
+#define RCU_DYNTICKS_IDX CONTEXT_MAX
+
+#define CT_STATE_MASK (CONTEXT_MAX - 1)
+#define CT_DYNTICKS_MASK (~CT_STATE_MASK)
struct context_tracking {
+#ifdef CONFIG_CONTEXT_TRACKING_USER
/*
* When active is false, probes are unset in order
* to minimize overhead: TIF flags are cleared
@@ -13,37 +34,115 @@ struct context_tracking {
*/
bool active;
int recursion;
- enum ctx_state {
- CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */
- CONTEXT_KERNEL = 0,
- CONTEXT_USER,
- CONTEXT_GUEST,
- } state;
+#endif
+#ifdef CONFIG_CONTEXT_TRACKING
+ atomic_t state;
+#endif
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+ long dynticks_nesting; /* Track process nesting level. */
+ long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */
+#endif
};
#ifdef CONFIG_CONTEXT_TRACKING
-extern struct static_key_false context_tracking_enabled;
DECLARE_PER_CPU(struct context_tracking, context_tracking);
+#endif
+
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+static __always_inline int __ct_state(void)
+{
+ return arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
+}
+#endif
+
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+static __always_inline int ct_dynticks(void)
+{
+ return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_DYNTICKS_MASK;
+}
+
+static __always_inline int ct_dynticks_cpu(int cpu)
+{
+ struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+ return atomic_read(&ct->state) & CT_DYNTICKS_MASK;
+}
-static inline bool context_tracking_is_enabled(void)
+static __always_inline int ct_dynticks_cpu_acquire(int cpu)
{
- return static_branch_unlikely(&context_tracking_enabled);
+ struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+ return atomic_read_acquire(&ct->state) & CT_DYNTICKS_MASK;
}
-static inline bool context_tracking_cpu_is_enabled(void)
+static __always_inline long ct_dynticks_nesting(void)
{
- return __this_cpu_read(context_tracking.active);
+ return __this_cpu_read(context_tracking.dynticks_nesting);
}
-static inline bool context_tracking_in_user(void)
+static __always_inline long ct_dynticks_nesting_cpu(int cpu)
{
- return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
+ struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+ return ct->dynticks_nesting;
}
+
+static __always_inline long ct_dynticks_nmi_nesting(void)
+{
+ return __this_cpu_read(context_tracking.dynticks_nmi_nesting);
+}
+
+static __always_inline long ct_dynticks_nmi_nesting_cpu(int cpu)
+{
+ struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+ return ct->dynticks_nmi_nesting;
+}
+#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */
+
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+extern struct static_key_false context_tracking_key;
+
+static __always_inline bool context_tracking_enabled(void)
+{
+ return static_branch_unlikely(&context_tracking_key);
+}
+
+static __always_inline bool context_tracking_enabled_cpu(int cpu)
+{
+ return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);
+}
+
+static inline bool context_tracking_enabled_this_cpu(void)
+{
+ return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
+}
+
+/**
+ * ct_state() - return the current context tracking state if known
+ *
+ * Returns the current CPU's context tracking state if context tracking
+ * is enabled. If context tracking is disabled, returns
+ * CONTEXT_DISABLED. This should be used primarily for debugging.
+ */
+static __always_inline int ct_state(void)
+{
+ int ret;
+
+ if (!context_tracking_enabled())
+ return CONTEXT_DISABLED;
+
+ preempt_disable();
+ ret = __ct_state();
+ preempt_enable();
+
+ return ret;
+}
+
#else
-static inline bool context_tracking_in_user(void) { return false; }
-static inline bool context_tracking_active(void) { return false; }
-static inline bool context_tracking_is_enabled(void) { return false; }
-static inline bool context_tracking_cpu_is_enabled(void) { return false; }
-#endif /* CONFIG_CONTEXT_TRACKING */
+static __always_inline bool context_tracking_enabled(void) { return false; }
+static __always_inline bool context_tracking_enabled_cpu(int cpu) { return false; }
+static __always_inline bool context_tracking_enabled_this_cpu(void) { return false; }
+#endif /* CONFIG_CONTEXT_TRACKING_USER */
#endif
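To illustrate the packed ->state encoding: the low CT_STATE_MASK bits hold the context (kernel/idle/user/guest) and the bits from RCU_DYNTICKS_IDX upward hold the dynticks counter, so ct_state() only needs to mask off the low bits. A hedged sketch of a debug assertion built on top of this (the function name is invented; CT_WARN_ON() comes from <linux/context_tracking.h> and expands to a no-op without CONFIG_CONTEXT_TRACKING_USER):

    static void my_syscall_entry_check(void)
    {
    	/* Warn if we did not arrive here from user context. */
    	CT_WARN_ON(ct_state() != CONTEXT_USER);
    }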
diff --git a/include/linux/cookie.h b/include/linux/cookie.h
new file mode 100644
index 000000000000..0c159f585109
--- /dev/null
+++ b/include/linux/cookie.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COOKIE_H
+#define __LINUX_COOKIE_H
+
+#include <linux/atomic.h>
+#include <linux/percpu.h>
+#include <asm/local.h>
+
+struct pcpu_gen_cookie {
+ local_t nesting;
+ u64 last;
+} __aligned(16);
+
+struct gen_cookie {
+ struct pcpu_gen_cookie __percpu *local;
+ atomic64_t forward_last ____cacheline_aligned_in_smp;
+ atomic64_t reverse_last;
+};
+
+#define COOKIE_LOCAL_BATCH 4096
+
+#define DEFINE_COOKIE(name) \
+ static DEFINE_PER_CPU(struct pcpu_gen_cookie, __##name); \
+ static struct gen_cookie name = { \
+ .local = &__##name, \
+ .forward_last = ATOMIC64_INIT(0), \
+ .reverse_last = ATOMIC64_INIT(0), \
+ }
+
+static __always_inline u64 gen_cookie_next(struct gen_cookie *gc)
+{
+ struct pcpu_gen_cookie *local = this_cpu_ptr(gc->local);
+ u64 val;
+
+ if (likely(local_inc_return(&local->nesting) == 1)) {
+ val = local->last;
+ if (__is_defined(CONFIG_SMP) &&
+ unlikely((val & (COOKIE_LOCAL_BATCH - 1)) == 0)) {
+ s64 next = atomic64_add_return(COOKIE_LOCAL_BATCH,
+ &gc->forward_last);
+ val = next - COOKIE_LOCAL_BATCH;
+ }
+ local->last = ++val;
+ } else {
+ val = atomic64_dec_return(&gc->reverse_last);
+ }
+ local_dec(&local->nesting);
+ return val;
+}
+
+#endif /* __LINUX_COOKIE_H */
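A hedged sketch of the intended usage: DEFINE_COOKIE() instantiates the per-cpu state, and each gen_cookie_next() call hands out a 64-bit value that is unique for the lifetime of the system (the name my_cookie is invented):

    DEFINE_COOKIE(my_cookie);

    static u64 my_assign_id(void)
    {
    	/*
    	 * Fast path: ids come from a per-cpu batch of COOKIE_LOCAL_BATCH;
    	 * nested callers (e.g. from irq context) fall back to the shared
    	 * reverse_last counter, which counts down from zero.
    	 */
    	return gen_cookie_next(&my_cookie);
    }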
diff --git a/include/linux/cordic.h b/include/linux/cordic.h
index cf68ca4a508c..3d656f54d64f 100644
--- a/include/linux/cordic.h
+++ b/include/linux/cordic.h
@@ -18,6 +18,15 @@
#include <linux/types.h>
+#define CORDIC_ANGLE_GEN 39797
+#define CORDIC_PRECISION_SHIFT 16
+#define CORDIC_NUM_ITER (CORDIC_PRECISION_SHIFT + 2)
+
+#define CORDIC_FIXED(X) ((s32)((X) << CORDIC_PRECISION_SHIFT))
+#define CORDIC_FLOAT(X) (((X) >= 0) \
+ ? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \
+ : -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1))
+
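For example, angles go into the CORDIC routine in s16.16 fixed point, so a caller converts with CORDIC_FIXED() on the way in and CORDIC_FLOAT() (which rounds to the nearest integer) on the way out. A hedged sketch, assuming CONFIG_CORDIC is enabled; my_demo() is invented, while cordic_calc_iq() is the routine this header declares:

    static void my_demo(void)
    {
    	struct cordic_iq iq = cordic_calc_iq(CORDIC_FIXED(30)); /* 30 deg */

    	pr_info("i=%d q=%d\n", CORDIC_FLOAT(iq.i), CORDIC_FLOAT(iq.q));
    }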
/**
* struct cordic_iq - i/q coordinate.
*
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index 28ffa94aed6b..d3eba4360150 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_COREDUMP_H
#define _LINUX_COREDUMP_H
@@ -6,19 +7,48 @@
#include <linux/fs.h>
#include <asm/siginfo.h>
+#ifdef CONFIG_COREDUMP
+struct core_vma_metadata {
+ unsigned long start, end;
+ unsigned long flags;
+ unsigned long dump_size;
+ unsigned long pgoff;
+ struct file *file;
+};
+
+struct coredump_params {
+ const kernel_siginfo_t *siginfo;
+ struct file *file;
+ unsigned long limit;
+ unsigned long mm_flags;
+ int cpu;
+ loff_t written;
+ loff_t pos;
+ loff_t to_skip;
+ int vma_count;
+ size_t vma_data_size;
+ struct core_vma_metadata *vma_meta;
+};
+
/*
* These are the only things you should do on a core-file: use only these
* functions to write out all the necessary info.
*/
-struct coredump_params;
-extern int dump_skip(struct coredump_params *cprm, size_t nr);
+extern void dump_skip_to(struct coredump_params *cprm, unsigned long to);
+extern void dump_skip(struct coredump_params *cprm, size_t nr);
extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
extern int dump_align(struct coredump_params *cprm, int align);
-extern void dump_truncate(struct coredump_params *cprm);
-#ifdef CONFIG_COREDUMP
-extern void do_coredump(const siginfo_t *siginfo);
+int dump_user_range(struct coredump_params *cprm, unsigned long start,
+ unsigned long len);
+extern void do_coredump(const kernel_siginfo_t *siginfo);
+#else
+static inline void do_coredump(const kernel_siginfo_t *siginfo) {}
+#endif
+
+#if defined(CONFIG_COREDUMP) && defined(CONFIG_SYSCTL)
+extern void validate_coredump_safety(void);
#else
-static inline void do_coredump(const siginfo_t *siginfo) {}
+static inline void validate_coredump_safety(void) {}
#endif
#endif /* _LINUX_COREDUMP_H */
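A hedged sketch of how a binfmt coredump writer drives these helpers; write_my_note() and its payload are invented, and, following the dump_emit() convention, zero means failure:

    static int write_my_note(struct coredump_params *cprm,
    			 const void *payload, int len)
    {
    	if (!dump_emit(cprm, payload, len))
    		return 0;
    	return dump_align(cprm, 4);	/* pad the stream to 4 bytes */
    }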
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
index 7d410260661b..51ac441a37c3 100644
--- a/include/linux/coresight-pmu.h
+++ b/include/linux/coresight-pmu.h
@@ -1,39 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright(C) 2015 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _LINUX_CORESIGHT_PMU_H
#define _LINUX_CORESIGHT_PMU_H
+#include <linux/bits.h>
+
#define CORESIGHT_ETM_PMU_NAME "cs_etm"
-#define CORESIGHT_ETM_PMU_SEED 0x10
-
-/* ETMv3.5/PTM's ETMCR config bit */
-#define ETM_OPT_CYCACC 12
-#define ETM_OPT_TS 28
-
-static inline int coresight_get_trace_id(int cpu)
-{
- /*
- * A trace ID of value 0 is invalid, so let's start at some
- * random value that fits in 7 bits and go from there. Since
- * the common convention is to have data trace IDs be I(N) + 1,
- * set instruction trace IDs as a function of the CPU number.
- */
- return (CORESIGHT_ETM_PMU_SEED + (cpu * 2));
-}
+
+/*
+ * The legacy Trace ID system, based on a fixed calculation from the CPU
+ * number. This has been replaced by drivers using a dynamic allocation
+ * system, but the legacy algorithm must be retained for backward
+ * compatibility in certain situations:
+ * a) new perf running on older systems that generate the legacy mapping
+ * b) older tools that may not update at the same time as the kernel.
+ */
+#define CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) (0x10 + (cpu * 2))
+
+/*
+ * Below are the definitions of the perf option bit offsets; they work as
+ * arbitrary values for all ETM versions.
+ *
+ * Most of them originally come from ETMv3.5/PTM's ETMCR config; therefore
+ * ETMv3.5/PTM doesn't define ETMCR config bits with an "ETM3_" prefix and
+ * uses the macros below directly as config bits.
+ */
+#define ETM_OPT_BRANCH_BROADCAST 8
+#define ETM_OPT_CYCACC 12
+#define ETM_OPT_CTXTID 14
+#define ETM_OPT_CTXTID2 15
+#define ETM_OPT_TS 28
+#define ETM_OPT_RETSTK 29
+
+/* ETMv4 CONFIGR programming bits for the ETM OPTs */
+#define ETM4_CFG_BIT_BB 3
+#define ETM4_CFG_BIT_CYCACC 4
+#define ETM4_CFG_BIT_CTXTID 6
+#define ETM4_CFG_BIT_VMID 7
+#define ETM4_CFG_BIT_TS 11
+#define ETM4_CFG_BIT_RETSTK 12
+#define ETM4_CFG_BIT_VMID_OPT 15
+
+/*
+ * Interpretation of the PERF_RECORD_AUX_OUTPUT_HW_ID payload.
+ * Used to associate a CPU with the CoreSight Trace ID.
+ * [07:00] - Trace ID - uses 8 bits to make the value easy to read in a file.
+ * [59:08] - Unused (SBZ)
+ * [63:60] - Version
+ */
+#define CS_AUX_HW_ID_TRACE_ID_MASK GENMASK_ULL(7, 0)
+#define CS_AUX_HW_ID_VERSION_MASK GENMASK_ULL(63, 60)
+
+#define CS_AUX_HW_ID_CURR_VERSION 0
#endif
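A sketch of decoding the payload with FIELD_GET() from <linux/bitfield.h>; my_decode_trace_id() is an invented name:

    static int my_decode_trace_id(u64 hw_id, u8 *trace_id)
    {
    	/* Reject layouts newer than the one this code understands. */
    	if (FIELD_GET(CS_AUX_HW_ID_VERSION_MASK, hw_id) !=
    	    CS_AUX_HW_ID_CURR_VERSION)
    		return -EINVAL;

    	*trace_id = FIELD_GET(CS_AUX_HW_ID_TRACE_ID_MASK, hw_id);
    	return 0;
    }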
diff --git a/include/linux/coresight-stm.h b/include/linux/coresight-stm.h
index a978bb85599a..74714b59f9d2 100644
--- a/include/linux/coresight-stm.h
+++ b/include/linux/coresight-stm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_CORESIGHT_STM_H_
#define __LINUX_CORESIGHT_STM_H_
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index d950dad5056a..f19a47b9bb5a 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -1,19 +1,13 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
*/
#ifndef _LINUX_CORESIGHT_H
#define _LINUX_CORESIGHT_H
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/perf_event.h>
#include <linux/sched.h>
@@ -42,142 +36,247 @@
extern struct bus_type coresight_bustype;
enum coresight_dev_type {
- CORESIGHT_DEV_TYPE_NONE,
CORESIGHT_DEV_TYPE_SINK,
CORESIGHT_DEV_TYPE_LINK,
CORESIGHT_DEV_TYPE_LINKSINK,
CORESIGHT_DEV_TYPE_SOURCE,
+ CORESIGHT_DEV_TYPE_HELPER,
+ CORESIGHT_DEV_TYPE_ECT,
};
enum coresight_dev_subtype_sink {
- CORESIGHT_DEV_SUBTYPE_SINK_NONE,
CORESIGHT_DEV_SUBTYPE_SINK_PORT,
CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
+ CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM,
+ CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM,
};
enum coresight_dev_subtype_link {
- CORESIGHT_DEV_SUBTYPE_LINK_NONE,
CORESIGHT_DEV_SUBTYPE_LINK_MERG,
CORESIGHT_DEV_SUBTYPE_LINK_SPLIT,
CORESIGHT_DEV_SUBTYPE_LINK_FIFO,
};
enum coresight_dev_subtype_source {
- CORESIGHT_DEV_SUBTYPE_SOURCE_NONE,
CORESIGHT_DEV_SUBTYPE_SOURCE_PROC,
CORESIGHT_DEV_SUBTYPE_SOURCE_BUS,
CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS,
+};
+
+enum coresight_dev_subtype_helper {
+ CORESIGHT_DEV_SUBTYPE_HELPER_CATU,
+};
+
+/* Embedded Cross Trigger (ECT) sub-types */
+enum coresight_dev_subtype_ect {
+ CORESIGHT_DEV_SUBTYPE_ECT_NONE,
+ CORESIGHT_DEV_SUBTYPE_ECT_CTI,
};
/**
- * struct coresight_dev_subtype - further characterisation of a type
+ * union coresight_dev_subtype - further characterisation of a type
* @sink_subtype: type of sink this component is, as defined
- by @coresight_dev_subtype_sink.
+ * by @coresight_dev_subtype_sink.
* @link_subtype: type of link this component is, as defined
- by @coresight_dev_subtype_link.
+ * by @coresight_dev_subtype_link.
* @source_subtype: type of source this component is, as defined
- by @coresight_dev_subtype_source.
+ * by @coresight_dev_subtype_source.
+ * @helper_subtype: type of helper this component is, as defined
+ * by @coresight_dev_subtype_helper.
+ * @ect_subtype: type of cross trigger this component is, as
+ * defined by @coresight_dev_subtype_ect
*/
-struct coresight_dev_subtype {
- enum coresight_dev_subtype_sink sink_subtype;
- enum coresight_dev_subtype_link link_subtype;
+union coresight_dev_subtype {
+	/* We have some devices which act as both LINK and SINK */
+ struct {
+ enum coresight_dev_subtype_sink sink_subtype;
+ enum coresight_dev_subtype_link link_subtype;
+ };
enum coresight_dev_subtype_source source_subtype;
+ enum coresight_dev_subtype_helper helper_subtype;
+ enum coresight_dev_subtype_ect ect_subtype;
};
/**
- * struct coresight_platform_data - data harvested from the DT specification
- * @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs.
- * @name: name of the component as shown under sysfs.
- * @nr_inport: number of input ports for this component.
- * @outports: list of remote endpoint port number.
- * @child_names:name of all child components connected to this device.
- * @child_ports:child component port number the current component is
- connected to.
- * @nr_outport: number of output ports for this component.
- * @clk: The clock this component is associated to.
+ * struct coresight_platform_data - data harvested from the firmware
+ * specification.
+ *
+ * @nr_inport: Number of elements for the input connections.
+ * @nr_outport: Number of elements for the output connections.
+ * @conns: Sparse array of nr_outport connections from this component.
*/
struct coresight_platform_data {
- int cpu;
- const char *name;
int nr_inport;
- int *outports;
- const char **child_names;
- int *child_ports;
int nr_outport;
- struct clk *clk;
+ struct coresight_connection *conns;
};
/**
+ * struct csdev_access - Abstraction of a CoreSight device access.
+ *
+ * @io_mem : True if the device has memory mapped I/O
+ * @base : When io_mem == true, base address of the component
+ * @read : Read from the given "offset" of the given instance.
+ * @write : Write "val" to the given "offset".
+ */
+struct csdev_access {
+ bool io_mem;
+ union {
+ void __iomem *base;
+ struct {
+ u64 (*read)(u32 offset, bool relaxed, bool _64bit);
+ void (*write)(u64 val, u32 offset, bool relaxed,
+ bool _64bit);
+ };
+ };
+};
+
+#define CSDEV_ACCESS_IOMEM(_addr) \
+ ((struct csdev_access) { \
+ .io_mem = true, \
+ .base = (_addr), \
+ })
+
+/**
* struct coresight_desc - description of a component required from drivers
* @type: as defined by @coresight_dev_type.
* @subtype: as defined by @coresight_dev_subtype.
* @ops: generic operations for this component, as defined
- by @coresight_ops.
+ * by @coresight_ops.
* @pdata: platform data collected from DT.
* @dev: The device entity associated to this component.
* @groups: operations specific to this component. These will end up
- in the component's sysfs sub-directory.
+ * in the component's sysfs sub-directory.
+ * @name: name for the coresight device, also shown under sysfs.
+ * @access: Describe access to the device
*/
struct coresight_desc {
enum coresight_dev_type type;
- struct coresight_dev_subtype subtype;
+ union coresight_dev_subtype subtype;
const struct coresight_ops *ops;
struct coresight_platform_data *pdata;
struct device *dev;
const struct attribute_group **groups;
+ const char *name;
+ struct csdev_access access;
};
/**
* struct coresight_connection - representation of a single connection
* @outport: a connection's output port number.
- * @chid_name: remote component's name.
* @child_port: remote component's port number @output is connected to.
+ * @child_fwnode: remote component's fwnode handle.
* @child_dev: a @coresight_device representation of the component
connected to @outport.
+ * @link: Representation of the connection as a sysfs link.
*/
struct coresight_connection {
int outport;
- const char *child_name;
int child_port;
+ struct fwnode_handle *child_fwnode;
struct coresight_device *child_dev;
+ struct coresight_sysfs_link *link;
+};
+
+/**
+ * struct coresight_sysfs_link - representation of a connection in sysfs.
+ * @orig: Originating (master) coresight device for the link.
+ * @orig_name: Name to use for the link orig->target.
+ * @target: Target (slave) coresight device for the link.
+ * @target_name: Name to use for the link target->orig.
+ */
+struct coresight_sysfs_link {
+ struct coresight_device *orig;
+ const char *orig_name;
+ struct coresight_device *target;
+ const char *target_name;
};
/**
* struct coresight_device - representation of a device as used by the framework
- * @conns: array of coresight_connections associated to this component.
- * @nr_inport: number of input port associated to this component.
- * @nr_outport: number of output port associated to this component.
+ * @pdata: Platform data with device connections associated to this device.
* @type: as defined by @coresight_dev_type.
* @subtype: as defined by @coresight_dev_subtype.
* @ops: generic operations for this component, as defined
- by @coresight_ops.
+ * by @coresight_ops.
+ * @access: Device i/o access abstraction for this device.
* @dev: The device entity associated to this component.
* @refcnt: keep track of what is in use.
* @orphan: true if the component has connections that haven't been linked.
* @enable: 'true' if component is currently part of an active path.
* @activated: 'true' only if a _sink_ has been activated. A sink can be
- activated but not yet enabled. Enabling for a _sink_
- happens when a source has been selected for that it.
+ * activated but not yet enabled. Enabling for a _sink_
+ * happens when a source has been selected and a path is enabled
+ * from source to that sink.
+ * @ea: Device attribute for sink representation under PMU directory.
+ * @def_sink: cached reference to default sink found for this device.
+ * @ect_dev: Associated cross trigger device. Not part of the trace data
+ * path or connections.
+ * @nr_links: number of sysfs links created to other components from this
+ * device. These will appear in the "connections" group.
+ * @has_conns_grp: Have added a "connections" group for sysfs links.
+ * @feature_csdev_list: List of complex feature programming added to the device.
+ * @config_csdev_list: List of system configurations added to the device.
+ * @cscfg_csdev_lock: Protect the lists of configurations and features.
+ * @active_cscfg_ctxt: Context information for current active system configuration.
*/
struct coresight_device {
- struct coresight_connection *conns;
- int nr_inport;
- int nr_outport;
+ struct coresight_platform_data *pdata;
enum coresight_dev_type type;
- struct coresight_dev_subtype subtype;
+ union coresight_dev_subtype subtype;
const struct coresight_ops *ops;
+ struct csdev_access access;
struct device dev;
atomic_t *refcnt;
bool orphan;
bool enable; /* true only if configured as part of a path */
+ /* sink specific fields */
bool activated; /* true only if a sink is part of a path */
+ struct dev_ext_attribute *ea;
+ struct coresight_device *def_sink;
+ /* cross trigger handling */
+ struct coresight_device *ect_dev;
+ /* sysfs links between components */
+ int nr_links;
+ bool has_conns_grp;
+ bool ect_enabled; /* true only if associated ect device is enabled */
+ /* system configuration and feature lists */
+ struct list_head feature_csdev_list;
+ struct list_head config_csdev_list;
+ spinlock_t cscfg_csdev_lock;
+ void *active_cscfg_ctxt;
};
+/*
+ * coresight_dev_list - Mapping of devices to an index used to generate
+ * unique device names.
+ *
+ * @nr_idx: Number of entries already allocated.
+ * @pfx: Prefix pattern for device name.
+ * @fwnode_list: Array of fwnode_handles associated with each allocated
+ * index, up to nr_idx entries.
+ */
+struct coresight_dev_list {
+ int nr_idx;
+ const char *pfx;
+ struct fwnode_handle **fwnode_list;
+};
+
+#define DEFINE_CORESIGHT_DEVLIST(var, dev_pfx) \
+static struct coresight_dev_list (var) = { \
+ .pfx = dev_pfx, \
+ .nr_idx = 0, \
+ .fwnode_list = NULL, \
+}
+
#define to_coresight_device(d) container_of(d, struct coresight_device, dev)
#define source_ops(csdev) csdev->ops->source_ops
#define sink_ops(csdev) csdev->ops->sink_ops
#define link_ops(csdev) csdev->ops->link_ops
+#define helper_ops(csdev) csdev->ops->helper_ops
+#define ect_ops(csdev) csdev->ops->ect_ops
/**
* struct coresight_ops_sink - basic operations for a sink
@@ -186,23 +285,16 @@ struct coresight_device {
* @disable: disables the sink.
* @alloc_buffer: initialises perf's ring buffer for trace collection.
* @free_buffer: release memory allocated in @get_config.
- * @set_buffer: initialises buffer mechanic before a trace session.
- * @reset_buffer: finalises buffer mechanic after a trace session.
* @update_buffer: update buffer pointers after a trace session.
*/
struct coresight_ops_sink {
- int (*enable)(struct coresight_device *csdev, u32 mode);
- void (*disable)(struct coresight_device *csdev);
- void *(*alloc_buffer)(struct coresight_device *csdev, int cpu,
- void **pages, int nr_pages, bool overwrite);
+ int (*enable)(struct coresight_device *csdev, u32 mode, void *data);
+ int (*disable)(struct coresight_device *csdev);
+ void *(*alloc_buffer)(struct coresight_device *csdev,
+ struct perf_event *event, void **pages,
+ int nr_pages, bool overwrite);
void (*free_buffer)(void *config);
- int (*set_buffer)(struct coresight_device *csdev,
- struct perf_output_handle *handle,
- void *sink_config);
- unsigned long (*reset_buffer)(struct coresight_device *csdev,
- struct perf_output_handle *handle,
- void *sink_config);
- void (*update_buffer)(struct coresight_device *csdev,
+ unsigned long (*update_buffer)(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config);
};
@@ -223,34 +315,213 @@ struct coresight_ops_link {
* Operations available for sources.
* @cpu_id: returns the value of the CPU number this component
* is associated to.
- * @trace_id: returns the value of the component's trace ID as known
- * to the HW.
* @enable: enables tracing for a source.
* @disable: disables tracing for a source.
*/
struct coresight_ops_source {
int (*cpu_id)(struct coresight_device *csdev);
- int (*trace_id)(struct coresight_device *csdev);
int (*enable)(struct coresight_device *csdev,
struct perf_event *event, u32 mode);
void (*disable)(struct coresight_device *csdev,
struct perf_event *event);
};
+/**
+ * struct coresight_ops_helper - Operations for a helper device.
+ *
+ * All operations take device-specific data, which can help the helper
+ * device determine what to do.
+ *
+ * @enable : Enable the device
+ * @disable : Disable the device
+ */
+struct coresight_ops_helper {
+ int (*enable)(struct coresight_device *csdev, void *data);
+ int (*disable)(struct coresight_device *csdev, void *data);
+};
+
+/**
+ * struct coresight_ops_ect - Ops for an embedded cross trigger device
+ *
+ * @enable : Enable the device
+ * @disable : Disable the device
+ */
+struct coresight_ops_ect {
+ int (*enable)(struct coresight_device *csdev);
+ int (*disable)(struct coresight_device *csdev);
+};
+
struct coresight_ops {
const struct coresight_ops_sink *sink_ops;
const struct coresight_ops_link *link_ops;
const struct coresight_ops_source *source_ops;
+ const struct coresight_ops_helper *helper_ops;
+ const struct coresight_ops_ect *ect_ops;
};
-#ifdef CONFIG_CORESIGHT
+#if IS_ENABLED(CONFIG_CORESIGHT)
+
+static inline u32 csdev_access_relaxed_read32(struct csdev_access *csa,
+ u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readl_relaxed(csa->base + offset);
+
+ return csa->read(offset, true, false);
+}
+
+static inline u64 csdev_access_relaxed_read_pair(struct csdev_access *csa,
+ u32 lo_offset, u32 hi_offset)
+{
+ if (likely(csa->io_mem)) {
+ return readl_relaxed(csa->base + lo_offset) |
+ ((u64)readl_relaxed(csa->base + hi_offset) << 32);
+ }
+
+ return csa->read(lo_offset, true, false) | (csa->read(hi_offset, true, false) << 32);
+}
+
+static inline void csdev_access_relaxed_write_pair(struct csdev_access *csa, u64 val,
+ u32 lo_offset, u32 hi_offset)
+{
+ if (likely(csa->io_mem)) {
+ writel_relaxed((u32)val, csa->base + lo_offset);
+ writel_relaxed((u32)(val >> 32), csa->base + hi_offset);
+ } else {
+ csa->write((u32)val, lo_offset, true, false);
+ csa->write((u32)(val >> 32), hi_offset, true, false);
+ }
+}
+
+static inline u32 csdev_access_read32(struct csdev_access *csa, u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readl(csa->base + offset);
+
+ return csa->read(offset, false, false);
+}
+
+static inline void csdev_access_relaxed_write32(struct csdev_access *csa,
+ u32 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writel_relaxed(val, csa->base + offset);
+ else
+ csa->write(val, offset, true, false);
+}
+
+static inline void csdev_access_write32(struct csdev_access *csa, u32 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writel(val, csa->base + offset);
+ else
+ csa->write(val, offset, false, false);
+}
+
+#ifdef CONFIG_64BIT
+
+static inline u64 csdev_access_relaxed_read64(struct csdev_access *csa,
+ u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readq_relaxed(csa->base + offset);
+
+ return csa->read(offset, true, true);
+}
+
+static inline u64 csdev_access_read64(struct csdev_access *csa, u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readq(csa->base + offset);
+
+ return csa->read(offset, false, true);
+}
+
+static inline void csdev_access_relaxed_write64(struct csdev_access *csa,
+ u64 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writeq_relaxed(val, csa->base + offset);
+ else
+ csa->write(val, offset, true, true);
+}
+
+static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writeq(val, csa->base + offset);
+ else
+ csa->write(val, offset, false, true);
+}
+
+#else /* !CONFIG_64BIT */
+
+static inline u64 csdev_access_relaxed_read64(struct csdev_access *csa,
+ u32 offset)
+{
+ WARN_ON(1);
+ return 0;
+}
+
+static inline u64 csdev_access_read64(struct csdev_access *csa, u32 offset)
+{
+ WARN_ON(1);
+ return 0;
+}
+
+static inline void csdev_access_relaxed_write64(struct csdev_access *csa,
+ u64 val, u32 offset)
+{
+ WARN_ON(1);
+}
+
+static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 offset)
+{
+ WARN_ON(1);
+}
+#endif /* CONFIG_64BIT */
+
+static inline bool coresight_is_percpu_source(struct coresight_device *csdev)
+{
+ return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SOURCE) &&
+ (csdev->subtype.source_subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_PROC);
+}
+
+static inline bool coresight_is_percpu_sink(struct coresight_device *csdev)
+{
+ return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SINK) &&
+ (csdev->subtype.sink_subtype == CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM);
+}
+
extern struct coresight_device *
coresight_register(struct coresight_desc *desc);
extern void coresight_unregister(struct coresight_device *csdev);
extern int coresight_enable(struct coresight_device *csdev);
extern void coresight_disable(struct coresight_device *csdev);
-extern int coresight_timeout(void __iomem *addr, u32 offset,
+extern int coresight_timeout(struct csdev_access *csa, u32 offset,
int position, int value);
+
+extern int coresight_claim_device(struct coresight_device *csdev);
+extern int coresight_claim_device_unlocked(struct coresight_device *csdev);
+
+extern void coresight_disclaim_device(struct coresight_device *csdev);
+extern void coresight_disclaim_device_unlocked(struct coresight_device *csdev);
+extern char *coresight_alloc_device_name(struct coresight_dev_list *devs,
+ struct device *dev);
+
+extern bool coresight_loses_context_with_cpu(struct device *dev);
+
+u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset);
+u32 coresight_read32(struct coresight_device *csdev, u32 offset);
+void coresight_write32(struct coresight_device *csdev, u32 val, u32 offset);
+void coresight_relaxed_write32(struct coresight_device *csdev,
+ u32 val, u32 offset);
+u64 coresight_relaxed_read64(struct coresight_device *csdev, u32 offset);
+u64 coresight_read64(struct coresight_device *csdev, u32 offset);
+void coresight_relaxed_write64(struct coresight_device *csdev,
+ u64 val, u32 offset);
+void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset);
+
#else
static inline struct coresight_device *
coresight_register(struct coresight_desc *desc) { return NULL; }
@@ -258,40 +529,78 @@ static inline void coresight_unregister(struct coresight_device *csdev) {}
static inline int
coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
static inline void coresight_disable(struct coresight_device *csdev) {}
-static inline int coresight_timeout(void __iomem *addr, u32 offset,
- int position, int value) { return 1; }
-#endif
-
-#ifdef CONFIG_OF
-extern int of_coresight_get_cpu(const struct device_node *node);
-extern struct coresight_platform_data *
-of_get_coresight_platform_data(struct device *dev,
- const struct device_node *node);
-#else
-static inline int of_coresight_get_cpu(const struct device_node *node)
-{ return 0; }
-static inline struct coresight_platform_data *of_get_coresight_platform_data(
- struct device *dev, const struct device_node *node) { return NULL; }
-#endif
-
-#ifdef CONFIG_PID_NS
-static inline unsigned long
-coresight_vpid_to_pid(unsigned long vpid)
+
+static inline int coresight_timeout(struct csdev_access *csa, u32 offset,
+ int position, int value)
+{
+ return 1;
+}
+
+static inline int coresight_claim_device_unlocked(struct coresight_device *csdev)
{
- struct task_struct *task = NULL;
- unsigned long pid = 0;
+ return -EINVAL;
+}
- rcu_read_lock();
- task = find_task_by_vpid(vpid);
- if (task)
- pid = task_pid_nr(task);
- rcu_read_unlock();
+static inline int coresight_claim_device(struct coresight_device *csdev)
+{
+ return -EINVAL;
+}
- return pid;
+static inline void coresight_disclaim_device(struct coresight_device *csdev) {}
+static inline void coresight_disclaim_device_unlocked(struct coresight_device *csdev) {}
+
+static inline bool coresight_loses_context_with_cpu(struct device *dev)
+{
+ return false;
}
-#else
-static inline unsigned long
-coresight_vpid_to_pid(unsigned long vpid) { return vpid; }
-#endif
-#endif
+static inline u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static inline u32 coresight_read32(struct coresight_device *csdev, u32 offset)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static inline void coresight_write32(struct coresight_device *csdev, u32 val, u32 offset)
+{
+}
+
+static inline void coresight_relaxed_write32(struct coresight_device *csdev,
+ u32 val, u32 offset)
+{
+}
+
+static inline u64 coresight_relaxed_read64(struct coresight_device *csdev,
+ u32 offset)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static inline u64 coresight_read64(struct coresight_device *csdev, u32 offset)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static inline void coresight_relaxed_write64(struct coresight_device *csdev,
+ u64 val, u32 offset)
+{
+}
+
+static inline void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_CORESIGHT) */
+
+extern int coresight_get_cpu(struct device *dev);
+
+struct coresight_platform_data *coresight_get_platform_data(struct device *dev);
+
+#endif /* _LINUX_CORESIGHT_H */
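As a hedged sketch of the access abstraction: an MMIO-backed component wraps its base address with CSDEV_ACCESS_IOMEM() so the csdev_access_*() helpers take the readl()/writel() path, while system-instruction-based components supply ->read/->write callbacks instead. The function and register offset below are invented:

    static u32 my_read_status(void __iomem *base)
    {
    	struct csdev_access csa = CSDEV_ACCESS_IOMEM(base);

    	return csdev_access_read32(&csa, 0x0fc);	/* hypothetical offset */
    }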
diff --git a/include/linux/count_zeros.h b/include/linux/count_zeros.h
index 363da78c4f64..5b8ff5ac660d 100644
--- a/include/linux/count_zeros.h
+++ b/include/linux/count_zeros.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Count leading and trailing zeros functions
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
*/
#ifndef _LINUX_BITOPS_COUNT_ZEROS_H_
diff --git a/include/linux/counter.h b/include/linux/counter.h
new file mode 100644
index 000000000000..b63746637de2
--- /dev/null
+++ b/include/linux/counter.h
@@ -0,0 +1,632 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Counter interface
+ * Copyright (C) 2018 William Breathitt Gray
+ */
+#ifndef _COUNTER_H_
+#define _COUNTER_H_
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/mutex.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <uapi/linux/counter.h>
+
+struct counter_device;
+struct counter_count;
+struct counter_synapse;
+struct counter_signal;
+
+enum counter_comp_type {
+ COUNTER_COMP_U8,
+ COUNTER_COMP_U64,
+ COUNTER_COMP_BOOL,
+ COUNTER_COMP_SIGNAL_LEVEL,
+ COUNTER_COMP_FUNCTION,
+ COUNTER_COMP_SYNAPSE_ACTION,
+ COUNTER_COMP_ENUM,
+ COUNTER_COMP_COUNT_DIRECTION,
+ COUNTER_COMP_COUNT_MODE,
+ COUNTER_COMP_SIGNAL_POLARITY,
+ COUNTER_COMP_ARRAY,
+};
+
+/**
+ * struct counter_comp - Counter component node
+ * @type: Counter component data type
+ * @name: device-specific component name
+ * @priv: component-relevant data
+ * @action_read: Synapse action mode read callback. The read value of the
+ * respective Synapse action mode should be passed back via
+ * the action parameter.
+ * @device_u8_read: Device u8 component read callback. The read value of the
+ * respective Device u8 component should be passed back via
+ * the val parameter.
+ * @count_u8_read: Count u8 component read callback. The read value of the
+ * respective Count u8 component should be passed back via
+ * the val parameter.
+ * @signal_u8_read: Signal u8 component read callback. The read value of the
+ * respective Signal u8 component should be passed back via
+ * the val parameter.
+ * @device_u32_read: Device u32 component read callback. The read value of
+ * the respective Device u32 component should be passed
+ * back via the val parameter.
+ * @count_u32_read: Count u32 component read callback. The read value of the
+ * respective Count u32 component should be passed back via
+ * the val parameter.
+ * @signal_u32_read: Signal u32 component read callback. The read value of
+ * the respective Signal u32 component should be passed
+ * back via the val parameter.
+ * @device_u64_read: Device u64 component read callback. The read value of
+ * the respective Device u64 component should be passed
+ * back via the val parameter.
+ * @count_u64_read: Count u64 component read callback. The read value of the
+ * respective Count u64 component should be passed back via
+ * the val parameter.
+ * @signal_u64_read: Signal u64 component read callback. The read value of
+ * the respective Signal u64 component should be passed
+ * back via the val parameter.
+ * @signal_array_u32_read: Signal u32 array component read callback. The
+ * index of the respective Signal u32 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Signal u32 array component element should be
+ * passed back via the val parameter.
+ * @device_array_u64_read: Device u64 array component read callback. The
+ * index of the respective Device u64 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Device u64 array component element should be
+ * passed back via the val parameter.
+ * @count_array_u64_read: Count u64 array component read callback. The
+ * index of the respective Count u64 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Count u64 array component element should be
+ * passed back via the val parameter.
+ * @signal_array_u64_read: Signal u64 array component read callback. The
+ * index of the respective Signal u64 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Signal u64 array component element should be
+ * passed back via the val parameter.
+ * @action_write: Synapse action mode write callback. The write value of
+ * the respective Synapse action mode is passed via the
+ * action parameter.
+ * @device_u8_write: Device u8 component write callback. The write value of
+ * the respective Device u8 component is passed via the val
+ * parameter.
+ * @count_u8_write: Count u8 component write callback. The write value of
+ * the respective Count u8 component is passed via the val
+ * parameter.
+ * @signal_u8_write: Signal u8 component write callback. The write value of
+ * the respective Signal u8 component is passed via the val
+ * parameter.
+ * @device_u32_write: Device u32 component write callback. The write value of
+ * the respective Device u32 component is passed via the
+ * val parameter.
+ * @count_u32_write: Count u32 component write callback. The write value of
+ * the respective Count u32 component is passed via the val
+ * parameter.
+ * @signal_u32_write: Signal u32 component write callback. The write value of
+ * the respective Signal u32 component is passed via the
+ * val parameter.
+ * @device_u64_write: Device u64 component write callback. The write value of
+ * the respective Device u64 component is passed via the
+ * val parameter.
+ * @count_u64_write: Count u64 component write callback. The write value of
+ * the respective Count u64 component is passed via the val
+ * parameter.
+ * @signal_u64_write: Signal u64 component write callback. The write value of
+ * the respective Signal u64 component is passed via the
+ * val parameter.
+ * @signal_array_u32_write: Signal u32 array component write callback. The
+ * index of the respective Signal u32 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Signal u32 array component element is passed via
+ * the val parameter.
+ * @device_array_u64_write: Device u64 array component write callback. The
+ * index of the respective Device u64 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Device u64 array component element is passed via
+ * the val parameter.
+ * @count_array_u64_write: Count u64 array component write callback. The
+ * index of the respective Count u64 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Count u64 array component element is passed via
+ * the val parameter.
+ * @signal_array_u64_write: Signal u64 array component write callback. The
+ * index of the respective Signal u64 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Signal u64 array component element is passed via
+ * the val parameter.
+ */
+struct counter_comp {
+ enum counter_comp_type type;
+ const char *name;
+ void *priv;
+ union {
+ int (*action_read)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action);
+ int (*device_u8_read)(struct counter_device *counter, u8 *val);
+ int (*count_u8_read)(struct counter_device *counter,
+ struct counter_count *count, u8 *val);
+ int (*signal_u8_read)(struct counter_device *counter,
+ struct counter_signal *signal, u8 *val);
+ int (*device_u32_read)(struct counter_device *counter,
+ u32 *val);
+ int (*count_u32_read)(struct counter_device *counter,
+ struct counter_count *count, u32 *val);
+ int (*signal_u32_read)(struct counter_device *counter,
+ struct counter_signal *signal, u32 *val);
+ int (*device_u64_read)(struct counter_device *counter,
+ u64 *val);
+ int (*count_u64_read)(struct counter_device *counter,
+ struct counter_count *count, u64 *val);
+ int (*signal_u64_read)(struct counter_device *counter,
+ struct counter_signal *signal, u64 *val);
+ int (*signal_array_u32_read)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u32 *val);
+ int (*device_array_u64_read)(struct counter_device *counter,
+ size_t idx, u64 *val);
+ int (*count_array_u64_read)(struct counter_device *counter,
+ struct counter_count *count,
+ size_t idx, u64 *val);
+ int (*signal_array_u64_read)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u64 *val);
+ };
+ union {
+ int (*action_write)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action action);
+ int (*device_u8_write)(struct counter_device *counter, u8 val);
+ int (*count_u8_write)(struct counter_device *counter,
+ struct counter_count *count, u8 val);
+ int (*signal_u8_write)(struct counter_device *counter,
+ struct counter_signal *signal, u8 val);
+ int (*device_u32_write)(struct counter_device *counter,
+ u32 val);
+ int (*count_u32_write)(struct counter_device *counter,
+ struct counter_count *count, u32 val);
+ int (*signal_u32_write)(struct counter_device *counter,
+ struct counter_signal *signal, u32 val);
+ int (*device_u64_write)(struct counter_device *counter,
+ u64 val);
+ int (*count_u64_write)(struct counter_device *counter,
+ struct counter_count *count, u64 val);
+ int (*signal_u64_write)(struct counter_device *counter,
+ struct counter_signal *signal, u64 val);
+ int (*signal_array_u32_write)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u32 val);
+ int (*device_array_u64_write)(struct counter_device *counter,
+ size_t idx, u64 val);
+ int (*count_array_u64_write)(struct counter_device *counter,
+ struct counter_count *count,
+ size_t idx, u64 val);
+ int (*signal_array_u64_write)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u64 val);
+ };
+};
+
+/**
+ * struct counter_signal - Counter Signal node
+ * @id: unique ID used to identify the Signal
+ * @name: device-specific Signal name
+ * @ext: optional array of Signal extensions
+ * @num_ext: number of Signal extensions specified in @ext
+ */
+struct counter_signal {
+ int id;
+ const char *name;
+
+ struct counter_comp *ext;
+ size_t num_ext;
+};
+
+/**
+ * struct counter_synapse - Counter Synapse node
+ * @actions_list: array of available action modes
+ * @num_actions: number of action modes specified in @actions_list
+ * @signal: pointer to the associated Signal
+ */
+struct counter_synapse {
+ const enum counter_synapse_action *actions_list;
+ size_t num_actions;
+
+ struct counter_signal *signal;
+};
+
+/**
+ * struct counter_count - Counter Count node
+ * @id: unique ID used to identify the Count
+ * @name: device-specific Count name
+ * @functions_list: array of available function modes
+ * @num_functions: number of function modes specified in @functions_list
+ * @synapses: array of Synapses for initialization
+ * @num_synapses: number of Synapses specified in @synapses
+ * @ext: optional array of Count extensions
+ * @num_ext: number of Count extensions specified in @ext
+ */
+struct counter_count {
+ int id;
+ const char *name;
+
+ const enum counter_function *functions_list;
+ size_t num_functions;
+
+ struct counter_synapse *synapses;
+ size_t num_synapses;
+
+ struct counter_comp *ext;
+ size_t num_ext;
+};
+
+/**
+ * struct counter_event_node - Counter Event node
+ * @l: list of current watching Counter events
+ * @event: event that triggers
+ * @channel: event channel
+ * @comp_list: list of components to watch when event triggers
+ */
+struct counter_event_node {
+ struct list_head l;
+ u8 event;
+ u8 channel;
+ struct list_head comp_list;
+};
+
+/**
+ * struct counter_ops - Callbacks from driver
+ * @signal_read: optional read callback for Signals. The read level of
+ * the respective Signal should be passed back via the
+ * level parameter.
+ * @count_read: read callback for Counts. The read value of the
+ * respective Count should be passed back via the value
+ * parameter.
+ * @count_write: optional write callback for Counts. The write value for
+ * the respective Count is passed in via the value
+ * parameter.
+ * @function_read: read callback for the Count function modes. The read
+ * function mode of the respective Count should be passed
+ * back via the function parameter.
+ * @function_write: optional write callback for Count function modes. The
+ * function mode to write for the respective Count is
+ * passed in via the function parameter.
+ * @action_read: optional read callback for the Synapse action modes. The
+ * read action mode of the respective Synapse should be
+ * passed back via the action parameter.
+ * @action_write: optional write callback for Synapse action modes. The
+ * action mode to write for the respective Synapse is
+ * passed in via the action parameter.
+ * @events_configure: optional write callback to configure events. The list of
+ * struct counter_event_node may be accessed via the
+ * events_list member of the counter parameter.
+ * @watch_validate: optional callback to validate a watch. The Counter
+ * component watch configuration is passed in via the watch
+ * parameter. A return value of 0 indicates a valid Counter
+ * component watch configuration.
+ */
+struct counter_ops {
+ int (*signal_read)(struct counter_device *counter,
+ struct counter_signal *signal,
+ enum counter_signal_level *level);
+ int (*count_read)(struct counter_device *counter,
+ struct counter_count *count, u64 *value);
+ int (*count_write)(struct counter_device *counter,
+ struct counter_count *count, u64 value);
+ int (*function_read)(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function);
+ int (*function_write)(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function function);
+ int (*action_read)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action);
+ int (*action_write)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action action);
+ int (*events_configure)(struct counter_device *counter);
+ int (*watch_validate)(struct counter_device *counter,
+ const struct counter_watch *watch);
+};
+
+/**
+ * struct counter_device - Counter data structure
+ * @name: name of the device
+ * @parent: optional parent device providing the counters
+ * @ops: callbacks from driver
+ * @signals: array of Signals
+ * @num_signals: number of Signals specified in @signals
+ * @counts: array of Counts
+ * @num_counts: number of Counts specified in @counts
+ * @ext: optional array of Counter device extensions
+ * @num_ext: number of Counter device extensions specified in @ext
+ * @priv: optional private data supplied by driver
+ * @dev: internal device structure
+ * @chrdev: internal character device structure
+ * @events_list: list of current watching Counter events
+ * @events_list_lock: lock to protect Counter events list operations
+ * @next_events_list: list of next watching Counter events
+ * @n_events_list_lock: lock to protect Counter next events list operations
+ * @events: queue of detected Counter events
+ * @events_wait: wait queue to allow blocking reads of Counter events
+ * @events_in_lock: lock to protect Counter events queue in operations
+ * @events_out_lock: lock to protect Counter events queue out operations
+ * @ops_exist_lock: lock to prevent use during removal
+ */
+struct counter_device {
+ const char *name;
+ struct device *parent;
+
+ const struct counter_ops *ops;
+
+ struct counter_signal *signals;
+ size_t num_signals;
+ struct counter_count *counts;
+ size_t num_counts;
+
+ struct counter_comp *ext;
+ size_t num_ext;
+
+ struct device dev;
+ struct cdev chrdev;
+ struct list_head events_list;
+ spinlock_t events_list_lock;
+ struct list_head next_events_list;
+ struct mutex n_events_list_lock;
+ DECLARE_KFIFO_PTR(events, struct counter_event);
+ wait_queue_head_t events_wait;
+ spinlock_t events_in_lock;
+ struct mutex events_out_lock;
+ struct mutex ops_exist_lock;
+};
+
+void *counter_priv(const struct counter_device *const counter);
+
+struct counter_device *counter_alloc(size_t sizeof_priv);
+void counter_put(struct counter_device *const counter);
+int counter_add(struct counter_device *const counter);
+
+void counter_unregister(struct counter_device *const counter);
+struct counter_device *devm_counter_alloc(struct device *dev,
+ size_t sizeof_priv);
+int devm_counter_add(struct device *dev,
+ struct counter_device *const counter);
+void counter_push_event(struct counter_device *const counter, const u8 event,
+ const u8 channel);
+
+#define COUNTER_COMP_DEVICE_U8(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U8, \
+ .name = (_name), \
+ .device_u8_read = (_read), \
+ .device_u8_write = (_write), \
+}
+#define COUNTER_COMP_COUNT_U8(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U8, \
+ .name = (_name), \
+ .count_u8_read = (_read), \
+ .count_u8_write = (_write), \
+}
+#define COUNTER_COMP_SIGNAL_U8(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U8, \
+ .name = (_name), \
+ .signal_u8_read = (_read), \
+ .signal_u8_write = (_write), \
+}
+
+#define COUNTER_COMP_DEVICE_U64(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U64, \
+ .name = (_name), \
+ .device_u64_read = (_read), \
+ .device_u64_write = (_write), \
+}
+#define COUNTER_COMP_COUNT_U64(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U64, \
+ .name = (_name), \
+ .count_u64_read = (_read), \
+ .count_u64_write = (_write), \
+}
+#define COUNTER_COMP_SIGNAL_U64(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U64, \
+ .name = (_name), \
+ .signal_u64_read = (_read), \
+ .signal_u64_write = (_write), \
+}
+
+#define COUNTER_COMP_DEVICE_BOOL(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_BOOL, \
+ .name = (_name), \
+ .device_u8_read = (_read), \
+ .device_u8_write = (_write), \
+}
+#define COUNTER_COMP_COUNT_BOOL(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_BOOL, \
+ .name = (_name), \
+ .count_u8_read = (_read), \
+ .count_u8_write = (_write), \
+}
+#define COUNTER_COMP_SIGNAL_BOOL(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_BOOL, \
+ .name = (_name), \
+ .signal_u8_read = (_read), \
+ .signal_u8_write = (_write), \
+}
+
+struct counter_available {
+ union {
+ const u32 *enums;
+ const char *const *strs;
+ };
+ size_t num_items;
+};
+
+#define DEFINE_COUNTER_AVAILABLE(_name, _enums) \
+ struct counter_available _name = { \
+ .enums = (_enums), \
+ .num_items = ARRAY_SIZE(_enums), \
+ }
+
+#define DEFINE_COUNTER_ENUM(_name, _strs) \
+ struct counter_available _name = { \
+ .strs = (_strs), \
+ .num_items = ARRAY_SIZE(_strs), \
+ }
+
+#define COUNTER_COMP_DEVICE_ENUM(_name, _get, _set, _available) \
+{ \
+ .type = COUNTER_COMP_ENUM, \
+ .name = (_name), \
+ .device_u32_read = (_get), \
+ .device_u32_write = (_set), \
+ .priv = &(_available), \
+}
+#define COUNTER_COMP_COUNT_ENUM(_name, _get, _set, _available) \
+{ \
+ .type = COUNTER_COMP_ENUM, \
+ .name = (_name), \
+ .count_u32_read = (_get), \
+ .count_u32_write = (_set), \
+ .priv = &(_available), \
+}
+#define COUNTER_COMP_SIGNAL_ENUM(_name, _get, _set, _available) \
+{ \
+ .type = COUNTER_COMP_ENUM, \
+ .name = (_name), \
+ .signal_u32_read = (_get), \
+ .signal_u32_write = (_set), \
+ .priv = &(_available), \
+}
+
+struct counter_array {
+ enum counter_comp_type type;
+ const struct counter_available *avail;
+ union {
+ size_t length;
+ size_t idx;
+ };
+};
+
+#define DEFINE_COUNTER_ARRAY_U64(_name, _length) \
+ struct counter_array _name = { \
+ .type = COUNTER_COMP_U64, \
+ .length = (_length), \
+ }
+
+#define DEFINE_COUNTER_ARRAY_CAPTURE(_name, _length) \
+ DEFINE_COUNTER_ARRAY_U64(_name, _length)
+
+#define DEFINE_COUNTER_ARRAY_POLARITY(_name, _available, _length) \
+ struct counter_array _name = { \
+ .type = COUNTER_COMP_SIGNAL_POLARITY, \
+ .avail = &(_available), \
+ .length = (_length), \
+ }
+
+#define COUNTER_COMP_DEVICE_ARRAY_U64(_name, _read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = (_name), \
+ .device_array_u64_read = (_read), \
+ .device_array_u64_write = (_write), \
+ .priv = &(_array), \
+}
+#define COUNTER_COMP_COUNT_ARRAY_U64(_name, _read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = (_name), \
+ .count_array_u64_read = (_read), \
+ .count_array_u64_write = (_write), \
+ .priv = &(_array), \
+}
+#define COUNTER_COMP_SIGNAL_ARRAY_U64(_name, _read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = (_name), \
+ .signal_array_u64_read = (_read), \
+ .signal_array_u64_write = (_write), \
+ .priv = &(_array), \
+}
+
+#define COUNTER_COMP_CAPTURE(_read, _write) \
+ COUNTER_COMP_COUNT_U64("capture", _read, _write)
+
+#define COUNTER_COMP_CEILING(_read, _write) \
+ COUNTER_COMP_COUNT_U64("ceiling", _read, _write)
+
+#define COUNTER_COMP_COUNT_MODE(_read, _write, _available) \
+{ \
+ .type = COUNTER_COMP_COUNT_MODE, \
+ .name = "count_mode", \
+ .count_u32_read = (_read), \
+ .count_u32_write = (_write), \
+ .priv = &(_available), \
+}
+
+#define COUNTER_COMP_DIRECTION(_read) \
+{ \
+ .type = COUNTER_COMP_COUNT_DIRECTION, \
+ .name = "direction", \
+ .count_u32_read = (_read), \
+}
+
+#define COUNTER_COMP_ENABLE(_read, _write) \
+ COUNTER_COMP_COUNT_BOOL("enable", _read, _write)
+
+#define COUNTER_COMP_FLOOR(_read, _write) \
+ COUNTER_COMP_COUNT_U64("floor", _read, _write)
+
+#define COUNTER_COMP_POLARITY(_read, _write, _available) \
+{ \
+ .type = COUNTER_COMP_SIGNAL_POLARITY, \
+ .name = "polarity", \
+ .signal_u32_read = (_read), \
+ .signal_u32_write = (_write), \
+ .priv = &(_available), \
+}
+
+#define COUNTER_COMP_PRESET(_read, _write) \
+ COUNTER_COMP_COUNT_U64("preset", _read, _write)
+
+#define COUNTER_COMP_PRESET_ENABLE(_read, _write) \
+ COUNTER_COMP_COUNT_BOOL("preset_enable", _read, _write)
+
+#define COUNTER_COMP_ARRAY_CAPTURE(_read, _write, _array) \
+ COUNTER_COMP_COUNT_ARRAY_U64("capture", _read, _write, _array)
+
+#define COUNTER_COMP_ARRAY_POLARITY(_read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = "polarity", \
+ .signal_array_u32_read = (_read), \
+ .signal_array_u32_write = (_write), \
+ .priv = &(_array), \
+}
+
+#endif /* _COUNTER_H_ */
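For orientation, these component macros are meant to be composed into a driver's extension tables. A minimal sketch, assuming the counter_comp callback signatures declared earlier in this header (all foo_* names are hypothetical):

	/* Hypothetical callbacks matching count_u64_read/count_u64_write. */
	static int foo_ceiling_read(struct counter_device *counter,
				    struct counter_count *count, u64 *val);
	static int foo_ceiling_write(struct counter_device *counter,
				     struct counter_count *count, u64 val);

	static struct counter_comp foo_count_ext[] = {
		COUNTER_COMP_CEILING(foo_ceiling_read, foo_ceiling_write),
	};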
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 4c671fc2081e..eacb7dd7b3af 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* UEFI Common Platform Error Record
*
* Copyright (C) 2010, Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef LINUX_CPER_H
@@ -44,7 +32,7 @@
*/
#define CPER_REC_LEN 256
/*
- * Severity difinition for error_severity in struct cper_record_header
+ * Severity definition for error_severity in struct cper_record_header
* and section_severity in struct cper_section_descriptor
*/
enum {
@@ -55,55 +43,52 @@ enum {
};
/*
- * Validation bits difinition for validation_bits in struct
+ * Validation bits definition for validation_bits in struct
* cper_record_header. If set, corresponding fields in struct
* cper_record_header contain valid information.
- *
- * corresponds platform_id
*/
#define CPER_VALID_PLATFORM_ID 0x0001
-/* corresponds timestamp */
#define CPER_VALID_TIMESTAMP 0x0002
-/* corresponds partition_id */
#define CPER_VALID_PARTITION_ID 0x0004
/*
* Notification type used to generate error record, used in
- * notification_type in struct cper_record_header
- *
- * Corrected Machine Check
+ * notification_type in struct cper_record_header. These UUIDs are defined
+ * in the UEFI spec v2.7, sec N.2.1.
*/
+
+/* Corrected Machine Check */
#define CPER_NOTIFY_CMC \
- UUID_LE(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \
- 0xEB, 0xD4, 0xF8, 0x90)
+ GUID_INIT(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \
+ 0xEB, 0xD4, 0xF8, 0x90)
/* Corrected Platform Error */
#define CPER_NOTIFY_CPE \
- UUID_LE(0x4E292F96, 0xD843, 0x4a55, 0xA8, 0xC2, 0xD4, 0x81, \
- 0xF2, 0x7E, 0xBE, 0xEE)
+ GUID_INIT(0x4E292F96, 0xD843, 0x4a55, 0xA8, 0xC2, 0xD4, 0x81, \
+ 0xF2, 0x7E, 0xBE, 0xEE)
/* Machine Check Exception */
#define CPER_NOTIFY_MCE \
- UUID_LE(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \
- 0xE1, 0x49, 0x13, 0xBB)
+ GUID_INIT(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \
+ 0xE1, 0x49, 0x13, 0xBB)
/* PCI Express Error */
#define CPER_NOTIFY_PCIE \
- UUID_LE(0xCF93C01F, 0x1A16, 0x4dfc, 0xB8, 0xBC, 0x9C, 0x4D, \
- 0xAF, 0x67, 0xC1, 0x04)
+ GUID_INIT(0xCF93C01F, 0x1A16, 0x4dfc, 0xB8, 0xBC, 0x9C, 0x4D, \
+ 0xAF, 0x67, 0xC1, 0x04)
/* INIT Record (for IPF) */
#define CPER_NOTIFY_INIT \
- UUID_LE(0xCC5263E8, 0x9308, 0x454a, 0x89, 0xD0, 0x34, 0x0B, \
- 0xD3, 0x9B, 0xC9, 0x8E)
+ GUID_INIT(0xCC5263E8, 0x9308, 0x454a, 0x89, 0xD0, 0x34, 0x0B, \
+ 0xD3, 0x9B, 0xC9, 0x8E)
/* Non-Maskable Interrupt */
#define CPER_NOTIFY_NMI \
- UUID_LE(0x5BAD89FF, 0xB7E6, 0x42c9, 0x81, 0x4A, 0xCF, 0x24, \
- 0x85, 0xD6, 0xE9, 0x8A)
+ GUID_INIT(0x5BAD89FF, 0xB7E6, 0x42c9, 0x81, 0x4A, 0xCF, 0x24, \
+ 0x85, 0xD6, 0xE9, 0x8A)
/* BOOT Error Record */
#define CPER_NOTIFY_BOOT \
- UUID_LE(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \
- 0xD4, 0x64, 0xB3, 0x8F)
+ GUID_INIT(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \
+ 0xD4, 0x64, 0xB3, 0x8F)
/* DMA Remapping Error */
#define CPER_NOTIFY_DMAR \
- UUID_LE(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \
- 0x72, 0x2D, 0xEB, 0x41)
+ GUID_INIT(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \
+ 0x72, 0x2D, 0xEB, 0x41)
/*
* Flags bits definitions for flags in struct cper_record_header
@@ -122,14 +107,11 @@ enum {
#define CPER_SEC_REV 0x0100
/*
- * Validation bits difinition for validation_bits in struct
+ * Validation bits definition for validation_bits in struct
* cper_section_descriptor. If set, corresponding fields in struct
* cper_section_descriptor contain valid information.
- *
- * corresponds fru_id
*/
#define CPER_SEC_VALID_FRU_ID 0x1
-/* corresponds fru_text */
#define CPER_SEC_VALID_FRU_TEXT 0x2
/*
@@ -165,55 +147,56 @@ enum {
/*
* Section type definitions, used in section_type field in struct
- * cper_section_descriptor
- *
- * Processor Generic
+ * cper_section_descriptor. These UUIDs are defined in the UEFI spec
+ * v2.7, sec N.2.2.
*/
+
+/* Processor Generic */
#define CPER_SEC_PROC_GENERIC \
- UUID_LE(0x9876CCAD, 0x47B4, 0x4bdb, 0xB6, 0x5E, 0x16, 0xF1, \
- 0x93, 0xC4, 0xF3, 0xDB)
+ GUID_INIT(0x9876CCAD, 0x47B4, 0x4bdb, 0xB6, 0x5E, 0x16, 0xF1, \
+ 0x93, 0xC4, 0xF3, 0xDB)
/* Processor Specific: X86/X86_64 */
#define CPER_SEC_PROC_IA \
- UUID_LE(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \
- 0x24, 0x2B, 0x6E, 0x1D)
+ GUID_INIT(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \
+ 0x24, 0x2B, 0x6E, 0x1D)
/* Processor Specific: IA64 */
#define CPER_SEC_PROC_IPF \
- UUID_LE(0xE429FAF1, 0x3CB7, 0x11D4, 0x0B, 0xCA, 0x07, 0x00, \
- 0x80, 0xC7, 0x3C, 0x88, 0x81)
+ GUID_INIT(0xE429FAF1, 0x3CB7, 0x11D4, 0x0B, 0xCA, 0x07, 0x00, \
+ 0x80, 0xC7, 0x3C, 0x88, 0x81)
/* Processor Specific: ARM */
#define CPER_SEC_PROC_ARM \
- UUID_LE(0xE19E3D16, 0xBC11, 0x11E4, 0x9C, 0xAA, 0xC2, 0x05, \
- 0x1D, 0x5D, 0x46, 0xB0)
+ GUID_INIT(0xE19E3D16, 0xBC11, 0x11E4, 0x9C, 0xAA, 0xC2, 0x05, \
+ 0x1D, 0x5D, 0x46, 0xB0)
/* Platform Memory */
#define CPER_SEC_PLATFORM_MEM \
- UUID_LE(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \
- 0xED, 0x7C, 0x83, 0xB1)
+ GUID_INIT(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \
+ 0xED, 0x7C, 0x83, 0xB1)
#define CPER_SEC_PCIE \
- UUID_LE(0xD995E954, 0xBBC1, 0x430F, 0xAD, 0x91, 0xB4, 0x4D, \
- 0xCB, 0x3C, 0x6F, 0x35)
+ GUID_INIT(0xD995E954, 0xBBC1, 0x430F, 0xAD, 0x91, 0xB4, 0x4D, \
+ 0xCB, 0x3C, 0x6F, 0x35)
/* Firmware Error Record Reference */
#define CPER_SEC_FW_ERR_REC_REF \
- UUID_LE(0x81212A96, 0x09ED, 0x4996, 0x94, 0x71, 0x8D, 0x72, \
- 0x9C, 0x8E, 0x69, 0xED)
+ GUID_INIT(0x81212A96, 0x09ED, 0x4996, 0x94, 0x71, 0x8D, 0x72, \
+ 0x9C, 0x8E, 0x69, 0xED)
/* PCI/PCI-X Bus */
#define CPER_SEC_PCI_X_BUS \
- UUID_LE(0xC5753963, 0x3B84, 0x4095, 0xBF, 0x78, 0xED, 0xDA, \
- 0xD3, 0xF9, 0xC9, 0xDD)
+ GUID_INIT(0xC5753963, 0x3B84, 0x4095, 0xBF, 0x78, 0xED, 0xDA, \
+ 0xD3, 0xF9, 0xC9, 0xDD)
/* PCI Component/Device */
#define CPER_SEC_PCI_DEV \
- UUID_LE(0xEB5E4685, 0xCA66, 0x4769, 0xB6, 0xA2, 0x26, 0x06, \
- 0x8B, 0x00, 0x13, 0x26)
+ GUID_INIT(0xEB5E4685, 0xCA66, 0x4769, 0xB6, 0xA2, 0x26, 0x06, \
+ 0x8B, 0x00, 0x13, 0x26)
#define CPER_SEC_DMAR_GENERIC \
- UUID_LE(0x5B51FEF7, 0xC79D, 0x4434, 0x8F, 0x1B, 0xAA, 0x62, \
- 0xDE, 0x3E, 0x2C, 0x64)
+ GUID_INIT(0x5B51FEF7, 0xC79D, 0x4434, 0x8F, 0x1B, 0xAA, 0x62, \
+ 0xDE, 0x3E, 0x2C, 0x64)
/* Intel VT for Directed I/O specific DMAr */
#define CPER_SEC_DMAR_VT \
- UUID_LE(0x71761D37, 0x32B2, 0x45cd, 0xA7, 0xD0, 0xB0, 0xFE, \
- 0xDD, 0x93, 0xE8, 0xCF)
+ GUID_INIT(0x71761D37, 0x32B2, 0x45cd, 0xA7, 0xD0, 0xB0, 0xFE, \
+ 0xDD, 0x93, 0xE8, 0xCF)
/* IOMMU specific DMAr */
#define CPER_SEC_DMAR_IOMMU \
- UUID_LE(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F, \
- 0xDF, 0xAA, 0x84, 0xEC)
+ GUID_INIT(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F, \
+ 0xDF, 0xAA, 0x84, 0xEC)
#define CPER_PROC_VALID_TYPE 0x0001
#define CPER_PROC_VALID_ISA 0x0002
@@ -247,6 +230,18 @@ enum {
#define CPER_MEM_VALID_RANK_NUMBER 0x8000
#define CPER_MEM_VALID_CARD_HANDLE 0x10000
#define CPER_MEM_VALID_MODULE_HANDLE 0x20000
+#define CPER_MEM_VALID_ROW_EXT 0x40000
+#define CPER_MEM_VALID_BANK_GROUP 0x80000
+#define CPER_MEM_VALID_BANK_ADDRESS 0x100000
+#define CPER_MEM_VALID_CHIP_ID 0x200000
+
+#define CPER_MEM_EXT_ROW_MASK 0x3
+#define CPER_MEM_EXT_ROW_SHIFT 16
+
+#define CPER_MEM_BANK_ADDRESS_MASK 0xff
+#define CPER_MEM_BANK_GROUP_SHIFT 8
+
+#define CPER_MEM_CHIP_ID_SHIFT 5
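When CPER_MEM_VALID_BANK_GROUP and CPER_MEM_VALID_BANK_ADDRESS are set, the 16-bit bank field carries both values, and the chip identifier sits in the upper bits of the extended byte. A decoding sketch (not part of this patch; mem points at one of the memory error structs below):

	u8 bank_group = mem->bank >> CPER_MEM_BANK_GROUP_SHIFT;
	u8 bank_address = mem->bank & CPER_MEM_BANK_ADDRESS_MASK;
	u8 chip_id = mem->extended >> CPER_MEM_CHIP_ID_SHIFT;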
#define CPER_PCIE_VALID_PORT_TYPE 0x0001
#define CPER_PCIE_VALID_VERSION 0x0002
@@ -275,233 +270,306 @@ enum {
#define CPER_ARM_INFO_FLAGS_PROPAGATED BIT(2)
#define CPER_ARM_INFO_FLAGS_OVERFLOW BIT(3)
+#define CPER_ARM_CACHE_ERROR 0
+#define CPER_ARM_TLB_ERROR 1
+#define CPER_ARM_BUS_ERROR 2
+#define CPER_ARM_VENDOR_ERROR 3
+#define CPER_ARM_MAX_TYPE CPER_ARM_VENDOR_ERROR
+
+#define CPER_ARM_ERR_VALID_TRANSACTION_TYPE BIT(0)
+#define CPER_ARM_ERR_VALID_OPERATION_TYPE BIT(1)
+#define CPER_ARM_ERR_VALID_LEVEL BIT(2)
+#define CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT BIT(3)
+#define CPER_ARM_ERR_VALID_CORRECTED BIT(4)
+#define CPER_ARM_ERR_VALID_PRECISE_PC BIT(5)
+#define CPER_ARM_ERR_VALID_RESTARTABLE_PC BIT(6)
+#define CPER_ARM_ERR_VALID_PARTICIPATION_TYPE BIT(7)
+#define CPER_ARM_ERR_VALID_TIME_OUT BIT(8)
+#define CPER_ARM_ERR_VALID_ADDRESS_SPACE BIT(9)
+#define CPER_ARM_ERR_VALID_MEM_ATTRIBUTES BIT(10)
+#define CPER_ARM_ERR_VALID_ACCESS_MODE BIT(11)
+
+#define CPER_ARM_ERR_TRANSACTION_SHIFT 16
+#define CPER_ARM_ERR_TRANSACTION_MASK GENMASK(1, 0)
+#define CPER_ARM_ERR_OPERATION_SHIFT 18
+#define CPER_ARM_ERR_OPERATION_MASK GENMASK(3, 0)
+#define CPER_ARM_ERR_LEVEL_SHIFT 22
+#define CPER_ARM_ERR_LEVEL_MASK GENMASK(2, 0)
+#define CPER_ARM_ERR_PC_CORRUPT_SHIFT 25
+#define CPER_ARM_ERR_PC_CORRUPT_MASK GENMASK(0, 0)
+#define CPER_ARM_ERR_CORRECTED_SHIFT 26
+#define CPER_ARM_ERR_CORRECTED_MASK GENMASK(0, 0)
+#define CPER_ARM_ERR_PRECISE_PC_SHIFT 27
+#define CPER_ARM_ERR_PRECISE_PC_MASK GENMASK(0, 0)
+#define CPER_ARM_ERR_RESTARTABLE_PC_SHIFT 28
+#define CPER_ARM_ERR_RESTARTABLE_PC_MASK GENMASK(0, 0)
+#define CPER_ARM_ERR_PARTICIPATION_TYPE_SHIFT 29
+#define CPER_ARM_ERR_PARTICIPATION_TYPE_MASK GENMASK(1, 0)
+#define CPER_ARM_ERR_TIME_OUT_SHIFT 31
+#define CPER_ARM_ERR_TIME_OUT_MASK GENMASK(0, 0)
+#define CPER_ARM_ERR_ADDRESS_SPACE_SHIFT 32
+#define CPER_ARM_ERR_ADDRESS_SPACE_MASK GENMASK(1, 0)
+#define CPER_ARM_ERR_MEM_ATTRIBUTES_SHIFT 34
+#define CPER_ARM_ERR_MEM_ATTRIBUTES_MASK GENMASK(8, 0)
+#define CPER_ARM_ERR_ACCESS_MODE_SHIFT 43
+#define CPER_ARM_ERR_ACCESS_MODE_MASK GENMASK(0, 0)
+
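These shift/mask pairs describe fields of the error_info member of struct cper_arm_err_info below; a consumer checks the matching valid bit and then shifts and masks, along these lines (a sketch, not part of this patch):

	if (info->validation_bits & CPER_ARM_ERR_VALID_TRANSACTION_TYPE) {
		u8 trans_type = (info->error_info >>
				 CPER_ARM_ERR_TRANSACTION_SHIFT) &
				CPER_ARM_ERR_TRANSACTION_MASK;
		/* 0: instruction, 1: data access, 2: generic */
	}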
/*
* All tables and structs must be byte-packed to match CPER
* specification, since the tables are provided by the system BIOS
*/
#pragma pack(1)
+/* Record Header, UEFI v2.7 sec N.2.1 */
struct cper_record_header {
char signature[CPER_SIG_SIZE]; /* must be CPER_SIG_RECORD */
- __u16 revision; /* must be CPER_RECORD_REV */
- __u32 signature_end; /* must be CPER_SIG_END */
- __u16 section_count;
- __u32 error_severity;
- __u32 validation_bits;
- __u32 record_length;
- __u64 timestamp;
- uuid_le platform_id;
- uuid_le partition_id;
- uuid_le creator_id;
- uuid_le notification_type;
- __u64 record_id;
- __u32 flags;
- __u64 persistence_information;
- __u8 reserved[12]; /* must be zero */
+ u16 revision; /* must be CPER_RECORD_REV */
+ u32 signature_end; /* must be CPER_SIG_END */
+ u16 section_count;
+ u32 error_severity;
+ u32 validation_bits;
+ u32 record_length;
+ u64 timestamp;
+ guid_t platform_id;
+ guid_t partition_id;
+ guid_t creator_id;
+ guid_t notification_type;
+ u64 record_id;
+ u32 flags;
+ u64 persistence_information;
+ u8 reserved[12]; /* must be zero */
};
+/* Section Descriptor, UEFI v2.7 sec N.2.2 */
struct cper_section_descriptor {
- __u32 section_offset; /* Offset in bytes of the
+ u32 section_offset; /* Offset in bytes of the
* section body from the base
* of the record header */
- __u32 section_length;
- __u16 revision; /* must be CPER_RECORD_REV */
- __u8 validation_bits;
- __u8 reserved; /* must be zero */
- __u32 flags;
- uuid_le section_type;
- uuid_le fru_id;
- __u32 section_severity;
- __u8 fru_text[20];
+ u32 section_length;
+ u16 revision; /* must be CPER_RECORD_REV */
+ u8 validation_bits;
+ u8 reserved; /* must be zero */
+ u32 flags;
+ guid_t section_type;
+ guid_t fru_id;
+ u32 section_severity;
+ u8 fru_text[20];
};
-/* Generic Processor Error Section */
+/* Generic Processor Error Section, UEFI v2.7 sec N.2.4.1 */
struct cper_sec_proc_generic {
- __u64 validation_bits;
- __u8 proc_type;
- __u8 proc_isa;
- __u8 proc_error_type;
- __u8 operation;
- __u8 flags;
- __u8 level;
- __u16 reserved;
- __u64 cpu_version;
+ u64 validation_bits;
+ u8 proc_type;
+ u8 proc_isa;
+ u8 proc_error_type;
+ u8 operation;
+ u8 flags;
+ u8 level;
+ u16 reserved;
+ u64 cpu_version;
char cpu_brand[128];
- __u64 proc_id;
- __u64 target_addr;
- __u64 requestor_id;
- __u64 responder_id;
- __u64 ip;
+ u64 proc_id;
+ u64 target_addr;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 ip;
};
-/* IA32/X64 Processor Error Section */
+/* IA32/X64 Processor Error Section, UEFI v2.7 sec N.2.4.2 */
struct cper_sec_proc_ia {
- __u64 validation_bits;
- __u8 lapic_id;
- __u8 cpuid[48];
+ u64 validation_bits;
+ u64 lapic_id;
+ u8 cpuid[48];
};
-/* IA32/X64 Processor Error Information Structure */
+/* IA32/X64 Processor Error Information Structure, UEFI v2.7 sec N.2.4.2.1 */
struct cper_ia_err_info {
- uuid_le err_type;
- __u64 validation_bits;
- __u64 check_info;
- __u64 target_id;
- __u64 requestor_id;
- __u64 responder_id;
- __u64 ip;
+ guid_t err_type;
+ u64 validation_bits;
+ u64 check_info;
+ u64 target_id;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 ip;
};
-/* IA32/X64 Processor Context Information Structure */
+/* IA32/X64 Processor Context Information Structure, UEFI v2.7 sec N.2.4.2.2 */
struct cper_ia_proc_ctx {
- __u16 reg_ctx_type;
- __u16 reg_arr_size;
- __u32 msr_addr;
- __u64 mm_reg_addr;
+ u16 reg_ctx_type;
+ u16 reg_arr_size;
+ u32 msr_addr;
+ u64 mm_reg_addr;
};
-/* ARM Processor Error Section */
+/* ARM Processor Error Section, UEFI v2.7 sec N.2.4.4 */
struct cper_sec_proc_arm {
- __u32 validation_bits;
- __u16 err_info_num; /* Number of Processor Error Info */
- __u16 context_info_num; /* Number of Processor Context Info Records*/
- __u32 section_length;
- __u8 affinity_level;
- __u8 reserved[3]; /* must be zero */
- __u64 mpidr;
- __u64 midr;
- __u32 running_state; /* Bit 0 set - Processor running. PSCI = 0 */
- __u32 psci_state;
+ u32 validation_bits;
+ u16 err_info_num; /* Number of Processor Error Info */
+ u16 context_info_num; /* Number of Processor Context Info Records */
+ u32 section_length;
+ u8 affinity_level;
+ u8 reserved[3]; /* must be zero */
+ u64 mpidr;
+ u64 midr;
+ u32 running_state; /* Bit 0 set - Processor running. PSCI = 0 */
+ u32 psci_state;
};
-/* ARM Processor Error Information Structure */
+/* ARM Processor Error Information Structure, UEFI v2.7 sec N.2.4.4.1 */
struct cper_arm_err_info {
- __u8 version;
- __u8 length;
- __u16 validation_bits;
- __u8 type;
- __u16 multiple_error;
- __u8 flags;
- __u64 error_info;
- __u64 virt_fault_addr;
- __u64 physical_fault_addr;
+ u8 version;
+ u8 length;
+ u16 validation_bits;
+ u8 type;
+ u16 multiple_error;
+ u8 flags;
+ u64 error_info;
+ u64 virt_fault_addr;
+ u64 physical_fault_addr;
};
-/* ARM Processor Context Information Structure */
+/* ARM Processor Context Information Structure, UEFI v2.7 sec N.2.4.4.2 */
struct cper_arm_ctx_info {
- __u16 version;
- __u16 type;
- __u32 size;
+ u16 version;
+ u16 type;
+ u32 size;
};
-/* Old Memory Error Section UEFI 2.1, 2.2 */
+/* Old Memory Error Section, UEFI v2.1, v2.2 */
struct cper_sec_mem_err_old {
- __u64 validation_bits;
- __u64 error_status;
- __u64 physical_addr;
- __u64 physical_addr_mask;
- __u16 node;
- __u16 card;
- __u16 module;
- __u16 bank;
- __u16 device;
- __u16 row;
- __u16 column;
- __u16 bit_pos;
- __u64 requestor_id;
- __u64 responder_id;
- __u64 target_id;
- __u8 error_type;
+ u64 validation_bits;
+ u64 error_status;
+ u64 physical_addr;
+ u64 physical_addr_mask;
+ u16 node;
+ u16 card;
+ u16 module;
+ u16 bank;
+ u16 device;
+ u16 row;
+ u16 column;
+ u16 bit_pos;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 target_id;
+ u8 error_type;
};
-/* Memory Error Section UEFI >= 2.3 */
+/* Memory Error Section (UEFI >= v2.3), UEFI v2.8 sec N.2.5 */
struct cper_sec_mem_err {
- __u64 validation_bits;
- __u64 error_status;
- __u64 physical_addr;
- __u64 physical_addr_mask;
- __u16 node;
- __u16 card;
- __u16 module;
- __u16 bank;
- __u16 device;
- __u16 row;
- __u16 column;
- __u16 bit_pos;
- __u64 requestor_id;
- __u64 responder_id;
- __u64 target_id;
- __u8 error_type;
- __u8 reserved;
- __u16 rank;
- __u16 mem_array_handle; /* card handle in UEFI 2.4 */
- __u16 mem_dev_handle; /* module handle in UEFI 2.4 */
+ u64 validation_bits;
+ u64 error_status;
+ u64 physical_addr;
+ u64 physical_addr_mask;
+ u16 node;
+ u16 card;
+ u16 module;
+ u16 bank;
+ u16 device;
+ u16 row;
+ u16 column;
+ u16 bit_pos;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 target_id;
+ u8 error_type;
+ u8 extended;
+ u16 rank;
+ u16 mem_array_handle; /* "card handle" in UEFI 2.4 */
+ u16 mem_dev_handle; /* "module handle" in UEFI 2.4 */
};
struct cper_mem_err_compact {
- __u64 validation_bits;
- __u16 node;
- __u16 card;
- __u16 module;
- __u16 bank;
- __u16 device;
- __u16 row;
- __u16 column;
- __u16 bit_pos;
- __u64 requestor_id;
- __u64 responder_id;
- __u64 target_id;
- __u16 rank;
- __u16 mem_array_handle;
- __u16 mem_dev_handle;
+ u64 validation_bits;
+ u16 node;
+ u16 card;
+ u16 module;
+ u16 bank;
+ u16 device;
+ u16 row;
+ u16 column;
+ u16 bit_pos;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 target_id;
+ u16 rank;
+ u16 mem_array_handle;
+ u16 mem_dev_handle;
+ u8 extended;
};
+static inline u32 cper_get_mem_extension(u64 mem_valid, u8 mem_extended)
+{
+ if (!(mem_valid & CPER_MEM_VALID_ROW_EXT))
+ return 0;
+ return (mem_extended & CPER_MEM_EXT_ROW_MASK) << CPER_MEM_EXT_ROW_SHIFT;
+}
+
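Callers fold the returned bits into the 16-bit row field to recover the full row address, roughly (a sketch, not part of this patch):

	u32 row = mem->row | cper_get_mem_extension(mem->validation_bits,
						    mem->extended);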
+/* PCI Express Error Section, UEFI v2.7 sec N.2.7 */
struct cper_sec_pcie {
- __u64 validation_bits;
- __u32 port_type;
+ u64 validation_bits;
+ u32 port_type;
struct {
- __u8 minor;
- __u8 major;
- __u8 reserved[2];
+ u8 minor;
+ u8 major;
+ u8 reserved[2];
} version;
- __u16 command;
- __u16 status;
- __u32 reserved;
+ u16 command;
+ u16 status;
+ u32 reserved;
struct {
- __u16 vendor_id;
- __u16 device_id;
- __u8 class_code[3];
- __u8 function;
- __u8 device;
- __u16 segment;
- __u8 bus;
- __u8 secondary_bus;
- __u16 slot;
- __u8 reserved;
+ u16 vendor_id;
+ u16 device_id;
+ u8 class_code[3];
+ u8 function;
+ u8 device;
+ u16 segment;
+ u8 bus;
+ u8 secondary_bus;
+ u16 slot;
+ u8 reserved;
} device_id;
struct {
- __u32 lower;
- __u32 upper;
+ u32 lower;
+ u32 upper;
} serial_number;
struct {
- __u16 secondary_status;
- __u16 control;
+ u16 secondary_status;
+ u16 control;
} bridge;
- __u8 capability[60];
- __u8 aer_info[96];
+ u8 capability[60];
+ u8 aer_info[96];
+};
+
+/* Firmware Error Record Reference, UEFI v2.7 sec N.2.10 */
+struct cper_sec_fw_err_rec_ref {
+ u8 record_type;
+ u8 revision;
+ u8 reserved[6];
+ u64 record_identifier;
+ guid_t record_identifier_guid;
};
/* Reset to default packing */
#pragma pack()
+extern const char *const cper_proc_error_type_strs[4];
+
u64 cper_next_record_id(void);
const char *cper_severity_str(unsigned int);
const char *cper_mem_err_type_str(unsigned int);
+const char *cper_mem_err_status_str(u64 status);
void cper_print_bits(const char *prefix, unsigned int bits,
const char * const strs[], unsigned int strs_size);
void cper_mem_err_pack(const struct cper_sec_mem_err *,
struct cper_mem_err_compact *);
const char *cper_mem_err_unpack(struct trace_seq *,
struct cper_mem_err_compact *);
+void cper_print_proc_arm(const char *pfx,
+ const struct cper_sec_proc_arm *proc);
+void cper_print_proc_ia(const char *pfx,
+ const struct cper_sec_proc_ia *proc);
+int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg);
+int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg);
#endif
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index ca73bc1563f4..8582a7142623 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/cpu.h - generic cpu definition
*
@@ -29,7 +30,7 @@ struct cpu {
};
extern void boot_cpu_init(void);
-extern void boot_cpu_state_init(void);
+extern void boot_cpu_hotplug_init(void);
extern void cpu_init(void);
extern void trap_init(void);
@@ -46,6 +47,30 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr);
extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
+extern ssize_t cpu_show_meltdown(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spectre_v1(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spectre_v2(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_l1tf(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_mds(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_tsx_async_abort(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+extern ssize_t cpu_show_itlb_multihit(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_mmio_stale_data(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+extern ssize_t cpu_show_retbleed(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
const struct attribute_group **groups,
@@ -55,34 +80,27 @@ extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *, size_t);
extern ssize_t arch_cpu_release(const char *, size_t);
#endif
-struct notifier_block;
-
-#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
-#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */
-#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
-#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
- * lock is dropped */
-#define CPU_BROKEN 0x000B /* CPU (unsigned)v did not die properly,
- * perhaps due to preemption. */
-
-/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
- * operation in progress
- */
-#define CPU_TASKS_FROZEN 0x0010
-#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN)
-#define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
-#define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
-#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
-#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
-#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
+/*
+ * These states are not related to the core CPU hotplug mechanism. They are
+ * used by various (sub)architectures to track internal state
+ */
+#define CPU_ONLINE 0x0002 /* CPU is up */
+#define CPU_UP_PREPARE 0x0003 /* CPU coming up */
+#define CPU_DEAD 0x0007 /* CPU dead */
+#define CPU_DEAD_FROZEN 0x0008 /* CPU timed out on unplug */
+#define CPU_POST_DEAD 0x0009 /* CPU successfully unplugged */
+#define CPU_BROKEN 0x000B /* CPU did not die properly */
#ifdef CONFIG_SMP
extern bool cpuhp_tasks_frozen;
-int cpu_up(unsigned int cpu);
+int add_cpu(unsigned int cpu);
+int cpu_device_up(struct device *dev);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
+int bringup_hibernate_cpu(unsigned int sleep_cpu);
+void bringup_nonboot_cpus(unsigned int setup_max_cpus);
#else /* CONFIG_SMP */
#define cpuhp_tasks_frozen 0
@@ -95,19 +113,26 @@ static inline void cpu_maps_update_done(void)
{
}
+static inline int add_cpu(unsigned int cpu) { return 0; }
+
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
+extern int lockdep_is_cpus_held(void);
+
#ifdef CONFIG_HOTPLUG_CPU
extern void cpus_write_lock(void);
extern void cpus_write_unlock(void);
extern void cpus_read_lock(void);
extern void cpus_read_unlock(void);
+extern int cpus_read_trylock(void);
extern void lockdep_assert_cpus_held(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
void clear_tasks_mm_cpumask(int cpu);
-int cpu_down(unsigned int cpu);
+int remove_cpu(unsigned int cpu);
+int cpu_device_down(struct device *dev);
+extern void smp_shutdown_nonboot_cpus(unsigned int primary_cpu);
#else /* CONFIG_HOTPLUG_CPU */
@@ -115,48 +140,59 @@ static inline void cpus_write_lock(void) { }
static inline void cpus_write_unlock(void) { }
static inline void cpus_read_lock(void) { }
static inline void cpus_read_unlock(void) { }
+static inline int cpus_read_trylock(void) { return true; }
static inline void lockdep_assert_cpus_held(void) { }
static inline void cpu_hotplug_disable(void) { }
static inline void cpu_hotplug_enable(void) { }
+static inline int remove_cpu(unsigned int cpu) { return -EPERM; }
+static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { }
#endif /* !CONFIG_HOTPLUG_CPU */
-/* Wrappers which go away once all code is converted */
-static inline void cpu_hotplug_begin(void) { cpus_write_lock(); }
-static inline void cpu_hotplug_done(void) { cpus_write_unlock(); }
-static inline void get_online_cpus(void) { cpus_read_lock(); }
-static inline void put_online_cpus(void) { cpus_read_unlock(); }
-
#ifdef CONFIG_PM_SLEEP_SMP
extern int freeze_secondary_cpus(int primary);
-static inline int disable_nonboot_cpus(void)
+extern void thaw_secondary_cpus(void);
+
+static inline int suspend_disable_secondary_cpus(void)
+{
+ int cpu = 0;
+
+ if (IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU))
+ cpu = -1;
+
+ return freeze_secondary_cpus(cpu);
+}
+static inline void suspend_enable_secondary_cpus(void)
{
- return freeze_secondary_cpus(0);
+ return thaw_secondary_cpus();
}
-extern void enable_nonboot_cpus(void);
+
#else /* !CONFIG_PM_SLEEP_SMP */
-static inline int disable_nonboot_cpus(void) { return 0; }
-static inline void enable_nonboot_cpus(void) {}
+static inline void thaw_secondary_cpus(void) {}
+static inline int suspend_disable_secondary_cpus(void) { return 0; }
+static inline void suspend_enable_secondary_cpus(void) { }
#endif /* !CONFIG_PM_SLEEP_SMP */
-void cpu_startup_entry(enum cpuhp_state state);
+void __noreturn cpu_startup_entry(enum cpuhp_state state);
void cpu_idle_poll_ctrl(bool enable);
-/* Attach to any functions which should be considered cpuidle. */
-#define __cpuidle __attribute__((__section__(".cpuidle.text")))
-
bool cpu_in_idle(unsigned long pc);
void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
-void arch_cpu_idle_dead(void);
+void __noreturn arch_cpu_idle_dead(void);
int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu);
-void play_idle(unsigned long duration_ms);
+void play_idle_precise(u64 duration_ns, u64 latency_ns);
+
+static inline void play_idle(unsigned long duration_us)
+{
+ play_idle_precise(duration_us * NSEC_PER_USEC, U64_MAX);
+}
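Existing callers keep the old microsecond interface unchanged; for example, a hypothetical caller idling for one millisecond with no latency constraint:

	play_idle(1000);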
#ifdef CONFIG_HOTPLUG_CPU
bool cpu_wait_death(unsigned int cpu, int seconds);
@@ -166,4 +202,31 @@ void cpuhp_report_idle_dead(void);
static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+enum cpuhp_smt_control {
+ CPU_SMT_ENABLED,
+ CPU_SMT_DISABLED,
+ CPU_SMT_FORCE_DISABLED,
+ CPU_SMT_NOT_SUPPORTED,
+ CPU_SMT_NOT_IMPLEMENTED,
+};
+
+#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
+extern enum cpuhp_smt_control cpu_smt_control;
+extern void cpu_smt_disable(bool force);
+extern void cpu_smt_check_topology(void);
+extern bool cpu_smt_possible(void);
+extern int cpuhp_smt_enable(void);
+extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
+#else
+# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED)
+static inline void cpu_smt_disable(bool force) { }
+static inline void cpu_smt_check_topology(void) { }
+static inline bool cpu_smt_possible(void) { return false; }
+static inline int cpuhp_smt_enable(void) { return 0; }
+static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
+#endif
+
+extern bool cpu_mitigations_off(void);
+extern bool cpu_mitigations_auto_nosmt(void);
+
#endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index d4292ebc5c8b..a3bdc8a98f2c 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/linux/cpu_cooling.h
*
@@ -5,18 +6,6 @@
* Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
@@ -30,10 +19,7 @@
struct cpufreq_policy;
-typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
- unsigned long voltage, u32 *power);
-
-#ifdef CONFIG_CPU_THERMAL
+#ifdef CONFIG_CPU_FREQ_THERMAL
/**
* cpufreq_cooling_register - function to create cpufreq cooling device.
* @policy: cpufreq policy.
@@ -41,83 +27,47 @@ typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy);
-struct thermal_cooling_device *
-cpufreq_power_cooling_register(struct cpufreq_policy *policy,
- u32 capacitance, get_static_t plat_static_func);
+/**
+ * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
+ * @cdev: thermal cooling device pointer.
+ */
+void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev);
/**
* of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
- * @np: a valid struct device_node to the cooling device device tree node.
* @policy: cpufreq policy.
*/
-#ifdef CONFIG_THERMAL_OF
-struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy);
-
struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func);
-#else
-static inline struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy)
-{
- return ERR_PTR(-ENOSYS);
-}
+of_cpufreq_cooling_register(struct cpufreq_policy *policy);
-static inline struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func)
-{
- return NULL;
-}
-#endif
-
-/**
- * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
- * @cdev: thermal cooling device pointer.
- */
-void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev);
-
-#else /* !CONFIG_CPU_THERMAL */
+#else /* !CONFIG_CPU_FREQ_THERMAL */
static inline struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy)
{
return ERR_PTR(-ENOSYS);
}
-static inline struct thermal_cooling_device *
-cpufreq_power_cooling_register(struct cpufreq_policy *policy,
- u32 capacitance, get_static_t plat_static_func)
-{
- return NULL;
-}
-static inline struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy)
+static inline
+void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
- return ERR_PTR(-ENOSYS);
+ return;
}
static inline struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func)
+of_cpufreq_cooling_register(struct cpufreq_policy *policy)
{
return NULL;
}
+#endif /* CONFIG_CPU_FREQ_THERMAL */
-static inline
-void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
+struct cpuidle_driver;
+
+#ifdef CONFIG_CPU_IDLE_THERMAL
+void cpuidle_cooling_register(struct cpuidle_driver *drv);
+#else /* CONFIG_CPU_IDLE_THERMAL */
+static inline void cpuidle_cooling_register(struct cpuidle_driver *drv)
{
- return;
}
-#endif /* CONFIG_CPU_THERMAL */
+#endif /* CONFIG_CPU_IDLE_THERMAL */
#endif /* __CPU_COOLING_H__ */
diff --git a/include/linux/cpu_pm.h b/include/linux/cpu_pm.h
index 455b233dd3b1..552b8f9ea05e 100644
--- a/include/linux/cpu_pm.h
+++ b/include/linux/cpu_pm.h
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Google, Inc.
*
* Author:
* Colin Cross <ccross@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef _LINUX_CPU_PM_H
diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h
index bdd18caa6c94..cae324d10965 100644
--- a/include/linux/cpu_rmap.h
+++ b/include/linux/cpu_rmap.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __LINUX_CPU_RMAP_H
#define __LINUX_CPU_RMAP_H
/*
* cpu_rmap.c: CPU affinity reverse-map support
* Copyright 2011 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/cpumask.h>
@@ -19,19 +16,18 @@
* struct cpu_rmap - CPU affinity reverse-map
* @refcount: kref for object
* @size: Number of objects to be reverse-mapped
- * @used: Number of objects added
* @obj: Pointer to array of object pointers
* @near: For each CPU, the index and distance to the nearest object,
* based on affinity masks
*/
struct cpu_rmap {
struct kref refcount;
- u16 size, used;
+ u16 size;
void **obj;
struct {
u16 index;
u16 dist;
- } near[0];
+ } near[];
};
#define CPU_RMAP_DIST_INF 0xffff
@@ -64,6 +60,7 @@ static inline struct cpu_rmap *alloc_irq_cpu_rmap(unsigned int size)
}
extern void free_irq_cpu_rmap(struct cpu_rmap *rmap);
+int irq_cpu_rmap_remove(struct cpu_rmap *rmap, int irq);
extern int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq);
#endif /* __LINUX_CPU_RMAP_H */
diff --git a/include/linux/cpufeature.h b/include/linux/cpufeature.h
index 986c06c88d81..6aff540ee9e5 100644
--- a/include/linux/cpufeature.h
+++ b/include/linux/cpufeature.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_CPUFEATURE_H
@@ -45,7 +42,7 @@
* 'asm/cpufeature.h' of your favorite architecture.
*/
#define module_cpu_feature_match(x, __initfunc) \
-static struct cpu_feature const cpu_feature_match_ ## x[] = \
+static struct cpu_feature const __maybe_unused cpu_feature_match_ ## x[] = \
{ { .feature = cpu_feature(x) }, { } }; \
MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \
\
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index f10a9b3761cd..26e2eb399484 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -1,21 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/linux/cpufreq.h
*
* Copyright (C) 2001 Russell King
* (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LINUX_CPUFREQ_H
#define _LINUX_CPUFREQ_H
#include <linux/clk.h>
+#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
@@ -42,13 +43,6 @@ enum cpufreq_table_sorting {
CPUFREQ_TABLE_SORTED_DESCENDING
};
-struct cpufreq_freqs {
- unsigned int cpu; /* cpu nr */
- unsigned int old;
- unsigned int new;
- u8 flags; /* flags of cpufreq_driver, see below. */
-};
-
struct cpufreq_cpuinfo {
unsigned int max_freq;
unsigned int min_freq;
@@ -57,11 +51,6 @@ struct cpufreq_cpuinfo {
unsigned int transition_latency;
};
-struct cpufreq_user_policy {
- unsigned int min; /* in kHz */
- unsigned int max; /* in kHz */
-};
-
struct cpufreq_policy {
/* CPUs sharing clock, require sw coordination */
cpumask_var_t cpus; /* Online CPUs only */
@@ -79,7 +68,6 @@ struct cpufreq_policy {
unsigned int max; /* in kHz */
unsigned int cur; /* in kHz, only needed if cpufreq
* governors are used */
- unsigned int restore_freq; /* = policy->cur before transition */
unsigned int suspend_freq; /* freq to set during suspend */
unsigned int policy; /* see above */
@@ -91,7 +79,10 @@ struct cpufreq_policy {
struct work_struct update; /* if update_policy() needs to be
* called, but you're in IRQ context */
- struct cpufreq_user_policy user_policy;
+ struct freq_constraints constraints;
+ struct freq_qos_request *min_freq_req;
+ struct freq_qos_request *max_freq_req;
+
struct cpufreq_frequency_table *freq_table;
enum cpufreq_table_sorting freq_table_sorted;
@@ -121,15 +112,37 @@ struct cpufreq_policy {
bool fast_switch_enabled;
/*
+ * Set if the CPUFREQ_GOV_STRICT_TARGET flag is set for the current
+ * governor.
+ */
+ bool strict_target;
+
+ /*
+ * Set if inefficient frequencies were found in the frequency table.
+ * This indicates if the relation flag CPUFREQ_RELATION_E can be
+ * honored.
+ */
+ bool efficiencies_available;
+
+ /*
* Preferred average time interval between consecutive invocations of
* the driver to set the frequency for this policy. To be set by the
* scaling driver (0, which is the default, means no preference).
*/
unsigned int transition_delay_us;
+ /*
+ * Remote DVFS flag (Not added to the driver structure as we don't want
+ * to access another structure from scheduler hotpath).
+ *
+ * Should be set if CPUs can do DVFS on behalf of other CPUs from
+ * different cpufreq policies.
+ */
+ bool dvfs_possible_from_any_cpu;
+
/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
unsigned int cached_target_freq;
- int cached_resolved_idx;
+ unsigned int cached_resolved_idx;
/* Synchronization for frequency transitions */
bool transition_ongoing; /* Tracks transition status */
@@ -142,6 +155,33 @@ struct cpufreq_policy {
/* For cpufreq driver's internal use */
void *driver_data;
+
+ /* Pointer to the cooling device if used for thermal mitigation */
+ struct thermal_cooling_device *cdev;
+
+ struct notifier_block nb_min;
+ struct notifier_block nb_max;
+};
+
+/*
+ * Used for passing new cpufreq policy data to the cpufreq driver's ->verify()
+ * callback for sanitization. That callback is only expected to modify the min
+ * and max values, if necessary, and specifically it must not update the
+ * frequency table.
+ */
+struct cpufreq_policy_data {
+ struct cpufreq_cpuinfo cpuinfo;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int cpu;
+ unsigned int min; /* in kHz */
+ unsigned int max; /* in kHz */
+};
+
+struct cpufreq_freqs {
+ struct cpufreq_policy *policy;
+ unsigned int old;
+ unsigned int new;
+ u8 flags; /* flags of cpufreq_driver, see below. */
};
/* Only for ACPI */
@@ -166,27 +206,37 @@ static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
#endif
+static inline bool policy_is_inactive(struct cpufreq_policy *policy)
+{
+ return cpumask_empty(policy->cpus);
+}
+
static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
return cpumask_weight(policy->cpus) > 1;
}
-/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
-extern struct kobject *cpufreq_global_kobject;
-
#ifdef CONFIG_CPU_FREQ
unsigned int cpufreq_get(unsigned int cpu);
unsigned int cpufreq_quick_get(unsigned int cpu);
unsigned int cpufreq_quick_get_max(unsigned int cpu);
+unsigned int cpufreq_get_hw_max_freq(unsigned int cpu);
void disable_cpufreq(void);
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
+
+struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
+void cpufreq_cpu_release(struct cpufreq_policy *policy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
+void refresh_frequency_limits(struct cpufreq_policy *policy);
void cpufreq_update_policy(unsigned int cpu);
+void cpufreq_update_limits(unsigned int cpu);
bool have_governor_per_policy(void);
+bool cpufreq_supports_freq_invariance(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
+bool has_target_index(void);
#else
static inline unsigned int cpufreq_get(unsigned int cpu)
{
@@ -200,6 +250,14 @@ static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
return 0;
}
+static inline unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
+{
+ return 0;
+}
+static inline bool cpufreq_supports_freq_invariance(void)
+{
+ return false;
+}
static inline void disable_cpufreq(void) { }
#endif
@@ -222,6 +280,12 @@ static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy
#define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
#define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
#define CPUFREQ_RELATION_C 2 /* closest frequency to target */
+/* relation flags */
+#define CPUFREQ_RELATION_E BIT(2) /* Get if possible an efficient frequency */
+
+#define CPUFREQ_RELATION_LE (CPUFREQ_RELATION_L | CPUFREQ_RELATION_E)
+#define CPUFREQ_RELATION_HE (CPUFREQ_RELATION_H | CPUFREQ_RELATION_E)
+#define CPUFREQ_RELATION_CE (CPUFREQ_RELATION_C | CPUFREQ_RELATION_E)
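The efficiency flag simply rides along with the base relation, so a caller asking for the lowest efficient frequency at or above the target would do (a sketch, assuming a valid policy pointer):

	int ret = cpufreq_driver_target(policy, target_freq,
					CPUFREQ_RELATION_LE);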
struct freq_attr {
struct attribute attr;
@@ -245,39 +309,27 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
static struct freq_attr _name = \
__ATTR(_name, 0200, NULL, store_##_name)
-struct global_attr {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj,
- struct attribute *attr, char *buf);
- ssize_t (*store)(struct kobject *a, struct attribute *b,
- const char *c, size_t count);
-};
-
#define define_one_global_ro(_name) \
-static struct global_attr _name = \
+static struct kobj_attribute _name = \
__ATTR(_name, 0444, show_##_name, NULL)
#define define_one_global_rw(_name) \
-static struct global_attr _name = \
+static struct kobj_attribute _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
struct cpufreq_driver {
char name[CPUFREQ_NAME_LEN];
- u8 flags;
+ u16 flags;
void *driver_data;
/* needed by all drivers */
int (*init)(struct cpufreq_policy *policy);
- int (*verify)(struct cpufreq_policy *policy);
+ int (*verify)(struct cpufreq_policy_data *policy);
/* define one out of two */
int (*setpolicy)(struct cpufreq_policy *policy);
- /*
- * On failure, should always restore frequency to policy->restore_freq
- * (i.e. old freq).
- */
int (*target)(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation); /* Deprecated */
@@ -285,15 +337,15 @@ struct cpufreq_driver {
unsigned int index);
unsigned int (*fast_switch)(struct cpufreq_policy *policy,
unsigned int target_freq);
-
/*
- * Caches and returns the lowest driver-supported frequency greater than
- * or equal to the target frequency, subject to any driver limitations.
- * Does not set the frequency. Only to be implemented for drivers with
- * target().
+ * ->fast_switch() replacement for drivers that use an internal
+ * representation of performance levels and can pass hints other than
+ * the target performance level to the hardware.
*/
- unsigned int (*resolve_freq)(struct cpufreq_policy *policy,
- unsigned int target_freq);
+ void (*adjust_perf)(unsigned int cpu,
+ unsigned long min_perf,
+ unsigned long target_perf,
+ unsigned long capacity);
/*
* Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
@@ -301,7 +353,7 @@ struct cpufreq_driver {
*
* get_intermediate should return a stable intermediate frequency
* platform wants to switch to and target_intermediate() should set CPU
- * to to that frequency, before jumping to the frequency corresponding
+ * to that frequency, before jumping to the frequency corresponding
* to 'index'. Core will take care of sending notifications and driver
* doesn't have to handle them in target_intermediate() or
* target_index().
@@ -318,11 +370,15 @@ struct cpufreq_driver {
/* should be defined, if possible */
unsigned int (*get)(unsigned int cpu);
+ /* Called to update policy limits on firmware notifications. */
+ void (*update_limits)(unsigned int cpu);
+
/* optional */
int (*bios_limit)(int cpu, unsigned int *limit);
+ int (*online)(struct cpufreq_policy *policy);
+ int (*offline)(struct cpufreq_policy *policy);
int (*exit)(struct cpufreq_policy *policy);
- void (*stop_cpu)(struct cpufreq_policy *policy);
int (*suspend)(struct cpufreq_policy *policy);
int (*resume)(struct cpufreq_policy *policy);
@@ -333,18 +389,33 @@ struct cpufreq_driver {
/* platform specific boost support code */
bool boost_enabled;
- int (*set_boost)(int state);
+ int (*set_boost)(struct cpufreq_policy *policy, int state);
+
+ /*
+ * Set by drivers that want to register with the energy model after the
+ * policy is properly initialized, but before the governor is started.
+ */
+ void (*register_em)(struct cpufreq_policy *policy);
};
/* flags */
-#define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
- all ->init() calls failed */
-#define CPUFREQ_CONST_LOOPS (1 << 1) /* loops_per_jiffy or other
- kernel "constants" aren't
- affected by frequency
- transitions */
-#define CPUFREQ_PM_NO_WARN (1 << 2) /* don't warn on suspend/resume
- speed mismatches */
+
+/*
+ * Set by drivers that need to update internal upper and lower boundaries along
+ * with the target frequency and so the core and governors should also invoke
+ * the driver if the target frequency does not change, but the policy min or max
+ * may have changed.
+ */
+#define CPUFREQ_NEED_UPDATE_LIMITS BIT(0)
+
+/* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */
+#define CPUFREQ_CONST_LOOPS BIT(1)
+
+/*
+ * Set by drivers that want the core to automatically register the cpufreq
+ * driver as a thermal cooling device.
+ */
+#define CPUFREQ_IS_COOLING_DEV BIT(2)
/*
* This should be set by platforms having multiple clock-domains, i.e.
@@ -352,14 +423,14 @@ struct cpufreq_driver {
* be created in cpu/cpu<num>/cpufreq/ directory and so they can use the same
* governor with different tunables for different clusters.
*/
-#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY (1 << 3)
+#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY BIT(3)
/*
* Driver will do POSTCHANGE notifications from outside of their ->target()
* routine and so must set cpufreq_driver->flags with this flag, so that core
* can handle them specially.
*/
-#define CPUFREQ_ASYNC_NOTIFICATION (1 << 4)
+#define CPUFREQ_ASYNC_NOTIFICATION BIT(4)
/*
* Set by drivers which want cpufreq core to check if CPU is running at a
@@ -368,16 +439,30 @@ struct cpufreq_driver {
* from the table. And if that fails, we will stop further boot process by
* issuing a BUG_ON().
*/
-#define CPUFREQ_NEED_INITIAL_FREQ_CHECK (1 << 5)
+#define CPUFREQ_NEED_INITIAL_FREQ_CHECK BIT(5)
+
+/*
+ * Set by drivers to disallow use of governors with "dynamic_switching" flag
+ * set.
+ */
+#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING BIT(6)
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
-int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
+void cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
+bool cpufreq_driver_test_flags(u16 flags);
const char *cpufreq_get_current_driver(void);
void *cpufreq_get_driver_data(void);
-static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
- unsigned int min, unsigned int max)
+static inline int cpufreq_thermal_control_enabled(struct cpufreq_driver *drv)
+{
+ return IS_ENABLED(CONFIG_CPU_THERMAL) &&
+ (drv->flags & CPUFREQ_IS_COOLING_DEV);
+}
+
+static inline void cpufreq_verify_within_limits(struct cpufreq_policy_data *policy,
+ unsigned int min,
+ unsigned int max)
{
if (policy->min < min)
policy->min = min;
@@ -393,10 +478,10 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
}
static inline void
-cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
+cpufreq_verify_within_cpu_limits(struct cpufreq_policy_data *policy)
{
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ policy->cpuinfo.max_freq);
}
#ifdef CONFIG_CPU_FREQ
@@ -420,8 +505,8 @@ static inline void cpufreq_resume(void) {}
#define CPUFREQ_POSTCHANGE (1)
/* Policy Notifiers */
-#define CPUFREQ_ADJUST (0)
-#define CPUFREQ_NOTIFY (1)
+#define CPUFREQ_CREATE_POLICY (0)
+#define CPUFREQ_REMOVE_POLICY (1)
#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
@@ -474,6 +559,7 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
* CPUFREQ GOVERNORS *
*********************************************************************/
+#define CPUFREQ_POLICY_UNKNOWN (0)
/*
* If (cpufreq_driver->target) exists, the ->governor decides what frequency
 * within the limits is used. If (cpufreq_driver->setpolicy) exists, these
@@ -487,14 +573,8 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
* polling frequency is 1000 times the transition latency of the processor. The
* ondemand governor will work on any processor with transition latency <= 10ms,
* using appropriate sampling rate.
- *
- * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL)
- * the ondemand governor will not work. All times here are in us (microseconds).
*/
-#define MIN_SAMPLING_RATE_RATIO (2)
#define LATENCY_MULTIPLIER (1000)
-#define MIN_LATENCY_MULTIPLIER (20)
-#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
struct cpufreq_governor {
char name[CPUFREQ_NAME_LEN];
@@ -507,16 +587,28 @@ struct cpufreq_governor {
char *buf);
int (*store_setspeed) (struct cpufreq_policy *policy,
unsigned int freq);
- unsigned int max_transition_latency; /* HW must be able to switch to
- next freq faster than this value in nano secs or we
- will fallback to performance governor */
struct list_head governor_list;
struct module *owner;
+ u8 flags;
};
+/* Governor flags */
+
+/* For governors which change frequency dynamically by themselves */
+#define CPUFREQ_GOV_DYNAMIC_SWITCHING BIT(0)
+
+/* For governors wanting the target frequency to be set exactly */
+#define CPUFREQ_GOV_STRICT_TARGET BIT(1)
+
+
/* Pass a target to the cpufreq driver */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq);
+void cpufreq_driver_adjust_perf(unsigned int cpu,
+ unsigned long min_perf,
+ unsigned long target_perf,
+ unsigned long capacity);
+bool cpufreq_driver_has_adjust_perf(void);
int cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
@@ -525,8 +617,25 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int relation);
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
unsigned int target_freq);
+unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
+int cpufreq_start_governor(struct cpufreq_policy *policy);
+void cpufreq_stop_governor(struct cpufreq_policy *policy);
+
+#define cpufreq_governor_init(__governor) \
+static int __init __governor##_init(void) \
+{ \
+ return cpufreq_register_governor(&__governor); \
+} \
+core_initcall(__governor##_init)
+
+#define cpufreq_governor_exit(__governor) \
+static void __exit __governor##_exit(void) \
+{ \
+ return cpufreq_unregister_governor(&__governor); \
+} \
+module_exit(__governor##_exit)
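These two macros absorb the usual registration boilerplate; a hypothetical governor module would use them as:

	static struct cpufreq_governor cpufreq_gov_foo = {
		.name	= "foo",
		.owner	= THIS_MODULE,
	};

	cpufreq_governor_init(cpufreq_gov_foo);
	cpufreq_governor_exit(cpufreq_gov_foo);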
struct cpufreq_governor *cpufreq_default_governor(void);
struct cpufreq_governor *cpufreq_fallback_governor(void);
@@ -534,9 +643,11 @@ struct cpufreq_governor *cpufreq_fallback_governor(void);
static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
{
if (policy->max < policy->cur)
- __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+ __cpufreq_driver_target(policy, policy->max,
+ CPUFREQ_RELATION_HE);
else if (policy->min > policy->cur)
- __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+ __cpufreq_driver_target(policy, policy->min,
+ CPUFREQ_RELATION_LE);
}
/* Governor attribute set */
@@ -550,6 +661,11 @@ struct gov_attr_set {
/* sysfs ops for cpufreq governors */
extern const struct sysfs_ops governor_sysfs_ops;
+static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj)
+{
+ return container_of(kobj, struct gov_attr_set, kobj);
+}
+
void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);
@@ -567,10 +683,11 @@ struct governor_attr {
*********************************************************************/
/* Special Values of .frequency field */
-#define CPUFREQ_ENTRY_INVALID ~0u
-#define CPUFREQ_TABLE_END ~1u
+#define CPUFREQ_ENTRY_INVALID ~0u
+#define CPUFREQ_TABLE_END ~1u
/* Special Values of .flags field */
-#define CPUFREQ_BOOST_FREQ (1 << 0)
+#define CPUFREQ_BOOST_FREQ (1 << 0)
+#define CPUFREQ_INEFFICIENT_FREQ (1 << 1)
struct cpufreq_frequency_table {
unsigned int flags;
@@ -609,6 +726,18 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)
/*
+ * cpufreq_for_each_entry_idx - iterate over a cpufreq_frequency_table
+ * with index
+ * @pos: the cpufreq_frequency_table * to use as a loop cursor.
+ * @table: the cpufreq_frequency_table * to iterate over.
+ * @idx: the table entry currently being processed
+ */
+
+#define cpufreq_for_each_entry_idx(pos, table, idx) \
+ for (pos = table, idx = 0; pos->frequency != CPUFREQ_TABLE_END; \
+ pos++, idx++)
+
+/*
* cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table
* excluding CPUFREQ_ENTRY_INVALID frequencies.
* @pos: the cpufreq_frequency_table * to use as a loop cursor.
@@ -621,12 +750,43 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
continue; \
else
+/*
+ * cpufreq_for_each_valid_entry_idx - iterate with index over a
+ * cpufreq_frequency_table excluding CPUFREQ_ENTRY_INVALID frequencies.
+ * @pos: the cpufreq_frequency_table * to use as a loop cursor.
+ * @table: the cpufreq_frequency_table * to iterate over.
+ * @idx: the table entry currently being processed
+ */
+
+#define cpufreq_for_each_valid_entry_idx(pos, table, idx) \
+ cpufreq_for_each_entry_idx(pos, table, idx) \
+ if (pos->frequency == CPUFREQ_ENTRY_INVALID) \
+ continue; \
+ else
+
+/**
+ * cpufreq_for_each_efficient_entry_idx - iterate with index over a
+ * cpufreq_frequency_table excluding CPUFREQ_ENTRY_INVALID and
+ * CPUFREQ_INEFFICIENT_FREQ frequencies.
+ * @pos: the &struct cpufreq_frequency_table to use as a loop cursor.
+ * @table: the &struct cpufreq_frequency_table to iterate over.
+ * @idx: the table entry currently being processed.
+ * @efficiencies: set to true to only iterate over efficient frequencies.
+ */
+
+#define cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) \
+ cpufreq_for_each_valid_entry_idx(pos, table, idx) \
+ if (efficiencies && (pos->flags & CPUFREQ_INEFFICIENT_FREQ)) \
+ continue; \
+ else
+
+
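/*
 * Illustrative walk (not from the patch) over a made-up table using the
 * new index-aware iterators; with @efficiencies true, entries flagged
 * CPUFREQ_INEFFICIENT_FREQ are skipped as well.
 */
static struct cpufreq_frequency_table demo_table[] = {
	{ .frequency = 500000 },
	{ .frequency = 1000000, .flags = CPUFREQ_INEFFICIENT_FREQ },
	{ .frequency = 1500000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static void demo_walk(bool efficiencies)
{
	struct cpufreq_frequency_table *pos;
	int idx;

	cpufreq_for_each_efficient_entry_idx(pos, demo_table, idx, efficiencies)
		pr_info("entry %d: %u kHz\n", idx, pos->frequency);
}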
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table);
-int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
+int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
struct cpufreq_frequency_table *table);
-int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy);
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);
int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
unsigned int target_freq,
@@ -644,223 +804,264 @@ bool policy_has_boost_freq(struct cpufreq_policy *policy);
/* Find lowest freq at or above target in a table in ascending order */
static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq >= target_freq)
- return pos - table;
+ return idx;
- best = pos;
+ best = idx;
}
- return best - table;
+ return best;
}
/* Find lowest freq at or above target in a table in descending order */
static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
- return pos - table;
+ return idx;
if (freq > target_freq) {
- best = pos;
+ best = idx;
continue;
}
/* No freq found above target_freq */
- if (best == table - 1)
- return pos - table;
+ if (best == -1)
+ return idx;
- return best - table;
+ return best;
}
- return best - table;
+ return best;
}
/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
target_freq = clamp_val(target_freq, policy->min, policy->max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
- return cpufreq_table_find_index_al(policy, target_freq);
+ return cpufreq_table_find_index_al(policy, target_freq,
+ efficiencies);
else
- return cpufreq_table_find_index_dl(policy, target_freq);
+ return cpufreq_table_find_index_dl(policy, target_freq,
+ efficiencies);
}
/* Find highest freq at or below target in a table in ascending order */
static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
- return pos - table;
+ return idx;
if (freq < target_freq) {
- best = pos;
+ best = idx;
continue;
}
/* No freq found below target_freq */
- if (best == table - 1)
- return pos - table;
+ if (best == -1)
+ return idx;
- return best - table;
+ return best;
}
- return best - table;
+ return best;
}
/* Find highest freq at or below target in a table in descending order */
static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq <= target_freq)
- return pos - table;
+ return idx;
- best = pos;
+ best = idx;
}
- return best - table;
+ return best;
}
/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
target_freq = clamp_val(target_freq, policy->min, policy->max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
- return cpufreq_table_find_index_ah(policy, target_freq);
+ return cpufreq_table_find_index_ah(policy, target_freq,
+ efficiencies);
else
- return cpufreq_table_find_index_dh(policy, target_freq);
+ return cpufreq_table_find_index_dh(policy, target_freq,
+ efficiencies);
}
/* Find closest freq to target in a table in ascending order */
static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
- return pos - table;
+ return idx;
if (freq < target_freq) {
- best = pos;
+ best = idx;
continue;
}
/* No freq found below target_freq */
- if (best == table - 1)
- return pos - table;
+ if (best == -1)
+ return idx;
/* Choose the closest freq */
- if (target_freq - best->frequency > freq - target_freq)
- return pos - table;
+ if (target_freq - table[best].frequency > freq - target_freq)
+ return idx;
- return best - table;
+ return best;
}
- return best - table;
+ return best;
}
/* Find closest freq to target in a table in descending order */
static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
- return pos - table;
+ return idx;
if (freq > target_freq) {
- best = pos;
+ best = idx;
continue;
}
/* No freq found above target_freq */
- if (best == table - 1)
- return pos - table;
+ if (best == -1)
+ return idx;
/* Choose the closest freq */
- if (best->frequency - target_freq > target_freq - freq)
- return pos - table;
+ if (table[best].frequency - target_freq > target_freq - freq)
+ return idx;
- return best - table;
+ return best;
}
- return best - table;
+ return best;
}
/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
target_freq = clamp_val(target_freq, policy->min, policy->max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
- return cpufreq_table_find_index_ac(policy, target_freq);
+ return cpufreq_table_find_index_ac(policy, target_freq,
+ efficiencies);
else
- return cpufreq_table_find_index_dc(policy, target_freq);
+ return cpufreq_table_find_index_dc(policy, target_freq,
+ efficiencies);
}
static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
+ bool efficiencies = policy->efficiencies_available &&
+ (relation & CPUFREQ_RELATION_E);
+ int idx;
+
+ /* cpufreq_table_index_unsorted() has no use for this flag anyway */
+ relation &= ~CPUFREQ_RELATION_E;
+
if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
return cpufreq_table_index_unsorted(policy, target_freq,
relation);
-
+retry:
switch (relation) {
case CPUFREQ_RELATION_L:
- return cpufreq_table_find_index_l(policy, target_freq);
+ idx = cpufreq_table_find_index_l(policy, target_freq,
+ efficiencies);
+ break;
case CPUFREQ_RELATION_H:
- return cpufreq_table_find_index_h(policy, target_freq);
+ idx = cpufreq_table_find_index_h(policy, target_freq,
+ efficiencies);
+ break;
case CPUFREQ_RELATION_C:
- return cpufreq_table_find_index_c(policy, target_freq);
+ idx = cpufreq_table_find_index_c(policy, target_freq,
+ efficiencies);
+ break;
default:
- pr_err("%s: Invalid relation: %d\n", __func__, relation);
- return -EINVAL;
+ WARN_ON_ONCE(1);
+ return 0;
}
+
+ if (idx < 0 && efficiencies) {
+ efficiencies = false;
+ goto retry;
+ }
+
+ return idx;
}
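/*
 * Caller-side sketch (assumed helper, valid policy with a non-empty
 * frequency table): with CPUFREQ_RELATION_LE the lookup above first
 * restricts itself to efficient entries and, when that yields no index,
 * transparently retries over the full table.
 */
static unsigned int demo_resolve(struct cpufreq_policy *policy,
				 unsigned int target_khz)
{
	int idx = cpufreq_frequency_table_target(policy, target_khz,
						 CPUFREQ_RELATION_LE);

	/* idx is a valid table index as long as the table is non-empty */
	return policy->freq_table[idx].frequency;
}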
static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy)
@@ -876,6 +1077,89 @@ static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy
return count;
}
+
+/**
+ * cpufreq_table_set_inefficient() - Mark a frequency as inefficient
+ * @policy: the &struct cpufreq_policy containing the inefficient frequency
+ * @frequency: the inefficient frequency
+ *
+ * The &struct cpufreq_policy must use a sorted frequency table
+ *
+ * Return: %0 on success or a negative errno code
+ */
+
+static inline int
+cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
+ unsigned int frequency)
+{
+ struct cpufreq_frequency_table *pos;
+
+ /* Not supported */
+ if (policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED)
+ return -EINVAL;
+
+ cpufreq_for_each_valid_entry(pos, policy->freq_table) {
+ if (pos->frequency == frequency) {
+ pos->flags |= CPUFREQ_INEFFICIENT_FREQ;
+ policy->efficiencies_available = true;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
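/*
 * Usage sketch (the frequency value is illustrative): a driver or the
 * energy model marking one OPP as inefficient so that RELATION_E
 * lookups will avoid it.
 */
static void demo_mark_inefficient(struct cpufreq_policy *policy)
{
	if (cpufreq_table_set_inefficient(policy, 1000000))
		pr_warn("cpufreq: 1000000 kHz not found or table unsorted\n");
}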
+static inline int parse_perf_domain(int cpu, const char *list_name,
+ const char *cell_name,
+ struct of_phandle_args *args)
+{
+ struct device_node *cpu_np;
+ int ret;
+
+ cpu_np = of_cpu_device_node_get(cpu);
+ if (!cpu_np)
+ return -ENODEV;
+
+	ret = of_parse_phandle_with_args(cpu_np, list_name, cell_name, 0,
+					 args);
+	/* Drop the node reference on the error path too, to avoid a leak. */
+	of_node_put(cpu_np);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
+ const char *cell_name, struct cpumask *cpumask,
+ struct of_phandle_args *pargs)
+{
+ int cpu, ret;
+ struct of_phandle_args args;
+
+ ret = parse_perf_domain(pcpu, list_name, cell_name, pargs);
+ if (ret < 0)
+ return ret;
+
+ cpumask_set_cpu(pcpu, cpumask);
+
+ for_each_possible_cpu(cpu) {
+ if (cpu == pcpu)
+ continue;
+
+ ret = parse_perf_domain(cpu, list_name, cell_name, &args);
+ if (ret < 0)
+ continue;
+
+ if (pargs->np == args.np && pargs->args_count == args.args_count &&
+ !memcmp(pargs->args, args.args, sizeof(args.args[0]) * args.args_count))
+ cpumask_set_cpu(cpu, cpumask);
+
+ of_node_put(args.np);
+ }
+
+ return 0;
+}
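/*
 * Sketch: discovering which CPUs share a performance domain from the
 * devicetree. The "performance-domains" property and its
 * "#performance-domain-cells" specifier follow the common binding
 * pattern but are assumptions here, not taken from this patch.
 */
static int demo_get_perf_domain(int cpu, struct cpumask *shared)
{
	struct of_phandle_args args;

	return of_perf_domain_get_sharing_cpumask(cpu, "performance-domains",
						  "#performance-domain-cells",
						  shared, &args);
}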
#else
static inline int cpufreq_boost_trigger_state(int state)
{
@@ -895,19 +1179,54 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
return false;
}
+
+static inline int
+cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
+ unsigned int frequency)
+{
+ return -EINVAL;
+}
+
+static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
+ const char *cell_name, struct cpumask *cpumask,
+ struct of_phandle_args *pargs)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
+ struct cpufreq_governor *old_gov);
+#else
+static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
+ struct cpufreq_governor *old_gov) { }
#endif
extern unsigned int arch_freq_get_on_cpu(int cpu);
+#ifndef arch_set_freq_scale
+static __always_inline
+void arch_set_freq_scale(const struct cpumask *cpus,
+ unsigned long cur_freq,
+ unsigned long max_freq)
+{
+}
+#endif
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
extern struct freq_attr *cpufreq_generic_attr[];
-int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table);
+int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy);
unsigned int cpufreq_generic_get(unsigned int cpu);
-int cpufreq_generic_init(struct cpufreq_policy *policy,
+void cpufreq_generic_init(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
unsigned int transition_latency);
+
+static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
+{
+ dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
+ policy->related_cpus);
+}
#endif /* _LINUX_CPUFREQ_H */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index b56573bf440d..0f1001dca0e0 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -1,23 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CPUHOTPLUG_H
#define __CPUHOTPLUG_H
#include <linux/types.h>
+/*
+ * CPU-up CPU-down
+ *
+ * BP AP BP AP
+ *
+ * OFFLINE OFFLINE
+ * | ^
+ * v |
+ * BRINGUP_CPU->AP_OFFLINE BRINGUP_CPU <- AP_IDLE_DEAD (idle thread/play_dead)
+ * | AP_OFFLINE
+ * v (IRQ-off) ,---------------^
+ * AP_ONLINE | (stop_machine)
+ * | TEARDOWN_CPU <- AP_ONLINE_IDLE
+ * | ^
+ * v |
+ * AP_ACTIVE AP_ACTIVE
+ */
+
+/*
+ * CPU hotplug states. The state machine invokes the installed state
+ * startup callbacks sequentially from CPUHP_OFFLINE + 1 to CPUHP_ONLINE
+ * during a CPU online operation. During a CPU offline operation the
+ * installed teardown callbacks are invoked in the reverse order from
+ * CPUHP_ONLINE - 1 down to CPUHP_OFFLINE.
+ *
+ * The state space has three sections: PREPARE, STARTING and ONLINE.
+ *
+ * PREPARE: The callbacks are invoked on a control CPU before the
+ * hotplugged CPU is started up or after the hotplugged CPU has died.
+ *
+ * STARTING: The callbacks are invoked on the hotplugged CPU from the low level
+ * hotplug startup/teardown code with interrupts disabled.
+ *
+ * ONLINE: The callbacks are invoked on the hotplugged CPU from the per CPU
+ * hotplug thread with interrupts and preemption enabled.
+ *
+ * Adding explicit states to this enum is only necessary when:
+ *
+ * 1) The state is within the STARTING section
+ *
+ * 2) The state has ordering constraints vs. other states in the
+ * same section.
+ *
+ * If neither #1 nor #2 apply, please use the dynamic state space when
+ * setting up a state by using CPUHP_BP_PREPARE_DYN or CPUHP_AP_ONLINE_DYN
+ * for the @state argument of the setup function.
+ *
+ * See Documentation/core-api/cpu_hotplug.rst for further information and
+ * examples.
+ */
enum cpuhp_state {
- CPUHP_OFFLINE,
+ CPUHP_INVALID = -1,
+
+ /* PREPARE section invoked on a control CPU */
+ CPUHP_OFFLINE = 0,
CPUHP_CREATE_THREADS,
CPUHP_PERF_PREPARE,
CPUHP_PERF_X86_PREPARE,
CPUHP_PERF_X86_AMD_UNCORE_PREP,
- CPUHP_PERF_BFIN,
CPUHP_PERF_POWER,
CPUHP_PERF_SUPERH,
CPUHP_X86_HPET_DEAD,
CPUHP_X86_APB_DEAD,
CPUHP_X86_MCE_DEAD,
CPUHP_VIRT_NET_DEAD,
+ CPUHP_IBMVNIC_DEAD,
CPUHP_SLUB_DEAD,
+ CPUHP_DEBUG_OBJ_DEAD,
CPUHP_MM_WRITEBACK_DEAD,
+ /* Must be after CPUHP_MM_VMSTAT_DEAD */
+ CPUHP_MM_DEMOTION_DEAD,
CPUHP_MM_VMSTAT_DEAD,
CPUHP_SOFTIRQ_DEAD,
CPUHP_NET_MVNETA_DEAD,
@@ -26,21 +83,25 @@ enum cpuhp_state {
CPUHP_ARM_OMAP_WAKE_DEAD,
CPUHP_IRQ_POLL_DEAD,
CPUHP_BLOCK_SOFTIRQ_DEAD,
+ CPUHP_BIO_DEAD,
CPUHP_ACPI_CPUDRV_DEAD,
CPUHP_S390_PFAULT_DEAD,
CPUHP_BLK_MQ_DEAD,
CPUHP_FS_BUFF_DEAD,
CPUHP_PRINTK_DEAD,
CPUHP_MM_MEMCQ_DEAD,
+ CPUHP_XFS_DEAD,
CPUHP_PERCPU_CNT_DEAD,
CPUHP_RADIX_DEAD,
- CPUHP_PAGE_ALLOC_DEAD,
+ CPUHP_PAGE_ALLOC,
CPUHP_NET_DEV_DEAD,
CPUHP_PCI_XGENE_DEAD,
- CPUHP_IOMMU_INTEL_DEAD,
+ CPUHP_IOMMU_IOVA_DEAD,
CPUHP_LUSTRE_CFS_DEAD,
- CPUHP_SCSI_BNX2FC_DEAD,
- CPUHP_SCSI_BNX2I_DEAD,
+ CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
+ CPUHP_PADATA_DEAD,
+ CPUHP_AP_DTPM_CPU_DEAD,
+ CPUHP_RANDOM_PREPARE,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
@@ -68,62 +129,85 @@ enum cpuhp_state {
CPUHP_MM_ZSWP_POOL_PREPARE,
CPUHP_KVM_PPC_BOOK3S_PREPARE,
CPUHP_ZCOMP_PREPARE,
- CPUHP_TIMERS_DEAD,
+ CPUHP_TIMERS_PREPARE,
CPUHP_MIPS_SOC_PREPARE,
CPUHP_BP_PREPARE_DYN,
CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
CPUHP_BRINGUP_CPU,
+
+ /*
+ * STARTING section invoked on the hotplugged CPU in low level
+ * bringup and teardown code.
+ */
CPUHP_AP_IDLE_DEAD,
CPUHP_AP_OFFLINE,
+ CPUHP_AP_CACHECTRL_STARTING,
CPUHP_AP_SCHED_STARTING,
CPUHP_AP_RCUTREE_DYING,
+ CPUHP_AP_CPU_PM_STARTING,
CPUHP_AP_IRQ_GIC_STARTING,
CPUHP_AP_IRQ_HIP04_STARTING,
+ CPUHP_AP_IRQ_APPLE_AIC_STARTING,
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
+ CPUHP_AP_IRQ_MIPS_GIC_STARTING,
+ CPUHP_AP_IRQ_RISCV_STARTING,
+ CPUHP_AP_IRQ_LOONGARCH_STARTING,
+ CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
+ CPUHP_AP_MICROCODE_LOADER,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
CPUHP_AP_PERF_X86_CQM_STARTING,
CPUHP_AP_PERF_X86_CSTATE_STARTING,
CPUHP_AP_PERF_XTENSA_STARTING,
- CPUHP_AP_PERF_METAG_STARTING,
CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
CPUHP_AP_PERF_ARM_ACPI_STARTING,
CPUHP_AP_PERF_ARM_STARTING,
+ CPUHP_AP_PERF_RISCV_STARTING,
CPUHP_AP_ARM_L2X0_STARTING,
+ CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
CPUHP_AP_JCORE_TIMER_STARTING,
- CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_TWD_STARTING,
- CPUHP_AP_METAG_TIMER_STARTING,
CPUHP_AP_QCOM_TIMER_STARTING,
+ CPUHP_AP_TEGRA_TIMER_STARTING,
CPUHP_AP_ARMADA_TIMER_STARTING,
CPUHP_AP_MARCO_TIMER_STARTING,
CPUHP_AP_MIPS_GIC_TIMER_STARTING,
CPUHP_AP_ARC_TIMER_STARTING,
- CPUHP_AP_KVM_STARTING,
- CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
- CPUHP_AP_KVM_ARM_VGIC_STARTING,
- CPUHP_AP_KVM_ARM_TIMER_STARTING,
+ CPUHP_AP_RISCV_TIMER_STARTING,
+ CPUHP_AP_CLINT_TIMER_STARTING,
+ CPUHP_AP_CSKY_TIMER_STARTING,
+ CPUHP_AP_TI_GP_TIMER_STARTING,
+ CPUHP_AP_HYPERV_TIMER_STARTING,
/* Must be the last timer callback */
CPUHP_AP_DUMMY_TIMER_STARTING,
CPUHP_AP_ARM_XEN_STARTING,
CPUHP_AP_ARM_CORESIGHT_STARTING,
+ CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
CPUHP_AP_ARM64_ISNDEP_STARTING,
CPUHP_AP_SMPCFD_DYING,
CPUHP_AP_X86_TBOOT_DYING,
+ CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
CPUHP_AP_ONLINE,
CPUHP_TEARDOWN_CPU,
+
+ /* Online section invoked on the hotplugged CPU from the hotplug thread */
CPUHP_AP_ONLINE_IDLE,
+ CPUHP_AP_KVM_ONLINE,
+ CPUHP_AP_SCHED_WAIT_EMPTY,
CPUHP_AP_SMPBOOT_THREADS,
CPUHP_AP_X86_VDSO_VMA_ONLINE,
CPUHP_AP_IRQ_AFFINITY_ONLINE,
+ CPUHP_AP_BLK_MQ_ONLINE,
+ CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS,
+ CPUHP_AP_X86_INTEL_EPB_ONLINE,
CPUHP_AP_PERF_ONLINE,
CPUHP_AP_PERF_X86_ONLINE,
CPUHP_AP_PERF_X86_UNCORE_ONLINE,
@@ -132,17 +216,41 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_RAPL_ONLINE,
CPUHP_AP_PERF_X86_CQM_ONLINE,
CPUHP_AP_PERF_X86_CSTATE_ONLINE,
+ CPUHP_AP_PERF_X86_IDXD_ONLINE,
CPUHP_AP_PERF_S390_CF_ONLINE,
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
CPUHP_AP_PERF_ARM_CCN_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE,
+ CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
CPUHP_AP_PERF_ARM_L2X0_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
+ CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
+ CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
+ CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+ CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
+ CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
+ CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
+ CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
+ CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE,
+ CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE,
+ CPUHP_AP_PERF_CSKY_ONLINE,
+ CPUHP_AP_WATCHDOG_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
+ CPUHP_AP_RANDOM_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
+ CPUHP_AP_BASE_CACHEINFO_ONLINE,
CPUHP_AP_ONLINE_DYN,
CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
+ /* Must be after CPUHP_AP_ONLINE_DYN for node_states[N_CPU] update */
+ CPUHP_AP_MM_DEMOTION_ONLINE,
CPUHP_AP_X86_HPET_ONLINE,
CPUHP_AP_X86_KVM_CLK_ONLINE,
CPUHP_AP_ACTIVE,
@@ -159,14 +267,15 @@ int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, const char *name,
int (*teardown)(unsigned int cpu),
bool multi_instance);
/**
- * cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks
+ * cpuhp_setup_state - Setup hotplug state callbacks with calling the @startup
+ * callback
* @state: The state for which the calls are installed
* @name: Name of the callback (will be used in debug output)
- * @startup: startup callback function
- * @teardown: teardown callback function
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
*
- * Installs the callback functions and invokes the startup callback on
- * the present cpus which have already reached the @state.
+ * Installs the callback functions and invokes the @startup callback on
+ * the online cpus which have already reached the @state.
*/
static inline int cpuhp_setup_state(enum cpuhp_state state,
const char *name,
@@ -176,6 +285,18 @@ static inline int cpuhp_setup_state(enum cpuhp_state state,
return __cpuhp_setup_state(state, name, true, startup, teardown, false);
}
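/*
 * Usage sketch (callback names are hypothetical): most users want the
 * dynamic ONLINE section, where CPUHP_AP_ONLINE_DYN allocates a slot
 * and the call returns the state number actually reserved.
 */
static int demo_online(unsigned int cpu)
{
	pr_info("demo: cpu%u up\n", cpu);
	return 0;
}

static int demo_prepare_down(unsigned int cpu)
{
	pr_info("demo: cpu%u going down\n", cpu);
	return 0;
}

static int __init demo_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				demo_online, demo_prepare_down);
	return ret < 0 ? ret : 0;	/* ret >= 0 is the dynamic state */
}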
+/**
+ * cpuhp_setup_state_cpuslocked - Setup hotplug state callbacks with calling
+ * @startup callback from a cpus_read_lock()
+ * held region
+ * @state: The state for which the calls are installed
+ * @name: Name of the callback (will be used in debug output)
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
+ *
+ * Same as cpuhp_setup_state() except that it must be invoked from within a
+ * cpus_read_lock() held region.
+ */
static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
const char *name,
int (*startup)(unsigned int cpu),
@@ -187,14 +308,14 @@ static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
/**
* cpuhp_setup_state_nocalls - Setup hotplug state callbacks without calling the
- * callbacks
+ * @startup callback
* @state: The state for which the calls are installed
* @name: Name of the callback.
- * @startup: startup callback function
- * @teardown: teardown callback function
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
*
- * Same as @cpuhp_setup_state except that no calls are executed are invoked
- * during installation of this callback. NOP if SMP=n or HOTPLUG_CPU=n.
+ * Same as cpuhp_setup_state() except that the @startup callback is not
+ * invoked during installation. NOP if SMP=n or HOTPLUG_CPU=n.
*/
static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
const char *name,
@@ -205,6 +326,18 @@ static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
false);
}
+/**
+ * cpuhp_setup_state_nocalls_cpuslocked - Setup hotplug state callbacks without
+ * invoking the @startup callback from
+ * a cpus_read_lock() held region
+ * @state: The state for which the calls are installed
+ * @name: Name of the callback.
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
+ *
+ * Same as cpuhp_setup_state_nocalls() except that it must be invoked from
+ * within a cpus_read_lock() held region.
+ */
static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state,
const char *name,
int (*startup)(unsigned int cpu),
@@ -218,13 +352,13 @@ static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state,
* cpuhp_setup_state_multi - Add callbacks for multi state
* @state: The state for which the calls are installed
* @name: Name of the callback.
- * @startup: startup callback function
- * @teardown: teardown callback function
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
*
* Sets the internal multi_instance flag and prepares a state to work as a multi
* instance callback. No callbacks are invoked at this point. The callbacks are
* invoked once an instance for this state are registered via
- * @cpuhp_state_add_instance or @cpuhp_state_add_instance_nocalls.
+ * cpuhp_state_add_instance() or cpuhp_state_add_instance_nocalls()
*/
static inline int cpuhp_setup_state_multi(enum cpuhp_state state,
const char *name,
@@ -249,9 +383,10 @@ int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
* @state: The state for which the instance is installed
* @node: The node for this individual state.
*
- * Installs the instance for the @state and invokes the startup callback on
- * the present cpus which have already reached the @state. The @state must have
- * been earlier marked as multi-instance by @cpuhp_setup_state_multi.
+ * Installs the instance for the @state and invokes the registered startup
+ * callback on the online cpus which have already reached the @state. The
+ * @state must have been earlier marked as multi-instance by
+ * cpuhp_setup_state_multi().
*/
static inline int cpuhp_state_add_instance(enum cpuhp_state state,
struct hlist_node *node)
@@ -265,8 +400,9 @@ static inline int cpuhp_state_add_instance(enum cpuhp_state state,
* @state: The state for which the instance is installed
* @node: The node for this individual state.
*
- * Installs the instance for the @state The @state must have been earlier
- * marked as multi-instance by @cpuhp_setup_state_multi.
+ * Installs the instance for the @state. The @state must have been earlier
+ * marked as multi-instance by cpuhp_setup_state_multi(). NOP if SMP=n or
+ * HOTPLUG_CPU=n.
*/
static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
struct hlist_node *node)
@@ -274,6 +410,17 @@ static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
return __cpuhp_state_add_instance(state, node, false);
}
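/*
 * Multi-instance sketch (all names are hypothetical): each device
 * embeds an hlist_node and is attached to a state previously set up
 * with cpuhp_setup_state_multi(), saved here in @demo_state.
 */
struct demo_dev {
	struct hlist_node node;
};

static enum cpuhp_state demo_state;

static int demo_inst_online(unsigned int cpu, struct hlist_node *node)
{
	struct demo_dev *dev = hlist_entry(node, struct demo_dev, node);

	(void)dev;	/* bind this instance to @cpu here */
	return 0;
}

static int __init demo_multi_init(void)
{
	int ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "demo:multi",
					  demo_inst_online, NULL);
	if (ret < 0)
		return ret;
	demo_state = ret;
	return 0;
}

static int demo_add_instance(struct demo_dev *dev)
{
	return cpuhp_state_add_instance(demo_state, &dev->node);
}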
+/**
+ * cpuhp_state_add_instance_nocalls_cpuslocked - Add an instance for a state
+ * without invoking the startup
+ * callback from a cpus_read_lock()
+ * held region.
+ * @state: The state for which the instance is installed
+ * @node: The node for this individual state.
+ *
+ * Same as cpuhp_state_add_instance_nocalls() except that it must be
+ * invoked from within a cpus_read_lock() held region.
+ */
static inline int
cpuhp_state_add_instance_nocalls_cpuslocked(enum cpuhp_state state,
struct hlist_node *node)
@@ -289,7 +436,7 @@ void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke);
* @state: The state for which the calls are removed
*
* Removes the callback functions and invokes the teardown callback on
- * the present cpus which have already reached the @state.
+ * the online cpus which have already reached the @state.
*/
static inline void cpuhp_remove_state(enum cpuhp_state state)
{
@@ -298,7 +445,7 @@ static inline void cpuhp_remove_state(enum cpuhp_state state)
/**
* cpuhp_remove_state_nocalls - Remove hotplug state callbacks without invoking
- * teardown
+ * the teardown callback
* @state: The state for which the calls are removed
*/
static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
@@ -306,6 +453,14 @@ static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
__cpuhp_remove_state(state, false);
}
+/**
+ * cpuhp_remove_state_nocalls_cpuslocked - Remove hotplug state callbacks without invoking
+ * teardown from a cpus_read_lock() held region.
+ * @state: The state for which the calls are removed
+ *
+ * Same as cpuhp_remove_state_nocalls() except that it must be invoked
+ * from within a cpus_read_lock() held region.
+ */
static inline void cpuhp_remove_state_nocalls_cpuslocked(enum cpuhp_state state)
{
__cpuhp_remove_state_cpuslocked(state, false);
@@ -333,8 +488,8 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
* @state: The state from which the instance is removed
* @node: The node for this individual state.
*
- * Removes the instance and invokes the teardown callback on the present cpus
- * which have already reached the @state.
+ * Removes the instance and invokes the teardown callback on the online cpus
+ * which have already reached @state.
*/
static inline int cpuhp_state_remove_instance(enum cpuhp_state state,
struct hlist_node *node)
@@ -344,7 +499,7 @@ static inline int cpuhp_state_remove_instance(enum cpuhp_state state,
/**
* cpuhp_state_remove_instance_nocalls - Remove hotplug instance from state
- * without invoking the reatdown callback
+ * without invoking the teardown callback
* @state: The state from which the instance is removed
* @node: The node for this individual state.
*
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index fc1e5d7fc1c7..3183aeb7f5b4 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -14,6 +14,7 @@
#include <linux/percpu.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
+#include <linux/context_tracking.h>
#define CPUIDLE_STATE_MAX 10
#define CPUIDLE_NAME_LEN 16
@@ -29,21 +30,32 @@ struct cpuidle_driver;
* CPUIDLE DEVICE INTERFACE *
****************************/
+#define CPUIDLE_STATE_DISABLED_BY_USER BIT(0)
+#define CPUIDLE_STATE_DISABLED_BY_DRIVER BIT(1)
+
struct cpuidle_state_usage {
unsigned long long disable;
unsigned long long usage;
- unsigned long long time; /* in US */
+ u64 time_ns;
+ unsigned long long above; /* Number of times it's been too deep */
+ unsigned long long below; /* Number of times it's been too shallow */
+ unsigned long long rejected; /* Number of times idle entry was rejected */
+#ifdef CONFIG_SUSPEND
+ unsigned long long s2idle_usage;
+ unsigned long long s2idle_time; /* in US */
+#endif
};
struct cpuidle_state {
char name[CPUIDLE_NAME_LEN];
char desc[CPUIDLE_DESC_LEN];
+ s64 exit_latency_ns;
+ s64 target_residency_ns;
unsigned int flags;
unsigned int exit_latency; /* in US */
int power_usage; /* in mW */
unsigned int target_residency; /* in US */
- bool disabled; /* disabled on all CPUs */
int (*enter) (struct cpuidle_device *dev,
struct cpuidle_driver *drv,
@@ -52,21 +64,27 @@ struct cpuidle_state {
int (*enter_dead) (struct cpuidle_device *dev, int index);
/*
- * CPUs execute ->enter_freeze with the local tick or entire timekeeping
+ * CPUs execute ->enter_s2idle with the local tick or entire timekeeping
* suspended, so it must not re-enable interrupts at any point (even
* temporarily) or attempt to change states of clock event devices.
+ *
+ * This callback may point to the same function as ->enter if all of
+ * the above requirements are met by it.
*/
- void (*enter_freeze) (struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index);
+ int (*enter_s2idle)(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index);
};
/* Idle State Flags */
-#define CPUIDLE_FLAG_NONE (0x00)
-#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
-#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */
-
-#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
+#define CPUIDLE_FLAG_NONE (0x00)
+#define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */
+#define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */
+#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
+#define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */
+#define CPUIDLE_FLAG_OFF BIT(4) /* disable this state by default */
+#define CPUIDLE_FLAG_TLB_FLUSHED BIT(5) /* idle-state flushes TLBs */
+#define CPUIDLE_FLAG_RCU_IDLE BIT(6) /* idle-state takes care of RCU */
struct cpuidle_device_kobj;
struct cpuidle_state_kobj;
@@ -75,10 +93,14 @@ struct cpuidle_driver_kobj;
struct cpuidle_device {
unsigned int registered:1;
unsigned int enabled:1;
- unsigned int use_deepest_state:1;
+ unsigned int poll_time_limit:1;
unsigned int cpu;
+ ktime_t next_hrtimer;
- int last_residency;
+ int last_state_idx;
+ u64 last_residency_ns;
+ u64 poll_limit_ns;
+ u64 forced_idle_latency_limit_ns;
struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX];
struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
struct cpuidle_driver_kobj *kobj_driver;
@@ -94,15 +116,34 @@ struct cpuidle_device {
DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
-/**
- * cpuidle_get_last_residency - retrieves the last state's residency time
- * @dev: the target CPU
- */
-static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
+static __always_inline void ct_cpuidle_enter(void)
{
- return dev->last_residency;
+ lockdep_assert_irqs_disabled();
+ /*
+	 * Idle is allowed to (temporarily) enable IRQs. It
+ * will return with IRQs disabled.
+ *
+ * Trace IRQs enable here, then switch off RCU, and have
+ * arch_cpu_idle() use raw_local_irq_enable(). Note that
+ * ct_idle_enter() relies on lockdep IRQ state, so switch that
+ * last -- this is very similar to the entry code.
+ */
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare();
+ instrumentation_end();
+ ct_idle_enter();
+ lockdep_hardirqs_on(_RET_IP_);
}
+static __always_inline void ct_cpuidle_exit(void)
+{
+ /*
+ * Carefully undo the above.
+ */
+ lockdep_hardirqs_off(_RET_IP_);
+ ct_idle_exit();
+ instrumentation_begin();
+}
/****************************
* CPUIDLE DRIVER INTERFACE *
@@ -111,7 +152,6 @@ static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
struct cpuidle_driver {
const char *name;
struct module *owner;
- int refcnt;
/* used by the cpuidle framework to setup the broadcast timer */
unsigned int bctimer:1;
@@ -122,6 +162,9 @@ struct cpuidle_driver {
/* the driver handles the cpus in cpumask */
struct cpumask *cpumask;
+
+ /* preferred governor to switch at register time */
+ const char *governor;
};
#ifdef CONFIG_CPU_IDLE
@@ -130,15 +173,18 @@ extern bool cpuidle_not_available(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
extern int cpuidle_select(struct cpuidle_driver *drv,
- struct cpuidle_device *dev);
+ struct cpuidle_device *dev,
+ bool *stop_tick);
extern int cpuidle_enter(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int index);
extern void cpuidle_reflect(struct cpuidle_device *dev, int index);
+extern u64 cpuidle_poll_time(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev);
extern int cpuidle_register_driver(struct cpuidle_driver *drv);
extern struct cpuidle_driver *cpuidle_get_driver(void);
-extern struct cpuidle_driver *cpuidle_driver_ref(void);
-extern void cpuidle_driver_unref(void);
+extern void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
+ bool disable);
extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
extern int cpuidle_register_device(struct cpuidle_device *dev);
extern void cpuidle_unregister_device(struct cpuidle_device *dev);
@@ -162,17 +208,20 @@ static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return true; }
static inline int cpuidle_select(struct cpuidle_driver *drv,
- struct cpuidle_device *dev)
+ struct cpuidle_device *dev, bool *stop_tick)
{return -ENODEV; }
static inline int cpuidle_enter(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int index)
{return -ENODEV; }
static inline void cpuidle_reflect(struct cpuidle_device *dev, int index) { }
+static inline u64 cpuidle_poll_time(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
+{return 0; }
static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
{return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
-static inline struct cpuidle_driver *cpuidle_driver_ref(void) {return NULL; }
-static inline void cpuidle_driver_unref(void) {}
+static inline void cpuidle_driver_state_disabled(struct cpuidle_driver *drv,
+ int idx, bool disable) { }
static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
static inline int cpuidle_register_device(struct cpuidle_device *dev)
{return -ENODEV; }
@@ -196,18 +245,20 @@ static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#ifdef CONFIG_CPU_IDLE
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
- struct cpuidle_device *dev);
-extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev,
+ u64 latency_limit_ns);
+extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
-extern void cpuidle_use_deepest_state(bool enable);
+extern void cpuidle_use_deepest_state(u64 latency_limit_ns);
#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
- struct cpuidle_device *dev)
+ struct cpuidle_device *dev,
+ u64 latency_limit_ns)
{return -ENODEV; }
-static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
-static inline void cpuidle_use_deepest_state(bool enable)
+static inline void cpuidle_use_deepest_state(u64 latency_limit_ns)
{
}
#endif
@@ -224,6 +275,12 @@ static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev,
}
#endif
+#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_ARCH_HAS_CPU_RELAX)
+void cpuidle_poll_state_init(struct cpuidle_driver *drv);
+#else
+static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {}
+#endif
+
/******************************
* CPUIDLE GOVERNOR INTERFACE *
******************************/
@@ -239,39 +296,57 @@ struct cpuidle_governor {
struct cpuidle_device *dev);
int (*select) (struct cpuidle_driver *drv,
- struct cpuidle_device *dev);
+ struct cpuidle_device *dev,
+ bool *stop_tick);
void (*reflect) (struct cpuidle_device *dev, int index);
};
-#ifdef CONFIG_CPU_IDLE
extern int cpuidle_register_governor(struct cpuidle_governor *gov);
-#else
-static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
-{return 0;}
-#endif
-
-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
-#define CPUIDLE_DRIVER_STATE_START 1
-#else
-#define CPUIDLE_DRIVER_STATE_START 0
-#endif
+extern s64 cpuidle_governor_latency_req(unsigned int cpu);
+
+#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, \
+ idx, \
+ state, \
+ is_retention, is_rcu) \
+({ \
+ int __ret = 0; \
+ \
+ if (!idx) { \
+ cpu_do_idle(); \
+ return idx; \
+ } \
+ \
+ if (!is_retention) \
+ __ret = cpu_pm_enter(); \
+ if (!__ret) { \
+ if (!is_rcu) \
+ ct_cpuidle_enter(); \
+ __ret = low_level_idle_enter(state); \
+ if (!is_rcu) \
+ ct_cpuidle_exit(); \
+ if (!is_retention) \
+ cpu_pm_exit(); \
+ } \
+ \
+ __ret ? -1 : idx; \
+})
#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \
-({ \
- int __ret; \
- \
- if (!idx) { \
- cpu_do_idle(); \
- return idx; \
- } \
- \
- __ret = cpu_pm_enter(); \
- if (!__ret) { \
- __ret = low_level_idle_enter(idx); \
- cpu_pm_exit(); \
- } \
- \
- __ret ? -1 : idx; \
-})
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0, 0)
+
+#define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1, 0)
+
+#define CPU_PM_CPU_IDLE_ENTER_PARAM(low_level_idle_enter, idx, state) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0, 0)
+
+#define CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(low_level_idle_enter, idx, state) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0, 1)
+
+#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(low_level_idle_enter, idx, state) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1, 0)
+
+#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM_RCU(low_level_idle_enter, idx, state) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1, 1)
#endif /* _LINUX_CPUIDLE_H */
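/*
 * Driver-side sketch (the platform hook, hardware state values, and an
 * arch providing cpu_do_idle() are all assumptions): a cpuidle ->enter
 * callback built on the reworked entry macros, which handle
 * cpu_pm_enter()/cpu_pm_exit() and the RCU/context-tracking transitions
 * around the low level call.
 */
static int demo_suspend(unsigned long hw_state)
{
	return 0;	/* program the hardware idle state here */
}

static const unsigned long demo_hw_states[] = { 0, 0x40000002 };

static int demo_enter_idle(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	return CPU_PM_CPU_IDLE_ENTER_PARAM(demo_suspend, index,
					   demo_hw_states[index]);
}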
diff --git a/include/linux/cpuidle_haltpoll.h b/include/linux/cpuidle_haltpoll.h
new file mode 100644
index 000000000000..d50c1e0411a2
--- /dev/null
+++ b/include/linux/cpuidle_haltpoll.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CPUIDLE_HALTPOLL_H
+#define _CPUIDLE_HALTPOLL_H
+
+#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
+#include <asm/cpuidle_haltpoll.h>
+#else
+static inline void arch_haltpoll_enable(unsigned int cpu)
+{
+}
+
+static inline void arch_haltpoll_disable(unsigned int cpu)
+{
+}
+#endif
+#endif
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 4bf4479a3a80..ca736b05ec7b 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_CPUMASK_H
#define __LINUX_CPUMASK_H
@@ -9,7 +10,10 @@
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
+#include <linux/atomic.h>
#include <linux/bug.h>
+#include <linux/gfp_types.h>
+#include <linux/numa.h>
/* Don't assign or return these: may not be this big! */
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
@@ -31,19 +35,56 @@ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
*/
#define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
-#if NR_CPUS == 1
-#define nr_cpu_ids 1
+#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
+#define nr_cpu_ids ((unsigned int)NR_CPUS)
#else
-extern int nr_cpu_ids;
+extern unsigned int nr_cpu_ids;
#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
-/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
- * not all bits may be allocated. */
-#define nr_cpumask_bits ((unsigned int)nr_cpu_ids)
+static inline void set_nr_cpu_ids(unsigned int nr)
+{
+#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
+ WARN_ON(nr != nr_cpu_ids);
+#else
+ nr_cpu_ids = nr;
+#endif
+}
+
+/*
+ * We have several different "preferred sizes" for the cpumask
+ * operations, depending on operation.
+ *
+ * For example, the bitmap scanning and operating operations have
+ * optimized routines that work for the single-word case, but only when
+ * the size is constant. So if NR_CPUS fits in one single word, we are
+ * better off using that small constant, in order to trigger the
+ * optimized bit finding. That is 'small_cpumask_size'.
+ *
+ * The clearing and copying operations will similarly perform better
+ * with a constant size, but we limit that size arbitrarily to four
+ * words. We call this 'large_cpumask_size'.
+ *
+ * Finally, some operations just want the exact limit, either because
+ * they set bits or just don't have any faster fixed-sized versions. We
+ * call this just 'nr_cpumask_bits'.
+ *
+ * Note that these optional constants are always guaranteed to be at
+ * least as big as 'nr_cpu_ids' itself is, and all our cpumask
+ * allocations are at least that size (see cpumask_size()). The
+ * optimization comes from being able to potentially use a compile-time
+ * constant instead of a run-time generated exact number of CPUs.
+ */
+#if NR_CPUS <= BITS_PER_LONG
+ #define small_cpumask_bits ((unsigned int)NR_CPUS)
+ #define large_cpumask_bits ((unsigned int)NR_CPUS)
+#elif NR_CPUS <= 4*BITS_PER_LONG
+ #define small_cpumask_bits nr_cpu_ids
+ #define large_cpumask_bits ((unsigned int)NR_CPUS)
#else
-#define nr_cpumask_bits ((unsigned int)NR_CPUS)
+ #define small_cpumask_bits nr_cpu_ids
+ #define large_cpumask_bits nr_cpu_ids
#endif
+#define nr_cpumask_bits nr_cpu_ids
/*
* The following particular system cpumasks and operations manage
@@ -63,10 +104,6 @@ extern int nr_cpu_ids;
* cpu_online_mask is the dynamic subset of cpu_present_mask,
* indicating those CPUs available for scheduling.
*
- * If HOTPLUG is enabled, then cpu_possible_mask is forced to have
- * all NR_CPUS bits set, otherwise it is just the set of CPUs that
- * ACPI reports present at boot.
- *
* If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
* depending on what ACPI reports as currently plugged in, otherwise
* cpu_present_mask is just a copy of cpu_possible_mask.
@@ -89,93 +126,75 @@ extern struct cpumask __cpu_possible_mask;
extern struct cpumask __cpu_online_mask;
extern struct cpumask __cpu_present_mask;
extern struct cpumask __cpu_active_mask;
+extern struct cpumask __cpu_dying_mask;
#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
#define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask)
#define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask)
#define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask)
+#define cpu_dying_mask ((const struct cpumask *)&__cpu_dying_mask)
-#if NR_CPUS > 1
-#define num_online_cpus() cpumask_weight(cpu_online_mask)
-#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
-#define num_present_cpus() cpumask_weight(cpu_present_mask)
-#define num_active_cpus() cpumask_weight(cpu_active_mask)
-#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)
-#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)
-#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)
-#define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask)
-#else
-#define num_online_cpus() 1U
-#define num_possible_cpus() 1U
-#define num_present_cpus() 1U
-#define num_active_cpus() 1U
-#define cpu_online(cpu) ((cpu) == 0)
-#define cpu_possible(cpu) ((cpu) == 0)
-#define cpu_present(cpu) ((cpu) == 0)
-#define cpu_active(cpu) ((cpu) == 0)
-#endif
+extern atomic_t __num_online_cpus;
-/* verify cpu argument to cpumask_* operators */
-static inline unsigned int cpumask_check(unsigned int cpu)
+extern cpumask_t cpus_booted_once_mask;
+
+static __always_inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
- WARN_ON_ONCE(cpu >= nr_cpumask_bits);
+ WARN_ON_ONCE(cpu >= bits);
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
- return cpu;
-}
-
-#if NR_CPUS == 1
-/* Uniprocessor. Assume all masks are "1". */
-static inline unsigned int cpumask_first(const struct cpumask *srcp)
-{
- return 0;
-}
-
-/* Valid inputs for n are -1 and 0. */
-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
-{
- return n+1;
}
-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+/* verify cpu argument to cpumask_* operators */
+static __always_inline unsigned int cpumask_check(unsigned int cpu)
{
- return n+1;
+ cpu_max_bits_warn(cpu, small_cpumask_bits);
+ return cpu;
}
-static inline unsigned int cpumask_next_and(int n,
- const struct cpumask *srcp,
- const struct cpumask *andp)
+/**
+ * cpumask_first - get the first cpu in a cpumask
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+static inline unsigned int cpumask_first(const struct cpumask *srcp)
{
- return n+1;
+ return find_first_bit(cpumask_bits(srcp), small_cpumask_bits);
}
-/* cpu must be a valid cpu, ie 0, so there's no other choice. */
-static inline unsigned int cpumask_any_but(const struct cpumask *mask,
- unsigned int cpu)
+/**
+ * cpumask_first_zero - get the first unset cpu in a cpumask
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if all cpus are set.
+ */
+static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
{
- return 1;
+ return find_first_zero_bit(cpumask_bits(srcp), small_cpumask_bits);
}
-static inline unsigned int cpumask_local_spread(unsigned int i, int node)
+/**
+ * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
+ * @src1p: the first input
+ * @src2p: the second input
+ *
+ * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
+ */
+static inline
+unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
- return 0;
+ return find_first_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
-#define for_each_cpu(cpu, mask) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#define for_each_cpu_not(cpu, mask) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#define for_each_cpu_and(cpu, mask, and) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
-#else
/**
- * cpumask_first - get the first cpu in a cpumask
- * @srcp: the cpumask pointer
+ * cpumask_last - get the last CPU in a cpumask
+ * @srcp: the cpumask pointer
*
- * Returns >= nr_cpu_ids if no cpus set.
+ * Returns >= nr_cpumask_bits if no CPUs set.
*/
-static inline unsigned int cpumask_first(const struct cpumask *srcp)
+static inline unsigned int cpumask_last(const struct cpumask *srcp)
{
- return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
+ return find_last_bit(cpumask_bits(srcp), small_cpumask_bits);
}
/**
@@ -185,12 +204,13 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
*
* Returns >= nr_cpu_ids if no further cpus set.
*/
-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+static inline
+unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
- return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
+ return find_next_bit(cpumask_bits(srcp), small_cpumask_bits, n + 1);
}
/**
@@ -205,12 +225,51 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
- return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
+ return find_next_zero_bit(cpumask_bits(srcp), small_cpumask_bits, n+1);
}
-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
-int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
+#if NR_CPUS == 1
+/* Uniprocessor: there is only one valid CPU */
+static inline unsigned int cpumask_local_spread(unsigned int i, int node)
+{
+ return 0;
+}
+
+static inline unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ return cpumask_first_and(src1p, src2p);
+}
+
+static inline unsigned int cpumask_any_distribute(const struct cpumask *srcp)
+{
+ return cpumask_first(srcp);
+}
+#else
unsigned int cpumask_local_spread(unsigned int i, int node);
+unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p);
+unsigned int cpumask_any_distribute(const struct cpumask *srcp);
+#endif /* NR_CPUS */
+
+/**
+ * cpumask_next_and - get the next cpu in *src1p & *src2p
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @src1p: the first cpumask pointer
+ * @src2p: the second cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus set in both.
+ */
+static inline
+unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
+ small_cpumask_bits, n + 1);
+}
/**
* for_each_cpu - iterate over every cpu in a mask
@@ -220,58 +279,177 @@ unsigned int cpumask_local_spread(unsigned int i, int node);
* After the loop, cpu is >= nr_cpu_ids.
*/
#define for_each_cpu(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = cpumask_next((cpu), (mask)), \
- (cpu) < nr_cpu_ids;)
+ for_each_set_bit(cpu, cpumask_bits(mask), small_cpumask_bits)
+
+#if NR_CPUS == 1
+static inline
+unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+{
+ cpumask_check(start);
+ if (n != -1)
+ cpumask_check(n);
+
+ /*
+ * Return the first available CPU when wrapping, or when starting before cpu0,
+ * since there is only one valid option.
+ */
+ if (wrap && n >= 0)
+ return nr_cpumask_bits;
+
+ return cpumask_first(mask);
+}
+#else
+unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+#endif
/**
- * for_each_cpu_not - iterate over every cpu in a complemented mask
+ * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
* @cpu: the (optionally unsigned) integer iterator
* @mask: the cpumask pointer
+ * @start: the start location
+ *
+ * The implementation does not assume any bit in @mask is set (including @start).
*
* After the loop, cpu is >= nr_cpu_ids.
*/
-#define for_each_cpu_not(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = cpumask_next_zero((cpu), (mask)), \
- (cpu) < nr_cpu_ids;)
+#define for_each_cpu_wrap(cpu, mask, start) \
+ for_each_set_bit_wrap(cpu, cpumask_bits(mask), small_cpumask_bits, start)
-extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+/**
+ * for_each_cpu_and - iterate over every cpu in both masks
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
+ *
+ * This saves a temporary CPU mask in many places. It is equivalent to:
+ * struct cpumask tmp;
+ * cpumask_and(&tmp, &mask1, &mask2);
+ * for_each_cpu(cpu, &tmp)
+ * ...
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_and(cpu, mask1, mask2) \
+ for_each_and_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
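/*
 * Usage sketch: counting a device's online affinity without a temporary
 * mask, the pattern the comment above describes.
 */
static unsigned int demo_count_online(const struct cpumask *affinity)
{
	unsigned int cpu, n = 0;

	for_each_cpu_and(cpu, affinity, cpu_online_mask)
		n++;
	return n;
}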
/**
- * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
+ * for_each_cpu_andnot - iterate over every cpu present in one mask, excluding
+ * those present in another.
* @cpu: the (optionally unsigned) integer iterator
- * @mask: the cpumask poiter
- * @start: the start location
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
*
- * The implementation does not assume any bit in @mask is set (including @start).
+ * This saves a temporary CPU mask in many places. It is equivalent to:
+ * struct cpumask tmp;
+ * cpumask_andnot(&tmp, &mask1, &mask2);
+ * for_each_cpu(cpu, &tmp)
+ * ...
*
* After the loop, cpu is >= nr_cpu_ids.
*/
-#define for_each_cpu_wrap(cpu, mask, start) \
- for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false); \
- (cpu) < nr_cpumask_bits; \
- (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
+#define for_each_cpu_andnot(cpu, mask1, mask2) \
+ for_each_andnot_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
/**
- * for_each_cpu_and - iterate over every cpu in both masks
+ * for_each_cpu_or - iterate over every cpu present in either mask
* @cpu: the (optionally unsigned) integer iterator
- * @mask: the first cpumask pointer
- * @and: the second cpumask pointer
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
*
* This saves a temporary CPU mask in many places. It is equivalent to:
* struct cpumask tmp;
- * cpumask_and(&tmp, &mask, &and);
+ * cpumask_or(&tmp, &mask1, &mask2);
* for_each_cpu(cpu, &tmp)
* ...
*
* After the loop, cpu is >= nr_cpu_ids.
*/
-#define for_each_cpu_and(cpu, mask, and) \
- for ((cpu) = -1; \
- (cpu) = cpumask_next_and((cpu), (mask), (and)), \
- (cpu) < nr_cpu_ids;)
-#endif /* SMP */
+#define for_each_cpu_or(cpu, mask1, mask2) \
+ for_each_or_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
+
+/**
+ * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
+ * @mask: the cpumask to search
+ * @cpu: the cpu to ignore.
+ *
+ * Often used to find any cpu but smp_processor_id() in a mask.
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+static inline
+unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
+{
+ unsigned int i;
+
+ cpumask_check(cpu);
+ for_each_cpu(i, mask)
+ if (i != cpu)
+ break;
+ return i;
+}
+
+/**
+ * cpumask_nth - get the Nth cpu in a cpumask
+ * @srcp: the cpumask pointer
+ * @cpu: the N'th cpu to find, starting from 0
+ *
+ * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ */
+static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
+{
+ return find_nth_bit(cpumask_bits(srcp), small_cpumask_bits, cpumask_check(cpu));
+}
+
+/**
+ * cpumask_nth_and - get the Nth cpu in 2 cpumasks
+ * @srcp1: the cpumask pointer
+ * @srcp2: the cpumask pointer
+ * @cpu: the N'th cpu to find, starting from 0
+ *
+ * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ */
+static inline
+unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return find_nth_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
+ small_cpumask_bits, cpumask_check(cpu));
+}
+
+/**
+ * cpumask_nth_andnot - get the Nth cpu set in 1st cpumask, and clear in 2nd.
+ * @srcp1: the cpumask pointer
+ * @srcp2: the cpumask pointer
+ * @cpu: the N'th cpu to find, starting from 0
+ *
+ * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ */
+static inline
+unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return find_nth_andnot_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
+ small_cpumask_bits, cpumask_check(cpu));
+}
+
+/**
+ * cpumask_nth_and_andnot - get the Nth cpu set in 1st and 2nd cpumask, and clear in 3rd.
+ * @srcp1: the cpumask pointer
+ * @srcp2: the cpumask pointer
+ * @srcp3: the cpumask pointer
+ * @cpu: the N'th cpu to find, starting from 0
+ *
+ * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ */
+static __always_inline
+unsigned int cpumask_nth_and_andnot(unsigned int cpu, const struct cpumask *srcp1,
+ const struct cpumask *srcp2,
+ const struct cpumask *srcp3)
+{
+ return find_nth_and_andnot_bit(cpumask_bits(srcp1),
+ cpumask_bits(srcp2),
+ cpumask_bits(srcp3),
+ small_cpumask_bits, cpumask_check(cpu));
+}
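
A hedged sketch of how the cpumask_nth() helpers can be used, e.g. to spread queues over the online CPUs (racy against hotplug unless the caller holds the hotplug lock; shown only to illustrate the API):

#include <linux/cpumask.h>

static unsigned int queue_to_cpu(unsigned int queue)
{
	unsigned int nr = num_online_cpus();

	return nr ? cpumask_nth(queue % nr, cpu_online_mask) : 0;
}
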
#define CPU_BITS_NONE \
{ \
@@ -288,12 +466,12 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
* @cpu: cpu number (< nr_cpu_ids)
* @dstp: the cpumask pointer
*/
-static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+static __always_inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
-static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+static __always_inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
@@ -304,12 +482,12 @@ static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
* @cpu: cpu number (< nr_cpu_ids)
* @dstp: the cpumask pointer
*/
-static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+static __always_inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
-static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
__clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
@@ -319,9 +497,9 @@ static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns 1 if @cpu is set in @cpumask, else returns 0
+ * Returns true if @cpu is set in @cpumask, else returns false
*/
-static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
+static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}
@@ -331,11 +509,11 @@ static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
+ * Returns true if @cpu is set in old bitmap of @cpumask, else returns false
*
* test_and_set_bit wrapper for cpumasks.
*/
-static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
+static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
@@ -345,11 +523,11 @@ static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
+ * Returns true if @cpu is set in old bitmap of @cpumask, else returns false
*
* test_and_clear_bit wrapper for cpumasks.
*/
-static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
+static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
@@ -360,6 +538,10 @@ static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
*/
static inline void cpumask_setall(struct cpumask *dstp)
{
+ if (small_const_nbits(small_cpumask_bits)) {
+ cpumask_bits(dstp)[0] = BITMAP_LAST_WORD_MASK(nr_cpumask_bits);
+ return;
+ }
bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
}
@@ -369,7 +551,7 @@ static inline void cpumask_setall(struct cpumask *dstp)
*/
static inline void cpumask_clear(struct cpumask *dstp)
{
- bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
+ bitmap_zero(cpumask_bits(dstp), large_cpumask_bits);
}
/**
@@ -378,14 +560,14 @@ static inline void cpumask_clear(struct cpumask *dstp)
* @src1p: the first input
* @src2p: the second input
*
- * If *@dstp is empty, returns 0, else returns 1
+ * If *@dstp is empty, returns false, else returns true
*/
-static inline int cpumask_and(struct cpumask *dstp,
+static inline bool cpumask_and(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
- cpumask_bits(src2p), nr_cpumask_bits);
+ cpumask_bits(src2p), small_cpumask_bits);
}
/**
@@ -398,7 +580,7 @@ static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
const struct cpumask *src2p)
{
bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
- cpumask_bits(src2p), nr_cpumask_bits);
+ cpumask_bits(src2p), small_cpumask_bits);
}
/**
@@ -412,7 +594,7 @@ static inline void cpumask_xor(struct cpumask *dstp,
const struct cpumask *src2p)
{
bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
- cpumask_bits(src2p), nr_cpumask_bits);
+ cpumask_bits(src2p), small_cpumask_bits);
}
/**
@@ -421,38 +603,40 @@ static inline void cpumask_xor(struct cpumask *dstp,
* @src1p: the first input
* @src2p: the second input
*
- * If *@dstp is empty, returns 0, else returns 1
+ * If *@dstp is empty, returns false, else returns true
*/
-static inline int cpumask_andnot(struct cpumask *dstp,
+static inline bool cpumask_andnot(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
- cpumask_bits(src2p), nr_cpumask_bits);
+ cpumask_bits(src2p), small_cpumask_bits);
}
/**
- * cpumask_complement - *dstp = ~*srcp
- * @dstp: the cpumask result
- * @srcp: the input to invert
+ * cpumask_equal - *src1p == *src2p
+ * @src1p: the first input
+ * @src2p: the second input
*/
-static inline void cpumask_complement(struct cpumask *dstp,
- const struct cpumask *srcp)
+static inline bool cpumask_equal(const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
- bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
- nr_cpumask_bits);
+ return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
+ small_cpumask_bits);
}
/**
- * cpumask_equal - *src1p == *src2p
+ * cpumask_or_equal - *src1p | *src2p == *src3p
* @src1p: the first input
* @src2p: the second input
+ * @src3p: the third input
*/
-static inline bool cpumask_equal(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static inline bool cpumask_or_equal(const struct cpumask *src1p,
+ const struct cpumask *src2p,
+ const struct cpumask *src3p)
{
- return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
- nr_cpumask_bits);
+ return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p),
+ cpumask_bits(src3p), small_cpumask_bits);
}
/**
@@ -464,7 +648,7 @@ static inline bool cpumask_intersects(const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
- nr_cpumask_bits);
+ small_cpumask_bits);
}
/**
@@ -472,13 +656,13 @@ static inline bool cpumask_intersects(const struct cpumask *src1p,
* @src1p: the first input
* @src2p: the second input
*
- * Returns 1 if *@src1p is a subset of *@src2p, else returns 0
+ * Returns true if *@src1p is a subset of *@src2p, else returns false
*/
-static inline int cpumask_subset(const struct cpumask *src1p,
+static inline bool cpumask_subset(const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
- nr_cpumask_bits);
+ small_cpumask_bits);
}
/**
@@ -487,7 +671,7 @@ static inline int cpumask_subset(const struct cpumask *src1p,
*/
static inline bool cpumask_empty(const struct cpumask *srcp)
{
- return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
+ return bitmap_empty(cpumask_bits(srcp), small_cpumask_bits);
}
/**
@@ -505,7 +689,18 @@ static inline bool cpumask_full(const struct cpumask *srcp)
*/
static inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
- return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
+ return bitmap_weight(cpumask_bits(srcp), small_cpumask_bits);
+}
+
+/**
+ * cpumask_weight_and - Count of bits in (*srcp1 & *srcp2)
+ * @srcp1: the cpumask to count bits (< nr_cpu_ids) in.
+ * @srcp2: the cpumask to count bits (< nr_cpu_ids) in.
+ */
+static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return bitmap_weight_and(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
/**
@@ -518,7 +713,7 @@ static inline void cpumask_shift_right(struct cpumask *dstp,
const struct cpumask *srcp, int n)
{
bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
- nr_cpumask_bits);
+ small_cpumask_bits);
}
/**
@@ -542,7 +737,7 @@ static inline void cpumask_shift_left(struct cpumask *dstp,
static inline void cpumask_copy(struct cpumask *dstp,
const struct cpumask *srcp)
{
- bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
+ bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), large_cpumask_bits);
}
/**
@@ -554,15 +749,6 @@ static inline void cpumask_copy(struct cpumask *dstp,
#define cpumask_any(srcp) cpumask_first(srcp)
/**
- * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
- * @src1p: the first input
- * @src2p: the second input
- *
- * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
- */
-#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))
-
-/**
* cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
* @mask1: the first input cpumask
* @mask2: the second input cpumask
@@ -615,10 +801,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
*/
static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
{
- char *nl = strchr(buf, '\n');
- unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
-
- return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
+ return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits);
}
/**
@@ -636,9 +819,9 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
/**
* cpumask_size - size to allocate for a 'struct cpumask' in bytes
*/
-static inline size_t cpumask_size(void)
+static inline unsigned int cpumask_size(void)
{
- return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
+ return BITS_TO_LONGS(large_cpumask_bits) * sizeof(long);
}
/*
@@ -688,9 +871,35 @@ typedef struct cpumask *cpumask_var_t;
#define __cpumask_var_read_mostly __read_mostly
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
-bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
-bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
-bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
+
+static inline
+bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
+{
+ return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
+}
+
+/**
+ * alloc_cpumask_var - allocate a struct cpumask
+ * @mask: pointer to cpumask_var_t where the cpumask is returned
+ * @flags: GFP_ flags
+ *
+ * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
+ * a nop returning a constant 1 (in <linux/cpumask.h>).
+ *
+ * See alloc_cpumask_var_node.
+ */
+static inline
+bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
+}
+
+static inline
+bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ return alloc_cpumask_var(mask, flags | __GFP_ZERO);
+}
+
void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);
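
A minimal sketch of the canonical cpumask_var_t lifecycle these wrappers implement (with CONFIG_CPUMASK_OFFSTACK=n the allocation is a no-op that always succeeds):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int frob_online_active(void)
{
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(mask, cpu_online_mask, cpu_active_mask);
	/* ... use mask ... */

	free_cpumask_var(mask);
	return 0;
}
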
@@ -756,9 +965,16 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
/* First bits of cpu_bit_bitmap are in fact unset. */
#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
+#if NR_CPUS == 1
+/* Uniprocessor: the possible/online/present masks are always "1" */
+#define for_each_possible_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#define for_each_online_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#define for_each_present_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#else
#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask)
#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask)
+#endif
/* Wrappers for arch boot code to manipulate normally-constant masks */
void init_cpu_present(const struct cpumask *src);
@@ -788,14 +1004,7 @@ set_cpu_present(unsigned int cpu, bool present)
cpumask_clear_cpu(cpu, &__cpu_present_mask);
}
-static inline void
-set_cpu_online(unsigned int cpu, bool online)
-{
- if (online)
- cpumask_set_cpu(cpu, &__cpu_online_mask);
- else
- cpumask_clear_cpu(cpu, &__cpu_online_mask);
-}
+void set_cpu_online(unsigned int cpu, bool online);
static inline void
set_cpu_active(unsigned int cpu, bool active)
@@ -806,6 +1015,14 @@ set_cpu_active(unsigned int cpu, bool active)
cpumask_clear_cpu(cpu, &__cpu_active_mask);
}
+static inline void
+set_cpu_dying(unsigned int cpu, bool dying)
+{
+ if (dying)
+ cpumask_set_cpu(cpu, &__cpu_dying_mask);
+ else
+ cpumask_clear_cpu(cpu, &__cpu_dying_mask);
+}
/**
* to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
@@ -843,6 +1060,82 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
return to_cpumask(p);
}
+#if NR_CPUS > 1
+/**
+ * num_online_cpus() - Read the number of online CPUs
+ *
+ * Despite the fact that __num_online_cpus is of type atomic_t, this
+ * interface gives only a momentary snapshot and is not protected against
+ * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held
+ * region.
+ */
+static __always_inline unsigned int num_online_cpus(void)
+{
+ return arch_atomic_read(&__num_online_cpus);
+}
+#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
+#define num_present_cpus() cpumask_weight(cpu_present_mask)
+#define num_active_cpus() cpumask_weight(cpu_active_mask)
+
+static inline bool cpu_online(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_online_mask);
+}
+
+static inline bool cpu_possible(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_possible_mask);
+}
+
+static inline bool cpu_present(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_present_mask);
+}
+
+static inline bool cpu_active(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_active_mask);
+}
+
+static inline bool cpu_dying(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_dying_mask);
+}
+
+#else
+
+#define num_online_cpus() 1U
+#define num_possible_cpus() 1U
+#define num_present_cpus() 1U
+#define num_active_cpus() 1U
+
+static inline bool cpu_online(unsigned int cpu)
+{
+ return cpu == 0;
+}
+
+static inline bool cpu_possible(unsigned int cpu)
+{
+ return cpu == 0;
+}
+
+static inline bool cpu_present(unsigned int cpu)
+{
+ return cpu == 0;
+}
+
+static inline bool cpu_active(unsigned int cpu)
+{
+ return cpu == 0;
+}
+
+static inline bool cpu_dying(unsigned int cpu)
+{
+ return false;
+}
+
+#endif /* NR_CPUS > 1 */
+
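
A small sketch of the locking caveat in the num_online_cpus() kernel-doc: hold the hotplug read lock if the count must stay stable while it is used:

#include <linux/cpu.h>
#include <linux/cpumask.h>

static unsigned int stable_online_count(void)
{
	unsigned int n;

	cpus_read_lock();
	n = num_online_cpus();	/* cannot change until cpus_read_unlock() */
	cpus_read_unlock();
	return n;
}
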
#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
#if NR_CPUS <= BITS_PER_LONG
@@ -877,6 +1170,45 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
nr_cpu_ids);
}
+/**
+ * cpumap_print_bitmask_to_buf - copies the cpumask into the buffer as
+ * hex values of cpumask
+ *
+ * @buf: the buffer to copy into
+ * @mask: the cpumask to copy
+ * @off: offset in the formatted string from which we are copying into @buf
+ * @count: the maximum number of bytes to print
+ *
+ * The function prints the cpumask into the buffer as hex values of the
+ * cpumask. Typically used by a bin_attribute to export the cpumask bitmask
+ * ABI.
+ *
+ * Returns the number of bytes copied, excluding the terminating '\0'.
+ */
+static inline ssize_t
+cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
+ loff_t off, size_t count)
+{
+ return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
+ nr_cpu_ids, off, count) - 1;
+}
+
+/**
+ * cpumap_print_list_to_buf - copies the cpumask into the buffer as
+ * a comma-separated list of cpus
+ *
+ * Everything is the same as in cpumap_print_bitmask_to_buf() above,
+ * except for the print format.
+ */
+static inline ssize_t
+cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
+ loff_t off, size_t count)
+{
+ return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
+ nr_cpu_ids, off, count) - 1;
+}
+
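
A sketch of the bin_attribute ->read() usage the kernel-doc alludes to (the attribute wiring and the choice of cpu_online_mask are illustrative):

#include <linux/cpumask.h>
#include <linux/sysfs.h>

static ssize_t online_cpus_read(struct file *file, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	return cpumap_print_bitmask_to_buf(buf, cpu_online_mask, off, count);
}
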
#if NR_CPUS <= BITS_PER_LONG
#define CPU_MASK_ALL \
(cpumask_t) { { \
@@ -900,4 +1232,23 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
[0] = 1UL \
} }
+/*
+ * Provide a valid theoretical max size for cpumap and cpulist sysfs files
+ * to avoid breaking userspace which may allocate a buffer based on the size
+ * reported by e.g. fstat.
+ *
+ * For cpumap, NR_CPUS * 9/32 - 1 should be an exact length.
+ *
+ * For cpulist, 7 is ceil(log10(NR_CPUS)) + 1, allowing NR_CPUS to be up
+ * to 2 orders of magnitude larger than 8192. We then divide by 2 to
+ * cover the worst case of every other cpu being on one of two nodes for
+ * a very large NR_CPUS.
+ *
+ * Use PAGE_SIZE as a minimum for smaller configurations while avoiding
+ * unsigned comparison to -1.
+ */
+#define CPUMAP_FILE_MAX_BYTES (((NR_CPUS * 9)/32 > PAGE_SIZE) \
+ ? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
+#define CPULIST_FILE_MAX_BYTES (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)
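
As a worked example, assuming PAGE_SIZE is 4096: the bitmask format emits 9 characters ("%08x,") per 32 CPUs, so NR_CPUS = 8192 yields 8192 * 9/32 = 2304 <= PAGE_SIZE and the cpumap file size stays at PAGE_SIZE, while NR_CPUS = 16384 yields 16384 * 9/32 = 4608 > PAGE_SIZE, so the reported size becomes 4608 - 1 = 4607 bytes.
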
+
#endif /* __LINUX_CPUMASK_H */
diff --git a/include/linux/cpumask_api.h b/include/linux/cpumask_api.h
new file mode 100644
index 000000000000..83bd3ebe82b0
--- /dev/null
+++ b/include/linux/cpumask_api.h
@@ -0,0 +1 @@
+#include <linux/cpumask.h>
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 119a3f9604b0..980b76a1237e 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
@@ -14,54 +15,76 @@
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
+#include <linux/mmu_context.h>
#include <linux/jump_label.h>
#ifdef CONFIG_CPUSETS
+/*
+ * Static branch rewrites can happen in an arbitrary order for a given
+ * key. In code paths where we need to loop with read_mems_allowed_begin() and
+ * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
+ * to ensure that begin() always gets rewritten before retry() in the
+ * disabled -> enabled transition. If not, then if local irqs are disabled
+ * around the loop, we can deadlock since retry() would always be
+ * comparing the latest value of the mems_allowed seqcount against 0 as
+ * begin() still would see cpusets_enabled() as false. The enabled -> disabled
+ * transition should happen in reverse order for the same reasons (want to stop
+ * looking at real value of mems_allowed.sequence in retry() first).
+ */
+extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
+extern struct static_key_false cpusets_insane_config_key;
+
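
A minimal sketch of the begin/retry loop that the rewrite-ordering comment above protects (read_mems_allowed_begin() and read_mems_allowed_retry() are defined further down in this header):

#include <linux/nodemask.h>
#include <linux/sched.h>

static nodemask_t snapshot_mems_allowed(void)
{
	nodemask_t mask;
	unsigned int seq;

	do {
		seq = read_mems_allowed_begin();
		mask = current->mems_allowed;	/* read under the seqcount */
	} while (read_mems_allowed_retry(seq));

	return mask;
}
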
static inline bool cpusets_enabled(void)
{
return static_branch_unlikely(&cpusets_enabled_key);
}
-static inline int nr_cpusets(void)
+static inline void cpuset_inc(void)
{
- /* jump label reference count + the top-level cpuset */
- return static_key_count(&cpusets_enabled_key.key) + 1;
+ static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
+ static_branch_inc_cpuslocked(&cpusets_enabled_key);
}
-static inline void cpuset_inc(void)
+static inline void cpuset_dec(void)
{
- static_branch_inc(&cpusets_enabled_key);
+ static_branch_dec_cpuslocked(&cpusets_enabled_key);
+ static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}
-static inline void cpuset_dec(void)
+/*
+ * This will get enabled whenever a cpuset configuration is considered
+ * unsupportable in general, e.g. a movable-only node which cannot satisfy
+ * any non-movable allocations (see update_nodemask). The page allocator
+ * needs to make additional checks for those configurations, and this
+ * check is meant to guard those checks without any overhead for sane
+ * configurations.
+ */
+static inline bool cpusets_insane_config(void)
{
- static_branch_dec(&cpusets_enabled_key);
+ return static_branch_unlikely(&cpusets_insane_config_key);
}
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
+extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
+extern void cpuset_wait_for_hotplug(void);
+extern void cpuset_read_lock(void);
+extern void cpuset_read_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
-extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);
-
-static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
-{
- if (cpusets_enabled())
- return __cpuset_node_allowed(node, gfp_mask);
- return true;
-}
+extern bool cpuset_node_allowed(int node, gfp_t gfp_mask);
static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
- return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
+ return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}
static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
@@ -100,7 +123,7 @@ static inline int cpuset_do_slab_mem_spread(void)
return task_spread_slab(current);
}
-extern int current_cpuset_is_being_rebound(void);
+extern bool current_cpuset_is_being_rebound(void);
extern void rebuild_sched_domains(void);
@@ -115,7 +138,7 @@ extern void cpuset_print_current_mems_allowed(void);
*/
static inline unsigned int read_mems_allowed_begin(void)
{
- if (!cpusets_enabled())
+ if (!static_branch_unlikely(&cpusets_pre_enable_key))
return 0;
return read_seqcount_begin(&current->mems_allowed_seq);
@@ -129,7 +152,7 @@ static inline unsigned int read_mems_allowed_begin(void)
*/
static inline bool read_mems_allowed_retry(unsigned int seq)
{
- if (!cpusets_enabled())
+ if (!static_branch_unlikely(&cpusets_enabled_key))
return false;
return read_seqcount_retry(&current->mems_allowed_seq, seq);
@@ -152,22 +175,32 @@ static inline void set_mems_allowed(nodemask_t nodemask)
static inline bool cpusets_enabled(void) { return false; }
+static inline bool cpusets_insane_config(void) { return false; }
+
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
+static inline void cpuset_force_rebuild(void) { }
+
static inline void cpuset_update_active_cpus(void)
{
partition_sched_domains(1, NULL, NULL);
}
+static inline void cpuset_wait_for_hotplug(void) { }
+
+static inline void cpuset_read_lock(void) { }
+static inline void cpuset_read_unlock(void) { }
+
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{
- cpumask_copy(mask, cpu_possible_mask);
+ cpumask_copy(mask, task_cpu_possible_mask(p));
}
-static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
+static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
{
+ return false;
}
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
@@ -183,11 +216,6 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
return 1;
}
-static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
-{
- return true;
-}
-
static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
return true;
@@ -231,9 +259,9 @@ static inline int cpuset_do_slab_mem_spread(void)
return 0;
}
-static inline int current_cpuset_is_being_rebound(void)
+static inline bool current_cpuset_is_being_rebound(void)
{
- return 0;
+ return false;
}
static inline void rebuild_sched_domains(void)
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index 2df2118fbe13..de62a722431e 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_CRASH_CORE_H
#define LINUX_CRASH_CORE_H
@@ -37,10 +38,18 @@ phys_addr_t paddr_vmcoreinfo_note(void);
#define VMCOREINFO_OSRELEASE(value) \
vmcoreinfo_append_str("OSRELEASE=%s\n", value)
+#define VMCOREINFO_BUILD_ID() \
+ ({ \
+ static_assert(sizeof(vmlinux_build_id) == 20); \
+ vmcoreinfo_append_str("BUILD-ID=%20phN\n", vmlinux_build_id); \
+ })
+
#define VMCOREINFO_PAGESIZE(value) \
vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
#define VMCOREINFO_SYMBOL(name) \
vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
+#define VMCOREINFO_SYMBOL_ARRAY(name) \
+ vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)name)
#define VMCOREINFO_SIZE(name) \
vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
(unsigned long)sizeof(name))
@@ -50,6 +59,9 @@ phys_addr_t paddr_vmcoreinfo_note(void);
#define VMCOREINFO_OFFSET(name, field) \
vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
(unsigned long)offsetof(struct name, field))
+#define VMCOREINFO_TYPE_OFFSET(name, field) \
+ vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
+ (unsigned long)offsetof(name, field))
#define VMCOREINFO_LENGTH(name, value) \
vmcoreinfo_append_str("LENGTH(%s)=%lu\n", #name, (unsigned long)value)
#define VMCOREINFO_NUMBER(name) \
@@ -57,6 +69,8 @@ phys_addr_t paddr_vmcoreinfo_note(void);
#define VMCOREINFO_CONFIG(name) \
vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
+extern unsigned char *vmcoreinfo_data;
+extern size_t vmcoreinfo_size;
extern u32 *vmcoreinfo_note;
Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 3873697ba21c..0f3a656293b0 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -1,19 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_CRASH_DUMP_H
#define LINUX_CRASH_DUMP_H
-#ifdef CONFIG_CRASH_DUMP
#include <linux/kexec.h>
#include <linux/proc_fs.h>
#include <linux/elf.h>
+#include <linux/pgtable.h>
+#include <uapi/linux/vmcore.h>
-#include <asm/pgtable.h> /* for pgprot_t */
-
+/* For IS_ENABLED(CONFIG_CRASH_DUMP) */
#define ELFCORE_ADDR_MAX (-1ULL)
#define ELFCORE_ADDR_ERR (-2ULL)
extern unsigned long long elfcorehdr_addr;
extern unsigned long long elfcorehdr_size;
+#ifdef CONFIG_CRASH_DUMP
extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
extern void elfcorehdr_free(unsigned long long addr);
extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);
@@ -22,8 +24,11 @@ extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
unsigned long from, unsigned long pfn,
unsigned long size, pgprot_t prot);
-extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
- unsigned long, int);
+ssize_t copy_oldmem_page(struct iov_iter *i, unsigned long pfn, size_t csize,
+ unsigned long offset);
+ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset);
+
void vmcore_cleanup(void);
/* Architecture code defines this if there are other possible ELF
@@ -51,13 +56,13 @@ void vmcore_cleanup(void);
* has passed the elf core header address on command line.
*
* This is not just a test if CONFIG_CRASH_DUMP is enabled or not. It will
- * return 1 if CONFIG_CRASH_DUMP=y and if kernel is booting after a panic of
- * previous kernel.
+ * return true if CONFIG_CRASH_DUMP=y and if kernel is booting after a panic
+ * of previous kernel.
*/
-static inline int is_kdump_kernel(void)
+static inline bool is_kdump_kernel(void)
{
- return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 1 : 0;
+ return elfcorehdr_addr != ELFCORE_ADDR_MAX;
}
/* is_vmcore_usable() checks if the kernel is booting after a panic and
@@ -83,13 +88,60 @@ static inline void vmcore_unusable(void)
elfcorehdr_addr = ELFCORE_ADDR_ERR;
}
-#define HAVE_OLDMEM_PFN_IS_RAM 1
-extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn));
-extern void unregister_oldmem_pfn_is_ram(void);
+/**
+ * struct vmcore_cb - driver callbacks for /proc/vmcore handling
+ * @pfn_is_ram: check whether a PFN really is RAM and should be accessed when
+ * reading the vmcore. Will return "true" if it is RAM or if the
+ * callback cannot tell. If any callback returns "false", it's not
+ * RAM and the page must not be accessed; zeroes should be
+ * indicated in the vmcore instead. For example, a ballooned page
+ * contains no data and reading from such a page will cause high
+ * load in the hypervisor.
+ * @next: List head to manage registered callbacks internally; initialized by
+ * register_vmcore_cb().
+ *
+ * vmcore callbacks allow drivers managing physical memory ranges to
+ * coordinate with vmcore handling code, for example, to prevent accessing
+ * physical memory ranges that should not be accessed when reading the vmcore,
+ * although included in the vmcore header as memory ranges to dump.
+ */
+struct vmcore_cb {
+ bool (*pfn_is_ram)(struct vmcore_cb *cb, unsigned long pfn);
+ struct list_head next;
+};
+extern void register_vmcore_cb(struct vmcore_cb *cb);
+extern void unregister_vmcore_cb(struct vmcore_cb *cb);
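
A sketch of how a driver might hook in (my_pfn_has_data() is a hypothetical driver-side check, e.g. a balloon driver knowing which pages hold no data):

#include <linux/crash_dump.h>

static bool my_pfn_has_data(unsigned long pfn);	/* hypothetical */

static bool my_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
{
	/* Return false only when the page is known to hold no data. */
	return my_pfn_has_data(pfn);
}

static struct vmcore_cb my_vmcore_cb = {
	.pfn_is_ram = my_pfn_is_ram,
};

/* register_vmcore_cb(&my_vmcore_cb) at probe time,
 * unregister_vmcore_cb(&my_vmcore_cb) on removal. */
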
#else /* !CONFIG_CRASH_DUMP */
-static inline int is_kdump_kernel(void) { return 0; }
+static inline bool is_kdump_kernel(void) { return false; }
#endif /* CONFIG_CRASH_DUMP */
-extern unsigned long saved_max_pfn;
+/* Device Dump information to be filled by drivers */
+struct vmcoredd_data {
+ char dump_name[VMCOREDD_MAX_NAME_BYTES]; /* Unique name of the dump */
+ unsigned int size; /* Size of the dump */
+ /* Driver's registered callback to be invoked to collect dump */
+ int (*vmcoredd_callback)(struct vmcoredd_data *data, void *buf);
+};
+
+#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
+int vmcore_add_device_dump(struct vmcoredd_data *data);
+#else
+static inline int vmcore_add_device_dump(struct vmcoredd_data *data)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
+
+#ifdef CONFIG_PROC_VMCORE
+ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
+ u64 *ppos, bool encrypted);
+#else
+static inline ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
+ u64 *ppos, bool encrypted)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_PROC_VMCORE */
+
#endif /* LINUX_CRASHDUMP_H */
diff --git a/include/linux/crc-ccitt.h b/include/linux/crc-ccitt.h
index f52696a1ff0d..72c92c396bb8 100644
--- a/include/linux/crc-ccitt.h
+++ b/include/linux/crc-ccitt.h
@@ -1,15 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CRC_CCITT_H
#define _LINUX_CRC_CCITT_H
#include <linux/types.h>
extern u16 const crc_ccitt_table[256];
+extern u16 const crc_ccitt_false_table[256];
extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len);
+extern u16 crc_ccitt_false(u16 crc, const u8 *buffer, size_t len);
static inline u16 crc_ccitt_byte(u16 crc, const u8 c)
{
return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff];
}
+static inline u16 crc_ccitt_false_byte(u16 crc, const u8 c)
+{
+ return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c];
+}
+
#endif /* _LINUX_CRC_CCITT_H */
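
A sketch of byte-at-a-time use of the new helper; with the customary CRC-16/CCITT-FALSE initial value 0xffff this should match crc_ccitt_false(0xffff, buf, len):

#include <linux/crc-ccitt.h>

static u16 ccitt_false_of(const u8 *buf, size_t len)
{
	u16 crc = 0xffff;	/* CRC-16/CCITT-FALSE init value */

	while (len--)
		crc = crc_ccitt_false_byte(crc, *buf++);
	return crc;
}
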
diff --git a/include/linux/crc-itu-t.h b/include/linux/crc-itu-t.h
index a9953c762eee..2f991a427ade 100644
--- a/include/linux/crc-itu-t.h
+++ b/include/linux/crc-itu-t.h
@@ -1,13 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* crc-itu-t.h - CRC ITU-T V.41 routine
*
* Implements the standard CRC ITU-T V.41:
* Width 16
- * Poly 0x1021 (x^16 + x^12 + x^15 + 1)
+ * Poly 0x1021 (x^16 + x^12 + x^5 + 1)
* Init 0
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
*/
#ifndef CRC_ITU_T_H
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
index d81961e9e37d..6bb0c0bf357b 100644
--- a/include/linux/crc-t10dif.h
+++ b/include/linux/crc-t10dif.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CRC_T10DIF_H
#define _LINUX_CRC_T10DIF_H
@@ -5,6 +6,7 @@
#define CRC_T10DIF_DIGEST_SIZE 2
#define CRC_T10DIF_BLOCK_SIZE 1
+#define CRC_T10DIF_STRING "crct10dif"
extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer,
size_t len);
diff --git a/include/linux/crc16.h b/include/linux/crc16.h
index 9443c084f881..9fa74529b317 100644
--- a/include/linux/crc16.h
+++ b/include/linux/crc16.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* crc16.h - CRC-16 routine
*
@@ -7,9 +8,6 @@
* Init 0
*
* Copyright (c) 2005 Ben Gardner <bgardner@wabtec.com>
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
*/
#ifndef __CRC16_H
diff --git a/include/linux/crc32c.h b/include/linux/crc32c.h
index bd8b44d96bdc..357ae4611a45 100644
--- a/include/linux/crc32c.h
+++ b/include/linux/crc32c.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CRC32C_H
#define _LINUX_CRC32C_H
diff --git a/include/linux/crc32poly.h b/include/linux/crc32poly.h
new file mode 100644
index 000000000000..62c4b7790a28
--- /dev/null
+++ b/include/linux/crc32poly.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CRC32_POLY_H
+#define _LINUX_CRC32_POLY_H
+
+/*
+ * There are multiple 32-bit CRC polynomials in common use, but this is
+ * *the* standard CRC-32 polynomial, first popularized by Ethernet.
+ * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
+ */
+#define CRC32_POLY_LE 0xedb88320
+#define CRC32_POLY_BE 0x04c11db7
+
+/*
+ * This is the CRC32c polynomial, as outlined by Castagnoli.
+ * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
+ * x^8+x^6+x^0
+ */
+#define CRC32C_POLY_LE 0x82F63B78
+
+#endif /* _LINUX_CRC32_POLY_H */
diff --git a/include/linux/crc4.h b/include/linux/crc4.h
index 8f739f1d794f..bd2c90556a06 100644
--- a/include/linux/crc4.h
+++ b/include/linux/crc4.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CRC4_H
#define _LINUX_CRC4_H
diff --git a/include/linux/crc64.h b/include/linux/crc64.h
new file mode 100644
index 000000000000..e044c60d1e61
--- /dev/null
+++ b/include/linux/crc64.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * See lib/crc64.c for the related specification and polynomial arithmetic.
+ */
+#ifndef _LINUX_CRC64_H
+#define _LINUX_CRC64_H
+
+#include <linux/types.h>
+
+#define CRC64_ROCKSOFT_STRING "crc64-rocksoft"
+
+u64 __pure crc64_be(u64 crc, const void *p, size_t len);
+u64 __pure crc64_rocksoft_generic(u64 crc, const void *p, size_t len);
+
+u64 crc64_rocksoft(const unsigned char *buffer, size_t len);
+u64 crc64_rocksoft_update(u64 crc, const unsigned char *buffer, size_t len);
+
+#endif /* _LINUX_CRC64_H */
diff --git a/include/linux/crc7.h b/include/linux/crc7.h
index d590765106f3..b462842f3c32 100644
--- a/include/linux/crc7.h
+++ b/include/linux/crc7.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CRC7_H
#define _LINUX_CRC7_H
#include <linux/types.h>
diff --git a/include/linux/crc8.h b/include/linux/crc8.h
index 13c8dabb0441..674045c59a04 100644
--- a/include/linux/crc8.h
+++ b/include/linux/crc8.h
@@ -96,6 +96,6 @@ void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial);
* Williams, Ross N., ross<at>ross.net
* (see URL http://www.ross.net/crc/download/crc_v3.txt).
*/
-u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc);
+u8 crc8(const u8 table[CRC8_TABLE_SIZE], const u8 *pdata, size_t nbytes, u8 crc);
#endif /* __CRC8_H_ */
diff --git a/include/linux/cred.h b/include/linux/cred.h
index c728d515e5e2..9ed9232af934 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Credentials management - see Documentation/security/credentials.rst
*
* Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
*/
#ifndef _LINUX_CRED_H
@@ -15,7 +11,6 @@
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/key.h>
-#include <linux/selinux.h>
#include <linux/atomic.h>
#include <linux/uidgid.h>
#include <linux/sched.h>
@@ -30,8 +25,8 @@ struct inode;
struct group_info {
atomic_t usage;
int ngroups;
- kgid_t gid[0];
-};
+ kgid_t gid[];
+} __randomize_layout;
/**
* get_group_info - Get a reference to a group info structure
@@ -58,13 +53,18 @@ do { \
groups_free(group_info); \
} while (0)
-extern struct group_info init_groups;
#ifdef CONFIG_MULTIUSER
extern struct group_info *groups_alloc(int);
extern void groups_free(struct group_info *);
extern int in_group_p(kgid_t);
extern int in_egroup_p(kgid_t);
+extern int groups_search(const struct group_info *, kgid_t);
+
+extern int set_current_groups(struct group_info *);
+extern void set_groups(struct cred *, struct group_info *);
+extern bool may_setgroups(void);
+extern void groups_sort(struct group_info *);
#else
static inline void groups_free(struct group_info *group_info)
{
@@ -78,11 +78,11 @@ static inline int in_egroup_p(kgid_t grp)
{
return 1;
}
+static inline int groups_search(const struct group_info *group_info, kgid_t grp)
+{
+ return 1;
+}
#endif
-extern int set_current_groups(struct group_info *);
-extern void set_groups(struct cred *, struct group_info *);
-extern int groups_search(const struct group_info *, kgid_t);
-extern bool may_setgroups(void);
/*
* The security context of a task
@@ -133,19 +133,24 @@ struct cred {
#ifdef CONFIG_KEYS
unsigned char jit_keyring; /* default keyring to attach requested
* keys to */
- struct key __rcu *session_keyring; /* keyring inherited over fork */
+ struct key *session_keyring; /* keyring inherited over fork */
struct key *process_keyring; /* keyring private to this process */
struct key *thread_keyring; /* keyring private to this thread */
struct key *request_key_auth; /* assumed request_key authority */
#endif
#ifdef CONFIG_SECURITY
- void *security; /* subjective LSM security */
+ void *security; /* LSM security */
#endif
struct user_struct *user; /* real user ID subscription */
struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
+ struct ucounts *ucounts;
struct group_info *group_info; /* supplementary groups for euid/fsgid */
- struct rcu_head rcu; /* RCU deletion hook */
-};
+ /* RCU deletion */
+ union {
+ int non_rcu; /* Can we skip RCU deletion? */
+ struct rcu_head rcu; /* RCU deletion hook */
+ };
+} __randomize_layout;
extern void __put_cred(struct cred *);
extern void exit_creds(struct task_struct *);
@@ -163,13 +168,15 @@ extern int change_create_files_as(struct cred *, struct inode *);
extern int set_security_override(struct cred *, u32);
extern int set_security_override_from_ctx(struct cred *, const char *);
extern int set_create_files_as(struct cred *, struct inode *);
+extern int cred_fscmp(const struct cred *, const struct cred *);
extern void __init cred_init(void);
+extern int set_cred_ucounts(struct cred *);
/*
* check for validity of credentials
*/
#ifdef CONFIG_DEBUG_CREDENTIALS
-extern void __invalid_creds(const struct cred *, const char *, unsigned);
+extern void __noreturn __invalid_creds(const struct cred *, const char *, unsigned);
extern void __validate_process_creds(struct task_struct *,
const char *, unsigned);
@@ -230,7 +237,7 @@ static inline struct cred *get_new_cred(struct cred *cred)
* @cred: The credentials to reference
*
* Get a reference on the specified set of credentials. The caller must
- * release the reference.
+ * release the reference. If %NULL is passed, it is returned with no action.
*
* This is used to deal with a committed set of credentials. Although the
* pointer is const, this will temporarily discard the const and increment the
@@ -241,16 +248,31 @@ static inline struct cred *get_new_cred(struct cred *cred)
static inline const struct cred *get_cred(const struct cred *cred)
{
struct cred *nonconst_cred = (struct cred *) cred;
+ if (!cred)
+ return cred;
validate_creds(cred);
+ nonconst_cred->non_rcu = 0;
return get_new_cred(nonconst_cred);
}
+static inline const struct cred *get_cred_rcu(const struct cred *cred)
+{
+ struct cred *nonconst_cred = (struct cred *) cred;
+ if (!cred)
+ return NULL;
+ if (!atomic_inc_not_zero(&nonconst_cred->usage))
+ return NULL;
+ validate_creds(cred);
+ nonconst_cred->non_rcu = 0;
+ return cred;
+}
+
/**
* put_cred - Release a reference to a set of credentials
* @cred: The credentials to release
*
* Release a reference to a set of credentials, deleting them when the last ref
- * is released.
+ * is released. If %NULL is passed, nothing is done.
*
* This takes a const pointer to a set of credentials because the credentials
* on task_struct are attached by const pointers to prevent accidental
@@ -260,9 +282,11 @@ static inline void put_cred(const struct cred *_cred)
{
struct cred *cred = (struct cred *) _cred;
- validate_creds(cred);
- if (atomic_dec_and_test(&(cred)->usage))
- __put_cred(cred);
+ if (cred) {
+ validate_creds(cred);
+ if (atomic_dec_and_test(&(cred)->usage))
+ __put_cred(cred);
+ }
}
/**
@@ -347,6 +371,7 @@ static inline void put_cred(const struct cred *_cred)
#define task_uid(task) (task_cred_xxx((task), uid))
#define task_euid(task) (task_cred_xxx((task), euid))
+#define task_ucounts(task) (task_cred_xxx((task), ucounts))
#define current_cred_xxx(xxx) \
({ \
@@ -363,7 +388,7 @@ static inline void put_cred(const struct cred *_cred)
#define current_fsgid() (current_cred_xxx(fsgid))
#define current_cap() (current_cred_xxx(cap_effective))
#define current_user() (current_cred_xxx(user))
-#define current_security() (current_cred_xxx(security))
+#define current_ucounts() (current_cred_xxx(ucounts))
extern struct user_namespace init_user_ns;
#ifdef CONFIG_USER_NS
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h
index 92e165d417a6..30dba392b730 100644
--- a/include/linux/crush/crush.h
+++ b/include/linux/crush/crush.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef CEPH_CRUSH_CRUSH_H
#define CEPH_CRUSH_CRUSH_H
@@ -16,7 +17,7 @@
* The algorithm was originally described in detail in this paper
* (although the algorithm has evolved somewhat since then):
*
- * http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
+ * https://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
*
* LGPL2
*/
@@ -86,7 +87,7 @@ struct crush_rule_mask {
struct crush_rule {
__u32 len;
struct crush_rule_mask mask;
- struct crush_rule_step steps[0];
+ struct crush_rule_step steps[];
};
#define crush_rule_size(len) (sizeof(struct crush_rule) + \
@@ -193,7 +194,7 @@ struct crush_choose_arg {
struct crush_choose_arg_map {
#ifdef __KERNEL__
struct rb_node node;
- u64 choose_args_index;
+ s64 choose_args_index;
#endif
struct crush_choose_arg *args; /*!< replacement for each bucket
in the crushmap */
@@ -300,6 +301,12 @@ struct crush_map {
__u32 *choose_tries;
#else
+ /* device/bucket type id -> type name (CrushWrapper::type_map) */
+ struct rb_root type_names;
+
+ /* device/bucket id -> name (CrushWrapper::name_map) */
+ struct rb_root names;
+
/* CrushWrapper::choose_args */
struct rb_root choose_args;
#endif
@@ -339,6 +346,15 @@ struct crush_work_bucket {
struct crush_work {
struct crush_work_bucket **work; /* Per-bucket working store */
+#ifdef __KERNEL__
+ struct list_head item;
+#endif
};
+#ifdef __KERNEL__
+/* osdmap.c */
+void clear_crush_names(struct rb_root *root);
+void clear_choose_args(struct crush_map *c);
+#endif
+
#endif
diff --git a/include/linux/crush/hash.h b/include/linux/crush/hash.h
index d1d90258242e..904df41f7847 100644
--- a/include/linux/crush/hash.h
+++ b/include/linux/crush/hash.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef CEPH_CRUSH_HASH_H
#define CEPH_CRUSH_HASH_H
diff --git a/include/linux/crush/mapper.h b/include/linux/crush/mapper.h
index 141edabb947e..f9b99232f5a1 100644
--- a/include/linux/crush/mapper.h
+++ b/include/linux/crush/mapper.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef CEPH_CRUSH_MAPPER_H
#define CEPH_CRUSH_MAPPER_H
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 84da9978e951..fa310ac1db59 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Scatterlist Cryptographic API.
*
@@ -7,36 +8,14 @@
*
* Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
* and Nettle, by Niels Möller.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
*/
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H
-#include <linux/atomic.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/refcount.h>
#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/uaccess.h>
-
-/*
- * Autoloaded crypto modules should only use a prefixed name to avoid allowing
- * arbitrary modules to be loaded. Loading from userspace may still need the
- * unprefixed names, so retains those aliases as well.
- * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
- * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
- * expands twice on the same line. Instead, use a separate base name for the
- * alias.
- */
-#define MODULE_ALIAS_CRYPTO(name) \
- __MODULE_INFO(alias, alias_userspace, name); \
- __MODULE_INFO(alias, alias_crypto, "crypto-" name)
+#include <linux/types.h>
/*
* Algorithm masks and types.
@@ -45,23 +24,18 @@
#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
#define CRYPTO_ALG_TYPE_COMPRESS 0x00000002
#define CRYPTO_ALG_TYPE_AEAD 0x00000003
-#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004
-#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
-#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
#define CRYPTO_ALG_TYPE_KPP 0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
-#define CRYPTO_ALG_TYPE_DIGEST 0x0000000e
#define CRYPTO_ALG_TYPE_HASH 0x0000000e
#define CRYPTO_ALG_TYPE_SHASH 0x0000000e
#define CRYPTO_ALG_TYPE_AHASH 0x0000000f
#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
-#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
#define CRYPTO_ALG_LARVAL 0x00000010
@@ -70,18 +44,12 @@
#define CRYPTO_ALG_ASYNC 0x00000080
/*
- * Set this bit if and only if the algorithm requires another algorithm of
- * the same type to handle corner cases.
+ * Set if the algorithm (or an algorithm which it uses) requires another
+ * algorithm of the same type to handle corner cases.
*/
#define CRYPTO_ALG_NEED_FALLBACK 0x00000100
/*
- * This bit is set for symmetric key ciphers that have already been wrapped
- * with a generic IV generator to prevent them from being wrapped again.
- */
-#define CRYPTO_ALG_GENIV 0x00000200
-
-/*
* Set if the algorithm has passed automated run-time testing. Note that
* if there is no run-time testing for a given algorithm it is considered
* to have passed.
@@ -106,19 +74,66 @@
#define CRYPTO_ALG_INTERNAL 0x00002000
/*
+ * Set if the algorithm has a ->setkey() method but can be used without
+ * calling it first, i.e. there is a default key.
+ */
+#define CRYPTO_ALG_OPTIONAL_KEY 0x00004000
+
+/*
+ * Don't trigger module loading
+ */
+#define CRYPTO_NOLOAD 0x00008000
+
+/*
+ * The algorithm may allocate memory during request processing, i.e. during
+ * encryption, decryption, or hashing. Users can request an algorithm with this
+ * flag unset if they can't handle memory allocation failures.
+ *
+ * This flag is currently only implemented for algorithms of type "skcipher",
+ * "aead", "ahash", "shash", and "cipher". Algorithms of other types might not
+ * have this flag set even if they allocate memory.
+ *
+ * In some edge cases, algorithms can allocate memory regardless of this flag.
+ * To avoid these cases, users must obey the following usage constraints:
+ * skcipher:
+ * - The IV buffer and all scatterlist elements must be aligned to the
+ * algorithm's alignmask.
+ * - If the data were to be divided into chunks of size
+ * crypto_skcipher_walksize() (with any remainder going at the end), no
+ * chunk can cross a page boundary or a scatterlist element boundary.
+ * aead:
+ * - The IV buffer and all scatterlist elements must be aligned to the
+ * algorithm's alignmask.
+ * - The first scatterlist element must contain all the associated data,
+ * and its pages must be !PageHighMem.
+ * - If the plaintext/ciphertext were to be divided into chunks of size
+ * crypto_aead_walksize() (with the remainder going at the end), no chunk
+ * can cross a page boundary or a scatterlist element boundary.
+ * ahash:
+ * - The result buffer must be aligned to the algorithm's alignmask.
+ * - crypto_ahash_finup() must not be used unless the algorithm implements
+ * ->finup() natively.
+ */
+#define CRYPTO_ALG_ALLOCATES_MEMORY 0x00010000
+
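
A sketch of the user side of this flag: passing it in the mask with the type bits clear requests an implementation that has the flag unset, i.e. one that does not allocate during request processing ("cbc(aes)" is illustrative):

#include <crypto/skcipher.h>

static struct crypto_skcipher *alloc_nonallocating_cipher(void)
{
	return crypto_alloc_skcipher("cbc(aes)", 0,
				     CRYPTO_ALG_ALLOCATES_MEMORY);
}
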
+/*
+ * Mark an algorithm as a service implementation only usable by a
+ * template and never by a normal user of the kernel crypto API.
+ * This is intended to be used by algorithms that are themselves
+ * not FIPS-approved but may instead be used to implement parts of
+ * a FIPS-approved algorithm (e.g., dh vs. ffdhe2048(dh)).
+ */
+#define CRYPTO_ALG_FIPS_INTERNAL 0x00020000
+
+/*
* Transform masks and values (for crt_flags).
*/
-#define CRYPTO_TFM_REQ_MASK 0x000fff00
-#define CRYPTO_TFM_RES_MASK 0xfff00000
+#define CRYPTO_TFM_NEED_KEY 0x00000001
-#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100
+#define CRYPTO_TFM_REQ_MASK 0x000fff00
+#define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS 0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400
-#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000
-#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000
-#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000
-#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000
-#define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000
/*
* Miscellaneous stuff.
@@ -129,23 +144,22 @@
* The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
* declaration) is used to ensure that the crypto_tfm context structure is
* aligned correctly for the given architecture so that there are no alignment
- * faults for C data types. In particular, this is required on platforms such
- * as arm where pointers are 32-bit aligned but there are data types such as
- * u64 which require 64-bit alignment.
+ * faults for C data types. On architectures that support non-cache coherent
+ * DMA, such as ARM or arm64, it also takes into account the minimal alignment
+ * that is required to ensure that the context struct member does not share any
+ * cachelines with the rest of the struct. This is needed to ensure that cache
+ * maintenance for non-coherent DMA (cache invalidation in particular) does not
+ * affect data that may be accessed by the CPU concurrently.
*/
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
-struct scatterlist;
-struct crypto_ablkcipher;
-struct crypto_async_request;
-struct crypto_blkcipher;
struct crypto_tfm;
struct crypto_type;
-struct skcipher_givcrypt_request;
+struct module;
-typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
+typedef void (*crypto_completion_t)(void *req, int err);
/**
* DOC: Block Cipher Context Data Structures
@@ -163,33 +177,6 @@ struct crypto_async_request {
u32 flags;
};
-struct ablkcipher_request {
- struct crypto_async_request base;
-
- unsigned int nbytes;
-
- void *info;
-
- struct scatterlist *src;
- struct scatterlist *dst;
-
- void *__ctx[] CRYPTO_MINALIGN_ATTR;
-};
-
-struct blkcipher_desc {
- struct crypto_blkcipher *tfm;
- void *info;
- u32 flags;
-};
-
-struct cipher_desc {
- struct crypto_tfm *tfm;
- void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
- unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
- const u8 *src, unsigned int nbytes);
- void *info;
-};
-
/**
* DOC: Block Cipher Algorithm Definitions
*
@@ -198,101 +185,6 @@ struct cipher_desc {
*/
/**
- * struct ablkcipher_alg - asynchronous block cipher definition
- * @min_keysize: Minimum key size supported by the transformation. This is the
- * smallest key length supported by this transformation algorithm.
- * This must be set to one of the pre-defined values as this is
- * not hardware specific. Possible values for this field can be
- * found via git grep "_MIN_KEY_SIZE" include/crypto/
- * @max_keysize: Maximum key size supported by the transformation. This is the
- * largest key length supported by this transformation algorithm.
- * This must be set to one of the pre-defined values as this is
- * not hardware specific. Possible values for this field can be
- * found via git grep "_MAX_KEY_SIZE" include/crypto/
- * @setkey: Set key for the transformation. This function is used to either
- * program a supplied key into the hardware or store the key in the
- * transformation context for programming it later. Note that this
- * function does modify the transformation context. This function can
- * be called multiple times during the existence of the transformation
- * object, so one must make sure the key is properly reprogrammed into
- * the hardware. This function is also responsible for checking the key
- * length for validity. In case a software fallback was put in place in
- * the @cra_init call, this function might need to use the fallback if
- * the algorithm doesn't support all of the key sizes.
- * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
- * the supplied scatterlist containing the blocks of data. The crypto
- * API consumer is responsible for aligning the entries of the
- * scatterlist properly and making sure the chunks are correctly
- * sized. In case a software fallback was put in place in the
- * @cra_init call, this function might need to use the fallback if
- * the algorithm doesn't support all of the key sizes. In case the
- * key was stored in transformation context, the key might need to be
- * re-programmed into the hardware in this function. This function
- * shall not modify the transformation context, as this function may
- * be called in parallel with the same transformation object.
- * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
- * and the conditions are exactly the same.
- * @givencrypt: Update the IV for encryption. With this function, a cipher
- * implementation may provide the function on how to update the IV
- * for encryption.
- * @givdecrypt: Update the IV for decryption. This is the reverse of
- * @givencrypt .
- * @geniv: The transformation implementation may use an "IV generator" provided
- * by the kernel crypto API. Several use cases have a predefined
- * approach how IVs are to be updated. For such use cases, the kernel
- * crypto API provides ready-to-use implementations that can be
- * referenced with this variable.
- * @ivsize: IV size applicable for transformation. The consumer must provide an
- * IV of exactly that size to perform the encrypt or decrypt operation.
- *
- * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
- * mandatory and must be filled.
- */
-struct ablkcipher_alg {
- int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct ablkcipher_request *req);
- int (*decrypt)(struct ablkcipher_request *req);
- int (*givencrypt)(struct skcipher_givcrypt_request *req);
- int (*givdecrypt)(struct skcipher_givcrypt_request *req);
-
- const char *geniv;
-
- unsigned int min_keysize;
- unsigned int max_keysize;
- unsigned int ivsize;
-};
-
-/**
- * struct blkcipher_alg - synchronous block cipher definition
- * @min_keysize: see struct ablkcipher_alg
- * @max_keysize: see struct ablkcipher_alg
- * @setkey: see struct ablkcipher_alg
- * @encrypt: see struct ablkcipher_alg
- * @decrypt: see struct ablkcipher_alg
- * @geniv: see struct ablkcipher_alg
- * @ivsize: see struct ablkcipher_alg
- *
- * All fields except @geniv and @ivsize are mandatory and must be filled.
- */
-struct blkcipher_alg {
- int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes);
- int (*decrypt)(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes);
-
- const char *geniv;
-
- unsigned int min_keysize;
- unsigned int max_keysize;
- unsigned int ivsize;
-};
-
-/**
* struct cipher_alg - single-block symmetric ciphers definition
* @cia_min_keysize: Minimum key size supported by the transformation. This is
* the smallest key length supported by this transformation
@@ -348,6 +240,17 @@ struct cipher_alg {
void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};
+/**
+ * struct compress_alg - compression/decompression algorithm
+ * @coa_compress: Compress a buffer of specified length, storing the resulting
+ * data in the specified buffer. Return the length of the
+ * compressed data in dlen.
+ * @coa_decompress: Decompress the source buffer, storing the uncompressed
+ * data in the specified buffer. The length of the data is
+ * returned in dlen.
+ *
+ * All fields are mandatory.
+ */
struct compress_alg {
int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen);
@@ -355,9 +258,6 @@ struct compress_alg {
unsigned int slen, u8 *dst, unsigned int *dlen);
};
-
-#define cra_ablkcipher cra_u.ablkcipher
-#define cra_blkcipher cra_u.blkcipher
#define cra_cipher cra_u.cipher
#define cra_compress cra_u.compress
@@ -405,9 +305,8 @@ struct compress_alg {
* transformation algorithm.
* @cra_type: Type of the cryptographic transformation. This is a pointer to
* struct crypto_type, which implements callbacks common for all
- * transformation types. There are multiple options:
- * &crypto_blkcipher_type, &crypto_ablkcipher_type,
- * &crypto_ahash_type, &crypto_rng_type.
+ * transformation types. There are multiple options, such as
+ * &crypto_skcipher_type, &crypto_ahash_type, &crypto_rng_type.
* This field might be empty. In that case, there are no common
* callbacks. This is the case for: cipher, compress, shash.
* @cra_u: Callbacks implementing the transformation. This is a union of
@@ -426,6 +325,10 @@ struct compress_alg {
* @cra_exit: Deinitialize the cryptographic transformation object. This is a
* counterpart to @cra_init, used to remove various changes set in
* @cra_init.
+ * @cra_u.cipher: Union member which contains a single-block symmetric cipher
+ * definition. See struct cipher_alg.
+ * @cra_u.compress: Union member which contains a (de)compression algorithm.
+ * See struct compress_alg.
* @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
* @cra_list: internally used
* @cra_users: internally used
@@ -446,7 +349,7 @@ struct crypto_alg {
unsigned int cra_alignmask;
int cra_priority;
- atomic_t cra_refcnt;
+ refcount_t cra_refcnt;
char cra_name[CRYPTO_MAX_ALG_NAME];
char cra_driver_name[CRYPTO_MAX_ALG_NAME];
@@ -454,8 +357,6 @@ struct crypto_alg {
const struct crypto_type *cra_type;
union {
- struct ablkcipher_alg ablkcipher;
- struct blkcipher_alg blkcipher;
struct cipher_alg cipher;
struct compress_alg compress;
} cra_u;
@@ -468,78 +369,62 @@ struct crypto_alg {
} CRYPTO_MINALIGN_ATTR;
/*
- * Algorithm registration interface.
+ * A helper struct for waiting for completion of async crypto ops
*/
-int crypto_register_alg(struct crypto_alg *alg);
-int crypto_unregister_alg(struct crypto_alg *alg);
-int crypto_register_algs(struct crypto_alg *algs, int count);
-int crypto_unregister_algs(struct crypto_alg *algs, int count);
+struct crypto_wait {
+ struct completion completion;
+ int err;
+};
/*
- * Algorithm query interface.
+ * Macro for declaring a crypto op async wait object on stack
*/
-int crypto_has_alg(const char *name, u32 type, u32 mask);
+#define DECLARE_CRYPTO_WAIT(_wait) \
+ struct crypto_wait _wait = { \
+ COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }
/*
- * Transforms: user-instantiated objects which encapsulate algorithms
- * and core processing logic. Managed via crypto_alloc_*() and
- * crypto_free_*(), as well as the various helpers below.
+ * Async ops completion helper functions
*/
+void crypto_req_done(void *req, int err);
-struct ablkcipher_tfm {
- int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct ablkcipher_request *req);
- int (*decrypt)(struct ablkcipher_request *req);
-
- struct crypto_ablkcipher *base;
-
- unsigned int ivsize;
- unsigned int reqsize;
-};
+static inline int crypto_wait_req(int err, struct crypto_wait *wait)
+{
+ switch (err) {
+ case -EINPROGRESS:
+ case -EBUSY:
+ wait_for_completion(&wait->completion);
+ reinit_completion(&wait->completion);
+ err = wait->err;
+ break;
+ }
-struct blkcipher_tfm {
- void *iv;
- int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes);
- int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes);
-};
+ return err;
+}
-struct cipher_tfm {
- int (*cit_setkey)(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen);
- void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
- void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
-};
+static inline void crypto_init_wait(struct crypto_wait *wait)
+{
+ init_completion(&wait->completion);
+}
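
A minimal sketch of how a caller might drive an asynchronous request through these wait helpers; the skcipher calls are assumed from <crypto/skcipher.h>, which sits outside this hunk, and "cbc(aes)" is only an illustrative algorithm name:

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int demo_encrypt(struct scatterlist *src, struct scatterlist *dst,
			unsigned int nbytes, const u8 *key,
			unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* crypto_req_done() records the result and fires the completion. */
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, nbytes, iv);

	/* Folds -EINPROGRESS/-EBUSY into a synchronous wait. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}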
-struct compress_tfm {
- int (*cot_compress)(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
- int (*cot_decompress)(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-};
+/*
+ * Algorithm query interface.
+ */
+int crypto_has_alg(const char *name, u32 type, u32 mask);
-#define crt_ablkcipher crt_u.ablkcipher
-#define crt_blkcipher crt_u.blkcipher
-#define crt_cipher crt_u.cipher
-#define crt_compress crt_u.compress
+/*
+ * Transforms: user-instantiated objects which encapsulate algorithms
+ * and core processing logic. Managed via crypto_alloc_*() and
+ * crypto_free_*(), as well as the various helpers below.
+ */
struct crypto_tfm {
+ refcount_t refcnt;
u32 crt_flags;
-
- union {
- struct ablkcipher_tfm ablkcipher;
- struct blkcipher_tfm blkcipher;
- struct cipher_tfm cipher;
- struct compress_tfm compress;
- } crt_u;
+ int node;
+
void (*exit)(struct crypto_tfm *tfm);
struct crypto_alg *__crt_alg;
@@ -547,48 +432,10 @@ struct crypto_tfm {
void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};
-struct crypto_ablkcipher {
- struct crypto_tfm base;
-};
-
-struct crypto_blkcipher {
- struct crypto_tfm base;
-};
-
-struct crypto_cipher {
- struct crypto_tfm base;
-};
-
struct crypto_comp {
struct crypto_tfm base;
};
-enum {
- CRYPTOA_UNSPEC,
- CRYPTOA_ALG,
- CRYPTOA_TYPE,
- CRYPTOA_U32,
- __CRYPTOA_MAX,
-};
-
-#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)
-
-/* Maximum number of (rtattr) parameters for each template. */
-#define CRYPTO_MAX_ATTRS 32
-
-struct crypto_attr_alg {
- char name[CRYPTO_MAX_ALG_NAME];
-};
-
-struct crypto_attr_type {
- u32 type;
- u32 mask;
-};
-
-struct crypto_attr_u32 {
- u32 num;
-};
-
/*
* Transform user interface.
*/
@@ -601,8 +448,6 @@ static inline void crypto_free_tfm(struct crypto_tfm *tfm)
return crypto_destroy_tfm(tfm, tfm);
}
-int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
-
/*
* Transform helpers which query the underlying algorithm.
*/
@@ -616,16 +461,6 @@ static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
return tfm->__crt_alg->cra_driver_name;
}
-static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_priority;
-}
-
-static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
-}
-
static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_blocksize;
@@ -651,904 +486,17 @@ static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
tfm->crt_flags &= ~flags;
}
-static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
-{
- return tfm->__crt_ctx;
-}
-
static inline unsigned int crypto_tfm_ctx_alignment(void)
{
struct crypto_tfm *tfm;
return __alignof__(tfm->__crt_ctx);
}
-/*
- * API wrappers.
- */
-static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
- struct crypto_tfm *tfm)
-{
- return (struct crypto_ablkcipher *)tfm;
-}
-
-static inline u32 crypto_skcipher_type(u32 type)
-{
- type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
- type |= CRYPTO_ALG_TYPE_BLKCIPHER;
- return type;
-}
-
-static inline u32 crypto_skcipher_mask(u32 mask)
-{
- mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
- mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
- return mask;
-}
-
-/**
- * DOC: Asynchronous Block Cipher API
- *
- * Asynchronous block cipher API is used with the ciphers of type
- * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
- *
- * Asynchronous cipher operations imply that the function invocation for a
- * cipher request returns immediately before the completion of the operation.
- * The cipher request is scheduled as a separate kernel thread and therefore
- * load-balanced on the different CPUs via the process scheduler. To allow
- * the kernel crypto API to inform the caller about the completion of a cipher
- * request, the caller must provide a callback function. That function is
- * invoked with the cipher handle when the request completes.
- *
- * To support the asynchronous operation, additional information than just the
- * cipher handle must be supplied to the kernel crypto API. That additional
- * information is given by filling in the ablkcipher_request data structure.
- *
- * For the asynchronous block cipher API, the state is maintained with the tfm
- * cipher handle. A single tfm can be used across multiple calls and in
- * parallel. For asynchronous block cipher calls, context data supplied and
- * only used by the caller can be referenced the request data structure in
- * addition to the IV used for the cipher request. The maintenance of such
- * state information would be important for a crypto driver implementer to
- * have, because when calling the callback function upon completion of the
- * cipher operation, that callback function may need some information about
- * which operation just finished if it invoked multiple in parallel. This
- * state information is unused by the kernel crypto API.
- */
-
-static inline struct crypto_tfm *crypto_ablkcipher_tfm(
- struct crypto_ablkcipher *tfm)
-{
- return &tfm->base;
-}
-
-/**
- * crypto_free_ablkcipher() - zeroize and free cipher handle
- * @tfm: cipher handle to be freed
- */
-static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
-{
- crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
-}
-
-/**
- * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * ablkcipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Return: true when the ablkcipher is known to the kernel crypto API; false
- * otherwise
- */
-static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
- u32 mask)
-{
- return crypto_has_alg(alg_name, crypto_skcipher_type(type),
- crypto_skcipher_mask(mask));
-}
-
-static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
- struct crypto_ablkcipher *tfm)
-{
- return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
-}
-
-/**
- * crypto_ablkcipher_ivsize() - obtain IV size
- * @tfm: cipher handle
- *
- * The size of the IV for the ablkcipher referenced by the cipher handle is
- * returned. This IV size may be zero if the cipher does not need an IV.
- *
- * Return: IV size in bytes
- */
-static inline unsigned int crypto_ablkcipher_ivsize(
- struct crypto_ablkcipher *tfm)
-{
- return crypto_ablkcipher_crt(tfm)->ivsize;
-}
-
-/**
- * crypto_ablkcipher_blocksize() - obtain block size of cipher
- * @tfm: cipher handle
- *
- * The block size for the ablkcipher referenced with the cipher handle is
- * returned. The caller may use that information to allocate appropriate
- * memory for the data returned by the encryption or decryption operation
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_ablkcipher_blocksize(
- struct crypto_ablkcipher *tfm)
-{
- return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
-}
-
-static inline unsigned int crypto_ablkcipher_alignmask(
- struct crypto_ablkcipher *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
-}
-
-static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
-{
- return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
-}
-
-static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
- u32 flags)
-{
- crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
-}
-
-static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
- u32 flags)
-{
- crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
-}
-
-/**
- * crypto_ablkcipher_setkey() - set key for cipher
- * @tfm: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the ablkcipher referenced by the cipher
- * handle.
- *
- * Note, the key length determines the cipher type. Many block ciphers implement
- * different cipher modes depending on the key size, such as AES-128 vs AES-192
- * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
- * is performed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key, unsigned int keylen)
-{
- struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);
-
- return crt->setkey(crt->base, key, keylen);
-}
-
-/**
- * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
- * @req: ablkcipher_request out of which the cipher handle is to be obtained
- *
- * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request
- * data structure.
- *
- * Return: crypto_ablkcipher handle
- */
-static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
- struct ablkcipher_request *req)
-{
- return __crypto_ablkcipher_cast(req->base.tfm);
-}
-
-/**
- * crypto_ablkcipher_encrypt() - encrypt plaintext
- * @req: reference to the ablkcipher_request handle that holds all information
- * needed to perform the cipher operation
- *
- * Encrypt plaintext data using the ablkcipher_request handle. That data
- * structure and how it is filled with data is discussed with the
- * ablkcipher_request_* functions.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
-{
- struct ablkcipher_tfm *crt =
- crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
- return crt->encrypt(req);
-}
-
-/**
- * crypto_ablkcipher_decrypt() - decrypt ciphertext
- * @req: reference to the ablkcipher_request handle that holds all information
- * needed to perform the cipher operation
- *
- * Decrypt ciphertext data using the ablkcipher_request handle. That data
- * structure and how it is filled with data is discussed with the
- * ablkcipher_request_* functions.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
-{
- struct ablkcipher_tfm *crt =
- crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
- return crt->decrypt(req);
-}
-
-/**
- * DOC: Asynchronous Cipher Request Handle
- *
- * The ablkcipher_request data structure contains all pointers to data
- * required for the asynchronous cipher operation. This includes the cipher
- * handle (which can be used by multiple ablkcipher_request instances), pointer
- * to plaintext and ciphertext, asynchronous callback function, etc. It acts
- * as a handle to the ablkcipher_request_* API calls in a similar way as
- * ablkcipher handle to the crypto_ablkcipher_* API calls.
- */
-
-/**
- * crypto_ablkcipher_reqsize() - obtain size of the request data structure
- * @tfm: cipher handle
- *
- * Return: number of bytes
- */
-static inline unsigned int crypto_ablkcipher_reqsize(
- struct crypto_ablkcipher *tfm)
-{
- return crypto_ablkcipher_crt(tfm)->reqsize;
-}
-
-/**
- * ablkcipher_request_set_tfm() - update cipher handle reference in request
- * @req: request handle to be modified
- * @tfm: cipher handle that shall be added to the request handle
- *
- * Allow the caller to replace the existing ablkcipher handle in the request
- * data structure with a different one.
- */
-static inline void ablkcipher_request_set_tfm(
- struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
-{
- req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
-}
-
-static inline struct ablkcipher_request *ablkcipher_request_cast(
- struct crypto_async_request *req)
-{
- return container_of(req, struct ablkcipher_request, base);
-}
-
-/**
- * ablkcipher_request_alloc() - allocate request data structure
- * @tfm: cipher handle to be registered with the request
- * @gfp: memory allocation flag that is handed to kmalloc by the API call.
- *
- * Allocate the request data structure that must be used with the ablkcipher
- * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
- * handle is registered in the request data structure.
- *
- * Return: allocated request handle in case of success, or NULL if out of memory
- */
-static inline struct ablkcipher_request *ablkcipher_request_alloc(
- struct crypto_ablkcipher *tfm, gfp_t gfp)
-{
- struct ablkcipher_request *req;
-
- req = kmalloc(sizeof(struct ablkcipher_request) +
- crypto_ablkcipher_reqsize(tfm), gfp);
-
- if (likely(req))
- ablkcipher_request_set_tfm(req, tfm);
-
- return req;
-}
-
-/**
- * ablkcipher_request_free() - zeroize and free request data structure
- * @req: request data structure cipher handle to be freed
- */
-static inline void ablkcipher_request_free(struct ablkcipher_request *req)
-{
- kzfree(req);
-}
-
-/**
- * ablkcipher_request_set_callback() - set asynchronous callback function
- * @req: request handle
- * @flags: specify zero or an ORing of the flags
- * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
- * increase the wait queue beyond the initial maximum size;
- * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
- * @compl: callback function pointer to be registered with the request handle
- * @data: The data pointer refers to memory that is not used by the kernel
- * crypto API, but provided to the callback function for it to use. Here,
- * the caller can provide a reference to memory the callback function can
- * operate on. As the callback function is invoked asynchronously to the
- * related functionality, it may need to access data structures of the
- * related functionality which can be referenced using this pointer. The
- * callback function can access the memory via the "data" field in the
- * crypto_async_request data structure provided to the callback function.
- *
- * This function allows setting the callback function that is triggered once the
- * cipher operation completes.
- *
- * The callback function is registered with the ablkcipher_request handle and
- * must comply with the following template::
- *
- * void callback_function(struct crypto_async_request *req, int error)
- */
-static inline void ablkcipher_request_set_callback(
- struct ablkcipher_request *req,
- u32 flags, crypto_completion_t compl, void *data)
-{
- req->base.complete = compl;
- req->base.data = data;
- req->base.flags = flags;
-}
-
-/**
- * ablkcipher_request_set_crypt() - set data buffers
- * @req: request handle
- * @src: source scatter / gather list
- * @dst: destination scatter / gather list
- * @nbytes: number of bytes to process from @src
- * @iv: IV for the cipher operation which must comply with the IV size defined
- * by crypto_ablkcipher_ivsize
- *
- * This function allows setting of the source data and destination data
- * scatter / gather lists.
- *
- * For encryption, the source is treated as the plaintext and the
- * destination is the ciphertext. For a decryption operation, the use is
- * reversed - the source is the ciphertext and the destination is the plaintext.
- */
-static inline void ablkcipher_request_set_crypt(
- struct ablkcipher_request *req,
- struct scatterlist *src, struct scatterlist *dst,
- unsigned int nbytes, void *iv)
-{
- req->src = src;
- req->dst = dst;
- req->nbytes = nbytes;
- req->info = iv;
-}
-
-/**
- * DOC: Synchronous Block Cipher API
- *
- * The synchronous block cipher API is used with the ciphers of type
- * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto)
- *
- * Synchronous calls, have a context in the tfm. But since a single tfm can be
- * used in multiple calls and in parallel, this info should not be changeable
- * (unless a lock is used). This applies, for example, to the symmetric key.
- * However, the IV is changeable, so there is an iv field in blkcipher_tfm
- * structure for synchronous blkcipher api. So, its the only state info that can
- * be kept for synchronous calls without using a big lock across a tfm.
- *
- * The block cipher API allows the use of a complete cipher, i.e. a cipher
- * consisting of a template (a block chaining mode) and a single block cipher
- * primitive (e.g. AES).
- *
- * The plaintext data buffer and the ciphertext data buffer are pointed to
- * by using scatter/gather lists. The cipher operation is performed
- * on all segments of the provided scatter/gather lists.
- *
- * The kernel crypto API supports a cipher operation "in-place" which means that
- * the caller may provide the same scatter/gather list for the plaintext and
- * cipher text. After the completion of the cipher operation, the plaintext
- * data is replaced with the ciphertext data in case of an encryption and vice
- * versa for a decryption. The caller must ensure that the scatter/gather lists
- * for the output data point to sufficiently large buffers, i.e. multiples of
- * the block size of the cipher.
- */
-
-static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
- struct crypto_tfm *tfm)
-{
- return (struct crypto_blkcipher *)tfm;
-}
-
-static inline struct crypto_blkcipher *crypto_blkcipher_cast(
- struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
- return __crypto_blkcipher_cast(tfm);
-}
-
-/**
- * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * blkcipher cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Allocate a cipher handle for a block cipher. The returned struct
- * crypto_blkcipher is the cipher handle that is required for any subsequent
- * API invocation for that block cipher.
- *
- * Return: allocated cipher handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
-static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
- const char *alg_name, u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_BLKCIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
-}
-
-static inline struct crypto_tfm *crypto_blkcipher_tfm(
- struct crypto_blkcipher *tfm)
-{
- return &tfm->base;
-}
-
-/**
- * crypto_free_blkcipher() - zeroize and free the block cipher handle
- * @tfm: cipher handle to be freed
- */
-static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
-{
- crypto_free_tfm(crypto_blkcipher_tfm(tfm));
-}
-
-/**
- * crypto_has_blkcipher() - Search for the availability of a block cipher
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * block cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Return: true when the block cipher is known to the kernel crypto API; false
- * otherwise
- */
-static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_BLKCIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return crypto_has_alg(alg_name, type, mask);
-}
-
-/**
- * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
- * @tfm: cipher handle
- *
- * Return: The character string holding the name of the cipher
- */
-static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
-}
-
-static inline struct blkcipher_tfm *crypto_blkcipher_crt(
- struct crypto_blkcipher *tfm)
-{
- return &crypto_blkcipher_tfm(tfm)->crt_blkcipher;
-}
-
-static inline struct blkcipher_alg *crypto_blkcipher_alg(
- struct crypto_blkcipher *tfm)
-{
- return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
-}
-
-/**
- * crypto_blkcipher_ivsize() - obtain IV size
- * @tfm: cipher handle
- *
- * The size of the IV for the block cipher referenced by the cipher handle is
- * returned. This IV size may be zero if the cipher does not need an IV.
- *
- * Return: IV size in bytes
- */
-static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
-{
- return crypto_blkcipher_alg(tfm)->ivsize;
-}
-
-/**
- * crypto_blkcipher_blocksize() - obtain block size of cipher
- * @tfm: cipher handle
- *
- * The block size for the block cipher referenced with the cipher handle is
- * returned. The caller may use that information to allocate appropriate
- * memory for the data returned by the encryption or decryption operation.
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_blkcipher_blocksize(
- struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
-}
-
-static inline unsigned int crypto_blkcipher_alignmask(
- struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
-}
-
-static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
-}
-
-static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
- u32 flags)
-{
- crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
-}
-
-static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
- u32 flags)
-{
- crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
-}
-
-/**
- * crypto_blkcipher_setkey() - set key for cipher
- * @tfm: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the block cipher referenced by the cipher
- * handle.
- *
- * Note, the key length determines the cipher type. Many block ciphers implement
- * different cipher modes depending on the key size, such as AES-128 vs AES-192
- * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
- * is performed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
- const u8 *key, unsigned int keylen)
-{
- return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
- key, keylen);
-}
-
-/**
- * crypto_blkcipher_encrypt() - encrypt plaintext
- * @desc: reference to the block cipher handle with meta data
- * @dst: scatter/gather list that is filled by the cipher operation with the
- * ciphertext
- * @src: scatter/gather list that holds the plaintext
- * @nbytes: number of bytes of the plaintext to encrypt.
- *
- * Encrypt plaintext data using the IV set by the caller with a preceding
- * call of crypto_blkcipher_set_iv.
- *
- * The blkcipher_desc data structure must be filled by the caller and can
- * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
- * with the block cipher handle; desc.flags is filled with either
- * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
- return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
-}
-
-/**
- * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
- * @desc: reference to the block cipher handle with meta data
- * @dst: scatter/gather list that is filled by the cipher operation with the
- * ciphertext
- * @src: scatter/gather list that holds the plaintext
- * @nbytes: number of bytes of the plaintext to encrypt.
- *
- * Encrypt plaintext data with the use of an IV that is solely used for this
- * cipher operation. Any previously set IV is not used.
- *
- * The blkcipher_desc data structure must be filled by the caller and can
- * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
- * with the block cipher handle; desc.info is filled with the IV to be used for
- * the current operation; desc.flags is filled with either
- * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
-}
-
-/**
- * crypto_blkcipher_decrypt() - decrypt ciphertext
- * @desc: reference to the block cipher handle with meta data
- * @dst: scatter/gather list that is filled by the cipher operation with the
- * plaintext
- * @src: scatter/gather list that holds the ciphertext
- * @nbytes: number of bytes of the ciphertext to decrypt.
- *
- * Decrypt ciphertext data using the IV set by the caller with a preceding
- * call of crypto_blkcipher_set_iv.
- *
- * The blkcipher_desc data structure must be filled by the caller as documented
- * for the crypto_blkcipher_encrypt call above.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- *
- */
-static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
- return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
-}
-
-/**
- * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
- * @desc: reference to the block cipher handle with meta data
- * @dst: scatter/gather list that is filled by the cipher operation with the
- * plaintext
- * @src: scatter/gather list that holds the ciphertext
- * @nbytes: number of bytes of the ciphertext to decrypt.
- *
- * Decrypt ciphertext data with the use of an IV that is solely used for this
- * cipher operation. Any previously set IV is not used.
- *
- * The blkcipher_desc data structure must be filled by the caller as documented
- * for the crypto_blkcipher_encrypt_iv call above.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
-}
-
-/**
- * crypto_blkcipher_set_iv() - set IV for cipher
- * @tfm: cipher handle
- * @src: buffer holding the IV
- * @len: length of the IV in bytes
- *
- * The caller provided IV is set for the block cipher referenced by the cipher
- * handle.
- */
-static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
- const u8 *src, unsigned int len)
-{
- memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
-}
-
-/**
- * crypto_blkcipher_get_iv() - obtain IV from cipher
- * @tfm: cipher handle
- * @dst: buffer filled with the IV
- * @len: length of the buffer dst
- *
- * The caller can obtain the IV set for the block cipher referenced by the
- * cipher handle and store it into the user-provided buffer. If the buffer
- * has an insufficient space, the IV is truncated to fit the buffer.
- */
-static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
- u8 *dst, unsigned int len)
-{
- memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
-}
-
-/**
- * DOC: Single Block Cipher API
- *
- * The single block cipher API is used with the ciphers of type
- * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
- *
- * Using the single block cipher API calls, operations with the basic cipher
- * primitive can be implemented. These cipher primitives exclude any block
- * chaining operations including IV handling.
- *
- * The purpose of this single block cipher API is to support the implementation
- * of templates or other concepts that only need to perform the cipher operation
- * on one block at a time. Templates invoke the underlying cipher primitive
- * block-wise and process either the input or the output data of these cipher
- * operations.
- */
-
-static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
-{
- return (struct crypto_cipher *)tfm;
-}
-
-static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return __crypto_cipher_cast(tfm);
-}
-
-/**
- * crypto_alloc_cipher() - allocate single block cipher handle
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * single block cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Allocate a cipher handle for a single block cipher. The returned struct
- * crypto_cipher is the cipher handle that is required for any subsequent API
- * invocation for that single block cipher.
- *
- * Return: allocated cipher handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
-static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
- u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_CIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
-}
-
-static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
-{
- return &tfm->base;
-}
-
-/**
- * crypto_free_cipher() - zeroize and free the single block cipher handle
- * @tfm: cipher handle to be freed
- */
-static inline void crypto_free_cipher(struct crypto_cipher *tfm)
-{
- crypto_free_tfm(crypto_cipher_tfm(tfm));
-}
-
-/**
- * crypto_has_cipher() - Search for the availability of a single block cipher
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * single block cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Return: true when the single block cipher is known to the kernel crypto API;
- * false otherwise
- */
-static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_CIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return crypto_has_alg(alg_name, type, mask);
-}
-
-static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
-{
- return &crypto_cipher_tfm(tfm)->crt_cipher;
-}
-
-/**
- * crypto_cipher_blocksize() - obtain block size for cipher
- * @tfm: cipher handle
- *
- * The block size for the single block cipher referenced with the cipher handle
- * tfm is returned. The caller may use that information to allocate appropriate
- * memory for the data returned by the encryption or decryption operation
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
-{
- return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
-}
-
-static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
-}
-
-static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
-{
- return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
-}
-
-static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
- u32 flags)
-{
- crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
-}
-
-static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
- u32 flags)
-{
- crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
-}
-
-/**
- * crypto_cipher_setkey() - set key for cipher
- * @tfm: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the single block cipher referenced by the
- * cipher handle.
- *
- * Note, the key length determines the cipher type. Many block ciphers implement
- * different cipher modes depending on the key size, such as AES-128 vs AES-192
- * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
- * is performed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
- const u8 *key, unsigned int keylen)
-{
- return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
- key, keylen);
-}
-
-/**
- * crypto_cipher_encrypt_one() - encrypt one block of plaintext
- * @tfm: cipher handle
- * @dst: points to the buffer that will be filled with the ciphertext
- * @src: buffer holding the plaintext to be encrypted
- *
- * Invoke the encryption operation of one block. The caller must ensure that
- * the plaintext and ciphertext buffers are at least one block in size.
- */
-static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
- u8 *dst, const u8 *src)
-{
- crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
- dst, src);
-}
-
-/**
- * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
- * @tfm: cipher handle
- * @dst: points to the buffer that will be filled with the plaintext
- * @src: buffer holding the ciphertext to be decrypted
- *
- * Invoke the decryption operation of one block. The caller must ensure that
- * the plaintext and ciphertext buffers are at least one block in size.
- */
-static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
- u8 *dst, const u8 *src)
-{
- crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
- dst, src);
-}
-
static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
return (struct crypto_comp *)tfm;
}
-static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
-{
- BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
- CRYPTO_ALG_TYPE_MASK);
- return __crypto_comp_cast(tfm);
-}
-
static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
u32 type, u32 mask)
{
@@ -1583,26 +531,13 @@ static inline const char *crypto_comp_name(struct crypto_comp *tfm)
return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
}
-static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
-{
- return &crypto_comp_tfm(tfm)->crt_compress;
-}
+int crypto_comp_compress(struct crypto_comp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
-static inline int crypto_comp_compress(struct crypto_comp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
- src, slen, dst, dlen);
-}
-
-static inline int crypto_comp_decompress(struct crypto_comp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
- src, slen, dst, dlen);
-}
+int crypto_comp_decompress(struct crypto_comp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
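
With the compress_tfm indirection gone, the two calls above are plain functions. A minimal usage sketch, assuming crypto_alloc_comp()/crypto_free_comp() from this same header and "deflate" purely as an example algorithm name:

static int demo_compress(const u8 *src, unsigned int slen,
			 u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int err;

	tfm = crypto_alloc_comp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/*
	 * On entry *dlen is the capacity of @dst; on success it holds
	 * the compressed length.
	 */
	err = crypto_comp_compress(tfm, src, slen, dst, dlen);

	crypto_free_comp(tfm);
	return err;
}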
#endif /* _LINUX_CRYPTO_H */
diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h
deleted file mode 100644
index df4d3e943d28..000000000000
--- a/include/linux/cryptohash.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __CRYPTOHASH_H
-#define __CRYPTOHASH_H
-
-#include <uapi/linux/types.h>
-
-#define SHA_DIGEST_WORDS 5
-#define SHA_MESSAGE_BYTES (512 /*bits*/ / 8)
-#define SHA_WORKSPACE_WORDS 16
-
-void sha_init(__u32 *buf);
-void sha_transform(__u32 *digest, const char *data, __u32 *W);
-
-#endif
diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h
index cfe83239d7f0..2be1120174eb 100644
--- a/include/linux/cs5535.h
+++ b/include/linux/cs5535.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* AMD CS5535/CS5536 definitions
* Copyright (C) 2006 Advanced Micro Devices, Inc.
* Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
*/
#ifndef _CS5535_H
diff --git a/include/linux/ctype.h b/include/linux/ctype.h
index f13e4ff6835a..bc95aef2219c 100644
--- a/include/linux/ctype.h
+++ b/include/linux/ctype.h
@@ -1,6 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CTYPE_H
#define _LINUX_CTYPE_H
+#include <linux/compiler.h>
+
/*
* NOTE! This ctype does not handle EOF like the standard C
* library is required to.
@@ -22,10 +25,6 @@ extern const unsigned char _ctype[];
#define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0)
#define isalpha(c) ((__ismask(c)&(_U|_L)) != 0)
#define iscntrl(c) ((__ismask(c)&(_C)) != 0)
-static inline int isdigit(int c)
-{
- return '0' <= c && c <= '9';
-}
#define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0)
#define islower(c) ((__ismask(c)&(_L)) != 0)
#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
@@ -38,6 +37,15 @@ static inline int isdigit(int c)
#define isascii(c) (((unsigned char)(c))<=0x7f)
#define toascii(c) (((unsigned char)(c))&0x7f)
+#if __has_builtin(__builtin_isdigit)
+#define isdigit(c) __builtin_isdigit(c)
+#else
+static inline int isdigit(int c)
+{
+ return '0' <= c && c <= '9';
+}
+#endif
+
static inline unsigned char __tolower(unsigned char c)
{
if (isupper(c))
diff --git a/include/linux/cuda.h b/include/linux/cuda.h
index b72332823807..daf3e6f98444 100644
--- a/include/linux/cuda.h
+++ b/include/linux/cuda.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for talking to the CUDA. The CUDA is a microcontroller
* which controls the ADB, system power, RTC, and various other things.
@@ -7,12 +8,16 @@
#ifndef _LINUX_CUDA_H
#define _LINUX_CUDA_H
+#include <linux/rtc.h>
#include <uapi/linux/cuda.h>
-extern int find_via_cuda(void);
+extern int __init find_via_cuda(void);
extern int cuda_request(struct adb_request *req,
void (*done)(struct adb_request *), int nbytes, ...);
extern void cuda_poll(void);
+extern time64_t cuda_get_time(void);
+extern int cuda_set_rtc_time(struct rtc_time *tm);
+
#endif /* _LINUX_CUDA_H */
diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h
deleted file mode 100644
index 19ae518f5471..000000000000
--- a/include/linux/cyclades.h
+++ /dev/null
@@ -1,360 +0,0 @@
-/* $Revision: 3.0 $$Date: 1998/11/02 14:20:59 $
- * linux/include/linux/cyclades.h
- *
- * This file was initially written by
- * Randolph Bentson <bentson@grieg.seaslug.org> and is maintained by
- * Ivan Passos <ivan@cyclades.com>.
- *
- * This file contains the general definitions for the cyclades.c driver
- *$Log: cyclades.h,v $
- *Revision 3.1 2002/01/29 11:36:16 henrique
- *added throttle field on struct cyclades_port to indicate whether the
- *port is throttled or not
- *
- *Revision 3.1 2000/04/19 18:52:52 ivan
- *converted address fields to unsigned long and added fields for physical
- *addresses on cyclades_card structure;
- *
- *Revision 3.0 1998/11/02 14:20:59 ivan
- *added nports field on cyclades_card structure;
- *
- *Revision 2.5 1998/08/03 16:57:01 ivan
- *added cyclades_idle_stats structure;
- *
- *Revision 2.4 1998/06/01 12:09:53 ivan
- *removed closing_wait2 from cyclades_port structure;
- *
- *Revision 2.3 1998/03/16 18:01:12 ivan
- *changes in the cyclades_port structure to get it closer to the
- *standard serial port structure;
- *added constants for new ioctls;
- *
- *Revision 2.2 1998/02/17 16:50:00 ivan
- *changes in the cyclades_port structure (addition of shutdown_wait and
- *chip_rev variables);
- *added constants for new ioctls and for CD1400 rev. numbers.
- *
- *Revision 2.1 1997/10/24 16:03:00 ivan
- *added rflow (which allows enabling the CD1400 special flow control
- *feature) and rtsdtr_inv (which allows DTR/RTS pin inversion) to
- *cyclades_port structure;
- *added Alpha support
- *
- *Revision 2.0 1997/06/30 10:30:00 ivan
- *added some new doorbell command constants related to IOCTLW and
- *UART error signaling
- *
- *Revision 1.8 1997/06/03 15:30:00 ivan
- *added constant ZFIRM_HLT
- *added constant CyPCI_Ze_win ( = 2 * Cy_PCI_Zwin)
- *
- *Revision 1.7 1997/03/26 10:30:00 daniel
- *new entries at the end of cyclades_port struct to reallocate
- *variables illegally allocated within card memory.
- *
- *Revision 1.6 1996/09/09 18:35:30 bentson
- *fold in changes for Cyclom-Z -- including structures for
- *communicating with board as well modest changes to original
- *structures to support new features.
- *
- *Revision 1.5 1995/11/13 21:13:31 bentson
- *changes suggested by Michael Chastain <mec@duracef.shout.net>
- *to support use of this file in non-kernel applications
- *
- *
- */
-#ifndef _LINUX_CYCLADES_H
-#define _LINUX_CYCLADES_H
-
-#include <uapi/linux/cyclades.h>
-
-
-/* Per card data structure */
-struct cyclades_card {
- void __iomem *base_addr;
- union {
- void __iomem *p9050;
- struct RUNTIME_9060 __iomem *p9060;
- } ctl_addr;
- struct BOARD_CTRL __iomem *board_ctrl; /* cyz specific */
- int irq;
- unsigned int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */
- unsigned int first_line; /* minor number of first channel on card */
- unsigned int nports; /* Number of ports in the card */
- int bus_index; /* address shift - 0 for ISA, 1 for PCI */
- int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */
- u32 hw_ver;
- spinlock_t card_lock;
- struct cyclades_port *ports;
-};
-
-/***************************************
- * Memory access functions/macros *
- * (required to support Alpha systems) *
- ***************************************/
-
-#define cy_writeb(port,val) do { writeb((val), (port)); mb(); } while (0)
-#define cy_writew(port,val) do { writew((val), (port)); mb(); } while (0)
-#define cy_writel(port,val) do { writel((val), (port)); mb(); } while (0)
-
-/*
- * Statistics counters
- */
-struct cyclades_icount {
- __u32 cts, dsr, rng, dcd, tx, rx;
- __u32 frame, parity, overrun, brk;
- __u32 buf_overrun;
-};
-
-/*
- * This is our internal structure for each serial port's state.
- *
- * Many fields are paralleled by the structure used by the serial_struct
- * structure.
- *
- * For definitions of the flags field, see tty.h
- */
-
-struct cyclades_port {
- int magic;
- struct tty_port port;
- struct cyclades_card *card;
- union {
- struct {
- void __iomem *base_addr;
- } cyy;
- struct {
- struct CH_CTRL __iomem *ch_ctrl;
- struct BUF_CTRL __iomem *buf_ctrl;
- } cyz;
- } u;
- int line;
- int flags; /* defined in tty.h */
- int type; /* UART type */
- int read_status_mask;
- int ignore_status_mask;
- int timeout;
- int xmit_fifo_size;
- int cor1,cor2,cor3,cor4,cor5;
- int tbpr,tco,rbpr,rco;
- int baud;
- int rflow;
- int rtsdtr_inv;
- int chip_rev;
- int custom_divisor;
- u8 x_char; /* to be pushed out ASAP */
- int breakon;
- int breakoff;
- int xmit_head;
- int xmit_tail;
- int xmit_cnt;
- int default_threshold;
- int default_timeout;
- unsigned long rflush_count;
- struct cyclades_monitor mon;
- struct cyclades_idle_stats idle_stats;
- struct cyclades_icount icount;
- struct completion shutdown_wait;
- int throttle;
-};
-
-#define CLOSING_WAIT_DELAY 30*HZ
-#define CY_CLOSING_WAIT_NONE ASYNC_CLOSING_WAIT_NONE
-#define CY_CLOSING_WAIT_INF ASYNC_CLOSING_WAIT_INF
-
-
-#define CyMAX_CHIPS_PER_CARD 8
-#define CyMAX_CHAR_FIFO 12
-#define CyPORTS_PER_CHIP 4
-#define CD1400_MAX_SPEED 115200
-
-#define CyISA_Ywin 0x2000
-
-#define CyPCI_Ywin 0x4000
-#define CyPCI_Yctl 0x80
-#define CyPCI_Zctl CTRL_WINDOW_SIZE
-#define CyPCI_Zwin 0x80000
-#define CyPCI_Ze_win (2 * CyPCI_Zwin)
-
-#define PCI_DEVICE_ID_MASK 0x06
-
-/**** CD1400 registers ****/
-
-#define CD1400_REV_G 0x46
-#define CD1400_REV_J 0x48
-
-#define CyRegSize 0x0400
-#define Cy_HwReset 0x1400
-#define Cy_ClrIntr 0x1800
-#define Cy_EpldRev 0x1e00
-
-/* Global Registers */
-
-#define CyGFRCR (0x40*2)
-#define CyRevE (44)
-#define CyCAR (0x68*2)
-#define CyCHAN_0 (0x00)
-#define CyCHAN_1 (0x01)
-#define CyCHAN_2 (0x02)
-#define CyCHAN_3 (0x03)
-#define CyGCR (0x4B*2)
-#define CyCH0_SERIAL (0x00)
-#define CyCH0_PARALLEL (0x80)
-#define CySVRR (0x67*2)
-#define CySRModem (0x04)
-#define CySRTransmit (0x02)
-#define CySRReceive (0x01)
-#define CyRICR (0x44*2)
-#define CyTICR (0x45*2)
-#define CyMICR (0x46*2)
-#define CyICR0 (0x00)
-#define CyICR1 (0x01)
-#define CyICR2 (0x02)
-#define CyICR3 (0x03)
-#define CyRIR (0x6B*2)
-#define CyTIR (0x6A*2)
-#define CyMIR (0x69*2)
-#define CyIRDirEq (0x80)
-#define CyIRBusy (0x40)
-#define CyIRUnfair (0x20)
-#define CyIRContext (0x1C)
-#define CyIRChannel (0x03)
-#define CyPPR (0x7E*2)
-#define CyCLOCK_20_1MS (0x27)
-#define CyCLOCK_25_1MS (0x31)
-#define CyCLOCK_25_5MS (0xf4)
-#define CyCLOCK_60_1MS (0x75)
-#define CyCLOCK_60_2MS (0xea)
-
-/* Virtual Registers */
-
-#define CyRIVR (0x43*2)
-#define CyTIVR (0x42*2)
-#define CyMIVR (0x41*2)
-#define CyIVRMask (0x07)
-#define CyIVRRxEx (0x07)
-#define CyIVRRxOK (0x03)
-#define CyIVRTxOK (0x02)
-#define CyIVRMdmOK (0x01)
-#define CyTDR (0x63*2)
-#define CyRDSR (0x62*2)
-#define CyTIMEOUT (0x80)
-#define CySPECHAR (0x70)
-#define CyBREAK (0x08)
-#define CyPARITY (0x04)
-#define CyFRAME (0x02)
-#define CyOVERRUN (0x01)
-#define CyMISR (0x4C*2)
-/* see CyMCOR_ and CyMSVR_ for bits*/
-#define CyEOSRR (0x60*2)
-
-/* Channel Registers */
-
-#define CyLIVR (0x18*2)
-#define CyMscsr (0x01)
-#define CyTdsr (0x02)
-#define CyRgdsr (0x03)
-#define CyRedsr (0x07)
-#define CyCCR (0x05*2)
-/* Format 1 */
-#define CyCHAN_RESET (0x80)
-#define CyCHIP_RESET (0x81)
-#define CyFlushTransFIFO (0x82)
-/* Format 2 */
-#define CyCOR_CHANGE (0x40)
-#define CyCOR1ch (0x02)
-#define CyCOR2ch (0x04)
-#define CyCOR3ch (0x08)
-/* Format 3 */
-#define CySEND_SPEC_1 (0x21)
-#define CySEND_SPEC_2 (0x22)
-#define CySEND_SPEC_3 (0x23)
-#define CySEND_SPEC_4 (0x24)
-/* Format 4 */
-#define CyCHAN_CTL (0x10)
-#define CyDIS_RCVR (0x01)
-#define CyENB_RCVR (0x02)
-#define CyDIS_XMTR (0x04)
-#define CyENB_XMTR (0x08)
-#define CySRER (0x06*2)
-#define CyMdmCh (0x80)
-#define CyRxData (0x10)
-#define CyTxRdy (0x04)
-#define CyTxMpty (0x02)
-#define CyNNDT (0x01)
-#define CyCOR1 (0x08*2)
-#define CyPARITY_NONE (0x00)
-#define CyPARITY_0 (0x20)
-#define CyPARITY_1 (0xA0)
-#define CyPARITY_E (0x40)
-#define CyPARITY_O (0xC0)
-#define Cy_1_STOP (0x00)
-#define Cy_1_5_STOP (0x04)
-#define Cy_2_STOP (0x08)
-#define Cy_5_BITS (0x00)
-#define Cy_6_BITS (0x01)
-#define Cy_7_BITS (0x02)
-#define Cy_8_BITS (0x03)
-#define CyCOR2 (0x09*2)
-#define CyIXM (0x80)
-#define CyTxIBE (0x40)
-#define CyETC (0x20)
-#define CyAUTO_TXFL (0x60)
-#define CyLLM (0x10)
-#define CyRLM (0x08)
-#define CyRtsAO (0x04)
-#define CyCtsAE (0x02)
-#define CyDsrAE (0x01)
-#define CyCOR3 (0x0A*2)
-#define CySPL_CH_DRANGE (0x80) /* special character detect range */
-#define CySPL_CH_DET1 (0x40) /* enable special character detection
- on SCHR4-SCHR3 */
-#define CyFL_CTRL_TRNSP (0x20) /* Flow Control Transparency */
-#define CySPL_CH_DET2 (0x10) /* Enable special character detection
- on SCHR2-SCHR1 */
-#define CyREC_FIFO (0x0F) /* Receive FIFO threshold */
-#define CyCOR4 (0x1E*2)
-#define CyCOR5 (0x1F*2)
-#define CyCCSR (0x0B*2)
-#define CyRxEN (0x80)
-#define CyRxFloff (0x40)
-#define CyRxFlon (0x20)
-#define CyTxEN (0x08)
-#define CyTxFloff (0x04)
-#define CyTxFlon (0x02)
-#define CyRDCR (0x0E*2)
-#define CySCHR1 (0x1A*2)
-#define CySCHR2 (0x1B*2)
-#define CySCHR3 (0x1C*2)
-#define CySCHR4 (0x1D*2)
-#define CySCRL (0x22*2)
-#define CySCRH (0x23*2)
-#define CyLNC (0x24*2)
-#define CyMCOR1 (0x15*2)
-#define CyMCOR2 (0x16*2)
-#define CyRTPR (0x21*2)
-#define CyMSVR1 (0x6C*2)
-#define CyMSVR2 (0x6D*2)
-#define CyANY_DELTA (0xF0)
-#define CyDSR (0x80)
-#define CyCTS (0x40)
-#define CyRI (0x20)
-#define CyDCD (0x10)
-#define CyDTR (0x02)
-#define CyRTS (0x01)
-#define CyPVSR (0x6F*2)
-#define CyRBPR (0x78*2)
-#define CyRCOR (0x7C*2)
-#define CyTBPR (0x72*2)
-#define CyTCOR (0x76*2)
-
-/* Custom Registers */
-
-#define CyPLX_VER (0x3400)
-#define PLX_9050 0x0b
-#define PLX_9060 0x0c
-#define PLX_9080 0x0d
-
-/***************************************************************************/
-
-#endif /* _LINUX_CYCLADES_H */
diff --git a/include/linux/damon.h b/include/linux/damon.h
new file mode 100644
index 000000000000..d5d4d19928e0
--- /dev/null
+++ b/include/linux/damon.h
@@ -0,0 +1,638 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DAMON api
+ *
+ * Author: SeongJae Park <sjpark@amazon.de>
+ */
+
+#ifndef _DAMON_H_
+#define _DAMON_H_
+
+#include <linux/memcontrol.h>
+#include <linux/mutex.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+#include <linux/random.h>
+
+/* Minimal region size. Every damon_region is aligned by this. */
+#define DAMON_MIN_REGION PAGE_SIZE
+/* Max priority score for DAMON-based operation schemes */
+#define DAMOS_MAX_SCORE (99)
+
+/* Get a random number in [l, r) */
+static inline unsigned long damon_rand(unsigned long l, unsigned long r)
+{
+ return l + get_random_u32_below(r - l);
+}
+
+/**
+ * struct damon_addr_range - Represents an address region of [@start, @end).
+ * @start: Start address of the region (inclusive).
+ * @end: End address of the region (exclusive).
+ */
+struct damon_addr_range {
+ unsigned long start;
+ unsigned long end;
+};
+
+/**
+ * struct damon_region - Represents a monitoring target region.
+ * @ar: The address range of the region.
+ * @sampling_addr: Address of the sample for the next access check.
+ * @nr_accesses: Access frequency of this region.
+ * @list: List head for siblings.
+ * @age: Age of this region.
+ *
+ * @age is initially zero, increased for each aggregation interval, and reset
+ * to zero again if the access frequency is significantly changed. If two
+ * regions are merged into a new region, both @nr_accesses and @age of the new
+ * region are set as region size-weighted average of those of the two regions.
+ */
+struct damon_region {
+ struct damon_addr_range ar;
+ unsigned long sampling_addr;
+ unsigned int nr_accesses;
+ struct list_head list;
+
+ unsigned int age;
+/* private: Internal value for age calculation. */
+ unsigned int last_nr_accesses;
+};
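
The merge semantics in the kernel-doc above (size-weighted averaging of @nr_accesses and @age) can be read as the following sketch; the demo_* names are placeholders rather than the mm/damon/core.c implementation, and overflow handling is ignored for brevity:

static unsigned long demo_region_sz(struct damon_region *r)
{
	return r->ar.end - r->ar.start;
}

/* Fold @r into @l, keeping size-weighted averages of the stats. */
static void demo_merge_regions(struct damon_region *l, struct damon_region *r)
{
	unsigned long sz_l = demo_region_sz(l), sz_r = demo_region_sz(r);

	l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
			 (sz_l + sz_r);
	l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
	l->ar.end = r->ar.end;
}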
+
+/**
+ * struct damon_target - Represents a monitoring target.
+ * @pid: The PID of the virtual address space to monitor.
+ * @nr_regions: Number of monitoring target regions of this target.
+ * @regions_list: Head of the monitoring target regions of this target.
+ * @list: List head for siblings.
+ *
+ * Each monitoring context could have multiple targets. For example, a context
+ * for virtual memory address spaces could have multiple target processes. The
+ * @pid should be set for appropriate &struct damon_operations including the
+ * virtual address spaces monitoring operations.
+ */
+struct damon_target {
+ struct pid *pid;
+ unsigned int nr_regions;
+ struct list_head regions_list;
+ struct list_head list;
+};
+
+/**
+ * enum damos_action - Represents an action of a Data Access Monitoring-based
+ * Operation Scheme.
+ *
+ * @DAMOS_WILLNEED: Call ``madvise()`` for the region with MADV_WILLNEED.
+ * @DAMOS_COLD: Call ``madvise()`` for the region with MADV_COLD.
+ * @DAMOS_PAGEOUT: Call ``madvise()`` for the region with MADV_PAGEOUT.
+ * @DAMOS_HUGEPAGE: Call ``madvise()`` for the region with MADV_HUGEPAGE.
+ * @DAMOS_NOHUGEPAGE: Call ``madvise()`` for the region with MADV_NOHUGEPAGE.
+ * @DAMOS_LRU_PRIO: Prioritize the region on its LRU lists.
+ * @DAMOS_LRU_DEPRIO: Deprioritize the region on its LRU lists.
+ * @DAMOS_STAT: Do nothing but count the stat.
+ * @NR_DAMOS_ACTIONS: Total number of DAMOS actions
+ *
+ * The support of each action is up to the running &struct damon_operations.
+ * &enum DAMON_OPS_VADDR and &enum DAMON_OPS_FVADDR support all actions except
+ * &enum DAMOS_LRU_PRIO and &enum DAMOS_LRU_DEPRIO. &enum DAMON_OPS_PADDR
+ * supports only &enum DAMOS_PAGEOUT, &enum DAMOS_LRU_PRIO, &enum
+ * DAMOS_LRU_DEPRIO, and &enum DAMOS_STAT.
+ */
+enum damos_action {
+ DAMOS_WILLNEED,
+ DAMOS_COLD,
+ DAMOS_PAGEOUT,
+ DAMOS_HUGEPAGE,
+ DAMOS_NOHUGEPAGE,
+ DAMOS_LRU_PRIO,
+ DAMOS_LRU_DEPRIO,
+ DAMOS_STAT, /* Do nothing but only record the stat */
+ NR_DAMOS_ACTIONS,
+};
+
+/**
+ * struct damos_quota - Controls the aggressiveness of the given scheme.
+ * @ms: Maximum milliseconds that the scheme can use.
+ * @sz: Maximum bytes of memory that the action can be applied to.
+ * @reset_interval: Charge reset interval in milliseconds.
+ *
+ * @weight_sz: Weight of the region's size for prioritization.
+ * @weight_nr_accesses: Weight of the region's nr_accesses for prioritization.
+ * @weight_age: Weight of the region's age for prioritization.
+ *
+ * To avoid consuming too much CPU time or IO resources for applying the
+ * &struct damos->action to large memory, DAMON allows users to set time and/or
+ * size quotas. The quotas can be set by writing non-zero values to &ms and
+ * &sz, respectively. If the time quota is set, DAMON tries to use only up to
+ * &ms milliseconds within &reset_interval for applying the action. If the
+ * size quota is set, DAMON tries to apply the action only up to &sz bytes
+ * within &reset_interval.
+ *
+ * Internally, the time quota is transformed to a size quota using estimated
+ * throughput of the scheme's action. DAMON then compares it against &sz and
+ * uses the smaller one as the effective quota.
+ *
+ * For selecting regions within the quota, DAMON prioritizes the current scheme's
+ * target memory regions using the &struct damon_operations->get_scheme_score.
+ * You could customize the prioritization logic by setting &weight_sz,
+ * &weight_nr_accesses, and &weight_age, because monitoring operations are
+ * encouraged to respect those.
+ */
+struct damos_quota {
+ unsigned long ms;
+ unsigned long sz;
+ unsigned long reset_interval;
+
+ unsigned int weight_sz;
+ unsigned int weight_nr_accesses;
+ unsigned int weight_age;
+
+/* private: */
+ /* For throughput estimation */
+ unsigned long total_charged_sz;
+ unsigned long total_charged_ns;
+
+ unsigned long esz; /* Effective size quota in bytes */
+
+ /* For charging the quota */
+ unsigned long charged_sz;
+ unsigned long charged_from;
+ struct damon_target *charge_target_from;
+ unsigned long charge_addr_from;
+
+ /* For prioritization */
+ unsigned long histogram[DAMOS_MAX_SCORE + 1];
+ unsigned int min_score;
+};
+
+/**
+ * enum damos_wmark_metric - Represents the watermark metric.
+ *
+ * @DAMOS_WMARK_NONE: Ignore the watermarks of the given scheme.
+ * @DAMOS_WMARK_FREE_MEM_RATE: Free memory rate of the system in [0,1000].
+ * @NR_DAMOS_WMARK_METRICS: Total number of DAMOS watermark metrics
+ */
+enum damos_wmark_metric {
+ DAMOS_WMARK_NONE,
+ DAMOS_WMARK_FREE_MEM_RATE,
+ NR_DAMOS_WMARK_METRICS,
+};
+
+/**
+ * struct damos_watermarks - Controls when a given scheme should be activated.
+ * @metric: Metric for the watermarks.
+ * @interval: Watermarks check time interval in microseconds.
+ * @high: High watermark.
+ * @mid: Middle watermark.
+ * @low: Low watermark.
+ *
+ * If &metric is &DAMOS_WMARK_NONE, the scheme is always active. Being active
+ * means DAMON does the monitoring and applies the action of the scheme to
+ * appropriate memory regions. Else, DAMON checks &metric of the system for at
+ * least every &interval microseconds and works as below.
+ *
+ * If &metric is higher than &high, the scheme is inactivated. If &metric is
+ * between &mid and &low, the scheme is activated. If &metric is lower than
+ * &low, the scheme is inactivated.
+ */
+struct damos_watermarks {
+ enum damos_wmark_metric metric;
+ unsigned long interval;
+ unsigned long high;
+ unsigned long mid;
+ unsigned long low;
+
+/* private: */
+ bool activated;
+};
+
+/**
+ * struct damos_stat - Statistics on a given scheme.
+ * @nr_tried: Total number of regions that the scheme was tried to be applied to.
+ * @sz_tried: Total size of regions that the scheme was tried to be applied to.
+ * @nr_applied: Total number of regions that the scheme was applied to.
+ * @sz_applied: Total size of regions that the scheme was applied to.
+ * @qt_exceeds: Total number of times the quota of the scheme has been exceeded.
+ */
+struct damos_stat {
+ unsigned long nr_tried;
+ unsigned long sz_tried;
+ unsigned long nr_applied;
+ unsigned long sz_applied;
+ unsigned long qt_exceeds;
+};
+
+/**
+ * enum damos_filter_type - Type of memory for &struct damos_filter
+ * @DAMOS_FILTER_TYPE_ANON: Anonymous pages.
+ * @DAMOS_FILTER_TYPE_MEMCG: Specific memcg's pages.
+ * @NR_DAMOS_FILTER_TYPES: Number of filter types.
+ *
+ * The support of each filter type is up to the running &struct
+ * damon_operations. &enum DAMON_OPS_PADDR supports all filter types, while
+ * &enum DAMON_OPS_VADDR and &enum DAMON_OPS_FVADDR do not support any
+ * filter types.
+ */
+enum damos_filter_type {
+ DAMOS_FILTER_TYPE_ANON,
+ DAMOS_FILTER_TYPE_MEMCG,
+ NR_DAMOS_FILTER_TYPES,
+};
+
+/**
+ * struct damos_filter - DAMOS action target memory filter.
+ * @type: Type of the page.
+ * @matching: Whether the matching pages should be filtered out or in.
+ * @memcg_id: Memcg id of interest if @type is DAMOS_FILTER_TYPE_MEMCG.
+ * @list: List head for siblings.
+ *
+ * Before applying the &damos->action to a memory region, DAMOS checks if each
+ * page of the region matches this filter and avoids applying the action if so.
+ * Note that support for the check is up to the &struct damon_operations
+ * implementation.
+ */
+struct damos_filter {
+ enum damos_filter_type type;
+ bool matching;
+ union {
+ unsigned short memcg_id;
+ };
+ struct list_head list;
+};
+
+/**
+ * struct damos_access_pattern - Target access pattern of the given scheme.
+ * @min_sz_region: Minimum size of target regions.
+ * @max_sz_region: Maximum size of target regions.
+ * @min_nr_accesses: Minimum ``->nr_accesses`` of target regions.
+ * @max_nr_accesses: Maximum ``->nr_accesses`` of target regions.
+ * @min_age_region: Minimum age of target regions.
+ * @max_age_region: Maximum age of target regions.
+ */
+struct damos_access_pattern {
+ unsigned long min_sz_region;
+ unsigned long max_sz_region;
+ unsigned int min_nr_accesses;
+ unsigned int max_nr_accesses;
+ unsigned int min_age_region;
+ unsigned int max_age_region;
+};
+
+/**
+ * struct damos - Represents a Data Access Monitoring-based Operation Scheme.
+ * @pattern: Access pattern of target regions.
+ * @action: &damos_action to be applied to the target regions.
+ * @quota: Control the aggressiveness of this scheme.
+ * @wmarks: Watermarks for automated (in)activation of this scheme.
+ * @filters: Additional set of &struct damos_filter for &action.
+ * @stat: Statistics of this scheme.
+ * @list: List head for siblings.
+ *
+ * For each aggregation interval, DAMON finds regions which fit in the
+ * &pattern and applies &action to those. To avoid consuming too much
+ * CPU time or IO resources for the &action, &quota is used.
+ *
+ * To do the work only when needed, schemes can be activated for specific
+ * system situations using &wmarks. If all schemes that are registered to a
+ * &struct damon_ctx are inactive, DAMON stops monitoring as well, and just
+ * repeatedly checks the watermarks.
+ *
+ * Before applying the &action to a memory region, the &struct damon_operations
+ * implementation could check the pages of the region and skip the &action in
+ * order to respect &filters.
+ *
+ * After applying the &action to each region, &stat is updated to reflect the
+ * number of regions and the total size of regions that the &action is applied
+ * to.
+ */
+struct damos {
+ struct damos_access_pattern pattern;
+ enum damos_action action;
+ struct damos_quota quota;
+ struct damos_watermarks wmarks;
+ struct list_head filters;
+ struct damos_stat stat;
+ struct list_head list;
+};
+
+/**
+ * enum damon_ops_id - Identifier for each monitoring operations implementation
+ *
+ * @DAMON_OPS_VADDR: Monitoring operations for virtual address spaces
+ * @DAMON_OPS_FVADDR: Monitoring operations for only fixed ranges of virtual
+ * address spaces
+ * @DAMON_OPS_PADDR: Monitoring operations for the physical address space
+ * @NR_DAMON_OPS: Number of monitoring operations implementations
+ */
+enum damon_ops_id {
+ DAMON_OPS_VADDR,
+ DAMON_OPS_FVADDR,
+ DAMON_OPS_PADDR,
+ NR_DAMON_OPS,
+};
+
+struct damon_ctx;
+
+/**
+ * struct damon_operations - Monitoring operations for given use cases.
+ *
+ * @id: Identifier of this operations set.
+ * @init: Initialize operations-related data structures.
+ * @update: Update operations-related data structures.
+ * @prepare_access_checks: Prepare next access check of target regions.
+ * @check_accesses: Check the accesses to target regions.
+ * @reset_aggregated: Reset aggregated accesses monitoring results.
+ * @get_scheme_score: Get the score of a region for a scheme.
+ * @apply_scheme: Apply a DAMON-based operation scheme.
+ * @target_valid: Determine if the target is valid.
+ * @cleanup: Clean up the context.
+ *
+ * DAMON can be extended for various address spaces and usages. For this,
+ * users should register the low-level operations for their target address
+ * space and use case via the &damon_ctx.ops. Then, the monitoring thread
+ * (&damon_ctx.kdamond) calls @init and @prepare_access_checks before starting
+ * the monitoring, @update after each &damon_attrs.ops_update_interval, and
+ * @check_accesses, @target_valid and @prepare_access_checks after each
+ * &damon_attrs.sample_interval. Finally, @reset_aggregated is called after
+ * each &damon_attrs.aggr_interval.
+ *
+ * Each &struct damon_operations instance having a valid @id can be registered
+ * via damon_register_ops() and selected by damon_select_ops() later.
+ * @init should initialize operations-related data structures. For example,
+ * this could be used to construct proper monitoring target regions and link
+ * those to @damon_ctx.adaptive_targets.
+ * @update should update the operations-related data structures. For example,
+ * this could be used to update monitoring target regions for current status.
+ * @prepare_access_checks should manipulate the monitoring regions to be
+ * prepared for the next access check.
+ * @check_accesses should check the accesses to each region that were made
+ * after the last preparation and update the number of observed accesses of
+ * each region. It should also return the max number of observed accesses that
+ * were made as a result of its update. The value will be used as a threshold
+ * for the regions adjustment.
+ * @reset_aggregated should reset the access monitoring results that aggregated
+ * by @check_accesses.
+ * @get_scheme_score should return the priority score of a region for a scheme
+ * as an integer in [0, &DAMOS_MAX_SCORE].
+ * @apply_scheme is called from @kdamond when a region for a user-provided
+ * DAMON-based operation scheme is found. It should apply the scheme's action
+ * to the region and return the number of bytes of the region to which the
+ * action was successfully applied.
+ * @target_valid should check whether the target is still valid for the
+ * monitoring.
+ * @cleanup is called from @kdamond just before its termination.
+ */
+struct damon_operations {
+ enum damon_ops_id id;
+ void (*init)(struct damon_ctx *context);
+ void (*update)(struct damon_ctx *context);
+ void (*prepare_access_checks)(struct damon_ctx *context);
+ unsigned int (*check_accesses)(struct damon_ctx *context);
+ void (*reset_aggregated)(struct damon_ctx *context);
+ int (*get_scheme_score)(struct damon_ctx *context,
+ struct damon_target *t, struct damon_region *r,
+ struct damos *scheme);
+ unsigned long (*apply_scheme)(struct damon_ctx *context,
+ struct damon_target *t, struct damon_region *r,
+ struct damos *scheme);
+ bool (*target_valid)(struct damon_target *t);
+ void (*cleanup)(struct damon_ctx *context);
+};
+
+/**
+ * struct damon_callback - Monitoring events notification callbacks.
+ *
+ * @before_start: Called before starting the monitoring.
+ * @after_wmarks_check: Called after each schemes' watermarks check.
+ * @after_sampling: Called after each sampling.
+ * @after_aggregation: Called after each aggregation.
+ * @before_damos_apply: Called before applying DAMOS action.
+ * @before_terminate: Called before terminating the monitoring.
+ * @private: User private data.
+ *
+ * The monitoring thread (&damon_ctx.kdamond) calls @before_start and
+ * @before_terminate just before starting and finishing the monitoring,
+ * respectively. Therefore, those are good places for installing and cleaning
+ * @private.
+ *
+ * The monitoring thread calls @after_wmarks_check after each DAMON-based
+ * operation schemes' watermarks check. If users need to make changes to the
+ * attributes of the monitoring context while it's deactivated due to the
+ * watermarks, this is a good place to do so.
+ *
+ * The monitoring thread calls @after_sampling and @after_aggregation for each
+ * of the sampling intervals and aggregation intervals, respectively.
+ * Therefore, users can safely access the monitoring results without additional
+ * protection. For this reason, users are recommended to use these callbacks
+ * for accessing the results.
+ *
+ * If any callback returns non-zero, monitoring stops.
+ */
+struct damon_callback {
+ void *private;
+
+ int (*before_start)(struct damon_ctx *context);
+ int (*after_wmarks_check)(struct damon_ctx *context);
+ int (*after_sampling)(struct damon_ctx *context);
+ int (*after_aggregation)(struct damon_ctx *context);
+ int (*before_damos_apply)(struct damon_ctx *context,
+ struct damon_target *target,
+ struct damon_region *region,
+ struct damos *scheme);
+ void (*before_terminate)(struct damon_ctx *context);
+};
+
+/**
+ * struct damon_attrs - Monitoring attributes for accuracy/overhead control.
+ *
+ * @sample_interval: The time between access samplings.
+ * @aggr_interval: The time between monitor results aggregations.
+ * @ops_update_interval: The time between monitoring operations updates.
+ * @min_nr_regions: The minimum number of adaptive monitoring
+ * regions.
+ * @max_nr_regions: The maximum number of adaptive monitoring
+ * regions.
+ *
+ * For each @sample_interval, DAMON checks whether each region is accessed or
+ * not. It aggregates and keeps the access information (number of accesses to
+ * each region) for @aggr_interval time. DAMON also checks whether the target
+ * memory regions need update (e.g., by ``mmap()`` calls from the application,
+ * in case of virtual memory monitoring) and applies the changes for each
+ * @ops_update_interval. All time intervals are in microseconds.
+ * Please refer to &struct damon_operations and &struct damon_callback for more
+ * detail.
+ */
+struct damon_attrs {
+ unsigned long sample_interval;
+ unsigned long aggr_interval;
+ unsigned long ops_update_interval;
+ unsigned long min_nr_regions;
+ unsigned long max_nr_regions;
+};
+
+/**
+ * struct damon_ctx - Represents a context for each monitoring. This is the
+ * main interface that allows users to set the attributes and get the results
+ * of the monitoring.
+ *
+ * @attrs: Monitoring attributes for accuracy/overhead control.
+ * @kdamond: Kernel thread that does the monitoring.
+ * @kdamond_lock: Mutex for synchronization with @kdamond.
+ *
+ * For each monitoring context, one kernel thread for the monitoring is
+ * created. The pointer to the thread is stored in @kdamond.
+ *
+ * Once started, the monitoring thread runs until explicitly required to be
+ * terminated or every monitoring target is invalid. The validity of the
+ * targets is checked via the &damon_operations.target_valid of @ops. The
+ * termination can also be explicitly requested by calling damon_stop().
+ * The thread sets @kdamond to NULL when it terminates. Therefore, users can
+ * know whether the monitoring is ongoing or terminated by reading @kdamond.
+ * Reads and writes to @kdamond from outside of the monitoring thread must
+ * be protected by @kdamond_lock.
+ *
+ * Note that the monitoring thread protects only @kdamond via @kdamond_lock.
+ * Accesses to other fields must be protected by the users themselves.
+ *
+ * @ops: Set of monitoring operations for given use cases.
+ * @callback: Set of callbacks for monitoring events notifications.
+ *
+ * @adaptive_targets: Head of monitoring targets (&damon_target) list.
+ * @schemes: Head of schemes (&damos) list.
+ */
+struct damon_ctx {
+ struct damon_attrs attrs;
+
+/* private: internal use only */
+ struct timespec64 last_aggregation;
+ struct timespec64 last_ops_update;
+
+/* public: */
+ struct task_struct *kdamond;
+ struct mutex kdamond_lock;
+
+ struct damon_operations ops;
+ struct damon_callback callback;
+
+ struct list_head adaptive_targets;
+ struct list_head schemes;
+};
+
+static inline struct damon_region *damon_next_region(struct damon_region *r)
+{
+ return container_of(r->list.next, struct damon_region, list);
+}
+
+static inline struct damon_region *damon_prev_region(struct damon_region *r)
+{
+ return container_of(r->list.prev, struct damon_region, list);
+}
+
+static inline struct damon_region *damon_last_region(struct damon_target *t)
+{
+ return list_last_entry(&t->regions_list, struct damon_region, list);
+}
+
+static inline struct damon_region *damon_first_region(struct damon_target *t)
+{
+ return list_first_entry(&t->regions_list, struct damon_region, list);
+}
+
+static inline unsigned long damon_sz_region(struct damon_region *r)
+{
+ return r->ar.end - r->ar.start;
+}
+
+#define damon_for_each_region(r, t) \
+ list_for_each_entry(r, &t->regions_list, list)
+
+#define damon_for_each_region_from(r, t) \
+ list_for_each_entry_from(r, &t->regions_list, list)
+
+#define damon_for_each_region_safe(r, next, t) \
+ list_for_each_entry_safe(r, next, &t->regions_list, list)
+
+#define damon_for_each_target(t, ctx) \
+ list_for_each_entry(t, &(ctx)->adaptive_targets, list)
+
+#define damon_for_each_target_safe(t, next, ctx) \
+ list_for_each_entry_safe(t, next, &(ctx)->adaptive_targets, list)
+
+#define damon_for_each_scheme(s, ctx) \
+ list_for_each_entry(s, &(ctx)->schemes, list)
+
+#define damon_for_each_scheme_safe(s, next, ctx) \
+ list_for_each_entry_safe(s, next, &(ctx)->schemes, list)
+
+#define damos_for_each_filter(f, scheme) \
+ list_for_each_entry(f, &(scheme)->filters, list)
+
+#define damos_for_each_filter_safe(f, next, scheme) \
+ list_for_each_entry_safe(f, next, &(scheme)->filters, list)
+
+#ifdef CONFIG_DAMON
+
+struct damon_region *damon_new_region(unsigned long start, unsigned long end);
+
+/*
+ * Add a region between two other regions
+ */
+static inline void damon_insert_region(struct damon_region *r,
+ struct damon_region *prev, struct damon_region *next,
+ struct damon_target *t)
+{
+ __list_add(&r->list, &prev->list, &next->list);
+ t->nr_regions++;
+}
+
+void damon_add_region(struct damon_region *r, struct damon_target *t);
+void damon_destroy_region(struct damon_region *r, struct damon_target *t);
+int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
+ unsigned int nr_ranges);
+
+struct damos_filter *damos_new_filter(enum damos_filter_type type,
+ bool matching);
+void damos_add_filter(struct damos *s, struct damos_filter *f);
+void damos_destroy_filter(struct damos_filter *f);
+
+struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
+ enum damos_action action, struct damos_quota *quota,
+ struct damos_watermarks *wmarks);
+void damon_add_scheme(struct damon_ctx *ctx, struct damos *s);
+void damon_destroy_scheme(struct damos *s);
+
+struct damon_target *damon_new_target(void);
+void damon_add_target(struct damon_ctx *ctx, struct damon_target *t);
+bool damon_targets_empty(struct damon_ctx *ctx);
+void damon_free_target(struct damon_target *t);
+void damon_destroy_target(struct damon_target *t);
+unsigned int damon_nr_regions(struct damon_target *t);
+
+struct damon_ctx *damon_new_ctx(void);
+void damon_destroy_ctx(struct damon_ctx *ctx);
+int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs);
+void damon_set_schemes(struct damon_ctx *ctx,
+ struct damos **schemes, ssize_t nr_schemes);
+int damon_nr_running_ctxs(void);
+bool damon_is_registered_ops(enum damon_ops_id id);
+int damon_register_ops(struct damon_operations *ops);
+int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id);
+
+static inline bool damon_target_has_pid(const struct damon_ctx *ctx)
+{
+ return ctx->ops.id == DAMON_OPS_VADDR || ctx->ops.id == DAMON_OPS_FVADDR;
+}
+
+int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
+int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
+
+int damon_set_region_biggest_system_ram_default(struct damon_target *t,
+ unsigned long *start, unsigned long *end);
+
+#endif /* CONFIG_DAMON */
+
+#endif /* _DAMON_H */
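
To show how the declarations above fit together, here is a hedged sketch that wires up a context, one target, and one pageout scheme through this API. It is an illustration only, not part of the patch: the function names, interval values, quota, and access-pattern thresholds are assumptions.

/*
 * Hypothetical sketch: monitor one process's virtual address space and
 * page out cold regions, using only the API declared in <linux/damon.h>.
 */
#include <linux/damon.h>
#include <linux/errno.h>
#include <linux/limits.h>
#include <linux/pid.h>
#include <linux/sizes.h>

static struct damon_ctx *example_ctx;

static int example_start(pid_t pid_nr)
{
	struct damon_attrs attrs = {
		.sample_interval = 5000,	/* 5ms, assumed value */
		.aggr_interval = 100000,	/* 100ms, assumed value */
		.ops_update_interval = 1000000,	/* 1s, assumed value */
		.min_nr_regions = 10,
		.max_nr_regions = 1000,
	};
	struct damos_access_pattern pattern = {
		.min_sz_region = DAMON_MIN_REGION,
		.max_sz_region = ULONG_MAX,
		.min_nr_accesses = 0,
		.max_nr_accesses = 0,		/* never-accessed regions */
		.min_age_region = 10,		/* in aggregation intervals */
		.max_age_region = UINT_MAX,
	};
	struct damos_quota quota = {
		.sz = SZ_128M,			/* size quota, assumed */
		.reset_interval = 1000,		/* 1s, in milliseconds */
		.weight_sz = 1,			/* assumed equal weights */
		.weight_nr_accesses = 1,
		.weight_age = 1,
	};
	struct damos_watermarks wmarks = { .metric = DAMOS_WMARK_NONE };
	struct damon_target *t;
	struct damos *scheme;
	int err;

	example_ctx = damon_new_ctx();
	if (!example_ctx)
		return -ENOMEM;
	err = damon_select_ops(example_ctx, DAMON_OPS_VADDR);
	if (err)
		goto out;
	err = damon_set_attrs(example_ctx, &attrs);
	if (err)
		goto out;

	t = damon_new_target();
	if (!t) {
		err = -ENOMEM;
		goto out;
	}
	t->pid = find_get_pid(pid_nr);	/* vaddr/fvaddr ops need a pid */
	damon_add_target(example_ctx, t);

	scheme = damon_new_scheme(&pattern, DAMOS_PAGEOUT, &quota, &wmarks);
	if (!scheme) {
		err = -ENOMEM;
		goto out;
	}
	damon_add_scheme(example_ctx, scheme);

	return damon_start(&example_ctx, 1, true);
out:
	damon_destroy_ctx(example_ctx);
	return err;
}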
diff --git a/include/linux/dasd_mod.h b/include/linux/dasd_mod.h
new file mode 100644
index 000000000000..14e6cf8c6267
--- /dev/null
+++ b/include/linux/dasd_mod.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef DASD_MOD_H
+#define DASD_MOD_H
+
+#include <asm/dasd.h>
+
+struct gendisk;
+
+extern int dasd_biodasdinfo(struct gendisk *disk, dasd_information2_t *info);
+
+#endif
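
dasd_biodasdinfo() is the only export here; roughly, partition-table code would call it as in the hedged sketch below. The caller and its error handling are assumptions, not part of the patch.

/* Hypothetical caller: query DASD information for a gendisk. */
#include <linux/dasd_mod.h>

static int example_is_dasd(struct gendisk *disk)
{
	dasd_information2_t info;

	/* non-zero return: not a DASD, or the dasd module is absent */
	if (dasd_biodasdinfo(disk, &info))
		return 0;
	/* info now holds the device characteristics */
	return 1;
}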
diff --git a/include/linux/davinci_emac.h b/include/linux/davinci_emac.h
index 05b97144d342..28e6cf1356da 100644
--- a/include/linux/davinci_emac.h
+++ b/include/linux/davinci_emac.h
@@ -46,5 +46,4 @@ enum {
EMAC_VERSION_2, /* DM646x */
};
-void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context);
#endif
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 794811875732..bf6258472e49 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -1,13 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
-#include <asm/pgtable.h>
-struct iomap_ops;
+typedef unsigned long dax_entry_t;
+
struct dax_device;
+struct gendisk;
+struct iomap_ops;
+struct iomap_iter;
+struct iomap;
+
+enum dax_access_mode {
+ DAX_ACCESS,
+ DAX_RECOVERY_WRITE,
+};
+
struct dax_operations {
/*
* direct_access: translate a device-relative
@@ -15,151 +26,250 @@ struct dax_operations {
* number of pages available for DAX at that pfn.
*/
long (*direct_access)(struct dax_device *, pgoff_t, long,
- void **, pfn_t *);
- /* copy_from_iter: required operation for fs-dax direct-i/o */
- size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
- struct iov_iter *);
- /* flush: optional driver-specific cache management after writes */
- void (*flush)(struct dax_device *, pgoff_t, void *, size_t);
+ enum dax_access_mode, void **, pfn_t *);
+ /*
+ * Validate whether this device is usable as an fsdax backing
+ * device.
+ */
+ bool (*dax_supported)(struct dax_device *, struct block_device *, int,
+ sector_t, sector_t);
+ /* zero_page_range: required operation. Zero page range */
+ int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
+ /*
+ * recovery_write: recover a poisoned range by DAX device driver
+ * capable of clearing poison.
+ */
+ size_t (*recovery_write)(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *iter);
};
-extern struct attribute_group dax_attribute_group;
+struct dax_holder_operations {
+ /*
+ * notify_failure - notify memory failure into inner holder device
+ * @dax_dev: the dax device which contains the holder
+ * @offset: offset on this dax device where memory failure occurs
+ * @len: length of this memory failure event
+ * @flags: action flags for memory failure handler
+ */
+ int (*notify_failure)(struct dax_device *dax_dev, u64 offset,
+ u64 len, int mf_flags);
+};
#if IS_ENABLED(CONFIG_DAX)
-struct dax_device *dax_get_by_host(const char *host);
+struct dax_device *alloc_dax(void *private, const struct dax_operations *ops);
+void *dax_holder(struct dax_device *dax_dev);
void put_dax(struct dax_device *dax_dev);
+void kill_dax(struct dax_device *dax_dev);
+void dax_write_cache(struct dax_device *dax_dev, bool wc);
+bool dax_write_cache_enabled(struct dax_device *dax_dev);
+bool dax_synchronous(struct dax_device *dax_dev);
+void set_dax_synchronous(struct dax_device *dax_dev);
+size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i);
+/*
+ * Check if given mapping is supported by the file / underlying device.
+ */
+static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
+ struct dax_device *dax_dev)
+{
+ if (!(vma->vm_flags & VM_SYNC))
+ return true;
+ if (!IS_DAX(file_inode(vma->vm_file)))
+ return false;
+ return dax_synchronous(dax_dev);
+}
#else
-static inline struct dax_device *dax_get_by_host(const char *host)
+static inline void *dax_holder(struct dax_device *dax_dev)
{
return NULL;
}
-
+static inline struct dax_device *alloc_dax(void *private,
+ const struct dax_operations *ops)
+{
+ /*
+ * Callers should check IS_ENABLED(CONFIG_DAX) to know if this
+ * NULL is an error or expected.
+ */
+ return NULL;
+}
static inline void put_dax(struct dax_device *dax_dev)
{
}
+static inline void kill_dax(struct dax_device *dax_dev)
+{
+}
+static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
+{
+}
+static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
+{
+ return false;
+}
+static inline bool dax_synchronous(struct dax_device *dax_dev)
+{
+ return true;
+}
+static inline void set_dax_synchronous(struct dax_device *dax_dev)
+{
+}
+static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
+ struct dax_device *dax_dev)
+{
+ return !(vma->vm_flags & VM_SYNC);
+}
+static inline size_t dax_recovery_write(struct dax_device *dax_dev,
+ pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
+{
+ return 0;
+}
#endif
-int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
-#if IS_ENABLED(CONFIG_FS_DAX)
-int __bdev_dax_supported(struct super_block *sb, int blocksize);
-static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
+void set_dax_nocache(struct dax_device *dax_dev);
+void set_dax_nomc(struct dax_device *dax_dev);
+
+struct writeback_control;
+#if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX)
+int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk);
+void dax_remove_host(struct gendisk *disk);
+struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off,
+ void *holder, const struct dax_holder_operations *ops);
+void fs_put_dax(struct dax_device *dax_dev, void *holder);
+#else
+static inline int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk)
{
- return __bdev_dax_supported(sb, blocksize);
+ return 0;
}
+static inline void dax_remove_host(struct gendisk *disk)
+{
+}
+static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev,
+ u64 *start_off, void *holder,
+ const struct dax_holder_operations *ops)
+{
+ return NULL;
+}
+static inline void fs_put_dax(struct dax_device *dax_dev, void *holder)
+{
+}
+#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */
+
+#if IS_ENABLED(CONFIG_FS_DAX)
+int dax_writeback_mapping_range(struct address_space *mapping,
+ struct dax_device *dax_dev, struct writeback_control *wbc);
-static inline struct dax_device *fs_dax_get_by_host(const char *host)
+struct page *dax_layout_busy_page(struct address_space *mapping);
+struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end);
+dax_entry_t dax_lock_page(struct page *page);
+void dax_unlock_page(struct page *page, dax_entry_t cookie);
+dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
+ unsigned long index, struct page **page);
+void dax_unlock_mapping_entry(struct address_space *mapping,
+ unsigned long index, dax_entry_t cookie);
+#else
+static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
- return dax_get_by_host(host);
+ return NULL;
}
-static inline void fs_put_dax(struct dax_device *dax_dev)
+static inline struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end)
{
- put_dax(dax_dev);
+ return NULL;
}
-#else
-static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
+static inline int dax_writeback_mapping_range(struct address_space *mapping,
+ struct dax_device *dax_dev, struct writeback_control *wbc)
{
return -EOPNOTSUPP;
}
-static inline struct dax_device *fs_dax_get_by_host(const char *host)
+static inline dax_entry_t dax_lock_page(struct page *page)
{
- return NULL;
+ if (IS_DAX(page->mapping->host))
+ return ~0UL;
+ return 0;
+}
+
+static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
+{
+}
+
+static inline dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
+ unsigned long index, struct page **page)
+{
+ return 0;
}
-static inline void fs_put_dax(struct dax_device *dax_dev)
+static inline void dax_unlock_mapping_entry(struct address_space *mapping,
+ unsigned long index, dax_entry_t cookie)
{
}
#endif
+int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
+ const struct iomap_ops *ops);
+int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
+ const struct iomap_ops *ops);
+int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
+ const struct iomap_ops *ops);
+
+#if IS_ENABLED(CONFIG_DAX)
int dax_read_lock(void);
void dax_read_unlock(int id);
-struct dax_device *alloc_dax(void *private, const char *host,
- const struct dax_operations *ops);
-bool dax_alive(struct dax_device *dax_dev);
-void kill_dax(struct dax_device *dax_dev);
-void *dax_get_private(struct dax_device *dax_dev);
-long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
- void **kaddr, pfn_t *pfn);
-size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
- size_t bytes, struct iov_iter *i);
-void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
- size_t size);
-void dax_write_cache(struct dax_device *dax_dev, bool wc);
-
-/*
- * We use lowest available bit in exceptional entry for locking, one bit for
- * the entry size (PMD) and two more to tell us if the entry is a huge zero
- * page (HZP) or an empty entry that is just used for locking. In total four
- * special bits.
- *
- * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the HZP and
- * EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
- * block allocation.
- */
-#define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
-#define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
-#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
-#define RADIX_DAX_HZP (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
-#define RADIX_DAX_EMPTY (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
-
-static inline unsigned long dax_radix_sector(void *entry)
+#else
+static inline int dax_read_lock(void)
{
- return (unsigned long)entry >> RADIX_DAX_SHIFT;
+ return 0;
}
-static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
+static inline void dax_read_unlock(int id)
{
- return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
- ((unsigned long)sector << RADIX_DAX_SHIFT) |
- RADIX_DAX_ENTRY_LOCK);
}
+#endif /* CONFIG_DAX */
+bool dax_alive(struct dax_device *dax_dev);
+void *dax_get_private(struct dax_device *dax_dev);
+long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
+ enum dax_access_mode mode, void **kaddr, pfn_t *pfn);
+size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+ size_t bytes, struct iov_iter *i);
+size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+ size_t bytes, struct iov_iter *i);
+int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
+ size_t nr_pages);
+int dax_holder_notify_failure(struct dax_device *dax_dev, u64 off, u64 len,
+ int mf_flags);
+void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
-int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
- const struct iomap_ops *ops);
+vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+ pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
+vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
+ enum page_entry_size pe_size, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);
-void dax_wake_mapping_entry_waiter(struct address_space *mapping,
- pgoff_t index, void *entry, bool wake_all);
-
-#ifdef CONFIG_FS_DAX
-int __dax_zero_page_range(struct block_device *bdev,
- struct dax_device *dax_dev, sector_t sector,
- unsigned int offset, unsigned int length);
-#else
-static inline int __dax_zero_page_range(struct block_device *bdev,
- struct dax_device *dax_dev, sector_t sector,
- unsigned int offset, unsigned int length)
+int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
+ struct inode *dest, loff_t destoff,
+ loff_t len, bool *is_same,
+ const struct iomap_ops *ops);
+int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *len, unsigned int remap_flags,
+ const struct iomap_ops *ops);
+static inline bool dax_mapping(struct address_space *mapping)
{
- return -ENXIO;
+ return mapping->host && IS_DAX(mapping->host);
}
-#endif
-#ifdef CONFIG_FS_DAX_PMD
-static inline unsigned int dax_radix_order(void *entry)
-{
- if ((unsigned long)entry & RADIX_DAX_PMD)
- return PMD_SHIFT - PAGE_SHIFT;
- return 0;
-}
+#ifdef CONFIG_DEV_DAX_HMEM_DEVICES
+void hmem_register_resource(int target_nid, struct resource *r);
#else
-static inline unsigned int dax_radix_order(void *entry)
+static inline void hmem_register_resource(int target_nid, struct resource *r)
{
- return 0;
}
#endif
-int dax_pfn_mkwrite(struct vm_fault *vmf);
-static inline bool dax_mapping(struct address_space *mapping)
-{
- return mapping->host && IS_DAX(mapping->host);
-}
-
-struct writeback_control;
-int dax_writeback_mapping_range(struct address_space *mapping,
- struct block_device *bdev, struct writeback_control *wbc);
+typedef int (*walk_hmem_fn)(struct device *dev, int target_nid,
+ const struct resource *res);
+int walk_hmem_resources(struct device *dev, walk_hmem_fn fn);
#endif
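
To make the reworked entry point concrete: dax_direct_access() now takes an enum dax_access_mode. A hedged sketch of a caller under the new signature follows; the function name and the single-page mapping are illustrative assumptions, and a real caller would keep using *kaddr only while the read lock is held.

/* Hypothetical sketch: get a kernel mapping for one page of a DAX device. */
#include <linux/dax.h>
#include <linux/pfn_t.h>

static long example_dax_map_page(struct dax_device *dax_dev, pgoff_t pgoff,
				 void **kaddr)
{
	pfn_t pfn;
	long nr_pages;
	int id;

	id = dax_read_lock();	/* keep the device alive (see dax_alive()) */
	/* DAX_ACCESS for normal I/O; DAX_RECOVERY_WRITE to write over poison */
	nr_pages = dax_direct_access(dax_dev, pgoff, 1, DAX_ACCESS, kaddr, &pfn);
	dax_read_unlock(id);

	return nr_pages;	/* < 0 on error, else pages mapped at *kaddr */
}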
diff --git a/include/linux/dca.h b/include/linux/dca.h
index ad956c2e07a8..d6228e334f48 100644
--- a/include/linux/dca.h
+++ b/include/linux/dca.h
@@ -1,22 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
*/
#ifndef DCA_H
#define DCA_H
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 3f3ff4ccdc3f..6b351e009f59 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -1,8 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_DCACHE_H
#define __LINUX_DCACHE_H
#include <linux/atomic.h>
#include <linux/list.h>
+#include <linux/math.h>
#include <linux/rculist.h>
#include <linux/rculist_bl.h>
#include <linux/spinlock.h>
@@ -14,6 +16,7 @@
#include <linux/wait.h>
struct path;
+struct file;
struct vfsmount;
/*
@@ -55,19 +58,9 @@ struct qstr {
#define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
-extern const char empty_string[];
extern const struct qstr empty_name;
-extern const char slash_string[];
extern const struct qstr slash_name;
-
-struct dentry_stat_t {
- long nr_dentry;
- long nr_unused;
- long age_limit; /* age in seconds */
- long want_pages; /* pages requested by system */
- long dummy[2];
-};
-extern struct dentry_stat_t dentry_stat;
+extern const struct qstr dotdot_name;
/*
* Try to keep struct dentry aligned on 64 byte cachelines (this will
@@ -89,7 +82,7 @@ extern struct dentry_stat_t dentry_stat;
struct dentry {
/* RCU lookup touched fields */
unsigned int d_flags; /* protected by d_lock */
- seqcount_t d_seq; /* per dentry seqlock */
+ seqcount_spinlock_t d_seq; /* per dentry seqlock */
struct hlist_bl_node d_hash; /* lookup hash list */
struct dentry *d_parent; /* parent directory */
struct qstr d_name;
@@ -118,7 +111,7 @@ struct dentry {
struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */
struct rcu_head d_rcu;
} d_u;
-};
+} __randomize_layout;
/*
* dentry->d_lock spinlock nesting subclasses:
@@ -146,15 +139,14 @@ struct dentry_operations {
char *(*d_dname)(struct dentry *, char *, int);
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(const struct path *, bool);
- struct dentry *(*d_real)(struct dentry *, const struct inode *,
- unsigned int);
+ struct dentry *(*d_real)(struct dentry *, const struct inode *);
} ____cacheline_aligned;
/*
* Locking rules for dentry_operations callbacks are to be found in
- * Documentation/filesystems/Locking. Keep it updated!
+ * Documentation/filesystems/locking.rst. Keep it updated!
*
- * FUrther descriptions are found in Documentation/filesystems/vfs.txt.
+ * Further descriptions are found in Documentation/filesystems/vfs.rst.
* Keep it updated too!
*/
@@ -177,7 +169,8 @@ struct dentry_operations {
* typically using d_splice_alias. */
#define DCACHE_REFERENCED 0x00000040 /* Recently used, don't discard. */
-#define DCACHE_RCUACCESS 0x00000080 /* Entry has ever been RCU-visible */
+
+#define DCACHE_DONTCACHE 0x00000080 /* Purge from memory on final dput() */
#define DCACHE_CANT_MOUNT 0x00000100
#define DCACHE_GENOCIDE 0x00000200
@@ -213,11 +206,12 @@ struct dentry_operations {
#define DCACHE_MAY_FREE 0x00800000
#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
-#define DCACHE_ENCRYPTED_WITH_KEY 0x02000000 /* dir is encrypted with a valid key */
+#define DCACHE_NOKEY_NAME 0x02000000 /* Encrypted name encoded without key */
#define DCACHE_OP_REAL 0x04000000
#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */
#define DCACHE_DENTRY_CURSOR 0x20000000
+#define DCACHE_NORCU 0x40000000 /* No RCU delay for freeing */
extern seqlock_t rename_lock;
@@ -225,8 +219,9 @@ extern seqlock_t rename_lock;
* These are the low-level FS interfaces to the dcache..
*/
extern void d_instantiate(struct dentry *, struct inode *);
+extern void d_instantiate_new(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
-extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
+extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *);
extern void __d_drop(struct dentry *dentry);
extern void d_drop(struct dentry *dentry);
extern void d_delete(struct dentry *);
@@ -234,11 +229,13 @@ extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op
/* allocate/de-allocate */
extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
-extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
+extern struct dentry * d_alloc_anon(struct super_block *);
extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
wait_queue_head_t *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
+extern bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
+ const struct qstr *name);
extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
extern struct dentry *d_find_any_alias(struct inode *inode);
extern struct dentry * d_obtain_alias(struct inode *);
@@ -254,11 +251,13 @@ extern struct dentry * d_make_root(struct inode *);
/* <clickety>-<click> the ramfs-type tree */
extern void d_genocide(struct dentry *);
-extern void d_tmpfile(struct dentry *, struct inode *);
+extern void d_tmpfile(struct file *, struct inode *);
extern struct dentry *d_find_alias(struct inode *);
extern void d_prune_aliases(struct inode *);
+extern struct dentry *d_find_alias_rcu(struct inode *);
+
/* test whether we have any submounts in a subdir tree */
extern int path_has_submounts(const struct path *);
@@ -269,8 +268,6 @@ extern void d_rehash(struct dentry *);
extern void d_add(struct dentry *, struct inode *);
-extern void dentry_update_name_case(struct dentry *, const struct qstr *);
-
/* used for rename() and baskets */
extern void d_move(struct dentry *, struct dentry *);
extern void d_exchange(struct dentry *, struct dentry *);
@@ -291,15 +288,14 @@ static inline unsigned d_count(const struct dentry *dentry)
/*
* helper function for dentry_operations.d_dname() members
*/
-extern __printf(4, 5)
-char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
-extern char *simple_dname(struct dentry *, char *, int);
+extern __printf(3, 4)
+char *dynamic_dname(char *, int, const char *, ...);
extern char *__d_path(const struct path *, const struct path *, char *, int);
extern char *d_absolute_path(const struct path *, char *, int);
extern char *d_path(const struct path *, char *, int);
-extern char *dentry_path_raw(struct dentry *, char *, int);
-extern char *dentry_path(struct dentry *, char *, int);
+extern char *dentry_path_raw(const struct dentry *, char *, int);
+extern char *dentry_path(const struct dentry *, char *, int);
/* Allocation counts.. */
@@ -356,20 +352,17 @@ static inline void dont_mount(struct dentry *dentry)
spin_unlock(&dentry->d_lock);
}
-extern void __d_lookup_done(struct dentry *);
+extern void __d_lookup_unhash_wake(struct dentry *dentry);
-static inline int d_in_lookup(struct dentry *dentry)
+static inline int d_in_lookup(const struct dentry *dentry)
{
return dentry->d_flags & DCACHE_PAR_LOOKUP;
}
static inline void d_lookup_done(struct dentry *dentry)
{
- if (unlikely(d_in_lookup(dentry))) {
- spin_lock(&dentry->d_lock);
- __d_lookup_done(dentry);
- spin_unlock(&dentry->d_lock);
- }
+ if (unlikely(d_in_lookup(dentry)))
+ __d_lookup_unhash_wake(dentry);
}
extern void dput(struct dentry *);
@@ -443,6 +436,11 @@ static inline bool d_is_negative(const struct dentry *dentry)
return d_is_miss(dentry);
}
+static inline bool d_flags_negative(unsigned flags)
+{
+ return (flags & DCACHE_ENTRY_TYPE) == DCACHE_MISS_TYPE;
+}
+
static inline bool d_is_positive(const struct dentry *dentry)
{
return !d_is_negative(dentry);
@@ -486,7 +484,7 @@ static inline bool d_really_is_positive(const struct dentry *dentry)
return dentry->d_inode != NULL;
}
-static inline int simple_positive(struct dentry *dentry)
+static inline int simple_positive(const struct dentry *dentry)
{
return d_really_is_positive(dentry) && !d_unhashed(dentry);
}
@@ -519,7 +517,7 @@ static inline struct inode *d_inode(const struct dentry *dentry)
}
/**
- * d_inode_rcu - Get the actual inode of this dentry with ACCESS_ONCE()
+ * d_inode_rcu - Get the actual inode of this dentry with READ_ONCE()
* @dentry: The dentry to query
*
* This is the helper normal filesystems should use to get at their own inodes
@@ -527,7 +525,7 @@ static inline struct inode *d_inode(const struct dentry *dentry)
*/
static inline struct inode *d_inode_rcu(const struct dentry *dentry)
{
- return ACCESS_ONCE(dentry->d_inode);
+ return READ_ONCE(dentry->d_inode);
}
/**
@@ -566,19 +564,17 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper)
* d_real - Return the real dentry
* @dentry: the dentry to query
* @inode: inode to select the dentry from multiple layers (can be NULL)
- * @flags: open flags to control copy-up behavior
*
* If dentry is on a union/overlay, then return the underlying, real dentry.
* Otherwise return the dentry itself.
*
- * See also: Documentation/filesystems/vfs.txt
+ * See also: Documentation/filesystems/vfs.rst
*/
static inline struct dentry *d_real(struct dentry *dentry,
- const struct inode *inode,
- unsigned int flags)
+ const struct inode *inode)
{
if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
- return dentry->d_op->d_real(dentry, inode, flags);
+ return dentry->d_op->d_real(dentry, inode);
else
return dentry;
}
@@ -593,11 +589,11 @@ static inline struct dentry *d_real(struct dentry *dentry,
static inline struct inode *d_real_inode(const struct dentry *dentry)
{
/* This usage of d_real() results in const dentry */
- return d_backing_inode(d_real((struct dentry *) dentry, NULL, 0));
+ return d_backing_inode(d_real((struct dentry *) dentry, NULL));
}
struct name_snapshot {
- const unsigned char *name;
+ struct qstr name;
unsigned char inline_name[DNAME_INLINE_LEN];
};
void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *);
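
Since d_real() above drops its flags argument, callers now select a layer by inode only, or pass NULL for the filesystem's default. A hedged sketch of the common pattern, with a hypothetical caller name:

/* Hypothetical sketch: reach the underlying inode of an overlay dentry. */
#include <linux/dcache.h>

static struct inode *example_real_inode(struct dentry *dentry)
{
	/* NULL inode: ask the filesystem for its default real dentry */
	struct dentry *real = d_real(dentry, NULL);

	return d_backing_inode(real);
}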
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 68449293c4b6..325af611909f 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DCCP_H
#define _LINUX_DCCP_H
@@ -197,7 +198,7 @@ enum dccp_role {
struct dccp_service_list {
__u32 dccpsl_nr;
- __be32 dccpsl_list[0];
+ __be32 dccpsl_list[];
};
#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1)
@@ -304,10 +305,8 @@ struct dccp_sock {
struct timer_list dccps_xmit_timer;
};
-static inline struct dccp_sock *dccp_sk(const struct sock *sk)
-{
- return (struct dccp_sock *)sk;
-}
+#define dccp_sk(ptr) container_of_const(ptr, struct dccp_sock, \
+ dccps_inet_connection.icsk_inet.sk)
static inline const char *dccp_role(const struct sock *sk)
{
diff --git a/include/linux/dcookies.h b/include/linux/dcookies.h
deleted file mode 100644
index 699b6c499c4f..000000000000
--- a/include/linux/dcookies.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * dcookies.h
- *
- * Persistent cookie-path mappings
- *
- * Copyright 2002 John Levon <levon@movementarian.org>
- */
-
-#ifndef DCOOKIES_H
-#define DCOOKIES_H
-
-
-#ifdef CONFIG_PROFILING
-
-#include <linux/dcache.h>
-#include <linux/types.h>
-
-struct dcookie_user;
-struct path;
-
-/**
- * dcookie_register - register a user of dcookies
- *
- * Register as a dcookie user. Returns %NULL on failure.
- */
-struct dcookie_user * dcookie_register(void);
-
-/**
- * dcookie_unregister - unregister a user of dcookies
- *
- * Unregister as a dcookie user. This may invalidate
- * any dcookie values returned from get_dcookie().
- */
-void dcookie_unregister(struct dcookie_user * user);
-
-/**
- * get_dcookie - acquire a dcookie
- *
- * Convert the given dentry/vfsmount pair into
- * a cookie value.
- *
- * Returns -EINVAL if no living task has registered as a
- * dcookie user.
- *
- * Returns 0 on success, with *cookie filled in
- */
-int get_dcookie(const struct path *path, unsigned long *cookie);
-
-#else
-
-static inline struct dcookie_user * dcookie_register(void)
-{
- return NULL;
-}
-
-static inline void dcookie_unregister(struct dcookie_user * user)
-{
- return;
-}
-
-static inline int get_dcookie(const struct path *path, unsigned long *cookie)
-{
- return -ENOSYS;
-}
-
-#endif /* CONFIG_PROFILING */
-
-#endif /* DCOOKIES_H */
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 822c1354f3a6..dbb409d77d4f 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -1,17 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_DEBUG_LOCKING_H
#define __LINUX_DEBUG_LOCKING_H
-#include <linux/kernel.h>
#include <linux/atomic.h>
-#include <linux/bug.h>
+#include <linux/cache.h>
struct task_struct;
-extern int debug_locks;
-extern int debug_locks_silent;
+extern int debug_locks __read_mostly;
+extern int debug_locks_silent __read_mostly;
-static inline int __debug_locks_off(void)
+static __always_inline int __debug_locks_off(void)
{
return xchg(&debug_locks, 0);
}
@@ -26,8 +26,10 @@ extern int debug_locks_off(void);
int __ret = 0; \
\
if (!oops_in_progress && unlikely(c)) { \
+ instrumentation_begin(); \
if (debug_locks_off() && !debug_locks_silent) \
WARN(1, "DEBUG_LOCKS_WARN_ON(%s)", #c); \
+ instrumentation_end(); \
__ret = 1; \
} \
__ret; \
@@ -45,8 +47,6 @@ extern int debug_locks_off(void);
# define locking_selftest() do { } while (0)
#endif
-struct task_struct;
-
#ifdef CONFIG_LOCKDEP
extern void debug_show_all_locks(void);
extern void debug_show_held_locks(struct task_struct *task);
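
For reference, DEBUG_LOCKS_WARN_ON() above is an assert-style macro: on the first failure it warns (unless debug_locks_silent is set) and flips debug_locks off via debug_locks_off(), so later checks go quiet. A hypothetical call site:

/* Hypothetical sketch: validate an invariant inside lock-debugging code. */
#include <linux/debug_locks.h>

static void example_validate(int held_count)
{
	/* evaluates to 1 (and disables lock debugging) if the check fires */
	if (DEBUG_LOCKS_WARN_ON(held_count < 0))
		return;		/* internal state corrupted; stop checking */

	/* ... continue with held_count >= 0 guaranteed ... */
}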
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index aa86e6d8c1aa..ea2d919fd9c7 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* debugfs.h - a tiny little debug file system
*
* Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2004 IBM Inc.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
* debugfs is for people to use instead of /proc or /sys.
* See Documentation/filesystems/ for more details.
*/
@@ -23,7 +20,6 @@
struct device;
struct file_operations;
-struct srcu_struct;
struct debugfs_blob_wrapper {
void *data;
@@ -39,30 +35,17 @@ struct debugfs_regset32 {
const struct debugfs_reg32 *regs;
int nregs;
void __iomem *base;
+ struct device *dev; /* Optional device for Runtime PM */
};
-extern struct dentry *arch_debugfs_dir;
-
-extern struct srcu_struct debugfs_srcu;
+struct debugfs_u32_array {
+ u32 *array;
+ u32 n_elements;
+};
-/**
- * debugfs_real_fops - getter for the real file operation
- * @filp: a pointer to a struct file
- *
- * Must only be called under the protection established by
- * debugfs_use_file_start().
- */
-static inline const struct file_operations *debugfs_real_fops(const struct file *filp)
- __must_hold(&debugfs_srcu)
-{
- /*
- * Neither the pointer to the struct file_operations, nor its
- * contents ever change -- srcu_dereference() is not needed here.
- */
- return filp->f_path.dentry->d_fsdata;
-}
+extern struct dentry *arch_debugfs_dir;
-#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
+#define DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \
static int __fops ## _open(struct inode *inode, struct file *file) \
{ \
__simple_attr_check_format(__fmt, 0ull); \
@@ -73,10 +56,18 @@ static const struct file_operations __fops = { \
.open = __fops ## _open, \
.release = simple_attr_release, \
.read = debugfs_attr_read, \
- .write = debugfs_attr_write, \
+ .write = (__is_signed) ? debugfs_attr_write_signed : debugfs_attr_write, \
.llseek = no_llseek, \
}
+#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
+ DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false)
+
+#define DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \
+ DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true)
+
+typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
+
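
As a usage note for the reshuffled attribute macros: DEFINE_DEBUGFS_ATTRIBUTE() still pairs with debugfs_create_file_unsafe(), which relies on debugfs_file_get()/debugfs_file_put() for lifetime handling instead of the full proxy fops. A hedged sketch with hypothetical names:

/* Hypothetical sketch: expose a u64 tunable through a debugfs attribute. */
#include <linux/debugfs.h>

static u64 example_threshold;

static int example_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

static int example_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(example_fops, example_get, example_set, "%llu\n");

static void example_debugfs_init(struct dentry *parent)
{
	/* the _unsafe variant avoids wrapping example_fops in a proxy */
	debugfs_create_file_unsafe("threshold", 0600, parent,
				   &example_threshold, &example_fops);
}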
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_lookup(const char *name, struct dentry *parent);
@@ -88,82 +79,86 @@ struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops);
-struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
- struct dentry *parent, void *data,
- const struct file_operations *fops,
- loff_t file_size);
+void debugfs_create_file_size(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops,
+ loff_t file_size);
struct dentry *debugfs_create_dir(const char *name, struct dentry *parent);
struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
const char *dest);
-typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
struct dentry *debugfs_create_automount(const char *name,
struct dentry *parent,
debugfs_automount_t f,
void *data);
void debugfs_remove(struct dentry *dentry);
-void debugfs_remove_recursive(struct dentry *dentry);
+#define debugfs_remove_recursive debugfs_remove
-int debugfs_use_file_start(const struct dentry *dentry, int *srcu_idx)
- __acquires(&debugfs_srcu);
+void debugfs_lookup_and_remove(const char *name, struct dentry *parent);
-void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu);
+const struct file_operations *debugfs_real_fops(const struct file *filp);
+
+int debugfs_file_get(struct dentry *dentry);
+void debugfs_file_put(struct dentry *dentry);
ssize_t debugfs_attr_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos);
ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
+ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos);
struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, const char *new_name);
-struct dentry *debugfs_create_u8(const char *name, umode_t mode,
- struct dentry *parent, u8 *value);
-struct dentry *debugfs_create_u16(const char *name, umode_t mode,
- struct dentry *parent, u16 *value);
-struct dentry *debugfs_create_u32(const char *name, umode_t mode,
- struct dentry *parent, u32 *value);
-struct dentry *debugfs_create_u64(const char *name, umode_t mode,
- struct dentry *parent, u64 *value);
-struct dentry *debugfs_create_ulong(const char *name, umode_t mode,
- struct dentry *parent, unsigned long *value);
-struct dentry *debugfs_create_x8(const char *name, umode_t mode,
- struct dentry *parent, u8 *value);
-struct dentry *debugfs_create_x16(const char *name, umode_t mode,
- struct dentry *parent, u16 *value);
-struct dentry *debugfs_create_x32(const char *name, umode_t mode,
- struct dentry *parent, u32 *value);
-struct dentry *debugfs_create_x64(const char *name, umode_t mode,
- struct dentry *parent, u64 *value);
-struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
- struct dentry *parent, size_t *value);
-struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
- struct dentry *parent, atomic_t *value);
-struct dentry *debugfs_create_bool(const char *name, umode_t mode,
- struct dentry *parent, bool *value);
+void debugfs_create_u8(const char *name, umode_t mode, struct dentry *parent,
+ u8 *value);
+void debugfs_create_u16(const char *name, umode_t mode, struct dentry *parent,
+ u16 *value);
+void debugfs_create_u32(const char *name, umode_t mode, struct dentry *parent,
+ u32 *value);
+void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent,
+ u64 *value);
+void debugfs_create_ulong(const char *name, umode_t mode, struct dentry *parent,
+ unsigned long *value);
+void debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent,
+ u8 *value);
+void debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent,
+ u16 *value);
+void debugfs_create_x32(const char *name, umode_t mode, struct dentry *parent,
+ u32 *value);
+void debugfs_create_x64(const char *name, umode_t mode, struct dentry *parent,
+ u64 *value);
+void debugfs_create_size_t(const char *name, umode_t mode,
+ struct dentry *parent, size_t *value);
+void debugfs_create_atomic_t(const char *name, umode_t mode,
+ struct dentry *parent, atomic_t *value);
+void debugfs_create_bool(const char *name, umode_t mode, struct dentry *parent,
+ bool *value);
+void debugfs_create_str(const char *name, umode_t mode,
+ struct dentry *parent, char **value);
struct dentry *debugfs_create_blob(const char *name, umode_t mode,
struct dentry *parent,
struct debugfs_blob_wrapper *blob);
-struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
- struct dentry *parent,
- struct debugfs_regset32 *regset);
+void debugfs_create_regset32(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_regset32 *regset);
void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
int nregs, void __iomem *base, char *prefix);
-struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
- struct dentry *parent,
- u32 *array, u32 elements);
+void debugfs_create_u32_array(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_u32_array *array);
-struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name,
- struct dentry *parent,
- int (*read_fn)(struct seq_file *s,
- void *data));
+void debugfs_create_devm_seqfile(struct device *dev, const char *name,
+ struct dentry *parent,
+ int (*read_fn)(struct seq_file *s, void *data));
bool debugfs_initialized(void);
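
For illustration, the usual consumer of the new refcount-based protection (which replaces the old SRCU scheme): files created with debugfs_create_file_unsafe() are expected to pin the file via debugfs_file_get()/debugfs_file_put(), which the DEFINE_DEBUGFS_ATTRIBUTE() helper does for you through debugfs_attr_read()/debugfs_attr_write(). A minimal sketch; the my_* names are hypothetical:

#include <linux/debugfs.h>

static u64 my_counter;	/* hypothetical driver state */

static int my_counter_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

static int my_counter_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}

/* Routes accesses through debugfs_attr_read()/debugfs_attr_write() */
DEFINE_DEBUGFS_ATTRIBUTE(my_counter_fops, my_counter_get,
			 my_counter_set, "%llu\n");

static void my_debugfs_init(struct dentry *parent)
{
	/* _unsafe is fine here: the attr helpers take the file reference */
	debugfs_create_file_unsafe("counter", 0600, parent, &my_counter,
				   &my_counter_fops);
}
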
@@ -173,6 +168,9 @@ ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf,
ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos);
+ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+
#else
#include <linux/err.h>
@@ -196,14 +194,20 @@ static inline struct dentry *debugfs_create_file(const char *name, umode_t mode,
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
- struct dentry *parent, void *data,
- const struct file_operations *fops,
- loff_t file_size)
+static inline struct dentry *debugfs_create_file_unsafe(const char *name,
+ umode_t mode, struct dentry *parent,
+ void *data,
+ const struct file_operations *fops)
{
return ERR_PTR(-ENODEV);
}
+static inline void debugfs_create_file_size(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops,
+ loff_t file_size)
+{ }
+
static inline struct dentry *debugfs_create_dir(const char *name,
struct dentry *parent)
{
@@ -219,7 +223,7 @@ static inline struct dentry *debugfs_create_symlink(const char *name,
static inline struct dentry *debugfs_create_automount(const char *name,
struct dentry *parent,
- struct vfsmount *(*f)(void *),
+ debugfs_automount_t f,
void *data)
{
return ERR_PTR(-ENODEV);
@@ -231,15 +235,18 @@ static inline void debugfs_remove(struct dentry *dentry)
static inline void debugfs_remove_recursive(struct dentry *dentry)
{ }
-static inline int debugfs_use_file_start(const struct dentry *dentry,
- int *srcu_idx)
- __acquires(&debugfs_srcu)
+static inline void debugfs_lookup_and_remove(const char *name,
+ struct dentry *parent)
+{ }
+
+const struct file_operations *debugfs_real_fops(const struct file *filp);
+
+static inline int debugfs_file_get(struct dentry *dentry)
{
return 0;
}
-static inline void debugfs_use_file_finish(int srcu_idx)
- __releases(&debugfs_srcu)
+static inline void debugfs_file_put(struct dentry *dentry)
{ }
static inline ssize_t debugfs_attr_read(struct file *file, char __user *buf,
@@ -255,87 +262,63 @@ static inline ssize_t debugfs_attr_write(struct file *file,
return -ENODEV;
}
-static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
- struct dentry *new_dir, char *new_name)
+static inline ssize_t debugfs_attr_write_signed(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *ppos)
{
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
-static inline struct dentry *debugfs_create_u8(const char *name, umode_t mode,
- struct dentry *parent,
- u8 *value)
+static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
+ struct dentry *new_dir, char *new_name)
{
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_u16(const char *name, umode_t mode,
- struct dentry *parent,
- u16 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_u8(const char *name, umode_t mode,
+ struct dentry *parent, u8 *value) { }
-static inline struct dentry *debugfs_create_u32(const char *name, umode_t mode,
- struct dentry *parent,
- u32 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_u16(const char *name, umode_t mode,
+ struct dentry *parent, u16 *value) { }
-static inline struct dentry *debugfs_create_u64(const char *name, umode_t mode,
- struct dentry *parent,
- u64 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_u32(const char *name, umode_t mode,
+ struct dentry *parent, u32 *value) { }
-static inline struct dentry *debugfs_create_x8(const char *name, umode_t mode,
- struct dentry *parent,
- u8 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_u64(const char *name, umode_t mode,
+ struct dentry *parent, u64 *value) { }
-static inline struct dentry *debugfs_create_x16(const char *name, umode_t mode,
- struct dentry *parent,
- u16 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_ulong(const char *name, umode_t mode,
+ struct dentry *parent,
+ unsigned long *value) { }
-static inline struct dentry *debugfs_create_x32(const char *name, umode_t mode,
- struct dentry *parent,
- u32 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_x8(const char *name, umode_t mode,
+ struct dentry *parent, u8 *value) { }
-static inline struct dentry *debugfs_create_x64(const char *name, umode_t mode,
- struct dentry *parent,
- u64 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_x16(const char *name, umode_t mode,
+ struct dentry *parent, u16 *value) { }
-static inline struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
- struct dentry *parent,
- size_t *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_x32(const char *name, umode_t mode,
+ struct dentry *parent, u32 *value) { }
-static inline struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
- struct dentry *parent, atomic_t *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_x64(const char *name, umode_t mode,
+ struct dentry *parent, u64 *value) { }
-static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode,
- struct dentry *parent,
- bool *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_size_t(const char *name, umode_t mode,
+ struct dentry *parent, size_t *value)
+{ }
+
+static inline void debugfs_create_atomic_t(const char *name, umode_t mode,
+ struct dentry *parent,
+ atomic_t *value)
+{ }
+
+static inline void debugfs_create_bool(const char *name, umode_t mode,
+ struct dentry *parent, bool *value) { }
+
+static inline void debugfs_create_str(const char *name, umode_t mode,
+ struct dentry *parent,
+ char **value)
+{ }
static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode,
struct dentry *parent,
@@ -344,11 +327,10 @@ static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode,
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_regset32(const char *name,
- umode_t mode, struct dentry *parent,
- struct debugfs_regset32 *regset)
+static inline void debugfs_create_regset32(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_regset32 *regset)
{
- return ERR_PTR(-ENODEV);
}
static inline void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
@@ -361,20 +343,18 @@ static inline bool debugfs_initialized(void)
return false;
}
-static inline struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
- struct dentry *parent,
- u32 *array, u32 elements)
+static inline void debugfs_create_u32_array(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_u32_array *array)
{
- return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev,
- const char *name,
- struct dentry *parent,
- int (*read_fn)(struct seq_file *s,
- void *data))
+static inline void debugfs_create_devm_seqfile(struct device *dev,
+ const char *name,
+ struct dentry *parent,
+ int (*read_fn)(struct seq_file *s,
+ void *data))
{
- return ERR_PTR(-ENODEV);
}
static inline ssize_t debugfs_read_file_bool(struct file *file,
@@ -391,6 +371,34 @@ static inline ssize_t debugfs_write_file_bool(struct file *file,
return -ENODEV;
}
+static inline ssize_t debugfs_read_file_str(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return -ENODEV;
+}
+
#endif
+/**
+ * debugfs_create_xul - create a debugfs file that is used to read and write an
+ * unsigned long value, formatted in hexadecimal
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have
+ * @parent: a pointer to the parent dentry for this file. This should be a
+ * directory dentry if set. If this parameter is %NULL, then the
+ * file will be created in the root of the debugfs filesystem.
+ * @value: a pointer to the variable that the file should read from and
+ * write to.
+ */
+static inline void debugfs_create_xul(const char *name, umode_t mode,
+ struct dentry *parent,
+ unsigned long *value)
+{
+ if (sizeof(*value) == sizeof(u32))
+ debugfs_create_x32(name, mode, parent, (u32 *)value);
+ else
+ debugfs_create_x64(name, mode, parent, (u64 *)value);
+}
+
#endif
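
A short usage sketch for the helper above (my_* names hypothetical): the sizeof() check collapses at compile time to a single debugfs_create_x32() or debugfs_create_x64() call, so an unsigned long is always shown in hex at its native width.

#include <linux/debugfs.h>

static unsigned long my_base_addr;	/* hypothetical driver state */

static void my_debugfs_init(void)
{
	struct dentry *dir = debugfs_create_dir("mydrv", NULL);

	/* Reads/writes the variable in hexadecimal, 32 or 64 bits wide */
	debugfs_create_xul("base_addr", 0644, dir, &my_base_addr);
}
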
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index d82bf1994485..32444686b6ff 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DEBUGOBJECTS_H
#define _LINUX_DEBUGOBJECTS_H
@@ -17,7 +18,7 @@ enum debug_obj_state {
struct debug_obj_descr;
/**
- * struct debug_obj - representaion of an tracked object
+ * struct debug_obj - representation of a tracked object
* @node: hlist node to link the object into the tracker list
* @state: tracked object state
* @astate: current active state
@@ -29,7 +30,7 @@ struct debug_obj {
enum debug_obj_state state;
unsigned int astate;
void *object;
- struct debug_obj_descr *descr;
+ const struct debug_obj_descr *descr;
};
/**
@@ -63,14 +64,14 @@ struct debug_obj_descr {
};
#ifdef CONFIG_DEBUG_OBJECTS
-extern void debug_object_init (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_init (void *addr, const struct debug_obj_descr *descr);
extern void
-debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr);
-extern int debug_object_activate (void *addr, struct debug_obj_descr *descr);
-extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
-extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr);
-extern void debug_object_free (void *addr, struct debug_obj_descr *descr);
-extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr);
+debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr);
+extern int debug_object_activate (void *addr, const struct debug_obj_descr *descr);
+extern void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr);
+extern void debug_object_destroy (void *addr, const struct debug_obj_descr *descr);
+extern void debug_object_free (void *addr, const struct debug_obj_descr *descr);
+extern void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr);
/*
* Active state:
@@ -78,26 +79,26 @@ extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr);
* - Must return to 0 before deactivation.
*/
extern void
-debug_object_active_state(void *addr, struct debug_obj_descr *descr,
+debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
unsigned int expect, unsigned int next);
extern void debug_objects_early_init(void);
extern void debug_objects_mem_init(void);
#else
static inline void
-debug_object_init (void *addr, struct debug_obj_descr *descr) { }
+debug_object_init (void *addr, const struct debug_obj_descr *descr) { }
static inline void
-debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { }
+debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr) { }
static inline int
-debug_object_activate (void *addr, struct debug_obj_descr *descr) { return 0; }
+debug_object_activate (void *addr, const struct debug_obj_descr *descr) { return 0; }
static inline void
-debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { }
+debug_object_deactivate(void *addr, const struct debug_obj_descr *descr) { }
static inline void
-debug_object_destroy (void *addr, struct debug_obj_descr *descr) { }
+debug_object_destroy (void *addr, const struct debug_obj_descr *descr) { }
static inline void
-debug_object_free (void *addr, struct debug_obj_descr *descr) { }
+debug_object_free (void *addr, const struct debug_obj_descr *descr) { }
static inline void
-debug_object_assert_init(void *addr, struct debug_obj_descr *descr) { }
+debug_object_assert_init(void *addr, const struct debug_obj_descr *descr) { }
static inline void debug_objects_early_init(void) { }
static inline void debug_objects_mem_init(void) { }
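
The point of the const-ification above is that descriptors can now live in .rodata. A minimal sketch of a tracked object, with hypothetical my_* names:

#include <linux/debugobjects.h>

struct my_obj {
	int state;		/* hypothetical payload */
};

/* With the const-ified API the descriptor can be placed in .rodata */
static const struct debug_obj_descr my_obj_debug_descr = {
	.name = "my_obj",
};

static void my_obj_setup(struct my_obj *obj)
{
	debug_object_init(obj, &my_obj_debug_descr);
	debug_object_activate(obj, &my_obj_debug_descr);
}
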
diff --git a/include/linux/decompress/bunzip2.h b/include/linux/decompress/bunzip2.h
index 4d683df898e6..5860163942a4 100644
--- a/include/linux/decompress/bunzip2.h
+++ b/include/linux/decompress/bunzip2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DECOMPRESS_BUNZIP2_H
#define DECOMPRESS_BUNZIP2_H
diff --git a/include/linux/decompress/generic.h b/include/linux/decompress/generic.h
index 1fcfd64b5076..207d80138db5 100644
--- a/include/linux/decompress/generic.h
+++ b/include/linux/decompress/generic.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DECOMPRESS_GENERIC_H
#define DECOMPRESS_GENERIC_H
diff --git a/include/linux/decompress/inflate.h b/include/linux/decompress/inflate.h
index e4f411fdbd24..b65f24e7d442 100644
--- a/include/linux/decompress/inflate.h
+++ b/include/linux/decompress/inflate.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_DECOMPRESS_INFLATE_H
#define LINUX_DECOMPRESS_INFLATE_H
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
index 7925bf0ee836..9192986b1a73 100644
--- a/include/linux/decompress/mm.h
+++ b/include/linux/decompress/mm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/compr_mm.h
*
@@ -24,13 +25,21 @@
#define STATIC_RW_DATA static
#endif
+/*
+ * When an architecture needs to share the malloc()/free() implementation
+ * between compilation units, it needs to have non-local visibility.
+ */
+#ifndef MALLOC_VISIBLE
+#define MALLOC_VISIBLE static
+#endif
+
/* A trivial malloc implementation, adapted from
* malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
*/
STATIC_RW_DATA unsigned long malloc_ptr;
STATIC_RW_DATA int malloc_count;
-static void *malloc(int size)
+MALLOC_VISIBLE void *malloc(int size)
{
void *p;
@@ -51,7 +60,7 @@ static void *malloc(int size)
return p;
}
-static void free(void *where)
+MALLOC_VISIBLE void free(void *where)
{
malloc_count--;
if (!malloc_count)
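
How an architecture might use the new override, as a sketch (file location hypothetical): defining MALLOC_VISIBLE as empty before including the header gives malloc()/free() external linkage, so the one implementation can be shared across compilation units.

/* arch/.../boot/compressed/malloc.c (hypothetical) */
#define MALLOC_VISIBLE		/* empty: non-static malloc()/free() */
#include <linux/decompress/mm.h>
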
diff --git a/include/linux/decompress/unlz4.h b/include/linux/decompress/unlz4.h
index 3273c2f36496..5a235f605d5f 100644
--- a/include/linux/decompress/unlz4.h
+++ b/include/linux/decompress/unlz4.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DECOMPRESS_UNLZ4_H
#define DECOMPRESS_UNLZ4_H
diff --git a/include/linux/decompress/unlzma.h b/include/linux/decompress/unlzma.h
index 8a891a193840..1c930f125182 100644
--- a/include/linux/decompress/unlzma.h
+++ b/include/linux/decompress/unlzma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DECOMPRESS_UNLZMA_H
#define DECOMPRESS_UNLZMA_H
diff --git a/include/linux/decompress/unlzo.h b/include/linux/decompress/unlzo.h
index af18f95d6570..550ae8783d1b 100644
--- a/include/linux/decompress/unlzo.h
+++ b/include/linux/decompress/unlzo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DECOMPRESS_UNLZO_H
#define DECOMPRESS_UNLZO_H
diff --git a/include/linux/decompress/unzstd.h b/include/linux/decompress/unzstd.h
new file mode 100644
index 000000000000..56d539ae880f
--- /dev/null
+++ b/include/linux/decompress/unzstd.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_DECOMPRESS_UNZSTD_H
+#define LINUX_DECOMPRESS_UNZSTD_H
+
+int unzstd(unsigned char *inbuf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *output,
+ long *pos,
+ void (*error_fn)(char *x));
+#endif
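
unzstd() follows the calling convention of the other decompress_* entry points: with fill and flush both NULL it consumes a fully in-memory input and writes into a caller-supplied output buffer, reporting the input bytes consumed through pos. A hedged sketch, my_* names hypothetical:

#include <linux/decompress/unzstd.h>
#include <linux/printk.h>

static void my_unzstd_error(char *msg)
{
	pr_err("unzstd: %s\n", msg);
}

static int my_unpack(unsigned char *in, long in_len, unsigned char *out)
{
	long in_used;

	/* NULL fill/flush: single-shot, buffer-to-buffer decompression */
	return unzstd(in, in_len, NULL, NULL, out, &in_used,
		      my_unzstd_error);
}
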
diff --git a/include/linux/delay.h b/include/linux/delay.h
index 2ecb3c46b20a..039e7e0c7378 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DELAY_H
#define _LINUX_DELAY_H
@@ -15,10 +16,11 @@
* 3. CPU clock rate changes.
*
* Please see this thread:
- * http://lists.openwall.net/linux-kernel/2011/01/09/56
+ * https://lists.openwall.net/linux-kernel/2011/01/09/56
*/
-#include <linux/kernel.h>
+#include <linux/math.h>
+#include <linux/sched.h>
extern unsigned long loops_per_jiffy;
@@ -54,13 +56,36 @@ static inline void ndelay(unsigned long x)
extern unsigned long lpj_fine;
void calibrate_delay(void);
+void __attribute__((weak)) calibration_delay_done(void);
void msleep(unsigned int msecs);
unsigned long msleep_interruptible(unsigned int msecs);
-void usleep_range(unsigned long min, unsigned long max);
+void usleep_range_state(unsigned long min, unsigned long max,
+ unsigned int state);
+
+static inline void usleep_range(unsigned long min, unsigned long max)
+{
+ usleep_range_state(min, max, TASK_UNINTERRUPTIBLE);
+}
+
+static inline void usleep_idle_range(unsigned long min, unsigned long max)
+{
+ usleep_range_state(min, max, TASK_IDLE);
+}
static inline void ssleep(unsigned int seconds)
{
msleep(seconds * 1000);
}
+/* see Documentation/timers/timers-howto.rst for the thresholds */
+static inline void fsleep(unsigned long usecs)
+{
+ if (usecs <= 10)
+ udelay(usecs);
+ else if (usecs <= 20000)
+ usleep_range(usecs, 2 * usecs);
+ else
+ msleep(DIV_ROUND_UP(usecs, 1000));
+}
+
#endif /* defined(_LINUX_DELAY_H) */
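
A sketch of the intended use of fsleep() (register layout and names hypothetical): callers stop picking between udelay(), usleep_range() and msleep() by hand, and fsleep() applies the documented thresholds for them.

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bits.h>
#include <linux/errno.h>

static int my_wait_ready(void __iomem *status_reg, unsigned long delay_us)
{
	int retries = 100;

	while (retries--) {
		if (readl(status_reg) & BIT(0))	/* hypothetical READY bit */
			return 0;
		fsleep(delay_us);	/* udelay/usleep_range/msleep as fits */
	}
	return -ETIMEDOUT;
}
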
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 4178d2493547..6639f48dac36 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -1,17 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* delayacct.h - per-task delay accounting
*
* Copyright (C) Shailabh Nagar, IBM Corp. 2006
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU General Public License for more details.
- *
*/
#ifndef _LINUX_DELAYACCT_H
@@ -19,18 +9,9 @@
#include <uapi/linux/taskstats.h>
-/*
- * Per-task flags relevant to delay accounting
- * maintained privately to avoid exhausting similar flags in sched.h:PF_*
- * Used to set current->delays->flags
- */
-#define DELAYACCT_PF_SWAPIN 0x00000001 /* I am doing a swapin */
-#define DELAYACCT_PF_BLKIO 0x00000002 /* I am waiting on IO */
-
#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
- spinlock_t lock;
- unsigned int flags; /* Private per-task flags */
+ raw_spinlock_t lock;
/* For each stat XXX, add following, aligned appropriately
*
@@ -47,55 +28,63 @@ struct task_delay_info {
* associated with the operation is added to XXX_delay.
* XXX_delay contains the accumulated delay time in nanoseconds.
*/
- u64 blkio_start; /* Shared by blkio, swapin */
+ u64 blkio_start;
u64 blkio_delay; /* wait for sync block io completion */
- u64 swapin_delay; /* wait for swapin block io completion */
+ u64 swapin_start;
+ u64 swapin_delay; /* wait for swapin */
u32 blkio_count; /* total count of the number of sync block */
/* io operations performed */
- u32 swapin_count; /* total count of the number of swapin block */
- /* io operations performed */
+ u32 swapin_count; /* total count of swapin */
u64 freepages_start;
u64 freepages_delay; /* wait for memory reclaim */
+
+ u64 thrashing_start;
+ u64 thrashing_delay; /* wait for thrashing page */
+
+ u64 compact_start;
+ u64 compact_delay; /* wait for memory compact */
+
+ u64 wpcopy_start;
+ u64 wpcopy_delay; /* wait for write-protect copy */
+
+ u64 irq_delay; /* wait for IRQ/SOFTIRQ */
+
u32 freepages_count; /* total count of memory reclaim */
+ u32 thrashing_count; /* total count of thrash waits */
+ u32 compact_count; /* total count of memory compact */
+ u32 wpcopy_count; /* total count of write-protect copy */
+ u32 irq_count; /* total count of IRQ/SOFTIRQ */
};
#endif
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/jump_label.h>
#ifdef CONFIG_TASK_DELAY_ACCT
+DECLARE_STATIC_KEY_FALSE(delayacct_key);
extern int delayacct_on; /* Delay accounting turned on/off */
extern struct kmem_cache *delayacct_cache;
extern void delayacct_init(void);
+
extern void __delayacct_tsk_init(struct task_struct *);
extern void __delayacct_tsk_exit(struct task_struct *);
extern void __delayacct_blkio_start(void);
-extern void __delayacct_blkio_end(void);
-extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *);
+extern void __delayacct_blkio_end(struct task_struct *);
+extern int delayacct_add_tsk(struct taskstats *, struct task_struct *);
extern __u64 __delayacct_blkio_ticks(struct task_struct *);
extern void __delayacct_freepages_start(void);
extern void __delayacct_freepages_end(void);
-
-static inline int delayacct_is_task_waiting_on_io(struct task_struct *p)
-{
- if (p->delays)
- return (p->delays->flags & DELAYACCT_PF_BLKIO);
- else
- return 0;
-}
-
-static inline void delayacct_set_flag(int flag)
-{
- if (current->delays)
- current->delays->flags |= flag;
-}
-
-static inline void delayacct_clear_flag(int flag)
-{
- if (current->delays)
- current->delays->flags &= ~flag;
-}
+extern void __delayacct_thrashing_start(bool *in_thrashing);
+extern void __delayacct_thrashing_end(bool *in_thrashing);
+extern void __delayacct_swapin_start(void);
+extern void __delayacct_swapin_end(void);
+extern void __delayacct_compact_start(void);
+extern void __delayacct_compact_end(void);
+extern void __delayacct_wpcopy_start(void);
+extern void __delayacct_wpcopy_end(void);
+extern void __delayacct_irq(struct task_struct *task, u32 delta);
static inline void delayacct_tsk_init(struct task_struct *tsk)
{
@@ -117,24 +106,20 @@ static inline void delayacct_tsk_free(struct task_struct *tsk)
static inline void delayacct_blkio_start(void)
{
- delayacct_set_flag(DELAYACCT_PF_BLKIO);
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
__delayacct_blkio_start();
}
-static inline void delayacct_blkio_end(void)
+static inline void delayacct_blkio_end(struct task_struct *p)
{
- if (current->delays)
- __delayacct_blkio_end();
- delayacct_clear_flag(DELAYACCT_PF_BLKIO);
-}
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
-static inline int delayacct_add_tsk(struct taskstats *d,
- struct task_struct *tsk)
-{
- if (!delayacct_on || !tsk->delays)
- return 0;
- return __delayacct_add_tsk(d, tsk);
+ if (p->delays)
+ __delayacct_blkio_end(p);
}
static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
@@ -146,21 +131,104 @@ static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
static inline void delayacct_freepages_start(void)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
__delayacct_freepages_start();
}
static inline void delayacct_freepages_end(void)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
__delayacct_freepages_end();
}
+static inline void delayacct_thrashing_start(bool *in_thrashing)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_thrashing_start(in_thrashing);
+}
+
+static inline void delayacct_thrashing_end(bool *in_thrashing)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_thrashing_end(in_thrashing);
+}
+
+static inline void delayacct_swapin_start(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_swapin_start();
+}
+
+static inline void delayacct_swapin_end(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_swapin_end();
+}
+
+static inline void delayacct_compact_start(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_compact_start();
+}
+
+static inline void delayacct_compact_end(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_compact_end();
+}
+
+static inline void delayacct_wpcopy_start(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_wpcopy_start();
+}
+
+static inline void delayacct_wpcopy_end(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_wpcopy_end();
+}
+
+static inline void delayacct_irq(struct task_struct *task, u32 delta)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (task->delays)
+ __delayacct_irq(task, delta);
+}
+
#else
-static inline void delayacct_set_flag(int flag)
-{}
-static inline void delayacct_clear_flag(int flag)
-{}
static inline void delayacct_init(void)
{}
static inline void delayacct_tsk_init(struct task_struct *tsk)
@@ -169,7 +237,7 @@ static inline void delayacct_tsk_free(struct task_struct *tsk)
{}
static inline void delayacct_blkio_start(void)
{}
-static inline void delayacct_blkio_end(void)
+static inline void delayacct_blkio_end(struct task_struct *p)
{}
static inline int delayacct_add_tsk(struct taskstats *d,
struct task_struct *tsk)
@@ -182,6 +250,24 @@ static inline void delayacct_freepages_start(void)
{}
static inline void delayacct_freepages_end(void)
{}
+static inline void delayacct_thrashing_start(bool *in_thrashing)
+{}
+static inline void delayacct_thrashing_end(bool *in_thrashing)
+{}
+static inline void delayacct_swapin_start(void)
+{}
+static inline void delayacct_swapin_end(void)
+{}
+static inline void delayacct_compact_start(void)
+{}
+static inline void delayacct_compact_end(void)
+{}
+static inline void delayacct_wpcopy_start(void)
+{}
+static inline void delayacct_wpcopy_end(void)
+{}
+static inline void delayacct_irq(struct task_struct *task, u32 delta)
+{}
#endif /* CONFIG_TASK_DELAY_ACCT */
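
The call pattern for the new paired helpers, as a sketch: the on-stack bool cookie lets the __delayacct_thrashing_start()/end() implementation deal with nesting, while the static key keeps the fast path to a single branch when delay accounting is off.

#include <linux/delayacct.h>

static void my_wait_for_page(void)	/* hypothetical blocking path */
{
	bool in_thrashing;

	delayacct_thrashing_start(&in_thrashing);
	/* ... block until the thrashing page becomes usable ... */
	delayacct_thrashing_end(&in_thrashing);
}
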
diff --git a/include/linux/delayed_call.h b/include/linux/delayed_call.h
index f7fa76ae1a9b..a26c3b95b5cf 100644
--- a/include/linux/delayed_call.h
+++ b/include/linux/delayed_call.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _DELAYED_CALL_H
#define _DELAYED_CALL_H
diff --git a/include/linux/dell-led.h b/include/linux/dell-led.h
deleted file mode 100644
index 3f033c48071e..000000000000
--- a/include/linux/dell-led.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __DELL_LED_H__
-#define __DELL_LED_H__
-
-int dell_micmute_led_set(int on);
-
-#endif
diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h
new file mode 100644
index 000000000000..8904063d4c9f
--- /dev/null
+++ b/include/linux/dev_printk.h
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dev_printk.h - printk messages helpers for devices
+ *
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2008-2009 Novell Inc.
+ *
+ */
+
+#ifndef _DEVICE_PRINTK_H_
+#define _DEVICE_PRINTK_H_
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/ratelimit.h>
+
+#ifndef dev_fmt
+#define dev_fmt(fmt) fmt
+#endif
+
+struct device;
+
+#define PRINTK_INFO_SUBSYSTEM_LEN 16
+#define PRINTK_INFO_DEVICE_LEN 48
+
+struct dev_printk_info {
+ char subsystem[PRINTK_INFO_SUBSYSTEM_LEN];
+ char device[PRINTK_INFO_DEVICE_LEN];
+};
+
+#ifdef CONFIG_PRINTK
+
+__printf(3, 0) __cold
+int dev_vprintk_emit(int level, const struct device *dev,
+ const char *fmt, va_list args);
+__printf(3, 4) __cold
+int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
+
+__printf(3, 4) __cold
+void _dev_printk(const char *level, const struct device *dev,
+ const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_emerg(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_alert(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_crit(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_err(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_warn(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_notice(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_info(const struct device *dev, const char *fmt, ...);
+
+#else
+
+static inline __printf(3, 0)
+int dev_vprintk_emit(int level, const struct device *dev,
+ const char *fmt, va_list args)
+{ return 0; }
+static inline __printf(3, 4)
+int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
+{ return 0; }
+
+static inline void __dev_printk(const char *level, const struct device *dev,
+ struct va_format *vaf)
+{}
+static inline __printf(3, 4)
+void _dev_printk(const char *level, const struct device *dev,
+ const char *fmt, ...)
+{}
+
+static inline __printf(2, 3)
+void _dev_emerg(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_crit(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_alert(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_err(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_warn(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_notice(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_info(const struct device *dev, const char *fmt, ...)
+{}
+
+#endif
+
+/*
+ * Need to take variadic arguments even though we don't use them, as dev_fmt()
+ * may only just have been expanded and may result in multiple arguments.
+ */
+#define dev_printk_index_emit(level, fmt, ...) \
+ printk_index_subsys_emit("%s %s: ", level, fmt)
+
+#define dev_printk_index_wrap(_p_func, level, dev, fmt, ...) \
+ ({ \
+ dev_printk_index_emit(level, fmt); \
+ _p_func(dev, fmt, ##__VA_ARGS__); \
+ })
+
+/*
+ * Some callsites directly call dev_printk rather than going through the
+ * dev_<level> infrastructure, so we need to emit here as well as inside those
+ * level-specific macros. Only one index entry will be produced, either way,
+ * since dev_printk's `fmt` isn't known at compile time if going through the
+ * dev_<level> macros.
+ *
+ * dev_fmt() isn't called for dev_printk when used directly, as it's used by
+ * the dev_<level> macros internally which already have dev_fmt() processed.
+ *
+ * We also can't use dev_printk_index_wrap directly, because we have a separate
+ * level to process.
+ */
+#define dev_printk(level, dev, fmt, ...) \
+ ({ \
+ dev_printk_index_emit(level, fmt); \
+ _dev_printk(level, dev, fmt, ##__VA_ARGS__); \
+ })
+
+/*
+ * #defines for all the dev_<level> macros to prefix with whatever
+ * possible use of #define dev_fmt(fmt) ...
+ */
+
+#define dev_emerg(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_emerg, KERN_EMERG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_crit(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_crit, KERN_CRIT, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_alert(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_alert, KERN_ALERT, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_err(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_err, KERN_ERR, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_warn(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_warn, KERN_WARNING, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_notice(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_notice, KERN_NOTICE, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_info(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_info, KERN_INFO, dev, dev_fmt(fmt), ##__VA_ARGS__)
+
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
+#define dev_dbg(dev, fmt, ...) \
+ dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#elif defined(DEBUG)
+#define dev_dbg(dev, fmt, ...) \
+ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#else
+#define dev_dbg(dev, fmt, ...) \
+({ \
+ if (0) \
+ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
+})
+#endif
+
+#ifdef CONFIG_PRINTK
+#define dev_level_once(dev_level, dev, fmt, ...) \
+do { \
+ static bool __print_once __read_mostly; \
+ \
+ if (!__print_once) { \
+ __print_once = true; \
+ dev_level(dev, fmt, ##__VA_ARGS__); \
+ } \
+} while (0)
+#else
+#define dev_level_once(dev_level, dev, fmt, ...) \
+do { \
+ if (0) \
+ dev_level(dev, fmt, ##__VA_ARGS__); \
+} while (0)
+#endif
+
+#define dev_emerg_once(dev, fmt, ...) \
+ dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
+#define dev_alert_once(dev, fmt, ...) \
+ dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
+#define dev_crit_once(dev, fmt, ...) \
+ dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
+#define dev_err_once(dev, fmt, ...) \
+ dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
+#define dev_warn_once(dev, fmt, ...) \
+ dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
+#define dev_notice_once(dev, fmt, ...) \
+ dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
+#define dev_info_once(dev, fmt, ...) \
+ dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
+#define dev_dbg_once(dev, fmt, ...) \
+ dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
+
+#define dev_level_ratelimited(dev_level, dev, fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ if (__ratelimit(&_rs)) \
+ dev_level(dev, fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define dev_emerg_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__)
+#define dev_alert_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__)
+#define dev_crit_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__)
+#define dev_err_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__)
+#define dev_warn_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__)
+#define dev_notice_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
+#define dev_info_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
+/* descriptor check is first to prevent flooding with "callbacks suppressed" */
+#define dev_dbg_ratelimited(dev, fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (DYNAMIC_DEBUG_BRANCH(descriptor) && \
+ __ratelimit(&_rs)) \
+ __dynamic_dev_dbg(&descriptor, dev, dev_fmt(fmt), \
+ ##__VA_ARGS__); \
+} while (0)
+#elif defined(DEBUG)
+#define dev_dbg_ratelimited(dev, fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ if (__ratelimit(&_rs)) \
+ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
+} while (0)
+#else
+#define dev_dbg_ratelimited(dev, fmt, ...) \
+do { \
+ if (0) \
+ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
+} while (0)
+#endif
+
+#ifdef VERBOSE_DEBUG
+#define dev_vdbg dev_dbg
+#else
+#define dev_vdbg(dev, fmt, ...) \
+({ \
+ if (0) \
+ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
+})
+#endif
+
+/*
+ * dev_WARN*() acts like dev_printk(), but with the key difference of
+ * using WARN/WARN_ONCE to include file/line information and a backtrace.
+ */
+#define dev_WARN(dev, format, arg...) \
+ WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg)
+
+#define dev_WARN_ONCE(dev, condition, format, arg...) \
+ WARN_ONCE(condition, "%s %s: " format, \
+ dev_driver_string(dev), dev_name(dev), ## arg)
+
+#endif /* _DEVICE_PRINTK_H_ */
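
Typical consumer pattern, sketched with a hypothetical driver: dev_fmt() must be defined before this header is pulled in (usually via linux/device.h), since the #ifndef guard above only supplies the identity default, and then every dev_<level>() message picks up the prefix.

#define dev_fmt(fmt) "mydrv: " fmt	/* must precede the include */

#include <linux/device.h>

static void my_report(struct device *dev, int irq)
{
	dev_info(dev, "probed, irq %d\n", irq);	/* prefixed "mydrv: " */
	dev_warn_ratelimited(dev, "spurious wakeup\n");
}
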
diff --git a/include/linux/devcoredump.h b/include/linux/devcoredump.h
index 269521f143ac..c008169ed2c6 100644
--- a/include/linux/devcoredump.h
+++ b/include/linux/devcoredump.h
@@ -1,21 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * This file is provided under the GPLv2 license.
- *
- * GPL LICENSE SUMMARY
- *
* Copyright(c) 2015 Intel Deutschland GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
*/
#ifndef __DEVCOREDUMP_H
#define __DEVCOREDUMP_H
diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h
index 4db00b02ca3f..4a50a5c71a5f 100644
--- a/include/linux/devfreq-event.h
+++ b/include/linux/devfreq-event.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* devfreq-event: a framework to provide raw data and events of devfreq devices
*
* Copyright (C) 2014 Samsung Electronics
* Author: Chanwoo Choi <cw00.choi@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_DEVFREQ_EVENT_H__
@@ -81,14 +78,20 @@ struct devfreq_event_ops {
* struct devfreq_event_desc - the descriptor of devfreq-event device
*
* @name : the name of devfreq-event device.
+ * @event_type : the type of event, determined and used by the driver
* @driver_data : the private data for devfreq-event driver.
* @ops : the operation to control devfreq-event device.
*
* Each devfreq-event device is described with this structure.
* This structure contains the various data for a devfreq-event device.
+ * The event_type describes what is going to be counted in the register.
+ * A driver might choose to count e.g. read requests, written bytes, etc.
+ * The full list of supported types is present in a specific header under:
+ * include/dt-bindings/pmu/.
*/
struct devfreq_event_desc {
const char *name;
+ u32 event_type;
void *driver_data;
const struct devfreq_event_ops *ops;
@@ -103,8 +106,11 @@ extern int devfreq_event_get_event(struct devfreq_event_dev *edev,
struct devfreq_event_data *edata);
extern int devfreq_event_reset_event(struct devfreq_event_dev *edev);
extern struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
- struct device *dev, int index);
-extern int devfreq_event_get_edev_count(struct device *dev);
+ struct device *dev,
+ const char *phandle_name,
+ int index);
+extern int devfreq_event_get_edev_count(struct device *dev,
+ const char *phandle_name);
extern struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
struct devfreq_event_desc *desc);
extern int devfreq_event_remove_edev(struct devfreq_event_dev *edev);
@@ -149,12 +155,15 @@ static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev)
}
static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
- struct device *dev, int index)
+ struct device *dev,
+ const char *phandle_name,
+ int index)
{
return ERR_PTR(-EINVAL);
}
-static inline int devfreq_event_get_edev_count(struct device *dev)
+static inline int devfreq_event_get_edev_count(struct device *dev,
+ const char *phandle_name)
{
return -EINVAL;
}
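
With the phandle property name now passed explicitly, a lookup sketch looks like this (my_* names hypothetical; "devfreq-events" is an assumed DT property name, taken from common bindings rather than from this patch):

#include <linux/devfreq-event.h>
#include <linux/err.h>
#include <linux/errno.h>

static struct devfreq_event_dev *my_get_event_dev(struct device *dev)
{
	if (devfreq_event_get_edev_count(dev, "devfreq-events") <= 0)
		return ERR_PTR(-ENODEV);

	return devfreq_event_get_edev_by_phandle(dev, "devfreq-events", 0);
}
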
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 6c220e4ebb6b..7fd704bb8f3d 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
* for Non-CPU Devices.
*
* Copyright (C) 2011 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_DEVFREQ_H__
@@ -16,8 +13,14 @@
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
-#define DEVFREQ_NAME_LEN 16
+/* DEVFREQ governor name */
+#define DEVFREQ_GOV_SIMPLE_ONDEMAND "simple_ondemand"
+#define DEVFREQ_GOV_PERFORMANCE "performance"
+#define DEVFREQ_GOV_POWERSAVE "powersave"
+#define DEVFREQ_GOV_USERSPACE "userspace"
+#define DEVFREQ_GOV_PASSIVE "passive"
/* DEVFREQ notifier interface */
#define DEVFREQ_TRANSITION_NOTIFIER (0)
@@ -26,8 +29,17 @@
#define DEVFREQ_PRECHANGE (0)
#define DEVFREQ_POSTCHANGE (1)
+/* DEVFREQ work timers */
+enum devfreq_timer {
+ DEVFREQ_TIMER_DEFERRABLE = 0,
+ DEVFREQ_TIMER_DELAYED,
+ DEVFREQ_TIMER_NUM,
+};
+
struct devfreq;
struct devfreq_governor;
+struct devfreq_cpu_data;
+struct thermal_cooling_device;
/**
* struct devfreq_dev_status - Data given from devfreq user device to
@@ -65,6 +77,7 @@ struct devfreq_dev_status {
* @initial_freq: The operating frequency when devfreq_add_device() is
* called.
* @polling_ms: The polling interval in ms. 0 disables polling.
+ * @timer: Timer type is either deferrable or delayed timer.
* @target: The device should set its operating frequency at
* freq or lowest-upper-than-freq value. If freq is
* higher than any operable frequency, set maximum.
@@ -84,12 +97,18 @@ struct devfreq_dev_status {
* from devfreq_remove_device() call. If the user
* has registered devfreq->nb at a notifier-head,
* this is the time to unregister it.
- * @freq_table: Optional list of frequencies to support statistics.
- * @max_state: The size of freq_table.
+ * @freq_table: Optional list of frequencies to support statistics;
+ * the table must be generated in ascending order.
+ * @max_state: The size of freq_table.
+ *
+ * @is_cooling_device: A boolean flag indicating that the device also
+ * acts as a thermal cooling device.
*/
struct devfreq_dev_profile {
unsigned long initial_freq;
unsigned int polling_ms;
+ enum devfreq_timer timer;
+ bool is_cooling_device;
int (*target)(struct device *dev, unsigned long *freq, u32 flags);
int (*get_dev_status)(struct device *dev,
@@ -102,6 +121,20 @@ struct devfreq_dev_profile {
};
/**
+ * struct devfreq_stats - Statistics of devfreq device behavior
+ * @total_trans: Number of devfreq transitions.
+ * @trans_table: Statistics of devfreq transitions.
+ * @time_in_state: Statistics of devfreq states.
+ * @last_update: The last time stats were updated.
+ */
+struct devfreq_stats {
+ unsigned int total_trans;
+ unsigned int *trans_table;
+ u64 *time_in_state;
+ u64 last_update;
+};
+
+/**
* struct devfreq - Device devfreq structure
* @node: list node - contains the devices with devfreq that have been
* registered.
@@ -110,30 +143,38 @@ struct devfreq_dev_profile {
* using devfreq.
* @profile: device-specific devfreq profile
* @governor: method how to choose frequency based on the usage.
- * @governor_name: devfreq governor name for use with this devfreq
+ * @opp_table: Reference to OPP table of dev.parent, if one exists.
* @nb: notifier block used to notify devfreq object that it should
* reevaluate operable frequencies. Devfreq users may use
* devfreq.nb to the corresponding register notifier call chain.
* @work: delayed work for load monitoring.
+ * @freq_table: current frequency table used by the devfreq driver.
+ * @max_state: count of entry present in the frequency table.
* @previous_freq: previously configured frequency value.
- * @data: Private data of the governor. The devfreq framework does not
- * touch this.
- * @min_freq: Limit minimum frequency requested by user (0: none)
- * @max_freq: Limit maximum frequency requested by user (0: none)
+ * @last_status: devfreq user device info, performance statistics
+ * @data: data the devfreq driver passes to governors; governors
+ * should not change it.
+ * @governor_data: private data for governors, devfreq core doesn't touch it.
+ * @user_min_freq_req: PM QoS minimum frequency request from user (via sysfs)
+ * @user_max_freq_req: PM QoS maximum frequency request from user (via sysfs)
+ * @scaling_min_freq: Limit minimum frequency requested by OPP interface
+ * @scaling_max_freq: Limit maximum frequency requested by OPP interface
* @stop_polling: devfreq polling status of a device.
- * @total_trans: Number of devfreq transitions
- * @trans_table: Statistics of devfreq transitions
- * @time_in_state: Statistics of devfreq states
- * @last_stat_updated: The last time stat updated
+ * @suspend_freq: frequency of a device set during suspend phase.
+ * @resume_freq: frequency of a device set in resume phase.
+ * @suspend_count: suspend requests counter for a device.
+ * @stats: Statistics of devfreq device behavior
* @transition_notifier_list: list head of DEVFREQ_TRANSITION_NOTIFIER notifier
+ * @cdev: Cooling device pointer if the devfreq has cooling property
+ * @nb_min: Notifier block for DEV_PM_QOS_MIN_FREQUENCY
+ * @nb_max: Notifier block for DEV_PM_QOS_MAX_FREQUENCY
*
- * This structure stores the devfreq information for a give device.
+ * This structure stores the devfreq information for a given device.
*
* Note that when a governor accesses entries in struct devfreq in its
* functions except for the context of callbacks defined in struct
* devfreq_governor, the governor should protect its access with the
* struct mutex lock in struct devfreq. A governor may use this mutex
- * to protect its own private data in void *data as well.
+ * to protect its own private data in ``void *data`` as well.
*/
struct devfreq {
struct list_head node;
@@ -142,26 +183,39 @@ struct devfreq {
struct device dev;
struct devfreq_dev_profile *profile;
const struct devfreq_governor *governor;
- char governor_name[DEVFREQ_NAME_LEN];
+ struct opp_table *opp_table;
struct notifier_block nb;
struct delayed_work work;
+ unsigned long *freq_table;
+ unsigned int max_state;
+
unsigned long previous_freq;
struct devfreq_dev_status last_status;
- void *data; /* private data for governors */
+ void *data;
+ void *governor_data;
- unsigned long min_freq;
- unsigned long max_freq;
+ struct dev_pm_qos_request user_min_freq_req;
+ struct dev_pm_qos_request user_max_freq_req;
+ unsigned long scaling_min_freq;
+ unsigned long scaling_max_freq;
bool stop_polling;
- /* information for device frequency transition */
- unsigned int total_trans;
- unsigned int *trans_table;
- unsigned long *time_in_state;
- unsigned long last_stat_updated;
+ unsigned long suspend_freq;
+ unsigned long resume_freq;
+ atomic_t suspend_count;
+
+ /* information for device frequency transitions */
+ struct devfreq_stats stats;
struct srcu_notifier_head transition_notifier_list;
+
+ /* Pointer to the cooling device if used for thermal mitigation */
+ struct thermal_cooling_device *cdev;
+
+ struct notifier_block nb_min;
+ struct notifier_block nb_max;
};
struct devfreq_freqs {
@@ -170,66 +224,59 @@ struct devfreq_freqs {
};
#if defined(CONFIG_PM_DEVFREQ)
-extern struct devfreq *devfreq_add_device(struct device *dev,
- struct devfreq_dev_profile *profile,
- const char *governor_name,
- void *data);
-extern int devfreq_remove_device(struct devfreq *devfreq);
-extern struct devfreq *devm_devfreq_add_device(struct device *dev,
- struct devfreq_dev_profile *profile,
- const char *governor_name,
- void *data);
-extern void devm_devfreq_remove_device(struct device *dev,
- struct devfreq *devfreq);
+struct devfreq *devfreq_add_device(struct device *dev,
+ struct devfreq_dev_profile *profile,
+ const char *governor_name,
+ void *data);
+int devfreq_remove_device(struct devfreq *devfreq);
+struct devfreq *devm_devfreq_add_device(struct device *dev,
+ struct devfreq_dev_profile *profile,
+ const char *governor_name,
+ void *data);
+void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq);
/* Supposed to be called by PM callbacks */
-extern int devfreq_suspend_device(struct devfreq *devfreq);
-extern int devfreq_resume_device(struct devfreq *devfreq);
+int devfreq_suspend_device(struct devfreq *devfreq);
+int devfreq_resume_device(struct devfreq *devfreq);
+
+void devfreq_suspend(void);
+void devfreq_resume(void);
+
+/* update_devfreq() - Reevaluate the device and configure frequency */
+int update_devfreq(struct devfreq *devfreq);
/* Helper functions for devfreq user device driver with OPP. */
-extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
- unsigned long *freq, u32 flags);
-extern int devfreq_register_opp_notifier(struct device *dev,
- struct devfreq *devfreq);
-extern int devfreq_unregister_opp_notifier(struct device *dev,
- struct devfreq *devfreq);
-extern int devm_devfreq_register_opp_notifier(struct device *dev,
- struct devfreq *devfreq);
-extern void devm_devfreq_unregister_opp_notifier(struct device *dev,
- struct devfreq *devfreq);
-extern int devfreq_register_notifier(struct devfreq *devfreq,
- struct notifier_block *nb,
- unsigned int list);
-extern int devfreq_unregister_notifier(struct devfreq *devfreq,
- struct notifier_block *nb,
- unsigned int list);
-extern int devm_devfreq_register_notifier(struct device *dev,
+struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
+ unsigned long *freq, u32 flags);
+int devfreq_register_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+int devfreq_unregister_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+int devm_devfreq_register_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+void devm_devfreq_unregister_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+int devfreq_register_notifier(struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list);
+int devfreq_unregister_notifier(struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list);
+int devm_devfreq_register_notifier(struct device *dev,
struct devfreq *devfreq,
struct notifier_block *nb,
unsigned int list);
-extern void devm_devfreq_unregister_notifier(struct device *dev,
+void devm_devfreq_unregister_notifier(struct device *dev,
struct devfreq *devfreq,
struct notifier_block *nb,
unsigned int list);
-extern struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
- int index);
-
-/**
- * devfreq_update_stats() - update the last_status pointer in struct devfreq
- * @df: the devfreq instance whose status needs updating
- *
- * Governors are recommended to use this function along with last_status,
- * which allows other entities to reuse the last_status without affecting
- * the values fetched later by governors.
- */
-static inline int devfreq_update_stats(struct devfreq *df)
-{
- return df->profile->get_dev_status(df->dev.parent, &df->last_status);
-}
+struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node);
+struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
+ const char *phandle_name, int index);
+#endif /* CONFIG_PM_DEVFREQ */
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
/**
- * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq
+ * struct devfreq_simple_ondemand_data - ``void *data`` fed to struct devfreq
* and devfreq_add_device
* @upthreshold: If the load is over this value, the frequency jumps.
* Specify 0 to use the default. Valid value = 0 to 100.
@@ -245,11 +292,14 @@ struct devfreq_simple_ondemand_data {
unsigned int upthreshold;
unsigned int downdifferential;
};
-#endif
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
+enum devfreq_parent_dev_type {
+ DEVFREQ_PARENT_DEV,
+ CPUFREQ_PARENT_DEV,
+};
+
/**
- * struct devfreq_passive_data - void *data fed to struct devfreq
+ * struct devfreq_passive_data - ``void *data`` fed to struct devfreq
* and devfreq_add_device
* @parent: the devfreq instance of parent device.
* @get_target_freq: Optional callback, Returns desired operating frequency
@@ -259,8 +309,11 @@ struct devfreq_simple_ondemand_data {
* using governors except for passive governor.
* If the devfreq device has the specific method to decide
* the next frequency, should use this callback.
- * @this: the devfreq instance of own device.
- * @nb: the notifier block for DEVFREQ_TRANSITION_NOTIFIER list
+ * @parent_type: the parent type of the device.
+ * @this: the devfreq instance of own device.
+ * @nb: the notifier block for DEVFREQ_TRANSITION_NOTIFIER or
+ * CPUFREQ_TRANSITION_NOTIFIER list.
+ * @cpu_data_list: the list of cpu frequency data for all cpufreq_policy.
*
* The devfreq_passive_data has to set the devfreq instance of the parent
* device with governors except for the passive governor. But, don't need to
@@ -274,17 +327,20 @@ struct devfreq_passive_data {
/* Optional callback to decide the next frequency of the passive device */
int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+ /* Should set the type of parent device */
+ enum devfreq_parent_dev_type parent_type;
+
/* For passive governor's internal use. Don't need to set them */
struct devfreq *this;
struct notifier_block nb;
+ struct list_head cpu_data_list;
};
-#endif
-#else /* !CONFIG_PM_DEVFREQ */
+#if !defined(CONFIG_PM_DEVFREQ)
static inline struct devfreq *devfreq_add_device(struct device *dev,
- struct devfreq_dev_profile *profile,
- const char *governor_name,
- void *data)
+ struct devfreq_dev_profile *profile,
+ const char *governor_name,
+ void *data)
{
return ERR_PTR(-ENOSYS);
}
@@ -317,32 +373,35 @@ static inline int devfreq_resume_device(struct devfreq *devfreq)
return 0;
}
+static inline void devfreq_suspend(void) {}
+static inline void devfreq_resume(void) {}
+
static inline struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
- unsigned long *freq, u32 flags)
+ unsigned long *freq, u32 flags)
{
return ERR_PTR(-EINVAL);
}
static inline int devfreq_register_opp_notifier(struct device *dev,
- struct devfreq *devfreq)
+ struct devfreq *devfreq)
{
return -EINVAL;
}
static inline int devfreq_unregister_opp_notifier(struct device *dev,
- struct devfreq *devfreq)
+ struct devfreq *devfreq)
{
return -EINVAL;
}
static inline int devm_devfreq_register_opp_notifier(struct device *dev,
- struct devfreq *devfreq)
+ struct devfreq *devfreq)
{
return -EINVAL;
}
static inline void devm_devfreq_unregister_opp_notifier(struct device *dev,
- struct devfreq *devfreq)
+ struct devfreq *devfreq)
{
}
@@ -361,22 +420,27 @@ static inline int devfreq_unregister_notifier(struct devfreq *devfreq,
}
static inline int devm_devfreq_register_notifier(struct device *dev,
- struct devfreq *devfreq,
- struct notifier_block *nb,
- unsigned int list)
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
{
return 0;
}
static inline void devm_devfreq_unregister_notifier(struct device *dev,
- struct devfreq *devfreq,
- struct notifier_block *nb,
- unsigned int list)
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
{
}
+static inline struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
- int index)
+ const char *phandle_name, int index)
{
return ERR_PTR(-ENODEV);
}
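
With the governor names exported as constants, a registration sketch looks like this (my_* names hypothetical; the profile's target()/get_dev_status() callbacks are assumed to be filled in elsewhere):

#include <linux/devfreq.h>
#include <linux/err.h>

static struct devfreq_simple_ondemand_data my_gov_data = {
	.upthreshold = 80,		/* raise frequency above 80% load */
	.downdifferential = 10,
};

/* target()/get_dev_status() assumed to be set up elsewhere */
static struct devfreq_dev_profile my_profile;

static int my_devfreq_init(struct device *dev)
{
	struct devfreq *df;

	df = devm_devfreq_add_device(dev, &my_profile,
				     DEVFREQ_GOV_SIMPLE_ONDEMAND,
				     &my_gov_data);
	return PTR_ERR_OR_ZERO(df);
}
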
diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h
index 4635f95000a4..14baa73fc2de 100644
--- a/include/linux/devfreq_cooling.h
+++ b/include/linux/devfreq_cooling.h
@@ -1,17 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* devfreq_cooling: Thermal cooling device implementation for devices using
* devfreq
*
* Copyright (C) 2014-2015 ARM Limited
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DEVFREQ_COOLING_H__
@@ -23,17 +16,6 @@
/**
* struct devfreq_cooling_power - Devfreq cooling power ops
- * @get_static_power: Take voltage, in mV, and return the static power
- * in mW. If NULL, the static power is assumed
- * to be 0.
- * @get_dynamic_power: Take voltage, in mV, and frequency, in HZ, and
- * return the dynamic power draw in mW. If NULL,
- * a simple power model is used.
- * @dyn_power_coeff: Coefficient for the simple dynamic power model in
- * mW/(MHz mV mV).
- * If get_dynamic_power() is NULL, then the
- * dynamic power is calculated as
- * @dyn_power_coeff * frequency * voltage^2
* @get_real_power: When this is set, the framework uses it to ask the
* device driver for the actual power.
* Some devices have more sophisticated methods
@@ -53,14 +35,8 @@
* max total (static + dynamic) power value for each OPP.
*/
struct devfreq_cooling_power {
- unsigned long (*get_static_power)(struct devfreq *devfreq,
- unsigned long voltage);
- unsigned long (*get_dynamic_power)(struct devfreq *devfreq,
- unsigned long freq,
- unsigned long voltage);
int (*get_real_power)(struct devfreq *df, u32 *power,
unsigned long freq, unsigned long voltage);
- unsigned long dyn_power_coeff;
};
#ifdef CONFIG_DEVFREQ_THERMAL
@@ -72,10 +48,13 @@ struct thermal_cooling_device *
of_devfreq_cooling_register(struct device_node *np, struct devfreq *df);
struct thermal_cooling_device *devfreq_cooling_register(struct devfreq *df);
void devfreq_cooling_unregister(struct thermal_cooling_device *dfc);
+struct thermal_cooling_device *
+devfreq_cooling_em_register(struct devfreq *df,
+ struct devfreq_cooling_power *dfc_power);
#else /* !CONFIG_DEVFREQ_THERMAL */
-struct thermal_cooling_device *
+static inline struct thermal_cooling_device *
of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
struct devfreq_cooling_power *dfc_power)
{
@@ -94,6 +73,13 @@ devfreq_cooling_register(struct devfreq *df)
return ERR_PTR(-EINVAL);
}
+static inline struct thermal_cooling_device *
+devfreq_cooling_em_register(struct devfreq *df,
+ struct devfreq_cooling_power *dfc_power)
+{
+ return ERR_PTR(-EINVAL);
+}
+
static inline void
devfreq_cooling_unregister(struct thermal_cooling_device *dfc)
{
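As a usage sketch (not part of this patch), a driver that keeps only the
remaining get_real_power callback can pair it with the new energy-model based
registration; my_get_real_power and my_read_power_mw() are hypothetical:

static int my_get_real_power(struct devfreq *df, u32 *power,
			     unsigned long freq, unsigned long voltage)
{
	*power = my_read_power_mw();	/* hypothetical hardware readout, in mW */
	return 0;
}

static struct devfreq_cooling_power my_cooling_power = {
	.get_real_power = my_get_real_power,
};

/* In probe, after devfreq_add_device() has succeeded: */
cdev = devfreq_cooling_em_register(df, &my_cooling_power);
if (IS_ERR(cdev))
	return PTR_ERR(cdev);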
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 1473455d0341..a52d2b9a6846 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2001 Sistina Software (UK) Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -10,14 +11,17 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/dm-ioctl.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
struct dm_dev;
struct dm_target;
struct dm_table;
+struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;
+enum dax_access_mode;
/*
* Type of table, mapped_device's mempool and request_queue
@@ -26,11 +30,10 @@ enum dm_queue_mode {
DM_TYPE_NONE = 0,
DM_TYPE_BIO_BASED = 1,
DM_TYPE_REQUEST_BASED = 2,
- DM_TYPE_MQ_REQUEST_BASED = 3,
- DM_TYPE_DAX_BIO_BASED = 4,
+ DM_TYPE_DAX_BIO_BASED = 3,
};
-typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
+typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t;
union map_info {
void *ptr;
@@ -61,7 +64,8 @@ typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
struct request *rq,
union map_info *map_context,
struct request **clone);
-typedef void (*dm_release_clone_request_fn) (struct request *clone);
+typedef void (*dm_release_clone_request_fn) (struct request *clone,
+ union map_info *map_context);
/*
* Returns:
@@ -84,12 +88,25 @@ typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);
typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
- unsigned status_flags, char *result, unsigned maxlen);
+ unsigned int status_flags, char *result, unsigned int maxlen);
-typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
+typedef int (*dm_message_fn) (struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen);
-typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti,
- struct block_device **bdev, fmode_t *mode);
+typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+typedef int (*dm_report_zones_fn) (struct dm_target *ti,
+ struct dm_report_zones_args *args,
+ unsigned int nr_zones);
+#else
+/*
+ * Define dm_report_zones_fn so that targets can set it to NULL when
+ * CONFIG_BLK_DEV_ZONED is disabled. Otherwise each target would need
+ * awkward #ifdefs in its target_type, etc.
+ */
+typedef int (*dm_report_zones_fn) (struct dm_target *dummy);
+#endif
/*
* These iteration functions are typically used to check (and combine)
@@ -131,12 +148,18 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
* >= 0 : the number of bytes accessible at the address
*/
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn);
-typedef size_t (*dm_dax_copy_from_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn);
+typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
+ size_t nr_pages);
+
+/*
+ * Returns:
+ * != 0 : number of bytes transferred
+ * 0 : recovery write failed
+ */
+typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *ti, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i);
-typedef void (*dm_dax_flush_fn)(struct dm_target *ti, pgoff_t pgoff, void *addr,
- size_t size);
-#define PAGE_SECTORS (PAGE_SIZE / 512)
void dm_error(const char *message);
@@ -165,7 +188,7 @@ struct target_type {
uint64_t features;
const char *name;
struct module *module;
- unsigned version[3];
+ unsigned int version[3];
dm_ctr_fn ctr;
dm_dtr_fn dtr;
dm_map_fn map;
@@ -181,12 +204,13 @@ struct target_type {
dm_status_fn status;
dm_message_fn message;
dm_prepare_ioctl_fn prepare_ioctl;
+ dm_report_zones_fn report_zones;
dm_busy_fn busy;
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;
dm_dax_direct_access_fn direct_access;
- dm_dax_copy_from_iter_fn dax_copy_from_iter;
- dm_dax_flush_fn dax_flush;
+ dm_dax_zero_page_range_fn dax_zero_page_range;
+ dm_dax_recovery_write_fn dax_recovery_write;
/* For internal device-mapper use. */
struct list_head list;
@@ -224,14 +248,6 @@ struct target_type {
#define dm_target_is_wildcard(type) ((type)->features & DM_TARGET_WILDCARD)
/*
- * Some targets need to be sent the same WRITE bio severals times so
- * that they can send copies of it to different devices. This function
- * examines any supplied bio and returns the number of copies of it the
- * target requires.
- */
-typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);
-
-/*
 * A target implements its own bio data integrity.
*/
#define DM_TARGET_INTEGRITY 0x00000010
@@ -244,10 +260,40 @@ typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio)
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)
/*
- * Indicates that a target supports host-managed zoned block devices.
+ * Indicates support for zoned block devices:
+ * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
+ * block devices but does not support combining different zoned models.
+ * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
+ * devices with different zoned models.
*/
+#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_ZONED_HM 0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
+#else
+#define DM_TARGET_ZONED_HM 0x00000000
+#define dm_target_supports_zoned_hm(type) (false)
+#endif
+
+/*
+ * A target handles REQ_NOWAIT
+ */
+#define DM_TARGET_NOWAIT 0x00000080
+#define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)
+
+/*
+ * A target supports passing through inline crypto support.
+ */
+#define DM_TARGET_PASSES_CRYPTO 0x00000100
+#define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)
+
+#ifdef CONFIG_BLK_DEV_ZONED
+#define DM_TARGET_MIXED_ZONED_MODEL 0x00000200
+#define dm_target_supports_mixed_zoned_model(type) \
+ ((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
+#else
+#define DM_TARGET_MIXED_ZONED_MODEL 0x00000000
+#define dm_target_supports_mixed_zoned_model(type) (false)
+#endif
struct dm_target {
struct dm_table *table;
@@ -268,38 +314,31 @@ struct dm_target {
* It is a responsibility of the target driver to remap these bios
* to the real underlying devices.
*/
- unsigned num_flush_bios;
+ unsigned int num_flush_bios;
/*
* The number of discard bios that will be submitted to the target.
* The bio number can be accessed with dm_bio_get_target_bio_nr.
*/
- unsigned num_discard_bios;
+ unsigned int num_discard_bios;
/*
- * The number of WRITE SAME bios that will be submitted to the target.
+ * The number of secure erase bios that will be submitted to the target.
* The bio number can be accessed with dm_bio_get_target_bio_nr.
*/
- unsigned num_write_same_bios;
+ unsigned int num_secure_erase_bios;
/*
* The number of WRITE ZEROES bios that will be submitted to the target.
* The bio number can be accessed with dm_bio_get_target_bio_nr.
*/
- unsigned num_write_zeroes_bios;
+ unsigned int num_write_zeroes_bios;
/*
* The minimum number of extra bytes allocated in each io for the
* target to use.
*/
- unsigned per_io_data_size;
-
- /*
- * If defined, this function is called to find out how many
- * duplicate bios should be sent to the target when writing
- * data.
- */
- dm_num_write_bios_fn num_write_bios;
+ unsigned int per_io_data_size;
/* target specific data */
void *private;
@@ -320,47 +359,52 @@ struct dm_target {
bool discards_supported:1;
/*
- * Set if the target required discard bios to be split
- * on max_io_len boundary.
+ * Set if this target requires that discards be split on
+ * 'max_discard_sectors' boundaries.
*/
- bool split_discard_bios:1;
-};
+ bool max_discard_granularity:1;
-/* Each target can link one of these into the table */
-struct dm_target_callbacks {
- struct list_head list;
- int (*congested_fn) (struct dm_target_callbacks *, int);
-};
+ /*
+ * Set if this target requires that secure_erases be split on
+ * 'max_secure_erase_sectors' boundaries.
+ */
+ bool max_secure_erase_granularity:1;
-/*
- * For bio-based dm.
- * One of these is allocated for each bio.
- * This structure shouldn't be touched directly by target drivers.
- * It is here so that we can inline dm_per_bio_data and
- * dm_bio_from_per_bio_data
- */
-struct dm_target_io {
- struct dm_io *io;
- struct dm_target *ti;
- unsigned target_bio_nr;
- unsigned *len_ptr;
- struct bio clone;
-};
+ /*
+ * Set if this target requires that write_zeroes be split on
+ * 'max_write_zeroes_sectors' boundaries.
+ */
+ bool max_write_zeroes_granularity:1;
-static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
-{
- return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
-}
+ /*
+ * Set if we need to limit the number of in-flight bios when swapping.
+ */
+ bool limit_swap_bios:1;
-static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
-{
- return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
-}
+ /*
+ * Set if this target implements a zoned device and needs emulation of
+ * zone append operations using regular writes.
+ */
+ bool emulate_zone_append:1;
-static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
-{
- return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
-}
+ /*
+ * Set if the target will submit IO using dm_submit_bio_remap()
+ * after returning DM_MAPIO_SUBMITTED from its map function.
+ */
+ bool accounts_remapped_io:1;
+
+ /*
+ * Set if the target will submit the DM bio without first calling
+ * bio_set_dev(). NOTE: ideally a target should _not_ need this.
+ */
+ bool needs_bio_set_dev:1;
+};
+
+void *dm_per_bio_data(struct bio *bio, size_t data_size);
+struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
+unsigned int dm_bio_get_target_bio_nr(const struct bio *bio);
+
+u64 dm_start_time_ns_from_clone(struct bio *bio);
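The per-bio accessors are now out of line, but the calling pattern is
unchanged: reserve space in the constructor, then recover it on the I/O path.
A sketch, with struct my_per_bio and the map callback hypothetical:

struct my_per_bio {
	unsigned long start_jiffies;
};

/* In the target's ctr: */
ti->per_io_data_size = sizeof(struct my_per_bio);

/* In the target's map callback: */
static int my_map(struct dm_target *ti, struct bio *bio)
{
	struct my_per_bio *pb = dm_per_bio_data(bio, sizeof(struct my_per_bio));

	pb->start_jiffies = jiffies;
	return DM_MAPIO_REMAPPED;
}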
int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
@@ -369,7 +413,7 @@ void dm_unregister_target(struct target_type *t);
* Target argument parsing.
*/
struct dm_arg_set {
- unsigned argc;
+ unsigned int argc;
char **argv;
};
@@ -378,8 +422,8 @@ struct dm_arg_set {
* the error message to use if the number is found to be outside that range.
*/
struct dm_arg {
- unsigned min;
- unsigned max;
+ unsigned int min;
+ unsigned int max;
char *error;
};
@@ -387,16 +431,16 @@ struct dm_arg {
* Validate the next argument, either returning it as *value or, if invalid,
* returning -EINVAL and setting *error.
*/
-int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
- unsigned *value, char **error);
+int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
+ unsigned int *value, char **error);
/*
* Process the next argument as the start of a group containing between
* arg->min and arg->max further arguments. Either return the size as
* *num_args or, if invalid, return -EINVAL and set *error.
*/
-int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
- unsigned *num_args, char **error);
+int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
+ unsigned int *num_args, char **error);
/*
* Return the current argument and shift to the next.
@@ -406,12 +450,14 @@ const char *dm_shift_arg(struct dm_arg_set *as);
/*
* Move through num_args arguments.
*/
-void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
+void dm_consume_args(struct dm_arg_set *as, unsigned int num_args);
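With the now-const dm_arg helpers, the common parsing pattern looks like this
sketch (the bounds, the error messages and the feature keyword are
illustrative):

static const struct dm_arg _args[] = {
	{ 0, 4, "Invalid number of feature arguments" },
};

static int parse_features(struct dm_arg_set *as, struct dm_target *ti)
{
	unsigned int argc;
	int r;

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return r;

	while (argc--) {
		const char *arg_name = dm_shift_arg(as);

		if (!strcasecmp(arg_name, "my_feature"))
			continue;	/* handle the hypothetical feature */
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}
	return 0;
}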
-/*-----------------------------------------------------------------
+/*
+ *----------------------------------------------------------------
* Functions for creating and manipulating mapped devices.
* Drop the reference with dm_put when you finish with the object.
- *---------------------------------------------------------------*/
+ *----------------------------------------------------------------
+ */
/*
* DM_ANY_MINOR chooses the next available minor number.
@@ -436,7 +482,7 @@ void *dm_get_mdptr(struct mapped_device *md);
/*
* A device can still be used while suspended, but I/O is deferred.
*/
-int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
+int dm_suspend(struct mapped_device *md, unsigned int suspend_flags);
int dm_resume(struct mapped_device *md);
/*
@@ -454,13 +500,35 @@ const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
+int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
-void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
-void dm_remap_zone_report(struct dm_target *ti, struct bio *bio,
- sector_t start);
+void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors);
+void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
union map_info *dm_get_rq_mapinfo(struct request *rq);
-struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
+#ifdef CONFIG_BLK_DEV_ZONED
+struct dm_report_zones_args {
+ struct dm_target *tgt;
+ sector_t next_sector;
+
+ void *orig_data;
+ report_zones_cb orig_cb;
+ unsigned int zone_idx;
+
+ /* must be filled by ->report_zones before calling dm_report_zones_cb */
+ sector_t start;
+};
+int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
+ struct dm_report_zones_args *args, unsigned int nr_zones);
+#endif /* CONFIG_BLK_DEV_ZONED */
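A pass-through target can implement its report_zones hook by delegating to
dm_report_zones(); the sketch below is modeled on a linear-style target, with
struct my_target_ctx hypothetical:

static int my_report_zones(struct dm_target *ti,
			   struct dm_report_zones_args *args,
			   unsigned int nr_zones)
{
	struct my_target_ctx *ctx = ti->private;

	/* dm_report_zones() fills args->start before running the callback. */
	return dm_report_zones(ctx->dev->bdev, ctx->start,
			       ctx->start + dm_target_offset(ti, args->next_sector),
			       args, nr_zones);
}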
+
+/*
+ * Device mapper functions to parse and create devices specified by the
+ * parameter "dm-mod.create="
+ */
+int __init dm_early_create(struct dm_ioctl *dmi,
+ struct dm_target_spec **spec_array,
+ char **target_params_array);
/*
* Geometry functions.
@@ -468,15 +536,17 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Functions for manipulating device-mapper tables.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
/*
* First create an empty table.
*/
int dm_table_create(struct dm_table **result, fmode_t mode,
- unsigned num_targets, struct mapped_device *md);
+ unsigned int num_targets, struct mapped_device *md);
/*
* Then call this once for each target.
@@ -485,11 +555,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
sector_t start, sector_t len, char *params);
/*
- * Target_ctr should call this if it needs to add any callbacks.
- */
-void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);
-
-/*
* Target can use this to set the table's type.
* Can only ever be called from a target's ctr.
* Useful for "hybrid" target (supports both bio-based
@@ -503,6 +568,11 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);
int dm_table_complete(struct dm_table *t);
/*
+ * Destroy the table when finished.
+ */
+void dm_table_destroy(struct dm_table *t);
+
+/*
* Target may require that it is never sent I/O larger than len.
*/
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);
@@ -518,9 +588,9 @@ void dm_sync_table(struct mapped_device *md);
* Queries
*/
sector_t dm_table_get_size(struct dm_table *t);
-unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
+const char *dm_table_device_name(struct dm_table *t);
/*
* Trigger an event.
@@ -540,64 +610,56 @@ struct dm_table *dm_swap_table(struct mapped_device *md,
struct dm_table *t);
/*
- * A wrapper around vmalloc.
+ * Table blk_crypto_profile functions
*/
-void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
+void dm_destroy_crypto_profile(struct blk_crypto_profile *profile);
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Macros.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
#define DM_NAME "device-mapper"
-#ifdef CONFIG_PRINTK
-extern struct ratelimit_state dm_ratelimit_state;
-
-#define dm_ratelimit() __ratelimit(&dm_ratelimit_state)
-#else
-#define dm_ratelimit() 0
-#endif
-
#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"
#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMERR_LIMIT(fmt, ...) \
-do { \
- if (dm_ratelimit()) \
- DMERR(fmt, ##__VA_ARGS__); \
-} while (0)
-
+#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMWARN_LIMIT(fmt, ...) \
-do { \
- if (dm_ratelimit()) \
- DMWARN(fmt, ##__VA_ARGS__); \
-} while (0)
-
+#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMINFO_LIMIT(fmt, ...) \
-do { \
- if (dm_ratelimit()) \
- DMINFO(fmt, ##__VA_ARGS__); \
-} while (0)
-
-#ifdef CONFIG_DM_DEBUG
-#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__)
-#define DMDEBUG_LIMIT(fmt, ...) \
-do { \
- if (dm_ratelimit()) \
- DMDEBUG(fmt, ##__VA_ARGS__); \
-} while (0)
-#else
-#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
+#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
+
+#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
+#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
- 0 : scnprintf(result + sz, maxlen - sz, x))
+#define DMEMIT(x...) (sz += ((sz >= maxlen) ? 0 : scnprintf(result + sz, maxlen - sz, x)))
-#define SECTOR_SHIFT 9
+#define DMEMIT_TARGET_NAME_VERSION(y) \
+ DMEMIT("target_name=%s,target_version=%u.%u.%u", \
+ (y)->name, (y)->version[0], (y)->version[1], (y)->version[2])
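A status callback emits into the caller's buffer through DMEMIT(), which
expects 'sz', 'result' and 'maxlen' in scope; with the STATUSTYPE_IMA case
added above, a sketch (my_target_ctx and its fields hypothetical) could be:

static void my_status(struct dm_target *ti, status_type_t type,
		      unsigned int status_flags, char *result,
		      unsigned int maxlen)
{
	struct my_target_ctx *ctx = ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;
	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu", ctx->dev->name,
		       (unsigned long long)ctx->start);
		break;
	case STATUSTYPE_IMA:
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		break;
	}
}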
+
+/**
+ * module_dm() - Helper macro for DM targets that don't do anything
+ * special in their module_init and module_exit.
+ * Each module may only use this macro once, and calling it replaces
+ * module_init() and module_exit().
+ *
+ * @name: DM target's name
+ */
+#define module_dm(name) \
+static int __init dm_##name##_init(void) \
+{ \
+ return dm_register_target(&(name##_target)); \
+} \
+module_init(dm_##name##_init) \
+static void __exit dm_##name##_exit(void) \
+{ \
+ dm_unregister_target(&(name##_target)); \
+} \
+module_exit(dm_##name##_exit)
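With module_dm(), a single-target module reduces to its target_type
definition. As a sketch, with "zero" standing in for the target name and the
callbacks assumed to exist elsewhere:

static struct target_type zero_target = {
	.name     = "zero",
	.version  = {1, 1, 0},
	.features = DM_TARGET_NOWAIT,	/* this target handles REQ_NOWAIT */
	.module   = THIS_MODULE,
	.ctr      = zero_ctr,
	.dtr      = zero_dtr,
	.map      = zero_map,
};
module_dm(zero);	/* registers and unregisters &zero_target */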
/*
* Definitions of return values from target end_io function.
@@ -605,6 +667,7 @@ do { \
#define DM_ENDIO_DONE 0
#define DM_ENDIO_INCOMPLETE 1
#define DM_ENDIO_REQUEUE 2
+#define DM_ENDIO_DELAY_REQUEUE 3
/*
* Definitions of return values from target map function.
@@ -612,7 +675,7 @@ do { \
#define DM_MAPIO_SUBMITTED 0
#define DM_MAPIO_REMAPPED 1
#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE
-#define DM_MAPIO_DELAY_REQUEUE 3
+#define DM_MAPIO_DELAY_REQUEUE DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL 4
#define dm_sector_div64(x, y)( \
@@ -641,16 +704,13 @@ do { \
*/
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
-#define dm_array_too_big(fixed, obj, num) \
- ((num) > (UINT_MAX - (fixed)) / (obj))
-
/*
* Sector offset taken relative to the start of the target instead of
* relative to the start of the device.
*/
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)
-static inline sector_t to_sector(unsigned long n)
+static inline sector_t to_sector(unsigned long long n)
{
return (n >> SECTOR_SHIFT);
}
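dm_target_offset() is the building block of the classic linear remap in a map
callback; a sketch with a hypothetical per-target context:

static int linear_style_map(struct dm_target *ti, struct bio *bio)
{
	struct my_target_ctx *ctx = ti->private;

	bio_set_dev(bio, ctx->dev->bdev);
	/* Translate the target-relative sector onto the underlying device. */
	bio->bi_iter.bi_sector =
		ctx->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
	return DM_MAPIO_REMAPPED;
}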
diff --git a/include/linux/device.h b/include/linux/device.h
index 723cd54b94da..472dd24d4823 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* device.h - generic, centralized driver model
*
@@ -5,14 +6,14 @@
* Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2008-2009 Novell Inc.
*
- * This file is released under the GPLv2
- *
- * See Documentation/driver-model/ for more information.
+ * See Documentation/driver-api/driver-model/ for more information.
*/
#ifndef _DEVICE_H_
#define _DEVICE_H_
+#include <linux/dev_printk.h>
+#include <linux/energy_model.h>
#include <linux/ioport.h>
#include <linux/kobject.h>
#include <linux/klist.h>
@@ -21,12 +22,14 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/mutex.h>
-#include <linux/pinctrl/devinfo.h>
#include <linux/pm.h>
#include <linux/atomic.h>
-#include <linux/ratelimit.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
+#include <linux/overflow.h>
+#include <linux/device/bus.h>
+#include <linux/device/class.h>
+#include <linux/device/driver.h>
#include <asm/device.h>
struct device;
@@ -36,304 +39,18 @@ struct driver_private;
struct module;
struct class;
struct subsys_private;
-struct bus_type;
struct device_node;
struct fwnode_handle;
struct iommu_ops;
struct iommu_group;
-struct iommu_fwspec;
-
-struct bus_attribute {
- struct attribute attr;
- ssize_t (*show)(struct bus_type *bus, char *buf);
- ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
-};
-
-#define BUS_ATTR(_name, _mode, _show, _store) \
- struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
-#define BUS_ATTR_RW(_name) \
- struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
-#define BUS_ATTR_RO(_name) \
- struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
-
-extern int __must_check bus_create_file(struct bus_type *,
- struct bus_attribute *);
-extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
-
-/**
- * struct bus_type - The bus type of the device
- *
- * @name: The name of the bus.
- * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id).
- * @dev_root: Default device to use as the parent.
- * @bus_groups: Default attributes of the bus.
- * @dev_groups: Default attributes of the devices on the bus.
- * @drv_groups: Default attributes of the device drivers on the bus.
- * @match: Called, perhaps multiple times, whenever a new device or driver
- * is added for this bus. It should return a positive value if the
- * given device can be handled by the given driver and zero
- * otherwise. It may also return error code if determining that
- * the driver supports the device is not possible. In case of
- * -EPROBE_DEFER it will queue the device for deferred probing.
- * @uevent: Called when a device is added, removed, or a few other things
- * that generate uevents to add the environment variables.
- * @probe: Called when a new device or driver add to this bus, and callback
- * the specific driver's probe to initial the matched device.
- * @remove: Called when a device removed from this bus.
- * @shutdown: Called at shut-down time to quiesce the device.
- *
- * @online: Called to put the device back online (after offlining it).
- * @offline: Called to put the device offline for hot-removal. May fail.
- *
- * @suspend: Called when a device on this bus wants to go to sleep mode.
- * @resume: Called to bring a device on this bus out of sleep mode.
- * @num_vf: Called to find out how many virtual functions a device on this
- * bus supports.
- * @pm: Power management operations of this bus, callback the specific
- * device driver's pm-ops.
- * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
- * driver implementations to a bus and allow the driver to do
- * bus-specific setup
- * @p: The private data of the driver core, only the driver core can
- * touch this.
- * @lock_key: Lock class key for use by the lock validator
- *
- * A bus is a channel between the processor and one or more devices. For the
- * purposes of the device model, all devices are connected via a bus, even if
- * it is an internal, virtual, "platform" bus. Buses can plug into each other.
- * A USB controller is usually a PCI device, for example. The device model
- * represents the actual connections between buses and the devices they control.
- * A bus is represented by the bus_type structure. It contains the name, the
- * default attributes, the bus' methods, PM operations, and the driver core's
- * private data.
- */
-struct bus_type {
- const char *name;
- const char *dev_name;
- struct device *dev_root;
- const struct attribute_group **bus_groups;
- const struct attribute_group **dev_groups;
- const struct attribute_group **drv_groups;
-
- int (*match)(struct device *dev, struct device_driver *drv);
- int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
- int (*probe)(struct device *dev);
- int (*remove)(struct device *dev);
- void (*shutdown)(struct device *dev);
-
- int (*online)(struct device *dev);
- int (*offline)(struct device *dev);
-
- int (*suspend)(struct device *dev, pm_message_t state);
- int (*resume)(struct device *dev);
-
- int (*num_vf)(struct device *dev);
-
- const struct dev_pm_ops *pm;
-
- const struct iommu_ops *iommu_ops;
-
- struct subsys_private *p;
- struct lock_class_key lock_key;
-};
-
-extern int __must_check bus_register(struct bus_type *bus);
-
-extern void bus_unregister(struct bus_type *bus);
-
-extern int __must_check bus_rescan_devices(struct bus_type *bus);
-
-/* iterator helpers for buses */
-struct subsys_dev_iter {
- struct klist_iter ki;
- const struct device_type *type;
-};
-void subsys_dev_iter_init(struct subsys_dev_iter *iter,
- struct bus_type *subsys,
- struct device *start,
- const struct device_type *type);
-struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
-void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
-
-int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
- int (*fn)(struct device *dev, void *data));
-struct device *bus_find_device(struct bus_type *bus, struct device *start,
- void *data,
- int (*match)(struct device *dev, void *data));
-struct device *bus_find_device_by_name(struct bus_type *bus,
- struct device *start,
- const char *name);
-struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
- struct device *hint);
-int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
- void *data, int (*fn)(struct device_driver *, void *));
-void bus_sort_breadthfirst(struct bus_type *bus,
- int (*compare)(const struct device *a,
- const struct device *b));
-/*
- * Bus notifiers: Get notified of addition/removal of devices
- * and binding/unbinding of drivers to devices.
- * In the long run, it should be a replacement for the platform
- * notify hooks.
- */
-struct notifier_block;
-
-extern int bus_register_notifier(struct bus_type *bus,
- struct notifier_block *nb);
-extern int bus_unregister_notifier(struct bus_type *bus,
- struct notifier_block *nb);
-
-/* All 4 notifers below get called with the target struct device *
- * as an argument. Note that those functions are likely to be called
- * with the device lock held in the core, so be careful.
- */
-#define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */
-#define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */
-#define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */
-#define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be
- bound */
-#define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */
-#define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be
- unbound */
-#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound
- from the device */
-#define BUS_NOTIFY_DRIVER_NOT_BOUND 0x00000008 /* driver fails to be bound */
-
-extern struct kset *bus_get_kset(struct bus_type *bus);
-extern struct klist *bus_get_device_klist(struct bus_type *bus);
-
-/**
- * enum probe_type - device driver probe type to try
- * Device drivers may opt in for special handling of their
- * respective probe routines. This tells the core what to
- * expect and prefer.
- *
- * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well
- * whether probed synchronously or asynchronously.
- * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices which
- * probing order is not essential for booting the system may
- * opt into executing their probes asynchronously.
- * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need
- * their probe routines to run synchronously with driver and
- * device registration (with the exception of -EPROBE_DEFER
- * handling - re-probing always ends up being done asynchronously).
- *
- * Note that the end goal is to switch the kernel to use asynchronous
- * probing by default, so annotating drivers with
- * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us
- * to speed up boot process while we are validating the rest of the
- * drivers.
- */
-enum probe_type {
- PROBE_DEFAULT_STRATEGY,
- PROBE_PREFER_ASYNCHRONOUS,
- PROBE_FORCE_SYNCHRONOUS,
-};
-
-/**
- * struct device_driver - The basic device driver structure
- * @name: Name of the device driver.
- * @bus: The bus which the device of this driver belongs to.
- * @owner: The module owner.
- * @mod_name: Used for built-in modules.
- * @suppress_bind_attrs: Disables bind/unbind via sysfs.
- * @probe_type: Type of the probe (synchronous or asynchronous) to use.
- * @of_match_table: The open firmware table.
- * @acpi_match_table: The ACPI match table.
- * @probe: Called to query the existence of a specific device,
- * whether this driver can work with it, and bind the driver
- * to a specific device.
- * @remove: Called when the device is removed from the system to
- * unbind a device from this driver.
- * @shutdown: Called at shut-down time to quiesce the device.
- * @suspend: Called to put the device to sleep mode. Usually to a
- * low power state.
- * @resume: Called to bring a device from sleep mode.
- * @groups: Default attributes that get created by the driver core
- * automatically.
- * @pm: Power management operations of the device which matched
- * this driver.
- * @p: Driver core's private data, no one other than the driver
- * core can touch this.
- *
- * The device driver-model tracks all of the drivers known to the system.
- * The main reason for this tracking is to enable the driver core to match
- * up drivers with new devices. Once drivers are known objects within the
- * system, however, a number of other things become possible. Device drivers
- * can export information and configuration variables that are independent
- * of any specific device.
- */
-struct device_driver {
- const char *name;
- struct bus_type *bus;
-
- struct module *owner;
- const char *mod_name; /* used for built-in modules */
-
- bool suppress_bind_attrs; /* disables bind/unbind via sysfs */
- enum probe_type probe_type;
-
- const struct of_device_id *of_match_table;
- const struct acpi_device_id *acpi_match_table;
-
- int (*probe) (struct device *dev);
- int (*remove) (struct device *dev);
- void (*shutdown) (struct device *dev);
- int (*suspend) (struct device *dev, pm_message_t state);
- int (*resume) (struct device *dev);
- const struct attribute_group **groups;
-
- const struct dev_pm_ops *pm;
-
- struct driver_private *p;
-};
-
-
-extern int __must_check driver_register(struct device_driver *drv);
-extern void driver_unregister(struct device_driver *drv);
-
-extern struct device_driver *driver_find(const char *name,
- struct bus_type *bus);
-extern int driver_probe_done(void);
-extern void wait_for_device_probe(void);
-
-
-/* sysfs interface for exporting driver attributes */
-
-struct driver_attribute {
- struct attribute attr;
- ssize_t (*show)(struct device_driver *driver, char *buf);
- ssize_t (*store)(struct device_driver *driver, const char *buf,
- size_t count);
-};
-
-#define DRIVER_ATTR(_name, _mode, _show, _store) \
- struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store)
-#define DRIVER_ATTR_RW(_name) \
- struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
-#define DRIVER_ATTR_RO(_name) \
- struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
-#define DRIVER_ATTR_WO(_name) \
- struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
-
-extern int __must_check driver_create_file(struct device_driver *driver,
- const struct driver_attribute *attr);
-extern void driver_remove_file(struct device_driver *driver,
- const struct driver_attribute *attr);
-
-extern int __must_check driver_for_each_device(struct device_driver *drv,
- struct device *start,
- void *data,
- int (*fn)(struct device *dev,
- void *));
-struct device *driver_find_device(struct device_driver *drv,
- struct device *start, void *data,
- int (*match)(struct device *dev, void *data));
+struct dev_pin_info;
+struct dev_iommu;
+struct msi_device_data;
/**
* struct subsys_interface - interfaces to device functions
* @name: name of the device function
- * @subsys: subsytem of the devices to attach to
+ * @subsys: subsystem of the devices to attach to
* @node: the list of functions registered at the subsystem
* @add_dev: device hookup to device function handler
* @remove_dev: device hookup to device function handler
@@ -359,176 +76,6 @@ int subsys_system_register(struct bus_type *subsys,
int subsys_virtual_register(struct bus_type *subsys,
const struct attribute_group **groups);
-/**
- * struct class - device classes
- * @name: Name of the class.
- * @owner: The module owner.
- * @class_groups: Default attributes of this class.
- * @dev_groups: Default attributes of the devices that belong to the class.
- * @dev_kobj: The kobject that represents this class and links it into the hierarchy.
- * @dev_uevent: Called when a device is added, removed from this class, or a
- * few other things that generate uevents to add the environment
- * variables.
- * @devnode: Callback to provide the devtmpfs.
- * @class_release: Called to release this class.
- * @dev_release: Called to release the device.
- * @suspend: Used to put the device to sleep mode, usually to a low power
- * state.
- * @resume: Used to bring the device from the sleep mode.
- * @shutdown: Called at shut-down time to quiesce the device.
- * @ns_type: Callbacks so sysfs can detemine namespaces.
- * @namespace: Namespace of the device belongs to this class.
- * @pm: The default device power management operations of this class.
- * @p: The private data of the driver core, no one other than the
- * driver core can touch this.
- *
- * A class is a higher-level view of a device that abstracts out low-level
- * implementation details. Drivers may see a SCSI disk or an ATA disk, but,
- * at the class level, they are all simply disks. Classes allow user space
- * to work with devices based on what they do, rather than how they are
- * connected or how they work.
- */
-struct class {
- const char *name;
- struct module *owner;
-
- const struct attribute_group **class_groups;
- const struct attribute_group **dev_groups;
- struct kobject *dev_kobj;
-
- int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
- char *(*devnode)(struct device *dev, umode_t *mode);
-
- void (*class_release)(struct class *class);
- void (*dev_release)(struct device *dev);
-
- int (*suspend)(struct device *dev, pm_message_t state);
- int (*resume)(struct device *dev);
- int (*shutdown)(struct device *dev);
-
- const struct kobj_ns_type_operations *ns_type;
- const void *(*namespace)(struct device *dev);
-
- const struct dev_pm_ops *pm;
-
- struct subsys_private *p;
-};
-
-struct class_dev_iter {
- struct klist_iter ki;
- const struct device_type *type;
-};
-
-extern struct kobject *sysfs_dev_block_kobj;
-extern struct kobject *sysfs_dev_char_kobj;
-extern int __must_check __class_register(struct class *class,
- struct lock_class_key *key);
-extern void class_unregister(struct class *class);
-
-/* This is a #define to keep the compiler from merging different
- * instances of the __key variable */
-#define class_register(class) \
-({ \
- static struct lock_class_key __key; \
- __class_register(class, &__key); \
-})
-
-struct class_compat;
-struct class_compat *class_compat_register(const char *name);
-void class_compat_unregister(struct class_compat *cls);
-int class_compat_create_link(struct class_compat *cls, struct device *dev,
- struct device *device_link);
-void class_compat_remove_link(struct class_compat *cls, struct device *dev,
- struct device *device_link);
-
-extern void class_dev_iter_init(struct class_dev_iter *iter,
- struct class *class,
- struct device *start,
- const struct device_type *type);
-extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
-extern void class_dev_iter_exit(struct class_dev_iter *iter);
-
-extern int class_for_each_device(struct class *class, struct device *start,
- void *data,
- int (*fn)(struct device *dev, void *data));
-extern struct device *class_find_device(struct class *class,
- struct device *start, const void *data,
- int (*match)(struct device *, const void *));
-
-struct class_attribute {
- struct attribute attr;
- ssize_t (*show)(struct class *class, struct class_attribute *attr,
- char *buf);
- ssize_t (*store)(struct class *class, struct class_attribute *attr,
- const char *buf, size_t count);
-};
-
-#define CLASS_ATTR_RW(_name) \
- struct class_attribute class_attr_##_name = __ATTR_RW(_name)
-#define CLASS_ATTR_RO(_name) \
- struct class_attribute class_attr_##_name = __ATTR_RO(_name)
-#define CLASS_ATTR_WO(_name) \
- struct class_attribute class_attr_##_name = __ATTR_WO(_name)
-
-extern int __must_check class_create_file_ns(struct class *class,
- const struct class_attribute *attr,
- const void *ns);
-extern void class_remove_file_ns(struct class *class,
- const struct class_attribute *attr,
- const void *ns);
-
-static inline int __must_check class_create_file(struct class *class,
- const struct class_attribute *attr)
-{
- return class_create_file_ns(class, attr, NULL);
-}
-
-static inline void class_remove_file(struct class *class,
- const struct class_attribute *attr)
-{
- return class_remove_file_ns(class, attr, NULL);
-}
-
-/* Simple class attribute that is just a static string */
-struct class_attribute_string {
- struct class_attribute attr;
- char *str;
-};
-
-/* Currently read-only only */
-#define _CLASS_ATTR_STRING(_name, _mode, _str) \
- { __ATTR(_name, _mode, show_class_attr_string, NULL), _str }
-#define CLASS_ATTR_STRING(_name, _mode, _str) \
- struct class_attribute_string class_attr_##_name = \
- _CLASS_ATTR_STRING(_name, _mode, _str)
-
-extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
- char *buf);
-
-struct class_interface {
- struct list_head node;
- struct class *class;
-
- int (*add_dev) (struct device *, struct class_interface *);
- void (*remove_dev) (struct device *, struct class_interface *);
-};
-
-extern int __must_check class_interface_register(struct class_interface *);
-extern void class_interface_unregister(struct class_interface *);
-
-extern struct class * __must_check __class_create(struct module *owner,
- const char *name,
- struct lock_class_key *key);
-extern void class_destroy(struct class *cls);
-
-/* This is a #define to keep the compiler from merging different
- * instances of the __key variable */
-#define class_create(owner, name) \
-({ \
- static struct lock_class_key __key; \
- __class_create(owner, name, &__key); \
-})
-
/*
* The type of device, "struct device" is embedded in. A class
* or bus can contain devices of different types
@@ -541,8 +88,8 @@ extern void class_destroy(struct class *cls);
struct device_type {
const char *name;
const struct attribute_group **groups;
- int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
- char *(*devnode)(struct device *dev, umode_t *mode,
+ int (*uevent)(const struct device *dev, struct kobj_uevent_env *env);
+ char *(*devnode)(const struct device *dev, umode_t *mode,
kuid_t *uid, kgid_t *gid);
void (*release)(struct device *dev);
@@ -578,10 +125,17 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
#define DEVICE_ATTR(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+#define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \
+ struct device_attribute dev_attr_##_name = \
+ __ATTR_PREALLOC(_name, _mode, _show, _store)
#define DEVICE_ATTR_RW(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
+#define DEVICE_ATTR_ADMIN_RW(_name) \
+ struct device_attribute dev_attr_##_name = __ATTR_RW_MODE(_name, 0600)
#define DEVICE_ATTR_RO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
+#define DEVICE_ATTR_ADMIN_RO(_name) \
+ struct device_attribute dev_attr_##_name = __ATTR_RO_MODE(_name, 0400)
#define DEVICE_ATTR_WO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
#define DEVICE_ULONG_ATTR(_name, _mode, _var) \
@@ -597,68 +151,59 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
struct device_attribute dev_attr_##_name = \
__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
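The new ADMIN variants only tighten the default mode (0600/0400); declaring an
attribute is otherwise unchanged. A sketch with a hypothetical write-only
"reset" attribute:

static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	/* hypothetical: poke the hardware, then report the consumed length */
	return count;
}
static DEVICE_ATTR_WO(reset);	/* mode 0200, bound to reset_store() */

/*
 * A root-only read/write pair would instead use
 * static DEVICE_ATTR_ADMIN_RW(level); with level_show()/level_store().
 */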
-extern int device_create_file(struct device *device,
- const struct device_attribute *entry);
-extern void device_remove_file(struct device *dev,
- const struct device_attribute *attr);
-extern bool device_remove_file_self(struct device *dev,
- const struct device_attribute *attr);
-extern int __must_check device_create_bin_file(struct device *dev,
+int device_create_file(struct device *device,
+ const struct device_attribute *entry);
+void device_remove_file(struct device *dev,
+ const struct device_attribute *attr);
+bool device_remove_file_self(struct device *dev,
+ const struct device_attribute *attr);
+int __must_check device_create_bin_file(struct device *dev,
const struct bin_attribute *attr);
-extern void device_remove_bin_file(struct device *dev,
- const struct bin_attribute *attr);
+void device_remove_bin_file(struct device *dev,
+ const struct bin_attribute *attr);
/* device resource management */
typedef void (*dr_release_t)(struct device *dev, void *res);
typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
-#ifdef CONFIG_DEBUG_DEVRES
-extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
- int nid, const char *name) __malloc;
+void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
+ int nid, const char *name) __malloc;
#define devres_alloc(release, size, gfp) \
__devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
#define devres_alloc_node(release, size, gfp, nid) \
__devres_alloc_node(release, size, gfp, nid, #release)
-#else
-extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
- int nid) __malloc;
-static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
-{
- return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
-}
-#endif
-extern void devres_for_each_res(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data,
- void (*fn)(struct device *, void *, void *),
- void *data);
-extern void devres_free(void *res);
-extern void devres_add(struct device *dev, void *res);
-extern void *devres_find(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
-extern void *devres_get(struct device *dev, void *new_res,
- dr_match_t match, void *match_data);
-extern void *devres_remove(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
-extern int devres_destroy(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
-extern int devres_release(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
+void devres_for_each_res(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data,
+ void (*fn)(struct device *, void *, void *),
+ void *data);
+void devres_free(void *res);
+void devres_add(struct device *dev, void *res);
+void *devres_find(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data);
+void *devres_get(struct device *dev, void *new_res,
+ dr_match_t match, void *match_data);
+void *devres_remove(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data);
+int devres_destroy(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data);
+int devres_release(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data);
/* devres group */
-extern void * __must_check devres_open_group(struct device *dev, void *id,
- gfp_t gfp);
-extern void devres_close_group(struct device *dev, void *id);
-extern void devres_remove_group(struct device *dev, void *id);
-extern int devres_release_group(struct device *dev, void *id);
+void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp);
+void devres_close_group(struct device *dev, void *id);
+void devres_remove_group(struct device *dev, void *id);
+int devres_release_group(struct device *dev, void *id);
/* managed devm_k.alloc/kfree for device drivers */
-extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
-extern __printf(3, 0)
-char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
- va_list ap) __malloc;
-extern __printf(3, 4)
-char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) __malloc;
+void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __alloc_size(2);
+void *devm_krealloc(struct device *dev, void *ptr, size_t size,
+ gfp_t gfp) __must_check __realloc_size(3);
+__printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp,
+ const char *fmt, va_list ap) __malloc;
+__printf(3, 4) char *devm_kasprintf(struct device *dev, gfp_t gfp,
+ const char *fmt, ...) __malloc;
static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
{
return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
@@ -666,41 +211,58 @@ static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
static inline void *devm_kmalloc_array(struct device *dev,
size_t n, size_t size, gfp_t flags)
{
- if (size != 0 && n > SIZE_MAX / size)
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;
- return devm_kmalloc(dev, n * size, flags);
+
+ return devm_kmalloc(dev, bytes, flags);
}
static inline void *devm_kcalloc(struct device *dev,
size_t n, size_t size, gfp_t flags)
{
return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
}
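The array helpers now reject multiplication overflow through
check_mul_overflow() rather than an open-coded division test; callers are
unaffected, e.g. (struct my_entry hypothetical):

/* n may come from untrusted input; overflow yields NULL, not a short buffer */
struct my_entry *tbl = devm_kcalloc(dev, n, sizeof(*tbl), GFP_KERNEL);
if (!tbl)
	return -ENOMEM;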
-extern void devm_kfree(struct device *dev, void *p);
-extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
-extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
- gfp_t gfp);
+void devm_kfree(struct device *dev, const void *p);
+char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
+const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp);
+void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
+ __realloc_size(3);
+
+unsigned long devm_get_free_pages(struct device *dev,
+ gfp_t gfp_mask, unsigned int order);
+void devm_free_pages(struct device *dev, unsigned long addr);
-extern unsigned long devm_get_free_pages(struct device *dev,
- gfp_t gfp_mask, unsigned int order);
-extern void devm_free_pages(struct device *dev, unsigned long addr);
+void __iomem *devm_ioremap_resource(struct device *dev,
+ const struct resource *res);
+void __iomem *devm_ioremap_resource_wc(struct device *dev,
+ const struct resource *res);
-void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
+void __iomem *devm_of_iomap(struct device *dev,
+ struct device_node *node, int index,
+ resource_size_t *size);
/* allows to add/remove a custom action to devres stack */
-int devm_add_action(struct device *dev, void (*action)(void *), void *data);
void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
+void devm_release_action(struct device *dev, void (*action)(void *), void *data);
-static inline int devm_add_action_or_reset(struct device *dev,
- void (*action)(void *), void *data)
+int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name);
+#define devm_add_action(release, action, data) \
+ __devm_add_action(release, action, data, #action)
+
+static inline int __devm_add_action_or_reset(struct device *dev, void (*action)(void *),
+ void *data, const char *name)
{
int ret;
- ret = devm_add_action(dev, action, data);
+ ret = __devm_add_action(dev, action, data, name);
if (ret)
action(data);
return ret;
}
+#define devm_add_action_or_reset(release, action, data) \
+ __devm_add_action_or_reset(release, action, data, #action)
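The wrappers now record the action name for devres bookkeeping, but the
calling convention is unchanged; a sketch with a hypothetical regulator
cleanup:

static void my_disable_regulator(void *data)
{
	regulator_disable(data);
}

/* In probe: on failure the action runs immediately, so no manual unwind. */
ret = devm_add_action_or_reset(&pdev->dev, my_disable_regulator, my_reg);
if (ret)
	return ret;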
/**
* devm_alloc_percpu - Resource-managed alloc_percpu
@@ -727,6 +289,7 @@ struct device_dma_parameters {
* sg limitations.
*/
unsigned int max_segment_size;
+ unsigned int min_align_mask;
unsigned long segment_boundary_mask;
};
@@ -751,39 +314,26 @@ enum device_link_state {
/*
* Device link flags.
*
- * STATELESS: The core won't track the presence of supplier/consumer drivers.
- * AUTOREMOVE: Remove this link automatically on consumer driver unbind.
+ * STATELESS: The core will not remove this link automatically.
+ * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind.
* PM_RUNTIME: If set, the runtime PM framework will use this link.
* RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
+ * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind.
+ * AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds.
+ * MANAGED: The core tracks presence of supplier/consumer drivers (internal).
+ * SYNC_STATE_ONLY: Link only affects sync_state() behavior.
+ * INFERRED: Inferred from data (e.g. firmware) and not from driver actions.
*/
-#define DL_FLAG_STATELESS BIT(0)
-#define DL_FLAG_AUTOREMOVE BIT(1)
-#define DL_FLAG_PM_RUNTIME BIT(2)
-#define DL_FLAG_RPM_ACTIVE BIT(3)
-
-/**
- * struct device_link - Device link representation.
- * @supplier: The device on the supplier end of the link.
- * @s_node: Hook to the supplier device's list of links to consumers.
- * @consumer: The device on the consumer end of the link.
- * @c_node: Hook to the consumer device's list of links to suppliers.
- * @status: The state of the link (with respect to the presence of drivers).
- * @flags: Link flags.
- * @rpm_active: Whether or not the consumer device is runtime-PM-active.
- * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
- */
-struct device_link {
- struct device *supplier;
- struct list_head s_node;
- struct device *consumer;
- struct list_head c_node;
- enum device_link_state status;
- u32 flags;
- bool rpm_active;
-#ifdef CONFIG_SRCU
- struct rcu_head rcu_head;
-#endif
-};
+#define DL_FLAG_STATELESS BIT(0)
+#define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1)
+#define DL_FLAG_PM_RUNTIME BIT(2)
+#define DL_FLAG_RPM_ACTIVE BIT(3)
+#define DL_FLAG_AUTOREMOVE_SUPPLIER BIT(4)
+#define DL_FLAG_AUTOPROBE_CONSUMER BIT(5)
+#define DL_FLAG_MANAGED BIT(6)
+#define DL_FLAG_SYNC_STATE_ONLY BIT(7)
+#define DL_FLAG_INFERRED BIT(8)
+#define DL_FLAG_CYCLE BIT(9)
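Consumers request links with device_link_add() (declared elsewhere in this
header); under the expanded flag set, a runtime-PM-aware link that disappears
with the consumer driver might be created as in this sketch (consumer_dev and
supplier_dev hypothetical):

struct device_link *link;

link = device_link_add(consumer_dev, supplier_dev,
		       DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_PM_RUNTIME |
		       DL_FLAG_RPM_ACTIVE);
if (!link)
	dev_warn(consumer_dev, "failed to link to %s\n",
		 dev_name(supplier_dev));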
/**
* enum dl_dev_state - Device driver presence tracking information.
@@ -800,18 +350,117 @@ enum dl_dev_state {
};
/**
+ * enum device_removable - Whether the device is removable. The criteria for a
+ * device to be classified as removable are determined by its subsystem or bus.
+ * @DEVICE_REMOVABLE_NOT_SUPPORTED: This attribute is not supported for this
+ * device (default).
+ * @DEVICE_REMOVABLE_UNKNOWN: Device location is unknown.
+ * @DEVICE_FIXED: Device is not removable by the user.
+ * @DEVICE_REMOVABLE: Device is removable by the user.
+ */
+enum device_removable {
+ DEVICE_REMOVABLE_NOT_SUPPORTED = 0, /* must be 0 */
+ DEVICE_REMOVABLE_UNKNOWN,
+ DEVICE_FIXED,
+ DEVICE_REMOVABLE,
+};
+
+/**
* struct dev_links_info - Device data related to device links.
* @suppliers: List of links to supplier devices.
* @consumers: List of links to consumer devices.
+ * @defer_sync: Hook to global list of devices that have deferred sync_state.
* @status: Driver status information.
*/
struct dev_links_info {
struct list_head suppliers;
struct list_head consumers;
+ struct list_head defer_sync;
enum dl_dev_state status;
};
/**
+ * struct dev_msi_info - Device data related to MSI
+ * @domain: The MSI interrupt domain associated to the device
+ * @data: Pointer to MSI device data
+ */
+struct dev_msi_info {
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ struct irq_domain *domain;
+ struct msi_device_data *data;
+#endif
+};
+
+/**
+ * enum device_physical_location_panel - Describes which panel surface of the
+ * system's housing the device connection point resides on.
+ * @DEVICE_PANEL_TOP: Device connection point is on the top panel.
+ * @DEVICE_PANEL_BOTTOM: Device connection point is on the bottom panel.
+ * @DEVICE_PANEL_LEFT: Device connection point is on the left panel.
+ * @DEVICE_PANEL_RIGHT: Device connection point is on the right panel.
+ * @DEVICE_PANEL_FRONT: Device connection point is on the front panel.
+ * @DEVICE_PANEL_BACK: Device connection point is on the back panel.
+ * @DEVICE_PANEL_UNKNOWN: The panel with device connection point is unknown.
+ */
+enum device_physical_location_panel {
+ DEVICE_PANEL_TOP,
+ DEVICE_PANEL_BOTTOM,
+ DEVICE_PANEL_LEFT,
+ DEVICE_PANEL_RIGHT,
+ DEVICE_PANEL_FRONT,
+ DEVICE_PANEL_BACK,
+ DEVICE_PANEL_UNKNOWN,
+};
+
+/**
+ * enum device_physical_location_vertical_position - Describes vertical
+ * position of the device connection point on the panel surface.
+ * @DEVICE_VERT_POS_UPPER: Device connection point is at upper part of panel.
+ * @DEVICE_VERT_POS_CENTER: Device connection point is at center part of panel.
+ * @DEVICE_VERT_POS_LOWER: Device connection point is at lower part of panel.
+ */
+enum device_physical_location_vertical_position {
+ DEVICE_VERT_POS_UPPER,
+ DEVICE_VERT_POS_CENTER,
+ DEVICE_VERT_POS_LOWER,
+};
+
+/**
+ * enum device_physical_location_horizontal_position - Describes horizontal
+ * position of the device connection point on the panel surface.
+ * @DEVICE_HORI_POS_LEFT: Device connection point is at left part of panel.
+ * @DEVICE_HORI_POS_CENTER: Device connection point is at center part of panel.
+ * @DEVICE_HORI_POS_RIGHT: Device connection point is at right part of panel.
+ */
+enum device_physical_location_horizontal_position {
+ DEVICE_HORI_POS_LEFT,
+ DEVICE_HORI_POS_CENTER,
+ DEVICE_HORI_POS_RIGHT,
+};
+
+/**
+ * struct device_physical_location - Device data related to physical location
+ * of the device connection point.
+ * @panel: Panel surface of the system's housing that the device connection
+ * point resides on.
+ * @vertical_position: Vertical position of the device connection point within
+ * the panel.
+ * @horizontal_position: Horizontal position of the device connection point
+ * within the panel.
+ * @dock: Set if the device connection point resides in a docking station or
+ * port replicator.
+ * @lid: Set if this device connection point resides on the lid of a laptop
+ * system.
+ */
+struct device_physical_location {
+ enum device_physical_location_panel panel;
+ enum device_physical_location_vertical_position vertical_position;
+ enum device_physical_location_horizontal_position horizontal_position;
+ bool dock;
+ bool lid;
+};
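
To illustrate the structure above: a subsystem that learns a port's location from firmware might fill it in before registering the device. A minimal sketch, not part of this commit; the names are hypothetical:

/* Hypothetical: a USB port on the lower-left of the front panel. */
static struct device_physical_location my_port_location = {
	.panel			= DEVICE_PANEL_FRONT,
	.vertical_position	= DEVICE_VERT_POS_LOWER,
	.horizontal_position	= DEVICE_HORI_POS_LEFT,
	.dock			= false,
	.lid			= false,
};

/* Before device_add(): dev->physical_location = &my_port_location; */
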
+
+/**
* struct device - The basic device structure
* @parent: The device's "parent" device, the device to which it is attached.
* In most cases, a parent device is some sort of bus or host
@@ -838,25 +487,29 @@ struct dev_links_info {
* @driver_data: Private pointer for driver specific info.
* @links: Links to suppliers and consumers of this device.
* @power: For device power management.
- * See Documentation/power/admin-guide/devices.rst for details.
+ * See Documentation/driver-api/pm/devices.rst for details.
* @pm_domain: Provide callbacks that are executed during system suspend,
* hibernation, system resume and during runtime PM transitions
* along with subsystem-level and driver-level callbacks.
+ * @em_pd: device's energy model performance domain
* @pins: For device pin management.
- * See Documentation/pinctrl.txt for details.
- * @msi_list: Hosts MSI descriptors
- * @msi_domain: The generic MSI domain this device is using.
+ * See Documentation/driver-api/pin-control.rst for details.
+ * @msi: MSI related data
* @numa_node: NUMA node this device is close to.
+ * @dma_ops: DMA mapping operations for this device.
* @dma_mask: Dma mask (if dma'ble device).
 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mappings, as not
 * all hardware supports 64-bit addresses for consistent allocations of
 * such descriptors.
- * @dma_pfn_offset: offset of DMA memory range relatively of RAM
+ * @bus_dma_limit: Limit of an upstream bridge or bus which imposes a smaller
+ * DMA limit than the device itself supports.
+ * @dma_range_map: map for DMA memory ranges relative to that of RAM
* @dma_parms: A low level driver may set these to teach IOMMU code about
* segment limitations.
* @dma_pools: Dma pools (if dma'ble device).
* @dma_mem: Internal for coherent mem override.
* @cma_area: Contiguous memory area for dma allocations
+ * @dma_io_tlb_mem: Pointer to the swiotlb pool used. Not for driver use.
* @archdata: For arch-specific additions.
* @of_node: Associated device tree node.
* @fwnode: Associated device node supplied by platform firmware.
@@ -871,12 +524,30 @@ struct dev_links_info {
* gone away. This should be set by the allocator of the
* device (i.e. the bus driver that discovered the device).
* @iommu_group: IOMMU group the device belongs to.
- * @iommu_fwspec: IOMMU-specific properties supplied by firmware.
+ * @iommu: Per device generic IOMMU runtime data
+ * @physical_location: Describes physical location of the device connection
+ * point in the system housing.
+ * @removable: Whether the device can be removed from the system. This
+ * should be set by the subsystem / bus driver that discovered
+ * the device.
*
* @offline_disabled: If set, the device is permanently online.
* @offline: Set after successful invocation of bus type's .offline().
* @of_node_reused: Set if the device-tree node is shared with an ancestor
* device.
+ * @state_synced: The hardware state of this device has been synced to match
+ * the software state of this device by calling the driver/bus
+ * sync_state() callback.
+ * @can_match: The device has matched with a driver at least once or it is in
+ * a bus (like AMBA) which can't check for matching drivers until
+ * other devices probe successfully.
+ * @dma_coherent: this particular device is dma coherent, even if the
+ * architecture supports non-coherent devices.
+ * @dma_ops_bypass: If set to %true then the dma_ops are bypassed for the
+ * streaming DMA operations (->map_* / ->unmap_* / ->sync_*),
+ * and optionally (if the coherent mask is large enough) also
+ * for dma allocations. This flag is managed by the dma ops
+ * instance from ->dma_supported.
*
* At the lowest level, every device in a Linux system is represented by an
* instance of struct device. The device structure contains the information
@@ -887,89 +558,143 @@ struct dev_links_info {
* a higher-level representation of the device.
*/
struct device {
+ struct kobject kobj;
struct device *parent;
struct device_private *p;
- struct kobject kobj;
const char *init_name; /* initial name of the device */
const struct device_type *type;
- struct mutex mutex; /* mutex to synchronize calls to
- * its driver.
- */
-
- struct bus_type *bus; /* type of bus device is on */
+ const struct bus_type *bus; /* type of bus device is on */
struct device_driver *driver; /* which driver has allocated this
device */
void *platform_data; /* Platform specific data, device
core doesn't touch it */
void *driver_data; /* Driver data, set and get with
- dev_set/get_drvdata */
+ dev_set_drvdata/dev_get_drvdata */
+ struct mutex mutex; /* mutex to synchronize calls to
+ * its driver.
+ */
+
struct dev_links_info links;
struct dev_pm_info power;
struct dev_pm_domain *pm_domain;
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- struct irq_domain *msi_domain;
+#ifdef CONFIG_ENERGY_MODEL
+ struct em_perf_domain *em_pd;
#endif
+
#ifdef CONFIG_PINCTRL
struct dev_pin_info *pins;
#endif
-#ifdef CONFIG_GENERIC_MSI_IRQ
- struct list_head msi_list;
-#endif
-
-#ifdef CONFIG_NUMA
- int numa_node; /* NUMA node this device is close to */
-#endif
+ struct dev_msi_info msi;
+#ifdef CONFIG_DMA_OPS
const struct dma_map_ops *dma_ops;
+#endif
u64 *dma_mask; /* dma mask (if dma'able device) */
u64 coherent_dma_mask;/* Like dma_mask, but for
alloc_coherent mappings as
not all hardware supports
64 bit addresses for consistent
allocations of such descriptors. */
- unsigned long dma_pfn_offset;
+ u64 bus_dma_limit; /* upstream dma constraint */
+ const struct bus_dma_region *dma_range_map;
struct device_dma_parameters *dma_parms;
struct list_head dma_pools; /* dma pools (if dma'ble) */
+#ifdef CONFIG_DMA_DECLARE_COHERENT
struct dma_coherent_mem *dma_mem; /* internal for coherent mem
override */
+#endif
#ifdef CONFIG_DMA_CMA
struct cma *cma_area; /* contiguous memory area for dma
allocations */
#endif
+#ifdef CONFIG_SWIOTLB
+ struct io_tlb_mem *dma_io_tlb_mem;
+#endif
/* arch specific additions */
struct dev_archdata archdata;
struct device_node *of_node; /* associated device tree node */
struct fwnode_handle *fwnode; /* firmware device node */
+#ifdef CONFIG_NUMA
+ int numa_node; /* NUMA node this device is close to */
+#endif
dev_t devt; /* dev_t, creates the sysfs "dev" */
u32 id; /* device instance */
spinlock_t devres_lock;
struct list_head devres_head;
- struct klist_node knode_class;
- struct class *class;
+ const struct class *class;
const struct attribute_group **groups; /* optional groups */
void (*release)(struct device *dev);
struct iommu_group *iommu_group;
- struct iommu_fwspec *iommu_fwspec;
+ struct dev_iommu *iommu;
+
+ struct device_physical_location *physical_location;
+
+ enum device_removable removable;
bool offline_disabled:1;
bool offline:1;
bool of_node_reused:1;
+ bool state_synced:1;
+ bool can_match:1;
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+ bool dma_coherent:1;
+#endif
+#ifdef CONFIG_DMA_OPS_BYPASS
+ bool dma_ops_bypass : 1;
+#endif
+};
+
+/**
+ * struct device_link - Device link representation.
+ * @supplier: The device on the supplier end of the link.
+ * @s_node: Hook to the supplier device's list of links to consumers.
+ * @consumer: The device on the consumer end of the link.
+ * @c_node: Hook to the consumer device's list of links to suppliers.
+ * @link_dev: device used to expose link details in sysfs
+ * @status: The state of the link (with respect to the presence of drivers).
+ * @flags: Link flags.
+ * @rpm_active: Whether or not the consumer device is runtime-PM-active.
+ * @kref: Count repeated addition of the same link.
+ * @rm_work: Work structure used for removing the link.
+ * @supplier_preactivated: Supplier has been made active before consumer probe.
+ */
+struct device_link {
+ struct device *supplier;
+ struct list_head s_node;
+ struct device *consumer;
+ struct list_head c_node;
+ struct device link_dev;
+ enum device_link_state status;
+ u32 flags;
+ refcount_t rpm_active;
+ struct kref kref;
+ struct work_struct rm_work;
+ bool supplier_preactivated; /* Owned by consumer probe. */
};
-static inline struct device *kobj_to_dev(struct kobject *kobj)
+#define kobj_to_dev(__kobj) container_of_const(__kobj, struct device, kobj)
+
+/**
+ * device_iommu_mapped - Returns true when the device DMA is translated
+ * by an IOMMU
+ * @dev: Device to perform the check on
+ */
+static inline bool device_iommu_mapped(struct device *dev)
{
- return container_of(kobj, struct device, kobj);
+ return (dev->iommu_group != NULL);
}
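
A hedged usage sketch (not part of this commit): drivers typically use device_iommu_mapped() to skip setup that only matters when the device's DMA is untranslated. my_setup_direct_map() is a hypothetical helper:

static int my_setup_dma(struct device *dev)
{
	/* An IOMMU translates this device's DMA; nothing special needed. */
	if (device_iommu_mapped(dev))
		return 0;

	/* Otherwise fall back to device-specific direct-map setup. */
	return my_setup_direct_map(dev);
}
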
/* Get the wakeup routines, which depend on struct device */
@@ -984,8 +709,19 @@ static inline const char *dev_name(const struct device *dev)
return kobject_name(&dev->kobj);
}
-extern __printf(2, 3)
-int dev_set_name(struct device *dev, const char *name, ...);
+/**
+ * dev_bus_name - Return a device's bus/class name, if at all possible
+ * @dev: struct device to get the bus/class name of
+ *
+ * Will return the name of the bus/class the device is attached to. If it is
+ * not attached to a bus/class, an empty string will be returned.
+ */
+static inline const char *dev_bus_name(const struct device *dev)
+{
+ return dev->bus ? dev->bus->name : (dev->class ? dev->class->name : "");
+}
+
+__printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...);
#ifdef CONFIG_NUMA
static inline int dev_to_node(struct device *dev)
@@ -999,7 +735,7 @@ static inline void set_dev_node(struct device *dev, int node)
#else
static inline int dev_to_node(struct device *dev)
{
- return -1;
+ return NUMA_NO_NODE;
}
static inline void set_dev_node(struct device *dev, int node)
{
@@ -1008,8 +744,8 @@ static inline void set_dev_node(struct device *dev, int node)
static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
{
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- return dev->msi_domain;
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ return dev->msi.domain;
#else
return NULL;
#endif
@@ -1017,8 +753,8 @@ static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
{
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- dev->msi_domain = d;
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ dev->msi.domain = d;
#endif
}
@@ -1069,6 +805,16 @@ static inline bool device_async_suspend_enabled(struct device *dev)
return !!dev->power.async_suspend;
}
+static inline bool device_pm_not_required(struct device *dev)
+{
+ return dev->power.no_pm;
+}
+
+static inline void device_set_pm_not_required(struct device *dev)
+{
+ dev->power.no_pm = true;
+}
+
static inline void dev_pm_syscore_device(struct device *dev, bool val)
{
#ifdef CONFIG_PM_SLEEP
@@ -1076,6 +822,16 @@ static inline void dev_pm_syscore_device(struct device *dev, bool val)
#endif
}
+static inline void dev_pm_set_driver_flags(struct device *dev, u32 flags)
+{
+ dev->power.driver_flags = flags;
+}
+
+static inline bool dev_pm_test_driver_flags(struct device *dev, u32 flags)
+{
+ return !!(dev->power.driver_flags & flags);
+}
+
static inline void device_lock(struct device *dev)
{
mutex_lock(&dev->mutex);
@@ -1103,47 +859,119 @@ static inline void device_lock_assert(struct device *dev)
static inline struct device_node *dev_of_node(struct device *dev)
{
- if (!IS_ENABLED(CONFIG_OF))
+ if (!IS_ENABLED(CONFIG_OF) || !dev)
return NULL;
return dev->of_node;
}
-void driver_init(void);
+static inline bool dev_has_sync_state(struct device *dev)
+{
+ if (!dev)
+ return false;
+ if (dev->driver && dev->driver->sync_state)
+ return true;
+ if (dev->bus && dev->bus->sync_state)
+ return true;
+ return false;
+}
+
+static inline void dev_set_removable(struct device *dev,
+ enum device_removable removable)
+{
+ dev->removable = removable;
+}
+
+static inline bool dev_is_removable(struct device *dev)
+{
+ return dev->removable == DEVICE_REMOVABLE;
+}
+
+static inline bool dev_removable_is_valid(struct device *dev)
+{
+ return dev->removable != DEVICE_REMOVABLE_NOT_SUPPORTED;
+}
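
A short sketch of the intended use of the helpers above (illustrative only): the bus driver that discovered the device records what the platform reported, and the result is later exposed via the "removable" sysfs attribute. my_bus_add_port() is hypothetical:

static void my_bus_add_port(struct device *dev, bool user_removable)
{
	/* Record removability before device_add() exposes it in sysfs. */
	dev_set_removable(dev, user_removable ? DEVICE_REMOVABLE
					      : DEVICE_FIXED);
}
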
/*
* High level routines for use by the bus drivers
*/
-extern int __must_check device_register(struct device *dev);
-extern void device_unregister(struct device *dev);
-extern void device_initialize(struct device *dev);
-extern int __must_check device_add(struct device *dev);
-extern void device_del(struct device *dev);
-extern int device_for_each_child(struct device *dev, void *data,
- int (*fn)(struct device *dev, void *data));
-extern int device_for_each_child_reverse(struct device *dev, void *data,
- int (*fn)(struct device *dev, void *data));
-extern struct device *device_find_child(struct device *dev, void *data,
- int (*match)(struct device *dev, void *data));
-extern int device_rename(struct device *dev, const char *new_name);
-extern int device_move(struct device *dev, struct device *new_parent,
- enum dpm_order dpm_order);
-extern const char *device_get_devnode(struct device *dev,
- umode_t *mode, kuid_t *uid, kgid_t *gid,
- const char **tmp);
+int __must_check device_register(struct device *dev);
+void device_unregister(struct device *dev);
+void device_initialize(struct device *dev);
+int __must_check device_add(struct device *dev);
+void device_del(struct device *dev);
+int device_for_each_child(struct device *dev, void *data,
+ int (*fn)(struct device *dev, void *data));
+int device_for_each_child_reverse(struct device *dev, void *data,
+ int (*fn)(struct device *dev, void *data));
+struct device *device_find_child(struct device *dev, void *data,
+ int (*match)(struct device *dev, void *data));
+struct device *device_find_child_by_name(struct device *parent,
+ const char *name);
+struct device *device_find_any_child(struct device *parent);
+
+int device_rename(struct device *dev, const char *new_name);
+int device_move(struct device *dev, struct device *new_parent,
+ enum dpm_order dpm_order);
+int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
+int device_is_dependent(struct device *dev, void *target);
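
For reference, a hedged sketch of the two-step registration pattern these routines enable: device_register() is device_initialize() plus device_add(), and splitting them lets a caller fill in fields first and unwind with put_device() on failure. struct my_child and my_child_release() are hypothetical:

static int my_add_child(struct device *parent)
{
	struct my_child *child;	/* hypothetical wrapper embedding a struct device */
	int ret;

	child = kzalloc(sizeof(*child), GFP_KERNEL);
	if (!child)
		return -ENOMEM;

	device_initialize(&child->dev);
	child->dev.parent = parent;
	child->dev.release = my_child_release;	/* frees 'child' */

	ret = dev_set_name(&child->dev, "my-child");
	if (!ret)
		ret = device_add(&child->dev);
	if (ret)
		put_device(&child->dev);	/* release() frees the allocation */
	return ret;
}
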
static inline bool device_supports_offline(struct device *dev)
{
return dev->bus && dev->bus->offline && dev->bus->online;
}
-extern void lock_device_hotplug(void);
-extern void unlock_device_hotplug(void);
-extern int lock_device_hotplug_sysfs(void);
-extern int device_offline(struct device *dev);
-extern int device_online(struct device *dev);
-extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
-extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+#define __device_lock_set_class(dev, name, key) \
+do { \
+ struct device *__d2 __maybe_unused = dev; \
+ lock_set_class(&__d2->mutex.dep_map, name, key, 0, _THIS_IP_); \
+} while (0)
+
+/**
+ * device_lock_set_class - Specify a temporary lock class while a device
+ * is attached to a driver
+ * @dev: device to modify
+ * @key: lock class key data
+ *
+ * This must be called with the device_lock() already held, for example
+ * from driver ->probe(). Take care to only override the default
+ * lockdep_no_validate class.
+ */
+#ifdef CONFIG_LOCKDEP
+#define device_lock_set_class(dev, key) \
+do { \
+ struct device *__d = dev; \
+ dev_WARN_ONCE(__d, !lockdep_match_class(&__d->mutex, \
+ &__lockdep_no_validate__), \
+ "overriding existing custom lock class\n"); \
+ __device_lock_set_class(__d, #key, key); \
+} while (0)
+#else
+#define device_lock_set_class(dev, key) __device_lock_set_class(dev, #key, key)
+#endif
+
+/**
+ * device_lock_reset_class - Return a device to the default lockdep novalidate state
+ * @dev: device to modify
+ *
+ * This must be called with the device_lock() already held, for example
+ * from driver ->remove().
+ */
+#define device_lock_reset_class(dev) \
+do { \
+ struct device *__d __maybe_unused = dev; \
+ lock_set_novalidate_class(&__d->mutex.dep_map, "&dev->mutex", \
+ _THIS_IP_); \
+} while (0)
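
A usage sketch for the pair above (illustrative, with a hypothetical lock class key): set the class while the driver is bound and reset it on the way out, both under the device_lock() the core already holds around ->probe() and ->remove():

static struct lock_class_key my_dev_lock_key;

static int my_probe(struct device *dev)
{
	device_lock_set_class(dev, &my_dev_lock_key);
	return 0;
}

static void my_remove(struct device *dev)
{
	device_lock_reset_class(dev);
}
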
+
+void lock_device_hotplug(void);
+void unlock_device_hotplug(void);
+int lock_device_hotplug_sysfs(void);
+int device_offline(struct device *dev);
+int device_online(struct device *dev);
+void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2);
+void device_set_node(struct device *dev, struct fwnode_handle *fwnode);
static inline int dev_num_vf(struct device *dev)
{
@@ -1155,14 +983,13 @@ static inline int dev_num_vf(struct device *dev)
/*
* Root device objects for grouping under /sys/devices
*/
-extern struct device *__root_device_register(const char *name,
- struct module *owner);
+struct device *__root_device_register(const char *name, struct module *owner);
/* This is a macro to avoid include problems with THIS_MODULE */
#define root_device_register(name) \
__root_device_register(name, THIS_MODULE)
-extern void root_device_unregister(struct device *root);
+void root_device_unregister(struct device *root);
static inline void *dev_get_platdata(const struct device *dev)
{
@@ -1173,32 +1000,54 @@ static inline void *dev_get_platdata(const struct device *dev)
* Manual binding of a device to driver. See drivers/base/bus.c
* for information on use.
*/
-extern int __must_check device_bind_driver(struct device *dev);
-extern void device_release_driver(struct device *dev);
-extern int __must_check device_attach(struct device *dev);
-extern int __must_check driver_attach(struct device_driver *drv);
-extern void device_initial_probe(struct device *dev);
-extern int __must_check device_reprobe(struct device *dev);
+int __must_check device_driver_attach(struct device_driver *drv,
+ struct device *dev);
+int __must_check device_bind_driver(struct device *dev);
+void device_release_driver(struct device *dev);
+int __must_check device_attach(struct device *dev);
+int __must_check driver_attach(struct device_driver *drv);
+void device_initial_probe(struct device *dev);
+int __must_check device_reprobe(struct device *dev);
-extern bool device_is_bound(struct device *dev);
+bool device_is_bound(struct device *dev);
/*
* Easy functions for dynamically creating devices on the fly
*/
-extern __printf(5, 0)
-struct device *device_create_vargs(struct class *cls, struct device *parent,
- dev_t devt, void *drvdata,
- const char *fmt, va_list vargs);
-extern __printf(5, 6)
-struct device *device_create(struct class *cls, struct device *parent,
- dev_t devt, void *drvdata,
- const char *fmt, ...);
-extern __printf(6, 7)
-struct device *device_create_with_groups(struct class *cls,
- struct device *parent, dev_t devt, void *drvdata,
- const struct attribute_group **groups,
- const char *fmt, ...);
-extern void device_destroy(struct class *cls, dev_t devt);
+__printf(5, 6) struct device *
+device_create(const struct class *cls, struct device *parent, dev_t devt,
+ void *drvdata, const char *fmt, ...);
+__printf(6, 7) struct device *
+device_create_with_groups(const struct class *cls, struct device *parent, dev_t devt,
+ void *drvdata, const struct attribute_group **groups,
+ const char *fmt, ...);
+void device_destroy(const struct class *cls, dev_t devt);
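
A minimal, hedged sketch of device_create()/device_destroy() as typically used for a character-device node; my_class is assumed to come from class_create() and my_devt from alloc_chrdev_region():

static int my_create_node(struct class *my_class, dev_t my_devt, void *drvdata)
{
	struct device *dev;

	dev = device_create(my_class, NULL, my_devt, drvdata,
			    "mydev%d", MINOR(my_devt));
	/* device_create() returns an ERR_PTR on failure */
	return PTR_ERR_OR_ZERO(dev);
}

/* teardown: device_destroy(my_class, my_devt); */
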
+
+int __must_check device_add_groups(struct device *dev,
+ const struct attribute_group **groups);
+void device_remove_groups(struct device *dev,
+ const struct attribute_group **groups);
+
+static inline int __must_check device_add_group(struct device *dev,
+ const struct attribute_group *grp)
+{
+ const struct attribute_group *groups[] = { grp, NULL };
+
+ return device_add_groups(dev, groups);
+}
+
+static inline void device_remove_group(struct device *dev,
+ const struct attribute_group *grp)
+{
+ const struct attribute_group *groups[] = { grp, NULL };
+
+ return device_remove_groups(dev, groups);
+}
+
+int __must_check devm_device_add_groups(struct device *dev,
+ const struct attribute_group **groups);
+int __must_check devm_device_add_group(struct device *dev,
+ const struct attribute_group *grp);
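
An illustrative sketch of the group helpers above (not part of this commit); it assumes a my_status_show() callback to back DEVICE_ATTR_RO():

static DEVICE_ATTR_RO(my_status);

static struct attribute *my_attrs[] = {
	&dev_attr_my_status.attr,
	NULL,
};

static const struct attribute_group my_group = {
	.attrs = my_attrs,
};

/* In probe; devres removes the group automatically on unbind: */
/*	ret = devm_device_add_group(dev, &my_group); */
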
/*
* Platform "fixup" functions - allow the platform to have their say
@@ -1215,231 +1064,31 @@ extern int (*platform_notify_remove)(struct device *dev);
* get_device - atomically increment the reference count for the device.
*
*/
-extern struct device *get_device(struct device *dev);
-extern void put_device(struct device *dev);
+struct device *get_device(struct device *dev);
+void put_device(struct device *dev);
+bool kill_device(struct device *dev);
#ifdef CONFIG_DEVTMPFS
-extern int devtmpfs_create_node(struct device *dev);
-extern int devtmpfs_delete_node(struct device *dev);
-extern int devtmpfs_mount(const char *mntdir);
+int devtmpfs_mount(void);
#else
-static inline int devtmpfs_create_node(struct device *dev) { return 0; }
-static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
-static inline int devtmpfs_mount(const char *mountpoint) { return 0; }
+static inline int devtmpfs_mount(void) { return 0; }
#endif
/* drivers/base/power/shutdown.c */
-extern void device_shutdown(void);
+void device_shutdown(void);
/* debugging and troubleshooting/diagnostic helpers. */
-extern const char *dev_driver_string(const struct device *dev);
+const char *dev_driver_string(const struct device *dev);
/* Device links interface. */
struct device_link *device_link_add(struct device *consumer,
struct device *supplier, u32 flags);
void device_link_del(struct device_link *link);
+void device_link_remove(void *consumer, struct device *supplier);
+void device_links_supplier_sync_state_pause(void);
+void device_links_supplier_sync_state_resume(void);
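
A hedged consumer-side sketch of device_link_add() using the DL_FLAG_* bits defined earlier in this file; my_consumer_bind() is hypothetical:

static int my_consumer_bind(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	/* Managed link: removed when the consumer unbinds; ties runtime PM. */
	link = device_link_add(consumer, supplier,
			       DL_FLAG_AUTOREMOVE_CONSUMER |
			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
	if (!link)
		return -EINVAL;	/* e.g. the link would create a dependency cycle */
	return 0;
}
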
-#ifdef CONFIG_PRINTK
-
-extern __printf(3, 0)
-int dev_vprintk_emit(int level, const struct device *dev,
- const char *fmt, va_list args);
-extern __printf(3, 4)
-int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
-
-extern __printf(3, 4)
-void dev_printk(const char *level, const struct device *dev,
- const char *fmt, ...);
-extern __printf(2, 3)
-void dev_emerg(const struct device *dev, const char *fmt, ...);
-extern __printf(2, 3)
-void dev_alert(const struct device *dev, const char *fmt, ...);
-extern __printf(2, 3)
-void dev_crit(const struct device *dev, const char *fmt, ...);
-extern __printf(2, 3)
-void dev_err(const struct device *dev, const char *fmt, ...);
-extern __printf(2, 3)
-void dev_warn(const struct device *dev, const char *fmt, ...);
-extern __printf(2, 3)
-void dev_notice(const struct device *dev, const char *fmt, ...);
-extern __printf(2, 3)
-void _dev_info(const struct device *dev, const char *fmt, ...);
-
-#else
-
-static inline __printf(3, 0)
-int dev_vprintk_emit(int level, const struct device *dev,
- const char *fmt, va_list args)
-{ return 0; }
-static inline __printf(3, 4)
-int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
-{ return 0; }
-
-static inline void __dev_printk(const char *level, const struct device *dev,
- struct va_format *vaf)
-{}
-static inline __printf(3, 4)
-void dev_printk(const char *level, const struct device *dev,
- const char *fmt, ...)
-{}
-
-static inline __printf(2, 3)
-void dev_emerg(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void dev_crit(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void dev_alert(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void dev_err(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void dev_warn(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void dev_notice(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void _dev_info(const struct device *dev, const char *fmt, ...)
-{}
-
-#endif
-
-/*
- * Stupid hackaround for existing uses of non-printk uses dev_info
- *
- * Note that the definition of dev_info below is actually _dev_info
- * and a macro is used to avoid redefining dev_info
- */
-
-#define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)
-
-#if defined(CONFIG_DYNAMIC_DEBUG)
-#define dev_dbg(dev, format, ...) \
-do { \
- dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
-} while (0)
-#elif defined(DEBUG)
-#define dev_dbg(dev, format, arg...) \
- dev_printk(KERN_DEBUG, dev, format, ##arg)
-#else
-#define dev_dbg(dev, format, arg...) \
-({ \
- if (0) \
- dev_printk(KERN_DEBUG, dev, format, ##arg); \
-})
-#endif
-
-#ifdef CONFIG_PRINTK
-#define dev_level_once(dev_level, dev, fmt, ...) \
-do { \
- static bool __print_once __read_mostly; \
- \
- if (!__print_once) { \
- __print_once = true; \
- dev_level(dev, fmt, ##__VA_ARGS__); \
- } \
-} while (0)
-#else
-#define dev_level_once(dev_level, dev, fmt, ...) \
-do { \
- if (0) \
- dev_level(dev, fmt, ##__VA_ARGS__); \
-} while (0)
-#endif
-
-#define dev_emerg_once(dev, fmt, ...) \
- dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
-#define dev_alert_once(dev, fmt, ...) \
- dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
-#define dev_crit_once(dev, fmt, ...) \
- dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
-#define dev_err_once(dev, fmt, ...) \
- dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
-#define dev_warn_once(dev, fmt, ...) \
- dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
-#define dev_notice_once(dev, fmt, ...) \
- dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
-#define dev_info_once(dev, fmt, ...) \
- dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
-#define dev_dbg_once(dev, fmt, ...) \
- dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
-
-#define dev_level_ratelimited(dev_level, dev, fmt, ...) \
-do { \
- static DEFINE_RATELIMIT_STATE(_rs, \
- DEFAULT_RATELIMIT_INTERVAL, \
- DEFAULT_RATELIMIT_BURST); \
- if (__ratelimit(&_rs)) \
- dev_level(dev, fmt, ##__VA_ARGS__); \
-} while (0)
-
-#define dev_emerg_ratelimited(dev, fmt, ...) \
- dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__)
-#define dev_alert_ratelimited(dev, fmt, ...) \
- dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__)
-#define dev_crit_ratelimited(dev, fmt, ...) \
- dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__)
-#define dev_err_ratelimited(dev, fmt, ...) \
- dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__)
-#define dev_warn_ratelimited(dev, fmt, ...) \
- dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__)
-#define dev_notice_ratelimited(dev, fmt, ...) \
- dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
-#define dev_info_ratelimited(dev, fmt, ...) \
- dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
-#if defined(CONFIG_DYNAMIC_DEBUG)
-/* descriptor check is first to prevent flooding with "callbacks suppressed" */
-#define dev_dbg_ratelimited(dev, fmt, ...) \
-do { \
- static DEFINE_RATELIMIT_STATE(_rs, \
- DEFAULT_RATELIMIT_INTERVAL, \
- DEFAULT_RATELIMIT_BURST); \
- DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
- __ratelimit(&_rs)) \
- __dynamic_dev_dbg(&descriptor, dev, fmt, \
- ##__VA_ARGS__); \
-} while (0)
-#elif defined(DEBUG)
-#define dev_dbg_ratelimited(dev, fmt, ...) \
-do { \
- static DEFINE_RATELIMIT_STATE(_rs, \
- DEFAULT_RATELIMIT_INTERVAL, \
- DEFAULT_RATELIMIT_BURST); \
- if (__ratelimit(&_rs)) \
- dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
-} while (0)
-#else
-#define dev_dbg_ratelimited(dev, fmt, ...) \
-do { \
- if (0) \
- dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
-} while (0)
-#endif
-
-#ifdef VERBOSE_DEBUG
-#define dev_vdbg dev_dbg
-#else
-#define dev_vdbg(dev, format, arg...) \
-({ \
- if (0) \
- dev_printk(KERN_DEBUG, dev, format, ##arg); \
-})
-#endif
-
-/*
- * dev_WARN*() acts like dev_printk(), but with the key difference of
- * using WARN/WARN_ONCE to include file/line information and a backtrace.
- */
-#define dev_WARN(dev, format, arg...) \
- WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg);
-
-#define dev_WARN_ONCE(dev, condition, format, arg...) \
- WARN_ONCE(condition, "%s %s: " format, \
- dev_driver_string(dev), dev_name(dev), ## arg)
+__printf(3, 4) int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
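
The usual pattern, sketched here with a real but arbitrary resource getter (devm_clk_get()): dev_err_probe() logs the error, records the reason when it is -EPROBE_DEFER, and returns the error, collapsing probe error handling to one statement:

static int my_probe(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, NULL);

	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "failed to get clock\n");
	return 0;
}
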
/* Create alias, so I can be autoloaded. */
#define MODULE_ALIAS_CHARDEV(major,minor) \
@@ -1447,58 +1096,4 @@ do { \
#define MODULE_ALIAS_CHARDEV_MAJOR(major) \
MODULE_ALIAS("char-major-" __stringify(major) "-*")
-#ifdef CONFIG_SYSFS_DEPRECATED
-extern long sysfs_deprecated;
-#else
-#define sysfs_deprecated 0
-#endif
-
-/**
- * module_driver() - Helper macro for drivers that don't do anything
- * special in module init/exit. This eliminates a lot of boilerplate.
- * Each module may only use this macro once, and calling it replaces
- * module_init() and module_exit().
- *
- * @__driver: driver name
- * @__register: register function for this driver type
- * @__unregister: unregister function for this driver type
- * @...: Additional arguments to be passed to __register and __unregister.
- *
- * Use this macro to construct bus specific macros for registering
- * drivers, and do not use it on its own.
- */
-#define module_driver(__driver, __register, __unregister, ...) \
-static int __init __driver##_init(void) \
-{ \
- return __register(&(__driver) , ##__VA_ARGS__); \
-} \
-module_init(__driver##_init); \
-static void __exit __driver##_exit(void) \
-{ \
- __unregister(&(__driver) , ##__VA_ARGS__); \
-} \
-module_exit(__driver##_exit);
-
-/**
- * builtin_driver() - Helper macro for drivers that don't do anything
- * special in init and have no exit. This eliminates some boilerplate.
- * Each driver may only use this macro once, and calling it replaces
- * device_initcall (or in some cases, the legacy __initcall). This is
- * meant to be a direct parallel of module_driver() above but without
- * the __exit stuff that is not used for builtin cases.
- *
- * @__driver: driver name
- * @__register: register function for this driver type
- * @...: Additional arguments to be passed to __register
- *
- * Use this macro to construct bus specific macros for registering
- * drivers, and do not use it on its own.
- */
-#define builtin_driver(__driver, __register, ...) \
-static int __init __driver##_init(void) \
-{ \
- return __register(&(__driver) , ##__VA_ARGS__); \
-} \
-device_initcall(__driver##_init);
-
#endif /* _DEVICE_H_ */
diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
new file mode 100644
index 000000000000..ae10c4322754
--- /dev/null
+++ b/include/linux/device/bus.h
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bus.h - the bus-specific portions of the driver model
+ *
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2008-2009 Novell Inc.
+ * Copyright (c) 2012-2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2012-2019 Linux Foundation
+ *
+ * See Documentation/driver-api/driver-model/ for more information.
+ */
+
+#ifndef _DEVICE_BUS_H_
+#define _DEVICE_BUS_H_
+
+#include <linux/kobject.h>
+#include <linux/klist.h>
+#include <linux/pm.h>
+
+struct device_driver;
+struct fwnode_handle;
+
+/**
+ * struct bus_type - The bus type of the device
+ *
+ * @name: The name of the bus.
+ * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id).
+ * @bus_groups: Default attributes of the bus.
+ * @dev_groups: Default attributes of the devices on the bus.
+ * @drv_groups: Default attributes of the device drivers on the bus.
+ * @match: Called, perhaps multiple times, whenever a new device or driver
+ * is added for this bus. It should return a positive value if the
+ * given device can be handled by the given driver and zero
+ * otherwise. It may also return an error code if determining that
+ * the driver supports the device is not possible. In case of
+ * -EPROBE_DEFER it will queue the device for deferred probing.
+ * @uevent: Called when a device is added, removed, or a few other things
+ * that generate uevents to add the environment variables.
+ * @probe: Called when a new device or driver is added to this bus; it calls
+ * the specific driver's probe to initialize the matched device.
+ * @sync_state: Called to sync device state to software state after all the
+ * state tracking consumers linked to this device (present at
+ * the time of late_initcall) have successfully bound to a
+ * driver. If the device has no consumers, this function will
+ * be called at late_initcall_sync level. If the device has
+ * consumers that are never bound to a driver, this function
+ * will not get called until they do.
+ * @remove: Called when a device is removed from this bus.
+ * @shutdown: Called at shut-down time to quiesce the device.
+ *
+ * @online: Called to put the device back online (after offlining it).
+ * @offline: Called to put the device offline for hot-removal. May fail.
+ *
+ * @suspend: Called when a device on this bus wants to go to sleep mode.
+ * @resume: Called to bring a device on this bus out of sleep mode.
+ * @num_vf: Called to find out how many virtual functions a device on this
+ * bus supports.
+ * @dma_configure: Called to setup DMA configuration on a device on
+ * this bus.
+ * @dma_cleanup: Called to cleanup DMA configuration on a device on
+ * this bus.
+ * @pm: Power management operations of this bus, which call the specific
+ * device driver's pm-ops.
+ * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
+ * driver implementations to a bus and allow the driver to do
+ * bus-specific setup
+ * @need_parent_lock: When probing or removing a device on this bus, the
+ * device core should lock the device's parent.
+ *
+ * A bus is a channel between the processor and one or more devices. For the
+ * purposes of the device model, all devices are connected via a bus, even if
+ * it is an internal, virtual, "platform" bus. Buses can plug into each other.
+ * A USB controller is usually a PCI device, for example. The device model
+ * represents the actual connections between buses and the devices they control.
+ * A bus is represented by the bus_type structure. It contains the name, the
+ * default attributes, the bus' methods, PM operations, and the driver core's
+ * private data.
+ */
+struct bus_type {
+ const char *name;
+ const char *dev_name;
+ const struct attribute_group **bus_groups;
+ const struct attribute_group **dev_groups;
+ const struct attribute_group **drv_groups;
+
+ int (*match)(struct device *dev, struct device_driver *drv);
+ int (*uevent)(const struct device *dev, struct kobj_uevent_env *env);
+ int (*probe)(struct device *dev);
+ void (*sync_state)(struct device *dev);
+ void (*remove)(struct device *dev);
+ void (*shutdown)(struct device *dev);
+
+ int (*online)(struct device *dev);
+ int (*offline)(struct device *dev);
+
+ int (*suspend)(struct device *dev, pm_message_t state);
+ int (*resume)(struct device *dev);
+
+ int (*num_vf)(struct device *dev);
+
+ int (*dma_configure)(struct device *dev);
+ void (*dma_cleanup)(struct device *dev);
+
+ const struct dev_pm_ops *pm;
+
+ const struct iommu_ops *iommu_ops;
+
+ bool need_parent_lock;
+};
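
A minimal bus definition, sketched against the declarations below (my_bus_type and my_bus_match() are hypothetical); trivial buses often match purely on name:

static int my_bus_match(struct device *dev, struct device_driver *drv)
{
	return strcmp(dev_name(dev), drv->name) == 0;
}

static const struct bus_type my_bus_type = {
	.name	= "my_bus",
	.match	= my_bus_match,
};

/* registration: ret = bus_register(&my_bus_type); */
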
+
+int __must_check bus_register(const struct bus_type *bus);
+
+void bus_unregister(const struct bus_type *bus);
+
+int __must_check bus_rescan_devices(const struct bus_type *bus);
+
+struct bus_attribute {
+ struct attribute attr;
+ ssize_t (*show)(const struct bus_type *bus, char *buf);
+ ssize_t (*store)(const struct bus_type *bus, const char *buf, size_t count);
+};
+
+#define BUS_ATTR_RW(_name) \
+ struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
+#define BUS_ATTR_RO(_name) \
+ struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
+#define BUS_ATTR_WO(_name) \
+ struct bus_attribute bus_attr_##_name = __ATTR_WO(_name)
+
+int __must_check bus_create_file(const struct bus_type *bus, struct bus_attribute *attr);
+void bus_remove_file(const struct bus_type *bus, struct bus_attribute *attr);
+
+/* Generic device matching functions that all busses can use to match with */
+int device_match_name(struct device *dev, const void *name);
+int device_match_of_node(struct device *dev, const void *np);
+int device_match_fwnode(struct device *dev, const void *fwnode);
+int device_match_devt(struct device *dev, const void *pdevt);
+int device_match_acpi_dev(struct device *dev, const void *adev);
+int device_match_acpi_handle(struct device *dev, const void *handle);
+int device_match_any(struct device *dev, const void *unused);
+
+/* iterator helpers for buses */
+int bus_for_each_dev(const struct bus_type *bus, struct device *start, void *data,
+ int (*fn)(struct device *dev, void *data));
+struct device *bus_find_device(const struct bus_type *bus, struct device *start,
+ const void *data,
+ int (*match)(struct device *dev, const void *data));
+/**
+ * bus_find_device_by_name - device iterator for locating a particular device
+ * of a specific name.
+ * @bus: bus type
+ * @start: Device to begin with
+ * @name: name of the device to match
+ */
+static inline struct device *bus_find_device_by_name(const struct bus_type *bus,
+ struct device *start,
+ const char *name)
+{
+ return bus_find_device(bus, start, name, device_match_name);
+}
+
+/**
+ * bus_find_device_by_of_node : device iterator for locating a particular device
+ * matching the of_node.
+ * @bus: bus type
+ * @np: of_node of the device to match.
+ */
+static inline struct device *
+bus_find_device_by_of_node(const struct bus_type *bus, const struct device_node *np)
+{
+ return bus_find_device(bus, NULL, np, device_match_of_node);
+}
+
+/**
+ * bus_find_device_by_fwnode : device iterator for locating a particular device
+ * matching the fwnode.
+ * @bus: bus type
+ * @fwnode: fwnode of the device to match.
+ */
+static inline struct device *
+bus_find_device_by_fwnode(const struct bus_type *bus, const struct fwnode_handle *fwnode)
+{
+ return bus_find_device(bus, NULL, fwnode, device_match_fwnode);
+}
+
+/**
+ * bus_find_device_by_devt : device iterator for locating a particular device
+ * matching the device type.
+ * @bus: bus type
+ * @devt: device type of the device to match.
+ */
+static inline struct device *bus_find_device_by_devt(const struct bus_type *bus,
+ dev_t devt)
+{
+ return bus_find_device(bus, NULL, &devt, device_match_devt);
+}
+
+/**
+ * bus_find_next_device - Find the next device after a given device in a
+ * given bus.
+ * @bus: bus type
+ * @cur: device to begin the search with.
+ */
+static inline struct device *
+bus_find_next_device(const struct bus_type *bus, struct device *cur)
+{
+ return bus_find_device(bus, cur, NULL, device_match_any);
+}
+
+#ifdef CONFIG_ACPI
+struct acpi_device;
+
+/**
+ * bus_find_device_by_acpi_dev : device iterator for locating a particular device
+ * matching the ACPI COMPANION device.
+ * @bus: bus type
+ * @adev: ACPI COMPANION device to match.
+ */
+static inline struct device *
+bus_find_device_by_acpi_dev(const struct bus_type *bus, const struct acpi_device *adev)
+{
+ return bus_find_device(bus, NULL, adev, device_match_acpi_dev);
+}
+#else
+static inline struct device *
+bus_find_device_by_acpi_dev(const struct bus_type *bus, const void *adev)
+{
+ return NULL;
+}
+#endif
+
+int bus_for_each_drv(const struct bus_type *bus, struct device_driver *start,
+ void *data, int (*fn)(struct device_driver *, void *));
+void bus_sort_breadthfirst(struct bus_type *bus,
+ int (*compare)(const struct device *a,
+ const struct device *b));
+/*
+ * Bus notifiers: Get notified of addition/removal of devices
+ * and binding/unbinding of drivers to devices.
+ * In the long run, it should be a replacement for the platform
+ * notify hooks.
+ */
+struct notifier_block;
+
+int bus_register_notifier(const struct bus_type *bus, struct notifier_block *nb);
+int bus_unregister_notifier(const struct bus_type *bus, struct notifier_block *nb);
+
+/**
+ * enum bus_notifier_event - Bus Notifier events that have happened
+ * @BUS_NOTIFY_ADD_DEVICE: device is added to this bus
+ * @BUS_NOTIFY_DEL_DEVICE: device is about to be removed from this bus
+ * @BUS_NOTIFY_REMOVED_DEVICE: device is successfully removed from this bus
+ * @BUS_NOTIFY_BIND_DRIVER: a driver is about to be bound to this device on this bus
+ * @BUS_NOTIFY_BOUND_DRIVER: a driver is successfully bound to this device on this bus
+ * @BUS_NOTIFY_UNBIND_DRIVER: a driver is about to be unbound from this device on this bus
+ * @BUS_NOTIFY_UNBOUND_DRIVER: a driver is successfully unbound from this device on this bus
+ * @BUS_NOTIFY_DRIVER_NOT_BOUND: a driver failed to be bound to this device on this bus
+ *
+ * These are the value passed to a bus notifier when a specific event happens.
+ *
+ * Note that bus notifiers are likely to be called with the device lock already
+ * held by the driver core, so be careful in any notifier callback as to what
+ * you do with the device structure.
+ *
+ * All bus notifiers are called with the target struct device * as an argument.
+ */
+enum bus_notifier_event {
+ BUS_NOTIFY_ADD_DEVICE,
+ BUS_NOTIFY_DEL_DEVICE,
+ BUS_NOTIFY_REMOVED_DEVICE,
+ BUS_NOTIFY_BIND_DRIVER,
+ BUS_NOTIFY_BOUND_DRIVER,
+ BUS_NOTIFY_UNBIND_DRIVER,
+ BUS_NOTIFY_UNBOUND_DRIVER,
+ BUS_NOTIFY_DRIVER_NOT_BOUND,
+};
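
An illustrative notifier sketch (using the hypothetical my_bus_type from above): the notifier data is the struct device in question:

static int my_bus_notify(struct notifier_block *nb, unsigned long action,
			 void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_BOUND_DRIVER)
		dev_info(dev, "driver bound\n");
	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_bus_notify,
};

/* bus_register_notifier(&my_bus_type, &my_nb); */
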
+
+struct kset *bus_get_kset(const struct bus_type *bus);
+struct device *bus_get_dev_root(const struct bus_type *bus);
+
+#endif
diff --git a/include/linux/device/class.h b/include/linux/device/class.h
new file mode 100644
index 000000000000..9deeaeb457bb
--- /dev/null
+++ b/include/linux/device/class.h
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The class-specific portions of the driver model
+ *
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2008-2009 Novell Inc.
+ * Copyright (c) 2012-2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2012-2019 Linux Foundation
+ *
+ * See Documentation/driver-api/driver-model/ for more information.
+ */
+
+#ifndef _DEVICE_CLASS_H_
+#define _DEVICE_CLASS_H_
+
+#include <linux/kobject.h>
+#include <linux/klist.h>
+#include <linux/pm.h>
+#include <linux/device/bus.h>
+
+struct device;
+struct fwnode_handle;
+
+/**
+ * struct class - device classes
+ * @name: Name of the class.
+ * @class_groups: Default attributes of this class.
+ * @dev_groups: Default attributes of the devices that belong to the class.
+ * @dev_uevent: Called when a device is added, removed from this class, or a
+ * few other things that generate uevents to add the environment
+ * variables.
+ * @devnode: Callback to provide the device node name used by devtmpfs.
+ * @class_release: Called to release this class.
+ * @dev_release: Called to release the device.
+ * @shutdown_pre: Called at shut-down time before driver shutdown.
+ * @ns_type: Callbacks so sysfs can determine namespaces.
+ * @namespace: Namespace of the device that belongs to this class.
+ * @get_ownership: Allows class to specify uid/gid of the sysfs directories
+ * for the devices belonging to the class. Usually tied to
+ * device's namespace.
+ * @pm: The default device power management operations of this class.
+ *
+ * A class is a higher-level view of a device that abstracts out low-level
+ * implementation details. Drivers may see a SCSI disk or an ATA disk, but,
+ * at the class level, they are all simply disks. Classes allow user space
+ * to work with devices based on what they do, rather than how they are
+ * connected or how they work.
+ */
+struct class {
+ const char *name;
+
+ const struct attribute_group **class_groups;
+ const struct attribute_group **dev_groups;
+
+ int (*dev_uevent)(const struct device *dev, struct kobj_uevent_env *env);
+ char *(*devnode)(const struct device *dev, umode_t *mode);
+
+ void (*class_release)(const struct class *class);
+ void (*dev_release)(struct device *dev);
+
+ int (*shutdown_pre)(struct device *dev);
+
+ const struct kobj_ns_type_operations *ns_type;
+ const void *(*namespace)(const struct device *dev);
+
+ void (*get_ownership)(const struct device *dev, kuid_t *uid, kgid_t *gid);
+
+ const struct dev_pm_ops *pm;
+};
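
A hedged sketch of a statically defined class using the callbacks above; class_register()/class_unregister() are declared just below, and the 0666 mode is only an example:

static char *my_devnode(const struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;	/* world-accessible /dev node, example only */
	return NULL;		/* NULL keeps the default node name */
}

static const struct class my_class = {
	.name		= "my_class",
	.devnode	= my_devnode,
};

/* ret = class_register(&my_class); ... class_unregister(&my_class); */
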
+
+struct class_dev_iter {
+ struct klist_iter ki;
+ const struct device_type *type;
+};
+
+int __must_check class_register(const struct class *class);
+void class_unregister(const struct class *class);
+bool class_is_registered(const struct class *class);
+
+struct class_compat;
+struct class_compat *class_compat_register(const char *name);
+void class_compat_unregister(struct class_compat *cls);
+int class_compat_create_link(struct class_compat *cls, struct device *dev,
+ struct device *device_link);
+void class_compat_remove_link(struct class_compat *cls, struct device *dev,
+ struct device *device_link);
+
+void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
+ const struct device *start, const struct device_type *type);
+struct device *class_dev_iter_next(struct class_dev_iter *iter);
+void class_dev_iter_exit(struct class_dev_iter *iter);
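
The iterator above is used like this (a sketch; my_class_walk() is hypothetical):

static void my_class_walk(const struct class *class)
{
	struct class_dev_iter iter;
	struct device *dev;

	/* NULL start and NULL type: visit every device in the class. */
	class_dev_iter_init(&iter, class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter)))
		dev_info(dev, "visited\n");
	class_dev_iter_exit(&iter);
}
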
+
+int class_for_each_device(const struct class *class, const struct device *start, void *data,
+ int (*fn)(struct device *dev, void *data));
+struct device *class_find_device(const struct class *class, const struct device *start,
+ const void *data, int (*match)(struct device *, const void *));
+
+/**
+ * class_find_device_by_name - device iterator for locating a particular device
+ * of a specific name.
+ * @class: class type
+ * @name: name of the device to match
+ */
+static inline struct device *class_find_device_by_name(const struct class *class,
+ const char *name)
+{
+ return class_find_device(class, NULL, name, device_match_name);
+}
+
+/**
+ * class_find_device_by_of_node : device iterator for locating a particular device
+ * matching the of_node.
+ * @class: class type
+ * @np: of_node of the device to match.
+ */
+static inline struct device *class_find_device_by_of_node(const struct class *class,
+ const struct device_node *np)
+{
+ return class_find_device(class, NULL, np, device_match_of_node);
+}
+
+/**
+ * class_find_device_by_fwnode : device iterator for locating a particular device
+ * matching the fwnode.
+ * @class: class type
+ * @fwnode: fwnode of the device to match.
+ */
+static inline struct device *class_find_device_by_fwnode(const struct class *class,
+ const struct fwnode_handle *fwnode)
+{
+ return class_find_device(class, NULL, fwnode, device_match_fwnode);
+}
+
+/**
+ * class_find_device_by_devt : device iterator for locating a particular device
+ * matching the device type.
+ * @class: class type
+ * @devt: device type of the device to match.
+ */
+static inline struct device *class_find_device_by_devt(const struct class *class,
+ dev_t devt)
+{
+ return class_find_device(class, NULL, &devt, device_match_devt);
+}
+
+#ifdef CONFIG_ACPI
+struct acpi_device;
+/**
+ * class_find_device_by_acpi_dev : device iterator for locating a particular
+ * device matching the ACPI_COMPANION device.
+ * @class: class type
+ * @adev: ACPI_COMPANION device to match.
+ */
+static inline struct device *class_find_device_by_acpi_dev(const struct class *class,
+ const struct acpi_device *adev)
+{
+ return class_find_device(class, NULL, adev, device_match_acpi_dev);
+}
+#else
+static inline struct device *class_find_device_by_acpi_dev(const struct class *class,
+ const void *adev)
+{
+ return NULL;
+}
+#endif
+
+struct class_attribute {
+ struct attribute attr;
+ ssize_t (*show)(const struct class *class, const struct class_attribute *attr,
+ char *buf);
+ ssize_t (*store)(const struct class *class, const struct class_attribute *attr,
+ const char *buf, size_t count);
+};
+
+#define CLASS_ATTR_RW(_name) \
+ struct class_attribute class_attr_##_name = __ATTR_RW(_name)
+#define CLASS_ATTR_RO(_name) \
+ struct class_attribute class_attr_##_name = __ATTR_RO(_name)
+#define CLASS_ATTR_WO(_name) \
+ struct class_attribute class_attr_##_name = __ATTR_WO(_name)
+
+int __must_check class_create_file_ns(const struct class *class, const struct class_attribute *attr,
+ const void *ns);
+void class_remove_file_ns(const struct class *class, const struct class_attribute *attr,
+ const void *ns);
+
+static inline int __must_check class_create_file(const struct class *class,
+ const struct class_attribute *attr)
+{
+ return class_create_file_ns(class, attr, NULL);
+}
+
+static inline void class_remove_file(const struct class *class,
+ const struct class_attribute *attr)
+{
+ return class_remove_file_ns(class, attr, NULL);
+}
+
+/* Simple class attribute that is just a static string */
+struct class_attribute_string {
+ struct class_attribute attr;
+ char *str;
+};
+
+/* Currently read-only */
+#define _CLASS_ATTR_STRING(_name, _mode, _str) \
+ { __ATTR(_name, _mode, show_class_attr_string, NULL), _str }
+#define CLASS_ATTR_STRING(_name, _mode, _str) \
+ struct class_attribute_string class_attr_##_name = \
+ _CLASS_ATTR_STRING(_name, _mode, _str)
+
+ssize_t show_class_attr_string(const struct class *class, const struct class_attribute *attr,
+ char *buf);
+
+struct class_interface {
+ struct list_head node;
+ const struct class *class;
+
+ int (*add_dev) (struct device *dev);
+ void (*remove_dev) (struct device *dev);
+};
+
+int __must_check class_interface_register(struct class_interface *);
+void class_interface_unregister(struct class_interface *);
+
+struct class * __must_check class_create(const char *name);
+void class_destroy(const struct class *cls);
+
+#endif /* _DEVICE_CLASS_H_ */
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
new file mode 100644
index 000000000000..c244267a6744
--- /dev/null
+++ b/include/linux/device/driver.h
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The driver-specific portions of the driver model
+ *
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2008-2009 Novell Inc.
+ * Copyright (c) 2012-2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2012-2019 Linux Foundation
+ *
+ * See Documentation/driver-api/driver-model/ for more information.
+ */
+
+#ifndef _DEVICE_DRIVER_H_
+#define _DEVICE_DRIVER_H_
+
+#include <linux/kobject.h>
+#include <linux/klist.h>
+#include <linux/pm.h>
+#include <linux/device/bus.h>
+#include <linux/module.h>
+
+/**
+ * enum probe_type - device driver probe type to try
+ * Device drivers may opt in for special handling of their
+ * respective probe routines. This tells the core what to
+ * expect and prefer.
+ *
+ * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well
+ * whether probed synchronously or asynchronously.
+ * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices which
+ * probing order is not essential for booting the system may
+ * opt into executing their probes asynchronously.
+ * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need
+ * their probe routines to run synchronously with driver and
+ * device registration (with the exception of -EPROBE_DEFER
+ * handling - re-probing always ends up being done asynchronously).
+ *
+ * Note that the end goal is to switch the kernel to use asynchronous
+ * probing by default, so annotating drivers with
+ * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us
+ * to speed up boot process while we are validating the rest of the
+ * drivers.
+ */
+enum probe_type {
+ PROBE_DEFAULT_STRATEGY,
+ PROBE_PREFER_ASYNCHRONOUS,
+ PROBE_FORCE_SYNCHRONOUS,
+};
+
+/**
+ * struct device_driver - The basic device driver structure
+ * @name: Name of the device driver.
+ * @bus: The bus which the device of this driver belongs to.
+ * @owner: The module owner.
+ * @mod_name: Used for built-in modules.
+ * @suppress_bind_attrs: Disables bind/unbind via sysfs.
+ * @probe_type: Type of the probe (synchronous or asynchronous) to use.
+ * @of_match_table: The open firmware table.
+ * @acpi_match_table: The ACPI match table.
+ * @probe: Called to query the existence of a specific device,
+ * whether this driver can work with it, and bind the driver
+ * to a specific device.
+ * @sync_state: Called to sync device state to software state after all the
+ * state tracking consumers linked to this device (present at
+ * the time of late_initcall) have successfully bound to a
+ * driver. If the device has no consumers, this function will
+ * be called at late_initcall_sync level. If the device has
+ * consumers that are never bound to a driver, this function
+ * will not get called until they do.
+ * @remove: Called when the device is removed from the system to
+ * unbind a device from this driver.
+ * @shutdown: Called at shut-down time to quiesce the device.
+ * @suspend: Called to put the device to sleep mode. Usually to a
+ * low power state.
+ * @resume: Called to bring a device from sleep mode.
+ * @groups: Default attributes that get created by the driver core
+ * automatically.
+ * @dev_groups: Additional attributes attached to device instance once
+ * it is bound to the driver.
+ * @pm: Power management operations of the device which matched
+ * this driver.
+ * @coredump: Called when sysfs entry is written to. The device driver
+ * is expected to call the dev_coredump API resulting in a
+ * uevent.
+ * @p: Driver core's private data, no one other than the driver
+ * core can touch this.
+ *
+ * The device driver-model tracks all of the drivers known to the system.
+ * The main reason for this tracking is to enable the driver core to match
+ * up drivers with new devices. Once drivers are known objects within the
+ * system, however, a number of other things become possible. Device drivers
+ * can export information and configuration variables that are independent
+ * of any specific device.
+ */
+struct device_driver {
+ const char *name;
+ const struct bus_type *bus;
+
+ struct module *owner;
+ const char *mod_name; /* used for built-in modules */
+
+ bool suppress_bind_attrs; /* disables bind/unbind via sysfs */
+ enum probe_type probe_type;
+
+ const struct of_device_id *of_match_table;
+ const struct acpi_device_id *acpi_match_table;
+
+ int (*probe) (struct device *dev);
+ void (*sync_state)(struct device *dev);
+ int (*remove) (struct device *dev);
+ void (*shutdown) (struct device *dev);
+ int (*suspend) (struct device *dev, pm_message_t state);
+ int (*resume) (struct device *dev);
+ const struct attribute_group **groups;
+ const struct attribute_group **dev_groups;
+
+ const struct dev_pm_ops *pm;
+ void (*coredump) (struct device *dev);
+
+ struct driver_private *p;
+};
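
For orientation, a hedged sketch of the generic fields a bus-specific driver would fill in (my_bus_type as sketched in bus.h above; the rest is hypothetical), opting into asynchronous probing via @probe_type:

static int my_drv_probe(struct device *dev)
{
	dev_info(dev, "bound\n");
	return 0;
}

static struct device_driver my_drv = {
	.name		= "my_drv",
	.bus		= &my_bus_type,
	.owner		= THIS_MODULE,
	.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
	.probe		= my_drv_probe,
};

/* ret = driver_register(&my_drv); ... driver_unregister(&my_drv); */
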
+
+
+int __must_check driver_register(struct device_driver *drv);
+void driver_unregister(struct device_driver *drv);
+
+struct device_driver *driver_find(const char *name, const struct bus_type *bus);
+int driver_probe_done(void);
+void wait_for_device_probe(void);
+void __init wait_for_init_devices_probe(void);
+
+/* sysfs interface for exporting driver attributes */
+
+struct driver_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct device_driver *driver, char *buf);
+ ssize_t (*store)(struct device_driver *driver, const char *buf,
+ size_t count);
+};
+
+#define DRIVER_ATTR_RW(_name) \
+ struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
+#define DRIVER_ATTR_RO(_name) \
+ struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
+#define DRIVER_ATTR_WO(_name) \
+ struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
+
+int __must_check driver_create_file(struct device_driver *driver,
+ const struct driver_attribute *attr);
+void driver_remove_file(struct device_driver *driver,
+ const struct driver_attribute *attr);
+
+int driver_set_override(struct device *dev, const char **override,
+ const char *s, size_t len);
+int __must_check driver_for_each_device(struct device_driver *drv, struct device *start,
+ void *data, int (*fn)(struct device *dev, void *));
+struct device *driver_find_device(struct device_driver *drv,
+ struct device *start, const void *data,
+ int (*match)(struct device *dev, const void *data));
+
+/**
+ * driver_find_device_by_name - device iterator for locating a particular device
+ * of a specific name.
+ * @drv: the driver we're iterating
+ * @name: name of the device to match
+ */
+static inline struct device *driver_find_device_by_name(struct device_driver *drv,
+ const char *name)
+{
+ return driver_find_device(drv, NULL, name, device_match_name);
+}
+
+/**
+ * driver_find_device_by_of_node - device iterator for locating a particular device
+ * by of_node pointer.
+ * @drv: the driver we're iterating
+ * @np: of_node pointer to match.
+ */
+static inline struct device *
+driver_find_device_by_of_node(struct device_driver *drv,
+ const struct device_node *np)
+{
+ return driver_find_device(drv, NULL, np, device_match_of_node);
+}
+
+/**
+ * driver_find_device_by_fwnode - device iterator for locating a particular device
+ * by fwnode pointer.
+ * @drv: the driver we're iterating
+ * @fwnode: fwnode pointer to match.
+ */
+static inline struct device *
+driver_find_device_by_fwnode(struct device_driver *drv,
+ const struct fwnode_handle *fwnode)
+{
+ return driver_find_device(drv, NULL, fwnode, device_match_fwnode);
+}
+
+/**
+ * driver_find_device_by_devt - device iterator for locating a particular device
+ * by devt.
+ * @drv: the driver we're iterating
+ * @devt: devt to match.
+ */
+static inline struct device *driver_find_device_by_devt(struct device_driver *drv,
+ dev_t devt)
+{
+ return driver_find_device(drv, NULL, &devt, device_match_devt);
+}
+
+static inline struct device *driver_find_next_device(struct device_driver *drv,
+ struct device *start)
+{
+ return driver_find_device(drv, start, NULL, device_match_any);
+}
+
+#ifdef CONFIG_ACPI
+/**
+ * driver_find_device_by_acpi_dev - device iterator for locating a particular
+ * device matching the ACPI_COMPANION device.
+ * @drv: the driver we're iterating
+ * @adev: ACPI_COMPANION device to match.
+ */
+static inline struct device *
+driver_find_device_by_acpi_dev(struct device_driver *drv,
+ const struct acpi_device *adev)
+{
+ return driver_find_device(drv, NULL, adev, device_match_acpi_dev);
+}
+#else
+static inline struct device *
+driver_find_device_by_acpi_dev(struct device_driver *drv, const void *adev)
+{
+ return NULL;
+}
+#endif
+
+void driver_deferred_probe_add(struct device *dev);
+int driver_deferred_probe_check_state(struct device *dev);
+void driver_init(void);
+
+/**
+ * module_driver() - Helper macro for drivers that don't do anything
+ * special in module init/exit. This eliminates a lot of boilerplate.
+ * Each module may only use this macro once, and calling it replaces
+ * module_init() and module_exit().
+ *
+ * @__driver: driver name
+ * @__register: register function for this driver type
+ * @__unregister: unregister function for this driver type
+ * @...: Additional arguments to be passed to __register and __unregister.
+ *
+ * Use this macro to construct bus specific macros for registering
+ * drivers, and do not use it on its own.
+ */
+#define module_driver(__driver, __register, __unregister, ...) \
+static int __init __driver##_init(void) \
+{ \
+ return __register(&(__driver) , ##__VA_ARGS__); \
+} \
+module_init(__driver##_init); \
+static void __exit __driver##_exit(void) \
+{ \
+ __unregister(&(__driver) , ##__VA_ARGS__); \
+} \
+module_exit(__driver##_exit);
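+
+/*
+ * Illustrative only: bus frameworks typically wrap module_driver() like this
+ * ("foo" is a hypothetical bus; compare module_dfl_driver() later in this
+ * patch):
+ *
+ *	#define module_foo_driver(__foo_driver) \
+ *		module_driver(__foo_driver, foo_driver_register, \
+ *			      foo_driver_unregister)
+ */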
+
+/**
+ * builtin_driver() - Helper macro for drivers that don't do anything
+ * special in init and have no exit. This eliminates some boilerplate.
+ * Each driver may only use this macro once, and calling it replaces
+ * device_initcall (or in some cases, the legacy __initcall). This is
+ * meant to be a direct parallel of module_driver() above but without
+ * the __exit stuff that is not used for builtin cases.
+ *
+ * @__driver: driver name
+ * @__register: register function for this driver type
+ * @...: Additional arguments to be passed to __register
+ *
+ * Use this macro to construct bus specific macros for registering
+ * drivers, and do not use it on its own.
+ */
+#define builtin_driver(__driver, __register, ...) \
+static int __init __driver##_init(void) \
+{ \
+ return __register(&(__driver) , ##__VA_ARGS__); \
+} \
+device_initcall(__driver##_init);
+
+#endif /* _DEVICE_DRIVER_H_ */
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h
index 8b64221b432b..d02f32b7514e 100644
--- a/include/linux/device_cgroup.h
+++ b/include/linux/device_cgroup.h
@@ -1,17 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/fs.h>
-#ifdef CONFIG_CGROUP_DEVICE
-extern int __devcgroup_inode_permission(struct inode *inode, int mask);
-extern int devcgroup_inode_mknod(int mode, dev_t dev);
+#define DEVCG_ACC_MKNOD 1
+#define DEVCG_ACC_READ 2
+#define DEVCG_ACC_WRITE 4
+#define DEVCG_ACC_MASK (DEVCG_ACC_MKNOD | DEVCG_ACC_READ | DEVCG_ACC_WRITE)
+
+#define DEVCG_DEV_BLOCK 1
+#define DEVCG_DEV_CHAR 2
+#define DEVCG_DEV_ALL 4 /* this represents all devices */
+
+
+#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
+int devcgroup_check_permission(short type, u32 major, u32 minor,
+ short access);
static inline int devcgroup_inode_permission(struct inode *inode, int mask)
{
+ short type, access = 0;
+
if (likely(!inode->i_rdev))
return 0;
- if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
+
+ if (S_ISBLK(inode->i_mode))
+ type = DEVCG_DEV_BLOCK;
+ else if (S_ISCHR(inode->i_mode))
+ type = DEVCG_DEV_CHAR;
+ else
return 0;
- return __devcgroup_inode_permission(inode, mask);
+
+ if (mask & MAY_WRITE)
+ access |= DEVCG_ACC_WRITE;
+ if (mask & MAY_READ)
+ access |= DEVCG_ACC_READ;
+
+ return devcgroup_check_permission(type, imajor(inode), iminor(inode),
+ access);
}
+
+static inline int devcgroup_inode_mknod(int mode, dev_t dev)
+{
+ short type;
+
+ if (!S_ISBLK(mode) && !S_ISCHR(mode))
+ return 0;
+
+ if (S_ISCHR(mode) && dev == WHITEOUT_DEV)
+ return 0;
+
+ if (S_ISBLK(mode))
+ type = DEVCG_DEV_BLOCK;
+ else
+ type = DEVCG_DEV_CHAR;
+
+ return devcgroup_check_permission(type, MAJOR(dev), MINOR(dev),
+ DEVCG_ACC_MKNOD);
+}
+
#else
+static inline int devcgroup_check_permission(short type, u32 major, u32 minor,
+ short access)
+{ return 0; }
static inline int devcgroup_inode_permission(struct inode *inode, int mask)
{ return 0; }
static inline int devcgroup_inode_mknod(int mode, dev_t dev)
diff --git a/include/linux/devm-helpers.h b/include/linux/devm-helpers.h
new file mode 100644
index 000000000000..74891802200d
--- /dev/null
+++ b/include/linux/devm-helpers.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __LINUX_DEVM_HELPERS_H
+#define __LINUX_DEVM_HELPERS_H
+
+/*
+ * Functions which automatically cancel operations or release resources upon
+ * driver detach.
+ *
+ * These helpers make it easier to avoid mixing manual and devm-based
+ * resource management, which can be a source of annoying, rarely occurring,
+ * hard-to-reproduce bugs.
+ *
+ * Please take into account that devm-based cancellation may be performed some
+ * time after remove() has run.
+ *
+ * Thus mixing devm and manual resource management can easily cause problems
+ * when unwinding operations with dependencies. An IRQ scheduling a work item
+ * is a typical example: IRQs are often devm-managed while workqueues are
+ * manually cleaned up at remove(). If the IRQ is not manually freed at
+ * remove() (and this is often the case when we use devm for IRQs), there is a
+ * window after remove() - and before the devm-managed IRQ is freed - where a
+ * new IRQ may fire and schedule a work item that won't be cancelled, because
+ * remove() has already run.
+ */
+
+#include <linux/device.h>
+#include <linux/workqueue.h>
+
+static inline void devm_delayed_work_drop(void *res)
+{
+ cancel_delayed_work_sync(res);
+}
+
+/**
+ * devm_delayed_work_autocancel - Resource-managed delayed work allocation
+ * @dev: Device whose lifetime the work is bound to
+ * @w: Work item to be queued
+ * @worker: Worker function
+ *
+ * Initialize delayed work which is automatically cancelled when the driver is
+ * detached. A few drivers need delayed work which must be cancelled before the
+ * driver is detached to avoid accessing removed resources.
+ * devm_delayed_work_autocancel() can be used to omit the explicit
+ * cancellation when the driver is detached.
+ */
+static inline int devm_delayed_work_autocancel(struct device *dev,
+ struct delayed_work *w,
+ work_func_t worker)
+{
+ INIT_DELAYED_WORK(w, worker);
+ return devm_add_action(dev, devm_delayed_work_drop, w);
+}
+
+static inline void devm_work_drop(void *res)
+{
+ cancel_work_sync(res);
+}
+
+/**
+ * devm_work_autocancel - Resource-managed work allocation
+ * @dev: Device whose lifetime the work is bound to
+ * @w: Work to be added (and automatically cancelled)
+ * @worker: Worker function
+ *
+ * Initialize work which is automatically cancelled when the driver is
+ * detached. A few drivers need to queue work which must be cancelled before
+ * the driver is detached to avoid accessing removed resources.
+ * devm_work_autocancel() can be used to omit the explicit
+ * cancellation when the driver is detached.
+ */
+static inline int devm_work_autocancel(struct device *dev,
+ struct work_struct *w,
+ work_func_t worker)
+{
+ INIT_WORK(w, worker);
+ return devm_add_action(dev, devm_work_drop, w);
+}
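+
+/*
+ * Illustrative usage (hypothetical foo driver, not part of this header).
+ * Note that devm_work_autocancel() also performs the INIT_WORK(), so the
+ * caller only supplies the worker function:
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		struct foo *foo;
+ *
+ *		foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
+ *		if (!foo)
+ *			return -ENOMEM;
+ *		return devm_work_autocancel(&pdev->dev, &foo->work, foo_worker);
+ *	}
+ */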
+
+#endif
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index 277ab9af9ac2..45f746a48dcd 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* -*- linux-c -*- --------------------------------------------------------- *
*
* linux/include/linux/devpts_fs.h
*
* Copyright 1998-2004 H. Peter Anvin -- All Rights Reserved
*
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
* ------------------------------------------------------------------------- */
#ifndef _LINUX_DEVPTS_FS_H
@@ -19,6 +16,7 @@
struct pts_fs_info;
+struct vfsmount *devpts_mntget(struct file *, struct pts_fs_info *);
struct pts_fs_info *devpts_acquire(struct file *);
void devpts_release(struct pts_fs_info *);
@@ -32,6 +30,15 @@ void *devpts_get_priv(struct dentry *);
/* unlink */
void devpts_pty_kill(struct dentry *);
+/* in pty.c */
+int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags);
+
+#else
+static inline int
+ptm_open_peer(struct file *master, struct tty_struct *tty, int flags)
+{
+ return -EIO;
+}
#endif
diff --git a/include/linux/dfl.h b/include/linux/dfl.h
new file mode 100644
index 000000000000..0a7a00a0ee7f
--- /dev/null
+++ b/include/linux/dfl.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for DFL driver and device API
+ *
+ * Copyright (C) 2020 Intel Corporation, Inc.
+ */
+
+#ifndef __LINUX_DFL_H
+#define __LINUX_DFL_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+/**
+ * enum dfl_id_type - define the DFL FIU types
+ */
+enum dfl_id_type {
+ FME_ID = 0,
+ PORT_ID = 1,
+ DFL_ID_MAX,
+};
+
+/**
+ * struct dfl_device - represent a dfl device on the dfl bus
+ *
+ * @dev: generic device interface.
+ * @id: id of the dfl device.
+ * @type: type of DFL FIU of the device. See enum dfl_id_type.
+ * @feature_id: feature identifier local to its DFL FIU type.
+ * @revision: revision of this dfl device feature.
+ * @mmio_res: mmio resource of this dfl device.
+ * @irqs: list of Linux IRQ numbers of this dfl device.
+ * @num_irqs: number of IRQs supported by this dfl device.
+ * @cdev: pointer to DFL FPGA container device this dfl device belongs to.
+ * @id_entry: matched id entry in dfl driver's id table.
+ * @dfh_version: version of DFH for the device
+ * @param_size: size of the block parameters in bytes
+ * @params: pointer to a copied block of parameters
+ */
+struct dfl_device {
+ struct device dev;
+ int id;
+ u16 type;
+ u16 feature_id;
+ u8 revision;
+ struct resource mmio_res;
+ int *irqs;
+ unsigned int num_irqs;
+ struct dfl_fpga_cdev *cdev;
+ const struct dfl_device_id *id_entry;
+ u8 dfh_version;
+ unsigned int param_size;
+ void *params;
+};
+
+/**
+ * struct dfl_driver - represent a dfl device driver
+ *
+ * @drv: driver model structure.
+ * @id_table: pointer to table of device IDs the driver is interested in.
+ * { } member terminated.
+ * @probe: mandatory callback for device binding.
+ * @remove: callback for device unbinding.
+ */
+struct dfl_driver {
+ struct device_driver drv;
+ const struct dfl_device_id *id_table;
+
+ int (*probe)(struct dfl_device *dfl_dev);
+ void (*remove)(struct dfl_device *dfl_dev);
+};
+
+#define to_dfl_dev(d) container_of(d, struct dfl_device, dev)
+#define to_dfl_drv(d) container_of(d, struct dfl_driver, drv)
+
+/*
+ * use a macro to avoid include chaining to get THIS_MODULE.
+ */
+#define dfl_driver_register(drv) \
+ __dfl_driver_register(drv, THIS_MODULE)
+int __dfl_driver_register(struct dfl_driver *dfl_drv, struct module *owner);
+void dfl_driver_unregister(struct dfl_driver *dfl_drv);
+
+/*
+ * module_dfl_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit().
+ */
+#define module_dfl_driver(__dfl_driver) \
+ module_driver(__dfl_driver, dfl_driver_register, \
+ dfl_driver_unregister)
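+
+/*
+ * Illustrative only: a minimal dfl driver skeleton using the API above. The
+ * names and the feature ID value are hypothetical:
+ *
+ *	static const struct dfl_device_id foo_ids[] = {
+ *		{ FME_ID, 0x10 },
+ *		{ }
+ *	};
+ *
+ *	static struct dfl_driver foo_driver = {
+ *		.drv = { .name = "dfl-foo" },
+ *		.id_table = foo_ids,
+ *		.probe = foo_probe,
+ *	};
+ *	module_dfl_driver(foo_driver);
+ */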
+
+void *dfh_find_param(struct dfl_device *dfl_dev, int param_id, size_t *pcount);
+#endif /* __LINUX_DFL_H */
diff --git a/include/linux/digsig.h b/include/linux/digsig.h
index 6f85a070bb45..2ace69e41088 100644
--- a/include/linux/digsig.h
+++ b/include/linux/digsig.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Nokia Corporation
* Copyright (C) 2011 Intel Corporation
@@ -5,11 +6,6 @@
* Author:
* Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
* <dmitry.kasatkin@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, version 2 of the License.
- *
*/
#ifndef _DIGSIG_H
@@ -33,7 +29,7 @@ struct pubkey_hdr {
uint32_t timestamp; /* key made, always 0 for now */
uint8_t algo;
uint8_t nmpi;
- char mpi[0];
+ char mpi[];
} __packed;
struct signature_hdr {
@@ -43,7 +39,7 @@ struct signature_hdr {
uint8_t hash;
uint8_t keyid[8];
uint8_t nmpi;
- char mpi[0];
+ char mpi[];
} __packed;
#if defined(CONFIG_SIGNATURE) || defined(CONFIG_SIGNATURE_MODULE)
diff --git a/include/linux/dim.h b/include/linux/dim.h
new file mode 100644
index 000000000000..f343bc9aa2ec
--- /dev/null
+++ b/include/linux/dim.h
@@ -0,0 +1,337 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef DIM_H
+#define DIM_H
+
+#include <linux/bits.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+/*
+ * Number of events between DIM iterations.
+ * Causes a moderation of the algorithm run.
+ */
+#define DIM_NEVENTS 64
+
+/*
+ * Check whether the difference between two values justifies taking an action.
+ * We consider a 10% difference significant.
+ */
+#define IS_SIGNIFICANT_DIFF(val, ref) \
+ ((ref) && (((100UL * abs((val) - (ref))) / (ref)) > 10))
+
+/*
+ * Calculate the gap between two values.
+ * Take wrap-around and variable size into consideration.
+ */
+#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) \
+ & (BIT_ULL(bits) - 1))
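+
+/*
+ * For example, with 16-bit counters BIT_GAP(16, 5, 0xFFFB) evaluates to 10,
+ * correctly accounting for the counter wrapping from 0xFFFB through 0xFFFF
+ * to 5.
+ */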
+
+/**
+ * struct dim_cq_moder - Structure for CQ moderation values.
+ * Used for communications between DIM and its consumer.
+ *
+ * @usec: CQ timer suggestion (by DIM)
+ * @pkts: CQ packet counter suggestion (by DIM)
+ * @comps: Completion counter
+ * @cq_period_mode: CQ period count mode (from CQE/EQE)
+ */
+struct dim_cq_moder {
+ u16 usec;
+ u16 pkts;
+ u16 comps;
+ u8 cq_period_mode;
+};
+
+/**
+ * struct dim_sample - Structure for DIM sample data.
+ * Used for communications between DIM and its consumer.
+ *
+ * @time: Sample timestamp
+ * @pkt_ctr: Number of packets
+ * @byte_ctr: Number of bytes
+ * @event_ctr: Number of events
+ * @comp_ctr: Current completion counter
+ */
+struct dim_sample {
+ ktime_t time;
+ u32 pkt_ctr;
+ u32 byte_ctr;
+ u16 event_ctr;
+ u32 comp_ctr;
+};
+
+/**
+ * struct dim_stats - Structure for DIM stats.
+ * Used for holding current measured rates.
+ *
+ * @ppms: Packets per msec
+ * @bpms: Bytes per msec
+ * @epms: Events per msec
+ * @cpms: Completions per msec
+ * @cpe_ratio: Ratio of completions to events
+ */
+struct dim_stats {
+ int ppms; /* packets per msec */
+ int bpms; /* bytes per msec */
+ int epms; /* events per msec */
+ int cpms; /* completions per msec */
+ int cpe_ratio; /* ratio of completions to events */
+};
+
+/**
+ * struct dim - Main structure for dynamic interrupt moderation (DIM).
+ * Used for holding all information about a specific DIM instance.
+ *
+ * @state: Algorithm state (see below)
+ * @prev_stats: Measured rates from previous iteration (for comparison)
+ * @start_sample: Sampled data at start of current iteration
+ * @measuring_sample: A &dim_sample that is used to update the current events
+ * @work: Work to perform on action required
+ * @priv: A pointer to the struct that points to dim
+ * @profile_ix: Current moderation profile
+ * @mode: CQ period count mode
+ * @tune_state: Algorithm tuning state (see below)
+ * @steps_right: Number of steps taken towards higher moderation
+ * @steps_left: Number of steps taken towards lower moderation
+ * @tired: Parking depth counter
+ */
+struct dim {
+ u8 state;
+ struct dim_stats prev_stats;
+ struct dim_sample start_sample;
+ struct dim_sample measuring_sample;
+ struct work_struct work;
+ void *priv;
+ u8 profile_ix;
+ u8 mode;
+ u8 tune_state;
+ u8 steps_right;
+ u8 steps_left;
+ u8 tired;
+};
+
+/**
+ * enum dim_cq_period_mode - Modes for CQ period count
+ *
+ * @DIM_CQ_PERIOD_MODE_START_FROM_EQE: Start counting from EQE
+ * @DIM_CQ_PERIOD_MODE_START_FROM_CQE: Start counting from CQE (implies timer reset)
+ * @DIM_CQ_PERIOD_NUM_MODES: Number of modes
+ */
+enum dim_cq_period_mode {
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
+ DIM_CQ_PERIOD_NUM_MODES
+};
+
+/**
+ * enum dim_state - DIM algorithm states
+ *
+ * These will determine if the algorithm is in a valid state to start an iteration.
+ *
+ * @DIM_START_MEASURE: This is the first iteration (also after applying a new profile)
+ * @DIM_MEASURE_IN_PROGRESS: Algorithm is already in progress - check if
+ * need to perform an action
+ * @DIM_APPLY_NEW_PROFILE: DIM consumer is currently applying a profile - no need to measure
+ */
+enum dim_state {
+ DIM_START_MEASURE,
+ DIM_MEASURE_IN_PROGRESS,
+ DIM_APPLY_NEW_PROFILE,
+};
+
+/**
+ * enum dim_tune_state - DIM algorithm tune states
+ *
+ * These will determine which action the algorithm should perform.
+ *
+ * @DIM_PARKING_ON_TOP: Algorithm found a local top point - exit on significant difference
+ * @DIM_PARKING_TIRED: Algorithm found a deep top point - don't exit if tired > 0
+ * @DIM_GOING_RIGHT: Algorithm is currently trying higher moderation levels
+ * @DIM_GOING_LEFT: Algorithm is currently trying lower moderation levels
+ */
+enum dim_tune_state {
+ DIM_PARKING_ON_TOP,
+ DIM_PARKING_TIRED,
+ DIM_GOING_RIGHT,
+ DIM_GOING_LEFT,
+};
+
+/**
+ * enum dim_stats_state - DIM algorithm statistics states
+ *
+ * These will determine the verdict of current iteration.
+ *
+ * @DIM_STATS_WORSE: Current iteration shows worse performance than before
+ * @DIM_STATS_SAME:  Current iteration shows the same performance as before
+ * @DIM_STATS_BETTER: Current iteration shows better performance than before
+ */
+enum dim_stats_state {
+ DIM_STATS_WORSE,
+ DIM_STATS_SAME,
+ DIM_STATS_BETTER,
+};
+
+/**
+ * enum dim_step_result - DIM algorithm step results
+ *
+ * These describe the result of a step.
+ *
+ * @DIM_STEPPED: Performed a regular step
+ * @DIM_TOO_TIRED: Same kind of step was done multiple times - should go to
+ * tired parking
+ * @DIM_ON_EDGE: Stepped to the leftmost/rightmost profile
+ */
+enum dim_step_result {
+ DIM_STEPPED,
+ DIM_TOO_TIRED,
+ DIM_ON_EDGE,
+};
+
+/**
+ * dim_on_top - check if current state is a good place to stop (top location)
+ * @dim: DIM context
+ *
+ * Check if current profile is a good place to park at.
+ * This will result in reducing the DIM checks frequency, as we assume we
+ * probably shouldn't change profiles unless the traffic pattern changes.
+ */
+bool dim_on_top(struct dim *dim);
+
+/**
+ * dim_turn - change profile altering direction
+ * @dim: DIM context
+ *
+ * Go left if we were going right and vice-versa.
+ * Do nothing if currently parking.
+ */
+void dim_turn(struct dim *dim);
+
+/**
+ * dim_park_on_top - enter a parking state on a top location
+ * @dim: DIM context
+ *
+ * Enter parking state.
+ * Clear all movement history.
+ */
+void dim_park_on_top(struct dim *dim);
+
+/**
+ * dim_park_tired - enter a tired parking state
+ * @dim: DIM context
+ *
+ * Enter parking state.
+ * Clear all movement history and cause DIM checks frequency to reduce.
+ */
+void dim_park_tired(struct dim *dim);
+
+/**
+ * dim_calc_stats - calculate the difference between two samples
+ * @start: start sample
+ * @end: end sample
+ * @curr_stats: delta between samples
+ *
+ * Calculate the delta between two samples (in data rates).
+ * Takes into consideration counter wrap-around.
+ * Returned boolean indicates whether curr_stats are reliable.
+ */
+bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
+ struct dim_stats *curr_stats);
+
+/**
+ * dim_update_sample - set a sample's fields with given values
+ * @event_ctr: number of events to set
+ * @packets: number of packets to set
+ * @bytes: number of bytes to set
+ * @s: DIM sample
+ */
+static inline void
+dim_update_sample(u16 event_ctr, u64 packets, u64 bytes, struct dim_sample *s)
+{
+ s->time = ktime_get();
+ s->pkt_ctr = packets;
+ s->byte_ctr = bytes;
+ s->event_ctr = event_ctr;
+}
+
+/**
+ * dim_update_sample_with_comps - set a sample's fields with given
+ * values including the completion parameter
+ * @event_ctr: number of events to set
+ * @packets: number of packets to set
+ * @bytes: number of bytes to set
+ * @comps: number of completions to set
+ * @s: DIM sample
+ */
+static inline void
+dim_update_sample_with_comps(u16 event_ctr, u64 packets, u64 bytes, u64 comps,
+ struct dim_sample *s)
+{
+ dim_update_sample(event_ctr, packets, bytes, s);
+ s->comp_ctr = comps;
+}
+
+/* Net DIM */
+
+/**
+ * net_dim_get_rx_moderation - provide a CQ moderation object for the given RX profile
+ * @cq_period_mode: CQ period mode
+ * @ix: Profile index
+ */
+struct dim_cq_moder net_dim_get_rx_moderation(u8 cq_period_mode, int ix);
+
+/**
+ * net_dim_get_def_rx_moderation - provide the default RX moderation
+ * @cq_period_mode: CQ period mode
+ */
+struct dim_cq_moder net_dim_get_def_rx_moderation(u8 cq_period_mode);
+
+/**
+ * net_dim_get_tx_moderation - provide a CQ moderation object for the given TX profile
+ * @cq_period_mode: CQ period mode
+ * @ix: Profile index
+ */
+struct dim_cq_moder net_dim_get_tx_moderation(u8 cq_period_mode, int ix);
+
+/**
+ * net_dim_get_def_tx_moderation - provide the default TX moderation
+ * @cq_period_mode: CQ period mode
+ */
+struct dim_cq_moder net_dim_get_def_tx_moderation(u8 cq_period_mode);
+
+/**
+ * net_dim - main DIM algorithm entry point
+ * @dim: DIM instance information
+ * @end_sample: Current data measurement
+ *
+ * Called by the consumer.
+ * This is the main logic of the algorithm, where data is processed in order
+ * to decide on next required action.
+ */
+void net_dim(struct dim *dim, struct dim_sample end_sample);
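+
+/*
+ * Illustrative only (the ring structure and its counters are hypothetical):
+ * a NAPI poll routine would typically feed the algorithm like this:
+ *
+ *	struct dim_sample sample;
+ *
+ *	dim_update_sample(ring->event_ctr, ring->packets, ring->bytes, &sample);
+ *	net_dim(&ring->dim, sample);
+ */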
+
+/* RDMA DIM */
+
+/*
+ * RDMA DIM profile:
+ * the profile array size must be RDMA_DIM_PARAMS_NUM_PROFILES.
+ */
+#define RDMA_DIM_PARAMS_NUM_PROFILES 9
+#define RDMA_DIM_START_PROFILE 0
+
+/**
+ * rdma_dim - Runs the adaptive moderation.
+ * @dim: The moderation struct.
+ * @completions: The number of completions collected in this round.
+ *
+ * Each call to rdma_dim takes the latest amount of completions that
+ * have been collected and counts them as a new event.
+ * Once enough events have been collected the algorithm decides a new
+ * moderation level.
+ */
+void rdma_dim(struct dim *dim, u64 completions);
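+
+/*
+ * Illustrative only: an RDMA driver would typically call this from its CQ
+ * completion handler, e.g. rdma_dim(&cq->dim, n_polled), where n_polled is
+ * the number of completions just reaped (names hypothetical).
+ */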
+
+#endif /* DIM_H */
diff --git a/include/linux/dio.h b/include/linux/dio.h
index 2cc0fd00463f..5abd07361eb5 100644
--- a/include/linux/dio.h
+++ b/include/linux/dio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* header file for DIO boards for the HP300 architecture.
* Maybe this should handle DIO-II later?
* The general structure of this is vaguely based on how
@@ -246,11 +247,6 @@ extern int dio_create_sysfs_dev_files(struct dio_dev *);
/* New-style probing */
extern int dio_register_driver(struct dio_driver *);
extern void dio_unregister_driver(struct dio_driver *);
-extern const struct dio_device_id *dio_match_device(const struct dio_device_id *ids, const struct dio_dev *z);
-static inline struct dio_driver *dio_dev_driver(const struct dio_dev *d)
-{
- return d->driver;
-}
#define dio_resource_start(d) ((d)->resource.start)
#define dio_resource_end(d) ((d)->resource.end)
diff --git a/include/linux/dirent.h b/include/linux/dirent.h
index f072fb8d10a3..99002220cd45 100644
--- a/include/linux/dirent.h
+++ b/include/linux/dirent.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DIRENT_H
#define _LINUX_DIRENT_H
@@ -6,7 +7,7 @@ struct linux_dirent64 {
s64 d_off;
unsigned short d_reclen;
unsigned char d_type;
- char d_name[0];
+ char d_name[];
};
#endif
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
index d02da2c6fc1a..c58c4f790c04 100644
--- a/include/linux/dlm.h
+++ b/include/linux/dlm.h
@@ -1,12 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/******************************************************************************
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
**
-** This copyrighted material is made available to anyone wishing to use,
-** modify, copy, or redistribute it subject to the terms and conditions
-** of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/
@@ -55,12 +53,6 @@ struct dlm_lockspace_ops {
* The dlm should not use a resource directory, but statically assign
* resource mastery to nodes based on the name hash that is otherwise
* used to select the directory node. Must be the same on all nodes.
- * DLM_LSFL_TIMEWARN
- * The dlm should emit netlink messages if locks have been waiting
- * for a configurable amount of time. (Unused.)
- * DLM_LSFL_FS
- * The lockspace user is in the kernel (i.e. filesystem). Enables
- * direct bast/cast callbacks.
* DLM_LSFL_NEWEXCL
* dlm_new_lockspace() should return -EEXIST if the lockspace exists.
*
@@ -136,7 +128,7 @@ int dlm_lock(dlm_lockspace_t *lockspace,
int mode,
struct dlm_lksb *lksb,
uint32_t flags,
- void *name,
+ const void *name,
unsigned int namelen,
uint32_t parent_lkid,
void (*lockast) (void *astarg),
diff --git a/include/linux/dlm_plock.h b/include/linux/dlm_plock.h
index 95ad387a7769..e6d76e8715a6 100644
--- a/include/linux/dlm_plock.h
+++ b/include/linux/dlm_plock.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License v.2.
*/
#ifndef __DLM_PLOCK_DOT_H__
#define __DLM_PLOCK_DOT_H__
diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h
new file mode 100644
index 000000000000..681656a1c03d
--- /dev/null
+++ b/include/linux/dm-bufio.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009-2011 Red Hat, Inc.
+ *
+ * Author: Mikulas Patocka <mpatocka@redhat.com>
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef _LINUX_DM_BUFIO_H
+#define _LINUX_DM_BUFIO_H
+
+#include <linux/blkdev.h>
+#include <linux/types.h>
+
+/*----------------------------------------------------------------*/
+
+struct dm_bufio_client;
+struct dm_buffer;
+
+/*
+ * Flags for dm_bufio_client_create
+ */
+#define DM_BUFIO_CLIENT_NO_SLEEP 0x1
+
+/*
+ * Create a buffered IO cache on a given device
+ */
+struct dm_bufio_client *
+dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
+ unsigned int reserved_buffers, unsigned int aux_size,
+ void (*alloc_callback)(struct dm_buffer *),
+ void (*write_callback)(struct dm_buffer *),
+ unsigned int flags);
+
+/*
+ * Release a buffered IO cache.
+ */
+void dm_bufio_client_destroy(struct dm_bufio_client *c);
+
+/*
+ * Set the sector range.
+ * When this function is called, there must be no I/O in progress on the bufio
+ * client.
+ */
+void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start);
+
+/*
+ * WARNING: to avoid deadlocks, these conditions are observed:
+ *
+ * - At most one thread can hold at most "reserved_buffers" simultaneously.
+ * - Each other thread can hold at most one buffer.
+ * - Threads which call only dm_bufio_get can hold an unlimited number of
+ * buffers.
+ */
+
+/*
+ * Read a given block from disk. Returns a pointer to the data, and sets *bp
+ * to a dm_buffer that can be used to release the buffer or to make it
+ * dirty.
+ */
+void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp);
+
+/*
+ * Like dm_bufio_read, but return buffer from cache, don't read
+ * it. If the buffer is not in the cache, return NULL.
+ */
+void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp);
+
+/*
+ * Like dm_bufio_read, but don't read anything from the disk. It is
+ * expected that the caller initializes the buffer and marks it dirty.
+ */
+void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp);
+
+/*
+ * Prefetch the specified blocks to the cache.
+ * The function starts to read the blocks and returns without waiting for
+ * I/O to finish.
+ */
+void dm_bufio_prefetch(struct dm_bufio_client *c,
+ sector_t block, unsigned int n_blocks);
+
+/*
+ * Release a reference obtained with dm_bufio_{read,get,new}. The data
+ * pointer and dm_buffer pointer are no longer valid after this call.
+ */
+void dm_bufio_release(struct dm_buffer *b);
+
+/*
+ * Mark a buffer dirty. It should be called after the buffer is modified.
+ *
+ * In case of memory pressure, the buffer may be written after
+ * dm_bufio_mark_buffer_dirty, but before dm_bufio_write_dirty_buffers. So
+ * dm_bufio_write_dirty_buffers guarantees that the buffer is on-disk but
+ * the actual writing may occur earlier.
+ */
+void dm_bufio_mark_buffer_dirty(struct dm_buffer *b);
+
+/*
+ * Mark a part of the buffer dirty.
+ *
+ * The specified part of the buffer is scheduled to be written. dm-bufio may
+ * write the specified part of the buffer or it may write a larger superset.
+ */
+void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
+ unsigned int start, unsigned int end);
+
+/*
+ * Initiate writing of dirty buffers, without waiting for completion.
+ */
+void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c);
+
+/*
+ * Write all dirty buffers. Guarantees that all dirty buffers created prior
+ * to this call are on disk when this call exits.
+ */
+int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c);
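+
+/*
+ * Illustrative only: a typical read-modify-write cycle with the API above
+ * (error handling omitted, names hypothetical):
+ *
+ *	struct dm_buffer *b;
+ *	void *data;
+ *
+ *	data = dm_bufio_read(c, block, &b);
+ *	memcpy(data, new_contents, block_size);
+ *	dm_bufio_mark_buffer_dirty(b);
+ *	dm_bufio_release(b);
+ *	dm_bufio_write_dirty_buffers(c);
+ */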
+
+/*
+ * Send an empty write barrier to the device to flush hardware disk cache.
+ */
+int dm_bufio_issue_flush(struct dm_bufio_client *c);
+
+/*
+ * Send a discard request to the underlying device.
+ */
+int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count);
+
+/*
+ * Free the given buffer.
+ * This is just a hint; if the buffer is in use or dirty, this function
+ * does nothing.
+ */
+void dm_bufio_forget(struct dm_bufio_client *c, sector_t block);
+
+/*
+ * Free the given range of buffers.
+ * This is just a hint; if the buffer is in use or dirty, this function
+ * does nothing.
+ */
+void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks);
+
+/*
+ * Set the minimum number of buffers before cleanup happens.
+ */
+void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n);
+
+unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c);
+sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
+struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c);
+sector_t dm_bufio_get_block_number(struct dm_buffer *b);
+void *dm_bufio_get_block_data(struct dm_buffer *b);
+void *dm_bufio_get_aux_data(struct dm_buffer *b);
+struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b);
+
+/*----------------------------------------------------------------*/
+
+#endif
diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h
index 7084503c3405..0b10faedb26a 100644
--- a/include/linux/dm-dirty-log.h
+++ b/include/linux/dm-dirty-log.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2003 Sistina Software
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -33,7 +34,7 @@ struct dm_dirty_log_type {
struct list_head list;
int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti,
- unsigned argc, char **argv);
+ unsigned int argc, char **argv);
void (*dtr)(struct dm_dirty_log *log);
/*
@@ -96,7 +97,7 @@ struct dm_dirty_log_type {
* Do not confuse this function with 'in_sync()', one
* tells you if an area is synchronised, the other
* assigns recovery work.
- */
+ */
int (*get_resync_work)(struct dm_dirty_log *log, region_t *region);
/*
@@ -116,7 +117,7 @@ struct dm_dirty_log_type {
* Support function for mirror status requests.
*/
int (*status)(struct dm_dirty_log *log, status_type_t status_type,
- char *result, unsigned maxlen);
+ char *result, unsigned int maxlen);
/*
* is_remote_recovering is necessary for cluster mirroring. It provides
@@ -139,7 +140,7 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type);
struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
struct dm_target *ti,
int (*flush_callback_fn)(struct dm_target *ti),
- unsigned argc, char **argv);
+ unsigned int argc, char **argv);
void dm_dirty_log_destroy(struct dm_dirty_log *log);
#endif /* __KERNEL__ */
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
index a52c6580cc9a..7595142f3fc5 100644
--- a/include/linux/dm-io.h
+++ b/include/linux/dm-io.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2003 Sistina Software
* Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved.
@@ -13,6 +14,7 @@
#ifdef __KERNEL__
#include <linux/types.h>
+#include <linux/blk_types.h>
struct dm_io_region {
struct block_device *bdev;
@@ -25,7 +27,7 @@ struct page_list {
struct page *page;
};
typedef void (*io_notify_fn)(unsigned long error, void *context);
enum dm_io_mem_type {
DM_IO_PAGE_LIST,/* Page list */
@@ -37,7 +39,7 @@ enum dm_io_mem_type {
struct dm_io_memory {
enum dm_io_mem_type type;
- unsigned offset;
+ unsigned int offset;
union {
struct page_list *pl;
@@ -57,8 +59,7 @@ struct dm_io_notify {
*/
struct dm_io_client;
struct dm_io_request {
- int bi_op; /* REQ_OP */
- int bi_op_flags; /* req_flag_bits */
+ blk_opf_t bi_opf; /* Request type and flags */
struct dm_io_memory mem; /* Memory to use for io */
struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */
struct dm_io_client *client; /* Client memory handler */
@@ -78,8 +79,8 @@ void dm_io_client_destroy(struct dm_io_client *client);
* Each bit in the optional 'sync_error_bits' bitset indicates whether an
* error occurred doing io to the corresponding region.
*/
-int dm_io(struct dm_io_request *io_req, unsigned num_regions,
- struct dm_io_region *region, unsigned long *sync_error_bits);
+int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
+	  struct dm_io_region *region, unsigned long *sync_error_bits);
#endif /* __KERNEL__ */
#endif /* _LINUX_DM_IO_H */
diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h
index cfac8588ed56..51fb1af0b63e 100644
--- a/include/linux/dm-kcopyd.h
+++ b/include/linux/dm-kcopyd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2001 - 2003 Sistina Software
* Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved.
@@ -23,11 +24,11 @@
#define DM_KCOPYD_WRITE_SEQ 2
struct dm_kcopyd_throttle {
- unsigned throttle;
- unsigned num_io_jobs;
- unsigned io_period;
- unsigned total_period;
- unsigned last_jiffies;
+ unsigned int throttle;
+ unsigned int num_io_jobs;
+ unsigned int io_period;
+ unsigned int total_period;
+ unsigned int last_jiffies;
};
/*
@@ -51,6 +52,7 @@ MODULE_PARM_DESC(name, description)
struct dm_kcopyd_client;
struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle);
void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc);
+void dm_kcopyd_client_flush(struct dm_kcopyd_client *kc);
/*
* Submit a copy job to kcopyd. This is built on top of the
@@ -59,12 +61,12 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc);
* read_err is a boolean,
* write_err is a bitset, with 1 bit for each destination region
*/
typedef void (*dm_kcopyd_notify_fn)(int read_err, unsigned long write_err,
void *context);
-int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
- unsigned num_dests, struct dm_io_region *dests,
- unsigned flags, dm_kcopyd_notify_fn fn, void *context);
+void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
+ unsigned int num_dests, struct dm_io_region *dests,
+ unsigned int flags, dm_kcopyd_notify_fn fn, void *context);
/*
* Prepare a callback and submit it via the kcopyd thread.
@@ -79,11 +81,11 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
*/
void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
dm_kcopyd_notify_fn fn, void *context);
void dm_kcopyd_do_callback(void *job, int read_err, unsigned long write_err);
-int dm_kcopyd_zero(struct dm_kcopyd_client *kc,
- unsigned num_dests, struct dm_io_region *dests,
- unsigned flags, dm_kcopyd_notify_fn fn, void *context);
+void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
+ unsigned int num_dests, struct dm_io_region *dests,
+ unsigned int flags, dm_kcopyd_notify_fn fn, void *context);
#endif /* __KERNEL__ */
#endif /* _LINUX_DM_KCOPYD_H */
diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h
index 9e2a7a401df5..3079ed93dd2d 100644
--- a/include/linux/dm-region-hash.h
+++ b/include/linux/dm-region-hash.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2003 Sistina Software Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -12,9 +13,11 @@
#include <linux/dm-dirty-log.h>
-/*-----------------------------------------------------------------
+/*
+ *----------------------------------------------------------------
* Region hash
- *----------------------------------------------------------------*/
+ *----------------------------------------------------------------
+ */
struct dm_region_hash;
struct dm_region;
@@ -37,7 +40,7 @@ struct dm_region_hash *dm_region_hash_create(
struct bio_list *bios),
void (*wakeup_workers)(void *context),
void (*wakeup_all_recovery_waiters)(void *context),
- sector_t target_begin, unsigned max_recovery,
+ sector_t target_begin, unsigned int max_recovery,
struct dm_dirty_log *log, uint32_t region_size,
region_t nr_regions);
void dm_region_hash_destroy(struct dm_region_hash *rh);
diff --git a/include/linux/dm-verity-loadpin.h b/include/linux/dm-verity-loadpin.h
new file mode 100644
index 000000000000..552b817ab102
--- /dev/null
+++ b/include/linux/dm-verity-loadpin.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_DM_VERITY_LOADPIN_H
+#define __LINUX_DM_VERITY_LOADPIN_H
+
+#include <linux/list.h>
+
+struct block_device;
+
+extern struct list_head dm_verity_loadpin_trusted_root_digests;
+
+struct dm_verity_loadpin_trusted_root_digest {
+ struct list_head node;
+ unsigned int len;
+ u8 data[];
+};
+
+#if IS_ENABLED(CONFIG_SECURITY_LOADPIN_VERITY)
+bool dm_verity_loadpin_is_bdev_trusted(struct block_device *bdev);
+#else
+static inline bool dm_verity_loadpin_is_bdev_trusted(struct block_device *bdev)
+{
+ return false;
+}
+#endif
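+
+/*
+ * Illustrative only: a caller enforcing the trust check might do the
+ * following, where file_bdev is a hypothetical block device pointer:
+ *
+ *	if (!dm_verity_loadpin_is_bdev_trusted(file_bdev))
+ *		return -EPERM;
+ */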
+
+#endif /* __LINUX_DM_VERITY_LOADPIN_H */
diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h
index 841925fbfe8a..df0341dbb451 100644
--- a/include/linux/dm9000.h
+++ b/include/linux/dm9000.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* include/linux/dm9000.h
*
* Copyright (c) 2004 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* Header file for dm9000 platform data
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __DM9000_PLATFORM_DATA
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 79f27d60ec66..3f31baa3293f 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Header file for dma buffer sharing framework.
*
@@ -8,22 +9,11 @@
* Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
* Daniel Vetter <daniel@ffwll.ch> for their support in creation and
* refining of this idea.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__
+#include <linux/iosys-map.h>
#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
@@ -39,30 +29,32 @@ struct dma_buf_attachment;
/**
* struct dma_buf_ops - operations possible on struct dma_buf
- * @map_atomic: maps a page from the buffer into kernel address
- * space, users may not block until the subsequent unmap call.
- * This callback must not sleep.
- * @unmap_atomic: [optional] unmaps a atomically mapped page from the buffer.
- * This Callback must not sleep.
- * @map: maps a page from the buffer into kernel address space.
- * @unmap: [optional] unmaps a page from the buffer.
* @vmap: [optional] creates a virtual mapping for the buffer into kernel
* address space. Same restrictions as for vmap and friends apply.
* @vunmap: [optional] unmaps a vmap from the buffer
*/
struct dma_buf_ops {
/**
+ * @cache_sgt_mapping:
+ *
+ * If true the framework will cache the first mapping made for each
+ * attachment. This avoids creating mappings for attachments multiple
+ * times.
+ */
+ bool cache_sgt_mapping;
+
+ /**
* @attach:
*
* This is called from dma_buf_attach() to make sure that a given
- * &device can access the provided &dma_buf. Exporters which support
- * buffer objects in special locations like VRAM or device-specific
- * carveout areas should check whether the buffer could be move to
- * system memory (or directly accessed by the provided device), and
- * otherwise need to fail the attach operation.
+ * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
+ * which support buffer objects in special locations like VRAM or
+ * device-specific carveout areas should check whether the buffer could
+ * be move to system memory (or directly accessed by the provided
+ * device), and otherwise need to fail the attach operation.
*
* The exporter should also in general check whether the current
- * allocation fullfills the DMA constraints of the new device. If this
+ * allocation fulfills the DMA constraints of the new device. If this
* is not the case, and the allocation cannot be moved, it should also
* fail the attach operation.
*
@@ -77,8 +69,7 @@ struct dma_buf_ops {
* to signal that backing storage is already allocated and incompatible
* with the requirements of requesting device.
*/
- int (*attach)(struct dma_buf *, struct device *,
- struct dma_buf_attachment *);
+ int (*attach)(struct dma_buf *, struct dma_buf_attachment *);
/**
* @detach:
@@ -92,13 +83,50 @@ struct dma_buf_ops {
void (*detach)(struct dma_buf *, struct dma_buf_attachment *);
/**
+ * @pin:
+ *
+ * This is called by dma_buf_pin() and lets the exporter know that the
+ * DMA-buf can't be moved any more. Ideally, the exporter should
+ * pin the buffer so that it is generally accessible by all
+ * devices.
+ *
+	 * This is called with the &dmabuf.resv object locked and is mutually
+	 * exclusive with @cache_sgt_mapping.
+ *
+ * This is called automatically for non-dynamic importers from
+ * dma_buf_attach().
+ *
+ * Note that similar to non-dynamic exporters in their @map_dma_buf
+ * callback the driver must guarantee that the memory is available for
+ * use and cleared of any old data by the time this function returns.
+ * Drivers which pipeline their buffer moves internally must wait for
+ * all moves and clears to complete.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
+ int (*pin)(struct dma_buf_attachment *attach);
+
+ /**
+ * @unpin:
+ *
+ * This is called by dma_buf_unpin() and lets the exporter know that the
+ * DMA-buf can be moved again.
+ *
+	 * This is called with the dmabuf->resv object locked and is mutually
+	 * exclusive with @cache_sgt_mapping.
+ *
+ * This callback is optional.
+ */
+ void (*unpin)(struct dma_buf_attachment *attach);
+
+ /**
* @map_dma_buf:
*
* This is called by dma_buf_map_attachment() and is used to map a
* shared &dma_buf into device address space, and it is mandatory. It
- * can only be called if @attach has been called successfully. This
- * essentially pins the DMA buffer into place, and it cannot be moved
- * any more
+ * can only be called if @attach has been called successfully.
*
* This call may sleep, e.g. when the backing storage first needs to be
* allocated, or moved to a location suitable for all currently attached
@@ -119,15 +147,34 @@ struct dma_buf_ops {
* any other kind of sharing that the exporter might wish to make
* available to buffer-users.
*
+ * This is always called with the dmabuf->resv object locked when
+ * the dynamic_mapping flag is true.
+ *
+ * Note that for non-dynamic exporters the driver must guarantee that
+	 * the memory is available for use and cleared of any old data by
+ * the time this function returns. Drivers which pipeline their buffer
+ * moves internally must wait for all moves and clears to complete.
+ * Dynamic exporters do not need to follow this rule: For non-dynamic
+ * importers the buffer is already pinned through @pin, which has the
+	 * same requirements. Dynamic importers, on the other hand, must obey the
+ * dma_resv fences.
+ *
* Returns:
*
- * A &sg_table scatter list of or the backing storage of the DMA buffer,
+ * A &sg_table scatter list of the backing storage of the DMA buffer,
* already mapped into the device address space of the &device attached
- * with the provided &dma_buf_attachment.
+ * with the provided &dma_buf_attachment. The addresses and lengths in
+ * the scatter list are PAGE_SIZE aligned.
*
* On failure, returns a negative error value wrapped into a pointer.
* May also return -EINTR when a signal was received while being
* blocked.
+ *
+ * Note that exporters should not try to cache the scatter list, or
+ * return the same one for multiple calls. Caching is done either by the
+ * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
+ * of the scatter list is transferred to the caller, and returned by
+ * @unmap_dma_buf.
*/
struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
enum dma_data_direction);
@@ -136,9 +183,8 @@ struct dma_buf_ops {
*
* This is called by dma_buf_unmap_attachment() and should unmap and
* release the &sg_table allocated in @map_dma_buf, and it is mandatory.
- * It should also unpin the backing storage if this is the last mapping
- * of the DMA buffer, it the exporter supports backing storage
- * migration.
+ * For static dma_buf handling this might also unpin the backing
+ * storage if this is the last mapping of the DMA buffer.
*/
void (*unmap_dma_buf)(struct dma_buf_attachment *,
struct sg_table *,
@@ -160,24 +206,19 @@ struct dma_buf_ops {
* @begin_cpu_access:
*
* This is called from dma_buf_begin_cpu_access() and allows the
- * exporter to ensure that the memory is actually available for cpu
- * access - the exporter might need to allocate or swap-in and pin the
- * backing storage. The exporter also needs to ensure that cpu access is
- * coherent for the access direction. The direction can be used by the
- * exporter to optimize the cache flushing, i.e. access with a different
+ * exporter to ensure that the memory is actually coherent for cpu
+ * access. The exporter also needs to ensure that cpu access is coherent
+ * for the access direction. The direction can be used by the exporter
+ * to optimize the cache flushing, i.e. access with a different
* direction (read instead of write) might return stale or even bogus
* data (e.g. when the exporter needs to copy the data to temporary
* storage).
*
- * This callback is optional.
+ * Note that this is both called through the DMA_BUF_IOCTL_SYNC IOCTL
+ * command for userspace mappings established through @mmap, and also
+ * for kernel mappings established with @vmap.
*
- * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
- * from userspace (where storage shouldn't be pinned to avoid handing
- * de-factor mlock rights to userspace) and for the kernel-internal
- * users of the various kmap interfaces, where the backing storage must
- * be pinned to guarantee that the atomic kmap calls can succeed. Since
- * there's no in-kernel users of the kmap interfaces yet this isn't a
- * real problem.
+ * This callback is optional.
*
* Returns:
*
@@ -193,9 +234,7 @@ struct dma_buf_ops {
*
* This is called from dma_buf_end_cpu_access() when the importer is
* done accessing the CPU. The exporter can use this to flush caches and
- * unpin any resources pinned in @begin_cpu_access.
- * The result of any dma_buf kmap calls after end_cpu_access is
- * undefined.
+ * undo anything else done in @begin_cpu_access.
*
* This callback is optional.
*
@@ -206,10 +245,6 @@ struct dma_buf_ops {
* to be restarted.
*/
int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
- void *(*map_atomic)(struct dma_buf *, unsigned long);
- void (*unmap_atomic)(struct dma_buf *, unsigned long, void *);
- void *(*map)(struct dma_buf *, unsigned long);
- void (*unmap)(struct dma_buf *, unsigned long, void *);
/**
* @mmap:
@@ -217,7 +252,7 @@ struct dma_buf_ops {
* This callback is used by the dma_buf_mmap() function
*
* Note that the mapping needs to be incoherent, userspace is expected
- * to braket CPU access using the DMA_BUF_IOCTL_SYNC interface.
+ * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
*
* Because dma-buf buffers have invariant size over their lifetime, the
* dma-buf core checks whether a vma is too large and rejects such
@@ -248,28 +283,12 @@ struct dma_buf_ops {
*/
int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
- void *(*vmap)(struct dma_buf *);
- void (*vunmap)(struct dma_buf *, void *vaddr);
+ int (*vmap)(struct dma_buf *dmabuf, struct iosys_map *map);
+ void (*vunmap)(struct dma_buf *dmabuf, struct iosys_map *map);
};
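+
+/*
+ * Illustrative only: a minimal @vmap implementation for an exporter whose
+ * backing storage already has a kernel mapping (my_buffer and its vaddr
+ * field are hypothetical):
+ *
+ *	static int my_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
+ *	{
+ *		struct my_buffer *buf = dmabuf->priv;
+ *
+ *		iosys_map_set_vaddr(map, buf->vaddr);
+ *		return 0;
+ *	}
+ */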
/**
* struct dma_buf - shared buffer object
- * @size: size of the buffer
- * @file: file pointer used for sharing buffers across, and for refcounting.
- * @attachments: list of dma_buf_attachment that denotes all devices attached.
- * @ops: dma_buf_ops associated with this buffer object.
- * @lock: used internally to serialize list manipulation, attach/detach and vmap/unmap
- * @vmapping_counter: used internally to refcnt the vmaps
- * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
- * @exp_name: name of the exporter; useful for debugging.
- * @owner: pointer to exporter module; used for refcounting when exporter is a
- * kernel module.
- * @list_node: node for dma_buf accounting and debugging.
- * @priv: exporter specific private data for this buffer object.
- * @resv: reservation object linked to this dma-buf
- * @poll: for userspace poll support
- * @cb_excl: for userspace poll support
- * @cb_shared: for userspace poll support
*
* This represents a shared buffer, created by calling dma_buf_export(). The
* userspace representation is a normal file descriptor, which can be created by
@@ -281,36 +300,201 @@ struct dma_buf_ops {
* Device DMA access is handled by the separate &struct dma_buf_attachment.
*/
struct dma_buf {
+ /**
+ * @size:
+ *
+ * Size of the buffer; invariant over the lifetime of the buffer.
+ */
size_t size;
+
+ /**
+ * @file:
+ *
+ * File pointer used for sharing buffers across, and for refcounting.
+ * See dma_buf_get() and dma_buf_put().
+ */
struct file *file;
+
+ /**
+ * @attachments:
+ *
+ * List of dma_buf_attachment that denotes all devices attached,
+ * protected by &dma_resv lock @resv.
+ */
struct list_head attachments;
+
+ /** @ops: dma_buf_ops associated with this buffer object. */
const struct dma_buf_ops *ops;
- struct mutex lock;
+
+ /**
+ * @vmapping_counter:
+ *
+ * Used internally to refcnt the vmaps returned by dma_buf_vmap().
+	 * Protected by the dma_resv lock @resv.
+ */
unsigned vmapping_counter;
- void *vmap_ptr;
+
+ /**
+ * @vmap_ptr:
+	 * The current vmap ptr if @vmapping_counter > 0. Protected by the
+	 * dma_resv lock @resv.
+ */
+ struct iosys_map vmap_ptr;
+
+ /**
+ * @exp_name:
+ *
+ * Name of the exporter; useful for debugging. See the
+ * DMA_BUF_SET_NAME IOCTL.
+ */
const char *exp_name;
+
+ /**
+ * @name:
+ *
+ * Userspace-provided name; useful for accounting and debugging,
+ * protected by dma_resv_lock() on @resv and @name_lock for read access.
+ */
+ const char *name;
+
+ /** @name_lock: Spinlock to protect name access for read access. */
+ spinlock_t name_lock;
+
+ /**
+ * @owner:
+ *
+ * Pointer to exporter module; used for refcounting when exporter is a
+ * kernel module.
+ */
struct module *owner;
+
+ /** @list_node: node for dma_buf accounting and debugging. */
struct list_head list_node;
+
+ /** @priv: exporter specific private data for this buffer object. */
void *priv;
- struct reservation_object *resv;
- /* poll support */
+ /**
+ * @resv:
+ *
+ * Reservation object linked to this dma-buf.
+ *
+ * IMPLICIT SYNCHRONIZATION RULES:
+ *
+ * Drivers which support implicit synchronization of buffer access as
+ * e.g. exposed in `Implicit Fence Poll Support`_ must follow the
+ * below rules.
+ *
+ * - Drivers must add a read fence through dma_resv_add_fence() with the
+ * DMA_RESV_USAGE_READ flag for anything the userspace API considers a
+ * read access. This highly depends upon the API and window system.
+ *
+ * - Similarly drivers must add a write fence through
+ * dma_resv_add_fence() with the DMA_RESV_USAGE_WRITE flag for
+ * anything the userspace API considers write access.
+ *
+ * - Drivers may just always add a write fence, since that only
+ * causes unnecessary synchronization, but no correctness issues.
+ *
+ * - Some drivers only expose a synchronous userspace API with no
+ * pipelining across drivers. These do not set any fences for their
+ * access. An example here is v4l.
+ *
+	 * - Drivers should use dma_resv_usage_rw() when retrieving fences as
+ * dependency for implicit synchronization.
+ *
+ * DYNAMIC IMPORTER RULES:
+ *
+ * Dynamic importers, see dma_buf_attachment_is_dynamic(), have
+ * additional constraints on how they set up fences:
+ *
+ * - Dynamic importers must obey the write fences and wait for them to
+ * signal before allowing access to the buffer's underlying storage
+ * through the device.
+ *
+ * - Dynamic importers should set fences for any access that they can't
+ * disable immediately from their &dma_buf_attach_ops.move_notify
+ * callback.
+ *
+ * IMPORTANT:
+ *
+ * All drivers and memory management related functions must obey the
+ * struct dma_resv rules, specifically the rules for updating and
+	 * obeying fences. See enum dma_resv_usage for further descriptions, and
+	 * the usage sketch following this struct for an example.
+ */
+ struct dma_resv *resv;
+
+ /** @poll: for userspace poll support */
wait_queue_head_t poll;
+ /** @cb_in: for userspace poll support */
+ /** @cb_out: for userspace poll support */
struct dma_buf_poll_cb_t {
struct dma_fence_cb cb;
wait_queue_head_t *poll;
- unsigned long active;
- } cb_excl, cb_shared;
+ __poll_t active;
+ } cb_in, cb_out;
+#ifdef CONFIG_DMABUF_SYSFS_STATS
+ /**
+ * @sysfs_entry:
+ *
+ * For exposing information about this buffer in sysfs. See also
+ * `DMA-BUF statistics`_ for the uapi this enables.
+ */
+ struct dma_buf_sysfs_entry {
+ struct kobject kobj;
+ struct dma_buf *dmabuf;
+ } *sysfs_entry;
+#endif
+};
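+
+/*
+ * Hypothetical sketch of the implicit synchronization rules documented for
+ * &dma_buf.resv above (illustrative only, not part of this patch; "obj" and
+ * "job" are made-up driver state, error handling elided):
+ *
+ *     dma_resv_lock(obj->resv, NULL);
+ *     dma_resv_reserve_fences(obj->resv, 1);
+ *     dma_resv_add_fence(obj->resv, job->done_fence,
+ *                        job->is_write ? DMA_RESV_USAGE_WRITE
+ *                                      : DMA_RESV_USAGE_READ);
+ *     dma_resv_unlock(obj->resv);
+ */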
+
+/**
+ * struct dma_buf_attach_ops - importer operations for an attachment
+ *
+ * Attachment operations implemented by the importer.
+ */
+struct dma_buf_attach_ops {
+ /**
+ * @allow_peer2peer:
+ *
+ * If this is set to true the importer must be able to handle peer
+ * resources without struct pages.
+ */
+ bool allow_peer2peer;
+
+ /**
+ * @move_notify: [optional] notification that the DMA-buf is moving
+ *
+	 * If this callback is provided, the framework can avoid pinning the
+	 * backing store while mappings exist.
+ *
+ * This callback is called with the lock of the reservation object
+ * associated with the dma_buf held and the mapping function must be
+ * called with this lock held as well. This makes sure that no mapping
+ * is created concurrently with an ongoing move operation.
+ *
+ * Mappings stay valid and are not directly affected by this callback.
+ * But the DMA-buf can now be in a different physical location, so all
+ * mappings should be destroyed and re-created as soon as possible.
+ *
+ * New mappings can be created after this callback returns, and will
+ * point to the new location of the DMA-buf.
+ */
+ void (*move_notify)(struct dma_buf_attachment *attach);
};
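+
+/*
+ * Hypothetical &dma_buf_attach_ops.move_notify sketch (illustrative only;
+ * "my_bo" and "my_invalidate_mappings()" are made-up driver code). The
+ * dma_resv lock of attach->dmabuf is held when this is called, so no new
+ * mapping can race with the invalidation:
+ *
+ *     static void my_move_notify(struct dma_buf_attachment *attach)
+ *     {
+ *             struct my_bo *bo = attach->importer_priv;
+ *
+ *             my_invalidate_mappings(bo);
+ *     }
+ */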
/**
* struct dma_buf_attachment - holds device-buffer attachment data
* @dmabuf: buffer for this attachment.
* @dev: device attached to the buffer.
- * @node: list of dma_buf_attachment.
+ * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
+ * @sgt: cached mapping.
+ * @dir: direction of cached mapping.
+ * @peer2peer: true if the importer can handle peer resources without pages.
* @priv: exporter specific attachment data.
+ * @importer_ops: importer operations for this attachment; if provided,
+ * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
+ * @importer_priv: importer specific attachment data.
*
* This structure holds the attachment information between the dma_buf buffer
* and its user device(s). The list contains one attachment struct per device
@@ -325,6 +509,11 @@ struct dma_buf_attachment {
struct dma_buf *dmabuf;
struct device *dev;
struct list_head node;
+ struct sg_table *sgt;
+ enum dma_data_direction dir;
+ bool peer2peer;
+ const struct dma_buf_attach_ops *importer_ops;
+ void *importer_priv;
void *priv;
};
@@ -333,7 +522,7 @@ struct dma_buf_attachment {
* @exp_name: name of the exporter - useful for debugging.
* @owner: pointer to exporter module - used for refcounting kernel module
* @ops: Attach allocator-defined dma buf ops to the new buffer
- * @size: Size of the buffer
+ * @size: Size of the buffer - invariant over the lifetime of the buffer
* @flags: mode flags for the file
* @resv: reservation-object, NULL to allocate default one
* @priv: Attach private data of allocator to this buffer
@@ -347,7 +536,7 @@ struct dma_buf_export_info {
const struct dma_buf_ops *ops;
size_t size;
int flags;
- struct reservation_object *resv;
+ struct dma_resv *resv;
void *priv;
};
@@ -376,10 +565,43 @@ static inline void get_dma_buf(struct dma_buf *dmabuf)
get_file(dmabuf->file);
}
+/**
+ * dma_buf_is_dynamic - check if a DMA-buf uses dynamic mappings.
+ * @dmabuf: the DMA-buf to check
+ *
+ * Returns true if a DMA-buf exporter wants to be called with the dma_resv
+ * locked for the map/unmap callbacks, false if it doesn't want to be called
+ * with the lock held.
+ */
+static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
+{
+ return !!dmabuf->ops->pin;
+}
+
+/**
+ * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
+ * mappings
+ * @attach: the DMA-buf attachment to check
+ *
+ * Returns true if a DMA-buf importer wants to call the map/unmap functions with
+ * the dma_resv lock held.
+ */
+static inline bool
+dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
+{
+ return !!attach->importer_ops;
+}
+
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
- struct device *dev);
+ struct device *dev);
+struct dma_buf_attachment *
+dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
+ const struct dma_buf_attach_ops *importer_ops,
+ void *importer_priv);
void dma_buf_detach(struct dma_buf *dmabuf,
- struct dma_buf_attachment *dmabuf_attach);
+ struct dma_buf_attachment *attach);
+int dma_buf_pin(struct dma_buf_attachment *attach);
+void dma_buf_unpin(struct dma_buf_attachment *attach);
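+
+/*
+ * Hypothetical dynamic importer setup (illustrative only; "my_attach_ops",
+ * "my_move_notify" and "bo" are made-up driver code):
+ *
+ *     static const struct dma_buf_attach_ops my_attach_ops = {
+ *             .allow_peer2peer = true,
+ *             .move_notify = my_move_notify,
+ *     };
+ *
+ *     attach = dma_buf_dynamic_attach(dmabuf, dev, &my_attach_ops, bo);
+ *     if (IS_ERR(attach))
+ *             return PTR_ERR(attach);
+ */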
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
@@ -391,17 +613,22 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
enum dma_data_direction);
+void dma_buf_move_notify(struct dma_buf *dma_buf);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
-void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
-void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
-void *dma_buf_kmap(struct dma_buf *, unsigned long);
-void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
+struct sg_table *
+dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction);
+void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
+ struct sg_table *sg_table,
+ enum dma_data_direction direction);
int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
unsigned long);
-void *dma_buf_vmap(struct dma_buf *);
-void dma_buf_vunmap(struct dma_buf *, void *vaddr);
+int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map);
+void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map);
+int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
+void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
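+
+/*
+ * Hypothetical CPU access sketch using the iosys_map based vmap API above
+ * (illustrative only, error handling minimal):
+ *
+ *     struct iosys_map map;
+ *     int ret;
+ *
+ *     ret = dma_buf_vmap_unlocked(dmabuf, &map);
+ *     if (ret)
+ *             return ret;
+ *     iosys_map_memset(&map, 0, 0, dmabuf->size);
+ *     dma_buf_vunmap_unlocked(dmabuf, &map);
+ */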
#endif /* __DMA_BUF_H__ */
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
deleted file mode 100644
index b67bf6ac907d..000000000000
--- a/include/linux/dma-contiguous.h
+++ /dev/null
@@ -1,164 +0,0 @@
-#ifndef __LINUX_CMA_H
-#define __LINUX_CMA_H
-
-/*
- * Contiguous Memory Allocator for DMA mapping framework
- * Copyright (c) 2010-2011 by Samsung Electronics.
- * Written by:
- * Marek Szyprowski <m.szyprowski@samsung.com>
- * Michal Nazarewicz <mina86@mina86.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version of the license.
- */
-
-/*
- * Contiguous Memory Allocator
- *
- * The Contiguous Memory Allocator (CMA) makes it possible to
- * allocate big contiguous chunks of memory after the system has
- * booted.
- *
- * Why is it needed?
- *
- * Various devices on embedded systems have no scatter-gather and/or
- * IO map support and require contiguous blocks of memory to
- * operate. They include devices such as cameras, hardware video
- * coders, etc.
- *
- * Such devices often require big memory buffers (a full HD frame
- * is, for instance, more than 2 megapixels large, i.e. more than 6
- * MB of memory), which makes mechanisms such as kmalloc() or
- * alloc_page() ineffective.
- *
- * At the same time, a solution where a big memory region is
- * reserved for a device is suboptimal since often more memory is
- * reserved than strictly required and, moreover, the memory is
- * inaccessible to the page system even if device drivers don't use it.
- *
- * CMA tries to solve this issue by operating on memory regions
- * where only movable pages can be allocated from. This way, the kernel
- * can use the memory for pagecache, and when a device driver requests
- * it, allocated pages can be migrated.
- *
- * Driver usage
- *
- * CMA should not be used by the device drivers directly. It is
- * only a helper framework for dma-mapping subsystem.
- *
- * For more information, see kernel-docs in drivers/base/dma-contiguous.c
- */
-
-#ifdef __KERNEL__
-
-#include <linux/device.h>
-
-struct cma;
-struct page;
-
-#ifdef CONFIG_DMA_CMA
-
-extern struct cma *dma_contiguous_default_area;
-
-static inline struct cma *dev_get_cma_area(struct device *dev)
-{
- if (dev && dev->cma_area)
- return dev->cma_area;
- return dma_contiguous_default_area;
-}
-
-static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
-{
- if (dev)
- dev->cma_area = cma;
-}
-
-static inline void dma_contiguous_set_default(struct cma *cma)
-{
- dma_contiguous_default_area = cma;
-}
-
-void dma_contiguous_reserve(phys_addr_t addr_limit);
-
-int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
- phys_addr_t limit, struct cma **res_cma,
- bool fixed);
-
-/**
- * dma_declare_contiguous() - reserve area for contiguous memory handling
- * for particular device
- * @dev: Pointer to device structure.
- * @size: Size of the reserved memory.
- * @base: Start address of the reserved memory (optional, 0 for any).
- * @limit: End address of the reserved memory (optional, 0 for any).
- *
- * This function reserves memory for specified device. It should be
- * called by board specific code when early allocator (memblock or bootmem)
- * is still activate.
- */
-
-static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
- phys_addr_t base, phys_addr_t limit)
-{
- struct cma *cma;
- int ret;
- ret = dma_contiguous_reserve_area(size, base, limit, &cma, true);
- if (ret == 0)
- dev_set_cma_area(dev, cma);
-
- return ret;
-}
-
-struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
- unsigned int order, gfp_t gfp_mask);
-bool dma_release_from_contiguous(struct device *dev, struct page *pages,
- int count);
-
-#else
-
-static inline struct cma *dev_get_cma_area(struct device *dev)
-{
- return NULL;
-}
-
-static inline void dev_set_cma_area(struct device *dev, struct cma *cma) { }
-
-static inline void dma_contiguous_set_default(struct cma *cma) { }
-
-static inline void dma_contiguous_reserve(phys_addr_t limit) { }
-
-static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
- phys_addr_t limit, struct cma **res_cma,
- bool fixed)
-{
- return -ENOSYS;
-}
-
-static inline
-int dma_declare_contiguous(struct device *dev, phys_addr_t size,
- phys_addr_t base, phys_addr_t limit)
-{
- return -ENOSYS;
-}
-
-static inline
-struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
- unsigned int order, gfp_t gfp_mask)
-{
- return NULL;
-}
-
-static inline
-bool dma_release_from_contiguous(struct device *dev, struct page *pages,
- int count)
-{
- return false;
-}
-
-#endif
-
-#endif
-
-#endif
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
deleted file mode 100644
index c7d844f09c3a..000000000000
--- a/include/linux/dma-debug.h
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (C) 2008 Advanced Micro Devices, Inc.
- *
- * Author: Joerg Roedel <joerg.roedel@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __DMA_DEBUG_H
-#define __DMA_DEBUG_H
-
-#include <linux/types.h>
-
-struct device;
-struct scatterlist;
-struct bus_type;
-
-#ifdef CONFIG_DMA_API_DEBUG
-
-extern void dma_debug_add_bus(struct bus_type *bus);
-
-extern void dma_debug_init(u32 num_entries);
-
-extern int dma_debug_resize_entries(u32 num_entries);
-
-extern void debug_dma_map_page(struct device *dev, struct page *page,
- size_t offset, size_t size,
- int direction, dma_addr_t dma_addr,
- bool map_single);
-
-extern void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
-
-extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
- size_t size, int direction, bool map_single);
-
-extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, int mapped_ents, int direction);
-
-extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, int dir);
-
-extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t dma_addr, void *virt);
-
-extern void debug_dma_free_coherent(struct device *dev, size_t size,
- void *virt, dma_addr_t addr);
-
-extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
- size_t size, int direction,
- dma_addr_t dma_addr);
-
-extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
- size_t size, int direction);
-
-extern void debug_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- int direction);
-
-extern void debug_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle,
- size_t size, int direction);
-
-extern void debug_dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- int direction);
-
-extern void debug_dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size, int direction);
-
-extern void debug_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg,
- int nelems, int direction);
-
-extern void debug_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg,
- int nelems, int direction);
-
-extern void debug_dma_dump_mappings(struct device *dev);
-
-extern void debug_dma_assert_idle(struct page *page);
-
-#else /* CONFIG_DMA_API_DEBUG */
-
-static inline void dma_debug_add_bus(struct bus_type *bus)
-{
-}
-
-static inline void dma_debug_init(u32 num_entries)
-{
-}
-
-static inline int dma_debug_resize_entries(u32 num_entries)
-{
- return 0;
-}
-
-static inline void debug_dma_map_page(struct device *dev, struct page *page,
- size_t offset, size_t size,
- int direction, dma_addr_t dma_addr,
- bool map_single)
-{
-}
-
-static inline void debug_dma_mapping_error(struct device *dev,
- dma_addr_t dma_addr)
-{
-}
-
-static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
- size_t size, int direction,
- bool map_single)
-{
-}
-
-static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, int mapped_ents, int direction)
-{
-}
-
-static inline void debug_dma_unmap_sg(struct device *dev,
- struct scatterlist *sglist,
- int nelems, int dir)
-{
-}
-
-static inline void debug_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t dma_addr, void *virt)
-{
-}
-
-static inline void debug_dma_free_coherent(struct device *dev, size_t size,
- void *virt, dma_addr_t addr)
-{
-}
-
-static inline void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
- size_t size, int direction,
- dma_addr_t dma_addr)
-{
-}
-
-static inline void debug_dma_unmap_resource(struct device *dev,
- dma_addr_t dma_addr, size_t size,
- int direction)
-{
-}
-
-static inline void debug_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- size_t size, int direction)
-{
-}
-
-static inline void debug_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle,
- size_t size, int direction)
-{
-}
-
-static inline void debug_dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- int direction)
-{
-}
-
-static inline void debug_dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- int direction)
-{
-}
-
-static inline void debug_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg,
- int nelems, int direction)
-{
-}
-
-static inline void debug_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg,
- int nelems, int direction)
-{
-}
-
-static inline void debug_dma_dump_mappings(struct device *dev)
-{
-}
-
-static inline void debug_dma_assert_idle(struct page *page)
-{
-}
-
-#endif /* CONFIG_DMA_API_DEBUG */
-
-#endif /* __DMA_DEBUG_H */
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
new file mode 100644
index 000000000000..18aade195884
--- /dev/null
+++ b/include/linux/dma-direct.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Internals of the DMA direct mapping implementation. Only for use by the
+ * DMA mapping code and IOMMU drivers.
+ */
+#ifndef _LINUX_DMA_DIRECT_H
+#define _LINUX_DMA_DIRECT_H 1
+
+#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
+#include <linux/memblock.h> /* for min_low_pfn */
+#include <linux/mem_encrypt.h>
+#include <linux/swiotlb.h>
+
+extern unsigned int zone_dma_bits;
+
+/*
+ * Record the mapping of CPU physical to DMA addresses for a given region.
+ */
+struct bus_dma_region {
+ phys_addr_t cpu_start;
+ dma_addr_t dma_start;
+ u64 size;
+ u64 offset;
+};
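+
+/*
+ * Example (illustrative only, not part of this patch): RAM starting at CPU
+ * physical address 0x80000000 that devices see at DMA address 0x0 would be
+ * described as below; @offset is cpu_start - dma_start, and a zero-sized
+ * entry terminates the table:
+ *
+ *     static const struct bus_dma_region my_map[] = {
+ *             {
+ *                     .cpu_start = 0x80000000,
+ *                     .dma_start = 0x0,
+ *                     .size = SZ_1G,
+ *                     .offset = 0x80000000,
+ *             },
+ *             { 0 }
+ *     };
+ */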
+
+static inline dma_addr_t translate_phys_to_dma(struct device *dev,
+ phys_addr_t paddr)
+{
+ const struct bus_dma_region *m;
+
+ for (m = dev->dma_range_map; m->size; m++)
+ if (paddr >= m->cpu_start && paddr - m->cpu_start < m->size)
+ return (dma_addr_t)paddr - m->offset;
+
+ /* make sure dma_capable fails when no translation is available */
+ return DMA_MAPPING_ERROR;
+}
+
+static inline phys_addr_t translate_dma_to_phys(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ const struct bus_dma_region *m;
+
+ for (m = dev->dma_range_map; m->size; m++)
+ if (dma_addr >= m->dma_start && dma_addr - m->dma_start < m->size)
+ return (phys_addr_t)dma_addr + m->offset;
+
+ return (phys_addr_t)-1;
+}
+
+#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
+#include <asm/dma-direct.h>
+#ifndef phys_to_dma_unencrypted
+#define phys_to_dma_unencrypted phys_to_dma
+#endif
+#else
+static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
+ phys_addr_t paddr)
+{
+ if (dev->dma_range_map)
+ return translate_phys_to_dma(dev, paddr);
+ return paddr;
+}
+
+/*
+ * If memory encryption is supported, phys_to_dma will set the memory encryption
+ * bit in the DMA address, and dma_to_phys will clear it.
+ * phys_to_dma_unencrypted is for use on special unencrypted memory like swiotlb
+ * buffers.
+ */
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return __sme_set(phys_to_dma_unencrypted(dev, paddr));
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+ phys_addr_t paddr;
+
+ if (dev->dma_range_map)
+ paddr = translate_dma_to_phys(dev, dma_addr);
+ else
+ paddr = dma_addr;
+
+ return __sme_clr(paddr);
+}
+#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
+
+#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
+bool force_dma_unencrypted(struct device *dev);
+#else
+static inline bool force_dma_unencrypted(struct device *dev)
+{
+ return false;
+}
+#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
+ bool is_ram)
+{
+ dma_addr_t end = addr + size - 1;
+
+ if (addr == DMA_MAPPING_ERROR)
+ return false;
+ if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
+ min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
+ return false;
+
+ return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
+}
+
+u64 dma_direct_get_required_mask(struct device *dev);
+void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs);
+void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs);
+struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
+void dma_direct_free_pages(struct device *dev, size_t size,
+ struct page *page, dma_addr_t dma_addr,
+ enum dma_data_direction dir);
+int dma_direct_supported(struct device *dev, u64 mask);
+dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+
+#endif /* _LINUX_DMA_DIRECT_H */
diff --git a/include/linux/dma-direction.h b/include/linux/dma-direction.h
index 95b6a82f5951..a2fe4571bc92 100644
--- a/include/linux/dma-direction.h
+++ b/include/linux/dma-direction.h
@@ -1,13 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_DIRECTION_H
#define _LINUX_DMA_DIRECTION_H
-/*
- * These definitions mirror those in pci.h, so they can be used
- * interchangeably with their PCI_ counterparts.
- */
+
enum dma_data_direction {
DMA_BIDIRECTIONAL = 0,
DMA_TO_DEVICE = 1,
DMA_FROM_DEVICE = 2,
DMA_NONE = 3,
};
-#endif
+
+static inline int valid_dma_direction(enum dma_data_direction dir)
+{
+ return dir == DMA_BIDIRECTIONAL || dir == DMA_TO_DEVICE ||
+ dir == DMA_FROM_DEVICE;
+}
+
+#endif /* _LINUX_DMA_DIRECTION_H */
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
index 332a5420243c..ec7f25def392 100644
--- a/include/linux/dma-fence-array.h
+++ b/include/linux/dma-fence-array.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* fence-array: aggregates fence to be waited together
*
@@ -6,21 +7,13 @@
* Authors:
* Gustavo Padovan <gustavo@padovan.org>
* Christian König <christian.koenig@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef __LINUX_DMA_FENCE_ARRAY_H
#define __LINUX_DMA_FENCE_ARRAY_H
#include <linux/dma-fence.h>
+#include <linux/irq_work.h>
/**
* struct dma_fence_array_cb - callback helper for fence array
@@ -39,6 +32,7 @@ struct dma_fence_array_cb {
* @num_fences: number of fences in the array
* @num_pending: fences in the array still pending
* @fences: array of the fences
+ * @work: internal irq_work function
*/
struct dma_fence_array {
struct dma_fence base;
@@ -47,20 +41,9 @@ struct dma_fence_array {
unsigned num_fences;
atomic_t num_pending;
struct dma_fence **fences;
-};
-
-extern const struct dma_fence_ops dma_fence_array_ops;
-/**
- * dma_fence_is_array - check if a fence is from the array subclass
- * @fence: fence to test
- *
- * Return true if it is a dma_fence_array and false otherwise.
- */
-static inline bool dma_fence_is_array(struct dma_fence *fence)
-{
- return fence->ops == &dma_fence_array_ops;
-}
+ struct irq_work work;
+};
/**
* to_dma_fence_array - cast a fence to a dma_fence_array
@@ -72,12 +55,27 @@ static inline bool dma_fence_is_array(struct dma_fence *fence)
static inline struct dma_fence_array *
to_dma_fence_array(struct dma_fence *fence)
{
- if (fence->ops != &dma_fence_array_ops)
+ if (!fence || !dma_fence_is_array(fence))
return NULL;
return container_of(fence, struct dma_fence_array, base);
}
+/**
+ * dma_fence_array_for_each - iterate over all fences in array
+ * @fence: current fence
+ * @index: index into the array
+ * @head: potential dma_fence_array object
+ *
+ * Test if @head is a dma_fence_array object and if yes iterate over all
+ * fences in the array. If not, just iterate over the fence in @head itself.
+ *
+ * For a deep dive iterator see dma_fence_unwrap_for_each().
+ */
+#define dma_fence_array_for_each(fence, index, head) \
+ for (index = 0, fence = dma_fence_array_first(head); fence; \
+ ++(index), fence = dma_fence_array_next(head, index))
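+
+/*
+ * Hypothetical usage sketch (illustrative only):
+ *
+ *     struct dma_fence *fence;
+ *     unsigned int index;
+ *
+ *     dma_fence_array_for_each(fence, index, head)
+ *             pr_info("fence %u from context %llu\n", index, fence->context);
+ */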
+
struct dma_fence_array *dma_fence_array_create(int num_fences,
struct dma_fence **fences,
u64 context, unsigned seqno,
@@ -85,4 +83,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
bool dma_fence_match_context(struct dma_fence *fence, u64 context);
+struct dma_fence *dma_fence_array_first(struct dma_fence *head);
+struct dma_fence *dma_fence_array_next(struct dma_fence *head,
+ unsigned int index);
+
#endif /* __LINUX_DMA_FENCE_ARRAY_H */
diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h
new file mode 100644
index 000000000000..4bdf0b96da28
--- /dev/null
+++ b/include/linux/dma-fence-chain.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * fence-chain: chain fences together in a timeline
+ *
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ * Authors:
+ * Christian König <christian.koenig@amd.com>
+ */
+
+#ifndef __LINUX_DMA_FENCE_CHAIN_H
+#define __LINUX_DMA_FENCE_CHAIN_H
+
+#include <linux/dma-fence.h>
+#include <linux/irq_work.h>
+#include <linux/slab.h>
+
+/**
+ * struct dma_fence_chain - fence to represent a node of a fence chain
+ * @base: fence base class
+ * @prev: previous fence of the chain
+ * @prev_seqno: original previous seqno before garbage collection
+ * @fence: encapsulated fence
+ * @lock: spinlock for fence handling
+ */
+struct dma_fence_chain {
+ struct dma_fence base;
+ struct dma_fence __rcu *prev;
+ u64 prev_seqno;
+ struct dma_fence *fence;
+ union {
+ /**
+ * @cb: callback for signaling
+ *
+ * This is used to add the callback for signaling the
+	 * completion of the fence chain. Never used at the same time
+ * as the irq work.
+ */
+ struct dma_fence_cb cb;
+
+ /**
+ * @work: irq work item for signaling
+ *
+ * Irq work structure to allow us to add the callback without
+ * running into lock inversion. Never used at the same time as
+ * the callback.
+ */
+ struct irq_work work;
+ };
+ spinlock_t lock;
+};
+
+
+/**
+ * to_dma_fence_chain - cast a fence to a dma_fence_chain
+ * @fence: fence to cast to a dma_fence_chain
+ *
+ * Returns NULL if the fence is not a dma_fence_chain,
+ * or the dma_fence_chain otherwise.
+ */
+static inline struct dma_fence_chain *
+to_dma_fence_chain(struct dma_fence *fence)
+{
+ if (!fence || !dma_fence_is_chain(fence))
+ return NULL;
+
+ return container_of(fence, struct dma_fence_chain, base);
+}
+
+/**
+ * dma_fence_chain_contained - return the contained fence
+ * @fence: the fence to test
+ *
+ * If the fence is a dma_fence_chain the function returns the fence contained
+ * inside the chain object, otherwise it returns the fence itself.
+ */
+static inline struct dma_fence *
+dma_fence_chain_contained(struct dma_fence *fence)
+{
+ struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+
+ return chain ? chain->fence : fence;
+}
+
+/**
+ * dma_fence_chain_alloc - allocate a new fence chain node
+ *
+ * Returns a new struct dma_fence_chain object or NULL on failure.
+ */
+static inline struct dma_fence_chain *dma_fence_chain_alloc(void)
+{
+ return kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
+}
+
+/**
+ * dma_fence_chain_free - free a fence chain node
+ * @chain: chain node to free
+ *
+ * Frees up an allocated but not used struct dma_fence_chain object. This
+ * doesn't need an RCU grace period since the fence was never initialized nor
+ * published. After dma_fence_chain_init() has been called the fence must be
+ * released by calling dma_fence_put(), and not through this function.
+ */
+static inline void dma_fence_chain_free(struct dma_fence_chain *chain)
+{
+ kfree(chain);
+}
+
+/**
+ * dma_fence_chain_for_each - iterate over all fences in chain
+ * @iter: current fence
+ * @head: starting point
+ *
+ * Iterate over all fences in the chain. We keep a reference to the current
+ * fence while inside the loop which must be dropped when breaking out.
+ *
+ * For a deep dive iterator see dma_fence_unwrap_for_each().
+ */
+#define dma_fence_chain_for_each(iter, head) \
+ for (iter = dma_fence_get(head); iter; \
+ iter = dma_fence_chain_walk(iter))
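+
+/*
+ * Hypothetical usage sketch (illustrative only); the loop holds a reference
+ * to the current fence, which must be dropped when breaking out early:
+ *
+ *     dma_fence_chain_for_each(iter, head) {
+ *             if (dma_fence_is_signaled(iter)) {
+ *                     dma_fence_put(iter);
+ *                     break;
+ *             }
+ *     }
+ */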
+
+struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence);
+int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno);
+void dma_fence_chain_init(struct dma_fence_chain *chain,
+ struct dma_fence *prev,
+ struct dma_fence *fence,
+ uint64_t seqno);
+
+#endif /* __LINUX_DMA_FENCE_CHAIN_H */
diff --git a/include/linux/dma-fence-unwrap.h b/include/linux/dma-fence-unwrap.h
new file mode 100644
index 000000000000..66b1e56fbb81
--- /dev/null
+++ b/include/linux/dma-fence-unwrap.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ * Authors:
+ * Christian König <christian.koenig@amd.com>
+ */
+
+#ifndef __LINUX_DMA_FENCE_UNWRAP_H
+#define __LINUX_DMA_FENCE_UNWRAP_H
+
+struct dma_fence;
+
+/**
+ * struct dma_fence_unwrap - cursor into the container structure
+ *
+ * Should be used with dma_fence_unwrap_for_each() iterator macro.
+ */
+struct dma_fence_unwrap {
+ /**
+	 * @chain: potential dma_fence_chain, but can be another fence as well
+ */
+ struct dma_fence *chain;
+ /**
+	 * @array: potential dma_fence_array, but can be another fence as well
+ */
+ struct dma_fence *array;
+ /**
+ * @index: last returned index if @array is really a dma_fence_array
+ */
+ unsigned int index;
+};
+
+struct dma_fence *dma_fence_unwrap_first(struct dma_fence *head,
+ struct dma_fence_unwrap *cursor);
+struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor);
+
+/**
+ * dma_fence_unwrap_for_each - iterate over all fences in containers
+ * @fence: current fence
+ * @cursor: current position inside the containers
+ * @head: starting point for the iterator
+ *
+ * Unwrap dma_fence_chain and dma_fence_array containers and deep dive into all
+ * potential fences in them. If @head is just a normal fence only that one is
+ * returned.
+ */
+#define dma_fence_unwrap_for_each(fence, cursor, head) \
+ for (fence = dma_fence_unwrap_first(head, cursor); fence; \
+ fence = dma_fence_unwrap_next(cursor))
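+
+/*
+ * Hypothetical usage sketch (illustrative only): count the leaf fences of a
+ * possibly nested container:
+ *
+ *     struct dma_fence_unwrap cursor;
+ *     struct dma_fence *f;
+ *     unsigned int count = 0;
+ *
+ *     dma_fence_unwrap_for_each(f, &cursor, head)
+ *             count++;
+ */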
+
+struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
+ struct dma_fence **fences,
+ struct dma_fence_unwrap *cursors);
+
+/**
+ * dma_fence_unwrap_merge - unwrap and merge fences
+ *
+ * All fences given as parameters are unwrapped and merged back together as a
+ * flat dma_fence_array. Useful if multiple containers need to be merged.
+ *
+ * Implemented as a macro to allocate the necessary arrays on the stack and
+ * account the stack frame size to the caller.
+ *
+ * Returns NULL on memory allocation failure, a dma_fence object representing
+ * all the given fences otherwise.
+ */
+#define dma_fence_unwrap_merge(...) \
+ ({ \
+ struct dma_fence *__f[] = { __VA_ARGS__ }; \
+ struct dma_fence_unwrap __c[ARRAY_SIZE(__f)]; \
+ \
+ __dma_fence_unwrap_merge(ARRAY_SIZE(__f), __f, __c); \
+ })
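+
+/*
+ * Hypothetical usage sketch (illustrative only):
+ *
+ *     fence = dma_fence_unwrap_merge(fence_a, fence_b, fence_c);
+ *     if (!fence)
+ *             return -ENOMEM;
+ */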
+
+#endif
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index a5195a7d6f77..d54b595a0fe0 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Fence mechanism for dma-buf to allow for asynchronous dma access
*
@@ -7,15 +8,6 @@
* Authors:
* Rob Clark <robdclark@gmail.com>
* Maarten Lankhorst <maarten.lankhorst@canonical.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef __LINUX_DMA_FENCE_H
@@ -55,6 +47,7 @@ struct dma_fence_cb;
* of the time.
*
* DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled
+ * DMA_FENCE_FLAG_TIMESTAMP_BIT - timestamp recorded for fence signaling
* DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called
* DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
* implementer of the fence for its own purposes. Can be used in different
@@ -70,20 +63,41 @@ struct dma_fence_cb;
* been completed, or never called at all.
*/
struct dma_fence {
- struct kref refcount;
- const struct dma_fence_ops *ops;
- struct rcu_head rcu;
- struct list_head cb_list;
spinlock_t *lock;
+ const struct dma_fence_ops *ops;
+ /*
+ * We clear the callback list on kref_put so that by the time we
+ * release the fence it is unused. No one should be adding to the
+ * cb_list that they don't themselves hold a reference for.
+ *
+ * The lifetime of the timestamp is similarly tied to both the
+ * rcu freelist and the cb_list. The timestamp is only set upon
+ * signaling while simultaneously notifying the cb_list. Ergo, we
+	 * only use either the cb_list or the timestamp. Upon destruction,
+	 * neither is accessible, and so we can use the rcu. This means
+ * that the cb_list is *only* valid until the signal bit is set,
+ * and to read either you *must* hold a reference to the fence,
+ * and not just the rcu_read_lock.
+ *
+ * Listed in chronological order.
+ */
+ union {
+ struct list_head cb_list;
+ /* @cb_list replaced by @timestamp on dma_fence_signal() */
+ ktime_t timestamp;
+ /* @timestamp replaced by @rcu on dma_fence_release() */
+ struct rcu_head rcu;
+ };
u64 context;
- unsigned seqno;
+ u64 seqno;
unsigned long flags;
- ktime_t timestamp;
+ struct kref refcount;
int error;
};
enum dma_fence_flag_bits {
DMA_FENCE_FLAG_SIGNALED_BIT,
+ DMA_FENCE_FLAG_TIMESTAMP_BIT,
DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
DMA_FENCE_FLAG_USER_BITS, /* must always be last member */
};
@@ -92,11 +106,11 @@ typedef void (*dma_fence_func_t)(struct dma_fence *fence,
struct dma_fence_cb *cb);
/**
- * struct dma_fence_cb - callback for dma_fence_add_callback
- * @node: used by dma_fence_add_callback to append this struct to fence::cb_list
+ * struct dma_fence_cb - callback for dma_fence_add_callback()
+ * @node: used by dma_fence_add_callback() to append this struct to fence::cb_list
* @func: dma_fence_func_t to call
*
- * This struct will be initialized by dma_fence_add_callback, additional
+ * This struct will be initialized by dma_fence_add_callback(), additional
* data can be passed along by embedding dma_fence_cb in another struct.
*/
struct dma_fence_cb {
@@ -106,88 +120,175 @@ struct dma_fence_cb {
/**
* struct dma_fence_ops - operations implemented for fence
- * @get_driver_name: returns the driver name.
- * @get_timeline_name: return the name of the context this fence belongs to.
- * @enable_signaling: enable software signaling of fence.
- * @signaled: [optional] peek whether the fence is signaled, can be null.
- * @wait: custom wait implementation, or dma_fence_default_wait.
- * @release: [optional] called on destruction of fence, can be null
- * @fill_driver_data: [optional] callback to fill in free-form debug info
- * Returns amount of bytes filled, or -errno.
- * @fence_value_str: [optional] fills in the value of the fence as a string
- * @timeline_value_str: [optional] fills in the current value of the timeline
- * as a string
- *
- * Notes on enable_signaling:
- * For fence implementations that have the capability for hw->hw
- * signaling, they can implement this op to enable the necessary
- * irqs, or insert commands into cmdstream, etc. This is called
- * in the first wait() or add_callback() path to let the fence
- * implementation know that there is another driver waiting on
- * the signal (ie. hw->sw case).
*
- * This function can be called called from atomic context, but not
- * from irq context, so normal spinlocks can be used.
- *
- * A return value of false indicates the fence already passed,
- * or some failure occurred that made it impossible to enable
- * signaling. True indicates successful enabling.
- *
- * fence->error may be set in enable_signaling, but only when false is
- * returned.
- *
- * Calling dma_fence_signal before enable_signaling is called allows
- * for a tiny race window in which enable_signaling is called during,
- * before, or after dma_fence_signal. To fight this, it is recommended
- * that before enable_signaling returns true an extra reference is
- * taken on the fence, to be released when the fence is signaled.
- * This will mean dma_fence_signal will still be called twice, but
- * the second time will be a noop since it was already signaled.
- *
- * Notes on signaled:
- * May set fence->error if returning true.
- *
- * Notes on wait:
- * Must not be NULL, set to dma_fence_default_wait for default implementation.
- * the dma_fence_default_wait implementation should work for any fence, as long
- * as enable_signaling works correctly.
- *
- * Must return -ERESTARTSYS if the wait is intr = true and the wait was
- * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
- * timed out. Can also return other error values on custom implementations,
- * which should be treated as if the fence is signaled. For example a hardware
- * lockup could be reported like that.
- *
- * Notes on release:
- * Can be NULL, this function allows additional commands to run on
- * destruction of the fence. Can be called from irq context.
- * If pointer is set to NULL, kfree will get called instead.
*/
-
struct dma_fence_ops {
+ /**
+ * @use_64bit_seqno:
+ *
+ * True if this dma_fence implementation uses 64bit seqno, false
+ * otherwise.
+ */
+ bool use_64bit_seqno;
+
+ /**
+ * @get_driver_name:
+ *
+ * Returns the driver name. This is a callback to allow drivers to
+	 * compute the name at runtime, without having to store it permanently
+ * for each fence, or build a cache of some sort.
+ *
+ * This callback is mandatory.
+ */
const char * (*get_driver_name)(struct dma_fence *fence);
+
+ /**
+ * @get_timeline_name:
+ *
+ * Return the name of the context this fence belongs to. This is a
+ * callback to allow drivers to compute the name at runtime, without
+	 * having to store it permanently for each fence, or build a cache of
+ * some sort.
+ *
+ * This callback is mandatory.
+ */
const char * (*get_timeline_name)(struct dma_fence *fence);
+
+ /**
+ * @enable_signaling:
+ *
+ * Enable software signaling of fence.
+ *
+ * For fence implementations that have the capability for hw->hw
+ * signaling, they can implement this op to enable the necessary
+ * interrupts, or insert commands into cmdstream, etc, to avoid these
+ * costly operations for the common case where only hw->hw
+ * synchronization is required. This is called in the first
+ * dma_fence_wait() or dma_fence_add_callback() path to let the fence
+ * implementation know that there is another driver waiting on the
+ * signal (ie. hw->sw case).
+ *
+ * This function can be called from atomic context, but not
+ * from irq context, so normal spinlocks can be used.
+ *
+ * A return value of false indicates the fence already passed,
+ * or some failure occurred that made it impossible to enable
+ * signaling. True indicates successful enabling.
+ *
+ * &dma_fence.error may be set in enable_signaling, but only when false
+ * is returned.
+ *
+	 * Since many implementations can call dma_fence_signal() even before
+	 * @enable_signaling has been called, there's a race window where
+	 * dma_fence_signal() might result in the final fence reference being
+ * released and its memory freed. To avoid this, implementations of this
+ * callback should grab their own reference using dma_fence_get(), to be
+ * released when the fence is signalled (through e.g. the interrupt
+ * handler).
+ *
+ * This callback is optional. If this callback is not present, then the
+ * driver must always have signaling enabled.
+ */
bool (*enable_signaling)(struct dma_fence *fence);
+
+ /**
+ * @signaled:
+ *
+ * Peek whether the fence is signaled, as a fastpath optimization for
+ * e.g. dma_fence_wait() or dma_fence_add_callback(). Note that this
+	 * callback does not need to make any guarantees beyond that once a
+	 * fence indicates as signalled it must always return true from this
+	 * callback. This callback may return false even if the fence has
+	 * completed already; in this case information hasn't propagated through
+	 * the system yet. See also dma_fence_is_signaled().
+ *
+ * May set &dma_fence.error if returning true.
+ *
+ * This callback is optional.
+ */
bool (*signaled)(struct dma_fence *fence);
+
+ /**
+ * @wait:
+ *
+ * Custom wait implementation, defaults to dma_fence_default_wait() if
+ * not set.
+ *
+ * Deprecated and should not be used by new implementations. Only used
+ * by existing implementations which need special handling for their
+ * hardware reset procedure.
+ *
+	 * Must return -ERESTARTSYS if the wait is interruptible (intr = true)
+	 * and was interrupted, the remaining jiffies if the fence has signaled,
+	 * or 0 if the wait timed out. Custom implementations can also return
+	 * other error values, which should be treated as if the fence is
+	 * signaled; a hardware lockup could be reported that way, for example.
+ */
signed long (*wait)(struct dma_fence *fence,
bool intr, signed long timeout);
+
+ /**
+ * @release:
+ *
+ * Called on destruction of fence to release additional resources.
+ * Can be called from irq context. This callback is optional. If it is
+ * NULL, then dma_fence_free() is instead called as the default
+ * implementation.
+ */
void (*release)(struct dma_fence *fence);
- int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);
+ /**
+ * @fence_value_str:
+ *
+ * Callback to fill in free-form debug info specific to this fence, like
+ * the sequence number.
+ *
+ * This callback is optional.
+ */
void (*fence_value_str)(struct dma_fence *fence, char *str, int size);
+
+ /**
+ * @timeline_value_str:
+ *
+ * Fills in the current value of the timeline as a string, like the
+ * sequence number. Note that the specific fence passed to this function
+ * should not matter, drivers should only use it to look up the
+ * corresponding timeline structures.
+ */
void (*timeline_value_str)(struct dma_fence *fence,
char *str, int size);
+
+ /**
+ * @set_deadline:
+ *
+ * Callback to allow a fence waiter to inform the fence signaler of
+ * an upcoming deadline, such as vblank, by which point the waiter
+	 * would prefer the fence to be signaled. This is intended to
+ * give feedback to the fence signaler to aid in power management
+ * decisions, such as boosting GPU frequency.
+ *
+ * This is called without &dma_fence.lock held, it can be called
+ * multiple times and from any context. Locking is up to the callee
+ * if it has some state to manage. If multiple deadlines are set,
+ * the expectation is to track the soonest one. If the deadline is
+ * before the current time, it should be interpreted as an immediate
+ * deadline.
+ *
+ * This callback is optional.
+ */
+ void (*set_deadline)(struct dma_fence *fence, ktime_t deadline);
};
void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
- spinlock_t *lock, u64 context, unsigned seqno);
+ spinlock_t *lock, u64 context, u64 seqno);
void dma_fence_release(struct kref *kref);
void dma_fence_free(struct dma_fence *fence);
+void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq);
/**
* dma_fence_put - decreases refcount of the fence
- * @fence: [in] fence to reduce refcount of
+ * @fence: fence to reduce refcount of
*/
static inline void dma_fence_put(struct dma_fence *fence)
{
@@ -197,7 +298,7 @@ static inline void dma_fence_put(struct dma_fence *fence)
/**
* dma_fence_get - increases refcount of the fence
- * @fence: [in] fence to increase refcount of
+ * @fence: fence to increase refcount of
*
* Returns the same fence, with refcount increased by 1.
*/
@@ -209,9 +310,9 @@ static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)
}
/**
- * dma_fence_get_rcu - get a fence from a reservation_object_list with
+ * dma_fence_get_rcu - get a fence from a dma_resv_list with
* rcu read lock
- * @fence: [in] fence to increase refcount of
+ * @fence: fence to increase refcount of
*
 * Function returns the fence, or NULL if no refcount could be obtained.
*/
@@ -225,7 +326,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
/**
* dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence
- * @fencep: [in] pointer to fence to increase refcount of
+ * @fencep: pointer to fence to increase refcount of
*
 * Function returns the fence, or NULL if no refcount could be obtained.
* This function handles acquiring a reference to a fence that may be
@@ -233,22 +334,25 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
* so long as the caller is using RCU on the pointer to the fence.
*
* An alternative mechanism is to employ a seqlock to protect a bunch of
- * fences, such as used by struct reservation_object. When using a seqlock,
+ * fences, such as used by struct dma_resv. When using a seqlock,
* the seqlock must be taken before and checked after a reference to the
* fence is acquired (as shown here).
*
* The caller is required to hold the RCU read lock.
*/
static inline struct dma_fence *
-dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
+dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep)
{
do {
struct dma_fence *fence;
fence = rcu_dereference(*fencep);
- if (!fence || !dma_fence_get_rcu(fence))
+ if (!fence)
return NULL;
+ if (!dma_fence_get_rcu(fence))
+ continue;
+
/* The atomic_inc_not_zero() inside dma_fence_get_rcu()
* provides a full memory barrier upon success (such as now).
* This is paired with the write barrier from assigning
@@ -270,8 +374,24 @@ dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
} while (1);
}
+#ifdef CONFIG_LOCKDEP
+bool dma_fence_begin_signalling(void);
+void dma_fence_end_signalling(bool cookie);
+void __dma_fence_might_wait(void);
+#else
+static inline bool dma_fence_begin_signalling(void)
+{
+ return true;
+}
+static inline void dma_fence_end_signalling(bool cookie) {}
+static inline void __dma_fence_might_wait(void) {}
+#endif
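+
+/*
+ * Hypothetical lockdep annotation sketch (illustrative only): wrap a code
+ * path that eventually signals a fence so lockdep can flag waits that could
+ * deadlock against fence signalling:
+ *
+ *     bool cookie = dma_fence_begin_signalling();
+ *
+ *     ... prepare and submit the job to hardware ...
+ *
+ *     dma_fence_signal(fence);
+ *     dma_fence_end_signalling(cookie);
+ */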
+
int dma_fence_signal(struct dma_fence *fence);
int dma_fence_signal_locked(struct dma_fence *fence);
+int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp);
+int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
+ ktime_t timestamp);
signed long dma_fence_default_wait(struct dma_fence *fence,
bool intr, signed long timeout);
int dma_fence_add_callback(struct dma_fence *fence,
@@ -284,14 +404,16 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence);
/**
* dma_fence_is_signaled_locked - Return an indication if the fence
* is signaled yet.
- * @fence: [in] the fence to check
+ * @fence: the fence to check
*
* Returns true if the fence was already signaled, false if not. Since this
* function doesn't enable signaling, it is not guaranteed to ever return
- * true if dma_fence_add_callback, dma_fence_wait or
- * dma_fence_enable_sw_signaling haven't been called before.
+ * true if dma_fence_add_callback(), dma_fence_wait() or
+ * dma_fence_enable_sw_signaling() haven't been called before.
*
- * This function requires fence->lock to be held.
+ * This function requires &dma_fence.lock to be held.
+ *
+ * See also dma_fence_is_signaled().
*/
static inline bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
@@ -309,17 +431,19 @@ dma_fence_is_signaled_locked(struct dma_fence *fence)
/**
* dma_fence_is_signaled - Return an indication if the fence is signaled yet.
- * @fence: [in] the fence to check
+ * @fence: the fence to check
*
* Returns true if the fence was already signaled, false if not. Since this
* function doesn't enable signaling, it is not guaranteed to ever return
- * true if dma_fence_add_callback, dma_fence_wait or
- * dma_fence_enable_sw_signaling haven't been called before.
+ * true if dma_fence_add_callback(), dma_fence_wait() or
+ * dma_fence_enable_sw_signaling() haven't been called before.
*
* It's recommended for seqno fences to call dma_fence_signal when the
* operation is complete, it makes it possible to prevent issues from
* wraparound between time of issue and time of use by checking the return
* value of this function before calling hardware-specific wait instructions.
+ *
+ * See also dma_fence_is_signaled_locked().
*/
static inline bool
dma_fence_is_signaled(struct dma_fence *fence)
@@ -336,9 +460,31 @@ dma_fence_is_signaled(struct dma_fence *fence)
}
/**
+ * __dma_fence_is_later - return if f1 is chronologically later than f2
+ * @f1: the first fence's seqno
+ * @f2: the second fence's seqno from the same context
+ * @ops: dma_fence_ops associated with the seqno
+ *
+ * Returns true if f1 is chronologically later than f2. Both fences must be
+ * from the same context, since a seqno is not common across contexts.
+ */
+static inline bool __dma_fence_is_later(u64 f1, u64 f2,
+ const struct dma_fence_ops *ops)
+{
+ /* This is for backward compatibility with drivers which can only handle
+ * 32bit sequence numbers. Use a 64bit compare when the driver says to
+ * do so.
+ */
+ if (ops->use_64bit_seqno)
+ return f1 > f2;
+
+ return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0;
+}
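+
+/*
+ * Example (illustrative only): with 32bit seqnos the signed compare above
+ * handles wraparound, e.g. f1 = 1 and f2 = 0xfffffffe gives
+ * (int)(1 - 0xfffffffe) = 3 > 0, so f1 is correctly seen as later.
+ */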
+
+/**
* dma_fence_is_later - return if f1 is chronologically later than f2
- * @f1: [in] the first fence from the same context
- * @f2: [in] the second fence from the same context
+ * @f1: the first fence from the same context
+ * @f2: the second fence from the same context
*
* Returns true if f1 is chronologically later than f2. Both fences must be
* from the same context, since a seqno is not re-used across contexts.
@@ -349,13 +495,13 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
if (WARN_ON(f1->context != f2->context))
return false;
- return (int)(f1->seqno - f2->seqno) > 0;
+ return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops);
}
/**
* dma_fence_later - return the chronologically later fence
- * @f1: [in] the first fence from the same context
- * @f2: [in] the second fence from the same context
+ * @f1: the first fence from the same context
+ * @f2: the second fence from the same context
*
* Returns NULL if both fences are signaled, otherwise the fence that would be
* signaled last. Both fences must be from the same context, since a seqno is
@@ -380,7 +526,7 @@ static inline struct dma_fence *dma_fence_later(struct dma_fence *f1,
/**
* dma_fence_get_status_locked - returns the status upon completion
- * @fence: [in] the dma_fence to query
+ * @fence: the dma_fence to query
*
* Drivers can supply an optional error status condition before they signal
* the fence (to indicate whether the fence was completed due to an error
@@ -404,8 +550,8 @@ int dma_fence_get_status(struct dma_fence *fence);
/**
* dma_fence_set_error - flag an error condition on the fence
- * @fence: [in] the dma_fence
- * @error: [in] the error to store
+ * @fence: the dma_fence
+ * @error: the error to store
*
* Drivers can supply an optional error status condition before they signal
* the fence, to indicate that the fence was completed due to an error
@@ -416,8 +562,8 @@ int dma_fence_get_status(struct dma_fence *fence);
static inline void dma_fence_set_error(struct dma_fence *fence,
int error)
{
- BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
- BUG_ON(error >= 0 || error < -MAX_ERRNO);
+ WARN_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+ WARN_ON(error >= 0 || error < -MAX_ERRNO);
fence->error = error;
}
@@ -431,8 +577,8 @@ signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
/**
* dma_fence_wait - sleep until the fence gets signaled
- * @fence: [in] the fence to wait on
- * @intr: [in] if true, do an interruptible wait
+ * @fence: the fence to wait on
+ * @intr: if true, do an interruptible wait
*
* This function will return -ERESTARTSYS if interrupted by a signal,
* or 0 if the fence was signaled. Other error values may be
@@ -441,6 +587,8 @@ signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
* Performs a synchronous wait on this fence. It is assumed the caller
* directly or indirectly holds a reference to the fence, otherwise the
* fence might be freed before return, resulting in undefined behavior.
+ *
+ * See also dma_fence_wait_timeout() and dma_fence_wait_any_timeout().
*/
static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
{
@@ -455,28 +603,48 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
return ret < 0 ? ret : 0;
}
+void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline);
+
+struct dma_fence *dma_fence_get_stub(void);
+struct dma_fence *dma_fence_allocate_private_stub(void);
u64 dma_fence_context_alloc(unsigned num);
-#define DMA_FENCE_TRACE(f, fmt, args...) \
- do { \
- struct dma_fence *__ff = (f); \
- if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \
- pr_info("f %llu#%u: " fmt, \
- __ff->context, __ff->seqno, ##args); \
- } while (0)
-
-#define DMA_FENCE_WARN(f, fmt, args...) \
- do { \
- struct dma_fence *__ff = (f); \
- pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
- ##args); \
- } while (0)
-
-#define DMA_FENCE_ERR(f, fmt, args...) \
- do { \
- struct dma_fence *__ff = (f); \
- pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
- ##args); \
- } while (0)
+extern const struct dma_fence_ops dma_fence_array_ops;
+extern const struct dma_fence_ops dma_fence_chain_ops;
+
+/**
+ * dma_fence_is_array - check if a fence is from the array subclass
+ * @fence: the fence to test
+ *
+ * Return true if it is a dma_fence_array and false otherwise.
+ */
+static inline bool dma_fence_is_array(struct dma_fence *fence)
+{
+ return fence->ops == &dma_fence_array_ops;
+}
+
+/**
+ * dma_fence_is_chain - check if a fence is from the chain subclass
+ * @fence: the fence to test
+ *
+ * Return true if it is a dma_fence_chain and false otherwise.
+ */
+static inline bool dma_fence_is_chain(struct dma_fence *fence)
+{
+ return fence->ops == &dma_fence_chain_ops;
+}
+
+/**
+ * dma_fence_is_container - check if a fence is a container for other fences
+ * @fence: the fence to test
+ *
+ * Return true if this fence is a container for other fences, false otherwise.
+ * This is important since we can't build up large fence structures, as
+ * otherwise we would run into recursion when operating on those fences.
+ */
+static inline bool dma_fence_is_container(struct dma_fence *fence)
+{
+ return dma_fence_is_array(fence) || dma_fence_is_chain(fence);
+}
#endif /* __LINUX_DMA_FENCE_H */
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
new file mode 100644
index 000000000000..0c05561cad6e
--- /dev/null
+++ b/include/linux/dma-heap.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DMABUF Heaps Allocation Infrastructure
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#ifndef _DMA_HEAPS_H
+#define _DMA_HEAPS_H
+
+#include <linux/cdev.h>
+#include <linux/types.h>
+
+struct dma_heap;
+
+/**
+ * struct dma_heap_ops - ops to operate on a given heap
+ * @allocate: allocate dmabuf and return struct dma_buf ptr
+ *
+ * allocate returns dmabuf on success, ERR_PTR(-errno) on error.
+ */
+struct dma_heap_ops {
+ struct dma_buf *(*allocate)(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags);
+};
+
+/**
+ * struct dma_heap_export_info - information needed to export a new dmabuf heap
+ * @name: used for debugging/device-node name
+ * @ops: ops struct for this heap
+ * @priv: heap exporter private data
+ *
+ * Information needed to export a new dmabuf heap.
+ */
+struct dma_heap_export_info {
+ const char *name;
+ const struct dma_heap_ops *ops;
+ void *priv;
+};
+
+/**
+ * dma_heap_get_drvdata() - get per-heap driver data
+ * @heap: DMA-Heap to retrieve private data for
+ *
+ * Returns:
+ * The per-heap data for the heap.
+ */
+void *dma_heap_get_drvdata(struct dma_heap *heap);
+
+/**
+ * dma_heap_get_name() - get heap name
+ * @heap: DMA-Heap to retrieve the name for
+ *
+ * Returns:
+ * The char* for the heap name.
+ */
+const char *dma_heap_get_name(struct dma_heap *heap);
+
+/**
+ * dma_heap_add - adds a heap to dmabuf heaps
+ * @exp_info: information needed to register this heap
+ */
+struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info);
+
+#endif /* _DMA_HEAPS_H */
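For orientation, a minimal exporter sketch (all names hypothetical; it assumes dma_heap_add() returns an ERR_PTR on failure, and a real allocate() op would build and export a dma_buf rather than failing):

	static struct dma_buf *example_heap_allocate(struct dma_heap *heap,
						     unsigned long len,
						     unsigned long fd_flags,
						     unsigned long heap_flags)
	{
		/* A real heap allocates backing pages and exports a dma_buf. */
		return ERR_PTR(-ENOMEM);
	}

	static const struct dma_heap_ops example_heap_ops = {
		.allocate = example_heap_allocate,
	};

	static int __init example_heap_init(void)
	{
		struct dma_heap_export_info exp_info = {
			.name = "example",
			.ops  = &example_heap_ops,
			.priv = NULL,
		};

		return PTR_ERR_OR_ZERO(dma_heap_add(&exp_info));
	}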
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
deleted file mode 100644
index 92f20832fd28..000000000000
--- a/include/linux/dma-iommu.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (C) 2014-2015 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __DMA_IOMMU_H
-#define __DMA_IOMMU_H
-
-#ifdef __KERNEL__
-#include <asm/errno.h>
-
-#ifdef CONFIG_IOMMU_DMA
-#include <linux/dma-mapping.h>
-#include <linux/iommu.h>
-#include <linux/msi.h>
-
-int iommu_dma_init(void);
-
-/* Domain management interface for IOMMU drivers */
-int iommu_get_dma_cookie(struct iommu_domain *domain);
-int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
-void iommu_put_dma_cookie(struct iommu_domain *domain);
-
-/* Setup call for arch DMA mapping code */
-int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
- u64 size, struct device *dev);
-
-/* General helpers for DMA-API <-> IOMMU-API interaction */
-int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
- unsigned long attrs);
-
-/*
- * These implement the bulk of the relevant DMA mapping callbacks, but require
- * the arch code to take care of attributes and cache maintenance
- */
-struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
- unsigned long attrs, int prot, dma_addr_t *handle,
- void (*flush_page)(struct device *, const void *, phys_addr_t));
-void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
- dma_addr_t *handle);
-
-int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma);
-
-dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, int prot);
-int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, int prot);
-
-/*
- * Arch code with no special attribute handling may use these
- * directly as DMA mapping callbacks for simplicity
- */
-void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir, unsigned long attrs);
-void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, unsigned long attrs);
-dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
- size_t size, enum dma_data_direction dir, unsigned long attrs);
-void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir, unsigned long attrs);
-int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
-
-/* The DMA API isn't _quite_ the whole story, though... */
-void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
-void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
-
-#else
-
-struct iommu_domain;
-struct msi_msg;
-struct device;
-
-static inline int iommu_dma_init(void)
-{
- return 0;
-}
-
-static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
-{
- return -ENODEV;
-}
-
-static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
-{
- return -ENODEV;
-}
-
-static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
-{
-}
-
-static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
-{
-}
-
-static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
-{
-}
-
-#endif /* CONFIG_IOMMU_DMA */
-#endif /* __KERNEL__ */
-#endif /* __DMA_IOMMU_H */
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
new file mode 100644
index 000000000000..31f114f486c4
--- /dev/null
+++ b/include/linux/dma-map-ops.h
@@ -0,0 +1,451 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header is for implementations of dma_map_ops and related code.
+ * It should not be included in drivers just using the DMA API.
+ */
+#ifndef _LINUX_DMA_MAP_OPS_H
+#define _LINUX_DMA_MAP_OPS_H
+
+#include <linux/dma-mapping.h>
+#include <linux/pgtable.h>
+
+struct cma;
+
+/*
+ * Values for struct dma_map_ops.flags:
+ *
+ * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can
+ * handle PCI P2PDMA pages in the map_sg/unmap_sg operation.
+ */
+#define DMA_F_PCI_P2PDMA_SUPPORTED (1 << 0)
+
+struct dma_map_ops {
+ unsigned int flags;
+
+ void *(*alloc)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ unsigned long attrs);
+ void (*free)(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs);
+ struct page *(*alloc_pages)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, enum dma_data_direction dir,
+ gfp_t gfp);
+ void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
+ dma_addr_t dma_handle, enum dma_data_direction dir);
+ struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size,
+ enum dma_data_direction dir, gfp_t gfp,
+ unsigned long attrs);
+ void (*free_noncontiguous)(struct device *dev, size_t size,
+ struct sg_table *sgt, enum dma_data_direction dir);
+ int (*mmap)(struct device *, struct vm_area_struct *,
+ void *, dma_addr_t, size_t, unsigned long attrs);
+
+ int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+
+ dma_addr_t (*map_page)(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+ void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+ /*
+ * map_sg should return a negative error code on error. See
+ * dma_map_sgtable() for a list of appropriate error codes
+ * and their meanings.
+ */
+ int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+ void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+ dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+ void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+ void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir);
+ void (*sync_single_for_device)(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir);
+ void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir);
+ void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir);
+ void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction);
+ int (*dma_supported)(struct device *dev, u64 mask);
+ u64 (*get_required_mask)(struct device *dev);
+ size_t (*max_mapping_size)(struct device *dev);
+ size_t (*opt_mapping_size)(void);
+ unsigned long (*get_merge_boundary)(struct device *dev);
+};
+
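To make the table concrete, a hedged sketch of an implementation skeleton (hypothetical; it assumes a 1:1 bus mapping, and a real implementation must also provide map_sg/unmap_sg plus whatever other callbacks its platform needs) that would be installed with set_dma_ops() below:

	static dma_addr_t example_map_page(struct device *dev, struct page *page,
					   unsigned long offset, size_t size,
					   enum dma_data_direction dir,
					   unsigned long attrs)
	{
		/* Assumes device bus addresses equal CPU physical addresses. */
		return page_to_phys(page) + offset;
	}

	static const struct dma_map_ops example_dma_ops = {
		.map_page = example_map_page,
	};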
+#ifdef CONFIG_DMA_OPS
+#include <asm/dma-mapping.h>
+
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ if (dev->dma_ops)
+ return dev->dma_ops;
+ return get_arch_dma_ops();
+}
+
+static inline void set_dma_ops(struct device *dev,
+ const struct dma_map_ops *dma_ops)
+{
+ dev->dma_ops = dma_ops;
+}
+#else /* CONFIG_DMA_OPS */
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ return NULL;
+}
+static inline void set_dma_ops(struct device *dev,
+ const struct dma_map_ops *dma_ops)
+{
+}
+#endif /* CONFIG_DMA_OPS */
+
+#ifdef CONFIG_DMA_CMA
+extern struct cma *dma_contiguous_default_area;
+
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+ if (dev && dev->cma_area)
+ return dev->cma_area;
+ return dma_contiguous_default_area;
+}
+
+void dma_contiguous_reserve(phys_addr_t addr_limit);
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+ phys_addr_t limit, struct cma **res_cma, bool fixed);
+
+struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
+ unsigned int order, bool no_warn);
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+ int count);
+struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
+void dma_free_contiguous(struct device *dev, struct page *page, size_t size);
+
+void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
+#else /* CONFIG_DMA_CMA */
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+ return NULL;
+}
+static inline void dma_contiguous_reserve(phys_addr_t limit)
+{
+}
+static inline int dma_contiguous_reserve_area(phys_addr_t size,
+ phys_addr_t base, phys_addr_t limit, struct cma **res_cma,
+ bool fixed)
+{
+ return -ENOSYS;
+}
+static inline struct page *dma_alloc_from_contiguous(struct device *dev,
+ size_t count, unsigned int order, bool no_warn)
+{
+ return NULL;
+}
+static inline bool dma_release_from_contiguous(struct device *dev,
+ struct page *pages, int count)
+{
+ return false;
+}
+/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
+static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
+ gfp_t gfp)
+{
+ return NULL;
+}
+static inline void dma_free_contiguous(struct device *dev, struct page *page,
+ size_t size)
+{
+ __free_pages(page, get_order(size));
+}
+#endif /* CONFIG_DMA_CMA */
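A sketch of how allocator code might layer on these helpers (function name hypothetical; the fallback mirrors the comment above for the CONFIG_DMA_CMA=n case):

	static struct page *example_alloc_pages(struct device *dev, size_t size,
						gfp_t gfp)
	{
		struct page *page = dma_alloc_contiguous(dev, size, gfp);

		if (!page)	/* no CMA page: fall back to the buddy allocator */
			page = alloc_pages(gfp, get_order(size));
		return page;
	}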
+
+#ifdef CONFIG_DMA_PERNUMA_CMA
+void dma_pernuma_cma_reserve(void);
+#else
+static inline void dma_pernuma_cma_reserve(void) { }
+#endif /* CONFIG_DMA_PERNUMA_CMA */
+
+#ifdef CONFIG_DMA_DECLARE_COHERENT
+int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+ dma_addr_t device_addr, size_t size);
+void dma_release_coherent_memory(struct device *dev);
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle, void **ret);
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, size_t size, int *ret);
+#else
+static inline int dma_declare_coherent_memory(struct device *dev,
+ phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
+{
+ return -ENOSYS;
+}
+
+#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
+#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
+#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
+static inline void dma_release_coherent_memory(struct device *dev) { }
+#endif /* CONFIG_DMA_DECLARE_COHERENT */
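Usage sketch for the coherent carveout (the physical address and size here are made up; a real driver takes them from its platform resource):

	static int example_probe(struct device *dev)
	{
		/* Hypothetical 1 MiB device-local window at 0x90000000. */
		return dma_declare_coherent_memory(dev, 0x90000000,
						   0x90000000, SZ_1M);
	}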
+
+#ifdef CONFIG_DMA_GLOBAL_POOL
+void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle);
+int dma_release_from_global_coherent(int order, void *vaddr);
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
+ size_t size, int *ret);
+int dma_init_global_coherent(phys_addr_t phys_addr, size_t size);
+#else
+static inline void *dma_alloc_from_global_coherent(struct device *dev,
+ ssize_t size, dma_addr_t *dma_handle)
+{
+ return NULL;
+}
+static inline int dma_release_from_global_coherent(int order, void *vaddr)
+{
+ return 0;
+}
+static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
+ void *cpu_addr, size_t size, int *ret)
+{
+ return 0;
+}
+#endif /* CONFIG_DMA_GLOBAL_POOL */
+
+/*
+ * This is the actual return value from the ->alloc_noncontiguous method.
+ * The users of the DMA API should only care about the sg_table, but to make
+ * the DMA-API internal vmapping and freeing easier we stash away the page
+ * array as well (except for the fallback case). This can go away any time,
+ * e.g. when a vmap-variant that takes a scatterlist comes along.
+ */
+struct dma_sgt_handle {
+ struct sg_table sgt;
+ struct page **pages;
+};
+#define sgt_handle(sgt) \
+ container_of((sgt), struct dma_sgt_handle, sgt)
+
+int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+struct page *dma_common_alloc_pages(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
+void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
+ dma_addr_t dma_handle, enum dma_data_direction dir);
+
+struct page **dma_common_find_pages(void *cpu_addr);
+void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot,
+ const void *caller);
+void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot,
+ const void *caller);
+void dma_common_free_remap(void *cpu_addr, size_t size);
+
+struct page *dma_alloc_from_pool(struct device *dev, size_t size,
+ void **cpu_addr, gfp_t flags,
+ bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
+bool dma_free_from_pool(struct device *dev, void *start, size_t size);
+
+int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
+ dma_addr_t dma_start, u64 size);
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+extern bool dma_default_coherent;
+static inline bool dev_is_dma_coherent(struct device *dev)
+{
+ return dev->dma_coherent;
+}
+#else
+#define dma_default_coherent true
+
+static inline bool dev_is_dma_coherent(struct device *dev)
+{
+ return true;
+}
+#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_{DEVICE,CPU,CPU_ALL} */
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs);
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs);
+
+#ifdef CONFIG_MMU
+/*
+ * Page protection so that devices that can't snoop CPU caches can use the
+ * memory coherently. We default to pgprot_noncached which is usually used
+ * for ioremap as a safe bet, but architectures can override this with less
+ * strict semantics if possible.
+ */
+#ifndef pgprot_dmacoherent
+#define pgprot_dmacoherent(prot) pgprot_noncached(prot)
+#endif
+
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
+#else
+static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
+ unsigned long attrs)
+{
+ return prot; /* no protection bits supported without page tables */
+}
+#endif /* CONFIG_MMU */
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir);
+#else
+static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+}
+#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir);
+#else
+static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+}
+#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
+void arch_sync_dma_for_cpu_all(void);
+#else
+static inline void arch_sync_dma_for_cpu_all(void)
+{
+}
+#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
+
+#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
+void arch_dma_prep_coherent(struct page *page, size_t size);
+#else
+static inline void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+}
+#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */
+
+#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
+void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
+#else
+static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
+{
+}
+#endif /* ARCH_HAS_DMA_MARK_CLEAN */
+
+void *arch_dma_set_uncached(void *addr, size_t size);
+void arch_dma_clear_uncached(void *addr, size_t size);
+
+#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
+bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr);
+bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle);
+bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
+ int nents);
+bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
+ int nents);
+#else
+#define arch_dma_map_page_direct(d, a) (false)
+#define arch_dma_unmap_page_direct(d, a) (false)
+#define arch_dma_map_sg_direct(d, s, n) (false)
+#define arch_dma_unmap_sg_direct(d, s, n) (false)
+#endif
+
+#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ const struct iommu_ops *iommu, bool coherent);
+#else
+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
+ u64 size, const struct iommu_ops *iommu, bool coherent)
+{
+}
+#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */
+
+#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
+void arch_teardown_dma_ops(struct device *dev);
+#else
+static inline void arch_teardown_dma_ops(struct device *dev)
+{
+}
+#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */
+
+#ifdef CONFIG_DMA_API_DEBUG
+void dma_debug_add_bus(struct bus_type *bus);
+void debug_dma_dump_mappings(struct device *dev);
+#else
+static inline void dma_debug_add_bus(struct bus_type *bus)
+{
+}
+static inline void debug_dma_dump_mappings(struct device *dev)
+{
+}
+#endif /* CONFIG_DMA_API_DEBUG */
+
+extern const struct dma_map_ops dma_dummy_ops;
+
+enum pci_p2pdma_map_type {
+ /*
+ * PCI_P2PDMA_MAP_UNKNOWN: Used internally for indicating the mapping
+ * type hasn't been calculated yet. Functions that return this enum
+ * never return this value.
+ */
+ PCI_P2PDMA_MAP_UNKNOWN = 0,
+
+ /*
+ * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will
+ * traverse the host bridge and the host bridge is not in the
+ * allowlist. DMA Mapping routines should return an error when
+ * this is returned.
+ */
+ PCI_P2PDMA_MAP_NOT_SUPPORTED,
+
+ /*
+ * PCI_P2PDMA_MAP_BUS_ADDR: Indicates that two devices can talk to
+ * each other directly through a PCI switch and the transaction will
+ * not traverse the host bridge. Such a mapping should program
+ * the DMA engine with PCI bus addresses.
+ */
+ PCI_P2PDMA_MAP_BUS_ADDR,
+
+ /*
+ * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk
+ * to each other, but the transaction traverses a host bridge on the
+ * allowlist. In this case, a normal mapping either with CPU physical
+ * addresses (in the case of dma-direct) or IOVA addresses (in the
+ * case of IOMMUs) should be used to program the DMA engine.
+ */
+ PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
+};
+
+struct pci_p2pdma_map_state {
+ struct dev_pagemap *pgmap;
+ int map;
+ u64 bus_off;
+};
+
+#ifdef CONFIG_PCI_P2PDMA
+enum pci_p2pdma_map_type
+pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
+ struct scatterlist *sg);
+#else /* CONFIG_PCI_P2PDMA */
+static inline enum pci_p2pdma_map_type
+pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
+ struct scatterlist *sg)
+{
+ return PCI_P2PDMA_MAP_NOT_SUPPORTED;
+}
+#endif /* CONFIG_PCI_P2PDMA */
+
+#endif /* _LINUX_DMA_MAP_OPS_H */
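For context, a sketch of how a map_sg-style implementation might drive this state machine (modeled loosely on the in-tree direct-mapping code; the details here are illustrative, not normative):

	static int example_map_sg(struct device *dev, struct scatterlist *sgl,
				  int nents)
	{
		struct pci_p2pdma_map_state p2pdma_state = {};
		struct scatterlist *sg;
		int i;

		for_each_sg(sgl, sg, nents, i) {
			if (is_pci_p2pdma_page(sg_page(sg))) {
				switch (pci_p2pdma_map_segment(&p2pdma_state,
							       dev, sg)) {
				case PCI_P2PDMA_MAP_BUS_ADDR:
					continue; /* sg now holds a bus address */
				case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
					break;	  /* map via the normal path */
				default:
					return -EREMOTEIO;
				}
			}
			/* ... normal per-segment mapping would go here ... */
		}
		return nents;
	}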
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 843ab866e0f4..0ee20b764000 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H
@@ -5,20 +6,16 @@
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
-#include <linux/kmemcheck.h>
#include <linux/bug.h>
+#include <linux/mem_encrypt.h>
/**
* List of possible attributes associated with a DMA mapping. The semantics
- * of each attribute should be defined in Documentation/DMA-attributes.txt.
- *
- * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
- * forces all pending DMA writes to complete.
+ * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
*/
-#define DMA_ATTR_WRITE_BARRIER (1UL << 0)
+
/*
* DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
* may be weakly ordered, that is that reads and writes may pass each other.
@@ -30,11 +27,6 @@
*/
#define DMA_ATTR_WRITE_COMBINE (1UL << 2)
/*
- * DMA_ATTR_NON_CONSISTENT: Lets the platform to choose to return either
- * consistent or non-consistent memory as it sees fit.
- */
-#define DMA_ATTR_NON_CONSISTENT (1UL << 3)
-/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
* virtual mapping for the allocated buffer.
*/
@@ -70,524 +62,382 @@
#define DMA_ATTR_PRIVILEGED (1UL << 9)
/*
- * A dma_addr_t can hold any valid DMA or bus address for the platform.
- * It can be given to a device to use as a DMA source or target. A CPU cannot
- * reference a dma_addr_t directly because there may be translation between
- * its physical address space and the bus address space.
+ * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
+ * be given to a device to use as a DMA source or target. It is specific to a
+ * given device and there may be a translation between the CPU physical address
+ * space and the bus address space.
+ *
+ * DMA_MAPPING_ERROR is the magic error code returned when a mapping fails.
+ * Drivers should not test for it directly, but use dma_mapping_error()
+ * instead.
*/
-struct dma_map_ops {
- void* (*alloc)(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs);
- void (*free)(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs);
- int (*mmap)(struct device *, struct vm_area_struct *,
- void *, dma_addr_t, size_t,
- unsigned long attrs);
-
- int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
- dma_addr_t, size_t, unsigned long attrs);
-
- dma_addr_t (*map_page)(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs);
- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
- /*
- * map_sg returns 0 on error and a value > 0 on success.
- * It should never return a value < 0.
- */
- int (*map_sg)(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs);
- void (*unmap_sg)(struct device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction dir,
- unsigned long attrs);
- dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
- void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
- void (*sync_single_for_cpu)(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction dir);
- void (*sync_single_for_device)(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction dir);
- void (*sync_sg_for_cpu)(struct device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction dir);
- void (*sync_sg_for_device)(struct device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction dir);
- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
- int (*dma_supported)(struct device *dev, u64 mask);
-#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
- u64 (*get_required_mask)(struct device *dev);
-#endif
- int is_phys;
-};
-
-extern const struct dma_map_ops dma_noop_ops;
-extern const struct dma_map_ops dma_virt_ops;
+#define DMA_MAPPING_ERROR (~(dma_addr_t)0)
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
-#define DMA_MASK_NONE 0x0ULL
-
-static inline int valid_dma_direction(int dma_direction)
+#ifdef CONFIG_DMA_API_DEBUG
+void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
+void debug_dma_map_single(struct device *dev, const void *addr,
+ unsigned long len);
+#else
+static inline void debug_dma_mapping_error(struct device *dev,
+ dma_addr_t dma_addr)
{
- return ((dma_direction == DMA_BIDIRECTIONAL) ||
- (dma_direction == DMA_TO_DEVICE) ||
- (dma_direction == DMA_FROM_DEVICE));
}
-
-static inline int is_device_dma_capable(struct device *dev)
+static inline void debug_dma_map_single(struct device *dev, const void *addr,
+ unsigned long len)
{
- return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}
-
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
-/*
- * These three functions are only for dma allocator.
- * Don't use them in device drivers.
- */
-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
- dma_addr_t *dma_handle, void **ret);
-int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
-
-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, size_t size, int *ret);
-#else
-#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
-#define dma_release_from_coherent(dev, order, vaddr) (0)
-#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
-#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
+#endif /* CONFIG_DMA_API_DEBUG */
#ifdef CONFIG_HAS_DMA
-#include <asm/dma-mapping.h>
-static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- if (dev && dev->dma_ops)
- return dev->dma_ops;
- return get_arch_dma_ops(dev ? dev->bus : NULL);
+ debug_dma_mapping_error(dev, dma_addr);
+
+ if (unlikely(dma_addr == DMA_MAPPING_ERROR))
+ return -ENOMEM;
+ return 0;
}
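The canonical caller-side pattern, as a sketch (buffer handling hypothetical): map, check with dma_mapping_error() before ever using the address, and unmap when done:

	static int example_single_map(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, addr))
			return -ENOMEM;	/* addr must not be used on failure */
		/* ... hand addr to the hardware, wait for completion ... */
		dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
		return 0;
	}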
-static inline void set_dma_ops(struct device *dev,
- const struct dma_map_ops *dma_ops)
+dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
+ size_t offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, unsigned long attrs);
+void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs);
+int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs);
+dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir);
+void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir);
+void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir);
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir);
+void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag, unsigned long attrs);
+void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, unsigned long attrs);
+void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs);
+void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle);
+int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+bool dma_can_mmap(struct device *dev);
+bool dma_pci_p2pdma_supported(struct device *dev);
+int dma_set_mask(struct device *dev, u64 mask);
+int dma_set_coherent_mask(struct device *dev, u64 mask);
+u64 dma_get_required_mask(struct device *dev);
+size_t dma_max_mapping_size(struct device *dev);
+size_t dma_opt_mapping_size(struct device *dev);
+bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
+unsigned long dma_get_merge_boundary(struct device *dev);
+struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
+ enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
+void dma_free_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt, enum dma_data_direction dir);
+void *dma_vmap_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt);
+void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
+int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
+ size_t size, struct sg_table *sgt);
+#else /* CONFIG_HAS_DMA */
+static inline dma_addr_t dma_map_page_attrs(struct device *dev,
+ struct page *page, size_t offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
{
- dev->dma_ops = dma_ops;
+ return DMA_MAPPING_ERROR;
}
-#else
-/*
- * Define the dma api to allow compilation but not linking of
- * dma dependent code. Code that depends on the dma-mapping
- * API needs to set 'depends on HAS_DMA' in its Kconfig
- */
-extern const struct dma_map_ops bad_dma_ops;
-static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- return &bad_dma_ops;
}
-#endif
-
-static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
- size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
+static inline unsigned int dma_map_sg_attrs(struct device *dev,
+ struct scatterlist *sg, int nents, enum dma_data_direction dir,
+ unsigned long attrs)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- dma_addr_t addr;
-
- kmemcheck_mark_initialized(ptr, size);
- BUG_ON(!valid_dma_direction(dir));
- addr = ops->map_page(dev, virt_to_page(ptr),
- offset_in_page(ptr), size,
- dir, attrs);
- debug_dma_map_page(dev, virt_to_page(ptr),
- offset_in_page(ptr), size,
- dir, addr, true);
- return addr;
+ return 0;
}
-
-static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
- size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
+static inline void dma_unmap_sg_attrs(struct device *dev,
+ struct scatterlist *sg, int nents, enum dma_data_direction dir,
+ unsigned long attrs)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->unmap_page)
- ops->unmap_page(dev, addr, size, dir, attrs);
- debug_dma_unmap_page(dev, addr, size, dir, true);
}
-
-/*
- * dma_maps_sg_attrs returns 0 on error and > 0 on success.
- * It should never return a value < 0.
- */
-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
+static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- int i, ents;
- struct scatterlist *s;
-
- for_each_sg(sg, s, nents, i)
- kmemcheck_mark_initialized(sg_virt(s), s->length);
- BUG_ON(!valid_dma_direction(dir));
- ents = ops->map_sg(dev, sg, nents, dir, attrs);
- BUG_ON(ents < 0);
- debug_dma_map_sg(dev, sg, nents, ents, dir);
-
- return ents;
+ return -EOPNOTSUPP;
}
-
-static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
+static inline dma_addr_t dma_map_resource(struct device *dev,
+ phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- debug_dma_unmap_sg(dev, sg, nents, dir);
- if (ops->unmap_sg)
- ops->unmap_sg(dev, sg, nents, dir, attrs);
+ return DMA_MAPPING_ERROR;
}
-
-static inline dma_addr_t dma_map_page_attrs(struct device *dev,
- struct page *page,
- size_t offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
+static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- dma_addr_t addr;
-
- kmemcheck_mark_initialized(page_address(page) + offset, size);
- BUG_ON(!valid_dma_direction(dir));
- addr = ops->map_page(dev, page, offset, size, dir, attrs);
- debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
- return addr;
}
-
-static inline void dma_unmap_page_attrs(struct device *dev,
- dma_addr_t addr, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->unmap_page)
- ops->unmap_page(dev, addr, size, dir, attrs);
- debug_dma_unmap_page(dev, addr, size, dir, false);
}
-
-static inline dma_addr_t dma_map_resource(struct device *dev,
- phys_addr_t phys_addr,
- size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- dma_addr_t addr;
-
- BUG_ON(!valid_dma_direction(dir));
-
- /* Don't allow RAM to be mapped */
- BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));
-
- addr = phys_addr;
- if (ops->map_resource)
- addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
-
- debug_dma_map_resource(dev, phys_addr, size, dir, addr);
-
- return addr;
}
-
-static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->unmap_resource)
- ops->unmap_resource(dev, addr, size, dir, attrs);
- debug_dma_unmap_resource(dev, addr, size, dir);
}
-
-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
- size_t size,
- enum dma_data_direction dir)
+static inline void dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_cpu)
- ops->sync_single_for_cpu(dev, addr, size, dir);
- debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
-
-static inline void dma_sync_single_for_device(struct device *dev,
- dma_addr_t addr, size_t size,
- enum dma_data_direction dir)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_device)
- ops->sync_single_for_device(dev, addr, size, dir);
- debug_dma_sync_single_for_device(dev, addr, size, dir);
+ return -ENOMEM;
}
-
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t addr,
- unsigned long offset,
- size_t size,
- enum dma_data_direction dir)
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_cpu)
- ops->sync_single_for_cpu(dev, addr + offset, size, dir);
- debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+ return NULL;
}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t addr,
- unsigned long offset,
- size_t size,
- enum dma_data_direction dir)
+static inline void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, unsigned long attrs)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_device)
- ops->sync_single_for_device(dev, addr + offset, size, dir);
- debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
+static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_sg_for_cpu)
- ops->sync_sg_for_cpu(dev, sg, nelems, dir);
- debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+ return NULL;
}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
+static inline void dmam_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_sg_for_device)
- ops->sync_sg_for_device(dev, sg, nelems, dir);
- debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
-
}
-
-#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
-#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
-#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
-#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
-#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
-#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
-
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-
-void *dma_common_contiguous_remap(struct page *page, size_t size,
- unsigned long vm_flags,
- pgprot_t prot, const void *caller);
-
-void *dma_common_pages_remap(struct page **pages, size_t size,
- unsigned long vm_flags, pgprot_t prot,
- const void *caller);
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
-
-/**
- * dma_mmap_attrs - map a coherent DMA allocation into user space
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @vma: vm_area_struct describing requested user mapping
- * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
- * @handle: device-view address returned from dma_alloc_attrs
- * @size: size of memory originally requested in dma_alloc_attrs
- * @attrs: attributes of mapping properties requested in dma_alloc_attrs
- *
- * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
- * into user space. The coherent DMA buffer must not be freed by the
- * driver until the user space mapping has been released.
- */
-static inline int
-dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, unsigned long attrs)
+static inline int dma_get_sgtable_attrs(struct device *dev,
+ struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
+ size_t size, unsigned long attrs)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- BUG_ON(!ops);
- if (ops->mmap)
- return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
- return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+ return -ENXIO;
}
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
-
-int
-dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-
-static inline int
-dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
- dma_addr_t dma_addr, size_t size,
- unsigned long attrs)
+static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- BUG_ON(!ops);
- if (ops->get_sgtable)
- return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
- attrs);
- return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
+ return -ENXIO;
}
-
-#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
-
-#ifndef arch_dma_alloc_attrs
-#define arch_dma_alloc_attrs(dev, flag) (true)
-#endif
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- unsigned long attrs)
+static inline bool dma_can_mmap(struct device *dev)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- void *cpu_addr;
-
- BUG_ON(!ops);
-
- if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
- return cpu_addr;
-
- if (!arch_dma_alloc_attrs(&dev, &flag))
- return NULL;
- if (!ops->alloc)
- return NULL;
-
- cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
- debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
- return cpu_addr;
+ return false;
}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle,
- unsigned long attrs)
+static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!ops);
- WARN_ON(irqs_disabled());
+ return false;
+}
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+ return -EIO;
+}
+static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+ return -EIO;
+}
+static inline u64 dma_get_required_mask(struct device *dev)
+{
+ return 0;
+}
+static inline size_t dma_max_mapping_size(struct device *dev)
+{
+ return 0;
+}
+static inline size_t dma_opt_mapping_size(struct device *dev)
+{
+ return 0;
+}
+static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+ return false;
+}
+static inline unsigned long dma_get_merge_boundary(struct device *dev)
+{
+ return 0;
+}
+static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
+ size_t size, enum dma_data_direction dir, gfp_t gfp,
+ unsigned long attrs)
+{
+ return NULL;
+}
+static inline void dma_free_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+}
+static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt)
+{
+ return NULL;
+}
+static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
+{
+}
+static inline int dma_mmap_noncontiguous(struct device *dev,
+ struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_HAS_DMA */
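A sketch of the noncontiguous allocation flow declared above (wrapper name hypothetical): allocate the sg_table, then optionally vmap it for CPU access:

	static void *example_alloc_noncontig(struct device *dev, size_t size,
					     struct sg_table **sgtp)
	{
		struct sg_table *sgt;
		void *vaddr;

		sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
					      GFP_KERNEL, 0);
		if (!sgt)
			return NULL;

		vaddr = dma_vmap_noncontiguous(dev, size, sgt);
		if (!vaddr) {
			dma_free_noncontiguous(dev, size, sgt,
					       DMA_BIDIRECTIONAL);
			return NULL;
		}
		*sgtp = sgt;
		return vaddr;	/* later: dma_vunmap_noncontiguous() + free */
	}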
- if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
- return;
+struct page *dma_alloc_pages(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
+void dma_free_pages(struct device *dev, size_t size, struct page *page,
+ dma_addr_t dma_handle, enum dma_data_direction dir);
+int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
+ size_t size, struct page *page);
- if (!ops->free || !cpu_addr)
- return;
+static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
+{
+ struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
+ return page ? page_address(page) : NULL;
+}
- debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
- ops->free(dev, size, cpu_addr, dma_handle, attrs);
+static inline void dma_free_noncoherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
+{
+ dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
+ /* DMA must never operate on areas that might be remapped. */
+ if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
+ "rejecting DMA map of vmalloc memory\n"))
+ return DMA_MAPPING_ERROR;
+ debug_dma_map_single(dev, ptr, size);
+ return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
+ size, dir, attrs);
}
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
+ return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}
-static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir)
{
- return dma_alloc_attrs(dev, size, dma_handle, gfp,
- DMA_ATTR_NON_CONSISTENT);
+ return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}
-static inline void dma_free_noncoherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir)
{
- dma_free_attrs(dev, size, cpu_addr, dma_handle,
- DMA_ATTR_NON_CONSISTENT);
+ return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+/**
+ * dma_unmap_sgtable - Unmap the given buffer for DMA
+ * @dev: The device for which to perform the DMA operation
+ * @sgt: The sg_table object describing the buffer
+ * @dir: DMA direction
+ * @attrs: Optional DMA attributes for the unmap operation
+ *
+ * Unmaps a buffer described by a scatterlist stored in the given sg_table
+ * object for the @dir DMA operation by the @dev device. After this function
+ * the ownership of the buffer is transferred back to the CPU domain.
+ */
+static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs)
{
- debug_dma_mapping_error(dev, dma_addr);
+ dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
+}
- if (get_dma_ops(dev)->mapping_error)
- return get_dma_ops(dev)->mapping_error(dev, dma_addr);
- return 0;
+/**
+ * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
+ * @dev: The device for which to perform the DMA operation
+ * @sgt: The sg_table object describing the buffer
+ * @dir: DMA direction
+ *
+ * Performs the needed cache synchronization and moves the ownership of the
+ * buffer back to the CPU domain, so it is safe to perform any access to it
+ * by the CPU. Before doing any further DMA operations, one has to transfer
+ * the ownership of the buffer back to the DMA domain by calling
+ * dma_sync_sgtable_for_device().
+ */
+static inline void dma_sync_sgtable_for_cpu(struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}
-static inline int dma_supported(struct device *dev, u64 mask)
+/**
+ * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
+ * @dev: The device for which to perform the DMA operation
+ * @sgt: The sg_table object describing the buffer
+ * @dir: DMA direction
+ *
+ * Performs the needed cache synchronization and moves the ownership of the
+ * buffer back to the DMA domain, so it is safe to perform the DMA operation.
+ * Once finished, one has to call dma_sync_sgtable_for_cpu() or
+ * dma_unmap_sgtable().
+ */
+static inline void dma_sync_sgtable_for_device(struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
+ dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
+}
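Putting the three sgtable helpers together, a sketch of the ownership hand-offs they document (device interaction elided):

	static int example_sgtable_io(struct device *dev, struct sg_table *sgt)
	{
		int ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);

		if (ret)
			return ret;
		/* ... device reads the buffer ... */
		dma_sync_sgtable_for_cpu(dev, sgt, DMA_TO_DEVICE);
		/* CPU may inspect or rewrite the buffer here. */
		dma_sync_sgtable_for_device(dev, sgt, DMA_TO_DEVICE);
		/* ... device reads again ... */
		dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
		return 0;
	}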
- if (!ops)
- return 0;
- if (!ops->dma_supported)
- return 1;
- return ops->dma_supported(dev, mask);
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
+#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
+#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ return dma_alloc_attrs(dev, size, dma_handle, gfp,
+ (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}
-#ifndef HAVE_ARCH_DMA_SET_MASK
-static inline int dma_set_mask(struct device *dev, u64 mask)
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
{
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
- *dev->dma_mask = mask;
- return 0;
+ return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
-#endif
+
static inline u64 dma_get_mask(struct device *dev)
{
- if (dev && dev->dma_mask && *dev->dma_mask)
+ if (dev->dma_mask && *dev->dma_mask)
return *dev->dma_mask;
return DMA_BIT_MASK(32);
}
-#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
-int dma_set_coherent_mask(struct device *dev, u64 mask);
-#else
-static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
-{
- if (!dma_supported(dev, mask))
- return -EIO;
- dev->coherent_dma_mask = mask;
- return 0;
-}
-#endif
-
/*
* Set both the DMA mask and the coherent DMA mask to the same thing.
* Note that we don't check the return value from dma_set_coherent_mask()
@@ -612,17 +462,19 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
return dma_set_mask_and_coherent(dev, mask);
}
-extern u64 dma_get_required_mask(struct device *dev);
-
-#ifndef arch_setup_dma_ops
-static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
- u64 size, const struct iommu_ops *iommu,
- bool coherent) { }
-#endif
-
-#ifndef arch_teardown_dma_ops
-static inline void arch_teardown_dma_ops(struct device *dev) { }
-#endif
+/**
+ * dma_addressing_limited - return whether the device is addressing-limited
+ * @dev: device to check
+ *
+ * Return %true if the device's DMA mask is too small to address all memory in
+ * the system, else %false. Lack of addressing bits is the prime reason for
+ * bounce buffering, but might not be the only one.
+ */
+static inline bool dma_addressing_limited(struct device *dev)
+{
+ return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
+ dma_get_required_mask(dev);
+}
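A probe-time sketch (driver policy hypothetical): negotiate the widest mask the hardware supports, then use dma_addressing_limited() to decide whether bouncing may still occur:

	static int example_setup_dma(struct device *dev)
	{
		if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
		    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
			return -EIO;

		if (dma_addressing_limited(dev))
			dev_info(dev, "DMA mask limited, bounce buffering possible\n");
		return 0;
	}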
static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
@@ -631,8 +483,7 @@ static inline unsigned int dma_get_max_seg_size(struct device *dev)
return SZ_64K;
}
-static inline unsigned int dma_set_max_seg_size(struct device *dev,
- unsigned int size)
+static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
if (dev->dma_parms) {
dev->dma_parms->max_segment_size = size;
@@ -645,7 +496,26 @@ static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
return dev->dma_parms->segment_boundary_mask;
- return DMA_BIT_MASK(32);
+ return ULONG_MAX;
+}
+
+/**
+ * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
+ * @dev: device to query the boundary for
+ * @page_shift: ilog2() of the IOMMU page size
+ *
+ * Return the segment boundary in IOMMU page units (which may be different from
+ * the CPU page size) for the passed in device.
+ *
+ * If @dev is NULL, a boundary of U32_MAX is assumed; this case exists only
+ * for non-DMA-API callers.
+ */
+static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
+ unsigned int page_shift)
+{
+ if (!dev)
+ return (U32_MAX >> page_shift) + 1;
+ return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}
static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
@@ -657,22 +527,22 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
return -EIO;
}
-#ifndef dma_max_pfn
-static inline unsigned long dma_max_pfn(struct device *dev)
+static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
- return *dev->dma_mask >> PAGE_SHIFT;
+ if (dev->dma_parms)
+ return dev->dma_parms->min_align_mask;
+ return 0;
}
-#endif
-static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+static inline int dma_set_min_align_mask(struct device *dev,
+ unsigned int min_align_mask)
{
- void *ret = dma_alloc_coherent(dev, size, dma_handle,
- flag | __GFP_ZERO);
- return ret;
+ if (WARN_ON_ONCE(!dev->dma_parms))
+ return -EIO;
+ dev->dma_parms->min_align_mask = min_align_mask;
+ return 0;
}
-#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
@@ -680,91 +550,24 @@ static inline int dma_get_cache_alignment(void)
#endif
return 1;
}
-#endif
-/* flags for the coherent memory api */
-#define DMA_MEMORY_MAP 0x01
-#define DMA_MEMORY_IO 0x02
-#define DMA_MEMORY_INCLUDES_CHILDREN 0x04
-#define DMA_MEMORY_EXCLUSIVE 0x08
-
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
-int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
- dma_addr_t device_addr, size_t size, int flags);
-void dma_release_declared_memory(struct device *dev);
-void *dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size);
-#else
-static inline int
-dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
- dma_addr_t device_addr, size_t size, int flags)
-{
- return 0;
-}
-
-static inline void
-dma_release_declared_memory(struct device *dev)
-{
-}
-
-static inline void *
-dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size)
-{
- return ERR_PTR(-EBUSY);
-}
-#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
-
-#ifdef CONFIG_HAS_DMA
-int dma_configure(struct device *dev);
-void dma_deconfigure(struct device *dev);
-#else
-static inline int dma_configure(struct device *dev)
-{
- return 0;
-}
-
-static inline void dma_deconfigure(struct device *dev) {}
-#endif
-
-/*
- * Managed DMA API
- */
-extern void *dmam_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp);
-extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle);
-extern void *dmam_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs);
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
-extern int dmam_declare_coherent_memory(struct device *dev,
- phys_addr_t phys_addr,
- dma_addr_t device_addr, size_t size,
- int flags);
-extern void dmam_release_declared_memory(struct device *dev);
-#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
-static inline int dmam_declare_coherent_memory(struct device *dev,
- phys_addr_t phys_addr, dma_addr_t device_addr,
- size_t size, gfp_t gfp)
-{
- return 0;
-}
-
-static inline void dmam_release_declared_memory(struct device *dev)
+static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
{
+ return dmam_alloc_attrs(dev, size, dma_handle, gfp,
+ (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}
-#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline void *dma_alloc_wc(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t gfp)
{
- return dma_alloc_attrs(dev, size, dma_addr, gfp,
- DMA_ATTR_WRITE_COMBINE);
+ unsigned long attrs = DMA_ATTR_WRITE_COMBINE;
+
+ if (gfp & __GFP_NOWARN)
+ attrs |= DMA_ATTR_NO_WARN;
+
+ return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}
-#ifndef dma_alloc_writecombine
-#define dma_alloc_writecombine dma_alloc_wc
-#endif
static inline void dma_free_wc(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr)
@@ -772,9 +575,6 @@ static inline void dma_free_wc(struct device *dev, size_t size,
return dma_free_attrs(dev, size, cpu_addr, dma_addr,
DMA_ATTR_WRITE_COMBINE);
}
-#ifndef dma_free_writecombine
-#define dma_free_writecombine dma_free_wc
-#endif
static inline int dma_mmap_wc(struct device *dev,
struct vm_area_struct *vma,
@@ -784,11 +584,8 @@ static inline int dma_mmap_wc(struct device *dev,
return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
DMA_ATTR_WRITE_COMBINE);
}
-#ifndef dma_mmap_writecombine
-#define dma_mmap_writecombine dma_mmap_wc
-#endif
-#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
+#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
@@ -804,4 +601,4 @@ static inline int dma_mmap_wc(struct device *dev,
#define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
#endif
-#endif
+#endif /* _LINUX_DMA_MAPPING_H */
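
A minimal usage sketch of the helpers above (not part of the patch; the alignment and size values are illustrative, and dev->dma_parms is assumed to be provided by the bus):

	static int example_probe(struct device *dev)
	{
		dma_addr_t dma;
		void *buf;
		int ret;

		/* Request 64-byte sub-page alignment; needs dev->dma_parms set. */
		ret = dma_set_min_align_mask(dev, 64 - 1);
		if (ret)
			return ret;

		/* dma_alloc_wc() turns __GFP_NOWARN into DMA_ATTR_NO_WARN. */
		buf = dma_alloc_wc(dev, 4096, &dma, GFP_KERNEL | __GFP_NOWARN);
		if (!buf)
			return -ENOMEM;

		dma_free_wc(dev, 4096, buf, dma);
		return 0;
	}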
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
new file mode 100644
index 000000000000..8d0e34dad446
--- /dev/null
+++ b/include/linux/dma-resv.h
@@ -0,0 +1,487 @@
+/*
+ * Header file for reservations for dma-buf and ttm
+ *
+ * Copyright(C) 2011 Linaro Limited. All rights reserved.
+ * Copyright (C) 2012-2013 Canonical Ltd
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
+ * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ *
+ * Based on bo.c which bears the following copyright notice,
+ * but is dual licensed:
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _LINUX_RESERVATION_H
+#define _LINUX_RESERVATION_H
+
+#include <linux/ww_mutex.h>
+#include <linux/dma-fence.h>
+#include <linux/slab.h>
+#include <linux/seqlock.h>
+#include <linux/rcupdate.h>
+
+extern struct ww_class reservation_ww_class;
+
+struct dma_resv_list;
+
+/**
+ * enum dma_resv_usage - how the fences from a dma_resv obj are used
+ *
+ * This enum describes the different use cases for a dma_resv object and
+ * controls which fences are returned when queried.
+ *
+ * An important fact is that the usages are ordered KERNEL < WRITE < READ <
+ * BOOKKEEP, and when the dma_resv object is asked for fences of one use case
+ * the fences for the lower use cases are returned as well.
+ *
+ * For example, when asking for WRITE fences the KERNEL fences are returned
+ * as well. Similarly, when asked for READ fences, both WRITE and KERNEL
+ * fences are returned.
+ *
+ * Already used fences can be promoted in the sense that a fence with
+ * DMA_RESV_USAGE_BOOKKEEP could become DMA_RESV_USAGE_READ by adding it again
+ * with this usage. But fences can never be degraded in the sense that a fence
+ * with DMA_RESV_USAGE_WRITE could become DMA_RESV_USAGE_READ.
+ */
+enum dma_resv_usage {
+ /**
+ * @DMA_RESV_USAGE_KERNEL: For in kernel memory management only.
+ *
+ * This should only be used for things like copying or clearing memory
+ * with a DMA hardware engine for the purpose of kernel memory
+ * management.
+ *
+ * Drivers *always* must wait for those fences before accessing the
+ * resource protected by the dma_resv object. The only exception for
+ * that is when the resource is known to be locked down in place by
+ * pinning it previously.
+ */
+ DMA_RESV_USAGE_KERNEL,
+
+ /**
+ * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
+ *
+ * This should only be used for userspace command submissions which add
+ * an implicit write dependency.
+ */
+ DMA_RESV_USAGE_WRITE,
+
+ /**
+ * @DMA_RESV_USAGE_READ: Implicit read synchronization.
+ *
+ * This should only be used for userspace command submissions which add
+ * an implicit read dependency.
+ */
+ DMA_RESV_USAGE_READ,
+
+ /**
+ * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync.
+ *
+ * This should be used by submissions which don't want to participate in
+ * any implicit synchronization.
+ *
+ * The most common cases are preemption fences, page table updates, TLB
+ * flushes and explicitly synced user submissions.
+ *
+ * Explicitly synced user submissions can be promoted to
+ * DMA_RESV_USAGE_READ or DMA_RESV_USAGE_WRITE as needed using
+ * dma_buf_import_sync_file() when implicit synchronization becomes
+ * necessary after the fence was initially added.
+ */
+ DMA_RESV_USAGE_BOOKKEEP
+};
+
+/**
+ * dma_resv_usage_rw - helper for implicit sync
+ * @write: true if we create a new implicit sync write
+ *
+ * This returns the implicit synchronization usage for write or read accesses,
+ * see enum dma_resv_usage and &dma_buf.resv.
+ */
+static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
+{
+ /* This looks confusing at first sight, but is indeed correct.
+ *
+ * The rationale is that new write operations need to wait for the
+ * existing read and write operations to finish.
+ * But a new read operation only needs to wait for the existing write
+ * operations to finish.
+ */
+ return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
+}
+
+/**
+ * struct dma_resv - a reservation object manages fences for a buffer
+ *
+ * This is a container for dma_fence objects which needs to handle multiple use
+ * cases.
+ *
+ * One use is to synchronize cross-driver access to a struct dma_buf, either for
+ * dynamic buffer management or just to handle implicit synchronization between
+ * different users of the buffer in userspace. See &dma_buf.resv for a more
+ * in-depth discussion.
+ *
+ * The other major use is to manage access and locking within a driver in a
+ * buffer based memory manager. struct ttm_buffer_object is the canonical
+ * example here, since this is where reservation objects originated from. But
+ * use in drivers is spreading and some drivers also manage struct
+ * drm_gem_object with the same scheme.
+ */
+struct dma_resv {
+ /**
+ * @lock:
+ *
+ * Update side lock. Don't use directly, instead use the wrapper
+ * functions like dma_resv_lock() and dma_resv_unlock().
+ *
+ * Drivers which use the reservation object to manage memory dynamically
+ * also use this lock to protect buffer object state like placement,
+ * allocation policies or throughout command submission.
+ */
+ struct ww_mutex lock;
+
+ /**
+ * @fences:
+ *
+ * Array of fences which were added to the dma_resv object
+ *
+ * A new fence is added by calling dma_resv_add_fence(). Since this
+ * often needs to be done past the point of no return in command
+ * submission it cannot fail, and therefore sufficient slots need to be
+ * reserved by calling dma_resv_reserve_fences().
+ */
+ struct dma_resv_list __rcu *fences;
+};
+
+/**
+ * struct dma_resv_iter - current position into the dma_resv fences
+ *
+ * Don't touch this directly in the driver, use the accessor function instead.
+ *
+ * IMPORTANT
+ *
+ * When using the lockless iterators like dma_resv_iter_next_unlocked() or
+ * dma_resv_for_each_fence_unlocked() beware that the iterator can be restarted.
+ * Code which accumulates statistics or similar needs to check for this with
+ * dma_resv_iter_is_restarted().
+ */
+struct dma_resv_iter {
+ /** @obj: The dma_resv object we iterate over */
+ struct dma_resv *obj;
+
+ /** @usage: Return fences with this usage or lower. */
+ enum dma_resv_usage usage;
+
+ /** @fence: the currently handled fence */
+ struct dma_fence *fence;
+
+ /** @fence_usage: the usage of the current fence */
+ enum dma_resv_usage fence_usage;
+
+ /** @index: index into the shared fences */
+ unsigned int index;
+
+ /** @fences: the shared fences; private, *MUST* not dereference */
+ struct dma_resv_list *fences;
+
+ /** @num_fences: number of fences */
+ unsigned int num_fences;
+
+ /** @is_restarted: true if this is the first returned fence */
+ bool is_restarted;
+};
+
+struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);
+
+/**
+ * dma_resv_iter_begin - initialize a dma_resv_iter object
+ * @cursor: The dma_resv_iter object to initialize
+ * @obj: The dma_resv object which we want to iterate over
+ * @usage: controls which fences to include, see enum dma_resv_usage.
+ */
+static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
+ struct dma_resv *obj,
+ enum dma_resv_usage usage)
+{
+ cursor->obj = obj;
+ cursor->usage = usage;
+ cursor->fence = NULL;
+}
+
+/**
+ * dma_resv_iter_end - cleanup a dma_resv_iter object
+ * @cursor: the dma_resv_iter object which should be cleaned up
+ *
+ * Make sure that the reference to the fence in the cursor is properly
+ * dropped.
+ */
+static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
+{
+ dma_fence_put(cursor->fence);
+}
+
+/**
+ * dma_resv_iter_usage - Return the usage of the current fence
+ * @cursor: the cursor of the current position
+ *
+ * Returns the usage of the currently processed fence.
+ */
+static inline enum dma_resv_usage
+dma_resv_iter_usage(struct dma_resv_iter *cursor)
+{
+ return cursor->fence_usage;
+}
+
+/**
+ * dma_resv_iter_is_restarted - test if this is the first fence after a restart
+ * @cursor: the cursor with the current position
+ *
+ * Return true if this is the first fence in an iteration after a restart.
+ */
+static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
+{
+ return cursor->is_restarted;
+}
+
+/**
+ * dma_resv_for_each_fence_unlocked - unlocked fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object without holding the
+ * &dma_resv.lock and using RCU instead. The cursor needs to be initialized
+ * with dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside
+ * the iterator a reference to the dma_fence is held and the RCU lock dropped.
+ *
+ * Beware that the iterator can be restarted when the struct dma_resv for
+ * @cursor is modified. Code which accumulates statistics or similar needs to
+ * check for this with dma_resv_iter_is_restarted(). For this reason prefer
+ * the locked iterator dma_resv_for_each_fence() whenever possible.
+ */
+#define dma_resv_for_each_fence_unlocked(cursor, fence) \
+ for (fence = dma_resv_iter_first_unlocked(cursor); \
+ fence; fence = dma_resv_iter_next_unlocked(cursor))
+
+/**
+ * dma_resv_for_each_fence - fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @obj: a dma_resv object pointer
+ * @usage: controls which fences to return
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object while holding the
+ * &dma_resv.lock. @usage controls which fences are returned. The cursor
+ * initialisation is part of the iterator and the fence stays
+ * valid as long as the lock is held and so no extra reference to the fence is
+ * taken.
+ */
+#define dma_resv_for_each_fence(cursor, obj, usage, fence) \
+ for (dma_resv_iter_begin(cursor, obj, usage), \
+ fence = dma_resv_iter_first(cursor); fence; \
+ fence = dma_resv_iter_next(cursor))
+
+#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
+#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
+
+#ifdef CONFIG_DEBUG_MUTEXES
+void dma_resv_reset_max_fences(struct dma_resv *obj);
+#else
+static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {}
+#endif
+
+/**
+ * dma_resv_lock - lock the reservation object
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Locks the reservation object for exclusive access and modification. Note
+ * that the lock is only against other writers; readers will run concurrently
+ * with a writer under RCU. The seqlock is used to notify readers if they
+ * overlap with a writer.
+ *
+ * As the reservation object may be locked by multiple parties in an
+ * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
+ * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
+ * object may be locked by itself by passing NULL as @ctx.
+ *
+ * When a die situation is indicated by returning -EDEADLK all locks held by
+ * @ctx must be unlocked and then dma_resv_lock_slow() called on @obj.
+ *
+ * Unlocked by calling dma_resv_unlock().
+ *
+ * See also dma_resv_lock_interruptible() for the interruptible variant.
+ */
+static inline int dma_resv_lock(struct dma_resv *obj,
+ struct ww_acquire_ctx *ctx)
+{
+ return ww_mutex_lock(&obj->lock, ctx);
+}
+
+/**
+ * dma_resv_lock_interruptible - lock the reservation object
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Locks the reservation object interruptibly for exclusive access and
+ * modification. Note that the lock is only against other writers; readers
+ * will run concurrently with a writer under RCU. The seqlock is used to
+ * notify readers if they overlap with a writer.
+ *
+ * As the reservation object may be locked by multiple parties in an
+ * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
+ * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
+ * object may be locked by itself by passing NULL as @ctx.
+ *
+ * When a die situation is indicated by returning -EDEADLK all locks held by
+ * @ctx must be unlocked and then dma_resv_lock_slow_interruptible() called on
+ * @obj.
+ *
+ * Unlocked by calling dma_resv_unlock().
+ */
+static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
+ struct ww_acquire_ctx *ctx)
+{
+ return ww_mutex_lock_interruptible(&obj->lock, ctx);
+}
+
+/**
+ * dma_resv_lock_slow - slowpath lock the reservation object
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Acquires the reservation object after a die case. This function
+ * will sleep until the lock becomes available. See dma_resv_lock() as
+ * well.
+ *
+ * See also dma_resv_lock_slow_interruptible() for the interruptible variant.
+ */
+static inline void dma_resv_lock_slow(struct dma_resv *obj,
+ struct ww_acquire_ctx *ctx)
+{
+ ww_mutex_lock_slow(&obj->lock, ctx);
+}
+
+/**
+ * dma_resv_lock_slow_interruptible - slowpath lock the reservation
+ * object, interruptible
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Acquires the reservation object interruptibly after a die case. This function
+ * will sleep until the lock becomes available. See
+ * dma_resv_lock_interruptible() as well.
+ */
+static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
+ struct ww_acquire_ctx *ctx)
+{
+ return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
+}
+
+/**
+ * dma_resv_trylock - trylock the reservation object
+ * @obj: the reservation object
+ *
+ * Tries to lock the reservation object for exclusive access and modification.
+ * Note that the lock is only against other writers; readers will run
+ * concurrently with a writer under RCU. The seqlock is used to notify readers
+ * if they overlap with a writer.
+ *
+ * Also note that since no context is provided, no deadlock protection is
+ * possible, which is also not needed for a trylock.
+ *
+ * Returns true if the lock was acquired, false otherwise.
+ */
+static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
+{
+ return ww_mutex_trylock(&obj->lock, NULL);
+}
+
+/**
+ * dma_resv_is_locked - is the reservation object locked
+ * @obj: the reservation object
+ *
+ * Returns true if the mutex is locked, false if unlocked.
+ */
+static inline bool dma_resv_is_locked(struct dma_resv *obj)
+{
+ return ww_mutex_is_locked(&obj->lock);
+}
+
+/**
+ * dma_resv_locking_ctx - returns the context used to lock the object
+ * @obj: the reservation object
+ *
+ * Returns the context used to lock a reservation object or NULL if no context
+ * was used or the object is not locked at all.
+ *
+ * WARNING: This interface is pretty horrible, but TTM needs it because it
+ * doesn't pass the struct ww_acquire_ctx around in some very long callchains.
+ * Everyone else just uses it to check whether they're holding a reservation or
+ * not.
+ */
+static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
+{
+ return READ_ONCE(obj->lock.ctx);
+}
+
+/**
+ * dma_resv_unlock - unlock the reservation object
+ * @obj: the reservation object
+ *
+ * Unlocks the reservation object following exclusive access.
+ */
+static inline void dma_resv_unlock(struct dma_resv *obj)
+{
+ dma_resv_reset_max_fences(obj);
+ ww_mutex_unlock(&obj->lock);
+}
+
+void dma_resv_init(struct dma_resv *obj);
+void dma_resv_fini(struct dma_resv *obj);
+int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences);
+void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+ enum dma_resv_usage usage);
+void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
+ struct dma_fence *fence,
+ enum dma_resv_usage usage);
+int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
+ unsigned int *num_fences, struct dma_fence ***fences);
+int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
+ struct dma_fence **fence);
+int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
+long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
+ bool intr, unsigned long timeout);
+void dma_resv_set_deadline(struct dma_resv *obj, enum dma_resv_usage usage,
+ ktime_t deadline);
+bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage);
+void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);
+
+#endif /* _LINUX_RESERVATION_H */
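
A minimal sketch of the calling convention implied by the declarations above: lock the object, reserve a slot, add a fence, then walk the fences with the locked iterator. Everything except the dma_resv API itself is illustrative:

	static int example_add_and_scan(struct dma_resv *resv, struct dma_fence *f)
	{
		struct dma_resv_iter cursor;
		struct dma_fence *fence;
		int ret;

		ret = dma_resv_lock(resv, NULL);
		if (ret)
			return ret;

		/* Slots must be reserved before dma_resv_add_fence(). */
		ret = dma_resv_reserve_fences(resv, 1);
		if (!ret)
			dma_resv_add_fence(resv, f, DMA_RESV_USAGE_WRITE);

		/* Returns KERNEL and WRITE fences, per the usage ordering. */
		dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_WRITE, fence)
			pr_info("fence usage %d\n", dma_resv_iter_usage(&cursor));

		dma_resv_unlock(resv);
		return ret;
	}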
diff --git a/include/linux/dma/amd_xdma.h b/include/linux/dma/amd_xdma.h
new file mode 100644
index 000000000000..ceba69ed7cb4
--- /dev/null
+++ b/include/linux/dma/amd_xdma.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _DMAENGINE_AMD_XDMA_H
+#define _DMAENGINE_AMD_XDMA_H
+
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num);
+void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num);
+int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index);
+
+#endif /* _DMAENGINE_AMD_XDMA_H */
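
As an illustration of the intended call order (assuming the same user-IRQ index is valid for both lookup and enable; index 0 is a placeholder):

	static int example_user_irq(struct platform_device *pdev)
	{
		int irq = xdma_get_user_irq(pdev, 0);

		if (irq < 0)
			return irq;

		return xdma_enable_user_irq(pdev, 0);
	}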
diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h
index e166cac8e870..9752f3745f76 100644
--- a/include/linux/dma/dw.h
+++ b/include/linux/dma/dw.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for the Synopsys DesignWare DMA Controller
*
* Copyright (C) 2007 Atmel Corporation
* Copyright (C) 2010-2011 ST Microelectronics
* Copyright (C) 2014 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _DMA_DW_H
#define _DMA_DW_H
@@ -45,9 +42,13 @@ struct dw_dma_chip {
#if IS_ENABLED(CONFIG_DW_DMAC_CORE)
int dw_dma_probe(struct dw_dma_chip *chip);
int dw_dma_remove(struct dw_dma_chip *chip);
+int idma32_dma_probe(struct dw_dma_chip *chip);
+int idma32_dma_remove(struct dw_dma_chip *chip);
#else
static inline int dw_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; }
static inline int dw_dma_remove(struct dw_dma_chip *chip) { return 0; }
+static inline int idma32_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; }
+static inline int idma32_dma_remove(struct dw_dma_chip *chip) { return 0; }
#endif /* CONFIG_DW_DMAC_CORE */
#endif /* _DMA_DW_H */
diff --git a/include/linux/dma/edma.h b/include/linux/dma/edma.h
new file mode 100644
index 000000000000..d2638d9259dc
--- /dev/null
+++ b/include/linux/dma/edma.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare eDMA core driver
+ *
+ * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+ */
+
+#ifndef _DW_EDMA_H
+#define _DW_EDMA_H
+
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+
+#define EDMA_MAX_WR_CH 8
+#define EDMA_MAX_RD_CH 8
+
+struct dw_edma;
+
+struct dw_edma_region {
+ u64 paddr;
+ union {
+ void *mem;
+ void __iomem *io;
+ } vaddr;
+ size_t sz;
+};
+
+/**
+ * struct dw_edma_core_ops - platform-specific eDMA methods
+ * @irq_vector: Get IRQ number of the passed eDMA channel. Note the
+ * method accepts the channel id in the end-to-end
+ * numbering with the eDMA write channels being placed
+ * first in the row.
+ * @pci_address: Get PCIe bus address corresponding to the passed CPU
+ * address. Note there is no need to specify this
+ * function if the address translation is performed by
+ * the DW PCIe RP/EP controller with the DW eDMA device in
+ * subject and DMA_BYPASS isn't set for all the outbound
+ * iATU windows. That will be done by the controller
+ * automatically.
+ */
+struct dw_edma_core_ops {
+ int (*irq_vector)(struct device *dev, unsigned int nr);
+ u64 (*pci_address)(struct device *dev, phys_addr_t cpu_addr);
+};
+
+enum dw_edma_map_format {
+ EDMA_MF_EDMA_LEGACY = 0x0,
+ EDMA_MF_EDMA_UNROLL = 0x1,
+ EDMA_MF_HDMA_COMPAT = 0x5
+};
+
+/**
+ * enum dw_edma_chip_flags - Flags specific to an eDMA chip
+ * @DW_EDMA_CHIP_LOCAL: eDMA is used locally by an endpoint
+ */
+enum dw_edma_chip_flags {
+ DW_EDMA_CHIP_LOCAL = BIT(0),
+};
+
+/**
+ * struct dw_edma_chip - representation of DesignWare eDMA controller hardware
+ * @dev: struct device of the eDMA controller
+ * @nr_irqs: total number of DMA IRQs
+ * @ops: DMA channel to IRQ number mapping
+ * @flags: dw_edma_chip_flags
+ * @reg_base: DMA register base address
+ * @ll_wr_cnt: DMA write link list count
+ * @ll_rd_cnt: DMA read link list count
+ * @ll_region_wr: DMA descriptor link list memory for write channels
+ * @ll_region_rd: DMA descriptor link list memory for read channels
+ * @dt_region_wr: DMA data memory for write channels
+ * @dt_region_rd: DMA data memory for read channels
+ * @mf: DMA register map format
+ * @dw: struct dw_edma that is filled by dw_edma_probe()
+ */
+struct dw_edma_chip {
+ struct device *dev;
+ int nr_irqs;
+ const struct dw_edma_core_ops *ops;
+ u32 flags;
+
+ void __iomem *reg_base;
+
+ u16 ll_wr_cnt;
+ u16 ll_rd_cnt;
+ /* link list address */
+ struct dw_edma_region ll_region_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_region ll_region_rd[EDMA_MAX_RD_CH];
+
+ /* data region */
+ struct dw_edma_region dt_region_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_region dt_region_rd[EDMA_MAX_RD_CH];
+
+ enum dw_edma_map_format mf;
+
+ struct dw_edma *dw;
+};
+
+/* Export to the platform drivers */
+#if IS_REACHABLE(CONFIG_DW_EDMA)
+int dw_edma_probe(struct dw_edma_chip *chip);
+int dw_edma_remove(struct dw_edma_chip *chip);
+#else
+static inline int dw_edma_probe(struct dw_edma_chip *chip)
+{
+ return -ENODEV;
+}
+
+static inline int dw_edma_remove(struct dw_edma_chip *chip)
+{
+ return 0;
+}
+#endif /* CONFIG_DW_EDMA */
+
+#endif /* _DW_EDMA_H */
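
A hedged sketch of how a host driver might register the eDMA core; real users must also fill the ll/dt regions per channel, and every value below is a placeholder:

	static int example_edma_register(struct device *dev, void __iomem *base,
					 const struct dw_edma_core_ops *ops)
	{
		struct dw_edma_chip *chip;

		chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->dev = dev;
		chip->reg_base = base;
		chip->nr_irqs = 1;
		chip->ops = ops;
		chip->flags = DW_EDMA_CHIP_LOCAL;
		chip->mf = EDMA_MF_EDMA_UNROLL;
		chip->ll_wr_cnt = 1;
		chip->ll_rd_cnt = 1;

		return dw_edma_probe(chip);
	}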
diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h
index 197eec63e501..77ea602c287c 100644
--- a/include/linux/dma/hsu.h
+++ b/include/linux/dma/hsu.h
@@ -1,21 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Driver for the High Speed UART DMA
*
* Copyright (C) 2015 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _DMA_HSU_H
#define _DMA_HSU_H
-#include <linux/device.h>
-#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/kconfig.h>
+#include <linux/types.h>
#include <linux/platform_data/dma-hsu.h>
+struct device;
struct hsu_dma;
/**
diff --git a/include/linux/dma/idma64.h b/include/linux/dma/idma64.h
new file mode 100644
index 000000000000..621cfae60554
--- /dev/null
+++ b/include/linux/dma/idma64.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions for the Intel integrated DMA 64-bit
+ *
+ * Copyright (C) 2019 Intel Corporation
+ */
+
+#ifndef __LINUX_DMA_IDMA64_H__
+#define __LINUX_DMA_IDMA64_H__
+
+/* Platform driver name */
+#define LPSS_IDMA64_DRIVER_NAME "idma64"
+
+#endif /* __LINUX_DMA_IDMA64_H__ */
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/dma/imx-dma.h
index 7d964e787299..cfec5f946e23 100644
--- a/include/linux/platform_data/dma-imx.h
+++ b/include/linux/dma/imx-dma.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-#ifndef __ASM_ARCH_MXC_DMA_H__
-#define __ASM_ARCH_MXC_DMA_H__
+#ifndef __LINUX_DMA_IMX_H
+#define __LINUX_DMA_IMX_H
#include <linux/scatterlist.h>
#include <linux/device.h>
@@ -42,6 +39,8 @@ enum sdma_peripheral_type {
IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */
IMX_DMATYPE_ASRC_SP, /* Shared ASRC */
IMX_DMATYPE_SAI, /* SAI */
+ IMX_DMATYPE_MULTI_SAI, /* MULTI FIFOs For Audio */
+ IMX_DMATYPE_HDMI, /* HDMI Audio */
};
enum imx_dma_prio {
@@ -68,4 +67,36 @@ static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
!strcmp(chan->device->dev->driver->name, "imx-dma");
}
-#endif
+/**
+ * struct sdma_peripheral_config - SDMA config for audio
+ * @n_fifos_src: Number of FIFOs for recording
+ * @n_fifos_dst: Number of FIFOs for playback
+ * @stride_fifos_src: FIFO address stride for recording, 0 means the FIFOs are
+ *                    contiguous, 1 means one word of stride between FIFOs. The
+ *                    stride must be the same between all FIFOs.
+ * @stride_fifos_dst: FIFO address stride for playback
+ * @words_per_fifo: number of words per FIFO fetch/fill, 1 means
+ *                  one channel per FIFO, 2 means two channels per FIFO.
+ *                  If 'n_fifos_src = 4' and 'words_per_fifo = 2', the
+ *                  first two words (channels) are fetched from FIFO0,
+ *                  then the next two from FIFO1, and so on; after the
+ *                  last FIFO3 is fetched, it rolls back to FIFO0.
+ * @sw_done: Use software done. Needed for PDM (micfil)
+ *
+ * Some i.MX Audio devices (SAI, micfil) have multiple successive FIFO
+ * registers. For multichannel recording/playback the SAI/micfil have
+ * one FIFO register per channel and the SDMA engine has to read/write
+ * the next channel from/to the next register and wrap around to the
+ * first register when all channels are handled. The number of active
+ * channels must be communicated to the SDMA engine using this struct.
+ */
+struct sdma_peripheral_config {
+ int n_fifos_src;
+ int n_fifos_dst;
+ int stride_fifos_src;
+ int stride_fifos_dst;
+ int words_per_fifo;
+ bool sw_done;
+};
+
+#endif /* __LINUX_DMA_IMX_H */
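
A sketch of how a client could describe a SAI with four contiguous FIFO registers and two audio channels per FIFO, assuming the generic dma_slave_config::peripheral_config mechanism from <linux/dmaengine.h> is used to hand the struct to the SDMA driver:

	static int example_cfg_sai(struct dma_chan *chan, dma_addr_t fifo_addr)
	{
		struct sdma_peripheral_config sdmacfg = {
			.n_fifos_dst = 4,
			.stride_fifos_dst = 0,	/* FIFO registers are contiguous */
			.words_per_fifo = 2,
		};
		struct dma_slave_config cfg = {
			.direction = DMA_MEM_TO_DEV,
			.dst_addr = fifo_addr,
			.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_maxburst = 8,
			.peripheral_config = &sdmacfg,
			.peripheral_size = sizeof(sdmacfg),
		};

		return dmaengine_slave_config(chan, &cfg);
	}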
diff --git a/include/linux/dma/ipu-dma.h b/include/linux/dma/ipu-dma.h
index 18031115c668..6969391580d2 100644
--- a/include/linux/dma/ipu-dma.h
+++ b/include/linux/dma/ipu-dma.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2008
* Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
*
* Copyright (C) 2005-2007 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_DMA_IPU_DMA_H
diff --git a/include/linux/dma/k3-event-router.h b/include/linux/dma/k3-event-router.h
new file mode 100644
index 000000000000..e3f88b2f87be
--- /dev/null
+++ b/include/linux/dma/k3-event-router.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef K3_EVENT_ROUTER_
+#define K3_EVENT_ROUTER_
+
+#include <linux/types.h>
+
+struct k3_event_route_data {
+ void *priv;
+ int (*set_event)(void *priv, u32 event);
+};
+
+#endif /* K3_EVENT_ROUTER_ */
diff --git a/include/linux/dma/k3-psil.h b/include/linux/dma/k3-psil.h
new file mode 100644
index 000000000000..5f106d852f1c
--- /dev/null
+++ b/include/linux/dma/k3-psil.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef K3_PSIL_H_
+#define K3_PSIL_H_
+
+#include <linux/types.h>
+
+#define K3_PSIL_DST_THREAD_ID_OFFSET 0x8000
+
+struct device;
+
+/**
+ * enum udma_tp_level - Channel Throughput Levels
+ * @UDMA_TP_NORMAL: Normal channel
+ * @UDMA_TP_HIGH: High Throughput channel
+ * @UDMA_TP_ULTRAHIGH: Ultra High Throughput channel
+ */
+enum udma_tp_level {
+ UDMA_TP_NORMAL = 0,
+ UDMA_TP_HIGH,
+ UDMA_TP_ULTRAHIGH,
+ UDMA_TP_LAST,
+};
+
+/**
+ * enum psil_endpoint_type - PSI-L Endpoint type
+ * @PSIL_EP_NATIVE: Normal channel
+ * @PSIL_EP_PDMA_XY: XY mode PDMA
+ * @PSIL_EP_PDMA_MCAN: MCAN mode PDMA
+ * @PSIL_EP_PDMA_AASRC: AASRC mode PDMA
+ */
+enum psil_endpoint_type {
+ PSIL_EP_NATIVE = 0,
+ PSIL_EP_PDMA_XY,
+ PSIL_EP_PDMA_MCAN,
+ PSIL_EP_PDMA_AASRC,
+};
+
+/**
+ * struct psil_endpoint_config - PSI-L Endpoint configuration
+ * @ep_type: PSI-L endpoint type
+ * @channel_tpl: Desired throughput level for the channel
+ * @pkt_mode: If set, the channel must be in Packet mode, otherwise in
+ * TR mode
+ * @notdpkt: TDCM must be suppressed on the TX channel
+ * @needs_epib: Endpoint needs EPIB
+ * @pdma_acc32: ACC32 must be enabled on the PDMA side
+ * @pdma_burst: BURST must be enabled on the PDMA side
+ * @psd_size: If set, PSdata is used by the endpoint
+ * @mapped_channel_id: PKTDMA thread to channel mapping for mapped channels.
+ * The thread must be serviced by the specified channel if
+ * mapped_channel_id is >= 0 in case of PKTDMA
+ * @flow_start: PKTDMA flow range start of mapped channel. Unmapped
+ * channels use flow_id == chan_id
+ * @flow_num: PKTDMA flow count of mapped channel. Unmapped channels
+ * use flow_id == chan_id
+ * @default_flow_id: PKTDMA default (r)flow index of mapped channel.
+ * Must be within the flow range of the mapped channel.
+ */
+struct psil_endpoint_config {
+ enum psil_endpoint_type ep_type;
+ enum udma_tp_level channel_tpl;
+
+ unsigned pkt_mode:1;
+ unsigned notdpkt:1;
+ unsigned needs_epib:1;
+ /* PDMA properties, valid for PSIL_EP_PDMA_* */
+ unsigned pdma_acc32:1;
+ unsigned pdma_burst:1;
+
+ u32 psd_size;
+ /* PKTDMA mapped channel */
+ s16 mapped_channel_id;
+ /* PKTDMA tflow and rflow ranges for mapped channel */
+ u16 flow_start;
+ u16 flow_num;
+ s16 default_flow_id;
+};
+
+int psil_set_new_ep_config(struct device *dev, const char *name,
+ struct psil_endpoint_config *ep_config);
+
+#endif /* K3_PSIL_H_ */
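
Illustrative only: a driver overriding the PSI-L endpoint configuration for a thread before requesting the DMA channel ("my-thread" and all field values are made up):

	static struct psil_endpoint_config example_ep = {
		.ep_type = PSIL_EP_PDMA_XY,
		.pkt_mode = 1,
		.needs_epib = 1,
		.psd_size = 16,
		.mapped_channel_id = -1,
		.default_flow_id = -1,
	};

	static int example_override(struct device *dev)
	{
		return psil_set_new_ep_config(dev, "my-thread", &example_ep);
	}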
diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h
new file mode 100644
index 000000000000..e443be4d3b4b
--- /dev/null
+++ b/include/linux/dma/k3-udma-glue.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef K3_UDMA_GLUE_H_
+#define K3_UDMA_GLUE_H_
+
+#include <linux/types.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/dma/ti-cppi5.h>
+
+struct k3_udma_glue_tx_channel_cfg {
+ struct k3_ring_cfg tx_cfg;
+ struct k3_ring_cfg txcq_cfg;
+
+ bool tx_pause_on_err;
+ bool tx_filt_einfo;
+ bool tx_filt_pswords;
+ bool tx_supr_tdpkt;
+ u32 swdata_size;
+};
+
+struct k3_udma_glue_tx_channel;
+
+struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
+ const char *name, struct k3_udma_glue_tx_channel_cfg *cfg);
+
+void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
+int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ struct cppi5_host_desc_t *desc_tx,
+ dma_addr_t desc_dma);
+int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *desc_dma);
+int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
+void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
+void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ bool sync);
+void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ void *data, void (*cleanup)(void *data, dma_addr_t desc_dma));
+u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn);
+u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn);
+int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn);
+struct device *
+ k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn);
+void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *addr);
+void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *addr);
+
+enum {
+ K3_UDMA_GLUE_SRC_TAG_LO_KEEP = 0,
+ K3_UDMA_GLUE_SRC_TAG_LO_USE_FLOW_REG = 1,
+ K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_FLOW_ID = 2,
+ K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG = 4,
+};
+
+/**
+ * k3_udma_glue_rx_flow_cfg - UDMA RX flow cfg
+ *
+ * @rx_cfg: RX ring configuration
+ * @rxfdq_cfg: RX free Host PD ring configuration
+ * @ring_rxq_id: RX ring id (or -1 for any)
+ * @ring_rxfdq0_id: RX free Host PD ring (FDQ) id (or -1 for any)
+ * @rx_error_handling: Rx Error Handling Mode (0 - drop, 1 - re-try)
+ * @src_tag_lo_sel: Rx Source Tag Low Byte Selector in Host PD
+ */
+struct k3_udma_glue_rx_flow_cfg {
+ struct k3_ring_cfg rx_cfg;
+ struct k3_ring_cfg rxfdq_cfg;
+ int ring_rxq_id;
+ int ring_rxfdq0_id;
+ bool rx_error_handling;
+ int src_tag_lo_sel;
+};
+
+/**
+ * k3_udma_glue_rx_channel_cfg - UDMA RX channel cfg
+ *
+ * @swdata_size: SW Data is present in Host PD of @swdata_size bytes
+ * @flow_id_base: first flow_id used by channel.
+ * if @flow_id_base = -1 - range of GP rflows will be
+ * allocated dynamically.
+ * @flow_id_num: number of RX flows used by channel
+ * @flow_id_use_rxchan_id: use RX channel id as flow id,
+ * used only if @flow_id_num = 1
+ * @remote: indication that the RX channel is remote - some remote CPU
+ * core owns and controls the RX channel. The Linux host is
+ * only allowed to attach and configure RX flows within the
+ * channel. If set, no RX channel operation will be
+ * performed by the K3 NAVSS DMA glue interface.
+ * @def_flow_cfg: default RX flow configuration,
+ * used only if @flow_id_num = 1
+ */
+struct k3_udma_glue_rx_channel_cfg {
+ u32 swdata_size;
+ int flow_id_base;
+ int flow_id_num;
+ bool flow_id_use_rxchan_id;
+ bool remote;
+
+ struct k3_udma_glue_rx_flow_cfg *def_flow_cfg;
+};
+
+struct k3_udma_glue_rx_channel;
+
+struct k3_udma_glue_rx_channel *k3_udma_glue_request_rx_chn(
+ struct device *dev,
+ const char *name,
+ struct k3_udma_glue_rx_channel_cfg *cfg);
+
+void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
+int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
+void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
+void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ bool sync);
+int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, struct cppi5_host_desc_t *desc_tx,
+ dma_addr_t desc_dma);
+int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, dma_addr_t *desc_dma);
+int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx, struct k3_udma_glue_rx_flow_cfg *flow_cfg);
+u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx);
+u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn);
+int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num);
+void k3_udma_glue_rx_put_irq(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num);
+void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, void *data,
+ void (*cleanup)(void *data, dma_addr_t desc_dma),
+ bool skip_fdq);
+int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx);
+int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx);
+struct device *
+ k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn);
+void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn,
+ dma_addr_t *addr);
+void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn,
+ dma_addr_t *addr);
+
+#endif /* K3_UDMA_GLUE_H_ */
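
A compact sketch of requesting and enabling a TX channel; the ring sizes, element size, mode and channel name are board-specific placeholders chosen to mirror existing users of the glue layer:

	static int example_tx_setup(struct device *dev)
	{
		struct k3_udma_glue_tx_channel_cfg cfg = {
			.swdata_size = 16,
			.tx_cfg = {
				.size = 128,
				.elm_size = K3_RINGACC_RING_ELSIZE_8,
				.mode = K3_RINGACC_RING_MODE_RING,
			},
			.txcq_cfg = {
				.size = 128,
				.elm_size = K3_RINGACC_RING_ELSIZE_8,
				.mode = K3_RINGACC_RING_MODE_RING,
			},
		};
		struct k3_udma_glue_tx_channel *tx;

		/* "tx0-chan" is a made-up DT channel name. */
		tx = k3_udma_glue_request_tx_chn(dev, "tx0-chan", &cfg);
		if (IS_ERR(tx))
			return PTR_ERR(tx);

		return k3_udma_glue_enable_tx_chn(tx);
	}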
diff --git a/include/linux/dma/mmp-pdma.h b/include/linux/dma/mmp-pdma.h
deleted file mode 100644
index 2dc9b2bc18fc..000000000000
--- a/include/linux/dma/mmp-pdma.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _MMP_PDMA_H_
-#define _MMP_PDMA_H_
-
-struct dma_chan;
-
-#ifdef CONFIG_MMP_PDMA
-bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param);
-#else
-static inline bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
-{
- return false;
-}
-#endif
-
-#endif /* _MMP_PDMA_H_ */
diff --git a/include/linux/dma/mxs-dma.h b/include/linux/dma/mxs-dma.h
new file mode 100644
index 000000000000..069d9f5a609e
--- /dev/null
+++ b/include/linux/dma/mxs-dma.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MXS_DMA_H_
+#define _MXS_DMA_H_
+
+#include <linux/dmaengine.h>
+
+#define MXS_DMA_CTRL_WAIT4END BIT(31)
+#define MXS_DMA_CTRL_WAIT4RDY BIT(30)
+
+/*
+ * The mxs dmaengine can do PIO transfers. We pass a pointer to the PIO words
+ * in the second argument to dmaengine_prep_slave_sg when the direction is
+ * set to DMA_TRANS_NONE. To make this clear and to prevent users from doing
+ * the error prone casting we have this wrapper function
+ */
+static inline struct dma_async_tx_descriptor *mxs_dmaengine_prep_pio(
+ struct dma_chan *chan, u32 *pio, unsigned int npio,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ return dmaengine_prep_slave_sg(chan, (struct scatterlist *)pio, npio,
+ dir, flags);
+}
+
+#endif /* _MXS_DMA_H_ */
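
Usage sketch for the wrapper above: push two PIO words and let the channel wait for the command to finish; the register words themselves are device-specific placeholders:

	static int example_pio(struct dma_chan *chan)
	{
		struct dma_async_tx_descriptor *desc;
		u32 pio[2] = { 0, 0 };	/* device-specific command words */

		desc = mxs_dmaengine_prep_pio(chan, pio, ARRAY_SIZE(pio),
					      DMA_TRANS_NONE,
					      MXS_DMA_CTRL_WAIT4END);
		if (!desc)
			return -EINVAL;

		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
		return 0;
	}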
diff --git a/include/linux/dma/pxa-dma.h b/include/linux/dma/pxa-dma.h
index 3edc99294bf6..fceb5df07097 100644
--- a/include/linux/dma/pxa-dma.h
+++ b/include/linux/dma/pxa-dma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PXA_DMA_H_
#define _PXA_DMA_H_
@@ -8,20 +9,18 @@ enum pxad_chan_prio {
PXAD_PRIO_LOWEST,
};
+/**
+ * struct pxad_param - dma channel request parameters
+ * @drcmr: requestor line number
+ * @prio: minimal mandatory priority of the channel
+ *
+ * If a requested channel is granted, its priority will be at least @prio,
+ * i.e. if PXAD_PRIO_LOW is required, the requested channel will be either
+ * PXAD_PRIO_LOW, PXAD_PRIO_NORMAL or PXAD_PRIO_HIGHEST.
+ */
struct pxad_param {
unsigned int drcmr;
enum pxad_chan_prio prio;
};
-struct dma_chan;
-
-#ifdef CONFIG_PXA_DMA
-bool pxad_filter_fn(struct dma_chan *chan, void *param);
-#else
-static inline bool pxad_filter_fn(struct dma_chan *chan, void *param)
-{
- return false;
-}
-#endif
-
#endif /* _PXA_DMA_H_ */
diff --git a/include/linux/dma/qcom-gpi-dma.h b/include/linux/dma/qcom-gpi-dma.h
new file mode 100644
index 000000000000..6680dd1a43c6
--- /dev/null
+++ b/include/linux/dma/qcom-gpi-dma.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020, Linaro Limited
+ */
+
+#ifndef QCOM_GPI_DMA_H
+#define QCOM_GPI_DMA_H
+
+/**
+ * enum spi_transfer_cmd - spi transfer commands
+ */
+enum spi_transfer_cmd {
+ SPI_TX = 1,
+ SPI_RX,
+ SPI_DUPLEX,
+};
+
+/**
+ * struct gpi_spi_config - spi config for peripheral
+ *
+ * @loopback_en: spi loopback enable when set
+ * @clock_pol_high: clock polarity
+ * @data_pol_high: data polarity
+ * @pack_en: process tx/rx buffers as packed
+ * @word_len: spi word length
+ * @clk_div: source clock divider
+ * @clk_src: serial clock
+ * @cmd: spi cmd
+ * @fragmentation: keep CS asserted at end of sequence
+ * @cs: chip select toggle
+ * @set_config: set peripheral config
+ * @rx_len: receive length for buffer
+ */
+struct gpi_spi_config {
+ u8 set_config;
+ u8 loopback_en;
+ u8 clock_pol_high;
+ u8 data_pol_high;
+ u8 pack_en;
+ u8 word_len;
+ u8 fragmentation;
+ u8 cs;
+ u32 clk_div;
+ u32 clk_src;
+ enum spi_transfer_cmd cmd;
+ u32 rx_len;
+};
+
+enum i2c_op {
+ I2C_WRITE = 1,
+ I2C_READ,
+};
+
+/**
+ * struct gpi_i2c_config - i2c config for peripheral
+ *
+ * @pack_enable: process tx/rx buffers as packed
+ * @cycle_count: clock cycles to be sent
+ * @high_count: high period of clock
+ * @low_count: low period of clock
+ * @clk_div: source clock divider
+ * @addr: i2c bus address
+ * @stretch: stretch the clock at eot
+ * @set_config: set peripheral config
+ * @rx_len: receive length for buffer
+ * @op: i2c cmd
+ * @multi_msg: is part of a multi-message i2c read-write sequence
+ */
+struct gpi_i2c_config {
+ u8 set_config;
+ u8 pack_enable;
+ u8 cycle_count;
+ u8 high_count;
+ u8 low_count;
+ u8 addr;
+ u8 stretch;
+ u16 clk_div;
+ u32 rx_len;
+ enum i2c_op op;
+ bool multi_msg;
+};
+
+#endif /* QCOM_GPI_DMA_H */
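
A sketch (mirroring how GENI peripheral drivers hand these structs to the GPI driver via dma_slave_config::peripheral_config; the field values are illustrative):

	static int example_gpi_i2c(struct dma_chan *chan, u8 addr)
	{
		struct gpi_i2c_config i2c = {
			.set_config = 1,
			.pack_enable = 1,
			.addr = addr,
			.op = I2C_WRITE,
		};
		struct dma_slave_config cfg = {
			.peripheral_config = &i2c,
			.peripheral_size = sizeof(i2c),
		};

		return dmaengine_slave_config(chan, &cfg);
	}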
diff --git a/include/linux/dma/qcom_adm.h b/include/linux/dma/qcom_adm.h
new file mode 100644
index 000000000000..af20df674f0c
--- /dev/null
+++ b/include/linux/dma/qcom_adm.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_DMA_QCOM_ADM_H
+#define __LINUX_DMA_QCOM_ADM_H
+
+#include <linux/types.h>
+
+struct qcom_adm_peripheral_config {
+ u32 crci;
+ u32 mux;
+};
+
+#endif /* __LINUX_DMA_QCOM_ADM_H */
diff --git a/include/linux/dma/qcom_bam_dma.h b/include/linux/dma/qcom_bam_dma.h
new file mode 100644
index 000000000000..68fc0e643b1b
--- /dev/null
+++ b/include/linux/dma/qcom_bam_dma.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _QCOM_BAM_DMA_H
+#define _QCOM_BAM_DMA_H
+
+#include <asm/byteorder.h>
+
+/*
+ * This data type corresponds to the native Command Element
+ * supported by BAM DMA Engine.
+ *
+ * @cmd_and_addr - upper 8 bits command and lower 24 bits register address.
+ * @data - for a write command: content to be written into the peripheral
+ * register. For a read command: destination address to which the
+ * peripheral register value is written.
+ * @mask - register mask.
+ * @reserved - for future usage.
+ *
+ */
+struct bam_cmd_element {
+ __le32 cmd_and_addr;
+ __le32 data;
+ __le32 mask;
+ __le32 reserved;
+};
+
+/*
+ * This enum indicates the command type in a command element
+ */
+enum bam_command_type {
+ BAM_WRITE_COMMAND = 0,
+ BAM_READ_COMMAND,
+};
+
+/*
+ * bam_prep_ce_le32 - Wrapper function to prepare a single BAM command
+ * element with the data already in le32 format.
+ *
+ * @bam_ce: bam command element
+ * @addr: target address
+ * @cmd: BAM command
+ * @data: actual data for write and dest addr for read in le32
+ */
+static inline void
+bam_prep_ce_le32(struct bam_cmd_element *bam_ce, u32 addr,
+ enum bam_command_type cmd, __le32 data)
+{
+ bam_ce->cmd_and_addr =
+ cpu_to_le32((addr & 0xffffff) | ((cmd & 0xff) << 24));
+ bam_ce->data = data;
+ bam_ce->mask = cpu_to_le32(0xffffffff);
+}
+
+/*
+ * bam_prep_ce - Wrapper function to prepare a single BAM command element
+ * with the data.
+ *
+ * @bam_ce: BAM command element
+ * @addr: target address
+ * @cmd: BAM command
+ * @data: actual data for write and dest addr for read
+ */
+static inline void
+bam_prep_ce(struct bam_cmd_element *bam_ce, u32 addr,
+ enum bam_command_type cmd, u32 data)
+{
+ bam_prep_ce_le32(bam_ce, addr, cmd, cpu_to_le32(data));
+}
+#endif
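
A short sketch of building a command element that writes 0x1 to a peripheral register (the 0x1000 offset is hypothetical):

	static void example_ce(struct bam_cmd_element *ce)
	{
		bam_prep_ce(ce, 0x1000, BAM_WRITE_COMMAND, 0x1);
	}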
diff --git a/include/linux/dma/sprd-dma.h b/include/linux/dma/sprd-dma.h
new file mode 100644
index 000000000000..d09c6f6f6da5
--- /dev/null
+++ b/include/linux/dma/sprd-dma.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _SPRD_DMA_H_
+#define _SPRD_DMA_H_
+
+#define SPRD_DMA_REQ_SHIFT 8
+#define SPRD_DMA_TRG_MODE_SHIFT 16
+#define SPRD_DMA_CHN_MODE_SHIFT 24
+#define SPRD_DMA_FLAGS(chn_mode, trg_mode, req_mode, int_type) \
+ ((chn_mode) << SPRD_DMA_CHN_MODE_SHIFT | \
+ (trg_mode) << SPRD_DMA_TRG_MODE_SHIFT | \
+ (req_mode) << SPRD_DMA_REQ_SHIFT | (int_type))
+
+/*
+ * The Spreadtrum DMA controller supports channel 2-stage transfer, which means
+ * we can request 2 dma channels, one for source channel, and another one for
+ * destination channel. Each channel is independent, and has its own
+ * configurations. Once the source channel's transaction is done, it will
+ * trigger the destination channel's transaction automatically by hardware
+ * signal.
+ *
+ * To support 2-stage transfer, we must configure the channel mode and trigger
+ * mode as below definition.
+ */
+
+/*
+ * enum sprd_dma_chn_mode: define the DMA channel mode for 2-stage transfer
+ * @SPRD_DMA_CHN_MODE_NONE: No channel mode setting which means channel doesn't
+ * support the 2-stage transfer.
+ * @SPRD_DMA_SRC_CHN0: Channel used as source channel 0.
+ * @SPRD_DMA_SRC_CHN1: Channel used as source channel 1.
+ * @SPRD_DMA_DST_CHN0: Channel used as destination channel 0.
+ * @SPRD_DMA_DST_CHN1: Channel used as destination channel 1.
+ *
+ * Currently the DMA controller supports two groups of 2-stage transfers.
+ */
+enum sprd_dma_chn_mode {
+ SPRD_DMA_CHN_MODE_NONE,
+ SPRD_DMA_SRC_CHN0,
+ SPRD_DMA_SRC_CHN1,
+ SPRD_DMA_DST_CHN0,
+ SPRD_DMA_DST_CHN1,
+};
+
+/*
+ * enum sprd_dma_trg_mode: define the DMA channel trigger mode for 2-stage
+ * transfer
+ * @SPRD_DMA_NO_TRG: No trigger setting.
+ * @SPRD_DMA_FRAG_DONE_TRG: Trigger the transaction of destination channel
+ * automatically once the source channel's fragment request is done.
+ * @SPRD_DMA_BLOCK_DONE_TRG: Trigger the transaction of destination channel
+ * automatically once the source channel's block request is done.
+ * @SPRD_DMA_TRANS_DONE_TRG: Trigger the transaction of destination channel
+ * automatically once the source channel's transfer request is done.
+ * @SPRD_DMA_LIST_DONE_TRG: Trigger the transaction of destination channel
+ * automatically once the source channel's link-list request is done.
+ */
+enum sprd_dma_trg_mode {
+ SPRD_DMA_NO_TRG,
+ SPRD_DMA_FRAG_DONE_TRG,
+ SPRD_DMA_BLOCK_DONE_TRG,
+ SPRD_DMA_TRANS_DONE_TRG,
+ SPRD_DMA_LIST_DONE_TRG,
+};
+
+/*
+ * enum sprd_dma_req_mode: define the DMA request mode
+ * @SPRD_DMA_FRAG_REQ: fragment request mode
+ * @SPRD_DMA_BLK_REQ: block request mode
+ * @SPRD_DMA_TRANS_REQ: transaction request mode
+ * @SPRD_DMA_LIST_REQ: link-list request mode
+ *
+ * We have 4 types of request mode: fragment mode, block mode, transaction
+ * mode and link-list mode. One transaction can contain several blocks, and
+ * one block can contain several fragments. Link-list mode means we can save
+ * several DMA configurations in one reserved memory region, from which the
+ * DMA controller can fetch each configuration automatically to start a
+ * transfer.
+ */
+enum sprd_dma_req_mode {
+ SPRD_DMA_FRAG_REQ,
+ SPRD_DMA_BLK_REQ,
+ SPRD_DMA_TRANS_REQ,
+ SPRD_DMA_LIST_REQ,
+};
+
+/*
+ * enum sprd_dma_int_type: define the DMA interrupt type
+ * @SPRD_DMA_NO_INT: do not need generate DMA interrupts.
+ * @SPRD_DMA_FRAG_INT: fragment done interrupt when one fragment request
+ * is done.
+ * @SPRD_DMA_BLK_INT: block done interrupt when one block request is done.
+ * @SPRD_DMA_BLK_FRAG_INT: block and fragment interrupt when one fragment
+ * or one block request is done.
+ * @SPRD_DMA_TRANS_INT: transaction done interrupt when one transaction
+ * request is done.
+ * @SPRD_DMA_TRANS_FRAG_INT: transaction and fragment interrupt when one
+ * transaction request or fragment request is done.
+ * @SPRD_DMA_TRANS_BLK_INT: transaction and block interrupt when one
+ * transaction request or block request is done.
+ * @SPRD_DMA_LIST_INT: link-list done interrupt when one link-list request
+ * is done.
+ * @SPRD_DMA_CFGERR_INT: configure error interrupt when configuration is
+ * incorrect.
+ */
+enum sprd_dma_int_type {
+ SPRD_DMA_NO_INT,
+ SPRD_DMA_FRAG_INT,
+ SPRD_DMA_BLK_INT,
+ SPRD_DMA_BLK_FRAG_INT,
+ SPRD_DMA_TRANS_INT,
+ SPRD_DMA_TRANS_FRAG_INT,
+ SPRD_DMA_TRANS_BLK_INT,
+ SPRD_DMA_LIST_INT,
+ SPRD_DMA_CFGERR_INT,
+};
+
+/*
+ * struct sprd_dma_linklist - DMA link-list address structure
+ * @virt_addr: link-list virtual address to configure link-list node
+ * @phy_addr: link-list physical address to link DMA transfer
+ * @wrap_addr: the wrap address for link-list mode, which means once the
+ * transfer address reaches the wrap address, the next transfer address
+ * will jump to the address specified by wrap_to register.
+ *
+ * The Spreadtrum DMA controller supports the link-list mode, which means
+ * slaves can supply several groups of configurations (each configuration
+ * represents one DMA transfer) saved in memory, and the DMA controller will
+ * link these configurations by writing the physical address of each
+ * configuration into the link-list register.
+ *
+ * Just as shown below, the link-list pointer register points to the
+ * physical address of 'configuration 1', the 'configuration 1' link-list
+ * pointer points to 'configuration 2', and so on.
+ * Once the DMA transfer is triggered, the DMA controller will load
+ * 'configuration 1' into its registers automatically; after the
+ * 'configuration 1' transaction is done it will load 'configuration 2'
+ * automatically, until all DMA transactions are done.
+ *
+ * Note: The last link-list pointer should point to the physical address
+ * of 'configuration 1', which prevents the DMA controller from loading an
+ * incorrect configuration when the last configuration transaction is done.
+ *
+ * DMA controller linklist memory
+ * ====================== -----------------------
+ *| | | configuration 1 |<---
+ *| DMA controller | ------->| | |
+ *| | | | | |
+ *| | | | | |
+ *| | | | | |
+ *| linklist pointer reg |---- ----| linklist pointer | |
+ * ====================== | ----------------------- |
+ * | |
+ * | ----------------------- |
+ * | | configuration 2 | |
+ * --->| | |
+ * | | |
+ * | | |
+ * | | |
+ * ----| linklist pointer | |
+ * | ----------------------- |
+ * | |
+ * | ----------------------- |
+ * | | configuration 3 | |
+ * --->| | |
+ * | | |
+ * | . | |
+ * . |
+ * . |
+ * . |
+ * | . |
+ * | ----------------------- |
+ * | | configuration n | |
+ * --->| | |
+ * | | |
+ * | | |
+ * | | |
+ * | linklist pointer |----
+ * -----------------------
+ *
+ * To support the link-list mode, DMA slaves should allocate one segment of
+ * memory from always-on IRAM or DMA coherent memory to store these groups of
+ * DMA configurations, and pass the virtual and physical addresses to the DMA
+ * controller.
+ */
+struct sprd_dma_linklist {
+ unsigned long virt_addr;
+ phys_addr_t phy_addr;
+ phys_addr_t wrap_addr;
+};
+
+#endif
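
A sketch of composing the flags for the source half of a 2-stage transfer with SPRD_DMA_FLAGS(); in the Spreadtrum driver these flags are carried in the flags argument of the dmaengine prep calls (an assumption of this example):

	/* Source channel 0: trigger the destination when a fragment is done. */
	unsigned long flags = SPRD_DMA_FLAGS(SPRD_DMA_SRC_CHN0,
					     SPRD_DMA_FRAG_DONE_TRG,
					     SPRD_DMA_FRAG_REQ,
					     SPRD_DMA_TRANS_INT);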
diff --git a/include/linux/dma/ti-cppi5.h b/include/linux/dma/ti-cppi5.h
new file mode 100644
index 000000000000..c53c0f6e3b1a
--- /dev/null
+++ b/include/linux/dma/ti-cppi5.h
@@ -0,0 +1,1060 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CPPI5 descriptors interface
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef __TI_CPPI5_H__
+#define __TI_CPPI5_H__
+
+#include <linux/bitops.h>
+#include <linux/printk.h>
+#include <linux/bug.h>
+
+/**
+ * struct cppi5_desc_hdr_t - Descriptor header, present in all types of
+ * descriptors
+ * @pkt_info0: Packet info word 0 (n/a in Buffer desc)
+ * @pkt_info1: Packet info word 1 (n/a in Buffer desc)
+ * @pkt_info2: Packet info word 2 (n/a in Buffer desc)
+ * @src_dst_tag: Packet info word 3 (n/a in Buffer desc)
+ */
+struct cppi5_desc_hdr_t {
+ u32 pkt_info0;
+ u32 pkt_info1;
+ u32 pkt_info2;
+ u32 src_dst_tag;
+} __packed;
+
+/**
+ * struct cppi5_host_desc_t - Host-mode packet and buffer descriptor definition
+ * @hdr: Descriptor header
+ * @next_desc: word 4/5: Linking word
+ * @buf_ptr: word 6/7: Buffer pointer
+ * @buf_info1: word 8: Buffer valid data length
+ * @org_buf_len: word 9: Original buffer length
+ * @org_buf_ptr: word 10/11: Original buffer pointer
+ * @epib: Extended Packet Info Data (optional, 4 words), and/or
+ * Protocol Specific Data (optional, 0-128 bytes in
+ * multiples of 4), and/or
+ * Other Software Data (0-N bytes, optional)
+ */
+struct cppi5_host_desc_t {
+ struct cppi5_desc_hdr_t hdr;
+ u64 next_desc;
+ u64 buf_ptr;
+ u32 buf_info1;
+ u32 org_buf_len;
+ u64 org_buf_ptr;
+ u32 epib[];
+} __packed;
+
+#define CPPI5_DESC_MIN_ALIGN (16U)
+
+#define CPPI5_INFO0_HDESC_EPIB_SIZE (16U)
+#define CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE (128U)
+
+#define CPPI5_INFO0_HDESC_TYPE_SHIFT (30U)
+#define CPPI5_INFO0_HDESC_TYPE_MASK GENMASK(31, 30)
+#define CPPI5_INFO0_DESC_TYPE_VAL_HOST (1U)
+#define CPPI5_INFO0_DESC_TYPE_VAL_MONO (2U)
+#define CPPI5_INFO0_DESC_TYPE_VAL_TR (3U)
+#define CPPI5_INFO0_HDESC_EPIB_PRESENT BIT(29)
+/*
+ * Protocol Specific Words location:
+ * 0 = located in the descriptor,
+ * 1 = located in the SOP Buffer immediately prior to the data.
+ */
+#define CPPI5_INFO0_HDESC_PSINFO_LOCATION BIT(28)
+#define CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT (22U)
+#define CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK GENMASK(27, 22)
+#define CPPI5_INFO0_HDESC_PKTLEN_SHIFT (0)
+#define CPPI5_INFO0_HDESC_PKTLEN_MASK GENMASK(21, 0)
+
+#define CPPI5_INFO1_DESC_PKTERROR_SHIFT (28U)
+#define CPPI5_INFO1_DESC_PKTERROR_MASK GENMASK(31, 28)
+#define CPPI5_INFO1_HDESC_PSFLGS_SHIFT (24U)
+#define CPPI5_INFO1_HDESC_PSFLGS_MASK GENMASK(27, 24)
+#define CPPI5_INFO1_DESC_PKTID_SHIFT (14U)
+#define CPPI5_INFO1_DESC_PKTID_MASK GENMASK(23, 14)
+#define CPPI5_INFO1_DESC_FLOWID_SHIFT (0)
+#define CPPI5_INFO1_DESC_FLOWID_MASK GENMASK(13, 0)
+#define CPPI5_INFO1_DESC_FLOWID_DEFAULT CPPI5_INFO1_DESC_FLOWID_MASK
+
+#define CPPI5_INFO2_HDESC_PKTTYPE_SHIFT (27U)
+#define CPPI5_INFO2_HDESC_PKTTYPE_MASK GENMASK(31, 27)
+/* Return Policy: 0 - Entire packet 1 - Each buffer */
+#define CPPI5_INFO2_HDESC_RETPOLICY BIT(18)
+/*
+ * Early Return:
+ * 0 = desc pointers should be returned after all reads have been completed
+ * 1 = desc pointers should be returned immediately upon fetching
+ * the descriptor and beginning to transfer data.
+ */
+#define CPPI5_INFO2_HDESC_EARLYRET BIT(17)
+/*
+ * Return Push Policy:
+ * 0 = Descriptor must be returned to tail of queue
+ * 1 = Descriptor must be returned to head of queue
+ */
+#define CPPI5_INFO2_DESC_RETPUSHPOLICY BIT(16)
+#define CPPI5_INFO2_DESC_RETP_MASK GENMASK(18, 16)
+
+#define CPPI5_INFO2_DESC_RETQ_SHIFT (0)
+#define CPPI5_INFO2_DESC_RETQ_MASK GENMASK(15, 0)
+
+#define CPPI5_INFO3_DESC_SRCTAG_SHIFT (16U)
+#define CPPI5_INFO3_DESC_SRCTAG_MASK GENMASK(31, 16)
+#define CPPI5_INFO3_DESC_DSTTAG_SHIFT (0)
+#define CPPI5_INFO3_DESC_DSTTAG_MASK GENMASK(15, 0)
+
+#define CPPI5_BUFINFO1_HDESC_DATA_LEN_SHIFT (0)
+#define CPPI5_BUFINFO1_HDESC_DATA_LEN_MASK GENMASK(27, 0)
+
+#define CPPI5_OBUFINFO0_HDESC_BUF_LEN_SHIFT (0)
+#define CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK GENMASK(27, 0)
+
+/**
+ * struct cppi5_desc_epib_t - Host Packet Descriptor Extended Packet Info Block
+ * @timestamp: word 0: application specific timestamp
+ * @sw_info0: word 1: Software Info 0
+ * @sw_info1: word 2: Software Info 1
+ * @sw_info2: word 3: Software Info 2
+ */
+struct cppi5_desc_epib_t {
+ u32 timestamp; /* w0: application specific timestamp */
+ u32 sw_info0; /* w1: Software Info 0 */
+ u32 sw_info1; /* w2: Software Info 1 */
+ u32 sw_info2; /* w3: Software Info 2 */
+};
+
+/**
+ * struct cppi5_monolithic_desc_t - Monolithic-mode packet descriptor
+ * @hdr: Descriptor header
+ * @epib: Extended Packet Info Data (optional, 4 words), and/or
+ * Protocol Specific Data (optional, 0-128 bytes in
+ * multiples of 4), and/or
+ * Other Software Data (0-N bytes, optional)
+ */
+struct cppi5_monolithic_desc_t {
+ struct cppi5_desc_hdr_t hdr;
+ u32 epib[];
+};
+
+#define CPPI5_INFO2_MDESC_DATA_OFFSET_SHIFT (18U)
+#define CPPI5_INFO2_MDESC_DATA_OFFSET_MASK GENMASK(26, 18)
+
+/*
+ * Reload Count:
+ * 0 = Finish the packet and place the descriptor back on the return queue
+ * 1-0x1ff = Vector to the Reload Index and resume processing
+ * 0x1ff indicates perpetual loop, infinite reload until the channel is stopped
+ */
+#define CPPI5_INFO0_TRDESC_RLDCNT_SHIFT (20U)
+#define CPPI5_INFO0_TRDESC_RLDCNT_MASK GENMASK(28, 20)
+#define CPPI5_INFO0_TRDESC_RLDCNT_MAX (0x1ff)
+#define CPPI5_INFO0_TRDESC_RLDCNT_INFINITE CPPI5_INFO0_TRDESC_RLDCNT_MAX
+#define CPPI5_INFO0_TRDESC_RLDIDX_SHIFT (14U)
+#define CPPI5_INFO0_TRDESC_RLDIDX_MASK GENMASK(19, 14)
+#define CPPI5_INFO0_TRDESC_RLDIDX_MAX (0x3f)
+#define CPPI5_INFO0_TRDESC_LASTIDX_SHIFT (0)
+#define CPPI5_INFO0_TRDESC_LASTIDX_MASK GENMASK(13, 0)
+
+#define CPPI5_INFO1_TRDESC_RECSIZE_SHIFT (24U)
+#define CPPI5_INFO1_TRDESC_RECSIZE_MASK GENMASK(26, 24)
+#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_16B (0)
+#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_32B (1U)
+#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_64B (2U)
+#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_128B (3U)
+
+static inline void cppi5_desc_dump(void *desc, u32 size)
+{
+ print_hex_dump(KERN_ERR, "dump udmap_desc: ", DUMP_PREFIX_NONE,
+ 32, 4, desc, size, false);
+}
+
+#define CPPI5_TDCM_MARKER (0x1)
+/**
+ * cppi5_desc_is_tdcm - check if the paddr indicates Teardown Complete Message
+ * @paddr: Physical address of the packet popped from the ring
+ *
+ * Returns true if the address indicates TDCM
+ */
+static inline bool cppi5_desc_is_tdcm(dma_addr_t paddr)
+{
+ return (paddr & CPPI5_TDCM_MARKER) ? true : false;
+}
+
+/**
+ * cppi5_desc_get_type - get descriptor type
+ * @desc_hdr: packet descriptor/TR header
+ *
+ * Returns descriptor type:
+ * CPPI5_INFO0_DESC_TYPE_VAL_HOST
+ * CPPI5_INFO0_DESC_TYPE_VAL_MONO
+ * CPPI5_INFO0_DESC_TYPE_VAL_TR
+ */
+static inline u32 cppi5_desc_get_type(struct cppi5_desc_hdr_t *desc_hdr)
+{
+ return (desc_hdr->pkt_info0 & CPPI5_INFO0_HDESC_TYPE_MASK) >>
+ CPPI5_INFO0_HDESC_TYPE_SHIFT;
+}
+
+/**
+ * cppi5_desc_get_errflags - get Error Flags from Desc
+ * @desc_hdr: packet/TR descriptor header
+ *
+ * Returns Error Flags from Packet/TR Descriptor
+ */
+static inline u32 cppi5_desc_get_errflags(struct cppi5_desc_hdr_t *desc_hdr)
+{
+ return (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_PKTERROR_MASK) >>
+ CPPI5_INFO1_DESC_PKTERROR_SHIFT;
+}
+
+/**
+ * cppi5_desc_get_pktids - get Packet and Flow ids from Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @pkt_id: Packet ID
+ * @flow_id: Flow ID
+ *
+ * Returns Packet and Flow ids from packet/TR descriptor
+ */
+static inline void cppi5_desc_get_pktids(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 *pkt_id, u32 *flow_id)
+{
+ *pkt_id = (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_PKTID_MASK) >>
+ CPPI5_INFO1_DESC_PKTID_SHIFT;
+ *flow_id = (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_FLOWID_MASK) >>
+ CPPI5_INFO1_DESC_FLOWID_SHIFT;
+}
+
+/**
+ * cppi5_desc_set_pktids - set Packet and Flow ids in Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @pkt_id: Packet ID
+ * @flow_id: Flow ID
+ */
+static inline void cppi5_desc_set_pktids(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 pkt_id, u32 flow_id)
+{
+ desc_hdr->pkt_info1 &= ~(CPPI5_INFO1_DESC_PKTID_MASK |
+ CPPI5_INFO1_DESC_FLOWID_MASK);
+ desc_hdr->pkt_info1 |= (pkt_id << CPPI5_INFO1_DESC_PKTID_SHIFT) &
+ CPPI5_INFO1_DESC_PKTID_MASK;
+ desc_hdr->pkt_info1 |= (flow_id << CPPI5_INFO1_DESC_FLOWID_SHIFT) &
+ CPPI5_INFO1_DESC_FLOWID_MASK;
+}
+
+/**
+ * cppi5_desc_set_retpolicy - set Packet Return Policy in Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @flags: flags, supported values
+ * CPPI5_INFO2_HDESC_RETPOLICY
+ * CPPI5_INFO2_HDESC_EARLYRET
+ * CPPI5_INFO2_DESC_RETPUSHPOLICY
+ * @return_ring_id: Packet Return Queue/Ring id, value 0xFFFF reserved
+ */
+static inline void cppi5_desc_set_retpolicy(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 flags, u32 return_ring_id)
+{
+ desc_hdr->pkt_info2 &= ~(CPPI5_INFO2_DESC_RETP_MASK |
+ CPPI5_INFO2_DESC_RETQ_MASK);
+ desc_hdr->pkt_info2 |= flags & CPPI5_INFO2_DESC_RETP_MASK;
+ desc_hdr->pkt_info2 |= return_ring_id & CPPI5_INFO2_DESC_RETQ_MASK;
+}
+
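+/*
+ * Illustrative sketch (the ids, ring and flag choice below are assumptions):
+ * stamp packet/flow ids and the return policy on a descriptor header.
+ *
+ *	cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+ *	cppi5_desc_set_retpolicy(&desc->hdr, CPPI5_INFO2_DESC_RETPUSHPOLICY, 0);
+ */
+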
+/**
+ * cppi5_desc_get_tags_ids - get Packet Src/Dst Tags from Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @src_tag_id: Source Tag
+ * @dst_tag_id: Dest Tag
+ *
+ * Returns Packet Src/Dst Tags from packet/TR descriptor
+ */
+static inline void cppi5_desc_get_tags_ids(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 *src_tag_id, u32 *dst_tag_id)
+{
+ if (src_tag_id)
+ *src_tag_id = (desc_hdr->src_dst_tag &
+ CPPI5_INFO3_DESC_SRCTAG_MASK) >>
+ CPPI5_INFO3_DESC_SRCTAG_SHIFT;
+ if (dst_tag_id)
+ *dst_tag_id = desc_hdr->src_dst_tag &
+ CPPI5_INFO3_DESC_DSTTAG_MASK;
+}
+
+/**
+ * cppi5_desc_set_tags_ids - set Packet Src/Dst Tags in HDesc
+ * @desc_hdr: packet/TR descriptor header
+ * @src_tag_id: Source Tag
+ * @dst_tag_id: Dest Tag
+ *
+ * Sets Packet Src/Dst Tags in packet/TR descriptor
+ */
+static inline void cppi5_desc_set_tags_ids(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 src_tag_id, u32 dst_tag_id)
+{
+ desc_hdr->src_dst_tag = (src_tag_id << CPPI5_INFO3_DESC_SRCTAG_SHIFT) &
+ CPPI5_INFO3_DESC_SRCTAG_MASK;
+ desc_hdr->src_dst_tag |= dst_tag_id & CPPI5_INFO3_DESC_DSTTAG_MASK;
+}
+
+/**
+ * cppi5_hdesc_calc_size - Calculate Host Packet Descriptor size
+ * @epib: is EPIB present
+ * @psdata_size: PSDATA size
+ * @sw_data_size: SWDATA size
+ *
+ * Returns required Host Packet Descriptor size
+ * 0 - if PSDATA > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE
+ */
+static inline u32 cppi5_hdesc_calc_size(bool epib, u32 psdata_size,
+ u32 sw_data_size)
+{
+ u32 desc_size;
+
+ if (psdata_size > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE)
+ return 0;
+
+ desc_size = sizeof(struct cppi5_host_desc_t) + psdata_size +
+ sw_data_size;
+
+ if (epib)
+ desc_size += CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+ return ALIGN(desc_size, CPPI5_DESC_MIN_ALIGN);
+}
+
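+/*
+ * Worked example, assuming the 48-byte fixed part of cppi5_host_desc_t:
+ * cppi5_hdesc_calc_size(true, 16, 8) = ALIGN(48 + 16 + 8 + 16, 16) = 96.
+ */
+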
+/**
+ * cppi5_hdesc_init - Init Host Packet Descriptor
+ * @desc: Host packet descriptor
+ * @flags: supported values
+ * CPPI5_INFO0_HDESC_EPIB_PRESENT
+ * CPPI5_INFO0_HDESC_PSINFO_LOCATION
+ * @psdata_size: PSDATA size
+ *
+ * Initializes the Host Packet Descriptor header with the given flags and
+ * in-descriptor PSDATA size.
+ */
+static inline void cppi5_hdesc_init(struct cppi5_host_desc_t *desc, u32 flags,
+ u32 psdata_size)
+{
+ desc->hdr.pkt_info0 = (CPPI5_INFO0_DESC_TYPE_VAL_HOST <<
+ CPPI5_INFO0_HDESC_TYPE_SHIFT) | (flags);
+ desc->hdr.pkt_info0 |= ((psdata_size >> 2) <<
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT) &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK;
+ desc->next_desc = 0;
+}
+
+/**
+ * cppi5_hdesc_update_flags - Replace descriptor flags
+ * @desc: Host packet descriptor
+ * @flags: supported values
+ * CPPI5_INFO0_HDESC_EPIB_PRESENT
+ * CPPI5_INFO0_HDESC_PSINFO_LOCATION
+ */
+static inline void cppi5_hdesc_update_flags(struct cppi5_host_desc_t *desc,
+ u32 flags)
+{
+ desc->hdr.pkt_info0 &= ~(CPPI5_INFO0_HDESC_EPIB_PRESENT |
+ CPPI5_INFO0_HDESC_PSINFO_LOCATION);
+ desc->hdr.pkt_info0 |= flags;
+}
+
+/**
+ * cppi5_hdesc_update_psdata_size - Replace PSdata size
+ * @desc: Host packet descriptor
+ * @psdata_size: PSDATA size
+ */
+static inline void
+cppi5_hdesc_update_psdata_size(struct cppi5_host_desc_t *desc, u32 psdata_size)
+{
+ desc->hdr.pkt_info0 &= ~CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK;
+ desc->hdr.pkt_info0 |= ((psdata_size >> 2) <<
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT) &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK;
+}
+
+/**
+ * cppi5_hdesc_get_psdata_size - get PSdata size in bytes
+ * @desc: Host packet descriptor
+ */
+static inline u32 cppi5_hdesc_get_psdata_size(struct cppi5_host_desc_t *desc)
+{
+ u32 psdata_size = 0;
+
+ if (!(desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION))
+ psdata_size = (desc->hdr.pkt_info0 &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >>
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT;
+
+ return (psdata_size << 2);
+}
+
+/**
+ * cppi5_hdesc_get_pktlen - get Packet Length from HDesc
+ * @desc: Host packet descriptor
+ *
+ * Returns Packet Length from Host Packet Descriptor
+ */
+static inline u32 cppi5_hdesc_get_pktlen(struct cppi5_host_desc_t *desc)
+{
+ return (desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PKTLEN_MASK);
+}
+
+/**
+ * cppi5_hdesc_set_pktlen - set Packet Length in HDesc
+ * @desc: Host packet descriptor
+ * @pkt_len: Packet Length
+ */
+static inline void cppi5_hdesc_set_pktlen(struct cppi5_host_desc_t *desc,
+ u32 pkt_len)
+{
+ desc->hdr.pkt_info0 &= ~CPPI5_INFO0_HDESC_PKTLEN_MASK;
+ desc->hdr.pkt_info0 |= (pkt_len & CPPI5_INFO0_HDESC_PKTLEN_MASK);
+}
+
+/**
+ * cppi5_hdesc_get_psflags - get Protocol Specific Flags from HDesc
+ * @desc: Host packet descriptor
+ *
+ * Returns Protocol Specific Flags from Host Packet Descriptor
+ */
+static inline u32 cppi5_hdesc_get_psflags(struct cppi5_host_desc_t *desc)
+{
+ return (desc->hdr.pkt_info1 & CPPI5_INFO1_HDESC_PSFLGS_MASK) >>
+ CPPI5_INFO1_HDESC_PSFLGS_SHIFT;
+}
+
+/**
+ * cppi5_hdesc_set_psflags - set Protocol Specific Flags in HDesc
+ * @desc: Host packet descriptor
+ * @ps_flags: Protocol Specific Flags
+ */
+static inline void cppi5_hdesc_set_psflags(struct cppi5_host_desc_t *desc,
+ u32 ps_flags)
+{
+ desc->hdr.pkt_info1 &= ~CPPI5_INFO1_HDESC_PSFLGS_MASK;
+ desc->hdr.pkt_info1 |= (ps_flags <<
+ CPPI5_INFO1_HDESC_PSFLGS_SHIFT) &
+ CPPI5_INFO1_HDESC_PSFLGS_MASK;
+}
+
+/**
+ * cppi5_hdesc_get_pkttype - get Packet Type from HDesc
+ * @desc: Host packet descriptor
+ */
+static inline u32 cppi5_hdesc_get_pkttype(struct cppi5_host_desc_t *desc)
+{
+ return (desc->hdr.pkt_info2 & CPPI5_INFO2_HDESC_PKTTYPE_MASK) >>
+ CPPI5_INFO2_HDESC_PKTTYPE_SHIFT;
+}
+
+/**
+ * cppi5_hdesc_set_pkttype - set Packet Type in HDesc
+ * @desc: Host packet descriptor
+ * @pkt_type: Packet Type
+ */
+static inline void cppi5_hdesc_set_pkttype(struct cppi5_host_desc_t *desc,
+ u32 pkt_type)
+{
+ desc->hdr.pkt_info2 &= ~CPPI5_INFO2_HDESC_PKTTYPE_MASK;
+ desc->hdr.pkt_info2 |=
+ (pkt_type << CPPI5_INFO2_HDESC_PKTTYPE_SHIFT) &
+ CPPI5_INFO2_HDESC_PKTTYPE_MASK;
+}
+
+/**
+ * cppi5_hdesc_attach_buf - attach buffer to HDesc
+ * @desc: Host packet descriptor
+ * @buf: Buffer physical address
+ * @buf_data_len: Buffer length
+ * @obuf: Original Buffer physical address
+ * @obuf_len: Original Buffer length
+ *
+ * Attaches buffer to Host Packet Descriptor
+ */
+static inline void cppi5_hdesc_attach_buf(struct cppi5_host_desc_t *desc,
+ dma_addr_t buf, u32 buf_data_len,
+ dma_addr_t obuf, u32 obuf_len)
+{
+ desc->buf_ptr = buf;
+ desc->buf_info1 = buf_data_len & CPPI5_BUFINFO1_HDESC_DATA_LEN_MASK;
+ desc->org_buf_ptr = obuf;
+ desc->org_buf_len = obuf_len & CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK;
+}
+
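+/*
+ * Typical TX setup sketch (illustrative; "dma_buf" and "len" are assumptions,
+ * DMA mapping and descriptor pool handling omitted):
+ *
+ *	cppi5_hdesc_init(desc, 0, 0);
+ *	cppi5_hdesc_set_pktlen(desc, len);
+ *	cppi5_hdesc_attach_buf(desc, dma_buf, len, dma_buf, len);
+ */
+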
+static inline void cppi5_hdesc_get_obuf(struct cppi5_host_desc_t *desc,
+ dma_addr_t *obuf, u32 *obuf_len)
+{
+ *obuf = desc->org_buf_ptr;
+ *obuf_len = desc->org_buf_len & CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK;
+}
+
+static inline void cppi5_hdesc_reset_to_original(struct cppi5_host_desc_t *desc)
+{
+ desc->buf_ptr = desc->org_buf_ptr;
+ desc->buf_info1 = desc->org_buf_len;
+}
+
+/**
+ * cppi5_hdesc_link_hbdesc - link Host Buffer Descriptor to HDesc
+ * @desc: Host Packet Descriptor
+ * @hbuf_desc: Host Buffer Descriptor physical address
+ *
+ * Adds and links Host Buffer Descriptor to HDesc
+ */
+static inline void cppi5_hdesc_link_hbdesc(struct cppi5_host_desc_t *desc,
+ dma_addr_t hbuf_desc)
+{
+ desc->next_desc = hbuf_desc;
+}
+
+static inline dma_addr_t
+cppi5_hdesc_get_next_hbdesc(struct cppi5_host_desc_t *desc)
+{
+ return (dma_addr_t)desc->next_desc;
+}
+
+static inline void cppi5_hdesc_reset_hbdesc(struct cppi5_host_desc_t *desc)
+{
+ desc->hdr = (struct cppi5_desc_hdr_t) { 0 };
+ desc->next_desc = 0;
+}
+
+/**
+ * cppi5_hdesc_epib_present - check if EPIB present
+ * @desc_hdr: packet descriptor/TR header
+ *
+ * Returns true if EPIB present in the packet
+ */
+static inline bool cppi5_hdesc_epib_present(struct cppi5_desc_hdr_t *desc_hdr)
+{
+ return !!(desc_hdr->pkt_info0 & CPPI5_INFO0_HDESC_EPIB_PRESENT);
+}
+
+/**
+ * cppi5_hdesc_get_psdata - Get pointer to PSDATA
+ * @desc: Host packet descriptor
+ *
+ * Returns pointer to PSDATA in HDesc, or
+ * NULL if PSDATA is placed at the start of the data buffer.
+ */
+static inline void *cppi5_hdesc_get_psdata(struct cppi5_host_desc_t *desc)
+{
+ u32 psdata_size;
+ void *psdata;
+
+ if (desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION)
+ return NULL;
+
+ psdata_size = (desc->hdr.pkt_info0 &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >>
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT;
+
+ if (!psdata_size)
+ return NULL;
+
+ psdata = &desc->epib;
+
+ if (cppi5_hdesc_epib_present(&desc->hdr))
+ psdata += CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+ return psdata;
+}
+
+/**
+ * cppi5_hdesc_get_swdata - Get pointer to SWDATA
+ * @desc: Host packet descriptor
+ *
+ * Returns pointer to SWDATA in HDesc.
+ * NOTE: it is the caller's responsibility to ensure the HDesc actually has
+ * SWDATA.
+ */
+static inline void *cppi5_hdesc_get_swdata(struct cppi5_host_desc_t *desc)
+{
+ u32 psdata_size = 0;
+ void *swdata;
+
+ if (!(desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION))
+ psdata_size = (desc->hdr.pkt_info0 &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >>
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT;
+
+ swdata = &desc->epib;
+
+ if (cppi5_hdesc_epib_present(&desc->hdr))
+ swdata += CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+ swdata += (psdata_size << 2);
+
+ return swdata;
+}
+
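+/*
+ * Within epib[] the optional areas follow each other:
+ * [EPIB, 16 bytes, if EPIB_PRESENT][PSDATA, if in-descriptor][SWDATA].
+ * Illustrative EPIB access (assumes EPIB_PRESENT is set):
+ *
+ *	struct cppi5_desc_epib_t *epib = (struct cppi5_desc_epib_t *)desc->epib;
+ *	u32 ts = epib->timestamp;
+ */
+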
+/* ================================== TR ================================== */
+
+#define CPPI5_TR_TYPE_SHIFT (0U)
+#define CPPI5_TR_TYPE_MASK GENMASK(3, 0)
+#define CPPI5_TR_STATIC BIT(4)
+#define CPPI5_TR_WAIT BIT(5)
+#define CPPI5_TR_EVENT_SIZE_SHIFT (6U)
+#define CPPI5_TR_EVENT_SIZE_MASK GENMASK(7, 6)
+#define CPPI5_TR_TRIGGER0_SHIFT (8U)
+#define CPPI5_TR_TRIGGER0_MASK GENMASK(9, 8)
+#define CPPI5_TR_TRIGGER0_TYPE_SHIFT (10U)
+#define CPPI5_TR_TRIGGER0_TYPE_MASK GENMASK(11, 10)
+#define CPPI5_TR_TRIGGER1_SHIFT (12U)
+#define CPPI5_TR_TRIGGER1_MASK GENMASK(13, 12)
+#define CPPI5_TR_TRIGGER1_TYPE_SHIFT (14U)
+#define CPPI5_TR_TRIGGER1_TYPE_MASK GENMASK(15, 14)
+#define CPPI5_TR_CMD_ID_SHIFT (16U)
+#define CPPI5_TR_CMD_ID_MASK GENMASK(23, 16)
+#define CPPI5_TR_CSF_FLAGS_SHIFT (24U)
+#define CPPI5_TR_CSF_FLAGS_MASK GENMASK(31, 24)
+#define CPPI5_TR_CSF_SA_INDIRECT BIT(0)
+#define CPPI5_TR_CSF_DA_INDIRECT BIT(1)
+#define CPPI5_TR_CSF_SUPR_EVT BIT(2)
+#define CPPI5_TR_CSF_EOL_ADV_SHIFT (4U)
+#define CPPI5_TR_CSF_EOL_ADV_MASK GENMASK(6, 4)
+#define CPPI5_TR_CSF_EOL_ICNT0 BIT(4)
+#define CPPI5_TR_CSF_EOP BIT(7)
+
+/**
+ * enum cppi5_tr_types - TR types
+ * @CPPI5_TR_TYPE0: One dimensional data move
+ * @CPPI5_TR_TYPE1: Two dimensional data move
+ * @CPPI5_TR_TYPE2: Three dimensional data move
+ * @CPPI5_TR_TYPE3: Four dimensional data move
+ * @CPPI5_TR_TYPE4: Four dimensional data move with data formatting
+ * @CPPI5_TR_TYPE5: Four dimensional Cache Warm
+ * @CPPI5_TR_TYPE8: Four Dimensional Block Move
+ * @CPPI5_TR_TYPE9: Four Dimensional Block Move with Repacking
+ * @CPPI5_TR_TYPE10: Two Dimensional Block Move
+ * @CPPI5_TR_TYPE11: Two Dimensional Block Move with Repacking
+ * @CPPI5_TR_TYPE15: Four Dimensional Block Move with Repacking and
+ * Indirection
+ */
+enum cppi5_tr_types {
+ CPPI5_TR_TYPE0 = 0,
+ CPPI5_TR_TYPE1,
+ CPPI5_TR_TYPE2,
+ CPPI5_TR_TYPE3,
+ CPPI5_TR_TYPE4,
+ CPPI5_TR_TYPE5,
+ /* type6-7: Reserved */
+ CPPI5_TR_TYPE8 = 8,
+ CPPI5_TR_TYPE9,
+ CPPI5_TR_TYPE10,
+ CPPI5_TR_TYPE11,
+ /* type12-14: Reserved */
+ CPPI5_TR_TYPE15 = 15,
+ CPPI5_TR_TYPE_MAX
+};
+
+/**
+ * enum cppi5_tr_event_size - TR Flags EVENT_SIZE field specifies when an event
+ * is generated for each TR.
+ * @CPPI5_TR_EVENT_SIZE_COMPLETION: When TR is complete and all status for
+ * the TR has been received
+ * @CPPI5_TR_EVENT_SIZE_ICNT1_DEC: Type 0: when the last data transaction
+ * is sent for the TR
+ * Type 1-11: when ICNT1 is decremented
+ * @CPPI5_TR_EVENT_SIZE_ICNT2_DEC: Type 0-1,10-11: when the last data
+ * transaction is sent for the TR
+ * All other types: when ICNT2 is
+ * decremented
+ * @CPPI5_TR_EVENT_SIZE_ICNT3_DEC: Type 0-2,10-11: when the last data
+ * transaction is sent for the TR
+ * All other types: when ICNT3 is
+ * decremented
+ */
+enum cppi5_tr_event_size {
+ CPPI5_TR_EVENT_SIZE_COMPLETION,
+ CPPI5_TR_EVENT_SIZE_ICNT1_DEC,
+ CPPI5_TR_EVENT_SIZE_ICNT2_DEC,
+ CPPI5_TR_EVENT_SIZE_ICNT3_DEC,
+ CPPI5_TR_EVENT_SIZE_MAX
+};
+
+/**
+ * enum cppi5_tr_trigger - TR Flags TRIGGERx field specifies the type of trigger
+ * used to enable the TR to transfer data as specified
+ * by TRIGGERx_TYPE field.
+ * @CPPI5_TR_TRIGGER_NONE: No trigger
+ * @CPPI5_TR_TRIGGER_GLOBAL0: Global trigger 0
+ * @CPPI5_TR_TRIGGER_GLOBAL1: Global trigger 1
+ * @CPPI5_TR_TRIGGER_LOCAL_EVENT: Local Event
+ */
+enum cppi5_tr_trigger {
+ CPPI5_TR_TRIGGER_NONE,
+ CPPI5_TR_TRIGGER_GLOBAL0,
+ CPPI5_TR_TRIGGER_GLOBAL1,
+ CPPI5_TR_TRIGGER_LOCAL_EVENT,
+ CPPI5_TR_TRIGGER_MAX
+};
+
+/**
+ * enum cppi5_tr_trigger_type - TR Flags TRIGGERx_TYPE field specifies the type
+ * of data transfer that will be enabled by
+ * receiving a trigger as specified by TRIGGERx.
+ * @CPPI5_TR_TRIGGER_TYPE_ICNT1_DEC: The second inner most loop (ICNT1) will
+ * be decremented by 1
+ * @CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC: The third inner most loop (ICNT2) will
+ * be decremented by 1
+ * @CPPI5_TR_TRIGGER_TYPE_ICNT3_DEC: The outer most loop (ICNT3) will be
+ * decremented by 1
+ * @CPPI5_TR_TRIGGER_TYPE_ALL: The entire TR will be allowed to
+ * complete
+ */
+enum cppi5_tr_trigger_type {
+ CPPI5_TR_TRIGGER_TYPE_ICNT1_DEC,
+ CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
+ CPPI5_TR_TRIGGER_TYPE_ICNT3_DEC,
+ CPPI5_TR_TRIGGER_TYPE_ALL,
+ CPPI5_TR_TRIGGER_TYPE_MAX
+};
+
+typedef u32 cppi5_tr_flags_t;
+
+/**
+ * struct cppi5_tr_type0_t - Type 0 (One dimensional data move) TR (16 byte)
+ * @flags: TR flags (type, triggers, event, configuration)
+ * @icnt0: Total loop iteration count for level 0 (innermost)
+ * @_reserved: Not used
+ * @addr: Starting address for the source data or destination data
+ */
+struct cppi5_tr_type0_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 _reserved;
+ u64 addr;
+} __aligned(16) __packed;
+
+/**
+ * struct cppi5_tr_type1_t - Type 1 (Two dimensional data move) TR (32 byte)
+ * @flags: TR flags (type, triggers, event, configuration)
+ * @icnt0: Total loop iteration count for level 0 (innermost)
+ * @icnt1: Total loop iteration count for level 1
+ * @addr: Starting address for the source data or destination data
+ * @dim1: Signed dimension for loop level 1
+ */
+struct cppi5_tr_type1_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 icnt1;
+ u64 addr;
+ s32 dim1;
+} __aligned(32) __packed;
+
+/**
+ * struct cppi5_tr_type2_t - Type 2 (Three dimensional data move) TR (32 byte)
+ * @flags: TR flags (type, triggers, event, configuration)
+ * @icnt0: Total loop iteration count for level 0 (innermost)
+ * @icnt1: Total loop iteration count for level 1
+ * @addr: Starting address for the source data or destination data
+ * @dim1: Signed dimension for loop level 1
+ * @icnt2: Total loop iteration count for level 2
+ * @_reserved: Not used
+ * @dim2: Signed dimension for loop level 2
+ */
+struct cppi5_tr_type2_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 icnt1;
+ u64 addr;
+ s32 dim1;
+ u16 icnt2;
+ u16 _reserved;
+ s32 dim2;
+} __aligned(32) __packed;
+
+/**
+ * struct cppi5_tr_type3_t - Type 3 (Four dimensional data move) TR (32 byte)
+ * @flags: TR flags (type, triggers, event, configuration)
+ * @icnt0: Total loop iteration count for level 0 (innermost)
+ * @icnt1: Total loop iteration count for level 1
+ * @addr: Starting address for the source data or destination data
+ * @dim1: Signed dimension for loop level 1
+ * @icnt2: Total loop iteration count for level 2
+ * @icnt3: Total loop iteration count for level 3 (outermost)
+ * @dim2: Signed dimension for loop level 2
+ * @dim3: Signed dimension for loop level 3
+ */
+struct cppi5_tr_type3_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 icnt1;
+ u64 addr;
+ s32 dim1;
+ u16 icnt2;
+ u16 icnt3;
+ s32 dim2;
+ s32 dim3;
+} __aligned(32) __packed;
+
+/**
+ * struct cppi5_tr_type15_t - Type 15 (Four Dimensional Block Copy with
+ * Repacking and Indirection Support) TR (64 byte)
+ * @flags: TR flags (type, triggers, event, configuration)
+ * @icnt0: Total loop iteration count for level 0 (innermost) for
+ * source
+ * @icnt1: Total loop iteration count for level 1 for source
+ * @addr: Starting address for the source data
+ * @dim1: Signed dimension for loop level 1 for source
+ * @icnt2: Total loop iteration count for level 2 for source
+ * @icnt3: Total loop iteration count for level 3 (outermost) for
+ * source
+ * @dim2: Signed dimension for loop level 2 for source
+ * @dim3: Signed dimension for loop level 3 for source
+ * @_reserved: Not used
+ * @ddim1: Signed dimension for loop level 1 for destination
+ * @daddr: Starting address for the destination data
+ * @ddim2: Signed dimension for loop level 2 for destination
+ * @ddim3: Signed dimension for loop level 3 for destination
+ * @dicnt0: Total loop iteration count for level 0 (innermost) for
+ * destination
+ * @dicnt1: Total loop iteration count for level 1 for destination
+ * @dicnt2: Total loop iteration count for level 2 for destination
+ * @dicnt3: Total loop iteration count for level 3 (outermost) for
+ * destination
+ */
+struct cppi5_tr_type15_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 icnt1;
+ u64 addr;
+ s32 dim1;
+ u16 icnt2;
+ u16 icnt3;
+ s32 dim2;
+ s32 dim3;
+ u32 _reserved;
+ s32 ddim1;
+ u64 daddr;
+ s32 ddim2;
+ s32 ddim3;
+ u16 dicnt0;
+ u16 dicnt1;
+ u16 dicnt2;
+ u16 dicnt3;
+} __aligned(64) __packed;
+
+/**
+ * struct cppi5_tr_resp_t - TR response record
+ * @status: Status type and info
+ * @_reserved: Not used
+ * @cmd_id: Command ID for the TR for TR identification
+ * @flags: Configuration Specific Flags
+ */
+struct cppi5_tr_resp_t {
+ u8 status;
+ u8 _reserved;
+ u8 cmd_id;
+ u8 flags;
+} __packed;
+
+#define CPPI5_TR_RESPONSE_STATUS_TYPE_SHIFT (0U)
+#define CPPI5_TR_RESPONSE_STATUS_TYPE_MASK GENMASK(3, 0)
+#define CPPI5_TR_RESPONSE_STATUS_INFO_SHIFT (4U)
+#define CPPI5_TR_RESPONSE_STATUS_INFO_MASK GENMASK(7, 4)
+#define CPPI5_TR_RESPONSE_CMDID_SHIFT (16U)
+#define CPPI5_TR_RESPONSE_CMDID_MASK GENMASK(23, 16)
+#define CPPI5_TR_RESPONSE_CFG_SPECIFIC_SHIFT (24U)
+#define CPPI5_TR_RESPONSE_CFG_SPECIFIC_MASK GENMASK(31, 24)
+
+/**
+ * enum cppi5_tr_resp_status_type - TR Response Status Type field is used to
+ * determine what type of status is being
+ * returned.
+ * @CPPI5_TR_RESPONSE_STATUS_NONE: No error, completion: completed
+ * @CPPI5_TR_RESPONSE_STATUS_TRANSFER_ERR: Transfer Error, completion: none
+ * or partially completed
+ * @CPPI5_TR_RESPONSE_STATUS_ABORTED_ERR: Aborted Error, completion: none
+ * or partially completed
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ERR: Submission Error, completion:
+ * none
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ERR: Unsupported Error, completion:
+ * none
+ * @CPPI5_TR_RESPONSE_STATUS_TRANSFER_EXCEPTION: Transfer Exception, completion:
+ * partially completed
+ * @CPPI5_TR_RESPONSE_STATUS__TEARDOWN_FLUSH: Teardown Flush, completion: none
+ */
+enum cppi5_tr_resp_status_type {
+ CPPI5_TR_RESPONSE_STATUS_NONE,
+ CPPI5_TR_RESPONSE_STATUS_TRANSFER_ERR,
+ CPPI5_TR_RESPONSE_STATUS_ABORTED_ERR,
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ERR,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ERR,
+ CPPI5_TR_RESPONSE_STATUS_TRANSFER_EXCEPTION,
+ CPPI5_TR_RESPONSE_STATUS__TEARDOWN_FLUSH,
+ CPPI5_TR_RESPONSE_STATUS_MAX
+};
+
+/**
+ * enum cppi5_tr_resp_status_submission - TR Response Status field values which
+ * correspond to Submission Error
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ICNT0: ICNT0 was 0
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_FIFO_FULL: Channel FIFO was full when TR
+ * received
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_OWN: Channel is not owned by the
+ * submitter
+ */
+enum cppi5_tr_resp_status_submission {
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ICNT0,
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_FIFO_FULL,
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_OWN,
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_MAX
+};
+
+/**
+ * enum cppi5_tr_resp_status_unsupported - TR Response Status field values which
+ * correspond to Unsupported Error
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_TR_TYPE: TR Type not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_STATIC: STATIC not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_EOL: EOL not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_CFG_SPECIFIC: CONFIGURATION SPECIFIC
+ * not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE: AMODE not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ELTYPE: ELTYPE not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_DFMT: DFMT not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_SECTR: SECTR not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE_SPECIFIC: AMODE SPECIFIC field
+ * not supported
+ */
+enum cppi5_tr_resp_status_unsupported {
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_TR_TYPE,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_STATIC,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_EOL,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_CFG_SPECIFIC,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ELTYPE,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_DFMT,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_SECTR,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE_SPECIFIC,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_MAX
+};
+
+/**
+ * cppi5_trdesc_calc_size - Calculate TR Descriptor size
+ * @tr_count: number of TR records
+ * @tr_size: Nominal size of TR record (max) [16, 32, 64, 128]
+ *
+ * Returns required TR Descriptor size
+ */
+static inline size_t cppi5_trdesc_calc_size(u32 tr_count, u32 tr_size)
+{
+ /*
+ * The Size of a TR descriptor is:
+ * 1 x tr_size : the first 16 bytes is used by the packet info block +
+ * tr_count x tr_size : Transfer Request Records +
+ * tr_count x sizeof(struct cppi5_tr_resp_t) : Transfer Response Records
+ */
+ return tr_size * (tr_count + 1) +
+ sizeof(struct cppi5_tr_resp_t) * tr_count;
+}
+
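+/*
+ * Worked example: three 32-byte TRs need
+ * cppi5_trdesc_calc_size(3, 32) = 32 * (3 + 1) + 4 * 3 = 140 bytes
+ * (one record for the info block, three TRs, three 4-byte responses).
+ */
+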
+/**
+ * cppi5_trdesc_init - Init TR Descriptor
+ * @desc_hdr: TR Descriptor header
+ * @tr_count: number of TR records
+ * @tr_size: Nominal size of TR record (max) [16, 32, 64, 128]
+ * @reload_idx: Absolute index to jump to on the 2nd and following passes
+ * through the TR packet.
+ * @reload_count: Number of times to jump from last entry to reload_idx. 0x1ff
+ * indicates infinite looping.
+ *
+ * Init TR Descriptor
+ */
+static inline void cppi5_trdesc_init(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 tr_count, u32 tr_size, u32 reload_idx,
+ u32 reload_count)
+{
+ desc_hdr->pkt_info0 = CPPI5_INFO0_DESC_TYPE_VAL_TR <<
+ CPPI5_INFO0_HDESC_TYPE_SHIFT;
+ desc_hdr->pkt_info0 |=
+ (reload_count << CPPI5_INFO0_TRDESC_RLDCNT_SHIFT) &
+ CPPI5_INFO0_TRDESC_RLDCNT_MASK;
+ desc_hdr->pkt_info0 |=
+ (reload_idx << CPPI5_INFO0_TRDESC_RLDIDX_SHIFT) &
+ CPPI5_INFO0_TRDESC_RLDIDX_MASK;
+ desc_hdr->pkt_info0 |= (tr_count - 1) & CPPI5_INFO0_TRDESC_LASTIDX_MASK;
+
+ desc_hdr->pkt_info1 |= ((ffs(tr_size >> 4) - 1) <<
+ CPPI5_INFO1_TRDESC_RECSIZE_SHIFT) &
+ CPPI5_INFO1_TRDESC_RECSIZE_MASK;
+}
+
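+/*
+ * Note: ffs(tr_size >> 4) - 1 maps the nominal record size onto the RECSIZE
+ * encoding: 16 -> VAL_16B, 32 -> VAL_32B, 64 -> VAL_64B, 128 -> VAL_128B.
+ */
+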
+/**
+ * cppi5_tr_init - Init TR record
+ * @flags: Pointer to the TR's flags
+ * @type: TR type
+ * @static_tr: TR is static
+ * @wait: Wait for TR completion before allowing the next TR to start
+ * @event_size: output event generation cfg
+ * @cmd_id: TR identifier (application specific)
+ *
+ * Init TR record
+ */
+static inline void cppi5_tr_init(cppi5_tr_flags_t *flags,
+ enum cppi5_tr_types type, bool static_tr,
+ bool wait, enum cppi5_tr_event_size event_size,
+ u32 cmd_id)
+{
+ *flags = type;
+ *flags |= (event_size << CPPI5_TR_EVENT_SIZE_SHIFT) &
+ CPPI5_TR_EVENT_SIZE_MASK;
+
+ *flags |= (cmd_id << CPPI5_TR_CMD_ID_SHIFT) &
+ CPPI5_TR_CMD_ID_MASK;
+
+ if (static_tr && (type == CPPI5_TR_TYPE8 || type == CPPI5_TR_TYPE9))
+ *flags |= CPPI5_TR_STATIC;
+
+ if (wait)
+ *flags |= CPPI5_TR_WAIT;
+}
+
+/**
+ * cppi5_tr_set_trigger - Configure trigger0/1 and trigger0/1_type
+ * @flags: Pointer to the TR's flags
+ * @trigger0: trigger0 selection
+ * @trigger0_type: type of data transfer that will be enabled by trigger0
+ * @trigger1: trigger1 selection
+ * @trigger1_type: type of data transfer that will be enabled by trigger1
+ *
+ * Configure the triggers for the TR
+ */
+static inline void cppi5_tr_set_trigger(cppi5_tr_flags_t *flags,
+ enum cppi5_tr_trigger trigger0,
+ enum cppi5_tr_trigger_type trigger0_type,
+ enum cppi5_tr_trigger trigger1,
+ enum cppi5_tr_trigger_type trigger1_type)
+{
+ *flags &= ~(CPPI5_TR_TRIGGER0_MASK | CPPI5_TR_TRIGGER0_TYPE_MASK |
+ CPPI5_TR_TRIGGER1_MASK | CPPI5_TR_TRIGGER1_TYPE_MASK);
+ *flags |= (trigger0 << CPPI5_TR_TRIGGER0_SHIFT) &
+ CPPI5_TR_TRIGGER0_MASK;
+ *flags |= (trigger0_type << CPPI5_TR_TRIGGER0_TYPE_SHIFT) &
+ CPPI5_TR_TRIGGER0_TYPE_MASK;
+
+ *flags |= (trigger1 << CPPI5_TR_TRIGGER1_SHIFT) &
+ CPPI5_TR_TRIGGER1_MASK;
+ *flags |= (trigger1_type << CPPI5_TR_TRIGGER1_TYPE_SHIFT) &
+ CPPI5_TR_TRIGGER1_TYPE_MASK;
+}
+
+/**
+ * cppi5_tr_csf_set - Update the Configuration specific flags
+ * @flags: Pointer to the TR's flags
+ * @csf: Configuration specific flags
+ *
+ * Replaces the Configuration Specific Flags section of the TR flags.
+ */
+static inline void cppi5_tr_csf_set(cppi5_tr_flags_t *flags, u32 csf)
+{
+ *flags &= ~CPPI5_TR_CSF_FLAGS_MASK;
+ *flags |= (csf << CPPI5_TR_CSF_FLAGS_SHIFT) &
+ CPPI5_TR_CSF_FLAGS_MASK;
+}
+
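+/*
+ * Putting a TR together (illustrative sketch; the type, trigger and flag
+ * choices below, and "tr_area", are assumptions):
+ *
+ *	struct cppi5_tr_type1_t *tr = tr_area;
+ *
+ *	cppi5_tr_init(&tr->flags, CPPI5_TR_TYPE1, false, false,
+ *		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ *	cppi5_tr_set_trigger(&tr->flags, CPPI5_TR_TRIGGER_NONE,
+ *			     CPPI5_TR_TRIGGER_TYPE_ALL, CPPI5_TR_TRIGGER_NONE,
+ *			     CPPI5_TR_TRIGGER_TYPE_ALL);
+ *	cppi5_tr_csf_set(&tr->flags, CPPI5_TR_CSF_SUPR_EVT);
+ */
+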
+#endif /* __TI_CPPI5_H__ */
diff --git a/include/linux/dma/xilinx_dma.h b/include/linux/dma/xilinx_dma.h
index 3ae300052553..0dde1a46ab75 100644
--- a/include/linux/dma/xilinx_dma.h
+++ b/include/linux/dma/xilinx_dma.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Xilinx DMA Engine drivers support header file
*
* Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __DMA_XILINX_DMA_H
@@ -27,6 +23,7 @@
* @delay: Delay counter
* @reset: Reset Channel
* @ext_fsync: External Frame Sync source
+ * @vflip_en: Vertical Flip enable
*/
struct xilinx_vdma_config {
int frm_dly;
@@ -39,20 +36,7 @@ struct xilinx_vdma_config {
int delay;
int reset;
int ext_fsync;
-};
-
-/**
- * enum xdma_ip_type: DMA IP type.
- *
- * XDMA_TYPE_AXIDMA: Axi dma ip.
- * XDMA_TYPE_CDMA: Axi cdma ip.
- * XDMA_TYPE_VDMA: Axi vdma ip.
- *
- */
-enum xdma_ip_type {
- XDMA_TYPE_AXIDMA = 0,
- XDMA_TYPE_CDMA,
- XDMA_TYPE_VDMA,
+ bool vflip_en;
};
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
diff --git a/include/linux/dma/xilinx_dpdma.h b/include/linux/dma/xilinx_dpdma.h
new file mode 100644
index 000000000000..02a4adf8921b
--- /dev/null
+++ b/include/linux/dma/xilinx_dpdma.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_DMA_XILINX_DPDMA_H
+#define __LINUX_DMA_XILINX_DPDMA_H
+
+#include <linux/types.h>
+
+struct xilinx_dpdma_peripheral_config {
+ bool video_group;
+};
+
+#endif /* __LINUX_DMA_XILINX_DPDMA_H */
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
deleted file mode 100644
index 90884072fa73..000000000000
--- a/include/linux/dma_remapping.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef _DMA_REMAPPING_H
-#define _DMA_REMAPPING_H
-
-/*
- * VT-d hardware uses 4KiB page size regardless of host page size.
- */
-#define VTD_PAGE_SHIFT (12)
-#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT)
-#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
-#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
-
-#define VTD_STRIDE_SHIFT (9)
-#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT)
-
-#define DMA_PTE_READ (1)
-#define DMA_PTE_WRITE (2)
-#define DMA_PTE_LARGE_PAGE (1 << 7)
-#define DMA_PTE_SNP (1 << 11)
-
-#define CONTEXT_TT_MULTI_LEVEL 0
-#define CONTEXT_TT_DEV_IOTLB 1
-#define CONTEXT_TT_PASS_THROUGH 2
-/* Extended context entry types */
-#define CONTEXT_TT_PT_PASID 4
-#define CONTEXT_TT_PT_PASID_DEV_IOTLB 5
-#define CONTEXT_TT_MASK (7ULL << 2)
-
-#define CONTEXT_DINVE (1ULL << 8)
-#define CONTEXT_PRS (1ULL << 9)
-#define CONTEXT_PASIDE (1ULL << 11)
-
-struct intel_iommu;
-struct dmar_domain;
-struct root_entry;
-
-
-#ifdef CONFIG_INTEL_IOMMU
-extern int iommu_calculate_agaw(struct intel_iommu *iommu);
-extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
-extern int dmar_disabled;
-extern int intel_iommu_enabled;
-extern int intel_iommu_tboot_noforce;
-#else
-static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
-{
- return 0;
-}
-static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
-{
- return 0;
-}
-#define dmar_disabled (1)
-#define intel_iommu_enabled (0)
-#endif
-
-
-#endif
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 533680860865..c3656e590213 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -1,18 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
*/
#ifndef LINUX_DMAENGINE_H
#define LINUX_DMAENGINE_H
@@ -51,6 +39,7 @@ enum dma_status {
DMA_IN_PROGRESS,
DMA_PAUSED,
DMA_ERROR,
+ DMA_OUT_OF_ORDER,
};
/**
@@ -68,12 +57,14 @@ enum dma_transaction_type {
DMA_MEMSET,
DMA_MEMSET_SG,
DMA_INTERRUPT,
- DMA_SG,
DMA_PRIVATE,
DMA_ASYNC_TX,
DMA_SLAVE,
DMA_CYCLIC,
DMA_INTERLEAVE,
+ DMA_COMPLETION_NO_ORDER,
+ DMA_REPEAT,
+ DMA_LOAD_EOT,
/* last transaction type for creation of the capabilities mask */
DMA_TX_TYPE_END,
};
@@ -96,9 +87,9 @@ enum dma_transfer_direction {
/**
* Interleaved Transfer Request
* ----------------------------
- * A chunk is collection of contiguous bytes to be transfered.
+ * A chunk is a collection of contiguous bytes to be transferred.
* The gap(in bytes) between two chunks is called inter-chunk-gap(ICG).
- * ICGs may or maynot change between chunks.
+ * ICGs may or may not change between chunks.
* A FRAME is the smallest series of contiguous {chunk,icg} pairs,
* that when repeated an integral number of times, specifies the transfer.
* A transfer template is specification of a Frame, the number of times
@@ -166,7 +157,7 @@ struct dma_interleaved_template {
bool dst_sgl;
size_t numf;
size_t frame_size;
- struct data_chunk sgl[0];
+ struct data_chunk sgl[];
};
/**
@@ -175,7 +166,7 @@ struct dma_interleaved_template {
* @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
* this transaction
* @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
- * acknowledges receipt, i.e. has has a chance to establish any dependency
+ * acknowledges receipt, i.e. has a chance to establish any dependency
* chains
* @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
* @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
@@ -186,6 +177,19 @@ struct dma_interleaved_template {
* on the result of this operation
* @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till
* cleared or freed
+ * @DMA_PREP_CMD: tell the driver that the data passed to DMA API is command
+ * data and the descriptor should be in a different format from normal
+ * data descriptors.
+ * @DMA_PREP_REPEAT: tell the driver that the transaction shall be automatically
+ * repeated when it ends until a transaction is issued on the same channel
+ * with the DMA_PREP_LOAD_EOT flag set. This flag is only applicable to
+ * interleaved transactions and is ignored for all other transaction types.
+ * @DMA_PREP_LOAD_EOT: tell the driver that the transaction shall replace any
+ * active repeated (as indicated by DMA_PREP_REPEAT) transaction when the
+ * repeated transaction ends. Not setting this flag when the previously queued
+ * transaction is marked with DMA_PREP_REPEAT will cause the new transaction
+ * to never be processed and stay in the issued queue forever. The flag is
+ * ignored if the previous transaction is not a repeated transaction.
*/
enum dma_ctrl_flags {
DMA_PREP_INTERRUPT = (1 << 0),
@@ -195,6 +199,9 @@ enum dma_ctrl_flags {
DMA_PREP_CONTINUE = (1 << 4),
DMA_PREP_FENCE = (1 << 5),
DMA_CTRL_REUSE = (1 << 6),
+ DMA_PREP_CMD = (1 << 7),
+ DMA_PREP_REPEAT = (1 << 8),
+ DMA_PREP_LOAD_EOT = (1 << 9),
};
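+
+/*
+ * Sketch of a repeated interleaved transfer ("chan" and "xt" are
+ * assumptions): the transaction re-executes until one carrying
+ * DMA_PREP_LOAD_EOT replaces it.
+ *
+ *	struct dma_async_tx_descriptor *d;
+ *
+ *	d = dmaengine_prep_interleaved_dma(chan, xt,
+ *					   DMA_PREP_REPEAT | DMA_PREP_LOAD_EOT);
+ *	dmaengine_submit(d);
+ *	dma_async_issue_pending(chan);
+ */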
/**
@@ -223,11 +230,66 @@ enum sum_check_flags {
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
/**
+ * enum dma_desc_metadata_mode - per descriptor metadata mode types supported
+ * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the
+ * client driver and it is attached (via the dmaengine_desc_attach_metadata()
+ * helper) to the descriptor.
+ *
+ * Client drivers interested in using this mode can follow:
+ * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * construct the metadata in the client's buffer
+ * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ * descriptor
+ * 3. submit the transfer
+ * - DMA_DEV_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ * descriptor
+ * 3. submit the transfer
+ * 4. when the transfer is completed, the metadata should be available in the
+ * attached buffer
+ *
+ * @DESC_METADATA_ENGINE - the metadata buffer is allocated/managed by the DMA
+ * driver. The client driver can ask for the pointer, maximum size and the
+ * currently used size of the metadata and can directly update or read it.
+ * dmaengine_desc_get_metadata_ptr() and dmaengine_desc_set_metadata_len() are
+ * provided as helper functions.
+ *
+ * Note: the metadata area for the descriptor is no longer valid after the
+ * transfer has been completed (valid up to the point when the completion
+ * callback returns if used).
+ *
+ * Client drivers interested in using this mode can follow:
+ * - DMA_MEM_TO_DEV / DMA_MEM_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the engine's
+ * metadata area
+ * 3. update the metadata at the pointer
+ * 4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the amount
+ * of data the client has placed into the metadata buffer
+ * 5. submit the transfer
+ * - DMA_DEV_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. submit the transfer
+ * 3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get the
+ * pointer to the engine's metadata area
+ * 4. Read out the metadata from the pointer
+ *
+ * Note: the two modes are not compatible and clients must use one mode for a
+ * descriptor.
+ */
+enum dma_desc_metadata_mode {
+ DESC_METADATA_NONE = 0,
+ DESC_METADATA_CLIENT = BIT(0),
+ DESC_METADATA_ENGINE = BIT(1),
+};
+
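+/*
+ * Minimal DESC_METADATA_CLIENT TX sketch ("chan", buffers and lengths are
+ * assumptions; error handling omitted):
+ *
+ *	struct dma_async_tx_descriptor *desc;
+ *
+ *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
+ *					   DMA_PREP_INTERRUPT);
+ *	dmaengine_desc_attach_metadata(desc, md_buf, md_len);
+ *	dmaengine_submit(desc);
+ *	dma_async_issue_pending(chan);
+ */
+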
+/**
* struct dma_chan_percpu - the per-CPU part of struct dma_chan
* @memcpy_count: transaction counter
* @bytes_transferred: byte counter
*/
-
struct dma_chan_percpu {
/* stats */
unsigned long memcpy_count;
@@ -247,10 +309,14 @@ struct dma_router {
/**
* struct dma_chan - devices supply DMA channels, clients use them
* @device: ptr to the dma device who supplies this channel, always !%NULL
+ * @slave: ptr to the device using this channel
* @cookie: last cookie value returned to client
* @completed_cookie: last completed cookie for this channel
* @chan_id: channel ID for sysfs
* @dev: class device for sysfs
+ * @name: backlink name for sysfs
+ * @dbg_client_name: slave name for debugfs in format:
+ * dev_name(requester's dev):channel name, for example: "2b00000.mcasp:tx"
* @device_node: used to add this to the device chan list
* @local: per-cpu pointer to a struct dma_chan_percpu
* @client_count: how many clients are using this channel
@@ -261,12 +327,17 @@ struct dma_router {
*/
struct dma_chan {
struct dma_device *device;
+ struct device *slave;
dma_cookie_t cookie;
dma_cookie_t completed_cookie;
/* sysfs */
int chan_id;
struct dma_chan_dev *dev;
+ const char *name;
+#ifdef CONFIG_DEBUG_FS
+ char *dbg_client_name;
+#endif
struct list_head device_node;
struct dma_chan_percpu __percpu *local;
@@ -285,13 +356,14 @@ struct dma_chan {
* @chan: driver channel device
* @device: sysfs device
* @dev_id: parent dma_device dev_id
- * @idr_ref: reference count to gate release of dma_device dev_id
+ * @chan_dma_dev: The channel is using custom/different dma-mapping
+ * compared to the parent dma_device
*/
struct dma_chan_dev {
struct dma_chan *chan;
struct device device;
int dev_id;
- atomic_t *idr_ref;
+ bool chan_dma_dev;
};
/**
@@ -308,6 +380,7 @@ enum dma_slave_buswidth {
DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
+ DMA_SLAVE_BUSWIDTH_128_BYTES = 128,
};
/**
@@ -321,12 +394,12 @@ enum dma_slave_buswidth {
* should be read (RX), if the source is memory this argument is
* ignored.
* @dst_addr: this is the physical address where DMA slave data
- * should be written (TX), if the source is memory this argument
+ * should be written (TX), if the destination is memory this argument
* is ignored.
* @src_addr_width: this is the width in bytes of the source (RX)
* register where DMA data shall be read. If the source
* is memory this may be ignored depending on architecture.
- * Legal values: 1, 2, 4, 8.
+ * Legal values: 1, 2, 3, 4, 8, 16, 32, 64, 128.
* @dst_addr_width: same as src_addr_width but for destination
* target (TX) mutatis mutandis.
* @src_maxburst: the maximum number of words (note: words, as in
@@ -345,9 +418,9 @@ enum dma_slave_buswidth {
* @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
* with 'true' if peripheral should be flow controller. Direction will be
* selected at Runtime.
- * @slave_id: Slave requester id. Only valid for slave channels. The dma
- * slave peripheral will have unique id as dma requester which need to be
- * pass as slave config.
+ * @peripheral_config: peripheral configuration for programming peripheral
+ * for dmaengine transfer
+ * @peripheral_size: peripheral configuration buffer size
*
* This struct is passed in as configuration data to a DMA engine
* in order to set up a certain channel for DMA transport at runtime.
@@ -372,7 +445,8 @@ struct dma_slave_config {
u32 src_port_window_size;
u32 dst_port_window_size;
bool device_fc;
- unsigned int slave_id;
+ void *peripheral_config;
+ size_t peripheral_size;
};
/**
@@ -401,16 +475,24 @@ enum dma_residue_granularity {
DMA_RESIDUE_GRANULARITY_BURST = 2,
};
-/* struct dma_slave_caps - expose capabilities of a slave channel only
- *
- * @src_addr_widths: bit mask of src addr widths the channel supports
- * @dst_addr_widths: bit mask of dstn addr widths the channel supports
- * @directions: bit mask of slave direction the channel supported
- * since the enum dma_transfer_direction is not defined as bits for each
- * type of direction, the dma controller should fill (1 << <TYPE>) and same
- * should be checked by controller as well
+/**
+ * struct dma_slave_caps - expose capabilities of a slave channel only
+ * @src_addr_widths: bit mask of src addr widths the channel supports.
+ * Width is specified in bytes, e.g. for a channel supporting
+ * a width of 4 the mask should have BIT(4) set.
+ * @dst_addr_widths: bit mask of dst addr widths the channel supports
+ * @directions: bit mask of slave directions the channel supports.
+ * Since the enum dma_transfer_direction is not defined as bit flag for
+ * each type, the dma controller should set BIT(<TYPE>) and same
+ * should be checked by controller as well
+ * @min_burst: min burst capability per-transfer
* @max_burst: max burst capability per-transfer
- * @cmd_pause: true, if pause and thereby resume is supported
+ * @max_sg_burst: max number of SG list entries executed in a single burst
+ * DMA transaction with no software intervention for reinitialization.
+ * Zero value means unlimited number of entries.
+ * @cmd_pause: true, if pause is supported (i.e. for reading residue or
+ * for resume later)
+ * @cmd_resume: true, if resume is supported
* @cmd_terminate: true, if terminate cmd is supported
* @residue_granularity: granularity of the reported transfer residue
* @descriptor_reuse: if a descriptor can be reused by client and
@@ -420,8 +502,11 @@ struct dma_slave_caps {
u32 src_addr_widths;
u32 dst_addr_widths;
u32 directions;
+ u32 min_burst;
u32 max_burst;
+ u32 max_sg_burst;
bool cmd_pause;
+ bool cmd_resume;
bool cmd_terminate;
enum dma_residue_granularity residue_granularity;
bool descriptor_reuse;
@@ -465,14 +550,30 @@ typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
const struct dmaengine_result *result);
struct dmaengine_unmap_data {
+#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
+ u16 map_cnt;
+#else
u8 map_cnt;
+#endif
u8 to_cnt;
u8 from_cnt;
u8 bidi_cnt;
struct device *dev;
struct kref kref;
size_t len;
- dma_addr_t addr[0];
+ dma_addr_t addr[];
+};
+
+struct dma_async_tx_descriptor;
+
+struct dma_descriptor_metadata_ops {
+ int (*attach)(struct dma_async_tx_descriptor *desc, void *data,
+ size_t len);
+
+ void *(*get_ptr)(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len);
+ int (*set_len)(struct dma_async_tx_descriptor *desc,
+ size_t payload_len);
};
/**
@@ -481,13 +582,18 @@ struct dmaengine_unmap_data {
* @cookie: tracking cookie for this transaction, set to -EBUSY if
* this tx is sitting on a dependency list
* @flags: flags to augment operation preparation, control completion, and
- * communicate status
+ * communicate status
* @phys: physical address of the descriptor
* @chan: target channel for this operation
* @tx_submit: accept the descriptor, assign ordered cookie and mark the
* descriptor pending. To be pushed on .issue_pending() call
* @callback: routine to call after this operation is complete
* @callback_param: general parameter to pass to the callback routine
+ * @desc_metadata_mode: core managed metadata mode to protect mixed use of
+ * DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. Otherwise
+ * DESC_METADATA_NONE
+ * @metadata_ops: DMA driver provided metadata mode ops, need to be set by the
+ * DMA driver if metadata mode is supported with the descriptor
* ---async_tx api specific fields---
* @next: at completion submit this descriptor
* @parent: pointer to the next level up in the dependency chain
@@ -504,6 +610,8 @@ struct dma_async_tx_descriptor {
dma_async_tx_callback_result callback_result;
void *callback_param;
struct dmaengine_unmap_data *unmap;
+ enum dma_desc_metadata_mode desc_metadata_mode;
+ struct dma_descriptor_metadata_ops *metadata_ops;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
struct dma_async_tx_descriptor *next;
struct dma_async_tx_descriptor *parent;
@@ -539,10 +647,11 @@ static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
- if (tx->unmap) {
- dmaengine_unmap_put(tx->unmap);
- tx->unmap = NULL;
- }
+ if (!tx->unmap)
+ return;
+
+ dmaengine_unmap_put(tx->unmap);
+ tx->unmap = NULL;
}
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
@@ -611,11 +720,13 @@ static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descr
* @residue: the remaining number of bytes left to transmit
* on the selected transfer for states DMA_IN_PROGRESS and
* DMA_PAUSED if this is implemented in the driver, else 0
+ * @in_flight_bytes: amount of data in bytes cached by the DMA.
*/
struct dma_tx_state {
dma_cookie_t last;
dma_cookie_t used;
u32 residue;
+ u32 in_flight_bytes;
};
/**
@@ -630,6 +741,8 @@ enum dmaengine_alignment {
DMAENGINE_ALIGN_16_BYTES = 4,
DMAENGINE_ALIGN_32_BYTES = 5,
DMAENGINE_ALIGN_64_BYTES = 6,
+ DMAENGINE_ALIGN_128_BYTES = 7,
+ DMAENGINE_ALIGN_256_BYTES = 8,
};
/**
@@ -660,12 +773,14 @@ struct dma_filter {
/**
* struct dma_device - info on the entity supplying DMA services
+ * @ref: reference is taken and put every time a channel is allocated or freed
* @chancnt: how many DMA channels are supported
* @privatecnt: how many DMA channels are requested by dma_request_channel
* @channels: the list of struct dma_chan
* @global_node: list_head for global dma_device_list
* @filter: information for device/slave to filter function/param mapping
* @cap_mask: one or more dma_capability flags
+ * @desc_metadata_modes: supported metadata modes by the DMA device
* @max_xor: maximum number of xor sources, 0 if no capability
* @max_pq: maximum number of PQ sources and PQ-continue capability
* @copy_align: alignment shift for memcpy operations
@@ -674,17 +789,27 @@ struct dma_filter {
* @fill_align: alignment shift for memset operations
* @dev_id: unique device ID
* @dev: struct device reference for dma mapping api
+ * @owner: owner module (automatically set based on the provided dev)
+ * @chan_ida: unique channel ID
* @src_addr_widths: bit mask of src addr widths the device supports
+ * Width is specified in bytes, e.g. for a device supporting
+ * a width of 4 the mask should have BIT(4) set.
* @dst_addr_widths: bit mask of dst addr widths the device supports
- * @directions: bit mask of slave direction the device supports since
- * the enum dma_transfer_direction is not defined as bits for
- * each type of direction, the dma controller should fill (1 <<
- * <TYPE>) and same should be checked by controller as well
+ * @directions: bit mask of slave directions the device supports.
+ * Since the enum dma_transfer_direction is not defined as bit flag for
+ * each type, the dma controller should set BIT(<TYPE>) and same
+ * should be checked by controller as well
+ * @min_burst: min burst capability per-transfer
* @max_burst: max burst capability per-transfer
+ * @max_sg_burst: max number of SG list entries executed in a single burst
+ * DMA transaction with no software intervention for reinitialization.
+ * Zero value means unlimited number of entries.
+ * @descriptor_reuse: a submitted transfer can be resubmitted after completion
* @residue_granularity: granularity of the transfer residue reported
* by tx_status
* @device_alloc_chan_resources: allocate resources and return the
* number of allocated descriptors
+ * @device_router_config: optional callback for DMA router configuration
* @device_free_chan_resources: release DMA channel's resources
* @device_prep_dma_memcpy: prepares a memcpy operation
* @device_prep_dma_xor: prepares a xor operation
@@ -700,6 +825,8 @@ struct dma_filter {
* be called after period_len bytes have been transferred.
* @device_prep_interleaved_dma: Transfer expression in a generic way.
* @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
+ * @device_caps: May be used to override the generic DMA slave capabilities
+ * with per-channel specific ones
* @device_config: Pushes a new configuration to a channel, return 0 or an error
* code
* @device_pause: Pauses any transfer happening on a channel. Returns
@@ -715,16 +842,24 @@ struct dma_filter {
* struct with auxiliary transfer status information, otherwise the call
* will just return a simple status code
* @device_issue_pending: push pending transactions to hardware
- * @descriptor_reuse: a submitted transfer can be resubmitted after completion
+ * @device_release: called sometime after dma_async_device_unregister() is
+ * called and there are no further references to this structure. This
+ * must be implemented to free resources; however, many existing drivers
+ * do not and are therefore not safe to unbind while in use.
+ * @dbg_summary_show: optional routine to show contents in debugfs; default code
+ * will be used when this is omitted, but custom code can show extra,
+ * controller specific information.
+ * @dbg_dev_root: the root folder in debugfs for this device
*/
struct dma_device {
-
+ struct kref ref;
unsigned int chancnt;
unsigned int privatecnt;
struct list_head channels;
struct list_head global_node;
struct dma_filter filter;
- dma_cap_mask_t cap_mask;
+ dma_cap_mask_t cap_mask;
+ enum dma_desc_metadata_mode desc_metadata_modes;
unsigned short max_xor;
unsigned short max_pq;
enum dmaengine_alignment copy_align;
@@ -735,15 +870,20 @@ struct dma_device {
int dev_id;
struct device *dev;
+ struct module *owner;
+ struct ida chan_ida;
u32 src_addr_widths;
u32 dst_addr_widths;
u32 directions;
+ u32 min_burst;
u32 max_burst;
+ u32 max_sg_burst;
bool descriptor_reuse;
enum dma_residue_granularity residue_granularity;
int (*device_alloc_chan_resources)(struct dma_chan *chan);
+ int (*device_router_config)(struct dma_chan *chan);
void (*device_free_chan_resources)(struct dma_chan *chan);
struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
@@ -771,11 +911,6 @@ struct dma_device {
unsigned int nents, int value, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
struct dma_chan *chan, unsigned long flags);
- struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
- struct dma_chan *chan,
- struct scatterlist *dst_sg, unsigned int dst_nents,
- struct scatterlist *src_sg, unsigned int src_nents,
- unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
struct dma_chan *chan, struct scatterlist *sgl,
@@ -792,8 +927,8 @@ struct dma_device {
struct dma_chan *chan, dma_addr_t dst, u64 data,
unsigned long flags);
- int (*device_config)(struct dma_chan *chan,
- struct dma_slave_config *config);
+ void (*device_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
+ int (*device_config)(struct dma_chan *chan, struct dma_slave_config *config);
int (*device_pause)(struct dma_chan *chan);
int (*device_resume)(struct dma_chan *chan);
int (*device_terminate_all)(struct dma_chan *chan);
@@ -803,6 +938,10 @@ struct dma_device {
dma_cookie_t cookie,
struct dma_tx_state *txstate);
void (*device_issue_pending)(struct dma_chan *chan);
+ void (*device_release)(struct dma_device *dev);
+ /* debugfs support */
+ void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev);
+ struct dentry *dbg_dev_root;
};
static inline int dmaengine_slave_config(struct dma_chan *chan,
@@ -879,10 +1018,21 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
{
if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
return NULL;
+ if (flags & DMA_PREP_REPEAT &&
+ !test_bit(DMA_REPEAT, chan->device->cap_mask.bits))
+ return NULL;
return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}
+/**
+ * dmaengine_prep_dma_memset() - Prepare a DMA memset descriptor.
+ * @chan: The channel to be used for this descriptor
+ * @dest: Address of buffer to be set
+ * @value: Treated as a single byte value that fills the destination buffer
+ * @len: The total size of dest
+ * @flags: DMA engine flags
+ */
static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
unsigned long flags)
@@ -905,18 +1055,40 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
len, flags);
}
-static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
- struct dma_chan *chan,
- struct scatterlist *dst_sg, unsigned int dst_nents,
- struct scatterlist *src_sg, unsigned int src_nents,
- unsigned long flags)
+static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
+ enum dma_desc_metadata_mode mode)
{
- if (!chan || !chan->device || !chan->device->device_prep_dma_sg)
- return NULL;
+ if (!chan)
+ return false;
+
+ return !!(chan->device->desc_metadata_modes & mode);
+}
- return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
- src_sg, src_nents, flags);
+#ifdef CONFIG_DMA_ENGINE
+int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+ void *data, size_t len);
+void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len);
+int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+ size_t payload_len);
+#else /* CONFIG_DMA_ENGINE */
+static inline int dmaengine_desc_attach_metadata(
+ struct dma_async_tx_descriptor *desc, void *data, size_t len)
+{
+ return -EINVAL;
+}
+static inline void *dmaengine_desc_get_metadata_ptr(
+ struct dma_async_tx_descriptor *desc, size_t *payload_len,
+ size_t *max_len)
+{
+ return NULL;
}
+static inline int dmaengine_desc_set_metadata_len(
+ struct dma_async_tx_descriptor *desc, size_t payload_len)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_DMA_ENGINE */
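For illustration, a client driver might use the new metadata calls roughly like this (a sketch, assuming a slave channel whose engine advertises the client metadata mode; chan, buf, len, md and md_len are assumed to come from the caller):

	struct dma_async_tx_descriptor *desc;
	int ret;

	if (!dmaengine_is_metadata_mode_supported(chan, DESC_METADATA_CLIENT))
		return -EOPNOTSUPP;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	/* attach the caller-provided metadata before submission */
	ret = dmaengine_desc_attach_metadata(desc, md, md_len);
	if (ret)
		return ret;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);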
/**
* dmaengine_terminate_all() - Terminate all active DMA transfers
@@ -946,7 +1118,7 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan)
* dmaengine_synchronize() needs to be called before it is safe to free
* any memory that is accessed by previously submitted descriptors or before
* freeing any resources accessed from within the completion callback of any
- * perviously submitted descriptors.
+ * previously submitted descriptors.
*
* This function can be called from atomic context as well as from within a
* complete callback of a descriptor submitted on the same channel.
@@ -968,7 +1140,7 @@ static inline int dmaengine_terminate_async(struct dma_chan *chan)
*
* Synchronizes to the DMA channel termination to the current context. When this
* function returns it is guaranteed that all transfers for previously issued
- * descriptors have stopped and and it is safe to free the memory assoicated
+ * descriptors have stopped and it is safe to free the memory associated
* with them. Furthermore it is guaranteed that all complete callback functions
* for a previously submitted descriptor have finished running and it is safe to
* free resources accessed from within the complete callbacks.
@@ -1045,14 +1217,7 @@ static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc
static inline bool dmaengine_check_align(enum dmaengine_alignment align,
size_t off1, size_t off2, size_t len)
{
- size_t mask;
-
- if (!align)
- return true;
- mask = (1 << align) - 1;
- if (mask & (off1 | off2 | len))
- return false;
- return true;
+ return !(((1 << align) - 1) & (off1 | off2 | len));
}
static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
@@ -1126,9 +1291,9 @@ static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
return dma_dev_to_maxpq(dma);
- else if (dmaf_p_disabled_continue(flags))
+ if (dmaf_p_disabled_continue(flags))
return dma_dev_to_maxpq(dma) - 1;
- else if (dmaf_continue(flags))
+ if (dmaf_continue(flags))
return dma_dev_to_maxpq(dma) - 3;
BUG();
}
@@ -1139,7 +1304,7 @@ static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
if (inc) {
if (dir_icg)
return dir_icg;
- else if (sgl)
+ if (sgl)
return icg;
}
@@ -1305,11 +1470,12 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
static inline void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
- if (st) {
- st->last = last;
- st->used = used;
- st->residue = residue;
- }
+ if (!st)
+ return;
+
+ st->last = last;
+ st->used = used;
+ st->residue = residue;
}
#ifdef CONFIG_DMA_ENGINE
@@ -1318,8 +1484,8 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
- dma_filter_fn fn, void *fn_param);
-struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
+ dma_filter_fn fn, void *fn_param,
+ struct device_node *np);
struct dma_chan *dma_request_chan(struct device *dev, const char *name);
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
@@ -1343,12 +1509,9 @@ static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
- dma_filter_fn fn, void *fn_param)
-{
- return NULL;
-}
-static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
- const char *name)
+ dma_filter_fn fn,
+ void *fn_param,
+ struct device_node *np)
{
return NULL;
}
@@ -1372,20 +1535,20 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
}
#endif
-#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)
-
static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
struct dma_slave_caps caps;
+ int ret;
- dma_get_slave_caps(tx->chan, &caps);
+ ret = dma_get_slave_caps(tx->chan, &caps);
+ if (ret)
+ return ret;
- if (caps.descriptor_reuse) {
- tx->flags |= DMA_CTRL_REUSE;
- return 0;
- } else {
+ if (!caps.descriptor_reuse)
return -EPERM;
- }
+
+ tx->flags |= DMA_CTRL_REUSE;
+ return 0;
}
static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
@@ -1401,25 +1564,36 @@ static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
{
/* this is supported for reusable desc, so check that */
- if (dmaengine_desc_test_reuse(desc))
- return desc->desc_free(desc);
- else
+ if (!dmaengine_desc_test_reuse(desc))
return -EPERM;
+
+ return desc->desc_free(desc);
}
/* --- DMA device --- */
int dma_async_device_register(struct dma_device *device);
+int dmaenginem_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
+int dma_async_device_channel_register(struct dma_device *device,
+ struct dma_chan *chan);
+void dma_async_device_channel_unregister(struct dma_device *device,
+ struct dma_chan *chan);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
-struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
-struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
-#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
-#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
- __dma_request_slave_channel_compat(&(mask), x, y, dev, name)
+#define dma_request_channel(mask, x, y) \
+ __dma_request_channel(&(mask), x, y, NULL)
+
+/* Deprecated, please use dma_request_chan() directly */
+static inline struct dma_chan * __deprecated
+dma_request_slave_channel(struct device *dev, const char *name)
+{
+ struct dma_chan *ch = dma_request_chan(dev, name);
+
+ return IS_ERR(ch) ? NULL : ch;
+}
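As the deprecation note suggests, new callers are expected to use dma_request_chan() directly and handle the error pointer themselves (a sketch; "tx" is an assumed channel name):

	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER */

	/* ... use the channel ... */
	dma_release_channel(chan);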
static inline struct dma_chan
-*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
+*dma_request_slave_channel_compat(const dma_cap_mask_t mask,
dma_filter_fn fn, void *fn_param,
struct device *dev, const char *name)
{
@@ -1432,6 +1606,32 @@ static inline struct dma_chan
if (!fn || !fn_param)
return NULL;
- return __dma_request_channel(mask, fn, fn_param);
+ return __dma_request_channel(&mask, fn, fn_param, NULL);
}
+
+static inline char *
+dmaengine_get_direction_text(enum dma_transfer_direction dir)
+{
+ switch (dir) {
+ case DMA_DEV_TO_MEM:
+ return "DEV_TO_MEM";
+ case DMA_MEM_TO_DEV:
+ return "MEM_TO_DEV";
+ case DMA_MEM_TO_MEM:
+ return "MEM_TO_MEM";
+ case DMA_DEV_TO_DEV:
+ return "DEV_TO_DEV";
+ default:
+ return "invalid";
+ }
+}
+
+static inline struct device *dmaengine_get_dma_device(struct dma_chan *chan)
+{
+ if (chan->dev->chan_dma_dev)
+ return &chan->dev->device;
+
+ return chan->device->dev;
+}
+
#endif /* DMAENGINE_H */
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index 53ba737505df..f632ecfb4238 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -16,6 +16,8 @@
struct device;
+#ifdef CONFIG_HAS_DMA
+
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
size_t size, size_t align, size_t allocation);
@@ -23,13 +25,6 @@ void dma_pool_destroy(struct dma_pool *pool);
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle);
-
-static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
- dma_addr_t *handle)
-{
- return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle);
-}
-
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
/*
@@ -39,5 +34,26 @@ struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
size_t size, size_t align, size_t allocation);
void dmam_pool_destroy(struct dma_pool *pool);
+#else /* !CONFIG_HAS_DMA */
+static inline struct dma_pool *dma_pool_create(const char *name,
+ struct device *dev, size_t size, size_t align, size_t allocation)
+{ return NULL; }
+static inline void dma_pool_destroy(struct dma_pool *pool) { }
+static inline void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
+ dma_addr_t *handle) { return NULL; }
+static inline void dma_pool_free(struct dma_pool *pool, void *vaddr,
+ dma_addr_t addr) { }
+static inline struct dma_pool *dmam_pool_create(const char *name,
+ struct device *dev, size_t size, size_t align, size_t allocation)
+{ return NULL; }
+static inline void dmam_pool_destroy(struct dma_pool *pool) { }
+#endif /* !CONFIG_HAS_DMA */
+
+static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
+ dma_addr_t *handle)
+{
+ return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle);
+}
+
#endif
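With the stubs above, callers can use the pool API unconditionally; on !CONFIG_HAS_DMA kernels the allocations simply fail. A minimal usage sketch (dev is an assumed struct device):

	struct dma_pool *pool;
	dma_addr_t handle;
	void *vaddr;

	pool = dma_pool_create("descs", dev, 64, 64, 0);
	if (!pool)
		return -ENOMEM;

	vaddr = dma_pool_zalloc(pool, GFP_KERNEL, &handle);	/* zeroed block */
	if (!vaddr) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	dma_pool_free(pool, vaddr, handle);
	dma_pool_destroy(pool);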
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index e8ffba1052d3..725d5e6acec0 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2006, Intel Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
* Copyright (C) Ashok Raj <ashok.raj@intel.com>
* Copyright (C) Shaohua Li <shaohua.li@intel.com>
*/
@@ -30,15 +18,12 @@
struct acpi_dmar_header;
-#ifdef CONFIG_X86
-# define DMAR_UNITS_SUPPORTED MAX_IO_APICS
-#else
-# define DMAR_UNITS_SUPPORTED 64
-#endif
+#define DMAR_UNITS_SUPPORTED 1024
/* DMAR Flags */
#define DMAR_INTR_REMAP 0x1
#define DMAR_X2APIC_OPT_OUT 0x2
+#define DMAR_PLATFORM_OPT_IN 0x4
struct intel_iommu;
@@ -54,11 +39,13 @@ struct dmar_drhd_unit {
struct list_head list; /* list of drhd units */
struct acpi_dmar_header *hdr; /* ACPI header */
u64 reg_base_addr; /* register base address*/
+ unsigned long reg_size; /* size of register set */
struct dmar_dev_scope *devices;/* target device array */
int devices_cnt; /* target device count */
u16 segment; /* PCI domain */
u8 ignored:1; /* ignore drhd */
u8 include_all:1;
+ u8 gfx_dedicated:1; /* graphic dedicated */
struct intel_iommu *iommu;
};
@@ -80,19 +67,23 @@ struct dmar_pci_notify_info {
extern struct rw_semaphore dmar_global_lock;
extern struct list_head dmar_drhd_units;
-#define for_each_drhd_unit(drhd) \
- list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)
+#define for_each_drhd_unit(drhd) \
+ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
+ dmar_rcu_check())
#define for_each_active_drhd_unit(drhd) \
- list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
+ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
+ dmar_rcu_check()) \
if (drhd->ignored) {} else
#define for_each_active_iommu(i, drhd) \
- list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
+ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
+ dmar_rcu_check()) \
if (i=drhd->iommu, drhd->ignored) {} else
#define for_each_iommu(i, drhd) \
- list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
+ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
+ dmar_rcu_check()) \
if (i=drhd->iommu, 0) {} else
static inline bool dmar_rcu_check(void)
@@ -103,15 +94,18 @@ static inline bool dmar_rcu_check(void)
#define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check())
-#define for_each_dev_scope(a, c, p, d) \
- for ((p) = 0; ((d) = (p) < (c) ? dmar_rcu_dereference((a)[(p)].dev) : \
- NULL, (p) < (c)); (p)++)
+#define for_each_dev_scope(devs, cnt, i, tmp) \
+ for ((i) = 0; ((tmp) = (i) < (cnt) ? \
+ dmar_rcu_dereference((devs)[(i)].dev) : NULL, (i) < (cnt)); \
+ (i)++)
-#define for_each_active_dev_scope(a, c, p, d) \
- for_each_dev_scope((a), (c), (p), (d)) if (!(d)) { continue; } else
+#define for_each_active_dev_scope(devs, cnt, i, tmp) \
+ for_each_dev_scope((devs), (cnt), (i), (tmp)) \
+ if (!(tmp)) { continue; } else
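For illustration, these iterators run under RCU (or dmar_global_lock), which is what the added dmar_rcu_check() cookie asserts (a sketch; the loop body is illustrative):

	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		/* units with drhd->ignored set are skipped by the macro */
		pr_info("DRHD at 0x%llx, segment %u\n",
			drhd->reg_base_addr, drhd->segment);
	}
	rcu_read_unlock();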
extern int dmar_table_init(void);
extern int dmar_dev_scope_init(void);
+extern void dmar_register_bus_notifier(void);
extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
struct dmar_dev_scope **devices, u16 segment);
extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);
@@ -124,7 +118,7 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
u16 segment, struct dmar_dev_scope *devices,
int count);
/* Intel IOMMU detection */
-extern int detect_intel_iommu(void);
+void detect_intel_iommu(void);
extern int enable_drhd_fault_handling(void);
extern int dmar_device_add(acpi_handle handle);
extern int dmar_device_remove(acpi_handle handle);
@@ -134,22 +128,34 @@ static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
return 0;
}
+#ifdef CONFIG_DMAR_DEBUG
+void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
+ unsigned long long addr, u32 pasid);
+#else
+static inline void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
+ unsigned long long addr, u32 pasid) {}
+#endif
+
#ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu;
extern int intel_iommu_init(void);
+extern void intel_iommu_shutdown(void);
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
+extern int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
#else /* !CONFIG_INTEL_IOMMU: */
static inline int intel_iommu_init(void) { return -ENODEV; }
+static inline void intel_iommu_shutdown(void) { }
#define dmar_parse_one_rmrr dmar_res_noop
#define dmar_parse_one_atsr dmar_res_noop
#define dmar_check_one_atsr dmar_res_noop
#define dmar_release_one_atsr dmar_res_noop
+#define dmar_parse_one_satc dmar_res_noop
static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
@@ -169,6 +175,8 @@ static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{ return 0; }
#endif /* CONFIG_IRQ_REMAP */
+extern bool dmar_platform_optin(void);
+
#else /* CONFIG_DMAR_TABLE */
static inline int dmar_device_add(void *handle)
@@ -181,6 +189,15 @@ static inline int dmar_device_remove(void *handle)
return 0;
}
+static inline bool dmar_platform_optin(void)
+{
+ return false;
+}
+
+static inline void detect_intel_iommu(void)
+{
+}
+
#endif /* CONFIG_DMAR_TABLE */
struct irte {
@@ -264,11 +281,6 @@ static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src)
#define PDA_LOW_BIT 26
#define PDA_HIGH_BIT 32
-enum {
- IRQ_REMAP_XAPIC_MODE,
- IRQ_REMAP_X2APIC_MODE,
-};
-
/* Can't use the common MSI interrupt functions
* since DMAR is not a pci device
*/
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index 9bbf21a516e4..927f8a8b7a1d 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __DMI_H__
#define __DMI_H__
@@ -101,10 +102,9 @@ const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list);
extern const char * dmi_get_system_info(int field);
extern const struct dmi_device * dmi_find_device(int type, const char *name,
const struct dmi_device *from);
-extern void dmi_scan_machine(void);
-extern void dmi_memdev_walk(void);
-extern void dmi_set_dump_stack_arch_desc(void);
+extern void dmi_setup(void);
extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp);
+extern int dmi_get_bios_year(void);
extern int dmi_name_in_vendors(const char *str);
extern int dmi_name_in_serial(const char *str);
extern int dmi_available;
@@ -112,6 +112,9 @@ extern int dmi_walk(void (*decode)(const struct dmi_header *, void *),
void *private_data);
extern bool dmi_match(enum dmi_field f, const char *str);
extern void dmi_memdev_name(u16 handle, const char **bank, const char **device);
+extern u64 dmi_memdev_size(u16 handle);
+extern u8 dmi_memdev_type(u16 handle);
+extern u16 dmi_memdev_handle(int slot);
#else
@@ -119,9 +122,7 @@ static inline int dmi_check_system(const struct dmi_system_id *list) { return 0;
static inline const char * dmi_get_system_info(int field) { return NULL; }
static inline const struct dmi_device * dmi_find_device(int type, const char *name,
const struct dmi_device *from) { return NULL; }
-static inline void dmi_scan_machine(void) { return; }
-static inline void dmi_memdev_walk(void) { }
-static inline void dmi_set_dump_stack_arch_desc(void) { }
+static inline void dmi_setup(void) { }
static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
{
if (yearp)
@@ -132,6 +133,7 @@ static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
*dayp = 0;
return false;
}
+static inline int dmi_get_bios_year(void) { return -ENXIO; }
static inline int dmi_name_in_vendors(const char *s) { return 0; }
static inline int dmi_name_in_serial(const char *s) { return 0; }
#define dmi_available 0
@@ -141,6 +143,9 @@ static inline bool dmi_match(enum dmi_field f, const char *str)
{ return false; }
static inline void dmi_memdev_name(u16 handle, const char **bank,
const char **device) { }
+static inline u64 dmi_memdev_size(u16 handle) { return ~0ul; }
+static inline u8 dmi_memdev_type(u16 handle) { return 0x0; }
+static inline u16 dmi_memdev_handle(int slot) { return 0xffff; }
static inline const struct dmi_system_id *
dmi_first_match(const struct dmi_system_id *list) { return NULL; }
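dmi_get_bios_year() gives quirk code a compact alternative to parsing the DMI date by hand (a sketch; apply_legacy_quirk() is a hypothetical helper, and the negative error return also takes the quirk path here):

	/* e.g. work around firmware bugs on pre-2015 BIOSes */
	if (dmi_get_bios_year() < 2015)
		apply_legacy_quirk();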
diff --git a/include/linux/dnotify.h b/include/linux/dnotify.h
index 3290555a52ee..b1d26f9f1c9f 100644
--- a/include/linux/dnotify.h
+++ b/include/linux/dnotify.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DNOTIFY_H
#define _LINUX_DNOTIFY_H
/*
@@ -25,10 +26,9 @@ struct dnotify_struct {
FS_MODIFY | FS_MODIFY_CHILD |\
FS_ACCESS | FS_ACCESS_CHILD |\
FS_ATTRIB | FS_ATTRIB_CHILD |\
- FS_CREATE | FS_DN_RENAME |\
+ FS_CREATE | FS_RENAME |\
FS_MOVED_FROM | FS_MOVED_TO)
-extern int dir_notify_enable;
extern void dnotify_flush(struct file *, fl_owner_t);
extern int fcntl_dirnotify(int, struct file *, unsigned long);
diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h
index 6ac3cad9aef1..976cbbdb2832 100644
--- a/include/linux/dns_resolver.h
+++ b/include/linux/dns_resolver.h
@@ -24,11 +24,11 @@
#ifndef _LINUX_DNS_RESOLVER_H
#define _LINUX_DNS_RESOLVER_H
-#ifdef __KERNEL__
+#include <uapi/linux/dns_resolver.h>
-extern int dns_query(const char *type, const char *name, size_t namelen,
- const char *options, char **_result, time64_t *_expiry);
-
-#endif /* KERNEL */
+struct net;
+extern int dns_query(struct net *net, const char *type, const char *name, size_t namelen,
+ const char *options, char **_result, time64_t *_expiry,
+ bool invalidate);
#endif /* _LINUX_DNS_RESOLVER_H */
diff --git a/include/linux/dqblk_qtree.h b/include/linux/dqblk_qtree.h
index 0de21e935976..100d22a46b82 100644
--- a/include/linux/dqblk_qtree.h
+++ b/include/linux/dqblk_qtree.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions of structures and functions for quota formats using trie
*/
diff --git a/include/linux/dqblk_v1.h b/include/linux/dqblk_v1.h
index c0d4d1e2a45c..85d837a14838 100644
--- a/include/linux/dqblk_v1.h
+++ b/include/linux/dqblk_v1.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* File with in-memory structures of old quota format
*/
diff --git a/include/linux/dqblk_v2.h b/include/linux/dqblk_v2.h
index 18000a542677..da95932ad9e7 100644
--- a/include/linux/dqblk_v2.h
+++ b/include/linux/dqblk_v2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for vfsv0 quota format
*/
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 002611c85318..5468a2399d48 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
drbd.h
Kernel module for 2.6.x Kernels
@@ -8,19 +9,6 @@
Copyright (C) 2001-2008, Philipp Reisner <philipp.reisner@linbit.com>.
Copyright (C) 2001-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
- drbd is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- drbd is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with drbd; see the file COPYING. If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef DRBD_H
@@ -50,13 +38,6 @@
#endif
-extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.4.7"
-#define API_VERSION 1
-#define PRO_VERSION_MIN 86
-#define PRO_VERSION_MAX 101
-
-
enum drbd_io_error_p {
EP_PASS_ON, /* FIXME: should this better be named "Ignore"? */
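<!-- placeholder removed -->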
EP_CALL_HELPER,
diff --git a/include/linux/drbd_config.h b/include/linux/drbd_config.h
new file mode 100644
index 000000000000..d215365c6bb1
--- /dev/null
+++ b/include/linux/drbd_config.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * drbd_config.h
+ * DRBD's compile time configuration.
+ */
+
+#ifndef DRBD_CONFIG_H
+#define DRBD_CONFIG_H
+
+extern const char *drbd_buildtag(void);
+
+#define REL_VERSION "8.4.11"
+#define PRO_VERSION_MIN 86
+#define PRO_VERSION_MAX 101
+
+#endif
diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h
index 2896f93808ae..53f44b8cd75f 100644
--- a/include/linux/drbd_genl.h
+++ b/include/linux/drbd_genl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* General overview:
* full generic netlink message:
@@ -132,7 +133,8 @@ GENL_struct(DRBD_NLA_DISK_CONF, 3, disk_conf,
__flg_field_def(18, DRBD_GENLA_F_MANDATORY, disk_drain, DRBD_DISK_DRAIN_DEF)
__flg_field_def(19, DRBD_GENLA_F_MANDATORY, md_flushes, DRBD_MD_FLUSHES_DEF)
__flg_field_def(23, 0 /* OPTIONAL */, al_updates, DRBD_AL_UPDATES_DEF)
- __flg_field_def(24, 0 /* OPTIONAL */, discard_zeroes_if_aligned, DRBD_DISCARD_ZEROES_IF_ALIGNED)
+ __flg_field_def(24, 0 /* OPTIONAL */, discard_zeroes_if_aligned, DRBD_DISCARD_ZEROES_IF_ALIGNED_DEF)
+ __flg_field_def(26, 0 /* OPTIONAL */, disable_write_same, DRBD_DISABLE_WRITE_SAME_DEF)
)
GENL_struct(DRBD_NLA_RESOURCE_OPTS, 4, res_opts,
diff --git a/include/linux/drbd_genl_api.h b/include/linux/drbd_genl_api.h
index 9ef50d51e34e..70682c058027 100644
--- a/include/linux/drbd_genl_api.h
+++ b/include/linux/drbd_genl_api.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DRBD_GENL_STRUCT_H
#define DRBD_GENL_STRUCT_H
@@ -46,7 +47,7 @@ enum drbd_state_info_bcast_reason {
#undef linux
#include <linux/drbd.h>
-#define GENL_MAGIC_VERSION API_VERSION
+#define GENL_MAGIC_VERSION 1
#define GENL_MAGIC_FAMILY drbd
#define GENL_MAGIC_FAMILY_HDRSZ sizeof(struct drbd_genlmsghdr)
#define GENL_MAGIC_INCLUDE_FILE <linux/drbd_genl.h>
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
index ddac68422a96..5b042fb427e9 100644
--- a/include/linux/drbd_limits.h
+++ b/include/linux/drbd_limits.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
drbd_limits.h
This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
@@ -15,123 +16,123 @@
#define DEBUG_RANGE_CHECK 0
-#define DRBD_MINOR_COUNT_MIN 1
-#define DRBD_MINOR_COUNT_MAX 255
-#define DRBD_MINOR_COUNT_DEF 32
+#define DRBD_MINOR_COUNT_MIN 1U
+#define DRBD_MINOR_COUNT_MAX 255U
+#define DRBD_MINOR_COUNT_DEF 32U
#define DRBD_MINOR_COUNT_SCALE '1'
-#define DRBD_VOLUME_MAX 65535
+#define DRBD_VOLUME_MAX 65534U
-#define DRBD_DIALOG_REFRESH_MIN 0
-#define DRBD_DIALOG_REFRESH_MAX 600
+#define DRBD_DIALOG_REFRESH_MIN 0U
+#define DRBD_DIALOG_REFRESH_MAX 600U
#define DRBD_DIALOG_REFRESH_SCALE '1'
/* valid port number */
-#define DRBD_PORT_MIN 1
-#define DRBD_PORT_MAX 0xffff
+#define DRBD_PORT_MIN 1U
+#define DRBD_PORT_MAX 0xffffU
#define DRBD_PORT_SCALE '1'
/* startup { */
/* if you want more than 3.4 days, disable */
-#define DRBD_WFC_TIMEOUT_MIN 0
-#define DRBD_WFC_TIMEOUT_MAX 300000
-#define DRBD_WFC_TIMEOUT_DEF 0
+#define DRBD_WFC_TIMEOUT_MIN 0U
+#define DRBD_WFC_TIMEOUT_MAX 300000U
+#define DRBD_WFC_TIMEOUT_DEF 0U
#define DRBD_WFC_TIMEOUT_SCALE '1'
-#define DRBD_DEGR_WFC_TIMEOUT_MIN 0
-#define DRBD_DEGR_WFC_TIMEOUT_MAX 300000
-#define DRBD_DEGR_WFC_TIMEOUT_DEF 0
+#define DRBD_DEGR_WFC_TIMEOUT_MIN 0U
+#define DRBD_DEGR_WFC_TIMEOUT_MAX 300000U
+#define DRBD_DEGR_WFC_TIMEOUT_DEF 0U
#define DRBD_DEGR_WFC_TIMEOUT_SCALE '1'
-#define DRBD_OUTDATED_WFC_TIMEOUT_MIN 0
-#define DRBD_OUTDATED_WFC_TIMEOUT_MAX 300000
-#define DRBD_OUTDATED_WFC_TIMEOUT_DEF 0
+#define DRBD_OUTDATED_WFC_TIMEOUT_MIN 0U
+#define DRBD_OUTDATED_WFC_TIMEOUT_MAX 300000U
+#define DRBD_OUTDATED_WFC_TIMEOUT_DEF 0U
#define DRBD_OUTDATED_WFC_TIMEOUT_SCALE '1'
/* }*/
/* net { */
/* timeout, in centiseconds;
 * a timeout of more than one minute is not useful */
-#define DRBD_TIMEOUT_MIN 1
-#define DRBD_TIMEOUT_MAX 600
-#define DRBD_TIMEOUT_DEF 60 /* 6 seconds */
+#define DRBD_TIMEOUT_MIN 1U
+#define DRBD_TIMEOUT_MAX 600U
+#define DRBD_TIMEOUT_DEF 60U /* 6 seconds */
#define DRBD_TIMEOUT_SCALE '1'
/* If backing disk takes longer than disk_timeout, mark the disk as failed */
-#define DRBD_DISK_TIMEOUT_MIN 0 /* 0 = disabled */
-#define DRBD_DISK_TIMEOUT_MAX 6000 /* 10 Minutes */
-#define DRBD_DISK_TIMEOUT_DEF 0 /* disabled */
+#define DRBD_DISK_TIMEOUT_MIN 0U /* 0 = disabled */
+#define DRBD_DISK_TIMEOUT_MAX 6000U /* 10 Minutes */
+#define DRBD_DISK_TIMEOUT_DEF 0U /* disabled */
#define DRBD_DISK_TIMEOUT_SCALE '1'
/* active connection retries when C_WF_CONNECTION */
-#define DRBD_CONNECT_INT_MIN 1
-#define DRBD_CONNECT_INT_MAX 120
-#define DRBD_CONNECT_INT_DEF 10 /* seconds */
+#define DRBD_CONNECT_INT_MIN 1U
+#define DRBD_CONNECT_INT_MAX 120U
+#define DRBD_CONNECT_INT_DEF 10U /* seconds */
#define DRBD_CONNECT_INT_SCALE '1'
/* keep-alive probes when idle */
-#define DRBD_PING_INT_MIN 1
-#define DRBD_PING_INT_MAX 120
-#define DRBD_PING_INT_DEF 10
+#define DRBD_PING_INT_MIN 1U
+#define DRBD_PING_INT_MAX 120U
+#define DRBD_PING_INT_DEF 10U
#define DRBD_PING_INT_SCALE '1'
/* timeout for the ping packets.*/
-#define DRBD_PING_TIMEO_MIN 1
-#define DRBD_PING_TIMEO_MAX 300
-#define DRBD_PING_TIMEO_DEF 5
+#define DRBD_PING_TIMEO_MIN 1U
+#define DRBD_PING_TIMEO_MAX 300U
+#define DRBD_PING_TIMEO_DEF 5U
#define DRBD_PING_TIMEO_SCALE '1'
/* max number of write requests between write barriers */
-#define DRBD_MAX_EPOCH_SIZE_MIN 1
-#define DRBD_MAX_EPOCH_SIZE_MAX 20000
-#define DRBD_MAX_EPOCH_SIZE_DEF 2048
+#define DRBD_MAX_EPOCH_SIZE_MIN 1U
+#define DRBD_MAX_EPOCH_SIZE_MAX 20000U
+#define DRBD_MAX_EPOCH_SIZE_DEF 2048U
#define DRBD_MAX_EPOCH_SIZE_SCALE '1'
/* I don't think that a tcp send buffer of more than 10M is useful */
-#define DRBD_SNDBUF_SIZE_MIN 0
-#define DRBD_SNDBUF_SIZE_MAX (10<<20)
-#define DRBD_SNDBUF_SIZE_DEF 0
+#define DRBD_SNDBUF_SIZE_MIN 0U
+#define DRBD_SNDBUF_SIZE_MAX (10U<<20)
+#define DRBD_SNDBUF_SIZE_DEF 0U
#define DRBD_SNDBUF_SIZE_SCALE '1'
-#define DRBD_RCVBUF_SIZE_MIN 0
-#define DRBD_RCVBUF_SIZE_MAX (10<<20)
-#define DRBD_RCVBUF_SIZE_DEF 0
+#define DRBD_RCVBUF_SIZE_MIN 0U
+#define DRBD_RCVBUF_SIZE_MAX (10U<<20)
+#define DRBD_RCVBUF_SIZE_DEF 0U
#define DRBD_RCVBUF_SIZE_SCALE '1'
/* @4k PageSize -> 128kB - 512MB */
-#define DRBD_MAX_BUFFERS_MIN 32
-#define DRBD_MAX_BUFFERS_MAX 131072
-#define DRBD_MAX_BUFFERS_DEF 2048
+#define DRBD_MAX_BUFFERS_MIN 32U
+#define DRBD_MAX_BUFFERS_MAX 131072U
+#define DRBD_MAX_BUFFERS_DEF 2048U
#define DRBD_MAX_BUFFERS_SCALE '1'
/* @4k PageSize -> 4kB - 512MB */
-#define DRBD_UNPLUG_WATERMARK_MIN 1
-#define DRBD_UNPLUG_WATERMARK_MAX 131072
+#define DRBD_UNPLUG_WATERMARK_MIN 1U
+#define DRBD_UNPLUG_WATERMARK_MAX 131072U
#define DRBD_UNPLUG_WATERMARK_DEF (DRBD_MAX_BUFFERS_DEF/16)
#define DRBD_UNPLUG_WATERMARK_SCALE '1'
/* 0 is disabled.
* 200 should be more than enough even for very short timeouts */
-#define DRBD_KO_COUNT_MIN 0
-#define DRBD_KO_COUNT_MAX 200
-#define DRBD_KO_COUNT_DEF 7
+#define DRBD_KO_COUNT_MIN 0U
+#define DRBD_KO_COUNT_MAX 200U
+#define DRBD_KO_COUNT_DEF 7U
#define DRBD_KO_COUNT_SCALE '1'
/* } */
/* syncer { */
/* FIXME allow rate to be zero? */
-#define DRBD_RESYNC_RATE_MIN 1
+#define DRBD_RESYNC_RATE_MIN 1U
/* channel bonding 10 GbE, or other hardware */
#define DRBD_RESYNC_RATE_MAX (4 << 20)
-#define DRBD_RESYNC_RATE_DEF 250
+#define DRBD_RESYNC_RATE_DEF 250U
#define DRBD_RESYNC_RATE_SCALE 'k' /* kilobytes */
-#define DRBD_AL_EXTENTS_MIN 67
+#define DRBD_AL_EXTENTS_MIN 67U
/* we use u16 as "slot number", (u16)~0 is "FREE".
* If you use >= 292 kB on-disk ring buffer,
* this is the maximum you can use: */
-#define DRBD_AL_EXTENTS_MAX 0xfffe
-#define DRBD_AL_EXTENTS_DEF 1237
+#define DRBD_AL_EXTENTS_MAX 0xfffeU
+#define DRBD_AL_EXTENTS_DEF 1237U
#define DRBD_AL_EXTENTS_SCALE '1'
#define DRBD_MINOR_NUMBER_MIN -1
@@ -146,9 +147,9 @@
* the upper limit with 64bit kernel, enough ram and flexible meta data
* is 1 PiB, currently. */
/* DRBD_MAX_SECTORS */
-#define DRBD_DISK_SIZE_MIN 0
-#define DRBD_DISK_SIZE_MAX (1 * (2LLU << 40))
-#define DRBD_DISK_SIZE_DEF 0 /* = disabled = no user size... */
+#define DRBD_DISK_SIZE_MIN 0LLU
+#define DRBD_DISK_SIZE_MAX (1LLU * (2LLU << 40))
+#define DRBD_DISK_SIZE_DEF 0LLU /* = disabled = no user size... */
#define DRBD_DISK_SIZE_SCALE 's' /* sectors */
#define DRBD_ON_IO_ERROR_DEF EP_DETACH
@@ -161,39 +162,39 @@
#define DRBD_ON_CONGESTION_DEF OC_BLOCK
#define DRBD_READ_BALANCING_DEF RB_PREFER_LOCAL
-#define DRBD_MAX_BIO_BVECS_MIN 0
-#define DRBD_MAX_BIO_BVECS_MAX 128
-#define DRBD_MAX_BIO_BVECS_DEF 0
+#define DRBD_MAX_BIO_BVECS_MIN 0U
+#define DRBD_MAX_BIO_BVECS_MAX 128U
+#define DRBD_MAX_BIO_BVECS_DEF 0U
#define DRBD_MAX_BIO_BVECS_SCALE '1'
-#define DRBD_C_PLAN_AHEAD_MIN 0
-#define DRBD_C_PLAN_AHEAD_MAX 300
-#define DRBD_C_PLAN_AHEAD_DEF 20
+#define DRBD_C_PLAN_AHEAD_MIN 0U
+#define DRBD_C_PLAN_AHEAD_MAX 300U
+#define DRBD_C_PLAN_AHEAD_DEF 20U
#define DRBD_C_PLAN_AHEAD_SCALE '1'
-#define DRBD_C_DELAY_TARGET_MIN 1
-#define DRBD_C_DELAY_TARGET_MAX 100
-#define DRBD_C_DELAY_TARGET_DEF 10
+#define DRBD_C_DELAY_TARGET_MIN 1U
+#define DRBD_C_DELAY_TARGET_MAX 100U
+#define DRBD_C_DELAY_TARGET_DEF 10U
#define DRBD_C_DELAY_TARGET_SCALE '1'
-#define DRBD_C_FILL_TARGET_MIN 0
-#define DRBD_C_FILL_TARGET_MAX (1<<20) /* 500MByte in sec */
-#define DRBD_C_FILL_TARGET_DEF 100 /* Try to place 50KiB in socket send buffer during resync */
+#define DRBD_C_FILL_TARGET_MIN 0U
+#define DRBD_C_FILL_TARGET_MAX (1U<<20) /* 500MByte in sec */
+#define DRBD_C_FILL_TARGET_DEF 100U /* Try to place 50KiB in socket send buffer during resync */
#define DRBD_C_FILL_TARGET_SCALE 's' /* sectors */
-#define DRBD_C_MAX_RATE_MIN 250
-#define DRBD_C_MAX_RATE_MAX (4 << 20)
-#define DRBD_C_MAX_RATE_DEF 102400
+#define DRBD_C_MAX_RATE_MIN 250U
+#define DRBD_C_MAX_RATE_MAX (4U << 20)
+#define DRBD_C_MAX_RATE_DEF 102400U
#define DRBD_C_MAX_RATE_SCALE 'k' /* kilobytes */
-#define DRBD_C_MIN_RATE_MIN 0
-#define DRBD_C_MIN_RATE_MAX (4 << 20)
-#define DRBD_C_MIN_RATE_DEF 250
+#define DRBD_C_MIN_RATE_MIN 0U
+#define DRBD_C_MIN_RATE_MAX (4U << 20)
+#define DRBD_C_MIN_RATE_DEF 250U
#define DRBD_C_MIN_RATE_SCALE 'k' /* kilobytes */
-#define DRBD_CONG_FILL_MIN 0
-#define DRBD_CONG_FILL_MAX (10<<21) /* 10GByte in sectors */
-#define DRBD_CONG_FILL_DEF 0
+#define DRBD_CONG_FILL_MIN 0U
+#define DRBD_CONG_FILL_MAX (10U<<21) /* 10GByte in sectors */
+#define DRBD_CONG_FILL_DEF 0U
#define DRBD_CONG_FILL_SCALE 's' /* sectors */
#define DRBD_CONG_EXTENTS_MIN DRBD_AL_EXTENTS_MIN
@@ -203,42 +204,48 @@
#define DRBD_PROTOCOL_DEF DRBD_PROT_C
-#define DRBD_DISK_BARRIER_DEF 0
-#define DRBD_DISK_FLUSHES_DEF 1
-#define DRBD_DISK_DRAIN_DEF 1
-#define DRBD_MD_FLUSHES_DEF 1
-#define DRBD_TCP_CORK_DEF 1
-#define DRBD_AL_UPDATES_DEF 1
+#define DRBD_DISK_BARRIER_DEF 0U
+#define DRBD_DISK_FLUSHES_DEF 1U
+#define DRBD_DISK_DRAIN_DEF 1U
+#define DRBD_MD_FLUSHES_DEF 1U
+#define DRBD_TCP_CORK_DEF 1U
+#define DRBD_AL_UPDATES_DEF 1U
+
/* We used to ignore the discard_zeroes_data setting.
* To not change established (and expected) behaviour,
* by default assume that, for discard_zeroes_data=0,
* we can make that an effective discard_zeroes_data=1,
* if we only explicitly zero-out unaligned partial chunks. */
-#define DRBD_DISCARD_ZEROES_IF_ALIGNED 1
+#define DRBD_DISCARD_ZEROES_IF_ALIGNED_DEF 1U
+
+/* Some backends pretend to support WRITE SAME,
+ * but fail such requests when they are actually submitted.
+ * This is to tell DRBD to not even try. */
+#define DRBD_DISABLE_WRITE_SAME_DEF 0U
-#define DRBD_ALLOW_TWO_PRIMARIES_DEF 0
-#define DRBD_ALWAYS_ASBP_DEF 0
-#define DRBD_USE_RLE_DEF 1
-#define DRBD_CSUMS_AFTER_CRASH_ONLY_DEF 0
+#define DRBD_ALLOW_TWO_PRIMARIES_DEF 0U
+#define DRBD_ALWAYS_ASBP_DEF 0U
+#define DRBD_USE_RLE_DEF 1U
+#define DRBD_CSUMS_AFTER_CRASH_ONLY_DEF 0U
-#define DRBD_AL_STRIPES_MIN 1
-#define DRBD_AL_STRIPES_MAX 1024
-#define DRBD_AL_STRIPES_DEF 1
+#define DRBD_AL_STRIPES_MIN 1U
+#define DRBD_AL_STRIPES_MAX 1024U
+#define DRBD_AL_STRIPES_DEF 1U
#define DRBD_AL_STRIPES_SCALE '1'
-#define DRBD_AL_STRIPE_SIZE_MIN 4
-#define DRBD_AL_STRIPE_SIZE_MAX 16777216
-#define DRBD_AL_STRIPE_SIZE_DEF 32
+#define DRBD_AL_STRIPE_SIZE_MIN 4U
+#define DRBD_AL_STRIPE_SIZE_MAX 16777216U
+#define DRBD_AL_STRIPE_SIZE_DEF 32U
#define DRBD_AL_STRIPE_SIZE_SCALE 'k' /* kilobytes */
-#define DRBD_SOCKET_CHECK_TIMEO_MIN 0
+#define DRBD_SOCKET_CHECK_TIMEO_MIN 0U
#define DRBD_SOCKET_CHECK_TIMEO_MAX DRBD_PING_TIMEO_MAX
-#define DRBD_SOCKET_CHECK_TIMEO_DEF 0
+#define DRBD_SOCKET_CHECK_TIMEO_DEF 0U
#define DRBD_SOCKET_CHECK_TIMEO_SCALE '1'
-#define DRBD_RS_DISCARD_GRANULARITY_MIN 0
-#define DRBD_RS_DISCARD_GRANULARITY_MAX (1<<20) /* 1MiByte */
-#define DRBD_RS_DISCARD_GRANULARITY_DEF 0 /* disabled by default */
+#define DRBD_RS_DISCARD_GRANULARITY_MIN 0U
+#define DRBD_RS_DISCARD_GRANULARITY_MAX (1U<<20) /* 1MiByte */
+#define DRBD_RS_DISCARD_GRANULARITY_DEF 0U /* disabled by default */
#define DRBD_RS_DISCARD_GRANULARITY_SCALE '1' /* bytes */
#endif
diff --git a/include/linux/ds2782_battery.h b/include/linux/ds2782_battery.h
index b4e281f65c15..fb6c97e10956 100644
--- a/include/linux/ds2782_battery.h
+++ b/include/linux/ds2782_battery.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_DS2782_BATTERY_H
#define __LINUX_DS2782_BATTERY_H
diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h
new file mode 100644
index 000000000000..f3664ee12170
--- /dev/null
+++ b/include/linux/dsa/8021q.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+
+#ifndef _NET_DSA_8021Q_H
+#define _NET_DSA_8021Q_H
+
+#include <net/dsa.h>
+#include <linux/types.h>
+
+int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto);
+
+void dsa_tag_8021q_unregister(struct dsa_switch *ds);
+
+int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge);
+
+void dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge);
+
+u16 dsa_tag_8021q_bridge_vid(unsigned int bridge_num);
+
+u16 dsa_tag_8021q_standalone_vid(const struct dsa_port *dp);
+
+int dsa_8021q_rx_switch_id(u16 vid);
+
+int dsa_8021q_rx_source_port(u16 vid);
+
+bool vid_is_dsa_8021q(u16 vid);
+
+#endif /* _NET_DSA_8021Q_H */
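A receive-path sketch of how the VID helpers fit together (vid is assumed to have been pulled from the frame's VLAN tag):

	if (vid_is_dsa_8021q(vid)) {
		int switch_id = dsa_8021q_rx_switch_id(vid);
		int source_port = dsa_8021q_rx_source_port(vid);

		/* map (switch_id, source_port) back to the user port */
	}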
diff --git a/include/linux/dsa/brcm.h b/include/linux/dsa/brcm.h
new file mode 100644
index 000000000000..47545a948784
--- /dev/null
+++ b/include/linux/dsa/brcm.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2014 Broadcom Corporation
+ */
+
+/* Included by drivers/net/ethernet/broadcom/bcmsysport.c and
+ * net/dsa/tag_brcm.c
+ */
+#ifndef _NET_DSA_BRCM_H
+#define _NET_DSA_BRCM_H
+
+/* Broadcom tag specific helpers to insert and extract queue/port number */
+#define BRCM_TAG_SET_PORT_QUEUE(p, q) ((p) << 8 | q)
+#define BRCM_TAG_GET_PORT(v) ((v) >> 8)
+#define BRCM_TAG_GET_QUEUE(v) ((v) & 0xff)
+
+#endif
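The helpers are straightforward to verify (a worked example):

	u16 tag = BRCM_TAG_SET_PORT_QUEUE(2, 1);	/* 0x0201: port 2, queue 1 */

	WARN_ON(BRCM_TAG_GET_PORT(tag) != 2);
	WARN_ON(BRCM_TAG_GET_QUEUE(tag) != 1);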
diff --git a/include/linux/dsa/ksz_common.h b/include/linux/dsa/ksz_common.h
new file mode 100644
index 000000000000..576a99ca698d
--- /dev/null
+++ b/include/linux/dsa/ksz_common.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip switch tag common header
+ *
+ * Copyright (C) 2022 Microchip Technology Inc.
+ */
+
+#ifndef _NET_DSA_KSZ_COMMON_H_
+#define _NET_DSA_KSZ_COMMON_H_
+
+#include <net/dsa.h>
+
+/* All time stamps from the KSZ consist of 2 bits for seconds and 30 bits for
+ * nanoseconds. This is NOT the same as 32 bits for nanoseconds.
+ */
+#define KSZ_TSTAMP_SEC_MASK GENMASK(31, 30)
+#define KSZ_TSTAMP_NSEC_MASK GENMASK(29, 0)
+
+static inline ktime_t ksz_decode_tstamp(u32 tstamp)
+{
+ u64 ns = FIELD_GET(KSZ_TSTAMP_SEC_MASK, tstamp) * NSEC_PER_SEC +
+ FIELD_GET(KSZ_TSTAMP_NSEC_MASK, tstamp);
+
+ return ns_to_ktime(ns);
+}
+
+struct ksz_deferred_xmit_work {
+ struct dsa_port *dp;
+ struct sk_buff *skb;
+ struct kthread_work work;
+};
+
+struct ksz_tagger_data {
+ void (*xmit_work_fn)(struct kthread_work *work);
+ void (*hwtstamp_set_state)(struct dsa_switch *ds, bool on);
+};
+
+struct ksz_skb_cb {
+ struct sk_buff *clone;
+ unsigned int ptp_type;
+ bool update_correction;
+ u32 tstamp;
+};
+
+#define KSZ_SKB_CB(skb) \
+ ((struct ksz_skb_cb *)((skb)->cb))
+
+static inline struct ksz_tagger_data *
+ksz_tagger_data(struct dsa_switch *ds)
+{
+ return ds->tagger_data;
+}
+
+#endif /* _NET_DSA_KSZ_COMMON_H_ */
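A worked example of the timestamp decoding (the raw value would normally come from KSZ_SKB_CB(skb)->tstamp):

	/* 0x40000001: 1 in the 2-bit seconds field, 1 ns in bits 29:0 */
	ktime_t ts = ksz_decode_tstamp(0x40000001);

	/* ts now holds 1 * NSEC_PER_SEC + 1 = 1000000001 ns */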
diff --git a/include/linux/dsa/lan9303.h b/include/linux/dsa/lan9303.h
new file mode 100644
index 000000000000..b4f22112ba75
--- /dev/null
+++ b/include/linux/dsa/lan9303.h
@@ -0,0 +1,39 @@
+/* Included by drivers/net/dsa/lan9303.h and net/dsa/tag_lan9303.c */
+#include <linux/if_ether.h>
+
+struct lan9303;
+
+struct lan9303_phy_ops {
+ /* PHY 1 and 2 access */
+ int (*phy_read)(struct lan9303 *chip, int port, int regnum);
+ int (*phy_write)(struct lan9303 *chip, int port,
+ int regnum, u16 val);
+};
+
+#define LAN9303_NUM_ALR_RECORDS 512
+struct lan9303_alr_cache_entry {
+ u8 mac_addr[ETH_ALEN];
+ u8 port_map; /* Bitmap of ports. Zero if unused entry */
+ u8 stp_override; /* non-zero if LAN9303_ALR_DAT1_AGE_OVERRID is set */
+};
+
+struct lan9303 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *irq_data;
+ struct gpio_desc *reset_gpio;
+ u32 reset_duration; /* in [ms] */
+ int phy_addr_base;
+ struct dsa_switch *ds;
+ struct mutex indirect_mutex; /* protect indexed register access */
+ struct mutex alr_mutex; /* protect ALR access */
+ const struct lan9303_phy_ops *ops;
+ bool is_bridged; /* true if port 1 and 2 are bridged */
+
+ /* remember LAN9303_SWE_PORT_STATE while not bridged */
+ u32 swe_port_state;
+ /* The LAN9303 does not offer reading a specific ALR entry.
+ * Cache all static entries in a flat table.
+ */
+ struct lan9303_alr_cache_entry alr_cache[LAN9303_NUM_ALR_RECORDS];
+};
diff --git a/include/linux/dsa/loop.h b/include/linux/dsa/loop.h
new file mode 100644
index 000000000000..b8fef35591aa
--- /dev/null
+++ b/include/linux/dsa/loop.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef DSA_LOOP_H
+#define DSA_LOOP_H
+
+#include <linux/if_vlan.h>
+#include <linux/types.h>
+#include <linux/ethtool.h>
+#include <net/dsa.h>
+
+struct dsa_loop_vlan {
+ u16 members;
+ u16 untagged;
+};
+
+struct dsa_loop_mib_entry {
+ char name[ETH_GSTRING_LEN];
+ unsigned long val;
+};
+
+enum dsa_loop_mib_counters {
+ DSA_LOOP_PHY_READ_OK,
+ DSA_LOOP_PHY_READ_ERR,
+ DSA_LOOP_PHY_WRITE_OK,
+ DSA_LOOP_PHY_WRITE_ERR,
+ __DSA_LOOP_CNT_MAX,
+};
+
+struct dsa_loop_port {
+ struct dsa_loop_mib_entry mib[__DSA_LOOP_CNT_MAX];
+ u16 pvid;
+ int mtu;
+};
+
+struct dsa_loop_priv {
+ struct mii_bus *bus;
+ unsigned int port_base;
+ struct dsa_loop_vlan vlans[VLAN_N_VID];
+ struct net_device *netdev;
+ struct dsa_loop_port ports[DSA_MAX_PORTS];
+};
+
+#endif /* DSA_LOOP_H */
diff --git a/include/linux/dsa/mv88e6xxx.h b/include/linux/dsa/mv88e6xxx.h
new file mode 100644
index 000000000000..8c3d45eca46b
--- /dev/null
+++ b/include/linux/dsa/mv88e6xxx.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright 2021 NXP
+ */
+
+#ifndef _NET_DSA_TAG_MV88E6XXX_H
+#define _NET_DSA_TAG_MV88E6XXX_H
+
+#include <linux/if_vlan.h>
+
+#define MV88E6XXX_VID_STANDALONE 0
+#define MV88E6XXX_VID_BRIDGED (VLAN_N_VID - 1)
+
+#endif
diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h
new file mode 100644
index 000000000000..dca2969015d8
--- /dev/null
+++ b/include/linux/dsa/ocelot.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright 2019-2021 NXP
+ */
+
+#ifndef _NET_DSA_TAG_OCELOT_H
+#define _NET_DSA_TAG_OCELOT_H
+
+#include <linux/kthread.h>
+#include <linux/packing.h>
+#include <linux/skbuff.h>
+#include <net/dsa.h>
+
+struct ocelot_skb_cb {
+ struct sk_buff *clone;
+ unsigned int ptp_class; /* valid only for clones */
+ u32 tstamp_lo;
+ u8 ptp_cmd;
+ u8 ts_id;
+};
+
+#define OCELOT_SKB_CB(skb) \
+ ((struct ocelot_skb_cb *)((skb)->cb))
+
+#define IFH_TAG_TYPE_C 0
+#define IFH_TAG_TYPE_S 1
+
+#define IFH_REW_OP_NOOP 0x0
+#define IFH_REW_OP_DSCP 0x1
+#define IFH_REW_OP_ONE_STEP_PTP 0x2
+#define IFH_REW_OP_TWO_STEP_PTP 0x3
+#define IFH_REW_OP_ORIGIN_PTP 0x5
+
+#define OCELOT_TAG_LEN 16
+#define OCELOT_SHORT_PREFIX_LEN 4
+#define OCELOT_LONG_PREFIX_LEN 16
+#define OCELOT_TOTAL_TAG_LEN (OCELOT_SHORT_PREFIX_LEN + OCELOT_TAG_LEN)
+
+/* The CPU injection header and the CPU extraction header can have 3 types of
+ * prefixes: long, short and no prefix. The format of the header itself is the
+ * same in all 3 cases.
+ *
+ * Extraction with long prefix:
+ *
+ * +-------------------+-------------------+------+------+------------+-------+
+ * | ff:ff:ff:ff:ff:ff | fe:ff:ff:ff:ff:ff | 8880 | 000a | extraction | frame |
+ * | | | | | header | |
+ * +-------------------+-------------------+------+------+------------+-------+
+ * 48 bits 48 bits 16 bits 16 bits 128 bits
+ *
+ * Extraction with short prefix:
+ *
+ * +------+------+------------+-------+
+ * | 8880 | 000a | extraction | frame |
+ * | | | header | |
+ * +------+------+------------+-------+
+ * 16 bits 16 bits 128 bits
+ *
+ * Extraction with no prefix:
+ *
+ * +------------+-------+
+ * | extraction | frame |
+ * | header | |
+ * +------------+-------+
+ * 128 bits
+ *
+ *
+ * Injection with long prefix:
+ *
+ * +-------------------+-------------------+------+------+------------+-------+
+ * | any dmac | any smac | 8880 | 000a | injection | frame |
+ * | | | | | header | |
+ * +-------------------+-------------------+------+------+------------+-------+
+ * 48 bits 48 bits 16 bits 16 bits 128 bits
+ *
+ * Injection with short prefix:
+ *
+ * +------+------+------------+-------+
+ * | 8880 | 000a | injection | frame |
+ * | | | header | |
+ * +------+------+------------+-------+
+ * 16 bits 16 bits 128 bits
+ *
+ * Injection with no prefix:
+ *
+ * +------------+-------+
+ * | injection | frame |
+ * | header | |
+ * +------------+-------+
+ * 128 bits
+ *
+ * The injection header looks like this (network byte order, bit 127
+ * is part of lowest address byte in memory, bit 0 is part of highest
+ * address byte):
+ *
+ * +------+------+------+------+------+------+------+------+
+ * 127:120 |BYPASS| MASQ | MASQ_PORT |REW_OP|REW_OP|
+ * +------+------+------+------+------+------+------+------+
+ * 119:112 | REW_OP |
+ * +------+------+------+------+------+------+------+------+
+ * 111:104 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 103: 96 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 95: 88 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 87: 80 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 79: 72 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 71: 64 | RSV | DEST |
+ * +------+------+------+------+------+------+------+------+
+ * 63: 56 | DEST |
+ * +------+------+------+------+------+------+------+------+
+ * 55: 48 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 47: 40 | RSV | SRC_PORT | RSV |TFRM_TIMER|
+ * +------+------+------+------+------+------+------+------+
+ * 39: 32 | TFRM_TIMER | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 31: 24 | RSV | DP | POP_CNT | CPUQ |
+ * +------+------+------+------+------+------+------+------+
+ * 23: 16 | CPUQ | QOS_CLASS |TAG_TYPE|
+ * +------+------+------+------+------+------+------+------+
+ * 15: 8 | PCP | DEI | VID |
+ * +------+------+------+------+------+------+------+------+
+ * 7: 0 | VID |
+ * +------+------+------+------+------+------+------+------+
+ *
+ * And the extraction header looks like this:
+ *
+ * +------+------+------+------+------+------+------+------+
+ * 127:120 | RSV | REW_OP |
+ * +------+------+------+------+------+------+------+------+
+ * 119:112 | REW_OP | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 111:104 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 103: 96 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 95: 88 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 87: 80 | REW_VAL | LLEN |
+ * +------+------+------+------+------+------+------+------+
+ * 79: 72 | LLEN | WLEN |
+ * +------+------+------+------+------+------+------+------+
+ * 71: 64 | WLEN | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 63: 56 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 55: 48 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 47: 40 | RSV | SRC_PORT | ACL_ID |
+ * +------+------+------+------+------+------+------+------+
+ * 39: 32 | ACL_ID | RSV | SFLOW_ID |
+ * +------+------+------+------+------+------+------+------+
+ * 31: 24 |ACL_HIT| DP | LRN_FLAGS | CPUQ |
+ * +------+------+------+------+------+------+------+------+
+ * 23: 16 | CPUQ | QOS_CLASS |TAG_TYPE|
+ * +------+------+------+------+------+------+------+------+
+ * 15: 8 | PCP | DEI | VID |
+ * +------+------+------+------+------+------+------+------+
+ * 7: 0 | VID |
+ * +------+------+------+------+------+------+------+------+
+ */
+
+struct felix_deferred_xmit_work {
+ struct dsa_port *dp;
+ struct sk_buff *skb;
+ struct kthread_work work;
+};
+
+struct ocelot_8021q_tagger_data {
+ void (*xmit_work_fn)(struct kthread_work *work);
+};
+
+static inline struct ocelot_8021q_tagger_data *
+ocelot_8021q_tagger_data(struct dsa_switch *ds)
+{
+ BUG_ON(ds->dst->tag_ops->proto != DSA_TAG_PROTO_OCELOT_8021Q);
+
+ return ds->tagger_data;
+}
+
+static inline void ocelot_xfh_get_rew_val(void *extraction, u64 *rew_val)
+{
+ packing(extraction, rew_val, 116, 85, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_len(void *extraction, u64 *len)
+{
+ u64 llen, wlen;
+
+ packing(extraction, &llen, 84, 79, OCELOT_TAG_LEN, UNPACK, 0);
+ packing(extraction, &wlen, 78, 71, OCELOT_TAG_LEN, UNPACK, 0);
+
+ *len = 60 * wlen + llen - 80;
+}
+
+static inline void ocelot_xfh_get_src_port(void *extraction, u64 *src_port)
+{
+ packing(extraction, src_port, 46, 43, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_qos_class(void *extraction, u64 *qos_class)
+{
+ packing(extraction, qos_class, 19, 17, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_tag_type(void *extraction, u64 *tag_type)
+{
+ packing(extraction, tag_type, 16, 16, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_vlan_tci(void *extraction, u64 *vlan_tci)
+{
+ packing(extraction, vlan_tci, 15, 0, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_ifh_set_bypass(void *injection, u64 bypass)
+{
+ packing(injection, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_rew_op(void *injection, u64 rew_op)
+{
+ packing(injection, &rew_op, 125, 117, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_dest(void *injection, u64 dest)
+{
+ packing(injection, &dest, 67, 56, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_qos_class(void *injection, u64 qos_class)
+{
+ packing(injection, &qos_class, 19, 17, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void seville_ifh_set_dest(void *injection, u64 dest)
+{
+ packing(injection, &dest, 67, 57, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_src(void *injection, u64 src)
+{
+ packing(injection, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_tag_type(void *injection, u64 tag_type)
+{
+ packing(injection, &tag_type, 16, 16, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_vlan_tci(void *injection, u64 vlan_tci)
+{
+ packing(injection, &vlan_tci, 15, 0, OCELOT_TAG_LEN, PACK, 0);
+}
+
+/* Determine the PTP REW_OP to use for injecting the given skb */
+static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb)
+{
+ struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
+ u8 ptp_cmd = OCELOT_SKB_CB(skb)->ptp_cmd;
+ u32 rew_op = 0;
+
+ if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && clone) {
+ rew_op = ptp_cmd;
+ rew_op |= OCELOT_SKB_CB(clone)->ts_id << 3;
+ } else if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
+ rew_op = ptp_cmd;
+ }
+
+ return rew_op;
+}
+
+#endif
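For illustration, a transmit path might assemble an injection header with these setters roughly as follows (a sketch; port and skb are assumed):

	u8 ifh[OCELOT_TAG_LEN] = {};

	ocelot_ifh_set_bypass(ifh, 1);			/* bypass the analyzer */
	ocelot_ifh_set_dest(ifh, BIT(port));		/* destination port mask */
	ocelot_ifh_set_qos_class(ifh, skb->priority);
	ocelot_ifh_set_rew_op(ifh, ocelot_ptp_rew_op(skb));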
diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h
new file mode 100644
index 000000000000..159e43171ccc
--- /dev/null
+++ b/include/linux/dsa/sja1105.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+
+/* Included by drivers/net/dsa/sja1105/sja1105.h and net/dsa/tag_sja1105.c */
+
+#ifndef _NET_DSA_SJA1105_H
+#define _NET_DSA_SJA1105_H
+
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/dsa/8021q.h>
+#include <net/dsa.h>
+
+#define ETH_P_SJA1105 ETH_P_DSA_8021Q
+#define ETH_P_SJA1105_META 0x0008
+#define ETH_P_SJA1110 0xdadc
+
+#define SJA1105_DEFAULT_VLAN (VLAN_N_VID - 1)
+
+/* IEEE 802.3 Annex 57A: Slow Protocols PDUs (01:80:C2:xx:xx:xx) */
+#define SJA1105_LINKLOCAL_FILTER_A 0x0180C2000000ull
+#define SJA1105_LINKLOCAL_FILTER_A_MASK 0xFFFFFF000000ull
+/* IEEE 1588 Annex F: Transport of PTP over Ethernet (01:1B:19:xx:xx:xx) */
+#define SJA1105_LINKLOCAL_FILTER_B 0x011B19000000ull
+#define SJA1105_LINKLOCAL_FILTER_B_MASK 0xFFFFFF000000ull
+
+/* Source and Destination MAC of follow-up meta frames.
+ * Whereas the choice of SMAC only affects the unique identification of the
+ * switch as sender of meta frames, the DMAC must be an address that is present
+ * in the DSA master port's multicast MAC filter.
+ * 01-80-C2-00-00-0E is a good choice for this, as all profiles of IEEE 1588
+ * over L2 use this address for some purpose already.
+ */
+#define SJA1105_META_SMAC 0x222222222222ull
+#define SJA1105_META_DMAC 0x0180C200000Eull
+
+enum sja1110_meta_tstamp {
+ SJA1110_META_TSTAMP_TX = 0,
+ SJA1110_META_TSTAMP_RX = 1,
+};
+
+struct sja1105_deferred_xmit_work {
+ struct dsa_port *dp;
+ struct sk_buff *skb;
+ struct kthread_work work;
+};
+
+/* Global tagger data */
+struct sja1105_tagger_data {
+ /* Tagger to switch */
+ void (*xmit_work_fn)(struct kthread_work *work);
+ void (*meta_tstamp_handler)(struct dsa_switch *ds, int port, u8 ts_id,
+ enum sja1110_meta_tstamp dir, u64 tstamp);
+ /* Switch to tagger */
+ bool (*rxtstamp_get_state)(struct dsa_switch *ds);
+ void (*rxtstamp_set_state)(struct dsa_switch *ds, bool on);
+};
+
+struct sja1105_skb_cb {
+ struct sk_buff *clone;
+ u64 tstamp;
+ /* Only valid for packets cloned for 2-step TX timestamping */
+ u8 ts_id;
+};
+
+#define SJA1105_SKB_CB(skb) \
+ ((struct sja1105_skb_cb *)((skb)->cb))
+
+static inline struct sja1105_tagger_data *
+sja1105_tagger_data(struct dsa_switch *ds)
+{
+ BUG_ON(ds->dst->tag_ops->proto != DSA_TAG_PROTO_SJA1105 &&
+ ds->dst->tag_ops->proto != DSA_TAG_PROTO_SJA1110);
+
+ return ds->tagger_data;
+}
+
+#endif /* _NET_DSA_SJA1105_H */
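A sketch of how a switch driver wires up the shared tagger data (handler names are hypothetical):

	struct sja1105_tagger_data *td = sja1105_tagger_data(ds);

	td->xmit_work_fn = my_deferred_xmit;		/* tagger -> switch */
	td->meta_tstamp_handler = my_meta_tstamp;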
diff --git a/include/linux/dsa/tag_qca.h b/include/linux/dsa/tag_qca.h
new file mode 100644
index 000000000000..ee657452f122
--- /dev/null
+++ b/include/linux/dsa/tag_qca.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __TAG_QCA_H
+#define __TAG_QCA_H
+
+#include <linux/types.h>
+
+struct dsa_switch;
+struct sk_buff;
+
+#define QCA_HDR_LEN 2
+#define QCA_HDR_VERSION 0x2
+
+#define QCA_HDR_RECV_VERSION GENMASK(15, 14)
+#define QCA_HDR_RECV_PRIORITY GENMASK(13, 11)
+#define QCA_HDR_RECV_TYPE GENMASK(10, 6)
+#define QCA_HDR_RECV_FRAME_IS_TAGGED BIT(3)
+#define QCA_HDR_RECV_SOURCE_PORT GENMASK(2, 0)
+
+/* Packet type for recv */
+#define QCA_HDR_RECV_TYPE_NORMAL 0x0
+#define QCA_HDR_RECV_TYPE_MIB 0x1
+#define QCA_HDR_RECV_TYPE_RW_REG_ACK 0x2
+
+#define QCA_HDR_XMIT_VERSION GENMASK(15, 14)
+#define QCA_HDR_XMIT_PRIORITY GENMASK(13, 11)
+#define QCA_HDR_XMIT_CONTROL GENMASK(10, 8)
+#define QCA_HDR_XMIT_FROM_CPU BIT(7)
+#define QCA_HDR_XMIT_DP_BIT GENMASK(6, 0)
+
+/* Packet type for xmit */
+#define QCA_HDR_XMIT_TYPE_NORMAL 0x0
+#define QCA_HDR_XMIT_TYPE_RW_REG 0x1
+
+/* Check code for a valid mgmt packet. The switch will ignore the packet
+ * if this field is wrong.
+ */
+#define QCA_HDR_MGMT_CHECK_CODE_VAL 0x5
+
+/* Specific define for in-band MDIO read/write with Ethernet packet */
+#define QCA_HDR_MGMT_SEQ_LEN 4 /* 4 bytes for the seq */
+#define QCA_HDR_MGMT_COMMAND_LEN 4 /* 4 bytes for the command */
+#define QCA_HDR_MGMT_DATA1_LEN 4 /* First 4 bytes of the mdio data */
+#define QCA_HDR_MGMT_HEADER_LEN (QCA_HDR_MGMT_SEQ_LEN + \
+ QCA_HDR_MGMT_COMMAND_LEN + \
+ QCA_HDR_MGMT_DATA1_LEN)
+
+#define QCA_HDR_MGMT_DATA2_LEN 28 /* Remaining 28 bytes of the mdio data */
+#define QCA_HDR_MGMT_PADDING_LEN 18 /* Padding to reach the min Ethernet packet size */
+
+#define QCA_HDR_MGMT_PKT_LEN (QCA_HDR_MGMT_HEADER_LEN + \
+ QCA_HDR_LEN + \
+ QCA_HDR_MGMT_DATA2_LEN + \
+ QCA_HDR_MGMT_PADDING_LEN)
+
+#define QCA_HDR_MGMT_SEQ_NUM GENMASK(31, 0) /* 63, 32 */
+#define QCA_HDR_MGMT_CHECK_CODE GENMASK(31, 29) /* 31, 29 */
+#define QCA_HDR_MGMT_CMD BIT(28) /* 28 */
+#define QCA_HDR_MGMT_LENGTH GENMASK(23, 20) /* 23, 20 */
+#define QCA_HDR_MGMT_ADDR GENMASK(18, 0) /* 18, 0 */
+
+/* Special struct emulating an Ethernet header */
+struct qca_mgmt_ethhdr {
+ __le32 command; /* command bit 31:0 */
+ __le32 seq; /* seq 63:32 */
+ __le32 mdio_data; /* first 4byte mdio */
+ __be16 hdr; /* qca hdr */
+} __packed;
+
+enum mdio_cmd {
+ MDIO_WRITE = 0x0,
+ MDIO_READ
+};
+
+struct mib_ethhdr {
+ __le32 data[3]; /* first 3 MIB counters */
+ __be16 hdr; /* qca hdr */
+} __packed;
+
+struct qca_tagger_data {
+ void (*rw_reg_ack_handler)(struct dsa_switch *ds,
+ struct sk_buff *skb);
+ void (*mib_autocast_handler)(struct dsa_switch *ds,
+ struct sk_buff *skb);
+};
+
+#endif /* __TAG_QCA_H */
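Parsing the 2-byte receive tag with these masks is a FIELD_GET() exercise (a sketch; needs <linux/bitfield.h>, and hdr is assumed to be the tag already in host byte order):

	int port, type;

	if (FIELD_GET(QCA_HDR_RECV_VERSION, hdr) != QCA_HDR_VERSION)
		return NULL;	/* not a QCA-tagged frame */

	port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, hdr);
	type = FIELD_GET(QCA_HDR_RECV_TYPE, hdr);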
diff --git a/include/linux/dtlk.h b/include/linux/dtlk.h
index 22a7b9a5f5d1..27b95e70bde3 100644
--- a/include/linux/dtlk.h
+++ b/include/linux/dtlk.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define DTLK_MINOR 0
#define DTLK_IO_EXTENT 0x02
diff --git a/include/linux/dtpm.h b/include/linux/dtpm.h
new file mode 100644
index 000000000000..a4a13514b730
--- /dev/null
+++ b/include/linux/dtpm.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Linaro Ltd
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ */
+#ifndef ___DTPM_H__
+#define ___DTPM_H__
+
+#include <linux/powercap.h>
+
+#define MAX_DTPM_DESCR 8
+#define MAX_DTPM_CONSTRAINTS 1
+
+struct dtpm {
+ struct powercap_zone zone;
+ struct dtpm *parent;
+ struct list_head sibling;
+ struct list_head children;
+ struct dtpm_ops *ops;
+ unsigned long flags;
+ u64 power_limit;
+ u64 power_max;
+ u64 power_min;
+ int weight;
+};
+
+struct dtpm_ops {
+ u64 (*set_power_uw)(struct dtpm *, u64);
+ u64 (*get_power_uw)(struct dtpm *);
+ int (*update_power_uw)(struct dtpm *);
+ void (*release)(struct dtpm *);
+};
+
+struct device_node;
+
+struct dtpm_subsys_ops {
+ const char *name;
+ int (*init)(void);
+ void (*exit)(void);
+ int (*setup)(struct dtpm *, struct device_node *);
+};
+
+enum DTPM_NODE_TYPE {
+ DTPM_NODE_VIRTUAL = 0,
+ DTPM_NODE_DT,
+};
+
+struct dtpm_node {
+ enum DTPM_NODE_TYPE type;
+ const char *name;
+ struct dtpm_node *parent;
+};
+
+static inline struct dtpm *to_dtpm(struct powercap_zone *zone)
+{
+ return container_of(zone, struct dtpm, zone);
+}
+
+int dtpm_update_power(struct dtpm *dtpm);
+
+int dtpm_release_zone(struct powercap_zone *pcz);
+
+void dtpm_init(struct dtpm *dtpm, struct dtpm_ops *ops);
+
+void dtpm_unregister(struct dtpm *dtpm);
+
+int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent);
+
+int dtpm_create_hierarchy(struct of_device_id *dtpm_match_table);
+
+void dtpm_destroy_hierarchy(void);
+#endif
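
For orientation, a minimal sketch of how the API above fits together, assuming a hypothetical leaf device with a fixed 1 W envelope; dtpm_init() wires in the ops and dtpm_register() attaches the zone to the powercap hierarchy (a NULL parent means root):

#include <linux/dtpm.h>

static struct dtpm my_zone;			/* hypothetical leaf zone */

static u64 my_set_power_uw(struct dtpm *dtpm, u64 power)
{
	/* throttle the device here; return the power actually granted */
	return power;
}

static u64 my_get_power_uw(struct dtpm *dtpm)
{
	return 1000000;				/* 1 W, a made-up constant */
}

static int my_update_power_uw(struct dtpm *dtpm)
{
	dtpm->power_min = 100000;
	dtpm->power_max = 1000000;
	return 0;
}

static void my_release(struct dtpm *dtpm)
{
}

static struct dtpm_ops my_ops = {
	.set_power_uw	 = my_set_power_uw,
	.get_power_uw	 = my_get_power_uw,
	.update_power_uw = my_update_power_uw,
	.release	 = my_release,
};

static int my_probe(void)
{
	dtpm_init(&my_zone, &my_ops);
	return dtpm_register("my_zone", &my_zone, NULL);
}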
diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h
index 4334106f44c3..82ebf9223948 100644
--- a/include/linux/dw_apb_timer.h
+++ b/include/linux/dw_apb_timer.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* (C) Copyright 2009 Intel Corporation
* Author: Jacob Pan (jacob.jun.pan@intel.com)
*
* Shared with ARM platforms, Jamie Iles, Picochip 2011
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Support for the Synopsys DesignWare APB Timers.
*/
#ifndef __DW_APB_TIMER_H__
@@ -28,7 +25,6 @@ struct dw_apb_timer {
struct dw_apb_clock_event_device {
struct clock_event_device ced;
struct dw_apb_timer timer;
- struct irqaction irqaction;
void (*eoi)(struct dw_apb_timer *);
};
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 546d68057e3b..061dd84d09f3 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -1,10 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _DYNAMIC_DEBUG_H
#define _DYNAMIC_DEBUG_H
-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+#if defined(CONFIG_JUMP_LABEL)
#include <linux/jump_label.h>
#endif
+#include <linux/build_bug.h>
+
/*
* An instance of this structure is created in a special
* ELF section at every dynamic debug callsite. At runtime,
@@ -20,6 +23,9 @@ struct _ddebug {
const char *filename;
const char *format;
unsigned int lineno:18;
+#define CLS_BITS 6
+ unsigned int class_id:CLS_BITS;
+#define _DPRINTK_CLASS_DFLT ((1 << CLS_BITS) - 1)
/*
* The flags field controls the behaviour at the callsite.
* The bits here are changed dynamically when the user
@@ -31,13 +37,18 @@ struct _ddebug {
#define _DPRINTK_FLAGS_INCL_FUNCNAME (1<<2)
#define _DPRINTK_FLAGS_INCL_LINENO (1<<3)
#define _DPRINTK_FLAGS_INCL_TID (1<<4)
+
+#define _DPRINTK_FLAGS_INCL_ANY \
+ (_DPRINTK_FLAGS_INCL_MODNAME | _DPRINTK_FLAGS_INCL_FUNCNAME |\
+ _DPRINTK_FLAGS_INCL_LINENO | _DPRINTK_FLAGS_INCL_TID)
+
#if defined DEBUG
#define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT
#else
#define _DPRINTK_FLAGS_DEFAULT 0
#endif
unsigned int flags:8;
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
union {
struct static_key_true dd_key_true;
struct static_key_false dd_key_false;
@@ -45,18 +56,88 @@ struct _ddebug {
#endif
} __attribute__((aligned(8)));
+enum class_map_type {
+ DD_CLASS_TYPE_DISJOINT_BITS,
+ /**
+ * DD_CLASS_TYPE_DISJOINT_BITS: classes are independent, one per bit,
+ * expecting hex input. Built for drm.debug; the basis for other types.
+ */
+ DD_CLASS_TYPE_LEVEL_NUM,
+ /**
+ * DD_CLASS_TYPE_LEVEL_NUM: input is a numeric level, 0-N.
+ * N turns on just bits N-1 .. 0, so N=0 turns all bits off.
+ */
+ DD_CLASS_TYPE_DISJOINT_NAMES,
+ /**
+ * DD_CLASS_TYPE_DISJOINT_NAMES: input is a CSV of [+-]CLASS_NAMES,
+ * classes are independent, like _DISJOINT_BITS.
+ */
+ DD_CLASS_TYPE_LEVEL_NAMES,
+ /**
+ * DD_CLASS_TYPE_LEVEL_NAMES: input is a CSV of [+-]CLASS_NAMES,
+ * intended for names like INFO,DEBUG,TRACE, with a module prefix.
+ * Avoid EMERG,ALERT,CRIT,ERR,WARNING: they are not debug levels.
+ */
+};
+
+struct ddebug_class_map {
+ struct list_head link;
+ struct module *mod;
+ const char *mod_name; /* needed for builtins */
+ const char **class_names;
+ const int length;
+ const int base; /* index of 1st .class_id, allows split/shared space */
+ enum class_map_type map_type;
+};
+
+/**
+ * DECLARE_DYNDBG_CLASSMAP - declare classnames known by a module
+ * @_var: a struct ddebug_class_map, passed to module_param_cb
+ * @_maptype: enum class_map_type, chooses bits/verbose, numeric/symbolic
+ * @_base: offset of the 1st class-name; splits the .class_id space
+ * @classes: class-names used to control class'd prdbgs
+ */
+#define DECLARE_DYNDBG_CLASSMAP(_var, _maptype, _base, ...) \
+ static const char *_var##_classnames[] = { __VA_ARGS__ }; \
+ static struct ddebug_class_map __aligned(8) __used \
+ __section("__dyndbg_classes") _var = { \
+ .mod = THIS_MODULE, \
+ .mod_name = KBUILD_MODNAME, \
+ .base = _base, \
+ .map_type = _maptype, \
+ .length = NUM_TYPE_ARGS(char*, __VA_ARGS__), \
+ .class_names = _var##_classnames, \
+ }
+#define NUM_TYPE_ARGS(eltype, ...) \
+ (sizeof((eltype[]){__VA_ARGS__}) / sizeof(eltype))
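
A short usage sketch in the drm.debug style the enum comments above refer to; the class names are invented for illustration, and each maps to one .class_id starting at _base:

/* sketch: three independent one-bit classes, ids 0..2 */
enum { MY_CORE, MY_KMS, MY_PRIME };

DECLARE_DYNDBG_CLASSMAP(my_classmap, DD_CLASS_TYPE_DISJOINT_BITS, 0,
			"MY_CORE", "MY_KMS", "MY_PRIME");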
-int ddebug_add_module(struct _ddebug *tab, unsigned int n,
- const char *modname);
+/* encapsulate linker provided built-in (or module) dyndbg data */
+struct _ddebug_info {
+ struct _ddebug *descs;
+ struct ddebug_class_map *classes;
+ unsigned int num_descs;
+ unsigned int num_classes;
+};
+
+struct ddebug_class_param {
+ union {
+ unsigned long *bits;
+ unsigned int *lvl;
+ };
+ char flags[8];
+ const struct ddebug_class_map *map;
+};
+
+/*
+ * pr_debug() and friends are globally enabled or modules have selectively
+ * enabled them.
+ */
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
-#if defined(CONFIG_DYNAMIC_DEBUG)
-extern int ddebug_remove_module(const char *mod_name);
extern __printf(2, 3)
void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...);
-extern int ddebug_dyndbg_module_param_cb(char *param, char *val,
- const char *modname);
-
struct device;
extern __printf(3, 4)
@@ -70,44 +151,49 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor,
const struct net_device *dev,
const char *fmt, ...);
-#define DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, key, init) \
+struct ib_device;
+
+extern __printf(3, 4)
+void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
+ const struct ib_device *ibdev,
+ const char *fmt, ...);
+
+#define DEFINE_DYNAMIC_DEBUG_METADATA_CLS(name, cls, fmt) \
static struct _ddebug __aligned(8) \
- __attribute__((section("__verbose"))) name = { \
+ __section("__dyndbg") name = { \
.modname = KBUILD_MODNAME, \
.function = __func__, \
.filename = __FILE__, \
.format = (fmt), \
.lineno = __LINE__, \
.flags = _DPRINTK_FLAGS_DEFAULT, \
- dd_key_init(key, init) \
- }
+ .class_id = cls, \
+ _DPRINTK_KEY_INIT \
+ }; \
+ BUILD_BUG_ON_MSG(cls > _DPRINTK_CLASS_DFLT, \
+ "classid value overflow")
-#ifdef HAVE_JUMP_LABEL
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+ DEFINE_DYNAMIC_DEBUG_METADATA_CLS(name, _DPRINTK_CLASS_DFLT, fmt)
-#define dd_key_init(key, init) key = (init)
+#ifdef CONFIG_JUMP_LABEL
#ifdef DEBUG
-#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
- DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, .key.dd_key_true, \
- (STATIC_KEY_TRUE_INIT))
+
+#define _DPRINTK_KEY_INIT .key.dd_key_true = (STATIC_KEY_TRUE_INIT)
#define DYNAMIC_DEBUG_BRANCH(descriptor) \
static_branch_likely(&descriptor.key.dd_key_true)
#else
-#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
- DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, .key.dd_key_false, \
- (STATIC_KEY_FALSE_INIT))
+#define _DPRINTK_KEY_INIT .key.dd_key_false = (STATIC_KEY_FALSE_INIT)
#define DYNAMIC_DEBUG_BRANCH(descriptor) \
static_branch_unlikely(&descriptor.key.dd_key_false)
#endif
-#else
-
-#define dd_key_init(key, init)
+#else /* !CONFIG_JUMP_LABEL */
-#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
- DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, 0, 0)
+#define _DPRINTK_KEY_INIT
#ifdef DEBUG
#define DYNAMIC_DEBUG_BRANCH(descriptor) \
@@ -117,57 +203,133 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor,
unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)
#endif
-#endif
+#endif /* CONFIG_JUMP_LABEL */
-#define dynamic_pr_debug(fmt, ...) \
-do { \
- DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (DYNAMIC_DEBUG_BRANCH(descriptor)) \
- __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \
- ##__VA_ARGS__); \
+/*
+ * Factory macros: ($prefix)dynamic_func_call($suffix)
+ *
+ * The lower layer (with __ prefix) gets the callsite metadata, and wraps
+ * the func inside a debug-branch/static-key construct. The upper layer
+ * (with _ prefix) does the UNIQUE_ID once, so that the lower layer can
+ * reference the name/label multiple times and tie the elements together.
+ * Multiple flavors:
+ * (|_cls): the plain form fills in _DPRINTK_CLASS_DFLT as needed
+ * (|_no_desc): the plain form passes the callsite descriptor as the 1st
+ *  arg (for prdbgs); the _no_desc form does not
+ */
+#define __dynamic_func_call_cls(id, cls, fmt, func, ...) do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA_CLS(id, cls, fmt); \
+ if (DYNAMIC_DEBUG_BRANCH(id)) \
+ func(&id, ##__VA_ARGS__); \
} while (0)
+#define __dynamic_func_call(id, fmt, func, ...) \
+ __dynamic_func_call_cls(id, _DPRINTK_CLASS_DFLT, fmt, \
+ func, ##__VA_ARGS__)
-#define dynamic_dev_dbg(dev, fmt, ...) \
-do { \
- DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (DYNAMIC_DEBUG_BRANCH(descriptor)) \
- __dynamic_dev_dbg(&descriptor, dev, fmt, \
- ##__VA_ARGS__); \
+#define __dynamic_func_call_cls_no_desc(id, cls, fmt, func, ...) do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA_CLS(id, cls, fmt); \
+ if (DYNAMIC_DEBUG_BRANCH(id)) \
+ func(__VA_ARGS__); \
} while (0)
+#define __dynamic_func_call_no_desc(id, fmt, func, ...) \
+ __dynamic_func_call_cls_no_desc(id, _DPRINTK_CLASS_DFLT, \
+ fmt, func, ##__VA_ARGS__)
+
+/*
+ * "Factory macro" for generating a call to func, guarded by a
+ * DYNAMIC_DEBUG_BRANCH. The dynamic debug descriptor will be
+ * initialized using the fmt argument. The function will be called with
+ * the address of the descriptor as first argument, followed by all
+ * the varargs. Note that fmt is repeated in invocations of this
+ * macro.
+ */
+#define _dynamic_func_call_cls(cls, fmt, func, ...) \
+ __dynamic_func_call_cls(__UNIQUE_ID(ddebug), cls, fmt, func, ##__VA_ARGS__)
+#define _dynamic_func_call(fmt, func, ...) \
+ _dynamic_func_call_cls(_DPRINTK_CLASS_DFLT, fmt, func, ##__VA_ARGS__)
+
+/*
+ * A variant that does the same, except that the descriptor is not
+ * passed as the first argument to the function; it is only called
+ * with precisely the macro's varargs.
+ */
+#define _dynamic_func_call_cls_no_desc(cls, fmt, func, ...) \
+ __dynamic_func_call_cls_no_desc(__UNIQUE_ID(ddebug), cls, fmt, \
+ func, ##__VA_ARGS__)
+#define _dynamic_func_call_no_desc(fmt, func, ...) \
+ _dynamic_func_call_cls_no_desc(_DPRINTK_CLASS_DFLT, fmt, \
+ func, ##__VA_ARGS__)
+
+#define dynamic_pr_debug_cls(cls, fmt, ...) \
+ _dynamic_func_call_cls(cls, fmt, __dynamic_pr_debug, \
+ pr_fmt(fmt), ##__VA_ARGS__)
+
+#define dynamic_pr_debug(fmt, ...) \
+ _dynamic_func_call(fmt, __dynamic_pr_debug, \
+ pr_fmt(fmt), ##__VA_ARGS__)
+
+#define dynamic_dev_dbg(dev, fmt, ...) \
+ _dynamic_func_call(fmt, __dynamic_dev_dbg, \
+ dev, fmt, ##__VA_ARGS__)
#define dynamic_netdev_dbg(dev, fmt, ...) \
-do { \
- DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (DYNAMIC_DEBUG_BRANCH(descriptor)) \
- __dynamic_netdev_dbg(&descriptor, dev, fmt, \
- ##__VA_ARGS__); \
-} while (0)
+ _dynamic_func_call(fmt, __dynamic_netdev_dbg, \
+ dev, fmt, ##__VA_ARGS__)
-#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
- groupsize, buf, len, ascii) \
-do { \
- DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, \
- __builtin_constant_p(prefix_str) ? prefix_str : "hexdump");\
- if (DYNAMIC_DEBUG_BRANCH(descriptor)) \
- print_hex_dump(KERN_DEBUG, prefix_str, \
- prefix_type, rowsize, groupsize, \
- buf, len, ascii); \
-} while (0)
+#define dynamic_ibdev_dbg(dev, fmt, ...) \
+ _dynamic_func_call(fmt, __dynamic_ibdev_dbg, \
+ dev, fmt, ##__VA_ARGS__)
-#else
+#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ _dynamic_func_call_no_desc(__builtin_constant_p(prefix_str) ? prefix_str : "hexdump", \
+ print_hex_dump, \
+ KERN_DEBUG, prefix_str, prefix_type, \
+ rowsize, groupsize, buf, len, ascii)
+
+/* for test only; normal use is via drm.debug-style macro wrappers */
+#define __pr_debug_cls(cls, fmt, ...) do { \
+ BUILD_BUG_ON_MSG(!__builtin_constant_p(cls), \
+ "expecting constant class int/enum"); \
+ dynamic_pr_debug_cls(cls, fmt, ##__VA_ARGS__); \
+ } while (0)
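
As the comment says, __pr_debug_cls() is meant to sit behind per-class wrappers; a hypothetical drm.debug-style wrapper (reusing the MY_KMS class sketched earlier) could be:

/* sketch: one wrapper per class; the class id must be a constant */
#define my_dbg_kms(fmt, ...) \
	__pr_debug_cls(MY_KMS, "kms: " fmt, ##__VA_ARGS__)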
+
+#else /* !(CONFIG_DYNAMIC_DEBUG || (CONFIG_DYNAMIC_DEBUG_CORE && DYNAMIC_DEBUG_MODULE)) */
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/printk.h>
-static inline int ddebug_remove_module(const char *mod)
-{
- return 0;
-}
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
+#define DYNAMIC_DEBUG_BRANCH(descriptor) false
+
+#define dynamic_pr_debug(fmt, ...) \
+ do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
+#define dynamic_dev_dbg(dev, fmt, ...) \
+ do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
+#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ do { if (0) \
+ print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, \
+ rowsize, groupsize, buf, len, ascii); \
+ } while (0)
+
+#endif /* CONFIG_DYNAMIC_DEBUG || (CONFIG_DYNAMIC_DEBUG_CORE && DYNAMIC_DEBUG_MODULE) */
+
+
+#ifdef CONFIG_DYNAMIC_DEBUG_CORE
+
+extern int ddebug_dyndbg_module_param_cb(char *param, char *val,
+ const char *modname);
+struct kernel_param;
+int param_set_dyndbg_classes(const char *instr, const struct kernel_param *kp);
+int param_get_dyndbg_classes(char *buffer, const struct kernel_param *kp);
+
+#else
static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
const char *modname)
{
- if (strstr(param, "dyndbg")) {
+ if (!strcmp(param, "dyndbg")) {
/* avoid pr_warn(), which wants pr_fmt() fully defined */
printk(KERN_WARNING "dyndbg param is supported only in "
"CONFIG_DYNAMIC_DEBUG builds\n");
@@ -176,10 +338,15 @@ static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
return -EINVAL;
}
-#define dynamic_pr_debug(fmt, ...) \
- do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
-#define dynamic_dev_dbg(dev, fmt, ...) \
- do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
-#endif
+struct kernel_param;
+static inline int param_set_dyndbg_classes(const char *instr, const struct kernel_param *kp)
+{ return 0; }
+static inline int param_get_dyndbg_classes(char *buffer, const struct kernel_param *kp)
+{ return 0; }
#endif
+
+
+extern const struct kernel_param_ops param_ops_dyndbg_classes;
+
+#endif /* _DYNAMIC_DEBUG_H */
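
To tie the classmap pieces together, a hedged sketch of exposing a classmap as a writable module parameter through param_ops_dyndbg_classes; my_classmap is the hypothetical map sketched earlier, and the "p" flags string toggles printing for the selected classes:

#include <linux/moduleparam.h>

static unsigned long my_debug_bits;	/* backing store for the class bitmap */

static struct ddebug_class_param my_debug_param = {
	.bits  = &my_debug_bits,
	.flags = "p",
	.map   = &my_classmap,
};
module_param_cb(debug, &param_ops_dyndbg_classes, &my_debug_param, 0600);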
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
index a4be70398ce1..407c2f281b64 100644
--- a/include/linux/dynamic_queue_limits.h
+++ b/include/linux/dynamic_queue_limits.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Dynamic queue limits (dql) - Definitions
*
@@ -37,6 +38,8 @@
#ifdef __KERNEL__
+#include <asm/bug.h>
+
struct dql {
/* Fields accessed in enqueue path (dql_queued) */
unsigned int num_queued; /* Total ever queued */
@@ -88,7 +91,7 @@ static inline void dql_queued(struct dql *dql, unsigned int count)
/* Returns how many objects can be queued, < 0 indicates over limit. */
static inline int dql_avail(const struct dql *dql)
{
- return ACCESS_ONCE(dql->adj_limit) - ACCESS_ONCE(dql->num_queued);
+ return READ_ONCE(dql->adj_limit) - READ_ONCE(dql->num_queued);
}
/* Record number of completed objects and recalculate the limit. */
@@ -98,7 +101,7 @@ void dql_completed(struct dql *dql, unsigned int count);
void dql_reset(struct dql *dql);
/* Initialize dql state */
-int dql_init(struct dql *dql, unsigned hold_time);
+void dql_init(struct dql *dql, unsigned int hold_time);
#endif /* _KERNEL_ */
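
For illustration, a minimal sketch, not from this patch, of the intended enqueue/complete pairing: the producer checks dql_avail() before queueing, and the reclaim path reports completions so dql can adapt the limit:

#include <linux/dynamic_queue_limits.h>

/* sketch: enqueue side of a hypothetical driver */
static int my_enqueue(struct dql *dql, unsigned int objs)
{
	if (dql_avail(dql) < 0)
		return -EBUSY;		/* over limit: stall the queue */
	dql_queued(dql, objs);
	return 0;
}

/* completion side: feeds the limit recalculation */
static void my_reclaim(struct dql *dql, unsigned int done)
{
	dql_completed(dql, done);
}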
diff --git a/include/linux/earlycpio.h b/include/linux/earlycpio.h
index 111f46d83d00..c70519267c77 100644
--- a/include/linux/earlycpio.h
+++ b/include/linux/earlycpio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_EARLYCPIO_H
#define _LINUX_EARLYCPIO_H
diff --git a/include/linux/ecryptfs.h b/include/linux/ecryptfs.h
index 8d5ab998a222..91e142abf7e8 100644
--- a/include/linux/ecryptfs.h
+++ b/include/linux/ecryptfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ECRYPTFS_H
#define _LINUX_ECRYPTFS_H
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 8ae0f45fafd6..fa4bda2a70f6 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -17,6 +17,7 @@
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
+#include <linux/numa.h>
#define EDAC_DEVICE_NAME_LEN 31
@@ -30,14 +31,6 @@ struct device;
extern int edac_op_state;
struct bus_type *edac_get_sysfs_subsys(void);
-int edac_get_report_status(void);
-void edac_set_report_status(int new);
-
-enum {
- EDAC_REPORTING_ENABLED,
- EDAC_REPORTING_DISABLED,
- EDAC_REPORTING_FORCE
-};
static inline void opstate_init(void)
{
@@ -182,10 +175,18 @@ static inline char *mc_event_error_type(const unsigned int err_type)
* @MEM_RDDR3: Registered DDR3 RAM
* This is a variant of the DDR3 memories.
* @MEM_LRDDR3: Load-Reduced DDR3 memory.
+ * @MEM_LPDDR3: Low-Power DDR3 memory.
* @MEM_DDR4: Unbuffered DDR4 RAM
* @MEM_RDDR4: Registered DDR4 RAM
* This is a variant of the DDR4 memories.
* @MEM_LRDDR4: Load-Reduced DDR4 memory.
+ * @MEM_LPDDR4: Low-Power DDR4 memory.
+ * @MEM_DDR5: Unbuffered DDR5 RAM
+ * @MEM_RDDR5: Registered DDR5 RAM
+ * @MEM_LRDDR5: Load-Reduced DDR5 memory.
+ * @MEM_NVDIMM: Non-volatile RAM
+ * @MEM_WIO2: Wide I/O 2.
+ * @MEM_HBM2: High Bandwidth Memory Gen 2.
*/
enum mem_type {
MEM_EMPTY = 0,
@@ -206,9 +207,17 @@ enum mem_type {
MEM_DDR3,
MEM_RDDR3,
MEM_LRDDR3,
+ MEM_LPDDR3,
MEM_DDR4,
MEM_RDDR4,
MEM_LRDDR4,
+ MEM_LPDDR4,
+ MEM_DDR5,
+ MEM_RDDR5,
+ MEM_LRDDR5,
+ MEM_NVDIMM,
+ MEM_WIO2,
+ MEM_HBM2,
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -222,18 +231,26 @@ enum mem_type {
#define MEM_FLAG_DDR BIT(MEM_DDR)
#define MEM_FLAG_RDDR BIT(MEM_RDDR)
#define MEM_FLAG_RMBS BIT(MEM_RMBS)
-#define MEM_FLAG_DDR2 BIT(MEM_DDR2)
-#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
-#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
-#define MEM_FLAG_XDR BIT(MEM_XDR)
-#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
-#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
-#define MEM_FLAG_DDR4 BIT(MEM_DDR4)
-#define MEM_FLAG_RDDR4 BIT(MEM_RDDR4)
-#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4)
+#define MEM_FLAG_DDR2 BIT(MEM_DDR2)
+#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
+#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
+#define MEM_FLAG_XDR BIT(MEM_XDR)
+#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
+#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
+#define MEM_FLAG_LPDDR3 BIT(MEM_LPDDR3)
+#define MEM_FLAG_DDR4 BIT(MEM_DDR4)
+#define MEM_FLAG_RDDR4 BIT(MEM_RDDR4)
+#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4)
+#define MEM_FLAG_LPDDR4 BIT(MEM_LPDDR4)
+#define MEM_FLAG_DDR5 BIT(MEM_DDR5)
+#define MEM_FLAG_RDDR5 BIT(MEM_RDDR5)
+#define MEM_FLAG_LRDDR5 BIT(MEM_LRDDR5)
+#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM)
+#define MEM_FLAG_WIO2 BIT(MEM_WIO2)
+#define MEM_FLAG_HBM2 BIT(MEM_HBM2)
/**
- * enum edac-type - Error Detection and Correction capabilities and mode
+ * enum edac_type - Error Detection and Correction capabilities and mode
* @EDAC_UNKNOWN: Unknown if ECC is available
* @EDAC_NONE: Doesn't support ECC
* @EDAC_RESERVED: Reserved ECC type
@@ -313,7 +330,7 @@ enum scrub_type {
#define OP_OFFLINE 0x300
/**
- * enum edac_mc_layer - memory controller hierarchy layer
+ * enum edac_mc_layer_type - memory controller hierarchy layer
*
* @EDAC_MC_LAYER_BRANCH: memory layer is named "branch"
* @EDAC_MC_LAYER_CHANNEL: memory layer is named "channel"
@@ -358,87 +375,16 @@ struct edac_mc_layer {
*/
#define EDAC_MAX_LAYERS 3
-/**
- * EDAC_DIMM_OFF - Macro responsible to get a pointer offset inside a pointer
- * array for the element given by [layer0,layer1,layer2]
- * position
- *
- * @layers: a struct edac_mc_layer array, describing how many elements
- * were allocated for each layer
- * @nlayers: Number of layers at the @layers array
- * @layer0: layer0 position
- * @layer1: layer1 position. Unused if n_layers < 2
- * @layer2: layer2 position. Unused if n_layers < 3
- *
- * For 1 layer, this macro returns "var[layer0] - var";
- *
- * For 2 layers, this macro is similar to allocate a bi-dimensional array
- * and to return "var[layer0][layer1] - var";
- *
- * For 3 layers, this macro is similar to allocate a tri-dimensional array
- * and to return "var[layer0][layer1][layer2] - var".
- *
- * A loop could be used here to make it more generic, but, as we only have
- * 3 layers, this is a little faster.
- *
- * By design, layers can never be 0 or more than 3. If that ever happens,
- * a NULL is returned, causing an OOPS during the memory allocation routine,
- * with would point to the developer that he's doing something wrong.
- */
-#define EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2) ({ \
- int __i; \
- if ((nlayers) == 1) \
- __i = layer0; \
- else if ((nlayers) == 2) \
- __i = (layer1) + ((layers[1]).size * (layer0)); \
- else if ((nlayers) == 3) \
- __i = (layer2) + ((layers[2]).size * ((layer1) + \
- ((layers[1]).size * (layer0)))); \
- else \
- __i = -EINVAL; \
- __i; \
-})
-
-/**
- * EDAC_DIMM_PTR - Macro responsible to get a pointer inside a pointer array
- * for the element given by [layer0,layer1,layer2] position
- *
- * @layers: a struct edac_mc_layer array, describing how many elements
- * were allocated for each layer
- * @var: name of the var where we want to get the pointer
- * (like mci->dimms)
- * @nlayers: Number of layers at the @layers array
- * @layer0: layer0 position
- * @layer1: layer1 position. Unused if n_layers < 2
- * @layer2: layer2 position. Unused if n_layers < 3
- *
- * For 1 layer, this macro returns "var[layer0]";
- *
- * For 2 layers, this macro is similar to allocate a bi-dimensional array
- * and to return "var[layer0][layer1]";
- *
- * For 3 layers, this macro is similar to allocate a tri-dimensional array
- * and to return "var[layer0][layer1][layer2]";
- */
-#define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \
- typeof(*var) __p; \
- int ___i = EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2); \
- if (___i < 0) \
- __p = NULL; \
- else \
- __p = (var)[___i]; \
- __p; \
-})
-
struct dimm_info {
struct device dev;
char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
/* Memory location data */
- unsigned location[EDAC_MAX_LAYERS];
+ unsigned int location[EDAC_MAX_LAYERS];
struct mem_ctl_info *mci; /* the parent */
+ unsigned int idx; /* index within the parent dimm array */
u32 grain; /* granularity of reported error in bytes */
enum dev_type dtype; /* memory device type */
@@ -447,7 +393,12 @@ struct dimm_info {
u32 nr_pages; /* number of pages on this dimm */
- unsigned csrow, cschannel; /* Points to the old API data */
+ unsigned int csrow, cschannel; /* Points to the old API data */
+
+ u16 smbios_handle; /* Handle for SMBIOS type 17 */
+
+ u32 ce_count;
+ u32 ue_count;
};
/**
@@ -507,6 +458,7 @@ struct errcount_attribute_data {
* struct edac_raw_error_desc - Raw error report structure
* @grain: minimum granularity for an error report, in bytes
* @error_count: number of errors of the same type
+ * @type: severity of the error (CE/UE/Fatal)
* @top_layer: top layer of the error (layer[0])
* @mid_layer: middle layer of the error (layer[1])
* @low_layer: low layer of the error (layer[2])
@@ -518,20 +470,14 @@ struct errcount_attribute_data {
* @location: location of the error
* @label: label of the affected DIMM(s)
* @other_detail: other driver-specific detail about the error
- * @enable_per_layer_report: if false, the error affects all layers
- * (typically, a memory controller error)
*/
struct edac_raw_error_desc {
- /*
- * NOTE: everything before grain won't be cleaned by
- * edac_raw_error_desc_clean()
- */
char location[LOCATION_SIZE];
char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * EDAC_MAX_LABELS];
long grain;
- /* the vars below and grain will be cleaned on every new error report */
u16 error_count;
+ enum hw_event_mc_err_type type;
int top_layer;
int mid_layer;
int low_layer;
@@ -540,7 +486,6 @@ struct edac_raw_error_desc {
unsigned long syndrome;
const char *msg;
const char *other_detail;
- bool enable_per_layer_report;
};
/* MEMORY controller information structure
@@ -591,7 +536,7 @@ struct mem_ctl_info {
unsigned long page);
int mc_idx;
struct csrow_info **csrows;
- unsigned nr_csrows, num_cschannel;
+ unsigned int nr_csrows, num_cschannel;
/*
* Memory Controller hierarchy
@@ -602,14 +547,14 @@ struct mem_ctl_info {
* of the recent drivers enumerate memories per DIMM, instead.
* When the memory controller is per rank, csbased is true.
*/
- unsigned n_layers;
+ unsigned int n_layers;
struct edac_mc_layer *layers;
bool csbased;
/*
* DIMM info. Will eventually remove the entire csrows_info some day
*/
- unsigned tot_dimms;
+ unsigned int tot_dimms;
struct dimm_info **dimms;
/*
@@ -619,7 +564,6 @@ struct mem_ctl_info {
*/
struct device *pdev;
const char *mod_name;
- const char *mod_ver;
const char *ctl_name;
const char *dev_name;
void *pvt_info;
@@ -631,7 +575,6 @@ struct mem_ctl_info {
*/
u32 ce_noinfo_count, ue_noinfo_count;
u32 ue_mc, ce_mc;
- u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
struct completion complete;
@@ -665,9 +608,54 @@ struct mem_ctl_info {
u16 fake_inject_count;
};
-/*
- * Maximum number of memory controllers in the coherent fabric.
+#define mci_for_each_dimm(mci, dimm) \
+ for ((dimm) = (mci)->dimms[0]; \
+ (dimm); \
+ (dimm) = (dimm)->idx + 1 < (mci)->tot_dimms \
+ ? (mci)->dimms[(dimm)->idx + 1] \
+ : NULL)
+
+/**
+ * edac_get_dimm - Get DIMM info from a memory controller given by
+ * [layer0,layer1,layer2] position
+ *
+ * @mci: MC descriptor struct mem_ctl_info
+ * @layer0: layer0 position
+ * @layer1: layer1 position. Unused if n_layers < 2
+ * @layer2: layer2 position. Unused if n_layers < 3
+ *
+ * For 1 layer, this function returns "dimms[layer0]";
+ *
+ * For 2 layers, this function is similar to allocating a two-dimensional
+ * array and returning "dimms[layer0][layer1]";
+ *
+ * For 3 layers, this function is similar to allocating a three-dimensional
+ * array and returning "dimms[layer0][layer1][layer2]";
*/
-#define EDAC_MAX_MCS 16
+static inline struct dimm_info *edac_get_dimm(struct mem_ctl_info *mci,
+ int layer0, int layer1, int layer2)
+{
+ int index;
+
+ if (layer0 < 0
+ || (mci->n_layers > 1 && layer1 < 0)
+ || (mci->n_layers > 2 && layer2 < 0))
+ return NULL;
+
+ index = layer0;
-#endif
+ if (mci->n_layers > 1)
+ index = index * mci->layers[1].size + layer1;
+
+ if (mci->n_layers > 2)
+ index = index * mci->layers[2].size + layer2;
+
+ if (index < 0 || index >= mci->tot_dimms)
+ return NULL;
+
+ if (WARN_ON_ONCE(mci->dimms[index]->idx != index))
+ return NULL;
+
+ return mci->dimms[index];
+}
+#endif /* _LINUX_EDAC_H_ */
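
A brief sketch of the two accessors that replace the removed EDAC_DIMM_* macros; mci is assumed to be a fully populated controller with at least two layers:

static void my_dump_dimms(struct mem_ctl_info *mci)
{
	struct dimm_info *dimm;

	/* walk every DIMM in layer order */
	mci_for_each_dimm(mci, dimm)
		pr_info("dimm%u: %u pages\n", dimm->idx, dimm->nr_pages);

	/* or address one by its [layer0, layer1, layer2] position */
	dimm = edac_get_dimm(mci, 0, 1, 0);
	if (dimm)
		pr_info("found: %s\n", dimm->label);
}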
diff --git a/include/linux/edd.h b/include/linux/edd.h
index 83d4371ec996..1c16fbcb81c0 100644
--- a/include/linux/edd.h
+++ b/include/linux/edd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/linux/edd.h
* Copyright (C) 2002, 2003, 2004 Dell Inc.
@@ -16,16 +17,6 @@
* transferred into the edd structure, and in drivers/firmware/edd.c, that
* information is used to identify BIOS boot disk. The code in setup.S
* is very sensitive to the size of these structures.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef _LINUX_EDD_H
#define _LINUX_EDD_H
diff --git a/include/linux/edma.h b/include/linux/edma.h
deleted file mode 100644
index a1307e7827e8..000000000000
--- a/include/linux/edma.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * TI EDMA DMA engine driver
- *
- * Copyright 2012 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef __LINUX_EDMA_H
-#define __LINUX_EDMA_H
-
-struct dma_chan;
-
-#if defined(CONFIG_TI_EDMA) || defined(CONFIG_TI_EDMA_MODULE)
-bool edma_filter_fn(struct dma_chan *, void *);
-#else
-static inline bool edma_filter_fn(struct dma_chan *chan, void *param)
-{
- return false;
-}
-#endif
-
-#endif
diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h
index eb0b1988050a..c860c72a921d 100644
--- a/include/linux/eeprom_93cx6.h
+++ b/include/linux/eeprom_93cx6.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
Copyright (C) 2004 - 2006 rt2x00 SourceForge Project
<http://rt2x00.serialmonkey.com>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*
diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h
index 885f587a3555..34c2175e6a1e 100644
--- a/include/linux/eeprom_93xx46.h
+++ b/include/linux/eeprom_93xx46.h
@@ -1,21 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Module: eeprom_93xx46
* platform description for 93xx46 EEPROMs.
*/
-
-struct gpio_desc;
+#include <linux/gpio/consumer.h>
struct eeprom_93xx46_platform_data {
unsigned char flags;
#define EE_ADDR8 0x01 /* 8 bit addr. cfg */
#define EE_ADDR16 0x02 /* 16 bit addr. cfg */
#define EE_READONLY 0x08 /* forbid writing */
+#define EE_SIZE1K 0x10 /* 1 Kbit of data, i.e. a 93xx46 */
+#define EE_SIZE2K 0x20 /* 2 Kbit of data, i.e. a 93xx56 */
+#define EE_SIZE4K 0x40 /* 4 Kbit of data, i.e. a 93xx66 */
unsigned int quirks;
/* Single word read transfers only; no sequential read. */
#define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0)
/* Instructions such as EWEN are (addrlen + 2) in length. */
#define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1)
+/* Add extra cycle after address during a read */
+#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE BIT(2)
/*
* optional hooks to control additional logic
diff --git a/include/linux/efi-bgrt.h b/include/linux/efi-bgrt.h
index e6f624b53c3d..e6cd51005633 100644
--- a/include/linux/efi-bgrt.h
+++ b/include/linux/efi-bgrt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_EFI_BGRT_H
#define _LINUX_EFI_BGRT_H
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 8269bcb8ccf7..7aa62c92185f 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_EFI_H
#define _LINUX_EFI_H
@@ -28,16 +29,17 @@
#include <asm/page.h>
#define EFI_SUCCESS 0
-#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1)))
#define EFI_INVALID_PARAMETER ( 2 | (1UL << (BITS_PER_LONG-1)))
#define EFI_UNSUPPORTED ( 3 | (1UL << (BITS_PER_LONG-1)))
-#define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1)))
#define EFI_BUFFER_TOO_SMALL ( 5 | (1UL << (BITS_PER_LONG-1)))
#define EFI_NOT_READY ( 6 | (1UL << (BITS_PER_LONG-1)))
#define EFI_DEVICE_ERROR ( 7 | (1UL << (BITS_PER_LONG-1)))
#define EFI_WRITE_PROTECTED ( 8 | (1UL << (BITS_PER_LONG-1)))
#define EFI_OUT_OF_RESOURCES ( 9 | (1UL << (BITS_PER_LONG-1)))
#define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_TIMEOUT (18 | (1UL << (BITS_PER_LONG-1)))
#define EFI_ABORTED (21 | (1UL << (BITS_PER_LONG-1)))
#define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1)))
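
All of the error codes above set the top bit of the status word, so a status can be classified without enumerating each code; a hedged helper sketch:

/* sketch: EFI errors have the high bit set; success/warnings do not */
static inline bool efi_status_is_error(unsigned long status)
{
	return (long)status < 0;
}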
@@ -47,10 +49,33 @@ typedef u16 efi_char16_t; /* UNICODE character */
typedef u64 efi_physical_addr_t;
typedef void *efi_handle_t;
-typedef uuid_le efi_guid_t;
+#if defined(CONFIG_X86_64)
+#define __efiapi __attribute__((ms_abi))
+#elif defined(CONFIG_X86_32)
+#define __efiapi __attribute__((regparm(0)))
+#else
+#define __efiapi
+#endif
+
+/*
+ * The UEFI spec and EDK2 reference implementation both define EFI_GUID as
+ * struct { u32 a; u16 b; u16 c; u8 d[8]; }; and so the implied alignment
+ * is 32 bits, not 8 bits like our guid_t. In some cases (e.g., on 32-bit ARM),
+ * this means that firmware services invoked by the kernel may assume that
+ * efi_guid_t* arguments are 32-bit aligned, and use memory accessors that
+ * do not tolerate misalignment. So let's set the minimum alignment to 32 bits.
+ *
+ * Note that the UEFI spec as well as some comments in the EDK2 code base
+ * suggest that EFI_GUID should be 64-bit aligned, but this appears to be
+ * a mistake, given that no code seems to exist that actually enforces that
+ * or relies on it.
+ */
+typedef guid_t efi_guid_t __aligned(__alignof__(u32));
-#define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \
- UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)
+#define EFI_GUID(a, b, c, d...) (efi_guid_t){ { \
+ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
+ (b) & 0xff, ((b) >> 8) & 0xff, \
+ (c) & 0xff, ((c) >> 8) & 0xff, d } }
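
The initializer stores a, b and c little-endian, matching the UEFI wire format, so a textual GUID maps field-for-field onto the macro arguments; e.g. the made-up GUID 12345678-9abc-def0-1234-56789abcdef0 would be declared as:

static const efi_guid_t example_guid =
	EFI_GUID(0x12345678, 0x9abc, 0xdef0,
		 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0);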
/*
* Generic EFI table header
@@ -98,6 +123,8 @@ typedef struct {
#define EFI_MEMORY_MORE_RELIABLE \
((u64)0x0000000000010000ULL) /* higher reliability */
#define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */
+#define EFI_MEMORY_SP ((u64)0x0000000000040000ULL) /* soft reserved */
+#define EFI_MEMORY_CPU_CRYPTO ((u64)0x0000000000080000ULL) /* supports encryption */
#define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */
#define EFI_MEMORY_DESCRIPTOR_VERSION 1
@@ -121,13 +148,50 @@ typedef struct {
u32 imagesize;
} efi_capsule_header_t;
-struct efi_boot_memmap {
- efi_memory_desc_t **map;
- unsigned long *map_size;
- unsigned long *desc_size;
- u32 *desc_ver;
- unsigned long *key_ptr;
- unsigned long *buff_size;
+/* EFI_FIRMWARE_MANAGEMENT_CAPSULE_HEADER */
+struct efi_manage_capsule_header {
+ u32 ver;
+ u16 emb_drv_cnt;
+ u16 payload_cnt;
+ /*
+ * Variable-size array of the size given by the sum of
+ * emb_drv_cnt and payload_cnt.
+ */
+ u64 offset_list[];
+} __packed;
+
+/* EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER */
+struct efi_manage_capsule_image_header {
+ u32 ver;
+ efi_guid_t image_type_id;
+ u8 image_index;
+ u8 reserved_bytes[3];
+ u32 image_size;
+ u32 vendor_code_size;
+ /* hw_ins was introduced in version 2 */
+ u64 hw_ins;
+ /* capsule_support was introduced in version 3 */
+ u64 capsule_support;
+} __packed;
+
+/* WIN_CERTIFICATE */
+struct win_cert {
+ u32 len;
+ u16 rev;
+ u16 cert_type;
+};
+
+/* WIN_CERTIFICATE_UEFI_GUID */
+struct win_cert_uefi_guid {
+ struct win_cert hdr;
+ efi_guid_t cert_type;
+ u8 cert_data[];
+};
+
+/* EFI_FIRMWARE_IMAGE_AUTHENTICATION */
+struct efi_image_auth {
+ u64 mon_count;
+ struct win_cert_uefi_guid auth_info;
};
/*
@@ -139,27 +203,21 @@ struct efi_boot_memmap {
struct capsule_info {
efi_capsule_header_t header;
+ efi_capsule_header_t *capsule;
int reset_type;
long index;
size_t count;
size_t total_size;
- phys_addr_t *pages;
+ struct page **pages;
+ phys_addr_t *phys;
size_t page_bytes_remain;
};
+int efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
+ size_t hdr_bytes);
int __efi_capsule_setup_info(struct capsule_info *cap_info);
/*
- * Allocation types for calls to boottime->allocate_pages.
- */
-#define EFI_ALLOCATE_ANY_PAGES 0
-#define EFI_ALLOCATE_MAX_ADDRESS 1
-#define EFI_ALLOCATE_ADDRESS 2
-#define EFI_MAX_ALLOCATE_TYPE 3
-
-typedef int (*efi_freemem_callback_t) (u64 start, u64 end, void *arg);
-
-/*
* Types and defines for Time Services
*/
#define EFI_TIME_ADJUST_DAYLIGHT 0x1
@@ -186,291 +244,7 @@ typedef struct {
u8 sets_to_zero;
} efi_time_cap_t;
-typedef struct {
- efi_table_hdr_t hdr;
- u32 raise_tpl;
- u32 restore_tpl;
- u32 allocate_pages;
- u32 free_pages;
- u32 get_memory_map;
- u32 allocate_pool;
- u32 free_pool;
- u32 create_event;
- u32 set_timer;
- u32 wait_for_event;
- u32 signal_event;
- u32 close_event;
- u32 check_event;
- u32 install_protocol_interface;
- u32 reinstall_protocol_interface;
- u32 uninstall_protocol_interface;
- u32 handle_protocol;
- u32 __reserved;
- u32 register_protocol_notify;
- u32 locate_handle;
- u32 locate_device_path;
- u32 install_configuration_table;
- u32 load_image;
- u32 start_image;
- u32 exit;
- u32 unload_image;
- u32 exit_boot_services;
- u32 get_next_monotonic_count;
- u32 stall;
- u32 set_watchdog_timer;
- u32 connect_controller;
- u32 disconnect_controller;
- u32 open_protocol;
- u32 close_protocol;
- u32 open_protocol_information;
- u32 protocols_per_handle;
- u32 locate_handle_buffer;
- u32 locate_protocol;
- u32 install_multiple_protocol_interfaces;
- u32 uninstall_multiple_protocol_interfaces;
- u32 calculate_crc32;
- u32 copy_mem;
- u32 set_mem;
- u32 create_event_ex;
-} __packed efi_boot_services_32_t;
-
-typedef struct {
- efi_table_hdr_t hdr;
- u64 raise_tpl;
- u64 restore_tpl;
- u64 allocate_pages;
- u64 free_pages;
- u64 get_memory_map;
- u64 allocate_pool;
- u64 free_pool;
- u64 create_event;
- u64 set_timer;
- u64 wait_for_event;
- u64 signal_event;
- u64 close_event;
- u64 check_event;
- u64 install_protocol_interface;
- u64 reinstall_protocol_interface;
- u64 uninstall_protocol_interface;
- u64 handle_protocol;
- u64 __reserved;
- u64 register_protocol_notify;
- u64 locate_handle;
- u64 locate_device_path;
- u64 install_configuration_table;
- u64 load_image;
- u64 start_image;
- u64 exit;
- u64 unload_image;
- u64 exit_boot_services;
- u64 get_next_monotonic_count;
- u64 stall;
- u64 set_watchdog_timer;
- u64 connect_controller;
- u64 disconnect_controller;
- u64 open_protocol;
- u64 close_protocol;
- u64 open_protocol_information;
- u64 protocols_per_handle;
- u64 locate_handle_buffer;
- u64 locate_protocol;
- u64 install_multiple_protocol_interfaces;
- u64 uninstall_multiple_protocol_interfaces;
- u64 calculate_crc32;
- u64 copy_mem;
- u64 set_mem;
- u64 create_event_ex;
-} __packed efi_boot_services_64_t;
-
-/*
- * EFI Boot Services table
- */
-typedef struct {
- efi_table_hdr_t hdr;
- void *raise_tpl;
- void *restore_tpl;
- efi_status_t (*allocate_pages)(int, int, unsigned long,
- efi_physical_addr_t *);
- efi_status_t (*free_pages)(efi_physical_addr_t, unsigned long);
- efi_status_t (*get_memory_map)(unsigned long *, void *, unsigned long *,
- unsigned long *, u32 *);
- efi_status_t (*allocate_pool)(int, unsigned long, void **);
- efi_status_t (*free_pool)(void *);
- void *create_event;
- void *set_timer;
- void *wait_for_event;
- void *signal_event;
- void *close_event;
- void *check_event;
- void *install_protocol_interface;
- void *reinstall_protocol_interface;
- void *uninstall_protocol_interface;
- efi_status_t (*handle_protocol)(efi_handle_t, efi_guid_t *, void **);
- void *__reserved;
- void *register_protocol_notify;
- efi_status_t (*locate_handle)(int, efi_guid_t *, void *,
- unsigned long *, efi_handle_t *);
- void *locate_device_path;
- efi_status_t (*install_configuration_table)(efi_guid_t *, void *);
- void *load_image;
- void *start_image;
- void *exit;
- void *unload_image;
- efi_status_t (*exit_boot_services)(efi_handle_t, unsigned long);
- void *get_next_monotonic_count;
- void *stall;
- void *set_watchdog_timer;
- void *connect_controller;
- void *disconnect_controller;
- void *open_protocol;
- void *close_protocol;
- void *open_protocol_information;
- void *protocols_per_handle;
- void *locate_handle_buffer;
- efi_status_t (*locate_protocol)(efi_guid_t *, void *, void **);
- void *install_multiple_protocol_interfaces;
- void *uninstall_multiple_protocol_interfaces;
- void *calculate_crc32;
- void *copy_mem;
- void *set_mem;
- void *create_event_ex;
-} efi_boot_services_t;
-
-typedef enum {
- EfiPciIoWidthUint8,
- EfiPciIoWidthUint16,
- EfiPciIoWidthUint32,
- EfiPciIoWidthUint64,
- EfiPciIoWidthFifoUint8,
- EfiPciIoWidthFifoUint16,
- EfiPciIoWidthFifoUint32,
- EfiPciIoWidthFifoUint64,
- EfiPciIoWidthFillUint8,
- EfiPciIoWidthFillUint16,
- EfiPciIoWidthFillUint32,
- EfiPciIoWidthFillUint64,
- EfiPciIoWidthMaximum
-} EFI_PCI_IO_PROTOCOL_WIDTH;
-
-typedef enum {
- EfiPciIoAttributeOperationGet,
- EfiPciIoAttributeOperationSet,
- EfiPciIoAttributeOperationEnable,
- EfiPciIoAttributeOperationDisable,
- EfiPciIoAttributeOperationSupported,
- EfiPciIoAttributeOperationMaximum
-} EFI_PCI_IO_PROTOCOL_ATTRIBUTE_OPERATION;
-
-typedef struct {
- u32 read;
- u32 write;
-} efi_pci_io_protocol_access_32_t;
-
-typedef struct {
- u64 read;
- u64 write;
-} efi_pci_io_protocol_access_64_t;
-
-typedef struct {
- void *read;
- void *write;
-} efi_pci_io_protocol_access_t;
-
-typedef struct {
- u32 poll_mem;
- u32 poll_io;
- efi_pci_io_protocol_access_32_t mem;
- efi_pci_io_protocol_access_32_t io;
- efi_pci_io_protocol_access_32_t pci;
- u32 copy_mem;
- u32 map;
- u32 unmap;
- u32 allocate_buffer;
- u32 free_buffer;
- u32 flush;
- u32 get_location;
- u32 attributes;
- u32 get_bar_attributes;
- u32 set_bar_attributes;
- uint64_t romsize;
- void *romimage;
-} efi_pci_io_protocol_32;
-
-typedef struct {
- u64 poll_mem;
- u64 poll_io;
- efi_pci_io_protocol_access_64_t mem;
- efi_pci_io_protocol_access_64_t io;
- efi_pci_io_protocol_access_64_t pci;
- u64 copy_mem;
- u64 map;
- u64 unmap;
- u64 allocate_buffer;
- u64 free_buffer;
- u64 flush;
- u64 get_location;
- u64 attributes;
- u64 get_bar_attributes;
- u64 set_bar_attributes;
- uint64_t romsize;
- void *romimage;
-} efi_pci_io_protocol_64;
-
-typedef struct {
- void *poll_mem;
- void *poll_io;
- efi_pci_io_protocol_access_t mem;
- efi_pci_io_protocol_access_t io;
- efi_pci_io_protocol_access_t pci;
- void *copy_mem;
- void *map;
- void *unmap;
- void *allocate_buffer;
- void *free_buffer;
- void *flush;
- void *get_location;
- void *attributes;
- void *get_bar_attributes;
- void *set_bar_attributes;
- uint64_t romsize;
- void *romimage;
-} efi_pci_io_protocol;
-
-#define EFI_PCI_IO_ATTRIBUTE_ISA_MOTHERBOARD_IO 0x0001
-#define EFI_PCI_IO_ATTRIBUTE_ISA_IO 0x0002
-#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO 0x0004
-#define EFI_PCI_IO_ATTRIBUTE_VGA_MEMORY 0x0008
-#define EFI_PCI_IO_ATTRIBUTE_VGA_IO 0x0010
-#define EFI_PCI_IO_ATTRIBUTE_IDE_PRIMARY_IO 0x0020
-#define EFI_PCI_IO_ATTRIBUTE_IDE_SECONDARY_IO 0x0040
-#define EFI_PCI_IO_ATTRIBUTE_MEMORY_WRITE_COMBINE 0x0080
-#define EFI_PCI_IO_ATTRIBUTE_IO 0x0100
-#define EFI_PCI_IO_ATTRIBUTE_MEMORY 0x0200
-#define EFI_PCI_IO_ATTRIBUTE_BUS_MASTER 0x0400
-#define EFI_PCI_IO_ATTRIBUTE_MEMORY_CACHED 0x0800
-#define EFI_PCI_IO_ATTRIBUTE_MEMORY_DISABLE 0x1000
-#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_DEVICE 0x2000
-#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM 0x4000
-#define EFI_PCI_IO_ATTRIBUTE_DUAL_ADDRESS_CYCLE 0x8000
-#define EFI_PCI_IO_ATTRIBUTE_ISA_IO_16 0x10000
-#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000
-#define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000
-
-typedef struct {
- u32 version;
- u32 get;
- u32 set;
- u32 del;
- u32 get_all;
-} apple_properties_protocol_32_t;
-
-typedef struct {
- u64 version;
- u64 get;
- u64 set;
- u64 del;
- u64 get_all;
-} apple_properties_protocol_64_t;
+typedef union efi_boot_services efi_boot_services_t;
/*
* Types and defines for EFI ResetSystem
@@ -503,24 +277,6 @@ typedef struct {
u32 query_variable_info;
} efi_runtime_services_32_t;
-typedef struct {
- efi_table_hdr_t hdr;
- u64 get_time;
- u64 set_time;
- u64 get_wakeup_time;
- u64 set_wakeup_time;
- u64 set_virtual_address_map;
- u64 convert_pointer;
- u64 get_variable;
- u64 get_next_variable;
- u64 set_variable;
- u64 get_next_high_mono_count;
- u64 reset_system;
- u64 update_capsule;
- u64 query_capsule_caps;
- u64 query_variable_info;
-} efi_runtime_services_64_t;
-
typedef efi_status_t efi_get_time_t (efi_time_t *tm, efi_time_cap_t *tc);
typedef efi_status_t efi_set_time_t (efi_time_t *tm);
typedef efi_status_t efi_get_wakeup_time_t (efi_bool_t *enabled, efi_bool_t *pending,
@@ -555,22 +311,25 @@ typedef efi_status_t efi_query_variable_store_t(u32 attributes,
unsigned long size,
bool nonblocking);
-typedef struct {
- efi_table_hdr_t hdr;
- efi_get_time_t *get_time;
- efi_set_time_t *set_time;
- efi_get_wakeup_time_t *get_wakeup_time;
- efi_set_wakeup_time_t *set_wakeup_time;
- efi_set_virtual_address_map_t *set_virtual_address_map;
- void *convert_pointer;
- efi_get_variable_t *get_variable;
- efi_get_next_variable_t *get_next_variable;
- efi_set_variable_t *set_variable;
- efi_get_next_high_mono_count_t *get_next_high_mono_count;
- efi_reset_system_t *reset_system;
- efi_update_capsule_t *update_capsule;
- efi_query_capsule_caps_t *query_capsule_caps;
- efi_query_variable_info_t *query_variable_info;
+typedef union {
+ struct {
+ efi_table_hdr_t hdr;
+ efi_get_time_t __efiapi *get_time;
+ efi_set_time_t __efiapi *set_time;
+ efi_get_wakeup_time_t __efiapi *get_wakeup_time;
+ efi_set_wakeup_time_t __efiapi *set_wakeup_time;
+ efi_set_virtual_address_map_t __efiapi *set_virtual_address_map;
+ void *convert_pointer;
+ efi_get_variable_t __efiapi *get_variable;
+ efi_get_next_variable_t __efiapi *get_next_variable;
+ efi_set_variable_t __efiapi *set_variable;
+ efi_get_next_high_mono_count_t __efiapi *get_next_high_mono_count;
+ efi_reset_system_t __efiapi *reset_system;
+ efi_update_capsule_t __efiapi *update_capsule;
+ efi_query_capsule_caps_t __efiapi *query_capsule_caps;
+ efi_query_variable_info_t __efiapi *query_variable_info;
+ };
+ efi_runtime_services_32_t mixed_mode;
} efi_runtime_services_t;
void efi_native_runtime_setup(void);
@@ -609,6 +368,10 @@ void efi_native_runtime_setup(void);
#define UV_SYSTEM_TABLE_GUID EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93)
#define LINUX_EFI_CRASH_GUID EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0)
#define LOADED_IMAGE_PROTOCOL_GUID EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
+#define LOADED_IMAGE_DEVICE_PATH_PROTOCOL_GUID EFI_GUID(0xbc62157e, 0x3e33, 0x4fec, 0x99, 0x20, 0x2d, 0x3b, 0x36, 0xd7, 0x50, 0xdf)
+#define EFI_DEVICE_PATH_PROTOCOL_GUID EFI_GUID(0x09576e91, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
+#define EFI_DEVICE_PATH_TO_TEXT_PROTOCOL_GUID EFI_GUID(0x8b843e20, 0x8132, 0x4852, 0x90, 0xcc, 0x55, 0x1a, 0x4e, 0x4a, 0x7f, 0x1c)
+#define EFI_DEVICE_PATH_FROM_TEXT_PROTOCOL_GUID EFI_GUID(0x05c99a21, 0xc70f, 0x4ad2, 0x8a, 0x5f, 0x35, 0xdf, 0x33, 0x43, 0xf5, 0x1e)
#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a)
#define EFI_UGA_PROTOCOL_GUID EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39)
#define EFI_PCI_IO_PROTOCOL_GUID EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a)
@@ -622,18 +385,55 @@ void efi_native_runtime_setup(void);
#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0)
+#define EFI_TCG2_PROTOCOL_GUID EFI_GUID(0x607f766c, 0x7455, 0x42be, 0x93, 0x0b, 0xe4, 0xd7, 0x6d, 0xb2, 0x72, 0x0f)
+#define EFI_LOAD_FILE_PROTOCOL_GUID EFI_GUID(0x56ec3091, 0x954c, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
+#define EFI_LOAD_FILE2_PROTOCOL_GUID EFI_GUID(0x4006c0c1, 0xfcb3, 0x403e, 0x99, 0x6d, 0x4a, 0x6c, 0x87, 0x24, 0xe0, 0x6d)
+#define EFI_RT_PROPERTIES_TABLE_GUID EFI_GUID(0xeb66918a, 0x7eef, 0x402a, 0x84, 0x2e, 0x93, 0x1d, 0x21, 0xc3, 0x8a, 0xe9)
+#define EFI_DXE_SERVICES_TABLE_GUID EFI_GUID(0x05ad34ba, 0x6f02, 0x4214, 0x95, 0x2e, 0x4d, 0xa0, 0x39, 0x8e, 0x2b, 0xb9)
+#define EFI_SMBIOS_PROTOCOL_GUID EFI_GUID(0x03583ff6, 0xcb36, 0x4940, 0x94, 0x7e, 0xb9, 0xb3, 0x9f, 0x4a, 0xfa, 0xf7)
+#define EFI_MEMORY_ATTRIBUTE_PROTOCOL_GUID EFI_GUID(0xf4560cf6, 0x40ec, 0x4b4a, 0xa1, 0x92, 0xbf, 0x1d, 0x57, 0xd0, 0xb1, 0x89)
#define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f)
#define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23)
+#define EFI_CERT_SHA256_GUID EFI_GUID(0xc1c41626, 0x504c, 0x4092, 0xac, 0xa9, 0x41, 0xf9, 0x36, 0x93, 0x43, 0x28)
+#define EFI_CERT_X509_GUID EFI_GUID(0xa5c059a1, 0x94e4, 0x4aa7, 0x87, 0xb5, 0xab, 0x15, 0x5c, 0x2b, 0xf0, 0x72)
+#define EFI_CERT_X509_SHA256_GUID EFI_GUID(0x3bd2a492, 0x96c0, 0x4079, 0xb4, 0x20, 0xfc, 0xf9, 0x8e, 0xf1, 0x03, 0xed)
+#define EFI_CC_BLOB_GUID EFI_GUID(0x067b1f5f, 0xcf26, 0x44c5, 0x85, 0x54, 0x93, 0xd7, 0x77, 0x91, 0x2d, 0x42)
+
/*
* This GUID is used to pass to the kernel proper the struct screen_info
* structure that was populated by the stub based on the GOP protocol instance
* associated with ConOut
*/
-#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
+#define LINUX_EFI_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
+#define LINUX_EFI_ARM_CPU_STATE_TABLE_GUID EFI_GUID(0xef79e4aa, 0x3c3d, 0x4989, 0xb9, 0x02, 0x07, 0xa9, 0x43, 0xe5, 0x50, 0xd2)
#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
+#define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa)
+#define LINUX_EFI_TPM_FINAL_LOG_GUID EFI_GUID(0x1e2ed096, 0x30e2, 0x4254, 0xbd, 0x89, 0x86, 0x3b, 0xbe, 0xf8, 0x23, 0x25)
+#define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2)
+#define LINUX_EFI_INITRD_MEDIA_GUID EFI_GUID(0x5568e427, 0x68fc, 0x4f3d, 0xac, 0x74, 0xca, 0x55, 0x52, 0x31, 0xcc, 0x68)
+#define LINUX_EFI_MOK_VARIABLE_TABLE_GUID EFI_GUID(0xc451ed2b, 0x9694, 0x45d3, 0xba, 0xba, 0xed, 0x9f, 0x89, 0x88, 0xa3, 0x89)
+#define LINUX_EFI_COCO_SECRET_AREA_GUID EFI_GUID(0xadf956ad, 0xe98c, 0x484c, 0xae, 0x11, 0xb5, 0x1c, 0x7d, 0x33, 0x64, 0x47)
+#define LINUX_EFI_BOOT_MEMMAP_GUID EFI_GUID(0x800f683f, 0xd08b, 0x423a, 0xa2, 0x93, 0x96, 0x5c, 0x3c, 0x6f, 0xe2, 0xb4)
+
+#define RISCV_EFI_BOOT_PROTOCOL_GUID EFI_GUID(0xccd15fec, 0x6f73, 0x4eec, 0x83, 0x95, 0x3e, 0x69, 0xe4, 0xb9, 0x40, 0xbf)
+
+/*
+ * This GUID may be installed onto the kernel image's handle as a NULL protocol
+ * to signal to the stub that the placement of the image should be respected,
+ * and moving the image in physical memory is undesirable. To ensure
+ * compatibility with 64k pages kernels with virtually mapped stacks, and to
+ * avoid defeating physical randomization, this protocol should only be
+ * installed if the image was placed at a randomized 128k aligned address in
+ * memory.
+ */
+#define LINUX_EFI_LOADED_IMAGE_FIXED_GUID EFI_GUID(0xf5a37b6d, 0x3344, 0x42a5, 0xb6, 0xbb, 0x97, 0x86, 0x48, 0xc1, 0x89, 0x0a)
+
+/* OEM GUIDs */
+#define DELLEMC_EFI_RCI2_TABLE_GUID EFI_GUID(0x2d9f28a2, 0xa886, 0x456a, 0x97, 0xa8, 0xf1, 0x1e, 0xf2, 0x4f, 0xf4, 0x55)
+#define AMD_SEV_MEM_ENCRYPT_GUID EFI_GUID(0x0cf29b71, 0x9e51, 0x433a, 0xa3, 0xb7, 0x81, 0xf3, 0xab, 0x16, 0xb8, 0x75)
typedef struct {
efi_guid_t guid;
@@ -645,18 +445,22 @@ typedef struct {
u32 table;
} efi_config_table_32_t;
-typedef struct {
- efi_guid_t guid;
- unsigned long table;
+typedef union {
+ struct {
+ efi_guid_t guid;
+ void *table;
+ };
+ efi_config_table_32_t mixed_mode;
} efi_config_table_t;
typedef struct {
efi_guid_t guid;
- const char *name;
unsigned long *ptr;
+ const char name[16];
} efi_config_table_type_t;
#define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL)
+#define EFI_DXE_SERVICES_TABLE_SIGNATURE ((u64)0x565245535f455844ULL)
#define EFI_2_30_SYSTEM_TABLE_REVISION ((2 << 16) | (30))
#define EFI_2_20_SYSTEM_TABLE_REVISION ((2 << 16) | (20))
@@ -699,32 +503,48 @@ typedef struct {
u32 tables;
} efi_system_table_32_t;
-typedef struct {
- efi_table_hdr_t hdr;
- unsigned long fw_vendor; /* physical addr of CHAR16 vendor string */
- u32 fw_revision;
- unsigned long con_in_handle;
- unsigned long con_in;
- unsigned long con_out_handle;
- unsigned long con_out;
- unsigned long stderr_handle;
- unsigned long stderr;
- efi_runtime_services_t *runtime;
- efi_boot_services_t *boottime;
- unsigned long nr_tables;
- unsigned long tables;
+typedef union efi_simple_text_input_protocol efi_simple_text_input_protocol_t;
+typedef union efi_simple_text_output_protocol efi_simple_text_output_protocol_t;
+
+typedef union {
+ struct {
+ efi_table_hdr_t hdr;
+ unsigned long fw_vendor; /* physical addr of CHAR16 vendor string */
+ u32 fw_revision;
+ unsigned long con_in_handle;
+ efi_simple_text_input_protocol_t *con_in;
+ unsigned long con_out_handle;
+ efi_simple_text_output_protocol_t *con_out;
+ unsigned long stderr_handle;
+ unsigned long stderr;
+ efi_runtime_services_t *runtime;
+ efi_boot_services_t *boottime;
+ unsigned long nr_tables;
+ unsigned long tables;
+ };
+ efi_system_table_32_t mixed_mode;
} efi_system_table_t;
+struct efi_boot_memmap {
+ unsigned long map_size;
+ unsigned long desc_size;
+ u32 desc_ver;
+ unsigned long map_key;
+ unsigned long buff_size;
+ efi_memory_desc_t map[];
+};
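
One subtlety worth a sketch: firmware may use a desc_size larger than sizeof(efi_memory_desc_t), so iteration must stride by desc_size rather than index map[] directly (the function name is hypothetical):

static void my_walk_memmap(const struct efi_boot_memmap *m)
{
	unsigned long i, n = m->map_size / m->desc_size;
	const efi_memory_desc_t *md;

	for (i = 0; i < n; i++) {
		md = (const void *)m->map + i * m->desc_size;
		/* inspect md->type, md->phys_addr, md->num_pages ... */
	}
}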
+
/*
* Architecture independent structure for describing a memory map for the
- * benefit of efi_memmap_init_early(), saving us the need to pass four
- * parameters.
+ * benefit of efi_memmap_init_early(), and for passing context between
+ * efi_memmap_alloc() and efi_memmap_install().
*/
struct efi_memory_map_data {
phys_addr_t phys_map;
unsigned long size;
unsigned long desc_version;
unsigned long desc_size;
+ unsigned long flags;
};
struct efi_memory_map {
@@ -734,7 +554,10 @@ struct efi_memory_map {
int nr_map;
unsigned long desc_version;
unsigned long desc_size;
- bool late;
+#define EFI_MEMMAP_LATE (1UL << 0)
+#define EFI_MEMMAP_MEMBLOCK (1UL << 1)
+#define EFI_MEMMAP_SLAB (1UL << 2)
+ unsigned long flags;
};
struct efi_mem_range {
@@ -742,130 +565,6 @@ struct efi_mem_range {
u64 attribute;
};
-struct efi_fdt_params {
- u64 system_table;
- u64 mmap;
- u32 mmap_size;
- u32 desc_size;
- u32 desc_ver;
-};
-
-typedef struct {
- u32 revision;
- u32 parent_handle;
- u32 system_table;
- u32 device_handle;
- u32 file_path;
- u32 reserved;
- u32 load_options_size;
- u32 load_options;
- u32 image_base;
- __aligned_u64 image_size;
- unsigned int image_code_type;
- unsigned int image_data_type;
- unsigned long unload;
-} efi_loaded_image_32_t;
-
-typedef struct {
- u32 revision;
- u64 parent_handle;
- u64 system_table;
- u64 device_handle;
- u64 file_path;
- u64 reserved;
- u32 load_options_size;
- u64 load_options;
- u64 image_base;
- __aligned_u64 image_size;
- unsigned int image_code_type;
- unsigned int image_data_type;
- unsigned long unload;
-} efi_loaded_image_64_t;
-
-typedef struct {
- u32 revision;
- void *parent_handle;
- efi_system_table_t *system_table;
- void *device_handle;
- void *file_path;
- void *reserved;
- u32 load_options_size;
- void *load_options;
- void *image_base;
- __aligned_u64 image_size;
- unsigned int image_code_type;
- unsigned int image_data_type;
- unsigned long unload;
-} efi_loaded_image_t;
-
-
-typedef struct {
- u64 size;
- u64 file_size;
- u64 phys_size;
- efi_time_t create_time;
- efi_time_t last_access_time;
- efi_time_t modification_time;
- __aligned_u64 attribute;
- efi_char16_t filename[1];
-} efi_file_info_t;
-
-typedef struct {
- u64 revision;
- u32 open;
- u32 close;
- u32 delete;
- u32 read;
- u32 write;
- u32 get_position;
- u32 set_position;
- u32 get_info;
- u32 set_info;
- u32 flush;
-} efi_file_handle_32_t;
-
-typedef struct {
- u64 revision;
- u64 open;
- u64 close;
- u64 delete;
- u64 read;
- u64 write;
- u64 get_position;
- u64 set_position;
- u64 get_info;
- u64 set_info;
- u64 flush;
-} efi_file_handle_64_t;
-
-typedef struct _efi_file_handle {
- u64 revision;
- efi_status_t (*open)(struct _efi_file_handle *,
- struct _efi_file_handle **,
- efi_char16_t *, u64, u64);
- efi_status_t (*close)(struct _efi_file_handle *);
- void *delete;
- efi_status_t (*read)(struct _efi_file_handle *, unsigned long *,
- void *);
- void *write;
- void *get_position;
- void *set_position;
- efi_status_t (*get_info)(struct _efi_file_handle *, efi_guid_t *,
- unsigned long *, void *);
- void *set_info;
- void *flush;
-} efi_file_handle_t;
-
-typedef struct _efi_file_io_interface {
- u64 revision;
- int (*open_volume)(struct _efi_file_io_interface *,
- efi_file_handle_t **);
-} efi_file_io_interface_t;
-
-#define EFI_FILE_MODE_READ 0x0000000000000001
-#define EFI_FILE_MODE_WRITE 0x0000000000000002
-#define EFI_FILE_MODE_CREATE 0x8000000000000000
-
typedef struct {
u32 version;
u32 length;
@@ -875,58 +574,111 @@ typedef struct {
#define EFI_PROPERTIES_TABLE_VERSION 0x00010000
#define EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA 0x1
+typedef struct {
+ u16 version;
+ u16 length;
+ u32 runtime_services_supported;
+} efi_rt_properties_table_t;
+
+#define EFI_RT_PROPERTIES_TABLE_VERSION 0x1
+
#define EFI_INVALID_TABLE_ADDR (~0UL)
+// BIT0 implies that Runtime code includes the forward control flow guard
+// instruction, such as X86 CET-IBT or ARM BTI.
+#define EFI_MEMORY_ATTRIBUTES_FLAGS_RT_FORWARD_CONTROL_FLOW_GUARD 0x1
+
typedef struct {
u32 version;
u32 num_entries;
u32 desc_size;
- u32 reserved;
+ u32 flags;
efi_memory_desc_t entry[0];
} efi_memory_attributes_table_t;
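/*
 * [Editor's sketch] The repurposed 'flags' word can be tested directly;
 * illustrative only (the in-tree consumer is the memattr code):
 */
static inline bool
example_rt_code_has_cfi(const efi_memory_attributes_table_t *tbl)
{
	return !!(tbl->flags &
		  EFI_MEMORY_ATTRIBUTES_FLAGS_RT_FORWARD_CONTROL_FLOW_GUARD);
}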
+typedef struct {
+ efi_guid_t signature_owner;
+ u8 signature_data[];
+} efi_signature_data_t;
+
+typedef struct {
+ efi_guid_t signature_type;
+ u32 signature_list_size;
+ u32 signature_header_size;
+ u32 signature_size;
+ u8 signature_header[];
+ /* efi_signature_data_t signatures[][] */
+} efi_signature_list_t;
+
+typedef u8 efi_sha256_hash_t[32];
+
+typedef struct {
+ efi_sha256_hash_t to_be_signed_hash;
+ efi_time_t time_of_revocation;
+} efi_cert_x509_sha256_t;
+
+extern unsigned long __ro_after_init efi_rng_seed; /* RNG Seed table */
+
/*
* All runtime access to EFI goes through this structure:
*/
extern struct efi {
- efi_system_table_t *systab; /* EFI system table */
- unsigned int runtime_version; /* Runtime services version */
- unsigned long mps; /* MPS table */
- unsigned long acpi; /* ACPI table (IA64 ext 0.71) */
- unsigned long acpi20; /* ACPI table (ACPI 2.0) */
- unsigned long smbios; /* SMBIOS table (32 bit entry point) */
- unsigned long smbios3; /* SMBIOS table (64 bit entry point) */
- unsigned long sal_systab; /* SAL system table */
- unsigned long boot_info; /* boot info table */
- unsigned long hcdp; /* HCDP table */
- unsigned long uga; /* UGA table */
- unsigned long uv_systab; /* UV system table */
- unsigned long fw_vendor; /* fw_vendor */
- unsigned long runtime; /* runtime table */
- unsigned long config_table; /* config tables */
- unsigned long esrt; /* ESRT table */
- unsigned long properties_table; /* properties table */
- unsigned long mem_attr_table; /* memory attributes table */
- unsigned long rng_seed; /* UEFI firmware random seed */
- efi_get_time_t *get_time;
- efi_set_time_t *set_time;
- efi_get_wakeup_time_t *get_wakeup_time;
- efi_set_wakeup_time_t *set_wakeup_time;
- efi_get_variable_t *get_variable;
- efi_get_next_variable_t *get_next_variable;
- efi_set_variable_t *set_variable;
- efi_set_variable_t *set_variable_nonblocking;
- efi_query_variable_info_t *query_variable_info;
- efi_query_variable_info_t *query_variable_info_nonblocking;
- efi_update_capsule_t *update_capsule;
- efi_query_capsule_caps_t *query_capsule_caps;
- efi_get_next_high_mono_count_t *get_next_high_mono_count;
- efi_reset_system_t *reset_system;
- efi_set_virtual_address_map_t *set_virtual_address_map;
- struct efi_memory_map memmap;
- unsigned long flags;
+ const efi_runtime_services_t *runtime; /* EFI runtime services table */
+ unsigned int runtime_version; /* Runtime services version */
+ unsigned int runtime_supported_mask;
+
+ unsigned long acpi; /* ACPI table (IA64 ext 0.71) */
+ unsigned long acpi20; /* ACPI table (ACPI 2.0) */
+ unsigned long smbios; /* SMBIOS table (32 bit entry point) */
+ unsigned long smbios3; /* SMBIOS table (64 bit entry point) */
+ unsigned long esrt; /* ESRT table */
+ unsigned long tpm_log; /* TPM2 Event Log table */
+ unsigned long tpm_final_log; /* TPM2 Final Events Log table */
+ unsigned long mokvar_table; /* MOK variable config table */
+ unsigned long coco_secret; /* Confidential computing secret table */
+
+ efi_get_time_t *get_time;
+ efi_set_time_t *set_time;
+ efi_get_wakeup_time_t *get_wakeup_time;
+ efi_set_wakeup_time_t *set_wakeup_time;
+ efi_get_variable_t *get_variable;
+ efi_get_next_variable_t *get_next_variable;
+ efi_set_variable_t *set_variable;
+ efi_set_variable_t *set_variable_nonblocking;
+ efi_query_variable_info_t *query_variable_info;
+ efi_query_variable_info_t *query_variable_info_nonblocking;
+ efi_update_capsule_t *update_capsule;
+ efi_query_capsule_caps_t *query_capsule_caps;
+ efi_get_next_high_mono_count_t *get_next_high_mono_count;
+ efi_reset_system_t *reset_system;
+
+ struct efi_memory_map memmap;
+ unsigned long flags;
} efi;
+#define EFI_RT_SUPPORTED_GET_TIME 0x0001
+#define EFI_RT_SUPPORTED_SET_TIME 0x0002
+#define EFI_RT_SUPPORTED_GET_WAKEUP_TIME 0x0004
+#define EFI_RT_SUPPORTED_SET_WAKEUP_TIME 0x0008
+#define EFI_RT_SUPPORTED_GET_VARIABLE 0x0010
+#define EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME 0x0020
+#define EFI_RT_SUPPORTED_SET_VARIABLE 0x0040
+#define EFI_RT_SUPPORTED_SET_VIRTUAL_ADDRESS_MAP 0x0080
+#define EFI_RT_SUPPORTED_CONVERT_POINTER 0x0100
+#define EFI_RT_SUPPORTED_GET_NEXT_HIGH_MONOTONIC_COUNT 0x0200
+#define EFI_RT_SUPPORTED_RESET_SYSTEM 0x0400
+#define EFI_RT_SUPPORTED_UPDATE_CAPSULE 0x0800
+#define EFI_RT_SUPPORTED_QUERY_CAPSULE_CAPABILITIES 0x1000
+#define EFI_RT_SUPPORTED_QUERY_VARIABLE_INFO 0x2000
+
+#define EFI_RT_SUPPORTED_ALL 0x3fff
+
+#define EFI_RT_SUPPORTED_TIME_SERVICES 0x0003
+#define EFI_RT_SUPPORTED_WAKEUP_SERVICES 0x000c
+#define EFI_RT_SUPPORTED_VARIABLE_SERVICES 0x0070
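/*
 * [Editor's usage sketch] Callers should test these bits through
 * efi_rt_services_supported() (defined further below) rather than poking
 * efi.runtime_supported_mask directly, e.g.:
 *
 *	if (!efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
 *		return -ENODEV;
 *
 * The helper requires all bits of the mask to be set, so the composite
 * masks above check several related calls at once.
 */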
+
+extern struct mm_struct efi_mm;
+
static inline int
efi_guidcmp (efi_guid_t left, efi_guid_t right)
{
@@ -941,21 +693,17 @@ efi_guid_to_str(efi_guid_t *guid, char *out)
}
extern void efi_init (void);
-extern void *efi_get_pal_addr (void);
-extern void efi_map_pal_code (void);
-extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
-extern void efi_gettimeofday (struct timespec64 *ts);
+extern void efi_earlycon_reprobe(void);
+#ifdef CONFIG_EFI
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
+#else
+static inline void efi_enter_virtual_mode (void) {}
+#endif
#ifdef CONFIG_X86
-extern void efi_late_init(void);
-extern void efi_free_boot_services(void);
extern efi_status_t efi_query_variable_store(u32 attributes,
unsigned long size,
bool nonblocking);
-extern void efi_find_mirror(void);
#else
-static inline void efi_late_init(void) {}
-static inline void efi_free_boot_services(void) {}
static inline efi_status_t efi_query_variable_store(u32 attributes,
unsigned long size,
@@ -966,46 +714,41 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
#endif
extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
-extern phys_addr_t __init efi_memmap_alloc(unsigned int num_entries);
+extern int __init __efi_memmap_init(struct efi_memory_map_data *data);
extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
extern void __init efi_memmap_unmap(void);
-extern int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map);
-extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
- struct range *range);
-extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap,
- void *buf, struct efi_mem_range *mem);
-extern int efi_config_init(efi_config_table_type_t *arch_tables);
#ifdef CONFIG_EFI_ESRT
extern void __init efi_esrt_init(void);
#else
static inline void efi_esrt_init(void) { }
#endif
-extern int efi_config_parse_tables(void *config_tables, int count, int sz,
- efi_config_table_type_t *arch_tables);
+extern int efi_config_parse_tables(const efi_config_table_t *config_tables,
+ int count,
+ const efi_config_table_type_t *arch_tables);
+extern int efi_systab_check_header(const efi_table_hdr_t *systab_hdr);
+extern void efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
+ unsigned long fw_vendor);
extern u64 efi_get_iobase (void);
-extern u32 efi_mem_type (unsigned long phys_addr);
+extern int efi_mem_type(unsigned long phys_addr);
extern u64 efi_mem_attributes (unsigned long phys_addr);
extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int __init efi_uart_console_only (void);
extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
+extern int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern void efi_mem_reserve(phys_addr_t addr, u64 size);
+extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource, struct resource *bss_resource);
-extern void efi_reserve_boot_services(void);
-extern int efi_get_fdt_params(struct efi_fdt_params *params);
+extern u64 efi_get_fdt_params(struct efi_memory_map_data *data);
extern struct kobject *efi_kobj;
extern int efi_reboot_quirk_mode;
extern bool efi_poweroff_required(void);
-#ifdef CONFIG_EFI_FAKE_MEMMAP
-extern void __init efi_fake_memmap(void);
-#else
-static inline void efi_fake_memmap(void) { }
-#endif
+extern unsigned long efi_mem_attr_table;
/*
* efi_memattr_perm_setter - arch specific callback function passed into
@@ -1014,12 +757,34 @@ static inline void efi_fake_memmap(void) { }
* argument in the page tables referred to by the
* first argument.
*/
-typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *);
+typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *, bool);
extern int efi_memattr_init(void);
extern int efi_memattr_apply_permissions(struct mm_struct *mm,
efi_memattr_perm_setter fn);
+/*
+ * efi_early_memdesc_ptr - get the n-th EFI memmap descriptor
+ * @map: the start of efi memmap
+ * @desc_size: the size of space for each EFI memmap descriptor
+ * @n: the index of efi memmap descriptor
+ *
+ * The EFI boot services provide the GetMemoryMap() function to get a copy
+ * of the current memory map, which is an array of memory descriptors, each
+ * of which describes a contiguous block of memory. The call also returns
+ * the size of the map and the size of each descriptor.
+ *
+ * Note that per section 6.2 of UEFI Spec 2.6 Errata A, the returned size of
+ * each descriptor might not be equal to sizeof(efi_memory_desc_t), since
+ * efi_memory_desc_t may be extended in the future. Thus the OS MUST use the
+ * returned size of the descriptor to find the start of each
+ * efi_memory_desc_t in the memory map array. This should only be used
+ * during bootup since for_each_efi_memory_desc_xxx() is available after the
+ * kernel initializes the EFI subsystem to set up struct efi_memory_map.
+ */
+#define efi_early_memdesc_ptr(map, desc_size, n) \
+ (efi_memory_desc_t *)((void *)(map) + ((n) * (desc_size)))
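/*
 * [Editor's sketch] The intended iteration pattern; 'map', 'map_size' and
 * 'desc_size' are assumed to come straight from GetMemoryMap():
 */
static inline void example_walk_early_memmap(void *map,
					     unsigned long map_size,
					     unsigned long desc_size)
{
	unsigned long i;

	for (i = 0; i < map_size / desc_size; i++) {
		efi_memory_desc_t *md = efi_early_memdesc_ptr(map, desc_size, i);

		/* inspect md->type, md->phys_addr, md->num_pages, ... */
		(void)md;
	}
}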
+
/* Iterate through an efi_memory_map */
#define for_each_efi_memory_desc_in_map(m, md) \
for ((md) = (m)->map; \
@@ -1042,6 +807,15 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
char * __init efi_md_typeattr_format(char *buf, size_t size,
const efi_memory_desc_t *md);
+
+typedef void (*efi_element_handler_t)(const char *source,
+ const void *element_data,
+ size_t element_size);
+extern int __init parse_efi_signature_list(
+ const char *source,
+ const void *data, size_t size,
+ efi_element_handler_t (*get_handler_for_guid)(const efi_guid_t *));
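/*
 * [Editor's usage sketch] A caller supplies a GUID-to-handler lookup plus
 * per-element handlers; names below are hypothetical (the real user is
 * the platform certificate loading code):
 */
static void example_handle_x509(const char *source, const void *data,
				size_t len)
{
	/* e.g. queue the DER certificate for a keyring */
}

static efi_element_handler_t
example_get_handler(const efi_guid_t *sig_type)
{
	if (!efi_guidcmp(*sig_type, EFI_CERT_X509_GUID))
		return example_handle_x509;
	return NULL;	/* skip unknown signature types */
}

/* then: parse_efi_signature_list("db", buf, size, example_get_handler); */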
+
/**
* efi_range_is_wc - check the WC bit on an address range
* @start: starting kvirt address
@@ -1081,6 +855,8 @@ extern int __init efi_setup_pcdp_console(char *);
#define EFI_DBG 8 /* Print additional debug info at runtime */
#define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */
#define EFI_MEM_ATTR 10 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */
+#define EFI_MEM_NO_SOFT_RESERVE 11 /* Is the kernel configured to ignore soft reservations? */
+#define EFI_PRESERVE_BS_REGIONS 12 /* Are EFI boot-services memory segments available? */
#ifdef CONFIG_EFI
/*
@@ -1091,6 +867,20 @@ static inline bool efi_enabled(int feature)
return test_bit(feature, &efi.flags) != 0;
}
extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
+
+bool __pure __efi_soft_reserve_enabled(void);
+
+static inline bool __pure efi_soft_reserve_enabled(void)
+{
+ return IS_ENABLED(CONFIG_EFI_SOFT_RESERVE)
+ && __efi_soft_reserve_enabled();
+}
+
+static inline bool efi_rt_services_supported(unsigned int mask)
+{
+ return (efi.runtime_supported_mask & mask) == mask;
+}
+extern void efi_find_mirror(void);
#else
static inline bool efi_enabled(int feature)
{
@@ -1099,11 +889,17 @@ static inline bool efi_enabled(int feature)
static inline void
efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {}
-static inline bool
-efi_capsule_pending(int *reset_type)
+static inline bool efi_soft_reserve_enabled(void)
{
return false;
}
+
+static inline bool efi_rt_services_supported(unsigned int mask)
+{
+ return false;
+}
+
+static inline void efi_find_mirror(void) {}
#endif
extern int efi_status_to_err(efi_status_t status);
@@ -1119,7 +915,7 @@ extern int efi_status_to_err(efi_status_t status);
#define EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS 0x0000000000000020
#define EFI_VARIABLE_APPEND_WRITE 0x0000000000000040
-#define EFI_VARIABLE_MASK (EFI_VARIABLE_NON_VOLATILE | \
+#define EFI_VARIABLE_MASK (EFI_VARIABLE_NON_VOLATILE | \
EFI_VARIABLE_BOOTSERVICE_ACCESS | \
EFI_VARIABLE_RUNTIME_ACCESS | \
EFI_VARIABLE_HARDWARE_ERROR_RECORD | \
@@ -1133,13 +929,6 @@ extern int efi_status_to_err(efi_status_t status);
#define EFI_VARIABLE_GUID_LEN UUID_STRING_LEN
/*
- * The type of search to perform when calling boottime->locate_handle
- */
-#define EFI_LOCATE_ALL_HANDLES 0
-#define EFI_LOCATE_BY_REGISTER_NOTIFY 1
-#define EFI_LOCATE_BY_PROTOCOL 2
-
-/*
* EFI Device Path information
*/
#define EFI_DEV_HW 0x01
@@ -1171,6 +960,7 @@ extern int efi_status_to_err(efi_status_t status);
#define EFI_DEV_MEDIA_VENDOR 3
#define EFI_DEV_MEDIA_FILE 4
#define EFI_DEV_MEDIA_PROTOCOL 5
+#define EFI_DEV_MEDIA_REL_OFFSET 8
#define EFI_DEV_BIOS_BOOT 0x05
#define EFI_DEV_END_PATH 0x7F
#define EFI_DEV_END_PATH2 0xFF
@@ -1178,30 +968,60 @@ extern int efi_status_to_err(efi_status_t status);
#define EFI_DEV_END_ENTIRE 0xFF
struct efi_generic_dev_path {
- u8 type;
- u8 sub_type;
- u16 length;
-} __attribute ((packed));
+ u8 type;
+ u8 sub_type;
+ u16 length;
+} __packed;
+
+struct efi_acpi_dev_path {
+ struct efi_generic_dev_path header;
+ u32 hid;
+ u32 uid;
+} __packed;
+
+struct efi_pci_dev_path {
+ struct efi_generic_dev_path header;
+ u8 fn;
+ u8 dev;
+} __packed;
+
+struct efi_vendor_dev_path {
+ struct efi_generic_dev_path header;
+ efi_guid_t vendorguid;
+ u8 vendordata[];
+} __packed;
+
+struct efi_rel_offset_dev_path {
+ struct efi_generic_dev_path header;
+ u32 reserved;
+ u64 starting_offset;
+ u64 ending_offset;
+} __packed;
+
+struct efi_mem_mapped_dev_path {
+ struct efi_generic_dev_path header;
+ u32 memory_type;
+ u64 starting_addr;
+ u64 ending_addr;
+} __packed;
+
+struct efi_file_path_dev_path {
+ struct efi_generic_dev_path header;
+ efi_char16_t filename[];
+} __packed;
struct efi_dev_path {
- u8 type; /* can be replaced with unnamed */
- u8 sub_type; /* struct efi_generic_dev_path; */
- u16 length; /* once we've moved to -std=c11 */
union {
- struct {
- u32 hid;
- u32 uid;
- } acpi;
- struct {
- u8 fn;
- u8 dev;
- } pci;
+ struct efi_generic_dev_path header;
+ struct efi_acpi_dev_path acpi;
+ struct efi_pci_dev_path pci;
+ struct efi_vendor_dev_path vendor;
+ struct efi_rel_offset_dev_path rel_offset;
};
-} __attribute ((packed));
+} __packed;
-#if IS_ENABLED(CONFIG_EFI_DEV_PATH_PARSER)
-struct device *efi_get_device_by_path(struct efi_dev_path **node, size_t *len);
-#endif
+struct device *efi_get_device_by_path(const struct efi_dev_path **node,
+ size_t *len);
static inline void memrange_efi_to_native(u64 *addr, u64 *npages)
{
@@ -1226,7 +1046,6 @@ struct efivar_operations {
struct efivars {
struct kset *kset;
- struct kobject *kobject;
const struct efivar_operations *ops;
};
@@ -1239,176 +1058,36 @@ struct efivars {
#define EFI_VAR_NAME_LEN 1024
-struct efi_variable {
- efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)];
- efi_guid_t VendorGuid;
- unsigned long DataSize;
- __u8 Data[1024];
- efi_status_t Status;
- __u32 Attributes;
-} __attribute__((packed));
-
-struct efivar_entry {
- struct efi_variable var;
- struct list_head list;
- struct kobject kobj;
- bool scanning;
- bool deleting;
-};
-
-typedef struct {
- u32 reset;
- u32 output_string;
- u32 test_string;
-} efi_simple_text_output_protocol_32_t;
-
-typedef struct {
- u64 reset;
- u64 output_string;
- u64 test_string;
-} efi_simple_text_output_protocol_64_t;
-
-struct efi_simple_text_output_protocol {
- void *reset;
- efi_status_t (*output_string)(void *, void *);
- void *test_string;
-};
-
-#define PIXEL_RGB_RESERVED_8BIT_PER_COLOR 0
-#define PIXEL_BGR_RESERVED_8BIT_PER_COLOR 1
-#define PIXEL_BIT_MASK 2
-#define PIXEL_BLT_ONLY 3
-#define PIXEL_FORMAT_MAX 4
-
-struct efi_pixel_bitmask {
- u32 red_mask;
- u32 green_mask;
- u32 blue_mask;
- u32 reserved_mask;
-};
-
-struct efi_graphics_output_mode_info {
- u32 version;
- u32 horizontal_resolution;
- u32 vertical_resolution;
- int pixel_format;
- struct efi_pixel_bitmask pixel_information;
- u32 pixels_per_scan_line;
-} __packed;
-
-struct efi_graphics_output_protocol_mode_32 {
- u32 max_mode;
- u32 mode;
- u32 info;
- u32 size_of_info;
- u64 frame_buffer_base;
- u32 frame_buffer_size;
-} __packed;
-
-struct efi_graphics_output_protocol_mode_64 {
- u32 max_mode;
- u32 mode;
- u64 info;
- u64 size_of_info;
- u64 frame_buffer_base;
- u64 frame_buffer_size;
-} __packed;
-
-struct efi_graphics_output_protocol_mode {
- u32 max_mode;
- u32 mode;
- unsigned long info;
- unsigned long size_of_info;
- u64 frame_buffer_base;
- unsigned long frame_buffer_size;
-} __packed;
-
-struct efi_graphics_output_protocol_32 {
- u32 query_mode;
- u32 set_mode;
- u32 blt;
- u32 mode;
-};
-
-struct efi_graphics_output_protocol_64 {
- u64 query_mode;
- u64 set_mode;
- u64 blt;
- u64 mode;
-};
-
-struct efi_graphics_output_protocol {
- unsigned long query_mode;
- unsigned long set_mode;
- unsigned long blt;
- struct efi_graphics_output_protocol_mode *mode;
-};
-
-typedef efi_status_t (*efi_graphics_output_protocol_query_mode)(
- struct efi_graphics_output_protocol *, u32, unsigned long *,
- struct efi_graphics_output_mode_info **);
-
-extern struct list_head efivar_sysfs_list;
-
-static inline void
-efivar_unregister(struct efivar_entry *var)
-{
- kobject_put(&var->kobj);
-}
-
int efivars_register(struct efivars *efivars,
- const struct efivar_operations *ops,
- struct kobject *kobject);
+ const struct efivar_operations *ops);
int efivars_unregister(struct efivars *efivars);
-struct kobject *efivars_kobject(void);
-
-int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
- void *data, bool duplicates, struct list_head *head);
-
-int efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
-int efivar_entry_remove(struct efivar_entry *entry);
-int __efivar_entry_delete(struct efivar_entry *entry);
-int efivar_entry_delete(struct efivar_entry *entry);
-
-int efivar_entry_size(struct efivar_entry *entry, unsigned long *size);
-int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
- unsigned long *size, void *data);
-int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
- unsigned long *size, void *data);
-int efivar_entry_set(struct efivar_entry *entry, u32 attributes,
- unsigned long size, void *data, struct list_head *head);
-int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
- unsigned long *size, void *data, bool *set);
-int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
- bool block, unsigned long size, void *data);
-
-int efivar_entry_iter_begin(void);
-void efivar_entry_iter_end(void);
+#ifdef CONFIG_EFI
+bool efivar_is_available(void);
+#else
+static inline bool efivar_is_available(void) { return false; }
+#endif
-int __efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
- struct list_head *head, void *data,
- struct efivar_entry **prev);
-int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
- struct list_head *head, void *data);
+bool efivar_supports_writes(void);
-struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
- struct list_head *head, bool remove);
+int efivar_lock(void);
+int efivar_trylock(void);
+void efivar_unlock(void);
-bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
- unsigned long data_size);
-bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
- size_t len);
+efi_status_t efivar_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 *attr, unsigned long *size, void *data);
-extern struct work_struct efivar_work;
-void efivar_run_worker(void);
+efi_status_t efivar_get_next_variable(unsigned long *name_size,
+ efi_char16_t *name, efi_guid_t *vendor);
-#if defined(CONFIG_EFI_VARS) || defined(CONFIG_EFI_VARS_MODULE)
-int efivars_sysfs_init(void);
+efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size,
+ void *data, bool nonblocking);
-#define EFIVARS_DATA_SIZE_MAX 1024
+efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size, void *data);
-#endif /* CONFIG_EFI_VARS */
+#if IS_ENABLED(CONFIG_EFI_CAPSULE_LOADER)
extern bool efi_capsule_pending(int *reset_type);
extern int efi_capsule_supported(efi_guid_t guid, u32 flags,
@@ -1416,78 +1095,18 @@ extern int efi_capsule_supported(efi_guid_t guid, u32 flags,
extern int efi_capsule_update(efi_capsule_header_t *capsule,
phys_addr_t *pages);
-
-#ifdef CONFIG_EFI_RUNTIME_MAP
-int efi_runtime_map_init(struct kobject *);
-int efi_get_runtime_map_size(void);
-int efi_get_runtime_map_desc_size(void);
-int efi_runtime_map_copy(void *buf, size_t bufsz);
#else
-static inline int efi_runtime_map_init(struct kobject *kobj)
-{
- return 0;
-}
-
-static inline int efi_get_runtime_map_size(void)
-{
- return 0;
-}
-
-static inline int efi_get_runtime_map_desc_size(void)
-{
- return 0;
-}
-
-static inline int efi_runtime_map_copy(void *buf, size_t bufsz)
-{
- return 0;
-}
-
+static inline bool efi_capsule_pending(int *reset_type) { return false; }
#endif
-/* prototypes shared between arch specific and generic stub code */
-
-void efi_printk(efi_system_table_t *sys_table_arg, char *str);
-
-void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
- unsigned long addr);
-
-char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
- efi_loaded_image_t *image, int *cmd_line_len);
-
-efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
- struct efi_boot_memmap *map);
-
-efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
- unsigned long size, unsigned long align,
- unsigned long *addr);
-
-efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
- unsigned long size, unsigned long align,
- unsigned long *addr, unsigned long max);
-
-efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
- unsigned long *image_addr,
- unsigned long image_size,
- unsigned long alloc_size,
- unsigned long preferred_addr,
- unsigned long alignment);
-
-efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
- efi_loaded_image_t *image,
- char *cmd_line, char *option_string,
- unsigned long max_addr,
- unsigned long *load_addr,
- unsigned long *load_size);
-
-efi_status_t efi_parse_options(char const *cmdline);
-
-efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
- struct screen_info *si, efi_guid_t *proto,
- unsigned long size);
+#ifdef CONFIG_EFI
+extern bool efi_runtime_disabled(void);
+#else
+static inline bool efi_runtime_disabled(void) { return true; }
+#endif
-bool efi_runtime_disabled(void);
extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
+extern unsigned long efi_call_virt_save_flags(void);
enum efi_secureboot_mode {
efi_secureboot_mode_unset,
@@ -1495,7 +1114,36 @@ enum efi_secureboot_mode {
efi_secureboot_mode_disabled,
efi_secureboot_mode_enabled,
};
-enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table);
+
+static inline
+enum efi_secureboot_mode efi_get_secureboot_mode(efi_get_variable_t *get_var)
+{
+ u8 secboot, setupmode = 0;
+ efi_status_t status;
+ unsigned long size;
+
+ size = sizeof(secboot);
+ status = get_var(L"SecureBoot", &EFI_GLOBAL_VARIABLE_GUID, NULL, &size,
+ &secboot);
+ if (status == EFI_NOT_FOUND)
+ return efi_secureboot_mode_disabled;
+ if (status != EFI_SUCCESS)
+ return efi_secureboot_mode_unknown;
+
+ size = sizeof(setupmode);
+ get_var(L"SetupMode", &EFI_GLOBAL_VARIABLE_GUID, NULL, &size, &setupmode);
+ if (secboot == 0 || setupmode == 1)
+ return efi_secureboot_mode_disabled;
+ return efi_secureboot_mode_enabled;
+}
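/*
 * [Editor's note] The helper is deliberately parameterized on the
 * GetVariable implementation so it can run both in the EFI stub (with a
 * boot-services wrapper) and in the kernel proper, e.g. (illustrative):
 *
 *	if (efi_get_secureboot_mode(efi.get_variable) ==
 *	    efi_secureboot_mode_enabled)
 *		pr_info("Secure Boot enabled\n");
 */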
+
+#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
+void efi_check_for_embedded_firmwares(void);
+#else
+static inline void efi_check_for_embedded_firmwares(void) { }
+#endif
+
+#define arch_efi_call_virt(p, f, args...) ((p)->f(args))
/*
* Arch code can implement the following three template macros, avoiding
@@ -1524,7 +1172,7 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table);
\
arch_efi_call_virt_setup(); \
\
- local_save_flags(__flags); \
+ __flags = efi_call_virt_save_flags(); \
__s = arch_efi_call_virt(p, f, args); \
efi_call_virt_check_flags(__flags, __stringify(f)); \
\
@@ -1539,27 +1187,155 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table);
\
arch_efi_call_virt_setup(); \
\
- local_save_flags(__flags); \
+ __flags = efi_call_virt_save_flags(); \
arch_efi_call_virt(p, f, args); \
efi_call_virt_check_flags(__flags, __stringify(f)); \
\
arch_efi_call_virt_teardown(); \
})
-typedef efi_status_t (*efi_exit_boot_map_processing)(
- efi_system_table_t *sys_table_arg,
- struct efi_boot_memmap *map,
- void *priv);
-
-efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
- void *handle,
- struct efi_boot_memmap *map,
- void *priv,
- efi_exit_boot_map_processing priv_func);
+#define EFI_RANDOM_SEED_SIZE 32U // BLAKE2S_HASH_SIZE
struct linux_efi_random_seed {
u32 size;
u8 bits[];
};
+struct linux_efi_tpm_eventlog {
+ u32 size;
+ u32 final_events_preboot_size;
+ u8 version;
+ u8 log[];
+};
+
+extern int efi_tpm_eventlog_init(void);
+
+struct efi_tcg2_final_events_table {
+ u64 version;
+ u64 nr_events;
+ u8 events[];
+};
+extern int efi_tpm_final_log_size;
+
+extern unsigned long rci2_table_phys;
+
+/*
+ * efi_runtime_service() function identifiers.
+ * "NONE" is used by efi_recover_from_page_fault() to check if the page
+ * fault happened while executing an efi runtime service.
+ */
+enum efi_rts_ids {
+ EFI_NONE,
+ EFI_GET_TIME,
+ EFI_SET_TIME,
+ EFI_GET_WAKEUP_TIME,
+ EFI_SET_WAKEUP_TIME,
+ EFI_GET_VARIABLE,
+ EFI_GET_NEXT_VARIABLE,
+ EFI_SET_VARIABLE,
+ EFI_QUERY_VARIABLE_INFO,
+ EFI_GET_NEXT_HIGH_MONO_COUNT,
+ EFI_RESET_SYSTEM,
+ EFI_UPDATE_CAPSULE,
+ EFI_QUERY_CAPSULE_CAPS,
+};
+
+/*
+ * efi_runtime_work: Details of EFI Runtime Service work
+ * @arg<1-5>: EFI Runtime Service function arguments
+ * @status: Status of executing EFI Runtime Service
+ * @efi_rts_id: EFI Runtime Service function identifier
+ * @efi_rts_comp: Struct used for handling completions
+ */
+struct efi_runtime_work {
+ void *arg1;
+ void *arg2;
+ void *arg3;
+ void *arg4;
+ void *arg5;
+ efi_status_t status;
+ struct work_struct work;
+ enum efi_rts_ids efi_rts_id;
+ struct completion efi_rts_comp;
+};
+
+extern struct efi_runtime_work efi_rts_work;
+
+/* Workqueue to queue EFI Runtime Services */
+extern struct workqueue_struct *efi_rts_wq;
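/*
 * [Editor's sketch] Simplified shape of the dispatch done by the runtime
 * wrappers: arguments are marshalled into efi_rts_work, executed from the
 * dedicated workqueue, and the caller sleeps on the completion:
 *
 *	efi_rts_work.efi_rts_id = EFI_GET_TIME;
 *	efi_rts_work.arg1 = tm;
 *	queue_work(efi_rts_wq, &efi_rts_work.work);
 *	wait_for_completion(&efi_rts_work.efi_rts_comp);
 *	return efi_rts_work.status;
 */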
+
+struct linux_efi_memreserve {
+ int size; // allocated size of the array
+ atomic_t count; // number of entries used
+ phys_addr_t next; // pa of next struct instance
+ struct {
+ phys_addr_t base;
+ phys_addr_t size;
+ } entry[];
+};
+
+#define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \
+ / sizeof_field(struct linux_efi_memreserve, entry[0]))
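/*
 * [Editor's worked example] With 8-byte phys_addr_t the header above is
 * 16 bytes and each entry 16 bytes, so a 4 KiB allocation gives
 * EFI_MEMRESERVE_COUNT(4096) == (4096 - 16) / 16 == 255 entries before
 * another instance must be chained via 'next' (exact figures depend on
 * the architecture's type sizes and padding).
 */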
+
+void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size);
+
+char *efi_systab_show_arch(char *str);
+
+/*
+ * The LINUX_EFI_MOK_VARIABLE_TABLE_GUID config table can be provided
+ * to the kernel by an EFI boot loader. The table contains a packed
+ * sequence of these entries, one for each named MOK variable.
+ * The sequence is terminated by an entry with a completely NULL
+ * name and 0 data size.
+ */
+struct efi_mokvar_table_entry {
+ char name[256];
+ u64 data_size;
+ u8 data[];
+} __attribute((packed));
+
+#ifdef CONFIG_LOAD_UEFI_KEYS
+extern void __init efi_mokvar_table_init(void);
+extern struct efi_mokvar_table_entry *efi_mokvar_entry_next(
+ struct efi_mokvar_table_entry **mokvar_entry);
+extern struct efi_mokvar_table_entry *efi_mokvar_entry_find(const char *name);
+#else
+static inline void efi_mokvar_table_init(void) { }
+static inline struct efi_mokvar_table_entry *efi_mokvar_entry_next(
+ struct efi_mokvar_table_entry **mokvar_entry)
+{
+ return NULL;
+}
+static inline struct efi_mokvar_table_entry *efi_mokvar_entry_find(
+ const char *name)
+{
+ return NULL;
+}
+#endif
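/*
 * [Editor's usage sketch] Walking the table with the accessors above;
 * illustrative only:
 */
static inline void example_dump_mokvars(void)
{
	struct efi_mokvar_table_entry *ent = NULL;

	while ((ent = efi_mokvar_entry_next(&ent)))
		pr_info("MOK var %s: %llu bytes\n", ent->name,
			(unsigned long long)ent->data_size);
}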
+
+extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
+
+struct linux_efi_coco_secret_area {
+ u64 base_pa;
+ u64 size;
+};
+
+struct linux_efi_initrd {
+ unsigned long base;
+ unsigned long size;
+};
+
+/* Header of a populated EFI secret area */
+#define EFI_SECRET_TABLE_HEADER_GUID EFI_GUID(0x1e74f542, 0x71dd, 0x4d66, 0x96, 0x3e, 0xef, 0x42, 0x87, 0xff, 0x17, 0x3b)
+
+bool xen_efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table);
+
+static inline
+bool efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table)
+{
+ if (!IS_ENABLED(CONFIG_XEN_EFI))
+ return true;
+ return xen_efi_config_table_is_usable(guid, table);
+}
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/efi_embedded_fw.h b/include/linux/efi_embedded_fw.h
new file mode 100644
index 000000000000..a97a12bb2c9e
--- /dev/null
+++ b/include/linux/efi_embedded_fw.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_EFI_EMBEDDED_FW_H
+#define _LINUX_EFI_EMBEDDED_FW_H
+
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+
+#define EFI_EMBEDDED_FW_PREFIX_LEN 8
+
+/*
+ * This struct is private to the efi-embedded fw implementation.
+ * They are in this header for use by lib/test_firmware.c only!
+ */
+struct efi_embedded_fw {
+ struct list_head list;
+ const char *name;
+ const u8 *data;
+ size_t length;
+};
+
+/**
+ * struct efi_embedded_fw_desc - Used by the EFI embedded-fw code to
+ * search for embedded firmware images.
+ *
+ * @name: Name to register the firmware with if found
+ * @prefix: First 8 bytes of the firmware
+ * @length: Length of the firmware in bytes including prefix
+ * @sha256: SHA256 of the firmware
+ */
+struct efi_embedded_fw_desc {
+ const char *name;
+ u8 prefix[EFI_EMBEDDED_FW_PREFIX_LEN];
+ u32 length;
+ u8 sha256[32];
+};
+
+extern const struct dmi_system_id touchscreen_dmi_table[];
+
+int efi_get_embedded_fw(const char *name, const u8 **dat, size_t *sz);
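/*
 * [Editor's usage sketch, hypothetical driver code] Look the blob up by
 * the name it was registered under and hand it to the hardware:
 */
static inline int example_load_embedded_fw(const char *name)
{
	const u8 *data;
	size_t size;
	int ret;

	ret = efi_get_embedded_fw(name, &data, &size);
	if (ret)
		return ret;	/* not found in any EFI code/data segment */

	/* ... upload 'data' / 'size' to the device ... */
	return 0;
}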
+
+#endif
diff --git a/include/linux/efs_vh.h b/include/linux/efs_vh.h
index 8a11150c61fe..206c5270f7b8 100644
--- a/include/linux/efs_vh.h
+++ b/include/linux/efs_vh.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* efs_vh.h
*
diff --git a/include/linux/eisa.h b/include/linux/eisa.h
index 6925249a5ac6..b012e30afebd 100644
--- a/include/linux/eisa.h
+++ b/include/linux/eisa.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_EISA_H
#define _LINUX_EISA_H
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
deleted file mode 100644
index 5bc8f8682a3e..000000000000
--- a/include/linux/elevator.h
+++ /dev/null
@@ -1,270 +0,0 @@
-#ifndef _LINUX_ELEVATOR_H
-#define _LINUX_ELEVATOR_H
-
-#include <linux/percpu.h>
-#include <linux/hashtable.h>
-
-#ifdef CONFIG_BLOCK
-
-struct io_cq;
-struct elevator_type;
-#ifdef CONFIG_BLK_DEBUG_FS
-struct blk_mq_debugfs_attr;
-#endif
-
-/*
- * Return values from elevator merger
- */
-enum elv_merge {
- ELEVATOR_NO_MERGE = 0,
- ELEVATOR_FRONT_MERGE = 1,
- ELEVATOR_BACK_MERGE = 2,
- ELEVATOR_DISCARD_MERGE = 3,
-};
-
-typedef enum elv_merge (elevator_merge_fn) (struct request_queue *, struct request **,
- struct bio *);
-
-typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *);
-
-typedef void (elevator_merged_fn) (struct request_queue *, struct request *, enum elv_merge);
-
-typedef int (elevator_allow_bio_merge_fn) (struct request_queue *,
- struct request *, struct bio *);
-
-typedef int (elevator_allow_rq_merge_fn) (struct request_queue *,
- struct request *, struct request *);
-
-typedef void (elevator_bio_merged_fn) (struct request_queue *,
- struct request *, struct bio *);
-
-typedef int (elevator_dispatch_fn) (struct request_queue *, int);
-
-typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
-typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
-typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
-typedef int (elevator_may_queue_fn) (struct request_queue *, unsigned int);
-
-typedef void (elevator_init_icq_fn) (struct io_cq *);
-typedef void (elevator_exit_icq_fn) (struct io_cq *);
-typedef int (elevator_set_req_fn) (struct request_queue *, struct request *,
- struct bio *, gfp_t);
-typedef void (elevator_put_req_fn) (struct request *);
-typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
-typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
-
-typedef int (elevator_init_fn) (struct request_queue *,
- struct elevator_type *e);
-typedef void (elevator_exit_fn) (struct elevator_queue *);
-typedef void (elevator_registered_fn) (struct request_queue *);
-
-struct elevator_ops
-{
- elevator_merge_fn *elevator_merge_fn;
- elevator_merged_fn *elevator_merged_fn;
- elevator_merge_req_fn *elevator_merge_req_fn;
- elevator_allow_bio_merge_fn *elevator_allow_bio_merge_fn;
- elevator_allow_rq_merge_fn *elevator_allow_rq_merge_fn;
- elevator_bio_merged_fn *elevator_bio_merged_fn;
-
- elevator_dispatch_fn *elevator_dispatch_fn;
- elevator_add_req_fn *elevator_add_req_fn;
- elevator_activate_req_fn *elevator_activate_req_fn;
- elevator_deactivate_req_fn *elevator_deactivate_req_fn;
-
- elevator_completed_req_fn *elevator_completed_req_fn;
-
- elevator_request_list_fn *elevator_former_req_fn;
- elevator_request_list_fn *elevator_latter_req_fn;
-
- elevator_init_icq_fn *elevator_init_icq_fn; /* see iocontext.h */
- elevator_exit_icq_fn *elevator_exit_icq_fn; /* ditto */
-
- elevator_set_req_fn *elevator_set_req_fn;
- elevator_put_req_fn *elevator_put_req_fn;
-
- elevator_may_queue_fn *elevator_may_queue_fn;
-
- elevator_init_fn *elevator_init_fn;
- elevator_exit_fn *elevator_exit_fn;
- elevator_registered_fn *elevator_registered_fn;
-};
-
-struct blk_mq_alloc_data;
-struct blk_mq_hw_ctx;
-
-struct elevator_mq_ops {
- int (*init_sched)(struct request_queue *, struct elevator_type *);
- void (*exit_sched)(struct elevator_queue *);
- int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
- void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
-
- bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
- bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
- int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
- void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
- void (*requests_merged)(struct request_queue *, struct request *, struct request *);
- void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *);
- void (*prepare_request)(struct request *, struct bio *bio);
- void (*finish_request)(struct request *);
- void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
- struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
- bool (*has_work)(struct blk_mq_hw_ctx *);
- void (*completed_request)(struct request *);
- void (*started_request)(struct request *);
- void (*requeue_request)(struct request *);
- struct request *(*former_request)(struct request_queue *, struct request *);
- struct request *(*next_request)(struct request_queue *, struct request *);
- void (*init_icq)(struct io_cq *);
- void (*exit_icq)(struct io_cq *);
-};
-
-#define ELV_NAME_MAX (16)
-
-struct elv_fs_entry {
- struct attribute attr;
- ssize_t (*show)(struct elevator_queue *, char *);
- ssize_t (*store)(struct elevator_queue *, const char *, size_t);
-};
-
-/*
- * identifies an elevator type, such as AS or deadline
- */
-struct elevator_type
-{
- /* managed by elevator core */
- struct kmem_cache *icq_cache;
-
- /* fields provided by elevator implementation */
- union {
- struct elevator_ops sq;
- struct elevator_mq_ops mq;
- } ops;
- size_t icq_size; /* see iocontext.h */
- size_t icq_align; /* ditto */
- struct elv_fs_entry *elevator_attrs;
- char elevator_name[ELV_NAME_MAX];
- struct module *elevator_owner;
- bool uses_mq;
-#ifdef CONFIG_BLK_DEBUG_FS
- const struct blk_mq_debugfs_attr *queue_debugfs_attrs;
- const struct blk_mq_debugfs_attr *hctx_debugfs_attrs;
-#endif
-
- /* managed by elevator core */
- char icq_cache_name[ELV_NAME_MAX + 6]; /* elvname + "_io_cq" */
- struct list_head list;
-};
-
-#define ELV_HASH_BITS 6
-
-void elv_rqhash_del(struct request_queue *q, struct request *rq);
-void elv_rqhash_add(struct request_queue *q, struct request *rq);
-void elv_rqhash_reposition(struct request_queue *q, struct request *rq);
-struct request *elv_rqhash_find(struct request_queue *q, sector_t offset);
-
-/*
- * each queue has an elevator_queue associated with it
- */
-struct elevator_queue
-{
- struct elevator_type *type;
- void *elevator_data;
- struct kobject kobj;
- struct mutex sysfs_lock;
- unsigned int registered:1;
- unsigned int uses_mq:1;
- DECLARE_HASHTABLE(hash, ELV_HASH_BITS);
-};
-
-/*
- * block elevator interface
- */
-extern void elv_dispatch_sort(struct request_queue *, struct request *);
-extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
-extern void elv_add_request(struct request_queue *, struct request *, int);
-extern void __elv_add_request(struct request_queue *, struct request *, int);
-extern enum elv_merge elv_merge(struct request_queue *, struct request **,
- struct bio *);
-extern void elv_merge_requests(struct request_queue *, struct request *,
- struct request *);
-extern void elv_merged_request(struct request_queue *, struct request *,
- enum elv_merge);
-extern void elv_bio_merged(struct request_queue *q, struct request *,
- struct bio *);
-extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
-extern void elv_requeue_request(struct request_queue *, struct request *);
-extern struct request *elv_former_request(struct request_queue *, struct request *);
-extern struct request *elv_latter_request(struct request_queue *, struct request *);
-extern int elv_register_queue(struct request_queue *q);
-extern void elv_unregister_queue(struct request_queue *q);
-extern int elv_may_queue(struct request_queue *, unsigned int);
-extern void elv_completed_request(struct request_queue *, struct request *);
-extern int elv_set_request(struct request_queue *q, struct request *rq,
- struct bio *bio, gfp_t gfp_mask);
-extern void elv_put_request(struct request_queue *, struct request *);
-extern void elv_drain_elevator(struct request_queue *);
-
-/*
- * io scheduler registration
- */
-extern void __init load_default_elevator_module(void);
-extern int elv_register(struct elevator_type *);
-extern void elv_unregister(struct elevator_type *);
-
-/*
- * io scheduler sysfs switching
- */
-extern ssize_t elv_iosched_show(struct request_queue *, char *);
-extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
-
-extern int elevator_init(struct request_queue *, char *);
-extern void elevator_exit(struct request_queue *, struct elevator_queue *);
-extern bool elv_bio_merge_ok(struct request *, struct bio *);
-extern struct elevator_queue *elevator_alloc(struct request_queue *,
- struct elevator_type *);
-
-/*
- * Helper functions.
- */
-extern struct request *elv_rb_former_request(struct request_queue *, struct request *);
-extern struct request *elv_rb_latter_request(struct request_queue *, struct request *);
-
-/*
- * rb support functions.
- */
-extern void elv_rb_add(struct rb_root *, struct request *);
-extern void elv_rb_del(struct rb_root *, struct request *);
-extern struct request *elv_rb_find(struct rb_root *, sector_t);
-
-/*
- * Insertion selection
- */
-#define ELEVATOR_INSERT_FRONT 1
-#define ELEVATOR_INSERT_BACK 2
-#define ELEVATOR_INSERT_SORT 3
-#define ELEVATOR_INSERT_REQUEUE 4
-#define ELEVATOR_INSERT_FLUSH 5
-#define ELEVATOR_INSERT_SORT_MERGE 6
-
-/*
- * return values from elevator_may_queue_fn
- */
-enum {
- ELV_MQUEUE_MAY,
- ELV_MQUEUE_NO,
- ELV_MQUEUE_MUST,
-};
-
-#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
-#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
-
-#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
-#define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist)
-
-#else /* CONFIG_BLOCK */
-
-static inline void load_default_elevator_module(void) { }
-
-#endif /* CONFIG_BLOCK */
-#endif
diff --git a/include/linux/elf-fdpic.h b/include/linux/elf-fdpic.h
index 386440317b0c..3bea95a1af53 100644
--- a/include/linux/elf-fdpic.h
+++ b/include/linux/elf-fdpic.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* FDPIC ELF load map
*
* Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_ELF_FDPIC_H
diff --git a/include/linux/elf-randomize.h b/include/linux/elf-randomize.h
index b5f0bda9472e..da0dbb7b6be3 100644
--- a/include/linux/elf-randomize.h
+++ b/include/linux/elf-randomize.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ELF_RANDOMIZE_H
#define _ELF_RANDOMIZE_H
diff --git a/include/linux/elf.h b/include/linux/elf.h
index ba069e8f4f78..c9a46c4e183b 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -1,6 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ELF_H
#define _LINUX_ELF_H
+#include <linux/types.h>
#include <asm/elf.h>
#include <uapi/linux/elf.h>
@@ -20,6 +22,19 @@
SET_PERSONALITY(ex)
#endif
+#ifndef START_THREAD
+#define START_THREAD(elf_ex, regs, elf_entry, start_stack) \
+ start_thread(regs, elf_entry, start_stack)
+#endif
+
+#if defined(ARCH_HAS_SETUP_ADDITIONAL_PAGES) && !defined(ARCH_SETUP_ADDITIONAL_PAGES)
+#define ARCH_SETUP_ADDITIONAL_PAGES(bprm, ex, interpreter) \
+ arch_setup_additional_pages(bprm, interpreter)
+#endif
+
+#define ELF32_GNU_PROPERTY_ALIGN 4
+#define ELF64_GNU_PROPERTY_ALIGN 8
+
#if ELF_CLASS == ELFCLASS32
extern Elf32_Dyn _DYNAMIC [];
@@ -30,6 +45,7 @@ extern Elf32_Dyn _DYNAMIC [];
#define elf_addr_t Elf32_Off
#define Elf_Half Elf32_Half
#define Elf_Word Elf32_Word
+#define ELF_GNU_PROPERTY_ALIGN ELF32_GNU_PROPERTY_ALIGN
#else
@@ -41,6 +57,7 @@ extern Elf64_Dyn _DYNAMIC [];
#define elf_addr_t Elf64_Off
#define Elf_Half Elf64_Half
#define Elf_Word Elf64_Word
+#define ELF_GNU_PROPERTY_ALIGN ELF64_GNU_PROPERTY_ALIGN
#endif
@@ -55,4 +72,41 @@ static inline int elf_coredump_extra_notes_write(struct coredump_params *cprm) {
extern int elf_coredump_extra_notes_size(void);
extern int elf_coredump_extra_notes_write(struct coredump_params *cprm);
#endif
+
+/*
+ * NT_GNU_PROPERTY_TYPE_0 header:
+ * Keep this internal until/unless there is an agreed UAPI definition.
+ * pr_type values (GNU_PROPERTY_*) are public and defined in the UAPI header.
+ */
+struct gnu_property {
+ u32 pr_type;
+ u32 pr_datasz;
+};
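/*
 * [Editor's sketch] The note desc is a sequence of (header, data) pairs,
 * each padded to ELF_GNU_PROPERTY_ALIGN; an illustrative walk:
 */
static inline void example_walk_gnu_properties(const void *desc,
					       size_t datasz)
{
	size_t off = 0;

	while (datasz - off >= sizeof(struct gnu_property)) {
		const struct gnu_property *pr = desc + off;
		size_t step = round_up(sizeof(*pr) + pr->pr_datasz,
				       ELF_GNU_PROPERTY_ALIGN);

		if (step > datasz - off)
			break;
		/* dispatch on pr->pr_type (GNU_PROPERTY_*) here */
		off += step;
	}
}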
+
+struct arch_elf_state;
+
+#ifndef CONFIG_ARCH_USE_GNU_PROPERTY
+static inline int arch_parse_elf_property(u32 type, const void *data,
+ size_t datasz, bool compat,
+ struct arch_elf_state *arch)
+{
+ return 0;
+}
+#else
+extern int arch_parse_elf_property(u32 type, const void *data, size_t datasz,
+ bool compat, struct arch_elf_state *arch);
+#endif
+
+#ifdef CONFIG_ARCH_HAVE_ELF_PROT
+int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
+ bool has_interp, bool is_interp);
+#else
+static inline int arch_elf_adjust_prot(int prot,
+ const struct arch_elf_state *state,
+ bool has_interp, bool is_interp)
+{
+ return prot;
+}
+#endif
+
#endif /* _LINUX_ELF_H */
diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h
index 0a90e1c3a422..54feb64e9b5d 100644
--- a/include/linux/elfcore-compat.h
+++ b/include/linux/elfcore-compat.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ELFCORE_COMPAT_H
#define _LINUX_ELFCORE_COMPAT_H
@@ -16,7 +17,7 @@ struct compat_elf_siginfo
compat_int_t si_errno;
};
-struct compat_elf_prstatus
+struct compat_elf_prstatus_common
{
struct compat_elf_siginfo pr_info;
short pr_cursig;
@@ -26,16 +27,10 @@ struct compat_elf_prstatus
compat_pid_t pr_ppid;
compat_pid_t pr_pgrp;
compat_pid_t pr_sid;
- struct compat_timeval pr_utime;
- struct compat_timeval pr_stime;
- struct compat_timeval pr_cutime;
- struct compat_timeval pr_cstime;
- compat_elf_gregset_t pr_reg;
-#ifdef CONFIG_BINFMT_ELF_FDPIC
- compat_ulong_t pr_exec_fdpic_loadmap;
- compat_ulong_t pr_interp_fdpic_loadmap;
-#endif
- compat_int_t pr_fpvalid;
+ struct old_timeval32 pr_utime;
+ struct old_timeval32 pr_stime;
+ struct old_timeval32 pr_cutime;
+ struct old_timeval32 pr_cstime;
};
struct compat_elf_prpsinfo
@@ -48,8 +43,24 @@ struct compat_elf_prpsinfo
__compat_uid_t pr_uid;
__compat_gid_t pr_gid;
compat_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
+	/*
+	 * The hard-coded 16 is derived from TASK_COMM_LEN, but it cannot be
+	 * changed because the layout is exposed to userspace, so it must
+	 * stay hard-coded here.
+	 */
char pr_fname[16];
char pr_psargs[ELF_PRARGSZ];
};
+#ifdef CONFIG_ARCH_HAS_ELFCORE_COMPAT
+#include <asm/elfcore-compat.h>
+#endif
+
+struct compat_elf_prstatus
+{
+ struct compat_elf_prstatus_common common;
+ compat_elf_gregset_t pr_reg;
+ compat_int_t pr_fpvalid;
+};
+
#endif /* _LINUX_ELFCORE_COMPAT_H */
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index c8240a12c42d..bd5560542c79 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -1,15 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ELFCORE_H
#define _LINUX_ELFCORE_H
#include <linux/user.h>
#include <linux/bug.h>
#include <linux/sched/task_stack.h>
-
-#include <asm/elf.h>
-#include <uapi/linux/elfcore.h>
+#include <linux/types.h>
+#include <linux/signal.h>
+#include <linux/time.h>
+#include <linux/ptrace.h>
+#include <linux/fs.h>
+#include <linux/elf.h>
struct coredump_params;
+struct elf_siginfo
+{
+ int si_signo; /* signal number */
+ int si_code; /* extra code */
+ int si_errno; /* errno */
+};
+
+/*
+ * Definitions to generate Intel SVR4-like core files.
+ * These mostly have the same names as the SVR4 types with "elf_"
+ * tacked on the front to prevent clashes with linux definitions,
+ * and the typedef forms have been avoided. This is mostly like
+ * the SVR4 structure, but more Linuxy; things that Linux does not
+ * support and that gdb doesn't really use are excluded.
+ */
+struct elf_prstatus_common
+{
+ struct elf_siginfo pr_info; /* Info associated with signal */
+ short pr_cursig; /* Current signal */
+ unsigned long pr_sigpend; /* Set of pending signals */
+ unsigned long pr_sighold; /* Set of held signals */
+ pid_t pr_pid;
+ pid_t pr_ppid;
+ pid_t pr_pgrp;
+ pid_t pr_sid;
+ struct __kernel_old_timeval pr_utime; /* User time */
+ struct __kernel_old_timeval pr_stime; /* System time */
+ struct __kernel_old_timeval pr_cutime; /* Cumulative user time */
+ struct __kernel_old_timeval pr_cstime; /* Cumulative system time */
+};
+
+struct elf_prstatus
+{
+ struct elf_prstatus_common common;
+ elf_gregset_t pr_reg; /* GP registers */
+ int pr_fpvalid; /* True if math co-processor being used. */
+};
+
+#define ELF_PRARGSZ (80) /* Number of chars for args */
+
+struct elf_prpsinfo
+{
+ char pr_state; /* numeric process state */
+ char pr_sname; /* char for pr_state */
+ char pr_zomb; /* zombie */
+ char pr_nice; /* nice val */
+ unsigned long pr_flag; /* flags */
+ __kernel_uid_t pr_uid;
+ __kernel_gid_t pr_gid;
+ pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
+ /* Lots missing */
+	/*
+	 * The hard-coded 16 is derived from TASK_COMM_LEN, but it cannot be
+	 * changed because the layout is exposed to userspace, so it must
+	 * stay hard-coded here.
+	 */
+ char pr_fname[16]; /* filename of executable */
+ char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
+};
+
static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
{
#ifdef ELF_CORE_COPY_REGS
@@ -20,43 +84,19 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *re
#endif
}
-static inline void elf_core_copy_kernel_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
-{
-#ifdef ELF_CORE_COPY_KERNEL_REGS
- ELF_CORE_COPY_KERNEL_REGS((*elfregs), regs);
-#else
- elf_core_copy_regs(elfregs, regs);
-#endif
-}
-
static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
{
#if defined (ELF_CORE_COPY_TASK_REGS)
return ELF_CORE_COPY_TASK_REGS(t, elfregs);
-#elif defined (task_pt_regs)
+#else
elf_core_copy_regs(elfregs, task_pt_regs(t));
#endif
return 0;
}
-extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
-
-static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_regs *regs, elf_fpregset_t *fpu)
-{
-#ifdef ELF_CORE_COPY_FPREGS
- return ELF_CORE_COPY_FPREGS(t, fpu);
-#else
- return dump_fpu(regs, fpu);
-#endif
-}
-
-#ifdef ELF_CORE_COPY_XFPREGS
-static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
-{
- return ELF_CORE_COPY_XFPREGS(t, xfpu);
-}
-#endif
+int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
+#ifdef CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS
/*
* These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
* extra segments containing the gate DSO contents. Dumping its
@@ -65,11 +105,32 @@ static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregse
* Dumping its extra ELF program headers includes all the other information
* a debugger needs to easily find how the gate DSO was being used.
*/
-extern Elf_Half elf_core_extra_phdrs(void);
+extern Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm);
extern int
elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
extern int
elf_core_write_extra_data(struct coredump_params *cprm);
-extern size_t elf_core_extra_data_size(void);
+extern size_t elf_core_extra_data_size(struct coredump_params *cprm);
+#else
+static inline Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm)
+{
+ return 0;
+}
+
+static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
+{
+ return 1;
+}
+
+static inline int elf_core_write_extra_data(struct coredump_params *cprm)
+{
+ return 1;
+}
+
+static inline size_t elf_core_extra_data_size(struct coredump_params *cprm)
+{
+ return 0;
+}
+#endif /* CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS */
#endif /* _LINUX_ELFCORE_H */
diff --git a/include/linux/elfnote-lto.h b/include/linux/elfnote-lto.h
new file mode 100644
index 000000000000..d4635a3ecc4f
--- /dev/null
+++ b/include/linux/elfnote-lto.h
@@ -0,0 +1,14 @@
+#ifndef __ELFNOTE_LTO_H
+#define __ELFNOTE_LTO_H
+
+#include <linux/elfnote.h>
+
+#define LINUX_ELFNOTE_LTO_INFO 0x101
+
+#ifdef CONFIG_LTO
+#define BUILD_LTO_INFO ELFNOTE32("Linux", LINUX_ELFNOTE_LTO_INFO, 1)
+#else
+#define BUILD_LTO_INFO ELFNOTE32("Linux", LINUX_ELFNOTE_LTO_INFO, 0)
+#endif
+
+#endif /* __ELFNOTE_LTO_H */
diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h
index 278e3ef05336..69b136e4dd2b 100644
--- a/include/linux/elfnote.h
+++ b/include/linux/elfnote.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ELFNOTE_H
#define _LINUX_ELFNOTE_H
/*
@@ -53,12 +54,12 @@
.popsection ;
#define ELFNOTE(name, type, desc) \
- ELFNOTE_START(name, type, "") \
+ ELFNOTE_START(name, type, "a") \
desc ; \
ELFNOTE_END
#else /* !__ASSEMBLER__ */
-#include <linux/elf.h>
+#include <uapi/linux/elf.h>
/*
* Use an anonymous structure which matches the shape of
* Elf{32,64}_Nhdr, but includes the name and desc data. The size and
diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
index a4cf57cd0f75..1c630e2c2756 100644
--- a/include/linux/enclosure.h
+++ b/include/linux/enclosure.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Enclosure Services
*
@@ -5,18 +6,6 @@
*
**-----------------------------------------------------------------------------
**
-** This program is free software; you can redistribute it and/or
-** modify it under the terms of the GNU General Public License
-** version 2 as published by the Free Software Foundation.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
**
**-----------------------------------------------------------------------------
*/
@@ -112,7 +101,7 @@ struct enclosure_device {
struct device edev;
struct enclosure_component_callbacks *cb;
int components;
- struct enclosure_component component[0];
+ struct enclosure_component component[];
};
static inline struct enclosure_device *
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
new file mode 100644
index 000000000000..b9caa01dfac4
--- /dev/null
+++ b/include/linux/energy_model.h
@@ -0,0 +1,349 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ENERGY_MODEL_H
+#define _LINUX_ENERGY_MODEL_H
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/jump_label.h>
+#include <linux/kobject.h>
+#include <linux/rcupdate.h>
+#include <linux/sched/cpufreq.h>
+#include <linux/sched/topology.h>
+#include <linux/types.h>
+
+/**
+ * struct em_perf_state - Performance state of a performance domain
+ * @frequency: The frequency in KHz, for consistency with CPUFreq
+ * @power: The power consumed at this level (by 1 CPU or by a registered
+ * device). It can be a total power: static and dynamic.
+ * @cost: The cost coefficient associated with this level, used during
+ * energy calculation. Equal to: power * max_frequency / frequency
+ * @flags: see "em_perf_state flags" description below.
+ */
+struct em_perf_state {
+ unsigned long frequency;
+ unsigned long power;
+ unsigned long cost;
+ unsigned long flags;
+};
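/*
 * [Editor's worked example of @cost] With a 2 GHz (2000000 kHz) max
 * state, a 1 GHz state drawing 200000 uW gets
 * cost = 200000 * 2000000 / 1000000 = 400000, letting the scheduler
 * compare states without a division per energy estimate.
 */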
+
+/*
+ * em_perf_state flags:
+ *
+ * EM_PERF_STATE_INEFFICIENT: The performance state is inefficient. There is,
+ * within the same em_perf_domain, another performance state with a higher
+ * frequency but a lower or equal power cost. Such inefficient states are
+ * ignored when using the em_pd_get_efficient_*() functions.
+ */
+#define EM_PERF_STATE_INEFFICIENT BIT(0)
+
+/**
+ * struct em_perf_domain - Performance domain
+ * @table: List of performance states, in ascending order
+ * @nr_perf_states: Number of performance states
+ * @flags: See "em_perf_domain flags"
+ * @cpus: Cpumask covering the CPUs of the domain. It's here
+ * for performance reasons to avoid potential cache
+ * misses during energy calculations in the scheduler
+ *		and to simplify allocating/freeing that memory region.
+ *
+ * In case of CPU device, a "performance domain" represents a group of CPUs
+ * whose performance is scaled together. All CPUs of a performance domain
+ * must have the same micro-architecture. Performance domains often have
+ * a 1-to-1 mapping with CPUFreq policies. In case of other devices the @cpus
+ * field is unused.
+ */
+struct em_perf_domain {
+ struct em_perf_state *table;
+ int nr_perf_states;
+ unsigned long flags;
+ unsigned long cpus[];
+};
+
+/*
+ * em_perf_domain flags:
+ *
+ * EM_PERF_DOMAIN_MICROWATTS: The power values are in micro-Watts, rather
+ * than in some other, abstract scale.
+ *
+ * EM_PERF_DOMAIN_SKIP_INEFFICIENCIES: Skip inefficient states when estimating
+ * energy consumption.
+ *
+ * EM_PERF_DOMAIN_ARTIFICIAL: The power values are artificial and might be
+ * created by a platform that lacks real power information.
+ */
+#define EM_PERF_DOMAIN_MICROWATTS BIT(0)
+#define EM_PERF_DOMAIN_SKIP_INEFFICIENCIES BIT(1)
+#define EM_PERF_DOMAIN_ARTIFICIAL BIT(2)
+
+#define em_span_cpus(em) (to_cpumask((em)->cpus))
+#define em_is_artificial(em) ((em)->flags & EM_PERF_DOMAIN_ARTIFICIAL)
+
+#ifdef CONFIG_ENERGY_MODEL
+/*
+ * The max power value in micro-Watts. The limit of 64 Watts is set as
+ * a safety net to avoid overflowing multiplications on 32-bit platforms.
+ * The 32-bit limit on total performance domain power in turn caps the
+ * number of CPUs in such a domain at 64.
+ */
+#define EM_MAX_POWER (64000000) /* 64 Watts */
+
+/*
+ * To avoid possible energy estimation overflow on 32-bit machines, limit
+ * the number of CPUs in the performance domain. 64-bit machines are safe,
+ * so a large number is used there.
+ */
+#ifdef CONFIG_64BIT
+#define EM_MAX_NUM_CPUS 4096
+#else
+#define EM_MAX_NUM_CPUS 16
+#endif
+
+/*
+ * To avoid an overflow on 32-bit machines while calculating the energy,
+ * use a different order of operations: first divide by 'scale_cpu', which
+ * reduces the big value stored in the 'cost' field, then multiply by
+ * 'sum_util'. This handles existing platforms which have e.g. ~1.3 Watts
+ * of power at max frequency, i.e. a 'cost' value above 1 million
+ * micro-Watts. In such a scenario, with 4 CPUs in the performance domain,
+ * 'sum_util' could be 4096, and the multiplication 'cost' * 'sum_util'
+ * would overflow. The reordering loses a small amount of precision in the
+ * estimate (compared to a 64-bit platform without reordering).
+ *
+ * 64-bit machines are safe and keep the natural order.
+ */
+#ifdef CONFIG_64BIT
+#define em_estimate_energy(cost, sum_util, scale_cpu) \
+ (((cost) * (sum_util)) / (scale_cpu))
+#else
+#define em_estimate_energy(cost, sum_util, scale_cpu) \
+ (((cost) / (scale_cpu)) * (sum_util))
+#endif
+
+struct em_data_callback {
+ /**
+ * active_power() - Provide power at the next performance state of
+ * a device
+ * @dev : Device for which we do this operation (can be a CPU)
+ * @power : Active power at the performance state
+ * (modified)
+ * @freq : Frequency at the performance state in kHz
+ * (modified)
+ *
+ * active_power() must find the lowest performance state of 'dev' above
+ * 'freq' and update 'power' and 'freq' to the matching active power
+ * and frequency.
+ *
+ * In case of CPUs, the power is the one of a single CPU in the domain,
+ * expressed in micro-Watts or an abstract scale. It is expected to
+ * fit in the [0, EM_MAX_POWER] range.
+ *
+ * Return 0 on success.
+ */
+ int (*active_power)(struct device *dev, unsigned long *power,
+ unsigned long *freq);
+
+ /**
+ * get_cost() - Provide the cost at the given performance state of
+ * a device
+ * @dev : Device for which we do this operation (can be a CPU)
+ * @freq : Frequency at the performance state in kHz
+ * @cost : The cost value for the performance state
+ * (modified)
+ *
+ * In case of CPUs, the cost is the one of a single CPU in the domain.
+ * It is expected to fit in the [0, EM_MAX_POWER] range due to internal
+ * usage in EAS calculation.
+ *
+ * Return 0 on success, or appropriate error value in case of failure.
+ */
+ int (*get_cost)(struct device *dev, unsigned long freq,
+ unsigned long *cost);
+};
+#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) ((em_cb).active_power = cb)
+#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb) \
+ { .active_power = _active_power_cb, \
+ .get_cost = _cost_cb }
+#define EM_DATA_CB(_active_power_cb) \
+ EM_ADV_DATA_CB(_active_power_cb, NULL)
+
+struct em_perf_domain *em_cpu_get(int cpu);
+struct em_perf_domain *em_pd_get(struct device *dev);
+int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+ struct em_data_callback *cb, cpumask_t *span,
+ bool microwatts);
+void em_dev_unregister_perf_domain(struct device *dev);
+
+/**
+ * em_pd_get_efficient_state() - Get an efficient performance state from the EM
+ * @pd : Performance domain for which we want an efficient frequency
+ * @freq : Frequency to map with the EM
+ *
+ * It is called from the scheduler code quite frequently and as a
+ * consequence performs no validation of its arguments.
+ *
+ * Return: An efficient performance state, high enough to meet @freq
+ * requirement.
+ */
+static inline
+struct em_perf_state *em_pd_get_efficient_state(struct em_perf_domain *pd,
+ unsigned long freq)
+{
+ struct em_perf_state *ps;
+ int i;
+
+ for (i = 0; i < pd->nr_perf_states; i++) {
+ ps = &pd->table[i];
+ if (ps->frequency >= freq) {
+ if (pd->flags & EM_PERF_DOMAIN_SKIP_INEFFICIENCIES &&
+ ps->flags & EM_PERF_STATE_INEFFICIENT)
+ continue;
+ break;
+ }
+ }
+
+ return ps;
+}
+
+/**
+ * em_cpu_energy() - Estimates the energy consumed by the CPUs of a
+ * performance domain
+ * @pd : performance domain for which energy has to be estimated
+ * @max_util : highest utilization among CPUs of the domain
+ * @sum_util : sum of the utilization of all CPUs in the domain
+ * @allowed_cpu_cap : maximum allowed CPU capacity for the @pd, which
+ * might reflect reduced frequency (due to thermal)
+ *
+ * This function must be used only for CPU devices. It does not validate
+ * that the EM is of CPU type or that a cpumask is allocated. It is called
+ * from the scheduler code quite frequently, which is why there are no
+ * checks.
+ *
+ * Return: the sum of the energy consumed by the CPUs of the domain assuming
+ * a capacity state satisfying the max utilization of the domain.
+ */
+static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
+ unsigned long max_util, unsigned long sum_util,
+ unsigned long allowed_cpu_cap)
+{
+ unsigned long freq, scale_cpu;
+ struct em_perf_state *ps;
+ int cpu;
+
+ if (!sum_util)
+ return 0;
+
+ /*
+ * In order to predict the performance state, map the utilization of
+ * the most utilized CPU of the performance domain to a requested
+ * frequency, as schedutil does. Also take into account that the real
+ * frequency might be set lower (due to thermal capping). Thus, clamp
+ * max utilization to the allowed CPU capacity before calculating
+ * effective frequency.
+ */
+ cpu = cpumask_first(to_cpumask(pd->cpus));
+ scale_cpu = arch_scale_cpu_capacity(cpu);
+ ps = &pd->table[pd->nr_perf_states - 1];
+
+ max_util = map_util_perf(max_util);
+ max_util = min(max_util, allowed_cpu_cap);
+ freq = map_util_freq(max_util, ps->frequency, scale_cpu);
+
+ /*
+ * Find the lowest performance state of the Energy Model above the
+ * requested frequency.
+ */
+ ps = em_pd_get_efficient_state(pd, freq);
+
+ /*
+ * The capacity of a CPU in the domain at the performance state (ps)
+ * can be computed as:
+ *
+ * ps->freq * scale_cpu
+ * ps->cap = -------------------- (1)
+ * cpu_max_freq
+ *
+ * So, ignoring the costs of idle states (which are not available in
+ * the EM), the energy consumed by this CPU at that performance state
+ * is estimated as:
+ *
+ * ps->power * cpu_util
+ * cpu_nrg = -------------------- (2)
+ * ps->cap
+ *
+ * since 'cpu_util / ps->cap' represents its percentage of busy time.
+ *
+ * NOTE: Although the result of this computation actually is in
+ * units of power, it can be manipulated as an energy value
+ * over a scheduling period, since it is assumed to be
+ * constant during that interval.
+ *
+ * By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product
+ * of two terms:
+ *
+ * ps->power * cpu_max_freq cpu_util
+ * cpu_nrg = ------------------------ * --------- (3)
+ * ps->freq scale_cpu
+ *
+ * The first term is static, and is stored in the em_perf_state struct
+ * as 'ps->cost'.
+ *
+ * Since all CPUs of the domain have the same micro-architecture, they
+ * share the same 'ps->cost', and the same CPU capacity. Hence, the
+ * total energy of the domain (which is the simple sum of the energy of
+ * all of its CPUs) can be factorized as:
+ *
+ * ps->cost * \Sum cpu_util
+ * pd_nrg = ------------------------ (4)
+ * scale_cpu
+ */
+ return em_estimate_energy(ps->cost, sum_util, scale_cpu);
+}
+
+/**
+ * em_pd_nr_perf_states() - Get the number of performance states of a perf.
+ * domain
+ * @pd : performance domain for which this must be done
+ *
+ * Return: the number of performance states in the performance domain table
+ */
+static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
+{
+ return pd->nr_perf_states;
+}
+
+#else
+struct em_data_callback {};
+#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb) { }
+#define EM_DATA_CB(_active_power_cb) { }
+#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) do { } while (0)
+
+static inline
+int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+ struct em_data_callback *cb, cpumask_t *span,
+ bool microwatts)
+{
+ return -EINVAL;
+}
+static inline void em_dev_unregister_perf_domain(struct device *dev)
+{
+}
+static inline struct em_perf_domain *em_cpu_get(int cpu)
+{
+ return NULL;
+}
+static inline struct em_perf_domain *em_pd_get(struct device *dev)
+{
+ return NULL;
+}
+static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
+ unsigned long max_util, unsigned long sum_util,
+ unsigned long allowed_cpu_cap)
+{
+ return 0;
+}
+static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
+{
+ return 0;
+}
+#endif
+
+#endif
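
For context, a minimal sketch of how a driver might register a performance
domain with the API above. The callback my_get_power(), MY_NR_STATES and
the frequency/power table are illustrative assumptions, not kernel code:

	#include <linux/energy_model.h>

	#define MY_NR_STATES	3

	/* active_power() callback: report the lowest state at or above *freq */
	static int my_get_power(struct device *dev, unsigned long *power,
				unsigned long *freq)
	{
		/* illustrative {kHz, micro-Watts} pairs, ascending by frequency */
		static const unsigned long table[MY_NR_STATES][2] = {
			{  500000,  80000 },
			{ 1000000, 200000 },
			{ 1500000, 450000 },
		};
		int i;

		for (i = 0; i < MY_NR_STATES; i++) {
			if (table[i][0] >= *freq) {
				*freq = table[i][0];
				*power = table[i][1];
				return 0;
			}
		}
		return -EINVAL;
	}

	static int my_register_em(struct device *cpu_dev, cpumask_t *cpus)
	{
		struct em_data_callback em_cb = EM_DATA_CB(my_get_power);

		/* the power values above are micro-Watts, so microwatts = true */
		return em_dev_register_perf_domain(cpu_dev, MY_NR_STATES, &em_cb,
						   cpus, true);
	}
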
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
new file mode 100644
index 000000000000..d95ab85f96ba
--- /dev/null
+++ b/include/linux/entry-common.h
@@ -0,0 +1,468 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_ENTRYCOMMON_H
+#define __LINUX_ENTRYCOMMON_H
+
+#include <linux/static_call_types.h>
+#include <linux/ptrace.h>
+#include <linux/syscalls.h>
+#include <linux/seccomp.h>
+#include <linux/sched.h>
+
+#include <asm/entry-common.h>
+
+/*
+ * Define dummy _TIF work flags if not defined by the architecture or for
+ * disabled functionality.
+ */
+#ifndef _TIF_PATCH_PENDING
+# define _TIF_PATCH_PENDING (0)
+#endif
+
+#ifndef _TIF_UPROBE
+# define _TIF_UPROBE (0)
+#endif
+
+/*
+ * SYSCALL_WORK flags handled in syscall_enter_from_user_mode()
+ */
+#ifndef ARCH_SYSCALL_WORK_ENTER
+# define ARCH_SYSCALL_WORK_ENTER (0)
+#endif
+
+/*
+ * SYSCALL_WORK flags handled in syscall_exit_to_user_mode()
+ */
+#ifndef ARCH_SYSCALL_WORK_EXIT
+# define ARCH_SYSCALL_WORK_EXIT (0)
+#endif
+
+#define SYSCALL_WORK_ENTER (SYSCALL_WORK_SECCOMP | \
+ SYSCALL_WORK_SYSCALL_TRACEPOINT | \
+ SYSCALL_WORK_SYSCALL_TRACE | \
+ SYSCALL_WORK_SYSCALL_EMU | \
+ SYSCALL_WORK_SYSCALL_AUDIT | \
+ SYSCALL_WORK_SYSCALL_USER_DISPATCH | \
+ ARCH_SYSCALL_WORK_ENTER)
+#define SYSCALL_WORK_EXIT (SYSCALL_WORK_SYSCALL_TRACEPOINT | \
+ SYSCALL_WORK_SYSCALL_TRACE | \
+ SYSCALL_WORK_SYSCALL_AUDIT | \
+ SYSCALL_WORK_SYSCALL_USER_DISPATCH | \
+ SYSCALL_WORK_SYSCALL_EXIT_TRAP | \
+ ARCH_SYSCALL_WORK_EXIT)
+
+/*
+ * TIF flags handled in exit_to_user_mode_loop()
+ */
+#ifndef ARCH_EXIT_TO_USER_MODE_WORK
+# define ARCH_EXIT_TO_USER_MODE_WORK (0)
+#endif
+
+#define EXIT_TO_USER_MODE_WORK \
+ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
+ ARCH_EXIT_TO_USER_MODE_WORK)
+
+/**
+ * arch_enter_from_user_mode - Architecture specific sanity check for user mode regs
+ * @regs:	Pointer to current's pt_regs
+ *
+ * Defaults to an empty implementation. Can be replaced by architecture
+ * specific code.
+ *
+ * Invoked from syscall_enter_from_user_mode() in the non-instrumentable
+ * section. Use __always_inline so the compiler cannot push it out of line
+ * and make it instrumentable.
+ */
+static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs);
+
+#ifndef arch_enter_from_user_mode
+static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {}
+#endif
+
+/**
+ * enter_from_user_mode - Establish state when coming from user mode
+ *
+ * Syscall/interrupt entry disables interrupts, but user mode is traced as
+ * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
+ *
+ * 1) Tell lockdep that interrupts are disabled
+ * 2) Invoke context tracking if enabled to reactivate RCU
+ * 3) Trace interrupts off state
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * disabled. The calling code has to be non-instrumentable. When the
+ * function returns all state is correct and interrupts are still
+ * disabled. The subsequent functions can be instrumented.
+ *
+ * This is invoked when there is architecture specific functionality to be
+ * done between establishing state and enabling interrupts. The caller must
+ * enable interrupts before invoking syscall_enter_from_user_mode_work().
+ */
+void enter_from_user_mode(struct pt_regs *regs);
+
+/**
+ * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts
+ * @regs:	Pointer to current's pt_regs
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * disabled. The calling code has to be non-instrumentable. When the
+ * function returns all state is correct, interrupts are enabled and the
+ * subsequent functions can be instrumented.
+ *
+ * This handles lockdep, RCU (context tracking) and tracing state, i.e.
+ * the functionality provided by enter_from_user_mode().
+ *
+ * This is invoked when there is extra architecture specific functionality
+ * to be done between establishing state and handling user mode entry work.
+ */
+void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
+
+/**
+ * syscall_enter_from_user_mode_work - Check and handle work before invoking
+ * a syscall
+ * @regs:	Pointer to current's pt_regs
+ * @syscall: The syscall number
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * enabled after invoking syscall_enter_from_user_mode_prepare() and extra
+ * architecture specific work.
+ *
+ * Returns: The original or a modified syscall number
+ *
+ * If the returned syscall number is -1 then the syscall should be
+ * skipped. In this case the caller may invoke syscall_set_error() or
+ * syscall_set_return_value() first. If neither of those are called and -1
+ * is returned, then the syscall will fail with ENOSYS.
+ *
+ * It handles the following work items:
+ *
+ * 1) syscall_work flag dependent invocations of
+ * ptrace_report_syscall_entry(), __secure_computing(), trace_sys_enter()
+ * 2) Invocation of audit_syscall_entry()
+ */
+long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);
+
+/**
+ * syscall_enter_from_user_mode - Establish state and check and handle work
+ * before invoking a syscall
+ * @regs:	Pointer to current's pt_regs
+ * @syscall: The syscall number
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * disabled. The calling code has to be non-instrumentable. When the
+ * function returns all state is correct, interrupts are enabled and the
+ * subsequent functions can be instrumented.
+ *
+ * This is combination of syscall_enter_from_user_mode_prepare() and
+ * syscall_enter_from_user_mode_work().
+ *
+ * Returns: The original or a modified syscall number. See
+ * syscall_enter_from_user_mode_work() for further explanation.
+ */
+long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall);
+
+/**
+ * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
+ * @ti_work: Cached TIF flags gathered with interrupts disabled
+ *
+ * Defaults to local_irq_enable(). Can be supplied by architecture specific
+ * code.
+ */
+static inline void local_irq_enable_exit_to_user(unsigned long ti_work);
+
+#ifndef local_irq_enable_exit_to_user
+static inline void local_irq_enable_exit_to_user(unsigned long ti_work)
+{
+ local_irq_enable();
+}
+#endif
+
+/**
+ * local_irq_disable_exit_to_user - Exit to user variant of local_irq_disable()
+ *
+ * Defaults to local_irq_disable(). Can be supplied by architecture specific
+ * code.
+ */
+static inline void local_irq_disable_exit_to_user(void);
+
+#ifndef local_irq_disable_exit_to_user
+static inline void local_irq_disable_exit_to_user(void)
+{
+ local_irq_disable();
+}
+#endif
+
+/**
+ * arch_exit_to_user_mode_work - Architecture specific TIF work for exit
+ * to user mode.
+ * @regs:	Pointer to current's pt_regs
+ * @ti_work: Cached TIF flags gathered with interrupts disabled
+ *
+ * Invoked from exit_to_user_mode_loop() with interrupts enabled
+ *
+ * Defaults to NOOP. Can be supplied by architecture specific code.
+ */
+static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
+ unsigned long ti_work);
+
+#ifndef arch_exit_to_user_mode_work
+static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
+ unsigned long ti_work)
+{
+}
+#endif
+
+/**
+ * arch_exit_to_user_mode_prepare - Architecture specific preparation for
+ * exit to user mode.
+ * @regs:	Pointer to current's pt_regs
+ * @ti_work: Cached TIF flags gathered with interrupts disabled
+ *
+ * Invoked from exit_to_user_mode_prepare() with interrupts disabled as the last
+ * function before return. Defaults to NOOP.
+ */
+static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ unsigned long ti_work);
+
+#ifndef arch_exit_to_user_mode_prepare
+static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ unsigned long ti_work)
+{
+}
+#endif
+
+/**
+ * arch_exit_to_user_mode - Architecture specific final work before
+ * exit to user mode.
+ *
+ * Invoked from exit_to_user_mode() with interrupts disabled as the last
+ * function before return. Defaults to NOOP.
+ *
+ * This needs to be __always_inline because it is non-instrumentable code
+ * invoked after context tracking switched to user mode.
+ *
+ * An architecture implementation must not do anything complex, no locking
+ * etc. The main purpose is for speculation mitigations.
+ */
+static __always_inline void arch_exit_to_user_mode(void);
+
+#ifndef arch_exit_to_user_mode
+static __always_inline void arch_exit_to_user_mode(void) { }
+#endif
+
+/**
+ * arch_do_signal_or_restart - Architecture specific signal delivery function
+ * @regs:	Pointer to current's pt_regs
+ *
+ * Invoked from exit_to_user_mode_loop().
+ */
+void arch_do_signal_or_restart(struct pt_regs *regs);
+
+/**
+ * exit_to_user_mode - Fixup state when exiting to user mode
+ *
+ * Syscall/interrupt exit enables interrupts, but the kernel state is
+ * interrupts disabled when this is invoked. Also tell RCU about it.
+ *
+ * 1) Trace interrupts on state
+ * 2) Invoke context tracking if enabled to adjust RCU state
+ * 3) Invoke architecture specific last minute exit code, e.g. speculation
+ * mitigations, etc.: arch_exit_to_user_mode()
+ * 4) Tell lockdep that interrupts are enabled
+ *
+ * Invoked from architecture specific code when syscall_exit_to_user_mode()
+ * is not suitable as the last step before returning to userspace. Must be
+ * invoked with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke syscall_exit_to_user_mode_work() before this.
+ */
+void exit_to_user_mode(void);
+
+/**
+ * syscall_exit_to_user_mode_work - Handle work before returning to user mode
+ * @regs:	Pointer to current's pt_regs
+ *
+ * Same as step 1 and 2 of syscall_exit_to_user_mode() but without calling
+ * exit_to_user_mode() to perform the final transition to user mode.
+ *
+ * Calling convention is the same as for syscall_exit_to_user_mode() and it
+ * returns with all work handled and interrupts disabled. The caller must
+ * invoke exit_to_user_mode() before actually switching to user mode to
+ * make the final state transitions. Interrupts must stay disabled between
+ * return from this function and the invocation of exit_to_user_mode().
+ */
+void syscall_exit_to_user_mode_work(struct pt_regs *regs);
+
+/**
+ * syscall_exit_to_user_mode - Handle work before returning to user mode
+ * @regs:	Pointer to current's pt_regs
+ *
+ * Invoked with interrupts enabled and fully valid regs. Returns with all
+ * work handled, interrupts disabled such that the caller can immediately
+ * switch to user mode. Called from architecture specific syscall and ret
+ * from fork code.
+ *
+ * The call order is:
+ * 1) One-time syscall exit work:
+ * - rseq syscall exit
+ * - audit
+ * - syscall tracing
+ * - ptrace (single stepping)
+ *
+ * 2) Preparatory work
+ * - Exit to user mode loop (common TIF handling). Invokes
+ * arch_exit_to_user_mode_work() for architecture specific TIF work
+ * - Architecture specific one time work arch_exit_to_user_mode_prepare()
+ * - Address limit and lockdep checks
+ *
+ * 3) Final transition (lockdep, tracing, context tracking, RCU), i.e. the
+ * functionality in exit_to_user_mode().
+ *
+ * This is a combination of syscall_exit_to_user_mode_work() (1,2) and
+ * exit_to_user_mode(). This function is preferred unless there is a
+ * compelling architectural reason to use the separate functions.
+ */
+void syscall_exit_to_user_mode(struct pt_regs *regs);
+
+/**
+ * irqentry_enter_from_user_mode - Establish state before invoking the irq handler
+ * @regs:	Pointer to current's pt_regs
+ *
+ * Invoked from architecture specific entry code with interrupts disabled.
+ * Can only be called when the interrupt entry came from user mode. The
+ * calling code must be non-instrumentable. When the function returns all
+ * state is correct and the subsequent functions can be instrumented.
+ *
+ * The function establishes state (lockdep, RCU (context tracking), tracing)
+ */
+void irqentry_enter_from_user_mode(struct pt_regs *regs);
+
+/**
+ * irqentry_exit_to_user_mode - Interrupt exit work
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked with interrupts disabled and fully valid regs. Returns with all
+ * work handled, interrupts disabled such that the caller can immediately
+ * switch to user mode. Called from architecture specific interrupt
+ * handling code.
+ *
+ * The call order is #2 and #3 as described in syscall_exit_to_user_mode().
+ * Interrupt exit is not invoking #1 which is the syscall specific one time
+ * work.
+ */
+void irqentry_exit_to_user_mode(struct pt_regs *regs);
+
+#ifndef irqentry_state
+/**
+ * struct irqentry_state - Opaque object for exception state storage
+ * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the
+ * exit path has to invoke ct_irq_exit().
+ * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that
+ * lockdep state is restored correctly on exit from nmi.
+ *
+ * This opaque object is filled in by the irqentry_*_enter() functions and
+ * must be passed back into the corresponding irqentry_*_exit() functions
+ * when the exception is complete.
+ *
+ * Callers of irqentry_*_[enter|exit]() must consider this structure opaque
+ * and all members private. Descriptions of the members are provided to aid in
+ * the maintenance of the irqentry_*() functions.
+ */
+typedef struct irqentry_state {
+ union {
+ bool exit_rcu;
+ bool lockdep;
+ };
+} irqentry_state_t;
+#endif
+
+/**
+ * irqentry_enter - Handle state tracking on ordinary interrupt entries
+ * @regs: Pointer to pt_regs of interrupted context
+ *
+ * Invokes:
+ * - lockdep irqflag state tracking as low level ASM entry disabled
+ * interrupts.
+ *
+ * - Context tracking if the exception hit user mode.
+ *
+ * - The hardirq tracer to keep the state consistent as low level ASM
+ * entry disabled interrupts.
+ *
+ * As a precondition, this requires that the entry came from user mode,
+ * idle, or a kernel context in which RCU is watching.
+ *
+ * For kernel mode entries RCU handling is done conditionally. If RCU is
+ * watching then the only RCU requirement is to check whether the tick has
+ * to be restarted. If RCU is not watching then ct_irq_enter() has to be
+ * invoked on entry and ct_irq_exit() on exit.
+ *
+ * Avoiding the ct_irq_enter/exit() calls is an optimization but also
+ * solves the problem of kernel mode pagefaults which can schedule, which
+ * is not possible after invoking ct_irq_enter() without undoing it.
+ *
+ * For user mode entries irqentry_enter_from_user_mode() is invoked to
+ * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
+ * would not be possible.
+ *
+ * Returns: An opaque object that must be passed to irqentry_exit()
+ */
+irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
+
+/**
+ * irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt
+ *
+ * Conditional reschedule with additional sanity checks.
+ */
+void raw_irqentry_exit_cond_resched(void);
+#ifdef CONFIG_PREEMPT_DYNAMIC
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+#define irqentry_exit_cond_resched_dynamic_enabled raw_irqentry_exit_cond_resched
+#define irqentry_exit_cond_resched_dynamic_disabled NULL
+DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
+#define irqentry_exit_cond_resched() static_call(irqentry_exit_cond_resched)()
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+void dynamic_irqentry_exit_cond_resched(void);
+#define irqentry_exit_cond_resched() dynamic_irqentry_exit_cond_resched()
+#endif
+#else /* CONFIG_PREEMPT_DYNAMIC */
+#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched()
+#endif /* CONFIG_PREEMPT_DYNAMIC */
+
+/**
+ * irqentry_exit - Handle return from exception that used irqentry_enter()
+ * @regs: Pointer to pt_regs (exception entry regs)
+ * @state: Return value from matching call to irqentry_enter()
+ *
+ * Depending on the return target (kernel/user) this runs the necessary
+ * preemption and work checks if possible and required and returns to
+ * the caller with interrupts disabled and no further work pending.
+ *
+ * This is the last action before returning to the low level ASM code which
+ * just needs to return to the appropriate context.
+ *
+ * Counterpart to irqentry_enter().
+ */
+void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state);
+
+/**
+ * irqentry_nmi_enter - Handle NMI entry
+ * @regs:	Pointer to current's pt_regs
+ *
+ * Similar to irqentry_enter() but taking care of the NMI constraints.
+ */
+irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs);
+
+/**
+ * irqentry_nmi_exit - Handle return from NMI handling
+ * @regs: Pointer to pt_regs (NMI entry regs)
+ * @irq_state: Return value from matching call to irqentry_nmi_enter()
+ *
+ * Last action before returning to the low level assembly code.
+ *
+ * Counterpart to irqentry_nmi_enter().
+ */
+void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state);
+
+#endif
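
To illustrate the entry/exit pairing described above, a sketch of an
architecture's C-level syscall handler. do_my_arch_syscall(),
invoke_syscall() and NR_syscalls stand in for arch-specific pieces and are
not part of this header:

	#include <linux/entry-common.h>

	void do_my_arch_syscall(struct pt_regs *regs, long nr)
	{
		/*
		 * Establish state, enable interrupts and run the entry work
		 * (seccomp, ptrace, tracepoints, audit). May return a modified
		 * syscall number, or -1 if the syscall is to be skipped.
		 */
		nr = syscall_enter_from_user_mode(regs, nr);

		if (nr >= 0 && nr < NR_syscalls)
			invoke_syscall(regs, nr);	/* arch-specific dispatch */

		/*
		 * Run the exit work (audit, tracing, single-step, TIF loop)
		 * and return with interrupts disabled, ready for the low
		 * level ASM return path.
		 */
		syscall_exit_to_user_mode(regs);
	}
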
diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h
new file mode 100644
index 000000000000..6813171afccb
--- /dev/null
+++ b/include/linux/entry-kvm.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_ENTRYKVM_H
+#define __LINUX_ENTRYKVM_H
+
+#include <linux/static_call_types.h>
+#include <linux/resume_user_mode.h>
+#include <linux/syscalls.h>
+#include <linux/seccomp.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+
+/* Transfer to guest mode work */
+#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
+
+#ifndef ARCH_XFER_TO_GUEST_MODE_WORK
+# define ARCH_XFER_TO_GUEST_MODE_WORK (0)
+#endif
+
+#define XFER_TO_GUEST_MODE_WORK \
+ (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL | \
+ _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK)
+
+struct kvm_vcpu;
+
+/**
+ * arch_xfer_to_guest_mode_handle_work - Architecture specific xfer to guest
+ * mode work handling function.
+ * @vcpu: Pointer to current's VCPU data
+ * @ti_work: Cached TIF flags gathered in xfer_to_guest_mode_handle_work()
+ *
+ * Invoked from xfer_to_guest_mode_handle_work(). Defaults to NOOP. Can be
+ * replaced by architecture specific code.
+ */
+static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
+ unsigned long ti_work);
+
+#ifndef arch_xfer_to_guest_mode_handle_work
+static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
+ unsigned long ti_work)
+{
+ return 0;
+}
+#endif
+
+/**
+ * xfer_to_guest_mode_handle_work - Check and handle pending work which needs
+ * to be handled before going to guest mode
+ * @vcpu: Pointer to current's VCPU data
+ *
+ * Returns: 0 or an error code
+ */
+int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
+
+/**
+ * xfer_to_guest_mode_prepare - Perform last minute preparation work that
+ *				 needs to be handled while IRQs are disabled
+ *				 upon entering the guest.
+ *
+ * Has to be invoked with interrupts disabled before the last call
+ * to xfer_to_guest_mode_work_pending().
+ */
+static inline void xfer_to_guest_mode_prepare(void)
+{
+ lockdep_assert_irqs_disabled();
+ tick_nohz_user_enter_prepare();
+}
+
+/**
+ * __xfer_to_guest_mode_work_pending - Check if work is pending
+ *
+ * Returns: True if work pending, False otherwise.
+ *
+ * Bare variant of xfer_to_guest_mode_work_pending(). Can be called from
+ * interrupt enabled code for racy quick checks with care.
+ */
+static inline bool __xfer_to_guest_mode_work_pending(void)
+{
+ unsigned long ti_work = read_thread_flags();
+
+ return !!(ti_work & XFER_TO_GUEST_MODE_WORK);
+}
+
+/**
+ * xfer_to_guest_mode_work_pending - Check if work is pending which needs to be
+ * handled before returning to guest mode
+ *
+ * Returns: True if work pending, False otherwise.
+ *
+ * Has to be invoked with interrupts disabled before the transition to
+ * guest mode.
+ */
+static inline bool xfer_to_guest_mode_work_pending(void)
+{
+ lockdep_assert_irqs_disabled();
+ return __xfer_to_guest_mode_work_pending();
+}
+#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
+
+#endif
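
A sketch of the transfer-to-guest pattern these helpers support, assuming a
hypothetical enter_guest() world switch and a caller that breaks the loop on
error:

	#include <linux/entry-kvm.h>

	static int my_vcpu_run(struct kvm_vcpu *vcpu)
	{
		int ret;

		for (;;) {
			/* handle signals, resched, notify-resume before entry */
			if (__xfer_to_guest_mode_work_pending()) {
				ret = xfer_to_guest_mode_handle_work(vcpu);
				if (ret)
					return ret;
			}

			local_irq_disable();
			xfer_to_guest_mode_prepare();
			/* final race-free check with IRQs disabled */
			if (xfer_to_guest_mode_work_pending()) {
				local_irq_enable();
				continue;
			}
			enter_guest(vcpu);	/* hypothetical world switch */
			local_irq_enable();
		}
	}
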
diff --git a/include/linux/err.h b/include/linux/err.h
index 1e3558845e4c..a139c64aef2a 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ERR_H
#define _LINUX_ERR_H
@@ -61,9 +62,6 @@ static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
return 0;
}
-/* Deprecated */
-#define PTR_RET(p) PTR_ERR_OR_ZERO(p)
-
#endif
#endif /* _LINUX_ERR_H */
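
With the deprecated PTR_RET() alias gone, callers use PTR_ERR_OR_ZERO()
directly. A typical pattern, with devm_clk_get() chosen purely as an
example of an ERR_PTR-returning API:

	#include <linux/clk.h>
	#include <linux/err.h>

	static int my_probe_clk(struct device *dev)
	{
		struct clk *clk = devm_clk_get(dev, NULL);

		/* 0 if clk is a valid pointer, -errno if it is an ERR_PTR */
		return PTR_ERR_OR_ZERO(clk);
	}
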
diff --git a/include/linux/errname.h b/include/linux/errname.h
new file mode 100644
index 000000000000..e8576ad90cb7
--- /dev/null
+++ b/include/linux/errname.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ERRNAME_H
+#define _LINUX_ERRNAME_H
+
+#include <linux/stddef.h>
+
+#ifdef CONFIG_SYMBOLIC_ERRNAME
+const char *errname(int err);
+#else
+static inline const char *errname(int err)
+{
+ return NULL;
+}
+#endif
+
+#endif /* _LINUX_ERRNAME_H */
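
Usage is straightforward; a sketch, keeping in mind that errname() returns
NULL when CONFIG_SYMBOLIC_ERRNAME is disabled or the code is unknown:

	#include <linux/errname.h>
	#include <linux/printk.h>

	static void my_report(int err)
	{
		/* fall back to "?" when no symbolic name is available */
		pr_warn("operation failed: %d (%s)\n", err, errname(err) ?: "?");
	}
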
diff --git a/include/linux/errno.h b/include/linux/errno.h
index 7ce9fb1b7d28..8b0c754bab02 100644
--- a/include/linux/errno.h
+++ b/include/linux/errno.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ERRNO_H
#define _LINUX_ERRNO_H
@@ -17,6 +18,7 @@
#define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */
#define EPROBE_DEFER 517 /* Driver requests probe retry */
#define EOPENSTALE 518 /* open found a stale dentry */
+#define ENOPARAM 519 /* Parameter not supported */
/* Defined for the NFSv3 protocol */
#define EBADHANDLE 521 /* Illegal NFS file handle */
@@ -29,5 +31,6 @@
#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */
#define EIOCBQUEUED 529 /* iocb queued, will get completion event */
#define ERECALLCONFLICT 530 /* conflict with recalled state */
+#define ENOGRACE 531 /* NFS file lock reclaim refused */
#endif
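
The new ENOPARAM code is kernel-internal (like the other 5xx codes, it is
never exposed to user space). A hedged sketch of its intended use in a
filesystem's mount parameter parser; my_parse_param() and the "myopt" key
are illustrative only:

	#include <linux/fs_context.h>
	#include <linux/string.h>

	static int my_parse_param(struct fs_context *fc, struct fs_parameter *param)
	{
		if (!strcmp(param->key, "myopt"))
			return 0;	/* accepted; handling elided */

		return -ENOPARAM;	/* unknown parameter, let the VFS report it */
	}
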
diff --git a/include/linux/error-injection.h b/include/linux/error-injection.h
new file mode 100644
index 000000000000..20e738f4eae8
--- /dev/null
+++ b/include/linux/error-injection.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ERROR_INJECTION_H
+#define _LINUX_ERROR_INJECTION_H
+
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <asm-generic/error-injection.h>
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+
+extern bool within_error_injection_list(unsigned long addr);
+extern int get_injectable_error_type(unsigned long addr);
+
+#else /* !CONFIG_FUNCTION_ERROR_INJECTION */
+
+static inline bool within_error_injection_list(unsigned long addr)
+{
+ return false;
+}
+
+static inline int get_injectable_error_type(unsigned long addr)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
+
+#endif /* _LINUX_ERROR_INJECTION_H */
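
Functions opt in to error injection with the ALLOW_ERROR_INJECTION() macro
from asm-generic/error-injection.h (included above); a sketch, with
my_alloc_resource() as an assumed driver function:

	#include <linux/error-injection.h>

	static int my_alloc_resource(void)
	{
		/* real allocation elided; 0 means success */
		return 0;
	}
	/* allow fault-injection frameworks to force an errno return here */
	ALLOW_ERROR_INJECTION(my_alloc_resource, ERRNO);
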
diff --git a/include/linux/errqueue.h b/include/linux/errqueue.h
index 6fdfc884fdeb..be1cf7291d6c 100644
--- a/include/linux/errqueue.h
+++ b/include/linux/errqueue.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ERRQUEUE_H
#define _LINUX_ERRQUEUE_H 1
diff --git a/include/linux/errseq.h b/include/linux/errseq.h
index 9e0d444ac88d..fc2777770768 100644
--- a/include/linux/errseq.h
+++ b/include/linux/errseq.h
@@ -1,18 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * See Documentation/core-api/errseq.rst and lib/errseq.c
+ */
#ifndef _LINUX_ERRSEQ_H
#define _LINUX_ERRSEQ_H
-/* See lib/errseq.c for more info */
-
typedef u32 errseq_t;
-errseq_t __errseq_set(errseq_t *eseq, int err);
-static inline void errseq_set(errseq_t *eseq, int err)
-{
- /* Optimize for the common case of no error */
- if (unlikely(err))
- __errseq_set(eseq, err);
-}
-
+errseq_t errseq_set(errseq_t *eseq, int err);
errseq_t errseq_sample(errseq_t *eseq);
int errseq_check(errseq_t *eseq, errseq_t since);
int errseq_check_and_advance(errseq_t *eseq, errseq_t *since);
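
The errseq_t pattern in brief, as a sketch (see
Documentation/core-api/errseq.rst for the full story): writers record
errors, readers sample a point-in-time cursor and later check whether
anything new happened:

	#include <linux/errseq.h>

	static errseq_t my_wb_err;		/* shared error sequence */

	static void my_writer_saw_error(int err)
	{
		errseq_set(&my_wb_err, err);	/* record the latest error */
	}

	static int my_reader(void)
	{
		errseq_t since = errseq_sample(&my_wb_err);

		/* ... work that may race with writers ... */

		/* 0 if nothing new; else the error, advancing 'since' past it */
		return errseq_check_and_advance(&my_wb_err, &since);
	}
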
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 2d9f80848d4b..224645f17c33 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. NET is implemented using the BSD Socket
@@ -10,14 +11,8 @@
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
*
- * Relocated to include/linux where it belongs by Alan Cox
+ * Relocated to include/linux where it belongs by Alan Cox
* <gw4pts@gw4pts.ampr.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
*/
#ifndef _LINUX_ETHERDEVICE_H
#define _LINUX_ETHERDEVICE_H
@@ -25,14 +20,23 @@
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/random.h>
+#include <linux/crc32.h>
#include <asm/unaligned.h>
#include <asm/bitsperlong.h>
#ifdef __KERNEL__
struct device;
+struct fwnode_handle;
+
int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
-unsigned char *arch_get_platform_get_mac_address(void);
-u32 eth_get_headlen(void *data, unsigned int max_len);
+int platform_get_ethdev_address(struct device *dev, struct net_device *netdev);
+unsigned char *arch_get_platform_mac_address(void);
+int nvmem_get_mac_address(struct device *dev, void *addrbuf);
+int device_get_mac_address(struct device *dev, char *addr);
+int device_get_ethdev_address(struct device *dev, struct net_device *netdev);
+int fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr);
+
+u32 eth_get_headlen(const struct net_device *dev, const void *data, u32 len);
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;
@@ -43,10 +47,10 @@ int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
__be16 type);
void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
const unsigned char *haddr);
+__be16 eth_header_parse_protocol(const struct sk_buff *skb);
int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
void eth_commit_mac_addr_change(struct net_device *dev, void *p);
int eth_mac_addr(struct net_device *dev, void *p);
-int eth_change_mtu(struct net_device *dev, int new_mtu);
int eth_validate_addr(struct net_device *dev);
struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
@@ -59,13 +63,13 @@ struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
unsigned int rxqs);
#define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1)
-struct sk_buff **eth_gro_receive(struct sk_buff **head,
- struct sk_buff *skb);
+struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb);
int eth_gro_complete(struct sk_buff *skb, int nhoff);
/* Reserved Ethernet Addresses per IEEE 802.1Q */
static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
+#define eth_stp_addr eth_reserved_addr_base
/**
* is_link_local_ether_addr - Determine if given Ethernet address is link-local
@@ -130,7 +134,7 @@ static inline bool is_multicast_ether_addr(const u8 *addr)
#endif
}
-static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
+static inline bool is_multicast_ether_addr_64bits(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
#ifdef __BIG_ENDIAN
@@ -230,8 +234,6 @@ static inline void eth_random_addr(u8 *addr)
addr[0] |= 0x02; /* set local assignment bit (IEEE802) */
}
-#define random_ether_addr(addr) eth_random_addr(addr)
-
/**
* eth_broadcast_addr - Assign broadcast address
* @addr: Pointer to a six-byte array containing the Ethernet address
@@ -265,8 +267,22 @@ static inline void eth_zero_addr(u8 *addr)
*/
static inline void eth_hw_addr_random(struct net_device *dev)
{
+ u8 addr[ETH_ALEN];
+
+ eth_random_addr(addr);
+ __dev_addr_set(dev, addr, ETH_ALEN);
dev->addr_assign_type = NET_ADDR_RANDOM;
- eth_random_addr(dev->dev_addr);
+}
+
+/**
+ * eth_hw_addr_crc - Calculate CRC from netdev_hw_addr
+ * @ha: pointer to hardware address
+ *
+ * Calculate CRC from a hardware address as basis for filter hashes.
+ */
+static inline u32 eth_hw_addr_crc(struct netdev_hw_addr *ha)
+{
+ return ether_crc(ETH_ALEN, ha->addr);
}
/**
@@ -292,6 +308,18 @@ static inline void ether_addr_copy(u8 *dst, const u8 *src)
}
/**
+ * eth_hw_addr_set - Assign Ethernet address to a net_device
+ * @dev: pointer to net_device structure
+ * @addr: address to assign
+ *
+ * Assign the given address to the net_device; addr_assign_type is not changed.
+ */
+static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
+{
+ __dev_addr_set(dev, addr, ETH_ALEN);
+}
+
+/**
* eth_hw_addr_inherit - Copy dev_addr from another net_device
* @dst: pointer to net_device to copy dev_addr to
* @src: pointer to net_device to copy dev_addr from
@@ -303,7 +331,7 @@ static inline void eth_hw_addr_inherit(struct net_device *dst,
struct net_device *src)
{
dst->addr_assign_type = src->addr_assign_type;
- ether_addr_copy(dst->dev_addr, src->dev_addr);
+ eth_hw_addr_set(dst, src->dev_addr);
}
/**
@@ -344,8 +372,7 @@ static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
 * Please note that the alignment of addr1 & addr2 is only guaranteed to be 16 bits.
*/
-static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
- const u8 addr2[6+2])
+static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);
@@ -401,6 +428,28 @@ static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
return true;
}
+static inline bool ether_addr_is_ipv4_mcast(const u8 *addr)
+{
+ u8 base[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
+ u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
+
+ return ether_addr_equal_masked(addr, base, mask);
+}
+
+static inline bool ether_addr_is_ipv6_mcast(const u8 *addr)
+{
+ u8 base[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
+ u8 mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
+
+ return ether_addr_equal_masked(addr, base, mask);
+}
+
+static inline bool ether_addr_is_ip_mcast(const u8 *addr)
+{
+ return ether_addr_is_ipv4_mcast(addr) ||
+ ether_addr_is_ipv6_mcast(addr);
+}
+
/**
* ether_addr_to_u64 - Convert an Ethernet address into a u64 value.
* @addr: Pointer to a six-byte array containing the Ethernet address
@@ -447,6 +496,32 @@ static inline void eth_addr_dec(u8 *addr)
}
/**
+ * eth_addr_inc() - Increment the given MAC address.
+ * @addr: Pointer to a six-byte array containing Ethernet address to increment.
+ */
+static inline void eth_addr_inc(u8 *addr)
+{
+ u64 u = ether_addr_to_u64(addr);
+
+ u++;
+ u64_to_ether_addr(u, addr);
+}
+
+/**
+ * eth_addr_add() - Add (or subtract) an offset to/from the given MAC address.
+ *
+ * @offset: Offset to add.
+ * @addr: Pointer to a six-byte array containing Ethernet address to increment.
+ */
+static inline void eth_addr_add(u8 *addr, long offset)
+{
+ u64 u = ether_addr_to_u64(addr);
+
+ u += offset;
+ u64_to_ether_addr(u, addr);
+}
+
+/**
* is_etherdev_addr - Tell if given Ethernet address belongs to the device.
* @dev: Pointer to a device structure
* @addr: Pointer to a six-byte array containing the Ethernet address
@@ -512,6 +587,27 @@ static inline unsigned long compare_ether_header(const void *a, const void *b)
}
/**
+ * eth_hw_addr_gen - Generate and assign Ethernet address to a port
+ * @dev: pointer to port's net_device structure
+ * @base_addr: base Ethernet address
+ * @id: offset to add to the base address
+ *
+ * Generate a MAC address using a base address and an offset and assign it
+ * to a net_device. Commonly used by switch drivers which need to compute
+ * addresses for all their ports. addr_assign_type is not changed.
+ */
+static inline void eth_hw_addr_gen(struct net_device *dev, const u8 *base_addr,
+ unsigned int id)
+{
+ u64 u = ether_addr_to_u64(base_addr);
+ u8 addr[ETH_ALEN];
+
+ u += id;
+ u64_to_ether_addr(u, addr);
+ eth_hw_addr_set(dev, addr);
+}
+
+/**
 * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
* @skb: Buffer to pad
*
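 
A sketch of the new per-port address generation helper in use, as a switch
driver might apply it; my_setup_port_addrs() and the port array are
illustrative:

	#include <linux/etherdevice.h>

	static void my_setup_port_addrs(struct net_device **ports, int nports,
					const u8 *base)
	{
		int i;

		for (i = 0; i < nports; i++)
			eth_hw_addr_gen(ports[i], base, i);	/* base + i per port */
	}
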
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 83cc9863444b..62b61527bcc4 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ethtool.h: Defines for Linux ethtool.
*
@@ -14,10 +15,10 @@
#include <linux/bitmap.h>
#include <linux/compat.h>
+#include <linux/if_ether.h>
+#include <linux/netlink.h>
#include <uapi/linux/ethtool.h>
-#ifdef CONFIG_COMPAT
-
struct compat_ethtool_rx_flow_spec {
u32 flow_type;
union ethtool_flow_union h_u;
@@ -34,11 +35,9 @@ struct compat_ethtool_rxnfc {
compat_u64 data;
struct compat_ethtool_rx_flow_spec fs;
u32 rule_cnt;
- u32 rule_locs[0];
+ u32 rule_locs[];
};
-#endif /* CONFIG_COMPAT */
-
#include <linux/rculist.h>
/**
@@ -69,6 +68,42 @@ enum {
ETH_RSS_HASH_FUNCS_COUNT
};
+/**
+ * struct kernel_ethtool_ringparam - RX/TX ring configuration
+ * @rx_buf_len: Current length of buffers on the rx ring.
+ * @tcp_data_split: Scatter packet headers and data to separate buffers
+ * @tx_push: The flag of tx push mode
+ * @rx_push: The flag of rx push mode
+ * @cqe_size: Size of TX/RX completion queue event
+ * @tx_push_buf_len: Size of TX push buffer
+ * @tx_push_buf_max_len: Maximum allowed size of TX push buffer
+ */
+struct kernel_ethtool_ringparam {
+ u32 rx_buf_len;
+ u8 tcp_data_split;
+ u8 tx_push;
+ u8 rx_push;
+ u32 cqe_size;
+ u32 tx_push_buf_len;
+ u32 tx_push_buf_max_len;
+};
+
+/**
+ * enum ethtool_supported_ring_param - indicator caps for setting ring params
+ * @ETHTOOL_RING_USE_RX_BUF_LEN: capability bit for setting rx_buf_len
+ * @ETHTOOL_RING_USE_CQE_SIZE: capability bit for setting cqe_size
+ * @ETHTOOL_RING_USE_TX_PUSH: capability bit for setting tx_push
+ * @ETHTOOL_RING_USE_RX_PUSH: capability bit for setting rx_push
+ * @ETHTOOL_RING_USE_TX_PUSH_BUF_LEN: capability bit for setting tx_push_buf_len
+ */
+enum ethtool_supported_ring_param {
+ ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
+ ETHTOOL_RING_USE_CQE_SIZE = BIT(1),
+ ETHTOOL_RING_USE_TX_PUSH = BIT(2),
+ ETHTOOL_RING_USE_RX_PUSH = BIT(3),
+ ETHTOOL_RING_USE_TX_PUSH_BUF_LEN = BIT(4),
+};
+
#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))
#define __ETH_RSS_HASH(name) __ETH_RSS_HASH_BIT(ETH_RSS_HASH_##name##_BIT)
@@ -80,10 +115,35 @@ enum {
#define ETH_RSS_HASH_NO_CHANGE 0
struct net_device;
+struct netlink_ext_ack;
-/* Some generic methods drivers may use in their ethtool_ops */
-u32 ethtool_op_get_link(struct net_device *dev);
-int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
+/* Link extended state and substate. */
+struct ethtool_link_ext_state_info {
+ enum ethtool_link_ext_state link_ext_state;
+ union {
+ enum ethtool_link_ext_substate_autoneg autoneg;
+ enum ethtool_link_ext_substate_link_training link_training;
+ enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch;
+ enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity;
+ enum ethtool_link_ext_substate_cable_issue cable_issue;
+ enum ethtool_link_ext_substate_module module;
+ u32 __link_ext_substate;
+ };
+};
+
+struct ethtool_link_ext_stats {
+ /* Custom Linux statistic for PHY level link down events.
+	 * In a simpler world it would be equal to netdev->carrier_down_count;
+	 * unfortunately, netdev also counts local reconfigurations which don't
+ * actually take the physical link down, not to mention NC-SI which,
+ * if present, keeps the link up regardless of host state.
+ * This statistic counts when PHY _actually_ went down, or lost link.
+ *
+ * Note that we need u64 for ethtool_stats_init() and comparisons
+ * to ETHTOOL_STAT_NOT_SET, but only u32 is exposed to the user.
+ */
+ u64 link_down_events;
+};
/**
* ethtool_rxfh_indir_default - get default value for RX flow hash indirection
@@ -97,10 +157,6 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
return index % n_rx_rings;
}
-/* number of link mode bits/ulongs handled internally by kernel */
-#define __ETHTOOL_LINK_MODE_MASK_NBITS \
- (__ETHTOOL_LINK_MODE_LAST + 1)
-
/* declare a link mode bitmap */
#define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \
DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS)
@@ -115,6 +171,7 @@ struct ethtool_link_ksettings {
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
} link_modes;
+ u32 lanes;
};
/**
@@ -137,6 +194,17 @@ struct ethtool_link_ksettings {
__set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
/**
+ * ethtool_link_ksettings_del_link_mode - clear bit in link_ksettings
+ * link mode mask
+ * @ptr : pointer to struct ethtool_link_ksettings
+ * @name : one of supported/advertising/lp_advertising
+ * @mode : one of the ETHTOOL_LINK_MODE_*_BIT
+ * (not atomic, no bound checking)
+ */
+#define ethtool_link_ksettings_del_link_mode(ptr, name, mode) \
+ __clear_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
+
+/**
* ethtool_link_ksettings_test_link_mode - test bit in ksettings link mode mask
* @ptr : pointer to struct ethtool_link_ksettings
* @name : one of supported/advertising/lp_advertising
@@ -152,6 +220,24 @@ extern int
__ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings);
+struct kernel_ethtool_coalesce {
+ u8 use_cqe_mode_tx;
+ u8 use_cqe_mode_rx;
+ u32 tx_aggr_max_bytes;
+ u32 tx_aggr_max_frames;
+ u32 tx_aggr_time_usecs;
+};
+
+/**
+ * ethtool_intersect_link_masks - Given two link masks, AND them together
+ * @dst: first mask and where result is stored
+ * @src: second mask to intersect with
+ *
+ * Given two link mode masks, AND them together and save the result in dst.
+ */
+void ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst,
+ struct ethtool_link_ksettings *src);
+
void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst,
u32 legacy_u32);
@@ -159,20 +245,365 @@ void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst,
bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
const unsigned long *src);
+#define ETHTOOL_COALESCE_RX_USECS BIT(0)
+#define ETHTOOL_COALESCE_RX_MAX_FRAMES BIT(1)
+#define ETHTOOL_COALESCE_RX_USECS_IRQ BIT(2)
+#define ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ BIT(3)
+#define ETHTOOL_COALESCE_TX_USECS BIT(4)
+#define ETHTOOL_COALESCE_TX_MAX_FRAMES BIT(5)
+#define ETHTOOL_COALESCE_TX_USECS_IRQ BIT(6)
+#define ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ BIT(7)
+#define ETHTOOL_COALESCE_STATS_BLOCK_USECS BIT(8)
+#define ETHTOOL_COALESCE_USE_ADAPTIVE_RX BIT(9)
+#define ETHTOOL_COALESCE_USE_ADAPTIVE_TX BIT(10)
+#define ETHTOOL_COALESCE_PKT_RATE_LOW BIT(11)
+#define ETHTOOL_COALESCE_RX_USECS_LOW BIT(12)
+#define ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW BIT(13)
+#define ETHTOOL_COALESCE_TX_USECS_LOW BIT(14)
+#define ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW BIT(15)
+#define ETHTOOL_COALESCE_PKT_RATE_HIGH BIT(16)
+#define ETHTOOL_COALESCE_RX_USECS_HIGH BIT(17)
+#define ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH BIT(18)
+#define ETHTOOL_COALESCE_TX_USECS_HIGH BIT(19)
+#define ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH BIT(20)
+#define ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL BIT(21)
+#define ETHTOOL_COALESCE_USE_CQE_RX BIT(22)
+#define ETHTOOL_COALESCE_USE_CQE_TX BIT(23)
+#define ETHTOOL_COALESCE_TX_AGGR_MAX_BYTES BIT(24)
+#define ETHTOOL_COALESCE_TX_AGGR_MAX_FRAMES BIT(25)
+#define ETHTOOL_COALESCE_TX_AGGR_TIME_USECS BIT(26)
+#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(26, 0)
+
+#define ETHTOOL_COALESCE_USECS \
+ (ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS)
+#define ETHTOOL_COALESCE_MAX_FRAMES \
+ (ETHTOOL_COALESCE_RX_MAX_FRAMES | ETHTOOL_COALESCE_TX_MAX_FRAMES)
+#define ETHTOOL_COALESCE_USECS_IRQ \
+ (ETHTOOL_COALESCE_RX_USECS_IRQ | ETHTOOL_COALESCE_TX_USECS_IRQ)
+#define ETHTOOL_COALESCE_MAX_FRAMES_IRQ \
+ (ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ)
+#define ETHTOOL_COALESCE_USE_ADAPTIVE \
+ (ETHTOOL_COALESCE_USE_ADAPTIVE_RX | ETHTOOL_COALESCE_USE_ADAPTIVE_TX)
+#define ETHTOOL_COALESCE_USECS_LOW_HIGH \
+ (ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_TX_USECS_LOW | \
+ ETHTOOL_COALESCE_RX_USECS_HIGH | ETHTOOL_COALESCE_TX_USECS_HIGH)
+#define ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH \
+ (ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW | \
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH)
+#define ETHTOOL_COALESCE_PKT_RATE_RX_USECS \
+ (ETHTOOL_COALESCE_USE_ADAPTIVE_RX | \
+ ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_RX_USECS_HIGH | \
+ ETHTOOL_COALESCE_PKT_RATE_LOW | ETHTOOL_COALESCE_PKT_RATE_HIGH | \
+ ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL)
+#define ETHTOOL_COALESCE_USE_CQE \
+ (ETHTOOL_COALESCE_USE_CQE_RX | ETHTOOL_COALESCE_USE_CQE_TX)
+#define ETHTOOL_COALESCE_TX_AGGR \
+ (ETHTOOL_COALESCE_TX_AGGR_MAX_BYTES | \
+ ETHTOOL_COALESCE_TX_AGGR_MAX_FRAMES | \
+ ETHTOOL_COALESCE_TX_AGGR_TIME_USECS)
+
+#define ETHTOOL_STAT_NOT_SET (~0ULL)
+
+static inline void ethtool_stats_init(u64 *stats, unsigned int n)
+{
+ while (n--)
+ stats[n] = ETHTOOL_STAT_NOT_SET;
+}
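
A quick usage note for ethtool_stats_init(): drivers pre-fill their stats
arrays with ETHTOOL_STAT_NOT_SET so the core can tell unreported counters
apart from genuine zeros. A minimal sketch, with the array size and the
single reported value assumed for illustration:

	static void my_fill_stats(u64 *stats)
	{
		/* start with all four counters marked "not reported" */
		ethtool_stats_init(stats, 4);

		/* the device only reports this one counter */
		stats[0] = 42;
	}
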
+
+/* Basic IEEE 802.3 MAC statistics (30.3.1.1.*), not otherwise exposed
+ * via a more targeted API.
+ */
+struct ethtool_eth_mac_stats {
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 FramesTransmittedOK;
+ u64 SingleCollisionFrames;
+ u64 MultipleCollisionFrames;
+ u64 FramesReceivedOK;
+ u64 FrameCheckSequenceErrors;
+ u64 AlignmentErrors;
+ u64 OctetsTransmittedOK;
+ u64 FramesWithDeferredXmissions;
+ u64 LateCollisions;
+ u64 FramesAbortedDueToXSColls;
+ u64 FramesLostDueToIntMACXmitError;
+ u64 CarrierSenseErrors;
+ u64 OctetsReceivedOK;
+ u64 FramesLostDueToIntMACRcvError;
+ u64 MulticastFramesXmittedOK;
+ u64 BroadcastFramesXmittedOK;
+ u64 FramesWithExcessiveDeferral;
+ u64 MulticastFramesReceivedOK;
+ u64 BroadcastFramesReceivedOK;
+ u64 InRangeLengthErrors;
+ u64 OutOfRangeLengthField;
+ u64 FrameTooLongErrors;
+ );
+};
+
+/* Basic IEEE 802.3 PHY statistics (30.3.2.1.*), not otherwise exposed
+ * via a more targeted API.
+ */
+struct ethtool_eth_phy_stats {
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 SymbolErrorDuringCarrier;
+ );
+};
+
+/* Basic IEEE 802.3 MAC Ctrl statistics (30.3.3.*), not otherwise exposed
+ * via a more targeted API.
+ */
+struct ethtool_eth_ctrl_stats {
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 MACControlFramesTransmitted;
+ u64 MACControlFramesReceived;
+ u64 UnsupportedOpcodesReceived;
+ );
+};
+
+/**
+ * struct ethtool_pause_stats - statistics for IEEE 802.3x pause frames
+ * @src: input field denoting whether stats should be queried from the eMAC or
+ * pMAC (if the MM layer is supported). To be ignored otherwise.
+ * @tx_pause_frames: transmitted pause frame count. Reported to user space
+ * as %ETHTOOL_A_PAUSE_STAT_TX_FRAMES.
+ *
+ * Equivalent to `30.3.4.2 aPAUSEMACCtrlFramesTransmitted`
+ * from the standard.
+ *
+ * @rx_pause_frames: received pause frame count. Reported to user space
+ *	as %ETHTOOL_A_PAUSE_STAT_RX_FRAMES.
+ *
+ * Equivalent to `30.3.4.3 aPAUSEMACCtrlFramesReceived`
+ * from the standard.
+ */
+struct ethtool_pause_stats {
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 tx_pause_frames;
+ u64 rx_pause_frames;
+ );
+};
+
+#define ETHTOOL_MAX_LANES 8
+
+/**
+ * struct ethtool_fec_stats - statistics for IEEE 802.3 FEC
+ * @corrected_blocks: number of received blocks corrected by FEC
+ * Reported to user space as %ETHTOOL_A_FEC_STAT_CORRECTED.
+ *
+ * Equivalent to `30.5.1.1.17 aFECCorrectedBlocks` from the standard.
+ *
+ * @uncorrectable_blocks: number of received blocks FEC was not able to correct
+ * Reported to user space as %ETHTOOL_A_FEC_STAT_UNCORR.
+ *
+ * Equivalent to `30.5.1.1.18 aFECUncorrectableBlocks` from the standard.
+ *
+ * @corrected_bits: number of bits corrected by FEC
+ * Similar to @corrected_blocks but counts individual bit changes,
+ * not entire FEC data blocks. This is a non-standard statistic.
+ * Reported to user space as %ETHTOOL_A_FEC_STAT_CORR_BITS.
+ *
+ * @lane: per-lane/PCS-instance counts as defined by the standard
+ * @total: error counts for the entire port, for drivers incapable of reporting
+ * per-lane stats
+ *
+ * Drivers should fill in either only total or only per-lane statistics;
+ * the core will take care of adding up the lane values to produce the total.
+ */
+struct ethtool_fec_stats {
+ struct ethtool_fec_stat {
+ u64 total;
+ u64 lanes[ETHTOOL_MAX_LANES];
+ } corrected_blocks, uncorrectable_blocks, corrected_bits;
+};
+
+/**
+ * struct ethtool_rmon_hist_range - byte range for histogram statistics
+ * @low: low bound of the bucket (inclusive)
+ * @high: high bound of the bucket (inclusive)
+ */
+struct ethtool_rmon_hist_range {
+ u16 low;
+ u16 high;
+};
+
+#define ETHTOOL_RMON_HIST_MAX 10
+
+/**
+ * struct ethtool_rmon_stats - selected RMON (RFC 2819) statistics
+ * @src: input field denoting whether stats should be queried from the eMAC or
+ * pMAC (if the MM layer is supported). To be ignored otherwise.
+ * @undersize_pkts: Equivalent to `etherStatsUndersizePkts` from the RFC.
+ * @oversize_pkts: Equivalent to `etherStatsOversizePkts` from the RFC.
+ * @fragments: Equivalent to `etherStatsFragments` from the RFC.
+ * @jabbers: Equivalent to `etherStatsJabbers` from the RFC.
+ * @hist: Packet counter for packet length buckets (e.g.
+ * `etherStatsPkts128to255Octets` from the RFC).
+ * @hist_tx: Tx counters in similar form to @hist, not defined in the RFC.
+ *
+ * Selection of RMON (RFC 2819) statistics which are not exposed via different
+ * APIs, primarily the packet-length-based counters.
+ * Unfortunately different designs choose different buckets beyond
+ * the 1024B mark (jumbo frame territory), so the definition of the bucket
+ * ranges is left to the driver.
+ */
+struct ethtool_rmon_stats {
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 undersize_pkts;
+ u64 oversize_pkts;
+ u64 fragments;
+ u64 jabbers;
+
+ u64 hist[ETHTOOL_RMON_HIST_MAX];
+ u64 hist_tx[ETHTOOL_RMON_HIST_MAX];
+ );
+};
+
+#define ETH_MODULE_EEPROM_PAGE_LEN 128
+#define ETH_MODULE_MAX_I2C_ADDRESS 0x7f
+
+/**
+ * struct ethtool_module_eeprom - EEPROM dump from specified page
+ * @offset: Offset within the specified EEPROM page to begin read, in bytes.
+ * @length: Number of bytes to read.
+ * @page: Page number to read from.
+ * @bank: Page bank number to read from, if applicable by EEPROM spec.
+ * @i2c_address: I2C address of a page. Value less than 0x7f expected. Most
+ * EEPROMs use 0x50 or 0x51.
+ * @data: Pointer to buffer with EEPROM data of @length size.
+ *
+ * This can be used to manage pages during EEPROM dump in ethtool and pass
+ * required information to the driver.
+ */
+struct ethtool_module_eeprom {
+ u32 offset;
+ u32 length;
+ u8 page;
+ u8 bank;
+ u8 i2c_address;
+ u8 *data;
+};
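
A sketch of a @get_module_eeprom_by_page handler consuming this structure; the I2C helper is hypothetical, and per the ops documentation further below the return value is the number of bytes read or a negative error code:

    static int my_get_module_eeprom_by_page(struct net_device *dev,
                                            const struct ethtool_module_eeprom *page,
                                            struct netlink_ext_ack *extack)
    {
            struct my_priv *priv = netdev_priv(dev);    /* hypothetical */
            int err;

            err = my_i2c_read(priv, page->i2c_address, page->bank,
                              page->page, page->offset,
                              page->data, page->length);
            if (err) {
                    NL_SET_ERR_MSG_MOD(extack, "module EEPROM read failed");
                    return err;
            }
            return page->length;
    }
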
+
+/**
+ * struct ethtool_module_power_mode_params - module power mode parameters
+ * @policy: The power mode policy enforced by the host for the plug-in module.
+ * @mode: The operational power mode of the plug-in module. Should be filled by
+ * device drivers on get operations.
+ */
+struct ethtool_module_power_mode_params {
+ enum ethtool_module_power_mode_policy policy;
+ enum ethtool_module_power_mode mode;
+};
+
+/**
+ * struct ethtool_mm_state - 802.3 MAC merge layer state
+ * @verify_time:
+ * wait time between verification attempts in ms (according to clause
+ * 30.14.1.6 aMACMergeVerifyTime)
+ * @max_verify_time:
+ * maximum accepted value for the @verify_time variable in set requests
+ * @verify_status:
+ * state of the verification state machine of the MM layer (according to
+ * clause 30.14.1.2 aMACMergeStatusVerify)
+ * @tx_enabled:
+ * set if the MM layer is administratively enabled in the TX direction
+ * (according to clause 30.14.1.3 aMACMergeEnableTx)
+ * @tx_active:
+ * set if the MM layer is enabled in the TX direction, which makes FP
+ * possible (according to 30.14.1.5 aMACMergeStatusTx). This should be
+ * true if MM is enabled, and the verification status is either verified,
+ * or disabled.
+ * @pmac_enabled:
+ * set if the preemptible MAC is powered on and is able to receive
+ * preemptible packets and respond to verification frames.
+ * @verify_enabled:
+ * set if the Verify function of the MM layer (which sends SMD-V
+ * verification requests) is administratively enabled (regardless of
+ * whether it is currently in the ETHTOOL_MM_VERIFY_STATUS_DISABLED state
+ * or not), according to clause 30.14.1.4 aMACMergeVerifyDisableTx (but
+ * using positive rather than negative logic). The device should always
+ * respond to received SMD-V requests as long as @pmac_enabled is set.
+ * @tx_min_frag_size:
+ * the minimum size of non-final mPacket fragments that the link partner
+ * supports receiving, expressed in octets. Compared to the definition
+ * from clause 30.14.1.7 aMACMergeAddFragSize which is expressed in the
+ * range 0 to 3 (requiring a translation to the size in octets according
+ * to the formula 64 * (1 + addFragSize) - 4), a value in a continuous and
+ * unbounded range can be specified here.
+ * @rx_min_frag_size:
+ * the minimum size of non-final mPacket fragments that this device
+ * supports receiving, expressed in octets.
+ */
+struct ethtool_mm_state {
+ u32 verify_time;
+ u32 max_verify_time;
+ enum ethtool_mm_verify_status verify_status;
+ bool tx_enabled;
+ bool tx_active;
+ bool pmac_enabled;
+ bool verify_enabled;
+ u32 tx_min_frag_size;
+ u32 rx_min_frag_size;
+};
+
+/**
+ * struct ethtool_mm_cfg - 802.3 MAC merge layer configuration
+ * @verify_time: see struct ethtool_mm_state
+ * @verify_enabled: see struct ethtool_mm_state
+ * @tx_enabled: see struct ethtool_mm_state
+ * @pmac_enabled: see struct ethtool_mm_state
+ * @tx_min_frag_size: see struct ethtool_mm_state
+ */
+struct ethtool_mm_cfg {
+ u32 verify_time;
+ bool verify_enabled;
+ bool tx_enabled;
+ bool pmac_enabled;
+ u32 tx_min_frag_size;
+};
+
+/**
+ * struct ethtool_mm_stats - 802.3 MAC merge layer statistics
+ * @MACMergeFrameAssErrorCount:
+ * received MAC frames with reassembly errors
+ * @MACMergeFrameSmdErrorCount:
+ * received MAC frames/fragments rejected due to unknown or incorrect SMD
+ * @MACMergeFrameAssOkCount:
+ * received MAC frames that were successfully reassembled and passed up
+ * @MACMergeFragCountRx:
+ * number of additional correct SMD-C mPackets received due to preemption
+ * @MACMergeFragCountTx:
+ * number of additional mPackets sent due to preemption
+ * @MACMergeHoldCount:
+ * number of times the MM layer entered the HOLD state, which blocks
+ * transmission of preemptible traffic
+ */
+struct ethtool_mm_stats {
+ u64 MACMergeFrameAssErrorCount;
+ u64 MACMergeFrameSmdErrorCount;
+ u64 MACMergeFrameAssOkCount;
+ u64 MACMergeFragCountRx;
+ u64 MACMergeFragCountTx;
+ u64 MACMergeHoldCount;
+};
+
/**
* struct ethtool_ops - optional netdev operations
- * @get_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings
- * API. Get various device settings including Ethernet link
- * settings. The @cmd parameter is expected to have been cleared
- * before get_settings is called. Returns a negative error code
- * or zero.
- * @set_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings
- * API. Set various device settings including Ethernet link
- * settings. Returns a negative error code or zero.
- * @get_drvinfo: Report driver/device information. Should only set the
- * @driver, @version, @fw_version and @bus_info fields. If not
- * implemented, the @driver and @bus_info fields will be filled in
- * according to the netdev's parent device.
+ * @cap_link_lanes_supported: indicates if the driver supports lanes
+ * parameter.
+ * @supported_coalesce_params: supported types of interrupt coalescing.
+ * @supported_ring_params: supported ring params.
+ * @get_drvinfo: Report driver/device information. Modern drivers no
+ * longer have to implement this callback. Most fields are
+ * correctly filled in by the core using system information, or
+ * populated using other driver operations.
* @get_regs_len: Get buffer length required for @get_regs
* @get_regs: Get device registers
* @get_wol: Report whether Wake-on-Lan is enabled
@@ -186,6 +617,15 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* @get_link: Report whether physical link is up. Will only be called if
* the netdev is up. Should usually be set to ethtool_op_get_link(),
* which uses netif_carrier_ok().
+ * @get_link_ext_state: Report link extended state. Should set link_ext_state and
+ * link_ext_substate (a link_ext_substate of 0 means the substate is unknown,
+ * in which case the ext_substate attribute is not attached to the netlink
+ * message). If link_ext_state and link_ext_substate are unknown, return
+ * -ENODATA. If not implemented, link_ext_state and link_ext_substate will
+ * not be sent to userspace.
+ * @get_link_ext_stats: Read extra link-related counters.
+ * @get_eeprom_len: Read range of EEPROM addresses for validation of
+ * @get_eeprom and @set_eeprom requests.
+ * Returns 0 if device does not support EEPROM access.
* @get_eeprom: Read data from the device EEPROM.
* Should fill in the magic field. Don't need to check len for zero
* or wraparound. Fill in the data argument with the eeprom values
@@ -197,10 +637,14 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* or zero.
* @get_coalesce: Get interrupt coalescing parameters. Returns a negative
* error code or zero.
- * @set_coalesce: Set interrupt coalescing parameters. Returns a negative
- * error code or zero.
+ * @set_coalesce: Set interrupt coalescing parameters. Supported coalescing
+ * types should be set in @supported_coalesce_params.
+ * Returns a negative error code or zero.
* @get_ringparam: Report ring sizes
* @set_ringparam: Set ring sizes. Returns a negative error code or zero.
+ * @get_pause_stats: Report pause frame statistics. Drivers must not zero
+ * statistics which they don't report. The stats structure is initialized
+ * to ETHTOOL_STAT_NOT_SET, indicating that the driver does not report a
+ * given statistic.
* @get_pauseparam: Report pause parameters
* @set_pauseparam: Set pause parameters. Returns a negative error code
* or zero.
@@ -250,6 +694,15 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* will remain unchanged.
* Returns a negative error code or zero. An error code must be returned
* if at least one unsupported change was requested.
+ * @get_rxfh_context: Get the contents of the RX flow hash indirection table,
+ * hash key, and/or hash function associated to the given rss context.
+ * Returns a negative error code or zero.
+ * @set_rxfh_context: Create, remove and configure RSS contexts. Allows setting
+ * the contents of the RX flow hash indirection table, hash key, and/or
+ * hash function associated to the given context. Arguments which are set
+ * to %NULL or zero will remain unchanged.
+ * Returns a negative error code or zero. An error code must be returned
+ * if at least one unsupported change was requested.
* @get_channels: Get number of channels.
* @set_channels: Set number of channels. Returns a negative error code or
* zero.
@@ -258,6 +711,7 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* @get_dump_data: Get dump data.
* @set_dump: Set dump specific flags to the device.
* @get_ts_info: Get the time stamping and PTP hardware clock capabilities.
+ * It may be called under RCU, under the rtnl lock, or with a reference
+ * held on the device.
* Drivers supporting transmit time stamps in software should set this to
* ethtool_op_get_ts_info().
* @get_module_info: Get the size and type of the eeprom contained within
@@ -265,6 +719,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* @get_module_eeprom: Get the eeprom information from the plug-in module
* @get_eee: Get Energy-Efficient (EEE) supported and status.
* @set_eee: Set EEE status (enable/disable) as well as LPI timers.
+ * @get_tunable: Read the value of a driver / device tunable.
+ * @set_tunable: Set the value of a driver / device tunable.
* @get_per_queue_coalesce: Get interrupt coalescing parameters per queue.
* It must check that the given queue number is valid. If neither a RX nor
* a TX queue has this number, return -EINVAL. If only a RX queue or a TX
@@ -273,21 +729,47 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* @set_per_queue_coalesce: Set interrupt coalescing parameters per queue.
* It must check that the given queue number is valid. If neither a RX nor
* a TX queue has this number, return -EINVAL. If only a RX queue or a TX
- * queue has this number, ignore the inapplicable fields.
+ * queue has this number, ignore the inapplicable fields. Supported
+ * coalescing types should be set in @supported_coalesce_params.
* Returns a negative error code or zero.
- * @get_link_ksettings: When defined, takes precedence over the
- * %get_settings method. Get various device settings
- * including Ethernet link settings. The %cmd and
- * %link_mode_masks_nwords fields should be ignored (use
- * %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), any
- * change to them will be overwritten by kernel. Returns a
- * negative error code or zero.
- * @set_link_ksettings: When defined, takes precedence over the
- * %set_settings method. Set various device settings including
- * Ethernet link settings. The %cmd and %link_mode_masks_nwords
- * fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS
- * instead of the latter), any change to them will be overwritten
- * by kernel. Returns a negative error code or zero.
+ * @get_link_ksettings: Get various device settings including Ethernet link
+ * settings. The %cmd and %link_mode_masks_nwords fields should be
+ * ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter),
+ * any change to them will be overwritten by kernel. Returns a negative
+ * error code or zero.
+ * @set_link_ksettings: Set various device settings including Ethernet link
+ * settings. The %cmd and %link_mode_masks_nwords fields should be
+ * ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter),
+ * any change to them will be overwritten by kernel. Returns a negative
+ * error code or zero.
+ * @get_fec_stats: Report FEC statistics.
+ * Core will sum up per-lane stats to get the total.
+ * Drivers must not zero statistics which they don't report. The stats
+ * structure is initialized to ETHTOOL_STAT_NOT_SET, indicating that the
+ * driver does not report a given statistic.
+ * @get_fecparam: Get the network device Forward Error Correction parameters.
+ * @set_fecparam: Set the network device Forward Error Correction parameters.
+ * @get_ethtool_phy_stats: Return extended statistics about the PHY device.
+ * This is only useful if the device maintains PHY statistics and
+ * cannot use the standard PHY library helpers.
+ * @get_phy_tunable: Read the value of a PHY tunable.
+ * @set_phy_tunable: Set the value of a PHY tunable.
+ * @get_module_eeprom_by_page: Get a region of plug-in module EEPROM data from
+ * the specified page. Returns a negative error code or the number of bytes
+ * read.
+ * @get_eth_phy_stats: Query some of the IEEE 802.3 PHY statistics.
+ * @get_eth_mac_stats: Query some of the IEEE 802.3 MAC statistics.
+ * @get_eth_ctrl_stats: Query some of the IEEE 802.3 MAC Ctrl statistics.
+ * @get_rmon_stats: Query some of the RMON (RFC 2819) statistics.
+ * Set %ranges to a pointer to zero-terminated array of byte ranges.
+ * @get_module_power_mode: Get the power mode policy for the plug-in module
+ * used by the network device and its operational power mode, if
+ * plugged-in.
+ * @set_module_power_mode: Set the power mode policy for the plug-in module
+ * used by the network device.
+ * @get_mm: Query the 802.3 MAC Merge layer state.
+ * @set_mm: Set the 802.3 MAC Merge layer parameters.
+ * @get_mm_stats: Query the 802.3 MAC Merge layer statistics.
*
* All operations are optional (i.e. the function pointer may be set
* to %NULL) and callers must take this into account. Callers must
@@ -302,8 +784,9 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* of the generic netdev features interface.
*/
struct ethtool_ops {
- int (*get_settings)(struct net_device *, struct ethtool_cmd *);
- int (*set_settings)(struct net_device *, struct ethtool_cmd *);
+ u32 cap_link_lanes_supported:1;
+ u32 supported_coalesce_params;
+ u32 supported_ring_params;
void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
int (*get_regs_len)(struct net_device *);
void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
@@ -313,17 +796,33 @@ struct ethtool_ops {
void (*set_msglevel)(struct net_device *, u32);
int (*nway_reset)(struct net_device *);
u32 (*get_link)(struct net_device *);
+ int (*get_link_ext_state)(struct net_device *,
+ struct ethtool_link_ext_state_info *);
+ void (*get_link_ext_stats)(struct net_device *dev,
+ struct ethtool_link_ext_stats *stats);
int (*get_eeprom_len)(struct net_device *);
int (*get_eeprom)(struct net_device *,
struct ethtool_eeprom *, u8 *);
int (*set_eeprom)(struct net_device *,
struct ethtool_eeprom *, u8 *);
- int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
- int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
+ int (*get_coalesce)(struct net_device *,
+ struct ethtool_coalesce *,
+ struct kernel_ethtool_coalesce *,
+ struct netlink_ext_ack *);
+ int (*set_coalesce)(struct net_device *,
+ struct ethtool_coalesce *,
+ struct kernel_ethtool_coalesce *,
+ struct netlink_ext_ack *);
void (*get_ringparam)(struct net_device *,
- struct ethtool_ringparam *);
+ struct ethtool_ringparam *,
+ struct kernel_ethtool_ringparam *,
+ struct netlink_ext_ack *);
int (*set_ringparam)(struct net_device *,
- struct ethtool_ringparam *);
+ struct ethtool_ringparam *,
+ struct kernel_ethtool_ringparam *,
+ struct netlink_ext_ack *);
+ void (*get_pause_stats)(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats);
void (*get_pauseparam)(struct net_device *,
struct ethtool_pauseparam*);
int (*set_pauseparam)(struct net_device *,
@@ -349,6 +848,11 @@ struct ethtool_ops {
u8 *hfunc);
int (*set_rxfh)(struct net_device *, const u32 *indir,
const u8 *key, const u8 hfunc);
+ int (*get_rxfh_context)(struct net_device *, u32 *indir, u8 *key,
+ u8 *hfunc, u32 rss_context);
+ int (*set_rxfh_context)(struct net_device *, const u32 *indir,
+ const u8 *key, const u8 hfunc,
+ u32 *rss_context, bool delete);
void (*get_channels)(struct net_device *, struct ethtool_channels *);
int (*set_channels)(struct net_device *, struct ethtool_channels *);
int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);
@@ -374,5 +878,178 @@ struct ethtool_ops {
struct ethtool_link_ksettings *);
int (*set_link_ksettings)(struct net_device *,
const struct ethtool_link_ksettings *);
+ void (*get_fec_stats)(struct net_device *dev,
+ struct ethtool_fec_stats *fec_stats);
+ int (*get_fecparam)(struct net_device *,
+ struct ethtool_fecparam *);
+ int (*set_fecparam)(struct net_device *,
+ struct ethtool_fecparam *);
+ void (*get_ethtool_phy_stats)(struct net_device *,
+ struct ethtool_stats *, u64 *);
+ int (*get_phy_tunable)(struct net_device *,
+ const struct ethtool_tunable *, void *);
+ int (*set_phy_tunable)(struct net_device *,
+ const struct ethtool_tunable *, const void *);
+ int (*get_module_eeprom_by_page)(struct net_device *dev,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack);
+ void (*get_eth_phy_stats)(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats);
+ void (*get_eth_mac_stats)(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats);
+ void (*get_eth_ctrl_stats)(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats);
+ void (*get_rmon_stats)(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges);
+ int (*get_module_power_mode)(struct net_device *dev,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack);
+ int (*set_module_power_mode)(struct net_device *dev,
+ const struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack);
+ int (*get_mm)(struct net_device *dev, struct ethtool_mm_state *state);
+ int (*set_mm)(struct net_device *dev, struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack);
+ void (*get_mm_stats)(struct net_device *dev, struct ethtool_mm_stats *stats);
+};
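
Tying the new capability fields to the callbacks, a hypothetical driver's ops table could look as follows (the callbacks refer to the sketches earlier in this section; the coalesce bits are the ETHTOOL_COALESCE_* flags defined alongside @supported_coalesce_params):

    static const struct ethtool_ops my_ethtool_ops = {
            .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
                                         ETHTOOL_COALESCE_TX_USECS,
            .get_link        = ethtool_op_get_link,
            .get_ts_info     = ethtool_op_get_ts_info,
            .get_pause_stats = my_get_pause_stats,
            .get_fec_stats   = my_get_fec_stats,
            .get_rmon_stats  = my_get_rmon_stats,
    };

    /* In the probe path: dev->ethtool_ops = &my_ethtool_ops; */
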
+
+int ethtool_check_ops(const struct ethtool_ops *ops);
+
+struct ethtool_rx_flow_rule {
+ struct flow_rule *rule;
+ unsigned long priv[];
+};
+
+struct ethtool_rx_flow_spec_input {
+ const struct ethtool_rx_flow_spec *fs;
+ u32 rss_ctx;
+};
+
+struct ethtool_rx_flow_rule *
+ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input);
+void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *rule);
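
A sketch of the intended calling pattern for these helpers, as a driver might use when translating an ethtool classification rule into the flow_rule representation; `my_hw_install_flow_rule` is an invented stand-in for the hardware-specific part:

    static int my_add_cls_rule(struct net_device *dev,
                               const struct ethtool_rx_flow_spec *fs,
                               u32 rss_context)
    {
            struct ethtool_rx_flow_spec_input input = {
                    .fs      = fs,
                    .rss_ctx = rss_context,
            };
            struct ethtool_rx_flow_rule *rule;
            int err;

            rule = ethtool_rx_flow_rule_create(&input);
            if (IS_ERR(rule))
                    return PTR_ERR(rule);

            /* rule->rule is a struct flow_rule the hardware layer parses */
            err = my_hw_install_flow_rule(dev, rule->rule);

            ethtool_rx_flow_rule_destroy(rule);
            return err;
    }
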
+
+bool ethtool_virtdev_validate_cmd(const struct ethtool_link_ksettings *cmd);
+int ethtool_virtdev_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd,
+ u32 *dev_speed, u8 *dev_duplex);
+
+struct phy_device;
+struct phy_tdr_config;
+struct phy_plca_cfg;
+struct phy_plca_status;
+
+/**
+ * struct ethtool_phy_ops - Optional PHY device options
+ * @get_sset_count: Get number of strings that @get_strings will write.
+ * @get_strings: Return a set of strings that describe the requested objects
+ * @get_stats: Return extended statistics about the PHY device.
+ * @get_plca_cfg: Return PLCA configuration.
+ * @set_plca_cfg: Set PLCA configuration.
+ * @get_plca_status: Get PLCA status.
+ * @start_cable_test: Start a cable test
+ * @start_cable_test_tdr: Start a Time Domain Reflectometry cable test
+ *
+ * All operations are optional (i.e. the function pointer may be set to %NULL)
+ * and callers must take this into account. Callers must hold the RTNL lock.
+ */
+struct ethtool_phy_ops {
+ int (*get_sset_count)(struct phy_device *dev);
+ int (*get_strings)(struct phy_device *dev, u8 *data);
+ int (*get_stats)(struct phy_device *dev,
+ struct ethtool_stats *stats, u64 *data);
+ int (*get_plca_cfg)(struct phy_device *dev,
+ struct phy_plca_cfg *plca_cfg);
+ int (*set_plca_cfg)(struct phy_device *dev,
+ const struct phy_plca_cfg *plca_cfg,
+ struct netlink_ext_ack *extack);
+ int (*get_plca_status)(struct phy_device *dev,
+ struct phy_plca_status *plca_st);
+ int (*start_cable_test)(struct phy_device *phydev,
+ struct netlink_ext_ack *extack);
+ int (*start_cable_test_tdr)(struct phy_device *phydev,
+ struct netlink_ext_ack *extack,
+ const struct phy_tdr_config *config);
};
+
+/**
+ * ethtool_set_ethtool_phy_ops - Set the ethtool_phy_ops singleton
+ * @ops: Ethtool PHY operations to set
+ */
+void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops);
+
+/**
+ * ethtool_params_from_link_mode - Derive link parameters from a given link mode
+ * @link_ksettings: Link parameters to be derived from the link mode
+ * @link_mode: Link mode
+ */
+void
+ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
+ enum ethtool_link_mode_bit_indices link_mode);
+
+/**
+ * ethtool_get_phc_vclocks - Derive phc vclock information; the caller is
+ * responsible for freeing the memory of @vclock_index
+ * @dev: pointer to net_device structure
+ * @vclock_index: pointer to pointer of vclock index
+ *
+ * Return number of phc vclocks
+ */
+int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index);
+
+/* Some generic methods drivers may use in their ethtool_ops */
+u32 ethtool_op_get_link(struct net_device *dev);
+int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
+
+/**
+ * ethtool_mm_frag_size_add_to_min - Translate (standard) additional fragment
+ * size expressed as multiplier into (absolute) minimum fragment size
+ * value expressed in octets
+ * @val_add: Value of addFragSize multiplier
+ */
+static inline u32 ethtool_mm_frag_size_add_to_min(u32 val_add)
+{
+ return (ETH_ZLEN + ETH_FCS_LEN) * (1 + val_add) - ETH_FCS_LEN;
+}
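
For reference, the four values this helper can produce; since ETH_ZLEN is 60 and ETH_FCS_LEN is 4, the expression reduces to the standard's 64 * (1 + addFragSize) - 4:

    /*
     * ethtool_mm_frag_size_add_to_min(0) ==  60
     * ethtool_mm_frag_size_add_to_min(1) == 124
     * ethtool_mm_frag_size_add_to_min(2) == 188
     * ethtool_mm_frag_size_add_to_min(3) == 252
     */
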
+
+/**
+ * ethtool_mm_frag_size_min_to_add - Translate (absolute) minimum fragment size
+ * expressed in octets into (standard) additional fragment size expressed
+ * as multiplier
+ * @val_min: Value of addFragSize variable in octets
+ * @val_add: Pointer where the standard addFragSize value is to be returned
+ * @extack: Netlink extended ack
+ *
+ * Translate a value in octets to one of 0, 1, 2, 3 according to the reverse
+ * application of the 802.3 formula 64 * (1 + addFragSize) - 4. To be called
+ * by drivers which do not support programming the minimum fragment size to a
+ * continuous range. Returns an error for other fragment length values.
+ */
+static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add,
+ struct netlink_ext_ack *extack)
+{
+ u32 add_frag_size;
+
+ for (add_frag_size = 0; add_frag_size < 4; add_frag_size++) {
+ if (ethtool_mm_frag_size_add_to_min(add_frag_size) == val_min) {
+ *val_add = add_frag_size;
+ return 0;
+ }
+ }
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "minFragSize required to be one of 60, 124, 188 or 252");
+ return -EINVAL;
+}
+
+/**
+ * ethtool_sprintf - Write formatted string to ethtool string data
+ * @data: Pointer to start of string to update
+ * @fmt: Format of string to write
+ *
+ * Write formatted string to data. Update data to point at start of
+ * next string.
+ */
+extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...);
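
Typical use is in a driver's .get_strings callback, where each call emits one ETH_GSTRING_LEN-sized entry and advances the cursor. A sketch with invented stat names and queue count:

    static void my_get_strings(struct net_device *dev, u32 sset, u8 *data)
    {
            int i;

            if (sset != ETH_SS_STATS)
                    return;

            for (i = 0; i < MY_NUM_QUEUES; i++) {   /* MY_NUM_QUEUES: hypothetical */
                    ethtool_sprintf(&data, "rxq%d_packets", i);
                    ethtool_sprintf(&data, "rxq%d_bytes", i);
            }
    }
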
#endif /* _LINUX_ETHTOOL_H */
diff --git a/include/linux/ethtool_netlink.h b/include/linux/ethtool_netlink.h
new file mode 100644
index 000000000000..fae0dfb9a9c8
--- /dev/null
+++ b/include/linux/ethtool_netlink.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _LINUX_ETHTOOL_NETLINK_H_
+#define _LINUX_ETHTOOL_NETLINK_H_
+
+#include <uapi/linux/ethtool_netlink.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#define __ETHTOOL_LINK_MODE_MASK_NWORDS \
+ DIV_ROUND_UP(__ETHTOOL_LINK_MODE_MASK_NBITS, 32)
+
+#define ETHTOOL_PAUSE_STAT_CNT (__ETHTOOL_A_PAUSE_STAT_CNT - \
+ ETHTOOL_A_PAUSE_STAT_TX_FRAMES)
+
+enum ethtool_multicast_groups {
+ ETHNL_MCGRP_MONITOR,
+};
+
+struct phy_device;
+
+#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
+int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd);
+void ethnl_cable_test_free(struct phy_device *phydev);
+void ethnl_cable_test_finished(struct phy_device *phydev);
+int ethnl_cable_test_result(struct phy_device *phydev, u8 pair, u8 result);
+int ethnl_cable_test_fault_length(struct phy_device *phydev, u8 pair, u32 cm);
+int ethnl_cable_test_amplitude(struct phy_device *phydev, u8 pair, s16 mV);
+int ethnl_cable_test_pulse(struct phy_device *phydev, u16 mV);
+int ethnl_cable_test_step(struct phy_device *phydev, u32 first, u32 last,
+ u32 step);
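
A hedged sketch of a PHY driver reporting results through these helpers from its cable-test status hook; the ETHTOOL_A_CABLE_* constants come from the uapi header, the fault length is in centimeters, and the actual detection logic is elided:

    static int my_cable_test_get_status(struct phy_device *phydev,
                                        bool *finished)
    {
            int ret;

            ret = ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
                                          ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
            if (ret)
                    return ret;

            ret = ethnl_cable_test_fault_length(phydev,
                                                ETHTOOL_A_CABLE_PAIR_A,
                                                150 /* cm */);
            *finished = true;
            return ret;
    }
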
+void ethtool_aggregate_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats);
+void ethtool_aggregate_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats);
+void ethtool_aggregate_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats);
+void ethtool_aggregate_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats);
+void ethtool_aggregate_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats);
+bool ethtool_dev_mm_supported(struct net_device *dev);
+
+#else
+static inline int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void ethnl_cable_test_free(struct phy_device *phydev)
+{
+}
+
+static inline void ethnl_cable_test_finished(struct phy_device *phydev)
+{
+}
+static inline int ethnl_cable_test_result(struct phy_device *phydev, u8 pair,
+ u8 result)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ethnl_cable_test_fault_length(struct phy_device *phydev,
+ u8 pair, u32 cm)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ethnl_cable_test_amplitude(struct phy_device *phydev,
+ u8 pair, s16 mV)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ethnl_cable_test_pulse(struct phy_device *phydev, u16 mV)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ethnl_cable_test_step(struct phy_device *phydev, u32 first,
+ u32 last, u32 step)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+ethtool_aggregate_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats)
+{
+}
+
+static inline bool ethtool_dev_mm_supported(struct net_device *dev)
+{
+ return false;
+}
+
+#endif /* IS_ENABLED(CONFIG_ETHTOOL_NETLINK) */
+#endif /* _LINUX_ETHTOOL_NETLINK_H_ */
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 9e4befd95bc7..36a486505b08 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/eventfd.h
*
@@ -10,6 +11,10 @@
#include <linux/fcntl.h>
#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/percpu-defs.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
/*
* CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
@@ -25,20 +30,25 @@
#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
+struct eventfd_ctx;
struct file;
#ifdef CONFIG_EVENTFD
-struct file *eventfd_file_create(unsigned int count, int flags);
-struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx);
void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
-ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt);
+__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
__u64 *cnt);
+void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
+
+static inline bool eventfd_signal_allowed(void)
+{
+ return !current->in_eventfd;
+}
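
The helper exists so that code which may run from an eventfd wakeup path can avoid recursing into eventfd_signal(). A sketch, with a hypothetical context structure and deferral work item:

    #include <linux/eventfd.h>
    #include <linux/workqueue.h>

    struct my_ctx {                         /* hypothetical */
            struct eventfd_ctx *evfd;
            struct work_struct signal_work;
    };

    static void my_notify(struct my_ctx *ctx)
    {
            if (eventfd_signal_allowed())
                    eventfd_signal(ctx->evfd, 1);
            else
                    /* defer; signal from process context later */
                    schedule_work(&ctx->signal_work);
    }
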
#else /* CONFIG_EVENTFD */
@@ -46,17 +56,19 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w
 * Ugly ugly ugly error layer to support modules that use eventfd but
* pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO.
*/
-static inline struct file *eventfd_file_create(unsigned int count, int flags)
+
+static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
return ERR_PTR(-ENOSYS);
}
-static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
+static inline int eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
- return ERR_PTR(-ENOSYS);
+ return -ENOSYS;
}
-static inline int eventfd_signal(struct eventfd_ctx *ctx, int n)
+static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n,
+ unsigned mask)
{
return -ENOSYS;
}
@@ -66,16 +78,20 @@ static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
}
-static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait,
- __u64 *cnt)
+static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
+ wait_queue_entry_t *wait, __u64 *cnt)
{
return -ENOSYS;
}
-static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
- wait_queue_entry_t *wait, __u64 *cnt)
+static inline bool eventfd_signal_allowed(void)
{
- return -ENOSYS;
+ return true;
+}
+
+static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
+{
+}
#endif
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index 2f14ac73d01d..3337745d81bd 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -1,14 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/eventpoll.h ( Efficient event polling implementation )
* Copyright (C) 2001,...,2006 Davide Libenzi
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Davide Libenzi <davidel@xmailserver.org>
- *
*/
#ifndef _LINUX_EVENTPOLL_H
#define _LINUX_EVENTPOLL_H
@@ -23,18 +18,10 @@ struct file;
#ifdef CONFIG_EPOLL
-#ifdef CONFIG_CHECKPOINT_RESTORE
+#ifdef CONFIG_KCMP
struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, unsigned long toff);
#endif
-/* Used to initialize the epoll bits inside the "struct file" */
-static inline void eventpoll_init_file(struct file *file)
-{
- INIT_LIST_HEAD(&file->f_ep_links);
- INIT_LIST_HEAD(&file->f_tfile_llink);
-}
-
-
/* Used to release the epoll bits inside the "struct file" */
void eventpoll_release_file(struct file *file);
@@ -55,7 +42,7 @@ static inline void eventpoll_release(struct file *file)
	 * because the file is on the way to be removed and nobody (but
	 * eventpoll) still has a reference to this file.
*/
- if (likely(list_empty(&file->f_ep_links)))
+ if (likely(!file->f_ep))
return;
/*
@@ -66,11 +53,37 @@ static inline void eventpoll_release(struct file *file)
eventpoll_release_file(file);
}
+int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
+ bool nonblock);
+
+/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
+static inline int ep_op_has_event(int op)
+{
+ return op != EPOLL_CTL_DEL;
+}
+
#else
-static inline void eventpoll_init_file(struct file *file) {}
static inline void eventpoll_release(struct file *file) {}
#endif
+#if defined(CONFIG_ARM) && defined(CONFIG_OABI_COMPAT)
+/* ARM OABI has an incompatible struct layout and needs a special handler */
+extern struct epoll_event __user *
+epoll_put_uevent(__poll_t revents, __u64 data,
+ struct epoll_event __user *uevent);
+#else
+static inline struct epoll_event __user *
+epoll_put_uevent(__poll_t revents, __u64 data,
+ struct epoll_event __user *uevent)
+{
+ if (__put_user(revents, &uevent->events) ||
+ __put_user(data, &uevent->data))
+ return NULL;
+
+ return uevent+1;
+}
+#endif
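
A sketch of the event-transfer loop this helper is shaped for; a NULL return means the write to the user buffer faulted (the ready-list type is invented):

    struct my_ready_item {                  /* hypothetical */
            struct my_ready_item *next;
            __poll_t revents;
            __u64 data;
    };

    static int my_send_events(struct epoll_event __user *uevent,
                              struct my_ready_item *item, int maxevents)
    {
            int res = 0;

            while (item && res < maxevents) {
                    uevent = epoll_put_uevent(item->revents, item->data,
                                              uevent);
                    if (!uevent)
                            return res ? res : -EFAULT;
                    res++;
                    item = item->next;
            }
            return res;
    }
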
+
#endif /* #ifndef _LINUX_EVENTPOLL_H */
diff --git a/include/linux/evm.h b/include/linux/evm.h
index 35ed9a8a403a..7dc1ee74169f 100644
--- a/include/linux/evm.h
+++ b/include/linux/evm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* evm.h
*
@@ -20,20 +21,49 @@ extern enum integrity_status evm_verifyxattr(struct dentry *dentry,
void *xattr_value,
size_t xattr_value_len,
struct integrity_iint_cache *iint);
-extern int evm_inode_setattr(struct dentry *dentry, struct iattr *attr);
+extern int evm_inode_setattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct iattr *attr);
extern void evm_inode_post_setattr(struct dentry *dentry, int ia_valid);
-extern int evm_inode_setxattr(struct dentry *dentry, const char *name,
+extern int evm_inode_setxattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *name,
const void *value, size_t size);
extern void evm_inode_post_setxattr(struct dentry *dentry,
const char *xattr_name,
const void *xattr_value,
size_t xattr_value_len);
-extern int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name);
+extern int evm_inode_removexattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *xattr_name);
extern void evm_inode_post_removexattr(struct dentry *dentry,
const char *xattr_name);
+static inline void evm_inode_post_remove_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ evm_inode_post_removexattr(dentry, acl_name);
+}
+extern int evm_inode_set_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name,
+ struct posix_acl *kacl);
+static inline int evm_inode_remove_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ return evm_inode_set_acl(idmap, dentry, acl_name, NULL);
+}
+static inline void evm_inode_post_set_acl(struct dentry *dentry,
+ const char *acl_name,
+ struct posix_acl *kacl)
+{
+ return evm_inode_post_setxattr(dentry, acl_name, NULL, 0);
+}
extern int evm_inode_init_security(struct inode *inode,
const struct xattr *xattr_array,
struct xattr *evm);
+extern bool evm_revalidate_status(const char *xattr_name);
+extern int evm_protected_xattr_if_enabled(const char *req_xattr_name);
+extern int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer,
+ int buffer_size, char type,
+ bool canonical_fmt);
#ifdef CONFIG_FS_POSIX_ACL
extern int posix_xattr_acl(const char *xattrname);
#else
@@ -60,7 +90,8 @@ static inline enum integrity_status evm_verifyxattr(struct dentry *dentry,
}
#endif
-static inline int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
+static inline int evm_inode_setattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct iattr *attr)
{
return 0;
}
@@ -70,7 +101,8 @@ static inline void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
return;
}
-static inline int evm_inode_setxattr(struct dentry *dentry, const char *name,
+static inline int evm_inode_setxattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *name,
const void *value, size_t size)
{
return 0;
@@ -84,7 +116,8 @@ static inline void evm_inode_post_setxattr(struct dentry *dentry,
return;
}
-static inline int evm_inode_removexattr(struct dentry *dentry,
+static inline int evm_inode_removexattr(struct mnt_idmap *idmap,
+ struct dentry *dentry,
const char *xattr_name)
{
return 0;
@@ -96,6 +129,34 @@ static inline void evm_inode_post_removexattr(struct dentry *dentry,
return;
}
+static inline void evm_inode_post_remove_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ return;
+}
+
+static inline int evm_inode_set_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name,
+ struct posix_acl *kacl)
+{
+ return 0;
+}
+
+static inline int evm_inode_remove_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ return 0;
+}
+
+static inline void evm_inode_post_set_acl(struct dentry *dentry,
+ const char *acl_name,
+ struct posix_acl *kacl)
+{
+ return;
+}
+
static inline int evm_inode_init_security(struct inode *inode,
const struct xattr *xattr_array,
struct xattr *evm)
@@ -103,5 +164,22 @@ static inline int evm_inode_init_security(struct inode *inode,
return 0;
}
+static inline bool evm_revalidate_status(const char *xattr_name)
+{
+ return false;
+}
+
+static inline int evm_protected_xattr_if_enabled(const char *req_xattr_name)
+{
+ return false;
+}
+
+static inline int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer,
+ int buffer_size, char type,
+ bool canonical_fmt)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_EVM */
#endif /* LINUX_EVM_H */
diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h
new file mode 100644
index 000000000000..fe7e6ba918f1
--- /dev/null
+++ b/include/linux/export-internal.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Please do not include this explicitly.
+ * This is used by C files generated by modpost.
+ */
+
+#ifndef __LINUX_EXPORT_INTERNAL_H__
+#define __LINUX_EXPORT_INTERNAL_H__
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#define SYMBOL_CRC(sym, crc, sec) \
+ asm(".section \"___kcrctab" sec "+" #sym "\",\"a\"" "\n" \
+ "__crc_" #sym ":" "\n" \
+ ".long " #crc "\n" \
+ ".previous" "\n")
+
+#endif /* __LINUX_EXPORT_INTERNAL_H__ */
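
To make the macro concrete, a hypothetical invocation and its approximate expansion:

    /*
     * SYMBOL_CRC(my_func, 0x12345678, "_gpl") emits roughly:
     *
     *      .section "___kcrctab_gpl+my_func","a"
     *      __crc_my_func:
     *              .long 0x12345678
     *      .previous
     */
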
diff --git a/include/linux/export.h b/include/linux/export.h
index 1a1dfdb2a5c6..3f31ced0d977 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -1,6 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_EXPORT_H
#define _LINUX_EXPORT_H
+#include <linux/stringify.h>
+
/*
* Export symbols from the kernel to modules. Forked from module.h
* to reduce the amount of pointless cruft we feed to gcc when only
@@ -10,26 +13,15 @@
* hackers place grumpy comments in header files.
*/
-/* Some toolchains use a `_' prefix for all user symbols. */
-#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
-#define __VMLINUX_SYMBOL(x) _##x
-#define __VMLINUX_SYMBOL_STR(x) "_" #x
-#else
-#define __VMLINUX_SYMBOL(x) x
-#define __VMLINUX_SYMBOL_STR(x) #x
-#endif
-
-/* Indirect, so macros are expanded before pasting. */
-#define VMLINUX_SYMBOL(x) __VMLINUX_SYMBOL(x)
-#define VMLINUX_SYMBOL_STR(x) __VMLINUX_SYMBOL_STR(x)
+/*
+ * This comment block is used by fixdep. Please do not remove.
+ *
+ * When CONFIG_MODVERSIONS is changed from n to y, all source files having
+ * EXPORT_SYMBOL variants must be re-compiled because genksyms is run as a
+ * side effect of the *.o build rule.
+ */
#ifndef __ASSEMBLY__
-struct kernel_symbol
-{
- unsigned long value;
- const char *name;
-};
-
#ifdef MODULE
extern struct module __this_module;
#define THIS_MODULE (&__this_module)
@@ -37,97 +29,129 @@ extern struct module __this_module;
#define THIS_MODULE ((struct module *)0)
#endif
-#ifdef CONFIG_MODULES
-
-#if defined(__KERNEL__) && !defined(__GENKSYMS__)
-#ifdef CONFIG_MODVERSIONS
-/* Mark the CRC weak since genksyms apparently decides not to
- * generate a checksums for some symbols */
-#if defined(CONFIG_MODULE_REL_CRCS)
-#define __CRC_SYMBOL(sym, sec) \
- asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
- " .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
- " .long " VMLINUX_SYMBOL_STR(__crc_##sym) " - . \n" \
- " .previous \n");
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+#include <linux/compiler.h>
+/*
+ * Emit the ksymtab entry as a pair of relative references: this reduces
+ * the size by half on 64-bit architectures, and eliminates the need for
+ * absolute relocations that require runtime processing on relocatable
+ * kernels.
+ */
+#define __KSYMTAB_ENTRY(sym, sec) \
+ __ADDRESSABLE(sym) \
+ asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \
+ " .balign 4 \n" \
+ "__ksymtab_" #sym ": \n" \
+ " .long " #sym "- . \n" \
+ " .long __kstrtab_" #sym "- . \n" \
+ " .long __kstrtabns_" #sym "- . \n" \
+ " .previous \n")
+
+struct kernel_symbol {
+ int value_offset;
+ int name_offset;
+ int namespace_offset;
+};
#else
-#define __CRC_SYMBOL(sym, sec) \
- asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
- " .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
- " .long " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
- " .previous \n");
+#define __KSYMTAB_ENTRY(sym, sec) \
+ static const struct kernel_symbol __ksymtab_##sym \
+ __attribute__((section("___ksymtab" sec "+" #sym), used)) \
+ __aligned(sizeof(void *)) \
+ = { (unsigned long)&sym, __kstrtab_##sym, __kstrtabns_##sym }
+
+struct kernel_symbol {
+ unsigned long value;
+ const char *name;
+ const char *namespace;
+};
#endif
+
+#ifdef __GENKSYMS__
+
+#define ___EXPORT_SYMBOL(sym, sec, ns) __GENKSYMS_EXPORT_SYMBOL(sym)
+
#else
-#define __CRC_SYMBOL(sym, sec)
-#endif
-/* For every exported symbol, place a struct in the __ksymtab section */
-#define ___EXPORT_SYMBOL(sym, sec) \
- extern typeof(sym) sym; \
- __CRC_SYMBOL(sym, sec) \
- static const char __kstrtab_##sym[] \
- __attribute__((section("__ksymtab_strings"), aligned(1))) \
- = VMLINUX_SYMBOL_STR(sym); \
- static const struct kernel_symbol __ksymtab_##sym \
- __used \
- __attribute__((section("___ksymtab" sec "+" #sym), used)) \
- = { (unsigned long)&sym, __kstrtab_##sym }
+/*
+ * For every exported symbol, do the following:
+ *
+ * - Put the name of the symbol and namespace (empty string "" for none) in
+ * __ksymtab_strings.
+ * - Place a struct kernel_symbol entry in the __ksymtab section.
+ *
+ * note on .section use: we specify progbits since usage of the "M" (SHF_MERGE)
+ * section flag requires it. Use '%progbits' instead of '@progbits' since the
+ * former apparently works on all arches according to the binutils source.
+ */
+#define ___EXPORT_SYMBOL(sym, sec, ns) \
+ extern typeof(sym) sym; \
+ extern const char __kstrtab_##sym[]; \
+ extern const char __kstrtabns_##sym[]; \
+ asm(" .section \"__ksymtab_strings\",\"aMS\",%progbits,1 \n" \
+ "__kstrtab_" #sym ": \n" \
+ " .asciz \"" #sym "\" \n" \
+ "__kstrtabns_" #sym ": \n" \
+ " .asciz \"" ns "\" \n" \
+ " .previous \n"); \
+ __KSYMTAB_ENTRY(sym, sec)
-#if defined(__KSYM_DEPS__)
+#endif
+
+#if !defined(CONFIG_MODULES) || defined(__DISABLE_EXPORTS)
/*
- * For fine grained build dependencies, we want to tell the build system
- * about each possible exported symbol even if they're not actually exported.
- * We use a string pattern that is unlikely to be valid code that the build
- * system filters out from the preprocessor output (see ksym_dep_filter
- * in scripts/Kbuild.include).
+ * Allow symbol exports to be disabled completely so that C code may
+ * be reused in other execution contexts such as the UEFI stub or the
+ * decompressor.
*/
-#define __EXPORT_SYMBOL(sym, sec) === __KSYM_##sym ===
+#define __EXPORT_SYMBOL(sym, sec, ns)
#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
#include <generated/autoksyms.h>
-#define __EXPORT_SYMBOL(sym, sec) \
- __cond_export_sym(sym, sec, __is_defined(__KSYM_##sym))
-#define __cond_export_sym(sym, sec, conf) \
- ___cond_export_sym(sym, sec, conf)
-#define ___cond_export_sym(sym, sec, enabled) \
- __cond_export_sym_##enabled(sym, sec)
-#define __cond_export_sym_1(sym, sec) ___EXPORT_SYMBOL(sym, sec)
-#define __cond_export_sym_0(sym, sec) /* nothing */
-
+/*
+ * For fine grained build dependencies, we want to tell the build system
+ * about each possible exported symbol even if they're not actually exported.
+ * We use a symbol pattern __ksym_marker_<symbol> that the build system filters
+ * from the $(NM) output (see scripts/gen_ksymdeps.sh). These symbols are
+ * discarded in the final link stage.
+ */
+#define __ksym_marker(sym) \
+ static int __ksym_marker_##sym[0] __section(".discard.ksym") __used
+
+#define __EXPORT_SYMBOL(sym, sec, ns) \
+ __ksym_marker(sym); \
+ __cond_export_sym(sym, sec, ns, __is_defined(__KSYM_##sym))
+#define __cond_export_sym(sym, sec, ns, conf) \
+ ___cond_export_sym(sym, sec, ns, conf)
+#define ___cond_export_sym(sym, sec, ns, enabled) \
+ __cond_export_sym_##enabled(sym, sec, ns)
+#define __cond_export_sym_1(sym, sec, ns) ___EXPORT_SYMBOL(sym, sec, ns)
+
+#ifdef __GENKSYMS__
+#define __cond_export_sym_0(sym, sec, ns) __GENKSYMS_EXPORT_SYMBOL(sym)
#else
-#define __EXPORT_SYMBOL ___EXPORT_SYMBOL
+#define __cond_export_sym_0(sym, sec, ns) /* nothing */
#endif
-#define EXPORT_SYMBOL(sym) \
- __EXPORT_SYMBOL(sym, "")
+#else
-#define EXPORT_SYMBOL_GPL(sym) \
- __EXPORT_SYMBOL(sym, "_gpl")
+#define __EXPORT_SYMBOL(sym, sec, ns) ___EXPORT_SYMBOL(sym, sec, ns)
-#define EXPORT_SYMBOL_GPL_FUTURE(sym) \
- __EXPORT_SYMBOL(sym, "_gpl_future")
+#endif /* CONFIG_MODULES */
-#ifdef CONFIG_UNUSED_SYMBOLS
-#define EXPORT_UNUSED_SYMBOL(sym) __EXPORT_SYMBOL(sym, "_unused")
-#define EXPORT_UNUSED_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_unused_gpl")
+#ifdef DEFAULT_SYMBOL_NAMESPACE
+#define _EXPORT_SYMBOL(sym, sec) __EXPORT_SYMBOL(sym, sec, __stringify(DEFAULT_SYMBOL_NAMESPACE))
#else
-#define EXPORT_UNUSED_SYMBOL(sym)
-#define EXPORT_UNUSED_SYMBOL_GPL(sym)
+#define _EXPORT_SYMBOL(sym, sec) __EXPORT_SYMBOL(sym, sec, "")
#endif
-#endif /* __GENKSYMS__ */
-
-#else /* !CONFIG_MODULES... */
+#define EXPORT_SYMBOL(sym) _EXPORT_SYMBOL(sym, "")
+#define EXPORT_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "_gpl")
+#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", __stringify(ns))
+#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "_gpl", __stringify(ns))
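
A hedged usage sketch for the namespaced variants; the symbol and namespace names are invented, and consumers would declare the import with MODULE_IMPORT_NS() from linux/module.h:

    int my_lib_call(void)
    {
            return 0;
    }
    EXPORT_SYMBOL_NS_GPL(my_lib_call, MY_NAMESPACE);

    /* In a consuming module: MODULE_IMPORT_NS(MY_NAMESPACE); */
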
-#define EXPORT_SYMBOL(sym)
-#define EXPORT_SYMBOL_GPL(sym)
-#define EXPORT_SYMBOL_GPL_FUTURE(sym)
-#define EXPORT_UNUSED_SYMBOL(sym)
-#define EXPORT_UNUSED_SYMBOL_GPL(sym)
-
-#endif /* CONFIG_MODULES */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_EXPORT_H */
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 5ab958cdc50b..9edb29101ec8 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_EXPORTFS_H
#define LINUX_EXPORTFS_H 1
@@ -104,6 +105,11 @@ enum fid_type {
FILEID_LUSTRE = 0x97,
/*
+ * 64 bit unique kernfs id
+ */
+ FILEID_KERNFS = 0xfe,
+
+ /*
* Filesystems must not use 0xff file ID.
*/
FILEID_INVALID = 0xff,
@@ -125,7 +131,7 @@ struct fid {
u32 parent_block;
u32 parent_generation;
} udf;
- __u32 raw[0];
+ DECLARE_FLEX_ARRAY(__u32, raw);
};
};
@@ -138,7 +144,7 @@ struct fid {
* @get_parent: find the parent of a given directory
* @commit_metadata: commit metadata changes to stable storage
*
- * See Documentation/filesystems/nfs/Exporting for details on how to use
+ * See Documentation/filesystems/nfs/exporting.rst for details on how to use
* this interface correctly.
*
* encode_fh:
@@ -172,7 +178,7 @@ struct fid {
* get_name:
* @get_name should find a name for the given @child in the given @parent
* directory. The name should be stored in the @name (with the
- * understanding that it is already pointing to a a %NAME_MAX+1 sized
+ * understanding that it is already pointing to a %NAME_MAX+1 sized
* buffer. get_name() should return %0 on success, a negative error code
* or error. @get_name will be called without @parent->i_mutex held.
*
@@ -207,12 +213,26 @@ struct export_operations {
bool write, u32 *device_generation);
int (*commit_blocks)(struct inode *inode, struct iomap *iomaps,
int nr_iomaps, struct iattr *iattr);
+#define EXPORT_OP_NOWCC (0x1) /* don't collect v3 wcc data */
+#define EXPORT_OP_NOSUBTREECHK (0x2) /* no subtree checking */
+#define EXPORT_OP_CLOSE_BEFORE_UNLINK (0x4) /* close files before unlink */
+#define EXPORT_OP_REMOTE_FS (0x8) /* Filesystem is remote */
+#define EXPORT_OP_NOATOMIC_ATTR (0x10) /* Filesystem cannot supply
+ atomic attribute updates
+ */
+#define EXPORT_OP_FLUSH_ON_CLOSE (0x20) /* fs flushes file data on close */
+ unsigned long flags;
};
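
As an illustration, a remote filesystem might combine these flags in its export table; the myfs_* names are hypothetical:

    static const struct export_operations myfs_export_ops = {
            .encode_fh    = myfs_encode_fh,
            .fh_to_dentry = myfs_fh_to_dentry,
            .flags        = EXPORT_OP_REMOTE_FS | EXPORT_OP_NOWCC,
    };
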
extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
int *max_len, struct inode *parent);
extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid,
int *max_len, int connectable);
+extern struct dentry *exportfs_decode_fh_raw(struct vfsmount *mnt,
+ struct fid *fid, int fh_len,
+ int fileid_type,
+ int (*acceptable)(void *, struct dentry *),
+ void *context);
extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *),
void *context);
diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h
index 2723e715f67a..1fef88569037 100644
--- a/include/linux/ext2_fs.h
+++ b/include/linux/ext2_fs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/include/linux/ext2_fs.h
*
diff --git a/include/linux/extable.h b/include/linux/extable.h
index 28addad0dda7..4ab9e78f313b 100644
--- a/include/linux/extable.h
+++ b/include/linux/extable.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_EXTABLE_H
#define _LINUX_EXTABLE_H
@@ -18,6 +19,8 @@ void trim_init_extable(struct module *m);
/* Given an address, look for it in the exception tables */
const struct exception_table_entry *search_exception_tables(unsigned long add);
+const struct exception_table_entry *
+search_kernel_exception_table(unsigned long addr);
#ifdef CONFIG_MODULES
/* For extable.c to search modules' exception tables. */
@@ -30,4 +33,14 @@ search_module_extables(unsigned long addr)
}
#endif /*CONFIG_MODULES*/
+#ifdef CONFIG_BPF_JIT
+const struct exception_table_entry *search_bpf_extables(unsigned long addr);
+#else
+static inline const struct exception_table_entry *
+search_bpf_extables(unsigned long addr)
+{
+ return NULL;
+}
+#endif
+
#endif /* _LINUX_EXTABLE_H */
diff --git a/include/linux/extcon-provider.h b/include/linux/extcon-provider.h
new file mode 100644
index 000000000000..fa70945f4e6b
--- /dev/null
+++ b/include/linux/extcon-provider.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * External Connector (extcon) framework
+ * - linux/include/linux/extcon-provider.h for extcon provider device driver.
+ *
+ * Copyright (C) 2017 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ */
+
+#ifndef __LINUX_EXTCON_PROVIDER_H__
+#define __LINUX_EXTCON_PROVIDER_H__
+
+#include <linux/extcon.h>
+
+struct extcon_dev;
+
+#if IS_ENABLED(CONFIG_EXTCON)
+
+/* Following APIs register/unregister the extcon device. */
+int extcon_dev_register(struct extcon_dev *edev);
+void extcon_dev_unregister(struct extcon_dev *edev);
+int devm_extcon_dev_register(struct device *dev,
+ struct extcon_dev *edev);
+void devm_extcon_dev_unregister(struct device *dev,
+ struct extcon_dev *edev);
+
+/* Following APIs allocate/free the memory of the extcon device. */
+struct extcon_dev *extcon_dev_allocate(const unsigned int *cable);
+void extcon_dev_free(struct extcon_dev *edev);
+struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
+ const unsigned int *cable);
+void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);
+
+/* Synchronize the state and property value for each external connector. */
+int extcon_sync(struct extcon_dev *edev, unsigned int id);
+
+/*
+ * Following APIs set the connected state of each external connector.
+ * The 'id' argument indicates the defined external connector.
+ */
+int extcon_set_state(struct extcon_dev *edev, unsigned int id,
+ bool state);
+int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
+ bool state);
+
+/*
+ * Following APIs set the property of each external connector.
+ * The 'id' argument indicates the defined external connector
+ * and the 'prop' indicates the extcon property.
+ *
+ * And extcon_set_property_capability() sets the capability of the property
+ * for each external connector. It is used to set the capability of the
+ * property of each external connector based on the id and property.
+ */
+int extcon_set_property(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value prop_val);
+int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value prop_val);
+int extcon_set_property_capability(struct extcon_dev *edev,
+ unsigned int id, unsigned int prop);
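
A hedged sketch of a provider driver wiring these APIs together in its probe path; the cable table contents and the detection trigger are illustrative only:

    #include <linux/extcon-provider.h>
    #include <linux/platform_device.h>

    static const unsigned int my_cables[] = {
            EXTCON_USB,
            EXTCON_USB_HOST,
            EXTCON_NONE,    /* terminator */
    };

    static int my_probe(struct platform_device *pdev)
    {
            struct extcon_dev *edev;
            int ret;

            edev = devm_extcon_dev_allocate(&pdev->dev, my_cables);
            if (IS_ERR(edev))
                    return PTR_ERR(edev);

            ret = devm_extcon_dev_register(&pdev->dev, edev);
            if (ret)
                    return ret;

            /* later, e.g. from a detection interrupt: */
            /* extcon_set_state_sync(edev, EXTCON_USB, true); */
            return 0;
    }
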
+
+#else /* CONFIG_EXTCON */
+static inline int extcon_dev_register(struct extcon_dev *edev)
+{
+ return 0;
+}
+
+static inline void extcon_dev_unregister(struct extcon_dev *edev) { }
+
+static inline int devm_extcon_dev_register(struct device *dev,
+ struct extcon_dev *edev)
+{
+ return -EINVAL;
+}
+
+static inline void devm_extcon_dev_unregister(struct device *dev,
+ struct extcon_dev *edev) { }
+
+static inline struct extcon_dev *extcon_dev_allocate(const unsigned int *cable)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void extcon_dev_free(struct extcon_dev *edev) { }
+
+static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
+ const unsigned int *cable)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void devm_extcon_dev_free(struct device *dev,
+					struct extcon_dev *edev) { }
+
+static inline int extcon_set_state(struct extcon_dev *edev, unsigned int id,
+ bool state)
+{
+ return 0;
+}
+
+static inline int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
+ bool state)
+{
+ return 0;
+}
+
+static inline int extcon_sync(struct extcon_dev *edev, unsigned int id)
+{
+ return 0;
+}
+
+static inline int extcon_set_property(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value prop_val)
+{
+ return 0;
+}
+
+static inline int extcon_set_property_sync(struct extcon_dev *edev,
+ unsigned int id, unsigned int prop,
+ union extcon_property_value prop_val)
+{
+ return 0;
+}
+
+static inline int extcon_set_property_capability(struct extcon_dev *edev,
+ unsigned int id, unsigned int prop)
+{
+ return 0;
+}
+#endif /* CONFIG_EXTCON */
+#endif /* __LINUX_EXTCON_PROVIDER_H__ */
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 7e206a9f88db..3c45c3846fe9 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -1,5 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * External connector (extcon) class driver
+ * External Connector (extcon) framework
+ * - linux/include/linux/extcon.h for extcon consumer device driver.
*
* Copyright (C) 2015 Samsung Electronics
* Author: Chanwoo Choi <cw00.choi@samsung.com>
@@ -11,17 +13,7 @@
* based on switch class driver
* Copyright (C) 2008 Google, Inc.
* Author: Mike Lockwood <lockwood@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
-*/
+ */
#ifndef __LINUX_EXTCON_H__
#define __LINUX_EXTCON_H__
@@ -84,6 +76,8 @@
#define EXTCON_DISP_VGA 43 /* Video Graphics Array */
#define EXTCON_DISP_DP 44 /* Display Port */
#define EXTCON_DISP_HMD 45 /* Head-Mounted Display */
+#define EXTCON_DISP_CVBS 46 /* Composite Video Broadcast Signal */
+#define EXTCON_DISP_EDP 47 /* Embedded Display Port */
/* Miscellaneous external connector */
#define EXTCON_DOCK 60
@@ -93,7 +87,7 @@
#define EXTCON_NUM 63
/*
- * Define the property of supported external connectors.
+ * Define the properties of supported external connectors.
*
* When adding the new extcon property, they *must* have
* the type/value/default information. Also, you *have to*
@@ -171,231 +165,156 @@ union extcon_property_value {
int intval; /* type : integer (intval) */
};
-struct extcon_cable;
struct extcon_dev;
#if IS_ENABLED(CONFIG_EXTCON)
-
-/*
- * Following APIs are for notifiers or configurations.
- * Notifiers are the external port and connection devices.
- */
-extern int extcon_dev_register(struct extcon_dev *edev);
-extern void extcon_dev_unregister(struct extcon_dev *edev);
-extern int devm_extcon_dev_register(struct device *dev,
- struct extcon_dev *edev);
-extern void devm_extcon_dev_unregister(struct device *dev,
- struct extcon_dev *edev);
-extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
-
/*
- * Following APIs control the memory of extcon device.
+ * Following APIs get the connected state of each external connector.
+ * The 'id' argument indicates the defined external connector.
*/
-extern struct extcon_dev *extcon_dev_allocate(const unsigned int *cable);
-extern void extcon_dev_free(struct extcon_dev *edev);
-extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
- const unsigned int *cable);
-extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);
+int extcon_get_state(struct extcon_dev *edev, unsigned int id);
/*
- * get/set_state access each bit of the 32b encoded state value.
- * They are used to access the status of each cable based on the cable id.
- */
-extern int extcon_get_state(struct extcon_dev *edev, unsigned int id);
-extern int extcon_set_state(struct extcon_dev *edev, unsigned int id,
- bool cable_state);
-extern int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
- bool cable_state);
-/*
- * Synchronize the state and property data for a specific external connector.
- */
-extern int extcon_sync(struct extcon_dev *edev, unsigned int id);
-
-/*
- * get/set_property access the property value of each external connector.
- * They are used to access the property of each cable based on the property id.
+ * Following APIs get the property of each external connector.
+ * The 'id' argument indicates the defined external connector
+ * and the 'prop' indicates the extcon property.
+ *
+ * And extcon_get_property_capability() gets the capability of the given
+ * property for each external connector, based on the id and property
+ * arguments.
*/
-extern int extcon_get_property(struct extcon_dev *edev, unsigned int id,
+int extcon_get_property(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
union extcon_property_value *prop_val);
-extern int extcon_set_property(struct extcon_dev *edev, unsigned int id,
- unsigned int prop,
- union extcon_property_value prop_val);
-extern int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
- unsigned int prop,
- union extcon_property_value prop_val);
-
-/*
- * get/set_property_capability set the capability of the property for each
- * external connector. They are used to set the capability of the property
- * of each external connector based on the id and property.
- */
-extern int extcon_get_property_capability(struct extcon_dev *edev,
- unsigned int id, unsigned int prop);
-extern int extcon_set_property_capability(struct extcon_dev *edev,
+int extcon_get_property_capability(struct extcon_dev *edev,
unsigned int id, unsigned int prop);
/*
- * Following APIs are to monitor the status change of the external connectors.
+ * Following APIs register the notifier block in order to detect
+ * the change of both state and property value for each external connector.
+ *
* extcon_register_notifier(*edev, id, *nb) : Register a notifier block
* for specific external connector of the extcon.
* extcon_register_notifier_all(*edev, *nb) : Register a notifier block
* for all supported external connectors of the extcon.
*/
-extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
- struct notifier_block *nb);
-extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
- struct notifier_block *nb);
-extern int devm_extcon_register_notifier(struct device *dev,
+int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
+ struct notifier_block *nb);
+int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
+ struct notifier_block *nb);
+int devm_extcon_register_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
-extern void devm_extcon_unregister_notifier(struct device *dev,
+void devm_extcon_unregister_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
-extern int extcon_register_notifier_all(struct extcon_dev *edev,
+int extcon_register_notifier_all(struct extcon_dev *edev,
struct notifier_block *nb);
-extern int extcon_unregister_notifier_all(struct extcon_dev *edev,
+int extcon_unregister_notifier_all(struct extcon_dev *edev,
struct notifier_block *nb);
-extern int devm_extcon_register_notifier_all(struct device *dev,
+int devm_extcon_register_notifier_all(struct device *dev,
struct extcon_dev *edev,
struct notifier_block *nb);
-extern void devm_extcon_unregister_notifier_all(struct device *dev,
+void devm_extcon_unregister_notifier_all(struct device *dev,
struct extcon_dev *edev,
struct notifier_block *nb);
/*
- * Following API get the extcon device from devicetree.
- * This function use phandle of devicetree to get extcon device directly.
+ * Following APIs get the extcon_dev from the devicetree or through the extcon name.
*/
-extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
+struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
+struct extcon_dev *extcon_find_edev_by_node(struct device_node *node);
+struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
int index);
-/* Following API to get information of extcon device */
-extern const char *extcon_get_edev_name(struct extcon_dev *edev);
-
+/* Following API gets the name of the extcon device. */
+const char *extcon_get_edev_name(struct extcon_dev *edev);
#else /* CONFIG_EXTCON */
-static inline int extcon_dev_register(struct extcon_dev *edev)
-{
- return 0;
-}
-
-static inline void extcon_dev_unregister(struct extcon_dev *edev) { }
-
-static inline int devm_extcon_dev_register(struct device *dev,
- struct extcon_dev *edev)
-{
- return -EINVAL;
-}
-
-static inline void devm_extcon_dev_unregister(struct device *dev,
- struct extcon_dev *edev) { }
-
-static inline struct extcon_dev *extcon_dev_allocate(const unsigned int *cable)
-{
- return ERR_PTR(-ENOSYS);
-}
-
-static inline void extcon_dev_free(struct extcon_dev *edev) { }
-
-static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
- const unsigned int *cable)
-{
- return ERR_PTR(-ENOSYS);
-}
-
-static inline void devm_extcon_dev_free(struct extcon_dev *edev) { }
-
-
static inline int extcon_get_state(struct extcon_dev *edev, unsigned int id)
{
return 0;
}
-static inline int extcon_set_state(struct extcon_dev *edev, unsigned int id,
- bool cable_state)
+static inline int extcon_get_property(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value *prop_val)
{
return 0;
}
-static inline int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
- bool cable_state)
+static inline int extcon_get_property_capability(struct extcon_dev *edev,
+ unsigned int id, unsigned int prop)
{
return 0;
}
-static inline int extcon_sync(struct extcon_dev *edev, unsigned int id)
+static inline int extcon_register_notifier(struct extcon_dev *edev,
+ unsigned int id, struct notifier_block *nb)
{
return 0;
}
-static inline int extcon_get_property(struct extcon_dev *edev, unsigned int id,
- unsigned int prop,
- union extcon_property_value *prop_val)
+static inline int extcon_unregister_notifier(struct extcon_dev *edev,
+ unsigned int id, struct notifier_block *nb)
{
return 0;
}
-static inline int extcon_set_property(struct extcon_dev *edev, unsigned int id,
- unsigned int prop,
- union extcon_property_value prop_val)
+
+static inline int devm_extcon_register_notifier(struct device *dev,
+ struct extcon_dev *edev, unsigned int id,
+ struct notifier_block *nb)
{
- return 0;
+ return -ENOSYS;
}
-static inline int extcon_set_property_sync(struct extcon_dev *edev,
- unsigned int id, unsigned int prop,
- union extcon_property_value prop_val)
+static inline void devm_extcon_unregister_notifier(struct device *dev,
+ struct extcon_dev *edev, unsigned int id,
+ struct notifier_block *nb) { }
+
+static inline int extcon_register_notifier_all(struct extcon_dev *edev,
+ struct notifier_block *nb)
{
return 0;
}
-static inline int extcon_get_property_capability(struct extcon_dev *edev,
- unsigned int id, unsigned int prop)
+static inline int extcon_unregister_notifier_all(struct extcon_dev *edev,
+ struct notifier_block *nb)
{
return 0;
}
-static inline int extcon_set_property_capability(struct extcon_dev *edev,
- unsigned int id, unsigned int prop)
+static inline int devm_extcon_register_notifier_all(struct device *dev,
+ struct extcon_dev *edev,
+ struct notifier_block *nb)
{
return 0;
}
+static inline void devm_extcon_unregister_notifier_all(struct device *dev,
+ struct extcon_dev *edev,
+ struct notifier_block *nb) { }
+
static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
{
return NULL;
}
-static inline int extcon_register_notifier(struct extcon_dev *edev,
- unsigned int id,
- struct notifier_block *nb)
-{
- return 0;
-}
-
-static inline int extcon_unregister_notifier(struct extcon_dev *edev,
- unsigned int id,
- struct notifier_block *nb)
+static inline struct extcon_dev *extcon_find_edev_by_node(struct device_node *node)
{
- return 0;
+ return ERR_PTR(-ENODEV);
}
-static inline int devm_extcon_register_notifier(struct device *dev,
- struct extcon_dev *edev, unsigned int id,
- struct notifier_block *nb)
+static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
+ int index)
{
- return -ENOSYS;
+ return ERR_PTR(-ENODEV);
}
-static inline void devm_extcon_unregister_notifier(struct device *dev,
- struct extcon_dev *edev, unsigned int id,
- struct notifier_block *nb) { }
-
-static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
- int index)
+static inline const char *extcon_get_edev_name(struct extcon_dev *edev)
{
- return ERR_PTR(-ENODEV);
+ return NULL;
}
#endif /* CONFIG_EXTCON */
@@ -411,26 +330,14 @@ struct extcon_specific_cable_nb {
};
static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj,
- const char *extcon_name, const char *cable_name,
- struct notifier_block *nb)
+ const char *extcon_name, const char *cable_name,
+ struct notifier_block *nb)
{
return -EINVAL;
}
-static inline int extcon_unregister_interest(struct extcon_specific_cable_nb
- *obj)
+static inline int extcon_unregister_interest(struct extcon_specific_cable_nb *obj)
{
return -EINVAL;
}
-
-static inline int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id)
-{
- return extcon_get_state(edev, id);
-}
-
-static inline int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
- bool cable_state)
-{
- return extcon_set_state_sync(edev, id, cable_state);
-}
#endif /* __LINUX_EXTCON_H__ */
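To make the slimmed-down consumer surface above concrete, a minimal sketch of a driver that resolves its provider and watches one connector. The my_* names are invented and EXTCON_USB is just an example id; the calls are the ones declared in the hunk above.

static int my_det_event(struct notifier_block *nb, unsigned long state,
                        void *ptr)
{
        /* 'state' carries the new cable state: 0 detached, 1 attached */
        pr_info("USB cable %s\n", state ? "attached" : "detached");
        return NOTIFY_OK;
}

static struct notifier_block my_det_nb = {
        .notifier_call = my_det_event,
};

static int my_det_probe_tail(struct device *dev)
{
        struct extcon_dev *edev;
        int ret;

        /* resolve the provider via the "extcon" phandle, index 0 */
        edev = extcon_get_edev_by_phandle(dev, 0);
        if (IS_ERR(edev))
                return PTR_ERR(edev);

        /* managed registration: dropped automatically on driver unbind */
        ret = devm_extcon_register_notifier(dev, edev, EXTCON_USB, &my_det_nb);
        if (ret)
                return ret;

        /* the connector may already be attached at probe time */
        ret = extcon_get_state(edev, EXTCON_USB);
        if (ret < 0)
                return ret;
        pr_info("initial USB state: %d\n", ret);
        return 0;
}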
diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h
index 2aa32075bca1..19b437e9c080 100644
--- a/include/linux/extcon/extcon-adc-jack.h
+++ b/include/linux/extcon/extcon-adc-jack.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/extcon/extcon-adc-jack.h
*
@@ -5,11 +6,6 @@
*
* Copyright (C) 2012 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef _EXTCON_ADC_JACK_H_
diff --git a/include/linux/extcon/extcon-gpio.h b/include/linux/extcon/extcon-gpio.h
deleted file mode 100644
index 7cacafb78b09..000000000000
--- a/include/linux/extcon/extcon-gpio.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Single-state GPIO extcon driver based on extcon class
- *
- * Copyright (C) 2012 Samsung Electronics
- * Author: MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * based on switch class driver
- * Copyright (C) 2008 Google, Inc.
- * Author: Mike Lockwood <lockwood@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef __EXTCON_GPIO_H__
-#define __EXTCON_GPIO_H__ __FILE__
-
-#include <linux/extcon.h>
-
-/**
- * struct gpio_extcon_pdata - A simple GPIO-controlled extcon device.
- * @extcon_id: The unique id of specific external connector.
- * @gpio: Corresponding GPIO.
- * @gpio_active_low: Boolean describing whether gpio active state is 1 or 0
- * If true, low state of gpio means active.
- * If false, high state of gpio means active.
- * @debounce: Debounce time for GPIO IRQ in ms.
- * @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW).
- * @check_on_resume: Boolean describing whether to check the state of gpio
- * while resuming from sleep.
- */
-struct gpio_extcon_pdata {
- unsigned int extcon_id;
- unsigned gpio;
- bool gpio_active_low;
- unsigned long debounce;
- unsigned long irq_flags;
-
- bool check_on_resume;
-};
-
-#endif /* __EXTCON_GPIO_H__ */
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index b6feed6547ce..1d6402529d10 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* include/linux/f2fs_fs.h
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LINUX_F2FS_FS_H
#define _LINUX_F2FS_FS_H
@@ -21,10 +18,12 @@
#define F2FS_BLKSIZE 4096 /* support only 4KB block */
#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
+#define F2FS_EXTENSION_LEN 8 /* max size of extension */
#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
+#define COMPRESS_ADDR ((block_t)-2) /* used as compressed data flag */
#define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS)
#define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS)
@@ -35,16 +34,20 @@
#define F2FS_ROOT_INO(sbi) ((sbi)->root_ino_num)
#define F2FS_NODE_INO(sbi) ((sbi)->node_ino_num)
#define F2FS_META_INO(sbi) ((sbi)->meta_ino_num)
+#define F2FS_COMPRESS_INO(sbi) (NM_I(sbi)->max_nid)
+
+#define F2FS_MAX_QUOTAS 3
+
+#define F2FS_ENC_UTF8_12_1 1
-#define F2FS_IO_SIZE(sbi) (1 << (sbi)->write_io_size_bits) /* Blocks */
-#define F2FS_IO_SIZE_KB(sbi) (1 << ((sbi)->write_io_size_bits + 2)) /* KB */
-#define F2FS_IO_SIZE_BYTES(sbi) (1 << ((sbi)->write_io_size_bits + 12)) /* B */
-#define F2FS_IO_SIZE_BITS(sbi) ((sbi)->write_io_size_bits) /* power of 2 */
+#define F2FS_IO_SIZE(sbi) BIT(F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */
+#define F2FS_IO_SIZE_KB(sbi) BIT(F2FS_OPTION(sbi).write_io_size_bits + 2) /* KB */
+#define F2FS_IO_SIZE_BITS(sbi) (F2FS_OPTION(sbi).write_io_size_bits) /* power of 2 */
#define F2FS_IO_SIZE_MASK(sbi) (F2FS_IO_SIZE(sbi) - 1)
+#define F2FS_IO_ALIGNED(sbi) (F2FS_IO_SIZE(sbi) > 1)
/* This flag is used by node and meta inodes, and by recovery */
#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO)
-#define GFP_F2FS_HIGH_ZERO (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM)
/*
* For further optimization on multi-head logs, on-disk layout supports maximum
@@ -69,6 +72,42 @@ struct f2fs_device {
__le32 total_segments;
} __packed;
+/* reason of stop_checkpoint */
+enum stop_cp_reason {
+ STOP_CP_REASON_SHUTDOWN,
+ STOP_CP_REASON_FAULT_INJECT,
+ STOP_CP_REASON_META_PAGE,
+ STOP_CP_REASON_WRITE_FAIL,
+ STOP_CP_REASON_CORRUPTED_SUMMARY,
+ STOP_CP_REASON_UPDATE_INODE,
+ STOP_CP_REASON_FLUSH_FAIL,
+ STOP_CP_REASON_MAX,
+};
+
+#define MAX_STOP_REASON 32
+
+/* detailed reasons for EFSCORRUPTED */
+enum f2fs_error {
+ ERROR_CORRUPTED_CLUSTER,
+ ERROR_FAIL_DECOMPRESSION,
+ ERROR_INVALID_BLKADDR,
+ ERROR_CORRUPTED_DIRENT,
+ ERROR_CORRUPTED_INODE,
+ ERROR_INCONSISTENT_SUMMARY,
+ ERROR_INCONSISTENT_FOOTER,
+ ERROR_INCONSISTENT_SUM_TYPE,
+ ERROR_CORRUPTED_JOURNAL,
+ ERROR_INCONSISTENT_NODE_COUNT,
+ ERROR_INCONSISTENT_BLOCK_COUNT,
+ ERROR_INVALID_CURSEG,
+ ERROR_INCONSISTENT_SIT,
+ ERROR_CORRUPTED_VERITY_XATTR,
+ ERROR_CORRUPTED_XATTR,
+ ERROR_MAX,
+};
+
+#define MAX_F2FS_ERRORS 16
+
struct f2fs_super_block {
__le32 magic; /* Magic Number */
__le16 major_ver; /* Major Version */
@@ -100,7 +139,7 @@ struct f2fs_super_block {
__u8 uuid[16]; /* 128-bit uuid for volume */
__le16 volume_name[MAX_VOLUME_NAME]; /* volume name */
__le32 extension_count; /* # of extensions below */
- __u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */
+ __u8 extension_list[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];/* extension array */
__le32 cp_payload;
__u8 version[VERSION_LEN]; /* the kernel version */
__u8 init_version[VERSION_LEN]; /* the initial kernel version */
@@ -108,12 +147,25 @@ struct f2fs_super_block {
__u8 encryption_level; /* versioning level for encryption */
__u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */
struct f2fs_device devs[MAX_DEVICES]; /* device list */
- __u8 reserved[327]; /* valid reserved region */
+ __le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */
+ __u8 hot_ext_count; /* # of hot file extension */
+ __le16 s_encoding; /* Filename charset encoding */
+ __le16 s_encoding_flags; /* Filename charset encoding flags */
+ __u8 s_stop_reason[MAX_STOP_REASON]; /* stop checkpoint reason */
+ __u8 s_errors[MAX_F2FS_ERRORS]; /* reasons for image corruption */
+ __u8 reserved[258]; /* valid reserved region */
+ __le32 crc; /* checksum of superblock */
} __packed;
/*
* For checkpoint
*/
+#define CP_RESIZEFS_FLAG 0x00004000
+#define CP_DISABLED_QUICK_FLAG 0x00002000
+#define CP_DISABLED_FLAG 0x00001000
+#define CP_QUOTA_NEED_FSCK_FLAG 0x00000800
+#define CP_LARGE_NAT_BITMAP_FLAG 0x00000400
+#define CP_NOCRC_RECOVERY_FLAG 0x00000200
#define CP_TRIMMED_FLAG 0x00000100
#define CP_NAT_BITS_FLAG 0x00000080
#define CP_CRC_RECOVERY_FLAG 0x00000040
@@ -154,9 +206,13 @@ struct f2fs_checkpoint {
unsigned char alloc_type[MAX_ACTIVE_LOGS];
/* SIT and NAT version bitmap */
- unsigned char sit_nat_version_bitmap[1];
+ unsigned char sit_nat_version_bitmap[];
} __packed;
+#define CP_CHKSUM_OFFSET 4092 /* default chksum offset in checkpoint */
+#define CP_MIN_CHKSUM_OFFSET \
+ (offsetof(struct f2fs_checkpoint, sit_nat_version_bitmap))
+
/*
* For orphan inode management
*/
@@ -180,19 +236,23 @@ struct f2fs_orphan_block {
struct f2fs_extent {
__le32 fofs; /* start file offset of the extent */
__le32 blk; /* start block address of the extent */
- __le32 len; /* lengh of the extent */
+ __le32 len; /* length of the extent */
} __packed;
#define F2FS_NAME_LEN 255
-#define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */
+/* 200 bytes for inline xattrs by default */
+#define DEFAULT_INLINE_XATTR_ADDRS 50
#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
+#define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \
+ get_extra_isize(inode))
#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
#define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
-#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
+#define DEF_ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
+#define ADDRS_PER_BLOCK(inode) addrs_per_block(inode)
#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
#define ADDRS_PER_PAGE(page, inode) \
- (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK)
+ (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK(inode))
#define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1)
#define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2)
@@ -205,9 +265,9 @@ struct f2fs_extent {
#define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */
#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
-
-#define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \
- F2FS_INLINE_XATTR_ADDRS - 1))
+#define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */
+#define F2FS_PIN_FILE 0x40 /* file should not be gced */
+#define F2FS_COMPRESS_RELEASED 0x80 /* file released compressed blocks */
struct f2fs_inode {
__le16 i_mode; /* file mode */
@@ -225,7 +285,13 @@ struct f2fs_inode {
__le32 i_ctime_nsec; /* change time in nano scale */
__le32 i_mtime_nsec; /* modification time in nano scale */
__le32 i_generation; /* file version (for NFS) */
- __le32 i_current_depth; /* only for directory depth */
+ union {
+ __le32 i_current_depth; /* only for directory depth */
+ __le16 i_gc_failures; /*
+ * # of gc failures on pinned file.
+ * only for regular files.
+ */
+ };
__le32 i_xattr_nid; /* nid to save xattr */
__le32 i_flags; /* file attributes */
__le32 i_pino; /* parent inode number */
@@ -235,14 +301,31 @@ struct f2fs_inode {
struct f2fs_extent i_ext; /* caching a largest extent */
- __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
-
+ union {
+ struct {
+ __le16 i_extra_isize; /* extra inode attribute size */
+ __le16 i_inline_xattr_size; /* inline xattr size, unit: 4 bytes */
+ __le32 i_projid; /* project id */
+ __le32 i_inode_checksum;/* inode meta checksum */
+ __le64 i_crtime; /* creation time */
+ __le32 i_crtime_nsec; /* creation time in nano scale */
+ __le64 i_compr_blocks; /* # of compressed blocks */
+ __u8 i_compress_algorithm; /* compress algorithm */
+ __u8 i_log_cluster_size; /* log of cluster size */
+ __le16 i_compress_flag; /* compress flag */
+ /* 0 bit: chksum flag
+ * [8,15] bits: compress level
+ */
+ __le32 i_extra_end[0]; /* for attribute size calculation */
+ } __packed;
+ __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
+ };
__le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2),
double_indirect(1) node id */
} __packed;
struct direct_node {
- __le32 addr[ADDRS_PER_BLOCK]; /* array of data block address */
+ __le32 addr[DEF_ADDRS_PER_BLOCK]; /* array of data block address */
} __packed;
struct indirect_node {
@@ -256,11 +339,11 @@ enum {
OFFSET_BIT_SHIFT
};
-#define OFFSET_BIT_MASK (0x07) /* (0x01 << OFFSET_BIT_SHIFT) - 1 */
+#define OFFSET_BIT_MASK GENMASK(OFFSET_BIT_SHIFT - 1, 0)
struct node_footer {
__le32 nid; /* node id */
- __le32 ino; /* inode nunmber */
+ __le32 ino; /* inode number */
__le32 flag; /* include cold/fsync/dentry marks and offset */
__le64 cp_ver; /* checkpoint version */
__le32 next_blkaddr; /* next node page block address */
@@ -280,7 +363,6 @@ struct f2fs_node {
* For NAT entries
*/
#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
-#define NAT_ENTRY_BITMAP_SIZE ((NAT_ENTRY_PER_BLOCK + 7) / 8)
struct f2fs_nat_entry {
__u8 version; /* latest version of cached nat entry */
@@ -462,16 +544,16 @@ typedef __le32 f2fs_hash_t;
#define MAX_DIR_HASH_DEPTH 63
/* MAX buckets in one level of dir */
-#define MAX_DIR_BUCKETS (1 << ((MAX_DIR_HASH_DEPTH / 2) - 1))
+#define MAX_DIR_BUCKETS BIT((MAX_DIR_HASH_DEPTH / 2) - 1)
/*
- * space utilization of regular dentry and inline dentry
- * regular dentry inline dentry
- * bitmap 1 * 27 = 27 1 * 23 = 23
- * reserved 1 * 3 = 3 1 * 7 = 7
- * dentry 11 * 214 = 2354 11 * 182 = 2002
- * filename 8 * 214 = 1712 8 * 182 = 1456
- * total 4096 3488
+ * space utilization of regular dentry and inline dentry (w/o extra reservation)
+ * regular dentry inline dentry (def) inline dentry (min)
+ * bitmap 1 * 27 = 27 1 * 23 = 23 1 * 1 = 1
+ * reserved 1 * 3 = 3 1 * 7 = 7 1 * 1 = 1
+ * dentry 11 * 214 = 2354 11 * 182 = 2002 11 * 2 = 22
+ * filename 8 * 214 = 1712 8 * 182 = 1456 8 * 2 = 16
+ * total 4096 3488 40
*
* Note: there are more reserved space in inline dentry than in regular
* dentry, when converting inline dentry we should handle this carefully.
@@ -483,12 +565,13 @@ typedef __le32 f2fs_hash_t;
#define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \
F2FS_SLOT_LEN) * \
NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP))
+#define MIN_INLINE_DENTRY_SIZE 40 /* just include '.' and '..' entries */
/* One directory entry slot representing F2FS_SLOT_LEN-sized file name */
struct f2fs_dir_entry {
__le32 hash_code; /* hash code of file name */
__le32 ino; /* inode number */
- __le16 name_len; /* lengh of file name */
+ __le16 name_len; /* length of file name */
__u8 file_type; /* file type */
} __packed;
@@ -501,37 +584,6 @@ struct f2fs_dentry_block {
__u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN];
} __packed;
-/* for inline dir */
-#define NR_INLINE_DENTRY (MAX_INLINE_DATA * BITS_PER_BYTE / \
- ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
- BITS_PER_BYTE + 1))
-#define INLINE_DENTRY_BITMAP_SIZE ((NR_INLINE_DENTRY + \
- BITS_PER_BYTE - 1) / BITS_PER_BYTE)
-#define INLINE_RESERVED_SIZE (MAX_INLINE_DATA - \
- ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
- NR_INLINE_DENTRY + INLINE_DENTRY_BITMAP_SIZE))
-
-/* inline directory entry structure */
-struct f2fs_inline_dentry {
- __u8 dentry_bitmap[INLINE_DENTRY_BITMAP_SIZE];
- __u8 reserved[INLINE_RESERVED_SIZE];
- struct f2fs_dir_entry dentry[NR_INLINE_DENTRY];
- __u8 filename[NR_INLINE_DENTRY][F2FS_SLOT_LEN];
-} __packed;
-
-/* file types used in inode_info->flags */
-enum {
- F2FS_FT_UNKNOWN,
- F2FS_FT_REG_FILE,
- F2FS_FT_DIR,
- F2FS_FT_CHRDEV,
- F2FS_FT_BLKDEV,
- F2FS_FT_FIFO,
- F2FS_FT_SOCK,
- F2FS_FT_SYMLINK,
- F2FS_FT_MAX
-};
-
-#define S_SHIFT 12
+#define F2FS_DEF_PROJID 0 /* default project ID */
#endif /* _LINUX_F2FS_FS_H */
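A quick cross-check of the superblock hunk above: reserved[] shrinks from 327 to 258 bytes, which must be exactly the footprint of the new members plus the trailing crc. A standalone compile-time sketch of that arithmetic (not part of the patch):

#include <linux/build_bug.h>
#include <linux/f2fs_fs.h>

static_assert(sizeof(__le32) * F2FS_MAX_QUOTAS +  /* qf_ino: 12 */
              sizeof(__u8) +                      /* hot_ext_count: 1 */
              sizeof(__le16) * 2 +                /* s_encoding{,_flags}: 4 */
              MAX_STOP_REASON +                   /* s_stop_reason: 32 */
              MAX_F2FS_ERRORS +                   /* s_errors: 16 */
              sizeof(__le32)                      /* crc: 4 */
              == 327 - 258);                      /* 69 bytes either way */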
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
index 7494dc67c66f..f3f0b97b1675 100644
--- a/include/linux/falloc.h
+++ b/include/linux/falloc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FALLOC_H_
#define _FALLOC_H_
@@ -19,7 +20,10 @@ struct space_resv {
};
#define FS_IOC_RESVSP _IOW('X', 40, struct space_resv)
+#define FS_IOC_UNRESVSP _IOW('X', 41, struct space_resv)
#define FS_IOC_RESVSP64 _IOW('X', 42, struct space_resv)
+#define FS_IOC_UNRESVSP64 _IOW('X', 43, struct space_resv)
+#define FS_IOC_ZERO_RANGE _IOW('X', 57, struct space_resv)
#define FALLOC_FL_SUPPORTED_MASK (FALLOC_FL_KEEP_SIZE | \
FALLOC_FL_PUNCH_HOLE | \
@@ -28,4 +32,25 @@ struct space_resv {
FALLOC_FL_INSERT_RANGE | \
FALLOC_FL_UNSHARE_RANGE)
+/* on ia32 l_start is on a 32-bit boundary */
+#if defined(CONFIG_X86_64)
+struct space_resv_32 {
+ __s16 l_type;
+ __s16 l_whence;
+ __s64 l_start __attribute__((packed));
+ /* len == 0 means until end of file */
+ __s64 l_len __attribute__((packed));
+ __s32 l_sysid;
+ __u32 l_pid;
+ __s32 l_pad[4]; /* reserve area */
+};
+
+#define FS_IOC_RESVSP_32 _IOW ('X', 40, struct space_resv_32)
+#define FS_IOC_UNRESVSP_32 _IOW ('X', 41, struct space_resv_32)
+#define FS_IOC_RESVSP64_32 _IOW ('X', 42, struct space_resv_32)
+#define FS_IOC_UNRESVSP64_32 _IOW ('X', 43, struct space_resv_32)
+#define FS_IOC_ZERO_RANGE_32 _IOW ('X', 57, struct space_resv_32)
+
+#endif
+
#endif /* _FALLOC_H_ */
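A compat handler would widen the packed 32-bit layout above into the native struct space_resv before reusing the 64-bit path. A hypothetical sketch of that translation (thaw_space_resv_32() is invented for illustration):

#include <linux/string.h>

static void thaw_space_resv_32(const struct space_resv_32 *sr32,
                               struct space_resv *sr)
{
        memset(sr, 0, sizeof(*sr));
        sr->l_type   = sr32->l_type;
        sr->l_whence = sr32->l_whence;
        sr->l_start  = sr32->l_start;   /* 64-bit, merely packed on ia32 */
        sr->l_len    = sr32->l_len;     /* 0 means "until end of file" */
        sr->l_sysid  = sr32->l_sysid;
        sr->l_pid    = sr32->l_pid;
}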
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index cef93ddcc5a0..4f1c4f603118 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -1,8 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FANOTIFY_H
#define _LINUX_FANOTIFY_H
+#include <linux/sysctl.h>
#include <uapi/linux/fanotify.h>
-/* not valid from userspace, only kernel internal */
-#define FAN_MARK_ONDIR 0x00000100
+#define FAN_GROUP_FLAG(group, flag) \
+ ((group)->fanotify_data.flags & (flag))
+
+/*
+ * Flags allowed to be passed from/to userspace.
+ *
+ * We intentionally do not add new bits to the old FAN_ALL_* constants, because
+ * they are uapi exposed constants. If there are programs out there using
+ * these constants, the programs may break if re-compiled with new uapi headers
+ * and then run on an old kernel.
+ */
+
+/* Group classes where permission events are allowed */
+#define FANOTIFY_PERM_CLASSES (FAN_CLASS_CONTENT | \
+ FAN_CLASS_PRE_CONTENT)
+
+#define FANOTIFY_CLASS_BITS (FAN_CLASS_NOTIF | FANOTIFY_PERM_CLASSES)
+
+#define FANOTIFY_FID_BITS (FAN_REPORT_DFID_NAME_TARGET)
+
+#define FANOTIFY_INFO_MODES (FANOTIFY_FID_BITS | FAN_REPORT_PIDFD)
+
+/*
+ * fanotify_init() flags that require CAP_SYS_ADMIN.
+ * We do not allow unprivileged groups to request permission events.
+ * We do not allow unprivileged groups to see other processes' pids in events.
+ * We do not allow unprivileged groups to use unlimited resources.
+ */
+#define FANOTIFY_ADMIN_INIT_FLAGS (FANOTIFY_PERM_CLASSES | \
+ FAN_REPORT_TID | \
+ FAN_REPORT_PIDFD | \
+ FAN_UNLIMITED_QUEUE | \
+ FAN_UNLIMITED_MARKS)
+
+/*
+ * fanotify_init() flags that are allowed for users without CAP_SYS_ADMIN.
+ * FAN_CLASS_NOTIF is the only class we allow for unprivileged groups.
+ * We do not allow unprivileged groups to get file descriptors in events,
+ * so one of the flags for reporting file handles is required.
+ */
+#define FANOTIFY_USER_INIT_FLAGS (FAN_CLASS_NOTIF | \
+ FANOTIFY_FID_BITS | \
+ FAN_CLOEXEC | FAN_NONBLOCK)
+
+#define FANOTIFY_INIT_FLAGS (FANOTIFY_ADMIN_INIT_FLAGS | \
+ FANOTIFY_USER_INIT_FLAGS)
+
+/* Internal group flags */
+#define FANOTIFY_UNPRIV 0x80000000
+#define FANOTIFY_INTERNAL_GROUP_FLAGS (FANOTIFY_UNPRIV)
+
+#define FANOTIFY_MARK_TYPE_BITS (FAN_MARK_INODE | FAN_MARK_MOUNT | \
+ FAN_MARK_FILESYSTEM)
+
+#define FANOTIFY_MARK_CMD_BITS (FAN_MARK_ADD | FAN_MARK_REMOVE | \
+ FAN_MARK_FLUSH)
+
+#define FANOTIFY_MARK_IGNORE_BITS (FAN_MARK_IGNORED_MASK | \
+ FAN_MARK_IGNORE)
+
+#define FANOTIFY_MARK_FLAGS (FANOTIFY_MARK_TYPE_BITS | \
+ FANOTIFY_MARK_CMD_BITS | \
+ FANOTIFY_MARK_IGNORE_BITS | \
+ FAN_MARK_DONT_FOLLOW | \
+ FAN_MARK_ONLYDIR | \
+ FAN_MARK_IGNORED_SURV_MODIFY | \
+ FAN_MARK_EVICTABLE)
+
+/*
+ * Events that can be reported with data type FSNOTIFY_EVENT_PATH.
+ * Note that FAN_MODIFY can also be reported with data type
+ * FSNOTIFY_EVENT_INODE.
+ */
+#define FANOTIFY_PATH_EVENTS (FAN_ACCESS | FAN_MODIFY | \
+ FAN_CLOSE | FAN_OPEN | FAN_OPEN_EXEC)
+
+/*
+ * Directory entry modification events - reported only to directory
+ * where entry is modified and not to a watching parent.
+ */
+#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE | \
+ FAN_RENAME)
+
+/* Events that can be reported with event->fd */
+#define FANOTIFY_FD_EVENTS (FANOTIFY_PATH_EVENTS | FANOTIFY_PERM_EVENTS)
+
+/* Events that can only be reported with data type FSNOTIFY_EVENT_INODE */
+#define FANOTIFY_INODE_EVENTS (FANOTIFY_DIRENT_EVENTS | \
+ FAN_ATTRIB | FAN_MOVE_SELF | FAN_DELETE_SELF)
+
+/* Events that can only be reported with data type FSNOTIFY_EVENT_ERROR */
+#define FANOTIFY_ERROR_EVENTS (FAN_FS_ERROR)
+
+/* Events that user can request to be notified on */
+#define FANOTIFY_EVENTS (FANOTIFY_PATH_EVENTS | \
+ FANOTIFY_INODE_EVENTS | \
+ FANOTIFY_ERROR_EVENTS)
+
+/* Events that require a permission response from user */
+#define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM | \
+ FAN_OPEN_EXEC_PERM)
+
+/* Extra flags that may be reported with event or control handling of events */
+#define FANOTIFY_EVENT_FLAGS (FAN_EVENT_ON_CHILD | FAN_ONDIR)
+
+/* Events that may be reported to user */
+#define FANOTIFY_OUTGOING_EVENTS (FANOTIFY_EVENTS | \
+ FANOTIFY_PERM_EVENTS | \
+ FAN_Q_OVERFLOW | FAN_ONDIR)
+
+/* Events and flags relevant only for directories */
+#define FANOTIFY_DIRONLY_EVENT_BITS (FANOTIFY_DIRENT_EVENTS | \
+ FAN_EVENT_ON_CHILD | FAN_ONDIR)
+
+#define ALL_FANOTIFY_EVENT_BITS (FANOTIFY_OUTGOING_EVENTS | \
+ FANOTIFY_EVENT_FLAGS)
+
+/* These masks check for invalid bits in permission responses. */
+#define FANOTIFY_RESPONSE_ACCESS (FAN_ALLOW | FAN_DENY)
+#define FANOTIFY_RESPONSE_FLAGS (FAN_AUDIT | FAN_INFO)
+#define FANOTIFY_RESPONSE_VALID_MASK (FANOTIFY_RESPONSE_ACCESS | FANOTIFY_RESPONSE_FLAGS)
+
+/* Do not use these old uapi constants internally */
+#undef FAN_ALL_CLASS_BITS
+#undef FAN_ALL_INIT_FLAGS
+#undef FAN_ALL_MARK_FLAGS
+#undef FAN_ALL_EVENTS
+#undef FAN_ALL_PERM_EVENTS
+#undef FAN_ALL_OUTGOING_EVENTS
+
#endif /* _LINUX_FANOTIFY_H */
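The grouped masks above compose into a straightforward two-step check: unknown bits are rejected outright, and bits outside the unprivileged set additionally require CAP_SYS_ADMIN. A sketch of that composition (the helper name is invented):

static int check_init_flags(unsigned int flags, bool has_admin)
{
        if (flags & ~FANOTIFY_INIT_FLAGS)
                return -EINVAL;         /* bit outside every known group */
        if (!has_admin && (flags & ~FANOTIFY_USER_INIT_FLAGS))
                return -EPERM;          /* admin-only bit, no CAP_SYS_ADMIN */
        return 0;
}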
diff --git a/include/linux/fault-inject-usercopy.h b/include/linux/fault-inject-usercopy.h
new file mode 100644
index 000000000000..56c3a693fdd9
--- /dev/null
+++ b/include/linux/fault-inject-usercopy.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_FAULT_INJECT_USERCOPY_H__
+#define __LINUX_FAULT_INJECT_USERCOPY_H__
+
+/*
+ * This header provides a wrapper for injecting failures into the user
+ * space memory access functions.
+ */
+
+#include <linux/types.h>
+
+#ifdef CONFIG_FAULT_INJECTION_USERCOPY
+
+bool should_fail_usercopy(void);
+
+#else
+
+static inline bool should_fail_usercopy(void) { return false; }
+
+#endif /* CONFIG_FAULT_INJECTION_USERCOPY */
+
+#endif /* __LINUX_FAULT_INJECT_USERCOPY_H__ */
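Illustratively, a usercopy primitive consults the hook before touching user memory, so a forced failure looks like a fault (nothing copied). A sketch under that assumption; my_copy_from_user() is invented, not a real call site:

#include <linux/uaccess.h>

static unsigned long my_copy_from_user(void *to, const void __user *from,
                                       unsigned long n)
{
        if (should_fail_usercopy())
                return n;       /* report that zero bytes were copied */
        return copy_from_user(to, from, n);
}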
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 728d4e0292aa..481abf530b3c 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FAULT_INJECT_H
#define _LINUX_FAULT_INJECT_H
@@ -5,12 +6,13 @@
#include <linux/types.h>
#include <linux/debugfs.h>
+#include <linux/configfs.h>
#include <linux/ratelimit.h>
#include <linux/atomic.h>
/*
* For explanation of the elements of this struct, see
- * Documentation/fault-injection/fault-injection.txt
+ * Documentation/fault-injection/fault-injection.rst
*/
struct fault_attr {
unsigned long probability;
@@ -30,6 +32,10 @@ struct fault_attr {
struct dentry *dname;
};
+enum fault_flags {
+ FAULT_NOWARN = 1 << 0,
+};
+
#define FAULT_ATTR_INITIALIZER { \
.interval = 1, \
.times = ATOMIC_INIT(1), \
@@ -42,6 +48,7 @@ struct fault_attr {
#define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER
int setup_fault_attr(struct fault_attr *attr, char *str);
+bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags);
bool should_fail(struct fault_attr *attr, ssize_t size);
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
@@ -59,14 +66,38 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name,
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
+#ifdef CONFIG_FAULT_INJECTION_CONFIGFS
+
+struct fault_config {
+ struct fault_attr attr;
+ struct config_group group;
+};
+
+void fault_config_init(struct fault_config *config, const char *name);
+
+#else /* CONFIG_FAULT_INJECTION_CONFIGFS */
+
+struct fault_config {
+};
+
+static inline void fault_config_init(struct fault_config *config,
+ const char *name)
+{
+}
+
+#endif /* CONFIG_FAULT_INJECTION_CONFIGFS */
+
#endif /* CONFIG_FAULT_INJECTION */
struct kmem_cache;
+bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
+
+int should_failslab(struct kmem_cache *s, gfp_t gfpflags);
#ifdef CONFIG_FAILSLAB
-extern bool should_failslab(struct kmem_cache *s, gfp_t gfpflags);
+extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags);
#else
-static inline bool should_failslab(struct kmem_cache *s, gfp_t gfpflags)
+static inline bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
return false;
}
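Wiring a subsystem into the framework stays a two-liner: declare an attr, then gate the operation on should_fail(). A minimal sketch, with fail_myops and the submit path invented for illustration:

static DECLARE_FAULT_ATTR(fail_myops);

static int myops_submit(size_t size)
{
        if (should_fail(&fail_myops, size))
                return -EIO;    /* injected failure */
        /* ... real submission path ... */
        return 0;
}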
diff --git a/include/linux/fb.h b/include/linux/fb.h
index a964d076b4dc..08cb47da71f8 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -1,6 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FB_H
#define _LINUX_FB_H
+#include <linux/refcount.h>
#include <linux/kgdb.h>
#include <uapi/linux/fb.h>
@@ -123,45 +125,17 @@ struct fb_cursor_user {
* Register/unregister for framebuffer events
*/
-/* The resolution of the passed in fb_info about to change */
+/* The resolution of the passed-in fb_info is about to change */
#define FB_EVENT_MODE_CHANGE 0x01
-/* The display on this fb_info is beeing suspended, no access to the
- * framebuffer is allowed any more after that call returns
- */
-#define FB_EVENT_SUSPEND 0x02
-/* The display on this fb_info was resumed, you can restore the display
- * if you own it
- */
-#define FB_EVENT_RESUME 0x03
-/* An entry from the modelist was removed */
-#define FB_EVENT_MODE_DELETE 0x04
-/* A driver registered itself */
+
+#ifdef CONFIG_GUMSTIX_AM200EPD
+/* only used by mach-pxa/am200epd.c */
#define FB_EVENT_FB_REGISTERED 0x05
-/* A driver unregistered itself */
#define FB_EVENT_FB_UNREGISTERED 0x06
-/* CONSOLE-SPECIFIC: get console to framebuffer mapping */
-#define FB_EVENT_GET_CONSOLE_MAP 0x07
-/* CONSOLE-SPECIFIC: set console to framebuffer mapping */
-#define FB_EVENT_SET_CONSOLE_MAP 0x08
-/* A hardware display blank change occurred */
+#endif
+
+/* A display blank is requested */
#define FB_EVENT_BLANK 0x09
-/* Private modelist is to be replaced */
-#define FB_EVENT_NEW_MODELIST 0x0A
-/* The resolution of the passed in fb_info about to change and
- all vc's should be changed */
-#define FB_EVENT_MODE_CHANGE_ALL 0x0B
-/* A software display blank change occurred */
-#define FB_EVENT_CONBLANK 0x0C
-/* Get drawing requirements */
-#define FB_EVENT_GET_REQ 0x0D
-/* Unbind from the console if possible */
-#define FB_EVENT_FB_UNBIND 0x0E
-/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga_switcheroo */
-#define FB_EVENT_REMAP_ALL_CONSOLE 0x0F
-/* A hardware display blank early change occured */
-#define FB_EARLY_EVENT_BLANK 0x10
-/* A hardware display blank revert early change occured */
-#define FB_R_EARLY_EVENT_BLANK 0x11
struct fb_event {
struct fb_info *info;
@@ -227,13 +201,21 @@ struct fb_pixmap {
};
#ifdef CONFIG_FB_DEFERRED_IO
+struct fb_deferred_io_pageref {
+ struct page *page;
+ unsigned long offset;
+ /* private */
+ struct list_head list;
+};
+
struct fb_deferred_io {
/* delay between mkwrite and deferred handler */
unsigned long delay;
- struct mutex lock; /* mutex that protects the page list */
- struct list_head pagelist; /* list of touched pages */
+ bool sort_pagereflist; /* sort pagelist by offset */
+ int open_count; /* number of opened files; protected by fb_info lock */
+ struct mutex lock; /* mutex that protects the pageref list */
+ struct list_head pagereflist; /* list of pagerefs for touched pages */
/* callback */
- void (*first_io)(struct fb_info *info);
void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
};
#endif
@@ -400,7 +382,7 @@ struct fb_tile_ops {
#endif /* CONFIG_FB_TILEBLITTING */
/* FBINFO_* = fb_info.flags bit flags */
-#define FBINFO_MODULE 0x0001 /* Low-level driver is a module */
+#define FBINFO_DEFAULT 0
#define FBINFO_HWACCEL_DISABLED 0x0002
/* When FBINFO_HWACCEL_DISABLED is set:
* Hardware acceleration is turned off. Software implementations
@@ -427,8 +409,6 @@ struct fb_tile_ops {
#define FBINFO_HWACCEL_YPAN 0x2000 /* optional */
#define FBINFO_HWACCEL_YWRAP 0x4000 /* optional */
-#define FBINFO_MISC_USEREVENT 0x10000 /* event request
- from userspace */
#define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */
/* A driver may set this flag to indicate that it does want a set_par to be
@@ -443,8 +423,6 @@ struct fb_tile_ops {
*/
#define FBINFO_MISC_ALWAYS_SETPAR 0x40000
-/* where the fb is a firmware driver, and can be replaced with a proper one */
-#define FBINFO_MISC_FIRMWARE 0x80000
/*
* Host and GPU endianness differ.
*/
@@ -455,43 +433,52 @@ struct fb_tile_ops {
* and host endianness. Drivers should not use this flag.
*/
#define FBINFO_BE_MATH 0x100000
+/*
+ * Hide smem_start in the FBIOGET_FSCREENINFO IOCTL. This is used by modern DRM
+ * drivers to stop userspace from trying to share buffers behind the kernel's
+ * back. Instead dma-buf based buffer sharing should be used.
+ */
+#define FBINFO_HIDE_SMEM_START 0x200000
-/* report to the VT layer that this fb driver can accept forced console
- output like oopses */
-#define FBINFO_CAN_FORCE_OUTPUT 0x200000
struct fb_info {
- atomic_t count;
+ refcount_t count;
int node;
int flags;
+ /*
+ * -1 by default, set to a FB_ROTATE_* value by the driver if it knows
+ * an LCD is not mounted upright and fbcon should rotate to compensate.
+ */
+ int fbcon_rotate_hint;
struct mutex lock; /* Lock for open/release/ioctl funcs */
struct mutex mm_lock; /* Lock for fb_mmap and smem_* fields */
struct fb_var_screeninfo var; /* Current var */
struct fb_fix_screeninfo fix; /* Current fix */
struct fb_monspecs monspecs; /* Current Monitor specs */
- struct work_struct queue; /* Framebuffer event queue */
struct fb_pixmap pixmap; /* Image hardware mapper */
struct fb_pixmap sprite; /* Cursor hardware mapper */
struct fb_cmap cmap; /* Current cmap */
struct list_head modelist; /* mode list */
struct fb_videomode *mode; /* current mode */
-#ifdef CONFIG_FB_BACKLIGHT
+#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
/* assigned backlight device */
- /* set before framebuffer registration,
+ /* set before framebuffer registration,
remove after unregister */
struct backlight_device *bl_dev;
/* Backlight level curve */
- struct mutex bl_curve_mutex;
+ struct mutex bl_curve_mutex;
u8 bl_curve[FB_BACKLIGHT_LEVELS];
#endif
#ifdef CONFIG_FB_DEFERRED_IO
struct delayed_work deferred_work;
+ unsigned long npagerefs;
+ struct fb_deferred_io_pageref *pagerefs;
struct fb_deferred_io *fbdefio;
#endif
- struct fb_ops *fbops;
+ const struct fb_ops *fbops;
struct device *device; /* This is the parent */
struct device *dev; /* This is this fb device */
int class_flag; /* private sysfs flags */
@@ -502,45 +489,18 @@ struct fb_info {
char __iomem *screen_base; /* Virtual address */
char *screen_buffer;
};
- unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */
- void *pseudo_palette; /* Fake palette of 16 colors */
+ unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */
+ void *pseudo_palette; /* Fake palette of 16 colors */
#define FBINFO_STATE_RUNNING 0
#define FBINFO_STATE_SUSPENDED 1
u32 state; /* Hardware state i.e suspend */
void *fbcon_par; /* fbcon use-only private area */
/* From here on everything is device dependent */
void *par;
- /* we need the PCI or similar aperture base/size not
- smem_start/size as smem_start may just be an object
- allocated inside the aperture so may not actually overlap */
- struct apertures_struct {
- unsigned int count;
- struct aperture {
- resource_size_t base;
- resource_size_t size;
- } ranges[0];
- } *apertures;
bool skip_vt_switch; /* no VT switch on suspend/resume required */
};
-static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
- struct apertures_struct *a = kzalloc(sizeof(struct apertures_struct)
- + max_num * sizeof(struct aperture), GFP_KERNEL);
- if (!a)
- return NULL;
- a->count = max_num;
- return a;
-}
-
-#ifdef MODULE
-#define FBINFO_DEFAULT FBINFO_MODULE
-#else
-#define FBINFO_DEFAULT 0
-#endif
-
-// This will go away
-#define FBINFO_FLAG_MODULE FBINFO_MODULE
#define FBINFO_FLAG_DEFAULT FBINFO_DEFAULT
/* This will go away
@@ -571,7 +531,9 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
#define fb_memcpy_fromfb sbus_memcpy_fromio
#define fb_memcpy_tofb sbus_memcpy_toio
-#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__) || defined(__arm__)
+#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || \
+ defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || \
+ defined(__arm__) || defined(__aarch64__) || defined(__mips__)
#define fb_readb __raw_readb
#define fb_readw __raw_readw
@@ -611,11 +573,11 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
* `Generic' versions of the frame buffer device operations
*/
-extern int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var);
-extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var);
+extern int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var);
+extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var);
extern int fb_blank(struct fb_info *info, int blank);
-extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
-extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
+extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
extern void cfb_imageblit(struct fb_info *info, const struct fb_image *image);
/*
* Drawing operations where framebuffer is in system RAM
@@ -630,10 +592,7 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
/* drivers/video/fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
-extern int unregister_framebuffer(struct fb_info *fb_info);
-extern int unlink_framebuffer(struct fb_info *fb_info);
-extern int remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary);
+extern void unregister_framebuffer(struct fb_info *fb_info);
extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
extern int fb_show_logo(struct fb_info *fb_info, int rotate);
extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
@@ -646,11 +605,14 @@ extern int fb_get_color_depth(struct fb_var_screeninfo *var,
extern int fb_get_options(const char *name, char **option);
extern int fb_new_modelist(struct fb_info *info);
-extern struct fb_info *registered_fb[FB_MAX];
-extern int num_registered_fb;
+extern bool fb_center_logo;
+extern int fb_logo_count;
extern struct class *fb_class;
-extern int lock_fb_info(struct fb_info *info);
+static inline void lock_fb_info(struct fb_info *info)
+{
+ mutex_lock(&info->lock);
+}
static inline void unlock_fb_info(struct fb_info *info)
{
@@ -674,10 +636,11 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
/* drivers/video/fb_defio.c */
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
-extern void fb_deferred_io_init(struct fb_info *info);
+extern int fb_deferred_io_init(struct fb_info *info);
extern void fb_deferred_io_open(struct fb_info *info,
struct inode *inode,
struct file *file);
+extern void fb_deferred_io_release(struct fb_info *info);
extern void fb_deferred_io_cleanup(struct fb_info *info);
extern int fb_deferred_io_fsync(struct file *file, loff_t start,
loff_t end, int datasync);
@@ -732,8 +695,6 @@ extern int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var);
extern const unsigned char *fb_firmware_edid(struct device *device);
extern void fb_edid_to_monspecs(unsigned char *edid,
struct fb_monspecs *specs);
-extern void fb_edid_add_monspecs(unsigned char *edid,
- struct fb_monspecs *specs);
extern void fb_destroy_modedb(struct fb_videomode *modedb);
extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb);
extern unsigned char *fb_ddc_read(struct i2c_adapter *adapter);
@@ -805,9 +766,7 @@ struct dmt_videomode {
const struct fb_videomode *mode;
};
-extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
-extern const struct fb_videomode cea_modes[65];
extern const struct dmt_videomode dmt_modes[];
struct fb_modelist {
@@ -822,6 +781,15 @@ extern int fb_find_mode(struct fb_var_screeninfo *var,
const struct fb_videomode *default_mode,
unsigned int default_bpp);
+#if defined(CONFIG_VIDEO_NOMODESET)
+bool fb_modesetting_disabled(const char *drvname);
+#else
+static inline bool fb_modesetting_disabled(const char *drvname)
+{
+ return false;
+}
+#endif
+
/* Convenience logging macros */
#define fb_err(fb_info, fmt, ...) \
pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
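On the deferred-I/O rework above: a driver now only supplies the fb_deferred_io descriptor, the pageref array is allocated by the core, and fb_deferred_io_init() can therefore fail. A sketch of the driver side, with all names invented:

static void my_defio_flush(struct fb_info *info, struct list_head *pagereflist)
{
        /* walk the pagerefs and push the touched pages to the device */
}

static struct fb_deferred_io my_defio = {
        .delay          = HZ / 20,      /* coalesce writes for ~50 ms */
        .deferred_io    = my_defio_flush,
};

static int my_probe_tail(struct fb_info *info)
{
        info->fbdefio = &my_defio;
        return fb_deferred_io_init(info);       /* may now return -ENOMEM */
}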
diff --git a/include/linux/fbcon.h b/include/linux/fbcon.h
new file mode 100644
index 000000000000..2382dec6d6ab
--- /dev/null
+++ b/include/linux/fbcon.h
@@ -0,0 +1,46 @@
+#ifndef _LINUX_FBCON_H
+#define _LINUX_FBCON_H
+
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE
+void __init fb_console_init(void);
+void __exit fb_console_exit(void);
+int fbcon_fb_registered(struct fb_info *info);
+void fbcon_fb_unregistered(struct fb_info *info);
+void fbcon_fb_unbind(struct fb_info *info);
+void fbcon_suspended(struct fb_info *info);
+void fbcon_resumed(struct fb_info *info);
+int fbcon_mode_deleted(struct fb_info *info,
+ struct fb_videomode *mode);
+void fbcon_new_modelist(struct fb_info *info);
+void fbcon_get_requirement(struct fb_info *info,
+ struct fb_blit_caps *caps);
+void fbcon_fb_blanked(struct fb_info *info, int blank);
+int fbcon_modechange_possible(struct fb_info *info,
+ struct fb_var_screeninfo *var);
+void fbcon_update_vcs(struct fb_info *info, bool all);
+void fbcon_remap_all(struct fb_info *info);
+int fbcon_set_con2fb_map_ioctl(void __user *argp);
+int fbcon_get_con2fb_map_ioctl(void __user *argp);
+#else
+static inline void fb_console_init(void) {}
+static inline void fb_console_exit(void) {}
+static inline int fbcon_fb_registered(struct fb_info *info) { return 0; }
+static inline void fbcon_fb_unregistered(struct fb_info *info) {}
+static inline void fbcon_fb_unbind(struct fb_info *info) {}
+static inline void fbcon_suspended(struct fb_info *info) {}
+static inline void fbcon_resumed(struct fb_info *info) {}
+static inline int fbcon_mode_deleted(struct fb_info *info,
+ struct fb_videomode *mode) { return 0; }
+static inline void fbcon_new_modelist(struct fb_info *info) {}
+static inline void fbcon_get_requirement(struct fb_info *info,
+ struct fb_blit_caps *caps) {}
+static inline void fbcon_fb_blanked(struct fb_info *info, int blank) {}
+static inline int fbcon_modechange_possible(struct fb_info *info,
+ struct fb_var_screeninfo *var) { return 0; }
+static inline void fbcon_update_vcs(struct fb_info *info, bool all) {}
+static inline void fbcon_remap_all(struct fb_info *info) {}
+static inline int fbcon_set_con2fb_map_ioctl(void __user *argp) { return 0; }
+static inline int fbcon_get_con2fb_map_ioctl(void __user *argp) { return 0; }
+#endif
+
+#endif /* _LINUX_FBCON_H */
diff --git a/include/linux/fcdevice.h b/include/linux/fcdevice.h
index 5009fa16b5d8..3d14ebe59dc9 100644
--- a/include/linux/fcdevice.h
+++ b/include/linux/fcdevice.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. NET is implemented using the BSD Socket
@@ -12,13 +13,7 @@
* Relocated to include/linux where it belongs by Alan Cox
* <gw4pts@gw4pts.ampr.org>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* WARNING: This move may well be temporary. This file will get merged with others RSN.
- *
*/
#ifndef _LINUX_FCDEVICE_H
#define _LINUX_FCDEVICE_H
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index 1b48d9c9a561..a332e79b3207 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -1,17 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FCNTL_H
#define _LINUX_FCNTL_H
+#include <linux/stat.h>
#include <uapi/linux/fcntl.h>
-/* list of all valid flags for the open/openat flags argument: */
+/* List of all valid flags for the open/openat flags argument: */
#define VALID_OPEN_FLAGS \
(O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \
- O_APPEND | O_NDELAY | O_NONBLOCK | O_NDELAY | __O_SYNC | O_DSYNC | \
+ O_APPEND | O_NDELAY | O_NONBLOCK | __O_SYNC | O_DSYNC | \
FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \
O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE)
+/* List of all valid flags for the how->resolve argument: */
+#define VALID_RESOLVE_FLAGS \
+ (RESOLVE_NO_XDEV | RESOLVE_NO_MAGICLINKS | RESOLVE_NO_SYMLINKS | \
+ RESOLVE_BENEATH | RESOLVE_IN_ROOT | RESOLVE_CACHED)
+
+/* List of all open_how "versions". */
+#define OPEN_HOW_SIZE_VER0 24 /* sizeof first published struct */
+#define OPEN_HOW_SIZE_LATEST OPEN_HOW_SIZE_VER0
+
#ifndef force_o_largefile
-#define force_o_largefile() (BITS_PER_LONG != 32)
+#define force_o_largefile() (!IS_ENABLED(CONFIG_ARCH_32BIT_OFF_T))
#endif
#if BITS_PER_LONG == 32
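The OPEN_HOW_SIZE_* constants above exist so the struct can grow: userspace passes its own size, and the kernel copies the overlap while rejecting non-zero tail bytes it does not understand. A sketch of that pattern with copy_struct_from_user(); the wrapper function is invented:

#include <linux/openat2.h>
#include <linux/uaccess.h>

static int fetch_open_how(struct open_how *how,
                          const struct open_how __user *uhow, size_t usize)
{
        if (usize < OPEN_HOW_SIZE_VER0)
                return -EINVAL; /* smaller than the first published layout */
        /* zero-fills our tail; -E2BIG on non-zero bytes past sizeof(*how) */
        return copy_struct_from_user(how, sizeof(*how), uhow, usize);
}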
diff --git a/include/linux/fd.h b/include/linux/fd.h
index 69275bccc3e4..ece5ea53205b 100644
--- a/include/linux/fd.h
+++ b/include/linux/fd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FD_H
#define _LINUX_FD_H
diff --git a/include/linux/fddidevice.h b/include/linux/fddidevice.h
index 32c22cfb238b..906ee446db92 100644
--- a/include/linux/fddidevice.h
+++ b/include/linux/fddidevice.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
@@ -13,11 +14,6 @@
* Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Alan Cox, <gw4pts@gw4pts.ampr.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_FDDIDEVICE_H
#define _LINUX_FDDIDEVICE_H
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 6e84b2cae6ad..e066816f3519 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* descriptor table internals; you almost certainly want file.h instead.
*/
@@ -9,6 +10,7 @@
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
+#include <linux/nospec.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>
@@ -20,6 +22,7 @@
* as this is the granularity returned by copy_fdset().
*/
#define NR_OPEN_DEFAULT BITS_PER_LONG
+#define NR_OPEN_MAX ~0U
struct fdtable {
unsigned int max_fds;
@@ -77,46 +80,54 @@ struct dentry;
/*
* The caller must ensure that fd table isn't shared or hold rcu or file lock
*/
-static inline struct file *__fcheck_files(struct files_struct *files, unsigned int fd)
+static inline struct file *files_lookup_fd_raw(struct files_struct *files, unsigned int fd)
{
struct fdtable *fdt = rcu_dereference_raw(files->fdt);
- if (fd < fdt->max_fds)
+ if (fd < fdt->max_fds) {
+ fd = array_index_nospec(fd, fdt->max_fds);
return rcu_dereference_raw(fdt->fd[fd]);
+ }
return NULL;
}
-static inline struct file *fcheck_files(struct files_struct *files, unsigned int fd)
+static inline struct file *files_lookup_fd_locked(struct files_struct *files, unsigned int fd)
{
- RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
- !lockdep_is_held(&files->file_lock),
+ RCU_LOCKDEP_WARN(!lockdep_is_held(&files->file_lock),
"suspicious rcu_dereference_check() usage");
- return __fcheck_files(files, fd);
+ return files_lookup_fd_raw(files, fd);
}
-/*
- * Check whether the specified fd has an open file.
- */
-#define fcheck(fd) fcheck_files(current->files, fd)
+static inline struct file *files_lookup_fd_rcu(struct files_struct *files, unsigned int fd)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "suspicious rcu_dereference_check() usage");
+ return files_lookup_fd_raw(files, fd);
+}
+
+static inline struct file *lookup_fd_rcu(unsigned int fd)
+{
+ return files_lookup_fd_rcu(current->files, fd);
+}
+
+struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd);
+struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *fd);
struct task_struct;
-struct files_struct *get_files_struct(struct task_struct *);
void put_files_struct(struct files_struct *fs);
-void reset_files_struct(struct files_struct *);
-int unshare_files(struct files_struct **);
-struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
+int unshare_files(void);
+struct files_struct *dup_fd(struct files_struct *, unsigned, int *) __latent_entropy;
void do_close_on_exec(struct files_struct *);
int iterate_fd(struct files_struct *, unsigned,
int (*)(const void *, struct file *, unsigned),
const void *);
-extern int __alloc_fd(struct files_struct *files,
- unsigned start, unsigned end, unsigned flags);
-extern void __fd_install(struct files_struct *files,
- unsigned int fd, struct file *file);
-extern int __close_fd(struct files_struct *files,
- unsigned int fd);
+extern int close_fd(unsigned int fd);
+extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags);
+extern struct file *close_fd_get_file(unsigned int fd);
+extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
+ struct files_struct **new_fdp);
extern struct kmem_cache *files_cachep;
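The renamed lookup helpers above make the required locking explicit in the name. A sketch of the RCU flavour replacing the old fcheck(fd) pattern (the function name is invented):

#include <linux/fdtable.h>
#include <linux/rcupdate.h>

static bool fd_is_open(unsigned int fd)
{
        struct file *file;

        rcu_read_lock();
        file = lookup_fd_rcu(fd);       /* current->files, RCU-protected */
        rcu_read_unlock();

        /* the pointer may go stale after unlock; only the test is used */
        return file != NULL;
}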
diff --git a/include/linux/fec.h b/include/linux/fec.h
index 1454a503622d..9aaf53f07269 100644
--- a/include/linux/fec.h
+++ b/include/linux/fec.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* include/linux/fec.h
*
* Copyright (c) 2009 Orex Computed Radiography
@@ -6,10 +7,6 @@
* Copyright (C) 2010 Freescale Semiconductor, Inc.
*
* Header file for the FEC platform data
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_FEC_H__
#define __LINUX_FEC_H__
diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h
new file mode 100644
index 000000000000..c50882f19235
--- /dev/null
+++ b/include/linux/fiemap.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FIEMAP_H
+#define _LINUX_FIEMAP_H 1
+
+#include <uapi/linux/fiemap.h>
+#include <linux/fs.h>
+
+struct fiemap_extent_info {
+ unsigned int fi_flags; /* Flags as passed from user */
+ unsigned int fi_extents_mapped; /* Number of mapped extents */
+ unsigned int fi_extents_max; /* Size of fiemap_extent array */
+ struct fiemap_extent __user *fi_extents_start; /* Start of
+ fiemap_extent array */
+};
+
+int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 *len, u32 supported_flags);
+int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
+ u64 phys, u64 len, u32 flags);
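+
+/*
+ * Illustrative ->fiemap skeleton (editor's sketch; foo_fiemap is a
+ * hypothetical hook shown only to tie the two helpers together):
+ *
+ *	static int foo_fiemap(struct inode *inode,
+ *			      struct fiemap_extent_info *fieinfo,
+ *			      u64 start, u64 len)
+ *	{
+ *		int ret = fiemap_prep(inode, fieinfo, start, &len, 0);
+ *
+ *		if (ret)
+ *			return ret;
+ *		return fiemap_fill_next_extent(fieinfo, start, 0, len,
+ *				FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_LAST);
+ *	}
+ */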
+
+#endif /* _LINUX_FIEMAP_H 1 */
diff --git a/include/linux/file.h b/include/linux/file.h
index 61eb82cbafba..39704eae83e2 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Wrapper functions for accessing the file_struct fd array.
*/
@@ -8,17 +9,22 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/posix_types.h>
+#include <linux/errno.h>
struct file;
extern void fput(struct file *);
struct file_operations;
+struct task_struct;
struct vfsmount;
struct dentry;
+struct inode;
struct path;
-extern struct file *alloc_file(const struct path *, fmode_t mode,
- const struct file_operations *fop);
+extern struct file *alloc_file_pseudo(struct inode *, struct vfsmount *,
+ const char *, int flags, const struct file_operations *);
+extern struct file *alloc_file_clone(struct file *, int flags,
+ const struct file_operations *);
static inline void fput_light(struct file *file, int fput_needed)
{
@@ -41,6 +47,7 @@ static inline void fdput(struct fd fd)
extern struct file *fget(unsigned int fd);
extern struct file *fget_raw(unsigned int fd);
+extern struct file *fget_task(struct task_struct *task, unsigned int fd);
extern unsigned long __fdget(unsigned int fd);
extern unsigned long __fdget_raw(unsigned int fd);
extern unsigned long __fdget_pos(unsigned int fd);
@@ -77,13 +84,29 @@ extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
extern void set_close_on_exec(unsigned int fd, int flag);
extern bool get_close_on_exec(unsigned int fd);
-extern void put_filp(struct file *);
+extern int __get_unused_fd_flags(unsigned flags, unsigned long nofile);
extern int get_unused_fd_flags(unsigned flags);
extern void put_unused_fd(unsigned int fd);
extern void fd_install(unsigned int fd, struct file *file);
+extern int __receive_fd(struct file *file, int __user *ufd,
+ unsigned int o_flags);
+
+extern int receive_fd(struct file *file, unsigned int o_flags);
+
+static inline int receive_fd_user(struct file *file, int __user *ufd,
+ unsigned int o_flags)
+{
+ if (ufd == NULL)
+ return -EFAULT;
+ return __receive_fd(file, ufd, o_flags);
+}
+int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags);
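+
+/*
+ * Illustrative use (editor's sketch): install a new descriptor for @file
+ * in the current task and copy its number to userspace on success:
+ *
+ *	int fd = receive_fd_user(file, ufd, O_CLOEXEC);
+ */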
+
extern void flush_delayed_fput(void);
extern void __fput_sync(struct file *);
+extern unsigned int sysctl_nr_open_min, sysctl_nr_open_max;
+
#endif /* __LINUX_FILE_H */
diff --git a/include/linux/fileattr.h b/include/linux/fileattr.h
new file mode 100644
index 000000000000..47c05a9851d0
--- /dev/null
+++ b/include/linux/fileattr.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_FILEATTR_H
+#define _LINUX_FILEATTR_H
+
+/* Flags shared between flags/xflags */
+#define FS_COMMON_FL \
+ (FS_SYNC_FL | FS_IMMUTABLE_FL | FS_APPEND_FL | \
+ FS_NODUMP_FL | FS_NOATIME_FL | FS_DAX_FL | \
+ FS_PROJINHERIT_FL)
+
+#define FS_XFLAG_COMMON \
+ (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | FS_XFLAG_APPEND | \
+ FS_XFLAG_NODUMP | FS_XFLAG_NOATIME | FS_XFLAG_DAX | \
+ FS_XFLAG_PROJINHERIT)
+
+/*
+ * Merged interface for miscellaneous file attributes. 'flags' originates from
+ * ext* and 'fsx_flags' from xfs. There's some overlap between the two, which
+ * is handled by the VFS helpers, so filesystems are free to implement just one
+ * or both of these sub-interfaces.
+ */
+struct fileattr {
+ u32 flags; /* flags (FS_IOC_GETFLAGS/FS_IOC_SETFLAGS) */
+ /* struct fsxattr: */
+ u32 fsx_xflags; /* xflags field value (get/set) */
+ u32 fsx_extsize; /* extsize field value (get/set)*/
+ u32 fsx_nextents; /* nextents field value (get) */
+ u32 fsx_projid; /* project identifier (get/set) */
+ u32 fsx_cowextsize; /* CoW extsize field value (get/set)*/
+ /* selectors: */
+ bool flags_valid:1;
+ bool fsx_valid:1;
+};
+
+int copy_fsxattr_to_user(const struct fileattr *fa, struct fsxattr __user *ufa);
+
+void fileattr_fill_xflags(struct fileattr *fa, u32 xflags);
+void fileattr_fill_flags(struct fileattr *fa, u32 flags);
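+
+/*
+ * Illustrative ->fileattr_get (editor's sketch; the foo_* names are
+ * hypothetical):
+ *
+ *	static int foo_fileattr_get(struct dentry *dentry, struct fileattr *fa)
+ *	{
+ *		fileattr_fill_flags(fa, foo_read_iflags(d_inode(dentry)));
+ *		return 0;
+ *	}
+ */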
+
+/**
+ * fileattr_has_fsx - check for extended flags/attributes
+ * @fa: fileattr pointer
+ *
+ * Return: true if any attributes are present that are not represented in
+ * ->flags.
+ */
+static inline bool fileattr_has_fsx(const struct fileattr *fa)
+{
+ return fa->fsx_valid &&
+ ((fa->fsx_xflags & ~FS_XFLAG_COMMON) || fa->fsx_extsize != 0 ||
+ fa->fsx_projid != 0 || fa->fsx_cowextsize != 0);
+}
+
+int vfs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
+int vfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct fileattr *fa);
+
+#endif /* _LINUX_FILEATTR_H */
diff --git a/include/linux/filelock.h b/include/linux/filelock.h
new file mode 100644
index 000000000000..efcdd1631d9b
--- /dev/null
+++ b/include/linux/filelock.h
@@ -0,0 +1,439 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FILELOCK_H
+#define _LINUX_FILELOCK_H
+
+#include <linux/fs.h>
+
+#define FL_POSIX 1
+#define FL_FLOCK 2
+#define FL_DELEG 4 /* NFSv4 delegation */
+#define FL_ACCESS 8 /* not trying to lock, just looking */
+#define FL_EXISTS 16 /* when unlocking, test for existence */
+#define FL_LEASE 32 /* lease held on this file */
+#define FL_CLOSE 64 /* unlock on close */
+#define FL_SLEEP 128 /* A blocking lock */
+#define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */
+#define FL_UNLOCK_PENDING 512 /* Lease is being broken */
+#define FL_OFDLCK 1024 /* lock is "owned" by struct file */
+#define FL_LAYOUT 2048 /* outstanding pNFS layout */
+#define FL_RECLAIM 4096 /* reclaiming from a reboot server */
+
+#define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE)
+
+/*
+ * Special return value from posix_lock_file() and vfs_lock_file() for
+ * asynchronous locking.
+ */
+#define FILE_LOCK_DEFERRED 1
+
+struct file_lock;
+
+struct file_lock_operations {
+ void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
+ void (*fl_release_private)(struct file_lock *);
+};
+
+struct lock_manager_operations {
+ void *lm_mod_owner;
+ fl_owner_t (*lm_get_owner)(fl_owner_t);
+ void (*lm_put_owner)(fl_owner_t);
+ void (*lm_notify)(struct file_lock *); /* unblock callback */
+ int (*lm_grant)(struct file_lock *, int);
+ bool (*lm_break)(struct file_lock *);
+ int (*lm_change)(struct file_lock *, int, struct list_head *);
+ void (*lm_setup)(struct file_lock *, void **);
+ bool (*lm_breaker_owns_lease)(struct file_lock *);
+ bool (*lm_lock_expirable)(struct file_lock *cfl);
+ void (*lm_expire_lock)(void);
+};
+
+struct lock_manager {
+ struct list_head list;
+ /*
+ * NFSv4 and up also want opens blocked during the grace period;
+ * NLM doesn't care:
+ */
+ bool block_opens;
+};
+
+struct net;
+void locks_start_grace(struct net *, struct lock_manager *);
+void locks_end_grace(struct lock_manager *);
+bool locks_in_grace(struct net *);
+bool opens_in_grace(struct net *);
+
+/*
+ * struct file_lock has a union that some filesystems use to track
+ * their own private info. The NFS side of things is defined here:
+ */
+#include <linux/nfs_fs_i.h>
+
+/*
+ * struct file_lock represents a generic "file lock". It's used to represent
+ * POSIX byte range locks, BSD (flock) locks, and leases. It's important to
+ * note that the same struct is used to represent both a request for a lock and
+ * the lock itself, but the same object is never used for both.
+ *
+ * FIXME: should we create a separate "struct lock_request" to help distinguish
+ * these two uses?
+ *
+ * The various i_flctx lists are ordered by:
+ *
+ * 1) lock owner
+ * 2) lock range start
+ * 3) lock range end
+ *
+ * Obviously, the last two criteria only matter for POSIX locks.
+ */
+struct file_lock {
+ struct file_lock *fl_blocker; /* The lock, that is blocking us */
+ struct list_head fl_list; /* link into file_lock_context */
+ struct hlist_node fl_link; /* node in global lists */
+ struct list_head fl_blocked_requests; /* list of requests with
+ * ->fl_blocker pointing here
+ */
+ struct list_head fl_blocked_member; /* node in
+ * ->fl_blocker->fl_blocked_requests
+ */
+ fl_owner_t fl_owner;
+ unsigned int fl_flags;
+ unsigned char fl_type;
+ unsigned int fl_pid;
+ int fl_link_cpu; /* what cpu's list is this on? */
+ wait_queue_head_t fl_wait;
+ struct file *fl_file;
+ loff_t fl_start;
+ loff_t fl_end;
+
+ struct fasync_struct * fl_fasync; /* for lease break notifications */
+ /* for lease breaks: */
+ unsigned long fl_break_time;
+ unsigned long fl_downgrade_time;
+
+ const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */
+ const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
+ union {
+ struct nfs_lock_info nfs_fl;
+ struct nfs4_lock_info nfs4_fl;
+ struct {
+ struct list_head link; /* link in AFS vnode's pending_locks list */
+ int state; /* state of grant or error if -ve */
+ unsigned int debug_id;
+ } afs;
+ struct {
+ struct inode *inode;
+ } ceph;
+ } fl_u;
+} __randomize_layout;
+
+struct file_lock_context {
+ spinlock_t flc_lock;
+ struct list_head flc_flock;
+ struct list_head flc_posix;
+ struct list_head flc_lease;
+};
+
+#ifdef CONFIG_FILE_LOCKING
+int fcntl_getlk(struct file *, unsigned int, struct flock *);
+int fcntl_setlk(unsigned int, struct file *, unsigned int,
+ struct flock *);
+
+#if BITS_PER_LONG == 32
+int fcntl_getlk64(struct file *, unsigned int, struct flock64 *);
+int fcntl_setlk64(unsigned int, struct file *, unsigned int,
+ struct flock64 *);
+#endif
+
+int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
+int fcntl_getlease(struct file *filp);
+
+/* fs/locks.c */
+void locks_free_lock_context(struct inode *inode);
+void locks_free_lock(struct file_lock *fl);
+void locks_init_lock(struct file_lock *);
+struct file_lock * locks_alloc_lock(void);
+void locks_copy_lock(struct file_lock *, struct file_lock *);
+void locks_copy_conflock(struct file_lock *, struct file_lock *);
+void locks_remove_posix(struct file *, fl_owner_t);
+void locks_remove_file(struct file *);
+void locks_release_private(struct file_lock *);
+void posix_test_lock(struct file *, struct file_lock *);
+int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
+int locks_delete_block(struct file_lock *);
+int vfs_test_lock(struct file *, struct file_lock *);
+int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
+int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
+bool vfs_inode_has_locks(struct inode *inode);
+int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl);
+int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
+void lease_get_mtime(struct inode *, struct timespec64 *time);
+int generic_setlease(struct file *, long, struct file_lock **, void **priv);
+int vfs_setlease(struct file *, long, struct file_lock **, void **);
+int lease_modify(struct file_lock *, int, struct list_head *);
+
+struct notifier_block;
+int lease_register_notifier(struct notifier_block *);
+void lease_unregister_notifier(struct notifier_block *);
+
+struct files_struct;
+void show_fd_locks(struct seq_file *f,
+ struct file *filp, struct files_struct *files);
+bool locks_owner_has_blockers(struct file_lock_context *flctx,
+ fl_owner_t owner);
+
+static inline struct file_lock_context *
+locks_inode_context(const struct inode *inode)
+{
+ return smp_load_acquire(&inode->i_flctx);
+}
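+
+/*
+ * Illustrative use (editor's sketch): the acquire load ensures the
+ * context is fully initialised before the caller walks it:
+ *
+ *	struct file_lock_context *ctx = locks_inode_context(inode);
+ *
+ *	if (ctx && !list_empty_careful(&ctx->flc_posix))
+ *		...
+ */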
+
+#else /* !CONFIG_FILE_LOCKING */
+static inline int fcntl_getlk(struct file *file, unsigned int cmd,
+ struct flock __user *user)
+{
+ return -EINVAL;
+}
+
+static inline int fcntl_setlk(unsigned int fd, struct file *file,
+ unsigned int cmd, struct flock __user *user)
+{
+ return -EACCES;
+}
+
+#if BITS_PER_LONG == 32
+static inline int fcntl_getlk64(struct file *file, unsigned int cmd,
+ struct flock64 *user)
+{
+ return -EINVAL;
+}
+
+static inline int fcntl_setlk64(unsigned int fd, struct file *file,
+ unsigned int cmd, struct flock64 *user)
+{
+ return -EACCES;
+}
+#endif
+static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
+{
+ return -EINVAL;
+}
+
+static inline int fcntl_getlease(struct file *filp)
+{
+ return F_UNLCK;
+}
+
+static inline void
+locks_free_lock_context(struct inode *inode)
+{
+}
+
+static inline void locks_init_lock(struct file_lock *fl)
+{
+ return;
+}
+
+static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
+{
+ return;
+}
+
+static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
+{
+ return;
+}
+
+static inline void locks_remove_posix(struct file *filp, fl_owner_t owner)
+{
+ return;
+}
+
+static inline void locks_remove_file(struct file *filp)
+{
+ return;
+}
+
+static inline void posix_test_lock(struct file *filp, struct file_lock *fl)
+{
+ return;
+}
+
+static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
+ struct file_lock *conflock)
+{
+ return -ENOLCK;
+}
+
+static inline int locks_delete_block(struct file_lock *waiter)
+{
+ return -ENOENT;
+}
+
+static inline int vfs_test_lock(struct file *filp, struct file_lock *fl)
+{
+ return 0;
+}
+
+static inline int vfs_lock_file(struct file *filp, unsigned int cmd,
+ struct file_lock *fl, struct file_lock *conf)
+{
+ return -ENOLCK;
+}
+
+static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
+{
+ return 0;
+}
+
+static inline bool vfs_inode_has_locks(struct inode *inode)
+{
+ return false;
+}
+
+static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
+{
+ return -ENOLCK;
+}
+
+static inline int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
+{
+ return 0;
+}
+
+static inline void lease_get_mtime(struct inode *inode,
+ struct timespec64 *time)
+{
+ return;
+}
+
+static inline int generic_setlease(struct file *filp, long arg,
+ struct file_lock **flp, void **priv)
+{
+ return -EINVAL;
+}
+
+static inline int vfs_setlease(struct file *filp, long arg,
+ struct file_lock **lease, void **priv)
+{
+ return -EINVAL;
+}
+
+static inline int lease_modify(struct file_lock *fl, int arg,
+ struct list_head *dispose)
+{
+ return -EINVAL;
+}
+
+struct files_struct;
+static inline void show_fd_locks(struct seq_file *f,
+ struct file *filp, struct files_struct *files) {}
+static inline bool locks_owner_has_blockers(struct file_lock_context *flctx,
+ fl_owner_t owner)
+{
+ return false;
+}
+
+static inline struct file_lock_context *
+locks_inode_context(const struct inode *inode)
+{
+ return NULL;
+}
+
+#endif /* !CONFIG_FILE_LOCKING */
+
+static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
+{
+ return locks_lock_inode_wait(file_inode(filp), fl);
+}
+
+#ifdef CONFIG_FILE_LOCKING
+static inline int break_lease(struct inode *inode, unsigned int mode)
+{
+ /*
+ * Since this check is lockless, we must ensure that any refcounts
+ * taken are done before checking i_flctx->flc_lease. Otherwise, we
+ * could end up racing with tasks trying to set a new lease on this
+ * file.
+ */
+ smp_mb();
+ if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
+ return __break_lease(inode, mode, FL_LEASE);
+ return 0;
+}
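+
+/*
+ * Illustrative caller (editor's sketch): an open path would typically do
+ *
+ *	error = break_lease(inode, O_RDONLY | O_NONBLOCK);
+ *	if (error)
+ *		return error;	(e.g. -EWOULDBLOCK while the break runs)
+ */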
+
+static inline int break_deleg(struct inode *inode, unsigned int mode)
+{
+ /*
+ * Since this check is lockless, we must ensure that any refcounts
+ * taken are done before checking i_flctx->flc_lease. Otherwise, we
+ * could end up racing with tasks trying to set a new lease on this
+ * file.
+ */
+ smp_mb();
+ if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
+ return __break_lease(inode, mode, FL_DELEG);
+ return 0;
+}
+
+static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
+{
+ int ret;
+
+ ret = break_deleg(inode, O_WRONLY|O_NONBLOCK);
+ if (ret == -EWOULDBLOCK && delegated_inode) {
+ *delegated_inode = inode;
+ ihold(inode);
+ }
+ return ret;
+}
+
+static inline int break_deleg_wait(struct inode **delegated_inode)
+{
+ int ret;
+
+ ret = break_deleg(*delegated_inode, O_WRONLY);
+ iput(*delegated_inode);
+ *delegated_inode = NULL;
+ return ret;
+}
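+
+/*
+ * Typical caller pattern (editor's sketch; do_vfs_op is hypothetical):
+ * retry the operation once the delegation has been broken.
+ *
+ *	retry:
+ *		error = do_vfs_op(inode);
+ *		if (!error)
+ *			error = try_break_deleg(inode, &delegated_inode);
+ *		if (delegated_inode) {
+ *			error = break_deleg_wait(&delegated_inode);
+ *			if (!error)
+ *				goto retry;
+ *		}
+ */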
+
+static inline int break_layout(struct inode *inode, bool wait)
+{
+ smp_mb();
+ if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
+ return __break_lease(inode,
+ wait ? O_WRONLY : O_WRONLY | O_NONBLOCK,
+ FL_LAYOUT);
+ return 0;
+}
+
+#else /* !CONFIG_FILE_LOCKING */
+static inline int break_lease(struct inode *inode, unsigned int mode)
+{
+ return 0;
+}
+
+static inline int break_deleg(struct inode *inode, unsigned int mode)
+{
+ return 0;
+}
+
+static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
+{
+ return 0;
+}
+
+static inline int break_deleg_wait(struct inode **delegated_inode)
+{
+ BUG();
+ return 0;
+}
+
+static inline int break_layout(struct inode *inode, bool wait)
+{
+ return 0;
+}
+
+#endif /* CONFIG_FILE_LOCKING */
+
+#endif /* _LINUX_FILELOCK_H */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index bfef1e5734f8..bbce89937fde 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1,12 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Linux Socket Filter Data Structures
*/
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__
-#include <stdarg.h>
-
#include <linux/atomic.h>
+#include <linux/bpf.h>
#include <linux/refcount.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
@@ -14,19 +14,30 @@
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
+#include <linux/sched/clock.h>
#include <linux/capability.h>
-#include <linux/cryptohash.h>
#include <linux/set_memory.h>
+#include <linux/kallsyms.h>
+#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
+#include <linux/sockptr.h>
+#include <crypto/sha1.h>
+#include <linux/u64_stats_sync.h>
#include <net/sch_generic.h>
+#include <asm/byteorder.h>
#include <uapi/linux/filter.h>
-#include <uapi/linux/bpf.h>
struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;
+struct xdp_rxq_info;
+struct xdp_buff;
+struct sock_reuseport;
+struct ctl_table;
+struct ctl_table_header;
/* ArgX, context and stack frame pointer register positions. Note,
* Arg1, Arg2, Arg3, etc are used as argument mappings of function
@@ -43,20 +54,29 @@ struct bpf_prog_aux;
/* Additional register mappings for converted user programs. */
#define BPF_REG_A BPF_REG_0
#define BPF_REG_X BPF_REG_7
-#define BPF_REG_TMP BPF_REG_8
+#define BPF_REG_TMP BPF_REG_2 /* scratch reg */
+#define BPF_REG_D BPF_REG_8 /* data, callee-saved */
+#define BPF_REG_H BPF_REG_9 /* hlen, callee-saved */
-/* Kernel hidden auxiliary/helper register for hardening step.
- * Only used by eBPF JITs. It's nothing more than a temporary
- * register that JITs use internally, only that here it's part
- * of eBPF instructions that have been rewritten for blinding
- * constants. See JIT pre-step in bpf_jit_blind_constants().
- */
+/* Kernel hidden auxiliary/helper register. */
#define BPF_REG_AX MAX_BPF_REG
-#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)
+#define MAX_BPF_EXT_REG (MAX_BPF_REG + 1)
+#define MAX_BPF_JIT_REG MAX_BPF_EXT_REG
/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL 0xf0
+/* unused opcode to mark special load instruction. Same as BPF_ABS */
+#define BPF_PROBE_MEM 0x20
+
+/* unused opcode to mark call to interpreter with arguments */
+#define BPF_CALL_ARGS 0xe0
+
+/* unused opcode to mark speculation barrier for mitigating
+ * Speculative Store Bypass
+ */
+#define BPF_NOSPEC 0xc0
+
/* As per nm, we expose JITed images as text (code) section for
* kallsyms. That way, tools like perf can find it to match
* addresses.
@@ -150,6 +170,20 @@ struct bpf_prog_aux;
.off = 0, \
.imm = IMM })
+/* Special form of mov32, used for doing explicit zero extension on dst. */
+#define BPF_ZEXT_REG(DST) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU | BPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = DST, \
+ .off = 0, \
+ .imm = 1 })
+
+static inline bool insn_is_zext(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1;
+}
+
/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM) \
BPF_LD_IMM64_RAW(DST, 0, IMM)
@@ -230,15 +264,32 @@ struct bpf_prog_aux;
.off = OFF, \
.imm = 0 })
-/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
-#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
+/*
+ * Atomic operations:
+ *
+ * BPF_ADD *(uint *) (dst_reg + off16) += src_reg
+ * BPF_AND *(uint *) (dst_reg + off16) &= src_reg
+ * BPF_OR *(uint *) (dst_reg + off16) |= src_reg
+ * BPF_XOR *(uint *) (dst_reg + off16) ^= src_reg
+ * BPF_ADD | BPF_FETCH src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
+ * BPF_AND | BPF_FETCH src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
+ * BPF_OR | BPF_FETCH src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
+ * BPF_XOR | BPF_FETCH src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
+ * BPF_XCHG src_reg = atomic_xchg(dst_reg + off16, src_reg)
+ * BPF_CMPXCHG r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
+ */
+
+#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF) \
((struct bpf_insn) { \
- .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
+ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = OFF, \
- .imm = 0 })
+ .imm = OP })
+
+/* Legacy alias */
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
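+
+/*
+ * Illustrative use (editor's sketch): emit a 64-bit atomic fetch-add,
+ * i.e. src_reg = atomic_fetch_add(dst_reg + off, src_reg):
+ *
+ *	insn = BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH,
+ *			     BPF_REG_1, BPF_REG_2, 0);
+ */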
/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
@@ -270,6 +321,26 @@ struct bpf_prog_aux;
.off = OFF, \
.imm = IMM })
+/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */
+
+#define BPF_JMP32_REG(OP, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
+/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */
+
+#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = OFF, \
+ .imm = IMM })
+
/* Unconditional jumps, goto pc + off16 */
#define BPF_JMP_A(OFF) \
@@ -280,7 +351,19 @@ struct bpf_prog_aux;
.off = OFF, \
.imm = 0 })
-/* Function call */
+/* Relative call */
+
+#define BPF_CALL_REL(TGT) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP | BPF_CALL, \
+ .dst_reg = 0, \
+ .src_reg = BPF_PSEUDO_CALL, \
+ .off = 0, \
+ .imm = TGT })
+
+/* Convert function address to BPF immediate */
+
+#define BPF_CALL_IMM(x) ((void *)(x) - (void *)__bpf_call_base)
#define BPF_EMIT_CALL(FUNC) \
((struct bpf_insn) { \
@@ -288,7 +371,7 @@ struct bpf_prog_aux;
.dst_reg = 0, \
.src_reg = 0, \
.off = 0, \
- .imm = ((FUNC) - __bpf_call_base) })
+ .imm = BPF_CALL_IMM(FUNC) })
/* Raw code statement block */
@@ -310,6 +393,16 @@ struct bpf_prog_aux;
.off = 0, \
.imm = 0 })
+/* Speculation barrier */
+
+#define BPF_ST_NOSPEC() \
+ ((struct bpf_insn) { \
+ .code = BPF_ST | BPF_NOSPEC, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = 0 })
+
/* Internal classic blocks for direct assignment */
#define __BPF_STMT(CODE, K) \
@@ -359,14 +452,14 @@ struct bpf_prog_aux;
#define BPF_FIELD_SIZEOF(type, field) \
({ \
- const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
+ const int __size = bytes_to_bpf_size(sizeof_field(type, field)); \
BUILD_BUG_ON(__size < 0); \
__size; \
})
#define BPF_LDST_BYTES(insn) \
({ \
- const int __size = bpf_size_to_bytes(BPF_SIZE(insn->code)); \
+ const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \
WARN_ON(__size < 0); \
__size; \
})
@@ -406,10 +499,11 @@ struct bpf_prog_aux;
#define BPF_CALL_x(x, name, ...) \
static __always_inline \
u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
+ typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
{ \
- return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
+ return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
} \
static __always_inline \
u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
@@ -425,55 +519,46 @@ struct bpf_prog_aux;
offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \
offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1
+#if BITS_PER_LONG == 64
+# define bpf_ctx_range_ptr(TYPE, MEMBER) \
+ offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
+#else
+# define bpf_ctx_range_ptr(TYPE, MEMBER) \
+ offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1
+#endif /* BITS_PER_LONG == 64 */
#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \
({ \
- BUILD_BUG_ON(FIELD_SIZEOF(TYPE, MEMBER) != (SIZE)); \
+ BUILD_BUG_ON(sizeof_field(TYPE, MEMBER) != (SIZE)); \
*(PTR_SIZE) = (SIZE); \
offsetof(TYPE, MEMBER); \
})
-#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
u16 len;
compat_uptr_t filter; /* struct sock_filter * */
};
-#endif
struct sock_fprog_kern {
u16 len;
struct sock_filter *filter;
};
+/* Some arches need doubleword alignment for their instructions and/or data */
+#define BPF_IMAGE_ALIGNMENT 8
+
struct bpf_binary_header {
- unsigned int pages;
- u8 image[];
+ u32 size;
+ u8 image[] __aligned(BPF_IMAGE_ALIGNMENT);
};
-struct bpf_prog {
- u16 pages; /* Number of allocated pages */
- kmemcheck_bitfield_begin(meta);
- u16 jited:1, /* Is our filter JIT'ed? */
- locked:1, /* Program image locked? */
- gpl_compatible:1, /* Is filter GPL compatible? */
- cb_access:1, /* Is control block accessed? */
- dst_needed:1; /* Do we need dst entry? */
- kmemcheck_bitfield_end(meta);
- enum bpf_prog_type type; /* Type of BPF program */
- u32 len; /* Number of filter blocks */
- u32 jited_len; /* Size of jited insns in bytes */
- u8 tag[BPF_TAG_SIZE];
- struct bpf_prog_aux *aux; /* Auxiliary fields */
- struct sock_fprog_kern *orig_prog; /* Original BPF program */
- unsigned int (*bpf_func)(const void *ctx,
- const struct bpf_insn *insn);
- /* Instructions for interpreter */
- union {
- struct sock_filter insns[0];
- struct bpf_insn insnsi[0];
- };
-};
+struct bpf_prog_stats {
+ u64_stats_t cnt;
+ u64_stats_t nsecs;
+ u64_stats_t misses;
+ struct u64_stats_sync syncp;
+} __aligned(2 * sizeof(u64));
struct sk_filter {
refcount_t refcnt;
@@ -481,33 +566,135 @@ struct sk_filter {
struct bpf_prog *prog;
};
-#define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)
+DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
+
+extern struct mutex nf_conn_btf_access_lock;
+extern int (*nfct_btf_struct_access)(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ int off, int size);
+
+typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
+ const struct bpf_insn *insnsi,
+ unsigned int (*bpf_func)(const void *,
+ const struct bpf_insn *));
+
+static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
+ const void *ctx,
+ bpf_dispatcher_fn dfunc)
+{
+ u32 ret;
+
+ cant_migrate();
+ if (static_branch_unlikely(&bpf_stats_enabled_key)) {
+ struct bpf_prog_stats *stats;
+ u64 start = sched_clock();
+ unsigned long flags;
+
+ ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
+ stats = this_cpu_ptr(prog->stats);
+ flags = u64_stats_update_begin_irqsave(&stats->syncp);
+ u64_stats_inc(&stats->cnt);
+ u64_stats_add(&stats->nsecs, sched_clock() - start);
+ u64_stats_update_end_irqrestore(&stats->syncp, flags);
+ } else {
+ ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
+ }
+ return ret;
+}
+
+static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx)
+{
+ return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);
+}
+
+/*
+ * Use in preemptible and therefore migratable context to make sure that
+ * the execution of the BPF program runs on one CPU.
+ *
+ * This uses migrate_disable/enable() explicitly to document that the
+ * invocation of a BPF program does not require reentrancy protection
+ * against a BPF program which is invoked from a preempting task.
+ */
+static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
+ const void *ctx)
+{
+ u32 ret;
+
+ migrate_disable();
+ ret = bpf_prog_run(prog, ctx);
+ migrate_enable();
+ return ret;
+}
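+
+/*
+ * Illustrative use (editor's sketch): from a preemptible (process)
+ * context where the program must not migrate mid-run:
+ *
+ *	u32 ret = bpf_prog_run_pin_on_cpu(prog, ctx);
+ */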
#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
struct bpf_skb_data_end {
struct qdisc_skb_cb qdisc_cb;
+ void *data_meta;
void *data_end;
};
-struct xdp_buff {
- void *data;
- void *data_end;
- void *data_hard_start;
+struct bpf_nh_params {
+ u32 nh_family;
+ union {
+ u32 ipv4_nh;
+ struct in6_addr ipv6_nh;
+ };
};
-/* compute the linear packet data range [data, data_end) which
- * will be accessed by cls_bpf, act_bpf and lwt programs
+struct bpf_redirect_info {
+ u64 tgt_index;
+ void *tgt_value;
+ struct bpf_map *map;
+ u32 flags;
+ u32 kern_flags;
+ u32 map_id;
+ enum bpf_map_type map_type;
+ struct bpf_nh_params nh;
+};
+
+DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
+
+/* flags for bpf_redirect_info kern_flags */
+#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */
+
+/* Compute the linear packet data range [data, data_end) which
+ * will be accessed by various program types (cls_bpf, act_bpf,
+ * lwt, ...). Subsystems allowing direct data access must (!)
+ * ensure that cb[] area can be written to when BPF program is
+ * invoked (otherwise cb[] save/restore is necessary).
*/
-static inline void bpf_compute_data_end(struct sk_buff *skb)
+static inline void bpf_compute_data_pointers(struct sk_buff *skb)
{
struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
- BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
- cb->data_end = skb->data + skb_headlen(skb);
+ BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
+ cb->data_meta = skb->data - skb_metadata_len(skb);
+ cb->data_end = skb->data + skb_headlen(skb);
}
-static inline u8 *bpf_skb_cb(struct sk_buff *skb)
+/* Similar to bpf_compute_data_pointers(), except that it saves the
+ * original cb->data_end so that it can be restored afterwards.
+ */
+static inline void bpf_compute_and_save_data_end(
+ struct sk_buff *skb, void **saved_data_end)
+{
+ struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+
+ *saved_data_end = cb->data_end;
+ cb->data_end = skb->data + skb_headlen(skb);
+}
+
+/* Restore the data_end saved by bpf_compute_and_save_data_end(). */
+static inline void bpf_restore_data_end(
+ struct sk_buff *skb, void *saved_data_end)
+{
+ struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+
+ cb->data_end = saved_data_end;
+}
+
+static inline u8 *bpf_skb_cb(const struct sk_buff *skb)
{
/* eBPF programs may read/write skb->cb[] area to transfer meta
* data between tail calls. Since this also needs to work with
@@ -519,16 +706,18 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
* attached to sockets, we need to clear the bpf_skb_cb() area
* to not leak previous contents to user space.
*/
- BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
- BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
- FIELD_SIZEOF(struct qdisc_skb_cb, data));
+ BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
+ BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) !=
+ sizeof_field(struct qdisc_skb_cb, data));
return qdisc_skb_cb(skb)->data;
}
-static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
- struct sk_buff *skb)
+/* Must be invoked with migration disabled */
+static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
+ const void *ctx)
{
+ const struct sk_buff *skb = ctx;
u8 *cb_data = bpf_skb_cb(skb);
u8 cb_saved[BPF_SKB_CB_LEN];
u32 res;
@@ -538,7 +727,7 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
memset(cb_data, 0, sizeof(cb_saved));
}
- res = BPF_PROG_RUN(prog, skb);
+ res = bpf_prog_run(prog, skb);
if (unlikely(prog->cb_access))
memcpy(cb_data, cb_saved, sizeof(cb_saved));
@@ -546,29 +735,55 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
return res;
}
+static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
+ struct sk_buff *skb)
+{
+ u32 res;
+
+ migrate_disable();
+ res = __bpf_prog_run_save_cb(prog, skb);
+ migrate_enable();
+ return res;
+}
+
static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
struct sk_buff *skb)
{
u8 *cb_data = bpf_skb_cb(skb);
+ u32 res;
if (unlikely(prog->cb_access))
memset(cb_data, 0, BPF_SKB_CB_LEN);
- return BPF_PROG_RUN(prog, skb);
+ res = bpf_prog_run_pin_on_cpu(prog, skb);
+ return res;
}
+DECLARE_BPF_DISPATCHER(xdp)
+
+DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);
+
+u32 xdp_master_redirect(struct xdp_buff *xdp);
+
static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
struct xdp_buff *xdp)
{
- /* Caller needs to hold rcu_read_lock() (!), otherwise program
- * can be released while still running, or map elements could be
- * freed early while still having concurrent users. XDP fastpath
- * already takes rcu_read_lock() when fetching the program, so
- * it's not necessary here anymore.
+ /* Driver XDP hooks are invoked within a single NAPI poll cycle and thus
+ * under local_bh_disable(), which provides the needed RCU protection
+ * for accessing map entries.
*/
- return BPF_PROG_RUN(prog, xdp);
+ u32 act = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
+
+ if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) {
+ if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev))
+ act = xdp_master_redirect(xdp);
+ }
+
+ return act;
}
+void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
+
static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
return prog->len * sizeof(struct bpf_insn);
@@ -577,7 +792,7 @@ static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
{
return round_up(bpf_prog_insn_size(prog) +
- sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
+ sizeof(__be64) + 1, SHA1_BLOCK_SIZE);
}
static inline unsigned int bpf_prog_size(unsigned int proglen)
@@ -596,72 +811,56 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
return prog->type == BPF_PROG_TYPE_UNSPEC;
}
-static inline bool
-bpf_ctx_narrow_access_ok(u32 off, u32 size, const u32 size_default)
+static inline u32 bpf_ctx_off_adjust_machine(u32 size)
{
- bool off_ok;
-#ifdef __LITTLE_ENDIAN
- off_ok = (off & (size_default - 1)) == 0;
-#else
- off_ok = (off & (size_default - 1)) + size == size_default;
-#endif
- return off_ok && size <= size_default && (size & (size - 1)) == 0;
-}
+ const u32 size_machine = sizeof(unsigned long);
-#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
+ if (size > size_machine && size % size_machine == 0)
+ size = size_machine;
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
-{
- fp->locked = 1;
- WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
+ return size;
}
-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
+static inline bool
+bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
{
- if (fp->locked) {
- WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
- /* In case set_memory_rw() fails, we want to be the first
- * to crash here instead of some random place later on.
- */
- fp->locked = 0;
- }
+ return size <= size_default && (size & (size - 1)) == 0;
}
-static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
+static inline u8
+bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default)
{
- WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
-}
+ u8 access_off = off & (size_default - 1);
-static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
-{
- WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
-}
+#ifdef __LITTLE_ENDIAN
+ return access_off;
#else
-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
-{
+ return size_default - (access_off + size);
+#endif
}
-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
-{
-}
+#define bpf_ctx_wide_access_ok(off, size, type, field) \
+ (size == sizeof(__u64) && \
+ off >= offsetof(type, field) && \
+ off + sizeof(__u64) <= offsetofend(type, field) && \
+ off % sizeof(__u64) == 0)
-static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
-{
-}
+#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
-static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
+static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ if (!fp->jited) {
+ set_vm_flush_reset_perms(fp);
+ set_memory_ro((unsigned long)fp, fp->pages);
+ }
+#endif
}
-#endif /* CONFIG_ARCH_HAS_SET_MEMORY */
-static inline struct bpf_binary_header *
-bpf_jit_binary_hdr(const struct bpf_prog *fp)
+static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
- unsigned long real_start = (unsigned long)fp->bpf_func;
- unsigned long addr = real_start & PAGE_MASK;
-
- return (void *)addr;
+ set_vm_flush_reset_perms(hdr);
+ set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
}
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
@@ -673,14 +872,22 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);
+bool bpf_opcode_in_insntable(u8 code);
+
+void bpf_prog_free_linfo(struct bpf_prog *prog);
+void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
+ const u32 *insn_to_jit_off);
+int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog);
+void bpf_prog_jit_attempt_done(struct bpf_prog *prog);
+
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
+struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);
static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
- bpf_prog_unlock_ro(fp);
__bpf_prog_free(fp);
}
@@ -696,37 +903,167 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
+void sk_reuseport_prog_free(struct bpf_prog *prog);
int sk_detach_filter(struct sock *sk);
-int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
- unsigned int len);
+int sk_get_filter(struct sock *sk, sockptr_t optval, unsigned int len);
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+#define __bpf_call_base_args \
+ ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
+ (void *)__bpf_call_base)
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
+bool bpf_jit_needs_zext(void);
+bool bpf_jit_supports_subprog_tailcalls(void);
+bool bpf_jit_supports_kfunc_call(void);
+bool bpf_jit_supports_far_kfunc_call(void);
bool bpf_helper_changes_pkt_data(void *func);
+static inline bool bpf_dump_raw_ok(const struct cred *cred)
+{
+ /* Reconstruction of call-sites is dependent on kallsyms,
+ * thus make dump the same restriction.
+ */
+ return kallsyms_show_value(cred);
+}
+
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
-void bpf_warn_invalid_xdp_action(u32 act);
+int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);
+
+void bpf_clear_redirect_map(struct bpf_map *map);
+
+static inline bool xdp_return_frame_no_direct(void)
+{
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+ return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
+}
+
+static inline void xdp_set_return_frame_no_direct(void)
+{
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+ ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
+}
+
+static inline void xdp_clear_return_frame_no_direct(void)
+{
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+ ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
+}
+
+static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
+ unsigned int pktlen)
+{
+ unsigned int len;
+
+ if (unlikely(!(fwd->flags & IFF_UP)))
+ return -ENETDOWN;
+
+ len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
+ if (pktlen > len)
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+/* The pair of xdp_do_redirect and xdp_do_flush MUST be called in the
+ * same cpu context. Further, for best results, no more than a single map
+ * should be used for the do_redirect/do_flush pair. This limitation
+ * exists because we only track one map and force a flush when the map
+ * changes. This does not appear to be a real limitation for existing
+ * software.
+ */
+int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
+ struct xdp_buff *xdp, struct bpf_prog *prog);
+int xdp_do_redirect(struct net_device *dev,
+ struct xdp_buff *xdp,
+ struct bpf_prog *prog);
+int xdp_do_redirect_frame(struct net_device *dev,
+ struct xdp_buff *xdp,
+ struct xdp_frame *xdpf,
+ struct bpf_prog *prog);
+void xdp_do_flush(void);
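+
+/*
+ * Illustrative driver pattern (editor's sketch): redirect from within a
+ * NAPI poll loop, then flush once on the same CPU before leaving it:
+ *
+ *	err = xdp_do_redirect(dev, &xdp, prog);
+ *	...
+ *	xdp_do_flush();
+ */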
+
+/* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as
+ * it is no longer only flushing maps. Keep this define for compatibility
+ * until all drivers are updated - do not use xdp_do_flush_map() in new code!
+ */
+#define xdp_do_flush_map xdp_do_flush
+
+void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act);
+
+#ifdef CONFIG_INET
+struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
+ struct bpf_prog *prog, struct sk_buff *skb,
+ struct sock *migrating_sk,
+ u32 hash);
+#else
+static inline struct sock *
+bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
+ struct bpf_prog *prog, struct sk_buff *skb,
+ struct sock *migrating_sk,
+ u32 hash)
+{
+ return NULL;
+}
+#endif
#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
+extern long bpf_jit_limit;
+extern long bpf_jit_limit_max;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
+void bpf_jit_fill_hole_with_zero(void *area, unsigned int size);
+
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
unsigned int alignment,
bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);
-
+u64 bpf_jit_alloc_exec_limit(void);
+void *bpf_jit_alloc_exec(unsigned long size);
+void bpf_jit_free_exec(void *addr);
void bpf_jit_free(struct bpf_prog *fp);
+struct bpf_binary_header *
+bpf_jit_binary_pack_hdr(const struct bpf_prog *fp);
+
+void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns);
+void bpf_prog_pack_free(struct bpf_binary_header *hdr);
+
+static inline bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
+{
+ return list_empty(&fp->aux->ksym.lnode) ||
+ fp->aux->ksym.lnode.prev == LIST_POISON2;
+}
+
+struct bpf_binary_header *
+bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **ro_image,
+ unsigned int alignment,
+ struct bpf_binary_header **rw_hdr,
+ u8 **rw_image,
+ bpf_jit_fill_hole_t bpf_fill_ill_insns);
+int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
+ struct bpf_binary_header *ro_header,
+ struct bpf_binary_header *rw_header);
+void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
+ struct bpf_binary_header *rw_header);
+
+int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
+ struct bpf_jit_poke_descriptor *poke);
+
+int bpf_jit_get_func_addr(const struct bpf_prog *prog,
+ const struct bpf_insn *insn, bool extra_pass,
+ u64 *func_addr, bool *func_addr_fixed);
struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
@@ -761,7 +1098,7 @@ static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
return fp->jited && bpf_jit_is_ebpf();
}
-static inline bool bpf_jit_blinding_enabled(void)
+static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
{
/* These are the prerequisites, should someone ever have the
* idea to call blinding outside of them, we make sure to
@@ -769,11 +1106,11 @@ static inline bool bpf_jit_blinding_enabled(void)
*/
if (!bpf_jit_is_ebpf())
return false;
- if (!bpf_jit_enable)
+ if (!prog->jit_requested)
return false;
if (!bpf_jit_harden)
return false;
- if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
+ if (bpf_jit_harden == 1 && bpf_capable())
return false;
return true;
@@ -821,11 +1158,23 @@ static inline bool ebpf_jit_enabled(void)
return false;
}
+static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
+{
+ return false;
+}
+
static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
return false;
}
+static inline int
+bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
+ struct bpf_jit_poke_descriptor *poke)
+{
+ return -ENOTSUPP;
+}
+
static inline void bpf_jit_free(struct bpf_prog *fp)
{
bpf_prog_unlock_free(fp);
@@ -868,8 +1217,11 @@ static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
}
+
#endif /* CONFIG_BPF_JIT */
+void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
+
#define BPF_ANC BIT(15)
static inline bool bpf_needs_clear_a(const struct sock_filter *first)
@@ -919,7 +1271,7 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
BPF_ANCILLARY(RANDOM);
BPF_ANCILLARY(VLAN_TPID);
}
- /* Fallthrough. */
+ fallthrough;
default:
return ftest->code;
}
@@ -928,27 +1280,312 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
int k, unsigned int size);
-static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
- unsigned int size, void *buffer)
-{
- if (k >= 0)
- return skb_header_pointer(skb, k, size, buffer);
-
- return bpf_internal_load_pointer_neg_helper(skb, k, size);
-}
-
static inline int bpf_tell_extensions(void)
{
return SKF_AD_MAX;
}
+struct bpf_sock_addr_kern {
+ struct sock *sk;
+ struct sockaddr *uaddr;
+ /* Temporary "register" to make indirect stores to nested structures
+ * defined above. We need three registers to make such a store, but
+ * only two (src and dst) are available at convert_ctx_access time
+ */
+ u64 tmp_reg;
+ void *t_ctx; /* Attach type specific context. */
+};
+
struct bpf_sock_ops_kern {
struct sock *sk;
- u32 op;
union {
+ u32 args[4];
u32 reply;
u32 replylong[4];
};
+ struct sk_buff *syn_skb;
+ struct sk_buff *skb;
+ void *skb_data_end;
+ u8 op;
+ u8 is_fullsock;
+ u8 remaining_opt_len;
+ u64 temp; /* temp and everything after is not
+ * initialized to 0 before calling
+ * the BPF program. New fields that
+ * should be initialized to 0 should
+ * be inserted before temp.
+ * temp is scratch storage used by
+ * sock_ops_convert_ctx_access
+ * as temporary storage of a register.
+ */
+};
+
+struct bpf_sysctl_kern {
+ struct ctl_table_header *head;
+ struct ctl_table *table;
+ void *cur_val;
+ size_t cur_len;
+ void *new_val;
+ size_t new_len;
+ int new_updated;
+ int write;
+ loff_t *ppos;
+ /* Temporary "register" for indirect stores to ppos. */
+ u64 tmp_reg;
};
+#define BPF_SOCKOPT_KERN_BUF_SIZE 32
+struct bpf_sockopt_buf {
+ u8 data[BPF_SOCKOPT_KERN_BUF_SIZE];
+};
+
+struct bpf_sockopt_kern {
+ struct sock *sk;
+ u8 *optval;
+ u8 *optval_end;
+ s32 level;
+ s32 optname;
+ s32 optlen;
+ /* for retval in struct bpf_cg_run_ctx */
+ struct task_struct *current_task;
+	/* Temporary "register" for indirect stores. */
+ u64 tmp_reg;
+};
+
+int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len);
+
+struct bpf_sk_lookup_kern {
+ u16 family;
+ u16 protocol;
+ __be16 sport;
+ u16 dport;
+ struct {
+ __be32 saddr;
+ __be32 daddr;
+ } v4;
+ struct {
+ const struct in6_addr *saddr;
+ const struct in6_addr *daddr;
+ } v6;
+ struct sock *selected_sk;
+ u32 ingress_ifindex;
+ bool no_reuseport;
+};
+
+extern struct static_key_false bpf_sk_lookup_enabled;
+
+/* Runners for BPF_SK_LOOKUP programs to invoke on socket lookup.
+ *
+ * Allowed return values for a BPF SK_LOOKUP program are SK_PASS and
+ * SK_DROP. Their meaning is as follows:
+ *
+ * SK_PASS && ctx.selected_sk != NULL: use selected_sk as lookup result
+ * SK_PASS && ctx.selected_sk == NULL: continue to htable-based socket lookup
+ * SK_DROP : terminate lookup with -ECONNREFUSED
+ *
+ * This macro aggregates return values and selected sockets from
+ * multiple BPF programs according to the following rules, in order:
+ *
+ * 1. If any program returned SK_PASS and a non-NULL ctx.selected_sk,
+ * macro result is SK_PASS and last ctx.selected_sk is used.
+ * 2. If any program returned SK_DROP return value,
+ * macro result is SK_DROP.
+ * 3. Otherwise result is SK_PASS and ctx.selected_sk is NULL.
+ *
+ * Caller must ensure that the prog array is non-NULL, and that the
+ * array as well as the programs it contains remain valid.
+ */
+#define BPF_PROG_SK_LOOKUP_RUN_ARRAY(array, ctx, func) \
+ ({ \
+ struct bpf_sk_lookup_kern *_ctx = &(ctx); \
+ struct bpf_prog_array_item *_item; \
+ struct sock *_selected_sk = NULL; \
+ bool _no_reuseport = false; \
+ struct bpf_prog *_prog; \
+ bool _all_pass = true; \
+ u32 _ret; \
+ \
+ migrate_disable(); \
+ _item = &(array)->items[0]; \
+ while ((_prog = READ_ONCE(_item->prog))) { \
+ /* restore most recent selection */ \
+ _ctx->selected_sk = _selected_sk; \
+ _ctx->no_reuseport = _no_reuseport; \
+ \
+ _ret = func(_prog, _ctx); \
+ if (_ret == SK_PASS && _ctx->selected_sk) { \
+ /* remember last non-NULL socket */ \
+ _selected_sk = _ctx->selected_sk; \
+ _no_reuseport = _ctx->no_reuseport; \
+ } else if (_ret == SK_DROP && _all_pass) { \
+ _all_pass = false; \
+ } \
+ _item++; \
+ } \
+ _ctx->selected_sk = _selected_sk; \
+ _ctx->no_reuseport = _no_reuseport; \
+ migrate_enable(); \
+ _all_pass || _selected_sk ? SK_PASS : SK_DROP; \
+ })
+
+static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr, const u16 dport,
+ const int ifindex, struct sock **psk)
+{
+ struct bpf_prog_array *run_array;
+ struct sock *selected_sk = NULL;
+ bool no_reuseport = false;
+
+ rcu_read_lock();
+ run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]);
+ if (run_array) {
+ struct bpf_sk_lookup_kern ctx = {
+ .family = AF_INET,
+ .protocol = protocol,
+ .v4.saddr = saddr,
+ .v4.daddr = daddr,
+ .sport = sport,
+ .dport = dport,
+ .ingress_ifindex = ifindex,
+ };
+ u32 act;
+
+ act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
+ if (act == SK_PASS) {
+ selected_sk = ctx.selected_sk;
+ no_reuseport = ctx.no_reuseport;
+ } else {
+ selected_sk = ERR_PTR(-ECONNREFUSED);
+ }
+ }
+ rcu_read_unlock();
+ *psk = selected_sk;
+ return no_reuseport;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
+ const struct in6_addr *saddr,
+ const __be16 sport,
+ const struct in6_addr *daddr,
+ const u16 dport,
+ const int ifindex, struct sock **psk)
+{
+ struct bpf_prog_array *run_array;
+ struct sock *selected_sk = NULL;
+ bool no_reuseport = false;
+
+ rcu_read_lock();
+ run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]);
+ if (run_array) {
+ struct bpf_sk_lookup_kern ctx = {
+ .family = AF_INET6,
+ .protocol = protocol,
+ .v6.saddr = saddr,
+ .v6.daddr = daddr,
+ .sport = sport,
+ .dport = dport,
+ .ingress_ifindex = ifindex,
+ };
+ u32 act;
+
+ act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
+ if (act == SK_PASS) {
+ selected_sk = ctx.selected_sk;
+ no_reuseport = ctx.no_reuseport;
+ } else {
+ selected_sk = ERR_PTR(-ECONNREFUSED);
+ }
+ }
+ rcu_read_unlock();
+ *psk = selected_sk;
+ return no_reuseport;
+}
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+
+static __always_inline long __bpf_xdp_redirect_map(struct bpf_map *map, u64 index,
+ u64 flags, const u64 flag_mask,
+ void *lookup_elem(struct bpf_map *map, u32 key))
+{
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX;
+
+ /* Lower bits of the flags are used as return code on lookup failure */
+ if (unlikely(flags & ~(action_mask | flag_mask)))
+ return XDP_ABORTED;
+
+ ri->tgt_value = lookup_elem(map, index);
+ if (unlikely(!ri->tgt_value) && !(flags & BPF_F_BROADCAST)) {
+ /* If the lookup fails we want to clear out the state in the
+ * redirect_info struct completely, so that if an eBPF program
+ * performs multiple lookups, the last one always takes
+ * precedence.
+ */
+		ri->map_id = INT_MAX; /* Valid map id idr range: [1, INT_MAX) */
+ ri->map_type = BPF_MAP_TYPE_UNSPEC;
+ return flags & action_mask;
+ }
+
+ ri->tgt_index = index;
+ ri->map_id = map->id;
+ ri->map_type = map->map_type;
+
+ if (flags & BPF_F_BROADCAST) {
+ WRITE_ONCE(ri->map, map);
+ ri->flags = flags;
+ } else {
+ WRITE_ONCE(ri->map, NULL);
+ ri->flags = 0;
+ }
+
+ return XDP_REDIRECT;
+}
+
+#ifdef CONFIG_NET
+int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len);
+int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from,
+ u32 len, u64 flags);
+int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len);
+int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len);
+void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len);
+void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off,
+ void *buf, unsigned long len, bool flush);
+#else /* CONFIG_NET */
+static inline int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset,
+ void *to, u32 len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset,
+ const void *from, u32 len, u64 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset,
+ void *buf, u32 len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset,
+ void *buf, u32 len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len)
+{
+ return NULL;
+}
+
+static inline void *bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, void *buf,
+ unsigned long len, bool flush)
+{
+ return NULL;
+}
+#endif /* CONFIG_NET */
+
#endif /* __LINUX_FILTER_H__ */
diff --git a/include/linux/find.h b/include/linux/find.h
new file mode 100644
index 000000000000..5e4f39ef2e72
--- /dev/null
+++ b/include/linux/find.h
@@ -0,0 +1,670 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_FIND_H_
+#define __LINUX_FIND_H_
+
+#ifndef __LINUX_BITMAP_H
+#error only <linux/bitmap.h> can be included directly
+#endif
+
+#include <linux/bitops.h>
+
+unsigned long _find_next_bit(const unsigned long *addr1, unsigned long nbits,
+ unsigned long start);
+unsigned long _find_next_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start);
+unsigned long _find_next_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start);
+unsigned long _find_next_or_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start);
+unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
+ unsigned long start);
+extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
+unsigned long __find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n);
+unsigned long __find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n);
+unsigned long __find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n);
+unsigned long __find_nth_and_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ const unsigned long *addr3, unsigned long size,
+ unsigned long n);
+extern unsigned long _find_first_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size);
+extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
+extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size);
+
+#ifdef __BIG_ENDIAN
+unsigned long _find_first_zero_bit_le(const unsigned long *addr, unsigned long size);
+unsigned long _find_next_zero_bit_le(const unsigned long *addr,
+				     unsigned long size, unsigned long offset);
+unsigned long _find_next_bit_le(const unsigned long *addr,
+				unsigned long size, unsigned long offset);
+#endif
+
+#ifndef find_next_bit
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_bit(addr, size, offset);
+}
+#endif
+
+#ifndef find_next_and_bit
+/**
+ * find_next_and_bit - find the next set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr1 & *addr2 & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_and_bit(addr1, addr2, size, offset);
+}
+#endif
+
+#ifndef find_next_andnot_bit
+/**
+ * find_next_andnot_bit - find the next set bit in *addr1 excluding all the bits
+ * in *addr2
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_andnot_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr1 & ~*addr2 & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_andnot_bit(addr1, addr2, size, offset);
+}
+#endif
+
+#ifndef find_next_or_bit
+/**
+ * find_next_or_bit - find the next set bit in either memory region
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_or_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = (*addr1 | *addr2) & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_or_bit(addr1, addr2, size, offset);
+}
+#endif
+
+#ifndef find_next_zero_bit
+/**
+ * find_next_zero_bit - find the next cleared bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number of the next zero bit.
+ * If no bits are zero, returns @size.
+ */
+static inline
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr | ~GENMASK(size - 1, offset);
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_next_zero_bit(addr, size, offset);
+}
+#endif
+
+#ifndef find_first_bit
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ *
+ * Returns the bit number of the first set bit.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr & GENMASK(size - 1, 0);
+
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_first_bit(addr, size);
+}
+#endif
+
+/**
+ * find_nth_bit - find N'th set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ * @n: The index of the set bit to find, counting from 0
+ *
+ * The following is semantically equivalent:
+ * idx = find_nth_bit(addr, size, 0);
+ * idx = find_first_bit(addr, size);
+ *
+ * Returns the bit number of the N'th set bit.
+ * If no such bit exists, returns @size.
+ */
+static inline
+unsigned long find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n)
+{
+ if (n >= size)
+ return size;
+
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr & GENMASK(size - 1, 0);
+
+ return val ? fns(val, n) : size;
+ }
+
+ return __find_nth_bit(addr, size, n);
+}
+
+/**
+ * find_nth_and_bit - find N'th set bit in 2 memory regions
+ * @addr1: The 1st address to start the search at
+ * @addr2: The 2nd address to start the search at
+ * @size: The maximum number of bits to search
+ * @n: The index of the set bit to find, counting from 0
+ *
+ * Returns the bit number of the N'th set bit.
+ * If no such bit exists, returns @size.
+ */
+static inline
+unsigned long find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n)
+{
+ if (n >= size)
+ return size;
+
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & *addr2 & GENMASK(size - 1, 0);
+
+ return val ? fns(val, n) : size;
+ }
+
+ return __find_nth_and_bit(addr1, addr2, size, n);
+}
+
+/**
+ * find_nth_andnot_bit - find N'th set bit in 2 memory regions,
+ * flipping bits in 2nd region
+ * @addr1: The 1st address to start the search at
+ * @addr2: The 2nd address to start the search at
+ * @size: The maximum number of bits to search
+ * @n: The index of the set bit to find, counting from 0
+ *
+ * Returns the bit number of the N'th set bit.
+ * If no such bit exists, returns @size.
+ */
+static inline
+unsigned long find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n)
+{
+ if (n >= size)
+ return size;
+
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & (~*addr2) & GENMASK(size - 1, 0);
+
+ return val ? fns(val, n) : size;
+ }
+
+ return __find_nth_andnot_bit(addr1, addr2, size, n);
+}
+
+/**
+ * find_nth_and_andnot_bit - find N'th set bit in 2 memory regions,
+ * excluding those set in 3rd region
+ * @addr1: The 1st address to start the search at
+ * @addr2: The 2nd address to start the search at
+ * @addr3: The 3rd address to start the search at
+ * @size: The maximum number of bits to search
+ * @n: The index of the set bit to find, counting from 0
+ *
+ * Returns the bit number of the N'th set bit.
+ * If no such bit exists, returns @size.
+ */
+static __always_inline
+unsigned long find_nth_and_andnot_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ const unsigned long *addr3,
+ unsigned long size, unsigned long n)
+{
+ if (n >= size)
+ return size;
+
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & *addr2 & (~*addr3) & GENMASK(size - 1, 0);
+
+ return val ? fns(val, n) : size;
+ }
+
+ return __find_nth_and_andnot_bit(addr1, addr2, addr3, size, n);
+}
+
+#ifndef find_first_and_bit
+/**
+ * find_first_and_bit - find the first set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number of the first set bit in both regions.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_first_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & *addr2 & GENMASK(size - 1, 0);
+
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_first_and_bit(addr1, addr2, size);
+}
+#endif
+
+#ifndef find_first_zero_bit
+/**
+ * find_first_zero_bit - find the first cleared bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ *
+ * Returns the bit number of the first cleared bit.
+ * If no bits are zero, returns @size.
+ */
+static inline
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr | ~GENMASK(size - 1, 0);
+
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_first_zero_bit(addr, size);
+}
+#endif
+
+#ifndef find_last_bit
+/**
+ * find_last_bit - find the last set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The number of bits to search
+ *
+ * Returns the bit number of the last set bit, or @size if no bits are set.
+ */
+static inline
+unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr & GENMASK(size - 1, 0);
+
+ return val ? __fls(val) : size;
+ }
+
+ return _find_last_bit(addr, size);
+}
+#endif
+
+/**
+ * find_next_and_bit_wrap - find the next set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit, or the first set bit up to @offset.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_and_bit_wrap(const unsigned long *addr1,
+ const unsigned long *addr2,
+ unsigned long size, unsigned long offset)
+{
+ unsigned long bit = find_next_and_bit(addr1, addr2, size, offset);
+
+ if (bit < size)
+ return bit;
+
+ bit = find_first_and_bit(addr1, addr2, offset);
+ return bit < offset ? bit : size;
+}
+
+/**
+ * find_next_bit_wrap - find the next set bit in a memory region
+ * @addr: The first address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit, or the first set bit up to @offset.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_bit_wrap(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
+{
+ unsigned long bit = find_next_bit(addr, size, offset);
+
+ if (bit < size)
+ return bit;
+
+ bit = find_first_bit(addr, offset);
+ return bit < offset ? bit : size;
+}
+
+/*
+ * Helper for for_each_set_bit_wrap(). Make sure you're doing the right thing
+ * before using it on its own.
+ */
+static inline
+unsigned long __for_each_wrap(const unsigned long *bitmap, unsigned long size,
+ unsigned long start, unsigned long n)
+{
+ unsigned long bit;
+
+ /* If not wrapped around */
+ if (n > start) {
+ /* and have a bit, just return it. */
+ bit = find_next_bit(bitmap, size, n);
+ if (bit < size)
+ return bit;
+
+ /* Otherwise, wrap around and ... */
+ n = 0;
+ }
+
+ /* Search the other part. */
+ bit = find_next_bit(bitmap, start, n);
+ return bit < start ? bit : size;
+}
+
+/**
+ * find_next_clump8 - find next 8-bit clump with set bits in a memory region
+ * @clump: location to store copy of found clump
+ * @addr: address to base the search on
+ * @size: bitmap size in number of bits
+ * @offset: bit offset at which to start searching
+ *
+ * Returns the bit offset for the next set clump; the found clump value is
+ * copied to the location pointed by @clump. If no bits are set, returns @size.
+ */
+extern unsigned long find_next_clump8(unsigned long *clump,
+ const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+
+#define find_first_clump8(clump, bits, size) \
+ find_next_clump8((clump), (bits), (size), 0)
+
+#if defined(__LITTLE_ENDIAN)
+
+static inline unsigned long find_next_zero_bit_le(const void *addr,
+ unsigned long size, unsigned long offset)
+{
+ return find_next_zero_bit(addr, size, offset);
+}
+
+static inline unsigned long find_next_bit_le(const void *addr,
+ unsigned long size, unsigned long offset)
+{
+ return find_next_bit(addr, size, offset);
+}
+
+static inline unsigned long find_first_zero_bit_le(const void *addr,
+ unsigned long size)
+{
+ return find_first_zero_bit(addr, size);
+}
+
+#elif defined(__BIG_ENDIAN)
+
+#ifndef find_next_zero_bit_le
+static inline
+unsigned long find_next_zero_bit_le(const void *addr,
+				    unsigned long size, unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *(const unsigned long *)addr;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = swab(val) | ~GENMASK(size - 1, offset);
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_next_zero_bit_le(addr, size, offset);
+}
+#endif
+
+#ifndef find_first_zero_bit_le
+static inline
+unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = swab(*(const unsigned long *)addr) | ~GENMASK(size - 1, 0);
+
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_first_zero_bit_le(addr, size);
+}
+#endif
+
+#ifndef find_next_bit_le
+static inline
+unsigned long find_next_bit_le(const void *addr,
+			       unsigned long size, unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *(const unsigned long *)addr;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = swab(val) & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_bit_le(addr, size, offset);
+}
+#endif
+
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+
+#define for_each_set_bit(bit, addr, size) \
+ for ((bit) = 0; (bit) = find_next_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
+
+#define for_each_and_bit(bit, addr1, addr2, size) \
+ for ((bit) = 0; \
+ (bit) = find_next_and_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
+ (bit)++)
+
+#define for_each_andnot_bit(bit, addr1, addr2, size) \
+ for ((bit) = 0; \
+ (bit) = find_next_andnot_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
+ (bit)++)
+
+#define for_each_or_bit(bit, addr1, addr2, size) \
+ for ((bit) = 0; \
+ (bit) = find_next_or_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
+ (bit)++)
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_from(bit, addr, size) \
+ for (; (bit) = find_next_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
+
+#define for_each_clear_bit(bit, addr, size) \
+ for ((bit) = 0; \
+ (bit) = find_next_zero_bit((addr), (size), (bit)), (bit) < (size); \
+ (bit)++)
+
+/* same as for_each_clear_bit() but use bit as value to start with */
+#define for_each_clear_bit_from(bit, addr, size) \
+ for (; (bit) = find_next_zero_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
+
+/**
+ * for_each_set_bitrange - iterate over all set bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first set bit)
+ * @e: bit offset of end of current bitrange (first unset bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_set_bitrange(b, e, addr, size) \
+ for ((b) = 0; \
+ (b) = find_next_bit((addr), (size), b), \
+ (e) = find_next_zero_bit((addr), (size), (b) + 1), \
+ (b) < (size); \
+ (b) = (e) + 1)
+
+/**
+ * for_each_set_bitrange_from - iterate over all set bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first set bit); must be initialized
+ * @e: bit offset of end of current bitrange (first unset bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_set_bitrange_from(b, e, addr, size) \
+ for (; \
+ (b) = find_next_bit((addr), (size), (b)), \
+ (e) = find_next_zero_bit((addr), (size), (b) + 1), \
+ (b) < (size); \
+ (b) = (e) + 1)
+
+/**
+ * for_each_clear_bitrange - iterate over all unset bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first unset bit)
+ * @e: bit offset of end of current bitrange (first set bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_clear_bitrange(b, e, addr, size) \
+ for ((b) = 0; \
+ (b) = find_next_zero_bit((addr), (size), (b)), \
+ (e) = find_next_bit((addr), (size), (b) + 1), \
+ (b) < (size); \
+ (b) = (e) + 1)
+
+/**
+ * for_each_clear_bitrange_from - iterate over all unset bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first unset bit); must be initialized
+ * @e: bit offset of end of current bitrange (first set bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_clear_bitrange_from(b, e, addr, size) \
+ for (; \
+ (b) = find_next_zero_bit((addr), (size), (b)), \
+ (e) = find_next_bit((addr), (size), (b) + 1), \
+ (b) < (size); \
+ (b) = (e) + 1)
+
+/**
+ * for_each_set_bit_wrap - iterate over all set bits starting from @start, and
+ * wrapping around the end of bitmap.
+ * @bit: offset for current iteration
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ * @start: Starting bit for bitmap traversing, wrapping around the bitmap end
+ */
+#define for_each_set_bit_wrap(bit, addr, size, start) \
+ for ((bit) = find_next_bit_wrap((addr), (size), (start)); \
+ (bit) < (size); \
+ (bit) = __for_each_wrap((addr), (size), (start), (bit) + 1))
+
+/**
+ * for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
+ * @start: bit offset to start search and to store the current iteration offset
+ * @clump: location to store copy of current 8-bit clump
+ * @bits: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_set_clump8(start, clump, bits, size) \
+ for ((start) = find_first_clump8(&(clump), (bits), (size)); \
+ (start) < (size); \
+ (start) = find_next_clump8(&(clump), (bits), (size), (start) + 8))
+
+#endif /* __LINUX_FIND_H_ */
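A minimal usage sketch of the new header (the function and bitmap names are hypothetical): walk the busy slots of a bitmap and claim the first free one.

static int claim_free_slot(unsigned long *map, unsigned long nbits)
{
	unsigned long bit;

	for_each_set_bit(bit, map, nbits)
		pr_debug("slot %lu is busy\n", bit);

	bit = find_first_zero_bit(map, nbits);
	if (bit == nbits)
		return -ENOSPC;		/* every slot taken */

	__set_bit(bit, map);
	return bit;
}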
diff --git a/include/linux/fips.h b/include/linux/fips.h
index f8fb07b0b6b8..c6961e932fef 100644
--- a/include/linux/fips.h
+++ b/include/linux/fips.h
@@ -1,10 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FIPS_H
#define _FIPS_H
#ifdef CONFIG_CRYPTO_FIPS
extern int fips_enabled;
+extern struct atomic_notifier_head fips_fail_notif_chain;
+
+void fips_fail_notify(void);
+
#else
#define fips_enabled 0
+
+static inline void fips_fail_notify(void) {}
+
#endif
#endif
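The new fips_fail_notif_chain is an ordinary atomic notifier head, so a subscriber is wired up with the standard notifier API. A hedged sketch (handler and variable names hypothetical):

static int fips_fail_cb(struct notifier_block *nb, unsigned long act, void *data)
{
	pr_emerg("FIPS self-test failure signalled\n");
	return NOTIFY_OK;
}

static struct notifier_block fips_fail_nb = { .notifier_call = fips_fail_cb };

/* during init: */
/* atomic_notifier_chain_register(&fips_fail_notif_chain, &fips_fail_nb); */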
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index d4b7683c722d..1716c01c4e54 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FIREWIRE_H
#define _LINUX_FIREWIRE_H
@@ -149,6 +150,8 @@ static inline void fw_card_put(struct fw_card *card)
kref_put(&card->kref, fw_card_release);
}
+int fw_card_read_cycle_time(struct fw_card *card, u32 *cycle_time);
+
struct fw_attribute_group {
struct attribute_group *groups[2];
struct attribute_group group;
@@ -205,10 +208,7 @@ struct fw_device {
struct fw_attribute_group attribute_group;
};
-static inline struct fw_device *fw_device(struct device *dev)
-{
- return container_of(dev, struct fw_device, device);
-}
+#define fw_device(dev) container_of_const(dev, struct fw_device, device)
static inline int fw_device_is_shutdown(struct fw_device *device)
{
@@ -226,10 +226,7 @@ struct fw_unit {
struct fw_attribute_group attribute_group;
};
-static inline struct fw_unit *fw_unit(struct device *dev)
-{
- return container_of(dev, struct fw_unit, device);
-}
+#define fw_unit(dev) container_of_const(dev, struct fw_unit, device)
static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
{
@@ -243,10 +240,7 @@ static inline void fw_unit_put(struct fw_unit *unit)
put_device(&unit->device);
}
-static inline struct fw_device *fw_parent_device(struct fw_unit *unit)
-{
- return fw_device(unit->device.parent);
-}
+#define fw_parent_device(unit) fw_device(unit->device.parent)
struct ieee1394_device_id;
@@ -275,9 +269,8 @@ typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
* Otherwise there is a danger of recursion of inbound and outbound
* transactions from and to the local node.
*
- * The callback is responsible that either fw_send_response() or kfree()
- * is called on the @request, except for FCP registers for which the core
- * takes care of that.
+ * The callback is responsible for ensuring that fw_send_response() is called
+ * on the @request, except for FCP registers, for which the core takes care of
+ * that.
*/
typedef void (*fw_address_callback_t)(struct fw_card *card,
struct fw_request *request,
@@ -351,6 +344,7 @@ void fw_core_remove_address_handler(struct fw_address_handler *handler);
void fw_send_response(struct fw_card *card,
struct fw_request *request, int rcode);
int fw_get_request_speed(struct fw_request *request);
+u32 fw_request_get_timestamp(const struct fw_request *request);
void fw_send_request(struct fw_card *card, struct fw_transaction *t,
int tcode, int destination_id, int generation, int speed,
unsigned long long offset, void *payload, size_t length,
@@ -435,6 +429,12 @@ typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
void *header, void *data);
typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context,
dma_addr_t completed, void *data);
+
+union fw_iso_callback {
+ fw_iso_callback_t sc;
+ fw_iso_mc_callback_t mc;
+};
+
struct fw_iso_context {
struct fw_card *card;
int type;
@@ -442,10 +442,7 @@ struct fw_iso_context {
int speed;
bool drop_overflow_headers;
size_t header_size;
- union {
- fw_iso_callback_t sc;
- fw_iso_mc_callback_t mc;
- } callback;
+ union fw_iso_callback callback;
void *callback_data;
};
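To illustrate the tightened fw_address_callback_t contract above, a sketch of a handler that always completes the request (the handler name and rcode choice are illustrative):

static void example_region_callback(struct fw_card *card, struct fw_request *request,
				    int tcode, int destination, int source,
				    int generation, unsigned long long offset,
				    void *payload, size_t length, void *callback_data)
{
	/* ... inspect tcode and payload ... */
	fw_send_response(card, request, RCODE_COMPLETE);
}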
diff --git a/include/linux/firmware-map.h b/include/linux/firmware-map.h
index 71d4fa721db9..3e1077e99002 100644
--- a/include/linux/firmware-map.h
+++ b/include/linux/firmware-map.h
@@ -1,17 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/firmware-map.h:
* Copyright (C) 2008 SUSE LINUX Products GmbH
* by Bernhard Walle <bernhard.walle@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef _LINUX_FIRMWARE_MAP_H
#define _LINUX_FIRMWARE_MAP_H
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index b1f9f0ccb8ac..de7fea3bca51 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FIRMWARE_H
#define _LINUX_FIRMWARE_H
@@ -5,42 +6,99 @@
#include <linux/compiler.h>
#include <linux/gfp.h>
-#define FW_ACTION_NOHOTPLUG 0
-#define FW_ACTION_HOTPLUG 1
+#define FW_ACTION_NOUEVENT 0
+#define FW_ACTION_UEVENT 1
struct firmware {
size_t size;
const u8 *data;
- struct page **pages;
/* firmware loader private fields */
void *priv;
};
-struct module;
-struct device;
+/**
+ * enum fw_upload_err - firmware upload error codes
+ * @FW_UPLOAD_ERR_NONE: returned to indicate success
+ * @FW_UPLOAD_ERR_HW_ERROR: error signalled by hardware, see kernel log
+ * @FW_UPLOAD_ERR_TIMEOUT: SW timed out on handshake with HW/firmware
+ * @FW_UPLOAD_ERR_CANCELED: upload was cancelled by the user
+ * @FW_UPLOAD_ERR_BUSY: there is an upload operation already in progress
+ * @FW_UPLOAD_ERR_INVALID_SIZE: invalid firmware image size
+ * @FW_UPLOAD_ERR_RW_ERROR: read or write to HW failed, see kernel log
+ * @FW_UPLOAD_ERR_WEAROUT: FLASH device is approaching wear-out, wait & retry
+ * @FW_UPLOAD_ERR_MAX: Maximum error code marker
+ */
+enum fw_upload_err {
+ FW_UPLOAD_ERR_NONE,
+ FW_UPLOAD_ERR_HW_ERROR,
+ FW_UPLOAD_ERR_TIMEOUT,
+ FW_UPLOAD_ERR_CANCELED,
+ FW_UPLOAD_ERR_BUSY,
+ FW_UPLOAD_ERR_INVALID_SIZE,
+ FW_UPLOAD_ERR_RW_ERROR,
+ FW_UPLOAD_ERR_WEAROUT,
+ FW_UPLOAD_ERR_MAX
+};
-struct builtin_fw {
- char *name;
- void *data;
- unsigned long size;
+struct fw_upload {
+ void *dd_handle; /* reference to parent driver */
+ void *priv; /* firmware loader private fields */
};
-/* We have to play tricks here much like stringify() to get the
- __COUNTER__ macro to be expanded as we want it */
-#define __fw_concat1(x, y) x##y
-#define __fw_concat(x, y) __fw_concat1(x, y)
+/**
+ * struct fw_upload_ops - device specific operations to support firmware upload
+ * @prepare: Required: Prepare secure update
+ * @write: Required: The write() op receives the remaining
+ * size to be written and must return the actual
+ * size written or a negative error code. The write()
+ * op will be called repeatedly until all data is
+ * written.
+ * @poll_complete: Required: Check for the completion of the
+ * HW authentication/programming process.
+ * @cancel: Required: Request cancellation of update. This op
+ * is called from the context of a different kernel
+ * thread, so race conditions need to be considered.
+ * @cleanup: Optional: Complements the prepare()
+ * function and is called at the completion
+ * of the update, on success or failure, if the
+ * prepare function succeeded.
+ */
+struct fw_upload_ops {
+ enum fw_upload_err (*prepare)(struct fw_upload *fw_upload,
+ const u8 *data, u32 size);
+ enum fw_upload_err (*write)(struct fw_upload *fw_upload,
+ const u8 *data, u32 offset,
+ u32 size, u32 *written);
+ enum fw_upload_err (*poll_complete)(struct fw_upload *fw_upload);
+ void (*cancel)(struct fw_upload *fw_upload);
+ void (*cleanup)(struct fw_upload *fw_upload);
+};
-#define DECLARE_BUILTIN_FIRMWARE(name, blob) \
- DECLARE_BUILTIN_FIRMWARE_SIZE(name, &(blob), sizeof(blob))
+struct module;
+struct device;
-#define DECLARE_BUILTIN_FIRMWARE_SIZE(name, blob, size) \
- static const struct builtin_fw __fw_concat(__builtin_fw,__COUNTER__) \
- __used __section(.builtin_fw) = { name, blob, size }
+/*
+ * Built-in firmware functionality is only available with FW_LOADER=y, not
+ * with FW_LOADER=m.
+ */
+#ifdef CONFIG_FW_LOADER
+bool firmware_request_builtin(struct firmware *fw, const char *name);
+#else
+static inline bool firmware_request_builtin(struct firmware *fw,
+ const char *name)
+{
+ return false;
+}
+#endif
-#if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE))
+#if IS_REACHABLE(CONFIG_FW_LOADER)
int request_firmware(const struct firmware **fw, const char *name,
struct device *device);
+int firmware_request_nowarn(const struct firmware **fw, const char *name,
+ struct device *device);
+int firmware_request_platform(const struct firmware **fw, const char *name,
+ struct device *device);
int request_firmware_nowait(
struct module *module, bool uevent,
const char *name, struct device *device, gfp_t gfp, void *context,
@@ -49,6 +107,9 @@ int request_firmware_direct(const struct firmware **fw, const char *name,
struct device *device);
int request_firmware_into_buf(const struct firmware **firmware_p,
const char *name, struct device *device, void *buf, size_t size);
+int request_partial_firmware_into_buf(const struct firmware **firmware_p,
+ const char *name, struct device *device,
+ void *buf, size_t size, size_t offset);
void release_firmware(const struct firmware *fw);
#else
@@ -58,6 +119,21 @@ static inline int request_firmware(const struct firmware **fw,
{
return -EINVAL;
}
+
+static inline int firmware_request_nowarn(const struct firmware **fw,
+ const char *name,
+ struct device *device)
+{
+ return -EINVAL;
+}
+
+static inline int firmware_request_platform(const struct firmware **fw,
+ const char *name,
+ struct device *device)
+{
+ return -EINVAL;
+}
+
static inline int request_firmware_nowait(
struct module *module, bool uevent,
const char *name, struct device *device, gfp_t gfp, void *context,
@@ -83,5 +159,41 @@ static inline int request_firmware_into_buf(const struct firmware **firmware_p,
return -EINVAL;
}
+static inline int request_partial_firmware_into_buf
+ (const struct firmware **firmware_p,
+ const char *name,
+ struct device *device,
+ void *buf, size_t size, size_t offset)
+{
+ return -EINVAL;
+}
+
+#endif
+
+#ifdef CONFIG_FW_UPLOAD
+
+struct fw_upload *
+firmware_upload_register(struct module *module, struct device *parent,
+ const char *name, const struct fw_upload_ops *ops,
+ void *dd_handle);
+void firmware_upload_unregister(struct fw_upload *fw_upload);
+
+#else
+
+static inline struct fw_upload *
+firmware_upload_register(struct module *module, struct device *parent,
+ const char *name, const struct fw_upload_ops *ops,
+ void *dd_handle)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void firmware_upload_unregister(struct fw_upload *fw_upload)
+{
+}
+
#endif
+
+int firmware_request_cache(struct device *device, const char *name);
+
#endif
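For reference, the canonical request/release pattern against the API above (the firmware path is illustrative):

static int example_load_fw(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "vendor/example.bin", dev);
	if (ret)
		return ret;

	/* consume fw->data / fw->size here */

	release_firmware(fw);
	return 0;
}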
diff --git a/include/linux/firmware/broadcom/tee_bnxt_fw.h b/include/linux/firmware/broadcom/tee_bnxt_fw.h
new file mode 100644
index 000000000000..f24c82d6ef73
--- /dev/null
+++ b/include/linux/firmware/broadcom/tee_bnxt_fw.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2019 Broadcom.
+ */
+
+#ifndef _BROADCOM_TEE_BNXT_FW_H
+#define _BROADCOM_TEE_BNXT_FW_H
+
+#include <linux/types.h>
+
+int tee_bnxt_fw_load(void);
+int tee_bnxt_copy_coredump(void *buf, u32 offset, u32 size);
+
+#endif /* _BROADCOM_TEE_BNXT_FW_H */
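A hedged sketch of the intended call order for the two entry points above (caller-owned buffer; starting at offset 0 is an assumption):

static int example_bnxt_coredump(void *buf, u32 size)
{
	int ret = tee_bnxt_fw_load();

	if (ret)
		return ret;

	return tee_bnxt_copy_coredump(buf, 0, size);
}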
diff --git a/include/linux/firmware/cirrus/cs_dsp.h b/include/linux/firmware/cirrus/cs_dsp.h
new file mode 100644
index 000000000000..29cd11d5a3cf
--- /dev/null
+++ b/include/linux/firmware/cirrus/cs_dsp.h
@@ -0,0 +1,329 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * cs_dsp.h -- Cirrus Logic DSP firmware support
+ *
+ * Based on sound/soc/codecs/wm_adsp.h
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ * Copyright (C) 2015-2021 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+#ifndef __CS_DSP_H
+#define __CS_DSP_H
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/list.h>
+#include <linux/regmap.h>
+
+#define CS_ADSP2_REGION_0 BIT(0)
+#define CS_ADSP2_REGION_1 BIT(1)
+#define CS_ADSP2_REGION_2 BIT(2)
+#define CS_ADSP2_REGION_3 BIT(3)
+#define CS_ADSP2_REGION_4 BIT(4)
+#define CS_ADSP2_REGION_5 BIT(5)
+#define CS_ADSP2_REGION_6 BIT(6)
+#define CS_ADSP2_REGION_7 BIT(7)
+#define CS_ADSP2_REGION_8 BIT(8)
+#define CS_ADSP2_REGION_9 BIT(9)
+#define CS_ADSP2_REGION_1_9 (CS_ADSP2_REGION_1 | \
+ CS_ADSP2_REGION_2 | CS_ADSP2_REGION_3 | \
+ CS_ADSP2_REGION_4 | CS_ADSP2_REGION_5 | \
+ CS_ADSP2_REGION_6 | CS_ADSP2_REGION_7 | \
+ CS_ADSP2_REGION_8 | CS_ADSP2_REGION_9)
+#define CS_ADSP2_REGION_ALL (CS_ADSP2_REGION_0 | CS_ADSP2_REGION_1_9)
+
+#define CS_DSP_DATA_WORD_SIZE 3
+#define CS_DSP_DATA_WORD_BITS (3 * BITS_PER_BYTE)
+
+#define CS_DSP_ACKED_CTL_TIMEOUT_MS 100
+#define CS_DSP_ACKED_CTL_N_QUICKPOLLS 10
+#define CS_DSP_ACKED_CTL_MIN_VALUE 0
+#define CS_DSP_ACKED_CTL_MAX_VALUE 0xFFFFFF
+
+/**
+ * struct cs_dsp_region - Describes a logical memory region in DSP address space
+ * @type: Memory region type
+ * @base: Address of region
+ */
+struct cs_dsp_region {
+ int type;
+ unsigned int base;
+};
+
+/**
+ * struct cs_dsp_alg_region - Describes a logical algorithm region in DSP address space
+ * @list: List node for internal use
+ * @alg: Algorithm id
+ * @ver: Expected algorithm version
+ * @type: Memory region type
+ * @base: Address of region
+ */
+struct cs_dsp_alg_region {
+ struct list_head list;
+ unsigned int alg;
+ unsigned int ver;
+ int type;
+ unsigned int base;
+};
+
+/**
+ * struct cs_dsp_coeff_ctl - Describes a coefficient control
+ * @list: List node for internal use
+ * @dsp: DSP instance associated with this control
+ * @cache: Cached value of the control
+ * @fw_name: Name of the firmware
+ * @subname: Name of the control parsed from the WMFW
+ * @subname_len: Length of subname
+ * @offset: Offset of control within alg_region in words
+ * @len: Length of the cached value in bytes
+ * @type: One of the WMFW_CTL_TYPE_ control types defined in wmfw.h
+ * @flags: Bitfield of WMFW_CTL_FLAG_ control flags defined in wmfw.h
+ * @set: Flag indicating the value has been written by the user
+ * @enabled: Flag indicating whether control is enabled
+ * @alg_region: Logical region associated with this control
+ * @priv: For use by the client
+ */
+struct cs_dsp_coeff_ctl {
+ struct list_head list;
+ struct cs_dsp *dsp;
+ void *cache;
+ const char *fw_name;
+ /* Subname is needed to match with firmware */
+ const char *subname;
+ unsigned int subname_len;
+ unsigned int offset;
+ size_t len;
+ unsigned int type;
+ unsigned int flags;
+ unsigned int set:1;
+ unsigned int enabled:1;
+ struct cs_dsp_alg_region alg_region;
+
+ void *priv;
+};
+
+struct cs_dsp_ops;
+struct cs_dsp_client_ops;
+
+/**
+ * struct cs_dsp - Configuration and state of a Cirrus Logic DSP
+ * @name: The name of the DSP instance
+ * @rev: Revision of the DSP
+ * @num: DSP instance number
+ * @type: Type of DSP
+ * @dev: Driver model representation of the device
+ * @regmap: Register map of the device
+ * @ops: Function pointers for internal callbacks
+ * @client_ops: Function pointers for client callbacks
+ * @base: Address of the DSP registers
+ * @base_sysinfo: Address of the sysinfo register (Halo only)
+ * @sysclk_reg: Address of the sysclk register (ADSP1 only)
+ * @sysclk_mask: Mask of frequency bits within sysclk register (ADSP1 only)
+ * @sysclk_shift: Shift of frequency bits within sysclk register (ADSP1 only)
+ * @no_core_startstop: Flag indicating the core has no start/stop controls
+ * @alg_regions: List of currently loaded algorithm regions
+ * @fw_name: Name of the current firmware
+ * @fw_id: ID of the current firmware, obtained from the wmfw
+ * @fw_id_version: Version of the firmware, obtained from the wmfw
+ * @fw_vendor_id: Vendor of the firmware, obtained from the wmfw
+ * @mem: DSP memory region descriptions
+ * @num_mems: Number of memory regions in this DSP
+ * @fw_ver: Version of the wmfw file format
+ * @booted: Flag indicating DSP has been configured
+ * @running: Flag indicating DSP is executing firmware
+ * @ctl_list: Controls defined within the loaded DSP firmware
+ * @lock_regions: Enable MPU traps on specified memory regions
+ * @pwr_lock: Lock used to serialize accesses
+ * @debugfs_root: Debugfs directory for this DSP instance
+ * @wmfw_file_name: Filename of the currently loaded firmware
+ * @bin_file_name: Filename of the currently loaded coefficients
+ */
+struct cs_dsp {
+ const char *name;
+ int rev;
+ int num;
+ int type;
+ struct device *dev;
+ struct regmap *regmap;
+
+ const struct cs_dsp_ops *ops;
+ const struct cs_dsp_client_ops *client_ops;
+
+ unsigned int base;
+ unsigned int base_sysinfo;
+ unsigned int sysclk_reg;
+ unsigned int sysclk_mask;
+ unsigned int sysclk_shift;
+ bool no_core_startstop;
+
+ struct list_head alg_regions;
+
+ const char *fw_name;
+ unsigned int fw_id;
+ unsigned int fw_id_version;
+ unsigned int fw_vendor_id;
+
+ const struct cs_dsp_region *mem;
+ int num_mems;
+
+ int fw_ver;
+
+ bool booted;
+ bool running;
+
+ struct list_head ctl_list;
+
+ struct mutex pwr_lock;
+
+ unsigned int lock_regions;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root;
+ char *wmfw_file_name;
+ char *bin_file_name;
+#endif
+};
+
+/**
+ * struct cs_dsp_client_ops - client callbacks
+ * @control_add: Called under the pwr_lock when a control is created
+ * @control_remove: Called under the pwr_lock when a control is destroyed
+ * @pre_run: Called under the pwr_lock by cs_dsp_run() before the core is started
+ * @post_run: Called under the pwr_lock by cs_dsp_run() after the core is started
+ * @pre_stop: Called under the pwr_lock by cs_dsp_stop() before the core is stopped
+ * @post_stop: Called under the pwr_lock by cs_dsp_stop() after the core is stopped
+ * @watchdog_expired: Called when a watchdog expiry is detected
+ *
+ * These callbacks give the cs_dsp client an opportunity to respond to events
+ * or to perform actions atomically.
+ */
+struct cs_dsp_client_ops {
+ int (*control_add)(struct cs_dsp_coeff_ctl *ctl);
+ void (*control_remove)(struct cs_dsp_coeff_ctl *ctl);
+ int (*pre_run)(struct cs_dsp *dsp);
+ int (*post_run)(struct cs_dsp *dsp);
+ void (*pre_stop)(struct cs_dsp *dsp);
+ void (*post_stop)(struct cs_dsp *dsp);
+ void (*watchdog_expired)(struct cs_dsp *dsp);
+};
+
+int cs_dsp_adsp1_init(struct cs_dsp *dsp);
+int cs_dsp_adsp2_init(struct cs_dsp *dsp);
+int cs_dsp_halo_init(struct cs_dsp *dsp);
+
+int cs_dsp_adsp1_power_up(struct cs_dsp *dsp,
+ const struct firmware *wmfw_firmware, char *wmfw_filename,
+ const struct firmware *coeff_firmware, char *coeff_filename,
+ const char *fw_name);
+void cs_dsp_adsp1_power_down(struct cs_dsp *dsp);
+int cs_dsp_power_up(struct cs_dsp *dsp,
+ const struct firmware *wmfw_firmware, char *wmfw_filename,
+ const struct firmware *coeff_firmware, char *coeff_filename,
+ const char *fw_name);
+void cs_dsp_power_down(struct cs_dsp *dsp);
+int cs_dsp_run(struct cs_dsp *dsp);
+void cs_dsp_stop(struct cs_dsp *dsp);
+
+void cs_dsp_remove(struct cs_dsp *dsp);
+
+int cs_dsp_set_dspclk(struct cs_dsp *dsp, unsigned int freq);
+void cs_dsp_adsp2_bus_error(struct cs_dsp *dsp);
+void cs_dsp_halo_bus_error(struct cs_dsp *dsp);
+void cs_dsp_halo_wdt_expire(struct cs_dsp *dsp);
+
+void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root);
+void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp);
+
+int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int event_id);
+int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off,
+ const void *buf, size_t len);
+int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off,
+ void *buf, size_t len);
+struct cs_dsp_coeff_ctl *cs_dsp_get_ctl(struct cs_dsp *dsp, const char *name, int type,
+ unsigned int alg);
+
+int cs_dsp_read_raw_data_block(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr,
+ unsigned int num_words, __be32 *data);
+int cs_dsp_read_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr, u32 *data);
+int cs_dsp_write_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr, u32 data);
+void cs_dsp_remove_padding(u32 *buf, int nwords);
+
+struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp,
+ int type, unsigned int id);
+
+const char *cs_dsp_mem_region_name(unsigned int type);
+
+/**
+ * struct cs_dsp_chunk - Describes a buffer holding data formatted for the DSP
+ * @data: Pointer to underlying buffer memory
+ * @max: Pointer to end of the buffer memory
+ * @bytes: Number of bytes read/written into the memory chunk
+ * @cache: Temporary holding data as it is formatted
+ * @cachebits: Number of bits of data currently in cache
+ */
+struct cs_dsp_chunk {
+ u8 *data;
+ u8 *max;
+ int bytes;
+
+ u32 cache;
+ int cachebits;
+};
+
+/**
+ * cs_dsp_chunk() - Create a DSP memory chunk
+ * @data: Pointer to the buffer that will be used to store data
+ * @size: Size of the buffer in bytes
+ *
+ * Return: A cs_dsp_chunk structure
+ */
+static inline struct cs_dsp_chunk cs_dsp_chunk(void *data, int size)
+{
+ struct cs_dsp_chunk ch = {
+ .data = data,
+ .max = data + size,
+ };
+
+ return ch;
+}
+
+/**
+ * cs_dsp_chunk_end() - Check if a DSP memory chunk is full
+ * @ch: Pointer to the chunk structure
+ *
+ * Return: True if the whole buffer has been read/written
+ */
+static inline bool cs_dsp_chunk_end(struct cs_dsp_chunk *ch)
+{
+ return ch->data == ch->max;
+}
+
+/**
+ * cs_dsp_chunk_bytes() - Number of bytes written/read from a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ *
+ * Return: Number of bytes read/written to the buffer
+ */
+static inline int cs_dsp_chunk_bytes(struct cs_dsp_chunk *ch)
+{
+ return ch->bytes;
+}
+
+/**
+ * cs_dsp_chunk_valid_addr() - Check if an address is in a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ * @addr: Address to check
+ *
+ * Return: True if the given address is within the buffer
+ */
+static inline bool cs_dsp_chunk_valid_addr(struct cs_dsp_chunk *ch, void *addr)
+{
+ return (u8 *)addr >= ch->data && (u8 *)addr < ch->max;
+}
+
+int cs_dsp_chunk_write(struct cs_dsp_chunk *ch, int nbits, u32 val);
+int cs_dsp_chunk_flush(struct cs_dsp_chunk *ch);
+int cs_dsp_chunk_read(struct cs_dsp_chunk *ch, int nbits);
+
+#endif
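A sketch of the chunk helpers above packing two 24-bit DSP words into a caller-supplied buffer (the function name and values are illustrative):

static int example_pack_words(u8 *buf, int size)
{
	struct cs_dsp_chunk ch = cs_dsp_chunk(buf, size);
	int ret;

	ret = cs_dsp_chunk_write(&ch, CS_DSP_DATA_WORD_BITS, 0x123456);
	if (!ret)
		ret = cs_dsp_chunk_write(&ch, CS_DSP_DATA_WORD_BITS, 0xabcdef);
	if (!ret)
		ret = cs_dsp_chunk_flush(&ch);	/* pad out any partial word */

	return ret ? ret : cs_dsp_chunk_bytes(&ch);
}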
diff --git a/include/linux/firmware/cirrus/wmfw.h b/include/linux/firmware/cirrus/wmfw.h
new file mode 100644
index 000000000000..74e5a4f6c13a
--- /dev/null
+++ b/include/linux/firmware/cirrus/wmfw.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * wmfw.h - Wolfson firmware format information
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ */
+
+#ifndef __WMFW_H
+#define __WMFW_H
+
+#include <linux/types.h>
+
+#define WMFW_MAX_ALG_NAME 256
+#define WMFW_MAX_ALG_DESCR_NAME 256
+
+#define WMFW_MAX_COEFF_NAME 256
+#define WMFW_MAX_COEFF_DESCR_NAME 256
+
+#define WMFW_CTL_FLAG_SYS 0x8000
+#define WMFW_CTL_FLAG_VOLATILE 0x0004
+#define WMFW_CTL_FLAG_WRITEABLE 0x0002
+#define WMFW_CTL_FLAG_READABLE 0x0001
+
+#define WMFW_CTL_TYPE_BYTES 0x0004 /* byte control */
+
+/* Non-ALSA coefficient types start at 0x1000 */
+#define WMFW_CTL_TYPE_ACKED 0x1000 /* acked control */
+#define WMFW_CTL_TYPE_HOSTEVENT 0x1001 /* event control */
+#define WMFW_CTL_TYPE_HOST_BUFFER 0x1002 /* host buffer pointer */
+#define WMFW_CTL_TYPE_FWEVENT 0x1004 /* firmware event control */
+
+struct wmfw_header {
+ char magic[4];
+ __le32 len;
+ __le16 rev;
+ u8 core;
+ u8 ver;
+} __packed;
+
+struct wmfw_footer {
+ __le64 timestamp;
+ __le32 checksum;
+} __packed;
+
+struct wmfw_adsp1_sizes {
+ __le32 dm;
+ __le32 pm;
+ __le32 zm;
+} __packed;
+
+struct wmfw_adsp2_sizes {
+ __le32 xm;
+ __le32 ym;
+ __le32 pm;
+ __le32 zm;
+} __packed;
+
+struct wmfw_region {
+ union {
+ __be32 type;
+ __le32 offset;
+ };
+ __le32 len;
+ u8 data[];
+} __packed;
+
+struct wmfw_id_hdr {
+ __be32 core_id;
+ __be32 core_rev;
+ __be32 id;
+ __be32 ver;
+} __packed;
+
+struct wmfw_v3_id_hdr {
+ __be32 core_id;
+ __be32 block_rev;
+ __be32 vendor_id;
+ __be32 id;
+ __be32 ver;
+} __packed;
+
+struct wmfw_adsp1_id_hdr {
+ struct wmfw_id_hdr fw;
+ __be32 zm;
+ __be32 dm;
+ __be32 n_algs;
+} __packed;
+
+struct wmfw_adsp2_id_hdr {
+ struct wmfw_id_hdr fw;
+ __be32 zm;
+ __be32 xm;
+ __be32 ym;
+ __be32 n_algs;
+} __packed;
+
+struct wmfw_halo_id_hdr {
+ struct wmfw_v3_id_hdr fw;
+ __be32 xm_base;
+ __be32 xm_size;
+ __be32 ym_base;
+ __be32 ym_size;
+ __be32 n_algs;
+} __packed;
+
+struct wmfw_alg_hdr {
+ __be32 id;
+ __be32 ver;
+} __packed;
+
+struct wmfw_adsp1_alg_hdr {
+ struct wmfw_alg_hdr alg;
+ __be32 zm;
+ __be32 dm;
+} __packed;
+
+struct wmfw_adsp2_alg_hdr {
+ struct wmfw_alg_hdr alg;
+ __be32 zm;
+ __be32 xm;
+ __be32 ym;
+} __packed;
+
+struct wmfw_halo_alg_hdr {
+ struct wmfw_alg_hdr alg;
+ __be32 xm_base;
+ __be32 xm_size;
+ __be32 ym_base;
+ __be32 ym_size;
+} __packed;
+
+struct wmfw_adsp_alg_data {
+ __le32 id;
+ u8 name[WMFW_MAX_ALG_NAME];
+ u8 descr[WMFW_MAX_ALG_DESCR_NAME];
+ __le32 ncoeff;
+ u8 data[];
+} __packed;
+
+struct wmfw_adsp_coeff_data {
+ struct {
+ __le16 offset;
+ __le16 type;
+ __le32 size;
+ } hdr;
+ u8 name[WMFW_MAX_COEFF_NAME];
+ u8 descr[WMFW_MAX_COEFF_DESCR_NAME];
+ __le16 ctl_type;
+ __le16 flags;
+ __le32 len;
+ u8 data[];
+} __packed;
+
+struct wmfw_coeff_hdr {
+ u8 magic[4];
+ __le32 len;
+ union {
+ __be32 rev;
+ __le32 ver;
+ };
+ union {
+ __be32 core;
+ __le32 core_ver;
+ };
+ u8 data[];
+} __packed;
+
+struct wmfw_coeff_item {
+ __le16 offset;
+ __le16 type;
+ __le32 id;
+ __le32 ver;
+ __le32 sr;
+ __le32 len;
+ u8 data[];
+} __packed;
+
+#define WMFW_ADSP1 1
+#define WMFW_ADSP2 2
+#define WMFW_HALO 4
+
+#define WMFW_ABSOLUTE 0xf0
+#define WMFW_ALGORITHM_DATA 0xf2
+#define WMFW_METADATA 0xfc
+#define WMFW_NAME_TEXT 0xfe
+#define WMFW_INFO_TEXT 0xff
+
+#define WMFW_ADSP1_PM 2
+#define WMFW_ADSP1_DM 3
+#define WMFW_ADSP1_ZM 4
+
+#define WMFW_ADSP2_PM 2
+#define WMFW_ADSP2_ZM 4
+#define WMFW_ADSP2_XM 5
+#define WMFW_ADSP2_YM 6
+
+#define WMFW_HALO_PM_PACKED 0x10
+#define WMFW_HALO_XM_PACKED 0x11
+#define WMFW_HALO_YM_PACKED 0x12
+
+#endif
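A sketch validating the fixed header at the front of a wmfw image against the layout above (the "WMFW" magic is part of the format; the helper itself is hypothetical):

static int example_check_wmfw(const u8 *data, size_t size)
{
	const struct wmfw_header *hdr = (const struct wmfw_header *)data;

	if (size < sizeof(*hdr) || memcmp(hdr->magic, "WMFW", 4))
		return -EINVAL;

	pr_debug("wmfw format v%d for core %d, header len %u\n",
		 hdr->ver, hdr->core, le32_to_cpu(hdr->len));
	return 0;
}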
diff --git a/include/linux/firmware/imx/dsp.h b/include/linux/firmware/imx/dsp.h
new file mode 100644
index 000000000000..4f7895a3b73c
--- /dev/null
+++ b/include/linux/firmware/imx/dsp.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2019 NXP
+ *
+ * Header file for the DSP IPC implementation
+ */
+
+#ifndef _IMX_DSP_IPC_H
+#define _IMX_DSP_IPC_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/mailbox_client.h>
+
+#define DSP_MU_CHAN_NUM 4
+
+struct imx_dsp_chan {
+ struct imx_dsp_ipc *ipc;
+ struct mbox_client cl;
+ struct mbox_chan *ch;
+ char *name;
+ int idx;
+};
+
+struct imx_dsp_ops {
+ void (*handle_reply)(struct imx_dsp_ipc *ipc);
+ void (*handle_request)(struct imx_dsp_ipc *ipc);
+};
+
+struct imx_dsp_ipc {
+ /* Host <-> DSP communication uses 2 txdb and 2 rxdb channels */
+ struct imx_dsp_chan chans[DSP_MU_CHAN_NUM];
+ struct device *dev;
+ struct imx_dsp_ops *ops;
+ void *private_data;
+};
+
+static inline void imx_dsp_set_data(struct imx_dsp_ipc *ipc, void *data)
+{
+ if (!ipc)
+ return;
+
+ ipc->private_data = data;
+}
+
+static inline void *imx_dsp_get_data(struct imx_dsp_ipc *ipc)
+{
+ if (!ipc)
+ return NULL;
+
+ return ipc->private_data;
+}
+
+#if IS_ENABLED(CONFIG_IMX_DSP)
+
+int imx_dsp_ring_doorbell(struct imx_dsp_ipc *dsp, unsigned int chan_idx);
+
+struct mbox_chan *imx_dsp_request_channel(struct imx_dsp_ipc *ipc, int idx);
+void imx_dsp_free_channel(struct imx_dsp_ipc *ipc, int idx);
+
+#else
+
+static inline int imx_dsp_ring_doorbell(struct imx_dsp_ipc *ipc,
+ unsigned int chan_idx)
+{
+ return -ENOTSUPP;
+}
+
+static inline struct mbox_chan *imx_dsp_request_channel(struct imx_dsp_ipc *ipc, int idx)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void imx_dsp_free_channel(struct imx_dsp_ipc *ipc, int idx) { }
+
+#endif
+#endif /* _IMX_DSP_IPC_H */
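A sketch of a client wiring up the reply/request callbacks and kicking a doorbell (handler names hypothetical):

static void example_handle_reply(struct imx_dsp_ipc *ipc)
{
	/* read the DSP's reply from shared memory, wake any waiter */
}

static void example_handle_request(struct imx_dsp_ipc *ipc)
{
	/* service a DSP-initiated request */
}

static struct imx_dsp_ops example_dsp_ops = {
	.handle_reply	= example_handle_reply,
	.handle_request	= example_handle_request,
};

/* ipc->ops = &example_dsp_ops; then notify the DSP on channel 0: */
/* imx_dsp_ring_doorbell(ipc, 0); */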
diff --git a/include/linux/firmware/imx/ipc.h b/include/linux/firmware/imx/ipc.h
new file mode 100644
index 000000000000..0b4643571625
--- /dev/null
+++ b/include/linux/firmware/imx/ipc.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2018 NXP
+ *
+ * Header file for the IPC implementation.
+ */
+
+#ifndef _SC_IPC_H
+#define _SC_IPC_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+#define IMX_SC_RPC_VERSION 1
+#define IMX_SC_RPC_MAX_MSG 8
+
+struct imx_sc_ipc;
+
+enum imx_sc_rpc_svc {
+ IMX_SC_RPC_SVC_UNKNOWN = 0,
+ IMX_SC_RPC_SVC_RETURN = 1,
+ IMX_SC_RPC_SVC_PM = 2,
+ IMX_SC_RPC_SVC_RM = 3,
+ IMX_SC_RPC_SVC_TIMER = 5,
+ IMX_SC_RPC_SVC_PAD = 6,
+ IMX_SC_RPC_SVC_MISC = 7,
+ IMX_SC_RPC_SVC_IRQ = 8,
+};
+
+struct imx_sc_rpc_msg {
+ uint8_t ver;
+ uint8_t size;
+ uint8_t svc;
+ uint8_t func;
+};
+
+#ifdef CONFIG_IMX_SCU
+/*
+ * This is a function to send an RPC message over an IPC channel.
+ * It is called by client-side SCFW API function shims.
+ *
+ * @param[in] ipc IPC handle
+ * @param[in,out] msg handle to a message
+ * @param[in] have_resp response flag
+ *
+ * If have_resp is true then this function waits for a response
+ * and returns the result in msg.
+ */
+int imx_scu_call_rpc(struct imx_sc_ipc *ipc, void *msg, bool have_resp);
+
+/*
+ * This function gets the default IPC handle used by the SCU.
+ *
+ * @param[out] ipc sc ipc handle
+ *
+ * @return Returns an error code (0 = success, failed if < 0)
+ */
+int imx_scu_get_handle(struct imx_sc_ipc **ipc);
+#else
+static inline int imx_scu_call_rpc(struct imx_sc_ipc *ipc, void *msg,
+ bool have_resp)
+{
+ return -ENOTSUPP;
+}
+
+static inline int imx_scu_get_handle(struct imx_sc_ipc **ipc)
+{
+ return -ENOTSUPP;
+}
+#endif
+#endif /* _SC_IPC_H */
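A sketch of a client-side shim built on the two calls above (the function ID shown is a placeholder, not a real SCFW call):

static int example_scu_rpc(void)
{
	struct imx_sc_rpc_msg msg;
	struct imx_sc_ipc *ipc;
	int ret;

	ret = imx_scu_get_handle(&ipc);
	if (ret)
		return ret;

	msg.ver  = IMX_SC_RPC_VERSION;
	msg.size = 1;				/* one word: header only */
	msg.svc  = IMX_SC_RPC_SVC_MISC;
	msg.func = 0;				/* placeholder function ID */

	return imx_scu_call_rpc(ipc, &msg, true);	/* wait for the response */
}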
diff --git a/include/linux/firmware/imx/s4.h b/include/linux/firmware/imx/s4.h
new file mode 100644
index 000000000000..9e34923ae1d6
--- /dev/null
+++ b/include/linux/firmware/imx/s4.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2021 NXP
+ *
+ * Header file for the IPC implementation.
+ */
+
+#ifndef _S4_IPC_H
+#define _S4_IPC_H
+
+struct imx_s4_ipc;
+
+struct imx_s4_rpc_msg {
+ uint8_t ver;
+ uint8_t size;
+ uint8_t cmd;
+ uint8_t tag;
+} __packed;
+
+#endif /* _S4_IPC_H */
diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h
new file mode 100644
index 000000000000..5cc63fe7e84d
--- /dev/null
+++ b/include/linux/firmware/imx/sci.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ *
+ * Header file containing the public System Controller Interface (SCI)
+ * definitions.
+ */
+
+#ifndef _SC_SCI_H
+#define _SC_SCI_H
+
+#include <linux/firmware/imx/ipc.h>
+
+#include <linux/firmware/imx/svc/misc.h>
+#include <linux/firmware/imx/svc/pm.h>
+#include <linux/firmware/imx/svc/rm.h>
+
+#if IS_ENABLED(CONFIG_IMX_SCU)
+int imx_scu_enable_general_irq_channel(struct device *dev);
+int imx_scu_irq_register_notifier(struct notifier_block *nb);
+int imx_scu_irq_unregister_notifier(struct notifier_block *nb);
+int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable);
+int imx_scu_soc_init(struct device *dev);
+#else
+static inline int imx_scu_soc_init(struct device *dev)
+{
+ return -ENOTSUPP;
+}
+
+static inline int imx_scu_enable_general_irq_channel(struct device *dev)
+{
+ return -ENOTSUPP;
+}
+
+static inline int imx_scu_irq_register_notifier(struct notifier_block *nb)
+{
+ return -ENOTSUPP;
+}
+
+static inline int imx_scu_irq_unregister_notifier(struct notifier_block *nb)
+{
+ return -ENOTSUPP;
+}
+
+static inline int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable)
+{
+ return -ENOTSUPP;
+}
+#endif
+#endif /* _SC_SCI_H */
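A sketch of the notifier-based SCU IRQ interface above (group and mask values illustrative):

static int example_scu_irq_cb(struct notifier_block *nb,
			      unsigned long group, void *data)
{
	/* data carries the pending IRQ status for this group */
	return NOTIFY_OK;
}

static struct notifier_block example_scu_irq_nb = {
	.notifier_call = example_scu_irq_cb,
};

/* imx_scu_irq_register_notifier(&example_scu_irq_nb); */
/* imx_scu_irq_group_enable(0, BIT(0), true); */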
diff --git a/include/linux/firmware/imx/svc/misc.h b/include/linux/firmware/imx/svc/misc.h
new file mode 100644
index 000000000000..760db08a67fc
--- /dev/null
+++ b/include/linux/firmware/imx/svc/misc.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ *
+ * Header file containing the public API for the System Controller (SC)
+ * Miscellaneous (MISC) function.
+ *
+ * MISC_SVC (SVC) Miscellaneous Service
+ *
+ * Module for the Miscellaneous (MISC) service.
+ */
+
+#ifndef _SC_MISC_API_H
+#define _SC_MISC_API_H
+
+#include <linux/firmware/imx/sci.h>
+
+/*
+ * This type is used to indicate RPC MISC function calls.
+ */
+enum imx_misc_func {
+ IMX_SC_MISC_FUNC_UNKNOWN = 0,
+ IMX_SC_MISC_FUNC_SET_CONTROL = 1,
+ IMX_SC_MISC_FUNC_GET_CONTROL = 2,
+ IMX_SC_MISC_FUNC_SET_MAX_DMA_GROUP = 4,
+ IMX_SC_MISC_FUNC_SET_DMA_GROUP = 5,
+ IMX_SC_MISC_FUNC_SECO_IMAGE_LOAD = 8,
+ IMX_SC_MISC_FUNC_SECO_AUTHENTICATE = 9,
+ IMX_SC_MISC_FUNC_DEBUG_OUT = 10,
+ IMX_SC_MISC_FUNC_WAVEFORM_CAPTURE = 6,
+ IMX_SC_MISC_FUNC_BUILD_INFO = 15,
+ IMX_SC_MISC_FUNC_UNIQUE_ID = 19,
+ IMX_SC_MISC_FUNC_SET_ARI = 3,
+ IMX_SC_MISC_FUNC_BOOT_STATUS = 7,
+ IMX_SC_MISC_FUNC_BOOT_DONE = 14,
+ IMX_SC_MISC_FUNC_OTP_FUSE_READ = 11,
+ IMX_SC_MISC_FUNC_OTP_FUSE_WRITE = 17,
+ IMX_SC_MISC_FUNC_SET_TEMP = 12,
+ IMX_SC_MISC_FUNC_GET_TEMP = 13,
+ IMX_SC_MISC_FUNC_GET_BOOT_DEV = 16,
+ IMX_SC_MISC_FUNC_GET_BUTTON_STATUS = 18,
+};
+
+/*
+ * Control Functions
+ */
+
+#ifdef CONFIG_IMX_SCU
+int imx_sc_misc_set_control(struct imx_sc_ipc *ipc, u32 resource,
+ u8 ctrl, u32 val);
+
+int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource,
+ u8 ctrl, u32 *val);
+
+int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource,
+ bool enable, u64 phys_addr);
+#else
+static inline int imx_sc_misc_set_control(struct imx_sc_ipc *ipc,
+ u32 resource, u8 ctrl, u32 val)
+{
+ return -ENOTSUPP;
+}
+
+static inline int imx_sc_misc_get_control(struct imx_sc_ipc *ipc,
+ u32 resource, u8 ctrl, u32 *val)
+{
+ return -ENOTSUPP;
+}
+
+static inline int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource,
+ bool enable, u64 phys_addr)
+{
+ return -ENOTSUPP;
+}
+#endif
+#endif /* _SC_MISC_API_H */
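A sketch of the control accessors above reading a control and writing back its inverse (resource and control IDs are placeholders):

static int example_toggle_control(struct imx_sc_ipc *ipc, u32 resource, u8 ctrl)
{
	u32 val;
	int ret;

	ret = imx_sc_misc_get_control(ipc, resource, ctrl, &val);
	if (ret)
		return ret;

	return imx_sc_misc_set_control(ipc, resource, ctrl, !val);
}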
diff --git a/include/linux/firmware/imx/svc/pm.h b/include/linux/firmware/imx/svc/pm.h
new file mode 100644
index 000000000000..1f6975dd37b0
--- /dev/null
+++ b/include/linux/firmware/imx/svc/pm.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP
+ *
+ * Header file containing the public API for the System Controller (SC)
+ * Power Management (PM) function. This includes functions for power state
+ * control, clock control, reset control, and wake-up event control.
+ *
+ * PM_SVC (SVC) Power Management Service
+ *
+ * Module for the Power Management (PM) service.
+ */
+
+#ifndef _SC_PM_API_H
+#define _SC_PM_API_H
+
+#include <linux/firmware/imx/sci.h>
+
+/*
+ * This type is used to indicate RPC PM function calls.
+ */
+enum imx_sc_pm_func {
+ IMX_SC_PM_FUNC_UNKNOWN = 0,
+ IMX_SC_PM_FUNC_SET_SYS_POWER_MODE = 19,
+ IMX_SC_PM_FUNC_SET_PARTITION_POWER_MODE = 1,
+ IMX_SC_PM_FUNC_GET_SYS_POWER_MODE = 2,
+ IMX_SC_PM_FUNC_SET_RESOURCE_POWER_MODE = 3,
+ IMX_SC_PM_FUNC_GET_RESOURCE_POWER_MODE = 4,
+ IMX_SC_PM_FUNC_REQ_LOW_POWER_MODE = 16,
+ IMX_SC_PM_FUNC_SET_CPU_RESUME_ADDR = 17,
+ IMX_SC_PM_FUNC_REQ_SYS_IF_POWER_MODE = 18,
+ IMX_SC_PM_FUNC_SET_CLOCK_RATE = 5,
+ IMX_SC_PM_FUNC_GET_CLOCK_RATE = 6,
+ IMX_SC_PM_FUNC_CLOCK_ENABLE = 7,
+ IMX_SC_PM_FUNC_SET_CLOCK_PARENT = 14,
+ IMX_SC_PM_FUNC_GET_CLOCK_PARENT = 15,
+ IMX_SC_PM_FUNC_RESET = 13,
+ IMX_SC_PM_FUNC_RESET_REASON = 10,
+ IMX_SC_PM_FUNC_BOOT = 8,
+ IMX_SC_PM_FUNC_REBOOT = 9,
+ IMX_SC_PM_FUNC_REBOOT_PARTITION = 12,
+ IMX_SC_PM_FUNC_CPU_START = 11,
+};
+
+/*
+ * Defines for ALL parameters
+ */
+#define IMX_SC_PM_CLK_ALL UINT8_MAX /* All clocks */
+
+/*
+ * Defines for SC PM Power Mode
+ */
+#define IMX_SC_PM_PW_MODE_OFF 0 /* Power off */
+#define IMX_SC_PM_PW_MODE_STBY 1 /* Power in standby */
+#define IMX_SC_PM_PW_MODE_LP 2 /* Power in low-power */
+#define IMX_SC_PM_PW_MODE_ON 3 /* Power on */
+
+/*
+ * Defines for SC PM CLK
+ */
+#define IMX_SC_PM_CLK_SLV_BUS 0 /* Slave bus clock */
+#define IMX_SC_PM_CLK_MST_BUS 1 /* Master bus clock */
+#define IMX_SC_PM_CLK_PER 2 /* Peripheral clock */
+#define IMX_SC_PM_CLK_PHY 3 /* Phy clock */
+#define IMX_SC_PM_CLK_MISC 4 /* Misc clock */
+#define IMX_SC_PM_CLK_MISC0 0 /* Misc 0 clock */
+#define IMX_SC_PM_CLK_MISC1 1 /* Misc 1 clock */
+#define IMX_SC_PM_CLK_MISC2 2 /* Misc 2 clock */
+#define IMX_SC_PM_CLK_MISC3 3 /* Misc 3 clock */
+#define IMX_SC_PM_CLK_MISC4 4 /* Misc 4 clock */
+#define IMX_SC_PM_CLK_CPU 2 /* CPU clock */
+#define IMX_SC_PM_CLK_PLL 4 /* PLL */
+#define IMX_SC_PM_CLK_BYPASS 4 /* Bypass clock */
+
+/*
+ * Defines for SC PM CLK Parent
+ */
+#define IMX_SC_PM_PARENT_XTAL 0 /* Parent is XTAL. */
+#define IMX_SC_PM_PARENT_PLL0 1 /* Parent is PLL0 */
+#define IMX_SC_PM_PARENT_PLL1 2 /* Parent is PLL1 or PLL0/2 */
+#define IMX_SC_PM_PARENT_PLL2 3 /* Parent in PLL2 or PLL0/4 */
+#define IMX_SC_PM_PARENT_BYPS 4 /* Parent is a bypass clock. */
+
+#endif /* _SC_PM_API_H */
diff --git a/include/linux/firmware/imx/svc/rm.h b/include/linux/firmware/imx/svc/rm.h
new file mode 100644
index 000000000000..31456f897aa9
--- /dev/null
+++ b/include/linux/firmware/imx/svc/rm.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2020 NXP
+ *
+ * Header file containing the public API for the System Controller (SC)
+ * Resource Management (RM) function. This includes functions for
+ * partitioning resources, pads, and memory regions.
+ *
+ * RM_SVC (SVC) Resource Management Service
+ *
+ * Module for the Resource Management (RM) service.
+ */
+
+#ifndef _SC_RM_API_H
+#define _SC_RM_API_H
+
+#include <linux/firmware/imx/sci.h>
+
+/*
+ * This type is used to indicate RPC RM function calls.
+ */
+enum imx_sc_rm_func {
+ IMX_SC_RM_FUNC_UNKNOWN = 0,
+ IMX_SC_RM_FUNC_PARTITION_ALLOC = 1,
+ IMX_SC_RM_FUNC_SET_CONFIDENTIAL = 31,
+ IMX_SC_RM_FUNC_PARTITION_FREE = 2,
+ IMX_SC_RM_FUNC_GET_DID = 26,
+ IMX_SC_RM_FUNC_PARTITION_STATIC = 3,
+ IMX_SC_RM_FUNC_PARTITION_LOCK = 4,
+ IMX_SC_RM_FUNC_GET_PARTITION = 5,
+ IMX_SC_RM_FUNC_SET_PARENT = 6,
+ IMX_SC_RM_FUNC_MOVE_ALL = 7,
+ IMX_SC_RM_FUNC_ASSIGN_RESOURCE = 8,
+ IMX_SC_RM_FUNC_SET_RESOURCE_MOVABLE = 9,
+ IMX_SC_RM_FUNC_SET_SUBSYS_RSRC_MOVABLE = 28,
+ IMX_SC_RM_FUNC_SET_MASTER_ATTRIBUTES = 10,
+ IMX_SC_RM_FUNC_SET_MASTER_SID = 11,
+ IMX_SC_RM_FUNC_SET_PERIPHERAL_PERMISSIONS = 12,
+ IMX_SC_RM_FUNC_IS_RESOURCE_OWNED = 13,
+ IMX_SC_RM_FUNC_GET_RESOURCE_OWNER = 33,
+ IMX_SC_RM_FUNC_IS_RESOURCE_MASTER = 14,
+ IMX_SC_RM_FUNC_IS_RESOURCE_PERIPHERAL = 15,
+ IMX_SC_RM_FUNC_GET_RESOURCE_INFO = 16,
+ IMX_SC_RM_FUNC_MEMREG_ALLOC = 17,
+ IMX_SC_RM_FUNC_MEMREG_SPLIT = 29,
+ IMX_SC_RM_FUNC_MEMREG_FRAG = 32,
+ IMX_SC_RM_FUNC_MEMREG_FREE = 18,
+ IMX_SC_RM_FUNC_FIND_MEMREG = 30,
+ IMX_SC_RM_FUNC_ASSIGN_MEMREG = 19,
+ IMX_SC_RM_FUNC_SET_MEMREG_PERMISSIONS = 20,
+ IMX_SC_RM_FUNC_IS_MEMREG_OWNED = 21,
+ IMX_SC_RM_FUNC_GET_MEMREG_INFO = 22,
+ IMX_SC_RM_FUNC_ASSIGN_PAD = 23,
+ IMX_SC_RM_FUNC_SET_PAD_MOVABLE = 24,
+ IMX_SC_RM_FUNC_IS_PAD_OWNED = 25,
+ IMX_SC_RM_FUNC_DUMP = 27,
+};
+
+#if IS_ENABLED(CONFIG_IMX_SCU)
+bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource);
+int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt);
+#else
+static inline bool
+imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource)
+{
+ return true;
+}
+static inline int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+#endif
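The two RM helpers declared above are straightforward to use from a client driver. A minimal sketch, assuming imx_scu_get_handle() from <linux/firmware/imx/sci.h>:

#include <linux/firmware/imx/sci.h>

/* Sketch: refuse to touch a resource this partition does not own. */
static int example_check_ownership(u16 resource)
{
	struct imx_sc_ipc *ipc;
	int ret;

	ret = imx_scu_get_handle(&ipc);
	if (ret)
		return ret;

	if (!imx_sc_rm_is_resource_owned(ipc, resource))
		return -EPERM;	/* owned by another partition */

	return 0;
}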
diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h
new file mode 100644
index 000000000000..a718f853d457
--- /dev/null
+++ b/include/linux/firmware/intel/stratix10-smc.h
@@ -0,0 +1,598 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017-2018, Intel Corporation
+ */
+
+#ifndef __STRATIX10_SMC_H
+#define __STRATIX10_SMC_H
+
+#include <linux/arm-smccc.h>
+#include <linux/bitops.h>
+
+/**
+ * This file defines the Secure Monitor Call (SMC) message protocol used by
+ * the service layer driver in the normal world (EL1) to communicate with the
+ * secure monitor software running at Secure Monitor Exception Level 3 (EL3).
+ *
+ * This file is shared with the secure firmware (FW), which is out of the kernel tree.
+ *
+ * An ARM SMC instruction takes a function identifier and up to 6 64-bit
+ * register values as arguments, and can return up to 4 64-bit register
+ * values. The operation of the secure monitor is determined by the parameter
+ * values passed in through registers.
+ *
+ * EL1 and EL3 communicate pointers as physical addresses rather than
+ * virtual addresses.
+ *
+ * Functions specified by the ARM SMC Calling Convention:
+ *
+ * A FAST call executes an atomic operation and returns when the requested
+ * operation has completed.
+ * A STD call starts an operation which can be preempted by a non-secure
+ * interrupt. The call can return before the requested operation has
+ * completed.
+ *
+ * a0..a7 are used as register names in the descriptions below; on arm32
+ * they translate to r0..r7 and on arm64 to w0..w7.
+ */
+
+/**
+ * @func_num: function ID
+ */
+#define INTEL_SIP_SMC_STD_CALL_VAL(func_num) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_SIP, (func_num))
+
+#define INTEL_SIP_SMC_FAST_CALL_VAL(func_num) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_SIP, (func_num))
+
+/**
+ * Return values in INTEL_SIP_SMC_* call
+ *
+ * INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION:
+ * Secure monitor software doesn't recognize the request.
+ *
+ * INTEL_SIP_SMC_STATUS_OK:
+ * Secure monitor software accepts the service client's request.
+ *
+ * INTEL_SIP_SMC_STATUS_BUSY:
+ * Secure monitor software is still processing the service client's request.
+ *
+ * INTEL_SIP_SMC_STATUS_REJECTED:
+ * Secure monitor software rejects the service client's request.
+ *
+ * INTEL_SIP_SMC_STATUS_ERROR:
+ * An error occurred while processing the service request.
+ *
+ * INTEL_SIP_SMC_RSU_ERROR:
+ * An error occurred while processing the remote status update request.
+ */
+#define INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF
+#define INTEL_SIP_SMC_STATUS_OK 0x0
+#define INTEL_SIP_SMC_STATUS_BUSY 0x1
+#define INTEL_SIP_SMC_STATUS_REJECTED 0x2
+#define INTEL_SIP_SMC_STATUS_ERROR 0x4
+#define INTEL_SIP_SMC_RSU_ERROR 0x7
+
+/**
+ * Request INTEL_SIP_SMC_FPGA_CONFIG_START
+ *
+ * Sync call used by the service driver at EL1 to request that the FPGA in
+ * EL3 be prepared to receive a new configuration.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_FPGA_CONFIG_START.
+ * a1: flag for full or partial configuration. 0 for full and 1 for partial
+ * configuration.
+ * a2-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK, or INTEL_SIP_SMC_STATUS_ERROR.
+ * a1-3: not used.
+ */
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_START 1
+#define INTEL_SIP_SMC_FPGA_CONFIG_START \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_START)
+
+/**
+ * Request INTEL_SIP_SMC_FPGA_CONFIG_WRITE
+ *
+ * Async call used by service driver at EL1 to provide FPGA configuration data
+ * to secure world.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_FPGA_CONFIG_WRITE.
+ * a1: 64bit physical address of the configuration data memory block
+ * a2: Size of configuration data block.
+ * a3-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_BUSY or
+ * INTEL_SIP_SMC_STATUS_ERROR.
+ * a1: 64bit physical address of 1st completed memory block if any completed
+ * block, otherwise zero value.
+ * a2: 64bit physical address of 2nd completed memory block if any completed
+ * block, otherwise zero value.
+ * a3: 64bit physical address of 3rd completed memory block if any completed
+ * block, otherwise zero value.
+ */
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_WRITE 2
+#define INTEL_SIP_SMC_FPGA_CONFIG_WRITE \
+ INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_WRITE)
+
+/**
+ * Request INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE
+ *
+ * Sync call used by service driver at EL1 to track the completed write
+ * transactions. This request is called after INTEL_SIP_SMC_FPGA_CONFIG_WRITE
+ * call returns INTEL_SIP_SMC_STATUS_BUSY.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE.
+ * a1-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_BUSY or
+ * INTEL_SIP_SMC_STATUS_ERROR.
+ * a1: 64bit physical address of 1st completed memory block.
+ * a2: 64bit physical address of 2nd completed memory block if
+ * any completed block, otherwise zero value.
+ * a3: 64bit physical address of 3rd completed memory block if
+ * any completed block, otherwise zero value.
+ */
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE 3
+#define INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE \
+INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
+
+/**
+ * Request INTEL_SIP_SMC_FPGA_CONFIG_ISDONE
+ *
+ * Sync call used by the service driver at EL1 to inform the secure world
+ * that all data has been sent, and to check whether or not the secure world
+ * has completed the FPGA configuration process.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_FPGA_CONFIG_ISDONE.
+ * a1-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_BUSY or
+ * INTEL_SIP_SMC_STATUS_ERROR.
+ * a1-3: not used.
+ */
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_ISDONE 4
+#define INTEL_SIP_SMC_FPGA_CONFIG_ISDONE \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_ISDONE)
+
+/**
+ * Request INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM
+ *
+ * Sync call used by service driver at EL1 to query the physical address of
+ * memory block reserved by secure monitor software.
+ *
+ * Call register usage:
+ * a0:INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM.
+ * a1-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_STATUS_ERROR.
+ * a1: start of physical address of reserved memory block.
+ * a2: size of reserved memory block.
+ * a3: not used.
+ */
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_GET_MEM 5
+#define INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_GET_MEM)
+
+/**
+ * Request INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK
+ *
+ * For SMC loop-back mode only, used for internal integration, debugging
+ * or troubleshooting.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK.
+ * a1-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_STATUS_ERROR.
+ * a1-3: not used.
+ */
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_LOOPBACK 6
+#define INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_LOOPBACK)
+
+/**
+ * Request INTEL_SIP_SMC_REG_READ
+ *
+ * Read a protected register at EL3
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_REG_READ.
+ * a1: register address.
+ * a2-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_REG_ERROR.
+ * a1: value in the register
+ * a2-3: not used.
+ */
+#define INTEL_SIP_SMC_FUNCID_REG_READ 7
+#define INTEL_SIP_SMC_REG_READ \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_READ)
+
+/**
+ * Request INTEL_SIP_SMC_REG_WRITE
+ *
+ * Write a protected register at EL3
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_REG_WRITE.
+ * a1: register address
+ * a2: value to program into register.
+ * a3-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_REG_ERROR.
+ * a1-3: not used.
+ */
+#define INTEL_SIP_SMC_FUNCID_REG_WRITE 8
+#define INTEL_SIP_SMC_REG_WRITE \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_WRITE)
+
+/**
+ * Request INTEL_SIP_SMC_REG_UPDATE
+ *
+ * Update one or more bits in a protected register at EL3 using a
+ * read-modify-write operation.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_REG_UPDATE.
+ * a1: register address
+ * a2: write mask.
+ * a3: value to write.
+ * a4-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_REG_ERROR.
+ * a1-3: not used.
+ */
+#define INTEL_SIP_SMC_FUNCID_REG_UPDATE 9
+#define INTEL_SIP_SMC_REG_UPDATE \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_UPDATE)
+
+/**
+ * Request INTEL_SIP_SMC_RSU_STATUS
+ *
+ * Request remote status update boot log, call is synchronous.
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_STATUS
+ * a1-7 not used
+ *
+ * Return status
+ * a0: Current Image
+ * a1: Last Failing Image
+ * a2: Version | State
+ * a3: Error details | Error location
+ *
+ * Or
+ *
+ * a0: INTEL_SIP_SMC_RSU_ERROR
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_STATUS 11
+#define INTEL_SIP_SMC_RSU_STATUS \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_STATUS)
+
+/**
+ * Request INTEL_SIP_SMC_RSU_UPDATE
+ *
+ * Request to set the offset of the bitstream to boot after reboot, call
+ * is synchronous.
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_UPDATE
+ * a1 64bit physical address of the configuration data memory in flash
+ * a2-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_UPDATE 12
+#define INTEL_SIP_SMC_RSU_UPDATE \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_UPDATE)
+
+/**
+ * Request INTEL_SIP_SMC_ECC_DBE
+ *
+ * Sync call used by service driver at EL1 to alert EL3 that a Double
+ * Bit ECC error has occurred.
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_ECC_DBE
+ * a1 SysManager Double Bit Error value
+ * a2-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ */
+#define INTEL_SIP_SMC_FUNCID_ECC_DBE 13
+#define INTEL_SIP_SMC_ECC_DBE \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECC_DBE)
+
+/**
+ * Request INTEL_SIP_SMC_RSU_NOTIFY
+ *
+ * Sync call used by service driver at EL1 to report hard processor
+ * system execution stage to firmware
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_NOTIFY
+ * a1 32bit value representing hard processor system execution stage
+ * a2-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_NOTIFY 14
+#define INTEL_SIP_SMC_RSU_NOTIFY \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_NOTIFY)
+
+/**
+ * Request INTEL_SIP_SMC_RSU_RETRY_COUNTER
+ *
+ * Sync call used by service driver at EL1 to query RSU retry counter
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_RETRY_COUNTER
+ * a1-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 the retry counter
+ *
+ * Or
+ *
+ * a0 INTEL_SIP_SMC_RSU_ERROR
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_RETRY_COUNTER 15
+#define INTEL_SIP_SMC_RSU_RETRY_COUNTER \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_RETRY_COUNTER)
+
+/**
+ * Request INTEL_SIP_SMC_RSU_DCMF_VERSION
+ *
+ * Sync call used by service driver at EL1 to query DCMF (Decision
+ * Configuration Management Firmware) version from FW
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_DCMF_VERSION
+ * a1-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 dcmf1 | dcmf0
+ * a2 dcmf3 | dcmf2
+ *
+ * Or
+ *
+ * a0 INTEL_SIP_SMC_RSU_ERROR
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_DCMF_VERSION 16
+#define INTEL_SIP_SMC_RSU_DCMF_VERSION \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_DCMF_VERSION)
+
+/**
+ * Request INTEL_SIP_SMC_RSU_MAX_RETRY
+ *
+ * Sync call used by service driver at EL1 to query max retry value from FW
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_MAX_RETRY
+ * a1-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 max retry value
+ *
+ * Or
+ * a0 INTEL_SIP_SMC_RSU_ERROR
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY 18
+#define INTEL_SIP_SMC_RSU_MAX_RETRY \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY)
+
+/**
+ * Request INTEL_SIP_SMC_RSU_DCMF_STATUS
+ *
+ * Sync call used by service driver at EL1 to query DCMF status from FW
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_DCMF_STATUS
+ * a1-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 dcmf3 | dcmf2 | dcmf1 | dcmf0
+ *
+ * Or
+ *
+ * a0 INTEL_SIP_SMC_RSU_ERROR
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_DCMF_STATUS 20
+#define INTEL_SIP_SMC_RSU_DCMF_STATUS \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_DCMF_STATUS)
+
+/**
+ * Request INTEL_SIP_SMC_SERVICE_COMPLETED
+ * Sync call to check whether the secure world has completed the service
+ * request.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_SERVICE_COMPLETED
+ * a1: this register is optional. If used, it is the physical address for
+ * secure firmware to put output data
+ * a2: this register is optional. If used, it is the size of output data
+ * a3-a7: not used
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_ERROR,
+ * INTEL_SIP_SMC_REJECTED or INTEL_SIP_SMC_STATUS_BUSY
+ * a1: mailbox error if a0 is INTEL_SIP_SMC_STATUS_ERROR
+ * a2: physical address containing the process info
+ * for FCS certificate -- the data contains the certificate status
+ * for FCS cryption -- the data contains the actual data size FW processes
+ * a3: output data size
+ */
+#define INTEL_SIP_SMC_FUNCID_SERVICE_COMPLETED 30
+#define INTEL_SIP_SMC_SERVICE_COMPLETED \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_SERVICE_COMPLETED)
+
+/**
+ * Request INTEL_SIP_SMC_FIRMWARE_VERSION
+ *
+ * Sync call used to query the version of running firmware
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FIRMWARE_VERSION
+ * a1-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_STATUS_ERROR
+ * a1 running firmware version
+ */
+#define INTEL_SIP_SMC_FUNCID_FIRMWARE_VERSION 31
+#define INTEL_SIP_SMC_FIRMWARE_VERSION \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FIRMWARE_VERSION)
+
+/**
+ * Request INTEL_SIP_SMC_SVC_VERSION
+ *
+ * Sync call used to query the SIP SMC API Version
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_SVC_VERSION
+ * a1-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 Major
+ * a2 Minor
+ */
+#define INTEL_SIP_SMC_SVC_FUNCID_VERSION 512
+#define INTEL_SIP_SMC_SVC_VERSION \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_SVC_FUNCID_VERSION)
+
+/**
+ * SMC call protocol for FPGA Crypto Service (FCS)
+ * FUNCID starts from 90
+ */
+
+/**
+ * Request INTEL_SIP_SMC_FCS_RANDOM_NUMBER
+ *
+ * Sync call used to query the random number generated by the firmware
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_RANDOM_NUMBER
+ * a1 the physical address for firmware to write generated random data
+ * a2-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FCS_ERROR or
+ * INTEL_SIP_SMC_FCS_REJECTED
+ * a1 mailbox error
+ * a2 the physical address of generated random number
+ * a3 size
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_RANDOM_NUMBER 90
+#define INTEL_SIP_SMC_FCS_RANDOM_NUMBER \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_RANDOM_NUMBER)
+
+/**
+ * Request INTEL_SIP_SMC_FCS_CRYPTION
+ * Async call for data encryption and HMAC signature generation, or for
+ * data decryption and HMAC verification.
+ *
+ * Call INTEL_SIP_SMC_SERVICE_COMPLETED to get the output encrypted or
+ * decrypted data
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_CRYPTION
+ * a1 cryption mode (1 for encryption and 0 for decryption)
+ * a2 physical address of the data to be encrypted or decrypted
+ * a3 input data size
+ * a4 physical address which will hold the encrypted or decrypted output data
+ * a5 output data size
+ * a6-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_ERROR or
+ * INTEL_SIP_SMC_STATUS_REJECTED
+ * a1-3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_CRYPTION 91
+#define INTEL_SIP_SMC_FCS_CRYPTION \
+ INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_CRYPTION)
+
+/**
+ * Request INTEL_SIP_SMC_FCS_SERVICE_REQUEST
+ * Async call for authentication service of HPS software
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_SERVICE_REQUEST
+ * a1 the physical address of data block
+ * a2 size of data block
+ * a3-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_ERROR or
+ * INTEL_SIP_SMC_REJECTED
+ * a1-a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_SERVICE_REQUEST 92
+#define INTEL_SIP_SMC_FCS_SERVICE_REQUEST \
+ INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_SERVICE_REQUEST)
+
+/**
+ * Request INTEL_SIP_SMC_FCS_SEND_CERTIFICATE
+ * Sync call to send a signed certificate
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_SEND_CERTIFICATE
+ * a1 the physical address of CERTIFICATE block
+ * a2 size of data block
+ * a3-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_FCS_REJECTED
+ * a1-a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_SEND_CERTIFICATE 93
+#define INTEL_SIP_SMC_FCS_SEND_CERTIFICATE \
+ INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_SEND_CERTIFICATE)
+
+/**
+ * Request INTEL_SIP_SMC_FCS_GET_PROVISION_DATA
+ * Sync call to dump all the fuses and key hashes
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_GET_PROVISION_DATA
+ * a1 the physical address for firmware to write structure of fuse and
+ * key hashes
+ * a2-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FCS_ERROR or
+ * INTEL_SIP_SMC_FCS_REJECTED
+ * a1 mailbox error
+ * a2 physical address for the structure of fuse and key hashes
+ * a3 the size of structure
+ *
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_GET_PROVISION_DATA 94
+#define INTEL_SIP_SMC_FCS_GET_PROVISION_DATA \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_GET_PROVISION_DATA)
+
+#endif
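In-kernel users normally reach these function IDs through the Stratix10 service layer driver rather than issuing SMCs directly, but a bare arm_smccc_smc() sketch makes the register protocol above concrete (illustrative only):

#include <linux/arm-smccc.h>
#include <linux/firmware/intel/stratix10-smc.h>

/* Sketch: query the reserved FPGA configuration memory block. */
static int example_get_config_mem(unsigned long *base, unsigned long *size)
{
	struct arm_smccc_res res;

	arm_smccc_smc(INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM,
		      0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != INTEL_SIP_SMC_STATUS_OK)
		return -EIO;

	*base = res.a1;	/* start of the reserved block */
	*size = res.a2;	/* size of the reserved block */
	return 0;
}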
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
new file mode 100644
index 000000000000..0c16037fd08d
--- /dev/null
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017-2018, Intel Corporation
+ */
+
+#ifndef __STRATIX10_SVC_CLIENT_H
+#define __STRATIX10_SVC_CLIENT_H
+
+/*
+ * Client names supported by the service layer driver
+ *
+ * fpga: for FPGA configuration
+ * rsu: for remote status update
+ * fcs: for FPGA crypto service
+ */
+#define SVC_CLIENT_FPGA "fpga"
+#define SVC_CLIENT_RSU "rsu"
+#define SVC_CLIENT_FCS "fcs"
+
+/*
+ * Status of the sent command, in bit number
+ *
+ * SVC_STATUS_OK:
+ * Secure firmware accepts the request issued by one of service clients.
+ *
+ * SVC_STATUS_BUFFER_SUBMITTED:
+ * Service client successfully submits data buffer to secure firmware.
+ *
+ * SVC_STATUS_BUFFER_DONE:
+ * Secure firmware completes data process, ready to accept the
+ * next WRITE transaction.
+ *
+ * SVC_STATUS_COMPLETED:
+ * Secure firmware completes service request successfully. In case of
+ * FPGA configuration, FPGA should be in user mode.
+ *
+ * SVC_STATUS_BUSY:
+ * Service request is still in process.
+ *
+ * SVC_STATUS_ERROR:
+ * Error encountered while processing the service request.
+ *
+ * SVC_STATUS_NO_SUPPORT:
+ * Secure firmware doesn't support requested features such as RSU retry
+ * or RSU notify.
+ *
+ * SVC_STATUS_INVALID_PARAM:
+ * Service request rejected due to an invalid parameter.
+ */
+#define SVC_STATUS_OK 0
+#define SVC_STATUS_BUFFER_SUBMITTED 1
+#define SVC_STATUS_BUFFER_DONE 2
+#define SVC_STATUS_COMPLETED 3
+#define SVC_STATUS_BUSY 4
+#define SVC_STATUS_ERROR 5
+#define SVC_STATUS_NO_SUPPORT 6
+#define SVC_STATUS_INVALID_PARAM 7
+
+/*
+ * Flag bit for COMMAND_RECONFIG
+ *
+ * COMMAND_RECONFIG_FLAG_PARTIAL:
+ * Set to FPGA configuration type (full or partial).
+ */
+#define COMMAND_RECONFIG_FLAG_PARTIAL 0
+
+/*
+ * Timeout settings for service clients:
+ * timeout value used in Stratix10 FPGA manager driver.
+ * timeout value used in RSU driver
+ */
+#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 300
+#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 720
+#define SVC_RSU_REQUEST_TIMEOUT_MS 300
+#define SVC_FCS_REQUEST_TIMEOUT_MS 2000
+#define SVC_COMPLETED_TIMEOUT_MS 30000
+
+struct stratix10_svc_chan;
+
+/**
+ * enum stratix10_svc_command_code - supported service commands
+ *
+ * @COMMAND_NOOP: do 'dummy' request for integration/debug/trouble-shooting
+ *
+ * @COMMAND_RECONFIG: ask for FPGA configuration preparation, return status
+ * is SVC_STATUS_OK
+ *
+ * @COMMAND_RECONFIG_DATA_SUBMIT: submit buffer(s) of bit-stream data for the
+ * FPGA configuration, return status is SVC_STATUS_BUFFER_SUBMITTED or
+ * SVC_STATUS_ERROR
+ *
+ * @COMMAND_RECONFIG_DATA_CLAIM: check the status of the configuration, return
+ * status is SVC_STATUS_COMPLETED, or SVC_STATUS_BUSY, or SVC_STATUS_ERROR
+ *
+ * @COMMAND_RECONFIG_STATUS: check the status of the configuration, return
+ * status is SVC_STATUS_COMPLETED, or SVC_STATUS_BUSY, or SVC_STATUS_ERROR
+ *
+ * @COMMAND_RSU_STATUS: request remote system update boot log, return status
+ * is log data or SVC_STATUS_ERROR
+ *
+ * @COMMAND_RSU_UPDATE: set the offset of the bitstream to boot after reboot,
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_RSU_NOTIFY: report the status of hard processor system
+ * software to firmware, return status is SVC_STATUS_OK or
+ * SVC_STATUS_ERROR
+ *
+ * @COMMAND_RSU_RETRY: query firmware for the current image's retry counter,
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_RSU_MAX_RETRY: query firmware for the max retry value,
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_RSU_DCMF_VERSION: query firmware for the DCMF version, return status
+ * is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_POLL_SERVICE_STATUS: poll if the service request is complete,
+ * return status is SVC_STATUS_OK, SVC_STATUS_ERROR or SVC_STATUS_BUSY
+ *
+ * @COMMAND_FIRMWARE_VERSION: query running firmware version, return status
+ * is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_SMC_SVC_VERSION: Non-mailbox SMC SVC API Version,
+ * return status is SVC_STATUS_OK
+ *
+ * @COMMAND_RSU_DCMF_STATUS: query firmware for the DCMF status
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_REQUEST_SERVICE: request validation of image from firmware,
+ * return status is SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM
+ *
+ * @COMMAND_FCS_SEND_CERTIFICATE: send a certificate, return status is
+ * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_GET_PROVISION_DATA: read the provisioning data, return status is
+ * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_DATA_ENCRYPTION: encrypt the data, return status is
+ * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_DATA_DECRYPTION: decrypt the data, return status is
+ * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_RANDOM_NUMBER_GEN: generate a random number, return status
+ * is SVC_STATUS_OK, SVC_STATUS_ERROR
+ */
+enum stratix10_svc_command_code {
+ /* for FPGA */
+ COMMAND_NOOP = 0,
+ COMMAND_RECONFIG,
+ COMMAND_RECONFIG_DATA_SUBMIT,
+ COMMAND_RECONFIG_DATA_CLAIM,
+ COMMAND_RECONFIG_STATUS,
+ /* for RSU */
+ COMMAND_RSU_STATUS = 10,
+ COMMAND_RSU_UPDATE,
+ COMMAND_RSU_NOTIFY,
+ COMMAND_RSU_RETRY,
+ COMMAND_RSU_MAX_RETRY,
+ COMMAND_RSU_DCMF_VERSION,
+ COMMAND_RSU_DCMF_STATUS,
+ COMMAND_FIRMWARE_VERSION,
+ /* for FCS */
+ COMMAND_FCS_REQUEST_SERVICE = 20,
+ COMMAND_FCS_SEND_CERTIFICATE,
+ COMMAND_FCS_GET_PROVISION_DATA,
+ COMMAND_FCS_DATA_ENCRYPTION,
+ COMMAND_FCS_DATA_DECRYPTION,
+ COMMAND_FCS_RANDOM_NUMBER_GEN,
+ /* for general status poll */
+ COMMAND_POLL_SERVICE_STATUS = 40,
+ /* Non-mailbox SMC Call */
+ COMMAND_SMC_SVC_VERSION = 200,
+};
+
+/**
+ * struct stratix10_svc_client_msg - message sent by client to service
+ * @payload: starting address of the data to be processed
+ * @payload_length: to be processed data size in bytes
+ * @payload_output: starting address of processed data
+ * @payload_length_output: processed data size in bytes
+ * @command: service command
+ * @arg: args to be passed via registers and not physically mapped buffers
+ */
+struct stratix10_svc_client_msg {
+ void *payload;
+ size_t payload_length;
+ void *payload_output;
+ size_t payload_length_output;
+ enum stratix10_svc_command_code command;
+ u64 arg[3];
+};
+
+/**
+ * struct stratix10_svc_command_config_type - config type
+ * @flags: flag bit for the type of FPGA configuration
+ */
+struct stratix10_svc_command_config_type {
+ u32 flags;
+};
+
+/**
+ * struct stratix10_svc_cb_data - callback data structure from service layer
+ * @status: the status of sent command
+ * @kaddr1: address of 1st completed data block
+ * @kaddr2: address of 2nd completed data block
+ * @kaddr3: address of 3rd completed data block
+ */
+struct stratix10_svc_cb_data {
+ u32 status;
+ void *kaddr1;
+ void *kaddr2;
+ void *kaddr3;
+};
+
+/**
+ * struct stratix10_svc_client - service client structure
+ * @dev: the client device
+ * @receive_cb: callback to provide service client the received data
+ * @priv: client private data
+ */
+struct stratix10_svc_client {
+ struct device *dev;
+ void (*receive_cb)(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *cb_data);
+ void *priv;
+};
+
+/**
+ * stratix10_svc_request_channel_byname() - request service channel
+ * @client: identity of the client requesting the channel
+ * @name: supporting client name defined above
+ *
+ * Return: a pointer to channel assigned to the client on success,
+ * or ERR_PTR() on error.
+ */
+struct stratix10_svc_chan
+*stratix10_svc_request_channel_byname(struct stratix10_svc_client *client,
+ const char *name);
+
+/**
+ * stratix10_svc_free_channel() - free service channel.
+ * @chan: service channel to be freed
+ */
+void stratix10_svc_free_channel(struct stratix10_svc_chan *chan);
+
+/**
+ * stratix10_svc_allocate_memory() - allocate memory
+ * @chan: service channel assigned to the client
+ * @size: number of bytes client requests
+ *
+ * Service layer allocates the requested number of bytes from the memory
+ * pool for the client.
+ *
+ * Return: the starting address of allocated memory on success, or
+ * ERR_PTR() on error.
+ */
+void *stratix10_svc_allocate_memory(struct stratix10_svc_chan *chan,
+ size_t size);
+
+/**
+ * stratix10_svc_free_memory() - free allocated memory
+ * @chan: service channel assigned to the client
+ * @kaddr: starting address of the memory to be freed back to the pool
+ */
+void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr);
+
+/**
+ * stratix10_svc_send() - send a message to the remote
+ * @chan: service channel assigned to the client
+ * @msg: message data to be sent, in the format of
+ * struct stratix10_svc_client_msg
+ *
+ * Return: 0 for success, -ENOMEM or -ENOBUFS on error.
+ */
+int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg);
+
+/**
+ * stratix10_svc_done() - complete service request
+ * @chan: service channel assigned to the client
+ *
+ * This function is used by service client to inform service layer that
+ * client's service requests are completed, or there is an error in the
+ * request process.
+ */
+void stratix10_svc_done(struct stratix10_svc_chan *chan);
+#endif
+
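Putting the client API together, a minimal sketch of a client that opens a channel, sends one command and handles the callback (error handling trimmed; checking the status against BIT() follows the "in bit number" note above, and all example_* names are hypothetical):

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/firmware/intel/stratix10-svc-client.h>

static void example_receive_cb(struct stratix10_svc_client *client,
			       struct stratix10_svc_cb_data *cb_data)
{
	if (cb_data->status == BIT(SVC_STATUS_OK))
		dev_info(client->dev, "request accepted\n");
}

static int example_send_noop(struct stratix10_svc_client *client)
{
	struct stratix10_svc_client_msg msg = { .command = COMMAND_NOOP };
	struct stratix10_svc_chan *chan;
	int ret;

	client->receive_cb = example_receive_cb;
	chan = stratix10_svc_request_channel_byname(client, SVC_CLIENT_FPGA);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = stratix10_svc_send(chan, &msg);	/* result arrives in receive_cb */
	stratix10_svc_done(chan);
	stratix10_svc_free_channel(chan);
	return ret;
}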
diff --git a/include/linux/firmware/mediatek/mtk-adsp-ipc.h b/include/linux/firmware/mediatek/mtk-adsp-ipc.h
new file mode 100644
index 000000000000..28fd313340b8
--- /dev/null
+++ b/include/linux/firmware/mediatek/mtk-adsp-ipc.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef MTK_ADSP_IPC_H
+#define MTK_ADSP_IPC_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox_client.h>
+
+#define MTK_ADSP_IPC_REQ 0
+#define MTK_ADSP_IPC_RSP 1
+#define MTK_ADSP_IPC_OP_REQ 0x1
+#define MTK_ADSP_IPC_OP_RSP 0x2
+
+enum {
+ MTK_ADSP_MBOX_REPLY,
+ MTK_ADSP_MBOX_REQUEST,
+ MTK_ADSP_MBOX_NUM,
+};
+
+struct mtk_adsp_ipc;
+
+struct mtk_adsp_ipc_ops {
+ void (*handle_reply)(struct mtk_adsp_ipc *ipc);
+ void (*handle_request)(struct mtk_adsp_ipc *ipc);
+};
+
+struct mtk_adsp_chan {
+ struct mtk_adsp_ipc *ipc;
+ struct mbox_client cl;
+ struct mbox_chan *ch;
+ char *name;
+ int idx;
+};
+
+struct mtk_adsp_ipc {
+ struct mtk_adsp_chan chans[MTK_ADSP_MBOX_NUM];
+ struct device *dev;
+ struct mtk_adsp_ipc_ops *ops;
+ void *private_data;
+};
+
+static inline void mtk_adsp_ipc_set_data(struct mtk_adsp_ipc *ipc, void *data)
+{
+ if (!ipc)
+ return;
+
+ ipc->private_data = data;
+}
+
+static inline void *mtk_adsp_ipc_get_data(struct mtk_adsp_ipc *ipc)
+{
+ if (!ipc)
+ return NULL;
+
+ return ipc->private_data;
+}
+
+int mtk_adsp_ipc_send(struct mtk_adsp_ipc *ipc, unsigned int idx, uint32_t op);
+
+#endif /* MTK_ADSP_IPC_H */
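A minimal sketch of a user of this interface: install reply/request handlers via the ops table, then kick the ADSP on the request channel (the example_* names are hypothetical):

#include <linux/firmware/mediatek/mtk-adsp-ipc.h>

static void example_handle_reply(struct mtk_adsp_ipc *ipc)
{
	/* the ADSP acknowledged our last request */
}

static void example_handle_request(struct mtk_adsp_ipc *ipc)
{
	/* the ADSP initiated a request towards the host */
}

static struct mtk_adsp_ipc_ops example_ops = {
	.handle_reply	= example_handle_reply,
	.handle_request	= example_handle_request,
};

static int example_kick_adsp(struct mtk_adsp_ipc *ipc)
{
	ipc->ops = &example_ops;
	return mtk_adsp_ipc_send(ipc, MTK_ADSP_IPC_REQ, MTK_ADSP_IPC_OP_REQ);
}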
diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h
index 37a5eaea69dd..95b0da2326a9 100644
--- a/include/linux/firmware/meson/meson_sm.h
+++ b/include/linux/firmware/meson/meson_sm.h
@@ -1,13 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016 Endless Mobile, Inc.
* Author: Carlo Caione <carlo@endlessm.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _MESON_SM_FW_H_
@@ -17,15 +11,21 @@ enum {
SM_EFUSE_READ,
SM_EFUSE_WRITE,
SM_EFUSE_USER_MAX,
+ SM_GET_CHIP_ID,
+ SM_A1_PWRC_SET,
+ SM_A1_PWRC_GET,
};
struct meson_sm_firmware;
-int meson_sm_call(unsigned int cmd_index, u32 *ret, u32 arg0, u32 arg1,
- u32 arg2, u32 arg3, u32 arg4);
-int meson_sm_call_write(void *buffer, unsigned int b_size, unsigned int cmd_index,
- u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
-int meson_sm_call_read(void *buffer, unsigned int bsize, unsigned int cmd_index,
- u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+int meson_sm_call(struct meson_sm_firmware *fw, unsigned int cmd_index,
+ u32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+int meson_sm_call_write(struct meson_sm_firmware *fw, void *buffer,
+ unsigned int b_size, unsigned int cmd_index, u32 arg0,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+int meson_sm_call_read(struct meson_sm_firmware *fw, void *buffer,
+ unsigned int bsize, unsigned int cmd_index, u32 arg0,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+struct meson_sm_firmware *meson_sm_get(struct device_node *firmware_node);
#endif /* _MESON_SM_FW_H_ */
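The reworked API is handle-based: callers first resolve a struct meson_sm_firmware via meson_sm_get() and pass it to every call. A sketch, assuming the consumer node references the firmware through a "secure-monitor" phandle as existing in-tree users do:

#include <linux/firmware/meson/meson_sm.h>
#include <linux/of.h>

static int example_read_chip_id(struct device_node *np, void *buf,
				unsigned int len)
{
	struct device_node *fw_np;
	struct meson_sm_firmware *fw;
	int ret;

	fw_np = of_parse_phandle(np, "secure-monitor", 0);
	if (!fw_np)
		return -ENODEV;

	fw = meson_sm_get(fw_np);
	of_node_put(fw_np);
	if (!fw)
		return -EPROBE_DEFER;	/* firmware driver not ready yet */

	ret = meson_sm_call_read(fw, buf, len, SM_GET_CHIP_ID, 0, 0, 0, 0, 0);
	return ret < 0 ? ret : 0;
}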
diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h
new file mode 100644
index 000000000000..250ea4efb7cb
--- /dev/null
+++ b/include/linux/firmware/qcom/qcom_scm.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2010-2015, 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ */
+#ifndef __QCOM_SCM_H
+#define __QCOM_SCM_H
+
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/cpumask.h>
+
+#include <dt-bindings/firmware/qcom,scm.h>
+
+#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
+#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0
+#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1
+#define QCOM_SCM_HDCP_MAX_REQ_CNT 5
+
+struct qcom_scm_hdcp_req {
+ u32 addr;
+ u32 val;
+};
+
+struct qcom_scm_vmperm {
+ int vmid;
+ int perm;
+};
+
+enum qcom_scm_ocmem_client {
+ QCOM_SCM_OCMEM_UNUSED_ID = 0x0,
+ QCOM_SCM_OCMEM_GRAPHICS_ID,
+ QCOM_SCM_OCMEM_VIDEO_ID,
+ QCOM_SCM_OCMEM_LP_AUDIO_ID,
+ QCOM_SCM_OCMEM_SENSORS_ID,
+ QCOM_SCM_OCMEM_OTHER_OS_ID,
+ QCOM_SCM_OCMEM_DEBUG_ID,
+};
+
+enum qcom_scm_sec_dev_id {
+ QCOM_SCM_MDSS_DEV_ID = 1,
+ QCOM_SCM_OCMEM_DEV_ID = 5,
+ QCOM_SCM_PCIE0_DEV_ID = 11,
+ QCOM_SCM_PCIE1_DEV_ID = 12,
+ QCOM_SCM_GFX_DEV_ID = 18,
+ QCOM_SCM_UFS_DEV_ID = 19,
+ QCOM_SCM_ICE_DEV_ID = 20,
+};
+
+enum qcom_scm_ice_cipher {
+ QCOM_SCM_ICE_CIPHER_AES_128_XTS = 0,
+ QCOM_SCM_ICE_CIPHER_AES_128_CBC = 1,
+ QCOM_SCM_ICE_CIPHER_AES_256_XTS = 3,
+ QCOM_SCM_ICE_CIPHER_AES_256_CBC = 4,
+};
+
+#define QCOM_SCM_PERM_READ 0x4
+#define QCOM_SCM_PERM_WRITE 0x2
+#define QCOM_SCM_PERM_EXEC 0x1
+#define QCOM_SCM_PERM_RW (QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE)
+#define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC)
+
+extern bool qcom_scm_is_available(void);
+
+extern int qcom_scm_set_cold_boot_addr(void *entry);
+extern int qcom_scm_set_warm_boot_addr(void *entry);
+extern void qcom_scm_cpu_power_down(u32 flags);
+extern int qcom_scm_set_remote_state(u32 state, u32 id);
+
+struct qcom_scm_pas_metadata {
+ void *ptr;
+ dma_addr_t phys;
+ ssize_t size;
+};
+
+extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
+ size_t size,
+ struct qcom_scm_pas_metadata *ctx);
+void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx);
+extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
+ phys_addr_t size);
+extern int qcom_scm_pas_auth_and_reset(u32 peripheral);
+extern int qcom_scm_pas_shutdown(u32 peripheral);
+extern bool qcom_scm_pas_supported(u32 peripheral);
+
+extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
+extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
+
+extern bool qcom_scm_restore_sec_cfg_available(void);
+extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
+extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
+extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
+extern int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size);
+extern int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
+ u32 cp_nonpixel_start,
+ u32 cp_nonpixel_size);
+extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+ u64 *src,
+ const struct qcom_scm_vmperm *newvm,
+ unsigned int dest_cnt);
+
+extern bool qcom_scm_ocmem_lock_available(void);
+extern int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
+ u32 size, u32 mode);
+extern int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset,
+ u32 size);
+
+extern bool qcom_scm_ice_available(void);
+extern int qcom_scm_ice_invalidate_key(u32 index);
+extern int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
+ enum qcom_scm_ice_cipher cipher,
+ u32 data_unit_size);
+
+extern bool qcom_scm_hdcp_available(void);
+extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
+ u32 *resp);
+
+extern int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt);
+extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en);
+
+extern int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
+ u64 limit_node, u32 node_id, u64 version);
+extern int qcom_scm_lmh_profile_change(u32 profile_id);
+extern bool qcom_scm_lmh_dcvsh_available(void);
+
+#endif
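A sketch of the typical assign-memory flow: check availability first, then hand a physically contiguous region to another VM. The VMID values come from the dt-bindings header included above; the MSS target here is only an illustrative choice.

#include <linux/bits.h>
#include <linux/firmware/qcom/qcom_scm.h>

static int example_assign_to_mss(phys_addr_t addr, size_t size)
{
	struct qcom_scm_vmperm newvm = {
		.vmid = QCOM_SCM_VMID_MSS_MSA,
		.perm = QCOM_SCM_PERM_RW,
	};
	u64 src = BIT(QCOM_SCM_VMID_HLOS);	/* current owner bitmap */

	if (!qcom_scm_is_available())
		return -EPROBE_DEFER;

	return qcom_scm_assign_mem(addr, size, &src, &newvm, 1);
}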
diff --git a/include/linux/firmware/trusted_foundations.h b/include/linux/firmware/trusted_foundations.h
new file mode 100644
index 000000000000..931b6c5c72df
--- /dev/null
+++ b/include/linux/firmware/trusted_foundations.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2013, NVIDIA Corporation.
+ */
+
+/*
+ * Support for the Trusted Foundations secure monitor.
+ *
+ * Trusted Foundations is a secure monitor that comes active on some ARM
+ * consumer devices (most Tegra-based devices sold on the market are
+ * affected). On such devices, some basic operations, like setting the CPU
+ * reset vector, can only be performed through SMC calls to the secure
+ * monitor. The calls are completely specific to Trusted Foundations, and
+ * do *not* follow the SMC calling convention or the PSCI standard.
+ */
+
+#ifndef __FIRMWARE_TRUSTED_FOUNDATIONS_H
+#define __FIRMWARE_TRUSTED_FOUNDATIONS_H
+
+#include <linux/printk.h>
+#include <linux/bug.h>
+#include <linux/of.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/outercache.h>
+
+#define TF_PM_MODE_LP0 0
+#define TF_PM_MODE_LP1 1
+#define TF_PM_MODE_LP1_NO_MC_CLK 2
+#define TF_PM_MODE_LP2 3
+#define TF_PM_MODE_LP2_NOFLUSH_L2 4
+#define TF_PM_MODE_NONE 5
+
+struct trusted_foundations_platform_data {
+ unsigned int version_major;
+ unsigned int version_minor;
+};
+
+#if IS_ENABLED(CONFIG_TRUSTED_FOUNDATIONS)
+
+void register_trusted_foundations(struct trusted_foundations_platform_data *pd);
+void of_register_trusted_foundations(void);
+bool trusted_foundations_registered(void);
+
+#else /* CONFIG_TRUSTED_FOUNDATIONS */
+static inline void tf_dummy_write_sec(unsigned long val, unsigned int reg)
+{
+}
+
+static inline void register_trusted_foundations(
+ struct trusted_foundations_platform_data *pd)
+{
+ /*
+ * If the system requires TF and we cannot provide it, continue booting
+ * but disable features that cannot be provided.
+ */
+ pr_err("No support for Trusted Foundations, continuing in degraded mode.\n");
+ pr_err("Secondary processors as well as CPU PM will be disabled.\n");
+#if IS_ENABLED(CONFIG_CACHE_L2X0)
+ pr_err("L2X0 cache will be kept disabled.\n");
+ outer_cache.write_sec = tf_dummy_write_sec;
+#endif
+#if IS_ENABLED(CONFIG_SMP)
+ setup_max_cpus = 0;
+#endif
+ cpu_idle_poll_ctrl(true);
+}
+
+static inline void of_register_trusted_foundations(void)
+{
+ struct device_node *np = of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations");
+
+ if (!np)
+ return;
+ of_node_put(np);
+	/*
+	 * If the device tree indicates the target should enable TF but the
+	 * kernel does not support it, register anyway: the system will
+	 * continue in degraded mode, as it won't be able to do much otherwise.
+	 */
+ register_trusted_foundations(NULL);
+}
+
+static inline bool trusted_foundations_registered(void)
+{
+ return false;
+}
+#endif /* CONFIG_TRUSTED_FOUNDATIONS */
+
+#endif
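Usage from platform code is a single call during early init; a sketch (the example_* name is hypothetical):

#include <linux/firmware/trusted_foundations.h>
#include <linux/printk.h>

static void __init example_platform_init(void)
{
	/* Registers the TF ops if the DT advertises the secure monitor. */
	of_register_trusted_foundations();

	if (!trusted_foundations_registered())
		pr_info("booting without Trusted Foundations\n");
}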
diff --git a/include/linux/firmware/xlnx-event-manager.h b/include/linux/firmware/xlnx-event-manager.h
new file mode 100644
index 000000000000..82e8254b0f80
--- /dev/null
+++ b/include/linux/firmware/xlnx-event-manager.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _FIRMWARE_XLNX_EVENT_MANAGER_H_
+#define _FIRMWARE_XLNX_EVENT_MANAGER_H_
+
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define CB_MAX_PAYLOAD_SIZE	(4U)	/* payload holds at most four 32-bit words */
+
+/************************** Exported Function *****************************/
+
+typedef void (*event_cb_func_t)(const u32 *payload, void *data);
+
+#if IS_REACHABLE(CONFIG_XLNX_EVENT_MANAGER)
+int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, const bool wake,
+ event_cb_func_t cb_fun, void *data);
+
+int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, event_cb_func_t cb_fun, void *data);
+#else
+static inline int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, const bool wake,
+ event_cb_func_t cb_fun, void *data)
+{
+ return -ENODEV;
+}
+
+static inline int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, event_cb_func_t cb_fun, void *data)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* _FIRMWARE_XLNX_EVENT_MANAGER_H_ */
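A sketch of a consumer: subscribe a callback to one of the PMC error-event nodes defined in xlnx-zynqmp.h (the 0x1 event mask and the example_* names are illustrative values):

#include <linux/firmware/xlnx-event-manager.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/printk.h>

static void example_event_cb(const u32 *payload, void *data)
{
	pr_info("xlnx event payload: 0x%x 0x%x\n", payload[0], payload[1]);
}

static int example_register(void)
{
	return xlnx_register_event(PM_NOTIFY_CB, EVENT_ERROR_PMC_ERR1,
				   0x1, false, example_event_cb, NULL);
}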
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
new file mode 100644
index 000000000000..f5da51677069
--- /dev/null
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -0,0 +1,889 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx Zynq MPSoC Firmware layer
+ *
+ * Copyright (C) 2014-2021 Xilinx
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ * Davorin Mista <davorin.mista@aggios.com>
+ * Jolly Shah <jollys@xilinx.com>
+ * Rajan Vaja <rajanv@xilinx.com>
+ */
+
+#ifndef __FIRMWARE_ZYNQMP_H__
+#define __FIRMWARE_ZYNQMP_H__
+#include <linux/types.h>
+
+#include <linux/err.h>
+
+#define ZYNQMP_PM_VERSION_MAJOR 1
+#define ZYNQMP_PM_VERSION_MINOR 0
+
+#define ZYNQMP_PM_VERSION ((ZYNQMP_PM_VERSION_MAJOR << 16) | \
+ ZYNQMP_PM_VERSION_MINOR)
+
+#define ZYNQMP_TZ_VERSION_MAJOR 1
+#define ZYNQMP_TZ_VERSION_MINOR 0
+
+#define ZYNQMP_TZ_VERSION ((ZYNQMP_TZ_VERSION_MAJOR << 16) | \
+ ZYNQMP_TZ_VERSION_MINOR)
+
+/* SMC SIP service Call Function Identifier Prefix */
+#define PM_SIP_SVC 0xC2000000
+
+/* PM API versions */
+#define PM_API_VERSION_2 2
+
+/* ATF only commands */
+#define TF_A_PM_REGISTER_SGI 0xa04
+#define PM_GET_TRUSTZONE_VERSION 0xa03
+#define PM_SET_SUSPEND_MODE 0xa02
+#define GET_CALLBACK_DATA 0xa01
+
+/* Number of 32-bit values in payload */
+#define PAYLOAD_ARG_CNT 4U
+
+/* Number of arguments for a callback */
+#define CB_ARG_CNT 4
+
+/* Payload size (consists of callback API ID + arguments) */
+#define CB_PAYLOAD_SIZE (CB_ARG_CNT + 1)
+
+#define ZYNQMP_PM_MAX_QOS 100U
+
+#define GSS_NUM_REGS (4)
+
+/* Node capabilities */
+#define ZYNQMP_PM_CAPABILITY_ACCESS 0x1U
+#define ZYNQMP_PM_CAPABILITY_CONTEXT 0x2U
+#define ZYNQMP_PM_CAPABILITY_WAKEUP 0x4U
+#define ZYNQMP_PM_CAPABILITY_UNUSABLE 0x8U
+
+/* Loader commands */
+#define PM_LOAD_PDI 0x701
+#define PDI_SRC_DDR 0xF
+
+/*
+ * Firmware FPGA Manager flags
+ * XILINX_ZYNQMP_PM_FPGA_FULL: FPGA full reconfiguration
+ * XILINX_ZYNQMP_PM_FPGA_PARTIAL: FPGA partial reconfiguration
+ */
+#define XILINX_ZYNQMP_PM_FPGA_FULL 0x0U
+#define XILINX_ZYNQMP_PM_FPGA_PARTIAL BIT(0)
+
+/* FPGA Status Reg */
+#define XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET 7U
+#define XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG 0U
+
+/*
+ * Node IDs for the Error Events.
+ */
+#define EVENT_ERROR_PMC_ERR1 (0x28100000U)
+#define EVENT_ERROR_PMC_ERR2 (0x28104000U)
+#define EVENT_ERROR_PSM_ERR1 (0x28108000U)
+#define EVENT_ERROR_PSM_ERR2 (0x2810C000U)
+
+/* ZynqMP SD tap delay tuning */
+#define SD_ITAPDLY 0xFF180314
+#define SD_OTAPDLYSEL 0xFF180318
+
+enum pm_api_cb_id {
+ PM_INIT_SUSPEND_CB = 30,
+ PM_ACKNOWLEDGE_CB = 31,
+ PM_NOTIFY_CB = 32,
+};
+
+enum pm_api_id {
+ PM_GET_API_VERSION = 1,
+ PM_REGISTER_NOTIFIER = 5,
+ PM_FORCE_POWERDOWN = 8,
+ PM_REQUEST_WAKEUP = 10,
+ PM_SYSTEM_SHUTDOWN = 12,
+ PM_REQUEST_NODE = 13,
+ PM_RELEASE_NODE = 14,
+ PM_SET_REQUIREMENT = 15,
+ PM_RESET_ASSERT = 17,
+ PM_RESET_GET_STATUS = 18,
+ PM_MMIO_WRITE = 19,
+ PM_MMIO_READ = 20,
+ PM_PM_INIT_FINALIZE = 21,
+ PM_FPGA_LOAD = 22,
+ PM_FPGA_GET_STATUS = 23,
+ PM_GET_CHIPID = 24,
+ PM_SECURE_SHA = 26,
+ PM_PINCTRL_REQUEST = 28,
+ PM_PINCTRL_RELEASE = 29,
+ PM_PINCTRL_GET_FUNCTION = 30,
+ PM_PINCTRL_SET_FUNCTION = 31,
+ PM_PINCTRL_CONFIG_PARAM_GET = 32,
+ PM_PINCTRL_CONFIG_PARAM_SET = 33,
+ PM_IOCTL = 34,
+ PM_QUERY_DATA = 35,
+ PM_CLOCK_ENABLE = 36,
+ PM_CLOCK_DISABLE = 37,
+ PM_CLOCK_GETSTATE = 38,
+ PM_CLOCK_SETDIVIDER = 39,
+ PM_CLOCK_GETDIVIDER = 40,
+ PM_CLOCK_SETRATE = 41,
+ PM_CLOCK_GETRATE = 42,
+ PM_CLOCK_SETPARENT = 43,
+ PM_CLOCK_GETPARENT = 44,
+ PM_FPGA_READ = 46,
+ PM_SECURE_AES = 47,
+ PM_FEATURE_CHECK = 63,
+};
+
+/* PMU-FW return status codes */
+enum pm_ret_status {
+ XST_PM_SUCCESS = 0,
+ XST_PM_NO_FEATURE = 19,
+ XST_PM_INTERNAL = 2000,
+ XST_PM_CONFLICT = 2001,
+ XST_PM_NO_ACCESS = 2002,
+ XST_PM_INVALID_NODE = 2003,
+ XST_PM_DOUBLE_REQ = 2004,
+ XST_PM_ABORT_SUSPEND = 2005,
+ XST_PM_MULT_USER = 2008,
+};
+
+enum pm_ioctl_id {
+ IOCTL_GET_RPU_OPER_MODE = 0,
+ IOCTL_SET_RPU_OPER_MODE = 1,
+ IOCTL_RPU_BOOT_ADDR_CONFIG = 2,
+ IOCTL_TCM_COMB_CONFIG = 3,
+ IOCTL_SET_TAPDELAY_BYPASS = 4,
+ IOCTL_SD_DLL_RESET = 6,
+ IOCTL_SET_SD_TAPDELAY = 7,
+ IOCTL_SET_PLL_FRAC_MODE = 8,
+ IOCTL_GET_PLL_FRAC_MODE = 9,
+ IOCTL_SET_PLL_FRAC_DATA = 10,
+ IOCTL_GET_PLL_FRAC_DATA = 11,
+ IOCTL_WRITE_GGS = 12,
+ IOCTL_READ_GGS = 13,
+ IOCTL_WRITE_PGGS = 14,
+ IOCTL_READ_PGGS = 15,
+ /* Set healthy bit value */
+ IOCTL_SET_BOOT_HEALTH_STATUS = 17,
+ IOCTL_OSPI_MUX_SELECT = 21,
+ /* Register SGI to ATF */
+ IOCTL_REGISTER_SGI = 25,
+ /* Runtime feature configuration */
+ IOCTL_SET_FEATURE_CONFIG = 26,
+ IOCTL_GET_FEATURE_CONFIG = 27,
+ /* Dynamic SD/GEM configuration */
+ IOCTL_SET_SD_CONFIG = 30,
+ IOCTL_SET_GEM_CONFIG = 31,
+};
+
+enum pm_query_id {
+ PM_QID_INVALID = 0,
+ PM_QID_CLOCK_GET_NAME = 1,
+ PM_QID_CLOCK_GET_TOPOLOGY = 2,
+ PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS = 3,
+ PM_QID_CLOCK_GET_PARENTS = 4,
+ PM_QID_CLOCK_GET_ATTRIBUTES = 5,
+ PM_QID_PINCTRL_GET_NUM_PINS = 6,
+ PM_QID_PINCTRL_GET_NUM_FUNCTIONS = 7,
+ PM_QID_PINCTRL_GET_NUM_FUNCTION_GROUPS = 8,
+ PM_QID_PINCTRL_GET_FUNCTION_NAME = 9,
+ PM_QID_PINCTRL_GET_FUNCTION_GROUPS = 10,
+ PM_QID_PINCTRL_GET_PIN_GROUPS = 11,
+ PM_QID_CLOCK_GET_NUM_CLOCKS = 12,
+ PM_QID_CLOCK_GET_MAX_DIVISOR = 13,
+};
+
+enum rpu_oper_mode {
+ PM_RPU_MODE_LOCKSTEP = 0,
+ PM_RPU_MODE_SPLIT = 1,
+};
+
+enum rpu_boot_mem {
+ PM_RPU_BOOTMEM_LOVEC = 0,
+ PM_RPU_BOOTMEM_HIVEC = 1,
+};
+
+enum rpu_tcm_comb {
+ PM_RPU_TCM_SPLIT = 0,
+ PM_RPU_TCM_COMB = 1,
+};
+
+enum zynqmp_pm_reset_action {
+ PM_RESET_ACTION_RELEASE = 0,
+ PM_RESET_ACTION_ASSERT = 1,
+ PM_RESET_ACTION_PULSE = 2,
+};
+
+enum zynqmp_pm_reset {
+ ZYNQMP_PM_RESET_START = 1000,
+ ZYNQMP_PM_RESET_PCIE_CFG = ZYNQMP_PM_RESET_START,
+ ZYNQMP_PM_RESET_PCIE_BRIDGE = 1001,
+ ZYNQMP_PM_RESET_PCIE_CTRL = 1002,
+ ZYNQMP_PM_RESET_DP = 1003,
+ ZYNQMP_PM_RESET_SWDT_CRF = 1004,
+ ZYNQMP_PM_RESET_AFI_FM5 = 1005,
+ ZYNQMP_PM_RESET_AFI_FM4 = 1006,
+ ZYNQMP_PM_RESET_AFI_FM3 = 1007,
+ ZYNQMP_PM_RESET_AFI_FM2 = 1008,
+ ZYNQMP_PM_RESET_AFI_FM1 = 1009,
+ ZYNQMP_PM_RESET_AFI_FM0 = 1010,
+ ZYNQMP_PM_RESET_GDMA = 1011,
+ ZYNQMP_PM_RESET_GPU_PP1 = 1012,
+ ZYNQMP_PM_RESET_GPU_PP0 = 1013,
+ ZYNQMP_PM_RESET_GPU = 1014,
+ ZYNQMP_PM_RESET_GT = 1015,
+ ZYNQMP_PM_RESET_SATA = 1016,
+ ZYNQMP_PM_RESET_ACPU3_PWRON = 1017,
+ ZYNQMP_PM_RESET_ACPU2_PWRON = 1018,
+ ZYNQMP_PM_RESET_ACPU1_PWRON = 1019,
+ ZYNQMP_PM_RESET_ACPU0_PWRON = 1020,
+ ZYNQMP_PM_RESET_APU_L2 = 1021,
+ ZYNQMP_PM_RESET_ACPU3 = 1022,
+ ZYNQMP_PM_RESET_ACPU2 = 1023,
+ ZYNQMP_PM_RESET_ACPU1 = 1024,
+ ZYNQMP_PM_RESET_ACPU0 = 1025,
+ ZYNQMP_PM_RESET_DDR = 1026,
+ ZYNQMP_PM_RESET_APM_FPD = 1027,
+ ZYNQMP_PM_RESET_SOFT = 1028,
+ ZYNQMP_PM_RESET_GEM0 = 1029,
+ ZYNQMP_PM_RESET_GEM1 = 1030,
+ ZYNQMP_PM_RESET_GEM2 = 1031,
+ ZYNQMP_PM_RESET_GEM3 = 1032,
+ ZYNQMP_PM_RESET_QSPI = 1033,
+ ZYNQMP_PM_RESET_UART0 = 1034,
+ ZYNQMP_PM_RESET_UART1 = 1035,
+ ZYNQMP_PM_RESET_SPI0 = 1036,
+ ZYNQMP_PM_RESET_SPI1 = 1037,
+ ZYNQMP_PM_RESET_SDIO0 = 1038,
+ ZYNQMP_PM_RESET_SDIO1 = 1039,
+ ZYNQMP_PM_RESET_CAN0 = 1040,
+ ZYNQMP_PM_RESET_CAN1 = 1041,
+ ZYNQMP_PM_RESET_I2C0 = 1042,
+ ZYNQMP_PM_RESET_I2C1 = 1043,
+ ZYNQMP_PM_RESET_TTC0 = 1044,
+ ZYNQMP_PM_RESET_TTC1 = 1045,
+ ZYNQMP_PM_RESET_TTC2 = 1046,
+ ZYNQMP_PM_RESET_TTC3 = 1047,
+ ZYNQMP_PM_RESET_SWDT_CRL = 1048,
+ ZYNQMP_PM_RESET_NAND = 1049,
+ ZYNQMP_PM_RESET_ADMA = 1050,
+ ZYNQMP_PM_RESET_GPIO = 1051,
+ ZYNQMP_PM_RESET_IOU_CC = 1052,
+ ZYNQMP_PM_RESET_TIMESTAMP = 1053,
+ ZYNQMP_PM_RESET_RPU_R50 = 1054,
+ ZYNQMP_PM_RESET_RPU_R51 = 1055,
+ ZYNQMP_PM_RESET_RPU_AMBA = 1056,
+ ZYNQMP_PM_RESET_OCM = 1057,
+ ZYNQMP_PM_RESET_RPU_PGE = 1058,
+ ZYNQMP_PM_RESET_USB0_CORERESET = 1059,
+ ZYNQMP_PM_RESET_USB1_CORERESET = 1060,
+ ZYNQMP_PM_RESET_USB0_HIBERRESET = 1061,
+ ZYNQMP_PM_RESET_USB1_HIBERRESET = 1062,
+ ZYNQMP_PM_RESET_USB0_APB = 1063,
+ ZYNQMP_PM_RESET_USB1_APB = 1064,
+ ZYNQMP_PM_RESET_IPI = 1065,
+ ZYNQMP_PM_RESET_APM_LPD = 1066,
+ ZYNQMP_PM_RESET_RTC = 1067,
+ ZYNQMP_PM_RESET_SYSMON = 1068,
+ ZYNQMP_PM_RESET_AFI_FM6 = 1069,
+ ZYNQMP_PM_RESET_LPD_SWDT = 1070,
+ ZYNQMP_PM_RESET_FPD = 1071,
+ ZYNQMP_PM_RESET_RPU_DBG1 = 1072,
+ ZYNQMP_PM_RESET_RPU_DBG0 = 1073,
+ ZYNQMP_PM_RESET_DBG_LPD = 1074,
+ ZYNQMP_PM_RESET_DBG_FPD = 1075,
+ ZYNQMP_PM_RESET_APLL = 1076,
+ ZYNQMP_PM_RESET_DPLL = 1077,
+ ZYNQMP_PM_RESET_VPLL = 1078,
+ ZYNQMP_PM_RESET_IOPLL = 1079,
+ ZYNQMP_PM_RESET_RPLL = 1080,
+ ZYNQMP_PM_RESET_GPO3_PL_0 = 1081,
+ ZYNQMP_PM_RESET_GPO3_PL_1 = 1082,
+ ZYNQMP_PM_RESET_GPO3_PL_2 = 1083,
+ ZYNQMP_PM_RESET_GPO3_PL_3 = 1084,
+ ZYNQMP_PM_RESET_GPO3_PL_4 = 1085,
+ ZYNQMP_PM_RESET_GPO3_PL_5 = 1086,
+ ZYNQMP_PM_RESET_GPO3_PL_6 = 1087,
+ ZYNQMP_PM_RESET_GPO3_PL_7 = 1088,
+ ZYNQMP_PM_RESET_GPO3_PL_8 = 1089,
+ ZYNQMP_PM_RESET_GPO3_PL_9 = 1090,
+ ZYNQMP_PM_RESET_GPO3_PL_10 = 1091,
+ ZYNQMP_PM_RESET_GPO3_PL_11 = 1092,
+ ZYNQMP_PM_RESET_GPO3_PL_12 = 1093,
+ ZYNQMP_PM_RESET_GPO3_PL_13 = 1094,
+ ZYNQMP_PM_RESET_GPO3_PL_14 = 1095,
+ ZYNQMP_PM_RESET_GPO3_PL_15 = 1096,
+ ZYNQMP_PM_RESET_GPO3_PL_16 = 1097,
+ ZYNQMP_PM_RESET_GPO3_PL_17 = 1098,
+ ZYNQMP_PM_RESET_GPO3_PL_18 = 1099,
+ ZYNQMP_PM_RESET_GPO3_PL_19 = 1100,
+ ZYNQMP_PM_RESET_GPO3_PL_20 = 1101,
+ ZYNQMP_PM_RESET_GPO3_PL_21 = 1102,
+ ZYNQMP_PM_RESET_GPO3_PL_22 = 1103,
+ ZYNQMP_PM_RESET_GPO3_PL_23 = 1104,
+ ZYNQMP_PM_RESET_GPO3_PL_24 = 1105,
+ ZYNQMP_PM_RESET_GPO3_PL_25 = 1106,
+ ZYNQMP_PM_RESET_GPO3_PL_26 = 1107,
+ ZYNQMP_PM_RESET_GPO3_PL_27 = 1108,
+ ZYNQMP_PM_RESET_GPO3_PL_28 = 1109,
+ ZYNQMP_PM_RESET_GPO3_PL_29 = 1110,
+ ZYNQMP_PM_RESET_GPO3_PL_30 = 1111,
+ ZYNQMP_PM_RESET_GPO3_PL_31 = 1112,
+ ZYNQMP_PM_RESET_RPU_LS = 1113,
+ ZYNQMP_PM_RESET_PS_ONLY = 1114,
+ ZYNQMP_PM_RESET_PL = 1115,
+ ZYNQMP_PM_RESET_PS_PL0 = 1116,
+ ZYNQMP_PM_RESET_PS_PL1 = 1117,
+ ZYNQMP_PM_RESET_PS_PL2 = 1118,
+ ZYNQMP_PM_RESET_PS_PL3 = 1119,
+ ZYNQMP_PM_RESET_END = ZYNQMP_PM_RESET_PS_PL3
+};
+
+enum zynqmp_pm_suspend_reason {
+ SUSPEND_POWER_REQUEST = 201,
+ SUSPEND_ALERT = 202,
+ SUSPEND_SYSTEM_SHUTDOWN = 203,
+};
+
+enum zynqmp_pm_request_ack {
+ ZYNQMP_PM_REQUEST_ACK_NO = 1,
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING = 2,
+ ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING = 3,
+};
+
+enum pm_node_id {
+ NODE_SD_0 = 39,
+ NODE_SD_1 = 40,
+};
+
+enum tap_delay_type {
+ PM_TAPDELAY_INPUT = 0,
+ PM_TAPDELAY_OUTPUT = 1,
+};
+
+enum dll_reset_type {
+ PM_DLL_RESET_ASSERT = 0,
+ PM_DLL_RESET_RELEASE = 1,
+ PM_DLL_RESET_PULSE = 2,
+};
+
+enum pm_pinctrl_config_param {
+ PM_PINCTRL_CONFIG_SLEW_RATE = 0,
+ PM_PINCTRL_CONFIG_BIAS_STATUS = 1,
+ PM_PINCTRL_CONFIG_PULL_CTRL = 2,
+ PM_PINCTRL_CONFIG_SCHMITT_CMOS = 3,
+ PM_PINCTRL_CONFIG_DRIVE_STRENGTH = 4,
+ PM_PINCTRL_CONFIG_VOLTAGE_STATUS = 5,
+ PM_PINCTRL_CONFIG_TRI_STATE = 6,
+ PM_PINCTRL_CONFIG_MAX = 7,
+};
+
+enum pm_pinctrl_slew_rate {
+ PM_PINCTRL_SLEW_RATE_FAST = 0,
+ PM_PINCTRL_SLEW_RATE_SLOW = 1,
+};
+
+enum pm_pinctrl_bias_status {
+ PM_PINCTRL_BIAS_DISABLE = 0,
+ PM_PINCTRL_BIAS_ENABLE = 1,
+};
+
+enum pm_pinctrl_pull_ctrl {
+ PM_PINCTRL_BIAS_PULL_DOWN = 0,
+ PM_PINCTRL_BIAS_PULL_UP = 1,
+};
+
+enum pm_pinctrl_schmitt_cmos {
+ PM_PINCTRL_INPUT_TYPE_CMOS = 0,
+ PM_PINCTRL_INPUT_TYPE_SCHMITT = 1,
+};
+
+enum pm_pinctrl_drive_strength {
+ PM_PINCTRL_DRIVE_STRENGTH_2MA = 0,
+ PM_PINCTRL_DRIVE_STRENGTH_4MA = 1,
+ PM_PINCTRL_DRIVE_STRENGTH_8MA = 2,
+ PM_PINCTRL_DRIVE_STRENGTH_12MA = 3,
+};
+
+enum pm_pinctrl_tri_state {
+ PM_PINCTRL_TRI_STATE_DISABLE = 0,
+ PM_PINCTRL_TRI_STATE_ENABLE = 1,
+};
+
+enum zynqmp_pm_shutdown_type {
+ ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN = 0,
+ ZYNQMP_PM_SHUTDOWN_TYPE_RESET = 1,
+ ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY = 2,
+};
+
+enum zynqmp_pm_shutdown_subtype {
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM = 0,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY = 1,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM = 2,
+};
+
+enum tap_delay_signal_type {
+ PM_TAPDELAY_NAND_DQS_IN = 0,
+ PM_TAPDELAY_NAND_DQS_OUT = 1,
+ PM_TAPDELAY_QSPI = 2,
+ PM_TAPDELAY_MAX = 3,
+};
+
+enum tap_delay_bypass_ctrl {
+ PM_TAPDELAY_BYPASS_DISABLE = 0,
+ PM_TAPDELAY_BYPASS_ENABLE = 1,
+};
+
+enum ospi_mux_select_type {
+ PM_OSPI_MUX_SEL_DMA = 0,
+ PM_OSPI_MUX_SEL_LINEAR = 1,
+};
+
+enum pm_feature_config_id {
+ PM_FEATURE_INVALID = 0,
+ PM_FEATURE_OVERTEMP_STATUS = 1,
+ PM_FEATURE_OVERTEMP_VALUE = 2,
+ PM_FEATURE_EXTWDT_STATUS = 3,
+ PM_FEATURE_EXTWDT_VALUE = 4,
+};
+
+/**
+ * enum pm_sd_config_type - PM SD configuration.
+ * @SD_CONFIG_EMMC_SEL: To set SD_EMMC_SEL in CTRL_REG_SD and SD_SLOTTYPE
+ * @SD_CONFIG_BASECLK: To set SD_BASECLK in SD_CONFIG_REG1
+ * @SD_CONFIG_8BIT: To set SD_8BIT in SD_CONFIG_REG2
+ * @SD_CONFIG_FIXED: To set fixed config registers
+ */
+enum pm_sd_config_type {
+ SD_CONFIG_EMMC_SEL = 1,
+ SD_CONFIG_BASECLK = 2,
+ SD_CONFIG_8BIT = 3,
+ SD_CONFIG_FIXED = 4,
+};
+
+/**
+ * enum pm_gem_config_type - PM GEM configuration.
+ * @GEM_CONFIG_SGMII_MODE: To set GEM_SGMII_MODE in GEM_CLK_CTRL register
+ * @GEM_CONFIG_FIXED: To set fixed config registers
+ */
+enum pm_gem_config_type {
+ GEM_CONFIG_SGMII_MODE = 1,
+ GEM_CONFIG_FIXED = 2,
+};
+
+/**
+ * struct zynqmp_pm_query_data - PM query data
+ * @qid: query ID
+ * @arg1: Argument 1 of query data
+ * @arg2: Argument 2 of query data
+ * @arg3: Argument 3 of query data
+ */
+struct zynqmp_pm_query_data {
+ u32 qid;
+ u32 arg1;
+ u32 arg2;
+ u32 arg3;
+};
+
+int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
+ u32 arg2, u32 arg3, u32 *ret_payload);
+
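As an example of the query interface, a sketch that counts the clocks exposed by the firmware via zynqmp_pm_query_data() (declared just below); reading the count from ret_payload[1] mirrors how the in-tree clock driver interprets the reply.

static int example_count_clocks(u32 *nclocks)
{
	struct zynqmp_pm_query_data qdata = {
		.qid = PM_QID_CLOCK_GET_NUM_CLOCKS,
	};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	ret = zynqmp_pm_query_data(qdata, ret_payload);
	if (ret)
		return ret;

	*nclocks = ret_payload[1];
	return 0;
}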
+#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
+int zynqmp_pm_get_api_version(u32 *version);
+int zynqmp_pm_get_chipid(u32 *idcode, u32 *version);
+int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out);
+int zynqmp_pm_clock_enable(u32 clock_id);
+int zynqmp_pm_clock_disable(u32 clock_id);
+int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state);
+int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider);
+int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider);
+int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate);
+int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate);
+int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id);
+int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id);
+int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode);
+int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode);
+int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data);
+int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data);
+int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value);
+int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type);
+int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select);
+int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+ const enum zynqmp_pm_reset_action assert_flag);
+int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status);
+unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode);
+int zynqmp_pm_bootmode_write(u32 ps_mode);
+int zynqmp_pm_init_finalize(void);
+int zynqmp_pm_set_suspend_mode(u32 mode);
+int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
+ const u32 qos, const enum zynqmp_pm_request_ack ack);
+int zynqmp_pm_release_node(const u32 node);
+int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack);
+int zynqmp_pm_aes_engine(const u64 address, u32 *out);
+int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags);
+int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags);
+int zynqmp_pm_fpga_get_status(u32 *value);
+int zynqmp_pm_fpga_get_config_status(u32 *value);
+int zynqmp_pm_write_ggs(u32 index, u32 value);
+int zynqmp_pm_read_ggs(u32 index, u32 *value);
+int zynqmp_pm_write_pggs(u32 index, u32 value);
+int zynqmp_pm_read_pggs(u32 index, u32 *value);
+int zynqmp_pm_set_tapdelay_bypass(u32 index, u32 value);
+int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype);
+int zynqmp_pm_set_boot_health_status(u32 value);
+int zynqmp_pm_pinctrl_request(const u32 pin);
+int zynqmp_pm_pinctrl_release(const u32 pin);
+int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id);
+int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id);
+int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
+ u32 *value);
+int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
+ u32 value);
+int zynqmp_pm_load_pdi(const u32 src, const u64 address);
+int zynqmp_pm_register_notifier(const u32 node, const u32 event,
+ const u32 wake, const u32 enable);
+int zynqmp_pm_feature(const u32 api_id);
+int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id);
+int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value);
+int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload);
+int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset);
+int zynqmp_pm_force_pwrdwn(const u32 target,
+ const enum zynqmp_pm_request_ack ack);
+int zynqmp_pm_request_wake(const u32 node,
+ const bool set_addr,
+ const u64 address,
+ const enum zynqmp_pm_request_ack ack);
+int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode);
+int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode);
+int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode);
+int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value);
+int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config,
+ u32 value);
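+
+/*
+ * Usage sketch for the SD/GEM config helpers above, pairing with the
+ * enums defined earlier in this header; the node IDs are hypothetical:
+ *
+ *	zynqmp_pm_set_sd_config(sd_node_id, SD_CONFIG_EMMC_SEL, 1);
+ *	zynqmp_pm_set_gem_config(gem_node_id, GEM_CONFIG_SGMII_MODE, 1);
+ */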
+#else
+static inline int zynqmp_pm_get_api_version(u32 *version)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata,
+ u32 *out)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_enable(u32 clock_id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_disable(u32 clock_id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+ const enum zynqmp_pm_reset_action assert_flag)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
+ u32 *status)
+{
+ return -ENODEV;
+}
+
+static inline unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_bootmode_write(u32 ps_mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_init_finalize(void)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_suspend_mode(u32 mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_release_node(const u32 node)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_requirement(const u32 node,
+ const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_sha_hash(const u64 address, const u32 size,
+ const u32 flags)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_fpga_load(const u64 address, const u32 size,
+ const u32 flags)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_fpga_get_status(u32 *value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_fpga_get_config_status(u32 *value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_write_ggs(u32 index, u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_read_ggs(u32 index, u32 *value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_write_pggs(u32 index, u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_read_pggs(u32 index, u32 *value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_tapdelay_bypass(u32 index, u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_boot_health_status(u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_pinctrl_request(const u32 pin)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_pinctrl_release(const u32 pin)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
+ u32 *value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
+ u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_load_pdi(const u32 src, const u64 address)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_register_notifier(const u32 node, const u32 event,
+ const u32 wake, const u32 enable)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_feature(const u32 api_id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_feature_config(enum pm_feature_config_id id,
+ u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
+ u32 *payload)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_force_pwrdwn(const u32 target,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_request_wake(const u32 node,
+ const bool set_addr,
+ const u64 address,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_sd_config(u32 node,
+ enum pm_sd_config_type config,
+ u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_gem_config(u32 node,
+ enum pm_gem_config_type config,
+ u32 value)
+{
+ return -ENODEV;
+}
+
+#endif
+
+#endif /* __FIRMWARE_ZYNQMP_H__ */
diff --git a/include/linux/fixp-arith.h b/include/linux/fixp-arith.h
index d4686fe1cac7..e485fb0c1201 100644
--- a/include/linux/fixp-arith.h
+++ b/include/linux/fixp-arith.h
@@ -1,6 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _FIXP_ARITH_H
#define _FIXP_ARITH_H
+#include <linux/bug.h>
#include <linux/math64.h>
/*
@@ -11,19 +13,6 @@
*/
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so by
* e-mail - mail your message to <johann.deneux@gmail.com>
@@ -153,4 +142,23 @@ static inline s32 fixp_sin32_rad(u32 radians, u32 twopi)
#define fixp_cos32_rad(rad, twopi) \
fixp_sin32_rad(rad + twopi / 4, twopi)
+/**
+ * fixp_linear_interpolate() - interpolates a value from two known points
+ *
+ * @x0: x value of point 0
+ * @y0: y value of point 0
+ * @x1: x value of point 1
+ * @y1: y value of point 1
+ * @x: the linear interpolant
+ */
+static inline int fixp_linear_interpolate(int x0, int y0, int x1, int y1, int x)
+{
+ if (y0 == y1 || x == x0)
+ return y0;
+ if (x1 == x0 || x == x1)
+ return y1;
+
+ return y0 + ((y1 - y0) * (x - x0) / (x1 - x0));
+}
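+
+/*
+ * Worked example: interpolating between (x0, y0) = (0, 0) and
+ * (x1, y1) = (10, 100) at x = 4 yields 0 + (100 - 0) * (4 - 0) / (10 - 0),
+ * i.e. 40. Note the integer division truncates for non-exact points.
+ */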
+
#endif
diff --git a/include/linux/flat.h b/include/linux/flat.h
index 7d542dfd0def..83977c0ce3de 100644
--- a/include/linux/flat.h
+++ b/include/linux/flat.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2002-2003 David McCullough <davidm@snapgear.com>
* Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>
@@ -9,8 +10,41 @@
#ifndef _LINUX_FLAT_H
#define _LINUX_FLAT_H
-#include <uapi/linux/flat.h>
-#include <asm/flat.h>
+#define FLAT_VERSION 0x00000004L
+
+/*
+ * To make everything easier to port and manage cross platform
+ * development, all fields are in network byte order.
+ */
+
+struct flat_hdr {
+ char magic[4];
+ __be32 rev; /* version (as above) */
+	__be32 entry; /* Offset of first executable instruction
+			 within text segment from beginning of file */
+ __be32 data_start; /* Offset of data segment from beginning of
+ file */
+ __be32 data_end; /* Offset of end of data segment from beginning
+ of file */
+ __be32 bss_end; /* Offset of end of bss segment from beginning
+ of file */
+
+ /* (It is assumed that data_end through bss_end forms the bss segment.) */
+
+ __be32 stack_size; /* Size of stack, in bytes */
+ __be32 reloc_start; /* Offset of relocation records from beginning of
+ file */
+ __be32 reloc_count; /* Number of relocation records */
+ __be32 flags;
+ __be32 build_date; /* When the program/library was built */
+	__u32 filler[5]; /* Reserved, set to zero */
+};
+
+#define FLAT_FLAG_RAM 0x0001 /* load program entirely into RAM */
+#define FLAT_FLAG_GOTPIC 0x0002 /* program is PIC with GOT */
+#define FLAT_FLAG_GZIP 0x0004 /* all but the header is compressed */
+#define FLAT_FLAG_GZDATA 0x0008 /* only data/relocs are compressed (for XIP) */
+#define FLAT_FLAG_KTRACE 0x0010 /* output useful kernel trace for debugging */
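+
+/*
+ * Since all header fields are big-endian, a loader converts them before
+ * use; a minimal sketch (hdr is assumed to point at a mapped struct
+ * flat_hdr):
+ *
+ *	u32 entry = be32_to_cpu(hdr->entry);
+ *	u32 data_start = be32_to_cpu(hdr->data_start);
+ */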
/*
* While it would be nice to keep this header clean, users of older
@@ -21,28 +55,21 @@
* with the format above, except to fix bugs with old format support.
*/
-#include <asm/byteorder.h>
-
#define OLD_FLAT_VERSION 0x00000002L
#define OLD_FLAT_RELOC_TYPE_TEXT 0
#define OLD_FLAT_RELOC_TYPE_DATA 1
#define OLD_FLAT_RELOC_TYPE_BSS 2
typedef union {
- unsigned long value;
+ u32 value;
struct {
-# if defined(mc68000) && !defined(CONFIG_COLDFIRE)
- signed long offset : 30;
- unsigned long type : 2;
-# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */
+#if defined(__LITTLE_ENDIAN_BITFIELD) || \
+ (defined(mc68000) && !defined(CONFIG_COLDFIRE))
+ s32 offset : 30;
+ u32 type : 2;
# elif defined(__BIG_ENDIAN_BITFIELD)
- unsigned long type : 2;
- signed long offset : 30;
-# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */
-# elif defined(__LITTLE_ENDIAN_BITFIELD)
- signed long offset : 30;
- unsigned long type : 2;
-# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */
+ u32 type : 2;
+ s32 offset : 30;
# else
# error "Unknown bitfield order for flat files."
# endif
diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h
deleted file mode 100644
index 11366b3ff0b4..000000000000
--- a/include/linux/flex_array.h
+++ /dev/null
@@ -1,148 +0,0 @@
-#ifndef _FLEX_ARRAY_H
-#define _FLEX_ARRAY_H
-
-#include <linux/types.h>
-#include <linux/reciprocal_div.h>
-#include <asm/page.h>
-
-#define FLEX_ARRAY_PART_SIZE PAGE_SIZE
-#define FLEX_ARRAY_BASE_SIZE PAGE_SIZE
-
-struct flex_array_part;
-
-/*
- * This is meant to replace cases where an array-like
- * structure has gotten too big to fit into kmalloc()
- * and the developer is getting tempted to use
- * vmalloc().
- */
-
-struct flex_array {
- union {
- struct {
- int element_size;
- int total_nr_elements;
- int elems_per_part;
- struct reciprocal_value reciprocal_elems;
- struct flex_array_part *parts[];
- };
- /*
- * This little trick makes sure that
- * sizeof(flex_array) == PAGE_SIZE
- */
- char padding[FLEX_ARRAY_BASE_SIZE];
- };
-};
-
-/* Number of bytes left in base struct flex_array, excluding metadata */
-#define FLEX_ARRAY_BASE_BYTES_LEFT \
- (FLEX_ARRAY_BASE_SIZE - offsetof(struct flex_array, parts))
-
-/* Number of pointers in base to struct flex_array_part pages */
-#define FLEX_ARRAY_NR_BASE_PTRS \
- (FLEX_ARRAY_BASE_BYTES_LEFT / sizeof(struct flex_array_part *))
-
-/* Number of elements of size that fit in struct flex_array_part */
-#define FLEX_ARRAY_ELEMENTS_PER_PART(size) \
- (FLEX_ARRAY_PART_SIZE / size)
-
-/*
- * Defines a statically allocated flex array and ensures its parameters are
- * valid.
- */
-#define DEFINE_FLEX_ARRAY(__arrayname, __element_size, __total) \
- struct flex_array __arrayname = { { { \
- .element_size = (__element_size), \
- .total_nr_elements = (__total), \
- } } }; \
- static inline void __arrayname##_invalid_parameter(void) \
- { \
- BUILD_BUG_ON((__total) > FLEX_ARRAY_NR_BASE_PTRS * \
- FLEX_ARRAY_ELEMENTS_PER_PART(__element_size)); \
- }
-
-/**
- * flex_array_alloc() - Creates a flexible array.
- * @element_size: individual object size.
- * @total: maximum number of objects which can be stored.
- * @flags: GFP flags
- *
- * Return: Returns an object of structure flex_array.
- */
-struct flex_array *flex_array_alloc(int element_size, unsigned int total,
- gfp_t flags);
-
-/**
- * flex_array_prealloc() - Ensures that memory for the elements indexed in the
- * range defined by start and nr_elements has been allocated.
- * @fa: array to allocate memory to.
- * @start: start address
- * @nr_elements: number of elements to be allocated.
- * @flags: GFP flags
- *
- */
-int flex_array_prealloc(struct flex_array *fa, unsigned int start,
- unsigned int nr_elements, gfp_t flags);
-
-/**
- * flex_array_free() - Removes all elements of a flexible array.
- * @fa: array to be freed.
- */
-void flex_array_free(struct flex_array *fa);
-
-/**
- * flex_array_free_parts() - Removes all elements of a flexible array, but
- * leaves the array itself in place.
- * @fa: array to be emptied.
- */
-void flex_array_free_parts(struct flex_array *fa);
-
-/**
- * flex_array_put() - Stores data into a flexible array.
- * @fa: array where element is to be stored.
- * @element_nr: position to copy, must be less than the maximum specified when
- * the array was created.
- * @src: data source to be copied into the array.
- * @flags: GFP flags
- *
- * Return: Returns zero on success, a negative error code otherwise.
- */
-int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
- gfp_t flags);
-
-/**
- * flex_array_clear() - Clears an individual element in the array, sets the
- * given element to FLEX_ARRAY_FREE.
- * @element_nr: element position to clear.
- * @fa: array to which element to be cleared belongs.
- *
- * Return: Returns zero on success, -EINVAL otherwise.
- */
-int flex_array_clear(struct flex_array *fa, unsigned int element_nr);
-
-/**
- * flex_array_get() - Retrieves data into a flexible array.
- *
- * @element_nr: Element position to retrieve data from.
- * @fa: array from which data is to be retrieved.
- *
- * Return: Returns a pointer to the data element, or NULL if that
- * particular element has never been allocated.
- */
-void *flex_array_get(struct flex_array *fa, unsigned int element_nr);
-
-/**
- * flex_array_shrink() - Reduces the allocated size of an array.
- * @fa: array to shrink.
- *
- * Return: Returns number of pages of memory actually freed.
- *
- */
-int flex_array_shrink(struct flex_array *fa);
-
-#define flex_array_put_ptr(fa, nr, src, gfp) \
- flex_array_put(fa, nr, (void *)&(src), gfp)
-
-void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr);
-
-#endif /* _FLEX_ARRAY_H */
diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h
index 0d348e011a6e..3e378b1fb0bc 100644
--- a/include/linux/flex_proportions.h
+++ b/include/linux/flex_proportions.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Floating proportions with flexible aging period
*
@@ -82,9 +83,10 @@ struct fprop_local_percpu {
int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
-void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
-void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
- int max_frac);
+void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
+ long nr);
+void __fprop_add_percpu_max(struct fprop_global *p,
+ struct fprop_local_percpu *pl, int max_frac, long nr);
void fprop_fraction_percpu(struct fprop_global *p,
struct fprop_local_percpu *pl, unsigned long *numerator,
unsigned long *denominator);
@@ -95,7 +97,7 @@ void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
unsigned long flags;
local_irq_save(flags);
- __fprop_inc_percpu(p, pl);
+ __fprop_add_percpu(p, pl, 1);
local_irq_restore(flags);
}
diff --git a/include/linux/fmc-sdb.h b/include/linux/fmc-sdb.h
deleted file mode 100644
index 599bd6bab56d..000000000000
--- a/include/linux/fmc-sdb.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * This file is separate from sdb.h, because I want that one to remain
- * unchanged (as far as possible) from the official sdb distribution
- *
- * This file and associated functionality are a playground for me to
- * understand stuff which will later be implemented in more generic places.
- */
-#include <linux/sdb.h>
-
-/* This is the union of all currently defined types */
-union sdb_record {
- struct sdb_interconnect ic;
- struct sdb_device dev;
- struct sdb_bridge bridge;
- struct sdb_integration integr;
- struct sdb_empty empty;
- struct sdb_synthesis synthesis;
- struct sdb_repo_url repo_url;
-};
-
-struct fmc_device;
-
-/* Every sdb table is turned into this structure */
-struct sdb_array {
- int len;
- int level;
- unsigned long baseaddr;
- struct fmc_device *fmc; /* the device that hosts it */
- struct sdb_array *parent; /* NULL at root */
- union sdb_record *record; /* copies of the struct */
- struct sdb_array **subtree; /* only valid for bridge items */
-};
-
-extern int fmc_scan_sdb_tree(struct fmc_device *fmc, unsigned long address);
-extern void fmc_show_sdb_tree(const struct fmc_device *fmc);
-extern signed long fmc_find_sdb_device(struct sdb_array *tree, uint64_t vendor,
- uint32_t device, unsigned long *sz);
-extern int fmc_free_sdb_tree(struct fmc_device *fmc);
diff --git a/include/linux/fmc.h b/include/linux/fmc.h
deleted file mode 100644
index a5f0aa5c2a8d..000000000000
--- a/include/linux/fmc.h
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * Copyright (C) 2012 CERN (www.cern.ch)
- * Author: Alessandro Rubini <rubini@gnudd.com>
- *
- * Released according to the GNU GPL, version 2 or any later version.
- *
- * This work is part of the White Rabbit project, a research effort led
- * by CERN, the European Institute for Nuclear Research.
- */
-#ifndef __LINUX_FMC_H__
-#define __LINUX_FMC_H__
-#include <linux/types.h>
-#include <linux/moduleparam.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-
-struct fmc_device;
-struct fmc_driver;
-
-/*
- * This bus abstraction is developed separately from drivers, so we need
- * to check the version of the data structures we receive.
- */
-
-#define FMC_MAJOR 3
-#define FMC_MINOR 0
-#define FMC_VERSION ((FMC_MAJOR << 16) | FMC_MINOR)
-#define __FMC_MAJOR(x) ((x) >> 16)
-#define __FMC_MINOR(x) ((x) & 0xffff)
-
-/*
- * The device identification, as defined by the IPMI FRU (Field Replaceable
- * Unit) includes four different strings to describe the device. Here we
- * only match the "Board Manufacturer" and the "Board Product Name",
- * ignoring the "Board Serial Number" and "Board Part Number". All 4 are
- * expected to be strings, so they are treated as zero-terminated C strings.
- * Unspecified string (NULL) means "any", so if both are unspecified this
- * is a catch-all driver. So null entries are allowed and we use array
- * and length. This is unlike pci and usb that use null-terminated arrays
- */
-struct fmc_fru_id {
- char *manufacturer;
- char *product_name;
-};
-
-/*
- * If the FPGA is already programmed (think Etherbone or the second
- * SVEC slot), we can match on SDB devices in the memory image. This
- * match uses an array of devices that must all be present, and the
- * match is based on vendor and device only. Further checks are expected
- * to happen in the probe function. Zero means "any" and catch-all is allowed.
- */
-struct fmc_sdb_one_id {
- uint64_t vendor;
- uint32_t device;
-};
-struct fmc_sdb_id {
- struct fmc_sdb_one_id *cores;
- int cores_nr;
-};
-
-struct fmc_device_id {
- struct fmc_fru_id *fru_id;
- int fru_id_nr;
- struct fmc_sdb_id *sdb_id;
- int sdb_id_nr;
-};
-
-/* This sizes the module_param_array used by generic module parameters */
-#define FMC_MAX_CARDS 32
-
-/* The driver is a pretty simple thing */
-struct fmc_driver {
- unsigned long version;
- struct device_driver driver;
- int (*probe)(struct fmc_device *);
- int (*remove)(struct fmc_device *);
- const struct fmc_device_id id_table;
- /* What follows is for generic module parameters */
- int busid_n;
- int busid_val[FMC_MAX_CARDS];
- int gw_n;
- char *gw_val[FMC_MAX_CARDS];
-};
-#define to_fmc_driver(x) container_of((x), struct fmc_driver, driver)
-
-/* These are the generic parameters, that drivers may instantiate */
-#define FMC_PARAM_BUSID(_d) \
- module_param_array_named(busid, _d.busid_val, int, &_d.busid_n, 0444)
-#define FMC_PARAM_GATEWARE(_d) \
- module_param_array_named(gateware, _d.gw_val, charp, &_d.gw_n, 0444)
-
-/*
- * Drivers may need to configure gpio pins in the carrier. To read input
- * (a very uncommon operation, and definitely not in the hot paths), just
- * configure one gpio only and get 0 or 1 as retval of the config method
- */
-struct fmc_gpio {
- char *carrier_name; /* name or NULL for virtual pins */
- int gpio;
- int _gpio; /* internal use by the carrier */
- int mode; /* GPIOF_DIR_OUT etc, from <linux/gpio.h> */
- int irqmode; /* IRQF_TRIGGER_LOW and so on */
-};
-
-/* The numbering of gpio pins allows access to raw pins or virtual roles */
-#define FMC_GPIO_RAW(x) (x) /* 4096 of them */
-#define __FMC_GPIO_IS_RAW(x) ((x) < 0x1000)
-#define FMC_GPIO_IRQ(x) ((x) + 0x1000) /* 256 of them */
-#define FMC_GPIO_LED(x) ((x) + 0x1100) /* 256 of them */
-#define FMC_GPIO_KEY(x) ((x) + 0x1200) /* 256 of them */
-#define FMC_GPIO_TP(x) ((x) + 0x1300) /* 256 of them */
-#define FMC_GPIO_USER(x) ((x) + 0x1400) /* 256 of them */
-/* We may add SCL and SDA, or other roles if the need arises */
-
-/* GPIOF_DIR_IN etc are missing before 3.0. copy from <linux/gpio.h> */
-#ifndef GPIOF_DIR_IN
-# define GPIOF_DIR_OUT (0 << 0)
-# define GPIOF_DIR_IN (1 << 0)
-# define GPIOF_INIT_LOW (0 << 1)
-# define GPIOF_INIT_HIGH (1 << 1)
-#endif
-
-/*
- * The operations are offered by each carrier and should make driver
- * design completely independent of the carrier. Named GPIO pins may be
- * the exception.
- */
-struct fmc_operations {
- uint32_t (*read32)(struct fmc_device *fmc, int offset);
- void (*write32)(struct fmc_device *fmc, uint32_t value, int offset);
- int (*validate)(struct fmc_device *fmc, struct fmc_driver *drv);
- int (*reprogram)(struct fmc_device *f, struct fmc_driver *d, char *gw);
- int (*irq_request)(struct fmc_device *fmc, irq_handler_t h,
- char *name, int flags);
- void (*irq_ack)(struct fmc_device *fmc);
- int (*irq_free)(struct fmc_device *fmc);
- int (*gpio_config)(struct fmc_device *fmc, struct fmc_gpio *gpio,
- int ngpio);
- int (*read_ee)(struct fmc_device *fmc, int pos, void *d, int l);
- int (*write_ee)(struct fmc_device *fmc, int pos, const void *d, int l);
-};
-
-/* Prefer this helper rather than calling of fmc->reprogram directly */
-extern int fmc_reprogram(struct fmc_device *f, struct fmc_driver *d, char *gw,
- int sdb_entry);
-
-/*
- * The device reports all information needed to access hw.
- *
- * If we have eeprom_len and not contents, the core reads it.
- * Then, parsing of identifiers is done by the core which fills fmc_fru_id..
- * Similarly a device that must be matched based on SDB cores must
- * fill the entry point and the core will scan the bus (FIXME: sdb match)
- */
-struct fmc_device {
- unsigned long version;
- unsigned long flags;
- struct module *owner; /* char device must pin it */
- struct fmc_fru_id id; /* for EEPROM-based match */
- struct fmc_operations *op; /* carrier-provided */
- int irq; /* according to host bus. 0 == none */
- int eeprom_len; /* Usually 8kB, may be less */
- int eeprom_addr; /* 0x50, 0x52 etc */
- uint8_t *eeprom; /* Full contents or leading part */
- char *carrier_name; /* "SPEC" or similar, for special use */
- void *carrier_data; /* "struct spec *" or equivalent */
- __iomem void *fpga_base; /* May be NULL (Etherbone) */
- __iomem void *slot_base; /* Set by the driver */
- struct fmc_device **devarray; /* Allocated by the bus */
- int slot_id; /* Index in the slot array */
- int nr_slots; /* Number of slots in this carrier */
- unsigned long memlen; /* Used for the char device */
- struct device dev; /* For Linux use */
- struct device *hwdev; /* The underlying hardware device */
- unsigned long sdbfs_entry;
- struct sdb_array *sdb;
- uint32_t device_id; /* Filled by the device */
- char *mezzanine_name; /* Defaults to ``fmc'' */
- void *mezzanine_data;
-};
-#define to_fmc_device(x) container_of((x), struct fmc_device, dev)
-
-#define FMC_DEVICE_HAS_GOLDEN 1
-#define FMC_DEVICE_HAS_CUSTOM 2
-#define FMC_DEVICE_NO_MEZZANINE 4
-#define FMC_DEVICE_MATCH_SDB 8 /* fmc-core must scan sdb in fpga */
-
-/*
- * If fpga_base can be used, the carrier offers no readl/writel methods, and
- * this expands to a single, fast, I/O access.
- */
-static inline uint32_t fmc_readl(struct fmc_device *fmc, int offset)
-{
- if (unlikely(fmc->op->read32))
- return fmc->op->read32(fmc, offset);
- return readl(fmc->fpga_base + offset);
-}
-static inline void fmc_writel(struct fmc_device *fmc, uint32_t val, int off)
-{
- if (unlikely(fmc->op->write32))
- fmc->op->write32(fmc, val, off);
- else
- writel(val, fmc->fpga_base + off);
-}
-
-/* pci-like naming */
-static inline void *fmc_get_drvdata(const struct fmc_device *fmc)
-{
- return dev_get_drvdata(&fmc->dev);
-}
-
-static inline void fmc_set_drvdata(struct fmc_device *fmc, void *data)
-{
- dev_set_drvdata(&fmc->dev, data);
-}
-
-/* The 4 access points */
-extern int fmc_driver_register(struct fmc_driver *drv);
-extern void fmc_driver_unregister(struct fmc_driver *drv);
-extern int fmc_device_register(struct fmc_device *tdev);
-extern void fmc_device_unregister(struct fmc_device *tdev);
-
-/* Two more for device sets, all driven by the same FPGA */
-extern int fmc_device_register_n(struct fmc_device **devs, int n);
-extern void fmc_device_unregister_n(struct fmc_device **devs, int n);
-
-/* Internal cross-calls between files; not exported to other modules */
-extern int fmc_match(struct device *dev, struct device_driver *drv);
-extern int fmc_fill_id_info(struct fmc_device *fmc);
-extern void fmc_free_id_info(struct fmc_device *fmc);
-extern void fmc_dump_eeprom(const struct fmc_device *fmc);
-extern void fmc_dump_sdb(const struct fmc_device *fmc);
-
-#endif /* __LINUX_FMC_H__ */
diff --git a/include/linux/font.h b/include/linux/font.h
index d6821769dd1e..abf1442ce719 100644
--- a/include/linux/font.h
+++ b/include/linux/font.h
@@ -16,7 +16,8 @@
struct font_desc {
int idx;
const char *name;
- int width, height;
+ unsigned int width, height;
+ unsigned int charcount;
const void *data;
int pref;
};
@@ -32,6 +33,8 @@ struct font_desc {
#define ACORN8x8_IDX 8
#define MINI4x6_IDX 9
#define FONT6x10_IDX 10
+#define TER16x32_IDX 11
+#define FONT6x8_IDX 12
extern const struct font_desc font_vga_8x8,
font_vga_8x16,
@@ -43,7 +46,9 @@ extern const struct font_desc font_vga_8x8,
font_sun_12x22,
font_acorn_8x8,
font_mini_4x6,
- font_6x10;
+ font_6x10,
+ font_ter_16x32,
+ font_6x8;
/* Find a font with a specific name */
@@ -57,4 +62,17 @@ extern const struct font_desc *get_default_font(int xres, int yres,
/* Max. length for the name of a predefined font */
#define MAX_FONT_NAME 32
+/* Extra word getters */
+#define REFCOUNT(fd) (((int *)(fd))[-1])
+#define FNTSIZE(fd) (((int *)(fd))[-2])
+#define FNTCHARCNT(fd) (((int *)(fd))[-3])
+#define FNTSUM(fd) (((int *)(fd))[-4])
+
+#define FONT_EXTRA_WORDS 4
+
+struct font_data {
+ unsigned int extra[FONT_EXTRA_WORDS];
+ const unsigned char data[];
+} __packed;
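+
+/*
+ * Layout note: because @data directly follows @extra, the negative-index
+ * getters above resolve into @extra when given the @data member of a
+ * struct font_data, e.g. REFCOUNT(fd->data) reads extra[3] and
+ * FNTSUM(fd->data) reads extra[0].
+ */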
+
#endif /* _VIDEO_FONT_H */
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
new file mode 100644
index 000000000000..c9de1f59ee80
--- /dev/null
+++ b/include/linux/fortify-string.h
@@ -0,0 +1,755 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FORTIFY_STRING_H_
+#define _LINUX_FORTIFY_STRING_H_
+
+#include <linux/bug.h>
+#include <linux/const.h>
+#include <linux/limits.h>
+
+#define __FORTIFY_INLINE extern __always_inline __gnu_inline __overloadable
+#define __RENAME(x) __asm__(#x)
+
+void fortify_panic(const char *name) __noreturn __cold;
+void __read_overflow(void) __compiletime_error("detected read beyond size of object (1st parameter)");
+void __read_overflow2(void) __compiletime_error("detected read beyond size of object (2nd parameter)");
+void __read_overflow2_field(size_t avail, size_t wanted) __compiletime_warning("detected read beyond size of field (2nd parameter); maybe use struct_group()?");
+void __write_overflow(void) __compiletime_error("detected write beyond size of object (1st parameter)");
+void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning("detected write beyond size of field (1st parameter); maybe use struct_group()?");
+
+#define __compiletime_strlen(p) \
+({ \
+ char *__p = (char *)(p); \
+ size_t __ret = SIZE_MAX; \
+ size_t __p_size = __member_size(p); \
+ if (__p_size != SIZE_MAX && \
+ __builtin_constant_p(*__p)) { \
+ size_t __p_len = __p_size - 1; \
+ if (__builtin_constant_p(__p[__p_len]) && \
+ __p[__p_len] == '\0') \
+ __ret = __builtin_strlen(__p); \
+ } \
+ __ret; \
+})
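+
+/*
+ * Example: for a fixed-size array whose contents the compiler can see,
+ * e.g. static const char buf[8] = "abc", __compiletime_strlen(buf) can
+ * fold to 3; for pointers of unknown size it stays SIZE_MAX.
+ */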
+
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
+extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
+extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
+extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
+extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
+extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
+extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
+extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
+extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
+extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
+#else
+
+#if defined(__SANITIZE_MEMORY__)
+/*
+ * For KMSAN builds all memcpy/memset/memmove calls should be replaced by the
+ * corresponding __msan_XXX functions.
+ */
+#include <linux/kmsan_string.h>
+#define __underlying_memcpy __msan_memcpy
+#define __underlying_memmove __msan_memmove
+#define __underlying_memset __msan_memset
+#else
+#define __underlying_memcpy __builtin_memcpy
+#define __underlying_memmove __builtin_memmove
+#define __underlying_memset __builtin_memset
+#endif
+
+#define __underlying_memchr __builtin_memchr
+#define __underlying_memcmp __builtin_memcmp
+#define __underlying_strcat __builtin_strcat
+#define __underlying_strcpy __builtin_strcpy
+#define __underlying_strlen __builtin_strlen
+#define __underlying_strncat __builtin_strncat
+#define __underlying_strncpy __builtin_strncpy
+#endif
+
+/**
+ * unsafe_memcpy - memcpy implementation with no FORTIFY bounds checking
+ *
+ * @dst: Destination memory address to write to
+ * @src: Source memory address to read from
+ * @bytes: How many bytes to write to @dst from @src
+ * @justification: Free-form text or comment describing why the use is needed
+ *
+ * This should be used for corner cases where the compiler cannot do the
+ * right thing, or during transitions between APIs, etc. It should be used
+ * very rarely, and includes a place for justification detailing where bounds
+ * checking has happened, and why existing solutions cannot be employed.
+ */
+#define unsafe_memcpy(dst, src, bytes, justification) \
+ __underlying_memcpy(dst, src, bytes)
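+
+/*
+ * A typical call keeps the reasoning at the call site; since the macro
+ * discards @justification, a plain token run (or, in real code, a
+ * comment) works as the last argument. A sketch:
+ *
+ *	unsafe_memcpy(dst, src, len,
+ *		      bounds checked against dst size above);
+ */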
+
+/*
+ * Clang's use of __builtin_*object_size() within inlines needs hinting via
+ * __pass_*object_size(). The preference is to only ever use type 1 (member
+ * size, rather than struct size), but there remain some stragglers using
+ * type 0 that will be converted in the future.
+ */
+#if __has_builtin(__builtin_dynamic_object_size)
+#define POS __pass_dynamic_object_size(1)
+#define POS0 __pass_dynamic_object_size(0)
+#define __struct_size(p) __builtin_dynamic_object_size(p, 0)
+#define __member_size(p) __builtin_dynamic_object_size(p, 1)
+#else
+#define POS __pass_object_size(1)
+#define POS0 __pass_object_size(0)
+#define __struct_size(p) __builtin_object_size(p, 0)
+#define __member_size(p) __builtin_object_size(p, 1)
+#endif
+
+#define __compiletime_lessthan(bounds, length) ( \
+ __builtin_constant_p((bounds) < (length)) && \
+ (bounds) < (length) \
+)
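+
+/*
+ * Note: this is true only when the comparison itself folds to a
+ * constant, e.g. __compiletime_lessthan(4, 8) is true at compile time,
+ * while any comparison involving a runtime length evaluates to false.
+ */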
+
+/**
+ * strncpy - Copy a string to memory with non-guaranteed NUL padding
+ *
+ * @p: pointer to destination of copy
+ * @q: pointer to NUL-terminated source string to copy
+ * @size: bytes to write at @p
+ *
+ * If strlen(@q) >= @size, the copy of @q will stop after @size bytes,
+ * and @p will NOT be NUL-terminated
+ *
+ * If strlen(@q) < @size, following the copy of @q, trailing NUL bytes
+ * will be written to @p until @size total bytes have been written.
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * over-reads of @q, it cannot defend against writing unterminated
+ * results to @p. Using strncpy() remains ambiguous and fragile.
+ * Instead, please choose an alternative, so that the expectation
+ * of @p's contents is unambiguous:
+ *
+ * +--------------------+--------------------+------------+
+ * | **p** needs to be: | padded to **size** | not padded |
+ * +====================+====================+============+
+ * | NUL-terminated | strscpy_pad() | strscpy() |
+ * +--------------------+--------------------+------------+
+ * | not NUL-terminated | strtomem_pad() | strtomem() |
+ * +--------------------+--------------------+------------+
+ *
+ * Note strscpy*()'s differing return values for detecting truncation,
+ * and strtomem*()'s expectation that the destination is marked with
+ * __nonstring when it is a character array.
+ *
+ */
+__FORTIFY_INLINE __diagnose_as(__builtin_strncpy, 1, 2, 3)
+char *strncpy(char * const POS p, const char *q, __kernel_size_t size)
+{
+ size_t p_size = __member_size(p);
+
+ if (__compiletime_lessthan(p_size, size))
+ __write_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __underlying_strncpy(p, q, size);
+}
+
+/**
+ * strcat - Append a string to an existing string
+ *
+ * @p: pointer to NUL-terminated string to append to
+ * @q: pointer to NUL-terminated source string to append from
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * read and write overflows, this is only possible when the
+ * destination buffer size is known to the compiler. Prefer
+ * building the string with formatting, via scnprintf() or similar.
+ * At the very least, use strncat().
+ *
+ * Returns @p.
+ *
+ */
+__FORTIFY_INLINE __diagnose_as(__builtin_strcat, 1, 2)
+char *strcat(char * const POS p, const char *q)
+{
+ size_t p_size = __member_size(p);
+
+ if (p_size == SIZE_MAX)
+ return __underlying_strcat(p, q);
+ if (strlcat(p, q, p_size) >= p_size)
+ fortify_panic(__func__);
+ return p;
+}
+
+extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
+/**
+ * strnlen - Return bounded count of characters in a NUL-terminated string
+ *
+ * @p: pointer to NUL-terminated string to count.
+ * @maxlen: maximum number of characters to count.
+ *
+ * Returns number of characters in @p (NOT including the final NUL), or
+ * @maxlen, if no NUL has been found up to there.
+ *
+ */
+__FORTIFY_INLINE __kernel_size_t strnlen(const char * const POS p, __kernel_size_t maxlen)
+{
+ size_t p_size = __member_size(p);
+ size_t p_len = __compiletime_strlen(p);
+ size_t ret;
+
+ /* We can take compile-time actions when maxlen is const. */
+ if (__builtin_constant_p(maxlen) && p_len != SIZE_MAX) {
+ /* If p is const, we can use its compile-time-known len. */
+ if (maxlen >= p_size)
+ return p_len;
+ }
+
+ /* Do not check characters beyond the end of p. */
+ ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
+ if (p_size <= ret && maxlen != ret)
+ fortify_panic(__func__);
+ return ret;
+}
+
+/*
+ * Defined after fortified strnlen to reuse it. However, it must still be
+ * possible for strlen() to be used on compile-time strings for use in
+ * static initializers (i.e. as a constant expression).
+ */
+/**
+ * strlen - Return count of characters in a NUL-terminated string
+ *
+ * @p: pointer to NUL-terminated string to count.
+ *
+ * Do not use this function unless the string length is known at
+ * compile-time. When @p is unterminated, this function may crash
+ * or return unexpected counts that could lead to memory content
+ * exposures. Prefer strnlen().
+ *
+ * Returns number of characters in @p (NOT including the final NUL).
+ *
+ */
+#define strlen(p) \
+ __builtin_choose_expr(__is_constexpr(__builtin_strlen(p)), \
+ __builtin_strlen(p), __fortify_strlen(p))
+__FORTIFY_INLINE __diagnose_as(__builtin_strlen, 1)
+__kernel_size_t __fortify_strlen(const char * const POS p)
+{
+ __kernel_size_t ret;
+ size_t p_size = __member_size(p);
+
+ /* Give up if we don't know how large p is. */
+ if (p_size == SIZE_MAX)
+ return __underlying_strlen(p);
+ ret = strnlen(p, p_size);
+ if (p_size <= ret)
+ fortify_panic(__func__);
+ return ret;
+}
+
+/* Defined after fortified strlen() to reuse it. */
+extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
+/**
+ * strlcpy - Copy a string into another string buffer
+ *
+ * @p: pointer to destination of copy
+ * @q: pointer to NUL-terminated source string to copy
+ * @size: maximum number of bytes to write at @p
+ *
+ * If strlen(@q) >= @size, the copy of @q will be truncated at
+ * @size - 1 bytes. @p will always be NUL-terminated.
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * over-reads when calculating strlen(@q), it is still possible.
+ * Prefer strscpy(), though note its different return values for
+ * detecting truncation.
+ *
+ * Returns total number of bytes written to @p, including terminating NUL.
+ *
+ */
+__FORTIFY_INLINE size_t strlcpy(char * const POS p, const char * const POS q, size_t size)
+{
+ size_t p_size = __member_size(p);
+ size_t q_size = __member_size(q);
+ size_t q_len; /* Full count of source string length. */
+ size_t len; /* Count of characters going into destination. */
+
+ if (p_size == SIZE_MAX && q_size == SIZE_MAX)
+ return __real_strlcpy(p, q, size);
+ q_len = strlen(q);
+ len = (q_len >= size) ? size - 1 : q_len;
+ if (__builtin_constant_p(size) && __builtin_constant_p(q_len) && size) {
+ /* Write size is always larger than destination. */
+ if (len >= p_size)
+ __write_overflow();
+ }
+ if (size) {
+ if (len >= p_size)
+ fortify_panic(__func__);
+ __underlying_memcpy(p, q, len);
+ p[len] = '\0';
+ }
+ return q_len;
+}
+
+/* Defined after fortified strnlen() to reuse it. */
+extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy);
+/**
+ * strscpy - Copy a C-string into a sized buffer
+ *
+ * @p: Where to copy the string to
+ * @q: Where to copy the string from
+ * @size: Size of destination buffer
+ *
+ * Copy the source string @q, or as much of it as fits, into the destination
+ * @p buffer. The behavior is undefined if the string buffers overlap. The
+ * destination @p buffer is always NUL terminated, unless it's zero-sized.
+ *
+ * Preferred to strlcpy() since the API doesn't require reading memory
+ * from the source @q string beyond the specified @size bytes, and since
+ * the return value is easier to error-check than strlcpy()'s.
+ * In addition, the implementation is robust to the string changing out
+ * from underneath it, unlike the current strlcpy() implementation.
+ *
+ * Preferred to strncpy() since it always returns a valid string, and
+ * doesn't unnecessarily force the tail of the destination buffer to be
+ * zero padded. If padding is desired please use strscpy_pad().
+ *
+ * Returns the number of characters copied into @p (not including the
+ * trailing %NUL) or -E2BIG if @size is 0 or the copy of @q was truncated.
+ */
+__FORTIFY_INLINE ssize_t strscpy(char * const POS p, const char * const POS q, size_t size)
+{
+ size_t len;
+ /* Use string size rather than possible enclosing struct size. */
+ size_t p_size = __member_size(p);
+ size_t q_size = __member_size(q);
+
+	/* If we cannot get the size of p or q, fall back to the real strscpy. */
+ if (p_size == SIZE_MAX && q_size == SIZE_MAX)
+ return __real_strscpy(p, q, size);
+
+ /*
+ * If size can be known at compile time and is greater than
+ * p_size, generate a compile time write overflow error.
+ */
+ if (__compiletime_lessthan(p_size, size))
+ __write_overflow();
+
+ /* Short-circuit for compile-time known-safe lengths. */
+ if (__compiletime_lessthan(p_size, SIZE_MAX)) {
+ len = __compiletime_strlen(q);
+
+ if (len < SIZE_MAX && __compiletime_lessthan(len, size)) {
+ __underlying_memcpy(p, q, len + 1);
+ return len;
+ }
+ }
+
+ /*
+	 * This call protects against read overflow, because len will default
+	 * to q's length if that is smaller than size.
+ */
+ len = strnlen(q, size);
+ /*
+ * If len equals size, we will copy only size bytes which leads to
+ * -E2BIG being returned.
+	 * Otherwise we will copy len + 1 because of the final '\0'.
+ */
+ len = len == size ? size : len + 1;
+
+ /*
+ * Generate a runtime write overflow error if len is greater than
+ * p_size.
+ */
+ if (len > p_size)
+ fortify_panic(__func__);
+
+ /*
+ * We can now safely call vanilla strscpy because we are protected from:
+ * 1. Read overflow thanks to call to strnlen().
+ * 2. Write overflow thanks to above ifs.
+ */
+ return __real_strscpy(p, q, len);
+}
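+
+/*
+ * Typical call pattern: truncation is detectable from the return value,
+ * unlike with strlcpy() (handle_truncation() is a hypothetical helper):
+ *
+ *	char buf[16];
+ *	ssize_t len = strscpy(buf, name, sizeof(buf));
+ *	if (len == -E2BIG)
+ *		handle_truncation();
+ */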
+
+/**
+ * strncat - Append a string to an existing string
+ *
+ * @p: pointer to NUL-terminated string to append to
+ * @q: pointer to source string to append from
+ * @count: Maximum bytes to read from @q
+ *
+ * Appends at most @count bytes from @q (stopping at the first
+ * NUL byte) after the NUL-terminated string at @p. @p will be
+ * NUL-terminated.
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * read and write overflows, this is only possible when the sizes
+ * of @p and @q are known to the compiler. Prefer building the
+ * string with formatting, via scnprintf() or similar.
+ *
+ * Returns @p.
+ *
+ */
+/* Defined after fortified strlen() and strnlen() to reuse them. */
+__FORTIFY_INLINE __diagnose_as(__builtin_strncat, 1, 2, 3)
+char *strncat(char * const POS p, const char * const POS q, __kernel_size_t count)
+{
+ size_t p_len, copy_len;
+ size_t p_size = __member_size(p);
+ size_t q_size = __member_size(q);
+
+ if (p_size == SIZE_MAX && q_size == SIZE_MAX)
+ return __underlying_strncat(p, q, count);
+ p_len = strlen(p);
+ copy_len = strnlen(q, count);
+ if (p_size < p_len + copy_len + 1)
+ fortify_panic(__func__);
+ __underlying_memcpy(p + p_len, q, copy_len);
+ p[p_len + copy_len] = '\0';
+ return p;
+}
+
+__FORTIFY_INLINE void fortify_memset_chk(__kernel_size_t size,
+ const size_t p_size,
+ const size_t p_size_field)
+{
+ if (__builtin_constant_p(size)) {
+ /*
+ * Length argument is a constant expression, so we
+ * can perform compile-time bounds checking where
+ * buffer sizes are also known at compile time.
+ */
+
+ /* Error when size is larger than enclosing struct. */
+ if (__compiletime_lessthan(p_size_field, p_size) &&
+ __compiletime_lessthan(p_size, size))
+ __write_overflow();
+
+ /* Warn when write size is larger than dest field. */
+ if (__compiletime_lessthan(p_size_field, size))
+ __write_overflow_field(p_size_field, size);
+ }
+ /*
+ * At this point, length argument may not be a constant expression,
+ * so run-time bounds checking can be done where buffer sizes are
+ * known. (This is not an "else" because the above checks may only
+ * be compile-time warnings, and we want to still warn for run-time
+ * overflows.)
+ */
+
+ /*
+ * Always stop accesses beyond the struct that contains the
+ * field, when the buffer's remaining size is known.
+ * (The SIZE_MAX test is to optimize away checks where the buffer
+ * lengths are unknown.)
+ */
+ if (p_size != SIZE_MAX && p_size < size)
+ fortify_panic("memset");
+}
+
+#define __fortify_memset_chk(p, c, size, p_size, p_size_field) ({ \
+ size_t __fortify_size = (size_t)(size); \
+ fortify_memset_chk(__fortify_size, p_size, p_size_field), \
+ __underlying_memset(p, c, __fortify_size); \
+})
+
+/*
+ * __struct_size() vs __member_size() must be captured here to avoid
+ * evaluating argument side-effects further into the macro layers.
+ */
+#ifndef CONFIG_KMSAN
+#define memset(p, c, s) __fortify_memset_chk(p, c, s, \
+ __struct_size(p), __member_size(p))
+#endif
+
+/*
+ * To make sure the compiler can enforce protection against buffer overflows,
+ * memcpy(), memmove(), and memset() must not be used beyond individual
+ * struct members. If you need to copy across multiple members, please use
+ * struct_group() to create a named mirror of an anonymous struct union.
+ * (e.g. see struct sk_buff.) Read overflow checking is currently only
+ * done when a write overflow is also present, or when building with W=1.
+ *
+ * Mitigation coverage matrix
+ * Bounds checking at:
+ * +-------+-------+-------+-------+
+ * | Compile time | Run time |
+ * memcpy() argument sizes: | write | read | write | read |
+ * dest source length +-------+-------+-------+-------+
+ * memcpy(known, known, constant) | y | y | n/a | n/a |
+ * memcpy(known, unknown, constant) | y | n | n/a | V |
+ * memcpy(known, known, dynamic) | n | n | B | B |
+ * memcpy(known, unknown, dynamic) | n | n | B | V |
+ * memcpy(unknown, known, constant) | n | y | V | n/a |
+ * memcpy(unknown, unknown, constant) | n | n | V | V |
+ * memcpy(unknown, known, dynamic) | n | n | V | B |
+ * memcpy(unknown, unknown, dynamic) | n | n | V | V |
+ * +-------+-------+-------+-------+
+ *
+ * y = perform deterministic compile-time bounds checking
+ * n = cannot perform deterministic compile-time bounds checking
+ * n/a = no run-time bounds checking needed since compile-time deterministic
+ * B = can perform run-time bounds checking (currently unimplemented)
+ * V = vulnerable to run-time overflow (will need refactoring to solve)
+ *
+ */
+__FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
+ const size_t p_size,
+ const size_t q_size,
+ const size_t p_size_field,
+ const size_t q_size_field,
+ const char *func)
+{
+ if (__builtin_constant_p(size)) {
+ /*
+ * Length argument is a constant expression, so we
+ * can perform compile-time bounds checking where
+ * buffer sizes are also known at compile time.
+ */
+
+ /* Error when size is larger than enclosing struct. */
+ if (__compiletime_lessthan(p_size_field, p_size) &&
+ __compiletime_lessthan(p_size, size))
+ __write_overflow();
+ if (__compiletime_lessthan(q_size_field, q_size) &&
+ __compiletime_lessthan(q_size, size))
+ __read_overflow2();
+
+ /* Warn when write size argument larger than dest field. */
+ if (__compiletime_lessthan(p_size_field, size))
+ __write_overflow_field(p_size_field, size);
+ /*
+ * Warn for source field over-read when building with W=1
+ * or when an over-write happened, so both can be fixed at
+ * the same time.
+ */
+ if ((IS_ENABLED(KBUILD_EXTRA_WARN1) ||
+ __compiletime_lessthan(p_size_field, size)) &&
+ __compiletime_lessthan(q_size_field, size))
+ __read_overflow2_field(q_size_field, size);
+ }
+ /*
+ * At this point, length argument may not be a constant expression,
+ * so run-time bounds checking can be done where buffer sizes are
+ * known. (This is not an "else" because the above checks may only
+ * be compile-time warnings, and we want to still warn for run-time
+ * overflows.)
+ */
+
+ /*
+ * Always stop accesses beyond the struct that contains the
+ * field, when the buffer's remaining size is known.
+ * (The SIZE_MAX test is to optimize away checks where the buffer
+ * lengths are unknown.)
+ */
+ if ((p_size != SIZE_MAX && p_size < size) ||
+ (q_size != SIZE_MAX && q_size < size))
+ fortify_panic(func);
+
+ /*
+ * Warn when writing beyond destination field size.
+ *
+ * We must ignore p_size_field == 0 for existing 0-element
+ * fake flexible arrays, until they are all converted to
+ * proper flexible arrays.
+ *
+ * The implementation of __builtin_*object_size() behaves
+ * like sizeof() when not directly referencing a flexible
+ * array member, which means there will be many bounds checks
+ * that will appear at run-time, without a way for them to be
+ * detected at compile-time (as can be done when the destination
+ * is specifically the flexible array member).
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101832
+ */
+ if (p_size_field != 0 && p_size_field != SIZE_MAX &&
+ p_size != p_size_field && p_size_field < size)
+ return true;
+
+ return false;
+}
+
+#define __fortify_memcpy_chk(p, q, size, p_size, q_size, \
+ p_size_field, q_size_field, op) ({ \
+ const size_t __fortify_size = (size_t)(size); \
+ const size_t __p_size = (p_size); \
+ const size_t __q_size = (q_size); \
+ const size_t __p_size_field = (p_size_field); \
+ const size_t __q_size_field = (q_size_field); \
+ WARN_ONCE(fortify_memcpy_chk(__fortify_size, __p_size, \
+ __q_size, __p_size_field, \
+ __q_size_field, #op), \
+ #op ": detected field-spanning write (size %zu) of single %s (size %zu)\n", \
+ __fortify_size, \
+ "field \"" #p "\" at " __FILE__ ":" __stringify(__LINE__), \
+ __p_size_field); \
+ __underlying_##op(p, q, __fortify_size); \
+})
+
+/*
+ * Notes about compile-time buffer size detection:
+ *
+ * With these types...
+ *
+ * struct middle {
+ * u16 a;
+ * u8 middle_buf[16];
+ * int b;
+ * };
+ * struct end {
+ * u16 a;
+ * u8 end_buf[16];
+ * };
+ * struct flex {
+ * int a;
+ * u8 flex_buf[];
+ * };
+ *
+ * void func(TYPE *ptr) { ... }
+ *
+ * Cases where destination size cannot be currently detected:
+ * - the size of ptr's object (seemingly by design, gcc & clang fail):
+ * __builtin_object_size(ptr, 1) == SIZE_MAX
+ * - the size of flexible arrays in ptr's obj (by design, dynamic size):
+ * __builtin_object_size(ptr->flex_buf, 1) == SIZE_MAX
+ * - the size of ANY array at the end of ptr's obj (gcc and clang bug):
+ * __builtin_object_size(ptr->end_buf, 1) == SIZE_MAX
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101836
+ *
+ * Cases where destination size is currently detected:
+ * - the size of non-array members within ptr's object:
+ * __builtin_object_size(ptr->a, 1) == 2
+ * - the size of non-flexible-array in the middle of ptr's obj:
+ * __builtin_object_size(ptr->middle_buf, 1) == 16
+ *
+ */
+
+/*
+ * __struct_size() vs __member_size() must be captured here to avoid
+ * evaluating argument side-effects further into the macro layers.
+ */
+#define memcpy(p, q, s) __fortify_memcpy_chk(p, q, s, \
+ __struct_size(p), __struct_size(q), \
+ __member_size(p), __member_size(q), \
+ memcpy)
+#define memmove(p, q, s) __fortify_memcpy_chk(p, q, s, \
+ __struct_size(p), __struct_size(q), \
+ __member_size(p), __member_size(q), \
+ memmove)
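+
+/*
+ * Sketch of the struct_group() approach recommended above for copies
+ * spanning multiple members (all names here are illustrative):
+ *
+ *	struct packet {
+ *		u8 type;
+ *		struct_group(header,
+ *			u8 flags;
+ *			u16 len;
+ *		);
+ *	};
+ *
+ *	memcpy(&pkt.header, src, sizeof(pkt.header));
+ */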
+
+extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
+__FORTIFY_INLINE void *memscan(void * const POS0 p, int c, __kernel_size_t size)
+{
+ size_t p_size = __struct_size(p);
+
+ if (__compiletime_lessthan(p_size, size))
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __real_memscan(p, c, size);
+}
+
+__FORTIFY_INLINE __diagnose_as(__builtin_memcmp, 1, 2, 3)
+int memcmp(const void * const POS0 p, const void * const POS0 q, __kernel_size_t size)
+{
+ size_t p_size = __struct_size(p);
+ size_t q_size = __struct_size(q);
+
+ if (__builtin_constant_p(size)) {
+ if (__compiletime_lessthan(p_size, size))
+ __read_overflow();
+ if (__compiletime_lessthan(q_size, size))
+ __read_overflow2();
+ }
+ if (p_size < size || q_size < size)
+ fortify_panic(__func__);
+ return __underlying_memcmp(p, q, size);
+}
+
+__FORTIFY_INLINE __diagnose_as(__builtin_memchr, 1, 2, 3)
+void *memchr(const void * const POS0 p, int c, __kernel_size_t size)
+{
+ size_t p_size = __struct_size(p);
+
+ if (__compiletime_lessthan(p_size, size))
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __underlying_memchr(p, c, size);
+}
+
+void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
+__FORTIFY_INLINE void *memchr_inv(const void * const POS0 p, int c, size_t size)
+{
+ size_t p_size = __struct_size(p);
+
+ if (__compiletime_lessthan(p_size, size))
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __real_memchr_inv(p, c, size);
+}
+
+extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup)
+ __realloc_size(2);
+__FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp)
+{
+ size_t p_size = __struct_size(p);
+
+ if (__compiletime_lessthan(p_size, size))
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __real_kmemdup(p, size, gfp);
+}
+
+/**
+ * strcpy - Copy a string into another string buffer
+ *
+ * @p: pointer to destination of copy
+ * @q: pointer to NUL-terminated source string to copy
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * overflows, this is only possible when the sizes of @q and @p are
+ * known to the compiler. Prefer strscpy(), though note its different
+ * return values for detecting truncation.
+ *
+ * Returns @p.
+ *
+ */
+/* Defined after fortified strlen to reuse it. */
+__FORTIFY_INLINE __diagnose_as(__builtin_strcpy, 1, 2)
+char *strcpy(char * const POS p, const char * const POS q)
+{
+ size_t p_size = __member_size(p);
+ size_t q_size = __member_size(q);
+ size_t size;
+
+ /* If neither buffer size is known, immediately give up. */
+ if (__builtin_constant_p(p_size) &&
+ __builtin_constant_p(q_size) &&
+ p_size == SIZE_MAX && q_size == SIZE_MAX)
+ return __underlying_strcpy(p, q);
+ size = strlen(q) + 1;
+ /* Compile-time check for const size overflow. */
+ if (__compiletime_lessthan(p_size, size))
+ __write_overflow();
+ /* Run-time check for dynamic size overflow. */
+ if (p_size < size)
+ fortify_panic(__func__);
+ __underlying_memcpy(p, q, size);
+ return p;
+}
+
+/* Don't use these outside the FORTIFY_SOURCE implementation */
+#undef __underlying_memchr
+#undef __underlying_memcmp
+#undef __underlying_strcat
+#undef __underlying_strcpy
+#undef __underlying_strlen
+#undef __underlying_strncat
+#undef __underlying_strncpy
+
+#undef POS
+#undef POS0
+
+#endif /* _LINUX_FORTIFY_STRING_H_ */
diff --git a/include/linux/fpga/adi-axi-common.h b/include/linux/fpga/adi-axi-common.h
new file mode 100644
index 000000000000..141ac3f251e6
--- /dev/null
+++ b/include/linux/fpga/adi-axi-common.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Analog Devices AXI common registers & definitions
+ *
+ * Copyright 2019 Analog Devices Inc.
+ *
+ * https://wiki.analog.com/resources/fpga/docs/axi_ip
+ * https://wiki.analog.com/resources/fpga/docs/hdl/regmap
+ */
+
+#ifndef ADI_AXI_COMMON_H_
+#define ADI_AXI_COMMON_H_
+
+#define ADI_AXI_REG_VERSION 0x0000
+
+#define ADI_AXI_PCORE_VER(major, minor, patch) \
+ (((major) << 16) | ((minor) << 8) | (patch))
+
+#define ADI_AXI_PCORE_VER_MAJOR(version) (((version) >> 16) & 0xff)
+#define ADI_AXI_PCORE_VER_MINOR(version) (((version) >> 8) & 0xff)
+#define ADI_AXI_PCORE_VER_PATCH(version) ((version) & 0xff)
+
+#endif /* ADI_AXI_COMMON_H_ */
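
A brief usage sketch (the base/dev variables and the expected major version of 10 are invented for illustration): drivers read ADI_AXI_REG_VERSION once and decode it with the helpers above.

	u32 ver = readl(base + ADI_AXI_REG_VERSION);

	if (ADI_AXI_PCORE_VER_MAJOR(ver) != 10)
		dev_warn(dev, "untested pcore version %u.%u.%u\n",
			 ADI_AXI_PCORE_VER_MAJOR(ver),
			 ADI_AXI_PCORE_VER_MINOR(ver),
			 ADI_AXI_PCORE_VER_PATCH(ver));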
diff --git a/include/linux/fpga/altera-pr-ip-core.h b/include/linux/fpga/altera-pr-ip-core.h
index 3810a9033f49..a6b4c07858cc 100644
--- a/include/linux/fpga/altera-pr-ip-core.h
+++ b/include/linux/fpga/altera-pr-ip-core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for Altera Partial Reconfiguration IP Core
*
@@ -5,18 +6,6 @@
*
* Based on socfpga-a10.c Copyright (C) 2015-2016 Altera Corporation
* by Alan Tull <atull@opensource.altera.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ALT_PR_IP_CORE_H
@@ -24,6 +13,5 @@
#include <linux/io.h>
int alt_pr_register(struct device *dev, void __iomem *reg_base);
-int alt_pr_unregister(struct device *dev);
#endif /* _ALT_PR_IP_CORE_H */
diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h
index dba6e3c697c7..223da48a6d18 100644
--- a/include/linux/fpga/fpga-bridge.h
+++ b/include/linux/fpga/fpga-bridge.h
@@ -1,21 +1,42 @@
-#include <linux/device.h>
-#include <linux/fpga/fpga-mgr.h>
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FPGA_BRIDGE_H
#define _LINUX_FPGA_BRIDGE_H
+#include <linux/device.h>
+#include <linux/fpga/fpga-mgr.h>
+
struct fpga_bridge;
/**
* struct fpga_bridge_ops - ops for low level FPGA bridge drivers
* @enable_show: returns the FPGA bridge's status
- * @enable_set: set a FPGA bridge as enabled or disabled
+ * @enable_set: set an FPGA bridge as enabled or disabled
* @fpga_bridge_remove: set FPGA into a specific state during driver remove
+ * @groups: optional attribute groups.
*/
struct fpga_bridge_ops {
int (*enable_show)(struct fpga_bridge *bridge);
int (*enable_set)(struct fpga_bridge *bridge, bool enable);
void (*fpga_bridge_remove)(struct fpga_bridge *bridge);
+ const struct attribute_group **groups;
+};
+
+/**
+ * struct fpga_bridge_info - collection of parameters for an FPGA Bridge
+ * @name: fpga bridge name
+ * @br_ops: pointer to structure of fpga bridge ops
+ * @priv: fpga bridge private data
+ *
+ * fpga_bridge_info contains parameters for the register function. These
+ * are separated into an info structure because some are optional and
+ * others could be added in the future. The info structure facilitates
+ * maintaining a stable API.
+ */
+struct fpga_bridge_info {
+ const char *name;
+ const struct fpga_bridge_ops *br_ops;
+ void *priv;
};
/**
@@ -42,6 +63,8 @@ struct fpga_bridge {
struct fpga_bridge *of_fpga_bridge_get(struct device_node *node,
struct fpga_image_info *info);
+struct fpga_bridge *fpga_bridge_get(struct device *dev,
+ struct fpga_image_info *info);
void fpga_bridge_put(struct fpga_bridge *bridge);
int fpga_bridge_enable(struct fpga_bridge *bridge);
int fpga_bridge_disable(struct fpga_bridge *bridge);
@@ -49,12 +72,17 @@ int fpga_bridge_disable(struct fpga_bridge *bridge);
int fpga_bridges_enable(struct list_head *bridge_list);
int fpga_bridges_disable(struct list_head *bridge_list);
void fpga_bridges_put(struct list_head *bridge_list);
-int fpga_bridge_get_to_list(struct device_node *np,
+int fpga_bridge_get_to_list(struct device *dev,
struct fpga_image_info *info,
struct list_head *bridge_list);
+int of_fpga_bridge_get_to_list(struct device_node *np,
+ struct fpga_image_info *info,
+ struct list_head *bridge_list);
-int fpga_bridge_register(struct device *dev, const char *name,
- const struct fpga_bridge_ops *br_ops, void *priv);
-void fpga_bridge_unregister(struct device *dev);
+struct fpga_bridge *
+fpga_bridge_register(struct device *parent, const char *name,
+ const struct fpga_bridge_ops *br_ops,
+ void *priv);
+void fpga_bridge_unregister(struct fpga_bridge *br);
#endif /* _LINUX_FPGA_BRIDGE_H */
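
With the reworked registration API, a low-level bridge driver pairs fpga_bridge_register() with fpga_bridge_unregister() roughly as follows (a sketch; the my_* names are placeholders, and errors come back as ERR_PTR values):

static const struct fpga_bridge_ops my_br_ops = {
	.enable_set = my_enable_set,	/* placeholder callback */
};

static int my_bridge_probe(struct platform_device *pdev)
{
	struct fpga_bridge *br;

	br = fpga_bridge_register(&pdev->dev, "my-bridge", &my_br_ops, NULL);
	if (IS_ERR(br))
		return PTR_ERR(br);

	platform_set_drvdata(pdev, br);
	return 0;
}

static int my_bridge_remove(struct platform_device *pdev)
{
	fpga_bridge_unregister(platform_get_drvdata(pdev));
	return 0;
}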
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index b4ac24c4411d..54f63459efd6 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -1,26 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* FPGA Framework
*
- * Copyright (C) 2013-2015 Altera Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2013-2016 Altera Corporation
+ * Copyright (C) 2017 Intel Corporation
*/
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-
#ifndef _LINUX_FPGA_MGR_H
#define _LINUX_FPGA_MGR_H
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
struct fpga_manager;
struct sg_table;
@@ -32,6 +22,8 @@ struct sg_table;
* @FPGA_MGR_STATE_RESET: FPGA in reset state
* @FPGA_MGR_STATE_FIRMWARE_REQ: firmware request in progress
* @FPGA_MGR_STATE_FIRMWARE_REQ_ERR: firmware request failed
+ * @FPGA_MGR_STATE_PARSE_HEADER: parse FPGA image header
+ * @FPGA_MGR_STATE_PARSE_HEADER_ERR: Error during PARSE_HEADER stage
* @FPGA_MGR_STATE_WRITE_INIT: preparing FPGA for programming
* @FPGA_MGR_STATE_WRITE_INIT_ERR: Error during WRITE_INIT stage
* @FPGA_MGR_STATE_WRITE: writing image to FPGA
@@ -51,7 +43,9 @@ enum fpga_mgr_states {
FPGA_MGR_STATE_FIRMWARE_REQ,
FPGA_MGR_STATE_FIRMWARE_REQ_ERR,
- /* write sequence: init, write, complete */
+ /* write sequence: parse header, init, write, complete */
+ FPGA_MGR_STATE_PARSE_HEADER,
+ FPGA_MGR_STATE_PARSE_HEADER_ERR,
FPGA_MGR_STATE_WRITE_INIT,
FPGA_MGR_STATE_WRITE_INIT_ERR,
FPGA_MGR_STATE_WRITE,
@@ -63,39 +57,111 @@ enum fpga_mgr_states {
FPGA_MGR_STATE_OPERATING,
};
-/*
- * FPGA Manager flags
- * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
- * FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
+/**
+ * DOC: FPGA Manager flags
+ *
+ * Flags used in the &fpga_image_info->flags field
+ *
+ * %FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
+ *
+ * %FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
+ *
+ * %FPGA_MGR_ENCRYPTED_BITSTREAM: indicates bitstream is encrypted
+ *
+ * %FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
+ *
+ * %FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
*/
#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
#define FPGA_MGR_EXTERNAL_CONFIG BIT(1)
#define FPGA_MGR_ENCRYPTED_BITSTREAM BIT(2)
+#define FPGA_MGR_BITSTREAM_LSB_FIRST BIT(3)
+#define FPGA_MGR_COMPRESSED_BITSTREAM BIT(4)
/**
- * struct fpga_image_info - information specific to a FPGA image
+ * struct fpga_image_info - information specific to an FPGA image
* @flags: boolean flags as defined above
* @enable_timeout_us: maximum time to enable traffic through bridge (uSec)
* @disable_timeout_us: maximum time to disable traffic through bridge (uSec)
* @config_complete_timeout_us: maximum time for FPGA to switch to operating
* status in the write_complete op.
+ * @firmware_name: name of FPGA image firmware file
+ * @sgt: scatter/gather table containing FPGA image
+ * @buf: contiguous buffer containing FPGA image
+ * @count: size of buf
+ * @header_size: size of image header.
+ * @data_size: size of image data to be sent to the device. If not specified,
+ * the whole image will be used. The header may be skipped in either case.
+ * @region_id: id of target region
+ * @dev: device that owns this
+ * @overlay: Device Tree overlay
*/
struct fpga_image_info {
u32 flags;
u32 enable_timeout_us;
u32 disable_timeout_us;
u32 config_complete_timeout_us;
+ char *firmware_name;
+ struct sg_table *sgt;
+ const char *buf;
+ size_t count;
+ size_t header_size;
+ size_t data_size;
+ int region_id;
+ struct device *dev;
+#ifdef CONFIG_OF
+ struct device_node *overlay;
+#endif
+};
+
+/**
+ * struct fpga_compat_id - id for compatibility check
+ *
+ * @id_h: high 64bit of the compat_id
+ * @id_l: low 64bit of the compat_id
+ */
+struct fpga_compat_id {
+ u64 id_h;
+ u64 id_l;
+};
+
+/**
+ * struct fpga_manager_info - collection of parameters for an FPGA Manager
+ * @name: fpga manager name
+ * @compat_id: FPGA manager id for compatibility check.
+ * @mops: pointer to structure of fpga manager ops
+ * @priv: fpga manager private data
+ *
+ * fpga_manager_info contains parameters for the register_full function.
+ * These are separated into an info structure because some are optional and
+ * others could be added in the future. The info structure facilitates
+ * maintaining a stable API.
+ */
+struct fpga_manager_info {
+ const char *name;
+ struct fpga_compat_id *compat_id;
+ const struct fpga_manager_ops *mops;
+ void *priv;
};
/**
* struct fpga_manager_ops - ops for low level fpga manager drivers
- * @initial_header_size: Maximum number of bytes that should be passed into write_init
+ * @initial_header_size: minimum number of bytes that should be passed into
+ * parse_header and write_init.
+ * @skip_header: bool flag to tell the fpga-mgr core whether it should skip
+ * the info->header_size part at the beginning of the image when invoking
+ * the write callback.
* @state: returns an enum value of the FPGA's state
- * @write_init: prepare the FPGA to receive confuration data
+ * @status: returns status of the FPGA, including reconfiguration error code
+ * @parse_header: parse FPGA image header to set info->header_size and
+ * info->data_size. In case the input buffer is not large enough, store
+ * the required size in info->header_size and return -EAGAIN.
+ * @write_init: prepare the FPGA to receive configuration data
* @write: write count bytes of configuration data to the FPGA
* @write_sg: write the scatter list of configuration data to the FPGA
* @write_complete: set FPGA to operating state after writing is done
* @fpga_remove: optional: Set FPGA into a specific state during driver remove
+ * @groups: optional attribute groups.
*
* fpga_manager_ops are the low level functions implemented by a specific
* fpga manager driver. The optional ones are tested for NULL before being
@@ -103,7 +169,12 @@ struct fpga_image_info {
*/
struct fpga_manager_ops {
size_t initial_header_size;
+ bool skip_header;
enum fpga_mgr_states (*state)(struct fpga_manager *mgr);
+ u64 (*status)(struct fpga_manager *mgr);
+ int (*parse_header)(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count);
int (*write_init)(struct fpga_manager *mgr,
struct fpga_image_info *info,
const char *buf, size_t count);
@@ -112,14 +183,23 @@ struct fpga_manager_ops {
int (*write_complete)(struct fpga_manager *mgr,
struct fpga_image_info *info);
void (*fpga_remove)(struct fpga_manager *mgr);
+ const struct attribute_group **groups;
};
+/* FPGA manager status: Partial/Full Reconfiguration errors */
+#define FPGA_MGR_STATUS_OPERATION_ERR BIT(0)
+#define FPGA_MGR_STATUS_CRC_ERR BIT(1)
+#define FPGA_MGR_STATUS_INCOMPATIBLE_IMAGE_ERR BIT(2)
+#define FPGA_MGR_STATUS_IP_PROTOCOL_ERR BIT(3)
+#define FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR BIT(4)
+
/**
* struct fpga_manager - fpga manager structure
* @name: name of low level fpga manager
* @dev: fpga manager device
* @ref_mutex: only allows one reference to fpga manager
* @state: state of fpga manager
+ * @compat_id: FPGA manager id for compatibility check.
* @mops: pointer to struct of fpga manager ops
 * @priv: low level driver private data
*/
@@ -128,20 +208,21 @@ struct fpga_manager {
struct device dev;
struct mutex ref_mutex;
enum fpga_mgr_states state;
+ struct fpga_compat_id *compat_id;
const struct fpga_manager_ops *mops;
void *priv;
};
#define to_fpga_manager(d) container_of(d, struct fpga_manager, dev)
-int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
- const char *buf, size_t count);
-int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, struct fpga_image_info *info,
- struct sg_table *sgt);
+struct fpga_image_info *fpga_image_info_alloc(struct device *dev);
+
+void fpga_image_info_free(struct fpga_image_info *info);
-int fpga_mgr_firmware_load(struct fpga_manager *mgr,
- struct fpga_image_info *info,
- const char *image_name);
+int fpga_mgr_load(struct fpga_manager *mgr, struct fpga_image_info *info);
+
+int fpga_mgr_lock(struct fpga_manager *mgr);
+void fpga_mgr_unlock(struct fpga_manager *mgr);
struct fpga_manager *of_fpga_mgr_get(struct device_node *node);
@@ -149,9 +230,18 @@ struct fpga_manager *fpga_mgr_get(struct device *dev);
void fpga_mgr_put(struct fpga_manager *mgr);
-int fpga_mgr_register(struct device *dev, const char *name,
- const struct fpga_manager_ops *mops, void *priv);
+struct fpga_manager *
+fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info);
+
+struct fpga_manager *
+fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv);
+void fpga_mgr_unregister(struct fpga_manager *mgr);
-void fpga_mgr_unregister(struct device *dev);
+struct fpga_manager *
+devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info);
+struct fpga_manager *
+devm_fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv);
#endif /*_LINUX_FPGA_MGR_H */
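
A sketch of the register_full flow for a manager driver (the my_* ops are placeholders; the devm_ variant ties the registration lifetime to the parent device, so no explicit unregister is needed):

static const struct fpga_manager_ops my_mops = {
	.state          = my_state,
	.write_init     = my_write_init,
	.write          = my_write,
	.write_complete = my_write_complete,
};

static int my_mgr_probe(struct platform_device *pdev)
{
	struct fpga_manager_info info = {
		.name = "my-fpga-manager",
		.mops = &my_mops,
	};
	struct fpga_manager *mgr;

	mgr = devm_fpga_mgr_register_full(&pdev->dev, &info);
	return PTR_ERR_OR_ZERO(mgr);
}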
diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h
new file mode 100644
index 000000000000..9d4d32909340
--- /dev/null
+++ b/include/linux/fpga/fpga-region.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _FPGA_REGION_H
+#define _FPGA_REGION_H
+
+#include <linux/device.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/fpga/fpga-bridge.h>
+
+struct fpga_region;
+
+/**
+ * struct fpga_region_info - collection of parameters for an FPGA Region
+ * @mgr: fpga region manager
+ * @compat_id: FPGA region id for compatibility check.
+ * @priv: fpga region private data
+ * @get_bridges: optional function to get bridges to a list
+ *
+ * fpga_region_info contains parameters for the register_full function.
+ * These are separated into an info structure because some are optional and
+ * others could be added in the future. The info structure facilitates
+ * maintaining a stable API.
+ */
+struct fpga_region_info {
+ struct fpga_manager *mgr;
+ struct fpga_compat_id *compat_id;
+ void *priv;
+ int (*get_bridges)(struct fpga_region *region);
+};
+
+/**
+ * struct fpga_region - FPGA Region structure
+ * @dev: FPGA Region device
+ * @mutex: enforces exclusive reference to region
+ * @bridge_list: list of FPGA bridges specified in region
+ * @mgr: FPGA manager
+ * @info: FPGA image info
+ * @compat_id: FPGA region id for compatibility check.
+ * @priv: private data
+ * @get_bridges: optional function to get bridges to a list
+ */
+struct fpga_region {
+ struct device dev;
+ struct mutex mutex; /* for exclusive reference to region */
+ struct list_head bridge_list;
+ struct fpga_manager *mgr;
+ struct fpga_image_info *info;
+ struct fpga_compat_id *compat_id;
+ void *priv;
+ int (*get_bridges)(struct fpga_region *region);
+};
+
+#define to_fpga_region(d) container_of(d, struct fpga_region, dev)
+
+struct fpga_region *
+fpga_region_class_find(struct device *start, const void *data,
+ int (*match)(struct device *, const void *));
+
+int fpga_region_program_fpga(struct fpga_region *region);
+
+struct fpga_region *
+fpga_region_register_full(struct device *parent, const struct fpga_region_info *info);
+
+struct fpga_region *
+fpga_region_register(struct device *parent, struct fpga_manager *mgr,
+ int (*get_bridges)(struct fpga_region *));
+void fpga_region_unregister(struct fpga_region *region);
+
+#endif /* _FPGA_REGION_H */
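
Region registration and programming, sketched with placeholder names (mgr is assumed to have been obtained elsewhere, e.g. via of_fpga_mgr_get()):

static struct fpga_manager *mgr;	/* obtained elsewhere */

static int my_region_probe(struct platform_device *pdev)
{
	struct fpga_region *region;

	region = fpga_region_register(&pdev->dev, mgr, my_get_bridges);
	if (IS_ERR(region))
		return PTR_ERR(region);

	platform_set_drvdata(pdev, region);
	return 0;
}

/*
 * Later, after filling region->info (for instance setting
 * info->firmware_name), the image is programmed with:
 *
 *	err = fpga_region_program_fpga(region);
 */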
diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h
new file mode 100644
index 000000000000..47fefc7f363b
--- /dev/null
+++ b/include/linux/fprobe.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Simple ftrace probe wrapper */
+#ifndef _LINUX_FPROBE_H
+#define _LINUX_FPROBE_H
+
+#include <linux/compiler.h>
+#include <linux/ftrace.h>
+#include <linux/rethook.h>
+
+/**
+ * struct fprobe - ftrace based probe.
+ * @ops: The ftrace_ops.
+ * @nmissed: The counter for missing events.
+ * @flags: The status flag.
+ * @rethook: The rethook data structure. (internal data)
+ * @entry_data_size: The private data storage size.
+ * @nr_maxactive: The max number of active functions.
+ * @entry_handler: The callback function for function entry.
+ * @exit_handler: The callback function for function exit.
+ */
+struct fprobe {
+#ifdef CONFIG_FUNCTION_TRACER
+ /*
+ * If CONFIG_FUNCTION_TRACER is not set, CONFIG_FPROBE is disabled too.
+ * But a user of fprobe may keep embedding struct fprobe in their own
+ * code. To avoid a build error, this keeps the fprobe data structure
+ * defined here, but removes the ftrace_ops data structure.
+ */
+ struct ftrace_ops ops;
+#endif
+ unsigned long nmissed;
+ unsigned int flags;
+ struct rethook *rethook;
+ size_t entry_data_size;
+ int nr_maxactive;
+
+ int (*entry_handler)(struct fprobe *fp, unsigned long entry_ip,
+ struct pt_regs *regs, void *entry_data);
+ void (*exit_handler)(struct fprobe *fp, unsigned long entry_ip,
+ struct pt_regs *regs, void *entry_data);
+};
+
+/* This fprobe is soft-disabled. */
+#define FPROBE_FL_DISABLED 1
+
+/*
+ * This fprobe handler will be shared with kprobes.
+ * This flag must be set before registering.
+ */
+#define FPROBE_FL_KPROBE_SHARED 2
+
+static inline bool fprobe_disabled(struct fprobe *fp)
+{
+ return (fp) ? fp->flags & FPROBE_FL_DISABLED : false;
+}
+
+static inline bool fprobe_shared_with_kprobes(struct fprobe *fp)
+{
+ return (fp) ? fp->flags & FPROBE_FL_KPROBE_SHARED : false;
+}
+
+#ifdef CONFIG_FPROBE
+int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter);
+int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num);
+int register_fprobe_syms(struct fprobe *fp, const char **syms, int num);
+int unregister_fprobe(struct fprobe *fp);
+#else
+static inline int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter)
+{
+ return -EOPNOTSUPP;
+}
+static inline int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num)
+{
+ return -EOPNOTSUPP;
+}
+static inline int register_fprobe_syms(struct fprobe *fp, const char **syms, int num)
+{
+ return -EOPNOTSUPP;
+}
+static inline int unregister_fprobe(struct fprobe *fp)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+/**
+ * disable_fprobe() - Disable fprobe
+ * @fp: The fprobe to be disabled.
+ *
+ * This will soft-disable @fp. Note that this doesn't remove the ftrace
+ * hooks from the function entry.
+ */
+static inline void disable_fprobe(struct fprobe *fp)
+{
+ if (fp)
+ fp->flags |= FPROBE_FL_DISABLED;
+}
+
+/**
+ * enable_fprobe() - Enable fprobe
+ * @fp: The fprobe to be enabled.
+ *
+ * This will soft-enable @fp.
+ */
+static inline void enable_fprobe(struct fprobe *fp)
+{
+ if (fp)
+ fp->flags &= ~FPROBE_FL_DISABLED;
+}
+
+#endif
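
A minimal usage sketch (the handler, module hooks, and filter pattern are illustrative only): register_fprobe() attaches the handlers to every traceable function matching the filter.

static int my_entry(struct fprobe *fp, unsigned long entry_ip,
		    struct pt_regs *regs, void *entry_data)
{
	pr_info("entered %pS\n", (void *)entry_ip);
	return 0;
}

static struct fprobe my_fprobe = {
	.entry_handler = my_entry,
};

static int __init my_init(void)
{
	/* Probe everything matching "vfs_*", with no exclusions. */
	return register_fprobe(&my_fprobe, "vfs_*", NULL);
}
module_init(my_init);

static void __exit my_exit(void)
{
	unregister_fprobe(&my_fprobe);
}
module_exit(my_exit);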
diff --git a/include/linux/frame.h b/include/linux/frame.h
deleted file mode 100644
index d772c61c31da..000000000000
--- a/include/linux/frame.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef _LINUX_FRAME_H
-#define _LINUX_FRAME_H
-
-#ifdef CONFIG_STACK_VALIDATION
-/*
- * This macro marks the given function's stack frame as "non-standard", which
- * tells objtool to ignore the function when doing stack metadata validation.
- * It should only be used in special cases where you're 100% sure it won't
- * affect the reliability of frame pointers and kernel stack traces.
- *
- * For more information, see tools/objtool/Documentation/stack-validation.txt.
- */
-#define STACK_FRAME_NON_STANDARD(func) \
- static void __used __section(.discard.func_stack_frame_non_standard) \
- *__func_stack_frame_non_standard_##func = func
-
-#else /* !CONFIG_STACK_VALIDATION */
-
-#define STACK_FRAME_NON_STANDARD(func)
-
-#endif /* CONFIG_STACK_VALIDATION */
-
-#endif /* _LINUX_FRAME_H */
diff --git a/include/linux/freelist.h b/include/linux/freelist.h
new file mode 100644
index 000000000000..fc1842b96469
--- /dev/null
+++ b/include/linux/freelist.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+#ifndef FREELIST_H
+#define FREELIST_H
+
+#include <linux/atomic.h>
+
+/*
+ * Copyright: cameron@moodycamel.com
+ *
+ * A simple CAS-based lock-free free list. Not the fastest thing in the world
+ * under heavy contention, but simple and correct (assuming nodes are never
+ * freed until after the free list is destroyed), and fairly speedy under low
+ * contention.
+ *
+ * Adapted from: https://moodycamel.com/blog/2014/solving-the-aba-problem-for-lock-free-free-lists
+ */
+
+struct freelist_node {
+ atomic_t refs;
+ struct freelist_node *next;
+};
+
+struct freelist_head {
+ struct freelist_node *head;
+};
+
+#define REFS_ON_FREELIST 0x80000000
+#define REFS_MASK 0x7FFFFFFF
+
+static inline void __freelist_add(struct freelist_node *node, struct freelist_head *list)
+{
+ /*
+ * Since the refcount is zero, and nobody can increase it once it's
+ * zero (except us, and we run only one copy of this method per node at
+ * a time, i.e. the single thread case), then we know we can safely
+ * change the next pointer of the node; however, once the refcount is
+ * back above zero, then other threads could increase it (happens under
+ * heavy contention, when the refcount goes to zero in between a load
+ * and a refcount increment of a node in try_get, then back up to
+ * something non-zero, then the refcount increment is done by the other
+ * thread) -- so if the CAS to add the node to the actual list fails,
+ * decrease the refcount and leave the add operation to the next thread
+ * that puts the refcount back to zero (which could be us, hence the
+ * loop).
+ */
+ struct freelist_node *head = READ_ONCE(list->head);
+
+ for (;;) {
+ WRITE_ONCE(node->next, head);
+ atomic_set_release(&node->refs, 1);
+
+ if (!try_cmpxchg_release(&list->head, &head, node)) {
+ /*
+ * Hmm, the add failed, but we can only try again when
+ * the refcount goes back to zero.
+ */
+ if (atomic_fetch_add_release(REFS_ON_FREELIST - 1, &node->refs) == 1)
+ continue;
+ }
+ return;
+ }
+}
+
+static inline void freelist_add(struct freelist_node *node, struct freelist_head *list)
+{
+ /*
+ * We know that the should-be-on-freelist bit is 0 at this point, so
+ * it's safe to set it using a fetch_add.
+ */
+ if (!atomic_fetch_add_release(REFS_ON_FREELIST, &node->refs)) {
+ /*
+ * Oh look! We were the last ones referencing this node, and we
+ * know we want to add it to the free list, so let's do it!
+ */
+ __freelist_add(node, list);
+ }
+}
+
+static inline struct freelist_node *freelist_try_get(struct freelist_head *list)
+{
+ struct freelist_node *prev, *next, *head = smp_load_acquire(&list->head);
+ unsigned int refs;
+
+ while (head) {
+ prev = head;
+ refs = atomic_read(&head->refs);
+ if ((refs & REFS_MASK) == 0 ||
+ !atomic_try_cmpxchg_acquire(&head->refs, &refs, refs+1)) {
+ head = smp_load_acquire(&list->head);
+ continue;
+ }
+
+ /*
+ * Good, reference count has been incremented (it wasn't at
+ * zero), which means we can read the next and not worry about
+ * it changing between now and the time we do the CAS.
+ */
+ next = READ_ONCE(head->next);
+ if (try_cmpxchg_acquire(&list->head, &head, next)) {
+ /*
+ * Yay, got the node. This means it was on the list,
+ * which means should-be-on-freelist must be false no
+ * matter the refcount (because nobody else knows it's
+ * been taken off yet, it can't have been put back on).
+ */
+ WARN_ON_ONCE(atomic_read(&head->refs) & REFS_ON_FREELIST);
+
+ /*
+ * Decrease refcount twice, once for our ref, and once
+ * for the list's ref.
+ */
+ atomic_fetch_add(-2, &head->refs);
+
+ return head;
+ }
+
+ /*
+ * OK, the head must have changed on us, but we still need to decrement
+ * the refcount we increased.
+ */
+ refs = atomic_fetch_add(-1, &prev->refs);
+ if (refs == REFS_ON_FREELIST + 1)
+ __freelist_add(prev, list);
+ }
+
+ return NULL;
+}
+
+#endif /* FREELIST_H */
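
Usage sketch (names invented): nodes must outlive the list and start with refs at zero; callers embed freelist_node in their own object and recover it with container_of().

struct my_obj {
	struct freelist_node node;	/* refs must start at 0 */
	int payload;
};

static struct freelist_head pool;

static void my_obj_put(struct my_obj *obj)
{
	freelist_add(&obj->node, &pool);
}

static struct my_obj *my_obj_get(void)
{
	struct freelist_node *n = freelist_try_get(&pool);

	return n ? container_of(n, struct my_obj, node) : NULL;
}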
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index dd03e837ebb7..b303472255be 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Freezer declarations */
#ifndef FREEZER_H_INCLUDED
@@ -7,9 +8,11 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>
+#include <linux/jump_label.h>
#ifdef CONFIG_FREEZER
-extern atomic_t system_freezing_cnt; /* nr of freezing conds in effect */
+DECLARE_STATIC_KEY_FALSE(freezer_active);
+
extern bool pm_freezing; /* PM freezing in effect */
extern bool pm_nosig_freezing; /* PM nosig freezing in effect */
@@ -21,10 +24,7 @@ extern unsigned int freeze_timeout_msecs;
/*
* Check if a process has been frozen
*/
-static inline bool frozen(struct task_struct *p)
-{
- return p->flags & PF_FROZEN;
-}
+extern bool frozen(struct task_struct *p);
extern bool freezing_slow_path(struct task_struct *p);
@@ -33,9 +33,10 @@ extern bool freezing_slow_path(struct task_struct *p);
*/
static inline bool freezing(struct task_struct *p)
{
- if (likely(!atomic_read(&system_freezing_cnt)))
- return false;
- return freezing_slow_path(p);
+ if (static_branch_unlikely(&freezer_active))
+ return freezing_slow_path(p);
+
+ return false;
}
/* Takes and releases task alloc lock using task_lock() */
@@ -47,23 +48,14 @@ extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
extern void thaw_kernel_threads(void);
-/*
- * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
- * If try_to_freeze causes a lockdep warning it means the caller may deadlock
- */
-static inline bool try_to_freeze_unsafe(void)
+static inline bool try_to_freeze(void)
{
might_sleep();
if (likely(!freezing(current)))
return false;
- return __refrigerator(false);
-}
-
-static inline bool try_to_freeze(void)
-{
if (!(current->flags & PF_NOFREEZE))
debug_check_no_locks_held();
- return try_to_freeze_unsafe();
+ return __refrigerator(false);
}
extern bool freeze_task(struct task_struct *p);
@@ -78,184 +70,6 @@ static inline bool cgroup_freezing(struct task_struct *task)
}
#endif /* !CONFIG_CGROUP_FREEZER */
-/*
- * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
- * calls wait_for_completion(&vfork) and reset right after it returns from this
- * function. Next, the parent should call try_to_freeze() to freeze itself
- * appropriately in case the child has exited before the freezing of tasks is
- * complete. However, we don't want kernel threads to be frozen in unexpected
- * places, so we allow them to block freeze_processes() instead or to set
- * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
- * parent won't really block freeze_processes(), since ____call_usermodehelper()
- * (the child) does a little before exec/exit and it can't be frozen before
- * waking up the parent.
- */
-
-
-/**
- * freezer_do_not_count - tell freezer to ignore %current
- *
- * Tell freezers to ignore the current task when determining whether the
- * target frozen state is reached. IOW, the current task will be
- * considered frozen enough by freezers.
- *
- * The caller shouldn't do anything which isn't allowed for a frozen task
- * until freezer_cont() is called. Usually, freezer[_do_not]_count() pair
- * wrap a scheduling operation and nothing much else.
- */
-static inline void freezer_do_not_count(void)
-{
- current->flags |= PF_FREEZER_SKIP;
-}
-
-/**
- * freezer_count - tell freezer to stop ignoring %current
- *
- * Undo freezer_do_not_count(). It tells freezers that %current should be
- * considered again and tries to freeze if freezing condition is already in
- * effect.
- */
-static inline void freezer_count(void)
-{
- current->flags &= ~PF_FREEZER_SKIP;
- /*
- * If freezing is in progress, the following paired with smp_mb()
- * in freezer_should_skip() ensures that either we see %true
- * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
- */
- smp_mb();
- try_to_freeze();
-}
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-static inline void freezer_count_unsafe(void)
-{
- current->flags &= ~PF_FREEZER_SKIP;
- smp_mb();
- try_to_freeze_unsafe();
-}
-
-/**
- * freezer_should_skip - whether to skip a task when determining frozen
- * state is reached
- * @p: task in quesion
- *
- * This function is used by freezers after establishing %true freezing() to
- * test whether a task should be skipped when determining the target frozen
- * state is reached. IOW, if this function returns %true, @p is considered
- * frozen enough.
- */
-static inline bool freezer_should_skip(struct task_struct *p)
-{
- /*
- * The following smp_mb() paired with the one in freezer_count()
- * ensures that either freezer_count() sees %true freezing() or we
- * see cleared %PF_FREEZER_SKIP and return %false. This makes it
- * impossible for a task to slip frozen state testing after
- * clearing %PF_FREEZER_SKIP.
- */
- smp_mb();
- return p->flags & PF_FREEZER_SKIP;
-}
-
-/*
- * These functions are intended to be used whenever you want allow a sleeping
- * task to be frozen. Note that neither return any clear indication of
- * whether a freeze event happened while in this function.
- */
-
-/* Like schedule(), but should not block the freezer. */
-static inline void freezable_schedule(void)
-{
- freezer_do_not_count();
- schedule();
- freezer_count();
-}
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-static inline void freezable_schedule_unsafe(void)
-{
- freezer_do_not_count();
- schedule();
- freezer_count_unsafe();
-}
-
-/*
- * Like freezable_schedule_timeout(), but should not block the freezer. Do not
- * call this with locks held.
- */
-static inline long freezable_schedule_timeout(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout(timeout);
- freezer_count();
- return __retval;
-}
-
-/*
- * Like schedule_timeout_interruptible(), but should not block the freezer. Do not
- * call this with locks held.
- */
-static inline long freezable_schedule_timeout_interruptible(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout_interruptible(timeout);
- freezer_count();
- return __retval;
-}
-
-/* Like schedule_timeout_killable(), but should not block the freezer. */
-static inline long freezable_schedule_timeout_killable(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout_killable(timeout);
- freezer_count();
- return __retval;
-}
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout_killable(timeout);
- freezer_count_unsafe();
- return __retval;
-}
-
-/*
- * Like schedule_hrtimeout_range(), but should not block the freezer. Do not
- * call this with locks held.
- */
-static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
- u64 delta, const enum hrtimer_mode mode)
-{
- int __retval;
- freezer_do_not_count();
- __retval = schedule_hrtimeout_range(expires, delta, mode);
- freezer_count();
- return __retval;
-}
-
-/*
- * Freezer-friendly wrappers around wait_event_interruptible(),
- * wait_event_killable() and wait_event_interruptible_timeout(), originally
- * defined in <linux/wait.h>
- */
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-#define wait_event_freezekillable_unsafe(wq, condition) \
-({ \
- int __retval; \
- freezer_do_not_count(); \
- __retval = wait_event_killable(wq, (condition)); \
- freezer_count_unsafe(); \
- __retval; \
-})
-
#else /* !CONFIG_FREEZER */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
@@ -267,35 +81,10 @@ static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline void thaw_kernel_threads(void) {}
-static inline bool try_to_freeze_nowarn(void) { return false; }
static inline bool try_to_freeze(void) { return false; }
-static inline void freezer_do_not_count(void) {}
-static inline void freezer_count(void) {}
-static inline int freezer_should_skip(struct task_struct *p) { return 0; }
static inline void set_freezable(void) {}
-#define freezable_schedule() schedule()
-
-#define freezable_schedule_unsafe() schedule()
-
-#define freezable_schedule_timeout(timeout) schedule_timeout(timeout)
-
-#define freezable_schedule_timeout_interruptible(timeout) \
- schedule_timeout_interruptible(timeout)
-
-#define freezable_schedule_timeout_killable(timeout) \
- schedule_timeout_killable(timeout)
-
-#define freezable_schedule_timeout_killable_unsafe(timeout) \
- schedule_timeout_killable(timeout)
-
-#define freezable_schedule_hrtimeout_range(expires, delta, mode) \
- schedule_hrtimeout_range(expires, delta, mode)
-
-#define wait_event_freezekillable_unsafe(wq, condition) \
- wait_event_killable(wq, condition)
-
#endif /* !CONFIG_FREEZER */
#endif /* FREEZER_H_INCLUDED */
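
For reference, the surviving API is used from a freezable kernel thread roughly like this (a sketch; do_work() is a stand-in):

static int my_kthread(void *unused)
{
	set_freezable();	/* kthreads are not freezable by default */

	while (!kthread_should_stop()) {
		try_to_freeze();	/* parks here while the system suspends */
		do_work();		/* stand-in for the thread's real work */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}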
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
index 1d18af034554..a631bac12220 100644
--- a/include/linux/frontswap.h
+++ b/include/linux/frontswap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FRONTSWAP_H
#define _LINUX_FRONTSWAP_H
@@ -12,18 +13,11 @@ struct frontswap_ops {
int (*load)(unsigned, pgoff_t, struct page *); /* load a page */
void (*invalidate_page)(unsigned, pgoff_t); /* page no longer needed */
void (*invalidate_area)(unsigned); /* swap type just swapoff'ed */
- struct frontswap_ops *next; /* private pointer to next ops */
};
-extern void frontswap_register_ops(struct frontswap_ops *ops);
-extern void frontswap_shrink(unsigned long);
-extern unsigned long frontswap_curr_pages(void);
-extern void frontswap_writethrough(bool);
-#define FRONTSWAP_HAS_EXCLUSIVE_GETS
-extern void frontswap_tmem_exclusive_gets(bool);
+int frontswap_register_ops(const struct frontswap_ops *ops);
-extern bool __frontswap_test(struct swap_info_struct *, pgoff_t);
-extern void __frontswap_init(unsigned type, unsigned long *map);
+extern void frontswap_init(unsigned type, unsigned long *map);
extern int __frontswap_store(struct page *page);
extern int __frontswap_load(struct page *page);
extern void __frontswap_invalidate_page(unsigned, pgoff_t);
@@ -37,11 +31,6 @@ static inline bool frontswap_enabled(void)
return static_branch_unlikely(&frontswap_enabled_key);
}
-static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
-{
- return __frontswap_test(sis, offset);
-}
-
static inline void frontswap_map_set(struct swap_info_struct *p,
unsigned long *map)
{
@@ -60,11 +49,6 @@ static inline bool frontswap_enabled(void)
return false;
}
-static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
-{
- return false;
-}
-
static inline void frontswap_map_set(struct swap_info_struct *p,
unsigned long *map)
{
@@ -104,11 +88,4 @@ static inline void frontswap_invalidate_area(unsigned type)
__frontswap_invalidate_area(type);
}
-static inline void frontswap_init(unsigned type, unsigned long *map)
-{
-#ifdef CONFIG_FRONTSWAP
- __frontswap_init(type, map);
-#endif
-}
-
#endif /* _LINUX_FRONTSWAP_H */
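
With the ->next chaining removed, registration can now fail and the result must be checked. A sketch (callback names are placeholders; frontswap_ops fields not visible in this hunk are omitted):

static const struct frontswap_ops my_ops = {
	.load            = my_load,
	.invalidate_page = my_invalidate_page,
	.invalidate_area = my_invalidate_area,
	/* remaining frontswap_ops callbacks omitted in this sketch */
};

static int __init my_backend_init(void)
{
	return frontswap_register_ops(&my_ops);
}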
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7b5d6816542b..21a981680856 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FS_H
#define _LINUX_FS_H
@@ -12,6 +13,7 @@
#include <linux/list_lru.h>
#include <linux/llist.h>
#include <linux/radix-tree.h>
+#include <linux/xarray.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/pid.h>
@@ -22,7 +24,6 @@
#include <linux/capability.h>
#include <linux/semaphore.h>
#include <linux/fcntl.h>
-#include <linux/fiemap.h>
#include <linux/rculist_bl.h>
#include <linux/atomic.h>
#include <linux/shrinker.h>
@@ -34,6 +35,14 @@
#include <linux/delayed_call.h>
#include <linux/uuid.h>
#include <linux/errseq.h>
+#include <linux/ioprio.h>
+#include <linux/fs_types.h>
+#include <linux/build_bug.h>
+#include <linux/stddef.h>
+#include <linux/mount.h>
+#include <linux/cred.h>
+#include <linux/mnt_idmapping.h>
+#include <linux/slab.h>
#include <asm/byteorder.h>
#include <uapi/linux/fs.h>
@@ -41,7 +50,9 @@
struct backing_dev_info;
struct bdi_writeback;
struct bio;
+struct io_comp_batch;
struct export_operations;
+struct fiemap_extent_info;
struct hd_geometry;
struct iovec;
struct kiocb;
@@ -58,19 +69,22 @@ struct workqueue_struct;
struct iov_iter;
struct fscrypt_info;
struct fscrypt_operations;
+struct fsverity_info;
+struct fsverity_operations;
+struct fs_context;
+struct fs_parameter_spec;
+struct fileattr;
+struct iomap_ops;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
extern void __init files_init(void);
extern void __init files_maxfiles_init(void);
-extern struct files_stat_struct files_stat;
extern unsigned long get_max_files(void);
extern unsigned int sysctl_nr_open;
-extern struct inodes_stat_t inodes_stat;
-extern int leases_enable, lease_break_time;
-extern int sysctl_protected_symlinks;
-extern int sysctl_protected_hardlinks;
+
+typedef __kernel_rwf_t rwf_t;
struct buffer_head;
typedef int (get_block_t)(struct inode *inode, sector_t iblock,
@@ -90,7 +104,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/*
* flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond
- * to O_WRONLY and O_RDWR via the strange trick in __dentry_open()
+ * to O_WRONLY and O_RDWR via the strange trick in do_dentry_open()
*/
/* file is open for reading */
@@ -128,7 +142,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* Expect random access pattern */
#define FMODE_RANDOM ((__force fmode_t)0x1000)
-/* File is huge (eg. /dev/kmem): treat loff_t as unsigned */
+/* File is huge (eg. /dev/mem): treat loff_t as unsigned */
#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000)
/* File is opened with O_PATH; almost nothing can be done with it */
@@ -143,19 +157,37 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* Has write method(s) */
#define FMODE_CAN_WRITE ((__force fmode_t)0x40000)
+#define FMODE_OPENED ((__force fmode_t)0x80000)
+#define FMODE_CREATED ((__force fmode_t)0x100000)
+
+/* File is stream-like */
+#define FMODE_STREAM ((__force fmode_t)0x200000)
+
+/* File supports DIRECT IO */
+#define FMODE_CAN_ODIRECT ((__force fmode_t)0x400000)
+
+#define FMODE_NOREUSE ((__force fmode_t)0x800000)
+
+/* File supports non-exclusive O_DIRECT writes from multiple threads */
+#define FMODE_DIO_PARALLEL_WRITE ((__force fmode_t)0x1000000)
+
/* File was opened by fanotify and shouldn't generate fanotify events */
#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
-/* File is capable of returning -EAGAIN if AIO will block */
-#define FMODE_AIO_NOWAIT ((__force fmode_t)0x8000000)
+/* File is capable of returning -EAGAIN if I/O will block */
+#define FMODE_NOWAIT ((__force fmode_t)0x8000000)
-/*
- * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
- * that indicates that they should check the contents of the iovec are
- * valid, but not check the memory that the iovec elements
- * points too.
- */
-#define CHECK_IOVEC_ONLY -1
+/* File represents mount that needs unmounting */
+#define FMODE_NEED_UNMOUNT ((__force fmode_t)0x10000000)
+
+/* File does not contribute to nr_files count */
+#define FMODE_NOACCOUNT ((__force fmode_t)0x20000000)
+
+/* File supports async buffered reads */
+#define FMODE_BUF_RASYNC ((__force fmode_t)0x40000000)
+
+/* File supports async nowait buffered writes */
+#define FMODE_BUF_WASYNC ((__force fmode_t)0x80000000)
/*
* Attribute flags. These should be or-ed together to figure out what
@@ -171,7 +203,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define ATTR_ATIME_SET (1 << 7)
#define ATTR_MTIME_SET (1 << 8)
#define ATTR_FORCE (1 << 9) /* Not a change, but a change it */
-#define ATTR_ATTR_FLAG (1 << 10)
#define ATTR_KILL_SUID (1 << 11)
#define ATTR_KILL_SGID (1 << 12)
#define ATTR_FILE (1 << 13)
@@ -199,12 +230,30 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
struct iattr {
unsigned int ia_valid;
umode_t ia_mode;
- kuid_t ia_uid;
- kgid_t ia_gid;
+ /*
+ * The two anonymous unions wrap structures with the same member.
+ *
+ * Filesystems raising FS_ALLOW_IDMAP need to use ia_vfs{g,u}id which
+ * are a dedicated type requiring the filesystem to use the dedicated
+ * helpers. Other filesystem can continue to use ia_{g,u}id until they
+ * have been ported.
+ *
+ * They always contain the same value. In other words FS_ALLOW_IDMAP
+ * pass down the same value on idmapped mounts as they would on regular
+ * mounts.
+ */
+ union {
+ kuid_t ia_uid;
+ vfsuid_t ia_vfsuid;
+ };
+ union {
+ kgid_t ia_gid;
+ vfsgid_t ia_vfsgid;
+ };
loff_t ia_size;
- struct timespec ia_atime;
- struct timespec ia_mtime;
- struct timespec ia_ctime;
+ struct timespec64 ia_atime;
+ struct timespec64 ia_mtime;
+ struct timespec64 ia_ctime;
/*
* Not an attribute, but an auxiliary info for filesystems wanting to
@@ -243,7 +292,7 @@ struct iattr {
* trying again. The aop will be taking reasonable
* precautions not to livelock. If the caller held a page
* reference, it should drop it before retrying. Returned
- * by readpage().
+ * by read_folio().
*
* address_space_operation functions return these large constants to indicate
* special semantics to the caller. These are much larger than the bytes in a
@@ -256,20 +305,17 @@ enum positive_aop_returns {
AOP_TRUNCATED_PAGE = 0x80001,
};
-#define AOP_FLAG_CONT_EXPAND 0x0001 /* called from cont_expand */
-#define AOP_FLAG_NOFS 0x0002 /* used by filesystem to direct
- * helper code (eg buffer layer)
- * to clear GFP_FS from alloc */
-
/*
* oh the beauties of C type declarations.
*/
struct page;
struct address_space;
struct writeback_control;
+struct readahead_control;
/*
* Write life time hint values.
+ * Stored in struct inode as u8.
*/
enum rw_hint {
WRITE_LIFE_NOT_SET = 0,
@@ -280,22 +326,45 @@ enum rw_hint {
WRITE_LIFE_EXTREME = RWH_WRITE_LIFE_EXTREME,
};
-#define IOCB_EVENTFD (1 << 0)
-#define IOCB_APPEND (1 << 1)
-#define IOCB_DIRECT (1 << 2)
-#define IOCB_HIPRI (1 << 3)
-#define IOCB_DSYNC (1 << 4)
-#define IOCB_SYNC (1 << 5)
-#define IOCB_WRITE (1 << 6)
-#define IOCB_NOWAIT (1 << 7)
+/* Match RWF_* bits to IOCB bits */
+#define IOCB_HIPRI (__force int) RWF_HIPRI
+#define IOCB_DSYNC (__force int) RWF_DSYNC
+#define IOCB_SYNC (__force int) RWF_SYNC
+#define IOCB_NOWAIT (__force int) RWF_NOWAIT
+#define IOCB_APPEND (__force int) RWF_APPEND
+
+/* non-RWF related bits - start at 16 */
+#define IOCB_EVENTFD (1 << 16)
+#define IOCB_DIRECT (1 << 17)
+#define IOCB_WRITE (1 << 18)
+/* iocb->ki_waitq is valid */
+#define IOCB_WAITQ (1 << 19)
+#define IOCB_NOIO (1 << 20)
+/* can use bio alloc cache */
+#define IOCB_ALLOC_CACHE (1 << 21)
+
+/* for use in trace events */
+#define TRACE_IOCB_STRINGS \
+ { IOCB_HIPRI, "HIPRI" }, \
+ { IOCB_DSYNC, "DSYNC" }, \
+ { IOCB_SYNC, "SYNC" }, \
+ { IOCB_NOWAIT, "NOWAIT" }, \
+ { IOCB_APPEND, "APPEND" }, \
+ { IOCB_EVENTFD, "EVENTFD"}, \
+ { IOCB_DIRECT, "DIRECT" }, \
+ { IOCB_WRITE, "WRITE" }, \
+ { IOCB_WAITQ, "WAITQ" }, \
+ { IOCB_NOIO, "NOIO" }, \
+ { IOCB_ALLOC_CACHE, "ALLOC_CACHE" }
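
One consequence of mapping the low IOCB bits directly onto RWF_*: a driver's ->read_iter() can test for RWF_NOWAIT without a translation step. A sketch with invented my_* helpers:

static ssize_t my_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	/* RWF_NOWAIT from userspace arrives verbatim as IOCB_NOWAIT. */
	if ((iocb->ki_flags & IOCB_NOWAIT) && my_would_block(iocb->ki_filp))
		return -EAGAIN;

	return my_do_read(iocb, to);
}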
struct kiocb {
struct file *ki_filp;
loff_t ki_pos;
- void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
+ void (*ki_complete)(struct kiocb *iocb, long ret);
void *private;
int ki_flags;
- enum rw_hint ki_hint;
+ u16 ki_ioprio; /* See linux/ioprio.h */
+ struct wait_page_queue *ki_waitq; /* for async buffered IO */
};
static inline bool is_sync_kiocb(struct kiocb *kiocb)
@@ -303,43 +372,20 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
return kiocb->ki_complete == NULL;
}
-/*
- * "descriptor" for what we're up to with a read.
- * This allows us to use the same read code yet
- * have multiple different users of the data that
- * we read from a file.
- *
- * The simplest case just copies the data to user
- * mode.
- */
-typedef struct {
- size_t written;
- size_t count;
- union {
- char __user *buf;
- void *data;
- } arg;
- int error;
-} read_descriptor_t;
-
-typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
- unsigned long, unsigned long);
-
struct address_space_operations {
int (*writepage)(struct page *page, struct writeback_control *wbc);
- int (*readpage)(struct file *, struct page *);
+ int (*read_folio)(struct file *, struct folio *);
/* Write back some dirty pages from this mapping. */
int (*writepages)(struct address_space *, struct writeback_control *);
- /* Set a page dirty. Return true if this dirtied it */
- int (*set_page_dirty)(struct page *page);
+ /* Mark a folio dirty. Return true if this dirtied it */
+ bool (*dirty_folio)(struct address_space *, struct folio *);
- int (*readpages)(struct file *filp, struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages);
+ void (*readahead)(struct readahead_control *);
int (*write_begin)(struct file *, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata);
int (*write_end)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
@@ -347,128 +393,113 @@ struct address_space_operations {
/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
sector_t (*bmap)(struct address_space *, sector_t);
- void (*invalidatepage) (struct page *, unsigned int, unsigned int);
- int (*releasepage) (struct page *, gfp_t);
- void (*freepage)(struct page *);
+ void (*invalidate_folio) (struct folio *, size_t offset, size_t len);
+ bool (*release_folio)(struct folio *, gfp_t);
+ void (*free_folio)(struct folio *folio);
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
/*
- * migrate the contents of a page to the specified target. If
+ * migrate the contents of a folio to the specified target. If
* migrate_mode is MIGRATE_ASYNC, it must not block.
*/
- int (*migratepage) (struct address_space *,
- struct page *, struct page *, enum migrate_mode);
- bool (*isolate_page)(struct page *, isolate_mode_t);
- void (*putback_page)(struct page *);
- int (*launder_page) (struct page *);
- int (*is_partially_uptodate) (struct page *, unsigned long,
- unsigned long);
- void (*is_dirty_writeback) (struct page *, bool *, bool *);
+ int (*migrate_folio)(struct address_space *, struct folio *dst,
+ struct folio *src, enum migrate_mode);
+ int (*launder_folio)(struct folio *);
+ bool (*is_partially_uptodate) (struct folio *, size_t from,
+ size_t count);
+ void (*is_dirty_writeback) (struct folio *, bool *dirty, bool *wb);
int (*error_remove_page)(struct address_space *, struct page *);
/* swapfile support */
int (*swap_activate)(struct swap_info_struct *sis, struct file *file,
sector_t *span);
void (*swap_deactivate)(struct file *file);
+ int (*swap_rw)(struct kiocb *iocb, struct iov_iter *iter);
};
extern const struct address_space_operations empty_aops;
-/*
- * pagecache_write_begin/pagecache_write_end must be used by general code
- * to write into the pagecache.
+/**
+ * struct address_space - Contents of a cacheable, mappable object.
+ * @host: Owner, either the inode or the block_device.
+ * @i_pages: Cached pages.
+ * @invalidate_lock: Guards coherency between page cache contents and
+ * file offset->disk block mappings in the filesystem during invalidates.
+ * It is also used to block modification of page cache contents through
+ * memory mappings.
+ * @gfp_mask: Memory allocation flags to use for allocating pages.
+ * @i_mmap_writable: Number of VM_SHARED mappings.
+ * @nr_thps: Number of THPs in the pagecache (non-shmem only).
+ * @i_mmap: Tree of private and shared mappings.
+ * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable.
+ * @nrpages: Number of page entries, protected by the i_pages lock.
+ * @writeback_index: Writeback starts here.
+ * @a_ops: Methods.
+ * @flags: Error bits and flags (AS_*).
+ * @wb_err: The most recent error which has occurred.
+ * @private_lock: For use by the owner of the address_space.
+ * @private_list: For use by the owner of the address_space.
+ * @private_data: For use by the owner of the address_space.
*/
-int pagecache_write_begin(struct file *, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata);
-
-int pagecache_write_end(struct file *, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata);
-
struct address_space {
- struct inode *host; /* owner: inode, block_device */
- struct radix_tree_root page_tree; /* radix tree of all pages */
- spinlock_t tree_lock; /* and lock protecting it */
- atomic_t i_mmap_writable;/* count VM_SHARED mappings */
- struct rb_root i_mmap; /* tree of private and shared mappings */
- struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */
- /* Protected by tree_lock together with the radix tree */
- unsigned long nrpages; /* number of total pages */
- /* number of shadow or DAX exceptional entries */
- unsigned long nrexceptional;
- pgoff_t writeback_index;/* writeback starts here */
- const struct address_space_operations *a_ops; /* methods */
- unsigned long flags; /* error bits */
- spinlock_t private_lock; /* for use by the address_space */
- gfp_t gfp_mask; /* implicit gfp mask for allocations */
- struct list_head private_list; /* ditto */
- void *private_data; /* ditto */
+ struct inode *host;
+ struct xarray i_pages;
+ struct rw_semaphore invalidate_lock;
+ gfp_t gfp_mask;
+ atomic_t i_mmap_writable;
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ /* number of thp, only for non-shmem files */
+ atomic_t nr_thps;
+#endif
+ struct rb_root_cached i_mmap;
+ struct rw_semaphore i_mmap_rwsem;
+ unsigned long nrpages;
+ pgoff_t writeback_index;
+ const struct address_space_operations *a_ops;
+ unsigned long flags;
errseq_t wb_err;
-} __attribute__((aligned(sizeof(long))));
+ spinlock_t private_lock;
+ struct list_head private_list;
+ void *private_data;
+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
/*
* On most architectures that alignment is already the case; but
* must be enforced here for CRIS, to let the least significant bit
* of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
*/
-struct request_queue;
-
-struct block_device {
- dev_t bd_dev; /* not a kdev_t - it's a search key */
- int bd_openers;
- struct inode * bd_inode; /* will die */
- struct super_block * bd_super;
- struct mutex bd_mutex; /* open/close mutex */
- void * bd_claiming;
- void * bd_holder;
- int bd_holders;
- bool bd_write_holder;
-#ifdef CONFIG_SYSFS
- struct list_head bd_holder_disks;
-#endif
- struct block_device * bd_contains;
- unsigned bd_block_size;
- struct hd_struct * bd_part;
- /* number of times partitions within this device have been opened. */
- unsigned bd_part_count;
- int bd_invalidated;
- struct gendisk * bd_disk;
- struct request_queue * bd_queue;
- struct backing_dev_info *bd_bdi;
- struct list_head bd_list;
- /*
- * Private data. You must have bd_claim'ed the block_device
- * to use this. NOTE: bd_claim allows an owner to claim
- * the same device multiple times, the owner must take special
- * care to not mess up bd_private for that case.
- */
- unsigned long bd_private;
- /* The counter of freeze processes */
- int bd_fsfreeze_count;
- /* Mutex for freeze */
- struct mutex bd_fsfreeze_mutex;
-};
+/* XArray tags, for tagging dirty and writeback pages in the pagecache. */
+#define PAGECACHE_TAG_DIRTY XA_MARK_0
+#define PAGECACHE_TAG_WRITEBACK XA_MARK_1
+#define PAGECACHE_TAG_TOWRITE XA_MARK_2
/*
- * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
- * radix trees
+ * Returns true if any of the pages in the mapping are marked with the tag.
*/
-#define PAGECACHE_TAG_DIRTY 0
-#define PAGECACHE_TAG_WRITEBACK 1
-#define PAGECACHE_TAG_TOWRITE 2
-
-int mapping_tagged(struct address_space *mapping, int tag);
+static inline bool mapping_tagged(struct address_space *mapping, xa_mark_t tag)
+{
+ return xa_marked(&mapping->i_pages, tag);
+}
static inline void i_mmap_lock_write(struct address_space *mapping)
{
down_write(&mapping->i_mmap_rwsem);
}
+static inline int i_mmap_trylock_write(struct address_space *mapping)
+{
+ return down_write_trylock(&mapping->i_mmap_rwsem);
+}
+
static inline void i_mmap_unlock_write(struct address_space *mapping)
{
up_write(&mapping->i_mmap_rwsem);
}
+static inline int i_mmap_trylock_read(struct address_space *mapping)
+{
+ return down_read_trylock(&mapping->i_mmap_rwsem);
+}
+
static inline void i_mmap_lock_read(struct address_space *mapping)
{
down_read(&mapping->i_mmap_rwsem);
@@ -479,17 +510,27 @@ static inline void i_mmap_unlock_read(struct address_space *mapping)
up_read(&mapping->i_mmap_rwsem);
}
+static inline void i_mmap_assert_locked(struct address_space *mapping)
+{
+ lockdep_assert_held(&mapping->i_mmap_rwsem);
+}
+
+static inline void i_mmap_assert_write_locked(struct address_space *mapping)
+{
+ lockdep_assert_held_write(&mapping->i_mmap_rwsem);
+}
+
/*
* Might pages of this file be mapped into userspace?
*/
static inline int mapping_mapped(struct address_space *mapping)
{
- return !RB_EMPTY_ROOT(&mapping->i_mmap);
+ return !RB_EMPTY_ROOT(&mapping->i_mmap.rb_root);
}
/*
* Might pages of this file have been modified in userspace?
- * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff
+ * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap
* marks vma as VM_SHARED if it is shared, and the file was opened for
* writing i.e. vma may be mprotected writable even if now readonly.
*
@@ -536,6 +577,11 @@ static inline void mapping_allow_writable(struct address_space *mapping)
struct posix_acl;
#define ACL_NOT_CACHED ((void *)(-1))
+/*
+ * ACL_DONT_CACHE is for stacked filesystems, that rely on underlying fs to
+ * cache the ACL. This also means that ->get_inode_acl() can be called in RCU
+ * mode with the LOOKUP_RCU flag.
+ */
#define ACL_DONT_CACHE ((void *)(-3))
static inline struct posix_acl *
@@ -598,13 +644,13 @@ struct inode {
};
dev_t i_rdev;
loff_t i_size;
- struct timespec i_atime;
- struct timespec i_mtime;
- struct timespec i_ctime;
+ struct timespec64 i_atime;
+ struct timespec64 i_mtime;
+ struct timespec64 i_ctime;
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
unsigned short i_bytes;
- unsigned int i_blkbits;
- enum rw_hint i_write_hint;
+ u8 i_blkbits;
+ u8 i_write_hint;
blkcnt_t i_blocks;
#ifdef __NEED_I_SIZE_ORDERED
@@ -635,20 +681,23 @@ struct inode {
struct hlist_head i_dentry;
struct rcu_head i_rcu;
};
- u64 i_version;
+ atomic64_t i_version;
+ atomic64_t i_sequence; /* see futex */
atomic_t i_count;
atomic_t i_dio_count;
atomic_t i_writecount;
-#ifdef CONFIG_IMA
+#if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING)
atomic_t i_readcount; /* struct files open RO */
#endif
- const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
+ union {
+ const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
+ void (*free_inode)(struct inode *);
+ };
struct file_lock_context *i_flctx;
struct address_space i_data;
struct list_head i_devices;
union {
struct pipe_inode_info *i_pipe;
- struct block_device *i_bdev;
struct cdev *i_cdev;
char *i_link;
unsigned i_dir_seq;
@@ -661,12 +710,18 @@ struct inode {
struct fsnotify_mark_connector __rcu *i_fsnotify_marks;
#endif
-#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+#ifdef CONFIG_FS_ENCRYPTION
struct fscrypt_info *i_crypt_info;
#endif
+#ifdef CONFIG_FS_VERITY
+ struct fsverity_info *i_verity_info;
+#endif
+
void *i_private; /* fs or device private pointer */
-};
+} __randomize_layout;
+
+struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode);
static inline unsigned int i_blocksize(const struct inode *node)
{
@@ -679,6 +734,17 @@ static inline int inode_unhashed(struct inode *inode)
}
/*
+ * __mark_inode_dirty expects inodes to be hashed. Since we don't
+ * want special inodes in the fileset inode space, we make them
+ * appear hashed, but do not put on any lists. hlist_del()
+ * will work fine and require no locking.
+ */
+static inline void inode_fake_hash(struct inode *inode)
+{
+ hlist_add_fake(&inode->i_hash);
+}
+
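/*
 * A minimal sketch of the trick described below: a filesystem-internal inode
 * is fake-hashed right after allocation so that later dirtying and eviction
 * behave as if it were on the hash. The function name is hypothetical;
 * new_inode() is the usual allocator.
 */
static struct inode *example_internal_inode(struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (inode)
		inode_fake_hash(inode);
	return inode;
}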
+/*
* inode->i_mutex nesting subclasses for the lock validator:
*
* 0: the object of the current VFS operation
@@ -744,9 +810,47 @@ static inline void inode_lock_nested(struct inode *inode, unsigned subclass)
down_write_nested(&inode->i_rwsem, subclass);
}
+static inline void inode_lock_shared_nested(struct inode *inode, unsigned subclass)
+{
+ down_read_nested(&inode->i_rwsem, subclass);
+}
+
+static inline void filemap_invalidate_lock(struct address_space *mapping)
+{
+ down_write(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_unlock(struct address_space *mapping)
+{
+ up_write(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_lock_shared(struct address_space *mapping)
+{
+ down_read(&mapping->invalidate_lock);
+}
+
+static inline int filemap_invalidate_trylock_shared(
+ struct address_space *mapping)
+{
+ return down_read_trylock(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_unlock_shared(
+ struct address_space *mapping)
+{
+ up_read(&mapping->invalidate_lock);
+}
+
void lock_two_nondirectories(struct inode *, struct inode*);
void unlock_two_nondirectories(struct inode *, struct inode*);
+void filemap_invalidate_lock_two(struct address_space *mapping1,
+ struct address_space *mapping2);
+void filemap_invalidate_unlock_two(struct address_space *mapping1,
+ struct address_space *mapping2);
+
+
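/*
 * A minimal sketch of how invalidate_lock is meant to be used: held for
 * write while a filesystem shoots down page cache, so that page faults
 * (which take it shared) cannot instantiate pages mid-invalidation. The
 * function name and body are hypothetical.
 */
static void example_invalidate_range(struct address_space *mapping)
{
	filemap_invalidate_lock(mapping);
	/* ... truncate/invalidate page cache and update the block mapping ... */
	filemap_invalidate_unlock(mapping);
}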
/*
* NOTE: in a 32bit arch with a preemptable kernel and
* an UP compile the i_size_read/write must be atomic
@@ -768,7 +872,7 @@ static inline loff_t i_size_read(const struct inode *inode)
i_size = inode->i_size;
} while (read_seqcount_retry(&inode->i_size_seqcount, seq));
return i_size;
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
loff_t i_size;
preempt_disable();
@@ -793,7 +897,7 @@ static inline void i_size_write(struct inode *inode, loff_t i_size)
inode->i_size = i_size;
write_seqcount_end(&inode->i_size_seqcount);
preempt_enable();
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
preempt_disable();
inode->i_size = i_size;
preempt_enable();
@@ -812,8 +916,6 @@ static inline unsigned imajor(const struct inode *inode)
return MAJOR(inode->i_rdev);
}
-extern struct block_device *I_BDEV(struct inode *inode);
-
struct fown_struct {
rwlock_t lock; /* protects pid, uid, euid fields */
struct pid *pid; /* pid or -pgrp where SIGIO should be sent */
@@ -822,18 +924,27 @@ struct fown_struct {
int signum; /* posix.1b rt signal to be delivered on IO */
};
-/*
- * Track a single file's readahead state
+/**
+ * struct file_ra_state - Track a file's readahead state.
+ * @start: Where the most recent readahead started.
+ * @size: Number of pages read in the most recent readahead.
+ * @async_size: Number of pages that were/are not needed immediately
+ * and so were/are genuinely "ahead". Start next readahead when
+ * the first of these pages is accessed.
+ * @ra_pages: Maximum size of a readahead request, copied from the bdi.
+ * @mmap_miss: How many mmap accesses missed in the page cache.
+ * @prev_pos: The last byte in the most recent read request.
+ *
+ * When this structure is passed to ->readahead(), the "most recent"
+ * readahead means the current readahead.
*/
struct file_ra_state {
- pgoff_t start; /* where readahead started */
- unsigned int size; /* # of readahead pages */
- unsigned int async_size; /* do asynchronous readahead when
- there are only # of pages ahead */
-
- unsigned int ra_pages; /* Maximum readahead window */
- unsigned int mmap_miss; /* Cache miss stat for mmap accesses */
- loff_t prev_pos; /* Cache last read() position */
+ pgoff_t start;
+ unsigned int size;
+ unsigned int async_size;
+ unsigned int ra_pages;
+ unsigned int mmap_miss;
+ loff_t prev_pos;
};
/*
@@ -847,19 +958,19 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
struct file {
union {
- struct llist_node fu_llist;
- struct rcu_head fu_rcuhead;
- } f_u;
+ struct llist_node f_llist;
+ struct rcu_head f_rcuhead;
+ unsigned int f_iocb_flags;
+ };
struct path f_path;
struct inode *f_inode; /* cached value */
const struct file_operations *f_op;
/*
- * Protects f_ep_links, f_flags.
+ * Protects f_ep, f_flags.
* Must not be taken from IRQ context.
*/
spinlock_t f_lock;
- enum rw_hint f_write_hint;
atomic_long_t f_count;
unsigned int f_flags;
fmode_t f_mode;
@@ -878,18 +989,19 @@ struct file {
#ifdef CONFIG_EPOLL
/* Used by fs/eventpoll.c to link all the hooks to this file */
- struct list_head f_ep_links;
- struct list_head f_tfile_llink;
+ struct hlist_head *f_ep;
#endif /* #ifdef CONFIG_EPOLL */
struct address_space *f_mapping;
errseq_t f_wb_err;
-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
+ errseq_t f_sb_err; /* for syncfs */
+} __randomize_layout
+ __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
struct file_handle {
__u32 handle_bytes;
int handle_type;
/* file identifier */
- unsigned char f_handle[0];
+ unsigned char f_handle[];
};
static inline struct file *get_file(struct file *f)
@@ -898,7 +1010,6 @@ static inline struct file *get_file(struct file *f)
return f;
}
#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count)
-#define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
#define file_count(x) atomic_long_read(&(x)->f_count)
#define MAX_NON_LFS ((1UL<<31) - 1)
@@ -906,325 +1017,24 @@ static inline struct file *get_file(struct file *f)
/* Page cache limit. The filesystems should put that into their s_maxbytes
limits, otherwise bad things can happen in VM. */
#if BITS_PER_LONG==32
-#define MAX_LFS_FILESIZE (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1)
+#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT)
#elif BITS_PER_LONG==64
-#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL)
+#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX)
#endif
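/*
 * Worked example, assuming 4 KiB pages (PAGE_SHIFT == 12) on 32-bit:
 * the new limit is ((loff_t)ULONG_MAX << 12) = (2^32 - 1) * 4096, just
 * under 16 TiB, so every page index that fits in an unsigned long is
 * usable; the old formula capped files at 2^43 - 1 bytes (8 TiB - 1).
 */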
-#define FL_POSIX 1
-#define FL_FLOCK 2
-#define FL_DELEG 4 /* NFSv4 delegation */
-#define FL_ACCESS 8 /* not trying to lock, just looking */
-#define FL_EXISTS 16 /* when unlocking, test for existence */
-#define FL_LEASE 32 /* lease held on this file */
-#define FL_CLOSE 64 /* unlock on close */
-#define FL_SLEEP 128 /* A blocking lock */
-#define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */
-#define FL_UNLOCK_PENDING 512 /* Lease is being broken */
-#define FL_OFDLCK 1024 /* lock is "owned" by struct file */
-#define FL_LAYOUT 2048 /* outstanding pNFS layout */
-
-#define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE)
-
-/*
- * Special return value from posix_lock_file() and vfs_lock_file() for
- * asynchronous locking.
- */
-#define FILE_LOCK_DEFERRED 1
-
/* legacy typedef, should eventually be removed */
typedef void *fl_owner_t;
struct file_lock;
-struct file_lock_operations {
- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
- void (*fl_release_private)(struct file_lock *);
-};
-
-struct lock_manager_operations {
- int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
- unsigned long (*lm_owner_key)(struct file_lock *);
- fl_owner_t (*lm_get_owner)(fl_owner_t);
- void (*lm_put_owner)(fl_owner_t);
- void (*lm_notify)(struct file_lock *); /* unblock callback */
- int (*lm_grant)(struct file_lock *, int);
- bool (*lm_break)(struct file_lock *);
- int (*lm_change)(struct file_lock *, int, struct list_head *);
- void (*lm_setup)(struct file_lock *, void **);
-};
-
-struct lock_manager {
- struct list_head list;
- /*
- * NFSv4 and up also want opens blocked during the grace period;
- * NLM doesn't care:
- */
- bool block_opens;
-};
-
-struct net;
-void locks_start_grace(struct net *, struct lock_manager *);
-void locks_end_grace(struct lock_manager *);
-int locks_in_grace(struct net *);
-int opens_in_grace(struct net *);
-
-/* that will die - we need it for nfs_lock_info */
-#include <linux/nfs_fs_i.h>
-
-/*
- * struct file_lock represents a generic "file lock". It's used to represent
- * POSIX byte range locks, BSD (flock) locks, and leases. It's important to
- * note that the same struct is used to represent both a request for a lock and
- * the lock itself, but the same object is never used for both.
- *
- * FIXME: should we create a separate "struct lock_request" to help distinguish
- * these two uses?
- *
- * The varous i_flctx lists are ordered by:
- *
- * 1) lock owner
- * 2) lock range start
- * 3) lock range end
- *
- * Obviously, the last two criteria only matter for POSIX locks.
- */
-struct file_lock {
- struct file_lock *fl_next; /* singly linked list for this inode */
- struct list_head fl_list; /* link into file_lock_context */
- struct hlist_node fl_link; /* node in global lists */
- struct list_head fl_block; /* circular list of blocked processes */
- fl_owner_t fl_owner;
- unsigned int fl_flags;
- unsigned char fl_type;
- unsigned int fl_pid;
- int fl_link_cpu; /* what cpu's list is this on? */
- struct pid *fl_nspid;
- wait_queue_head_t fl_wait;
- struct file *fl_file;
- loff_t fl_start;
- loff_t fl_end;
-
- struct fasync_struct * fl_fasync; /* for lease break notifications */
- /* for lease breaks: */
- unsigned long fl_break_time;
- unsigned long fl_downgrade_time;
-
- const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */
- const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
- union {
- struct nfs_lock_info nfs_fl;
- struct nfs4_lock_info nfs4_fl;
- struct {
- struct list_head link; /* link in AFS vnode's pending_locks list */
- int state; /* state of grant or error if -ve */
- } afs;
- } fl_u;
-};
-
-struct file_lock_context {
- spinlock_t flc_lock;
- struct list_head flc_flock;
- struct list_head flc_posix;
- struct list_head flc_lease;
-};
-
/* The following constant reflects the upper bound of the file/locking space */
#ifndef OFFSET_MAX
-#define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1)))
-#define OFFSET_MAX INT_LIMIT(loff_t)
-#define OFFT_OFFSET_MAX INT_LIMIT(off_t)
+#define OFFSET_MAX type_max(loff_t)
+#define OFFT_OFFSET_MAX type_max(off_t)
#endif
extern void send_sigio(struct fown_struct *fown, int fd, int band);
-/*
- * Return the inode to use for locking
- *
- * For overlayfs this should be the overlay inode, not the real inode returned
- * by file_inode(). For any other fs file_inode(filp) and locks_inode(filp) are
- * equal.
- */
-static inline struct inode *locks_inode(const struct file *f)
-{
- return f->f_path.dentry->d_inode;
-}
-
-#ifdef CONFIG_FILE_LOCKING
-extern int fcntl_getlk(struct file *, unsigned int, struct flock *);
-extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
- struct flock *);
-
-#if BITS_PER_LONG == 32
-extern int fcntl_getlk64(struct file *, unsigned int, struct flock64 *);
-extern int fcntl_setlk64(unsigned int, struct file *, unsigned int,
- struct flock64 *);
-#endif
-
-extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
-extern int fcntl_getlease(struct file *filp);
-
-/* fs/locks.c */
-void locks_free_lock_context(struct inode *inode);
-void locks_free_lock(struct file_lock *fl);
-extern void locks_init_lock(struct file_lock *);
-extern struct file_lock * locks_alloc_lock(void);
-extern void locks_copy_lock(struct file_lock *, struct file_lock *);
-extern void locks_copy_conflock(struct file_lock *, struct file_lock *);
-extern void locks_remove_posix(struct file *, fl_owner_t);
-extern void locks_remove_file(struct file *);
-extern void locks_release_private(struct file_lock *);
-extern void posix_test_lock(struct file *, struct file_lock *);
-extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
-extern int posix_unblock_lock(struct file_lock *);
-extern int vfs_test_lock(struct file *, struct file_lock *);
-extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
-extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
-extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl);
-extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
-extern void lease_get_mtime(struct inode *, struct timespec *time);
-extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
-extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
-extern int lease_modify(struct file_lock *, int, struct list_head *);
-struct files_struct;
-extern void show_fd_locks(struct seq_file *f,
- struct file *filp, struct files_struct *files);
-#else /* !CONFIG_FILE_LOCKING */
-static inline int fcntl_getlk(struct file *file, unsigned int cmd,
- struct flock __user *user)
-{
- return -EINVAL;
-}
-
-static inline int fcntl_setlk(unsigned int fd, struct file *file,
- unsigned int cmd, struct flock __user *user)
-{
- return -EACCES;
-}
-
-#if BITS_PER_LONG == 32
-static inline int fcntl_getlk64(struct file *file, unsigned int cmd,
- struct flock64 __user *user)
-{
- return -EINVAL;
-}
-
-static inline int fcntl_setlk64(unsigned int fd, struct file *file,
- unsigned int cmd, struct flock64 __user *user)
-{
- return -EACCES;
-}
-#endif
-static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
-{
- return -EINVAL;
-}
-
-static inline int fcntl_getlease(struct file *filp)
-{
- return F_UNLCK;
-}
-
-static inline void
-locks_free_lock_context(struct inode *inode)
-{
-}
-
-static inline void locks_init_lock(struct file_lock *fl)
-{
- return;
-}
-
-static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
-{
- return;
-}
-
-static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
-{
- return;
-}
-
-static inline void locks_remove_posix(struct file *filp, fl_owner_t owner)
-{
- return;
-}
-
-static inline void locks_remove_file(struct file *filp)
-{
- return;
-}
-
-static inline void posix_test_lock(struct file *filp, struct file_lock *fl)
-{
- return;
-}
-
-static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
- struct file_lock *conflock)
-{
- return -ENOLCK;
-}
-
-static inline int posix_unblock_lock(struct file_lock *waiter)
-{
- return -ENOENT;
-}
-
-static inline int vfs_test_lock(struct file *filp, struct file_lock *fl)
-{
- return 0;
-}
-
-static inline int vfs_lock_file(struct file *filp, unsigned int cmd,
- struct file_lock *fl, struct file_lock *conf)
-{
- return -ENOLCK;
-}
-
-static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
-{
- return 0;
-}
-
-static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
-{
- return -ENOLCK;
-}
-
-static inline int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
-{
- return 0;
-}
-
-static inline void lease_get_mtime(struct inode *inode, struct timespec *time)
-{
- return;
-}
-
-static inline int generic_setlease(struct file *filp, long arg,
- struct file_lock **flp, void **priv)
-{
- return -EINVAL;
-}
-
-static inline int vfs_setlease(struct file *filp, long arg,
- struct file_lock **lease, void **priv)
-{
- return -EINVAL;
-}
-
-static inline int lease_modify(struct file_lock *fl, int arg,
- struct list_head *dispose)
-{
- return -EINVAL;
-}
-
-struct files_struct;
-static inline void show_fd_locks(struct seq_file *f,
- struct file *filp, struct files_struct *files) {}
-#endif /* !CONFIG_FILE_LOCKING */
-
static inline struct inode *file_inode(const struct file *f)
{
return f->f_inode;
@@ -1232,16 +1042,11 @@ static inline struct inode *file_inode(const struct file *f)
static inline struct dentry *file_dentry(const struct file *file)
{
- return d_real(file->f_path.dentry, file_inode(file), 0);
-}
-
-static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
-{
- return locks_lock_inode_wait(locks_inode(filp), fl);
+ return d_real(file->f_path.dentry, file_inode(file));
}
struct fasync_struct {
- spinlock_t fa_lock;
+ rwlock_t fa_lock;
int magic;
int fa_fd;
struct fasync_struct *fa_next; /* singly linked list */
@@ -1267,7 +1072,39 @@ extern void f_delown(struct file *filp);
extern pid_t f_getown(struct file *filp);
extern int send_sigurg(struct fown_struct *fown);
-struct mm_struct;
+/*
+ * sb->s_flags. Note that these mirror the equivalent MS_* flags where
+ * represented in both.
+ */
+#define SB_RDONLY 1 /* Mount read-only */
+#define SB_NOSUID 2 /* Ignore suid and sgid bits */
+#define SB_NODEV 4 /* Disallow access to device special files */
+#define SB_NOEXEC 8 /* Disallow program execution */
+#define SB_SYNCHRONOUS 16 /* Writes are synced at once */
+#define SB_MANDLOCK 64 /* Allow mandatory locks on an FS */
+#define SB_DIRSYNC 128 /* Directory modifications are synchronous */
+#define SB_NOATIME 1024 /* Do not update access times. */
+#define SB_NODIRATIME 2048 /* Do not update directory access times */
+#define SB_SILENT 32768
+#define SB_POSIXACL (1<<16) /* VFS does not apply the umask */
+#define SB_INLINECRYPT (1<<17) /* Use blk-crypto for encrypted files */
+#define SB_KERNMOUNT (1<<22) /* this is a kern_mount call */
+#define SB_I_VERSION (1<<23) /* Update inode I_version field */
+#define SB_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
+
+/* These sb flags are internal to the kernel */
+#define SB_SUBMOUNT (1<<26)
+#define SB_FORCE (1<<27)
+#define SB_NOSEC (1<<28)
+#define SB_BORN (1<<29)
+#define SB_ACTIVE (1<<30)
+#define SB_NOUSER (1<<31)
+
+/* These flags relate to encoding and casefolding */
+#define SB_ENC_STRICT_MODE_FL (1 << 0)
+
+#define sb_has_strict_encoding(sb) \
+ (sb->s_encoding_flags & SB_ENC_STRICT_MODE_FL)
/*
* Umount options
@@ -1283,9 +1120,17 @@ struct mm_struct;
#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
#define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */
+#define SB_I_STABLE_WRITES 0x00000008 /* don't modify blks until WB is done */
/* sb->s_iflags to limit user namespace mounts */
#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
+#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
+#define SB_I_UNTRUSTED_MOUNTER 0x00000040
+
+#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
+#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */
+#define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */
+#define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */
/* Possible states of 'frozen' field */
enum {
@@ -1301,7 +1146,7 @@ enum {
struct sb_writers {
int frozen; /* Is sb frozen? */
- wait_queue_head_t wait_unfrozen; /* for get_super_thawed() */
+ wait_queue_head_t wait_unfrozen; /* wait for thaw */
struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
};
@@ -1327,10 +1172,18 @@ struct super_block {
void *s_security;
#endif
const struct xattr_handler **s_xattr;
-
+#ifdef CONFIG_FS_ENCRYPTION
const struct fscrypt_operations *s_cop;
-
- struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
+ struct fscrypt_keyring *s_master_keys; /* master crypto keys in use */
+#endif
+#ifdef CONFIG_FS_VERITY
+ const struct fsverity_operations *s_vop;
+#endif
+#if IS_ENABLED(CONFIG_UNICODE)
+ struct unicode_map *s_encoding;
+ __u16 s_encoding_flags;
+#endif
+ struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
struct backing_dev_info *s_bdi;
@@ -1341,17 +1194,29 @@ struct super_block {
struct sb_writers s_writers;
+ /*
+ * Keep s_fs_info, s_time_gran, s_fsnotify_mask, and
+ * s_fsnotify_marks together for cache efficiency. They are frequently
+ * accessed and rarely modified.
+ */
+ void *s_fs_info; /* Filesystem private info */
+
+ /* Granularity of c/m/atime in ns (cannot be worse than a second) */
+ u32 s_time_gran;
+ /* Time limits for c/m/atime in seconds */
+ time64_t s_time_min;
+ time64_t s_time_max;
+#ifdef CONFIG_FSNOTIFY
+ __u32 s_fsnotify_mask;
+ struct fsnotify_mark_connector __rcu *s_fsnotify_marks;
+#endif
+
char s_id[32]; /* Informational name */
uuid_t s_uuid; /* UUID */
- void *s_fs_info; /* Filesystem private info */
unsigned int s_max_links;
fmode_t s_mode;
- /* Granularity of c/m/atime in ns.
- Cannot be worse than a second */
- u32 s_time_gran;
-
/*
* The next field is for VFS *only*. No filesystems have any business
* even looking at it. You had been warned.
@@ -1362,23 +1227,27 @@ struct super_block {
* Filesystem subtype. If non-empty the filesystem type field
* in /proc/mounts will be "type.subtype"
*/
- char *s_subtype;
+ const char *s_subtype;
const struct dentry_operations *s_d_op; /* default d_op for dentries */
- /*
- * Saved pool identifier for cleancache (-1 means none)
- */
- int cleancache_poolid;
-
struct shrinker s_shrink; /* per-sb shrinker handle */
/* Number of inodes with nlink == 0 but still referenced */
atomic_long_t s_remove_count;
+ /*
+ * Number of inode/mount/sb objects that are being watched, note that
+ * inodes objects are currently double-accounted.
+ */
+ atomic_long_t s_fsnotify_connectors;
+
/* Being remounted read-only */
int s_readonly_remount;
+ /* per-sb errseq_t for reporting writeback errors via syncfs */
+ errseq_t s_wb_err;
+
/* AIO completions deferred from interrupt context */
struct workqueue_struct *s_dio_done_wq;
struct hlist_head s_pins;
@@ -1391,11 +1260,12 @@ struct super_block {
struct user_namespace *s_user_ns;
/*
- * Keep the lru lists last in the structure so they always sit on their
- * own individual cachelines.
+ * The list_lru structure is essentially just a pointer to a table
+ * of per-node lru lists, each of which has its own spinlock.
+ * There is no need to put them into separate cachelines.
*/
- struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
- struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
+ struct list_lru s_dentry_lru;
+ struct list_lru s_inode_lru;
struct rcu_head rcu;
struct work_struct destroy_work;
@@ -1412,7 +1282,12 @@ struct super_block {
spinlock_t s_inode_wblist_lock;
struct list_head s_inodes_wb; /* writeback inodes */
-};
+} __randomize_layout;
+
+static inline struct user_namespace *i_user_ns(const struct inode *inode)
+{
+ return inode->i_sb->s_user_ns;
+}
/* Helper functions so that in most cases filesystems will
* not need to deal directly with kuid_t and kgid_t and can
@@ -1421,38 +1296,219 @@ struct super_block {
*/
static inline uid_t i_uid_read(const struct inode *inode)
{
- return from_kuid(inode->i_sb->s_user_ns, inode->i_uid);
+ return from_kuid(i_user_ns(inode), inode->i_uid);
}
static inline gid_t i_gid_read(const struct inode *inode)
{
- return from_kgid(inode->i_sb->s_user_ns, inode->i_gid);
+ return from_kgid(i_user_ns(inode), inode->i_gid);
}
static inline void i_uid_write(struct inode *inode, uid_t uid)
{
- inode->i_uid = make_kuid(inode->i_sb->s_user_ns, uid);
+ inode->i_uid = make_kuid(i_user_ns(inode), uid);
}
static inline void i_gid_write(struct inode *inode, gid_t gid)
{
- inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid);
+ inode->i_gid = make_kgid(i_user_ns(inode), gid);
+}
+
+/**
+ * i_uid_into_vfsuid - map an inode's i_uid down according to an idmapping
+ * @idmap: idmap of the mount the inode was found from
+ * @inode: inode to map
+ *
+ * Return: the inode's i_uid mapped down according to @idmap.
+ * If the inode's i_uid has no mapping INVALID_VFSUID is returned.
+ */
+static inline vfsuid_t i_uid_into_vfsuid(struct mnt_idmap *idmap,
+ const struct inode *inode)
+{
+ return make_vfsuid(idmap, i_user_ns(inode), inode->i_uid);
+}
+
+/**
+ * i_uid_needs_update - check whether inode's i_uid needs to be updated
+ * @idmap: idmap of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
+ *
+ * Check whether the @inode's i_uid field needs to be updated taking idmapped
+ * mounts into account if the filesystem supports it.
+ *
+ * Return: true if @inode's i_uid field needs to be updated, false if not.
+ */
+static inline bool i_uid_needs_update(struct mnt_idmap *idmap,
+ const struct iattr *attr,
+ const struct inode *inode)
+{
+ return ((attr->ia_valid & ATTR_UID) &&
+ !vfsuid_eq(attr->ia_vfsuid,
+ i_uid_into_vfsuid(idmap, inode)));
+}
+
+/**
+ * i_uid_update - update @inode's i_uid field
+ * @idmap: idmap of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
+ *
+ * Safely update @inode's i_uid field translating the vfsuid of any idmapped
+ * mount into the filesystem kuid.
+ */
+static inline void i_uid_update(struct mnt_idmap *idmap,
+ const struct iattr *attr,
+ struct inode *inode)
+{
+ if (attr->ia_valid & ATTR_UID)
+ inode->i_uid = from_vfsuid(idmap, i_user_ns(inode),
+ attr->ia_vfsuid);
}
-extern struct timespec current_time(struct inode *inode);
+/**
+ * i_gid_into_vfsgid - map an inode's i_gid down according to an idmapping
+ * @idmap: idmap of the mount the inode was found from
+ * @inode: inode to map
+ *
+ * Return: the inode's i_gid mapped down according to @idmap.
+ * If the inode's i_gid has no mapping INVALID_VFSGID is returned.
+ */
+static inline vfsgid_t i_gid_into_vfsgid(struct mnt_idmap *idmap,
+ const struct inode *inode)
+{
+ return make_vfsgid(idmap, i_user_ns(inode), inode->i_gid);
+}
+
+/**
+ * i_gid_needs_update - check whether inode's i_gid needs to be updated
+ * @idmap: idmap of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
+ *
+ * Check whether the @inode's i_gid field needs to be updated taking idmapped
+ * mounts into account if the filesystem supports it.
+ *
+ * Return: true if @inode's i_gid field needs to be updated, false if not.
+ */
+static inline bool i_gid_needs_update(struct mnt_idmap *idmap,
+ const struct iattr *attr,
+ const struct inode *inode)
+{
+ return ((attr->ia_valid & ATTR_GID) &&
+ !vfsgid_eq(attr->ia_vfsgid,
+ i_gid_into_vfsgid(idmap, inode)));
+}
+
+/**
+ * i_gid_update - update @inode's i_gid field
+ * @idmap: idmap of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
+ *
+ * Safely update @inode's i_gid field translating the vfsgid of any idmapped
+ * mount into the filesystem kgid.
+ */
+static inline void i_gid_update(struct mnt_idmap *idmap,
+ const struct iattr *attr,
+ struct inode *inode)
+{
+ if (attr->ia_valid & ATTR_GID)
+ inode->i_gid = from_vfsgid(idmap, i_user_ns(inode),
+ attr->ia_vfsgid);
+}
+
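/*
 * A minimal sketch of how a setattr path might apply both update helpers
 * above; the function name is hypothetical. Each helper checks
 * ATTR_UID/ATTR_GID in attr->ia_valid itself, so both can be called
 * unconditionally.
 */
static void example_copy_ids(struct mnt_idmap *idmap, struct inode *inode,
			     const struct iattr *attr)
{
	i_uid_update(idmap, attr, inode);
	i_gid_update(idmap, attr, inode);
}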
+/**
+ * inode_fsuid_set - initialize inode's i_uid field with caller's fsuid
+ * @inode: inode to initialize
+ * @idmap: idmap of the mount the inode was found from
+ *
+ * Initialize the i_uid field of @inode. If the inode was found/created via
+ * an idmapped mount, map the caller's fsuid according to @idmap.
+ */
+static inline void inode_fsuid_set(struct inode *inode,
+ struct mnt_idmap *idmap)
+{
+ inode->i_uid = mapped_fsuid(idmap, i_user_ns(inode));
+}
+
+/**
+ * inode_fsgid_set - initialize inode's i_gid field with caller's fsgid
+ * @inode: inode to initialize
+ * @idmap: idmap of the mount the inode was found from
+ *
+ * Initialize the i_gid field of @inode. If the inode was found/created via
+ * an idmapped mount, map the caller's fsgid according to @idmap.
+ */
+static inline void inode_fsgid_set(struct inode *inode,
+ struct mnt_idmap *idmap)
+{
+ inode->i_gid = mapped_fsgid(idmap, i_user_ns(inode));
+}
+
+/**
+ * fsuidgid_has_mapping() - check whether caller's fsuid/fsgid is mapped
+ * @sb: the superblock we want a mapping in
+ * @idmap: idmap of the relevant mount
+ *
+ * Check whether the caller's fsuid and fsgid have a valid mapping in the
+ * s_user_ns of the superblock @sb. If the caller is on an idmapped mount map
+ * the caller's fsuid and fsgid according to the @idmap first.
+ *
+ * Return: true if fsuid and fsgid are mapped, false if not.
+ */
+static inline bool fsuidgid_has_mapping(struct super_block *sb,
+ struct mnt_idmap *idmap)
+{
+ struct user_namespace *fs_userns = sb->s_user_ns;
+ kuid_t kuid;
+ kgid_t kgid;
+
+ kuid = mapped_fsuid(idmap, fs_userns);
+ if (!uid_valid(kuid))
+ return false;
+ kgid = mapped_fsgid(idmap, fs_userns);
+ if (!gid_valid(kgid))
+ return false;
+ return kuid_has_mapping(fs_userns, kuid) &&
+ kgid_has_mapping(fs_userns, kgid);
+}
+
+extern struct timespec64 current_time(struct inode *inode);
/*
* Snapshotting support.
*/
-void __sb_end_write(struct super_block *sb, int level);
-int __sb_start_write(struct super_block *sb, int level, bool wait);
+/*
+ * These are internal functions; please use sb_start_{write,pagefault,intwrite}
+ * instead.
+ */
+static inline void __sb_end_write(struct super_block *sb, int level)
+{
+ percpu_up_read(sb->s_writers.rw_sem + level - 1);
+}
+
+static inline void __sb_start_write(struct super_block *sb, int level)
+{
+ percpu_down_read(sb->s_writers.rw_sem + level - 1);
+}
+
+static inline bool __sb_start_write_trylock(struct super_block *sb, int level)
+{
+ return percpu_down_read_trylock(sb->s_writers.rw_sem + level - 1);
+}
#define __sb_writers_acquired(sb, lev) \
percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
#define __sb_writers_release(sb, lev) \
percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
+static inline bool sb_write_started(const struct super_block *sb)
+{
+ return lockdep_is_held_type(sb->s_writers.rw_sem + SB_FREEZE_WRITE - 1, 1);
+}
+
/**
* sb_end_write - drop write access to a superblock
* @sb: the super we wrote to
@@ -1510,12 +1566,12 @@ static inline void sb_end_intwrite(struct super_block *sb)
*/
static inline void sb_start_write(struct super_block *sb)
{
- __sb_start_write(sb, SB_FREEZE_WRITE, true);
+ __sb_start_write(sb, SB_FREEZE_WRITE);
}
-static inline int sb_start_write_trylock(struct super_block *sb)
+static inline bool sb_start_write_trylock(struct super_block *sb)
{
- return __sb_start_write(sb, SB_FREEZE_WRITE, false);
+ return __sb_start_write_trylock(sb, SB_FREEZE_WRITE);
}
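/*
 * A minimal sketch of freeze protection around a modification; the function
 * name and body are hypothetical, the sb_*_write() helpers are the ones
 * defined here.
 */
static void example_modify_fs(struct super_block *sb)
{
	sb_start_write(sb);		/* sleeps while the fs is frozen */
	/* ... dirty data or metadata ... */
	sb_end_write(sb);
}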
/**
@@ -1531,18 +1587,18 @@ static inline int sb_start_write_trylock(struct super_block *sb)
*
* Since page fault freeze protection behaves as a lock, users have to preserve
* ordering of freeze protection and other filesystem locks. It is advised to
- * put sb_start_pagefault() close to mmap_sem in lock ordering. Page fault
+ * put sb_start_pagefault() close to mmap_lock in lock ordering. Page fault
* handling code implies lock dependency:
*
- * mmap_sem
+ * mmap_lock
* -> sb_start_pagefault
*/
static inline void sb_start_pagefault(struct super_block *sb)
{
- __sb_start_write(sb, SB_FREEZE_PAGEFAULT, true);
+ __sb_start_write(sb, SB_FREEZE_PAGEFAULT);
}
-/*
+/**
* sb_start_intwrite - get write access to a superblock for internal fs purposes
* @sb: the super we write to
*
@@ -1557,87 +1613,111 @@ static inline void sb_start_pagefault(struct super_block *sb)
*/
static inline void sb_start_intwrite(struct super_block *sb)
{
- __sb_start_write(sb, SB_FREEZE_FS, true);
+ __sb_start_write(sb, SB_FREEZE_FS);
}
+static inline bool sb_start_intwrite_trylock(struct super_block *sb)
+{
+ return __sb_start_write_trylock(sb, SB_FREEZE_FS);
+}
-extern bool inode_owner_or_capable(const struct inode *inode);
+bool inode_owner_or_capable(struct mnt_idmap *idmap,
+ const struct inode *inode);
/*
* VFS helper functions..
*/
-extern int vfs_create(struct inode *, struct dentry *, umode_t, bool);
-extern int vfs_mkdir(struct inode *, struct dentry *, umode_t);
-extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
-extern int vfs_symlink(struct inode *, struct dentry *, const char *);
-extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct inode **);
-extern int vfs_rmdir(struct inode *, struct dentry *);
-extern int vfs_unlink(struct inode *, struct dentry *, struct inode **);
-extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int);
-extern int vfs_whiteout(struct inode *, struct dentry *);
+int vfs_create(struct mnt_idmap *, struct inode *,
+ struct dentry *, umode_t, bool);
+int vfs_mkdir(struct mnt_idmap *, struct inode *,
+ struct dentry *, umode_t);
+int vfs_mknod(struct mnt_idmap *, struct inode *, struct dentry *,
+ umode_t, dev_t);
+int vfs_symlink(struct mnt_idmap *, struct inode *,
+ struct dentry *, const char *);
+int vfs_link(struct dentry *, struct mnt_idmap *, struct inode *,
+ struct dentry *, struct inode **);
+int vfs_rmdir(struct mnt_idmap *, struct inode *, struct dentry *);
+int vfs_unlink(struct mnt_idmap *, struct inode *, struct dentry *,
+ struct inode **);
+
+/**
+ * struct renamedata - contains all information required for renaming
+ * @old_mnt_idmap: idmap of the old mount the inode was found from
+ * @old_dir: parent of source
+ * @old_dentry: source
+ * @new_mnt_idmap: idmap of the new mount the inode was found from
+ * @new_dir: parent of destination
+ * @new_dentry: destination
+ * @delegated_inode: returns an inode needing a delegation break
+ * @flags: rename flags
+ */
+struct renamedata {
+ struct mnt_idmap *old_mnt_idmap;
+ struct inode *old_dir;
+ struct dentry *old_dentry;
+ struct mnt_idmap *new_mnt_idmap;
+ struct inode *new_dir;
+ struct dentry *new_dentry;
+ struct inode **delegated_inode;
+ unsigned int flags;
+} __randomize_layout;
-extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode,
- int open_flag);
+int vfs_rename(struct renamedata *);
+
+static inline int vfs_whiteout(struct mnt_idmap *idmap,
+ struct inode *dir, struct dentry *dentry)
+{
+ return vfs_mknod(idmap, dir, dentry, S_IFCHR | WHITEOUT_MODE,
+ WHITEOUT_DEV);
+}
+
+struct file *vfs_tmpfile_open(struct mnt_idmap *idmap,
+ const struct path *parentpath,
+ umode_t mode, int open_flag, const struct cred *cred);
+
+int vfs_mkobj(struct dentry *, umode_t,
+ int (*f)(struct dentry *, umode_t, void *),
+ void *);
+
+int vfs_fchown(struct file *file, uid_t user, gid_t group);
+int vfs_fchmod(struct file *file, umode_t mode);
+int vfs_utimes(const struct path *path, struct timespec64 *times);
+
+extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+
+#ifdef CONFIG_COMPAT
+extern long compat_ptr_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+#else
+#define compat_ptr_ioctl NULL
+#endif
/*
* VFS file helper functions.
*/
-extern void inode_init_owner(struct inode *inode, const struct inode *dir,
- umode_t mode);
+void inode_init_owner(struct mnt_idmap *idmap, struct inode *inode,
+ const struct inode *dir, umode_t mode);
extern bool may_open_dev(const struct path *path);
-/*
- * VFS FS_IOC_FIEMAP helper definitions.
- */
-struct fiemap_extent_info {
- unsigned int fi_flags; /* Flags as passed from user */
- unsigned int fi_extents_mapped; /* Number of mapped extents */
- unsigned int fi_extents_max; /* Size of fiemap_extent array */
- struct fiemap_extent __user *fi_extents_start; /* Start of
- fiemap_extent array */
-};
-int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
- u64 phys, u64 len, u32 flags);
-int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
-
-/*
- * File types
- *
- * NOTE! These match bits 12..15 of stat.st_mode
- * (ie "(i_mode >> 12) & 15").
- */
-#define DT_UNKNOWN 0
-#define DT_FIFO 1
-#define DT_CHR 2
-#define DT_DIR 4
-#define DT_BLK 6
-#define DT_REG 8
-#define DT_LNK 10
-#define DT_SOCK 12
-#define DT_WHT 14
+umode_t mode_strip_sgid(struct mnt_idmap *idmap,
+ const struct inode *dir, umode_t mode);
/*
* This is the "filldir" function type, used by readdir() to let
* the kernel specify what kind of dirent layout it wants to have.
* This allows the kernel to read directories into kernel space or
* to have different dirent layouts depending on the binary type.
+ * Return 'true' to keep going and 'false' if there are no more entries.
*/
struct dir_context;
-typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
+typedef bool (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
unsigned);
struct dir_context {
- const filldir_t actor;
+ filldir_t actor;
loff_t pos;
};
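/*
 * A minimal sketch of a filldir_t actor under the new bool convention; the
 * context structure and names are hypothetical. Returning false stops
 * iteration after the first entry.
 */
struct example_first_entry {
	struct dir_context ctx;
	bool seen;
};

static bool example_actor(struct dir_context *ctx, const char *name,
			  int namelen, loff_t offset, u64 ino,
			  unsigned int d_type)
{
	struct example_first_entry *p =
		container_of(ctx, struct example_first_entry, ctx);

	p->seen = true;
	return false;	/* false: no more entries wanted */
}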
-struct block_device_operations;
-
-/* These macros are for out of kernel modules to test that
- * the kernel supports the unlocked_ioctl and compat_ioctl
- * fields in struct file_operations. */
-#define HAVE_COMPAT_IOCTL 1
-#define HAVE_UNLOCKED_IOCTL 1
-
/*
* These flags let !MMU mmap() govern direct device mapping vs immediate
* copying more easily for MAP_PRIVATE, especially for ROM filesystems.
@@ -1657,8 +1737,36 @@ struct block_device_operations;
#define NOMMU_VMFLAGS \
(NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC)
+/*
+ * These flags control the behavior of the remap_file_range function pointer.
+ * If it is called with len == 0 that means "remap to end of source file".
+ * See Documentation/filesystems/vfs.rst for more details about this call.
+ *
+ * REMAP_FILE_DEDUP: only remap if contents identical (i.e. deduplicate)
+ * REMAP_FILE_CAN_SHORTEN: caller can handle a shortened request
+ */
+#define REMAP_FILE_DEDUP (1 << 0)
+#define REMAP_FILE_CAN_SHORTEN (1 << 1)
+
+/*
+ * These flags signal that the caller is ok with altering various aspects of
+ * the behavior of the remap operation. The changes must be made by the
+ * implementation; the vfs remap helper functions can take advantage of them.
+ * Flags in this category exist to preserve the quirky behavior of the hoisted
+ * btrfs clone/dedupe ioctls.
+ */
+#define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN)
+
+/*
+ * These flags control the behavior of vfs_copy_file_range().
+ * They are not available to the user via syscall.
+ *
+ * COPY_FILE_SPLICE: call splice direct instead of fs clone/copy ops
+ */
+#define COPY_FILE_SPLICE (1 << 0)
struct iov_iter;
+struct io_uring_cmd;
struct file_operations {
struct module *owner;
@@ -1667,12 +1775,15 @@ struct file_operations {
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
+ int (*iopoll)(struct kiocb *kiocb, struct io_comp_batch *,
+ unsigned int flags);
int (*iterate) (struct file *, struct dir_context *);
int (*iterate_shared) (struct file *, struct dir_context *);
- unsigned int (*poll) (struct file *, struct poll_table_struct *);
+ __poll_t (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
+ unsigned long mmap_supported_flags;
int (*open) (struct inode *, struct file *);
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
@@ -1694,40 +1805,55 @@ struct file_operations {
#endif
ssize_t (*copy_file_range)(struct file *, loff_t, struct file *,
loff_t, size_t, unsigned int);
- int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t,
- u64);
- ssize_t (*dedupe_file_range)(struct file *, u64, u64, struct file *,
- u64);
-};
+ loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t len, unsigned int remap_flags);
+ int (*fadvise)(struct file *, loff_t, loff_t, int);
+ int (*uring_cmd)(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
+ int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *,
+ unsigned int poll_flags);
+} __randomize_layout;
struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *);
- int (*permission) (struct inode *, int);
- struct posix_acl * (*get_acl)(struct inode *, int);
+ int (*permission) (struct mnt_idmap *, struct inode *, int);
+ struct posix_acl * (*get_inode_acl)(struct inode *, int, bool);
int (*readlink) (struct dentry *, char __user *,int);
- int (*create) (struct inode *,struct dentry *, umode_t, bool);
+ int (*create) (struct mnt_idmap *, struct inode *,struct dentry *,
+ umode_t, bool);
int (*link) (struct dentry *,struct inode *,struct dentry *);
int (*unlink) (struct inode *,struct dentry *);
- int (*symlink) (struct inode *,struct dentry *,const char *);
- int (*mkdir) (struct inode *,struct dentry *,umode_t);
+ int (*symlink) (struct mnt_idmap *, struct inode *,struct dentry *,
+ const char *);
+ int (*mkdir) (struct mnt_idmap *, struct inode *,struct dentry *,
+ umode_t);
int (*rmdir) (struct inode *,struct dentry *);
- int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
- int (*rename) (struct inode *, struct dentry *,
+ int (*mknod) (struct mnt_idmap *, struct inode *,struct dentry *,
+ umode_t,dev_t);
+ int (*rename) (struct mnt_idmap *, struct inode *, struct dentry *,
struct inode *, struct dentry *, unsigned int);
- int (*setattr) (struct dentry *, struct iattr *);
- int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
+ int (*setattr) (struct mnt_idmap *, struct dentry *, struct iattr *);
+ int (*getattr) (struct mnt_idmap *, const struct path *,
+ struct kstat *, u32, unsigned int);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
- int (*update_time)(struct inode *, struct timespec *, int);
+ int (*update_time)(struct inode *, struct timespec64 *, int);
int (*atomic_open)(struct inode *, struct dentry *,
struct file *, unsigned open_flag,
- umode_t create_mode, int *opened);
- int (*tmpfile) (struct inode *, struct dentry *, umode_t);
- int (*set_acl)(struct inode *, struct posix_acl *, int);
+ umode_t create_mode);
+ int (*tmpfile) (struct mnt_idmap *, struct inode *,
+ struct file *, umode_t);
+ struct posix_acl *(*get_acl)(struct mnt_idmap *, struct dentry *,
+ int);
+ int (*set_acl)(struct mnt_idmap *, struct dentry *,
+ struct posix_acl *, int);
+ int (*fileattr_set)(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct fileattr *fa);
+ int (*fileattr_get)(struct dentry *dentry, struct fileattr *fa);
} ____cacheline_aligned;
static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio,
@@ -1747,35 +1873,37 @@ static inline int call_mmap(struct file *file, struct vm_area_struct *vma)
return file->f_op->mmap(file, vma);
}
-ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
- unsigned long nr_segs, unsigned long fast_segs,
- struct iovec *fast_pointer,
- struct iovec **ret_pointer);
-
-extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *);
-extern ssize_t __vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
-extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
- unsigned long, loff_t *, int);
-extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
- unsigned long, loff_t *, int);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
loff_t, size_t, unsigned int);
-extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
- struct inode *inode_out, loff_t pos_out,
- u64 *len, bool is_dedupe);
-extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out, u64 len);
-extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
- struct inode *dest, loff_t destoff,
- loff_t len, bool *is_same);
+extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ size_t len, unsigned int flags);
+int __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *len, unsigned int remap_flags,
+ const struct iomap_ops *dax_read_ops);
+int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *count, unsigned int remap_flags);
+extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t len, unsigned int remap_flags);
+extern loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t len, unsigned int remap_flags);
extern int vfs_dedupe_file_range(struct file *file,
struct file_dedupe_range *same);
+extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
+ struct file *dst_file, loff_t dst_pos,
+ loff_t len, unsigned int remap_flags);
+
struct super_operations {
struct inode *(*alloc_inode)(struct super_block *sb);
void (*destroy_inode)(struct inode *);
+ void (*free_inode)(struct inode *);
void (*dirty_inode) (struct inode *, int flags);
int (*write_inode) (struct inode *, struct writeback_control *wbc);
@@ -1800,7 +1928,6 @@ struct super_operations {
ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
struct dquot **(*get_dquots)(struct inode *);
#endif
- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
long (*nr_cached_objects)(struct super_block *,
struct shrink_control *);
long (*free_cached_objects)(struct super_block *,
@@ -1810,24 +1937,28 @@ struct super_operations {
/*
* Inode flags - they have no relation to superblock flags now
*/
-#define S_SYNC 1 /* Writes are synced at once */
-#define S_NOATIME 2 /* Do not update access times */
-#define S_APPEND 4 /* Append-only file */
-#define S_IMMUTABLE 8 /* Immutable file */
-#define S_DEAD 16 /* removed, but still open directory */
-#define S_NOQUOTA 32 /* Inode is not counted to quota */
-#define S_DIRSYNC 64 /* Directory modifications are synchronous */
-#define S_NOCMTIME 128 /* Do not update file c/mtime */
-#define S_SWAPFILE 256 /* Do not truncate: swapon got its bmaps */
-#define S_PRIVATE 512 /* Inode is fs-internal */
-#define S_IMA 1024 /* Inode has an associated IMA struct */
-#define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */
-#define S_NOSEC 4096 /* no suid or xattr security attributes */
+#define S_SYNC (1 << 0) /* Writes are synced at once */
+#define S_NOATIME (1 << 1) /* Do not update access times */
+#define S_APPEND (1 << 2) /* Append-only file */
+#define S_IMMUTABLE (1 << 3) /* Immutable file */
+#define S_DEAD (1 << 4) /* removed, but still open directory */
+#define S_NOQUOTA (1 << 5) /* Inode is not counted to quota */
+#define S_DIRSYNC (1 << 6) /* Directory modifications are synchronous */
+#define S_NOCMTIME (1 << 7) /* Do not update file c/mtime */
+#define S_SWAPFILE (1 << 8) /* Do not truncate: swapon got its bmaps */
+#define S_PRIVATE (1 << 9) /* Inode is fs-internal */
+#define S_IMA (1 << 10) /* Inode has an associated IMA struct */
+#define S_AUTOMOUNT (1 << 11) /* Automount/referral quasi-directory */
+#define S_NOSEC (1 << 12) /* no suid or xattr security attributes */
#ifdef CONFIG_FS_DAX
-#define S_DAX 8192 /* Direct Access, avoiding the page cache */
+#define S_DAX (1 << 13) /* Direct Access, avoiding the page cache */
#else
-#define S_DAX 0 /* Make all the DAX code disappear */
+#define S_DAX 0 /* Make all the DAX code disappear */
#endif
+#define S_ENCRYPTED (1 << 14) /* Encrypted file (using fs/crypto/) */
+#define S_CASEFOLD (1 << 15) /* Casefolded file */
+#define S_VERITY (1 << 16) /* Verity file (using fs/verity/) */
+#define S_KERNEL_FILE (1 << 17) /* File is in use by the kernel (e.g. fs/cachefiles) */
/*
* Note that nosuid etc flags are inode-specific: setting some file-system
@@ -1835,7 +1966,7 @@ struct super_operations {
* possible to override it selectively if you really wanted to with some
* ioctl() that is not currently implemented.
*
- * Exception: MS_RDONLY is always applied to the entire file system.
+ * Exception: SB_RDONLY is always applied to the entire file system.
*
* Unfortunately, it is possible to change a filesystems flags with it mounted
* with files in use. This means that all of the inodes will not have their
@@ -1844,19 +1975,20 @@ struct super_operations {
*/
#define __IS_FLG(inode, flg) ((inode)->i_sb->s_flags & (flg))
-#define IS_RDONLY(inode) ((inode)->i_sb->s_flags & MS_RDONLY)
-#define IS_SYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS) || \
+static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & SB_RDONLY; }
+#define IS_RDONLY(inode) sb_rdonly((inode)->i_sb)
+#define IS_SYNC(inode) (__IS_FLG(inode, SB_SYNCHRONOUS) || \
((inode)->i_flags & S_SYNC))
-#define IS_DIRSYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \
+#define IS_DIRSYNC(inode) (__IS_FLG(inode, SB_SYNCHRONOUS|SB_DIRSYNC) || \
((inode)->i_flags & (S_SYNC|S_DIRSYNC)))
-#define IS_MANDLOCK(inode) __IS_FLG(inode, MS_MANDLOCK)
-#define IS_NOATIME(inode) __IS_FLG(inode, MS_RDONLY|MS_NOATIME)
-#define IS_I_VERSION(inode) __IS_FLG(inode, MS_I_VERSION)
+#define IS_MANDLOCK(inode) __IS_FLG(inode, SB_MANDLOCK)
+#define IS_NOATIME(inode) __IS_FLG(inode, SB_RDONLY|SB_NOATIME)
+#define IS_I_VERSION(inode) __IS_FLG(inode, SB_I_VERSION)
#define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA)
#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
-#define IS_POSIXACL(inode) __IS_FLG(inode, MS_POSIXACL)
+#define IS_POSIXACL(inode) __IS_FLG(inode, SB_POSIXACL)
#define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD)
#define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME)
@@ -1866,39 +1998,45 @@ struct super_operations {
#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
#define IS_DAX(inode) ((inode)->i_flags & S_DAX)
+#define IS_ENCRYPTED(inode) ((inode)->i_flags & S_ENCRYPTED)
+#define IS_CASEFOLDED(inode) ((inode)->i_flags & S_CASEFOLD)
+#define IS_VERITY(inode) ((inode)->i_flags & S_VERITY)
#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
(inode)->i_rdev == WHITEOUT_DEV)
-static inline bool HAS_UNMAPPED_ID(struct inode *inode)
+static inline bool HAS_UNMAPPED_ID(struct mnt_idmap *idmap,
+ struct inode *inode)
{
- return !uid_valid(inode->i_uid) || !gid_valid(inode->i_gid);
+ return !vfsuid_valid(i_uid_into_vfsuid(idmap, inode)) ||
+ !vfsgid_valid(i_gid_into_vfsgid(idmap, inode));
}
-static inline enum rw_hint file_write_hint(struct file *file)
+static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
- if (file->f_write_hint != WRITE_LIFE_NOT_SET)
- return file->f_write_hint;
-
- return file_inode(file)->i_write_hint;
+ *kiocb = (struct kiocb) {
+ .ki_filp = filp,
+ .ki_flags = filp->f_iocb_flags,
+ .ki_ioprio = get_current_ioprio(),
+ };
}
-static inline int iocb_flags(struct file *file);
-
-static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
+static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
+ struct file *filp)
{
*kiocb = (struct kiocb) {
.ki_filp = filp,
- .ki_flags = iocb_flags(filp),
- .ki_hint = file_write_hint(filp),
+ .ki_flags = kiocb_src->ki_flags,
+ .ki_ioprio = kiocb_src->ki_ioprio,
+ .ki_pos = kiocb_src->ki_pos,
};
}
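/*
 * A minimal sketch of a synchronous read built on init_sync_kiocb() and
 * call_read_iter() (defined earlier in this header); the function name is
 * hypothetical, loosely mirroring the kernel's own sync-read wrapper in
 * fs/read_write.c.
 */
static ssize_t example_sync_read(struct file *filp, struct iov_iter *iter,
				 loff_t *ppos)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	ret = call_read_iter(filp, &kiocb, iter);
	if (ret > 0)
		*ppos = kiocb.ki_pos;
	return ret;
}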
/*
* Inode state bits. Protected by inode->i_lock
*
- * Three bits determine the dirty state of the inode, I_DIRTY_SYNC,
- * I_DIRTY_DATASYNC and I_DIRTY_PAGES.
+ * Four bits determine the dirty state of the inode: I_DIRTY_SYNC,
+ * I_DIRTY_DATASYNC, I_DIRTY_PAGES, and I_DIRTY_TIME.
*
* Four bits define the lifetime of an inode. Initially, inodes are I_NEW,
* until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at
@@ -1907,12 +2045,21 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
* Two bits are used for locking and completion notification, I_NEW and I_SYNC.
*
* I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on
- * fdatasync(). i_atime is the usual cause.
- * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of
+ * fdatasync() (unless I_DIRTY_DATASYNC is also set).
+ * Timestamp updates are the usual cause.
+ * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of
* these changes separately from I_DIRTY_SYNC so that we
* don't have to write inode on fdatasync() when only
- * mtime has changed in it.
+ * e.g. the timestamps have changed.
* I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
+ * I_DIRTY_TIME The inode itself has dirty timestamps, and the
+ * lazytime mount option is enabled. We keep track of this
+ * separately from I_DIRTY_SYNC in order to implement
+ * lazytime. This gets cleared if I_DIRTY_INODE
+ * (I_DIRTY_SYNC and/or I_DIRTY_DATASYNC) gets set. But
+ * I_DIRTY_TIME can still be set if I_DIRTY_SYNC is already
+ * in place because writeback might already be in progress
+ * and we don't want to lose the time update.
* I_NEW Serves as both a mutex and completion notification.
* New inodes set I_NEW. If two processes both create
* the same inode, one of them will release its inode and
@@ -1947,12 +2094,22 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
*
* I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to
* synchronize competing switching instances and to tell
- * wb stat updates to grab mapping->tree_lock. See
- * inode_switch_wb_work_fn() for details.
+ * wb stat updates to grab the i_pages lock. See
+ * inode_switch_wbs_work_fn() for details.
*
* I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper
* and work dirs among overlayfs mounts.
*
+ * I_CREATING New object's inode in the middle of setting up.
+ *
+ * I_DONTCACHE Evict inode as soon as it is not used anymore.
+ *
+ * I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists.
+ * Used to detect that mark_inode_dirty() should not move
+ * inode between dirty lists.
+ *
+ * I_PINNING_FSCACHE_WB Inode is pinning an fscache object for writeback.
+ *
* Q: What is the difference between I_WILL_FREE and I_FREEING?
*/
#define I_DIRTY_SYNC (1 << 0)
@@ -1970,12 +2127,15 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
#define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP)
#define I_LINKABLE (1 << 10)
#define I_DIRTY_TIME (1 << 11)
-#define __I_DIRTY_TIME_EXPIRED 12
-#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED)
#define I_WB_SWITCH (1 << 13)
-#define I_OVL_INUSE (1 << 14)
-
-#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
+#define I_OVL_INUSE (1 << 14)
+#define I_CREATING (1 << 15)
+#define I_DONTCACHE (1 << 16)
+#define I_SYNC_QUEUED (1 << 17)
+#define I_PINNING_FSCACHE_WB (1 << 18)
+
+#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
+#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
#define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME)
extern void __mark_inode_dirty(struct inode *, int);
@@ -1989,6 +2149,21 @@ static inline void mark_inode_dirty_sync(struct inode *inode)
__mark_inode_dirty(inode, I_DIRTY_SYNC);
}
+/*
+ * Returns true if the given inode itself only has dirty timestamps (its pages
+ * may still be dirty) and isn't currently being allocated or freed.
+ * Filesystems should call this if, when writing an inode with lazytime
+ * enabled, they want to opportunistically write the timestamps of other
+ * inodes located very nearby on-disk, e.g. in the same inode block: a true
+ * return means such an opportunistic update is needed. Requires
+ * i_lock, or at least later re-checking under i_lock.
+ */
+static inline bool inode_is_dirtytime_only(struct inode *inode)
+{
+ return (inode->i_state & (I_DIRTY_TIME | I_NEW |
+ I_FREEING | I_WILL_FREE)) == I_DIRTY_TIME;
+}
+
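/*
 * A minimal sketch honoring the locking rule above: take i_lock around the
 * check (or re-check under i_lock later). The function name is hypothetical.
 */
static bool example_is_dirtytime_only(struct inode *inode)
{
	bool ret;

	spin_lock(&inode->i_lock);
	ret = inode_is_dirtytime_only(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}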
extern void inc_nlink(struct inode *inode);
extern void drop_nlink(struct inode *inode);
extern void clear_nlink(struct inode *inode);
@@ -2006,21 +2181,6 @@ static inline void inode_dec_link_count(struct inode *inode)
mark_inode_dirty(inode);
}
-/**
- * inode_inc_iversion - increments i_version
- * @inode: inode that need to be updated
- *
- * Every time the inode is modified, the i_version field will be incremented.
- * The filesystem has to be mounted with i_version flag
- */
-
-static inline void inode_inc_iversion(struct inode *inode)
-{
- spin_lock(&inode->i_lock);
- inode->i_version++;
- spin_unlock(&inode->i_lock);
-}
-
enum file_time_flags {
S_ATIME = 1,
S_MTIME = 2,
@@ -2028,14 +2188,19 @@ enum file_time_flags {
S_VERSION = 8,
};
+extern bool atime_needs_update(const struct path *, struct inode *);
extern void touch_atime(const struct path *);
+int inode_update_time(struct inode *inode, struct timespec64 *time, int flags);
+
static inline void file_accessed(struct file *file)
{
if (!(file->f_flags & O_NOATIME))
touch_atime(&file->f_path);
}
-int sync_inode(struct inode *inode, struct writeback_control *wbc);
+extern int file_modified(struct file *file);
+int kiocb_modified(struct kiocb *iocb);
+
int sync_inode_metadata(struct inode *inode, int wait);
struct file_system_type {
@@ -2045,7 +2210,11 @@ struct file_system_type {
#define FS_BINARY_MOUNTDATA 2
#define FS_HAS_SUBTYPE 4
#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
+#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */
+#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
+ int (*init_fs_context)(struct fs_context *);
+ const struct fs_parameter_spec *parameters;
struct dentry *(*mount) (struct file_system_type *, int,
const char *, void *);
void (*kill_sb) (struct super_block *);
@@ -2060,14 +2229,12 @@ struct file_system_type {
struct lock_class_key i_lock_key;
struct lock_class_key i_mutex_key;
+ struct lock_class_key invalidate_lock_key;
struct lock_class_key i_mutex_dir_key;
};
#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
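For context, a filesystem converted to the new mount API wires up ->init_fs_context and ->parameters instead of ->mount; a minimal registration might look like this (all examplefs_* symbols are made up, see the fs_context.h hunk further below):

	static struct file_system_type examplefs_fs_type = {
		.owner		 = THIS_MODULE,
		.name		 = "examplefs",
		.init_fs_context = examplefs_init_fs_context,	/* hypothetical */
		.parameters	 = examplefs_fs_parameters,	/* hypothetical */
		.kill_sb	 = kill_litter_super,
		.fs_flags	 = FS_USERNS_MOUNT,
	};
	MODULE_ALIAS_FS("examplefs");
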
-extern struct dentry *mount_ns(struct file_system_type *fs_type,
- int flags, void *data, void *ns, struct user_namespace *user_ns,
- int (*fill_super)(struct super_block *, void *, int));
extern struct dentry *mount_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
int (*fill_super)(struct super_block *, void *, int));
@@ -2078,6 +2245,7 @@ extern struct dentry *mount_nodev(struct file_system_type *fs_type,
int flags, void *data,
int (*fill_super)(struct super_block *, void *, int));
extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path);
+void retire_super(struct super_block *sb);
void generic_shutdown_super(struct super_block *sb);
void kill_block_super(struct super_block *sb);
void kill_anon_super(struct super_block *sb);
@@ -2085,30 +2253,16 @@ void kill_litter_super(struct super_block *sb);
void deactivate_super(struct super_block *sb);
void deactivate_locked_super(struct super_block *sb);
int set_anon_super(struct super_block *s, void *data);
+int set_anon_super_fc(struct super_block *s, struct fs_context *fc);
int get_anon_bdev(dev_t *);
void free_anon_bdev(dev_t);
-struct super_block *sget_userns(struct file_system_type *type,
- int (*test)(struct super_block *,void *),
- int (*set)(struct super_block *,void *),
- int flags, struct user_namespace *user_ns,
- void *data);
+struct super_block *sget_fc(struct fs_context *fc,
+ int (*test)(struct super_block *, struct fs_context *),
+ int (*set)(struct super_block *, struct fs_context *));
struct super_block *sget(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
int flags, void *data);
-extern struct dentry *mount_pseudo_xattr(struct file_system_type *, char *,
- const struct super_operations *ops,
- const struct xattr_handler **xattr,
- const struct dentry_operations *dops,
- unsigned long);
-
-static inline struct dentry *
-mount_pseudo(struct file_system_type *fs_type, char *name,
- const struct super_operations *ops,
- const struct dentry_operations *dops, unsigned long magic)
-{
- return mount_pseudo_xattr(fs_type, name, ops, NULL, dops, magic);
-}
/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
#define fops_get(fops) \
@@ -2129,24 +2283,11 @@ mount_pseudo(struct file_system_type *fs_type, char *name,
extern int register_filesystem(struct file_system_type *);
extern int unregister_filesystem(struct file_system_type *);
-extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
-#define kern_mount(type) kern_mount_data(type, NULL)
-extern void kern_unmount(struct vfsmount *mnt);
-extern int may_umount_tree(struct vfsmount *);
-extern int may_umount(struct vfsmount *);
-extern long do_mount(const char *, const char __user *,
- const char *, unsigned long, void *);
-extern struct vfsmount *collect_mounts(const struct path *);
-extern void drop_collected_mounts(struct vfsmount *);
-extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
- struct vfsmount *);
extern int vfs_statfs(const struct path *, struct kstatfs *);
extern int user_statfs(const char __user *, struct kstatfs *);
extern int fd_statfs(int, struct kstatfs *);
-extern int vfs_ustat(dev_t, struct kstatfs *);
extern int freeze_super(struct super_block *super);
extern int thaw_super(struct super_block *super);
-extern bool our_mnt(struct vfsmount *mnt);
extern __printf(2, 3)
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...);
extern int super_setup_bdi(struct super_block *sb);
@@ -2155,228 +2296,88 @@ extern int current_umask(void);
extern void ihold(struct inode * inode);
extern void iput(struct inode *);
-extern int generic_update_time(struct inode *, struct timespec *, int);
+extern int generic_update_time(struct inode *, struct timespec64 *, int);
/* /sys/fs */
extern struct kobject *fs_kobj;
#define MAX_RW_COUNT (INT_MAX & PAGE_MASK)
-#ifdef CONFIG_MANDATORY_FILE_LOCKING
-extern int locks_mandatory_locked(struct file *);
-extern int locks_mandatory_area(struct inode *, struct file *, loff_t, loff_t, unsigned char);
-
-/*
- * Candidates for mandatory locking have the setgid bit set
- * but no group execute bit - an otherwise meaningless combination.
- */
-
-static inline int __mandatory_lock(struct inode *ino)
-{
- return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID;
-}
-
-/*
- * ... and these candidates should be on MS_MANDLOCK mounted fs,
- * otherwise these will be advisory locks
- */
-
-static inline int mandatory_lock(struct inode *ino)
-{
- return IS_MANDLOCK(ino) && __mandatory_lock(ino);
-}
-
-static inline int locks_verify_locked(struct file *file)
-{
- if (mandatory_lock(locks_inode(file)))
- return locks_mandatory_locked(file);
- return 0;
-}
-
-static inline int locks_verify_truncate(struct inode *inode,
- struct file *f,
- loff_t size)
-{
- if (!inode->i_flctx || !mandatory_lock(inode))
- return 0;
-
- if (size < inode->i_size) {
- return locks_mandatory_area(inode, f, size, inode->i_size - 1,
- F_WRLCK);
- } else {
- return locks_mandatory_area(inode, f, inode->i_size, size - 1,
- F_WRLCK);
- }
-}
-
-#else /* !CONFIG_MANDATORY_FILE_LOCKING */
-
-static inline int locks_mandatory_locked(struct file *file)
-{
- return 0;
-}
-
-static inline int locks_mandatory_area(struct inode *inode, struct file *filp,
- loff_t start, loff_t end, unsigned char type)
-{
- return 0;
-}
-
-static inline int __mandatory_lock(struct inode *inode)
-{
- return 0;
-}
-
-static inline int mandatory_lock(struct inode *inode)
-{
- return 0;
-}
-
-static inline int locks_verify_locked(struct file *file)
-{
- return 0;
-}
-
-static inline int locks_verify_truncate(struct inode *inode, struct file *filp,
- size_t size)
-{
- return 0;
-}
-
-#endif /* CONFIG_MANDATORY_FILE_LOCKING */
-
-
-#ifdef CONFIG_FILE_LOCKING
-static inline int break_lease(struct inode *inode, unsigned int mode)
-{
- /*
- * Since this check is lockless, we must ensure that any refcounts
- * taken are done before checking i_flctx->flc_lease. Otherwise, we
- * could end up racing with tasks trying to set a new lease on this
- * file.
- */
- smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
- return __break_lease(inode, mode, FL_LEASE);
- return 0;
-}
-
-static inline int break_deleg(struct inode *inode, unsigned int mode)
-{
- /*
- * Since this check is lockless, we must ensure that any refcounts
- * taken are done before checking i_flctx->flc_lease. Otherwise, we
- * could end up racing with tasks trying to set a new lease on this
- * file.
- */
- smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
- return __break_lease(inode, mode, FL_DELEG);
- return 0;
-}
-
-static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
-{
- int ret;
-
- ret = break_deleg(inode, O_WRONLY|O_NONBLOCK);
- if (ret == -EWOULDBLOCK && delegated_inode) {
- *delegated_inode = inode;
- ihold(inode);
- }
- return ret;
-}
-
-static inline int break_deleg_wait(struct inode **delegated_inode)
-{
- int ret;
-
- ret = break_deleg(*delegated_inode, O_WRONLY);
- iput(*delegated_inode);
- *delegated_inode = NULL;
- return ret;
-}
-
-static inline int break_layout(struct inode *inode, bool wait)
-{
- smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
- return __break_lease(inode,
- wait ? O_WRONLY : O_WRONLY | O_NONBLOCK,
- FL_LAYOUT);
- return 0;
-}
-
-#else /* !CONFIG_FILE_LOCKING */
-static inline int break_lease(struct inode *inode, unsigned int mode)
-{
- return 0;
-}
-
-static inline int break_deleg(struct inode *inode, unsigned int mode)
-{
- return 0;
-}
-
-static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
-{
- return 0;
-}
-
-static inline int break_deleg_wait(struct inode **delegated_inode)
-{
- BUG();
- return 0;
-}
-
-static inline int break_layout(struct inode *inode, bool wait)
-{
- return 0;
-}
-
-#endif /* CONFIG_FILE_LOCKING */
-
/* fs/open.c */
struct audit_names;
struct filename {
const char *name; /* pointer to actual string */
const __user char *uptr; /* original userland pointer */
- struct audit_names *aname;
int refcnt;
+ struct audit_names *aname;
const char iname[];
};
+static_assert(offsetof(struct filename, iname) % sizeof(long) == 0);
+
+static inline struct mnt_idmap *file_mnt_idmap(struct file *file)
+{
+ return mnt_idmap(file->f_path.mnt);
+}
+
+/**
+ * is_idmapped_mnt - check whether a mount is mapped
+ * @mnt: the mount to check
+ *
+ * If @mnt has an idmapping attached to it that is not @nop_mnt_idmap, then
+ * @mnt is idmapped.
+ *
+ * Return: true if the mount is idmapped, false if not.
+ */
+static inline bool is_idmapped_mnt(const struct vfsmount *mnt)
+{
+ return mnt_idmap(mnt) != &nop_mnt_idmap;
+}
extern long vfs_truncate(const struct path *, loff_t);
-extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
- struct file *filp);
+int do_truncate(struct mnt_idmap *, struct dentry *, loff_t start,
+ unsigned int time_attrs, struct file *filp);
extern int vfs_fallocate(struct file *file, int mode, loff_t offset,
loff_t len);
extern long do_sys_open(int dfd, const char __user *filename, int flags,
umode_t mode);
extern struct file *file_open_name(struct filename *, int, umode_t);
extern struct file *filp_open(const char *, int, umode_t);
-extern struct file *file_open_root(struct dentry *, struct vfsmount *,
+extern struct file *file_open_root(const struct path *,
const char *, int, umode_t);
+static inline struct file *file_open_root_mnt(struct vfsmount *mnt,
+ const char *name, int flags, umode_t mode)
+{
+ return file_open_root(&(struct path){.mnt = mnt, .dentry = mnt->mnt_root},
+ name, flags, mode);
+}
extern struct file * dentry_open(const struct path *, int, const struct cred *);
+extern struct file *dentry_create(const struct path *path, int flags,
+ umode_t mode, const struct cred *cred);
+extern struct file * open_with_fake_path(const struct path *, int,
+ struct inode*, const struct cred *);
+static inline struct file *file_clone_open(struct file *file)
+{
+ return dentry_open(&file->f_path, file->f_flags, file->f_cred);
+}
extern int filp_close(struct file *, fl_owner_t id);
extern struct filename *getname_flags(const char __user *, int, int *);
+extern struct filename *getname_uflags(const char __user *, int);
extern struct filename *getname(const char __user *);
extern struct filename *getname_kernel(const char *);
extern void putname(struct filename *name);
-enum {
- FILE_CREATED = 1,
- FILE_OPENED = 2
-};
extern int finish_open(struct file *file, struct dentry *dentry,
- int (*open)(struct inode *, struct file *),
- int *opened);
+ int (*open)(struct inode *, struct file *));
extern int finish_no_open(struct file *file, struct dentry *dentry);
-/* fs/ioctl.c */
-extern int ioctl_preallocate(struct file *filp, void __user *argp);
+/* Helper for the simple case when the original dentry is used */
+static inline int finish_open_simple(struct file *file, int error)
+{
+ if (error)
+ return error;
+
+ return finish_open(file, file->f_path.dentry, NULL);
+}
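A sketch of where the helper fits, assuming a hypothetical ->atomic_open implementation:

	static int examplefs_atomic_open(struct inode *dir, struct dentry *dentry,
					 struct file *file, unsigned open_flags,
					 umode_t mode)
	{
		int error = examplefs_create_object(dir, dentry, mode);	/* hypothetical */

		/* Propagates @error, or opens @file on the original dentry. */
		return finish_open_simple(file, error);
	}
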
/* fs/dcache.c */
extern void __init vfs_caches_init_early(void);
@@ -2387,92 +2388,25 @@ extern struct kmem_cache *names_cachep;
#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL)
#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
-#ifdef CONFIG_BLOCK
-extern int register_blkdev(unsigned int, const char *);
-extern void unregister_blkdev(unsigned int, const char *);
-extern void bdev_unhash_inode(dev_t dev);
-extern struct block_device *bdget(dev_t);
-extern struct block_device *bdgrab(struct block_device *bdev);
-extern void bd_set_size(struct block_device *, loff_t size);
-extern void bd_forget(struct inode *inode);
-extern void bdput(struct block_device *);
-extern void invalidate_bdev(struct block_device *);
-extern void iterate_bdevs(void (*)(struct block_device *, void *), void *);
-extern int sync_blockdev(struct block_device *bdev);
-extern void kill_bdev(struct block_device *);
-extern struct super_block *freeze_bdev(struct block_device *);
-extern void emergency_thaw_all(void);
-extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
-extern int fsync_bdev(struct block_device *);
-
extern struct super_block *blockdev_superblock;
-
static inline bool sb_is_blkdev_sb(struct super_block *sb)
{
- return sb == blockdev_superblock;
-}
-#else
-static inline void bd_forget(struct inode *inode) {}
-static inline int sync_blockdev(struct block_device *bdev) { return 0; }
-static inline void kill_bdev(struct block_device *bdev) {}
-static inline void invalidate_bdev(struct block_device *bdev) {}
-
-static inline struct super_block *freeze_bdev(struct block_device *sb)
-{
- return NULL;
-}
-
-static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
-{
- return 0;
-}
-
-static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg)
-{
+ return IS_ENABLED(CONFIG_BLOCK) && sb == blockdev_superblock;
}
-static inline bool sb_is_blkdev_sb(struct super_block *sb)
-{
- return false;
-}
-#endif
+void emergency_thaw_all(void);
extern int sync_filesystem(struct super_block *);
extern const struct file_operations def_blk_fops;
extern const struct file_operations def_chr_fops;
-#ifdef CONFIG_BLOCK
-extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
-extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
-extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
-extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder);
-extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
- void *holder);
-extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
- void *holder);
-extern void blkdev_put(struct block_device *bdev, fmode_t mode);
-extern int __blkdev_reread_part(struct block_device *bdev);
-extern int blkdev_reread_part(struct block_device *bdev);
-
-#ifdef CONFIG_SYSFS
-extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
-extern void bd_unlink_disk_holder(struct block_device *bdev,
- struct gendisk *disk);
-#else
-static inline int bd_link_disk_holder(struct block_device *bdev,
- struct gendisk *disk)
-{
- return 0;
-}
-static inline void bd_unlink_disk_holder(struct block_device *bdev,
- struct gendisk *disk)
-{
-}
-#endif
-#endif
/* fs/char_dev.c */
-#define CHRDEV_MAJOR_HASH_SIZE 255
+#define CHRDEV_MAJOR_MAX 512
/* Marks the bottom of the first segment of free char majors */
#define CHRDEV_MAJOR_DYN_END 234
+/* Marks the top and bottom of the second segment of free char majors */
+#define CHRDEV_MAJOR_DYN_EXT_START 511
+#define CHRDEV_MAJOR_DYN_EXT_END 384
+
extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
extern int register_chrdev_region(dev_t, unsigned, const char *);
extern int __register_chrdev(unsigned int major, unsigned int baseminor,
@@ -2494,125 +2428,36 @@ static inline void unregister_chrdev(unsigned int major, const char *name)
__unregister_chrdev(major, 0, 256, name);
}
-/* fs/block_dev.c */
-#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
-#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
-
-#ifdef CONFIG_BLOCK
-#define BLKDEV_MAJOR_HASH_SIZE 255
-extern const char *__bdevname(dev_t, char *buffer);
-extern const char *bdevname(struct block_device *bdev, char *buffer);
-extern struct block_device *lookup_bdev(const char *);
-extern void blkdev_show(struct seq_file *,off_t);
-
-#else
-#define BLKDEV_MAJOR_HASH_SIZE 0
-#endif
-
extern void init_special_inode(struct inode *, umode_t, dev_t);
/* Invalid inode operations -- fs/bad_inode.c */
extern void make_bad_inode(struct inode *);
extern bool is_bad_inode(struct inode *);
-#ifdef CONFIG_BLOCK
-extern void check_disk_size_change(struct gendisk *disk,
- struct block_device *bdev);
-extern int revalidate_disk(struct gendisk *);
-extern int check_disk_change(struct block_device *);
-extern int __invalidate_device(struct block_device *, bool);
-extern int invalidate_partition(struct gendisk *, int);
-#endif
-unsigned long invalidate_mapping_pages(struct address_space *mapping,
- pgoff_t start, pgoff_t end);
-
-static inline void invalidate_remote_inode(struct inode *inode)
-{
- if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- S_ISLNK(inode->i_mode))
- invalidate_mapping_pages(inode->i_mapping, 0, -1);
-}
-extern int invalidate_inode_pages2(struct address_space *mapping);
-extern int invalidate_inode_pages2_range(struct address_space *mapping,
- pgoff_t start, pgoff_t end);
-extern int write_inode_now(struct inode *, int);
-extern int filemap_fdatawrite(struct address_space *);
-extern int filemap_flush(struct address_space *);
-extern int filemap_fdatawait(struct address_space *);
-extern int filemap_fdatawait_keep_errors(struct address_space *mapping);
-extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
- loff_t lend);
-extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
- loff_t lend);
-extern int filemap_write_and_wait(struct address_space *mapping);
-extern int filemap_write_and_wait_range(struct address_space *mapping,
- loff_t lstart, loff_t lend);
-extern int __filemap_fdatawrite_range(struct address_space *mapping,
- loff_t start, loff_t end, int sync_mode);
-extern int filemap_fdatawrite_range(struct address_space *mapping,
- loff_t start, loff_t end);
-extern int filemap_check_errors(struct address_space *mapping);
-
-extern void __filemap_set_wb_err(struct address_space *mapping, int err);
+extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
+ loff_t lend);
extern int __must_check file_check_and_advance_wb_err(struct file *file);
extern int __must_check file_write_and_wait_range(struct file *file,
loff_t start, loff_t end);
-/**
- * filemap_set_wb_err - set a writeback error on an address_space
- * @mapping: mapping in which to set writeback error
- * @err: error to be set in mapping
- *
- * When writeback fails in some way, we must record that error so that
- * userspace can be informed when fsync and the like are called. We endeavor
- * to report errors on any file that was open at the time of the error. Some
- * internal callers also need to know when writeback errors have occurred.
- *
- * When a writeback error occurs, most filesystems will want to call
- * filemap_set_wb_err to record the error in the mapping so that it will be
- * automatically reported whenever fsync is called on the file.
- *
- * FIXME: mention FS_* flag here?
- */
-static inline void filemap_set_wb_err(struct address_space *mapping, int err)
-{
- /* Fastpath for common case of no error */
- if (unlikely(err))
- __filemap_set_wb_err(mapping, err);
-}
-
-/**
- * filemap_check_wb_error - has an error occurred since the mark was sampled?
- * @mapping: mapping to check for writeback errors
- * @since: previously-sampled errseq_t
- *
- * Grab the errseq_t value from the mapping, and see if it has changed "since"
- * the given value was sampled.
- *
- * If it has then report the latest error set, otherwise return 0.
- */
-static inline int filemap_check_wb_err(struct address_space *mapping,
- errseq_t since)
+static inline int file_write_and_wait(struct file *file)
{
- return errseq_check(&mapping->wb_err, since);
-}
-
-/**
- * filemap_sample_wb_err - sample the current errseq_t to test for later errors
- * @mapping: mapping to be sampled
- *
- * Writeback errors are always reported relative to a particular sample point
- * in the past. This function provides those sample points.
- */
-static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
-{
- return errseq_sample(&mapping->wb_err);
+ return file_write_and_wait_range(file, 0, LLONG_MAX);
}
extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
int datasync);
extern int vfs_fsync(struct file *file, int datasync);
+extern int sync_file_range(struct file *file, loff_t offset, loff_t nbytes,
+ unsigned int flags);
+
+static inline bool iocb_is_dsync(const struct kiocb *iocb)
+{
+ return (iocb->ki_flags & IOCB_DSYNC) ||
+ IS_SYNC(iocb->ki_filp->f_mapping->host);
+}
+
/*
* Sync the bytes written if this was a synchronous write. Expect ki_pos
* to already be updated for the write, and will return either the amount
@@ -2620,7 +2465,7 @@ extern int vfs_fsync(struct file *file, int datasync);
*/
static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
{
- if (iocb->ki_flags & IOCB_DSYNC) {
+ if (iocb_is_dsync(iocb)) {
int ret = vfs_fsync_range(iocb->ki_filp,
iocb->ki_pos - count, iocb->ki_pos - 1,
(iocb->ki_flags & IOCB_SYNC) ? 0 : 1);
@@ -2633,32 +2478,55 @@ static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
extern void emergency_sync(void);
extern void emergency_remount(void);
+
#ifdef CONFIG_BLOCK
-extern sector_t bmap(struct inode *, sector_t);
+extern int bmap(struct inode *inode, sector_t *block);
+#else
+static inline int bmap(struct inode *inode, sector_t *block)
+{
+ return -EINVAL;
+}
#endif
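A short sketch of the new calling convention, where the logical block is passed in and the physical block comes back through the same pointer (the examplefs_* wrapper is hypothetical):

	static int examplefs_first_block(struct inode *inode, sector_t *out)
	{
		sector_t block = 0;		/* logical block to resolve */
		int err = bmap(inode, &block);	/* -EINVAL if unsupported */

		if (err)
			return err;
		*out = block;	/* physical block; 0 if the range is a hole */
		return 0;
	}
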
-extern int notify_change(struct dentry *, struct iattr *, struct inode **);
-extern int inode_permission(struct inode *, int);
-extern int __inode_permission(struct inode *, int);
-extern int generic_permission(struct inode *, int);
-extern int __check_sticky(struct inode *dir, struct inode *inode);
+
+int notify_change(struct mnt_idmap *, struct dentry *,
+ struct iattr *, struct inode **);
+int inode_permission(struct mnt_idmap *, struct inode *, int);
+int generic_permission(struct mnt_idmap *, struct inode *, int);
+static inline int file_permission(struct file *file, int mask)
+{
+ return inode_permission(file_mnt_idmap(file),
+ file_inode(file), mask);
+}
+static inline int path_permission(const struct path *path, int mask)
+{
+ return inode_permission(mnt_idmap(path->mnt),
+ d_inode(path->dentry), mask);
+}
+int __check_sticky(struct mnt_idmap *idmap, struct inode *dir,
+ struct inode *inode);
static inline bool execute_ok(struct inode *inode)
{
return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode);
}
+static inline bool inode_wrong_type(const struct inode *inode, umode_t mode)
+{
+ return (inode->i_mode ^ mode) & S_IFMT;
+}
+
static inline void file_start_write(struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
return;
- __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true);
+ sb_start_write(file_inode(file)->i_sb);
}
static inline bool file_start_write_trylock(struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
return true;
- return __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, false);
+ return sb_start_write_trylock(file_inode(file)->i_sb);
}
static inline void file_end_write(struct file *file)
@@ -2668,29 +2536,21 @@ static inline void file_end_write(struct file *file)
__sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
}
-static inline int do_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- u64 len)
-{
- int ret;
-
- file_start_write(file_out);
- ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len);
- file_end_write(file_out);
-
- return ret;
-}
-
/*
+ * This is used for regular files where some users -- especially the
+ * currently executed binary in a process, previously handled via
+ * VM_DENYWRITE -- cannot handle concurrent write (and maybe mmap
+ * read-write shared) accesses.
+ *
* get_write_access() gets write permission for a file.
* put_write_access() releases this write permission.
- * This is used for regular files.
- * We cannot support write (and maybe mmap read-write shared) accesses and
- * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode
- * can have the following values:
- * 0: no writers, no VM_DENYWRITE mappings
- * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist
- * > 0: (i_writecount) users are writing to the file.
+ * deny_write_access() denies write access to a file.
+ * allow_write_access() re-enables write access to a file.
+ *
+ * The i_writecount field of an inode can have the following values:
+ * 0: no write access, no denied write access
+ * < 0: (-i_writecount) users that denied write access to the file.
+ * > 0: (i_writecount) users that have write access to the file.
*
* Normally we operate on that counter with atomic_{inc,dec} and it's safe
* except for the cases where we don't hold i_writecount yet. Then we need to
@@ -2720,7 +2580,7 @@ static inline bool inode_is_open_for_write(const struct inode *inode)
return atomic_read(&inode->i_writecount) > 0;
}
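A minimal sketch of the protocol described above (hypothetical caller; deny_write_access() fails with -ETXTBSY while i_writecount is positive):

	static int examplefs_read_exclusive(struct file *file)
	{
		int err = deny_write_access(file);

		if (err)
			return err;	/* a writer holds i_writecount > 0 */
		/* ... i_writecount is now negative, writers are excluded ... */
		allow_write_access(file);
		return 0;
	}
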
-#ifdef CONFIG_IMA
+#if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING)
static inline void i_readcount_dec(struct inode *inode)
{
BUG_ON(!atomic_read(&inode->i_readcount));
@@ -2742,44 +2602,10 @@ static inline void i_readcount_inc(struct inode *inode)
#endif
extern int do_pipe_flags(int *, int);
-#define __kernel_read_file_id(id) \
- id(UNKNOWN, unknown) \
- id(FIRMWARE, firmware) \
- id(FIRMWARE_PREALLOC_BUFFER, firmware) \
- id(MODULE, kernel-module) \
- id(KEXEC_IMAGE, kexec-image) \
- id(KEXEC_INITRAMFS, kexec-initramfs) \
- id(POLICY, security-policy) \
- id(MAX_ID, )
-
-#define __fid_enumify(ENUM, dummy) READING_ ## ENUM,
-#define __fid_stringify(dummy, str) #str,
-
-enum kernel_read_file_id {
- __kernel_read_file_id(__fid_enumify)
-};
-
-static const char * const kernel_read_file_str[] = {
- __kernel_read_file_id(__fid_stringify)
-};
-
-static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id)
-{
- if ((unsigned)id >= READING_MAX_ID)
- return kernel_read_file_str[READING_UNKNOWN];
-
- return kernel_read_file_str[id];
-}
-
-extern int kernel_read(struct file *, loff_t, char *, unsigned long);
-extern int kernel_read_file(struct file *, void **, loff_t *, loff_t,
- enum kernel_read_file_id);
-extern int kernel_read_file_from_path(char *, void **, loff_t *, loff_t,
- enum kernel_read_file_id);
-extern int kernel_read_file_from_fd(int, void **, loff_t *, loff_t,
- enum kernel_read_file_id);
-extern ssize_t kernel_write(struct file *, const char *, size_t, loff_t);
-extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
+extern ssize_t kernel_read(struct file *, void *, size_t, loff_t *);
+ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos);
+extern ssize_t kernel_write(struct file *, const void *, size_t, loff_t *);
+extern ssize_t __kernel_write(struct file *, const void *, size_t, loff_t *);
extern struct file * open_exec(const char *);
/* fs/dcache.c -- generic fs support functions */
@@ -2806,6 +2632,7 @@ static inline int generic_drop_inode(struct inode *inode)
{
return !inode->i_nlink || inode_unhashed(inode);
}
+extern void d_mark_dontcache(struct inode *inode);
extern struct inode *ilookup5_nowait(struct super_block *sb,
unsigned long hashval, int (*test)(struct inode *, void *),
@@ -2814,6 +2641,10 @@ extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data);
extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
+extern struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
+ int (*test)(struct inode *, void *),
+ int (*set)(struct inode *, void *),
+ void *data);
extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
extern struct inode * iget_locked(struct super_block *, unsigned long);
extern struct inode *find_inode_nowait(struct super_block *,
@@ -2821,6 +2652,9 @@ extern struct inode *find_inode_nowait(struct super_block *,
int (*match)(struct inode *,
unsigned long, void *),
void *data);
+extern struct inode *find_inode_rcu(struct super_block *, unsigned long,
+ int (*)(struct inode *, void *), void *);
+extern struct inode *find_inode_by_ino_rcu(struct super_block *, unsigned long);
extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *);
extern int insert_inode_locked(struct inode *);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -2829,7 +2663,25 @@ extern void lockdep_annotate_inode_mutex_key(struct inode *inode);
static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { };
#endif
extern void unlock_new_inode(struct inode *);
+extern void discard_new_inode(struct inode *);
extern unsigned int get_next_ino(void);
+extern void evict_inodes(struct super_block *sb);
+void dump_mapping(const struct address_space *);
+
+/*
+ * Userspace may rely on the inode number being non-zero. For example, glibc
+ * simply ignores files with zero i_ino in unlink() and other places.
+ *
+ * As an additional complication, if userspace was compiled with
+ * _FILE_OFFSET_BITS=32 on a 64-bit kernel we'll only end up reading out the
+ * lower 32 bits, so we need to check that those aren't zero explicitly. With
+ * _FILE_OFFSET_BITS=64, this may cause some harmless false-negatives, but
+ * better safe than sorry.
+ */
+static inline bool is_zero_ino(ino_t ino)
+{
+ return (u32)ino == 0;
+}
extern void __iget(struct inode * inode);
extern void iget_failed(struct inode *);
@@ -2838,8 +2690,20 @@ extern void __destroy_inode(struct inode *);
extern struct inode *new_inode_pseudo(struct super_block *sb);
extern struct inode *new_inode(struct super_block *sb);
extern void free_inode_nonrcu(struct inode *inode);
-extern int should_remove_suid(struct dentry *);
+extern int setattr_should_drop_suidgid(struct mnt_idmap *, struct inode *);
extern int file_remove_privs(struct file *);
+int setattr_should_drop_sgid(struct mnt_idmap *idmap,
+ const struct inode *inode);
+
+/*
+ * This must be used for allocating filesystems specific inodes to set
+ * up the inode reclaim context correctly.
+ */
+static inline void *
+alloc_inode_sb(struct super_block *sb, struct kmem_cache *cache, gfp_t gfp)
+{
+ return kmem_cache_alloc_lru(cache, &sb->s_inode_lru, gfp);
+}
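For instance, a super_operations->alloc_inode implementation would now allocate through this helper (the cache and per-fs inode type are hypothetical):

	static struct inode *examplefs_alloc_inode(struct super_block *sb)
	{
		struct examplefs_inode_info *ei;

		ei = alloc_inode_sb(sb, examplefs_inode_cachep, GFP_KERNEL);
		if (!ei)
			return NULL;
		return &ei->vfs_inode;
	}
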
extern void __insert_inode_hash(struct inode *, unsigned long hashval);
static inline void insert_inode_hash(struct inode *inode)
@@ -2855,36 +2719,42 @@ static inline void remove_inode_hash(struct inode *inode)
}
extern void inode_sb_list_add(struct inode *inode);
+extern void inode_add_lru(struct inode *inode);
-#ifdef CONFIG_BLOCK
-extern int bdev_read_only(struct block_device *);
-#endif
-extern int set_blocksize(struct block_device *, int);
extern int sb_set_blocksize(struct super_block *, int);
extern int sb_min_blocksize(struct super_block *, int);
extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
+int generic_write_checks_count(struct kiocb *iocb, loff_t *count);
+extern int generic_write_check_limits(struct file *file, loff_t pos,
+ loff_t *count);
+extern int generic_file_rw_checks(struct file *file_in, struct file *file_out);
+ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *to,
+ ssize_t already_read);
extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *);
-extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
+ssize_t generic_perform_write(struct kiocb *, struct iov_iter *);
ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
- int flags);
+ rwf_t flags);
ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
- int flags);
-
-/* fs/block_dev.c */
-extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
-extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from);
-extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
- int datasync);
-extern void block_sync_page(struct page *page);
+ rwf_t flags);
+ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
+ struct iov_iter *iter);
+ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
+ struct iov_iter *iter);
/* fs/splice.c */
+ssize_t filemap_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags);
+ssize_t direct_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags);
extern ssize_t generic_file_splice_read(struct file *, loff_t *,
struct pipe_inode_info *, size_t, unsigned int);
extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
@@ -2898,7 +2768,7 @@ extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
extern loff_t noop_llseek(struct file *file, loff_t offset, int whence);
-extern loff_t no_llseek(struct file *file, loff_t offset, int whence);
+#define no_llseek NULL
extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize);
extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence);
extern loff_t generic_file_llseek_size(struct file *file, loff_t offset,
@@ -2907,8 +2777,10 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
int whence, loff_t size);
extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
extern loff_t no_seek_end_llseek(struct file *, loff_t, int);
+int rw_verify_area(int, struct file *, const loff_t *, size_t);
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
+extern int stream_open(struct inode * inode, struct file * filp);
#ifdef CONFIG_BLOCK
typedef void (dio_submit_t)(struct bio *bio, struct inode *inode,
@@ -2920,20 +2792,12 @@ enum {
/* filesystem does not support filling holes */
DIO_SKIP_HOLES = 0x02,
-
- /* filesystem can handle aio writes beyond i_size */
- DIO_ASYNC_EXTEND = 0x04,
-
- /* inode/fs/bdev does not need truncate protection */
- DIO_SKIP_DIO_COUNT = 0x08,
};
-void dio_end_io(struct bio *bio);
-
ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,
get_block_t get_block,
- dio_iodone_t end_io, dio_submit_t submit_io,
+ dio_iodone_t end_io,
int flags);
static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
@@ -2942,13 +2806,13 @@ static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
get_block_t get_block)
{
return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
- get_block, NULL, NULL, DIO_LOCKING | DIO_SKIP_HOLES);
+ get_block, NULL, DIO_LOCKING | DIO_SKIP_HOLES);
}
#endif
void inode_dio_wait(struct inode *inode);
-/*
+/**
* inode_dio_begin - signal start of a direct I/O request
* @inode: inode the direct I/O happens on
*
@@ -2960,7 +2824,7 @@ static inline void inode_dio_begin(struct inode *inode)
atomic_inc(&inode->i_dio_count);
}
-/*
+/**
* inode_dio_end - signal finish of a direct I/O request
* @inode: inode the direct I/O happens on
*
@@ -2973,6 +2837,11 @@ static inline void inode_dio_end(struct inode *inode)
wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
+/*
+ * Warn about a page cache invalidation failure during a direct I/O write.
+ */
+void dio_warn_stale_pagecache(struct file *filp);
+
extern void inode_set_flags(struct inode *inode, unsigned int flags,
unsigned int mask);
@@ -2985,18 +2854,21 @@ extern int page_readlink(struct dentry *, char __user *, int);
extern const char *page_get_link(struct dentry *, struct inode *,
struct delayed_call *);
extern void page_put_link(void *);
-extern int __page_symlink(struct inode *inode, const char *symname, int len,
- int nofs);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
-extern void generic_fillattr(struct inode *, struct kstat *);
+void generic_fillattr(struct mnt_idmap *, struct inode *, struct kstat *);
+void generic_fill_statx_attr(struct inode *inode, struct kstat *stat);
extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
void __inode_add_bytes(struct inode *inode, loff_t bytes);
void inode_add_bytes(struct inode *inode, loff_t bytes);
void __inode_sub_bytes(struct inode *inode, loff_t bytes);
void inode_sub_bytes(struct inode *inode, loff_t bytes);
+static inline loff_t __inode_get_bytes(struct inode *inode)
+{
+ return (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
+}
loff_t inode_get_bytes(struct inode *inode);
void inode_set_bytes(struct inode *inode, loff_t bytes);
const char *simple_get_link(struct dentry *, struct inode *,
@@ -3005,48 +2877,26 @@ extern const struct inode_operations simple_symlink_inode_operations;
extern int iterate_dir(struct file *, struct dir_context *);
-extern int vfs_statx(int, const char __user *, int, struct kstat *, u32);
-extern int vfs_statx_fd(unsigned int, struct kstat *, u32, unsigned int);
+int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat,
+ int flags);
+int vfs_fstat(int fd, struct kstat *stat);
static inline int vfs_stat(const char __user *filename, struct kstat *stat)
{
- return vfs_statx(AT_FDCWD, filename, AT_NO_AUTOMOUNT,
- stat, STATX_BASIC_STATS);
+ return vfs_fstatat(AT_FDCWD, filename, stat, 0);
}
static inline int vfs_lstat(const char __user *name, struct kstat *stat)
{
- return vfs_statx(AT_FDCWD, name, AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT,
- stat, STATX_BASIC_STATS);
-}
-static inline int vfs_fstatat(int dfd, const char __user *filename,
- struct kstat *stat, int flags)
-{
- return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT,
- stat, STATX_BASIC_STATS);
+ return vfs_fstatat(AT_FDCWD, name, stat, AT_SYMLINK_NOFOLLOW);
}
-static inline int vfs_fstat(int fd, struct kstat *stat)
-{
- return vfs_statx_fd(fd, stat, STATX_BASIC_STATS, 0);
-}
-
extern const char *vfs_get_link(struct dentry *, struct delayed_call *);
extern int vfs_readlink(struct dentry *, char __user *, int);
-extern int __generic_block_fiemap(struct inode *inode,
- struct fiemap_extent_info *fieinfo,
- loff_t start, loff_t len,
- get_block_t *get_block);
-extern int generic_block_fiemap(struct inode *inode,
- struct fiemap_extent_info *fieinfo, u64 start,
- u64 len, get_block_t *get_block);
-
extern struct file_system_type *get_filesystem(struct file_system_type *fs);
extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
extern struct super_block *get_super(struct block_device *);
-extern struct super_block *get_super_thawed(struct block_device *);
-extern struct super_block *get_super_exclusive_thawed(struct block_device *bdev);
extern struct super_block *get_active_super(struct block_device *bdev);
extern void drop_super(struct super_block *sb);
extern void drop_super_exclusive(struct super_block *sb);
@@ -3058,24 +2908,29 @@ extern int dcache_dir_open(struct inode *, struct file *);
extern int dcache_dir_close(struct inode *, struct file *);
extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
extern int dcache_readdir(struct file *, struct dir_context *);
-extern int simple_setattr(struct dentry *, struct iattr *);
-extern int simple_getattr(const struct path *, struct kstat *, u32, unsigned int);
+extern int simple_setattr(struct mnt_idmap *, struct dentry *,
+ struct iattr *);
+extern int simple_getattr(struct mnt_idmap *, const struct path *,
+ struct kstat *, u32, unsigned int);
extern int simple_statfs(struct dentry *, struct kstatfs *);
extern int simple_open(struct inode *inode, struct file *file);
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);
-extern int simple_rename(struct inode *, struct dentry *,
- struct inode *, struct dentry *, unsigned int);
+extern int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
+extern int simple_rename(struct mnt_idmap *, struct inode *,
+ struct dentry *, struct inode *, struct dentry *,
+ unsigned int);
+extern void simple_recursive_removal(struct dentry *,
+ void (*callback)(struct dentry *));
extern int noop_fsync(struct file *, loff_t, loff_t, int);
+extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
extern int simple_empty(struct dentry *);
-extern int simple_readpage(struct file *file, struct page *page);
extern int simple_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len,
struct page **pagep, void **fsdata);
-extern int simple_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata);
+extern const struct address_space_operations ram_aops;
extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
extern int simple_nosetlease(struct file *, long, struct file_lock **, void **);
@@ -3104,28 +2959,34 @@ extern int generic_file_fsync(struct file *, loff_t, loff_t, int);
extern int generic_check_addressable(unsigned, u64);
-#ifdef CONFIG_MIGRATION
-extern int buffer_migrate_page(struct address_space *,
- struct page *, struct page *,
- enum migrate_mode);
-#else
-#define buffer_migrate_page NULL
-#endif
+extern void generic_set_encrypted_ci_d_ops(struct dentry *dentry);
-extern int setattr_prepare(struct dentry *, struct iattr *);
+int may_setattr(struct mnt_idmap *idmap, struct inode *inode,
+ unsigned int ia_valid);
+int setattr_prepare(struct mnt_idmap *, struct dentry *, struct iattr *);
extern int inode_newsize_ok(const struct inode *, loff_t offset);
-extern void setattr_copy(struct inode *inode, const struct iattr *attr);
+void setattr_copy(struct mnt_idmap *, struct inode *inode,
+ const struct iattr *attr);
extern int file_update_time(struct file *file);
-static inline bool io_is_direct(struct file *filp)
+static inline bool vma_is_dax(const struct vm_area_struct *vma)
{
- return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host);
+ return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
}
-static inline bool vma_is_dax(struct vm_area_struct *vma)
+static inline bool vma_is_fsdax(struct vm_area_struct *vma)
{
- return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
+ struct inode *inode;
+
+ if (!IS_ENABLED(CONFIG_FS_DAX) || !vma->vm_file)
+ return false;
+ if (!vma_is_dax(vma))
+ return false;
+ inode = file_inode(vma->vm_file);
+ if (S_ISCHR(inode->i_mode))
+ return false; /* device-dax */
+ return true;
}
static inline int iocb_flags(struct file *file)
@@ -3133,31 +2994,37 @@ static inline int iocb_flags(struct file *file)
int res = 0;
if (file->f_flags & O_APPEND)
res |= IOCB_APPEND;
- if (io_is_direct(file))
+ if (file->f_flags & O_DIRECT)
res |= IOCB_DIRECT;
- if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
+ if (file->f_flags & O_DSYNC)
res |= IOCB_DSYNC;
if (file->f_flags & __O_SYNC)
res |= IOCB_SYNC;
return res;
}
-static inline int kiocb_set_rw_flags(struct kiocb *ki, int flags)
+static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
{
+ int kiocb_flags = 0;
+
+ /* make sure there's no overlap between RWF and private IOCB flags */
+ BUILD_BUG_ON((__force int) RWF_SUPPORTED & IOCB_EVENTFD);
+
+ if (!flags)
+ return 0;
if (unlikely(flags & ~RWF_SUPPORTED))
return -EOPNOTSUPP;
if (flags & RWF_NOWAIT) {
- if (!(ki->ki_filp->f_mode & FMODE_AIO_NOWAIT))
+ if (!(ki->ki_filp->f_mode & FMODE_NOWAIT))
return -EOPNOTSUPP;
- ki->ki_flags |= IOCB_NOWAIT;
+ kiocb_flags |= IOCB_NOIO;
}
- if (flags & RWF_HIPRI)
- ki->ki_flags |= IOCB_HIPRI;
- if (flags & RWF_DSYNC)
- ki->ki_flags |= IOCB_DSYNC;
+ kiocb_flags |= (__force int) (flags & RWF_SUPPORTED);
if (flags & RWF_SYNC)
- ki->ki_flags |= (IOCB_DSYNC | IOCB_SYNC);
+ kiocb_flags |= IOCB_DSYNC;
+
+ ki->ki_flags |= kiocb_flags;
return 0;
}
@@ -3183,7 +3050,7 @@ static inline ino_t parent_ino(struct dentry *dentry)
*/
struct simple_transaction_argresp {
ssize_t size;
- char data[0];
+ char data[];
};
#define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp))
@@ -3212,7 +3079,7 @@ void simple_transaction_set(struct file *file, size_t n);
* All attributes contain a text representation of a numeric value
* that is accessed with the get() and set() functions.
*/
-#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
+#define DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \
static int __fops ## _open(struct inode *inode, struct file *file) \
{ \
__simple_attr_check_format(__fmt, 0ull); \
@@ -3223,10 +3090,16 @@ static const struct file_operations __fops = { \
.open = __fops ## _open, \
.release = simple_attr_release, \
.read = simple_attr_read, \
- .write = simple_attr_write, \
+ .write = (__is_signed) ? simple_attr_write_signed : simple_attr_write, \
.llseek = generic_file_llseek, \
}
+#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
+ DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false)
+
+#define DEFINE_SIMPLE_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \
+ DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true)
+
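For example, exposing a signed value through the new variant could look like this (the examplefs_offset variable and fops name are hypothetical):

	static s64 examplefs_offset;

	static int examplefs_offset_get(void *data, u64 *val)
	{
		*val = examplefs_offset;
		return 0;
	}

	static int examplefs_offset_set(void *data, u64 val)
	{
		examplefs_offset = val;
		return 0;
	}

	DEFINE_SIMPLE_ATTRIBUTE_SIGNED(examplefs_offset_fops, examplefs_offset_get,
				       examplefs_offset_set, "%lld\n");
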
static inline __printf(1, 2)
void __simple_attr_check_format(const char *fmt, ...)
{
@@ -3241,15 +3114,11 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos);
ssize_t simple_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
+ssize_t simple_attr_write_signed(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos);
struct ctl_table;
-int proc_nr_files(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
-int proc_nr_dentry(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
-int proc_nr_inodes(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
-int __init get_filesystem_list(char *buf);
+int __init list_bdev_fs_names(char *buf, size_t size);
#define __FMODE_EXEC ((__force int) FMODE_EXEC)
#define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY)
@@ -3260,20 +3129,21 @@ int __init get_filesystem_list(char *buf);
static inline bool is_sxid(umode_t mode)
{
- return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
+ return mode & (S_ISUID | S_ISGID);
}
-static inline int check_sticky(struct inode *dir, struct inode *inode)
+static inline int check_sticky(struct mnt_idmap *idmap,
+ struct inode *dir, struct inode *inode)
{
if (!(dir->i_mode & S_ISVTX))
return 0;
- return __check_sticky(dir, inode);
+ return __check_sticky(idmap, dir, inode);
}
static inline void inode_has_no_xattr(struct inode *inode)
{
- if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC))
+ if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & SB_NOSEC))
inode->i_flags |= S_NOSEC;
}
@@ -3286,17 +3156,17 @@ static inline bool dir_emit(struct dir_context *ctx,
const char *name, int namelen,
u64 ino, unsigned type)
{
- return ctx->actor(ctx, name, namelen, ctx->pos, ino, type) == 0;
+ return ctx->actor(ctx, name, namelen, ctx->pos, ino, type);
}
static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx)
{
return ctx->actor(ctx, ".", 1, ctx->pos,
- file->f_path.dentry->d_inode->i_ino, DT_DIR) == 0;
+ file->f_path.dentry->d_inode->i_ino, DT_DIR);
}
static inline bool dir_emit_dotdot(struct file *file, struct dir_context *ctx)
{
return ctx->actor(ctx, "..", 2, ctx->pos,
- parent_ino(file->f_path.dentry), DT_DIR) == 0;
+ parent_ino(file->f_path.dentry), DT_DIR);
}
static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx)
{
@@ -3329,4 +3199,10 @@ static inline bool dir_relax_shared(struct inode *inode)
extern bool path_noexec(const struct path *path);
extern void inode_nohighmem(struct inode *inode);
+/* mm/fadvise.c */
+extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
+ int advice);
+extern int generic_fadvise(struct file *file, loff_t offset, loff_t len,
+ int advice);
+
#endif /* _LINUX_FS_H */
diff --git a/include/linux/fs_api.h b/include/linux/fs_api.h
new file mode 100644
index 000000000000..83be38d6d413
--- /dev/null
+++ b/include/linux/fs_api.h
@@ -0,0 +1 @@
+#include <linux/fs.h>
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
new file mode 100644
index 000000000000..ff6341e09925
--- /dev/null
+++ b/include/linux/fs_context.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Filesystem superblock creation and reconfiguration context.
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _LINUX_FS_CONTEXT_H
+#define _LINUX_FS_CONTEXT_H
+
+#include <linux/kernel.h>
+#include <linux/refcount.h>
+#include <linux/errno.h>
+#include <linux/security.h>
+#include <linux/mutex.h>
+
+struct cred;
+struct dentry;
+struct file_operations;
+struct file_system_type;
+struct mnt_namespace;
+struct net;
+struct pid_namespace;
+struct super_block;
+struct user_namespace;
+struct vfsmount;
+struct path;
+
+enum fs_context_purpose {
+ FS_CONTEXT_FOR_MOUNT, /* New superblock for explicit mount */
+ FS_CONTEXT_FOR_SUBMOUNT, /* New superblock for automatic submount */
+ FS_CONTEXT_FOR_RECONFIGURE, /* Superblock reconfiguration (remount) */
+};
+
+/*
+ * Userspace usage phase for fsopen/fspick.
+ */
+enum fs_context_phase {
+ FS_CONTEXT_CREATE_PARAMS, /* Loading params for sb creation */
+ FS_CONTEXT_CREATING, /* A superblock is being created */
+ FS_CONTEXT_AWAITING_MOUNT, /* Superblock created, awaiting fsmount() */
+ FS_CONTEXT_AWAITING_RECONF, /* Awaiting initialisation for reconfiguration */
+ FS_CONTEXT_RECONF_PARAMS, /* Loading params for reconfiguration */
+ FS_CONTEXT_RECONFIGURING, /* Reconfiguring the superblock */
+ FS_CONTEXT_FAILED, /* Failed to correctly transition a context */
+};
+
+/*
+ * Type of parameter value.
+ */
+enum fs_value_type {
+ fs_value_is_undefined,
+ fs_value_is_flag, /* Parameter not given a value */
+ fs_value_is_string, /* Value is a string */
+ fs_value_is_blob, /* Value is a binary blob */
+ fs_value_is_filename, /* Value is a filename* + dirfd */
+ fs_value_is_file, /* Value is a file* */
+};
+
+/*
+ * Configuration parameter.
+ */
+struct fs_parameter {
+ const char *key; /* Parameter name */
+ enum fs_value_type type:8; /* The type of value here */
+ union {
+ char *string;
+ void *blob;
+ struct filename *name;
+ struct file *file;
+ };
+ size_t size;
+ int dirfd;
+};
+
+struct p_log {
+ const char *prefix;
+ struct fc_log *log;
+};
+
+/*
+ * Filesystem context for holding the parameters used in the creation or
+ * reconfiguration of a superblock.
+ *
+ * Superblock creation fills in ->root whereas reconfiguration begins with this
+ * already set.
+ *
+ * See Documentation/filesystems/mount_api.rst
+ */
+struct fs_context {
+ const struct fs_context_operations *ops;
+ struct mutex uapi_mutex; /* Userspace access mutex */
+ struct file_system_type *fs_type;
+ void *fs_private; /* The filesystem's context */
+ void *sget_key;
+ struct dentry *root; /* The root and superblock */
+ struct user_namespace *user_ns; /* The user namespace for this mount */
+ struct net *net_ns; /* The network namespace for this mount */
+ const struct cred *cred; /* The mounter's credentials */
+ struct p_log log; /* Logging buffer */
+ const char *source; /* The source name (e.g. dev path) */
+ void *security; /* LSM options */
+ void *s_fs_info; /* Proposed s_fs_info */
+ unsigned int sb_flags; /* Proposed superblock flags (SB_*) */
+ unsigned int sb_flags_mask; /* Superblock flags that were changed */
+ unsigned int s_iflags; /* OR'd with sb->s_iflags */
+ enum fs_context_purpose purpose:8;
+ enum fs_context_phase phase:8; /* The phase the context is in */
+ bool need_free:1; /* Need to call ops->free() */
+ bool global:1; /* Goes into &init_user_ns */
+ bool oldapi:1; /* Coming from mount(2) */
+};
+
+struct fs_context_operations {
+ void (*free)(struct fs_context *fc);
+ int (*dup)(struct fs_context *fc, struct fs_context *src_fc);
+ int (*parse_param)(struct fs_context *fc, struct fs_parameter *param);
+ int (*parse_monolithic)(struct fs_context *fc, void *data);
+ int (*get_tree)(struct fs_context *fc);
+ int (*reconfigure)(struct fs_context *fc);
+};
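To make the flow concrete, a skeletal wiring for a hypothetical filesystem might be (all examplefs_* symbols are made up; get_tree_nodev() is declared below):

	static int examplefs_get_tree(struct fs_context *fc)
	{
		return get_tree_nodev(fc, examplefs_fill_super);	/* hypothetical */
	}

	static const struct fs_context_operations examplefs_context_ops = {
		.free		= examplefs_free_fc,		/* hypothetical */
		.parse_param	= examplefs_parse_param,	/* hypothetical */
		.get_tree	= examplefs_get_tree,
	};

	static int examplefs_init_fs_context(struct fs_context *fc)
	{
		fc->ops = &examplefs_context_ops;
		return 0;
	}
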
+
+/*
+ * fs_context manipulation functions.
+ */
+extern struct fs_context *fs_context_for_mount(struct file_system_type *fs_type,
+ unsigned int sb_flags);
+extern struct fs_context *fs_context_for_reconfigure(struct dentry *dentry,
+ unsigned int sb_flags,
+ unsigned int sb_flags_mask);
+extern struct fs_context *fs_context_for_submount(struct file_system_type *fs_type,
+ struct dentry *reference);
+
+extern struct fs_context *vfs_dup_fs_context(struct fs_context *fc);
+extern int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param);
+extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
+ const char *value, size_t v_size);
+extern int generic_parse_monolithic(struct fs_context *fc, void *data);
+extern int vfs_get_tree(struct fs_context *fc);
+extern void put_fs_context(struct fs_context *fc);
+extern int vfs_parse_fs_param_source(struct fs_context *fc,
+ struct fs_parameter *param);
+extern void fc_drop_locked(struct fs_context *fc);
+int reconfigure_single(struct super_block *s,
+ int flags, void *data);
+
+extern int get_tree_nodev(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc));
+extern int get_tree_single(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc));
+extern int get_tree_single_reconf(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc));
+extern int get_tree_keyed(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc),
+ void *key);
+
+extern int get_tree_bdev(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc));
+
+extern const struct file_operations fscontext_fops;
+
+/*
+ * Mount error, warning and informational message logging. This structure is
+ * shareable between a mount and a subordinate mount.
+ */
+struct fc_log {
+ refcount_t usage;
+ u8 head; /* Insertion index in buffer[] */
+ u8 tail; /* Removal index in buffer[] */
+ u8 need_free; /* Mask of kfree'able items in buffer[] */
+ struct module *owner; /* Owner module for strings that don't then need freeing */
+ char *buffer[8];
+};
+
+extern __attribute__((format(printf, 4, 5)))
+void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt, ...);
+
+#define __logfc(fc, l, fmt, ...) logfc((fc)->log.log, NULL, \
+ l, fmt, ## __VA_ARGS__)
+#define __plog(p, l, fmt, ...) logfc((p)->log, (p)->prefix, \
+ l, fmt, ## __VA_ARGS__)
+/**
+ * infof - Store supplementary informational message
+ * @fc: The context in which to log the informational message
+ * @fmt: The format string
+ *
+ * Store the supplementary informational message for the process if the process
+ * has enabled the facility.
+ */
+#define infof(fc, fmt, ...) __logfc(fc, 'i', fmt, ## __VA_ARGS__)
+#define info_plog(p, fmt, ...) __plog(p, 'i', fmt, ## __VA_ARGS__)
+#define infofc(fc, fmt, ...) __plog((&(fc)->log), 'i', fmt, ## __VA_ARGS__)
+
+/**
+ * warnf - Store supplementary warning message
+ * @fc: The context in which to log the warning message
+ * @fmt: The format string
+ *
+ * Store the supplementary warning message for the process if the process has
+ * enabled the facility.
+ */
+#define warnf(fc, fmt, ...) __logfc(fc, 'w', fmt, ## __VA_ARGS__)
+#define warn_plog(p, fmt, ...) __plog(p, 'w', fmt, ## __VA_ARGS__)
+#define warnfc(fc, fmt, ...) __plog((&(fc)->log), 'w', fmt, ## __VA_ARGS__)
+
+/**
+ * errorf - Store supplementary error message
+ * @fc: The context in which to log the error message
+ * @fmt: The format string
+ *
+ * Store the supplementary error message for the process if the process has
+ * enabled the facility.
+ */
+#define errorf(fc, fmt, ...) __logfc(fc, 'e', fmt, ## __VA_ARGS__)
+#define error_plog(p, fmt, ...) __plog(p, 'e', fmt, ## __VA_ARGS__)
+#define errorfc(fc, fmt, ...) __plog((&(fc)->log), 'e', fmt, ## __VA_ARGS__)
+
+/**
+ * invalf - Store supplementary invalid argument error message
+ * @fc: The context in which to log the error message
+ * @fmt: The format string
+ *
+ * Store the supplementary error message for the process if the process has
+ * enabled the facility and return -EINVAL.
+ */
+#define invalf(fc, fmt, ...) (errorf(fc, fmt, ## __VA_ARGS__), -EINVAL)
+#define inval_plog(p, fmt, ...) (error_plog(p, fmt, ## __VA_ARGS__), -EINVAL)
+#define invalfc(fc, fmt, ...) (errorfc(fc, fmt, ## __VA_ARGS__), -EINVAL)
+
+#endif /* _LINUX_FS_CONTEXT_H */
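
For orientation, here is a minimal sketch of how a filesystem might wire into the API declared above. All foofs_* names are hypothetical, and the sketch assumes the struct fs_context ->ops pointer and struct fs_parameter ->key member declared earlier in this patch series:

/*
 * Hypothetical sketch only; foofs_* names are illustrative.
 */
#include <linux/fs.h>
#include <linux/fs_context.h>

static int foofs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	/* Normal superblock setup (s_root and friends) would go here. */
	return 0;
}

static int foofs_get_tree(struct fs_context *fc)
{
	/* No backing device: create/look up the superblock directly. */
	return get_tree_nodev(fc, foofs_fill_super);
}

static int foofs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	/* Log against the context and fail with -EINVAL in one step. */
	return invalfc(fc, "Unsupported parameter '%s'", param->key);
}

static const struct fs_context_operations foofs_context_ops = {
	.parse_param	= foofs_parse_param,
	.get_tree	= foofs_get_tree,
};

static int foofs_init_fs_context(struct fs_context *fc)
{
	fc->ops = &foofs_context_ops;
	return 0;
}

Compared with monolithic mount(2) data, each parameter can now be accepted or rejected individually, with the reason retrievable from the context's log.
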
diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h
new file mode 100644
index 000000000000..01542c4b87a2
--- /dev/null
+++ b/include/linux/fs_parser.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Filesystem parameter description and parser
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _LINUX_FS_PARSER_H
+#define _LINUX_FS_PARSER_H
+
+#include <linux/fs_context.h>
+
+struct path;
+
+struct constant_table {
+ const char *name;
+ int value;
+};
+
+struct fs_parameter_spec;
+struct fs_parse_result;
+typedef int fs_param_type(struct p_log *,
+ const struct fs_parameter_spec *,
+ struct fs_parameter *,
+ struct fs_parse_result *);
+/*
+ * The type of parameter expected.
+ */
+fs_param_type fs_param_is_bool, fs_param_is_u32, fs_param_is_s32, fs_param_is_u64,
+ fs_param_is_enum, fs_param_is_string, fs_param_is_blob, fs_param_is_blockdev,
+ fs_param_is_path, fs_param_is_fd;
+
+/*
+ * Specification of the type of value a parameter wants.
+ *
+ * Note that the fsparam_flag(), fsparam_string(), fsparam_u32(), ... macros
+ * should be used to generate elements of this type.
+ */
+struct fs_parameter_spec {
+ const char *name;
+ fs_param_type *type; /* The desired parameter type */
+ u8 opt; /* Option number (returned by fs_parse()) */
+ unsigned short flags;
+#define fs_param_neg_with_no 0x0002 /* "noxxx" is negative param */
+#define fs_param_can_be_empty 0x0004 /* "xxx=" is allowed */
+#define fs_param_deprecated 0x0008 /* The param is deprecated */
+ const void *data;
+};
+
+/*
+ * Result of parse.
+ */
+struct fs_parse_result {
+ bool negated; /* T if param was "noxxx" */
+ union {
+ bool boolean; /* For spec_bool */
+ int int_32; /* For spec_s32/spec_enum */
+ unsigned int uint_32; /* For spec_u32{,_octal,_hex}/spec_enum */
+ u64 uint_64; /* For spec_u64 */
+ };
+};
+
+extern int __fs_parse(struct p_log *log,
+ const struct fs_parameter_spec *desc,
+ struct fs_parameter *value,
+ struct fs_parse_result *result);
+
+static inline int fs_parse(struct fs_context *fc,
+ const struct fs_parameter_spec *desc,
+ struct fs_parameter *param,
+ struct fs_parse_result *result)
+{
+ return __fs_parse(&fc->log, desc, param, result);
+}
+
+extern int fs_lookup_param(struct fs_context *fc,
+ struct fs_parameter *param,
+ bool want_bdev,
+ unsigned int flags,
+ struct path *_path);
+
+extern int lookup_constant(const struct constant_table tbl[], const char *name, int not_found);
+
+#ifdef CONFIG_VALIDATE_FS_PARSER
+extern bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size,
+ int low, int high, int special);
+extern bool fs_validate_description(const char *name,
+ const struct fs_parameter_spec *desc);
+#else
+static inline bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size,
+ int low, int high, int special)
+{ return true; }
+static inline bool fs_validate_description(const char *name,
+ const struct fs_parameter_spec *desc)
+{ return true; }
+#endif
+
+/*
+ * Parameter type, name, index and flags element constructors. Use as:
+ *
+ * fsparam_xxxx("foo", Opt_foo)
+ *
+ * If existing helpers are not enough, direct use of __fsparam() would
+ * work, but any such case is probably a sign that a new helper is needed.
+ * Helpers will remain stable; low-level implementation may change.
+ */
+#define __fsparam(TYPE, NAME, OPT, FLAGS, DATA) \
+ { \
+ .name = NAME, \
+ .opt = OPT, \
+ .type = TYPE, \
+ .flags = FLAGS, \
+ .data = DATA \
+ }
+
+#define fsparam_flag(NAME, OPT) __fsparam(NULL, NAME, OPT, 0, NULL)
+#define fsparam_flag_no(NAME, OPT) \
+ __fsparam(NULL, NAME, OPT, fs_param_neg_with_no, NULL)
+#define fsparam_bool(NAME, OPT) __fsparam(fs_param_is_bool, NAME, OPT, 0, NULL)
+#define fsparam_u32(NAME, OPT) __fsparam(fs_param_is_u32, NAME, OPT, 0, NULL)
+#define fsparam_u32oct(NAME, OPT) \
+ __fsparam(fs_param_is_u32, NAME, OPT, 0, (void *)8)
+#define fsparam_u32hex(NAME, OPT) \
+	__fsparam(fs_param_is_u32, NAME, OPT, 0, (void *)16)
+#define fsparam_s32(NAME, OPT) __fsparam(fs_param_is_s32, NAME, OPT, 0, NULL)
+#define fsparam_u64(NAME, OPT) __fsparam(fs_param_is_u64, NAME, OPT, 0, NULL)
+#define fsparam_enum(NAME, OPT, array) __fsparam(fs_param_is_enum, NAME, OPT, 0, array)
+#define fsparam_string(NAME, OPT) \
+ __fsparam(fs_param_is_string, NAME, OPT, 0, NULL)
+#define fsparam_blob(NAME, OPT) __fsparam(fs_param_is_blob, NAME, OPT, 0, NULL)
+#define fsparam_bdev(NAME, OPT) __fsparam(fs_param_is_blockdev, NAME, OPT, 0, NULL)
+#define fsparam_path(NAME, OPT) __fsparam(fs_param_is_path, NAME, OPT, 0, NULL)
+#define fsparam_fd(NAME, OPT) __fsparam(fs_param_is_fd, NAME, OPT, 0, NULL)
+
+#endif /* _LINUX_FS_PARSER_H */
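
A sketch of how the constructors and fs_parse() above compose inside a filesystem's ->parse_param hook; the foofs table and Opt_* constants are invented for illustration:

enum { Opt_ro, Opt_uid, Opt_mode };

static const struct fs_parameter_spec foofs_params[] = {
	fsparam_flag  ("ro",   Opt_ro),
	fsparam_u32   ("uid",  Opt_uid),
	fsparam_u32oct("mode", Opt_mode),	/* data = (void *)8, so base 8 */
	{}
};

static int foofs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, foofs_params, param, &result);
	if (opt < 0)
		return opt;	/* e.g. -ENOPARAM for an unknown key */

	switch (opt) {
	case Opt_ro:
		/* flag parameters carry no value */
		break;
	case Opt_uid:
		/* result.uint_32 holds the parsed decimal value */
		break;
	case Opt_mode:
		/* parsed with base 8 per the spec's data field */
		break;
	}
	return 0;
}
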
diff --git a/include/linux/fs_pin.h b/include/linux/fs_pin.h
index 3886b3bffd7f..bdd09fd2520c 100644
--- a/include/linux/fs_pin.h
+++ b/include/linux/fs_pin.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/wait.h>
struct fs_pin {
@@ -19,6 +20,5 @@ static inline void init_fs_pin(struct fs_pin *p, void (*kill)(struct fs_pin *))
}
void pin_remove(struct fs_pin *);
-void pin_insert_group(struct fs_pin *, struct vfsmount *, struct hlist_head *);
void pin_insert(struct fs_pin *, struct vfsmount *);
void pin_kill(struct fs_pin *);
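
The pin API above is deliberately small; a hedged sketch of its intended use follows (foo_* names are hypothetical — kernel/acct.c is the in-tree user to study):

static void foo_pin_kill(struct fs_pin *p)
{
	/* The vfsmount we pinned is going away: drop our hold on it. */
	pin_remove(p);
}

static struct fs_pin foo_pin;

static void foo_attach(struct vfsmount *mnt)
{
	init_fs_pin(&foo_pin, foo_pin_kill);
	pin_insert(&foo_pin, mnt);
}
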
diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h
index da317c7163ab..54210a42c30d 100644
--- a/include/linux/fs_stack.h
+++ b/include/linux/fs_stack.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FS_STACK_H
#define _LINUX_FS_STACK_H
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index 0efc3e62843a..783b48dedb72 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FS_STRUCT_H
#define _LINUX_FS_STRUCT_H
@@ -8,11 +9,11 @@
struct fs_struct {
int users;
spinlock_t lock;
- seqcount_t seq;
+ seqcount_spinlock_t seq;
int umask;
int in_exec;
struct path root, pwd;
-};
+} __randomize_layout;
extern struct kmem_cache *fs_cachep;
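
The switch to seqcount_spinlock_t associates the seqcount with fs->lock for lockdep purposes; the lockless read side it protects looks roughly like the sketch below (modelled on the getcwd()/d_path() pattern; foo_get_pwd is hypothetical, and a real caller must be in an RCU read-side section since no path reference is taken):

static void foo_get_pwd(struct fs_struct *fs, struct path *pwd)
{
	unsigned int seq;

	do {
		/* The write side bumps fs->seq under fs->lock. */
		seq = read_seqcount_begin(&fs->seq);
		*pwd = fs->pwd;		/* NB: no path_get(); RCU-only use */
	} while (read_seqcount_retry(&fs->seq, seq));
}
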
diff --git a/include/linux/fs_types.h b/include/linux/fs_types.h
new file mode 100644
index 000000000000..54816791196f
--- /dev/null
+++ b/include/linux/fs_types.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FS_TYPES_H
+#define _LINUX_FS_TYPES_H
+
+/*
+ * This header declares the common implementation of dirent-to-fs
+ * on-disk file type conversion. Although the fs on-disk
+ * bits are specific to every file system, in practice, many
+ * file systems use the exact same on-disk format to describe
+ * the lower 3 file type bits that represent the 7 POSIX file
+ * types.
+ *
+ * It is important to note that the definitions in this
+ * header MUST NOT change. This would break both the
+ * userspace ABI and the on-disk format of filesystems
+ * using this code.
+ *
+ * All those file systems can use this generic code for the
+ * conversions.
+ */
+
+/*
+ * struct dirent file types
+ * exposed to user via getdents(2), readdir(3)
+ *
+ * These match bits 12..15 of stat.st_mode
+ * (i.e. "(i_mode >> 12) & 15").
+ */
+#define S_DT_SHIFT 12
+#define S_DT(mode) (((mode) & S_IFMT) >> S_DT_SHIFT)
+#define S_DT_MASK (S_IFMT >> S_DT_SHIFT)
+
+/* these are defined by POSIX and also present in glibc's dirent.h */
+#define DT_UNKNOWN 0
+#define DT_FIFO 1
+#define DT_CHR 2
+#define DT_DIR 4
+#define DT_BLK 6
+#define DT_REG 8
+#define DT_LNK 10
+#define DT_SOCK 12
+#define DT_WHT 14
+
+#define DT_MAX (S_DT_MASK + 1) /* 16 */
+
+/*
+ * fs on-disk file types.
+ * Only the low 3 bits are used for the POSIX file types.
+ * Other bits are reserved for fs private use.
+ * These definitions are shared and used by multiple filesystems,
+ * and MUST NOT change under any circumstances.
+ *
+ * Note that no fs currently stores the whiteout type on-disk,
+ * so whiteout dirents are exposed to userspace as DT_CHR.
+ */
+#define FT_UNKNOWN 0
+#define FT_REG_FILE 1
+#define FT_DIR 2
+#define FT_CHRDEV 3
+#define FT_BLKDEV 4
+#define FT_FIFO 5
+#define FT_SOCK 6
+#define FT_SYMLINK 7
+
+#define FT_MAX 8
+
+/*
+ * declarations for helper functions, accompanying implementation
+ * is in fs/fs_types.c
+ */
+extern unsigned char fs_ftype_to_dtype(unsigned int filetype);
+extern unsigned char fs_umode_to_ftype(umode_t mode);
+extern unsigned char fs_umode_to_dtype(umode_t mode);
+
+#endif
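
As an example, a filesystem whose directory entries store these common FT_* values can feed them straight to the VFS when iterating a directory (foofs_emit is hypothetical; dir_emit() comes from linux/fs.h):

static bool foofs_emit(struct dir_context *ctx, const char *name, int len,
		       u64 ino, unsigned int ondisk_ftype)
{
	/* e.g. FT_SYMLINK (7) maps to DT_LNK (10) */
	return dir_emit(ctx, name, len, ino, fs_ftype_to_dtype(ondisk_ftype));
}
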
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 4c467ef50159..a174cedf4d90 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -1,16 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* General filesystem caching backing cache interface
*
- * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* NOTE!!! See:
*
- * Documentation/filesystems/caching/backend-api.txt
+ * Documentation/filesystems/caching/backend-api.rst
*
* for a description of the cache backend interface declared here.
*/
@@ -19,196 +15,34 @@
#define _LINUX_FSCACHE_CACHE_H
#include <linux/fscache.h>
-#include <linux/sched.h>
-#include <linux/workqueue.h>
-
-#define NR_MAXCACHES BITS_PER_LONG
-
-struct fscache_cache;
-struct fscache_cache_ops;
-struct fscache_object;
-struct fscache_operation;
-/*
- * cache tag definition
- */
-struct fscache_cache_tag {
- struct list_head link;
- struct fscache_cache *cache; /* cache referred to by this tag */
- unsigned long flags;
-#define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */
- atomic_t usage;
- char name[0]; /* tag name */
+enum fscache_cache_trace;
+enum fscache_cookie_trace;
+enum fscache_access_trace;
+
+enum fscache_cache_state {
+ FSCACHE_CACHE_IS_NOT_PRESENT, /* No cache is present for this name */
+ FSCACHE_CACHE_IS_PREPARING, /* A cache is preparing to come live */
+ FSCACHE_CACHE_IS_ACTIVE, /* Attached cache is active and can be used */
+ FSCACHE_CACHE_GOT_IOERROR, /* Attached cache stopped on I/O error */
+ FSCACHE_CACHE_IS_WITHDRAWN, /* Attached cache is being withdrawn */
+#define NR__FSCACHE_CACHE_STATE (FSCACHE_CACHE_IS_WITHDRAWN + 1)
};
/*
- * cache definition
+ * Cache cookie.
*/
struct fscache_cache {
const struct fscache_cache_ops *ops;
- struct fscache_cache_tag *tag; /* tag representing this cache */
- struct kobject *kobj; /* system representation of this cache */
- struct list_head link; /* link in list of caches */
- size_t max_index_size; /* maximum size of index data */
- char identifier[36]; /* cache label */
-
- /* node management */
- struct work_struct op_gc; /* operation garbage collector */
- struct list_head object_list; /* list of data/index objects */
- struct list_head op_gc_list; /* list of ops to be deleted */
- spinlock_t object_list_lock;
- spinlock_t op_gc_list_lock;
+ struct list_head cache_link; /* Link in cache list */
+ void *cache_priv; /* Private cache data (or NULL) */
+ refcount_t ref;
+	atomic_t		n_volumes;	/* Number of active volumes */
+ atomic_t n_accesses; /* Number of in-progress accesses on the cache */
atomic_t object_count; /* no. of live objects in this cache */
- struct fscache_object *fsdef; /* object for the fsdef index */
- unsigned long flags;
-#define FSCACHE_IOERROR 0 /* cache stopped on I/O error */
-#define FSCACHE_CACHE_WITHDRAWN 1 /* cache has been withdrawn */
-};
-
-extern wait_queue_head_t fscache_cache_cleared_wq;
-
-/*
- * operation to be applied to a cache object
- * - retrieval initiation operations are done in the context of the process
- * that issued them, and not in an async thread pool
- */
-typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
-typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
-typedef void (*fscache_operation_cancel_t)(struct fscache_operation *op);
-
-enum fscache_operation_state {
- FSCACHE_OP_ST_BLANK, /* Op is not yet submitted */
- FSCACHE_OP_ST_INITIALISED, /* Op is initialised */
- FSCACHE_OP_ST_PENDING, /* Op is blocked from running */
- FSCACHE_OP_ST_IN_PROGRESS, /* Op is in progress */
- FSCACHE_OP_ST_COMPLETE, /* Op is complete */
- FSCACHE_OP_ST_CANCELLED, /* Op has been cancelled */
- FSCACHE_OP_ST_DEAD /* Op is now dead */
-};
-
-struct fscache_operation {
- struct work_struct work; /* record for async ops */
- struct list_head pend_link; /* link in object->pending_ops */
- struct fscache_object *object; /* object to be operated upon */
-
- unsigned long flags;
-#define FSCACHE_OP_TYPE 0x000f /* operation type */
-#define FSCACHE_OP_ASYNC 0x0001 /* - async op, processor may sleep for disk */
-#define FSCACHE_OP_MYTHREAD 0x0002 /* - processing is done be issuing thread, not pool */
-#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
-#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
-#define FSCACHE_OP_DEC_READ_CNT 6 /* decrement object->n_reads on destruction */
-#define FSCACHE_OP_UNUSE_COOKIE 7 /* call fscache_unuse_cookie() on completion */
-#define FSCACHE_OP_KEEP_FLAGS 0x00f0 /* flags to keep when repurposing an op */
-
- enum fscache_operation_state state;
- atomic_t usage;
- unsigned debug_id; /* debugging ID */
-
- /* operation processor callback
- * - can be NULL if FSCACHE_OP_WAITING is going to be used to perform
- * the op in a non-pool thread */
- fscache_operation_processor_t processor;
-
- /* Operation cancellation cleanup (optional) */
- fscache_operation_cancel_t cancel;
-
- /* operation releaser */
- fscache_operation_release_t release;
-};
-
-extern atomic_t fscache_op_debug_id;
-extern void fscache_op_work_func(struct work_struct *work);
-
-extern void fscache_enqueue_operation(struct fscache_operation *);
-extern void fscache_op_complete(struct fscache_operation *, bool);
-extern void fscache_put_operation(struct fscache_operation *);
-extern void fscache_operation_init(struct fscache_operation *,
- fscache_operation_processor_t,
- fscache_operation_cancel_t,
- fscache_operation_release_t);
-
-/*
- * data read operation
- */
-struct fscache_retrieval {
- struct fscache_operation op;
- struct fscache_cookie *cookie; /* The netfs cookie */
- struct address_space *mapping; /* netfs pages */
- fscache_rw_complete_t end_io_func; /* function to call on I/O completion */
- void *context; /* netfs read context (pinned) */
- struct list_head to_do; /* list of things to be done by the backend */
- unsigned long start_time; /* time at which retrieval started */
- atomic_t n_pages; /* number of pages to be retrieved */
-};
-
-typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op,
- struct page *page,
- gfp_t gfp);
-
-typedef int (*fscache_pages_retrieval_func_t)(struct fscache_retrieval *op,
- struct list_head *pages,
- unsigned *nr_pages,
- gfp_t gfp);
-
-/**
- * fscache_get_retrieval - Get an extra reference on a retrieval operation
- * @op: The retrieval operation to get a reference on
- *
- * Get an extra reference on a retrieval operation.
- */
-static inline
-struct fscache_retrieval *fscache_get_retrieval(struct fscache_retrieval *op)
-{
- atomic_inc(&op->op.usage);
- return op;
-}
-
-/**
- * fscache_enqueue_retrieval - Enqueue a retrieval operation for processing
- * @op: The retrieval operation affected
- *
- * Enqueue a retrieval operation for processing by the FS-Cache thread pool.
- */
-static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
-{
- fscache_enqueue_operation(&op->op);
-}
-
-/**
- * fscache_retrieval_complete - Record (partial) completion of a retrieval
- * @op: The retrieval operation affected
- * @n_pages: The number of pages to account for
- */
-static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
- int n_pages)
-{
- atomic_sub(n_pages, &op->n_pages);
- if (atomic_read(&op->n_pages) <= 0)
- fscache_op_complete(&op->op, true);
-}
-
-/**
- * fscache_put_retrieval - Drop a reference to a retrieval operation
- * @op: The retrieval operation affected
- *
- * Drop a reference to a retrieval operation.
- */
-static inline void fscache_put_retrieval(struct fscache_retrieval *op)
-{
- fscache_put_operation(&op->op);
-}
-
-/*
- * cached page storage work item
- * - used to do three things:
- * - batch writes to the cache
- * - do cache writes asynchronously
- * - defer writes until cache object lookup completion
- */
-struct fscache_storage {
- struct fscache_operation op;
- pgoff_t store_limit; /* don't write more than this */
+ unsigned int debug_id;
+ enum fscache_cache_state state;
+ char *name;
};
/*
@@ -218,337 +52,154 @@ struct fscache_cache_ops {
/* name of cache provider */
const char *name;
- /* allocate an object record for a cookie */
- struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
- struct fscache_cookie *cookie);
-
- /* look up the object for a cookie
- * - return -ETIMEDOUT to be requeued
- */
- int (*lookup_object)(struct fscache_object *object);
-
- /* finished looking up */
- void (*lookup_complete)(struct fscache_object *object);
+ /* Acquire a volume */
+ void (*acquire_volume)(struct fscache_volume *volume);
- /* increment the usage count on this object (may fail if unmounting) */
- struct fscache_object *(*grab_object)(struct fscache_object *object);
+ /* Free the cache's data attached to a volume */
+ void (*free_volume)(struct fscache_volume *volume);
- /* pin an object in the cache */
- int (*pin_object)(struct fscache_object *object);
+ /* Look up a cookie in the cache */
+ bool (*lookup_cookie)(struct fscache_cookie *cookie);
- /* unpin an object in the cache */
- void (*unpin_object)(struct fscache_object *object);
+ /* Withdraw an object without any cookie access counts held */
+ void (*withdraw_cookie)(struct fscache_cookie *cookie);
- /* check the consistency between the backing cache and the FS-Cache
- * cookie */
- int (*check_consistency)(struct fscache_operation *op);
-
- /* store the updated auxiliary data on an object */
- void (*update_object)(struct fscache_object *object);
+ /* Change the size of a data object */
+ void (*resize_cookie)(struct netfs_cache_resources *cres,
+ loff_t new_size);
/* Invalidate an object */
- void (*invalidate_object)(struct fscache_operation *op);
-
- /* discard the resources pinned by an object and effect retirement if
- * necessary */
- void (*drop_object)(struct fscache_object *object);
-
- /* dispose of a reference to an object */
- void (*put_object)(struct fscache_object *object);
-
- /* sync a cache */
- void (*sync_cache)(struct fscache_cache *cache);
-
- /* notification that the attributes of a non-index object (such as
- * i_size) have changed */
- int (*attr_changed)(struct fscache_object *object);
-
- /* reserve space for an object's data and associated metadata */
- int (*reserve_space)(struct fscache_object *object, loff_t i_size);
-
- /* request a backing block for a page be read or allocated in the
- * cache */
- fscache_page_retrieval_func_t read_or_alloc_page;
-
- /* request backing blocks for a list of pages be read or allocated in
- * the cache */
- fscache_pages_retrieval_func_t read_or_alloc_pages;
-
- /* request a backing block for a page be allocated in the cache so that
- * it can be written directly */
- fscache_page_retrieval_func_t allocate_page;
-
- /* request backing blocks for pages be allocated in the cache so that
- * they can be written directly */
- fscache_pages_retrieval_func_t allocate_pages;
-
- /* write a page to its backing block in the cache */
- int (*write_page)(struct fscache_storage *op, struct page *page);
-
- /* detach backing block from a page (optional)
- * - must release the cookie lock before returning
- * - may sleep
- */
- void (*uncache_page)(struct fscache_object *object,
- struct page *page);
-
- /* dissociate a cache from all the pages it was backing */
- void (*dissociate_pages)(struct fscache_cache *cache);
-};
+ bool (*invalidate_cookie)(struct fscache_cookie *cookie);
-extern struct fscache_cookie fscache_fsdef_index;
+ /* Begin an operation for the netfs lib */
+ bool (*begin_operation)(struct netfs_cache_resources *cres,
+ enum fscache_want_state want_state);
-/*
- * Event list for fscache_object::{event_mask,events}
- */
-enum {
- FSCACHE_OBJECT_EV_NEW_CHILD, /* T if object has a new child */
- FSCACHE_OBJECT_EV_PARENT_READY, /* T if object's parent is ready */
- FSCACHE_OBJECT_EV_UPDATE, /* T if object should be updated */
- FSCACHE_OBJECT_EV_INVALIDATE, /* T if cache requested object invalidation */
- FSCACHE_OBJECT_EV_CLEARED, /* T if accessors all gone */
- FSCACHE_OBJECT_EV_ERROR, /* T if fatal error occurred during processing */
- FSCACHE_OBJECT_EV_KILL, /* T if netfs relinquished or cache withdrew object */
- NR_FSCACHE_OBJECT_EVENTS
+ /* Prepare to write to a live cache object */
+ void (*prepare_to_write)(struct fscache_cookie *cookie);
};
-#define FSCACHE_OBJECT_EVENTS_MASK ((1UL << NR_FSCACHE_OBJECT_EVENTS) - 1)
+extern struct workqueue_struct *fscache_wq;
+extern wait_queue_head_t fscache_clearance_waiters;
/*
- * States for object state machine.
- */
-struct fscache_transition {
- unsigned long events;
- const struct fscache_state *transit_to;
-};
-
-struct fscache_state {
- char name[24];
- char short_name[8];
- const struct fscache_state *(*work)(struct fscache_object *object,
- int event);
- const struct fscache_transition transitions[];
-};
-
-/*
- * on-disk cache file or index handle
+ * out-of-line cache backend functions
*/
-struct fscache_object {
- const struct fscache_state *state; /* Object state machine state */
- const struct fscache_transition *oob_table; /* OOB state transition table */
- int debug_id; /* debugging ID */
- int n_children; /* number of child objects */
- int n_ops; /* number of extant ops on object */
- int n_obj_ops; /* number of object ops outstanding on object */
- int n_in_progress; /* number of ops in progress */
- int n_exclusive; /* number of exclusive ops queued or in progress */
- atomic_t n_reads; /* number of read ops in progress */
- spinlock_t lock; /* state and operations lock */
-
- unsigned long lookup_jif; /* time at which lookup started */
- unsigned long oob_event_mask; /* OOB events this object is interested in */
- unsigned long event_mask; /* events this object is interested in */
- unsigned long events; /* events to be processed by this object
- * (order is important - using fls) */
-
- unsigned long flags;
-#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */
-#define FSCACHE_OBJECT_PENDING_WRITE 1 /* T if object has pending write */
-#define FSCACHE_OBJECT_WAITING 2 /* T if object is waiting on its parent */
-#define FSCACHE_OBJECT_IS_LIVE 3 /* T if object is not withdrawn or relinquished */
-#define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */
-#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
-#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */
-#define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */
-#define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */
-
- struct list_head cache_link; /* link in cache->object_list */
- struct hlist_node cookie_link; /* link in cookie->backing_objects */
- struct fscache_cache *cache; /* cache that supplied this object */
- struct fscache_cookie *cookie; /* netfs's file/index object */
- struct fscache_object *parent; /* parent object */
- struct work_struct work; /* attention scheduling record */
- struct list_head dependents; /* FIFO of dependent objects */
- struct list_head dep_link; /* link in parent's dependents list */
- struct list_head pending_ops; /* unstarted operations on this object */
-#ifdef CONFIG_FSCACHE_OBJECT_LIST
- struct rb_node objlist_link; /* link in global object list */
-#endif
- pgoff_t store_limit; /* current storage limit */
- loff_t store_limit_l; /* current storage limit */
-};
-
-extern void fscache_object_init(struct fscache_object *, struct fscache_cookie *,
- struct fscache_cache *);
-extern void fscache_object_destroy(struct fscache_object *);
-
-extern void fscache_object_lookup_negative(struct fscache_object *object);
-extern void fscache_obtained_object(struct fscache_object *object);
-
-static inline bool fscache_object_is_live(struct fscache_object *object)
-{
- return test_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
-}
-
-static inline bool fscache_object_is_dying(struct fscache_object *object)
-{
- return !fscache_object_is_live(object);
-}
-
-static inline bool fscache_object_is_available(struct fscache_object *object)
-{
- return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
-}
+extern struct rw_semaphore fscache_addremove_sem;
+extern struct fscache_cache *fscache_acquire_cache(const char *name);
+extern void fscache_relinquish_cache(struct fscache_cache *cache);
+extern int fscache_add_cache(struct fscache_cache *cache,
+ const struct fscache_cache_ops *ops,
+ void *cache_priv);
+extern void fscache_withdraw_cache(struct fscache_cache *cache);
+extern void fscache_withdraw_volume(struct fscache_volume *volume);
+extern void fscache_withdraw_cookie(struct fscache_cookie *cookie);
-static inline bool fscache_cache_is_broken(struct fscache_object *object)
-{
- return test_bit(FSCACHE_IOERROR, &object->cache->flags);
-}
+extern void fscache_io_error(struct fscache_cache *cache);
-static inline bool fscache_object_is_active(struct fscache_object *object)
-{
- return fscache_object_is_available(object) &&
- fscache_object_is_live(object) &&
- !fscache_cache_is_broken(object);
-}
+extern void fscache_end_volume_access(struct fscache_volume *volume,
+ struct fscache_cookie *cookie,
+ enum fscache_access_trace why);
+
+extern struct fscache_cookie *fscache_get_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where);
+extern void fscache_put_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where);
+extern void fscache_end_cookie_access(struct fscache_cookie *cookie,
+ enum fscache_access_trace why);
+extern void fscache_cookie_lookup_negative(struct fscache_cookie *cookie);
+extern void fscache_resume_after_invalidation(struct fscache_cookie *cookie);
+extern void fscache_caching_failed(struct fscache_cookie *cookie);
+extern bool fscache_wait_for_operation(struct netfs_cache_resources *cres,
+ enum fscache_want_state state);
/**
- * fscache_object_destroyed - Note destruction of an object in a cache
- * @cache: The cache from which the object came
+ * fscache_cookie_state - Read the state of a cookie
+ * @cookie: The cookie to query
*
- * Note the destruction and deallocation of an object record in a cache.
+ * Get the state of a cookie, imposing an ordering between the cookie contents
+ * and the state value. Paired with fscache_set_cookie_state().
*/
-static inline void fscache_object_destroyed(struct fscache_cache *cache)
+static inline
+enum fscache_cookie_state fscache_cookie_state(struct fscache_cookie *cookie)
{
- if (atomic_dec_and_test(&cache->object_count))
- wake_up_all(&fscache_cache_cleared_wq);
+ return smp_load_acquire(&cookie->state);
}
/**
- * fscache_object_lookup_error - Note an object encountered an error
- * @object: The object on which the error was encountered
+ * fscache_get_key - Get a pointer to the cookie key
+ * @cookie: The cookie to query
*
- * Note that an object encountered a fatal error (usually an I/O error) and
- * that it should be withdrawn as soon as possible.
+ * Return a pointer to where a cookie's key is stored.
*/
-static inline void fscache_object_lookup_error(struct fscache_object *object)
+static inline void *fscache_get_key(struct fscache_cookie *cookie)
{
- set_bit(FSCACHE_OBJECT_EV_ERROR, &object->events);
+ if (cookie->key_len <= sizeof(cookie->inline_key))
+ return cookie->inline_key;
+ else
+ return cookie->key;
}
-/**
- * fscache_set_store_limit - Set the maximum size to be stored in an object
- * @object: The object to set the maximum on
- * @i_size: The limit to set in bytes
- *
- * Set the maximum size an object is permitted to reach, implying the highest
- * byte that may be written. Intended to be called by the attr_changed() op.
- *
- * See Documentation/filesystems/caching/backend-api.txt for a complete
- * description.
- */
-static inline
-void fscache_set_store_limit(struct fscache_object *object, loff_t i_size)
+static inline struct fscache_cookie *fscache_cres_cookie(struct netfs_cache_resources *cres)
{
- object->store_limit_l = i_size;
- object->store_limit = i_size >> PAGE_SHIFT;
- if (i_size & ~PAGE_MASK)
- object->store_limit++;
+ return cres->cache_priv;
}
/**
- * fscache_end_io - End a retrieval operation on a page
- * @op: The FS-Cache operation covering the retrieval
- * @page: The page that was to be fetched
- * @error: The error code (0 if successful)
+ * fscache_count_object - Tell fscache that an object has been added
+ * @cache: The cache to account to
*
- * Note the end of an operation to retrieve a page, as covered by a particular
- * operation record.
+ * Tell fscache that an object has been added to the cache. This prevents the
+ * cache from tearing down the cache structure until the object is uncounted.
*/
-static inline void fscache_end_io(struct fscache_retrieval *op,
- struct page *page, int error)
+static inline void fscache_count_object(struct fscache_cache *cache)
{
- op->end_io_func(page, op->context, error);
-}
-
-static inline void __fscache_use_cookie(struct fscache_cookie *cookie)
-{
- atomic_inc(&cookie->n_active);
+ atomic_inc(&cache->object_count);
}
/**
- * fscache_use_cookie - Request usage of cookie attached to an object
- * @object: Object description
- *
- * Request usage of the cookie attached to an object. NULL is returned if the
- * relinquishment had reduced the cookie usage count to 0.
+ * fscache_uncount_object - Tell fscache that an object has been removed
+ * @cache: The cache to account to
+ *
+ * Tell fscache that an object has been removed from the cache and will no
+ * longer be accessed. After this point, the cache cookie may be destroyed.
*/
-static inline bool fscache_use_cookie(struct fscache_object *object)
-{
- struct fscache_cookie *cookie = object->cookie;
- return atomic_inc_not_zero(&cookie->n_active) != 0;
-}
-
-static inline bool __fscache_unuse_cookie(struct fscache_cookie *cookie)
+static inline void fscache_uncount_object(struct fscache_cache *cache)
{
- return atomic_dec_and_test(&cookie->n_active);
-}
-
-static inline void __fscache_wake_unused_cookie(struct fscache_cookie *cookie)
-{
- wake_up_atomic_t(&cookie->n_active);
+ if (atomic_dec_and_test(&cache->object_count))
+ wake_up_all(&fscache_clearance_waiters);
}
/**
- * fscache_unuse_cookie - Cease usage of cookie attached to an object
- * @object: Object description
- *
- * Cease usage of the cookie attached to an object. When the users count
- * reaches zero then the cookie relinquishment will be permitted to proceed.
- */
-static inline void fscache_unuse_cookie(struct fscache_object *object)
-{
- struct fscache_cookie *cookie = object->cookie;
- if (__fscache_unuse_cookie(cookie))
- __fscache_wake_unused_cookie(cookie);
-}
-
-/*
- * out-of-line cache backend functions
- */
-extern __printf(3, 4)
-void fscache_init_cache(struct fscache_cache *cache,
- const struct fscache_cache_ops *ops,
- const char *idfmt, ...);
-
-extern int fscache_add_cache(struct fscache_cache *cache,
- struct fscache_object *fsdef,
- const char *tagname);
-extern void fscache_withdraw_cache(struct fscache_cache *cache);
-
-extern void fscache_io_error(struct fscache_cache *cache);
-
-extern void fscache_mark_page_cached(struct fscache_retrieval *op,
- struct page *page);
-
-extern void fscache_mark_pages_cached(struct fscache_retrieval *op,
- struct pagevec *pagevec);
-
-extern bool fscache_object_sleep_till_congested(signed long *timeoutp);
-
-extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
- const void *data,
- uint16_t datalen);
-
-extern void fscache_object_retrying_stale(struct fscache_object *object);
-
-enum fscache_why_object_killed {
- FSCACHE_OBJECT_IS_STALE,
- FSCACHE_OBJECT_NO_SPACE,
- FSCACHE_OBJECT_WAS_RETIRED,
- FSCACHE_OBJECT_WAS_CULLED,
-};
-extern void fscache_object_mark_killed(struct fscache_object *object,
- enum fscache_why_object_killed why);
+ * fscache_wait_for_objects - Wait for all objects to be withdrawn
+ * @cache: The cache to query
+ *
+ * Wait for all extant objects in a cache to finish being withdrawn
+ * and go away.
+ */
+static inline void fscache_wait_for_objects(struct fscache_cache *cache)
+{
+ wait_event(fscache_clearance_waiters,
+ atomic_read(&cache->object_count) == 0);
+}
+
+#ifdef CONFIG_FSCACHE_STATS
+extern atomic_t fscache_n_read;
+extern atomic_t fscache_n_write;
+extern atomic_t fscache_n_no_write_space;
+extern atomic_t fscache_n_no_create_space;
+extern atomic_t fscache_n_culled;
+#define fscache_count_read() atomic_inc(&fscache_n_read)
+#define fscache_count_write() atomic_inc(&fscache_n_write)
+#define fscache_count_no_write_space() atomic_inc(&fscache_n_no_write_space)
+#define fscache_count_no_create_space() atomic_inc(&fscache_n_no_create_space)
+#define fscache_count_culled() atomic_inc(&fscache_n_culled)
+#else
+#define fscache_count_read() do {} while(0)
+#define fscache_count_write() do {} while(0)
+#define fscache_count_no_write_space() do {} while(0)
+#define fscache_count_no_create_space() do {} while(0)
+#define fscache_count_culled() do {} while(0)
+#endif
#endif /* _LINUX_FSCACHE_CACHE_H */
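
A hedged sketch of how a cache backend might use the accounting helpers above (mycache_* is hypothetical); the object count is what lets fscache_wait_for_objects() block withdrawal until all backing objects are gone:

static bool mycache_lookup_cookie(struct fscache_cookie *cookie)
{
	/* Pin the cache structure for as long as our backing object lives. */
	fscache_count_object(cookie->volume->cache);
	/* ... look up or create the backing file, set cookie->cache_priv ... */
	return true;
}

static void mycache_withdraw_cookie(struct fscache_cookie *cookie)
{
	/* ... tear down the backing file ... */
	fscache_uncount_object(cookie->volume->cache);
}
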
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 115bb81912cc..8e312c8323a8 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -1,16 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* General filesystem caching interface
*
- * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* NOTE!!! See:
*
- * Documentation/filesystems/caching/netfs-api.txt
+ * Documentation/filesystems/caching/netfs-api.rst
*
* for a description of the network filesystem interface declared here.
*/
@@ -19,189 +15,138 @@
#define _LINUX_FSCACHE_H
#include <linux/fs.h>
-#include <linux/list.h>
-#include <linux/pagemap.h>
-#include <linux/pagevec.h>
+#include <linux/netfs.h>
+#include <linux/writeback.h>
#if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE)
+#define __fscache_available (1)
#define fscache_available() (1)
+#define fscache_volume_valid(volume) (volume)
#define fscache_cookie_valid(cookie) (cookie)
+#define fscache_resources_valid(cres) ((cres)->cache_priv)
+#define fscache_cookie_enabled(cookie) (cookie && !test_bit(FSCACHE_COOKIE_DISABLED, &cookie->flags))
#else
+#define __fscache_available (0)
#define fscache_available() (0)
+#define fscache_volume_valid(volume) (0)
#define fscache_cookie_valid(cookie) (0)
+#define fscache_resources_valid(cres) (false)
+#define fscache_cookie_enabled(cookie) (0)
#endif
-
-/*
- * overload PG_private_2 to give us PG_fscache - this is used to indicate that
- * a page is currently backed by a local disk cache
- */
-#define PageFsCache(page) PagePrivate2((page))
-#define SetPageFsCache(page) SetPagePrivate2((page))
-#define ClearPageFsCache(page) ClearPagePrivate2((page))
-#define TestSetPageFsCache(page) TestSetPagePrivate2((page))
-#define TestClearPageFsCache(page) TestClearPagePrivate2((page))
-
-/* pattern used to fill dead space in an index entry */
-#define FSCACHE_INDEX_DEADFILL_PATTERN 0x79
-
-struct pagevec;
-struct fscache_cache_tag;
struct fscache_cookie;
-struct fscache_netfs;
-typedef void (*fscache_rw_complete_t)(struct page *page,
- void *context,
- int error);
+#define FSCACHE_ADV_SINGLE_CHUNK 0x01 /* The object is a single chunk of data */
+#define FSCACHE_ADV_WRITE_CACHE 0x00 /* Do cache if written to locally */
+#define FSCACHE_ADV_WRITE_NOCACHE 0x02 /* Don't cache if written to locally */
+#define FSCACHE_ADV_WANT_CACHE_SIZE 0x04 /* Retrieve cache size at runtime */
+
+#define FSCACHE_INVAL_DIO_WRITE 0x01 /* Invalidate due to DIO write */
-/* result of index entry consultation */
-enum fscache_checkaux {
- FSCACHE_CHECKAUX_OKAY, /* entry okay as is */
- FSCACHE_CHECKAUX_NEEDS_UPDATE, /* entry requires update */
- FSCACHE_CHECKAUX_OBSOLETE, /* entry requires deletion */
+enum fscache_want_state {
+ FSCACHE_WANT_PARAMS,
+ FSCACHE_WANT_WRITE,
+ FSCACHE_WANT_READ,
};
/*
- * fscache cookie definition
- */
-struct fscache_cookie_def {
- /* name of cookie type */
- char name[16];
-
- /* cookie type */
- uint8_t type;
-#define FSCACHE_COOKIE_TYPE_INDEX 0
-#define FSCACHE_COOKIE_TYPE_DATAFILE 1
-
- /* select the cache into which to insert an entry in this index
- * - optional
- * - should return a cache identifier or NULL to cause the cache to be
- * inherited from the parent if possible or the first cache picked
- * for a non-index file if not
- */
- struct fscache_cache_tag *(*select_cache)(
- const void *parent_netfs_data,
- const void *cookie_netfs_data);
-
- /* get an index key
- * - should store the key data in the buffer
- * - should return the amount of data stored
- * - not permitted to return an error
- * - the netfs data from the cookie being used as the source is
- * presented
- */
- uint16_t (*get_key)(const void *cookie_netfs_data,
- void *buffer,
- uint16_t bufmax);
-
- /* get certain file attributes from the netfs data
- * - this function can be absent for an index
- * - not permitted to return an error
- * - the netfs data from the cookie being used as the source is
- * presented
- */
- void (*get_attr)(const void *cookie_netfs_data, uint64_t *size);
-
- /* get the auxiliary data from netfs data
- * - this function can be absent if the index carries no state data
- * - should store the auxiliary data in the buffer
- * - should return the amount of amount stored
- * - not permitted to return an error
- * - the netfs data from the cookie being used as the source is
- * presented
- */
- uint16_t (*get_aux)(const void *cookie_netfs_data,
- void *buffer,
- uint16_t bufmax);
-
- /* consult the netfs about the state of an object
- * - this function can be absent if the index carries no state data
- * - the netfs data from the cookie being used as the target is
- * presented, as is the auxiliary data
- */
- enum fscache_checkaux (*check_aux)(void *cookie_netfs_data,
- const void *data,
- uint16_t datalen);
-
- /* get an extra reference on a read context
- * - this function can be absent if the completion function doesn't
- * require a context
- */
- void (*get_context)(void *cookie_netfs_data, void *context);
-
- /* release an extra reference on a read context
- * - this function can be absent if the completion function doesn't
- * require a context
- */
- void (*put_context)(void *cookie_netfs_data, void *context);
-
- /* indicate page that now have cache metadata retained
- * - this function should mark the specified page as now being cached
- * - the page will have been marked with PG_fscache before this is
- * called, so this is optional
- */
- void (*mark_page_cached)(void *cookie_netfs_data,
- struct address_space *mapping,
- struct page *page);
-
- /* indicate the cookie is no longer cached
- * - this function is called when the backing store currently caching
- * a cookie is removed
- * - the netfs should use this to clean up any markers indicating
- * cached pages
- * - this is mandatory for any object that may have data
- */
- void (*now_uncached)(void *cookie_netfs_data);
-};
+ * Data object state.
+ */
+enum fscache_cookie_state {
+ FSCACHE_COOKIE_STATE_QUIESCENT, /* The cookie is uncached */
+ FSCACHE_COOKIE_STATE_LOOKING_UP, /* The cache object is being looked up */
+ FSCACHE_COOKIE_STATE_CREATING, /* The cache object is being created */
+ FSCACHE_COOKIE_STATE_ACTIVE, /* The cache is active, readable and writable */
+ FSCACHE_COOKIE_STATE_INVALIDATING, /* The cache is being invalidated */
+ FSCACHE_COOKIE_STATE_FAILED, /* The cache failed, withdraw to clear */
+ FSCACHE_COOKIE_STATE_LRU_DISCARDING, /* The cookie is being discarded by the LRU */
+ FSCACHE_COOKIE_STATE_WITHDRAWING, /* The cookie is being withdrawn */
+ FSCACHE_COOKIE_STATE_RELINQUISHING, /* The cookie is being relinquished */
+ FSCACHE_COOKIE_STATE_DROPPED, /* The cookie has been dropped */
+#define FSCACHE_COOKIE_STATE__NR (FSCACHE_COOKIE_STATE_DROPPED + 1)
+} __attribute__((mode(byte)));
/*
- * fscache cached network filesystem type
- * - name, version and ops must be filled in before registration
- * - all other fields will be set during registration
- */
-struct fscache_netfs {
- uint32_t version; /* indexing version */
- const char *name; /* filesystem name */
- struct fscache_cookie *primary_index;
- struct list_head link; /* internal link */
+ * Volume representation cookie.
+ */
+struct fscache_volume {
+ refcount_t ref;
+ atomic_t n_cookies; /* Number of data cookies in volume */
+ atomic_t n_accesses; /* Number of cache accesses in progress */
+ unsigned int debug_id;
+ unsigned int key_hash; /* Hash of key string */
+ u8 *key; /* Volume ID, eg. "afs@example.com@1234" */
+ struct list_head proc_link; /* Link in /proc/fs/fscache/volumes */
+ struct hlist_bl_node hash_link; /* Link in hash table */
+ struct work_struct work;
+ struct fscache_cache *cache; /* The cache in which this resides */
+ void *cache_priv; /* Cache private data */
+ spinlock_t lock;
+ unsigned long flags;
+#define FSCACHE_VOLUME_RELINQUISHED 0 /* Volume is being cleaned up */
+#define FSCACHE_VOLUME_INVALIDATE 1 /* Volume was invalidated */
+#define FSCACHE_VOLUME_COLLIDED_WITH 2 /* Volume was collided with */
+#define FSCACHE_VOLUME_ACQUIRE_PENDING 3 /* Volume is waiting to complete acquisition */
+#define FSCACHE_VOLUME_CREATING 4 /* Volume is being created on disk */
+ u8 coherency_len; /* Length of the coherency data */
+ u8 coherency[]; /* Coherency data */
};
/*
- * data file or index object cookie
+ * Data file representation cookie.
* - a file will only appear in one cache
* - a request to cache a file may or may not be honoured, subject to
* constraints such as disk space
* - indices are created on disk just-in-time
*/
struct fscache_cookie {
- atomic_t usage; /* number of users of this cookie */
- atomic_t n_children; /* number of children of this cookie */
- atomic_t n_active; /* number of active users of netfs ptrs */
+ refcount_t ref;
+ atomic_t n_active; /* number of active users of cookie */
+ atomic_t n_accesses; /* Number of cache accesses in progress */
+ unsigned int debug_id;
+ unsigned int inval_counter; /* Number of invalidations made */
spinlock_t lock;
- spinlock_t stores_lock; /* lock on page store tree */
- struct hlist_head backing_objects; /* object(s) backing this file/index */
- const struct fscache_cookie_def *def; /* definition */
- struct fscache_cookie *parent; /* parent of this entry */
- void *netfs_data; /* back pointer to netfs */
- struct radix_tree_root stores; /* pages to be stored on this cookie */
-#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
-#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */
-
+ struct fscache_volume *volume; /* Parent volume of this file. */
+ void *cache_priv; /* Cache-side representation */
+ struct hlist_bl_node hash_link; /* Link in hash table */
+ struct list_head proc_link; /* Link in proc list */
+ struct list_head commit_link; /* Link in commit queue */
+ struct work_struct work; /* Commit/relinq/withdraw work */
+ loff_t object_size; /* Size of the netfs object */
+ unsigned long unused_at; /* Time at which unused (jiffies) */
unsigned long flags;
-#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
-#define FSCACHE_COOKIE_NO_DATA_YET 1 /* T if new object with no cached data yet */
-#define FSCACHE_COOKIE_UNAVAILABLE 2 /* T if cookie is unavailable (error, etc) */
-#define FSCACHE_COOKIE_INVALIDATING 3 /* T if cookie is being invalidated */
-#define FSCACHE_COOKIE_RELINQUISHED 4 /* T if cookie has been relinquished */
-#define FSCACHE_COOKIE_ENABLED 5 /* T if cookie is enabled */
-#define FSCACHE_COOKIE_ENABLEMENT_LOCK 6 /* T if cookie is being en/disabled */
+#define FSCACHE_COOKIE_RELINQUISHED 0 /* T if cookie has been relinquished */
+#define FSCACHE_COOKIE_RETIRED 1 /* T if this cookie has retired on relinq */
+#define FSCACHE_COOKIE_IS_CACHING 2 /* T if this cookie is cached */
+#define FSCACHE_COOKIE_NO_DATA_TO_READ 3 /* T if this cookie has nothing to read */
+#define FSCACHE_COOKIE_NEEDS_UPDATE 4 /* T if attrs have been updated */
+#define FSCACHE_COOKIE_HAS_BEEN_CACHED 5 /* T if cookie needs withdraw-on-relinq */
+#define FSCACHE_COOKIE_DISABLED 6 /* T if cookie has been disabled */
+#define FSCACHE_COOKIE_LOCAL_WRITE 7 /* T if cookie has been modified locally */
+#define FSCACHE_COOKIE_NO_ACCESS_WAKE 8 /* T if no wake when n_accesses goes 0 */
+#define FSCACHE_COOKIE_DO_RELINQUISH 9 /* T if this cookie needs relinquishment */
+#define FSCACHE_COOKIE_DO_WITHDRAW 10 /* T if this cookie needs withdrawing */
+#define FSCACHE_COOKIE_DO_LRU_DISCARD 11 /* T if this cookie needs LRU discard */
+#define FSCACHE_COOKIE_DO_PREP_TO_WRITE 12 /* T if cookie needs write preparation */
+#define FSCACHE_COOKIE_HAVE_DATA 13 /* T if this cookie has data stored */
+#define FSCACHE_COOKIE_IS_HASHED 14 /* T if this cookie is hashed */
+#define FSCACHE_COOKIE_DO_INVALIDATE 15 /* T if cookie needs invalidation */
+
+ enum fscache_cookie_state state;
+ u8 advice; /* FSCACHE_ADV_* */
+ u8 key_len; /* Length of index key */
+ u8 aux_len; /* Length of auxiliary data */
+ u32 key_hash; /* Hash of volume, key, len */
+ union {
+ void *key; /* Index key */
+ u8 inline_key[16]; /* - If the key is short enough */
+ };
+ union {
+ void *aux; /* Auxiliary data */
+ u8 inline_aux[8]; /* - If the aux data is short enough */
+ };
};
-static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie)
-{
- return test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
-}
-
/*
* slow-path functions for when there is actually caching available, and the
* netfs does actually have a valid token
@@ -209,146 +154,139 @@ static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie)
* - these are undefined symbols when FS-Cache is not configured and the
* optimiser takes care of not using them
*/
-extern int __fscache_register_netfs(struct fscache_netfs *);
-extern void __fscache_unregister_netfs(struct fscache_netfs *);
-extern struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *);
-extern void __fscache_release_cache_tag(struct fscache_cache_tag *);
+extern struct fscache_volume *__fscache_acquire_volume(const char *, const char *,
+ const void *, size_t);
+extern void __fscache_relinquish_volume(struct fscache_volume *, const void *, bool);
extern struct fscache_cookie *__fscache_acquire_cookie(
- struct fscache_cookie *,
- const struct fscache_cookie_def *,
- void *, bool);
+ struct fscache_volume *,
+ u8,
+ const void *, size_t,
+ const void *, size_t,
+ loff_t);
+extern void __fscache_use_cookie(struct fscache_cookie *, bool);
+extern void __fscache_unuse_cookie(struct fscache_cookie *, const void *, const loff_t *);
extern void __fscache_relinquish_cookie(struct fscache_cookie *, bool);
-extern int __fscache_check_consistency(struct fscache_cookie *);
-extern void __fscache_update_cookie(struct fscache_cookie *);
-extern int __fscache_attr_changed(struct fscache_cookie *);
-extern void __fscache_invalidate(struct fscache_cookie *);
-extern void __fscache_wait_on_invalidate(struct fscache_cookie *);
-extern int __fscache_read_or_alloc_page(struct fscache_cookie *,
- struct page *,
- fscache_rw_complete_t,
- void *,
- gfp_t);
-extern int __fscache_read_or_alloc_pages(struct fscache_cookie *,
- struct address_space *,
- struct list_head *,
- unsigned *,
- fscache_rw_complete_t,
- void *,
- gfp_t);
-extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t);
-extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t);
-extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
-extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
-extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
-extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *,
- gfp_t);
-extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *,
- struct inode *);
-extern void __fscache_readpages_cancel(struct fscache_cookie *cookie,
- struct list_head *pages);
-extern void __fscache_disable_cookie(struct fscache_cookie *, bool);
-extern void __fscache_enable_cookie(struct fscache_cookie *,
- bool (*)(void *), void *);
+extern void __fscache_resize_cookie(struct fscache_cookie *, loff_t);
+extern void __fscache_invalidate(struct fscache_cookie *, const void *, loff_t, unsigned int);
+extern int __fscache_begin_read_operation(struct netfs_cache_resources *, struct fscache_cookie *);
+extern int __fscache_begin_write_operation(struct netfs_cache_resources *, struct fscache_cookie *);
+
+extern void __fscache_write_to_cache(struct fscache_cookie *, struct address_space *,
+ loff_t, size_t, loff_t, netfs_io_terminated_t, void *,
+ bool);
+extern void __fscache_clear_page_bits(struct address_space *, loff_t, size_t);
/**
- * fscache_register_netfs - Register a filesystem as desiring caching services
- * @netfs: The description of the filesystem
+ * fscache_acquire_volume - Register a volume as desiring caching services
+ * @volume_key: An identification string for the volume
+ * @cache_name: The name of the cache to use (or NULL for the default)
+ * @coherency_data: Piece of arbitrary coherency data to check (or NULL)
+ * @coherency_len: The size of the coherency data
*
- * Register a filesystem as desiring caching services if they're available.
+ * Register a volume as desiring caching services if they're available. The
+ * caller must provide an identifier for the volume and may also indicate which
+ * cache it should be in. If a preexisting volume entry is found in the cache,
+ * the coherency data must match; otherwise the entry will be invalidated.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Returns a cookie pointer on success, -ENOMEM if out of memory or -EBUSY if a
+ * cache volume of that name is already acquired. Note that "NULL" is a valid
+ * cookie pointer and can be returned if caching is refused.
*/
static inline
-int fscache_register_netfs(struct fscache_netfs *netfs)
+struct fscache_volume *fscache_acquire_volume(const char *volume_key,
+ const char *cache_name,
+ const void *coherency_data,
+ size_t coherency_len)
{
- if (fscache_available())
- return __fscache_register_netfs(netfs);
- else
- return 0;
+ if (!fscache_available())
+ return NULL;
+ return __fscache_acquire_volume(volume_key, cache_name,
+ coherency_data, coherency_len);
}
/**
- * fscache_unregister_netfs - Indicate that a filesystem no longer desires
- * caching services
- * @netfs: The description of the filesystem
+ * fscache_relinquish_volume - Cease caching a volume
+ * @volume: The volume cookie
+ * @coherency_data: Piece of arbitrary coherency data to set (or NULL)
+ * @invalidate: True if the volume should be invalidated
*
- * Indicate that a filesystem no longer desires caching services for the
- * moment.
- *
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Indicate that a filesystem no longer desires caching services for a volume.
+ * The caller must have relinquished all file cookies prior to calling this.
+ * The stored coherency data is updated.
*/
static inline
-void fscache_unregister_netfs(struct fscache_netfs *netfs)
+void fscache_relinquish_volume(struct fscache_volume *volume,
+ const void *coherency_data,
+ bool invalidate)
{
- if (fscache_available())
- __fscache_unregister_netfs(netfs);
+ if (fscache_volume_valid(volume))
+ __fscache_relinquish_volume(volume, coherency_data, invalidate);
}
/**
- * fscache_lookup_cache_tag - Look up a cache tag
- * @name: The name of the tag to search for
+ * fscache_acquire_cookie - Acquire a cookie to represent a cache object
+ * @volume: The volume in which to locate/create this cookie
+ * @advice: Advice flags (FSCACHE_COOKIE_ADV_*)
+ * @index_key: The index key for this cookie
+ * @index_key_len: Size of the index key
+ * @aux_data: The auxiliary data for the cookie (may be NULL)
+ * @aux_data_len: Size of the auxiliary data buffer
+ * @object_size: The initial size of object
*
- * Acquire a specific cache referral tag that can be used to select a specific
- * cache in which to cache an index.
+ * Acquire a cookie to represent a data file within the given cache volume.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name)
+struct fscache_cookie *fscache_acquire_cookie(struct fscache_volume *volume,
+ u8 advice,
+ const void *index_key,
+ size_t index_key_len,
+ const void *aux_data,
+ size_t aux_data_len,
+ loff_t object_size)
{
- if (fscache_available())
- return __fscache_lookup_cache_tag(name);
- else
+ if (!fscache_volume_valid(volume))
return NULL;
+ return __fscache_acquire_cookie(volume, advice,
+ index_key, index_key_len,
+ aux_data, aux_data_len,
+ object_size);
}
/**
- * fscache_release_cache_tag - Release a cache tag
- * @tag: The tag to release
- *
- * Release a reference to a cache referral tag previously looked up.
+ * fscache_use_cookie - Request usage of cookie attached to an object
+ * @cookie: The cookie representing the cache object
+ * @will_modify: If cache is expected to be modified locally
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Request usage of the cookie attached to an object. The caller should tell
+ * the cache if the object's contents are about to be modified locally and then
+ * the cache can apply the policy that has been set to handle this case.
*/
-static inline
-void fscache_release_cache_tag(struct fscache_cache_tag *tag)
+static inline void fscache_use_cookie(struct fscache_cookie *cookie,
+ bool will_modify)
{
- if (fscache_available())
- __fscache_release_cache_tag(tag);
+ if (fscache_cookie_valid(cookie))
+ __fscache_use_cookie(cookie, will_modify);
}
/**
- * fscache_acquire_cookie - Acquire a cookie to represent a cache object
- * @parent: The cookie that's to be the parent of this one
- * @def: A description of the cache object, including callback operations
- * @netfs_data: An arbitrary piece of data to be kept in the cookie to
- * represent the cache object to the netfs
- * @enable: Whether or not to enable a data cookie immediately
- *
- * This function is used to inform FS-Cache about part of an index hierarchy
- * that can be used to locate files. This is done by requesting a cookie for
- * each index in the path to the file.
+ * fscache_unuse_cookie - Cease usage of cookie attached to an object
+ * @cookie: The cookie representing the cache object
+ * @aux_data: Updated auxiliary data (or NULL)
+ * @object_size: Revised size of the object (or NULL)
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Cease usage of the cookie attached to an object. When the usage count
+ * reaches zero, relinquishment of the cookie will be permitted to proceed.
*/
-static inline
-struct fscache_cookie *fscache_acquire_cookie(
- struct fscache_cookie *parent,
- const struct fscache_cookie_def *def,
- void *netfs_data,
- bool enable)
+static inline void fscache_unuse_cookie(struct fscache_cookie *cookie,
+ const void *aux_data,
+ const loff_t *object_size)
{
- if (fscache_cookie_valid(parent) && fscache_cookie_enabled(parent))
- return __fscache_acquire_cookie(parent, def, netfs_data,
- enable);
- else
- return NULL;
+ if (fscache_cookie_valid(cookie))
+ __fscache_unuse_cookie(cookie, aux_data, object_size);
}
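fscache_use_cookie() and fscache_unuse_cookie() are meant to bracket periods
of active use. A minimal sketch of the pairing in open/release file
operations follows; myfs_i(), an accessor from an inode to its cookie, is
hypothetical.

static int myfs_file_open(struct inode *inode, struct file *file)
{
	/* Tell the cache whether we may write through this file. */
	fscache_use_cookie(myfs_i(inode)->cookie,
			   file->f_mode & FMODE_WRITE);
	return 0;
}

static int myfs_file_release(struct inode *inode, struct file *file)
{
	loff_t i_size = i_size_read(inode);

	/* Hand back the final size so the cache can trim its copy. */
	fscache_unuse_cookie(myfs_i(inode)->cookie, NULL, &i_size);
	return 0;
}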
/**
@@ -360,7 +298,7 @@ struct fscache_cookie *fscache_acquire_cookie(
* This function returns a cookie to the cache, forcibly discarding the
* associated cache object if retire is set to true.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
@@ -370,463 +308,388 @@ void fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
__fscache_relinquish_cookie(cookie, retire);
}
-/**
- * fscache_check_consistency - Request that if the cache is updated
- * @cookie: The cookie representing the cache object
- *
- * Request an consistency check from fscache, which passes the request
- * to the backing cache.
- *
- * Returns 0 if consistent and -ESTALE if inconsistent. May also
- * return -ENOMEM and -ERESTARTSYS.
+/*
+ * Find the auxiliary data on a cookie.
*/
-static inline
-int fscache_check_consistency(struct fscache_cookie *cookie)
+static inline void *fscache_get_aux(struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_check_consistency(cookie);
+ if (cookie->aux_len <= sizeof(cookie->inline_aux))
+ return cookie->inline_aux;
else
- return 0;
+ return cookie->aux;
}
-/**
- * fscache_update_cookie - Request that a cache object be updated
- * @cookie: The cookie representing the cache object
- *
- * Request an update of the index data for the cache object associated with the
- * cookie.
- *
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+/*
+ * Update the auxiliary data on a cookie.
*/
static inline
-void fscache_update_cookie(struct fscache_cookie *cookie)
+void fscache_update_aux(struct fscache_cookie *cookie,
+ const void *aux_data, const loff_t *object_size)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- __fscache_update_cookie(cookie);
+ void *p = fscache_get_aux(cookie);
+
+ if (aux_data && p)
+ memcpy(p, aux_data, cookie->aux_len);
+ if (object_size)
+ cookie->object_size = *object_size;
}
-/**
- * fscache_pin_cookie - Pin a data-storage cache object in its cache
- * @cookie: The cookie representing the cache object
- *
- * Permit data-storage cache objects to be pinned in the cache.
- *
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
- */
+#ifdef CONFIG_FSCACHE_STATS
+extern atomic_t fscache_n_updates;
+#endif
+
static inline
-int fscache_pin_cookie(struct fscache_cookie *cookie)
+void __fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data,
+ const loff_t *object_size)
{
- return -ENOBUFS;
+#ifdef CONFIG_FSCACHE_STATS
+ atomic_inc(&fscache_n_updates);
+#endif
+ fscache_update_aux(cookie, aux_data, object_size);
+ smp_wmb();
+ set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);
}
/**
- * fscache_pin_cookie - Unpin a data-storage cache object in its cache
+ * fscache_update_cookie - Request that a cache object be updated
* @cookie: The cookie representing the cache object
+ * @aux_data: The updated auxiliary data for the cookie (may be NULL)
+ * @object_size: The current size of the object (may be NULL)
*
- * Permit data-storage cache objects to be unpinned from the cache.
+ * Request an update of the index data for the cache object associated with the
+ * cookie. The auxiliary data on the cookie will be updated first if @aux_data
+ * is set and the object size will be updated and the object possibly trimmed
+ * if @object_size is set.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-void fscache_unpin_cookie(struct fscache_cookie *cookie)
+void fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data,
+ const loff_t *object_size)
{
+ if (fscache_cookie_enabled(cookie))
+ __fscache_update_cookie(cookie, aux_data, object_size);
}
/**
- * fscache_attr_changed - Notify cache that an object's attributes changed
+ * fscache_resize_cookie - Request that a cache object be resized
* @cookie: The cookie representing the cache object
+ * @new_size: The new size of the object
*
- * Send a notification to the cache indicating that an object's attributes have
- * changed. This includes the data size. These attributes will be obtained
- * through the get_attr() cookie definition op.
+ * Request that the size of an object be changed.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-int fscache_attr_changed(struct fscache_cookie *cookie)
+void fscache_resize_cookie(struct fscache_cookie *cookie, loff_t new_size)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_attr_changed(cookie);
- else
- return -ENOBUFS;
+ if (fscache_cookie_enabled(cookie))
+ __fscache_resize_cookie(cookie, new_size);
}
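For example, a truncate path might propagate the new size to the cache once
the pagecache has been adjusted. A sketch, again using the hypothetical
myfs_i() accessor:

static int myfs_truncate(struct inode *inode, loff_t new_size)
{
	truncate_setsize(inode, new_size);
	fscache_resize_cookie(myfs_i(inode)->cookie, new_size);
	return 0;
}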
/**
* fscache_invalidate - Notify cache that an object needs invalidation
* @cookie: The cookie representing the cache object
+ * @aux_data: The updated auxiliary data for the cookie (may be NULL)
+ * @size: The revised size of the object.
+ * @flags: Invalidation flags (FSCACHE_INVAL_*)
*
* Notify the cache that an object needs to be invalidated and that it
- * should abort any retrievals or stores it is doing on the cache. The object
- * is then marked non-caching until such time as the invalidation is complete.
+ * should abort any retrievals or stores it is doing on the cache. This
+ * increments inval_counter on the cookie which can be used by the caller to
+ * reconsider I/O requests as they complete.
*
- * This can be called with spinlocks held.
+ * If @flags has FSCACHE_INVAL_DIO_WRITE set, this indicates that this is due
+ * to a direct I/O write and will cause caching to be disabled on this cookie
+ * until it is completely unused.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-void fscache_invalidate(struct fscache_cookie *cookie)
+void fscache_invalidate(struct fscache_cookie *cookie,
+ const void *aux_data, loff_t size, unsigned int flags)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- __fscache_invalidate(cookie);
+ if (fscache_cookie_enabled(cookie))
+ __fscache_invalidate(cookie, aux_data, size, flags);
}
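A sketch of invalidation on noticing a third-party change on the server; the
change counter and size are assumed to come from a server reply, and flags 0
requests an ordinary (non-DIO) invalidation:

static void myfs_server_changed(struct inode *inode, u32 change_counter,
				loff_t new_size)
{
	__be32 aux = cpu_to_be32(change_counter);

	/* Pass FSCACHE_INVAL_DIO_WRITE instead of 0 for a DIO write. */
	fscache_invalidate(myfs_i(inode)->cookie, &aux, new_size, 0);
}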
/**
- * fscache_wait_on_invalidate - Wait for invalidation to complete
- * @cookie: The cookie representing the cache object
- *
- * Wait for the invalidation of an object to complete.
+ * fscache_operation_valid - Return the operations table if resources are usable
+ * @cres: The resources to check.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Returns a pointer to the operations table if usable or NULL if not.
*/
static inline
-void fscache_wait_on_invalidate(struct fscache_cookie *cookie)
+const struct netfs_cache_ops *fscache_operation_valid(const struct netfs_cache_resources *cres)
{
- if (fscache_cookie_valid(cookie))
- __fscache_wait_on_invalidate(cookie);
+ return fscache_resources_valid(cres) ? cres->ops : NULL;
}
/**
- * fscache_reserve_space - Reserve data space for a cached object
+ * fscache_begin_read_operation - Begin a read operation for the netfs lib
+ * @cres: The cache resources for the read being performed
* @cookie: The cookie representing the cache object
- * @i_size: The amount of space to be reserved
*
- * Reserve an amount of space in the cache for the cache object attached to a
- * cookie so that a write to that object within the space can always be
- * honoured.
+ * Begin a read operation on behalf of the netfs helper library. @cres
+ * indicates the cache resources to which the operation state should be
+ * attached; @cookie indicates the cache object that will be accessed.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * This is intended to be called from the ->begin_cache_operation() netfs lib
+ * operation as implemented by the network filesystem.
+ *
+ * @cres->inval_counter is set from @cookie->inval_counter for comparison at
+ * the end of the operation. This allows invalidation during the operation to
+ * be detected by the caller.
+ *
+ * Returns:
+ * * 0 - Success
+ * * -ENOBUFS - No caching available
+ * * Other error code from the cache, such as -ENOMEM.
*/
static inline
-int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size)
+int fscache_begin_read_operation(struct netfs_cache_resources *cres,
+ struct fscache_cookie *cookie)
{
+ if (fscache_cookie_enabled(cookie))
+ return __fscache_begin_read_operation(cres, cookie);
return -ENOBUFS;
}
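The hook is expected to be a thin wrapper. A sketch, assuming the netfs lib
read request of this era carries cache_resources and inode fields, with
myfs_i() again hypothetical:

static int myfs_begin_cache_operation(struct netfs_read_request *rreq)
{
	return fscache_begin_read_operation(&rreq->cache_resources,
					    myfs_i(rreq->inode)->cookie);
}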
/**
- * fscache_read_or_alloc_page - Read a page from the cache or allocate a block
- * in which to store it
- * @cookie: The cookie representing the cache object
- * @page: The netfs page to fill if possible
- * @end_io_func: The callback to invoke when and if the page is filled
- * @context: An arbitrary piece of data to pass on to end_io_func()
- * @gfp: The conditions under which memory allocation should be made
- *
- * Read a page from the cache, or if that's not possible make a potential
- * one-block reservation in the cache into which the page may be stored once
- * fetched from the server.
+ * fscache_end_operation - End the read operation for the netfs lib
+ * @cres: The cache resources for the read operation
*
- * If the page is not backed by the cache object, or if it there's some reason
- * it can't be, -ENOBUFS will be returned and nothing more will be done for
- * that page.
- *
- * Else, if that page is backed by the cache, a read will be initiated directly
- * to the netfs's page and 0 will be returned by this function. The
- * end_io_func() callback will be invoked when the operation terminates on a
- * completion or failure. Note that the callback may be invoked before the
- * return.
- *
- * Else, if the page is unbacked, -ENODATA is returned and a block may have
- * been allocated in the cache.
- *
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Clean up the resources at the end of the read request.
*/
-static inline
-int fscache_read_or_alloc_page(struct fscache_cookie *cookie,
- struct page *page,
- fscache_rw_complete_t end_io_func,
- void *context,
- gfp_t gfp)
+static inline void fscache_end_operation(struct netfs_cache_resources *cres)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_read_or_alloc_page(cookie, page, end_io_func,
- context, gfp);
- else
- return -ENOBUFS;
-}
+ const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
-/**
- * fscache_read_or_alloc_pages - Read pages from the cache and/or allocate
- * blocks in which to store them
- * @cookie: The cookie representing the cache object
- * @mapping: The netfs inode mapping to which the pages will be attached
- * @pages: A list of potential netfs pages to be filled
- * @nr_pages: Number of pages to be read and/or allocated
- * @end_io_func: The callback to invoke when and if each page is filled
- * @context: An arbitrary piece of data to pass on to end_io_func()
- * @gfp: The conditions under which memory allocation should be made
- *
- * Read a set of pages from the cache, or if that's not possible, attempt to
- * make a potential one-block reservation for each page in the cache into which
- * that page may be stored once fetched from the server.
- *
- * If some pages are not backed by the cache object, or if it there's some
- * reason they can't be, -ENOBUFS will be returned and nothing more will be
- * done for that pages.
- *
- * Else, if some of the pages are backed by the cache, a read will be initiated
- * directly to the netfs's page and 0 will be returned by this function. The
- * end_io_func() callback will be invoked when the operation terminates on a
- * completion or failure. Note that the callback may be invoked before the
- * return.
- *
- * Else, if a page is unbacked, -ENODATA is returned and a block may have
- * been allocated in the cache.
- *
- * Because the function may want to return all of -ENOBUFS, -ENODATA and 0 in
- * regard to different pages, the return values are prioritised in that order.
- * Any pages submitted for reading are removed from the pages list.
- *
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
- */
-static inline
-int fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages,
- fscache_rw_complete_t end_io_func,
- void *context,
- gfp_t gfp)
-{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_read_or_alloc_pages(cookie, mapping, pages,
- nr_pages, end_io_func,
- context, gfp);
- else
- return -ENOBUFS;
+ if (ops)
+ ops->end_operation(cres);
}
/**
- * fscache_alloc_page - Allocate a block in which to store a page
- * @cookie: The cookie representing the cache object
- * @page: The netfs page to allocate a page for
- * @gfp: The conditions under which memory allocation should be made
+ * fscache_read - Start a read from the cache.
+ * @cres: The cache resources to use
+ * @start_pos: The beginning file offset in the cache file
+ * @iter: The buffer to fill - and also the length
+ * @read_hole: How to handle a hole in the data.
+ * @term_func: The function to call upon completion
+ * @term_func_priv: The private data for @term_func
*
- * Request Allocation a block in the cache in which to store a netfs page
- * without retrieving any contents from the cache.
+ * Start a read from the cache. @cres indicates the cache object to read from
+ * and must have been set up beforehand by fscache_begin_read_operation().
*
- * If the page is not backed by a file then -ENOBUFS will be returned and
- * nothing more will be done, and no reservation will be made.
+ * The data is read into the iterator, @iter, and that also indicates the size
+ * of the operation. @start_pos is the start position in the file, though if
+ * @read_hole is set appropriately, the cache can use SEEK_DATA to find the
+ * next piece of data, writing zeros for the hole into the iterator.
*
- * Else, a block will be allocated if one wasn't already, and 0 will be
- * returned
+ * Upon termination of the operation, @term_func will be called and supplied
+ * with @term_func_priv plus the amount of data written, if successful, or the
+ * error code otherwise.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
- */
-static inline
-int fscache_alloc_page(struct fscache_cookie *cookie,
- struct page *page,
- gfp_t gfp)
-{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_alloc_page(cookie, page, gfp);
- else
- return -ENOBUFS;
-}
-
-/**
- * fscache_readpages_cancel - Cancel read/alloc on pages
- * @cookie: The cookie representing the inode's cache object.
- * @pages: The netfs pages that we canceled write on in readpages()
+ * @read_hole indicates how a partially populated region in the cache should be
+ * handled. It can be one of a number of settings:
+ *
+ * NETFS_READ_HOLE_IGNORE - Just try to read (may return a short read).
*
- * Uncache/unreserve the pages reserved earlier in readpages() via
- * fscache_readpages_or_alloc() and similar. In most successful caches in
- * readpages() this doesn't do anything. In cases when the underlying netfs's
- * readahead failed we need to clean up the pagelist (unmark and uncache).
+ * NETFS_READ_HOLE_CLEAR - Seek for data, clearing the part of the buffer
+ * skipped over, then do as for IGNORE.
*
- * This function may sleep as it may have to clean up disk state.
+ * NETFS_READ_HOLE_FAIL - Give ENODATA if we encounter a hole.
*/
static inline
-void fscache_readpages_cancel(struct fscache_cookie *cookie,
- struct list_head *pages)
+int fscache_read(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ enum netfs_read_from_hole read_hole,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv)
{
- if (fscache_cookie_valid(cookie))
- __fscache_readpages_cancel(cookie, pages);
+ const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
+ return ops->read(cres, start_pos, iter, read_hole,
+ term_func, term_func_priv);
}
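Since completion is signalled through @term_func, a caller wanting
synchronous behaviour has to provide its own waiting. A sketch, with all
myfs_* names hypothetical:

struct myfs_read_done {
	struct completion done;
	ssize_t result;
};

static void myfs_read_terminated(void *priv, ssize_t transferred_or_error,
				 bool was_async)
{
	struct myfs_read_done *rd = priv;

	rd->result = transferred_or_error;
	complete(&rd->done);
}

static ssize_t myfs_read_from_cache(struct netfs_cache_resources *cres,
				    loff_t pos, struct iov_iter *iter)
{
	struct myfs_read_done rd = { .result = 0 };
	int ret;

	init_completion(&rd.done);
	ret = fscache_read(cres, pos, iter, NETFS_READ_HOLE_CLEAR,
			   myfs_read_terminated, &rd);
	if (ret < 0)
		return ret; /* rd is on-stack; an early terminator call is harmless */
	wait_for_completion(&rd.done);
	return rd.result;
}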
/**
- * fscache_write_page - Request storage of a page in the cache
+ * fscache_begin_write_operation - Begin a write operation for the netfs lib
+ * @cres: The cache resources for the write being performed
* @cookie: The cookie representing the cache object
- * @page: The netfs page to store
- * @gfp: The conditions under which memory allocation should be made
*
- * Request the contents of the netfs page be written into the cache. This
- * request may be ignored if no cache block is currently allocated, in which
- * case it will return -ENOBUFS.
+ * Begin a write operation on behalf of the netfs helper library. @cres
+ * indicates the cache resources to which the operation state should be
+ * attached; @cookie indicates the cache object that will be accessed.
*
- * If a cache block was already allocated, a write will be initiated and 0 will
- * be returned. The PG_fscache_write page bit is set immediately and will then
- * be cleared at the completion of the write to indicate the success or failure
- * of the operation. Note that the completion may happen before the return.
+ * @cres->inval_counter is set from @cookie->inval_counter for comparison at
+ * the end of the operation. This allows invalidation during the operation to
+ * be detected by the caller.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Returns:
+ * * 0 - Success
+ * * -ENOBUFS - No caching available
+ * * Other error code from the cache, such as -ENOMEM.
*/
static inline
-int fscache_write_page(struct fscache_cookie *cookie,
- struct page *page,
- gfp_t gfp)
+int fscache_begin_write_operation(struct netfs_cache_resources *cres,
+ struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_write_page(cookie, page, gfp);
- else
- return -ENOBUFS;
+ if (fscache_cookie_enabled(cookie))
+ return __fscache_begin_write_operation(cres, cookie);
+ return -ENOBUFS;
}
/**
- * fscache_uncache_page - Indicate that caching is no longer required on a page
- * @cookie: The cookie representing the cache object
- * @page: The netfs page that was being cached.
+ * fscache_write - Start a write to the cache.
+ * @cres: The cache resources to use
+ * @start_pos: The beginning file offset in the cache file
+ * @iter: The data to write - and also the length
+ * @term_func: The function to call upon completion
+ * @term_func_priv: The private data for @term_func
*
- * Tell the cache that we no longer want a page to be cached and that it should
- * remove any knowledge of the netfs page it may have.
+ * Start a write to the cache. @cres indicates the cache object to write to and
+ * must have been set up beforehand by fscache_begin_write_operation().
*
- * Note that this cannot cancel any outstanding I/O operations between this
- * page and the cache.
+ * The data to be written is obtained from the iterator, @iter, and that also
+ * indicates the size of the operation. @start_pos is the start position in
+ * the file.
*
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
+ * Upon termination of the operation, @term_func will be called and supplied
+ * with @term_func_priv plus the amount of data written, if successful, or the
+ * error code otherwise.
*/
static inline
-void fscache_uncache_page(struct fscache_cookie *cookie,
- struct page *page)
+int fscache_write(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv)
{
- if (fscache_cookie_valid(cookie))
- __fscache_uncache_page(cookie, page);
+ const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
+ return ops->write(cres, start_pos, iter, term_func, term_func_priv);
}
/**
- * fscache_check_page_write - Ask if a page is being writing to the cache
- * @cookie: The cookie representing the cache object
- * @page: The netfs page that is being cached.
- *
- * Ask the cache if a page is being written to the cache.
- *
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
- */
-static inline
-bool fscache_check_page_write(struct fscache_cookie *cookie,
- struct page *page)
+ * fscache_clear_page_bits - Clear the PG_fscache bits from a set of pages
+ * @mapping: The netfs inode to use as the source
+ * @start: The start position in @mapping
+ * @len: The amount of data to unlock
+ * @caching: If PG_fscache has been set
+ *
+ * Clear the PG_fscache flag from a sequence of pages and wake up anyone who's
+ * waiting.
+ */
+static inline void fscache_clear_page_bits(struct address_space *mapping,
+ loff_t start, size_t len,
+ bool caching)
{
- if (fscache_cookie_valid(cookie))
- return __fscache_check_page_write(cookie, page);
- return false;
+ if (caching)
+ __fscache_clear_page_bits(mapping, start, len);
}
/**
- * fscache_wait_on_page_write - Wait for a page to complete writing to the cache
+ * fscache_write_to_cache - Save a write to the cache and clear PG_fscache
* @cookie: The cookie representing the cache object
- * @page: The netfs page that is being cached.
- *
- * Ask the cache to wake us up when a page is no longer being written to the
- * cache.
- *
- * See Documentation/filesystems/caching/netfs-api.txt for a complete
- * description.
- */
-static inline
-void fscache_wait_on_page_write(struct fscache_cookie *cookie,
- struct page *page)
+ * @mapping: The netfs inode to use as the source
+ * @start: The start position in @mapping
+ * @len: The amount of data to write back
+ * @i_size: The new size of the inode
+ * @term_func: The function to call upon completion
+ * @term_func_priv: The private data for @term_func
+ * @caching: If PG_fscache has been set
+ *
+ * Helper function for a netfs to write dirty data from an inode into the cache
+ * object that's backing it.
+ *
+ * @start and @len describe the range of the data. This does not need to be
+ * page-aligned, but to satisfy DIO requirements, the cache may expand it up to
+ * the page boundaries on either end. All the pages covering the range must be
+ * marked with PG_fscache.
+ *
+ * If given, @term_func will be called upon completion and supplied with
+ * @term_func_priv. Note that the PG_fscache flags will have been cleared by
+ * this point, so the netfs must retain its own pin on the mapping.
+ */
+static inline void fscache_write_to_cache(struct fscache_cookie *cookie,
+ struct address_space *mapping,
+ loff_t start, size_t len, loff_t i_size,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv,
+ bool caching)
{
- if (fscache_cookie_valid(cookie))
- __fscache_wait_on_page_write(cookie, page);
-}
+ if (caching)
+ __fscache_write_to_cache(cookie, mapping, start, len, i_size,
+ term_func, term_func_priv, caching);
+ else if (term_func)
+ term_func(term_func_priv, -ENOBUFS, false);
-/**
- * fscache_maybe_release_page - Consider releasing a page, cancelling a store
- * @cookie: The cookie representing the cache object
- * @page: The netfs page that is being cached.
- * @gfp: The gfp flags passed to releasepage()
- *
- * Consider releasing a page for the vmscan algorithm, on behalf of the netfs's
- * releasepage() call. A storage request on the page may cancelled if it is
- * not currently being processed.
- *
- * The function returns true if the page no longer has a storage request on it,
- * and false if a storage request is left in place. If true is returned, the
- * page will have been passed to fscache_uncache_page(). If false is returned
- * the page cannot be freed yet.
- */
-static inline
-bool fscache_maybe_release_page(struct fscache_cookie *cookie,
- struct page *page,
- gfp_t gfp)
-{
- if (fscache_cookie_valid(cookie) && PageFsCache(page))
- return __fscache_maybe_release_page(cookie, page, gfp);
- return false;
}
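A sketch of kicking off such a write from a writeback path, fire-and-forget
(no terminator); myfs_i() is hypothetical and the range is assumed to have
been marked PG_fscache already:

static void myfs_write_dirty_range(struct inode *inode, loff_t start,
				   size_t len, bool caching)
{
	fscache_write_to_cache(myfs_i(inode)->cookie, inode->i_mapping,
			       start, len, i_size_read(inode),
			       NULL, NULL, caching);
}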
+#if __fscache_available
+bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio,
+ struct fscache_cookie *cookie);
+#else
+#define fscache_dirty_folio(MAPPING, FOLIO, COOKIE) \
+ filemap_dirty_folio(MAPPING, FOLIO)
+#endif
+
/**
- * fscache_uncache_all_inode_pages - Uncache all an inode's pages
- * @cookie: The cookie representing the inode's cache object.
- * @inode: The inode to uncache pages from.
- *
- * Uncache all the pages in an inode that are marked PG_fscache, assuming them
- * to be associated with the given cookie.
+ * fscache_unpin_writeback - Unpin writeback resources
+ * @wbc: The writeback control
+ * @cookie: The cookie referring to the cache object
*
- * This function may sleep. It will wait for pages that are being written out
- * and will wait whilst the PG_fscache mark is removed by the cache.
+ * Unpin the writeback resources pinned by fscache_dirty_folio(). This is
+ * intended to be called by the netfs's ->write_inode() method.
*/
-static inline
-void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
- struct inode *inode)
+static inline void fscache_unpin_writeback(struct writeback_control *wbc,
+ struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie))
- __fscache_uncache_all_inode_pages(cookie, inode);
+ if (wbc->unpinned_fscache_wb)
+ fscache_unuse_cookie(cookie, NULL, NULL);
}
/**
- * fscache_disable_cookie - Disable a cookie
- * @cookie: The cookie representing the cache object
- * @invalidate: Invalidate the backing object
- *
- * Disable a cookie from accepting further alloc, read, write, invalidate,
- * update or acquire operations. Outstanding operations can still be waited
- * upon and pages can still be uncached and the cookie relinquished.
- *
- * This will not return until all outstanding operations have completed.
+ * fscache_clear_inode_writeback - Clear writeback resources pinned by an inode
+ * @cookie: The cookie referring to the cache object
+ * @inode: The inode to clean up
+ * @aux: Auxiliary data to apply to the inode
*
- * If @invalidate is set, then the backing object will be invalidated and
- * detached, otherwise it will just be detached.
+ * Clear any writeback resources held by an inode when the inode is evicted.
+ * This must be called before clear_inode() is called.
*/
-static inline
-void fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
+static inline void fscache_clear_inode_writeback(struct fscache_cookie *cookie,
+ struct inode *inode,
+ const void *aux)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- __fscache_disable_cookie(cookie, invalidate);
+ if (inode->i_state & I_PINNING_FSCACHE_WB) {
+ loff_t i_size = i_size_read(inode);
+ fscache_unuse_cookie(cookie, aux, &i_size);
+ }
}
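A sketch of the expected call site, a hypothetical ->evict_inode()
implementation (note the ordering before clear_inode()):

static void myfs_evict_inode(struct inode *inode)
{
	__be32 aux = cpu_to_be32(myfs_i(inode)->change_counter);

	truncate_inode_pages_final(&inode->i_data);
	fscache_clear_inode_writeback(myfs_i(inode)->cookie, inode, &aux);
	clear_inode(inode);
}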
/**
- * fscache_enable_cookie - Reenable a cookie
- * @cookie: The cookie representing the cache object
- * @can_enable: A function to permit enablement once lock is held
- * @data: Data for can_enable()
+ * fscache_note_page_release - Note that a netfs page got released
+ * @cookie: The cookie corresponding to the file
*
- * Reenable a previously disabled cookie, allowing it to accept further alloc,
- * read, write, invalidate, update or acquire operations. An attempt will be
- * made to immediately reattach the cookie to a backing object.
- *
- * The can_enable() function is called (if not NULL) once the enablement lock
- * is held to rule on whether enablement is still permitted to go ahead.
+ * Note that a page that has been copied to the cache has been released. This
+ * means that future reads will need to look in the cache to see if it's there.
*/
static inline
-void fscache_enable_cookie(struct fscache_cookie *cookie,
- bool (*can_enable)(void *data),
- void *data)
+void fscache_note_page_release(struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie) && !fscache_cookie_enabled(cookie))
- __fscache_enable_cookie(cookie, can_enable, data);
+ /* If we've written data to the cache (HAVE_DATA) and there wasn't any
+ * data in the cache when we started (NO_DATA_TO_READ), it may no
+ * longer be true that we can skip reading from the cache - so clear
+ * the flag that causes reads to be skipped.
+ */
+ if (cookie &&
+ test_bit(FSCACHE_COOKIE_HAVE_DATA, &cookie->flags) &&
+ test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags))
+ clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
}
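A sketch of a ->releasepage() that defers to the cache; the PageFsCache()
test and the myfs_i() accessor follow the conventions of this era, but the
method as a whole is illustrative:

static int myfs_releasepage(struct page *page, gfp_t gfp)
{
	/* Still being copied to the cache?  Then it can't be freed yet. */
	if (PageFsCache(page))
		return 0;
	fscache_note_page_release(myfs_i(page->mapping->host)->cookie);
	return 1;
}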
#endif /* _LINUX_FSCACHE_H */
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
new file mode 100644
index 000000000000..c895b12737a1
--- /dev/null
+++ b/include/linux/fscrypt.h
@@ -0,0 +1,1054 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * fscrypt.h: declarations for per-file encryption
+ *
+ * Filesystems that implement per-file encryption must include this header
+ * file.
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * Written by Michael Halcrow, 2015.
+ * Modified by Jaegeuk Kim, 2015.
+ */
+#ifndef _LINUX_FSCRYPT_H
+#define _LINUX_FSCRYPT_H
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <uapi/linux/fscrypt.h>
+
+/*
+ * The lengths of all file contents blocks must be divisible by this value.
+ * This is needed to ensure that all contents encryption modes will work, as
+ * some of the supported modes don't support arbitrarily byte-aligned messages.
+ *
+ * Since the needed alignment is 16 bytes, most filesystems will meet this
+ * requirement naturally, as typical block sizes are powers of 2. However, if a
+ * filesystem can generate arbitrarily byte-aligned block lengths (e.g., via
+ * compression), then it will need to pad to this alignment before encryption.
+ */
+#define FSCRYPT_CONTENTS_ALIGNMENT 16
+
+union fscrypt_policy;
+struct fscrypt_info;
+struct fs_parameter;
+struct seq_file;
+
+struct fscrypt_str {
+ unsigned char *name;
+ u32 len;
+};
+
+struct fscrypt_name {
+ const struct qstr *usr_fname;
+ struct fscrypt_str disk_name;
+ u32 hash;
+ u32 minor_hash;
+ struct fscrypt_str crypto_buf;
+ bool is_nokey_name;
+};
+
+#define FSTR_INIT(n, l) { .name = n, .len = l }
+#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len)
+#define fname_name(p) ((p)->disk_name.name)
+#define fname_len(p) ((p)->disk_name.len)
+
+/* Maximum value for the third parameter of fscrypt_operations.set_context(). */
+#define FSCRYPT_SET_CONTEXT_MAX_SIZE 40
+
+#ifdef CONFIG_FS_ENCRYPTION
+
+/*
+ * If set, the fscrypt bounce page pool won't be allocated (unless another
+ * filesystem needs it). Set this if the filesystem always uses its own bounce
+ * pages for writes and therefore won't need the fscrypt bounce page pool.
+ */
+#define FS_CFLG_OWN_PAGES (1U << 1)
+
+/* Crypto operations for filesystems */
+struct fscrypt_operations {
+
+ /* Set of optional flags; see above for allowed flags */
+ unsigned int flags;
+
+ /*
+ * If set, this is a filesystem-specific key description prefix that
+ * will be accepted for "logon" keys for v1 fscrypt policies, in
+ * addition to the generic prefix "fscrypt:". This functionality is
+ * deprecated, so new filesystems shouldn't set this field.
+ */
+ const char *key_prefix;
+
+ /*
+ * Get the fscrypt context of the given inode.
+ *
+ * @inode: the inode whose context to get
+ * @ctx: the buffer into which to get the context
+ * @len: length of the @ctx buffer in bytes
+ *
+ * Return: On success, returns the length of the context in bytes; this
+ * may be less than @len. On failure, returns -ENODATA if the
+ * inode doesn't have a context, -ERANGE if the context is
+ * longer than @len, or another -errno code.
+ */
+ int (*get_context)(struct inode *inode, void *ctx, size_t len);
+
+ /*
+ * Set an fscrypt context on the given inode.
+ *
+ * @inode: the inode whose context to set. The inode won't already have
+ * an fscrypt context.
+ * @ctx: the context to set
+ * @len: length of @ctx in bytes (at most FSCRYPT_SET_CONTEXT_MAX_SIZE)
+ * @fs_data: If called from fscrypt_set_context(), this will be the
+ * value the filesystem passed to fscrypt_set_context().
+ * Otherwise (i.e. when called from
+ * FS_IOC_SET_ENCRYPTION_POLICY) this will be NULL.
+ *
+ * i_rwsem will be held for write.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+ int (*set_context)(struct inode *inode, const void *ctx, size_t len,
+ void *fs_data);
+
+ /*
+ * Get the dummy fscrypt policy in use on the filesystem (if any).
+ *
+ * Filesystems only need to implement this function if they support the
+ * test_dummy_encryption mount option.
+ *
+ * Return: A pointer to the dummy fscrypt policy, if the filesystem is
+ * mounted with test_dummy_encryption; otherwise NULL.
+ */
+ const union fscrypt_policy *(*get_dummy_policy)(struct super_block *sb);
+
+ /*
+ * Check whether a directory is empty. i_rwsem will be held for write.
+ */
+ bool (*empty_dir)(struct inode *inode);
+
+ /*
+ * Check whether the filesystem's inode numbers and UUID are stable,
+ * meaning that they will never be changed even by offline operations
+ * such as filesystem shrinking and therefore can be used in the
+ * encryption without the possibility of files becoming unreadable.
+ *
+ * Filesystems only need to implement this function if they want to
+ * support the FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags. These
+ * flags are designed to work around the limitations of UFS and eMMC
+ * inline crypto hardware, and they shouldn't be used in scenarios where
+ * such hardware isn't being used.
+ *
+ * Leaving this NULL is equivalent to always returning false.
+ */
+ bool (*has_stable_inodes)(struct super_block *sb);
+
+ /*
+ * Get the number of bits that the filesystem uses to represent inode
+ * numbers and file logical block numbers.
+ *
+ * By default, both of these are assumed to be 64-bit. This function
+ * can be implemented to declare that either or both of these numbers is
+ * shorter, which may allow the use of the
+ * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags and/or the use of
+ * inline crypto hardware whose maximum DUN length is less than 64 bits
+ * (e.g., eMMC v5.2 spec compliant hardware). This function only needs
+ * to be implemented if support for one of these features is needed.
+ */
+ void (*get_ino_and_lblk_bits)(struct super_block *sb,
+ int *ino_bits_ret, int *lblk_bits_ret);
+
+ /*
+ * Return an array of pointers to the block devices to which the
+ * filesystem may write encrypted file contents, NULL if the filesystem
+ * only has a single such block device, or an ERR_PTR() on error.
+ *
+ * On successful non-NULL return, *num_devs is set to the number of
+ * devices in the returned array. The caller must free the returned
+ * array using kfree().
+ *
+ * If the filesystem can use multiple block devices (other than block
+ * devices that aren't used for encrypted file contents, such as
+ * external journal devices), and wants to support inline encryption,
+ * then it must implement this function. Otherwise it's not needed.
+ */
+ struct block_device **(*get_devices)(struct super_block *sb,
+ unsigned int *num_devs);
+};
+
+static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode)
+{
+ /*
+ * Pairs with the cmpxchg_release() in fscrypt_setup_encryption_info().
+ * I.e., another task may publish ->i_crypt_info concurrently, executing
+ * a RELEASE barrier. We need to use smp_load_acquire() here to safely
+ * ACQUIRE the memory the other task published.
+ */
+ return smp_load_acquire(&inode->i_crypt_info);
+}
+
+/**
+ * fscrypt_needs_contents_encryption() - check whether an inode needs
+ * contents encryption
+ * @inode: the inode to check
+ *
+ * Return: %true iff the inode is an encrypted regular file and the kernel was
+ * built with fscrypt support.
+ *
+ * If you need to know whether the encrypt bit is set even when the kernel was
+ * built without fscrypt support, you must use IS_ENCRYPTED() directly instead.
+ */
+static inline bool fscrypt_needs_contents_encryption(const struct inode *inode)
+{
+ return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
+}
+
+/*
+ * When d_splice_alias() moves a directory's no-key alias to its plaintext alias
+ * as a result of the encryption key being added, DCACHE_NOKEY_NAME must be
+ * cleared. Note that we don't have to support arbitrary moves of this flag
+ * because fscrypt doesn't allow no-key names to be the source or target of a
+ * rename().
+ */
+static inline void fscrypt_handle_d_move(struct dentry *dentry)
+{
+ dentry->d_flags &= ~DCACHE_NOKEY_NAME;
+}
+
+/**
+ * fscrypt_is_nokey_name() - test whether a dentry is a no-key name
+ * @dentry: the dentry to check
+ *
+ * This returns true if the dentry is a no-key dentry. A no-key dentry is a
+ * dentry that was created in an encrypted directory that hasn't had its
+ * encryption key added yet. Such dentries may be either positive or negative.
+ *
+ * When a filesystem is asked to create a new filename in an encrypted directory
+ * and the new filename's dentry is a no-key dentry, it must fail the operation
+ * with ENOKEY. This includes ->create(), ->mkdir(), ->mknod(), ->symlink(),
+ * ->rename(), and ->link(). (However, ->rename() and ->link() are already
+ * handled by fscrypt_prepare_rename() and fscrypt_prepare_link().)
+ *
+ * This is necessary because creating a filename requires the directory's
+ * encryption key, but just checking for the key on the directory inode during
+ * the final filesystem operation doesn't guarantee that the key was available
+ * during the preceding dentry lookup. And the key must have already been
+ * available during the dentry lookup in order for it to have been checked
+ * whether the filename already exists in the directory and for the new file's
+ * dentry not to be invalidated due to it incorrectly having the no-key flag.
+ *
+ * Return: %true if the dentry is a no-key name
+ */
+static inline bool fscrypt_is_nokey_name(const struct dentry *dentry)
+{
+ return dentry->d_flags & DCACHE_NOKEY_NAME;
+}
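A sketch of the check a filesystem would place at the head of a creation
operation; myfs_may_create() is purely illustrative:

static int myfs_may_create(struct inode *dir, struct dentry *dentry)
{
	/* The directory's key was absent when this dentry was looked up. */
	if (fscrypt_is_nokey_name(dentry))
		return -ENOKEY;
	return 0;
}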
+
+/* crypto.c */
+void fscrypt_enqueue_decrypt_work(struct work_struct *);
+
+struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
+ unsigned int len,
+ unsigned int offs,
+ gfp_t gfp_flags);
+int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
+ unsigned int len, unsigned int offs,
+ u64 lblk_num, gfp_t gfp_flags);
+
+int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len,
+ size_t offs);
+int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
+ unsigned int len, unsigned int offs,
+ u64 lblk_num);
+
+static inline bool fscrypt_is_bounce_page(struct page *page)
+{
+ return page->mapping == NULL;
+}
+
+static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
+{
+ return (struct page *)page_private(bounce_page);
+}
+
+static inline bool fscrypt_is_bounce_folio(struct folio *folio)
+{
+ return folio->mapping == NULL;
+}
+
+static inline struct folio *fscrypt_pagecache_folio(struct folio *bounce_folio)
+{
+ return bounce_folio->private;
+}
+
+void fscrypt_free_bounce_page(struct page *bounce_page);
+
+/* policy.c */
+int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg);
+int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg);
+int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *arg);
+int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg);
+int fscrypt_has_permitted_context(struct inode *parent, struct inode *child);
+int fscrypt_context_for_new_inode(void *ctx, struct inode *inode);
+int fscrypt_set_context(struct inode *inode, void *fs_data);
+
+struct fscrypt_dummy_policy {
+ const union fscrypt_policy *policy;
+};
+
+int fscrypt_parse_test_dummy_encryption(const struct fs_parameter *param,
+ struct fscrypt_dummy_policy *dummy_policy);
+bool fscrypt_dummy_policies_equal(const struct fscrypt_dummy_policy *p1,
+ const struct fscrypt_dummy_policy *p2);
+void fscrypt_show_test_dummy_encryption(struct seq_file *seq, char sep,
+ struct super_block *sb);
+static inline bool
+fscrypt_is_dummy_policy_set(const struct fscrypt_dummy_policy *dummy_policy)
+{
+ return dummy_policy->policy != NULL;
+}
+static inline void
+fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy)
+{
+ kfree(dummy_policy->policy);
+ dummy_policy->policy = NULL;
+}
+
+/* keyring.c */
+void fscrypt_destroy_keyring(struct super_block *sb);
+int fscrypt_ioctl_add_key(struct file *filp, void __user *arg);
+int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg);
+int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *arg);
+int fscrypt_ioctl_get_key_status(struct file *filp, void __user *arg);
+
+/* keysetup.c */
+int fscrypt_prepare_new_inode(struct inode *dir, struct inode *inode,
+ bool *encrypt_ret);
+void fscrypt_put_encryption_info(struct inode *inode);
+void fscrypt_free_inode(struct inode *inode);
+int fscrypt_drop_inode(struct inode *inode);
+
+/* fname.c */
+int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname,
+ u8 *out, unsigned int olen);
+bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len,
+ u32 max_len, u32 *encrypted_len_ret);
+int fscrypt_setup_filename(struct inode *inode, const struct qstr *iname,
+ int lookup, struct fscrypt_name *fname);
+
+static inline void fscrypt_free_filename(struct fscrypt_name *fname)
+{
+ kfree(fname->crypto_buf.name);
+}
+
+int fscrypt_fname_alloc_buffer(u32 max_encrypted_len,
+ struct fscrypt_str *crypto_str);
+void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str);
+int fscrypt_fname_disk_to_usr(const struct inode *inode,
+ u32 hash, u32 minor_hash,
+ const struct fscrypt_str *iname,
+ struct fscrypt_str *oname);
+bool fscrypt_match_name(const struct fscrypt_name *fname,
+ const u8 *de_name, u32 de_name_len);
+u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name);
+int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags);
+
+/* bio.c */
+bool fscrypt_decrypt_bio(struct bio *bio);
+int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
+ sector_t pblk, unsigned int len);
+
+/* hooks.c */
+int fscrypt_file_open(struct inode *inode, struct file *filp);
+int __fscrypt_prepare_link(struct inode *inode, struct inode *dir,
+ struct dentry *dentry);
+int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags);
+int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry,
+ struct fscrypt_name *fname);
+int fscrypt_prepare_lookup_partial(struct inode *dir, struct dentry *dentry);
+int __fscrypt_prepare_readdir(struct inode *dir);
+int __fscrypt_prepare_setattr(struct dentry *dentry, struct iattr *attr);
+int fscrypt_prepare_setflags(struct inode *inode,
+ unsigned int oldflags, unsigned int flags);
+int fscrypt_prepare_symlink(struct inode *dir, const char *target,
+ unsigned int len, unsigned int max_len,
+ struct fscrypt_str *disk_link);
+int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
+ unsigned int len, struct fscrypt_str *disk_link);
+const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
+ unsigned int max_size,
+ struct delayed_call *done);
+int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat);
+static inline void fscrypt_set_ops(struct super_block *sb,
+ const struct fscrypt_operations *s_cop)
+{
+ sb->s_cop = s_cop;
+}
+#else /* !CONFIG_FS_ENCRYPTION */
+
+static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode)
+{
+ return NULL;
+}
+
+static inline bool fscrypt_needs_contents_encryption(const struct inode *inode)
+{
+ return false;
+}
+
+static inline void fscrypt_handle_d_move(struct dentry *dentry)
+{
+}
+
+static inline bool fscrypt_is_nokey_name(const struct dentry *dentry)
+{
+ return false;
+}
+
+/* crypto.c */
+static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
+{
+}
+
+static inline struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
+ unsigned int len,
+ unsigned int offs,
+ gfp_t gfp_flags)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int fscrypt_encrypt_block_inplace(const struct inode *inode,
+ struct page *page,
+ unsigned int len,
+ unsigned int offs, u64 lblk_num,
+ gfp_t gfp_flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_decrypt_pagecache_blocks(struct folio *folio,
+ size_t len, size_t offs)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_decrypt_block_inplace(const struct inode *inode,
+ struct page *page,
+ unsigned int len,
+ unsigned int offs, u64 lblk_num)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool fscrypt_is_bounce_page(struct page *page)
+{
+ return false;
+}
+
+static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
+{
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-EINVAL);
+}
+
+static inline bool fscrypt_is_bounce_folio(struct folio *folio)
+{
+ return false;
+}
+
+static inline struct folio *fscrypt_pagecache_folio(struct folio *bounce_folio)
+{
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void fscrypt_free_bounce_page(struct page *bounce_page)
+{
+}
+
+/* policy.c */
+static inline int fscrypt_ioctl_set_policy(struct file *filp,
+ const void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_ioctl_get_policy_ex(struct file *filp,
+ void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_has_permitted_context(struct inode *parent,
+ struct inode *child)
+{
+ return 0;
+}
+
+static inline int fscrypt_set_context(struct inode *inode, void *fs_data)
+{
+ return -EOPNOTSUPP;
+}
+
+struct fscrypt_dummy_policy {
+};
+
+static inline int
+fscrypt_parse_test_dummy_encryption(const struct fs_parameter *param,
+ struct fscrypt_dummy_policy *dummy_policy)
+{
+ return -EINVAL;
+}
+
+static inline bool
+fscrypt_dummy_policies_equal(const struct fscrypt_dummy_policy *p1,
+ const struct fscrypt_dummy_policy *p2)
+{
+ return true;
+}
+
+static inline void fscrypt_show_test_dummy_encryption(struct seq_file *seq,
+ char sep,
+ struct super_block *sb)
+{
+}
+
+static inline bool
+fscrypt_is_dummy_policy_set(const struct fscrypt_dummy_policy *dummy_policy)
+{
+ return false;
+}
+
+static inline void
+fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy)
+{
+}
+
+/* keyring.c */
+static inline void fscrypt_destroy_keyring(struct super_block *sb)
+{
+}
+
+static inline int fscrypt_ioctl_add_key(struct file *filp, void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_ioctl_remove_key_all_users(struct file *filp,
+ void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_ioctl_get_key_status(struct file *filp,
+ void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+/* keysetup.c */
+
+static inline int fscrypt_prepare_new_inode(struct inode *dir,
+ struct inode *inode,
+ bool *encrypt_ret)
+{
+ if (IS_ENCRYPTED(dir))
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static inline void fscrypt_put_encryption_info(struct inode *inode)
+{
+ return;
+}
+
+static inline void fscrypt_free_inode(struct inode *inode)
+{
+}
+
+static inline int fscrypt_drop_inode(struct inode *inode)
+{
+ return 0;
+}
+
+/* fname.c */
+static inline int fscrypt_setup_filename(struct inode *dir,
+ const struct qstr *iname,
+ int lookup, struct fscrypt_name *fname)
+{
+ if (IS_ENCRYPTED(dir))
+ return -EOPNOTSUPP;
+
+ memset(fname, 0, sizeof(*fname));
+ fname->usr_fname = iname;
+ fname->disk_name.name = (unsigned char *)iname->name;
+ fname->disk_name.len = iname->len;
+ return 0;
+}
+
+static inline void fscrypt_free_filename(struct fscrypt_name *fname)
+{
+ return;
+}
+
+static inline int fscrypt_fname_alloc_buffer(u32 max_encrypted_len,
+ struct fscrypt_str *crypto_str)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
+{
+ return;
+}
+
+static inline int fscrypt_fname_disk_to_usr(const struct inode *inode,
+ u32 hash, u32 minor_hash,
+ const struct fscrypt_str *iname,
+ struct fscrypt_str *oname)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
+ const u8 *de_name, u32 de_name_len)
+{
+ /* Encryption support disabled; use standard comparison */
+ if (de_name_len != fname->disk_name.len)
+ return false;
+ return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
+}
+
+static inline u64 fscrypt_fname_siphash(const struct inode *dir,
+ const struct qstr *name)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static inline int fscrypt_d_revalidate(struct dentry *dentry,
+ unsigned int flags)
+{
+ return 1;
+}
+
+/* bio.c */
+static inline bool fscrypt_decrypt_bio(struct bio *bio)
+{
+ return true;
+}
+
+static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
+ sector_t pblk, unsigned int len)
+{
+ return -EOPNOTSUPP;
+}
+
+/* hooks.c */
+
+static inline int fscrypt_file_open(struct inode *inode, struct file *filp)
+{
+ if (IS_ENCRYPTED(inode))
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static inline int __fscrypt_prepare_link(struct inode *inode, struct inode *dir,
+ struct dentry *dentry)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_rename(struct inode *old_dir,
+ struct dentry *old_dentry,
+ struct inode *new_dir,
+ struct dentry *new_dentry,
+ unsigned int flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_lookup(struct inode *dir,
+ struct dentry *dentry,
+ struct fscrypt_name *fname)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_prepare_lookup_partial(struct inode *dir,
+ struct dentry *dentry)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_readdir(struct inode *dir)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_setattr(struct dentry *dentry,
+ struct iattr *attr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_prepare_setflags(struct inode *inode,
+ unsigned int oldflags,
+ unsigned int flags)
+{
+ return 0;
+}
+
+static inline int fscrypt_prepare_symlink(struct inode *dir,
+ const char *target,
+ unsigned int len,
+ unsigned int max_len,
+ struct fscrypt_str *disk_link)
+{
+ if (IS_ENCRYPTED(dir))
+ return -EOPNOTSUPP;
+ disk_link->name = (unsigned char *)target;
+ disk_link->len = len + 1;
+ if (disk_link->len > max_len)
+ return -ENAMETOOLONG;
+ return 0;
+}
+
+static inline int __fscrypt_encrypt_symlink(struct inode *inode,
+ const char *target,
+ unsigned int len,
+ struct fscrypt_str *disk_link)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline const char *fscrypt_get_symlink(struct inode *inode,
+ const void *caddr,
+ unsigned int max_size,
+ struct delayed_call *done)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int fscrypt_symlink_getattr(const struct path *path,
+ struct kstat *stat)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_set_ops(struct super_block *sb,
+ const struct fscrypt_operations *s_cop)
+{
+}
+
+#endif /* !CONFIG_FS_ENCRYPTION */
+
+/* inline_crypt.c */
+#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
+
+bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode);
+
+void fscrypt_set_bio_crypt_ctx(struct bio *bio,
+ const struct inode *inode, u64 first_lblk,
+ gfp_t gfp_mask);
+
+void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
+ const struct buffer_head *first_bh,
+ gfp_t gfp_mask);
+
+bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
+ u64 next_lblk);
+
+bool fscrypt_mergeable_bio_bh(struct bio *bio,
+ const struct buffer_head *next_bh);
+
+bool fscrypt_dio_supported(struct inode *inode);
+
+u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks);
+
+#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
+
+static inline bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
+{
+ return false;
+}
+
+static inline void fscrypt_set_bio_crypt_ctx(struct bio *bio,
+ const struct inode *inode,
+ u64 first_lblk, gfp_t gfp_mask) { }
+
+static inline void fscrypt_set_bio_crypt_ctx_bh(
+ struct bio *bio,
+ const struct buffer_head *first_bh,
+ gfp_t gfp_mask) { }
+
+static inline bool fscrypt_mergeable_bio(struct bio *bio,
+ const struct inode *inode,
+ u64 next_lblk)
+{
+ return true;
+}
+
+static inline bool fscrypt_mergeable_bio_bh(struct bio *bio,
+ const struct buffer_head *next_bh)
+{
+ return true;
+}
+
+static inline bool fscrypt_dio_supported(struct inode *inode)
+{
+ return !fscrypt_needs_contents_encryption(inode);
+}
+
+static inline u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk,
+ u64 nr_blocks)
+{
+ return nr_blocks;
+}
+#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
+
+/**
+ * fscrypt_inode_uses_inline_crypto() - test whether an inode uses inline
+ * encryption
+ * @inode: an inode. If encrypted, its key must be set up.
+ *
+ * Return: true if the inode requires file contents encryption and if the
+ * encryption should be done in the block layer via blk-crypto rather
+ * than in the filesystem layer.
+ */
+static inline bool fscrypt_inode_uses_inline_crypto(const struct inode *inode)
+{
+ return fscrypt_needs_contents_encryption(inode) &&
+ __fscrypt_inode_uses_inline_crypto(inode);
+}
+
+/**
+ * fscrypt_inode_uses_fs_layer_crypto() - test whether an inode uses fs-layer
+ * encryption
+ * @inode: an inode. If encrypted, its key must be set up.
+ *
+ * Return: true if the inode requires file contents encryption and if the
+ * encryption should be done in the filesystem layer rather than in the
+ * block layer via blk-crypto.
+ */
+static inline bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode)
+{
+ return fscrypt_needs_contents_encryption(inode) &&
+ !__fscrypt_inode_uses_inline_crypto(inode);
+}
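A sketch of dispatching between the two cases on a write path; only the two
predicates and fscrypt_encrypt_block_inplace() are from this header, the
rest is assumed:

static int myfs_encrypt_for_write(struct inode *inode, struct page *page,
				  unsigned int len, u64 lblk)
{
	if (fscrypt_inode_uses_inline_crypto(inode))
		return 0;	/* the bio will carry a blk-crypto context */
	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		return fscrypt_encrypt_block_inplace(inode, page, len, 0,
						     lblk, GFP_NOFS);
	return 0;		/* not encrypted */
}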
+
+/**
+ * fscrypt_has_encryption_key() - check whether an inode has had its key set up
+ * @inode: the inode to check
+ *
+ * Return: %true if the inode has had its encryption key set up, else %false.
+ *
+ * Usually this should be preceded by fscrypt_get_encryption_info() to try to
+ * set up the key first.
+ */
+static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+{
+ return fscrypt_get_info(inode) != NULL;
+}
+
+/**
+ * fscrypt_prepare_link() - prepare to link an inode into a possibly-encrypted
+ * directory
+ * @old_dentry: an existing dentry for the inode being linked
+ * @dir: the target directory
+ * @dentry: negative dentry for the target filename
+ *
+ * A new link can only be added to an encrypted directory if the directory's
+ * encryption key is available --- since otherwise we'd have no way to encrypt
+ * the filename.
+ *
+ * We also verify that the link will not violate the constraint that all files
+ * in an encrypted directory tree use the same encryption policy.
+ *
+ * Return: 0 on success, -ENOKEY if the directory's encryption key is missing,
+ * -EXDEV if the link would result in an inconsistent encryption policy, or
+ * another -errno code.
+ */
+static inline int fscrypt_prepare_link(struct dentry *old_dentry,
+ struct inode *dir,
+ struct dentry *dentry)
+{
+ if (IS_ENCRYPTED(dir))
+ return __fscrypt_prepare_link(d_inode(old_dentry), dir, dentry);
+ return 0;
+}
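A sketch of the expected use in a filesystem's ->link() method, where
myfs_add_link() stands in for the filesystem's own directory-entry code:

static int myfs_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	int err = fscrypt_prepare_link(old_dentry, dir, dentry);

	if (err)
		return err;
	return myfs_add_link(dir, dentry, d_inode(old_dentry));
}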
+
+/**
+ * fscrypt_prepare_rename() - prepare for a rename between possibly-encrypted
+ * directories
+ * @old_dir: source directory
+ * @old_dentry: dentry for source file
+ * @new_dir: target directory
+ * @new_dentry: dentry for target location (may be negative unless exchanging)
+ * @flags: rename flags (we care at least about %RENAME_EXCHANGE)
+ *
+ * Prepare for ->rename() where the source and/or target directories may be
+ * encrypted. A new link can only be added to an encrypted directory if the
+ * directory's encryption key is available --- since otherwise we'd have no way
+ * to encrypt the filename. A rename to an existing name, on the other hand,
+ * *is* cryptographically possible without the key. However, we take the more
+ * conservative approach and just forbid all no-key renames.
+ *
+ * We also verify that the rename will not violate the constraint that all files
+ * in an encrypted directory tree use the same encryption policy.
+ *
+ * Return: 0 on success, -ENOKEY if an encryption key is missing, -EXDEV if the
+ * rename would cause inconsistent encryption policies, or another -errno code.
+ */
+static inline int fscrypt_prepare_rename(struct inode *old_dir,
+ struct dentry *old_dentry,
+ struct inode *new_dir,
+ struct dentry *new_dentry,
+ unsigned int flags)
+{
+ if (IS_ENCRYPTED(old_dir) || IS_ENCRYPTED(new_dir))
+ return __fscrypt_prepare_rename(old_dir, old_dentry,
+ new_dir, new_dentry, flags);
+ return 0;
+}
+
+/**
+ * fscrypt_prepare_lookup() - prepare to lookup a name in a possibly-encrypted
+ * directory
+ * @dir: directory being searched
+ * @dentry: filename being looked up
+ * @fname: (output) the name to use to search the on-disk directory
+ *
+ * Prepare for ->lookup() in a directory which may be encrypted by determining
+ * the name that will actually be used to search the directory on-disk. If the
+ * directory's encryption policy is supported by this kernel and its encryption
+ * key is available, then the lookup is assumed to be by plaintext name;
+ * otherwise, it is assumed to be by no-key name.
+ *
+ * This will set DCACHE_NOKEY_NAME on the dentry if the lookup is by no-key
+ * name. In this case the filesystem must assign the dentry a dentry_operations
+ * which contains fscrypt_d_revalidate (or contains a d_revalidate method that
+ * calls fscrypt_d_revalidate), so that the dentry will be invalidated if the
+ * directory's encryption key is later added.
+ *
+ * Return: 0 on success; -ENOENT if the directory's key is unavailable but the
+ * filename isn't a valid no-key name, so a negative dentry should be created;
+ * or another -errno code.
+ */
+static inline int fscrypt_prepare_lookup(struct inode *dir,
+ struct dentry *dentry,
+ struct fscrypt_name *fname)
+{
+ if (IS_ENCRYPTED(dir))
+ return __fscrypt_prepare_lookup(dir, dentry, fname);
+
+ memset(fname, 0, sizeof(*fname));
+ fname->usr_fname = &dentry->d_name;
+ fname->disk_name.name = (unsigned char *)dentry->d_name.name;
+ fname->disk_name.len = dentry->d_name.len;
+ return 0;
+}
+
+/**
+ * fscrypt_prepare_readdir() - prepare to read a possibly-encrypted directory
+ * @dir: the directory inode
+ *
+ * If the directory is encrypted and it doesn't already have its encryption key
+ * set up, try to set it up so that the filenames will be listed in plaintext
+ * form rather than in no-key form.
+ *
+ * Return: 0 on success; -errno on error. Note that the encryption key being
+ * unavailable is not considered an error. It is also not an error if
+ * the encryption policy is unsupported by this kernel; that is treated
+ * like the key being unavailable, so that files can still be deleted.
+ */
+static inline int fscrypt_prepare_readdir(struct inode *dir)
+{
+ if (IS_ENCRYPTED(dir))
+ return __fscrypt_prepare_readdir(dir);
+ return 0;
+}
+
+/**
+ * fscrypt_prepare_setattr() - prepare to change a possibly-encrypted inode's
+ * attributes
+ * @dentry: dentry through which the inode is being changed
+ * @attr: attributes to change
+ *
+ * Prepare for ->setattr() on a possibly-encrypted inode. On an encrypted file,
+ * most attribute changes are allowed even without the encryption key. However,
+ * without the encryption key we do have to forbid truncates. This is needed
+ * because the size being truncated to may not be a multiple of the filesystem
+ * block size, and in that case we'd have to decrypt the final block, zero the
+ * portion past i_size, and re-encrypt it. (We *could* allow truncating to a
+ * filesystem block boundary, but it's simpler to just forbid all truncates ---
+ * and we already forbid all other contents modifications without the key.)
+ *
+ * Return: 0 on success, -ENOKEY if the key is missing, or another -errno code
+ * if a problem occurred while setting up the encryption key.
+ */
+static inline int fscrypt_prepare_setattr(struct dentry *dentry,
+ struct iattr *attr)
+{
+ if (IS_ENCRYPTED(d_inode(dentry)))
+ return __fscrypt_prepare_setattr(dentry, attr);
+ return 0;
+}
+
+/**
+ * fscrypt_encrypt_symlink() - encrypt the symlink target if needed
+ * @inode: symlink inode
+ * @target: plaintext symlink target
+ * @len: length of @target excluding null terminator
+ * @disk_link: (in/out) the on-disk symlink target being prepared
+ *
+ * If the symlink target needs to be encrypted, then this function encrypts it
+ * into @disk_link->name. fscrypt_prepare_symlink() must have been called
+ * previously to compute @disk_link->len. If the filesystem did not allocate a
+ * buffer for @disk_link->name after calling fscrypt_prepare_symlink(), then one
+ * will be kmalloc()'ed and the filesystem will be responsible for freeing it.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static inline int fscrypt_encrypt_symlink(struct inode *inode,
+ const char *target,
+ unsigned int len,
+ struct fscrypt_str *disk_link)
+{
+ if (IS_ENCRYPTED(inode))
+ return __fscrypt_encrypt_symlink(inode, target, len, disk_link);
+ return 0;
+}
+
+/* If *pagep is a bounce page, free it and set *pagep to the pagecache page */
+static inline void fscrypt_finalize_bounce_page(struct page **pagep)
+{
+ struct page *page = *pagep;
+
+ if (fscrypt_is_bounce_page(page)) {
+ *pagep = fscrypt_pagecache_page(page);
+ fscrypt_free_bounce_page(page);
+ }
+}
+
+#endif /* _LINUX_FSCRYPT_H */
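
To see how the inline-crypto helpers above are meant to compose, here is a minimal sketch of a filesystem write path; it is illustrative only, not code from this patch, and fs_encrypt_block() is a hypothetical fs-layer helper:

/*
 * Sketch only; fs_encrypt_block() is hypothetical, standing in for a
 * filesystem's own fs-layer encryption routine.
 */
static int fs_write_data_block(struct inode *inode, struct bio *bio,
			       struct page *page, u64 lblk)
{
	if (fscrypt_inode_uses_inline_crypto(inode)) {
		/* blk-crypto encrypts in the block layer; just tag the bio */
		fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
		return 0;
	}
	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		/* encrypt into a bounce page before bio submission */
		return fs_encrypt_block(inode, page, lblk);
	return 0;	/* plaintext file */
}

Note that the two predicates are mutually exclusive for an encrypted inode, so exactly one branch fires per file.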
diff --git a/include/linux/fscrypt_common.h b/include/linux/fscrypt_common.h
deleted file mode 100644
index 97f738628b36..000000000000
--- a/include/linux/fscrypt_common.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * fscrypt_common.h: common declarations for per-file encryption
- *
- * Copyright (C) 2015, Google, Inc.
- *
- * Written by Michael Halcrow, 2015.
- * Modified by Jaegeuk Kim, 2015.
- */
-
-#ifndef _LINUX_FSCRYPT_COMMON_H
-#define _LINUX_FSCRYPT_COMMON_H
-
-#include <linux/key.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/bio.h>
-#include <linux/dcache.h>
-#include <crypto/skcipher.h>
-#include <uapi/linux/fs.h>
-
-#define FS_CRYPTO_BLOCK_SIZE 16
-
-struct fscrypt_info;
-
-struct fscrypt_ctx {
- union {
- struct {
- struct page *bounce_page; /* Ciphertext page */
- struct page *control_page; /* Original page */
- } w;
- struct {
- struct bio *bio;
- struct work_struct work;
- } r;
- struct list_head free_list; /* Free list */
- };
- u8 flags; /* Flags */
-};
-
-/**
- * For encrypted symlinks, the ciphertext length is stored at the beginning
- * of the string in little-endian format.
- */
-struct fscrypt_symlink_data {
- __le16 len;
- char encrypted_path[1];
-} __packed;
-
-struct fscrypt_str {
- unsigned char *name;
- u32 len;
-};
-
-struct fscrypt_name {
- const struct qstr *usr_fname;
- struct fscrypt_str disk_name;
- u32 hash;
- u32 minor_hash;
- struct fscrypt_str crypto_buf;
-};
-
-#define FSTR_INIT(n, l) { .name = n, .len = l }
-#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len)
-#define fname_name(p) ((p)->disk_name.name)
-#define fname_len(p) ((p)->disk_name.len)
-
-/*
- * fscrypt superblock flags
- */
-#define FS_CFLG_OWN_PAGES (1U << 1)
-
-/*
- * crypto operations for filesystems
- */
-struct fscrypt_operations {
- unsigned int flags;
- const char *key_prefix;
- int (*get_context)(struct inode *, void *, size_t);
- int (*set_context)(struct inode *, const void *, size_t, void *);
- bool (*dummy_context)(struct inode *);
- bool (*is_encrypted)(struct inode *);
- bool (*empty_dir)(struct inode *);
- unsigned (*max_namelen)(struct inode *);
-};
-
-/* Maximum value for the third parameter of fscrypt_operations.set_context(). */
-#define FSCRYPT_SET_CONTEXT_MAX_SIZE 28
-
-static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
-{
- if (inode->i_sb->s_cop->dummy_context &&
- inode->i_sb->s_cop->dummy_context(inode))
- return true;
- return false;
-}
-
-static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
- u32 filenames_mode)
-{
- if (contents_mode == FS_ENCRYPTION_MODE_AES_128_CBC &&
- filenames_mode == FS_ENCRYPTION_MODE_AES_128_CTS)
- return true;
-
- if (contents_mode == FS_ENCRYPTION_MODE_AES_256_XTS &&
- filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
- return true;
-
- return false;
-}
-
-static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
-{
- if (str->len == 1 && str->name[0] == '.')
- return true;
-
- if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
- return true;
-
- return false;
-}
-
-static inline struct page *fscrypt_control_page(struct page *page)
-{
-#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
- return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
-#else
- WARN_ON_ONCE(1);
- return ERR_PTR(-EINVAL);
-#endif
-}
-
-static inline int fscrypt_has_encryption_key(const struct inode *inode)
-{
-#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
- return (inode->i_crypt_info != NULL);
-#else
- return 0;
-#endif
-}
-
-#endif /* _LINUX_FSCRYPT_COMMON_H */
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
deleted file mode 100644
index ec406aed2f2f..000000000000
--- a/include/linux/fscrypt_notsupp.h
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * fscrypt_notsupp.h
- *
- * This stubs out the fscrypt functions for filesystems configured without
- * encryption support.
- */
-
-#ifndef _LINUX_FSCRYPT_NOTSUPP_H
-#define _LINUX_FSCRYPT_NOTSUPP_H
-
-#include <linux/fscrypt_common.h>
-
-/* crypto.c */
-static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
- gfp_t gfp_flags)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
-{
- return;
-}
-
-static inline struct page *fscrypt_encrypt_page(const struct inode *inode,
- struct page *page,
- unsigned int len,
- unsigned int offs,
- u64 lblk_num, gfp_t gfp_flags)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline int fscrypt_decrypt_page(const struct inode *inode,
- struct page *page,
- unsigned int len, unsigned int offs,
- u64 lblk_num)
-{
- return -EOPNOTSUPP;
-}
-
-
-static inline void fscrypt_restore_control_page(struct page *page)
-{
- return;
-}
-
-static inline void fscrypt_set_d_op(struct dentry *dentry)
-{
- return;
-}
-
-static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
-{
- return;
-}
-
-/* policy.c */
-static inline int fscrypt_ioctl_set_policy(struct file *filp,
- const void __user *arg)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int fscrypt_has_permitted_context(struct inode *parent,
- struct inode *child)
-{
- return 0;
-}
-
-static inline int fscrypt_inherit_context(struct inode *parent,
- struct inode *child,
- void *fs_data, bool preload)
-{
- return -EOPNOTSUPP;
-}
-
-/* keyinfo.c */
-static inline int fscrypt_get_encryption_info(struct inode *inode)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void fscrypt_put_encryption_info(struct inode *inode,
- struct fscrypt_info *ci)
-{
- return;
-}
-
- /* fname.c */
-static inline int fscrypt_setup_filename(struct inode *dir,
- const struct qstr *iname,
- int lookup, struct fscrypt_name *fname)
-{
- if (dir->i_sb->s_cop->is_encrypted(dir))
- return -EOPNOTSUPP;
-
- memset(fname, 0, sizeof(struct fscrypt_name));
- fname->usr_fname = iname;
- fname->disk_name.name = (unsigned char *)iname->name;
- fname->disk_name.len = iname->len;
- return 0;
-}
-
-static inline void fscrypt_free_filename(struct fscrypt_name *fname)
-{
- return;
-}
-
-static inline u32 fscrypt_fname_encrypted_size(const struct inode *inode,
- u32 ilen)
-{
- /* never happens */
- WARN_ON(1);
- return 0;
-}
-
-static inline int fscrypt_fname_alloc_buffer(const struct inode *inode,
- u32 ilen,
- struct fscrypt_str *crypto_str)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
-{
- return;
-}
-
-static inline int fscrypt_fname_disk_to_usr(struct inode *inode,
- u32 hash, u32 minor_hash,
- const struct fscrypt_str *iname,
- struct fscrypt_str *oname)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int fscrypt_fname_usr_to_disk(struct inode *inode,
- const struct qstr *iname,
- struct fscrypt_str *oname)
-{
- return -EOPNOTSUPP;
-}
-
-static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
- const u8 *de_name, u32 de_name_len)
-{
- /* Encryption support disabled; use standard comparison */
- if (de_name_len != fname->disk_name.len)
- return false;
- return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
-}
-
-/* bio.c */
-static inline void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx,
- struct bio *bio)
-{
- return;
-}
-
-static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)
-{
- return;
-}
-
-static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
- sector_t pblk, unsigned int len)
-{
- return -EOPNOTSUPP;
-}
-
-#endif /* _LINUX_FSCRYPT_NOTSUPP_H */
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
deleted file mode 100644
index 32e2fcf13b01..000000000000
--- a/include/linux/fscrypt_supp.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * fscrypt_supp.h
- *
- * This is included by filesystems configured with encryption support.
- */
-
-#ifndef _LINUX_FSCRYPT_SUPP_H
-#define _LINUX_FSCRYPT_SUPP_H
-
-#include <linux/fscrypt_common.h>
-
-/* crypto.c */
-extern struct kmem_cache *fscrypt_info_cachep;
-extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
-extern void fscrypt_release_ctx(struct fscrypt_ctx *);
-extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
- unsigned int, unsigned int,
- u64, gfp_t);
-extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
- unsigned int, u64);
-extern void fscrypt_restore_control_page(struct page *);
-
-extern const struct dentry_operations fscrypt_d_ops;
-
-static inline void fscrypt_set_d_op(struct dentry *dentry)
-{
- d_set_d_op(dentry, &fscrypt_d_ops);
-}
-
-static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
-{
- spin_lock(&dentry->d_lock);
- dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY;
- spin_unlock(&dentry->d_lock);
-}
-
-/* policy.c */
-extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
-extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
-extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
-extern int fscrypt_inherit_context(struct inode *, struct inode *,
- void *, bool);
-/* keyinfo.c */
-extern int fscrypt_get_encryption_info(struct inode *);
-extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
-
-/* fname.c */
-extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
- int lookup, struct fscrypt_name *);
-
-static inline void fscrypt_free_filename(struct fscrypt_name *fname)
-{
- kfree(fname->crypto_buf.name);
-}
-
-extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32);
-extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
- struct fscrypt_str *);
-extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
-extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
- const struct fscrypt_str *, struct fscrypt_str *);
-extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
- struct fscrypt_str *);
-
-#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32
-
-/* Extracts the second-to-last ciphertext block; see explanation below */
-#define FSCRYPT_FNAME_DIGEST(name, len) \
- ((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \
- FS_CRYPTO_BLOCK_SIZE))
-
-#define FSCRYPT_FNAME_DIGEST_SIZE FS_CRYPTO_BLOCK_SIZE
-
-/**
- * fscrypt_digested_name - alternate identifier for an on-disk filename
- *
- * When userspace lists an encrypted directory without access to the key,
- * filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE
- * bytes are shown in this abbreviated form (base64-encoded) rather than as the
- * full ciphertext (base64-encoded). This is necessary to allow supporting
- * filenames up to NAME_MAX bytes, since base64 encoding expands the length.
- *
- * To make it possible for filesystems to still find the correct directory entry
- * despite not knowing the full on-disk name, we encode any filesystem-specific
- * 'hash' and/or 'minor_hash' which the filesystem may need for its lookups,
- * followed by the second-to-last ciphertext block of the filename. Due to the
- * use of the CBC-CTS encryption mode, the second-to-last ciphertext block
- * depends on the full plaintext. (Note that ciphertext stealing causes the
- * last two blocks to appear "flipped".) This makes accidental collisions very
- * unlikely: just a 1 in 2^128 chance for two filenames to collide even if they
- * share the same filesystem-specific hashes.
- *
- * However, this scheme isn't immune to intentional collisions, which can be
- * created by anyone able to create arbitrary plaintext filenames and view them
- * without the key. Making the "digest" be a real cryptographic hash like
- * SHA-256 over the full ciphertext would prevent this, although it would be
- * less efficient and harder to implement, especially since the filesystem would
- * need to calculate it for each directory entry examined during a search.
- */
-struct fscrypt_digested_name {
- u32 hash;
- u32 minor_hash;
- u8 digest[FSCRYPT_FNAME_DIGEST_SIZE];
-};
-
-/**
- * fscrypt_match_name() - test whether the given name matches a directory entry
- * @fname: the name being searched for
- * @de_name: the name from the directory entry
- * @de_name_len: the length of @de_name in bytes
- *
- * Normally @fname->disk_name will be set, and in that case we simply compare
- * that to the name stored in the directory entry. The only exception is that
- * if we don't have the key for an encrypted directory and a filename in it is
- * very long, then we won't have the full disk_name and we'll instead need to
- * match against the fscrypt_digested_name.
- *
- * Return: %true if the name matches, otherwise %false.
- */
-static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
- const u8 *de_name, u32 de_name_len)
-{
- if (unlikely(!fname->disk_name.name)) {
- const struct fscrypt_digested_name *n =
- (const void *)fname->crypto_buf.name;
- if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_'))
- return false;
- if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE)
- return false;
- return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len),
- n->digest, FSCRYPT_FNAME_DIGEST_SIZE);
- }
-
- if (de_name_len != fname->disk_name.len)
- return false;
- return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
-}
-
-/* bio.c */
-extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
-extern void fscrypt_pullback_bio_page(struct page **, bool);
-extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
- unsigned int);
-
-#endif /* _LINUX_FSCRYPT_SUPP_H */
diff --git a/include/linux/fsi-occ.h b/include/linux/fsi-occ.h
new file mode 100644
index 000000000000..7ee3dbd7f4b3
--- /dev/null
+++ b/include/linux/fsi-occ.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef LINUX_FSI_OCC_H
+#define LINUX_FSI_OCC_H
+
+struct device;
+
+#define OCC_RESP_CMD_IN_PRG 0xFF
+#define OCC_RESP_SUCCESS 0
+#define OCC_RESP_CMD_INVAL 0x11
+#define OCC_RESP_CMD_LEN_INVAL 0x12
+#define OCC_RESP_DATA_INVAL 0x13
+#define OCC_RESP_CHKSUM_ERR 0x14
+#define OCC_RESP_INT_ERR 0x15
+#define OCC_RESP_BAD_STATE 0x16
+#define OCC_RESP_CRIT_EXCEPT 0xE0
+#define OCC_RESP_CRIT_INIT 0xE1
+#define OCC_RESP_CRIT_WATCHDOG 0xE2
+#define OCC_RESP_CRIT_OCB 0xE3
+#define OCC_RESP_CRIT_HW 0xE4
+
+#define OCC_MAX_RESP_WORDS 2048
+
+int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
+ void *response, size_t *resp_len);
+
+#endif /* LINUX_FSI_OCC_H */
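
As a usage sketch (not from this patch): a caller hands fsi_occ_submit() a command image and checks the returned status byte against the codes above. The POLL command layout and the status byte sitting at offset 2 of the response are assumptions from the OCC command format, not defined by this header:

/* Sketch only: the command image and status offset are assumptions. */
static int occ_poll(struct device *occ_dev)
{
	u8 cmd[8] = { 0 };	/* hypothetical OCC POLL command image */
	u8 resp[256];
	size_t resp_len = sizeof(resp);
	int rc;

	rc = fsi_occ_submit(occ_dev, cmd, sizeof(cmd), resp, &resp_len);
	if (rc)
		return rc;
	if (resp[2] == OCC_RESP_CMD_IN_PRG)
		return -EAGAIN;		/* retry later */
	return resp[2] == OCC_RESP_SUCCESS ? 0 : -EIO;
}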
diff --git a/include/linux/fsi-sbefifo.h b/include/linux/fsi-sbefifo.h
new file mode 100644
index 000000000000..a9935e806f8e
--- /dev/null
+++ b/include/linux/fsi-sbefifo.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * SBEFIFO FSI Client device driver
+ *
+ * Copyright (C) IBM Corporation 2017
+ */
+
+#ifndef LINUX_FSI_SBEFIFO_H
+#define LINUX_FSI_SBEFIFO_H
+
+#define SBEFIFO_CMD_PUT_OCC_SRAM 0xa404
+#define SBEFIFO_CMD_GET_OCC_SRAM 0xa403
+#define SBEFIFO_CMD_GET_SBE_FFDC 0xa801
+
+#define SBEFIFO_MAX_FFDC_SIZE 0x2000
+
+struct device;
+
+int sbefifo_submit(struct device *dev, const __be32 *command, size_t cmd_len,
+ __be32 *response, size_t *resp_len);
+
+int sbefifo_parse_status(struct device *dev, u16 cmd, __be32 *response,
+ size_t resp_len, size_t *data_len);
+
+#endif /* LINUX_FSI_SBEFIFO_H */
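
A hedged sketch of how the two entry points chain together; the two-word command image (length word, then opcode) is illustrative and real SBE command framing may carry more fields:

/* Sketch only; the command framing here is an assumption. */
static int sbe_get_ffdc(struct device *sbefifo_dev, __be32 *buf,
			size_t buf_words)
{
	__be32 cmd[2] = {
		cpu_to_be32(2),				/* total word count */
		cpu_to_be32(SBEFIFO_CMD_GET_SBE_FFDC),	/* opcode */
	};
	size_t resp_len = buf_words, data_len;
	int rc;

	rc = sbefifo_submit(sbefifo_dev, cmd, 2, buf, &resp_len);
	if (rc)
		return rc;
	/* validates and strips the status header, yielding payload size */
	return sbefifo_parse_status(sbefifo_dev, SBEFIFO_CMD_GET_SBE_FFDC,
				    buf, resp_len, &data_len);
}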
diff --git a/include/linux/fsi.h b/include/linux/fsi.h
index 141fd38d061f..3df8c54868df 100644
--- a/include/linux/fsi.h
+++ b/include/linux/fsi.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* FSI device & driver interfaces
*
* Copyright (C) IBM Corporation 2016
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef LINUX_FSI_H
@@ -76,8 +68,18 @@ extern int fsi_slave_read(struct fsi_slave *slave, uint32_t addr,
extern int fsi_slave_write(struct fsi_slave *slave, uint32_t addr,
const void *val, size_t size);
+extern struct bus_type fsi_bus_type;
+extern const struct device_type fsi_cdev_type;
+enum fsi_dev_type {
+ fsi_dev_cfam,
+ fsi_dev_sbefifo,
+ fsi_dev_scom,
+ fsi_dev_occ
+};
-extern struct bus_type fsi_bus_type;
+extern int fsi_get_new_minor(struct fsi_device *fdev, enum fsi_dev_type type,
+ dev_t *out_dev, int *out_index);
+extern void fsi_free_minor(dev_t dev);
#endif /* LINUX_FSI_H */
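
The new minor-allocation pair would presumably be used like this in a client driver's probe path (a sketch; the char-device plumbing is elided):

/* Sketch only: cdev registration details are elided. */
static int fsi_client_register_cdev(struct fsi_device *fdev)
{
	dev_t devt;
	int index, rc;

	rc = fsi_get_new_minor(fdev, fsi_dev_sbefifo, &devt, &index);
	if (rc)
		return rc;
	/* ... cdev_init()/cdev_add() against devt would go here ... */
	/* on any later failure, release with fsi_free_minor(devt) */
	return 0;
}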
diff --git a/include/linux/fsl-diu-fb.h b/include/linux/fsl-diu-fb.h
index c46eab5bc893..9a55ddc0d277 100644
--- a/include/linux/fsl-diu-fb.h
+++ b/include/linux/fsl-diu-fb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
*
@@ -9,12 +10,6 @@
* York Sun <yorksun@freescale.com>
*
* Based on imxfb.c Copyright (C) 2004 S.Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __FSL_DIU_FB_H__
diff --git a/include/linux/fsl/bestcomm/bestcomm.h b/include/linux/fsl/bestcomm/bestcomm.h
index a0e2e6b19b57..154e541ce57e 100644
--- a/include/linux/fsl/bestcomm/bestcomm.h
+++ b/include/linux/fsl/bestcomm/bestcomm.h
@@ -27,7 +27,7 @@
*/
struct bcom_bd {
u32 status;
- u32 data[0]; /* variable payload size */
+ u32 data[]; /* variable payload size */
};
/* ======================================================================== */
diff --git a/include/linux/fsl/bestcomm/gen_bd.h b/include/linux/fsl/bestcomm/gen_bd.h
index de47260e69da..aeb312a1cd00 100644
--- a/include/linux/fsl/bestcomm/gen_bd.h
+++ b/include/linux/fsl/bestcomm/gen_bd.h
@@ -1,16 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Header for Bestcomm General Buffer Descriptor tasks driver
*
- *
* Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com>
* Copyright (C) 2006 AppSpec Computer Technologies Corp.
* Jeff Gibbons <jeff.gibbons@appspec.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- *
*/
#ifndef __BESTCOMM_GEN_BD_H__
diff --git a/include/linux/fsl/edac.h b/include/linux/fsl/edac.h
index 90d64d4ec1a9..148a297d7587 100644
--- a/include/linux/fsl/edac.h
+++ b/include/linux/fsl/edac.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef FSL_EDAC_H
#define FSL_EDAC_H
diff --git a/include/linux/fsl/enetc_mdio.h b/include/linux/fsl/enetc_mdio.h
new file mode 100644
index 000000000000..df25fffdc0ae
--- /dev/null
+++ b/include/linux/fsl/enetc_mdio.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2019 NXP */
+
+#ifndef _FSL_ENETC_MDIO_H_
+#define _FSL_ENETC_MDIO_H_
+
+#include <linux/phy.h>
+
+/* PCS registers */
+#define ENETC_PCS_LINK_TIMER1 0x12
+#define ENETC_PCS_LINK_TIMER1_VAL 0x06a0
+#define ENETC_PCS_LINK_TIMER2 0x13
+#define ENETC_PCS_LINK_TIMER2_VAL 0x0003
+#define ENETC_PCS_IF_MODE 0x14
+#define ENETC_PCS_IF_MODE_SGMII_EN BIT(0)
+#define ENETC_PCS_IF_MODE_USE_SGMII_AN BIT(1)
+#define ENETC_PCS_IF_MODE_SGMII_SPEED(x) (((x) << 2) & GENMASK(3, 2))
+#define ENETC_PCS_IF_MODE_DUPLEX_HALF BIT(3)
+
+/* Not a mistake, the SerDes PLL needs to be set at 3.125 GHz by Reset
+ * Configuration Word (RCW, outside Linux control) for 2.5G SGMII mode. The PCS
+ * still thinks it's at gigabit.
+ */
+enum enetc_pcs_speed {
+ ENETC_PCS_SPEED_10 = 0,
+ ENETC_PCS_SPEED_100 = 1,
+ ENETC_PCS_SPEED_1000 = 2,
+ ENETC_PCS_SPEED_2500 = 2,
+};
+
+struct enetc_hw;
+
+struct enetc_mdio_priv {
+ struct enetc_hw *hw;
+ int mdio_base;
+};
+
+#if IS_REACHABLE(CONFIG_FSL_ENETC_MDIO)
+
+int enetc_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum);
+int enetc_mdio_write_c22(struct mii_bus *bus, int phy_id, int regnum,
+ u16 value);
+int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id, int devad, int regnum);
+int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id, int devad, int regnum,
+ u16 value);
+struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs);
+
+#else
+
+static inline int enetc_mdio_read_c22(struct mii_bus *bus, int phy_id,
+ int regnum)
+{ return -EINVAL; }
+static inline int enetc_mdio_write_c22(struct mii_bus *bus, int phy_id,
+ int regnum, u16 value)
+{ return -EINVAL; }
+static inline int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id,
+ int devad, int regnum)
+{ return -EINVAL; }
+static inline int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id,
+ int devad, int regnum, u16 value)
+{ return -EINVAL; }
+static inline struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
+{ return ERR_PTR(-EINVAL); }
+
+#endif
+
+#endif
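
For orientation, an MDIO bus driver could wire these accessors into an mii_bus roughly as follows. This is a sketch under assumptions, not the in-tree driver: the port register mapping comes from the caller, and mdio_base 0 is assumed:

/* Sketch: register an mii_bus backed by the ENETC clause-22 accessors. */
static int demo_enetc_mdio_register(struct device *dev,
				    void __iomem *port_regs)
{
	struct enetc_mdio_priv *priv;
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc_size(dev, sizeof(*priv));
	if (!bus)
		return -ENOMEM;

	bus->name = "demo ENETC MDIO";
	bus->read = enetc_mdio_read_c22;
	bus->write = enetc_mdio_write_c22;
	bus->parent = dev;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));

	priv = bus->priv;
	priv->hw = enetc_hw_alloc(dev, port_regs);
	if (IS_ERR(priv->hw))
		return PTR_ERR(priv->hw);
	priv->mdio_base = 0;	/* assumed register base offset */

	return devm_mdiobus_register(dev, bus);
}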
diff --git a/include/linux/fsl/ftm.h b/include/linux/fsl/ftm.h
new file mode 100644
index 000000000000..d59011acf66c
--- /dev/null
+++ b/include/linux/fsl/ftm.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __FSL_FTM_H__
+#define __FSL_FTM_H__
+
+#define FTM_SC 0x0 /* Status And Control */
+#define FTM_CNT 0x4 /* Counter */
+#define FTM_MOD 0x8 /* Modulo */
+
+#define FTM_CNTIN 0x4C /* Counter Initial Value */
+#define FTM_STATUS 0x50 /* Capture And Compare Status */
+#define FTM_MODE 0x54 /* Features Mode Selection */
+#define FTM_SYNC 0x58 /* Synchronization */
+#define FTM_OUTINIT 0x5C /* Initial State For Channels Output */
+#define FTM_OUTMASK 0x60 /* Output Mask */
+#define FTM_COMBINE 0x64 /* Function For Linked Channels */
+#define FTM_DEADTIME 0x68 /* Deadtime Insertion Control */
+#define FTM_EXTTRIG 0x6C /* FTM External Trigger */
+#define FTM_POL 0x70 /* Channels Polarity */
+#define FTM_FMS 0x74 /* Fault Mode Status */
+#define FTM_FILTER 0x78 /* Input Capture Filter Control */
+#define FTM_FLTCTRL 0x7C /* Fault Control */
+#define FTM_QDCTRL 0x80 /* Quadrature Decoder Control And Status */
+#define FTM_CONF 0x84 /* Configuration */
+#define FTM_FLTPOL 0x88 /* FTM Fault Input Polarity */
+#define FTM_SYNCONF 0x8C /* Synchronization Configuration */
+#define FTM_INVCTRL 0x90 /* FTM Inverting Control */
+#define FTM_SWOCTRL 0x94 /* FTM Software Output Control */
+#define FTM_PWMLOAD 0x98 /* FTM PWM Load */
+
+#define FTM_SC_CLK_MASK_SHIFT 3
+#define FTM_SC_CLK_MASK (3 << FTM_SC_CLK_MASK_SHIFT)
+#define FTM_SC_TOF 0x80
+#define FTM_SC_TOIE 0x40
+#define FTM_SC_CPWMS 0x20
+#define FTM_SC_CLKS 0x18
+#define FTM_SC_PS_1 0x0
+#define FTM_SC_PS_2 0x1
+#define FTM_SC_PS_4 0x2
+#define FTM_SC_PS_8 0x3
+#define FTM_SC_PS_16 0x4
+#define FTM_SC_PS_32 0x5
+#define FTM_SC_PS_64 0x6
+#define FTM_SC_PS_128 0x7
+#define FTM_SC_PS_MASK 0x7
+
+#define FTM_MODE_FAULTIE 0x80
+#define FTM_MODE_FAULTM 0x60
+#define FTM_MODE_CAPTEST 0x10
+#define FTM_MODE_PWMSYNC 0x8
+#define FTM_MODE_WPDIS 0x4
+#define FTM_MODE_INIT 0x2
+#define FTM_MODE_FTMEN 0x1
+
+/* NXP Errata: The PHAFLTREN and PHBFLTREN bits are tied to zero internally
+ * and these bits cannot be set. The FlexTimer cannot use the input filter in
+ * Quadrature Decoder Mode.
+ * https://community.nxp.com/thread/467648#comment-1010319
+ */
+#define FTM_QDCTRL_PHAFLTREN 0x80
+#define FTM_QDCTRL_PHBFLTREN 0x40
+#define FTM_QDCTRL_PHAPOL 0x20
+#define FTM_QDCTRL_PHBPOL 0x10
+#define FTM_QDCTRL_QUADMODE 0x8
+#define FTM_QDCTRL_QUADDIR 0x4
+#define FTM_QDCTRL_TOFDIR 0x2
+#define FTM_QDCTRL_QUADEN 0x1
+
+#define FTM_FMS_FAULTF 0x80
+#define FTM_FMS_WPEN 0x40
+#define FTM_FMS_FAULTIN 0x10
+#define FTM_FMS_FAULTF3 0x8
+#define FTM_FMS_FAULTF2 0x4
+#define FTM_FMS_FAULTF1 0x2
+#define FTM_FMS_FAULTF0 0x1
+
+#define FTM_CSC_BASE 0xC
+#define FTM_CSC_MSB 0x20
+#define FTM_CSC_MSA 0x10
+#define FTM_CSC_ELSB 0x8
+#define FTM_CSC_ELSA 0x4
+#define FTM_CSC(_channel) (FTM_CSC_BASE + ((_channel) * 8))
+
+#define FTM_CV_BASE 0x10
+#define FTM_CV(_channel) (FTM_CV_BASE + ((_channel) * 8))
+
+#define FTM_PS_MAX 7
+
+#endif
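
The channel macros at the bottom encode the register layout: each channel owns an 8-byte {CSC, CV} pair starting at FTM_CSC_BASE. A hedged sketch of programming one channel for high-true edge-aligned PWM follows; big-endian MMIO is an assumption here (some FTM instances are little-endian):

/*
 * Sketch only. MSnB=1 with ELSnB=1/ELSnA=0 selects high-true
 * edge-aligned PWM per the FTM reference manual.
 */
static void ftm_enable_pwm_channel(void __iomem *base, int ch, u32 match)
{
	iowrite32be(FTM_CSC_MSB | FTM_CSC_ELSB, base + FTM_CSC(ch));
	iowrite32be(match, base + FTM_CV(ch));	/* match value sets duty */
}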
diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h
index 3efa3b861d44..fdb55ca47a4f 100644
--- a/include/linux/fsl/guts.h
+++ b/include/linux/fsl/guts.h
@@ -1,23 +1,20 @@
-/**
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
 * Freescale 85xx and 86xx Global Utilities register set
*
* Authors: Jeff Brown
* Timur Tabi <timur@freescale.com>
*
* Copyright 2004,2007,2012 Freescale Semiconductor, Inc
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __FSL_GUTS_H__
#define __FSL_GUTS_H__
#include <linux/types.h>
+#include <linux/io.h>
-/**
+/*
* Global Utility Registers.
*
* Not all registers defined in this structure are available on all chips, so
@@ -134,8 +131,6 @@ struct ccsr_guts {
 	u32	srds2cr1;	/* 0x.0f44 - SerDes2 Control Register 1 */
} __attribute__ ((packed));
-u32 fsl_guts_get_svr(void);
-
/* Alternate function signal multiplex control */
#define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x))
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
new file mode 100644
index 000000000000..a86115bc799c
--- /dev/null
+++ b/include/linux/fsl/mc.h
@@ -0,0 +1,684 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Freescale Management Complex (MC) bus public interface
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2019-2020 NXP
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ */
+#ifndef _FSL_MC_H_
+#define _FSL_MC_H_
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/interrupt.h>
+#include <uapi/linux/fsl_mc.h>
+
+#define FSL_MC_VENDOR_FREESCALE 0x1957
+
+struct irq_domain;
+struct msi_domain_info;
+
+struct fsl_mc_device;
+struct fsl_mc_io;
+
+/**
+ * struct fsl_mc_driver - MC object device driver object
+ * @driver: Generic device driver
+ * @match_id_table: table of supported device matching Ids
+ * @probe: Function called when a device is added
+ * @remove: Function called when a device is removed
+ * @shutdown: Function called at shutdown time to quiesce the device
+ * @suspend: Function called when a device is stopped
+ * @resume: Function called when a device is resumed
+ * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
+ * For most device drivers, no need to care about this flag
+ * as long as all DMAs are handled through the kernel DMA API.
+ * For some special ones, for example VFIO drivers, they know
+ * how to manage the DMA themselves and set this flag so that
+ * the IOMMU layer will allow them to setup and manage their
+ * own I/O address space.
+ *
+ * Generic DPAA device driver object for device drivers that are registered
+ * with a DPRC bus. This structure is to be embedded in each device-specific
+ * driver structure.
+ */
+struct fsl_mc_driver {
+ struct device_driver driver;
+ const struct fsl_mc_device_id *match_id_table;
+ int (*probe)(struct fsl_mc_device *dev);
+ int (*remove)(struct fsl_mc_device *dev);
+ void (*shutdown)(struct fsl_mc_device *dev);
+ int (*suspend)(struct fsl_mc_device *dev, pm_message_t state);
+ int (*resume)(struct fsl_mc_device *dev);
+ bool driver_managed_dma;
+};
+
+#define to_fsl_mc_driver(_drv) \
+ container_of(_drv, struct fsl_mc_driver, driver)
+
+/**
+ * enum fsl_mc_pool_type - Types of allocatable MC bus resources
+ *
+ * Entries in these enum are used as indices in the array of resource
+ * pools of an fsl_mc_bus object.
+ */
+enum fsl_mc_pool_type {
+ FSL_MC_POOL_DPMCP = 0x0, /* corresponds to "dpmcp" in the MC */
+ FSL_MC_POOL_DPBP, /* corresponds to "dpbp" in the MC */
+ FSL_MC_POOL_DPCON, /* corresponds to "dpcon" in the MC */
+ FSL_MC_POOL_IRQ,
+
+ /*
+ * NOTE: New resource pool types must be added before this entry
+ */
+ FSL_MC_NUM_POOL_TYPES
+};
+
+/**
+ * struct fsl_mc_resource - MC generic resource
+ * @type: type of resource
+ * @id: unique MC resource Id within the resources of the same type
+ * @data: pointer to resource-specific data if the resource is currently
+ * allocated, or NULL if the resource is not currently allocated.
+ * @parent_pool: pointer to the parent resource pool from which this
+ * resource is allocated.
+ * @node: Node in the free list of the corresponding resource pool
+ *
+ * NOTE: This structure is to be embedded as a field of specific
+ * MC resource structures.
+ */
+struct fsl_mc_resource {
+ enum fsl_mc_pool_type type;
+ s32 id;
+ void *data;
+ struct fsl_mc_resource_pool *parent_pool;
+ struct list_head node;
+};
+
+/**
+ * struct fsl_mc_device_irq - MC object device message-based interrupt
+ * @virq: Linux virtual interrupt number
+ * @mc_dev: MC object device that owns this interrupt
+ * @dev_irq_index: device-relative IRQ index
+ * @resource: MC generic resource associated with the interrupt
+ */
+struct fsl_mc_device_irq {
+ unsigned int virq;
+ struct fsl_mc_device *mc_dev;
+ u8 dev_irq_index;
+ struct fsl_mc_resource resource;
+};
+
+#define to_fsl_mc_irq(_mc_resource) \
+ container_of(_mc_resource, struct fsl_mc_device_irq, resource)
+
+/* Opened state - Indicates that an object is open by at least one owner */
+#define FSL_MC_OBJ_STATE_OPEN 0x00000001
+/* Plugged state - Indicates that the object is plugged */
+#define FSL_MC_OBJ_STATE_PLUGGED 0x00000002
+
+/**
+ * Shareability flag - Object flag indicating no memory shareability.
+ * The object generates memory accesses that are non-coherent with other
+ * masters; the user is responsible for proper memory handling through
+ * IOMMU configuration.
+ */
+#define FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
+
+/**
+ * struct fsl_mc_obj_desc - Object descriptor
+ * @type: Type of object: NULL terminated string
+ * @id: ID of logical object resource
+ * @vendor: Object vendor identifier
+ * @ver_major: Major version number
+ * @ver_minor: Minor version number
+ * @irq_count: Number of interrupts supported by the object
+ * @region_count: Number of mappable regions supported by the object
+ * @state: Object state: combination of FSL_MC_OBJ_STATE_ states
+ * @label: Object label: NULL terminated string
+ * @flags: Object's flags
+ */
+struct fsl_mc_obj_desc {
+ char type[16];
+ int id;
+ u16 vendor;
+ u16 ver_major;
+ u16 ver_minor;
+ u8 irq_count;
+ u8 region_count;
+ u32 state;
+ char label[16];
+ u16 flags;
+};
+
+/**
+ * Bit masks for an MC object device (struct fsl_mc_device) flags
+ */
+#define FSL_MC_IS_DPRC 0x0001
+
+/* Region flags */
+/* Indicates that region can be mapped as cacheable */
+#define FSL_MC_REGION_CACHEABLE 0x00000001
+
+/* Indicates that region can be mapped as shareable */
+#define FSL_MC_REGION_SHAREABLE 0x00000002
+
+/**
+ * struct fsl_mc_device - MC object device object
+ * @dev: Linux driver model device object
+ * @dma_mask: Default DMA mask
+ * @flags: MC object device flags
+ * @icid: Isolation context ID for the device
+ * @mc_handle: MC handle for the corresponding MC object opened
+ * @mc_io: Pointer to MC IO object assigned to this device or
+ * NULL if none.
+ * @obj_desc: MC description of the DPAA device
+ * @regions: pointer to array of MMIO region entries
+ * @irqs: pointer to array of pointers to interrupts allocated to this device
+ * @resource: generic resource associated with this MC object device, if any.
+ * @consumer_link: device link to this device's consumer, if any
+ * @driver_override: driver name to force a match; do not set directly,
+ * because core frees it; use driver_set_override() to
+ * set or clear it.
+ *
+ * Generic device object for MC object devices that are "attached" to a
+ * MC bus.
+ *
+ * NOTES:
+ * - For a non-DPRC object its icid is the same as its parent DPRC's icid.
+ * - The SMMU notifier callback gets invoked after device_add() has been
+ * called for an MC object device, but before the device-specific probe
+ * callback gets called.
+ * - DP_OBJ_DPRC objects are the only MC objects that have built-in MC
+ * portals. For all other MC objects, their device drivers are responsible for
+ * allocating MC portals for them by calling fsl_mc_portal_allocate().
+ * - Some types of MC objects (e.g., DP_OBJ_DPBP, DP_OBJ_DPCON) are
+ * treated as resources that can be allocated/deallocated from the
+ * corresponding resource pool in the object's parent DPRC, using the
+ * fsl_mc_object_allocate()/fsl_mc_object_free() functions. These MC objects
+ * are known as "allocatable" objects. For them, the corresponding
+ * fsl_mc_device's 'resource' points to the associated resource object.
+ * For MC objects that are not allocatable (e.g., DP_OBJ_DPRC, DP_OBJ_DPNI),
+ * 'resource' is NULL.
+ */
+struct fsl_mc_device {
+ struct device dev;
+ u64 dma_mask;
+ u16 flags;
+ u32 icid;
+ u16 mc_handle;
+ struct fsl_mc_io *mc_io;
+ struct fsl_mc_obj_desc obj_desc;
+ struct resource *regions;
+ struct fsl_mc_device_irq **irqs;
+ struct fsl_mc_resource *resource;
+ struct device_link *consumer_link;
+ const char *driver_override;
+};
+
+#define to_fsl_mc_device(_dev) \
+ container_of(_dev, struct fsl_mc_device, dev)
+
+struct mc_cmd_header {
+ u8 src_id;
+ u8 flags_hw;
+ u8 status;
+ u8 flags_sw;
+ __le16 token;
+ __le16 cmd_id;
+};
+
+enum mc_cmd_status {
+ MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
+ MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
+ MC_CMD_STATUS_AUTH_ERR = 0x3, /* Authentication error */
+ MC_CMD_STATUS_NO_PRIVILEGE = 0x4, /* No privilege */
+ MC_CMD_STATUS_DMA_ERR = 0x5, /* DMA or I/O error */
+ MC_CMD_STATUS_CONFIG_ERR = 0x6, /* Configuration error */
+ MC_CMD_STATUS_TIMEOUT = 0x7, /* Operation timed out */
+ MC_CMD_STATUS_NO_RESOURCE = 0x8, /* No resources */
+ MC_CMD_STATUS_NO_MEMORY = 0x9, /* No memory available */
+ MC_CMD_STATUS_BUSY = 0xA, /* Device is busy */
+ MC_CMD_STATUS_UNSUPPORTED_OP = 0xB, /* Unsupported operation */
+ MC_CMD_STATUS_INVALID_STATE = 0xC /* Invalid state */
+};
+
+/*
+ * MC command flags
+ */
+
+/* High priority flag */
+#define MC_CMD_FLAG_PRI 0x80
+/* Disable interrupt on command completion */
+#define MC_CMD_FLAG_INTR_DIS 0x01
+
+static inline __le64 mc_encode_cmd_header(u16 cmd_id,
+ u32 cmd_flags,
+ u16 token)
+{
+ __le64 header = 0;
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
+
+ hdr->cmd_id = cpu_to_le16(cmd_id);
+ hdr->token = cpu_to_le16(token);
+ hdr->status = MC_CMD_STATUS_READY;
+ if (cmd_flags & MC_CMD_FLAG_PRI)
+ hdr->flags_hw = MC_CMD_FLAG_PRI;
+ if (cmd_flags & MC_CMD_FLAG_INTR_DIS)
+ hdr->flags_sw = MC_CMD_FLAG_INTR_DIS;
+
+ return header;
+}
+
+static inline u16 mc_cmd_hdr_read_token(struct fsl_mc_command *cmd)
+{
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+ u16 token = le16_to_cpu(hdr->token);
+
+ return token;
+}
+
+struct mc_rsp_create {
+ __le32 object_id;
+};
+
+struct mc_rsp_api_ver {
+ __le16 major_ver;
+ __le16 minor_ver;
+};
+
+static inline u32 mc_cmd_read_object_id(struct fsl_mc_command *cmd)
+{
+ struct mc_rsp_create *rsp_params;
+
+ rsp_params = (struct mc_rsp_create *)cmd->params;
+ return le32_to_cpu(rsp_params->object_id);
+}
+
+static inline void mc_cmd_read_api_version(struct fsl_mc_command *cmd,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct mc_rsp_api_ver *rsp_params;
+
+ rsp_params = (struct mc_rsp_api_ver *)cmd->params;
+ *major_ver = le16_to_cpu(rsp_params->major_ver);
+ *minor_ver = le16_to_cpu(rsp_params->minor_ver);
+}
+
+/**
+ * Bit masks for an MC I/O object (struct fsl_mc_io) flags
+ */
+#define FSL_MC_IO_ATOMIC_CONTEXT_PORTAL 0x0001
+
+/**
+ * struct fsl_mc_io - MC I/O object to be passed-in to mc_send_command()
+ * @dev: device associated with this Mc I/O object
+ * @flags: flags for mc_send_command()
+ * @portal_size: MC command portal size in bytes
+ * @portal_phys_addr: MC command portal physical address
+ * @portal_virt_addr: MC command portal virtual address
+ * @dpmcp_dev: pointer to the DPMCP device associated with the MC portal.
+ *
+ * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not
+ * set:
+ * @mutex: Mutex to serialize mc_send_command() calls that use the same MC
+ * portal, if the fsl_mc_io object was created with the
+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag off. mc_send_command() calls for this
+ * fsl_mc_io object must be made only from non-atomic context.
+ *
+ * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is
+ * set:
+ * @spinlock: Spinlock to serialize mc_send_command() calls that use the same MC
+ * portal, if the fsl_mc_io object was created with the
+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag on. mc_send_command() calls for this
+ * fsl_mc_io object can be made from atomic or non-atomic context.
+ */
+struct fsl_mc_io {
+ struct device *dev;
+ u16 flags;
+ u32 portal_size;
+ phys_addr_t portal_phys_addr;
+ void __iomem *portal_virt_addr;
+ struct fsl_mc_device *dpmcp_dev;
+ union {
+ /*
+ * This field is only meaningful if the
+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not set
+ */
+ struct mutex mutex; /* serializes mc_send_command() */
+
+ /*
+ * This field is only meaningful if the
+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is set
+ */
+ raw_spinlock_t spinlock; /* serializes mc_send_command() */
+ };
+};
+
+int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd);
+
+#ifdef CONFIG_FSL_MC_BUS
+#define dev_is_fsl_mc(_dev) ((_dev)->bus == &fsl_mc_bus_type)
+#else
+/* If the fsl-mc bus is not present, a device cannot belong to it */
+#define dev_is_fsl_mc(_dev) (0)
+#endif
+
+/* Macro to check if a device is a container device */
+#define fsl_mc_is_cont_dev(_dev) (to_fsl_mc_device(_dev)->flags & \
+ FSL_MC_IS_DPRC)
+
+/* Macro to get the container device of a MC device */
+#define fsl_mc_cont_dev(_dev) (fsl_mc_is_cont_dev(_dev) ? \
+ (_dev) : (_dev)->parent)
+
+/*
+ * module_fsl_mc_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit()
+ */
+#define module_fsl_mc_driver(__fsl_mc_driver) \
+ module_driver(__fsl_mc_driver, fsl_mc_driver_register, \
+ fsl_mc_driver_unregister)
+
+/*
+ * Macro to avoid include chaining to get THIS_MODULE
+ */
+#define fsl_mc_driver_register(drv) \
+ __fsl_mc_driver_register(drv, THIS_MODULE)
+
+int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver,
+ struct module *owner);
+
+void fsl_mc_driver_unregister(struct fsl_mc_driver *driver);
+
+/**
+ * struct fsl_mc_version
+ * @major: Major version number: incremented on API compatibility changes
+ * @minor: Minor version number: incremented on API additions (that are
+ * backward compatible); reset when major version is incremented
+ * @revision: Internal revision number: incremented on implementation changes
+ * and/or bug fixes that have no impact on API
+ */
+struct fsl_mc_version {
+ u32 major;
+ u32 minor;
+ u32 revision;
+};
+
+struct fsl_mc_version *fsl_mc_get_version(void);
+
+int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
+ u16 mc_io_flags,
+ struct fsl_mc_io **new_mc_io);
+
+void fsl_mc_portal_free(struct fsl_mc_io *mc_io);
+
+int fsl_mc_portal_reset(struct fsl_mc_io *mc_io);
+
+int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
+ enum fsl_mc_pool_type pool_type,
+ struct fsl_mc_device **new_mc_adev);
+
+void fsl_mc_object_free(struct fsl_mc_device *mc_adev);
+
+struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
+ struct msi_domain_info *info,
+ struct irq_domain *parent);
+
+int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
+
+void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
+
+struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
+ u16 if_id);
+
+extern struct bus_type fsl_mc_bus_type;
+
+extern struct device_type fsl_mc_bus_dprc_type;
+extern struct device_type fsl_mc_bus_dpni_type;
+extern struct device_type fsl_mc_bus_dpio_type;
+extern struct device_type fsl_mc_bus_dpsw_type;
+extern struct device_type fsl_mc_bus_dpbp_type;
+extern struct device_type fsl_mc_bus_dpcon_type;
+extern struct device_type fsl_mc_bus_dpmcp_type;
+extern struct device_type fsl_mc_bus_dpmac_type;
+extern struct device_type fsl_mc_bus_dprtc_type;
+extern struct device_type fsl_mc_bus_dpseci_type;
+extern struct device_type fsl_mc_bus_dpdmux_type;
+extern struct device_type fsl_mc_bus_dpdcei_type;
+extern struct device_type fsl_mc_bus_dpaiop_type;
+extern struct device_type fsl_mc_bus_dpci_type;
+extern struct device_type fsl_mc_bus_dpdmai_type;
+
+static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dprc_type;
+}
+
+static inline bool is_fsl_mc_bus_dpni(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpni_type;
+}
+
+static inline bool is_fsl_mc_bus_dpio(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpio_type;
+}
+
+static inline bool is_fsl_mc_bus_dpsw(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpsw_type;
+}
+
+static inline bool is_fsl_mc_bus_dpdmux(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpdmux_type;
+}
+
+static inline bool is_fsl_mc_bus_dpbp(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpbp_type;
+}
+
+static inline bool is_fsl_mc_bus_dpcon(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpcon_type;
+}
+
+static inline bool is_fsl_mc_bus_dpmcp(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpmcp_type;
+}
+
+static inline bool is_fsl_mc_bus_dpmac(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpmac_type;
+}
+
+static inline bool is_fsl_mc_bus_dprtc(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dprtc_type;
+}
+
+static inline bool is_fsl_mc_bus_dpseci(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpseci_type;
+}
+
+static inline bool is_fsl_mc_bus_dpdcei(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpdcei_type;
+}
+
+static inline bool is_fsl_mc_bus_dpaiop(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpaiop_type;
+}
+
+static inline bool is_fsl_mc_bus_dpci(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpci_type;
+}
+
+static inline bool is_fsl_mc_bus_dpdmai(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpdmai_type;
+}
+
+#define DPRC_RESET_OPTION_NON_RECURSIVE 0x00000001
+int dprc_reset_container(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int child_container_id,
+ u32 options);
+
+int dprc_scan_container(struct fsl_mc_device *mc_bus_dev,
+ bool alloc_interrupts);
+
+void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_obj_desc *obj_desc_array,
+ int num_child_objects_in_mc);
+
+int dprc_cleanup(struct fsl_mc_device *mc_dev);
+
+int dprc_setup(struct fsl_mc_device *mc_dev);
+
+/**
+ * Maximum number of total IRQs that can be pre-allocated for an MC bus'
+ * IRQ pool
+ */
+#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS 256
+
+int fsl_mc_populate_irq_pool(struct fsl_mc_device *mc_bus_dev,
+ unsigned int irq_count);
+
+void fsl_mc_cleanup_irq_pool(struct fsl_mc_device *mc_bus_dev);
+
+/*
+ * Data Path Buffer Pool (DPBP) API
+ * Contains initialization APIs and runtime control APIs for DPBP
+ */
+
+int dpbp_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpbp_id,
+ u16 *token);
+
+int dpbp_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpbp_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpbp_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpbp_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * struct dpbp_attr - Structure representing DPBP attributes
+ * @id: DPBP object ID
+ * @bpid: Hardware buffer pool ID; should be used as an argument in
+ * acquire/release operations on buffers
+ */
+struct dpbp_attr {
+ int id;
+ u16 bpid;
+};
+
+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_attr *attr);
+
+/* Data Path Concentrator (DPCON) API
+ * Contains initialization APIs and runtime control APIs for DPCON
+ */
+
+/**
+ * Use it to disable notifications; see dpcon_set_notification()
+ */
+#define DPCON_INVALID_DPIO_ID (int)(-1)
+
+int dpcon_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpcon_id,
+ u16 *token);
+
+int dpcon_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpcon_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpcon_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpcon_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int fsl_mc_obj_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int obj_id,
+ char *obj_type,
+ u16 *token);
+
+int fsl_mc_obj_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int fsl_mc_obj_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * struct dpcon_attr - Structure representing DPCON attributes
+ * @id: DPCON object ID
+ * @qbman_ch_id: Channel ID to be used by dequeue operation
+ * @num_priorities: Number of priorities for the DPCON channel (1-8)
+ */
+struct dpcon_attr {
+ int id;
+ u16 qbman_ch_id;
+ u8 num_priorities;
+};
+
+int dpcon_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpcon_attr *attr);
+
+/**
+ * struct dpcon_notification_cfg - Structure representing notification params
+ * @dpio_id: DPIO object ID; must be configured with a notification channel;
+ * to disable notifications set it to 'DPCON_INVALID_DPIO_ID';
+ * @priority: Priority selection within the DPIO channel; valid values
+ * are 0-7, depending on the number of priorities in that channel
+ * @user_ctx: User context value provided with each CDAN message
+ */
+struct dpcon_notification_cfg {
+ int dpio_id;
+ u8 priority;
+ u64 user_ctx;
+};
+
+int dpcon_set_notification(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpcon_notification_cfg *cfg);
+
+#endif /* _FSL_MC_H_ */
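
Putting the driver-model pieces together, a minimal fsl-mc driver skeleton might look like the following. This is a sketch, not an in-tree driver; the "dpdemo" object type is made up, and the probe body only shows the portal allocation that non-DPRC objects need:

/* Sketch of an fsl-mc driver using the interfaces above. */
static int dpdemo_probe(struct fsl_mc_device *mc_dev)
{
	struct fsl_mc_io *mc_io;
	int err;

	/* non-DPRC objects must allocate an MC portal of their own */
	err = fsl_mc_portal_allocate(mc_dev, 0, &mc_io);
	if (err)
		return err;
	mc_dev->mc_io = mc_io;
	return 0;
}

static int dpdemo_remove(struct fsl_mc_device *mc_dev)
{
	fsl_mc_portal_free(mc_dev->mc_io);
	return 0;
}

static const struct fsl_mc_device_id dpdemo_id_table[] = {
	{ .vendor = FSL_MC_VENDOR_FREESCALE, .obj_type = "dpdemo" },
	{ }
};

static struct fsl_mc_driver dpdemo_driver = {
	.driver = {
		.name = "dpdemo",
	},
	.match_id_table = dpdemo_id_table,
	.probe = dpdemo_probe,
	.remove = dpdemo_remove,
};
module_fsl_mc_driver(dpdemo_driver);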
diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h
new file mode 100644
index 000000000000..b301bf7199d3
--- /dev/null
+++ b/include/linux/fsl/ptp_qoriq.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2010 OMICRON electronics GmbH
+ * Copyright 2018 NXP
+ */
+#ifndef __PTP_QORIQ_H__
+#define __PTP_QORIQ_H__
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/ptp_clock_kernel.h>
+
+/*
+ * qoriq ptp registers
+ */
+struct ctrl_regs {
+ u32 tmr_ctrl; /* Timer control register */
+ u32 tmr_tevent; /* Timestamp event register */
+ u32 tmr_temask; /* Timer event mask register */
+	u32 tmr_pevent; /* Timestamp PTP event register */
+	u32 tmr_pemask; /* Timestamp PTP event mask register */
+ u32 tmr_stat; /* Timestamp status register */
+ u32 tmr_cnt_h; /* Timer counter high register */
+ u32 tmr_cnt_l; /* Timer counter low register */
+ u32 tmr_add; /* Timer drift compensation addend register */
+ u32 tmr_acc; /* Timer accumulator register */
+ u32 tmr_prsc; /* Timer prescale */
+ u8 res1[4];
+ u32 tmroff_h; /* Timer offset high */
+ u32 tmroff_l; /* Timer offset low */
+};
+
+struct alarm_regs {
+ u32 tmr_alarm1_h; /* Timer alarm 1 high register */
+	u32 tmr_alarm1_l; /* Timer alarm 1 low register */
+	u32 tmr_alarm2_h; /* Timer alarm 2 high register */
+	u32 tmr_alarm2_l; /* Timer alarm 2 low register */
+};
+
+struct fiper_regs {
+ u32 tmr_fiper1; /* Timer fixed period interval */
+ u32 tmr_fiper2; /* Timer fixed period interval */
+ u32 tmr_fiper3; /* Timer fixed period interval */
+};
+
+struct etts_regs {
+ u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */
+ u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */
+ u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */
+ u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */
+};
+
+struct ptp_qoriq_registers {
+ struct ctrl_regs __iomem *ctrl_regs;
+ struct alarm_regs __iomem *alarm_regs;
+ struct fiper_regs __iomem *fiper_regs;
+ struct etts_regs __iomem *etts_regs;
+};
+
+/* Offset definitions for the four register groups */
+#define ETSEC_CTRL_REGS_OFFSET 0x0
+#define ETSEC_ALARM_REGS_OFFSET 0x40
+#define ETSEC_FIPER_REGS_OFFSET 0x80
+#define ETSEC_ETTS_REGS_OFFSET 0xa0
+
+#define CTRL_REGS_OFFSET 0x80
+#define ALARM_REGS_OFFSET 0xb8
+#define FIPER_REGS_OFFSET 0xd0
+#define ETTS_REGS_OFFSET 0xe0
+
+
+/* Bit definitions for the TMR_CTRL register */
+#define ALM1P (1<<31) /* Alarm1 output polarity */
+#define ALM2P (1<<30) /* Alarm2 output polarity */
+#define FIPERST (1<<28) /* FIPER start indication */
+#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
+#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
+#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
+#define TCLK_PERIOD_MASK (0x3ff)
+#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
+#define FRD (1<<14) /* FIPER Realignment Disable */
+#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
+#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
+#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
+#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
+#define COPH (1<<7) /* Generated clock output phase. */
+#define CIPH (1<<6) /* External oscillator input clock phase */
+#define TMSR (1<<5) /* Timer soft reset. */
+#define BYP (1<<3) /* Bypass drift compensated clock */
+#define TE (1<<2) /* 1588 timer enable. */
+#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
+#define CKSEL_MASK (0x3)
+
+/* Bit definitions for the TMR_TEVENT register */
+#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
+#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
+#define ALM2 (1<<17) /* Current time = alarm time register 2 */
+#define ALM1 (1<<16) /* Current time = alarm time register 1 */
+#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
+#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
+#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */
+
+/* Bit definitions for the TMR_TEMASK register */
+#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
+#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
+#define ALM2EN (1<<17) /* Timer ALM2 event enable */
+#define ALM1EN (1<<16) /* Timer ALM1 event enable */
+#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
+#define PP2EN (1<<6) /* Periodic pulse event 2 enable */
+
+/* Bit definitions for the TMR_PEVENT register */
+#define TXP2 (1<<9) /* PTP transmitted timestamp in TXTS2 */
+#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
+#define RXP (1<<0) /* PTP frame has been received */
+
+/* Bit definitions for the TMR_PEMASK register */
+#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
+#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
+#define RXPEN (1<<0) /* Receive PTP packet event enable */
+
+/* Bit definitions for the TMR_STAT register */
+#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
+#define STAT_VEC_MASK (0x3f)
+#define ETS1_VLD (1<<24)
+#define ETS2_VLD (1<<25)
+
+/* Bit definitions for the TMR_PRSC register */
+#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
+#define PRSC_OCK_MASK (0xffff)
+
+#define DRIVER "ptp_qoriq"
+#define N_EXT_TS 2
+
+#define DEFAULT_CKSEL 1
+#define DEFAULT_TMR_PRSC 2
+#define DEFAULT_FIPER1_PERIOD 1000000000
+#define DEFAULT_FIPER2_PERIOD 1000000000
+#define DEFAULT_FIPER3_PERIOD 1000000000
+
+struct ptp_qoriq {
+ void __iomem *base;
+ struct ptp_qoriq_registers regs;
+ spinlock_t lock; /* protects regs */
+ struct ptp_clock *clock;
+ struct ptp_clock_info caps;
+ struct resource *rsrc;
+ struct dentry *debugfs_root;
+ struct device *dev;
+ bool extts_fifo_support;
+ bool fiper3_support;
+ bool etsec;
+ int irq;
+ int phc_index;
+ u32 tclk_period; /* nanoseconds */
+ u32 tmr_prsc;
+ u32 tmr_add;
+ u32 cksel;
+ u32 tmr_fiper1;
+ u32 tmr_fiper2;
+ u32 tmr_fiper3;
+ u32 (*read)(unsigned __iomem *addr);
+ void (*write)(unsigned __iomem *addr, u32 val);
+};
+
+static inline u32 qoriq_read_be(unsigned __iomem *addr)
+{
+ return ioread32be(addr);
+}
+
+static inline void qoriq_write_be(unsigned __iomem *addr, u32 val)
+{
+ iowrite32be(val, addr);
+}
+
+static inline u32 qoriq_read_le(unsigned __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+static inline void qoriq_write_le(unsigned __iomem *addr, u32 val)
+{
+ iowrite32(val, addr);
+}
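
These four accessors are what gets installed in the read/write function
pointers of struct ptp_qoriq, so the rest of the driver stays endian-agnostic.
A hedged sketch of a 64-bit counter read built on top of them (the latching
behaviour noted in the comment is an assumption about the hardware):

    static u64 tmr_cnt_read(struct ptp_qoriq *ptp_qoriq)
    {
        struct ctrl_regs __iomem *regs = ptp_qoriq->regs.ctrl_regs;
        u32 lo, hi;

        /* assumed: reading the low half latches the high half */
        lo = ptp_qoriq->read(&regs->tmr_cnt_l);
        hi = ptp_qoriq->read(&regs->tmr_cnt_h);

        return ((u64)hi << 32) | lo;
    }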
+
+irqreturn_t ptp_qoriq_isr(int irq, void *priv);
+int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
+ const struct ptp_clock_info *caps);
+void ptp_qoriq_free(struct ptp_qoriq *ptp_qoriq);
+int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm);
+int ptp_qoriq_adjtime(struct ptp_clock_info *ptp, s64 delta);
+int ptp_qoriq_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts);
+int ptp_qoriq_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts);
+int ptp_qoriq_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on);
+int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index, bool update_event);
+#ifdef CONFIG_DEBUG_FS
+void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq);
+void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq);
+#else
+static inline void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq)
+{ }
+static inline void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq)
+{ }
+#endif
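
Taken together, the exported helpers cover most of a platform driver's PTP
glue. A condensed, hypothetical probe sketch (the devm mapping call and
qoriq_caps are assumptions for illustration; error unwinding is trimmed):

    static int qoriq_ptp_probe(struct platform_device *pdev)
    {
        struct ptp_qoriq *ptp_qoriq;
        void __iomem *base;

        ptp_qoriq = devm_kzalloc(&pdev->dev, sizeof(*ptp_qoriq), GFP_KERNEL);
        if (!ptp_qoriq)
            return -ENOMEM;

        base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base))
            return PTR_ERR(base);

        ptp_qoriq->dev = &pdev->dev;
        /* qoriq_caps: a filled-in struct ptp_clock_info (hypothetical) */
        return ptp_qoriq_init(ptp_qoriq, base, &qoriq_caps);
    }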
+
+#endif
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 60cef8227534..5d231ce8709b 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/fsl_devices.h
*
@@ -7,11 +8,6 @@
* Maintainer: Kumar Gala <galak@kernel.crashing.org>
*
* Copyright 2004,2012 Freescale Semiconductor, Inc
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef _FSL_DEVICE_H_
@@ -98,10 +94,12 @@ struct fsl_usb2_platform_data {
unsigned suspended:1;
unsigned already_suspended:1;
- unsigned has_fsl_erratum_a007792:1;
- unsigned has_fsl_erratum_a005275:1;
+ unsigned has_fsl_erratum_a007792:1;
+ unsigned has_fsl_erratum_14:1;
+ unsigned has_fsl_erratum_a005275:1;
unsigned has_fsl_erratum_a005697:1;
- unsigned check_phy_clk_valid:1;
+ unsigned has_fsl_erratum_a006918:1;
+ unsigned check_phy_clk_valid:1;
/* register save area for suspend/resume */
u32 pm_command;
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index c332f0a45607..0af96a45e903 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Freescale Integrated Flash Controller
*
* Copyright 2011 Freescale Semiconductor, Inc
*
* Author: Dipen Dudhat <dipen.dudhat@freescale.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_FSL_IFC_H
@@ -274,6 +261,8 @@
*/
/* Auto Boot Mode */
#define IFC_NAND_NCFGR_BOOT 0x80000000
+/* SRAM Initialization */
+#define IFC_NAND_NCFGR_SRAM_INIT_EN 0x20000000
/* Addressing Mode-ROW0+n/COL0 */
#define IFC_NAND_NCFGR_ADDR_MODE_RC0 0x00000000
/* Addressing Mode-ROW0+n/COL0+n */
@@ -734,11 +723,7 @@ struct fsl_ifc_nand {
u32 res19[0x10];
__be32 nand_fsr;
u32 res20;
- /* The V1 nand_eccstat is actually 4 words that overlaps the
- * V2 nand_eccstat.
- */
- __be32 v1_nand_eccstat[2];
- __be32 v2_nand_eccstat[6];
+ __be32 nand_eccstat[8];
u32 res21[0x1c];
__be32 nanndcr;
u32 res22[0x2];
diff --git a/include/linux/fsldma.h b/include/linux/fsldma.h
index b213c02963c9..c523d716ebd2 100644
--- a/include/linux/fsldma.h
+++ b/include/linux/fsldma.h
@@ -1,8 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef FSL_DMA_H
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index b78aa7ac77ce..bb8467cd11ae 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FS_NOTIFY_H
#define _LINUX_FS_NOTIFY_H
@@ -16,43 +17,111 @@
#include <linux/slab.h>
#include <linux/bug.h>
+/*
+ * Notify this @dir inode about a change in a child directory entry.
+ * The directory entry may have turned positive or negative or its inode may
+ * have changed (i.e. renamed over).
+ *
+ * Unlike fsnotify_parent(), the event will be reported regardless of the
+ * FS_EVENT_ON_CHILD mask on the parent inode and will not be reported if only
+ * the child is interested and not the parent.
+ */
+static inline int fsnotify_name(__u32 mask, const void *data, int data_type,
+ struct inode *dir, const struct qstr *name,
+ u32 cookie)
+{
+ if (atomic_long_read(&dir->i_sb->s_fsnotify_connectors) == 0)
+ return 0;
+
+ return fsnotify(mask, data, data_type, dir, name, NULL, cookie);
+}
+
+static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry,
+ __u32 mask)
+{
+ fsnotify_name(mask, dentry, FSNOTIFY_EVENT_DENTRY, dir, &dentry->d_name, 0);
+}
+
+static inline void fsnotify_inode(struct inode *inode, __u32 mask)
+{
+ if (atomic_long_read(&inode->i_sb->s_fsnotify_connectors) == 0)
+ return;
+
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+ fsnotify(mask, inode, FSNOTIFY_EVENT_INODE, NULL, NULL, inode, 0);
+}
+
/* Notify this dentry's parent about a child's events. */
-static inline int fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask)
+static inline int fsnotify_parent(struct dentry *dentry, __u32 mask,
+ const void *data, int data_type)
{
- if (!dentry)
- dentry = path->dentry;
+ struct inode *inode = d_inode(dentry);
+
+ if (atomic_long_read(&inode->i_sb->s_fsnotify_connectors) == 0)
+ return 0;
+
+ if (S_ISDIR(inode->i_mode)) {
+ mask |= FS_ISDIR;
- return __fsnotify_parent(path, dentry, mask);
+ /* sb/mount marks are not interested in name of directory */
+ if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED))
+ goto notify_child;
+ }
+
+ /* disconnected dentry cannot notify parent */
+ if (IS_ROOT(dentry))
+ goto notify_child;
+
+ return __fsnotify_parent(dentry, mask, data, data_type);
+
+notify_child:
+ return fsnotify(mask, data, data_type, NULL, NULL, inode, 0);
}
-/* simple call site for access decisions */
-static inline int fsnotify_perm(struct file *file, int mask)
+/*
+ * Simple wrappers to consolidate calls to fsnotify_parent() when an event
+ * is on a file/dentry.
+ */
+static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask)
+{
+ fsnotify_parent(dentry, mask, dentry, FSNOTIFY_EVENT_DENTRY);
+}
+
+static inline int fsnotify_file(struct file *file, __u32 mask)
{
const struct path *path = &file->f_path;
- /*
- * Do not use file_inode() here or anywhere in this file to get the
- * inode. That would break *notity on overlayfs.
- */
- struct inode *inode = path->dentry->d_inode;
- __u32 fsnotify_mask = 0;
- int ret;
if (file->f_mode & FMODE_NONOTIFY)
return 0;
+
+ return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
+}
+
+/* Simple call site for access decisions */
+static inline int fsnotify_perm(struct file *file, int mask)
+{
+ int ret;
+ __u32 fsnotify_mask = 0;
+
if (!(mask & (MAY_READ | MAY_OPEN)))
return 0;
- if (mask & MAY_OPEN)
+
+ if (mask & MAY_OPEN) {
fsnotify_mask = FS_OPEN_PERM;
- else if (mask & MAY_READ)
- fsnotify_mask = FS_ACCESS_PERM;
- else
- BUG();
- ret = fsnotify_parent(path, NULL, fsnotify_mask);
- if (ret)
- return ret;
+ if (file->f_flags & __FMODE_EXEC) {
+ ret = fsnotify_file(file, FS_OPEN_EXEC_PERM);
+
+ if (ret)
+ return ret;
+ }
+ } else if (mask & MAY_READ) {
+ fsnotify_mask = FS_ACCESS_PERM;
+ }
- return fsnotify(inode, fsnotify_mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+ return fsnotify_file(file, fsnotify_mask);
}
/*
@@ -60,40 +129,42 @@ static inline int fsnotify_perm(struct file *file, int mask)
*/
static inline void fsnotify_link_count(struct inode *inode)
{
- fsnotify(inode, FS_ATTRIB, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ fsnotify_inode(inode, FS_ATTRIB);
}
/*
* fsnotify_move - file old_name at old_dir was moved to new_name at new_dir
*/
static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
- const unsigned char *old_name,
- int isdir, struct inode *target, struct dentry *moved)
+ const struct qstr *old_name,
+ int isdir, struct inode *target,
+ struct dentry *moved)
{
struct inode *source = moved->d_inode;
u32 fs_cookie = fsnotify_get_cookie();
- __u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM);
- __u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO);
- const unsigned char *new_name = moved->d_name.name;
-
- if (old_dir == new_dir)
- old_dir_mask |= FS_DN_RENAME;
+ __u32 old_dir_mask = FS_MOVED_FROM;
+ __u32 new_dir_mask = FS_MOVED_TO;
+ __u32 rename_mask = FS_RENAME;
+ const struct qstr *new_name = &moved->d_name;
if (isdir) {
old_dir_mask |= FS_ISDIR;
new_dir_mask |= FS_ISDIR;
+ rename_mask |= FS_ISDIR;
}
- fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name,
- fs_cookie);
- fsnotify(new_dir, new_dir_mask, source, FSNOTIFY_EVENT_INODE, new_name,
- fs_cookie);
+ /* Event with information about both old and new parent+name */
+ fsnotify_name(rename_mask, moved, FSNOTIFY_EVENT_DENTRY,
+ old_dir, old_name, 0);
+
+ fsnotify_name(old_dir_mask, source, FSNOTIFY_EVENT_INODE,
+ old_dir, old_name, fs_cookie);
+ fsnotify_name(new_dir_mask, source, FSNOTIFY_EVENT_INODE,
+ new_dir, new_name, fs_cookie);
if (target)
fsnotify_link_count(target);
-
- if (source)
- fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ fsnotify_inode(source, FS_MOVE_SELF);
audit_inode_child(new_dir, moved, AUDIT_TYPE_CHILD_CREATE);
}
@@ -114,61 +185,120 @@ static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt)
}
/*
- * fsnotify_nameremove - a filename was removed from a directory
- */
-static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
-{
- __u32 mask = FS_DELETE;
-
- if (isdir)
- mask |= FS_ISDIR;
-
- fsnotify_parent(NULL, dentry, mask);
-}
-
-/*
* fsnotify_inoderemove - an inode is going away
*/
static inline void fsnotify_inoderemove(struct inode *inode)
{
- fsnotify(inode, FS_DELETE_SELF, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ fsnotify_inode(inode, FS_DELETE_SELF);
__fsnotify_inode_delete(inode);
}
/*
* fsnotify_create - 'name' was linked in
+ *
+ * Caller must make sure that dentry->d_name is stable.
+ * Note: some filesystems (e.g. kernfs) leave @dentry negative and instantiate
+ * ->d_inode later
*/
-static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
+static inline void fsnotify_create(struct inode *dir, struct dentry *dentry)
{
- audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE);
+ audit_inode_child(dir, dentry, AUDIT_TYPE_CHILD_CREATE);
- fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
+ fsnotify_dirent(dir, dentry, FS_CREATE);
}
/*
* fsnotify_link - new hardlink in 'inode' directory
+ *
+ * Caller must make sure that new_dentry->d_name is stable.
* Note: We have to pass also the linked inode ptr as some filesystems leave
* new_dentry->d_inode NULL and instantiate inode pointer later
*/
-static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry)
+static inline void fsnotify_link(struct inode *dir, struct inode *inode,
+ struct dentry *new_dentry)
{
fsnotify_link_count(inode);
audit_inode_child(dir, new_dentry, AUDIT_TYPE_CHILD_CREATE);
- fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, new_dentry->d_name.name, 0);
+ fsnotify_name(FS_CREATE, inode, FSNOTIFY_EVENT_INODE,
+ dir, &new_dentry->d_name, 0);
+}
+
+/*
+ * fsnotify_delete - @dentry was unlinked and unhashed
+ *
+ * Caller must make sure that dentry->d_name is stable.
+ *
+ * Note: unlike fsnotify_unlink(), we have to pass also the unlinked inode
+ * as this may be called after d_delete() and old_dentry may be negative.
+ */
+static inline void fsnotify_delete(struct inode *dir, struct inode *inode,
+ struct dentry *dentry)
+{
+ __u32 mask = FS_DELETE;
+
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+ fsnotify_name(mask, inode, FSNOTIFY_EVENT_INODE, dir, &dentry->d_name,
+ 0);
+}
+
+/**
+ * d_delete_notify - delete a dentry and call fsnotify_delete()
+ * @dir: inode of the parent directory
+ * @dentry: The dentry to delete
+ *
+ * This helper is used to guarantee that the unlinked inode cannot be found
+ * by lookup of this name after the fsnotify_delete() event has been delivered.
+ */
+static inline void d_delete_notify(struct inode *dir, struct dentry *dentry)
+{
+ struct inode *inode = d_inode(dentry);
+
+ ihold(inode);
+ d_delete(dentry);
+ fsnotify_delete(dir, inode, dentry);
+ iput(inode);
+}
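
The ihold()/iput() pair is what provides that guarantee: the inode stays
pinned across d_delete(), so fsnotify_delete() can still report it even
though the dentry has turned negative. A sketch of a caller; the
entry-removal helper is a hypothetical stand-in for filesystem-specific work:

    static int example_unlink(struct inode *dir, struct dentry *dentry)
    {
        int err = example_remove_entry(dir, dentry); /* hypothetical */

        if (err)
            return err;

        /* emits FS_DELETE with the name before the inode can vanish */
        d_delete_notify(dir, dentry);
        return 0;
    }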
+
+/*
+ * fsnotify_unlink - 'name' was unlinked
+ *
+ * Caller must make sure that dentry->d_name is stable.
+ */
+static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry)
+{
+ if (WARN_ON_ONCE(d_is_negative(dentry)))
+ return;
+
+ fsnotify_delete(dir, d_inode(dentry), dentry);
}
/*
* fsnotify_mkdir - directory 'name' was created
+ *
+ * Caller must make sure that dentry->d_name is stable.
+ * Note: some filesystems (e.g. kernfs) leave @dentry negative and instantiate
+ * ->d_inode later
*/
-static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
+static inline void fsnotify_mkdir(struct inode *dir, struct dentry *dentry)
{
- __u32 mask = (FS_CREATE | FS_ISDIR);
- struct inode *d_inode = dentry->d_inode;
+ audit_inode_child(dir, dentry, AUDIT_TYPE_CHILD_CREATE);
- audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE);
+ fsnotify_dirent(dir, dentry, FS_CREATE | FS_ISDIR);
+}
+
+/*
+ * fsnotify_rmdir - directory 'name' was removed
+ *
+ * Caller must make sure that dentry->d_name is stable.
+ */
+static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ if (WARN_ON_ONCE(d_is_negative(dentry)))
+ return;
- fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
+ fsnotify_delete(dir, d_inode(dentry), dentry);
}
/*
@@ -176,17 +306,7 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
*/
static inline void fsnotify_access(struct file *file)
{
- const struct path *path = &file->f_path;
- struct inode *inode = path->dentry->d_inode;
- __u32 mask = FS_ACCESS;
-
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
- if (!(file->f_mode & FMODE_NONOTIFY)) {
- fsnotify_parent(path, NULL, mask);
- fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
- }
+ fsnotify_file(file, FS_ACCESS);
}
/*
@@ -194,17 +314,7 @@ static inline void fsnotify_access(struct file *file)
*/
static inline void fsnotify_modify(struct file *file)
{
- const struct path *path = &file->f_path;
- struct inode *inode = path->dentry->d_inode;
- __u32 mask = FS_MODIFY;
-
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
- if (!(file->f_mode & FMODE_NONOTIFY)) {
- fsnotify_parent(path, NULL, mask);
- fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
- }
+ fsnotify_file(file, FS_MODIFY);
}
/*
@@ -212,15 +322,12 @@ static inline void fsnotify_modify(struct file *file)
*/
static inline void fsnotify_open(struct file *file)
{
- const struct path *path = &file->f_path;
- struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_OPEN;
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
+ if (file->f_flags & __FMODE_EXEC)
+ mask |= FS_OPEN_EXEC;
- fsnotify_parent(path, NULL, mask);
- fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+ fsnotify_file(file, mask);
}
/*
@@ -228,18 +335,10 @@ static inline void fsnotify_open(struct file *file)
*/
static inline void fsnotify_close(struct file *file)
{
- const struct path *path = &file->f_path;
- struct inode *inode = path->dentry->d_inode;
- fmode_t mode = file->f_mode;
- __u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;
+ __u32 mask = (file->f_mode & FMODE_WRITE) ? FS_CLOSE_WRITE :
+ FS_CLOSE_NOWRITE;
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
- if (!(file->f_mode & FMODE_NONOTIFY)) {
- fsnotify_parent(path, NULL, mask);
- fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
- }
+ fsnotify_file(file, mask);
}
/*
@@ -247,14 +346,7 @@ static inline void fsnotify_close(struct file *file)
*/
static inline void fsnotify_xattr(struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
- __u32 mask = FS_ATTRIB;
-
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
- fsnotify_parent(NULL, dentry, mask);
- fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ fsnotify_dentry(dentry, FS_ATTRIB);
}
/*
@@ -263,7 +355,6 @@ static inline void fsnotify_xattr(struct dentry *dentry)
*/
static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
{
- struct inode *inode = dentry->d_inode;
__u32 mask = 0;
if (ia_valid & ATTR_UID)
@@ -284,13 +375,21 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
if (ia_valid & ATTR_MODE)
mask |= FS_ATTRIB;
- if (mask) {
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
+ if (mask)
+ fsnotify_dentry(dentry, mask);
+}
- fsnotify_parent(NULL, dentry, mask);
- fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
- }
+static inline int fsnotify_sb_error(struct super_block *sb, struct inode *inode,
+ int error)
+{
+ struct fs_error_report report = {
+ .error = error,
+ .inode = inode,
+ .sb = sb,
+ };
+
+ return fsnotify(FS_ERROR, &report, FSNOTIFY_EVENT_ERROR,
+ NULL, NULL, NULL, 0);
}
#endif /* _LINUX_FS_NOTIFY_H */
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index c6c69318752b..d7d96c806bff 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Filesystem access notification for Linux
*
@@ -17,6 +18,9 @@
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/user_namespace.h>
+#include <linux/refcount.h>
+#include <linux/mempool.h>
+#include <linux/sched/mm.h>
/*
 * IN_* from inotify.h lines up EXACTLY with FS_*, this is so we can easily
@@ -36,44 +40,74 @@
#define FS_DELETE 0x00000200 /* Subfile was deleted */
#define FS_DELETE_SELF 0x00000400 /* Self was deleted */
#define FS_MOVE_SELF 0x00000800 /* Self was moved */
+#define FS_OPEN_EXEC 0x00001000 /* File was opened for exec */
#define FS_UNMOUNT 0x00002000 /* inode on umount fs */
#define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
+#define FS_ERROR 0x00008000 /* Filesystem Error (fanotify) */
+
+/*
+ * FS_IN_IGNORED overloads FS_ERROR. It is only used internally by inotify
+ * which does not support FS_ERROR.
+ */
#define FS_IN_IGNORED 0x00008000 /* last inotify event here */
#define FS_OPEN_PERM 0x00010000 /* open event in a permission hook */
#define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */
+#define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */
-#define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */
-#define FS_ISDIR 0x40000000 /* event occurred against dir */
-#define FS_IN_ONESHOT 0x80000000 /* only send event once */
+/*
+ * Set on inode mark that cares about things that happen to its children.
+ * Always set for dnotify and inotify.
+ * Set on inode/sb/mount marks that care about parent/name info.
+ */
+#define FS_EVENT_ON_CHILD 0x08000000
-#define FS_DN_RENAME 0x10000000 /* file renamed */
+#define FS_RENAME 0x10000000 /* File was renamed */
#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */
+#define FS_ISDIR 0x40000000 /* event occurred against dir */
-/* This inode cares about things that happen to its children. Always set for
- * dnotify and inotify. */
-#define FS_EVENT_ON_CHILD 0x08000000
+#define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO)
-/* This is a list of all events that may get sent to a parernt based on fs event
- * happening to inodes inside that directory */
-#define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\
- FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\
- FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\
- FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM)
+/*
+ * Directory entry modification events - reported only to directory
+ * where entry is modified and not to a watching parent.
+ * The watching parent may get an FS_ATTRIB|FS_EVENT_ON_CHILD event
+ * when a directory entry inside a child subdir changes.
+ */
+#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE | FS_RENAME)
-#define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO)
+#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \
+ FS_OPEN_EXEC_PERM)
+
+/*
+ * This is a list of all events that may get sent to a parent that is watching
+ * with flag FS_EVENT_ON_CHILD based on fs event on a child of that directory.
+ */
+#define FS_EVENTS_POSS_ON_CHILD (ALL_FSNOTIFY_PERM_EVENTS | \
+ FS_ACCESS | FS_MODIFY | FS_ATTRIB | \
+ FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | \
+ FS_OPEN | FS_OPEN_EXEC)
-#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM)
+/*
+ * This is a list of all events that may get sent with the parent inode as the
+ * @to_tell argument of fsnotify().
+ * It may include events that can be sent to an inode/sb/mount mark, but cannot
+ * be sent to a parent watching children.
+ */
+#define FS_EVENTS_POSS_TO_PARENT (FS_EVENTS_POSS_ON_CHILD)
-#define ALL_FSNOTIFY_EVENTS (FS_ACCESS | FS_MODIFY | FS_ATTRIB | \
- FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN | \
- FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE | \
- FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | \
+/* Events that can be reported to backends */
+#define ALL_FSNOTIFY_EVENTS (ALL_FSNOTIFY_DIRENT_EVENTS | \
+ FS_EVENTS_POSS_ON_CHILD | \
+ FS_DELETE_SELF | FS_MOVE_SELF | \
FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \
- FS_OPEN_PERM | FS_ACCESS_PERM | FS_EXCL_UNLINK | \
- FS_ISDIR | FS_IN_ONESHOT | FS_DN_RENAME | \
- FS_DN_MULTISHOT | FS_EVENT_ON_CHILD)
+ FS_ERROR)
+
+/* Extra flags that may be reported with event or control handling of events */
+#define ALL_FSNOTIFY_FLAGS (FS_ISDIR | FS_EVENT_ON_CHILD | FS_DN_MULTISHOT)
+
+#define ALL_FSNOTIFY_BITS (ALL_FSNOTIFY_EVENTS | ALL_FSNOTIFY_FLAGS)
struct fsnotify_group;
struct fsnotify_event;
@@ -82,28 +116,53 @@ struct fsnotify_event_private_data;
struct fsnotify_fname;
struct fsnotify_iter_info;
+struct mem_cgroup;
+
/*
 * Each group must define these ops. The fsnotify infrastructure will call
* these operations for each relevant group.
*
* handle_event - main call for a group to handle an fs event
+ * @group: group to notify
+ * @mask: event type and flags
+ * @data: object that event happened on
+ * @data_type: type of object for fanotify_data_XXX() accessors
+ * @dir: optional directory associated with event -
+ * if @file_name is not NULL, this is the directory that
+ * @file_name is relative to
+ * @file_name: optional file name associated with event
+ * @cookie: inotify rename cookie
+ * @iter_info: array of marks from this group that are interested in the event
+ *
+ * handle_inode_event - simple variant of handle_event() for groups that only
+ * have inode marks and don't have ignore mask
+ * @mark: mark to notify
+ * @mask: event type and flags
+ * @inode: inode that event happened on
+ * @dir: optional directory associated with event -
+ * if @file_name is not NULL, this is the directory that
+ * @file_name is relative to.
+ * Either @inode or @dir must be non-NULL.
+ * @file_name: optional file name associated with event
+ * @cookie: inotify rename cookie
+ *
* free_group_priv - called when a group refcnt hits 0 to clean up the private union
* freeing_mark - called when a mark is being destroyed for some reason. The group
- * MUST be holding a reference on each mark and that reference must be
- * dropped in this function. inotify uses this function to send
- * userspace messages that marks have been removed.
+ * MUST be holding a reference on each mark and that reference must be
+ * dropped in this function. inotify uses this function to send
+ * userspace messages that marks have been removed.
*/
struct fsnotify_ops {
- int (*handle_event)(struct fsnotify_group *group,
- struct inode *inode,
- struct fsnotify_mark *inode_mark,
- struct fsnotify_mark *vfsmount_mark,
- u32 mask, const void *data, int data_type,
- const unsigned char *file_name, u32 cookie,
+ int (*handle_event)(struct fsnotify_group *group, u32 mask,
+ const void *data, int data_type, struct inode *dir,
+ const struct qstr *file_name, u32 cookie,
struct fsnotify_iter_info *iter_info);
+ int (*handle_inode_event)(struct fsnotify_mark *mark, u32 mask,
+ struct inode *inode, struct inode *dir,
+ const struct qstr *file_name, u32 cookie);
void (*free_group_priv)(struct fsnotify_group *group);
void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
- void (*free_event)(struct fsnotify_event *event);
+ void (*free_event)(struct fsnotify_group *group, struct fsnotify_event *event);
/* called on final put+free to free memory */
void (*free_mark)(struct fsnotify_mark *mark);
};
@@ -115,9 +174,6 @@ struct fsnotify_ops {
*/
struct fsnotify_event {
struct list_head list;
- /* inode may ONLY be dereferenced during handle_event(). */
- struct inode *inode; /* either the inode the event happened to or its parent */
- u32 mask; /* the type of access, bitwise OR for FS_* event types */
};
/*
@@ -127,6 +183,8 @@ struct fsnotify_event {
* everything will be cleaned up.
*/
struct fsnotify_group {
+ const struct fsnotify_ops *ops; /* how this group handles things */
+
/*
* How the refcnt is used is up to each group. When the refcnt hits 0
* fsnotify will clean up all of the resources associated with this group.
@@ -135,9 +193,7 @@ struct fsnotify_group {
* inotify_init() and the refcnt will hit 0 only when that fd has been
* closed.
*/
- atomic_t refcnt; /* things with interest in this group */
-
- const struct fsnotify_ops *ops; /* how this group handles things */
+ refcount_t refcnt; /* things with interest in this group */
/* needed to send notification to userspace */
spinlock_t notification_lock; /* protect the notification_list */
@@ -155,11 +211,16 @@ struct fsnotify_group {
unsigned int priority;
bool shutdown; /* group is being shut down, don't queue more events */
+#define FSNOTIFY_GROUP_USER 0x01 /* user allocated group */
+#define FSNOTIFY_GROUP_DUPS 0x02 /* allow multiple marks per object */
+#define FSNOTIFY_GROUP_NOFS 0x04 /* group lock is not direct reclaim safe */
+ int flags;
+ unsigned int owner_flags; /* stored flags of mark_mutex owner */
+
/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
struct mutex mark_mutex; /* protect marks_list */
- atomic_t num_marks; /* 1 for each mark and 1 for not being
- * past the point of no return when freeing
- * a group */
+ atomic_t user_waits; /* Number of tasks waiting for user
+ * response */
struct list_head marks_list; /* all inode marks for this group */
struct fasync_struct *fsn_fa; /* async notification */
@@ -167,8 +228,8 @@ struct fsnotify_group {
struct fsnotify_event *overflow_event; /* Event we queue when the
* notification list is too
* full */
- atomic_t user_waits; /* Number of tasks waiting for user
- * response */
+
+ struct mem_cgroup *memcg; /* memcg to charge allocations */
/* groups can define private fields here or use the void *private */
union {
@@ -182,46 +243,246 @@ struct fsnotify_group {
#endif
#ifdef CONFIG_FANOTIFY
struct fanotify_group_private_data {
-#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ /* Hash table of events for merge */
+ struct hlist_head *merge_hash;
/* allows a group to block waiting for a userspace response */
struct list_head access_list;
wait_queue_head_t access_waitq;
-#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
- int f_flags;
- unsigned int max_marks;
- struct user_struct *user;
+ int flags; /* flags from fanotify_init() */
+ int f_flags; /* event_f_flags from fanotify_init() */
+ struct ucounts *ucounts;
+ mempool_t error_events_pool;
} fanotify_data;
#endif /* CONFIG_FANOTIFY */
};
};
-/* when calling fsnotify tell it if the data is a path or inode */
-#define FSNOTIFY_EVENT_NONE 0
-#define FSNOTIFY_EVENT_PATH 1
-#define FSNOTIFY_EVENT_INODE 2
+/*
+ * These helpers are used to prevent deadlock when reclaiming inodes with
+ * evictable marks of the same group that is allocating a new mark.
+ */
+static inline void fsnotify_group_lock(struct fsnotify_group *group)
+{
+ mutex_lock(&group->mark_mutex);
+ if (group->flags & FSNOTIFY_GROUP_NOFS)
+ group->owner_flags = memalloc_nofs_save();
+}
+
+static inline void fsnotify_group_unlock(struct fsnotify_group *group)
+{
+ if (group->flags & FSNOTIFY_GROUP_NOFS)
+ memalloc_nofs_restore(group->owner_flags);
+ mutex_unlock(&group->mark_mutex);
+}
+
+static inline void fsnotify_group_assert_locked(struct fsnotify_group *group)
+{
+ WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
+ if (group->flags & FSNOTIFY_GROUP_NOFS)
+ WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS));
+}
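
The NOFS handling exists to avoid a reclaim deadlock: if an allocation made
under mark_mutex entered direct reclaim, reclaim could try to evict an inode
holding one of this group's evictable marks and then block on the same mutex.
A minimal usage sketch, assuming a group created with FSNOTIFY_GROUP_NOFS:

    fsnotify_group_lock(group);     /* enters a NOFS allocation scope */
    /* allocations here cannot recurse into filesystem reclaim */
    err = fsnotify_add_inode_mark_locked(mark, inode, 0);
    fsnotify_group_unlock(group);   /* restores the previous scope */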
+
+/* When calling fsnotify tell it if the data is a path or inode */
+enum fsnotify_data_type {
+ FSNOTIFY_EVENT_NONE,
+ FSNOTIFY_EVENT_PATH,
+ FSNOTIFY_EVENT_INODE,
+ FSNOTIFY_EVENT_DENTRY,
+ FSNOTIFY_EVENT_ERROR,
+};
+
+struct fs_error_report {
+ int error;
+ struct inode *inode;
+ struct super_block *sb;
+};
+
+static inline struct inode *fsnotify_data_inode(const void *data, int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_INODE:
+ return (struct inode *)data;
+ case FSNOTIFY_EVENT_DENTRY:
+ return d_inode(data);
+ case FSNOTIFY_EVENT_PATH:
+ return d_inode(((const struct path *)data)->dentry);
+ case FSNOTIFY_EVENT_ERROR:
+ return ((struct fs_error_report *)data)->inode;
+ default:
+ return NULL;
+ }
+}
+
+static inline struct dentry *fsnotify_data_dentry(const void *data, int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_DENTRY:
+ /* Non const is needed for dget() */
+ return (struct dentry *)data;
+ case FSNOTIFY_EVENT_PATH:
+ return ((const struct path *)data)->dentry;
+ default:
+ return NULL;
+ }
+}
+
+static inline const struct path *fsnotify_data_path(const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_PATH:
+ return data;
+ default:
+ return NULL;
+ }
+}
+
+static inline struct super_block *fsnotify_data_sb(const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_INODE:
+ return ((struct inode *)data)->i_sb;
+ case FSNOTIFY_EVENT_DENTRY:
+ return ((struct dentry *)data)->d_sb;
+ case FSNOTIFY_EVENT_PATH:
+ return ((const struct path *)data)->dentry->d_sb;
+ case FSNOTIFY_EVENT_ERROR:
+ return ((struct fs_error_report *) data)->sb;
+ default:
+ return NULL;
+ }
+}
+
+static inline struct fs_error_report *fsnotify_data_error_report(
+ const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_ERROR:
+ return (struct fs_error_report *) data;
+ default:
+ return NULL;
+ }
+}
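
These accessors let a backend stay agnostic about how an event was reported.
A hedged sketch of a handle_event() implementation using them; the backend
and its report helpers are made-up names:

    static int example_handle_event(struct fsnotify_group *group, u32 mask,
                                    const void *data, int data_type,
                                    struct inode *dir,
                                    const struct qstr *file_name, u32 cookie,
                                    struct fsnotify_iter_info *iter_info)
    {
        const struct path *path = fsnotify_data_path(data, data_type);
        struct inode *inode = fsnotify_data_inode(data, data_type);

        /* path is only non-NULL for FSNOTIFY_EVENT_PATH events */
        if (path)
            return example_report_path(group, mask, path);  /* hypothetical */

        return example_report_inode(group, mask, inode);    /* hypothetical */
    }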
+
+/*
+ * Index to merged marks iterator array that correlates to a type of watch.
+ * The type of watched object can be deduced from the iterator type, but not
+ * the other way around, because an event can match different watched objects
+ * of the same object type.
+ * For example, both parent and child are watching an object of type inode.
+ */
+enum fsnotify_iter_type {
+ FSNOTIFY_ITER_TYPE_INODE,
+ FSNOTIFY_ITER_TYPE_VFSMOUNT,
+ FSNOTIFY_ITER_TYPE_SB,
+ FSNOTIFY_ITER_TYPE_PARENT,
+ FSNOTIFY_ITER_TYPE_INODE2,
+ FSNOTIFY_ITER_TYPE_COUNT
+};
+
+/* The type of object that a mark is attached to */
+enum fsnotify_obj_type {
+ FSNOTIFY_OBJ_TYPE_ANY = -1,
+ FSNOTIFY_OBJ_TYPE_INODE,
+ FSNOTIFY_OBJ_TYPE_VFSMOUNT,
+ FSNOTIFY_OBJ_TYPE_SB,
+ FSNOTIFY_OBJ_TYPE_COUNT,
+ FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT
+};
+
+static inline bool fsnotify_valid_obj_type(unsigned int obj_type)
+{
+ return (obj_type < FSNOTIFY_OBJ_TYPE_COUNT);
+}
+
+struct fsnotify_iter_info {
+ struct fsnotify_mark *marks[FSNOTIFY_ITER_TYPE_COUNT];
+ struct fsnotify_group *current_group;
+ unsigned int report_mask;
+ int srcu_idx;
+};
+
+static inline bool fsnotify_iter_should_report_type(
+ struct fsnotify_iter_info *iter_info, int iter_type)
+{
+ return (iter_info->report_mask & (1U << iter_type));
+}
+
+static inline void fsnotify_iter_set_report_type(
+ struct fsnotify_iter_info *iter_info, int iter_type)
+{
+ iter_info->report_mask |= (1U << iter_type);
+}
+
+static inline struct fsnotify_mark *fsnotify_iter_mark(
+ struct fsnotify_iter_info *iter_info, int iter_type)
+{
+ if (fsnotify_iter_should_report_type(iter_info, iter_type))
+ return iter_info->marks[iter_type];
+ return NULL;
+}
+
+static inline int fsnotify_iter_step(struct fsnotify_iter_info *iter, int type,
+ struct fsnotify_mark **markp)
+{
+ while (type < FSNOTIFY_ITER_TYPE_COUNT) {
+ *markp = fsnotify_iter_mark(iter, type);
+ if (*markp)
+ break;
+ type++;
+ }
+ return type;
+}
+
+#define FSNOTIFY_ITER_FUNCS(name, NAME) \
+static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \
+ struct fsnotify_iter_info *iter_info) \
+{ \
+ return fsnotify_iter_mark(iter_info, FSNOTIFY_ITER_TYPE_##NAME); \
+}
+
+FSNOTIFY_ITER_FUNCS(inode, INODE)
+FSNOTIFY_ITER_FUNCS(parent, PARENT)
+FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT)
+FSNOTIFY_ITER_FUNCS(sb, SB)
+
+#define fsnotify_foreach_iter_type(type) \
+ for (type = 0; type < FSNOTIFY_ITER_TYPE_COUNT; type++)
+#define fsnotify_foreach_iter_mark_type(iter, mark, type) \
+ for (type = 0; \
+ type = fsnotify_iter_step(iter, type, &mark), \
+ type < FSNOTIFY_ITER_TYPE_COUNT; \
+ type++)
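
The second macro hides the report_mask bookkeeping, so walking the marks that
matched an event stays a plain loop. A short sketch, assuming an iter_info
already populated by the fsnotify core:

    struct fsnotify_mark *mark;
    int type;

    fsnotify_foreach_iter_mark_type(iter_info, mark, type) {
        /* mark is non-NULL here; type says what kind of watch matched */
        pr_debug("mark %p matched as iter type %d\n", mark, type);
    }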
+
+/*
+ * fsnotify_connp_t is what we embed in objects to which a connector can be
+ * attached. fsnotify_connp_t * is how we refer from a connector back to the
+ * object.
+ */
+struct fsnotify_mark_connector;
+typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t;
/*
- * Inode / vfsmount point to this structure which tracks all marks attached to
- * the inode / vfsmount. The reference to inode / vfsmount is held by this
+ * Inode/vfsmount/sb point to this structure which tracks all marks attached to
+ * the inode/vfsmount/sb. The reference to inode/vfsmount/sb is held by this
* structure. We destroy this structure when there are no more marks attached
* to it. The structure is protected by fsnotify_mark_srcu.
*/
struct fsnotify_mark_connector {
spinlock_t lock;
-#define FSNOTIFY_OBJ_TYPE_INODE 0x01
-#define FSNOTIFY_OBJ_TYPE_VFSMOUNT 0x02
-#define FSNOTIFY_OBJ_ALL_TYPES (FSNOTIFY_OBJ_TYPE_INODE | \
- FSNOTIFY_OBJ_TYPE_VFSMOUNT)
- unsigned int flags; /* Type of object [lock] */
- union { /* Object pointer [lock] */
- struct inode *inode;
- struct vfsmount *mnt;
- };
+ unsigned short type; /* Type of object [lock] */
+#define FSNOTIFY_CONN_FLAG_HAS_FSID 0x01
+#define FSNOTIFY_CONN_FLAG_HAS_IREF 0x02
+ unsigned short flags; /* flags [lock] */
+ __kernel_fsid_t fsid; /* fsid of filesystem containing object */
union {
- struct hlist_head list;
+ /* Object pointer [lock] */
+ fsnotify_connp_t *obj;
/* Used listing heads to free after srcu period expires */
struct fsnotify_mark_connector *destroy_next;
};
+ struct hlist_head list;
};
/*
@@ -243,11 +504,11 @@ struct fsnotify_mark {
__u32 mask;
/* We hold one for presence in g_list. Also one ref for each 'thing'
* in kernel that found and may be using this mark. */
- atomic_t refcnt;
+ refcount_t refcnt;
/* Group this mark is for. Set on mark creation, stable until last ref
* is dropped */
struct fsnotify_group *group;
- /* List of marks by group->i_fsnotify_marks. Also reused for queueing
+ /* List of marks by group->marks_list. Also reused for queueing
* mark into destroy_list when it's waiting for the end of SRCU period
* before it can be freed. [group->mark_mutex] */
struct list_head g_list;
@@ -257,11 +518,18 @@ struct fsnotify_mark {
struct hlist_node obj_list;
/* Head of list of marks for an object [mark ref] */
struct fsnotify_mark_connector *connector;
- /* Events types to ignore [mark->lock, group->mark_mutex] */
- __u32 ignored_mask;
-#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x01
-#define FSNOTIFY_MARK_FLAG_ALIVE 0x02
-#define FSNOTIFY_MARK_FLAG_ATTACHED 0x04
+ /* Events types and flags to ignore [mark->lock, group->mark_mutex] */
+ __u32 ignore_mask;
+ /* General fsnotify mark flags */
+#define FSNOTIFY_MARK_FLAG_ALIVE 0x0001
+#define FSNOTIFY_MARK_FLAG_ATTACHED 0x0002
+ /* inotify mark flags */
+#define FSNOTIFY_MARK_FLAG_EXCL_UNLINK 0x0010
+#define FSNOTIFY_MARK_FLAG_IN_ONESHOT 0x0020
+ /* fanotify mark flags */
+#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x0100
+#define FSNOTIFY_MARK_FLAG_NO_IREF 0x0200
+#define FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS 0x0400
unsigned int flags; /* flags [mark->lock] */
};
@@ -270,13 +538,29 @@ struct fsnotify_mark {
/* called from the vfs helpers */
/* main fsnotify call to send events */
-extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
- const unsigned char *name, u32 cookie);
-extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask);
+extern int fsnotify(__u32 mask, const void *data, int data_type,
+ struct inode *dir, const struct qstr *name,
+ struct inode *inode, u32 cookie);
+extern int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data,
+ int data_type);
extern void __fsnotify_inode_delete(struct inode *inode);
extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
+extern void fsnotify_sb_delete(struct super_block *sb);
extern u32 fsnotify_get_cookie(void);
+static inline __u32 fsnotify_parent_needed_mask(__u32 mask)
+{
+ /* FS_EVENT_ON_CHILD is set on marks that want parent/name info */
+ if (!(mask & FS_EVENT_ON_CHILD))
+ return 0;
+ /*
+ * This object might be watched by a mark that cares about parent/name
+ * info, does it care about the specific set of events that can be
+ * reported with parent/name info?
+ */
+ return mask & FS_EVENTS_POSS_TO_PARENT;
+}
+
static inline int fsnotify_inode_watches_children(struct inode *inode)
{
/* FS_EVENT_ON_CHILD is set if the inode may care */
@@ -311,7 +595,9 @@ static inline void fsnotify_update_flags(struct dentry *dentry)
/* called from fsnotify listeners, such as fanotify or dnotify */
/* create a new group */
-extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops);
+extern struct fsnotify_group *fsnotify_alloc_group(
+ const struct fsnotify_ops *ops,
+ int flags);
/* get reference to a group */
extern void fsnotify_get_group(struct fsnotify_group *group);
/* drop reference on a group from fsnotify_alloc_group */
@@ -326,32 +612,183 @@ extern int fsnotify_fasync(int fd, struct file *file, int on);
extern void fsnotify_destroy_event(struct fsnotify_group *group,
struct fsnotify_event *event);
/* attach the event to the group notification queue */
-extern int fsnotify_add_event(struct fsnotify_group *group,
- struct fsnotify_event *event,
- int (*merge)(struct list_head *,
- struct fsnotify_event *));
-/* true if the group notification queue is empty */
+extern int fsnotify_insert_event(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ int (*merge)(struct fsnotify_group *,
+ struct fsnotify_event *),
+ void (*insert)(struct fsnotify_group *,
+ struct fsnotify_event *));
+
+static inline int fsnotify_add_event(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ int (*merge)(struct fsnotify_group *,
+ struct fsnotify_event *))
+{
+ return fsnotify_insert_event(group, event, merge, NULL);
+}
+
+/* Queue overflow event to a notification group */
+static inline void fsnotify_queue_overflow(struct fsnotify_group *group)
+{
+ fsnotify_add_event(group, group->overflow_event, NULL);
+}
+
+static inline bool fsnotify_is_overflow_event(u32 mask)
+{
+ return mask & FS_Q_OVERFLOW;
+}
+
+static inline bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
+{
+ assert_spin_locked(&group->notification_lock);
+
+ return list_empty(&group->notification_list);
+}
+
-extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
/* return, but do not dequeue the first event on the notification queue */
extern struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group);
/* return AND dequeue the first event on the notification queue */
extern struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group);
+/* Remove event queued in the notification list */
+extern void fsnotify_remove_queued_event(struct fsnotify_group *group,
+ struct fsnotify_event *event);
/* functions used to manipulate the marks attached to inodes */
+/*
+ * Canonical "ignore mask" including event flags.
+ *
+ * Note the subtle semantic difference from the legacy ->ignored_mask.
+ * ->ignored_mask traditionally only meant which events should be ignored,
+ * while ->ignore_mask also includes flags regarding the type of objects on
+ * which events should be ignored.
+ */
+static inline __u32 fsnotify_ignore_mask(struct fsnotify_mark *mark)
+{
+ __u32 ignore_mask = mark->ignore_mask;
+
+ /* The event flags in ignore mask take effect */
+ if (mark->flags & FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS)
+ return ignore_mask;
+
+ /*
+ * Legacy behavior:
+ * - Always ignore events on dir
+ * - Ignore events on child if parent is watching children
+ */
+ ignore_mask |= FS_ISDIR;
+ ignore_mask &= ~FS_EVENT_ON_CHILD;
+ ignore_mask |= mark->mask & FS_EVENT_ON_CHILD;
+
+ return ignore_mask;
+}
+
+/* Legacy ignored_mask - only event types to ignore */
+static inline __u32 fsnotify_ignored_events(struct fsnotify_mark *mark)
+{
+ return mark->ignore_mask & ALL_FSNOTIFY_EVENTS;
+}
+
+/*
+ * Check if mask (or ignore mask) should be applied depending if victim is a
+ * directory and whether it is reported to a watching parent.
+ */
+static inline bool fsnotify_mask_applicable(__u32 mask, bool is_dir,
+ int iter_type)
+{
+ /* Should mask be applied to a directory? */
+ if (is_dir && !(mask & FS_ISDIR))
+ return false;
+
+ /* Should mask be applied to a child? */
+ if (iter_type == FSNOTIFY_ITER_TYPE_PARENT &&
+ !(mask & FS_EVENT_ON_CHILD))
+ return false;
+
+ return true;
+}
+
+/*
+ * Effective ignore mask taking into account if event victim is a
+ * directory and whether it is reported to a watching parent.
+ */
+static inline __u32 fsnotify_effective_ignore_mask(struct fsnotify_mark *mark,
+ bool is_dir, int iter_type)
+{
+ __u32 ignore_mask = fsnotify_ignored_events(mark);
+
+ if (!ignore_mask)
+ return 0;
+
+ /* For non-dir and non-child, no need to consult the event flags */
+ if (!is_dir && iter_type != FSNOTIFY_ITER_TYPE_PARENT)
+ return ignore_mask;
+
+ ignore_mask = fsnotify_ignore_mask(mark);
+ if (!fsnotify_mask_applicable(ignore_mask, is_dir, iter_type))
+ return 0;
+
+ return ignore_mask & ALL_FSNOTIFY_EVENTS;
+}
+
+/* Get mask for calculating object interest taking ignore mask into account */
+static inline __u32 fsnotify_calc_mask(struct fsnotify_mark *mark)
+{
+ __u32 mask = mark->mask;
+
+ if (!fsnotify_ignored_events(mark))
+ return mask;
+
+ /* Interest in FS_MODIFY may be needed for clearing ignore mask */
+ if (!(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
+ mask |= FS_MODIFY;
+
+ /*
+ * If mark is interested in ignoring events on children, the object must
+ * show interest in those events for fsnotify_parent() to notice it.
+ */
+ return mask | mark->ignore_mask;
+}
+
+/* Get mask of events for a list of marks */
+extern __u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn);
/* Calculate mask of events for a list of marks */
extern void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn);
extern void fsnotify_init_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group);
/* Find mark belonging to given group in the list of marks */
-extern struct fsnotify_mark *fsnotify_find_mark(
- struct fsnotify_mark_connector __rcu **connp,
- struct fsnotify_group *group);
-/* attach the mark to the inode or vfsmount */
-extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct inode *inode,
- struct vfsmount *mnt, int allow_dups);
+extern struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp,
+ struct fsnotify_group *group);
+/* Get cached fsid of filesystem containing object */
+extern int fsnotify_get_conn_fsid(const struct fsnotify_mark_connector *conn,
+ __kernel_fsid_t *fsid);
+/* attach the mark to the object */
+extern int fsnotify_add_mark(struct fsnotify_mark *mark,
+ fsnotify_connp_t *connp, unsigned int obj_type,
+ int add_flags, __kernel_fsid_t *fsid);
extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
- struct inode *inode, struct vfsmount *mnt, int allow_dups);
+ fsnotify_connp_t *connp,
+ unsigned int obj_type, int add_flags,
+ __kernel_fsid_t *fsid);
+
+/* attach the mark to the inode */
+static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
+ struct inode *inode,
+ int add_flags)
+{
+ return fsnotify_add_mark(mark, &inode->i_fsnotify_marks,
+ FSNOTIFY_OBJ_TYPE_INODE, add_flags, NULL);
+}
+static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark,
+ struct inode *inode,
+ int add_flags)
+{
+ return fsnotify_add_mark_locked(mark, &inode->i_fsnotify_marks,
+ FSNOTIFY_OBJ_TYPE_INODE, add_flags,
+ NULL);
+}
+
/* given a group and a mark, flag mark to be freed when all references are dropped */
extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group);
@@ -359,8 +796,11 @@ extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
extern void fsnotify_detach_mark(struct fsnotify_mark *mark);
/* free mark */
extern void fsnotify_free_mark(struct fsnotify_mark *mark);
-/* run all the marks in a group, and clear all of the marks attached to given object type */
-extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned int type);
+/* Wait until all marks queued for destruction are destroyed */
+extern void fsnotify_wait_marks_destroyed(void);
+/* Clear all of the marks of a group attached to a given object type */
+extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
+ unsigned int obj_type);
/* run all the marks in a group, and clear all of the vfsmount marks */
static inline void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
{
@@ -371,25 +811,32 @@ static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *gr
{
fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE);
}
+/* run all the marks in a group, and clear all of the sb marks */
+static inline void fsnotify_clear_sb_marks_by_group(struct fsnotify_group *group)
+{
+ fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_SB);
+}
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
-extern void fsnotify_unmount_inodes(struct super_block *sb);
extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info);
-/* put here because inotify does some weird stuff when destroying watches */
-extern void fsnotify_init_event(struct fsnotify_event *event,
- struct inode *to_tell, u32 mask);
+static inline void fsnotify_init_event(struct fsnotify_event *event)
+{
+ INIT_LIST_HEAD(&event->list);
+}
#else
-static inline int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
- const unsigned char *name, u32 cookie)
+static inline int fsnotify(__u32 mask, const void *data, int data_type,
+ struct inode *dir, const struct qstr *name,
+ struct inode *inode, u32 cookie)
{
return 0;
}
-static inline int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask)
+static inline int __fsnotify_parent(struct dentry *dentry, __u32 mask,
+ const void *data, int data_type)
{
return 0;
}
@@ -400,6 +847,9 @@ static inline void __fsnotify_inode_delete(struct inode *inode)
static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
{}
+static inline void fsnotify_sb_delete(struct super_block *sb)
+{}
+
static inline void fsnotify_update_flags(struct dentry *dentry)
{}
diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h
new file mode 100644
index 000000000000..e76605d5b36e
--- /dev/null
+++ b/include/linux/fsverity.h
@@ -0,0 +1,318 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * fs-verity: read-only file-based authenticity protection
+ *
+ * This header declares the interface between the fs/verity/ support layer and
+ * filesystems that support fs-verity.
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#ifndef _LINUX_FSVERITY_H
+#define _LINUX_FSVERITY_H
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <crypto/hash_info.h>
+#include <crypto/sha2.h>
+#include <uapi/linux/fsverity.h>
+
+/*
+ * Largest digest size among all hash algorithms supported by fs-verity.
+ * Currently assumed to be <= size of fsverity_descriptor::root_hash.
+ */
+#define FS_VERITY_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
+
+/* Arbitrary limit to bound the kmalloc() size. Can be changed. */
+#define FS_VERITY_MAX_DESCRIPTOR_SIZE 16384
+
+/* Verity operations for filesystems */
+struct fsverity_operations {
+
+ /**
+ * Begin enabling verity on the given file.
+ *
+ * @filp: a readonly file descriptor for the file
+ *
+ * The filesystem must do any needed filesystem-specific preparations
+ * for enabling verity, e.g. evicting inline data. It also must return
+ * -EBUSY if verity is already being enabled on the given file.
+ *
+ * i_rwsem is held for write.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+ int (*begin_enable_verity)(struct file *filp);
+
+ /**
+ * End enabling verity on the given file.
+ *
+ * @filp: a readonly file descriptor for the file
+ * @desc: the verity descriptor to write, or NULL on failure
+ * @desc_size: size of verity descriptor, or 0 on failure
+ * @merkle_tree_size: total bytes the Merkle tree took up
+ *
+ * If desc == NULL, then enabling verity failed and the filesystem only
+ * must do any necessary cleanups. Else, it must also store the given
+ * verity descriptor to a fs-specific location associated with the inode
+ * and do any fs-specific actions needed to mark the inode as a verity
+ * inode, e.g. setting a bit in the on-disk inode. The filesystem is
+ * also responsible for setting the S_VERITY flag in the VFS inode.
+ *
+ * i_rwsem is held for write, but it may have been dropped between
+ * ->begin_enable_verity() and ->end_enable_verity().
+ *
+ * Return: 0 on success, -errno on failure
+ */
+ int (*end_enable_verity)(struct file *filp, const void *desc,
+ size_t desc_size, u64 merkle_tree_size);
+
+ /**
+ * Get the verity descriptor of the given inode.
+ *
+ * @inode: an inode with the S_VERITY flag set
+ * @buf: buffer in which to place the verity descriptor
+ * @bufsize: size of @buf, or 0 to retrieve the size only
+ *
+ * If bufsize == 0, then the size of the verity descriptor is returned.
+ * Otherwise the verity descriptor is written to 'buf' and its actual
+ * size is returned; -ERANGE is returned if it's too large. This may be
+ * called by multiple processes concurrently on the same inode.
+ *
+ * Return: the size on success, -errno on failure
+ */
+ int (*get_verity_descriptor)(struct inode *inode, void *buf,
+ size_t bufsize);
+
+ /**
+ * Read a Merkle tree page of the given inode.
+ *
+ * @inode: the inode
+ * @index: 0-based index of the page within the Merkle tree
+ * @num_ra_pages: The number of Merkle tree pages that should be
+ * prefetched starting at @index if the page at @index
+ * isn't already cached. Implementations may ignore this
+ * argument; it's only a performance optimization.
+ *
+ * This can be called at any time on an open verity file. It may be
+ * called by multiple processes concurrently, even with the same page.
+ *
+ * Note that this must retrieve a *page*, not necessarily a *block*.
+ *
+ * Return: the page on success, ERR_PTR() on failure
+ */
+ struct page *(*read_merkle_tree_page)(struct inode *inode,
+ pgoff_t index,
+ unsigned long num_ra_pages);
+
+ /**
+ * Write a Merkle tree block to the given inode.
+ *
+ * @inode: the inode for which the Merkle tree is being built
+ * @buf: the Merkle tree block to write
+ * @pos: the position of the block in the Merkle tree (in bytes)
+ * @size: the Merkle tree block size (in bytes)
+ *
+ * This is only called between ->begin_enable_verity() and
+ * ->end_enable_verity().
+ *
+ * Return: 0 on success, -errno on failure
+ */
+ int (*write_merkle_tree_block)(struct inode *inode, const void *buf,
+ u64 pos, unsigned int size);
+};
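
For a filesystem that keeps the Merkle tree in the page cache beyond i_size,
the read hook can be little more than a page-cache lookup. A hedged sketch,
not the implementation of any particular filesystem; the tree-start helper is
a hypothetical placeholder:

    static struct page *example_read_merkle_tree_page(struct inode *inode,
                                                      pgoff_t index,
                                                      unsigned long num_ra_pages)
    {
        pgoff_t tree_start = example_merkle_tree_start(inode); /* hypothetical */

        /* num_ra_pages is only a readahead hint; this sketch ignores it */
        return read_mapping_page(inode->i_mapping, tree_start + index, NULL);
    }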
+
+#ifdef CONFIG_FS_VERITY
+
+static inline struct fsverity_info *fsverity_get_info(const struct inode *inode)
+{
+ /*
+ * Pairs with the cmpxchg_release() in fsverity_set_info().
+ * I.e., another task may publish ->i_verity_info concurrently,
+ * executing a RELEASE barrier. We need to use smp_load_acquire() here
+ * to safely ACQUIRE the memory the other task published.
+ */
+ return smp_load_acquire(&inode->i_verity_info);
+}
+
+/* enable.c */
+
+int fsverity_ioctl_enable(struct file *filp, const void __user *arg);
+
+/* measure.c */
+
+int fsverity_ioctl_measure(struct file *filp, void __user *arg);
+int fsverity_get_digest(struct inode *inode,
+ u8 digest[FS_VERITY_MAX_DIGEST_SIZE],
+ enum hash_algo *alg);
+
+/* open.c */
+
+int __fsverity_file_open(struct inode *inode, struct file *filp);
+int __fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr);
+void __fsverity_cleanup_inode(struct inode *inode);
+
+/**
+ * fsverity_cleanup_inode() - free the inode's verity info, if present
+ * @inode: an inode being evicted
+ *
+ * Filesystems must call this on inode eviction to free ->i_verity_info.
+ */
+static inline void fsverity_cleanup_inode(struct inode *inode)
+{
+ if (inode->i_verity_info)
+ __fsverity_cleanup_inode(inode);
+}
+
+/* read_metadata.c */
+
+int fsverity_ioctl_read_metadata(struct file *filp, const void __user *uarg);
+
+/* verify.c */
+
+bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset);
+void fsverity_verify_bio(struct bio *bio);
+void fsverity_enqueue_verify_work(struct work_struct *work);
+
+#else /* !CONFIG_FS_VERITY */
+
+static inline struct fsverity_info *fsverity_get_info(const struct inode *inode)
+{
+ return NULL;
+}
+
+/* enable.c */
+
+static inline int fsverity_ioctl_enable(struct file *filp,
+ const void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+/* measure.c */
+
+static inline int fsverity_ioctl_measure(struct file *filp, void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fsverity_get_digest(struct inode *inode,
+ u8 digest[FS_VERITY_MAX_DIGEST_SIZE],
+ enum hash_algo *alg)
+{
+ return -EOPNOTSUPP;
+}
+
+/* open.c */
+
+static inline int __fsverity_file_open(struct inode *inode, struct file *filp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __fsverity_prepare_setattr(struct dentry *dentry,
+ struct iattr *attr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fsverity_cleanup_inode(struct inode *inode)
+{
+}
+
+/* read_metadata.c */
+
+static inline int fsverity_ioctl_read_metadata(struct file *filp,
+ const void __user *uarg)
+{
+ return -EOPNOTSUPP;
+}
+
+/* verify.c */
+
+static inline bool fsverity_verify_blocks(struct folio *folio, size_t len,
+ size_t offset)
+{
+ WARN_ON_ONCE(1);
+ return false;
+}
+
+static inline void fsverity_verify_bio(struct bio *bio)
+{
+ WARN_ON_ONCE(1);
+}
+
+static inline void fsverity_enqueue_verify_work(struct work_struct *work)
+{
+ WARN_ON_ONCE(1);
+}
+
+#endif /* !CONFIG_FS_VERITY */
+
+static inline bool fsverity_verify_folio(struct folio *folio)
+{
+ return fsverity_verify_blocks(folio, folio_size(folio), 0);
+}
+
+static inline bool fsverity_verify_page(struct page *page)
+{
+ return fsverity_verify_blocks(page_folio(page), PAGE_SIZE, 0);
+}
+
+/**
+ * fsverity_active() - do reads from the inode need to go through fs-verity?
+ * @inode: inode to check
+ *
+ * This checks whether ->i_verity_info has been set.
+ *
+ * Filesystems call this from ->readahead() to check whether the pages need to
+ * be verified or not. Don't use IS_VERITY() for this purpose; it's subject to
+ * a race condition where the file is being read concurrently with
+ * FS_IOC_ENABLE_VERITY completing. (S_VERITY is set before ->i_verity_info.)
+ *
+ * Return: true if reads need to go through fs-verity, otherwise false
+ */
+static inline bool fsverity_active(const struct inode *inode)
+{
+ return fsverity_get_info(inode) != NULL;
+}
+
+/**
+ * fsverity_file_open() - prepare to open a verity file
+ * @inode: the inode being opened
+ * @filp: the struct file being set up
+ *
+ * When opening a verity file, deny the open if it is for writing. Otherwise,
+ * set up the inode's ->i_verity_info if not already done.
+ *
+ * When combined with fscrypt, this must be called after fscrypt_file_open().
+ * Otherwise, we won't have the key set up to decrypt the verity metadata.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static inline int fsverity_file_open(struct inode *inode, struct file *filp)
+{
+ if (IS_VERITY(inode))
+ return __fsverity_file_open(inode, filp);
+ return 0;
+}
+
+/**
+ * fsverity_prepare_setattr() - prepare to change a verity inode's attributes
+ * @dentry: dentry through which the inode is being changed
+ * @attr: attributes to change
+ *
+ * Verity files are immutable, so deny truncates. This isn't covered by the
+ * open-time check because sys_truncate() takes a path, not a file descriptor.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static inline int fsverity_prepare_setattr(struct dentry *dentry,
+ struct iattr *attr)
+{
+ if (IS_VERITY(d_inode(dentry)))
+ return __fsverity_prepare_setattr(dentry, attr);
+ return 0;
+}
+
+#endif /* _LINUX_FSVERITY_H */
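/*
 * Example (editor's sketch, not part of this patch): how a filesystem is
 * expected to wire in the hooks above, loosely following ext4/f2fs. The
 * myfs_* names are illustrative; assumes <linux/fs.h> and <linux/fsverity.h>.
 */
static int myfs_open(struct inode *inode, struct file *filp)
{
	int err;

	/* Deny writes to verity files and set up ->i_verity_info. */
	err = fsverity_file_open(inode, filp);
	if (err)
		return err;
	return generic_file_open(inode, filp);
}

static void myfs_evict_inode(struct inode *inode)
{
	/* Frees ->i_verity_info, if present. */
	fsverity_cleanup_inode(inode);
}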
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 5857390ac35a..b23bdd414394 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1,12 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Ftrace header. For implementation details beyond the random comments
- * scattered below, see: Documentation/trace/ftrace-design.txt
+ * scattered below, see: Documentation/trace/ftrace-design.rst
*/
#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H
+#include <linux/trace_recursion.h>
#include <linux/trace_clock.h>
+#include <linux/jump_label.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
@@ -28,16 +31,37 @@
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif
+#ifdef CONFIG_TRACING
+extern void ftrace_boot_snapshot(void);
+#else
+static inline void ftrace_boot_snapshot(void) { }
+#endif
+
+struct ftrace_ops;
+struct ftrace_regs;
+struct dyn_ftrace;
+
+#ifdef CONFIG_FUNCTION_TRACER
/*
* If the arch's mcount caller does not support all of ftrace's
* features, then it must call an indirect function that
- * does. Or at least does enough to prevent any unwelcomed side effects.
+ * does. Or at least does enough to prevent any unwelcome side effects.
+ *
+ * Also define the function prototype that these architectures use
+ * to call ftrace_ops_list_func().
*/
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
+void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
#else
# define FTRACE_FORCE_LIST_FUNC 0
+void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
#endif
+extern const struct ftrace_ops ftrace_nop_ops;
+extern const struct ftrace_ops ftrace_list_ops;
+struct ftrace_ops *ftrace_find_unique_ops(struct dyn_ftrace *rec);
+#endif /* CONFIG_FUNCTION_TRACER */
/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
@@ -50,26 +74,100 @@ static inline void early_trace_init(void) { }
struct module;
struct ftrace_hash;
+struct ftrace_direct_func;
+
+#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
+ defined(CONFIG_DYNAMIC_FTRACE)
+const char *
+ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
+ unsigned long *off, char **modname, char *sym);
+#else
+static inline const char *
+ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
+ unsigned long *off, char **modname, char *sym)
+{
+ return NULL;
+}
+#endif
+
+#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
+int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
+ char *type, char *name,
+ char *module_name, int *exported);
+#else
+static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
+ char *type, char *name,
+ char *module_name, int *exported)
+{
+ return -1;
+}
+#endif
#ifdef CONFIG_FUNCTION_TRACER
extern int ftrace_enabled;
-extern int
-ftrace_enable_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
-struct ftrace_ops;
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+
+struct ftrace_regs {
+ struct pt_regs regs;
+};
+#define arch_ftrace_get_regs(fregs) (&(fregs)->regs)
+
+/*
+ * ftrace_regs_set_instruction_pointer() is to be defined by the architecture
+ * to allow setting the instruction pointer from the ftrace_regs when
+ * HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and the architecture supports live
+ * kernel patching.
+ */
+#define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0)
+#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
+
+static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
+{
+ if (!fregs)
+ return NULL;
+
+ return arch_ftrace_get_regs(fregs);
+}
+
+/*
+ * When true, the ftrace_regs_{get,set}_*() functions may be used on fregs.
+ * Note: this can be true even when ftrace_get_regs() cannot provide a pt_regs.
+ */
+static __always_inline bool ftrace_regs_has_args(struct ftrace_regs *fregs)
+{
+ if (IS_ENABLED(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS))
+ return true;
+
+ return ftrace_get_regs(fregs) != NULL;
+}
+
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+#define ftrace_regs_get_instruction_pointer(fregs) \
+ instruction_pointer(ftrace_get_regs(fregs))
+#define ftrace_regs_get_argument(fregs, n) \
+ regs_get_kernel_argument(ftrace_get_regs(fregs), n)
+#define ftrace_regs_get_stack_pointer(fregs) \
+ kernel_stack_pointer(ftrace_get_regs(fregs))
+#define ftrace_regs_return_value(fregs) \
+ regs_return_value(ftrace_get_regs(fregs))
+#define ftrace_regs_set_return_value(fregs, ret) \
+ regs_set_return_value(ftrace_get_regs(fregs), ret)
+#define ftrace_override_function_with_return(fregs) \
+ override_function_with_return(ftrace_get_regs(fregs))
+#define ftrace_regs_query_register_offset(name) \
+ regs_query_register_offset(name)
+#endif
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *op, struct pt_regs *regs);
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
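/*
 * Example (editor's sketch, not part of this patch): a callback with the
 * new ftrace_regs-based signature. ftrace_get_regs() may return NULL
 * unless the ops sets FTRACE_OPS_FL_SAVE_REGS; names are illustrative.
 */
static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = ftrace_get_regs(fregs);

	if (regs)
		pr_info("traced %pS from %pS\n", (void *)ip, (void *)parent_ip);
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
	.flags	= FTRACE_OPS_FL_SAVE_REGS,
};
/* Later: register_ftrace_function(&my_ops); */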
/*
* FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
* set in the flags member.
- * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
+ * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and
* IPMODIFY are a kind of attribute flags which can be set only before
* registering the ftrace_ops, and can not be modified while registered.
* Changing those attribute flags after registering ftrace_ops will
@@ -78,10 +176,6 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* ENABLED - set/unset when ftrace_ops is registered/unregistered
* DYNAMIC - set when ftrace_ops is registered to denote dynamically
* allocated ftrace_ops which need special care
- * PER_CPU - set manualy by ftrace_ops user to denote the ftrace_ops
- * could be controlled by following calls:
- * ftrace_function_local_enable
- * ftrace_function_local_disable
* SAVE_REGS - The ftrace_ops wants regs saved at each function called
* and passed to the callback. If this flag is set, but the
* architecture does not support passing regs
@@ -96,10 +190,10 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* passing regs to the handler.
* Note, if this flag is set, the SAVE_REGS flag will automatically
* get set upon registering the ftrace_ops, if the arch supports it.
- * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
- * that the call back has its own recursion protection. If it does
- * not set this, then the ftrace infrastructure will add recursion
- * protection for the caller.
+ * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure
+ *            that the callback needs recursion protection. If it does
+ * not set this, then the ftrace infrastructure will assume
+ * that the callback can handle recursion on its own.
* STUB - The ftrace_ops is just a placeholder.
* INITIALIZED - The ftrace_ops has already been initialized (the first time
*             register_ftrace_function() is called, it will initialize the ops)
@@ -121,38 +215,91 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* PID - Is affected by set_ftrace_pid (allows filtering on those pids)
* RCU - Set when the ops can only be called when RCU is watching.
* TRACE_ARRAY - The ops->private points to a trace_array descriptor.
+ * PERMANENT - Set when the ops is permanent and should not be affected by
+ * ftrace_enabled.
+ * DIRECT - Used by the direct ftrace_ops helper for direct functions
+ * (internal ftrace only, should not be used by others)
*/
enum {
- FTRACE_OPS_FL_ENABLED = 1 << 0,
- FTRACE_OPS_FL_DYNAMIC = 1 << 1,
- FTRACE_OPS_FL_PER_CPU = 1 << 2,
- FTRACE_OPS_FL_SAVE_REGS = 1 << 3,
- FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 4,
- FTRACE_OPS_FL_RECURSION_SAFE = 1 << 5,
- FTRACE_OPS_FL_STUB = 1 << 6,
- FTRACE_OPS_FL_INITIALIZED = 1 << 7,
- FTRACE_OPS_FL_DELETED = 1 << 8,
- FTRACE_OPS_FL_ADDING = 1 << 9,
- FTRACE_OPS_FL_REMOVING = 1 << 10,
- FTRACE_OPS_FL_MODIFYING = 1 << 11,
- FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
- FTRACE_OPS_FL_IPMODIFY = 1 << 13,
- FTRACE_OPS_FL_PID = 1 << 14,
- FTRACE_OPS_FL_RCU = 1 << 15,
- FTRACE_OPS_FL_TRACE_ARRAY = 1 << 16,
+ FTRACE_OPS_FL_ENABLED = BIT(0),
+ FTRACE_OPS_FL_DYNAMIC = BIT(1),
+ FTRACE_OPS_FL_SAVE_REGS = BIT(2),
+ FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = BIT(3),
+ FTRACE_OPS_FL_RECURSION = BIT(4),
+ FTRACE_OPS_FL_STUB = BIT(5),
+ FTRACE_OPS_FL_INITIALIZED = BIT(6),
+ FTRACE_OPS_FL_DELETED = BIT(7),
+ FTRACE_OPS_FL_ADDING = BIT(8),
+ FTRACE_OPS_FL_REMOVING = BIT(9),
+ FTRACE_OPS_FL_MODIFYING = BIT(10),
+ FTRACE_OPS_FL_ALLOC_TRAMP = BIT(11),
+ FTRACE_OPS_FL_IPMODIFY = BIT(12),
+ FTRACE_OPS_FL_PID = BIT(13),
+ FTRACE_OPS_FL_RCU = BIT(14),
+ FTRACE_OPS_FL_TRACE_ARRAY = BIT(15),
+ FTRACE_OPS_FL_PERMANENT = BIT(16),
+ FTRACE_OPS_FL_DIRECT = BIT(17),
};
+#ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
+#define FTRACE_OPS_FL_SAVE_ARGS FTRACE_OPS_FL_SAVE_REGS
+#else
+#define FTRACE_OPS_FL_SAVE_ARGS 0
+#endif
+
+/*
+ * FTRACE_OPS_CMD_* commands allow the ftrace core logic to request changes
+ * to a ftrace_ops. Note, the requests may fail.
+ *
+ * ENABLE_SHARE_IPMODIFY_SELF - enable a DIRECT ops to work on the same
+ * function as an ops with IPMODIFY. Called
+ * when the DIRECT ops is being registered.
+ *                              This is called with both direct_mutex and
+ *                              ftrace_lock held.
+ *
+ * ENABLE_SHARE_IPMODIFY_PEER - enable a DIRECT ops to work on the same
+ * function as an ops with IPMODIFY. Called
+ * when the other ops (the one with IPMODIFY)
+ * is being registered.
+ * This is called with direct_mutex locked.
+ *
+ * DISABLE_SHARE_IPMODIFY_PEER - stop a DIRECT ops from working on the same
+ * function as an ops with IPMODIFY. Called
+ * when the other ops (the one with IPMODIFY)
+ * is being unregistered.
+ * This is called with direct_mutex locked.
+ */
+enum ftrace_ops_cmd {
+ FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF,
+ FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER,
+ FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER,
+};
+
+/*
+ * For most ftrace_ops_cmd commands:
+ * Returns:
+ *        0 - Success.
+ *        Negative on failure. The exact value depends on the
+ *        callback.
+ */
+typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, enum ftrace_ops_cmd cmd);
+
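/*
 * Example (editor's sketch, not part of this patch): an ops_func that
 * agrees to share functions with IPMODIFY users, assuming its direct
 * trampoline can tolerate a modified IP.
 */
static int my_ops_func(struct ftrace_ops *op, enum ftrace_ops_cmd cmd)
{
	switch (cmd) {
	case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF:
	case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER:
		return 0;	/* allow sharing */
	case FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER:
		return 0;	/* nothing to undo */
	}
	return -EINVAL;
}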
#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
- struct ftrace_hash *notrace_hash;
- struct ftrace_hash *filter_hash;
+ struct ftrace_hash __rcu *notrace_hash;
+ struct ftrace_hash __rcu *filter_hash;
struct mutex regex_lock;
};
void ftrace_free_init_mem(void);
+void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
-static inline void ftrace_free_init_mem(void) { }
+static inline void ftrace_free_init_mem(void)
+{
+ ftrace_boot_snapshot();
+}
+static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif
/*
@@ -168,20 +315,47 @@ static inline void ftrace_free_init_mem(void) { }
*/
struct ftrace_ops {
ftrace_func_t func;
- struct ftrace_ops *next;
+ struct ftrace_ops __rcu *next;
unsigned long flags;
void *private;
ftrace_func_t saved_func;
- int __percpu *disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
struct ftrace_ops_hash local_hash;
struct ftrace_ops_hash *func_hash;
struct ftrace_ops_hash old_hash;
unsigned long trampoline;
unsigned long trampoline_size;
+ struct list_head list;
+ ftrace_ops_func_t ops_func;
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ unsigned long direct_call;
+#endif
#endif
};
+extern struct ftrace_ops __rcu *ftrace_ops_list;
+extern struct ftrace_ops ftrace_list_end;
+
+/*
+ * Traverse the ftrace_ops_list, invoking all entries. The reason that we
+ * can use rcu_dereference_raw_check() is that elements removed from this list
+ * are simply leaked, so there is no need to interact with a grace-period
+ * mechanism. The rcu_dereference_raw_check() calls are needed to handle
+ * concurrent insertions into the ftrace_ops_list.
+ *
+ * Silly Alpha and silly pointer-speculation compiler optimizations!
+ */
+#define do_for_each_ftrace_op(op, list) \
+ op = rcu_dereference_raw_check(list); \
+ do
+
+/*
+ * Optimized for just a single item in the list (as that is the normal case).
+ */
+#define while_for_each_ftrace_op(op) \
+ while (likely(op = rcu_dereference_raw_check((op)->next)) && \
+ unlikely((op) != &ftrace_list_end))
+
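/*
 * Example (editor's sketch, not part of this patch): the intended pairing
 * of the two macros above, mirroring how the core list function invokes
 * every registered ops.
 */
static void sketch_call_all_ops(unsigned long ip, unsigned long parent_ip,
				struct ftrace_regs *fregs)
{
	struct ftrace_ops *op;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		op->func(ip, parent_ip, op, fregs);
	} while_for_each_ftrace_op(op);
}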
/*
* Type of the current tracing.
*/
@@ -202,60 +376,12 @@ extern enum ftrace_tracing_type_t ftrace_tracing_type;
*/
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
-void clear_ftrace_function(void);
-
-/**
- * ftrace_function_local_enable - enable ftrace_ops on current cpu
- *
- * This function enables tracing on current cpu by decreasing
- * the per cpu control variable.
- * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
- * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
- */
-static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
-{
- if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
- return;
-
- (*this_cpu_ptr(ops->disabled))--;
-}
-
-/**
- * ftrace_function_local_disable - disable ftrace_ops on current cpu
- *
- * This function disables tracing on current cpu by increasing
- * the per cpu control variable.
- * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
- * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
- */
-static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
-{
- if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
- return;
-
- (*this_cpu_ptr(ops->disabled))++;
-}
-
-/**
- * ftrace_function_local_disabled - returns ftrace_ops disabled value
- * on current cpu
- *
- * This function returns value of ftrace_ops::disabled on current cpu.
- * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
- * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
- */
-static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
-{
- WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU));
- return *this_cpu_ptr(ops->disabled);
-}
extern void ftrace_stub(unsigned long a0, unsigned long a1,
- struct ftrace_ops *op, struct pt_regs *regs);
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
+
+int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs);
#else /* !CONFIG_FUNCTION_TRACER */
/*
* (un)register_ftrace_function must be a macro since the ops parameter
@@ -263,32 +389,80 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1,
*/
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
-static inline int ftrace_nr_registered_ops(void)
-{
- return 0;
-}
-static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
+static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
+static inline int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_FUNCTION_TRACER */
-#ifdef CONFIG_STACK_TRACER
+struct ftrace_func_entry {
+ struct hlist_node hlist;
+ unsigned long ip;
+ unsigned long direct; /* for direct lookup only */
+};
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+extern int ftrace_direct_func_count;
+unsigned long ftrace_find_rec_direct(unsigned long ip);
+int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
+int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
+ bool free_filters);
+int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
+int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr);
+
+void ftrace_stub_direct_tramp(void);
-#define STACK_TRACE_ENTRIES 500
+#else
+struct ftrace_ops;
+# define ftrace_direct_func_count 0
+static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
+{
+ return 0;
+}
+static inline int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
+{
+ return -ENODEV;
+}
+static inline int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
+ bool free_filters)
+{
+ return -ENODEV;
+}
+static inline int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
+{
+ return -ENODEV;
+}
+static inline int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr)
+{
+ return -ENODEV;
+}
-struct stack_trace;
+/*
+ * This must be implemented by the architecture.
+ * It is the way the ftrace direct_ops helper, when called
+ * via ftrace (because there are other callbacks besides the
+ * direct call), can inform the architecture's trampoline that this
+ * routine has a direct caller, and what the caller is.
+ *
+ * For example, in x86, it returns the direct caller
+ * callback function via the regs->orig_ax parameter.
+ * Then in the ftrace trampoline, if this is set, it makes
+ * the return from the trampoline jump to the direct caller
+ * instead of going back to the function it just traced.
+ */
+static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
+ unsigned long addr) { }
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
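/*
 * Example (editor's sketch, not part of this patch): attaching a custom
 * trampoline to one function with the ops-based direct API, in the style
 * of samples/ftrace. my_func and my_tramp are illustrative names.
 */
static struct ftrace_ops direct_ops;

static int sketch_attach_direct(void)
{
	int ret;

	ret = ftrace_set_filter_ip(&direct_ops, (unsigned long)my_func, 0, 0);
	if (ret)
		return ret;
	return register_ftrace_direct(&direct_ops, (unsigned long)my_tramp);
}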
-extern unsigned stack_trace_index[];
-extern struct stack_trace stack_trace_max;
-extern unsigned long stack_trace_max_size;
-extern arch_spinlock_t stack_trace_max_lock;
+#ifdef CONFIG_STACK_TRACER
extern int stack_tracer_enabled;
-void stack_trace_print(void);
-int
-stack_trace_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
+
+int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos);
/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);
@@ -306,8 +480,8 @@ DECLARE_PER_CPU(int, disable_stack_tracer);
*/
static inline void stack_tracer_disable(void)
{
- /* Preemption or interupts must be disabled */
- if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
+ /* Preemption or interrupts must be disabled */
+ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
this_cpu_inc(disable_stack_tracer);
}
@@ -320,7 +494,7 @@ static inline void stack_tracer_disable(void)
*/
static inline void stack_tracer_enable(void)
{
- if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
+ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
this_cpu_dec(disable_stack_tracer);
}
@@ -331,10 +505,8 @@ static inline void stack_tracer_enable(void) { }
#ifdef CONFIG_DYNAMIC_FTRACE
-int ftrace_arch_code_modify_prepare(void);
-int ftrace_arch_code_modify_post_process(void);
-
-struct dyn_ftrace;
+void ftrace_arch_code_modify_prepare(void);
+void ftrace_arch_code_modify_post_process(void);
enum ftrace_bug_type {
FTRACE_BUG_UNKNOWN,
@@ -357,7 +529,7 @@ struct seq_file;
extern int ftrace_text_reserved(const void *start, const void *end);
-extern int ftrace_nr_registered_ops(void);
+struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
bool is_ftrace_trampoline(unsigned long addr);
@@ -373,9 +545,14 @@ bool is_ftrace_trampoline(unsigned long addr);
* REGS_EN - the function is set up to save regs.
* IPMODIFY - the record allows for the IP address to be changed.
* DISABLED - the record is not ready to be touched yet
+ * DIRECT - there is a direct function to call
+ * CALL_OPS - the record can use callsite-specific ops
+ * CALL_OPS_EN - the function is set up to use callsite-specific ops
+ * TOUCHED - A callback was added since boot up
+ * MODIFIED - The function had IPMODIFY or DIRECT attached to it
*
* When a new ftrace_ops is registered and wants a function to save
- * pt_regs, the rec->flag REGS is set. When the function has been
+ * pt_regs, the rec->flags REGS is set. When the function has been
* set up to save regs, the REG_EN flag is set. Once a function
* starts saving regs it will do so until all ftrace_ops are removed
* from tracing that function.
@@ -388,15 +565,18 @@ enum {
FTRACE_FL_TRAMP_EN = (1UL << 27),
FTRACE_FL_IPMODIFY = (1UL << 26),
FTRACE_FL_DISABLED = (1UL << 25),
+ FTRACE_FL_DIRECT = (1UL << 24),
+ FTRACE_FL_DIRECT_EN = (1UL << 23),
+ FTRACE_FL_CALL_OPS = (1UL << 22),
+ FTRACE_FL_CALL_OPS_EN = (1UL << 21),
+ FTRACE_FL_TOUCHED = (1UL << 20),
+ FTRACE_FL_MODIFIED = (1UL << 19),
};
-#define FTRACE_REF_MAX_SHIFT 25
-#define FTRACE_FL_BITS 7
-#define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1)
-#define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
+#define FTRACE_REF_MAX_SHIFT 19
#define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
-#define ftrace_rec_count(rec) ((rec)->flags & ~FTRACE_FL_MASK)
+#define ftrace_rec_count(rec) ((rec)->flags & FTRACE_REF_MAX)
struct dyn_ftrace {
unsigned long ip; /* address of mcount call-site */
@@ -404,9 +584,10 @@ struct dyn_ftrace {
struct dyn_arch_ftrace arch;
};
-int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
int remove, int reset);
+int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
+ unsigned int cnt, int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
@@ -422,6 +603,7 @@ enum {
FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
FTRACE_START_FUNC_RET = (1 << 3),
FTRACE_STOP_FUNC_RET = (1 << 4),
+ FTRACE_MAY_SLEEP = (1 << 5),
};
/*
@@ -450,9 +632,13 @@ enum {
FTRACE_ITER_PROBE = (1 << 4),
FTRACE_ITER_MOD = (1 << 5),
FTRACE_ITER_ENABLED = (1 << 6),
+ FTRACE_ITER_TOUCHED = (1 << 7),
};
void arch_ftrace_update_code(int command);
+void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
+void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
+void arch_ftrace_trampoline_free(struct ftrace_ops *ops);
struct ftrace_rec_iter;
@@ -466,8 +652,8 @@ struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
iter = ftrace_rec_iter_next(iter))
-int ftrace_update_record(struct dyn_ftrace *rec, int enable);
-int ftrace_test_record(struct dyn_ftrace *rec, int enable);
+int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
+int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
@@ -538,7 +724,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
/**
* ftrace_make_nop - convert code into nop
* @mod: module structure if called by module load initialization
- * @rec: the mcount call site record
+ * @rec: the call site record (e.g. mcount/fentry)
* @addr: the address that the call site should be calling
*
* This is a very sensitive operation and great care needs
@@ -560,8 +746,53 @@ extern int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr);
/**
+ * ftrace_need_init_nop - return whether nop call sites should be initialized
+ *
+ * Normally the compiler's -mnop-mcount generates suitable nops, so we don't
+ * need to call ftrace_init_nop() if the code is built with that flag.
+ * Architectures where this is not always the case may define their own
+ * condition.
+ *
+ * Return must be:
+ * 0 if ftrace_init_nop() should be called
+ * Nonzero if ftrace_init_nop() should not be called
+ */
+
+#ifndef ftrace_need_init_nop
+#define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT))
+#endif
+
+/**
+ * ftrace_init_nop - initialize a nop call site
+ * @mod: module structure if called by module load initialization
+ * @rec: the call site record (e.g. mcount/fentry)
+ *
+ * This is a very sensitive operation and great care needs
+ * to be taken by the arch. The operation should carefully
+ * read the location, check to see if what is read is indeed
+ * what we expect it to be, and then on success of the compare,
+ * it should write to the location.
+ *
+ * The code segment at @rec->ip should contain the contents created by
+ * the compiler.
+ *
+ * Return must be:
+ * 0 on success
+ * -EFAULT on error reading the location
+ * -EINVAL on a failed compare of the contents
+ * -EPERM on error writing to the location
+ * Any other value will be considered a failure.
+ */
+#ifndef ftrace_init_nop
+static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+ return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+}
+#endif
+
+/**
* ftrace_make_call - convert a nop call site into a call to addr
- * @rec: the mcount call site record
+ * @rec: the call site record (e.g. mcount/fentry)
* @addr: the address that the call site should call
*
* This is a very sensitive operation and great care needs
@@ -581,10 +812,11 @@ extern int ftrace_make_nop(struct module *mod,
*/
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#if defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS) || \
+ defined(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS)
/**
* ftrace_modify_call - convert from one addr to another (no nop)
- * @rec: the mcount call site record
+ * @rec: the call site record (e.g. mcount/fentry)
* @old_addr: the address expected to be currently called to
* @addr: the address to change to
*
@@ -594,6 +826,9 @@ extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
* what we expect it to be, and then on success of the compare,
* it should write to the location.
*
+ * When using call ops, this is called when the associated ops change, even
+ * when (addr == old_addr).
+ *
* The code segment at @rec->ip should be a caller to @old_addr
*
* Return must be:
@@ -626,7 +861,6 @@ extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
-static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
@@ -649,6 +883,7 @@ static inline unsigned long ftrace_location(unsigned long ip)
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
+#define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
@@ -667,6 +902,15 @@ static inline bool is_ftrace_trampoline(unsigned long addr)
}
#endif /* CONFIG_DYNAMIC_FTRACE */
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifndef ftrace_graph_func
+#define ftrace_graph_func ftrace_stub
+#define FTRACE_OPS_GRAPH_STUB FTRACE_OPS_FL_STUB
+#else
+#define FTRACE_OPS_GRAPH_STUB 0
+#endif
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);
@@ -722,7 +966,7 @@ static inline void __ftrace_enabled_restore(int enabled)
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
-static inline unsigned long get_lock_parent_ip(void)
+static __always_inline unsigned long get_lock_parent_ip(void)
{
unsigned long addr = CALLER_ADDR0;
@@ -734,15 +978,7 @@ static inline unsigned long get_lock_parent_ip(void)
return CALLER_ADDR2;
}
-#ifdef CONFIG_IRQSOFF_TRACER
- extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
- extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
-#else
- static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
- static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
-#endif
-
-#ifdef CONFIG_PREEMPT_TRACER
+#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
@@ -756,6 +992,11 @@ static inline unsigned long get_lock_parent_ip(void)
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
+#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
+#define FTRACE_CALLSITE_SECTION "__patchable_function_entries"
+#else
+#define FTRACE_CALLSITE_SECTION "__mcount_loc"
+#endif
#else
static inline void ftrace_init(void) { }
#endif
@@ -777,21 +1018,25 @@ struct ftrace_graph_ent {
*/
struct ftrace_graph_ret {
unsigned long func; /* Current function */
+ int depth;
/* Number of functions that overran the depth limit for current task */
- unsigned long overrun;
+ unsigned int overrun;
unsigned long long calltime;
unsigned long long rettime;
- int depth;
} __packed;
/* Type of the callback handlers for tracing function graph*/
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
+extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-/* for init task */
-#define INIT_FTRACE_GRAPH .ret_stack = NULL,
+struct fgraph_ops {
+ trace_func_graph_ent_t entryfunc;
+ trace_func_graph_ret_t retfunc;
+};
/*
* Stack of return addresses for functions
@@ -821,8 +1066,11 @@ struct ftrace_ret_stack {
extern void return_to_handler(void);
extern int
-ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
- unsigned long frame_pointer, unsigned long *retp);
+function_graph_enter(unsigned long ret, unsigned long func,
+ unsigned long frame_pointer, unsigned long *retp);
+
+struct ftrace_ret_stack *
+ftrace_graph_get_ret_stack(struct task_struct *task, int idx);
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
unsigned long ret, unsigned long *retp);
@@ -834,30 +1082,36 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
*/
#define __notrace_funcgraph notrace
-#define FTRACE_NOTRACE_DEPTH 65536
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
-extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
- trace_func_graph_ent_t entryfunc);
-extern bool ftrace_graph_is_dead(void);
+extern int register_ftrace_graph(struct fgraph_ops *ops);
+extern void unregister_ftrace_graph(struct fgraph_ops *ops);
+
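/*
 * Example (editor's sketch, not part of this patch): minimal entry/return
 * handlers registered through the new fgraph_ops interface.
 */
static int my_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* nonzero: trace this function */
}

static void my_return(struct ftrace_graph_ret *trace)
{
}

static struct fgraph_ops my_gops = {
	.entryfunc	= my_entry,
	.retfunc	= my_return,
};
/* Later: register_ftrace_graph(&my_gops); ... unregister_ftrace_graph(&my_gops); */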
+/**
+ * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
+ *
+ * ftrace_graph_stop() is called when a severe error is detected in
+ * the function graph tracing. This function is called by the critical
+ * paths of function graph to keep those paths from doing any more harm.
+ */
+DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph);
+
+static inline bool ftrace_graph_is_dead(void)
+{
+ return static_branch_unlikely(&kill_ftrace_graph);
+}
+
extern void ftrace_graph_stop(void);
/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;
-extern void unregister_ftrace_graph(void);
-
extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
-static inline int task_curr_ret_stack(struct task_struct *t)
-{
- return t->curr_ret_stack;
-}
-
static inline void pause_graph_tracing(void)
{
atomic_inc(&current->tracing_graph_pause);
@@ -870,23 +1124,14 @@ static inline void unpause_graph_tracing(void)
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
#define __notrace_funcgraph
-#define INIT_FTRACE_GRAPH
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
-static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
- trace_func_graph_ent_t entryfunc)
-{
- return -1;
-}
-static inline void unregister_ftrace_graph(void) { }
-
-static inline int task_curr_ret_stack(struct task_struct *tsk)
-{
- return -1;
-}
+/* Define as macros as fgraph_ops may not be defined */
+#define register_ftrace_graph(ops) ({ -1; })
+#define unregister_ftrace_graph(ops) do { } while (0)
static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
@@ -900,47 +1145,6 @@ static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_TRACING
-
-/* flags for current->trace */
-enum {
- TSK_TRACE_FL_TRACE_BIT = 0,
- TSK_TRACE_FL_GRAPH_BIT = 1,
-};
-enum {
- TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
- TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
-};
-
-static inline void set_tsk_trace_trace(struct task_struct *tsk)
-{
- set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
-}
-
-static inline void clear_tsk_trace_trace(struct task_struct *tsk)
-{
- clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
-}
-
-static inline int test_tsk_trace_trace(struct task_struct *tsk)
-{
- return tsk->trace & TSK_TRACE_FL_TRACE;
-}
-
-static inline void set_tsk_trace_graph(struct task_struct *tsk)
-{
- set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
-}
-
-static inline void clear_tsk_trace_graph(struct task_struct *tsk)
-{
- clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
-}
-
-static inline int test_tsk_trace_graph(struct task_struct *tsk)
-{
- return tsk->trace & TSK_TRACE_FL_GRAPH;
-}
-
enum ftrace_dump_mode;
extern enum ftrace_dump_mode ftrace_dump_on_oops;
@@ -949,22 +1153,13 @@ extern int tracepoint_printk;
extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;
-#ifdef CONFIG_PREEMPT
-#define INIT_TRACE_RECURSION .trace_recursion = 0,
-#endif
-
int tracepoint_printk_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
+ void *buffer, size_t *lenp, loff_t *ppos);
#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */
-#ifndef INIT_TRACE_RECURSION
-#define INIT_TRACE_RECURSION
-#endif
-
#ifdef CONFIG_FTRACE_SYSCALLS
unsigned long arch_syscall_addr(int nr);
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index 4ec2c9b205f2..f6faa31289ba 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -1,36 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FTRACE_IRQ_H
#define _LINUX_FTRACE_IRQ_H
-
-#ifdef CONFIG_FTRACE_NMI_ENTER
-extern void arch_ftrace_nmi_enter(void);
-extern void arch_ftrace_nmi_exit(void);
-#else
-static inline void arch_ftrace_nmi_enter(void) { }
-static inline void arch_ftrace_nmi_exit(void) { }
-#endif
-
#ifdef CONFIG_HWLAT_TRACER
extern bool trace_hwlat_callback_enabled;
extern void trace_hwlat_callback(bool enter);
#endif
+#ifdef CONFIG_OSNOISE_TRACER
+extern bool trace_osnoise_callback_enabled;
+extern void trace_osnoise_callback(bool enter);
+#endif
+
static inline void ftrace_nmi_enter(void)
{
#ifdef CONFIG_HWLAT_TRACER
if (trace_hwlat_callback_enabled)
trace_hwlat_callback(true);
#endif
- arch_ftrace_nmi_enter();
+#ifdef CONFIG_OSNOISE_TRACER
+ if (trace_osnoise_callback_enabled)
+ trace_osnoise_callback(true);
+#endif
}
static inline void ftrace_nmi_exit(void)
{
- arch_ftrace_nmi_exit();
#ifdef CONFIG_HWLAT_TRACER
if (trace_hwlat_callback_enabled)
trace_hwlat_callback(false);
#endif
+#ifdef CONFIG_OSNOISE_TRACER
+ if (trace_osnoise_callback_enabled)
+ trace_osnoise_callback(false);
+#endif
}
#endif /* _LINUX_FTRACE_IRQ_H */
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 7c5b694864cd..b70df27d7e85 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -1,19 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FUTEX_H
#define _LINUX_FUTEX_H
+#include <linux/sched.h>
#include <linux/ktime.h>
+
#include <uapi/linux/futex.h>
struct inode;
struct mm_struct;
struct task_struct;
-long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
- u32 __user *uaddr2, u32 val2, u32 val3);
-
-extern int
-handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
-
/*
* Futexes are matched on equal values of this key.
* The key type depends on whether it's a shared or private mapping.
@@ -34,38 +31,63 @@ handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
union futex_key {
struct {
+ u64 i_seq;
unsigned long pgoff;
- struct inode *inode;
- int offset;
+ unsigned int offset;
} shared;
struct {
+ union {
+ struct mm_struct *mm;
+ u64 __tmp;
+ };
unsigned long address;
- struct mm_struct *mm;
- int offset;
+ unsigned int offset;
} private;
struct {
+ u64 ptr;
unsigned long word;
- void *ptr;
- int offset;
+ unsigned int offset;
} both;
};
-#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
+#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } }
#ifdef CONFIG_FUTEX
-extern void exit_robust_list(struct task_struct *curr);
-extern void exit_pi_state_list(struct task_struct *curr);
-#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
-#define futex_cmpxchg_enabled 1
-#else
-extern int futex_cmpxchg_enabled;
-#endif
-#else
-static inline void exit_robust_list(struct task_struct *curr)
+enum {
+ FUTEX_STATE_OK,
+ FUTEX_STATE_EXITING,
+ FUTEX_STATE_DEAD,
+};
+
+static inline void futex_init_task(struct task_struct *tsk)
{
+ tsk->robust_list = NULL;
+#ifdef CONFIG_COMPAT
+ tsk->compat_robust_list = NULL;
+#endif
+ INIT_LIST_HEAD(&tsk->pi_state_list);
+ tsk->pi_state_cache = NULL;
+ tsk->futex_state = FUTEX_STATE_OK;
+ mutex_init(&tsk->futex_exit_mutex);
}
-static inline void exit_pi_state_list(struct task_struct *curr)
+
+void futex_exit_recursive(struct task_struct *tsk);
+void futex_exit_release(struct task_struct *tsk);
+void futex_exec_release(struct task_struct *tsk);
+
+long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+ u32 __user *uaddr2, u32 val2, u32 val3);
+#else
+static inline void futex_init_task(struct task_struct *tsk) { }
+static inline void futex_exit_recursive(struct task_struct *tsk) { }
+static inline void futex_exit_release(struct task_struct *tsk) { }
+static inline void futex_exec_release(struct task_struct *tsk) { }
+static inline long do_futex(u32 __user *uaddr, int op, u32 val,
+ ktime_t *timeout, u32 __user *uaddr2,
+ u32 val2, u32 val3)
{
+ return -EINVAL;
}
#endif
+
#endif
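/*
 * Example (editor's sketch, not part of this patch): where the core is
 * expected to call these lifecycle helpers; the callers shown in the
 * comments are the usual fork/exec/exit paths.
 */
static void sketch_futex_lifecycle(struct task_struct *tsk)
{
	futex_init_task(tsk);		/* copy_process(): fresh robust/PI state */
	futex_exec_release(tsk);	/* exec: release futexes, task lives on */
	futex_exit_release(tsk);	/* exit: final release before teardown */
}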
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 50893a1646cf..5700451b300f 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -1,35 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* fwnode.h - Firmware device node object handle type definition.
*
* Copyright (C) 2015, Intel Corporation
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LINUX_FWNODE_H_
#define _LINUX_FWNODE_H_
#include <linux/types.h>
-
-enum fwnode_type {
- FWNODE_INVALID = 0,
- FWNODE_OF,
- FWNODE_ACPI,
- FWNODE_ACPI_DATA,
- FWNODE_ACPI_STATIC,
- FWNODE_PDATA,
- FWNODE_IRQCHIP
-};
+#include <linux/list.h>
+#include <linux/bits.h>
+#include <linux/err.h>
struct fwnode_operations;
+struct device;
+
+/*
+ * fwnode flags
+ *
+ * LINKS_ADDED: The fwnode has already been parsed to add fwnode links.
+ * NOT_DEVICE: The fwnode will never be populated as a struct device.
+ * INITIALIZED: The hardware corresponding to fwnode has been initialized.
+ * NEEDS_CHILD_BOUND_ON_ADD: For this fwnode/device to probe successfully, its
+ * driver needs its child devices to be bound with
+ * their respective drivers as soon as they are
+ * added.
+ * BEST_EFFORT: The fwnode/device needs to probe early and might be missing some
+ * suppliers. Only enforce ordering with suppliers that have
+ * drivers.
+ */
+#define FWNODE_FLAG_LINKS_ADDED BIT(0)
+#define FWNODE_FLAG_NOT_DEVICE BIT(1)
+#define FWNODE_FLAG_INITIALIZED BIT(2)
+#define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD BIT(3)
+#define FWNODE_FLAG_BEST_EFFORT BIT(4)
+#define FWNODE_FLAG_VISITED BIT(5)
struct fwnode_handle {
- enum fwnode_type type;
struct fwnode_handle *secondary;
const struct fwnode_operations *ops;
+ struct device *dev;
+ struct list_head suppliers;
+ struct list_head consumers;
+ u8 flags;
+};
+
+/*
+ * fwnode link flags
+ *
+ * CYCLE: The fwnode link is part of a cycle. Don't defer probe.
+ */
+#define FWLINK_FLAG_CYCLE BIT(0)
+
+struct fwnode_link {
+ struct fwnode_handle *supplier;
+ struct list_head s_hook;
+ struct fwnode_handle *consumer;
+ struct list_head c_hook;
+ u8 flags;
};
/**
@@ -44,65 +74,109 @@ struct fwnode_endpoint {
const struct fwnode_handle *local_fwnode;
};
+/*
+ * ports and endpoints defined as software_nodes should all follow a common
+ * naming scheme; use these macros to ensure commonality.
+ */
+#define SWNODE_GRAPH_PORT_NAME_FMT "port@%u"
+#define SWNODE_GRAPH_ENDPOINT_NAME_FMT "endpoint@%u"
+
+#define NR_FWNODE_REFERENCE_ARGS 8
+
+/**
+ * struct fwnode_reference_args - Fwnode reference with additional arguments
+ * @fwnode: A reference to the base fwnode
+ * @nargs: Number of elements in @args array
+ * @args: Integer arguments on the fwnode
+ */
+struct fwnode_reference_args {
+ struct fwnode_handle *fwnode;
+ unsigned int nargs;
+ u64 args[NR_FWNODE_REFERENCE_ARGS];
+};
+
/**
* struct fwnode_operations - Operations for fwnode interface
* @get: Get a reference to an fwnode.
* @put: Put a reference to an fwnode.
+ * @device_is_available: Return true if the device is available.
+ * @device_get_match_data: Return the device driver match data.
* @property_present: Return true if a property is present.
- * @property_read_integer_array: Read an array of integer properties. Return
- * zero on success, a negative error code
- * otherwise.
+ * @property_read_int_array: Read an array of integer properties. Return zero on
+ * success, a negative error code otherwise.
* @property_read_string_array: Read an array of string properties. Return zero
* on success, a negative error code otherwise.
+ * @get_name: Return the name of an fwnode.
+ * @get_name_prefix: Get a prefix for a node (for printing purposes).
* @get_parent: Return the parent of an fwnode.
* @get_next_child_node: Return the next child node in an iteration.
* @get_named_child_node: Return a child node with a given name.
+ * @get_reference_args: Return a reference pointed to by a property, with args
* @graph_get_next_endpoint: Return an endpoint node in an iteration.
* @graph_get_remote_endpoint: Return the remote endpoint node of a local
* endpoint node.
* @graph_get_port_parent: Return the parent node of a port node.
* @graph_parse_endpoint: Parse endpoint for port and endpoint id.
+ * @add_links: Create fwnode links to all the suppliers of the fwnode. Return
+ * zero on success, a negative error code otherwise.
*/
struct fwnode_operations {
- void (*get)(struct fwnode_handle *fwnode);
+ struct fwnode_handle *(*get)(struct fwnode_handle *fwnode);
void (*put)(struct fwnode_handle *fwnode);
- bool (*device_is_available)(struct fwnode_handle *fwnode);
- bool (*property_present)(struct fwnode_handle *fwnode,
+ bool (*device_is_available)(const struct fwnode_handle *fwnode);
+ const void *(*device_get_match_data)(const struct fwnode_handle *fwnode,
+ const struct device *dev);
+ bool (*device_dma_supported)(const struct fwnode_handle *fwnode);
+ enum dev_dma_attr
+ (*device_get_dma_attr)(const struct fwnode_handle *fwnode);
+ bool (*property_present)(const struct fwnode_handle *fwnode,
const char *propname);
- int (*property_read_int_array)(struct fwnode_handle *fwnode,
+ int (*property_read_int_array)(const struct fwnode_handle *fwnode,
const char *propname,
unsigned int elem_size, void *val,
size_t nval);
- int (*property_read_string_array)(struct fwnode_handle *fwnode_handle,
- const char *propname,
- const char **val, size_t nval);
- struct fwnode_handle *(*get_parent)(struct fwnode_handle *fwnode);
+ int
+ (*property_read_string_array)(const struct fwnode_handle *fwnode_handle,
+ const char *propname, const char **val,
+ size_t nval);
+ const char *(*get_name)(const struct fwnode_handle *fwnode);
+ const char *(*get_name_prefix)(const struct fwnode_handle *fwnode);
+ struct fwnode_handle *(*get_parent)(const struct fwnode_handle *fwnode);
struct fwnode_handle *
- (*get_next_child_node)(struct fwnode_handle *fwnode,
+ (*get_next_child_node)(const struct fwnode_handle *fwnode,
struct fwnode_handle *child);
struct fwnode_handle *
- (*get_named_child_node)(struct fwnode_handle *fwnode, const char *name);
+ (*get_named_child_node)(const struct fwnode_handle *fwnode,
+ const char *name);
+ int (*get_reference_args)(const struct fwnode_handle *fwnode,
+ const char *prop, const char *nargs_prop,
+ unsigned int nargs, unsigned int index,
+ struct fwnode_reference_args *args);
struct fwnode_handle *
- (*graph_get_next_endpoint)(struct fwnode_handle *fwnode,
+ (*graph_get_next_endpoint)(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev);
struct fwnode_handle *
- (*graph_get_remote_endpoint)(struct fwnode_handle *fwnode);
+ (*graph_get_remote_endpoint)(const struct fwnode_handle *fwnode);
struct fwnode_handle *
(*graph_get_port_parent)(struct fwnode_handle *fwnode);
- int (*graph_parse_endpoint)(struct fwnode_handle *fwnode,
+ int (*graph_parse_endpoint)(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
+ void __iomem *(*iomap)(struct fwnode_handle *fwnode, int index);
+ int (*irq_get)(const struct fwnode_handle *fwnode, unsigned int index);
+ int (*add_links)(struct fwnode_handle *fwnode);
};
-#define fwnode_has_op(fwnode, op) \
- ((fwnode) && (fwnode)->ops && (fwnode)->ops->op)
+#define fwnode_has_op(fwnode, op) \
+ (!IS_ERR_OR_NULL(fwnode) && (fwnode)->ops && (fwnode)->ops->op)
+
#define fwnode_call_int_op(fwnode, op, ...) \
- (fwnode ? (fwnode_has_op(fwnode, op) ? \
- (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : -ENXIO) : \
- -EINVAL)
-#define fwnode_call_bool_op(fwnode, op, ...) \
- (fwnode ? (fwnode_has_op(fwnode, op) ? \
- (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : false) : \
- false)
+ (fwnode_has_op(fwnode, op) ? \
+ (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : (IS_ERR_OR_NULL(fwnode) ? -EINVAL : -ENXIO))
+
+#define fwnode_call_bool_op(fwnode, op, ...) \
+ (fwnode_has_op(fwnode, op) ? \
+ (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : false)
+
#define fwnode_call_ptr_op(fwnode, op, ...) \
(fwnode_has_op(fwnode, op) ? \
(fwnode)->ops->op(fwnode, ## __VA_ARGS__) : NULL)
@@ -111,5 +185,31 @@ struct fwnode_operations {
if (fwnode_has_op(fwnode, op)) \
(fwnode)->ops->op(fwnode, ## __VA_ARGS__); \
} while (false)
+#define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev)
+
+static inline void fwnode_init(struct fwnode_handle *fwnode,
+ const struct fwnode_operations *ops)
+{
+ fwnode->ops = ops;
+ INIT_LIST_HEAD(&fwnode->consumers);
+ INIT_LIST_HEAD(&fwnode->suppliers);
+}
+
+static inline void fwnode_dev_initialized(struct fwnode_handle *fwnode,
+ bool initialized)
+{
+ if (IS_ERR_OR_NULL(fwnode))
+ return;
+
+ if (initialized)
+ fwnode->flags |= FWNODE_FLAG_INITIALIZED;
+ else
+ fwnode->flags &= ~FWNODE_FLAG_INITIALIZED;
+}
+
+extern bool fw_devlink_is_strict(void);
+int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup);
+void fwnode_links_purge(struct fwnode_handle *fwnode);
+void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode);
#endif
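/*
 * Example (editor's sketch, not part of this patch): a minimal fwnode
 * provider. Unimplemented ops fall back to the defaults encoded in the
 * fwnode_call_*_op() macros above; names are illustrative.
 */
static bool my_property_present(const struct fwnode_handle *fwnode,
				const char *propname)
{
	return false;	/* this provider exposes no properties */
}

static const struct fwnode_operations my_fwnode_ops = {
	.property_present = my_property_present,
};

static struct fwnode_handle my_fwnode;

static void my_fwnode_setup(void)
{
	/* Sets ->ops and initializes the supplier/consumer link lists. */
	fwnode_init(&my_fwnode, &my_fwnode_ops);
}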
diff --git a/include/linux/fwnode_mdio.h b/include/linux/fwnode_mdio.h
new file mode 100644
index 000000000000..faf603c48c86
--- /dev/null
+++ b/include/linux/fwnode_mdio.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * FWNODE helper for the MDIO (Ethernet PHY) API
+ */
+
+#ifndef __LINUX_FWNODE_MDIO_H
+#define __LINUX_FWNODE_MDIO_H
+
+#include <linux/phy.h>
+
+#if IS_ENABLED(CONFIG_FWNODE_MDIO)
+int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
+ struct phy_device *phy,
+ struct fwnode_handle *child, u32 addr);
+
+int fwnode_mdiobus_register_phy(struct mii_bus *bus,
+ struct fwnode_handle *child, u32 addr);
+
+#else /* CONFIG_FWNODE_MDIO */
+static inline int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
+ struct phy_device *phy,
+ struct fwnode_handle *child, u32 addr)
+{
+ return -EINVAL;
+}
+
+static inline int fwnode_mdiobus_register_phy(struct mii_bus *bus,
+ struct fwnode_handle *child,
+ u32 addr)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* __LINUX_FWNODE_MDIO_H */
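/*
 * Example (editor's sketch, not part of this patch): registering one PHY
 * per child fwnode of an MDIO bus node. Assumes the child iterator and
 * property helpers from <linux/property.h>.
 */
static void sketch_register_phys(struct mii_bus *bus,
				 struct fwnode_handle *bus_fwnode)
{
	struct fwnode_handle *child;
	u32 addr;

	fwnode_for_each_child_node(bus_fwnode, child) {
		if (fwnode_property_read_u32(child, "reg", &addr))
			continue;	/* child without an address */
		fwnode_mdiobus_register_phy(bus, child, addr);
	}
}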
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index bb7de09e8d57..8c2f00018e89 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 1999-2002 Vojtech Pavlik
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef _GAMEPORT_H
#define _GAMEPORT_H
@@ -113,7 +110,7 @@ static inline void gameport_free_port(struct gameport *gameport)
static inline void gameport_set_name(struct gameport *gameport, const char *name)
{
- strlcpy(gameport->name, name, sizeof(gameport->name));
+ strscpy(gameport->name, name, sizeof(gameport->name));
}
/*
diff --git a/include/linux/gcd.h b/include/linux/gcd.h
index 69f5e8a01bad..cb572677fd7f 100644
--- a/include/linux/gcd.h
+++ b/include/linux/gcd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _GCD_H
#define _GCD_H
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 29d4385903d4..0bd581003cd5 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Basic general purpose allocator for managing special purpose
* memory, for example, memory that is not managed by the regular
@@ -21,9 +22,6 @@
* the allocator can NOT be used in NMI handler. So code uses the
* allocator in NMI handler should depend on
* CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
*/
@@ -32,24 +30,27 @@
#include <linux/types.h>
#include <linux/spinlock_types.h>
+#include <linux/atomic.h>
struct device;
struct device_node;
struct gen_pool;
/**
- * Allocation callback function type definition
+ * typedef genpool_algo_t: Allocation callback function type definition
* @map: Pointer to bitmap
* @size: The bitmap size in bits
* @start: The bitnumber to start searching at
* @nr: The number of zeroed bits we're looking for
- * @data: optional additional data used by @genpool_algo_t
+ * @data: optional additional data used by the callback
+ * @pool: the pool being allocated from
+ * @start_addr: start address of the memory chunk being searched
*/
typedef unsigned long (*genpool_algo_t)(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned int nr,
- void *data, struct gen_pool *pool);
+ void *data, struct gen_pool *pool,
+ unsigned long start_addr);
/*
* General purpose special memory pool descriptor.
@@ -70,11 +71,12 @@ struct gen_pool {
*/
struct gen_pool_chunk {
struct list_head next_chunk; /* next chunk in pool */
- atomic_t avail;
+ atomic_long_t avail;
phys_addr_t phys_addr; /* physical starting address of memory chunk */
+ void *owner; /* private data to retrieve at alloc time */
unsigned long start_addr; /* start address of memory chunk */
unsigned long end_addr; /* end address of memory chunk (inclusive) */
- unsigned long bits[0]; /* bitmap for allocating memory chunk */
+ unsigned long bits[]; /* bitmap for allocating memory chunk */
};
/*
@@ -93,8 +95,15 @@ struct genpool_data_fixed {
extern struct gen_pool *gen_pool_create(int, int);
extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
-extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t,
- size_t, int);
+extern int gen_pool_add_owner(struct gen_pool *, unsigned long, phys_addr_t,
+ size_t, int, void *);
+
+static inline int gen_pool_add_virt(struct gen_pool *pool, unsigned long addr,
+ phys_addr_t phys, size_t size, int nid)
+{
+ return gen_pool_add_owner(pool, addr, phys, size, nid, NULL);
+}
+
/**
* gen_pool_add - add a new chunk of special memory to the pool
* @pool: pool to add new memory chunk to
@@ -113,12 +122,56 @@ static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
return gen_pool_add_virt(pool, addr, -1, size, nid);
}
extern void gen_pool_destroy(struct gen_pool *);
-extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
-extern unsigned long gen_pool_alloc_algo(struct gen_pool *, size_t,
- genpool_algo_t algo, void *data);
+unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
+ genpool_algo_t algo, void *data, void **owner);
+
+static inline unsigned long gen_pool_alloc_owner(struct gen_pool *pool,
+ size_t size, void **owner)
+{
+ return gen_pool_alloc_algo_owner(pool, size, pool->algo, pool->data,
+ owner);
+}
+
+static inline unsigned long gen_pool_alloc_algo(struct gen_pool *pool,
+ size_t size, genpool_algo_t algo, void *data)
+{
+ return gen_pool_alloc_algo_owner(pool, size, algo, data, NULL);
+}
+
+/**
+ * gen_pool_alloc - allocate special memory from the pool
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ *
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Cannot be used in an NMI handler on architectures without an
+ * NMI-safe cmpxchg implementation.
+ */
+static inline unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
+{
+ return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
+}
+
extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size,
dma_addr_t *dma);
-extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
+extern void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, genpool_algo_t algo, void *data);
+extern void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, int align);
+extern void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma);
+extern void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, genpool_algo_t algo, void *data);
+extern void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, int align);
+extern void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr,
+ size_t size, void **owner);
+static inline void gen_pool_free(struct gen_pool *pool, unsigned long addr,
+ size_t size)
+{
+ gen_pool_free_owner(pool, addr, size, NULL);
+}
+
extern void gen_pool_for_each_chunk(struct gen_pool *,
void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *);
extern size_t gen_pool_avail(struct gen_pool *);
@@ -129,31 +182,31 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
- struct gen_pool *pool);
+ struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_fixed_alloc(unsigned long *map,
unsigned long size, unsigned long start, unsigned int nr,
- void *data, struct gen_pool *pool);
+ void *data, struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_first_fit_align(unsigned long *map,
unsigned long size, unsigned long start, unsigned int nr,
- void *data, struct gen_pool *pool);
+ void *data, struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
unsigned long size, unsigned long start, unsigned int nr,
- void *data, struct gen_pool *pool);
+ void *data, struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
- struct gen_pool *pool);
+ struct gen_pool *pool, unsigned long start_addr);
extern struct gen_pool *devm_gen_pool_create(struct device *dev,
int min_alloc_order, int nid, const char *name);
extern struct gen_pool *gen_pool_get(struct device *dev, const char *name);
-bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
+extern bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start,
size_t size);
#ifdef CONFIG_OF
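
The owner-tracking variants introduced above are easiest to read in use. The following sketch is illustrative only and not part of the diff (my_chunk_ctx and my_setup are invented names): a per-chunk context pointer is registered with gen_pool_add_owner() and handed back by gen_pool_alloc_owner() and gen_pool_free_owner().

#include <linux/device.h>
#include <linux/genalloc.h>

/* Hypothetical per-chunk context we want handed back at alloc time. */
struct my_chunk_ctx {
	int bank;
};

static int my_setup(struct device *dev, unsigned long virt,
		    phys_addr_t phys, size_t size, struct my_chunk_ctx *ctx)
{
	struct gen_pool *pool;
	unsigned long addr;
	void *owner;

	pool = gen_pool_create(3, dev_to_node(dev));	/* 8-byte granularity */
	if (!pool)
		return -ENOMEM;

	/* Unlike gen_pool_add_virt() (which passes owner == NULL), stash
	 * ctx with the chunk so allocations can report where they came from. */
	if (gen_pool_add_owner(pool, virt, phys, size, dev_to_node(dev), ctx)) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}

	addr = gen_pool_alloc_owner(pool, 64, &owner);
	if (addr) {
		/* owner now points at ctx for the chunk that satisfied the
		 * request; gen_pool_free_owner() reports it the same way. */
		gen_pool_free_owner(pool, addr, 64, &owner);
	}

	gen_pool_destroy(pool);
	return 0;
}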
diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
new file mode 100644
index 000000000000..107613f7d792
--- /dev/null
+++ b/include/linux/generic-radix-tree.h
@@ -0,0 +1,232 @@
+#ifndef _LINUX_GENERIC_RADIX_TREE_H
+#define _LINUX_GENERIC_RADIX_TREE_H
+
+/**
+ * DOC: Generic radix trees/sparse arrays
+ *
+ * Very simple and minimalistic, supporting arbitrary size entries up to
+ * PAGE_SIZE.
+ *
+ * A genradix is defined with the type it will store, like so:
+ *
+ * static GENRADIX(struct foo) foo_genradix;
+ *
+ * The main operations are:
+ *
+ * - genradix_init(radix) - initialize an empty genradix
+ *
+ * - genradix_free(radix) - free all memory owned by the genradix and
+ * reinitialize it
+ *
+ * - genradix_ptr(radix, idx) - gets a pointer to the entry at idx, returning
+ * NULL if that entry does not exist
+ *
+ * - genradix_ptr_alloc(radix, idx, gfp) - gets a pointer to an entry,
+ * allocating it if necessary
+ *
+ * - genradix_for_each(radix, iter, p) - iterate over each entry in a genradix
+ *
+ * The radix tree allocates one page of entries at a time, so entries may exist
+ * that were never explicitly allocated - they will be initialized to all
+ * zeroes.
+ *
+ * Internally, a genradix is just a radix tree of pages, and indexing works in
+ * terms of byte offsets. The wrappers in this header file use sizeof on the
+ * type the radix contains to calculate a byte offset from the index - see
+ * __idx_to_offset.
+ */
+
+#include <asm/page.h>
+#include <linux/bug.h>
+#include <linux/log2.h>
+#include <linux/math.h>
+#include <linux/types.h>
+
+struct genradix_root;
+
+struct __genradix {
+ struct genradix_root *root;
+};
+
+/*
+ * NOTE: currently, sizeof(_type) must not be larger than PAGE_SIZE.
+ */
+
+#define __GENRADIX_INITIALIZER \
+ { \
+ .tree = { \
+ .root = NULL, \
+ } \
+ }
+
+/*
+ * We use a 0 size array to stash the type we're storing without taking any
+ * space at runtime - then the various accessor macros can use typeof() to get
+ * to it for casts/sizeof - we also force the alignment so that storing a type
+ * with a ridiculous alignment doesn't blow up the alignment or size of the
+ * genradix.
+ */
+
+#define GENRADIX(_type) \
+struct { \
+ struct __genradix tree; \
+ _type type[0] __aligned(1); \
+}
+
+#define DEFINE_GENRADIX(_name, _type) \
+ GENRADIX(_type) _name = __GENRADIX_INITIALIZER
+
+/**
+ * genradix_init - initialize a genradix
+ * @_radix: genradix to initialize
+ *
+ * Does not fail
+ */
+#define genradix_init(_radix) \
+do { \
+ *(_radix) = (typeof(*(_radix))) __GENRADIX_INITIALIZER; \
+} while (0)
+
+void __genradix_free(struct __genradix *);
+
+/**
+ * genradix_free - free all memory owned by a genradix
+ * @_radix: the genradix to free
+ *
+ * After freeing, @_radix will be reinitialized and empty
+ */
+#define genradix_free(_radix) __genradix_free(&(_radix)->tree)
+
+static inline size_t __idx_to_offset(size_t idx, size_t obj_size)
+{
+ if (__builtin_constant_p(obj_size))
+ BUILD_BUG_ON(obj_size > PAGE_SIZE);
+ else
+ BUG_ON(obj_size > PAGE_SIZE);
+
+ if (!is_power_of_2(obj_size)) {
+ size_t objs_per_page = PAGE_SIZE / obj_size;
+
+ return (idx / objs_per_page) * PAGE_SIZE +
+ (idx % objs_per_page) * obj_size;
+ } else {
+ return idx * obj_size;
+ }
+}
+
+#define __genradix_cast(_radix) (typeof((_radix)->type[0]) *)
+#define __genradix_obj_size(_radix) sizeof((_radix)->type[0])
+#define __genradix_idx_to_offset(_radix, _idx) \
+ __idx_to_offset(_idx, __genradix_obj_size(_radix))
+
+void *__genradix_ptr(struct __genradix *, size_t);
+
+/**
+ * genradix_ptr - get a pointer to a genradix entry
+ * @_radix: genradix to access
+ * @_idx: index to fetch
+ *
+ * Returns a pointer to entry at @_idx, or NULL if that entry does not exist.
+ */
+#define genradix_ptr(_radix, _idx) \
+ (__genradix_cast(_radix) \
+ __genradix_ptr(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx)))
+
+void *__genradix_ptr_alloc(struct __genradix *, size_t, gfp_t);
+
+/**
+ * genradix_ptr_alloc - get a pointer to a genradix entry, allocating it
+ * if necessary
+ * @_radix: genradix to access
+ * @_idx: index to fetch
+ * @_gfp: gfp mask
+ *
+ * Returns a pointer to entry at @_idx, or NULL on allocation failure
+ */
+#define genradix_ptr_alloc(_radix, _idx, _gfp) \
+ (__genradix_cast(_radix) \
+ __genradix_ptr_alloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx), \
+ _gfp))
+
+struct genradix_iter {
+ size_t offset;
+ size_t pos;
+};
+
+/**
+ * genradix_iter_init - initialize a genradix_iter
+ * @_radix: genradix that will be iterated over
+ * @_idx: index to start iterating from
+ */
+#define genradix_iter_init(_radix, _idx) \
+ ((struct genradix_iter) { \
+ .pos = (_idx), \
+ .offset = __genradix_idx_to_offset((_radix), (_idx)),\
+ })
+
+void *__genradix_iter_peek(struct genradix_iter *, struct __genradix *, size_t);
+
+/**
+ * genradix_iter_peek - get first entry at or above iterator's current
+ * position
+ * @_iter: a genradix_iter
+ * @_radix: genradix being iterated over
+ *
+ * If no more entries exist at or above @_iter's current position, returns NULL
+ */
+#define genradix_iter_peek(_iter, _radix) \
+ (__genradix_cast(_radix) \
+ __genradix_iter_peek(_iter, &(_radix)->tree, \
+ PAGE_SIZE / __genradix_obj_size(_radix)))
+
+static inline void __genradix_iter_advance(struct genradix_iter *iter,
+ size_t obj_size)
+{
+ iter->offset += obj_size;
+
+ if (!is_power_of_2(obj_size) &&
+ (iter->offset & (PAGE_SIZE - 1)) + obj_size > PAGE_SIZE)
+ iter->offset = round_up(iter->offset, PAGE_SIZE);
+
+ iter->pos++;
+}
+
+#define genradix_iter_advance(_iter, _radix) \
+ __genradix_iter_advance(_iter, __genradix_obj_size(_radix))
+
+#define genradix_for_each_from(_radix, _iter, _p, _start) \
+ for (_iter = genradix_iter_init(_radix, _start); \
+ (_p = genradix_iter_peek(&_iter, _radix)) != NULL; \
+ genradix_iter_advance(&_iter, _radix))
+
+/**
+ * genradix_for_each - iterate over each entry in a genradix
+ * @_radix: genradix to iterate over
+ * @_iter: a genradix_iter to track current position
+ * @_p: pointer to genradix entry type
+ *
+ * On every iteration, @_p will point to the current entry, and @_iter.pos
+ * will be the current entry's index.
+ */
+#define genradix_for_each(_radix, _iter, _p) \
+ genradix_for_each_from(_radix, _iter, _p, 0)
+
+int __genradix_prealloc(struct __genradix *, size_t, gfp_t);
+
+/**
+ * genradix_prealloc - preallocate entries in a generic radix tree
+ * @_radix: genradix to preallocate
+ * @_nr: number of entries to preallocate
+ * @_gfp: gfp mask
+ *
+ * Returns 0 on success, -ENOMEM on failure
+ */
+#define genradix_prealloc(_radix, _nr, _gfp) \
+ __genradix_prealloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, (_nr) + 1),\
+ _gfp)
+
+#endif /* _LINUX_GENERIC_RADIX_TREE_H */
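
Taken together, the genradix API reads naturally in use. A minimal hypothetical sketch based only on the macros above (foo_table, record_foo and dump_foos are invented names): since struct foo below is 16 bytes, a power of two, __idx_to_offset() reduces to idx * 16; a 24-byte entry would instead pack 4096 / 24 = 170 objects per 4 KiB page, skipping the 16 slack bytes at each page boundary.

#include <linux/generic-radix-tree.h>
#include <linux/gfp.h>
#include <linux/printk.h>

struct foo {
	u64 seq;
	u32 state;
};

/* Zero-initialized static storage is a valid empty genradix
 * (root == NULL), matching __GENRADIX_INITIALIZER. */
static GENRADIX(struct foo) foo_table;

static int record_foo(size_t idx, u64 seq)
{
	/* Allocates the backing page on first touch; entries on the
	 * same page that were never written read back as zeroes. */
	struct foo *f = genradix_ptr_alloc(&foo_table, idx, GFP_KERNEL);

	if (!f)
		return -ENOMEM;
	f->seq = seq;
	return 0;
}

static void dump_foos(void)
{
	struct genradix_iter iter;
	struct foo *f;

	genradix_for_each(&foo_table, iter, f)
		pr_info("idx %zu: seq %llu\n", iter.pos,
			(unsigned long long)f->seq);

	genradix_free(&foo_table);	/* back to the empty state */
}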
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index a4c61cbce777..c285968e437a 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_GENERIC_NETLINK_H
#define __LINUX_GENERIC_NETLINK_H
@@ -7,35 +8,11 @@
/* All generic netlink requests are serialized by a global lock. */
extern void genl_lock(void);
extern void genl_unlock(void);
-#ifdef CONFIG_LOCKDEP
-extern bool lockdep_genl_is_held(void);
-#endif
/* for synchronisation between af_netlink and genetlink */
extern atomic_t genl_sk_destructing_cnt;
extern wait_queue_head_t genl_sk_destructing_waitq;
-/**
- * rcu_dereference_genl - rcu_dereference with debug checking
- * @p: The pointer to read, prior to dereferencing
- *
- * Do an rcu_dereference(p), but check caller either holds rcu_read_lock()
- * or genl mutex. Note : Please prefer genl_dereference() or rcu_dereference()
- */
-#define rcu_dereference_genl(p) \
- rcu_dereference_check(p, lockdep_genl_is_held())
-
-/**
- * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex
- * @p: The pointer to read, prior to dereferencing
- *
- * Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
- * caller holds genl mutex.
- */
-#define genl_dereference(p) \
- rcu_dereference_protected(p, lockdep_genl_is_held())
-
#define MODULE_ALIAS_GENL_FAMILY(family)\
MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family)
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
deleted file mode 100644
index e619fae2f037..000000000000
--- a/include/linux/genhd.h
+++ /dev/null
@@ -1,735 +0,0 @@
-#ifndef _LINUX_GENHD_H
-#define _LINUX_GENHD_H
-
-/*
- * genhd.h Copyright (C) 1992 Drew Eckhardt
- * Generic hard disk header file by
- * Drew Eckhardt
- *
- * <drew@colorado.edu>
- */
-
-#include <linux/types.h>
-#include <linux/kdev_t.h>
-#include <linux/rcupdate.h>
-#include <linux/slab.h>
-#include <linux/percpu-refcount.h>
-#include <linux/uuid.h>
-
-#ifdef CONFIG_BLOCK
-
-#define dev_to_disk(device) container_of((device), struct gendisk, part0.__dev)
-#define dev_to_part(device) container_of((device), struct hd_struct, __dev)
-#define disk_to_dev(disk) (&(disk)->part0.__dev)
-#define part_to_dev(part) (&((part)->__dev))
-
-extern struct device_type part_type;
-extern struct kobject *block_depr;
-extern struct class block_class;
-
-enum {
-/* These three have identical behaviour; use the second one if DOS FDISK gets
- confused about extended/logical partitions starting past cylinder 1023. */
- DOS_EXTENDED_PARTITION = 5,
- LINUX_EXTENDED_PARTITION = 0x85,
- WIN98_EXTENDED_PARTITION = 0x0f,
-
- SUN_WHOLE_DISK = DOS_EXTENDED_PARTITION,
-
- LINUX_SWAP_PARTITION = 0x82,
- LINUX_DATA_PARTITION = 0x83,
- LINUX_LVM_PARTITION = 0x8e,
- LINUX_RAID_PARTITION = 0xfd, /* autodetect RAID partition */
-
- SOLARIS_X86_PARTITION = LINUX_SWAP_PARTITION,
- NEW_SOLARIS_X86_PARTITION = 0xbf,
-
- DM6_AUX1PARTITION = 0x51, /* no DDO: use xlated geom */
- DM6_AUX3PARTITION = 0x53, /* no DDO: use xlated geom */
- DM6_PARTITION = 0x54, /* has DDO: use xlated geom & offset */
- EZD_PARTITION = 0x55, /* EZ-DRIVE */
-
- FREEBSD_PARTITION = 0xa5, /* FreeBSD Partition ID */
- OPENBSD_PARTITION = 0xa6, /* OpenBSD Partition ID */
- NETBSD_PARTITION = 0xa9, /* NetBSD Partition ID */
- BSDI_PARTITION = 0xb7, /* BSDI Partition ID */
- MINIX_PARTITION = 0x81, /* Minix Partition ID */
- UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */
-};
-
-#define DISK_MAX_PARTS 256
-#define DISK_NAME_LEN 32
-
-#include <linux/major.h>
-#include <linux/device.h>
-#include <linux/smp.h>
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/workqueue.h>
-
-struct partition {
- unsigned char boot_ind; /* 0x80 - active */
- unsigned char head; /* starting head */
- unsigned char sector; /* starting sector */
- unsigned char cyl; /* starting cylinder */
- unsigned char sys_ind; /* What partition type */
- unsigned char end_head; /* end head */
- unsigned char end_sector; /* end sector */
- unsigned char end_cyl; /* end cylinder */
- __le32 start_sect; /* starting sector counting from 0 */
- __le32 nr_sects; /* nr of sectors in partition */
-} __attribute__((packed));
-
-struct disk_stats {
- unsigned long sectors[2]; /* READs and WRITEs */
- unsigned long ios[2];
- unsigned long merges[2];
- unsigned long ticks[2];
- unsigned long io_ticks;
- unsigned long time_in_queue;
-};
-
-#define PARTITION_META_INFO_VOLNAMELTH 64
-/*
- * Enough for the string representation of any kind of UUID plus NULL.
- * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
- */
-#define PARTITION_META_INFO_UUIDLTH (UUID_STRING_LEN + 1)
-
-struct partition_meta_info {
- char uuid[PARTITION_META_INFO_UUIDLTH];
- u8 volname[PARTITION_META_INFO_VOLNAMELTH];
-};
-
-struct hd_struct {
- sector_t start_sect;
- /*
- * nr_sects is protected by sequence counter. One might extend a
- * partition while IO is happening to it and update of nr_sects
- * can be non-atomic on 32bit machines with 64bit sector_t.
- */
- sector_t nr_sects;
- seqcount_t nr_sects_seq;
- sector_t alignment_offset;
- unsigned int discard_alignment;
- struct device __dev;
- struct kobject *holder_dir;
- int policy, partno;
- struct partition_meta_info *info;
-#ifdef CONFIG_FAIL_MAKE_REQUEST
- int make_it_fail;
-#endif
- unsigned long stamp;
- atomic_t in_flight[2];
-#ifdef CONFIG_SMP
- struct disk_stats __percpu *dkstats;
-#else
- struct disk_stats dkstats;
-#endif
- struct percpu_ref ref;
- struct rcu_head rcu_head;
-};
-
-#define GENHD_FL_REMOVABLE 1
-/* 2 is unused */
-#define GENHD_FL_MEDIA_CHANGE_NOTIFY 4
-#define GENHD_FL_CD 8
-#define GENHD_FL_UP 16
-#define GENHD_FL_SUPPRESS_PARTITION_INFO 32
-#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */
-#define GENHD_FL_NATIVE_CAPACITY 128
-#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 256
-#define GENHD_FL_NO_PART_SCAN 512
-
-enum {
- DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
- DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
-};
-
-struct disk_part_tbl {
- struct rcu_head rcu_head;
- int len;
- struct hd_struct __rcu *last_lookup;
- struct hd_struct __rcu *part[];
-};
-
-struct disk_events;
-struct badblocks;
-
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-
-struct blk_integrity {
- const struct blk_integrity_profile *profile;
- unsigned char flags;
- unsigned char tuple_size;
- unsigned char interval_exp;
- unsigned char tag_size;
-};
-
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
-
-struct gendisk {
- /* major, first_minor and minors are input parameters only,
- * don't use directly. Use disk_devt() and disk_max_parts().
- */
- int major; /* major number of driver */
- int first_minor;
- int minors; /* maximum number of minors, =1 for
- * disks that can't be partitioned. */
-
- char disk_name[DISK_NAME_LEN]; /* name of major driver */
- char *(*devnode)(struct gendisk *gd, umode_t *mode);
-
- unsigned int events; /* supported events */
- unsigned int async_events; /* async events, subset of all */
-
- /* Array of pointers to partitions indexed by partno.
- * Protected with matching bdev lock but stat and other
- * non-critical accesses use RCU. Always access through
- * helpers.
- */
- struct disk_part_tbl __rcu *part_tbl;
- struct hd_struct part0;
-
- const struct block_device_operations *fops;
- struct request_queue *queue;
- void *private_data;
-
- int flags;
- struct kobject *slave_dir;
-
- struct timer_rand_state *random;
- atomic_t sync_io; /* RAID */
- struct disk_events *ev;
-#ifdef CONFIG_BLK_DEV_INTEGRITY
- struct kobject integrity_kobj;
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
- int node_id;
- struct badblocks *bb;
-};
-
-static inline struct gendisk *part_to_disk(struct hd_struct *part)
-{
- if (likely(part)) {
- if (part->partno)
- return dev_to_disk(part_to_dev(part)->parent);
- else
- return dev_to_disk(part_to_dev(part));
- }
- return NULL;
-}
-
-static inline int disk_max_parts(struct gendisk *disk)
-{
- if (disk->flags & GENHD_FL_EXT_DEVT)
- return DISK_MAX_PARTS;
- return disk->minors;
-}
-
-static inline bool disk_part_scan_enabled(struct gendisk *disk)
-{
- return disk_max_parts(disk) > 1 &&
- !(disk->flags & GENHD_FL_NO_PART_SCAN);
-}
-
-static inline dev_t disk_devt(struct gendisk *disk)
-{
- return disk_to_dev(disk)->devt;
-}
-
-static inline dev_t part_devt(struct hd_struct *part)
-{
- return part_to_dev(part)->devt;
-}
-
-extern struct hd_struct *disk_get_part(struct gendisk *disk, int partno);
-
-static inline void disk_put_part(struct hd_struct *part)
-{
- if (likely(part))
- put_device(part_to_dev(part));
-}
-
-/*
- * Smarter partition iterator without context limits.
- */
-#define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */
-#define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */
-#define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */
-#define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */
-
-struct disk_part_iter {
- struct gendisk *disk;
- struct hd_struct *part;
- int idx;
- unsigned int flags;
-};
-
-extern void disk_part_iter_init(struct disk_part_iter *piter,
- struct gendisk *disk, unsigned int flags);
-extern struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter);
-extern void disk_part_iter_exit(struct disk_part_iter *piter);
-
-extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
- sector_t sector);
-
-/*
- * Macros to operate on percpu disk statistics:
- *
- * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters
- * and should be called between disk_stat_lock() and
- * disk_stat_unlock().
- *
- * part_stat_read() can be called at any time.
- *
- * part_stat_{add|set_all}() and {init|free}_part_stats are for
- * internal use only.
- */
-#ifdef CONFIG_SMP
-#define part_stat_lock() ({ rcu_read_lock(); get_cpu(); })
-#define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0)
-
-#define __part_stat_add(cpu, part, field, addnd) \
- (per_cpu_ptr((part)->dkstats, (cpu))->field += (addnd))
-
-#define part_stat_read(part, field) \
-({ \
- typeof((part)->dkstats->field) res = 0; \
- unsigned int _cpu; \
- for_each_possible_cpu(_cpu) \
- res += per_cpu_ptr((part)->dkstats, _cpu)->field; \
- res; \
-})
-
-static inline void part_stat_set_all(struct hd_struct *part, int value)
-{
- int i;
-
- for_each_possible_cpu(i)
- memset(per_cpu_ptr(part->dkstats, i), value,
- sizeof(struct disk_stats));
-}
-
-static inline int init_part_stats(struct hd_struct *part)
-{
- part->dkstats = alloc_percpu(struct disk_stats);
- if (!part->dkstats)
- return 0;
- return 1;
-}
-
-static inline void free_part_stats(struct hd_struct *part)
-{
- free_percpu(part->dkstats);
-}
-
-#else /* !CONFIG_SMP */
-#define part_stat_lock() ({ rcu_read_lock(); 0; })
-#define part_stat_unlock() rcu_read_unlock()
-
-#define __part_stat_add(cpu, part, field, addnd) \
- ((part)->dkstats.field += addnd)
-
-#define part_stat_read(part, field) ((part)->dkstats.field)
-
-static inline void part_stat_set_all(struct hd_struct *part, int value)
-{
- memset(&part->dkstats, value, sizeof(struct disk_stats));
-}
-
-static inline int init_part_stats(struct hd_struct *part)
-{
- return 1;
-}
-
-static inline void free_part_stats(struct hd_struct *part)
-{
-}
-
-#endif /* CONFIG_SMP */
-
-#define part_stat_add(cpu, part, field, addnd) do { \
- __part_stat_add((cpu), (part), field, addnd); \
- if ((part)->partno) \
- __part_stat_add((cpu), &part_to_disk((part))->part0, \
- field, addnd); \
-} while (0)
-
-#define part_stat_dec(cpu, gendiskp, field) \
- part_stat_add(cpu, gendiskp, field, -1)
-#define part_stat_inc(cpu, gendiskp, field) \
- part_stat_add(cpu, gendiskp, field, 1)
-#define part_stat_sub(cpu, gendiskp, field, subnd) \
- part_stat_add(cpu, gendiskp, field, -subnd)
-
-static inline void part_inc_in_flight(struct hd_struct *part, int rw)
-{
- atomic_inc(&part->in_flight[rw]);
- if (part->partno)
- atomic_inc(&part_to_disk(part)->part0.in_flight[rw]);
-}
-
-static inline void part_dec_in_flight(struct hd_struct *part, int rw)
-{
- atomic_dec(&part->in_flight[rw]);
- if (part->partno)
- atomic_dec(&part_to_disk(part)->part0.in_flight[rw]);
-}
-
-static inline int part_in_flight(struct hd_struct *part)
-{
- return atomic_read(&part->in_flight[0]) + atomic_read(&part->in_flight[1]);
-}
-
-static inline struct partition_meta_info *alloc_part_info(struct gendisk *disk)
-{
- if (disk)
- return kzalloc_node(sizeof(struct partition_meta_info),
- GFP_KERNEL, disk->node_id);
- return kzalloc(sizeof(struct partition_meta_info), GFP_KERNEL);
-}
-
-static inline void free_part_info(struct hd_struct *part)
-{
- kfree(part->info);
-}
-
-/* block/blk-core.c */
-extern void part_round_stats(int cpu, struct hd_struct *part);
-
-/* block/genhd.c */
-extern void device_add_disk(struct device *parent, struct gendisk *disk);
-static inline void add_disk(struct gendisk *disk)
-{
- device_add_disk(NULL, disk);
-}
-
-extern void del_gendisk(struct gendisk *gp);
-extern struct gendisk *get_gendisk(dev_t dev, int *partno);
-extern struct block_device *bdget_disk(struct gendisk *disk, int partno);
-
-extern void set_device_ro(struct block_device *bdev, int flag);
-extern void set_disk_ro(struct gendisk *disk, int flag);
-
-static inline int get_disk_ro(struct gendisk *disk)
-{
- return disk->part0.policy;
-}
-
-extern void disk_block_events(struct gendisk *disk);
-extern void disk_unblock_events(struct gendisk *disk);
-extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
-extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
-
-/* drivers/char/random.c */
-extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
-extern void rand_initialize_disk(struct gendisk *disk);
-
-static inline sector_t get_start_sect(struct block_device *bdev)
-{
- return bdev->bd_part->start_sect;
-}
-static inline sector_t get_capacity(struct gendisk *disk)
-{
- return disk->part0.nr_sects;
-}
-static inline void set_capacity(struct gendisk *disk, sector_t size)
-{
- disk->part0.nr_sects = size;
-}
-
-#ifdef CONFIG_SOLARIS_X86_PARTITION
-
-#define SOLARIS_X86_NUMSLICE 16
-#define SOLARIS_X86_VTOC_SANE (0x600DDEEEUL)
-
-struct solaris_x86_slice {
- __le16 s_tag; /* ID tag of partition */
- __le16 s_flag; /* permission flags */
- __le32 s_start; /* start sector no of partition */
- __le32 s_size; /* # of blocks in partition */
-};
-
-struct solaris_x86_vtoc {
- unsigned int v_bootinfo[3]; /* info needed by mboot (unsupported) */
- __le32 v_sanity; /* to verify vtoc sanity */
- __le32 v_version; /* layout version */
- char v_volume[8]; /* volume name */
- __le16 v_sectorsz; /* sector size in bytes */
- __le16 v_nparts; /* number of partitions */
- unsigned int v_reserved[10]; /* free space */
- struct solaris_x86_slice
- v_slice[SOLARIS_X86_NUMSLICE]; /* slice headers */
- unsigned int timestamp[SOLARIS_X86_NUMSLICE]; /* timestamp (unsupported) */
- char v_asciilabel[128]; /* for compatibility */
-};
-
-#endif /* CONFIG_SOLARIS_X86_PARTITION */
-
-#ifdef CONFIG_BSD_DISKLABEL
-/*
- * BSD disklabel support by Yossi Gottlieb <yogo@math.tau.ac.il>
- * updated by Marc Espie <Marc.Espie@openbsd.org>
- */
-
-/* check against BSD src/sys/sys/disklabel.h for consistency */
-
-#define BSD_DISKMAGIC (0x82564557UL) /* The disk magic number */
-#define BSD_MAXPARTITIONS 16
-#define OPENBSD_MAXPARTITIONS 16
-#define BSD_FS_UNUSED 0 /* disklabel unused partition entry ID */
-struct bsd_disklabel {
- __le32 d_magic; /* the magic number */
- __s16 d_type; /* drive type */
- __s16 d_subtype; /* controller/d_type specific */
- char d_typename[16]; /* type name, e.g. "eagle" */
- char d_packname[16]; /* pack identifier */
- __u32 d_secsize; /* # of bytes per sector */
- __u32 d_nsectors; /* # of data sectors per track */
- __u32 d_ntracks; /* # of tracks per cylinder */
- __u32 d_ncylinders; /* # of data cylinders per unit */
- __u32 d_secpercyl; /* # of data sectors per cylinder */
- __u32 d_secperunit; /* # of data sectors per unit */
- __u16 d_sparespertrack; /* # of spare sectors per track */
- __u16 d_sparespercyl; /* # of spare sectors per cylinder */
- __u32 d_acylinders; /* # of alt. cylinders per unit */
- __u16 d_rpm; /* rotational speed */
- __u16 d_interleave; /* hardware sector interleave */
- __u16 d_trackskew; /* sector 0 skew, per track */
- __u16 d_cylskew; /* sector 0 skew, per cylinder */
- __u32 d_headswitch; /* head switch time, usec */
- __u32 d_trkseek; /* track-to-track seek, usec */
- __u32 d_flags; /* generic flags */
-#define NDDATA 5
- __u32 d_drivedata[NDDATA]; /* drive-type specific information */
-#define NSPARE 5
- __u32 d_spare[NSPARE]; /* reserved for future use */
- __le32 d_magic2; /* the magic number (again) */
- __le16 d_checksum; /* xor of data incl. partitions */
-
- /* filesystem and partition information: */
- __le16 d_npartitions; /* number of partitions in following */
- __le32 d_bbsize; /* size of boot area at sn0, bytes */
- __le32 d_sbsize; /* max size of fs superblock, bytes */
- struct bsd_partition { /* the partition table */
- __le32 p_size; /* number of sectors in partition */
- __le32 p_offset; /* starting sector */
- __le32 p_fsize; /* filesystem basic fragment size */
- __u8 p_fstype; /* filesystem type, see below */
- __u8 p_frag; /* filesystem fragments per block */
- __le16 p_cpg; /* filesystem cylinders per group */
- } d_partitions[BSD_MAXPARTITIONS]; /* actually may be more */
-};
-
-#endif /* CONFIG_BSD_DISKLABEL */
-
-#ifdef CONFIG_UNIXWARE_DISKLABEL
-/*
- * Unixware slices support by Andrzej Krzysztofowicz <ankry@mif.pg.gda.pl>
- * and Krzysztof G. Baranowski <kgb@knm.org.pl>
- */
-
-#define UNIXWARE_DISKMAGIC (0xCA5E600DUL) /* The disk magic number */
-#define UNIXWARE_DISKMAGIC2 (0x600DDEEEUL) /* The slice table magic nr */
-#define UNIXWARE_NUMSLICE 16
-#define UNIXWARE_FS_UNUSED 0 /* Unused slice entry ID */
-
-struct unixware_slice {
- __le16 s_label; /* label */
- __le16 s_flags; /* permission flags */
- __le32 start_sect; /* starting sector */
- __le32 nr_sects; /* number of sectors in slice */
-};
-
-struct unixware_disklabel {
- __le32 d_type; /* drive type */
- __le32 d_magic; /* the magic number */
- __le32 d_version; /* version number */
- char d_serial[12]; /* serial number of the device */
- __le32 d_ncylinders; /* # of data cylinders per device */
- __le32 d_ntracks; /* # of tracks per cylinder */
- __le32 d_nsectors; /* # of data sectors per track */
- __le32 d_secsize; /* # of bytes per sector */
- __le32 d_part_start; /* # of first sector of this partition */
- __le32 d_unknown1[12]; /* ? */
- __le32 d_alt_tbl; /* byte offset of alternate table */
- __le32 d_alt_len; /* byte length of alternate table */
- __le32 d_phys_cyl; /* # of physical cylinders per device */
- __le32 d_phys_trk; /* # of physical tracks per cylinder */
- __le32 d_phys_sec; /* # of physical sectors per track */
- __le32 d_phys_bytes; /* # of physical bytes per sector */
- __le32 d_unknown2; /* ? */
- __le32 d_unknown3; /* ? */
- __le32 d_pad[8]; /* pad */
-
- struct unixware_vtoc {
- __le32 v_magic; /* the magic number */
- __le32 v_version; /* version number */
- char v_name[8]; /* volume name */
- __le16 v_nslices; /* # of slices */
- __le16 v_unknown1; /* ? */
- __le32 v_reserved[10]; /* reserved */
- struct unixware_slice
- v_slice[UNIXWARE_NUMSLICE]; /* slice headers */
- } vtoc;
-
-}; /* 408 */
-
-#endif /* CONFIG_UNIXWARE_DISKLABEL */
-
-#ifdef CONFIG_MINIX_SUBPARTITION
-# define MINIX_NR_SUBPARTITIONS 4
-#endif /* CONFIG_MINIX_SUBPARTITION */
-
-#define ADDPART_FLAG_NONE 0
-#define ADDPART_FLAG_RAID 1
-#define ADDPART_FLAG_WHOLEDISK 2
-
-extern int blk_alloc_devt(struct hd_struct *part, dev_t *devt);
-extern void blk_free_devt(dev_t devt);
-extern dev_t blk_lookup_devt(const char *name, int partno);
-extern char *disk_name (struct gendisk *hd, int partno, char *buf);
-
-extern int disk_expand_part_tbl(struct gendisk *disk, int target);
-extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
-extern int invalidate_partitions(struct gendisk *disk, struct block_device *bdev);
-extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
- int partno, sector_t start,
- sector_t len, int flags,
- struct partition_meta_info
- *info);
-extern void __delete_partition(struct percpu_ref *);
-extern void delete_partition(struct gendisk *, int);
-extern void printk_all_partitions(void);
-
-extern struct gendisk *alloc_disk_node(int minors, int node_id);
-extern struct gendisk *alloc_disk(int minors);
-extern struct kobject *get_disk(struct gendisk *disk);
-extern void put_disk(struct gendisk *disk);
-extern void blk_register_region(dev_t devt, unsigned long range,
- struct module *module,
- struct kobject *(*probe)(dev_t, int *, void *),
- int (*lock)(dev_t, void *),
- void *data);
-extern void blk_unregister_region(dev_t devt, unsigned long range);
-
-extern ssize_t part_size_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-extern ssize_t part_stat_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-extern ssize_t part_inflight_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-#ifdef CONFIG_FAIL_MAKE_REQUEST
-extern ssize_t part_fail_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-extern ssize_t part_fail_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count);
-#endif /* CONFIG_FAIL_MAKE_REQUEST */
-
-static inline int hd_ref_init(struct hd_struct *part)
-{
- if (percpu_ref_init(&part->ref, __delete_partition, 0,
- GFP_KERNEL))
- return -ENOMEM;
- return 0;
-}
-
-static inline void hd_struct_get(struct hd_struct *part)
-{
- percpu_ref_get(&part->ref);
-}
-
-static inline int hd_struct_try_get(struct hd_struct *part)
-{
- return percpu_ref_tryget_live(&part->ref);
-}
-
-static inline void hd_struct_put(struct hd_struct *part)
-{
- percpu_ref_put(&part->ref);
-}
-
-static inline void hd_struct_kill(struct hd_struct *part)
-{
- percpu_ref_kill(&part->ref);
-}
-
-static inline void hd_free_part(struct hd_struct *part)
-{
- free_part_stats(part);
- free_part_info(part);
- percpu_ref_exit(&part->ref);
-}
-
-/*
- * Any access of part->nr_sects which is not protected by partition
- * bd_mutex or gendisk bdev bd_mutex, should be done using this
- * accessor function.
- *
- * Code written along the lines of i_size_read() and i_size_write().
- * CONFIG_PREEMPT case optimizes the case of UP kernel with preemption
- * on.
- */
-static inline sector_t part_nr_sects_read(struct hd_struct *part)
-{
-#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP)
- sector_t nr_sects;
- unsigned seq;
- do {
- seq = read_seqcount_begin(&part->nr_sects_seq);
- nr_sects = part->nr_sects;
- } while (read_seqcount_retry(&part->nr_sects_seq, seq));
- return nr_sects;
-#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT)
- sector_t nr_sects;
-
- preempt_disable();
- nr_sects = part->nr_sects;
- preempt_enable();
- return nr_sects;
-#else
- return part->nr_sects;
-#endif
-}
-
-/*
- * Should be called with mutex lock held (typically bd_mutex) of partition
- * to provide mutual exlusion among writers otherwise seqcount might be
- * left in wrong state leaving the readers spinning infinitely.
- */
-static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
-{
-#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP)
- write_seqcount_begin(&part->nr_sects_seq);
- part->nr_sects = size;
- write_seqcount_end(&part->nr_sects_seq);
-#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT)
- preempt_disable();
- part->nr_sects = size;
- preempt_enable();
-#else
- part->nr_sects = size;
-#endif
-}
-
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-extern void blk_integrity_add(struct gendisk *);
-extern void blk_integrity_del(struct gendisk *);
-#else /* CONFIG_BLK_DEV_INTEGRITY */
-static inline void blk_integrity_add(struct gendisk *disk) { }
-static inline void blk_integrity_del(struct gendisk *disk) { }
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
-
-#else /* CONFIG_BLOCK */
-
-static inline void printk_all_partitions(void) { }
-
-static inline dev_t blk_lookup_devt(const char *name, int partno)
-{
- dev_t devt = MKDEV(0, 0);
- return devt;
-}
-#endif /* CONFIG_BLOCK */
-
-#endif /* _LINUX_GENHD_H */
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
index 377257d8f7e3..2984b0cb24b1 100644
--- a/include/linux/genl_magic_func.h
+++ b/include/linux/genl_magic_func.h
@@ -1,6 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef GENL_MAGIC_FUNC_H
#define GENL_MAGIC_FUNC_H
+#include <linux/build_bug.h>
#include <linux/genl_magic_struct.h>
/*
@@ -131,17 +133,6 @@ static void dprint_array(const char *dir, int nla_type,
* use one static buffer for parsing of nested attributes */
static struct nlattr *nested_attr_tb[128];
-#ifndef BUILD_BUG_ON
-/* Force a compilation error if condition is true */
-#define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition))
-/* Force a compilation error if condition is true, but also produce a
- result (of value 0 and type size_t), so the expression can be used
- e.g. in a structure initializer (or where-ever else comma expressions
- aren't permitted). */
-#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
-#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))
-#endif
-
#undef GENL_struct
#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
/* *_from_attrs functions are static, but potentially unused */ \
@@ -218,7 +209,7 @@ static int s_name ## _from_attrs_for_change(struct s_name *s, \
* Magic: define op number to op name mapping {{{1
* {{{2
*/
-const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
+static const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
{
switch (cmd) {
#undef GENL_op
@@ -242,7 +233,6 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
{ \
handler \
.cmd = op_name, \
- .policy = CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy), \
},
#define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
@@ -299,10 +289,12 @@ static struct genl_family ZZZ_genl_family __ro_after_init = {
#ifdef GENL_MAGIC_FAMILY_HDRSZ
.hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ),
#endif
- .maxattr = ARRAY_SIZE(drbd_tla_nl_policy)-1,
+ .maxattr = ARRAY_SIZE(CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy))-1,
+ .policy = CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy),
.ops = ZZZ_genl_ops,
.n_ops = ARRAY_SIZE(ZZZ_genl_ops),
.mcgrps = ZZZ_genl_mcgrps,
+ .resv_start_op = 42, /* drbd is currently the only user */
.n_mcgrps = ARRAY_SIZE(ZZZ_genl_mcgrps),
.module = THIS_MODULE,
};
@@ -413,4 +405,3 @@ s_fields \
/* }}}1 */
#endif /* GENL_MAGIC_FUNC_H */
-/* vim: set foldmethod=marker foldlevel=1 nofoldenable : */
diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h
index 6270a56e5edc..f81d48987528 100644
--- a/include/linux/genl_magic_struct.h
+++ b/include/linux/genl_magic_struct.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef GENL_MAGIC_STRUCT_H
#define GENL_MAGIC_STRUCT_H
@@ -88,7 +89,7 @@ static inline int nla_put_u64_0pad(struct sk_buff *skb, int attrtype, u64 value)
nla_get_u64, nla_put_u64_0pad, false)
#define __str_field(attr_nr, attr_flag, name, maxlen) \
__array(attr_nr, attr_flag, name, NLA_NUL_STRING, char, maxlen, \
- nla_strlcpy, nla_put, false)
+ nla_strscpy, nla_put, false)
#define __bin_field(attr_nr, attr_flag, name, maxlen) \
__array(attr_nr, attr_flag, name, NLA_BINARY, char, maxlen, \
nla_memcpy, nla_put, false)
@@ -190,6 +191,7 @@ static inline void ct_assert_unique_operations(void)
{
switch (0) {
#include GENL_MAGIC_INCLUDE_FILE
+ case 0:
;
}
}
@@ -208,6 +210,7 @@ static inline void ct_assert_unique_top_level_attributes(void)
{
switch (0) {
#include GENL_MAGIC_INCLUDE_FILE
+ case 0:
;
}
}
@@ -217,7 +220,8 @@ static inline void ct_assert_unique_top_level_attributes(void)
static inline void ct_assert_unique_ ## s_name ## _attributes(void) \
{ \
switch (0) { \
- s_fields \
+ s_fields \
+ case 0: \
; \
} \
}
@@ -279,4 +283,3 @@ enum { \
/* }}}1 */
#endif /* GENL_MAGIC_STRUCT_H */
-/* vim: set foldmethod=marker nofoldenable : */
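
The case 0: additions above are part of a compile-time trick worth spelling out. In ct_assert_unique_operations() and friends, every attribute/op number in the included file expands to a case label inside switch (0), so a duplicated number becomes a duplicate case value and the build fails; the explicit case 0: appears to simply guarantee the switch body always contains a labeled statement, even when an expansion contributes nothing. A standalone sketch of the idea (not from the diff):

/* Each expanded entry contributes one case label under switch (0). */
static inline void ct_assert_unique_example(void)
{
	switch (0) {
	case 1:		/* e.g. first attribute number */
		;
	case 2:		/* a second number - must differ from 1 */
		;
	case 0:		/* keeps the switch body non-empty */
		;
	}
}
/* Changing "case 2:" to "case 1:" fails with "duplicate case value";
 * that compile error is the assertion firing. */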
diff --git a/include/linux/getcpu.h b/include/linux/getcpu.h
index c7372d7a97be..c304dcdb4eac 100644
--- a/include/linux/getcpu.h
+++ b/include/linux/getcpu.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_GETCPU_H
#define _LINUX_GETCPU_H 1
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index bcfb9f7c46f5..ed8cb537c6a7 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -1,319 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H
-#include <linux/mmdebug.h>
+#include <linux/gfp_types.h>
+
#include <linux/mmzone.h>
-#include <linux/stddef.h>
-#include <linux/linkage.h>
#include <linux/topology.h>
struct vm_area_struct;
-/*
- * In case of changes, please don't forget to update
- * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
- */
-
-/* Plain integer GFP bitmasks. Do not use this directly. */
-#define ___GFP_DMA 0x01u
-#define ___GFP_HIGHMEM 0x02u
-#define ___GFP_DMA32 0x04u
-#define ___GFP_MOVABLE 0x08u
-#define ___GFP_RECLAIMABLE 0x10u
-#define ___GFP_HIGH 0x20u
-#define ___GFP_IO 0x40u
-#define ___GFP_FS 0x80u
-#define ___GFP_COLD 0x100u
-#define ___GFP_NOWARN 0x200u
-#define ___GFP_RETRY_MAYFAIL 0x400u
-#define ___GFP_NOFAIL 0x800u
-#define ___GFP_NORETRY 0x1000u
-#define ___GFP_MEMALLOC 0x2000u
-#define ___GFP_COMP 0x4000u
-#define ___GFP_ZERO 0x8000u
-#define ___GFP_NOMEMALLOC 0x10000u
-#define ___GFP_HARDWALL 0x20000u
-#define ___GFP_THISNODE 0x40000u
-#define ___GFP_ATOMIC 0x80000u
-#define ___GFP_ACCOUNT 0x100000u
-#define ___GFP_NOTRACK 0x200000u
-#define ___GFP_DIRECT_RECLAIM 0x400000u
-#define ___GFP_WRITE 0x800000u
-#define ___GFP_KSWAPD_RECLAIM 0x1000000u
-#ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP 0x2000000u
-#else
-#define ___GFP_NOLOCKDEP 0
-#endif
-/* If the above are modified, __GFP_BITS_SHIFT may need updating */
-
-/*
- * Physical address zone modifiers (see linux/mmzone.h - low four bits)
- *
- * Do not put any conditional on these. If necessary modify the definitions
- * without the underscores and use them consistently. The definitions here may
- * be used in bit comparisons.
- */
-#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
-#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
-#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
-#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
-#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
-
-/*
- * Page mobility and placement hints
- *
- * These flags provide hints about how mobile the page is. Pages with similar
- * mobility are placed within the same pageblocks to minimise problems due
- * to external fragmentation.
- *
- * __GFP_MOVABLE (also a zone modifier) indicates that the page can be
- * moved by page migration during memory compaction or can be reclaimed.
- *
- * __GFP_RECLAIMABLE is used for slab allocations that specify
- * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
- *
- * __GFP_WRITE indicates the caller intends to dirty the page. Where possible,
- * these pages will be spread between local zones to avoid all the dirty
- * pages being in one zone (fair zone allocation policy).
- *
- * __GFP_HARDWALL enforces the cpuset memory allocation policy.
- *
- * __GFP_THISNODE forces the allocation to be satisified from the requested
- * node with no fallbacks or placement policy enforcements.
- *
- * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
- */
-#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
-#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
-#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
-#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
-#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
-
-/*
- * Watermark modifiers -- controls access to emergency reserves
- *
- * __GFP_HIGH indicates that the caller is high-priority and that granting
- * the request is necessary before the system can make forward progress.
- * For example, creating an IO context to clean pages.
- *
- * __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
- * high priority. Users are typically interrupt handlers. This may be
- * used in conjunction with __GFP_HIGH
- *
- * __GFP_MEMALLOC allows access to all memory. This should only be used when
- * the caller guarantees the allocation will allow more memory to be freed
- * very shortly e.g. process exiting or swapping. Users either should
- * be the MM or co-ordinating closely with the VM (e.g. swap over NFS).
- *
- * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
- * This takes precedence over the __GFP_MEMALLOC flag if both are set.
- */
-#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
-#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
-#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
-#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
-
-/*
- * Reclaim modifiers
- *
- * __GFP_IO can start physical IO.
- *
- * __GFP_FS can call down to the low-level FS. Clearing the flag avoids the
- * allocator recursing into the filesystem which might already be holding
- * locks.
- *
- * __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
- * This flag can be cleared to avoid unnecessary delays when a fallback
- * option is available.
- *
- * __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
- * the low watermark is reached and have it reclaim pages until the high
- * watermark is reached. A caller may wish to clear this flag when fallback
- * options are available and the reclaim is likely to disrupt the system. The
- * canonical example is THP allocation where a fallback is cheap but
- * reclaim/compaction may cause indirect stalls.
- *
- * __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
- *
- * The default allocator behavior depends on the request size. We have a concept
- * of so called costly allocations (with order > PAGE_ALLOC_COSTLY_ORDER).
- * !costly allocations are too essential to fail so they are implicitly
- * non-failing by default (with some exceptions like OOM victims might fail so
- * the caller still has to check for failures) while costly requests try to be
- * not disruptive and back off even without invoking the OOM killer.
- * The following three modifiers might be used to override some of these
- * implicit rules
- *
- * __GFP_NORETRY: The VM implementation will try only very lightweight
- * memory direct reclaim to get some memory under memory pressure (thus
- * it can sleep). It will avoid disruptive actions like OOM killer. The
- * caller must handle the failure which is quite likely to happen under
- * heavy memory pressure. The flag is suitable when failure can easily be
- * handled at small cost, such as reduced throughput
- *
- * __GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
- * procedures that have previously failed if there is some indication
- * that progress has been made else where. It can wait for other
- * tasks to attempt high level approaches to freeing memory such as
- * compaction (which removes fragmentation) and page-out.
- * There is still a definite limit to the number of retries, but it is
- * a larger limit than with __GFP_NORETRY.
- * Allocations with this flag may fail, but only when there is
- * genuinely little unused memory. While these allocations do not
- * directly trigger the OOM killer, their failure indicates that
- * the system is likely to need to use the OOM killer soon. The
- * caller must handle failure, but can reasonably do so by failing
- * a higher-level request, or completing it only in a much less
- * efficient manner.
- * If the allocation does fail, and the caller is in a position to
- * free some non-essential memory, doing so could benefit the system
- * as a whole.
- *
- * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
- * cannot handle allocation failures. The allocation could block
- * indefinitely but will never return with failure. Testing for
- * failure is pointless.
- * New users should be evaluated carefully (and the flag should be
- * used only when there is no reasonable failure policy) but it is
- * definitely preferable to use the flag rather than opencode endless
- * loop around allocator.
- * Using this flag for costly allocations is _highly_ discouraged.
- */
-#define __GFP_IO ((__force gfp_t)___GFP_IO)
-#define __GFP_FS ((__force gfp_t)___GFP_FS)
-#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
-#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
-#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
-#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL)
-#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
-#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
-
-/*
- * Action modifiers
- *
- * __GFP_COLD indicates that the caller does not expect to be used in the near
- * future. Where possible, a cache-cold page will be returned.
- *
- * __GFP_NOWARN suppresses allocation failure reports.
- *
- * __GFP_COMP address compound page metadata.
- *
- * __GFP_ZERO returns a zeroed page on success.
- *
- * __GFP_NOTRACK avoids tracking with kmemcheck.
- *
- * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
- * distinguishing in the source between false positives and allocations that
- * cannot be supported (e.g. page tables).
- */
-#define __GFP_COLD ((__force gfp_t)___GFP_COLD)
-#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
-#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
-#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
-#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK)
-#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
-
-/* Disable lockdep for GFP context tracking */
-#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
-
-/* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (25 + IS_ENABLED(CONFIG_LOCKDEP))
-#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
-
-/*
- * Useful GFP flag combinations that are commonly used. It is recommended
- * that subsystems start with one of these combinations and then set/clear
- * __GFP_FOO flags as necessary.
- *
- * GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower
- * watermark is applied to allow access to "atomic reserves"
- *
- * GFP_KERNEL is typical for kernel-internal allocations. The caller requires
- * ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
- *
- * GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
- * accounted to kmemcg.
- *
- * GFP_NOWAIT is for kernel allocations that should not stall for direct
- * reclaim, start physical IO or use any filesystem callback.
- *
- * GFP_NOIO will use direct reclaim to discard clean pages or slab pages
- * that do not require the starting of any physical IO.
- * Please try to avoid using this flag directly and instead use
- * memalloc_noio_{save,restore} to mark the whole scope which cannot
- * perform any IO with a short explanation why. All allocation requests
- * will inherit GFP_NOIO implicitly.
- *
- * GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
- * Please try to avoid using this flag directly and instead use
- * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
- * recurse into the FS layer with a short explanation why. All allocation
- * requests will inherit GFP_NOFS implicitly.
- *
- * GFP_USER is for userspace allocations that also need to be directly
- * accessibly by the kernel or hardware. It is typically used by hardware
- * for buffers that are mapped to userspace (e.g. graphics) that hardware
- * still must DMA to. cpuset limits are enforced for these allocations.
- *
- * GFP_DMA exists for historical reasons and should be avoided where possible.
- * The flags indicates that the caller requires that the lowest zone be
- * used (ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
- * it would require careful auditing as some users really require it and
- * others use the flag to avoid lowmem reserves in ZONE_DMA and treat the
- * lowest zone as a type of emergency reserve.
- *
- * GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit
- * address.
- *
- * GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
- * do not need to be directly accessible by the kernel but that cannot
- * move once in use. An example may be a hardware allocation that maps
- * data directly into userspace but has no addressing limitations.
- *
- * GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
- * need direct access to but can use kmap() when access is required. They
- * are expected to be movable via page reclaim or page migration. Typically,
- * pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE.
- *
- * GFP_TRANSHUGE and GFP_TRANSHUGE_LIGHT are used for THP allocations. They are
- * compound allocations that will generally fail quickly if memory is not
- * available and will not wake kswapd/kcompactd on failure. The _LIGHT
- * version does not attempt reclaim/compaction at all and is by default used
- * in page fault path, while the non-light is used by khugepaged.
- */
-#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
-#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
-#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
-#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
-#define GFP_NOIO (__GFP_RECLAIM)
-#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
-#define GFP_TEMPORARY (__GFP_RECLAIM | __GFP_IO | __GFP_FS | \
- __GFP_RECLAIMABLE)
-#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
-#define GFP_DMA __GFP_DMA
-#define GFP_DMA32 __GFP_DMA32
-#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
-#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
-#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
- __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
-#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
-
/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3
-static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
+static inline int gfp_migratetype(const gfp_t gfp_flags)
{
VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
+ BUILD_BUG_ON((___GFP_RECLAIMABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_RECLAIMABLE);
+ BUILD_BUG_ON(((___GFP_MOVABLE | ___GFP_RECLAIMABLE) >>
+ GFP_MOVABLE_SHIFT) != MIGRATE_HIGHATOMIC);
if (unlikely(page_group_by_mobility_disabled))
return MIGRATE_UNMOVABLE;
/* Group based on mobility */
- return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+ return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
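
Worked out, the shift mapping those BUILD_BUG_ON()s pin down looks like this (illustrative; uses the bit values from the block removed above and assumes the usual mmzone.h ordering MIGRATE_UNMOVABLE = 0, MIGRATE_MOVABLE = 1, MIGRATE_RECLAIMABLE = 2, MIGRATE_HIGHATOMIC = 3):

/*
 * ___GFP_MOVABLE     = 0x08 (bit 3)
 * ___GFP_RECLAIMABLE = 0x10 (bit 4)
 * GFP_MOVABLE_SHIFT  = 3
 *
 * flags & GFP_MOVABLE_MASK               >> 3   migratetype
 * -------------------------------------  ----   -------------------
 * 0 (e.g. GFP_KERNEL)                    0      MIGRATE_UNMOVABLE
 * __GFP_MOVABLE                          1      MIGRATE_MOVABLE
 * __GFP_RECLAIMABLE                      2      MIGRATE_RECLAIMABLE
 * both (invalid, caught by VM_WARN_ON)   3      MIGRATE_HIGHATOMIC
 */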
@@ -358,7 +71,7 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
* 0x1 => DMA or NORMAL
* 0x2 => HIGHMEM or NORMAL
* 0x3 => BAD (DMA+HIGHMEM)
- * 0x4 => DMA32 or DMA or NORMAL
+ * 0x4 => DMA32 or NORMAL
* 0x5 => BAD (DMA+DMA32)
* 0x6 => BAD (HIGHMEM+DMA32)
* 0x7 => BAD (HIGHMEM+DMA32+DMA)
@@ -366,7 +79,7 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
* 0x9 => DMA or NORMAL (MOVABLE+DMA)
* 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too)
* 0xb => BAD (MOVABLE+HIGHMEM+DMA)
- * 0xc => DMA32 (MOVABLE+DMA32)
+ * 0xc => DMA32 or NORMAL (MOVABLE+DMA32)
* 0xd => BAD (MOVABLE+DMA32+DMA)
* 0xe => BAD (MOVABLE+DMA32+HIGHMEM)
* 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
@@ -442,12 +155,12 @@ static inline int gfp_zonelist(gfp_t flags)
/*
* We get the zone list from the current node and the gfp_mask.
- * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
+ * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
* There are two zonelists per node, one for all zones with memory and
* one containing just zones from the node the zonelist belongs to.
*
- * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
- * optimized to &contig_page_data at compile-time.
+ * For the case of non-NUMA systems the NODE_DATA() gets optimized to
+ * &contig_page_data at compile-time.
*/
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
@@ -461,14 +174,54 @@ static inline void arch_free_page(struct page *page, int order) { }
static inline void arch_alloc_page(struct page *page, int order) { }
#endif
-struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
- nodemask_t *nodemask);
+struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
+ nodemask_t *nodemask);
+struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
+ nodemask_t *nodemask);
-static inline struct page *
-__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
+unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
+ nodemask_t *nodemask, int nr_pages,
+ struct list_head *page_list,
+ struct page **page_array);
+
+unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
+ unsigned long nr_pages,
+ struct page **page_array);
+
+/* Bulk allocate order-0 pages */
+static inline unsigned long
+alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list)
+{
+ return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list, NULL);
+}
+
+static inline unsigned long
+alloc_pages_bulk_array(gfp_t gfp, unsigned long nr_pages, struct page **page_array)
+{
+ return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, NULL, page_array);
+}
+
+static inline unsigned long
+alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct page **page_array)
+{
+ if (nid == NUMA_NO_NODE)
+ nid = numa_mem_id();
+
+ return __alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array);
+}
+
+static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
{
- return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
+ gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);
+
+ if (warn_gfp != (__GFP_THISNODE|__GFP_NOWARN))
+ return;
+
+ if (node_online(this_node))
+ return;
+
+ pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node);
+ dump_stack();
}
/*
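
The bulk allocator declared in the hunk above is typically used as a fast path with a single-page fallback. A minimal sketch under stated assumptions (fill_page_array is an invented name; the array must start out NULL-filled, since the bulk path only fills NULL slots and may return early under memory pressure):

#include <linux/gfp.h>
#include <linux/mm.h>

static int fill_page_array(struct page **pages, unsigned long nr)
{
	unsigned long got, i;

	/* Returns how many entries in pages[] are now populated. */
	got = alloc_pages_bulk_array(GFP_KERNEL, nr, pages);

	/* Top up whatever the bulk path could not provide. */
	for (i = got; i < nr; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			return -ENOMEM;	/* caller unwinds pages[0..i-1] */
	}
	return 0;
}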
@@ -479,9 +232,18 @@ static inline struct page *
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
- VM_WARN_ON(!node_online(nid));
+ warn_if_node_offline(nid, gfp_mask);
- return __alloc_pages(gfp_mask, order, nid);
+ return __alloc_pages(gfp_mask, order, nid, NULL);
+}
+
+static inline
+struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid)
+{
+ VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+ warn_if_node_offline(nid, gfp);
+
+ return __folio_alloc(gfp, order, nid, NULL);
}
/*
@@ -499,38 +261,37 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
}
#ifdef CONFIG_NUMA
-extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
-
-static inline struct page *
-alloc_pages(gfp_t gfp_mask, unsigned int order)
+struct page *alloc_pages(gfp_t gfp, unsigned int order);
+struct folio *folio_alloc(gfp_t gfp, unsigned order);
+struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
+ unsigned long addr, bool hugepage);
+#else
+static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
{
- return alloc_pages_current(gfp_mask, order);
+ return alloc_pages_node(numa_node_id(), gfp_mask, order);
}
-extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
- struct vm_area_struct *vma, unsigned long addr,
- int node, bool hugepage);
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
- alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
-#else
-#define alloc_pages(gfp_mask, order) \
- alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
- alloc_pages(gfp_mask, order)
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
- alloc_pages(gfp_mask, order)
+static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
+{
+ return __folio_alloc_node(gfp, order, numa_node_id());
+}
+#define vma_alloc_folio(gfp, order, vma, addr, hugepage) \
+ folio_alloc(gfp, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
-#define alloc_page_vma(gfp_mask, vma, addr) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
-#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
+static inline struct page *alloc_page_vma(gfp_t gfp,
+ struct vm_area_struct *vma, unsigned long addr)
+{
+ struct folio *folio = vma_alloc_folio(gfp, 0, vma, addr, false);
+
+ return &folio->page;
+}
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
-void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
+void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1);
void free_pages_exact(void *virt, size_t size);
-void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
+__meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
#define __get_free_page(gfp_mask) \
__get_free_pages((gfp_mask), 0)
@@ -540,19 +301,25 @@ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
-extern void free_hot_cold_page(struct page *page, bool cold);
-extern void free_hot_cold_page_list(struct list_head *list, bool cold);
struct page_frag_cache;
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
-extern void *page_frag_alloc(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask);
+extern void *page_frag_alloc_align(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask,
+ unsigned int align_mask);
+
+static inline void *page_frag_alloc(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask)
+{
+ return page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
+}
+
extern void page_frag_free(void *addr);
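
A minimal sketch of a page-fragment user (names are made up; the static cache is implicitly zero-initialized, which is the state the fragment allocator expects before first use):

        static struct page_frag_cache frag_cache;

        static void *grab_frag(unsigned int len)
        {
                /* carve 'len' bytes out of the current backing page */
                return page_frag_alloc(&frag_cache, len, GFP_ATOMIC);
        }

        static void drop_frag(void *buf)
        {
                page_frag_free(buf);    /* drops a reference on the backing page */
        }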
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
-void page_alloc_init(void);
+void page_alloc_init_cpuhp(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);
@@ -574,6 +341,8 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);
extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);
+extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
+
#ifdef CONFIG_PM_SLEEP
extern bool pm_suspended_storage(void);
#else
@@ -583,16 +352,13 @@ static inline bool pm_suspended_storage(void)
}
#endif /* CONFIG_PM_SLEEP */
-#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
+#ifdef CONFIG_CONTIG_ALLOC
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
unsigned migratetype, gfp_t gfp_mask);
-extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
-#endif
-
-#ifdef CONFIG_CMA
-/* CMA stuff */
-extern void init_cma_reserved_pageblock(struct page *page);
+extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
+ int nid, nodemask_t *nodemask);
#endif
+void free_contig_range(unsigned long pfn, unsigned long nr_pages);
#endif /* __LINUX_GFP_H */
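
A minimal sketch of the CONFIG_CONTIG_ALLOC interface declared above (contig_demo() is a made-up name; error handling trimmed):

        static int contig_demo(void)
        {
                const unsigned long nr = 512;   /* 2 MiB with 4 KiB pages */
                struct page *page;

                page = alloc_contig_pages(nr, GFP_KERNEL, numa_node_id(), NULL);
                if (!page)
                        return -ENOMEM;

                /* ... use the physically contiguous range ... */

                free_contig_range(page_to_pfn(page), nr);
                return 0;
        }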
diff --git a/include/linux/gfp_api.h b/include/linux/gfp_api.h
new file mode 100644
index 000000000000..5a05a2764a86
--- /dev/null
+++ b/include/linux/gfp_api.h
@@ -0,0 +1 @@
+#include <linux/gfp.h>
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
new file mode 100644
index 000000000000..6583a58670c5
--- /dev/null
+++ b/include/linux/gfp_types.h
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GFP_TYPES_H
+#define __LINUX_GFP_TYPES_H
+
+/* The typedef is in types.h but we want the documentation here */
+#if 0
+/**
+ * typedef gfp_t - Memory allocation flags.
+ *
+ * GFP flags are commonly used throughout Linux to indicate how memory
+ * should be allocated. The GFP acronym stands for get_free_pages(),
+ * the underlying memory allocation function. Not every GFP flag is
+ * supported by every function which may allocate memory. Most users
+ * will want to use a plain ``GFP_KERNEL``.
+ */
+typedef unsigned int __bitwise gfp_t;
+#endif
+
+/*
+ * In case of changes, please don't forget to update
+ * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
+ */
+
+/* Plain integer GFP bitmasks. Do not use this directly. */
+#define ___GFP_DMA 0x01u
+#define ___GFP_HIGHMEM 0x02u
+#define ___GFP_DMA32 0x04u
+#define ___GFP_MOVABLE 0x08u
+#define ___GFP_RECLAIMABLE 0x10u
+#define ___GFP_HIGH 0x20u
+#define ___GFP_IO 0x40u
+#define ___GFP_FS 0x80u
+#define ___GFP_ZERO 0x100u
+/* 0x200u unused */
+#define ___GFP_DIRECT_RECLAIM 0x400u
+#define ___GFP_KSWAPD_RECLAIM 0x800u
+#define ___GFP_WRITE 0x1000u
+#define ___GFP_NOWARN 0x2000u
+#define ___GFP_RETRY_MAYFAIL 0x4000u
+#define ___GFP_NOFAIL 0x8000u
+#define ___GFP_NORETRY 0x10000u
+#define ___GFP_MEMALLOC 0x20000u
+#define ___GFP_COMP 0x40000u
+#define ___GFP_NOMEMALLOC 0x80000u
+#define ___GFP_HARDWALL 0x100000u
+#define ___GFP_THISNODE 0x200000u
+#define ___GFP_ACCOUNT 0x400000u
+#define ___GFP_ZEROTAGS 0x800000u
+#ifdef CONFIG_KASAN_HW_TAGS
+#define ___GFP_SKIP_ZERO 0x1000000u
+#define ___GFP_SKIP_KASAN 0x2000000u
+#else
+#define ___GFP_SKIP_ZERO 0
+#define ___GFP_SKIP_KASAN 0
+#endif
+#ifdef CONFIG_LOCKDEP
+#define ___GFP_NOLOCKDEP 0x4000000u
+#else
+#define ___GFP_NOLOCKDEP 0
+#endif
+/* If the above are modified, __GFP_BITS_SHIFT may need updating */
+
+/*
+ * Physical address zone modifiers (see linux/mmzone.h - low four bits)
+ *
+ * Do not put any conditional on these. If necessary modify the definitions
+ * without the underscores and use them consistently. The definitions here may
+ * be used in bit comparisons.
+ */
+#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
+#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
+#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
+#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
+#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
+
+/**
+ * DOC: Page mobility and placement hints
+ *
+ * Page mobility and placement hints
+ * ---------------------------------
+ *
+ * These flags provide hints about how mobile the page is. Pages with similar
+ * mobility are placed within the same pageblocks to minimise problems due
+ * to external fragmentation.
+ *
+ * %__GFP_MOVABLE (also a zone modifier) indicates that the page can be
+ * moved by page migration during memory compaction or can be reclaimed.
+ *
+ * %__GFP_RECLAIMABLE is used for slab allocations that specify
+ * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
+ *
+ * %__GFP_WRITE indicates the caller intends to dirty the page. Where possible,
+ * these pages will be spread between local zones to avoid all the dirty
+ * pages being in one zone (fair zone allocation policy).
+ *
+ * %__GFP_HARDWALL enforces the cpuset memory allocation policy.
+ *
+ * %__GFP_THISNODE forces the allocation to be satisfied from the requested
+ * node with no fallbacks or placement policy enforcements.
+ *
+ * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
+ */
+#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
+#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
+#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
+#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
+#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
+
+/**
+ * DOC: Watermark modifiers
+ *
+ * Watermark modifiers -- controls access to emergency reserves
+ * ------------------------------------------------------------
+ *
+ * %__GFP_HIGH indicates that the caller is high-priority and that granting
+ * the request is necessary before the system can make forward progress.
+ * Examples include creating an IO context to clean pages and
+ * requests made from atomic context.
+ *
+ * %__GFP_MEMALLOC allows access to all memory. This should only be used when
+ * the caller guarantees the allocation will allow more memory to be freed
+ * very shortly, e.g. a process exiting or swapping. Users should either
+ * be the MM or be coordinating closely with the VM (e.g. swap over NFS).
+ * Users of this flag have to be extremely careful to not deplete the reserve
+ * completely and implement a throttling mechanism which controls the
+ * consumption of the reserve based on the amount of freed memory.
+ * Usage of a pre-allocated pool (e.g. mempool) should be always considered
+ * before using this flag.
+ *
+ * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
+ * This takes precedence over the %__GFP_MEMALLOC flag if both are set.
+ */
+#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
+#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
+#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
+
+/**
+ * DOC: Reclaim modifiers
+ *
+ * Reclaim modifiers
+ * -----------------
+ * Please note that all the following flags are only applicable to sleepable
+ * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them).
+ *
+ * %__GFP_IO can start physical IO.
+ *
+ * %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the
+ * allocator recursing into the filesystem which might already be holding
+ * locks.
+ *
+ * %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
+ * This flag can be cleared to avoid unnecessary delays when a fallback
+ * option is available.
+ *
+ * %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
+ * the low watermark is reached and have it reclaim pages until the high
+ * watermark is reached. A caller may wish to clear this flag when fallback
+ * options are available and the reclaim is likely to disrupt the system. The
+ * canonical example is THP allocation where a fallback is cheap but
+ * reclaim/compaction may cause indirect stalls.
+ *
+ * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
+ *
+ * The default allocator behavior depends on the request size. We have a concept
+ * of so-called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
+ * !costly allocations are too essential to fail so they are implicitly
+ * non-failing by default (with some exceptions, e.g. OOM victims might fail, so
+ * the caller still has to check for failures), while costly requests try not
+ * to be disruptive and back off even without invoking the OOM killer.
+ * The following three modifiers might be used to override some of these
+ * implicit rules:
+ *
+ * %__GFP_NORETRY: The VM implementation will try only very lightweight
+ * memory direct reclaim to get some memory under memory pressure (thus
+ * it can sleep). It will avoid disruptive actions like the OOM killer. The
+ * caller must handle the failure, which is quite likely to happen under
+ * heavy memory pressure. The flag is suitable when failure can easily be
+ * handled at small cost, such as reduced throughput.
+ *
+ * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
+ * procedures that have previously failed if there is some indication
+ * that progress has been made elsewhere. It can wait for other
+ * tasks to attempt high level approaches to freeing memory such as
+ * compaction (which removes fragmentation) and page-out.
+ * There is still a definite limit to the number of retries, but it is
+ * a larger limit than with %__GFP_NORETRY.
+ * Allocations with this flag may fail, but only when there is
+ * genuinely little unused memory. While these allocations do not
+ * directly trigger the OOM killer, their failure indicates that
+ * the system is likely to need to use the OOM killer soon. The
+ * caller must handle failure, but can reasonably do so by failing
+ * a higher-level request, or completing it only in a much less
+ * efficient manner.
+ * If the allocation does fail, and the caller is in a position to
+ * free some non-essential memory, doing so could benefit the system
+ * as a whole.
+ *
+ * %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
+ * cannot handle allocation failures. The allocation could block
+ * indefinitely but will never return with failure. Testing for
+ * failure is pointless.
+ * New users should be evaluated carefully (and the flag should be
+ * used only when there is no reasonable failure policy) but it is
+ * definitely preferable to use the flag rather than opencoding an
+ * endless loop around the allocator.
+ * Using this flag for costly allocations is _highly_ discouraged.
+ */
+#define __GFP_IO ((__force gfp_t)___GFP_IO)
+#define __GFP_FS ((__force gfp_t)___GFP_FS)
+#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
+#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
+#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
+#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL)
+#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
+#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
+
+/**
+ * DOC: Action modifiers
+ *
+ * Action modifiers
+ * ----------------
+ *
+ * %__GFP_NOWARN suppresses allocation failure reports.
+ *
+ * %__GFP_COMP causes the allocation to be returned as a compound page.
+ *
+ * %__GFP_ZERO returns a zeroed page on success.
+ *
+ * %__GFP_ZEROTAGS zeroes memory tags at allocation time if the memory itself
+ * is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that
+ * __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting
+ * memory tags at the same time as zeroing memory has minimal additional
+ * performance impact.
+ *
+ * %__GFP_SKIP_KASAN makes KASAN skip unpoisoning on page allocation.
+ * Used for userspace and vmalloc pages; the latter are unpoisoned by
+ * kasan_unpoison_vmalloc instead. For userspace pages, results in
+ * poisoning being skipped as well, see should_skip_kasan_poison for
+ * details. Only effective in HW_TAGS mode.
+ */
+#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
+#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
+#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
+#define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS)
+#define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO)
+#define __GFP_SKIP_KASAN ((__force gfp_t)___GFP_SKIP_KASAN)
+
+/* Disable lockdep for GFP context tracking */
+#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
+
+/* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT (26 + IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
+
+/**
+ * DOC: Useful GFP flag combinations
+ *
+ * Useful GFP flag combinations
+ * ----------------------------
+ *
+ * Useful GFP flag combinations that are commonly used. It is recommended
+ * that subsystems start with one of these combinations and then set/clear
+ * %__GFP_FOO flags as necessary.
+ *
+ * %GFP_ATOMIC users cannot sleep and need the allocation to succeed. A lower
+ * watermark is applied to allow access to "atomic reserves".
+ * The current implementation doesn't support NMI and a few other strict
+ * non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT.
+ *
+ * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires
+ * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
+ *
+ * %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
+ * accounted to kmemcg.
+ *
+ * %GFP_NOWAIT is for kernel allocations that should not stall for direct
+ * reclaim, start physical IO or use any filesystem callback.
+ *
+ * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
+ * that do not require the starting of any physical IO.
+ * Please try to avoid using this flag directly and instead use
+ * memalloc_noio_{save,restore} to mark the whole scope which cannot
+ * perform any IO with a short explanation why. All allocation requests
+ * will inherit GFP_NOIO implicitly.
+ *
+ * %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
+ * Please try to avoid using this flag directly and instead use
+ * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
+ * recurse into the FS layer with a short explanation why. All allocation
+ * requests will inherit GFP_NOFS implicitly.
+ *
+ * %GFP_USER is for userspace allocations that also need to be directly
+ * accessible by the kernel or hardware. It is typically used by hardware
+ * for buffers that are mapped to userspace (e.g. graphics) that the
+ * hardware still must DMA to. cpuset limits are enforced for these allocations.
+ *
+ * %GFP_DMA exists for historical reasons and should be avoided where possible.
+ * The flag indicates that the caller requires that the lowest zone be
+ * used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
+ * it would require careful auditing as some users really require it and
+ * others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the
+ * lowest zone as a type of emergency reserve.
+ *
+ * %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit
+ * address. Note that kmalloc(..., GFP_DMA32) does not return DMA32 memory
+ * because the DMA32 kmalloc cache array is not implemented.
+ * (Reason: there is no such user in the kernel).
+ *
+ * %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace
+ * and do not need to be directly accessible by the kernel, but that cannot
+ * move once in use. An example may be a hardware allocation that maps
+ * data directly into userspace but has no addressing limitations.
+ *
+ * %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
+ * need direct access to but can use kmap() when access is required. They
+ * are expected to be movable via page reclaim or page migration. Typically,
+ * pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE.
+ *
+ * %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They
+ * are compound allocations that will generally fail quickly if memory is not
+ * available and will not wake kswapd/kcompactd on failure. The _LIGHT
+ * version does not attempt reclaim/compaction at all and is by default used
+ * in page fault path, while the non-light is used by khugepaged.
+ */
+#define GFP_ATOMIC (__GFP_HIGH|__GFP_KSWAPD_RECLAIM)
+#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
+#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
+#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
+#define GFP_NOIO (__GFP_RECLAIM)
+#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
+#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
+#define GFP_DMA __GFP_DMA
+#define GFP_DMA32 __GFP_DMA32
+#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
+#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE | __GFP_SKIP_KASAN)
+#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
+ __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
+#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
+
+#endif /* __LINUX_GFP_TYPES_H */
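
A short sketch of the recommended pattern above (start from a base combination and set/clear %__GFP_FOO bits as needed; try_opportunistic_alloc() is a made-up name):

        static void *try_opportunistic_alloc(size_t size)
        {
                /* non-critical buffer: back off quickly, stay quiet on failure */
                return kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
        }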
diff --git a/include/linux/glob.h b/include/linux/glob.h
index 861d8347d08e..861327b33e41 100644
--- a/include/linux/glob.h
+++ b/include/linux/glob.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_GLOB_H
#define _LINUX_GLOB_H
diff --git a/include/linux/gnss.h b/include/linux/gnss.h
new file mode 100644
index 000000000000..36968a0f33e8
--- /dev/null
+++ b/include/linux/gnss.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GNSS receiver support
+ *
+ * Copyright (C) 2018 Johan Hovold <johan@kernel.org>
+ */
+
+#ifndef _LINUX_GNSS_H
+#define _LINUX_GNSS_H
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/kfifo.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+struct gnss_device;
+
+enum gnss_type {
+ GNSS_TYPE_NMEA = 0,
+ GNSS_TYPE_SIRF,
+ GNSS_TYPE_UBX,
+ GNSS_TYPE_MTK,
+
+ GNSS_TYPE_COUNT
+};
+
+struct gnss_operations {
+ int (*open)(struct gnss_device *gdev);
+ void (*close)(struct gnss_device *gdev);
+ int (*write_raw)(struct gnss_device *gdev, const unsigned char *buf,
+ size_t count);
+};
+
+struct gnss_device {
+ struct device dev;
+ struct cdev cdev;
+ int id;
+
+ enum gnss_type type;
+ unsigned long flags;
+
+ struct rw_semaphore rwsem;
+ const struct gnss_operations *ops;
+ unsigned int count;
+ unsigned int disconnected:1;
+
+ struct mutex read_mutex;
+ struct kfifo read_fifo;
+ wait_queue_head_t read_queue;
+
+ struct mutex write_mutex;
+ char *write_buf;
+};
+
+struct gnss_device *gnss_allocate_device(struct device *parent);
+void gnss_put_device(struct gnss_device *gdev);
+int gnss_register_device(struct gnss_device *gdev);
+void gnss_deregister_device(struct gnss_device *gdev);
+
+int gnss_insert_raw(struct gnss_device *gdev, const unsigned char *buf,
+ size_t count);
+
+static inline void gnss_set_drvdata(struct gnss_device *gdev, void *data)
+{
+ dev_set_drvdata(&gdev->dev, data);
+}
+
+static inline void *gnss_get_drvdata(struct gnss_device *gdev)
+{
+ return dev_get_drvdata(&gdev->dev);
+}
+
+#endif /* _LINUX_GNSS_H */
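
A minimal sketch of a driver sitting on top of this API (the my_* names are made up; the receive path would feed incoming data to gnss_insert_raw()):

        static int my_open(struct gnss_device *gdev)
        {
                /* power up the receiver, start the underlying transport */
                return 0;
        }

        static void my_close(struct gnss_device *gdev)
        {
                /* stop the transport, power the receiver down */
        }

        static int my_write_raw(struct gnss_device *gdev,
                                const unsigned char *buf, size_t count)
        {
                /* push 'count' bytes to the receiver, return bytes written */
                return count;
        }

        static const struct gnss_operations my_gnss_ops = {
                .open           = my_open,
                .close          = my_close,
                .write_raw      = my_write_raw,
        };

        static int my_probe(struct device *parent)
        {
                struct gnss_device *gdev;
                int ret;

                gdev = gnss_allocate_device(parent);
                if (!gdev)
                        return -ENOMEM;

                gdev->type = GNSS_TYPE_NMEA;
                gdev->ops = &my_gnss_ops;

                ret = gnss_register_device(gdev);
                if (ret)
                        gnss_put_device(gdev);
                return ret;
        }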
diff --git a/include/linux/goldfish.h b/include/linux/goldfish.h
index 93e080b39cf6..bcc17f95b906 100644
--- a/include/linux/goldfish.h
+++ b/include/linux/goldfish.h
@@ -1,14 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_GOLDFISH_H
#define __LINUX_GOLDFISH_H
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/io.h>
+
/* Helpers for Goldfish virtual platform */
+#ifndef gf_ioread32
+#define gf_ioread32 ioread32
+#endif
+#ifndef gf_iowrite32
+#define gf_iowrite32 iowrite32
+#endif
+
static inline void gf_write_ptr(const void *ptr, void __iomem *portl,
void __iomem *porth)
{
- writel((u32)(unsigned long)ptr, portl);
+ const unsigned long addr = (unsigned long)ptr;
+
+ gf_iowrite32(lower_32_bits(addr), portl);
#ifdef CONFIG_64BIT
- writel((unsigned long)ptr >> 32, porth);
+ gf_iowrite32(upper_32_bits(addr), porth);
#endif
}
@@ -16,9 +30,9 @@ static inline void gf_write_dma_addr(const dma_addr_t addr,
void __iomem *portl,
void __iomem *porth)
{
- writel((u32)addr, portl);
+ gf_iowrite32(lower_32_bits(addr), portl);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- writel(addr >> 32, porth);
+ gf_iowrite32(upper_32_bits(addr), porth);
#endif
}
diff --git a/include/linux/gpio-fan.h b/include/linux/gpio-fan.h
deleted file mode 100644
index 096659169215..000000000000
--- a/include/linux/gpio-fan.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * include/linux/gpio-fan.h
- *
- * Platform data structure for GPIO fan driver
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __LINUX_GPIO_FAN_H
-#define __LINUX_GPIO_FAN_H
-
-struct gpio_fan_alarm {
- unsigned gpio;
- unsigned active_low;
-};
-
-struct gpio_fan_speed {
- int rpm;
- int ctrl_val;
-};
-
-struct gpio_fan_platform_data {
- int num_ctrl;
- unsigned *ctrl; /* fan control GPIOs. */
- struct gpio_fan_alarm *alarm; /* fan alarm GPIO. */
- /*
- * Speed conversion array: rpm from/to GPIO bit field.
- * This array _must_ be sorted in ascending rpm order.
- */
- int num_speed;
- struct gpio_fan_speed *speed;
-};
-
-#endif /* __LINUX_GPIO_FAN_H */
diff --git a/include/linux/gpio-pxa.h b/include/linux/gpio-pxa.h
index d90ebbe02ca4..1e1fa0160480 100644
--- a/include/linux/gpio-pxa.h
+++ b/include/linux/gpio-pxa.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __GPIO_PXA_H
#define __GPIO_PXA_H
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index d12b5d566e4b..8528353e073b 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -1,9 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * <linux/gpio.h>
+ *
+ * This is the LEGACY GPIO bulk include file, including legacy APIs. It is
+ * used for GPIO drivers still referencing the global GPIO numberspace,
+ * and should not be included in new code.
+ *
+ * If you're implementing a GPIO driver, only include <linux/gpio/driver.h>
+ * If you're implementing a GPIO consumer, only include <linux/gpio/consumer.h>
+ */
#ifndef __LINUX_GPIO_H
#define __LINUX_GPIO_H
-#include <linux/errno.h>
+#include <linux/types.h>
+
+struct device;
-/* see Documentation/gpio/gpio-legacy.txt */
+/* see Documentation/driver-api/gpio/legacy.rst */
/* make these flag values available regardless of GPIO kconfig options */
#define GPIOF_DIR_OUT (0 << 0)
@@ -44,57 +57,94 @@ struct gpio {
#ifdef CONFIG_GPIOLIB
-#ifdef CONFIG_ARCH_HAVE_CUSTOM_GPIO_H
-#include <asm/gpio.h>
-#else
+#include <linux/gpio/consumer.h>
-#include <asm-generic/gpio.h>
+/*
+ * "valid" GPIO numbers are nonnegative and may be passed to
+ * setup routines like gpio_request(). Only some valid numbers
+ * can successfully be requested and used.
+ *
+ * Invalid GPIO numbers are useful for indicating no-such-GPIO in
+ * platform data and other tables.
+ */
+static inline bool gpio_is_valid(int number)
+{
+ /* only non-negative numbers are valid */
+ return number >= 0;
+}
-static inline int gpio_get_value(unsigned int gpio)
+/*
+ * Platforms may implement their GPIO interface with library code,
+ * at a small performance cost for non-inlined operations and some
+ * extra memory (for code and for per-GPIO table entries).
+ */
+
+/*
+ * At the end we want all GPIOs to be dynamically allocated from 0.
+ * However, some legacy drivers still perform fixed allocation.
+ * Until they are all fixed, leave the range 0-512 reserved for them.
+ */
+#define GPIO_DYNAMIC_BASE 512
+
+/* Always use the library code for GPIO management calls,
+ * or when sleeping may be involved.
+ */
+int gpio_request(unsigned gpio, const char *label);
+void gpio_free(unsigned gpio);
+
+static inline int gpio_direction_input(unsigned gpio)
+{
+ return gpiod_direction_input(gpio_to_desc(gpio));
+}
+static inline int gpio_direction_output(unsigned gpio, int value)
{
- return __gpio_get_value(gpio);
+ return gpiod_direction_output_raw(gpio_to_desc(gpio), value);
}
-static inline void gpio_set_value(unsigned int gpio, int value)
+static inline int gpio_get_value_cansleep(unsigned gpio)
+{
+ return gpiod_get_raw_value_cansleep(gpio_to_desc(gpio));
+}
+static inline void gpio_set_value_cansleep(unsigned gpio, int value)
{
- __gpio_set_value(gpio, value);
+ return gpiod_set_raw_value_cansleep(gpio_to_desc(gpio), value);
}
-static inline int gpio_cansleep(unsigned int gpio)
+static inline int gpio_get_value(unsigned gpio)
+{
+ return gpiod_get_raw_value(gpio_to_desc(gpio));
+}
+static inline void gpio_set_value(unsigned gpio, int value)
{
- return __gpio_cansleep(gpio);
+ return gpiod_set_raw_value(gpio_to_desc(gpio), value);
}
-static inline int gpio_to_irq(unsigned int gpio)
+static inline int gpio_cansleep(unsigned gpio)
{
- return __gpio_to_irq(gpio);
+ return gpiod_cansleep(gpio_to_desc(gpio));
}
-static inline int irq_to_gpio(unsigned int irq)
+static inline int gpio_to_irq(unsigned gpio)
{
- return -EINVAL;
+ return gpiod_to_irq(gpio_to_desc(gpio));
}
-#endif /* ! CONFIG_ARCH_HAVE_CUSTOM_GPIO_H */
+int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
+int gpio_request_array(const struct gpio *array, size_t num);
+void gpio_free_array(const struct gpio *array, size_t num);
/* CONFIG_GPIOLIB: bindings for managed devices that want to request gpios */
-struct device;
-
int devm_gpio_request(struct device *dev, unsigned gpio, const char *label);
int devm_gpio_request_one(struct device *dev, unsigned gpio,
unsigned long flags, const char *label);
-void devm_gpio_free(struct device *dev, unsigned int gpio);
#else /* ! CONFIG_GPIOLIB */
#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/bug.h>
-#include <linux/pinctrl/pinctrl.h>
-struct device;
-struct gpio_chip;
+#include <asm/bug.h>
+#include <asm/errno.h>
static inline bool gpio_is_valid(int number)
{
@@ -143,11 +193,6 @@ static inline int gpio_direction_output(unsigned gpio, int value)
return -ENOSYS;
}
-static inline int gpio_set_debounce(unsigned gpio, unsigned debounce)
-{
- return -ENOSYS;
-}
-
static inline int gpio_get_value(unsigned gpio)
{
/* GPIO can never have been requested or set as {in,out}put */
@@ -181,27 +226,6 @@ static inline void gpio_set_value_cansleep(unsigned gpio, int value)
WARN_ON(1);
}
-static inline int gpio_export(unsigned gpio, bool direction_may_change)
-{
- /* GPIO can never have been requested or set as {in,out}put */
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline int gpio_export_link(struct device *dev, const char *name,
- unsigned gpio)
-{
- /* GPIO can never have been exported */
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline void gpio_unexport(unsigned gpio)
-{
- /* GPIO can never have been exported */
- WARN_ON(1);
-}
-
static inline int gpio_to_irq(unsigned gpio)
{
/* GPIO can never have been requested or set as input */
@@ -209,50 +233,6 @@ static inline int gpio_to_irq(unsigned gpio)
return -EINVAL;
}
-static inline int gpiochip_lock_as_irq(struct gpio_chip *chip,
- unsigned int offset)
-{
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline void gpiochip_unlock_as_irq(struct gpio_chip *chip,
- unsigned int offset)
-{
- WARN_ON(1);
-}
-
-static inline int irq_to_gpio(unsigned irq)
-{
- /* irq can never have been returned from gpio_to_irq() */
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline int
-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
- unsigned int gpio_offset, unsigned int pin_offset,
- unsigned int npins)
-{
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline int
-gpiochip_add_pingroup_range(struct gpio_chip *chip,
- struct pinctrl_dev *pctldev,
- unsigned int gpio_offset, const char *pin_group)
-{
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline void
-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
-{
- WARN_ON(1);
-}
-
static inline int devm_gpio_request(struct device *dev, unsigned gpio,
const char *label)
{
@@ -267,11 +247,6 @@ static inline int devm_gpio_request_one(struct device *dev, unsigned gpio,
return -EINVAL;
}
-static inline void devm_gpio_free(struct device *dev, unsigned int gpio)
-{
- WARN_ON(1);
-}
-
#endif /* ! CONFIG_GPIOLIB */
#endif /* __LINUX_GPIO_H */
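
For contrast, a sketch of the deprecated integer interface this header keeps alive (42 is a made-up global GPIO number; new code should use the descriptor API from <linux/gpio/consumer.h> instead):

        static void legacy_pulse_reset(void)
        {
                if (gpio_request_one(42, GPIOF_OUT_INIT_LOW, "my-reset"))
                        return;

                gpio_set_value(42, 1);  /* assert */
                gpio_set_value(42, 0);  /* release */
                gpio_free(42);
        }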
diff --git a/include/linux/gpio/aspeed.h b/include/linux/gpio/aspeed.h
new file mode 100644
index 000000000000..9a547e66c8c4
--- /dev/null
+++ b/include/linux/gpio/aspeed.h
@@ -0,0 +1,19 @@
+#ifndef __GPIO_ASPEED_H
+#define __GPIO_ASPEED_H
+
+#include <linux/types.h>
+
+struct gpio_desc;
+
+struct aspeed_gpio_copro_ops {
+ int (*request_access)(void *data);
+ int (*release_access)(void *data);
+};
+
+int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
+ u16 *vreg_offset, u16 *dreg_offset, u8 *bit);
+int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc);
+int aspeed_gpio_copro_set_ops(const struct aspeed_gpio_copro_ops *ops, void *data);
+
+
+#endif /* __GPIO_ASPEED_H */
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 8f702fcbe485..1c4385a00f88 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -1,26 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_GPIO_CONSUMER_H
#define __LINUX_GPIO_CONSUMER_H
-#include <linux/bug.h>
-#include <linux/err.h>
-#include <linux/kernel.h>
+#include <linux/bits.h>
+#include <linux/types.h>
+struct acpi_device;
struct device;
+struct fwnode_handle;
-/**
- * Opaque descriptor for a GPIO. These are obtained using gpiod_get() and are
- * preferable to the old integer-based handles.
- *
- * Contrary to integers, a pointer to a gpio_desc is guaranteed to be valid
- * until the GPIO is released.
- */
+struct gpio_array;
struct gpio_desc;
/**
- * Struct containing an array of descriptors that can be obtained using
- * gpiod_get_array().
+ * struct gpio_descs - Struct containing an array of descriptors that can be
+ * obtained using gpiod_get_array()
+ *
+ * @info: Pointer to the opaque gpio_array structure
+ * @ndescs: Number of held descriptors
+ * @desc: Array of pointers to GPIO descriptors
*/
struct gpio_descs {
+ struct gpio_array *info;
unsigned int ndescs;
struct gpio_desc *desc[];
};
@@ -28,10 +29,20 @@ struct gpio_descs {
#define GPIOD_FLAGS_BIT_DIR_SET BIT(0)
#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1)
#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2)
+#define GPIOD_FLAGS_BIT_OPEN_DRAIN BIT(3)
+#define GPIOD_FLAGS_BIT_NONEXCLUSIVE BIT(4)
/**
- * Optional flags that can be passed to one of gpiod_* to configure direction
- * and output value. These values cannot be OR'd.
+ * enum gpiod_flags - Optional flags that can be passed to one of gpiod_* to
+ * configure direction and output value. These values
+ * cannot be OR'd.
+ *
+ * @GPIOD_ASIS: Don't change anything
+ * @GPIOD_IN: Set lines to input mode
+ * @GPIOD_OUT_LOW: Set lines to output and drive them low
+ * @GPIOD_OUT_HIGH: Set lines to output and drive them high
+ * @GPIOD_OUT_LOW_OPEN_DRAIN: Set lines to open-drain output and drive them low
+ * @GPIOD_OUT_HIGH_OPEN_DRAIN: Set lines to open-drain output and drive them high
*/
enum gpiod_flags {
GPIOD_ASIS = 0,
@@ -39,6 +50,8 @@ enum gpiod_flags {
GPIOD_OUT_LOW = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT,
GPIOD_OUT_HIGH = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT |
GPIOD_FLAGS_BIT_DIR_VAL,
+ GPIOD_OUT_LOW_OPEN_DRAIN = GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_OPEN_DRAIN,
+ GPIOD_OUT_HIGH_OPEN_DRAIN = GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_OPEN_DRAIN,
};
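
A minimal sketch of requesting a line with one of the new open-drain flags (my_probe() and the "reset" connection ID are assumptions for illustration):

        static int my_probe(struct device *dev)
        {
                struct gpio_desc *reset;

                reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW_OPEN_DRAIN);
                if (IS_ERR(reset))
                        return PTR_ERR(reset);

                gpiod_set_value_cansleep(reset, 1);     /* assert reset */
                return 0;
        }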
#ifdef CONFIG_GPIOLIB
@@ -90,62 +103,92 @@ struct gpio_descs *__must_check
devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags);
void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
+void devm_gpiod_unhinge(struct device *dev, struct gpio_desc *desc);
void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs);
int gpiod_get_direction(struct gpio_desc *desc);
int gpiod_direction_input(struct gpio_desc *desc);
int gpiod_direction_output(struct gpio_desc *desc, int value);
int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
+int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags);
+int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags);
/* Value get/set from non-sleeping context */
int gpiod_get_value(const struct gpio_desc *desc);
+int gpiod_get_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
void gpiod_set_value(struct gpio_desc *desc, int value);
-void gpiod_set_array_value(unsigned int array_size,
- struct gpio_desc **desc_array, int *value_array);
+int gpiod_set_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
int gpiod_get_raw_value(const struct gpio_desc *desc);
+int gpiod_get_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
void gpiod_set_raw_value(struct gpio_desc *desc, int value);
-void gpiod_set_raw_array_value(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array);
+int gpiod_set_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
/* Value get/set from sleeping context */
int gpiod_get_value_cansleep(const struct gpio_desc *desc);
+int gpiod_get_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
-void gpiod_set_array_value_cansleep(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array);
+int gpiod_set_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
+int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
-void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array);
+int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
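
A minimal sketch of the reworked bitmap-based array API (names made up; passing descs->info lets gpiolib fast-path chips that can update several lines in one register write):

        static int set_leds(struct device *dev, u64 pattern)
        {
                struct gpio_descs *leds;
                DECLARE_BITMAP(values, 64);

                leds = devm_gpiod_get_array(dev, "led", GPIOD_OUT_LOW);
                if (IS_ERR(leds))
                        return PTR_ERR(leds);

                bitmap_from_u64(values, pattern);
                return gpiod_set_array_value(leds->ndescs, leds->desc,
                                             leds->info, values);
        }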
-int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
+int gpiod_set_config(struct gpio_desc *desc, unsigned long config);
+int gpiod_set_debounce(struct gpio_desc *desc, unsigned int debounce);
+int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
+void gpiod_toggle_active_low(struct gpio_desc *desc);
int gpiod_is_active_low(const struct gpio_desc *desc);
int gpiod_cansleep(const struct gpio_desc *desc);
int gpiod_to_irq(const struct gpio_desc *desc);
+int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name);
/* Convert between the old gpio_ and new gpiod_ interfaces */
struct gpio_desc *gpio_to_desc(unsigned gpio);
int desc_to_gpio(const struct gpio_desc *desc);
-/* Child properties interface */
-struct fwnode_handle;
-
-struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
- const char *propname, int index,
- enum gpiod_flags dflags,
+struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode,
+ const char *con_id, int index,
+ enum gpiod_flags flags,
const char *label);
-struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
- const char *con_id, int index,
- struct fwnode_handle *child,
- enum gpiod_flags flags,
- const char *label);
+struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
+ struct fwnode_handle *child,
+ const char *con_id, int index,
+ enum gpiod_flags flags,
+ const char *label);
#else /* CONFIG_GPIOLIB */
+#include <linux/err.h>
+#include <linux/kernel.h>
+
+#include <asm/bug.h>
+
static inline int gpiod_count(struct device *dev, const char *con_id)
{
return 0;
@@ -199,7 +242,16 @@ static inline void gpiod_put(struct gpio_desc *desc)
might_sleep();
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
+}
+
+static inline void devm_gpiod_unhinge(struct device *dev,
+ struct gpio_desc *desc)
+{
+ might_sleep();
+
+ /* GPIO can never have been requested */
+ WARN_ON(desc);
}
static inline void gpiod_put_array(struct gpio_descs *descs)
@@ -207,7 +259,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs)
might_sleep();
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(descs);
}
static inline struct gpio_desc *__must_check
@@ -260,7 +312,7 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
might_sleep();
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
}
static inline void devm_gpiod_put_array(struct device *dev,
@@ -269,168 +321,247 @@ static inline void devm_gpiod_put_array(struct device *dev,
might_sleep();
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(descs);
}
static inline int gpiod_get_direction(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -ENOSYS;
}
static inline int gpiod_direction_input(struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -ENOSYS;
}
static inline int gpiod_direction_output(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -ENOSYS;
}
static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
+ return -ENOSYS;
+}
+static inline int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc,
+ unsigned long flags)
+{
+ WARN_ON(desc);
+ return -ENOSYS;
+}
+static inline int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc,
+ unsigned long flags)
+{
+ WARN_ON(desc);
return -ENOSYS;
}
-
-
static inline int gpiod_get_value(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
+ return 0;
+}
+static inline int gpiod_get_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(desc_array);
return 0;
}
static inline void gpiod_set_value(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
}
-static inline void gpiod_set_array_value(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
+static inline int gpiod_set_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
+ return 0;
}
static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
+ return 0;
+}
+static inline int gpiod_get_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(desc_array);
return 0;
}
static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
}
-static inline void gpiod_set_raw_array_value(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
+static inline int gpiod_set_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
+ return 0;
}
static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
+ return 0;
+}
+static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(desc_array);
return 0;
}
static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
}
-static inline void gpiod_set_array_value_cansleep(unsigned int array_size,
+static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
+ return 0;
}
static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
+ return 0;
+}
+static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(desc_array);
return 0;
}
static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
int value)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
}
-static inline void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
+static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
+ return 0;
+}
+
+static inline int gpiod_set_config(struct gpio_desc *desc, unsigned long config)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(desc);
+ return -ENOSYS;
}
-static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
+static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned int debounce)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -ENOSYS;
}
+static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(desc);
+ return -ENOSYS;
+}
+
+static inline void gpiod_toggle_active_low(struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(desc);
+}
+
static inline int gpiod_is_active_low(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return 0;
}
static inline int gpiod_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return 0;
}
static inline int gpiod_to_irq(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
+ return -EINVAL;
+}
+
+static inline int gpiod_set_consumer_name(struct gpio_desc *desc,
+ const char *name)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(desc);
return -EINVAL;
}
static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
{
- return ERR_PTR(-EINVAL);
+ return NULL;
}
static inline int desc_to_gpio(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -EINVAL;
}
-/* Child properties interface */
-struct fwnode_handle;
-
static inline
-struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
- const char *propname, int index,
- enum gpiod_flags dflags,
+struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode,
+ const char *con_id, int index,
+ enum gpiod_flags flags,
const char *label)
{
return ERR_PTR(-ENOSYS);
}
static inline
-struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
- const char *con_id, int index,
- struct fwnode_handle *child,
- enum gpiod_flags flags,
- const char *label)
+struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *con_id, int index,
+ enum gpiod_flags flags,
+ const char *label)
{
return ERR_PTR(-ENOSYS);
}
@@ -438,16 +569,79 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
#endif /* CONFIG_GPIOLIB */
static inline
-struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev,
- const char *con_id,
- struct fwnode_handle *child,
- enum gpiod_flags flags,
- const char *label)
+struct gpio_desc *devm_fwnode_gpiod_get(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *con_id,
+ enum gpiod_flags flags,
+ const char *label)
+{
+ return devm_fwnode_gpiod_get_index(dev, fwnode, con_id, 0,
+ flags, label);
+}
+
+struct acpi_gpio_params {
+ unsigned int crs_entry_index;
+ unsigned int line_index;
+ bool active_low;
+};
+
+struct acpi_gpio_mapping {
+ const char *name;
+ const struct acpi_gpio_params *data;
+ unsigned int size;
+
+/* Ignore IoRestriction field */
+#define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION BIT(0)
+/*
+ * When ACPI GPIO mapping table is in use the index parameter inside it
+ * refers to the GPIO resource in _CRS method. That index has no
+ * distinction of actual type of the resource. When consumer wants to
+ * get GpioIo type explicitly, this quirk may be used.
+ */
+#define ACPI_GPIO_QUIRK_ONLY_GPIOIO BIT(1)
+/* Use given pin as an absolute GPIO number in the system */
+#define ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER BIT(2)
+
+ unsigned int quirks;
+};
+
+#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_ACPI)
+
+int acpi_dev_add_driver_gpios(struct acpi_device *adev,
+ const struct acpi_gpio_mapping *gpios);
+void acpi_dev_remove_driver_gpios(struct acpi_device *adev);
+
+int devm_acpi_dev_add_driver_gpios(struct device *dev,
+ const struct acpi_gpio_mapping *gpios);
+
+struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin, char *label);
+
+#else /* CONFIG_GPIOLIB && CONFIG_ACPI */
+
+#include <linux/err.h>
+
+static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
+ const struct acpi_gpio_mapping *gpios)
+{
+ return -ENXIO;
+}
+static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {}
+
+static inline int devm_acpi_dev_add_driver_gpios(struct device *dev,
+ const struct acpi_gpio_mapping *gpios)
+{
+ return -ENXIO;
+}
+
+static inline struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin,
+ char *label)
{
- return devm_fwnode_get_index_gpiod_from_child(dev, con_id, 0, child,
- flags, label);
+ return ERR_PTR(-ENOSYS);
}
+#endif /* CONFIG_GPIOLIB && CONFIG_ACPI */
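
A minimal sketch of a driver-provided mapping table (the "reset-gpios" name and indices are assumptions; such a table lets gpiod_get(dev, "reset", ...) work on ACPI systems whose _CRS has a bare GpioIo resource and no _DSD properties):

        static const struct acpi_gpio_params reset_gpio = { 0, 0, false };

        static const struct acpi_gpio_mapping my_acpi_gpios[] = {
                { "reset-gpios", &reset_gpio, 1 },
                { }
        };

        static int my_probe(struct device *dev)
        {
                return devm_acpi_dev_add_driver_gpios(dev, my_acpi_gpios);
        }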
+
+
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
@@ -457,6 +651,8 @@ void gpiod_unexport(struct gpio_desc *desc);
#else /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
+#include <asm/errno.h>
+
static inline int gpiod_export(struct gpio_desc *desc,
bool direction_may_change)
{
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index af20369ec8e7..5c6db5533be6 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -1,23 +1,320 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_GPIO_DRIVER_H
#define __LINUX_GPIO_DRIVER_H
-#include <linux/device.h>
-#include <linux/types.h>
-#include <linux/irq.h>
+#include <linux/bits.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/irqhandler.h>
#include <linux/lockdep.h>
-#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/property.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
-struct gpio_desc;
+#ifdef CONFIG_GENERIC_MSI_IRQ
+#include <asm/msi.h>
+#endif
+
+struct device;
+struct irq_chip;
+struct irq_data;
+struct module;
struct of_phandle_args;
-struct device_node;
+struct pinctrl_dev;
struct seq_file;
+
+struct gpio_chip;
+struct gpio_desc;
struct gpio_device;
-struct module;
-#ifdef CONFIG_GPIOLIB
+enum gpio_lookup_flags;
+enum gpiod_flags;
+
+union gpio_irq_fwspec {
+ struct irq_fwspec fwspec;
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ msi_alloc_info_t msiinfo;
+#endif
+};
+
+#define GPIO_LINE_DIRECTION_IN 1
+#define GPIO_LINE_DIRECTION_OUT 0
+
+/**
+ * struct gpio_irq_chip - GPIO interrupt controller
+ */
+struct gpio_irq_chip {
+ /**
+ * @chip:
+ *
+ * GPIO IRQ chip implementation, provided by GPIO driver.
+ */
+ struct irq_chip *chip;
+
+ /**
+ * @domain:
+ *
+ * Interrupt translation domain; responsible for mapping between GPIO
+ * hwirq number and Linux IRQ number.
+ */
+ struct irq_domain *domain;
+
+ /**
+ * @domain_ops:
+ *
+ * Table of interrupt domain operations for this IRQ chip.
+ */
+ const struct irq_domain_ops *domain_ops;
+
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ /**
+ * @fwnode:
+ *
+ * Firmware node corresponding to this gpiochip/irqchip, necessary
+ * for hierarchical irqdomain support.
+ */
+ struct fwnode_handle *fwnode;
+
+ /**
+ * @parent_domain:
+ *
+ * If non-NULL, will be set as the parent of this GPIO interrupt
+ * controller's IRQ domain to establish a hierarchical interrupt
+ * domain. The presence of this will activate the hierarchical
+ * interrupt support.
+ */
+ struct irq_domain *parent_domain;
+
+ /**
+ * @child_to_parent_hwirq:
+ *
+ * This callback translates a child hardware IRQ offset to a parent
+ * hardware IRQ offset on a hierarchical interrupt chip. The child
+ * hardware IRQs correspond to the GPIO index 0..ngpio-1 (see the
+ * ngpio field of struct gpio_chip) and the corresponding parent
+ * hardware IRQ and type (such as IRQ_TYPE_*) shall be returned by
+ * the driver. The driver can calculate this from an offset or using
+ * a lookup table or whatever method is best for this chip. Return
+ * 0 on successful translation in the driver.
+ *
+ * If some ranges of hardware IRQs do not have a corresponding parent
+ * HWIRQ, return -EINVAL, but also make sure to fill in @valid_mask and
+ * @need_valid_mask to make these GPIO lines unavailable for
+ * translation.
+ */
+ int (*child_to_parent_hwirq)(struct gpio_chip *gc,
+ unsigned int child_hwirq,
+ unsigned int child_type,
+ unsigned int *parent_hwirq,
+ unsigned int *parent_type);
+
+ /**
+	 * @populate_parent_alloc_arg:
+ *
+ * This optional callback allocates and populates the specific struct
+ * for the parent's IRQ domain. If this is not specified, then
+ * &gpiochip_populate_parent_fwspec_twocell will be used. A four-cell
+ * variant named &gpiochip_populate_parent_fwspec_fourcell is also
+ * available.
+ */
+ int (*populate_parent_alloc_arg)(struct gpio_chip *gc,
+ union gpio_irq_fwspec *fwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type);
+
+ /**
+ * @child_offset_to_irq:
+ *
+ * This optional callback is used to translate the child's GPIO line
+ * offset on the GPIO chip to an IRQ number for the GPIO to_irq()
+ * callback. If this is not specified, then a default callback will be
+ * provided that returns the line offset.
+ */
+ unsigned int (*child_offset_to_irq)(struct gpio_chip *gc,
+ unsigned int pin);
+
+ /**
+ * @child_irq_domain_ops:
+ *
+ * The IRQ domain operations that will be used for this GPIO IRQ
+ * chip. If no operations are provided, then default callbacks will
+ * be populated to setup the IRQ hierarchy. Some drivers need to
+ * supply their own translate function.
+ */
+ struct irq_domain_ops child_irq_domain_ops;
+#endif
+
+ /**
+ * @handler:
+ *
+ * The IRQ handler to use (often a predefined IRQ core function) for
+ * GPIO IRQs, provided by GPIO driver.
+ */
+ irq_flow_handler_t handler;
+
+ /**
+ * @default_type:
+ *
+ * Default IRQ triggering type applied during GPIO driver
+ * initialization, provided by GPIO driver.
+ */
+ unsigned int default_type;
+
+ /**
+ * @lock_key:
+ *
+ * Per GPIO IRQ chip lockdep class for IRQ lock.
+ */
+ struct lock_class_key *lock_key;
+
+ /**
+ * @request_key:
+ *
+ * Per GPIO IRQ chip lockdep class for IRQ request.
+ */
+ struct lock_class_key *request_key;
+
+ /**
+ * @parent_handler:
+ *
+ * The interrupt handler for the GPIO chip's parent interrupts, may be
+ * NULL if the parent interrupts are nested rather than cascaded.
+ */
+ irq_flow_handler_t parent_handler;
+
+ union {
+ /**
+ * @parent_handler_data:
+ *
+ * If @per_parent_data is false, @parent_handler_data is a
+ * single pointer used as the data associated with every
+ * parent interrupt.
+ */
+ void *parent_handler_data;
+
+ /**
+ * @parent_handler_data_array:
+ *
+ * If @per_parent_data is true, @parent_handler_data_array is
+ * an array of @num_parents pointers, and is used to associate
+ * different data for each parent. This cannot be NULL if
+ * @per_parent_data is true.
+ */
+ void **parent_handler_data_array;
+ };
+
+ /**
+ * @num_parents:
+ *
+ * The number of interrupt parents of a GPIO chip.
+ */
+ unsigned int num_parents;
+
+ /**
+ * @parents:
+ *
+ * A list of interrupt parents of a GPIO chip. This is owned by the
+ * driver, so the core will only reference this list, not modify it.
+ */
+ unsigned int *parents;
+
+ /**
+ * @map:
+ *
+ * A list of interrupt parents for each line of a GPIO chip.
+ */
+ unsigned int *map;
+
+ /**
+ * @threaded:
+ *
+	 * If set to true, the interrupt handling uses nested threads.
+ */
+ bool threaded;
+
+ /**
+ * @per_parent_data:
+ *
+	 * True if @parent_handler_data_array describes a @num_parents-sized
+	 * array to be used as the per-parent data.
+ */
+ bool per_parent_data;
+
+ /**
+ * @initialized:
+ *
+	 * Flag tracking the initialization of the GPIO chip's IRQ members,
+	 * ensuring they are not used before they have been set up.
+ */
+ bool initialized;
+
+ /**
+ * @init_hw: optional routine to initialize hardware before
+ * an IRQ chip will be added. This is quite useful when
+ * a particular driver wants to clear IRQ related registers
+ * in order to avoid undesired events.
+ */
+ int (*init_hw)(struct gpio_chip *gc);
+
+ /**
+ * @init_valid_mask: optional routine to initialize @valid_mask, to be
+	 * used if not all GPIO lines are valid interrupts. Some lines simply
+	 * cannot fire interrupts; when defined, this routine is passed the
+	 * bitmap in "valid_mask" with all ngpios bits from 0..(ngpios-1)
+	 * initially set to "1" (valid). The callback can then clear the bits
+	 * for lines that cannot be used as interrupts.
+ */
+ void (*init_valid_mask)(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios);
+
+ /**
+ * @valid_mask:
+ *
+ * If not %NULL, holds bitmask of GPIOs which are valid to be included
+ * in IRQ domain of the chip.
+ */
+ unsigned long *valid_mask;
+
+ /**
+ * @first:
+ *
+ * Required for static IRQ allocation. If set, irq_domain_add_simple()
+ * will allocate and map all IRQs during initialization.
+ */
+ unsigned int first;
+
+ /**
+ * @irq_enable:
+ *
+ * Store old irq_chip irq_enable callback
+ */
+ void (*irq_enable)(struct irq_data *data);
+
+ /**
+ * @irq_disable:
+ *
+ * Store old irq_chip irq_disable callback
+ */
+ void (*irq_disable)(struct irq_data *data);
+ /**
+ * @irq_unmask:
+ *
+ * Store old irq_chip irq_unmask callback
+ */
+ void (*irq_unmask)(struct irq_data *data);
+
+ /**
+ * @irq_mask:
+ *
+ * Store old irq_chip irq_mask callback
+ */
+ void (*irq_mask)(struct irq_data *data);
+};
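To tie the fields above together, here is a minimal sketch (hypothetical foo_* names, cascaded interrupt case) of how a driver might fill in the embedded struct before registering its chip:

	struct gpio_irq_chip *girq = &gc->irq;

	girq->chip = &foo_irq_chip;		/* the driver's struct irq_chip */
	girq->parent_handler = foo_irq_handler;	/* cascaded parent handler */
	girq->num_parents = 1;
	girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
				     GFP_KERNEL);
	if (!girq->parents)
		return -ENOMEM;
	girq->parents[0] = parent_irq;		/* IRQ from platform resources */
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_bad_irq;		/* replaced per IRQ on set_type */

	return devm_gpiochip_add_data(dev, gc, priv);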
/**
* struct gpio_chip - abstract a GPIO controller
@@ -25,25 +322,41 @@ struct module;
* number or the name of the SoC IP-block implementing it.
* @gpiodev: the internal state holder, opaque struct
* @parent: optional parent device providing the GPIOs
+ * @fwnode: optional fwnode providing this controller's properties
* @owner: helps prevent removal of modules exporting active GPIOs
* @request: optional hook for chip-specific activation, such as
* enabling module power and clock; may sleep
* @free: optional hook for chip-specific deactivation, such as
* disabling module power and clock; may sleep
* @get_direction: returns direction for signal "offset", 0=out, 1=in,
- * (same as GPIOF_DIR_XXX), or negative error
+ * (same as GPIO_LINE_DIRECTION_OUT / GPIO_LINE_DIRECTION_IN),
+ * or negative error. It is recommended to always implement this
+ * function, even on input-only or output-only gpio chips.
* @direction_input: configures signal "offset" as input, or returns error
+ * This can be omitted on input-only or output-only gpio chips.
* @direction_output: configures signal "offset" as output, or returns error
+ * This can be omitted on input-only or output-only gpio chips.
* @get: returns value for signal "offset", 0=low, 1=high, or negative error
+ * @get_multiple: reads values for multiple signals defined by "mask" and
+ * stores them in "bits", returns 0 on success or negative error
* @set: assigns output value for signal "offset"
* @set_multiple: assigns output values for multiple signals defined by "mask"
* @set_config: optional hook for all kinds of settings. Uses the same
* packed config format as generic pinconf.
- * @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
+ * @to_irq: optional hook supporting non-static gpiod_to_irq() mappings;
* implementation may not sleep
* @dbg_show: optional routine to show contents in debugfs; default code
* will be used when this is omitted, but custom code can show extra
* state (such as pullup/pulldown configuration).
+ * @init_valid_mask: optional routine to initialize @valid_mask, to be used if
+ * not all GPIOs are valid.
 + * @add_pin_ranges: optional routine to initialize pin ranges, to be used
 + *	when the chip requires a special mapping of the pins that provide
 + *	GPIO functionality. It is called after the GPIO chip is added and
 + *	before the IRQ chip is added.
 + * @en_hw_timestamp: optional routine to enable hardware timestamping of
 + *	events, where the GPIO chip supports it.
 + * @dis_hw_timestamp: optional routine to disable hardware timestamping of
 + *	events, where the GPIO chip supports it.
* @base: identifies the first GPIO number handled by this chip;
* or, if negative during registration, requests dynamic ID allocation.
* DEPRECATION: providing anything non-negative and nailing the base
@@ -52,6 +365,9 @@ struct module;
* get rid of the static GPIO number space in the long run.
* @ngpio: the number of GPIOs handled by this controller; the last GPIO
* handled is (base + ngpio - 1).
+ * @offset: when multiple gpio chips belong to the same device this
+ * can be used as offset within the device so friendly names can
+ * be properly assigned.
* @names: if set, must be an array of strings to use as alternative
* names for the GPIOs in this chip. Any entry in the array
* may be NULL if there is no alias for the GPIO, however the
@@ -65,13 +381,16 @@ struct module;
* registers.
* @read_reg: reader function for generic GPIO
* @write_reg: writer function for generic GPIO
- * @pin2mask: some generic GPIO controllers work with the big-endian bits
- * notation, e.g. in a 8-bits register, GPIO7 is the least significant
- * bit. This callback assigns the right bit mask.
 + * @be_bits: if the generic GPIO has big endian bit order (bit 31 represents
 + *	line 0, bit 30 represents line 1 ... bit 0 represents line 31) this is
 + *	set to true by the generic GPIO core. It is for internal housekeeping only.
* @reg_dat: data (in) register for generic GPIO
* @reg_set: output set register (out=high) for generic GPIO
* @reg_clr: output clear register (out=low) for generic GPIO
- * @reg_dir: direction setting register for generic GPIO
+ * @reg_dir_out: direction out setting register for generic GPIO
+ * @reg_dir_in: direction in setting register for generic GPIO
+ * @bgpio_dir_unreadable: indicates that the direction register(s) cannot
 + *	be read and we need to rely on our internal state tracking.
* @bgpio_bits: number of register bits used for a generic GPIO i.e.
* <register width> * 8
* @bgpio_lock: used to lock chip->bgpio_data. Also, this is needed to keep
@@ -79,27 +398,11 @@ struct module;
* @bgpio_data: shadowed data register for generic GPIO to clear/set bits
* safely.
* @bgpio_dir: shadowed direction register for generic GPIO to clear/set
- * direction safely.
- * @irqchip: GPIO IRQ chip impl, provided by GPIO driver
- * @irqdomain: Interrupt translation domain; responsible for mapping
- * between GPIO hwirq number and linux irq number
- * @irq_base: first linux IRQ number assigned to GPIO IRQ chip (deprecated)
- * @irq_handler: the irq handler to use (often a predefined irq core function)
- * for GPIO IRQs, provided by GPIO driver
- * @irq_default_type: default IRQ triggering type applied during GPIO driver
- * initialization, provided by GPIO driver
- * @irq_chained_parent: GPIO IRQ chip parent/bank linux irq number,
- * provided by GPIO driver for chained interrupt (not for nested
- * interrupts).
- * @irq_nested: True if set the interrupt handling is nested.
- * @irq_need_valid_mask: If set core allocates @irq_valid_mask with all
- * bits set to one
- * @irq_valid_mask: If not %NULL holds bitmask of GPIOs which are valid to
- * be included in IRQ domain of the chip
- * @lock_key: per GPIO IRQ chip lockdep class
+ * direction safely. A "1" in this word means the line is set as
+ * output.
*
* A gpio_chip can help platforms abstract various sources of GPIOs so
- * they can all be accessed through a common programing interface.
+ * they can all be accessed through a common programming interface.
* Example sources would be SOC controllers, FPGAs, multifunction
* chips, dedicated GPIO expanders, and so on.
*
@@ -112,114 +415,229 @@ struct gpio_chip {
const char *label;
struct gpio_device *gpiodev;
struct device *parent;
+ struct fwnode_handle *fwnode;
struct module *owner;
- int (*request)(struct gpio_chip *chip,
- unsigned offset);
- void (*free)(struct gpio_chip *chip,
- unsigned offset);
- int (*get_direction)(struct gpio_chip *chip,
- unsigned offset);
- int (*direction_input)(struct gpio_chip *chip,
- unsigned offset);
- int (*direction_output)(struct gpio_chip *chip,
- unsigned offset, int value);
- int (*get)(struct gpio_chip *chip,
- unsigned offset);
- void (*set)(struct gpio_chip *chip,
- unsigned offset, int value);
- void (*set_multiple)(struct gpio_chip *chip,
+ int (*request)(struct gpio_chip *gc,
+ unsigned int offset);
+ void (*free)(struct gpio_chip *gc,
+ unsigned int offset);
+ int (*get_direction)(struct gpio_chip *gc,
+ unsigned int offset);
+ int (*direction_input)(struct gpio_chip *gc,
+ unsigned int offset);
+ int (*direction_output)(struct gpio_chip *gc,
+ unsigned int offset, int value);
+ int (*get)(struct gpio_chip *gc,
+ unsigned int offset);
+ int (*get_multiple)(struct gpio_chip *gc,
+ unsigned long *mask,
+ unsigned long *bits);
+ void (*set)(struct gpio_chip *gc,
+ unsigned int offset, int value);
+ void (*set_multiple)(struct gpio_chip *gc,
unsigned long *mask,
unsigned long *bits);
- int (*set_config)(struct gpio_chip *chip,
- unsigned offset,
+ int (*set_config)(struct gpio_chip *gc,
+ unsigned int offset,
unsigned long config);
- int (*to_irq)(struct gpio_chip *chip,
- unsigned offset);
+ int (*to_irq)(struct gpio_chip *gc,
+ unsigned int offset);
void (*dbg_show)(struct seq_file *s,
- struct gpio_chip *chip);
+ struct gpio_chip *gc);
+
+ int (*init_valid_mask)(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios);
+
+ int (*add_pin_ranges)(struct gpio_chip *gc);
+
+ int (*en_hw_timestamp)(struct gpio_chip *gc,
+ u32 offset,
+ unsigned long flags);
+ int (*dis_hw_timestamp)(struct gpio_chip *gc,
+ u32 offset,
+ unsigned long flags);
int base;
u16 ngpio;
+ u16 offset;
const char *const *names;
bool can_sleep;
#if IS_ENABLED(CONFIG_GPIO_GENERIC)
unsigned long (*read_reg)(void __iomem *reg);
void (*write_reg)(void __iomem *reg, unsigned long data);
- unsigned long (*pin2mask)(struct gpio_chip *gc, unsigned int pin);
+ bool be_bits;
void __iomem *reg_dat;
void __iomem *reg_set;
void __iomem *reg_clr;
- void __iomem *reg_dir;
+ void __iomem *reg_dir_out;
+ void __iomem *reg_dir_in;
+ bool bgpio_dir_unreadable;
int bgpio_bits;
- spinlock_t bgpio_lock;
+ raw_spinlock_t bgpio_lock;
unsigned long bgpio_data;
unsigned long bgpio_dir;
-#endif
+#endif /* CONFIG_GPIO_GENERIC */
#ifdef CONFIG_GPIOLIB_IRQCHIP
/*
* With CONFIG_GPIOLIB_IRQCHIP we get an irqchip inside the gpiolib
* to handle IRQs for most practical cases.
*/
- struct irq_chip *irqchip;
- struct irq_domain *irqdomain;
- unsigned int irq_base;
- irq_flow_handler_t irq_handler;
- unsigned int irq_default_type;
- unsigned int irq_chained_parent;
- bool irq_nested;
- bool irq_need_valid_mask;
- unsigned long *irq_valid_mask;
- struct lock_class_key *lock_key;
-#endif
+
+ /**
+ * @irq:
+ *
+ * Integrates interrupt chip functionality with the GPIO chip. Can be
+ * used to handle IRQs for most practical cases.
+ */
+ struct gpio_irq_chip irq;
+#endif /* CONFIG_GPIOLIB_IRQCHIP */
+
+ /**
+ * @valid_mask:
+ *
+ * If not %NULL, holds bitmask of GPIOs which are valid to be used
+ * from the chip.
+ */
+ unsigned long *valid_mask;
#if defined(CONFIG_OF_GPIO)
/*
- * If CONFIG_OF is enabled, then all GPIO controllers described in the
- * device tree automatically may have an OF translation
+ * If CONFIG_OF_GPIO is enabled, then all GPIO controllers described in
+	 * the device tree may automatically have an OF translation
+ */
+
+ /**
+ * @of_gpio_n_cells:
+ *
+ * Number of cells used to form the GPIO specifier.
+ */
+ unsigned int of_gpio_n_cells;
+
+ /**
+ * @of_xlate:
+ *
+ * Callback to translate a device tree GPIO specifier into a chip-
+ * relative GPIO number and flags.
*/
- struct device_node *of_node;
- int of_gpio_n_cells;
int (*of_xlate)(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec, u32 *flags);
-#endif
+#endif /* CONFIG_OF_GPIO */
};
-extern const char *gpiochip_is_requested(struct gpio_chip *chip,
- unsigned offset);
+extern const char *gpiochip_is_requested(struct gpio_chip *gc,
+ unsigned int offset);
+
+/**
+ * for_each_requested_gpio_in_range - iterates over requested GPIOs in a given range
+ * @chip: the chip to query
+ * @i: loop variable
+ * @base: first GPIO in the range
+ * @size: amount of GPIOs to check starting from @base
+ * @label: label of current GPIO
+ */
+#define for_each_requested_gpio_in_range(chip, i, base, size, label) \
+ for (i = 0; i < size; i++) \
+ if ((label = gpiochip_is_requested(chip, base + i)) == NULL) {} else
+
+/* Iterates over all requested GPIO of the given @chip */
+#define for_each_requested_gpio(chip, i, label) \
+ for_each_requested_gpio_in_range(chip, i, 0, chip->ngpio, label)
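For example (a sketch, not from a specific driver), a dbg_show() implementation can walk every requested line with this macro:

	static void foo_dbg_show(struct seq_file *s, struct gpio_chip *gc)
	{
		const char *label;
		unsigned int i;

		/* Print only lines that are currently requested */
		for_each_requested_gpio(gc, i, label)
			seq_printf(s, " gpio-%-3u (%s)\n", i, label);
	}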
/* add/remove chips */
-extern int gpiochip_add_data(struct gpio_chip *chip, void *data);
-static inline int gpiochip_add(struct gpio_chip *chip)
+extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key);
+
+/**
+ * gpiochip_add_data() - register a gpio_chip
+ * @gc: the chip to register, with gc->base initialized
+ * @data: driver-private data associated with this chip
+ *
+ * Context: potentially before irqs will work
+ *
+ * When gpiochip_add_data() is called very early during boot, so that GPIOs
+ * can be freely used, the gc->parent device must be registered before
+ * the gpio framework's arch_initcall(). Otherwise sysfs initialization
+ * for GPIOs will fail rudely.
+ *
+ * gpiochip_add_data() must only be called after gpiolib initialization,
+ * i.e. after core_initcall().
+ *
+ * If gc->base is negative, this requests dynamic assignment of
+ * a range of valid GPIOs.
+ *
+ * Returns:
+ * A negative errno if the chip can't be registered, such as because the
+ * gc->base is invalid or already associated with a different chip.
+ * Otherwise it returns zero as a success code.
+ */
+#ifdef CONFIG_LOCKDEP
+#define gpiochip_add_data(gc, data) ({ \
+ static struct lock_class_key lock_key; \
+ static struct lock_class_key request_key; \
+ gpiochip_add_data_with_key(gc, data, &lock_key, \
+ &request_key); \
+ })
+#define devm_gpiochip_add_data(dev, gc, data) ({ \
+ static struct lock_class_key lock_key; \
+ static struct lock_class_key request_key; \
+ devm_gpiochip_add_data_with_key(dev, gc, data, &lock_key, \
+ &request_key); \
+ })
+#else
+#define gpiochip_add_data(gc, data) gpiochip_add_data_with_key(gc, data, NULL, NULL)
+#define devm_gpiochip_add_data(dev, gc, data) \
+ devm_gpiochip_add_data_with_key(dev, gc, data, NULL, NULL)
+#endif /* CONFIG_LOCKDEP */
+
+static inline int gpiochip_add(struct gpio_chip *gc)
{
- return gpiochip_add_data(chip, NULL);
+ return gpiochip_add_data(gc, NULL);
}
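A minimal probe() built on these registration helpers could look like the following sketch (the foo_* names and the 32-line bank are hypothetical):

	static int foo_gpio_probe(struct platform_device *pdev)
	{
		struct foo_gpio *priv;
		struct gpio_chip *gc;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		gc = &priv->gc;
		gc->label = dev_name(&pdev->dev);
		gc->parent = &pdev->dev;
		gc->owner = THIS_MODULE;
		gc->base = -1;			/* always request dynamic IDs */
		gc->ngpio = 32;
		gc->get = foo_gpio_get;
		gc->set = foo_gpio_set;
		gc->direction_input = foo_gpio_direction_input;
		gc->direction_output = foo_gpio_direction_output;

		/* Managed registration: the chip is removed on driver detach */
		return devm_gpiochip_add_data(&pdev->dev, gc, priv);
	}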
-extern void gpiochip_remove(struct gpio_chip *chip);
-extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip,
- void *data);
-extern void devm_gpiochip_remove(struct device *dev, struct gpio_chip *chip);
+extern void gpiochip_remove(struct gpio_chip *gc);
+extern int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc, void *data,
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key);
extern struct gpio_chip *gpiochip_find(void *data,
- int (*match)(struct gpio_chip *chip, void *data));
+ int (*match)(struct gpio_chip *gc, void *data));
-/* lock/unlock as IRQ */
-int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset);
-void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset);
-bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset);
+bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset);
+int gpiochip_reqres_irq(struct gpio_chip *gc, unsigned int offset);
+void gpiochip_relres_irq(struct gpio_chip *gc, unsigned int offset);
+void gpiochip_disable_irq(struct gpio_chip *gc, unsigned int offset);
+void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset);
+
+/* irq_data versions of the above */
+int gpiochip_irq_reqres(struct irq_data *data);
+void gpiochip_irq_relres(struct irq_data *data);
+
+/* Paste this in your irq_chip structure */
+#define GPIOCHIP_IRQ_RESOURCE_HELPERS \
+ .irq_request_resources = gpiochip_irq_reqres, \
+ .irq_release_resources = gpiochip_irq_relres
+
+static inline void gpio_irq_chip_set_chip(struct gpio_irq_chip *girq,
+ const struct irq_chip *chip)
+{
+ /* Yes, dropping const is ugly, but it isn't like we have a choice */
+ girq->chip = (struct irq_chip *)chip;
+}
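As a sketch of the intended pattern (foo_hw_mask()/foo_hw_unmask() are hypothetical register accessors), an immutable irq_chip pastes in the resource helpers and pairs its mask/unmask operations with gpiochip_disable_irq()/gpiochip_enable_irq():

	static void foo_irq_mask(struct irq_data *d)
	{
		struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
		irq_hw_number_t hwirq = irqd_to_hwirq(d);

		foo_hw_mask(gc, hwirq);
		gpiochip_disable_irq(gc, hwirq);
	}

	static void foo_irq_unmask(struct irq_data *d)
	{
		struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
		irq_hw_number_t hwirq = irqd_to_hwirq(d);

		gpiochip_enable_irq(gc, hwirq);
		foo_hw_unmask(gc, hwirq);
	}

	static const struct irq_chip foo_irq_chip = {
		.name		= "foo",
		.irq_mask	= foo_irq_mask,
		.irq_unmask	= foo_irq_unmask,
		.flags		= IRQCHIP_IMMUTABLE,
		GPIOCHIP_IRQ_RESOURCE_HELPERS,
	};

The driver then calls gpio_irq_chip_set_chip(&gc->irq, &foo_irq_chip) rather than assigning a mutable irq_chip pointer directly.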
/* Line status inquiry for drivers */
-bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset);
-bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset);
+bool gpiochip_line_is_open_drain(struct gpio_chip *gc, unsigned int offset);
+bool gpiochip_line_is_open_source(struct gpio_chip *gc, unsigned int offset);
/* Sleep persistence inquiry for drivers */
-bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset);
+bool gpiochip_line_is_persistent(struct gpio_chip *gc, unsigned int offset);
+bool gpiochip_line_is_valid(const struct gpio_chip *gc, unsigned int offset);
/* get driver data */
-void *gpiochip_get_data(struct gpio_chip *chip);
-
-struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc);
+void *gpiochip_get_data(struct gpio_chip *gc);
struct bgpio_pdata {
const char *label;
@@ -227,7 +645,18 @@ struct bgpio_pdata {
int ngpio;
};
-#if IS_ENABLED(CONFIG_GPIO_GENERIC)
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+
+int gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc,
+ union gpio_irq_fwspec *gfwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type);
+int gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc,
+ union gpio_irq_fwspec *gfwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type);
+
+#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
int bgpio_init(struct gpio_chip *gc, struct device *dev,
unsigned long sz, void __iomem *dat, void __iomem *set,
@@ -240,123 +669,74 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
#define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3)
#define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */
#define BGPIOF_NO_OUTPUT BIT(5) /* only input */
+#define BGPIOF_NO_SET_ON_INPUT BIT(6)
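A memory-mapped bank with 32-bit data, set/clear and direction-out registers could be wired up roughly like this (a sketch; base is a hypothetical ioremapped register block and the FOO_* offsets are made up):

	ret = bgpio_init(gc, dev, 4,		/* 4 bytes = 32 bits per register */
			 base + FOO_DAT,	/* data in */
			 base + FOO_SET,	/* output set */
			 base + FOO_CLR,	/* output clear */
			 base + FOO_DIR_OUT,	/* direction, 1 = output */
			 NULL,			/* no separate direction-in register */
			 0);			/* no BGPIOF_* quirks needed */
	if (ret)
		return ret;

	return devm_gpiochip_add_data(dev, gc, priv);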
-#endif
+int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq);
+void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq);
-#ifdef CONFIG_GPIOLIB_IRQCHIP
+int gpiochip_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *data, bool reserve);
+void gpiochip_irq_domain_deactivate(struct irq_domain *domain,
+ struct irq_data *data);
-void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int parent_irq,
- irq_flow_handler_t parent_handler);
+bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc,
+ unsigned int offset);
-void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int parent_irq);
-
-int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type,
- bool nested,
- struct lock_class_key *lock_key);
-
-#ifdef CONFIG_LOCKDEP
-
-/*
- * Lockdep requires that each irqchip instance be created with a
- * unique key so as to avoid unnecessary warnings. This upfront
- * boilerplate static inlines provides such a key for each
- * unique instance.
- */
-static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type)
-{
- static struct lock_class_key key;
-
- return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
- handler, type, false, &key);
-}
-
-static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type)
-{
-
- static struct lock_class_key key;
-
- return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
- handler, type, true, &key);
-}
+#ifdef CONFIG_GPIOLIB_IRQCHIP
+int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
+ struct irq_domain *domain);
#else
-static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type)
-{
- return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
- handler, type, false, NULL);
-}
-static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type)
+#include <asm/bug.h>
+#include <asm/errno.h>
+
+static inline int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
+ struct irq_domain *domain)
{
- return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
- handler, type, true, NULL);
+ WARN_ON(1);
+ return -EINVAL;
}
-#endif /* CONFIG_LOCKDEP */
-
-#endif /* CONFIG_GPIOLIB_IRQCHIP */
+#endif
-int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset);
-void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset);
-int gpiochip_generic_config(struct gpio_chip *chip, unsigned offset,
+int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset);
+void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset);
+int gpiochip_generic_config(struct gpio_chip *gc, unsigned int offset,
unsigned long config);
-#ifdef CONFIG_PINCTRL
-
/**
* struct gpio_pin_range - pin range controlled by a gpio chip
- * @head: list for maintaining set of pin ranges, used internally
+ * @node: list for maintaining set of pin ranges, used internally
* @pctldev: pinctrl device which handles corresponding pins
* @range: actual range of pins controlled by a gpio controller
*/
-
struct gpio_pin_range {
struct list_head node;
struct pinctrl_dev *pctldev;
struct pinctrl_gpio_range range;
};
-int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+#ifdef CONFIG_PINCTRL
+
+int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
unsigned int gpio_offset, unsigned int pin_offset,
unsigned int npins);
-int gpiochip_add_pingroup_range(struct gpio_chip *chip,
+int gpiochip_add_pingroup_range(struct gpio_chip *gc,
struct pinctrl_dev *pctldev,
unsigned int gpio_offset, const char *pin_group);
-void gpiochip_remove_pin_ranges(struct gpio_chip *chip);
+void gpiochip_remove_pin_ranges(struct gpio_chip *gc);
-#else
+#else /* ! CONFIG_PINCTRL */
static inline int
-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
unsigned int gpio_offset, unsigned int pin_offset,
unsigned int npins)
{
return 0;
}
static inline int
-gpiochip_add_pingroup_range(struct gpio_chip *chip,
+gpiochip_add_pingroup_range(struct gpio_chip *gc,
struct pinctrl_dev *pctldev,
unsigned int gpio_offset, const char *pin_group)
{
@@ -364,18 +744,34 @@ gpiochip_add_pingroup_range(struct gpio_chip *chip,
}
static inline void
-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
+gpiochip_remove_pin_ranges(struct gpio_chip *gc)
{
}
#endif /* CONFIG_PINCTRL */
-struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
- const char *label);
+struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
+ unsigned int hwnum,
+ const char *label,
+ enum gpio_lookup_flags lflags,
+ enum gpiod_flags dflags);
void gpiochip_free_own_desc(struct gpio_desc *desc);
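With this pair a driver can consume one of its own lines, e.g. to drive an internal reset (a sketch; line 10 and the label are hypothetical):

	struct gpio_desc *desc;

	desc = gpiochip_request_own_desc(gc, 10, "foo-reset",
					 GPIO_ACTIVE_HIGH, GPIOD_OUT_LOW);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	gpiod_set_value(desc, 1);	/* assert the reset line */
	/* ... later, when the line is no longer needed: */
	gpiochip_free_own_desc(desc);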
+#ifdef CONFIG_GPIOLIB
+
+/* lock/unlock as IRQ */
+int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset);
+void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset);
+
+
+struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc);
+
#else /* CONFIG_GPIOLIB */
+#include <linux/err.h>
+
+#include <asm/bug.h>
+
static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
@@ -383,6 +779,43 @@ static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
return ERR_PTR(-ENODEV);
}
+static inline int gpiochip_lock_as_irq(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static inline void gpiochip_unlock_as_irq(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ WARN_ON(1);
+}
#endif /* CONFIG_GPIOLIB */
-#endif
+#define for_each_gpiochip_node(dev, child) \
+ device_for_each_child_node(dev, child) \
+ if (!fwnode_property_present(child, "gpio-controller")) {} else
+
+static inline unsigned int gpiochip_node_count(struct device *dev)
+{
+ struct fwnode_handle *child;
+ unsigned int count = 0;
+
+ for_each_gpiochip_node(dev, child)
+ count++;
+
+ return count;
+}
+
+static inline struct fwnode_handle *gpiochip_node_get_first(struct device *dev)
+{
+ struct fwnode_handle *fwnode;
+
+ for_each_gpiochip_node(dev, fwnode)
+ return fwnode;
+
+ return NULL;
+}
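For example (a sketch), a driver whose device node carries several "gpio-controller" children can size and walk them with these helpers:

	static int foo_probe_banks(struct device *dev)
	{
		struct fwnode_handle *child;
		unsigned int nbanks;

		nbanks = gpiochip_node_count(dev);
		if (!nbanks)
			return -ENODEV;

		for_each_gpiochip_node(dev, child) {
			/* register one gpio_chip per "gpio-controller" node */
		}

		return 0;
	}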
+
+#endif /* __LINUX_GPIO_DRIVER_H */
diff --git a/include/linux/gpio/gpio-reg.h b/include/linux/gpio/gpio-reg.h
index 90e0b9060e6d..3913b6660ed1 100644
--- a/include/linux/gpio/gpio-reg.h
+++ b/include/linux/gpio/gpio-reg.h
@@ -1,13 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef GPIO_REG_H
#define GPIO_REG_H
+#include <linux/types.h>
+
struct device;
struct irq_domain;
+struct gpio_chip;
+
struct gpio_chip *gpio_reg_init(struct device *dev, void __iomem *reg,
int base, int num, const char *label, u32 direction, u32 def_out,
const char *const *names, struct irq_domain *irqdom, const int *irqs);
int gpio_reg_resume(struct gpio_chip *gc);
-#endif
+#endif /* GPIO_REG_H */
diff --git a/include/linux/gpio/legacy-of-mm-gpiochip.h b/include/linux/gpio/legacy-of-mm-gpiochip.h
new file mode 100644
index 000000000000..2e2bd3b19cc3
--- /dev/null
+++ b/include/linux/gpio/legacy-of-mm-gpiochip.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * OF helpers for the old of_mm_gpio_chip, used on ppc32 and nios2,
+ * do not use in new code.
+ *
+ * Copyright (c) 2007-2008 MontaVista Software, Inc.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+
+#ifndef __LINUX_GPIO_LEGACY_OF_MM_GPIO_CHIP_H
+#define __LINUX_GPIO_LEGACY_OF_MM_GPIO_CHIP_H
+
+#include <linux/gpio/driver.h>
+#include <linux/of.h>
+
+/*
+ * OF GPIO chip for memory mapped banks
+ */
+struct of_mm_gpio_chip {
+ struct gpio_chip gc;
+ void (*save_regs)(struct of_mm_gpio_chip *mm_gc);
+ void __iomem *regs;
+};
+
+static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc)
+{
+ return container_of(gc, struct of_mm_gpio_chip, gc);
+}
+
+extern int of_mm_gpiochip_add_data(struct device_node *np,
+ struct of_mm_gpio_chip *mm_gc,
+ void *data);
+extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
+
+#endif /* __LINUX_GPIO_LEGACY_OF_MM_GPIO_CHIP_H */
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index 6e76b16fcade..44e5f162973e 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -1,35 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_GPIO_MACHINE_H
#define __LINUX_GPIO_MACHINE_H
#include <linux/types.h>
-#include <linux/list.h>
enum gpio_lookup_flags {
- GPIO_ACTIVE_HIGH = (0 << 0),
- GPIO_ACTIVE_LOW = (1 << 0),
- GPIO_OPEN_DRAIN = (1 << 1),
- GPIO_OPEN_SOURCE = (1 << 2),
- GPIO_SLEEP_MAINTAIN_VALUE = (0 << 3),
- GPIO_SLEEP_MAY_LOOSE_VALUE = (1 << 3),
+ GPIO_ACTIVE_HIGH = (0 << 0),
+ GPIO_ACTIVE_LOW = (1 << 0),
+ GPIO_OPEN_DRAIN = (1 << 1),
+ GPIO_OPEN_SOURCE = (1 << 2),
+ GPIO_PERSISTENT = (0 << 3),
+ GPIO_TRANSITORY = (1 << 3),
+ GPIO_PULL_UP = (1 << 4),
+ GPIO_PULL_DOWN = (1 << 5),
+ GPIO_PULL_DISABLE = (1 << 6),
+
+ GPIO_LOOKUP_FLAGS_DEFAULT = GPIO_ACTIVE_HIGH | GPIO_PERSISTENT,
};
/**
* struct gpiod_lookup - lookup table
- * @chip_label: name of the chip the GPIO belongs to
- * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO
+ * @key: either the name of the chip the GPIO belongs to, or the GPIO line name
+ * Note that GPIO line names are not guaranteed to be globally unique,
+ * so this will use the first match found!
+ * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO, or
+ * U16_MAX to indicate that @key is a GPIO line name
* @con_id: name of the GPIO from the device's point of view
* @idx: index of the GPIO in case several GPIOs share the same name
- * @flags: mask of GPIO_* values
+ * @flags: bitmask of gpio_lookup_flags GPIO_* values
*
* gpiod_lookup is a lookup table for associating GPIOs to specific devices and
* functions using platform data.
*/
struct gpiod_lookup {
- const char *chip_label;
+ const char *key;
u16 chip_hwnum;
const char *con_id;
unsigned int idx;
- enum gpio_lookup_flags flags;
+ unsigned long flags;
};
struct gpiod_lookup_table {
@@ -38,34 +46,82 @@ struct gpiod_lookup_table {
struct gpiod_lookup table[];
};
+/**
+ * struct gpiod_hog - GPIO line hog table
+ * @chip_label: name of the chip the GPIO belongs to
+ * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO
+ * @line_name: consumer name for the hogged line
+ * @lflags: bitmask of gpio_lookup_flags GPIO_* values
+ * @dflags: GPIO flags used to specify the direction and value
+ */
+struct gpiod_hog {
+ struct list_head list;
+ const char *chip_label;
+ u16 chip_hwnum;
+ const char *line_name;
+ unsigned long lflags;
+ int dflags;
+};
+
+/*
+ * Helper for lookup tables with a single lookup for a device.
+ */
+#define GPIO_LOOKUP_SINGLE(_name, _dev_id, _key, _chip_hwnum, _con_id, _flags) \
+static struct gpiod_lookup_table _name = { \
+ .dev_id = _dev_id, \
+ .table = { \
+ GPIO_LOOKUP(_key, _chip_hwnum, _con_id, _flags), \
+ {}, \
+ }, \
+}
+
/*
* Simple definition of a single GPIO under a con_id
*/
-#define GPIO_LOOKUP(_chip_label, _chip_hwnum, _con_id, _flags) \
- GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, 0, _flags)
+#define GPIO_LOOKUP(_key, _chip_hwnum, _con_id, _flags) \
+ GPIO_LOOKUP_IDX(_key, _chip_hwnum, _con_id, 0, _flags)
/*
* Use this macro if you need to have several GPIOs under the same con_id.
* Each GPIO needs to use a different index and can be accessed using
* gpiod_get_index()
*/
-#define GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, _idx, _flags) \
-{ \
- .chip_label = _chip_label, \
+#define GPIO_LOOKUP_IDX(_key, _chip_hwnum, _con_id, _idx, _flags) \
+(struct gpiod_lookup) { \
+ .key = _key, \
.chip_hwnum = _chip_hwnum, \
.con_id = _con_id, \
.idx = _idx, \
.flags = _flags, \
}
+/*
+ * Simple definition of a single GPIO hog in an array.
+ */
+#define GPIO_HOG(_chip_label, _chip_hwnum, _line_name, _lflags, _dflags) \
+(struct gpiod_hog) { \
+ .chip_label = _chip_label, \
+ .chip_hwnum = _chip_hwnum, \
+ .line_name = _line_name, \
+ .lflags = _lflags, \
+ .dflags = _dflags, \
+}
+
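A board file might combine these macros as in the following sketch (the chip label, device name and line numbers are all hypothetical; GPIOD_OUT_HIGH comes from <linux/gpio/consumer.h>):

	static struct gpiod_lookup_table foo_gpios_table = {
		.dev_id = "foo-device",
		.table = {
			GPIO_LOOKUP("gpio-bank0", 15, "reset", GPIO_ACTIVE_LOW),
			GPIO_LOOKUP_IDX("gpio-bank0", 16, "led", 0, GPIO_ACTIVE_HIGH),
			{ },
		},
	};

	static struct gpiod_hog foo_gpio_hogs[] = {
		GPIO_HOG("gpio-bank0", 17, "power-enable",
			 GPIO_ACTIVE_HIGH, GPIOD_OUT_HIGH),
		{ },
	};

	/* in the board setup code: */
	gpiod_add_lookup_table(&foo_gpios_table);
	gpiod_add_hogs(foo_gpio_hogs);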
#ifdef CONFIG_GPIOLIB
void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
+void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n);
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
-#else
+void gpiod_add_hogs(struct gpiod_hog *hogs);
+void gpiod_remove_hogs(struct gpiod_hog *hogs);
+#else /* ! CONFIG_GPIOLIB */
static inline
void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {}
static inline
+void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n) {}
+static inline
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {}
-#endif
+static inline void gpiod_add_hogs(struct gpiod_hog *hogs) {}
+static inline void gpiod_remove_hogs(struct gpiod_hog *hogs) {}
+#endif /* CONFIG_GPIOLIB */
#endif /* __LINUX_GPIO_MACHINE_H */
diff --git a/include/linux/gpio/property.h b/include/linux/gpio/property.h
new file mode 100644
index 000000000000..6c75c8bd44a0
--- /dev/null
+++ b/include/linux/gpio/property.h
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
+#ifndef __LINUX_GPIO_PROPERTY_H
+#define __LINUX_GPIO_PROPERTY_H
+
+#include <dt-bindings/gpio/gpio.h> /* for GPIO_* flags */
+#include <linux/property.h>
+
+#define PROPERTY_ENTRY_GPIO(_name_, _chip_node_, _idx_, _flags_) \
+ PROPERTY_ENTRY_REF(_name_, _chip_node_, _idx_, _flags_)
+
+#endif /* __LINUX_GPIO_PROPERTY_H */
diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h
new file mode 100644
index 000000000000..a9f7b7faf57b
--- /dev/null
+++ b/include/linux/gpio/regmap.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _LINUX_GPIO_REGMAP_H
+#define _LINUX_GPIO_REGMAP_H
+
+struct device;
+struct fwnode_handle;
+struct gpio_regmap;
+struct irq_domain;
+struct regmap;
+
+#define GPIO_REGMAP_ADDR_ZERO ((unsigned int)(-1))
+#define GPIO_REGMAP_ADDR(addr) ((addr) ? : GPIO_REGMAP_ADDR_ZERO)
+
+/**
+ * struct gpio_regmap_config - Description of a generic regmap gpio_chip.
+ * @parent: The parent device
+ * @regmap:		The regmap used to access the registers
+ * @fwnode: (Optional) The firmware node.
+ * If not given, the fwnode of the parent is used.
+ * @label: (Optional) Descriptive name for GPIO controller.
+ * If not given, the name of the device is used.
+ * @ngpio: Number of GPIOs
+ * @names: (Optional) Array of names for gpios
+ * @reg_dat_base: (Optional) (in) register base address
+ * @reg_set_base: (Optional) set register base address
+ * @reg_clr_base: (Optional) clear register base address
+ * @reg_dir_in_base: (Optional) in setting register base address
+ * @reg_dir_out_base: (Optional) out setting register base address
+ * @reg_stride: (Optional) May be set if the registers (of the
+ * same type, dat, set, etc) are not consecutive.
+ * @ngpio_per_reg: Number of GPIOs per register
+ * @irq_domain: (Optional) IRQ domain if the controller is
+ * interrupt-capable
+ * @reg_mask_xlate: (Optional) Translates base address and GPIO
+ * offset to a register/bitmask pair. If not
+ * given the default gpio_regmap_simple_xlate()
+ * is used.
+ * @drvdata:		(Optional) Pointer to driver-specific data which is
+ *			not used by gpio-regmap but is provided "as is" to the
+ *			driver callback(s).
+ *
+ * The ->reg_mask_xlate translates a given base address and GPIO offset to
+ * register and mask pair. The base address is one of the given register
+ * base addresses in this structure.
+ *
+ * Although all register base addresses are marked as optional, there are
+ * several rules:
+ * 1. if you only have @reg_dat_base set, then it is input-only
+ * 2. if you only have @reg_set_base set, then it is output-only
+ * 3. if you have either @reg_dir_in_base or @reg_dir_out_base set, then
+ * you have to set both @reg_dat_base and @reg_set_base
+ * 4. if you have @reg_set_base set, you may also set @reg_clr_base to have
+ * two different registers for setting and clearing the output. This is
+ * also valid for the output-only case.
+ * 5. @reg_dir_in_base and @reg_dir_out_base are mutually exclusive;
+ *    only one of them may be set, as the two would be redundant.
+ *
+ * Note: All base addresses may have the special value %GPIO_REGMAP_ADDR_ZERO
+ * which forces the address to the value 0.
+ */
+struct gpio_regmap_config {
+ struct device *parent;
+ struct regmap *regmap;
+ struct fwnode_handle *fwnode;
+
+ const char *label;
+ int ngpio;
+ const char *const *names;
+
+ unsigned int reg_dat_base;
+ unsigned int reg_set_base;
+ unsigned int reg_clr_base;
+ unsigned int reg_dir_in_base;
+ unsigned int reg_dir_out_base;
+ int reg_stride;
+ int ngpio_per_reg;
+ struct irq_domain *irq_domain;
+
+ int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base,
+ unsigned int offset, unsigned int *reg,
+ unsigned int *mask);
+
+ void *drvdata;
+};
+
+struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config);
+void gpio_regmap_unregister(struct gpio_regmap *gpio);
+struct gpio_regmap *devm_gpio_regmap_register(struct device *dev,
+ const struct gpio_regmap_config *config);
+void *gpio_regmap_get_drvdata(struct gpio_regmap *gpio);
+
+#endif /* _LINUX_GPIO_REGMAP_H */
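On top of an existing regmap, a driver might register a generic chip with a sketch like this (the FOO_* register offsets are hypothetical):

	struct gpio_regmap_config config = {
		.parent		= dev,
		.regmap		= regmap,
		.ngpio		= 16,
		.ngpio_per_reg	= 16,
		.reg_dat_base	= GPIO_REGMAP_ADDR(FOO_INPUT),
		.reg_set_base	= GPIO_REGMAP_ADDR(FOO_OUTPUT),
		.reg_dir_out_base = GPIO_REGMAP_ADDR(FOO_DIRECTION),
	};
	struct gpio_regmap *gpio;

	gpio = devm_gpio_regmap_register(dev, &config);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);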
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
index 0b71024c082c..3f84aeb81e48 100644
--- a/include/linux/gpio_keys.h
+++ b/include/linux/gpio_keys.h
@@ -1,6 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _GPIO_KEYS_H
#define _GPIO_KEYS_H
+#include <linux/types.h>
+
struct device;
/**
@@ -12,6 +15,7 @@ struct device;
* @desc: label that will be attached to button's gpio
* @type: input event type (%EV_KEY, %EV_SW, %EV_ABS)
* @wakeup: configure the button as a wake-up source
+ * @wakeup_event_action: event action to trigger wakeup
* @debounce_interval: debounce ticks interval in msecs
* @can_disable: %true indicates that userspace is allowed to
* disable button via sysfs
@@ -25,6 +29,7 @@ struct gpio_keys_button {
const char *desc;
unsigned int type;
int wakeup;
+ int wakeup_event_action;
int debounce_interval;
bool can_disable;
int value;
diff --git a/include/linux/gpio_mouse.h b/include/linux/gpio_mouse.h
deleted file mode 100644
index 44ed7aa14d85..000000000000
--- a/include/linux/gpio_mouse.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Driver for simulating a mouse on GPIO lines.
- *
- * Copyright (C) 2007 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _GPIO_MOUSE_H
-#define _GPIO_MOUSE_H
-
-#define GPIO_MOUSE_POLARITY_ACT_HIGH 0x00
-#define GPIO_MOUSE_POLARITY_ACT_LOW 0x01
-
-#define GPIO_MOUSE_PIN_UP 0
-#define GPIO_MOUSE_PIN_DOWN 1
-#define GPIO_MOUSE_PIN_LEFT 2
-#define GPIO_MOUSE_PIN_RIGHT 3
-#define GPIO_MOUSE_PIN_BLEFT 4
-#define GPIO_MOUSE_PIN_BMIDDLE 5
-#define GPIO_MOUSE_PIN_BRIGHT 6
-#define GPIO_MOUSE_PIN_MAX 7
-
-/**
- * struct gpio_mouse_platform_data
- * @scan_ms: integer in ms specifying the scan periode.
- * @polarity: Pin polarity, active high or low.
- * @up: GPIO line for up value.
- * @down: GPIO line for down value.
- * @left: GPIO line for left value.
- * @right: GPIO line for right value.
- * @bleft: GPIO line for left button.
- * @bmiddle: GPIO line for middle button.
- * @bright: GPIO line for right button.
- *
- * This struct must be added to the platform_device in the board code.
- * It is used by the gpio_mouse driver to setup GPIO lines and to
- * calculate mouse movement.
- */
-struct gpio_mouse_platform_data {
- int scan_ms;
- int polarity;
-
- union {
- struct {
- int up;
- int down;
- int left;
- int right;
-
- int bleft;
- int bmiddle;
- int bright;
- };
- int pins[GPIO_MOUSE_PIN_MAX];
- };
-};
-
-#endif /* _GPIO_MOUSE_H */
diff --git a/include/linux/greybus.h b/include/linux/greybus.h
new file mode 100644
index 000000000000..18c0fb958b74
--- /dev/null
+++ b/include/linux/greybus.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus driver and device API
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ */
+
+#ifndef __LINUX_GREYBUS_H
+#define __LINUX_GREYBUS_H
+
+#ifdef __KERNEL__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/idr.h>
+
+#include <linux/greybus/greybus_id.h>
+#include <linux/greybus/greybus_manifest.h>
+#include <linux/greybus/greybus_protocols.h>
+#include <linux/greybus/manifest.h>
+#include <linux/greybus/hd.h>
+#include <linux/greybus/svc.h>
+#include <linux/greybus/control.h>
+#include <linux/greybus/module.h>
+#include <linux/greybus/interface.h>
+#include <linux/greybus/bundle.h>
+#include <linux/greybus/connection.h>
+#include <linux/greybus/operation.h>
+
+/* Matches up with the Greybus Protocol specification document */
+#define GREYBUS_VERSION_MAJOR 0x00
+#define GREYBUS_VERSION_MINOR 0x01
+
+#define GREYBUS_ID_MATCH_DEVICE \
+ (GREYBUS_ID_MATCH_VENDOR | GREYBUS_ID_MATCH_PRODUCT)
+
+#define GREYBUS_DEVICE(v, p) \
+ .match_flags = GREYBUS_ID_MATCH_DEVICE, \
+ .vendor = (v), \
+ .product = (p),
+
+#define GREYBUS_DEVICE_CLASS(c) \
+ .match_flags = GREYBUS_ID_MATCH_CLASS, \
+ .class = (c),
+
+/* Maximum number of CPorts */
+#define CPORT_ID_MAX 4095 /* UniPro max id is 4095 */
+#define CPORT_ID_BAD U16_MAX
+
+struct greybus_driver {
+ const char *name;
+
+ int (*probe)(struct gb_bundle *bundle,
+ const struct greybus_bundle_id *id);
+ void (*disconnect)(struct gb_bundle *bundle);
+
+ const struct greybus_bundle_id *id_table;
+
+ struct device_driver driver;
+};
+#define to_greybus_driver(d) container_of(d, struct greybus_driver, driver)
+
+static inline void greybus_set_drvdata(struct gb_bundle *bundle, void *data)
+{
+ dev_set_drvdata(&bundle->dev, data);
+}
+
+static inline void *greybus_get_drvdata(struct gb_bundle *bundle)
+{
+ return dev_get_drvdata(&bundle->dev);
+}
+
+/* Don't call these directly, use the module_greybus_driver() macro instead */
+int greybus_register_driver(struct greybus_driver *driver,
+ struct module *module, const char *mod_name);
+void greybus_deregister_driver(struct greybus_driver *driver);
+
+/* define to get proper THIS_MODULE and KBUILD_MODNAME values */
+#define greybus_register(driver) \
+ greybus_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
+#define greybus_deregister(driver) \
+ greybus_deregister_driver(driver)
+
+/**
+ * module_greybus_driver() - Helper macro for registering a Greybus driver
+ * @__greybus_driver: greybus_driver structure
+ *
+ * Helper macro for Greybus drivers to set up proper module init / exit
+ * functions. Replaces module_init() and module_exit() and keeps people from
+ * printing pointless things to the kernel log when their driver is loaded.
+ */
+#define module_greybus_driver(__greybus_driver) \
+ module_driver(__greybus_driver, greybus_register, greybus_deregister)
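A minimal bundle driver then reduces to a sketch like this (all foo_* names are hypothetical):

	static int foo_probe(struct gb_bundle *bundle,
			     const struct greybus_bundle_id *id)
	{
		/* set up connections and driver state here */
		return 0;
	}

	static void foo_disconnect(struct gb_bundle *bundle)
	{
		/* tear down driver state here */
	}

	static const struct greybus_bundle_id foo_id_table[] = {
		{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_VIBRATOR) },
		{ }
	};

	static struct greybus_driver foo_driver = {
		.name		= "foo",
		.probe		= foo_probe,
		.disconnect	= foo_disconnect,
		.id_table	= foo_id_table,
	};
	module_greybus_driver(foo_driver);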
+
+int greybus_disabled(void);
+
+void gb_debugfs_init(void);
+void gb_debugfs_cleanup(void);
+struct dentry *gb_debugfs_get(void);
+
+extern struct bus_type greybus_bus_type;
+
+extern struct device_type greybus_hd_type;
+extern struct device_type greybus_module_type;
+extern struct device_type greybus_interface_type;
+extern struct device_type greybus_control_type;
+extern struct device_type greybus_bundle_type;
+extern struct device_type greybus_svc_type;
+
+static inline int is_gb_host_device(const struct device *dev)
+{
+ return dev->type == &greybus_hd_type;
+}
+
+static inline int is_gb_module(const struct device *dev)
+{
+ return dev->type == &greybus_module_type;
+}
+
+static inline int is_gb_interface(const struct device *dev)
+{
+ return dev->type == &greybus_interface_type;
+}
+
+static inline int is_gb_control(const struct device *dev)
+{
+ return dev->type == &greybus_control_type;
+}
+
+static inline int is_gb_bundle(const struct device *dev)
+{
+ return dev->type == &greybus_bundle_type;
+}
+
+static inline int is_gb_svc(const struct device *dev)
+{
+ return dev->type == &greybus_svc_type;
+}
+
+static inline bool cport_id_valid(struct gb_host_device *hd, u16 cport_id)
+{
+ return cport_id != CPORT_ID_BAD && cport_id < hd->num_cports;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_GREYBUS_H */
diff --git a/include/linux/greybus/bundle.h b/include/linux/greybus/bundle.h
new file mode 100644
index 000000000000..df8d88424cb7
--- /dev/null
+++ b/include/linux/greybus/bundle.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus bundles
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ */
+
+#ifndef __BUNDLE_H
+#define __BUNDLE_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/pm_runtime.h>
+#include <linux/device.h>
+
+#define BUNDLE_ID_NONE U8_MAX
+
+/* Greybus "public" definitions" */
+struct gb_bundle {
+ struct device dev;
+ struct gb_interface *intf;
+
+ u8 id;
+ u8 class;
+ u8 class_major;
+ u8 class_minor;
+
+ size_t num_cports;
+ struct greybus_descriptor_cport *cport_desc;
+
+ struct list_head connections;
+ u8 *state;
+
+ struct list_head links; /* interface->bundles */
+};
+#define to_gb_bundle(d) container_of(d, struct gb_bundle, dev)
+
+/* Greybus "private" definitions" */
+struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
+ u8 class);
+int gb_bundle_add(struct gb_bundle *bundle);
+void gb_bundle_destroy(struct gb_bundle *bundle);
+
+/* Bundle Runtime PM wrappers */
+#ifdef CONFIG_PM
+static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle)
+{
+ int retval;
+
+ retval = pm_runtime_get_sync(&bundle->dev);
+ if (retval < 0) {
+ dev_err(&bundle->dev,
+ "pm_runtime_get_sync failed: %d\n", retval);
+ pm_runtime_put_noidle(&bundle->dev);
+ return retval;
+ }
+
+ return 0;
+}
+
+static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle)
+{
+ int retval;
+
+ pm_runtime_mark_last_busy(&bundle->dev);
+ retval = pm_runtime_put_autosuspend(&bundle->dev);
+
+ return retval;
+}
+
+static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle)
+{
+ pm_runtime_get_noresume(&bundle->dev);
+}
+
+static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle)
+{
+ pm_runtime_put_noidle(&bundle->dev);
+}
+
+#else
+static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle)
+{ return 0; }
+static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle)
+{ return 0; }
+
+static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle) {}
+static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle) {}
+#endif
+
+#endif /* __BUNDLE_H */
diff --git a/include/linux/greybus/connection.h b/include/linux/greybus/connection.h
new file mode 100644
index 000000000000..d59b7fc1de3e
--- /dev/null
+++ b/include/linux/greybus/connection.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus connections
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ */
+
+#ifndef __CONNECTION_H
+#define __CONNECTION_H
+
+#include <linux/bits.h>
+#include <linux/list.h>
+#include <linux/kfifo.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
+
+#define GB_CONNECTION_FLAG_CSD BIT(0)
+#define GB_CONNECTION_FLAG_NO_FLOWCTRL BIT(1)
+#define GB_CONNECTION_FLAG_OFFLOADED BIT(2)
+#define GB_CONNECTION_FLAG_CDSI1 BIT(3)
+#define GB_CONNECTION_FLAG_CONTROL BIT(4)
+#define GB_CONNECTION_FLAG_HIGH_PRIO BIT(5)
+
+#define GB_CONNECTION_FLAG_CORE_MASK GB_CONNECTION_FLAG_CONTROL
+
+enum gb_connection_state {
+ GB_CONNECTION_STATE_DISABLED = 0,
+ GB_CONNECTION_STATE_ENABLED_TX = 1,
+ GB_CONNECTION_STATE_ENABLED = 2,
+ GB_CONNECTION_STATE_DISCONNECTING = 3,
+};
+
+struct gb_operation;
+
+typedef int (*gb_request_handler_t)(struct gb_operation *);
+
+struct gb_connection {
+ struct gb_host_device *hd;
+ struct gb_interface *intf;
+ struct gb_bundle *bundle;
+ struct kref kref;
+ u16 hd_cport_id;
+ u16 intf_cport_id;
+
+ struct list_head hd_links;
+ struct list_head bundle_links;
+
+ gb_request_handler_t handler;
+ unsigned long flags;
+
+ struct mutex mutex;
+ spinlock_t lock;
+ enum gb_connection_state state;
+ struct list_head operations;
+
+ char name[16];
+ struct workqueue_struct *wq;
+
+ atomic_t op_cycle;
+
+ void *private;
+
+ bool mode_switch;
+};
+
+struct gb_connection *gb_connection_create_static(struct gb_host_device *hd,
+ u16 hd_cport_id, gb_request_handler_t handler);
+struct gb_connection *gb_connection_create_control(struct gb_interface *intf);
+struct gb_connection *gb_connection_create(struct gb_bundle *bundle,
+ u16 cport_id, gb_request_handler_t handler);
+struct gb_connection *gb_connection_create_flags(struct gb_bundle *bundle,
+ u16 cport_id, gb_request_handler_t handler,
+ unsigned long flags);
+struct gb_connection *gb_connection_create_offloaded(struct gb_bundle *bundle,
+ u16 cport_id, unsigned long flags);
+void gb_connection_destroy(struct gb_connection *connection);
+
+static inline bool gb_connection_is_static(struct gb_connection *connection)
+{
+ return !connection->intf;
+}
+
+int gb_connection_enable(struct gb_connection *connection);
+int gb_connection_enable_tx(struct gb_connection *connection);
+void gb_connection_disable_rx(struct gb_connection *connection);
+void gb_connection_disable(struct gb_connection *connection);
+void gb_connection_disable_forced(struct gb_connection *connection);
+
+void gb_connection_mode_switch_prepare(struct gb_connection *connection);
+void gb_connection_mode_switch_complete(struct gb_connection *connection);
+
+void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
+ u8 *data, size_t length);
+
+void gb_connection_latency_tag_enable(struct gb_connection *connection);
+void gb_connection_latency_tag_disable(struct gb_connection *connection);
+
+static inline bool gb_connection_e2efc_enabled(struct gb_connection *connection)
+{
+ return !(connection->flags & GB_CONNECTION_FLAG_CSD);
+}
+
+static inline bool
+gb_connection_flow_control_disabled(struct gb_connection *connection)
+{
+ return connection->flags & GB_CONNECTION_FLAG_NO_FLOWCTRL;
+}
+
+static inline bool gb_connection_is_offloaded(struct gb_connection *connection)
+{
+ return connection->flags & GB_CONNECTION_FLAG_OFFLOADED;
+}
+
+static inline bool gb_connection_is_control(struct gb_connection *connection)
+{
+ return connection->flags & GB_CONNECTION_FLAG_CONTROL;
+}
+
+static inline void *gb_connection_get_data(struct gb_connection *connection)
+{
+ return connection->private;
+}
+
+static inline void gb_connection_set_data(struct gb_connection *connection,
+ void *data)
+{
+ connection->private = data;
+}
+
+#endif /* __CONNECTION_H */
diff --git a/include/linux/greybus/control.h b/include/linux/greybus/control.h
new file mode 100644
index 000000000000..da11fe871653
--- /dev/null
+++ b/include/linux/greybus/control.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus CPort control protocol
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ */
+
+#ifndef __CONTROL_H
+#define __CONTROL_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+struct gb_control {
+ struct device dev;
+ struct gb_interface *intf;
+
+ struct gb_connection *connection;
+
+ u8 protocol_major;
+ u8 protocol_minor;
+
+ bool has_bundle_activate;
+ bool has_bundle_version;
+
+ char *vendor_string;
+ char *product_string;
+};
+#define to_gb_control(d) container_of(d, struct gb_control, dev)
+
+struct gb_control *gb_control_create(struct gb_interface *intf);
+int gb_control_enable(struct gb_control *control);
+void gb_control_disable(struct gb_control *control);
+int gb_control_suspend(struct gb_control *control);
+int gb_control_resume(struct gb_control *control);
+int gb_control_add(struct gb_control *control);
+void gb_control_del(struct gb_control *control);
+struct gb_control *gb_control_get(struct gb_control *control);
+void gb_control_put(struct gb_control *control);
+
+int gb_control_get_bundle_versions(struct gb_control *control);
+int gb_control_connected_operation(struct gb_control *control, u16 cport_id);
+int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id);
+int gb_control_disconnecting_operation(struct gb_control *control,
+ u16 cport_id);
+int gb_control_mode_switch_operation(struct gb_control *control);
+void gb_control_mode_switch_prepare(struct gb_control *control);
+void gb_control_mode_switch_complete(struct gb_control *control);
+int gb_control_get_manifest_size_operation(struct gb_interface *intf);
+int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
+ size_t size);
+int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id);
+int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id);
+int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id);
+int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id);
+int gb_control_interface_suspend_prepare(struct gb_control *control);
+int gb_control_interface_deactivate_prepare(struct gb_control *control);
+int gb_control_interface_hibernate_abort(struct gb_control *control);
+#endif /* __CONTROL_H */
diff --git a/include/linux/greybus/greybus_id.h b/include/linux/greybus/greybus_id.h
new file mode 100644
index 000000000000..f4c8440093e4
--- /dev/null
+++ b/include/linux/greybus/greybus_id.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* FIXME
+ * move this to include/linux/mod_devicetable.h when merging
+ */
+
+#ifndef __LINUX_GREYBUS_ID_H
+#define __LINUX_GREYBUS_ID_H
+
+#include <linux/types.h>
+#include <linux/mod_devicetable.h>
+
+
+struct greybus_bundle_id {
+ __u16 match_flags;
+ __u32 vendor;
+ __u32 product;
+ __u8 class;
+
+ kernel_ulong_t driver_info __aligned(sizeof(kernel_ulong_t));
+};
+
+/* Used to match the greybus_bundle_id */
+#define GREYBUS_ID_MATCH_VENDOR BIT(0)
+#define GREYBUS_ID_MATCH_PRODUCT BIT(1)
+#define GREYBUS_ID_MATCH_CLASS BIT(2)
+
+#endif /* __LINUX_GREYBUS_ID_H */
diff --git a/include/linux/greybus/greybus_manifest.h b/include/linux/greybus/greybus_manifest.h
new file mode 100644
index 000000000000..bef9eb2093e9
--- /dev/null
+++ b/include/linux/greybus/greybus_manifest.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus manifest definition
+ *
+ * See "Greybus Application Protocol" document (version 0.1) for
+ * details on these values and structures.
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ *
+ * Released under the GPLv2 and BSD licenses.
+ */
+
+#ifndef __GREYBUS_MANIFEST_H
+#define __GREYBUS_MANIFEST_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+enum greybus_descriptor_type {
+ GREYBUS_TYPE_INVALID = 0x00,
+ GREYBUS_TYPE_INTERFACE = 0x01,
+ GREYBUS_TYPE_STRING = 0x02,
+ GREYBUS_TYPE_BUNDLE = 0x03,
+ GREYBUS_TYPE_CPORT = 0x04,
+};
+
+enum greybus_protocol {
+ GREYBUS_PROTOCOL_CONTROL = 0x00,
+ /* 0x01 is unused */
+ GREYBUS_PROTOCOL_GPIO = 0x02,
+ GREYBUS_PROTOCOL_I2C = 0x03,
+ GREYBUS_PROTOCOL_UART = 0x04,
+ GREYBUS_PROTOCOL_HID = 0x05,
+ GREYBUS_PROTOCOL_USB = 0x06,
+ GREYBUS_PROTOCOL_SDIO = 0x07,
+ GREYBUS_PROTOCOL_POWER_SUPPLY = 0x08,
+ GREYBUS_PROTOCOL_PWM = 0x09,
+ /* 0x0a is unused */
+ GREYBUS_PROTOCOL_SPI = 0x0b,
+ GREYBUS_PROTOCOL_DISPLAY = 0x0c,
+ GREYBUS_PROTOCOL_CAMERA_MGMT = 0x0d,
+ GREYBUS_PROTOCOL_SENSOR = 0x0e,
+ GREYBUS_PROTOCOL_LIGHTS = 0x0f,
+ GREYBUS_PROTOCOL_VIBRATOR = 0x10,
+ GREYBUS_PROTOCOL_LOOPBACK = 0x11,
+ GREYBUS_PROTOCOL_AUDIO_MGMT = 0x12,
+ GREYBUS_PROTOCOL_AUDIO_DATA = 0x13,
+ GREYBUS_PROTOCOL_SVC = 0x14,
+ GREYBUS_PROTOCOL_BOOTROM = 0x15,
+ GREYBUS_PROTOCOL_CAMERA_DATA = 0x16,
+ GREYBUS_PROTOCOL_FW_DOWNLOAD = 0x17,
+ GREYBUS_PROTOCOL_FW_MANAGEMENT = 0x18,
+ GREYBUS_PROTOCOL_AUTHENTICATION = 0x19,
+ GREYBUS_PROTOCOL_LOG = 0x1a,
+ /* ... */
+ GREYBUS_PROTOCOL_RAW = 0xfe,
+ GREYBUS_PROTOCOL_VENDOR = 0xff,
+};
+
+enum greybus_class_type {
+ GREYBUS_CLASS_CONTROL = 0x00,
+ /* 0x01 is unused */
+ /* 0x02 is unused */
+ /* 0x03 is unused */
+ /* 0x04 is unused */
+ GREYBUS_CLASS_HID = 0x05,
+ /* 0x06 is unused */
+ /* 0x07 is unused */
+ GREYBUS_CLASS_POWER_SUPPLY = 0x08,
+ /* 0x09 is unused */
+ GREYBUS_CLASS_BRIDGED_PHY = 0x0a,
+ /* 0x0b is unused */
+ GREYBUS_CLASS_DISPLAY = 0x0c,
+ GREYBUS_CLASS_CAMERA = 0x0d,
+ GREYBUS_CLASS_SENSOR = 0x0e,
+ GREYBUS_CLASS_LIGHTS = 0x0f,
+ GREYBUS_CLASS_VIBRATOR = 0x10,
+ GREYBUS_CLASS_LOOPBACK = 0x11,
+ GREYBUS_CLASS_AUDIO = 0x12,
+ /* 0x13 is unused */
+ /* 0x14 is unused */
+ GREYBUS_CLASS_BOOTROM = 0x15,
+ GREYBUS_CLASS_FW_MANAGEMENT = 0x16,
+ GREYBUS_CLASS_LOG = 0x17,
+ /* ... */
+ GREYBUS_CLASS_RAW = 0xfe,
+ GREYBUS_CLASS_VENDOR = 0xff,
+};
+
+enum {
+ GREYBUS_INTERFACE_FEATURE_TIMESYNC = BIT(0),
+};
+
+/*
+ * The string in a string descriptor is not NUL-terminated. The
+ * size of the descriptor will be rounded up to a multiple of 4
+ * bytes, by padding the string with 0x00 bytes if necessary.
+ */
+struct greybus_descriptor_string {
+ __u8 length;
+ __u8 id;
+ __u8 string[];
+} __packed;
+
+/*
+ * An interface descriptor describes information about an interface as a whole,
+ * *not* the functions within it.
+ */
+struct greybus_descriptor_interface {
+ __u8 vendor_stringid;
+ __u8 product_stringid;
+ __u8 features;
+ __u8 pad;
+} __packed;
+
+/*
+ * A bundle descriptor defines an identification number and a class for
+ * each bundle.
+ *
+ * @id: Uniquely identifies a bundle within an interface. Its sole purpose
+ * is to allow CPort descriptors to specify which bundle they are
+ * associated with. The first bundle has id 0, the second id 1, and so on.
+ *
+ * The largest CPort id associated with a bundle (defined by a
+ * CPort descriptor in the manifest) is used to determine how to
+ * encode the device id and module number in UniPro packets
+ * that use the bundle.
+ *
+ * @class: Used by the kernel to identify the functionality provided by
+ * the bundle; it is matched against a driver's declared functionality
+ * when probing greybus drivers. It should contain one of the values
+ * defined in 'enum greybus_class_type'.
+ *
+ */
+struct greybus_descriptor_bundle {
+ __u8 id; /* interface-relative id (0..) */
+ __u8 class;
+ __u8 pad[2];
+} __packed;
+
+/*
+ * A CPort descriptor indicates the id of the bundle within the
+ * module it's associated with, along with the CPort id used to
+ * address the CPort. The protocol id defines the format of messages
+ * exchanged using the CPort.
+ */
+struct greybus_descriptor_cport {
+ __le16 id;
+ __u8 bundle;
+ __u8 protocol_id; /* enum greybus_protocol */
+} __packed;
+
+struct greybus_descriptor_header {
+ __le16 size;
+ __u8 type; /* enum greybus_descriptor_type */
+ __u8 pad;
+} __packed;
+
+struct greybus_descriptor {
+ struct greybus_descriptor_header header;
+ union {
+ struct greybus_descriptor_string string;
+ struct greybus_descriptor_interface interface;
+ struct greybus_descriptor_bundle bundle;
+ struct greybus_descriptor_cport cport;
+ };
+} __packed;
+
+struct greybus_manifest_header {
+ __le16 size;
+ __u8 version_major;
+ __u8 version_minor;
+} __packed;
+
+struct greybus_manifest {
+ struct greybus_manifest_header header;
+ struct greybus_descriptor descriptors[];
+} __packed;
+
+#endif /* __GREYBUS_MANIFEST_H */
diff --git a/include/linux/greybus/greybus_protocols.h b/include/linux/greybus/greybus_protocols.h
new file mode 100644
index 000000000000..aeb8f9243545
--- /dev/null
+++ b/include/linux/greybus/greybus_protocols.h
@@ -0,0 +1,2178 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2014 - 2015 Google Inc. All rights reserved.
+ * Copyright(c) 2014 - 2015 Linaro Ltd. All rights reserved.
+ */
+
+#ifndef __GREYBUS_PROTOCOLS_H
+#define __GREYBUS_PROTOCOLS_H
+
+#include <linux/types.h>
+
+/* Fixed IDs for control/svc protocols */
+
+/* SVC switch-port device ids */
+#define GB_SVC_DEVICE_ID_SVC 0
+#define GB_SVC_DEVICE_ID_AP 1
+#define GB_SVC_DEVICE_ID_MIN 2
+#define GB_SVC_DEVICE_ID_MAX 31
+
+#define GB_SVC_CPORT_ID 0
+#define GB_CONTROL_BUNDLE_ID 0
+#define GB_CONTROL_CPORT_ID 0
+
+
+/*
+ * All operation messages (both requests and responses) begin with
+ * a header that encodes the size of the message (header included).
+ * This header also contains a unique identifier, that associates a
+ * response message with its operation. The header contains an
+ * operation type field, whose interpretation is dependent on what
+ * type of protocol is used over the connection. The high bit
+ * (0x80) of the operation type field is used to indicate whether
+ * the message is a request (clear) or a response (set).
+ *
+ * Response messages include an additional result byte, which
+ * communicates the result of the corresponding request. A zero
+ * result value means the operation completed successfully. Any
+ * other value indicates an error; in this case, the payload of the
+ * response message (if any) is ignored. The result byte must be
+ * zero in the header for a request message.
+ *
+ * The wire format for all numeric fields in the header is little
+ * endian. Any operation-specific data begins immediately after the
+ * header.
+ */
+struct gb_operation_msg_hdr {
+ __le16 size; /* Size in bytes of header + payload */
+ __le16 operation_id; /* Operation unique id */
+ __u8 type; /* E.g. GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
+ __u8 result; /* Result of request (in responses only) */
+ __u8 pad[2]; /* must be zero (ignore when read) */
+} __packed;
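+
+/*
+ * Illustrative sketch, not part of the original header: filling in a
+ * request header as described in the comment above.  The macro and
+ * helper names are hypothetical; cpu_to_le16() is assumed to be
+ * available via <linux/byteorder/generic.h>.
+ */
+#define GB_OPERATION_RESPONSE_BIT	0x80	/* the 0x80 flag above */
+
+static inline bool gb_operation_msg_is_response(const struct gb_operation_msg_hdr *hdr)
+{
+	return hdr->type & GB_OPERATION_RESPONSE_BIT;
+}
+
+static inline void gb_operation_msg_hdr_fill_request(struct gb_operation_msg_hdr *hdr,
+						     __u16 operation_id,
+						     __u8 type,
+						     size_t payload_size)
+{
+	hdr->size = cpu_to_le16(sizeof(*hdr) + payload_size);
+	hdr->operation_id = cpu_to_le16(operation_id);
+	hdr->type = type & ~GB_OPERATION_RESPONSE_BIT;	/* high bit clear */
+	hdr->result = 0;	/* must be zero in a request */
+	hdr->pad[0] = 0;
+	hdr->pad[1] = 0;
+}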
+
+
+/* Generic request types */
+#define GB_REQUEST_TYPE_CPORT_SHUTDOWN 0x00
+#define GB_REQUEST_TYPE_INVALID 0x7f
+
+struct gb_cport_shutdown_request {
+ __u8 phase;
+} __packed;
+
+
+/* Control Protocol */
+
+/* Greybus control request types */
+#define GB_CONTROL_TYPE_VERSION 0x01
+#define GB_CONTROL_TYPE_PROBE_AP 0x02
+#define GB_CONTROL_TYPE_GET_MANIFEST_SIZE 0x03
+#define GB_CONTROL_TYPE_GET_MANIFEST 0x04
+#define GB_CONTROL_TYPE_CONNECTED 0x05
+#define GB_CONTROL_TYPE_DISCONNECTED 0x06
+#define GB_CONTROL_TYPE_TIMESYNC_ENABLE 0x07
+#define GB_CONTROL_TYPE_TIMESYNC_DISABLE 0x08
+#define GB_CONTROL_TYPE_TIMESYNC_AUTHORITATIVE 0x09
+/* Unused 0x0a */
+#define GB_CONTROL_TYPE_BUNDLE_VERSION 0x0b
+#define GB_CONTROL_TYPE_DISCONNECTING 0x0c
+#define GB_CONTROL_TYPE_TIMESYNC_GET_LAST_EVENT 0x0d
+#define GB_CONTROL_TYPE_MODE_SWITCH 0x0e
+#define GB_CONTROL_TYPE_BUNDLE_SUSPEND 0x0f
+#define GB_CONTROL_TYPE_BUNDLE_RESUME 0x10
+#define GB_CONTROL_TYPE_BUNDLE_DEACTIVATE 0x11
+#define GB_CONTROL_TYPE_BUNDLE_ACTIVATE 0x12
+#define GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE 0x13
+#define GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE 0x14
+#define GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT 0x15
+
+struct gb_control_version_request {
+ __u8 major;
+ __u8 minor;
+} __packed;
+
+struct gb_control_version_response {
+ __u8 major;
+ __u8 minor;
+} __packed;
+
+struct gb_control_bundle_version_request {
+ __u8 bundle_id;
+} __packed;
+
+struct gb_control_bundle_version_response {
+ __u8 major;
+ __u8 minor;
+} __packed;
+
+/* Control protocol manifest get size request has no payload */
+struct gb_control_get_manifest_size_response {
+ __le16 size;
+} __packed;
+
+/* Control protocol manifest get request has no payload */
+struct gb_control_get_manifest_response {
+ __u8 data[0];
+} __packed;
+
+/* Control protocol [dis]connected request */
+struct gb_control_connected_request {
+ __le16 cport_id;
+} __packed;
+
+struct gb_control_disconnecting_request {
+ __le16 cport_id;
+} __packed;
+/* disconnecting response has no payload */
+
+struct gb_control_disconnected_request {
+ __le16 cport_id;
+} __packed;
+/* Control protocol [dis]connected response has no payload */
+
+/*
+ * All Bundle power management operations use the same request and response
+ * layout and status codes.
+ */
+
+#define GB_CONTROL_BUNDLE_PM_OK 0x00
+#define GB_CONTROL_BUNDLE_PM_INVAL 0x01
+#define GB_CONTROL_BUNDLE_PM_BUSY 0x02
+#define GB_CONTROL_BUNDLE_PM_FAIL 0x03
+#define GB_CONTROL_BUNDLE_PM_NA 0x04
+
+struct gb_control_bundle_pm_request {
+ __u8 bundle_id;
+} __packed;
+
+struct gb_control_bundle_pm_response {
+ __u8 status;
+} __packed;
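+
+/*
+ * Illustrative sketch, not part of the original header: one plausible
+ * mapping of the shared bundle PM status codes above to errno values.
+ * The mapping and helper name are assumptions for demonstration only;
+ * <linux/errno.h> is assumed to be available.
+ */
+static inline int gb_control_bundle_pm_status_to_errno(__u8 status)
+{
+	switch (status) {
+	case GB_CONTROL_BUNDLE_PM_OK:
+		return 0;
+	case GB_CONTROL_BUNDLE_PM_INVAL:
+		return -EINVAL;
+	case GB_CONTROL_BUNDLE_PM_BUSY:
+		return -EBUSY;
+	case GB_CONTROL_BUNDLE_PM_NA:
+		return -EOPNOTSUPP;
+	case GB_CONTROL_BUNDLE_PM_FAIL:
+	default:
+		return -EREMOTEIO;
+	}
+}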
+
+/*
+ * Interface Suspend Prepare and Deactivate Prepare operations use the same
+ * response layout and error codes. Define a single response structure and reuse
+ * it. Both operations have no payload.
+ */
+
+#define GB_CONTROL_INTF_PM_OK 0x00
+#define GB_CONTROL_INTF_PM_BUSY 0x01
+#define GB_CONTROL_INTF_PM_NA 0x02
+
+struct gb_control_intf_pm_response {
+ __u8 status;
+} __packed;
+
+/* APBridge protocol */
+
+/* request APB1 log */
+#define GB_APB_REQUEST_LOG 0x02
+
+/* request to map a cport to bulk in and bulk out endpoints */
+#define GB_APB_REQUEST_EP_MAPPING 0x03
+
+/* request to get the number of cports available */
+#define GB_APB_REQUEST_CPORT_COUNT 0x04
+
+/* request to reset a cport state */
+#define GB_APB_REQUEST_RESET_CPORT 0x05
+
+/* request to time the latency of messages on a given cport */
+#define GB_APB_REQUEST_LATENCY_TAG_EN 0x06
+#define GB_APB_REQUEST_LATENCY_TAG_DIS 0x07
+
+/* request to control the CSI transmitter */
+#define GB_APB_REQUEST_CSI_TX_CONTROL 0x08
+
+/* request to control audio streaming */
+#define GB_APB_REQUEST_AUDIO_CONTROL 0x09
+
+/* TimeSync requests */
+#define GB_APB_REQUEST_TIMESYNC_ENABLE 0x0d
+#define GB_APB_REQUEST_TIMESYNC_DISABLE 0x0e
+#define GB_APB_REQUEST_TIMESYNC_AUTHORITATIVE 0x0f
+#define GB_APB_REQUEST_TIMESYNC_GET_LAST_EVENT 0x10
+
+/* requests to set Greybus CPort flags */
+#define GB_APB_REQUEST_CPORT_FLAGS 0x11
+
+/* ARPC request */
+#define GB_APB_REQUEST_ARPC_RUN 0x12
+
+struct gb_apb_request_cport_flags {
+ __le32 flags;
+#define GB_APB_CPORT_FLAG_CONTROL 0x01
+#define GB_APB_CPORT_FLAG_HIGH_PRIO 0x02
+} __packed;
+
+
+/* Firmware Download Protocol */
+
+/* Request Types */
+#define GB_FW_DOWNLOAD_TYPE_FIND_FIRMWARE 0x01
+#define GB_FW_DOWNLOAD_TYPE_FETCH_FIRMWARE 0x02
+#define GB_FW_DOWNLOAD_TYPE_RELEASE_FIRMWARE 0x03
+
+#define GB_FIRMWARE_TAG_MAX_SIZE 10
+
+/* firmware download find firmware request/response */
+struct gb_fw_download_find_firmware_request {
+ __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
+} __packed;
+
+struct gb_fw_download_find_firmware_response {
+ __u8 firmware_id;
+ __le32 size;
+} __packed;
+
+/* firmware download fetch firmware request/response */
+struct gb_fw_download_fetch_firmware_request {
+ __u8 firmware_id;
+ __le32 offset;
+ __le32 size;
+} __packed;
+
+struct gb_fw_download_fetch_firmware_response {
+ __u8 data[0];
+} __packed;
+
+/* firmware download release firmware request */
+struct gb_fw_download_release_firmware_request {
+ __u8 firmware_id;
+} __packed;
+/* firmware download release firmware response has no payload */
+
+
+/* Firmware Management Protocol */
+
+/* Request Types */
+#define GB_FW_MGMT_TYPE_INTERFACE_FW_VERSION 0x01
+#define GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW 0x02
+#define GB_FW_MGMT_TYPE_LOADED_FW 0x03
+#define GB_FW_MGMT_TYPE_BACKEND_FW_VERSION 0x04
+#define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE 0x05
+#define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED 0x06
+
+#define GB_FW_LOAD_METHOD_UNIPRO 0x01
+#define GB_FW_LOAD_METHOD_INTERNAL 0x02
+
+#define GB_FW_LOAD_STATUS_FAILED 0x00
+#define GB_FW_LOAD_STATUS_UNVALIDATED 0x01
+#define GB_FW_LOAD_STATUS_VALIDATED 0x02
+#define GB_FW_LOAD_STATUS_VALIDATION_FAILED 0x03
+
+#define GB_FW_BACKEND_FW_STATUS_SUCCESS 0x01
+#define GB_FW_BACKEND_FW_STATUS_FAIL_FIND 0x02
+#define GB_FW_BACKEND_FW_STATUS_FAIL_FETCH 0x03
+#define GB_FW_BACKEND_FW_STATUS_FAIL_WRITE 0x04
+#define GB_FW_BACKEND_FW_STATUS_INT 0x05
+#define GB_FW_BACKEND_FW_STATUS_RETRY 0x06
+#define GB_FW_BACKEND_FW_STATUS_NOT_SUPPORTED 0x07
+
+#define GB_FW_BACKEND_VERSION_STATUS_SUCCESS 0x01
+#define GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE 0x02
+#define GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED 0x03
+#define GB_FW_BACKEND_VERSION_STATUS_RETRY 0x04
+#define GB_FW_BACKEND_VERSION_STATUS_FAIL_INT 0x05
+
+/* firmware management interface firmware version request has no payload */
+struct gb_fw_mgmt_interface_fw_version_response {
+ __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
+ __le16 major;
+ __le16 minor;
+} __packed;
+
+/* firmware management load and validate firmware request/response */
+struct gb_fw_mgmt_load_and_validate_fw_request {
+ __u8 request_id;
+ __u8 load_method;
+ __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
+} __packed;
+/* firmware management load and validate firmware response has no payload */
+
+/* firmware management loaded firmware request */
+struct gb_fw_mgmt_loaded_fw_request {
+ __u8 request_id;
+ __u8 status;
+ __le16 major;
+ __le16 minor;
+} __packed;
+/* firmware management loaded firmware response has no payload */
+
+/* firmware management backend firmware version request/response */
+struct gb_fw_mgmt_backend_fw_version_request {
+ __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
+} __packed;
+
+struct gb_fw_mgmt_backend_fw_version_response {
+ __le16 major;
+ __le16 minor;
+ __u8 status;
+} __packed;
+
+/* firmware management backend firmware update request */
+struct gb_fw_mgmt_backend_fw_update_request {
+ __u8 request_id;
+ __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
+} __packed;
+/* firmware management backend firmware update response has no payload */
+
+/* firmware management backend firmware updated request */
+struct gb_fw_mgmt_backend_fw_updated_request {
+ __u8 request_id;
+ __u8 status;
+} __packed;
+/* firmware management backend firmware updated response has no payload */
+
+
+/* Component Authentication Protocol (CAP) */
+
+/* Request Types */
+#define GB_CAP_TYPE_GET_ENDPOINT_UID 0x01
+#define GB_CAP_TYPE_GET_IMS_CERTIFICATE 0x02
+#define GB_CAP_TYPE_AUTHENTICATE 0x03
+
+/* CAP get endpoint uid request has no payload */
+struct gb_cap_get_endpoint_uid_response {
+ __u8 uid[8];
+} __packed;
+
+/* CAP get endpoint ims certificate request/response */
+struct gb_cap_get_ims_certificate_request {
+ __le32 certificate_class;
+ __le32 certificate_id;
+} __packed;
+
+struct gb_cap_get_ims_certificate_response {
+ __u8 result_code;
+ __u8 certificate[];
+} __packed;
+
+/* CAP authenticate request/response */
+struct gb_cap_authenticate_request {
+ __le32 auth_type;
+ __u8 uid[8];
+ __u8 challenge[32];
+} __packed;
+
+struct gb_cap_authenticate_response {
+ __u8 result_code;
+ __u8 response[64];
+ __u8 signature[];
+} __packed;
+
+
+/* Bootrom Protocol */
+
+/* Version of the Greybus bootrom protocol we support */
+#define GB_BOOTROM_VERSION_MAJOR 0x00
+#define GB_BOOTROM_VERSION_MINOR 0x01
+
+/* Greybus bootrom request types */
+#define GB_BOOTROM_TYPE_VERSION 0x01
+#define GB_BOOTROM_TYPE_FIRMWARE_SIZE 0x02
+#define GB_BOOTROM_TYPE_GET_FIRMWARE 0x03
+#define GB_BOOTROM_TYPE_READY_TO_BOOT 0x04
+#define GB_BOOTROM_TYPE_AP_READY 0x05 /* Request with no payload */
+#define GB_BOOTROM_TYPE_GET_VID_PID 0x06 /* Request with no payload */
+
+/* Greybus bootrom boot stages */
+#define GB_BOOTROM_BOOT_STAGE_ONE 0x01 /* Reserved for the boot ROM */
+#define GB_BOOTROM_BOOT_STAGE_TWO 0x02 /* Bootrom package to be loaded by the boot ROM */
+#define GB_BOOTROM_BOOT_STAGE_THREE 0x03 /* Module personality package loaded by Stage 2 firmware */
+
+/* Greybus bootrom ready to boot status */
+#define GB_BOOTROM_BOOT_STATUS_INVALID 0x00 /* Firmware blob could not be validated */
+#define GB_BOOTROM_BOOT_STATUS_INSECURE 0x01 /* Firmware blob is valid but insecure */
+#define GB_BOOTROM_BOOT_STATUS_SECURE 0x02 /* Firmware blob is valid and secure */
+
+/* Max bootrom data fetch size in bytes */
+#define GB_BOOTROM_FETCH_MAX 2000
+
+struct gb_bootrom_version_request {
+ __u8 major;
+ __u8 minor;
+} __packed;
+
+struct gb_bootrom_version_response {
+ __u8 major;
+ __u8 minor;
+} __packed;
+
+/* Bootrom protocol firmware size request/response */
+struct gb_bootrom_firmware_size_request {
+ __u8 stage;
+} __packed;
+
+struct gb_bootrom_firmware_size_response {
+ __le32 size;
+} __packed;
+
+/* Bootrom protocol get firmware request/response */
+struct gb_bootrom_get_firmware_request {
+ __le32 offset;
+ __le32 size;
+} __packed;
+
+struct gb_bootrom_get_firmware_response {
+ __u8 data[0];
+} __packed;
+
+/* Bootrom protocol Ready to boot request */
+struct gb_bootrom_ready_to_boot_request {
+ __u8 status;
+} __packed;
+/* Bootrom protocol Ready to boot response has no payload */
+
+/* Bootrom protocol get VID/PID request has no payload */
+struct gb_bootrom_get_vid_pid_response {
+ __le32 vendor_id;
+ __le32 product_id;
+} __packed;
+
+
+/* Power Supply */
+
+/* Greybus power supply request types */
+#define GB_POWER_SUPPLY_TYPE_GET_SUPPLIES 0x02
+#define GB_POWER_SUPPLY_TYPE_GET_DESCRIPTION 0x03
+#define GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS 0x04
+#define GB_POWER_SUPPLY_TYPE_GET_PROPERTY 0x05
+#define GB_POWER_SUPPLY_TYPE_SET_PROPERTY 0x06
+#define GB_POWER_SUPPLY_TYPE_EVENT 0x07
+
+/* Greybus power supply battery technologies types */
+#define GB_POWER_SUPPLY_TECH_UNKNOWN 0x0000
+#define GB_POWER_SUPPLY_TECH_NiMH 0x0001
+#define GB_POWER_SUPPLY_TECH_LION 0x0002
+#define GB_POWER_SUPPLY_TECH_LIPO 0x0003
+#define GB_POWER_SUPPLY_TECH_LiFe 0x0004
+#define GB_POWER_SUPPLY_TECH_NiCd 0x0005
+#define GB_POWER_SUPPLY_TECH_LiMn 0x0006
+
+/* Greybus power supply types */
+#define GB_POWER_SUPPLY_UNKNOWN_TYPE 0x0000
+#define GB_POWER_SUPPLY_BATTERY_TYPE 0x0001
+#define GB_POWER_SUPPLY_UPS_TYPE 0x0002
+#define GB_POWER_SUPPLY_MAINS_TYPE 0x0003
+#define GB_POWER_SUPPLY_USB_TYPE 0x0004
+#define GB_POWER_SUPPLY_USB_DCP_TYPE 0x0005
+#define GB_POWER_SUPPLY_USB_CDP_TYPE 0x0006
+#define GB_POWER_SUPPLY_USB_ACA_TYPE 0x0007
+
+/* Greybus power supply health values */
+#define GB_POWER_SUPPLY_HEALTH_UNKNOWN 0x0000
+#define GB_POWER_SUPPLY_HEALTH_GOOD 0x0001
+#define GB_POWER_SUPPLY_HEALTH_OVERHEAT 0x0002
+#define GB_POWER_SUPPLY_HEALTH_DEAD 0x0003
+#define GB_POWER_SUPPLY_HEALTH_OVERVOLTAGE 0x0004
+#define GB_POWER_SUPPLY_HEALTH_UNSPEC_FAILURE 0x0005
+#define GB_POWER_SUPPLY_HEALTH_COLD 0x0006
+#define GB_POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE 0x0007
+#define GB_POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE 0x0008
+
+/* Greybus power supply status values */
+#define GB_POWER_SUPPLY_STATUS_UNKNOWN 0x0000
+#define GB_POWER_SUPPLY_STATUS_CHARGING 0x0001
+#define GB_POWER_SUPPLY_STATUS_DISCHARGING 0x0002
+#define GB_POWER_SUPPLY_STATUS_NOT_CHARGING 0x0003
+#define GB_POWER_SUPPLY_STATUS_FULL 0x0004
+
+/* Greybus power supply capacity level values */
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN 0x0000
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL 0x0001
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_LOW 0x0002
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_NORMAL 0x0003
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_HIGH 0x0004
+#define GB_POWER_SUPPLY_CAPACITY_LEVEL_FULL 0x0005
+
+/* Greybus power supply scope values */
+#define GB_POWER_SUPPLY_SCOPE_UNKNOWN 0x0000
+#define GB_POWER_SUPPLY_SCOPE_SYSTEM 0x0001
+#define GB_POWER_SUPPLY_SCOPE_DEVICE 0x0002
+
+struct gb_power_supply_get_supplies_response {
+ __u8 supplies_count;
+} __packed;
+
+struct gb_power_supply_get_description_request {
+ __u8 psy_id;
+} __packed;
+
+struct gb_power_supply_get_description_response {
+ __u8 manufacturer[32];
+ __u8 model[32];
+ __u8 serial_number[32];
+ __le16 type;
+ __u8 properties_count;
+} __packed;
+
+struct gb_power_supply_props_desc {
+ __u8 property;
+#define GB_POWER_SUPPLY_PROP_STATUS 0x00
+#define GB_POWER_SUPPLY_PROP_CHARGE_TYPE 0x01
+#define GB_POWER_SUPPLY_PROP_HEALTH 0x02
+#define GB_POWER_SUPPLY_PROP_PRESENT 0x03
+#define GB_POWER_SUPPLY_PROP_ONLINE 0x04
+#define GB_POWER_SUPPLY_PROP_AUTHENTIC 0x05
+#define GB_POWER_SUPPLY_PROP_TECHNOLOGY 0x06
+#define GB_POWER_SUPPLY_PROP_CYCLE_COUNT 0x07
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX 0x08
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN 0x09
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN 0x0A
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN 0x0B
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_NOW 0x0C
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_AVG 0x0D
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_OCV 0x0E
+#define GB_POWER_SUPPLY_PROP_VOLTAGE_BOOT 0x0F
+#define GB_POWER_SUPPLY_PROP_CURRENT_MAX 0x10
+#define GB_POWER_SUPPLY_PROP_CURRENT_NOW 0x11
+#define GB_POWER_SUPPLY_PROP_CURRENT_AVG 0x12
+#define GB_POWER_SUPPLY_PROP_CURRENT_BOOT 0x13
+#define GB_POWER_SUPPLY_PROP_POWER_NOW 0x14
+#define GB_POWER_SUPPLY_PROP_POWER_AVG 0x15
+#define GB_POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN 0x16
+#define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN 0x17
+#define GB_POWER_SUPPLY_PROP_CHARGE_FULL 0x18
+#define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY 0x19
+#define GB_POWER_SUPPLY_PROP_CHARGE_NOW 0x1A
+#define GB_POWER_SUPPLY_PROP_CHARGE_AVG 0x1B
+#define GB_POWER_SUPPLY_PROP_CHARGE_COUNTER 0x1C
+#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT 0x1D
+#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX 0x1E
+#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE 0x1F
+#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX 0x20
+#define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT 0x21
+#define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX 0x22
+#define GB_POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT 0x23
+#define GB_POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN 0x24
+#define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN 0x25
+#define GB_POWER_SUPPLY_PROP_ENERGY_FULL 0x26
+#define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY 0x27
+#define GB_POWER_SUPPLY_PROP_ENERGY_NOW 0x28
+#define GB_POWER_SUPPLY_PROP_ENERGY_AVG 0x29
+#define GB_POWER_SUPPLY_PROP_CAPACITY 0x2A
+#define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN 0x2B
+#define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX 0x2C
+#define GB_POWER_SUPPLY_PROP_CAPACITY_LEVEL 0x2D
+#define GB_POWER_SUPPLY_PROP_TEMP 0x2E
+#define GB_POWER_SUPPLY_PROP_TEMP_MAX 0x2F
+#define GB_POWER_SUPPLY_PROP_TEMP_MIN 0x30
+#define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MIN 0x31
+#define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MAX 0x32
+#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT 0x33
+#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN 0x34
+#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX 0x35
+#define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW 0x36
+#define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG 0x37
+#define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_NOW 0x38
+#define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_AVG 0x39
+#define GB_POWER_SUPPLY_PROP_TYPE 0x3A
+#define GB_POWER_SUPPLY_PROP_SCOPE 0x3B
+#define GB_POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT 0x3C
+#define GB_POWER_SUPPLY_PROP_CALIBRATE 0x3D
+ __u8 is_writeable;
+} __packed;
+
+struct gb_power_supply_get_property_descriptors_request {
+ __u8 psy_id;
+} __packed;
+
+struct gb_power_supply_get_property_descriptors_response {
+ __u8 properties_count;
+ struct gb_power_supply_props_desc props[];
+} __packed;
+
+struct gb_power_supply_get_property_request {
+ __u8 psy_id;
+ __u8 property;
+} __packed;
+
+struct gb_power_supply_get_property_response {
+ __le32 prop_val;
+} __packed;
+
+struct gb_power_supply_set_property_request {
+ __u8 psy_id;
+ __u8 property;
+ __le32 prop_val;
+} __packed;
+
+struct gb_power_supply_event_request {
+ __u8 psy_id;
+ __u8 event;
+#define GB_POWER_SUPPLY_UPDATE 0x01
+} __packed;
+
+
+/* HID */
+
+/* Greybus HID operation types */
+#define GB_HID_TYPE_GET_DESC 0x02
+#define GB_HID_TYPE_GET_REPORT_DESC 0x03
+#define GB_HID_TYPE_PWR_ON 0x04
+#define GB_HID_TYPE_PWR_OFF 0x05
+#define GB_HID_TYPE_GET_REPORT 0x06
+#define GB_HID_TYPE_SET_REPORT 0x07
+#define GB_HID_TYPE_IRQ_EVENT 0x08
+
+/* Report type */
+#define GB_HID_INPUT_REPORT 0
+#define GB_HID_OUTPUT_REPORT 1
+#define GB_HID_FEATURE_REPORT 2
+
+/* Different request/response structures */
+/* HID get descriptor response */
+struct gb_hid_desc_response {
+ __u8 bLength;
+ __le16 wReportDescLength;
+ __le16 bcdHID;
+ __le16 wProductID;
+ __le16 wVendorID;
+ __u8 bCountryCode;
+} __packed;
+
+/* HID get report request/response */
+struct gb_hid_get_report_request {
+ __u8 report_type;
+ __u8 report_id;
+} __packed;
+
+/* HID set report request */
+struct gb_hid_set_report_request {
+ __u8 report_type;
+ __u8 report_id;
+ __u8 report[];
+} __packed;
+
+/* HID input report request, via interrupt pipe */
+struct gb_hid_input_report_request {
+ __u8 report[0];
+} __packed;
+
+
+/* I2C */
+
+/* Greybus i2c request types */
+#define GB_I2C_TYPE_FUNCTIONALITY 0x02
+#define GB_I2C_TYPE_TRANSFER 0x05
+
+/* functionality request has no payload */
+struct gb_i2c_functionality_response {
+ __le32 functionality;
+} __packed;
+
+/*
+ * Outgoing data immediately follows the op count and ops array.
+ * The data for each write (master -> slave) op in the array is sent
+ * in order, with no (e.g. pad) bytes separating them.
+ *
+ * Short reads cause the entire transfer request to fail, so the
+ * response payload consists only of the bytes read, and the number of
+ * bytes is exactly what was specified in the corresponding op. Like
+ * the outgoing data, the incoming data is in order and contiguous.
+ */
+struct gb_i2c_transfer_op {
+ __le16 addr;
+ __le16 flags;
+ __le16 size;
+} __packed;
+
+struct gb_i2c_transfer_request {
+ __le16 op_count;
+ struct gb_i2c_transfer_op ops[]; /* op_count of these */
+} __packed;
+struct gb_i2c_transfer_response {
+ __u8 data[0]; /* inbound data */
+} __packed;
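+
+/*
+ * Illustrative sketch, not part of the original header: the total
+ * request size implied by the layout described above, for op_count ops
+ * whose write payloads together occupy tx_bytes.  The helper name is
+ * hypothetical.
+ */
+static inline size_t gb_i2c_transfer_request_size(__u16 op_count,
+						  size_t tx_bytes)
+{
+	return sizeof(struct gb_i2c_transfer_request) +
+	       op_count * sizeof(struct gb_i2c_transfer_op) + tx_bytes;
+}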
+
+
+/* GPIO */
+
+/* Greybus GPIO request types */
+#define GB_GPIO_TYPE_LINE_COUNT 0x02
+#define GB_GPIO_TYPE_ACTIVATE 0x03
+#define GB_GPIO_TYPE_DEACTIVATE 0x04
+#define GB_GPIO_TYPE_GET_DIRECTION 0x05
+#define GB_GPIO_TYPE_DIRECTION_IN 0x06
+#define GB_GPIO_TYPE_DIRECTION_OUT 0x07
+#define GB_GPIO_TYPE_GET_VALUE 0x08
+#define GB_GPIO_TYPE_SET_VALUE 0x09
+#define GB_GPIO_TYPE_SET_DEBOUNCE 0x0a
+#define GB_GPIO_TYPE_IRQ_TYPE 0x0b
+#define GB_GPIO_TYPE_IRQ_MASK 0x0c
+#define GB_GPIO_TYPE_IRQ_UNMASK 0x0d
+#define GB_GPIO_TYPE_IRQ_EVENT 0x0e
+
+#define GB_GPIO_IRQ_TYPE_NONE 0x00
+#define GB_GPIO_IRQ_TYPE_EDGE_RISING 0x01
+#define GB_GPIO_IRQ_TYPE_EDGE_FALLING 0x02
+#define GB_GPIO_IRQ_TYPE_EDGE_BOTH 0x03
+#define GB_GPIO_IRQ_TYPE_LEVEL_HIGH 0x04
+#define GB_GPIO_IRQ_TYPE_LEVEL_LOW 0x08
+
+/* line count request has no payload */
+struct gb_gpio_line_count_response {
+ __u8 count;
+} __packed;
+
+struct gb_gpio_activate_request {
+ __u8 which;
+} __packed;
+/* activate response has no payload */
+
+struct gb_gpio_deactivate_request {
+ __u8 which;
+} __packed;
+/* deactivate response has no payload */
+
+struct gb_gpio_get_direction_request {
+ __u8 which;
+} __packed;
+struct gb_gpio_get_direction_response {
+ __u8 direction;
+} __packed;
+
+struct gb_gpio_direction_in_request {
+ __u8 which;
+} __packed;
+/* direction in response has no payload */
+
+struct gb_gpio_direction_out_request {
+ __u8 which;
+ __u8 value;
+} __packed;
+/* direction out response has no payload */
+
+struct gb_gpio_get_value_request {
+ __u8 which;
+} __packed;
+struct gb_gpio_get_value_response {
+ __u8 value;
+} __packed;
+
+struct gb_gpio_set_value_request {
+ __u8 which;
+ __u8 value;
+} __packed;
+/* set value response has no payload */
+
+struct gb_gpio_set_debounce_request {
+ __u8 which;
+ __le16 usec;
+} __packed;
+/* debounce response has no payload */
+
+struct gb_gpio_irq_type_request {
+ __u8 which;
+ __u8 type;
+} __packed;
+/* irq type response has no payload */
+
+struct gb_gpio_irq_mask_request {
+ __u8 which;
+} __packed;
+/* irq mask response has no payload */
+
+struct gb_gpio_irq_unmask_request {
+ __u8 which;
+} __packed;
+/* irq unmask response has no payload */
+
+/* irq event requests originate on another module and are handled on the AP */
+struct gb_gpio_irq_event_request {
+ __u8 which;
+} __packed;
+/* irq event has no response */
+
+
+/* PWM */
+
+/* Greybus PWM operation types */
+#define GB_PWM_TYPE_PWM_COUNT 0x02
+#define GB_PWM_TYPE_ACTIVATE 0x03
+#define GB_PWM_TYPE_DEACTIVATE 0x04
+#define GB_PWM_TYPE_CONFIG 0x05
+#define GB_PWM_TYPE_POLARITY 0x06
+#define GB_PWM_TYPE_ENABLE 0x07
+#define GB_PWM_TYPE_DISABLE 0x08
+
+/* pwm count request has no payload */
+struct gb_pwm_count_response {
+ __u8 count;
+} __packed;
+
+struct gb_pwm_activate_request {
+ __u8 which;
+} __packed;
+
+struct gb_pwm_deactivate_request {
+ __u8 which;
+} __packed;
+
+struct gb_pwm_config_request {
+ __u8 which;
+ __le32 duty;
+ __le32 period;
+} __packed;
+
+struct gb_pwm_polarity_request {
+ __u8 which;
+ __u8 polarity;
+} __packed;
+
+struct gb_pwm_enable_request {
+ __u8 which;
+} __packed;
+
+struct gb_pwm_disable_request {
+ __u8 which;
+} __packed;
+
+/* SPI */
+
+/* Should match up with modes in linux/spi/spi.h */
+#define GB_SPI_MODE_CPHA 0x01 /* clock phase */
+#define GB_SPI_MODE_CPOL 0x02 /* clock polarity */
+#define GB_SPI_MODE_MODE_0 (0 | 0) /* (original MicroWire) */
+#define GB_SPI_MODE_MODE_1 (0 | GB_SPI_MODE_CPHA)
+#define GB_SPI_MODE_MODE_2 (GB_SPI_MODE_CPOL | 0)
+#define GB_SPI_MODE_MODE_3 (GB_SPI_MODE_CPOL | GB_SPI_MODE_CPHA)
+#define GB_SPI_MODE_CS_HIGH 0x04 /* chipselect active high? */
+#define GB_SPI_MODE_LSB_FIRST 0x08 /* per-word bits-on-wire */
+#define GB_SPI_MODE_3WIRE 0x10 /* SI/SO signals shared */
+#define GB_SPI_MODE_LOOP 0x20 /* loopback mode */
+#define GB_SPI_MODE_NO_CS 0x40 /* 1 dev/bus, no chipselect */
+#define GB_SPI_MODE_READY 0x80 /* slave pulls low to pause */
+
+/* Should match up with flags in linux/spi/spi.h */
+#define GB_SPI_FLAG_HALF_DUPLEX BIT(0) /* can't do full duplex */
+#define GB_SPI_FLAG_NO_RX BIT(1) /* can't do buffer read */
+#define GB_SPI_FLAG_NO_TX BIT(2) /* can't do buffer write */
+
+/* Greybus spi operation types */
+#define GB_SPI_TYPE_MASTER_CONFIG 0x02
+#define GB_SPI_TYPE_DEVICE_CONFIG 0x03
+#define GB_SPI_TYPE_TRANSFER 0x04
+
+/* master config request has no payload */
+struct gb_spi_master_config_response {
+ __le32 bits_per_word_mask;
+ __le32 min_speed_hz;
+ __le32 max_speed_hz;
+ __le16 mode;
+ __le16 flags;
+ __u8 num_chipselect;
+} __packed;
+
+struct gb_spi_device_config_request {
+ __u8 chip_select;
+} __packed;
+
+struct gb_spi_device_config_response {
+ __le16 mode;
+ __u8 bits_per_word;
+ __le32 max_speed_hz;
+ __u8 device_type;
+#define GB_SPI_SPI_DEV 0x00
+#define GB_SPI_SPI_NOR 0x01
+#define GB_SPI_SPI_MODALIAS 0x02
+ __u8 name[32];
+} __packed;
+
+/**
+ * struct gb_spi_transfer - a read/write buffer pair
+ * @speed_hz: Select a speed other than the device default for this transfer. If
+ * 0 the default (from @spi_device) is used.
+ * @len: size of rx and tx buffers (in bytes)
+ * @delay_usecs: microseconds to delay after this transfer before (optionally)
+ * changing the chipselect status, then starting the next transfer or
+ * completing this spi_message.
+ * @cs_change: affects chipselect after this transfer completes
+ * @bits_per_word: select a bits_per_word other than the device default for this
+ * transfer. If 0 the default (from @spi_device) is used.
+ */
+struct gb_spi_transfer {
+ __le32 speed_hz;
+ __le32 len;
+ __le16 delay_usecs;
+ __u8 cs_change;
+ __u8 bits_per_word;
+ __u8 xfer_flags;
+#define GB_SPI_XFER_READ 0x01
+#define GB_SPI_XFER_WRITE 0x02
+#define GB_SPI_XFER_INPROGRESS 0x04
+} __packed;
+
+struct gb_spi_transfer_request {
+ __u8 chip_select; /* of the spi device */
+ __u8 mode; /* of the spi device */
+ __le16 count;
+ struct gb_spi_transfer transfers[]; /* count of these */
+} __packed;
+
+struct gb_spi_transfer_response {
+ __u8 data[0]; /* inbound data */
+} __packed;
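+
+/*
+ * Illustrative sketch, not part of the original header.  Assuming the
+ * write payloads follow the transfers array in the same way as in the
+ * i2c transfer request (an assumption, not stated in this header), the
+ * total request size would be computed as below.  The helper name is
+ * hypothetical.
+ */
+static inline size_t gb_spi_transfer_request_size(__u16 count,
+						  size_t tx_bytes)
+{
+	return sizeof(struct gb_spi_transfer_request) +
+	       count * sizeof(struct gb_spi_transfer) + tx_bytes;
+}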
+
+/* Version of the Greybus SVC protocol we support */
+#define GB_SVC_VERSION_MAJOR 0x00
+#define GB_SVC_VERSION_MINOR 0x01
+
+/* Greybus SVC request types */
+#define GB_SVC_TYPE_PROTOCOL_VERSION 0x01
+#define GB_SVC_TYPE_SVC_HELLO 0x02
+#define GB_SVC_TYPE_INTF_DEVICE_ID 0x03
+#define GB_SVC_TYPE_INTF_RESET 0x06
+#define GB_SVC_TYPE_CONN_CREATE 0x07
+#define GB_SVC_TYPE_CONN_DESTROY 0x08
+#define GB_SVC_TYPE_DME_PEER_GET 0x09
+#define GB_SVC_TYPE_DME_PEER_SET 0x0a
+#define GB_SVC_TYPE_ROUTE_CREATE 0x0b
+#define GB_SVC_TYPE_ROUTE_DESTROY 0x0c
+#define GB_SVC_TYPE_TIMESYNC_ENABLE 0x0d
+#define GB_SVC_TYPE_TIMESYNC_DISABLE 0x0e
+#define GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE 0x0f
+#define GB_SVC_TYPE_INTF_SET_PWRM 0x10
+#define GB_SVC_TYPE_INTF_EJECT 0x11
+#define GB_SVC_TYPE_PING 0x13
+#define GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET 0x14
+#define GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET 0x15
+#define GB_SVC_TYPE_PWRMON_SAMPLE_GET 0x16
+#define GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET 0x17
+#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE 0x18
+#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE 0x19
+#define GB_SVC_TYPE_TIMESYNC_PING 0x1a
+#define GB_SVC_TYPE_MODULE_INSERTED 0x1f
+#define GB_SVC_TYPE_MODULE_REMOVED 0x20
+#define GB_SVC_TYPE_INTF_VSYS_ENABLE 0x21
+#define GB_SVC_TYPE_INTF_VSYS_DISABLE 0x22
+#define GB_SVC_TYPE_INTF_REFCLK_ENABLE 0x23
+#define GB_SVC_TYPE_INTF_REFCLK_DISABLE 0x24
+#define GB_SVC_TYPE_INTF_UNIPRO_ENABLE 0x25
+#define GB_SVC_TYPE_INTF_UNIPRO_DISABLE 0x26
+#define GB_SVC_TYPE_INTF_ACTIVATE 0x27
+#define GB_SVC_TYPE_INTF_RESUME 0x28
+#define GB_SVC_TYPE_INTF_MAILBOX_EVENT 0x29
+#define GB_SVC_TYPE_INTF_OOPS 0x2a
+
+/* Greybus SVC protocol status values */
+#define GB_SVC_OP_SUCCESS 0x00
+#define GB_SVC_OP_UNKNOWN_ERROR 0x01
+#define GB_SVC_INTF_NOT_DETECTED 0x02
+#define GB_SVC_INTF_NO_UPRO_LINK 0x03
+#define GB_SVC_INTF_UPRO_NOT_DOWN 0x04
+#define GB_SVC_INTF_UPRO_NOT_HIBERNATED 0x05
+#define GB_SVC_INTF_NO_V_SYS 0x06
+#define GB_SVC_INTF_V_CHG 0x07
+#define GB_SVC_INTF_WAKE_BUSY 0x08
+#define GB_SVC_INTF_NO_REFCLK 0x09
+#define GB_SVC_INTF_RELEASING 0x0a
+#define GB_SVC_INTF_NO_ORDER 0x0b
+#define GB_SVC_INTF_MBOX_SET 0x0c
+#define GB_SVC_INTF_BAD_MBOX 0x0d
+#define GB_SVC_INTF_OP_TIMEOUT 0x0e
+#define GB_SVC_PWRMON_OP_NOT_PRESENT 0x0f
+
+struct gb_svc_version_request {
+ __u8 major;
+ __u8 minor;
+} __packed;
+
+struct gb_svc_version_response {
+ __u8 major;
+ __u8 minor;
+} __packed;
+
+/* SVC protocol hello request */
+struct gb_svc_hello_request {
+ __le16 endo_id;
+ __u8 interface_id;
+} __packed;
+/* hello response has no payload */
+
+struct gb_svc_intf_device_id_request {
+ __u8 intf_id;
+ __u8 device_id;
+} __packed;
+/* device id response has no payload */
+
+struct gb_svc_intf_reset_request {
+ __u8 intf_id;
+} __packed;
+/* interface reset response has no payload */
+
+struct gb_svc_intf_eject_request {
+ __u8 intf_id;
+} __packed;
+/* interface eject response has no payload */
+
+struct gb_svc_conn_create_request {
+ __u8 intf1_id;
+ __le16 cport1_id;
+ __u8 intf2_id;
+ __le16 cport2_id;
+ __u8 tc;
+ __u8 flags;
+} __packed;
+/* connection create response has no payload */
+
+struct gb_svc_conn_destroy_request {
+ __u8 intf1_id;
+ __le16 cport1_id;
+ __u8 intf2_id;
+ __le16 cport2_id;
+} __packed;
+/* connection destroy response has no payload */
+
+struct gb_svc_dme_peer_get_request {
+ __u8 intf_id;
+ __le16 attr;
+ __le16 selector;
+} __packed;
+
+struct gb_svc_dme_peer_get_response {
+ __le16 result_code;
+ __le32 attr_value;
+} __packed;
+
+struct gb_svc_dme_peer_set_request {
+ __u8 intf_id;
+ __le16 attr;
+ __le16 selector;
+ __le32 value;
+} __packed;
+
+struct gb_svc_dme_peer_set_response {
+ __le16 result_code;
+} __packed;
+
+/* Greybus init-status values, currently retrieved using DME peer gets. */
+#define GB_INIT_SPI_BOOT_STARTED 0x02
+#define GB_INIT_TRUSTED_SPI_BOOT_FINISHED 0x03
+#define GB_INIT_UNTRUSTED_SPI_BOOT_FINISHED 0x04
+#define GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED 0x06
+#define GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED 0x09
+#define GB_INIT_S2_LOADER_BOOT_STARTED 0x0D
+
+struct gb_svc_route_create_request {
+ __u8 intf1_id;
+ __u8 dev1_id;
+ __u8 intf2_id;
+ __u8 dev2_id;
+} __packed;
+/* route create response has no payload */
+
+struct gb_svc_route_destroy_request {
+ __u8 intf1_id;
+ __u8 intf2_id;
+} __packed;
+/* route destroy response has no payload */
+
+/* used for svc_intf_vsys_{enable,disable} */
+struct gb_svc_intf_vsys_request {
+ __u8 intf_id;
+} __packed;
+
+struct gb_svc_intf_vsys_response {
+ __u8 result_code;
+#define GB_SVC_INTF_VSYS_OK 0x00
+ /* 0x01 is reserved */
+#define GB_SVC_INTF_VSYS_FAIL 0x02
+} __packed;
+
+/* used for svc_intf_refclk_{enable,disable} */
+struct gb_svc_intf_refclk_request {
+ __u8 intf_id;
+} __packed;
+
+struct gb_svc_intf_refclk_response {
+ __u8 result_code;
+#define GB_SVC_INTF_REFCLK_OK 0x00
+ /* 0x01 is reserved */
+#define GB_SVC_INTF_REFCLK_FAIL 0x02
+} __packed;
+
+/* used for svc_intf_unipro_{enable,disable} */
+struct gb_svc_intf_unipro_request {
+ __u8 intf_id;
+} __packed;
+
+struct gb_svc_intf_unipro_response {
+ __u8 result_code;
+#define GB_SVC_INTF_UNIPRO_OK 0x00
+ /* 0x01 is reserved */
+#define GB_SVC_INTF_UNIPRO_FAIL 0x02
+#define GB_SVC_INTF_UNIPRO_NOT_OFF 0x03
+} __packed;
+
+#define GB_SVC_UNIPRO_FAST_MODE 0x01
+#define GB_SVC_UNIPRO_SLOW_MODE 0x02
+#define GB_SVC_UNIPRO_FAST_AUTO_MODE 0x04
+#define GB_SVC_UNIPRO_SLOW_AUTO_MODE 0x05
+#define GB_SVC_UNIPRO_MODE_UNCHANGED 0x07
+#define GB_SVC_UNIPRO_HIBERNATE_MODE 0x11
+#define GB_SVC_UNIPRO_OFF_MODE 0x12
+
+#define GB_SVC_SMALL_AMPLITUDE 0x01
+#define GB_SVC_LARGE_AMPLITUDE 0x02
+
+#define GB_SVC_NO_DE_EMPHASIS 0x00
+#define GB_SVC_SMALL_DE_EMPHASIS 0x01
+#define GB_SVC_LARGE_DE_EMPHASIS 0x02
+
+#define GB_SVC_PWRM_RXTERMINATION 0x01
+#define GB_SVC_PWRM_TXTERMINATION 0x02
+#define GB_SVC_PWRM_LINE_RESET 0x04
+#define GB_SVC_PWRM_SCRAMBLING 0x20
+
+#define GB_SVC_PWRM_QUIRK_HSSER 0x00000001
+
+#define GB_SVC_UNIPRO_HS_SERIES_A 0x01
+#define GB_SVC_UNIPRO_HS_SERIES_B 0x02
+
+#define GB_SVC_SETPWRM_PWR_OK 0x00
+#define GB_SVC_SETPWRM_PWR_LOCAL 0x01
+#define GB_SVC_SETPWRM_PWR_REMOTE 0x02
+#define GB_SVC_SETPWRM_PWR_BUSY 0x03
+#define GB_SVC_SETPWRM_PWR_ERROR_CAP 0x04
+#define GB_SVC_SETPWRM_PWR_FATAL_ERROR 0x05
+
+struct gb_svc_l2_timer_cfg {
+ __le16 tsb_fc0_protection_timeout;
+ __le16 tsb_tc0_replay_timeout;
+ __le16 tsb_afc0_req_timeout;
+ __le16 tsb_fc1_protection_timeout;
+ __le16 tsb_tc1_replay_timeout;
+ __le16 tsb_afc1_req_timeout;
+ __le16 reserved_for_tc2[3];
+ __le16 reserved_for_tc3[3];
+} __packed;
+
+struct gb_svc_intf_set_pwrm_request {
+ __u8 intf_id;
+ __u8 hs_series;
+ __u8 tx_mode;
+ __u8 tx_gear;
+ __u8 tx_nlanes;
+ __u8 tx_amplitude;
+ __u8 tx_hs_equalizer;
+ __u8 rx_mode;
+ __u8 rx_gear;
+ __u8 rx_nlanes;
+ __u8 flags;
+ __le32 quirks;
+ struct gb_svc_l2_timer_cfg local_l2timerdata, remote_l2timerdata;
+} __packed;
+
+struct gb_svc_intf_set_pwrm_response {
+ __u8 result_code;
+} __packed;
+
+struct gb_svc_key_event_request {
+ __le16 key_code;
+#define GB_KEYCODE_ARA 0x00
+
+ __u8 key_event;
+#define GB_SVC_KEY_RELEASED 0x00
+#define GB_SVC_KEY_PRESSED 0x01
+} __packed;
+
+#define GB_SVC_PWRMON_MAX_RAIL_COUNT 254
+
+struct gb_svc_pwrmon_rail_count_get_response {
+ __u8 rail_count;
+} __packed;
+
+#define GB_SVC_PWRMON_RAIL_NAME_BUFSIZE 32
+
+struct gb_svc_pwrmon_rail_names_get_response {
+ __u8 status;
+ __u8 name[][GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];
+} __packed;
+
+#define GB_SVC_PWRMON_TYPE_CURR 0x01
+#define GB_SVC_PWRMON_TYPE_VOL 0x02
+#define GB_SVC_PWRMON_TYPE_PWR 0x03
+
+#define GB_SVC_PWRMON_GET_SAMPLE_OK 0x00
+#define GB_SVC_PWRMON_GET_SAMPLE_INVAL 0x01
+#define GB_SVC_PWRMON_GET_SAMPLE_NOSUPP 0x02
+#define GB_SVC_PWRMON_GET_SAMPLE_HWERR 0x03
+
+struct gb_svc_pwrmon_sample_get_request {
+ __u8 rail_id;
+ __u8 measurement_type;
+} __packed;
+
+struct gb_svc_pwrmon_sample_get_response {
+ __u8 result;
+ __le32 measurement;
+} __packed;
+
+struct gb_svc_pwrmon_intf_sample_get_request {
+ __u8 intf_id;
+ __u8 measurement_type;
+} __packed;
+
+struct gb_svc_pwrmon_intf_sample_get_response {
+ __u8 result;
+ __le32 measurement;
+} __packed;
+
+#define GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY 0x0001
+
+struct gb_svc_module_inserted_request {
+ __u8 primary_intf_id;
+ __u8 intf_count;
+ __le16 flags;
+} __packed;
+/* module_inserted response has no payload */
+
+struct gb_svc_module_removed_request {
+ __u8 primary_intf_id;
+} __packed;
+/* module_removed response has no payload */
+
+struct gb_svc_intf_activate_request {
+ __u8 intf_id;
+} __packed;
+
+#define GB_SVC_INTF_TYPE_UNKNOWN 0x00
+#define GB_SVC_INTF_TYPE_DUMMY 0x01
+#define GB_SVC_INTF_TYPE_UNIPRO 0x02
+#define GB_SVC_INTF_TYPE_GREYBUS 0x03
+
+struct gb_svc_intf_activate_response {
+ __u8 status;
+ __u8 intf_type;
+} __packed;
+
+struct gb_svc_intf_resume_request {
+ __u8 intf_id;
+} __packed;
+
+struct gb_svc_intf_resume_response {
+ __u8 status;
+} __packed;
+
+#define GB_SVC_INTF_MAILBOX_NONE 0x00
+#define GB_SVC_INTF_MAILBOX_AP 0x01
+#define GB_SVC_INTF_MAILBOX_GREYBUS 0x02
+
+struct gb_svc_intf_mailbox_event_request {
+ __u8 intf_id;
+ __le16 result_code;
+ __le32 mailbox;
+} __packed;
+/* intf_mailbox_event response has no payload */
+
+struct gb_svc_intf_oops_request {
+ __u8 intf_id;
+ __u8 reason;
+} __packed;
+/* intf_oops response has no payload */
+
+
+/* RAW */
+
+/* Greybus raw request types */
+#define GB_RAW_TYPE_SEND 0x02
+
+struct gb_raw_send_request {
+ __le32 len;
+ __u8 data[];
+} __packed;
+
+
+/* UART */
+
+/* Greybus UART operation types */
+#define GB_UART_TYPE_SEND_DATA 0x02
+#define GB_UART_TYPE_RECEIVE_DATA 0x03 /* Unsolicited data */
+#define GB_UART_TYPE_SET_LINE_CODING 0x04
+#define GB_UART_TYPE_SET_CONTROL_LINE_STATE 0x05
+#define GB_UART_TYPE_SEND_BREAK 0x06
+#define GB_UART_TYPE_SERIAL_STATE 0x07 /* Unsolicited data */
+#define GB_UART_TYPE_RECEIVE_CREDITS 0x08
+#define GB_UART_TYPE_FLUSH_FIFOS 0x09
+
+/* Represents data from AP -> Module */
+struct gb_uart_send_data_request {
+ __le16 size;
+ __u8 data[];
+} __packed;
+
+/* recv-data-request flags */
+#define GB_UART_RECV_FLAG_FRAMING 0x01 /* Framing error */
+#define GB_UART_RECV_FLAG_PARITY 0x02 /* Parity error */
+#define GB_UART_RECV_FLAG_OVERRUN 0x04 /* Overrun error */
+#define GB_UART_RECV_FLAG_BREAK 0x08 /* Break */
+
+/* Represents data from Module -> AP */
+struct gb_uart_recv_data_request {
+ __le16 size;
+ __u8 flags;
+ __u8 data[];
+} __packed;
+
+struct gb_uart_receive_credits_request {
+ __le16 count;
+} __packed;
+
+struct gb_uart_set_line_coding_request {
+ __le32 rate;
+ __u8 format;
+#define GB_SERIAL_1_STOP_BITS 0
+#define GB_SERIAL_1_5_STOP_BITS 1
+#define GB_SERIAL_2_STOP_BITS 2
+
+ __u8 parity;
+#define GB_SERIAL_NO_PARITY 0
+#define GB_SERIAL_ODD_PARITY 1
+#define GB_SERIAL_EVEN_PARITY 2
+#define GB_SERIAL_MARK_PARITY 3
+#define GB_SERIAL_SPACE_PARITY 4
+
+ __u8 data_bits;
+
+ __u8 flow_control;
+#define GB_SERIAL_AUTO_RTSCTS_EN 0x1
+} __packed;
+
+/* output control lines */
+#define GB_UART_CTRL_DTR 0x01
+#define GB_UART_CTRL_RTS 0x02
+
+struct gb_uart_set_control_line_state_request {
+ __u8 control;
+} __packed;
+
+struct gb_uart_set_break_request {
+ __u8 state;
+} __packed;
+
+/* input control lines and line errors */
+#define GB_UART_CTRL_DCD 0x01
+#define GB_UART_CTRL_DSR 0x02
+#define GB_UART_CTRL_RI 0x04
+
+struct gb_uart_serial_state_request {
+ __u8 control;
+} __packed;
+
+struct gb_uart_serial_flush_request {
+ __u8 flags;
+#define GB_SERIAL_FLAG_FLUSH_TRANSMITTER 0x01
+#define GB_SERIAL_FLAG_FLUSH_RECEIVER 0x02
+} __packed;
+
+/* Loopback */
+
+/* Greybus loopback request types */
+#define GB_LOOPBACK_TYPE_PING 0x02
+#define GB_LOOPBACK_TYPE_TRANSFER 0x03
+#define GB_LOOPBACK_TYPE_SINK 0x04
+
+/*
+ * Loopback request/response header format should be identical
+ * to simplify bandwidth and data movement analysis.
+ */
+struct gb_loopback_transfer_request {
+ __le32 len;
+ __le32 reserved0;
+ __le32 reserved1;
+ __u8 data[];
+} __packed;
+
+struct gb_loopback_transfer_response {
+ __le32 len;
+ __le32 reserved0;
+ __le32 reserved1;
+ __u8 data[];
+} __packed;
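+
+/*
+ * Illustrative compile-time check, not part of the original header: the
+ * comment above requires both layouts to stay identical.  static_assert
+ * is assumed to be available via <linux/build_bug.h>.
+ */
+static_assert(sizeof(struct gb_loopback_transfer_request) ==
+	      sizeof(struct gb_loopback_transfer_response));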
+
+/* SDIO */
+/* Greybus SDIO operation types */
+#define GB_SDIO_TYPE_GET_CAPABILITIES 0x02
+#define GB_SDIO_TYPE_SET_IOS 0x03
+#define GB_SDIO_TYPE_COMMAND 0x04
+#define GB_SDIO_TYPE_TRANSFER 0x05
+#define GB_SDIO_TYPE_EVENT 0x06
+
+/* get caps response: request has no payload */
+struct gb_sdio_get_caps_response {
+ __le32 caps;
+#define GB_SDIO_CAP_NONREMOVABLE 0x00000001
+#define GB_SDIO_CAP_4_BIT_DATA 0x00000002
+#define GB_SDIO_CAP_8_BIT_DATA 0x00000004
+#define GB_SDIO_CAP_MMC_HS 0x00000008
+#define GB_SDIO_CAP_SD_HS 0x00000010
+#define GB_SDIO_CAP_ERASE 0x00000020
+#define GB_SDIO_CAP_1_2V_DDR 0x00000040
+#define GB_SDIO_CAP_1_8V_DDR 0x00000080
+#define GB_SDIO_CAP_POWER_OFF_CARD 0x00000100
+#define GB_SDIO_CAP_UHS_SDR12 0x00000200
+#define GB_SDIO_CAP_UHS_SDR25 0x00000400
+#define GB_SDIO_CAP_UHS_SDR50 0x00000800
+#define GB_SDIO_CAP_UHS_SDR104 0x00001000
+#define GB_SDIO_CAP_UHS_DDR50 0x00002000
+#define GB_SDIO_CAP_DRIVER_TYPE_A 0x00004000
+#define GB_SDIO_CAP_DRIVER_TYPE_C 0x00008000
+#define GB_SDIO_CAP_DRIVER_TYPE_D 0x00010000
+#define GB_SDIO_CAP_HS200_1_2V 0x00020000
+#define GB_SDIO_CAP_HS200_1_8V 0x00040000
+#define GB_SDIO_CAP_HS400_1_2V 0x00080000
+#define GB_SDIO_CAP_HS400_1_8V 0x00100000
+
+ /* see the possible values listed at vdd below */
+ __le32 ocr;
+ __le32 f_min;
+ __le32 f_max;
+ __le16 max_blk_count;
+ __le16 max_blk_size;
+} __packed;
+
+/* set ios request: response has no payload */
+struct gb_sdio_set_ios_request {
+ __le32 clock;
+ __le32 vdd;
+#define GB_SDIO_VDD_165_195 0x00000001
+#define GB_SDIO_VDD_20_21 0x00000002
+#define GB_SDIO_VDD_21_22 0x00000004
+#define GB_SDIO_VDD_22_23 0x00000008
+#define GB_SDIO_VDD_23_24 0x00000010
+#define GB_SDIO_VDD_24_25 0x00000020
+#define GB_SDIO_VDD_25_26 0x00000040
+#define GB_SDIO_VDD_26_27 0x00000080
+#define GB_SDIO_VDD_27_28 0x00000100
+#define GB_SDIO_VDD_28_29 0x00000200
+#define GB_SDIO_VDD_29_30 0x00000400
+#define GB_SDIO_VDD_30_31 0x00000800
+#define GB_SDIO_VDD_31_32 0x00001000
+#define GB_SDIO_VDD_32_33 0x00002000
+#define GB_SDIO_VDD_33_34 0x00004000
+#define GB_SDIO_VDD_34_35 0x00008000
+#define GB_SDIO_VDD_35_36 0x00010000
+
+ __u8 bus_mode;
+#define GB_SDIO_BUSMODE_OPENDRAIN 0x00
+#define GB_SDIO_BUSMODE_PUSHPULL 0x01
+
+ __u8 power_mode;
+#define GB_SDIO_POWER_OFF 0x00
+#define GB_SDIO_POWER_UP 0x01
+#define GB_SDIO_POWER_ON 0x02
+#define GB_SDIO_POWER_UNDEFINED 0x03
+
+ __u8 bus_width;
+#define GB_SDIO_BUS_WIDTH_1 0x00
+#define GB_SDIO_BUS_WIDTH_4 0x02
+#define GB_SDIO_BUS_WIDTH_8 0x03
+
+ __u8 timing;
+#define GB_SDIO_TIMING_LEGACY 0x00
+#define GB_SDIO_TIMING_MMC_HS 0x01
+#define GB_SDIO_TIMING_SD_HS 0x02
+#define GB_SDIO_TIMING_UHS_SDR12 0x03
+#define GB_SDIO_TIMING_UHS_SDR25 0x04
+#define GB_SDIO_TIMING_UHS_SDR50 0x05
+#define GB_SDIO_TIMING_UHS_SDR104 0x06
+#define GB_SDIO_TIMING_UHS_DDR50 0x07
+#define GB_SDIO_TIMING_MMC_DDR52 0x08
+#define GB_SDIO_TIMING_MMC_HS200 0x09
+#define GB_SDIO_TIMING_MMC_HS400 0x0A
+
+ __u8 signal_voltage;
+#define GB_SDIO_SIGNAL_VOLTAGE_330 0x00
+#define GB_SDIO_SIGNAL_VOLTAGE_180 0x01
+#define GB_SDIO_SIGNAL_VOLTAGE_120 0x02
+
+ __u8 drv_type;
+#define GB_SDIO_SET_DRIVER_TYPE_B 0x00
+#define GB_SDIO_SET_DRIVER_TYPE_A 0x01
+#define GB_SDIO_SET_DRIVER_TYPE_C 0x02
+#define GB_SDIO_SET_DRIVER_TYPE_D 0x03
+} __packed;
+
+/* command request */
+struct gb_sdio_command_request {
+ __u8 cmd;
+ __u8 cmd_flags;
+#define GB_SDIO_RSP_NONE 0x00
+#define GB_SDIO_RSP_PRESENT 0x01
+#define GB_SDIO_RSP_136 0x02
+#define GB_SDIO_RSP_CRC 0x04
+#define GB_SDIO_RSP_BUSY 0x08
+#define GB_SDIO_RSP_OPCODE 0x10
+
+ __u8 cmd_type;
+#define GB_SDIO_CMD_AC 0x00
+#define GB_SDIO_CMD_ADTC 0x01
+#define GB_SDIO_CMD_BC 0x02
+#define GB_SDIO_CMD_BCR 0x03
+
+ __le32 cmd_arg;
+ __le16 data_blocks;
+ __le16 data_blksz;
+} __packed;
+
+struct gb_sdio_command_response {
+ __le32 resp[4];
+} __packed;
+
+/* transfer request */
+struct gb_sdio_transfer_request {
+ __u8 data_flags;
+#define GB_SDIO_DATA_WRITE 0x01
+#define GB_SDIO_DATA_READ 0x02
+#define GB_SDIO_DATA_STREAM 0x04
+
+ __le16 data_blocks;
+ __le16 data_blksz;
+ __u8 data[];
+} __packed;
+
+struct gb_sdio_transfer_response {
+ __le16 data_blocks;
+ __le16 data_blksz;
+ __u8 data[];
+} __packed;
+
+/* event request: generated by module and is defined as unidirectional */
+struct gb_sdio_event_request {
+ __u8 event;
+#define GB_SDIO_CARD_INSERTED 0x01
+#define GB_SDIO_CARD_REMOVED 0x02
+#define GB_SDIO_WP 0x04
+} __packed;
+
+/* Camera */
+
+/* Greybus Camera request types */
+#define GB_CAMERA_TYPE_CAPABILITIES 0x02
+#define GB_CAMERA_TYPE_CONFIGURE_STREAMS 0x03
+#define GB_CAMERA_TYPE_CAPTURE 0x04
+#define GB_CAMERA_TYPE_FLUSH 0x05
+#define GB_CAMERA_TYPE_METADATA 0x06
+
+#define GB_CAMERA_MAX_STREAMS 4
+#define GB_CAMERA_MAX_SETTINGS_SIZE 8192
+
+/* Greybus Camera Configure Streams request payload */
+struct gb_camera_stream_config_request {
+ __le16 width;
+ __le16 height;
+ __le16 format;
+ __le16 padding;
+} __packed;
+
+struct gb_camera_configure_streams_request {
+ __u8 num_streams;
+ __u8 flags;
+#define GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY 0x01
+ __le16 padding;
+ struct gb_camera_stream_config_request config[];
+} __packed;
+
+/* Greybus Camera Configure Streams response payload */
+struct gb_camera_stream_config_response {
+ __le16 width;
+ __le16 height;
+ __le16 format;
+ __u8 virtual_channel;
+ __u8 data_type[2];
+ __le16 max_pkt_size;
+ __u8 padding;
+ __le32 max_size;
+} __packed;
+
+struct gb_camera_configure_streams_response {
+ __u8 num_streams;
+#define GB_CAMERA_CONFIGURE_STREAMS_ADJUSTED 0x01
+ __u8 flags;
+ __u8 padding[2];
+ __le32 data_rate;
+ struct gb_camera_stream_config_response config[];
+} __packed;
+
+/* Greybus Camera Capture request payload - response has no payload */
+struct gb_camera_capture_request {
+ __le32 request_id;
+ __u8 streams;
+ __u8 padding;
+ __le16 num_frames;
+ __u8 settings[];
+} __packed;
+
+/* Greybus Camera Flush response payload - request has no payload */
+struct gb_camera_flush_response {
+ __le32 request_id;
+} __packed;
+
+/* Greybus Camera Metadata request payload - operation has no response */
+struct gb_camera_metadata_request {
+ __le32 request_id;
+ __le16 frame_number;
+ __u8 stream;
+ __u8 padding;
+ __u8 metadata[];
+} __packed;
+
+/* Lights */
+
+/* Greybus Lights request types */
+#define GB_LIGHTS_TYPE_GET_LIGHTS 0x02
+#define GB_LIGHTS_TYPE_GET_LIGHT_CONFIG 0x03
+#define GB_LIGHTS_TYPE_GET_CHANNEL_CONFIG 0x04
+#define GB_LIGHTS_TYPE_GET_CHANNEL_FLASH_CONFIG 0x05
+#define GB_LIGHTS_TYPE_SET_BRIGHTNESS 0x06
+#define GB_LIGHTS_TYPE_SET_BLINK 0x07
+#define GB_LIGHTS_TYPE_SET_COLOR 0x08
+#define GB_LIGHTS_TYPE_SET_FADE 0x09
+#define GB_LIGHTS_TYPE_EVENT 0x0A
+#define GB_LIGHTS_TYPE_SET_FLASH_INTENSITY 0x0B
+#define GB_LIGHTS_TYPE_SET_FLASH_STROBE 0x0C
+#define GB_LIGHTS_TYPE_SET_FLASH_TIMEOUT 0x0D
+#define GB_LIGHTS_TYPE_GET_FLASH_FAULT 0x0E
+
+/* Greybus Light modes */
+
+/*
+ * If you add any specific mode below, also update the
+ * GB_CHANNEL_MODE_DEFINED_RANGE value accordingly.
+ */
+#define GB_CHANNEL_MODE_NONE 0x00000000
+#define GB_CHANNEL_MODE_BATTERY 0x00000001
+#define GB_CHANNEL_MODE_POWER 0x00000002
+#define GB_CHANNEL_MODE_WIRELESS 0x00000004
+#define GB_CHANNEL_MODE_BLUETOOTH 0x00000008
+#define GB_CHANNEL_MODE_KEYBOARD 0x00000010
+#define GB_CHANNEL_MODE_BUTTONS 0x00000020
+#define GB_CHANNEL_MODE_NOTIFICATION 0x00000040
+#define GB_CHANNEL_MODE_ATTENTION 0x00000080
+#define GB_CHANNEL_MODE_FLASH 0x00000100
+#define GB_CHANNEL_MODE_TORCH 0x00000200
+#define GB_CHANNEL_MODE_INDICATOR 0x00000400
+
+/* Lights Mode valid bit values */
+#define GB_CHANNEL_MODE_DEFINED_RANGE 0x000004FF
+#define GB_CHANNEL_MODE_VENDOR_RANGE 0x00F00000
+
+/* Greybus Light Channels Flags */
+#define GB_LIGHT_CHANNEL_MULTICOLOR 0x00000001
+#define GB_LIGHT_CHANNEL_FADER 0x00000002
+#define GB_LIGHT_CHANNEL_BLINK 0x00000004
+
+/* get count of lights in module */
+struct gb_lights_get_lights_response {
+ __u8 lights_count;
+} __packed;
+
+/* light config request payload */
+struct gb_lights_get_light_config_request {
+ __u8 id;
+} __packed;
+
+/* light config response payload */
+struct gb_lights_get_light_config_response {
+ __u8 channel_count;
+ __u8 name[32];
+} __packed;
+
+/* channel config request payload */
+struct gb_lights_get_channel_config_request {
+ __u8 light_id;
+ __u8 channel_id;
+} __packed;
+
+/* channel flash config request payload */
+struct gb_lights_get_channel_flash_config_request {
+ __u8 light_id;
+ __u8 channel_id;
+} __packed;
+
+/* channel config response payload */
+struct gb_lights_get_channel_config_response {
+ __u8 max_brightness;
+ __le32 flags;
+ __le32 color;
+ __u8 color_name[32];
+ __le32 mode;
+ __u8 mode_name[32];
+} __packed;
+
+/* channel flash config response payload */
+struct gb_lights_get_channel_flash_config_response {
+ __le32 intensity_min_uA;
+ __le32 intensity_max_uA;
+ __le32 intensity_step_uA;
+ __le32 timeout_min_us;
+ __le32 timeout_max_us;
+ __le32 timeout_step_us;
+} __packed;
+
+/* blink request payload: response has no payload */
+struct gb_lights_blink_request {
+ __u8 light_id;
+ __u8 channel_id;
+ __le16 time_on_ms;
+ __le16 time_off_ms;
+} __packed;
+
+/* set brightness request payload: response has no payload */
+struct gb_lights_set_brightness_request {
+ __u8 light_id;
+ __u8 channel_id;
+ __u8 brightness;
+} __packed;
+
+/* set color request payload: response has no payload */
+struct gb_lights_set_color_request {
+ __u8 light_id;
+ __u8 channel_id;
+ __le32 color;
+} __packed;
+
+/* set fade request payload: response has no payload */
+struct gb_lights_set_fade_request {
+ __u8 light_id;
+ __u8 channel_id;
+ __u8 fade_in;
+ __u8 fade_out;
+} __packed;
+
+/* event request: generated by module */
+struct gb_lights_event_request {
+ __u8 light_id;
+ __u8 event;
+#define GB_LIGHTS_LIGHT_CONFIG 0x01
+} __packed;
+
+/* set flash intensity request payload: response has no payload */
+struct gb_lights_set_flash_intensity_request {
+ __u8 light_id;
+ __u8 channel_id;
+ __le32 intensity_uA;
+} __packed;
+
+/* set flash strobe state request payload: response has no payload */
+struct gb_lights_set_flash_strobe_request {
+ __u8 light_id;
+ __u8 channel_id;
+ __u8 state;
+} __packed;
+
+/* set flash timeout request payload: response has no payload */
+struct gb_lights_set_flash_timeout_request {
+ __u8 light_id;
+ __u8 channel_id;
+ __le32 timeout_us;
+} __packed;
+
+/* get flash fault request payload */
+struct gb_lights_get_flash_fault_request {
+ __u8 light_id;
+ __u8 channel_id;
+} __packed;
+
+/* get flash fault response payload */
+struct gb_lights_get_flash_fault_response {
+ __le32 fault;
+#define GB_LIGHTS_FLASH_FAULT_OVER_VOLTAGE 0x00000000
+#define GB_LIGHTS_FLASH_FAULT_TIMEOUT 0x00000001
+#define GB_LIGHTS_FLASH_FAULT_OVER_TEMPERATURE 0x00000002
+#define GB_LIGHTS_FLASH_FAULT_SHORT_CIRCUIT 0x00000004
+#define GB_LIGHTS_FLASH_FAULT_OVER_CURRENT 0x00000008
+#define GB_LIGHTS_FLASH_FAULT_INDICATOR 0x00000010
+#define GB_LIGHTS_FLASH_FAULT_UNDER_VOLTAGE 0x00000020
+#define GB_LIGHTS_FLASH_FAULT_INPUT_VOLTAGE 0x00000040
+#define GB_LIGHTS_FLASH_FAULT_LED_OVER_TEMPERATURE 0x00000080
+} __packed;
+
+/* Audio */
+
+#define GB_AUDIO_TYPE_GET_TOPOLOGY_SIZE 0x02
+#define GB_AUDIO_TYPE_GET_TOPOLOGY 0x03
+#define GB_AUDIO_TYPE_GET_CONTROL 0x04
+#define GB_AUDIO_TYPE_SET_CONTROL 0x05
+#define GB_AUDIO_TYPE_ENABLE_WIDGET 0x06
+#define GB_AUDIO_TYPE_DISABLE_WIDGET 0x07
+#define GB_AUDIO_TYPE_GET_PCM 0x08
+#define GB_AUDIO_TYPE_SET_PCM 0x09
+#define GB_AUDIO_TYPE_SET_TX_DATA_SIZE 0x0a
+ /* 0x0b unused */
+#define GB_AUDIO_TYPE_ACTIVATE_TX 0x0c
+#define GB_AUDIO_TYPE_DEACTIVATE_TX 0x0d
+#define GB_AUDIO_TYPE_SET_RX_DATA_SIZE 0x0e
+ /* 0x0f unused */
+#define GB_AUDIO_TYPE_ACTIVATE_RX 0x10
+#define GB_AUDIO_TYPE_DEACTIVATE_RX 0x11
+#define GB_AUDIO_TYPE_JACK_EVENT 0x12
+#define GB_AUDIO_TYPE_BUTTON_EVENT 0x13
+#define GB_AUDIO_TYPE_STREAMING_EVENT 0x14
+#define GB_AUDIO_TYPE_SEND_DATA 0x15
+
+/* Module must be able to buffer 10ms of audio data, minimum */
+#define GB_AUDIO_SAMPLE_BUFFER_MIN_US 10000
+
+#define GB_AUDIO_PCM_NAME_MAX 32
+#define AUDIO_DAI_NAME_MAX 32
+#define AUDIO_CONTROL_NAME_MAX 32
+#define AUDIO_CTL_ELEM_NAME_MAX 44
+#define AUDIO_ENUM_NAME_MAX 64
+#define AUDIO_WIDGET_NAME_MAX 32
+
+/* See SNDRV_PCM_FMTBIT_* in Linux source */
+#define GB_AUDIO_PCM_FMT_S8 BIT(0)
+#define GB_AUDIO_PCM_FMT_U8 BIT(1)
+#define GB_AUDIO_PCM_FMT_S16_LE BIT(2)
+#define GB_AUDIO_PCM_FMT_S16_BE BIT(3)
+#define GB_AUDIO_PCM_FMT_U16_LE BIT(4)
+#define GB_AUDIO_PCM_FMT_U16_BE BIT(5)
+#define GB_AUDIO_PCM_FMT_S24_LE BIT(6)
+#define GB_AUDIO_PCM_FMT_S24_BE BIT(7)
+#define GB_AUDIO_PCM_FMT_U24_LE BIT(8)
+#define GB_AUDIO_PCM_FMT_U24_BE BIT(9)
+#define GB_AUDIO_PCM_FMT_S32_LE BIT(10)
+#define GB_AUDIO_PCM_FMT_S32_BE BIT(11)
+#define GB_AUDIO_PCM_FMT_U32_LE BIT(12)
+#define GB_AUDIO_PCM_FMT_U32_BE BIT(13)
+
+/* See SNDRV_PCM_RATE_* in Linux source */
+#define GB_AUDIO_PCM_RATE_5512 BIT(0)
+#define GB_AUDIO_PCM_RATE_8000 BIT(1)
+#define GB_AUDIO_PCM_RATE_11025 BIT(2)
+#define GB_AUDIO_PCM_RATE_16000 BIT(3)
+#define GB_AUDIO_PCM_RATE_22050 BIT(4)
+#define GB_AUDIO_PCM_RATE_32000 BIT(5)
+#define GB_AUDIO_PCM_RATE_44100 BIT(6)
+#define GB_AUDIO_PCM_RATE_48000 BIT(7)
+#define GB_AUDIO_PCM_RATE_64000 BIT(8)
+#define GB_AUDIO_PCM_RATE_88200 BIT(9)
+#define GB_AUDIO_PCM_RATE_96000 BIT(10)
+#define GB_AUDIO_PCM_RATE_176400 BIT(11)
+#define GB_AUDIO_PCM_RATE_192000 BIT(12)
+
+#define GB_AUDIO_STREAM_TYPE_CAPTURE 0x1
+#define GB_AUDIO_STREAM_TYPE_PLAYBACK 0x2
+
+#define GB_AUDIO_CTL_ELEM_ACCESS_READ BIT(0)
+#define GB_AUDIO_CTL_ELEM_ACCESS_WRITE BIT(1)
+
+/* See SNDRV_CTL_ELEM_TYPE_* in Linux source */
+#define GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN 0x01
+#define GB_AUDIO_CTL_ELEM_TYPE_INTEGER 0x02
+#define GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED 0x03
+#define GB_AUDIO_CTL_ELEM_TYPE_INTEGER64 0x06
+
+/* See SNDRV_CTL_ELEM_IFACE_* in Linux source */
+#define GB_AUDIO_CTL_ELEM_IFACE_CARD 0x00
+#define GB_AUDIO_CTL_ELEM_IFACE_HWDEP 0x01
+#define GB_AUDIO_CTL_ELEM_IFACE_MIXER 0x02
+#define GB_AUDIO_CTL_ELEM_IFACE_PCM 0x03
+#define GB_AUDIO_CTL_ELEM_IFACE_RAWMIDI 0x04
+#define GB_AUDIO_CTL_ELEM_IFACE_TIMER 0x05
+#define GB_AUDIO_CTL_ELEM_IFACE_SEQUENCER 0x06
+
+/* See SNDRV_CTL_ELEM_ACCESS_* in Linux source */
+#define GB_AUDIO_ACCESS_READ BIT(0)
+#define GB_AUDIO_ACCESS_WRITE BIT(1)
+#define GB_AUDIO_ACCESS_VOLATILE BIT(2)
+#define GB_AUDIO_ACCESS_TIMESTAMP BIT(3)
+#define GB_AUDIO_ACCESS_TLV_READ BIT(4)
+#define GB_AUDIO_ACCESS_TLV_WRITE BIT(5)
+#define GB_AUDIO_ACCESS_TLV_COMMAND BIT(6)
+#define GB_AUDIO_ACCESS_INACTIVE BIT(7)
+#define GB_AUDIO_ACCESS_LOCK BIT(8)
+#define GB_AUDIO_ACCESS_OWNER BIT(9)
+
+/* enum snd_soc_dapm_type */
+#define GB_AUDIO_WIDGET_TYPE_INPUT 0x0
+#define GB_AUDIO_WIDGET_TYPE_OUTPUT 0x1
+#define GB_AUDIO_WIDGET_TYPE_MUX 0x2
+#define GB_AUDIO_WIDGET_TYPE_VIRT_MUX 0x3
+#define GB_AUDIO_WIDGET_TYPE_VALUE_MUX 0x4
+#define GB_AUDIO_WIDGET_TYPE_MIXER 0x5
+#define GB_AUDIO_WIDGET_TYPE_MIXER_NAMED_CTL 0x6
+#define GB_AUDIO_WIDGET_TYPE_PGA 0x7
+#define GB_AUDIO_WIDGET_TYPE_OUT_DRV 0x8
+#define GB_AUDIO_WIDGET_TYPE_ADC 0x9
+#define GB_AUDIO_WIDGET_TYPE_DAC 0xa
+#define GB_AUDIO_WIDGET_TYPE_MICBIAS 0xb
+#define GB_AUDIO_WIDGET_TYPE_MIC 0xc
+#define GB_AUDIO_WIDGET_TYPE_HP 0xd
+#define GB_AUDIO_WIDGET_TYPE_SPK 0xe
+#define GB_AUDIO_WIDGET_TYPE_LINE 0xf
+#define GB_AUDIO_WIDGET_TYPE_SWITCH 0x10
+#define GB_AUDIO_WIDGET_TYPE_VMID 0x11
+#define GB_AUDIO_WIDGET_TYPE_PRE 0x12
+#define GB_AUDIO_WIDGET_TYPE_POST 0x13
+#define GB_AUDIO_WIDGET_TYPE_SUPPLY 0x14
+#define GB_AUDIO_WIDGET_TYPE_REGULATOR_SUPPLY 0x15
+#define GB_AUDIO_WIDGET_TYPE_CLOCK_SUPPLY 0x16
+#define GB_AUDIO_WIDGET_TYPE_AIF_IN 0x17
+#define GB_AUDIO_WIDGET_TYPE_AIF_OUT 0x18
+#define GB_AUDIO_WIDGET_TYPE_SIGGEN 0x19
+#define GB_AUDIO_WIDGET_TYPE_DAI_IN 0x1a
+#define GB_AUDIO_WIDGET_TYPE_DAI_OUT 0x1b
+#define GB_AUDIO_WIDGET_TYPE_DAI_LINK 0x1c
+
+#define GB_AUDIO_WIDGET_STATE_DISABLED 0x01
+#define GB_AUDIO_WIDGET_STATE_ENABLED 0x02
+
+#define GB_AUDIO_JACK_EVENT_INSERTION 0x1
+#define GB_AUDIO_JACK_EVENT_REMOVAL 0x2
+
+#define GB_AUDIO_BUTTON_EVENT_PRESS 0x1
+#define GB_AUDIO_BUTTON_EVENT_RELEASE 0x2
+
+#define GB_AUDIO_STREAMING_EVENT_UNSPECIFIED 0x1
+#define GB_AUDIO_STREAMING_EVENT_HALT 0x2
+#define GB_AUDIO_STREAMING_EVENT_INTERNAL_ERROR 0x3
+#define GB_AUDIO_STREAMING_EVENT_PROTOCOL_ERROR 0x4
+#define GB_AUDIO_STREAMING_EVENT_FAILURE 0x5
+#define GB_AUDIO_STREAMING_EVENT_UNDERRUN 0x6
+#define GB_AUDIO_STREAMING_EVENT_OVERRUN 0x7
+#define GB_AUDIO_STREAMING_EVENT_CLOCKING 0x8
+#define GB_AUDIO_STREAMING_EVENT_DATA_LEN 0x9
+
+#define GB_AUDIO_INVALID_INDEX 0xff
+
+/* enum snd_jack_types */
+#define GB_AUDIO_JACK_HEADPHONE 0x0000001
+#define GB_AUDIO_JACK_MICROPHONE 0x0000002
+#define GB_AUDIO_JACK_HEADSET (GB_AUDIO_JACK_HEADPHONE | \
+ GB_AUDIO_JACK_MICROPHONE)
+#define GB_AUDIO_JACK_LINEOUT 0x0000004
+#define GB_AUDIO_JACK_MECHANICAL 0x0000008
+#define GB_AUDIO_JACK_VIDEOOUT 0x0000010
+#define GB_AUDIO_JACK_AVOUT (GB_AUDIO_JACK_LINEOUT | \
+ GB_AUDIO_JACK_VIDEOOUT)
+#define GB_AUDIO_JACK_LINEIN 0x0000020
+#define GB_AUDIO_JACK_OC_HPHL 0x0000040
+#define GB_AUDIO_JACK_OC_HPHR 0x0000080
+#define GB_AUDIO_JACK_MICROPHONE2 0x0000200
+#define GB_AUDIO_JACK_ANC_HEADPHONE (GB_AUDIO_JACK_HEADPHONE | \
+ GB_AUDIO_JACK_MICROPHONE | \
+ GB_AUDIO_JACK_MICROPHONE2)
+/* Kept separate from switches to facilitate implementation */
+#define GB_AUDIO_JACK_BTN_0 0x4000000
+#define GB_AUDIO_JACK_BTN_1 0x2000000
+#define GB_AUDIO_JACK_BTN_2 0x1000000
+#define GB_AUDIO_JACK_BTN_3 0x0800000
+
+struct gb_audio_pcm {
+ __u8 stream_name[GB_AUDIO_PCM_NAME_MAX];
+ __le32 formats; /* GB_AUDIO_PCM_FMT_* */
+ __le32 rates; /* GB_AUDIO_PCM_RATE_* */
+ __u8 chan_min;
+ __u8 chan_max;
+ __u8 sig_bits; /* number of bits of content */
+} __packed;
+
+struct gb_audio_dai {
+ __u8 name[AUDIO_DAI_NAME_MAX];
+ __le16 data_cport;
+ struct gb_audio_pcm capture;
+ struct gb_audio_pcm playback;
+} __packed;
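+
+/*
+ * Illustrative example (not part of the protocol definition): a module
+ * describing a DAI that plays 16-bit stereo PCM at 48kHz might fill in
+ * the playback descriptor as follows; the values shown are hypothetical.
+ *
+ *	.playback = {
+ *		.stream_name	= "GB Audio Playback",
+ *		.formats	= cpu_to_le32(GB_AUDIO_PCM_FMT_S16_LE),
+ *		.rates		= cpu_to_le32(GB_AUDIO_PCM_RATE_48000),
+ *		.chan_min	= 2,
+ *		.chan_max	= 2,
+ *		.sig_bits	= 16,
+ *	},
+ */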
+
+struct gb_audio_integer {
+ __le32 min;
+ __le32 max;
+ __le32 step;
+} __packed;
+
+struct gb_audio_integer64 {
+ __le64 min;
+ __le64 max;
+ __le64 step;
+} __packed;
+
+struct gb_audio_enumerated {
+ __le32 items;
+ __le16 names_length;
+ __u8 names[];
+} __packed;
+
+struct gb_audio_ctl_elem_info { /* See snd_ctl_elem_info in Linux source */
+ __u8 type; /* GB_AUDIO_CTL_ELEM_TYPE_* */
+ __le16 dimen[4];
+ union {
+ struct gb_audio_integer integer;
+ struct gb_audio_integer64 integer64;
+ struct gb_audio_enumerated enumerated;
+ } value;
+} __packed;
+
+struct gb_audio_ctl_elem_value { /* See snd_ctl_elem_value in Linux source */
+ __le64 timestamp; /* XXX needed? */
+ union {
+ __le32 integer_value[2]; /* consider CTL_DOUBLE_xxx */
+ __le64 integer64_value[2];
+ __le32 enumerated_item[2];
+ } value;
+} __packed;
+
+struct gb_audio_control {
+ __u8 name[AUDIO_CONTROL_NAME_MAX];
+ __u8 id; /* 0-63 */
+ __u8 iface; /* GB_AUDIO_CTL_ELEM_IFACE_* */
+ __le16 data_cport;
+ __le32 access; /* GB_AUDIO_ACCESS_* */
+ __u8 count; /* count of same elements */
+ __u8 count_values; /* count of values, max=2 for CTL_DOUBLE_xxx */
+ struct gb_audio_ctl_elem_info info;
+} __packed;
+
+struct gb_audio_widget {
+ __u8 name[AUDIO_WIDGET_NAME_MAX];
+ __u8 sname[AUDIO_WIDGET_NAME_MAX];
+ __u8 id;
+ __u8 type; /* GB_AUDIO_WIDGET_TYPE_* */
+ __u8 state; /* GB_AUDIO_WIDGET_STATE_* */
+ __u8 ncontrols;
+ struct gb_audio_control ctl[]; /* 'ncontrols' entries */
+} __packed;
+
+struct gb_audio_route {
+ __u8 source_id; /* widget id */
+ __u8 destination_id; /* widget id */
+ __u8 control_id; /* 0-63 */
+ __u8 index; /* Selection within the control */
+} __packed;
+
+struct gb_audio_topology {
+ __u8 num_dais;
+ __u8 num_controls;
+ __u8 num_widgets;
+ __u8 num_routes;
+ __le32 size_dais;
+ __le32 size_controls;
+ __le32 size_widgets;
+ __le32 size_routes;
+ __le32 jack_type;
+ /*
+ * struct gb_audio_dai dai[num_dais];
+ * struct gb_audio_control controls[num_controls];
+ * struct gb_audio_widget widgets[num_widgets];
+ * struct gb_audio_route routes[num_routes];
+ */
+ __u8 data[];
+} __packed;
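+
+/*
+ * Illustrative sketch of walking the variable-length topology data
+ * (not normative; bounds checking against the size_* fields is omitted
+ * for brevity). The blocks follow one another in the order given in
+ * the comment above:
+ *
+ *	struct gb_audio_dai *dais = (struct gb_audio_dai *)topology->data;
+ *	struct gb_audio_control *controls = (void *)dais +
+ *					le32_to_cpu(topology->size_dais);
+ *	struct gb_audio_widget *widgets = (void *)controls +
+ *					le32_to_cpu(topology->size_controls);
+ *	struct gb_audio_route *routes = (void *)widgets +
+ *					le32_to_cpu(topology->size_widgets);
+ */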
+
+struct gb_audio_get_topology_size_response {
+ __le16 size;
+} __packed;
+
+struct gb_audio_get_topology_response {
+ struct gb_audio_topology topology;
+} __packed;
+
+struct gb_audio_get_control_request {
+ __u8 control_id;
+ __u8 index;
+} __packed;
+
+struct gb_audio_get_control_response {
+ struct gb_audio_ctl_elem_value value;
+} __packed;
+
+struct gb_audio_set_control_request {
+ __u8 control_id;
+ __u8 index;
+ struct gb_audio_ctl_elem_value value;
+} __packed;
+
+struct gb_audio_enable_widget_request {
+ __u8 widget_id;
+} __packed;
+
+struct gb_audio_disable_widget_request {
+ __u8 widget_id;
+} __packed;
+
+struct gb_audio_get_pcm_request {
+ __le16 data_cport;
+} __packed;
+
+struct gb_audio_get_pcm_response {
+ __le32 format;
+ __le32 rate;
+ __u8 channels;
+ __u8 sig_bits;
+} __packed;
+
+struct gb_audio_set_pcm_request {
+ __le16 data_cport;
+ __le32 format;
+ __le32 rate;
+ __u8 channels;
+ __u8 sig_bits;
+} __packed;
+
+struct gb_audio_set_tx_data_size_request {
+ __le16 data_cport;
+ __le16 size;
+} __packed;
+
+struct gb_audio_activate_tx_request {
+ __le16 data_cport;
+} __packed;
+
+struct gb_audio_deactivate_tx_request {
+ __le16 data_cport;
+} __packed;
+
+struct gb_audio_set_rx_data_size_request {
+ __le16 data_cport;
+ __le16 size;
+} __packed;
+
+struct gb_audio_activate_rx_request {
+ __le16 data_cport;
+} __packed;
+
+struct gb_audio_deactivate_rx_request {
+ __le16 data_cport;
+} __packed;
+
+struct gb_audio_jack_event_request {
+ __u8 widget_id;
+ __u8 jack_attribute;
+ __u8 event;
+} __packed;
+
+struct gb_audio_button_event_request {
+ __u8 widget_id;
+ __u8 button_id;
+ __u8 event;
+} __packed;
+
+struct gb_audio_streaming_event_request {
+ __le16 data_cport;
+ __u8 event;
+} __packed;
+
+struct gb_audio_send_data_request {
+ __le64 timestamp;
+ __u8 data[];
+} __packed;
+
+
+/* Log */
+
+/* operations */
+#define GB_LOG_TYPE_SEND_LOG 0x02
+
+/* length */
+#define GB_LOG_MAX_LEN 1024
+
+struct gb_log_send_log_request {
+ __le16 len;
+ __u8 msg[];
+} __packed;
+
+#endif /* __GREYBUS_PROTOCOLS_H */
+
diff --git a/include/linux/greybus/hd.h b/include/linux/greybus/hd.h
new file mode 100644
index 000000000000..718e2857054e
--- /dev/null
+++ b/include/linux/greybus/hd.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus Host Device
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ */
+
+#ifndef __HD_H
+#define __HD_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+struct gb_host_device;
+struct gb_message;
+
+struct gb_hd_driver {
+ size_t hd_priv_size;
+
+ int (*cport_allocate)(struct gb_host_device *hd, int cport_id,
+ unsigned long flags);
+ void (*cport_release)(struct gb_host_device *hd, u16 cport_id);
+ int (*cport_enable)(struct gb_host_device *hd, u16 cport_id,
+ unsigned long flags);
+ int (*cport_disable)(struct gb_host_device *hd, u16 cport_id);
+ int (*cport_connected)(struct gb_host_device *hd, u16 cport_id);
+ int (*cport_flush)(struct gb_host_device *hd, u16 cport_id);
+ int (*cport_shutdown)(struct gb_host_device *hd, u16 cport_id,
+ u8 phase, unsigned int timeout);
+ int (*cport_quiesce)(struct gb_host_device *hd, u16 cport_id,
+ size_t peer_space, unsigned int timeout);
+ int (*cport_clear)(struct gb_host_device *hd, u16 cport_id);
+
+ int (*message_send)(struct gb_host_device *hd, u16 dest_cport_id,
+ struct gb_message *message, gfp_t gfp_mask);
+ void (*message_cancel)(struct gb_message *message);
+ int (*latency_tag_enable)(struct gb_host_device *hd, u16 cport_id);
+ int (*latency_tag_disable)(struct gb_host_device *hd, u16 cport_id);
+ int (*output)(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
+ bool async);
+};
+
+struct gb_host_device {
+ struct device dev;
+ int bus_id;
+ const struct gb_hd_driver *driver;
+
+ struct list_head modules;
+ struct list_head connections;
+ struct ida cport_id_map;
+
+ /* Number of CPorts supported by the UniPro IP */
+ size_t num_cports;
+
+ /* Host device buffer constraints */
+ size_t buffer_size_max;
+
+ struct gb_svc *svc;
+ /* Private data for the host driver */
+ unsigned long hd_priv[] __aligned(sizeof(s64));
+};
+#define to_gb_host_device(d) container_of(d, struct gb_host_device, dev)
+
+int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id);
+void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id);
+int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
+ unsigned long flags);
+void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id);
+
+struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
+ struct device *parent,
+ size_t buffer_size_max,
+ size_t num_cports);
+int gb_hd_add(struct gb_host_device *hd);
+void gb_hd_del(struct gb_host_device *hd);
+void gb_hd_shutdown(struct gb_host_device *hd);
+void gb_hd_put(struct gb_host_device *hd);
+int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
+ bool in_irq);
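+
+/*
+ * Illustrative host-driver skeleton (hypothetical names, error handling
+ * omitted): hd_priv_size reserves per-host-device driver data, which the
+ * driver reaches through the hd_priv flexible array member.
+ *
+ *	static struct gb_hd_driver example_driver = {
+ *		.hd_priv_size	= sizeof(struct example_hd),
+ *		.message_send	= example_message_send,
+ *		.message_cancel	= example_message_cancel,
+ *	};
+ *
+ *	hd = gb_hd_create(&example_driver, parent, EXAMPLE_BUF_SIZE_MAX,
+ *			  EXAMPLE_NUM_CPORTS);
+ *	priv = (struct example_hd *)&hd->hd_priv;
+ *	ret = gb_hd_add(hd);
+ */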
+
+int gb_hd_init(void);
+void gb_hd_exit(void);
+
+#endif /* __HD_H */
diff --git a/include/linux/greybus/interface.h b/include/linux/greybus/interface.h
new file mode 100644
index 000000000000..ce4def881e6f
--- /dev/null
+++ b/include/linux/greybus/interface.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus Interface Block code
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ */
+
+#ifndef __INTERFACE_H
+#define __INTERFACE_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+enum gb_interface_type {
+ GB_INTERFACE_TYPE_INVALID = 0,
+ GB_INTERFACE_TYPE_UNKNOWN,
+ GB_INTERFACE_TYPE_DUMMY,
+ GB_INTERFACE_TYPE_UNIPRO,
+ GB_INTERFACE_TYPE_GREYBUS,
+};
+
+#define GB_INTERFACE_QUIRK_NO_CPORT_FEATURES BIT(0)
+#define GB_INTERFACE_QUIRK_NO_INIT_STATUS BIT(1)
+#define GB_INTERFACE_QUIRK_NO_GMP_IDS BIT(2)
+#define GB_INTERFACE_QUIRK_FORCED_DISABLE BIT(3)
+#define GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH BIT(4)
+#define GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE BIT(5)
+#define GB_INTERFACE_QUIRK_NO_PM BIT(6)
+
+struct gb_interface {
+ struct device dev;
+ struct gb_control *control;
+
+ struct list_head bundles;
+ struct list_head module_node;
+ struct list_head manifest_descs;
+ u8 interface_id; /* Physical location within the Endo */
+ u8 device_id;
+ u8 features; /* Feature flags set in the manifest */
+
+ enum gb_interface_type type;
+
+ u32 ddbl1_manufacturer_id;
+ u32 ddbl1_product_id;
+ u32 vendor_id;
+ u32 product_id;
+ u64 serial_number;
+
+ struct gb_host_device *hd;
+ struct gb_module *module;
+
+ unsigned long quirks;
+
+ struct mutex mutex;
+
+ bool disconnected;
+
+ bool ejected;
+ bool removed;
+ bool active;
+ bool enabled;
+ bool mode_switch;
+ bool dme_read;
+
+ struct work_struct mode_switch_work;
+ struct completion mode_switch_completion;
+};
+#define to_gb_interface(d) container_of(d, struct gb_interface, dev)
+
+struct gb_interface *gb_interface_create(struct gb_module *module,
+ u8 interface_id);
+int gb_interface_activate(struct gb_interface *intf);
+void gb_interface_deactivate(struct gb_interface *intf);
+int gb_interface_enable(struct gb_interface *intf);
+void gb_interface_disable(struct gb_interface *intf);
+int gb_interface_add(struct gb_interface *intf);
+void gb_interface_del(struct gb_interface *intf);
+void gb_interface_put(struct gb_interface *intf);
+void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
+ u32 mailbox);
+
+int gb_interface_request_mode_switch(struct gb_interface *intf);
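+
+/*
+ * Illustrative bring-up sequence (error handling omitted; the exact
+ * ordering is up to the core):
+ *
+ *	intf = gb_interface_create(module, interface_id);
+ *	gb_interface_activate(intf);	   (power on and identify)
+ *	gb_interface_enable(intf);	   (fetch manifest, create bundles)
+ *	gb_interface_add(intf);		   (register with the driver core)
+ */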
+
+#endif /* __INTERFACE_H */
diff --git a/include/linux/greybus/manifest.h b/include/linux/greybus/manifest.h
new file mode 100644
index 000000000000..830301b7a8bc
--- /dev/null
+++ b/include/linux/greybus/manifest.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus manifest parsing
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ */
+
+#ifndef __MANIFEST_H
+#define __MANIFEST_H
+
+#include <linux/types.h>
+
+struct gb_interface;
+bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size);
+
+#endif /* __MANIFEST_H */
diff --git a/include/linux/greybus/module.h b/include/linux/greybus/module.h
new file mode 100644
index 000000000000..3efe2133acfd
--- /dev/null
+++ b/include/linux/greybus/module.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus Module code
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ */
+
+#ifndef __MODULE_H
+#define __MODULE_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+struct gb_module {
+ struct device dev;
+ struct gb_host_device *hd;
+
+ struct list_head hd_node;
+
+ u8 module_id;
+ size_t num_interfaces;
+
+ bool disconnected;
+
+ struct gb_interface *interfaces[];
+};
+#define to_gb_module(d) container_of(d, struct gb_module, dev)
+
+struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
+ size_t num_interfaces);
+int gb_module_add(struct gb_module *module);
+void gb_module_del(struct gb_module *module);
+void gb_module_put(struct gb_module *module);
+
+#endif /* __MODULE_H */
diff --git a/include/linux/greybus/operation.h b/include/linux/greybus/operation.h
new file mode 100644
index 000000000000..cb8e4ef45222
--- /dev/null
+++ b/include/linux/greybus/operation.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus operations
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ */
+
+#ifndef __OPERATION_H
+#define __OPERATION_H
+
+#include <linux/completion.h>
+#include <linux/kref.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct gb_host_device;
+struct gb_operation;
+
+/* The default amount of time a request is given to complete */
+#define GB_OPERATION_TIMEOUT_DEFAULT 1000 /* milliseconds */
+
+/*
+ * The top bit of the type in an operation message header indicates
+ * whether the message is a request (bit clear) or response (bit set)
+ */
+#define GB_MESSAGE_TYPE_RESPONSE ((u8)0x80)
+
+enum gb_operation_result {
+ GB_OP_SUCCESS = 0x00,
+ GB_OP_INTERRUPTED = 0x01,
+ GB_OP_TIMEOUT = 0x02,
+ GB_OP_NO_MEMORY = 0x03,
+ GB_OP_PROTOCOL_BAD = 0x04,
+ GB_OP_OVERFLOW = 0x05,
+ GB_OP_INVALID = 0x06,
+ GB_OP_RETRY = 0x07,
+ GB_OP_NONEXISTENT = 0x08,
+ GB_OP_UNKNOWN_ERROR = 0xfe,
+ GB_OP_MALFUNCTION = 0xff,
+};
+
+#define GB_OPERATION_MESSAGE_SIZE_MIN sizeof(struct gb_operation_msg_hdr)
+#define GB_OPERATION_MESSAGE_SIZE_MAX U16_MAX
+
+/*
+ * Protocol code should only examine the payload and payload_size fields, and
+ * host-controller drivers may use the hcpriv field. All other fields are
+ * intended to be private to the operations core code.
+ */
+struct gb_message {
+ struct gb_operation *operation;
+ struct gb_operation_msg_hdr *header;
+
+ void *payload;
+ size_t payload_size;
+
+ void *buffer;
+
+ void *hcpriv;
+};
+
+#define GB_OPERATION_FLAG_INCOMING BIT(0)
+#define GB_OPERATION_FLAG_UNIDIRECTIONAL BIT(1)
+#define GB_OPERATION_FLAG_SHORT_RESPONSE BIT(2)
+#define GB_OPERATION_FLAG_CORE BIT(3)
+
+#define GB_OPERATION_FLAG_USER_MASK (GB_OPERATION_FLAG_SHORT_RESPONSE | \
+ GB_OPERATION_FLAG_UNIDIRECTIONAL)
+
+/*
+ * A Greybus operation is a remote procedure call performed over a
+ * connection between two UniPro interfaces.
+ *
+ * Every operation consists of a request message sent to the other
+ * end of the connection coupled with a reply message returned to
+ * the sender. Every operation has a type, whose interpretation is
+ * dependent on the protocol associated with the connection.
+ *
+ * Only four things in an operation structure are intended to be
+ * directly usable by protocol handlers: the operation's connection
+ * pointer; the operation type; the request message payload (and
+ * size); and the response message payload (and size). Note that a
+ * message with a 0-byte payload has a null message payload pointer.
+ *
+ * In addition, every operation has a result, which is an errno
+ * value. Protocol handlers access the operation result using
+ * gb_operation_result().
+ */
+typedef void (*gb_operation_callback)(struct gb_operation *);
+struct gb_operation {
+ struct gb_connection *connection;
+ struct gb_message *request;
+ struct gb_message *response;
+
+ unsigned long flags;
+ u8 type;
+ u16 id;
+ int errno; /* Operation result */
+
+ struct work_struct work;
+ gb_operation_callback callback;
+ struct completion completion;
+ struct timer_list timer;
+
+ struct kref kref;
+ atomic_t waiters;
+
+ int active;
+ struct list_head links; /* connection->operations */
+
+ void *private;
+};
+
+static inline bool
+gb_operation_is_incoming(struct gb_operation *operation)
+{
+ return operation->flags & GB_OPERATION_FLAG_INCOMING;
+}
+
+static inline bool
+gb_operation_is_unidirectional(struct gb_operation *operation)
+{
+ return operation->flags & GB_OPERATION_FLAG_UNIDIRECTIONAL;
+}
+
+static inline bool
+gb_operation_short_response_allowed(struct gb_operation *operation)
+{
+ return operation->flags & GB_OPERATION_FLAG_SHORT_RESPONSE;
+}
+
+static inline bool gb_operation_is_core(struct gb_operation *operation)
+{
+ return operation->flags & GB_OPERATION_FLAG_CORE;
+}
+
+void gb_connection_recv(struct gb_connection *connection,
+ void *data, size_t size);
+
+int gb_operation_result(struct gb_operation *operation);
+
+size_t gb_operation_get_payload_size_max(struct gb_connection *connection);
+struct gb_operation *
+gb_operation_create_flags(struct gb_connection *connection,
+ u8 type, size_t request_size,
+ size_t response_size, unsigned long flags,
+ gfp_t gfp);
+
+static inline struct gb_operation *
+gb_operation_create(struct gb_connection *connection,
+ u8 type, size_t request_size,
+ size_t response_size, gfp_t gfp)
+{
+ return gb_operation_create_flags(connection, type, request_size,
+ response_size, 0, gfp);
+}
+
+struct gb_operation *
+gb_operation_create_core(struct gb_connection *connection,
+ u8 type, size_t request_size,
+ size_t response_size, unsigned long flags,
+ gfp_t gfp);
+
+void gb_operation_get(struct gb_operation *operation);
+void gb_operation_put(struct gb_operation *operation);
+
+bool gb_operation_response_alloc(struct gb_operation *operation,
+ size_t response_size, gfp_t gfp);
+
+int gb_operation_request_send(struct gb_operation *operation,
+ gb_operation_callback callback,
+ unsigned int timeout,
+ gfp_t gfp);
+int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
+ unsigned int timeout);
+static inline int
+gb_operation_request_send_sync(struct gb_operation *operation)
+{
+ return gb_operation_request_send_sync_timeout(operation,
+ GB_OPERATION_TIMEOUT_DEFAULT);
+}
+
+void gb_operation_cancel(struct gb_operation *operation, int errno);
+void gb_operation_cancel_incoming(struct gb_operation *operation, int errno);
+
+void greybus_message_sent(struct gb_host_device *hd,
+ struct gb_message *message, int status);
+
+int gb_operation_sync_timeout(struct gb_connection *connection, int type,
+ void *request, int request_size,
+ void *response, int response_size,
+ unsigned int timeout);
+int gb_operation_unidirectional_timeout(struct gb_connection *connection,
+ int type, void *request, int request_size,
+ unsigned int timeout);
+
+static inline int gb_operation_sync(struct gb_connection *connection, int type,
+ void *request, int request_size,
+ void *response, int response_size)
+{
+ return gb_operation_sync_timeout(connection, type,
+ request, request_size, response, response_size,
+ GB_OPERATION_TIMEOUT_DEFAULT);
+}
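+
+/*
+ * Illustrative usage (the gb_foo_* names are hypothetical): a protocol
+ * driver issuing a synchronous request with the default timeout and
+ * checking the negative-errno result:
+ *
+ *	struct gb_foo_request request = { .value = cpu_to_le32(val) };
+ *	struct gb_foo_response response;
+ *	int ret;
+ *
+ *	ret = gb_operation_sync(connection, GB_FOO_TYPE_SET_VALUE,
+ *				&request, sizeof(request),
+ *				&response, sizeof(response));
+ *	if (ret)
+ *		return ret;
+ */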
+
+static inline int gb_operation_unidirectional(struct gb_connection *connection,
+ int type, void *request, int request_size)
+{
+ return gb_operation_unidirectional_timeout(connection, type,
+ request, request_size, GB_OPERATION_TIMEOUT_DEFAULT);
+}
+
+static inline void *gb_operation_get_data(struct gb_operation *operation)
+{
+ return operation->private;
+}
+
+static inline void gb_operation_set_data(struct gb_operation *operation,
+ void *data)
+{
+ operation->private = data;
+}
+
+int gb_operation_init(void);
+void gb_operation_exit(void);
+
+#endif /* !__OPERATION_H */
diff --git a/include/linux/greybus/svc.h b/include/linux/greybus/svc.h
new file mode 100644
index 000000000000..5afaf5f06856
--- /dev/null
+++ b/include/linux/greybus/svc.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus SVC code
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ */
+
+#ifndef __SVC_H
+#define __SVC_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+struct gb_svc_l2_timer_cfg;
+
+#define GB_SVC_CPORT_FLAG_E2EFC BIT(0)
+#define GB_SVC_CPORT_FLAG_CSD_N BIT(1)
+#define GB_SVC_CPORT_FLAG_CSV_N BIT(2)
+
+enum gb_svc_state {
+ GB_SVC_STATE_RESET,
+ GB_SVC_STATE_PROTOCOL_VERSION,
+ GB_SVC_STATE_SVC_HELLO,
+};
+
+enum gb_svc_watchdog_bite {
+ GB_SVC_WATCHDOG_BITE_RESET_UNIPRO = 0,
+ GB_SVC_WATCHDOG_BITE_PANIC_KERNEL,
+};
+
+struct gb_svc_watchdog;
+
+struct svc_debugfs_pwrmon_rail {
+ u8 id;
+ struct gb_svc *svc;
+};
+
+struct gb_svc {
+ struct device dev;
+
+ struct gb_host_device *hd;
+ struct gb_connection *connection;
+ enum gb_svc_state state;
+ struct ida device_id_map;
+ struct workqueue_struct *wq;
+
+ u16 endo_id;
+ u8 ap_intf_id;
+
+ u8 protocol_major;
+ u8 protocol_minor;
+
+ struct gb_svc_watchdog *watchdog;
+ enum gb_svc_watchdog_bite action;
+
+ struct dentry *debugfs_dentry;
+ struct svc_debugfs_pwrmon_rail *pwrmon_rails;
+};
+#define to_gb_svc(d) container_of(d, struct gb_svc, dev)
+
+struct gb_svc *gb_svc_create(struct gb_host_device *hd);
+int gb_svc_add(struct gb_svc *svc);
+void gb_svc_del(struct gb_svc *svc);
+void gb_svc_put(struct gb_svc *svc);
+
+int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
+ u8 measurement_type, u32 *value);
+int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id);
+int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
+ u8 intf2_id, u8 dev2_id);
+void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id);
+int gb_svc_connection_create(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
+ u8 intf2_id, u16 cport2_id, u8 cport_flags);
+void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
+ u8 intf2_id, u16 cport2_id);
+int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id);
+int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable);
+int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable);
+int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable);
+int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type);
+int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id);
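+
+/*
+ * Illustrative sketch (not normative; variable names hypothetical):
+ * the AP assigns a device id, creates a route between itself and the
+ * interface, then creates per-CPort connections over that route:
+ *
+ *	gb_svc_intf_device_id(svc, intf_id, device_id);
+ *	gb_svc_route_create(svc, svc->ap_intf_id, ap_device_id,
+ *			    intf_id, device_id);
+ *	gb_svc_connection_create(svc, svc->ap_intf_id, ap_cport_id,
+ *				 intf_id, cport_id,
+ *				 GB_SVC_CPORT_FLAG_CSV_N |
+ *				 GB_SVC_CPORT_FLAG_E2EFC);
+ */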
+
+int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
+ u32 *value);
+int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
+ u32 value);
+int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
+ u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
+ u8 tx_amplitude, u8 tx_hs_equalizer,
+ u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
+ u8 flags, u32 quirks,
+ struct gb_svc_l2_timer_cfg *local,
+ struct gb_svc_l2_timer_cfg *remote);
+int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id);
+int gb_svc_ping(struct gb_svc *svc);
+int gb_svc_watchdog_create(struct gb_svc *svc);
+void gb_svc_watchdog_destroy(struct gb_svc *svc);
+bool gb_svc_watchdog_enabled(struct gb_svc *svc);
+int gb_svc_watchdog_enable(struct gb_svc *svc);
+int gb_svc_watchdog_disable(struct gb_svc *svc);
+
+int gb_svc_protocol_init(void);
+void gb_svc_protocol_exit(void);
+
+#endif /* __SVC_H */
diff --git a/include/linux/group_cpus.h b/include/linux/group_cpus.h
new file mode 100644
index 000000000000..e42807ec61f6
--- /dev/null
+++ b/include/linux/group_cpus.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 Thomas Gleixner.
+ * Copyright (C) 2016-2017 Christoph Hellwig.
+ */
+
+#ifndef __LINUX_GROUP_CPUS_H
+#define __LINUX_GROUP_CPUS_H
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+
+struct cpumask *group_cpus_evenly(unsigned int numgrps);
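+
+/*
+ * Illustrative usage (hypothetical driver code): spread nvec interrupt
+ * vectors evenly across CPUs; the caller frees the returned array.
+ *
+ *	struct cpumask *masks = group_cpus_evenly(nvec);
+ *
+ *	if (masks) {
+ *		for (i = 0; i < nvec; i++)
+ *			irq_set_affinity(irq_base + i, &masks[i]);
+ *		kfree(masks);
+ *	}
+ */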
+
+#endif
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index c683996110b1..d57cab4d4c06 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -1,31 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H
+#include <linux/context_tracking_state.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
+#include <linux/sched.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>
-
extern void synchronize_irq(unsigned int irq);
extern bool synchronize_hardirq(unsigned int irq);
-#if defined(CONFIG_TINY_RCU)
-
-static inline void rcu_nmi_enter(void)
-{
-}
+#ifdef CONFIG_NO_HZ_FULL
+void __rcu_irq_enter_check_tick(void);
+#else
+static inline void __rcu_irq_enter_check_tick(void) { }
+#endif
-static inline void rcu_nmi_exit(void)
+static __always_inline void rcu_irq_enter_check_tick(void)
{
+ if (context_tracking_enabled())
+ __rcu_irq_enter_check_tick();
}
-#else
-extern void rcu_nmi_enter(void);
-extern void rcu_nmi_exit(void);
-#endif
-
/*
* It is safe to do non-atomic ops on ->hardirq_context,
* because NMI handlers may not preempt and the ops are
@@ -34,51 +33,111 @@ extern void rcu_nmi_exit(void);
*/
#define __irq_enter() \
do { \
- account_irq_enter_time(current); \
preempt_count_add(HARDIRQ_OFFSET); \
- trace_hardirq_enter(); \
+ lockdep_hardirq_enter(); \
+ account_hardirq_enter(current); \
+ } while (0)
+
+/*
+ * Like __irq_enter() without time accounting for fast
+ * interrupts, e.g. reschedule IPI where time accounting
+ * is more expensive than the actual interrupt.
+ */
+#define __irq_enter_raw() \
+ do { \
+ preempt_count_add(HARDIRQ_OFFSET); \
+ lockdep_hardirq_enter(); \
} while (0)
/*
* Enter irq context (on NO_HZ, update jiffies):
*/
-extern void irq_enter(void);
+void irq_enter(void);
+/*
+ * Like irq_enter(), but RCU is already watching.
+ */
+void irq_enter_rcu(void);
/*
* Exit irq context without processing softirqs:
*/
#define __irq_exit() \
do { \
- trace_hardirq_exit(); \
- account_irq_exit_time(current); \
+ account_hardirq_exit(current); \
+ lockdep_hardirq_exit(); \
+ preempt_count_sub(HARDIRQ_OFFSET); \
+ } while (0)
+
+/*
+ * Like __irq_exit() without time accounting
+ */
+#define __irq_exit_raw() \
+ do { \
+ lockdep_hardirq_exit(); \
preempt_count_sub(HARDIRQ_OFFSET); \
} while (0)
/*
* Exit irq context and process softirqs if needed:
*/
-extern void irq_exit(void);
+void irq_exit(void);
-#define nmi_enter() \
+/*
+ * Like irq_exit(), but return with RCU watching.
+ */
+void irq_exit_rcu(void);
+
+#ifndef arch_nmi_enter
+#define arch_nmi_enter() do { } while (0)
+#define arch_nmi_exit() do { } while (0)
+#endif
+
+/*
+ * NMI vs Tracing
+ * --------------
+ *
+ * We must not land in a tracer until (or after) we've changed preempt_count
+ * such that in_nmi() becomes true. To that effect all NMI C entry points must
+ * be marked 'notrace' and call nmi_enter() as soon as possible.
+ */
+
+/*
+ * nmi_enter() can nest up to 15 times; see NMI_BITS.
+ */
+#define __nmi_enter() \
do { \
- printk_nmi_enter(); \
lockdep_off(); \
+ arch_nmi_enter(); \
+ BUG_ON(in_nmi() == NMI_MASK); \
+ __preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
+ } while (0)
+
+#define nmi_enter() \
+ do { \
+ __nmi_enter(); \
+ lockdep_hardirq_enter(); \
+ ct_nmi_enter(); \
+ instrumentation_begin(); \
ftrace_nmi_enter(); \
- BUG_ON(in_nmi()); \
- preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
- rcu_nmi_enter(); \
- trace_hardirq_enter(); \
+ instrumentation_end(); \
} while (0)
-#define nmi_exit() \
+#define __nmi_exit() \
do { \
- trace_hardirq_exit(); \
- rcu_nmi_exit(); \
BUG_ON(!in_nmi()); \
- preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
- ftrace_nmi_exit(); \
+ __preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
+ arch_nmi_exit(); \
lockdep_on(); \
- printk_nmi_exit(); \
+ } while (0)
+
+#define nmi_exit() \
+ do { \
+ instrumentation_begin(); \
+ ftrace_nmi_exit(); \
+ instrumentation_end(); \
+ ct_nmi_exit(); \
+ lockdep_hardirq_exit(); \
+ __nmi_exit(); \
} while (0)
#endif /* LINUX_HARDIRQ_H */
diff --git a/include/linux/hash.h b/include/linux/hash.h
index ad6fa21d977b..38edaa08f862 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -62,10 +62,7 @@ static inline u32 __hash_32_generic(u32 val)
return val * GOLDEN_RATIO_32;
}
-#ifndef HAVE_ARCH_HASH_32
-#define hash_32 hash_32_generic
-#endif
-static inline u32 hash_32_generic(u32 val, unsigned int bits)
+static inline u32 hash_32(u32 val, unsigned int bits)
{
/* High bits are more random, so use them. */
return __hash_32(val) >> (32 - bits);
diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h
index 082dc1bd0801..f6c666730b8c 100644
--- a/include/linux/hashtable.h
+++ b/include/linux/hashtable.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Statically sized hash table implementation
* (C) 2012 Sasha Levin <levinsasha928@gmail.com>
@@ -144,7 +145,7 @@ static inline void hash_del_rcu(struct hlist_node *node)
* hash entry
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
- * @tmp: a &struct used for temporary storage
+ * @tmp: a &struct hlist_node used for temporary storage
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
@@ -172,9 +173,9 @@ static inline void hash_del_rcu(struct hlist_node *node)
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
-#define hash_for_each_possible_rcu(name, obj, member, key) \
+#define hash_for_each_possible_rcu(name, obj, member, key, cond...) \
hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\
- member)
+ member, ## cond)
/**
* hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing
@@ -196,7 +197,7 @@ static inline void hash_del_rcu(struct hlist_node *node)
* same bucket safe against removals
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
- * @tmp: a &struct used for temporary storage
+ * @tmp: a &struct hlist_node used for temporary storage
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
diff --git a/include/linux/hashtable_api.h b/include/linux/hashtable_api.h
new file mode 100644
index 000000000000..c268ac2c5c0e
--- /dev/null
+++ b/include/linux/hashtable_api.h
@@ -0,0 +1 @@
+#include <linux/hashtable.h>
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index 97585d9679f3..630a388035f1 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Generic HDLC support routines for Linux
*
* Copyright (C) 1999-2005 Krzysztof Halasa <khc@pm.waw.pl>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
*/
#ifndef __HDLC_H
#define __HDLC_H
@@ -25,7 +22,7 @@ struct hdlc_proto {
void (*start)(struct net_device *dev); /* if open & DCD */
void (*stop)(struct net_device *dev); /* if open & !DCD */
void (*detach)(struct net_device *dev);
- int (*ioctl)(struct net_device *dev, struct ifreq *ifr);
+ int (*ioctl)(struct net_device *dev, struct if_settings *ifs);
__be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev);
int (*netif_rx)(struct sk_buff *skb);
netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev);
@@ -57,7 +54,7 @@ typedef struct hdlc_device {
/* Exported from hdlc module */
/* Called by hardware driver when a user requests HDLC service */
-int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+int hdlc_ioctl(struct net_device *dev, struct if_settings *ifs);
/* Must be used by hardware driver on module startup/exit */
#define register_hdlc_device(dev) register_netdev(dev)
diff --git a/include/linux/hdlcdrv.h b/include/linux/hdlcdrv.h
index be3be25bb898..5d70c3f98f5b 100644
--- a/include/linux/hdlcdrv.h
+++ b/include/linux/hdlcdrv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* hdlcdrv.h -- HDLC packet radio network driver.
* The Linux soundcard driver for 1200 baud and 9600 baud packet radio
@@ -78,7 +79,7 @@ struct hdlcdrv_ops {
*/
int (*open)(struct net_device *);
int (*close)(struct net_device *);
- int (*ioctl)(struct net_device *, struct ifreq *,
+ int (*ioctl)(struct net_device *, void __user *,
struct hdlcdrv_ioctl *, int);
};
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index d271ff23984f..2f4dcc8d060e 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -27,11 +27,27 @@
#include <linux/types.h>
#include <linux/device.h>
+enum hdmi_packet_type {
+ HDMI_PACKET_TYPE_NULL = 0x00,
+ HDMI_PACKET_TYPE_AUDIO_CLOCK_REGEN = 0x01,
+ HDMI_PACKET_TYPE_AUDIO_SAMPLE = 0x02,
+ HDMI_PACKET_TYPE_GENERAL_CONTROL = 0x03,
+ HDMI_PACKET_TYPE_ACP = 0x04,
+ HDMI_PACKET_TYPE_ISRC1 = 0x05,
+ HDMI_PACKET_TYPE_ISRC2 = 0x06,
+ HDMI_PACKET_TYPE_ONE_BIT_AUDIO_SAMPLE = 0x07,
+ HDMI_PACKET_TYPE_DST_AUDIO = 0x08,
+ HDMI_PACKET_TYPE_HBR_AUDIO_STREAM = 0x09,
+ HDMI_PACKET_TYPE_GAMUT_METADATA = 0x0a,
+ /* + enum hdmi_infoframe_type */
+};
+
enum hdmi_infoframe_type {
HDMI_INFOFRAME_TYPE_VENDOR = 0x81,
HDMI_INFOFRAME_TYPE_AVI = 0x82,
HDMI_INFOFRAME_TYPE_SPD = 0x83,
HDMI_INFOFRAME_TYPE_AUDIO = 0x84,
+ HDMI_INFOFRAME_TYPE_DRM = 0x87,
};
#define HDMI_IEEE_OUI 0x000c03
@@ -40,6 +56,8 @@ enum hdmi_infoframe_type {
#define HDMI_AVI_INFOFRAME_SIZE 13
#define HDMI_SPD_INFOFRAME_SIZE 25
#define HDMI_AUDIO_INFOFRAME_SIZE 10
+#define HDMI_DRM_INFOFRAME_SIZE 26
+#define HDMI_VENDOR_INFOFRAME_SIZE 4
#define HDMI_INFOFRAME_SIZE(type) \
(HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE)
@@ -101,8 +119,8 @@ enum hdmi_extended_colorimetry {
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601,
HDMI_EXTENDED_COLORIMETRY_XV_YCC_709,
HDMI_EXTENDED_COLORIMETRY_S_YCC_601,
- HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601,
- HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB,
+ HDMI_EXTENDED_COLORIMETRY_OPYCC_601,
+ HDMI_EXTENDED_COLORIMETRY_OPRGB,
/* The following EC values are only defined in CEA-861-F. */
HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM,
@@ -137,6 +155,17 @@ enum hdmi_content_type {
HDMI_CONTENT_TYPE_GAME,
};
+enum hdmi_metadata_type {
+ HDMI_STATIC_METADATA_TYPE1 = 0,
+};
+
+enum hdmi_eotf {
+ HDMI_EOTF_TRADITIONAL_GAMMA_SDR,
+ HDMI_EOTF_TRADITIONAL_GAMMA_HDR,
+ HDMI_EOTF_SMPTE_ST2084,
+ HDMI_EOTF_BT_2100_HLG,
+};
+
struct hdmi_avi_infoframe {
enum hdmi_infoframe_type type;
unsigned char version;
@@ -160,9 +189,39 @@ struct hdmi_avi_infoframe {
unsigned short right_bar;
};
-int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
+/* DRM Infoframe as per CTA 861.G spec */
+struct hdmi_drm_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ enum hdmi_eotf eotf;
+ enum hdmi_metadata_type metadata_type;
+ struct {
+ u16 x, y;
+ } display_primaries[3];
+ struct {
+ u16 x, y;
+ } white_point;
+ u16 max_display_mastering_luminance;
+ u16 min_display_mastering_luminance;
+ u16 max_cll;
+ u16 max_fall;
+};
+
+void hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
size_t size);
+ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_avi_infoframe_check(struct hdmi_avi_infoframe *frame);
+int hdmi_drm_infoframe_init(struct hdmi_drm_infoframe *frame);
+ssize_t hdmi_drm_infoframe_pack(struct hdmi_drm_infoframe *frame, void *buffer,
+ size_t size);
+ssize_t hdmi_drm_infoframe_pack_only(const struct hdmi_drm_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_drm_infoframe_check(struct hdmi_drm_infoframe *frame);
+int hdmi_drm_infoframe_unpack_only(struct hdmi_drm_infoframe *frame,
+ const void *buffer, size_t size);
enum hdmi_spd_sdi {
HDMI_SPD_SDI_UNKNOWN,
@@ -194,6 +253,9 @@ int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
const char *vendor, const char *product);
ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
size_t size);
+ssize_t hdmi_spd_infoframe_pack_only(const struct hdmi_spd_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_spd_infoframe_check(struct hdmi_spd_infoframe *frame);
enum hdmi_audio_coding_type {
HDMI_AUDIO_CODING_TYPE_STREAM,
@@ -272,6 +334,14 @@ struct hdmi_audio_infoframe {
int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame);
ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
void *buffer, size_t size);
+ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_audio_infoframe_check(const struct hdmi_audio_infoframe *frame);
+
+struct dp_sdp;
+ssize_t
+hdmi_audio_infoframe_pack_for_dp(const struct hdmi_audio_infoframe *frame,
+ struct dp_sdp *sdp, u8 dp_version);
enum hdmi_3d_structure {
HDMI_3D_STRUCTURE_INVALID = -1,
@@ -296,9 +366,39 @@ struct hdmi_vendor_infoframe {
unsigned int s3d_ext_data;
};
+/* HDR Metadata as per 861.G spec */
+struct hdr_static_metadata {
+ __u8 eotf;
+ __u8 metadata_type;
+ __u16 max_cll;
+ __u16 max_fall;
+ __u16 min_cll;
+};
+
+/**
+ * struct hdr_sink_metadata - HDR sink metadata
+ *
+ * Metadata Information read from Sink's EDID
+ */
+struct hdr_sink_metadata {
+ /**
+ * @metadata_type: Static_Metadata_Descriptor_ID.
+ */
+ __u32 metadata_type;
+ /**
+ * @hdmi_type1: HDR Metadata Infoframe.
+ */
+ union {
+ struct hdr_static_metadata hdmi_type1;
+ };
+};
+
int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame);
ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
void *buffer, size_t size);
+ssize_t hdmi_vendor_infoframe_pack_only(const struct hdmi_vendor_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_vendor_infoframe_check(struct hdmi_vendor_infoframe *frame);
union hdmi_vendor_any_infoframe {
struct {
@@ -317,6 +417,7 @@ union hdmi_vendor_any_infoframe {
* @spd: spd infoframe
* @vendor: union of all vendor infoframes
* @audio: audio infoframe
+ * @drm: Dynamic Range and Mastering infoframe
*
* This is used by the generic pack function. This works since all infoframes
* have the same header which also indicates which type of infoframe should be
@@ -328,12 +429,17 @@ union hdmi_infoframe {
struct hdmi_spd_infoframe spd;
union hdmi_vendor_any_infoframe vendor;
struct hdmi_audio_infoframe audio;
+ struct hdmi_drm_infoframe drm;
};
-ssize_t
-hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
-int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer);
+ssize_t hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer,
+ size_t size);
+ssize_t hdmi_infoframe_pack_only(const union hdmi_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_infoframe_check(union hdmi_infoframe *frame);
+int hdmi_infoframe_unpack(union hdmi_infoframe *frame,
+ const void *buffer, size_t size);
void hdmi_infoframe_log(const char *level, struct device *dev,
- union hdmi_infoframe *frame);
+ const union hdmi_infoframe *frame);
#endif /* _DRM_HDMI_H */
diff --git a/include/linux/hex.h b/include/linux/hex.h
new file mode 100644
index 000000000000..2618382e5b0c
--- /dev/null
+++ b/include/linux/hex.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_HEX_H
+#define _LINUX_HEX_H
+
+#include <linux/types.h>
+
+extern const char hex_asc[];
+#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
+#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
+
+static inline char *hex_byte_pack(char *buf, u8 byte)
+{
+ *buf++ = hex_asc_hi(byte);
+ *buf++ = hex_asc_lo(byte);
+ return buf;
+}
+
+extern const char hex_asc_upper[];
+#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
+#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
+
+static inline char *hex_byte_pack_upper(char *buf, u8 byte)
+{
+ *buf++ = hex_asc_upper_hi(byte);
+ *buf++ = hex_asc_upper_lo(byte);
+ return buf;
+}
+
+extern int hex_to_bin(unsigned char ch);
+extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
+extern char *bin2hex(char *dst, const void *src, size_t count);
+
+bool mac_pton(const char *s, u8 *mac);
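+
+/*
+ * Illustrative usage: formatting a 6-byte MAC address as a
+ * NUL-terminated lower-case hex string.
+ *
+ *	u8 mac[6];
+ *	char buf[2 * sizeof(mac) + 1];
+ *
+ *	bin2hex(buf, mac, sizeof(mac));
+ *	buf[2 * sizeof(mac)] = '\0';
+ */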
+
+#endif
diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h
index 8663f216c563..ea7b23d13bfd 100644
--- a/include/linux/hid-debug.h
+++ b/include/linux/hid-debug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __HID_DEBUG_H
#define __HID_DEBUG_H
@@ -6,25 +7,14 @@
*/
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifdef CONFIG_DEBUG_FS
+#include <linux/kfifo.h>
+
#define HID_DEBUG_BUFSIZE 512
+#define HID_DEBUG_FIFOSIZE 512
void hid_dump_input(struct hid_device *, struct hid_usage *, __s32);
void hid_dump_report(struct hid_device *, int , u8 *, int);
@@ -37,11 +27,8 @@ void hid_debug_init(void);
void hid_debug_exit(void);
void hid_debug_event(struct hid_device *, char *);
-
struct hid_debug_list {
- char *hid_debug_buf;
- int head;
- int tail;
+ DECLARE_KFIFO_PTR(hid_debug_fifo, char);
struct fasync_struct *fasync;
struct hid_device *hdev;
struct list_head node;
@@ -64,4 +51,3 @@ struct hid_debug_list {
#endif
#endif
-
diff --git a/include/linux/hid-roccat.h b/include/linux/hid-roccat.h
index 24e1ca01f9a0..3214fb0815fc 100644
--- a/include/linux/hid-roccat.h
+++ b/include/linux/hid-roccat.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __HID_ROCCAT_H
#define __HID_ROCCAT_H
@@ -6,10 +7,6 @@
*/
/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
*/
#include <linux/hid.h>
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index fc7aae64dcde..c27329e2a5ad 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* HID Sensors Driver
* Copyright (c) 2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
#ifndef _HID_SENSORS_HUB_H
#define _HID_SENSORS_HUB_H
@@ -163,7 +150,7 @@ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
* @info: return information about attribute after parsing report
*
* Parses report and returns the attribute information such as report id,
-* field index, units and exponet etc.
+* field index, units and exponent etc.
*/
int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
u8 type,
@@ -177,9 +164,10 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
* @attr_usage_id: Attribute usage id as per spec
* @report_id: Report id to look for
* @flag: Synchronous or asynchronous read
+* @is_signed: If true then fields < 32 bits will be sign-extended
*
* Issues a synchronous or asynchronous read request for an input attribute.
-* Returns data upto 32 bits.
+* Return: data up to 32 bits.
*/
enum sensor_hub_read_flags {
@@ -190,7 +178,8 @@ enum sensor_hub_read_flags {
int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
u32 usage_id,
u32 attr_usage_id, u32 report_id,
- enum sensor_hub_read_flags flag
+ enum sensor_hub_read_flags flag,
+ bool is_signed
);
/**
@@ -216,8 +205,9 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
* @buffer: buffer to copy output
*
* Used to get a field in feature report. For example this can get polling
-* interval, sensitivity, activate/deactivate state. On success it returns
-* number of bytes copied to buffer. On failure, it returns value < 0.
+* interval, sensitivity, activate/deactivate state.
+* Return: On success, it returns the number of bytes copied to buffer.
+* On failure, it returns value < 0.
*/
int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
u32 field_index, int buffer_size, void *buffer);
@@ -231,6 +221,7 @@ struct hid_sensor_common {
unsigned usage_id;
atomic_t data_ready;
atomic_t user_requested_state;
+ atomic_t runtime_pm_enable;
int poll_interval;
int raw_hystersis;
int latency_ms;
@@ -240,6 +231,7 @@ struct hid_sensor_common {
struct hid_sensor_hub_attribute_info report_state;
struct hid_sensor_hub_attribute_info power_state;
struct hid_sensor_hub_attribute_info sensitivity;
+ struct hid_sensor_hub_attribute_info sensitivity_rel;
struct hid_sensor_hub_attribute_info report_latency;
struct work_struct work;
};
@@ -257,11 +249,17 @@ static inline int hid_sensor_convert_exponent(int unit_expo)
int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
u32 usage_id,
- struct hid_sensor_common *st);
+ struct hid_sensor_common *st,
+ const u32 *sensitivity_addresses,
+ u32 sensitivity_addresses_len);
int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
int val1, int val2);
+int hid_sensor_write_raw_hyst_rel_value(struct hid_sensor_common *st, int val1,
+ int val2);
int hid_sensor_read_raw_hyst_value(struct hid_sensor_common *st,
int *val1, int *val2);
+int hid_sensor_read_raw_hyst_rel_value(struct hid_sensor_common *st,
+ int *val1, int *val2);
int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
int val1, int val2);
int hid_sensor_read_samp_freq_value(struct hid_sensor_common *st,
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index 76033e0420a7..13b1e65fbdcc 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* HID Sensors Driver
* Copyright (c) 2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
#ifndef _HID_SENSORS_IDS_H
#define _HID_SENSORS_IDS_H
@@ -141,6 +128,11 @@
#define HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND 0x15
/* Common selectors */
+#define HID_USAGE_SENSOR_PROP_DESC 0x200300
+#define HID_USAGE_SENSOR_PROP_FRIENDLY_NAME 0x200301
+#define HID_USAGE_SENSOR_PROP_MANUFACTURER 0x200305
+#define HID_USAGE_SENSOR_PROP_MODEL 0x200306
+#define HID_USAGE_SENSOR_PROP_SERIAL_NUM 0x200307
#define HID_USAGE_SENSOR_PROP_REPORT_INTERVAL 0x20030E
#define HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS 0x20030F
#define HID_USAGE_SENSOR_PROP_SENSITIVITY_RANGE_PCT 0x200310
@@ -158,6 +150,7 @@
/* Per data field properties */
#define HID_USAGE_SENSOR_DATA_MOD_NONE 0x00
#define HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS 0x1000
+#define HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_REL_PCT 0xE000
/* Power state enumerations */
#define HID_USAGE_SENSOR_PROP_POWER_STATE_UNDEFINED_ENUM 0x200850
@@ -171,4 +164,14 @@
#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM 0x200840
#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM 0x200841
+/* Custom Sensor (2000e1) */
+#define HID_USAGE_SENSOR_HINGE 0x20020B
+#define HID_USAGE_SENSOR_DATA_FIELD_LOCATION 0x200400
+#define HID_USAGE_SENSOR_DATA_FIELD_TIME_SINCE_SYS_BOOT 0x20052B
+#define HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_USAGE 0x200541
+#define HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE_BASE 0x200543
+/* Custom Sensor data (0 <= x <= 28) */
+#define HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(x) \
+ (HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE_BASE + (x))
+
#endif
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 5006f9b5d837..4e4c4fe36911 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -1,22 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 1999 Andreas Gal
* Copyright (c) 2000-2001 Vojtech Pavlik
* Copyright (c) 2006-2007 Jiri Kosina
*/
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
@@ -26,6 +14,7 @@
#define __HID_H
+#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
@@ -37,6 +26,7 @@
#include <linux/mutex.h>
#include <linux/power_supply.h>
#include <uapi/linux/hid.h>
+#include <linux/hid_bpf.h>
/*
* We parse each description item into this structure. Short items data
@@ -113,6 +103,7 @@ struct hid_item {
#define HID_COLLECTION_PHYSICAL 0
#define HID_COLLECTION_APPLICATION 1
#define HID_COLLECTION_LOGICAL 2
+#define HID_COLLECTION_NAMED_ARRAY 4
/*
* HID report descriptor global item tags
@@ -164,6 +155,8 @@ struct hid_item {
#define HID_UP_CONSUMER 0x000c0000
#define HID_UP_DIGITIZER 0x000d0000
#define HID_UP_PID 0x000f0000
+#define HID_UP_BATTERY 0x00850000
+#define HID_UP_CAMERA 0x00900000
#define HID_UP_HPVENDOR 0xff7f0000
#define HID_UP_HPVENDOR2 0xff010000
#define HID_UP_MSVENDOR 0xff000000
@@ -173,6 +166,8 @@ struct hid_item {
#define HID_UP_LOGIVENDOR3 0xff430000
#define HID_UP_LNVENDOR 0xffa00000
#define HID_UP_SENSOR 0x00200000
+#define HID_UP_ASUSVENDOR 0xff310000
+#define HID_UP_GOOGLEVENDOR 0xffd10000
#define HID_USAGE 0x0000ffff
@@ -188,6 +183,12 @@ struct hid_item {
* http://www.usb.org/developers/hidpage/HUTRR40RadioHIDUsagesFinal.pdf
*/
#define HID_GD_WIRELESS_RADIO_CTLS 0x0001000c
+/*
+ * System Multi-Axis, see:
+ * http://www.usb.org/developers/hidpage/HUTRR62_-_Generic_Desktop_CA_for_System_Multi-Axis_Controllers.txt
+ */
+#define HID_GD_SYSTEM_MULTIAXIS 0x0001000e
+
#define HID_GD_X 0x00010030
#define HID_GD_Y 0x00010031
#define HID_GD_Z 0x00010032
@@ -211,6 +212,7 @@ struct hid_item {
#define HID_GD_VBRZ 0x00010045
#define HID_GD_VNO 0x00010046
#define HID_GD_FEATURE 0x00010047
+#define HID_GD_RESOLUTION_MULTIPLIER 0x00010048
#define HID_GD_SYSTEM_CONTROL 0x00010080
#define HID_GD_UP 0x00010090
#define HID_GD_DOWN 0x00010091
@@ -224,12 +226,14 @@ struct hid_item {
#define HID_DC_BATTERYSTRENGTH 0x00060020
#define HID_CP_CONSUMER_CONTROL 0x000c0001
+#define HID_CP_AC_PAN 0x000c0238
#define HID_DG_DIGITIZER 0x000d0001
#define HID_DG_PEN 0x000d0002
#define HID_DG_LIGHTPEN 0x000d0003
#define HID_DG_TOUCHSCREEN 0x000d0004
#define HID_DG_TOUCHPAD 0x000d0005
+#define HID_DG_WHITEBOARD 0x000d0006
#define HID_DG_STYLUS 0x000d0020
#define HID_DG_PUCK 0x000d0021
#define HID_DG_FINGER 0x000d0022
@@ -239,6 +243,7 @@ struct hid_item {
#define HID_DG_TOUCH 0x000d0033
#define HID_DG_UNTOUCH 0x000d0034
#define HID_DG_TAP 0x000d0035
+#define HID_DG_TRANSDUCER_INDEX 0x000d0038
#define HID_DG_TABLETFUNCTIONKEY 0x000d0039
#define HID_DG_PROGRAMCHANGEKEY 0x000d003a
#define HID_DG_BATTERYSTRENGTH 0x000d003b
@@ -251,6 +256,15 @@ struct hid_item {
#define HID_DG_BARRELSWITCH 0x000d0044
#define HID_DG_ERASER 0x000d0045
#define HID_DG_TABLETPICK 0x000d0046
+#define HID_DG_PEN_COLOR 0x000d005c
+#define HID_DG_PEN_LINE_WIDTH 0x000d005e
+#define HID_DG_PEN_LINE_STYLE 0x000d0070
+#define HID_DG_PEN_LINE_STYLE_INK 0x000d0072
+#define HID_DG_PEN_LINE_STYLE_PENCIL 0x000d0073
+#define HID_DG_PEN_LINE_STYLE_HIGHLIGHTER 0x000d0074
+#define HID_DG_PEN_LINE_STYLE_CHISEL_MARKER 0x000d0075
+#define HID_DG_PEN_LINE_STYLE_BRUSH 0x000d0076
+#define HID_DG_PEN_LINE_STYLE_NO_PREFERENCE 0x000d0077
#define HID_CP_CONSUMERCONTROL 0x000c0001
#define HID_CP_NUMERICKEYPAD 0x000c0002
@@ -262,6 +276,8 @@ struct hid_item {
#define HID_CP_SELECTION 0x000c0080
#define HID_CP_MEDIASELECTION 0x000c0087
#define HID_CP_SELECTDISC 0x000c00ba
+#define HID_CP_VOLUMEUP 0x000c00e9
+#define HID_CP_VOLUMEDOWN 0x000c00ea
#define HID_CP_PLAYBACKSPEED 0x000c00f1
#define HID_CP_PROXIMITY 0x000c0109
#define HID_CP_SPEAKERSYSTEM 0x000c0160
@@ -280,6 +296,7 @@ struct hid_item {
#define HID_DG_DEVICECONFIG 0x000d000e
#define HID_DG_DEVICESETTINGS 0x000d0023
+#define HID_DG_AZIMUTH 0x000d003f
#define HID_DG_CONFIDENCE 0x000d0047
#define HID_DG_WIDTH 0x000d0048
#define HID_DG_HEIGHT 0x000d0049
@@ -288,31 +305,30 @@ struct hid_item {
#define HID_DG_DEVICEINDEX 0x000d0053
#define HID_DG_CONTACTCOUNT 0x000d0054
#define HID_DG_CONTACTMAX 0x000d0055
+#define HID_DG_SCANTIME 0x000d0056
+#define HID_DG_SURFACESWITCH 0x000d0057
+#define HID_DG_BUTTONSWITCH 0x000d0058
#define HID_DG_BUTTONTYPE 0x000d0059
#define HID_DG_BARRELSWITCH2 0x000d005a
#define HID_DG_TOOLSERIALNUMBER 0x000d005b
+#define HID_DG_LATENCYMODE 0x000d0060
-/*
- * HID report types --- Ouch! HID spec says 1 2 3!
- */
-
-#define HID_INPUT_REPORT 0
-#define HID_OUTPUT_REPORT 1
-#define HID_FEATURE_REPORT 2
+#define HID_BAT_ABSOLUTESTATEOFCHARGE 0x00850065
+#define HID_BAT_CHARGING 0x00850044
-#define HID_REPORT_TYPES 3
+#define HID_VD_ASUS_CUSTOM_MEDIA_KEYS 0xff310076
/*
* HID connect requests
*/
-#define HID_CONNECT_HIDINPUT 0x01
-#define HID_CONNECT_HIDINPUT_FORCE 0x02
-#define HID_CONNECT_HIDRAW 0x04
-#define HID_CONNECT_HIDDEV 0x08
-#define HID_CONNECT_HIDDEV_FORCE 0x10
-#define HID_CONNECT_FF 0x20
-#define HID_CONNECT_DRIVER 0x40
+#define HID_CONNECT_HIDINPUT BIT(0)
+#define HID_CONNECT_HIDINPUT_FORCE BIT(1)
+#define HID_CONNECT_HIDRAW BIT(2)
+#define HID_CONNECT_HIDDEV BIT(3)
+#define HID_CONNECT_HIDDEV_FORCE BIT(4)
+#define HID_CONNECT_FF BIT(5)
+#define HID_CONNECT_DRIVER BIT(6)
#define HID_CONNECT_DEFAULT (HID_CONNECT_HIDINPUT|HID_CONNECT_HIDRAW| \
HID_CONNECT_HIDDEV|HID_CONNECT_FF)
@@ -320,29 +336,34 @@ struct hid_item {
* HID device quirks.
*/
-/*
+/*
* Increase this if you need to configure more HID quirks at module load time
*/
#define MAX_USBHID_BOOT_QUIRKS 4
-#define HID_QUIRK_INVERT 0x00000001
-#define HID_QUIRK_NOTOUCH 0x00000002
-#define HID_QUIRK_IGNORE 0x00000004
-#define HID_QUIRK_NOGET 0x00000008
-#define HID_QUIRK_HIDDEV_FORCE 0x00000010
-#define HID_QUIRK_BADPAD 0x00000020
-#define HID_QUIRK_MULTI_INPUT 0x00000040
-#define HID_QUIRK_HIDINPUT_FORCE 0x00000080
-#define HID_QUIRK_NO_EMPTY_INPUT 0x00000100
-/* 0x00000200 reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */
-#define HID_QUIRK_ALWAYS_POLL 0x00000400
-#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
-#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000
-#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000
-#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000
-#define HID_QUIRK_NO_INIT_REPORTS 0x20000000
-#define HID_QUIRK_NO_IGNORE 0x40000000
-#define HID_QUIRK_NO_INPUT_SYNC 0x80000000
+/* BIT(0) reserved for backward compatibility, was HID_QUIRK_INVERT */
+#define HID_QUIRK_NOTOUCH BIT(1)
+#define HID_QUIRK_IGNORE BIT(2)
+#define HID_QUIRK_NOGET BIT(3)
+#define HID_QUIRK_HIDDEV_FORCE BIT(4)
+#define HID_QUIRK_BADPAD BIT(5)
+#define HID_QUIRK_MULTI_INPUT BIT(6)
+#define HID_QUIRK_HIDINPUT_FORCE BIT(7)
+/* BIT(8) reserved for backward compatibility, was HID_QUIRK_NO_EMPTY_INPUT */
+/* BIT(9) reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */
+#define HID_QUIRK_ALWAYS_POLL BIT(10)
+#define HID_QUIRK_INPUT_PER_APP BIT(11)
+#define HID_QUIRK_X_INVERT BIT(12)
+#define HID_QUIRK_Y_INVERT BIT(13)
+#define HID_QUIRK_SKIP_OUTPUT_REPORTS BIT(16)
+#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID BIT(17)
+#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP BIT(18)
+#define HID_QUIRK_HAVE_SPECIAL_DRIVER BIT(19)
+#define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE BIT(20)
+#define HID_QUIRK_FULLSPEED_INTERVAL BIT(28)
+#define HID_QUIRK_NO_INIT_REPORTS BIT(29)
+#define HID_QUIRK_NO_IGNORE BIT(30)
+#define HID_QUIRK_NO_INPUT_SYNC BIT(31)
/*
* HID device groups
@@ -361,6 +382,15 @@ struct hid_item {
#define HID_GROUP_RMI 0x0100
#define HID_GROUP_WACOM 0x0101
#define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102
+#define HID_GROUP_STEAM 0x0103
+#define HID_GROUP_LOGITECH_27MHZ_DEVICE 0x0104
+#define HID_GROUP_VIVALDI 0x0105
+
+/*
+ * HID protocol status
+ */
+#define HID_REPORT_PROTOCOL 1
+#define HID_BOOT_PROTOCOL 0
/*
* This is the global environment of the parser. This information is
@@ -390,6 +420,7 @@ struct hid_global {
struct hid_local {
unsigned usage[HID_MAX_USAGES]; /* usage array */
+ u8 usage_size[HID_MAX_USAGES]; /* usage size array */
unsigned collection_index[HID_MAX_USAGES]; /* collection index array */
unsigned usage_index;
unsigned usage_minimum;
@@ -403,6 +434,7 @@ struct hid_local {
*/
struct hid_collection {
+ int parent_idx; /* device->collection */
unsigned type;
unsigned usage;
unsigned level;
@@ -412,12 +444,16 @@ struct hid_usage {
unsigned hid; /* hid usage code */
unsigned collection_index; /* index into collection array */
unsigned usage_index; /* index into usage array */
+ __s8 resolution_multiplier;/* Effective Resolution Multiplier
+ (HUT v1.12, 4.3.1), default: 1 */
/* hidinput data */
+ __s8 wheel_factor; /* 120/resolution_multiplier */
__u16 code; /* input driver code */
__u8 type; /* input driver type */
__s8 hat_min; /* hat switch fun */
__s8 hat_max; /* ditto */
__s8 hat_dir; /* ditto */
+ __s16 wheel_accumulated; /* hi-res wheel */
};
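Taken together, the wheel_factor and wheel_accumulated fields above implement high-resolution scrolling: each hardware tick is scaled by 120/resolution_multiplier and accumulated until a full detent (120 units) is reached. A minimal sketch of that accumulation, assuming a hypothetical emit_wheel_click() helper that is not part of this header:

	static void example_wheel_event(struct hid_usage *usage, __s32 value)
	{
		/* scale the hi-res tick into 1/120th-of-a-detent units */
		usage->wheel_accumulated += value * usage->wheel_factor;

		/* emit one emulated wheel click per 120 accumulated units */
		while (abs(usage->wheel_accumulated) >= 120) {
			emit_wheel_click(usage->wheel_accumulated > 0 ? 1 : -1);
			usage->wheel_accumulated -=
				usage->wheel_accumulated > 0 ? 120 : -120;
		}
	}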
struct hid_input;
@@ -434,29 +470,50 @@ struct hid_field {
unsigned report_count; /* number of this field in the report */
unsigned report_type; /* (input,output,feature) */
__s32 *value; /* last known value(s) */
+ __s32 *new_value; /* newly read value(s) */
+ __s32 *usages_priorities; /* priority of each usage when reading the report
+ * bits 8-16 are reserved for hid-input usage
+ */
__s32 logical_minimum;
__s32 logical_maximum;
__s32 physical_minimum;
__s32 physical_maximum;
__s32 unit_exponent;
unsigned unit;
+ bool ignored; /* this field is ignored in this event */
struct hid_report *report; /* associated report */
unsigned index; /* index into report->field[] */
/* hidinput data */
struct hid_input *hidinput; /* associated input structure */
__u16 dpad; /* dpad input code */
+ unsigned int slot_idx; /* slot index in a report */
};
#define HID_MAX_FIELDS 256
+struct hid_field_entry {
+ struct list_head list;
+ struct hid_field *field;
+ unsigned int index;
+ __s32 priority;
+};
+
struct hid_report {
struct list_head list;
- unsigned id; /* id of this report */
- unsigned type; /* report type */
+ struct list_head hidinput_list;
+ struct list_head field_entry_list; /* ordered list of input fields */
+ unsigned int id; /* id of this report */
+ enum hid_report_type type; /* report type */
+ unsigned int application; /* application usage for this report */
struct hid_field *field[HID_MAX_FIELDS]; /* fields of the report */
+ struct hid_field_entry *field_entries; /* allocated memory of input field_entry */
unsigned maxfield; /* maximum valid field index */
unsigned size; /* size of the report (bits) */
struct hid_device *device; /* associated device */
+
+ /* tool related state */
+ bool tool_active; /* whether the current tool is active */
+ unsigned int tool; /* BTN_TOOL_* */
};
#define HID_MAX_IDS 256
@@ -468,7 +525,7 @@ struct hid_report_enum {
};
#define HID_MIN_BUFFER_SIZE 64 /* make sure there is at least a packet size of space */
-#define HID_MAX_BUFFER_SIZE 4096 /* 4kb */
+#define HID_MAX_BUFFER_SIZE 16384 /* 16kb */
#define HID_CONTROL_FIFO_SIZE 256 /* to init devices with >100 reports */
#define HID_OUTPUT_FIFO_SIZE 64
@@ -483,19 +540,24 @@ struct hid_output_fifo {
char *raw_report;
};
-#define HID_CLAIMED_INPUT 1
-#define HID_CLAIMED_HIDDEV 2
-#define HID_CLAIMED_HIDRAW 4
-#define HID_CLAIMED_DRIVER 8
+#define HID_CLAIMED_INPUT BIT(0)
+#define HID_CLAIMED_HIDDEV BIT(1)
+#define HID_CLAIMED_HIDRAW BIT(2)
+#define HID_CLAIMED_DRIVER BIT(3)
-#define HID_STAT_ADDED 1
-#define HID_STAT_PARSED 2
+#define HID_STAT_ADDED BIT(0)
+#define HID_STAT_PARSED BIT(1)
+#define HID_STAT_DUP_DETECTED BIT(2)
+#define HID_STAT_REPROBED BIT(3)
struct hid_input {
struct list_head list;
struct hid_report *report;
struct input_dev *input;
+ const char *name;
bool registered;
+ struct list_head reports; /* the list of reports */
+ unsigned int application; /* application usage for this input */
};
enum hid_type {
@@ -504,6 +566,12 @@ enum hid_type {
HID_TYPE_USBNONE
};
+enum hid_battery_status {
+ HID_BATTERY_UNKNOWN = 0,
+ HID_BATTERY_QUERIED, /* Kernel explicitly queried battery strength */
+ HID_BATTERY_REPORTED, /* Device sent unsolicited battery strength report */
+};
+
struct hid_driver;
struct hid_ll_driver;
@@ -526,12 +594,11 @@ struct hid_device { /* device report descriptor */
struct hid_report_enum report_enum[HID_REPORT_TYPES];
struct work_struct led_work; /* delayed LED worker */
- struct semaphore driver_lock; /* protects the current driver, except during input */
struct semaphore driver_input_lock; /* protects the current driver */
struct device dev; /* device */
struct hid_driver *driver;
- struct hid_ll_driver *ll_driver;
+ const struct hid_ll_driver *ll_driver;
struct mutex ll_open_lock;
unsigned int ll_open_count;
@@ -542,16 +609,22 @@ struct hid_device { /* device report descriptor */
* battery is non-NULL.
*/
struct power_supply *battery;
+ __s32 battery_capacity;
__s32 battery_min;
__s32 battery_max;
__s32 battery_report_type;
__s32 battery_report_id;
+ __s32 battery_charge_status;
+ enum hid_battery_status battery_status;
+ bool battery_avoid_query;
+ ktime_t battery_ratelimit_time;
#endif
- unsigned int status; /* see STAT flags above */
+ unsigned long status; /* see STAT flags above */
unsigned claimed; /* Claimed by hidinput, hiddev? */
unsigned quirks; /* Various quirks the device can pull on us */
- bool io_started; /* Protected by driver_lock. If IO has started */
+ unsigned initial_quirks; /* Initial set of quirks supplied when creating device */
+ bool io_started; /* If IO has started */
struct list_head inputs; /* The list of inputs */
void *hiddev; /* The hiddev structure */
@@ -581,6 +654,12 @@ struct hid_device { /* device report descriptor */
struct list_head debug_list;
spinlock_t debug_list_lock;
wait_queue_head_t debug_wait;
+
+ unsigned int id; /* system unique id */
+
+#ifdef CONFIG_BPF
+ struct hid_bpf bpf; /* hid-bpf data */
+#endif /* CONFIG_BPF */
};
#define to_hid_device(pdev) \
@@ -606,12 +685,13 @@ static inline void hid_set_drvdata(struct hid_device *hdev, void *data)
struct hid_parser {
struct hid_global global;
struct hid_global global_stack[HID_GLOBAL_STACK_SIZE];
- unsigned global_stack_ptr;
+ unsigned int global_stack_ptr;
struct hid_local local;
- unsigned collection_stack[HID_COLLECTION_STACK_SIZE];
- unsigned collection_stack_ptr;
+ unsigned int *collection_stack;
+ unsigned int collection_stack_ptr;
+ unsigned int collection_stack_size;
struct hid_device *device;
- unsigned scan_flags;
+ unsigned int scan_flags;
};
struct hid_class_descriptor {
@@ -661,6 +741,7 @@ struct hid_usage_id {
* to be called)
* @dyn_list: list of dynamically added device ids
* @dyn_lock: lock protecting @dyn_list
+ * @match: check if the given device is handled by this driver
* @probe: new device inserted
* @remove: device removed (NULL if not a hot-plug capable driver)
* @report_table: on which reports to call raw_event (NULL means all)
@@ -681,8 +762,8 @@ struct hid_usage_id {
* input will not be passed to raw_event unless hid_device_io_start is
* called.
*
- * raw_event and event should return 0 on no action performed, 1 when no
- * further processing should be done and negative on error
+ * raw_event and event should return a negative value on error; any other
+ * value passes the event on to .event(). Typically return 0 on success.
*
* input_mapping shall return a negative value to completely ignore this usage
* (e.g. doubled or invalid usage), zero to continue with parsing of this
@@ -701,6 +782,7 @@ struct hid_driver {
struct list_head dyn_list;
spinlock_t dyn_lock;
+ bool (*match)(struct hid_device *dev, bool ignore_special_driver);
int (*probe)(struct hid_device *dev, const struct hid_device_id *id);
void (*remove)(struct hid_device *dev);
@@ -739,11 +821,12 @@ struct hid_driver {
container_of(pdrv, struct hid_driver, driver)
/**
- * hid_ll_driver - low level driver callbacks
+ * struct hid_ll_driver - low level driver callbacks
* @start: called on probe to start the device
* @stop: called on remove
* @open: called by input layer on open
* @close: called by input layer on close
+ * @power: request underlying hardware to enter requested power mode
* @parse: this method is called only once to parse the device data,
* shouldn't allocate anything to not leak memory
* @request: send report request to device (e.g. feature report)
@@ -751,6 +834,8 @@ struct hid_driver {
* @raw_request: send raw report request to device (e.g. feature report)
* @output_report: send output report to device
* @idle: send idle request to device
+ * @may_wakeup: return whether the device may act as a wakeup source during system suspend
+ * @max_buffer_size: override the maximum data buffer size (default: HID_MAX_BUFFER_SIZE)
*/
struct hid_ll_driver {
int (*start)(struct hid_device *hdev);
@@ -775,23 +860,32 @@ struct hid_ll_driver {
int (*output_report) (struct hid_device *hdev, __u8 *buf, size_t len);
int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype);
+ bool (*may_wakeup)(struct hid_device *hdev);
+
+ unsigned int max_buffer_size;
};
+extern bool hid_is_usb(const struct hid_device *hdev);
+
#define PM_HINT_FULLON 1<<5
#define PM_HINT_NORMAL 1<<1
/* Applications from HID Usage Tables 4/8/99 Version 1.1 */
/* We ignore a few input applications that are not widely used */
-#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || ((a >= 0x000d0002) && (a <= 0x000d0006)))
+#define IS_INPUT_APPLICATION(a) \
+ (((a >= HID_UP_GENDESK) && (a <= HID_GD_MULTIAXIS)) \
+ || ((a >= HID_DG_DIGITIZER) && (a <= HID_DG_WHITEBOARD)) \
+ || (a == HID_GD_SYSTEM_CONTROL) || (a == HID_CP_CONSUMER_CONTROL) \
+ || (a == HID_GD_WIRELESS_RADIO_CTLS))
/* HID core API */
-extern int hid_debug;
-
extern bool hid_ignore(struct hid_device *);
extern int hid_add_device(struct hid_device *);
extern void hid_destroy_device(struct hid_device *);
+extern struct bus_type hid_bus_type;
+
extern int __must_check __hid_register_driver(struct hid_driver *,
struct module *, const char *mod_name);
@@ -819,35 +913,55 @@ extern int hidinput_connect(struct hid_device *hid, unsigned int force);
extern void hidinput_disconnect(struct hid_device *);
int hid_set_field(struct hid_field *, unsigned, __s32);
-int hid_input_report(struct hid_device *, int type, u8 *, int, int);
-int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field);
+int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
+ int interrupt);
struct hid_field *hidinput_get_led_field(struct hid_device *hid);
unsigned int hidinput_count_leds(struct hid_device *hid);
__s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code);
void hid_output_report(struct hid_report *report, __u8 *data);
-void __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype);
+int __hid_request(struct hid_device *hid, struct hid_report *rep, enum hid_class_request reqtype);
u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags);
struct hid_device *hid_allocate_device(void);
-struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
+struct hid_report *hid_register_report(struct hid_device *device,
+ enum hid_report_type type, unsigned int id,
+ unsigned int application);
int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
struct hid_report *hid_validate_values(struct hid_device *hid,
- unsigned int type, unsigned int id,
+ enum hid_report_type type, unsigned int id,
unsigned int field_index,
unsigned int report_counts);
+
+void hid_setup_resolution_multiplier(struct hid_device *hid);
int hid_open_report(struct hid_device *device);
int hid_check_keys_pressed(struct hid_device *hid);
int hid_connect(struct hid_device *hid, unsigned int connect_mask);
void hid_disconnect(struct hid_device *hid);
-const struct hid_device_id *hid_match_id(struct hid_device *hdev,
+bool hid_match_one_id(const struct hid_device *hdev,
+ const struct hid_device_id *id);
+const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
const struct hid_device_id *id);
+const struct hid_device_id *hid_match_device(struct hid_device *hdev,
+ struct hid_driver *hdrv);
+bool hid_compare_device_paths(struct hid_device *hdev_a,
+ struct hid_device *hdev_b, char separator);
s32 hid_snto32(__u32 value, unsigned n);
__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
unsigned offset, unsigned n);
+#ifdef CONFIG_PM
+int hid_driver_suspend(struct hid_device *hdev, pm_message_t state);
+int hid_driver_reset_resume(struct hid_device *hdev);
+int hid_driver_resume(struct hid_device *hdev);
+#else
+static inline int hid_driver_suspend(struct hid_device *hdev, pm_message_t state) { return 0; }
+static inline int hid_driver_reset_resume(struct hid_device *hdev) { return 0; }
+static inline int hid_driver_resume(struct hid_device *hdev) { return 0; }
+#endif
+
/**
* hid_device_io_start - enable HID input during probe, remove
*
- * @hid - the device
+ * @hid: the device
*
* This should only be called during probe or remove and only be
* called by the thread calling probe or remove. It will allow
@@ -865,7 +979,7 @@ static inline void hid_device_io_start(struct hid_device *hid) {
/**
* hid_device_io_stop - disable HID input during probe, remove
*
- * @hid - the device
+ * @hid: the device
*
* Should only be called after hid_device_io_start. It will prevent
* incoming packets from going to the driver for the duration of
@@ -891,39 +1005,65 @@ static inline void hid_device_io_stop(struct hid_device *hid) {
* @max: maximal valid usage->code to consider later (out parameter)
* @type: input event type (EV_KEY, EV_REL, ...)
* @c: code which corresponds to this usage and type
+ *
+ * The value pointed to by @bit will be set to NULL if either @type is
+ * an unhandled event type, or if @c is out of range for @type. This
+ * can be used as an error condition.
*/
static inline void hid_map_usage(struct hid_input *hidinput,
struct hid_usage *usage, unsigned long **bit, int *max,
- __u8 type, __u16 c)
+ __u8 type, unsigned int c)
{
struct input_dev *input = hidinput->input;
-
- usage->type = type;
- usage->code = c;
+ unsigned long *bmap = NULL;
+ unsigned int limit = 0;
switch (type) {
case EV_ABS:
- *bit = input->absbit;
- *max = ABS_MAX;
+ bmap = input->absbit;
+ limit = ABS_MAX;
break;
case EV_REL:
- *bit = input->relbit;
- *max = REL_MAX;
+ bmap = input->relbit;
+ limit = REL_MAX;
break;
case EV_KEY:
- *bit = input->keybit;
- *max = KEY_MAX;
+ bmap = input->keybit;
+ limit = KEY_MAX;
break;
case EV_LED:
- *bit = input->ledbit;
- *max = LED_MAX;
+ bmap = input->ledbit;
+ limit = LED_MAX;
break;
+ case EV_MSC:
+ bmap = input->mscbit;
+ limit = MSC_MAX;
+ break;
+ }
+
+ if (unlikely(c > limit || !bmap)) {
+ pr_warn_ratelimited("%s: Invalid code %d type %d\n",
+ input->name, c, type);
+ *bit = NULL;
+ return;
}
+
+ usage->type = type;
+ usage->code = c;
+ *max = limit;
+ *bit = bmap;
}
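A hedged sketch of how a driver's input_mapping callback could consume the NULL @bit error condition documented above; my_input_mapping and the specific mapping are illustrative, not part of this header:

	static int my_input_mapping(struct hid_device *hdev, struct hid_input *hi,
				    struct hid_field *field, struct hid_usage *usage,
				    unsigned long **bit, int *max)
	{
		if (usage->hid == HID_CP_VOLUMEUP) {
			hid_map_usage(hi, usage, bit, max, EV_KEY, KEY_VOLUMEUP);
			return *bit ? 1 : -1;	/* NULL means the mapping failed */
		}
		return 0;	/* fall back to the generic mapping */
	}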
/**
* hid_map_usage_clear - map usage input bits and clear the input bit
*
+ * @hidinput: hidinput which we are interested in
+ * @usage: usage to fill in
+ * @bit: pointer to input->{}bit (out parameter)
+ * @max: maximal valid usage->code to consider later (out parameter)
+ * @type: input event type (EV_KEY, EV_REL, ...)
+ * @c: code which corresponds to this usage and type
+ *
* The same as hid_map_usage, except the @c bit is also cleared in supported
* bits (@bit).
*/
@@ -932,7 +1072,8 @@ static inline void hid_map_usage_clear(struct hid_input *hidinput,
__u8 type, __u16 c)
{
hid_map_usage(hidinput, usage, bit, max, type, c);
- clear_bit(c, *bit);
+ if (*bit)
+ clear_bit(usage->code, *bit);
}
/**
@@ -954,6 +1095,13 @@ int __must_check hid_hw_start(struct hid_device *hdev,
void hid_hw_stop(struct hid_device *hdev);
int __must_check hid_hw_open(struct hid_device *hdev);
void hid_hw_close(struct hid_device *hdev);
+void hid_hw_request(struct hid_device *hdev,
+ struct hid_report *report, enum hid_class_request reqtype);
+int hid_hw_raw_request(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf,
+ size_t len, enum hid_report_type rtype,
+ enum hid_class_request reqtype);
+int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len);
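These declarations replace the inline wrappers removed later in this hunk. A hedged sketch of reading a feature report with hid_hw_raw_request(); the report ID and buffer size are illustrative:

	static int example_get_feature(struct hid_device *hdev)
	{
		__u8 *buf = kzalloc(32, GFP_KERNEL);	/* raw buffers must not live on the stack */
		int ret;

		if (!buf)
			return -ENOMEM;

		ret = hid_hw_raw_request(hdev, 5 /* report ID */, buf, 32,
					 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
		kfree(buf);
		return ret < 0 ? ret : 0;
	}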
/**
* hid_hw_power - requests underlying HW to go into given power mode
@@ -972,82 +1120,36 @@ static inline int hid_hw_power(struct hid_device *hdev, int level)
/**
- * hid_hw_request - send report request to device
+ * hid_hw_idle - send idle request to device
*
* @hdev: hid device
- * @report: report to send
+ * @report: report to control
+ * @idle: idle state
* @reqtype: hid request type
*/
-static inline void hid_hw_request(struct hid_device *hdev,
- struct hid_report *report, int reqtype)
-{
- if (hdev->ll_driver->request)
- return hdev->ll_driver->request(hdev, report, reqtype);
-
- __hid_request(hdev, report, reqtype);
-}
-
-/**
- * hid_hw_raw_request - send report request to device
- *
- * @hdev: hid device
- * @reportnum: report ID
- * @buf: in/out data to transfer
- * @len: length of buf
- * @rtype: HID report type
- * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
- *
- * @return: count of data transfered, negative if error
- *
- * Same behavior as hid_hw_request, but with raw buffers instead.
- */
-static inline int hid_hw_raw_request(struct hid_device *hdev,
- unsigned char reportnum, __u8 *buf,
- size_t len, unsigned char rtype, int reqtype)
+static inline int hid_hw_idle(struct hid_device *hdev, int report, int idle,
+ enum hid_class_request reqtype)
{
- if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
- return -EINVAL;
+ if (hdev->ll_driver->idle)
+ return hdev->ll_driver->idle(hdev, report, idle, reqtype);
- return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
- rtype, reqtype);
+ return 0;
}
/**
- * hid_hw_output_report - send output report to device
+ * hid_hw_may_wakeup - return whether the hid device may act as a wakeup source during system suspend
*
* @hdev: hid device
- * @buf: raw data to transfer
- * @len: length of buf
- *
- * @return: count of data transfered, negative if error
*/
-static inline int hid_hw_output_report(struct hid_device *hdev, __u8 *buf,
- size_t len)
+static inline bool hid_hw_may_wakeup(struct hid_device *hdev)
{
- if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
- return -EINVAL;
-
- if (hdev->ll_driver->output_report)
- return hdev->ll_driver->output_report(hdev, buf, len);
+ if (hdev->ll_driver->may_wakeup)
+ return hdev->ll_driver->may_wakeup(hdev);
- return -ENOSYS;
-}
-
-/**
- * hid_hw_idle - send idle request to device
- *
- * @hdev: hid device
- * @report: report to control
- * @idle: idle state
- * @reqtype: hid request type
- */
-static inline int hid_hw_idle(struct hid_device *hdev, int report, int idle,
- int reqtype)
-{
- if (hdev->ll_driver->idle)
- return hdev->ll_driver->idle(hdev, report, idle, reqtype);
+ if (hdev->dev.parent)
+ return device_may_wakeup(hdev->dev.parent);
- return 0;
+ return false;
}
/**
@@ -1066,19 +1168,18 @@ static inline void hid_hw_wait(struct hid_device *hdev)
*
* @report: the report we want to know the length
*/
-static inline int hid_report_len(struct hid_report *report)
+static inline u32 hid_report_len(struct hid_report *report)
{
- /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
- return ((report->size - 1) >> 3) + 1 + (report->id > 0);
+ return DIV_ROUND_UP(report->size, 8) + (report->id > 0);
}
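A worked example with illustrative values: a report carrying 12 data bits and a non-zero report ID occupies DIV_ROUND_UP(12, 8) + 1 = 3 bytes on the wire.

	struct hid_report r = { .size = 12, .id = 3 };	/* illustrative */
	u32 len = hid_report_len(&r);	/* DIV_ROUND_UP(12, 8) + 1 == 3 */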
-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
- int interrupt);
+int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
+ int interrupt);
/* HID quirks API */
-u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct);
-int usbhid_quirks_init(char **quirks_param);
-void usbhid_quirks_exit(void);
+unsigned long hid_lookup_quirk(const struct hid_device *hdev);
+int hid_quirks_init(char **quirks_param, __u16 bus, int count);
+void hid_quirks_exit(__u16 bus);
#ifdef CONFIG_HID_PID
int hid_pidff_init(struct hid_device *hid);
@@ -1086,29 +1187,28 @@ int hid_pidff_init(struct hid_device *hid);
#define hid_pidff_init NULL
#endif
-#define dbg_hid(format, arg...) \
-do { \
- if (hid_debug) \
- printk(KERN_DEBUG "%s: " format, __FILE__, ##arg); \
-} while (0)
-
-#define hid_printk(level, hid, fmt, arg...) \
- dev_printk(level, &(hid)->dev, fmt, ##arg)
-#define hid_emerg(hid, fmt, arg...) \
- dev_emerg(&(hid)->dev, fmt, ##arg)
-#define hid_crit(hid, fmt, arg...) \
- dev_crit(&(hid)->dev, fmt, ##arg)
-#define hid_alert(hid, fmt, arg...) \
- dev_alert(&(hid)->dev, fmt, ##arg)
-#define hid_err(hid, fmt, arg...) \
- dev_err(&(hid)->dev, fmt, ##arg)
-#define hid_notice(hid, fmt, arg...) \
- dev_notice(&(hid)->dev, fmt, ##arg)
-#define hid_warn(hid, fmt, arg...) \
- dev_warn(&(hid)->dev, fmt, ##arg)
-#define hid_info(hid, fmt, arg...) \
- dev_info(&(hid)->dev, fmt, ##arg)
-#define hid_dbg(hid, fmt, arg...) \
- dev_dbg(&(hid)->dev, fmt, ##arg)
+#define dbg_hid(fmt, ...) pr_debug("%s: " fmt, __FILE__, ##__VA_ARGS__)
+
+#define hid_err(hid, fmt, ...) \
+ dev_err(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_notice(hid, fmt, ...) \
+ dev_notice(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_warn(hid, fmt, ...) \
+ dev_warn(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_info(hid, fmt, ...) \
+ dev_info(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_dbg(hid, fmt, ...) \
+ dev_dbg(&(hid)->dev, fmt, ##__VA_ARGS__)
+
+#define hid_err_once(hid, fmt, ...) \
+ dev_err_once(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_notice_once(hid, fmt, ...) \
+ dev_notice_once(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_warn_once(hid, fmt, ...) \
+ dev_warn_once(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_info_once(hid, fmt, ...) \
+ dev_info_once(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_dbg_once(hid, fmt, ...) \
+ dev_dbg_once(&(hid)->dev, fmt, ##__VA_ARGS__)
#endif
diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h
new file mode 100644
index 000000000000..e9afb61e6ee0
--- /dev/null
+++ b/include/linux/hid_bpf.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __HID_BPF_H
+#define __HID_BPF_H
+
+#include <linux/bpf.h>
+#include <linux/spinlock.h>
+#include <uapi/linux/hid.h>
+
+struct hid_device;
+
+/*
+ * The following is the user facing HID BPF API.
+ *
+ * Extra care should be taken when editing this part, as
+ * it might break existing out of the tree bpf programs.
+ */
+
+/**
+ * struct hid_bpf_ctx - User accessible data for all HID programs
+ *
+ * ``data`` is not directly accessible from the context. We need to issue
+ * a call to ``hid_bpf_get_data()`` in order to get a pointer to that field.
+ *
+ * All of these fields are currently read-only.
+ *
+ * @index: program index in the jump table. No special meaning (a smaller index
+ * doesn't mean the program will be executed before another program with
+ * a bigger index).
+ * @hid: the ``struct hid_device`` representing the device itself
+ * @report_type: used for ``hid_bpf_device_event()``
+ * @allocated_size: Allocated size of data.
+ *
+ * This is how much memory is available and can be requested
+ * by the HID program.
+ * Note that for ``HID_BPF_RDESC_FIXUP``, that memory is set to
+ * ``4096`` (4 KB)
+ * @size: Valid data in the data field.
+ *
+ * Programs can get the available valid size in data by fetching this field.
+ * Programs can also change this value by returning a positive number in the
+ * program.
+ * To discard the event, return a negative error code.
+ *
+ * ``size`` must always be less than or equal to ``allocated_size`` (this is enforced
+ * once all BPF programs have been run).
+ * @retval: Return value of the previous program.
+ */
+struct hid_bpf_ctx {
+ __u32 index;
+ const struct hid_device *hid;
+ __u32 allocated_size;
+ enum hid_report_type report_type;
+ union {
+ __s32 retval;
+ __s32 size;
+ };
+};
+
+/**
+ * enum hid_bpf_attach_flags - flags used when attaching a HID-BPF program
+ *
+ * @HID_BPF_FLAG_NONE: no specific flag is used, the kernel chooses where to
+ * insert the program
+ * @HID_BPF_FLAG_INSERT_HEAD: insert the given program before any other program
+ * currently attached to the device. This doesn't
+ * guarantee that this program will always be first
+ * @HID_BPF_FLAG_MAX: sentinel value, not to be used by the callers
+ */
+enum hid_bpf_attach_flags {
+ HID_BPF_FLAG_NONE = 0,
+ HID_BPF_FLAG_INSERT_HEAD = _BITUL(0),
+ HID_BPF_FLAG_MAX,
+};
+
+/* Following functions are tracepoints that BPF programs can attach to */
+int hid_bpf_device_event(struct hid_bpf_ctx *ctx);
+int hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx);
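A hedged sketch of a program attaching to hid_bpf_device_event(), modelled on the kernel's samples/hid; the program name and the byte it rewrites are illustrative:

	SEC("fmod_ret/hid_bpf_device_event")
	int BPF_PROG(example_invert_y, struct hid_bpf_ctx *hctx)
	{
		s8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4 /* size */);

		if (!data)
			return 0;	/* bounds check failed, leave the event alone */

		data[2] = -data[2];	/* e.g. invert the Y axis of a mouse report */
		return 0;
	}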
+
+/* Following functions are kfunc that we export to BPF programs */
+/* available everywhere in HID-BPF */
+__u8 *hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t __sz);
+
+/* only available in syscall */
+int hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags);
+int hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
+ enum hid_report_type rtype, enum hid_class_request reqtype);
+struct hid_bpf_ctx *hid_bpf_allocate_context(unsigned int hid_id);
+void hid_bpf_release_context(struct hid_bpf_ctx *ctx);
+
+/*
+ * Below is HID internal
+ */
+
+/* internal function to call eBPF programs, not to be used by anybody */
+int __hid_bpf_tail_call(struct hid_bpf_ctx *ctx);
+
+#define HID_BPF_MAX_PROGS_PER_DEV 64
+#define HID_BPF_FLAG_MASK (((HID_BPF_FLAG_MAX - 1) << 1) - 1)
+
+/* types of HID programs to attach to */
+enum hid_bpf_prog_type {
+ HID_BPF_PROG_TYPE_UNDEF = -1,
+ HID_BPF_PROG_TYPE_DEVICE_EVENT, /* an event is emitted from the device */
+ HID_BPF_PROG_TYPE_RDESC_FIXUP,
+ HID_BPF_PROG_TYPE_MAX,
+};
+
+struct hid_report_enum;
+
+struct hid_bpf_ops {
+ struct hid_report *(*hid_get_report)(struct hid_report_enum *report_enum, const u8 *data);
+ int (*hid_hw_raw_request)(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf,
+ size_t len, enum hid_report_type rtype,
+ enum hid_class_request reqtype);
+ struct module *owner;
+ struct bus_type *bus_type;
+};
+
+extern struct hid_bpf_ops *hid_bpf_ops;
+
+struct hid_bpf_prog_list {
+ u16 prog_idx[HID_BPF_MAX_PROGS_PER_DEV];
+ u8 prog_cnt;
+};
+
+/* stored in each device */
+struct hid_bpf {
+ u8 *device_data; /* allocated when a bpf program of type
+ * SEC(f.../hid_bpf_device_event) has been attached
+ * to this HID device
+ */
+ u32 allocated_data;
+
+ struct hid_bpf_prog_list __rcu *progs[HID_BPF_PROG_TYPE_MAX]; /* attached BPF progs */
+ bool destroyed; /* prevents the assignment of any progs */
+
+ spinlock_t progs_lock; /* protects RCU update of progs */
+};
+
+/* specific HID-BPF link when a program is attached to a device */
+struct hid_bpf_link {
+ struct bpf_link link;
+ int hid_table_index;
+};
+
+#ifdef CONFIG_HID_BPF
+u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type, u8 *data,
+ u32 *size, int interrupt);
+int hid_bpf_connect_device(struct hid_device *hdev);
+void hid_bpf_disconnect_device(struct hid_device *hdev);
+void hid_bpf_destroy_device(struct hid_device *hid);
+void hid_bpf_device_init(struct hid_device *hid);
+u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size);
+#else /* CONFIG_HID_BPF */
+static inline u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type,
+ u8 *data, u32 *size, int interrupt) { return data; }
+static inline int hid_bpf_connect_device(struct hid_device *hdev) { return 0; }
+static inline void hid_bpf_disconnect_device(struct hid_device *hdev) {}
+static inline void hid_bpf_destroy_device(struct hid_device *hid) {}
+static inline void hid_bpf_device_init(struct hid_device *hid) {}
+static inline u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size)
+{
+ return kmemdup(rdesc, *size, GFP_KERNEL);
+}
+
+#endif /* CONFIG_HID_BPF */
+
+#endif /* __HID_BPF_H */
diff --git a/include/linux/hidden.h b/include/linux/hidden.h
new file mode 100644
index 000000000000..49a17b6b5962
--- /dev/null
+++ b/include/linux/hidden.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * When building position independent code with GCC using the -fPIC option,
+ * (or even the -fPIE one on older versions), it will assume that we are
+ * building a dynamic object (either a shared library or an executable) that
+ * may have symbol references that can only be resolved at load time. For a
+ * variety of reasons (ELF symbol preemption, the CoW footprint of the section
+ * that is modified by the loader), this results in all references to symbols
+ * with external linkage to go via entries in the Global Offset Table (GOT),
+ * which carries absolute addresses which need to be fixed up when the
+ * executable image is loaded at an offset which is different from its link
+ * time offset.
+ *
+ * Fortunately, there is a way to inform the compiler that such symbol
+ * references will be satisfied at link time rather than at load time, by
+ * giving them 'hidden' visibility.
+ */
+
+#pragma GCC visibility push(hidden)
diff --git a/include/linux/hiddev.h b/include/linux/hiddev.h
index 921622222957..2164c03d2c72 100644
--- a/include/linux/hiddev.h
+++ b/include/linux/hiddev.h
@@ -1,22 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 1999-2000 Vojtech Pavlik
*
* Sponsored by SuSE
*/
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
diff --git a/include/linux/hidraw.h b/include/linux/hidraw.h
index ddf52612eed8..cd67f4ca5599 100644
--- a/include/linux/hidraw.h
+++ b/include/linux/hidraw.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2007 Jiri Kosina
*/
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- */
#ifndef _HIDRAW_H
#define _HIDRAW_H
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
new file mode 100644
index 000000000000..a3028e400a9c
--- /dev/null
+++ b/include/linux/highmem-internal.h
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_HIGHMEM_INTERNAL_H
+#define _LINUX_HIGHMEM_INTERNAL_H
+
+/*
+ * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
+ */
+#ifdef CONFIG_KMAP_LOCAL
+void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
+void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
+void kunmap_local_indexed(const void *vaddr);
+void kmap_local_fork(struct task_struct *tsk);
+void __kmap_local_sched_out(void);
+void __kmap_local_sched_in(void);
+static inline void kmap_assert_nomap(void)
+{
+ DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
+}
+#else
+static inline void kmap_local_fork(struct task_struct *tsk) { }
+static inline void kmap_assert_nomap(void) { }
+#endif
+
+#ifdef CONFIG_HIGHMEM
+#include <asm/highmem.h>
+
+#ifndef ARCH_HAS_KMAP_FLUSH_TLB
+static inline void kmap_flush_tlb(unsigned long addr) { }
+#endif
+
+#ifndef kmap_prot
+#define kmap_prot PAGE_KERNEL
+#endif
+
+void *kmap_high(struct page *page);
+void kunmap_high(struct page *page);
+void __kmap_flush_unused(void);
+struct page *__kmap_to_page(void *addr);
+
+static inline void *kmap(struct page *page)
+{
+ void *addr;
+
+ might_sleep();
+ if (!PageHighMem(page))
+ addr = page_address(page);
+ else
+ addr = kmap_high(page);
+ kmap_flush_tlb((unsigned long)addr);
+ return addr;
+}
+
+static inline void kunmap(struct page *page)
+{
+ might_sleep();
+ if (!PageHighMem(page))
+ return;
+ kunmap_high(page);
+}
+
+static inline struct page *kmap_to_page(void *addr)
+{
+ return __kmap_to_page(addr);
+}
+
+static inline void kmap_flush_unused(void)
+{
+ __kmap_flush_unused();
+}
+
+static inline void *kmap_local_page(struct page *page)
+{
+ return __kmap_local_page_prot(page, kmap_prot);
+}
+
+static inline void *kmap_local_folio(struct folio *folio, size_t offset)
+{
+ struct page *page = folio_page(folio, offset / PAGE_SIZE);
+ return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+ return __kmap_local_page_prot(page, prot);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+ return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_local(const void *vaddr)
+{
+ kunmap_local_indexed(vaddr);
+}
+
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_disable();
+ else
+ preempt_disable();
+
+ pagefault_disable();
+ return __kmap_local_page_prot(page, prot);
+}
+
+static inline void *kmap_atomic(struct page *page)
+{
+ return kmap_atomic_prot(page, kmap_prot);
+}
+
+static inline void *kmap_atomic_pfn(unsigned long pfn)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_disable();
+ else
+ preempt_disable();
+
+ pagefault_disable();
+ return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_atomic(const void *addr)
+{
+ kunmap_local_indexed(addr);
+ pagefault_enable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_enable();
+ else
+ preempt_enable();
+}
+
+unsigned int __nr_free_highpages(void);
+extern atomic_long_t _totalhigh_pages;
+
+static inline unsigned int nr_free_highpages(void)
+{
+ return __nr_free_highpages();
+}
+
+static inline unsigned long totalhigh_pages(void)
+{
+ return (unsigned long)atomic_long_read(&_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_add(long count)
+{
+ atomic_long_add(count, &_totalhigh_pages);
+}
+
+static inline bool is_kmap_addr(const void *x)
+{
+ unsigned long addr = (unsigned long)x;
+
+ return (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) ||
+ (addr >= __fix_to_virt(FIX_KMAP_END) &&
+ addr < __fix_to_virt(FIX_KMAP_BEGIN));
+}
+#else /* CONFIG_HIGHMEM */
+
+static inline struct page *kmap_to_page(void *addr)
+{
+ return virt_to_page(addr);
+}
+
+static inline void *kmap(struct page *page)
+{
+ might_sleep();
+ return page_address(page);
+}
+
+static inline void kunmap_high(struct page *page) { }
+static inline void kmap_flush_unused(void) { }
+
+static inline void kunmap(struct page *page)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(page_address(page));
+#endif
+}
+
+static inline void *kmap_local_page(struct page *page)
+{
+ return page_address(page);
+}
+
+static inline void *kmap_local_folio(struct folio *folio, size_t offset)
+{
+ return page_address(&folio->page) + offset;
+}
+
+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+ return kmap_local_page(page);
+}
+
+static inline void *kmap_local_pfn(unsigned long pfn)
+{
+ return kmap_local_page(pfn_to_page(pfn));
+}
+
+static inline void __kunmap_local(const void *addr)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
+#endif
+}
+
+static inline void *kmap_atomic(struct page *page)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_disable();
+ else
+ preempt_disable();
+ pagefault_disable();
+ return page_address(page);
+}
+
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+ return kmap_atomic(page);
+}
+
+static inline void *kmap_atomic_pfn(unsigned long pfn)
+{
+ return kmap_atomic(pfn_to_page(pfn));
+}
+
+static inline void __kunmap_atomic(const void *addr)
+{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
+#endif
+ pagefault_enable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_enable();
+ else
+ preempt_enable();
+}
+
+static inline unsigned int nr_free_highpages(void) { return 0; }
+static inline unsigned long totalhigh_pages(void) { return 0UL; }
+
+static inline bool is_kmap_addr(const void *x)
+{
+ return false;
+}
+
+#endif /* CONFIG_HIGHMEM */
+
+/**
+ * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() - deprecated!
+ * @__addr: Virtual address to be unmapped
+ *
+ * Unmaps an address previously mapped by kmap_atomic() and re-enables
+ * pagefaults. Depending on PREEMPT_RT configuration, it also re-enables
+ * migration and preemption. Users should not count on these side effects.
+ *
+ * Mappings should be unmapped in the reverse order that they were mapped.
+ * See kmap_local_page() for details on nesting.
+ *
+ * @__addr can be any address within the mapped page, so there is no need
+ * to subtract any offset that has been added. In contrast to kunmap(),
+ * this function takes the address returned from kmap_atomic(), not the
+ * page passed to it. The compiler will warn you if you pass the page.
+ */
+#define kunmap_atomic(__addr) \
+do { \
+ BUILD_BUG_ON(__same_type((__addr), struct page *)); \
+ __kunmap_atomic(__addr); \
+} while (0)
+
+/**
+ * kunmap_local - Unmap a page mapped via kmap_local_page().
+ * @__addr: An address within the page mapped
+ *
+ * @__addr can be any address within the mapped page. Commonly it is the
+ * address return from kmap_local_page(), but it can also include offsets.
+ *
+ * Unmapping should be done in the reverse order of the mapping. See
+ * kmap_local_page() for details.
+ */
+#define kunmap_local(__addr) \
+do { \
+ BUILD_BUG_ON(__same_type((__addr), struct page *)); \
+ __kunmap_local(__addr); \
+} while (0)
+
+#endif
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index bb3f3297062a..4de1dbcd3ef6 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -1,14 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
+#include <linux/cacheflush.h>
+#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
-#include <asm/cacheflush.h>
+#include "highmem-internal.h"
+
+/**
+ * kmap - Map a page for long term usage
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Can only be invoked from preemptible task context because on 32bit
+ * systems with CONFIG_HIGHMEM enabled this function might sleep.
+ *
+ * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
+ * this returns the virtual address of the direct kernel mapping.
+ *
+ * The returned virtual address is globally visible and valid up to the
+ * point where it is unmapped via kunmap(). The pointer can be handed to
+ * other contexts.
+ *
+ * For highmem pages on 32bit systems this can be slow as the mapping space
+ * is limited and protected by a global lock. In case that there is no
+ * mapping slot available the function blocks until a slot is released via
+ * kunmap().
+ */
+static inline void *kmap(struct page *page);
+
+/**
+ * kunmap - Unmap the virtual address mapped by kmap()
+ * @page: Pointer to the page which was mapped by kmap()
+ *
+ * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
+ * pages in the low memory area.
+ */
+static inline void kunmap(struct page *page);
+
+/**
+ * kmap_to_page - Get the page for a kmap'ed address
+ * @addr: The address to look up
+ *
+ * Returns: The page which is mapped to @addr.
+ */
+static inline struct page *kmap_to_page(void *addr);
+
+/**
+ * kmap_flush_unused - Flush all unused kmap mappings in order to
+ * remove stray mappings
+ */
+static inline void kmap_flush_unused(void);
+
+/**
+ * kmap_local_page - Map a page for temporary usage
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * Can be invoked from any context, including interrupts.
+ *
+ * Requires careful handling when nesting multiple mappings because the map
+ * management is stack based. The unmap has to be in the reverse order of
+ * the map operation:
+ *
+ * addr1 = kmap_local_page(page1);
+ * addr2 = kmap_local_page(page2);
+ * ...
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
+ *
+ * Unmapping addr1 before addr2 is invalid and causes malfunction.
+ *
+ * Contrary to kmap() mappings the mapping is only valid in the context of
+ * the caller and cannot be handed to other contexts.
+ *
+ * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
+ * virtual address of the direct mapping. Only real highmem pages are
+ * temporarily mapped.
+ *
+ * While kmap_local_page() is significantly faster than kmap() for the highmem
+ * case it comes with restrictions about the pointer validity.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_page() can rely on this side effect.
+ */
+static inline void *kmap_local_page(struct page *page);
+
+/**
+ * kmap_local_folio - Map a page in this folio for temporary usage
+ * @folio: The folio containing the page.
+ * @offset: The byte offset within the folio which identifies the page.
+ *
+ * Requires careful handling when nesting multiple mappings because the map
+ * management is stack based. The unmap has to be in the reverse order of
+ * the map operation::
+ *
+ * addr1 = kmap_local_folio(folio1, offset1);
+ * addr2 = kmap_local_folio(folio2, offset2);
+ * ...
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
+ *
+ * Unmapping addr1 before addr2 is invalid and causes malfunction.
+ *
+ * Contrary to kmap() mappings the mapping is only valid in the context of
+ * the caller and cannot be handed to other contexts.
+ *
+ * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
+ * virtual address of the direct mapping. Only real highmem pages are
+ * temporarily mapped.
+ *
+ * While it is significantly faster than kmap() for the highmem case it
+ * comes with restrictions about the pointer validity.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_folio() can rely on this side effect.
+ *
+ * Context: Can be invoked from any context.
+ * Return: The virtual address of @offset.
+ */
+static inline void *kmap_local_folio(struct folio *folio, size_t offset);
+
+/**
+ * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+ * In fact a wrapper around kmap_local_page() which also disables pagefaults
+ * and, depending on PREEMPT_RT configuration, also CPU migration and
+ * preemption. Therefore users should not count on the latter two side effects.
+ *
+ * Mappings should always be released by kunmap_atomic().
+ *
+ * Do not use in new code. Use kmap_local_page() instead.
+ *
+ * It is used in atomic context when code wants to access the contents of a
+ * page that might be allocated from high memory (see __GFP_HIGHMEM), for
+ * example a page in the pagecache. The API has two functions, and they
+ * can be used in a manner similar to the following::
+ *
+ * // Find the page of interest.
+ * struct page *page = find_get_page(mapping, offset);
+ *
+ * // Gain access to the contents of that page.
+ * void *vaddr = kmap_atomic(page);
+ *
+ * // Do something to the contents of that page.
+ * memset(vaddr, 0, PAGE_SIZE);
+ *
+ * // Unmap that page.
+ * kunmap_atomic(vaddr);
+ *
+ * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
+ * call, not the argument.
+ *
+ * If you need to map two pages because you want to copy from one page to
+ * another you need to keep the kmap_atomic calls strictly nested, like:
+ *
+ * vaddr1 = kmap_atomic(page1);
+ * vaddr2 = kmap_atomic(page2);
+ *
+ * memcpy(vaddr1, vaddr2, PAGE_SIZE);
+ *
+ * kunmap_atomic(vaddr2);
+ * kunmap_atomic(vaddr1);
+ */
+static inline void *kmap_atomic(struct page *page);
+
+/* Highmem related interfaces for management code */
+static inline unsigned int nr_free_highpages(void);
+static inline unsigned long totalhigh_pages(void);
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
@@ -16,10 +188,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page
}
#endif
-#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
+#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
@@ -28,223 +197,320 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
}
#endif
-#include <asm/kmap_types.h>
-
-#ifdef CONFIG_HIGHMEM
-#include <asm/highmem.h>
-
-/* declarations for linux/mm/highmem.c */
-unsigned int nr_free_highpages(void);
-extern unsigned long totalhigh_pages;
-
-void kmap_flush_unused(void);
-
-struct page *kmap_to_page(void *addr);
-
-#else /* CONFIG_HIGHMEM */
-
-static inline unsigned int nr_free_highpages(void) { return 0; }
-
-static inline struct page *kmap_to_page(void *addr)
+/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
+#ifndef clear_user_highpage
+static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
- return virt_to_page(addr);
+ void *addr = kmap_local_page(page);
+ clear_user_page(addr, vaddr, page);
+ kunmap_local(addr);
}
+#endif
-#define totalhigh_pages 0UL
-
-#ifndef ARCH_HAS_KMAP
-static inline void *kmap(struct page *page)
+#ifndef vma_alloc_zeroed_movable_folio
+/**
+ * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
+ * @vma: The VMA the page is to be allocated for.
+ * @vaddr: The virtual address the page will be inserted into.
+ *
+ * This function will allocate a page suitable for inserting into this
+ * VMA at this virtual address. It may be allocated from highmem or
+ * the movable zone. An architecture may provide its own implementation.
+ *
+ * Return: A folio containing one allocated and zeroed page or NULL if
+ * we are out of memory.
+ */
+static inline
+struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
+ unsigned long vaddr)
{
- might_sleep();
- return page_address(page);
+ struct folio *folio;
+
+ folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
+ if (folio)
+ clear_user_highpage(&folio->page, vaddr);
+
+ return folio;
}
+#endif
-static inline void kunmap(struct page *page)
+static inline void clear_highpage(struct page *page)
{
+ void *kaddr = kmap_local_page(page);
+ clear_page(kaddr);
+ kunmap_local(kaddr);
}
-static inline void *kmap_atomic(struct page *page)
+static inline void clear_highpage_kasan_tagged(struct page *page)
{
- preempt_disable();
- pagefault_disable();
- return page_address(page);
+ void *kaddr = kmap_local_page(page);
+
+ clear_page(kasan_reset_tag(kaddr));
+ kunmap_local(kaddr);
}
-#define kmap_atomic_prot(page, prot) kmap_atomic(page)
-static inline void __kunmap_atomic(void *addr)
+#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
+
+static inline void tag_clear_highpage(struct page *page)
{
- pagefault_enable();
- preempt_enable();
}
-#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
-
-#define kmap_flush_unused() do {} while(0)
#endif
-#endif /* CONFIG_HIGHMEM */
+/*
+ * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
+ * If we pass in a head page, we can zero up to the size of the compound page.
+ */
+#ifdef CONFIG_HIGHMEM
+void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
+ unsigned start2, unsigned end2);
+#else
+static inline void zero_user_segments(struct page *page,
+ unsigned start1, unsigned end1,
+ unsigned start2, unsigned end2)
+{
+ void *kaddr = kmap_local_page(page);
+ unsigned int i;
-#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+ BUG_ON(end1 > page_size(page) || end2 > page_size(page));
-DECLARE_PER_CPU(int, __kmap_atomic_idx);
+ if (end1 > start1)
+ memset(kaddr + start1, 0, end1 - start1);
-static inline int kmap_atomic_idx_push(void)
-{
- int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
+ if (end2 > start2)
+ memset(kaddr + start2, 0, end2 - start2);
-#ifdef CONFIG_DEBUG_HIGHMEM
- WARN_ON_ONCE(in_irq() && !irqs_disabled());
- BUG_ON(idx >= KM_TYPE_NR);
+ kunmap_local(kaddr);
+ for (i = 0; i < compound_nr(page); i++)
+ flush_dcache_page(page + i);
+}
#endif
- return idx;
+
+static inline void zero_user_segment(struct page *page,
+ unsigned start, unsigned end)
+{
+ zero_user_segments(page, start, end, 0, 0);
}
-static inline int kmap_atomic_idx(void)
+static inline void zero_user(struct page *page,
+ unsigned start, unsigned size)
{
- return __this_cpu_read(__kmap_atomic_idx) - 1;
+ zero_user_segments(page, start, start + size, 0, 0);
}
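A hedged usage sketch, not taken from this diff: a filesystem zeroing the part of a page that lies beyond end-of-file before writing it back:

	static void example_zero_tail(struct page *page, loff_t isize)
	{
		unsigned int offset = offset_in_page(isize);

		if (offset)	/* zero from EOF to the end of the page */
			zero_user_segment(page, offset, PAGE_SIZE);
	}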
-static inline void kmap_atomic_idx_pop(void)
+#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
+
+static inline void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
{
-#ifdef CONFIG_DEBUG_HIGHMEM
- int idx = __this_cpu_dec_return(__kmap_atomic_idx);
+ char *vfrom, *vto;
+
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
+ copy_user_page(vto, vfrom, vaddr, to);
+ kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
+}
- BUG_ON(idx < 0);
-#else
- __this_cpu_dec(__kmap_atomic_idx);
#endif
+
+#ifndef __HAVE_ARCH_COPY_HIGHPAGE
+
+static inline void copy_highpage(struct page *to, struct page *from)
+{
+ char *vfrom, *vto;
+
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
+ copy_page(vto, vfrom);
+ kmsan_copy_page_meta(to, from);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
}
#endif
+#ifdef copy_mc_to_kernel
/*
- * Prevent people trying to call kunmap_atomic() as if it were kunmap()
- * kunmap_atomic() should get the return value of kmap_atomic, not the page.
+ * If architecture supports machine check exception handling, define the
+ * #MC versions of copy_user_highpage and copy_highpage. They copy a memory
+ * page with #MC in source page (@from) handled, and return the number
+ * of bytes not copied if there was a #MC, otherwise 0 for success.
*/
-#define kunmap_atomic(addr) \
-do { \
- BUILD_BUG_ON(__same_type((addr), struct page *)); \
- __kunmap_atomic(addr); \
-} while (0)
+static inline int copy_mc_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ unsigned long ret;
+ char *vfrom, *vto;
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
+ ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
+ if (!ret)
+ kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
-/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
-#ifndef clear_user_highpage
-static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
-{
- void *addr = kmap_atomic(page);
- clear_user_page(addr, vaddr, page);
- kunmap_atomic(addr);
+ return ret;
}
-#endif
-#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-/**
- * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
- * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE
- * @vma: The VMA the page is to be allocated for
- * @vaddr: The virtual address the page will be inserted into
- *
- * This function will allocate a page for a VMA but the caller is expected
- * to specify via movableflags whether the page will be movable in the
- * future or not
- *
- * An architecture may override this function by defining
- * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
- * implementation.
- */
-static inline struct page *
-__alloc_zeroed_user_highpage(gfp_t movableflags,
- struct vm_area_struct *vma,
- unsigned long vaddr)
+static inline int copy_mc_highpage(struct page *to, struct page *from)
{
- struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
- vma, vaddr);
+ unsigned long ret;
+ char *vfrom, *vto;
- if (page)
- clear_user_highpage(page, vaddr);
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
+ ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
+ if (!ret)
+ kmsan_copy_page_meta(to, from);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
- return page;
+ return ret;
+}
+#else
+static inline int copy_mc_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ copy_user_highpage(to, from, vaddr, vma);
+ return 0;
}
-#endif
-/**
- * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
- * @vma: The VMA the page is to be allocated for
- * @vaddr: The virtual address the page will be inserted into
- *
- * This function will allocate a page for a VMA that the caller knows will
- * be able to migrate in the future using move_pages() or reclaimed
- */
-static inline struct page *
-alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
- unsigned long vaddr)
+static inline int copy_mc_highpage(struct page *to, struct page *from)
{
- return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+ copy_highpage(to, from);
+ return 0;
}
+#endif
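A hedged sketch of a caller consuming the #MC-aware return value; the error code choice is illustrative:

	static int example_copy(struct page *dst, struct page *src)
	{
		/* nonzero: bytes were left uncopied because @src raised #MC */
		if (copy_mc_highpage(dst, src))
			return -EHWPOISON;

		return 0;
	}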
-static inline void clear_highpage(struct page *page)
+static inline void memcpy_page(struct page *dst_page, size_t dst_off,
+ struct page *src_page, size_t src_off,
+ size_t len)
{
- void *kaddr = kmap_atomic(page);
- clear_page(kaddr);
- kunmap_atomic(kaddr);
+ char *dst = kmap_local_page(dst_page);
+ char *src = kmap_local_page(src_page);
+
+ VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
+ memcpy(dst + dst_off, src + src_off, len);
+ kunmap_local(src);
+ kunmap_local(dst);
}
-static inline void zero_user_segments(struct page *page,
- unsigned start1, unsigned end1,
- unsigned start2, unsigned end2)
+static inline void memset_page(struct page *page, size_t offset, int val,
+ size_t len)
{
- void *kaddr = kmap_atomic(page);
+ char *addr = kmap_local_page(page);
- BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);
+ VM_BUG_ON(offset + len > PAGE_SIZE);
+ memset(addr + offset, val, len);
+ kunmap_local(addr);
+}
- if (end1 > start1)
- memset(kaddr + start1, 0, end1 - start1);
+static inline void memcpy_from_page(char *to, struct page *page,
+ size_t offset, size_t len)
+{
+ char *from = kmap_local_page(page);
- if (end2 > start2)
- memset(kaddr + start2, 0, end2 - start2);
+ VM_BUG_ON(offset + len > PAGE_SIZE);
+ memcpy(to, from + offset, len);
+ kunmap_local(from);
+}
+
+static inline void memcpy_to_page(struct page *page, size_t offset,
+ const char *from, size_t len)
+{
+ char *to = kmap_local_page(page);
- kunmap_atomic(kaddr);
+ VM_BUG_ON(offset + len > PAGE_SIZE);
+ memcpy(to + offset, from, len);
flush_dcache_page(page);
+ kunmap_local(to);
}
-static inline void zero_user_segment(struct page *page,
- unsigned start, unsigned end)
+static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
- zero_user_segments(page, start, end, 0, 0);
+ char *addr = kmap_local_page(page);
+
+ VM_BUG_ON(offset + len > PAGE_SIZE);
+ memset(addr + offset, 0, len);
+ flush_dcache_page(page);
+ kunmap_local(addr);
}
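As a quick illustration of the kmap_local-based helpers above, a sketch that stages a small buffer into a page and reads it back; the function and buffer names are assumptions:

	/* Illustrative: round-trip a small header through a page. */
	static void example_page_roundtrip(struct page *page)
	{
		char in[16] = "hello, highmem";
		char out[16];

		memcpy_to_page(page, 0, in, sizeof(in));	/* maps, copies, flushes */
		memcpy_from_page(out, page, 0, sizeof(out));
		memzero_page(page, 0, sizeof(in));		/* scrub the staged bytes */
	}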
-static inline void zero_user(struct page *page,
- unsigned start, unsigned size)
+/**
+ * memcpy_from_file_folio - Copy some bytes from a file folio.
+ * @to: The destination buffer.
+ * @folio: The folio to copy from.
+ * @pos: The position in the file.
+ * @len: The maximum number of bytes to copy.
+ *
+ * Copy up to @len bytes from this folio. This may be limited by PAGE_SIZE
+ * if the folio comes from HIGHMEM, and by the size of the folio.
+ *
+ * Return: The number of bytes copied from the folio.
+ */
+static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
+ loff_t pos, size_t len)
{
- zero_user_segments(page, start, start + size, 0, 0);
-}
+ size_t offset = offset_in_folio(folio, pos);
+ char *from = kmap_local_folio(folio, offset);
-#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
+ if (folio_test_highmem(folio)) {
+ offset = offset_in_page(offset);
+ len = min_t(size_t, len, PAGE_SIZE - offset);
+ } else
+ len = min(len, folio_size(folio) - offset);
-static inline void copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr, struct vm_area_struct *vma)
-{
- char *vfrom, *vto;
+ memcpy(to, from, len);
+ kunmap_local(from);
- vfrom = kmap_atomic(from);
- vto = kmap_atomic(to);
- copy_user_page(vto, vfrom, vaddr, to);
- kunmap_atomic(vto);
- kunmap_atomic(vfrom);
+ return len;
}
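Since the return value may be short of @len for HIGHMEM folios, callers are expected to loop; a hedged sketch, assuming the caller guarantees the range stays within the folio:

	/* Illustrative: copy @len bytes starting at file position @pos. */
	static void example_read_folio(char *buf, struct folio *folio,
				       loff_t pos, size_t len)
	{
		while (len) {
			size_t n = memcpy_from_file_folio(buf, folio, pos, len);

			buf += n;
			pos += n;
			len -= n;
		}
	}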
-#endif
+/**
+ * folio_zero_segments() - Zero two byte ranges in a folio.
+ * @folio: The folio to write to.
+ * @start1: The first byte to zero.
+ * @xend1: One more than the last byte in the first range.
+ * @start2: The first byte to zero in the second range.
+ * @xend2: One more than the last byte in the second range.
+ */
+static inline void folio_zero_segments(struct folio *folio,
+ size_t start1, size_t xend1, size_t start2, size_t xend2)
+{
+ zero_user_segments(&folio->page, start1, xend1, start2, xend2);
+}
-static inline void copy_highpage(struct page *to, struct page *from)
+/**
+ * folio_zero_segment() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @xend: One more than the last byte to zero.
+ */
+static inline void folio_zero_segment(struct folio *folio,
+ size_t start, size_t xend)
{
- char *vfrom, *vto;
+ zero_user_segments(&folio->page, start, xend, 0, 0);
+}
- vfrom = kmap_atomic(from);
- vto = kmap_atomic(to);
- copy_page(vto, vfrom);
- kunmap_atomic(vto);
- kunmap_atomic(vfrom);
+/**
+ * folio_zero_range() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @length: The number of bytes to zero.
+ */
+static inline void folio_zero_range(struct folio *folio,
+ size_t start, size_t length)
+{
+ zero_user_segments(&folio->page, start, start + length, 0, 0);
+}
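A common use of these wrappers, sketched under the assumption that @offset marks the end of valid data, is zeroing a folio tail, e.g. on truncation:

	/* Illustrative: zero everything past @offset within the folio. */
	static void example_zero_tail(struct folio *folio, size_t offset)
	{
		folio_zero_segment(folio, offset, folio_size(folio));
	}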
+
+static inline void put_and_unmap_page(struct page *page, void *addr)
+{
+ kunmap_local(addr);
+ put_page(page);
}
#endif /* _LINUX_HIGHMEM_H */
diff --git a/include/linux/highuid.h b/include/linux/highuid.h
index 434e56246f67..50d383fd674d 100644
--- a/include/linux/highuid.h
+++ b/include/linux/highuid.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHUID_H
#define _LINUX_HIGHUID_H
diff --git a/include/linux/hil_mlc.h b/include/linux/hil_mlc.h
index 394a8405dd74..369221fd5518 100644
--- a/include/linux/hil_mlc.h
+++ b/include/linux/hil_mlc.h
@@ -103,7 +103,7 @@ struct hilse_node {
/* Methods for back-end drivers, e.g. hp_sdc_mlc */
typedef int (hil_mlc_cts) (hil_mlc *mlc);
-typedef void (hil_mlc_out) (hil_mlc *mlc);
+typedef int (hil_mlc_out) (hil_mlc *mlc);
typedef int (hil_mlc_in) (hil_mlc *mlc, suseconds_t timeout);
struct hil_mlc_devinfo {
@@ -144,12 +144,12 @@ struct hil_mlc {
hil_packet ipacket[16];
hil_packet imatch;
int icount;
- struct timeval instart;
- suseconds_t intimeout;
+ unsigned long instart;
+ unsigned long intimeout;
int ddi; /* Last operational device id */
int lcv; /* LCV to throttle loops */
- struct timeval lcv_tv; /* Time loop was started */
+ time64_t lcv_time; /* Time loop was started */
int di_map[7]; /* Maps below items to live devs */
struct hil_mlc_devinfo di[HIL_MLC_DEVMEM];
diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h
index 402f99e328d4..07414c241e65 100644
--- a/include/linux/hippidevice.h
+++ b/include/linux/hippidevice.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
@@ -14,11 +15,6 @@
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Alan Cox, <gw4pts@gw4pts.ampr.org>
* Lawrence V. Stefani, <stefani@lkg.dec.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_HIPPIDEVICE_H
#define _LINUX_HIPPIDEVICE_H
@@ -27,6 +23,10 @@
#ifdef __KERNEL__
+struct neigh_parms;
+struct net_device;
+struct sk_buff;
+
struct hippi_cb {
__u32 ifield;
};
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
new file mode 100644
index 000000000000..a7d54d4d41fd
--- /dev/null
+++ b/include/linux/hisi_acc_qm.h
@@ -0,0 +1,536 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 HiSilicon Limited. */
+#ifndef HISI_ACC_QM_H
+#define HISI_ACC_QM_H
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#define QM_QNUM_V1 4096
+#define QM_QNUM_V2 1024
+#define QM_MAX_VFS_NUM_V2 63
+
+/* qm user domain */
+#define QM_ARUSER_M_CFG_1 0x100088
+#define AXUSER_SNOOP_ENABLE BIT(30)
+#define AXUSER_CMD_TYPE GENMASK(14, 12)
+#define AXUSER_CMD_SMMU_NORMAL 1
+#define AXUSER_NS BIT(6)
+#define AXUSER_NO BIT(5)
+#define AXUSER_FP BIT(4)
+#define AXUSER_SSV BIT(0)
+#define AXUSER_BASE (AXUSER_SNOOP_ENABLE | \
+ FIELD_PREP(AXUSER_CMD_TYPE, \
+ AXUSER_CMD_SMMU_NORMAL) | \
+ AXUSER_NS | AXUSER_NO | AXUSER_FP)
+#define QM_ARUSER_M_CFG_ENABLE 0x100090
+#define ARUSER_M_CFG_ENABLE 0xfffffffe
+#define QM_AWUSER_M_CFG_1 0x100098
+#define QM_AWUSER_M_CFG_ENABLE 0x1000a0
+#define AWUSER_M_CFG_ENABLE 0xfffffffe
+#define QM_WUSER_M_CFG_ENABLE 0x1000a8
+#define WUSER_M_CFG_ENABLE 0xffffffff
+
+/* mailbox */
+#define QM_MB_CMD_SQC 0x0
+#define QM_MB_CMD_CQC 0x1
+#define QM_MB_CMD_EQC 0x2
+#define QM_MB_CMD_AEQC 0x3
+#define QM_MB_CMD_SQC_BT 0x4
+#define QM_MB_CMD_CQC_BT 0x5
+#define QM_MB_CMD_SQC_VFT_V2 0x6
+#define QM_MB_CMD_STOP_QP 0x8
+#define QM_MB_CMD_SRC 0xc
+#define QM_MB_CMD_DST 0xd
+
+#define QM_MB_CMD_SEND_BASE 0x300
+#define QM_MB_EVENT_SHIFT 8
+#define QM_MB_BUSY_SHIFT 13
+#define QM_MB_OP_SHIFT 14
+#define QM_MB_CMD_DATA_ADDR_L 0x304
+#define QM_MB_CMD_DATA_ADDR_H 0x308
+#define QM_MB_MAX_WAIT_CNT 6000
+
+/* doorbell */
+#define QM_DOORBELL_CMD_SQ 0
+#define QM_DOORBELL_CMD_CQ 1
+#define QM_DOORBELL_CMD_EQ 2
+#define QM_DOORBELL_CMD_AEQ 3
+
+#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000
+#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000
+#define QM_QP_MAX_NUM_SHIFT 11
+#define QM_DB_CMD_SHIFT_V2 12
+#define QM_DB_RAND_SHIFT_V2 16
+#define QM_DB_INDEX_SHIFT_V2 32
+#define QM_DB_PRIORITY_SHIFT_V2 48
+#define QM_VF_STATE 0x60
+
+/* qm cache */
+#define QM_CACHE_CTL 0x100050
+#define SQC_CACHE_ENABLE BIT(0)
+#define CQC_CACHE_ENABLE BIT(1)
+#define SQC_CACHE_WB_ENABLE BIT(4)
+#define SQC_CACHE_WB_THRD GENMASK(10, 5)
+#define CQC_CACHE_WB_ENABLE BIT(11)
+#define CQC_CACHE_WB_THRD GENMASK(17, 12)
+#define QM_AXI_M_CFG 0x1000ac
+#define AXI_M_CFG 0xffff
+#define QM_AXI_M_CFG_ENABLE 0x1000b0
+#define AM_CFG_SINGLE_PORT_MAX_TRANS 0x300014
+#define AXI_M_CFG_ENABLE 0xffffffff
+#define QM_PEH_AXUSER_CFG 0x1000cc
+#define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0
+#define PEH_AXUSER_CFG 0x401001
+#define PEH_AXUSER_CFG_ENABLE 0xffffffff
+
+#define QM_MIN_QNUM 2
+#define HISI_ACC_SGL_SGE_NR_MAX 255
+#define QM_SHAPER_CFG 0x100164
+#define QM_SHAPER_ENABLE BIT(30)
+#define QM_SHAPER_TYPE1_OFFSET 10
+
+/* page number for queue file region */
+#define QM_DOORBELL_PAGE_NR 1
+
+/* uacce mode of the driver */
+#define UACCE_MODE_NOUACCE 0 /* don't use uacce */
+#define UACCE_MODE_SVA 1 /* use uacce sva mode */
+#define UACCE_MODE_DESC "0(default) means only register to crypto, 1 means both register to crypto and uacce"
+
+enum qm_stop_reason {
+ QM_NORMAL,
+ QM_SOFT_RESET,
+ QM_FLR,
+};
+
+enum qm_state {
+ QM_INIT = 0,
+ QM_START,
+ QM_CLOSE,
+ QM_STOP,
+};
+
+enum qp_state {
+ QP_INIT = 1,
+ QP_START,
+ QP_STOP,
+ QP_CLOSE,
+};
+
+enum qm_hw_ver {
+ QM_HW_V1 = 0x20,
+ QM_HW_V2 = 0x21,
+ QM_HW_V3 = 0x30,
+};
+
+enum qm_fun_type {
+ QM_HW_PF,
+ QM_HW_VF,
+};
+
+enum qm_debug_file {
+ CURRENT_QM,
+ CURRENT_Q,
+ CLEAR_ENABLE,
+ DEBUG_FILE_NUM,
+};
+
+enum qm_vf_state {
+ QM_READY = 0,
+ QM_NOT_READY,
+};
+
+enum qm_cap_bits {
+ QM_SUPPORT_DB_ISOLATION = 0x0,
+ QM_SUPPORT_FUNC_QOS,
+ QM_SUPPORT_STOP_QP,
+ QM_SUPPORT_MB_COMMAND,
+ QM_SUPPORT_SVA_PREFETCH,
+ QM_SUPPORT_RPM,
+};
+
+struct dfx_diff_registers {
+ u32 *regs;
+ u32 reg_offset;
+ u32 reg_len;
+};
+
+struct qm_dfx {
+ atomic64_t err_irq_cnt;
+ atomic64_t aeq_irq_cnt;
+ atomic64_t abnormal_irq_cnt;
+ atomic64_t create_qp_err_cnt;
+ atomic64_t mb_err_cnt;
+};
+
+struct debugfs_file {
+ enum qm_debug_file index;
+ struct mutex lock;
+ struct qm_debug *debug;
+};
+
+struct qm_debug {
+ u32 curr_qm_qp_num;
+ u32 sqe_mask_offset;
+ u32 sqe_mask_len;
+ struct qm_dfx dfx;
+ struct dentry *debug_root;
+ struct dentry *qm_d;
+ struct debugfs_file files[DEBUG_FILE_NUM];
+ unsigned int *qm_last_words;
+ /* ACC engines recording last regs */
+ unsigned int *last_words;
+ struct dfx_diff_registers *qm_diff_regs;
+ struct dfx_diff_registers *acc_diff_regs;
+};
+
+struct qm_shaper_factor {
+ u32 func_qos;
+ u64 cir_b;
+ u64 cir_u;
+ u64 cir_s;
+ u64 cbs_s;
+};
+
+struct qm_dma {
+ void *va;
+ dma_addr_t dma;
+ size_t size;
+};
+
+struct hisi_qm_status {
+ u32 eq_head;
+ bool eqc_phase;
+ u32 aeq_head;
+ bool aeqc_phase;
+ atomic_t flags;
+ int stop_reason;
+};
+
+struct hisi_qm;
+
+struct hisi_qm_err_info {
+ char *acpi_rst;
+ u32 msi_wr_port;
+ u32 ecc_2bits_mask;
+ u32 qm_shutdown_mask;
+ u32 dev_shutdown_mask;
+ u32 qm_reset_mask;
+ u32 dev_reset_mask;
+ u32 ce;
+ u32 nfe;
+ u32 fe;
+};
+
+struct hisi_qm_err_status {
+ u32 is_qm_ecc_mbit;
+ u32 is_dev_ecc_mbit;
+};
+
+struct hisi_qm_err_ini {
+ int (*hw_init)(struct hisi_qm *qm);
+ void (*hw_err_enable)(struct hisi_qm *qm);
+ void (*hw_err_disable)(struct hisi_qm *qm);
+ u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
+ void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
+ void (*open_axi_master_ooo)(struct hisi_qm *qm);
+ void (*close_axi_master_ooo)(struct hisi_qm *qm);
+ void (*open_sva_prefetch)(struct hisi_qm *qm);
+ void (*close_sva_prefetch)(struct hisi_qm *qm);
+ void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
+ void (*show_last_dfx_regs)(struct hisi_qm *qm);
+ void (*err_info_init)(struct hisi_qm *qm);
+};
+
+struct hisi_qm_cap_info {
+ u32 type;
+ /* Register offset */
+ u32 offset;
+ /* Bit offset in register */
+ u32 shift;
+ u32 mask;
+ u32 v1_val;
+ u32 v2_val;
+ u32 v3_val;
+};
+
+struct hisi_qm_list {
+ struct mutex lock;
+ struct list_head list;
+ int (*register_to_crypto)(struct hisi_qm *qm);
+ void (*unregister_from_crypto)(struct hisi_qm *qm);
+};
+
+struct hisi_qm_poll_data {
+ struct hisi_qm *qm;
+ struct work_struct work;
+ u16 *qp_finish_id;
+};
+
+/**
+ * struct qm_err_isolate
+ * @isolate_lock: protects device error log
+ * @err_threshold: user config error threshold which triggers isolation
+ * @is_isolate: device isolation state
+ * @qm_hw_errs: list of device errors reported by this QM
+ */
+struct qm_err_isolate {
+ struct mutex isolate_lock;
+ u32 err_threshold;
+ bool is_isolate;
+ struct list_head qm_hw_errs;
+};
+
+struct hisi_qm {
+ enum qm_hw_ver ver;
+ enum qm_fun_type fun_type;
+ const char *dev_name;
+ struct pci_dev *pdev;
+ void __iomem *io_base;
+ void __iomem *db_io_base;
+
+ /* Capability version, 0: not supported */
+ u32 cap_ver;
+ u32 sqe_size;
+ u32 qp_base;
+ u32 qp_num;
+ u32 qp_in_used;
+ u32 ctrl_qp_num;
+ u32 max_qp_num;
+ u32 vfs_num;
+ u32 db_interval;
+ u16 eq_depth;
+ u16 aeq_depth;
+ struct list_head list;
+ struct hisi_qm_list *qm_list;
+
+ struct qm_dma qdma;
+ struct qm_sqc *sqc;
+ struct qm_cqc *cqc;
+ struct qm_eqe *eqe;
+ struct qm_aeqe *aeqe;
+ dma_addr_t sqc_dma;
+ dma_addr_t cqc_dma;
+ dma_addr_t eqe_dma;
+ dma_addr_t aeqe_dma;
+
+ struct hisi_qm_status status;
+ const struct hisi_qm_err_ini *err_ini;
+ struct hisi_qm_err_info err_info;
+ struct hisi_qm_err_status err_status;
+ /* driver removing and reset sched */
+ unsigned long misc_ctl;
+ /* Device capability bit */
+ unsigned long caps;
+
+ struct rw_semaphore qps_lock;
+ struct idr qp_idr;
+ struct hisi_qp *qp_array;
+ struct hisi_qm_poll_data *poll_data;
+
+ struct mutex mailbox_lock;
+
+ const struct hisi_qm_hw_ops *ops;
+
+ struct qm_debug debug;
+
+ u32 error_mask;
+
+ struct workqueue_struct *wq;
+ struct work_struct rst_work;
+ struct work_struct cmd_process;
+
+ const char *algs;
+ bool use_sva;
+
+ resource_size_t phys_base;
+ resource_size_t db_phys_base;
+ struct uacce_device *uacce;
+ int mode;
+ struct qm_shaper_factor *factor;
+ u32 mb_qos;
+ u32 type_rate;
+ struct qm_err_isolate isolate_data;
+};
+
+struct hisi_qp_status {
+ atomic_t used;
+ u16 sq_tail;
+ u16 cq_head;
+ bool cqc_phase;
+ atomic_t flags;
+};
+
+struct hisi_qp_ops {
+ int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
+};
+
+struct hisi_qp {
+ u32 qp_id;
+ u16 sq_depth;
+ u16 cq_depth;
+ u8 alg_type;
+ u8 req_type;
+
+ struct qm_dma qdma;
+ void *sqe;
+ struct qm_cqe *cqe;
+ dma_addr_t sqe_dma;
+ dma_addr_t cqe_dma;
+
+ struct hisi_qp_status qp_status;
+ struct hisi_qp_ops *hw_ops;
+ void *qp_ctx;
+ void (*req_cb)(struct hisi_qp *qp, void *data);
+ void (*event_cb)(struct hisi_qp *qp);
+
+ struct hisi_qm *qm;
+ bool is_resetting;
+ bool is_in_kernel;
+ u16 pasid;
+ struct uacce_queue *uacce_q;
+};
+
+static inline int q_num_set(const char *val, const struct kernel_param *kp,
+ unsigned int device)
+{
+ struct pci_dev *pdev;
+ u32 n, q_num;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL);
+ if (!pdev) {
+ q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
+ pr_info("No device found currently, suppose queue number is %u\n",
+ q_num);
+ } else {
+ if (pdev->revision == QM_HW_V1)
+ q_num = QM_QNUM_V1;
+ else
+ q_num = QM_QNUM_V2;
+
+ pci_dev_put(pdev);
+ }
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret || n < QM_MIN_QNUM || n > q_num)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
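These inline parameter helpers are meant to be wrapped per driver; a sketch of the usual wiring, where the PCI device ID, variable and parameter names are placeholders:

	/* Sketch: bind q_num_set() to a module parameter for one function. */
	static int example_pf_q_num_set(const char *val,
					const struct kernel_param *kp)
	{
		return q_num_set(val, kp, 0xa250 /* placeholder device ID */);
	}

	static const struct kernel_param_ops example_pf_q_num_ops = {
		.set = example_pf_q_num_set,
		.get = param_get_int,
	};

	static u32 example_pf_q_num = QM_QNUM_V2;
	module_param_cb(pf_q_num, &example_pf_q_num_ops, &example_pf_q_num, 0444);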
+
+static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
+{
+ u32 n;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret < 0)
+ return ret;
+
+ if (n > QM_MAX_VFS_NUM_V2)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static inline int mode_set(const char *val, const struct kernel_param *kp)
+{
+ u32 n;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret != 0 || (n != UACCE_MODE_SVA &&
+ n != UACCE_MODE_NOUACCE))
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static inline int uacce_mode_set(const char *val, const struct kernel_param *kp)
+{
+ return mode_set(val, kp);
+}
+
+static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
+{
+ INIT_LIST_HEAD(&qm_list->list);
+ mutex_init(&qm_list->lock);
+}
+
+int hisi_qm_init(struct hisi_qm *qm);
+void hisi_qm_uninit(struct hisi_qm *qm);
+int hisi_qm_start(struct hisi_qm *qm);
+int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
+int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
+int hisi_qm_stop_qp(struct hisi_qp *qp);
+int hisi_qp_send(struct hisi_qp *qp, const void *msg);
+void hisi_qm_debug_init(struct hisi_qm *qm);
+void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
+int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
+int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen);
+int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);
+void hisi_qm_dev_err_init(struct hisi_qm *qm);
+void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
+int hisi_qm_regs_debugfs_init(struct hisi_qm *qm,
+ struct dfx_diff_registers *dregs, u32 reg_len);
+void hisi_qm_regs_debugfs_uninit(struct hisi_qm *qm, u32 reg_len);
+void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
+ struct dfx_diff_registers *dregs, u32 regs_len);
+
+pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
+void hisi_qm_reset_prepare(struct pci_dev *pdev);
+void hisi_qm_reset_done(struct pci_dev *pdev);
+
+int hisi_qm_wait_mb_ready(struct hisi_qm *qm);
+int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
+ bool op);
+
+struct hisi_acc_sgl_pool;
+struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
+ struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
+ u32 index, dma_addr_t *hw_sgl_dma);
+void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
+ struct hisi_acc_hw_sgl *hw_sgl);
+struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
+ u32 count, u32 sge_nr);
+void hisi_acc_free_sgl_pool(struct device *dev,
+ struct hisi_acc_sgl_pool *pool);
+int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
+ u8 alg_type, int node, struct hisi_qp **qps);
+void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
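A sketch of the allocate/use/free pairing an accelerator driver might follow; the list, counts and NUMA node are assumptions:

	/* Illustrative: take two queue pairs near @node, release on exit. */
	static int example_create_qps(struct hisi_qm_list *qm_list, int node)
	{
		struct hisi_qp *qps[2];
		int ret;

		ret = hisi_qm_alloc_qps_node(qm_list, 2, 0, node, qps);
		if (ret)
			return ret;
		/* ... submit work via hisi_qp_send() on qps[0]/qps[1] ... */
		hisi_qm_free_qps(qps, 2);
		return 0;
	}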
+void hisi_qm_dev_shutdown(struct pci_dev *pdev);
+void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
+int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
+void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
+int hisi_qm_resume(struct device *dev);
+int hisi_qm_suspend(struct device *dev);
+void hisi_qm_pm_uninit(struct hisi_qm *qm);
+void hisi_qm_pm_init(struct hisi_qm *qm);
+int hisi_qm_get_dfx_access(struct hisi_qm *qm);
+void hisi_qm_put_dfx_access(struct hisi_qm *qm);
+void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
+u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
+ const struct hisi_qm_cap_info *info_table,
+ u32 index, bool is_read);
+
+/* Used by VFIO ACC live migration driver */
+struct pci_driver *hisi_sec_get_pf_driver(void);
+struct pci_driver *hisi_hpre_get_pf_driver(void);
+struct pci_driver *hisi_zip_get_pf_driver(void);
+#endif
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
new file mode 100644
index 000000000000..126a36571667
--- /dev/null
+++ b/include/linux/hmm.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Authors: Jérôme Glisse <jglisse@redhat.com>
+ *
+ * See Documentation/mm/hmm.rst for reasons and overview of what HMM is.
+ */
+#ifndef LINUX_HMM_H
+#define LINUX_HMM_H
+
+#include <linux/mm.h>
+
+struct mmu_interval_notifier;
+
+/*
+ * On output:
+ * 0 - The page is faultable and a future call with
+ * HMM_PFN_REQ_FAULT could succeed.
+ * HMM_PFN_VALID - the pfn field points to a valid PFN. This PFN is at
+ * least readable. If dev_private_owner is !NULL then this could
+ * point at a DEVICE_PRIVATE page.
+ * HMM_PFN_WRITE - if the page memory can be written to (requires HMM_PFN_VALID)
+ * HMM_PFN_ERROR - accessing the pfn is impossible and the device should
+ * fail, e.g. poisoned memory, special pages, no vma, etc.
+ *
+ * On input:
+ * 0 - Return the current state of the page, do not fault it.
+ * HMM_PFN_REQ_FAULT - The output must have HMM_PFN_VALID or hmm_range_fault()
+ * will fail
+ * HMM_PFN_REQ_WRITE - The output must have HMM_PFN_WRITE or hmm_range_fault()
+ * will fail. Must be combined with HMM_PFN_REQ_FAULT.
+ */
+enum hmm_pfn_flags {
+ /* Output fields and flags */
+ HMM_PFN_VALID = 1UL << (BITS_PER_LONG - 1),
+ HMM_PFN_WRITE = 1UL << (BITS_PER_LONG - 2),
+ HMM_PFN_ERROR = 1UL << (BITS_PER_LONG - 3),
+ HMM_PFN_ORDER_SHIFT = (BITS_PER_LONG - 8),
+
+ /* Input flags */
+ HMM_PFN_REQ_FAULT = HMM_PFN_VALID,
+ HMM_PFN_REQ_WRITE = HMM_PFN_WRITE,
+
+ HMM_PFN_FLAGS = 0xFFUL << HMM_PFN_ORDER_SHIFT,
+};
+
+/*
+ * hmm_pfn_to_page() - return struct page pointed to by a device entry
+ *
+ * This must be called under the caller's 'user_lock' after a successful
+ * mmu_interval_read_begin(). The caller must have tested for HMM_PFN_VALID
+ * already.
+ */
+static inline struct page *hmm_pfn_to_page(unsigned long hmm_pfn)
+{
+ return pfn_to_page(hmm_pfn & ~HMM_PFN_FLAGS);
+}
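For instance, a short sketch of filtering one output entry (the helper name is an assumption):

	/* Illustrative: translate a valid result entry to its page. */
	static struct page *example_entry_to_page(unsigned long hmm_pfn)
	{
		if (!(hmm_pfn & HMM_PFN_VALID))
			return NULL;
		return hmm_pfn_to_page(hmm_pfn);
	}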
+
+/*
+ * hmm_pfn_to_map_order() - return the CPU mapping size order
+ *
+ * This is optionally useful to optimize processing of the pfn result
+ * array. It indicates that the page starts at the order aligned VA and is
+ * 1<<order bytes long. Every pfn within a high-order page will have the
+ * same pfn flags, both access protections and the map_order. The caller must
+ * be careful with edge cases as the start and end VA of the given page may
+ * extend past the range used with hmm_range_fault().
+ *
+ * This must be called under the caller's 'user_lock' after a successful
+ * mmu_interval_read_begin(). The caller must have tested for HMM_PFN_VALID
+ * already.
+ */
+static inline unsigned int hmm_pfn_to_map_order(unsigned long hmm_pfn)
+{
+ return (hmm_pfn >> HMM_PFN_ORDER_SHIFT) & 0x1F;
+}
+
+/*
+ * struct hmm_range - track invalidation lock on virtual address range
+ *
+ * @notifier: a mmu_interval_notifier that includes the start/end
+ * @notifier_seq: result of mmu_interval_read_begin()
+ * @start: range virtual start address (inclusive)
+ * @end: range virtual end address (exclusive)
+ * @hmm_pfns: array of pfns (big enough for the range)
+ * @default_flags: default flags for the range (write, read, ... see hmm doc)
+ * @pfn_flags_mask: allows to mask pfn flags so that only default_flags matter
+ * @dev_private_owner: owner of device private pages
+ */
+struct hmm_range {
+ struct mmu_interval_notifier *notifier;
+ unsigned long notifier_seq;
+ unsigned long start;
+ unsigned long end;
+ unsigned long *hmm_pfns;
+ unsigned long default_flags;
+ unsigned long pfn_flags_mask;
+ void *dev_private_owner;
+};
+
+/*
+ * Please see Documentation/mm/hmm.rst for how to use the range API.
+ */
+int hmm_range_fault(struct hmm_range *range);
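The documented calling convention is a retry loop under an mmu interval notifier; a condensed sketch, assuming the caller owns the notifier and a driver-side lock (taken where the comment indicates):

	/* Illustrative retry loop around hmm_range_fault(). */
	static int example_fault_range(struct mm_struct *mm,
				       struct hmm_range *range)
	{
		int ret;

	again:
		range->notifier_seq = mmu_interval_read_begin(range->notifier);
		mmap_read_lock(mm);
		ret = hmm_range_fault(range);
		mmap_read_unlock(mm);
		if (ret) {
			if (ret == -EBUSY)
				goto again;	/* raced with an invalidation */
			return ret;
		}
		/* take the driver lock protecting device page tables, then: */
		if (mmu_interval_read_retry(range->notifier,
					    range->notifier_seq))
			goto again;
		return 0;
	}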
+
+/*
+ * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
+ *
+ * When waiting for mmu notifiers we need some kind of timeout, otherwise we
+ * could potentially wait forever; 1000ms, i.e. 1s, already sounds like a
+ * long time to wait.
+ */
+#define HMM_RANGE_DEFAULT_TIMEOUT 1000
+
+#endif /* LINUX_HMM_H */
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 630b1a98ab58..9c8119ed13a4 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -1,25 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __LINUX_HOST1X_H
#define __LINUX_HOST1X_H
#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-fence.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
enum host1x_class {
@@ -28,35 +18,82 @@ enum host1x_class {
HOST1X_CLASS_GR2D_SB = 0x52,
HOST1X_CLASS_VIC = 0x5D,
HOST1X_CLASS_GR3D = 0x60,
+ HOST1X_CLASS_NVDEC = 0xF0,
+ HOST1X_CLASS_NVDEC1 = 0xF5,
};
+struct host1x;
struct host1x_client;
+struct iommu_group;
+
+u64 host1x_get_dma_mask(struct host1x *host1x);
+
+/**
+ * struct host1x_bo_cache - host1x buffer object cache
+ * @mappings: list of mappings
+ * @lock: synchronizes accesses to the list of mappings
+ *
+ * Note that entries are not periodically evicted from this cache and instead need to be
+ * explicitly released. This is used primarily for DRM/KMS where the cache's reference is
+ * released when the last reference to a buffer object represented by a mapping in this
+ * cache is dropped.
+ */
+struct host1x_bo_cache {
+ struct list_head mappings;
+ struct mutex lock;
+};
+
+static inline void host1x_bo_cache_init(struct host1x_bo_cache *cache)
+{
+ INIT_LIST_HEAD(&cache->mappings);
+ mutex_init(&cache->lock);
+}
+
+static inline void host1x_bo_cache_destroy(struct host1x_bo_cache *cache)
+{
+ /* XXX warn if not empty? */
+ mutex_destroy(&cache->lock);
+}
/**
* struct host1x_client_ops - host1x client operations
+ * @early_init: host1x client early initialization code
* @init: host1x client initialization code
* @exit: host1x client tear down code
+ * @late_exit: host1x client late tear down code
+ * @suspend: host1x client suspend code
+ * @resume: host1x client resume code
*/
struct host1x_client_ops {
+ int (*early_init)(struct host1x_client *client);
int (*init)(struct host1x_client *client);
int (*exit)(struct host1x_client *client);
+ int (*late_exit)(struct host1x_client *client);
+ int (*suspend)(struct host1x_client *client);
+ int (*resume)(struct host1x_client *client);
};
/**
* struct host1x_client - host1x client structure
* @list: list node for the host1x client
- * @parent: pointer to struct device representing the host1x controller
+ * @host: pointer to struct device representing the host1x controller
* @dev: pointer to struct device backing this host1x client
+ * @group: IOMMU group that this client is a member of
* @ops: host1x client operations
* @class: host1x class represented by this client
* @channel: host1x channel associated with this client
* @syncpts: array of syncpoints requested for this client
* @num_syncpts: number of syncpoints requested for this client
+ * @parent: pointer to parent structure
+ * @usecount: reference count for this structure
+ * @lock: mutex for mutually exclusive concurrency
+ * @cache: host1x buffer object cache
*/
struct host1x_client {
struct list_head list;
- struct device *parent;
+ struct device *host;
struct device *dev;
+ struct iommu_group *group;
const struct host1x_client_ops *ops;
@@ -65,6 +102,12 @@ struct host1x_client {
struct host1x_syncpt **syncpts;
unsigned int num_syncpts;
+
+ struct host1x_client *parent;
+ unsigned int usecount;
+ struct mutex lock;
+
+ struct host1x_bo_cache cache;
};
/*
@@ -74,24 +117,48 @@ struct host1x_client {
struct host1x_bo;
struct sg_table;
+struct host1x_bo_mapping {
+ struct kref ref;
+ struct dma_buf_attachment *attach;
+ enum dma_data_direction direction;
+ struct list_head list;
+ struct host1x_bo *bo;
+ struct sg_table *sgt;
+ unsigned int chunks;
+ struct device *dev;
+ dma_addr_t phys;
+ size_t size;
+
+ struct host1x_bo_cache *cache;
+ struct list_head entry;
+};
+
+static inline struct host1x_bo_mapping *to_host1x_bo_mapping(struct kref *ref)
+{
+ return container_of(ref, struct host1x_bo_mapping, ref);
+}
+
struct host1x_bo_ops {
struct host1x_bo *(*get)(struct host1x_bo *bo);
void (*put)(struct host1x_bo *bo);
- dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
- void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
+ struct host1x_bo_mapping *(*pin)(struct device *dev, struct host1x_bo *bo,
+ enum dma_data_direction dir);
+ void (*unpin)(struct host1x_bo_mapping *map);
void *(*mmap)(struct host1x_bo *bo);
void (*munmap)(struct host1x_bo *bo, void *addr);
- void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
- void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
};
struct host1x_bo {
const struct host1x_bo_ops *ops;
+ struct list_head mappings;
+ spinlock_t lock;
};
static inline void host1x_bo_init(struct host1x_bo *bo,
const struct host1x_bo_ops *ops)
{
+ INIT_LIST_HEAD(&bo->mappings);
+ spin_lock_init(&bo->lock);
bo->ops = ops;
}
@@ -105,16 +172,10 @@ static inline void host1x_bo_put(struct host1x_bo *bo)
bo->ops->put(bo);
}
-static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
- struct sg_table **sgt)
-{
- return bo->ops->pin(bo, sgt);
-}
-
-static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
-{
- bo->ops->unpin(bo, sgt);
-}
+struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
+ enum dma_data_direction dir,
+ struct host1x_bo_cache *cache);
+void host1x_bo_unpin(struct host1x_bo_mapping *map);
static inline void *host1x_bo_mmap(struct host1x_bo *bo)
{
@@ -126,17 +187,6 @@ static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
bo->ops->munmap(bo, addr);
}
-static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
-{
- return bo->ops->kmap(bo, pagenum);
-}
-
-static inline void host1x_bo_kunmap(struct host1x_bo *bo,
- unsigned int pagenum, void *addr)
-{
- bo->ops->kunmap(bo, pagenum, addr);
-}
-
/*
* host1x syncpoints
*/
@@ -148,7 +198,9 @@ struct host1x_syncpt_base;
struct host1x_syncpt;
struct host1x;
-struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
+struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host, u32 id);
+struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host, u32 id);
+struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp);
u32 host1x_syncpt_id(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
@@ -157,13 +209,23 @@ int host1x_syncpt_incr(struct host1x_syncpt *sp);
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
u32 *value);
-struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
+struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
unsigned long flags);
-void host1x_syncpt_free(struct host1x_syncpt *sp);
+void host1x_syncpt_put(struct host1x_syncpt *sp);
+struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
+ unsigned long flags,
+ const char *name);
struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
+void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
+ u32 syncpt_id);
+
+struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold,
+ bool timeout);
+void host1x_fence_cancel(struct dma_fence *fence);
+
/*
* host1x channel
*/
@@ -171,8 +233,9 @@ u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
struct host1x_channel;
struct host1x_job;
-struct host1x_channel *host1x_channel_request(struct device *dev);
+struct host1x_channel *host1x_channel_request(struct host1x_client *client);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
+void host1x_channel_stop(struct host1x_channel *channel);
void host1x_channel_put(struct host1x_channel *channel);
int host1x_job_submit(struct host1x_job *job);
@@ -180,6 +243,9 @@ int host1x_job_submit(struct host1x_job *job);
* host1x job
*/
+#define HOST1X_RELOC_READ (1 << 0)
+#define HOST1X_RELOC_WRITE (1 << 1)
+
struct host1x_reloc {
struct {
struct host1x_bo *bo;
@@ -190,13 +256,7 @@ struct host1x_reloc {
unsigned long offset;
} target;
unsigned long shift;
-};
-
-struct host1x_waitchk {
- struct host1x_bo *bo;
- u32 offset;
- u32 syncpt_id;
- u32 thresh;
+ unsigned long flags;
};
struct host1x_job {
@@ -209,19 +269,15 @@ struct host1x_job {
/* Channel where job is submitted to */
struct host1x_channel *channel;
- u32 client;
+ /* client where the job originated */
+ struct host1x_client *client;
/* Gathers and their memory */
- struct host1x_job_gather *gathers;
- unsigned int num_gathers;
-
- /* Wait checks to be processed at submit time */
- struct host1x_waitchk *waitchk;
- unsigned int num_waitchk;
- u32 waitchk_mask;
+ struct host1x_job_cmd *cmds;
+ unsigned int num_cmds;
/* Array of handles to be pinned & unpinned */
- struct host1x_reloc *relocarray;
+ struct host1x_reloc *relocs;
unsigned int num_relocs;
struct host1x_job_unpin_data *unpins;
unsigned int num_unpins;
@@ -231,13 +287,20 @@ struct host1x_job {
dma_addr_t *reloc_addr_phys;
/* Sync point id, number of increments and end related to the submit */
- u32 syncpt_id;
+ struct host1x_syncpt *syncpt;
u32 syncpt_incrs;
u32 syncpt_end;
+ /* Completion fence for job tracking */
+ struct dma_fence *fence;
+ struct dma_fence_cb fence_cb;
+
/* Maximum time to wait for this job */
unsigned int timeout;
+ /* Job has timed out and should be released */
+ bool cancelled;
+
/* Index and number of slots used in the push buffer */
unsigned int first_get;
unsigned int num_slots;
@@ -258,13 +321,33 @@ struct host1x_job {
/* Add a channel wait for previous ops to complete */
bool serialize;
+
+ /* Fast-forward syncpoint increments on job timeout */
+ bool syncpt_recovery;
+
+ /* Callback called when job is freed */
+ void (*release)(struct host1x_job *job);
+ void *user_data;
+
+ /* Whether the host1x-side firewall should be run for this job or not */
+ bool enable_firewall;
+
+ /* Options for configuring engine data stream ID */
+ /* Context device to use for job */
+ struct host1x_memory_context *memory_context;
+ /* Stream ID to use if context isolation is disabled (!memory_context) */
+ u32 engine_fallback_streamid;
+ /* Engine offset to program stream ID to */
+ u32 engine_streamid_offset;
};
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
u32 num_cmdbufs, u32 num_relocs,
- u32 num_waitchks);
-void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
- u32 words, u32 offset);
+ bool skip_firewall);
+void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
+ unsigned int words, unsigned int offset);
+void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
+ bool relative, u32 next_class);
struct host1x_job *host1x_job_get(struct host1x_job *job);
void host1x_job_put(struct host1x_job *job);
int host1x_job_pin(struct host1x_job *job, struct device *dev);
@@ -322,6 +405,8 @@ struct host1x_device {
struct list_head clients;
bool registered;
+
+ struct device_dma_parameters dma_parms;
};
static inline struct host1x_device *to_host1x_device(struct device *dev)
@@ -332,15 +417,81 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
int host1x_device_init(struct host1x_device *device);
int host1x_device_exit(struct host1x_device *device);
-int host1x_client_register(struct host1x_client *client);
-int host1x_client_unregister(struct host1x_client *client);
+void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
+void host1x_client_exit(struct host1x_client *client);
+
+#define host1x_client_init(client) \
+ ({ \
+ static struct lock_class_key __key; \
+ __host1x_client_init(client, &__key); \
+ })
+
+int __host1x_client_register(struct host1x_client *client);
+
+/*
+ * Note that this wrapper calls __host1x_client_init() for compatibility
+ * with existing callers. Callers that want to separately initialize and
+ * register a host1x client must first initialize using either of the
+ * __host1x_client_init() or host1x_client_init() functions and then use
+ * the low-level __host1x_client_register() function to avoid the client
+ * getting reinitialized.
+ */
+#define host1x_client_register(client) \
+ ({ \
+ static struct lock_class_key __key; \
+ __host1x_client_init(client, &__key); \
+ __host1x_client_register(client); \
+ })
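A sketch of the split flow the comment above describes (probe-time names are assumptions): initialize once, then use the low-level register call so the client is not reinitialized:

	/* Illustrative: init at probe, register once everything is wired. */
	static int example_probe(struct host1x_client *client)
	{
		host1x_client_init(client);	/* sets up the lock class once */
		/* ... fill in client->ops, client->dev, ... */
		return __host1x_client_register(client);
	}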
+
+void host1x_client_unregister(struct host1x_client *client);
+
+int host1x_client_suspend(struct host1x_client *client);
+int host1x_client_resume(struct host1x_client *client);
struct tegra_mipi_device;
-struct tegra_mipi_device *tegra_mipi_request(struct device *device);
+struct tegra_mipi_device *tegra_mipi_request(struct device *device,
+ struct device_node *np);
void tegra_mipi_free(struct tegra_mipi_device *device);
int tegra_mipi_enable(struct tegra_mipi_device *device);
int tegra_mipi_disable(struct tegra_mipi_device *device);
-int tegra_mipi_calibrate(struct tegra_mipi_device *device);
+int tegra_mipi_start_calibration(struct tegra_mipi_device *device);
+int tegra_mipi_finish_calibration(struct tegra_mipi_device *device);
+
+/* host1x memory contexts */
+
+struct host1x_memory_context {
+ struct host1x *host;
+
+ refcount_t ref;
+ struct pid *owner;
+
+ struct device dev;
+ u64 dma_mask;
+ u32 stream_id;
+};
+
+#ifdef CONFIG_IOMMU_API
+struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
+ struct device *dev,
+ struct pid *pid);
+void host1x_memory_context_get(struct host1x_memory_context *cd);
+void host1x_memory_context_put(struct host1x_memory_context *cd);
+#else
+static inline struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
+ struct device *dev,
+ struct pid *pid)
+{
+ return NULL;
+}
+
+static inline void host1x_memory_context_get(struct host1x_memory_context *cd)
+{
+}
+
+static inline void host1x_memory_context_put(struct host1x_memory_context *cd)
+{
+}
+#endif
#endif
diff --git a/include/linux/host1x_context_bus.h b/include/linux/host1x_context_bus.h
new file mode 100644
index 000000000000..72462737a6db
--- /dev/null
+++ b/include/linux/host1x_context_bus.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
+ */
+
+#ifndef __LINUX_HOST1X_CONTEXT_BUS_H
+#define __LINUX_HOST1X_CONTEXT_BUS_H
+
+#include <linux/device.h>
+
+#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
+extern struct bus_type host1x_context_device_bus_type;
+#endif
+
+#endif
diff --git a/include/linux/hp_sdc.h b/include/linux/hp_sdc.h
index d392975d8887..9be8704e2d38 100644
--- a/include/linux/hp_sdc.h
+++ b/include/linux/hp_sdc.h
@@ -180,7 +180,7 @@ switch (val) { \
#define HP_SDC_CMD_SET_IM 0x40 /* 010xxxxx == set irq mask */
-/* The documents provided do not explicitly state that all registers betweem
+/* The documents provided do not explicitly state that all registers between
* 0x01 and 0x1f inclusive can be read by sending their register index as a
* command, but this is implied and appears to be the case.
*/
@@ -281,7 +281,7 @@ typedef struct {
hp_sdc_transaction *tq[HP_SDC_QUEUE_LEN]; /* All pending read/writes */
int rcurr, rqty; /* Current read transact in process */
- struct timeval rtv; /* Time when current read started */
+ ktime_t rtime; /* Time when current read started */
int wcurr; /* Current write transact in process */
int dev_err; /* carries status from registration */
diff --git a/include/linux/hpet.h b/include/linux/hpet.h
index 9427ab4e01c3..21e69eaf7a36 100644
--- a/include/linux/hpet.h
+++ b/include/linux/hpet.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __HPET__
#define __HPET__ 1
@@ -29,7 +30,7 @@ struct hpet {
unsigned long _hpet_compare;
} _u1;
u64 hpet_fsb[2]; /* FSB route */
- } hpet_timers[1];
+ } hpet_timers[];
};
#define hpet_mc _u0._hpet_mc
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 012c37fdb688..0ee140176f10 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * include/linux/hrtimer.h
- *
* hrtimers - High-resolution kernel timers
*
* Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
@@ -9,17 +8,16 @@
* data type definitions, declarations, prototypes
*
* Started by: Thomas Gleixner and Ingo Molnar
- *
- * For licencing details see kernel-base/COPYING
*/
#ifndef _LINUX_HRTIMER_H
#define _LINUX_HRTIMER_H
+#include <linux/hrtimer_defs.h>
#include <linux/rbtree.h>
-#include <linux/ktime.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/percpu.h>
+#include <linux/seqlock.h>
#include <linux/timer.h>
#include <linux/timerqueue.h>
@@ -28,13 +26,37 @@ struct hrtimer_cpu_base;
/*
* Mode arguments of xxx_hrtimer functions:
+ *
+ * HRTIMER_MODE_ABS - Time value is absolute
+ * HRTIMER_MODE_REL - Time value is relative to now
+ * HRTIMER_MODE_PINNED - Timer is bound to CPU (is only considered
+ * when starting the timer)
+ * HRTIMER_MODE_SOFT - Timer callback function will be executed in
+ * soft irq context
+ * HRTIMER_MODE_HARD - Timer callback function will be executed in
+ * hard irq context even on PREEMPT_RT.
*/
enum hrtimer_mode {
- HRTIMER_MODE_ABS = 0x0, /* Time value is absolute */
- HRTIMER_MODE_REL = 0x1, /* Time value is relative to now */
- HRTIMER_MODE_PINNED = 0x02, /* Timer is bound to CPU */
- HRTIMER_MODE_ABS_PINNED = 0x02,
- HRTIMER_MODE_REL_PINNED = 0x03,
+ HRTIMER_MODE_ABS = 0x00,
+ HRTIMER_MODE_REL = 0x01,
+ HRTIMER_MODE_PINNED = 0x02,
+ HRTIMER_MODE_SOFT = 0x04,
+ HRTIMER_MODE_HARD = 0x08,
+
+ HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
+ HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,
+
+ HRTIMER_MODE_ABS_SOFT = HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT,
+ HRTIMER_MODE_REL_SOFT = HRTIMER_MODE_REL | HRTIMER_MODE_SOFT,
+
+ HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT,
+ HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT,
+
+ HRTIMER_MODE_ABS_HARD = HRTIMER_MODE_ABS | HRTIMER_MODE_HARD,
+ HRTIMER_MODE_REL_HARD = HRTIMER_MODE_REL | HRTIMER_MODE_HARD,
+
+ HRTIMER_MODE_ABS_PINNED_HARD = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_HARD,
+ HRTIMER_MODE_REL_PINNED_HARD = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_HARD,
};
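A minimal sketch of combining these mode bits when arming a timer; the callback and the 10 ms period are assumptions:

	/* Illustrative: a relative, CPU-pinned timer expiring in 10 ms. */
	static void example_arm(struct hrtimer *timer,
				enum hrtimer_restart (*fn)(struct hrtimer *))
	{
		hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
		timer->function = fn;
		hrtimer_start(timer, ms_to_ktime(10), HRTIMER_MODE_REL_PINNED);
	}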
/*
@@ -87,6 +109,9 @@ enum hrtimer_restart {
* @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above)
* @is_rel: Set if the timer was armed relative
+ * @is_soft: Set if hrtimer will be expired in soft interrupt context.
+ * @is_hard: Set if hrtimer will be expired in hard interrupt context
+ * even on RT.
*
* The hrtimer structure must be initialized by hrtimer_init()
*/
@@ -97,6 +122,8 @@ struct hrtimer {
struct hrtimer_clock_base *base;
u8 state;
u8 is_rel;
+ u8 is_soft;
+ u8 is_hard;
};
/**
@@ -112,9 +139,9 @@ struct hrtimer_sleeper {
};
#ifdef CONFIG_64BIT
-# define HRTIMER_CLOCK_BASE_ALIGN 64
+# define __hrtimer_clock_base_align ____cacheline_aligned
#else
-# define HRTIMER_CLOCK_BASE_ALIGN 32
+# define __hrtimer_clock_base_align
#endif
/**
@@ -123,48 +150,61 @@ struct hrtimer_sleeper {
* @index: clock type index for per_cpu support when moving a
* timer to a base on another cpu.
* @clockid: clock id for per_cpu support
+ * @seq: seqcount around __run_hrtimer
+ * @running: pointer to the currently running hrtimer
* @active: red black tree root node for the active timers
* @get_time: function to retrieve the current time of the clock
* @offset: offset of this clock to the monotonic base
*/
struct hrtimer_clock_base {
struct hrtimer_cpu_base *cpu_base;
- int index;
+ unsigned int index;
clockid_t clockid;
+ seqcount_raw_spinlock_t seq;
+ struct hrtimer *running;
struct timerqueue_head active;
ktime_t (*get_time)(void);
ktime_t offset;
-} __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
+} __hrtimer_clock_base_align;
enum hrtimer_base_type {
HRTIMER_BASE_MONOTONIC,
HRTIMER_BASE_REALTIME,
HRTIMER_BASE_BOOTTIME,
HRTIMER_BASE_TAI,
+ HRTIMER_BASE_MONOTONIC_SOFT,
+ HRTIMER_BASE_REALTIME_SOFT,
+ HRTIMER_BASE_BOOTTIME_SOFT,
+ HRTIMER_BASE_TAI_SOFT,
HRTIMER_MAX_CLOCK_BASES,
};
-/*
+/**
* struct hrtimer_cpu_base - the per cpu clock bases
* @lock: lock protecting the base and associated clock bases
* and timers
- * @seq: seqcount around __run_hrtimer
- * @running: pointer to the currently running hrtimer
* @cpu: cpu number
* @active_bases: Bitfield to mark bases with active timers
* @clock_was_set_seq: Sequence counter of clock was set events
- * @migration_enabled: The migration of hrtimers to other cpus is enabled
- * @nohz_active: The nohz functionality is enabled
- * @expires_next: absolute time of the next event which was scheduled
- * via clock_set_next_event()
- * @next_timer: Pointer to the first expiring timer
- * @in_hrtirq: hrtimer_interrupt() is currently executing
* @hres_active: State of high resolution mode
+ * @in_hrtirq: hrtimer_interrupt() is currently executing
* @hang_detected: The last hrtimer interrupt detected a hang
+ * @softirq_activated: set if the softirq is raised; no update of
+ * softirq-related settings is required then.
* @nr_events: Total number of hrtimer interrupt events
* @nr_retries: Total number of hrtimer interrupt retries
* @nr_hangs: Total number of hrtimer interrupt hangs
* @max_hang_time: Maximum time spent in hrtimer_interrupt
+ * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are
+ * expired
+ * @timer_waiters: A hrtimer_cancel() invocation waits for the timer
+ * callback to finish.
+ * @expires_next: absolute time of the next event, required for remote
+ * hrtimer enqueue; it is the earliest first expiry time (both hard
+ * and soft hrtimers are taken into account)
+ * @next_timer: Pointer to the first expiring timer
+ * @softirq_expires_next: Time at which to check whether the soft queues also need expiring
+ * @softirq_next_timer: Pointer to the first expiring softirq based timer
* @clock_base: array of clock bases for this cpu
*
* Note: next_timer is just an optimization for __remove_hrtimer().
@@ -173,31 +213,32 @@ enum hrtimer_base_type {
*/
struct hrtimer_cpu_base {
raw_spinlock_t lock;
- seqcount_t seq;
- struct hrtimer *running;
unsigned int cpu;
unsigned int active_bases;
unsigned int clock_was_set_seq;
- bool migration_enabled;
- bool nohz_active;
+ unsigned int hres_active : 1,
+ in_hrtirq : 1,
+ hang_detected : 1,
+ softirq_activated : 1;
#ifdef CONFIG_HIGH_RES_TIMERS
- unsigned int in_hrtirq : 1,
- hres_active : 1,
- hang_detected : 1;
- ktime_t expires_next;
- struct hrtimer *next_timer;
unsigned int nr_events;
- unsigned int nr_retries;
- unsigned int nr_hangs;
+ unsigned short nr_retries;
+ unsigned short nr_hangs;
unsigned int max_hang_time;
#endif
+#ifdef CONFIG_PREEMPT_RT
+ spinlock_t softirq_expiry_lock;
+ atomic_t timer_waiters;
+#endif
+ ktime_t expires_next;
+ struct hrtimer *next_timer;
+ ktime_t softirq_expires_next;
+ struct hrtimer *softirq_next_timer;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
} ____cacheline_aligned;
static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
- BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN);
-
timer->node.expires = time;
timer->_softexpires = time;
}
@@ -266,45 +307,23 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
return timer->base->get_time();
}
-#ifdef CONFIG_HIGH_RES_TIMERS
-struct clock_event_device;
-
-extern void hrtimer_interrupt(struct clock_event_device *dev);
-
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
- return timer->base->cpu_base->hres_active;
+ return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
+ timer->base->cpu_base->hres_active : 0;
}
-/*
- * The resolution of the clocks. The resolution value is returned in
- * the clock_getres() system call to give application programmers an
- * idea of the (in)accuracy of timers. Timer values are rounded up to
- * this resolution values.
- */
-# define HIGH_RES_NSEC 1
-# define KTIME_HIGH_RES (HIGH_RES_NSEC)
-# define MONOTONIC_RES_NSEC HIGH_RES_NSEC
-# define KTIME_MONOTONIC_RES KTIME_HIGH_RES
+#ifdef CONFIG_HIGH_RES_TIMERS
+struct clock_event_device;
-extern void clock_was_set_delayed(void);
+extern void hrtimer_interrupt(struct clock_event_device *dev);
extern unsigned int hrtimer_resolution;
#else
-# define MONOTONIC_RES_NSEC LOW_RES_NSEC
-# define KTIME_MONOTONIC_RES KTIME_LOW_RES
-
#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
-static inline int hrtimer_is_hres_active(struct hrtimer *timer)
-{
- return 0;
-}
-
-static inline void clock_was_set_delayed(void) { }
-
#endif
static inline ktime_t
@@ -328,26 +347,39 @@ hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
timer->base->get_time());
}
-extern void clock_was_set(void);
#ifdef CONFIG_TIMERFD
extern void timerfd_clock_was_set(void);
+extern void timerfd_resume(void);
#else
static inline void timerfd_clock_was_set(void) { }
+static inline void timerfd_resume(void) { }
#endif
-extern void hrtimers_resume(void);
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
+#ifdef CONFIG_PREEMPT_RT
+void hrtimer_cancel_wait_running(const struct hrtimer *timer);
+#else
+static inline void hrtimer_cancel_wait_running(struct hrtimer *timer)
+{
+ cpu_relax();
+}
+#endif
/* Exported timer functions: */
/* Initialize timers: */
extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
enum hrtimer_mode mode);
+extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id,
+ enum hrtimer_mode mode);
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock,
enum hrtimer_mode mode);
+extern void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
+ clockid_t clock_id,
+ enum hrtimer_mode mode);
extern void destroy_hrtimer_on_stack(struct hrtimer *timer);
#else
@@ -357,6 +389,14 @@ static inline void hrtimer_init_on_stack(struct hrtimer *timer,
{
hrtimer_init(timer, which_clock, mode);
}
+
+static inline void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
+ clockid_t clock_id,
+ enum hrtimer_mode mode)
+{
+ hrtimer_init_sleeper(sl, clock_id, mode);
+}
+
static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
#endif
@@ -365,11 +405,12 @@ extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
u64 range_ns, const enum hrtimer_mode mode);
/**
- * hrtimer_start - (re)start an hrtimer on the current CPU
+ * hrtimer_start - (re)start an hrtimer
* @timer: the timer to be added
* @tim: expiry time
- * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or
- * relative (HRTIMER_MODE_REL)
+ * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
+ * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
+ * softirq based mode is considered for debug purposes only!
*/
static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
const enum hrtimer_mode mode)
@@ -391,6 +432,9 @@ static inline void hrtimer_start_expires(struct hrtimer *timer,
hrtimer_start_range_ns(timer, soft, delta, mode);
}
+void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl,
+ enum hrtimer_mode mode);
+
static inline void hrtimer_restart(struct hrtimer *timer)
{
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
@@ -399,21 +443,32 @@ static inline void hrtimer_restart(struct hrtimer *timer)
/* Query timers: */
extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
+/**
+ * hrtimer_get_remaining - get remaining time for the timer
+ * @timer: the timer to read
+ */
static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
return __hrtimer_get_remaining(timer, false);
}
extern u64 hrtimer_get_next_event(void);
+extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);
extern bool hrtimer_active(const struct hrtimer *timer);
-/*
- * Helper function to check, whether the timer is on one of the queues
+/**
+ * hrtimer_is_queued - check, whether the timer is on one of the queues
+ * @timer: Timer to check
+ *
+ * Returns: True if the timer is queued, false otherwise
+ *
+ * The function can be used lockless, but it gives only a current snapshot.
*/
-static inline int hrtimer_is_queued(struct hrtimer *timer)
+static inline bool hrtimer_is_queued(struct hrtimer *timer)
{
- return timer->state & HRTIMER_STATE_ENQUEUED;
+ /* The READ_ONCE pairs with the update functions of timer->state */
+ return !!(READ_ONCE(timer->state) & HRTIMER_STATE_ENQUEUED);
}
/*
@@ -422,7 +477,7 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
*/
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
- return timer->base->cpu_base->running == timer;
+ return timer->base->running == timer;
}
/* Forward a hrtimer so it expires after now: */
@@ -454,19 +509,15 @@ static inline u64 hrtimer_forward_now(struct hrtimer *timer,
/* Precise sleep: */
extern int nanosleep_copyout(struct restart_block *, struct timespec64 *);
-extern long hrtimer_nanosleep(const struct timespec64 *rqtp,
- const enum hrtimer_mode mode,
+extern long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
const clockid_t clockid);
-extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
- struct task_struct *tsk);
-
extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
- const enum hrtimer_mode mode);
+ const enum hrtimer_mode mode);
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
u64 delta,
const enum hrtimer_mode mode,
- int clock);
+ clockid_t clock_id);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
/* Soft interrupt function to run the hrtimer queues: */
diff --git a/include/linux/hrtimer_api.h b/include/linux/hrtimer_api.h
new file mode 100644
index 000000000000..8d9700894468
--- /dev/null
+++ b/include/linux/hrtimer_api.h
@@ -0,0 +1 @@
+#include <linux/hrtimer.h>
diff --git a/include/linux/hrtimer_defs.h b/include/linux/hrtimer_defs.h
new file mode 100644
index 000000000000..2d3e3c5fb946
--- /dev/null
+++ b/include/linux/hrtimer_defs.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_HRTIMER_DEFS_H
+#define _LINUX_HRTIMER_DEFS_H
+
+#include <linux/ktime.h>
+
+#ifdef CONFIG_HIGH_RES_TIMERS
+
+/*
+ * The resolution of the clocks. The resolution value is returned in
+ * the clock_getres() system call to give application programmers an
+ * idea of the (in)accuracy of timers. Timer values are rounded up to
+ * this resolution.
+ */
+# define HIGH_RES_NSEC 1
+# define KTIME_HIGH_RES (HIGH_RES_NSEC)
+# define MONOTONIC_RES_NSEC HIGH_RES_NSEC
+# define KTIME_MONOTONIC_RES KTIME_HIGH_RES
+
+#else
+
+# define MONOTONIC_RES_NSEC LOW_RES_NSEC
+# define KTIME_MONOTONIC_RES KTIME_LOW_RES
+
+#endif
+
+#endif
diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h
index 57402544b53f..6ca92bff02c6 100644
--- a/include/linux/hsi/hsi.h
+++ b/include/linux/hsi/hsi.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* HSI core header file.
*
* Copyright (C) 2010 Nokia Corporation. All rights reserved.
*
* Contact: Carlos Chinea <carlos.chinea@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef __LINUX_HSI_H__
diff --git a/include/linux/hsi/ssi_protocol.h b/include/linux/hsi/ssi_protocol.h
index 1433651be0dc..2d6f3cfa7dea 100644
--- a/include/linux/hsi/ssi_protocol.h
+++ b/include/linux/hsi/ssi_protocol.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ssip_slave.h
*
@@ -6,20 +7,6 @@
* Copyright (C) 2010 Nokia Corporation. All rights reserved.
*
* Contact: Carlos Chinea <carlos.chinea@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef __LINUX_SSIP_SLAVE_H__
diff --git a/include/linux/htcpld.h b/include/linux/htcpld.h
deleted file mode 100644
index ab3f6cb4dddc..000000000000
--- a/include/linux/htcpld.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef __LINUX_HTCPLD_H
-#define __LINUX_HTCPLD_H
-
-struct htcpld_chip_platform_data {
- unsigned int addr;
- unsigned int reset;
- unsigned int num_gpios;
- unsigned int gpio_out_base;
- unsigned int gpio_in_base;
- unsigned int irq_base;
- unsigned int num_irqs;
-};
-
-struct htcpld_core_platform_data {
- unsigned int int_reset_gpio_hi;
- unsigned int int_reset_gpio_lo;
- unsigned int i2c_adapter_id;
-
- struct htcpld_chip_platform_data *chip;
- unsigned int num_chip;
-};
-
-#endif /* __LINUX_HTCPLD_H */
-
diff --git a/include/linux/hte.h b/include/linux/hte.h
new file mode 100644
index 000000000000..8289055061ab
--- /dev/null
+++ b/include/linux/hte.h
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_HTE_H
+#define __LINUX_HTE_H
+
+#include <linux/errno.h>
+
+struct hte_chip;
+struct hte_device;
+struct of_phandle_args;
+
+/**
+ * enum hte_edge - HTE line edge flags.
+ *
+ * @HTE_EDGE_NO_SETUP: No edge setup. In this case the consumer will set up
+ * edges, for example during the request irq call.
+ * @HTE_RISING_EDGE_TS: Rising edge.
+ * @HTE_FALLING_EDGE_TS: Falling edge.
+ *
+ */
+enum hte_edge {
+ HTE_EDGE_NO_SETUP = 1U << 0,
+ HTE_RISING_EDGE_TS = 1U << 1,
+ HTE_FALLING_EDGE_TS = 1U << 2,
+};
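For example (hypothetical consumer with no DT node; the line id and name are illustrative), both edges can be requested by OR-ing the flags when initializing the line attributes with hte_init_line_attr(), declared later in this header:

	struct hte_ts_desc desc;

	hte_init_line_attr(&desc, 7 /* hypothetical line id */,
			   HTE_RISING_EDGE_TS | HTE_FALLING_EDGE_TS,
			   "my-line", NULL);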
+
+/**
+ * enum hte_return - HTE subsystem return values used during callback.
+ *
+ * @HTE_CB_HANDLED: The consumer handled the data.
+ * @HTE_RUN_SECOND_CB: The consumer needs further processing, in that case
+ * HTE subsystem calls secondary callback provided by the consumer where it
+ * is allowed to sleep.
+ */
+enum hte_return {
+ HTE_CB_HANDLED,
+ HTE_RUN_SECOND_CB,
+};
+
+/**
+ * struct hte_ts_data - HTE timestamp data.
+ *
+ * @tsc: Timestamp value.
+ * @seq: Sequence counter of the timestamps.
+ * @raw_level: Level of the line at the timestamp if provider supports it,
+ * -1 otherwise.
+ */
+struct hte_ts_data {
+ u64 tsc;
+ u64 seq;
+ int raw_level;
+};
+
+/**
+ * struct hte_clk_info - Clock source info that HTE provider uses to timestamp.
+ *
+ * @hz: Supported clock rate in Hz; for example, a 1 kHz clock = 1000.
+ * @type: Supported clock type.
+ */
+struct hte_clk_info {
+ u64 hz;
+ clockid_t type;
+};
+
+/**
+ * typedef hte_ts_cb_t - HTE timestamp data processing primary callback.
+ *
+ * The callback is used to push timestamp data to the client and it is
+ * not allowed to sleep.
+ *
+ * @ts: HW timestamp data.
+ * @data: Client supplied data.
+ */
+typedef enum hte_return (*hte_ts_cb_t)(struct hte_ts_data *ts, void *data);
+
+/**
+ * typedef hte_ts_sec_cb_t - HTE timestamp data processing secondary callback.
+ *
+ * This is used when the client needs further processing where it is
+ * allowed to sleep.
+ *
+ * @data: Client supplied data.
+ *
+ */
+typedef enum hte_return (*hte_ts_sec_cb_t)(void *data);
+
+/**
+ * struct hte_line_attr - Line attributes.
+ *
+ * @line_id: The logical ID understood by the consumers and providers.
+ * @line_data: Line data related to line_id.
+ * @edge_flags: Edge setup flags.
+ * @name: Descriptive name of the entity that is being monitored for the
+ * hardware timestamping. If null, HTE core will construct the name.
+ *
+ */
+struct hte_line_attr {
+ u32 line_id;
+ void *line_data;
+ unsigned long edge_flags;
+ const char *name;
+};
+
+/**
+ * struct hte_ts_desc - HTE timestamp descriptor.
+ *
+ * This structure is a communication token between consumers and the
+ * subsystem, and between the subsystem and providers.
+ *
+ * @attr: The line attributes.
+ * @hte_data: Subsystem's private data, set by HTE subsystem.
+ */
+struct hte_ts_desc {
+ struct hte_line_attr attr;
+ void *hte_data;
+};
+
+/**
+ * struct hte_ops - HTE operations set by providers.
+ *
+ * @request: Hook for requesting a HTE timestamp. Returns 0 on success,
+ * non-zero for failures.
+ * @release: Hook for releasing a HTE timestamp. Returns 0 on success,
+ * non-zero for failures.
+ * @enable: Hook to enable the specified timestamp. Returns 0 on success,
+ * non-zero for failures.
+ * @disable: Hook to disable specified timestamp. Returns 0 on success,
+ * non-zero for failures.
+ * @get_clk_src_info: Hook to get the clock information the provider uses
+ * to timestamp. Returns 0 on success and a negative error code on failure.
+ * On success the HTE subsystem fills in the provided struct hte_clk_info.
+ *
+ * The xlated_id parameter is used to communicate between the HTE subsystem
+ * and the providers and is translated by the provider.
+ */
+struct hte_ops {
+ int (*request)(struct hte_chip *chip, struct hte_ts_desc *desc,
+ u32 xlated_id);
+ int (*release)(struct hte_chip *chip, struct hte_ts_desc *desc,
+ u32 xlated_id);
+ int (*enable)(struct hte_chip *chip, u32 xlated_id);
+ int (*disable)(struct hte_chip *chip, u32 xlated_id);
+ int (*get_clk_src_info)(struct hte_chip *chip,
+ struct hte_clk_info *ci);
+};
+
+/**
+ * struct hte_chip - Abstract HTE chip.
+ *
+ * @name: functional name of the HTE IP block.
+ * @dev: device providing the HTE.
+ * @ops: callbacks for this HTE.
+ * @nlines: number of lines/signals supported by this chip.
+ * @xlate_of: Callback which translates consumer supplied logical ids to
+ * physical ids; returns 0 on success and negative on failure. On success
+ * it stores the translated id (between 0 and @nlines) in the xlated_id
+ * parameter.
+ * @xlate_plat: Same as above but for the consumers with no DT node.
+ * @match_from_linedata: Match HTE device using the line_data.
+ * @of_hte_n_cells: Number of cells used to form the HTE specifier.
+ * @gdev: HTE subsystem abstract device, internal to the HTE subsystem.
+ * @data: chip specific private data.
+ */
+struct hte_chip {
+ const char *name;
+ struct device *dev;
+ const struct hte_ops *ops;
+ u32 nlines;
+ int (*xlate_of)(struct hte_chip *gc,
+ const struct of_phandle_args *args,
+ struct hte_ts_desc *desc, u32 *xlated_id);
+ int (*xlate_plat)(struct hte_chip *gc, struct hte_ts_desc *desc,
+ u32 *xlated_id);
+ bool (*match_from_linedata)(const struct hte_chip *chip,
+ const struct hte_ts_desc *hdesc);
+ u8 of_hte_n_cells;
+
+ struct hte_device *gdev;
+ void *data;
+};
+
+#if IS_ENABLED(CONFIG_HTE)
+/* HTE APIs for the providers */
+int devm_hte_register_chip(struct hte_chip *chip);
+int hte_push_ts_ns(const struct hte_chip *chip, u32 xlated_id,
+ struct hte_ts_data *data);
+
+/* HTE APIs for the consumers */
+int hte_init_line_attr(struct hte_ts_desc *desc, u32 line_id,
+ unsigned long edge_flags, const char *name,
+ void *data);
+int hte_ts_get(struct device *dev, struct hte_ts_desc *desc, int index);
+int hte_ts_put(struct hte_ts_desc *desc);
+int hte_request_ts_ns(struct hte_ts_desc *desc, hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb, void *data);
+int devm_hte_request_ts_ns(struct device *dev, struct hte_ts_desc *desc,
+ hte_ts_cb_t cb, hte_ts_sec_cb_t tcb, void *data);
+int of_hte_req_count(struct device *dev);
+int hte_enable_ts(struct hte_ts_desc *desc);
+int hte_disable_ts(struct hte_ts_desc *desc);
+int hte_get_clk_src_info(const struct hte_ts_desc *desc,
+ struct hte_clk_info *ci);
+
+#else /* !CONFIG_HTE */
+static inline int devm_hte_register_chip(struct hte_chip *chip)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_push_ts_ns(const struct hte_chip *chip,
+ u32 xlated_id,
+ const struct hte_ts_data *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_init_line_attr(struct hte_ts_desc *desc, u32 line_id,
+ unsigned long edge_flags,
+ const char *name, void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_ts_get(struct device *dev, struct hte_ts_desc *desc,
+ int index)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_ts_put(struct hte_ts_desc *desc)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_request_ts_ns(struct hte_ts_desc *desc, hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb, void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int devm_hte_request_ts_ns(struct device *dev,
+ struct hte_ts_desc *desc,
+ hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb,
+ void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int of_hte_req_count(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_enable_ts(struct hte_ts_desc *desc)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_disable_ts(struct hte_ts_desc *desc)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_get_clk_src_info(const struct hte_ts_desc *desc,
+ struct hte_clk_info *ci)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* !CONFIG_HTE */
+
+#endif
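A rough consumer-side sketch of the API above (driver names are hypothetical, error unwinding is elided, and this is not part of the patch):

	#include <linux/device.h>
	#include <linux/hte.h>
	#include <linux/printk.h>

	static enum hte_return my_ts_cb(struct hte_ts_data *ts, void *data)
	{
		/* Primary callback: must not sleep, just consume the data. */
		pr_debug("ts=%llu seq=%llu level=%d\n",
			 ts->tsc, ts->seq, ts->raw_level);
		return HTE_CB_HANDLED;
	}

	static int my_consumer_probe(struct device *dev)
	{
		/* Static so it outlives probe; a real driver would embed
		 * this in its driver state (sketch only). */
		static struct hte_ts_desc desc;
		int ret;

		ret = hte_ts_get(dev, &desc, 0);	/* first entry in DT */
		if (ret)
			return ret;

		ret = devm_hte_request_ts_ns(dev, &desc, my_ts_cb, NULL, NULL);
		if (ret)
			return ret;

		return hte_enable_ts(&desc);
	}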
diff --git a/include/linux/htirq.h b/include/linux/htirq.h
deleted file mode 100644
index d4a527e58434..000000000000
--- a/include/linux/htirq.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef LINUX_HTIRQ_H
-#define LINUX_HTIRQ_H
-
-struct pci_dev;
-struct irq_data;
-
-struct ht_irq_msg {
- u32 address_lo; /* low 32 bits of the ht irq message */
- u32 address_hi; /* high 32 bits of the it irq message */
-};
-
-typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq,
- struct ht_irq_msg *msg);
-
-struct ht_irq_cfg {
- struct pci_dev *dev;
- /* Update callback used to cope with buggy hardware */
- ht_irq_update_t *update;
- unsigned pos;
- unsigned idx;
- struct ht_irq_msg msg;
-};
-
-/* Helper functions.. */
-void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
-void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
-void mask_ht_irq(struct irq_data *data);
-void unmask_ht_irq(struct irq_data *data);
-
-/* The arch hook for getting things started */
-int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev,
- ht_irq_update_t *update);
-void arch_teardown_ht_irq(unsigned int irq);
-
-/* For drivers of buggy hardware */
-int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update);
-
-#endif /* LINUX_HTIRQ_H */
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index ee696347f928..20284387b841 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -1,55 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H
#include <linux/sched/coredump.h>
+#include <linux/mm_types.h>
#include <linux/fs.h> /* only for vma_is_dax() */
-extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf);
-extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
- struct vm_area_struct *vma);
-extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
-extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
- struct vm_area_struct *vma);
+vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
+int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
+ struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
+void huge_pmd_set_accessed(struct vm_fault *vmf);
+int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
+ struct vm_area_struct *vma);
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
+void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif
-extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
-extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
- unsigned long addr,
- pmd_t *pmd,
- unsigned int flags);
-extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
- struct vm_area_struct *vma,
- pmd_t *pmd, unsigned long addr, unsigned long next);
-extern int zap_huge_pmd(struct mmu_gather *tlb,
- struct vm_area_struct *vma,
- pmd_t *pmd, unsigned long addr);
-extern int zap_huge_pud(struct mmu_gather *tlb,
- struct vm_area_struct *vma,
- pud_t *pud, unsigned long addr);
-extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, unsigned long end,
- unsigned char *vec);
-extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
- unsigned long new_addr, unsigned long old_end,
- pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
-extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, pgprot_t newprot,
- int prot_numa);
-int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd, pfn_t pfn, bool write);
-int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
- pud_t *pud, pfn_t pfn, bool write);
+vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
+struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmd,
+ unsigned int flags);
+bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pmd_t *pmd, unsigned long addr, unsigned long next);
+int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
+ unsigned long addr);
+int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
+ unsigned long addr);
+bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+ unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
+int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pmd_t *pmd, unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags);
+
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
+
enum transparent_hugepage_flag {
+ TRANSPARENT_HUGEPAGE_UNSUPPORTED,
TRANSPARENT_HUGEPAGE_FLAG,
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
@@ -58,21 +53,18 @@ enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
-#ifdef CONFIG_DEBUG_VM
- TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
-#endif
};
struct kobject;
struct kobj_attribute;
-extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count,
- enum transparent_hugepage_flag flag);
-extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf,
- enum transparent_hugepage_flag flag);
+ssize_t single_hugepage_flag_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count,
+ enum transparent_hugepage_flag flag);
+ssize_t single_hugepage_flag_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf,
+ enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
@@ -87,67 +79,88 @@ extern struct kobj_attribute shmem_enabled_attr;
#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
-extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
-
extern unsigned long transparent_hugepage_flags;
-static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
+#define hugepage_flags_enabled() \
+ (transparent_hugepage_flags & \
+ ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \
+ (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
+#define hugepage_flags_always() \
+ (transparent_hugepage_flags & \
+ (1<<TRANSPARENT_HUGEPAGE_FLAG))
+
+/*
+ * Perform the following checks:
+ * - For file vma, check if the linear page offset of vma is
+ * HPAGE_PMD_NR aligned within the file. The hugepage is
+ * guaranteed to be hugepage-aligned within the file, but we must
+ * check that the PMD-aligned addresses in the VMA map to
+ * PMD-aligned offsets within the file, else the hugepage will
+ * not be PMD-mappable.
+ * - For all vmas, check if the haddr is in an aligned HPAGE_PMD_SIZE
+ * area.
+ */
+static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
+ unsigned long addr)
{
- if (vma->vm_flags & VM_NOHUGEPAGE)
- return false;
+ unsigned long haddr;
- if (is_vma_temporary_stack(vma))
- return false;
+ /* Don't have to check pgoff for anonymous vma */
+ if (!vma_is_anonymous(vma)) {
+ if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
+ HPAGE_PMD_NR))
+ return false;
+ }
+
+ haddr = addr & HPAGE_PMD_MASK;
- if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+ if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
return false;
+ return true;
+}
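A worked example of the pgoff check above (hypothetical numbers, assuming 4K pages so HPAGE_PMD_NR == 512):

	/*
	 * vm_start = 0x40000000, vm_pgoff = 0:
	 *   (0x40000000 >> PAGE_SHIFT) - 0 = 0x40000, a multiple of 512
	 *   -> PMD-aligned addresses map to PMD-aligned file offsets: suitable.
	 * vm_start = 0x40000000, vm_pgoff = 1:
	 *   (0x40000000 >> PAGE_SHIFT) - 1 = 0x3ffff, not 512-aligned
	 *   -> not suitable.
	 */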
- if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
- return true;
+static inline bool file_thp_enabled(struct vm_area_struct *vma)
+{
+ struct inode *inode;
- if (vma_is_dax(vma))
- return true;
+ if (!vma->vm_file)
+ return false;
- if (transparent_hugepage_flags &
- (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
- return !!(vma->vm_flags & VM_HUGEPAGE);
+ inode = vma->vm_file->f_inode;
- return false;
+ return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
+ (vma->vm_flags & VM_EXEC) &&
+ !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}
+bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
+ bool smaps, bool in_pf, bool enforce_sysfs);
+
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
-#ifdef CONFIG_DEBUG_VM
-#define transparent_hugepage_debug_cow() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
-#else /* CONFIG_DEBUG_VM */
-#define transparent_hugepage_debug_cow() 0
-#endif /* CONFIG_DEBUG_VM */
-extern unsigned long thp_get_unmapped_area(struct file *filp,
- unsigned long addr, unsigned long len, unsigned long pgoff,
- unsigned long flags);
+unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags);
-extern void prep_transhuge_page(struct page *page);
-extern void free_transhuge_page(struct page *page);
+void prep_transhuge_page(struct page *page);
+void free_transhuge_page(struct page *page);
-bool can_split_huge_page(struct page *page, int *pextra_pins);
+bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
return split_huge_page_to_list(page, NULL);
}
-void deferred_split_huge_page(struct page *page);
+void deferred_split_folio(struct folio *folio);
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address, bool freeze, struct page *page);
+ unsigned long address, bool freeze, struct folio *folio);
#define split_huge_pmd(__vma, __pmd, __address) \
do { \
pmd_t *____pmd = (__pmd); \
- if (pmd_trans_huge(*____pmd) \
+ if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd) \
|| pmd_devmap(*____pmd)) \
__split_huge_pmd(__vma, __pmd, __address, \
false, NULL); \
@@ -155,7 +168,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
- bool freeze, struct page *page);
+ bool freeze, struct folio *folio);
void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
unsigned long address);
@@ -168,22 +181,26 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
__split_huge_pud(__vma, __pud, __address); \
} while (0)
-extern int hugepage_madvise(struct vm_area_struct *vma,
- unsigned long *vm_flags, int advice);
-extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end,
- long adjust_next);
-extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
- struct vm_area_struct *vma);
-extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
- struct vm_area_struct *vma);
-/* mmap_sem must be held on entry */
+int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
+ int advice);
+int madvise_collapse(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start, unsigned long end);
+void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, long adjust_next);
+spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
+spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);
+
+static inline int is_swap_pmd(pmd_t pmd)
+{
+ return !pmd_none(pmd) && !pmd_present(pmd);
+}
+
+/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{
- VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
- if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
+ if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
return __pmd_trans_huge_lock(pmd, vma);
else
return NULL;
@@ -191,36 +208,39 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
struct vm_area_struct *vma)
{
- VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
if (pud_trans_huge(*pud) || pud_devmap(*pud))
return __pud_trans_huge_lock(pud, vma);
else
return NULL;
}
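A minimal sketch of how a page-table walker typically uses these helpers (hypothetical caller, with mmap_lock already held as required):

	spinlock_t *ptl = pmd_trans_huge_lock(pmd, vma);

	if (ptl) {
		/* *pmd is a huge (or huge swap/devmap) entry, stable under ptl. */
		spin_unlock(ptl);
	} else {
		/* Fall back to a pte-level walk. */
	}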
-static inline int hpage_nr_pages(struct page *page)
+
+/**
+ * folio_test_pmd_mappable - Can we map this folio with a PMD?
+ * @folio: The folio to test
+ */
+static inline bool folio_test_pmd_mappable(struct folio *folio)
{
- if (unlikely(PageTransHuge(page)))
- return HPAGE_PMD_NR;
- return 1;
+ return folio_order(folio) >= HPAGE_PMD_ORDER;
}
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd, int flags);
+ pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
- pud_t *pud, int flags);
+ pud_t *pud, int flags, struct dev_pagemap **pgmap);
-extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
+vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);
extern struct page *huge_zero_page;
+extern unsigned long huge_zero_pfn;
static inline bool is_huge_zero_page(struct page *page)
{
- return ACCESS_ONCE(huge_zero_page) == page;
+ return READ_ONCE(huge_zero_page) == page;
}
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
- return is_huge_zero_page(pmd_page(pmd));
+ return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}
static inline bool is_huge_zero_pud(pud_t pud)
@@ -233,6 +253,11 @@ void mm_put_huge_zero_page(struct mm_struct *mm);
#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))
+static inline bool thp_migration_supported(void)
+{
+ return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
+}
+
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
@@ -242,9 +267,20 @@ void mm_put_huge_zero_page(struct mm_struct *mm);
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
-#define hpage_nr_pages(x) 1
+static inline bool folio_test_pmd_mappable(struct folio *folio)
+{
+ return false;
+}
+
+static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ return false;
+}
-static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
+static inline bool hugepage_vma_check(struct vm_area_struct *vma,
+ unsigned long vm_flags, bool smaps,
+ bool in_pf, bool enforce_sysfs)
{
return false;
}
@@ -256,9 +292,8 @@ static inline void prep_transhuge_page(struct page *page) {}
#define thp_get_unmapped_area NULL
static inline bool
-can_split_huge_page(struct page *page, int *pextra_pins)
+can_split_folio(struct folio *folio, int *pextra_pins)
{
- BUILD_BUG();
return false;
}
static inline int
@@ -270,14 +305,14 @@ static inline int split_huge_page(struct page *page)
{
return 0;
}
-static inline void deferred_split_huge_page(struct page *page) {}
+static inline void deferred_split_folio(struct folio *folio) {}
#define split_huge_pmd(__vma, __pmd, __address) \
do { } while (0)
static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address, bool freeze, struct page *page) {}
+ unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
- unsigned long address, bool freeze, struct page *page) {}
+ unsigned long address, bool freeze, struct folio *folio) {}
#define split_huge_pud(__vma, __pmd, __address) \
do { } while (0)
@@ -285,15 +320,26 @@ static inline void split_huge_pmd_address(struct vm_area_struct *vma,
static inline int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice)
{
- BUG();
- return 0;
+ return -EINVAL;
}
+
+static inline int madvise_collapse(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start, unsigned long end)
+{
+ return -EINVAL;
+}
+
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
long adjust_next)
{
}
+static inline int is_swap_pmd(pmd_t pmd)
+{
+ return 0;
+}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{
@@ -305,7 +351,7 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
return NULL;
}
-static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
+static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
return 0;
}
@@ -315,6 +361,11 @@ static inline bool is_huge_zero_page(struct page *page)
return false;
}
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+ return false;
+}
+
static inline bool is_huge_zero_pud(pud_t pud)
{
return false;
@@ -326,16 +377,44 @@ static inline void mm_put_huge_zero_page(struct mm_struct *mm)
}
static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
- unsigned long addr, pmd_t *pmd, int flags)
+ unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
return NULL;
}
static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
- unsigned long addr, pud_t *pud, int flags)
+ unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
return NULL;
}
+
+static inline bool thp_migration_supported(void)
+{
+ return false;
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline int split_folio_to_list(struct folio *folio,
+ struct list_head *list)
+{
+ return split_huge_page_to_list(&folio->page, list);
+}
+
+static inline int split_folio(struct folio *folio)
+{
+ return split_folio_to_list(folio, NULL);
+}
+
+/*
+ * archs that select ARCH_WANTS_THP_SWAP but don't support THP_SWP due to
+ * limitations in the implementation like arm64 MTE can override this to
+ * false
+ */
+#ifndef arch_thp_swp_supported
+static inline bool arch_thp_swp_supported(void)
+{
+ return true;
+}
+#endif
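A hypothetical arch override (sketch only; the header location and the condition function are illustrative, not a real API) would define the macro before providing its own helper:

	/* In a hypothetical asm/pgtable.h: */
	#define arch_thp_swp_supported arch_thp_swp_supported
	static inline bool arch_thp_swp_supported(void)
	{
		return !cpu_has_metadata_tagging();	/* hypothetical check */
	}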
+
#endif /* _LINUX_HUGE_MM_H */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 0ed8e41aaf11..6d041aa9f0fe 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -1,59 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H
+#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
+#include <linux/page_ref.h>
#include <linux/list.h>
#include <linux/kref.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
+#include <linux/gfp.h>
+#include <linux/userfaultfd_k.h>
struct ctl_table;
struct user_struct;
struct mmu_gather;
+struct node;
-#ifndef is_hugepd
-/*
- * Some architectures requires a hugepage directory format that is
- * required to support multiple hugepage sizes. For example
- * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
- * introduced the same on powerpc. This allows for a more flexible hugepage
- * pagetable layout.
- */
+#ifndef CONFIG_ARCH_HAS_HUGEPD
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
-static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
- unsigned pdshift, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- return 0;
-}
-#else
-extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
- unsigned pdshift, unsigned long end,
- int write, struct page **pages, int *nr);
#endif
-
#ifdef CONFIG_HUGETLB_PAGE
#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>
+/*
+ * For a HugeTLB page, there is more metadata to save in the struct page
+ * than the head struct page can hold, so we have to reuse the tail
+ * struct pages to store the metadata.
+ */
+#define __NR_USED_SUBPAGE 3
+
struct hugepage_subpool {
spinlock_t lock;
long count;
long max_hpages; /* Maximum huge pages or -1 if no maximum. */
long used_hpages; /* Used count against maximum, includes */
- /* both alloced and reserved pages. */
+ /* both allocated and reserved pages. */
struct hstate *hstate;
long min_hpages; /* Minimum huge pages or -1 if no minimum. */
long rsv_hpages; /* Pages reserved against global pool to */
- /* sasitfy minimum size. */
+ /* satisfy minimum size. */
};
struct resv_map {
@@ -63,7 +58,58 @@ struct resv_map {
long adds_in_progress;
struct list_head region_cache;
long region_cache_count;
+#ifdef CONFIG_CGROUP_HUGETLB
+ /*
+ * On private mappings, the counter to uncharge reservations is stored
+ * here. If these fields are 0, then either the mapping is shared, or
+ * cgroup accounting is disabled for this resv_map.
+ */
+ struct page_counter *reservation_counter;
+ unsigned long pages_per_hpage;
+ struct cgroup_subsys_state *css;
+#endif
+};
+
+/*
+ * Region tracking -- allows tracking of reservations and instantiated pages
+ * across the pages in a mapping.
+ *
+ * The region data structures are embedded into a resv_map and protected
+ * by a resv_map's lock. The set of regions within the resv_map represent
+ * reservations for huge pages, or huge pages that have already been
+ * instantiated within the map. The from and to elements are huge page
+ * indices into the associated mapping. from indicates the starting index
+ * of the region. to represents the first index past the end of the region.
+ *
+ * For example, a file region structure with from == 0 and to == 4 represents
+ * four huge pages in a mapping. It is important to note that the to element
+ * represents the first element past the end of the region. This is used in
+ * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
+ *
+ * Interval notation of the form [from, to) will be used to indicate that
+ * the endpoint from is inclusive and to is exclusive.
+ */
+struct file_region {
+ struct list_head link;
+ long from;
+ long to;
+#ifdef CONFIG_CGROUP_HUGETLB
+ /*
+ * On shared mappings, each reserved region appears as a struct
+ * file_region in resv_map. These fields hold the info needed to
+ * uncharge each reservation.
+ */
+ struct page_counter *reservation_counter;
+ struct cgroup_subsys_state *css;
+#endif
+};
+
+struct hugetlb_vma_lock {
+ struct kref refs;
+ struct rw_semaphore rw_sema;
+ struct vm_area_struct *vma;
};
+
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
@@ -76,90 +122,155 @@ struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
-void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
-int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
-int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
-int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
-
-#ifdef CONFIG_NUMA
-int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-#endif
-
-int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
+void hugetlb_dup_vma_private(struct vm_area_struct *vma);
+void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
+int move_hugetlb_page_tables(struct vm_area_struct *vma,
+ struct vm_area_struct *new_vma,
+ unsigned long old_addr, unsigned long new_addr,
+ unsigned long len);
+int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
+ struct vm_area_struct *, struct vm_area_struct *);
+struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
+ unsigned long address, unsigned int flags);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
struct page **, struct vm_area_struct **,
unsigned long *, unsigned long *, long, unsigned int,
int *);
void unmap_hugepage_range(struct vm_area_struct *,
- unsigned long, unsigned long, struct page *);
+ unsigned long, unsigned long, struct page *,
+ zap_flags_t);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
- struct page *ref_page);
-void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct page *ref_page);
+ struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
-int hugetlb_report_node_meminfo(int, char *);
-void hugetlb_show_meminfo(void);
+int hugetlb_report_node_meminfo(char *buf, int len, int nid);
+void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
-int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags);
-int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
- struct vm_area_struct *dst_vma,
- unsigned long dst_addr,
- unsigned long src_addr,
- struct page **pagep);
-int hugetlb_reserve_pages(struct inode *inode, long from, long to,
+#ifdef CONFIG_USERFAULTFD
+int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
+ struct vm_area_struct *dst_vma,
+ unsigned long dst_addr,
+ unsigned long src_addr,
+ uffd_flags_t flags,
+ struct folio **foliop);
+#endif /* CONFIG_USERFAULTFD */
+bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
struct vm_area_struct *vma,
vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long freed);
-bool isolate_huge_page(struct page *page, struct list_head *list);
-void putback_active_hugepage(struct page *page);
+bool isolate_hugetlb(struct folio *folio, struct list_head *list);
+int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
+int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
+ bool *migratable_cleared);
+void folio_putback_active_hugetlb(struct folio *folio);
+void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
- struct vm_area_struct *vma,
- struct address_space *mapping,
- pgoff_t idx, unsigned long address);
+u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
+
+pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, pud_t *pud);
-pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
+struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
-extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;
/* arch callbacks */
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+#ifndef CONFIG_HIGHPTE
+/*
+ * pte_offset_huge() and pte_alloc_huge() are helpers for those architectures
+ * which may go down to the lowest PTE level in their huge_pte_offset() and
+ * huge_pte_alloc(): to avoid reliance on pte_offset_map() without pte_unmap().
+ */
+static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address)
+{
+ return pte_offset_kernel(pmd, address);
+}
+static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long address)
+{
+ return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address);
+}
+#endif
+
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz);
+/*
+ * huge_pte_offset(): Walk the hugetlb pgtable until the last level PTE.
+ * Returns the pte_t* if found, or NULL if the address is not mapped.
+ *
+ * IMPORTANT: we should normally not directly call this function, instead
+ * this is only a common interface to implement arch-specific
+ * walker. Please use hugetlb_walk() instead, because that will attempt to
+ * verify the locking for you.
+ *
+ * Since this function will walk all the pgtable pages (including not only
+ * high-level pgtable page, but also PUD entry that can be unshared
+ * concurrently for VM_SHARED), the caller of this function should be
+ * responsible for its thread safety. One can follow this rule:
+ *
+ * (1) For private mappings: pmd unsharing is not possible, so holding the
+ * mmap_lock for either read or write is sufficient. Most callers
+ * already hold the mmap_lock, so normally, no special action is
+ * required.
+ *
+ * (2) For shared mappings: pmd unsharing is possible (so the PUD-ranged
+ * pgtable page can go away from under us! It can be done by a pmd
+ * unshare with a follow up munmap() on the other process), then we
+ * need either:
+ *
+ * (2.1) hugetlb vma lock read or write held, to make sure pmd unshare
+ * won't happen upon the range (it also makes sure the pte_t we
+ * read is the right and stable one), or,
+ *
+ * (2.2) hugetlb mapping i_mmap_rwsem lock held read or write, to make
+ * sure even if unshare happened the racy unmap() will wait until
+ * i_mmap_rwsem is released.
+ *
+ * Option (2.1) is the safest, as it guarantees pte stability from the pmd
+ * sharing point of view until the vma lock is released. Option (2.2) doesn't
+ * protect against a concurrent pmd unshare, but it makes sure the pgtable
+ * page is safe to access.
+ */
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz);
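A minimal sketch of rule (2.1) above (hypothetical caller on a shared mapping; real callers should prefer hugetlb_walk(), which verifies the locking for you):

	pte_t *ptep;

	hugetlb_vma_lock_read(vma);
	ptep = huge_pte_offset(vma->vm_mm, addr, huge_page_size(h));
	if (ptep) {
		/* The pgtable page holding ptep cannot be unshared and
		 * freed until the vma lock is dropped below. */
	}
	hugetlb_vma_unlock_read(vma);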
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
- int write);
-struct page *follow_huge_pd(struct vm_area_struct *vma,
- unsigned long address, hugepd_t hpd,
- int flags, int pdshift);
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int flags);
-struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
- pud_t *pud, int flags);
-struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
- pgd_t *pgd, int flags);
+unsigned long hugetlb_mask_last_page(struct hstate *h);
+int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep);
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end);
+
+void hugetlb_vma_lock_read(struct vm_area_struct *vma);
+void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
+void hugetlb_vma_lock_write(struct vm_area_struct *vma);
+void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
+int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
+void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
+void hugetlb_vma_lock_release(struct kref *kref);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
-unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
- unsigned long address, unsigned long end, pgprot_t newprot);
+long hugetlb_change_protection(struct vm_area_struct *vma,
+ unsigned long address, unsigned long end, pgprot_t newprot,
+ unsigned long cp_flags);
bool is_hugetlb_entry_migration(pte_t pte);
+void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
+
#else /* !CONFIG_HUGETLB_PAGE */
-static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
+static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
+{
+}
+
+static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}
@@ -168,56 +279,197 @@ static inline unsigned long hugetlb_total_pages(void)
return 0;
}
-#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
-#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
-#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
+static inline struct address_space *hugetlb_page_mapping_lock_write(
+ struct page *hpage)
+{
+ return NULL;
+}
+
+static inline int huge_pmd_unshare(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ return 0;
+}
+
+static inline void adjust_range_if_pmd_sharing_possible(
+ struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+}
+
+static inline struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
+ unsigned long address, unsigned int flags)
+{
+	BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE */
+}
+
+static inline long follow_hugetlb_page(struct mm_struct *mm,
+ struct vm_area_struct *vma, struct page **pages,
+ struct vm_area_struct **vmas, unsigned long *position,
+ unsigned long *nr_pages, long i, unsigned int flags,
+ int *nonblocking)
+{
+ BUG();
+ return 0;
+}
+
+static inline int copy_hugetlb_page_range(struct mm_struct *dst,
+ struct mm_struct *src,
+ struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma)
+{
+ BUG();
+ return 0;
+}
+
+static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
+ struct vm_area_struct *new_vma,
+ unsigned long old_addr,
+ unsigned long new_addr,
+ unsigned long len)
+{
+ BUG();
+ return 0;
+}
+
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
-#define hugetlb_report_node_meminfo(n, buf) 0
-static inline void hugetlb_show_meminfo(void)
+
+static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
+{
+ return 0;
+}
+
+static inline void hugetlb_show_meminfo_node(int nid)
+{
+}
+
+static inline int prepare_hugepage_range(struct file *file,
+ unsigned long addr, unsigned long len)
+{
+ return -EINVAL;
+}
+
+static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
+{
+}
+
+static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
+{
+}
+
+static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
+{
+}
+
+static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
}
-#define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL
-#define follow_huge_pmd(mm, addr, pmd, flags) NULL
-#define follow_huge_pud(mm, addr, pud, flags) NULL
-#define follow_huge_pgd(mm, addr, pgd, flags) NULL
-#define prepare_hugepage_range(file, addr, len) (-EINVAL)
-#define pmd_huge(x) 0
-#define pud_huge(x) 0
-#define is_hugepage_only_range(mm, addr, len) 0
-#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
-#define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; })
-#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
- src_addr, pagep) ({ BUG(); 0; })
-#define huge_pte_offset(mm, address, sz) 0
-static inline bool isolate_huge_page(struct page *page, struct list_head *list)
+static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
+{
+ return 1;
+}
+
+static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
+{
+}
+
+static inline int pmd_huge(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline int pud_huge(pud_t pud)
+{
+ return 0;
+}
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr, unsigned long len)
+{
+ return 0;
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor, unsigned long ceiling)
+{
+ BUG();
+}
+
+#ifdef CONFIG_USERFAULTFD
+static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
+ struct vm_area_struct *dst_vma,
+ unsigned long dst_addr,
+ unsigned long src_addr,
+ uffd_flags_t flags,
+ struct folio **foliop)
+{
+ BUG();
+ return 0;
+}
+#endif /* CONFIG_USERFAULTFD */
+
+static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
+ unsigned long sz)
+{
+ return NULL;
+}
+
+static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list)
{
return false;
}
-#define putback_active_hugepage(p) do {} while (0)
-static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
- unsigned long address, unsigned long end, pgprot_t newprot)
+static inline int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
+{
+ return 0;
+}
+
+static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
+ bool *migratable_cleared)
+{
+ return 0;
+}
+
+static inline void folio_putback_active_hugetlb(struct folio *folio)
+{
+}
+
+static inline void move_hugetlb_state(struct folio *old_folio,
+ struct folio *new_folio, int reason)
+{
+}
+
+static inline long hugetlb_change_protection(
+ struct vm_area_struct *vma, unsigned long address,
+ unsigned long end, pgprot_t newprot,
+ unsigned long cp_flags)
{
return 0;
}
static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start,
- unsigned long end, struct page *ref_page)
+ unsigned long end, struct page *ref_page,
+ zap_flags_t zap_flags)
{
BUG();
}
-static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
- struct vm_area_struct *vma, unsigned long start,
- unsigned long end, struct page *ref_page)
+static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ unsigned int flags)
{
BUG();
+ return 0;
}
+static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
+
#endif /* !CONFIG_HUGETLB_PAGE */
/*
* hugepages at page global directory. If arch support
@@ -238,14 +490,6 @@ static inline int pgd_write(pgd_t pgd)
}
#endif
-#ifndef pud_write
-static inline int pud_write(pud_t pud)
-{
- BUG();
- return 0;
-}
-#endif
-
#define HUGETLB_ANON_FILE "anon_hugepage"
enum {
@@ -278,11 +522,21 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
return sb->s_fs_info;
}
+struct hugetlbfs_inode_info {
+ struct shared_policy policy;
+ struct inode vfs_inode;
+ unsigned int seals;
+};
+
+static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
+{
+ return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
+}
+
extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
- struct user_struct **user, int creat_flags,
- int page_size_log);
+ int creat_flags, int page_size_log);
static inline bool is_file_hugepages(struct file *file)
{
@@ -292,18 +546,24 @@ static inline bool is_file_hugepages(struct file *file)
return is_file_shm_hugepages(file);
}
-
+static inline struct hstate *hstate_inode(struct inode *i)
+{
+ return HUGETLBFS_SB(i->i_sb)->hstate;
+}
#else /* !CONFIG_HUGETLBFS */
#define is_file_hugepages(file) false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
- struct user_struct **user, int creat_flags,
- int page_size_log)
+ int creat_flags, int page_size_log)
{
return ERR_PTR(-ENOSYS);
}
+static inline struct hstate *hstate_inode(struct inode *i)
+{
+ return NULL;
+}
#endif /* !CONFIG_HUGETLBFS */
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
@@ -312,14 +572,130 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
+unsigned long
+generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+
+/*
+ * hugetlb page specific state flags. These flags are located in page.private
+ * of the hugetlb head page. Functions created via the below macros should be
+ * used to manipulate these flags.
+ *
+ * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
+ * allocation time. Cleared when page is fully instantiated. Free
+ * routine checks flag to restore a reservation on error paths.
+ * Synchronization: Examined or modified by code that knows it has
+ * the only reference to page. i.e. After allocation but before use
+ * or when the page is being freed.
+ * HPG_migratable - Set after a newly allocated page is added to the page
+ * cache and/or page tables. Indicates the page is a candidate for
+ * migration.
+ * Synchronization: Initially set after new page allocation with no
+ * locking. When examined and modified during migration processing
+ * (isolate, migrate, putback) the hugetlb_lock is held.
+ * HPG_temporary - Set on a page that is temporarily allocated from the buddy
+ * allocator. Typically used for migration target pages when no pages
+ * are available in the pool. The hugetlb free page path will
+ *	immediately free pages with this flag set back to the buddy allocator.
+ * Synchronization: Can be set after huge page allocation from buddy when
+ * code knows it has only reference. All other examinations and
+ * modifications require hugetlb_lock.
+ * HPG_freed - Set when page is on the free lists.
+ * Synchronization: hugetlb_lock held for examination and modification.
+ * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
+ * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
+ * that is not tracked by raw_hwp_page list.
+ */
+enum hugetlb_page_flags {
+ HPG_restore_reserve = 0,
+ HPG_migratable,
+ HPG_temporary,
+ HPG_freed,
+ HPG_vmemmap_optimized,
+ HPG_raw_hwp_unreliable,
+ __NR_HPAGEFLAGS,
+};
+
+/*
+ * Macros to create test, set and clear function definitions for
+ * hugetlb specific page flags.
+ */
+#ifdef CONFIG_HUGETLB_PAGE
+#define TESTHPAGEFLAG(uname, flname) \
+static __always_inline \
+bool folio_test_hugetlb_##flname(struct folio *folio) \
+ { void *private = &folio->private; \
+ return test_bit(HPG_##flname, private); \
+ } \
+static inline int HPage##uname(struct page *page) \
+ { return test_bit(HPG_##flname, &(page->private)); }
+
+#define SETHPAGEFLAG(uname, flname) \
+static __always_inline \
+void folio_set_hugetlb_##flname(struct folio *folio) \
+ { void *private = &folio->private; \
+ set_bit(HPG_##flname, private); \
+ } \
+static inline void SetHPage##uname(struct page *page) \
+ { set_bit(HPG_##flname, &(page->private)); }
+
+#define CLEARHPAGEFLAG(uname, flname) \
+static __always_inline \
+void folio_clear_hugetlb_##flname(struct folio *folio) \
+ { void *private = &folio->private; \
+ clear_bit(HPG_##flname, private); \
+ } \
+static inline void ClearHPage##uname(struct page *page) \
+ { clear_bit(HPG_##flname, &(page->private)); }
+#else
+#define TESTHPAGEFLAG(uname, flname) \
+static inline bool \
+folio_test_hugetlb_##flname(struct folio *folio) \
+ { return 0; } \
+static inline int HPage##uname(struct page *page) \
+ { return 0; }
+
+#define SETHPAGEFLAG(uname, flname) \
+static inline void \
+folio_set_hugetlb_##flname(struct folio *folio) \
+ { } \
+static inline void SetHPage##uname(struct page *page) \
+ { }
+
+#define CLEARHPAGEFLAG(uname, flname) \
+static inline void \
+folio_clear_hugetlb_##flname(struct folio *folio) \
+ { } \
+static inline void ClearHPage##uname(struct page *page) \
+ { }
+#endif
+
+#define HPAGEFLAG(uname, flname) \
+ TESTHPAGEFLAG(uname, flname) \
+ SETHPAGEFLAG(uname, flname) \
+ CLEARHPAGEFLAG(uname, flname) \
+
+/*
+ * Create functions associated with hugetlb page flags
+ */
+HPAGEFLAG(RestoreReserve, restore_reserve)
+HPAGEFLAG(Migratable, migratable)
+HPAGEFLAG(Temporary, temporary)
+HPAGEFLAG(Freed, freed)
+HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
+HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
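For instance, HPAGEFLAG(Freed, freed) above generates folio_test_hugetlb_freed(), folio_set_hugetlb_freed() and folio_clear_hugetlb_freed(), plus the page-based HPageFreed()/SetHPageFreed()/ClearHPageFreed() variants, so a caller can write:

	/* Hypothetical caller, holding hugetlb_lock as required for HPG_freed. */
	if (!folio_test_hugetlb_freed(folio))
		folio_set_hugetlb_freed(folio);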
+
#ifdef CONFIG_HUGETLB_PAGE
#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
+ struct mutex resize_lock;
int next_nid_to_alloc;
int next_nid_to_free;
unsigned int order;
+ unsigned int demote_order;
unsigned long mask;
unsigned long max_huge_pages;
unsigned long nr_huge_pages;
@@ -329,12 +705,14 @@ struct hstate {
unsigned long nr_overcommit_huge_pages;
struct list_head hugepage_activelist;
struct list_head hugepage_freelists[MAX_NUMNODES];
+ unsigned int max_huge_pages_node[MAX_NUMNODES];
unsigned int nr_huge_pages_node[MAX_NUMNODES];
unsigned int free_huge_pages_node[MAX_NUMNODES];
unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
/* cgroup control files */
- struct cftype cgroup_files[5];
+ struct cftype cgroup_files_dfl[8];
+ struct cftype cgroup_files_legacy[10];
#endif
char name[HSTATE_NAME_LEN];
};
@@ -342,26 +720,27 @@ struct hstate {
struct huge_bootmem_page {
struct list_head list;
struct hstate *hstate;
-#ifdef CONFIG_HIGHMEM
- phys_addr_t phys;
-#endif
};
-struct page *alloc_huge_page(struct vm_area_struct *vma,
+int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
+struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve);
-struct page *alloc_huge_page_node(struct hstate *h, int nid);
-struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
- unsigned long addr, int avoid_reserve);
-struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
- nodemask_t *nmask);
-int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
+struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask);
+struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma,
+ unsigned long address);
+int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
pgoff_t idx);
+void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
+ unsigned long address, struct folio *folio);
/* arch callback */
-int __init alloc_bootmem_huge_page(struct hstate *h);
+int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
+int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
+bool __init hugetlb_node_alloc_supported(void);
-void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
+bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);
#ifndef HUGE_MAX_HSTATE
@@ -373,9 +752,29 @@ extern unsigned int default_hstate_idx;
#define default_hstate (hstates[default_hstate_idx])
-static inline struct hstate *hstate_inode(struct inode *i)
+static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
- return HUGETLBFS_SB(i->i_sb)->hstate;
+ return folio->_hugetlb_subpool;
+}
+
+/*
+ * The hugetlb page subpool pointer is stored in folio->_hugetlb_subpool.
+ */
+static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
+{
+ return hugetlb_folio_subpool(page_folio(hpage));
+}
+
+static inline void hugetlb_set_folio_subpool(struct folio *folio,
+ struct hugepage_subpool *subpool)
+{
+ folio->_hugetlb_subpool = subpool;
+}
+
+static inline void hugetlb_set_page_subpool(struct page *hpage,
+ struct hugepage_subpool *subpool)
+{
+ hugetlb_set_folio_subpool(page_folio(hpage), subpool);
}
static inline struct hstate *hstate_file(struct file *f)
@@ -388,7 +787,10 @@ static inline struct hstate *hstate_sizelog(int page_size_log)
if (!page_size_log)
return &default_hstate;
- return size_to_hstate(1UL << page_size_log);
+ if (page_size_log < BITS_PER_LONG)
+ return size_to_hstate(1UL << page_size_log);
+
+ return NULL;
}
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
@@ -396,7 +798,7 @@ static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
return hstate_file(vma->vm_file);
}
-static inline unsigned long huge_page_size(struct hstate *h)
+static inline unsigned long huge_page_size(const struct hstate *h)
{
return (unsigned long)PAGE_SIZE << h->order;
}
@@ -422,10 +824,10 @@ static inline unsigned huge_page_shift(struct hstate *h)
static inline bool hstate_is_gigantic(struct hstate *h)
{
- return huge_page_order(h) >= MAX_ORDER;
+ return huge_page_order(h) > MAX_ORDER;
}
-static inline unsigned int pages_per_huge_page(struct hstate *h)
+static inline unsigned int pages_per_huge_page(const struct hstate *h)
{
return 1 << h->order;
}
@@ -437,18 +839,37 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h)
#include <asm/hugetlb.h>
+#ifndef is_hugepage_only_range
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr, unsigned long len)
+{
+ return 0;
+}
+#define is_hugepage_only_range is_hugepage_only_range
+#endif
+
+#ifndef arch_clear_hugepage_flags
+static inline void arch_clear_hugepage_flags(struct page *page) { }
+#define arch_clear_hugepage_flags arch_clear_hugepage_flags
+#endif
+
#ifndef arch_make_huge_pte
-static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable)
+static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
+ vm_flags_t flags)
{
- return entry;
+ return pte_mkhuge(entry);
}
#endif
+static inline struct hstate *folio_hstate(struct folio *folio)
+{
+ VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ return size_to_hstate(folio_size(folio));
+}
+
static inline struct hstate *page_hstate(struct page *page)
{
- VM_BUG_ON_PAGE(!PageHuge(page), page);
- return size_to_hstate(PAGE_SIZE << compound_order(page));
+ return folio_hstate(page_folio(page));
}
static inline unsigned hstate_index_to_shift(unsigned index)
@@ -461,31 +882,86 @@ static inline int hstate_index(struct hstate *h)
return h - hstates;
}
-pgoff_t __basepage_index(struct page *page);
-
-/* Return page->index in PAGE_SIZE units */
-static inline pgoff_t basepage_index(struct page *page)
-{
- if (!PageCompound(page))
- return page->index;
-
- return __basepage_index(page);
-}
-
extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
unsigned long end_pfn);
-static inline bool hugepage_migration_supported(struct hstate *h)
+
+#ifdef CONFIG_MEMORY_FAILURE
+extern void folio_clear_hugetlb_hwpoison(struct folio *folio);
+#else
+static inline void folio_clear_hugetlb_hwpoison(struct folio *folio)
{
+}
+#endif
+
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+#ifndef arch_hugetlb_migration_supported
+static inline bool arch_hugetlb_migration_supported(struct hstate *h)
+{
if ((huge_page_shift(h) == PMD_SHIFT) ||
- (huge_page_shift(h) == PGDIR_SHIFT))
+ (huge_page_shift(h) == PUD_SHIFT) ||
+ (huge_page_shift(h) == PGDIR_SHIFT))
return true;
else
return false;
+}
+#endif
#else
+static inline bool arch_hugetlb_migration_supported(struct hstate *h)
+{
return false;
+}
#endif
+
+static inline bool hugepage_migration_supported(struct hstate *h)
+{
+ return arch_hugetlb_migration_supported(h);
+}
+
+/*
+ * The movability check is distinct from the migration check: it
+ * decides whether a huge page may be placed in a movable zone.
+ * Movability only makes sense if the huge page size supports
+ * migration in the first place; a page that cannot be migrated has
+ * no business in a movable zone.  The size must also be small enough
+ * that migrating the page out of a movable zone remains feasible;
+ * merely residing in a movable zone does not make migration feasible.
+ *
+ * So even though gigantic huge pages are migratable, they are not
+ * treated as movable, because it is not feasible to migrate them out
+ * of a movable zone.
+ */
+static inline bool hugepage_movable_supported(struct hstate *h)
+{
+ if (!hugepage_migration_supported(h))
+ return false;
+
+ if (hstate_is_gigantic(h))
+ return false;
+ return true;
+}
+
+/* Movability of hugepages depends on migration support. */
+static inline gfp_t htlb_alloc_mask(struct hstate *h)
+{
+ if (hugepage_movable_supported(h))
+ return GFP_HIGHUSER_MOVABLE;
+ else
+ return GFP_HIGHUSER;
+}
+
+static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
+{
+ gfp_t modified_mask = htlb_alloc_mask(h);
+
+ /* Some callers might want to enforce node */
+ modified_mask |= (gfp_mask & __GFP_THISNODE);
+
+ modified_mask |= (gfp_mask & __GFP_NOWARN);
+
+ return modified_mask;
}
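A sketch of how an allocation-for-migration path might combine these helpers (alloc_target_sketch() is hypothetical; alloc_hugetlb_folio_nodemask() is declared earlier in this header):

static struct folio *alloc_target_sketch(struct hstate *h, int nid,
					 nodemask_t *nmask, gfp_t extra)
{
	/* Keep only the modifier bits the helper accepts */
	gfp_t gfp_mask = htlb_modify_alloc_mask(h,
				extra & (__GFP_THISNODE | __GFP_NOWARN));

	return alloc_hugetlb_folio_nodemask(h, nid, nmask, gfp_mask);
}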
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
@@ -508,6 +984,11 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
+static inline void hugetlb_count_init(struct mm_struct *mm)
+{
+ atomic_long_set(&mm->hugetlb_usage, 0);
+}
+
static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
atomic_long_add(l, &mm->hugetlb_usage);
@@ -518,31 +999,135 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
atomic_long_sub(l, &mm->hugetlb_usage);
}
-#ifndef set_huge_swap_pte_at
-static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte, unsigned long sz)
+#ifndef huge_ptep_modify_prot_start
+#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
+static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+#endif
+
+#ifndef huge_ptep_modify_prot_commit
+#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
+static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t old_pte, pte_t pte)
{
- set_huge_pte_at(mm, addr, ptep, pte);
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
+
+#ifdef CONFIG_NUMA
+void hugetlb_register_node(struct node *node);
+void hugetlb_unregister_node(struct node *node);
+#endif
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
-#define alloc_huge_page(v, a, r) NULL
-#define alloc_huge_page_node(h, nid) NULL
-#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
-#define alloc_huge_page_noerr(v, a, r) NULL
-#define alloc_bootmem_huge_page(h) NULL
-#define hstate_file(f) NULL
-#define hstate_sizelog(s) NULL
-#define hstate_vma(v) NULL
-#define hstate_inode(i) NULL
-#define page_hstate(page) NULL
-#define huge_page_size(h) PAGE_SIZE
-#define huge_page_mask(h) PAGE_MASK
-#define vma_kernel_pagesize(v) PAGE_SIZE
-#define vma_mmu_pagesize(v) PAGE_SIZE
-#define huge_page_order(h) 0
-#define huge_page_shift(h) PAGE_SHIFT
+
+static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
+{
+ return NULL;
+}
+
+static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
+{
+ return NULL;
+}
+
+static inline int isolate_or_dissolve_huge_page(struct page *page,
+ struct list_head *list)
+{
+ return -ENOMEM;
+}
+
+static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
+ unsigned long addr,
+ int avoid_reserve)
+{
+ return NULL;
+}
+
+static inline struct folio *
+alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask)
+{
+ return NULL;
+}
+
+static inline struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
+ struct vm_area_struct *vma,
+ unsigned long address)
+{
+ return NULL;
+}
+
+static inline int __alloc_bootmem_huge_page(struct hstate *h, int nid)
+{
+ return 0;
+}
+
+static inline struct hstate *hstate_file(struct file *f)
+{
+ return NULL;
+}
+
+static inline struct hstate *hstate_sizelog(int page_size_log)
+{
+ return NULL;
+}
+
+static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
+{
+ return NULL;
+}
+
+static inline struct hstate *folio_hstate(struct folio *folio)
+{
+ return NULL;
+}
+
+static inline struct hstate *page_hstate(struct page *page)
+{
+ return NULL;
+}
+
+static inline struct hstate *size_to_hstate(unsigned long size)
+{
+ return NULL;
+}
+
+static inline unsigned long huge_page_size(struct hstate *h)
+{
+ return PAGE_SIZE;
+}
+
+static inline unsigned long huge_page_mask(struct hstate *h)
+{
+ return PAGE_MASK;
+}
+
+static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
+{
+ return PAGE_SIZE;
+}
+
+static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
+{
+ return PAGE_SIZE;
+}
+
+static inline unsigned int huge_page_order(struct hstate *h)
+{
+ return 0;
+}
+
+static inline unsigned int huge_page_shift(struct hstate *h)
+{
+ return PAGE_SHIFT;
+}
+
static inline bool hstate_is_gigantic(struct hstate *h)
{
return false;
@@ -563,11 +1148,6 @@ static inline int hstate_index(struct hstate *h)
return 0;
}
-static inline pgoff_t basepage_index(struct page *page)
-{
- return page->index;
-}
-
static inline int dissolve_free_huge_page(struct page *page)
{
return 0;
@@ -584,12 +1164,31 @@ static inline bool hugepage_migration_supported(struct hstate *h)
return false;
}
+static inline bool hugepage_movable_supported(struct hstate *h)
+{
+ return false;
+}
+
+static inline gfp_t htlb_alloc_mask(struct hstate *h)
+{
+ return 0;
+}
+
+static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
+{
+ return 0;
+}
+
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
struct mm_struct *mm, pte_t *pte)
{
return &mm->page_table_lock;
}
+static inline void hugetlb_count_init(struct mm_struct *mm)
+{
+}
+
static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}
@@ -598,8 +1197,22 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}
-static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte, unsigned long sz)
+static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ return *ptep;
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+}
+
+static inline void hugetlb_register_node(struct node *node)
+{
+}
+
+static inline void hugetlb_unregister_node(struct node *node)
{
}
#endif /* CONFIG_HUGETLB_PAGE */
@@ -614,4 +1227,65 @@ static inline spinlock_t *huge_pte_lock(struct hstate *h,
return ptl;
}
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
+extern void __init hugetlb_cma_reserve(int order);
+#else
+static inline __init void hugetlb_cma_reserve(int order)
+{
+}
+#endif
+
+#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+static inline bool hugetlb_pmd_shared(pte_t *pte)
+{
+ return page_count(virt_to_page(pte)) > 1;
+}
+#else
+static inline bool hugetlb_pmd_shared(pte_t *pte)
+{
+ return false;
+}
+#endif
+
+bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
+
+#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+/*
+ * Architectures with special requirements for flushing hugetlb-backing
+ * TLB entries can implement this; the default falls back to
+ * flush_tlb_range().
+ */
+#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#endif
+
+static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
+{
+ return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
+}
+
+/*
+ * Safe version of huge_pte_offset() that asserts the required locks
+ * are held.  See the comments above huge_pte_offset().
+ */
+static inline pte_t *
+hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
+{
+#if defined(CONFIG_HUGETLB_PAGE) && \
+ defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP)
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+	/*
+	 * If PMD sharing is possible, locking is needed to walk the
+	 * hugetlb page tables safely.  More information can be found in
+	 * the comment above huge_pte_offset() in the same file.
+	 *
+	 * NOTE: lockdep_is_held() is only defined with CONFIG_LOCKDEP.
+	 */
+ if (__vma_shareable_lock(vma))
+ WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) &&
+ !lockdep_is_held(
+ &vma->vm_file->f_mapping->i_mmap_rwsem));
+#endif
+ return huge_pte_offset(vma->vm_mm, addr, sz);
+}
+
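A sketch of the expected calling pattern, assuming the hugetlb VMA lock helpers (hugetlb_vma_lock_read()/hugetlb_vma_unlock_read()) from mm/hugetlb.c; the fragment is illustrative, not lifted from a real caller:

	pte_t *ptep;
	pte_t pte;

	hugetlb_vma_lock_read(vma);	/* or hold i_mmap_rwsem */
	ptep = hugetlb_walk(vma, addr & huge_page_mask(h), huge_page_size(h));
	if (ptep)
		pte = huge_ptep_get(ptep);
	hugetlb_vma_unlock_read(vma);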
#endif /* _LINUX_HUGETLB_H */
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 063962f6dfc6..3d82d91f49ac 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -18,32 +18,97 @@
#include <linux/mmdebug.h>
struct hugetlb_cgroup;
+struct resv_map;
+struct file_region;
+
+#ifdef CONFIG_CGROUP_HUGETLB
/*
* Minimum page order trackable by hugetlb cgroup.
* At least 3 pages are necessary for all the tracking information.
+ * The second tail page contains all of the hugetlb-specific fields.
*/
-#define HUGETLB_CGROUP_MIN_ORDER 2
+#define HUGETLB_CGROUP_MIN_ORDER order_base_2(__NR_USED_SUBPAGE)
-#ifdef CONFIG_CGROUP_HUGETLB
+enum hugetlb_memory_event {
+ HUGETLB_MAX,
+ HUGETLB_NR_MEMORY_EVENTS,
+};
-static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageHuge(page), page);
+struct hugetlb_cgroup_per_node {
+ /* hugetlb usage in pages over all hstates. */
+ unsigned long usage[HUGE_MAX_HSTATE];
+};
+
+struct hugetlb_cgroup {
+ struct cgroup_subsys_state css;
+
+ /*
+ * the counter to account for hugepages from hugetlb.
+ */
+ struct page_counter hugepage[HUGE_MAX_HSTATE];
+
+ /*
+ * the counter to account for hugepage reservations from hugetlb.
+ */
+ struct page_counter rsvd_hugepage[HUGE_MAX_HSTATE];
+
+ atomic_long_t events[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
+ atomic_long_t events_local[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
+
+ /* Handle for "hugetlb.events" */
+ struct cgroup_file events_file[HUGE_MAX_HSTATE];
- if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
+ /* Handle for "hugetlb.events.local" */
+ struct cgroup_file events_local_file[HUGE_MAX_HSTATE];
+
+ struct hugetlb_cgroup_per_node *nodeinfo[];
+};
+
+static inline struct hugetlb_cgroup *
+__hugetlb_cgroup_from_folio(struct folio *folio, bool rsvd)
+{
+ VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER)
return NULL;
- return (struct hugetlb_cgroup *)page[2].private;
+ if (rsvd)
+ return folio->_hugetlb_cgroup_rsvd;
+ else
+ return folio->_hugetlb_cgroup;
}
-static inline
-int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio)
{
- VM_BUG_ON_PAGE(!PageHuge(page), page);
+ return __hugetlb_cgroup_from_folio(folio, false);
+}
- if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
- return -1;
- page[2].private = (unsigned long)h_cg;
- return 0;
+static inline struct hugetlb_cgroup *
+hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
+{
+ return __hugetlb_cgroup_from_folio(folio, true);
+}
+
+static inline void __set_hugetlb_cgroup(struct folio *folio,
+ struct hugetlb_cgroup *h_cg, bool rsvd)
+{
+ VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER)
+ return;
+ if (rsvd)
+ folio->_hugetlb_cgroup_rsvd = h_cg;
+ else
+ folio->_hugetlb_cgroup = h_cg;
+}
+
+static inline void set_hugetlb_cgroup(struct folio *folio,
+ struct hugetlb_cgroup *h_cg)
+{
+ __set_hugetlb_cgroup(folio, h_cg, false);
+}
+
+static inline void set_hugetlb_cgroup_rsvd(struct folio *folio,
+ struct hugetlb_cgroup *h_cg)
+{
+ __set_hugetlb_cgroup(folio, h_cg, true);
}
static inline bool hugetlb_cgroup_disabled(void)
@@ -51,29 +116,84 @@ static inline bool hugetlb_cgroup_disabled(void)
return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
}
+static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
+{
+ css_put(&h_cg->css);
+}
+
+static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
+ struct resv_map *resv_map)
+{
+ if (resv_map->css)
+ css_get(resv_map->css);
+}
+
+static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
+ struct resv_map *resv_map)
+{
+ if (resv_map->css)
+ css_put(resv_map->css);
+}
+
extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr);
+extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr);
extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
- struct page *page);
-extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
- struct page *page);
+ struct folio *folio);
+extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct folio *folio);
+extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+ struct folio *folio);
+extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
+ struct folio *folio);
+
extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg);
+extern void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg);
+extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
+ unsigned long start,
+ unsigned long end);
+
+extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
+ struct file_region *rg,
+ unsigned long nr_pages,
+ bool region_del);
+
extern void hugetlb_cgroup_file_init(void) __init;
-extern void hugetlb_cgroup_migrate(struct page *oldhpage,
- struct page *newhpage);
+extern void hugetlb_cgroup_migrate(struct folio *old_folio,
+ struct folio *new_folio);
#else
-static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
+ struct file_region *rg,
+ unsigned long nr_pages,
+ bool region_del)
+{
+}
+
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio)
{
return NULL;
}
-static inline
-int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
+static inline struct hugetlb_cgroup *
+hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
+{
+ return NULL;
+}
+
+static inline void set_hugetlb_cgroup(struct folio *folio,
+ struct hugetlb_cgroup *h_cg)
+{
+}
+
+static inline void set_hugetlb_cgroup_rsvd(struct folio *folio,
+ struct hugetlb_cgroup *h_cg)
{
- return 0;
}
static inline bool hugetlb_cgroup_disabled(void)
@@ -81,28 +201,71 @@ static inline bool hugetlb_cgroup_disabled(void)
return true;
}
-static inline int
-hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
- struct hugetlb_cgroup **ptr)
+static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
+{
+}
+
+static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
+ struct resv_map *resv_map)
+{
+}
+
+static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
+ struct resv_map *resv_map)
+{
+}
+
+static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr)
{
return 0;
}
-static inline void
-hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
- struct hugetlb_cgroup *h_cg,
- struct page *page)
+static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx,
+ unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr)
+{
+ return 0;
+}
+
+static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct folio *folio)
{
}
static inline void
-hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page)
+hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct folio *folio)
+{
+}
+
+static inline void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+ struct folio *folio)
+{
+}
+
+static inline void hugetlb_cgroup_uncharge_folio_rsvd(int idx,
+ unsigned long nr_pages,
+ struct folio *folio)
+{
+}
+static inline void hugetlb_cgroup_uncharge_cgroup(int idx,
+ unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg)
{
}
static inline void
-hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
- struct hugetlb_cgroup *h_cg)
+hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg)
+{
+}
+
+static inline void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
+ unsigned long start,
+ unsigned long end)
{
}
@@ -110,8 +273,8 @@ static inline void hugetlb_cgroup_file_init(void)
{
}
-static inline void hugetlb_cgroup_migrate(struct page *oldhpage,
- struct page *newhpage)
+static inline void hugetlb_cgroup_migrate(struct folio *old_folio,
+ struct folio *new_folio)
{
}
diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h
index a4e7ca0f3585..0660a03d37d9 100644
--- a/include/linux/hugetlb_inline.h
+++ b/include/linux/hugetlb_inline.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_INLINE_H
#define _LINUX_HUGETLB_INLINE_H
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index 0464c85e63fd..7fbb45911273 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HW_BREAKPOINT_H
#define _LINUX_HW_BREAKPOINT_H
@@ -6,6 +7,16 @@
#ifdef CONFIG_HAVE_HW_BREAKPOINT
+enum bp_type_idx {
+ TYPE_INST = 0,
+#if defined(CONFIG_HAVE_MIXED_BREAKPOINTS_REGS)
+ TYPE_DATA = 0,
+#else
+ TYPE_DATA = 1,
+#endif
+ TYPE_MAX
+};
+
extern int __init init_hw_breakpoint(void);
static inline void hw_breakpoint_init(struct perf_event_attr *attr)
@@ -52,6 +63,9 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
/* FIXME: only change from the attr, and don't unregister */
extern int
modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr);
+extern int
+modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
+ bool check);
/*
* Kernel breakpoints are not associated with any particular thread.
@@ -68,14 +82,17 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
void *context);
extern int register_perf_hw_breakpoint(struct perf_event *bp);
-extern int __register_perf_hw_breakpoint(struct perf_event *bp);
extern void unregister_hw_breakpoint(struct perf_event *bp);
extern void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events);
+extern bool hw_breakpoint_is_used(void);
extern int dbg_reserve_bp_slot(struct perf_event *bp);
extern int dbg_release_bp_slot(struct perf_event *bp);
extern int reserve_bp_slot(struct perf_event *bp);
extern void release_bp_slot(struct perf_event *bp);
+int arch_reserve_bp_slot(struct perf_event *bp);
+void arch_release_bp_slot(struct perf_event *bp);
+void arch_unregister_hw_breakpoint(struct perf_event *bp);
extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
@@ -96,6 +113,10 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
static inline int
modify_user_hw_breakpoint(struct perf_event *bp,
struct perf_event_attr *attr) { return -ENOSYS; }
+static inline int
+modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
+ bool check) { return -ENOSYS; }
+
static inline struct perf_event *
register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
perf_overflow_handler_t triggered,
@@ -107,11 +128,11 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
void *context) { return NULL; }
static inline int
register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
-static inline int
-__register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
static inline void unregister_hw_breakpoint(struct perf_event *bp) { }
static inline void
unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) { }
+static inline bool hw_breakpoint_is_used(void) { return false; }
+
static inline int
reserve_bp_slot(struct perf_event *bp) {return -ENOSYS; }
static inline void release_bp_slot(struct perf_event *bp) { }
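For reference, a minimal sketch of the per-task API above (wp_handler() and the watched address are hypothetical; HW_BREAKPOINT_W and HW_BREAKPOINT_LEN_4 come from uapi <linux/hw_breakpoint.h>):

static void wp_handler(struct perf_event *bp, struct perf_sample_data *data,
		       struct pt_regs *regs)
{
	pr_info("watched address written\n");
}

static struct perf_event *install_watchpoint(struct task_struct *tsk,
					     unsigned long addr)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = addr;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	return register_user_hw_breakpoint(&attr, wp_handler, NULL, tsk);
}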
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index bee0827766a3..8a3115516a1b 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -1,7 +1,7 @@
/*
Hardware Random Number Generator
- Please read Documentation/hw_random.txt for details on use.
+ Please read Documentation/admin-guide/hw_random.rst for details on use.
----------------------------------------------------------
This software may be used and distributed according to the terms
@@ -33,7 +33,8 @@
* and max is a multiple of 4 and >= 32 bytes.
* @priv: Private data, for use by the RNG driver.
* @quality: Estimation of true entropy in RNG's bitstream
- * (per mill).
+ * (in bits of entropy per 1024 bits of input;
+ * valid values: 1 to 1024, or 0 for maximum).
*/
struct hwrng {
const char *name;
@@ -49,6 +50,7 @@ struct hwrng {
struct list_head list;
struct kref ref;
struct completion cleanup_done;
+ struct completion dying;
};
struct device;
@@ -59,7 +61,7 @@ extern int devm_hwrng_register(struct device *dev, struct hwrng *rng);
/** Unregister a Hardware Random Number Generator driver. */
extern void hwrng_unregister(struct hwrng *rng);
extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng);
-/** Feed random bits into the pool. */
-extern void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy);
+
+extern long hwrng_msleep(struct hwrng *rng, unsigned int msecs);
#endif /* LINUX_HWRANDOM_H_ */
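A minimal driver sketch against this interface (the demo_hw_*() helpers are hypothetical; hwrng_msleep() returns non-zero when the sleep is cut short, e.g. because the device is going away):

static int demo_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	while (!demo_hw_ready()) {
		if (!wait)
			return 0;
		if (hwrng_msleep(rng, 10))
			return -ENODEV;
	}
	return demo_hw_fill(data, max);		/* bytes written to @data */
}

static struct hwrng demo_rng = {
	.name	 = "demo",
	.read	 = demo_rng_read,
	.quality = 1024,			/* full entropy, per the doc above */
};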
diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
index 1c7b89ae6bdc..d896713359cd 100644
--- a/include/linux/hwmon-sysfs.h
+++ b/include/linux/hwmon-sysfs.h
@@ -1,26 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* hwmon-sysfs.h - hardware monitoring chip driver sysfs defines
*
* Copyright (C) 2005 Yani Ioannou <yani.ioannou@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _LINUX_HWMON_SYSFS_H
#define _LINUX_HWMON_SYSFS_H
#include <linux/device.h>
+#include <linux/kstrtox.h>
struct sensor_device_attribute{
struct device_attribute dev_attr;
@@ -33,10 +21,28 @@ struct sensor_device_attribute{
{ .dev_attr = __ATTR(_name, _mode, _show, _store), \
.index = _index }
+#define SENSOR_ATTR_RO(_name, _func, _index) \
+ SENSOR_ATTR(_name, 0444, _func##_show, NULL, _index)
+
+#define SENSOR_ATTR_RW(_name, _func, _index) \
+ SENSOR_ATTR(_name, 0644, _func##_show, _func##_store, _index)
+
+#define SENSOR_ATTR_WO(_name, _func, _index) \
+ SENSOR_ATTR(_name, 0200, NULL, _func##_store, _index)
+
#define SENSOR_DEVICE_ATTR(_name, _mode, _show, _store, _index) \
struct sensor_device_attribute sensor_dev_attr_##_name \
= SENSOR_ATTR(_name, _mode, _show, _store, _index)
+#define SENSOR_DEVICE_ATTR_RO(_name, _func, _index) \
+ SENSOR_DEVICE_ATTR(_name, 0444, _func##_show, NULL, _index)
+
+#define SENSOR_DEVICE_ATTR_RW(_name, _func, _index) \
+ SENSOR_DEVICE_ATTR(_name, 0644, _func##_show, _func##_store, _index)
+
+#define SENSOR_DEVICE_ATTR_WO(_name, _func, _index) \
+ SENSOR_DEVICE_ATTR(_name, 0200, NULL, _func##_store, _index)
+
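Typical usage of the RO helper, as a sketch (temp_show()'s body and read_temp_mC() are hypothetical; to_sensor_dev_attr() is defined in this header):

static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	int channel = to_sensor_dev_attr(attr)->index;

	return sprintf(buf, "%d\n", read_temp_mC(dev, channel));
}
static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);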
struct sensor_device_attribute_2 {
struct device_attribute dev_attr;
u8 index;
@@ -50,8 +56,29 @@ struct sensor_device_attribute_2 {
.index = _index, \
.nr = _nr }
+#define SENSOR_ATTR_2_RO(_name, _func, _nr, _index) \
+ SENSOR_ATTR_2(_name, 0444, _func##_show, NULL, _nr, _index)
+
+#define SENSOR_ATTR_2_RW(_name, _func, _nr, _index) \
+ SENSOR_ATTR_2(_name, 0644, _func##_show, _func##_store, _nr, _index)
+
+#define SENSOR_ATTR_2_WO(_name, _func, _nr, _index) \
+ SENSOR_ATTR_2(_name, 0200, NULL, _func##_store, _nr, _index)
+
#define SENSOR_DEVICE_ATTR_2(_name,_mode,_show,_store,_nr,_index) \
struct sensor_device_attribute_2 sensor_dev_attr_##_name \
= SENSOR_ATTR_2(_name, _mode, _show, _store, _nr, _index)
+#define SENSOR_DEVICE_ATTR_2_RO(_name, _func, _nr, _index) \
+ SENSOR_DEVICE_ATTR_2(_name, 0444, _func##_show, NULL, \
+ _nr, _index)
+
+#define SENSOR_DEVICE_ATTR_2_RW(_name, _func, _nr, _index) \
+ SENSOR_DEVICE_ATTR_2(_name, 0644, _func##_show, _func##_store, \
+ _nr, _index)
+
+#define SENSOR_DEVICE_ATTR_2_WO(_name, _func, _nr, _index) \
+ SENSOR_DEVICE_ATTR_2(_name, 0200, NULL, _func##_store, \
+ _nr, _index)
+
#endif /* _LINUX_HWMON_SYSFS_H */
diff --git a/include/linux/hwmon-vid.h b/include/linux/hwmon-vid.h
index da0a680e2f6d..9409e1d207ef 100644
--- a/include/linux/hwmon-vid.h
+++ b/include/linux/hwmon-vid.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
hwmon-vid.h - VID/VRM/VRD voltage conversions
@@ -5,19 +6,6 @@
Copyright (c) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
With assistance from Trent Piepho <xyzzy@speakeasy.org>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _LINUX_HWMON_VID_H
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index ceb751987c40..492dd27a5dd8 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
hwmon.h - part of lm_sensors, Linux kernel modules for hardware monitoring
@@ -6,9 +7,6 @@
Copyright (C) 2005 Mark M. Hoffman <mhoffman@lightlink.com>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
*/
#ifndef _HWMON_H_
@@ -29,6 +27,8 @@ enum hwmon_sensor_types {
hwmon_humidity,
hwmon_fan,
hwmon_pwm,
+ hwmon_intrusion,
+ hwmon_max,
};
enum hwmon_chip_attributes {
@@ -39,6 +39,11 @@ enum hwmon_chip_attributes {
hwmon_chip_register_tz,
hwmon_chip_update_interval,
hwmon_chip_alarms,
+ hwmon_chip_samples,
+ hwmon_chip_curr_samples,
+ hwmon_chip_in_samples,
+ hwmon_chip_power_samples,
+ hwmon_chip_temp_samples,
};
#define HWMON_C_TEMP_RESET_HISTORY BIT(hwmon_chip_temp_reset_history)
@@ -48,9 +53,15 @@ enum hwmon_chip_attributes {
#define HWMON_C_REGISTER_TZ BIT(hwmon_chip_register_tz)
#define HWMON_C_UPDATE_INTERVAL BIT(hwmon_chip_update_interval)
#define HWMON_C_ALARMS BIT(hwmon_chip_alarms)
+#define HWMON_C_SAMPLES BIT(hwmon_chip_samples)
+#define HWMON_C_CURR_SAMPLES BIT(hwmon_chip_curr_samples)
+#define HWMON_C_IN_SAMPLES BIT(hwmon_chip_in_samples)
+#define HWMON_C_POWER_SAMPLES BIT(hwmon_chip_power_samples)
+#define HWMON_C_TEMP_SAMPLES BIT(hwmon_chip_temp_samples)
enum hwmon_temp_attributes {
- hwmon_temp_input = 0,
+ hwmon_temp_enable,
+ hwmon_temp_input,
hwmon_temp_type,
hwmon_temp_lcrit,
hwmon_temp_lcrit_hyst,
@@ -74,8 +85,11 @@ enum hwmon_temp_attributes {
hwmon_temp_lowest,
hwmon_temp_highest,
hwmon_temp_reset_history,
+ hwmon_temp_rated_min,
+ hwmon_temp_rated_max,
};
+#define HWMON_T_ENABLE BIT(hwmon_temp_enable)
#define HWMON_T_INPUT BIT(hwmon_temp_input)
#define HWMON_T_TYPE BIT(hwmon_temp_type)
#define HWMON_T_LCRIT BIT(hwmon_temp_lcrit)
@@ -92,6 +106,7 @@ enum hwmon_temp_attributes {
#define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm)
#define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm)
#define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm)
+#define HWMON_T_LCRIT_ALARM BIT(hwmon_temp_lcrit_alarm)
#define HWMON_T_EMERGENCY_ALARM BIT(hwmon_temp_emergency_alarm)
#define HWMON_T_FAULT BIT(hwmon_temp_fault)
#define HWMON_T_OFFSET BIT(hwmon_temp_offset)
@@ -99,8 +114,11 @@ enum hwmon_temp_attributes {
#define HWMON_T_LOWEST BIT(hwmon_temp_lowest)
#define HWMON_T_HIGHEST BIT(hwmon_temp_highest)
#define HWMON_T_RESET_HISTORY BIT(hwmon_temp_reset_history)
+#define HWMON_T_RATED_MIN BIT(hwmon_temp_rated_min)
+#define HWMON_T_RATED_MAX BIT(hwmon_temp_rated_max)
enum hwmon_in_attributes {
+ hwmon_in_enable,
hwmon_in_input,
hwmon_in_min,
hwmon_in_max,
@@ -116,8 +134,11 @@ enum hwmon_in_attributes {
hwmon_in_max_alarm,
hwmon_in_lcrit_alarm,
hwmon_in_crit_alarm,
+ hwmon_in_rated_min,
+ hwmon_in_rated_max,
};
+#define HWMON_I_ENABLE BIT(hwmon_in_enable)
#define HWMON_I_INPUT BIT(hwmon_in_input)
#define HWMON_I_MIN BIT(hwmon_in_min)
#define HWMON_I_MAX BIT(hwmon_in_max)
@@ -133,8 +154,11 @@ enum hwmon_in_attributes {
#define HWMON_I_MAX_ALARM BIT(hwmon_in_max_alarm)
#define HWMON_I_LCRIT_ALARM BIT(hwmon_in_lcrit_alarm)
#define HWMON_I_CRIT_ALARM BIT(hwmon_in_crit_alarm)
+#define HWMON_I_RATED_MIN BIT(hwmon_in_rated_min)
+#define HWMON_I_RATED_MAX BIT(hwmon_in_rated_max)
enum hwmon_curr_attributes {
+ hwmon_curr_enable,
hwmon_curr_input,
hwmon_curr_min,
hwmon_curr_max,
@@ -150,8 +174,11 @@ enum hwmon_curr_attributes {
hwmon_curr_max_alarm,
hwmon_curr_lcrit_alarm,
hwmon_curr_crit_alarm,
+ hwmon_curr_rated_min,
+ hwmon_curr_rated_max,
};
+#define HWMON_C_ENABLE BIT(hwmon_curr_enable)
#define HWMON_C_INPUT BIT(hwmon_curr_input)
#define HWMON_C_MIN BIT(hwmon_curr_min)
#define HWMON_C_MAX BIT(hwmon_curr_max)
@@ -167,8 +194,11 @@ enum hwmon_curr_attributes {
#define HWMON_C_MAX_ALARM BIT(hwmon_curr_max_alarm)
#define HWMON_C_LCRIT_ALARM BIT(hwmon_curr_lcrit_alarm)
#define HWMON_C_CRIT_ALARM BIT(hwmon_curr_crit_alarm)
+#define HWMON_C_RATED_MIN BIT(hwmon_curr_rated_min)
+#define HWMON_C_RATED_MAX BIT(hwmon_curr_rated_max)
enum hwmon_power_attributes {
+ hwmon_power_enable,
hwmon_power_average,
hwmon_power_average_interval,
hwmon_power_average_interval_max,
@@ -186,15 +216,22 @@ enum hwmon_power_attributes {
hwmon_power_cap_hyst,
hwmon_power_cap_max,
hwmon_power_cap_min,
+ hwmon_power_min,
hwmon_power_max,
hwmon_power_crit,
+ hwmon_power_lcrit,
hwmon_power_label,
hwmon_power_alarm,
hwmon_power_cap_alarm,
+ hwmon_power_min_alarm,
hwmon_power_max_alarm,
+ hwmon_power_lcrit_alarm,
hwmon_power_crit_alarm,
+ hwmon_power_rated_min,
+ hwmon_power_rated_max,
};
+#define HWMON_P_ENABLE BIT(hwmon_power_enable)
#define HWMON_P_AVERAGE BIT(hwmon_power_average)
#define HWMON_P_AVERAGE_INTERVAL BIT(hwmon_power_average_interval)
#define HWMON_P_AVERAGE_INTERVAL_MAX BIT(hwmon_power_average_interval_max)
@@ -212,23 +249,32 @@ enum hwmon_power_attributes {
#define HWMON_P_CAP_HYST BIT(hwmon_power_cap_hyst)
#define HWMON_P_CAP_MAX BIT(hwmon_power_cap_max)
#define HWMON_P_CAP_MIN BIT(hwmon_power_cap_min)
+#define HWMON_P_MIN BIT(hwmon_power_min)
#define HWMON_P_MAX BIT(hwmon_power_max)
+#define HWMON_P_LCRIT BIT(hwmon_power_lcrit)
#define HWMON_P_CRIT BIT(hwmon_power_crit)
#define HWMON_P_LABEL BIT(hwmon_power_label)
#define HWMON_P_ALARM BIT(hwmon_power_alarm)
#define HWMON_P_CAP_ALARM BIT(hwmon_power_cap_alarm)
+#define HWMON_P_MIN_ALARM BIT(hwmon_power_min_alarm)
#define HWMON_P_MAX_ALARM BIT(hwmon_power_max_alarm)
+#define HWMON_P_LCRIT_ALARM BIT(hwmon_power_lcrit_alarm)
#define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm)
+#define HWMON_P_RATED_MIN BIT(hwmon_power_rated_min)
+#define HWMON_P_RATED_MAX BIT(hwmon_power_rated_max)
enum hwmon_energy_attributes {
+ hwmon_energy_enable,
hwmon_energy_input,
hwmon_energy_label,
};
+#define HWMON_E_ENABLE BIT(hwmon_energy_enable)
#define HWMON_E_INPUT BIT(hwmon_energy_input)
#define HWMON_E_LABEL BIT(hwmon_energy_label)
enum hwmon_humidity_attributes {
+ hwmon_humidity_enable,
hwmon_humidity_input,
hwmon_humidity_label,
hwmon_humidity_min,
@@ -237,8 +283,11 @@ enum hwmon_humidity_attributes {
hwmon_humidity_max_hyst,
hwmon_humidity_alarm,
hwmon_humidity_fault,
+ hwmon_humidity_rated_min,
+ hwmon_humidity_rated_max,
};
+#define HWMON_H_ENABLE BIT(hwmon_humidity_enable)
#define HWMON_H_INPUT BIT(hwmon_humidity_input)
#define HWMON_H_LABEL BIT(hwmon_humidity_label)
#define HWMON_H_MIN BIT(hwmon_humidity_min)
@@ -247,8 +296,11 @@ enum hwmon_humidity_attributes {
#define HWMON_H_MAX_HYST BIT(hwmon_humidity_max_hyst)
#define HWMON_H_ALARM BIT(hwmon_humidity_alarm)
#define HWMON_H_FAULT BIT(hwmon_humidity_fault)
+#define HWMON_H_RATED_MIN BIT(hwmon_humidity_rated_min)
+#define HWMON_H_RATED_MAX BIT(hwmon_humidity_rated_max)
enum hwmon_fan_attributes {
+ hwmon_fan_enable,
hwmon_fan_input,
hwmon_fan_label,
hwmon_fan_min,
@@ -262,6 +314,7 @@ enum hwmon_fan_attributes {
hwmon_fan_fault,
};
+#define HWMON_F_ENABLE BIT(hwmon_fan_enable)
#define HWMON_F_INPUT BIT(hwmon_fan_input)
#define HWMON_F_LABEL BIT(hwmon_fan_label)
#define HWMON_F_MIN BIT(hwmon_fan_min)
@@ -279,12 +332,21 @@ enum hwmon_pwm_attributes {
hwmon_pwm_enable,
hwmon_pwm_mode,
hwmon_pwm_freq,
+ hwmon_pwm_auto_channels_temp,
};
#define HWMON_PWM_INPUT BIT(hwmon_pwm_input)
#define HWMON_PWM_ENABLE BIT(hwmon_pwm_enable)
#define HWMON_PWM_MODE BIT(hwmon_pwm_mode)
#define HWMON_PWM_FREQ BIT(hwmon_pwm_freq)
+#define HWMON_PWM_AUTO_CHANNELS_TEMP BIT(hwmon_pwm_auto_channels_temp)
+
+enum hwmon_intrusion_attributes {
+ hwmon_intrusion_alarm,
+ hwmon_intrusion_beep,
+};
+#define HWMON_INTRUSION_ALARM BIT(hwmon_intrusion_alarm)
+#define HWMON_INTRUSION_BEEP BIT(hwmon_intrusion_beep)
/**
* struct hwmon_ops - hwmon device operations
@@ -343,7 +405,7 @@ struct hwmon_ops {
};
/**
- * Channel information
+ * struct hwmon_channel_info - Channel information
* @type: Channel type.
* @config: Pointer to NULL-terminated list of channel parameters.
* Use for per-channel attributes.
@@ -353,19 +415,31 @@ struct hwmon_channel_info {
const u32 *config;
};
+#define HWMON_CHANNEL_INFO(stype, ...) \
+ (&(struct hwmon_channel_info) { \
+ .type = hwmon_##stype, \
+ .config = (u32 []) { \
+ __VA_ARGS__, 0 \
+ } \
+ })
+
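A sketch of the intended usage, declaring a chip attribute set plus two temperature channels (demo_hwmon_ops is hypothetical):

static const struct hwmon_channel_info * const demo_info[] = {
	HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
	HWMON_CHANNEL_INFO(temp,
			   HWMON_T_INPUT | HWMON_T_MAX,
			   HWMON_T_INPUT),
	NULL
};

static const struct hwmon_chip_info demo_chip_info = {
	.ops  = &demo_hwmon_ops,
	.info = demo_info,
};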
/**
- * Chip configuration
+ * struct hwmon_chip_info - Chip configuration
* @ops: Pointer to hwmon operations.
* @info: Null-terminated list of channel information.
*/
struct hwmon_chip_info {
const struct hwmon_ops *ops;
- const struct hwmon_channel_info **info;
+ const struct hwmon_channel_info * const *info;
};
/* hwmon_device_register() is deprecated */
struct device *hwmon_device_register(struct device *dev);
+/*
+ * hwmon_device_register_with_groups() and
+ * devm_hwmon_device_register_with_groups() are deprecated.
+ */
struct device *
hwmon_device_register_with_groups(struct device *dev, const char *name,
void *drvdata,
@@ -380,6 +454,9 @@ hwmon_device_register_with_info(struct device *dev,
const struct hwmon_chip_info *info,
const struct attribute_group **extra_groups);
struct device *
+hwmon_device_register_for_thermal(struct device *dev, const char *name,
+ void *drvdata);
+struct device *
devm_hwmon_device_register_with_info(struct device *dev,
const char *name, void *drvdata,
const struct hwmon_chip_info *info,
@@ -388,4 +465,33 @@ devm_hwmon_device_register_with_info(struct device *dev,
void hwmon_device_unregister(struct device *dev);
void devm_hwmon_device_unregister(struct device *dev);
+int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel);
+
+char *hwmon_sanitize_name(const char *name);
+char *devm_hwmon_sanitize_name(struct device *dev, const char *name);
+
+/**
+ * hwmon_is_bad_char - Is the char invalid in a hwmon name
+ * @ch: the char to be considered
+ *
+ * hwmon_is_bad_char() can be used to determine if the given character
+ * may not be used in a hwmon name.
+ *
+ * Returns true if the char is invalid, false otherwise.
+ */
+static inline bool hwmon_is_bad_char(const char ch)
+{
+ switch (ch) {
+ case '-':
+ case '*':
+ case ' ':
+ case '\t':
+ case '\n':
+ return true;
+ default:
+ return false;
+ }
+}
+
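hwmon_sanitize_name() is assumed to apply exactly this predicate, replacing offending characters with '_'; an open-coded sketch of the same idea (raw_name hypothetical):

	char *p, *name = devm_kstrdup(dev, raw_name, GFP_KERNEL);

	for (p = name; p && *p; p++)
		if (hwmon_is_bad_char(*p))
			*p = '_';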
#endif
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
index 859d673d98c8..bfe7c1f1ac6d 100644
--- a/include/linux/hwspinlock.h
+++ b/include/linux/hwspinlock.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Hardware spinlock public header
*
* Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
*
* Contact: Ohad Ben-Cohen <ohad@wizery.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_HWSPINLOCK_H
@@ -22,8 +14,10 @@
#include <linux/sched.h>
/* hwspinlock mode argument */
-#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
-#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
+#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
+#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
+#define HWLOCK_RAW		0x03	/* No SW lock; caller provides serialization */
+#define HWLOCK_IN_ATOMIC 0x04 /* Called while in atomic context */
struct device;
struct device_node;
@@ -59,7 +53,7 @@ struct hwspinlock_pdata {
int base_id;
};
-#if defined(CONFIG_HWSPINLOCK) || defined(CONFIG_HWSPINLOCK_MODULE)
+#ifdef CONFIG_HWSPINLOCK
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
const struct hwspinlock_ops *ops, int base_id, int num_locks);
@@ -73,6 +67,17 @@ int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
unsigned long *);
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
+int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
+int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
+struct hwspinlock *devm_hwspin_lock_request(struct device *dev);
+struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
+ unsigned int id);
+int devm_hwspin_lock_unregister(struct device *dev,
+ struct hwspinlock_device *bank);
+int devm_hwspin_lock_register(struct device *dev,
+ struct hwspinlock_device *bank,
+ const struct hwspinlock_ops *ops,
+ int base_id, int num_locks);
#else /* !CONFIG_HWSPINLOCK */
@@ -132,6 +137,30 @@ static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
return 0;
}
+static inline
+int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
+{
+ return 0;
+}
+
+static inline
+int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
+{
+ return 0;
+}
+
+static inline struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline
+struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
+ unsigned int id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
#endif /* !CONFIG_HWSPINLOCK */
/**
@@ -176,6 +205,42 @@ static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
}
/**
+ * hwspin_trylock_raw() - attempt to lock a specific hwspinlock
+ * @hwlock: an hwspinlock which we want to trylock
+ *
+ * This function attempts to lock an hwspinlock, and will immediately fail
+ * if the hwspinlock is already taken.
+ *
+ * Caution: the caller must protect the hardware-lock acquisition path
+ * with a mutex or spinlock to avoid deadlock; doing so allows
+ * time-consuming or sleepable operations to be performed while the
+ * hardware lock is held.
+ *
+ * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
+ * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
+ */
+static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
+{
+ return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
+}
+
+/**
+ * hwspin_trylock_in_atomic() - attempt to lock a specific hwspinlock
+ * @hwlock: an hwspinlock which we want to trylock
+ *
+ * This function attempts to lock an hwspinlock, and will immediately fail
+ * if the hwspinlock is already taken.
+ *
+ * This function shall be called only from an atomic context.
+ *
+ * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
+ * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
+ */
+static inline int hwspin_trylock_in_atomic(struct hwspinlock *hwlock)
+{
+ return __hwspin_trylock(hwlock, HWLOCK_IN_ATOMIC, NULL);
+}
+
+/**
* hwspin_trylock() - attempt to lock a specific hwspinlock
* @hwlock: an hwspinlock which we want to trylock
*
@@ -243,6 +308,51 @@ int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
}
/**
+ * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit
+ * @hwlock: the hwspinlock to be locked
+ * @to: timeout value in msecs
+ *
+ * This function locks the underlying @hwlock. If the @hwlock
+ * is already taken, the function will busy loop waiting for it to
+ * be released, but give up when @timeout msecs have elapsed.
+ *
+ * Caution: the caller must protect the hardware-lock acquisition path
+ * with a mutex or spinlock to avoid deadlock; doing so allows
+ * time-consuming or sleepable operations to be performed while the
+ * hardware lock is held.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
+ * busy after @timeout msecs). The function will never sleep.
+ */
+static inline
+int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
+{
+ return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
+}
+
+/**
+ * hwspin_lock_timeout_in_atomic() - lock an hwspinlock with timeout limit
+ * @hwlock: the hwspinlock to be locked
+ * @to: timeout value in msecs
+ *
+ * This function locks the underlying @hwlock. If the @hwlock
+ * is already taken, the function will busy loop waiting for it to
+ * be released, but give up when @timeout msecs have elapsed.
+ *
+ * This function shall be called only from an atomic context and the timeout
+ * value shall not exceed a few msecs.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
+ * busy after @timeout msecs). The function will never sleep.
+ */
+static inline
+int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to)
+{
+ return __hwspin_lock_timeout(hwlock, to, HWLOCK_IN_ATOMIC, NULL);
+}
+
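The protection pattern that the Caution paragraphs above require, as a sketch (driver_lock is a hypothetical software lock owned by the caller):

	spin_lock(&driver_lock);
	ret = hwspin_lock_timeout_raw(hwlock, 50);
	if (!ret) {
		/* ... access the cross-processor shared resource ... */
		hwspin_unlock_raw(hwlock);
	}
	spin_unlock(&driver_lock);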
+/**
* hwspin_lock_timeout() - lock an hwspinlock with timeout limit
* @hwlock: the hwspinlock to be locked
* @to: timeout value in msecs
@@ -302,6 +412,36 @@ static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
}
/**
+ * hwspin_unlock_raw() - unlock hwspinlock
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ *
+ * This function will unlock a specific hwspinlock.
+ *
+ * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
+ * this function: it is a bug to call unlock on a @hwlock that is already
+ * unlocked.
+ */
+static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
+{
+ __hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
+}
+
+/**
+ * hwspin_unlock_in_atomic() - unlock hwspinlock
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ *
+ * This function will unlock a specific hwspinlock.
+ *
+ * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
+ * this function: it is a bug to call unlock on a @hwlock that is already
+ * unlocked.
+ */
+static inline void hwspin_unlock_in_atomic(struct hwspinlock *hwlock)
+{
+ __hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL);
+}
+
+/**
* hwspin_unlock() - unlock hwspinlock
* @hwlock: a previously-acquired hwspinlock which we want to unlock
*
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index b7d7bbec74e0..bfbc37ce223b 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1,33 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
*
* Copyright (c) 2011, Microsoft Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
* Hank Janssen <hjanssen@microsoft.com>
* K. Y. Srinivasan <kys@microsoft.com>
- *
*/
#ifndef _HYPERV_H
#define _HYPERV_H
#include <uapi/linux/hyperv.h>
-#include <uapi/asm/hyperv.h>
+#include <linux/mm.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
@@ -36,12 +23,56 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
+#include <linux/reciprocal_div.h>
+#include <asm/hyperv-tlfs.h>
#define MAX_PAGE_BUFFER_COUNT 32
#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
#pragma pack(push, 1)
+/*
+ * GPADL types, which decide how the GPADL header is created.
+ *
+ * There is little difference between BUFFER and RING when PAGE_SIZE is
+ * the same as HV_HYP_PAGE_SIZE.
+ *
+ * If PAGE_SIZE is bigger than HV_HYP_PAGE_SIZE, each ring buffer header
+ * occupies a full PAGE_SIZE, but only its first HV_HYP_PAGE is put into
+ * the gpadl.  The number of HV_HYP_PAGEs and the index of each
+ * HV_HYP_PAGE therefore differ between the GPADL types; for example,
+ * if PAGE_SIZE is 64K:
+ *
+ * BUFFER:
+ *
+ * gva: |-- 64k --|-- 64k --| ... |
+ * gpa: | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k |
+ * index: 0 1 2 15 16 17 18 .. 31 32 ...
+ * | | ... | | | ... | ...
+ * v V V V V V
+ * gpadl: | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k | ... |
+ * index: 0 1 2 ... 15 16 17 18 .. 31 32 ...
+ *
+ * RING:
+ *
+ * | header | data | header | data |
+ * gva: |-- 64k --|-- 64k --| ... |-- 64k --|-- 64k --| ... |
+ * gpa: | 4k | .. | 4k | 4k | ... | 4k | ... | 4k | .. | 4k | .. | ... |
+ * index: 0 1 16 17 18 31 ... n n+1 n+16 ... 2n
+ * | / / / | / /
+ * | / / / | / /
+ * | / / ... / ... | / ... /
+ * | / / / | / /
+ * | / / / | / /
+ * V V V V V V v
+ * gpadl: | 4k | 4k | ... | ... | 4k | 4k | ... |
+ * index: 0 1 2 ... 16 ... n-15 n-14 n-13 ... 2n-30
+ */
+enum hv_gpadl_type {
+ HV_GPADL_BUFFER,
+ HV_GPADL_RING
+};
+
/* Single-page buffer */
struct hv_page_buffer {
u32 len;
@@ -89,18 +120,33 @@ struct hv_ring_buffer {
u32 interrupt_mask;
/*
- * Win8 uses some of the reserved bits to implement
- * interrupt driven flow management. On the send side
- * we can request that the receiver interrupt the sender
- * when the ring transitions from being full to being able
- * to handle a message of size "pending_send_sz".
+ * WS2012/Win8 and later versions of Hyper-V implement interrupt
+ * driven flow management. The feature bit feat_pending_send_sz
+ * is set by the host on the host->guest ring buffer, and by the
+ * guest on the guest->host ring buffer.
+ *
+ * The meaning of the feature bit is a bit complex in that it has
+ * semantics that apply to both ring buffers. If the guest sets
+ * the feature bit in the guest->host ring buffer, the guest is
+ * telling the host that:
+ * 1) It will set the pending_send_sz field in the guest->host ring
+ * buffer when it is waiting for space to become available, and
+ * 2) It will read the pending_send_sz field in the host->guest
+ * ring buffer and interrupt the host when it frees enough space
*
- * Add necessary state for this enhancement.
+ * Similarly, if the host sets the feature bit in the host->guest
+ * ring buffer, the host is telling the guest that:
+ * 1) It will set the pending_send_sz field in the host->guest ring
+ * buffer when it is waiting for space to become available, and
+ * 2) It will read the pending_send_sz field in the guest->host
+ * ring buffer and interrupt the guest when it frees enough space
+ *
+ * If either the guest or host does not set the feature bit that it
+ * owns, that guest or host must do polling if it encounters a full
+ * ring buffer, and not signal the other end with an interrupt.
*/
u32 pending_send_sz;
-
u32 reserved1[12];
-
union {
struct {
u32 feat_pending_send_sz:1;
@@ -109,49 +155,38 @@ struct hv_ring_buffer {
} feature_bits;
/* Pad it to PAGE_SIZE so that data starts on page boundary */
- u8 reserved2[4028];
+ u8 reserved2[PAGE_SIZE - 68];
/*
* Ring data starts here + RingDataStartOffset
* !!! DO NOT place any fields below this !!!
*/
- u8 buffer[0];
+ u8 buffer[];
} __packed;
+/* Calculate the proper size of a ringbuffer, it must be page-aligned */
+#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
+ (payload_sz))
+
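A usage sketch, sizing both rings for 16 KiB of payload each (demo_onchannelcallback() is hypothetical; vmbus_open() is the channel-open entry point declared elsewhere in this header, and SZ_16K comes from <linux/sizes.h>):

	ret = vmbus_open(channel,
			 VMBUS_RING_SIZE(SZ_16K),	/* send ring */
			 VMBUS_RING_SIZE(SZ_16K),	/* recv ring */
			 NULL, 0, demo_onchannelcallback, channel);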
struct hv_ring_buffer_info {
struct hv_ring_buffer *ring_buffer;
u32 ring_size; /* Include the shared header */
+ struct reciprocal_value ring_size_div10_reciprocal;
spinlock_t ring_lock;
u32 ring_datasize; /* < ring_size */
- u32 ring_data_startoffset;
- u32 priv_write_index;
u32 priv_read_index;
- u32 cached_read_index;
-};
-
-/*
- *
- * hv_get_ringbuffer_availbytes()
- *
- * Get number of bytes available to read and to write to
- * for the specified ring buffer
- */
-static inline void
-hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
- u32 *read, u32 *write)
-{
- u32 read_loc, write_loc, dsize;
+	/*
+	 * The ring buffer mutex lock, which prevents the ring buffer from
+	 * being freed while it is being accessed.
+	 */
+ struct mutex ring_buffer_mutex;
- /* Capture the read/write indices before they changed */
- read_loc = rbi->ring_buffer->read_index;
- write_loc = rbi->ring_buffer->write_index;
- dsize = rbi->ring_datasize;
+ /* Buffer that holds a copy of an incoming host packet */
+ void *pkt_buffer;
+ u32 pkt_buffer_size;
+};
- *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
- read_loc - write_loc;
- *read = dsize - *write;
-}
static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
{
@@ -180,39 +215,46 @@ static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
return write;
}
-static inline u32 hv_get_cached_bytes_to_write(
- const struct hv_ring_buffer_info *rbi)
+static inline u32 hv_get_avail_to_write_percent(
+ const struct hv_ring_buffer_info *rbi)
{
- u32 read_loc, write_loc, dsize, write;
-
- dsize = rbi->ring_datasize;
- read_loc = rbi->cached_read_index;
- write_loc = rbi->ring_buffer->write_index;
+ u32 avail_write = hv_get_bytes_to_write(rbi);
- write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
- read_loc - write_loc;
- return write;
+ return reciprocal_divide(
+ (avail_write << 3) + (avail_write << 1),
+ rbi->ring_size_div10_reciprocal);
}
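Arithmetic note: the helper computes avail_write * 100 / ring_size as (avail_write * 10) / (ring_size / 10). The shift-add (x << 3) + (x << 1) forms 10 * x without a multiply, and reciprocal_divide() replaces the runtime division with a multiply by the precomputed reciprocal of ring_size / 10. For a 64 KiB ring with 32 KiB writable: 32768 * 10 / 6553 ≈ 50, i.e. 50 percent.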
+
/*
 * The VMBUS version is a 32-bit entity broken up into
 * two 16-bit quantities: major_number.minor_number.
*
* 0 . 13 (Windows Server 2008)
- * 1 . 1 (Windows 7)
- * 2 . 4 (Windows 8)
- * 3 . 0 (Windows 8 R2)
+ * 1 . 1 (Windows 7, WS2008 R2)
+ * 2 . 4 (Windows 8, WS2012)
+ * 3 . 0 (Windows 8.1, WS2012 R2)
* 4 . 0 (Windows 10)
+ * 4 . 1 (Windows 10 RS3)
+ * 5 . 0 (Newer Windows 10)
+ * 5 . 1 (Windows 10 RS4)
+ * 5 . 2 (Windows Server 2019, RS5)
+ * 5 . 3 (Windows Server 2022)
+ *
+ * The WS2008 and WIN7 versions are listed here for
+ * completeness but are no longer supported in the
+ * Linux kernel.
*/
#define VERSION_WS2008 ((0 << 16) | (13))
#define VERSION_WIN7 ((1 << 16) | (1))
#define VERSION_WIN8 ((2 << 16) | (4))
#define VERSION_WIN8_1 ((3 << 16) | (0))
-#define VERSION_WIN10 ((4 << 16) | (0))
-
-#define VERSION_INVAL -1
-
-#define VERSION_CURRENT VERSION_WIN10
+#define VERSION_WIN10 ((4 << 16) | (0))
+#define VERSION_WIN10_V4_1 ((4 << 16) | (1))
+#define VERSION_WIN10_V5 ((5 << 16) | (0))
+#define VERSION_WIN10_V5_1 ((5 << 16) | (1))
+#define VERSION_WIN10_V5_2 ((5 << 16) | (2))
+#define VERSION_WIN10_V5_3 ((5 << 16) | (3))
/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
@@ -232,8 +274,8 @@ static inline u32 hv_get_cached_bytes_to_write(
* struct contains the fundamental information about an offer.
*/
struct vmbus_channel_offer {
- uuid_le if_type;
- uuid_le if_instance;
+ guid_t if_type;
+ guid_t if_instance;
/*
* These two fields are not currently used.
@@ -252,7 +294,7 @@ struct vmbus_channel_offer {
/*
* Pipes:
- * The following sructure is an integrated pipe protocol, which
+ * The following structure is an integrated pipe protocol, which
* is implemented on top of standard user-defined data. Pipe
* clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
* use.
@@ -263,7 +305,10 @@ struct vmbus_channel_offer {
} pipe;
} u;
/*
- * The sub_channel_index is defined in win8.
+ * The sub_channel_index is defined in Win8: a value of zero means a
+ * primary channel and a value of non-zero means a sub-channel.
+ *
+ * Before Win8, the field is reserved, meaning it's always zero.
*/
u16 sub_channel_index;
u16 reserved3;
@@ -326,7 +371,7 @@ struct vmadd_remove_transfer_page_set {
struct gpa_range {
u32 byte_count;
u32 byte_offset;
- u64 pfn_array[0];
+ u64 pfn_array[];
};
/*
@@ -438,9 +483,15 @@ enum vmbus_channel_message_type {
CHANNELMSG_19 = 19,
CHANNELMSG_20 = 20,
CHANNELMSG_TL_CONNECT_REQUEST = 21,
+ CHANNELMSG_MODIFYCHANNEL = 22,
+ CHANNELMSG_TL_CONNECT_RESULT = 23,
+ CHANNELMSG_MODIFYCHANNEL_RESPONSE = 24,
CHANNELMSG_COUNT
};
+/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */
+#define INVALID_RELID U32_MAX
+
struct vmbus_channel_message_header {
enum vmbus_channel_message_type msgtype;
u32 padding;
@@ -491,12 +542,6 @@ struct vmbus_channel_rescind_offer {
u32 child_relid;
} __packed;
-static inline u32
-hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
-{
- return rbi->ring_buffer->pending_send_sz;
-}
-
/*
* Request Offer -- no parameters, SynIC message contains the partition ID
* Set Snoop -- no parameters, SynIC message contains the partition ID
@@ -548,6 +593,13 @@ struct vmbus_channel_open_result {
u32 status;
} __packed;
+/* Modify Channel Result parameters */
+struct vmbus_channel_modifychannel_response {
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+ u32 status;
+} __packed;
+
/* Close channel parameters; */
struct vmbus_channel_close_channel {
struct vmbus_channel_message_header header;
@@ -571,7 +623,7 @@ struct vmbus_channel_gpadl_header {
u32 gpadl;
u16 range_buflen;
u16 rangecount;
- struct gpa_range range[0];
+ struct gpa_range range[];
} __packed;
/* This is the followup packet that contains more PFNs. */
@@ -579,7 +631,7 @@ struct vmbus_channel_gpadl_body {
struct vmbus_channel_message_header header;
u32 msgnumber;
u32 gpadl;
- u64 pfn[0];
+ u64 pfn[];
} __packed;
struct vmbus_channel_gpadl_created {
@@ -609,7 +661,14 @@ struct vmbus_channel_initiate_contact {
struct vmbus_channel_message_header header;
u32 vmbus_version_requested;
u32 target_vcpu; /* The VCPU the host should respond to */
- u64 interrupt_page;
+ union {
+ u64 interrupt_page;
+ struct {
+ u8 msg_sint;
+ u8 padding1[3];
+ u32 padding2;
+ };
+ };
u64 monitor_page1;
u64 monitor_page2;
} __packed;
@@ -617,13 +676,33 @@ struct vmbus_channel_initiate_contact {
/* Hyper-V socket: guest's connect()-ing to host */
struct vmbus_channel_tl_connect_request {
struct vmbus_channel_message_header header;
- uuid_le guest_endpoint_id;
- uuid_le host_service_id;
+ guid_t guest_endpoint_id;
+ guid_t host_service_id;
+} __packed;
+
+/* Modify Channel parameters, cf. vmbus_send_modifychannel() */
+struct vmbus_channel_modifychannel {
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+ u32 target_vp;
} __packed;
struct vmbus_channel_version_response {
struct vmbus_channel_message_header header;
u8 version_supported;
+
+ u8 connection_state;
+ u16 padding;
+
+ /*
+ * On new hosts that support VMBus protocol 5.0, we must use
+ * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
+ * and for subsequent messages, we must use the Message Connection ID
+ * field in the host-returned Version Response Message.
+ *
+ * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
+ */
+ u32 msg_conn_id;
} __packed;
enum vmbus_channel_state {
@@ -653,6 +732,7 @@ struct vmbus_channel_msginfo {
struct vmbus_channel_gpadl_torndown gpadl_torndown;
struct vmbus_channel_gpadl_created gpadl_created;
struct vmbus_channel_version_response version_response;
+ struct vmbus_channel_modifychannel_response modify_response;
} response;
u32 msgsize;
@@ -660,7 +740,7 @@ struct vmbus_channel_msginfo {
* The channel message that goes out on the "wire".
* It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
*/
- unsigned char msg[0];
+ unsigned char msg[];
};
struct vmbus_close_msg {
@@ -677,23 +757,6 @@ union hv_connection_id {
} u;
};
-/* Definition of the hv_signal_event hypercall input structure. */
-struct hv_input_signal_event {
- union hv_connection_id connectionid;
- u16 flag_number;
- u16 rsvdz;
-};
-
-struct hv_input_signal_event_buffer {
- u64 align8;
- struct hv_input_signal_event event;
-};
-
-enum hv_numa_policy {
- HV_BALANCED = 0,
- HV_LOCALIZED,
-};
-
enum vmbus_device_type {
HV_IDE = 0,
HV_SCSI,
@@ -714,10 +777,41 @@ enum vmbus_device_type {
HV_UNKNOWN,
};
+/*
+ * Provides request ids for VMBus. Encapsulates guest memory
+ * addresses and stores the next available slot in req_arr
+ * to generate new ids in constant time.
+ */
+struct vmbus_requestor {
+ u64 *req_arr;
+ unsigned long *req_bitmap; /* is a given slot available? */
+ u32 size;
+ u64 next_request_id;
+ spinlock_t req_lock; /* provides atomicity */
+};
+
+#define VMBUS_NO_RQSTOR U64_MAX
+#define VMBUS_RQST_ERROR (U64_MAX - 1)
+#define VMBUS_RQST_ADDR_ANY U64_MAX
+/* NetVSC-specific */
+#define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2)
+/* StorVSC-specific */
+#define VMBUS_RQST_INIT (U64_MAX - 2)
+#define VMBUS_RQST_RESET (U64_MAX - 3)
+
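Conceptually, an ID is allocated by popping a free slot, parking the guest address in it, and handing the slot index back to the caller. A simplified sketch, assuming the free slots are threaded through req_arr as a free list (the real vmbus_next_request_id() in drivers/hv handles the remaining details):

static u64 requestor_alloc_id(struct vmbus_requestor *rqstor, u64 rqst_addr)
{
        unsigned long flags;
        u64 id;

        spin_lock_irqsave(&rqstor->req_lock, flags);
        id = rqstor->next_request_id;
        if (id >= rqstor->size) {       /* no free slot left */
                spin_unlock_irqrestore(&rqstor->req_lock, flags);
                return VMBUS_NO_RQSTOR;
        }
        rqstor->next_request_id = rqstor->req_arr[id]; /* pop the free list */
        rqstor->req_arr[id] = rqst_addr;  /* remember the guest address */
        __set_bit(id, rqstor->req_bitmap); /* mark the slot as in use */
        spin_unlock_irqrestore(&rqstor->req_lock, flags);

        return id;
}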
struct vmbus_device {
u16 dev_type;
- uuid_le guid;
+ guid_t guid;
bool perf_device;
+ bool allowed_in_isolated;
+};
+
+#define VMBUS_DEFAULT_MAX_PKT_SIZE 4096
+
+struct vmbus_gpadl {
+ u32 gpadl_handle;
+ u32 size;
+ void *buffer;
};
struct vmbus_channel {
@@ -736,22 +830,51 @@ struct vmbus_channel {
u8 monitor_bit;
bool rescind; /* got rescind msg */
+ bool rescind_ref; /* got rescind msg, got channel reference */
+ struct completion rescind_event;
- u32 ringbuffer_gpadlhandle;
+ struct vmbus_gpadl ringbuffer_gpadlhandle;
/* Allocated memory for ring buffer */
- void *ringbuffer_pages;
+ struct page *ringbuffer_page;
u32 ringbuffer_pagecount;
+ u32 ringbuffer_send_offset;
struct hv_ring_buffer_info outbound; /* send to parent */
struct hv_ring_buffer_info inbound; /* receive from parent */
struct vmbus_close_msg close_msg;
+ /* Statistics */
+ u64 interrupts; /* Host to Guest interrupts */
+ u64 sig_events; /* Guest to Host events */
+
+ /*
+ * Guest to host interrupts caused by the outbound ring buffer changing
+ * from empty to not empty.
+ */
+ u64 intr_out_empty;
+
+ /*
+ * Indicates that a full outbound ring buffer was encountered. The flag
+ * is set when a full outbound ring buffer is encountered and cleared
+ * when a write to the outbound ring buffer completes.
+ */
+ bool out_full_flag;
+
/* Channel callback's invoked in softirq context */
struct tasklet_struct callback_event;
void (*onchannel_callback)(void *context);
void *channel_callback_context;
+ void (*change_target_cpu_callback)(struct vmbus_channel *channel,
+ u32 old, u32 new);
+
+ /*
+ * Synchronize channel scheduling and channel removal; see the inline
+ * comments in vmbus_chan_sched() and vmbus_reset_channel_cb().
+ */
+ spinlock_t sched_lock;
+
/*
* A channel can be marked for one of three modes of reading:
 * BATCHED - callback called from tasklet and should read
@@ -770,34 +893,27 @@ struct vmbus_channel {
} callback_mode;
bool is_dedicated_interrupt;
- struct hv_input_signal_event_buffer sig_buf;
- struct hv_input_signal_event *sig_event;
+ u64 sig_event;
/*
- * Starting with win8, this field will be used to specify
- * the target virtual processor on which to deliver the interrupt for
- * the host to guest communication.
- * Prior to win8, incoming channel interrupts would only
- * be delivered on cpu 0. Setting this value to 0 would
- * preserve the earlier behavior.
+ * Starting with win8, this field will be used to specify the
+ * target CPU on which to deliver the interrupt for the host
+ * to guest communication.
+ *
+ * Prior to win8, incoming channel interrupts would only be
+ * delivered on CPU 0. Setting this value to 0 would preserve
+ * the earlier behavior.
*/
- u32 target_vp;
- /* The corresponding CPUID in the guest */
u32 target_cpu;
/*
- * State to manage the CPU affiliation of channels.
- */
- struct cpumask alloced_cpus_in_node;
- int numa_node;
- /*
* Support for sub-channels. For high performance devices,
* it will be useful to have multiple sub-channels to support
* a scalable communication infrastructure with the host.
- * The support for sub-channels is implemented as an extention
+ * The support for sub-channels is implemented as an extension
* to the current infrastructure.
* The initial offer is considered the primary channel and this
* offer message will indicate if the host supports sub-channels.
- * The guest is free to ask for sub-channels to be offerred and can
+ * The guest is free to ask for sub-channels to be offered and can
* open these sub-channels as a normal "primary" channel. However,
* all sub-channels will have the same type and instance guids as the
* primary channel. Requests sent on a given channel will result in a
@@ -818,25 +934,10 @@ struct vmbus_channel {
void (*chn_rescind_callback)(struct vmbus_channel *channel);
/*
- * The spinlock to protect the structure. It is being used to protect
- * test-and-set access to various attributes of the structure as well
- * as all sc_list operations.
- */
- spinlock_t lock;
- /*
* All Sub-channels of a primary channel are linked here.
*/
struct list_head sc_list;
/*
- * Current number of sub-channels.
- */
- int num_sc;
- /*
- * Number of a sub-channel (position within sc_list) which is supposed
- * to be used as the next outgoing channel.
- */
- int next_oc;
- /*
* The primary channel this sub-channel belongs to.
* This will be NULL for the primary channel.
*/
@@ -845,11 +946,6 @@ struct vmbus_channel {
* Support per-channel state for use by vmbus drivers.
*/
void *per_channel_state;
- /*
- * To support per-cpu lookup mapping of relid to channel,
- * link up channels based on their CPU affinity.
- */
- struct list_head percpu_list;
/*
* Defer freeing channel until after all cpu's have
@@ -858,6 +954,11 @@ struct vmbus_channel {
struct rcu_head rcu;
/*
+ * For sysfs per-channel properties.
+ */
+ struct kobject kobj;
+
+ /*
* For performance critical channels (storage, networking
* etc,), Hyper-V has a mechanism to enhance the throughput
* at the expense of latency:
@@ -868,7 +969,7 @@ struct vmbus_channel {
* mechanism improves throughput by:
*
* A) Making the host more efficient - each time it wakes up,
- * potentially it will process morev number of packets. The
+ * potentially it will process more packets. The
* monitor latency allows a batch to build up.
* B) By deferring the hypercall to signal, we will also minimize
* the interrupts.
@@ -876,37 +977,110 @@ struct vmbus_channel {
* Clearly, these optimizations improve throughput at the expense of
* latency. Furthermore, since the channel is shared for both
* control and data messages, control messages currently suffer
- * unnecessary latency adversley impacting performance and boot
+ * unnecessary latency adversely impacting performance and boot
* time. To fix this issue, permit tagging the channel as being
* in "low latency" mode. In this mode, we will bypass the monitor
* mechanism.
*/
bool low_latency;
+ bool probe_done;
+
+ /*
+ * Cache the device ID here for easy access; this is useful, in
+ * particular, in situations where the channel's device_obj has
+ * not been allocated/initialized yet.
+ */
+ u16 device_id;
+
+ /*
+ * We must offload the handling of the primary/sub channels
+ * from the single-threaded vmbus_connection.work_queue to
+ * two different workqueues, otherwise we can block
+ * vmbus_connection.work_queue and hang: see vmbus_process_offer().
+ */
+ struct work_struct add_channel_work;
+
+ /*
+ * Guest to host interrupts caused by the inbound ring buffer changing
+ * from full to not full while a packet is waiting.
+ */
+ u64 intr_in_full;
+
+ /*
+ * The total number of write operations that encountered a full
+ * outbound ring buffer.
+ */
+ u64 out_full_total;
+
/*
- * NUMA distribution policy:
- * We support teo policies:
- * 1) Balanced: Here all performance critical channels are
- * distributed evenly amongst all the NUMA nodes.
- * This policy will be the default policy.
- * 2) Localized: All channels of a given instance of a
- * performance critical service will be assigned CPUs
- * within a selected NUMA node.
+ * The number of write operations that were the first to encounter a
+ * full outbound ring buffer.
*/
- enum hv_numa_policy affinity_policy;
+ u64 out_full_first;
+ /* enabling/disabling fuzz testing on the channel (default is false) */
+ bool fuzz_testing_state;
+
+ /*
+ * Interrupt delay will delay the guest from emptying the ring buffer
+ * for a specific amount of time. The delay is in microseconds and will
+ * be between 1 and a maximum of 1000; its default is 0 (no delay).
+ * The message delay will delay guest reading on a per-message basis
+ * in microseconds, between 1 and 1000, with the default being 0
+ * (no delay).
+ */
+ u32 fuzz_testing_interrupt_delay;
+ u32 fuzz_testing_message_delay;
+
+ /* callback to generate a request ID from a request address */
+ u64 (*next_request_id_callback)(struct vmbus_channel *channel, u64 rqst_addr);
+ /* callback to retrieve a request address from a request ID */
+ u64 (*request_addr_callback)(struct vmbus_channel *channel, u64 rqst_id);
+
+ /* request/transaction ids for VMBus */
+ struct vmbus_requestor requestor;
+ u32 rqstor_size;
+
+ /* The max size of a packet on this channel */
+ u32 max_pkt_size;
};
+#define lock_requestor(channel, flags) \
+do { \
+ struct vmbus_requestor *rqstor = &(channel)->requestor; \
+ \
+ spin_lock_irqsave(&rqstor->req_lock, flags); \
+} while (0)
+
+static __always_inline void unlock_requestor(struct vmbus_channel *channel,
+ unsigned long flags)
+{
+ struct vmbus_requestor *rqstor = &channel->requestor;
+
+ spin_unlock_irqrestore(&rqstor->req_lock, flags);
+}
+
+u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr);
+u64 __vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
+ u64 rqst_addr);
+u64 vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
+ u64 rqst_addr);
+u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id);
+
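A driver opts into ID-based request tracking by pointing the channel's hooks at these generic helpers before opening it; a hypothetical setup (the queue depth is illustrative):

channel->rqstor_size = 512;     /* illustrative: matches the driver's queue depth */
channel->next_request_id_callback = vmbus_next_request_id;
channel->request_addr_callback = vmbus_request_addr;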
+static inline bool is_hvsock_offer(const struct vmbus_channel_offer_channel *o)
+{
+ return !!(o->offer.chn_flags & VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
+}
+
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
- return !!(c->offermsg.offer.chn_flags &
- VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
+ return is_hvsock_offer(&c->offermsg);
}
-static inline void set_channel_affinity_state(struct vmbus_channel *c,
- enum hv_numa_policy policy)
+static inline bool is_sub_channel(const struct vmbus_channel *c)
{
- c->affinity_policy = policy;
+ return c->offermsg.offer.sub_channel_index != 0;
}
static inline void set_channel_read_mode(struct vmbus_channel *c,
@@ -928,20 +1102,25 @@ static inline void *get_per_channel_state(struct vmbus_channel *c)
static inline void set_channel_pending_send_size(struct vmbus_channel *c,
u32 size)
{
- c->outbound.ring_buffer->pending_send_sz = size;
-}
+ unsigned long flags;
-static inline void set_low_latency_mode(struct vmbus_channel *c)
-{
- c->low_latency = true;
-}
+ if (size) {
+ spin_lock_irqsave(&c->outbound.ring_lock, flags);
+ ++c->out_full_total;
-static inline void clear_low_latency_mode(struct vmbus_channel *c)
-{
- c->low_latency = false;
+ if (!c->out_full_flag) {
+ ++c->out_full_first;
+ c->out_full_flag = true;
+ }
+ spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
+ } else {
+ c->out_full_flag = false;
+ }
+
+ c->outbound.ring_buffer->pending_send_sz = size;
}
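Setting pending_send_sz asks the reading side to signal once that much space frees up, so a writer can retry instead of spinning; a hypothetical writer-side pattern (sketch; buf, len and req_id are stand-ins, and the descriptor headroom is an assumption):

ret = vmbus_sendpacket(channel, buf, len, req_id, VM_PKT_DATA_INBAND, 0);
if (ret == -EAGAIN)     /* outbound ring full: ask for a wakeup */
        set_channel_pending_send_size(channel,
                                      len + sizeof(struct vmpacket_descriptor));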
-void vmbus_onmessage(void *context);
+void vmbus_onmessage(struct vmbus_channel_message_header *hdr);
int vmbus_request_offers(void);
@@ -955,27 +1134,6 @@ void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
void (*chn_rescind_cb)(struct vmbus_channel *));
-/*
- * Retrieve the (sub) channel on which to send an outgoing request.
- * When a primary channel has multiple sub-channels, we choose a
- * channel whose VCPU binding is closest to the VCPU on which
- * this call is being made.
- */
-struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary);
-
-/*
- * Check if sub-channels have already been offerred. This API will be useful
- * when the driver is unloaded after establishing sub-channels. In this case,
- * when the driver is re-loaded, the driver would have to check if the
- * subchannels have already been established before attempting to request
- * the creation of sub-channels.
- * This function returns TRUE to indicate that subchannels have already been
- * created.
- * This function should be invoked after setting the callback function for
- * sub-channel creation.
- */
-bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
-
/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
u16 type;
@@ -1012,6 +1170,14 @@ struct vmbus_packet_mpb_array {
struct hv_mpb_array range;
} __packed;
+int vmbus_alloc_ring(struct vmbus_channel *channel,
+ u32 send_size, u32 recv_size);
+void vmbus_free_ring(struct vmbus_channel *channel);
+
+int vmbus_connect_ring(struct vmbus_channel *channel,
+ void (*onchannel_callback)(void *context),
+ void *context);
+int vmbus_disconnect_ring(struct vmbus_channel *channel);
extern int vmbus_open(struct vmbus_channel *channel,
u32 send_ringbuffersize,
@@ -1023,14 +1189,14 @@ extern int vmbus_open(struct vmbus_channel *channel,
extern void vmbus_close(struct vmbus_channel *channel);
-extern int vmbus_sendpacket(struct vmbus_channel *channel,
+extern int vmbus_sendpacket_getid(struct vmbus_channel *channel,
void *buffer,
u32 bufferLen,
u64 requestid,
+ u64 *trans_id,
enum vmbus_packet_type type,
u32 flags);
-
-extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
+extern int vmbus_sendpacket(struct vmbus_channel *channel,
void *buffer,
u32 bufferLen,
u64 requestid,
@@ -1044,20 +1210,6 @@ extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
u32 bufferlen,
u64 requestid);
-extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
- struct hv_page_buffer pagebuffers[],
- u32 pagecount,
- void *buffer,
- u32 bufferlen,
- u64 requestid,
- u32 flags);
-
-extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
- struct hv_multipage_buffer *mpb,
- void *buffer,
- u32 bufferlen,
- u64 requestid);
-
extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
struct vmbus_packet_mpb_array *mpb,
u32 desc_size,
@@ -1068,10 +1220,12 @@ extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
void *kbuffer,
u32 size,
- u32 *gpadl_handle);
+ struct vmbus_gpadl *gpadl);
extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
- u32 gpadl_handle);
+ struct vmbus_gpadl *gpadl);
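With the bare u32 handle replaced by struct vmbus_gpadl, the handle, size and buffer travel together through establish/teardown; a hypothetical lifecycle (sketch; ring_buf and ring_size are stand-ins):

struct vmbus_gpadl gpadl = {};
int ret;

ret = vmbus_establish_gpadl(channel, ring_buf, ring_size, &gpadl);
if (ret)
        return ret;
/* ... pass gpadl.gpadl_handle to the host-facing protocol ... */
vmbus_teardown_gpadl(channel, &gpadl);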
+
+void vmbus_reset_channel_cb(struct vmbus_channel *channel);
extern int vmbus_recvpacket(struct vmbus_channel *channel,
void *buffer,
@@ -1107,7 +1261,7 @@ struct hv_driver {
bool hvsock;
/* the device type supported by this driver */
- uuid_le dev_type;
+ guid_t dev_type;
const struct hv_vmbus_device_id *id_table;
struct device_driver driver;
@@ -1119,31 +1273,43 @@ struct hv_driver {
} dynids;
int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
- int (*remove)(struct hv_device *);
+ void (*remove)(struct hv_device *dev);
void (*shutdown)(struct hv_device *);
+ int (*suspend)(struct hv_device *);
+ int (*resume)(struct hv_device *);
+
};
/* Base device object */
struct hv_device {
/* the device type id of this device */
- uuid_le dev_type;
+ guid_t dev_type;
/* the device instance id of this device */
- uuid_le dev_instance;
+ guid_t dev_instance;
u16 vendor_id;
u16 device_id;
struct device device;
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
struct vmbus_channel *channel;
+ struct kset *channels_kset;
+ struct device_dma_parameters dma_parms;
+ u64 dma_mask;
+
+ /* placeholder to keep track of the debugfs dir for the hv device */
+ struct dentry *debug_dir;
+
};
-static inline struct hv_device *device_to_hv_device(struct device *d)
-{
- return container_of(d, struct hv_device, device);
-}
+#define device_to_hv_device(d) container_of_const(d, struct hv_device, device)
static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
@@ -1168,8 +1334,11 @@ struct hv_ring_buffer_debug_info {
u32 bytes_avail_towrite;
};
-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
- struct hv_ring_buffer_debug_info *debug_info);
+
+int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
+ struct hv_ring_buffer_debug_info *debug_info);
+
+bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel);
/* Vmbus interface */
#define vmbus_driver_register(driver) \
@@ -1186,8 +1355,6 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
resource_size_t size, resource_size_t align,
bool fb_overlap_ok);
void vmbus_free_mmio(resource_size_t start, resource_size_t size);
-int vmbus_cpu_number_to_vp_number(int cpu_number);
-u64 hv_do_hypercall(u64 control, void *input, void *output);
/*
* GUID definitions of various offer types - services offered to the guest.
@@ -1198,102 +1365,102 @@ u64 hv_do_hypercall(u64 control, void *input, void *output);
* {f8615163-df3e-46c5-913f-f2d2f965ed0e}
*/
#define HV_NIC_GUID \
- .guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
- 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)
+ .guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
+ 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)
/*
* IDE GUID
* {32412632-86cb-44a2-9b5c-50d1417354f5}
*/
#define HV_IDE_GUID \
- .guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
- 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
+ .guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
+ 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
/*
* SCSI GUID
* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
*/
#define HV_SCSI_GUID \
- .guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
- 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
+ .guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
+ 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
/*
* Shutdown GUID
* {0e0b6031-5213-4934-818b-38d90ced39db}
*/
#define HV_SHUTDOWN_GUID \
- .guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
- 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)
+ .guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
+ 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)
/*
* Time Synch GUID
* {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
*/
#define HV_TS_GUID \
- .guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
- 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
+ .guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
+ 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
/*
* Heartbeat GUID
* {57164f39-9115-4e78-ab55-382f3bd5422d}
*/
#define HV_HEART_BEAT_GUID \
- .guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
- 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
+ .guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
+ 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
/*
* KVP GUID
* {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
*/
#define HV_KVP_GUID \
- .guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
- 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)
+ .guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
+ 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)
/*
* Dynamic memory GUID
* {525074dc-8985-46e2-8057-a307dc18a502}
*/
#define HV_DM_GUID \
- .guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
- 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
+ .guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
+ 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
/*
* Mouse GUID
* {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
*/
#define HV_MOUSE_GUID \
- .guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
- 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)
+ .guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
+ 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)
/*
* Keyboard GUID
* {f912ad6d-2b17-48ea-bd65-f927a61c7684}
*/
#define HV_KBD_GUID \
- .guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
- 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)
+ .guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
+ 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)
/*
* VSS (Backup/Restore) GUID
*/
#define HV_VSS_GUID \
- .guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
- 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
+ .guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
+ 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
/*
* Synthetic Video GUID
* {DA0A7802-E377-4aac-8E77-0558EB1073F8}
*/
#define HV_SYNTHVID_GUID \
- .guid = UUID_LE(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
- 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)
+ .guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
+ 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)
/*
* Synthetic FC GUID
* {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
*/
#define HV_SYNTHFC_GUID \
- .guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
- 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)
+ .guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
+ 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)
/*
* Guest File Copy Service
@@ -1301,16 +1468,16 @@ u64 hv_do_hypercall(u64 control, void *input, void *output);
*/
#define HV_FCOPY_GUID \
- .guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
- 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)
+ .guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
+ 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)
/*
* NetworkDirect. This is the guest RDMA service.
* {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
*/
#define HV_ND_GUID \
- .guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
- 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)
+ .guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
+ 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)
/*
* PCI Express Pass Through
@@ -1318,29 +1485,35 @@ u64 hv_do_hypercall(u64 control, void *input, void *output);
*/
#define HV_PCIE_GUID \
- .guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
- 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
+ .guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
+ 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
/*
- * Linux doesn't support the 3 devices: the first two are for
- * Automatic Virtual Machine Activation, and the third is for
- * Remote Desktop Virtualization.
+ * Linux doesn't support these 4 devices: the first two are for
+ * Automatic Virtual Machine Activation, the third is for
+ * Remote Desktop Virtualization, and the fourth is Initial
+ * Machine Configuration (IMC) used only by Windows guests.
* {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
* {3375baf4-9e15-4b30-b765-67acb10d607b}
* {276aacf4-ac15-426c-98dd-7521ad3f01fe}
+ * {c376c1c3-d276-48d2-90a9-c04748072c60}
*/
#define HV_AVMA1_GUID \
- .guid = UUID_LE(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
- 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)
+ .guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
+ 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)
#define HV_AVMA2_GUID \
- .guid = UUID_LE(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
- 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)
+ .guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
+ 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)
#define HV_RDV_GUID \
- .guid = UUID_LE(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
- 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
+ .guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
+ 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
+
+#define HV_IMC_GUID \
+ .guid = GUID_INIT(0xc376c1c3, 0xd276, 0x48d2, 0x90, 0xa9, \
+ 0xc0, 0x47, 0x48, 0x07, 0x2c, 0x60)
/*
* Common header for Hyper-V ICs
@@ -1352,6 +1525,7 @@ u64 hv_do_hypercall(u64 control, void *input, void *output);
#define ICMSGTYPE_SHUTDOWN 3
#define ICMSGTYPE_TIMESYNC 4
#define ICMSGTYPE_VSS 5
+#define ICMSGTYPE_FCOPY 7
#define ICMSGHDRFLAG_TRANSACTION 1
#define ICMSGHDRFLAG_REQUEST 2
@@ -1370,6 +1544,8 @@ struct hv_util_service {
void (*util_cb)(void *);
int (*util_init)(struct hv_util_service *);
void (*util_deinit)(void);
+ int (*util_pre_suspend)(void);
+ int (*util_pre_resume)(void);
};
struct vmbuspipe_hdr {
@@ -1393,11 +1569,17 @@ struct icmsg_hdr {
u8 reserved[2];
} __packed;
+#define IC_VERSION_NEGOTIATION_MAX_VER_COUNT 100
+#define ICMSG_HDR (sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr))
+#define ICMSG_NEGOTIATE_PKT_SIZE(icframe_vercnt, icmsg_vercnt) \
+ (ICMSG_HDR + sizeof(struct icmsg_negotiate) + \
+ (((icframe_vercnt) + (icmsg_vercnt)) * sizeof(struct ic_version)))
+
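For instance, a negotiate packet advertising two framework versions and two message versions is sized as (sketch):

u32 pkt_size = ICMSG_NEGOTIATE_PKT_SIZE(2, 2);
/* == ICMSG_HDR + sizeof(struct icmsg_negotiate) + 4 * sizeof(struct ic_version) */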
struct icmsg_negotiate {
u16 icframe_vercnt;
u16 icmsg_vercnt;
u32 reserved;
- struct ic_version icversion_data[1]; /* any size array */
+ struct ic_version icversion_data[]; /* any size array */
} __packed;
struct shutdown_msg_data {
@@ -1442,18 +1624,23 @@ struct ictimesync_ref_data {
struct hyperv_service_callback {
u8 msg_type;
char *log_msg;
- uuid_le data;
+ guid_t data;
struct vmbus_channel *channel;
void (*callback)(void *context);
};
+struct hv_dma_range {
+ dma_addr_t dma;
+ u32 mapping_size;
+};
+
#define MAX_SRV_VER 0x7ffffff
-extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
+extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, u32 buflen,
const int *fw_version, int fw_vercnt,
const int *srv_version, int srv_vercnt,
int *nego_fw_version, int *nego_srv_version);
-void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
+void hv_process_channel_removal(struct vmbus_channel *channel);
void vmbus_setevent(struct vmbus_channel *channel);
/*
@@ -1462,8 +1649,9 @@ void vmbus_setevent(struct vmbus_channel *channel);
extern __u32 vmbus_proto_version;
-int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
- const uuid_le *shv_host_servie_id);
+int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
+ const guid_t *shv_host_servie_id);
+int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp);
void vmbus_set_event(struct vmbus_channel *channel);
/* Get the start of the ring buffer. */
@@ -1474,55 +1662,6 @@ hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
}
/*
- * To optimize the flow management on the send-side,
- * when the sender is blocked because of lack of
- * sufficient space in the ring buffer, potential the
- * consumer of the ring buffer can signal the producer.
- * This is controlled by the following parameters:
- *
- * 1. pending_send_sz: This is the size in bytes that the
- * producer is trying to send.
- * 2. The feature bit feat_pending_send_sz set to indicate if
- * the consumer of the ring will signal when the ring
- * state transitions from being full to a state where
- * there is room for the producer to send the pending packet.
- */
-
-static inline void hv_signal_on_read(struct vmbus_channel *channel)
-{
- u32 cur_write_sz, cached_write_sz;
- u32 pending_sz;
- struct hv_ring_buffer_info *rbi = &channel->inbound;
-
- /*
- * Issue a full memory barrier before making the signaling decision.
- * Here is the reason for having this barrier:
- * If the reading of the pend_sz (in this function)
- * were to be reordered and read before we commit the new read
- * index (in the calling function) we could
- * have a problem. If the host were to set the pending_sz after we
- * have sampled pending_sz and go to sleep before we commit the
- * read index, we could miss sending the interrupt. Issue a full
- * memory barrier to address this.
- */
- virt_mb();
-
- pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
- /* If the other end is not blocked on write don't bother. */
- if (pending_sz == 0)
- return;
-
- cur_write_sz = hv_get_bytes_to_write(rbi);
-
- if (cur_write_sz < pending_sz)
- return;
-
- cached_write_sz = hv_get_cached_bytes_to_write(rbi);
- if (cached_write_sz < pending_sz)
- vmbus_setevent(channel);
-}
-
-/*
* Mask off host interrupt callback notifications
*/
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
@@ -1568,6 +1707,11 @@ static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
return (desc->len8 << 3) - (desc->offset8 << 3);
}
+/* Get packet length associated with descriptor */
+static inline u32 hv_pkt_len(const struct vmpacket_descriptor *desc)
+{
+ return desc->len8 << 3;
+}
struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);
@@ -1578,10 +1722,6 @@ __hv_pkt_iter_next(struct vmbus_channel *channel,
void hv_pkt_iter_close(struct vmbus_channel *channel);
-/*
- * Get next packet descriptor from iterator
- * If at end of list, return NULL and update host.
- */
static inline struct vmpacket_descriptor *
hv_pkt_iter_next(struct vmbus_channel *channel,
const struct vmpacket_descriptor *pkt)
@@ -1599,4 +1739,52 @@ hv_pkt_iter_next(struct vmbus_channel *channel,
for (pkt = hv_pkt_iter_first(channel); pkt; \
pkt = hv_pkt_iter_next(channel, pkt))
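A typical receive path drains the inbound ring with this macro; a hypothetical consumer (sketch; the payload offset follows from hv_pkt_datalen() above, and my_handle_packet() is a stand-in):

struct vmpacket_descriptor *desc;

foreach_vmbus_pkt(desc, channel) {
        void *data = (char *)desc + (desc->offset8 << 3); /* skip the header */

        my_handle_packet(channel, data, hv_pkt_datalen(desc));
}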
+/*
+ * Interface for passing data between SR-IOV PF and VF drivers. The VF driver
+ * sends requests to read and write blocks. Each block must be 128 bytes or
+ * smaller. Optionally, the VF driver can register a callback function which
+ * will be invoked when the host says that one or more of the first 64 block
+ * IDs are "invalid", which means that the VF driver should reread them.
+ */
+#define HV_CONFIG_BLOCK_SIZE_MAX 128
+
+int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
+ unsigned int block_id, unsigned int *bytes_returned);
+int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
+ unsigned int block_id);
+int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
+ void (*block_invalidate)(void *context,
+ u64 block_mask));
+
+struct hyperv_pci_block_ops {
+ int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len,
+ unsigned int block_id, unsigned int *bytes_returned);
+ int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len,
+ unsigned int block_id);
+ int (*reg_blk_invalidate)(struct pci_dev *dev, void *context,
+ void (*block_invalidate)(void *context,
+ u64 block_mask));
+};
+
+extern struct hyperv_pci_block_ops hvpci_block_ops;
+
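A VF driver would call these against its PCI device; a hypothetical read of config block 0 (sketch; pdev is a stand-in):

u8 blk[HV_CONFIG_BLOCK_SIZE_MAX];
unsigned int bytes;
int ret;

ret = hyperv_read_cfg_blk(pdev, blk, sizeof(blk), 0, &bytes);
if (ret == 0)
        pr_info("config block 0: %u bytes\n", bytes);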
+static inline unsigned long virt_to_hvpfn(void *addr)
+{
+ phys_addr_t paddr;
+
+ if (is_vmalloc_addr(addr))
+ paddr = page_to_phys(vmalloc_to_page(addr)) +
+ offset_in_page(addr);
+ else
+ paddr = __pa(addr);
+
+ return paddr >> HV_HYP_PAGE_SHIFT;
+}
+
+#define NR_HV_HYP_PAGES_IN_PAGE (PAGE_SIZE / HV_HYP_PAGE_SIZE)
+#define offset_in_hvpage(ptr) ((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
+#define HVPFN_UP(x) (((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
+#define HVPFN_DOWN(x) ((x) >> HV_HYP_PAGE_SHIFT)
+#define page_to_hvpfn(page) (page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
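These helpers count in Hyper-V page frames (HV_HYP_PAGE_SIZE) rather than the guest's PAGE_SIZE, which matters on architectures where the two differ; e.g. the pages spanned by an arbitrarily aligned buffer (sketch; ptr and len are stand-ins):

unsigned long first_pfn = virt_to_hvpfn(ptr);
unsigned long npages = HVPFN_UP(offset_in_hvpage(ptr) + len);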
+
#endif /* _HYPERV_H */
diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h
index 3fa5ef2b3759..9efbc54e35e5 100644
--- a/include/linux/hypervisor.h
+++ b/include/linux/hypervisor.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_HYPEVISOR_H
#define __LINUX_HYPEVISOR_H
@@ -6,12 +7,37 @@
* Juergen Gross <jgross@suse.com>
*/
-#ifdef CONFIG_HYPERVISOR_GUEST
-#include <asm/hypervisor.h>
-#else
+#ifdef CONFIG_X86
+
+#include <asm/jailhouse_para.h>
+#include <asm/x86_init.h>
+
+static inline void hypervisor_pin_vcpu(int cpu)
+{
+ x86_platform.hyper.pin_vcpu(cpu);
+}
+
+#else /* !CONFIG_X86 */
+
+#include <linux/of.h>
+
static inline void hypervisor_pin_vcpu(int cpu)
{
}
-#endif
+
+static inline bool jailhouse_paravirt(void)
+{
+ return of_find_compatible_node(NULL, NULL, "jailhouse,cell");
+}
+
+#endif /* !CONFIG_X86 */
+
+static inline bool hypervisor_isolated_pci_functions(void)
+{
+ if (IS_ENABLED(CONFIG_S390))
+ return true;
+
+ return jailhouse_paravirt();
+}
#endif /* __LINUX_HYPEVISOR_H */
diff --git a/include/linux/i2c-algo-bit.h b/include/linux/i2c-algo-bit.h
index 63904ba6887e..7fd5575a368f 100644
--- a/include/linux/i2c-algo-bit.h
+++ b/include/linux/i2c-algo-bit.h
@@ -1,30 +1,17 @@
-/* ------------------------------------------------------------------------- */
-/* i2c-algo-bit.h i2c driver algorithms for bit-shift adapters */
-/* ------------------------------------------------------------------------- */
-/* Copyright (C) 1995-99 Simon G. Vogl
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- MA 02110-1301 USA. */
-/* ------------------------------------------------------------------------- */
-
-/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even
- Frodo Looijaard <frodol@dds.nl> */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * i2c-algo-bit.h: i2c driver algorithms for bit-shift adapters
+ *
+ * Copyright (C) 1995-99 Simon G. Vogl
+ * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even
+ * Frodo Looijaard <frodol@dds.nl>
+ */
#ifndef _LINUX_I2C_ALGO_BIT_H
#define _LINUX_I2C_ALGO_BIT_H
+#include <linux/i2c.h>
+
/* --- Defines for bit-adapters --------------------------------------- */
/*
* This struct contains the hw-dependent functions of bit-style adapters to
@@ -46,6 +33,7 @@ struct i2c_algo_bit_data {
minimum 5 us for standard-mode I2C and SMBus,
maximum 50 us for SMBus */
int timeout; /* in jiffies */
+ bool can_do_atomic; /* callbacks don't sleep, we can be atomic */
};
int i2c_bit_add_bus(struct i2c_adapter *);
diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h
index a3c3ecd59f08..7c522fdd9ea7 100644
--- a/include/linux/i2c-algo-pca.h
+++ b/include/linux/i2c-algo-pca.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_I2C_ALGO_PCA_H
#define _LINUX_I2C_ALGO_PCA_H
@@ -52,6 +53,20 @@
#define I2C_PCA_CON_SI 0x08 /* Serial Interrupt */
#define I2C_PCA_CON_CR 0x07 /* Clock Rate (MASK) */
+/**
+ * struct pca_i2c_bus_settings - The configured PCA i2c bus settings
+ * @mode: Configured i2c bus mode
+ * @tlow: Configured SCL LOW period
+ * @thi: Configured SCL HIGH period
+ * @clock_freq: The configured clock frequency
+ */
+struct pca_i2c_bus_settings {
+ int mode;
+ int tlow;
+ int thi;
+ int clock_freq;
+};
+
struct i2c_algo_pca_data {
void *data; /* private low level data */
void (*write_byte) (void *data, int reg, int val);
@@ -63,6 +78,7 @@ struct i2c_algo_pca_data {
* For PCA9665, use the frequency you want here. */
unsigned int i2c_clock;
unsigned int chip;
+ struct pca_i2c_bus_settings bus_settings;
};
int i2c_pca_add_bus(struct i2c_adapter *);
diff --git a/include/linux/i2c-algo-pcf.h b/include/linux/i2c-algo-pcf.h
index 538e8f41a319..696e7de83c79 100644
--- a/include/linux/i2c-algo-pcf.h
+++ b/include/linux/i2c-algo-pcf.h
@@ -1,23 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* ------------------------------------------------------------------------- */
/* adap-pcf.h i2c driver algorithms for PCF8584 adapters */
/* ------------------------------------------------------------------------- */
/* Copyright (C) 1995-97 Simon G. Vogl
1998-99 Hans Berglund
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- MA 02110-1301 USA. */
+ */
/* ------------------------------------------------------------------------- */
/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even
diff --git a/include/linux/i2c-dev.h b/include/linux/i2c-dev.h
index 79727144c5cd..4c86fce30a51 100644
--- a/include/linux/i2c-dev.h
+++ b/include/linux/i2c-dev.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
i2c-dev.h - i2c-bus driver, char device interface
Copyright (C) 1995-97 Simon G. Vogl
Copyright (C) 1998-99 Frodo Looijaard <frodol@dds.nl>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- MA 02110-1301 USA.
*/
#ifndef _LINUX_I2C_DEV_H
#define _LINUX_I2C_DEV_H
diff --git a/include/linux/i2c-mux-pinctrl.h b/include/linux/i2c-mux-pinctrl.h
deleted file mode 100644
index a65c86429e84..000000000000
--- a/include/linux/i2c-mux-pinctrl.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * i2c-mux-pinctrl platform data
- *
- * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _LINUX_I2C_MUX_PINCTRL_H
-#define _LINUX_I2C_MUX_PINCTRL_H
-
-/**
- * struct i2c_mux_pinctrl_platform_data - Platform data for i2c-mux-pinctrl
- * @parent_bus_num: Parent I2C bus number
- * @base_bus_num: Base I2C bus number for the child busses. 0 for dynamic.
- * @bus_count: Number of child busses. Also the number of elements in
- * @pinctrl_states
- * @pinctrl_states: The names of the pinctrl state to select for each child bus
- * @pinctrl_state_idle: The pinctrl state to select when no child bus is being
- * accessed. If NULL, the most recently used pinctrl state will be left
- * selected.
- */
-struct i2c_mux_pinctrl_platform_data {
- int parent_bus_num;
- int base_bus_num;
- int bus_count;
- const char **pinctrl_states;
- const char *pinctrl_state_idle;
-};
-
-#endif
diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h
index bd74d5706f3b..98ef73b7c8fd 100644
--- a/include/linux/i2c-mux.h
+++ b/include/linux/i2c-mux.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
*
* i2c-mux.h - functions for the i2c-bus mux support
@@ -5,21 +6,6 @@
* Copyright (c) 2008-2009 Rodolfo Giometti <giometti@linux.it>
* Copyright (c) 2008-2009 Eurotech S.p.A. <info@eurotech.it>
* Michael Lawnick <michael.lawnick.ext@nsn.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301 USA.
*/
#ifndef _LINUX_I2C_MUX_H
@@ -43,7 +29,7 @@ struct i2c_mux_core {
int num_adapters;
int max_adapters;
- struct i2c_adapter *adapter[0];
+ struct i2c_adapter *adapter[];
};
struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
diff --git a/include/linux/i2c-pnx.h b/include/linux/i2c-pnx.h
deleted file mode 100644
index 5388326fbbff..000000000000
--- a/include/linux/i2c-pnx.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Header file for I2C support on PNX010x/4008.
- *
- * Author: Dennis Kovalev <dkovalev@ru.mvista.com>
- *
- * 2004-2006 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-#ifndef __I2C_PNX_H__
-#define __I2C_PNX_H__
-
-struct platform_device;
-struct clk;
-
-struct i2c_pnx_mif {
- int ret; /* Return value */
- int mode; /* Interface mode */
- struct completion complete; /* I/O completion */
- struct timer_list timer; /* Timeout */
- u8 * buf; /* Data buffer */
- int len; /* Length of data buffer */
- int order; /* RX Bytes to order via TX */
-};
-
-struct i2c_pnx_algo_data {
- void __iomem *ioaddr;
- struct i2c_pnx_mif mif;
- int last;
- struct clk *clk;
- struct i2c_adapter adapter;
- int irq;
- u32 timeout;
-};
-
-#endif /* __I2C_PNX_H__ */
diff --git a/include/linux/i2c-pxa.h b/include/linux/i2c-pxa.h
deleted file mode 100644
index 41dcdfe7f625..000000000000
--- a/include/linux/i2c-pxa.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _LINUX_I2C_ALGO_PXA_H
-#define _LINUX_I2C_ALGO_PXA_H
-
-typedef enum i2c_slave_event_e {
- I2C_SLAVE_EVENT_START_READ,
- I2C_SLAVE_EVENT_START_WRITE,
- I2C_SLAVE_EVENT_STOP
-} i2c_slave_event_t;
-
-struct i2c_slave_client {
- void *data;
- void (*event)(void *ptr, i2c_slave_event_t event);
- int (*read) (void *ptr);
- void (*write)(void *ptr, unsigned int val);
-};
-
-#endif /* _LINUX_I2C_ALGO_PXA_H */
diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h
index a1385023a29b..ced1c6ead52a 100644
--- a/include/linux/i2c-smbus.h
+++ b/include/linux/i2c-smbus.h
@@ -1,22 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* i2c-smbus.h - SMBus extensions to the I2C protocol
*
- * Copyright (C) 2010 Jean Delvare <jdelvare@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301 USA.
+ * Copyright (C) 2010-2019 Jean Delvare <jdelvare@suse.de>
*/
#ifndef _LINUX_I2C_SMBUS_H
@@ -29,25 +15,38 @@
/**
* i2c_smbus_alert_setup - platform data for the smbus_alert i2c client
- * @alert_edge_triggered: whether the alert interrupt is edge (1) or level (0)
- * triggered
* @irq: IRQ number, if the smbus_alert driver should take care of interrupt
* handling
*
* If irq is not specified, the smbus_alert driver doesn't take care of
* interrupt handling. In that case it is up to the I2C bus driver to either
* handle the interrupts or to poll for alerts.
- *
- * If irq is specified then it it crucial that alert_edge_triggered is
- * properly set.
*/
struct i2c_smbus_alert_setup {
- unsigned int alert_edge_triggered:1;
int irq;
};
-struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
- struct i2c_smbus_alert_setup *setup);
+struct i2c_client *i2c_new_smbus_alert_device(struct i2c_adapter *adapter,
+ struct i2c_smbus_alert_setup *setup);
int i2c_handle_smbus_alert(struct i2c_client *ara);
+#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_I2C_SLAVE)
+struct i2c_client *i2c_new_slave_host_notify_device(struct i2c_adapter *adapter);
+void i2c_free_slave_host_notify_device(struct i2c_client *client);
+#else
+static inline struct i2c_client *i2c_new_slave_host_notify_device(struct i2c_adapter *adapter)
+{
+ return ERR_PTR(-ENOSYS);
+}
+static inline void i2c_free_slave_host_notify_device(struct i2c_client *client)
+{
+}
+#endif
+
+#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_DMI)
+void i2c_register_spd(struct i2c_adapter *adap);
+#else
+static inline void i2c_register_spd(struct i2c_adapter *adap) { }
+#endif
+
#endif /* _LINUX_I2C_SMBUS_H */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 00ca5b86a753..13a1ce38cb0c 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -1,35 +1,22 @@
-/* ------------------------------------------------------------------------- */
-/* */
-/* i2c.h - definitions for the i2c-bus interface */
-/* */
-/* ------------------------------------------------------------------------- */
-/* Copyright (C) 1995-2000 Simon G. Vogl
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- MA 02110-1301 USA. */
-/* ------------------------------------------------------------------------- */
-
-/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
- Frodo Looijaard <frodol@dds.nl> */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * i2c.h - definitions for the Linux i2c bus interface
+ * Copyright (C) 1995-2000 Simon G. Vogl
+ * Copyright (C) 2013-2019 Wolfram Sang <wsa@kernel.org>
+ *
+ * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
+ * Frodo Looijaard <frodol@dds.nl>
+ */
#ifndef _LINUX_I2C_H
#define _LINUX_I2C_H
+#include <linux/acpi.h> /* for acpi_handle */
+#include <linux/bits.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h> /* for struct device */
#include <linux/sched.h> /* for completion */
#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
#include <linux/rtmutex.h>
#include <linux/irqdomain.h> /* for Host Notify IRQ */
#include <linux/of.h> /* for struct device_node */
@@ -47,57 +34,130 @@ struct i2c_algorithm;
struct i2c_adapter;
struct i2c_client;
struct i2c_driver;
+struct i2c_device_identity;
union i2c_smbus_data;
struct i2c_board_info;
enum i2c_slave_event;
-typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *);
+typedef int (*i2c_slave_cb_t)(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val);
+
+/* I2C Frequency Modes */
+#define I2C_MAX_STANDARD_MODE_FREQ 100000
+#define I2C_MAX_FAST_MODE_FREQ 400000
+#define I2C_MAX_FAST_MODE_PLUS_FREQ 1000000
+#define I2C_MAX_TURBO_MODE_FREQ 1400000
+#define I2C_MAX_HIGH_SPEED_MODE_FREQ 3400000
+#define I2C_MAX_ULTRA_FAST_MODE_FREQ 5000000
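These constants cap each bus speed class; e.g. clamping a firmware-requested rate to Fast Mode (sketch; requested_hz is a stand-in):

u32 rate = min_t(u32, requested_hz, I2C_MAX_FAST_MODE_FREQ);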
struct module;
struct property_entry;
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+#if IS_ENABLED(CONFIG_I2C)
+/* Return the Frequency mode string based on the bus frequency */
+const char *i2c_freq_mode_string(u32 bus_freq_hz);
+
/*
* The master routines are the ones normally used to transmit data to devices
* on a bus (or read from them). Apart from two basic transfer functions to
* transmit one message at a time, a more complex version can be used to
* transmit an arbitrary number of messages without interruption.
- * @count must be be less than 64k since msg.len is u16.
+ * @count must be less than 64k since msg.len is u16.
+ */
+int i2c_transfer_buffer_flags(const struct i2c_client *client,
+ char *buf, int count, u16 flags);
+
+/**
+ * i2c_master_recv - issue a single I2C message in master receive mode
+ * @client: Handle to slave device
+ * @buf: Where to store data read from slave
+ * @count: How many bytes to read, must be less than 64k since msg.len is u16
+ *
+ * Returns negative errno, or else the number of bytes read.
+ */
+static inline int i2c_master_recv(const struct i2c_client *client,
+ char *buf, int count)
+{
+ return i2c_transfer_buffer_flags(client, buf, count, I2C_M_RD);
+};
+
+/**
+ * i2c_master_recv_dmasafe - issue a single I2C message in master receive mode
+ * using a DMA safe buffer
+ * @client: Handle to slave device
+ * @buf: Where to store data read from slave, must be safe to use with DMA
+ * @count: How many bytes to read, must be less than 64k since msg.len is u16
+ *
+ * Returns negative errno, or else the number of bytes read.
*/
-extern int i2c_master_send(const struct i2c_client *client, const char *buf,
- int count);
-extern int i2c_master_recv(const struct i2c_client *client, char *buf,
- int count);
+static inline int i2c_master_recv_dmasafe(const struct i2c_client *client,
+ char *buf, int count)
+{
+ return i2c_transfer_buffer_flags(client, buf, count,
+ I2C_M_RD | I2C_M_DMA_SAFE);
+};
+
+/**
+ * i2c_master_send - issue a single I2C message in master transmit mode
+ * @client: Handle to slave device
+ * @buf: Data that will be written to the slave
+ * @count: How many bytes to write, must be less than 64k since msg.len is u16
+ *
+ * Returns negative errno, or else the number of bytes written.
+ */
+static inline int i2c_master_send(const struct i2c_client *client,
+ const char *buf, int count)
+{
+ return i2c_transfer_buffer_flags(client, (char *)buf, count, 0);
+};
+
+/**
+ * i2c_master_send_dmasafe - issue a single I2C message in master transmit mode
+ * using a DMA safe buffer
+ * @client: Handle to slave device
+ * @buf: Data that will be written to the slave, must be safe to use with DMA
+ * @count: How many bytes to write, must be less than 64k since msg.len is u16
+ *
+ * Returns negative errno, or else the number of bytes written.
+ */
+static inline int i2c_master_send_dmasafe(const struct i2c_client *client,
+ const char *buf, int count)
+{
+ return i2c_transfer_buffer_flags(client, (char *)buf, count,
+ I2C_M_DMA_SAFE);
+};
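With the former extern functions now inline wrappers around i2c_transfer_buffer_flags(), the calling convention for drivers is unchanged; a hypothetical register read-back (sketch):

u8 reg = 0x10;  /* hypothetical register address */
u8 val;

if (i2c_master_send(client, &reg, 1) == 1 &&
    i2c_master_recv(client, &val, 1) == 1)
        dev_dbg(&client->dev, "reg 0x%02x = 0x%02x\n", reg, val);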
/* Transfer num messages.
*/
-extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num);
+int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
/* Unlocked flavor */
-extern int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num);
+int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
/* This is the very generalized SMBus access routine. You probably do not
want to use this, though; one of the functions below may be much easier,
and probably just as fast.
Note that we use i2c_adapter here, because you do not need a specific
smbus adapter to call this function. */
-extern s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
- unsigned short flags, char read_write, u8 command,
- int size, union i2c_smbus_data *data);
+s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char read_write, u8 command,
+ int protocol, union i2c_smbus_data *data);
+
+/* Unlocked flavor */
+s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char read_write, u8 command,
+ int protocol, union i2c_smbus_data *data);
/* Now follow the 'nice' access routines. These also document the calling
conventions of i2c_smbus_xfer. */
-extern s32 i2c_smbus_read_byte(const struct i2c_client *client);
-extern s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value);
-extern s32 i2c_smbus_read_byte_data(const struct i2c_client *client,
- u8 command);
-extern s32 i2c_smbus_write_byte_data(const struct i2c_client *client,
- u8 command, u8 value);
-extern s32 i2c_smbus_read_word_data(const struct i2c_client *client,
- u8 command);
-extern s32 i2c_smbus_write_word_data(const struct i2c_client *client,
- u8 command, u16 value);
+u8 i2c_smbus_pec(u8 crc, u8 *p, size_t count);
+s32 i2c_smbus_read_byte(const struct i2c_client *client);
+s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value);
+s32 i2c_smbus_read_byte_data(const struct i2c_client *client, u8 command);
+s32 i2c_smbus_write_byte_data(const struct i2c_client *client,
+ u8 command, u8 value);
+s32 i2c_smbus_read_word_data(const struct i2c_client *client, u8 command);
+s32 i2c_smbus_write_word_data(const struct i2c_client *client,
+ u8 command, u16 value);
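
For instance, the byte-data helpers pair naturally for a read-modify-write; the register offset and bit below are hypothetical:

	static int example_set_flag(const struct i2c_client *client)
	{
		s32 val = i2c_smbus_read_byte_data(client, 0x01);

		if (val < 0)
			return val;	/* negative errno from the helper */
		return i2c_smbus_write_byte_data(client, 0x01, val | 0x80);
	}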
static inline s32
i2c_smbus_read_word_swapped(const struct i2c_client *client, u8 command)
@@ -115,32 +175,69 @@ i2c_smbus_write_word_swapped(const struct i2c_client *client,
}
/* Returns the number of read bytes */
-extern s32 i2c_smbus_read_block_data(const struct i2c_client *client,
- u8 command, u8 *values);
-extern s32 i2c_smbus_write_block_data(const struct i2c_client *client,
- u8 command, u8 length, const u8 *values);
+s32 i2c_smbus_read_block_data(const struct i2c_client *client,
+ u8 command, u8 *values);
+s32 i2c_smbus_write_block_data(const struct i2c_client *client,
+ u8 command, u8 length, const u8 *values);
/* Returns the number of read bytes */
-extern s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client,
- u8 command, u8 length, u8 *values);
-extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
- u8 command, u8 length,
- const u8 *values);
-extern s32
-i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
- u8 command, u8 length, u8 *values);
+s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client,
+ u8 command, u8 length, u8 *values);
+s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
+ u8 command, u8 length, const u8 *values);
+s32 i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
+ u8 command, u8 length,
+ u8 *values);
+int i2c_get_device_id(const struct i2c_client *client,
+ struct i2c_device_identity *id);
+const struct i2c_device_id *i2c_client_get_device_id(const struct i2c_client *client);
#endif /* I2C */
+/**
+ * struct i2c_device_identity - i2c client device identification
+ * @manufacturer_id: 0 - 4095, database maintained by NXP
+ * @part_id: 0 - 511, according to manufacturer
+ * @die_revision: 0 - 7, according to manufacturer
+ */
+struct i2c_device_identity {
+ u16 manufacturer_id;
+#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS 0
+#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_1 1
+#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_2 2
+#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_3 3
+#define I2C_DEVICE_ID_RAMTRON_INTERNATIONAL 4
+#define I2C_DEVICE_ID_ANALOG_DEVICES 5
+#define I2C_DEVICE_ID_STMICROELECTRONICS 6
+#define I2C_DEVICE_ID_ON_SEMICONDUCTOR 7
+#define I2C_DEVICE_ID_SPRINTEK_CORPORATION 8
+#define I2C_DEVICE_ID_ESPROS_PHOTONICS_AG 9
+#define I2C_DEVICE_ID_FUJITSU_SEMICONDUCTOR 10
+#define I2C_DEVICE_ID_FLIR 11
+#define I2C_DEVICE_ID_O2MICRO 12
+#define I2C_DEVICE_ID_ATMEL 13
+#define I2C_DEVICE_ID_NONE 0xffff
+ u16 part_id;
+ u8 die_revision;
+};
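
A hedged sketch of using i2c_get_device_id(); the manufacturer check is illustrative only, and the call fails on adapters that cannot perform the underlying transfer:

	static bool example_is_nxp_part(const struct i2c_client *client)
	{
		struct i2c_device_identity id;

		if (i2c_get_device_id(client, &id) < 0)
			return false;	/* e.g. adapter cannot do the transfer */
		return id.manufacturer_id == I2C_DEVICE_ID_NXP_SEMICONDUCTORS;
	}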
+
enum i2c_alert_protocol {
I2C_PROTOCOL_SMBUS_ALERT,
I2C_PROTOCOL_SMBUS_HOST_NOTIFY,
};
/**
+ * enum i2c_driver_flags - Flags for an I2C device driver
+ *
+ * @I2C_DRV_ACPI_WAIVE_D0_PROBE: Don't put the device in D0 state for probe
+ */
+enum i2c_driver_flags {
+ I2C_DRV_ACPI_WAIVE_D0_PROBE = BIT(0),
+};
+
+/**
* struct i2c_driver - represent an I2C device driver
* @class: What kind of i2c device we instantiate (for detect)
- * @attach_adapter: Callback for bus addition (deprecated)
- * @probe: Callback for device binding - soon to be deprecated
- * @probe_new: New callback for device binding
+ * @probe: Callback for device binding
+ * @probe_new: Transitional callback for device binding - do not use
* @remove: Callback for device unbinding
* @shutdown: Callback for device shutdown
* @alert: Alert callback, for example for the SMBus alert protocol
@@ -150,7 +247,7 @@ enum i2c_alert_protocol {
* @detect: Callback for device detection
* @address_list: The I2C addresses to probe (for detect)
* @clients: List of detected clients we created (for i2c-core use only)
- * @disable_i2c_core_irq_mapping: Tell the i2c-core to not do irq-mapping
+ * @flags: A bitmask of flags defined in &enum i2c_driver_flags
*
* The driver.owner field should be set to the module owner of this driver.
* The driver.name field should be set to the name of this driver.
@@ -175,22 +272,21 @@ enum i2c_alert_protocol {
struct i2c_driver {
unsigned int class;
- /* Notifies the driver that a new bus has appeared. You should avoid
- * using this, it will be removed in a near future.
- */
- int (*attach_adapter)(struct i2c_adapter *) __deprecated;
-
+ union {
/* Standard driver model interfaces */
- int (*probe)(struct i2c_client *, const struct i2c_device_id *);
- int (*remove)(struct i2c_client *);
+ int (*probe)(struct i2c_client *client);
+ /*
+ * Legacy callback that was part of a conversion of .probe().
+ * Today it has the same semantic as .probe(). Don't use for new
+ * code.
+ */
+ int (*probe_new)(struct i2c_client *client);
+ };
+ void (*remove)(struct i2c_client *client);
- /* New driver model interface to aid the seamless removal of the
- * current probe()'s, more commonly unused than used second parameter.
- */
- int (*probe_new)(struct i2c_client *);
/* driver model interfaces that don't relate to enumeration */
- void (*shutdown)(struct i2c_client *);
+ void (*shutdown)(struct i2c_client *client);
/* Alert callback, for example for the SMBus alert protocol.
* The format and meaning of the data value depends on the protocol.
@@ -199,7 +295,7 @@ struct i2c_driver {
* For the SMBus Host Notify protocol, the data corresponds to the
* 16-bit payload data reported by the slave device acting as master.
*/
- void (*alert)(struct i2c_client *, enum i2c_alert_protocol protocol,
+ void (*alert)(struct i2c_client *client, enum i2c_alert_protocol protocol,
unsigned int data);
/* an ioctl-like command that can be used to perform specific functions
@@ -211,28 +307,30 @@ struct i2c_driver {
const struct i2c_device_id *id_table;
/* Device detection callback for automatic device creation */
- int (*detect)(struct i2c_client *, struct i2c_board_info *);
+ int (*detect)(struct i2c_client *client, struct i2c_board_info *info);
const unsigned short *address_list;
struct list_head clients;
- bool disable_i2c_core_irq_mapping;
+ u32 flags;
};
#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
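
A minimal driver skeleton against the current callback signatures; every "example" name is a placeholder:

	static int example_probe(struct i2c_client *client)
	{
		return 0;
	}

	static void example_remove(struct i2c_client *client)
	{
	}

	static struct i2c_driver example_driver = {
		.driver = {
			.name = "example",
		},
		.probe = example_probe,
		.remove = example_remove,
	};
	module_i2c_driver(example_driver);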
/**
* struct i2c_client - represent an I2C slave device
- * @flags: I2C_CLIENT_TEN indicates the device uses a ten bit chip address;
- * I2C_CLIENT_PEC indicates it uses SMBus Packet Error Checking
+ * @flags: see I2C_CLIENT_* for possible flags
* @addr: Address used on the I2C bus connected to the parent adapter.
* @name: Indicates the type of the device, usually a chip name that's
* generic enough to hide second-sourcing and compatible revisions.
* @adapter: manages the bus segment hosting this I2C device
* @dev: Driver model device node for the slave.
+ * @init_irq: IRQ that was set at initialization
* @irq: indicates the IRQ generated by this device (if any)
* @detected: member of an i2c_driver.clients list or i2c-core's
* userspace_devices list
* @slave_cb: Callback when I2C slave mode of an adapter is used. The adapter
* calls it to pass on slave events to the slave driver.
+ * @devres_group_id: id of the devres group that will be created for resources
+ * acquired when probing this device.
*
* An i2c_client identifies a single device (i.e. chip) connected to an
* i2c bus. The behaviour exposed to Linux is defined by the driver
@@ -240,44 +338,53 @@ struct i2c_driver {
*/
struct i2c_client {
unsigned short flags; /* div., see below */
+#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
+#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
+ /* Must equal I2C_M_TEN below */
+#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */
+#define I2C_CLIENT_HOST_NOTIFY 0x40 /* We want to use I2C host notify */
+#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
+#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
+ /* Must match I2C_M_STOP|IGNORE_NAK */
+
unsigned short addr; /* chip address - NOTE: 7bit */
/* addresses are stored in the */
/* _LOWER_ 7 bits */
char name[I2C_NAME_SIZE];
struct i2c_adapter *adapter; /* the adapter we sit on */
struct device dev; /* the device structure */
+ int init_irq; /* irq set at initialization */
int irq; /* irq issued by device */
struct list_head detected;
#if IS_ENABLED(CONFIG_I2C_SLAVE)
i2c_slave_cb_t slave_cb; /* callback for slave mode */
#endif
+ void *devres_group_id; /* ID of probe devres group */
};
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
-extern struct i2c_client *i2c_verify_client(struct device *dev);
-extern struct i2c_adapter *i2c_verify_adapter(struct device *dev);
-extern const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
- const struct i2c_client *client);
+struct i2c_adapter *i2c_verify_adapter(struct device *dev);
+const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
+ const struct i2c_client *client);
static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
{
- struct device * const dev = container_of(kobj, struct device, kobj);
+ struct device * const dev = kobj_to_dev(kobj);
return to_i2c_client(dev);
}
-static inline void *i2c_get_clientdata(const struct i2c_client *dev)
+static inline void *i2c_get_clientdata(const struct i2c_client *client)
{
- return dev_get_drvdata(&dev->dev);
+ return dev_get_drvdata(&client->dev);
}
-static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
+static inline void i2c_set_clientdata(struct i2c_client *client, void *data)
{
- dev_set_drvdata(&dev->dev, data);
+ dev_set_drvdata(&client->dev, data);
}
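
These accessors are typically paired across probe and later callbacks; struct example_data is hypothetical:

	struct example_data {
		int state;
	};

	static int example_bind(struct i2c_client *client)
	{
		struct example_data *data;

		data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		i2c_set_clientdata(client, data);
		/* later: struct example_data *d = i2c_get_clientdata(client); */
		return 0;
	}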
/* I2C slave support */
-#if IS_ENABLED(CONFIG_I2C_SLAVE)
enum i2c_slave_event {
I2C_SLAVE_READ_REQUESTED,
I2C_SLAVE_WRITE_REQUESTED,
@@ -286,15 +393,12 @@ enum i2c_slave_event {
I2C_SLAVE_STOP,
};
-extern int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb);
-extern int i2c_slave_unregister(struct i2c_client *client);
-extern bool i2c_detect_slave_mode(struct device *dev);
-
-static inline int i2c_slave_event(struct i2c_client *client,
- enum i2c_slave_event event, u8 *val)
-{
- return client->slave_cb(client, event, val);
-}
+int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb);
+int i2c_slave_unregister(struct i2c_client *client);
+int i2c_slave_event(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val);
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+bool i2c_detect_slave_mode(struct device *dev);
#else
static inline bool i2c_detect_slave_mode(struct device *dev) { return false; }
#endif
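
A skeletal slave callback, registered with i2c_slave_register(); the backing store is omitted and only the events shown above are handled:

	static int example_slave_cb(struct i2c_client *client,
				    enum i2c_slave_event event, u8 *val)
	{
		switch (event) {
		case I2C_SLAVE_READ_REQUESTED:
			*val = 0;	/* first byte the remote master will read */
			break;
		case I2C_SLAVE_WRITE_REQUESTED:
		case I2C_SLAVE_STOP:
		default:
			break;		/* nothing to do in this sketch */
		}
		return 0;
	}

	/* registration: i2c_slave_register(client, example_slave_cb); */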
@@ -304,11 +408,11 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; }
* @type: chip type, to initialize i2c_client.name
* @flags: to initialize i2c_client.flags
* @addr: stored in i2c_client.addr
+ * @dev_name: Overrides the default <busnr>-<addr> dev_name if set
* @platform_data: stored in i2c_client.dev.platform_data
- * @archdata: copied into i2c_client.dev.archdata
* @of_node: pointer to OpenFirmware device node
* @fwnode: device node supplied by the platform firmware
- * @properties: additional device properties for the device
+ * @swnode: software node for the device
* @resources: resources associated with the device
* @num_resources: number of resources in the @resources array
* @irq: stored in i2c_client.irq
@@ -322,17 +426,17 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; }
* that are present. This information is used to grow the driver model tree.
* For mainboards this is done statically using i2c_register_board_info();
* bus numbers identify adapters that aren't yet available. For add-on boards,
- * i2c_new_device() does this dynamically with the adapter already known.
+ * i2c_new_client_device() does this dynamically with the adapter already known.
*/
struct i2c_board_info {
char type[I2C_NAME_SIZE];
unsigned short flags;
unsigned short addr;
+ const char *dev_name;
void *platform_data;
- struct dev_archdata *archdata;
struct device_node *of_node;
struct fwnode_handle *fwnode;
- const struct property_entry *properties;
+ const struct software_node *swnode;
const struct resource *resources;
unsigned int num_resources;
int irq;
@@ -352,13 +456,14 @@ struct i2c_board_info {
.type = dev_type, .addr = (dev_addr)
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-/* Add-on boards should register/unregister their devices; e.g. a board
+#if IS_ENABLED(CONFIG_I2C)
+/*
+ * Add-on boards should register/unregister their devices; e.g. a board
* with integrated I2C, a config eeprom, sensors, and a codec that's
* used in conjunction with the primary hardware.
*/
-extern struct i2c_client *
-i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
+struct i2c_client *
+i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
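
For example (chip name and address are hypothetical), an add-on board could instantiate a client on a known adapter like this:

	static struct i2c_client *example_add_eeprom(struct i2c_adapter *adap)
	{
		struct i2c_board_info info = {
			I2C_BOARD_INFO("24c02", 0x50),
		};

		return i2c_new_client_device(adap, &info); /* ERR_PTR() on failure */
	}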
/* If you don't know the exact address of an I2C device, use this variant
* instead, which can probe for device presence in a list of possible
@@ -366,27 +471,34 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
* it must return 1 on successful probe, 0 otherwise. If it is not provided,
* a default probing method is used.
*/
-extern struct i2c_client *
-i2c_new_probed_device(struct i2c_adapter *adap,
- struct i2c_board_info *info,
- unsigned short const *addr_list,
- int (*probe)(struct i2c_adapter *, unsigned short addr));
+struct i2c_client *
+i2c_new_scanned_device(struct i2c_adapter *adap,
+ struct i2c_board_info *info,
+ unsigned short const *addr_list,
+ int (*probe)(struct i2c_adapter *adap, unsigned short addr));
/* Common custom probe functions */
-extern int i2c_probe_func_quick_read(struct i2c_adapter *, unsigned short addr);
+int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr);
-/* For devices that use several addresses, use i2c_new_dummy() to make
- * client handles for the extra addresses.
- */
-extern struct i2c_client *
-i2c_new_dummy(struct i2c_adapter *adap, u16 address);
+struct i2c_client *
+i2c_new_dummy_device(struct i2c_adapter *adapter, u16 address);
+
+struct i2c_client *
+devm_i2c_new_dummy_device(struct device *dev, struct i2c_adapter *adap, u16 address);
-extern struct i2c_client *
-i2c_new_secondary_device(struct i2c_client *client,
- const char *name,
- u16 default_addr);
+struct i2c_client *
+i2c_new_ancillary_device(struct i2c_client *client,
+ const char *name,
+ u16 default_addr);
-extern void i2c_unregister_device(struct i2c_client *);
+void i2c_unregister_device(struct i2c_client *client);
+
+struct i2c_client *i2c_verify_client(struct device *dev);
+#else
+static inline struct i2c_client *i2c_verify_client(struct device *dev)
+{
+ return NULL;
+}
#endif /* I2C */
/* Mainboard arch_initcall() code should register all its I2C devices.
@@ -394,7 +506,7 @@ extern void i2c_unregister_device(struct i2c_client *);
* Modules for add-on boards must use other calls.
*/
#ifdef CONFIG_I2C_BOARDINFO
-extern int
+int
i2c_register_board_info(int busnum, struct i2c_board_info const *info,
unsigned n);
#else
@@ -411,11 +523,15 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
* @master_xfer: Issue a set of i2c transactions to the given I2C adapter
* defined by the msgs array, with num messages available to transfer via
* the adapter specified by adap.
+ * @master_xfer_atomic: same as @master_xfer, but it must not sleep (atomic
+ *   context), so that e.g. PMICs can be accessed very late before shutdown.
+ *   Optional.
* @smbus_xfer: Issue smbus transactions to the given I2C adapter. If this
* is not present, then the bus layer will try and convert the SMBus calls
* into I2C transfers instead.
+ * @smbus_xfer_atomic: same as @smbus_xfer, but it must not sleep (atomic
+ *   context), so that e.g. PMICs can be accessed very late before shutdown.
+ *   Optional.
* @functionality: Return the flags that this algorithm/adapter pair supports
- * from the I2C_FUNC_* flags.
+ * from the ``I2C_FUNC_*`` flags.
* @reg_slave: Register given client to I2C slave mode of this adapter
* @unreg_slave: Unregister given client from I2C slave mode of this adapter
*
@@ -424,25 +540,34 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
* be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584
* to name two of the most common.
*
- * The return codes from the @master_xfer field should indicate the type of
- * error code that occurred during the transfer, as documented in the kernel
- * Documentation file Documentation/i2c/fault-codes.
+ * The return codes from the ``master_xfer{_atomic}`` fields should indicate the
+ * type of error code that occurred during the transfer, as documented in the
+ * Kernel Documentation file Documentation/i2c/fault-codes.rst. Otherwise, the
+ * number of messages executed should be returned.
*/
struct i2c_algorithm {
- /* If an adapter algorithm can't do I2C-level access, set master_xfer
- to NULL. If an adapter algorithm can do SMBus access, set
- smbus_xfer. If set to NULL, the SMBus protocol is simulated
- using common I2C messages */
- /* master_xfer should return the number of messages successfully
- processed, or a negative value on error */
+ /*
+ * If an adapter algorithm can't do I2C-level access, set master_xfer
+ * to NULL. If an adapter algorithm can do SMBus access, set
+ * smbus_xfer. If set to NULL, the SMBus protocol is simulated
+ * using common I2C messages.
+ *
+ * master_xfer should return the number of messages successfully
+ * processed, or a negative value on error
+ */
int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs,
int num);
- int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr,
- unsigned short flags, char read_write,
- u8 command, int size, union i2c_smbus_data *data);
+ int (*master_xfer_atomic)(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num);
+ int (*smbus_xfer)(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int size, union i2c_smbus_data *data);
+ int (*smbus_xfer_atomic)(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int size, union i2c_smbus_data *data);
/* To determine what the adapter supports */
- u32 (*functionality) (struct i2c_adapter *);
+ u32 (*functionality)(struct i2c_adapter *adap);
#if IS_ENABLED(CONFIG_I2C_SLAVE)
int (*reg_slave)(struct i2c_client *client);
@@ -459,9 +584,9 @@ struct i2c_algorithm {
* The main operations are wrapped by i2c_lock_bus and i2c_unlock_bus.
*/
struct i2c_lock_operations {
- void (*lock_bus)(struct i2c_adapter *, unsigned int flags);
- int (*trylock_bus)(struct i2c_adapter *, unsigned int flags);
- void (*unlock_bus)(struct i2c_adapter *, unsigned int flags);
+ void (*lock_bus)(struct i2c_adapter *adapter, unsigned int flags);
+ int (*trylock_bus)(struct i2c_adapter *adapter, unsigned int flags);
+ void (*unlock_bus)(struct i2c_adapter *adapter, unsigned int flags);
};
/**
@@ -471,6 +596,11 @@ struct i2c_lock_operations {
* @scl_fall_ns: time SCL signal takes to fall in ns; t(f) in the I2C specification
* @scl_int_delay_ns: time IP core additionally needs to setup SCL in ns
* @sda_fall_ns: time SDA signal takes to fall in ns; t(f) in the I2C specification
+ * @sda_hold_ns: time IP core additionally needs to hold SDA in ns
+ * @digital_filter_width_ns: width in ns of spikes on i2c lines that the IP core
+ * digital filter can filter out
+ * @analog_filter_cutoff_freq_hz: threshold frequency for the low pass IP core
+ * analog filter
*/
struct i2c_timings {
u32 bus_freq_hz;
@@ -478,45 +608,65 @@ struct i2c_timings {
u32 scl_fall_ns;
u32 scl_int_delay_ns;
u32 sda_fall_ns;
+ u32 sda_hold_ns;
+ u32 digital_filter_width_ns;
+ u32 analog_filter_cutoff_freq_hz;
};
/**
* struct i2c_bus_recovery_info - I2C bus recovery information
* @recover_bus: Recover routine. Either pass driver's recover_bus() routine, or
- * i2c_generic_scl_recovery() or i2c_generic_gpio_recovery().
+ * i2c_generic_scl_recovery().
* @get_scl: This gets current value of SCL line. Mandatory for generic SCL
- * recovery. Used internally for generic GPIO recovery.
- * @set_scl: This sets/clears SCL line. Mandatory for generic SCL recovery. Used
- * internally for generic GPIO recovery.
- * @get_sda: This gets current value of SDA line. Optional for generic SCL
- * recovery. Used internally, if sda_gpio is a valid GPIO, for generic GPIO
- * recovery.
+ * recovery. Populated internally for generic GPIO recovery.
+ * @set_scl: This sets/clears the SCL line. Mandatory for generic SCL recovery.
+ * Populated internally for generic GPIO recovery.
+ * @get_sda: This gets current value of SDA line. This or set_sda() is mandatory
+ * for generic SCL recovery. Populated internally, if sda_gpio is a valid
+ * GPIO, for generic GPIO recovery.
+ * @set_sda: This sets/clears the SDA line. This or get_sda() is mandatory for
+ * generic SCL recovery. Populated internally, if sda_gpio is a valid GPIO,
+ * for generic GPIO recovery.
+ * @get_bus_free: Returns the bus free state as seen from the IP core in case it
+ * has a more complex internal logic than just reading SDA. Optional.
* @prepare_recovery: This will be called before starting recovery. Platform may
* configure padmux here for SDA/SCL line or something else they want.
* @unprepare_recovery: This will be called after completing recovery. Platform
* may configure padmux here for SDA/SCL line or something else they want.
- * @scl_gpio: gpio number of the SCL line. Only required for GPIO recovery.
- * @sda_gpio: gpio number of the SDA line. Only required for GPIO recovery.
+ * @scl_gpiod: gpiod of the SCL line. Only required for GPIO recovery.
+ * @sda_gpiod: gpiod of the SDA line. Only required for GPIO recovery.
+ * @pinctrl: pinctrl used by GPIO recovery to change the state of the I2C pins.
+ * Optional.
+ * @pins_default: default pinctrl state of SCL/SDA lines, when they are assigned
+ * to the I2C bus. Optional. Populated internally for GPIO recovery, if
+ * state with the name PINCTRL_STATE_DEFAULT is found and pinctrl is valid.
+ * @pins_gpio: recovery pinctrl state of SCL/SDA lines, when they are used as
+ * GPIOs. Optional. Populated internally for GPIO recovery, if this state
+ * is called "gpio" or "recovery" and pinctrl is valid.
*/
struct i2c_bus_recovery_info {
- int (*recover_bus)(struct i2c_adapter *);
+ int (*recover_bus)(struct i2c_adapter *adap);
- int (*get_scl)(struct i2c_adapter *);
- void (*set_scl)(struct i2c_adapter *, int val);
- int (*get_sda)(struct i2c_adapter *);
+ int (*get_scl)(struct i2c_adapter *adap);
+ void (*set_scl)(struct i2c_adapter *adap, int val);
+ int (*get_sda)(struct i2c_adapter *adap);
+ void (*set_sda)(struct i2c_adapter *adap, int val);
+ int (*get_bus_free)(struct i2c_adapter *adap);
- void (*prepare_recovery)(struct i2c_adapter *);
- void (*unprepare_recovery)(struct i2c_adapter *);
+ void (*prepare_recovery)(struct i2c_adapter *adap);
+ void (*unprepare_recovery)(struct i2c_adapter *adap);
/* gpio recovery */
- int scl_gpio;
- int sda_gpio;
+ struct gpio_desc *scl_gpiod;
+ struct gpio_desc *sda_gpiod;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_gpio;
};
int i2c_recover_bus(struct i2c_adapter *adap);
/* Generic recovery routines */
-int i2c_generic_gpio_recovery(struct i2c_adapter *adap);
int i2c_generic_scl_recovery(struct i2c_adapter *adap);
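
A sketch of wiring generic SCL recovery into an adapter; the bus_recovery_info member of struct i2c_adapter is assumed here (it is not part of this hunk):

	static struct i2c_bus_recovery_info example_rinfo = {
		.recover_bus = i2c_generic_scl_recovery,
		/* plus .get_scl/.set_scl, or the GPIO/pinctrl fields above */
	};

	/* before i2c_add_adapter(): adap->bus_recovery_info = &example_rinfo; */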
/**
@@ -562,6 +712,12 @@ struct i2c_adapter_quirks {
I2C_AQ_COMB_READ_SECOND | I2C_AQ_COMB_SAME_ADDR)
/* clock stretching is not supported */
#define I2C_AQ_NO_CLK_STRETCH BIT(4)
+/* message cannot have length of 0 */
+#define I2C_AQ_NO_ZERO_LEN_READ BIT(5)
+#define I2C_AQ_NO_ZERO_LEN_WRITE BIT(6)
+#define I2C_AQ_NO_ZERO_LEN (I2C_AQ_NO_ZERO_LEN_READ | I2C_AQ_NO_ZERO_LEN_WRITE)
+/* adapter cannot do repeated START */
+#define I2C_AQ_NO_REP_START BIT(7)
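
As a sketch, a controller that can do neither zero-length transfers nor repeated START could advertise:

	static const struct i2c_adapter_quirks example_quirks = {
		.flags = I2C_AQ_NO_ZERO_LEN | I2C_AQ_NO_REP_START,
	};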
/*
* i2c_adapter is the structure used to identify a physical i2c bus along
@@ -581,6 +737,9 @@ struct i2c_adapter {
int timeout; /* in jiffies */
int retries;
struct device dev; /* the adapter device */
+ unsigned long locked_flags; /* owned by the I2C core */
+#define I2C_ALF_IS_SUSPENDED 0
+#define I2C_ALF_SUSPEND_REPORTED 1
int nr;
char name[48];
@@ -593,17 +752,18 @@ struct i2c_adapter {
const struct i2c_adapter_quirks *quirks;
struct irq_domain *host_notify_domain;
+ struct regulator *bus_regulator;
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
-static inline void *i2c_get_adapdata(const struct i2c_adapter *dev)
+static inline void *i2c_get_adapdata(const struct i2c_adapter *adap)
{
- return dev_get_drvdata(&dev->dev);
+ return dev_get_drvdata(&adap->dev);
}
-static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
+static inline void i2c_set_adapdata(struct i2c_adapter *adap, void *data)
{
- dev_set_drvdata(&dev->dev, data);
+ dev_set_drvdata(&adap->dev, data);
}
static inline struct i2c_adapter *
@@ -619,7 +779,7 @@ i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter)
return NULL;
}
-int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *));
+int i2c_for_each_dev(void *data, int (*fn)(struct device *dev, void *data));
/* Adapter locking functions, exported for shared pin cases */
#define I2C_LOCK_ROOT_ADAPTER BIT(0)
@@ -663,33 +823,44 @@ i2c_unlock_bus(struct i2c_adapter *adapter, unsigned int flags)
adapter->lock_ops->unlock_bus(adapter, flags);
}
-static inline void
-i2c_lock_adapter(struct i2c_adapter *adapter)
+/**
+ * i2c_mark_adapter_suspended - Report suspended state of the adapter to the core
+ * @adap: Adapter to mark as suspended
+ *
+ * When using this helper to mark an adapter as suspended, the core will reject
+ * further transfers to this adapter. The usage of this helper is optional but
+ * recommended for devices having distinct handlers for system suspend and
+ * runtime suspend. More complex devices are free to implement custom solutions
+ * to reject transfers when suspended.
+ */
+static inline void i2c_mark_adapter_suspended(struct i2c_adapter *adap)
{
- i2c_lock_bus(adapter, I2C_LOCK_ROOT_ADAPTER);
+ i2c_lock_bus(adap, I2C_LOCK_ROOT_ADAPTER);
+ set_bit(I2C_ALF_IS_SUSPENDED, &adap->locked_flags);
+ i2c_unlock_bus(adap, I2C_LOCK_ROOT_ADAPTER);
}
-static inline void
-i2c_unlock_adapter(struct i2c_adapter *adapter)
+/**
+ * i2c_mark_adapter_resumed - Report resumed state of the adapter to the core
+ * @adap: Adapter to mark as resumed
+ *
+ * When using this helper to mark an adapter as resumed, the core will allow
+ * further transfers to this adapter. See also the notes to
+ * i2c_mark_adapter_suspended().
+ */
+static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap)
{
- i2c_unlock_bus(adapter, I2C_LOCK_ROOT_ADAPTER);
+ i2c_lock_bus(adap, I2C_LOCK_ROOT_ADAPTER);
+ clear_bit(I2C_ALF_IS_SUSPENDED, &adap->locked_flags);
+ i2c_unlock_bus(adap, I2C_LOCK_ROOT_ADAPTER);
}
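
These helpers are typically called from a bus driver's system sleep hooks; the drvdata lookup below is one plausible pattern, not a fixed rule:

	static int example_suspend(struct device *dev)
	{
		struct i2c_adapter *adap = dev_get_drvdata(dev);

		i2c_mark_adapter_suspended(adap);
		return 0;
	}

	static int example_resume(struct device *dev)
	{
		struct i2c_adapter *adap = dev_get_drvdata(dev);

		i2c_mark_adapter_resumed(adap);
		return 0;
	}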
-/*flags for the client struct: */
-#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
-#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
- /* Must equal I2C_M_TEN below */
-#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */
-#define I2C_CLIENT_HOST_NOTIFY 0x40 /* We want to use I2C host notify */
-#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
-#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
- /* Must match I2C_M_STOP|IGNORE_NAK */
-
/* i2c adapter classes (bitmask) */
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
#define I2C_CLASS_SPD (1<<7) /* Memory modules */
-#define I2C_CLASS_DEPRECATED (1<<8) /* Warn users that adapter will stop using classes */
+/* Warn users that the adapter doesn't support classes anymore */
+#define I2C_CLASS_DEPRECATED (1<<8)
/* Internal numbers to terminate lists */
#define I2C_CLIENT_END 0xfffeU
@@ -703,29 +874,32 @@ i2c_unlock_adapter(struct i2c_adapter *adapter)
/* administration...
*/
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-extern int i2c_add_adapter(struct i2c_adapter *);
-extern void i2c_del_adapter(struct i2c_adapter *);
-extern int i2c_add_numbered_adapter(struct i2c_adapter *);
+#if IS_ENABLED(CONFIG_I2C)
+int i2c_add_adapter(struct i2c_adapter *adap);
+int devm_i2c_add_adapter(struct device *dev, struct i2c_adapter *adapter);
+void i2c_del_adapter(struct i2c_adapter *adap);
+int i2c_add_numbered_adapter(struct i2c_adapter *adap);
-extern int i2c_register_driver(struct module *, struct i2c_driver *);
-extern void i2c_del_driver(struct i2c_driver *);
+int i2c_register_driver(struct module *owner, struct i2c_driver *driver);
+void i2c_del_driver(struct i2c_driver *driver);
/* use a define to avoid include chaining to get THIS_MODULE */
#define i2c_add_driver(driver) \
i2c_register_driver(THIS_MODULE, driver)
-extern struct i2c_client *i2c_use_client(struct i2c_client *client);
-extern void i2c_release_client(struct i2c_client *client);
+static inline bool i2c_client_has_driver(struct i2c_client *client)
+{
+ return !IS_ERR_OR_NULL(client) && client->dev.driver;
+}
/* call the i2c_client->command() of all attached clients with
* the given arguments */
-extern void i2c_clients_command(struct i2c_adapter *adap,
- unsigned int cmd, void *arg);
+void i2c_clients_command(struct i2c_adapter *adap,
+ unsigned int cmd, void *arg);
-extern struct i2c_adapter *i2c_get_adapter(int nr);
-extern void i2c_put_adapter(struct i2c_adapter *adap);
-extern unsigned int i2c_adapter_depth(struct i2c_adapter *adapter);
+struct i2c_adapter *i2c_get_adapter(int nr);
+void i2c_put_adapter(struct i2c_adapter *adap);
+unsigned int i2c_adapter_depth(struct i2c_adapter *adapter);
void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_defaults);
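
A short sketch of consuming these timings in an adapter driver, letting missing firmware properties fall back to defaults:

	struct i2c_timings t;

	i2c_parse_fw_timings(dev, &t, true);
	/* program the IP core from t.bus_freq_hz, t.sda_hold_ns, ... */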
@@ -766,6 +940,9 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0);
}
+u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold);
+void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred);
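
Illustrative use around a DMA-capable transfer; the 8-byte threshold is an arbitrary assumption:

	static int example_xfer_dma(struct i2c_msg *msg)
	{
		u8 *dma_buf = i2c_get_dma_safe_msg_buf(msg, 8);

		if (!dma_buf)
			return -EINVAL;	/* below threshold or no memory; a real
					   driver would fall back to PIO here */
		/* ... hand dma_buf (not msg->buf) to the DMA engine ... */
		i2c_put_dma_safe_msg_buf(dma_buf, msg, true); /* true: data was transferred */
		return 0;
	}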
+
int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
/**
* module_i2c_driver() - Helper macro for registering a modular I2C driver
@@ -792,20 +969,41 @@ int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
#endif /* I2C */
+/* must call put_device() when done with returned i2c_client device */
+struct i2c_client *i2c_find_device_by_fwnode(struct fwnode_handle *fwnode);
+
+/* must call put_device() when done with returned i2c_adapter device */
+struct i2c_adapter *i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode);
+
+/* must call i2c_put_adapter() when done with returned i2c_adapter device */
+struct i2c_adapter *i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode);
+
#if IS_ENABLED(CONFIG_OF)
/* must call put_device() when done with returned i2c_client device */
-extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
+static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
+{
+ return i2c_find_device_by_fwnode(of_fwnode_handle(node));
+}
/* must call put_device() when done with returned i2c_adapter device */
-extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node);
+static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
+{
+ return i2c_find_adapter_by_fwnode(of_fwnode_handle(node));
+}
/* must call i2c_put_adapter() when done with returned i2c_adapter device */
-struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node);
+static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
+{
+ return i2c_get_adapter_by_fwnode(of_fwnode_handle(node));
+}
-extern const struct of_device_id
+const struct of_device_id
*i2c_of_match_device(const struct of_device_id *matches,
struct i2c_client *client);
+int of_i2c_get_board_info(struct device *dev, struct device_node *node,
+ struct i2c_board_info *info);
+
#else
static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
@@ -830,22 +1028,63 @@ static inline const struct of_device_id
return NULL;
}
+static inline int of_i2c_get_board_info(struct device *dev,
+ struct device_node *node,
+ struct i2c_board_info *info)
+{
+ return -ENOTSUPP;
+}
+
#endif /* CONFIG_OF */
+struct acpi_resource;
+struct acpi_resource_i2c_serialbus;
+
#if IS_ENABLED(CONFIG_ACPI)
+bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
+ struct acpi_resource_i2c_serialbus **i2c);
+int i2c_acpi_client_count(struct acpi_device *adev);
u32 i2c_acpi_find_bus_speed(struct device *dev);
-struct i2c_client *i2c_acpi_new_device(struct device *dev, int index,
- struct i2c_board_info *info);
+struct i2c_client *i2c_acpi_new_device_by_fwnode(struct fwnode_handle *fwnode,
+ int index,
+ struct i2c_board_info *info);
+struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle);
+bool i2c_acpi_waive_d0_probe(struct device *dev);
#else
+static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
+ struct acpi_resource_i2c_serialbus **i2c)
+{
+ return false;
+}
+static inline int i2c_acpi_client_count(struct acpi_device *adev)
+{
+ return 0;
+}
static inline u32 i2c_acpi_find_bus_speed(struct device *dev)
{
return 0;
}
-static inline struct i2c_client *i2c_acpi_new_device(struct device *dev,
- int index, struct i2c_board_info *info)
+static inline struct i2c_client *i2c_acpi_new_device_by_fwnode(
+ struct fwnode_handle *fwnode, int index,
+ struct i2c_board_info *info)
+{
+ return ERR_PTR(-ENODEV);
+}
+static inline struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
{
return NULL;
}
+static inline bool i2c_acpi_waive_d0_probe(struct device *dev)
+{
+ return false;
+}
#endif /* CONFIG_ACPI */
+static inline struct i2c_client *i2c_acpi_new_device(struct device *dev,
+ int index,
+ struct i2c_board_info *info)
+{
+ return i2c_acpi_new_device_by_fwnode(dev_fwnode(dev), index, info);
+}
+
#endif /* _LINUX_I2C_H */
diff --git a/include/linux/i2c/bfin_twi.h b/include/linux/i2c/bfin_twi.h
deleted file mode 100644
index 135a4e0876ae..000000000000
--- a/include/linux/i2c/bfin_twi.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * i2c-bfin-twi.h - interface to ADI TWI controller
- *
- * Copyright 2005-2014 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef __I2C_BFIN_TWI_H__
-#define __I2C_BFIN_TWI_H__
-
-#include <linux/types.h>
-#include <linux/i2c.h>
-
-/*
- * ADI twi registers layout
- */
-struct bfin_twi_regs {
- u16 clkdiv;
- u16 dummy1;
- u16 control;
- u16 dummy2;
- u16 slave_ctl;
- u16 dummy3;
- u16 slave_stat;
- u16 dummy4;
- u16 slave_addr;
- u16 dummy5;
- u16 master_ctl;
- u16 dummy6;
- u16 master_stat;
- u16 dummy7;
- u16 master_addr;
- u16 dummy8;
- u16 int_stat;
- u16 dummy9;
- u16 int_mask;
- u16 dummy10;
- u16 fifo_ctl;
- u16 dummy11;
- u16 fifo_stat;
- u16 dummy12;
- u32 __pad[20];
- u16 xmt_data8;
- u16 dummy13;
- u16 xmt_data16;
- u16 dummy14;
- u16 rcv_data8;
- u16 dummy15;
- u16 rcv_data16;
- u16 dummy16;
-};
-
-struct bfin_twi_iface {
- int irq;
- spinlock_t lock;
- char read_write;
- u8 command;
- u8 *transPtr;
- int readNum;
- int writeNum;
- int cur_mode;
- int manual_stop;
- int result;
- struct i2c_adapter adap;
- struct completion complete;
- struct i2c_msg *pmsg;
- int msg_num;
- int cur_msg;
- u16 saved_clkdiv;
- u16 saved_control;
- struct bfin_twi_regs __iomem *regs_base;
-};
-
-/* ******************** TWO-WIRE INTERFACE (TWI) MASKS ********************/
-/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y); ) */
-#define CLKLOW(x) ((x) & 0xFF) /* Periods Clock Is Held Low */
-#define CLKHI(y) (((y)&0xFF)<<0x8) /* Periods Before New Clock Low */
-
-/* TWI_PRESCALE Masks */
-#define PRESCALE 0x007F /* SCLKs Per Internal Time Reference (10MHz) */
-#define TWI_ENA 0x0080 /* TWI Enable */
-#define SCCB 0x0200 /* SCCB Compatibility Enable */
-
-/* TWI_SLAVE_CTL Masks */
-#define SEN 0x0001 /* Slave Enable */
-#define SADD_LEN 0x0002 /* Slave Address Length */
-#define STDVAL 0x0004 /* Slave Transmit Data Valid */
-#define NAK 0x0008 /* NAK Generated At Conclusion Of Transfer */
-#define GEN 0x0010 /* General Call Address Matching Enabled */
-
-/* TWI_SLAVE_STAT Masks */
-#define SDIR 0x0001 /* Slave Transfer Direction (RX/TX*) */
-#define GCALL 0x0002 /* General Call Indicator */
-
-/* TWI_MASTER_CTL Masks */
-#define MEN 0x0001 /* Master Mode Enable */
-#define MADD_LEN 0x0002 /* Master Address Length */
-#define MDIR 0x0004 /* Master Transmit Direction (RX/TX*) */
-#define FAST 0x0008 /* Use Fast Mode Timing Specs */
-#define STOP 0x0010 /* Issue Stop Condition */
-#define RSTART 0x0020 /* Repeat Start or Stop* At End Of Transfer */
-#define DCNT 0x3FC0 /* Data Bytes To Transfer */
-#define SDAOVR 0x4000 /* Serial Data Override */
-#define SCLOVR 0x8000 /* Serial Clock Override */
-
-/* TWI_MASTER_STAT Masks */
-#define MPROG 0x0001 /* Master Transfer In Progress */
-#define LOSTARB 0x0002 /* Lost Arbitration Indicator (Xfer Aborted) */
-#define ANAK 0x0004 /* Address Not Acknowledged */
-#define DNAK 0x0008 /* Data Not Acknowledged */
-#define BUFRDERR 0x0010 /* Buffer Read Error */
-#define BUFWRERR 0x0020 /* Buffer Write Error */
-#define SDASEN 0x0040 /* Serial Data Sense */
-#define SCLSEN 0x0080 /* Serial Clock Sense */
-#define BUSBUSY 0x0100 /* Bus Busy Indicator */
-
-/* TWI_INT_SRC and TWI_INT_ENABLE Masks */
-#define SINIT 0x0001 /* Slave Transfer Initiated */
-#define SCOMP 0x0002 /* Slave Transfer Complete */
-#define SERR 0x0004 /* Slave Transfer Error */
-#define SOVF 0x0008 /* Slave Overflow */
-#define MCOMP 0x0010 /* Master Transfer Complete */
-#define MERR 0x0020 /* Master Transfer Error */
-#define XMTSERV 0x0040 /* Transmit FIFO Service */
-#define RCVSERV 0x0080 /* Receive FIFO Service */
-
-/* TWI_FIFO_CTRL Masks */
-#define XMTFLUSH 0x0001 /* Transmit Buffer Flush */
-#define RCVFLUSH 0x0002 /* Receive Buffer Flush */
-#define XMTINTLEN 0x0004 /* Transmit Buffer Interrupt Length */
-#define RCVINTLEN 0x0008 /* Receive Buffer Interrupt Length */
-
-/* TWI_FIFO_STAT Masks */
-#define XMTSTAT 0x0003 /* Transmit FIFO Status */
-#define XMT_EMPTY 0x0000 /* Transmit FIFO Empty */
-#define XMT_HALF 0x0001 /* Transmit FIFO Has 1 Byte To Write */
-#define XMT_FULL 0x0003 /* Transmit FIFO Full (2 Bytes To Write) */
-
-#define RCVSTAT 0x000C /* Receive FIFO Status */
-#define RCV_EMPTY 0x0000 /* Receive FIFO Empty */
-#define RCV_HALF 0x0004 /* Receive FIFO Has 1 Byte To Read */
-#define RCV_FULL 0x000C /* Receive FIFO Full (2 Bytes To Read) */
-
-#endif
diff --git a/include/linux/i2c/dm355evm_msp.h b/include/linux/i2c/dm355evm_msp.h
deleted file mode 100644
index 372470350fab..000000000000
--- a/include/linux/i2c/dm355evm_msp.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * dm355evm_msp.h - support MSP430 microcontroller on DM355EVM board
- */
-#ifndef __LINUX_I2C_DM355EVM_MSP
-#define __LINUX_I2C_DM355EVM_MSP
-
-/*
- * Written against Spectrum's writeup for the A4 firmware revision,
- * and tweaked to match source and rev D2 schematics by removing CPLD
- * and NOR flash hooks (which were last appropriate in rev B boards).
- *
- * Note that the firmware supports a flavor of write posting ... to be
- * sure a write completes, issue another read or write.
- */
-
-/* utilities to access "registers" emulated by msp430 firmware */
-extern int dm355evm_msp_write(u8 value, u8 reg);
-extern int dm355evm_msp_read(u8 reg);
-
-
-/* command/control registers */
-#define DM355EVM_MSP_COMMAND 0x00
-# define MSP_COMMAND_NULL 0
-# define MSP_COMMAND_RESET_COLD 1
-# define MSP_COMMAND_RESET_WARM 2
-# define MSP_COMMAND_RESET_WARM_I 3
-# define MSP_COMMAND_POWEROFF 4
-# define MSP_COMMAND_IR_REINIT 5
-#define DM355EVM_MSP_STATUS 0x01
-# define MSP_STATUS_BAD_OFFSET BIT(0)
-# define MSP_STATUS_BAD_COMMAND BIT(1)
-# define MSP_STATUS_POWER_ERROR BIT(2)
-# define MSP_STATUS_RXBUF_OVERRUN BIT(3)
-#define DM355EVM_MSP_RESET 0x02 /* 0 bits == in reset */
-# define MSP_RESET_DC5 BIT(0)
-# define MSP_RESET_TVP5154 BIT(2)
-# define MSP_RESET_IMAGER BIT(3)
-# define MSP_RESET_ETHERNET BIT(4)
-# define MSP_RESET_SYS BIT(5)
-# define MSP_RESET_AIC33 BIT(7)
-
-/* GPIO registers ... bit patterns mostly match the source MSP ports */
-#define DM355EVM_MSP_LED 0x03 /* active low (MSP P4) */
-#define DM355EVM_MSP_SWITCH1 0x04 /* (MSP P5, masked) */
-# define MSP_SWITCH1_SW6_1 BIT(0)
-# define MSP_SWITCH1_SW6_2 BIT(1)
-# define MSP_SWITCH1_SW6_3 BIT(2)
-# define MSP_SWITCH1_SW6_4 BIT(3)
-# define MSP_SWITCH1_J1 BIT(4) /* NTSC/PAL */
-# define MSP_SWITCH1_MSP_INT BIT(5) /* active low */
-#define DM355EVM_MSP_SWITCH2 0x05 /* (MSP P6, masked) */
-# define MSP_SWITCH2_SW10 BIT(3)
-# define MSP_SWITCH2_SW11 BIT(4)
-# define MSP_SWITCH2_SW12 BIT(5)
-# define MSP_SWITCH2_SW13 BIT(6)
-# define MSP_SWITCH2_SW14 BIT(7)
-#define DM355EVM_MSP_SDMMC 0x06 /* (MSP P2, masked) */
-# define MSP_SDMMC_0_WP BIT(1)
-# define MSP_SDMMC_0_CD BIT(2) /* active low */
-# define MSP_SDMMC_1_WP BIT(3)
-# define MSP_SDMMC_1_CD BIT(4) /* active low */
-#define DM355EVM_MSP_FIRMREV 0x07 /* not a GPIO (out of order) */
-#define DM355EVM_MSP_VIDEO_IN 0x08 /* (MSP P3, masked) */
-# define MSP_VIDEO_IMAGER BIT(7) /* low == tvp5146 */
-
-/* power supply registers are currently omitted */
-
-/* RTC registers */
-#define DM355EVM_MSP_RTC_0 0x12 /* LSB */
-#define DM355EVM_MSP_RTC_1 0x13
-#define DM355EVM_MSP_RTC_2 0x14
-#define DM355EVM_MSP_RTC_3 0x15 /* MSB */
-
-/* input event queue registers; code == ((HIGH << 8) | LOW) */
-#define DM355EVM_MSP_INPUT_COUNT 0x16 /* decrement by reading LOW */
-#define DM355EVM_MSP_INPUT_HIGH 0x17
-#define DM355EVM_MSP_INPUT_LOW 0x18
-
-#endif /* __LINUX_I2C_DM355EVM_MSP */
diff --git a/include/linux/i2c/mlxcpld.h b/include/linux/i2c/mlxcpld.h
deleted file mode 100644
index b08dcb183fca..000000000000
--- a/include/linux/i2c/mlxcpld.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * mlxcpld.h - Mellanox I2C multiplexer support in CPLD
- *
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Michael Shych <michaels@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _LINUX_I2C_MLXCPLD_H
-#define _LINUX_I2C_MLXCPLD_H
-
-/* Platform data for the CPLD I2C multiplexers */
-
-/* mlxcpld_mux_plat_data - per mux data, used with i2c_register_board_info
- * @adap_ids - adapter array
- * @num_adaps - number of adapters
- * @sel_reg_addr - mux select register offset in CPLD space
- */
-struct mlxcpld_mux_plat_data {
- int *adap_ids;
- int num_adaps;
- int sel_reg_addr;
-};
-
-#endif /* _LINUX_I2C_MLXCPLD_H */
diff --git a/include/linux/i2c/pca954x.h b/include/linux/i2c/pca954x.h
deleted file mode 100644
index 1712677d5904..000000000000
--- a/include/linux/i2c/pca954x.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- *
- * pca954x.h - I2C multiplexer/switch support
- *
- * Copyright (c) 2008-2009 Rodolfo Giometti <giometti@linux.it>
- * Copyright (c) 2008-2009 Eurotech S.p.A. <info@eurotech.it>
- * Michael Lawnick <michael.lawnick.ext@nsn.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-
-#ifndef _LINUX_I2C_PCA954X_H
-#define _LINUX_I2C_PCA954X_H
-
-/* Platform data for the PCA954x I2C multiplexers */
-
-/* Per channel initialisation data:
- * @adap_id: bus number for the adapter. 0 = don't care
- * @deselect_on_exit: set this entry to 1, if your H/W needs deselection
- * of this channel after transaction.
- *
- */
-struct pca954x_platform_mode {
- int adap_id;
- unsigned int deselect_on_exit:1;
- unsigned int class;
-};
-
-/* Per mux/switch data, used with i2c_register_board_info */
-struct pca954x_platform_data {
- struct pca954x_platform_mode *modes;
- int num_modes;
-};
-
-#endif /* _LINUX_I2C_PCA954X_H */
diff --git a/include/linux/i2c/pxa-i2c.h b/include/linux/i2c/pxa-i2c.h
deleted file mode 100644
index 53aab243cbd8..000000000000
--- a/include/linux/i2c/pxa-i2c.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * i2c_pxa.h
- *
- * Copyright (C) 2002 Intrinsyc Software Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-#ifndef _I2C_PXA_H_
-#define _I2C_PXA_H_
-
-#if 0
-#define DEF_TIMEOUT 3
-#else
-/* need a longer timeout if we're dealing with the fact we may well be
- * looking at a multi-master environment
-*/
-#define DEF_TIMEOUT 32
-#endif
-
-#define BUS_ERROR (-EREMOTEIO)
-#define XFER_NAKED (-ECONNREFUSED)
-#define I2C_RETRY (-2000) /* an error has occurred retry transmit */
-
-/* ICR initialize bit values
-*
-* 15. FM 0 (100 Khz operation)
-* 14. UR 0 (No unit reset)
-* 13. SADIE 0 (Disables the unit from interrupting on slave addresses
-* matching its slave address)
-* 12. ALDIE 0 (Disables the unit from interrupt when it loses arbitration
-* in master mode)
-* 11. SSDIE 0 (Disables interrupts from a slave stop detected, in slave mode)
-* 10. BEIE 1 (Enable interrupts from detected bus errors, no ACK sent)
-* 9. IRFIE 1 (Enable interrupts from full buffer received)
-* 8. ITEIE 1 (Enables the I2C unit to interrupt when transmit buffer empty)
-* 7. GCD 1 (Disables i2c unit response to general call messages as a slave)
-* 6. IUE 0 (Disable unit until we change settings)
-* 5. SCLE 1 (Enables the i2c clock output for master mode (drives SCL)
-* 4. MA 0 (Only send stop with the ICR stop bit)
-* 3. TB 0 (We are not transmitting a byte initially)
-* 2. ACKNAK 0 (Send an ACK after the unit receives a byte)
-* 1. STOP 0 (Do not send a STOP)
-* 0. START 0 (Do not send a START)
-*
-*/
-#define I2C_ICR_INIT (ICR_BEIE | ICR_IRFIE | ICR_ITEIE | ICR_GCD | ICR_SCLE)
-
-/* I2C status register init values
- *
- * 10. BED 1 (Clear bus error detected)
- * 9. SAD 1 (Clear slave address detected)
- * 7. IRF 1 (Clear IDBR Receive Full)
- * 6. ITE 1 (Clear IDBR Transmit Empty)
- * 5. ALD 1 (Clear Arbitration Loss Detected)
- * 4. SSD 1 (Clear Slave Stop Detected)
- */
-#define I2C_ISR_INIT 0x7FF /* status register init */
-
-struct i2c_slave_client;
-
-struct i2c_pxa_platform_data {
- unsigned int slave_addr;
- struct i2c_slave_client *slave;
- unsigned int class;
- unsigned int use_pio :1;
- unsigned int fast_mode :1;
- unsigned int high_mode:1;
- unsigned char master_code;
- unsigned long rate;
-};
-
-extern void pxa_set_i2c_info(struct i2c_pxa_platform_data *info);
-
-#ifdef CONFIG_PXA27x
-extern void pxa27x_set_i2c_power_info(struct i2c_pxa_platform_data *info);
-#endif
-
-#ifdef CONFIG_PXA3xx
-extern void pxa3xx_set_i2c_power_info(struct i2c_pxa_platform_data *info);
-#endif
-
-#endif
diff --git a/include/linux/i2c/tc35876x.h b/include/linux/i2c/tc35876x.h
deleted file mode 100644
index cd6a51c71e7e..000000000000
--- a/include/linux/i2c/tc35876x.h
+++ /dev/null
@@ -1,11 +0,0 @@
-
-#ifndef _TC35876X_H
-#define _TC35876X_H
-
-struct tc35876x_platform_data {
- int gpio_bridge_reset;
- int gpio_panel_bl_en;
- int gpio_panel_vadd;
-};
-
-#endif /* _TC35876X_H */
diff --git a/include/linux/i3c/ccc.h b/include/linux/i3c/ccc.h
new file mode 100644
index 000000000000..ad59a4ae60d1
--- /dev/null
+++ b/include/linux/i3c/ccc.h
@@ -0,0 +1,385 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_CCC_H
+#define I3C_CCC_H
+
+#include <linux/bitops.h>
+#include <linux/i3c/device.h>
+
+/* I3C CCC (Common Command Codes) related definitions */
+#define I3C_CCC_DIRECT BIT(7)
+
+#define I3C_CCC_ID(id, broadcast) \
+ ((id) | ((broadcast) ? 0 : I3C_CCC_DIRECT))
+
+/* Commands valid in both broadcast and unicast modes */
+#define I3C_CCC_ENEC(broadcast) I3C_CCC_ID(0x0, broadcast)
+#define I3C_CCC_DISEC(broadcast) I3C_CCC_ID(0x1, broadcast)
+#define I3C_CCC_ENTAS(as, broadcast) I3C_CCC_ID(0x2 + (as), broadcast)
+#define I3C_CCC_RSTDAA(broadcast) I3C_CCC_ID(0x6, broadcast)
+#define I3C_CCC_SETMWL(broadcast) I3C_CCC_ID(0x9, broadcast)
+#define I3C_CCC_SETMRL(broadcast) I3C_CCC_ID(0xa, broadcast)
+#define I3C_CCC_SETXTIME(broadcast) ((broadcast) ? 0x28 : 0x98)
+#define I3C_CCC_VENDOR(id, broadcast) ((id) + ((broadcast) ? 0x61 : 0xe0))
+
+/* Broadcast-only commands */
+#define I3C_CCC_ENTDAA I3C_CCC_ID(0x7, true)
+#define I3C_CCC_DEFSLVS I3C_CCC_ID(0x8, true)
+#define I3C_CCC_ENTTM I3C_CCC_ID(0xb, true)
+#define I3C_CCC_ENTHDR(x) I3C_CCC_ID(0x20 + (x), true)
+
+/* Unicast-only commands */
+#define I3C_CCC_SETDASA I3C_CCC_ID(0x7, false)
+#define I3C_CCC_SETNEWDA I3C_CCC_ID(0x8, false)
+#define I3C_CCC_GETMWL I3C_CCC_ID(0xb, false)
+#define I3C_CCC_GETMRL I3C_CCC_ID(0xc, false)
+#define I3C_CCC_GETPID I3C_CCC_ID(0xd, false)
+#define I3C_CCC_GETBCR I3C_CCC_ID(0xe, false)
+#define I3C_CCC_GETDCR I3C_CCC_ID(0xf, false)
+#define I3C_CCC_GETSTATUS I3C_CCC_ID(0x10, false)
+#define I3C_CCC_GETACCMST I3C_CCC_ID(0x11, false)
+#define I3C_CCC_SETBRGTGT I3C_CCC_ID(0x13, false)
+#define I3C_CCC_GETMXDS I3C_CCC_ID(0x14, false)
+#define I3C_CCC_GETHDRCAP I3C_CCC_ID(0x15, false)
+#define I3C_CCC_GETXTIME I3C_CCC_ID(0x19, false)
+
+#define I3C_CCC_EVENT_SIR BIT(0)
+#define I3C_CCC_EVENT_MR BIT(1)
+#define I3C_CCC_EVENT_HJ BIT(3)
+
+/**
+ * struct i3c_ccc_events - payload passed to ENEC/DISEC CCC
+ *
+ * @events: bitmask of I3C_CCC_EVENT_xxx events.
+ *
+ * Depending on the CCC command, the specific events coming from all devices
+ * (broadcast version) or a specific device (unicast version) will be
+ * enabled (ENEC) or disabled (DISEC).
+ */
+struct i3c_ccc_events {
+ u8 events;
+};
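
For instance, enabling only in-band interrupts bus-wide would broadcast ENEC with a payload such as:

	struct i3c_ccc_events enec_events = {
		.events = I3C_CCC_EVENT_SIR,	/* leave MR and HJ disabled */
	};
	/* sent with command id I3C_CCC_ENEC(true) */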
+
+/**
+ * struct i3c_ccc_mwl - payload passed to SETMWL/GETMWL CCC
+ *
+ * @len: maximum write length in bytes
+ *
+ * The maximum write length is only applicable to SDR private messages or
+ * extended Write CCCs (like SETXTIME).
+ */
+struct i3c_ccc_mwl {
+ __be16 len;
+};
+
+/**
+ * struct i3c_ccc_mrl - payload passed to SETMRL/GETMRL CCC
+ *
+ * @read_len: maximum read length in bytes
+ * @ibi_len: maximum IBI payload length
+ *
+ * The maximum read length is only applicable to SDR private messages or
+ * extended Read CCCs (like GETXTIME).
+ * The IBI length is only valid if the I3C slave is IBI capable
+ * (%I3C_BCR_IBI_REQ_CAP is set).
+ */
+struct i3c_ccc_mrl {
+ __be16 read_len;
+ u8 ibi_len;
+} __packed;
+
+/**
+ * struct i3c_ccc_dev_desc - I3C/I2C device descriptor used for DEFSLVS
+ *
+ * @dyn_addr: dynamic address assigned to the I3C slave or 0 if the entry is
+ * describing an I2C slave.
+ * @dcr: DCR value (not applicable to entries describing I2C devices)
+ * @lvr: LVR value (not applicable to entries describing I3C devices)
+ * @bcr: BCR value or 0 if this entry is describing an I2C slave
+ * @static_addr: static address or 0 if the device does not have a static
+ * address
+ *
+ * The DEFSLVS command should be passed an array of i3c_ccc_dev_desc
+ * descriptors (one entry per I3C/I2C dev controlled by the master).
+ */
+struct i3c_ccc_dev_desc {
+ u8 dyn_addr;
+ union {
+ u8 dcr;
+ u8 lvr;
+ };
+ u8 bcr;
+ u8 static_addr;
+};
+
+/**
+ * struct i3c_ccc_defslvs - payload passed to DEFSLVS CCC
+ *
+ * @count: number of dev descriptors
+ * @master: descriptor describing the current master
+ * @slaves: array of descriptors describing slaves controlled by the
+ * current master
+ *
+ * Information passed to the broadcast DEFSLVS to propagate device
+ * information to all masters currently acting as slaves on the bus.
+ * This is only meaningful if you have more than one master.
+ */
+struct i3c_ccc_defslvs {
+ u8 count;
+ struct i3c_ccc_dev_desc master;
+ struct i3c_ccc_dev_desc slaves[];
+} __packed;
+
+/**
+ * enum i3c_ccc_test_mode - enum listing all available test modes
+ *
+ * @I3C_CCC_EXIT_TEST_MODE: exit test mode
+ * @I3C_CCC_VENDOR_TEST_MODE: enter vendor test mode
+ */
+enum i3c_ccc_test_mode {
+ I3C_CCC_EXIT_TEST_MODE,
+ I3C_CCC_VENDOR_TEST_MODE,
+};
+
+/**
+ * struct i3c_ccc_enttm - payload passed to ENTTM CCC
+ *
+ * @mode: one of the &enum i3c_ccc_test_mode modes
+ *
+ * Information passed to the ENTTM CCC to instruct an I3C device to enter a
+ * specific test mode.
+ */
+struct i3c_ccc_enttm {
+ u8 mode;
+};
+
+/**
+ * struct i3c_ccc_setda - payload passed to SETNEWDA and SETDASA CCCs
+ *
+ * @addr: dynamic address to assign to an I3C device
+ *
+ * Information passed to the SETNEWDA and SETDASA CCCs to assign/change the
+ * dynamic address of an I3C device.
+ */
+struct i3c_ccc_setda {
+ u8 addr;
+};
+
+/**
+ * struct i3c_ccc_getpid - payload passed to GETPID CCC
+ *
+ * @pid: 48-bit PID in big endian
+ */
+struct i3c_ccc_getpid {
+ u8 pid[6];
+};
+
+/**
+ * struct i3c_ccc_getbcr - payload passed to GETBCR CCC
+ *
+ * @bcr: BCR (Bus Characteristic Register) value
+ */
+struct i3c_ccc_getbcr {
+ u8 bcr;
+};
+
+/**
+ * struct i3c_ccc_getdcr - payload passed to GETDCR CCC
+ *
+ * @dcr: DCR (Device Characteristic Register) value
+ */
+struct i3c_ccc_getdcr {
+ u8 dcr;
+};
+
+#define I3C_CCC_STATUS_PENDING_INT(status) ((status) & GENMASK(3, 0))
+#define I3C_CCC_STATUS_PROTOCOL_ERROR BIT(5)
+#define I3C_CCC_STATUS_ACTIVITY_MODE(status) \
+ (((status) & GENMASK(7, 6)) >> 6)
+
+/**
+ * struct i3c_ccc_getstatus - payload passed to GETSTATUS CCC
+ *
+ * @status: status of the I3C slave (see I3C_CCC_STATUS_xxx macros for more
+ * information).
+ */
+struct i3c_ccc_getstatus {
+ __be16 status;
+};
+
+/**
+ * struct i3c_ccc_getaccmst - payload passed to GETACCMST CCC
+ *
+ * @newmaster: address of the master taking bus ownership
+ */
+struct i3c_ccc_getaccmst {
+ u8 newmaster;
+};
+
+/**
+ * struct i3c_ccc_bridged_slave_desc - bridged slave descriptor
+ *
+ * @addr: dynamic address of the bridged device
+ * @id: ID of the slave device behind the bridge
+ */
+struct i3c_ccc_bridged_slave_desc {
+ u8 addr;
+ __be16 id;
+} __packed;
+
+/**
+ * struct i3c_ccc_setbrgtgt - payload passed to SETBRGTGT CCC
+ *
+ * @count: number of bridged slaves
+ * @bslaves: bridged slave descriptors
+ */
+struct i3c_ccc_setbrgtgt {
+ u8 count;
+ struct i3c_ccc_bridged_slave_desc bslaves[];
+} __packed;
+
+/**
+ * enum i3c_sdr_max_data_rate - max data rate values for private SDR transfers
+ */
+enum i3c_sdr_max_data_rate {
+ I3C_SDR0_FSCL_MAX,
+ I3C_SDR1_FSCL_8MHZ,
+ I3C_SDR2_FSCL_6MHZ,
+ I3C_SDR3_FSCL_4MHZ,
+ I3C_SDR4_FSCL_2MHZ,
+};
+
+/**
+ * enum i3c_tsco - clock to data turn-around
+ */
+enum i3c_tsco {
+ I3C_TSCO_8NS,
+ I3C_TSCO_9NS,
+ I3C_TSCO_10NS,
+ I3C_TSCO_11NS,
+ I3C_TSCO_12NS,
+};
+
+#define I3C_CCC_MAX_SDR_FSCL_MASK GENMASK(2, 0)
+#define I3C_CCC_MAX_SDR_FSCL(x) ((x) & I3C_CCC_MAX_SDR_FSCL_MASK)
+
+/**
+ * struct i3c_ccc_getmxds - payload passed to GETMXDS CCC
+ *
+ * @maxwr: write limitations
+ * @maxrd: read limitations
+ * @maxrdturn: maximum read turn-around expressed in micro-seconds and
+ * little-endian formatted
+ */
+struct i3c_ccc_getmxds {
+ u8 maxwr;
+ u8 maxrd;
+ u8 maxrdturn[3];
+} __packed;
+
+#define I3C_CCC_HDR_MODE(mode) BIT(mode)
+
+/**
+ * struct i3c_ccc_gethdrcap - payload passed to GETHDRCAP CCC
+ *
+ * @modes: bitmap of supported HDR modes
+ */
+struct i3c_ccc_gethdrcap {
+ u8 modes;
+} __packed;
+
+/**
+ * enum i3c_ccc_setxtime_subcmd - SETXTIME sub-commands
+ */
+enum i3c_ccc_setxtime_subcmd {
+ I3C_CCC_SETXTIME_ST = 0x7f,
+ I3C_CCC_SETXTIME_DT = 0xbf,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE0 = 0xdf,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE1 = 0xef,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE2 = 0xf7,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE3 = 0xfb,
+ I3C_CCC_SETXTIME_ASYNC_TRIGGER = 0xfd,
+ I3C_CCC_SETXTIME_TPH = 0x3f,
+ I3C_CCC_SETXTIME_TU = 0x9f,
+ I3C_CCC_SETXTIME_ODR = 0x8f,
+};
+
+/**
+ * struct i3c_ccc_setxtime - payload passed to SETXTIME CCC
+ *
+ * @subcmd: one of the sub-commands defined in &enum i3c_ccc_setxtime_subcmd
+ * @data: sub-command payload. Amount of data is determined by
+ * &i3c_ccc_setxtime->subcmd
+ */
+struct i3c_ccc_setxtime {
+ u8 subcmd;
+ u8 data[];
+} __packed;
+
+#define I3C_CCC_GETXTIME_SYNC_MODE BIT(0)
+#define I3C_CCC_GETXTIME_ASYNC_MODE(x) BIT((x) + 1)
+#define I3C_CCC_GETXTIME_OVERFLOW BIT(7)
+
+/**
+ * struct i3c_ccc_getxtime - payload retrieved from GETXTIME CCC
+ *
+ * @supported_modes: bitmap describing supported XTIME modes
+ * @state: current status (enabled mode and overflow status)
+ * @frequency: slave's internal oscillator frequency in 500 kHz steps
+ * @inaccuracy: slave's internal oscillator inaccuracy in 0.1% steps
+ */
+struct i3c_ccc_getxtime {
+ u8 supported_modes;
+ u8 state;
+ u8 frequency;
+ u8 inaccuracy;
+} __packed;
+
+/**
+ * struct i3c_ccc_cmd_payload - CCC payload
+ *
+ * @len: payload length
+ * @data: payload data. This buffer must be DMA-able
+ */
+struct i3c_ccc_cmd_payload {
+ u16 len;
+ void *data;
+};
+
+/**
+ * struct i3c_ccc_cmd_dest - CCC command destination
+ *
+ * @addr: can be an I3C device address or the broadcast address if this is a
+ * broadcast CCC
+ * @payload: payload to be sent to this device or broadcasted
+ */
+struct i3c_ccc_cmd_dest {
+ u8 addr;
+ struct i3c_ccc_cmd_payload payload;
+};
+
+/**
+ * struct i3c_ccc_cmd - CCC command
+ *
+ * @rnw: true if the CCC should retrieve data from the device. Only valid for
+ * unicast commands
+ * @id: CCC command id
+ * @ndests: number of destinations. Should always be one for broadcast commands
+ * @dests: array of destinations and associated payload for this CCC. Most of
+ * the time, only one destination is provided
+ * @err: I3C error code
+ */
+struct i3c_ccc_cmd {
+ u8 rnw;
+ u8 id;
+ unsigned int ndests;
+ struct i3c_ccc_cmd_dest *dests;
+ enum i3c_error_code err;
+};
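+
+/*
+ * For illustration only (assuming the I3C_CCC_GETBCR id defined earlier in
+ * this header; @dyn_addr is hypothetical): a unicast, direction-read GETBCR
+ * command could be assembled like this before being handed to the
+ * controller's ->send_ccc_cmd() hook:
+ *
+ *	struct i3c_ccc_getbcr bcr;	// must be DMA-able in a real driver
+ *	struct i3c_ccc_cmd_dest dest = {
+ *		.addr = dyn_addr,
+ *		.payload = { .len = sizeof(bcr), .data = &bcr },
+ *	};
+ *	struct i3c_ccc_cmd cmd = {
+ *		.rnw = true,
+ *		.id = I3C_CCC_GETBCR,
+ *		.ndests = 1,
+ *		.dests = &dest,
+ *	};
+ */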
+
+#endif /* I3C_CCC_H */
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
new file mode 100644
index 000000000000..90fa83464f00
--- /dev/null
+++ b/include/linux/i3c/device.h
@@ -0,0 +1,346 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_DEV_H
+#define I3C_DEV_H
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/kconfig.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+/**
+ * enum i3c_error_code - I3C error codes
+ *
+ * @I3C_ERROR_UNKNOWN: unknown error, usually means the error is not I3C
+ * related
+ * @I3C_ERROR_M0: M0 error
+ * @I3C_ERROR_M1: M1 error
+ * @I3C_ERROR_M2: M2 error
+ *
+ * These are the standard error codes as defined by the I3C specification.
+ * When -EIO is returned by i3c_device_do_priv_xfers() or
+ * i3c_device_send_hdr_cmds(), one can check the error code in
+ * &struct i3c_priv_xfer.err or &struct i3c_hdr_cmd.err to get a better
+ * idea of what went wrong.
+ *
+ */
+enum i3c_error_code {
+ I3C_ERROR_UNKNOWN = 0,
+ I3C_ERROR_M0 = 1,
+ I3C_ERROR_M1,
+ I3C_ERROR_M2,
+};
+
+/**
+ * enum i3c_hdr_mode - HDR mode ids
+ * @I3C_HDR_DDR: DDR mode
+ * @I3C_HDR_TSP: TSP mode
+ * @I3C_HDR_TSL: TSL mode
+ */
+enum i3c_hdr_mode {
+ I3C_HDR_DDR,
+ I3C_HDR_TSP,
+ I3C_HDR_TSL,
+};
+
+/**
+ * struct i3c_priv_xfer - I3C SDR private transfer
+ * @rnw: encodes the transfer direction. true for a read, false for a write
+ * @len: transfer length in bytes
+ * @data: input/output buffer
+ * @data.in: input buffer. Must point to a DMA-able buffer
+ * @data.out: output buffer. Must point to a DMA-able buffer
+ * @err: I3C error code
+ */
+struct i3c_priv_xfer {
+ u8 rnw;
+ u16 len;
+ union {
+ void *in;
+ const void *out;
+ } data;
+ enum i3c_error_code err;
+};
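+
+/*
+ * A sketch (hypothetical register and device, error handling elided) of a
+ * write-then-read private transfer using this structure. Buffers must be
+ * DMA-able, hence the kmalloc'ed buffers instead of stack variables:
+ *
+ *	u8 *reg = kmalloc(1, GFP_KERNEL);
+ *	u8 *val = kmalloc(1, GFP_KERNEL);
+ *	struct i3c_priv_xfer xfers[2] = {
+ *		{ .rnw = false, .len = 1, .data.out = reg },
+ *		{ .rnw = true, .len = 1, .data.in = val },
+ *	};
+ *
+ *	*reg = 0x10;	// hypothetical register address
+ *	ret = i3c_device_do_priv_xfers(i3cdev, xfers, 2);
+ */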
+
+/**
+ * enum i3c_dcr - I3C DCR values
+ * @I3C_DCR_GENERIC_DEVICE: generic I3C device
+ */
+enum i3c_dcr {
+ I3C_DCR_GENERIC_DEVICE = 0,
+};
+
+#define I3C_PID_MANUF_ID(pid) (((pid) & GENMASK_ULL(47, 33)) >> 33)
+#define I3C_PID_RND_LOWER_32BITS(pid) (!!((pid) & BIT_ULL(32)))
+#define I3C_PID_RND_VAL(pid) ((pid) & GENMASK_ULL(31, 0))
+#define I3C_PID_PART_ID(pid) (((pid) & GENMASK_ULL(31, 16)) >> 16)
+#define I3C_PID_INSTANCE_ID(pid) (((pid) & GENMASK_ULL(15, 12)) >> 12)
+#define I3C_PID_EXTRA_INFO(pid) ((pid) & GENMASK_ULL(11, 0))
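+
+/*
+ * Illustration of the PID accessors above, given a 48-bit Provisional ID
+ * (@pid hypothetical, e.g. retrieved through a GETPID CCC):
+ *
+ *	pr_info("manuf: %#llx part: %#llx instance: %#llx\n",
+ *		I3C_PID_MANUF_ID(pid), I3C_PID_PART_ID(pid),
+ *		I3C_PID_INSTANCE_ID(pid));
+ */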
+
+#define I3C_BCR_DEVICE_ROLE(bcr) ((bcr) & GENMASK(7, 6))
+#define I3C_BCR_I3C_SLAVE (0 << 6)
+#define I3C_BCR_I3C_MASTER (1 << 6)
+#define I3C_BCR_HDR_CAP BIT(5)
+#define I3C_BCR_BRIDGE BIT(4)
+#define I3C_BCR_OFFLINE_CAP BIT(3)
+#define I3C_BCR_IBI_PAYLOAD BIT(2)
+#define I3C_BCR_IBI_REQ_CAP BIT(1)
+#define I3C_BCR_MAX_DATA_SPEED_LIM BIT(0)
+
+/**
+ * struct i3c_device_info - I3C device information
+ * @pid: Provisional ID
+ * @bcr: Bus Characteristic Register
+ * @dcr: Device Characteristic Register
+ * @static_addr: static/I2C address
+ * @dyn_addr: dynamic address
+ * @hdr_cap: supported HDR modes
+ * @max_read_ds: max read speed information
+ * @max_write_ds: max write speed information
+ * @max_ibi_len: max IBI payload length
+ * @max_read_turnaround: max read turn-around time in micro-seconds
+ * @max_read_len: max private SDR read length in bytes
+ * @max_write_len: max private SDR write length in bytes
+ *
+ * This is all basic information that should be advertised by an I3C device.
+ * Some of it is optional depending on the device type and device
+ * capabilities.
+ * For each I3C slave attached to a master with
+ * i3c_master_add_i3c_dev_locked(), the core will send the relevant CCC
+ * commands to retrieve this data.
+ */
+struct i3c_device_info {
+ u64 pid;
+ u8 bcr;
+ u8 dcr;
+ u8 static_addr;
+ u8 dyn_addr;
+ u8 hdr_cap;
+ u8 max_read_ds;
+ u8 max_write_ds;
+ u8 max_ibi_len;
+ u32 max_read_turnaround;
+ u16 max_read_len;
+ u16 max_write_len;
+};
+
+/*
+ * I3C device internals are kept hidden from I3C device users. It's just
+ * simpler to refactor things when everything goes through getter/setters, and
+ * I3C device drivers should not have to worry about internal representation
+ * anyway.
+ */
+struct i3c_device;
+
+/* These macros should be used to fill in i3c_device_id entries. */
+#define I3C_MATCH_MANUF_AND_PART (I3C_MATCH_MANUF | I3C_MATCH_PART)
+
+#define I3C_DEVICE(_manufid, _partid, _drvdata) \
+ { \
+ .match_flags = I3C_MATCH_MANUF_AND_PART, \
+ .manuf_id = _manufid, \
+ .part_id = _partid, \
+ .data = _drvdata, \
+ }
+
+#define I3C_DEVICE_EXTRA_INFO(_manufid, _partid, _info, _drvdata) \
+ { \
+ .match_flags = I3C_MATCH_MANUF_AND_PART | \
+ I3C_MATCH_EXTRA_INFO, \
+ .manuf_id = _manufid, \
+ .part_id = _partid, \
+ .extra_info = _info, \
+ .data = _drvdata, \
+ }
+
+#define I3C_CLASS(_dcr, _drvdata)				\
+	{							\
+		.match_flags = I3C_MATCH_DCR,			\
+		.dcr = _dcr,					\
+		.data = _drvdata,				\
+	}
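+
+/*
+ * Example match table built with the macros above (the manufacturer and
+ * part ids are made up for illustration):
+ *
+ *	static const struct i3c_device_id acme_i3c_ids[] = {
+ *		I3C_DEVICE(0x123, 0x456, NULL),
+ *		{ },
+ *	};
+ */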
+
+/**
+ * struct i3c_driver - I3C device driver
+ * @driver: inherit from device_driver
+ * @probe: I3C device probe method
+ * @remove: I3C device remove method
+ * @id_table: I3C device match table. Will be used by the framework to decide
+ * which device to bind to this driver
+ */
+struct i3c_driver {
+ struct device_driver driver;
+ int (*probe)(struct i3c_device *dev);
+ void (*remove)(struct i3c_device *dev);
+ const struct i3c_device_id *id_table;
+};
+
+static inline struct i3c_driver *drv_to_i3cdrv(struct device_driver *drv)
+{
+ return container_of(drv, struct i3c_driver, driver);
+}
+
+struct device *i3cdev_to_dev(struct i3c_device *i3cdev);
+
+/**
+ * dev_to_i3cdev() - Returns the I3C device containing @dev
+ * @__dev: device object
+ *
+ * Return: a pointer to an I3C device object.
+ */
+#define dev_to_i3cdev(__dev) container_of_const(__dev, struct i3c_device, dev)
+
+const struct i3c_device_id *
+i3c_device_match_id(struct i3c_device *i3cdev,
+ const struct i3c_device_id *id_table);
+
+static inline void i3cdev_set_drvdata(struct i3c_device *i3cdev,
+ void *data)
+{
+ struct device *dev = i3cdev_to_dev(i3cdev);
+
+ dev_set_drvdata(dev, data);
+}
+
+static inline void *i3cdev_get_drvdata(struct i3c_device *i3cdev)
+{
+ struct device *dev = i3cdev_to_dev(i3cdev);
+
+ return dev_get_drvdata(dev);
+}
+
+int i3c_driver_register_with_owner(struct i3c_driver *drv,
+ struct module *owner);
+void i3c_driver_unregister(struct i3c_driver *drv);
+
+#define i3c_driver_register(__drv) \
+ i3c_driver_register_with_owner(__drv, THIS_MODULE)
+
+/**
+ * module_i3c_driver() - Register a module providing an I3C driver
+ * @__drv: the I3C driver to register
+ *
+ * Provide generic init/exit functions that simply register/unregister an I3C
+ * driver.
+ * Should be used by any driver that does not require extra init/cleanup steps.
+ */
+#define module_i3c_driver(__drv) \
+ module_driver(__drv, i3c_driver_register, i3c_driver_unregister)
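+
+/*
+ * A minimal driver skeleton (all "acme" names hypothetical) built on the
+ * helpers above; acme_i3c_ids would be a match table as shown earlier:
+ *
+ *	static int acme_probe(struct i3c_device *i3cdev)
+ *	{
+ *		struct i3c_device_info info;
+ *
+ *		i3c_device_get_info(i3cdev, &info);
+ *		return 0;
+ *	}
+ *
+ *	static void acme_remove(struct i3c_device *i3cdev)
+ *	{
+ *	}
+ *
+ *	static struct i3c_driver acme_driver = {
+ *		.driver = { .name = "acme" },
+ *		.probe = acme_probe,
+ *		.remove = acme_remove,
+ *		.id_table = acme_i3c_ids,
+ *	};
+ *	module_i3c_driver(acme_driver);
+ */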
+
+/**
+ * i3c_i2c_driver_register() - Register an i2c and an i3c driver
+ * @i3cdrv: the I3C driver to register
+ * @i2cdrv: the I2C driver to register
+ *
+ * This function registers both @i2cdrv and @i3cdrv, and fails if one of these
+ * registrations fails. This is mainly useful for devices that support both I2C
+ * and I3C modes.
+ * Note that when CONFIG_I3C is not enabled, this function only registers the
+ * I2C driver.
+ *
+ * Return: 0 if both registrations succeed, a negative error code otherwise.
+ */
+static inline int i3c_i2c_driver_register(struct i3c_driver *i3cdrv,
+ struct i2c_driver *i2cdrv)
+{
+ int ret;
+
+ ret = i2c_add_driver(i2cdrv);
+ if (ret || !IS_ENABLED(CONFIG_I3C))
+ return ret;
+
+ ret = i3c_driver_register(i3cdrv);
+ if (ret)
+ i2c_del_driver(i2cdrv);
+
+ return ret;
+}
+
+/**
+ * i3c_i2c_driver_unregister() - Unregister an i2c and an i3c driver
+ * @i3cdrv: the I3C driver to unregister
+ * @i2cdrv: the I2C driver to unregister
+ *
+ * This function unregisters both @i3cdrv and @i2cdrv.
+ * Note that when CONFIG_I3C is not enabled, this function only unregisters the
+ * @i2cdrv.
+ */
+static inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv,
+ struct i2c_driver *i2cdrv)
+{
+ if (IS_ENABLED(CONFIG_I3C))
+ i3c_driver_unregister(i3cdrv);
+
+ i2c_del_driver(i2cdrv);
+}
+
+/**
+ * module_i3c_i2c_driver() - Register a module providing an I3C and an I2C
+ * driver
+ * @__i3cdrv: the I3C driver to register
+ * @__i2cdrv: the I2C driver to register
+ *
+ * Provide generic init/exit functions that simply register/unregister an I3C
+ * and an I2C driver.
+ * This macro can be used even if CONFIG_I3C is disabled; in this case, only
+ * the I2C driver will be registered.
+ * Should be used by any driver that does not require extra init/cleanup steps.
+ */
+#define module_i3c_i2c_driver(__i3cdrv, __i2cdrv) \
+ module_driver(__i3cdrv, \
+ i3c_i2c_driver_register, \
+ i3c_i2c_driver_unregister, \
+ __i2cdrv)
+
+int i3c_device_do_priv_xfers(struct i3c_device *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers);
+
+int i3c_device_do_setdasa(struct i3c_device *dev);
+
+void i3c_device_get_info(const struct i3c_device *dev, struct i3c_device_info *info);
+
+struct i3c_ibi_payload {
+ unsigned int len;
+ const void *data;
+};
+
+/**
+ * struct i3c_ibi_setup - IBI setup object
+ * @max_payload_len: maximum length of the payload associated with an IBI. If one
+ * IBI appears to have a payload that is bigger than this
+ * number, the IBI will be rejected.
+ * @num_slots: number of pre-allocated IBI slots. This should be chosen so that
+ * the system never runs out of IBI slots, otherwise you'll lose
+ * IBIs.
+ * @handler: IBI handler, called every time an IBI is received. This handler
+ * is called in a workqueue context. It is allowed to sleep and send new
+ * messages on the bus, though it's recommended to keep the
+ * processing done there as fast as possible to avoid delaying
+ * processing of other IBIs queued on the same workqueue.
+ *
+ * Temporary structure used to pass information to i3c_device_request_ibi().
+ * This object can be allocated on the stack since i3c_device_request_ibi()
+ * copies every bit of information and does not use it after
+ * i3c_device_request_ibi() has returned.
+ */
+struct i3c_ibi_setup {
+ unsigned int max_payload_len;
+ unsigned int num_slots;
+ void (*handler)(struct i3c_device *dev,
+ const struct i3c_ibi_payload *payload);
+};
+
+int i3c_device_request_ibi(struct i3c_device *dev,
+ const struct i3c_ibi_setup *setup);
+void i3c_device_free_ibi(struct i3c_device *dev);
+int i3c_device_enable_ibi(struct i3c_device *dev);
+int i3c_device_disable_ibi(struct i3c_device *dev);
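+
+/*
+ * Typical IBI request flow from a device driver, sketched with hypothetical
+ * names and no error handling:
+ *
+ *	static void acme_ibi_handler(struct i3c_device *dev,
+ *				     const struct i3c_ibi_payload *payload)
+ *	{
+ *		// runs from the controller workqueue, may sleep
+ *	}
+ *
+ *	struct i3c_ibi_setup req = {
+ *		.max_payload_len = 2,
+ *		.num_slots = 10,
+ *		.handler = acme_ibi_handler,
+ *	};
+ *
+ *	ret = i3c_device_request_ibi(i3cdev, &req);
+ *	if (!ret)
+ *		ret = i3c_device_enable_ibi(i3cdev);
+ */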
+
+#endif /* I3C_DEV_H */
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
new file mode 100644
index 000000000000..0b52da4f2346
--- /dev/null
+++ b/include/linux/i3c/master.h
@@ -0,0 +1,655 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_MASTER_H
+#define I3C_MASTER_H
+
+#include <asm/bitsperlong.h>
+
+#include <linux/bitops.h>
+#include <linux/i2c.h>
+#include <linux/i3c/ccc.h>
+#include <linux/i3c/device.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#define I3C_HOT_JOIN_ADDR 0x2
+#define I3C_BROADCAST_ADDR 0x7e
+#define I3C_MAX_ADDR GENMASK(6, 0)
+
+struct i2c_client;
+
+struct i3c_master_controller;
+struct i3c_bus;
+struct i3c_device;
+
+/**
+ * struct i3c_i2c_dev_desc - Common part of the I3C/I2C device descriptor
+ * @node: node element used to insert the slot into the I2C or I3C device
+ * list
+ * @master: I3C master that instantiated this device. Will be used to do
+ * I2C/I3C transfers
+ * @master_priv: master private data assigned to the device. Can be used to
+ * add master specific information
+ *
+ * This structure describes common I3C/I2C dev information.
+ */
+struct i3c_i2c_dev_desc {
+ struct list_head node;
+ struct i3c_master_controller *master;
+ void *master_priv;
+};
+
+#define I3C_LVR_I2C_INDEX_MASK GENMASK(7, 5)
+#define I3C_LVR_I2C_INDEX(x) ((x) << 5)
+#define I3C_LVR_I2C_FM_MODE BIT(4)
+
+#define I2C_MAX_ADDR GENMASK(6, 0)
+
+/**
+ * struct i2c_dev_boardinfo - I2C device board information
+ * @node: used to insert the boardinfo object in the I2C boardinfo list
+ * @base: regular I2C board information
+ * @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about
+ * the I2C device limitations
+ *
+ * This structure is used to attach board-level information to an I2C device.
+ * Each I2C device connected on the I3C bus should have one.
+ */
+struct i2c_dev_boardinfo {
+ struct list_head node;
+ struct i2c_board_info base;
+ u8 lvr;
+};
+
+/**
+ * struct i2c_dev_desc - I2C device descriptor
+ * @common: common part of the I2C device descriptor
+ * @dev: I2C device object registered to the I2C framework
+ * @addr: I2C device address
+ * @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about
+ * the I2C device limitations
+ *
+ * Each I2C device connected on the bus will have an i2c_dev_desc.
+ * This object is created by the core and later attached to the controller
+ * using &struct i3c_master_controller->ops->attach_i2c_dev().
+ *
+ * &struct i2c_dev_desc is the internal representation of an I2C device
+ * connected on an I3C bus. This object is also passed to all
+ * &struct i3c_master_controller_ops hooks.
+ */
+struct i2c_dev_desc {
+ struct i3c_i2c_dev_desc common;
+ struct i2c_client *dev;
+ u16 addr;
+ u8 lvr;
+};
+
+/**
+ * struct i3c_ibi_slot - I3C IBI (In-Band Interrupt) slot
+ * @work: work associated with this slot. The IBI handler will be called from
+ * there
+ * @dev: the I3C device that has generated this IBI
+ * @len: length of the payload associated to this IBI
+ * @data: payload buffer
+ *
+ * An IBI slot is an object pre-allocated by the controller and used when an
+ * IBI comes in.
+ * Every time an IBI comes in, the I3C master driver should find a free IBI
+ * slot in its IBI slot pool, retrieve the IBI payload and queue the IBI using
+ * i3c_master_queue_ibi().
+ *
+ * How IBI slots are allocated is left to the I3C master driver, though, for
+ * simple kmalloc-based allocation, the generic IBI slot pool can be used.
+ */
+struct i3c_ibi_slot {
+ struct work_struct work;
+ struct i3c_dev_desc *dev;
+ unsigned int len;
+ void *data;
+};
+
+/**
+ * struct i3c_device_ibi_info - IBI information attached to a specific device
+ * @all_ibis_handled: used to be informed when no more IBIs are waiting to be
+ * processed. Used by i3c_device_disable_ibi() to wait for
+ * all IBIs to be dequeued
+ * @pending_ibis: count the number of pending IBIs. Each pending IBI has its
+ * work element queued to the controller workqueue
+ * @max_payload_len: maximum payload length for an IBI coming from this device.
+ * This value is specified when calling
+ * i3c_device_request_ibi() and should not change at run
+ * time. All IBIs exceeding this limit should be
+ * rejected by the master
+ * @num_slots: number of IBI slots reserved for this device
+ * @enabled: reflect the IBI status
+ * @handler: IBI handler specified at i3c_device_request_ibi() call time. This
+ * handler will be called from the controller workqueue, and as such
+ * is allowed to sleep (though it is recommended to process the IBI
+ * as fast as possible to not stall processing of other IBIs queued
+ * on the same workqueue).
+ * New I3C messages can be sent from the IBI handler
+ *
+ * The &struct i3c_device_ibi_info object is allocated when
+ * i3c_device_request_ibi() is called and attached to a specific device. This
+ * object is here to manage IBIs coming from a specific I3C device.
+ *
+ * Note that this structure is the generic view of the IBI management
+ * infrastructure. I3C master drivers may have their own internal
+ * representation which they can associate to the device using
+ * controller-private data.
+ */
+struct i3c_device_ibi_info {
+ struct completion all_ibis_handled;
+ atomic_t pending_ibis;
+ unsigned int max_payload_len;
+ unsigned int num_slots;
+ unsigned int enabled;
+ void (*handler)(struct i3c_device *dev,
+ const struct i3c_ibi_payload *payload);
+};
+
+/**
+ * struct i3c_dev_boardinfo - I3C device board information
+ * @node: used to insert the boardinfo object in the I3C boardinfo list
+ * @init_dyn_addr: initial dynamic address requested by the FW. We provide no
+ * guarantee that the device will end up using this address,
+ * but try our best to assign this specific address to the
+ * device
+ * @static_addr: static address the I3C device listens on before it's been
+ * assigned a dynamic address by the master. Will be used during
+ * bus initialization to assign it a specific dynamic address
+ * before starting DAA (Dynamic Address Assignment)
+ * @pid: I3C Provisional ID exposed by the device. This is a unique identifier
+ * that may be used to attach boardinfo to i3c_dev_desc when the device
+ * does not have a static address
+ * @of_node: optional DT node in case the device has been described in the DT
+ *
+ * This structure is used to attach board-level information to an I3C device.
+ * Not all I3C devices connected on the bus will have a boardinfo. It's only
+ * needed if you want to attach extra resources to a device or assign it a
+ * specific dynamic address.
+ */
+struct i3c_dev_boardinfo {
+ struct list_head node;
+ u8 init_dyn_addr;
+ u8 static_addr;
+ u64 pid;
+ struct device_node *of_node;
+};
+
+/**
+ * struct i3c_dev_desc - I3C device descriptor
+ * @common: common part of the I3C device descriptor
+ * @info: I3C device information. Will be automatically filled when you create
+ * your device with i3c_master_add_i3c_dev_locked()
+ * @ibi_lock: lock used to protect the &struct i3c_device->ibi
+ * @ibi: IBI info attached to a device. Should be NULL until
+ * i3c_device_request_ibi() is called
+ * @dev: pointer to the I3C device object exposed to I3C device drivers. This
+ * should never be accessed from I3C master controller drivers. Only core
+ * code should manipulate it when updating the dev <-> desc link or
+ * when propagating IBI events to the driver
+ * @boardinfo: pointer to the boardinfo attached to this I3C device
+ *
+ * Internal representation of an I3C device. This object is only used by the
+ * core and passed to I3C master controller drivers when they're requested to
+ * do some operations on the device.
+ * The core maintains the link between the internal I3C dev descriptor and the
+ * object exposed to the I3C device drivers (&struct i3c_device).
+ */
+struct i3c_dev_desc {
+ struct i3c_i2c_dev_desc common;
+ struct i3c_device_info info;
+ struct mutex ibi_lock;
+ struct i3c_device_ibi_info *ibi;
+ struct i3c_device *dev;
+ const struct i3c_dev_boardinfo *boardinfo;
+};
+
+/**
+ * struct i3c_device - I3C device object
+ * @dev: device object to register the I3C dev to the device model
+ * @desc: pointer to an i3c device descriptor object. This link is updated
+ * every time the I3C device is rediscovered with a different dynamic
+ * address assigned
+ * @bus: I3C bus this device is attached to
+ *
+ * I3C device object exposed to I3C device drivers. The core takes care of
+ * linking this object to the relevant &struct i3c_dev_desc one.
+ * All I3C devs on the I3C bus are represented, including I3C masters. For each
+ * of them, we have an instance of &struct i3c_device.
+ */
+struct i3c_device {
+ struct device dev;
+ struct i3c_dev_desc *desc;
+ struct i3c_bus *bus;
+};
+
+/*
+ * The I3C specification says the maximum number of devices connected on the
+ * bus is 11, but this number depends on external parameters like trace length,
+ * capacitive load per Device, and the types of Devices present on the Bus.
+ * An I3C master can also have limitations, so this number is just here as a
+ * reference and should be adjusted on a per-controller/per-board basis.
+ */
+#define I3C_BUS_MAX_DEVS 11
+
+#define I3C_BUS_MAX_I3C_SCL_RATE 12900000
+#define I3C_BUS_TYP_I3C_SCL_RATE 12500000
+#define I3C_BUS_I2C_FM_PLUS_SCL_RATE 1000000
+#define I3C_BUS_I2C_FM_SCL_RATE 400000
+#define I3C_BUS_TLOW_OD_MIN_NS 200
+
+/**
+ * enum i3c_bus_mode - I3C bus mode
+ * @I3C_BUS_MODE_PURE: only I3C devices are connected to the bus. No limitation
+ * expected
+ * @I3C_BUS_MODE_MIXED_FAST: I2C devices with 50ns spike filter are present on
+ * the bus. The only impact in this mode is that the
+ * high SCL pulse has to stay below 50ns to trick I2C
+ * devices when transmitting I3C frames
+ * @I3C_BUS_MODE_MIXED_LIMITED: I2C devices without 50ns spike filter are
+ * present on the bus. However, they allow
+ * compliance up to the maximum SDR SCL clock
+ * frequency.
+ * @I3C_BUS_MODE_MIXED_SLOW: I2C devices without 50ns spike filter are present
+ * on the bus
+ */
+enum i3c_bus_mode {
+ I3C_BUS_MODE_PURE,
+ I3C_BUS_MODE_MIXED_FAST,
+ I3C_BUS_MODE_MIXED_LIMITED,
+ I3C_BUS_MODE_MIXED_SLOW,
+};
+
+/**
+ * enum i3c_addr_slot_status - I3C address slot status
+ * @I3C_ADDR_SLOT_FREE: address is free
+ * @I3C_ADDR_SLOT_RSVD: address is reserved
+ * @I3C_ADDR_SLOT_I2C_DEV: address is assigned to an I2C device
+ * @I3C_ADDR_SLOT_I3C_DEV: address is assigned to an I3C device
+ * @I3C_ADDR_SLOT_STATUS_MASK: address slot mask
+ *
+ * On an I3C bus, addresses are assigned dynamically, and we need to know which
+ * addresses are free to use and which ones are already assigned.
+ *
+ * Addresses marked as reserved are those reserved by the I3C protocol
+ * (broadcast address, ...).
+ */
+enum i3c_addr_slot_status {
+ I3C_ADDR_SLOT_FREE,
+ I3C_ADDR_SLOT_RSVD,
+ I3C_ADDR_SLOT_I2C_DEV,
+ I3C_ADDR_SLOT_I3C_DEV,
+ I3C_ADDR_SLOT_STATUS_MASK = 3,
+};
+
+/**
+ * struct i3c_bus - I3C bus object
+ * @cur_master: I3C master currently driving the bus. Since I3C is multi-master
+ * this can change over time. Will be used to let a master
+ * know whether it needs to request bus ownership before sending
+ * a frame or not
+ * @id: bus ID. Assigned by the framework when the bus is registered
+ * @addrslots: a bitmap with 2 bits per slot to encode the address status and
+ * ease the DAA (Dynamic Address Assignment) procedure (see
+ * &enum i3c_addr_slot_status)
+ * @mode: bus mode (see &enum i3c_bus_mode)
+ * @scl_rate.i3c: maximum rate for the clock signal when doing I3C SDR/priv
+ * transfers
+ * @scl_rate.i2c: maximum rate for the clock signal when doing I2C transfers
+ * @scl_rate: SCL signal rate for I3C and I2C mode
+ * @devs.i3c: contains a list of I3C device descriptors representing I3C
+ * devices connected on the bus and successfully attached to the
+ * I3C master
+ * @devs.i2c: contains a list of I2C device descriptors representing I2C
+ * devices connected on the bus and successfully attached to the
+ * I3C master
+ * @devs: 2 lists containing all I3C/I2C devices connected to the bus
+ * @lock: read/write lock on the bus. This is needed to protect against
+ * operations that have an impact on the whole bus and the devices
+ * connected to it. For example, when asking slaves to drop their
+ * dynamic address (RSTDAA CCC), we need to make sure no one is trying
+ * to send I3C frames to these devices.
+ * Note that this lock does not protect against concurrency between
+ * devices: several drivers can send different I3C/I2C frames through
+ * the same master in parallel. It is the responsibility of the
+ * master to guarantee that frames are actually sent sequentially and
+ * not interlaced
+ *
+ * The I3C bus is represented with its own object and not implicitly described
+ * by the I3C master to cope with the multi-master functionality, where one bus
+ * can be shared amongst several masters, each of them requesting bus ownership
+ * when they need to.
+ */
+struct i3c_bus {
+ struct i3c_dev_desc *cur_master;
+ int id;
+ unsigned long addrslots[((I2C_MAX_ADDR + 1) * 2) / BITS_PER_LONG];
+ enum i3c_bus_mode mode;
+ struct {
+ unsigned long i3c;
+ unsigned long i2c;
+ } scl_rate;
+ struct {
+ struct list_head i3c;
+ struct list_head i2c;
+ } devs;
+ struct rw_semaphore lock;
+};
+
+/**
+ * struct i3c_master_controller_ops - I3C master methods
+ * @bus_init: hook responsible for the I3C bus initialization. You should at
+ * least call master_set_info() from there and set the bus mode.
+ * You can also put controller specific initialization in there.
+ * This method is mandatory.
+ * @bus_cleanup: cleanup everything done in
+ * &i3c_master_controller_ops->bus_init().
+ * This method is optional.
+ * @attach_i3c_dev: called every time an I3C device is attached to the bus. It
+ * can be after a DAA or when a device is statically declared
+ * by the FW, in which case it will only have a static address
+ * and the dynamic address will be 0.
+ * When this function is called, device information has not
+ * been retrieved yet.
+ * This is a good place to attach master controller specific
+ * data to I3C devices.
+ * This method is optional.
+ * @reattach_i3c_dev: called every time an I3C device has its address
+ * changed. It can be because the device has been powered
+ * down and has lost its address, or it can happen when a
+ * device had a static address and has been assigned a
+ * dynamic address with SETDASA.
+ * This method is optional.
+ * @detach_i3c_dev: called when an I3C device is detached from the bus. Usually
+ * happens when the master device is unregistered.
+ * This method is optional.
+ * @do_daa: do a DAA (Dynamic Address Assignment) procedure. This procedure
+ * should send an ENTDAA CCC command and then add all devices
+ * discovered during the DAA using i3c_master_add_i3c_dev_locked().
+ * All devices added with i3c_master_add_i3c_dev_locked() will then be
+ * attached or re-attached to the controller.
+ * This method is mandatory.
+ * @supports_ccc_cmd: should return true if the CCC command is supported, false
+ * otherwise.
+ * This method is optional, if not provided the core assumes
+ * all CCC commands are supported.
+ * @send_ccc_cmd: send a CCC command
+ * This method is mandatory.
+ * @priv_xfers: do one or several private I3C SDR transfers
+ * This method is mandatory.
+ * @attach_i2c_dev: called every time an I2C device is attached to the bus.
+ * This is a good place to attach master controller specific
+ * data to I2C devices.
+ * This method is optional.
+ * @detach_i2c_dev: called when an I2C device is detached from the bus. Usually
+ * happens when the master device is unregistered.
+ * This method is optional.
+ * @i2c_xfers: do one or several I2C transfers. Note that, unlike i3c
+ * transfers, the core does not guarantee that buffers attached to
+ * the transfers are DMA-safe. If drivers want to have DMA-safe
+ * buffers, they should use the i2c_get_dma_safe_msg_buf()
+ * and i2c_put_dma_safe_msg_buf() helpers provided by the I2C
+ * framework.
+ * This method is mandatory.
+ * @request_ibi: attach an IBI handler to an I3C device. This implies defining
+ * an IBI handler and the constraints of the IBI (maximum payload
+ * length and number of pre-allocated slots).
+ * Some controllers support fewer IBI-capable devices than regular
+ * devices, so this method might return -%EBUSY if there's no
+ * more space for an extra IBI registration
+ * This method is optional.
+ * @free_ibi: free an IBI previously requested with ->request_ibi(). The IBI
+ * should have been disabled with ->disable_ibi() prior to that
+ * This method is mandatory only if ->request_ibi is not NULL.
+ * @enable_ibi: enable the IBI. Only valid if ->request_ibi() has been called
+ * prior to ->enable_ibi(). The controller should first enable
+ * the IBI on the controller end (for example, unmask the hardware
+ * IRQ) and then send the ENEC CCC command (with the IBI flag set)
+ * to the I3C device.
+ * This method is mandatory only if ->request_ibi is not NULL.
+ * @disable_ibi: disable an IBI. First send the DISEC CCC command with the IBI
+ * flag set and then deactivate the hardware IRQ on the
+ * controller end.
+ * This method is mandatory only if ->request_ibi is not NULL.
+ * @recycle_ibi_slot: recycle an IBI slot. Called every time an IBI has been
+ * processed by its handler. The IBI slot should be put back
+ * in the IBI slot pool so that the controller can re-use it
+ * for a future IBI
+ * This method is mandatory only if ->request_ibi is not
+ * NULL.
+ */
+struct i3c_master_controller_ops {
+ int (*bus_init)(struct i3c_master_controller *master);
+ void (*bus_cleanup)(struct i3c_master_controller *master);
+ int (*attach_i3c_dev)(struct i3c_dev_desc *dev);
+ int (*reattach_i3c_dev)(struct i3c_dev_desc *dev, u8 old_dyn_addr);
+ void (*detach_i3c_dev)(struct i3c_dev_desc *dev);
+ int (*do_daa)(struct i3c_master_controller *master);
+ bool (*supports_ccc_cmd)(struct i3c_master_controller *master,
+ const struct i3c_ccc_cmd *cmd);
+ int (*send_ccc_cmd)(struct i3c_master_controller *master,
+ struct i3c_ccc_cmd *cmd);
+ int (*priv_xfers)(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers);
+ int (*attach_i2c_dev)(struct i2c_dev_desc *dev);
+ void (*detach_i2c_dev)(struct i2c_dev_desc *dev);
+ int (*i2c_xfers)(struct i2c_dev_desc *dev,
+ const struct i2c_msg *xfers, int nxfers);
+ int (*request_ibi)(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req);
+ void (*free_ibi)(struct i3c_dev_desc *dev);
+ int (*enable_ibi)(struct i3c_dev_desc *dev);
+ int (*disable_ibi)(struct i3c_dev_desc *dev);
+ void (*recycle_ibi_slot)(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot);
+};
+
+/**
+ * struct i3c_master_controller - I3C master controller object
+ * @dev: device to be registered to the device-model
+ * @this: an I3C device object representing this master. This device will be
+ * added to the list of I3C devs available on the bus
+ * @i2c: I2C adapter used for backward compatibility. This adapter is
+ * registered to the I2C subsystem to be as transparent as possible to
+ * existing I2C drivers
+ * @ops: master operations. See &struct i3c_master_controller_ops
+ * @secondary: true if the master is a secondary master
+ * @init_done: true when the bus initialization is done
+ * @boardinfo.i3c: list of I3C boardinfo objects
+ * @boardinfo.i2c: list of I2C boardinfo objects
+ * @boardinfo: board-level information attached to devices connected on the bus
+ * @bus: I3C bus exposed by this master
+ * @wq: workqueue used to execute IBI handlers. Can also be used by master
+ * drivers if they need to postpone operations that need to take place
+ * in a thread context. A typical example is Hot Join processing, which
+ * requires taking the bus lock in maintenance mode and, in turn, can
+ * only be done from a sleepable context
+ *
+ * A &struct i3c_master_controller has to be registered to the I3C subsystem
+ * through i3c_master_register(). None of &struct i3c_master_controller fields
+ * should be set manually, just pass appropriate values to
+ * i3c_master_register().
+ */
+struct i3c_master_controller {
+ struct device dev;
+ struct i3c_dev_desc *this;
+ struct i2c_adapter i2c;
+ const struct i3c_master_controller_ops *ops;
+ unsigned int secondary : 1;
+ unsigned int init_done : 1;
+ struct {
+ struct list_head i3c;
+ struct list_head i2c;
+ } boardinfo;
+ struct i3c_bus bus;
+ struct workqueue_struct *wq;
+};
+
+/**
+ * i3c_bus_for_each_i2cdev() - iterate over all I2C devices present on the bus
+ * @bus: the I3C bus
+ * @dev: an I2C device descriptor pointer updated to point to the current slot
+ * at each iteration of the loop
+ *
+ * Iterate over all I2C devs present on the bus.
+ */
+#define i3c_bus_for_each_i2cdev(bus, dev) \
+ list_for_each_entry(dev, &(bus)->devs.i2c, common.node)
+
+/**
+ * i3c_bus_for_each_i3cdev() - iterate over all I3C devices present on the bus
+ * @bus: the I3C bus
+ * @dev: an I3C device descriptor pointer updated to point to the current slot
+ * at each iteration of the loop
+ *
+ * Iterate over all I3C devs present on the bus.
+ */
+#define i3c_bus_for_each_i3cdev(bus, dev) \
+ list_for_each_entry(dev, &(bus)->devs.i3c, common.node)
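+
+/*
+ * Sketch of walking the bus with the iterators above (in a real driver the
+ * caller must hold the bus lock):
+ *
+ *	struct i3c_dev_desc *i3cdev;
+ *	struct i2c_dev_desc *i2cdev;
+ *
+ *	i3c_bus_for_each_i3cdev(&master->bus, i3cdev)
+ *		pr_info("i3c dev at %#x\n", i3cdev->info.dyn_addr);
+ *
+ *	i3c_bus_for_each_i2cdev(&master->bus, i2cdev)
+ *		pr_info("i2c dev at %#x\n", i2cdev->addr);
+ */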
+
+int i3c_master_do_i2c_xfers(struct i3c_master_controller *master,
+ const struct i2c_msg *xfers,
+ int nxfers);
+
+int i3c_master_disec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts);
+int i3c_master_enec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts);
+int i3c_master_entdaa_locked(struct i3c_master_controller *master);
+int i3c_master_defslvs_locked(struct i3c_master_controller *master);
+
+int i3c_master_get_free_addr(struct i3c_master_controller *master,
+ u8 start_addr);
+
+int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
+ u8 addr);
+int i3c_master_do_daa(struct i3c_master_controller *master);
+
+int i3c_master_set_info(struct i3c_master_controller *master,
+ const struct i3c_device_info *info);
+
+int i3c_master_register(struct i3c_master_controller *master,
+ struct device *parent,
+ const struct i3c_master_controller_ops *ops,
+ bool secondary);
+void i3c_master_unregister(struct i3c_master_controller *master);
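+
+/*
+ * Registration sketch for a controller driver probe path ("acme" names
+ * hypothetical, only the mandatory hooks shown; acme->base would be the
+ * i3c_master_controller embedded in the driver's private structure):
+ *
+ *	static const struct i3c_master_controller_ops acme_ops = {
+ *		.bus_init = acme_bus_init,
+ *		.do_daa = acme_do_daa,
+ *		.send_ccc_cmd = acme_send_ccc_cmd,
+ *		.priv_xfers = acme_priv_xfers,
+ *		.i2c_xfers = acme_i2c_xfers,
+ *	};
+ *
+ *	ret = i3c_master_register(&acme->base, &pdev->dev, &acme_ops, false);
+ */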
+
+/**
+ * i3c_dev_get_master_data() - get master private data attached to an I3C
+ * device descriptor
+ * @dev: the I3C device descriptor to get private data from
+ *
+ * Return: the private data previously attached with i3c_dev_set_master_data()
+ * or NULL if no data has been attached to the device.
+ */
+static inline void *i3c_dev_get_master_data(const struct i3c_dev_desc *dev)
+{
+ return dev->common.master_priv;
+}
+
+/**
+ * i3c_dev_set_master_data() - attach master private data to an I3C device
+ * descriptor
+ * @dev: the I3C device descriptor to attach private data to
+ * @data: private data
+ *
+ * This function allows a master controller to attach per-device private data
+ * which can then be retrieved with i3c_dev_get_master_data().
+ */
+static inline void i3c_dev_set_master_data(struct i3c_dev_desc *dev,
+ void *data)
+{
+ dev->common.master_priv = data;
+}
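+
+/*
+ * Sketch of a controller's ->attach_i3c_dev() hook using this helper
+ * (struct acme_i3c_dev_data is hypothetical):
+ *
+ *	static int acme_attach_i3c_dev(struct i3c_dev_desc *dev)
+ *	{
+ *		struct acme_i3c_dev_data *data;
+ *
+ *		data = kzalloc(sizeof(*data), GFP_KERNEL);
+ *		if (!data)
+ *			return -ENOMEM;
+ *
+ *		i3c_dev_set_master_data(dev, data);
+ *		return 0;
+ *	}
+ */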
+
+/**
+ * i2c_dev_get_master_data() - get master private data attached to an I2C
+ * device descriptor
+ * @dev: the I2C device descriptor to get private data from
+ *
+ * Return: the private data previously attached with i2c_dev_set_master_data()
+ * or NULL if no data has been attached to the device.
+ */
+static inline void *i2c_dev_get_master_data(const struct i2c_dev_desc *dev)
+{
+ return dev->common.master_priv;
+}
+
+/**
+ * i2c_dev_set_master_data() - attach master private data to an I2C device
+ * descriptor
+ * @dev: the I2C device descriptor to attach private data to
+ * @data: private data
+ *
+ * This function allows a master controller to attach per-device private data
+ * which can then be retrieved with i2c_dev_get_master_data().
+ */
+static inline void i2c_dev_set_master_data(struct i2c_dev_desc *dev,
+ void *data)
+{
+ dev->common.master_priv = data;
+}
+
+/**
+ * i3c_dev_get_master() - get master used to communicate with a device
+ * @dev: I3C dev
+ *
+ * Return: the master controller driving @dev
+ */
+static inline struct i3c_master_controller *
+i3c_dev_get_master(struct i3c_dev_desc *dev)
+{
+ return dev->common.master;
+}
+
+/**
+ * i2c_dev_get_master() - get master used to communicate with a device
+ * @dev: I2C dev
+ *
+ * Return: the master controller driving @dev
+ */
+static inline struct i3c_master_controller *
+i2c_dev_get_master(struct i2c_dev_desc *dev)
+{
+ return dev->common.master;
+}
+
+/**
+ * i3c_master_get_bus() - get the bus attached to a master
+ * @master: master object
+ *
+ * Return: the I3C bus @master is connected to
+ */
+static inline struct i3c_bus *
+i3c_master_get_bus(struct i3c_master_controller *master)
+{
+ return &master->bus;
+}
+
+struct i3c_generic_ibi_pool;
+
+struct i3c_generic_ibi_pool *
+i3c_generic_ibi_alloc_pool(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req);
+void i3c_generic_ibi_free_pool(struct i3c_generic_ibi_pool *pool);
+
+struct i3c_ibi_slot *
+i3c_generic_ibi_get_free_slot(struct i3c_generic_ibi_pool *pool);
+void i3c_generic_ibi_recycle_slot(struct i3c_generic_ibi_pool *pool,
+ struct i3c_ibi_slot *slot);
+
+void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot);
+
+struct i3c_ibi_slot *i3c_master_get_free_ibi_slot(struct i3c_dev_desc *dev);
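+
+/*
+ * Sketch of the IBI reception path in a controller driver (payload_len and
+ * hw_payload are hypothetical, extracted from the hardware FIFO): grab a
+ * free slot, copy the payload, then hand the slot to the core, which runs
+ * the handler from the controller workqueue:
+ *
+ *	slot = i3c_master_get_free_ibi_slot(dev);
+ *	if (!slot)
+ *		return;		// no slot left: this IBI is lost
+ *
+ *	slot->len = payload_len;
+ *	memcpy(slot->data, hw_payload, payload_len);
+ *	i3c_master_queue_ibi(dev, slot);
+ */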
+
+#endif /* I3C_MASTER_H */
diff --git a/include/linux/i7300_idle.h b/include/linux/i7300_idle.h
deleted file mode 100644
index 1587b7dec505..000000000000
--- a/include/linux/i7300_idle.h
+++ /dev/null
@@ -1,83 +0,0 @@
-
-#ifndef I7300_IDLE_H
-#define I7300_IDLE_H
-
-#include <linux/pci.h>
-
-/*
- * I/O AT controls (PCI bus 0 device 8 function 0)
- * DIMM controls (PCI bus 0 device 16 function 1)
- */
-#define IOAT_BUS 0
-#define IOAT_DEVFN PCI_DEVFN(8, 0)
-#define MEMCTL_BUS 0
-#define MEMCTL_DEVFN PCI_DEVFN(16, 1)
-
-struct fbd_ioat {
- unsigned int vendor;
- unsigned int ioat_dev;
- unsigned int enabled;
-};
-
-/*
- * The i5000 chip-set has the same hooks as the i7300
- * but it is not enabled by default and must be manually
- * manually enabled with "forceload=1" because it is
- * only lightly validated.
- */
-
-static const struct fbd_ioat fbd_ioat_list[] = {
- {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB, 1},
- {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT, 0},
- {0, 0}
-};
-
-/* table of devices that work with this driver */
-static const struct pci_device_id pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FBD_CNB) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) },
- { } /* Terminating entry */
-};
-
-/* Check for known platforms with I/O-AT */
-static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev,
- struct pci_dev **ioat_dev,
- int enable_all)
-{
- int i;
- struct pci_dev *memdev, *dmadev;
-
- memdev = pci_get_bus_and_slot(MEMCTL_BUS, MEMCTL_DEVFN);
- if (!memdev)
- return -ENODEV;
-
- for (i = 0; pci_tbl[i].vendor != 0; i++) {
- if (memdev->vendor == pci_tbl[i].vendor &&
- memdev->device == pci_tbl[i].device) {
- break;
- }
- }
- if (pci_tbl[i].vendor == 0)
- return -ENODEV;
-
- dmadev = pci_get_bus_and_slot(IOAT_BUS, IOAT_DEVFN);
- if (!dmadev)
- return -ENODEV;
-
- for (i = 0; fbd_ioat_list[i].vendor != 0; i++) {
- if (dmadev->vendor == fbd_ioat_list[i].vendor &&
- dmadev->device == fbd_ioat_list[i].ioat_dev) {
- if (!(fbd_ioat_list[i].enabled || enable_all))
- continue;
- if (fbd_dev)
- *fbd_dev = memdev;
- if (ioat_dev)
- *ioat_dev = dmadev;
-
- return 0;
- }
- }
- return -ENODEV;
-}
-
-#endif
diff --git a/include/linux/i8042.h b/include/linux/i8042.h
index d98780ca9604..0261e2fb3636 100644
--- a/include/linux/i8042.h
+++ b/include/linux/i8042.h
@@ -1,11 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_I8042_H
#define _LINUX_I8042_H
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
#include <linux/types.h>
diff --git a/include/linux/i8253.h b/include/linux/i8253.h
index e6bb36a97519..8336b2f6f834 100644
--- a/include/linux/i8253.h
+++ b/include/linux/i8253.h
@@ -21,6 +21,7 @@
#define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ)
extern raw_spinlock_t i8253_lock;
+extern bool i8253_clear_counter_on_shutdown;
extern struct clock_event_device i8253_clockevent;
extern void clockevent_i8253_init(bool oneshot);
diff --git a/include/linux/icmp.h b/include/linux/icmp.h
index efc18490627a..0af4d210ee31 100644
--- a/include/linux/icmp.h
+++ b/include/linux/icmp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
@@ -8,20 +9,35 @@
* Version: @(#)icmp.h 1.0.3 04/28/93
*
* Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_ICMP_H
#define _LINUX_ICMP_H
#include <linux/skbuff.h>
#include <uapi/linux/icmp.h>
+#include <uapi/linux/errqueue.h>
static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb)
{
return (struct icmphdr *)skb_transport_header(skb);
}
+
+static inline bool icmp_is_err(int type)
+{
+ switch (type) {
+ case ICMP_DEST_UNREACH:
+ case ICMP_SOURCE_QUENCH:
+ case ICMP_REDIRECT:
+ case ICMP_TIME_EXCEEDED:
+ case ICMP_PARAMETERPROB:
+ return true;
+ }
+
+ return false;
+}
+
+void ip_icmp_error_rfc4884(const struct sk_buff *skb,
+ struct sock_ee_data_rfc4884 *out,
+ int thlen, int off);
+
#endif /* _LINUX_ICMP_H */
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index 57086e9fc64c..db0f4fcfdaf4 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -1,7 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ICMPV6_H
#define _LINUX_ICMPV6_H
#include <linux/skbuff.h>
+#include <linux/ipv6.h>
#include <uapi/linux/icmpv6.h>
static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
@@ -12,21 +14,64 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
#include <linux/netdevice.h>
#if IS_ENABLED(CONFIG_IPV6)
-extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info);
typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info,
- const struct in6_addr *force_saddr);
+ const struct in6_addr *force_saddr,
+ const struct inet6_skb_parm *parm);
+void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ const struct in6_addr *force_saddr,
+ const struct inet6_skb_parm *parm);
+#if IS_BUILTIN(CONFIG_IPV6)
+static inline void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ const struct inet6_skb_parm *parm)
+{
+ icmp6_send(skb, type, code, info, NULL, parm);
+}
+static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn)
+{
+ BUILD_BUG_ON(fn != icmp6_send);
+ return 0;
+}
+static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn)
+{
+ BUILD_BUG_ON(fn != icmp6_send);
+ return 0;
+}
+#else
+extern void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ const struct inet6_skb_parm *parm);
extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn);
extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
+#endif
+
+static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
+{
+ __icmpv6_send(skb, type, code, info, IP6CB(skb));
+}
+
int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
unsigned int data_len);
+#if IS_ENABLED(CONFIG_NF_NAT)
+void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info);
+#else
+static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
+{
+ struct inet6_skb_parm parm = { 0 };
+ __icmpv6_send(skb_in, type, code, info, &parm);
+}
+#endif
+
#else
static inline void icmpv6_send(struct sk_buff *skb,
u8 type, u8 code, __u32 info)
{
+}
+static inline void icmpv6_ndo_send(struct sk_buff *skb,
+ u8 type, u8 code, __u32 info)
+{
}
#endif
@@ -34,8 +79,9 @@ extern int icmpv6_init(void);
extern int icmpv6_err_convert(u8 type, u8 code,
int *err);
extern void icmpv6_cleanup(void);
-extern void icmpv6_param_prob(struct sk_buff *skb,
- u8 code, int pos);
+extern void icmpv6_param_prob_reason(struct sk_buff *skb,
+ u8 code, int pos,
+ enum skb_drop_reason reason);
struct flowi6;
struct in6_addr;
@@ -45,4 +91,24 @@ extern void icmpv6_flow_init(struct sock *sk,
const struct in6_addr *saddr,
const struct in6_addr *daddr,
int oif);
+
+static inline void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
+{
+ icmpv6_param_prob_reason(skb, code, pos,
+ SKB_DROP_REASON_NOT_SPECIFIED);
+}
+
+static inline bool icmpv6_is_err(int type)
+{
+ switch (type) {
+ case ICMPV6_DEST_UNREACH:
+ case ICMPV6_PKT_TOOBIG:
+ case ICMPV6_TIME_EXCEED:
+ case ICMPV6_PARAMPROB:
+ return true;
+ }
+
+ return false;
+}
+
#endif
diff --git a/include/linux/ide.h b/include/linux/ide.h
deleted file mode 100644
index dc152e4b7f73..000000000000
--- a/include/linux/ide.h
+++ /dev/null
@@ -1,1617 +0,0 @@
-#ifndef _IDE_H
-#define _IDE_H
-/*
- * linux/include/linux/ide.h
- *
- * Copyright (C) 1994-2002 Linus Torvalds & authors
- */
-
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/ata.h>
-#include <linux/blkdev.h>
-#include <linux/proc_fs.h>
-#include <linux/interrupt.h>
-#include <linux/bitops.h>
-#include <linux/bio.h>
-#include <linux/pci.h>
-#include <linux/completion.h>
-#include <linux/pm.h>
-#include <linux/mutex.h>
-/* for request_sense */
-#include <linux/cdrom.h>
-#include <scsi/scsi_cmnd.h>
-#include <asm/byteorder.h>
-#include <asm/io.h>
-
-#if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300)
-# define SUPPORT_VLB_SYNC 0
-#else
-# define SUPPORT_VLB_SYNC 1
-#endif
-
-/*
- * Probably not wise to fiddle with these
- */
-#define IDE_DEFAULT_MAX_FAILURES 1
-#define ERROR_MAX 8 /* Max read/write errors per sector */
-#define ERROR_RESET 3 /* Reset controller every 4th retry */
-#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */
-
-struct device;
-
-/* values for ide_request.type */
-enum ata_priv_type {
- ATA_PRIV_MISC,
- ATA_PRIV_TASKFILE,
- ATA_PRIV_PC,
- ATA_PRIV_SENSE, /* sense request */
- ATA_PRIV_PM_SUSPEND, /* suspend request */
- ATA_PRIV_PM_RESUME, /* resume request */
-};
-
-struct ide_request {
- struct scsi_request sreq;
- u8 sense[SCSI_SENSE_BUFFERSIZE];
- u8 type;
-};
-
-static inline struct ide_request *ide_req(struct request *rq)
-{
- return blk_mq_rq_to_pdu(rq);
-}
-
-static inline bool ata_misc_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_MISC;
-}
-
-static inline bool ata_taskfile_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_TASKFILE;
-}
-
-static inline bool ata_pc_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_PC;
-}
-
-static inline bool ata_sense_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_SENSE;
-}
-
-static inline bool ata_pm_request(struct request *rq)
-{
- return blk_rq_is_private(rq) &&
- (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND ||
- ide_req(rq)->type == ATA_PRIV_PM_RESUME);
-}
-
-/* Error codes returned in result to the higher part of the driver. */
-enum {
- IDE_DRV_ERROR_GENERAL = 101,
- IDE_DRV_ERROR_FILEMARK = 102,
- IDE_DRV_ERROR_EOD = 103,
-};
-
-/*
- * Definitions for accessing IDE controller registers
- */
-#define IDE_NR_PORTS (10)
-
-struct ide_io_ports {
- unsigned long data_addr;
-
- union {
- unsigned long error_addr; /* read: error */
- unsigned long feature_addr; /* write: feature */
- };
-
- unsigned long nsect_addr;
- unsigned long lbal_addr;
- unsigned long lbam_addr;
- unsigned long lbah_addr;
-
- unsigned long device_addr;
-
- union {
- unsigned long status_addr; /*  read: status  */
- unsigned long command_addr; /* write: command */
- };
-
- unsigned long ctl_addr;
-
- unsigned long irq_addr;
-};
-
-#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good))
-
-#define BAD_R_STAT (ATA_BUSY | ATA_ERR)
-#define BAD_W_STAT (BAD_R_STAT | ATA_DF)
-#define BAD_STAT (BAD_R_STAT | ATA_DRQ)
-#define DRIVE_READY (ATA_DRDY | ATA_DSC)
-
-#define BAD_CRC (ATA_ABORTED | ATA_ICRC)
-
-#define SATA_NR_PORTS (3) /* 16 possible ?? */
-
-#define SATA_STATUS_OFFSET (0)
-#define SATA_ERROR_OFFSET (1)
-#define SATA_CONTROL_OFFSET (2)
-
-/*
- * Our Physical Region Descriptor (PRD) table should be large enough
- * to handle the biggest I/O request we are likely to see. Since requests
- * can have no more than 256 sectors, and since the typical blocksize is
- * two or more sectors, we could get by with a limit of 128 entries here for
- * the usual worst case. Most requests seem to include some contiguous blocks,
- * further reducing the number of table entries required.
- *
- * The driver reverts to PIO mode for individual requests that exceed
- * this limit (possible with 512 byte blocksizes, eg. MSDOS f/s), so handling
- * 100% of all crazy scenarios here is not necessary.
- *
- * As it turns out though, we must allocate a full 4KB page for this,
- * so the two PRD tables (ide0 & ide1) will each get half of that,
- * allowing each to have about 256 entries (8 bytes each) from this.
- */
-#define PRD_BYTES 8
-#define PRD_ENTRIES 256
-
-/*
- * Some more useful definitions
- */
-#define PARTN_BITS 6 /* number of minor dev bits for partitions */
-#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
-#define SECTOR_SIZE 512
-
-/*
- * Timeouts for various operations:
- */
-enum {
- /* spec allows up to 20ms, but CF cards and SSD drives need more */
- WAIT_DRQ = 1 * HZ, /* 1s */
- /* some laptops are very slow */
- WAIT_READY = 5 * HZ, /* 5s */
- /* should be less than 3ms (?), if all ATAPI CD is closed at boot */
- WAIT_PIDENTIFY = 10 * HZ, /* 10s */
- /* worst case when spinning up */
- WAIT_WORSTCASE = 30 * HZ, /* 30s */
- /* maximum wait for an IRQ to happen */
- WAIT_CMD = 10 * HZ, /* 10s */
- /* Some drives require a longer IRQ timeout. */
- WAIT_FLOPPY_CMD = 50 * HZ, /* 50s */
- /*
- * Some drives (for example, Seagate STT3401A Travan) require a very
- * long timeout, because they don't return an interrupt or clear their
- * BSY bit until after the command completes (even retension commands).
- */
- WAIT_TAPE_CMD = 900 * HZ, /* 900s */
- /* minimum sleep time */
- WAIT_MIN_SLEEP = HZ / 50, /* 20ms */
-};
-
-/*
- * Op codes for special requests to be handled by ide_special_rq().
- * Values should be in the range of 0x20 to 0x3f.
- */
-#define REQ_DRIVE_RESET 0x20
-#define REQ_DEVSET_EXEC 0x21
-#define REQ_PARK_HEADS 0x22
-#define REQ_UNPARK_HEADS 0x23
-
-/*
- * hwif_chipset_t is used to keep track of the specific hardware
- * chipset used by each IDE interface, if known.
- */
-enum { ide_unknown, ide_generic, ide_pci,
- ide_cmd640, ide_dtc2278, ide_ali14xx,
- ide_qd65xx, ide_umc8672, ide_ht6560b,
- ide_4drives, ide_pmac, ide_acorn,
- ide_au1xxx, ide_palm3710
-};
-
-typedef u8 hwif_chipset_t;
-
-/*
- * Structure to hold all information about the location of this port
- */
-struct ide_hw {
- union {
- struct ide_io_ports io_ports;
- unsigned long io_ports_array[IDE_NR_PORTS];
- };
-
- int irq; /* our irq number */
- struct device *dev, *parent;
- unsigned long config;
-};
-
-static inline void ide_std_init_ports(struct ide_hw *hw,
- unsigned long io_addr,
- unsigned long ctl_addr)
-{
- unsigned int i;
-
- for (i = 0; i <= 7; i++)
- hw->io_ports_array[i] = io_addr++;
-
- hw->io_ports.ctl_addr = ctl_addr;
-}
-
-#define MAX_HWIFS 10
-
-/*
- * Now for the data we need to maintain per-drive: ide_drive_t
- */
-
-#define ide_scsi 0x21
-#define ide_disk 0x20
-#define ide_optical 0x7
-#define ide_cdrom 0x5
-#define ide_tape 0x1
-#define ide_floppy 0x0
-
-/*
- * Special Driver Flags
- */
-enum {
- IDE_SFLAG_SET_GEOMETRY = (1 << 0),
- IDE_SFLAG_RECALIBRATE = (1 << 1),
- IDE_SFLAG_SET_MULTMODE = (1 << 2),
-};
-
-/*
- * Status returned from various ide_ functions
- */
-typedef enum {
- ide_stopped, /* no drive operation was started */
- ide_started, /* a drive operation was started, handler was set */
-} ide_startstop_t;
-
-enum {
- IDE_VALID_ERROR = (1 << 1),
- IDE_VALID_FEATURE = IDE_VALID_ERROR,
- IDE_VALID_NSECT = (1 << 2),
- IDE_VALID_LBAL = (1 << 3),
- IDE_VALID_LBAM = (1 << 4),
- IDE_VALID_LBAH = (1 << 5),
- IDE_VALID_DEVICE = (1 << 6),
- IDE_VALID_LBA = IDE_VALID_LBAL |
- IDE_VALID_LBAM |
- IDE_VALID_LBAH,
- IDE_VALID_OUT_TF = IDE_VALID_FEATURE |
- IDE_VALID_NSECT |
- IDE_VALID_LBA,
- IDE_VALID_IN_TF = IDE_VALID_NSECT |
- IDE_VALID_LBA,
- IDE_VALID_OUT_HOB = IDE_VALID_OUT_TF,
- IDE_VALID_IN_HOB = IDE_VALID_ERROR |
- IDE_VALID_NSECT |
- IDE_VALID_LBA,
-};
-
-enum {
- IDE_TFLAG_LBA48 = (1 << 0),
- IDE_TFLAG_WRITE = (1 << 1),
- IDE_TFLAG_CUSTOM_HANDLER = (1 << 2),
- IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 3),
- /* force 16-bit I/O operations */
- IDE_TFLAG_IO_16BIT = (1 << 4),
- /* struct ide_cmd was allocated using kmalloc() */
- IDE_TFLAG_DYN = (1 << 5),
- IDE_TFLAG_FS = (1 << 6),
- IDE_TFLAG_MULTI_PIO = (1 << 7),
- IDE_TFLAG_SET_XFER = (1 << 8),
-};
-
-enum {
- IDE_FTFLAG_FLAGGED = (1 << 0),
- IDE_FTFLAG_SET_IN_FLAGS = (1 << 1),
- IDE_FTFLAG_OUT_DATA = (1 << 2),
- IDE_FTFLAG_IN_DATA = (1 << 3),
-};
-
-struct ide_taskfile {
- u8 data; /* 0: data byte (for TASKFILE ioctl) */
- union { /* 1: */
- u8 error; /* read: error */
- u8 feature; /* write: feature */
- };
- u8 nsect; /* 2: number of sectors */
- u8 lbal; /* 3: LBA low */
- u8 lbam; /* 4: LBA mid */
- u8 lbah; /* 5: LBA high */
- u8 device; /* 6: device select */
- union { /* 7: */
- u8 status; /* read: status */
- u8 command; /* write: command */
- };
-};
-
-struct ide_cmd {
- struct ide_taskfile tf;
- struct ide_taskfile hob;
- struct {
- struct {
- u8 tf;
- u8 hob;
- } out, in;
- } valid;
-
- u16 tf_flags;
- u8 ftf_flags; /* for TASKFILE ioctl */
- int protocol;
-
- int sg_nents; /* number of sg entries */
- int orig_sg_nents;
- int sg_dma_direction; /* DMA transfer direction */
-
- unsigned int nbytes;
- unsigned int nleft;
- unsigned int last_xfer_len;
-
- struct scatterlist *cursg;
- unsigned int cursg_ofs;
-
- struct request *rq; /* copy of request */
-};
-
-/* ATAPI packet command flags */
-enum {
- /* set when an error is considered normal - no retry (ide-tape) */
- PC_FLAG_ABORT = (1 << 0),
- PC_FLAG_SUPPRESS_ERROR = (1 << 1),
- PC_FLAG_WAIT_FOR_DSC = (1 << 2),
- PC_FLAG_DMA_OK = (1 << 3),
- PC_FLAG_DMA_IN_PROGRESS = (1 << 4),
- PC_FLAG_DMA_ERROR = (1 << 5),
- PC_FLAG_WRITING = (1 << 6),
-};
-
-#define ATAPI_WAIT_PC (60 * HZ)
-
-struct ide_atapi_pc {
- /* actual packet bytes */
- u8 c[12];
- /* incremented on each retry */
- int retries;
- int error;
-
- /* bytes to transfer */
- int req_xfer;
-
- /* the corresponding request */
- struct request *rq;
-
- unsigned long flags;
-
- /*
-	 * These fields are more or less driver-specific, and some of them are
-	 * subject to change or removal later.
- */
- unsigned long timeout;
-};
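
A minimal sketch of preparing a packet command with these fields (TEST UNIT READY, opcode 0x00, is a standard ATAPI/SCSI command; ide_init_pc() is declared further down in this header):

	struct ide_atapi_pc pc;

	ide_init_pc(&pc);		/* zero-initialize the command */
	pc.c[0] = 0x00;			/* TEST UNIT READY */
	pc.req_xfer = 0;		/* no data phase */
	pc.timeout = ATAPI_WAIT_PC;	/* give the device up to 60 s */
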
-
-struct ide_devset;
-struct ide_driver;
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
-struct ide_acpi_drive_link;
-struct ide_acpi_hwif_link;
-#endif
-
-struct ide_drive_s;
-
-struct ide_disk_ops {
- int (*check)(struct ide_drive_s *, const char *);
- int (*get_capacity)(struct ide_drive_s *);
- void (*unlock_native_capacity)(struct ide_drive_s *);
- void (*setup)(struct ide_drive_s *);
- void (*flush)(struct ide_drive_s *);
- int (*init_media)(struct ide_drive_s *, struct gendisk *);
- int (*set_doorlock)(struct ide_drive_s *, struct gendisk *,
- int);
- ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *,
- sector_t);
- int (*ioctl)(struct ide_drive_s *, struct block_device *,
- fmode_t, unsigned int, unsigned long);
-};
-
-/* ATAPI device flags */
-enum {
- IDE_AFLAG_DRQ_INTERRUPT = (1 << 0),
-
- /* ide-cd */
- /* Drive cannot eject the disc. */
- IDE_AFLAG_NO_EJECT = (1 << 1),
- /* Drive is a pre ATAPI 1.2 drive. */
- IDE_AFLAG_PRE_ATAPI12 = (1 << 2),
- /* TOC addresses are in BCD. */
- IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3),
- /* TOC track numbers are in BCD. */
- IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4),
- /* Saved TOC information is current. */
- IDE_AFLAG_TOC_VALID = (1 << 6),
- /* We think that the drive door is locked. */
- IDE_AFLAG_DOOR_LOCKED = (1 << 7),
- /* SET_CD_SPEED command is unsupported. */
- IDE_AFLAG_NO_SPEED_SELECT = (1 << 8),
- IDE_AFLAG_VERTOS_300_SSD = (1 << 9),
- IDE_AFLAG_VERTOS_600_ESD = (1 << 10),
- IDE_AFLAG_SANYO_3CD = (1 << 11),
- IDE_AFLAG_FULL_CAPS_PAGE = (1 << 12),
- IDE_AFLAG_PLAY_AUDIO_OK = (1 << 13),
- IDE_AFLAG_LE_SPEED_FIELDS = (1 << 14),
-
- /* ide-floppy */
- /* Avoid commands not supported in Clik drive */
- IDE_AFLAG_CLIK_DRIVE = (1 << 15),
- /* Requires BH algorithm for packets */
- IDE_AFLAG_ZIP_DRIVE = (1 << 16),
- /* Supports format progress report */
- IDE_AFLAG_SRFP = (1 << 17),
-
- /* ide-tape */
- IDE_AFLAG_IGNORE_DSC = (1 << 18),
-	/* 0 when the tape position is unknown */
- IDE_AFLAG_ADDRESS_VALID = (1 << 19),
- /* Device already opened */
- IDE_AFLAG_BUSY = (1 << 20),
- /* Attempt to auto-detect the current user block size */
- IDE_AFLAG_DETECT_BS = (1 << 21),
- /* Currently on a filemark */
- IDE_AFLAG_FILEMARK = (1 << 22),
- /* 0 = no tape is loaded, so we don't rewind after ejecting */
- IDE_AFLAG_MEDIUM_PRESENT = (1 << 23),
-
- IDE_AFLAG_NO_AUTOCLOSE = (1 << 24),
-};
-
-/* device flags */
-enum {
- /* restore settings after device reset */
- IDE_DFLAG_KEEP_SETTINGS = (1 << 0),
- /* device is using DMA for read/write */
- IDE_DFLAG_USING_DMA = (1 << 1),
- /* okay to unmask other IRQs */
- IDE_DFLAG_UNMASK = (1 << 2),
- /* don't attempt flushes */
- IDE_DFLAG_NOFLUSH = (1 << 3),
- /* DSC overlap */
- IDE_DFLAG_DSC_OVERLAP = (1 << 4),
- /* give potential excess bandwidth */
- IDE_DFLAG_NICE1 = (1 << 5),
- /* device is physically present */
- IDE_DFLAG_PRESENT = (1 << 6),
- /* disable Host Protected Area */
- IDE_DFLAG_NOHPA = (1 << 7),
- /* id read from device (synthetic if not set) */
- IDE_DFLAG_ID_READ = (1 << 8),
- IDE_DFLAG_NOPROBE = (1 << 9),
- /* need to do check_media_change() */
- IDE_DFLAG_REMOVABLE = (1 << 10),
- /* needed for removable devices */
- IDE_DFLAG_ATTACH = (1 << 11),
- IDE_DFLAG_FORCED_GEOM = (1 << 12),
- /* disallow setting unmask bit */
- IDE_DFLAG_NO_UNMASK = (1 << 13),
- /* disallow enabling 32-bit I/O */
- IDE_DFLAG_NO_IO_32BIT = (1 << 14),
- /* for removable only: door lock/unlock works */
- IDE_DFLAG_DOORLOCKING = (1 << 15),
- /* disallow DMA */
- IDE_DFLAG_NODMA = (1 << 16),
-	/* power management told us not to do anything, so sleep nicely */
- IDE_DFLAG_BLOCKED = (1 << 17),
- /* sleeping & sleep field valid */
- IDE_DFLAG_SLEEPING = (1 << 18),
- IDE_DFLAG_POST_RESET = (1 << 19),
- IDE_DFLAG_UDMA33_WARNED = (1 << 20),
- IDE_DFLAG_LBA48 = (1 << 21),
- /* status of write cache */
- IDE_DFLAG_WCACHE = (1 << 22),
- /* used for ignoring ATA_DF */
- IDE_DFLAG_NOWERR = (1 << 23),
- /* retrying in PIO */
- IDE_DFLAG_DMA_PIO_RETRY = (1 << 24),
- IDE_DFLAG_LBA = (1 << 25),
- /* don't unload heads */
- IDE_DFLAG_NO_UNLOAD = (1 << 26),
- /* heads unloaded, please don't reset port */
- IDE_DFLAG_PARKED = (1 << 27),
- IDE_DFLAG_MEDIA_CHANGED = (1 << 28),
- /* write protect */
- IDE_DFLAG_WP = (1 << 29),
- IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30),
- IDE_DFLAG_NIEN_QUIRK = (1 << 31),
-};
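
The flags are plain bits in the dev_flags word of the drive structure defined just below, so gating logic reduces to simple mask tests — e.g., a hypothetical check of whether DMA may even be attempted:

static bool example_drive_can_dma(struct ide_drive_s *drive)
{
	return (drive->dev_flags & IDE_DFLAG_PRESENT) &&
	       !(drive->dev_flags & IDE_DFLAG_NODMA);
}
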
-
-struct ide_drive_s {
- char name[4]; /* drive name, such as "hda" */
- char driver_req[10]; /* requests specific driver */
-
- struct request_queue *queue; /* request queue */
-
- struct request *rq; /* current request */
- void *driver_data; /* extra driver data */
- u16 *id; /* identification info */
-#ifdef CONFIG_IDE_PROC_FS
- struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
- const struct ide_proc_devset *settings; /* /proc/ide/ drive settings */
-#endif
- struct hwif_s *hwif; /* actually (ide_hwif_t *) */
-
- const struct ide_disk_ops *disk_ops;
-
- unsigned long dev_flags;
-
- unsigned long sleep; /* sleep until this time */
- unsigned long timeout; /* max time to wait for irq */
-
- u8 special_flags; /* special action flags */
-
- u8 select; /* basic drive/head select reg value */
- u8 retry_pio; /* retrying dma capable host in pio */
- u8 waiting_for_dma; /* dma currently in progress */
- u8 dma; /* atapi dma flag */
-
- u8 init_speed; /* transfer rate set at boot */
- u8 current_speed; /* current transfer rate set */
- u8 desired_speed; /* desired transfer rate set */
- u8 pio_mode; /* for ->set_pio_mode _only_ */
- u8 dma_mode; /* for ->set_dma_mode _only_ */
-	u8	dn;		/* device number; now in widespread use */
- u8 acoustic; /* acoustic management */
- u8 media; /* disk, cdrom, tape, floppy, ... */
- u8 ready_stat; /* min status value for drive ready */
- u8 mult_count; /* current multiple sector setting */
- u8 mult_req; /* requested multiple sector setting */
- u8 io_32bit; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
- u8 bad_wstat; /* used for ignoring ATA_DF */
- u8 head; /* "real" number of heads */
- u8 sect; /* "real" sectors per track */
- u8 bios_head; /* BIOS/fdisk/LILO number of heads */
- u8 bios_sect; /* BIOS/fdisk/LILO sectors per track */
-
- /* delay this long before sending packet command */
- u8 pc_delay;
-
- unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */
- unsigned int cyl; /* "real" number of cyls */
- void *drive_data; /* used by set_pio_mode/dev_select() */
- unsigned int failures; /* current failure count */
- unsigned int max_failures; /* maximum allowed failure count */
- u64 probed_capacity;/* initial/native media capacity */
- u64 capacity64; /* total number of sectors */
-
- int lun; /* logical unit */
- int crc_count; /* crc counter to reduce drive speed */
-
- unsigned long debug_mask; /* debugging levels switch */
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
- struct ide_acpi_drive_link *acpidata;
-#endif
- struct list_head list;
- struct device gendev;
- struct completion gendev_rel_comp; /* to deal with device release() */
-
- /* current packet command */
- struct ide_atapi_pc *pc;
-
- /* last failed packet command */
- struct ide_atapi_pc *failed_pc;
-
- /* callback for packet commands */
- int (*pc_callback)(struct ide_drive_s *, int);
-
- ide_startstop_t (*irq_handler)(struct ide_drive_s *);
-
- unsigned long atapi_flags;
-
- struct ide_atapi_pc request_sense_pc;
-
- /* current sense rq and buffer */
- bool sense_rq_armed;
- struct request *sense_rq;
- struct request_sense sense_data;
-};
-
-typedef struct ide_drive_s ide_drive_t;
-
-#define to_ide_device(dev) container_of(dev, ide_drive_t, gendev)
-
-#define to_ide_drv(obj, cont_type) \
- container_of(obj, struct cont_type, dev)
-
-#define ide_drv_g(disk, cont_type) \
- container_of((disk)->private_data, struct cont_type, driver)
-
-struct ide_port_info;
-
-struct ide_tp_ops {
- void (*exec_command)(struct hwif_s *, u8);
- u8 (*read_status)(struct hwif_s *);
- u8 (*read_altstatus)(struct hwif_s *);
- void (*write_devctl)(struct hwif_s *, u8);
-
- void (*dev_select)(ide_drive_t *);
- void (*tf_load)(ide_drive_t *, struct ide_taskfile *, u8);
- void (*tf_read)(ide_drive_t *, struct ide_taskfile *, u8);
-
- void (*input_data)(ide_drive_t *, struct ide_cmd *,
- void *, unsigned int);
- void (*output_data)(ide_drive_t *, struct ide_cmd *,
- void *, unsigned int);
-};
-
-extern const struct ide_tp_ops default_tp_ops;
-
-/**
- * struct ide_port_ops - IDE port operations
- *
- * @init_dev: host specific initialization of a device
- * @set_pio_mode: routine to program host for PIO mode
- * @set_dma_mode: routine to program host for DMA mode
- * @reset_poll: chipset polling based on hba specifics
- * @pre_reset: chipset specific changes to default for device-hba resets
- * @resetproc: routine to reset controller after a disk reset
- * @maskproc: special host masking for drive selection
- * @quirkproc: check host's drive quirk list
- * @clear_irq: clear IRQ
- *
- * @mdma_filter: filter MDMA modes
- * @udma_filter: filter UDMA modes
- *
- * @cable_detect: detect cable type
- */
-struct ide_port_ops {
- void (*init_dev)(ide_drive_t *);
- void (*set_pio_mode)(struct hwif_s *, ide_drive_t *);
- void (*set_dma_mode)(struct hwif_s *, ide_drive_t *);
- blk_status_t (*reset_poll)(ide_drive_t *);
- void (*pre_reset)(ide_drive_t *);
- void (*resetproc)(ide_drive_t *);
- void (*maskproc)(ide_drive_t *, int);
- void (*quirkproc)(ide_drive_t *);
- void (*clear_irq)(ide_drive_t *);
- int (*test_irq)(struct hwif_s *);
-
- u8 (*mdma_filter)(ide_drive_t *);
- u8 (*udma_filter)(ide_drive_t *);
-
- u8 (*cable_detect)(struct hwif_s *);
-};
-
-struct ide_dma_ops {
- void (*dma_host_set)(struct ide_drive_s *, int);
- int (*dma_setup)(struct ide_drive_s *, struct ide_cmd *);
- void (*dma_start)(struct ide_drive_s *);
- int (*dma_end)(struct ide_drive_s *);
- int (*dma_test_irq)(struct ide_drive_s *);
- void (*dma_lost_irq)(struct ide_drive_s *);
- /* below ones are optional */
- int (*dma_check)(struct ide_drive_s *, struct ide_cmd *);
- int (*dma_timer_expiry)(struct ide_drive_s *);
- void (*dma_clear)(struct ide_drive_s *);
- /*
- * The following method is optional and only required to be
- * implemented for the SFF-8038i compatible controllers.
- */
- u8 (*dma_sff_read_status)(struct hwif_s *);
-};
-
-enum {
- IDE_PFLAG_PROBING = (1 << 0),
-};
-
-struct ide_host;
-
-typedef struct hwif_s {
- struct hwif_s *mate; /* other hwif from same PCI chip */
- struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
-
- struct ide_host *host;
-
-	char name[6];		/* name of interface, e.g. "ide0" */
-
- struct ide_io_ports io_ports;
-
- unsigned long sata_scr[SATA_NR_PORTS];
-
- ide_drive_t *devices[MAX_DRIVES + 1];
-
- unsigned long port_flags;
-
- u8 major; /* our major number */
- u8 index; /* 0 for ide0; 1 for ide1; ... */
- u8 channel; /* for dual-port chips: 0=primary, 1=secondary */
-
- u32 host_flags;
-
- u8 pio_mask;
-
- u8 ultra_mask;
- u8 mwdma_mask;
- u8 swdma_mask;
-
- u8 cbl; /* cable type */
-
- hwif_chipset_t chipset; /* sub-module for tuning.. */
-
- struct device *dev;
-
- void (*rw_disk)(ide_drive_t *, struct request *);
-
- const struct ide_tp_ops *tp_ops;
- const struct ide_port_ops *port_ops;
- const struct ide_dma_ops *dma_ops;
-
- /* dma physical region descriptor table (cpu view) */
- unsigned int *dmatable_cpu;
- /* dma physical region descriptor table (dma view) */
- dma_addr_t dmatable_dma;
-
- /* maximum number of PRD table entries */
- int prd_max_nents;
- /* PRD entry size in bytes */
- int prd_ent_size;
-
- /* Scatter-gather list used to build the above */
- struct scatterlist *sg_table;
- int sg_max_nents; /* Maximum number of entries in it */
-
- struct ide_cmd cmd; /* current command */
-
- int rqsize; /* max sectors per request */
- int irq; /* our irq number */
-
- unsigned long dma_base; /* base addr for dma ports */
-
- unsigned long config_data; /* for use by chipset-specific code */
- unsigned long select_data; /* for use by chipset-specific code */
-
- unsigned long extra_base; /* extra addr for dma ports */
- unsigned extra_ports; /* number of extra dma ports */
-
- unsigned present : 1; /* this interface exists */
- unsigned busy : 1; /* serializes devices on a port */
-
- struct device gendev;
- struct device *portdev;
-
- struct completion gendev_rel_comp; /* To deal with device release() */
-
- void *hwif_data; /* extra hwif data */
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
- struct ide_acpi_hwif_link *acpidata;
-#endif
-
- /* IRQ handler, if active */
- ide_startstop_t (*handler)(ide_drive_t *);
-
- /* BOOL: polling active & poll_timeout field valid */
- unsigned int polling : 1;
-
- /* current drive */
- ide_drive_t *cur_dev;
-
- /* current request */
- struct request *rq;
-
- /* failsafe timer */
- struct timer_list timer;
- /* timeout value during long polls */
- unsigned long poll_timeout;
- /* queried upon timeouts */
- int (*expiry)(ide_drive_t *);
-
- int req_gen;
- int req_gen_timer;
-
- spinlock_t lock;
-} ____cacheline_internodealigned_in_smp ide_hwif_t;
-
-#define MAX_HOST_PORTS 4
-
-struct ide_host {
- ide_hwif_t *ports[MAX_HOST_PORTS + 1];
- unsigned int n_ports;
- struct device *dev[2];
-
- int (*init_chipset)(struct pci_dev *);
-
- void (*get_lock)(irq_handler_t, void *);
- void (*release_lock)(void);
-
- irq_handler_t irq_handler;
-
- unsigned long host_flags;
-
- int irq_flags;
-
- void *host_priv;
- ide_hwif_t *cur_port; /* for hosts requiring serialization */
-
- /* used for hosts requiring serialization */
- volatile unsigned long host_busy;
-};
-
-#define IDE_HOST_BUSY 0
-
-/*
- * internal ide interrupt handler type
- */
-typedef ide_startstop_t (ide_handler_t)(ide_drive_t *);
-typedef int (ide_expiry_t)(ide_drive_t *);
-
-/* used by ide-cd, ide-floppy, etc. */
-typedef void (xfer_func_t)(ide_drive_t *, struct ide_cmd *, void *, unsigned);
-
-extern struct mutex ide_setting_mtx;
-
-/*
- * configurable drive settings
- */
-
-#define DS_SYNC (1 << 0)
-
-struct ide_devset {
- int (*get)(ide_drive_t *);
- int (*set)(ide_drive_t *, int);
- unsigned int flags;
-};
-
-#define __DEVSET(_flags, _get, _set) { \
- .flags = _flags, \
- .get = _get, \
- .set = _set, \
-}
-
-#define ide_devset_get(name, field) \
-static int get_##name(ide_drive_t *drive) \
-{ \
- return drive->field; \
-}
-
-#define ide_devset_set(name, field) \
-static int set_##name(ide_drive_t *drive, int arg) \
-{ \
- drive->field = arg; \
- return 0; \
-}
-
-#define ide_devset_get_flag(name, flag) \
-static int get_##name(ide_drive_t *drive) \
-{ \
- return !!(drive->dev_flags & flag); \
-}
-
-#define ide_devset_set_flag(name, flag) \
-static int set_##name(ide_drive_t *drive, int arg) \
-{ \
- if (arg) \
- drive->dev_flags |= flag; \
- else \
- drive->dev_flags &= ~flag; \
- return 0; \
-}
-
-#define __IDE_DEVSET(_name, _flags, _get, _set) \
-const struct ide_devset ide_devset_##_name = \
- __DEVSET(_flags, _get, _set)
-
-#define IDE_DEVSET(_name, _flags, _get, _set) \
-static __IDE_DEVSET(_name, _flags, _get, _set)
-
-#define ide_devset_rw(_name, _func) \
-IDE_DEVSET(_name, 0, get_##_func, set_##_func)
-
-#define ide_devset_w(_name, _func) \
-IDE_DEVSET(_name, 0, NULL, set_##_func)
-
-#define ide_ext_devset_rw(_name, _func) \
-__IDE_DEVSET(_name, 0, get_##_func, set_##_func)
-
-#define ide_ext_devset_rw_sync(_name, _func) \
-__IDE_DEVSET(_name, DS_SYNC, get_##_func, set_##_func)
-
-#define ide_decl_devset(_name) \
-extern const struct ide_devset ide_devset_##_name
-
-ide_decl_devset(io_32bit);
-ide_decl_devset(keepsettings);
-ide_decl_devset(pio_mode);
-ide_decl_devset(unmaskirq);
-ide_decl_devset(using_dma);
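
For illustration, an assumed expansion of the macro family above: pairing a field getter and setter and wrapping them in a descriptor (real drivers often substitute hand-written setters with validation):

ide_devset_get(example_pio, pio_mode);	/* defines get_example_pio() */
ide_devset_set(example_pio, pio_mode);	/* defines set_example_pio() */
IDE_DEVSET(example_pio, DS_SYNC, get_example_pio, set_example_pio);
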
-
-#ifdef CONFIG_IDE_PROC_FS
-/*
- * /proc/ide interface
- */
-
-#define ide_devset_rw_field(_name, _field) \
-ide_devset_get(_name, _field); \
-ide_devset_set(_name, _field); \
-IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
-
-#define ide_devset_rw_flag(_name, _field) \
-ide_devset_get_flag(_name, _field); \
-ide_devset_set_flag(_name, _field); \
-IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
-
-struct ide_proc_devset {
- const char *name;
- const struct ide_devset *setting;
- int min, max;
- int (*mulf)(ide_drive_t *);
- int (*divf)(ide_drive_t *);
-};
-
-#define __IDE_PROC_DEVSET(_name, _min, _max, _mulf, _divf) { \
- .name = __stringify(_name), \
- .setting = &ide_devset_##_name, \
- .min = _min, \
- .max = _max, \
- .mulf = _mulf, \
- .divf = _divf, \
-}
-
-#define IDE_PROC_DEVSET(_name, _min, _max) \
-__IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL)
-
-typedef struct {
- const char *name;
- umode_t mode;
- const struct file_operations *proc_fops;
-} ide_proc_entry_t;
-
-void proc_ide_create(void);
-void proc_ide_destroy(void);
-void ide_proc_register_port(ide_hwif_t *);
-void ide_proc_port_register_devices(ide_hwif_t *);
-void ide_proc_unregister_device(ide_drive_t *);
-void ide_proc_unregister_port(ide_hwif_t *);
-void ide_proc_register_driver(ide_drive_t *, struct ide_driver *);
-void ide_proc_unregister_driver(ide_drive_t *, struct ide_driver *);
-
-extern const struct file_operations ide_capacity_proc_fops;
-extern const struct file_operations ide_geometry_proc_fops;
-#else
-static inline void proc_ide_create(void) { ; }
-static inline void proc_ide_destroy(void) { ; }
-static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; }
-static inline void ide_proc_port_register_devices(ide_hwif_t *hwif) { ; }
-static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; }
-static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; }
-static inline void ide_proc_register_driver(ide_drive_t *drive,
- struct ide_driver *driver) { ; }
-static inline void ide_proc_unregister_driver(ide_drive_t *drive,
- struct ide_driver *driver) { ; }
-#endif
-
-enum {
- /* enter/exit functions */
- IDE_DBG_FUNC = (1 << 0),
- /* sense key/asc handling */
- IDE_DBG_SENSE = (1 << 1),
- /* packet commands handling */
- IDE_DBG_PC = (1 << 2),
- /* request handling */
- IDE_DBG_RQ = (1 << 3),
- /* driver probing/setup */
- IDE_DBG_PROBE = (1 << 4),
-};
-
-/* DRV_NAME has to be defined in the driver before using the macro below */
-#define __ide_debug_log(lvl, fmt, args...) \
-{ \
- if (unlikely(drive->debug_mask & lvl)) \
- printk(KERN_INFO DRV_NAME ": %s: " fmt "\n", \
- __func__, ## args); \
-}
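
A sketch of its use (the driver and function names are hypothetical; DRV_NAME must be defined first, as the comment above notes, and the macro expects a variable named drive in scope):

#define DRV_NAME "ide-example"

static void example_trace_rq(ide_drive_t *drive, struct request *rq)
{
	__ide_debug_log(IDE_DBG_RQ, "handling rq %p", rq);
}
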
-
-/*
- * Power Management state machine (rq->pm->pm_step).
- *
- * For each step, the core calls ide_start_power_step() first.
- * This can return:
- * - ide_stopped : In this case, the core calls us back again unless
- *	  the step has been set to IDE_PM_COMPLETED.
- * - ide_started : In this case, the channel is left busy until an
- * async event (interrupt) occurs.
- * Typically, ide_start_power_step() will issue a taskfile request with
- * do_rw_taskfile().
- *
- * Upon reception of the interrupt, the core will call ide_complete_power_step()
- * with the error code if any. This routine should update the step value
- * and return. It should not start a new request. The core will call
- * ide_start_power_step() for the new step value, unless the step has
- * been set to IDE_PM_COMPLETED.
- */
-enum {
- IDE_PM_START_SUSPEND,
- IDE_PM_FLUSH_CACHE = IDE_PM_START_SUSPEND,
- IDE_PM_STANDBY,
-
- IDE_PM_START_RESUME,
- IDE_PM_RESTORE_PIO = IDE_PM_START_RESUME,
- IDE_PM_IDLE,
- IDE_PM_RESTORE_DMA,
-
- IDE_PM_COMPLETED,
-};
-
-int generic_ide_suspend(struct device *, pm_message_t);
-int generic_ide_resume(struct device *);
-
-void ide_complete_power_step(ide_drive_t *, struct request *);
-ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *);
-void ide_complete_pm_rq(ide_drive_t *, struct request *);
-void ide_check_pm_state(ide_drive_t *, struct request *);
-
-/*
- * Subdrivers support.
- *
- * The gen_driver.owner field should be set to the module owner of this driver.
- * The gen_driver.name field should be set to the name of this driver.
- */
-struct ide_driver {
- const char *version;
- ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t);
- struct device_driver gen_driver;
- int (*probe)(ide_drive_t *);
- void (*remove)(ide_drive_t *);
- void (*resume)(ide_drive_t *);
- void (*shutdown)(ide_drive_t *);
-#ifdef CONFIG_IDE_PROC_FS
- ide_proc_entry_t * (*proc_entries)(ide_drive_t *);
- const struct ide_proc_devset * (*proc_devsets)(ide_drive_t *);
-#endif
-};
-
-#define to_ide_driver(drv) container_of(drv, struct ide_driver, gen_driver)
-
-int ide_device_get(ide_drive_t *);
-void ide_device_put(ide_drive_t *);
-
-struct ide_ioctl_devset {
- unsigned int get_ioctl;
- unsigned int set_ioctl;
- const struct ide_devset *setting;
-};
-
-int ide_setting_ioctl(ide_drive_t *, struct block_device *, unsigned int,
- unsigned long, const struct ide_ioctl_devset *);
-
-int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned long);
-
-extern int ide_vlb_clk;
-extern int ide_pci_clk;
-
-int ide_end_rq(ide_drive_t *, struct request *, blk_status_t, unsigned int);
-void ide_kill_rq(ide_drive_t *, struct request *);
-
-void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
-void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
-
-void ide_execute_command(ide_drive_t *, struct ide_cmd *, ide_handler_t *,
- unsigned int);
-
-void ide_pad_transfer(ide_drive_t *, int, int);
-
-ide_startstop_t ide_error(ide_drive_t *, const char *, u8);
-
-void ide_fix_driveid(u16 *);
-
-extern void ide_fixstring(u8 *, const int, const int);
-
-int ide_busy_sleep(ide_drive_t *, unsigned long, int);
-
-int __ide_wait_stat(ide_drive_t *, u8, u8, unsigned long, u8 *);
-int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);
-
-ide_startstop_t ide_do_park_unpark(ide_drive_t *, struct request *);
-ide_startstop_t ide_do_devset(ide_drive_t *, struct request *);
-
-extern ide_startstop_t ide_do_reset (ide_drive_t *);
-
-extern int ide_devset_execute(ide_drive_t *drive,
- const struct ide_devset *setting, int arg);
-
-void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8);
-int ide_complete_rq(ide_drive_t *, blk_status_t, unsigned int);
-
-void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd);
-void ide_tf_dump(const char *, struct ide_cmd *);
-
-void ide_exec_command(ide_hwif_t *, u8);
-u8 ide_read_status(ide_hwif_t *);
-u8 ide_read_altstatus(ide_hwif_t *);
-void ide_write_devctl(ide_hwif_t *, u8);
-
-void ide_dev_select(ide_drive_t *);
-void ide_tf_load(ide_drive_t *, struct ide_taskfile *, u8);
-void ide_tf_read(ide_drive_t *, struct ide_taskfile *, u8);
-
-void ide_input_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
-void ide_output_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
-
-void SELECT_MASK(ide_drive_t *, int);
-
-u8 ide_read_error(ide_drive_t *);
-void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *);
-
-int ide_check_ireason(ide_drive_t *, struct request *, int, int, int);
-
-int ide_check_atapi_device(ide_drive_t *, const char *);
-
-void ide_init_pc(struct ide_atapi_pc *);
-
-/* Disk head parking */
-extern wait_queue_head_t ide_park_wq;
-ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
- char *buf);
-ssize_t ide_park_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len);
-
-/*
- * Special requests for ide-tape block device strategy routine.
- *
- * In order to service a character device command, we add special requests to
- * the tail of our block device request queue and wait for their completion.
- */
-enum {
- REQ_IDETAPE_PC1 = (1 << 0), /* packet command (first stage) */
- REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */
- REQ_IDETAPE_READ = (1 << 2),
- REQ_IDETAPE_WRITE = (1 << 3),
-};
-
-int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *,
- void *, unsigned int);
-
-int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *);
-int ide_do_start_stop(ide_drive_t *, struct gendisk *, int);
-int ide_set_media_lock(ide_drive_t *, struct gendisk *, int);
-void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *);
-void ide_retry_pc(ide_drive_t *drive);
-
-void ide_prep_sense(ide_drive_t *drive, struct request *rq);
-int ide_queue_sense_rq(ide_drive_t *drive, void *special);
-
-int ide_cd_expiry(ide_drive_t *);
-
-int ide_cd_get_xferlen(struct request *);
-
-ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_cmd *);
-
-ide_startstop_t do_rw_taskfile(ide_drive_t *, struct ide_cmd *);
-
-void ide_pio_bytes(ide_drive_t *, struct ide_cmd *, unsigned int, unsigned int);
-
-void ide_finish_cmd(ide_drive_t *, struct ide_cmd *, u8);
-
-int ide_raw_taskfile(ide_drive_t *, struct ide_cmd *, u8 *, u16);
-int ide_no_data_taskfile(ide_drive_t *, struct ide_cmd *);
-
-int ide_taskfile_ioctl(ide_drive_t *, unsigned long);
-
-int ide_dev_read_id(ide_drive_t *, u8, u16 *, int);
-
-extern int ide_driveid_update(ide_drive_t *);
-extern int ide_config_drive_speed(ide_drive_t *, u8);
-extern u8 eighty_ninty_three (ide_drive_t *);
-extern int taskfile_lib_get_identify(ide_drive_t *drive, u8 *);
-
-extern int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout);
-
-extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
-
-extern void ide_timer_expiry(unsigned long);
-extern irqreturn_t ide_intr(int irq, void *dev_id);
-extern void do_ide_request(struct request_queue *);
-extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
-
-void ide_init_disk(struct gendisk *, ide_drive_t *);
-
-#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
-extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name);
-#define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME)
-#else
-#define ide_pci_register_driver(d) pci_register_driver(d)
-#endif
-
-static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev)
-{
- if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 5) != 5)
- return 1;
- return 0;
-}
-
-void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *,
- struct ide_hw *, struct ide_hw **);
-void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
-int ide_pci_set_master(struct pci_dev *, const char *);
-unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *);
-int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *);
-int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
-#else
-static inline int ide_hwif_setup_dma(ide_hwif_t *hwif,
- const struct ide_port_info *d)
-{
- return -EINVAL;
-}
-#endif
-
-struct ide_pci_enablebit {
- u8 reg; /* byte pci reg holding the enable-bit */
- u8 mask; /* mask to isolate the enable-bit */
- u8 val; /* value of masked reg when "enabled" */
-};
-
-enum {
- /* Uses ISA control ports not PCI ones. */
- IDE_HFLAG_ISA_PORTS = (1 << 0),
- /* single port device */
- IDE_HFLAG_SINGLE = (1 << 1),
- /* don't use legacy PIO blacklist */
- IDE_HFLAG_PIO_NO_BLACKLIST = (1 << 2),
- /* set for the second port of QD65xx */
- IDE_HFLAG_QD_2ND_PORT = (1 << 3),
- /* use PIO8/9 for prefetch off/on */
- IDE_HFLAG_ABUSE_PREFETCH = (1 << 4),
- /* use PIO6/7 for fast-devsel off/on */
- IDE_HFLAG_ABUSE_FAST_DEVSEL = (1 << 5),
- /* use 100-102 and 200-202 PIO values to set DMA modes */
- IDE_HFLAG_ABUSE_DMA_MODES = (1 << 6),
- /*
-	 * keep DMA setting when programming PIO mode; may be used only
-	 * for hosts which have separate PIO and DMA timings (i.e. PMAC)
- */
- IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = (1 << 7),
- /* program host for the transfer mode after programming device */
- IDE_HFLAG_POST_SET_MODE = (1 << 8),
- /* don't program host/device for the transfer mode ("smart" hosts) */
- IDE_HFLAG_NO_SET_MODE = (1 << 9),
- /* trust BIOS for programming chipset/device for DMA */
- IDE_HFLAG_TRUST_BIOS_FOR_DMA = (1 << 10),
- /* host is CS5510/CS5520 */
- IDE_HFLAG_CS5520 = (1 << 11),
- /* ATAPI DMA is unsupported */
- IDE_HFLAG_NO_ATAPI_DMA = (1 << 12),
- /* set if host is a "non-bootable" controller */
- IDE_HFLAG_NON_BOOTABLE = (1 << 13),
- /* host doesn't support DMA */
- IDE_HFLAG_NO_DMA = (1 << 14),
- /* check if host is PCI IDE device before allowing DMA */
- IDE_HFLAG_NO_AUTODMA = (1 << 15),
- /* host uses MMIO */
- IDE_HFLAG_MMIO = (1 << 16),
- /* no LBA48 */
- IDE_HFLAG_NO_LBA48 = (1 << 17),
- /* no LBA48 DMA */
- IDE_HFLAG_NO_LBA48_DMA = (1 << 18),
- /* data FIFO is cleared by an error */
- IDE_HFLAG_ERROR_STOPS_FIFO = (1 << 19),
- /* serialize ports */
- IDE_HFLAG_SERIALIZE = (1 << 20),
- /* host is DTC2278 */
- IDE_HFLAG_DTC2278 = (1 << 21),
- /* 4 devices on a single set of I/O ports */
- IDE_HFLAG_4DRIVES = (1 << 22),
- /* host is TRM290 */
- IDE_HFLAG_TRM290 = (1 << 23),
- /* use 32-bit I/O ops */
- IDE_HFLAG_IO_32BIT = (1 << 24),
- /* unmask IRQs */
- IDE_HFLAG_UNMASK_IRQS = (1 << 25),
- IDE_HFLAG_BROKEN_ALTSTATUS = (1 << 26),
- /* serialize ports if DMA is possible (for sl82c105) */
- IDE_HFLAG_SERIALIZE_DMA = (1 << 27),
- /* force host out of "simplex" mode */
- IDE_HFLAG_CLEAR_SIMPLEX = (1 << 28),
- /* DSC overlap is unsupported */
- IDE_HFLAG_NO_DSC = (1 << 29),
- /* never use 32-bit I/O ops */
- IDE_HFLAG_NO_IO_32BIT = (1 << 30),
- /* never unmask IRQs */
- IDE_HFLAG_NO_UNMASK_IRQS = (1 << 31),
-};
-
-#ifdef CONFIG_BLK_DEV_OFFBOARD
-# define IDE_HFLAG_OFF_BOARD 0
-#else
-# define IDE_HFLAG_OFF_BOARD IDE_HFLAG_NON_BOOTABLE
-#endif
-
-struct ide_port_info {
- char *name;
-
- int (*init_chipset)(struct pci_dev *);
-
- void (*get_lock)(irq_handler_t, void *);
- void (*release_lock)(void);
-
- void (*init_iops)(ide_hwif_t *);
- void (*init_hwif)(ide_hwif_t *);
- int (*init_dma)(ide_hwif_t *,
- const struct ide_port_info *);
-
- const struct ide_tp_ops *tp_ops;
- const struct ide_port_ops *port_ops;
- const struct ide_dma_ops *dma_ops;
-
- struct ide_pci_enablebit enablebits[2];
-
- hwif_chipset_t chipset;
-
- u16 max_sectors; /* if < than the default one */
-
- u32 host_flags;
-
- int irq_flags;
-
- u8 pio_mask;
- u8 swdma_mask;
- u8 mwdma_mask;
- u8 udma_mask;
-};
-
-/*
- * State information carried for REQ_TYPE_ATA_PM_SUSPEND and REQ_TYPE_ATA_PM_RESUME
- * requests.
- */
-struct ide_pm_state {
- /* PM state machine step value, currently driver specific */
- int pm_step;
- /* requested PM state value (S1, S2, S3, S4, ...) */
- u32 pm_state;
- void* data; /* for driver use */
-};
-
-
-int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *);
-int ide_pci_init_two(struct pci_dev *, struct pci_dev *,
- const struct ide_port_info *, void *);
-void ide_pci_remove(struct pci_dev *);
-
-#ifdef CONFIG_PM
-int ide_pci_suspend(struct pci_dev *, pm_message_t);
-int ide_pci_resume(struct pci_dev *);
-#else
-#define ide_pci_suspend NULL
-#define ide_pci_resume NULL
-#endif
-
-void ide_map_sg(ide_drive_t *, struct ide_cmd *);
-void ide_init_sg_cmd(struct ide_cmd *, unsigned int);
-
-#define BAD_DMA_DRIVE 0
-#define GOOD_DMA_DRIVE 1
-
-struct drive_list_entry {
- const char *id_model;
- const char *id_firmware;
-};
-
-int ide_in_drive_list(u16 *, const struct drive_list_entry *);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA
-int ide_dma_good_drive(ide_drive_t *);
-int __ide_dma_bad_drive(ide_drive_t *);
-
-u8 ide_find_dma_mode(ide_drive_t *, u8);
-
-static inline u8 ide_max_dma_mode(ide_drive_t *drive)
-{
- return ide_find_dma_mode(drive, XFER_UDMA_6);
-}
-
-void ide_dma_off_quietly(ide_drive_t *);
-void ide_dma_off(ide_drive_t *);
-void ide_dma_on(ide_drive_t *);
-int ide_set_dma(ide_drive_t *);
-void ide_check_dma_crc(ide_drive_t *);
-ide_startstop_t ide_dma_intr(ide_drive_t *);
-
-int ide_allocate_dma_engine(ide_hwif_t *);
-void ide_release_dma_engine(ide_hwif_t *);
-
-int ide_dma_prepare(ide_drive_t *, struct ide_cmd *);
-void ide_dma_unmap_sg(ide_drive_t *, struct ide_cmd *);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
-int config_drive_for_dma(ide_drive_t *);
-int ide_build_dmatable(ide_drive_t *, struct ide_cmd *);
-void ide_dma_host_set(ide_drive_t *, int);
-int ide_dma_setup(ide_drive_t *, struct ide_cmd *);
-extern void ide_dma_start(ide_drive_t *);
-int ide_dma_end(ide_drive_t *);
-int ide_dma_test_irq(ide_drive_t *);
-int ide_dma_sff_timer_expiry(ide_drive_t *);
-u8 ide_dma_sff_read_status(ide_hwif_t *);
-extern const struct ide_dma_ops sff_dma_ops;
-#else
-static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
-#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
-
-void ide_dma_lost_irq(ide_drive_t *);
-ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int);
-
-#else
-static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; }
-static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; }
-static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; }
-static inline void ide_dma_off(ide_drive_t *drive) { ; }
-static inline void ide_dma_on(ide_drive_t *drive) { ; }
-static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
-static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
-static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
-static inline ide_startstop_t ide_dma_intr(ide_drive_t *drive) { return ide_stopped; }
-static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; }
-static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
-static inline int ide_dma_prepare(ide_drive_t *drive,
- struct ide_cmd *cmd) { return 1; }
-static inline void ide_dma_unmap_sg(ide_drive_t *drive,
- struct ide_cmd *cmd) { ; }
-#endif /* CONFIG_BLK_DEV_IDEDMA */
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
-int ide_acpi_init(void);
-bool ide_port_acpi(ide_hwif_t *hwif);
-extern int ide_acpi_exec_tfs(ide_drive_t *drive);
-extern void ide_acpi_get_timing(ide_hwif_t *hwif);
-extern void ide_acpi_push_timing(ide_hwif_t *hwif);
-void ide_acpi_init_port(ide_hwif_t *);
-void ide_acpi_port_init_devices(ide_hwif_t *);
-extern void ide_acpi_set_state(ide_hwif_t *hwif, int on);
-#else
-static inline int ide_acpi_init(void) { return 0; }
-static inline bool ide_port_acpi(ide_hwif_t *hwif) { return 0; }
-static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; }
-static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_init_port(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_port_init_devices(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
-#endif
-
-void ide_register_region(struct gendisk *);
-void ide_unregister_region(struct gendisk *);
-
-void ide_check_nien_quirk_list(ide_drive_t *);
-void ide_undecoded_slave(ide_drive_t *);
-
-void ide_port_apply_params(ide_hwif_t *);
-int ide_sysfs_register_port(ide_hwif_t *);
-
-struct ide_host *ide_host_alloc(const struct ide_port_info *, struct ide_hw **,
- unsigned int);
-void ide_host_free(struct ide_host *);
-int ide_host_register(struct ide_host *, const struct ide_port_info *,
- struct ide_hw **);
-int ide_host_add(const struct ide_port_info *, struct ide_hw **, unsigned int,
- struct ide_host **);
-void ide_host_remove(struct ide_host *);
-int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
-void ide_port_unregister_devices(ide_hwif_t *);
-void ide_port_scan(ide_hwif_t *);
-
-static inline void *ide_get_hwifdata (ide_hwif_t * hwif)
-{
- return hwif->hwif_data;
-}
-
-static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data)
-{
- hwif->hwif_data = data;
-}
-
-extern void ide_toggle_bounce(ide_drive_t *drive, int on);
-
-u64 ide_get_lba_addr(struct ide_cmd *, int);
-u8 ide_dump_status(ide_drive_t *, const char *, u8);
-
-struct ide_timing {
- u8 mode;
- u8 setup; /* t1 */
- u16 act8b; /* t2 for 8-bit io */
- u16 rec8b; /* t2i for 8-bit io */
- u16 cyc8b; /* t0 for 8-bit io */
- u16 active; /* t2 or tD */
- u16 recover; /* t2i or tK */
- u16 cycle; /* t0 */
- u16 udma; /* t2CYCTYP/2 */
-};
-
-enum {
- IDE_TIMING_SETUP = (1 << 0),
- IDE_TIMING_ACT8B = (1 << 1),
- IDE_TIMING_REC8B = (1 << 2),
- IDE_TIMING_CYC8B = (1 << 3),
- IDE_TIMING_8BIT = IDE_TIMING_ACT8B | IDE_TIMING_REC8B |
- IDE_TIMING_CYC8B,
- IDE_TIMING_ACTIVE = (1 << 4),
- IDE_TIMING_RECOVER = (1 << 5),
- IDE_TIMING_CYCLE = (1 << 6),
- IDE_TIMING_UDMA = (1 << 7),
- IDE_TIMING_ALL = IDE_TIMING_SETUP | IDE_TIMING_8BIT |
- IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER |
- IDE_TIMING_CYCLE | IDE_TIMING_UDMA,
-};
-
-struct ide_timing *ide_timing_find_mode(u8);
-u16 ide_pio_cycle_time(ide_drive_t *, u8);
-void ide_timing_merge(struct ide_timing *, struct ide_timing *,
- struct ide_timing *, unsigned int);
-int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int);
-
-#ifdef CONFIG_IDE_XFER_MODE
-int ide_scan_pio_blacklist(char *);
-const char *ide_xfer_verbose(u8);
-int ide_pio_need_iordy(ide_drive_t *, const u8);
-int ide_set_pio_mode(ide_drive_t *, u8);
-int ide_set_dma_mode(ide_drive_t *, u8);
-void ide_set_pio(ide_drive_t *, u8);
-int ide_set_xfer_rate(ide_drive_t *, u8);
-#else
-static inline void ide_set_pio(ide_drive_t *drive, u8 pio) { ; }
-static inline int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) { return -1; }
-#endif
-
-static inline void ide_set_max_pio(ide_drive_t *drive)
-{
- ide_set_pio(drive, 255);
-}
-
-char *ide_media_string(ide_drive_t *);
-
-extern const struct attribute_group *ide_dev_groups[];
-extern struct bus_type ide_bus_type;
-extern struct class *ide_port_class;
-
-static inline void ide_dump_identify(u8 *id)
-{
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 2, id, 512, 0);
-}
-
-static inline int hwif_to_node(ide_hwif_t *hwif)
-{
- return hwif->dev ? dev_to_node(hwif->dev) : -1;
-}
-
-static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive)
-{
- ide_drive_t *peer = drive->hwif->devices[(drive->dn ^ 1) & 1];
-
- return (peer->dev_flags & IDE_DFLAG_PRESENT) ? peer : NULL;
-}
-
-static inline void *ide_get_drivedata(ide_drive_t *drive)
-{
- return drive->drive_data;
-}
-
-static inline void ide_set_drivedata(ide_drive_t *drive, void *data)
-{
- drive->drive_data = data;
-}
-
-#define ide_port_for_each_dev(i, dev, port) \
- for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++)
-
-#define ide_port_for_each_present_dev(i, dev, port) \
- for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) \
- if ((dev)->dev_flags & IDE_DFLAG_PRESENT)
-
-#define ide_host_for_each_port(i, port, host) \
- for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++)
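
Note that these iterators visit every slot up to the maximum, including empty ones, so callers must NULL-check the cursor — e.g., a hypothetical sweep over a host's ports:

	ide_hwif_t *port;
	int i;

	ide_host_for_each_port(i, port, host)
		if (port && port->present)
			ide_port_scan(port);
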
-
-
-#endif /* _IDE_H */
diff --git a/include/linux/idle_inject.h b/include/linux/idle_inject.h
new file mode 100644
index 000000000000..a85d5dd40f72
--- /dev/null
+++ b/include/linux/idle_inject.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Linaro Ltd
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ */
+#ifndef __IDLE_INJECT_H__
+#define __IDLE_INJECT_H__
+
+/* private idle injection device structure */
+struct idle_inject_device;
+
+struct idle_inject_device *idle_inject_register(struct cpumask *cpumask);
+
+struct idle_inject_device *idle_inject_register_full(struct cpumask *cpumask,
+ bool (*update)(void));
+
+void idle_inject_unregister(struct idle_inject_device *ii_dev);
+
+int idle_inject_start(struct idle_inject_device *ii_dev);
+
+void idle_inject_stop(struct idle_inject_device *ii_dev);
+
+void idle_inject_set_duration(struct idle_inject_device *ii_dev,
+ unsigned int run_duration_us,
+ unsigned int idle_duration_us);
+
+void idle_inject_get_duration(struct idle_inject_device *ii_dev,
+ unsigned int *run_duration_us,
+ unsigned int *idle_duration_us);
+
+void idle_inject_set_latency(struct idle_inject_device *ii_dev,
+ unsigned int latency_us);
+
+#endif /* __IDLE_INJECT_H__ */
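
As a usage sketch of the API above (the NULL-on-failure convention for idle_inject_register() is assumed here, since the header itself does not document it; all names besides the API calls are hypothetical):

#include <linux/cpumask.h>
#include <linux/idle_inject.h>

static struct idle_inject_device *ii_dev;

static int example_begin_injection(void)
{
	static struct cpumask mask;	/* writable copy for the register call */

	cpumask_clear(&mask);
	cpumask_set_cpu(0, &mask);	/* inject idle cycles on CPU0 only */

	ii_dev = idle_inject_register(&mask);
	if (!ii_dev)
		return -ENOMEM;

	/* per cycle: run for 50 ms, then force 10 ms of idle */
	idle_inject_set_duration(ii_dev, 50000, 10000);
	return idle_inject_start(ii_dev);
}
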
diff --git a/include/linux/idr.h b/include/linux/idr.h
index bf70b3ef0a07..a0dce14090a9 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -1,9 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/idr.h
*
* 2002-10-18 written by Jim Houston jim.houston@ccur.com
* Copyright (C) 2002 by Concurrent Computer Corporation
- * Distributed under the GNU GPL license version 2.
*
* Small id to pointer translation service avoiding fixed sized
* tables.
@@ -18,6 +18,7 @@
struct idr {
struct radix_tree_root idr_rt;
+ unsigned int idr_base;
unsigned int idr_next;
};
@@ -28,13 +29,31 @@ struct idr {
#define IDR_FREE 0
/* Set the IDR flag and the IDR_FREE tag */
-#define IDR_RT_MARKER ((__force gfp_t)(3 << __GFP_BITS_SHIFT))
+#define IDR_RT_MARKER (ROOT_IS_IDR | (__force gfp_t) \
+ (1 << (ROOT_TAG_SHIFT + IDR_FREE)))
-#define IDR_INIT \
-{ \
- .idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER) \
+#define IDR_INIT_BASE(name, base) { \
+ .idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER), \
+ .idr_base = (base), \
+ .idr_next = 0, \
}
-#define DEFINE_IDR(name) struct idr name = IDR_INIT
+
+/**
+ * IDR_INIT() - Initialise an IDR.
+ * @name: Name of IDR.
+ *
+ * A freshly-initialised IDR contains no IDs.
+ */
+#define IDR_INIT(name) IDR_INIT_BASE(name, 0)
+
+/**
+ * DEFINE_IDR() - Define a statically-allocated IDR.
+ * @name: Name of IDR.
+ *
+ * An IDR defined using this macro is ready for use with no additional
+ * initialisation required. It contains no IDs.
+ */
+#define DEFINE_IDR(name) struct idr name = IDR_INIT(name)
/**
* idr_get_cursor - Return the current position of the cyclic allocator
@@ -79,26 +98,65 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val)
* period).
*/
+#define idr_lock(idr) xa_lock(&(idr)->idr_rt)
+#define idr_unlock(idr) xa_unlock(&(idr)->idr_rt)
+#define idr_lock_bh(idr) xa_lock_bh(&(idr)->idr_rt)
+#define idr_unlock_bh(idr) xa_unlock_bh(&(idr)->idr_rt)
+#define idr_lock_irq(idr) xa_lock_irq(&(idr)->idr_rt)
+#define idr_unlock_irq(idr) xa_unlock_irq(&(idr)->idr_rt)
+#define idr_lock_irqsave(idr, flags) \
+ xa_lock_irqsave(&(idr)->idr_rt, flags)
+#define idr_unlock_irqrestore(idr, flags) \
+ xa_unlock_irqrestore(&(idr)->idr_rt, flags)
+
void idr_preload(gfp_t gfp_mask);
-int idr_alloc(struct idr *, void *entry, int start, int end, gfp_t);
-int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t);
+
+int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
+int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id,
+ unsigned long max, gfp_t);
+int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
+void *idr_remove(struct idr *, unsigned long id);
+void *idr_find(const struct idr *, unsigned long id);
int idr_for_each(const struct idr *,
int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *, int *nextid);
-void *idr_replace(struct idr *, void *, int id);
+void *idr_get_next_ul(struct idr *, unsigned long *nextid);
+void *idr_replace(struct idr *, void *, unsigned long id);
void idr_destroy(struct idr *);
-static inline void *idr_remove(struct idr *idr, int id)
+/**
+ * idr_init_base() - Initialise an IDR.
+ * @idr: IDR handle.
+ * @base: The base value for the IDR.
+ *
+ * This variation of idr_init() creates an IDR which will allocate IDs
+ * starting at %base.
+ */
+static inline void idr_init_base(struct idr *idr, int base)
{
- return radix_tree_delete_item(&idr->idr_rt, id, NULL);
+ INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
+ idr->idr_base = base;
+ idr->idr_next = 0;
}
+/**
+ * idr_init() - Initialise an IDR.
+ * @idr: IDR handle.
+ *
+ * Initialise a dynamically allocated IDR. To initialise a
+ * statically allocated IDR, use DEFINE_IDR().
+ */
static inline void idr_init(struct idr *idr)
{
- INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
- idr->idr_next = 0;
+ idr_init_base(idr, 0);
}
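
A usage sketch against the reworked interface (end == 0 meaning "no upper bound" is the long-standing idr_alloc() convention; the names here are hypothetical):

static DEFINE_IDR(example_idr);

static int example_store(void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);
	id = idr_alloc(&example_idr, ptr, 1, 0, GFP_NOWAIT);
	idr_preload_end();

	return id;	/* allocated ID (>= 1) or negative errno */
}
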
+/**
+ * idr_is_empty() - Are there any IDs allocated?
+ * @idr: IDR handle.
+ *
+ * Return: %true if no IDs are currently allocated from this IDR.
+ */
static inline bool idr_is_empty(const struct idr *idr)
{
return radix_tree_empty(&idr->idr_rt) &&
@@ -113,56 +171,67 @@ static inline bool idr_is_empty(const struct idr *idr)
*/
static inline void idr_preload_end(void)
{
- preempt_enable();
+ local_unlock(&radix_tree_preloads.lock);
}
/**
- * idr_find - return pointer for given id
- * @idr: idr handle
- * @id: lookup key
- *
- * Return the pointer given the id it has been registered with. A %NULL
- * return indicates that @id is not valid or you passed %NULL in
- * idr_get_new().
+ * idr_for_each_entry() - Iterate over an IDR's elements of a given type.
+ * @idr: IDR handle.
+ * @entry: The type * to use as cursor
+ * @id: Entry ID.
*
- * This function can be called under rcu_read_lock(), given that the leaf
- * pointers lifetimes are correctly managed.
+ * @entry and @id do not need to be initialized before the loop, and
+ * after normal termination @entry is left with the value NULL. This
+ * is convenient for a "not found" value.
*/
-static inline void *idr_find(const struct idr *idr, int id)
-{
- return radix_tree_lookup(&idr->idr_rt, id);
-}
+#define idr_for_each_entry(idr, entry, id) \
+ for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; id += 1U)
/**
- * idr_for_each_entry - iterate over an idr's elements of a given type
- * @idr: idr handle
- * @entry: the type * to use as cursor
- * @id: id entry's key
+ * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
+ * @idr: IDR handle.
+ * @entry: The type * to use as cursor.
+ * @tmp: A temporary placeholder for ID.
+ * @id: Entry ID.
*
* @entry and @id do not need to be initialized before the loop, and
- * after normal terminatinon @entry is left with the value NULL. This
+ * after normal termination @entry is left with the value NULL. This
* is convenient for a "not found" value.
*/
-#define idr_for_each_entry(idr, entry, id) \
- for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)
+#define idr_for_each_entry_ul(idr, entry, tmp, id) \
+ for (tmp = 0, id = 0; \
+ tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
+ tmp = id, ++id)
/**
- * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type
- * @idr: idr handle
- * @entry: the type * to use as cursor
- * @id: id entry's key
+ * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type
+ * @idr: IDR handle.
+ * @entry: The type * to use as a cursor.
+ * @id: Entry ID.
*
- * Continue to iterate over list of given type, continuing after
- * the current position.
+ * Continue to iterate over entries, continuing after the current position.
*/
#define idr_for_each_entry_continue(idr, entry, id) \
for ((entry) = idr_get_next((idr), &(id)); \
entry; \
++id, (entry) = idr_get_next((idr), &(id)))
+/**
+ * idr_for_each_entry_continue_ul() - Continue iteration over an IDR's elements of a given type
+ * @idr: IDR handle.
+ * @entry: The type * to use as a cursor.
+ * @tmp: A temporary placeholder for ID.
+ * @id: Entry ID.
+ *
+ * Continue to iterate over entries, continuing after the current position.
+ */
+#define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \
+ for (tmp = id; \
+ tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
+ tmp = id, ++id)
+
/*
- * IDA - IDR based id allocator, use when translation from id to
- * pointer isn't necessary.
+ * IDA - ID Allocator, use when translation from id to pointer isn't necessary.
*/
#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */
#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long))
@@ -172,45 +241,89 @@ struct ida_bitmap {
unsigned long bitmap[IDA_BITMAP_LONGS];
};
-DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap);
-
struct ida {
- struct radix_tree_root ida_rt;
+ struct xarray xa;
};
-#define IDA_INIT { \
- .ida_rt = RADIX_TREE_INIT(IDR_RT_MARKER | GFP_NOWAIT), \
+#define IDA_INIT_FLAGS (XA_FLAGS_LOCK_IRQ | XA_FLAGS_ALLOC)
+
+#define IDA_INIT(name) { \
+ .xa = XARRAY_INIT(name, IDA_INIT_FLAGS) \
}
-#define DEFINE_IDA(name) struct ida name = IDA_INIT
+#define DEFINE_IDA(name) struct ida name = IDA_INIT(name)
-int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
-int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
-void ida_remove(struct ida *ida, int id);
+int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
+void ida_free(struct ida *, unsigned int id);
void ida_destroy(struct ida *ida);
-int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
- gfp_t gfp_mask);
-void ida_simple_remove(struct ida *ida, unsigned int id);
+/**
+ * ida_alloc() - Allocate an unused ID.
+ * @ida: IDA handle.
+ * @gfp: Memory allocation flags.
+ *
+ * Allocate an ID between 0 and %INT_MAX, inclusive.
+ *
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
+ * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
+ * or %-ENOSPC if there are no free IDs.
+ */
+static inline int ida_alloc(struct ida *ida, gfp_t gfp)
+{
+ return ida_alloc_range(ida, 0, ~0, gfp);
+}
-static inline void ida_init(struct ida *ida)
+/**
+ * ida_alloc_min() - Allocate an unused ID.
+ * @ida: IDA handle.
+ * @min: Lowest ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Allocate an ID between @min and %INT_MAX, inclusive.
+ *
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
+ * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
+ * or %-ENOSPC if there are no free IDs.
+ */
+static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
{
- INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT);
+ return ida_alloc_range(ida, min, ~0, gfp);
}
/**
- * ida_get_new - allocate new ID
- * @ida: idr handle
- * @p_id: pointer to the allocated handle
+ * ida_alloc_max() - Allocate an unused ID.
+ * @ida: IDA handle.
+ * @max: Highest ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Allocate an ID between 0 and @max, inclusive.
*
- * Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
+ * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
+ * or %-ENOSPC if there are no free IDs.
*/
-static inline int ida_get_new(struct ida *ida, int *p_id)
+static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
{
- return ida_get_new_above(ida, 0, p_id);
+ return ida_alloc_range(ida, 0, max, gfp);
}
+static inline void ida_init(struct ida *ida)
+{
+ xa_init_flags(&ida->xa, IDA_INIT_FLAGS);
+}
+
+/*
+ * ida_simple_get() and ida_simple_remove() are deprecated. Use
+ * ida_alloc() and ida_free() instead respectively.
+ */
+#define ida_simple_get(ida, start, end, gfp) \
+ ida_alloc_range(ida, start, (end) - 1, gfp)
+#define ida_simple_remove(ida, id) ida_free(ida, id)
+
static inline bool ida_is_empty(const struct ida *ida)
{
- return radix_tree_empty(&ida->ida_rt);
+ return xa_empty(&ida->xa);
}
#endif /* __IDR_H__ */
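
A matching sketch for the new IDA calls (names hypothetical; error codes per the kerneldoc above):

static DEFINE_IDA(example_ida);

static int example_use_instance(void)
{
	int id = ida_alloc(&example_ida, GFP_KERNEL);

	if (id < 0)
		return id;	/* -ENOMEM or -ENOSPC */
	/* ... use id as a device instance number ... */
	ida_free(&example_ida, id);
	return 0;
}
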
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 55a604ad459f..c4cf296e7eaf 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* IEEE 802.11 defines
*
@@ -8,10 +9,7 @@
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Copyright (c) 2018 - 2023 Intel Corporation
*/
#ifndef LINUX_IEEE80211_H
@@ -20,6 +18,7 @@
#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
+#include <linux/bitfield.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
@@ -77,6 +76,7 @@
#define IEEE80211_STYPE_ACTION 0x00D0
/* control */
+#define IEEE80211_STYPE_TRIGGER 0x0020
#define IEEE80211_STYPE_CTL_EXT 0x0060
#define IEEE80211_STYPE_BACK_REQ 0x0080
#define IEEE80211_STYPE_BACK 0x0090
@@ -107,6 +107,54 @@
/* extension, added by 802.11ad */
#define IEEE80211_STYPE_DMG_BEACON 0x0000
+#define IEEE80211_STYPE_S1G_BEACON 0x0010
+
+/* bits unique to S1G beacon */
+#define IEEE80211_S1G_BCN_NEXT_TBTT 0x100
+
+/* see 802.11ah-2016 9.9 NDP CMAC frames */
+#define IEEE80211_S1G_1MHZ_NDP_BITS 25
+#define IEEE80211_S1G_1MHZ_NDP_BYTES 4
+#define IEEE80211_S1G_2MHZ_NDP_BITS 37
+#define IEEE80211_S1G_2MHZ_NDP_BYTES 5
+
+#define IEEE80211_NDP_FTYPE_CTS 0
+#define IEEE80211_NDP_FTYPE_CF_END 0
+#define IEEE80211_NDP_FTYPE_PS_POLL 1
+#define IEEE80211_NDP_FTYPE_ACK 2
+#define IEEE80211_NDP_FTYPE_PS_POLL_ACK 3
+#define IEEE80211_NDP_FTYPE_BA 4
+#define IEEE80211_NDP_FTYPE_BF_REPORT_POLL 5
+#define IEEE80211_NDP_FTYPE_PAGING 6
+#define IEEE80211_NDP_FTYPE_PREQ 7
+
+#define SM64(f, v) ((((u64)v) << f##_S) & f)
+
+/* NDP CMAC frame fields */
+#define IEEE80211_NDP_FTYPE 0x0000000000000007
+#define IEEE80211_NDP_FTYPE_S 0x0000000000000000
+
+/* 1M Probe Request 11ah 9.9.3.1.1 */
+#define IEEE80211_NDP_1M_PREQ_ANO 0x0000000000000008
+#define IEEE80211_NDP_1M_PREQ_ANO_S 3
+#define IEEE80211_NDP_1M_PREQ_CSSID 0x00000000000FFFF0
+#define IEEE80211_NDP_1M_PREQ_CSSID_S 4
+#define IEEE80211_NDP_1M_PREQ_RTYPE 0x0000000000100000
+#define IEEE80211_NDP_1M_PREQ_RTYPE_S 20
+#define IEEE80211_NDP_1M_PREQ_RSV 0x0000000001E00000
+/* 2M Probe Request 11ah 9.9.3.1.2 */
+#define IEEE80211_NDP_2M_PREQ_ANO 0x0000000000000008
+#define IEEE80211_NDP_2M_PREQ_ANO_S 3
+#define IEEE80211_NDP_2M_PREQ_CSSID 0x0000000FFFFFFFF0
+#define IEEE80211_NDP_2M_PREQ_CSSID_S 4
+#define IEEE80211_NDP_2M_PREQ_RTYPE 0x0000001000000000
+#define IEEE80211_NDP_2M_PREQ_RTYPE_S 36
+
+#define IEEE80211_ANO_NETTYPE_WILD 15
+
/* control extension - for IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTL_EXT */
#define IEEE80211_CTL_EXT_POLL 0x2000
@@ -123,6 +171,21 @@
#define IEEE80211_MAX_SN IEEE80211_SN_MASK
#define IEEE80211_SN_MODULO (IEEE80211_MAX_SN + 1)
+
+/* PV1 Layout 11ah 9.8.3.1 */
+#define IEEE80211_PV1_FCTL_VERS 0x0003
+#define IEEE80211_PV1_FCTL_FTYPE 0x001c
+#define IEEE80211_PV1_FCTL_STYPE 0x00e0
+#define IEEE80211_PV1_FCTL_TODS 0x0100
+#define IEEE80211_PV1_FCTL_MOREFRAGS 0x0200
+#define IEEE80211_PV1_FCTL_PM 0x0400
+#define IEEE80211_PV1_FCTL_MOREDATA 0x0800
+#define IEEE80211_PV1_FCTL_PROTECTED 0x1000
+#define IEEE80211_PV1_FCTL_END_SP 0x2000
+#define IEEE80211_PV1_FCTL_RELAYED 0x4000
+#define IEEE80211_PV1_FCTL_ACK_POLICY 0x8000
+#define IEEE80211_PV1_FCTL_CTL_EXT 0x0f00
+
static inline bool ieee80211_sn_less(u16 sn1, u16 sn2)
{
return ((sn1 - sn2) & IEEE80211_SN_MASK) > (IEEE80211_SN_MODULO >> 1);
@@ -150,6 +213,7 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
#define IEEE80211_MAX_FRAG_THRESHOLD 2352
#define IEEE80211_MAX_RTS_THRESHOLD 2353
#define IEEE80211_MAX_AID 2007
+#define IEEE80211_MAX_AID_S1G 8191
#define IEEE80211_MAX_TIM_LEN 251
#define IEEE80211_MAX_MESH_PEERINGS 63
/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
@@ -232,12 +296,25 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
#define IEEE80211_HT_CTL_LEN 4
+/* trigger type within common_info of trigger frame */
+#define IEEE80211_TRIGGER_TYPE_MASK 0xf
+#define IEEE80211_TRIGGER_TYPE_BASIC 0x0
+#define IEEE80211_TRIGGER_TYPE_BFRP 0x1
+#define IEEE80211_TRIGGER_TYPE_MU_BAR 0x2
+#define IEEE80211_TRIGGER_TYPE_MU_RTS 0x3
+#define IEEE80211_TRIGGER_TYPE_BSRP 0x4
+#define IEEE80211_TRIGGER_TYPE_GCR_MU_BAR 0x5
+#define IEEE80211_TRIGGER_TYPE_BQRP 0x6
+#define IEEE80211_TRIGGER_TYPE_NFRP 0x7
+
struct ieee80211_hdr {
__le16 frame_control;
__le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
+ struct_group(addrs,
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ );
__le16 seq_ctrl;
u8 addr4[ETH_ALEN];
} __packed __aligned(2);
@@ -261,6 +338,26 @@ struct ieee80211_qos_hdr {
__le16 qos_ctrl;
} __packed __aligned(2);
+struct ieee80211_qos_hdr_4addr {
+ __le16 frame_control;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+} __packed __aligned(2);
+
+struct ieee80211_trigger {
+ __le16 frame_control;
+ __le16 duration;
+ u8 ra[ETH_ALEN];
+ u8 ta[ETH_ALEN];
+ __le64 common_info;
+ u8 variable[];
+} __packed __aligned(2);
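
A sketch of pulling the trigger type out of common_info with the masks defined earlier in this hunk (the helper name is hypothetical):

static bool example_is_basic_trigger(const struct ieee80211_trigger *trig)
{
	u64 common = le64_to_cpu(trig->common_info);

	return (common & IEEE80211_TRIGGER_TYPE_MASK) ==
	       IEEE80211_TRIGGER_TYPE_BASIC;
}
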
+
/**
* ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set
* @fc: frame control bytes in little-endian byteorder
@@ -374,6 +471,17 @@ static inline bool ieee80211_is_data(__le16 fc)
}
/**
+ * ieee80211_is_ext - check if type is IEEE80211_FTYPE_EXT
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee80211_is_ext(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_EXT);
+}
+
+/**
* ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set
* @fc: frame control bytes in little-endian byteorder
*/
@@ -472,6 +580,40 @@ static inline bool ieee80211_is_beacon(__le16 fc)
}
/**
+ * ieee80211_is_s1g_beacon - check if IEEE80211_FTYPE_EXT &&
+ * IEEE80211_STYPE_S1G_BEACON
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee80211_is_s1g_beacon(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE |
+ IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON);
+}
+
+/**
+ * ieee80211_next_tbtt_present - check if IEEE80211_FTYPE_EXT &&
+ * IEEE80211_STYPE_S1G_BEACON && IEEE80211_S1G_BCN_NEXT_TBTT
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee80211_next_tbtt_present(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON) &&
+ fc & cpu_to_le16(IEEE80211_S1G_BCN_NEXT_TBTT);
+}
+
+/**
+ * ieee80211_is_s1g_short_beacon - check if next tbtt present bit is set. Only
+ * true for S1G beacons when they're short.
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee80211_is_s1g_short_beacon(__le16 fc)
+{
+ return ieee80211_is_s1g_beacon(fc) && ieee80211_next_tbtt_present(fc);
+}
+
+/**
* ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM
* @fc: frame control bytes in little-endian byteorder
*/
@@ -622,17 +764,22 @@ static inline bool ieee80211_is_qos_nullfunc(__le16 fc)
}
/**
- * ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU
+ * ieee80211_is_trigger - check if frame is trigger frame
* @fc: frame control field in little-endian byteorder
*/
-static inline bool ieee80211_is_bufferable_mmpdu(__le16 fc)
+static inline bool ieee80211_is_trigger(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_TRIGGER);
+}
+
+/**
+ * ieee80211_is_any_nullfunc - check if frame is regular or QoS nullfunc frame
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee80211_is_any_nullfunc(__le16 fc)
{
- /* IEEE 802.11-2012, definition of "bufferable management frame";
- * note that this ignores the IBSS special case. */
- return ieee80211_is_mgmt(fc) &&
- (ieee80211_is_action(fc) ||
- ieee80211_is_disassoc(fc) ||
- ieee80211_is_deauth(fc));
+ return (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc));
}
/**
@@ -709,7 +856,7 @@ struct ieee80211_msrment_ie {
u8 token;
u8 mode;
u8 type;
- u8 request[0];
+ u8 request[];
} __packed;
/**
@@ -811,6 +958,8 @@ enum mesh_config_capab_flags {
IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL = 0x40,
};
+#define IEEE80211_MESHCONF_FORM_CONNECTED_TO_GATE 0x1
+
/**
* mesh channel switch parameters element's flag indicator
*
@@ -850,6 +999,7 @@ enum ieee80211_ht_chanwidth_values {
* @IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: 40 MHz channel width
* @IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ: 80 MHz channel width
* @IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ: 160 MHz or 80+80 MHz channel width
+ * @IEEE80211_OPMODE_NOTIF_BW_160_80P80: 160 / 80+80 MHz indicator flag
* @IEEE80211_OPMODE_NOTIF_RX_NSS_MASK: number of spatial streams mask
* (the NSS value is the value of this field + 1)
* @IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT: number of spatial streams shift
@@ -857,16 +1007,36 @@ enum ieee80211_ht_chanwidth_values {
* using a beamforming steering matrix
*/
enum ieee80211_vht_opmode_bits {
- IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK = 3,
+ IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK = 0x03,
IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ = 0,
IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ = 1,
IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ = 2,
IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ = 3,
+ IEEE80211_OPMODE_NOTIF_BW_160_80P80 = 0x04,
IEEE80211_OPMODE_NOTIF_RX_NSS_MASK = 0x70,
IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT = 4,
IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF = 0x80,
};
+/**
+ * enum ieee80211_s1g_chanwidth - S1G BSS channel widths
+ * These are defined in IEEE802.11-2016ah Table 10-20
+ * as BSS Channel Width.
+ *
+ * @IEEE80211_S1G_CHANWIDTH_1MHZ: 1MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_2MHZ: 2MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_4MHZ: 4MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_8MHZ: 8MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_16MHZ: 16MHz operating channel
+ */
+enum ieee80211_s1g_chanwidth {
+ IEEE80211_S1G_CHANWIDTH_1MHZ = 0,
+ IEEE80211_S1G_CHANWIDTH_2MHZ = 1,
+ IEEE80211_S1G_CHANWIDTH_4MHZ = 3,
+ IEEE80211_S1G_CHANWIDTH_8MHZ = 7,
+ IEEE80211_S1G_CHANWIDTH_16MHZ = 15,
+};
+
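+/*
+ * Illustrative sketch, not part of this patch: the encoding above adds
+ * one low bit per doubling of the width, so the operating width in MHz
+ * can be recovered from the population count (hypothetical helper):
+ */
+static inline int example_s1g_chanwidth_mhz(enum ieee80211_s1g_chanwidth w)
+{
+ return 1 << hweight8(w);
+}
+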
#define WLAN_SA_QUERY_TR_ID_LEN 2
#define WLAN_MEMBERSHIP_LEN 8
#define WLAN_USER_POSITION_LEN 16
@@ -881,6 +1051,118 @@ struct ieee80211_tpc_report_ie {
u8 link_margin;
} __packed;
+#define IEEE80211_ADDBA_EXT_FRAG_LEVEL_MASK GENMASK(2, 1)
+#define IEEE80211_ADDBA_EXT_FRAG_LEVEL_SHIFT 1
+#define IEEE80211_ADDBA_EXT_NO_FRAG BIT(0)
+#define IEEE80211_ADDBA_EXT_BUF_SIZE_MASK GENMASK(7, 5)
+#define IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT 10
+
+struct ieee80211_addba_ext_ie {
+ u8 data;
+} __packed;
+
+/**
+ * struct ieee80211_s1g_bcn_compat_ie
+ *
+ * S1G Beacon Compatibility element
+ */
+struct ieee80211_s1g_bcn_compat_ie {
+ __le16 compat_info;
+ __le16 beacon_int;
+ __le32 tsf_completion;
+} __packed;
+
+/**
+ * struct ieee80211_s1g_oper_ie
+ *
+ * S1G Operation element
+ */
+struct ieee80211_s1g_oper_ie {
+ u8 ch_width;
+ u8 oper_class;
+ u8 primary_ch;
+ u8 oper_ch;
+ __le16 basic_mcs_nss;
+} __packed;
+
+/**
+ * struct ieee80211_aid_response_ie
+ *
+ * AID Response element
+ */
+struct ieee80211_aid_response_ie {
+ __le16 aid;
+ u8 switch_count;
+ __le16 response_int;
+} __packed;
+
+struct ieee80211_s1g_cap {
+ u8 capab_info[10];
+ u8 supp_mcs_nss[5];
+} __packed;
+
+struct ieee80211_ext {
+ __le16 frame_control;
+ __le16 duration;
+ union {
+ struct {
+ u8 sa[ETH_ALEN];
+ __le32 timestamp;
+ u8 change_seq;
+ u8 variable[];
+ } __packed s1g_beacon;
+ struct {
+ u8 sa[ETH_ALEN];
+ __le32 timestamp;
+ u8 change_seq;
+ u8 next_tbtt[3];
+ u8 variable[];
+ } __packed s1g_short_beacon;
+ } u;
+} __packed __aligned(2);
+
+#define IEEE80211_TWT_CONTROL_NDP BIT(0)
+#define IEEE80211_TWT_CONTROL_RESP_MODE BIT(1)
+#define IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST BIT(3)
+#define IEEE80211_TWT_CONTROL_RX_DISABLED BIT(4)
+#define IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT BIT(5)
+
+#define IEEE80211_TWT_REQTYPE_REQUEST BIT(0)
+#define IEEE80211_TWT_REQTYPE_SETUP_CMD GENMASK(3, 1)
+#define IEEE80211_TWT_REQTYPE_TRIGGER BIT(4)
+#define IEEE80211_TWT_REQTYPE_IMPLICIT BIT(5)
+#define IEEE80211_TWT_REQTYPE_FLOWTYPE BIT(6)
+#define IEEE80211_TWT_REQTYPE_FLOWID GENMASK(9, 7)
+#define IEEE80211_TWT_REQTYPE_WAKE_INT_EXP GENMASK(14, 10)
+#define IEEE80211_TWT_REQTYPE_PROTECTION BIT(15)
+
+enum ieee80211_twt_setup_cmd {
+ TWT_SETUP_CMD_REQUEST,
+ TWT_SETUP_CMD_SUGGEST,
+ TWT_SETUP_CMD_DEMAND,
+ TWT_SETUP_CMD_GROUPING,
+ TWT_SETUP_CMD_ACCEPT,
+ TWT_SETUP_CMD_ALTERNATE,
+ TWT_SETUP_CMD_DICTATE,
+ TWT_SETUP_CMD_REJECT,
+};
+
+struct ieee80211_twt_params {
+ __le16 req_type;
+ __le64 twt;
+ u8 min_twt_dur;
+ __le16 mantissa;
+ u8 channel;
+} __packed;
+
+struct ieee80211_twt_setup {
+ u8 dialog_token;
+ u8 element_id;
+ u8 length;
+ u8 control;
+ u8 params[];
+} __packed;
+
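+/*
+ * Illustrative sketch, not part of this patch: the request-type
+ * subfields defined above can be extracted with the bitfield helpers,
+ * e.g. a hypothetical accessor for the setup command:
+ */
+static inline enum ieee80211_twt_setup_cmd
+example_twt_setup_cmd(const struct ieee80211_twt_params *params)
+{
+ return le16_get_bits(params->req_type, IEEE80211_TWT_REQTYPE_SETUP_CMD);
+}
+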
struct ieee80211_mgmt {
__le16 frame_control;
__le16 duration;
@@ -894,7 +1176,7 @@ struct ieee80211_mgmt {
__le16 auth_transaction;
__le16 status_code;
/* possibly followed by Challenge text */
- u8 variable[0];
+ u8 variable[];
} __packed auth;
struct {
__le16 reason_code;
@@ -903,21 +1185,26 @@ struct ieee80211_mgmt {
__le16 capab_info;
__le16 listen_interval;
/* followed by SSID and Supported rates */
- u8 variable[0];
+ u8 variable[];
} __packed assoc_req;
struct {
__le16 capab_info;
__le16 status_code;
__le16 aid;
/* followed by Supported rates */
- u8 variable[0];
+ u8 variable[];
} __packed assoc_resp, reassoc_resp;
struct {
__le16 capab_info;
+ __le16 status_code;
+ u8 variable[];
+ } __packed s1g_assoc_resp, s1g_reassoc_resp;
+ struct {
+ __le16 capab_info;
__le16 listen_interval;
u8 current_ap[ETH_ALEN];
/* followed by SSID and Supported rates */
- u8 variable[0];
+ u8 variable[];
} __packed reassoc_req;
struct {
__le16 reason_code;
@@ -928,11 +1215,11 @@ struct ieee80211_mgmt {
__le16 capab_info;
/* followed by some of SSID, Supported rates,
* FH Params, DS Params, CF Params, IBSS Params, TIM */
- u8 variable[0];
+ u8 variable[];
} __packed beacon;
struct {
/* only variable items: SSID, Supported rates */
- u8 variable[0];
+ DECLARE_FLEX_ARRAY(u8, variable);
} __packed probe_req;
struct {
__le64 timestamp;
@@ -940,7 +1227,7 @@ struct ieee80211_mgmt {
__le16 capab_info;
/* followed by some of SSID, Supported rates,
* FH Params, DS Params, CF Params, IBSS Params */
- u8 variable[0];
+ u8 variable[];
} __packed probe_resp;
struct {
u8 category;
@@ -949,16 +1236,16 @@ struct ieee80211_mgmt {
u8 action_code;
u8 dialog_token;
u8 status_code;
- u8 variable[0];
+ u8 variable[];
} __packed wme_action;
struct{
u8 action_code;
- u8 variable[0];
+ u8 variable[];
} __packed chan_switch;
struct{
u8 action_code;
struct ieee80211_ext_chansw_ie data;
- u8 variable[0];
+ u8 variable[];
} __packed ext_chan_switch;
struct{
u8 action_code;
@@ -973,6 +1260,8 @@ struct ieee80211_mgmt {
__le16 capab;
__le16 timeout;
__le16 start_seq_num;
+ /* followed by BA Extension */
+ u8 variable[];
} __packed addba_req;
struct{
u8 action_code;
@@ -988,11 +1277,11 @@ struct ieee80211_mgmt {
} __packed delba;
struct {
u8 action_code;
- u8 variable[0];
+ u8 variable[];
} __packed self_prot;
struct{
u8 action_code;
- u8 variable[0];
+ u8 variable[];
} __packed mesh_action;
struct {
u8 action;
@@ -1036,16 +1325,32 @@ struct ieee80211_mgmt {
u8 toa[6];
__le16 tod_error;
__le16 toa_error;
- u8 variable[0];
+ u8 variable[];
} __packed ftm;
+ struct {
+ u8 action_code;
+ u8 variable[];
+ } __packed s1g;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 follow_up;
+ u32 tod;
+ u32 toa;
+ u8 max_tod_error;
+ u8 max_toa_error;
+ } __packed wnm_timing_msr;
} u;
} __packed action;
+ DECLARE_FLEX_ARRAY(u8, body); /* Generic frame body */
} u;
} __packed __aligned(2);
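
/*
 * Illustrative note, not part of this patch: the union members above are
 * only valid after checking the frame type/subtype, e.g.:
 *
 *	if (ieee80211_is_probe_resp(mgmt->frame_control))
 *		ies = mgmt->u.probe_resp.variable;
 */
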
/* Supported rates membership selectors */
#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127
#define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126
+#define BSS_MEMBERSHIP_SELECTOR_HE_PHY 122
+#define BSS_MEMBERSHIP_SELECTOR_SAE_H2E 123
/* mgmt header + 1 byte category code */
#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
@@ -1432,11 +1737,15 @@ struct ieee80211_ht_operation {
#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
/*
- * A-PMDU buffer sizes
- * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2)
+ * A-MPDU buffer sizes
+ * According to HT, the size varies from 8 to 64 frames.
+ * HE adds the ability to have up to 256 frames.
+ * EHT adds the ability to have up to 1K frames.
*/
-#define IEEE80211_MIN_AMPDU_BUF 0x8
-#define IEEE80211_MAX_AMPDU_BUF 0x40
+#define IEEE80211_MIN_AMPDU_BUF 0x8
+#define IEEE80211_MAX_AMPDU_BUF_HT 0x40
+#define IEEE80211_MAX_AMPDU_BUF_HE 0x100
+#define IEEE80211_MAX_AMPDU_BUF_EHT 0x400
/* Spatial Multiplexing Power Save Modes (for capability) */
@@ -1457,13 +1766,16 @@ struct ieee80211_ht_operation {
* STA can receive. Rate expressed in units of 1 Mbps.
* If this field is 0 this value should not be used to
* consider the highest RX data rate supported.
- * The top 3 bits of this field are reserved.
+ * The top 3 bits of this field indicate the Maximum NSTS,total
+ * (a beamformee capability).
* @tx_mcs_map: TX MCS map 2 bits for each stream, total 8 streams
* @tx_highest: Indicates highest long GI VHT PPDU data rate
* STA can transmit. Rate expressed in units of 1 Mbps.
* If this field is 0 this value should not be used to
* consider the highest TX data rate supported.
- * The top 3 bits of this field are reserved.
+ * The top 2 bits of this field are reserved, the
+ * 3rd bit from the top indicates VHT Extended NSS BW
+ * Capability.
*/
struct ieee80211_vht_mcs_info {
__le16 rx_mcs_map;
@@ -1472,6 +1784,13 @@ struct ieee80211_vht_mcs_info {
__le16 tx_highest;
} __packed;
+/* for rx_highest */
+#define IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT 13
+#define IEEE80211_VHT_MAX_NSTS_TOTAL_MASK (7 << IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT)
+
+/* for tx_highest */
+#define IEEE80211_VHT_EXT_NSS_BW_CAPABLE (1 << 13)
+
/**
* enum ieee80211_vht_mcs_support - VHT MCS support definitions
* @IEEE80211_VHT_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
@@ -1538,6 +1857,243 @@ struct ieee80211_vht_operation {
__le16 basic_mcs_set;
} __packed;
+/**
+ * struct ieee80211_he_cap_elem - HE capabilities element
+ *
+ * This structure is the "HE capabilities element" fixed fields as
+ * described in P802.11ax_D4.0 section 9.4.2.242.2 and 9.4.2.242.3
+ */
+struct ieee80211_he_cap_elem {
+ u8 mac_cap_info[6];
+ u8 phy_cap_info[11];
+} __packed;
+
+#define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN 5
+
+/**
+ * enum ieee80211_he_mcs_support - HE MCS support definitions
+ * @IEEE80211_HE_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
+ * number of streams
+ * @IEEE80211_HE_MCS_SUPPORT_0_9: MCSes 0-9 are supported
+ * @IEEE80211_HE_MCS_SUPPORT_0_11: MCSes 0-11 are supported
+ * @IEEE80211_HE_MCS_NOT_SUPPORTED: This number of streams isn't supported
+ *
+ * These definitions are used in each 2-bit subfield of the rx_mcs_*
+ * and tx_mcs_* fields of &struct ieee80211_he_mcs_nss_supp, which are
+ * both split into 8 subfields by number of streams. These values indicate
+ * which MCSes are supported for the number of streams the value appears
+ * for.
+ */
+enum ieee80211_he_mcs_support {
+ IEEE80211_HE_MCS_SUPPORT_0_7 = 0,
+ IEEE80211_HE_MCS_SUPPORT_0_9 = 1,
+ IEEE80211_HE_MCS_SUPPORT_0_11 = 2,
+ IEEE80211_HE_MCS_NOT_SUPPORTED = 3,
+};
+
+/**
+ * struct ieee80211_he_mcs_nss_supp - HE Tx/Rx HE MCS NSS Support Field
+ *
+ * This structure holds the data required for the Tx/Rx HE MCS NSS Support Field
+ * described in P802.11ax_D2.0 section 9.4.2.237.4
+ *
+ * @rx_mcs_80: Rx MCS map 2 bits for each stream, total 8 streams, for channel
+ * widths less than 80MHz.
+ * @tx_mcs_80: Tx MCS map 2 bits for each stream, total 8 streams, for channel
+ * widths less than 80MHz.
+ * @rx_mcs_160: Rx MCS map 2 bits for each stream, total 8 streams, for channel
+ * width 160MHz.
+ * @tx_mcs_160: Tx MCS map 2 bits for each stream, total 8 streams, for channel
+ * width 160MHz.
+ * @rx_mcs_80p80: Rx MCS map 2 bits for each stream, total 8 streams, for
+ * channel width 80p80MHz.
+ * @tx_mcs_80p80: Tx MCS map 2 bits for each stream, total 8 streams, for
+ * channel width 80p80MHz.
+ */
+struct ieee80211_he_mcs_nss_supp {
+ __le16 rx_mcs_80;
+ __le16 tx_mcs_80;
+ __le16 rx_mcs_160;
+ __le16 tx_mcs_160;
+ __le16 rx_mcs_80p80;
+ __le16 tx_mcs_80p80;
+} __packed;
+
+/**
+ * struct ieee80211_he_operation - HE capabilities element
+ *
+ * This structure is the "HE operation element" fields as
+ * described in P802.11ax_D4.0 section 9.4.2.243
+ */
+struct ieee80211_he_operation {
+ __le32 he_oper_params;
+ __le16 he_mcs_nss_set;
+ /* Optional 0,1,3,4,5,7 or 8 bytes: depends on @he_oper_params */
+ u8 optional[];
+} __packed;
+
+/**
+ * struct ieee80211_he_spr - HE spatial reuse element
+ *
+ * This structure is the "HE spatial reuse element" element as
+ * described in P802.11ax_D4.0 section 9.4.2.241
+ */
+struct ieee80211_he_spr {
+ u8 he_sr_control;
+ /* Optional 0 to 19 bytes: depends on @he_sr_control */
+ u8 optional[];
+} __packed;
+
+/**
+ * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field
+ *
+ * This structure is the "MU AC Parameter Record" fields as
+ * described in P802.11ax_D4.0 section 9.4.2.245
+ */
+struct ieee80211_he_mu_edca_param_ac_rec {
+ u8 aifsn;
+ u8 ecw_min_max;
+ u8 mu_edca_timer;
+} __packed;
+
+/**
+ * struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element
+ *
+ * This structure is the "MU EDCA Parameter Set element" fields as
+ * described in P802.11ax_D4.0 section 9.4.2.245
+ */
+struct ieee80211_mu_edca_param_set {
+ u8 mu_qos_info;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_be;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_bk;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_vi;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_vo;
+} __packed;
+
+#define IEEE80211_EHT_MCS_NSS_RX 0x0f
+#define IEEE80211_EHT_MCS_NSS_TX 0xf0
+
+/**
+ * struct ieee80211_eht_mcs_nss_supp_20mhz_only - EHT 20MHz only station max
+ * supported NSS per MCS.
+ *
+ * For each field below, bits 0 - 3 indicate the maximal number of spatial
+ * streams for Rx, and bits 4 - 7 indicate the maximal number of spatial streams
+ * for Tx.
+ *
+ * @rx_tx_mcs7_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 0 - 7.
+ * @rx_tx_mcs9_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 8 - 9.
+ * @rx_tx_mcs11_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 10 - 11.
+ * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 12 - 13.
+ */
+struct ieee80211_eht_mcs_nss_supp_20mhz_only {
+ u8 rx_tx_mcs7_max_nss;
+ u8 rx_tx_mcs9_max_nss;
+ u8 rx_tx_mcs11_max_nss;
+ u8 rx_tx_mcs13_max_nss;
+};
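+
+/*
+ * Illustrative sketch, not part of this patch: each octet above packs
+ * the Rx NSS in the low nibble and the Tx NSS in the high nibble;
+ * hypothetical accessors using the masks defined earlier:
+ */
+static inline u8 example_eht_max_rx_nss(u8 rx_tx_max_nss)
+{
+ return u8_get_bits(rx_tx_max_nss, IEEE80211_EHT_MCS_NSS_RX);
+}
+
+static inline u8 example_eht_max_tx_nss(u8 rx_tx_max_nss)
+{
+ return u8_get_bits(rx_tx_max_nss, IEEE80211_EHT_MCS_NSS_TX);
+}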
+
+/**
+ * struct ieee80211_eht_mcs_nss_supp_bw - EHT max supported NSS per MCS (except
+ * 20MHz only stations).
+ *
+ * For each field below, bits 0 - 3 indicate the maximal number of spatial
+ * streams for Rx, and bits 4 - 7 indicate the maximal number of spatial streams
+ * for Tx.
+ *
+ * @rx_tx_mcs9_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 0 - 9.
+ * @rx_tx_mcs11_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 10 - 11.
+ * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 12 - 13.
+ */
+struct ieee80211_eht_mcs_nss_supp_bw {
+ u8 rx_tx_mcs9_max_nss;
+ u8 rx_tx_mcs11_max_nss;
+ u8 rx_tx_mcs13_max_nss;
+};
+
+/**
+ * struct ieee80211_eht_cap_elem_fixed - EHT capabilities fixed data
+ *
+ * This structure is the "EHT Capabilities element" fixed fields as
+ * described in P802.11be_D2.0 section 9.4.2.313.
+ *
+ * @mac_cap_info: MAC capabilities, see IEEE80211_EHT_MAC_CAP*
+ * @phy_cap_info: PHY capabilities, see IEEE80211_EHT_PHY_CAP*
+ */
+struct ieee80211_eht_cap_elem_fixed {
+ u8 mac_cap_info[2];
+ u8 phy_cap_info[9];
+} __packed;
+
+/**
+ * struct ieee80211_eht_cap_elem - EHT capabilities element
+ * @fixed: fixed parts, see &ieee80211_eht_cap_elem_fixed
+ * @optional: optional parts
+ */
+struct ieee80211_eht_cap_elem {
+ struct ieee80211_eht_cap_elem_fixed fixed;
+
+ /*
+ * Followed by:
+ * Supported EHT-MCS And NSS Set field: 4, 3, 6 or 9 octets.
+ * EHT PPE Thresholds field: variable length.
+ */
+ u8 optional[];
+} __packed;
+
+#define IEEE80211_EHT_OPER_INFO_PRESENT 0x01
+#define IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT 0x02
+#define IEEE80211_EHT_OPER_EHT_DEF_PE_DURATION 0x04
+#define IEEE80211_EHT_OPER_GROUP_ADDRESSED_BU_IND_LIMIT 0x08
+#define IEEE80211_EHT_OPER_GROUP_ADDRESSED_BU_IND_EXP_MASK 0x30
+
+/**
+ * struct ieee80211_eht_operation - eht operation element
+ *
+ * This structure is the "EHT Operation Element" fields as
+ * described in P802.11be_D2.0 section 9.4.2.311
+ *
+ * @params: EHT operation element parameters. See &IEEE80211_EHT_OPER_*
+ * @basic_mcs_nss: indicates the EHT-MCSs for each number of spatial streams in
+ * EHT PPDUs that are supported by all EHT STAs in the BSS in transmit and
+ * receive.
+ * @optional: optional parts
+ */
+struct ieee80211_eht_operation {
+ u8 params;
+ __le32 basic_mcs_nss;
+ u8 optional[];
+} __packed;
+
+/**
+ * struct ieee80211_eht_operation_info - eht operation information
+ *
+ * @control: EHT operation information control.
+ * @ccfs0: defines a channel center frequency for a 20, 40, 80, 160, or 320 MHz
+ * EHT BSS.
+ * @ccfs1: defines a channel center frequency for a 160 or 320 MHz EHT BSS.
+ * @optional: optional parts
+ */
+struct ieee80211_eht_operation_info {
+ u8 control;
+ u8 ccfs0;
+ u8 ccfs1;
+ u8 optional[];
+} __packed;
/* 802.11ac VHT Capabilities */
#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000
@@ -1547,6 +2103,7 @@ struct ieee80211_vht_operation {
#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004
#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008
#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_SHIFT 2
#define IEEE80211_VHT_CAP_RXLDPC 0x00000010
#define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020
#define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040
@@ -1556,6 +2113,7 @@ struct ieee80211_vht_operation {
#define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300
#define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400
#define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700
+#define IEEE80211_VHT_CAP_RXSTBC_SHIFT 8
#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800
#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000
#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT 13
@@ -1575,6 +2133,875 @@ struct ieee80211_vht_operation {
#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000
#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000
#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000
+#define IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT 30
+#define IEEE80211_VHT_CAP_EXT_NSS_BW_MASK 0xc0000000
+
+/**
+ * ieee80211_get_vht_max_nss - return max NSS for a given bandwidth/MCS
+ * @cap: VHT capabilities of the peer
+ * @bw: bandwidth to use
+ * @mcs: MCS index to use
+ * @ext_nss_bw_capable: indicates whether or not the local transmitter
+ * (rate scaling algorithm) can deal with the new logic
+ * (dot11VHTExtendedNSSBWCapable)
+ * @max_vht_nss: current maximum NSS as advertised by the STA in
+ * operating mode notification, can be 0 in which case the
+ * capability data will be used to derive this (from MCS support)
+ *
+ * Due to the VHT Extended NSS Bandwidth Support, the maximum NSS can
+ * vary for a given BW/MCS. This function parses the data.
+ *
+ * Note: This function is exported by cfg80211.
+ */
+int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
+ enum ieee80211_vht_chanwidth bw,
+ int mcs, bool ext_nss_bw_capable,
+ unsigned int max_vht_nss);
+
+/**
+ * enum ieee80211_ap_reg_power - regulatory power for an Access Point
+ *
+ * @IEEE80211_REG_UNSET_AP: Access Point has no regulatory power mode
+ * @IEEE80211_REG_LPI_AP: Indoor Access Point
+ * @IEEE80211_REG_SP_AP: Standard power Access Point
+ * @IEEE80211_REG_VLP_AP: Very low power Access Point
+ * @IEEE80211_REG_AP_POWER_AFTER_LAST: internal
+ * @IEEE80211_REG_AP_POWER_MAX: maximum value
+ */
+enum ieee80211_ap_reg_power {
+ IEEE80211_REG_UNSET_AP,
+ IEEE80211_REG_LPI_AP,
+ IEEE80211_REG_SP_AP,
+ IEEE80211_REG_VLP_AP,
+ IEEE80211_REG_AP_POWER_AFTER_LAST,
+ IEEE80211_REG_AP_POWER_MAX =
+ IEEE80211_REG_AP_POWER_AFTER_LAST - 1,
+};
+
+/**
+ * enum ieee80211_client_reg_power - regulatory power for a client
+ *
+ * @IEEE80211_REG_UNSET_CLIENT: Client has no regulatory power mode
+ * @IEEE80211_REG_DEFAULT_CLIENT: Default Client
+ * @IEEE80211_REG_SUBORDINATE_CLIENT: Subordinate Client
+ * @IEEE80211_REG_CLIENT_POWER_AFTER_LAST: internal
+ * @IEEE80211_REG_CLIENT_POWER_MAX: maximum value
+ */
+enum ieee80211_client_reg_power {
+ IEEE80211_REG_UNSET_CLIENT,
+ IEEE80211_REG_DEFAULT_CLIENT,
+ IEEE80211_REG_SUBORDINATE_CLIENT,
+ IEEE80211_REG_CLIENT_POWER_AFTER_LAST,
+ IEEE80211_REG_CLIENT_POWER_MAX =
+ IEEE80211_REG_CLIENT_POWER_AFTER_LAST - 1,
+};
+
+/* 802.11ax HE MAC capabilities */
+#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01
+#define IEEE80211_HE_MAC_CAP0_TWT_REQ 0x02
+#define IEEE80211_HE_MAC_CAP0_TWT_RES 0x04
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_NOT_SUPP 0x00
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_1 0x08
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_2 0x10
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_3 0x18
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_MASK 0x18
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_1 0x00
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_2 0x20
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_4 0x40
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_8 0x60
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_16 0x80
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_32 0xa0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_64 0xc0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_UNLIMITED 0xe0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_MASK 0xe0
+
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_UNLIMITED 0x00
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_128 0x01
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_256 0x02
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_512 0x03
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_MASK 0x03
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US 0x00
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US 0x04
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US 0x08
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK 0x0c
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_1 0x00
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_2 0x10
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_3 0x20
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_4 0x30
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_5 0x40
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_6 0x50
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_7 0x60
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8 0x70
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_MASK 0x70
+
+/* Link adaptation is split between bytes HE_MAC_CAP1 and
+ * HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE is
+ * set, in which case the following values apply:
+ * 0 = No feedback.
+ * 1 = reserved.
+ * 2 = Unsolicited feedback.
+ * 3 = both
+ */
+#define IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION 0x80
+
+#define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION 0x01
+#define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02
+#define IEEE80211_HE_MAC_CAP2_TRS 0x04
+#define IEEE80211_HE_MAC_CAP2_BSR 0x08
+#define IEEE80211_HE_MAC_CAP2_BCAST_TWT 0x10
+#define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP 0x20
+#define IEEE80211_HE_MAC_CAP2_MU_CASCADING 0x40
+#define IEEE80211_HE_MAC_CAP2_ACK_EN 0x80
+
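+/*
+ * Illustrative sketch, not part of this patch: reassembling the two-bit
+ * link adaptation value split across MAC CAP1/CAP2 (hypothetical
+ * helper; @mac_cap_info laid out as in struct ieee80211_he_cap_elem):
+ */
+static inline u8 example_he_link_adaptation(const u8 *mac_cap_info)
+{
+ return ((mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) |
+        ((mac_cap_info[1] & IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) >> 7);
+}
+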
+#define IEEE80211_HE_MAC_CAP3_OMI_CONTROL 0x02
+#define IEEE80211_HE_MAC_CAP3_OFDMA_RA 0x04
+
+/* The maximum length of an A-MPDU is defined by the combination of the Maximum
+ * A-MPDU Length Exponent field in the HT capabilities, VHT capabilities and the
+ * same field in the HE capabilities.
+ */
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_0 0x00
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_1 0x08
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2 0x10
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3 0x18
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK 0x18
+#define IEEE80211_HE_MAC_CAP3_AMSDU_FRAG 0x20
+#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40
+#define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80
+
+#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01
+#define IEEE80211_HE_MAC_CAP4_QTP 0x02
+#define IEEE80211_HE_MAC_CAP4_BQR 0x04
+#define IEEE80211_HE_MAC_CAP4_PSR_RESP 0x08
+#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10
+#define IEEE80211_HE_MAC_CAP4_OPS 0x20
+#define IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU 0x40
+/* Multi TID agg TX is split between byte #4 and #5
+ * The value is a combination of B39,B40,B41
+ */
+#define IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39 0x80
+
+#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 0x01
+#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 0x02
+#define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION 0x04
+#define IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU 0x08
+#define IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX 0x10
+#define IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS 0x20
+#define IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING 0x40
+#define IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX 0x80
+
+#define IEEE80211_HE_VHT_MAX_AMPDU_FACTOR 20
+#define IEEE80211_HE_HT_MAX_AMPDU_FACTOR 16
+#define IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR 13
+
+/* 802.11ax HE PHY capabilities */
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G 0x10
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL 0x1e
+
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G 0x20
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G 0x40
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK 0xfe
+
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ 0x01
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ 0x02
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ 0x04
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ 0x08
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK 0x0f
+#define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A 0x10
+#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20
+#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40
+/* Midamble RX/TX Max NSTS is split between byte #2 and byte #3 */
+#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS 0x80
+
+#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS 0x01
+#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02
+#define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04
+#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08
+#define IEEE80211_HE_PHY_CAP2_DOPPLER_TX 0x10
+#define IEEE80211_HE_PHY_CAP2_DOPPLER_RX 0x20
+
+/* Note that the meaning of UL MU below is different between an AP and a non-AP
+ * sta, where in the AP case it indicates support for Rx and in the non-AP sta
+ * case it indicates support for Tx.
+ */
+#define IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO 0x40
+#define IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO 0x80
+
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK 0x01
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK 0x02
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM 0x03
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK 0x03
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2 0x04
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK 0x08
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK 0x10
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM 0x18
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK 0x18
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_2 0x20
+#define IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU 0x40
+#define IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER 0x80
+
+#define IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE 0x01
+#define IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER 0x02
+
+/* Minimal allowed value of Max STS under 80MHz is 3 */
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 0x0c
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 0x10
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_6 0x14
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_7 0x18
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8 0x1c
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK 0x1c
+
+/* Minimal allowed value of Max STS above 80MHz is 3 */
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 0x60
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5 0x80
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_6 0xa0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_7 0xc0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 0xe0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK 0xe0
+
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_1 0x00
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 0x01
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_3 0x02
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_4 0x03
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_5 0x04
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_6 0x05
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_7 0x06
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_8 0x07
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK 0x07
+
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_1 0x00
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 0x08
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_3 0x10
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_4 0x18
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_5 0x20
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_6 0x28
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_7 0x30
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_8 0x38
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK 0x38
+
+#define IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK 0x40
+#define IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK 0x80
+
+#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU 0x01
+#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU 0x02
+#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB 0x04
+#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB 0x08
+#define IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB 0x10
+#define IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE 0x20
+#define IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO 0x40
+#define IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT 0x80
+
+#define IEEE80211_HE_PHY_CAP7_PSR_BASED_SR 0x01
+#define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP 0x02
+#define IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI 0x04
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_1 0x08
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_2 0x10
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_3 0x18
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_4 0x20
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_5 0x28
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_6 0x30
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_7 0x38
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_MASK 0x38
+#define IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ 0x40
+#define IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ 0x80
+
+#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI 0x01
+#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G 0x02
+#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU 0x04
+#define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08
+#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10
+#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_TX_2X_AND_1XLTF 0x20
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242 0x00
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484 0x40
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996 0x80
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996 0xc0
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK 0xc0
+
+#define IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM 0x01
+#define IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK 0x02
+#define IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU 0x04
+#define IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU 0x08
+#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB 0x10
+#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB 0x20
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US 0x0
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US 0x1
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US 0x2
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED 0x3
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_POS 6
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK 0xc0
+
+#define IEEE80211_HE_PHY_CAP10_HE_MU_M1RU_MAX_LTF 0x01
+
+/* 802.11ax HE TX/RX MCS NSS Support */
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_POS (6)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_POS (11)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_MASK 0x07c0
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_MASK 0xf800
+
+/* TX/RX HE MCS Support field Highest MCS subfield encoding */
+enum ieee80211_he_highest_mcs_supported_subfield_enc {
+ HIGHEST_MCS_SUPPORTED_MCS7 = 0,
+ HIGHEST_MCS_SUPPORTED_MCS8,
+ HIGHEST_MCS_SUPPORTED_MCS9,
+ HIGHEST_MCS_SUPPORTED_MCS10,
+ HIGHEST_MCS_SUPPORTED_MCS11,
+};
+
+/* Calculate 802.11ax HE capabilities IE Tx/Rx HE MCS NSS Support Field size */
+static inline u8
+ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap)
+{
+ u8 count = 4;
+
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ count += 4;
+
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ count += 4;
+
+ return count;
+}
+
+/* 802.11ax HE PPE Thresholds */
+#define IEEE80211_PPE_THRES_NSS_SUPPORT_2NSS (1)
+#define IEEE80211_PPE_THRES_NSS_POS (0)
+#define IEEE80211_PPE_THRES_NSS_MASK (7)
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_2x966_AND_966_RU \
+ (BIT(5) | BIT(6))
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK 0x78
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS (3)
+#define IEEE80211_PPE_THRES_INFO_PPET_SIZE (3)
+#define IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE (7)
+
+/*
+ * Calculate 802.11ax HE capabilities IE PPE field size
+ * Input: Header byte of ppe_thres (first byte), and HE capa IE's PHY cap u8*
+ */
+static inline u8
+ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
+{
+ u8 n;
+
+ if ((phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0)
+ return 0;
+
+ n = hweight8(ppe_thres_hdr &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ n *= (1 + ((ppe_thres_hdr & IEEE80211_PPE_THRES_NSS_MASK) >>
+ IEEE80211_PPE_THRES_NSS_POS));
+
+ /*
+ * Each pair is 6 bits, and we need to add the 7 "header" bits to the
+ * total size.
+ */
+ n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7;
+ n = DIV_ROUND_UP(n, 8);
+
+ return n;
+}
+
+static inline bool ieee80211_he_capa_size_ok(const u8 *data, u8 len)
+{
+ const struct ieee80211_he_cap_elem *he_cap_ie_elem = (const void *)data;
+ u8 needed = sizeof(*he_cap_ie_elem);
+
+ if (len < needed)
+ return false;
+
+ needed += ieee80211_he_mcs_nss_size(he_cap_ie_elem);
+ if (len < needed)
+ return false;
+
+ if (he_cap_ie_elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ if (len < needed + 1)
+ return false;
+ needed += ieee80211_he_ppe_size(data[needed],
+ he_cap_ie_elem->phy_cap_info);
+ }
+
+ return len >= needed;
+}
+
+/* HE Operation defines */
+#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000007
+#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4
+#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00004000
+#define IEEE80211_HE_OPERATION_CO_HOSTED_BSS 0x00008000
+#define IEEE80211_HE_OPERATION_ER_SU_DISABLE 0x00010000
+#define IEEE80211_HE_OPERATION_6GHZ_OP_INFO 0x00020000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x3f000000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24
+#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000
+
+#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0
+#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1
+
+/**
+ * struct ieee80211_he_6ghz_oper - HE 6 GHz Operation Information field
+ * @primary: primary channel
+ * @control: control flags
+ * @ccfs0: channel center frequency segment 0
+ * @ccfs1: channel center frequency segment 1
+ * @minrate: minimum rate (in 1 Mbps units)
+ */
+struct ieee80211_he_6ghz_oper {
+ u8 primary;
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH 0x3
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_20MHZ 0
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_40MHZ 1
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ 2
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ 3
+#define IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON 0x4
+#define IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO 0x38
+ u8 control;
+ u8 ccfs0;
+ u8 ccfs1;
+ u8 minrate;
+} __packed;
+
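+/*
+ * Illustrative sketch, not part of this patch: reading the channel width
+ * subfield out of @control (hypothetical helper):
+ */
+static inline u8
+example_he_6ghz_chanwidth(const struct ieee80211_he_6ghz_oper *oper)
+{
+ return u8_get_bits(oper->control, IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH);
+}
+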
+/*
+ * In "9.4.2.161 Transmit Power Envelope element" of "IEEE Std 802.11ax-2021",
+ * it show four types in "Table 9-275a-Maximum Transmit Power Interpretation
+ * subfield encoding", and two category for each type in "Table E-12-Regulatory
+ * Info subfield encoding in the United States".
+ * So it it totally max 8 Transmit Power Envelope element.
+ */
+#define IEEE80211_TPE_MAX_IE_COUNT 8
+/*
+ * In "Table 9-277—Meaning of Maximum Transmit Power Count subfield"
+ * of "IEEE Std 802.11ax™‐2021", the max power level is 8.
+ */
+#define IEEE80211_MAX_NUM_PWR_LEVEL 8
+
+#define IEEE80211_TPE_MAX_POWER_COUNT 8
+
+/* transmit power interpretation type of transmit power envelope element */
+enum ieee80211_tx_power_intrpt_type {
+ IEEE80211_TPE_LOCAL_EIRP,
+ IEEE80211_TPE_LOCAL_EIRP_PSD,
+ IEEE80211_TPE_REG_CLIENT_EIRP,
+ IEEE80211_TPE_REG_CLIENT_EIRP_PSD,
+};
+
+/**
+ * struct ieee80211_tx_pwr_env
+ *
+ * This structure represents the "Transmit Power Envelope element"
+ */
+struct ieee80211_tx_pwr_env {
+ u8 tx_power_info;
+ s8 tx_power[IEEE80211_TPE_MAX_POWER_COUNT];
+} __packed;
+
+#define IEEE80211_TX_PWR_ENV_INFO_COUNT 0x7
+#define IEEE80211_TX_PWR_ENV_INFO_INTERPRET 0x38
+#define IEEE80211_TX_PWR_ENV_INFO_CATEGORY 0xC0
+
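+/*
+ * Illustrative sketch, not part of this patch: for the EIRP
+ * interpretations (Table 9-277), the count subfield is one less than the
+ * number of valid @tx_power entries; a hypothetical accessor:
+ */
+static inline u8 example_tpe_power_count(const struct ieee80211_tx_pwr_env *env)
+{
+ return u8_get_bits(env->tx_power_info, IEEE80211_TX_PWR_ENV_INFO_COUNT) + 1;
+}
+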
+/*
+ * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size
+ * @he_oper_ie: byte data of the HE Operation IE, starting from the byte
+ * after the ext ID byte. It is assumed that he_oper_ie has at least
+ * sizeof(struct ieee80211_he_operation) bytes, the caller must have
+ * validated this.
+ * @return the actual size of the IE data (not including header), or 0 on error
+ */
+static inline u8
+ieee80211_he_oper_size(const u8 *he_oper_ie)
+{
+ const struct ieee80211_he_operation *he_oper = (const void *)he_oper_ie;
+ u8 oper_len = sizeof(struct ieee80211_he_operation);
+ u32 he_oper_params;
+
+ /* Make sure the input is not NULL */
+ if (!he_oper_ie)
+ return 0;
+
+ /* Calc required length */
+ he_oper_params = le32_to_cpu(he_oper->he_oper_params);
+ if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
+ oper_len += 3;
+ if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS)
+ oper_len++;
+ if (he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO)
+ oper_len += sizeof(struct ieee80211_he_6ghz_oper);
+
+ /* Add the first byte (extension ID) to the total length */
+ oper_len++;
+
+ return oper_len;
+}
+
+/**
+ * ieee80211_he_6ghz_oper - obtain 6 GHz operation field
+ * @he_oper: HE operation element (must be pre-validated for size)
+ * but may be %NULL
+ *
+ * Return: a pointer to the 6 GHz operation field, or %NULL
+ */
+static inline const struct ieee80211_he_6ghz_oper *
+ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper)
+{
+ const u8 *ret = (const void *)&he_oper->optional;
+ u32 he_oper_params;
+
+ if (!he_oper)
+ return NULL;
+
+ he_oper_params = le32_to_cpu(he_oper->he_oper_params);
+
+ if (!(he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO))
+ return NULL;
+ if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
+ ret += 3;
+ if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS)
+ ret++;
+
+ return (const void *)ret;
+}
+
+/* HE Spatial Reuse defines */
+#define IEEE80211_HE_SPR_PSR_DISALLOWED BIT(0)
+#define IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED BIT(1)
+#define IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT BIT(2)
+#define IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT BIT(3)
+#define IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED BIT(4)
+
+/*
+ * ieee80211_he_spr_size - calculate 802.11ax HE Spatial Reuse IE size
+ * @he_spr_ie: byte data of the HE Spatial Reuse IE, starting from the byte
+ * after the ext ID byte. It is assumed that he_spr_ie has at least
+ * sizeof(struct ieee80211_he_spr) bytes, the caller must have validated
+ * this
+ * @return the actual size of the IE data (not including header), or 0 on error
+ */
+static inline u8
+ieee80211_he_spr_size(const u8 *he_spr_ie)
+{
+ const struct ieee80211_he_spr *he_spr = (const void *)he_spr_ie;
+ u8 spr_len = sizeof(struct ieee80211_he_spr);
+ u8 he_spr_params;
+
+ /* Make sure the input is not NULL */
+ if (!he_spr_ie)
+ return 0;
+
+ /* Calc required length */
+ he_spr_params = he_spr->he_sr_control;
+ if (he_spr_params & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
+ spr_len++;
+ if (he_spr_params & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT)
+ spr_len += 18;
+
+ /* Add the first byte (extension ID) to the total length */
+ spr_len++;
+
+ return spr_len;
+}
+
+/* S1G Capabilities Information field */
+#define IEEE80211_S1G_CAPABILITY_LEN 15
+
+#define S1G_CAP0_S1G_LONG BIT(0)
+#define S1G_CAP0_SGI_1MHZ BIT(1)
+#define S1G_CAP0_SGI_2MHZ BIT(2)
+#define S1G_CAP0_SGI_4MHZ BIT(3)
+#define S1G_CAP0_SGI_8MHZ BIT(4)
+#define S1G_CAP0_SGI_16MHZ BIT(5)
+#define S1G_CAP0_SUPP_CH_WIDTH GENMASK(7, 6)
+
+#define S1G_SUPP_CH_WIDTH_2 0
+#define S1G_SUPP_CH_WIDTH_4 1
+#define S1G_SUPP_CH_WIDTH_8 2
+#define S1G_SUPP_CH_WIDTH_16 3
+#define S1G_SUPP_CH_WIDTH_MAX(cap) ((1 << FIELD_GET(S1G_CAP0_SUPP_CH_WIDTH, \
+ cap[0])) << 1)
+
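+/*
+ * Illustrative usage, not part of this patch: with @capab_info pointing
+ * at the S1G Capabilities Information field, the macro above yields the
+ * widest supported channel in MHz (2/4/8/16), e.g.:
+ *
+ *	u8 max_mhz = S1G_SUPP_CH_WIDTH_MAX(s1g_cap->capab_info);
+ */
+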
+#define S1G_CAP1_RX_LDPC BIT(0)
+#define S1G_CAP1_TX_STBC BIT(1)
+#define S1G_CAP1_RX_STBC BIT(2)
+#define S1G_CAP1_SU_BFER BIT(3)
+#define S1G_CAP1_SU_BFEE BIT(4)
+#define S1G_CAP1_BFEE_STS GENMASK(7, 5)
+
+#define S1G_CAP2_SOUNDING_DIMENSIONS GENMASK(2, 0)
+#define S1G_CAP2_MU_BFER BIT(3)
+#define S1G_CAP2_MU_BFEE BIT(4)
+#define S1G_CAP2_PLUS_HTC_VHT BIT(5)
+#define S1G_CAP2_TRAVELING_PILOT GENMASK(7, 6)
+
+#define S1G_CAP3_RD_RESPONDER BIT(0)
+#define S1G_CAP3_HT_DELAYED_BA BIT(1)
+#define S1G_CAP3_MAX_MPDU_LEN BIT(2)
+#define S1G_CAP3_MAX_AMPDU_LEN_EXP GENMASK(4, 3)
+#define S1G_CAP3_MIN_MPDU_START GENMASK(7, 5)
+
+#define S1G_CAP4_UPLINK_SYNC BIT(0)
+#define S1G_CAP4_DYNAMIC_AID BIT(1)
+#define S1G_CAP4_BAT BIT(2)
+#define S1G_CAP4_TIME_ADE BIT(3)
+#define S1G_CAP4_NON_TIM BIT(4)
+#define S1G_CAP4_GROUP_AID BIT(5)
+#define S1G_CAP4_STA_TYPE GENMASK(7, 6)
+
+#define S1G_CAP5_CENT_AUTH_CONTROL BIT(0)
+#define S1G_CAP5_DIST_AUTH_CONTROL BIT(1)
+#define S1G_CAP5_AMSDU BIT(2)
+#define S1G_CAP5_AMPDU BIT(3)
+#define S1G_CAP5_ASYMMETRIC_BA BIT(4)
+#define S1G_CAP5_FLOW_CONTROL BIT(5)
+#define S1G_CAP5_SECTORIZED_BEAM GENMASK(7, 6)
+
+#define S1G_CAP6_OBSS_MITIGATION BIT(0)
+#define S1G_CAP6_FRAGMENT_BA BIT(1)
+#define S1G_CAP6_NDP_PS_POLL BIT(2)
+#define S1G_CAP6_RAW_OPERATION BIT(3)
+#define S1G_CAP6_PAGE_SLICING BIT(4)
+#define S1G_CAP6_TXOP_SHARING_IMP_ACK BIT(5)
+#define S1G_CAP6_VHT_LINK_ADAPT GENMASK(7, 6)
+
+#define S1G_CAP7_TACK_AS_PS_POLL BIT(0)
+#define S1G_CAP7_DUP_1MHZ BIT(1)
+#define S1G_CAP7_MCS_NEGOTIATION BIT(2)
+#define S1G_CAP7_1MHZ_CTL_RESPONSE_PREAMBLE BIT(3)
+#define S1G_CAP7_NDP_BFING_REPORT_POLL BIT(4)
+#define S1G_CAP7_UNSOLICITED_DYN_AID BIT(5)
+#define S1G_CAP7_SECTOR_TRAINING_OPERATION BIT(6)
+#define S1G_CAP7_TEMP_PS_MODE_SWITCH BIT(7)
+
+#define S1G_CAP8_TWT_GROUPING BIT(0)
+#define S1G_CAP8_BDT BIT(1)
+#define S1G_CAP8_COLOR GENMASK(4, 2)
+#define S1G_CAP8_TWT_REQUEST BIT(5)
+#define S1G_CAP8_TWT_RESPOND BIT(6)
+#define S1G_CAP8_PV1_FRAME BIT(7)
+
+#define S1G_CAP9_LINK_ADAPT_PER_CONTROL_RESPONSE BIT(0)
+
+#define S1G_OPER_CH_WIDTH_PRIMARY_1MHZ BIT(0)
+#define S1G_OPER_CH_WIDTH_OPER GENMASK(4, 1)
+
+/* EHT MAC capabilities as defined in P802.11be_D2.0 section 9.4.2.313.2 */
+#define IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS 0x01
+#define IEEE80211_EHT_MAC_CAP0_OM_CONTROL 0x02
+#define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 0x04
+#define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 0x08
+#define IEEE80211_EHT_MAC_CAP0_RESTRICTED_TWT 0x10
+#define IEEE80211_EHT_MAC_CAP0_SCS_TRAFFIC_DESC 0x20
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK 0xc0
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_3895 0
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991 1
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454 2
+
+#define IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK 0x01
+
+/* EHT PHY capabilities as defined in P802.11be_D2.0 section 9.4.2.313.3 */
+#define IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ 0x02
+#define IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ 0x04
+#define IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI 0x08
+#define IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO 0x10
+#define IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER 0x20
+#define IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE 0x40
+
+/* EHT beamformee number of spatial streams <= 80MHz is split */
+#define IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK 0x80
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK 0x03
+
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK 0x1c
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK 0xe0
+
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK 0x07
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK 0x38
+
+/* EHT number of sounding dimensions for 320MHz is split */
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK 0xc0
+#define IEEE80211_EHT_PHY_CAP3_SOUNDING_DIM_320MHZ_MASK 0x01
+#define IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK 0x02
+#define IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK 0x04
+#define IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK 0x08
+#define IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK 0x10
+#define IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK 0x20
+#define IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK 0x40
+#define IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK 0x80
+
+#define IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO 0x01
+#define IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP 0x02
+#define IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP 0x04
+#define IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI 0x08
+#define IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK 0xf0
+
+#define IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK 0x01
+#define IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP 0x02
+#define IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP 0x04
+#define IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT 0x08
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK 0x30
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US 0
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US 1
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US 2
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US 3
+
+/* Maximum number of supported EHT LTF is split */
+#define IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK 0xc0
+#define IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK 0x07
+
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK 0x78
+#define IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP 0x80
+
+#define IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW 0x01
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ 0x02
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ 0x04
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ 0x08
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ 0x10
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ 0x20
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ 0x40
+#define IEEE80211_EHT_PHY_CAP7_TB_SOUNDING_FDBK_RATE_LIMIT 0x80
+
+#define IEEE80211_EHT_PHY_CAP8_RX_1024QAM_WIDER_BW_DL_OFDMA 0x01
+#define IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA 0x02
+
+/*
+ * EHT operation channel width as defined in P802.11be_D2.0 section 9.4.2.311
+ */
+#define IEEE80211_EHT_OPER_CHAN_WIDTH 0x7
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_20MHZ 0
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_40MHZ 1
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_80MHZ 2
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_160MHZ 3
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ 4
+
+/* Calculate 802.11be EHT capabilities IE Tx/Rx EHT MCS NSS Support Field size */
+static inline u8
+ieee80211_eht_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap,
+ const struct ieee80211_eht_cap_elem_fixed *eht_cap,
+ bool from_ap)
+{
+ u8 count = 0;
+
+ /* on 2.4 GHz, if it supports 40 MHz, the result is 3 */
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)
+ return 3;
+
+ /* on 2.4 GHz, these three bits are reserved, so should be 0 */
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)
+ count += 3;
+
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ count += 3;
+
+ if (eht_cap->phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
+ count += 3;
+
+ if (count)
+ return count;
+
+ return from_ap ? 3 : 4;
+}
+
+/* 802.11be EHT PPE Thresholds */
+#define IEEE80211_EHT_PPE_THRES_NSS_POS 0
+#define IEEE80211_EHT_PPE_THRES_NSS_MASK 0xf
+#define IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK 0x1f0
+#define IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE 3
+#define IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE 9
+
+/*
+ * Calculate 802.11be EHT capabilities IE EHT field size
+ */
+static inline u8
+ieee80211_eht_ppe_size(u16 ppe_thres_hdr, const u8 *phy_cap_info)
+{
+ u32 n;
+
+ if (!(phy_cap_info[5] &
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT))
+ return 0;
+
+ n = hweight16(ppe_thres_hdr &
+ IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ n *= 1 + u16_get_bits(ppe_thres_hdr, IEEE80211_EHT_PPE_THRES_NSS_MASK);
+
+ /*
+ * Each pair is 6 bits, and we need to add the 9 "header" bits to the
+ * total size.
+ */
+ n = n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2 +
+ IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE;
+ return DIV_ROUND_UP(n, 8);
+}
+
+static inline bool
+ieee80211_eht_capa_size_ok(const u8 *he_capa, const u8 *data, u8 len,
+ bool from_ap)
+{
+ const struct ieee80211_eht_cap_elem_fixed *elem = (const void *)data;
+ u8 needed = sizeof(struct ieee80211_eht_cap_elem_fixed);
+
+ if (len < needed || !he_capa)
+ return false;
+
+ needed += ieee80211_eht_mcs_nss_size((const void *)he_capa,
+ (const void *)data,
+ from_ap);
+ if (len < needed)
+ return false;
+
+ if (elem->phy_cap_info[5] &
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) {
+ u16 ppe_thres_hdr;
+
+ if (len < needed + sizeof(ppe_thres_hdr))
+ return false;
+
+ ppe_thres_hdr = get_unaligned_le16(data + needed);
+ needed += ieee80211_eht_ppe_size(ppe_thres_hdr,
+ elem->phy_cap_info);
+ }
+
+ return len >= needed;
+}
+
+static inline bool
+ieee80211_eht_oper_size_ok(const u8 *data, u8 len)
+{
+ const struct ieee80211_eht_operation *elem = (const void *)data;
+ u8 needed = sizeof(*elem);
+
+ if (len < needed)
+ return false;
+
+ if (elem->params & IEEE80211_EHT_OPER_INFO_PRESENT) {
+ needed += 3;
+
+ if (elem->params &
+ IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)
+ needed += 2;
+ }
+
+ return len >= needed;
+}
+
+#define LISTEN_INT_USF GENMASK(15, 14)
+#define LISTEN_INT_UI GENMASK(13, 0)
+
+#define IEEE80211_MAX_USF FIELD_MAX(LISTEN_INT_USF)
+#define IEEE80211_MAX_UI FIELD_MAX(LISTEN_INT_UI)
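+
+/*
+ * Illustrative sketch, not part of this patch: composing a listen
+ * interval from a Unified Scaling Factor and Unscaled Interval with the
+ * bitfield macros (hypothetical helper; values must not exceed
+ * IEEE80211_MAX_USF / IEEE80211_MAX_UI):
+ */
+static inline u16 example_encode_listen_int(u16 usf, u16 ui)
+{
+ return FIELD_PREP(LISTEN_INT_USF, usf) | FIELD_PREP(LISTEN_INT_UI, ui);
+}
+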
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0
@@ -1639,6 +3066,8 @@ struct ieee80211_vht_operation {
#define IEEE80211_SPCT_MSR_RPRT_TYPE_BASIC 0
#define IEEE80211_SPCT_MSR_RPRT_TYPE_CCA 1
#define IEEE80211_SPCT_MSR_RPRT_TYPE_RPI 2
+#define IEEE80211_SPCT_MSR_RPRT_TYPE_LCI 8
+#define IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC 11
/* 802.11g ERP information element */
#define WLAN_ERP_NON_ERP_PRESENT (1<<0)
@@ -1726,6 +3155,8 @@ enum ieee80211_statuscode {
/* 802.11ai */
WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 108,
WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109,
+ WLAN_STATUS_SAE_HASH_TO_ELEMENT = 126,
+ WLAN_STATUS_SAE_PK = 127,
};
@@ -1963,19 +3394,29 @@ enum ieee80211_eid {
WLAN_EID_VHT_OPERATION = 192,
WLAN_EID_EXTENDED_BSS_LOAD = 193,
WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194,
- WLAN_EID_VHT_TX_POWER_ENVELOPE = 195,
+ WLAN_EID_TX_POWER_ENVELOPE = 195,
WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196,
WLAN_EID_AID = 197,
WLAN_EID_QUIET_CHANNEL = 198,
WLAN_EID_OPMODE_NOTIF = 199,
+ WLAN_EID_REDUCED_NEIGHBOR_REPORT = 201,
+
+ WLAN_EID_AID_REQUEST = 210,
+ WLAN_EID_AID_RESPONSE = 211,
+ WLAN_EID_S1G_BCN_COMPAT = 213,
+ WLAN_EID_S1G_SHORT_BCN_INTERVAL = 214,
+ WLAN_EID_S1G_TWT = 216,
+ WLAN_EID_S1G_CAPABILITIES = 217,
WLAN_EID_VENDOR_SPECIFIC = 221,
WLAN_EID_QOS_PARAMETER = 222,
+ WLAN_EID_S1G_OPERATION = 232,
WLAN_EID_CAG_NUMBER = 237,
WLAN_EID_AP_CSN = 239,
WLAN_EID_FILS_INDICATION = 240,
WLAN_EID_DILS = 241,
WLAN_EID_FRAGMENT = 242,
+ WLAN_EID_RSNX = 244,
WLAN_EID_EXTENSION = 255
};
@@ -1991,6 +3432,28 @@ enum ieee80211_eid_ext {
WLAN_EID_EXT_FILS_WRAPPED_DATA = 8,
WLAN_EID_EXT_FILS_PUBLIC_KEY = 12,
WLAN_EID_EXT_FILS_NONCE = 13,
+ WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE = 14,
+ WLAN_EID_EXT_HE_CAPABILITY = 35,
+ WLAN_EID_EXT_HE_OPERATION = 36,
+ WLAN_EID_EXT_UORA = 37,
+ WLAN_EID_EXT_HE_MU_EDCA = 38,
+ WLAN_EID_EXT_HE_SPR = 39,
+ WLAN_EID_EXT_NDP_FEEDBACK_REPORT_PARAMSET = 41,
+ WLAN_EID_EXT_BSS_COLOR_CHG_ANN = 42,
+ WLAN_EID_EXT_QUIET_TIME_PERIOD_SETUP = 43,
+ WLAN_EID_EXT_ESS_REPORT = 45,
+ WLAN_EID_EXT_OPS = 46,
+ WLAN_EID_EXT_HE_BSS_LOAD = 47,
+ WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME = 52,
+ WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION = 55,
+ WLAN_EID_EXT_NON_INHERITANCE = 56,
+ WLAN_EID_EXT_KNOWN_BSSID = 57,
+ WLAN_EID_EXT_SHORT_SSID_LIST = 58,
+ WLAN_EID_EXT_HE_6GHZ_CAPA = 59,
+ WLAN_EID_EXT_UL_MU_POWER_CAPA = 60,
+ WLAN_EID_EXT_EHT_OPERATION = 106,
+ WLAN_EID_EXT_EHT_MULTI_LINK = 107,
+ WLAN_EID_EXT_EHT_CAPABILITY = 108,
};
/* Action category code */
@@ -2001,6 +3464,7 @@ enum ieee80211_category {
WLAN_CATEGORY_BACK = 3,
WLAN_CATEGORY_PUBLIC = 4,
WLAN_CATEGORY_RADIO_MEASUREMENT = 5,
+ WLAN_CATEGORY_FAST_BBS_TRANSITION = 6,
WLAN_CATEGORY_HT = 7,
WLAN_CATEGORY_SA_QUERY = 8,
WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9,
@@ -2015,6 +3479,7 @@ enum ieee80211_category {
WLAN_CATEGORY_FST = 18,
WLAN_CATEGORY_UNPROT_DMG = 20,
WLAN_CATEGORY_VHT = 21,
+ WLAN_CATEGORY_S1G = 22,
WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126,
WLAN_CATEGORY_VENDOR_SPECIFIC = 127,
};
@@ -2072,6 +3537,12 @@ enum ieee80211_mesh_actioncode {
WLAN_MESH_ACTION_TBTT_ADJUSTMENT_RESPONSE,
};
+/* Unprotected WNM action codes */
+enum ieee80211_unprotected_wnm_actioncode {
+ WLAN_UNPROTECTED_WNM_ACTION_TIM = 0,
+ WLAN_UNPROTECTED_WNM_ACTION_TIMING_MEASUREMENT_RESPONSE = 1,
+};
+
/* Security key length */
enum ieee80211_key_len {
WLAN_KEY_LEN_WEP40 = 5,
@@ -2088,6 +3559,20 @@ enum ieee80211_key_len {
WLAN_KEY_LEN_BIP_GMAC_256 = 32,
};
+enum ieee80211_s1g_actioncode {
+ WLAN_S1G_AID_SWITCH_REQUEST,
+ WLAN_S1G_AID_SWITCH_RESPONSE,
+ WLAN_S1G_SYNC_CONTROL,
+ WLAN_S1G_STA_INFO_ANNOUNCE,
+ WLAN_S1G_EDCA_PARAM_SET,
+ WLAN_S1G_EL_OPERATION,
+ WLAN_S1G_TWT_SETUP,
+ WLAN_S1G_TWT_TEARDOWN,
+ WLAN_S1G_SECT_GROUP_ID_LIST,
+ WLAN_S1G_SECT_ID_FEEDBACK,
+ WLAN_S1G_TWT_INFORMATION = 11,
+};
+
#define IEEE80211_WEP_IV_LEN 4
#define IEEE80211_WEP_ICV_LEN 4
#define IEEE80211_CCMP_HDR_LEN 8
@@ -2111,7 +3596,8 @@ enum ieee80211_key_len {
#define FILS_ERP_MAX_REALM_LEN 253
#define FILS_ERP_MAX_RRK_LEN 64
-#define PMK_MAX_LEN 48
+#define PMK_MAX_LEN 64
+#define SAE_PASSWORD_MAX_LEN 128
/* Public action codes (IEEE Std 802.11-2016, 9.6.8.1, Table 9-307) */
enum ieee80211_pub_actioncode {
@@ -2148,7 +3634,7 @@ enum ieee80211_pub_actioncode {
WLAN_PUB_ACTION_NETWORK_CHANNEL_CONTROL = 30,
WLAN_PUB_ACTION_WHITE_SPACE_MAP_ANN = 31,
WLAN_PUB_ACTION_FTM_REQUEST = 32,
- WLAN_PUB_ACTION_FTM = 33,
+ WLAN_PUB_ACTION_FTM_RESPONSE = 33,
WLAN_PUB_ACTION_FILS_DISCOVERY = 34,
};
@@ -2172,7 +3658,17 @@ enum ieee80211_tdls_actioncode {
*/
#define WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING BIT(2)
-/* TDLS capabilities in the the 4th byte of @WLAN_EID_EXT_CAPABILITY */
+/* Multiple BSSID capability is set in the 6th bit of 3rd byte of the
+ * @WLAN_EID_EXT_CAPABILITY information element
+ */
+#define WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT BIT(6)
+
+/* Timing Measurement protocol for time sync is set in the 7th bit of 3rd byte
+ * of the @WLAN_EID_EXT_CAPABILITY information element
+ */
+#define WLAN_EXT_CAPA3_TIMING_MEASUREMENT_SUPPORT BIT(7)
+
+/* TDLS capabilities in the 4th byte of @WLAN_EID_EXT_CAPABILITY */
#define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4)
#define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5)
#define WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH BIT(6)
@@ -2203,6 +3699,20 @@ enum ieee80211_tdls_actioncode {
*/
#define WLAN_EXT_CAPA9_FTM_INITIATOR BIT(7)
+/* Defines support for TWT Requester and TWT Responder */
+#define WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT BIT(5)
+#define WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT BIT(6)
+
+/*
+ * When set, indicates that the AP is able to tolerate 26-tone RU UL
+ * OFDMA transmissions using HE TB PPDU from an OBSS (i.e., it does not
+ * falsely classify those 26-tone RU UL OFDMA transmissions as radar pulses).
+ */
+#define WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT BIT(7)
+
+/* Defines support for enhanced multi-BSSID advertisement */
+#define WLAN_EXT_CAPA11_EMA_SUPPORT BIT(3)
+
/* TDLS specific payload type in the LLC/SNAP header */
#define WLAN_TDLS_SNAP_RFTYPE 0x2
@@ -2394,6 +3904,34 @@ enum ieee80211_sa_query_action {
WLAN_ACTION_SA_QUERY_RESPONSE = 1,
};
+/**
+ * struct ieee80211_bssid_index
+ *
+ * This structure refers to "Multiple BSSID-index element"
+ *
+ * @bssid_index: BSSID index
+ * @dtim_period: optional, overrides transmitted BSS dtim period
+ * @dtim_count: optional, overrides transmitted BSS dtim count
+ */
+struct ieee80211_bssid_index {
+ u8 bssid_index;
+ u8 dtim_period;
+ u8 dtim_count;
+};
+
+/**
+ * struct ieee80211_multiple_bssid_configuration
+ *
+ * This structure refers to "Multiple BSSID Configuration element"
+ *
+ * @bssid_count: total number of active BSSIDs in the set
+ * @profile_periodicity: the least number of beacon frames that need to be
+ * received in order to discover all the nontransmitted BSSIDs in the set.
+ */
+struct ieee80211_multiple_bssid_configuration {
+ u8 bssid_count;
+ u8 profile_periodicity;
+};
#define SUITE(oui, id) (((oui) << 8) | (id))
@@ -2424,12 +3962,19 @@ enum ieee80211_sa_query_action {
#define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7)
#define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8)
#define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9)
+#define WLAN_AKM_SUITE_AP_PEER_KEY SUITE(0x000FAC, 10)
#define WLAN_AKM_SUITE_8021X_SUITE_B SUITE(0x000FAC, 11)
#define WLAN_AKM_SUITE_8021X_SUITE_B_192 SUITE(0x000FAC, 12)
+#define WLAN_AKM_SUITE_FT_8021X_SHA384 SUITE(0x000FAC, 13)
#define WLAN_AKM_SUITE_FILS_SHA256 SUITE(0x000FAC, 14)
#define WLAN_AKM_SUITE_FILS_SHA384 SUITE(0x000FAC, 15)
#define WLAN_AKM_SUITE_FT_FILS_SHA256 SUITE(0x000FAC, 16)
#define WLAN_AKM_SUITE_FT_FILS_SHA384 SUITE(0x000FAC, 17)
+#define WLAN_AKM_SUITE_OWE SUITE(0x000FAC, 18)
+#define WLAN_AKM_SUITE_FT_PSK_SHA384 SUITE(0x000FAC, 19)
+#define WLAN_AKM_SUITE_PSK_SHA384 SUITE(0x000FAC, 20)
+
+#define WLAN_AKM_SUITE_WFA_DPP SUITE(WLAN_OUI_WFA, 2)
#define WLAN_MAX_KEY_LEN 32
@@ -2441,10 +3986,12 @@ enum ieee80211_sa_query_action {
#define WLAN_OUI_WFA 0x506f9a
#define WLAN_OUI_TYPE_WFA_P2P 9
+#define WLAN_OUI_TYPE_WFA_DPP 0x1A
#define WLAN_OUI_MICROSOFT 0x0050f2
#define WLAN_OUI_TYPE_MICROSOFT_WPA 1
#define WLAN_OUI_TYPE_MICROSOFT_WMM 2
#define WLAN_OUI_TYPE_MICROSOFT_WPS 4
+#define WLAN_OUI_TYPE_MICROSOFT_TPC 8
/*
* WMM/802.11e Tspec Element
@@ -2483,21 +4030,55 @@ struct ieee80211_tspec_ie {
__le16 medium_time;
} __packed;
+struct ieee80211_he_6ghz_capa {
+ /* uses IEEE80211_HE_6GHZ_CAP_* below */
+ __le16 capa;
+} __packed;
+
+/* HE 6 GHz band capabilities */
+/* uses enum ieee80211_min_mpdu_spacing values */
+#define IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START 0x0007
+/* uses enum ieee80211_vht_max_ampdu_length_exp values */
+#define IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP 0x0038
+/* uses IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_* values */
+#define IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN 0x00c0
+/* uses WLAN_HT_CAP_SM_PS_* values */
+#define IEEE80211_HE_6GHZ_CAP_SM_PS 0x0600
+#define IEEE80211_HE_6GHZ_CAP_RD_RESPONDER 0x0800
+#define IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS 0x1000
+#define IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS 0x2000
+
/**
* ieee80211_get_qos_ctl - get pointer to qos control bytes
* @hdr: the frame
*
* The qos ctrl bytes come after the frame_control, duration, seq_num
- * and 3 or 4 addresses of length ETH_ALEN.
- * 3 addr: 2 + 2 + 2 + 3*6 = 24
- * 4 addr: 2 + 2 + 2 + 4*6 = 30
+ * and 3 or 4 addresses of length ETH_ALEN. Checks frame_control to choose
+ * between struct ieee80211_qos_hdr_4addr and struct ieee80211_qos_hdr.
*/
static inline u8 *ieee80211_get_qos_ctl(struct ieee80211_hdr *hdr)
{
- if (ieee80211_has_a4(hdr->frame_control))
- return (u8 *)hdr + 30;
+ union {
+ struct ieee80211_qos_hdr addr3;
+ struct ieee80211_qos_hdr_4addr addr4;
+ } *qos;
+
+ qos = (void *)hdr;
+ if (ieee80211_has_a4(qos->addr3.frame_control))
+ return (u8 *)&qos->addr4.qos_ctrl;
else
- return (u8 *)hdr + 24;
+ return (u8 *)&qos->addr3.qos_ctrl;
+}
+
+/**
+ * ieee80211_get_tid - get qos TID
+ * @hdr: the frame
+ */
+static inline u8 ieee80211_get_tid(struct ieee80211_hdr *hdr)
+{
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+
+ return qc[0] & IEEE80211_QOS_CTL_TID_MASK;
}
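
/*
 * Illustrative usage sketch, not part of the patch: only QoS data frames
 * carry a QoS control field, so callers should check the frame type
 * before reading the TID.
 */
static inline u8 rx_frame_tid_example(struct ieee80211_hdr *hdr)
{
	if (!ieee80211_is_data_qos(hdr->frame_control))
		return 0;	/* non-QoS traffic: treat as TID 0 */

	return ieee80211_get_tid(hdr);
}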
/**
@@ -2538,6 +4119,44 @@ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr)
}
/**
+ * ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU
+ * @skb: the skb to check, starting with the 802.11 header
+ */
+static inline bool ieee80211_is_bufferable_mmpdu(struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ __le16 fc = mgmt->frame_control;
+
+ /*
+ * IEEE 802.11 REVme D2.0 definition of bufferable MMPDU;
+ * note that this ignores the IBSS special case.
+ */
+ if (!ieee80211_is_mgmt(fc))
+ return false;
+
+ if (ieee80211_is_disassoc(fc) || ieee80211_is_deauth(fc))
+ return true;
+
+ if (!ieee80211_is_action(fc))
+ return false;
+
+ if (skb->len < offsetofend(typeof(*mgmt), u.action.u.ftm.action_code))
+ return true;
+
+ /* action frame - additionally check for non-bufferable FTM */
+
+ if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
+ mgmt->u.action.category != WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION)
+ return true;
+
+ if (mgmt->u.action.u.ftm.action_code == WLAN_PUB_ACTION_FTM_REQUEST ||
+ mgmt->u.action.u.ftm.action_code == WLAN_PUB_ACTION_FTM_RESPONSE)
+ return false;
+
+ return true;
+}
+
+/**
* _ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame
* @hdr: the frame (buffer must include at least the first octet of payload)
*/
@@ -2566,6 +4185,7 @@ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
*category != WLAN_CATEGORY_SELF_PROTECTED &&
*category != WLAN_CATEGORY_UNPROT_DMG &&
*category != WLAN_CATEGORY_VHT &&
+ *category != WLAN_CATEGORY_S1G &&
*category != WLAN_CATEGORY_VENDOR_SPECIFIC;
}
@@ -2696,6 +4316,18 @@ static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size)
#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024))
#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x))
+/* convert frequencies */
+#define MHZ_TO_KHZ(freq) ((freq) * 1000)
+#define KHZ_TO_MHZ(freq) ((freq) / 1000)
+#define PR_KHZ(f) KHZ_TO_MHZ(f), (f) % 1000
+#define KHZ_F "%d.%03d"
+
+/* convert powers */
+#define DBI_TO_MBI(gain) ((gain) * 100)
+#define MBI_TO_DBI(gain) ((gain) / 100)
+#define DBM_TO_MBM(gain) ((gain) * 100)
+#define MBM_TO_DBM(gain) ((gain) / 100)
+
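/*
 * Illustrative usage sketch, not part of the patch: KHZ_F and PR_KHZ are
 * meant to be used together to print a kHz frequency as "<MHz>.<kHz>".
 */
static inline void print_center_freq_example(int freq_khz)
{
	/* freq_khz = 2412500 prints "center freq: 2412.500 MHz" */
	pr_debug("center freq: " KHZ_F " MHz\n", PR_KHZ(freq_khz));
}
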
/**
* ieee80211_action_contains_tpc - checks if the frame contains TPC element
* @skb: the skb containing the frame, length will be checked
@@ -2743,4 +4375,396 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb)
return true;
}
+static inline bool ieee80211_is_timing_measurement(struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+ if (skb->len < IEEE80211_MIN_ACTION_SIZE)
+ return false;
+
+ if (!ieee80211_is_action(mgmt->frame_control))
+ return false;
+
+ if (mgmt->u.action.category == WLAN_CATEGORY_WNM_UNPROTECTED &&
+ mgmt->u.action.u.wnm_timing_msr.action_code ==
+ WLAN_UNPROTECTED_WNM_ACTION_TIMING_MEASUREMENT_RESPONSE &&
+ skb->len >= offsetofend(typeof(*mgmt), u.action.u.wnm_timing_msr))
+ return true;
+
+ return false;
+}
+
+static inline bool ieee80211_is_ftm(struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+ if (!ieee80211_is_public_action((void *)mgmt, skb->len))
+ return false;
+
+ if (mgmt->u.action.u.ftm.action_code ==
+ WLAN_PUB_ACTION_FTM_RESPONSE &&
+ skb->len >= offsetofend(typeof(*mgmt), u.action.u.ftm))
+ return true;
+
+ return false;
+}
+
+struct element {
+ u8 id;
+ u8 datalen;
+ u8 data[];
+} __packed;
+
+/* element iteration helpers */
+#define for_each_element(_elem, _data, _datalen) \
+ for (_elem = (const struct element *)(_data); \
+ (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \
+ (int)sizeof(*_elem) && \
+ (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \
+ (int)sizeof(*_elem) + _elem->datalen; \
+ _elem = (const struct element *)(_elem->data + _elem->datalen))
+
+#define for_each_element_id(element, _id, data, datalen) \
+ for_each_element(element, data, datalen) \
+ if (element->id == (_id))
+
+#define for_each_element_extid(element, extid, _data, _datalen) \
+ for_each_element(element, _data, _datalen) \
+ if (element->id == WLAN_EID_EXTENSION && \
+ element->datalen > 0 && \
+ element->data[0] == (extid))
+
+#define for_each_subelement(sub, element) \
+ for_each_element(sub, (element)->data, (element)->datalen)
+
+#define for_each_subelement_id(sub, id, element) \
+ for_each_element_id(sub, id, (element)->data, (element)->datalen)
+
+#define for_each_subelement_extid(sub, extid, element) \
+ for_each_element_extid(sub, extid, (element)->data, (element)->datalen)
+
+/**
+ * for_each_element_completed - determine if element parsing consumed all data
+ * @element: element pointer after for_each_element() or friends
+ * @data: same data pointer as passed to for_each_element() or friends
+ * @datalen: same data length as passed to for_each_element() or friends
+ *
+ * This function returns %true if all the data was parsed or considered
+ * while walking the elements. Only use this if your for_each_element()
+ * loop cannot be broken out of, otherwise it always returns %false.
+ *
+ * If some data was malformed, this returns %false since the last parsed
+ * element will not fill the whole remaining data.
+ */
+static inline bool for_each_element_completed(const struct element *element,
+ const void *data, size_t datalen)
+{
+ return (const u8 *)element == (const u8 *)data + datalen;
+}
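
/*
 * Illustrative usage sketch, not part of the patch: find the SSID element
 * in an IE buffer; "ies" and "ies_len" are hypothetical caller-provided
 * values. The iteration macros above guarantee that elem->datalen never
 * runs past the end of the buffer.
 */
static inline const struct element *
find_ssid_example(const u8 *ies, size_t ies_len)
{
	const struct element *elem;

	for_each_element_id(elem, WLAN_EID_SSID, ies, ies_len)
		return elem;	/* first match; elem->data is the SSID */

	return NULL;
}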
+
+/**
+ * RSNX Capabilities:
+ * bits 0-3: Field length (n-1)
+ */
+#define WLAN_RSNX_CAPA_PROTECTED_TWT BIT(4)
+#define WLAN_RSNX_CAPA_SAE_H2E BIT(5)
+
+/*
+ * reduced neighbor report, based on Draft P802.11ax_D6.1,
+ * section 9.4.2.170 and accepted contributions.
+ */
+#define IEEE80211_AP_INFO_TBTT_HDR_TYPE 0x03
+#define IEEE80211_AP_INFO_TBTT_HDR_FILTERED 0x04
+#define IEEE80211_AP_INFO_TBTT_HDR_COLOC 0x08
+#define IEEE80211_AP_INFO_TBTT_HDR_COUNT 0xF0
+#define IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM 9
+#define IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM 13
+
+#define IEEE80211_RNR_TBTT_PARAMS_OCT_RECOMMENDED 0x01
+#define IEEE80211_RNR_TBTT_PARAMS_SAME_SSID 0x02
+#define IEEE80211_RNR_TBTT_PARAMS_MULTI_BSSID 0x04
+#define IEEE80211_RNR_TBTT_PARAMS_TRANSMITTED_BSSID 0x08
+#define IEEE80211_RNR_TBTT_PARAMS_COLOC_ESS 0x10
+#define IEEE80211_RNR_TBTT_PARAMS_PROBE_ACTIVE 0x20
+#define IEEE80211_RNR_TBTT_PARAMS_COLOC_AP 0x40
+
+struct ieee80211_neighbor_ap_info {
+ u8 tbtt_info_hdr;
+ u8 tbtt_info_len;
+ u8 op_class;
+ u8 channel;
+} __packed;
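
/*
 * Illustrative sketch, not part of the patch: the TBTT header COUNT field
 * stores "number of TBTT information fields minus one", so the actual
 * count is recovered like this (u8_get_bits() from <linux/bitfield.h> is
 * assumed available in the including file).
 */
static inline u8
rnr_tbtt_info_count_example(const struct ieee80211_neighbor_ap_info *info)
{
	return u8_get_bits(info->tbtt_info_hdr,
			   IEEE80211_AP_INFO_TBTT_HDR_COUNT) + 1;
}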
+
+enum ieee80211_range_params_max_total_ltf {
+ IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_4 = 0,
+ IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_8,
+ IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_16,
+ IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_UNSPECIFIED,
+};
+
+/* multi-link device */
+#define IEEE80211_MLD_MAX_NUM_LINKS 15
+
+#define IEEE80211_ML_CONTROL_TYPE 0x0007
+#define IEEE80211_ML_CONTROL_TYPE_BASIC 0
+#define IEEE80211_ML_CONTROL_TYPE_PREQ 1
+#define IEEE80211_ML_CONTROL_TYPE_RECONF 2
+#define IEEE80211_ML_CONTROL_TYPE_TDLS 3
+#define IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS 4
+#define IEEE80211_ML_CONTROL_PRESENCE_MASK 0xfff0
+
+struct ieee80211_multi_link_elem {
+ __le16 control;
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_MLC_BASIC_PRES_LINK_ID 0x0010
+#define IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT 0x0020
+#define IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY 0x0040
+#define IEEE80211_MLC_BASIC_PRES_EML_CAPA 0x0080
+#define IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP 0x0100
+#define IEEE80211_MLC_BASIC_PRES_MLD_ID 0x0200
+
+#define IEEE80211_MED_SYNC_DELAY_DURATION 0x00ff
+#define IEEE80211_MED_SYNC_DELAY_SYNC_OFDM_ED_THRESH 0x0f00
+#define IEEE80211_MED_SYNC_DELAY_SYNC_MAX_NUM_TXOPS 0xf000
+
+#define IEEE80211_EML_CAP_EMLSR_SUPP 0x0001
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY 0x000e
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_0US 0
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_32US 1
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_64US 2
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_128US 3
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US 4
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY 0x0070
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_0US 0
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_16US 1
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_32US 2
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US 3
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_128US 4
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US 5
+#define IEEE80211_EML_CAP_EMLMR_SUPPORT 0x0080
+#define IEEE80211_EML_CAP_EMLMR_DELAY 0x0700
+#define IEEE80211_EML_CAP_EMLMR_DELAY_0US 0
+#define IEEE80211_EML_CAP_EMLMR_DELAY_32US 1
+#define IEEE80211_EML_CAP_EMLMR_DELAY_64US 2
+#define IEEE80211_EML_CAP_EMLMR_DELAY_128US 3
+#define IEEE80211_EML_CAP_EMLMR_DELAY_256US 4
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT 0x7800
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_0 0
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128US 1
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_256US 2
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_512US 3
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_1TU 4
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_2TU 5
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_4TU 6
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_8TU 7
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_16TU 8
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_32TU 9
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_64TU 10
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU 11
+
+#define IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS 0x000f
+#define IEEE80211_MLD_CAP_OP_SRS_SUPPORT 0x0010
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP 0x0060
+#define IEEE80211_MLD_CAP_OP_FREQ_SEP_TYPE_IND 0x0f80
+#define IEEE80211_MLD_CAP_OP_AAR_SUPPORT 0x1000
+
+struct ieee80211_mle_basic_common_info {
+ u8 len;
+ u8 mld_mac_addr[ETH_ALEN];
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_MLC_PREQ_PRES_MLD_ID 0x0010
+
+struct ieee80211_mle_preq_common_info {
+ u8 len;
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR 0x0010
+
+/* no fixed fields in RECONF */
+
+struct ieee80211_mle_tdls_common_info {
+ u8 len;
+ u8 ap_mld_mac_addr[ETH_ALEN];
+} __packed;
+
+#define IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR 0x0010
+
+/* no fixed fields in PRIO_ACCESS */
+
+/**
+ * ieee80211_mle_common_size - check multi-link element common size
+ * @data: multi-link element, must already be checked for size using
+ * ieee80211_mle_size_ok()
+ */
+static inline u8 ieee80211_mle_common_size(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ u8 common = 0;
+
+ switch (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE)) {
+ case IEEE80211_ML_CONTROL_TYPE_BASIC:
+ case IEEE80211_ML_CONTROL_TYPE_PREQ:
+ case IEEE80211_ML_CONTROL_TYPE_TDLS:
+ /*
+ * The length is the first octet pointed by mle->variable so no
+ * need to add anything
+ */
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_RECONF:
+ if (control & IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR)
+ common += ETH_ALEN;
+ return common;
+ case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
+ if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR)
+ common += ETH_ALEN;
+ return common;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+
+ return sizeof(*mle) + common + mle->variable[0];
+}
+
+/**
+ * ieee80211_mle_size_ok - validate multi-link element size
+ * @data: pointer to the element data
+ * @len: length of the containing element
+ */
+static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u8 fixed = sizeof(*mle);
+ u8 common = 0;
+ bool check_common_len = false;
+ u16 control;
+
+ if (len < fixed)
+ return false;
+
+ control = le16_to_cpu(mle->control);
+
+ switch (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE)) {
+ case IEEE80211_ML_CONTROL_TYPE_BASIC:
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+ check_common_len = true;
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_ID)
+ common += 1;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_PREQ:
+ common += sizeof(struct ieee80211_mle_preq_common_info);
+ if (control & IEEE80211_MLC_PREQ_PRES_MLD_ID)
+ common += 1;
+ check_common_len = true;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_RECONF:
+ if (control & IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR)
+ common += ETH_ALEN;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_TDLS:
+ common += sizeof(struct ieee80211_mle_tdls_common_info);
+ check_common_len = true;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
+ if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR)
+ common += ETH_ALEN;
+ break;
+ default:
+ /* we don't know this type */
+ return true;
+ }
+
+ if (len < fixed + common)
+ return false;
+
+ if (!check_common_len)
+ return true;
+
+ /* if present, common length is the first octet there */
+ return mle->variable[0] >= common;
+}
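
/*
 * Illustrative sketch of the intended call pattern, not part of the
 * patch: ieee80211_mle_common_size() trusts its input, so validate with
 * ieee80211_mle_size_ok() first; the sub-elements (if any) start right
 * after the common info.
 */
static inline const u8 *mle_first_subelem_example(const u8 *data, size_t len)
{
	if (!ieee80211_mle_size_ok(data, len))
		return NULL;

	return data + ieee80211_mle_common_size(data);
}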
+
+enum ieee80211_mle_subelems {
+ IEEE80211_MLE_SUBELEM_PER_STA_PROFILE = 0,
+ IEEE80211_MLE_SUBELEM_FRAGMENT = 254,
+};
+
+#define IEEE80211_MLE_STA_CONTROL_LINK_ID 0x000f
+#define IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE 0x0010
+#define IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT 0x0020
+#define IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT 0x0040
+#define IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT 0x0080
+#define IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT 0x0100
+#define IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT 0x0200
+#define IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE 0x0400
+#define IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT 0x0800
+
+struct ieee80211_mle_per_sta_profile {
+ __le16 control;
+ u8 sta_info_len;
+ u8 variable[];
+} __packed;
+
+/**
+ * ieee80211_mle_sta_prof_size_ok - validate multi-link element sta profile size
+ * @data: pointer to the sub element data
+ * @len: length of the containing sub element
+ */
+static inline bool ieee80211_mle_sta_prof_size_ok(const u8 *data, size_t len)
+{
+ const struct ieee80211_mle_per_sta_profile *prof = (const void *)data;
+ u16 control;
+ u8 fixed = sizeof(*prof);
+ u8 info_len = 1;
+
+ if (len < fixed)
+ return false;
+
+ control = le16_to_cpu(prof->control);
+
+ if (control & IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT)
+ info_len += 6;
+ if (control & IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT)
+ info_len += 2;
+ if (control & IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT)
+ info_len += 8;
+ if (control & IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT)
+ info_len += 2;
+ if (control & IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT)
+ info_len += 1;
+
+ if (control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE &&
+	    control & IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT) {
+ if (control & IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE)
+ info_len += 2;
+ else
+ info_len += 1;
+ }
+
+ return prof->sta_info_len >= info_len &&
+ fixed + prof->sta_info_len <= len;
+}
+
+#define for_each_mle_subelement(_elem, _data, _len) \
+ if (ieee80211_mle_size_ok(_data, _len)) \
+ for_each_element(_elem, \
+ _data + ieee80211_mle_common_size(_data),\
+ _len - ieee80211_mle_common_size(_data))
+
#endif /* LINUX_IEEE80211_H */
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
index ddb890174a0e..140f61ec0f5f 100644
--- a/include/linux/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* IEEE802.15.4-2003 specification
*
* Copyright (C) 2007, 2008 Siemens AG
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Written by:
* Pavel Smolenskiy <pavel.smolenskiy@gmail.com>
* Maxim Gorbachyov <maxim.gorbachev@siemens.com>
@@ -52,6 +44,13 @@
#define IEEE802154_SHORT_ADDR_LEN 2
#define IEEE802154_PAN_ID_LEN 2
+/* Duration in superframe order */
+#define IEEE802154_MAX_SCAN_DURATION 14
+#define IEEE802154_ACTIVE_SCAN_DURATION 15
+/* Superframe duration in slots */
+#define IEEE802154_SUPERFRAME_PERIOD 16
+/* Various periods expressed in symbols */
+#define IEEE802154_SLOT_PERIOD 60
#define IEEE802154_LIFS_PERIOD 40
#define IEEE802154_SIFS_PERIOD 12
#define IEEE802154_MAX_SIFS_FRAME_SIZE 18
@@ -142,18 +141,46 @@ enum {
* a successful transmission.
*/
IEEE802154_SUCCESS = 0x0,
-
+ /* The requested operation failed. */
+ IEEE802154_MAC_ERROR = 0x1,
+ /* The requested operation has been cancelled. */
+ IEEE802154_CANCELLED = 0x2,
+ /*
+	 * Device is ready to poll the coordinator for data in a
+	 * non-beacon-enabled PAN.
+ */
+ IEEE802154_READY_FOR_POLL = 0x3,
+ /* Wrong frame counter. */
+ IEEE802154_COUNTER_ERROR = 0xdb,
+ /*
+	 * The frame does not conform to the incoming key usage policy checking
+	 * procedure.
+ */
+ IEEE802154_IMPROPER_KEY_TYPE = 0xdc,
+ /*
+	 * The frame does not conform to the incoming security level usage
+	 * policy checking procedure.
+ */
+ IEEE802154_IMPROPER_SECURITY_LEVEL = 0xdd,
+ /* Secured frame received with an empty Frame Version field. */
+ IEEE802154_UNSUPPORTED_LEGACY = 0xde,
+ /*
+ * A secured frame is received or must be sent but security is not
+ * enabled in the device. Or, the Auxiliary Security Header has security
+ * level of zero in it.
+ */
+ IEEE802154_UNSUPPORTED_SECURITY = 0xdf,
/* The beacon was lost following a synchronization request. */
- IEEE802154_BEACON_LOSS = 0xe0,
+ IEEE802154_BEACON_LOST = 0xe0,
/*
* A transmission could not take place due to activity on the
* channel, i.e., the CSMA-CA mechanism has failed.
*/
- IEEE802154_CHNL_ACCESS_FAIL = 0xe1,
+ IEEE802154_CHANNEL_ACCESS_FAILURE = 0xe1,
/* The GTS request has been denied by the PAN coordinator. */
- IEEE802154_DENINED = 0xe2,
+ IEEE802154_DENIED = 0xe2,
/* The attempt to disable the transceiver has failed. */
- IEEE802154_DISABLE_TRX_FAIL = 0xe3,
+ IEEE802154_DISABLE_TRX_FAILURE = 0xe3,
/*
* The received frame induces a failed security check according to
* the security suite.
@@ -193,9 +220,9 @@ enum {
* A PAN identifier conflict has been detected and communicated to the
* PAN coordinator.
*/
- IEEE802154_PANID_CONFLICT = 0xee,
+ IEEE802154_PAN_ID_CONFLICT = 0xee,
/* A coordinator realignment command has been received. */
- IEEE802154_REALIGMENT = 0xef,
+ IEEE802154_REALIGNMENT = 0xef,
/* The transaction has expired and its information discarded. */
IEEE802154_TRANSACTION_EXPIRED = 0xf0,
/* There is no capacity to store the transaction. */
@@ -211,12 +238,73 @@ enum {
* A SET/GET request was issued with the identifier of a PIB attribute
* that is not supported.
*/
- IEEE802154_UNSUPPORTED_ATTR = 0xf4,
+ IEEE802154_UNSUPPORTED_ATTRIBUTE = 0xf4,
+ /* Missing source or destination address or address mode. */
+ IEEE802154_INVALID_ADDRESS = 0xf5,
+ /*
+ * MLME asked to turn the receiver on, but the on time duration is too
+ * big compared to the macBeaconOrder.
+ */
+ IEEE802154_ON_TIME_TOO_LONG = 0xf6,
+ /*
+	 * MLME asked to turn the receiver on, but the request was delayed for
+ * too long before getting processed.
+ */
+ IEEE802154_PAST_TIME = 0xf7,
+ /*
+ * The StartTime parameter is nonzero, and the MLME is not currently
+ * tracking the beacon of the coordinator through which it is
+ * associated.
+ */
+ IEEE802154_TRACKING_OFF = 0xf8,
+ /*
+ * The index inside the hierarchical values in PIBAttribute is out of
+ * range.
+ */
+ IEEE802154_INVALID_INDEX = 0xf9,
+ /*
+	 * The maximum number of PAN descriptors that can be discovered during
+	 * a scan has been reached.
+ */
+ IEEE802154_LIMIT_REACHED = 0xfa,
+ /*
+ * The PIBAttribute parameter specifies an attribute that is a read-only
+ * attribute.
+ */
+ IEEE802154_READ_ONLY = 0xfb,
/*
* A request to perform a scan operation failed because the MLME was
* in the process of performing a previously initiated scan operation.
*/
IEEE802154_SCAN_IN_PROGRESS = 0xfc,
+ /* The outgoing superframe overlaps the incoming superframe. */
+ IEEE802154_SUPERFRAME_OVERLAP = 0xfd,
+ /* Any other error situation. */
+ IEEE802154_SYSTEM_ERROR = 0xff,
+};
+
+/**
+ * enum ieee802154_filtering_level - Filtering levels applicable to a PHY
+ *
+ * @IEEE802154_FILTERING_NONE: No filtering at all; everything received is
+ * forwarded to the softMAC
+ * @IEEE802154_FILTERING_1_FCS: First filtering level, frames with an invalid
+ * FCS should be dropped
+ * @IEEE802154_FILTERING_2_PROMISCUOUS: Second filtering level, promiscuous
+ * mode as described in the spec, identical in terms of filtering to
+ * level one on the PHY side, but at the MAC level the frame is
+ * forwarded directly to the upper layer
+ * @IEEE802154_FILTERING_3_SCAN: Third filtering level, scan related, where
+ * only beacons must be processed, all remaining traffic gets dropped
+ * @IEEE802154_FILTERING_4_FRAME_FIELDS: Fourth filtering level actually
+ * enforcing the validity of the content of the frame with various checks
+ */
+enum ieee802154_filtering_level {
+ IEEE802154_FILTERING_NONE,
+ IEEE802154_FILTERING_1_FCS,
+ IEEE802154_FILTERING_2_PROMISCUOUS,
+ IEEE802154_FILTERING_3_SCAN,
+ IEEE802154_FILTERING_4_FRAME_FIELDS,
};
/* frame control handling */
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index 3355efc89781..1ed52441972f 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
@@ -14,11 +15,6 @@
* Florian La Roche,
* Jonathan Layes <layes@loran.com>
* Arnaldo Carvalho de Melo <acme@conectiva.com.br> ARPHRD_HWX25
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_IF_ARP_H
#define _LINUX_IF_ARP_H
@@ -31,7 +27,7 @@ static inline struct arphdr *arp_hdr(const struct sk_buff *skb)
return (struct arphdr *)skb_network_header(skb);
}
-static inline int arp_hdr_len(struct net_device *dev)
+static inline unsigned int arp_hdr_len(const struct net_device *dev)
{
switch (dev->type) {
#if IS_ENABLED(CONFIG_FIREWIRE_NET)
@@ -52,8 +48,11 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
case ARPHRD_TUNNEL6:
case ARPHRD_SIT:
case ARPHRD_IPGRE:
+ case ARPHRD_IP6GRE:
case ARPHRD_VOID:
case ARPHRD_NONE:
+ case ARPHRD_RAWIP:
+ case ARPHRD_PIMREG:
return false;
default:
return true;
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 3cd18ac0697f..3ff96ae31bf6 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_IF_BRIDGE_H
#define _LINUX_IF_BRIDGE_H
@@ -23,7 +19,14 @@ struct br_ip {
#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr ip6;
#endif
- } u;
+ } src;
+ union {
+ __be32 ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr ip6;
+#endif
+ unsigned char mac_addr[ETH_ALEN];
+ } dst;
__be16 proto;
__u16 vid;
};
@@ -49,20 +52,33 @@ struct br_ip_list {
#define BR_MULTICAST_TO_UNICAST BIT(12)
#define BR_VLAN_TUNNEL BIT(13)
#define BR_BCAST_FLOOD BIT(14)
+#define BR_NEIGH_SUPPRESS BIT(15)
+#define BR_ISOLATED BIT(16)
+#define BR_MRP_AWARE BIT(17)
+#define BR_MRP_LOST_CONT BIT(18)
+#define BR_MRP_LOST_IN_CONT BIT(19)
+#define BR_TX_FWD_OFFLOAD BIT(20)
+#define BR_PORT_LOCKED BIT(21)
+#define BR_PORT_MAB BIT(22)
+#define BR_NEIGH_VLAN_SUPPRESS BIT(23)
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
-extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
-
-typedef int br_should_route_hook_t(struct sk_buff *skb);
-extern br_should_route_hook_t __rcu *br_should_route_hook;
+struct net_bridge;
+void brioctl_set(int (*hook)(struct net *net, struct net_bridge *br,
+ unsigned int cmd, struct ifreq *ifr,
+ void __user *uarg));
+int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd,
+ struct ifreq *ifr, void __user *uarg);
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list);
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto);
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto);
+bool br_multicast_has_router_adjacent(struct net_device *dev, int proto);
bool br_multicast_enabled(const struct net_device *dev);
+bool br_multicast_router(const struct net_device *dev);
#else
static inline int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list)
@@ -79,19 +95,121 @@ static inline bool br_multicast_has_querier_adjacent(struct net_device *dev,
{
return false;
}
+
+static inline bool br_multicast_has_router_adjacent(struct net_device *dev,
+ int proto)
+{
+ return true;
+}
+
static inline bool br_multicast_enabled(const struct net_device *dev)
{
return false;
}
+static inline bool br_multicast_router(const struct net_device *dev)
+{
+ return false;
+}
#endif
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
bool br_vlan_enabled(const struct net_device *dev);
+int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid);
+int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid);
+int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto);
+int br_vlan_get_info(const struct net_device *dev, u16 vid,
+ struct bridge_vlan_info *p_vinfo);
+int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
+ struct bridge_vlan_info *p_vinfo);
+bool br_mst_enabled(const struct net_device *dev);
+int br_mst_get_info(const struct net_device *dev, u16 msti, unsigned long *vids);
+int br_mst_get_state(const struct net_device *dev, u16 msti, u8 *state);
#else
static inline bool br_vlan_enabled(const struct net_device *dev)
{
return false;
}
+
+static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
+{
+ return -EINVAL;
+}
+
+static inline int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
+{
+ return -EINVAL;
+}
+
+static inline int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
+{
+ return -EINVAL;
+}
+
+static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
+ struct bridge_vlan_info *p_vinfo)
+{
+ return -EINVAL;
+}
+
+static inline int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
+ struct bridge_vlan_info *p_vinfo)
+{
+ return -EINVAL;
+}
+
+static inline bool br_mst_enabled(const struct net_device *dev)
+{
+ return false;
+}
+
+static inline int br_mst_get_info(const struct net_device *dev, u16 msti,
+ unsigned long *vids)
+{
+ return -EINVAL;
+}
+static inline int br_mst_get_state(const struct net_device *dev, u16 msti,
+ u8 *state)
+{
+ return -EINVAL;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_BRIDGE)
+struct net_device *br_fdb_find_port(const struct net_device *br_dev,
+ const unsigned char *addr,
+ __u16 vid);
+void br_fdb_clear_offload(const struct net_device *dev, u16 vid);
+bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag);
+u8 br_port_get_stp_state(const struct net_device *dev);
+clock_t br_get_ageing_time(const struct net_device *br_dev);
+#else
+static inline struct net_device *
+br_fdb_find_port(const struct net_device *br_dev,
+ const unsigned char *addr,
+ __u16 vid)
+{
+ return NULL;
+}
+
+static inline void br_fdb_clear_offload(const struct net_device *dev, u16 vid)
+{
+}
+
+static inline bool
+br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
+{
+ return false;
+}
+
+static inline u8 br_port_get_stp_state(const struct net_device *dev)
+{
+ return BR_STATE_DISABLED;
+}
+
+static inline clock_t br_get_ageing_time(const struct net_device *br_dev)
+{
+ return 0;
+}
#endif
#endif
diff --git a/include/linux/if_eql.h b/include/linux/if_eql.h
index d984694c384d..07f9b660b741 100644
--- a/include/linux/if_eql.h
+++ b/include/linux/if_eql.h
@@ -21,11 +21,13 @@
#include <linux/timer.h>
#include <linux/spinlock.h>
+#include <net/net_trackers.h>
#include <uapi/linux/if_eql.h>
typedef struct slave {
struct list_head list;
struct net_device *dev;
+ netdevice_tracker dev_tracker;
long priority;
long priority_bps;
long priority_Bps;
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 548fd535fd02..8a9792a6427a 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
@@ -11,11 +12,6 @@
* Donald Becker, <becker@super.org>
* Alan Cox, <alan@lxorguk.ukuu.org.uk>
* Steve Whitehouse, <gw7rrm@eeshack3.swan.ac.uk>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_IF_ETHER_H
#define _LINUX_IF_ETHER_H
@@ -28,6 +24,14 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
return (struct ethhdr *)skb_mac_header(skb);
}
+/* Prefer this version in TX path, instead of
+ * skb_reset_mac_header() + eth_hdr()
+ */
+static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb)
+{
+ return (struct ethhdr *)skb->data;
+}
+
static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
{
return (struct ethhdr *)skb_inner_mac_header(skb);
diff --git a/include/linux/if_fddi.h b/include/linux/if_fddi.h
index f5550b3eeeab..c796f452d646 100644
--- a/include/linux/if_fddi.h
+++ b/include/linux/if_fddi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
@@ -15,11 +16,6 @@
* Alan Cox, <alan@lxorguk.ukuu.org.uk>
* Steve Whitehouse, <gw7rrm@eeshack3.swan.ac.uk>
* Peter De Schrijver, <stud11@cc4.kuleuven.ac.be>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_IF_FDDI_H
#define _LINUX_IF_FDDI_H
diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h
deleted file mode 100644
index 46df7e565d6f..000000000000
--- a/include/linux/if_frad.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * DLCI/FRAD Definitions for Frame Relay Access Devices. DLCI devices are
- * created for each DLCI associated with a FRAD. The FRAD driver
- * is not truly a network device, but the lower level device
- * handler. This allows other FRAD manufacturers to use the DLCI
- * code, including its RFC1490 encapsulation alongside the current
- * implementation for the Sangoma cards.
- *
- * Version: @(#)if_ifrad.h 0.15 31 Mar 96
- *
- * Author: Mike McLagan <mike.mclagan@linux.org>
- *
- * Changes:
- * 0.15 Mike McLagan changed structure defs (packed)
- * re-arranged flags
- * added DLCI_RET vars
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifndef _FRAD_H_
-#define _FRAD_H_
-
-#include <uapi/linux/if_frad.h>
-
-
-#if defined(CONFIG_DLCI) || defined(CONFIG_DLCI_MODULE)
-
-/* these are the fields of an RFC 1490 header */
-struct frhdr
-{
- unsigned char control;
-
- /* for IP packets, this can be the NLPID */
- unsigned char pad;
-
- unsigned char NLPID;
- unsigned char OUI[3];
- __be16 PID;
-
-#define IP_NLPID pad
-} __packed;
-
-/* see RFC 1490 for the definition of the following */
-#define FRAD_I_UI 0x03
-
-#define FRAD_P_PADDING 0x00
-#define FRAD_P_Q933 0x08
-#define FRAD_P_SNAP 0x80
-#define FRAD_P_CLNP 0x81
-#define FRAD_P_IP 0xCC
-
-struct dlci_local
-{
- struct net_device *master;
- struct net_device *slave;
- struct dlci_conf config;
- int configured;
- struct list_head list;
-
- /* callback function */
- void (*receive)(struct sk_buff *skb, struct net_device *);
-};
-
-struct frad_local
-{
- /* devices which this FRAD is slaved to */
- struct net_device *master[CONFIG_DLCI_MAX];
- short dlci[CONFIG_DLCI_MAX];
-
- struct frad_conf config;
- int configured; /* has this device been configured */
- int initialized; /* mem_start, port, irq set ? */
-
- /* callback functions */
- int (*activate)(struct net_device *, struct net_device *);
- int (*deactivate)(struct net_device *, struct net_device *);
- int (*assoc)(struct net_device *, struct net_device *);
- int (*deassoc)(struct net_device *, struct net_device *);
- int (*dlci_conf)(struct net_device *, struct net_device *, int get);
-
- /* fields that are used by the Sangoma SDLA cards */
- struct timer_list timer;
- int type; /* adapter type */
- int state; /* state of the S502/8 control latch */
- int buffer; /* current buffer for S508 firmware */
-};
-
-#endif /* CONFIG_DLCI || CONFIG_DLCI_MODULE */
-
-extern void dlci_ioctl_set(int (*hook)(unsigned int, void __user *));
-
-#endif
diff --git a/include/linux/if_hsr.h b/include/linux/if_hsr.h
new file mode 100644
index 000000000000..0404f5bf4f30
--- /dev/null
+++ b/include/linux/if_hsr.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IF_HSR_H_
+#define _LINUX_IF_HSR_H_
+
+#include <linux/types.h>
+
+struct net_device;
+
+/* used to differentiate various protocols */
+enum hsr_version {
+ HSR_V0 = 0,
+ HSR_V1,
+ PRP_V1,
+};
+
+/* HSR Tag.
+ * As defined in IEC-62439-3:2010, the HSR tag is really { ethertype = 0x88FB,
+ * path, LSDU_size, sequence Nr }. But we let eth_header() create { h_dest,
+ * h_source, h_proto = 0x88FB }, and add { path, LSDU_size, sequence Nr,
+ * encapsulated protocol } instead.
+ *
+ * Field names as defined in the IEC:2010 standard for HSR.
+ */
+struct hsr_tag {
+ __be16 path_and_LSDU_size;
+ __be16 sequence_nr;
+ __be16 encap_proto;
+} __packed;
+
+#define HSR_HLEN 6
+
+#if IS_ENABLED(CONFIG_HSR)
+extern bool is_hsr_master(struct net_device *dev);
+extern int hsr_get_version(struct net_device *dev, enum hsr_version *ver);
+#else
+static inline bool is_hsr_master(struct net_device *dev)
+{
+ return false;
+}
+static inline int hsr_get_version(struct net_device *dev,
+ enum hsr_version *ver)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_HSR */
+
+#endif /*_LINUX_IF_HSR_H_*/
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 0b17c585b5cd..622658dfbf0a 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IF_LINK_H
#define _LINUX_IF_LINK_H
@@ -12,6 +13,8 @@ struct ifla_vf_stats {
__u64 tx_bytes;
__u64 broadcast;
__u64 multicast;
+ __u64 rx_dropped;
+ __u64 tx_dropped;
};
struct ifla_vf_info {
diff --git a/include/linux/if_ltalk.h b/include/linux/if_ltalk.h
index 81e434c50790..4cc1c0b77870 100644
--- a/include/linux/if_ltalk.h
+++ b/include/linux/if_ltalk.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_LTALK_H
#define __LINUX_LTALK_H
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index c9ec1343d187..523025106a64 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IF_MACVLAN_H
#define _LINUX_IF_MACVLAN_H
@@ -10,13 +11,6 @@
#include <linux/u64_stats_sync.h>
struct macvlan_port;
-struct macvtap_queue;
-
-/*
- * Maximum times a macvtap device can be opened. This can be used to
- * configure the number of receive queue, e.g. for multiqueue virtio.
- */
-#define MAX_TAP_QUEUES 256
#define MACVLAN_MC_FILTER_BITS 8
#define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS)
@@ -27,7 +21,8 @@ struct macvlan_dev {
struct hlist_node hlist;
struct macvlan_port *port;
struct net_device *lowerdev;
- void *fwd_priv;
+ netdevice_tracker dev_tracker;
+ void *accel_priv;
struct vlan_pcpu_stats __percpu *pcpu_stats;
DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
@@ -35,19 +30,11 @@ struct macvlan_dev {
netdev_features_t set_features;
enum macvlan_mode mode;
u16 flags;
- /* This array tracks active taps. */
- struct tap_queue __rcu *taps[MAX_TAP_QUEUES];
- /* This list tracks all taps (both enabled and disabled) */
- struct list_head queue_list;
- int numvtaps;
- int numqueues;
- netdev_features_t tap_features;
- int minor;
- int nest_level;
+ unsigned int macaddr_count;
+ u32 bc_queue_len_req;
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *netpoll;
#endif
- unsigned int macaddr_count;
};
static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
@@ -57,13 +44,14 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
if (likely(success)) {
struct vlan_pcpu_stats *pcpu_stats;
- pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
+ pcpu_stats = get_cpu_ptr(vlan->pcpu_stats);
u64_stats_update_begin(&pcpu_stats->syncp);
- pcpu_stats->rx_packets++;
- pcpu_stats->rx_bytes += len;
+ u64_stats_inc(&pcpu_stats->rx_packets);
+ u64_stats_add(&pcpu_stats->rx_bytes, len);
if (multicast)
- pcpu_stats->rx_multicast++;
+ u64_stats_inc(&pcpu_stats->rx_multicast);
u64_stats_update_end(&pcpu_stats->syncp);
+ put_cpu_ptr(vlan->pcpu_stats);
} else {
this_cpu_inc(vlan->pcpu_stats->rx_errors);
}
@@ -72,11 +60,8 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
extern void macvlan_common_setup(struct net_device *dev);
extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[]);
-
-extern void macvlan_count_rx(const struct macvlan_dev *vlan,
- unsigned int len, bool success,
- bool multicast);
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack);
extern void macvlan_dellink(struct net_device *dev, struct list_head *head);
@@ -99,4 +84,27 @@ macvlan_dev_real_dev(const struct net_device *dev)
}
#endif
+static inline void *macvlan_accel_priv(struct net_device *dev)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ return macvlan->accel_priv;
+}
+
+static inline bool macvlan_supports_dest_filter(struct net_device *dev)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ return macvlan->mode == MACVLAN_MODE_PRIVATE ||
+ macvlan->mode == MACVLAN_MODE_VEPA ||
+ macvlan->mode == MACVLAN_MODE_BRIDGE;
+}
+
+static inline int macvlan_release_l2fw_offload(struct net_device *dev)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ macvlan->accel_priv = NULL;
+ return dev_uc_add(macvlan->lowerdev, dev->dev_addr);
+}
#endif /* _LINUX_IF_MACVLAN_H */
diff --git a/include/linux/if_phonet.h b/include/linux/if_phonet.h
index bbcdb0a767d8..2d8486168ec5 100644
--- a/include/linux/if_phonet.h
+++ b/include/linux/if_phonet.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* File: if_phonet.h
*
@@ -10,5 +11,5 @@
#include <uapi/linux/if_phonet.h>
-extern struct header_ops phonet_header_ops;
+extern const struct header_ops phonet_header_ops;
#endif
diff --git a/include/linux/if_pppol2tp.h b/include/linux/if_pppol2tp.h
index 0fb71e532b2c..c87efd333faa 100644
--- a/include/linux/if_pppol2tp.h
+++ b/include/linux/if_pppol2tp.h
@@ -1,15 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/***************************************************************************
* Linux PPP over L2TP (PPPoL2TP) Socket Implementation (RFC 2661)
*
* This file supplies definitions required by the PPP over L2TP driver
* (l2tp_ppp.c). All version information wrt this file is located in l2tp_ppp.c
- *
- * License:
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
*/
#ifndef __LINUX_IF_PPPOL2TP_H
#define __LINUX_IF_PPPOL2TP_H
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index ba7a9b0c7c57..ff3beda1312c 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -1,16 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/***************************************************************************
* Linux PPP over X - Generic PPP transport layer sockets
* Linux PPP over Ethernet (PPPoE) Socket Implementation (RFC 2516)
*
* This file supplies definitions required by the PPP over Ethernet driver
* (pppox.c). All version information wrt this file is located in pppox.c
- *
- * License:
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
*/
#ifndef __LINUX_IF_PPPOX_H
#define __LINUX_IF_PPPOX_H
@@ -84,6 +78,9 @@ extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
extern void unregister_pppox_proto(int proto_num);
extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+
+#define PPPOEIOCSFWD32 _IOW(0xB1 ,0, compat_size_t)
/* PPPoX socket states */
enum {
diff --git a/include/linux/if_rmnet.h b/include/linux/if_rmnet.h
new file mode 100644
index 000000000000..839d1e48b85e
--- /dev/null
+++ b/include/linux/if_rmnet.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (c) 2013-2019, 2021 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _LINUX_IF_RMNET_H_
+#define _LINUX_IF_RMNET_H_
+
+#include <linux/types.h>
+
+struct rmnet_map_header {
+ u8 flags; /* MAP_CMD_FLAG, MAP_PAD_LEN_MASK */
+ u8 mux_id;
+ __be16 pkt_len; /* Length of packet, including pad */
+} __aligned(1);
+
+/* rmnet_map_header flags field:
+ * PAD_LEN: number of pad bytes following packet data
+ * CMD: 1 = packet contains a MAP command; 0 = packet contains data
+ * NEXT_HEADER: 1 = packet contains V5 CSUM header; 0 = no V5 CSUM header
+ */
+#define MAP_PAD_LEN_MASK GENMASK(5, 0)
+#define MAP_NEXT_HEADER_FLAG BIT(6)
+#define MAP_CMD_FLAG BIT(7)
+
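/*
 * Illustrative parsing sketch, not part of the patch: split the flags
 * octet into its fields with the masks above (FIELD_GET() from
 * <linux/bitfield.h> is assumed available in the including file).
 */
static inline bool rmnet_map_is_cmd_example(const struct rmnet_map_header *maph,
					    u8 *pad_len)
{
	*pad_len = FIELD_GET(MAP_PAD_LEN_MASK, maph->flags);
	return !!(maph->flags & MAP_CMD_FLAG);
}
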
+struct rmnet_map_dl_csum_trailer {
+ u8 reserved1;
+ u8 flags; /* MAP_CSUM_DL_VALID_FLAG */
+ __be16 csum_start_offset;
+ __be16 csum_length;
+ __sum16 csum_value;
+} __aligned(1);
+
+/* rmnet_map_dl_csum_trailer flags field:
+ * VALID: 1 = checksum and length valid; 0 = ignore them
+ */
+#define MAP_CSUM_DL_VALID_FLAG BIT(0)
+
+struct rmnet_map_ul_csum_header {
+ __be16 csum_start_offset;
+ __be16 csum_info; /* MAP_CSUM_UL_* */
+} __aligned(1);
+
+/* csum_info field:
+ * OFFSET: where (offset in bytes) to insert computed checksum
+ * UDP: 1 = UDP checksum (zero checksum means no checksum)
+ * ENABLED: 1 = checksum computation requested
+ */
+#define MAP_CSUM_UL_OFFSET_MASK GENMASK(13, 0)
+#define MAP_CSUM_UL_UDP_FLAG BIT(14)
+#define MAP_CSUM_UL_ENABLED_FLAG BIT(15)
+
+/* MAP CSUM headers */
+struct rmnet_map_v5_csum_header {
+ u8 header_info;
+ u8 csum_info;
+ __be16 reserved;
+} __aligned(1);
+
+/* v5 header_info field
+ * NEXT_HEADER: represents whether there is any next header
+ * HEADER_TYPE: represents the type of this header
+ *
+ * csum_info field
+ * CSUM_VALID_OR_REQ:
+ * 1 = for UL, checksum computation is requested.
+ *	1 = for DL, the checksum was validated and found to be valid
+ */
+
+#define MAPV5_HDRINFO_NXT_HDR_FLAG BIT(0)
+#define MAPV5_HDRINFO_HDR_TYPE_FMASK GENMASK(7, 1)
+#define MAPV5_CSUMINFO_VALID_FLAG BIT(7)
+
+#define RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD 2
+#endif /* !(_LINUX_IF_RMNET_H_) */
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
index 4837157da0dc..553552fa635c 100644
--- a/include/linux/if_tap.h
+++ b/include/linux/if_tap.h
@@ -1,27 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IF_TAP_H_
#define _LINUX_IF_TAP_H_
+#include <net/sock.h>
+#include <linux/skb_array.h>
+
+struct file;
+struct socket;
+
#if IS_ENABLED(CONFIG_TAP)
struct socket *tap_get_socket(struct file *);
-struct skb_array *tap_get_skb_array(struct file *file);
+struct ptr_ring *tap_get_ptr_ring(struct file *file);
#else
#include <linux/err.h>
#include <linux/errno.h>
-struct file;
-struct socket;
static inline struct socket *tap_get_socket(struct file *f)
{
return ERR_PTR(-EINVAL);
}
-static inline struct skb_array *tap_get_skb_array(struct file *f)
+static inline struct ptr_ring *tap_get_ptr_ring(struct file *f)
{
return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_TAP */
-#include <net/sock.h>
-#include <linux/skb_array.h>
-
+/*
+ * Maximum times a tap device can be opened. This can be used to
+ * configure the number of receive queues, e.g. for multiqueue virtio.
+ */
#define MAX_TAP_QUEUES 256
struct tap_queue;
@@ -57,7 +63,6 @@ struct tap_dev {
struct tap_queue {
struct sock sk;
struct socket sock;
- struct socket_wq wq;
int vnet_hdr_sz;
struct tap_dev __rcu *tap;
struct file *file;
@@ -65,7 +70,7 @@ struct tap_queue {
u16 queue_index;
bool enabled;
struct list_head next;
- struct skb_array skb_array;
+ struct ptr_ring ring;
};
rx_handler_result_t tap_handle_frame(struct sk_buff **pskb);
@@ -73,8 +78,8 @@ void tap_del_queues(struct tap_dev *tap);
int tap_get_minor(dev_t major, struct tap_dev *tap);
void tap_free_minor(dev_t major, struct tap_dev *tap);
int tap_queue_resize(struct tap_dev *tap);
-int tap_create_cdev(struct cdev *tap_cdev,
- dev_t *tap_major, const char *device_name);
+int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
+ const char *device_name, struct module *module);
void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev);
#endif /*_LINUX_IF_TAP_H_*/
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 30294603526f..fc985e5c739d 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -1,11 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/if_team.h - Network team device driver header
* Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _LINUX_IF_TEAM_H_
#define _LINUX_IF_TEAM_H_
@@ -16,11 +12,11 @@
#include <uapi/linux/if_team.h>
struct team_pcpu_stats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 rx_multicast;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_multicast;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
u32 rx_dropped;
u32 tx_dropped;
@@ -71,9 +67,14 @@ struct team_port {
u16 queue_id;
struct list_head qom_list; /* node in queue override mapping list */
struct rcu_head rcu;
- long mode_priv[0];
+ long mode_priv[];
};
+static inline struct team_port *team_port_get_rcu(const struct net_device *dev)
+{
+ return rcu_dereference(dev->rx_handler_data);
+}
+
static inline bool team_port_enabled(struct team_port *port)
{
return port->index != -1;
@@ -84,14 +85,24 @@ static inline bool team_port_txable(struct team_port *port)
return port->linkup && team_port_enabled(port);
}
+static inline bool team_port_dev_txable(const struct net_device *port_dev)
+{
+ struct team_port *port;
+ bool txable;
+
+ rcu_read_lock();
+ port = team_port_get_rcu(port_dev);
+ txable = port ? team_port_txable(port) : false;
+ rcu_read_unlock();
+
+ return txable;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static inline void team_netpoll_send_skb(struct team_port *port,
struct sk_buff *skb)
{
- struct netpoll *np = port->np;
-
- if (np)
- netpoll_send_skb(np, skb);
+ netpoll_send_skb(port->np, skb);
}
#else
static inline void team_netpoll_send_skb(struct team_port *port,
@@ -209,6 +220,7 @@ struct team {
atomic_t count_pending;
struct delayed_work dw;
} mcast_rejoin;
+ struct lock_class_key team_lock_key;
long mode_priv[TEAM_MODE_PRIV_LONGS];
};
@@ -247,7 +259,7 @@ static inline struct team_port *team_get_port_by_index(struct team *team,
static inline int team_num_to_port_index(struct team *team, unsigned int num)
{
- int en_port_count = ACCESS_ONCE(team->en_port_count);
+ int en_port_count = READ_ONCE(team->en_port_count);
if (unlikely(!en_port_count))
return 0;
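
Note that the u64_stats_t conversion above changes how the counters are updated: they must go through the u64_stats accessors inside a syncp section. A sketch of the expected rx-path pattern, assuming the driver's usual team/skb variables:

	struct team_pcpu_stats *pcpu_stats = this_cpu_ptr(team->pcpu_stats);

	u64_stats_update_begin(&pcpu_stats->syncp);
	u64_stats_inc(&pcpu_stats->rx_packets);
	u64_stats_add(&pcpu_stats->rx_bytes, skb->len);
	u64_stats_update_end(&pcpu_stats->syncp);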
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index bf9bdf42d577..2a7660843444 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -1,25 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Universal TUN/TAP device driver.
* Copyright (C) 1999-2000 Maxim Krasnyansky <max_mk@yahoo.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __IF_TUN_H
#define __IF_TUN_H
#include <uapi/linux/if_tun.h>
+#include <uapi/linux/virtio_net.h>
+
+#define TUN_XDP_FLAG 0x1UL
+
+#define TUN_MSG_UBUF 1
+#define TUN_MSG_PTR 2
+struct tun_msg_ctl {
+ unsigned short type;
+ unsigned short num;
+ void *ptr;
+};
+
+struct tun_xdp_hdr {
+ int buflen;
+ struct virtio_net_hdr gso;
+};
#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
struct socket *tun_get_socket(struct file *);
-struct skb_array *tun_get_skb_array(struct file *file);
+struct ptr_ring *tun_get_tx_ring(struct file *file);
+static inline bool tun_is_xdp_frame(void *ptr)
+{
+ return (unsigned long)ptr & TUN_XDP_FLAG;
+}
+static inline void *tun_xdp_to_ptr(struct xdp_frame *xdp)
+{
+ return (void *)((unsigned long)xdp | TUN_XDP_FLAG);
+}
+static inline struct xdp_frame *tun_ptr_to_xdp(void *ptr)
+{
+ return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
+}
+void tun_ptr_free(void *ptr);
#else
#include <linux/err.h>
#include <linux/errno.h>
@@ -29,9 +49,24 @@ static inline struct socket *tun_get_socket(struct file *f)
{
return ERR_PTR(-EINVAL);
}
-static inline struct skb_array *tun_get_skb_array(struct file *f)
+static inline struct ptr_ring *tun_get_tx_ring(struct file *f)
{
return ERR_PTR(-EINVAL);
}
+static inline bool tun_is_xdp_frame(void *ptr)
+{
+ return false;
+}
+static inline void *tun_xdp_to_ptr(struct xdp_frame *xdp)
+{
+ return NULL;
+}
+static inline struct xdp_frame *tun_ptr_to_xdp(void *ptr)
+{
+ return NULL;
+}
+static inline void tun_ptr_free(void *ptr)
+{
+}
#endif /* CONFIG_TUN */
#endif /* __IF_TUN_H */
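
Since TUN_XDP_FLAG tags entries in the low bit of the ring pointer, a consumer draining the tx ring must untag before dereferencing. A minimal sketch, assuming a ring obtained via tun_get_tx_ring(); handle_xdp() and handle_skb() are hypothetical:

	void *ptr = ptr_ring_consume(ring);

	if (ptr) {
		if (tun_is_xdp_frame(ptr))
			handle_xdp(tun_ptr_to_xdp(ptr));
		else
			handle_skb((struct sk_buff *)ptr);
	}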
diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
index 712710bc0580..26606523eca4 100644
--- a/include/linux/if_tunnel.h
+++ b/include/linux/if_tunnel.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IF_TUNNEL_H_
#define _IF_TUNNEL_H_
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 5e6a2d4dc366..6ba71957851e 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -1,13 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* VLAN An implementation of 802.1Q VLAN tagging.
*
* Authors: Ben Greear <greearb@candelatech.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
*/
#ifndef _LINUX_IF_VLAN_H_
#define _LINUX_IF_VLAN_H_
@@ -30,6 +25,8 @@
#define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */
#define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */
+#define VLAN_MAX_DEPTH 8 /* Max. number of nested VLAN tags parsed */
+
/*
* struct vlan_hdr - vlan header
* @h_vlan_TCI: priority and VLAN ID
@@ -49,8 +46,10 @@ struct vlan_hdr {
* @h_vlan_encapsulated_proto: packet type ID or len
*/
struct vlan_ethhdr {
- unsigned char h_dest[ETH_ALEN];
- unsigned char h_source[ETH_ALEN];
+ struct_group(addrs,
+ unsigned char h_dest[ETH_ALEN];
+ unsigned char h_source[ETH_ALEN];
+ );
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
__be16 h_vlan_encapsulated_proto;
@@ -63,10 +62,17 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
return (struct vlan_ethhdr *)skb_mac_header(skb);
}
+/* Prefer this version in TX path, instead of
+ * skb_reset_mac_header() + vlan_eth_hdr()
+ */
+static inline struct vlan_ethhdr *skb_vlan_eth_hdr(const struct sk_buff *skb)
+{
+ return (struct vlan_ethhdr *)skb->data;
+}
+
#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT 13
-#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */
-#define VLAN_TAG_PRESENT VLAN_CFI_MASK
+#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */
#define VLAN_VID_MASK 0x0fff /* VLAN Identifier */
#define VLAN_N_VID 4096
@@ -78,10 +84,35 @@ static inline bool is_vlan_dev(const struct net_device *dev)
return dev->priv_flags & IFF_802_1Q_VLAN;
}
-#define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
-#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
+#define skb_vlan_tag_present(__skb) (!!(__skb)->vlan_all)
+#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci)
#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
-#define skb_vlan_tag_get_prio(__skb) ((__skb)->vlan_tci & VLAN_PRIO_MASK)
+#define skb_vlan_tag_get_cfi(__skb) (!!((__skb)->vlan_tci & VLAN_CFI_MASK))
+#define skb_vlan_tag_get_prio(__skb) (((__skb)->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT)
+
+static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ return notifier_to_errno(call_netdevice_notifiers(NETDEV_CVLAN_FILTER_PUSH_INFO, dev));
+}
+
+static inline void vlan_drop_rx_ctag_filter_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ call_netdevice_notifiers(NETDEV_CVLAN_FILTER_DROP_INFO, dev);
+}
+
+static inline int vlan_get_rx_stag_filter_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ return notifier_to_errno(call_netdevice_notifiers(NETDEV_SVLAN_FILTER_PUSH_INFO, dev));
+}
+
+static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ call_netdevice_notifiers(NETDEV_SVLAN_FILTER_DROP_INFO, dev);
+}
/**
* struct vlan_pcpu_stats - VLAN percpu rx/tx stats
@@ -95,11 +126,11 @@ static inline bool is_vlan_dev(const struct net_device *dev)
* @tx_dropped: number of tx drops
*/
struct vlan_pcpu_stats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 rx_multicast;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_multicast;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
u32 rx_errors;
u32 tx_dropped;
@@ -109,6 +140,9 @@ struct vlan_pcpu_stats {
extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id);
+extern int vlan_for_each(struct net_device *dev,
+ int (*action)(struct net_device *dev, int vid,
+ void *arg), void *arg);
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);
@@ -138,6 +172,7 @@ struct netpoll;
* @vlan_id: VLAN identifier
* @flags: device flags
* @real_dev: underlying netdevice
+ * @dev_tracker: refcount tracker for @real_dev reference
* @real_dev_addr: address of underlying netdevice
* @dent: proc dir entry
* @vlan_pcpu_stats: ptr to percpu rx stats
@@ -153,6 +188,8 @@ struct vlan_dev_priv {
u16 flags;
struct net_device *real_dev;
+ netdevice_tracker dev_tracker;
+
unsigned char real_dev_addr[ETH_ALEN];
struct proc_dir_entry *dent;
@@ -160,7 +197,6 @@ struct vlan_dev_priv {
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *netpoll;
#endif
- unsigned int nest_level;
};
static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
@@ -199,11 +235,6 @@ extern void vlan_vids_del_by_dev(struct net_device *dev,
extern bool vlan_uses_dev(const struct net_device *dev);
-static inline int vlan_get_encap_level(struct net_device *dev)
-{
- BUG_ON(!is_vlan_dev(dev));
- return vlan_dev_priv(dev)->nest_level;
-}
#else
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
@@ -212,6 +243,14 @@ __vlan_find_dev_deep_rcu(struct net_device *real_dev,
return NULL;
}
+static inline int
+vlan_for_each(struct net_device *dev,
+ int (*action)(struct net_device *dev, int vid, void *arg),
+ void *arg)
+{
+ return 0;
+}
+
static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
BUG();
@@ -265,11 +304,6 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
{
return false;
}
-static inline int vlan_get_encap_level(struct net_device *dev)
-{
- BUG();
- return 0;
-}
#endif
/**
@@ -300,32 +334,48 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features,
}
/**
- * __vlan_insert_tag - regular VLAN tag inserting
+ * __vlan_insert_inner_tag - inner VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
+ * @mac_len: MAC header length including outer vlan headers
*
- * Inserts the VLAN tag into @skb as part of the payload
- * Returns error if skb_cow_head failes.
+ * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
+ * Returns error if skb_cow_head fails.
*
* Does not change skb->protocol so this function can be used during receive.
*/
-static inline int __vlan_insert_tag(struct sk_buff *skb,
- __be16 vlan_proto, u16 vlan_tci)
+static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci,
+ unsigned int mac_len)
{
struct vlan_ethhdr *veth;
if (skb_cow_head(skb, VLAN_HLEN) < 0)
return -ENOMEM;
- veth = skb_push(skb, VLAN_HLEN);
+ skb_push(skb, VLAN_HLEN);
+
+ /* Move the mac header sans proto to the beginning of the new header. */
+ if (likely(mac_len > ETH_TLEN))
+ memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
+ if (skb_mac_header_was_set(skb))
+ skb->mac_header -= VLAN_HLEN;
- /* Move the mac addresses to the beginning of the new header. */
- memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
- skb->mac_header -= VLAN_HLEN;
+ veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);
/* first, the ethernet type */
- veth->h_vlan_proto = vlan_proto;
+ if (likely(mac_len >= ETH_TLEN)) {
+ /* h_vlan_encapsulated_proto should already be populated, and
+ * skb->data has space for h_vlan_proto
+ */
+ veth->h_vlan_proto = vlan_proto;
+ } else {
+ /* h_vlan_encapsulated_proto should not be populated, and
+ * skb->data has no space for h_vlan_proto
+ */
+ veth->h_vlan_encapsulated_proto = skb->protocol;
+ }
/* now, the TCI */
veth->h_vlan_TCI = htons(vlan_tci);
@@ -334,12 +384,30 @@ static inline int __vlan_insert_tag(struct sk_buff *skb,
}
/**
- * vlan_insert_tag - regular VLAN tag inserting
+ * __vlan_insert_tag - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
+ * Returns error if skb_cow_head fails.
+ *
+ * Does not change skb->protocol so this function can be used during receive.
+ */
+static inline int __vlan_insert_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci)
+{
+ return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
+}
+
+/**
+ * vlan_insert_inner_tag - inner VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ * @mac_len: MAC header length including outer vlan headers
+ *
+ * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
* Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
*
* Following the skb_unshare() example, in case of error, the calling function
@@ -347,12 +415,14 @@ static inline int __vlan_insert_tag(struct sk_buff *skb,
*
* Does not change skb->protocol so this function can be used during receive.
*/
-static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
- __be16 vlan_proto, u16 vlan_tci)
+static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
+ __be16 vlan_proto,
+ u16 vlan_tci,
+ unsigned int mac_len)
{
int err;
- err = __vlan_insert_tag(skb, vlan_proto, vlan_tci);
+ err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len);
if (err) {
dev_kfree_skb_any(skb);
return NULL;
@@ -361,6 +431,26 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
}
/**
+ * vlan_insert_tag - regular VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ *
+ * Inserts the VLAN tag into @skb as part of the payload
+ * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ *
+ * Does not change skb->protocol so this function can be used during receive.
+ */
+static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci)
+{
+ return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
+}
+
+/**
* vlan_insert_tag_set_proto - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
@@ -382,6 +472,29 @@ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
return skb;
}
+/**
+ * __vlan_hwaccel_clear_tag - clear hardware accelerated VLAN info
+ * @skb: skbuff to clear
+ *
+ * Clears the VLAN information from @skb
+ */
+static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
+{
+ skb->vlan_all = 0;
+}
+
+/**
+ * __vlan_hwaccel_copy_tag - copy hardware accelerated VLAN info from another skb
+ * @dst: skbuff to copy to
+ * @src: skbuff to copy from
+ *
+ * Copies VLAN information from @src to @dst (for branchless code)
+ */
+static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src)
+{
+ dst->vlan_all = src->vlan_all;
+}
+
/*
* __vlan_hwaccel_push_inside - pushes vlan tag to the payload
* @skb: skbuff to tag
@@ -396,7 +509,7 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
skb_vlan_tag_get(skb));
if (likely(skb))
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
return skb;
}
@@ -412,7 +525,7 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
{
skb->vlan_proto = vlan_proto;
- skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
+ skb->vlan_tci = vlan_tci;
}
/**
@@ -424,7 +537,7 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
*/
static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
- struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
+ struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
if (!eth_type_vlan(veth->h_vlan_proto))
return -EINVAL;
@@ -452,8 +565,6 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
}
}
-#define HAVE_VLAN_GET_TAG
-
/**
* vlan_get_tag - get the VLAN ID from the skb
* @skb: skbuff to query
@@ -479,10 +590,10 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
* Returns the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
-static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
+static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
int *depth)
{
- unsigned int vlan_depth = skb->mac_len;
+ unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;
/* if type is 802.1Q/AD then the header should already be
* present at mac_len - VLAN_HLEN (if mac_len > 0), or at
@@ -497,13 +608,12 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
vlan_depth = ETH_HLEN;
}
do {
- struct vlan_hdr *vh;
+ struct vlan_hdr vhdr, *vh;
- if (unlikely(!pskb_may_pull(skb,
- vlan_depth + VLAN_HLEN)))
+ vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr);
+ if (unlikely(!vh || !--parse_depth))
return 0;
- vh = (struct vlan_hdr *)(skb->data + vlan_depth);
type = vh->h_vlan_encapsulated_proto;
vlan_depth += VLAN_HLEN;
} while (eth_type_vlan(type));
@@ -522,11 +632,42 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
* Returns the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
-static inline __be16 vlan_get_protocol(struct sk_buff *skb)
+static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
{
return __vlan_get_protocol(skb, skb->protocol, NULL);
}
+/* This version of __vlan_get_protocol() also pulls mac header in skb->head */
+static inline __be16 vlan_get_protocol_and_depth(struct sk_buff *skb,
+ __be16 type, int *depth)
+{
+ int maclen;
+
+ type = __vlan_get_protocol(skb, type, &maclen);
+
+ if (type) {
+ if (!pskb_may_pull(skb, maclen))
+ type = 0;
+ else if (depth)
+ *depth = maclen;
+ }
+ return type;
+}
+
+/* A getter for the SKB protocol field which will handle VLAN tags consistently
+ * whether VLAN acceleration is enabled or not.
+ */
+static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan)
+{
+ if (!skip_vlan)
+ /* VLAN acceleration strips the VLAN header from the skb and
+ * moves it to skb->vlan_proto
+ */
+ return skb_vlan_tag_present(skb) ? skb->vlan_proto : skb->protocol;
+
+ return vlan_get_protocol(skb);
+}
+
static inline void vlan_set_encap_proto(struct sk_buff *skb,
struct vlan_hdr *vhdr)
{
@@ -562,6 +703,27 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
}
/**
+ * vlan_remove_tag - remove outer VLAN tag from payload
+ * @skb: skbuff to remove tag from
+ * @vlan_tci: buffer to store value
+ *
+ * Expects the skb to contain a VLAN tag in the payload, and to have skb->data
+ * pointing at the MAC header.
+ *
+ * Returns a new pointer to skb->data, or NULL on failure to pull.
+ */
+static inline void *vlan_remove_tag(struct sk_buff *skb, u16 *vlan_tci)
+{
+ struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
+
+ *vlan_tci = ntohs(vhdr->h_vlan_TCI);
+
+ memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
+ vlan_set_encap_proto(skb, vhdr);
+ return __skb_pull(skb, VLAN_HLEN);
+}
+
+/**
* skb_vlan_tagged - check if skb is vlan tagged.
* @skb: skbuff to query
*
@@ -584,7 +746,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb)
* Returns true if the skb is tagged with multiple vlan headers, regardless
* of whether it is hardware accelerated or not.
*/
-static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
{
__be16 protocol = skb->protocol;
@@ -594,7 +756,10 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
if (likely(!eth_type_vlan(protocol)))
return false;
- veh = (struct vlan_ethhdr *)skb->data;
+ if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
+ return false;
+
+ veh = skb_vlan_eth_hdr(skb);
protocol = veh->h_vlan_encapsulated_proto;
}
@@ -611,7 +776,7 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
*
* Returns features without unsafe ones if the skb has multiple tags.
*/
-static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
+static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
netdev_features_t features)
{
if (skb_vlan_tagged_multi(skb)) {
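
Of the helpers added above, skb_protocol() is the one most classifiers want, since it returns the same EtherType whether or not the tag was stripped by hardware acceleration. A hedged sketch (the predicate function is hypothetical):

	static bool skb_is_ipv4(const struct sk_buff *skb)
	{
		/* skip_vlan = true: look through VLAN tags to the payload */
		return skb_protocol(skb, true) == htons(ETH_P_IP);
	}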
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 97caf1821de8..ebf4349a53af 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Linux NET3: Internet Group Management Protocol [IGMP]
*
@@ -5,12 +6,6 @@
* Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* Extended to talk the BSD extended IGMP protocol of mrouted 3.6
- *
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_IGMP_H
#define _LINUX_IGMP_H
@@ -18,7 +13,9 @@
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/in.h>
+#include <linux/ip.h>
#include <linux/refcount.h>
+#include <linux/sockptr.h>
#include <uapi/linux/igmp.h>
static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb)
@@ -42,12 +39,9 @@ struct ip_sf_socklist {
unsigned int sl_max;
unsigned int sl_count;
struct rcu_head rcu;
- __be32 sl_addr[0];
+ __be32 sl_addr[];
};
-#define IP_SFLSIZE(count) (sizeof(struct ip_sf_socklist) + \
- (count) * sizeof(__be32))
-
#define IP_SFBLOCK 10 /* allocate this many at once */
/* ip_mc_socklist is real list now. Speed is not argument;
@@ -64,8 +58,8 @@ struct ip_mc_socklist {
struct ip_sf_list {
struct ip_sf_list *sf_next;
- __be32 sf_inaddr;
unsigned long sf_count[2]; /* include/exclude counts */
+ __be32 sf_inaddr;
unsigned char sf_gsresp; /* include in g & s response? */
unsigned char sf_oldin; /* change state */
unsigned char sf_crcount; /* retrans. left to send */
@@ -106,27 +100,44 @@ struct ip_mc_list {
#define IGMPV3_QQIC(value) IGMPV3_EXP(0x80, 4, 3, value)
#define IGMPV3_MRC(value) IGMPV3_EXP(0x80, 4, 3, value)
+static inline int ip_mc_may_pull(struct sk_buff *skb, unsigned int len)
+{
+ if (skb_transport_offset(skb) + ip_transport_len(skb) < len)
+ return 0;
+
+ return pskb_may_pull(skb, len);
+}
+
extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto);
extern int igmp_rcv(struct sk_buff *);
extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr);
+extern int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr,
+ unsigned int mode);
extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr);
extern void ip_mc_drop_socket(struct sock *sk);
extern int ip_mc_source(int add, int omode, struct sock *sk,
struct ip_mreq_source *mreqs, int ifindex);
extern int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf,int ifindex);
extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
- struct ip_msfilter __user *optval, int __user *optlen);
+ sockptr_t optval, sockptr_t optlen);
extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
- struct group_filter __user *optval, int __user *optlen);
-extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt, int dif);
+ sockptr_t optval, size_t offset);
+extern int ip_mc_sf_allow(const struct sock *sk, __be32 local, __be32 rmt,
+ int dif, int sdif);
extern void ip_mc_init_dev(struct in_device *);
extern void ip_mc_destroy_dev(struct in_device *);
extern void ip_mc_up(struct in_device *);
extern void ip_mc_down(struct in_device *);
extern void ip_mc_unmap(struct in_device *);
extern void ip_mc_remap(struct in_device *);
-extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
+extern void __ip_mc_dec_group(struct in_device *in_dev, __be32 addr, gfp_t gfp);
+static inline void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
+{
+ return __ip_mc_dec_group(in_dev, addr, GFP_KERNEL);
+}
+extern void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
+ gfp_t gfp);
extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
-int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed);
+int ip_mc_check_igmp(struct sk_buff *skb);
#endif
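
The new __ip_mc_inc_group()/__ip_mc_dec_group() variants exist so callers in atomic context can supply their own gfp_t; the inline wrappers keep the historical GFP_KERNEL behaviour. A one-line sketch under that assumption:

	__ip_mc_inc_group(in_dev, addr, GFP_ATOMIC);	/* cannot sleep here */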
diff --git a/include/linux/ihex.h b/include/linux/ihex.h
index 31d8629e75a1..b824877e6d1b 100644
--- a/include/linux/ihex.h
+++ b/include/linux/ihex.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Compact binary representation of ihex records. Some devices need their
* firmware loaded in strange orders rather than a single big blob, but
@@ -17,15 +18,27 @@
struct ihex_binrec {
__be32 addr;
__be16 len;
- uint8_t data[0];
+ uint8_t data[];
} __attribute__((packed));
+static inline uint16_t ihex_binrec_size(const struct ihex_binrec *p)
+{
+ return be16_to_cpu(p->len) + sizeof(*p);
+}
+
/* Find the next record, taking into account the 4-byte alignment */
static inline const struct ihex_binrec *
+__ihex_next_binrec(const struct ihex_binrec *rec)
+{
+ const void *p = rec;
+
+ return p + ALIGN(ihex_binrec_size(rec), 4);
+}
+
+static inline const struct ihex_binrec *
ihex_next_binrec(const struct ihex_binrec *rec)
{
- int next = ((be16_to_cpu(rec->len) + 5) & ~3) - 2;
- rec = (void *)&rec->data[next];
+ rec = __ihex_next_binrec(rec);
return be16_to_cpu(rec->len) ? rec : NULL;
}
@@ -33,18 +46,15 @@ ihex_next_binrec(const struct ihex_binrec *rec)
/* Check that ihex_next_binrec() won't take us off the end of the image... */
static inline int ihex_validate_fw(const struct firmware *fw)
{
- const struct ihex_binrec *rec;
- size_t ofs = 0;
+ const struct ihex_binrec *end, *rec;
- while (ofs <= fw->size - sizeof(*rec)) {
- rec = (void *)&fw->data[ofs];
+ rec = (const void *)fw->data;
+ end = (const void *)&fw->data[fw->size - sizeof(*end)];
+ for (; rec <= end; rec = __ihex_next_binrec(rec)) {
/* Zero length marks end of records */
- if (!be16_to_cpu(rec->len))
+ if (rec == end && !be16_to_cpu(rec->len))
return 0;
-
- /* Point to next record... */
- ofs += (sizeof(*rec) + be16_to_cpu(rec->len) + 3) & ~3;
}
return -EINVAL;
}
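
Putting the reworked helpers together, a firmware loading loop might look like the sketch below; request_ihex_firmware() performs the validation above, while write_block() is a hypothetical stand-in for the device-specific transfer:

	const struct ihex_binrec *rec;
	const struct firmware *fw;
	int ret;

	ret = request_ihex_firmware(&fw, "dev.fw", dev);
	if (ret)
		return ret;

	for (rec = (const void *)fw->data; rec; rec = ihex_next_binrec(rec))
		write_block(dev, be32_to_cpu(rec->addr), rec->data,
			    be16_to_cpu(rec->len));

	release_firmware(fw);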
diff --git a/include/linux/iio/accel/kxcjk_1013.h b/include/linux/iio/accel/kxcjk_1013.h
index fd1d540ea62d..ea0ecb774371 100644
--- a/include/linux/iio/accel/kxcjk_1013.h
+++ b/include/linux/iio/accel/kxcjk_1013.h
@@ -1,22 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* KXCJK-1013 3-axis accelerometer Interface
* Copyright (c) 2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef __IIO_KXCJK_1013_H__
#define __IIO_KXCJK_1013_H__
+#include <linux/iio/iio.h>
+
struct kxcjk_1013_platform_data {
bool active_high_intr;
+ struct iio_mount_matrix orientation;
};
#endif
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 5ba430cc9a87..7852f6c9a714 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Support code for Analog Devices Sigma-Delta ADCs
*
* Copyright 2012 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
- *
- * Licensed under the GPL-2.
*/
#ifndef __AD_SIGMA_DELTA_H__
#define __AD_SIGMA_DELTA_H__
@@ -27,26 +26,40 @@ struct ad_sd_calib_data {
};
struct ad_sigma_delta;
+struct device;
struct iio_dev;
/**
* struct ad_sigma_delta_info - Sigma Delta driver specific callbacks and options
* @set_channel: Will be called to select the current channel, may be NULL.
+ * @append_status: Will be called to enable status append at the end of the sample, may be NULL.
* @set_mode: Will be called to select the current mode, may be NULL.
+ * @disable_all: Will be called to disable all channels, may be NULL.
* @postprocess_sample: Is called for each sampled data word, can be used to
 *	modify or drop the sample data; may be NULL.
* @has_registers: true if the device has writable and readable registers, false
* if there is just one read-only sample data shift register.
* @addr_shift: Shift of the register address in the communications register.
* @read_mask: Mask for the communications register having the read bit set.
+ * @status_ch_mask: Mask for the channel number stored in status register.
+ * @data_reg: Address of the data register, if 0 the default address of 0x3 will
+ * be used.
+ * @irq_flags: flags for the interrupt used by the triggered buffer
+ * @num_slots: Number of sequencer slots
*/
struct ad_sigma_delta_info {
int (*set_channel)(struct ad_sigma_delta *, unsigned int channel);
+ int (*append_status)(struct ad_sigma_delta *, bool append);
int (*set_mode)(struct ad_sigma_delta *, enum ad_sigma_delta_mode mode);
+ int (*disable_all)(struct ad_sigma_delta *);
int (*postprocess_sample)(struct ad_sigma_delta *, unsigned int raw_sample);
bool has_registers;
unsigned int addr_shift;
unsigned int read_mask;
+ unsigned int status_ch_mask;
+ unsigned int data_reg;
+ unsigned long irq_flags;
+ unsigned int num_slots;
};
/**
@@ -66,16 +79,28 @@ struct ad_sigma_delta {
bool irq_dis;
bool bus_locked;
+ bool keep_cs_asserted;
uint8_t comm;
const struct ad_sigma_delta_info *info;
+ unsigned int active_slots;
+ unsigned int current_slot;
+ unsigned int num_slots;
+ bool status_appended;
+ /* map slots to channels in order to know what to expect from devices */
+ unsigned int *slots;
+ uint8_t *samples_buf;
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
+ * 'tx_buf' is up to 32 bits.
+ * 'rx_buf' is up to 32 bits per sample + 64 bit timestamp,
+ * rounded to 16 bytes to take into account padding.
*/
- uint8_t data[4] ____cacheline_aligned;
+ uint8_t tx_buf[4] ____cacheline_aligned;
+ uint8_t rx_buf[16] __aligned(8);
};
static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd,
@@ -87,6 +112,29 @@ static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd,
return 0;
}
+static inline int ad_sigma_delta_append_status(struct ad_sigma_delta *sd, bool append)
+{
+ int ret;
+
+ if (sd->info->append_status) {
+ ret = sd->info->append_status(sd, append);
+ if (ret < 0)
+ return ret;
+
+ sd->status_appended = append;
+ }
+
+ return 0;
+}
+
+static inline int ad_sigma_delta_disable_all(struct ad_sigma_delta *sd)
+{
+ if (sd->info->disable_all)
+ return sd->info->disable_all(sd);
+
+ return 0;
+}
+
static inline int ad_sigma_delta_set_mode(struct ad_sigma_delta *sd,
unsigned int mode)
{
@@ -111,64 +159,20 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
unsigned int size, unsigned int *val);
+int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
+ unsigned int reset_length);
+
int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, int *val);
+int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
+ unsigned int mode, unsigned int channel);
int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta,
const struct ad_sd_calib_data *cd, unsigned int n);
int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev,
struct spi_device *spi, const struct ad_sigma_delta_info *info);
-int ad_sd_setup_buffer_and_trigger(struct iio_dev *indio_dev);
-void ad_sd_cleanup_buffer_and_trigger(struct iio_dev *indio_dev);
+int devm_ad_sd_setup_buffer_and_trigger(struct device *dev, struct iio_dev *indio_dev);
int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);
-#define __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
- _storagebits, _shift, _extend_name, _type) \
- { \
- .type = (_type), \
- .differential = (_channel2 == -1 ? 0 : 1), \
- .indexed = 1, \
- .channel = (_channel1), \
- .channel2 = (_channel2), \
- .address = (_address), \
- .extend_name = (_extend_name), \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_OFFSET), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
- .scan_index = (_si), \
- .scan_type = { \
- .sign = 'u', \
- .realbits = (_bits), \
- .storagebits = (_storagebits), \
- .shift = (_shift), \
- .endianness = IIO_BE, \
- }, \
- }
-
-#define AD_SD_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
- _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
- _storagebits, _shift, NULL, IIO_VOLTAGE)
-
-#define AD_SD_SHORTED_CHANNEL(_si, _channel, _address, _bits, \
- _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, _channel, _channel, _address, _bits, \
- _storagebits, _shift, "shorted", IIO_VOLTAGE)
-
-#define AD_SD_CHANNEL(_si, _channel, _address, _bits, \
- _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
- _storagebits, _shift, NULL, IIO_VOLTAGE)
-
-#define AD_SD_TEMP_CHANNEL(_si, _address, _bits, _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, 0, -1, _address, _bits, \
- _storagebits, _shift, NULL, IIO_TEMP)
-
-#define AD_SD_SUPPLY_CHANNEL(_si, _channel, _address, _bits, _storagebits, \
- _shift) \
- __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
- _storagebits, _shift, "supply", IIO_VOLTAGE)
-
#endif
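
With the devres conversion above, a client driver's probe reduces to roughly the following sketch; my_sd_info is a hypothetical ad_sigma_delta_info, and st, indio_dev and spi come from the usual probe boilerplate:

	ret = ad_sd_init(&st->sd, indio_dev, spi, &my_sd_info);
	if (ret)
		return ret;

	ret = devm_ad_sd_setup_buffer_and_trigger(&spi->dev, indio_dev);
	if (ret)
		return ret;

	return devm_iio_device_register(&spi->dev, indio_dev);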
diff --git a/include/linux/iio/adc/adi-axi-adc.h b/include/linux/iio/adc/adi-axi-adc.h
new file mode 100644
index 000000000000..52620e5b8052
--- /dev/null
+++ b/include/linux/iio/adc/adi-axi-adc.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Analog Devices Generic AXI ADC IP core driver/library
+ * Link: https://wiki.analog.com/resources/fpga/docs/axi_adc_ip
+ *
+ * Copyright 2012-2020 Analog Devices Inc.
+ */
+#ifndef __ADI_AXI_ADC_H__
+#define __ADI_AXI_ADC_H__
+
+struct device;
+struct iio_chan_spec;
+
+/**
+ * struct adi_axi_adc_chip_info - Chip specific information
+ * @name: Chip name
+ * @id: Chip ID (usually product ID)
+ * @channels: Channel specifications of type @struct iio_chan_spec
+ * @num_channels: Number of @channels
+ * @scale_table: Supported scales by the chip; tuples of 2 ints
+ * @num_scales: Number of scales in the table
+ * @max_rate: Maximum sampling rate supported by the device
+ */
+struct adi_axi_adc_chip_info {
+ const char *name;
+ unsigned int id;
+
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+
+ const unsigned int (*scale_table)[2];
+ int num_scales;
+
+ unsigned long max_rate;
+};
+
+/**
+ * struct adi_axi_adc_conv - data of the ADC attached to the AXI ADC
+ * @chip_info: chip info details for the client ADC
+ * @preenable_setup: op to run in the client before enabling the AXI ADC
+ * @reg_access: IIO debugfs_reg_access hook for the client ADC
+ * @read_raw: IIO read_raw hook for the client ADC
+ * @write_raw: IIO write_raw hook for the client ADC
+ */
+struct adi_axi_adc_conv {
+ const struct adi_axi_adc_chip_info *chip_info;
+
+ int (*preenable_setup)(struct adi_axi_adc_conv *conv);
+ int (*reg_access)(struct adi_axi_adc_conv *conv, unsigned int reg,
+ unsigned int writeval, unsigned int *readval);
+ int (*read_raw)(struct adi_axi_adc_conv *conv,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask);
+ int (*write_raw)(struct adi_axi_adc_conv *conv,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask);
+};
+
+struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
+ size_t sizeof_priv);
+
+void *adi_axi_adc_conv_priv(struct adi_axi_adc_conv *conv);
+
+#endif
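
A hedged sketch of the client registration flow this header implies; struct my_state, my_chip_info and my_read_raw are hypothetical:

	struct adi_axi_adc_conv *conv;
	struct my_state *st;

	conv = devm_adi_axi_adc_conv_register(dev, sizeof(*st));
	if (IS_ERR(conv))
		return PTR_ERR(conv);

	st = adi_axi_adc_conv_priv(conv);	/* driver-private area */
	conv->chip_info = &my_chip_info;
	conv->read_raw = my_read_raw;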
diff --git a/include/linux/iio/adc/qcom-vadc-common.h b/include/linux/iio/adc/qcom-vadc-common.h
new file mode 100644
index 000000000000..aa21b032e861
--- /dev/null
+++ b/include/linux/iio/adc/qcom-vadc-common.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Code shared between the different Qualcomm PMIC voltage ADCs
+ */
+
+#ifndef QCOM_VADC_COMMON_H
+#define QCOM_VADC_COMMON_H
+
+#include <linux/math.h>
+#include <linux/types.h>
+
+#define VADC_CONV_TIME_MIN_US 2000
+#define VADC_CONV_TIME_MAX_US 2100
+
+/* Min ADC code represents 0V */
+#define VADC_MIN_ADC_CODE 0x6000
+/* Max ADC code represents full-scale range of 1.8V */
+#define VADC_MAX_ADC_CODE 0xa800
+
+#define VADC_ABSOLUTE_RANGE_UV 625000
+#define VADC_RATIOMETRIC_RANGE 1800
+
+#define VADC_DEF_PRESCALING 0 /* 1:1 */
+#define VADC_DEF_DECIMATION 0 /* 512 */
+#define VADC_DEF_HW_SETTLE_TIME 0 /* 0 us */
+#define VADC_DEF_AVG_SAMPLES 0 /* 1 sample */
+#define VADC_DEF_CALIB_TYPE VADC_CALIB_ABSOLUTE
+
+#define VADC_DECIMATION_MIN 512
+#define VADC_DECIMATION_MAX 4096
+#define ADC5_DEF_VBAT_PRESCALING 1 /* 1:3 */
+#define ADC5_DECIMATION_SHORT 250
+#define ADC5_DECIMATION_MEDIUM 420
+#define ADC5_DECIMATION_LONG 840
+/* Default decimation - 1024 for rev2, 840 for pmic5 */
+#define ADC5_DECIMATION_DEFAULT 2
+#define ADC5_DECIMATION_SAMPLES_MAX 3
+
+#define VADC_HW_SETTLE_DELAY_MAX 10000
+#define VADC_HW_SETTLE_SAMPLES_MAX 16
+#define VADC_AVG_SAMPLES_MAX 512
+#define ADC5_AVG_SAMPLES_MAX 16
+
+#define PMIC5_CHG_TEMP_SCALE_FACTOR 377500
+#define PMIC5_SMB_TEMP_CONSTANT 419400
+#define PMIC5_SMB_TEMP_SCALE_FACTOR 356
+
+#define PMI_CHG_SCALE_1 -138890
+#define PMI_CHG_SCALE_2 391750000000LL
+
+#define VADC5_MAX_CODE 0x7fff
+#define ADC5_FULL_SCALE_CODE 0x70e4
+#define ADC5_USR_DATA_CHECK 0x8000
+
+#define R_PU_100K 100000
+#define RATIO_MAX_ADC7 BIT(14)
+
+/*
+ * VADC_CALIB_ABSOLUTE: uses the 625mV and 1.25V as reference channels.
+ * VADC_CALIB_ABSOLUTE: uses the 625 mV and 1.25 V reference channels.
+ * calibration.
+ */
+enum vadc_calibration {
+ VADC_CALIB_ABSOLUTE = 0,
+ VADC_CALIB_RATIOMETRIC
+};
+
+/**
+ * struct vadc_linear_graph - Represent ADC characteristics.
+ * @dy: numerator slope to calculate the gain.
+ * @dx: denominator slope to calculate the gain.
+ * @gnd: A/D word of the ground reference used for the channel.
+ *
+ * Each ADC device has different offset and gain parameters which are
+ * computed to calibrate the device.
+ */
+struct vadc_linear_graph {
+ s32 dy;
+ s32 dx;
+ s32 gnd;
+};
+
+/**
+ * enum vadc_scale_fn_type - Scaling function to convert ADC code to
+ * physical scaled units for the channel.
+ * SCALE_DEFAULT: Default scaling to convert raw adc code to voltage (uV).
+ * SCALE_THERM_100K_PULLUP: Returns temperature in millidegC.
+ * Uses a mapping table with 100K pullup.
+ * SCALE_PMIC_THERM: Returns result in millidegrees Celsius.
+ * SCALE_XOTHERM: Returns XO thermistor voltage in millidegC.
+ * SCALE_PMI_CHG_TEMP: Conversion for PMI CHG temp
+ * SCALE_HW_CALIB_DEFAULT: Default scaling to convert raw adc code to
+ * voltage (uV) with hardware applied offset/slope values to adc code.
+ * SCALE_HW_CALIB_THERM_100K_PULLUP: Returns temperature in millidegC using
+ * lookup table. The hardware applies offset/slope to adc code.
+ * SCALE_HW_CALIB_XOTHERM: Returns XO thermistor voltage in millidegC using
+ * 100k pullup. The hardware applies offset/slope to adc code.
+ * SCALE_HW_CALIB_THERM_100K_PU_PM7: Returns temperature in millidegC using
+ * lookup table for PMIC7. The hardware applies offset/slope to adc code.
+ * SCALE_HW_CALIB_PMIC_THERM: Returns result in millidegrees Celsius.
+ *	The hardware applies offset/slope to adc code.
+ * SCALE_HW_CALIB_PMIC_THERM_PM7: Returns result in millidegrees Celsius.
+ *	The hardware applies offset/slope to adc code. This is for PMIC7.
+ * SCALE_HW_CALIB_PM5_CHG_TEMP: Returns result in millidegrees for PMIC5
+ * charger temperature.
+ * SCALE_HW_CALIB_PM5_SMB_TEMP: Returns result in millidegrees for PMIC5
+ * SMB1390 temperature.
+ */
+enum vadc_scale_fn_type {
+ SCALE_DEFAULT = 0,
+ SCALE_THERM_100K_PULLUP,
+ SCALE_PMIC_THERM,
+ SCALE_XOTHERM,
+ SCALE_PMI_CHG_TEMP,
+ SCALE_HW_CALIB_DEFAULT,
+ SCALE_HW_CALIB_THERM_100K_PULLUP,
+ SCALE_HW_CALIB_XOTHERM,
+ SCALE_HW_CALIB_THERM_100K_PU_PM7,
+ SCALE_HW_CALIB_PMIC_THERM,
+ SCALE_HW_CALIB_PMIC_THERM_PM7,
+ SCALE_HW_CALIB_PM5_CHG_TEMP,
+ SCALE_HW_CALIB_PM5_SMB_TEMP,
+ SCALE_HW_CALIB_INVALID,
+};
+
+struct adc5_data {
+ const u32 full_scale_code_volt;
+ const u32 full_scale_code_cur;
+ const struct adc5_channels *adc_chans;
+ const struct iio_info *info;
+ unsigned int *decimation;
+ unsigned int *hw_settle_1;
+ unsigned int *hw_settle_2;
+};
+
+int qcom_vadc_scale(enum vadc_scale_fn_type scaletype,
+ const struct vadc_linear_graph *calib_graph,
+ const struct u32_fract *prescale,
+ bool absolute,
+ u16 adc_code, int *result_mdec);
+
+struct qcom_adc5_scale_type {
+ int (*scale_fn)(const struct u32_fract *prescale,
+ const struct adc5_data *data, u16 adc_code, int *result);
+};
+
+int qcom_adc5_hw_scale(enum vadc_scale_fn_type scaletype,
+ unsigned int prescale_ratio,
+ const struct adc5_data *data,
+ u16 adc_code, int *result_mdec);
+
+u16 qcom_adc_tm5_temp_volt_scale(unsigned int prescale_ratio,
+ u32 full_scale_code_volt, int temp);
+
+u16 qcom_adc_tm5_gen2_temp_res_scale(int temp);
+
+int qcom_adc5_prescaling_from_dt(u32 num, u32 den);
+
+int qcom_adc5_hw_settle_time_from_dt(u32 value, const unsigned int *hw_settle);
+
+int qcom_adc5_avg_samples_from_dt(u32 value);
+
+int qcom_adc5_decimation_from_dt(u32 value, const unsigned int *decimation);
+
+int qcom_vadc_decimation_from_dt(u32 value);
+
+#endif /* QCOM_VADC_COMMON_H */
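
For scale, a sketch of converting a PMIC thermistor code with qcom_vadc_scale(); calib_graph, prescale and adc_code are assumed to come from the driver's channel setup:

	int temp_mdec, ret;

	ret = qcom_vadc_scale(SCALE_PMIC_THERM, &calib_graph, &prescale,
			      true /* absolute calibration */,
			      adc_code, &temp_mdec);
	if (ret)
		return ret;
	/* temp_mdec now holds millidegrees Celsius */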
diff --git a/include/linux/iio/adc/stm32-dfsdm-adc.h b/include/linux/iio/adc/stm32-dfsdm-adc.h
new file mode 100644
index 000000000000..0da298b41737
--- /dev/null
+++ b/include/linux/iio/adc/stm32-dfsdm-adc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file describes the STM32 DFSDM IIO driver API for the audio part
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#ifndef STM32_DFSDM_ADC_H
+#define STM32_DFSDM_ADC_H
+
+#include <linux/iio/iio.h>
+
+int stm32_dfsdm_get_buff_cb(struct iio_dev *iio_dev,
+ int (*cb)(const void *data, size_t size,
+ void *private),
+ void *private);
+int stm32_dfsdm_release_buff_cb(struct iio_dev *iio_dev);
+
+#endif
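
A minimal sketch of the callback flow this API expects; the callback body and the priv pointer are hypothetical:

	static int audio_cb(const void *data, size_t size, void *private)
	{
		/* hand the filtered samples to the audio layer */
		return 0;
	}

	/* ...in the consumer's setup path: */
	ret = stm32_dfsdm_get_buff_cb(iio_dev, audio_cb, priv);
	if (ret)
		return ret;
	/* and stm32_dfsdm_release_buff_cb(iio_dev) on teardown */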
diff --git a/include/linux/iio/afe/rescale.h b/include/linux/iio/afe/rescale.h
new file mode 100644
index 000000000000..6eecb435488f
--- /dev/null
+++ b/include/linux/iio/afe/rescale.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2018 Axentia Technologies AB
+ */
+
+#ifndef __IIO_RESCALE_H__
+#define __IIO_RESCALE_H__
+
+#include <linux/types.h>
+#include <linux/iio/iio.h>
+
+struct device;
+struct rescale;
+
+struct rescale_cfg {
+ enum iio_chan_type type;
+ int (*props)(struct device *dev, struct rescale *rescale);
+};
+
+struct rescale {
+ const struct rescale_cfg *cfg;
+ struct iio_channel *source;
+ struct iio_chan_spec chan;
+ struct iio_chan_spec_ext_info *ext_info;
+ bool chan_processed;
+ s32 numerator;
+ s32 denominator;
+ s32 offset;
+};
+
+int rescale_process_scale(struct rescale *rescale, int scale_type,
+ int *val, int *val2);
+int rescale_process_offset(struct rescale *rescale, int scale_type,
+ int scale, int scale2, int schan_off,
+ int *val, int *val2);
+#endif /* __IIO_RESCALE_H__ */
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
index 767467d886de..6564bdcdac66 100644
--- a/include/linux/iio/buffer-dma.h
+++ b/include/linux/iio/buffer-dma.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2013-2015 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
- *
- * Licensed under the GPL-2.
*/
#ifndef __INDUSTRIALIO_DMA_BUFFER_H__
@@ -12,17 +11,12 @@
#include <linux/kref.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
-#include <linux/iio/buffer.h>
+#include <linux/iio/buffer_impl.h>
struct iio_dma_buffer_queue;
struct iio_dma_buffer_ops;
struct device;
-struct iio_buffer_block {
- u32 size;
- u32 bytes_used;
-};
-
/**
* enum iio_block_state - State of a struct iio_dma_buffer_block
* @IIO_BLOCK_STATE_DEQUEUED: Block is not queued
@@ -141,7 +135,7 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
char __user *user_buffer);
size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
-int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length);
+int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
int iio_dma_buffer_request_update(struct iio_buffer *buffer);
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h
index 5dcddf427bb0..5c355be89814 100644
--- a/include/linux/iio/buffer-dmaengine.h
+++ b/include/linux/iio/buffer-dmaengine.h
@@ -1,18 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2014-2015 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef __IIO_DMAENGINE_H__
#define __IIO_DMAENGINE_H__
-struct iio_buffer;
+struct iio_dev;
struct device;
-struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
- const char *channel);
-void iio_dmaengine_buffer_free(struct iio_buffer *buffer);
+int devm_iio_dmaengine_buffer_setup(struct device *dev,
+ struct iio_dev *indio_dev,
+ const char *channel);
#endif
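
The alloc/free pair collapses into a single devres call; a sketch of the new usage from a probe path, assuming a DMA channel named "rx":

	ret = devm_iio_dmaengine_buffer_setup(dev, indio_dev, "rx");
	if (ret)
		return ret;
	/* the buffer is attached to indio_dev and released automatically */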
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index 48767c776119..418b1307d3f2 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* The industrial I/O core - generic buffer interfaces.
*
* Copyright (c) 2008 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef _IIO_BUFFER_GENERIC_H_
@@ -14,11 +11,15 @@
struct iio_buffer;
-void iio_buffer_set_attrs(struct iio_buffer *buffer,
- const struct attribute **attrs);
+enum iio_buffer_direction {
+ IIO_BUFFER_DIRECTION_IN,
+ IIO_BUFFER_DIRECTION_OUT,
+};
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data);
+int iio_pop_from_buffer(struct iio_buffer *buffer, void *data);
+
/**
* iio_push_to_buffers_with_timestamp() - push data and timestamp to buffers
* @indio_dev: iio_dev structure for device.
@@ -44,10 +45,14 @@ static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
return iio_push_to_buffers(indio_dev, data);
}
+int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
+ const void *data, size_t data_sz,
+ int64_t timestamp);
+
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
const unsigned long *mask);
-void iio_device_attach_buffer(struct iio_dev *indio_dev,
- struct iio_buffer *buffer);
+int iio_device_attach_buffer(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer);
#endif /* _IIO_BUFFER_GENERIC_H_ */
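
For the unaligned push variant added above, a hedged sketch; data and data_sz are whatever packed scan buffer the driver owns:

	ret = iio_push_to_buffers_with_ts_unaligned(indio_dev, data, data_sz,
						    iio_get_time_ns(indio_dev));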
diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
index 8daba198fafa..89c3fd7c29ca 100644
--- a/include/linux/iio/buffer_impl.h
+++ b/include/linux/iio/buffer_impl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IIO_BUFFER_GENERIC_IMPL_H_
#define _IIO_BUFFER_GENERIC_IMPL_H_
#include <linux/sysfs.h>
@@ -5,6 +6,9 @@
#ifdef CONFIG_IIO_BUFFER
+#include <uapi/linux/iio/buffer.h>
+#include <linux/iio/buffer.h>
+
struct iio_dev;
struct iio_buffer;
@@ -17,9 +21,13 @@ struct iio_buffer;
/**
* struct iio_buffer_access_funcs - access functions for buffers.
* @store_to: actually store stuff to the buffer
- * @read_first_n: try to get a specified number of bytes (must exist)
+ * @read: try to get a specified number of bytes (must exist)
* @data_available: indicates how much data is available for reading from
* the buffer.
+ * @remove_from: remove scan from buffer. Drivers should call this to
+ * remove a scan from a buffer.
+ * @write: try to write a number of bytes
+ * @space_available: returns the amount of bytes available in a buffer
* @request_update: if a parameter change has been marked, update underlying
* storage.
 * @set_bytes_per_datum: set number of bytes per datum
@@ -44,15 +52,16 @@ struct iio_buffer;
**/
struct iio_buffer_access_funcs {
int (*store_to)(struct iio_buffer *buffer, const void *data);
- int (*read_first_n)(struct iio_buffer *buffer,
- size_t n,
- char __user *buf);
+ int (*read)(struct iio_buffer *buffer, size_t n, char __user *buf);
size_t (*data_available)(struct iio_buffer *buffer);
+ int (*remove_from)(struct iio_buffer *buffer, void *data);
+ int (*write)(struct iio_buffer *buffer, size_t n, const char __user *buf);
+ size_t (*space_available)(struct iio_buffer *buffer);
int (*request_update)(struct iio_buffer *buffer);
int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
- int (*set_length)(struct iio_buffer *buffer, int length);
+ int (*set_length)(struct iio_buffer *buffer, unsigned int length);
int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
@@ -71,10 +80,16 @@ struct iio_buffer_access_funcs {
*/
struct iio_buffer {
/** @length: Number of datums in buffer. */
- int length;
+ unsigned int length;
+
+ /** @flags: File ops flags including busy flag. */
+ unsigned long flags;
/** @bytes_per_datum: Size of individual datum including timestamp. */
- int bytes_per_datum;
+ size_t bytes_per_datum;
+
+	/** @direction: Direction of the data stream (in/out). */
+ enum iio_buffer_direction direction;
/**
* @access: Buffer access functions associated with the
@@ -95,36 +110,27 @@ struct iio_buffer {
unsigned int watermark;
/* private: */
- /*
- * @scan_el_attrs: Control of scan elements if that scan mode
- * control method is used.
- */
- struct attribute_group *scan_el_attrs;
-
/* @scan_timestamp: Does the scan mode include a timestamp. */
bool scan_timestamp;
- /* @scan_el_dev_attr_list: List of scan element related attributes. */
- struct list_head scan_el_dev_attr_list;
-
- /* @buffer_group: Attributes of the buffer group. */
- struct attribute_group buffer_group;
+ /* @buffer_attr_list: List of buffer attributes. */
+ struct list_head buffer_attr_list;
/*
- * @scan_el_group: Attribute group for those attributes not
- * created from the iio_chan_info array.
+ * @buffer_group: Attributes of the new buffer group.
+ * Includes scan elements attributes.
*/
- struct attribute_group scan_el_group;
-
- /* @stufftoread: Flag to indicate new data. */
- bool stufftoread;
+ struct attribute_group buffer_group;
/* @attrs: Standard attributes of the buffer. */
- const struct attribute **attrs;
+ const struct iio_dev_attr **attrs;
/* @demux_bounce: Buffer for doing gather from incoming scan. */
void *demux_bounce;
+ /* @attached_entry: Entry in the devices list of buffers attached by the driver. */
+ struct list_head attached_entry;
+
/* @buffer_list: Entry in the devices list of current buffers. */
struct list_head buffer_list;
diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h
new file mode 100644
index 000000000000..e72167b96d27
--- /dev/null
+++ b/include/linux/iio/common/cros_ec_sensors_core.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ChromeOS EC sensor hub
+ *
+ * Copyright (C) 2016 Google, Inc
+ */
+
+#ifndef __CROS_EC_SENSORS_CORE_H
+#define __CROS_EC_SENSORS_CORE_H
+
+#include <linux/iio/iio.h>
+#include <linux/irqreturn.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_ec_sensorhub.h>
+
+enum {
+ CROS_EC_SENSOR_X,
+ CROS_EC_SENSOR_Y,
+ CROS_EC_SENSOR_Z,
+ CROS_EC_SENSOR_MAX_AXIS,
+};
+
+/* EC returns sensor values using signed 16 bit registers */
+#define CROS_EC_SENSOR_BITS 16
+
+/*
+ * 4 16 bit channels are allowed.
+ * Good enough for current sensors, they use up to 3 16 bit vectors.
+ */
+#define CROS_EC_SAMPLE_SIZE (sizeof(s64) * 2)
+
+typedef irqreturn_t (*cros_ec_sensors_capture_t)(int irq, void *p);
+
+/**
+ * struct cros_ec_sensors_core_state - state data for EC sensors IIO driver
+ * @ec: cros EC device structure
+ * @cmd_lock: lock used to prevent simultaneous access to the
+ * commands.
+ * @msg: cros EC command structure
+ * @param: motion sensor parameters structure
+ * @resp: motion sensor response structure
+ * @type: type of motion sensor
+ * @range_updated: True if the range of the sensor has been
+ * updated.
+ * @curr_range: If updated, the current range value.
+ * It will be reapplied at every resume.
+ * @calib: calibration parameters. Note that trigger
+ * captured data will always provide the calibrated
+ * data
+ * @samples: static array to hold data from a single capture.
+ * For each channel we need 2 bytes, except for
+ * the timestamp. The timestamp is always last and
+ * is always 8-byte aligned.
+ * @read_ec_sensors_data: function used for accessing sensors values
+ * @fifo_max_event_count: Size of the EC sensor FIFO
+ * @frequencies: Table of known available frequencies:
+ * 0, Min and Max in mHz
+ */
+struct cros_ec_sensors_core_state {
+ struct cros_ec_device *ec;
+ struct mutex cmd_lock;
+
+ struct cros_ec_command *msg;
+ struct ec_params_motion_sense param;
+ struct ec_response_motion_sense *resp;
+
+ enum motionsensor_type type;
+
+ bool range_updated;
+ int curr_range;
+
+ struct calib_data {
+ s16 offset;
+ u16 scale;
+ } calib[CROS_EC_SENSOR_MAX_AXIS];
+ s8 sign[CROS_EC_SENSOR_MAX_AXIS];
+ u8 samples[CROS_EC_SAMPLE_SIZE] __aligned(8);
+
+ int (*read_ec_sensors_data)(struct iio_dev *indio_dev,
+ unsigned long scan_mask, s16 *data);
+
+ u32 fifo_max_event_count;
+ int frequencies[6];
+};
+
+int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev, unsigned long scan_mask,
+ s16 *data);
+
+int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, unsigned long scan_mask,
+ s16 *data);
+
+struct platform_device;
+int cros_ec_sensors_core_init(struct platform_device *pdev,
+ struct iio_dev *indio_dev, bool physical_device,
+ cros_ec_sensors_capture_t trigger_capture);
+
+int cros_ec_sensors_core_register(struct device *dev,
+ struct iio_dev *indio_dev,
+ cros_ec_sensorhub_push_data_cb_t push_data);
+
+irqreturn_t cros_ec_sensors_capture(int irq, void *p);
+int cros_ec_sensors_push_data(struct iio_dev *indio_dev,
+ s16 *data,
+ s64 timestamp);
+
+int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *st,
+ u16 opt_length);
+
+int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask);
+
+int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals,
+ int *type,
+ int *length,
+ long mask);
+
+int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask);
+
+extern const struct dev_pm_ops cros_ec_sensors_pm_ops;
+
+/* List of extended channel specification for all sensors. */
+extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[];
+
+#endif /* __CROS_EC_SENSORS_CORE_H */
diff --git a/include/linux/iio/common/ssp_sensors.h b/include/linux/iio/common/ssp_sensors.h
index f4d1b0edb432..06c9b4b563b3 100644
--- a/include/linux/iio/common/ssp_sensors.h
+++ b/include/linux/iio/common/ssp_sensors.h
@@ -1,16 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2014, Samsung Electronics Co. Ltd. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef _SSP_SENSORS_H_
#define _SSP_SENSORS_H_
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 497f2b3a5a62..f5f3ee57bc70 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -1,11 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* STMicroelectronics sensors library driver
*
* Copyright 2012-2013 STMicroelectronics Inc.
*
* Denis Ciocca <denis.ciocca@st.com>
- *
- * Licensed under the GPL-2.
*/
#ifndef ST_SENSORS_H
@@ -14,14 +13,22 @@
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/irqreturn.h>
+#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
#include <linux/platform_data/st_sensors_pdata.h>
-#define ST_SENSORS_TX_MAX_LENGTH 2
-#define ST_SENSORS_RX_MAX_LENGTH 6
+#define LSM9DS0_IMU_DEV_NAME "lsm9ds0"
+
+/*
+ * Buffer size max case: 2 bytes per channel, 3 channels in total +
+ * 8 bytes timestamp channel (s64): ALIGN(2 * 3, 8) + 8 = 16 bytes.
+ */
+#define ST_SENSORS_MAX_BUFFER_SIZE (ALIGN(2 * 3, sizeof(s64)) + \
+ sizeof(s64))
#define ST_SENSORS_ODR_LIST_MAX 10
#define ST_SENSORS_FULLSCALE_AVL_MAX 10
@@ -40,10 +47,10 @@
#define ST_SENSORS_DEFAULT_STAT_ADDR 0x27
#define ST_SENSORS_MAX_NAME 17
-#define ST_SENSORS_MAX_4WAI 7
+#define ST_SENSORS_MAX_4WAI 8
-#define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \
- ch2, s, endian, rbits, sbits, addr) \
+#define ST_SENSORS_LSM_CHANNELS_EXT(device_type, mask, index, mod, \
+ ch2, s, endian, rbits, sbits, addr, ext) \
{ \
.type = device_type, \
.modified = mod, \
@@ -59,8 +66,14 @@
.storagebits = sbits, \
.endianness = endian, \
}, \
+ .ext_info = ext, \
}
+#define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \
+ ch2, s, endian, rbits, sbits, addr) \
+ ST_SENSORS_LSM_CHANNELS_EXT(device_type, mask, index, mod, \
+ ch2, s, endian, rbits, sbits, addr, NULL)
+
#define ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL() \
IIO_DEV_ATTR_SAMP_FREQ_AVAIL( \
st_sensors_sysfs_sampling_frequency_avail)
@@ -105,6 +118,11 @@ struct st_sensor_fullscale {
struct st_sensor_fullscale_avl fs_avl[ST_SENSORS_FULLSCALE_AVL_MAX];
};
+struct st_sensor_sim {
+ u8 addr;
+ u8 value;
+};
+
/**
* struct st_sensor_bdu - ST sensor device block data update
* @addr: address of the register.
@@ -126,28 +144,39 @@ struct st_sensor_das {
};
/**
+ * struct st_sensor_int_drdy - ST sensor device drdy line parameters
+ * @addr: address of INT drdy register.
+ * @mask: mask to enable drdy line.
+ * @addr_od: address to enable/disable Open Drain on the INT line.
+ * @mask_od: mask to enable/disable Open Drain on the INT line.
+ */
+struct st_sensor_int_drdy {
+ u8 addr;
+ u8 mask;
+ u8 addr_od;
+ u8 mask_od;
+};
+
+/**
* struct st_sensor_data_ready_irq - ST sensor device data-ready interrupt
- * @addr: address of the register.
- * @mask_int1: mask to enable/disable IRQ on INT1 pin.
- * @mask_int2: mask to enable/disable IRQ on INT2 pin.
+ * @int1: data-ready configuration register for INT1 pin.
+ * @int2: data-ready configuration register for INT2 pin.
* @addr_ihl: address to enable/disable active low on the INT lines.
* @mask_ihl: mask to enable/disable active low on the INT lines.
- * @addr_od: address to enable/disable Open Drain on the INT lines.
- * @mask_od: mask to enable/disable Open Drain on the INT lines.
- * @addr_stat_drdy: address to read status of DRDY (data ready) interrupt
+ * @stat_drdy: status register of DRDY (data ready) interrupt.
* struct ig1 - represents the Interrupt Generator 1 of sensors.
* @en_addr: address of the enable ig1 register.
* @en_mask: mask to write the on/off value for enable.
*/
struct st_sensor_data_ready_irq {
- u8 addr;
- u8 mask_int1;
- u8 mask_int2;
+ struct st_sensor_int_drdy int1;
+ struct st_sensor_int_drdy int2;
u8 addr_ihl;
u8 mask_ihl;
- u8 addr_od;
- u8 mask_od;
- u8 addr_stat_drdy;
+ struct {
+ u8 addr;
+ u8 mask;
+ } stat_drdy;
struct {
u8 en_addr;
u8 en_mask;
@@ -155,36 +184,6 @@ struct st_sensor_data_ready_irq {
};
/**
- * struct st_sensor_transfer_buffer - ST sensor device I/O buffer
- * @buf_lock: Mutex to protect rx and tx buffers.
- * @tx_buf: Buffer used by SPI transfer function to send data to the sensors.
- * This buffer is used to avoid DMA not-aligned issue.
- * @rx_buf: Buffer used by SPI transfer to receive data from sensors.
- * This buffer is used to avoid DMA not-aligned issue.
- */
-struct st_sensor_transfer_buffer {
- struct mutex buf_lock;
- u8 rx_buf[ST_SENSORS_RX_MAX_LENGTH];
- u8 tx_buf[ST_SENSORS_TX_MAX_LENGTH] ____cacheline_aligned;
-};
-
-/**
- * struct st_sensor_transfer_function - ST sensor device I/O function
- * @read_byte: Function used to read one byte.
- * @write_byte: Function used to write one byte.
- * @read_multiple_byte: Function used to read multiple byte.
- */
-struct st_sensor_transfer_function {
- int (*read_byte) (struct st_sensor_transfer_buffer *tb,
- struct device *dev, u8 reg_addr, u8 *res_byte);
- int (*write_byte) (struct st_sensor_transfer_buffer *tb,
- struct device *dev, u8 reg_addr, u8 data);
- int (*read_multiple_byte) (struct st_sensor_transfer_buffer *tb,
- struct device *dev, u8 reg_addr, int len, u8 *data,
- bool multiread_bit);
-};
-
-/**
* struct st_sensor_settings - ST specific sensor settings
* @wai: Contents of WhoAmI register.
* @wai_addr: The address of WhoAmI register.
@@ -197,6 +196,7 @@ struct st_sensor_transfer_function {
* @bdu: Block data update register.
* @das: Data Alignment Selection register.
* @drdy_irq: Data ready register of the sensor.
+ * @sim: SPI serial interface mode register of the sensor.
* @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read.
* @bootime: samples to discard when sensor passing from power-down to power-up.
*/
@@ -213,59 +213,53 @@ struct st_sensor_settings {
struct st_sensor_bdu bdu;
struct st_sensor_das das;
struct st_sensor_data_ready_irq drdy_irq;
+ struct st_sensor_sim sim;
bool multi_read_bit;
unsigned int bootime;
};
/**
* struct st_sensor_data - ST sensor device status
- * @dev: Pointer to instance of struct device (I2C or SPI).
* @trig: The trigger in use by the core driver.
+ * @mount_matrix: The mounting matrix of the sensor.
* @sensor_settings: Pointer to the specific sensor settings in use.
* @current_fullscale: Maximum range of measure by the sensor.
- * @vdd: Pointer to sensor's Vdd power supply
- * @vdd_io: Pointer to sensor's Vdd-IO power supply
+ * @regmap: Pointer to specific sensor regmap configuration.
* @enabled: Status of the sensor (false->off, true->on).
- * @multiread_bit: Use or not particular bit for [I2C/SPI] multiread.
- * @buffer_data: Data used by buffer part.
* @odr: Output data rate of the sensor [Hz].
* num_data_channels: Number of data channels used in buffer.
* @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2).
* @int_pin_open_drain: Set the interrupt/DRDY to open drain.
- * @get_irq_data_ready: Function to get the IRQ used for data ready signal.
- * @tf: Transfer function structure used by I/O operations.
- * @tb: Transfer buffers and mutex used by I/O operations.
+ * @irq: the IRQ number.
* @edge_irq: the IRQ triggers on edges and need special handling.
* @hw_irq_trigger: if we're using the hardware interrupt on the sensor.
* @hw_timestamp: Latest timestamp from the interrupt handler, when in use.
+ * @buffer_data: Data used by buffer part.
+ * @odr_lock: Local lock for preventing concurrent ODR accesses/changes
*/
struct st_sensor_data {
- struct device *dev;
struct iio_trigger *trig;
+ struct iio_mount_matrix mount_matrix;
struct st_sensor_settings *sensor_settings;
struct st_sensor_fullscale_avl *current_fullscale;
- struct regulator *vdd;
- struct regulator *vdd_io;
+ struct regmap *regmap;
bool enabled;
- bool multiread_bit;
-
- char *buffer_data;
unsigned int odr;
unsigned int num_data_channels;
u8 drdy_int_pin;
bool int_pin_open_drain;
-
- unsigned int (*get_irq_data_ready) (struct iio_dev *indio_dev);
-
- const struct st_sensor_transfer_function *tf;
- struct st_sensor_transfer_buffer tb;
+ int irq;
bool edge_irq;
bool hw_irq_trigger;
s64 hw_timestamp;
+
+ char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] ____cacheline_aligned;
+
+ struct mutex odr_lock;
};
#ifdef CONFIG_IIO_BUFFER
@@ -276,7 +270,6 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p);
int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
const struct iio_trigger_ops *trigger_ops);
-void st_sensors_deallocate_trigger(struct iio_dev *indio_dev);
int st_sensors_validate_device(struct iio_trigger *trig,
struct iio_dev *indio_dev);
#else
@@ -285,10 +278,6 @@ static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
{
return 0;
}
-static inline void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
-{
- return;
-}
#define st_sensors_validate_device NULL
#endif
@@ -301,8 +290,6 @@ int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable);
int st_sensors_power_enable(struct iio_dev *indio_dev);
-void st_sensors_power_disable(struct iio_dev *indio_dev);
-
int st_sensors_debugfs_reg_access(struct iio_dev *indio_dev,
unsigned reg, unsigned writeval,
unsigned *readval);
@@ -316,8 +303,11 @@ int st_sensors_set_fullscale_by_gain(struct iio_dev *indio_dev, int scale);
int st_sensors_read_info_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *ch, int *val);
-int st_sensors_check_device_support(struct iio_dev *indio_dev,
- int num_sensors_list, const struct st_sensor_settings *sensor_settings);
+int st_sensors_get_settings_index(const char *name,
+ const struct st_sensor_settings *list,
+ const int list_length);
+
+int st_sensors_verify_id(struct iio_dev *indio_dev);
ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
struct device_attribute *attr, char *buf);
@@ -325,4 +315,22 @@ ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
struct device_attribute *attr, char *buf);
+void st_sensors_dev_name_probe(struct device *dev, char *name, int len);
+
+/* Accelerometer */
+const struct st_sensor_settings *st_accel_get_settings(const char *name);
+int st_accel_common_probe(struct iio_dev *indio_dev);
+
+/* Gyroscope */
+const struct st_sensor_settings *st_gyro_get_settings(const char *name);
+int st_gyro_common_probe(struct iio_dev *indio_dev);
+
+/* Magnetometer */
+const struct st_sensor_settings *st_magn_get_settings(const char *name);
+int st_magn_common_probe(struct iio_dev *indio_dev);
+
+/* Pressure */
+const struct st_sensor_settings *st_press_get_settings(const char *name);
+int st_press_common_probe(struct iio_dev *indio_dev);
+
#endif /* ST_SENSORS_H */
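Putting the per-type lookup and common-probe helpers together, a hypothetical I2C accelerometer front-end could look like the sketch below, modeled loosely on the in-tree ST drivers; all demo_* names are assumptions, and the cast away from const matches how the existing drivers store the settings pointer.

#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/iio/common/st_sensors.h>
#include <linux/iio/common/st_sensors_i2c.h>

static int demo_accel_i2c_probe(struct i2c_client *client)
{
	const struct st_sensor_settings *settings;
	struct st_sensor_data *data;
	struct iio_dev *indio_dev;
	char name[ST_SENSORS_MAX_NAME];
	int ret;

	/* Resolve the device name (e.g. from firmware) before the lookup. */
	st_sensors_dev_name_probe(&client->dev, name, sizeof(name));

	settings = st_accel_get_settings(name);
	if (!settings)
		return -ENODEV;

	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
	if (!indio_dev)
		return -ENOMEM;

	data = iio_priv(indio_dev);
	data->sensor_settings = (struct st_sensor_settings *)settings;

	/* Now returns an error code; see the st_sensors_i2c.h change below. */
	ret = st_sensors_i2c_configure(indio_dev, client);
	if (ret)
		return ret;

	return st_accel_common_probe(indio_dev);
}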
diff --git a/include/linux/iio/common/st_sensors_i2c.h b/include/linux/iio/common/st_sensors_i2c.h
index 254de3c7dde8..5f15cf01036c 100644
--- a/include/linux/iio/common/st_sensors_i2c.h
+++ b/include/linux/iio/common/st_sensors_i2c.h
@@ -1,11 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* STMicroelectronics sensors i2c library driver
*
* Copyright 2012-2013 STMicroelectronics Inc.
*
* Denis Ciocca <denis.ciocca@st.com>
- *
- * Licensed under the GPL-2.
*/
#ifndef ST_SENSORS_I2C_H
@@ -13,28 +12,8 @@
#include <linux/i2c.h>
#include <linux/iio/common/st_sensors.h>
-#include <linux/of.h>
-
-void st_sensors_i2c_configure(struct iio_dev *indio_dev,
- struct i2c_client *client, struct st_sensor_data *sdata);
-
-#ifdef CONFIG_OF
-void st_sensors_of_i2c_probe(struct i2c_client *client,
- const struct of_device_id *match);
-#else
-static inline void st_sensors_of_i2c_probe(struct i2c_client *client,
- const struct of_device_id *match)
-{
-}
-#endif
-#ifdef CONFIG_ACPI
-int st_sensors_match_acpi_device(struct device *dev);
-#else
-static inline int st_sensors_match_acpi_device(struct device *dev)
-{
- return -ENODEV;
-}
-#endif
+int st_sensors_i2c_configure(struct iio_dev *indio_dev,
+ struct i2c_client *client);
#endif /* ST_SENSORS_I2C_H */
diff --git a/include/linux/iio/common/st_sensors_spi.h b/include/linux/iio/common/st_sensors_spi.h
index d964a3563dc6..90b25f087f06 100644
--- a/include/linux/iio/common/st_sensors_spi.h
+++ b/include/linux/iio/common/st_sensors_spi.h
@@ -1,11 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* STMicroelectronics sensors spi library driver
*
* Copyright 2012-2013 STMicroelectronics Inc.
*
* Denis Ciocca <denis.ciocca@st.com>
- *
- * Licensed under the GPL-2.
*/
#ifndef ST_SENSORS_SPI_H
@@ -14,7 +13,7 @@
#include <linux/spi/spi.h>
#include <linux/iio/common/st_sensors.h>
-void st_sensors_spi_configure(struct iio_dev *indio_dev,
- struct spi_device *spi, struct st_sensor_data *sdata);
+int st_sensors_spi_configure(struct iio_dev *indio_dev,
+ struct spi_device *spi);
#endif /* ST_SENSORS_SPI_H */
diff --git a/include/linux/iio/configfs.h b/include/linux/iio/configfs.h
index 93befd67c15c..84cab3f47e80 100644
--- a/include/linux/iio/configfs.h
+++ b/include/linux/iio/configfs.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Industrial I/O configfs support
*
* Copyright (c) 2015 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef __IIO_CONFIGFS
#define __IIO_CONFIGFS
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 5e347a9805fd..6802596b017c 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Industrial I/O in kernel consumer interface
*
* Copyright (c) 2011 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef _IIO_INKERN_CONSUMER_H_
#define _IIO_INKERN_CONSUMER_H_
@@ -16,6 +13,7 @@
struct iio_dev;
struct iio_chan_spec;
struct device;
+struct fwnode_handle;
/**
* struct iio_channel - everything needed for a consumer to use a channel
@@ -67,15 +65,6 @@ void iio_channel_release(struct iio_channel *chan);
struct iio_channel *devm_iio_channel_get(struct device *dev,
const char *consumer_channel);
/**
- * devm_iio_channel_release() - Resource managed version of
- * iio_channel_release().
- * @dev: Pointer to consumer device for which resource
- * is allocared.
- * @chan: The channel to be released.
- */
-void devm_iio_channel_release(struct device *dev, struct iio_channel *chan);
-
-/**
* iio_channel_get_all() - get all channels associated with a client
* @dev: Pointer to consumer device.
*
@@ -110,13 +99,33 @@ void iio_channel_release_all(struct iio_channel *chan);
struct iio_channel *devm_iio_channel_get_all(struct device *dev);
/**
- * devm_iio_channel_release_all() - Resource managed version of
- * iio_channel_release_all().
- * @dev: Pointer to consumer device for which resource
- * is allocared.
- * @chan: Array channel to be released.
+ * fwnode_iio_channel_get_by_name() - get a description of everything needed to access a channel.
+ * @fwnode: Pointer to consumer Firmware node
+ * @name: Unique name to identify the channel on the consumer
+ * side. This typically describes the channel's use within
+ * the consumer. E.g. 'battery_voltage'
*/
-void devm_iio_channel_release_all(struct device *dev, struct iio_channel *chan);
+struct iio_channel *fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode,
+ const char *name);
+
+/**
+ * devm_fwnode_iio_channel_get_by_name() - Resource managed version of
+ * fwnode_iio_channel_get_by_name().
+ * @dev: Pointer to consumer device.
+ * @fwnode: Pointer to consumer Firmware node
+ * @consumer_channel: Unique name to identify the channel on the consumer
+ * side. This typically describes the channel's use within
+ * the consumer. E.g. 'battery_voltage'
+ *
+ * Returns a valid pointer to the IIO channel on success, or an ERR_PTR()
+ * encoded negative errno if the channel cannot be obtained.
+ *
+ * The allocated iio channel is automatically released when the device is
+ * unbound.
+ */
+struct iio_channel *devm_fwnode_iio_channel_get_by_name(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *consumer_channel);
struct iio_cb_buffer;
/**
@@ -134,6 +143,17 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
void *private),
void *private);
/**
+ * iio_channel_cb_set_buffer_watermark() - set the buffer watermark.
+ * @cb_buffer: The callback buffer from whom we want the channel
+ * information.
+ * @watermark: buffer watermark in bytes.
+ *
+ * This function configures the buffer watermark.
+ */
+int iio_channel_cb_set_buffer_watermark(struct iio_cb_buffer *cb_buffer,
+ size_t watermark);
+
+/**
* iio_channel_release_all_cb() - release and unregister the callback.
* @cb_buffer: The callback buffer that was allocated.
*/
@@ -216,6 +236,47 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val);
int iio_read_channel_processed(struct iio_channel *chan, int *val);
/**
+ * iio_read_channel_processed_scale() - read and scale a processed value
+ * @chan: The channel being queried.
+ * @val: Value read back.
+ * @scale: Scale factor to apply during the conversion
+ *
+ * Returns an error code or 0.
+ *
+ * This function will read a processed value from a channel. This will work
+ * like @iio_read_channel_processed() but also scale with an additional
+ * scale factor while attempting to minimize any precision loss.
+ */
+int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
+ unsigned int scale);
+
+/**
+ * iio_write_channel_attribute() - Write values to the device attribute.
+ * @chan: The channel being queried.
+ * @val: Value being written.
+ * @val2: Value being written. val2 use depends on attribute type.
+ * @attribute: info attribute to be written.
+ *
+ * Returns an error code or 0.
+ */
+int iio_write_channel_attribute(struct iio_channel *chan, int val,
+ int val2, enum iio_chan_info_enum attribute);
+
+/**
+ * iio_read_channel_attribute() - Read values from the device attribute.
+ * @chan: The channel being queried.
+ * @val: Value read back.
+ * @val2: Value read back. val2 use depends on attribute type.
+ * @attribute: info attribute to be read.
+ *
+ * Returns a negative error code on failure, otherwise a description of what
+ * is in val and val2, such as IIO_VAL_INT_PLUS_MICRO telling us we have a
+ * value of val + val2/1e6
+ */
+int iio_read_channel_attribute(struct iio_channel *chan, int *val,
+ int *val2, enum iio_chan_info_enum attribute);
+
+/**
* iio_write_channel_raw() - write to a given channel
* @chan: The channel being queried.
* @val: Value being written.
@@ -254,6 +315,20 @@ int iio_read_avail_channel_raw(struct iio_channel *chan,
const int **vals, int *length);
/**
+ * iio_read_avail_channel_attribute() - read available channel attribute values
+ * @chan: The channel being queried.
+ * @vals: Available values read back.
+ * @type: Type of values read back.
+ * @length: Number of entries in vals.
+ * @attribute: info attribute to be read back.
+ *
+ * Returns an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST.
+ */
+int iio_read_avail_channel_attribute(struct iio_channel *chan,
+ const int **vals, int *type, int *length,
+ enum iio_chan_info_enum attribute);
+
+/**
* iio_get_channel_type() - get the type of a channel
* @channel: The channel being queried.
* @type: The type of the channel.
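As a consumer-side illustration of the additions above, the sketch below reads a processed channel and scales it by 1000 (for example, mV to uV); the channel name and function name are hypothetical.

#include <linux/err.h>
#include <linux/iio/consumer.h>

static int demo_read_battery_uv(struct device *dev, int *uv)
{
	struct iio_channel *chan;
	int ret;

	chan = devm_iio_channel_get(dev, "battery_voltage");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Scale the processed value while minimizing precision loss. */
	ret = iio_read_channel_processed_scale(chan, uv, 1000);

	return ret < 0 ? ret : 0;
}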
diff --git a/include/linux/iio/dac/ad5421.h b/include/linux/iio/dac/ad5421.h
index 8fd8f057a890..d8ee9a7f8a6a 100644
--- a/include/linux/iio/dac/ad5421.h
+++ b/include/linux/iio/dac/ad5421.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IIO_DAC_AD5421_H__
#define __IIO_DAC_AD5421_H__
diff --git a/include/linux/iio/dac/ad5504.h b/include/linux/iio/dac/ad5504.h
index 43895376a9ca..9f23c90486ee 100644
--- a/include/linux/iio/dac/ad5504.h
+++ b/include/linux/iio/dac/ad5504.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* AD5504 SPI DAC driver
*
* Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#ifndef SPI_AD5504_H_
diff --git a/include/linux/iio/dac/ad5791.h b/include/linux/iio/dac/ad5791.h
index 45ee281c6660..02966553f71e 100644
--- a/include/linux/iio/dac/ad5791.h
+++ b/include/linux/iio/dac/ad5791.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* AD5791 SPI DAC driver
*
* Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#ifndef SPI_AD5791_H_
diff --git a/include/linux/iio/dac/max517.h b/include/linux/iio/dac/max517.h
index 7668716cd73c..4923645a18fd 100644
--- a/include/linux/iio/dac/max517.h
+++ b/include/linux/iio/dac/max517.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* MAX517 DAC driver
*
* Copyright 2011 Roland Stigge <stigge@antcom.de>
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef IIO_DAC_MAX517_H_
#define IIO_DAC_MAX517_H_
diff --git a/include/linux/iio/dac/mcp4725.h b/include/linux/iio/dac/mcp4725.h
index 628b2cf54c50..1f7e53c506b6 100644
--- a/include/linux/iio/dac/mcp4725.h
+++ b/include/linux/iio/dac/mcp4725.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* MCP4725 DAC driver
*
* Copyright (C) 2012 Peter Meerwald <pmeerw@pmeerw.net>
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef IIO_DAC_MCP4725_H_
@@ -16,7 +15,7 @@
* @vref_buffered: Controls buffering of the external reference voltage.
*
* Vref related settings are available only on MCP4756. See
- * Documentation/devicetree/bindings/iio/dac/mcp4725.txt for more information.
+ * Documentation/devicetree/bindings/iio/dac/microchip,mcp4725.yaml for more information.
*/
struct mcp4725_platform_data {
bool use_vref;
diff --git a/include/linux/iio/driver.h b/include/linux/iio/driver.h
index 7dfb10ee2669..7a157ed218f6 100644
--- a/include/linux/iio/driver.h
+++ b/include/linux/iio/driver.h
@@ -1,16 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Industrial I/O in kernel access map interface.
*
* Copyright (c) 2011 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef _IIO_INKERN_H_
#define _IIO_INKERN_H_
+struct device;
+struct iio_dev;
struct iio_map;
/**
@@ -28,4 +27,17 @@ int iio_map_array_register(struct iio_dev *indio_dev,
*/
int iio_map_array_unregister(struct iio_dev *indio_dev);
+/**
+ * devm_iio_map_array_register - device-managed version of iio_map_array_register
+ * @dev: Device object to which to bind the unwinding of this registration
+ * @indio_dev: Pointer to the iio_dev structure
+ * @maps: Pointer to an IIO map object which is to be registered to this IIO device
+ *
+ * This function will call iio_map_array_register() to register an IIO map object
+ * and will also hook a callback to the iio_map_array_unregister() function to
+ * handle de-registration of the IIO map object when the device's refcount goes to
+ * zero.
+ */
+int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev, struct iio_map *maps);
+
#endif
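A short sketch of how a provider driver might use the new devm variant, assuming a hypothetical two-channel ADC; IIO_MAP() comes from linux/iio/machine.h and the demo_* names are placeholders.

#include <linux/iio/driver.h>
#include <linux/iio/machine.h>

/* Hypothetical mapping of provider channels to consumer channel names. */
static struct iio_map demo_adc_maps[] = {
	IIO_MAP("adc0", "demo-consumer", "battery_voltage"),
	IIO_MAP("adc1", "demo-consumer", "charger_current"),
	{ }
};

static int demo_adc_register_maps(struct device *dev,
				  struct iio_dev *indio_dev)
{
	/* Unregistered automatically when dev's resources are released. */
	return devm_iio_map_array_register(dev, indio_dev, demo_adc_maps);
}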
diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h
index 8ad87d1c5340..a4558c45a548 100644
--- a/include/linux/iio/events.h
+++ b/include/linux/iio/events.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* The industrial I/O - event passing to userspace
*
* Copyright (c) 2008-2011 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef _IIO_EVENTS_H_
#define _IIO_EVENTS_H_
diff --git a/include/linux/iio/frequency/ad9523.h b/include/linux/iio/frequency/ad9523.h
index 12ce3ee427fd..ff22a0ac15f5 100644
--- a/include/linux/iio/frequency/ad9523.h
+++ b/include/linux/iio/frequency/ad9523.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* AD9523 SPI Low Jitter Clock Generator
*
* Copyright 2012 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#ifndef IIO_FREQUENCY_AD9523_H_
@@ -129,8 +128,8 @@ enum cpole1_capacitor {
* @pll2_ndiv_b_cnt: PLL2 Feedback N-divider, B Counter, range 0..63.
* @pll2_freq_doubler_en: PLL2 frequency doubler enable.
* @pll2_r2_div: PLL2 R2 divider, range 0..31.
- * @pll2_vco_diff_m1: VCO1 divider, range 3..5.
- * @pll2_vco_diff_m2: VCO2 divider, range 3..5.
+ * @pll2_vco_div_m1: VCO1 divider, range 3..5.
+ * @pll2_vco_div_m2: VCO2 divider, range 3..5.
* @rpole2: PLL2 loop filter Rpole resistor value.
* @rzero: PLL2 loop filter Rzero resistor value.
* @cpole1: PLL2 loop filter Cpole capacitor value.
@@ -176,8 +175,8 @@ struct ad9523_platform_data {
unsigned char pll2_ndiv_b_cnt;
bool pll2_freq_doubler_en;
unsigned char pll2_r2_div;
- unsigned char pll2_vco_diff_m1; /* 3..5 */
- unsigned char pll2_vco_diff_m2; /* 3..5 */
+ unsigned char pll2_vco_div_m1; /* 3..5 */
+ unsigned char pll2_vco_div_m2; /* 3..5 */
/* Loop Filter PLL2 */
enum rpole2_resistor rpole2;
diff --git a/include/linux/iio/frequency/adf4350.h b/include/linux/iio/frequency/adf4350.h
index ffd8c8f90928..de45cf2ee1e4 100644
--- a/include/linux/iio/frequency/adf4350.h
+++ b/include/linux/iio/frequency/adf4350.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ADF4350/ADF4351 SPI PLL driver
*
* Copyright 2012-2013 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#ifndef IIO_PLL_ADF4350_H_
@@ -104,9 +103,6 @@
* @r2_user_settings: User defined settings for ADF4350/1 REGISTER_2.
* @r3_user_settings: User defined settings for ADF4350/1 REGISTER_3.
* @r4_user_settings: User defined settings for ADF4350/1 REGISTER_4.
- * @gpio_lock_detect: Optional, if set with a valid GPIO number,
- * pll lock state is tested upon read.
- * If not used - set to -1.
*/
struct adf4350_platform_data {
@@ -122,7 +118,6 @@ struct adf4350_platform_data {
unsigned r2_user_settings;
unsigned r3_user_settings;
unsigned r4_user_settings;
- int gpio_lock_detect;
};
#endif /* IIO_PLL_ADF4350_H_ */
diff --git a/include/linux/iio/gyro/itg3200.h b/include/linux/iio/gyro/itg3200.h
index 2a820850f284..74b6d1cadc86 100644
--- a/include/linux/iio/gyro/itg3200.h
+++ b/include/linux/iio/gyro/itg3200.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* itg3200.h -- support InvenSense ITG3200
* Digital 3-Axis Gyroscope driver
@@ -5,10 +6,6 @@
* Copyright (c) 2011 Christian Strobel <christian.strobel@iis.fraunhofer.de>
* Copyright (c) 2011 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
* Copyright (c) 2012 Thorsten Nowak <thorsten.nowak@iis.fraunhofer.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef I2C_ITG3200_H_
@@ -104,6 +101,9 @@
struct itg3200 {
struct i2c_client *i2c;
struct iio_trigger *trig;
+ struct iio_mount_matrix orientation;
+ /* lock to protect against multiple access to the device */
+ struct mutex lock;
};
enum ITG3200_SCAN_INDEX {
diff --git a/include/linux/iio/hw-consumer.h b/include/linux/iio/hw-consumer.h
new file mode 100644
index 000000000000..e8255c2e33bc
--- /dev/null
+++ b/include/linux/iio/hw-consumer.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Industrial I/O in kernel hardware consumer interface
+ *
+ * Copyright 2017 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#ifndef LINUX_IIO_HW_CONSUMER_H
+#define LINUX_IIO_HW_CONSUMER_H
+
+struct iio_hw_consumer;
+
+struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev);
+void iio_hw_consumer_free(struct iio_hw_consumer *hwc);
+struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev);
+int iio_hw_consumer_enable(struct iio_hw_consumer *hwc);
+void iio_hw_consumer_disable(struct iio_hw_consumer *hwc);
+
+#endif
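The hardware-consumer API is small enough that one hedged usage sketch covers it: a hypothetical front-end device enables the hardware-routed consumer around a conversion. The demo_* name is an assumption.

#include <linux/err.h>
#include <linux/iio/hw-consumer.h>

static int demo_run_hw_conversion(struct device *dev)
{
	struct iio_hw_consumer *hwc;
	int ret;

	hwc = devm_iio_hw_consumer_alloc(dev);
	if (IS_ERR(hwc))
		return PTR_ERR(hwc);

	ret = iio_hw_consumer_enable(hwc);
	if (ret)
		return ret;

	/* ... start conversions on the hardware path here ... */

	iio_hw_consumer_disable(hwc);
	return 0;
}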
diff --git a/include/linux/iio/iio-gts-helper.h b/include/linux/iio/iio-gts-helper.h
new file mode 100644
index 000000000000..dd64e544a3da
--- /dev/null
+++ b/include/linux/iio/iio-gts-helper.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* gain-time-scale conversion helpers for IIO light sensors
+ *
+ * Copyright (c) 2023 Matti Vaittinen <mazziesaccount@gmail.com>
+ */
+
+#ifndef __IIO_GTS_HELPER__
+#define __IIO_GTS_HELPER__
+
+#include <linux/types.h>
+
+struct device;
+
+/**
+ * struct iio_gain_sel_pair - gain - selector values
+ *
+ * In many cases devices like light sensors allow setting signal amplification
+ * (gain) using a register interface. This structure describes amplification
+ * and corresponding selector (register value)
+ *
+ * @gain:	Gain (multiplication) value. Gain must be positive; negative
+ *		values are reserved for error handling.
+ * @sel: Selector (usually register value) used to indicate this gain.
+ * NOTE: Only selectors >= 0 supported.
+ */
+struct iio_gain_sel_pair {
+ int gain;
+ int sel;
+};
+
+/**
+ * struct iio_itime_sel_mul - integration time description
+ *
+ * In many cases devices like light sensors allow setting the duration of
+ * collecting data. Typically this duration has also an impact to the magnitude
+ * of measured values (gain). This structure describes the relation of
+ * integration time and amplification as well as corresponding selector
+ * (register value).
+ *
+ * An example could be a sensor allowing 50, 100, 200 and 400 ms integration
+ * times. The respective multiplication values could be 50 ms => 1,
+ * 100 ms => 2, 200 ms => 4 and 400 ms => 8, assuming the impact of the
+ * integration time is linear, i.e. if collecting data for 50 ms yields
+ * value X, doubling the collection time yields 2X, and so on.
+ *
+ * @time_us:	Integration time in microseconds. Time values must be positive;
+ *		negative values are reserved for error handling.
+ * @sel: Selector (usually register value) used to indicate this time
+ * NOTE: Only selectors >= 0 supported.
+ * @mul: Multiplication to the values caused by this time.
+ * NOTE: Only multipliers > 0 supported.
+ */
+struct iio_itime_sel_mul {
+ int time_us;
+ int sel;
+ int mul;
+};
+
+struct iio_gts {
+ u64 max_scale;
+ const struct iio_gain_sel_pair *hwgain_table;
+ int num_hwgain;
+ const struct iio_itime_sel_mul *itime_table;
+ int num_itime;
+ int **per_time_avail_scale_tables;
+ int *avail_all_scales_table;
+ int num_avail_all_scales;
+ int *avail_time_tables;
+ int num_avail_time_tables;
+};
+
+#define GAIN_SCALE_GAIN(_gain, _sel) \
+{ \
+ .gain = (_gain), \
+ .sel = (_sel), \
+}
+
+#define GAIN_SCALE_ITIME_US(_itime, _sel, _mul) \
+{ \
+ .time_us = (_itime), \
+ .sel = (_sel), \
+ .mul = (_mul), \
+}
+
+static inline const struct iio_itime_sel_mul *
+iio_gts_find_itime_by_time(struct iio_gts *gts, int time)
+{
+ int i;
+
+ if (!gts->num_itime)
+ return NULL;
+
+ for (i = 0; i < gts->num_itime; i++)
+ if (gts->itime_table[i].time_us == time)
+ return &gts->itime_table[i];
+
+ return NULL;
+}
+
+static inline const struct iio_itime_sel_mul *
+iio_gts_find_itime_by_sel(struct iio_gts *gts, int sel)
+{
+ int i;
+
+ for (i = 0; i < gts->num_itime; i++)
+ if (gts->itime_table[i].sel == sel)
+ return &gts->itime_table[i];
+
+ return NULL;
+}
+
+int devm_iio_init_iio_gts(struct device *dev, int max_scale_int, int max_scale_nano,
+ const struct iio_gain_sel_pair *gain_tbl, int num_gain,
+ const struct iio_itime_sel_mul *tim_tbl, int num_times,
+ struct iio_gts *gts);
+/**
+ * iio_gts_find_int_time_by_sel - find integration time matching a selector
+ * @gts: Gain time scale descriptor
+ * @sel: selector for which matching integration time is searched for
+ *
+ * Return: integration time matching given selector or -EINVAL if
+ * integration time was not found.
+ */
+static inline int iio_gts_find_int_time_by_sel(struct iio_gts *gts, int sel)
+{
+ const struct iio_itime_sel_mul *itime;
+
+ itime = iio_gts_find_itime_by_sel(gts, sel);
+ if (!itime)
+ return -EINVAL;
+
+ return itime->time_us;
+}
+
+/**
+ * iio_gts_find_sel_by_int_time - find selector matching integration time
+ * @gts: Gain time scale descriptor
+ * @time: Integration time for which the matching selector is searched for
+ *
+ * Return: a selector matching given integration time or -EINVAL if
+ * selector was not found.
+ */
+static inline int iio_gts_find_sel_by_int_time(struct iio_gts *gts, int time)
+{
+ const struct iio_itime_sel_mul *itime;
+
+ itime = iio_gts_find_itime_by_time(gts, time);
+ if (!itime)
+ return -EINVAL;
+
+ return itime->sel;
+}
+
+/**
+ * iio_gts_valid_time - check if given integration time is valid
+ * @gts: Gain time scale descriptor
+ * @time_us: Integration time to check
+ *
+ * Return: True if given time is supported by device. False if not.
+ */
+static inline bool iio_gts_valid_time(struct iio_gts *gts, int time_us)
+{
+ return iio_gts_find_itime_by_time(gts, time_us) != NULL;
+}
+
+int iio_gts_find_sel_by_gain(struct iio_gts *gts, int gain);
+
+/**
+ * iio_gts_valid_gain - check if given HW-gain is valid
+ * @gts: Gain time scale descriptor
+ * @gain: HW-gain to check
+ *
+ * Return: True if given gain is supported by the device. False if not.
+ */
+static inline bool iio_gts_valid_gain(struct iio_gts *gts, int gain)
+{
+ return iio_gts_find_sel_by_gain(gts, gain) >= 0;
+}
+
+int iio_find_closest_gain_low(struct iio_gts *gts, int gain, bool *in_range);
+int iio_gts_find_gain_by_sel(struct iio_gts *gts, int sel);
+int iio_gts_get_min_gain(struct iio_gts *gts);
+int iio_gts_find_int_time_by_sel(struct iio_gts *gts, int sel);
+int iio_gts_find_sel_by_int_time(struct iio_gts *gts, int time);
+
+int iio_gts_total_gain_to_scale(struct iio_gts *gts, int total_gain,
+ int *scale_int, int *scale_nano);
+int iio_gts_find_gain_sel_for_scale_using_time(struct iio_gts *gts, int time_sel,
+ int scale_int, int scale_nano,
+ int *gain_sel);
+int iio_gts_get_scale(struct iio_gts *gts, int gain, int time, int *scale_int,
+ int *scale_nano);
+int iio_gts_find_new_gain_sel_by_old_gain_time(struct iio_gts *gts,
+ int old_gain, int old_time_sel,
+ int new_time_sel, int *new_gain);
+int iio_gts_find_new_gain_by_old_gain_time(struct iio_gts *gts, int old_gain,
+ int old_time, int new_time,
+ int *new_gain);
+int iio_gts_avail_times(struct iio_gts *gts, const int **vals, int *type,
+ int *length);
+int iio_gts_all_avail_scales(struct iio_gts *gts, const int **vals, int *type,
+ int *length);
+int iio_gts_avail_scales_for_time(struct iio_gts *gts, int time,
+ const int **vals, int *type, int *length);
+
+#endif
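Tying the pieces together, a hypothetical light sensor with gains x1/x4/x16 and the 50..400 ms integration times from the example comment above could initialize its gain-time-scale state as sketched here; the maximum scale value and all demo_* names are assumptions.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/iio/iio-gts-helper.h>

static const struct iio_gain_sel_pair demo_gains[] = {
	GAIN_SCALE_GAIN(1, 0x0),
	GAIN_SCALE_GAIN(4, 0x1),
	GAIN_SCALE_GAIN(16, 0x2),
};

static const struct iio_itime_sel_mul demo_itimes[] = {
	GAIN_SCALE_ITIME_US(400 * 1000, 0x3, 8),
	GAIN_SCALE_ITIME_US(200 * 1000, 0x2, 4),
	GAIN_SCALE_ITIME_US(100 * 1000, 0x1, 2),
	GAIN_SCALE_ITIME_US(50 * 1000, 0x0, 1),
};

static int demo_init_gts(struct device *dev, struct iio_gts *gts)
{
	/* Hypothetical scale of 2.0 at minimum gain and shortest time. */
	return devm_iio_init_iio_gts(dev, 2, 0,
				     demo_gains, ARRAY_SIZE(demo_gains),
				     demo_itimes, ARRAY_SIZE(demo_itimes),
				     gts);
}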
diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h
new file mode 100644
index 000000000000..5aec3945555b
--- /dev/null
+++ b/include/linux/iio/iio-opaque.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _INDUSTRIAL_IO_OPAQUE_H_
+#define _INDUSTRIAL_IO_OPAQUE_H_
+
+/**
+ * struct iio_dev_opaque - industrial I/O device opaque information
+ * @indio_dev: public industrial I/O device information
+ * @id: used to identify device internally
+ * @currentmode: operating mode currently in use, may be eventually
+ * checked by device drivers but should be considered
+ * read-only as this is a core internal bit
+ * @driver_module: used to make it harder to undercut users
+ * @mlock: lock used to prevent simultaneous device state changes
+ * @mlock_key: lockdep class for iio_dev lock
+ * @info_exist_lock: lock to prevent use during removal
+ * @trig_readonly: mark the current trigger immutable
+ * @event_interface: event chrdevs associated with interrupt lines
+ * @attached_buffers: array of buffers statically attached by the driver
+ * @attached_buffers_cnt: number of buffers in the array of statically attached buffers
+ * @buffer_ioctl_handler: ioctl() handler for this IIO device's buffer interface
+ * @buffer_list: list of all buffers currently attached
+ * @channel_attr_list: keep track of automatically created channel
+ * attributes
+ * @chan_attr_group: group for all attrs in base directory
+ * @ioctl_handlers: ioctl handlers registered with the core handler
+ * @groups: attribute groups
+ * @groupcounter: index of next attribute group
+ * @legacy_scan_el_group: attribute group for legacy scan elements attribute group
+ * @legacy_buffer_group: attribute group for legacy buffer attributes group
+ * @bounce_buffer: for devices that call iio_push_to_buffers_with_timestamp_unaligned()
+ * @bounce_buffer_size: size of the currently allocated bounce buffer
+ * @scan_index_timestamp: cache of the index to the timestamp
+ * @clock_id: timestamping clock posix identifier
+ * @chrdev: associated character device
+ * @flags: file ops related flags including busy flag.
+ * @debugfs_dentry: device specific debugfs dentry
+ * @cached_reg_addr: cached register address for debugfs reads
+ * @read_buf: read buffer to be used for the initial reg read
+ * @read_buf_len: data length in @read_buf
+ */
+struct iio_dev_opaque {
+ struct iio_dev indio_dev;
+ int currentmode;
+ int id;
+ struct module *driver_module;
+ struct mutex mlock;
+ struct lock_class_key mlock_key;
+ struct mutex info_exist_lock;
+ bool trig_readonly;
+ struct iio_event_interface *event_interface;
+ struct iio_buffer **attached_buffers;
+ unsigned int attached_buffers_cnt;
+ struct iio_ioctl_handler *buffer_ioctl_handler;
+ struct list_head buffer_list;
+ struct list_head channel_attr_list;
+ struct attribute_group chan_attr_group;
+ struct list_head ioctl_handlers;
+ const struct attribute_group **groups;
+ int groupcounter;
+ struct attribute_group legacy_scan_el_group;
+ struct attribute_group legacy_buffer_group;
+ void *bounce_buffer;
+ size_t bounce_buffer_size;
+
+ unsigned int scan_index_timestamp;
+ clockid_t clock_id;
+ struct cdev chrdev;
+ unsigned long flags;
+
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *debugfs_dentry;
+ unsigned cached_reg_addr;
+ char read_buf[20];
+ unsigned int read_buf_len;
+#endif
+};
+
+#define to_iio_dev_opaque(_indio_dev) \
+ container_of((_indio_dev), struct iio_dev_opaque, indio_dev)
+
+#endif
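The opaque struct is core-internal: drivers keep working with struct iio_dev, while core code recovers the wrapper via the container_of() accessor, roughly as in this sketch. Drivers needing the id should use the iio_device_id() helper declared later in iio.h instead.

/* Core-internal sketch: walk from the public struct to the wrapper. */
static int demo_core_get_id(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->id;
}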
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index d68bec297a45..81413cd3a3e7 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -1,52 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* The industrial I/O core
*
* Copyright (c) 2008 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef _INDUSTRIAL_IO_H_
#define _INDUSTRIAL_IO_H_
#include <linux/device.h>
#include <linux/cdev.h>
+#include <linux/slab.h>
#include <linux/iio/types.h>
-#include <linux/of.h>
/* IIO TODO LIST */
/*
* Provide means of adjusting timer accuracy.
* Currently assumes nano seconds.
*/
-enum iio_chan_info_enum {
- IIO_CHAN_INFO_RAW = 0,
- IIO_CHAN_INFO_PROCESSED,
- IIO_CHAN_INFO_SCALE,
- IIO_CHAN_INFO_OFFSET,
- IIO_CHAN_INFO_CALIBSCALE,
- IIO_CHAN_INFO_CALIBBIAS,
- IIO_CHAN_INFO_PEAK,
- IIO_CHAN_INFO_PEAK_SCALE,
- IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
- IIO_CHAN_INFO_AVERAGE_RAW,
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
- IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY,
- IIO_CHAN_INFO_SAMP_FREQ,
- IIO_CHAN_INFO_FREQUENCY,
- IIO_CHAN_INFO_PHASE,
- IIO_CHAN_INFO_HARDWAREGAIN,
- IIO_CHAN_INFO_HYSTERESIS,
- IIO_CHAN_INFO_INT_TIME,
- IIO_CHAN_INFO_ENABLE,
- IIO_CHAN_INFO_CALIBHEIGHT,
- IIO_CHAN_INFO_CALIBWEIGHT,
- IIO_CHAN_INFO_DEBOUNCE_COUNT,
- IIO_CHAN_INFO_DEBOUNCE_TIME,
- IIO_CHAN_INFO_CALIBEMISSIVITY,
- IIO_CHAN_INFO_OVERSAMPLING_RATIO,
-};
+struct fwnode_reference_args;
enum iio_shared_by {
IIO_SEPARATE,
@@ -134,15 +105,16 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev,
/**
* IIO_ENUM_AVAILABLE() - Initialize enum available extended channel attribute
* @_name: Attribute name ("_available" will be appended to the name)
+ * @_shared: Whether the attribute is shared between all channels
* @_e: Pointer to an iio_enum struct
*
* Creates a read only attribute which lists all the available enum items in a
* space separated list. This should usually be used together with IIO_ENUM()
*/
-#define IIO_ENUM_AVAILABLE(_name, _e) \
+#define IIO_ENUM_AVAILABLE(_name, _shared, _e) \
{ \
.name = (_name "_available"), \
- .shared = IIO_SHARED_BY_TYPE, \
+ .shared = _shared, \
.read = iio_enum_available_read, \
.private = (uintptr_t)(_e), \
}
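A hedged sketch of the updated macro in use, with a hypothetical two-state mode enum exposed per channel but with a shared *_available attribute; all demo_* names are assumptions.

#include <linux/kernel.h>
#include <linux/iio/iio.h>

static const char * const demo_modes[] = { "normal", "turbo" };

static int demo_get_mode(struct iio_dev *indio_dev,
			 const struct iio_chan_spec *chan)
{
	return 0; /* hypothetical: always "normal" */
}

static const struct iio_enum demo_mode_enum = {
	.items = demo_modes,
	.num_items = ARRAY_SIZE(demo_modes),
	.get = demo_get_mode,
};

static const struct iio_chan_spec_ext_info demo_ext_info[] = {
	IIO_ENUM("mode", IIO_SEPARATE, &demo_mode_enum),
	/* The new _shared argument replaces the implicit IIO_SHARED_BY_TYPE. */
	IIO_ENUM_AVAILABLE("mode", IIO_SHARED_BY_TYPE, &demo_mode_enum),
	{ }
};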
@@ -158,8 +130,7 @@ struct iio_mount_matrix {
ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
const struct iio_chan_spec *chan, char *buf);
-int of_iio_read_mount_matrix(const struct device *dev, const char *propname,
- struct iio_mount_matrix *matrix);
+int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix);
typedef const struct iio_mount_matrix *
(iio_get_mount_matrix_t)(const struct iio_dev *indio_dev,
@@ -211,18 +182,18 @@ struct iio_event_spec {
* @address: Driver specific identifier.
* @scan_index: Monotonic index to give ordering in scans when read
* from a buffer.
- * @scan_type: sign: 's' or 'u' to specify signed or unsigned
- * realbits: Number of valid bits of data
- * storagebits: Realbits + padding
- * shift: Shift right by this before masking out
- * realbits.
- * repeat: Number of times real/storage bits
- * repeats. When the repeat element is
- * more than 1, then the type element in
- * sysfs will show a repeat value.
- * Otherwise, the number of repetitions is
- * omitted.
- * endianness: little or big endian
+ * @scan_type: struct describing the scan type
+ * @scan_type.sign: 's' or 'u' to specify signed or unsigned
+ * @scan_type.realbits: Number of valid bits of data
+ * @scan_type.storagebits: Realbits + padding
+ * @scan_type.shift: Shift right by this before masking out
+ * realbits.
+ * @scan_type.repeat: Number of times real/storage bits repeats.
+ * When the repeat element is more than 1, then
+ * the type element in sysfs will show a repeat
+ * value. Otherwise, the number of repetitions
+ * is omitted.
+ * @scan_type.endianness: little or big endian
* @info_mask_separate: What information is to be exported that is specific to
* this channel.
* @info_mask_separate_available: What availability information is to be
@@ -344,9 +315,55 @@ static inline bool iio_channel_has_available(const struct iio_chan_spec *chan,
}
s64 iio_get_time_ns(const struct iio_dev *indio_dev);
-unsigned int iio_get_time_res(const struct iio_dev *indio_dev);
-/* Device operating modes */
+/*
+ * Device operating modes
+ * @INDIO_DIRECT_MODE: There is an access to either:
+ * a) The last single value available for devices that do not provide
+ * on-demand reads.
+ * b) A new value after performing an on-demand read otherwise.
+ * On most devices, this is a single-shot read. On some devices with data
+ * streams without an 'on-demand' function, this might also be the 'last value'
+ * feature. Above all, this mode internally means that we are not in any of the
+ * other modes, and sysfs reads should work.
+ * Device drivers should inform the core if they support this mode.
+ * @INDIO_BUFFER_TRIGGERED: Common mode when dealing with kfifo buffers.
+ * It indicates that an explicit trigger is required. This requests the core to
+ * attach a poll function when enabling the buffer, which is indicated by the
+ * _TRIGGERED suffix.
+ * The core will ensure this mode is set when registering a triggered buffer
+ * with iio_triggered_buffer_setup().
+ * @INDIO_BUFFER_SOFTWARE: Another kfifo buffer mode, but not event triggered.
+ * No poll function can be attached because there is no triggered infrastructure
+ * we can use to cause capture. There is a kfifo that the driver will fill, but
+ * not "only one scan at a time". Typically, hardware will have a buffer that
+ * can hold multiple scans. Software may read one or more scans at a time
+ * and push the available data to a kfifo. This means the core will not attach
+ * any poll function when enabling the buffer.
+ * The core will ensure this mode is set when registering a simple kfifo buffer
+ * with devm_iio_kfifo_buffer_setup().
+ * @INDIO_BUFFER_HARDWARE: For specific hardware, if unsure do not use this mode.
+ * Same as above but this time the buffer is not a kfifo where we have direct
+ * access to the data. Instead, the consumer driver must access the data through
+ * non software visible channels (or DMA when there is no demux possible in
+ * software)
+ * The core will ensure this mode is set when registering a dmaengine buffer
+ * with devm_iio_dmaengine_buffer_setup().
+ * @INDIO_EVENT_TRIGGERED: Very unusual mode.
+ * Triggers usually refer to an external event which will start data capture.
+ * Here it is kind of the opposite: a particular state of the data might
+ * produce an event. We don't necessarily
+ * have access to the data itself, but to the event produced. For example, this
+ * can be a threshold detector. The internal path of this mode is very close to
+ * the INDIO_BUFFER_TRIGGERED mode.
+ * The core will ensure this mode is set when registering a triggered event.
+ * @INDIO_HARDWARE_TRIGGERED: Very unusual mode.
+ * Here, triggers can result in data capture and can be routed to multiple
+ * hardware components, which makes them close to regular triggers in the way
+ * they must be managed by the core, but without the entire interrupts/poll
+ * functions burden. Interrupts are irrelevant as the data flow is hardware
+ * mediated and distributed.
+ */
#define INDIO_DIRECT_MODE 0x01
#define INDIO_BUFFER_TRIGGERED 0x02
#define INDIO_BUFFER_SOFTWARE 0x04
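As a sketch of how these modes are typically set, a driver supporting both sysfs reads and a triggered buffer declares only INDIO_DIRECT_MODE itself; the buffer setup helper is expected to add INDIO_BUFFER_TRIGGERED, as the comment above describes. The demo_* names are assumptions.

#include <linux/iio/iio.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>

static int demo_buffer_setup(struct device *dev, struct iio_dev *indio_dev,
			     irqreturn_t (*trigger_handler)(int irq, void *p))
{
	/* Single-shot sysfs reads remain available alongside the buffer. */
	indio_dev->modes = INDIO_DIRECT_MODE;

	return devm_iio_triggered_buffer_setup(dev, indio_dev,
					       iio_pollfunc_store_time,
					       trigger_handler, NULL);
}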
@@ -364,13 +381,15 @@ unsigned int iio_get_time_res(const struct iio_dev *indio_dev);
#define INDIO_MAX_RAW_ELEMENTS 4
+struct iio_val_int_plus_micro {
+ int integer;
+ int micro;
+};
+
struct iio_trigger; /* forward declaration */
-struct iio_dev;
/**
* struct iio_info - constant information about device
- * @driver_module: module structure used to ensure correct
- * ownership of chrdevs etc
* @event_attrs: event control attributes
* @attrs: general purpose device attributes
* @read_raw: function to request a value from the device.
@@ -396,6 +415,8 @@ struct iio_dev;
* and max. For lists, all possible values are enumerated.
* @write_raw: function to write a value to the device.
* Parameters are the same as for read_raw.
+ * @read_label: function to request label name for a specified label,
+ * for better channel identification.
* @write_raw_get_fmt: callback function to query the expected
* format/precision. If not set by the driver, write_raw
* returns IIO_VAL_INT_PLUS_MICRO.
@@ -413,6 +434,8 @@ struct iio_dev;
* provide a custom of_xlate function that reads the
* *args* and returns the appropriate index in registered
* IIO channels array.
+ * @fwnode_xlate: fwnode based function pointer to obtain channel specifier index.
+ * Functionally the same as @of_xlate.
* @hwfifo_set_watermark: function pointer to set the current hardware
* fifo watermark level; see hwfifo_* entries in
* Documentation/ABI/testing/sysfs-bus-iio for details on
@@ -425,7 +448,6 @@ struct iio_dev;
* were flushed and there was an error.
**/
struct iio_info {
- struct module *driver_module;
const struct attribute_group *event_attrs;
const struct attribute_group *attrs;
@@ -455,6 +477,10 @@ struct iio_info {
int val2,
long mask);
+ int (*read_label)(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ char *label);
+
int (*write_raw_get_fmt)(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
long mask);
@@ -489,8 +515,8 @@ struct iio_info {
int (*debugfs_reg_access)(struct iio_dev *indio_dev,
unsigned reg, unsigned writeval,
unsigned *readval);
- int (*of_xlate)(struct iio_dev *indio_dev,
- const struct of_phandle_args *iiospec);
+ int (*fwnode_xlate)(struct iio_dev *indio_dev,
+ const struct fwnode_reference_args *iiospec);
int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned val);
int (*hwfifo_flush_to_buffer)(struct iio_dev *indio_dev,
unsigned count);
@@ -517,100 +543,96 @@ struct iio_buffer_setup_ops {
/**
* struct iio_dev - industrial I/O device
- * @id: [INTERN] used to identify device internally
- * @modes: [DRIVER] operating modes supported by device
- * @currentmode: [DRIVER] current operating mode
+ * @modes: [DRIVER] bitmask listing all the operating modes
+ * supported by the IIO device. This list should be
+ * initialized before registering the IIO device. It can
+ * also be filled in by the IIO core, as a result of
+ * enabling particular features in the driver
+ * (see iio_triggered_event_setup()).
* @dev: [DRIVER] device structure, should be assigned a parent
* and owner
- * @event_interface: [INTERN] event chrdevs associated with interrupt lines
* @buffer: [DRIVER] any buffer present
- * @buffer_list: [INTERN] list of all buffers currently attached
* @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux
- * @mlock: [DRIVER] lock used to prevent simultaneous device state
- * changes
* @available_scan_masks: [DRIVER] optional array of allowed bitmasks
* @masklength: [INTERN] the length of the mask established from
* channels
* @active_scan_mask: [INTERN] union of all scan masks requested by buffers
* @scan_timestamp: [INTERN] set if any buffers have requested timestamp
- * @scan_index_timestamp:[INTERN] cache of the index to the timestamp
* @trig: [INTERN] current device trigger (buffer modes)
- * @trig_readonly [INTERN] mark the current trigger immutable
* @pollfunc: [DRIVER] function run on trigger being received
* @pollfunc_event: [DRIVER] function run on events trigger being received
* @channels: [DRIVER] channel specification structure table
* @num_channels: [DRIVER] number of channels specified in @channels.
- * @channel_attr_list: [INTERN] keep track of automatically created channel
- * attributes
- * @chan_attr_group: [INTERN] group for all attrs in base directory
* @name: [DRIVER] name of the device.
+ * @label: [DRIVER] unique name to identify which device this is
* @info: [DRIVER] callbacks and constant info from driver
- * @clock_id: [INTERN] timestamping clock posix identifier
- * @info_exist_lock: [INTERN] lock to prevent use during removal
* @setup_ops: [DRIVER] callbacks to call before and after buffer
* enable/disable
- * @chrdev: [INTERN] associated character device
- * @groups: [INTERN] attribute groups
- * @groupcounter: [INTERN] index of next attribute group
- * @flags: [INTERN] file ops related flags including busy flag.
- * @debugfs_dentry: [INTERN] device specific debugfs dentry.
- * @cached_reg_addr: [INTERN] cached register address for debugfs reads.
+ * @priv: [DRIVER] reference to driver's private information
+ * **MUST** be accessed **ONLY** via iio_priv() helper
*/
struct iio_dev {
- int id;
-
int modes;
- int currentmode;
struct device dev;
- struct iio_event_interface *event_interface;
-
struct iio_buffer *buffer;
- struct list_head buffer_list;
int scan_bytes;
- struct mutex mlock;
const unsigned long *available_scan_masks;
unsigned masklength;
const unsigned long *active_scan_mask;
bool scan_timestamp;
- unsigned scan_index_timestamp;
struct iio_trigger *trig;
- bool trig_readonly;
struct iio_poll_func *pollfunc;
struct iio_poll_func *pollfunc_event;
struct iio_chan_spec const *channels;
int num_channels;
- struct list_head channel_attr_list;
- struct attribute_group chan_attr_group;
const char *name;
+ const char *label;
const struct iio_info *info;
- clockid_t clock_id;
- struct mutex info_exist_lock;
const struct iio_buffer_setup_ops *setup_ops;
- struct cdev chrdev;
-#define IIO_MAX_GROUPS 6
- const struct attribute_group *groups[IIO_MAX_GROUPS + 1];
- int groupcounter;
- unsigned long flags;
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *debugfs_dentry;
- unsigned cached_reg_addr;
-#endif
+ void *priv;
};
+int iio_device_id(struct iio_dev *indio_dev);
+int iio_device_get_current_mode(struct iio_dev *indio_dev);
+bool iio_buffer_enabled(struct iio_dev *indio_dev);
+
const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
-int iio_device_register(struct iio_dev *indio_dev);
+/**
+ * iio_device_register() - register a device with the IIO subsystem
+ * @indio_dev: Device structure filled by the device driver
+ **/
+#define iio_device_register(indio_dev) \
+ __iio_device_register((indio_dev), THIS_MODULE)
+int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod);
void iio_device_unregister(struct iio_dev *indio_dev);
-int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev);
-void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev);
+/**
+ * devm_iio_device_register - Resource-managed iio_device_register()
+ * @dev: Device to allocate iio_dev for
+ * @indio_dev: Device structure filled by the device driver
+ *
+ * Managed iio_device_register. The IIO device registered with this
+ * function is automatically unregistered on driver detach. This function
+ * calls iio_device_register() internally. Refer to that function for more
+ * information.
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+#define devm_iio_device_register(dev, indio_dev) \
+ __devm_iio_device_register((dev), (indio_dev), THIS_MODULE)
+int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
+ struct module *this_mod);
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
int iio_device_claim_direct_mode(struct iio_dev *indio_dev);
void iio_device_release_direct_mode(struct iio_dev *indio_dev);
+int iio_device_claim_buffer_mode(struct iio_dev *indio_dev);
+void iio_device_release_buffer_mode(struct iio_dev *indio_dev);
extern struct bus_type iio_bus_type;
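A minimal, bus-agnostic probe sketch using the devm registration path above; the state struct, info struct and channel table are hypothetical placeholders.

#include <linux/kernel.h>
#include <linux/iio/iio.h>

struct demo_state { int dummy; };

static const struct iio_info demo_info = { };

static const struct iio_chan_spec demo_channels[] = {
	{ .type = IIO_VOLTAGE, .indexed = 1, .channel = 0 },
};

static int demo_probe(struct device *dev)
{
	struct iio_dev *indio_dev;

	indio_dev = devm_iio_device_alloc(dev, sizeof(struct demo_state));
	if (!indio_dev)
		return -ENOMEM;

	indio_dev->name = "demo";
	indio_dev->info = &demo_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = demo_channels;
	indio_dev->num_channels = ARRAY_SIZE(demo_channels);

	/* Expands to __devm_iio_device_register(dev, indio_dev, THIS_MODULE). */
	return devm_iio_device_register(dev, indio_dev);
}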
@@ -624,14 +646,8 @@ static inline void iio_device_put(struct iio_dev *indio_dev)
put_device(&indio_dev->dev);
}
-/**
- * iio_device_get_clock() - Retrieve current timestamping clock for the device
- * @indio_dev: IIO device structure containing the device
- */
-static inline clockid_t iio_device_get_clock(const struct iio_dev *indio_dev)
-{
- return indio_dev->clock_id;
-}
+clockid_t iio_device_get_clock(const struct iio_dev *indio_dev);
+int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id);
/**
* dev_to_iio_dev() - Get IIO device struct from a device struct
@@ -655,6 +671,26 @@ static inline struct iio_dev *iio_device_get(struct iio_dev *indio_dev)
return indio_dev ? dev_to_iio_dev(get_device(&indio_dev->dev)) : NULL;
}
+/**
+ * iio_device_set_parent() - assign parent device to the IIO device object
+ * @indio_dev: IIO device structure
+ * @parent: reference to parent device object
+ *
+ * This utility must be called between IIO device allocation
+ * (via devm_iio_device_alloc()) & IIO device registration
+ * (via iio_device_register() or devm_iio_device_register()).
+ * By default, the device allocation will also assign a parent device to
+ * the IIO device object. In cases where devm_iio_device_alloc() is used,
+ * sometimes the parent device must be different than the device used to
+ * manage the allocation.
+ * In that case, this helper should be used to change the parent, hence the
+ * requirement to call this between allocation & registration.
+ **/
+static inline void iio_device_set_parent(struct iio_dev *indio_dev,
+ struct device *parent)
+{
+ indio_dev->dev.parent = parent;
+}
/**
* iio_device_set_drvdata() - Set device driver data
@@ -675,54 +711,41 @@ static inline void iio_device_set_drvdata(struct iio_dev *indio_dev, void *data)
*
* Returns the data previously set with iio_device_set_drvdata()
*/
-static inline void *iio_device_get_drvdata(struct iio_dev *indio_dev)
+static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev)
{
return dev_get_drvdata(&indio_dev->dev);
}
-/* Can we make this smaller? */
-#define IIO_ALIGN L1_CACHE_BYTES
-struct iio_dev *iio_device_alloc(int sizeof_priv);
+/*
+ * Used to ensure the iio_priv() structure is aligned to allow that structure
+ * to in turn include IIO_DMA_MINALIGN'd elements such as buffers which
+ * must not share cachelines with the rest of the structure, thus making
+ * them safe for use with non-coherent DMA.
+ */
+#define IIO_DMA_MINALIGN ARCH_KMALLOC_MINALIGN
+struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv);
+/* The information at the returned address is guaranteed to be cacheline aligned */
static inline void *iio_priv(const struct iio_dev *indio_dev)
{
- return (char *)indio_dev + ALIGN(sizeof(struct iio_dev), IIO_ALIGN);
-}
-
-static inline struct iio_dev *iio_priv_to_dev(void *priv)
-{
- return (struct iio_dev *)((char *)priv -
- ALIGN(sizeof(struct iio_dev), IIO_ALIGN));
+ return indio_dev->priv;
}
void iio_device_free(struct iio_dev *indio_dev);
-int devm_iio_device_match(struct device *dev, void *res, void *data);
-struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv);
-void devm_iio_device_free(struct device *dev, struct iio_dev *indio_dev);
-struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
- const char *fmt, ...);
-void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig);
-
-/**
- * iio_buffer_enabled() - helper function to test if the buffer is enabled
- * @indio_dev: IIO device structure for device
- **/
-static inline bool iio_buffer_enabled(struct iio_dev *indio_dev)
-{
- return indio_dev->currentmode
- & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE |
- INDIO_BUFFER_SOFTWARE);
-}
-
+struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv);
+
+#define devm_iio_trigger_alloc(parent, fmt, ...) \
+ __devm_iio_trigger_alloc((parent), THIS_MODULE, (fmt), ##__VA_ARGS__)
+__printf(3, 4)
+struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent,
+ struct module *this_mod,
+ const char *fmt, ...);
/**
* iio_get_debugfs_dentry() - helper function to get the debugfs_dentry
* @indio_dev: IIO device structure for device
**/
#if defined(CONFIG_DEBUG_FS)
-static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
-{
- return indio_dev->debugfs_dentry;
-}
+struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev);
#else
static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
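
Taken together, these iio.h changes reshape the allocation API: the parent is now passed to (devm_)iio_device_alloc(), iio_priv() simply returns a stored pointer, and DMA-safe members of the private structure should be aligned with IIO_DMA_MINALIGN. A minimal probe sketch under those assumptions — all driver names are invented for illustration:

struct foo_state {
	struct spi_device *spi;
	/* DMA-safe: must not share a cacheline with the fields above */
	u8 rx_buf[4] __aligned(IIO_DMA_MINALIGN);
};

static int foo_probe(struct spi_device *spi)
{
	struct iio_dev *indio_dev;
	struct foo_state *st;

	/* the parent device is now passed at allocation time */
	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
	if (!indio_dev)
		return -ENOMEM;

	st = iio_priv(indio_dev);	/* cacheline-aligned private data */
	st->spi = spi;
	indio_dev->name = "foo";

	return devm_iio_device_register(&spi->dev, indio_dev);
}
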
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
index 360da7d18a3d..dc9ea299e088 100644
--- a/include/linux/iio/imu/adis.h
+++ b/include/linux/iio/imu/adis.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Common library for ADIS16XXX devices
*
* Copyright 2012 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef __IIO_ADIS_H__
@@ -23,59 +22,256 @@
struct adis;
/**
+ * struct adis_timeout - ADIS chip variant timeouts
+ * @reset_ms: Wait time after the rst pin goes inactive
+ * @sw_reset_ms: Wait time after the sw reset command
+ * @self_test_ms: Wait time after the self test command
+ */
+struct adis_timeout {
+ u16 reset_ms;
+ u16 sw_reset_ms;
+ u16 self_test_ms;
+};
+
+/**
* struct adis_data - ADIS chip variant specific data
* @read_delay: SPI delay for read operations in us
* @write_delay: SPI delay for write operations in us
+ * @cs_change_delay: SPI delay between CS changes in us
* @glob_cmd_reg: Register address of the GLOB_CMD register
* @msc_ctrl_reg: Register address of the MSC_CTRL register
* @diag_stat_reg: Register address of the DIAG_STAT register
- * @status_error_msgs: Array of error messgaes
- * @status_error_mask:
+ * @prod_id_reg: Register address of the PROD_ID register
+ * @prod_id: Product ID code that should be expected when reading @prod_id_reg
+ * @self_test_mask: Bitmask of supported self-test operations
+ * @self_test_reg: Register address to request self test command
+ * @self_test_no_autoclear: True if device's self-test needs clear of ctrl reg
+ * @status_error_msgs: Array of error messages
+ * @status_error_mask: Bitmask of errors supported by the device
+ * @timeouts: Chip specific delays
+ * @enable_irq: Hook for ADIS devices that have a special IRQ enable/disable
+ * @unmasked_drdy: True for devices that cannot mask/unmask the data ready pin
+ * @has_paging: True if ADIS device has paged registers
+ * @burst_reg_cmd: Register command that triggers burst
+ * @burst_len: Burst size in the SPI RX buffer. If @burst_max_len is defined,
+ * this should be the minimum size supported by the device.
+ * @burst_max_len: Holds the maximum burst size when the device supports
+ * more than one burst mode with different sizes
+ * @burst_max_speed_hz: Maximum spi speed that can be used in burst mode
*/
struct adis_data {
unsigned int read_delay;
unsigned int write_delay;
+ unsigned int cs_change_delay;
unsigned int glob_cmd_reg;
unsigned int msc_ctrl_reg;
unsigned int diag_stat_reg;
+ unsigned int prod_id_reg;
+
+ unsigned int prod_id;
unsigned int self_test_mask;
+ unsigned int self_test_reg;
bool self_test_no_autoclear;
- unsigned int startup_delay;
+ const struct adis_timeout *timeouts;
const char * const *status_error_msgs;
unsigned int status_error_mask;
int (*enable_irq)(struct adis *adis, bool enable);
+ bool unmasked_drdy;
bool has_paging;
+
+ unsigned int burst_reg_cmd;
+ unsigned int burst_len;
+ unsigned int burst_max_len;
+ unsigned int burst_max_speed_hz;
};
+/**
+ * struct adis - ADIS device instance data
+ * @spi: Reference to SPI device which owns this ADIS IIO device
+ * @trig: IIO trigger object data
+ * @data: ADIS chip variant specific data
+ * @burst: ADIS burst transfer information
+ * @burst_extra_len: Burst extra length. Should only be used by devices that can
+ * dynamically change their burst mode length.
+ * @state_lock: Lock used by the device to protect state
+ * @msg: SPI message object
+ * @xfer: SPI transfer objects to be used for a @msg
+ * @current_page: Some ADIS devices have paged registers; this selects the current page
+ * @irq_flag: IRQ handling flags as passed to request_irq()
+ * @buffer: Data buffer for information read from the device
+ * @tx: DMA safe TX buffer for SPI transfers
+ * @rx: DMA safe RX buffer for SPI transfers
+ */
struct adis {
struct spi_device *spi;
struct iio_trigger *trig;
const struct adis_data *data;
-
- struct mutex txrx_lock;
+ unsigned int burst_extra_len;
+ /**
+ * The state_lock is meant to be used during operations that require
+ * a sequence of SPI R/W in order to protect the SPI transfer
+ * information (fields 'xfer', 'msg' & 'current_page') between
+ * potential concurrent accesses.
+ * This lock is used by all "adis_{functions}" that have to read/write
+ * registers. These functions also have unlocked variants
+ * (see "__adis_{functions}"), which don't hold this lock.
+ * This allows users of the ADIS library to group SPI R/W into
+ * the drivers, but they also must manage this lock themselves.
+ */
+ struct mutex state_lock;
struct spi_message msg;
struct spi_transfer *xfer;
unsigned int current_page;
+ unsigned long irq_flag;
void *buffer;
- uint8_t tx[10] ____cacheline_aligned;
- uint8_t rx[4];
+ u8 tx[10] ____cacheline_aligned;
+ u8 rx[4];
};
int adis_init(struct adis *adis, struct iio_dev *indio_dev,
- struct spi_device *spi, const struct adis_data *data);
-int adis_reset(struct adis *adis);
+ struct spi_device *spi, const struct adis_data *data);
+int __adis_reset(struct adis *adis);
+
+/**
+ * adis_reset() - Reset the device
+ * @adis: The adis device
+ *
+ * Returns 0 on success, a negative error code otherwise
+ */
+static inline int adis_reset(struct adis *adis)
+{
+ int ret;
+
+ mutex_lock(&adis->state_lock);
+ ret = __adis_reset(adis);
+ mutex_unlock(&adis->state_lock);
+
+ return ret;
+}
+
+int __adis_write_reg(struct adis *adis, unsigned int reg,
+ unsigned int val, unsigned int size);
+int __adis_read_reg(struct adis *adis, unsigned int reg,
+ unsigned int *val, unsigned int size);
+
+/**
+ * __adis_write_reg_8() - Write single byte to a register (unlocked)
+ * @adis: The adis device
+ * @reg: The address of the register to be written
+ * @val: The value to write
+ */
+static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg,
+ u8 val)
+{
+ return __adis_write_reg(adis, reg, val, 1);
+}
+
+/**
+ * __adis_write_reg_16() - Write 2 bytes to a pair of registers (unlocked)
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: Value to be written
+ */
+static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg,
+ u16 val)
+{
+ return __adis_write_reg(adis, reg, val, 2);
+}
+
+/**
+ * __adis_write_reg_32() - write 4 bytes to four registers (unlocked)
+ * @adis: The adis device
+ * @reg: The address of the lowest of the four registers
+ * @val: Value to be written
+ */
+static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg,
+ u32 val)
+{
+ return __adis_write_reg(adis, reg, val, 4);
+}
+
+/**
+ * __adis_read_reg_16() - read 2 bytes from a 16-bit register (unlocked)
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: The value read back from the device
+ */
+static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg,
+ u16 *val)
+{
+ unsigned int tmp;
+ int ret;
+
+ ret = __adis_read_reg(adis, reg, &tmp, 2);
+ if (ret == 0)
+ *val = tmp;
+
+ return ret;
+}
+
+/**
+ * __adis_read_reg_32() - read 4 bytes from a 32-bit register (unlocked)
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: The value read back from the device
+ */
+static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg,
+ u32 *val)
+{
+ unsigned int tmp;
+ int ret;
+
+ ret = __adis_read_reg(adis, reg, &tmp, 4);
+ if (ret == 0)
+ *val = tmp;
+
+ return ret;
+}
+
+/**
+ * adis_write_reg() - write N bytes to register
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: The value to write to the device (up to 4 bytes)
+ * @size: The size of the @value (in bytes)
+ */
+static inline int adis_write_reg(struct adis *adis, unsigned int reg,
+ unsigned int val, unsigned int size)
+{
+ int ret;
+
+ mutex_lock(&adis->state_lock);
+ ret = __adis_write_reg(adis, reg, val, size);
+ mutex_unlock(&adis->state_lock);
+
+ return ret;
+}
+
+/**
+ * adis_read_reg() - read N bytes from register
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: The value read back from the device
+ * @size: The size of the @val buffer
+ */
+static inline int adis_read_reg(struct adis *adis, unsigned int reg,
+ unsigned int *val, unsigned int size)
+{
+ int ret;
+
+ mutex_lock(&adis->state_lock);
+ ret = __adis_read_reg(adis, reg, val, size);
+ mutex_unlock(&adis->state_lock);
-int adis_write_reg(struct adis *adis, unsigned int reg,
- unsigned int val, unsigned int size);
-int adis_read_reg(struct adis *adis, unsigned int reg,
- unsigned int *val, unsigned int size);
+ return ret;
+}
/**
* adis_write_reg_8() - Write single byte to a register
@@ -84,7 +280,7 @@ int adis_read_reg(struct adis *adis, unsigned int reg,
* @value: The value to write
*/
static inline int adis_write_reg_8(struct adis *adis, unsigned int reg,
- uint8_t val)
+ u8 val)
{
return adis_write_reg(adis, reg, val, 1);
}
@@ -96,7 +292,7 @@ static inline int adis_write_reg_8(struct adis *adis, unsigned int reg,
* @value: Value to be written
*/
static inline int adis_write_reg_16(struct adis *adis, unsigned int reg,
- uint16_t val)
+ u16 val)
{
return adis_write_reg(adis, reg, val, 2);
}
@@ -108,7 +304,7 @@ static inline int adis_write_reg_16(struct adis *adis, unsigned int reg,
* @value: Value to be written
*/
static inline int adis_write_reg_32(struct adis *adis, unsigned int reg,
- uint32_t val)
+ u32 val)
{
return adis_write_reg(adis, reg, val, 4);
}
@@ -120,13 +316,14 @@ static inline int adis_write_reg_32(struct adis *adis, unsigned int reg,
* @val: The value read back from the device
*/
static inline int adis_read_reg_16(struct adis *adis, unsigned int reg,
- uint16_t *val)
+ u16 *val)
{
unsigned int tmp;
int ret;
ret = adis_read_reg(adis, reg, &tmp, 2);
- *val = tmp;
+ if (ret == 0)
+ *val = tmp;
return ret;
}
@@ -138,25 +335,112 @@ static inline int adis_read_reg_16(struct adis *adis, unsigned int reg,
* @val: The value read back from the device
*/
static inline int adis_read_reg_32(struct adis *adis, unsigned int reg,
- uint32_t *val)
+ u32 *val)
{
unsigned int tmp;
int ret;
ret = adis_read_reg(adis, reg, &tmp, 4);
- *val = tmp;
+ if (ret == 0)
+ *val = tmp;
return ret;
}
-int adis_enable_irq(struct adis *adis, bool enable);
-int adis_check_status(struct adis *adis);
+int __adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask,
+ const u32 val, u8 size);
+/**
+ * adis_update_bits_base() - ADIS Update bits function - Locked version
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ * @size: Size of the register to update
+ *
+ * Updates the desired bits of @reg in accordance with @mask and @val.
+ */
+static inline int adis_update_bits_base(struct adis *adis, unsigned int reg,
+ const u32 mask, const u32 val, u8 size)
+{
+ int ret;
-int adis_initial_startup(struct adis *adis);
+ mutex_lock(&adis->state_lock);
+ ret = __adis_update_bits_base(adis, reg, mask, val, size);
+ mutex_unlock(&adis->state_lock);
+ return ret;
+}
+
+/**
+ * adis_update_bits() - Wrapper macro for adis_update_bits_base - Locked version
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ *
+ * This macro evaluates sizeof(@val) at compile time and calls
+ * adis_update_bits_base() accordingly. Be aware that passing macros/defines
+ * as @val can lead to undesired behavior if the register to update is 16-bit.
+ */
+#define adis_update_bits(adis, reg, mask, val) ({ \
+ BUILD_BUG_ON(sizeof(val) != 2 && sizeof(val) != 4); \
+ adis_update_bits_base(adis, reg, mask, val, sizeof(val)); \
+})
+
+/**
+ * __adis_update_bits() - Wrapper macro for __adis_update_bits_base() - Unlocked version
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ *
+ * This macro evaluates sizeof(@val) at compile time and calls
+ * __adis_update_bits_base() accordingly. Be aware that passing macros/defines
+ * as @val can lead to undesired behavior if the register to update is 16-bit.
+ */
+#define __adis_update_bits(adis, reg, mask, val) ({ \
+ BUILD_BUG_ON(sizeof(val) != 2 && sizeof(val) != 4); \
+ __adis_update_bits_base(adis, reg, mask, val, sizeof(val)); \
+})
+
+int __adis_check_status(struct adis *adis);
+int __adis_initial_startup(struct adis *adis);
+int __adis_enable_irq(struct adis *adis, bool enable);
+
+static inline int adis_enable_irq(struct adis *adis, bool enable)
+{
+ int ret;
+
+ mutex_lock(&adis->state_lock);
+ ret = __adis_enable_irq(adis, enable);
+ mutex_unlock(&adis->state_lock);
+
+ return ret;
+}
+
+static inline int adis_check_status(struct adis *adis)
+{
+ int ret;
+
+ mutex_lock(&adis->state_lock);
+ ret = __adis_check_status(adis);
+ mutex_unlock(&adis->state_lock);
+
+ return ret;
+}
+
+static inline void adis_dev_lock(struct adis *adis)
+{
+ mutex_lock(&adis->state_lock);
+}
+
+static inline void adis_dev_unlock(struct adis *adis)
+{
+ mutex_unlock(&adis->state_lock);
+}
int adis_single_conversion(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan, unsigned int error_mask,
- int *val);
+ const struct iio_chan_spec *chan,
+ unsigned int error_mask, int *val);
#define ADIS_VOLTAGE_CHAN(addr, si, chan, name, info_all, bits) { \
.type = IIO_VOLTAGE, \
@@ -205,7 +489,7 @@ int adis_single_conversion(struct iio_dev *indio_dev,
.modified = 1, \
.channel2 = IIO_MOD_ ## mod, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- info_sep, \
+ (info_sep), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.info_mask_shared_by_all = info_all, \
.address = (addr), \
@@ -232,40 +516,30 @@ int adis_single_conversion(struct iio_dev *indio_dev,
#ifdef CONFIG_IIO_ADIS_LIB_BUFFER
-int adis_setup_buffer_and_trigger(struct adis *adis,
- struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *));
-void adis_cleanup_buffer_and_trigger(struct adis *adis,
- struct iio_dev *indio_dev);
+int
+devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
+ irq_handler_t trigger_handler);
-int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev);
-void adis_remove_trigger(struct adis *adis);
+int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev);
int adis_update_scan_mode(struct iio_dev *indio_dev,
- const unsigned long *scan_mask);
+ const unsigned long *scan_mask);
#else /* CONFIG_IIO_BUFFER */
-static inline int adis_setup_buffer_and_trigger(struct adis *adis,
- struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *))
+static inline int
+devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
+ irq_handler_t trigger_handler)
{
return 0;
}
-static inline void adis_cleanup_buffer_and_trigger(struct adis *adis,
- struct iio_dev *indio_dev)
-{
-}
-
-static inline int adis_probe_trigger(struct adis *adis,
- struct iio_dev *indio_dev)
+static inline int devm_adis_probe_trigger(struct adis *adis,
+ struct iio_dev *indio_dev)
{
return 0;
}
-static inline void adis_remove_trigger(struct adis *adis)
-{
-}
-
#define adis_update_scan_mode NULL
#endif /* CONFIG_IIO_BUFFER */
@@ -273,7 +547,8 @@ static inline void adis_remove_trigger(struct adis *adis)
#ifdef CONFIG_DEBUG_FS
int adis_debugfs_reg_access(struct iio_dev *indio_dev,
- unsigned int reg, unsigned int writeval, unsigned int *readval);
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval);
#else
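
The locked/unlocked split introduced above follows one pattern throughout: adis_read_reg(), adis_write_reg() and friends take state_lock internally, while the __adis_* variants assume the caller already holds it via adis_dev_lock(). A sketch of grouping several accesses into one atomic sequence — the register name is a placeholder:

#define FOO_REG_FILTER	0x16	/* hypothetical register address */

static int foo_set_filter(struct adis *adis, u16 val)
{
	int ret;

	adis_dev_lock(adis);		/* take state_lock once */
	ret = __adis_write_reg_16(adis, FOO_REG_FILTER, val);
	if (ret)
		goto out_unlock;
	ret = __adis_check_status(adis);	/* same critical section */
out_unlock:
	adis_dev_unlock(adis);
	return ret;
}
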
diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h
index 027cfa9c3703..22874da0c8be 100644
--- a/include/linux/iio/kfifo_buf.h
+++ b/include/linux/iio/kfifo_buf.h
@@ -1,13 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_IIO_KFIFO_BUF_H__
#define __LINUX_IIO_KFIFO_BUF_H__
struct iio_buffer;
+struct iio_buffer_setup_ops;
+struct iio_dev;
+struct iio_dev_attr;
struct device;
struct iio_buffer *iio_kfifo_allocate(void);
void iio_kfifo_free(struct iio_buffer *r);
-struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev);
-void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r);
+int devm_iio_kfifo_buffer_setup_ext(struct device *dev,
+ struct iio_dev *indio_dev,
+ const struct iio_buffer_setup_ops *setup_ops,
+ const struct iio_dev_attr **buffer_attrs);
+
+#define devm_iio_kfifo_buffer_setup(dev, indio_dev, setup_ops) \
+ devm_iio_kfifo_buffer_setup_ext((dev), (indio_dev), (setup_ops), NULL)
#endif
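
With the allocate/free pair replaced by a one-shot devm helper, wiring a kfifo buffer to a device becomes a single call in probe. A sketch, assuming a driver-defined foo_buffer_setup_ops:

	/* in probe(), after devm_iio_device_alloc(): */
	ret = devm_iio_kfifo_buffer_setup(&spi->dev, indio_dev,
					  &foo_buffer_setup_ops);
	if (ret)
		return ret;
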
diff --git a/include/linux/iio/machine.h b/include/linux/iio/machine.h
index 1601a2a63a72..fe7ccbb81184 100644
--- a/include/linux/iio/machine.h
+++ b/include/linux/iio/machine.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Industrial I/O in kernel access map definitions for board files.
*
* Copyright (c) 2011 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef __LINUX_IIO_MACHINE_H__
@@ -28,4 +25,11 @@ struct iio_map {
void *consumer_data;
};
+#define IIO_MAP(_provider_channel, _consumer_dev_name, _consumer_channel) \
+{ \
+ .adc_channel_label = _provider_channel, \
+ .consumer_dev_name = _consumer_dev_name, \
+ .consumer_channel = _consumer_channel, \
+}
+
#endif
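
The new IIO_MAP() initializer condenses board-file channel maps to one line per entry. For example, with hypothetical consumer names:

static struct iio_map foo_adc_maps[] = {
	IIO_MAP("channel_0", "foo-battery", "voltage"),
	IIO_MAP("channel_1", "foo-charger", "current"),
	{ }	/* sentinel */
};
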
diff --git a/include/linux/iio/magnetometer/ak8975.h b/include/linux/iio/magnetometer/ak8975.h
deleted file mode 100644
index c8400959d197..000000000000
--- a/include/linux/iio/magnetometer/ak8975.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef __IIO_MAGNETOMETER_AK8975_H__
-#define __IIO_MAGNETOMETER_AK8975_H__
-
-#include <linux/iio/iio.h>
-
-/**
- * struct ak8975_platform_data - AK8975 magnetometer driver platform data
- * @eoc_gpio: data ready event gpio
- * @orientation: mounting matrix relative to main hardware
- */
-struct ak8975_platform_data {
- int eoc_gpio;
- struct iio_mount_matrix orientation;
-};
-
-#endif
diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h
index fa7931933067..eff1e6b2595c 100644
--- a/include/linux/iio/sw_device.h
+++ b/include/linux/iio/sw_device.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Industrial I/O software device interface
*
* Copyright (c) 2016 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef __IIO_SW_DEVICE
@@ -60,7 +57,7 @@ void iio_sw_device_type_configfs_unregister(struct iio_sw_device_type *dt);
static inline
void iio_swd_group_init_type_name(struct iio_sw_device *d,
const char *name,
- struct config_item_type *type)
+ const struct config_item_type *type)
{
#if IS_ENABLED(CONFIG_CONFIGFS_FS)
config_group_init_type_name(&d->group, name, type);
diff --git a/include/linux/iio/sw_trigger.h b/include/linux/iio/sw_trigger.h
index c97eab67558f..47de2443e984 100644
--- a/include/linux/iio/sw_trigger.h
+++ b/include/linux/iio/sw_trigger.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Industrial I/O software trigger interface
*
* Copyright (c) 2015 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef __IIO_SW_TRIGGER
@@ -60,7 +57,7 @@ void iio_sw_trigger_type_configfs_unregister(struct iio_sw_trigger_type *tt);
static inline
void iio_swt_group_init_type_name(struct iio_sw_trigger *t,
const char *name,
- struct config_item_type *type)
+ const struct config_item_type *type)
{
#if IS_ENABLED(CONFIG_CONFIGFS_FS)
config_group_init_type_name(&t->group, name, type);
diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h
index ce9426c507fd..de5bb125815c 100644
--- a/include/linux/iio/sysfs.h
+++ b/include/linux/iio/sysfs.h
@@ -1,17 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* The industrial I/O core
*
*Copyright (c) 2008 Jonathan Cameron
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
* General attributes
*/
#ifndef _INDUSTRIAL_IO_SYSFS_H_
#define _INDUSTRIAL_IO_SYSFS_H_
+struct iio_buffer;
struct iio_chan_spec;
/**
@@ -20,12 +18,14 @@ struct iio_chan_spec;
* @address: associated register address
* @l: list head for maintaining list of dynamically created attrs
* @c: specification for the underlying channel
+ * @buffer: the IIO buffer to which this attribute belongs (if any)
*/
struct iio_dev_attr {
struct device_attribute dev_attr;
u64 address;
struct list_head l;
struct iio_chan_spec const *c;
+ struct iio_buffer *buffer;
};
#define to_iio_dev_attr(_dev_attr) \
@@ -97,6 +97,17 @@ struct iio_const_attr {
= { .string = _string, \
.dev_attr = __ATTR(_name, S_IRUGO, iio_read_const_attr, NULL)}
+#define IIO_STATIC_CONST_DEVICE_ATTR(_name, _string) \
+ static ssize_t iio_const_dev_attr_show_##_name( \
+ struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+ { \
+ return sysfs_emit(buf, "%s\n", _string); \
+ } \
+ static IIO_DEVICE_ATTR(_name, 0444, \
+ iio_const_dev_attr_show_##_name, NULL, 0)
+
/* Generic attributes of onetype or another */
/**
diff --git a/include/linux/iio/timer/stm32-lptim-trigger.h b/include/linux/iio/timer/stm32-lptim-trigger.h
new file mode 100644
index 000000000000..a34dcf6a6001
--- /dev/null
+++ b/include/linux/iio/timer/stm32-lptim-trigger.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) STMicroelectronics 2017
+ *
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>
+ */
+
+#ifndef _STM32_LPTIM_TRIGGER_H_
+#define _STM32_LPTIM_TRIGGER_H_
+
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+
+#define LPTIM1_OUT "lptim1_out"
+#define LPTIM2_OUT "lptim2_out"
+#define LPTIM3_OUT "lptim3_out"
+
+#if IS_REACHABLE(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
+bool is_stm32_lptim_trigger(struct iio_trigger *trig);
+#else
+static inline bool is_stm32_lptim_trigger(struct iio_trigger *trig)
+{
+#if IS_ENABLED(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
+ pr_warn_once("stm32 lptim_trigger not linked in\n");
+#endif
+ return false;
+}
+#endif
+#endif
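
A device that only works with LPTIM triggers would typically call is_stm32_lptim_trigger() from its validate_trigger callback; the IS_REACHABLE() stub above keeps such code buildable when the trigger driver is not reachable. A sketch:

static int foo_validate_trigger(struct iio_dev *indio_dev,
				struct iio_trigger *trig)
{
	/* accept only STM32 low-power timer triggers */
	return is_stm32_lptim_trigger(trig) ? 0 : -EINVAL;
}
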
diff --git a/include/linux/iio/timer/stm32-timer-trigger.h b/include/linux/iio/timer/stm32-timer-trigger.h
index fa7d786ed99e..37572e4dc73a 100644
--- a/include/linux/iio/timer/stm32-timer-trigger.h
+++ b/include/linux/iio/timer/stm32-timer-trigger.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) STMicroelectronics 2016
*
* Author: Benjamin Gaignard <benjamin.gaignard@st.com>
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _STM32_TIMER_TRIGGER_H_
@@ -55,10 +54,33 @@
#define TIM9_CH1 "tim9_ch1"
#define TIM9_CH2 "tim9_ch2"
+#define TIM10_OC1 "tim10_oc1"
+
+#define TIM11_OC1 "tim11_oc1"
+
#define TIM12_TRGO "tim12_trgo"
#define TIM12_CH1 "tim12_ch1"
#define TIM12_CH2 "tim12_ch2"
-bool is_stm32_timer_trigger(struct iio_trigger *trig);
+#define TIM13_OC1 "tim13_oc1"
+
+#define TIM14_OC1 "tim14_oc1"
+
+#define TIM15_TRGO "tim15_trgo"
+#define TIM16_OC1 "tim16_oc1"
+
+#define TIM17_OC1 "tim17_oc1"
+
+#if IS_REACHABLE(CONFIG_IIO_STM32_TIMER_TRIGGER)
+bool is_stm32_timer_trigger(struct iio_trigger *trig);
+#else
+static inline bool is_stm32_timer_trigger(struct iio_trigger *trig)
+{
+#if IS_ENABLED(CONFIG_IIO_STM32_TIMER_TRIGGER)
+ pr_warn_once("stm32-timer-trigger not linked in\n");
+#endif
+ return false;
+}
+#endif
#endif
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index ea08302f2d7b..51f52c5c6092 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* The industrial I/O core, trigger handling functions
*
* Copyright (c) 2008 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#include <linux/irq.h>
#include <linux/module.h>
@@ -23,9 +20,8 @@ struct iio_trigger;
/**
* struct iio_trigger_ops - operations structure for an iio_trigger.
- * @owner: used to monitor usage count of the trigger.
* @set_trigger_state: switch on/off the trigger on demand
- * @try_reenable: function to reenable the trigger when the
+ * @reenable: function to reenable the trigger when the
* use count is zero (may be NULL)
* @validate_device: function to validate the device when the
* current trigger gets changed.
@@ -34,9 +30,8 @@ struct iio_trigger;
* instances of a given device.
**/
struct iio_trigger_ops {
- struct module *owner;
int (*set_trigger_state)(struct iio_trigger *trig, bool state);
- int (*try_reenable)(struct iio_trigger *trig);
+ void (*reenable)(struct iio_trigger *trig);
int (*validate_device)(struct iio_trigger *trig,
struct iio_dev *indio_dev);
};
@@ -45,12 +40,13 @@ struct iio_trigger_ops {
/**
* struct iio_trigger - industrial I/O trigger device
* @ops: [DRIVER] operations structure
+ * @owner: [INTERN] owner of this driver module
* @id: [INTERN] unique id number
* @name: [DRIVER] unique name
* @dev: [DRIVER] associated device (if relevant)
* @list: [INTERN] used in maintenance of global trigger list
* @alloc_list: [DRIVER] used for driver specific trigger list
- * @use_count: use count for the trigger
+ * @use_count: [INTERN] use count for the trigger.
* @subirq_chip: [INTERN] associate 'virtual' irq chip.
* @subirq_base: [INTERN] base number for irqs provided by trigger.
* @subirqs: [INTERN] information about the 'child' irqs.
@@ -59,9 +55,11 @@ struct iio_trigger_ops {
* @attached_own_device:[INTERN] if we are using our own device as trigger,
* i.e. if we registered a poll function to the same
* device as the one providing the trigger.
+ * @reenable_work: [INTERN] work item used to ensure reenable can sleep.
**/
struct iio_trigger {
const struct iio_trigger_ops *ops;
+ struct module *owner;
int id;
const char *name;
struct device dev;
@@ -77,6 +75,7 @@ struct iio_trigger {
unsigned long pool[BITS_TO_LONGS(CONFIG_IIO_CONSUMERS_PER_TRIGGER)];
struct mutex pool_lock;
bool attached_own_device;
+ struct work_struct reenable_work;
};
@@ -87,20 +86,25 @@ static inline struct iio_trigger *to_iio_trigger(struct device *d)
static inline void iio_trigger_put(struct iio_trigger *trig)
{
- module_put(trig->ops->owner);
+ module_put(trig->owner);
put_device(&trig->dev);
}
static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig)
{
get_device(&trig->dev);
- __module_get(trig->ops->owner);
+
+ WARN_ONCE(list_empty(&trig->list),
+ "Getting non-registered iio trigger %s is prohibited\n",
+ trig->name);
+
+ __module_get(trig->owner);
return trig;
}
/**
- * iio_device_set_drvdata() - Set trigger driver data
+ * iio_trigger_set_drvdata() - Set trigger driver data
* @trig: IIO trigger structure
* @data: Driver specific data
*
@@ -138,30 +142,27 @@ int devm_iio_trigger_register(struct device *dev,
**/
void iio_trigger_unregister(struct iio_trigger *trig_info);
-void devm_iio_trigger_unregister(struct device *dev,
- struct iio_trigger *trig_info);
-
/**
* iio_trigger_set_immutable() - set an immutable trigger on destination
*
- * @indio_dev - IIO device structure containing the device
- * @trig - trigger to assign to device
+ * @indio_dev: IIO device structure containing the device
+ * @trig: trigger to assign to device
*
**/
int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig);
-/**
- * iio_trigger_poll() - called on a trigger occurring
- * @trig: trigger which occurred
- *
- * Typically called in relevant hardware interrupt handler.
- **/
void iio_trigger_poll(struct iio_trigger *trig);
-void iio_trigger_poll_chained(struct iio_trigger *trig);
+void iio_trigger_poll_nested(struct iio_trigger *trig);
irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private);
-__printf(1, 2) struct iio_trigger *iio_trigger_alloc(const char *fmt, ...);
+#define iio_trigger_alloc(parent, fmt, ...) \
+ __iio_trigger_alloc((parent), THIS_MODULE, (fmt), ##__VA_ARGS__)
+
+__printf(3, 4)
+struct iio_trigger *__iio_trigger_alloc(struct device *parent,
+ struct module *this_mod,
+ const char *fmt, ...);
void iio_trigger_free(struct iio_trigger *trig);
/**
diff --git a/include/linux/iio/trigger_consumer.h b/include/linux/iio/trigger_consumer.h
index c4f8c7409666..2c05dfad88d7 100644
--- a/include/linux/iio/trigger_consumer.h
+++ b/include/linux/iio/trigger_consumer.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* The industrial I/O core, trigger consumer functions
*
* Copyright (c) 2008-2011 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef __LINUX_IIO_TRIGGER_CONSUMER_H__
@@ -41,7 +38,7 @@ struct iio_poll_func {
};
-struct iio_poll_func
+__printf(5, 6) struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
irqreturn_t (*thread)(int irq, void *p),
int type,
@@ -53,11 +50,4 @@ irqreturn_t iio_pollfunc_store_time(int irq, void *p);
void iio_trigger_notify_done(struct iio_trigger *trig);
-/*
- * Two functions for common case where all that happens is a pollfunc
- * is attached and detached from a trigger
- */
-int iio_triggered_buffer_postenable(struct iio_dev *indio_dev);
-int iio_triggered_buffer_predisable(struct iio_dev *indio_dev);
-
#endif
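
On the consumer side, the reworked allocation macro records both the parent device and the owning module, and iio_alloc_pollfunc() is now checked as a printf-style function. A registration sketch for a hypothetical driver-owned trigger:

	struct iio_trigger *trig;
	int ret;

	trig = devm_iio_trigger_alloc(&spi->dev, "%s-trigger",
				      indio_dev->name);
	if (!trig)
		return -ENOMEM;

	trig->ops = &foo_trigger_ops;	/* hypothetical ops structure */
	iio_trigger_set_drvdata(trig, indio_dev);

	ret = devm_iio_trigger_register(&spi->dev, trig);
	if (ret)
		return ret;
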
diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h
index 30145616773d..29e1fe146879 100644
--- a/include/linux/iio/triggered_buffer.h
+++ b/include/linux/iio/triggered_buffer.h
@@ -1,23 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IIO_TRIGGERED_BUFFER_H_
#define _LINUX_IIO_TRIGGERED_BUFFER_H_
+#include <linux/iio/buffer.h>
#include <linux/interrupt.h>
struct iio_dev;
+struct iio_dev_attr;
struct iio_buffer_setup_ops;
-int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
+int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev,
irqreturn_t (*h)(int irq, void *p),
irqreturn_t (*thread)(int irq, void *p),
- const struct iio_buffer_setup_ops *setup_ops);
+ enum iio_buffer_direction direction,
+ const struct iio_buffer_setup_ops *setup_ops,
+ const struct iio_dev_attr **buffer_attrs);
void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev);
-int devm_iio_triggered_buffer_setup(struct device *dev,
- struct iio_dev *indio_dev,
- irqreturn_t (*h)(int irq, void *p),
- irqreturn_t (*thread)(int irq, void *p),
- const struct iio_buffer_setup_ops *ops);
-void devm_iio_triggered_buffer_cleanup(struct device *dev,
- struct iio_dev *indio_dev);
+#define iio_triggered_buffer_setup(indio_dev, h, thread, setup_ops) \
+ iio_triggered_buffer_setup_ext((indio_dev), (h), (thread), \
+ IIO_BUFFER_DIRECTION_IN, (setup_ops), \
+ NULL)
+
+int devm_iio_triggered_buffer_setup_ext(struct device *dev,
+ struct iio_dev *indio_dev,
+ irqreturn_t (*h)(int irq, void *p),
+ irqreturn_t (*thread)(int irq, void *p),
+ enum iio_buffer_direction direction,
+ const struct iio_buffer_setup_ops *ops,
+ const struct iio_dev_attr **buffer_attrs);
+
+#define devm_iio_triggered_buffer_setup(dev, indio_dev, h, thread, setup_ops) \
+ devm_iio_triggered_buffer_setup_ext((dev), (indio_dev), (h), (thread), \
+ IIO_BUFFER_DIRECTION_IN, \
+ (setup_ops), NULL)
#endif
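
The _ext variants add a buffer direction plus optional attributes, while the old entry points become wrappers that keep existing callers source-compatible. The common input-direction case stays a single call — the handler and ops names are placeholders:

	ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
					      iio_pollfunc_store_time,
					      foo_trigger_handler,
					      &foo_buffer_setup_ops);
	if (ret)
		return ret;
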
diff --git a/include/linux/iio/triggered_event.h b/include/linux/iio/triggered_event.h
index 8fe8537085bb..13250fd99745 100644
--- a/include/linux/iio/triggered_event.h
+++ b/include/linux/iio/triggered_event.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IIO_TRIGGERED_EVENT_H_
#define _LINUX_IIO_TRIGGERED_EVENT_H_
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 2aa7b6384d64..82faa98c719a 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* industrial I/O data types needed both in and out of kernel
*
* Copyright (c) 2008 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef _IIO_TYPES_H_
@@ -19,6 +16,9 @@ enum iio_event_info {
IIO_EV_INFO_PERIOD,
IIO_EV_INFO_HIGH_PASS_FILTER_3DB,
IIO_EV_INFO_LOW_PASS_FILTER_3DB,
+ IIO_EV_INFO_TIMEOUT,
+ IIO_EV_INFO_RESET_TIMEOUT,
+ IIO_EV_INFO_TAP2_MIN_DELAY,
};
#define IIO_VAL_INT 1
@@ -26,12 +26,46 @@ enum iio_event_info {
#define IIO_VAL_INT_PLUS_NANO 3
#define IIO_VAL_INT_PLUS_MICRO_DB 4
#define IIO_VAL_INT_MULTIPLE 5
+#define IIO_VAL_INT_64 6 /* 64-bit data split between val (lower 32 bits) and val2 (upper) */
#define IIO_VAL_FRACTIONAL 10
#define IIO_VAL_FRACTIONAL_LOG2 11
+#define IIO_VAL_CHAR 12
enum iio_available_type {
IIO_AVAIL_LIST,
IIO_AVAIL_RANGE,
};
+enum iio_chan_info_enum {
+ IIO_CHAN_INFO_RAW = 0,
+ IIO_CHAN_INFO_PROCESSED,
+ IIO_CHAN_INFO_SCALE,
+ IIO_CHAN_INFO_OFFSET,
+ IIO_CHAN_INFO_CALIBSCALE,
+ IIO_CHAN_INFO_CALIBBIAS,
+ IIO_CHAN_INFO_PEAK,
+ IIO_CHAN_INFO_PEAK_SCALE,
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
+ IIO_CHAN_INFO_AVERAGE_RAW,
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
+ IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY,
+ IIO_CHAN_INFO_SAMP_FREQ,
+ IIO_CHAN_INFO_FREQUENCY,
+ IIO_CHAN_INFO_PHASE,
+ IIO_CHAN_INFO_HARDWAREGAIN,
+ IIO_CHAN_INFO_HYSTERESIS,
+ IIO_CHAN_INFO_HYSTERESIS_RELATIVE,
+ IIO_CHAN_INFO_INT_TIME,
+ IIO_CHAN_INFO_ENABLE,
+ IIO_CHAN_INFO_CALIBHEIGHT,
+ IIO_CHAN_INFO_CALIBWEIGHT,
+ IIO_CHAN_INFO_DEBOUNCE_COUNT,
+ IIO_CHAN_INFO_DEBOUNCE_TIME,
+ IIO_CHAN_INFO_CALIBEMISSIVITY,
+ IIO_CHAN_INFO_OVERSAMPLING_RATIO,
+ IIO_CHAN_INFO_THERMOCOUPLE_TYPE,
+ IIO_CHAN_INFO_CALIBAMBIENT,
+ IIO_CHAN_INFO_ZEROPOINT,
+};
+
#endif /* _IIO_TYPES_H_ */
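
These value-type codes are returned from read_raw()-style callbacks to tell the core how to format *val/*val2. A sketch of a hypothetical read_raw() using IIO_VAL_FRACTIONAL and the new IIO_VAL_INT_64 — the hardware accessor is invented:

static int foo_read_raw(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val, int *val2, long mask)
{
	switch (mask) {
	case IIO_CHAN_INFO_SCALE:
		*val = 2500;	/* scale = 2500/4096 mV per LSB */
		*val2 = 4096;
		return IIO_VAL_FRACTIONAL;
	case IIO_CHAN_INFO_RAW: {
		u64 raw = foo_hw_read(indio_dev);	/* hypothetical */

		*val = lower_32_bits(raw);
		*val2 = upper_32_bits(raw);
		return IIO_VAL_INT_64;
	}
	default:
		return -EINVAL;
	}
}
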
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 0e4647e0eb60..86b57757c7b1 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -1,55 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2008 IBM Corporation
* Author: Mimi Zohar <zohar@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, version 2 of the License.
*/
#ifndef _LINUX_IMA_H
#define _LINUX_IMA_H
+#include <linux/kernel_read_file.h>
#include <linux/fs.h>
+#include <linux/security.h>
#include <linux/kexec.h>
+#include <crypto/hash_info.h>
struct linux_binprm;
#ifdef CONFIG_IMA
+extern enum hash_algo ima_get_current_hash_algo(void);
extern int ima_bprm_check(struct linux_binprm *bprm);
-extern int ima_file_check(struct file *file, int mask, int opened);
+extern int ima_file_check(struct file *file, int mask);
+extern void ima_post_create_tmpfile(struct mnt_idmap *idmap,
+ struct inode *inode);
extern void ima_file_free(struct file *file);
-extern int ima_file_mmap(struct file *file, unsigned long prot);
-extern int ima_read_file(struct file *file, enum kernel_read_file_id id);
+extern int ima_file_mmap(struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags);
+extern int ima_file_mprotect(struct vm_area_struct *vma, unsigned long prot);
+extern int ima_load_data(enum kernel_load_data_id id, bool contents);
+extern int ima_post_load_data(char *buf, loff_t size,
+ enum kernel_load_data_id id, char *description);
+extern int ima_read_file(struct file *file, enum kernel_read_file_id id,
+ bool contents);
extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
enum kernel_read_file_id id);
-extern void ima_post_path_mknod(struct dentry *dentry);
+extern void ima_post_path_mknod(struct mnt_idmap *idmap,
+ struct dentry *dentry);
+extern int ima_file_hash(struct file *file, char *buf, size_t buf_size);
+extern int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size);
+extern void ima_kexec_cmdline(int kernel_fd, const void *buf, int size);
+extern int ima_measure_critical_data(const char *event_label,
+ const char *event_name,
+ const void *buf, size_t buf_len,
+ bool hash, u8 *digest, size_t digest_len);
+
+#ifdef CONFIG_IMA_APPRAISE_BOOTPARAM
+extern void ima_appraise_parse_cmdline(void);
+#else
+static inline void ima_appraise_parse_cmdline(void) {}
+#endif
#ifdef CONFIG_IMA_KEXEC
extern void ima_add_kexec_buffer(struct kimage *image);
#endif
#else
+static inline enum hash_algo ima_get_current_hash_algo(void)
+{
+ return HASH_ALGO__LAST;
+}
+
static inline int ima_bprm_check(struct linux_binprm *bprm)
{
return 0;
}
-static inline int ima_file_check(struct file *file, int mask, int opened)
+static inline int ima_file_check(struct file *file, int mask)
{
return 0;
}
+static inline void ima_post_create_tmpfile(struct mnt_idmap *idmap,
+ struct inode *inode)
+{
+}
+
static inline void ima_file_free(struct file *file)
{
return;
}
-static inline int ima_file_mmap(struct file *file, unsigned long prot)
+static inline int ima_file_mmap(struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags)
{
return 0;
}
-static inline int ima_read_file(struct file *file, enum kernel_read_file_id id)
+static inline int ima_file_mprotect(struct vm_area_struct *vma,
+ unsigned long prot)
+{
+ return 0;
+}
+
+static inline int ima_load_data(enum kernel_load_data_id id, bool contents)
+{
+ return 0;
+}
+
+static inline int ima_post_load_data(char *buf, loff_t size,
+ enum kernel_load_data_id id,
+ char *description)
+{
+ return 0;
+}
+
+static inline int ima_read_file(struct file *file, enum kernel_read_file_id id,
+ bool contents)
{
return 0;
}
@@ -60,13 +113,55 @@ static inline int ima_post_read_file(struct file *file, void *buf, loff_t size,
return 0;
}
-static inline void ima_post_path_mknod(struct dentry *dentry)
+static inline void ima_post_path_mknod(struct mnt_idmap *idmap,
+ struct dentry *dentry)
{
return;
}
+static inline int ima_file_hash(struct file *file, char *buf, size_t buf_size)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void ima_kexec_cmdline(int kernel_fd, const void *buf, int size) {}
+
+static inline int ima_measure_critical_data(const char *event_label,
+ const char *event_name,
+ const void *buf, size_t buf_len,
+ bool hash, u8 *digest,
+ size_t digest_len)
+{
+ return -ENOENT;
+}
+
#endif /* CONFIG_IMA */
+#ifdef CONFIG_HAVE_IMA_KEXEC
+int __init ima_free_kexec_buffer(void);
+int __init ima_get_kexec_buffer(void **addr, size_t *size);
+#endif
+
+#ifdef CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT
+extern bool arch_ima_get_secureboot(void);
+extern const char * const *arch_get_ima_policy(void);
+#else
+static inline bool arch_ima_get_secureboot(void)
+{
+ return false;
+}
+
+static inline const char * const *arch_get_ima_policy(void)
+{
+ return NULL;
+}
+#endif
+
#ifndef CONFIG_IMA_KEXEC
struct kimage;
@@ -74,11 +169,35 @@ static inline void ima_add_kexec_buffer(struct kimage *image)
{}
#endif
+#ifdef CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS
+extern void ima_post_key_create_or_update(struct key *keyring,
+ struct key *key,
+ const void *payload, size_t plen,
+ unsigned long flags, bool create);
+#else
+static inline void ima_post_key_create_or_update(struct key *keyring,
+ struct key *key,
+ const void *payload,
+ size_t plen,
+ unsigned long flags,
+ bool create) {}
+#endif /* CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS */
+
#ifdef CONFIG_IMA_APPRAISE
extern bool is_ima_appraise_enabled(void);
-extern void ima_inode_post_setattr(struct dentry *dentry);
+extern void ima_inode_post_setattr(struct mnt_idmap *idmap,
+ struct dentry *dentry);
extern int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
const void *xattr_value, size_t xattr_value_len);
+extern int ima_inode_set_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name,
+ struct posix_acl *kacl);
+static inline int ima_inode_remove_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ return ima_inode_set_acl(idmap, dentry, acl_name, NULL);
+}
extern int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name);
#else
static inline bool is_ima_appraise_enabled(void)
@@ -86,7 +205,8 @@ static inline bool is_ima_appraise_enabled(void)
return 0;
}
-static inline void ima_inode_post_setattr(struct dentry *dentry)
+static inline void ima_inode_post_setattr(struct mnt_idmap *idmap,
+ struct dentry *dentry)
{
return;
}
@@ -99,10 +219,34 @@ static inline int ima_inode_setxattr(struct dentry *dentry,
return 0;
}
+static inline int ima_inode_set_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name,
+ struct posix_acl *kacl)
+{
+
+ return 0;
+}
+
static inline int ima_inode_removexattr(struct dentry *dentry,
const char *xattr_name)
{
return 0;
}
+
+static inline int ima_inode_remove_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ return 0;
+}
#endif /* CONFIG_IMA_APPRAISE */
+
+#if defined(CONFIG_IMA_APPRAISE) && defined(CONFIG_INTEGRITY_TRUSTED_KEYRING)
+extern bool ima_appraise_signature(enum kernel_read_file_id func);
+#else
+static inline bool ima_appraise_signature(enum kernel_read_file_id func)
+{
+ return false;
+}
+#endif /* CONFIG_IMA_APPRAISE && CONFIG_INTEGRITY_TRUSTED_KEYRING */
#endif /* _LINUX_IMA_H */
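
ima_measure_critical_data() lets a subsystem record security-relevant state in the IMA measurement list, optionally returning the digest. A hedged sketch of a caller — the event label/name and policy buffer are invented, and note the !CONFIG_IMA stub above returns -ENOENT:

	u8 digest[SHA256_DIGEST_SIZE];	/* assumes <crypto/sha2.h> */
	int ret;

	ret = ima_measure_critical_data("foo_subsys", "foo_policy_update",
					policy_buf, policy_len,
					true, digest, sizeof(digest));
	if (ret && ret != -ENOENT)
		pr_warn("foo: IMA measurement failed: %d\n", ret);
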
diff --git a/include/linux/imx-media.h b/include/linux/imx-media.h
index 77221ecad6fc..e017e1779118 100644
--- a/include/linux/imx-media.h
+++ b/include/linux/imx-media.h
@@ -1,10 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 2014-2017 Mentor Graphics Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version
*/
#ifndef __LINUX_IMX_MEDIA_H__
diff --git a/include/linux/in.h b/include/linux/in.h
index 31b493734763..1873ef642605 100644
--- a/include/linux/in.h
+++ b/include/linux/in.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
@@ -9,11 +10,6 @@
*
* Authors: Original taken from the GNU Project <netinet/in.h> file.
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_IN_H
#define _LINUX_IN_H
@@ -60,9 +56,14 @@ static inline bool ipv4_is_lbcast(__be32 addr)
return addr == htonl(INADDR_BROADCAST);
}
+static inline bool ipv4_is_all_snoopers(__be32 addr)
+{
+ return addr == htonl(INADDR_ALLSNOOPERS_GROUP);
+}
+
static inline bool ipv4_is_zeronet(__be32 addr)
{
- return (addr & htonl(0xff000000)) == htonl(0x00000000);
+ return (addr == 0);
}
/* Special-Use IPv4 Addresses (RFC3330) */
diff --git a/include/linux/in6.h b/include/linux/in6.h
index 34edf1f6c9a3..0777a21cbf86 100644
--- a/include/linux/in6.h
+++ b/include/linux/in6.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Types and definitions for AF_INET6
* Linux INET6 implementation
@@ -11,11 +12,6 @@
*
* Advanced Sockets API for IPv6
* <draft-stevens-advanced-api-00.txt>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_IN6_H
#define _LINUX_IN6_H
diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
new file mode 100644
index 000000000000..c1c76a70a6ce
--- /dev/null
+++ b/include/linux/indirect_call_wrapper.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_INDIRECT_CALL_WRAPPER_H
+#define _LINUX_INDIRECT_CALL_WRAPPER_H
+
+#ifdef CONFIG_RETPOLINE
+
+/*
+ * INDIRECT_CALL_$NR - wrapper for indirect calls with $NR known builtins
+ * @f: function pointer
+ * @f$NR: builtin function names, up to $NR of them
+ * @__VA_ARGS__: arguments for @f
+ *
+ * Avoids retpoline overhead for known builtins: @f is checked against each of
+ * them in the given order and, on a match, the matching builtin is invoked
+ * directly. If none matches, this falls back to the indirect call.
+ */
+#define INDIRECT_CALL_1(f, f1, ...) \
+ ({ \
+ likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); \
+ })
+#define INDIRECT_CALL_2(f, f2, f1, ...) \
+ ({ \
+ likely(f == f2) ? f2(__VA_ARGS__) : \
+ INDIRECT_CALL_1(f, f1, __VA_ARGS__); \
+ })
+#define INDIRECT_CALL_3(f, f3, f2, f1, ...) \
+ ({ \
+ likely(f == f3) ? f3(__VA_ARGS__) : \
+ INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__); \
+ })
+#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) \
+ ({ \
+ likely(f == f4) ? f4(__VA_ARGS__) : \
+ INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__); \
+ })
+
+#define INDIRECT_CALLABLE_DECLARE(f) f
+#define INDIRECT_CALLABLE_SCOPE
+#define EXPORT_INDIRECT_CALLABLE(f) EXPORT_SYMBOL(f)
+
+#else
+#define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALL_3(f, f3, f2, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALLABLE_DECLARE(f)
+#define INDIRECT_CALLABLE_SCOPE static
+#define EXPORT_INDIRECT_CALLABLE(f)
+#endif
+
+/*
+ * We can use INDIRECT_CALL_$NR for ipv6-related functions only if ipv6 is
+ * builtin; this macro simplifies dealing with indirect calls that have only
+ * ipv4/ipv6 alternatives.
+ */
+#if IS_BUILTIN(CONFIG_IPV6)
+#define INDIRECT_CALL_INET(f, f2, f1, ...) \
+ INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
+#elif IS_ENABLED(CONFIG_INET)
+#define INDIRECT_CALL_INET(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
+#else
+#define INDIRECT_CALL_INET(f, f2, f1, ...) f(__VA_ARGS__)
+#endif
+
+#if IS_ENABLED(CONFIG_INET)
+#define INDIRECT_CALL_INET_1(f, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
+#else
+#define INDIRECT_CALL_INET_1(f, f1, ...) f(__VA_ARGS__)
+#endif
+
+#endif
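
The wrappers trade a retpolined indirect call for a few direct compares against known candidates. A sketch with two hypothetical builtin handlers:

static int ipv4_handle(struct sk_buff *skb);	/* hypothetical builtins */
static int ipv6_handle(struct sk_buff *skb);

static int foo_deliver(struct foo_proto *p, struct sk_buff *skb)
{
	/* p->handler is compared against ipv6_handle, then ipv4_handle;
	 * only on a miss does this fall back to the indirect call */
	return INDIRECT_CALL_2(p->handler, ipv6_handle, ipv4_handle, skb);
}
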
diff --git a/include/linux/inet.h b/include/linux/inet.h
index 636ebe87e6f8..bd8276e96e60 100644
--- a/include/linux/inet.h
+++ b/include/linux/inet.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Swansea University Computer Society NET3
*
@@ -33,11 +34,6 @@
* $Id: udp.h,v 0.8.4.1 1992/11/10 00:17:18 bir7 Exp $
* $Id: we.c,v 0.8.4.10 1993/01/23 18:00:11 bir7 Exp $
* $Id: wereg.h,v 0.8.4.1 1992/11/10 00:17:18 bir7 Exp $
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_INET_H
#define _LINUX_INET_H
@@ -59,5 +55,6 @@ extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char
extern int inet_pton_with_scope(struct net *net, unsigned short af,
const char *src, const char *port, struct sockaddr_storage *addr);
+extern bool inet_addr_is_any(struct sockaddr *addr);
#endif /* _LINUX_INET_H */
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index 65da430e260f..84abb30a3fbb 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -1,30 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _INET_DIAG_H_
#define _INET_DIAG_H_ 1
+#include <net/netlink.h>
#include <uapi/linux/inet_diag.h>
-struct net;
-struct sock;
struct inet_hashinfo;
-struct nlattr;
-struct nlmsghdr;
-struct sk_buff;
-struct netlink_callback;
struct inet_diag_handler {
void (*dump)(struct sk_buff *skb,
struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r,
- struct nlattr *bc);
+ const struct inet_diag_req_v2 *r);
- int (*dump_one)(struct sk_buff *in_skb,
- const struct nlmsghdr *nlh,
+ int (*dump_one)(struct netlink_callback *cb,
const struct inet_diag_req_v2 *req);
void (*idiag_get_info)(struct sock *sk,
struct inet_diag_msg *r,
void *info);
+ int (*idiag_get_aux)(struct sock *sk,
+ bool net_admin,
+ struct sk_buff *skb);
+
+ size_t (*idiag_get_aux_size)(struct sock *sk,
+ bool net_admin);
+
int (*destroy)(struct sk_buff *in_skb,
const struct inet_diag_req_v2 *req);
@@ -32,18 +33,25 @@ struct inet_diag_handler {
__u16 idiag_info_size;
};
+struct bpf_sk_storage_diag;
+struct inet_diag_dump_data {
+ struct nlattr *req_nlas[__INET_DIAG_REQ_MAX];
+#define inet_diag_nla_bc req_nlas[INET_DIAG_REQ_BYTECODE]
+#define inet_diag_nla_bpf_stgs req_nlas[INET_DIAG_REQ_SK_BPF_STORAGES]
+
+ struct bpf_sk_storage_diag *bpf_stg_diag;
+};
+
struct inet_connection_sock;
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
- struct sk_buff *skb, const struct inet_diag_req_v2 *req,
- struct user_namespace *user_ns,
- u32 pid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh, bool net_admin);
+ struct sk_buff *skb, struct netlink_callback *cb,
+ const struct inet_diag_req_v2 *req,
+ u16 nlmsg_flags, bool net_admin);
void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r,
- struct nlattr *bc);
+ const struct inet_diag_req_v2 *r);
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
- struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+ struct netlink_callback *cb,
const struct inet_diag_req_v2 *req);
struct sock *inet_diag_find_one_icsk(struct net *net,
@@ -54,6 +62,23 @@ int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
+static inline size_t inet_diag_msg_attrs_size(void)
+{
+ return nla_total_size(1) /* INET_DIAG_SHUTDOWN */
+ + nla_total_size(1) /* INET_DIAG_TOS */
+#if IS_ENABLED(CONFIG_IPV6)
+ + nla_total_size(1) /* INET_DIAG_TCLASS */
+ + nla_total_size(1) /* INET_DIAG_SKV6ONLY */
+#endif
+ + nla_total_size(4) /* INET_DIAG_MARK */
+ + nla_total_size(4) /* INET_DIAG_CLASS_ID */
+#ifdef CONFIG_SOCK_CGROUP_DATA
+ + nla_total_size_64bit(sizeof(u64)) /* INET_DIAG_CGROUP_ID */
+#endif
+ + nla_total_size(sizeof(struct inet_diag_sockopt))
+ /* INET_DIAG_SOCKOPT */
+ ;
+}
int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
struct inet_diag_msg *r, int ext,
struct user_namespace *user_ns, bool net_admin);
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index fb3f809e34e4..ddb27fc0ee8c 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_INETDEVICE_H
#define _LINUX_INETDEVICE_H
@@ -23,9 +24,11 @@ struct ipv4_devconf {
struct in_device {
struct net_device *dev;
+ netdevice_tracker dev_tracker;
+
refcount_t refcnt;
int dead;
- struct in_ifaddr *ifa_list; /* IP ifaddr chain */
+ struct in_ifaddr __rcu *ifa_list;/* IP ifaddr chain */
struct ip_mc_list __rcu *mc_list; /* IP multicast filter chain */
struct ip_mc_list __rcu * __rcu *mc_hash;
@@ -36,9 +39,11 @@ struct in_device {
unsigned long mr_v1_seen;
unsigned long mr_v2_seen;
unsigned long mr_maxdelay;
- unsigned char mr_qrv;
+ unsigned long mr_qi; /* Query Interval */
+ unsigned long mr_qri; /* Query Response Interval */
+ unsigned char mr_qrv; /* Query Robustness Variable */
unsigned char mr_gq_running;
- unsigned char mr_ifc_count;
+ u32 mr_ifc_count;
struct timer_list mr_gq_timer; /* general query timer */
struct timer_list mr_ifc_timer; /* interface change timer */
@@ -92,6 +97,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
+#define IN_DEV_BFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), BC_FORWARDING)
#define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER)
#define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK)
#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \
@@ -101,7 +107,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_LOG_MARTIANS(in_dev) IN_DEV_ORCONF((in_dev), LOG_MARTIANS)
#define IN_DEV_PROXY_ARP(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP)
-#define IN_DEV_PROXY_ARP_PVLAN(in_dev) IN_DEV_CONF_GET(in_dev, PROXY_ARP_PVLAN)
+#define IN_DEV_PROXY_ARP_PVLAN(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP_PVLAN)
#define IN_DEV_SHARED_MEDIA(in_dev) IN_DEV_ORCONF((in_dev), SHARED_MEDIA)
#define IN_DEV_TX_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), SEND_REDIRECTS)
#define IN_DEV_SEC_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), \
@@ -122,25 +128,29 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS)))
#define IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) \
- IN_DEV_CONF_GET((in_dev), IGNORE_ROUTES_WITH_LINKDOWN)
+ IN_DEV_ORCONF((in_dev), IGNORE_ROUTES_WITH_LINKDOWN)
#define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER)
-#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT)
+#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ACCEPT)
#define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE)
#define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE)
#define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY)
+#define IN_DEV_ARP_EVICT_NOCARRIER(in_dev) IN_DEV_ANDCONF((in_dev), \
+ ARP_EVICT_NOCARRIER)
struct in_ifaddr {
struct hlist_node hash;
- struct in_ifaddr *ifa_next;
+ struct in_ifaddr __rcu *ifa_next;
struct in_device *ifa_dev;
struct rcu_head rcu_head;
__be32 ifa_local;
__be32 ifa_address;
__be32 ifa_mask;
+ __u32 ifa_rt_priority;
__be32 ifa_broadcast;
unsigned char ifa_scope;
unsigned char ifa_prefixlen;
+ unsigned char ifa_proto;
__u32 ifa_flags;
char ifa_label[IFNAMSIZ];
@@ -154,6 +164,7 @@ struct in_ifaddr {
struct in_validator_info {
__be32 ivi_addr;
struct in_device *ivi_dev;
+ struct netlink_ext_ack *extack;
};
int register_inetaddr_notifier(struct notifier_block *nb);
@@ -171,7 +182,16 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
}
int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
-int devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
+int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *);
+#ifdef CONFIG_INET
+int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size);
+#else
+static inline int inet_gifconf(struct net_device *dev, char __user *buf,
+ int len, int size)
+{
+ return 0;
+}
+#endif
void devinet_init(void);
struct in_device *inetdev_by_index(struct net *, int);
__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
@@ -179,7 +199,8 @@ __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst,
__be32 local, int scope);
struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
__be32 mask);
-static __inline__ bool inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
+struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr);
+static inline bool inet_ifa_match(__be32 addr, const struct in_ifaddr *ifa)
{
return !((addr^ifa->ifa_address)&ifa->ifa_mask);
}
@@ -199,14 +220,13 @@ static __inline__ bool bad_mask(__be32 mask, __be32 addr)
return false;
}
-#define for_primary_ifa(in_dev) { struct in_ifaddr *ifa; \
- for (ifa = (in_dev)->ifa_list; ifa && !(ifa->ifa_flags&IFA_F_SECONDARY); ifa = ifa->ifa_next)
-
-#define for_ifa(in_dev) { struct in_ifaddr *ifa; \
- for (ifa = (in_dev)->ifa_list; ifa; ifa = ifa->ifa_next)
-
+#define in_dev_for_each_ifa_rtnl(ifa, in_dev) \
+ for (ifa = rtnl_dereference((in_dev)->ifa_list); ifa; \
+ ifa = rtnl_dereference(ifa->ifa_next))
-#define endfor_ifa(in_dev) }
+#define in_dev_for_each_ifa_rcu(ifa, in_dev) \
+ for (ifa = rcu_dereference((in_dev)->ifa_list); ifa; \
+ ifa = rcu_dereference(ifa->ifa_next))
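
The replaced for_primary_ifa()/for_ifa()/endfor_ifa() trio opened a brace that endfor_ifa() had to close and offered no lockdep-checked RCU variant; the new macros are ordinary for-loops whose dereference matches the lock held. A minimal usage sketch with a hypothetical function name:

	/* Walk every IPv4 address on a device under RCU protection. */
	static void example_dump_addrs(struct in_device *in_dev)
	{
		struct in_ifaddr *ifa;

		rcu_read_lock();
		in_dev_for_each_ifa_rcu(ifa, in_dev)
			pr_info("%pI4/%u\n", &ifa->ifa_local, ifa->ifa_prefixlen);
		rcu_read_unlock();
	}
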
static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev)
{
@@ -230,6 +250,20 @@ static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev)
return rtnl_dereference(dev->ip_ptr);
}
+/* called with rcu_read_lock or rtnl held */
+static inline bool ip_ignore_linkdown(const struct net_device *dev)
+{
+ struct in_device *in_dev;
+ bool rc = false;
+
+ in_dev = rcu_dereference_rtnl(dev->ip_ptr);
+ if (in_dev &&
+ IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
+ rc = true;
+
+ return rc;
+}
+
static inline struct neigh_parms *__in_dev_arp_parms_get_rcu(const struct net_device *dev)
{
struct in_device *in_dev = __in_dev_get_rcu(dev);
diff --git a/include/linux/init.h b/include/linux/init.h
index 94769d687cf0..c5fe6d26f5b1 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -1,9 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_INIT_H
#define _LINUX_INIT_H
+#include <linux/build_bug.h>
#include <linux/compiler.h>
+#include <linux/stringify.h>
#include <linux/types.h>
+/* Built-in __init functions needn't be compiled with retpoline */
+#if defined(__noretpoline) && !defined(MODULE)
+#define __noinitretpoline __noretpoline
+#else
+#define __noinitretpoline
+#endif
+
/* These macros are used to mark some functions or
* initialized data (doesn't apply to uninitialized data)
* as `initialization' functions. The kernel can take this
@@ -39,11 +49,11 @@
/* These are for everybody (although not all archs will actually
discard it in modules) */
-#define __init __section(.init.text) __cold __inittrace __latent_entropy
-#define __initdata __section(.init.data)
-#define __initconst __section(.init.rodata)
-#define __exitdata __section(.exit.data)
-#define __exit_call __used __section(.exitcall.exit)
+#define __init __section(".init.text") __cold __latent_entropy __noinitretpoline
+#define __initdata __section(".init.data")
+#define __initconst __section(".init.rodata")
+#define __exitdata __section(".exit.data")
+#define __exit_call __used __section(".exitcall.exit")
/*
* modpost check for section mismatches during the kernel build.
@@ -62,28 +72,26 @@
*
* The markers follow same syntax rules as __init / __initdata.
*/
-#define __ref __section(.ref.text) noinline
-#define __refdata __section(.ref.data)
-#define __refconst __section(.ref.rodata)
+#define __ref __section(".ref.text") noinline
+#define __refdata __section(".ref.data")
+#define __refconst __section(".ref.rodata")
#ifdef MODULE
#define __exitused
-#define __inittrace notrace
#else
#define __exitused __used
-#define __inittrace
#endif
-#define __exit __section(.exit.text) __exitused __cold notrace
+#define __exit __section(".exit.text") __exitused __cold notrace
/* Used for MEMORY_HOTPLUG */
-#define __meminit __section(.meminit.text) __cold notrace \
+#define __meminit __section(".meminit.text") __cold notrace \
__latent_entropy
-#define __meminitdata __section(.meminit.data)
-#define __meminitconst __section(.meminit.rodata)
-#define __memexit __section(.memexit.text) __exitused __cold notrace
-#define __memexitdata __section(.memexit.data)
-#define __memexitconst __section(.memexit.rodata)
+#define __meminitdata __section(".meminit.data")
+#define __meminitconst __section(".meminit.rodata")
+#define __memexit __section(".memexit.text") __exitused __cold notrace
+#define __memexitdata __section(".memexit.data")
+#define __memexitconst __section(".memexit.rodata")
/* For assembly routines */
#define __HEAD .section ".head.text","ax"
@@ -110,23 +118,41 @@
typedef int (*initcall_t)(void);
typedef void (*exitcall_t)(void);
-extern initcall_t __con_initcall_start[], __con_initcall_end[];
-extern initcall_t __security_initcall_start[], __security_initcall_end[];
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+typedef int initcall_entry_t;
+
+static inline initcall_t initcall_from_entry(initcall_entry_t *entry)
+{
+ return offset_to_ptr(entry);
+}
+#else
+typedef initcall_t initcall_entry_t;
+
+static inline initcall_t initcall_from_entry(initcall_entry_t *entry)
+{
+ return *entry;
+}
+#endif
+
+extern initcall_entry_t __con_initcall_start[], __con_initcall_end[];
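
With CONFIG_HAVE_ARCH_PREL32_RELOCATIONS each table entry is a 32-bit place-relative offset rather than a full pointer, halving the tables on 64-bit and avoiding load-time relocations; initcall_from_entry() hides the representation from callers. A sketch of a table walker in the shape used by init/main.c (illustrative, not the exact kernel code):

	static void __init example_run_con_initcalls(void)
	{
		initcall_entry_t *fn;

		for (fn = __con_initcall_start; fn < __con_initcall_end; fn++)
			do_one_initcall(initcall_from_entry(fn));
	}
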
-/* Used for contructor calls. */
+/* Used for constructor calls. */
typedef void (*ctor_fn_t)(void);
+struct file_system_type;
+
/* Defined in init/main.c */
extern int do_one_initcall(initcall_t fn);
extern char __initdata boot_command_line[];
extern char *saved_command_line;
+extern unsigned int saved_command_line_len;
extern unsigned int reset_devices;
/* used by init/main.c */
void setup_arch(char **);
void prepare_namespace(void);
-void __init load_default_modules(void);
-int __init init_rootfs(void);
+void __init init_rootfs(void);
+extern struct file_system_type rootfs_fs_type;
#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
extern bool rodata_enabled;
@@ -161,9 +187,82 @@ extern bool initcall_debug;
* as KEEP() in the linker script.
*/
-#define __define_initcall(fn, id) \
- static initcall_t __initcall_##fn##id __used \
- __attribute__((__section__(".initcall" #id ".init"))) = fn;
+/* Format: <modname>__<counter>_<line>_<fn> */
+#define __initcall_id(fn) \
+ __PASTE(__KBUILD_MODNAME, \
+ __PASTE(__, \
+ __PASTE(__COUNTER__, \
+ __PASTE(_, \
+ __PASTE(__LINE__, \
+ __PASTE(_, fn))))))
+
+/* Format: __<prefix>__<iid><id> */
+#define __initcall_name(prefix, __iid, id) \
+ __PASTE(__, \
+ __PASTE(prefix, \
+ __PASTE(__, \
+ __PASTE(__iid, id))))
+
+#ifdef CONFIG_LTO_CLANG
+/*
+ * With LTO, the compiler doesn't necessarily obey link order for
+ * initcalls. In order to preserve the correct order, we add each
+ * variable into its own section and generate a linker script (in
+ * scripts/link-vmlinux.sh) to specify the order of the sections.
+ */
+#define __initcall_section(__sec, __iid) \
+ #__sec ".init.." #__iid
+
+/*
+ * With LTO, the compiler can rename static functions to avoid
+ * global naming collisions. We use a global stub function for
+ * initcalls to create a stable symbol name whose address can be
+ * taken in inline assembly when PREL32 relocations are used.
+ */
+#define __initcall_stub(fn, __iid, id) \
+ __initcall_name(initstub, __iid, id)
+
+#define __define_initcall_stub(__stub, fn) \
+ int __init __stub(void); \
+ int __init __stub(void) \
+ { \
+ return fn(); \
+ } \
+ __ADDRESSABLE(__stub)
+#else
+#define __initcall_section(__sec, __iid) \
+ #__sec ".init"
+
+#define __initcall_stub(fn, __iid, id) fn
+
+#define __define_initcall_stub(__stub, fn) \
+ __ADDRESSABLE(fn)
+#endif
+
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+#define ____define_initcall(fn, __stub, __name, __sec) \
+ __define_initcall_stub(__stub, fn) \
+ asm(".section \"" __sec "\", \"a\" \n" \
+ __stringify(__name) ": \n" \
+ ".long " __stringify(__stub) " - . \n" \
+ ".previous \n"); \
+ static_assert(__same_type(initcall_t, &fn));
+#else
+#define ____define_initcall(fn, __unused, __name, __sec) \
+ static initcall_t __name __used \
+ __attribute__((__section__(__sec))) = fn;
+#endif
+
+#define __unique_initcall(fn, id, __sec, __iid) \
+ ____define_initcall(fn, \
+ __initcall_stub(fn, __iid, id), \
+ __initcall_name(initcall, __iid, id), \
+ __initcall_section(__sec, __iid))
+
+#define ___define_initcall(fn, id, __sec) \
+ __unique_initcall(fn, id, __sec, __initcall_id(fn))
+
+#define __define_initcall(fn, id) ___define_initcall(fn, id, .initcall##id)
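
Callers are unaffected: the level wrappers such as device_initcall() still expand to __define_initcall(), which now routes through the unique-id and stub machinery above so ordering survives LTO and the PREL32 table layout. A minimal sketch with a hypothetical driver function:

	static int __init example_init(void)
	{
		pr_info("example loaded\n");
		return 0;
	}
	/* device_initcall(fn) is __define_initcall(fn, 6) */
	device_initcall(example_init);
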
/*
* Early initcalls run before initializing SMP.
@@ -202,13 +301,7 @@ extern bool initcall_debug;
#define __exitcall(fn) \
static exitcall_t __exitcall_##fn __exit_call = fn
-#define console_initcall(fn) \
- static initcall_t __initcall_##fn \
- __used __section(.con_initcall.init) = fn
-
-#define security_initcall(fn) \
- static initcall_t __initcall_##fn \
- __used __section(.security_initcall.init) = fn
+#define console_initcall(fn) ___define_initcall(fn, con, .con_initcall)
struct obs_kernel_param {
const char *str;
@@ -226,16 +319,23 @@ struct obs_kernel_param {
static const char __setup_str_##unique_id[] __initconst \
__aligned(1) = str; \
static struct obs_kernel_param __setup_##unique_id \
- __used __section(.init.setup) \
- __attribute__((aligned((sizeof(long))))) \
+ __used __section(".init.setup") \
+ __aligned(__alignof__(struct obs_kernel_param)) \
= { __setup_str_##unique_id, fn, early }
+/*
+ * NOTE: __setup functions return values:
+ * @fn returns 1 (or non-zero) if the option argument is "handled"
+ * and returns 0 if the option argument is "not handled".
+ */
#define __setup(str, fn) \
__setup_param(str, fn, fn, 0)
/*
- * NOTE: fn is as per module_param, not __setup!
- * Emits warning if fn returns non-zero.
+ * NOTE: @fn is as per module_param, not __setup!
+ * I.e., @fn returns 0 for no error or non-zero for error
+ * (possibly @fn returns a -errno value, but it does not matter).
+ * Emits warning if @fn returns non-zero.
*/
#define early_param(str, fn) \
__setup_param(str, fn, fn, 1)
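
Note the opposite return conventions: a __setup() handler returns 1 when it consumed the option, while an early_param() handler returns 0 on success. A hedged pair of examples with hypothetical option names:

	static bool example_feature_off __initdata;

	/* __setup: return 1 to mark "noexample" as handled. */
	static int __init parse_noexample(char *str)
	{
		example_feature_off = true;
		return 1;
	}
	__setup("noexample", parse_noexample);

	/* early_param: return 0 for success, as with module_param. */
	static int __init parse_example_early(char *str)
	{
		example_feature_off = true;
		return 0;
	}
	early_param("example_early", parse_example_early);
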
@@ -249,14 +349,14 @@ struct obs_kernel_param {
var = 1; \
return 0; \
} \
- __setup_param(str_on, parse_##var##_on, parse_##var##_on, 1); \
+ early_param(str_on, parse_##var##_on); \
\
static int __init parse_##var##_off(char *arg) \
{ \
var = 0; \
return 0; \
} \
- __setup_param(str_off, parse_##var##_off, parse_##var##_off, 1)
+ early_param(str_off, parse_##var##_off)
/* Relies on boot_command_line being set */
void __init parse_early_param(void);
@@ -270,7 +370,7 @@ void __init parse_early_options(char *cmdline);
#endif
/* Data marked not to be saved by software suspend */
-#define __nosavedata __section(.data..nosave)
+#define __nosavedata __section(".data..nosave")
#ifdef MODULE
#define __exit_p(x) x
diff --git a/include/linux/init_ohci1394_dma.h b/include/linux/init_ohci1394_dma.h
index 3c03a4bba5e4..228afca432ac 100644
--- a/include/linux/init_ohci1394_dma.h
+++ b/include/linux/init_ohci1394_dma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
extern int __initdata init_ohci1394_dma_early;
extern void __init init_ohci1394_dma_on_all_controllers(void);
diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h
new file mode 100644
index 000000000000..92045d18cbfc
--- /dev/null
+++ b/include/linux/init_syscalls.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+int __init init_mount(const char *dev_name, const char *dir_name,
+ const char *type_page, unsigned long flags, void *data_page);
+int __init init_umount(const char *name, int flags);
+int __init init_chdir(const char *filename);
+int __init init_chroot(const char *filename);
+int __init init_chown(const char *filename, uid_t user, gid_t group, int flags);
+int __init init_chmod(const char *filename, umode_t mode);
+int __init init_eaccess(const char *filename);
+int __init init_stat(const char *filename, struct kstat *stat, int flags);
+int __init init_mknod(const char *filename, umode_t mode, unsigned int dev);
+int __init init_link(const char *oldname, const char *newname);
+int __init init_symlink(const char *oldname, const char *newname);
+int __init init_unlink(const char *pathname);
+int __init init_mkdir(const char *pathname, umode_t mode);
+int __init init_rmdir(const char *pathname);
+int __init init_utimes(char *filename, struct timespec64 *ts);
+int __init init_dup(struct file *file);
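
These are __init-only, kernel-pointer counterparts of the matching syscalls, letting early boot code (initramfs unpacking, default root mounting) operate on the filesystem without __user pointers. A sketch of the intended call pattern, with hypothetical device and filesystem names:

	static int __init example_mount_root(void)
	{
		int err;

		err = init_mkdir("/root", 0700);
		if (err && err != -EEXIST)
			return err;

		return init_mount("/dev/root", "/root", "ext4", MS_RDONLY, NULL);
	}
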
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index a2f6707e9fc0..40fc5813cf93 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX__INIT_TASK_H
#define _LINUX__INIT_TASK_H
@@ -12,6 +13,7 @@
#include <linux/securebits.h>
#include <linux/seqlock.h>
#include <linux/rbtree.h>
+#include <linux/refcount.h>
#include <linux/sched/autogroup.h>
#include <net/net_namespace.h>
#include <linux/sched/rt.h>
@@ -20,22 +22,10 @@
#include <asm/thread_info.h>
-#ifdef CONFIG_SMP
-# define INIT_PUSHABLE_TASKS(tsk) \
- .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO),
-#else
-# define INIT_PUSHABLE_TASKS(tsk)
-#endif
-
extern struct files_struct init_files;
extern struct fs_struct init_fs;
-
-#ifdef CONFIG_CPUSETS
-#define INIT_CPUSET_SEQ(tsk) \
- .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
-#else
-#define INIT_CPUSET_SEQ(tsk)
-#endif
+extern struct nsproxy init_nsproxy;
+extern struct cred init_cred;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define INIT_PREV_CPUTIME(x) .prev_cputime = { \
@@ -45,271 +35,16 @@ extern struct fs_struct init_fs;
#define INIT_PREV_CPUTIME(x)
#endif
-#ifdef CONFIG_POSIX_TIMERS
-#define INIT_POSIX_TIMERS(s) \
- .posix_timers = LIST_HEAD_INIT(s.posix_timers),
-#define INIT_CPU_TIMERS(s) \
- .cpu_timers = { \
- LIST_HEAD_INIT(s.cpu_timers[0]), \
- LIST_HEAD_INIT(s.cpu_timers[1]), \
- LIST_HEAD_INIT(s.cpu_timers[2]), \
- },
-#define INIT_CPUTIMER(s) \
- .cputimer = { \
- .cputime_atomic = INIT_CPUTIME_ATOMIC, \
- .running = false, \
- .checking_timer = false, \
- },
-#else
-#define INIT_POSIX_TIMERS(s)
-#define INIT_CPU_TIMERS(s)
-#define INIT_CPUTIMER(s)
-#endif
-
-#define INIT_SIGNALS(sig) { \
- .nr_threads = 1, \
- .thread_head = LIST_HEAD_INIT(init_task.thread_node), \
- .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
- .shared_pending = { \
- .list = LIST_HEAD_INIT(sig.shared_pending.list), \
- .signal = {{0}}}, \
- INIT_POSIX_TIMERS(sig) \
- INIT_CPU_TIMERS(sig) \
- .rlim = INIT_RLIMITS, \
- INIT_CPUTIMER(sig) \
- INIT_PREV_CPUTIME(sig) \
- .cred_guard_mutex = \
- __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
-}
-
-extern struct nsproxy init_nsproxy;
-
-#define INIT_SIGHAND(sighand) { \
- .count = ATOMIC_INIT(1), \
- .action = { { { .sa_handler = SIG_DFL, } }, }, \
- .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \
- .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \
-}
-
-extern struct group_info init_groups;
-
-#define INIT_STRUCT_PID { \
- .count = ATOMIC_INIT(1), \
- .tasks = { \
- { .first = NULL }, \
- { .first = NULL }, \
- { .first = NULL }, \
- }, \
- .level = 0, \
- .numbers = { { \
- .nr = 0, \
- .ns = &init_pid_ns, \
- .pid_chain = { .next = NULL, .pprev = NULL }, \
- }, } \
-}
-
-#define INIT_PID_LINK(type) \
-{ \
- .node = { \
- .next = NULL, \
- .pprev = NULL, \
- }, \
- .pid = &init_struct_pid, \
-}
-
-#ifdef CONFIG_AUDITSYSCALL
-#define INIT_IDS \
- .loginuid = INVALID_UID, \
- .sessionid = (unsigned int)-1,
-#else
-#define INIT_IDS
-#endif
-
-#ifdef CONFIG_PREEMPT_RCU
-#define INIT_TASK_RCU_TREE_PREEMPT() \
- .rcu_blocked_node = NULL,
-#else
-#define INIT_TASK_RCU_TREE_PREEMPT(tsk)
-#endif
-#ifdef CONFIG_PREEMPT_RCU
-#define INIT_TASK_RCU_PREEMPT(tsk) \
- .rcu_read_lock_nesting = 0, \
- .rcu_read_unlock_special.s = 0, \
- .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \
- INIT_TASK_RCU_TREE_PREEMPT()
-#else
-#define INIT_TASK_RCU_PREEMPT(tsk)
-#endif
-#ifdef CONFIG_TASKS_RCU
-#define INIT_TASK_RCU_TASKS(tsk) \
- .rcu_tasks_holdout = false, \
- .rcu_tasks_holdout_list = \
- LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list), \
- .rcu_tasks_idle_cpu = -1,
-#else
-#define INIT_TASK_RCU_TASKS(tsk)
-#endif
-
-extern struct cred init_cred;
-
-#ifdef CONFIG_CGROUP_SCHED
-# define INIT_CGROUP_SCHED(tsk) \
- .sched_task_group = &root_task_group,
-#else
-# define INIT_CGROUP_SCHED(tsk)
-#endif
-
-#ifdef CONFIG_PERF_EVENTS
-# define INIT_PERF_EVENTS(tsk) \
- .perf_event_mutex = \
- __MUTEX_INITIALIZER(tsk.perf_event_mutex), \
- .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
-#else
-# define INIT_PERF_EVENTS(tsk)
-#endif
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-# define INIT_VTIME(tsk) \
- .vtime.seqcount = SEQCNT_ZERO(tsk.vtime.seqcount), \
- .vtime.starttime = 0, \
- .vtime.state = VTIME_SYS,
-#else
-# define INIT_VTIME(tsk)
-#endif
-
#define INIT_TASK_COMM "swapper"
-#ifdef CONFIG_RT_MUTEXES
-# define INIT_RT_MUTEXES(tsk) \
- .pi_waiters = RB_ROOT, \
- .pi_top_task = NULL, \
- .pi_waiters_leftmost = NULL,
-#else
-# define INIT_RT_MUTEXES(tsk)
-#endif
-
-#ifdef CONFIG_NUMA_BALANCING
-# define INIT_NUMA_BALANCING(tsk) \
- .numa_preferred_nid = -1, \
- .numa_group = NULL, \
- .numa_faults = NULL,
-#else
-# define INIT_NUMA_BALANCING(tsk)
-#endif
-
-#ifdef CONFIG_KASAN
-# define INIT_KASAN(tsk) \
- .kasan_depth = 1,
-#else
-# define INIT_KASAN(tsk)
-#endif
-
-#ifdef CONFIG_LIVEPATCH
-# define INIT_LIVEPATCH(tsk) \
- .patch_state = KLP_UNDEFINED,
-#else
-# define INIT_LIVEPATCH(tsk)
-#endif
-
-#ifdef CONFIG_THREAD_INFO_IN_TASK
-# define INIT_TASK_TI(tsk) \
- .thread_info = INIT_THREAD_INFO(tsk), \
- .stack_refcount = ATOMIC_INIT(1),
-#else
-# define INIT_TASK_TI(tsk)
-#endif
-
-#ifdef CONFIG_SECURITY
-#define INIT_TASK_SECURITY .security = NULL,
+/* Attach to the init_task data structure for proper alignment */
+#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
+#define __init_task_data __section(".data..init_task")
#else
-#define INIT_TASK_SECURITY
+#define __init_task_data /**/
#endif
-/*
- * INIT_TASK is used to set up the first task table, touch at
- * your own risk!. Base=0, limit=0x1fffff (=2MB)
- */
-#define INIT_TASK(tsk) \
-{ \
- INIT_TASK_TI(tsk) \
- .state = 0, \
- .stack = init_stack, \
- .usage = ATOMIC_INIT(2), \
- .flags = PF_KTHREAD, \
- .prio = MAX_PRIO-20, \
- .static_prio = MAX_PRIO-20, \
- .normal_prio = MAX_PRIO-20, \
- .policy = SCHED_NORMAL, \
- .cpus_allowed = CPU_MASK_ALL, \
- .nr_cpus_allowed= NR_CPUS, \
- .mm = NULL, \
- .active_mm = &init_mm, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
- .se = { \
- .group_node = LIST_HEAD_INIT(tsk.se.group_node), \
- }, \
- .rt = { \
- .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
- .time_slice = RR_TIMESLICE, \
- }, \
- .tasks = LIST_HEAD_INIT(tsk.tasks), \
- INIT_PUSHABLE_TASKS(tsk) \
- INIT_CGROUP_SCHED(tsk) \
- .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
- .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
- .real_parent = &tsk, \
- .parent = &tsk, \
- .children = LIST_HEAD_INIT(tsk.children), \
- .sibling = LIST_HEAD_INIT(tsk.sibling), \
- .group_leader = &tsk, \
- RCU_POINTER_INITIALIZER(real_cred, &init_cred), \
- RCU_POINTER_INITIALIZER(cred, &init_cred), \
- .comm = INIT_TASK_COMM, \
- .thread = INIT_THREAD, \
- .fs = &init_fs, \
- .files = &init_files, \
- .signal = &init_signals, \
- .sighand = &init_sighand, \
- .nsproxy = &init_nsproxy, \
- .pending = { \
- .list = LIST_HEAD_INIT(tsk.pending.list), \
- .signal = {{0}}}, \
- .blocked = {{0}}, \
- .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
- .journal_info = NULL, \
- INIT_CPU_TIMERS(tsk) \
- .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
- .timer_slack_ns = 50000, /* 50 usec default slack */ \
- .pids = { \
- [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
- [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
- [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
- }, \
- .thread_group = LIST_HEAD_INIT(tsk.thread_group), \
- .thread_node = LIST_HEAD_INIT(init_signals.thread_head), \
- INIT_IDS \
- INIT_PERF_EVENTS(tsk) \
- INIT_TRACE_IRQFLAGS \
- INIT_LOCKDEP \
- INIT_FTRACE_GRAPH \
- INIT_TRACE_RECURSION \
- INIT_TASK_RCU_PREEMPT(tsk) \
- INIT_TASK_RCU_TASKS(tsk) \
- INIT_CPUSET_SEQ(tsk) \
- INIT_RT_MUTEXES(tsk) \
- INIT_PREV_CPUTIME(tsk) \
- INIT_VTIME(tsk) \
- INIT_NUMA_BALANCING(tsk) \
- INIT_KASAN(tsk) \
- INIT_LIVEPATCH(tsk) \
- INIT_TASK_SECURITY \
-}
-
-
-/* Attach to the init_task data structure for proper alignment */
-#define __init_task_data __attribute__((__section__(".data..init_task")))
-
+/* Attach to the thread_info data structure for proper alignment */
+#define __init_thread_info __section(".data..init_thread_info")
#endif
diff --git a/include/linux/initrd.h b/include/linux/initrd.h
index bc67b767f9ce..f1a1f4c92ded 100644
--- a/include/linux/initrd.h
+++ b/include/linux/initrd.h
@@ -1,11 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
-#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
-
-/* 1 = load ramdisk, 0 = don't load */
-extern int rd_doload;
+#ifndef __LINUX_INITRD_H
+#define __LINUX_INITRD_H
-/* 1 = prompt for ramdisk, 0 = don't prompt */
-extern int rd_prompt;
+#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
/* starting block # of image */
extern int rd_image_start;
@@ -20,4 +18,20 @@ extern int initrd_below_start_ok;
extern unsigned long initrd_start, initrd_end;
extern void free_initrd_mem(unsigned long, unsigned long);
-extern unsigned int real_root_dev;
+#ifdef CONFIG_BLK_DEV_INITRD
+extern void __init reserve_initrd_mem(void);
+extern void wait_for_initramfs(void);
+#else
+static inline void __init reserve_initrd_mem(void) {}
+static inline void wait_for_initramfs(void) {}
+#endif
+
+extern phys_addr_t phys_initrd_start;
+extern unsigned long phys_initrd_size;
+
+extern char __initramfs_start[];
+extern unsigned long __initramfs_size;
+
+void console_on_rootfs(void);
+
+#endif /* __LINUX_INITRD_H */
diff --git a/include/linux/inotify.h b/include/linux/inotify.h
index 23aede0b5843..8d20caa1b268 100644
--- a/include/linux/inotify.h
+++ b/include/linux/inotify.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Inode based directory notification for Linux
*
@@ -6,17 +7,14 @@
#ifndef _LINUX_INOTIFY_H
#define _LINUX_INOTIFY_H
-#include <linux/sysctl.h>
#include <uapi/linux/inotify.h>
-extern struct ctl_table inotify_table[]; /* for sysctl */
-
#define ALL_INOTIFY_BITS (IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE | \
IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM | \
IN_MOVED_TO | IN_CREATE | IN_DELETE | \
IN_DELETE_SELF | IN_MOVE_SELF | IN_UNMOUNT | \
IN_Q_OVERFLOW | IN_IGNORED | IN_ONLYDIR | \
IN_DONT_FOLLOW | IN_EXCL_UNLINK | IN_MASK_ADD | \
- IN_ISDIR | IN_ONESHOT)
+ IN_MASK_CREATE | IN_ISDIR | IN_ONESHOT)
#endif /* _LINUX_INOTIFY_H */
diff --git a/include/linux/input-polldev.h b/include/linux/input-polldev.h
deleted file mode 100644
index 2465182670db..000000000000
--- a/include/linux/input-polldev.h
+++ /dev/null
@@ -1,61 +0,0 @@
-#ifndef _INPUT_POLLDEV_H
-#define _INPUT_POLLDEV_H
-
-/*
- * Copyright (c) 2007 Dmitry Torokhov
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-
-#include <linux/input.h>
-#include <linux/workqueue.h>
-
-/**
- * struct input_polled_dev - simple polled input device
- * @private: private driver data.
- * @open: driver-supplied method that prepares device for polling
- * (enabled the device and maybe flushes device state).
- * @close: driver-supplied method that is called when device is no
- * longer being polled. Used to put device into low power mode.
- * @poll: driver-supplied method that polls the device and posts
- * input events (mandatory).
- * @poll_interval: specifies how often the poll() method should be called.
- * Defaults to 500 msec unless overridden when registering the device.
- * @poll_interval_max: specifies upper bound for the poll interval.
- * Defaults to the initial value of @poll_interval.
- * @poll_interval_min: specifies lower bound for the poll interval.
- * Defaults to 0.
- * @input: input device structure associated with the polled device.
- * Must be properly initialized by the driver (id, name, phys, bits).
- *
- * Polled input device provides a skeleton for supporting simple input
- * devices that do not raise interrupts but have to be periodically
- * scanned or polled to detect changes in their state.
- */
-struct input_polled_dev {
- void *private;
-
- void (*open)(struct input_polled_dev *dev);
- void (*close)(struct input_polled_dev *dev);
- void (*poll)(struct input_polled_dev *dev);
- unsigned int poll_interval; /* msec */
- unsigned int poll_interval_max; /* msec */
- unsigned int poll_interval_min; /* msec */
-
- struct input_dev *input;
-
-/* private: */
- struct delayed_work work;
-
- bool devres_managed;
-};
-
-struct input_polled_dev *input_allocate_polled_device(void);
-struct input_polled_dev *devm_input_allocate_polled_device(struct device *dev);
-void input_free_polled_device(struct input_polled_dev *dev);
-int input_register_polled_device(struct input_polled_dev *dev);
-void input_unregister_polled_device(struct input_polled_dev *dev);
-
-#endif
diff --git a/include/linux/input.h b/include/linux/input.h
index a65e3b24fb18..49790c1bd2c4 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 1999-2002 Vojtech Pavlik
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef _INPUT_H
#define _INPUT_H
@@ -24,6 +21,8 @@
#include <linux/timer.h>
#include <linux/mod_devicetable.h>
+struct input_dev_poller;
+
/**
* struct input_value - input value representation
* @type: type of value (EV_KEY, EV_ABS, etc)
@@ -36,6 +35,13 @@ struct input_value {
__s32 value;
};
+enum input_clock_type {
+ INPUT_CLK_REAL = 0,
+ INPUT_CLK_MONO,
+ INPUT_CLK_BOOT,
+ INPUT_CLK_MAX
+};
+
/**
* struct input_dev - represents an input device
* @name: name of the device
@@ -67,6 +73,8 @@ struct input_value {
* not sleep
* @ff: force feedback structure associated with the device if device
* supports force feedback effects
+ * @poller: poller structure associated with the device if device is
+ * set up to use polling mode
* @repeat_key: stores key code of the last key pressed; used to implement
* software autorepeat
* @timer: timer for software autorepeat
@@ -82,9 +90,11 @@ struct input_value {
* @open: this method is called when the very first user calls
* input_open_device(). The driver must prepare the device
* to start generating events (start polling thread,
- * request an IRQ, submit URB, etc.)
+ * request an IRQ, submit URB, etc.). The meaning of open() is
+ * to start providing events to the input core.
* @close: this method is called when the very last user calls
- * input_close_device().
+ * input_close_device(). The meaning of close() is to stop
+ * providing events to the input core.
* @flush: purges the device. Most commonly used to get rid of force
* feedback effects loaded into the device when disconnecting
* from it
@@ -117,6 +127,12 @@ struct input_value {
* @vals: array of values queued in the current frame
 * @devres_managed: indicates that the device is managed with the devres
 *	framework and need not be explicitly unregistered or freed.
+ * @timestamp: storage for a timestamp set by input_set_timestamp called
+ * by a driver
+ * @inhibited: indicates that the input device is inhibited. If that is
+ * the case then input core ignores any events generated by the device.
+ * Device's close() is called when it is being inhibited and its open()
+ * is called when it is being uninhibited.
*/
struct input_dev {
const char *name;
@@ -150,6 +166,8 @@ struct input_dev {
struct ff_device *ff;
+ struct input_dev_poller *poller;
+
unsigned int repeat_key;
struct timer_list timer;
@@ -187,6 +205,10 @@ struct input_dev {
struct input_value *vals;
bool devres_managed;
+
+ ktime_t timestamp[INPUT_CLK_MAX];
+
+ bool inhibited;
};
#define to_input_dev(d) container_of(d, struct input_dev, dev)
@@ -234,6 +256,10 @@ struct input_dev {
#error "SW_MAX and INPUT_DEVICE_ID_SW_MAX do not match"
#endif
+#if INPUT_PROP_MAX != INPUT_DEVICE_ID_PROP_MAX
+#error "INPUT_PROP_MAX and INPUT_DEVICE_ID_PROP_MAX do not match"
+#endif
+
#define INPUT_DEVICE_ID_MATCH_DEVICE \
(INPUT_DEVICE_ID_MATCH_BUS | INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT)
#define INPUT_DEVICE_ID_MATCH_DEVICE_AND_VERSION \
@@ -360,6 +386,13 @@ void input_unregister_device(struct input_dev *);
void input_reset_device(struct input_dev *);
+int input_setup_polling(struct input_dev *dev,
+ void (*poll_fn)(struct input_dev *dev));
+void input_set_poll_interval(struct input_dev *dev, unsigned int interval);
+void input_set_min_poll_interval(struct input_dev *dev, unsigned int interval);
+void input_set_max_poll_interval(struct input_dev *dev, unsigned int interval);
+int input_get_poll_interval(struct input_dev *dev);
+
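
This is the in-core replacement for the dedicated input_polldev API (its header is deleted later in this diff): a normal input device opts into periodic polling rather than wrapping a second structure. A minimal sketch of a polled driver, where example_read_button() stands in for a real hardware read:

	extern int example_read_button(void);	/* hypothetical hardware read */

	static void example_poll(struct input_dev *input)
	{
		input_report_key(input, KEY_POWER, example_read_button());
		input_sync(input);
	}

	static int example_setup(struct device *dev)
	{
		struct input_dev *input = devm_input_allocate_device(dev);
		int error;

		if (!input)
			return -ENOMEM;

		input_set_capability(input, EV_KEY, KEY_POWER);

		error = input_setup_polling(input, example_poll);
		if (error)
			return error;

		input_set_poll_interval(input, 100);	/* msec */
		return input_register_device(input);
	}
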
int __must_check input_register_handler(struct input_handler *);
void input_unregister_handler(struct input_handler *);
@@ -381,6 +414,9 @@ void input_close_device(struct input_handle *);
int input_flush_device(struct input_handle *handle, struct file *file);
+void input_set_timestamp(struct input_dev *dev, ktime_t timestamp);
+ktime_t *input_get_timestamp(struct input_dev *dev);
+
void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value);
void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value);
@@ -439,6 +475,8 @@ static inline void input_set_events_per_packet(struct input_dev *dev, int n_even
void input_alloc_absinfo(struct input_dev *dev);
void input_set_abs_params(struct input_dev *dev, unsigned int axis,
int min, int max, int fuzz, int flat);
+void input_copy_abs(struct input_dev *dst, unsigned int dst_axis,
+ const struct input_dev *src, unsigned int src_axis);
#define INPUT_GENERATE_ABS_ACCESSORS(_suffix, _item) \
static inline int input_abs_get_##_suffix(struct input_dev *dev, \
@@ -469,8 +507,13 @@ int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke);
int input_set_keycode(struct input_dev *dev,
const struct input_keymap_entry *ke);
+bool input_match_device_id(const struct input_dev *dev,
+ const struct input_device_id *id);
+
void input_enable_softrepeat(struct input_dev *dev, int delay, int period);
+bool input_device_enabled(struct input_dev *dev);
+
extern struct class input_class;
/**
@@ -529,6 +572,7 @@ int input_ff_event(struct input_dev *dev, unsigned int type, unsigned int code,
int input_ff_upload(struct input_dev *dev, struct ff_effect *effect, struct file *file);
int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file);
+int input_ff_flush(struct input_dev *dev, struct file *file);
int input_ff_create_memless(struct input_dev *dev, void *data,
int (*play_effect)(struct input_dev *, void *, struct ff_effect *));
diff --git a/include/linux/input/ad714x.h b/include/linux/input/ad714x.h
index d388d857bf14..20aea668b007 100644
--- a/include/linux/input/ad714x.h
+++ b/include/linux/input/ad714x.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/input/ad714x.h
*
@@ -7,8 +8,6 @@
* information.
*
* Copyright 2009-2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef __LINUX_INPUT_AD714X_H__
diff --git a/include/linux/input/adp5589.h b/include/linux/input/adp5589.h
index 1a05eee15e67..0e4742c8c81e 100644
--- a/include/linux/input/adp5589.h
+++ b/include/linux/input/adp5589.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Analog Devices ADP5589/ADP5585 I/O Expander and QWERTY Keypad Controller
*
* Copyright 2010-2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#ifndef _ADP5589_H
@@ -176,13 +175,6 @@ struct i2c_client; /* forward declaration */
struct adp5589_gpio_platform_data {
int gpio_start; /* GPIO Chip base # */
- int (*setup)(struct i2c_client *client,
- int gpio, unsigned ngpio,
- void *context);
- int (*teardown)(struct i2c_client *client,
- int gpio, unsigned ngpio,
- void *context);
- void *context;
};
#endif
diff --git a/include/linux/input/adxl34x.h b/include/linux/input/adxl34x.h
index 010d98175efa..7efc9008f316 100644
--- a/include/linux/input/adxl34x.h
+++ b/include/linux/input/adxl34x.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/input/adxl34x.h
*
@@ -6,8 +7,6 @@
* device's "struct device" holds this information.
*
* Copyright 2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef __LINUX_INPUT_ADXL34X_H__
diff --git a/include/linux/input/as5011.h b/include/linux/input/as5011.h
index 1affd0ddfa9d..5fba52a56cd6 100644
--- a/include/linux/input/as5011.h
+++ b/include/linux/input/as5011.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _AS5011_H
#define _AS5011_H
/*
* Copyright (c) 2010, 2011 Fabien Marteau <fabien.marteau@armadeus.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
struct as5011_platform_data {
diff --git a/include/linux/input/auo-pixcir-ts.h b/include/linux/input/auo-pixcir-ts.h
deleted file mode 100644
index 5049f21928e4..000000000000
--- a/include/linux/input/auo-pixcir-ts.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Driver for AUO in-cell touchscreens
- *
- * Copyright (c) 2011 Heiko Stuebner <heiko@sntech.de>
- *
- * based on auo_touch.h from Dell Streak kernel
- *
- * Copyright (c) 2008 QUALCOMM Incorporated.
- * Copyright (c) 2008 QUALCOMM USA, INC.
- *
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __AUO_PIXCIR_TS_H__
-#define __AUO_PIXCIR_TS_H__
-
-/*
- * Interrupt modes:
- * periodical: interrupt is asserted periodicaly
- * compare coordinates: interrupt is asserted when coordinates change
- * indicate touch: interrupt is asserted during touch
- */
-#define AUO_PIXCIR_INT_PERIODICAL 0x00
-#define AUO_PIXCIR_INT_COMP_COORD 0x01
-#define AUO_PIXCIR_INT_TOUCH_IND 0x02
-
-/*
- * @gpio_int interrupt gpio
- * @int_setting one of AUO_PIXCIR_INT_*
- * @init_hw hardwarespecific init
- * @exit_hw hardwarespecific shutdown
- * @x_max x-resolution
- * @y_max y-resolution
- */
-struct auo_pixcir_ts_platdata {
- int gpio_int;
- int gpio_rst;
-
- int int_setting;
-
- unsigned int x_max;
- unsigned int y_max;
-};
-
-#endif
diff --git a/include/linux/input/bu21013.h b/include/linux/input/bu21013.h
deleted file mode 100644
index 6230d76bde5d..000000000000
--- a/include/linux/input/bu21013.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
- * License terms:GNU General Public License (GPL) version 2
- */
-
-#ifndef _BU21013_H
-#define _BU21013_H
-
-/**
- * struct bu21013_platform_device - Handle the platform data
- * @touch_x_max: touch x max
- * @touch_y_max: touch y max
- * @cs_pin: chip select pin
- * @touch_pin: touch gpio pin
- * @ext_clk: external clock flag
- * @x_flip: x flip flag
- * @y_flip: y flip flag
- * @wakeup: wakeup flag
- *
- * This is used to handle the platform data
- */
-struct bu21013_platform_device {
- int touch_x_max;
- int touch_y_max;
- unsigned int cs_pin;
- unsigned int touch_pin;
- bool ext_clk;
- bool x_flip;
- bool y_flip;
- bool wakeup;
-};
-
-#endif
diff --git a/include/linux/input/cma3000.h b/include/linux/input/cma3000.h
index cbbaac27d311..aaab51fa909f 100644
--- a/include/linux/input/cma3000.h
+++ b/include/linux/input/cma3000.h
@@ -1,20 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* VTI CMA3000_Dxx Accelerometer driver
*
* Copyright (C) 2010 Texas Instruments
* Author: Hemanth V <hemanthv@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _LINUX_CMA3000_H
diff --git a/include/linux/input/cy8ctmg110_pdata.h b/include/linux/input/cy8ctmg110_pdata.h
deleted file mode 100644
index 09522cb59910..000000000000
--- a/include/linux/input/cy8ctmg110_pdata.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _LINUX_CY8CTMG110_PDATA_H
-#define _LINUX_CY8CTMG110_PDATA_H
-
-struct cy8ctmg110_pdata
-{
- int reset_pin; /* Reset pin is wired to this GPIO (optional) */
- int irq_pin; /* IRQ pin is wired to this GPIO */
-};
-
-#endif
diff --git a/include/linux/input/cyttsp.h b/include/linux/input/cyttsp.h
deleted file mode 100644
index 586c8c95dcb0..000000000000
--- a/include/linux/input/cyttsp.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Header file for:
- * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers.
- * For use with Cypress Txx3xx parts.
- * Supported parts include:
- * CY8CTST341
- * CY8CTMA340
- *
- * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc.
- * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, and only version 2, as published by the
- * Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
- * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com)
- *
- */
-#ifndef _CYTTSP_H_
-#define _CYTTSP_H_
-
-#define CY_SPI_NAME "cyttsp-spi"
-#define CY_I2C_NAME "cyttsp-i2c"
-/* Active Power state scanning/processing refresh interval */
-#define CY_ACT_INTRVL_DFLT 0x00 /* ms */
-/* touch timeout for the Active power */
-#define CY_TCH_TMOUT_DFLT 0xFF /* ms */
-/* Low Power state scanning/processing refresh interval */
-#define CY_LP_INTRVL_DFLT 0x0A /* ms */
-/* Active distance in pixels for a gesture to be reported */
-#define CY_ACT_DIST_DFLT 0xF8 /* pixels */
-
-#endif /* _CYTTSP_H_ */
diff --git a/include/linux/input/elan-i2c-ids.h b/include/linux/input/elan-i2c-ids.h
new file mode 100644
index 000000000000..51cca17ee94c
--- /dev/null
+++ b/include/linux/input/elan-i2c-ids.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Elan I2C/SMBus Touchpad device whitelist
+ *
+ * Copyright (c) 2013 ELAN Microelectronics Corp.
+ *
+ * Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
+ * Author: KT Liao <kt.liao@emc.com.tw>
+ * Version: 1.6.3
+ *
+ * Based on cyapa driver:
+ * copyright (c) 2011-2012 Cypress Semiconductor, Inc.
+ * copyright (c) 2011-2012 Google, Inc.
+ *
+ * Trademarks are the property of their respective owners.
+ */
+
+#ifndef __ELAN_I2C_IDS_H
+#define __ELAN_I2C_IDS_H
+
+#include <linux/mod_devicetable.h>
+
+static const struct acpi_device_id elan_acpi_id[] = {
+ { "ELAN0000", 0 },
+ { "ELAN0100", 0 },
+ { "ELAN0600", 0 },
+ { "ELAN0601", 0 },
+ { "ELAN0602", 0 },
+ { "ELAN0603", 0 },
+ { "ELAN0604", 0 },
+ { "ELAN0605", 0 },
+ { "ELAN0606", 0 },
+ { "ELAN0607", 0 },
+ { "ELAN0608", 0 },
+ { "ELAN0609", 0 },
+ { "ELAN060B", 0 },
+ { "ELAN060C", 0 },
+ { "ELAN060F", 0 },
+ { "ELAN0610", 0 },
+ { "ELAN0611", 0 },
+ { "ELAN0612", 0 },
+ { "ELAN0615", 0 },
+ { "ELAN0616", 0 },
+ { "ELAN0617", 0 },
+ { "ELAN0618", 0 },
+ { "ELAN0619", 0 },
+ { "ELAN061A", 0 },
+/* { "ELAN061B", 0 }, not working on the Lenovo Legion Y7000 */
+ { "ELAN061C", 0 },
+ { "ELAN061D", 0 },
+ { "ELAN061E", 0 },
+ { "ELAN061F", 0 },
+ { "ELAN0620", 0 },
+ { "ELAN0621", 0 },
+ { "ELAN0622", 0 },
+ { "ELAN0623", 0 },
+ { "ELAN0624", 0 },
+ { "ELAN0625", 0 },
+ { "ELAN0626", 0 },
+ { "ELAN0627", 0 },
+ { "ELAN0628", 0 },
+ { "ELAN0629", 0 },
+ { "ELAN062A", 0 },
+ { "ELAN062B", 0 },
+ { "ELAN062C", 0 },
+ { "ELAN062D", 0 },
+ { "ELAN062E", 0 }, /* Lenovo V340 Whiskey Lake U */
+ { "ELAN062F", 0 }, /* Lenovo V340 Comet Lake U */
+ { "ELAN0631", 0 },
+ { "ELAN0632", 0 },
+ { "ELAN0633", 0 }, /* Lenovo S145 */
+ { "ELAN0634", 0 }, /* Lenovo V340 Ice lake */
+ { "ELAN0635", 0 }, /* Lenovo V1415-IIL */
+ { "ELAN0636", 0 }, /* Lenovo V1415-Dali */
+ { "ELAN0637", 0 }, /* Lenovo V1415-IGLR */
+ { "ELAN1000", 0 },
+ { }
+};
+
+#endif /* __ELAN_I2C_IDS_H */
diff --git a/include/linux/input/gp2ap002a00f.h b/include/linux/input/gp2ap002a00f.h
deleted file mode 100644
index aad2fd44a61a..000000000000
--- a/include/linux/input/gp2ap002a00f.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _GP2AP002A00F_H_
-#define _GP2AP002A00F_H_
-
-#include <linux/i2c.h>
-
-#define GP2A_I2C_NAME "gp2ap002a00f"
-
-/**
- * struct gp2a_platform_data - Sharp gp2ap002a00f proximity platform data
- * @vout_gpio: The gpio connected to the object detected pin (VOUT)
- * @wakeup: Set to true if the proximity can wake the device from suspend
- * @hw_setup: Callback for setting up hardware such as gpios and vregs
- * @hw_shutdown: Callback for properly shutting down hardware
- */
-struct gp2a_platform_data {
- int vout_gpio;
- bool wakeup;
- int (*hw_setup)(struct i2c_client *client);
- int (*hw_shutdown)(struct i2c_client *client);
-};
-
-#endif
diff --git a/include/linux/input/gpio_tilt.h b/include/linux/input/gpio_tilt.h
deleted file mode 100644
index c1cc52d380e0..000000000000
--- a/include/linux/input/gpio_tilt.h
+++ /dev/null
@@ -1,73 +0,0 @@
-#ifndef _INPUT_GPIO_TILT_H
-#define _INPUT_GPIO_TILT_H
-
-/**
- * struct gpio_tilt_axis - Axis used by the tilt switch
- * @axis: Constant describing the axis, e.g. ABS_X
- * @min: minimum value for abs_param
- * @max: maximum value for abs_param
- * @fuzz: fuzz value for abs_param
- * @flat: flat value for abs_param
- */
-struct gpio_tilt_axis {
- int axis;
- int min;
- int max;
- int fuzz;
- int flat;
-};
-
-/**
- * struct gpio_tilt_state - state description
- * @gpios: bitfield of gpio target-states for the value
- * @axes: array containing the axes settings for the gpio state
- * The array indizes must correspond to the axes defined
- * in platform_data
- *
- * This structure describes a supported axis settings
- * and the necessary gpio-state which represent it.
- *
- * The n-th bit in the bitfield describes the state of the n-th GPIO
- * from the gpios-array defined in gpio_regulator_config below.
- */
-struct gpio_tilt_state {
- int gpios;
- int *axes;
-};
-
-/**
- * struct gpio_tilt_platform_data
- * @gpios: Array containing the gpios determining the tilt state
- * @nr_gpios: Number of gpios
- * @axes: Array of gpio_tilt_axis descriptions
- * @nr_axes: Number of axes
- * @states: Array of gpio_tilt_state entries describing
- * the gpio state for specific tilts
- * @nr_states: Number of states available
- * @debounce_interval: debounce ticks interval in msecs
- * @poll_interval: polling interval in msecs - for polling driver only
- * @enable: callback to enable the tilt switch
- * @disable: callback to disable the tilt switch
- *
- * This structure contains gpio-tilt-switch configuration
- * information that must be passed by platform code to the
- * gpio-tilt input driver.
- */
-struct gpio_tilt_platform_data {
- struct gpio *gpios;
- int nr_gpios;
-
- struct gpio_tilt_axis *axes;
- int nr_axes;
-
- struct gpio_tilt_state *states;
- int nr_states;
-
- int debounce_interval;
-
- unsigned int poll_interval;
- int (*enable)(struct device *dev);
- void (*disable)(struct device *dev);
-};
-
-#endif
diff --git a/include/linux/input/ili210x.h b/include/linux/input/ili210x.h
deleted file mode 100644
index a5471245a13c..000000000000
--- a/include/linux/input/ili210x.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _ILI210X_H
-#define _ILI210X_H
-
-struct ili210x_platform_data {
- unsigned long irq_flags;
- unsigned int poll_period;
- bool (*get_pendown_state)(void);
-};
-
-#endif
diff --git a/include/linux/input/kxtj9.h b/include/linux/input/kxtj9.h
index d415579b56fe..46e231986fde 100644
--- a/include/linux/input/kxtj9.h
+++ b/include/linux/input/kxtj9.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Kionix, Inc.
* Written by Chris Hudson <chudson@kionix.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307, USA
*/
#ifndef __KXTJ9_H__
diff --git a/include/linux/input/lm8333.h b/include/linux/input/lm8333.h
index 79f918c6e8c5..906da5fc06e0 100644
--- a/include/linux/input/lm8333.h
+++ b/include/linux/input/lm8333.h
@@ -1,6 +1,6 @@
/*
* public include for LM8333 keypad driver - same license as driver
- * Copyright (C) 2012 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
+ * Copyright (C) 2012 Wolfram Sang, Pengutronix <kernel@pengutronix.de>
*/
#ifndef _LM8333_H
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
index 6174733a57eb..b8d8d69eba29 100644
--- a/include/linux/input/matrix_keypad.h
+++ b/include/linux/input/matrix_keypad.h
@@ -1,9 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MATRIX_KEYPAD_H
#define _MATRIX_KEYPAD_H
#include <linux/types.h>
-#include <linux/input.h>
-#include <linux/of.h>
+
+struct device;
+struct input_dev;
#define MATRIX_MAX_ROWS 32
#define MATRIX_MAX_COLS 32
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index d7188de4db96..3b8580bd33c1 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _INPUT_MT_H
#define _INPUT_MT_H
@@ -5,10 +6,6 @@
* Input Multitouch Library
*
* Copyright (c) 2010 Henrik Rydberg
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#include <linux/input.h>
@@ -100,9 +97,14 @@ static inline bool input_is_mt_axis(int axis)
return axis == ABS_MT_SLOT || input_is_mt_value(axis);
}
-void input_mt_report_slot_state(struct input_dev *dev,
+bool input_mt_report_slot_state(struct input_dev *dev,
unsigned int tool_type, bool active);
+static inline void input_mt_report_slot_inactive(struct input_dev *dev)
+{
+ input_mt_report_slot_state(dev, 0, false);
+}
+
void input_mt_report_finger_count(struct input_dev *dev, int count);
void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count);
void input_mt_drop_unused(struct input_dev *dev);
diff --git a/include/linux/input/navpoint.h b/include/linux/input/navpoint.h
index 45050eb34de3..d464ffb4db52 100644
--- a/include/linux/input/navpoint.h
+++ b/include/linux/input/navpoint.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Paul Parsons <lost.distance@yahoo.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
struct navpoint_platform_data {
diff --git a/include/linux/input/samsung-keypad.h b/include/linux/input/samsung-keypad.h
index f25619bfd8a8..ab6b97114c08 100644
--- a/include/linux/input/samsung-keypad.h
+++ b/include/linux/input/samsung-keypad.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Samsung Keypad platform data definitions
*
* Copyright (C) 2010 Samsung Electronics Co.Ltd
* Author: Joonyoung Shim <jy0922.shim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __SAMSUNG_KEYPAD_H
diff --git a/include/linux/input/sh_keysc.h b/include/linux/input/sh_keysc.h
index 5d253cd93691..b3c4f3b6679c 100644
--- a/include/linux/input/sh_keysc.h
+++ b/include/linux/input/sh_keysc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SH_KEYSC_H__
#define __SH_KEYSC_H__
diff --git a/include/linux/input/sparse-keymap.h b/include/linux/input/sparse-keymap.h
index c7346e33d958..d0dddc14ebc8 100644
--- a/include/linux/input/sparse-keymap.h
+++ b/include/linux/input/sparse-keymap.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _SPARSE_KEYMAP_H
#define _SPARSE_KEYMAP_H
/*
* Copyright (c) 2009 Dmitry Torokhov
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#define KE_END 0 /* Indicates end of keymap */
@@ -23,6 +20,7 @@
* private definitions.
* @code: Device-specific data identifying the button/switch
* @keycode: KEY_* code assigned to a key/button
+ * @sw: struct with code/value used by KE_SW and KE_VSW
* @sw.code: SW_* code assigned to a switch
 * @sw.value: Value that should be sent in an input event when KE_SW
* switch is toggled. KE_VSW switches ignore this field and
diff --git a/include/linux/input/touchscreen.h b/include/linux/input/touchscreen.h
index 09d22ccb9e41..fe66e2b58f62 100644
--- a/include/linux/input/touchscreen.h
+++ b/include/linux/input/touchscreen.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2014 Sebastian Reichel <sre@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef _TOUCHSCREEN_H
diff --git a/include/linux/input/vivaldi-fmap.h b/include/linux/input/vivaldi-fmap.h
new file mode 100644
index 000000000000..7e4b7023bf04
--- /dev/null
+++ b/include/linux/input/vivaldi-fmap.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _VIVALDI_FMAP_H
+#define _VIVALDI_FMAP_H
+
+#include <linux/types.h>
+
+#define VIVALDI_MAX_FUNCTION_ROW_KEYS 24
+
+/**
+ * struct vivaldi_data - Function row map data for ChromeOS Vivaldi keyboards
+ * @function_row_physmap: An array of scancodes or their equivalent (HID usage
+ * codes, encoded rows/columns, etc) for the top
+ * row function keys, in an order from left to right
+ * @num_function_row_keys: The number of top row keys in a custom keyboard
+ *
+ * This structure is supposed to be used by ChromeOS keyboards using
+ * the Vivaldi keyboard function row design.
+ */
+struct vivaldi_data {
+ u32 function_row_physmap[VIVALDI_MAX_FUNCTION_ROW_KEYS];
+ unsigned int num_function_row_keys;
+};
+
+ssize_t vivaldi_function_row_physmap_show(const struct vivaldi_data *data,
+ char *buf);
+
+#endif /* _VIVALDI_FMAP_H */
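
The helper formats the physmap for the standard function_row_physmap sysfs attribute, so drivers only forward their vivaldi_data. A sketch of a show() callback, assuming the driver keeps the data in drvdata:

	static ssize_t function_row_physmap_show(struct device *dev,
						 struct device_attribute *attr,
						 char *buf)
	{
		const struct vivaldi_data *data = dev_get_drvdata(dev);

		return vivaldi_function_row_physmap_show(data, buf);
	}
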
diff --git a/include/linux/instruction_pointer.h b/include/linux/instruction_pointer.h
new file mode 100644
index 000000000000..cda1f706eaeb
--- /dev/null
+++ b/include/linux/instruction_pointer.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_INSTRUCTION_POINTER_H
+#define _LINUX_INSTRUCTION_POINTER_H
+
+#define _RET_IP_ (unsigned long)__builtin_return_address(0)
+#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
+
+#endif /* _LINUX_INSTRUCTION_POINTER_H */
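
_RET_IP_ evaluates to the caller's return address and _THIS_IP_ to the address of the expression itself, which is handy for attributing an event to a call site. A hedged usage sketch with a hypothetical helper:

	static void example_trace(void)
	{
		pr_debug("at %pS, called from %pS\n",
			 (void *)_THIS_IP_, (void *)_RET_IP_);
	}
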
diff --git a/include/linux/instrumentation.h b/include/linux/instrumentation.h
new file mode 100644
index 000000000000..bc7babe91b2e
--- /dev/null
+++ b/include/linux/instrumentation.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_INSTRUMENTATION_H
+#define __LINUX_INSTRUMENTATION_H
+
+#ifdef CONFIG_NOINSTR_VALIDATION
+
+#include <linux/stringify.h>
+
+/* Begin/end of an instrumentation safe region */
+#define __instrumentation_begin(c) ({ \
+ asm volatile(__stringify(c) ": nop\n\t" \
+ ".pushsection .discard.instr_begin\n\t" \
+ ".long " __stringify(c) "b - .\n\t" \
+ ".popsection\n\t" : : "i" (c)); \
+})
+#define instrumentation_begin() __instrumentation_begin(__COUNTER__)
+
+/*
+ * Because instrumentation_{begin,end}() can nest, objtool validation considers
+ * _begin() a +1 and _end() a -1 and computes a sum over the instructions.
+ * When the value is greater than 0, we consider instrumentation allowed.
+ *
+ * There is a problem with code like:
+ *
+ * noinstr void foo()
+ * {
+ * instrumentation_begin();
+ * ...
+ * if (cond) {
+ * instrumentation_begin();
+ * ...
+ * instrumentation_end();
+ * }
+ * bar();
+ * instrumentation_end();
+ * }
+ *
+ * If instrumentation_end() were an empty label, like all the other
+ * annotations, the inner _end(), which is at the end of a conditional block,
+ * would land on the instruction after the block.
+ *
+ * If we then consider the sum over the !cond path, we'll see that the call to
+ * bar() happens with a 0-value, even though we meant it to happen with a
+ * positive value.
+ *
+ * To avoid this, have _end() be a NOP instruction; this ensures it will be
+ * part of the condition block and does not escape.
+ */
+#define __instrumentation_end(c) ({ \
+ asm volatile(__stringify(c) ": nop\n\t" \
+ ".pushsection .discard.instr_end\n\t" \
+ ".long " __stringify(c) "b - .\n\t" \
+ ".popsection\n\t" : : "i" (c)); \
+})
+#define instrumentation_end() __instrumentation_end(__COUNTER__)
+#else /* !CONFIG_NOINSTR_VALIDATION */
+# define instrumentation_begin() do { } while(0)
+# define instrumentation_end() do { } while(0)
+#endif /* CONFIG_NOINSTR_VALIDATION */
+
+#endif /* __LINUX_INSTRUMENTATION_H */
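
The typical shape is a noinstr entry function that brackets the region where instrumented helpers may run; a hedged sketch with hypothetical names:

	noinstr void example_enter_from_user(void)
	{
		/* nothing instrumentable may run here */
		instrumentation_begin();
		pr_debug("instrumented code is allowed here\n");
		instrumentation_end();
		/* back to non-instrumentable code */
	}
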
diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h
new file mode 100644
index 000000000000..1b608e00290a
--- /dev/null
+++ b/include/linux/instrumented.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This header provides generic wrappers for memory access instrumentation that
+ * the compiler cannot emit for: KASAN, KCSAN, KMSAN.
+ */
+#ifndef _LINUX_INSTRUMENTED_H
+#define _LINUX_INSTRUMENTED_H
+
+#include <linux/compiler.h>
+#include <linux/kasan-checks.h>
+#include <linux/kcsan-checks.h>
+#include <linux/kmsan-checks.h>
+#include <linux/types.h>
+
+/**
+ * instrument_read - instrument regular read access
+ * @v: address of access
+ * @size: size of access
+ *
+ * Instrument a regular read access. The instrumentation should be inserted
+ * before the actual read happens.
+ */
+static __always_inline void instrument_read(const volatile void *v, size_t size)
+{
+ kasan_check_read(v, size);
+ kcsan_check_read(v, size);
+}
+
+/**
+ * instrument_write - instrument regular write access
+ * @v: address of access
+ * @size: size of access
+ *
+ * Instrument a regular write access. The instrumentation should be inserted
+ * before the actual write happens.
+ */
+static __always_inline void instrument_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_write(v, size);
+}
+
+/**
+ * instrument_read_write - instrument regular read-write access
+ * @v: address of access
+ * @size: size of access
+ *
+ * Instrument a regular read-write access. The instrumentation should be
+ * inserted before the actual access happens.
+ */
+static __always_inline void instrument_read_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_read_write(v, size);
+}
+
+/**
+ * instrument_atomic_read - instrument atomic read access
+ * @v: address of access
+ * @size: size of access
+ *
+ * Instrument an atomic read access. The instrumentation should be inserted
+ * before the actual read happens.
+ */
+static __always_inline void instrument_atomic_read(const volatile void *v, size_t size)
+{
+ kasan_check_read(v, size);
+ kcsan_check_atomic_read(v, size);
+}
+
+/**
+ * instrument_atomic_write - instrument atomic write access
+ * @v: address of access
+ * @size: size of access
+ *
+ * Instrument an atomic write access. The instrumentation should be inserted
+ * before the actual write happens.
+ */
+static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_atomic_write(v, size);
+}
+
+/**
+ * instrument_atomic_read_write - instrument atomic read-write access
+ * @v: address of access
+ * @size: size of access
+ *
+ * Instrument an atomic read-write access. The instrumentation should be
+ * inserted before the actual write happens.
+ */
+static __always_inline void instrument_atomic_read_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_atomic_read_write(v, size);
+}
+
+/**
+ * instrument_copy_to_user - instrument reads of copy_to_user
+ * @to: destination address
+ * @from: source address
+ * @n: number of bytes to copy
+ *
+ * Instrument reads from kernel memory that are due to copy_to_user (and
+ * variants). The instrumentation must be inserted before the accesses.
+ */
+static __always_inline void
+instrument_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ kasan_check_read(from, n);
+ kcsan_check_read(from, n);
+ kmsan_copy_to_user(to, from, n, 0);
+}
+
+/**
+ * instrument_copy_from_user_before - add instrumentation before copy_from_user
+ * @to: destination address
+ * @from: source address
+ * @n: number of bytes to copy
+ *
+ * Instrument writes to kernel memory that are due to copy_from_user (and
+ * variants). The instrumentation should be inserted before the accesses.
+ */
+static __always_inline void
+instrument_copy_from_user_before(const void *to, const void __user *from, unsigned long n)
+{
+ kasan_check_write(to, n);
+ kcsan_check_write(to, n);
+}
+
+/**
+ * instrument_copy_from_user_after - add instrumentation after copy_from_user
+ * @to: destination address
+ * @from: source address
+ * @n: number of bytes to copy
+ * @left: number of bytes not copied (as returned by copy_from_user)
+ *
+ * Instrument writes to kernel memory that are due to copy_from_user (and
+ * variants). The instrumentation should be inserted after the accesses.
+ */
+static __always_inline void
+instrument_copy_from_user_after(const void *to, const void __user *from,
+ unsigned long n, unsigned long left)
+{
+ kmsan_unpoison_memory(to, n - left);
+}
+
+/**
+ * instrument_get_user() - add instrumentation to get_user()-like macros
+ * @to: destination variable, may not be address-taken
+ *
+ * get_user() and friends are fragile, so it may depend on the implementation
+ * whether the instrumentation happens before or after the data is copied from
+ * userspace.
+ */
+#define instrument_get_user(to) \
+({ \
+ u64 __tmp = (u64)(to); \
+ kmsan_unpoison_memory(&__tmp, sizeof(__tmp)); \
+ to = __tmp; \
+})
+
+/**
+ * instrument_put_user() - add instrumentation to put_user()-like macros
+ * @from: source address
+ * @ptr: userspace pointer to copy to
+ * @size: number of bytes to copy
+ *
+ * put_user() and friends are fragile, so it may depend on the implementation
+ * whether the instrumentation happens before or after the data is copied to
+ * userspace.
+ */
+#define instrument_put_user(from, ptr, size) \
+({ \
+ kmsan_copy_to_user(ptr, &from, sizeof(from), 0); \
+})
+
+#endif /* _LINUX_INSTRUMENTED_H */
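
To see where these hooks sit, a hedged sketch of a copy_from_user()-style
wrapper; my_copy_from_user() is a hypothetical name, the real call sites
live in the uaccess code:

	#include <linux/instrumented.h>
	#include <linux/uaccess.h>

	static unsigned long my_copy_from_user(void *to,
					       const void __user *from,
					       unsigned long n)
	{
		unsigned long left;

		/* check the kernel destination before the arch copy runs... */
		instrument_copy_from_user_before(to, from, n);
		left = raw_copy_from_user(to, from, n);
		/* ...and afterwards unpoison only the bytes actually written */
		instrument_copy_from_user_after(to, from, n, left);
		return left;
	}
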
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
index c2d6082a1a4c..2ea0f2f65ab6 100644
--- a/include/linux/integrity.h
+++ b/include/linux/integrity.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2009 IBM Corporation
* Author: Mimi Zohar <zohar@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, version 2 of the License.
*/
#ifndef _LINUX_INTEGRITY_H
@@ -14,7 +11,9 @@
enum integrity_status {
INTEGRITY_PASS = 0,
+ INTEGRITY_PASS_IMMUTABLE,
INTEGRITY_FAIL,
+ INTEGRITY_FAIL_IMMUTABLE,
INTEGRITY_NOLABEL,
INTEGRITY_NOXATTRS,
INTEGRITY_UNKNOWN,
@@ -43,4 +42,17 @@ static inline void integrity_load_keys(void)
}
#endif /* CONFIG_INTEGRITY */
+#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS
+
+extern int integrity_kernel_module_request(char *kmod_name);
+
+#else
+
+static inline int integrity_kernel_module_request(char *kmod_name)
+{
+ return 0;
+}
+
+#endif /* CONFIG_INTEGRITY_ASYMMETRIC_KEYS */
+
#endif /* _LINUX_INTEGRITY_H */
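
A short sketch of the stub pattern above: a caller can invoke the hook
unconditionally, since the !CONFIG_INTEGRITY_ASYMMETRIC_KEYS stub simply
returns 0; my_request_module() below is a hypothetical call site:

	#include <linux/integrity.h>

	static int my_request_module(char *kmod_name)
	{
		int ret = integrity_kernel_module_request(kmod_name);

		if (ret)
			return ret;	/* the integrity layer vetoed the request */
		/* ...proceed with loading kmod_name... */
		return 0;
	}
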
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
deleted file mode 100644
index 485a5b48f038..000000000000
--- a/include/linux/intel-iommu.h
+++ /dev/null
@@ -1,494 +0,0 @@
-/*
- * Copyright © 2006-2015, Intel Corporation.
- *
- * Authors: Ashok Raj <ashok.raj@intel.com>
- * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
- * David Woodhouse <David.Woodhouse@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#ifndef _INTEL_IOMMU_H_
-#define _INTEL_IOMMU_H_
-
-#include <linux/types.h>
-#include <linux/iova.h>
-#include <linux/io.h>
-#include <linux/idr.h>
-#include <linux/dma_remapping.h>
-#include <linux/mmu_notifier.h>
-#include <linux/list.h>
-#include <linux/iommu.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-
-#include <asm/cacheflush.h>
-#include <asm/iommu.h>
-
-/*
- * Intel IOMMU register specification per version 1.0 public spec.
- */
-
-#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
-#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
-#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
-#define DMAR_GCMD_REG 0x18 /* Global command register */
-#define DMAR_GSTS_REG 0x1c /* Global status register */
-#define DMAR_RTADDR_REG 0x20 /* Root entry table */
-#define DMAR_CCMD_REG 0x28 /* Context command reg */
-#define DMAR_FSTS_REG 0x34 /* Fault Status register */
-#define DMAR_FECTL_REG 0x38 /* Fault control register */
-#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
-#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
-#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
-#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
-#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
-#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
-#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
-#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
-#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
-#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
-#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
-#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
-#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
-#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */
-#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
-#define DMAR_PQH_REG 0xc0 /* Page request queue head register */
-#define DMAR_PQT_REG 0xc8 /* Page request queue tail register */
-#define DMAR_PQA_REG 0xd0 /* Page request queue address register */
-#define DMAR_PRS_REG 0xdc /* Page request status register */
-#define DMAR_PECTL_REG 0xe0 /* Page request event control register */
-#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */
-#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */
-#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */
-
-#define OFFSET_STRIDE (9)
-
-#define dmar_readq(a) readq(a)
-#define dmar_writeq(a,v) writeq(v,a)
-
-#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
-#define DMAR_VER_MINOR(v) ((v) & 0x0f)
-
-/*
- * Decoding Capability Register
- */
-#define cap_pi_support(c) (((c) >> 59) & 1)
-#define cap_read_drain(c) (((c) >> 55) & 1)
-#define cap_write_drain(c) (((c) >> 54) & 1)
-#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
-#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
-#define cap_pgsel_inv(c) (((c) >> 39) & 1)
-
-#define cap_super_page_val(c) (((c) >> 34) & 0xf)
-#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
- * OFFSET_STRIDE) + 21)
-
-#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
-#define cap_max_fault_reg_offset(c) \
- (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
-
-#define cap_zlr(c) (((c) >> 22) & 1)
-#define cap_isoch(c) (((c) >> 23) & 1)
-#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
-#define cap_sagaw(c) (((c) >> 8) & 0x1f)
-#define cap_caching_mode(c) (((c) >> 7) & 1)
-#define cap_phmr(c) (((c) >> 6) & 1)
-#define cap_plmr(c) (((c) >> 5) & 1)
-#define cap_rwbf(c) (((c) >> 4) & 1)
-#define cap_afl(c) (((c) >> 3) & 1)
-#define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
-/*
- * Extended Capability Register
- */
-
-#define ecap_pasid(e) ((e >> 40) & 0x1)
-#define ecap_pss(e) ((e >> 35) & 0x1f)
-#define ecap_eafs(e) ((e >> 34) & 0x1)
-#define ecap_nwfs(e) ((e >> 33) & 0x1)
-#define ecap_srs(e) ((e >> 31) & 0x1)
-#define ecap_ers(e) ((e >> 30) & 0x1)
-#define ecap_prs(e) ((e >> 29) & 0x1)
-#define ecap_broken_pasid(e) ((e >> 28) & 0x1)
-#define ecap_dis(e) ((e >> 27) & 0x1)
-#define ecap_nest(e) ((e >> 26) & 0x1)
-#define ecap_mts(e) ((e >> 25) & 0x1)
-#define ecap_ecs(e) ((e >> 24) & 0x1)
-#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
-#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
-#define ecap_coherent(e) ((e) & 0x1)
-#define ecap_qis(e) ((e) & 0x2)
-#define ecap_pass_through(e) ((e >> 6) & 0x1)
-#define ecap_eim_support(e) ((e >> 4) & 0x1)
-#define ecap_ir_support(e) ((e >> 3) & 0x1)
-#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1)
-#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
-#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
-
-/* IOTLB_REG */
-#define DMA_TLB_FLUSH_GRANU_OFFSET 60
-#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
-#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
-#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
-#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
-#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
-#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
-#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
-#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
-#define DMA_TLB_IVT (((u64)1) << 63)
-#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
-#define DMA_TLB_MAX_SIZE (0x3f)
-
-/* INVALID_DESC */
-#define DMA_CCMD_INVL_GRANU_OFFSET 61
-#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4)
-#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4)
-#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4)
-#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7)
-#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
-#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16)))
-#define DMA_ID_TLB_IH_NONLEAF (((u64)1) << 6)
-#define DMA_ID_TLB_ADDR(addr) (addr)
-#define DMA_ID_TLB_ADDR_MASK(mask) (mask)
-
-/* PMEN_REG */
-#define DMA_PMEN_EPM (((u32)1)<<31)
-#define DMA_PMEN_PRS (((u32)1)<<0)
-
-/* GCMD_REG */
-#define DMA_GCMD_TE (((u32)1) << 31)
-#define DMA_GCMD_SRTP (((u32)1) << 30)
-#define DMA_GCMD_SFL (((u32)1) << 29)
-#define DMA_GCMD_EAFL (((u32)1) << 28)
-#define DMA_GCMD_WBF (((u32)1) << 27)
-#define DMA_GCMD_QIE (((u32)1) << 26)
-#define DMA_GCMD_SIRTP (((u32)1) << 24)
-#define DMA_GCMD_IRE (((u32) 1) << 25)
-#define DMA_GCMD_CFI (((u32) 1) << 23)
-
-/* GSTS_REG */
-#define DMA_GSTS_TES (((u32)1) << 31)
-#define DMA_GSTS_RTPS (((u32)1) << 30)
-#define DMA_GSTS_FLS (((u32)1) << 29)
-#define DMA_GSTS_AFLS (((u32)1) << 28)
-#define DMA_GSTS_WBFS (((u32)1) << 27)
-#define DMA_GSTS_QIES (((u32)1) << 26)
-#define DMA_GSTS_IRTPS (((u32)1) << 24)
-#define DMA_GSTS_IRES (((u32)1) << 25)
-#define DMA_GSTS_CFIS (((u32)1) << 23)
-
-/* DMA_RTADDR_REG */
-#define DMA_RTADDR_RTT (((u64)1) << 11)
-
-/* CCMD_REG */
-#define DMA_CCMD_ICC (((u64)1) << 63)
-#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
-#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
-#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
-#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
-#define DMA_CCMD_MASK_NOBIT 0
-#define DMA_CCMD_MASK_1BIT 1
-#define DMA_CCMD_MASK_2BIT 2
-#define DMA_CCMD_MASK_3BIT 3
-#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
-#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
-
-/* FECTL_REG */
-#define DMA_FECTL_IM (((u32)1) << 31)
-
-/* FSTS_REG */
-#define DMA_FSTS_PPF ((u32)2)
-#define DMA_FSTS_PFO ((u32)1)
-#define DMA_FSTS_IQE (1 << 4)
-#define DMA_FSTS_ICE (1 << 5)
-#define DMA_FSTS_ITE (1 << 6)
-#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
-
-/* FRCD_REG, 32 bits access */
-#define DMA_FRCD_F (((u32)1) << 31)
-#define dma_frcd_type(d) ((d >> 30) & 1)
-#define dma_frcd_fault_reason(c) (c & 0xff)
-#define dma_frcd_source_id(c) (c & 0xffff)
-/* low 64 bit */
-#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
-
-/* PRS_REG */
-#define DMA_PRS_PPR ((u32)1)
-
-#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
-do { \
- cycles_t start_time = get_cycles(); \
- while (1) { \
- sts = op(iommu->reg + offset); \
- if (cond) \
- break; \
- if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
- panic("DMAR hardware is malfunctioning\n"); \
- cpu_relax(); \
- } \
-} while (0)
-
-#define QI_LENGTH 256 /* queue length */
-
-enum {
- QI_FREE,
- QI_IN_USE,
- QI_DONE,
- QI_ABORT
-};
-
-#define QI_CC_TYPE 0x1
-#define QI_IOTLB_TYPE 0x2
-#define QI_DIOTLB_TYPE 0x3
-#define QI_IEC_TYPE 0x4
-#define QI_IWD_TYPE 0x5
-#define QI_EIOTLB_TYPE 0x6
-#define QI_PC_TYPE 0x7
-#define QI_DEIOTLB_TYPE 0x8
-#define QI_PGRP_RESP_TYPE 0x9
-#define QI_PSTRM_RESP_TYPE 0xa
-
-#define QI_IEC_SELECTIVE (((u64)1) << 4)
-#define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32))
-#define QI_IEC_IM(m) (((u64)(m & 0x1f) << 27))
-
-#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32)
-#define QI_IWD_STATUS_WRITE (((u64)1) << 5)
-
-#define QI_IOTLB_DID(did) (((u64)did) << 16)
-#define QI_IOTLB_DR(dr) (((u64)dr) << 7)
-#define QI_IOTLB_DW(dw) (((u64)dw) << 6)
-#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
-#define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK)
-#define QI_IOTLB_IH(ih) (((u64)ih) << 6)
-#define QI_IOTLB_AM(am) (((u8)am))
-
-#define QI_CC_FM(fm) (((u64)fm) << 48)
-#define QI_CC_SID(sid) (((u64)sid) << 32)
-#define QI_CC_DID(did) (((u64)did) << 16)
-#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
-
-#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
-#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
-#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
-#define QI_DEV_IOTLB_SIZE 1
-#define QI_DEV_IOTLB_MAX_INVS 32
-
-#define QI_PC_PASID(pasid) (((u64)pasid) << 32)
-#define QI_PC_DID(did) (((u64)did) << 16)
-#define QI_PC_GRAN(gran) (((u64)gran) << 4)
-
-#define QI_PC_ALL_PASIDS (QI_PC_TYPE | QI_PC_GRAN(0))
-#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1))
-
-#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
-#define QI_EIOTLB_GL(gl) (((u64)gl) << 7)
-#define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
-#define QI_EIOTLB_AM(am) (((u64)am))
-#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
-#define QI_EIOTLB_DID(did) (((u64)did) << 16)
-#define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4)
-
-#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK)
-#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
-#define QI_DEV_EIOTLB_GLOB(g) ((u64)g)
-#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
-#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
-#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
-#define QI_DEV_EIOTLB_MAX_INVS 32
-
-#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
-#define QI_PGRP_PRIV(priv) (((u64)(priv)) << 32)
-#define QI_PGRP_RESP_CODE(res) ((u64)(res))
-#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)
-#define QI_PGRP_DID(did) (((u64)(did)) << 16)
-#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4)
-
-#define QI_PSTRM_ADDR(addr) (((u64)(addr)) & VTD_PAGE_MASK)
-#define QI_PSTRM_DEVFN(devfn) (((u64)(devfn)) << 4)
-#define QI_PSTRM_RESP_CODE(res) ((u64)(res))
-#define QI_PSTRM_IDX(idx) (((u64)(idx)) << 55)
-#define QI_PSTRM_PRIV(priv) (((u64)(priv)) << 32)
-#define QI_PSTRM_BUS(bus) (((u64)(bus)) << 24)
-#define QI_PSTRM_PASID(pasid) (((u64)(pasid)) << 4)
-
-#define QI_RESP_SUCCESS 0x0
-#define QI_RESP_INVALID 0x1
-#define QI_RESP_FAILURE 0xf
-
-#define QI_GRAN_ALL_ALL 0
-#define QI_GRAN_NONG_ALL 1
-#define QI_GRAN_NONG_PASID 2
-#define QI_GRAN_PSI_PASID 3
-
-struct qi_desc {
- u64 low, high;
-};
-
-struct q_inval {
- raw_spinlock_t q_lock;
- struct qi_desc *desc; /* invalidation queue */
- int *desc_status; /* desc status */
- int free_head; /* first free entry */
- int free_tail; /* last free entry */
- int free_cnt;
-};
-
-#ifdef CONFIG_IRQ_REMAP
-/* 1MB - maximum possible interrupt remapping table size */
-#define INTR_REMAP_PAGE_ORDER 8
-#define INTR_REMAP_TABLE_REG_SIZE 0xf
-#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf
-
-#define INTR_REMAP_TABLE_ENTRIES 65536
-
-struct irq_domain;
-
-struct ir_table {
- struct irte *base;
- unsigned long *bitmap;
-};
-#endif
-
-struct iommu_flush {
- void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
- u8 fm, u64 type);
- void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type);
-};
-
-enum {
- SR_DMAR_FECTL_REG,
- SR_DMAR_FEDATA_REG,
- SR_DMAR_FEADDR_REG,
- SR_DMAR_FEUADDR_REG,
- MAX_SR_DMAR_REGS
-};
-
-#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
-#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
-
-struct pasid_entry;
-struct pasid_state_entry;
-struct page_req_dsc;
-
-struct intel_iommu {
- void __iomem *reg; /* Pointer to hardware regs, virtual addr */
- u64 reg_phys; /* physical address of hw register set */
- u64 reg_size; /* size of hw register set */
- u64 cap;
- u64 ecap;
- u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
- raw_spinlock_t register_lock; /* protect register handling */
- int seq_id; /* sequence id of the iommu */
- int agaw; /* agaw of this iommu */
- int msagaw; /* max sagaw of this iommu */
- unsigned int irq, pr_irq;
- u16 segment; /* PCI segment# */
- unsigned char name[13]; /* Device Name */
-
-#ifdef CONFIG_INTEL_IOMMU
- unsigned long *domain_ids; /* bitmap of domains */
- struct dmar_domain ***domains; /* ptr to domains */
- spinlock_t lock; /* protect context, domain ids */
- struct root_entry *root_entry; /* virtual address */
-
- struct iommu_flush flush;
-#endif
-#ifdef CONFIG_INTEL_IOMMU_SVM
- /* These are large and need to be contiguous, so we allocate just
- * one for now. We'll maybe want to rethink that if we truly give
- * devices away to userspace processes (e.g. for DPDK) and don't
- * want to trust that userspace will use *only* the PASID it was
- * told to. But while it's all driver-arbitrated, we're fine. */
- struct pasid_entry *pasid_table;
- struct pasid_state_entry *pasid_state_table;
- struct page_req_dsc *prq;
- unsigned char prq_name[16]; /* Name for PRQ interrupt */
- struct idr pasid_idr;
- u32 pasid_max;
-#endif
- struct q_inval *qi; /* Queued invalidation info */
- u32 *iommu_state; /* Store iommu states between suspend and resume.*/
-
-#ifdef CONFIG_IRQ_REMAP
- struct ir_table *ir_table; /* Interrupt remapping info */
- struct irq_domain *ir_domain;
- struct irq_domain *ir_msi_domain;
-#endif
- struct iommu_device iommu; /* IOMMU core code handle */
- int node;
- u32 flags; /* Software defined flags */
-};
-
-static inline void __iommu_flush_cache(
- struct intel_iommu *iommu, void *addr, int size)
-{
- if (!ecap_coherent(iommu->ecap))
- clflush_cache_range(addr, size);
-}
-
-extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
-extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
-
-extern int dmar_enable_qi(struct intel_iommu *iommu);
-extern void dmar_disable_qi(struct intel_iommu *iommu);
-extern int dmar_reenable_qi(struct intel_iommu *iommu);
-extern void qi_global_iec(struct intel_iommu *iommu);
-
-extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
- u8 fm, u64 type);
-extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type);
-extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
- u64 addr, unsigned mask);
-
-extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
-
-extern int dmar_ir_support(void);
-
-#ifdef CONFIG_INTEL_IOMMU_SVM
-extern int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu);
-extern int intel_svm_free_pasid_tables(struct intel_iommu *iommu);
-extern int intel_svm_enable_prq(struct intel_iommu *iommu);
-extern int intel_svm_finish_prq(struct intel_iommu *iommu);
-
-struct svm_dev_ops;
-
-struct intel_svm_dev {
- struct list_head list;
- struct rcu_head rcu;
- struct device *dev;
- struct svm_dev_ops *ops;
- int users;
- u16 did;
- u16 dev_iotlb:1;
- u16 sid, qdep;
-};
-
-struct intel_svm {
- struct mmu_notifier notifier;
- struct mm_struct *mm;
- struct intel_iommu *iommu;
- int flags;
- int pasid;
- struct list_head devs;
-};
-
-extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev);
-extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
-#endif
-
-extern const struct attribute_group *intel_iommu_groups[];
-
-#endif
diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h
new file mode 100644
index 000000000000..f45f13304add
--- /dev/null
+++ b/include/linux/intel-ish-client-if.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel ISH client Interface definitions
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ */
+
+#ifndef _INTEL_ISH_CLIENT_IF_H_
+#define _INTEL_ISH_CLIENT_IF_H_
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+struct ishtp_cl_device;
+struct ishtp_device;
+struct ishtp_cl;
+struct ishtp_fw_client;
+
+typedef __printf(2, 3) void (*ishtp_print_log)(struct ishtp_device *dev,
+ const char *format, ...);
+
+/* Client state */
+enum cl_state {
+ ISHTP_CL_INITIALIZING = 0,
+ ISHTP_CL_CONNECTING,
+ ISHTP_CL_CONNECTED,
+ ISHTP_CL_DISCONNECTING,
+ ISHTP_CL_DISCONNECTED
+};
+
+/**
+ * struct ishtp_cl_driver - ISHTP client driver
+ * @driver: driver instance on a bus
+ * @name: Name of the device for probe
+ * @id: ISHTP device id table for driver matching
+ * @probe: driver callback for device probe
+ * @remove: driver callback on device removal
+ * @reset: driver callback on device reset
+ * @pm: device power management callbacks
+ *
+ * Client drivers define this to get probed/removed for an ISHTP client
+ * device.
+ */
+struct ishtp_cl_driver {
+ struct device_driver driver;
+ const char *name;
+ const struct ishtp_device_id *id;
+ int (*probe)(struct ishtp_cl_device *dev);
+ void (*remove)(struct ishtp_cl_device *dev);
+ int (*reset)(struct ishtp_cl_device *dev);
+ const struct dev_pm_ops *pm;
+};
+
+/**
+ * struct ishtp_msg_data - ISHTP message data struct
+ * @size: Size of data in the *data
+ * @data: Pointer to data
+ */
+struct ishtp_msg_data {
+ uint32_t size;
+ unsigned char *data;
+};
+
+/**
+ * struct ishtp_cl_rb - request block structure
+ * @list: Link to list members
+ * @cl: ISHTP client instance
+ * @buffer: message header
+ * @buf_idx: Index into buffer
+ * @read_time: unused at this time
+ */
+struct ishtp_cl_rb {
+ struct list_head list;
+ struct ishtp_cl *cl;
+ struct ishtp_msg_data buffer;
+ unsigned long buf_idx;
+ unsigned long read_time;
+};
+
+int ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
+ struct module *owner);
+void ishtp_cl_driver_unregister(struct ishtp_cl_driver *driver);
+int ishtp_register_event_cb(struct ishtp_cl_device *device,
+ void (*read_cb)(struct ishtp_cl_device *));
+
+/* Get the device * from ishtp device instance */
+struct device *ishtp_device(struct ishtp_cl_device *cl_device);
+/* wait for IPC resume */
+bool ishtp_wait_resume(struct ishtp_device *dev);
+/* Trace interface for clients */
+ishtp_print_log ishtp_trace_callback(struct ishtp_cl_device *cl_device);
+/* Get device pointer of PCI device for DMA access */
+struct device *ishtp_get_pci_device(struct ishtp_cl_device *cl_device);
+
+struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device);
+void ishtp_cl_free(struct ishtp_cl *cl);
+int ishtp_cl_link(struct ishtp_cl *cl);
+void ishtp_cl_unlink(struct ishtp_cl *cl);
+int ishtp_cl_disconnect(struct ishtp_cl *cl);
+int ishtp_cl_connect(struct ishtp_cl *cl);
+int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length);
+int ishtp_cl_flush_queues(struct ishtp_cl *cl);
+int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb);
+bool ishtp_cl_tx_empty(struct ishtp_cl *cl);
+struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl);
+void *ishtp_get_client_data(struct ishtp_cl *cl);
+void ishtp_set_client_data(struct ishtp_cl *cl, void *data);
+struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl);
+void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size);
+void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size);
+void ishtp_set_connection_state(struct ishtp_cl *cl, int state);
+void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id);
+
+void ishtp_put_device(struct ishtp_cl_device *cl_dev);
+void ishtp_get_device(struct ishtp_cl_device *cl_dev);
+void ishtp_set_drvdata(struct ishtp_cl_device *cl_device, void *data);
+void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device);
+struct ishtp_cl_device *ishtp_dev_to_cl_device(struct device *dev);
+struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev,
+ const guid_t *uuid);
+int ishtp_get_fw_client_id(struct ishtp_fw_client *fw_client);
+int ish_hw_reset(struct ishtp_device *dev);
+#endif /* _INTEL_ISH_CLIENT_IF_H_ */
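
A minimal client driver skeleton against this interface; all "my_*" names
are hypothetical, only the registration calls come from this header:

	#include <linux/intel-ish-client-if.h>
	#include <linux/module.h>

	static int my_ishtp_probe(struct ishtp_cl_device *cl_device)
	{
		/* allocate a client with ishtp_cl_allocate() and connect it */
		return 0;
	}

	static void my_ishtp_remove(struct ishtp_cl_device *cl_device)
	{
		/* disconnect, unlink and free the client */
	}

	static struct ishtp_cl_driver my_ishtp_driver = {
		.name	= "my-ishtp-client",
		.probe	= my_ishtp_probe,
		.remove	= my_ishtp_remove,
	};

	static int __init my_init(void)
	{
		return ishtp_cl_driver_register(&my_ishtp_driver, THIS_MODULE);
	}
	module_init(my_init);

	static void __exit my_exit(void)
	{
		ishtp_cl_driver_unregister(&my_ishtp_driver);
	}
	module_exit(my_exit);

	MODULE_LICENSE("GPL");
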
diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
deleted file mode 100644
index 99bc5b3ae26e..000000000000
--- a/include/linux/intel-svm.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright © 2015 Intel Corporation.
- *
- * Authors: David Woodhouse <David.Woodhouse@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __INTEL_SVM_H__
-#define __INTEL_SVM_H__
-
-struct device;
-
-struct svm_dev_ops {
- void (*fault_cb)(struct device *dev, int pasid, u64 address,
- u32 private, int rwxp, int response);
-};
-
-/* Values for rxwp in fault_cb callback */
-#define SVM_REQ_READ (1<<3)
-#define SVM_REQ_WRITE (1<<2)
-#define SVM_REQ_EXEC (1<<1)
-#define SVM_REQ_PRIV (1<<0)
-
-
-/*
- * The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main"
- * PASID for the current process. Even if a PASID already exists, a new one
- * will be allocated. And the PASID allocated with SVM_FLAG_PRIVATE_PASID
- * will not be given to subsequent callers. This facility allows a driver to
- * disambiguate between multiple device contexts which access the same MM,
- * if there is no other way to do so. It should be used sparingly, if at all.
- */
-#define SVM_FLAG_PRIVATE_PASID (1<<0)
-
-/*
- * The SVM_FLAG_SUPERVISOR_MODE flag requests a PASID which can be used only
- * for access to kernel addresses. No IOTLB flushes are automatically done
- * for kernel mappings; it is valid only for access to the kernel's static
- * 1:1 mapping of physical memory — not to vmalloc or even module mappings.
- * A future API addition may permit the use of such ranges, by means of an
- * explicit IOTLB flush call (akin to the DMA API's unmap method).
- *
- * It is unlikely that we will ever hook into flush_tlb_kernel_range() to
- * do such IOTLB flushes automatically.
- */
-#define SVM_FLAG_SUPERVISOR_MODE (1<<1)
-
-#ifdef CONFIG_INTEL_IOMMU_SVM
-
-/**
- * intel_svm_bind_mm() - Bind the current process to a PASID
- * @dev: Device to be granted acccess
- * @pasid: Address for allocated PASID
- * @flags: Flags. Later for requesting supervisor mode, etc.
- * @ops: Callbacks to device driver
- *
- * This function attempts to enable PASID support for the given device.
- * If the @pasid argument is non-%NULL, a PASID is allocated for access
- * to the MM of the current process.
- *
- * By using a %NULL value for the @pasid argument, this function can
- * be used to simply validate that PASID support is available for the
- * given device — i.e. that it is behind an IOMMU which has the
- * requisite support, and is enabled.
- *
- * Page faults are handled transparently by the IOMMU code, and there
- * should be no need for the device driver to be involved. If a page
- * fault cannot be handled (i.e. is an invalid address rather than
- * just needs paging in), then the page request will be completed by
- * the core IOMMU code with appropriate status, and the device itself
- * can then report the resulting fault to its driver via whatever
- * mechanism is appropriate.
- *
- * Multiple calls from the same process may result in the same PASID
- * being re-used. A reference count is kept.
- */
-extern int intel_svm_bind_mm(struct device *dev, int *pasid, int flags,
- struct svm_dev_ops *ops);
-
-/**
- * intel_svm_unbind_mm() - Unbind a specified PASID
- * @dev: Device for which PASID was allocated
- * @pasid: PASID value to be unbound
- *
- * This function allows a PASID to be retired when the device no
- * longer requires access to the address space of a given process.
- *
- * If the use count for the PASID in question reaches zero, the
- * PASID is revoked and may no longer be used by hardware.
- *
- * Device drivers are required to ensure that no access (including
- * page requests) is currently outstanding for the PASID in question,
- * before calling this function.
- */
-extern int intel_svm_unbind_mm(struct device *dev, int pasid);
-
-/**
- * intel_svm_is_pasid_valid() - check if pasid is valid
- * @dev: Device for which PASID was allocated
- * @pasid: PASID value to be checked
- *
- * This function checks if the specified pasid is still valid. A
- * valid pasid means the backing mm is still having a valid user.
- * For kernel callers init_mm is always valid. for other mm, if mm->mm_users
- * is non-zero, it is valid.
- *
- * returns -EINVAL if invalid pasid, 0 if pasid ref count is invalid
- * 1 if pasid is valid.
- */
-extern int intel_svm_is_pasid_valid(struct device *dev, int pasid);
-
-#else /* CONFIG_INTEL_IOMMU_SVM */
-
-static inline int intel_svm_bind_mm(struct device *dev, int *pasid,
- int flags, struct svm_dev_ops *ops)
-{
- return -ENOSYS;
-}
-
-static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
-{
- BUG();
-}
-
-static int intel_svm_is_pasid_valid(struct device *dev, int pasid)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_INTEL_IOMMU_SVM */
-
-#define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL, 0, NULL))
-
-#endif /* __INTEL_SVM_H__ */
diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h
new file mode 100644
index 000000000000..9f4b6f5b822f
--- /dev/null
+++ b/include/linux/intel_rapl.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Data types and headers for RAPL support
+ *
+ * Copyright (C) 2019 Intel Corporation.
+ *
+ * Author: Zhang Rui <rui.zhang@intel.com>
+ */
+
+#ifndef __INTEL_RAPL_H__
+#define __INTEL_RAPL_H__
+
+#include <linux/types.h>
+#include <linux/powercap.h>
+#include <linux/cpuhotplug.h>
+
+enum rapl_domain_type {
+ RAPL_DOMAIN_PACKAGE, /* entire package/socket */
+ RAPL_DOMAIN_PP0, /* core power plane */
+ RAPL_DOMAIN_PP1, /* graphics uncore */
+ RAPL_DOMAIN_DRAM, /* DRAM control_type */
+ RAPL_DOMAIN_PLATFORM, /* PSys control_type */
+ RAPL_DOMAIN_MAX,
+};
+
+enum rapl_domain_reg_id {
+ RAPL_DOMAIN_REG_LIMIT,
+ RAPL_DOMAIN_REG_STATUS,
+ RAPL_DOMAIN_REG_PERF,
+ RAPL_DOMAIN_REG_POLICY,
+ RAPL_DOMAIN_REG_INFO,
+ RAPL_DOMAIN_REG_PL4,
+ RAPL_DOMAIN_REG_MAX,
+};
+
+struct rapl_domain;
+
+enum rapl_primitives {
+ ENERGY_COUNTER,
+ POWER_LIMIT1,
+ POWER_LIMIT2,
+ POWER_LIMIT4,
+ FW_LOCK,
+
+ PL1_ENABLE, /* power limit 1, aka long term */
+ PL1_CLAMP, /* allow frequency to go below OS request */
+ PL2_ENABLE, /* power limit 2, aka short term, instantaneous */
+ PL2_CLAMP,
+ PL4_ENABLE, /* power limit 4, aka max peak power */
+
+ TIME_WINDOW1, /* long term */
+ TIME_WINDOW2, /* short term */
+ THERMAL_SPEC_POWER,
+ MAX_POWER,
+
+ MIN_POWER,
+ MAX_TIME_WINDOW,
+ THROTTLED_TIME,
+ PRIORITY_LEVEL,
+
+ PSYS_POWER_LIMIT1,
+ PSYS_POWER_LIMIT2,
+ PSYS_PL1_ENABLE,
+ PSYS_PL2_ENABLE,
+ PSYS_TIME_WINDOW1,
+ PSYS_TIME_WINDOW2,
+ /* below are not raw primitive data */
+ AVERAGE_POWER,
+ NR_RAPL_PRIMITIVES,
+};
+
+struct rapl_domain_data {
+ u64 primitives[NR_RAPL_PRIMITIVES];
+ unsigned long timestamp;
+};
+
+#define NR_POWER_LIMITS (3)
+struct rapl_power_limit {
+ struct powercap_zone_constraint *constraint;
+ int prim_id; /* primitive ID used to enable */
+ struct rapl_domain *domain;
+ const char *name;
+ u64 last_power_limit;
+};
+
+struct rapl_package;
+
+#define RAPL_DOMAIN_NAME_LENGTH 16
+
+struct rapl_domain {
+ char name[RAPL_DOMAIN_NAME_LENGTH];
+ enum rapl_domain_type id;
+ u64 regs[RAPL_DOMAIN_REG_MAX];
+ struct powercap_zone power_zone;
+ struct rapl_domain_data rdd;
+ struct rapl_power_limit rpl[NR_POWER_LIMITS];
+ u64 attr_map; /* track capabilities */
+ unsigned int state;
+ unsigned int domain_energy_unit;
+ struct rapl_package *rp;
+};
+
+struct reg_action {
+ u64 reg;
+ u64 mask;
+ u64 value;
+ int err;
+};
+
+/**
+ * struct rapl_if_priv: private data for different RAPL interfaces
+ * @control_type: Each RAPL interface must have its own powercap
+ * control type.
+ * @platform_rapl_domain: Optional. Some RAPL interface may have platform
+ * level RAPL control.
+ * @pcap_rapl_online: CPU hotplug state for each RAPL interface.
+ * @reg_unit: Register for getting energy/power/time unit.
+ * @regs: Register sets for different RAPL Domains.
+ * @limits: Number of power limits supported by each domain.
+ * @read_raw: Callback for reading RAPL interface specific
+ * registers.
+ * @write_raw: Callback for writing RAPL interface specific
+ * registers.
+ */
+struct rapl_if_priv {
+ struct powercap_control_type *control_type;
+ struct rapl_domain *platform_rapl_domain;
+ enum cpuhp_state pcap_rapl_online;
+ u64 reg_unit;
+ u64 regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX];
+ int limits[RAPL_DOMAIN_MAX];
+ int (*read_raw)(int cpu, struct reg_action *ra);
+ int (*write_raw)(int cpu, struct reg_action *ra);
+};
+
+/* maximum rapl package domain name: package-%d-die-%d */
+#define PACKAGE_DOMAIN_NAME_LENGTH 30
+
+struct rapl_package {
+ unsigned int id; /* logical die id, equals physical die id on 1-die systems */
+ unsigned int nr_domains;
+ unsigned long domain_map; /* bit map of active domains */
+ unsigned int power_unit;
+ unsigned int energy_unit;
+ unsigned int time_unit;
+ struct rapl_domain *domains; /* array of domains, sized at runtime */
+ struct powercap_zone *power_zone; /* keep track of parent zone */
+ unsigned long power_limit_irq; /* keep track of package power limit
+ * notify interrupt enable status.
+ */
+ struct list_head plist;
+ int lead_cpu; /* one active cpu per package for access */
+ /* Track active cpus */
+ struct cpumask cpumask;
+ char name[PACKAGE_DOMAIN_NAME_LENGTH];
+ struct rapl_if_priv *priv;
+};
+
+struct rapl_package *rapl_find_package_domain(int cpu, struct rapl_if_priv *priv);
+struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv);
+void rapl_remove_package(struct rapl_package *rp);
+
+#endif /* __INTEL_RAPL_H__ */
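
A hedged sketch of how an interface driver might wire this up; all "my_*"
names are illustrative, and 0x606 (MSR_RAPL_POWER_UNIT on the MSR interface)
is shown only as an example unit register:

	#include <linux/cpumask.h>
	#include <linux/err.h>
	#include <linux/intel_rapl.h>

	static int my_read_raw(int cpu, struct reg_action *ra)
	{
		/* read ra->reg on @cpu into ra->value, e.g. via an MSR read */
		ra->value &= ra->mask;
		return 0;
	}

	static int my_write_raw(int cpu, struct reg_action *ra)
	{
		/* write ra->value (under ra->mask) to ra->reg on @cpu */
		return 0;
	}

	static struct rapl_if_priv my_priv = {
		.reg_unit  = 0x606,
		.read_raw  = my_read_raw,
		.write_raw = my_write_raw,
		/* .regs[RAPL_DOMAIN_*][RAPL_DOMAIN_REG_*] filled in per domain */
	};

	static int my_cpu_online(unsigned int cpu)
	{
		struct rapl_package *rp;

		rp = rapl_find_package_domain(cpu, &my_priv);
		if (!rp) {
			rp = rapl_add_package(cpu, &my_priv);
			if (IS_ERR(rp))
				return PTR_ERR(rp);
		}
		cpumask_set_cpu(cpu, &rp->cpumask);
		return 0;
	}
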
diff --git a/include/linux/intel_tcc.h b/include/linux/intel_tcc.h
new file mode 100644
index 000000000000..f422612c28d6
--- /dev/null
+++ b/include/linux/intel_tcc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * header for Intel TCC (thermal control circuitry) library
+ *
+ * Copyright (C) 2022 Intel Corporation.
+ */
+
+#ifndef __INTEL_TCC_H__
+#define __INTEL_TCC_H__
+
+#include <linux/types.h>
+
+int intel_tcc_get_tjmax(int cpu);
+int intel_tcc_get_offset(int cpu);
+int intel_tcc_set_offset(int cpu, int offset);
+int intel_tcc_get_temp(int cpu, bool pkg);
+
+#endif /* __INTEL_TCC_H__ */
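
A short consumer sketch, assuming (as the int return types suggest) a value
in degrees Celsius on success and a negative errno on failure:

	#include <linux/errno.h>
	#include <linux/intel_tcc.h>
	#include <linux/printk.h>

	static int my_report_tcc(int cpu)
	{
		int tjmax = intel_tcc_get_tjmax(cpu);
		int temp  = intel_tcc_get_temp(cpu, true);	/* true: package sensor */

		if (tjmax < 0 || temp < 0)
			return -ENODEV;

		pr_info("cpu%d: tjmax=%d C, pkg temp=%d C\n", cpu, tjmax, temp);
		return 0;
	}
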
diff --git a/include/linux/intel_th.h b/include/linux/intel_th.h
new file mode 100644
index 000000000000..9b7f4c22499c
--- /dev/null
+++ b/include/linux/intel_th.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel(R) Trace Hub data structures for implementing buffer sinks.
+ *
+ * Copyright (C) 2019 Intel Corporation.
+ */
+
+#ifndef _INTEL_TH_H_
+#define _INTEL_TH_H_
+
+#include <linux/scatterlist.h>
+
+/* MSC operating modes (MSC_MODE) */
+enum {
+ MSC_MODE_SINGLE = 0,
+ MSC_MODE_MULTI,
+ MSC_MODE_EXI,
+ MSC_MODE_DEBUG,
+};
+
+struct msu_buffer {
+ const char *name;
+ /*
+ * ->assign() called when buffer 'mode' is set to this driver
+ * (aka mode_store())
+ * @device: struct device * of the msc
+ * @mode: allows the driver to set HW mode (see the enum above)
+ * Returns: a pointer to a private structure associated with this
+ * msc or NULL in case of error. This private structure
+ * will then be passed into all other callbacks.
+ */
+ void *(*assign)(struct device *dev, int *mode);
+ /* ->unassign(): some other mode is selected, clean up */
+ void (*unassign)(void *priv);
+ /*
+ * ->alloc_window(): allocate memory for the window of a given
+ * size
+ * @sgt: pointer to sg_table, can be overridden by the buffer
+ * driver, or kept intact
+ * Returns: number of sg table entries <= number of pages;
+ * 0 is treated as an allocation failure.
+ */
+ int (*alloc_window)(void *priv, struct sg_table **sgt,
+ size_t size);
+ void (*free_window)(void *priv, struct sg_table *sgt);
+ /* ->activate(): trace has started */
+ void (*activate)(void *priv);
+ /* ->deactivate(): trace is about to stop */
+ void (*deactivate)(void *priv);
+ /*
+ * ->ready(): window @sgt is filled up to the last block OR
+ * tracing is stopped by the user; this window contains
+ * @bytes data. The window in question transitions into
+ * the "LOCKED" state, indicating that it can't be used
+ * by hardware. To clear this state and make the window
+ * available to the hardware again, call
+ * intel_th_msc_window_unlock().
+ */
+ int (*ready)(void *priv, struct sg_table *sgt, size_t bytes);
+};
+
+int intel_th_msu_buffer_register(const struct msu_buffer *mbuf,
+ struct module *owner);
+void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf);
+void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt);
+
+#define module_intel_th_msu_buffer(__buffer) \
+static int __init __buffer##_init(void) \
+{ \
+ return intel_th_msu_buffer_register(&(__buffer), THIS_MODULE); \
+} \
+module_init(__buffer##_init); \
+static void __exit __buffer##_exit(void) \
+{ \
+ intel_th_msu_buffer_unregister(&(__buffer)); \
+} \
+module_exit(__buffer##_exit);
+
+#endif /* _INTEL_TH_H_ */
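
A minimal buffer-sink sketch against these callbacks; the names are
hypothetical, and it assumes the core falls back to its own window
allocation when ->alloc_window() is absent:

	#include <linux/intel_th.h>
	#include <linux/module.h>
	#include <linux/slab.h>

	static void *my_assign(struct device *dev, int *mode)
	{
		*mode = MSC_MODE_MULTI;			/* pick a HW mode */
		return kzalloc(sizeof(int), GFP_KERNEL);	/* private state */
	}

	static void my_unassign(void *priv)
	{
		kfree(priv);
	}

	static int my_ready(void *priv, struct sg_table *sgt, size_t bytes)
	{
		/*
		 * Consume @bytes from the window; once done, release it back
		 * to the hardware via intel_th_msc_window_unlock().
		 */
		return 0;
	}

	static const struct msu_buffer my_mbuf = {
		.name	  = "mybuf",
		.assign	  = my_assign,
		.unassign = my_unassign,
		.ready	  = my_ready,
	};

	module_intel_th_msu_buffer(my_mbuf);
	MODULE_LICENSE("GPL");
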
diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h
new file mode 100644
index 000000000000..f505788c05da
--- /dev/null
+++ b/include/linux/intel_tpmi.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * intel_tpmi.h: Intel TPMI core external interface
+ */
+
+#ifndef _INTEL_TPMI_H_
+#define _INTEL_TPMI_H_
+
+/**
+ * struct intel_tpmi_plat_info - Platform information for a TPMI device instance
+ * @package_id: CPU Package id
+ * @bus_number: PCI bus number
+ * @device_number: PCI device number
+ * @function_number: PCI function number
+ *
+ * Structure to store platform data for a TPMI device instance. This
+ * struct is used to return data via tpmi_get_platform_data().
+ */
+struct intel_tpmi_plat_info {
+ u8 package_id;
+ u8 bus_number;
+ u8 device_number;
+ u8 function_number;
+};
+
+struct intel_tpmi_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev);
+struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int index);
+int tpmi_get_resource_count(struct auxiliary_device *auxdev);
+
+#endif
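
A sketch of a feature driver consuming this interface from its
auxiliary-bus probe; the error conventions (NULL vs. ERR_PTR) are not
visible from the header, so the NULL checks below are an assumption:

	#include <linux/auxiliary_bus.h>
	#include <linux/intel_tpmi.h>
	#include <linux/ioport.h>

	static int my_tpmi_probe(struct auxiliary_device *auxdev,
				 const struct auxiliary_device_id *id)
	{
		struct intel_tpmi_plat_info *info;
		struct resource *res;
		int i, count;

		info = tpmi_get_platform_data(auxdev);
		if (!info)
			return -ENODEV;

		count = tpmi_get_resource_count(auxdev);
		for (i = 0; i < count; i++) {
			res = tpmi_get_resource_at_index(auxdev, i);
			if (res)
				dev_info(&auxdev->dev, "feature resource %pR\n", res);
		}
		return 0;
	}
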
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
new file mode 100644
index 000000000000..e6d8aca6886d
--- /dev/null
+++ b/include/linux/interconnect-provider.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#ifndef __LINUX_INTERCONNECT_PROVIDER_H
+#define __LINUX_INTERCONNECT_PROVIDER_H
+
+#include <linux/interconnect.h>
+
+#define icc_units_to_bps(bw) ((bw) * 1000ULL)
+
+struct icc_node;
+struct of_phandle_args;
+
+/**
+ * struct icc_node_data - icc node data
+ *
+ * @node: icc node
+ * @tag: tag
+ */
+struct icc_node_data {
+ struct icc_node *node;
+ u32 tag;
+};
+
+/**
+ * struct icc_onecell_data - driver data for onecell interconnect providers
+ *
+ * @num_nodes: number of nodes in this device
+ * @nodes: array of pointers to the nodes in this device
+ */
+struct icc_onecell_data {
+ unsigned int num_nodes;
+ struct icc_node *nodes[];
+};
+
+struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
+ void *data);
+
+/**
+ * struct icc_provider - interconnect provider (controller) entity that might
+ * provide multiple interconnect controls
+ *
+ * @provider_list: list of the registered interconnect providers
+ * @nodes: internal list of the interconnect provider nodes
+ * @set: pointer to device specific set operation function
+ * @aggregate: pointer to device specific aggregate operation function
+ * @pre_aggregate: pointer to device specific function that is called
+ * before the aggregation begins (optional)
+ * @get_bw: pointer to device specific function to get current bandwidth
+ * @xlate: provider-specific callback for mapping nodes from phandle arguments
+ * @xlate_extended: vendor-specific callback for mapping node data from phandle arguments
+ * @dev: the device this interconnect provider belongs to
+ * @users: count of active users
+ * @inter_set: whether inter-provider pairs will be configured with @set
+ * @data: pointer to private data
+ */
+struct icc_provider {
+ struct list_head provider_list;
+ struct list_head nodes;
+ int (*set)(struct icc_node *src, struct icc_node *dst);
+ int (*aggregate)(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
+ void (*pre_aggregate)(struct icc_node *node);
+ int (*get_bw)(struct icc_node *node, u32 *avg, u32 *peak);
+ struct icc_node* (*xlate)(struct of_phandle_args *spec, void *data);
+ struct icc_node_data* (*xlate_extended)(struct of_phandle_args *spec, void *data);
+ struct device *dev;
+ int users;
+ bool inter_set;
+ void *data;
+};
+
+/**
+ * struct icc_node - entity that is part of the interconnect topology
+ *
+ * @id: platform specific node id
+ * @name: node name used in debugfs
+ * @links: a list of targets pointing to where we can go next when traversing
+ * @num_links: number of links to other interconnect nodes
+ * @provider: points to the interconnect provider of this node
+ * @node_list: the list entry in the parent provider's "nodes" list
+ * @search_list: list used when walking the nodes graph
+ * @reverse: pointer to previous node when walking the nodes graph
+ * @is_traversed: flag that is used when walking the nodes graph
+ * @req_list: a list of QoS constraint requests associated with this node
+ * @avg_bw: aggregated value of average bandwidth requests from all consumers
+ * @peak_bw: aggregated value of peak bandwidth requests from all consumers
+ * @init_avg: average bandwidth value that is read from the hardware during init
+ * @init_peak: peak bandwidth value that is read from the hardware during init
+ * @data: pointer to private data
+ */
+struct icc_node {
+ int id;
+ const char *name;
+ struct icc_node **links;
+ size_t num_links;
+
+ struct icc_provider *provider;
+ struct list_head node_list;
+ struct list_head search_list;
+ struct icc_node *reverse;
+ u8 is_traversed:1;
+ struct hlist_head req_list;
+ u32 avg_bw;
+ u32 peak_bw;
+ u32 init_avg;
+ u32 init_peak;
+ void *data;
+};
+
+#if IS_ENABLED(CONFIG_INTERCONNECT)
+
+int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
+struct icc_node *icc_node_create(int id);
+void icc_node_destroy(int id);
+int icc_link_create(struct icc_node *node, const int dst_id);
+void icc_node_add(struct icc_node *node, struct icc_provider *provider);
+void icc_node_del(struct icc_node *node);
+int icc_nodes_remove(struct icc_provider *provider);
+void icc_provider_init(struct icc_provider *provider);
+int icc_provider_register(struct icc_provider *provider);
+void icc_provider_deregister(struct icc_provider *provider);
+struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec);
+void icc_sync_state(struct device *dev);
+
+#else
+
+static inline int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ return -ENOTSUPP;
+}
+
+static inline struct icc_node *icc_node_create(int id)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
+static inline void icc_node_destroy(int id)
+{
+}
+
+static inline int icc_link_create(struct icc_node *node, const int dst_id)
+{
+ return -ENOTSUPP;
+}
+
+static inline void icc_node_add(struct icc_node *node, struct icc_provider *provider)
+{
+}
+
+static inline void icc_node_del(struct icc_node *node)
+{
+}
+
+static inline int icc_nodes_remove(struct icc_provider *provider)
+{
+ return -ENOTSUPP;
+}
+
+static inline void icc_provider_init(struct icc_provider *provider) { }
+
+static inline int icc_provider_register(struct icc_provider *provider)
+{
+ return -ENOTSUPP;
+}
+
+static inline void icc_provider_deregister(struct icc_provider *provider) { }
+
+static inline struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
+#endif /* CONFIG_INTERCONNECT */
+
+#endif /* __LINUX_INTERCONNECT_PROVIDER_H */
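
A hedged sketch of provider registration with a two-node topology; the node
ids, names and the my_set() body are hypothetical, and DT ->xlate wiring is
omitted:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/interconnect-provider.h>
	#include <linux/slab.h>

	static int my_set(struct icc_node *src, struct icc_node *dst)
	{
		/* program the fabric using src->avg_bw and src->peak_bw */
		return 0;
	}

	static int my_register_provider(struct device *dev)
	{
		struct icc_provider *provider;
		struct icc_node *master, *slave;

		provider = devm_kzalloc(dev, sizeof(*provider), GFP_KERNEL);
		if (!provider)
			return -ENOMEM;

		provider->dev = dev;
		provider->set = my_set;
		provider->aggregate = icc_std_aggregate;
		icc_provider_init(provider);

		master = icc_node_create(0);		/* hypothetical node ids */
		slave = icc_node_create(1);
		if (IS_ERR(master) || IS_ERR(slave))
			return -ENOMEM;

		icc_node_add(master, provider);
		icc_node_add(slave, provider);
		icc_link_create(master, 1);		/* edge: master -> slave */

		return icc_provider_register(provider);
	}
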
diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h
new file mode 100644
index 000000000000..2b0e784ba771
--- /dev/null
+++ b/include/linux/interconnect.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2019, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#ifndef __LINUX_INTERCONNECT_H
+#define __LINUX_INTERCONNECT_H
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/* macros for converting to icc units */
+#define Bps_to_icc(x) ((x) / 1000)
+#define kBps_to_icc(x) (x)
+#define MBps_to_icc(x) ((x) * 1000)
+#define GBps_to_icc(x) ((x) * 1000 * 1000)
+#define bps_to_icc(x) (1)
+#define kbps_to_icc(x) ((x) / 8 + ((x) % 8 ? 1 : 0))
+#define Mbps_to_icc(x) ((x) * 1000 / 8)
+#define Gbps_to_icc(x) ((x) * 1000 * 1000 / 8)
+
+struct icc_path;
+struct device;
+
+/**
+ * struct icc_bulk_data - Data used for bulk icc operations.
+ *
+ * @path: reference to the interconnect path (internal use)
+ * @name: the name from the "interconnect-names" DT property
+ * @avg_bw: average bandwidth in icc units
+ * @peak_bw: peak bandwidth in icc units
+ */
+struct icc_bulk_data {
+ struct icc_path *path;
+ const char *name;
+ u32 avg_bw;
+ u32 peak_bw;
+};
+
+#if IS_ENABLED(CONFIG_INTERCONNECT)
+
+struct icc_path *icc_get(struct device *dev, const int src_id,
+ const int dst_id);
+struct icc_path *of_icc_get(struct device *dev, const char *name);
+struct icc_path *devm_of_icc_get(struct device *dev, const char *name);
+int devm_of_icc_bulk_get(struct device *dev, int num_paths, struct icc_bulk_data *paths);
+struct icc_path *of_icc_get_by_index(struct device *dev, int idx);
+void icc_put(struct icc_path *path);
+int icc_enable(struct icc_path *path);
+int icc_disable(struct icc_path *path);
+int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw);
+void icc_set_tag(struct icc_path *path, u32 tag);
+const char *icc_get_name(struct icc_path *path);
+int __must_check of_icc_bulk_get(struct device *dev, int num_paths,
+ struct icc_bulk_data *paths);
+void icc_bulk_put(int num_paths, struct icc_bulk_data *paths);
+int icc_bulk_set_bw(int num_paths, const struct icc_bulk_data *paths);
+int icc_bulk_enable(int num_paths, const struct icc_bulk_data *paths);
+void icc_bulk_disable(int num_paths, const struct icc_bulk_data *paths);
+
+#else
+
+static inline struct icc_path *icc_get(struct device *dev, const int src_id,
+ const int dst_id)
+{
+ return NULL;
+}
+
+static inline struct icc_path *of_icc_get(struct device *dev,
+ const char *name)
+{
+ return NULL;
+}
+
+static inline struct icc_path *devm_of_icc_get(struct device *dev,
+ const char *name)
+{
+ return NULL;
+}
+
+static inline struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
+{
+ return NULL;
+}
+
+static inline void icc_put(struct icc_path *path)
+{
+}
+
+static inline int icc_enable(struct icc_path *path)
+{
+ return 0;
+}
+
+static inline int icc_disable(struct icc_path *path)
+{
+ return 0;
+}
+
+static inline int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
+{
+ return 0;
+}
+
+static inline void icc_set_tag(struct icc_path *path, u32 tag)
+{
+}
+
+static inline const char *icc_get_name(struct icc_path *path)
+{
+ return NULL;
+}
+
+static inline int of_icc_bulk_get(struct device *dev, int num_paths, struct icc_bulk_data *paths)
+{
+ return 0;
+}
+
+static inline int devm_of_icc_bulk_get(struct device *dev, int num_paths,
+ struct icc_bulk_data *paths)
+{
+ return 0;
+}
+
+static inline void icc_bulk_put(int num_paths, struct icc_bulk_data *paths)
+{
+}
+
+static inline int icc_bulk_set_bw(int num_paths, const struct icc_bulk_data *paths)
+{
+ return 0;
+}
+
+static inline int icc_bulk_enable(int num_paths, const struct icc_bulk_data *paths)
+{
+ return 0;
+}
+
+static inline void icc_bulk_disable(int num_paths, const struct icc_bulk_data *paths)
+{
+}
+
+#endif /* CONFIG_INTERCONNECT */
+
+#endif /* __LINUX_INTERCONNECT_H */
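
On the consumer side, the typical flow is get-path / set-bandwidth / put;
the "dma-mem" name and the bandwidth numbers below are illustrative:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/interconnect.h>

	static int my_request_bandwidth(struct device *dev)
	{
		struct icc_path *path;
		int ret;

		path = of_icc_get(dev, "dma-mem");	/* from interconnect-names in DT */
		if (IS_ERR(path))
			return PTR_ERR(path);

		/* ask for 100 MB/s average and 200 MB/s peak */
		ret = icc_set_bw(path, MBps_to_icc(100), MBps_to_icc(200));
		if (ret)
			icc_put(path);
		return ret;
	}
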
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a2fddddb0d60..a92bce40b04b 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -1,11 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H
#include <linux/kernel.h>
-#include <linux/linkage.h>
#include <linux/bitops.h>
-#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
@@ -14,10 +13,12 @@
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
+#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
+#include <asm/sections.h>
/*
* These correspond to the IORESOURCE_IRQ_* defines in
@@ -45,14 +46,14 @@
* IRQF_PERCPU - Interrupt is per cpu
* IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
* IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
- * registered first in an shared interrupt is considered for
+ * registered first in a shared interrupt is considered for
* performance reasons)
* IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
* Used by threaded interrupts which need to keep the
* irq line disabled until the threaded handler has been run.
* IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
* that this interrupt will wake the system from a suspended
- * state. See Documentation/power/suspend-and-interrupts.txt
+ * state. See Documentation/power/suspend-and-interrupts.rst
* IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
* IRQF_NO_THREAD - Interrupt cannot be threaded
* IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
@@ -61,6 +62,11 @@
* interrupt handler after suspending interrupts. For system
* wakeup devices users need to implement wakeup detection in
* their interrupt handlers.
+ * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
+ * Users will enable it explicitly by enable_irq() or enable_nmi()
+ * later.
+ * IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers,
+ * depends on IRQF_PERCPU.
*/
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
@@ -74,6 +80,8 @@
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000
#define IRQF_COND_SUSPEND 0x00040000
+#define IRQF_NO_AUTOEN 0x00080000
+#define IRQF_NO_DEBUG 0x00100000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
@@ -140,6 +148,19 @@ request_threaded_irq(unsigned int irq, irq_handler_t handler,
irq_handler_t thread_fn,
unsigned long flags, const char *name, void *dev);
+/**
+ * request_irq - Add a handler for an interrupt line
+ * @irq: The interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs.
+ * Primary handler for threaded interrupts
+ * If NULL, the default primary handler is installed
+ * @flags: Handling flags
+ * @name: Name of the device generating this interrupt
+ * @dev: A cookie passed to the handler function
+ *
+ * This call allocates an interrupt and establishes a handler; see
+ * the documentation for request_threaded_irq() for details.
+ */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
const char *name, void *dev)
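
A minimal sketch of the call sequence this kernel-doc describes; the device
name and cookie are hypothetical:

	#include <linux/interrupt.h>

	static irqreturn_t my_handler(int irq, void *dev_id)
	{
		/* quiesce the device's interrupt source here */
		return IRQ_HANDLED;
	}

	static int my_attach(unsigned int irq, void *cookie)
	{
		/* the cookie must be unique (and non-NULL) for IRQF_SHARED lines */
		return request_irq(irq, my_handler, IRQF_SHARED, "my-dev", cookie);
	}
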
@@ -156,6 +177,10 @@ __request_percpu_irq(unsigned int irq, irq_handler_t handler,
unsigned long flags, const char *devname,
void __percpu *percpu_dev_id);
+extern int __must_check
+request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
+ const char *name, void *dev);
+
static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id)
@@ -164,9 +189,16 @@ request_percpu_irq(unsigned int irq, irq_handler_t handler,
devname, percpu_dev_id);
}
+extern int __must_check
+request_percpu_nmi(unsigned int irq, irq_handler_t handler,
+ const char *devname, void __percpu *dev);
+
extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);
+extern const void *free_nmi(unsigned int irq, void *dev_id);
+extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);
+
struct device;
extern int __must_check
@@ -190,24 +222,7 @@ devm_request_any_context_irq(struct device *dev, unsigned int irq,
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
-/*
- * On lockdep we dont want to enable hardirqs in hardirq
- * context. Use local_irq_enable_in_hardirq() to annotate
- * kernel code that has to do this nevertheless (pretty much
- * the only valid case is for old/broken hardware that is
- * insanely slow).
- *
- * NOTE: in theory this might break fragile code that relies
- * on hardirq delivery - in practice we dont seem to have such
- * places left. So the only effect should be slightly increased
- * irqs-off latencies.
- */
-#ifdef CONFIG_LOCKDEP
-# define local_irq_enable_in_hardirq() do { } while (0)
-#else
-# define local_irq_enable_in_hardirq() local_irq_enable()
-#endif
-
+bool irq_has_action(unsigned int irq);
extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
@@ -217,9 +232,19 @@ extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);
+extern void disable_nmi_nosync(unsigned int irq);
+extern void disable_percpu_nmi(unsigned int irq);
+extern void enable_nmi(unsigned int irq);
+extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
+extern int prepare_percpu_nmi(unsigned int irq);
+extern void teardown_percpu_nmi(unsigned int irq);
+
+extern int irq_inject_interrupt(unsigned int irq);
+
/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
+extern void rearm_wake_irq(unsigned int irq);
/**
* struct irq_affinity_notify - context for notification of IRQ affinity changes
@@ -241,66 +266,102 @@ struct irq_affinity_notify {
void (*release)(struct kref *ref);
};
+#define IRQ_AFFINITY_MAX_SETS 4
+
/**
 * struct irq_affinity - Description for automatic irq affinity assignments
* @pre_vectors: Don't apply affinity to @pre_vectors at beginning of
* the MSI(-X) vector space
* @post_vectors: Don't apply affinity to @post_vectors at end of
* the MSI(-X) vector space
+ * @nr_sets: The number of interrupt sets for which affinity
+ * spreading is required
+ * @set_size: Array holding the size of each interrupt set
+ * @calc_sets: Callback for calculating the number and size
+ * of interrupt sets
+ * @priv: Private data for usage by @calc_sets, usually a
+ * pointer to driver/device specific data.
*/
struct irq_affinity {
- int pre_vectors;
- int post_vectors;
+ unsigned int pre_vectors;
+ unsigned int post_vectors;
+ unsigned int nr_sets;
+ unsigned int set_size[IRQ_AFFINITY_MAX_SETS];
+ void (*calc_sets)(struct irq_affinity *, unsigned int nvecs);
+ void *priv;
+};
+
+/**
+ * struct irq_affinity_desc - Interrupt affinity descriptor
+ * @mask: cpumask to hold the affinity assignment
+ * @is_managed: 1 if the interrupt is managed internally
+ */
+struct irq_affinity_desc {
+ struct cpumask mask;
+ unsigned int is_managed : 1;
};
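A hedged sketch of how a multiqueue PCI driver might feed this descriptor to pci_alloc_irq_vectors_affinity(); the callback and the queue split are hypothetical:

static void foo_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
{
	/* split the spreadable vectors between read and write queues */
	affd->nr_sets = 2;
	affd->set_size[0] = nvecs / 2;
	affd->set_size[1] = nvecs - nvecs / 2;
}

	struct irq_affinity affd = {
		.pre_vectors	= 1,	/* admin vector, excluded from spreading */
		.calc_sets	= foo_calc_sets,
	};
	int nvecs;

	nvecs = pci_alloc_irq_vectors_affinity(pdev, 2, max_vecs,
					       PCI_IRQ_MSIX, &affd);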
#if defined(CONFIG_SMP)
extern cpumask_var_t irq_default_affinity;
-/* Internal implementation. Use the helpers below */
-extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
- bool force);
+extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
+extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);
+
+extern int irq_can_set_affinity(unsigned int irq);
+extern int irq_select_affinity(unsigned int irq);
+
+extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
+ bool setaffinity);
/**
- * irq_set_affinity - Set the irq affinity of a given irq
- * @irq: Interrupt to set affinity
- * @cpumask: cpumask
+ * irq_update_affinity_hint - Update the affinity hint
+ * @irq: Interrupt to update
+ * @m: cpumask pointer (NULL to clear the hint)
*
- * Fails if cpumask does not contain an online CPU
+ * Updates the affinity hint, but does not change the affinity of the interrupt.
*/
static inline int
-irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
{
- return __irq_set_affinity(irq, cpumask, false);
+ return __irq_apply_affinity_hint(irq, m, false);
}
/**
- * irq_force_affinity - Force the irq affinity of a given irq
- * @irq: Interrupt to set affinity
- * @cpumask: cpumask
- *
- * Same as irq_set_affinity, but without checking the mask against
- * online cpus.
+ * irq_set_affinity_and_hint - Update the affinity hint and apply the provided
+ * cpumask to the interrupt
+ * @irq: Interrupt to update
+ * @m: cpumask pointer (NULL to clear the hint)
*
- * Solely for low level cpu hotplug code, where we need to make per
- * cpu interrupts affine before the cpu becomes online.
+ * Updates the affinity hint and, if @m is not NULL, applies it as the
+ * affinity of that interrupt.
*/
static inline int
-irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
{
- return __irq_set_affinity(irq, cpumask, true);
+ return __irq_apply_affinity_hint(irq, m, true);
}
-extern int irq_can_set_affinity(unsigned int irq);
-extern int irq_select_affinity(unsigned int irq);
+/*
+ * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
+ * instead.
+ */
+static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+{
+ return irq_set_affinity_and_hint(irq, m);
+}
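For illustration, a sketch of a driver distributing its queue interrupts across online CPUs and publishing the placement as a hint; the queue array is hypothetical:

	for (i = 0; i < nr_queues; i++)
		irq_set_affinity_and_hint(queue_irq[i],
					  cpumask_of(i % num_online_cpus()));

	/* teardown: clear the hints without touching the affinity */
	for (i = 0; i < nr_queues; i++)
		irq_update_affinity_hint(queue_irq[i], NULL);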
-extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
+extern int irq_update_affinity_desc(unsigned int irq,
+ struct irq_affinity_desc *affinity);
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
-struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
-int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd);
+struct irq_affinity_desc *
+irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);
+
+unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+ const struct irq_affinity *affd);
#else /* CONFIG_SMP */
@@ -321,26 +382,45 @@ static inline int irq_can_set_affinity(unsigned int irq)
static inline int irq_select_affinity(unsigned int irq) { return 0; }
+static inline int irq_update_affinity_hint(unsigned int irq,
+ const struct cpumask *m)
+{
+ return -EINVAL;
+}
+
+static inline int irq_set_affinity_and_hint(unsigned int irq,
+ const struct cpumask *m)
+{
+ return -EINVAL;
+}
+
static inline int irq_set_affinity_hint(unsigned int irq,
const struct cpumask *m)
{
return -EINVAL;
}
+static inline int irq_update_affinity_desc(unsigned int irq,
+ struct irq_affinity_desc *affinity)
+{
+ return -EINVAL;
+}
+
static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
return 0;
}
-static inline struct cpumask *
-irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
+static inline struct irq_affinity_desc *
+irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
{
return NULL;
}
-static inline int
-irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
+static inline unsigned int
+irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+ const struct irq_affinity *affd)
{
return maxvec;
}
@@ -427,16 +507,28 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
bool state);
#ifdef CONFIG_IRQ_FORCED_THREADING
-extern bool force_irqthreads;
+# ifdef CONFIG_PREEMPT_RT
+# define force_irqthreads() (true)
+# else
+DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
+# define force_irqthreads() (static_branch_unlikely(&force_irqthreads_key))
+# endif
#else
-#define force_irqthreads (0)
+#define force_irqthreads() (false)
#endif
-#ifndef __ARCH_SET_SOFTIRQ_PENDING
-#define set_softirq_pending(x) (local_softirq_pending() = (x))
-#define or_softirq_pending(x) (local_softirq_pending() |= (x))
+#ifndef local_softirq_pending
+
+#ifndef local_softirq_pending_ref
+#define local_softirq_pending_ref irq_stat.__softirq_pending
#endif
+#define local_softirq_pending() (__this_cpu_read(local_softirq_pending_ref))
+#define set_softirq_pending(x) (__this_cpu_write(local_softirq_pending_ref, (x)))
+#define or_softirq_pending(x) (__this_cpu_or(local_softirq_pending_ref, (x)))
+
+#endif /* local_softirq_pending */
+
/* Some architectures might implement lazy enabling/disabling of
* interrupts. In some cases, such as stop_machine, we might want
* to ensure that after a local_irq_disable(), interrupts have
@@ -463,14 +555,22 @@ enum
IRQ_POLL_SOFTIRQ,
TASKLET_SOFTIRQ,
SCHED_SOFTIRQ,
- HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
- numbering. Sigh! */
+ HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	    /* Preferably RCU should always be the last softirq */
NR_SOFTIRQS
};
-#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+/*
+ * The following vectors can be safely ignored after ksoftirqd is parked:
+ *
+ * _ RCU:
+ * 1) rcutree_migrate_callbacks() migrates the queue.
+ * 2) rcu_report_dead() reports the final quiescent states.
+ *
+ * _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue
+ */
+#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(RCU_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ))
/* map softirq index to softirq name. update 'softirq_to_name' in
* kernel/softirq.c when adding a new softirq.
@@ -489,12 +589,12 @@ struct softirq_action
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
-#ifdef __ARCH_HAS_DO_SOFTIRQ
-void do_softirq_own_stack(void);
+#ifdef CONFIG_PREEMPT_RT
+extern void do_softirq_post_smp_call_flush(unsigned int was_pending);
#else
-static inline void do_softirq_own_stack(void)
+static inline void do_softirq_post_smp_call_flush(unsigned int unused)
{
- __do_softirq();
+ do_softirq();
}
#endif
@@ -514,6 +614,9 @@ static inline struct task_struct *this_cpu_ksoftirqd(void)
/* Tasklets --- multithreaded analogue of BHs.
+ This API is deprecated. Please consider using threaded IRQs instead:
+ https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de
+
   The main feature distinguishing them from generic softirqs: a tasklet
   runs on only one CPU at a time.
@@ -537,16 +640,42 @@ struct tasklet_struct
struct tasklet_struct *next;
unsigned long state;
atomic_t count;
- void (*func)(unsigned long);
+ bool use_callback;
+ union {
+ void (*func)(unsigned long data);
+ void (*callback)(struct tasklet_struct *t);
+ };
unsigned long data;
};
-#define DECLARE_TASKLET(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
+#define DECLARE_TASKLET(name, _callback) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(0), \
+ .callback = _callback, \
+ .use_callback = true, \
+}
+
+#define DECLARE_TASKLET_DISABLED(name, _callback) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(1), \
+ .callback = _callback, \
+ .use_callback = true, \
+}
+
+#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \
+ container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
-#define DECLARE_TASKLET_DISABLED(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
+#define DECLARE_TASKLET_OLD(name, _func) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(0), \
+ .func = _func, \
+}
+#define DECLARE_TASKLET_DISABLED_OLD(name, _func) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(1), \
+ .func = _func, \
+}
enum
{
@@ -554,26 +683,21 @@ enum
TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
};
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
static inline int tasklet_trylock(struct tasklet_struct *t)
{
return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}
-static inline void tasklet_unlock(struct tasklet_struct *t)
-{
- smp_mb__before_atomic();
- clear_bit(TASKLET_STATE_RUN, &(t)->state);
-}
+void tasklet_unlock(struct tasklet_struct *t);
+void tasklet_unlock_wait(struct tasklet_struct *t);
+void tasklet_unlock_spin_wait(struct tasklet_struct *t);
-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
-{
- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
-}
#else
-#define tasklet_trylock(t) 1
-#define tasklet_unlock_wait(t) do { } while (0)
-#define tasklet_unlock(t) do { } while (0)
+static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
+static inline void tasklet_unlock(struct tasklet_struct *t) { }
+static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
+static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
#endif
extern void __tasklet_schedule(struct tasklet_struct *t);
@@ -592,27 +716,23 @@ static inline void tasklet_hi_schedule(struct tasklet_struct *t)
__tasklet_hi_schedule(t);
}
-extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
-
-/*
- * This version avoids touching any other tasklets. Needed for kmemcheck
- * in order not to take any page faults while enqueueing this tasklet;
- * consider VERY carefully whether you really need this or
- * tasklet_hi_schedule()...
- */
-static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
-{
- if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
- __tasklet_hi_schedule_first(t);
-}
-
-
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
atomic_inc(&t->count);
smp_mb__after_atomic();
}
+/*
+ * Do not use in new code. Disabling tasklets from atomic contexts is
+ * error-prone and should be avoided.
+ */
+static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
+{
+ tasklet_disable_nosync(t);
+ tasklet_unlock_spin_wait(t);
+ smp_mb();
+}
+
static inline void tasklet_disable(struct tasklet_struct *t)
{
tasklet_disable_nosync(t);
@@ -627,34 +747,10 @@ static inline void tasklet_enable(struct tasklet_struct *t)
}
extern void tasklet_kill(struct tasklet_struct *t);
-extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data);
-
-struct tasklet_hrtimer {
- struct hrtimer timer;
- struct tasklet_struct tasklet;
- enum hrtimer_restart (*function)(struct hrtimer *);
-};
-
-extern void
-tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
- enum hrtimer_restart (*function)(struct hrtimer *),
- clockid_t which_clock, enum hrtimer_mode mode);
-
-static inline
-void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
- const enum hrtimer_mode mode)
-{
- hrtimer_start(&ttimer->timer, time, mode);
-}
-
-static inline
-void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
-{
- hrtimer_cancel(&ttimer->timer);
- tasklet_kill(&ttimer->tasklet);
-}
+extern void tasklet_setup(struct tasklet_struct *t,
+ void (*callback)(struct tasklet_struct *));
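Putting the callback-style pieces together, a minimal sketch of the non-deprecated tasklet usage; struct foo and its members are hypothetical:

struct foo {
	struct tasklet_struct tasklet;
	/* driver state */
};

static void foo_tasklet_fn(struct tasklet_struct *t)
{
	struct foo *foo = from_tasklet(foo, t, tasklet);

	/* deferred work runs here, on one CPU at a time */
}

	tasklet_setup(&foo->tasklet, foo_tasklet_fn);
	tasklet_schedule(&foo->tasklet);
	/* ... */
	tasklet_kill(&foo->tasklet);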
/*
* Autoprobing for irqs:
@@ -726,24 +822,13 @@ extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
-#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
/*
* We want to know which function is an entrypoint of a hardirq or a softirq.
*/
-#define __irq_entry __attribute__((__section__(".irqentry.text")))
-#define __softirq_entry \
- __attribute__((__section__(".softirqentry.text")))
-
-/* Limits of hardirq entrypoints */
-extern char __irqentry_text_start[];
-extern char __irqentry_text_end[];
-/* Limits of softirq entrypoints */
-extern char __softirqentry_text_start[];
-extern char __softirqentry_text_end[];
-
-#else
-#define __irq_entry
-#define __softirq_entry
+#ifndef __irq_entry
+# define __irq_entry __section(".irqentry.text")
#endif
+#define __softirq_entry __section(".softirqentry.text")
+
#endif
diff --git a/include/linux/interval_tree.h b/include/linux/interval_tree.h
index 724556aa3c95..2b8026a39906 100644
--- a/include/linux/interval_tree.h
+++ b/include/linux/interval_tree.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_INTERVAL_TREE_H
#define _LINUX_INTERVAL_TREE_H
@@ -11,17 +12,77 @@ struct interval_tree_node {
};
extern void
-interval_tree_insert(struct interval_tree_node *node, struct rb_root *root);
+interval_tree_insert(struct interval_tree_node *node,
+ struct rb_root_cached *root);
extern void
-interval_tree_remove(struct interval_tree_node *node, struct rb_root *root);
+interval_tree_remove(struct interval_tree_node *node,
+ struct rb_root_cached *root);
extern struct interval_tree_node *
-interval_tree_iter_first(struct rb_root *root,
+interval_tree_iter_first(struct rb_root_cached *root,
unsigned long start, unsigned long last);
extern struct interval_tree_node *
interval_tree_iter_next(struct interval_tree_node *node,
unsigned long start, unsigned long last);
+/**
+ * struct interval_tree_span_iter - Find used and unused spans.
+ * @start_hole: Start of an interval for a hole when is_hole == 1
+ * @last_hole: Inclusive end of an interval for a hole when is_hole == 1
+ * @start_used: Start of a used interval when is_hole == 0
+ * @last_used: Inclusive end of a used interval when is_hole == 0
+ * @is_hole: 0 == used, 1 == is_hole, -1 == done iteration
+ *
+ * This iterator travels over spans in an interval tree. It does not return
+ * nodes but classifies each span as either a hole, where no nodes intersect,
+ * or a used span, which is fully covered by nodes. Each iteration step
+ * toggles between hole and used until the entire range is covered. The
+ * returned spans always fully cover the requested range.
+ *
+ * The iterator is greedy: it always returns the largest hole or used span
+ * possible, consolidating all consecutive nodes.
+ *
+ * Use interval_tree_span_iter_done() to detect end of iteration.
+ */
+struct interval_tree_span_iter {
+ /* private: not for use by the caller */
+ struct interval_tree_node *nodes[2];
+ unsigned long first_index;
+ unsigned long last_index;
+
+ /* public: */
+ union {
+ unsigned long start_hole;
+ unsigned long start_used;
+ };
+ union {
+ unsigned long last_hole;
+ unsigned long last_used;
+ };
+ int is_hole;
+};
+
+void interval_tree_span_iter_first(struct interval_tree_span_iter *state,
+ struct rb_root_cached *itree,
+ unsigned long first_index,
+ unsigned long last_index);
+void interval_tree_span_iter_advance(struct interval_tree_span_iter *iter,
+ struct rb_root_cached *itree,
+ unsigned long new_index);
+void interval_tree_span_iter_next(struct interval_tree_span_iter *state);
+
+static inline bool
+interval_tree_span_iter_done(struct interval_tree_span_iter *state)
+{
+ return state->is_hole == -1;
+}
+
+#define interval_tree_for_each_span(span, itree, first_index, last_index) \
+ for (interval_tree_span_iter_first(span, itree, \
+ first_index, last_index); \
+ !interval_tree_span_iter_done(span); \
+ interval_tree_span_iter_next(span))
+
#endif /* _LINUX_INTERVAL_TREE_H */
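A usage sketch for the span iterator added above, assuming an existing rb_root_cached tree named itree:

	struct interval_tree_span_iter span;

	interval_tree_for_each_span(&span, &itree, 0, ULONG_MAX) {
		if (span.is_hole)
			pr_debug("hole [%lx, %lx]\n",
				 span.start_hole, span.last_hole);
		else
			pr_debug("used [%lx, %lx]\n",
				 span.start_used, span.last_used);
	}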
diff --git a/include/linux/interval_tree_generic.h b/include/linux/interval_tree_generic.h
index 58370e1862ad..aaa8a0767aa3 100644
--- a/include/linux/interval_tree_generic.h
+++ b/include/linux/interval_tree_generic.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
Interval Trees
(C) 2012 Michel Lespinasse <walken@google.com>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
include/linux/interval_tree_generic.h
*/
@@ -33,7 +21,7 @@
* ITSTATIC: 'static' or empty
* ITPREFIX: prefix to use for the inline tree definitions
*
- * Note - before using this, please consider if non-generic version
+ * Note - before using this, please consider if the generic version
* (interval_tree.h) would work for you...
*/
@@ -42,34 +30,18 @@
\
/* Callbacks for augmented rbtree insert and remove */ \
\
-static inline ITTYPE ITPREFIX ## _compute_subtree_last(ITSTRUCT *node) \
-{ \
- ITTYPE max = ITLAST(node), subtree_last; \
- if (node->ITRB.rb_left) { \
- subtree_last = rb_entry(node->ITRB.rb_left, \
- ITSTRUCT, ITRB)->ITSUBTREE; \
- if (max < subtree_last) \
- max = subtree_last; \
- } \
- if (node->ITRB.rb_right) { \
- subtree_last = rb_entry(node->ITRB.rb_right, \
- ITSTRUCT, ITRB)->ITSUBTREE; \
- if (max < subtree_last) \
- max = subtree_last; \
- } \
- return max; \
-} \
- \
-RB_DECLARE_CALLBACKS(static, ITPREFIX ## _augment, ITSTRUCT, ITRB, \
- ITTYPE, ITSUBTREE, ITPREFIX ## _compute_subtree_last) \
+RB_DECLARE_CALLBACKS_MAX(static, ITPREFIX ## _augment, \
+ ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, ITLAST) \
\
/* Insert / remove interval nodes from the tree */ \
\
-ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, struct rb_root *root) \
+ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, \
+ struct rb_root_cached *root) \
{ \
- struct rb_node **link = &root->rb_node, *rb_parent = NULL; \
+ struct rb_node **link = &root->rb_root.rb_node, *rb_parent = NULL; \
ITTYPE start = ITSTART(node), last = ITLAST(node); \
ITSTRUCT *parent; \
+ bool leftmost = true; \
\
while (*link) { \
rb_parent = *link; \
@@ -78,18 +50,22 @@ ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, struct rb_root *root) \
parent->ITSUBTREE = last; \
if (start < ITSTART(parent)) \
link = &parent->ITRB.rb_left; \
- else \
+ else { \
link = &parent->ITRB.rb_right; \
+ leftmost = false; \
+ } \
} \
\
node->ITSUBTREE = last; \
rb_link_node(&node->ITRB, rb_parent, link); \
- rb_insert_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \
+ rb_insert_augmented_cached(&node->ITRB, root, \
+ leftmost, &ITPREFIX ## _augment); \
} \
\
-ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, struct rb_root *root) \
+ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, \
+ struct rb_root_cached *root) \
{ \
- rb_erase_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \
+ rb_erase_augmented_cached(&node->ITRB, root, &ITPREFIX ## _augment); \
} \
\
/* \
@@ -140,15 +116,35 @@ ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
} \
\
ITSTATIC ITSTRUCT * \
-ITPREFIX ## _iter_first(struct rb_root *root, ITTYPE start, ITTYPE last) \
+ITPREFIX ## _iter_first(struct rb_root_cached *root, \
+ ITTYPE start, ITTYPE last) \
{ \
- ITSTRUCT *node; \
+ ITSTRUCT *node, *leftmost; \
\
- if (!root->rb_node) \
+ if (!root->rb_root.rb_node) \
return NULL; \
- node = rb_entry(root->rb_node, ITSTRUCT, ITRB); \
+ \
+ /* \
+ * Fastpath range intersection/overlap between A: [a0, a1] and \
+ * B: [b0, b1] is given by: \
+ * \
+ * a0 <= b1 && b0 <= a1 \
+ * \
+ * ... where A holds the lock range and B holds the smallest \
+	 * 'start' and largest 'last' in the tree. For the latter, we	\
+ * rely on the root node, which by augmented interval tree \
+ * property, holds the largest value in its last-in-subtree. \
+	 * This allows mitigating some of the tree walk overhead for	\
+	 * non-intersecting ranges, maintained and consulted in O(1).	\
+ */ \
+ node = rb_entry(root->rb_root.rb_node, ITSTRUCT, ITRB); \
if (node->ITSUBTREE < start) \
return NULL; \
+ \
+ leftmost = rb_entry(root->rb_leftmost, ITSTRUCT, ITRB); \
+ if (ITSTART(leftmost) > last) \
+ return NULL; \
+ \
return ITPREFIX ## _subtree_search(node, start, last); \
} \
\
diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h
index defcc4644ce3..f32522bb3aa5 100644
--- a/include/linux/io-64-nonatomic-hi-lo.h
+++ b/include/linux/io-64-nonatomic-hi-lo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IO_64_NONATOMIC_HI_LO_H_
#define _LINUX_IO_64_NONATOMIC_HI_LO_H_
@@ -54,4 +55,68 @@ static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr)
#define writeq_relaxed hi_lo_writeq_relaxed
#endif
+#ifndef ioread64_hi_lo
+#define ioread64_hi_lo ioread64_hi_lo
+static inline u64 ioread64_hi_lo(const void __iomem *addr)
+{
+ u32 low, high;
+
+ high = ioread32(addr + sizeof(u32));
+ low = ioread32(addr);
+
+ return low + ((u64)high << 32);
+}
+#endif
+
+#ifndef iowrite64_hi_lo
+#define iowrite64_hi_lo iowrite64_hi_lo
+static inline void iowrite64_hi_lo(u64 val, void __iomem *addr)
+{
+ iowrite32(val >> 32, addr + sizeof(u32));
+ iowrite32(val, addr);
+}
+#endif
+
+#ifndef ioread64be_hi_lo
+#define ioread64be_hi_lo ioread64be_hi_lo
+static inline u64 ioread64be_hi_lo(const void __iomem *addr)
+{
+ u32 low, high;
+
+ high = ioread32be(addr);
+ low = ioread32be(addr + sizeof(u32));
+
+ return low + ((u64)high << 32);
+}
+#endif
+
+#ifndef iowrite64be_hi_lo
+#define iowrite64be_hi_lo iowrite64be_hi_lo
+static inline void iowrite64be_hi_lo(u64 val, void __iomem *addr)
+{
+ iowrite32be(val >> 32, addr);
+ iowrite32be(val, addr + sizeof(u32));
+}
+#endif
+
+#ifndef ioread64
+#define ioread64_is_nonatomic
+#define ioread64 ioread64_hi_lo
+#endif
+
+#ifndef iowrite64
+#define iowrite64_is_nonatomic
+#define iowrite64 iowrite64_hi_lo
+#endif
+
+#ifndef ioread64be
+#define ioread64be_is_nonatomic
+#define ioread64be ioread64be_hi_lo
+#endif
+
+#ifndef iowrite64be
+#define iowrite64be_is_nonatomic
+#define iowrite64be iowrite64be_hi_lo
+#endif
+
#endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */
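With this header included, a driver can use ioread64()/iowrite64() unconditionally; on platforms without native 64-bit MMIO they decompose into the two 32-bit accesses defined above, high word first. A sketch with a hypothetical register offset:

	u64 val;

	val = ioread64(base + FOO_COUNTER_REG);
	iowrite64(val + 1, base + FOO_COUNTER_REG);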
diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h
index 084461a4e5ab..448a21435dba 100644
--- a/include/linux/io-64-nonatomic-lo-hi.h
+++ b/include/linux/io-64-nonatomic-lo-hi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IO_64_NONATOMIC_LO_HI_H_
#define _LINUX_IO_64_NONATOMIC_LO_HI_H_
@@ -54,4 +55,68 @@ static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr)
#define writeq_relaxed lo_hi_writeq_relaxed
#endif
+#ifndef ioread64_lo_hi
+#define ioread64_lo_hi ioread64_lo_hi
+static inline u64 ioread64_lo_hi(const void __iomem *addr)
+{
+ u32 low, high;
+
+ low = ioread32(addr);
+ high = ioread32(addr + sizeof(u32));
+
+ return low + ((u64)high << 32);
+}
+#endif
+
+#ifndef iowrite64_lo_hi
+#define iowrite64_lo_hi iowrite64_lo_hi
+static inline void iowrite64_lo_hi(u64 val, void __iomem *addr)
+{
+ iowrite32(val, addr);
+ iowrite32(val >> 32, addr + sizeof(u32));
+}
+#endif
+
+#ifndef ioread64be_lo_hi
+#define ioread64be_lo_hi ioread64be_lo_hi
+static inline u64 ioread64be_lo_hi(const void __iomem *addr)
+{
+ u32 low, high;
+
+ low = ioread32be(addr + sizeof(u32));
+ high = ioread32be(addr);
+
+ return low + ((u64)high << 32);
+}
+#endif
+
+#ifndef iowrite64be_lo_hi
+#define iowrite64be_lo_hi iowrite64be_lo_hi
+static inline void iowrite64be_lo_hi(u64 val, void __iomem *addr)
+{
+ iowrite32be(val, addr + sizeof(u32));
+ iowrite32be(val >> 32, addr);
+}
+#endif
+
+#ifndef ioread64
+#define ioread64_is_nonatomic
+#define ioread64 ioread64_lo_hi
+#endif
+
+#ifndef iowrite64
+#define iowrite64_is_nonatomic
+#define iowrite64 iowrite64_lo_hi
+#endif
+
+#ifndef ioread64be
+#define ioread64be_is_nonatomic
+#define ioread64be ioread64be_lo_hi
+#endif
+
+#ifndef iowrite64be
+#define iowrite64be_is_nonatomic
+#define iowrite64be iowrite64be_lo_hi
+#endif
+
#endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 58df02bd93c9..7376c1df9c90 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -1,18 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright © 2008 Keith Packard <keithp@keithp.com>
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef _LINUX_IO_MAPPING_H
@@ -22,13 +10,14 @@
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/io.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
/*
* The io_mapping mechanism provides an abstraction for mapping
* individual pages from an io device to the CPU in an efficient fashion.
*
- * See Documentation/io-mapping.txt
+ * See Documentation/driver-api/io-mapping.rst
*/
struct io_mapping {
@@ -40,6 +29,7 @@ struct io_mapping {
#ifdef CONFIG_HAVE_ATOMIC_IOMAP
+#include <linux/pfn.h>
#include <asm/iomap.h>
/*
* For small address space machines, mapping large objects
@@ -76,18 +66,41 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset)
{
resource_size_t phys_addr;
- unsigned long pfn;
BUG_ON(offset >= mapping->size);
phys_addr = mapping->base + offset;
- pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
- return iomap_atomic_prot_pfn(pfn, mapping->prot);
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+ else
+ migrate_disable();
+ pagefault_disable();
+ return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
}
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
- iounmap_atomic(vaddr);
+ kunmap_local_indexed((void __force *)vaddr);
+ pagefault_enable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
+ else
+ migrate_enable();
+}
+
+static inline void __iomem *
+io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
+{
+ resource_size_t phys_addr;
+
+ BUG_ON(offset >= mapping->size);
+ phys_addr = mapping->base + offset;
+ return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
+}
+
+static inline void io_mapping_unmap_local(void __iomem *vaddr)
+{
+ kunmap_local_indexed((void __force *)vaddr);
}
static inline void __iomem *
@@ -109,10 +122,9 @@ io_mapping_unmap(void __iomem *vaddr)
iounmap(vaddr);
}
-#else
+#else /* HAVE_ATOMIC_IOMAP */
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
/* Create the io_mapping object */
static inline struct io_mapping *
@@ -120,16 +132,13 @@ io_mapping_init_wc(struct io_mapping *iomap,
resource_size_t base,
unsigned long size)
{
+ iomap->iomem = ioremap_wc(base, size);
+ if (!iomap->iomem)
+ return NULL;
+
iomap->base = base;
iomap->size = size;
- iomap->iomem = ioremap_wc(base, size);
-#if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
- iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
-#elif defined(pgprot_writecombine)
iomap->prot = pgprot_writecombine(PAGE_KERNEL);
-#else
- iomap->prot = pgprot_noncached(PAGE_KERNEL);
-#endif
return iomap;
}
@@ -159,7 +168,10 @@ static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset)
{
- preempt_disable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+ else
+ migrate_disable();
pagefault_disable();
return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}
@@ -169,10 +181,24 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
{
io_mapping_unmap(vaddr);
pagefault_enable();
- preempt_enable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
+ else
+ migrate_enable();
}
-#endif /* HAVE_ATOMIC_IOMAP */
+static inline void __iomem *
+io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
+{
+ return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
+}
+
+static inline void io_mapping_unmap_local(void __iomem *vaddr)
+{
+ io_mapping_unmap(vaddr);
+}
+
+#endif /* !HAVE_ATOMIC_IOMAP */
static inline struct io_mapping *
io_mapping_create_wc(resource_size_t base,
@@ -199,4 +225,7 @@ io_mapping_free(struct io_mapping *iomap)
kfree(iomap);
}
+int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn, unsigned long size);
+
#endif /* _LINUX_IO_MAPPING_H */
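A sketch of the new local-mapping interface, with a hypothetical BAR address, size and page offset. Unlike the _atomic variants, the _local mappings do not disable pagefaults or preemption:

	struct io_mapping *iomap;
	void __iomem *vaddr;

	iomap = io_mapping_create_wc(bar_phys, bar_size);
	if (!iomap)
		return -ENOMEM;

	vaddr = io_mapping_map_local_wc(iomap, page_offset);
	writel(0x1, vaddr);		/* example store through the mapping */
	io_mapping_unmap_local(vaddr);

	io_mapping_free(iomap);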
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
new file mode 100644
index 000000000000..1b7a44b35616
--- /dev/null
+++ b/include/linux/io-pgtable.h
@@ -0,0 +1,262 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __IO_PGTABLE_H
+#define __IO_PGTABLE_H
+
+#include <linux/bitops.h>
+#include <linux/iommu.h>
+
+/*
+ * Public API for use by IOMMU drivers
+ */
+enum io_pgtable_fmt {
+ ARM_32_LPAE_S1,
+ ARM_32_LPAE_S2,
+ ARM_64_LPAE_S1,
+ ARM_64_LPAE_S2,
+ ARM_V7S,
+ ARM_MALI_LPAE,
+ AMD_IOMMU_V1,
+ AMD_IOMMU_V2,
+ APPLE_DART,
+ APPLE_DART2,
+ IO_PGTABLE_NUM_FMTS,
+};
+
+/**
+ * struct iommu_flush_ops - IOMMU callbacks for TLB and page table management.
+ *
+ * @tlb_flush_all: Synchronously invalidate the entire TLB context.
+ * @tlb_flush_walk: Synchronously invalidate all intermediate TLB state
+ * (sometimes referred to as the "walk cache") for a virtual
+ * address range.
+ * @tlb_add_page: Optional callback to queue up leaf TLB invalidation for a
+ * single page. IOMMUs that cannot batch TLB invalidation
+ * operations efficiently will typically issue them here, but
+ * others may decide to update the iommu_iotlb_gather structure
+ * and defer the invalidation until iommu_iotlb_sync() instead.
+ *
+ * Note that these can all be called in atomic context and must therefore
+ * not block.
+ */
+struct iommu_flush_ops {
+ void (*tlb_flush_all)(void *cookie);
+ void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
+ void *cookie);
+ void (*tlb_add_page)(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule, void *cookie);
+};
+
+/**
+ * struct io_pgtable_cfg - Configuration data for a set of page tables.
+ *
+ * @quirks: A bitmap of hardware quirks that require some special
+ * action by the low-level page table allocator.
+ * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
+ * tables.
+ * @ias: Input address (iova) size, in bits.
+ * @oas: Output address (paddr) size, in bits.
+ * @coherent_walk: A flag to indicate whether or not page table walks made
+ * by the IOMMU are coherent with the CPU caches.
+ * @tlb: TLB management callbacks for this set of tables.
+ * @iommu_dev: The device representing the DMA configuration for the
+ * page table walker.
+ */
+struct io_pgtable_cfg {
+ /*
+ * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
+ * stage 1 PTEs, for hardware which insists on validating them
+ * even in non-secure state where they should normally be ignored.
+ *
+ * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
+ * IOMMU_NOEXEC flags and map everything with full access, for
+ * hardware which does not implement the permissions of a given
+ * format, and/or requires some format-specific default value.
+ *
+ * IO_PGTABLE_QUIRK_ARM_MTK_EXT: (ARM v7s format) MediaTek IOMMUs extend
+ * to support up to 35 bits PA where the bit32, bit33 and bit34 are
+ * encoded in the bit9, bit4 and bit5 of the PTE respectively.
+ *
+ * IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT: (ARM v7s format) MediaTek IOMMUs
+ * extend the translation table base support up to 35 bits PA, the
+ * encoding format is same with IO_PGTABLE_QUIRK_ARM_MTK_EXT.
+ *
+ * IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table
+ * for use in the upper half of a split address space.
+ *
+ * IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability
+ * attributes set in the TCR for a non-coherent page-table walker.
+ */
+ #define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
+ #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
+ #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3)
+ #define IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT BIT(4)
+ #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5)
+ #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6)
+ unsigned long quirks;
+ unsigned long pgsize_bitmap;
+ unsigned int ias;
+ unsigned int oas;
+ bool coherent_walk;
+ const struct iommu_flush_ops *tlb;
+ struct device *iommu_dev;
+
+ /* Low-level data specific to the table format */
+ union {
+ struct {
+ u64 ttbr;
+ struct {
+ u32 ips:3;
+ u32 tg:2;
+ u32 sh:2;
+ u32 orgn:2;
+ u32 irgn:2;
+ u32 tsz:6;
+ } tcr;
+ u64 mair;
+ } arm_lpae_s1_cfg;
+
+ struct {
+ u64 vttbr;
+ struct {
+ u32 ps:3;
+ u32 tg:2;
+ u32 sh:2;
+ u32 orgn:2;
+ u32 irgn:2;
+ u32 sl:2;
+ u32 tsz:6;
+ } vtcr;
+ } arm_lpae_s2_cfg;
+
+ struct {
+ u32 ttbr;
+ u32 tcr;
+ u32 nmrr;
+ u32 prrr;
+ } arm_v7s_cfg;
+
+ struct {
+ u64 transtab;
+ u64 memattr;
+ } arm_mali_lpae_cfg;
+
+ struct {
+ u64 ttbr[4];
+ u32 n_ttbrs;
+ } apple_dart_cfg;
+ };
+};
+
+/**
+ * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
+ *
+ * @map_pages: Map a physically contiguous range of pages of the same size.
+ * @unmap_pages: Unmap a range of virtually contiguous pages of the same size.
+ * @iova_to_phys: Translate iova to physical address.
+ *
+ * These functions map directly onto the iommu_ops member functions with
+ * the same names.
+ */
+struct io_pgtable_ops {
+ int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped);
+ size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather);
+ phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
+ unsigned long iova);
+};
+
+/**
+ * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
+ *
+ * @fmt: The page table format.
+ * @cfg: The page table configuration. This will be modified to represent
+ * the configuration actually provided by the allocator (e.g. the
+ * pgsize_bitmap may be restricted).
+ * @cookie: An opaque token provided by the IOMMU driver and passed back to
+ * the callback routines in cfg->tlb.
+ */
+struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
+ struct io_pgtable_cfg *cfg,
+ void *cookie);
+
+/**
+ * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
+ * *must* ensure that the page table is no longer
+ * live, but the TLB can be dirty.
+ *
+ * @ops: The ops returned from alloc_io_pgtable_ops.
+ */
+void free_io_pgtable_ops(struct io_pgtable_ops *ops);
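To connect the pieces of the public API above, a hedged sketch of an IOMMU driver allocating a set of page tables and mapping one page; the flush ops, cookie and addresses are hypothetical:

	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M,
		.ias		= 48,
		.oas		= 48,
		.coherent_walk	= true,
		.tlb		= &foo_flush_ops,
		.iommu_dev	= dev,
	};
	struct io_pgtable_ops *ops;
	size_t mapped;

	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, foo_cookie);
	if (!ops)
		return -ENOMEM;

	ops->map_pages(ops, iova, paddr, SZ_4K, 1,
		       IOMMU_READ | IOMMU_WRITE, GFP_KERNEL, &mapped);
	/* ... */
	free_io_pgtable_ops(ops);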
+
+
+/*
+ * Internal structures for page table allocator implementations.
+ */
+
+/**
+ * struct io_pgtable - Internal structure describing a set of page tables.
+ *
+ * @fmt: The page table format.
+ * @cookie: An opaque token provided by the IOMMU driver and passed back to
+ * any callback routines.
+ * @cfg: A copy of the page table configuration.
+ * @ops: The page table operations in use for this set of page tables.
+ */
+struct io_pgtable {
+ enum io_pgtable_fmt fmt;
+ void *cookie;
+ struct io_pgtable_cfg cfg;
+ struct io_pgtable_ops ops;
+};
+
+#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)
+
+static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
+{
+ if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_all)
+ iop->cfg.tlb->tlb_flush_all(iop->cookie);
+}
+
+static inline void
+io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova,
+ size_t size, size_t granule)
+{
+ if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_walk)
+ iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
+}
+
+static inline void
+io_pgtable_tlb_add_page(struct io_pgtable *iop,
+			struct iommu_iotlb_gather *gather, unsigned long iova,
+ size_t granule)
+{
+ if (iop->cfg.tlb && iop->cfg.tlb->tlb_add_page)
+ iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);
+}
+
+/**
+ * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
+ * particular format.
+ *
+ * @alloc: Allocate a set of page tables described by cfg.
+ * @free: Free the page tables associated with iop.
+ */
+struct io_pgtable_init_fns {
+ struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
+ void (*free)(struct io_pgtable *iop);
+};
+
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns;
+
+#endif /* __IO_PGTABLE_H */
diff --git a/include/linux/io.h b/include/linux/io.h
index 2195d9ea4aaa..308f4f0cfb93 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -1,18 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2006 PathScale, Inc. All Rights Reserved.
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef _LINUX_IO_H
@@ -43,14 +31,6 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end,
}
#endif
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-void __init ioremap_huge_init(void);
-int arch_ioremap_pud_supported(void);
-int arch_ioremap_pmd_supported(void);
-#else
-static inline void ioremap_huge_init(void) { }
-#endif
-
/*
* Managed iomap interface
*/
@@ -75,7 +55,7 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr)
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
resource_size_t size);
-void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
+void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
resource_size_t size);
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
resource_size_t size);
@@ -88,25 +68,23 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
size_t size, unsigned long flags);
void devm_memunmap(struct device *dev, void *addr);
-void *__devm_memremap_pages(struct device *dev, struct resource *res);
-
#ifdef CONFIG_PCI
/*
* The PCI specifications (Rev 3.0, 3.2.5 "Transaction Ordering and
- * Posting") mandate non-posted configuration transactions. There is
- * no ioremap API in the kernel that can guarantee non-posted write
- * semantics across arches so provide a default implementation for
- * mapping PCI config space that defaults to ioremap_nocache(); arches
- * should override it if they have memory mapping implementations that
- * guarantee non-posted writes semantics to make the memory mapping
- * compliant with the PCI specification.
+ * Posting") mandate non-posted configuration transactions. This default
+ * implementation attempts to use the ioremap_np() API to provide this
+ * on arches that support it, and falls back to ioremap() on those that
+ * don't. Overriding this function is deprecated; arches that properly
+ * support non-posted accesses should implement ioremap_np() instead, which
+ * this default implementation can then use to return mappings compliant with
+ * the PCI specification.
*/
#ifndef pci_remap_cfgspace
#define pci_remap_cfgspace pci_remap_cfgspace
static inline void __iomem *pci_remap_cfgspace(phys_addr_t offset,
size_t size)
{
- return ioremap_nocache(offset, size);
+ return ioremap_np(offset, size) ?: ioremap(offset, size);
}
#endif
#endif
@@ -152,11 +130,15 @@ static inline int arch_phys_wc_index(int handle)
#endif
#endif
+int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size);
+
enum {
/* See memremap() kernel-doc for usage description... */
MEMREMAP_WB = 1 << 0,
MEMREMAP_WT = 1 << 1,
MEMREMAP_WC = 1 << 2,
+ MEMREMAP_ENC = 1 << 3,
+ MEMREMAP_DEC = 1 << 4,
};
void *memremap(resource_size_t offset, size_t size, unsigned long flags);
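For reference, a sketch of mapping ordinary memory with these flags; the resource bounds are hypothetical:

	void *va = memremap(res_start, res_size, MEMREMAP_WB);
	if (!va)
		return -ENOMEM;
	/* ... */
	memunmap(va);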
@@ -184,4 +166,7 @@ static inline void arch_io_free_memtype_wc(resource_size_t base,
}
#endif
+int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
+ resource_size_t size);
+
#endif /* _LINUX_IO_H */
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
new file mode 100644
index 000000000000..7fe31b2cd02f
--- /dev/null
+++ b/include/linux/io_uring.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_IO_URING_H
+#define _LINUX_IO_URING_H
+
+#include <linux/sched.h>
+#include <linux/xarray.h>
+#include <uapi/linux/io_uring.h>
+
+enum io_uring_cmd_flags {
+ IO_URING_F_COMPLETE_DEFER = 1,
+ IO_URING_F_UNLOCKED = 2,
+	/* the request is executed from poll; it should not be freed */
+ IO_URING_F_MULTISHOT = 4,
+ /* executed by io-wq */
+ IO_URING_F_IOWQ = 8,
+ /* int's last bit, sign checks are usually faster than a bit test */
+ IO_URING_F_NONBLOCK = INT_MIN,
+
+ /* ctx state flags, for URING_CMD */
+ IO_URING_F_SQE128 = (1 << 8),
+ IO_URING_F_CQE32 = (1 << 9),
+ IO_URING_F_IOPOLL = (1 << 10),
+};
+
+struct io_uring_cmd {
+ struct file *file;
+ const struct io_uring_sqe *sqe;
+ union {
+ /* callback to defer completions to task context */
+ void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
+ /* used for polled completion */
+ void *cookie;
+ };
+ u32 cmd_op;
+ u32 flags;
+ u8 pdu[32]; /* available inline for free use */
+};
+
+static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
+{
+ return sqe->cmd;
+}
+
+#if defined(CONFIG_IO_URING)
+int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ struct iov_iter *iter, void *ioucmd);
+void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
+ unsigned issue_flags);
+void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
+ void (*task_work_cb)(struct io_uring_cmd *, unsigned));
+struct sock *io_uring_get_socket(struct file *file);
+void __io_uring_cancel(bool cancel_all);
+void __io_uring_free(struct task_struct *tsk);
+void io_uring_unreg_ringfd(void);
+const char *io_uring_get_opcode(u8 opcode);
+
+static inline void io_uring_files_cancel(void)
+{
+ if (current->io_uring) {
+ io_uring_unreg_ringfd();
+ __io_uring_cancel(false);
+ }
+}
+static inline void io_uring_task_cancel(void)
+{
+ if (current->io_uring)
+ __io_uring_cancel(true);
+}
+static inline void io_uring_free(struct task_struct *tsk)
+{
+ if (tsk->io_uring)
+ __io_uring_free(tsk);
+}
+#else
+static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ struct iov_iter *iter, void *ioucmd)
+{
+ return -EOPNOTSUPP;
+}
+static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
+ ssize_t ret2, unsigned issue_flags)
+{
+}
+static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
+ void (*task_work_cb)(struct io_uring_cmd *, unsigned))
+{
+}
+static inline struct sock *io_uring_get_socket(struct file *file)
+{
+ return NULL;
+}
+static inline void io_uring_task_cancel(void)
+{
+}
+static inline void io_uring_files_cancel(void)
+{
+}
+static inline void io_uring_free(struct task_struct *tsk)
+{
+}
+static inline const char *io_uring_get_opcode(u8 opcode)
+{
+ return "";
+}
+#endif
+
+#endif
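A sketch of how a character driver's ->uring_cmd hook might use these helpers; the command struct and opcode are hypothetical:

static int foo_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	const struct foo_cmd *fc = io_uring_sqe_cmd(cmd->sqe);

	switch (cmd->cmd_op) {
	case FOO_CMD_PING:
		io_uring_cmd_done(cmd, 0, 0, issue_flags);
		return 0;
	default:
		return -ENOTTY;
	}
}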
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
new file mode 100644
index 000000000000..1b2a20a42413
--- /dev/null
+++ b/include/linux/io_uring_types.h
@@ -0,0 +1,595 @@
+#ifndef IO_URING_TYPES_H
+#define IO_URING_TYPES_H
+
+#include <linux/blkdev.h>
+#include <linux/task_work.h>
+#include <linux/bitmap.h>
+#include <linux/llist.h>
+#include <uapi/linux/io_uring.h>
+
+struct io_wq_work_node {
+ struct io_wq_work_node *next;
+};
+
+struct io_wq_work_list {
+ struct io_wq_work_node *first;
+ struct io_wq_work_node *last;
+};
+
+struct io_wq_work {
+ struct io_wq_work_node list;
+ unsigned flags;
+ /* place it here instead of io_kiocb as it fills padding and saves 4B */
+ int cancel_seq;
+};
+
+struct io_fixed_file {
+ /* file * with additional FFS_* flags */
+ unsigned long file_ptr;
+};
+
+struct io_file_table {
+ struct io_fixed_file *files;
+ unsigned long *bitmap;
+ unsigned int alloc_hint;
+};
+
+struct io_hash_bucket {
+ spinlock_t lock;
+ struct hlist_head list;
+} ____cacheline_aligned_in_smp;
+
+struct io_hash_table {
+ struct io_hash_bucket *hbs;
+ unsigned hash_bits;
+};
+
+/*
+ * Arbitrary limit, can be raised if need be
+ */
+#define IO_RINGFD_REG_MAX 16
+
+struct io_uring_task {
+ /* submission side */
+ int cached_refs;
+ const struct io_ring_ctx *last;
+ struct io_wq *io_wq;
+ struct file *registered_rings[IO_RINGFD_REG_MAX];
+
+ struct xarray xa;
+ struct wait_queue_head wait;
+ atomic_t in_cancel;
+ atomic_t inflight_tracked;
+ struct percpu_counter inflight;
+
+ struct { /* task_work */
+ struct llist_head task_list;
+ struct callback_head task_work;
+ } ____cacheline_aligned_in_smp;
+};
+
+struct io_uring {
+ u32 head ____cacheline_aligned_in_smp;
+ u32 tail ____cacheline_aligned_in_smp;
+};
+
+/*
+ * This data is shared with the application through the mmap at offsets
+ * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
+ *
+ * The offsets to the member fields are published through struct
+ * io_sqring_offsets when calling io_uring_setup.
+ */
+struct io_rings {
+ /*
+ * Head and tail offsets into the ring; the offsets need to be
+ * masked to get valid indices.
+ *
+	 * The kernel controls the head of the sq ring and the tail of the
+	 * cq ring, and the application controls the tail of the sq ring and
+	 * the head of the cq ring.
+ */
+ struct io_uring sq, cq;
+ /*
+ * Bitmasks to apply to head and tail offsets (constant, equals
+ * ring_entries - 1)
+ */
+ u32 sq_ring_mask, cq_ring_mask;
+ /* Ring sizes (constant, power of 2) */
+ u32 sq_ring_entries, cq_ring_entries;
+ /*
+	 * Number of invalid entries dropped by the kernel due to an
+	 * invalid index stored in the array
+ *
+ * Written by the kernel, shouldn't be modified by the
+ * application (i.e. get number of "new events" by comparing to
+ * cached value).
+ *
+ * After a new SQ head value was read by the application this
+ * counter includes all submissions that were dropped reaching
+ * the new SQ head (and possibly more).
+ */
+ u32 sq_dropped;
+ /*
+ * Runtime SQ flags
+ *
+ * Written by the kernel, shouldn't be modified by the
+ * application.
+ *
+ * The application needs a full memory barrier before checking
+ * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
+ */
+ atomic_t sq_flags;
+ /*
+ * Runtime CQ flags
+ *
+ * Written by the application, shouldn't be modified by the
+ * kernel.
+ */
+ u32 cq_flags;
+ /*
+ * Number of completion events lost because the queue was full;
+ * this should be avoided by the application by making sure
+ * there are not more requests pending than there is space in
+ * the completion queue.
+ *
+ * Written by the kernel, shouldn't be modified by the
+ * application (i.e. get number of "new events" by comparing to
+ * cached value).
+ *
+	 * As completion events come in out of order, this counter is not
+	 * ordered with any other data.
+ */
+ u32 cq_overflow;
+ /*
+ * Ring buffer of completion events.
+ *
+ * The kernel writes completion events fresh every time they are
+ * produced, so the application is allowed to modify pending
+ * entries.
+ */
+ struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
+};
+
+struct io_restriction {
+ DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
+ DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
+ u8 sqe_flags_allowed;
+ u8 sqe_flags_required;
+ bool registered;
+};
+
+struct io_submit_link {
+ struct io_kiocb *head;
+ struct io_kiocb *last;
+};
+
+struct io_submit_state {
+ /* inline/task_work completion list, under ->uring_lock */
+ struct io_wq_work_node free_list;
+ /* batch completion logic */
+ struct io_wq_work_list compl_reqs;
+ struct io_submit_link link;
+
+ bool plug_started;
+ bool need_plug;
+ unsigned short submit_nr;
+ unsigned int cqes_count;
+ struct blk_plug plug;
+ struct io_uring_cqe cqes[16];
+};
+
+struct io_ev_fd {
+ struct eventfd_ctx *cq_ev_fd;
+ unsigned int eventfd_async: 1;
+ struct rcu_head rcu;
+ atomic_t refs;
+ atomic_t ops;
+};
+
+struct io_alloc_cache {
+ struct io_wq_work_node list;
+ unsigned int nr_cached;
+ unsigned int max_cached;
+ size_t elem_size;
+};
+
+struct io_ring_ctx {
+ /* const or read-mostly hot data */
+ struct {
+ unsigned int flags;
+ unsigned int drain_next: 1;
+ unsigned int restricted: 1;
+ unsigned int off_timeout_used: 1;
+ unsigned int drain_active: 1;
+ unsigned int has_evfd: 1;
+ /* all CQEs should be posted only by the submitter task */
+ unsigned int task_complete: 1;
+ unsigned int syscall_iopoll: 1;
+ unsigned int poll_activated: 1;
+ unsigned int drain_disabled: 1;
+ unsigned int compat: 1;
+
+ enum task_work_notify_mode notify_method;
+ struct io_rings *rings;
+ struct task_struct *submitter_task;
+ struct percpu_ref refs;
+ } ____cacheline_aligned_in_smp;
+
+ /* submission data */
+ struct {
+ struct mutex uring_lock;
+
+ /*
+ * Ring buffer of indices into array of io_uring_sqe, which is
+ * mmapped by the application using the IORING_OFF_SQES offset.
+ *
+ * This indirection could e.g. be used to assign fixed
+ * io_uring_sqe entries to operations and only submit them to
+ * the queue when needed.
+ *
+ * The kernel modifies neither the indices array nor the entries
+ * array.
+ */
+ u32 *sq_array;
+ struct io_uring_sqe *sq_sqes;
+ unsigned cached_sq_head;
+ unsigned sq_entries;
+
+ /*
+ * Fixed resources fast path, should be accessed only under
+ * uring_lock, and updated through io_uring_register(2)
+ */
+ struct io_rsrc_node *rsrc_node;
+ atomic_t cancel_seq;
+ struct io_file_table file_table;
+ unsigned nr_user_files;
+ unsigned nr_user_bufs;
+ struct io_mapped_ubuf **user_bufs;
+
+ struct io_submit_state submit_state;
+
+ struct io_buffer_list *io_bl;
+ struct xarray io_bl_xa;
+ struct list_head io_buffers_cache;
+
+ struct io_hash_table cancel_table_locked;
+ struct list_head cq_overflow_list;
+ struct io_alloc_cache apoll_cache;
+ struct io_alloc_cache netmsg_cache;
+ } ____cacheline_aligned_in_smp;
+
+ /* IRQ completion list, under ->completion_lock */
+ struct io_wq_work_list locked_free_list;
+ unsigned int locked_free_nr;
+
+ const struct cred *sq_creds; /* cred used for __io_sq_thread() */
+ struct io_sq_data *sq_data; /* if using sq thread polling */
+
+ struct wait_queue_head sqo_sq_wait;
+ struct list_head sqd_list;
+
+ unsigned long check_cq;
+
+ unsigned int file_alloc_start;
+ unsigned int file_alloc_end;
+
+ struct xarray personalities;
+ u32 pers_next;
+
+ struct {
+ /*
+ * We cache a range of free CQEs we can use, once exhausted it
+ * should go through a slower range setup, see __io_get_cqe()
+ */
+ struct io_uring_cqe *cqe_cached;
+ struct io_uring_cqe *cqe_sentinel;
+
+ unsigned cached_cq_tail;
+ unsigned cq_entries;
+ struct io_ev_fd __rcu *io_ev_fd;
+ struct wait_queue_head cq_wait;
+ unsigned cq_extra;
+ } ____cacheline_aligned_in_smp;
+
+ struct {
+ spinlock_t completion_lock;
+
+ bool poll_multi_queue;
+ atomic_t cq_wait_nr;
+
+ /*
+ * ->iopoll_list is protected by the ctx->uring_lock for
+ * io_uring instances that don't use IORING_SETUP_SQPOLL.
+ * For SQPOLL, only the single threaded io_sq_thread() will
+ * manipulate the list, hence no extra locking is needed there.
+ */
+ struct io_wq_work_list iopoll_list;
+ struct io_hash_table cancel_table;
+
+ struct llist_head work_llist;
+
+ struct list_head io_buffers_comp;
+ } ____cacheline_aligned_in_smp;
+
+ /* timeouts */
+ struct {
+ spinlock_t timeout_lock;
+ atomic_t cq_timeouts;
+ struct list_head timeout_list;
+ struct list_head ltimeout_list;
+ unsigned cq_last_tm_flush;
+ } ____cacheline_aligned_in_smp;
+
+ /* Keep this last, we don't need it for the fast path */
+ struct wait_queue_head poll_wq;
+ struct io_restriction restrictions;
+
+	/* slow path rsrc auxiliary data, used by update/register */
+ struct io_mapped_ubuf *dummy_ubuf;
+ struct io_rsrc_data *file_data;
+ struct io_rsrc_data *buf_data;
+
+ /* protected by ->uring_lock */
+ struct list_head rsrc_ref_list;
+ struct io_alloc_cache rsrc_node_cache;
+ struct wait_queue_head rsrc_quiesce_wq;
+ unsigned rsrc_quiesce;
+
+ struct list_head io_buffers_pages;
+
+ #if defined(CONFIG_UNIX)
+ struct socket *ring_sock;
+ #endif
+ /* hashed buffered write serialization */
+ struct io_wq_hash *hash_map;
+
+ /* Only used for accounting purposes */
+ struct user_struct *user;
+ struct mm_struct *mm_account;
+
+ /* ctx exit and cancelation */
+ struct llist_head fallback_llist;
+ struct delayed_work fallback_work;
+ struct work_struct exit_work;
+ struct list_head tctx_list;
+ struct completion ref_comp;
+
+ /* io-wq management, e.g. thread count */
+ u32 iowq_limits[2];
+ bool iowq_limits_set;
+
+ struct callback_head poll_wq_task_work;
+ struct list_head defer_list;
+ unsigned sq_thread_idle;
+ /* protected by ->completion_lock */
+ unsigned evfd_last_cq_tail;
+};
+
+struct io_tw_state {
+ /* ->uring_lock is taken, callbacks can use io_tw_lock to lock it */
+ bool locked;
+};
+
+enum {
+ REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
+ REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
+ REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
+ REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
+ REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
+ REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
+ REQ_F_CQE_SKIP_BIT = IOSQE_CQE_SKIP_SUCCESS_BIT,
+
+ /* first byte is taken by user flags, shift it to not overlap */
+ REQ_F_FAIL_BIT = 8,
+ REQ_F_INFLIGHT_BIT,
+ REQ_F_CUR_POS_BIT,
+ REQ_F_NOWAIT_BIT,
+ REQ_F_LINK_TIMEOUT_BIT,
+ REQ_F_NEED_CLEANUP_BIT,
+ REQ_F_POLLED_BIT,
+ REQ_F_BUFFER_SELECTED_BIT,
+ REQ_F_BUFFER_RING_BIT,
+ REQ_F_REISSUE_BIT,
+ REQ_F_CREDS_BIT,
+ REQ_F_REFCOUNT_BIT,
+ REQ_F_ARM_LTIMEOUT_BIT,
+ REQ_F_ASYNC_DATA_BIT,
+ REQ_F_SKIP_LINK_CQES_BIT,
+ REQ_F_SINGLE_POLL_BIT,
+ REQ_F_DOUBLE_POLL_BIT,
+ REQ_F_PARTIAL_IO_BIT,
+ REQ_F_CQE32_INIT_BIT,
+ REQ_F_APOLL_MULTISHOT_BIT,
+ REQ_F_CLEAR_POLLIN_BIT,
+ REQ_F_HASH_LOCKED_BIT,
+ /* keep async read/write and isreg together and in order */
+ REQ_F_SUPPORT_NOWAIT_BIT,
+ REQ_F_ISREG_BIT,
+
+ /* not a real bit, just to check we're not overflowing the space */
+ __REQ_F_LAST_BIT,
+};
+
+enum {
+ /* ctx owns file */
+ REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
+ /* drain existing IO first */
+ REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
+ /* linked sqes */
+ REQ_F_LINK = BIT(REQ_F_LINK_BIT),
+ /* doesn't sever on completion < 0 */
+ REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
+ /* IOSQE_ASYNC */
+ REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
+ /* IOSQE_BUFFER_SELECT */
+ REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),
+ /* IOSQE_CQE_SKIP_SUCCESS */
+ REQ_F_CQE_SKIP = BIT(REQ_F_CQE_SKIP_BIT),
+
+ /* fail rest of links */
+ REQ_F_FAIL = BIT(REQ_F_FAIL_BIT),
+ /* on inflight list, should be cancelled and waited on exit reliably */
+ REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
+ /* read/write uses file position */
+ REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
+ /* must not punt to workers */
+ REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
+ /* has or had linked timeout */
+ REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
+ /* needs cleanup */
+ REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
+ /* already went through poll handler */
+ REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
+ /* buffer already selected */
+ REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
+ /* buffer selected from ring, needs commit */
+ REQ_F_BUFFER_RING = BIT(REQ_F_BUFFER_RING_BIT),
+ /* caller should reissue async */
+ REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT),
+ /* supports async reads/writes */
+ REQ_F_SUPPORT_NOWAIT = BIT(REQ_F_SUPPORT_NOWAIT_BIT),
+ /* regular file */
+ REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
+ /* has creds assigned */
+ REQ_F_CREDS = BIT(REQ_F_CREDS_BIT),
+ /* skip refcounting if not set */
+ REQ_F_REFCOUNT = BIT(REQ_F_REFCOUNT_BIT),
+ /* there is a linked timeout that has to be armed */
+ REQ_F_ARM_LTIMEOUT = BIT(REQ_F_ARM_LTIMEOUT_BIT),
+ /* ->async_data allocated */
+ REQ_F_ASYNC_DATA = BIT(REQ_F_ASYNC_DATA_BIT),
+ /* don't post CQEs while failing linked requests */
+ REQ_F_SKIP_LINK_CQES = BIT(REQ_F_SKIP_LINK_CQES_BIT),
+ /* single poll may be active */
+ REQ_F_SINGLE_POLL = BIT(REQ_F_SINGLE_POLL_BIT),
+	/* double poll may be active */
+ REQ_F_DOUBLE_POLL = BIT(REQ_F_DOUBLE_POLL_BIT),
+ /* request has already done partial IO */
+ REQ_F_PARTIAL_IO = BIT(REQ_F_PARTIAL_IO_BIT),
+ /* fast poll multishot mode */
+ REQ_F_APOLL_MULTISHOT = BIT(REQ_F_APOLL_MULTISHOT_BIT),
+ /* ->extra1 and ->extra2 are initialised */
+ REQ_F_CQE32_INIT = BIT(REQ_F_CQE32_INIT_BIT),
+ /* recvmsg special flag, clear EPOLLIN */
+ REQ_F_CLEAR_POLLIN = BIT(REQ_F_CLEAR_POLLIN_BIT),
+ /* hashed into ->cancel_hash_locked, protected by ->uring_lock */
+ REQ_F_HASH_LOCKED = BIT(REQ_F_HASH_LOCKED_BIT),
+};
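Each mask is simply BIT() of the matching *_BIT index, so testing and setting follow the usual bitwise pattern. A tiny sketch; the demo_* helpers are invented for illustration:

static inline bool demo_req_has_creds(struct io_kiocb *req)
{
	return req->flags & REQ_F_CREDS;	/* creds valid IFF set */
}

static inline void demo_req_mark_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;		/* fail the rest of the link */
}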
+
+typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
+
+struct io_task_work {
+ struct llist_node node;
+ io_req_tw_func_t func;
+};
+
+struct io_cqe {
+ __u64 user_data;
+ __s32 res;
+ /* fd initially, then cflags for completion */
+ union {
+ __u32 flags;
+ int fd;
+ };
+};
+
+/*
+ * Each request type overlays its private data structure on top of this one.
+ * They must not exceed this one in size.
+ */
+struct io_cmd_data {
+ struct file *file;
+ /* each command gets 56 bytes of data */
+ __u8 data[56];
+};
+
+static inline void io_kiocb_cmd_sz_check(size_t cmd_sz)
+{
+ BUILD_BUG_ON(cmd_sz > sizeof(struct io_cmd_data));
+}
+#define io_kiocb_to_cmd(req, cmd_type) ( \
+ io_kiocb_cmd_sz_check(sizeof(cmd_type)) , \
+ ((cmd_type *)&(req)->cmd) \
+)
+#define cmd_to_io_kiocb(ptr) ((struct io_kiocb *) ptr)
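To show how the two macros pair up: an opcode handler overlays a private struct whose first member is the file pointer, converts with io_kiocb_to_cmd(), and can recover the request with cmd_to_io_kiocb(). A hedged sketch; struct demo_cmd and demo_prep are made up:

struct demo_cmd {
	struct file *file;	/* first member, mirroring io_cmd_data */
	u64 user_value;
};

static void demo_prep(struct io_kiocb *req)
{
	/* io_kiocb_cmd_sz_check() rejects oversized overlays at build time */
	struct demo_cmd *dc = io_kiocb_to_cmd(req, struct demo_cmd);

	dc->user_value = 42;
	WARN_ON(cmd_to_io_kiocb(dc) != req);	/* cmd sits at offset 0 */
}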
+
+struct io_kiocb {
+ union {
+ /*
+ * NOTE! Each of the io_kiocb union members has the file pointer
+ * as the first entry in their struct definition. So you can
+ * access the file pointer through any of the sub-structs,
+ * or directly as just 'file' in this struct.
+ */
+ struct file *file;
+ struct io_cmd_data cmd;
+ };
+
+ u8 opcode;
+ /* polled IO has completed */
+ u8 iopoll_completed;
+ /*
+ * Can be either a fixed buffer index, or used with provided buffers.
+ * For the latter, before issue it points to the buffer group ID,
+ * and after selection it points to the buffer ID itself.
+ */
+ u16 buf_index;
+ unsigned int flags;
+
+ struct io_cqe cqe;
+
+ struct io_ring_ctx *ctx;
+ struct task_struct *task;
+
+ struct io_rsrc_node *rsrc_node;
+
+ union {
+ /* store used ubuf, so we can prevent reloading */
+ struct io_mapped_ubuf *imu;
+
+ /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
+ struct io_buffer *kbuf;
+
+ /*
+ * stores buffer ID for ring provided buffers, valid IFF
+ * REQ_F_BUFFER_RING is set.
+ */
+ struct io_buffer_list *buf_list;
+ };
+
+ union {
+ /* used by request caches, completion batching and iopoll */
+ struct io_wq_work_node comp_list;
+ /* cache ->apoll->events */
+ __poll_t apoll_events;
+ };
+ atomic_t refs;
+ atomic_t poll_refs;
+ struct io_task_work io_task_work;
+ unsigned nr_tw;
+ /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
+ union {
+ struct hlist_node hash_node;
+ struct {
+ u64 extra1;
+ u64 extra2;
+ };
+ };
+ /* internal polling, see IORING_FEAT_FAST_POLL */
+ struct async_poll *apoll;
+ /* opcode allocated if it needs to store data for async defer */
+ void *async_data;
+ /* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
+ struct io_kiocb *link;
+ /* custom credentials, valid IFF REQ_F_CREDS is set */
+ const struct cred *creds;
+ struct io_wq_work work;
+};
+
+struct io_overflow_cqe {
+ struct list_head list;
+ struct io_uring_cqe cqe;
+};
+
+#endif
diff --git a/include/linux/ioam6.h b/include/linux/ioam6.h
new file mode 100644
index 000000000000..94a24b36998f
--- /dev/null
+++ b/include/linux/ioam6.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * IPv6 IOAM
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+#ifndef _LINUX_IOAM6_H
+#define _LINUX_IOAM6_H
+
+#include <uapi/linux/ioam6.h>
+
+#endif /* _LINUX_IOAM6_H */
diff --git a/include/linux/ioam6_genl.h b/include/linux/ioam6_genl.h
new file mode 100644
index 000000000000..176e67919de3
--- /dev/null
+++ b/include/linux/ioam6_genl.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * IPv6 IOAM Generic Netlink API
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+#ifndef _LINUX_IOAM6_GENL_H
+#define _LINUX_IOAM6_GENL_H
+
+#include <uapi/linux/ioam6_genl.h>
+
+#endif /* _LINUX_IOAM6_GENL_H */
diff --git a/include/linux/ioam6_iptunnel.h b/include/linux/ioam6_iptunnel.h
new file mode 100644
index 000000000000..07d9dfedd29d
--- /dev/null
+++ b/include/linux/ioam6_iptunnel.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * IPv6 IOAM Lightweight Tunnel API
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+#ifndef _LINUX_IOAM6_IPTUNNEL_H
+#define _LINUX_IOAM6_IPTUNNEL_H
+
+#include <uapi/linux/ioam6_iptunnel.h>
+
+#endif /* _LINUX_IOAM6_IPTUNNEL_H */
diff --git a/include/linux/ioc3.h b/include/linux/ioc3.h
deleted file mode 100644
index 38b286e9a46c..000000000000
--- a/include/linux/ioc3.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2005 Stanislaw Skowronek <skylark@linux-mips.org>
- */
-
-#ifndef _LINUX_IOC3_H
-#define _LINUX_IOC3_H
-
-#include <asm/sn/ioc3.h>
-
-#define IOC3_MAX_SUBMODULES 32
-
-#define IOC3_CLASS_NONE 0
-#define IOC3_CLASS_BASE_IP27 1
-#define IOC3_CLASS_BASE_IP30 2
-#define IOC3_CLASS_MENET_123 3
-#define IOC3_CLASS_MENET_4 4
-#define IOC3_CLASS_CADDUO 5
-#define IOC3_CLASS_SERIAL 6
-
-/* One of these per IOC3 */
-struct ioc3_driver_data {
- struct list_head list;
- int id; /* IOC3 sequence number */
- /* PCI mapping */
- unsigned long pma; /* physical address */
- struct ioc3 __iomem *vma; /* pointer to registers */
- struct pci_dev *pdev; /* PCI device */
- /* IRQ stuff */
- int dual_irq; /* set if separate IRQs are used */
- int irq_io, irq_eth; /* IRQ numbers */
- /* GPIO magic */
- spinlock_t gpio_lock;
- unsigned int gpdr_shadow;
- /* NIC identifiers */
- char nic_part[32];
- char nic_serial[16];
- char nic_mac[6];
- /* submodule set */
- int class;
- void *data[IOC3_MAX_SUBMODULES]; /* for submodule use */
- int active[IOC3_MAX_SUBMODULES]; /* set if probe succeeds */
- /* is_ir_lock must be held while
- * modifying sio_ie values, so
- * we can be sure that sio_ie is
- * not changing when we read it
- * along with sio_ir.
- */
- spinlock_t ir_lock; /* SIO_IE[SC] mod lock */
-};
-
-/* One per submodule */
-struct ioc3_submodule {
- char *name; /* descriptive submodule name */
- struct module *owner; /* owning kernel module */
- int ethernet; /* set for ethernet drivers */
- int (*probe) (struct ioc3_submodule *, struct ioc3_driver_data *);
- int (*remove) (struct ioc3_submodule *, struct ioc3_driver_data *);
- int id; /* assigned by IOC3, index for the "data" array */
- /* IRQ stuff */
- unsigned int irq_mask; /* IOC3 IRQ mask, leave clear for Ethernet */
- int reset_mask; /* non-zero if you want the ioc3.c module to reset interrupts */
- int (*intr) (struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
- /* private submodule data */
- void *data; /* assigned by submodule */
-};
-
-/**********************************
- * Functions needed by submodules *
- **********************************/
-
-#define IOC3_W_IES 0
-#define IOC3_W_IEC 1
-
-/* registers a submodule for all existing and future IOC3 chips */
-extern int ioc3_register_submodule(struct ioc3_submodule *);
-/* unregisters a submodule */
-extern void ioc3_unregister_submodule(struct ioc3_submodule *);
-/* enables IRQs indicated by irq_mask for a specified IOC3 chip */
-extern void ioc3_enable(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
-/* ackowledges specified IRQs */
-extern void ioc3_ack(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
-/* disables IRQs indicated by irq_mask for a specified IOC3 chip */
-extern void ioc3_disable(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
-/* atomically sets GPCR bits */
-extern void ioc3_gpcr_set(struct ioc3_driver_data *, unsigned int);
-/* general ireg writer */
-extern void ioc3_write_ireg(struct ioc3_driver_data *idd, uint32_t value, int reg);
-
-#endif
diff --git a/include/linux/ioc4.h b/include/linux/ioc4.h
deleted file mode 100644
index 51e2b9fb6372..000000000000
--- a/include/linux/ioc4.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved.
- */
-
-#ifndef _LINUX_IOC4_H
-#define _LINUX_IOC4_H
-
-#include <linux/interrupt.h>
-
-/***************
- * Definitions *
- ***************/
-
-/* Miscellaneous values inherent to hardware */
-
-#define IOC4_EXTINT_COUNT_DIVISOR 520 /* PCI clocks per COUNT tick */
-
-/***********************************
- * Structures needed by subdrivers *
- ***********************************/
-
-/* This structure fully describes the IOC4 miscellaneous registers which
- * appear at bar[0]+0x00000 through bar[0]+0x0005c. The corresponding
- * PCI resource is managed by the main IOC4 driver because it contains
- * registers of interest to many different IOC4 subdrivers.
- */
-struct ioc4_misc_regs {
- /* Miscellaneous IOC4 registers */
- union ioc4_pci_err_addr_l {
- uint32_t raw;
- struct {
- uint32_t valid:1; /* Address captured */
- uint32_t master_id:4; /* Unit causing error
- * 0/1: Serial port 0 TX/RX
- * 2/3: Serial port 1 TX/RX
- * 4/5: Serial port 2 TX/RX
- * 6/7: Serial port 3 TX/RX
- * 8: ATA/ATAPI
- * 9-15: Undefined
- */
- uint32_t mul_err:1; /* Multiple errors occurred */
- uint32_t addr:26; /* Bits 31-6 of error addr */
- } fields;
- } pci_err_addr_l;
- uint32_t pci_err_addr_h; /* Bits 63-32 of error addr */
- union ioc4_sio_int {
- uint32_t raw;
- struct {
- uint8_t tx_mt:1; /* TX ring buffer empty */
- uint8_t rx_full:1; /* RX ring buffer full */
- uint8_t rx_high:1; /* RX high-water exceeded */
- uint8_t rx_timer:1; /* RX timer has triggered */
- uint8_t delta_dcd:1; /* DELTA_DCD seen */
- uint8_t delta_cts:1; /* DELTA_CTS seen */
- uint8_t intr_pass:1; /* Interrupt pass-through */
- uint8_t tx_explicit:1; /* TX, MCW, or delay complete */
- } fields[4];
- } sio_ir; /* Serial interrupt state */
- union ioc4_other_int {
- uint32_t raw;
- struct {
- uint32_t ata_int:1; /* ATA port passthru */
- uint32_t ata_memerr:1; /* ATA halted by mem error */
- uint32_t memerr:4; /* Serial halted by mem err */
- uint32_t kbd_int:1; /* kbd/mouse intr asserted */
- uint32_t reserved:16; /* zero */
- uint32_t rt_int:1; /* INT_OUT section latch */
- uint32_t gen_int:8; /* Intr. from generic pins */
- } fields;
- } other_ir; /* Other interrupt state */
- union ioc4_sio_int sio_ies; /* Serial interrupt enable set */
- union ioc4_other_int other_ies; /* Other interrupt enable set */
- union ioc4_sio_int sio_iec; /* Serial interrupt enable clear */
- union ioc4_other_int other_iec; /* Other interrupt enable clear */
- union ioc4_sio_cr {
- uint32_t raw;
- struct {
- uint32_t cmd_pulse:4; /* Bytebus strobe width */
- uint32_t arb_diag:3; /* PCI bus requester */
- uint32_t sio_diag_idle:1; /* Active ser req? */
- uint32_t ata_diag_idle:1; /* Active ATA req? */
- uint32_t ata_diag_active:1; /* ATA req is winner */
- uint32_t reserved:22; /* zero */
- } fields;
- } sio_cr;
- uint32_t unused1;
- union ioc4_int_out {
- uint32_t raw;
- struct {
- uint32_t count:16; /* Period control */
- uint32_t mode:3; /* Output signal shape */
- uint32_t reserved:11; /* zero */
- uint32_t diag:1; /* Timebase control */
- uint32_t int_out:1; /* Current value */
- } fields;
- } int_out; /* External interrupt output control */
- uint32_t unused2;
- union ioc4_gpcr {
- uint32_t raw;
- struct {
- uint32_t dir:8; /* Pin direction */
- uint32_t edge:8; /* Edge/level mode */
- uint32_t reserved1:4; /* zero */
- uint32_t int_out_en:1; /* INT_OUT enable */
- uint32_t reserved2:11; /* zero */
- } fields;
- } gpcr_s; /* Generic PIO control set */
- union ioc4_gpcr gpcr_c; /* Generic PIO control clear */
- union ioc4_gpdr {
- uint32_t raw;
- struct {
- uint32_t gen_pin:8; /* State of pins */
- uint32_t reserved:24;
- } fields;
- } gpdr; /* Generic PIO data */
- uint32_t unused3;
- union ioc4_gppr {
- uint32_t raw;
- struct {
- uint32_t gen_pin:1; /* Single pin state */
- uint32_t reserved:31;
- } fields;
- } gppr[8]; /* Generic PIO pins */
-};
-
-/* Masks for GPCR DIR pins */
-#define IOC4_GPCR_DIR_0 0x01 /* External interrupt output */
-#define IOC4_GPCR_DIR_1 0x02 /* External interrupt input */
-#define IOC4_GPCR_DIR_2 0x04
-#define IOC4_GPCR_DIR_3 0x08 /* Keyboard/mouse presence */
-#define IOC4_GPCR_DIR_4 0x10 /* Ser. port 0 xcvr select (0=232, 1=422) */
-#define IOC4_GPCR_DIR_5 0x20 /* Ser. port 1 xcvr select (0=232, 1=422) */
-#define IOC4_GPCR_DIR_6 0x40 /* Ser. port 2 xcvr select (0=232, 1=422) */
-#define IOC4_GPCR_DIR_7 0x80 /* Ser. port 3 xcvr select (0=232, 1=422) */
-
-/* Masks for GPCR EDGE pins */
-#define IOC4_GPCR_EDGE_0 0x01
-#define IOC4_GPCR_EDGE_1 0x02 /* External interrupt input */
-#define IOC4_GPCR_EDGE_2 0x04
-#define IOC4_GPCR_EDGE_3 0x08
-#define IOC4_GPCR_EDGE_4 0x10
-#define IOC4_GPCR_EDGE_5 0x20
-#define IOC4_GPCR_EDGE_6 0x40
-#define IOC4_GPCR_EDGE_7 0x80
-
-#define IOC4_VARIANT_IO9 0x0900
-#define IOC4_VARIANT_PCI_RT 0x0901
-#define IOC4_VARIANT_IO10 0x1000
-
-/* One of these per IOC4 */
-struct ioc4_driver_data {
- struct list_head idd_list;
- unsigned long idd_bar0;
- struct pci_dev *idd_pdev;
- const struct pci_device_id *idd_pci_id;
- struct ioc4_misc_regs __iomem *idd_misc_regs;
- unsigned long count_period;
- void *idd_serial_data;
- unsigned int idd_variant;
-};
-
-/* One per submodule */
-struct ioc4_submodule {
- struct list_head is_list;
- char *is_name;
- struct module *is_owner;
- int (*is_probe) (struct ioc4_driver_data *);
- int (*is_remove) (struct ioc4_driver_data *);
-};
-
-#define IOC4_NUM_CARDS 8 /* max cards per partition */
-
-/**********************************
- * Functions needed by submodules *
- **********************************/
-
-extern int ioc4_register_submodule(struct ioc4_submodule *);
-extern void ioc4_unregister_submodule(struct ioc4_submodule *);
-
-#endif /* _LINUX_IOC4_H */
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index df38db2ef45b..14f7eaf1b443 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef IOCONTEXT_H
#define IOCONTEXT_H
@@ -7,6 +8,7 @@
enum {
ICQ_EXITED = 1 << 2,
+ ICQ_DESTROYED = 1 << 3,
};
/*
@@ -97,61 +99,40 @@ struct io_cq {
struct io_context {
atomic_long_t refcount;
atomic_t active_ref;
- atomic_t nr_tasks;
-
- /* all the fields below are protected by this lock */
- spinlock_t lock;
unsigned short ioprio;
- /*
- * For request batching
- */
- int nr_batch_requests; /* Number of requests left in the batch */
- unsigned long last_waited; /* Time last woken after wait for request */
+#ifdef CONFIG_BLK_ICQ
+ /* all the fields below are protected by this lock */
+ spinlock_t lock;
struct radix_tree_root icq_tree;
struct io_cq __rcu *icq_hint;
struct hlist_head icq_list;
struct work_struct release_work;
+#endif /* CONFIG_BLK_ICQ */
};
-/**
- * get_io_context_active - get active reference on ioc
- * @ioc: ioc of interest
- *
- * Only iocs with active reference can issue new IOs. This function
- * acquires an active reference on @ioc. The caller must already have an
- * active reference on @ioc.
- */
-static inline void get_io_context_active(struct io_context *ioc)
-{
- WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
- WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
- atomic_long_inc(&ioc->refcount);
- atomic_inc(&ioc->active_ref);
-}
-
-static inline void ioc_task_link(struct io_context *ioc)
-{
- get_io_context_active(ioc);
-
- WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
- atomic_inc(&ioc->nr_tasks);
-}
-
struct task_struct;
#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc);
-void put_io_context_active(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
-struct io_context *get_task_io_context(struct task_struct *task,
- gfp_t gfp_flags, int node);
+int __copy_io(unsigned long clone_flags, struct task_struct *tsk);
+static inline int copy_io(unsigned long clone_flags, struct task_struct *tsk)
+{
+ if (!current->io_context)
+ return 0;
+ return __copy_io(clone_flags, tsk);
+}
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc) { }
static inline void exit_io_context(struct task_struct *task) { }
-#endif
+static inline int copy_io(unsigned long clone_flags, struct task_struct *tsk)
+{
+ return 0;
+}
+#endif /* CONFIG_BLOCK */
-#endif
+#endif /* IOCONTEXT_H */
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index f64dc6ce5161..e2b836c2e119 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -1,47 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_IOMAP_H
#define LINUX_IOMAP_H 1
+#include <linux/atomic.h>
+#include <linux/bitmap.h>
+#include <linux/blk_types.h>
+#include <linux/mm.h>
#include <linux/types.h>
+#include <linux/mm_types.h>
+#include <linux/blkdev.h>
+struct address_space;
struct fiemap_extent_info;
struct inode;
+struct iomap_iter;
+struct iomap_dio;
+struct iomap_writepage_ctx;
struct iov_iter;
struct kiocb;
+struct page;
struct vm_area_struct;
struct vm_fault;
/*
* Types of block ranges for iomap mappings:
*/
-#define IOMAP_HOLE 0x01 /* no blocks allocated, need allocation */
-#define IOMAP_DELALLOC 0x02 /* delayed allocation blocks */
-#define IOMAP_MAPPED 0x03 /* blocks allocated @blkno */
-#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */
+#define IOMAP_HOLE 0 /* no blocks allocated, need allocation */
+#define IOMAP_DELALLOC 1 /* delayed allocation blocks */
+#define IOMAP_MAPPED 2 /* blocks allocated at @addr */
+#define IOMAP_UNWRITTEN 3 /* blocks allocated at @addr in unwritten state */
+#define IOMAP_INLINE 4 /* data inline in the inode */
/*
- * Flags for all iomap mappings:
+ * Flags reported by the file system from iomap_begin:
+ *
+ * IOMAP_F_NEW indicates that the blocks have been newly allocated and need
+ * zeroing for areas to which no data is copied.
+ *
+ * IOMAP_F_DIRTY indicates the inode has uncommitted metadata needed to access
+ * written data and requires fdatasync to commit them to persistent storage.
+ * This needs to take into account metadata changes that *may* be made at IO
+ * completion, such as file size updates from direct IO.
+ *
+ * IOMAP_F_SHARED indicates that the blocks are shared, and will need to be
+ * unshared as part of a write.
+ *
+ * IOMAP_F_MERGED indicates that the iomap contains the merge of multiple block
+ * mappings.
+ *
+ * IOMAP_F_BUFFER_HEAD indicates that the file system requires the use of
+ * buffer heads for this mapping.
+ *
+ * IOMAP_F_XATTR indicates that the iomap is for an extended attribute extent
+ * rather than a file data extent.
*/
-#define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */
+#define IOMAP_F_NEW (1U << 0)
+#define IOMAP_F_DIRTY (1U << 1)
+#define IOMAP_F_SHARED (1U << 2)
+#define IOMAP_F_MERGED (1U << 3)
+#define IOMAP_F_BUFFER_HEAD (1U << 4)
+#define IOMAP_F_XATTR (1U << 5)
/*
- * Flags that only need to be reported for IOMAP_REPORT requests:
+ * Flags set by the core iomap code during operations:
+ *
+ * IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size
+ * has changed as the result of this write operation.
+ *
+ * IOMAP_F_STALE indicates that the iomap is not valid any longer and the file
+ * range it covers needs to be remapped by the high level before the operation
+ * can proceed.
*/
-#define IOMAP_F_MERGED 0x10 /* contains multiple blocks/extents */
-#define IOMAP_F_SHARED 0x20 /* block shared with another file */
+#define IOMAP_F_SIZE_CHANGED (1U << 8)
+#define IOMAP_F_STALE (1U << 9)
/*
- * Magic value for blkno:
+ * Flags from 0x1000 up are for file system specific usage:
*/
-#define IOMAP_NULL_BLOCK -1LL /* blkno is not valid */
+#define IOMAP_F_PRIVATE (1U << 12)
+
+
+/*
+ * Magic value for addr:
+ */
+#define IOMAP_NULL_ADDR -1ULL /* addr is not valid */
+
+struct iomap_folio_ops;
struct iomap {
- sector_t blkno; /* 1st sector of mapping, 512b units */
+ u64 addr; /* disk offset of mapping, bytes */
loff_t offset; /* file offset of mapping, bytes */
u64 length; /* length of mapping, bytes */
u16 type; /* type of mapping */
u16 flags; /* flags for mapping */
struct block_device *bdev; /* block device for I/O */
struct dax_device *dax_dev; /* dax_dev for dax operations */
+ void *inline_data;
+ void *private; /* filesystem private */
+ const struct iomap_folio_ops *folio_ops;
+ u64 validity_cookie; /* used with .iomap_valid() */
+};
+
+static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos)
+{
+ return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
+}
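A quick arithmetic check of the helper above, assuming the usual 512-byte sectors (SECTOR_SHIFT == 9); demo_iomap_sector_example() is illustrative only:

static inline sector_t demo_iomap_sector_example(void)
{
	const struct iomap map = { .addr = 0x2000, .offset = 0x1000 };

	/* disk byte = 0x2000 + 0x1800 - 0x1000 = 0x2800 -> sector 20 */
	return iomap_sector(&map, 0x1800);
}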
+
+/*
+ * Returns the inline data pointer for logical offset @pos.
+ */
+static inline void *iomap_inline_data(const struct iomap *iomap, loff_t pos)
+{
+ return iomap->inline_data + pos - iomap->offset;
+}
+
+/*
+ * Check if the mapping's length is within the valid range for inline data.
+ * This is used to guard against accessing data beyond the page inline_data
+ * points at.
+ */
+static inline bool iomap_inline_data_valid(const struct iomap *iomap)
+{
+ return iomap->length <= PAGE_SIZE - offset_in_page(iomap->inline_data);
+}
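The two inline-data helpers are meant to be used as a validate-then-dereference pair. A hedged sketch; demo_read_inline is not a kernel function:

static int demo_read_inline(const struct iomap *iomap, loff_t pos,
			    void *dst, size_t len)
{
	/* never follow inline_data past the page it lives in */
	if (WARN_ON_ONCE(!iomap_inline_data_valid(iomap)))
		return -EIO;
	memcpy(dst, iomap_inline_data(iomap, pos), len);
	return 0;
}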
+
+/*
+ * When a filesystem sets folio_ops in an iomap mapping it returns, get_folio
+ * and put_folio will be called for each folio written to. This only applies
+ * to buffered writes as unbuffered writes will not typically have folios
+ * associated with them.
+ *
+ * When get_folio succeeds, put_folio will always be called to do any
+ * cleanup work necessary. put_folio is responsible for unlocking and putting
+ * @folio.
+ */
+struct iomap_folio_ops {
+ struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos,
+ unsigned len);
+ void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied,
+ struct folio *folio);
+
+ /*
+ * Check that the cached iomap still maps correctly to the filesystem's
+ * internal extent map. FS internal extent maps can change while iomap
+ * is iterating a cached iomap, so this hook allows iomap to detect that
+ * the iomap needs to be refreshed during a long running write
+ * operation.
+ *
+ * The filesystem can store internal state (e.g. a sequence number) in
+ * iomap->validity_cookie when the iomap is first mapped to be able to
+ * detect changes between mapping time and whenever .iomap_valid() is
+ * called.
+ *
+ * This is called with the folio over the specified file position held
+ * locked by the iomap code.
+ */
+ bool (*iomap_valid)(struct inode *inode, const struct iomap *iomap);
};
/*
@@ -52,7 +166,14 @@ struct iomap {
#define IOMAP_REPORT (1 << 2) /* report extent status, e.g. FIEMAP */
#define IOMAP_FAULT (1 << 3) /* mapping for page fault */
#define IOMAP_DIRECT (1 << 4) /* direct I/O */
-#define IOMAP_NOWAIT (1 << 5) /* Don't wait for writeback */
+#define IOMAP_NOWAIT (1 << 5) /* do not block */
+#define IOMAP_OVERWRITE_ONLY (1 << 6) /* only pure overwrites allowed */
+#define IOMAP_UNSHARE (1 << 7) /* unshare_file_range */
+#ifdef CONFIG_FS_DAX
+#define IOMAP_DAX (1 << 8) /* DAX mapping */
+#else
+#define IOMAP_DAX 0
+#endif /* CONFIG_FS_DAX */
struct iomap_ops {
/*
@@ -61,7 +182,8 @@ struct iomap_ops {
* The actual length is returned in iomap->length.
*/
int (*iomap_begin)(struct inode *inode, loff_t pos, loff_t length,
- unsigned flags, struct iomap *iomap);
+ unsigned flags, struct iomap *iomap,
+ struct iomap *srcmap);
/*
* Commit and/or unreserve space previous allocated using iomap_begin.
@@ -73,30 +195,206 @@ struct iomap_ops {
ssize_t written, unsigned flags, struct iomap *iomap);
};
+/**
+ * struct iomap_iter - Iterate through a range of a file
+ * @inode: Set at the start of the iteration and should not change.
+ * @pos: The current file position we are operating on. It is updated by
+ * calls to iomap_iter(). Treat as read-only in the body.
+ * @len: The remaining length of the file segment we're operating on.
+ * It is updated at the same time as @pos.
+ * @processed: The number of bytes processed by the body in the most recent
+ * iteration, or a negative errno. 0 causes the iteration to stop.
+ * @flags: Zero or more of the iomap_begin flags above.
+ * @iomap: Map describing the I/O iteration
+ * @srcmap: Source map for COW operations
+ */
+struct iomap_iter {
+ struct inode *inode;
+ loff_t pos;
+ u64 len;
+ s64 processed;
+ unsigned flags;
+ struct iomap iomap;
+ struct iomap srcmap;
+ void *private;
+};
+
+int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops);
+
+/**
+ * iomap_length - length of the current iomap iteration
+ * @iter: iteration structure
+ *
+ * Returns the length that the operation applies to for the current iteration.
+ */
+static inline u64 iomap_length(const struct iomap_iter *iter)
+{
+ u64 end = iter->iomap.offset + iter->iomap.length;
+
+ if (iter->srcmap.type != IOMAP_HOLE)
+ end = min(end, iter->srcmap.offset + iter->srcmap.length);
+ return min(iter->len, end - iter->pos);
+}
+
+/**
+ * iomap_iter_srcmap - return the source map for the current iomap iteration
+ * @i: iteration structure
+ *
+ * Write operations on file systems with reflink support might require a
+ * source and a destination map. This function returns the source map
+ * for a given operation, which may or may not be identical to the destination
+ * map in &i->iomap.
+ */
+static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i)
+{
+ if (i->srcmap.type != IOMAP_HOLE)
+ return &i->srcmap;
+ return &i->iomap;
+}
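The iterator is driven by a simple loop: call iomap_iter() until it returns 0 or a negative error, reporting progress through @processed each pass. A sketch of the canonical loop shape with a trivial body that claims the whole mapping was handled; demo_walk is illustrative:

static int demo_walk(struct inode *inode, loff_t pos, u64 len,
		     const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode = inode,
		.pos = pos,
		.len = len,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_length(&iter);	/* "handled it all" */
	return ret;
}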
+
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops);
-int iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
+int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
+ struct iomap *iomap, loff_t pos, loff_t length, ssize_t written,
+ int (*punch)(struct inode *inode, loff_t pos, loff_t length));
+
+int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
+void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
+bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
+struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos);
+bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
+void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
+int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
const struct iomap_ops *ops);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
bool *did_zero, const struct iomap_ops *ops);
int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
const struct iomap_ops *ops);
-int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops);
+vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf,
+ const struct iomap_ops *ops);
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
- loff_t start, loff_t len, const struct iomap_ops *ops);
+ u64 start, u64 len, const struct iomap_ops *ops);
loff_t iomap_seek_hole(struct inode *inode, loff_t offset,
const struct iomap_ops *ops);
loff_t iomap_seek_data(struct inode *inode, loff_t offset,
const struct iomap_ops *ops);
+sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
+ const struct iomap_ops *ops);
+
+/*
+ * Structure for writeback I/O completions.
+ */
+struct iomap_ioend {
+ struct list_head io_list; /* next ioend in chain */
+ u16 io_type;
+ u16 io_flags; /* IOMAP_F_* */
+ u32 io_folios; /* folios added to ioend */
+ struct inode *io_inode; /* file being written to */
+ size_t io_size; /* size of the extent */
+ loff_t io_offset; /* offset in the file */
+ sector_t io_sector; /* start sector of ioend */
+ struct bio *io_bio; /* bio being built */
+ struct bio io_inline_bio; /* MUST BE LAST! */
+};
+
+struct iomap_writeback_ops {
+ /*
+ * Required, maps the blocks so that writeback can be performed on
+ * the range starting at offset.
+ */
+ int (*map_blocks)(struct iomap_writepage_ctx *wpc, struct inode *inode,
+ loff_t offset);
+
+ /*
+	 * Optional, allows the file system to perform actions just before
+	 * submitting the bio and/or to override the bio end_io handler for complex
+ * operations like copy on write extent manipulation or unwritten extent
+ * conversions.
+ */
+ int (*prepare_ioend)(struct iomap_ioend *ioend, int status);
+
+ /*
+ * Optional, allows the file system to discard state on a page where
+ * we failed to submit any I/O.
+ */
+ void (*discard_folio)(struct folio *folio, loff_t pos);
+};
+
+struct iomap_writepage_ctx {
+ struct iomap iomap;
+ struct iomap_ioend *ioend;
+ const struct iomap_writeback_ops *ops;
+};
+
+void iomap_finish_ioends(struct iomap_ioend *ioend, int error);
+void iomap_ioend_try_merge(struct iomap_ioend *ioend,
+ struct list_head *more_ioends);
+void iomap_sort_ioends(struct list_head *ioend_list);
+int iomap_writepages(struct address_space *mapping,
+ struct writeback_control *wbc, struct iomap_writepage_ctx *wpc,
+ const struct iomap_writeback_ops *ops);
/*
* Flags for direct I/O ->end_io:
*/
#define IOMAP_DIO_UNWRITTEN (1 << 0) /* covers unwritten extent(s) */
#define IOMAP_DIO_COW (1 << 1) /* covers COW extent(s) */
-typedef int (iomap_dio_end_io_t)(struct kiocb *iocb, ssize_t ret,
- unsigned flags);
+
+struct iomap_dio_ops {
+ int (*end_io)(struct kiocb *iocb, ssize_t size, int error,
+ unsigned flags);
+ void (*submit_io)(const struct iomap_iter *iter, struct bio *bio,
+ loff_t file_offset);
+
+ /*
+ * Filesystems wishing to attach private information to a direct io bio
+ * must provide a ->submit_io method that attaches the additional
+ * information to the bio and changes the ->bi_end_io callback to a
+ * custom function. This function should, at a minimum, perform any
+ * relevant post-processing of the bio and end with a call to
+ * iomap_dio_bio_end_io.
+ */
+ struct bio_set *bio_set;
+};
+
+/*
+ * Wait for the I/O to complete in iomap_dio_rw even if the kiocb is not
+ * synchronous.
+ */
+#define IOMAP_DIO_FORCE_WAIT (1 << 0)
+
+/*
+ * Do not allocate blocks or zero partial blocks, but instead fall back to
+ * the caller by returning -EAGAIN. Used to optimize direct I/O writes that
+ * are not aligned to the file system block size.
+ */
+#define IOMAP_DIO_OVERWRITE_ONLY (1 << 1)
+
+/*
+ * When a page fault occurs, return a partial synchronous result and allow
+ * the caller to retry the rest of the operation after dealing with the page
+ * fault.
+ */
+#define IOMAP_DIO_PARTIAL (1 << 2)
+
ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
- const struct iomap_ops *ops, iomap_dio_end_io_t end_io);
+ const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
+ unsigned int dio_flags, void *private, size_t done_before);
+struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+ const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
+ unsigned int dio_flags, void *private, size_t done_before);
+ssize_t iomap_dio_complete(struct iomap_dio *dio);
+void iomap_dio_bio_end_io(struct bio *bio);
+
+#ifdef CONFIG_SWAP
+struct file;
+struct swap_info_struct;
+
+int iomap_swapfile_activate(struct swap_info_struct *sis,
+ struct file *swap_file, sector_t *pagespan,
+ const struct iomap_ops *ops);
+#else
+# define iomap_swapfile_activate(sis, swapfile, pagespan, ops) (-EIO)
+#endif /* CONFIG_SWAP */
#endif /* LINUX_IOMAP_H */
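As a usage sketch for the direct I/O entry point declared above, a filesystem ->read_iter can forward straight into iomap_dio_rw(). demo_iomap_ops stands in for the filesystem's real iomap_ops; no dops, dio_flags, private data or prior progress are passed:

static ssize_t demo_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	return iomap_dio_rw(iocb, to, &demo_iomap_ops, NULL, 0, NULL, 0);
}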
diff --git a/include/linux/iommu-common.h b/include/linux/iommu-common.h
deleted file mode 100644
index 376a27c9cc6a..000000000000
--- a/include/linux/iommu-common.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef _LINUX_IOMMU_COMMON_H
-#define _LINUX_IOMMU_COMMON_H
-
-#include <linux/spinlock_types.h>
-#include <linux/device.h>
-#include <asm/page.h>
-
-#define IOMMU_POOL_HASHBITS 4
-#define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS)
-#define IOMMU_ERROR_CODE (~(unsigned long) 0)
-
-struct iommu_pool {
- unsigned long start;
- unsigned long end;
- unsigned long hint;
- spinlock_t lock;
-};
-
-struct iommu_map_table {
- unsigned long table_map_base;
- unsigned long table_shift;
- unsigned long nr_pools;
- void (*lazy_flush)(struct iommu_map_table *);
- unsigned long poolsize;
- struct iommu_pool pools[IOMMU_NR_POOLS];
- u32 flags;
-#define IOMMU_HAS_LARGE_POOL 0x00000001
-#define IOMMU_NO_SPAN_BOUND 0x00000002
-#define IOMMU_NEED_FLUSH 0x00000004
- struct iommu_pool large_pool;
- unsigned long *map;
-};
-
-extern void iommu_tbl_pool_init(struct iommu_map_table *iommu,
- unsigned long num_entries,
- u32 table_shift,
- void (*lazy_flush)(struct iommu_map_table *),
- bool large_pool, u32 npools,
- bool skip_span_boundary_check);
-
-extern unsigned long iommu_tbl_range_alloc(struct device *dev,
- struct iommu_map_table *iommu,
- unsigned long npages,
- unsigned long *handle,
- unsigned long mask,
- unsigned int align_order);
-
-extern void iommu_tbl_range_free(struct iommu_map_table *iommu,
- u64 dma_addr, unsigned long npages,
- unsigned long entry);
-
-#endif
diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h
index 86bdeffe43ad..74be34f3a20a 100644
--- a/include/linux/iommu-helper.h
+++ b/include/linux/iommu-helper.h
@@ -1,7 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IOMMU_HELPER_H
#define _LINUX_IOMMU_HELPER_H
-#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/log2.h>
+#include <linux/math.h>
+#include <linux/types.h>
static inline unsigned long iommu_device_max_index(unsigned long size,
unsigned long offset,
@@ -13,9 +17,15 @@ static inline unsigned long iommu_device_max_index(unsigned long size,
return size;
}
-extern int iommu_is_span_boundary(unsigned int index, unsigned int nr,
- unsigned long shift,
- unsigned long boundary_size);
+static inline int iommu_is_span_boundary(unsigned int index, unsigned int nr,
+ unsigned long shift, unsigned long boundary_size)
+{
+ BUG_ON(!is_power_of_2(boundary_size));
+
+ shift = (shift + index) & (boundary_size - 1);
+ return shift + nr > boundary_size;
+}
+
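A worked instance of the boundary test above, assuming a 64-entry boundary: an 8-entry allocation starting at index 60 straddles the 64-entry line, so it must be rejected. demo_span_ok() is invented for illustration:

static bool demo_span_ok(unsigned int index, unsigned int nr)
{
	/* boundary_size = 64, shift = 0:
	 * index 60, nr 8 -> (0 + 60) & 63 = 60; 60 + 8 > 64 -> crosses */
	return !iommu_is_span_boundary(index, nr, 0, 64);
}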
extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr,
unsigned long shift,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 2cb54adc4a33..e8c9a7da1060 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <joerg.roedel@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_IOMMU_H
@@ -25,6 +13,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
+#include <uapi/linux/iommu.h>
#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
@@ -47,7 +36,11 @@ struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
+struct iommu_domain_ops;
struct notifier_block;
+struct iommu_sva;
+struct iommu_fault_event;
+struct iommu_dma_cookie;
/* iommu fault flags */
#define IOMMU_FAULT_READ 0x0
@@ -55,6 +48,7 @@ struct notifier_block;
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
struct device *, unsigned long, int, void *);
+typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);
struct iommu_domain_geometry {
dma_addr_t aperture_start; /* First address that can be mapped */
@@ -67,6 +61,9 @@ struct iommu_domain_geometry {
#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
implementation */
#define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */
+#define __IOMMU_DOMAIN_DMA_FQ (1U << 3) /* DMA-API uses flush queue */
+
+#define __IOMMU_DOMAIN_SVA (1U << 4) /* Shared process address space */
/*
* This are the possible domain-types
@@ -79,58 +76,69 @@ struct iommu_domain_geometry {
* IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations.
* This flag allows IOMMU drivers to implement
* certain optimizations for these domains
+ * IOMMU_DOMAIN_DMA_FQ - As above, but definitely using batched TLB
+ * invalidation.
+ * IOMMU_DOMAIN_SVA - DMA addresses are shared process addresses
+ *				  represented by mm_structs.
*/
#define IOMMU_DOMAIN_BLOCKED (0U)
#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
__IOMMU_DOMAIN_DMA_API)
+#define IOMMU_DOMAIN_DMA_FQ (__IOMMU_DOMAIN_PAGING | \
+ __IOMMU_DOMAIN_DMA_API | \
+ __IOMMU_DOMAIN_DMA_FQ)
+#define IOMMU_DOMAIN_SVA (__IOMMU_DOMAIN_SVA)
struct iommu_domain {
unsigned type;
- const struct iommu_ops *ops;
+ const struct iommu_domain_ops *ops;
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
- iommu_fault_handler_t handler;
- void *handler_token;
struct iommu_domain_geometry geometry;
- void *iova_cookie;
+ struct iommu_dma_cookie *iova_cookie;
+ enum iommu_page_response_code (*iopf_handler)(struct iommu_fault *fault,
+ void *data);
+ void *fault_data;
+ union {
+ struct {
+ iommu_fault_handler_t handler;
+ void *handler_token;
+ };
+ struct { /* IOMMU_DOMAIN_SVA */
+ struct mm_struct *mm;
+ int users;
+ };
+ };
};
+static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
+{
+ return domain->type & __IOMMU_DOMAIN_DMA_API;
+}
+
enum iommu_cap {
- IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
- transactions */
- IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
+ IOMMU_CAP_CACHE_COHERENCY, /* IOMMU_CACHE is supported */
IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
-};
-
-/*
- * Following constraints are specifc to FSL_PAMUV1:
- * -aperture must be power of 2, and naturally aligned
- * -number of windows must be power of 2, and address space size
- * of each window is determined by aperture size / # of windows
- * -the actual size of the mapped region of a window must be power
- * of 2 starting with 4KB and physical address must be naturally
- * aligned.
- * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned contraints.
- * The caller can invoke iommu_domain_get_attr to check if the underlying
- * iommu implementation supports these constraints.
- */
-
-enum iommu_attr {
- DOMAIN_ATTR_GEOMETRY,
- DOMAIN_ATTR_PAGING,
- DOMAIN_ATTR_WINDOWS,
- DOMAIN_ATTR_FSL_PAMU_STASH,
- DOMAIN_ATTR_FSL_PAMU_ENABLE,
- DOMAIN_ATTR_FSL_PAMUV1,
- DOMAIN_ATTR_NESTING, /* two stages of translation */
- DOMAIN_ATTR_MAX,
+ IOMMU_CAP_PRE_BOOT_PROTECTION, /* Firmware says it used the IOMMU for
+ DMA protection and we should too */
+ /*
+ * Per-device flag indicating if enforce_cache_coherency() will work on
+ * this device.
+ */
+ IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
};
/* These are the possible reserved region types */
enum iommu_resv_type {
/* Memory regions which must be mapped 1:1 at all times */
IOMMU_RESV_DIRECT,
+ /*
+ * Memory regions which are advertised to be 1:1 but are
+ * commonly considered relaxable in some conditions,
+	 * for instance in device assignment use cases (USB, Graphics)
+ */
+ IOMMU_RESV_DIRECT_RELAXABLE,
/* Arbitrary "never map this or give it to a device" address ranges */
IOMMU_RESV_RESERVED,
/* Hardware MSI region (untranslated) */
@@ -146,6 +154,7 @@ enum iommu_resv_type {
* @length: Length of the region in bytes
* @prot: IOMMU Protection flags (READ/WRITE/...)
* @type: Type of the reserved region
+ * @free: Callback to free associated memory allocations
*/
struct iommu_resv_region {
struct list_head list;
@@ -153,80 +162,191 @@ struct iommu_resv_region {
size_t length;
int prot;
enum iommu_resv_type type;
+ void (*free)(struct device *dev, struct iommu_resv_region *region);
};
+struct iommu_iort_rmr_data {
+ struct iommu_resv_region rr;
+
+ /* Stream IDs associated with IORT RMR entry */
+ const u32 *sids;
+ u32 num_sids;
+};
+
+/**
+ * enum iommu_dev_features - Per device IOMMU features
+ * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
+ * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
+ * enabling %IOMMU_DEV_FEAT_SVA requires
+ * %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
+ * Faults themselves instead of relying on the IOMMU. When
+ * supported, this feature must be enabled before and
+ * disabled after %IOMMU_DEV_FEAT_SVA.
+ *
+ * Device drivers enable a feature using iommu_dev_enable_feature().
+ */
+enum iommu_dev_features {
+ IOMMU_DEV_FEAT_SVA,
+ IOMMU_DEV_FEAT_IOPF,
+};
+
+#define IOMMU_PASID_INVALID (-1U)
+typedef unsigned int ioasid_t;
+
#ifdef CONFIG_IOMMU_API
/**
+ * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
+ *
+ * @start: IOVA representing the start of the range to be flushed
+ * @end: IOVA representing the end of the range to be flushed (inclusive)
+ * @pgsize: The interval at which to perform the flush
+ * @freelist: Removed pages to free after sync
+ * @queued: Indicates that the flush will be queued
+ *
+ * This structure is intended to be updated by multiple calls to the
+ * ->unmap() function in struct iommu_ops before eventually being passed
+ * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
+ * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
+ * them. @queued is set to indicate when ->iotlb_flush_all() will be called
+ * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
+ */
+struct iommu_iotlb_gather {
+ unsigned long start;
+ unsigned long end;
+ size_t pgsize;
+ struct list_head freelist;
+ bool queued;
+};
+
+/**
* struct iommu_ops - iommu ops and capabilities
* @capable: check capability
* @domain_alloc: allocate iommu domain
- * @domain_free: free iommu domain
- * @attach_dev: attach device to an iommu domain
- * @detach_dev: detach device from an iommu domain
- * @map: map a physically contiguous memory region to an iommu domain
- * @unmap: unmap a physically contiguous memory region from an iommu domain
- * @map_sg: map a scatter-gather list of physically contiguous memory chunks
- * to an iommu domain
- * @iova_to_phys: translate iova to physical address
- * @add_device: add device to iommu grouping
- * @remove_device: remove device from iommu grouping
+ * @probe_device: Add device to iommu driver handling
+ * @release_device: Remove device from iommu driver handling
+ * @probe_finalize: Do final setup work after the device is added to an IOMMU
+ * group and attached to the groups domain
+ * @set_platform_dma_ops: Returning control back to the platform DMA ops. This op
+ *                        is to support old IOMMU drivers; new drivers should use
+ * default domains, and the common IOMMU DMA ops.
* @device_group: find iommu group for a particular device
- * @domain_get_attr: Query domain attributes
- * @domain_set_attr: Change domain attributes
* @get_resv_regions: Request list of reserved regions for a device
- * @put_resv_regions: Free list of reserved regions for a device
- * @apply_resv_region: Temporary helper call-back for iova reserved ranges
- * @domain_window_enable: Configure and enable a particular window for a domain
- * @domain_window_disable: Disable a particular window for a domain
- * @domain_set_windows: Set the number of windows for a domain
- * @domain_get_windows: Return the number of windows for a domain
* @of_xlate: add OF master IDs to iommu grouping
+ * @is_attach_deferred: Check if domain attach should be deferred from iommu
+ * driver init to device driver init (default no)
+ * @dev_enable/disable_feat: per device entries to enable/disable
+ * iommu specific features.
+ * @page_response: handle page request response
+ * @def_domain_type: device default domain type, return value:
+ * - IOMMU_DOMAIN_IDENTITY: must use an identity domain
+ * - IOMMU_DOMAIN_DMA: must use a dma domain
+ * - 0: use the default setting
+ * @default_domain_ops: the default ops for domains
+ * @remove_dev_pasid: Remove any translation configurations of a specific
+ * pasid, so that any DMA transactions with this pasid
+ * will be blocked by the hardware.
* @pgsize_bitmap: bitmap of all possible supported page sizes
+ * @owner: Driver module providing these ops
*/
struct iommu_ops {
- bool (*capable)(enum iommu_cap);
+ bool (*capable)(struct device *dev, enum iommu_cap);
/* Domain allocation and freeing by the iommu driver */
struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
- void (*domain_free)(struct iommu_domain *);
- int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
- void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
- int (*map)(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot);
- size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
- size_t size);
- size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg, unsigned int nents, int prot);
- phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
- int (*add_device)(struct device *dev);
- void (*remove_device)(struct device *dev);
+ struct iommu_device *(*probe_device)(struct device *dev);
+ void (*release_device)(struct device *dev);
+ void (*probe_finalize)(struct device *dev);
+ void (*set_platform_dma_ops)(struct device *dev);
struct iommu_group *(*device_group)(struct device *dev);
- int (*domain_get_attr)(struct iommu_domain *domain,
- enum iommu_attr attr, void *data);
- int (*domain_set_attr)(struct iommu_domain *domain,
- enum iommu_attr attr, void *data);
/* Request/Free a list of reserved regions for a device */
void (*get_resv_regions)(struct device *dev, struct list_head *list);
- void (*put_resv_regions)(struct device *dev, struct list_head *list);
- void (*apply_resv_region)(struct device *dev,
- struct iommu_domain *domain,
- struct iommu_resv_region *region);
-
- /* Window handling functions */
- int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
- phys_addr_t paddr, u64 size, int prot);
- void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
- /* Set the number of windows per domain */
- int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count);
- /* Get the number of windows per domain */
- u32 (*domain_get_windows)(struct iommu_domain *domain);
int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
+ bool (*is_attach_deferred)(struct device *dev);
+
+ /* Per device IOMMU features */
+ int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
+ int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);
+
+ int (*page_response)(struct device *dev,
+ struct iommu_fault_event *evt,
+ struct iommu_page_response *msg);
+ int (*def_domain_type)(struct device *dev);
+ void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid);
+
+ const struct iommu_domain_ops *default_domain_ops;
unsigned long pgsize_bitmap;
+ struct module *owner;
+};
+
+/**
+ * struct iommu_domain_ops - domain specific operations
+ * @attach_dev: attach an iommu domain to a device
+ * Return:
+ * * 0 - success
+ * * EINVAL - can indicate that device and domain are incompatible due to
+ * some previous configuration of the domain, in which case the
+ * driver shouldn't log an error, since it is legitimate for a
+ * caller to test reuse of existing domains. Otherwise, it may
+ * still represent some other fundamental problem
+ * * ENOMEM - out of memory
+ * * ENOSPC - non-ENOMEM type of resource allocation failures
+ * * EBUSY - device is attached to a domain and cannot be changed
+ * * ENODEV - device specific errors, not able to be attached
+ * * <others> - treated as ENODEV by the caller. Use is discouraged
+ * @set_dev_pasid: set an iommu domain to a pasid of device
+ * @map: map a physically contiguous memory region to an iommu domain
+ * @map_pages: map a physically contiguous set of pages of the same size to
+ * an iommu domain.
+ * @unmap: unmap a physically contiguous memory region from an iommu domain
+ * @unmap_pages: unmap a number of pages of the same size from an iommu domain
+ * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
+ * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
+ * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
+ * queue
+ * @iova_to_phys: translate iova to physical address
+ * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
+ * including no-snoop TLPs on PCIe or other platform
+ * specific mechanisms.
+ * @enable_nesting: Enable nesting
+ * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
+ * @free: Release the domain after use.
+ */
+struct iommu_domain_ops {
+ int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
+ int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
+ ioasid_t pasid);
+
+ int (*map)(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
+ int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped);
+ size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
+ size_t size, struct iommu_iotlb_gather *iotlb_gather);
+ size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *iotlb_gather);
+
+ void (*flush_iotlb_all)(struct iommu_domain *domain);
+ void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
+ size_t size);
+ void (*iotlb_sync)(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *iotlb_gather);
+
+ phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
+ dma_addr_t iova);
+
+ bool (*enforce_cache_coherency)(struct iommu_domain *domain);
+ int (*enable_nesting)(struct iommu_domain *domain);
+ int (*set_pgtable_quirks)(struct iommu_domain *domain,
+ unsigned long quirks);
+
+ void (*free)(struct iommu_domain *domain);
};
/**
@@ -235,15 +355,72 @@ struct iommu_ops {
* @list: Used by the iommu-core to keep a list of registered iommus
* @ops: iommu-ops for talking to this iommu
* @dev: struct device for sysfs handling
+ * @max_pasids: number of supported PASIDs
*/
struct iommu_device {
struct list_head list;
const struct iommu_ops *ops;
struct fwnode_handle *fwnode;
- struct device dev;
+ struct device *dev;
+ u32 max_pasids;
};
-int iommu_device_register(struct iommu_device *iommu);
+/**
+ * struct iommu_fault_event - Generic fault event
+ *
+ * Can represent recoverable faults such as a page requests or
+ * unrecoverable faults such as DMA or IRQ remapping faults.
+ *
+ * @fault: fault descriptor
+ * @list: pending fault event list, used for tracking responses
+ */
+struct iommu_fault_event {
+ struct iommu_fault fault;
+ struct list_head list;
+};
+
+/**
+ * struct iommu_fault_param - per-device IOMMU fault data
+ * @handler: Callback function to handle IOMMU faults at device level
+ * @data: handler private data
+ * @faults: holds the pending faults which need a response
+ * @lock: protect pending faults list
+ */
+struct iommu_fault_param {
+ iommu_dev_fault_handler_t handler;
+ void *data;
+ struct list_head faults;
+ struct mutex lock;
+};
+
+/**
+ * struct dev_iommu - Collection of per-device IOMMU data
+ *
+ * @fault_param: IOMMU detected device fault reporting data
+ * @iopf_param: I/O Page Fault queue and data
+ * @fwspec: IOMMU fwspec data
+ * @iommu_dev: IOMMU device this device is linked to
+ * @priv: IOMMU Driver private data
+ * @max_pasids: number of PASIDs this device can consume
+ * @attach_deferred: the dma domain attachment is deferred
+ *
+ * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
+ * struct iommu_group *iommu_group;
+ */
+struct dev_iommu {
+ struct mutex lock;
+ struct iommu_fault_param *fault_param;
+ struct iopf_device_param *iopf_param;
+ struct iommu_fwspec *fwspec;
+ struct iommu_device *iommu_dev;
+ void *priv;
+ u32 max_pasids;
+ u32 attach_deferred:1;
+};
+
+int iommu_device_register(struct iommu_device *iommu,
+ const struct iommu_ops *ops,
+ struct device *hwdev);
void iommu_device_unregister(struct iommu_device *iommu);
int iommu_device_sysfs_add(struct iommu_device *iommu,
struct device *parent,
@@ -252,54 +429,68 @@ int iommu_device_sysfs_add(struct iommu_device *iommu,
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
+int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);
-static inline void iommu_device_set_ops(struct iommu_device *iommu,
- const struct iommu_ops *ops)
+static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
- iommu->ops = ops;
+ return (struct iommu_device *)dev_get_drvdata(dev);
}
-static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
- struct fwnode_handle *fwnode)
+static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
- iommu->fwnode = fwnode;
+ *gather = (struct iommu_iotlb_gather) {
+ .start = ULONG_MAX,
+ .freelist = LIST_HEAD_INIT(gather->freelist),
+ };
}
-#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
-#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
-#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
-#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER 4 /* Post Driver bind */
-#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER 5 /* Pre Driver unbind */
-#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */
+static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
+{
+ /*
+ * Assume that valid ops must be installed if iommu_probe_device()
+ * has succeeded. The device ops are essentially for internal use
+ * within the IOMMU subsystem itself, so we should be able to trust
+ * ourselves not to misuse the helper.
+ */
+ return dev->iommu->iommu_dev->ops;
+}
-extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
-extern bool iommu_present(struct bus_type *bus);
-extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
-extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
-extern struct iommu_group *iommu_group_get_by_id(int id);
+extern int bus_iommu_probe(const struct bus_type *bus);
+extern bool iommu_present(const struct bus_type *bus);
+extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
+extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
+extern struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
+extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
+extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot);
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size);
-extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg,unsigned int nents,
- int prot);
+ size_t size);
+extern size_t iommu_unmap_fast(struct iommu_domain *domain,
+ unsigned long iova, size_t size,
+ struct iommu_iotlb_gather *iotlb_gather);
+extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents,
+ int prot, gfp_t gfp);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
-extern int iommu_request_dm_for_dev(struct device *dev);
+extern void iommu_set_default_passthrough(bool cmd_line);
+extern void iommu_set_default_translated(bool cmd_line);
+extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
- enum iommu_resv_type type);
+ enum iommu_resv_type type, gfp_t gfp);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
struct list_head *head);
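For illustration, a minimal sketch of how a caller might use the gfp-aware
iommu_map()/iommu_unmap() pair declared above; the example_* name is
hypothetical and a real caller would obtain its IOVA from an allocator:

        static int example_map_one_page(struct device *dev, phys_addr_t paddr,
                                        unsigned long iova)
        {
                struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
                int ret;

                if (!domain)
                        return -ENODEV;

                ret = iommu_map(domain, iova, paddr, PAGE_SIZE,
                                IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
                if (ret)
                        return ret;

                /* ... device DMA to [iova, iova + PAGE_SIZE) ... */

                iommu_unmap(domain, iova, PAGE_SIZE);
                return 0;
        }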
@@ -321,54 +512,156 @@ extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
-extern int iommu_group_register_notifier(struct iommu_group *group,
- struct notifier_block *nb);
-extern int iommu_group_unregister_notifier(struct iommu_group *group,
- struct notifier_block *nb);
+extern int iommu_register_device_fault_handler(struct device *dev,
+ iommu_dev_fault_handler_t handler,
+ void *data);
+
+extern int iommu_unregister_device_fault_handler(struct device *dev);
+
+extern int iommu_report_device_fault(struct device *dev,
+ struct iommu_fault_event *evt);
+extern int iommu_page_response(struct device *dev,
+ struct iommu_page_response *msg);
+
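A sketch of how a driver might consume the fault-reporting hooks declared
above, assuming the uAPI fault record from uapi/linux/iommu.h (fault->type,
fault->event.addr); the example_* names are hypothetical:

        static int example_fault_handler(struct iommu_fault *fault, void *data)
        {
                struct device *dev = data;

                if (fault->type == IOMMU_FAULT_DMA_UNRECOV)
                        dev_warn(dev, "unrecoverable IOMMU fault at 0x%llx\n",
                                 fault->event.addr);
                return 0;
        }

        static int example_enable_fault_reporting(struct device *dev)
        {
                return iommu_register_device_fault_handler(dev,
                                                           example_fault_handler,
                                                           dev);
        }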
extern int iommu_group_id(struct iommu_group *group);
-extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
-extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
- void *data);
-extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
- void *data);
+int iommu_enable_nesting(struct iommu_domain *domain);
+int iommu_set_pgtable_quirks(struct iommu_domain *domain,
+ unsigned long quirks);
-/* Window handling function prototypes */
-extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
- phys_addr_t offset, u64 size,
- int prot);
-extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);
+void iommu_set_dma_strict(void);
extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags);
-static inline size_t iommu_map_sg(struct iommu_domain *domain,
- unsigned long iova, struct scatterlist *sg,
- unsigned int nents, int prot)
+static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+ if (domain->ops->flush_iotlb_all)
+ domain->ops->flush_iotlb_all(domain);
+}
+
+static inline void iommu_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *iotlb_gather)
+{
+ if (domain->ops->iotlb_sync)
+ domain->ops->iotlb_sync(domain, iotlb_gather);
+
+ iommu_iotlb_gather_init(iotlb_gather);
+}
+
+/**
+ * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
+ *
+ * @gather: TLB gather data
+ * @iova: start of page to invalidate
+ * @size: size of page to invalidate
+ *
+ * Helper for IOMMU drivers to check whether a new range and the gathered range
+ * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
+ * than merging the two, which might lead to unnecessary invalidations.
+ */
+static inline
+bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t size)
+{
+ unsigned long start = iova, end = start + size - 1;
+
+ return gather->end != 0 &&
+ (end + 1 < gather->start || start > gather->end + 1);
+}
+
+/**
+ * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
+ * @gather: TLB gather data
+ * @iova: start of page to invalidate
+ * @size: size of page to invalidate
+ *
+ * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
+ * where only the address range matters, and simply minimising intermediate
+ * syncs is preferred.
+ */
+static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t size)
+{
+ unsigned long end = iova + size - 1;
+
+ if (gather->start > iova)
+ gather->start = iova;
+ if (gather->end < end)
+ gather->end = end;
+}
+
+/**
+ * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
+ * @domain: IOMMU domain to be invalidated
+ * @gather: TLB gather data
+ * @iova: start of page to invalidate
+ * @size: size of page to invalidate
+ *
+ * Helper for IOMMU drivers to build invalidation commands based on individual
+ * pages, or with page size/table level hints which cannot be gathered if they
+ * differ.
+ */
+static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t size)
+{
+ /*
+ * If the new page is disjoint from the current range or is mapped at
+ * a different granularity, then sync the TLB so that the gather
+ * structure can be rewritten.
+ */
+ if ((gather->pgsize && gather->pgsize != size) ||
+ iommu_iotlb_gather_is_disjoint(gather, iova, size))
+ iommu_iotlb_sync(domain, gather);
+
+ gather->pgsize = size;
+ iommu_iotlb_gather_add_range(gather, iova, size);
+}
+
+static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
- return domain->ops->map_sg(domain, iova, sg, nents, prot);
+ return gather && gather->queued;
}
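To make the gather flow concrete, a minimal sketch of batching several
unmaps behind a single TLB sync; example_unmap_batch() is hypothetical:

        static void example_unmap_batch(struct iommu_domain *domain,
                                        unsigned long iova, size_t nr_pages)
        {
                struct iommu_iotlb_gather gather;
                size_t i;

                iommu_iotlb_gather_init(&gather);
                for (i = 0; i < nr_pages; i++)
                        iommu_unmap_fast(domain, iova + i * PAGE_SIZE,
                                         PAGE_SIZE, &gather);
                /* One invalidation covers the whole accumulated range. */
                iommu_iotlb_sync(domain, &gather);
        }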
/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
+/* FSL-MC device grouping function */
+struct iommu_group *fsl_mc_device_group(struct device *dev);
/**
* struct iommu_fwspec - per-device IOMMU instance data
* @ops: ops for this device's IOMMU
* @iommu_fwnode: firmware handle for this device's IOMMU
- * @iommu_priv: IOMMU driver private data for this device
+ * @flags: IOMMU_FWSPEC_* flags
* @num_ids: number of associated device IDs
* @ids: IDs which this device may present to the IOMMU
+ *
+ * Note that the IDs (and any other information, really) stored in this structure should be
+ * considered private to the IOMMU device driver and are not to be used directly by IOMMU
+ * consumers.
*/
struct iommu_fwspec {
const struct iommu_ops *ops;
struct fwnode_handle *iommu_fwnode;
- void *iommu_priv;
+ u32 flags;
unsigned int num_ids;
- u32 ids[1];
+ u32 ids[];
+};
+
+/* ATS is supported */
+#define IOMMU_FWSPEC_PCI_RC_ATS (1 << 0)
+
+/**
+ * struct iommu_sva - handle to a device-mm bond
+ * @dev: the device bound to this handle
+ * @domain: the SVA domain used by the bond
+ */
+struct iommu_sva {
+ struct device *dev;
+ struct iommu_domain *domain;
};
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
@@ -377,29 +670,77 @@ void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
+static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
+{
+ if (dev->iommu)
+ return dev->iommu->fwspec;
+ else
+ return NULL;
+}
+
+static inline void dev_iommu_fwspec_set(struct device *dev,
+ struct iommu_fwspec *fwspec)
+{
+ dev->iommu->fwspec = fwspec;
+}
+
+static inline void *dev_iommu_priv_get(struct device *dev)
+{
+ if (dev->iommu)
+ return dev->iommu->priv;
+ else
+ return NULL;
+}
+
+static inline void dev_iommu_priv_set(struct device *dev, void *priv)
+{
+ dev->iommu->priv = priv;
+}
+
+int iommu_probe_device(struct device *dev);
+
+int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
+int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
+
+int iommu_device_use_default_domain(struct device *dev);
+void iommu_device_unuse_default_domain(struct device *dev);
+
+int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
+void iommu_group_release_dma_owner(struct iommu_group *group);
+bool iommu_group_dma_owner_claimed(struct iommu_group *group);
+
+int iommu_device_claim_dma_owner(struct device *dev, void *owner);
+void iommu_device_release_dma_owner(struct device *dev);
+
+struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
+ struct mm_struct *mm);
+int iommu_attach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid);
+void iommu_detach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid);
+struct iommu_domain *
+iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
+ unsigned int type);
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
+struct iommu_fault_param {};
+struct iommu_iotlb_gather {};
-static inline bool iommu_present(struct bus_type *bus)
+static inline bool iommu_present(const struct bus_type *bus)
{
return false;
}
-static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
+static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
return false;
}
-static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
-{
- return NULL;
-}
-
-static inline struct iommu_group *iommu_group_get_by_id(int id)
+static inline struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{
return NULL;
}
@@ -425,33 +766,37 @@ static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
}
static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, int gfp_order, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
return -ENODEV;
}
-static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- int gfp_order)
+static inline size_t iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
{
- return -ENODEV;
+ return 0;
}
-static inline size_t iommu_map_sg(struct iommu_domain *domain,
- unsigned long iova, struct scatterlist *sg,
- unsigned int nents, int prot)
+static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
+ unsigned long iova, int gfp_order,
+ struct iommu_iotlb_gather *iotlb_gather)
{
- return -ENODEV;
+ return 0;
}
-static inline int iommu_domain_window_enable(struct iommu_domain *domain,
- u32 wnd_nr, phys_addr_t paddr,
- u64 size, int prot)
+static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot, gfp_t gfp)
{
return -ENODEV;
}
-static inline void iommu_domain_window_disable(struct iommu_domain *domain,
- u32 wnd_nr)
+static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+}
+
+static inline void iommu_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *iotlb_gather)
{
}
@@ -481,9 +826,17 @@ static inline int iommu_get_group_resv_regions(struct iommu_group *group,
return -ENODEV;
}
-static inline int iommu_request_dm_for_dev(struct device *dev)
+static inline void iommu_set_default_passthrough(bool cmd_line)
{
- return -ENODEV;
+}
+
+static inline void iommu_set_default_translated(bool cmd_line)
+{
+}
+
+static inline bool iommu_default_passthrough(void)
+{
+ return true;
}
static inline int iommu_attach_group(struct iommu_domain *domain,
@@ -545,50 +898,69 @@ static inline void iommu_group_put(struct iommu_group *group)
{
}
-static inline int iommu_group_register_notifier(struct iommu_group *group,
- struct notifier_block *nb)
+static inline
+int iommu_register_device_fault_handler(struct device *dev,
+ iommu_dev_fault_handler_t handler,
+ void *data)
{
return -ENODEV;
}
-static inline int iommu_group_unregister_notifier(struct iommu_group *group,
- struct notifier_block *nb)
+static inline int iommu_unregister_device_fault_handler(struct device *dev)
{
return 0;
}
-static inline int iommu_group_id(struct iommu_group *group)
+static inline
+int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
return -ENODEV;
}
-static inline int iommu_domain_get_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
+static inline int iommu_page_response(struct device *dev,
+ struct iommu_page_response *msg)
{
- return -EINVAL;
+ return -ENODEV;
}
-static inline int iommu_domain_set_attr(struct iommu_domain *domain,
- enum iommu_attr attr, void *data)
+static inline int iommu_group_id(struct iommu_group *group)
{
- return -EINVAL;
+ return -ENODEV;
}
-static inline int iommu_device_register(struct iommu_device *iommu)
+static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
+ unsigned long quirks)
+{
+ return 0;
+}
+
+static inline int iommu_device_register(struct iommu_device *iommu,
+ const struct iommu_ops *ops,
+ struct device *hwdev)
{
return -ENODEV;
}
-static inline void iommu_device_set_ops(struct iommu_device *iommu,
- const struct iommu_ops *ops)
+static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
+{
+ return NULL;
+}
+
+static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}
-static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
- struct fwnode_handle *fwnode)
+static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t size)
{
}
+static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
+{
+ return false;
+}
+
static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}
@@ -637,6 +1009,194 @@ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
return NULL;
}
+static inline int
+iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
+{
+ return -ENODEV;
+}
+
+static inline int
+iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
+{
+ return -ENODEV;
+}
+
+static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
+{
+ return NULL;
+}
+
+static inline int iommu_device_use_default_domain(struct device *dev)
+{
+ return 0;
+}
+
+static inline void iommu_device_unuse_default_domain(struct device *dev)
+{
+}
+
+static inline int
+iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
+{
+ return -ENODEV;
+}
+
+static inline void iommu_group_release_dma_owner(struct iommu_group *group)
+{
+}
+
+static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
+{
+ return false;
+}
+
+static inline void iommu_device_release_dma_owner(struct device *dev)
+{
+}
+
+static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner)
+{
+ return -ENODEV;
+}
+
+static inline struct iommu_domain *
+iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm)
+{
+ return NULL;
+}
+
+static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid)
+{
+ return -ENODEV;
+}
+
+static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid)
+{
+}
+
+static inline struct iommu_domain *
+iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
+ unsigned int type)
+{
+ return NULL;
+}
#endif /* CONFIG_IOMMU_API */
+/**
+ * iommu_map_sgtable - Map the given buffer to the IOMMU domain
+ * @domain: The IOMMU domain to perform the mapping
+ * @iova: The start address to map the buffer
+ * @sgt: The sg_table object describing the buffer
+ * @prot: IOMMU protection bits
+ *
+ * Creates a mapping at @iova for the buffer described by a scatterlist
+ * stored in the given sg_table object in the provided IOMMU domain.
+ */
+static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
+ unsigned long iova, struct sg_table *sgt, int prot)
+{
+ return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
+ GFP_KERNEL);
+}
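A short usage sketch for the helper above, treating a non-positive return
as failure; the example_* name is hypothetical:

        static int example_map_buffer(struct iommu_domain *domain,
                                      unsigned long iova, struct sg_table *sgt)
        {
                ssize_t mapped = iommu_map_sgtable(domain, iova, sgt,
                                                   IOMMU_READ | IOMMU_WRITE);

                return mapped > 0 ? 0 : -EINVAL;
        }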
+
+#ifdef CONFIG_IOMMU_DEBUGFS
+extern struct dentry *iommu_debugfs_dir;
+void iommu_debugfs_setup(void);
+#else
+static inline void iommu_debugfs_setup(void) {}
+#endif
+
+#ifdef CONFIG_IOMMU_DMA
+#include <linux/msi.h>
+
+/* Setup call for arch DMA mapping code */
+void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);
+
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
+
+int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
+void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg);
+
+#else /* CONFIG_IOMMU_DMA */
+
+struct msi_desc;
+struct msi_msg;
+
+static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
+{
+}
+
+static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
+{
+ return -ENODEV;
+}
+
+static inline int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
+{
+ return 0;
+}
+
+static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+}
+
+#endif /* CONFIG_IOMMU_DMA */
+
+/*
+ * Newer generations of Tegra SoCs require devices' stream IDs to be directly programmed into
+ * some registers. These are always paired with a Tegra SMMU or ARM SMMU, for which the contents
+ * of the struct iommu_fwspec are known. Use this helper to formalize access to these internals.
+ */
+#define TEGRA_STREAM_ID_BYPASS 0x7f
+
+static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream_id)
+{
+#ifdef CONFIG_IOMMU_API
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ if (fwspec && fwspec->num_ids == 1) {
+ *stream_id = fwspec->ids[0] & 0xffff;
+ return true;
+ }
+#endif
+
+ return false;
+}
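A sketch of the intended pattern, falling back to bypass when no
fwspec-provided ID exists; the register and example_* name are hypothetical:

        static void example_program_stream_id(struct device *dev,
                                              void __iomem *reg)
        {
                u32 sid = TEGRA_STREAM_ID_BYPASS;       /* assumed safe default */

                tegra_dev_iommu_get_stream_id(dev, &sid);
                writel(sid, reg);
        }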
+
+#ifdef CONFIG_IOMMU_SVA
+static inline void mm_pasid_init(struct mm_struct *mm)
+{
+ mm->pasid = IOMMU_PASID_INVALID;
+}
+static inline bool mm_valid_pasid(struct mm_struct *mm)
+{
+ return mm->pasid != IOMMU_PASID_INVALID;
+}
+void mm_pasid_drop(struct mm_struct *mm);
+struct iommu_sva *iommu_sva_bind_device(struct device *dev,
+ struct mm_struct *mm);
+void iommu_sva_unbind_device(struct iommu_sva *handle);
+u32 iommu_sva_get_pasid(struct iommu_sva *handle);
+#else
+static inline struct iommu_sva *
+iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
+{
+ return NULL;
+}
+
+static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
+{
+}
+
+static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
+{
+ return IOMMU_PASID_INVALID;
+}
+static inline void mm_pasid_init(struct mm_struct *mm) {}
+static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }
+static inline void mm_pasid_drop(struct mm_struct *mm) {}
+#endif /* CONFIG_IOMMU_SVA */
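A minimal SVA sketch, assuming the bound mm is the caller's own; the ERR_PTR
convention of the real implementation and the NULL return of the stub are
handled together, and the example_* name is hypothetical:

        static int example_bind_current_mm(struct device *dev, u32 *pasid)
        {
                struct iommu_sva *handle = iommu_sva_bind_device(dev, current->mm);

                if (IS_ERR_OR_NULL(handle))
                        return handle ? PTR_ERR(handle) : -ENODEV;

                *pasid = iommu_sva_get_pasid(handle);   /* program into the device */
                iommu_sva_unbind_device(handle);
                return 0;
        }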
+
#endif /* __LINUX_IOMMU_H */
diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
new file mode 100644
index 000000000000..1129a36a74c4
--- /dev/null
+++ b/include/linux/iommufd.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
+ */
+#ifndef __LINUX_IOMMUFD_H
+#define __LINUX_IOMMUFD_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+
+struct device;
+struct iommufd_device;
+struct page;
+struct iommufd_ctx;
+struct iommufd_access;
+struct file;
+
+struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
+ struct device *dev, u32 *id);
+void iommufd_device_unbind(struct iommufd_device *idev);
+
+int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id);
+void iommufd_device_detach(struct iommufd_device *idev);
+
+struct iommufd_access_ops {
+ u8 needs_pin_pages : 1;
+ void (*unmap)(void *data, unsigned long iova, unsigned long length);
+};
+
+enum {
+ IOMMUFD_ACCESS_RW_READ = 0,
+ IOMMUFD_ACCESS_RW_WRITE = 1 << 0,
+	/* Set if the caller is a kthread; rw will then use kthread_use_mm() */
+ IOMMUFD_ACCESS_RW_KTHREAD = 1 << 1,
+
+ /* Only for use by selftest */
+ __IOMMUFD_ACCESS_RW_SLOW_PATH = 1 << 2,
+};
+
+struct iommufd_access *
+iommufd_access_create(struct iommufd_ctx *ictx,
+ const struct iommufd_access_ops *ops, void *data, u32 *id);
+void iommufd_access_destroy(struct iommufd_access *access);
+int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id);
+
+void iommufd_ctx_get(struct iommufd_ctx *ictx);
+
+#if IS_ENABLED(CONFIG_IOMMUFD)
+struct iommufd_ctx *iommufd_ctx_from_file(struct file *file);
+void iommufd_ctx_put(struct iommufd_ctx *ictx);
+
+int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
+ unsigned long length, struct page **out_pages,
+ unsigned int flags);
+void iommufd_access_unpin_pages(struct iommufd_access *access,
+ unsigned long iova, unsigned long length);
+int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
+ void *data, size_t len, unsigned int flags);
+int iommufd_vfio_compat_ioas_get_id(struct iommufd_ctx *ictx, u32 *out_ioas_id);
+int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx);
+int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx);
+#else /* !CONFIG_IOMMUFD */
+static inline struct iommufd_ctx *iommufd_ctx_from_file(struct file *file)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void iommufd_ctx_put(struct iommufd_ctx *ictx)
+{
+}
+
+static inline int iommufd_access_pin_pages(struct iommufd_access *access,
+ unsigned long iova,
+ unsigned long length,
+ struct page **out_pages,
+ unsigned int flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void iommufd_access_unpin_pages(struct iommufd_access *access,
+ unsigned long iova,
+ unsigned long length)
+{
+}
+
+static inline int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
+ void *data, size_t len, unsigned int flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_IOMMUFD */
+#endif
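A sketch of the pin/unpin flow from the declarations above, assuming
IOMMUFD_ACCESS_RW_WRITE requests a writable pinning and that the access
object was created and attached elsewhere; example_* is hypothetical:

        static int example_pin_one_page(struct iommufd_access *access,
                                        unsigned long iova, struct page **page)
        {
                int rc;

                rc = iommufd_access_pin_pages(access, iova, PAGE_SIZE, page,
                                              IOMMUFD_ACCESS_RW_WRITE);
                if (rc)
                        return rc;

                /* ... touch the page contents ... */

                iommufd_access_unpin_pages(access, iova, PAGE_SIZE);
                return 0;
        }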
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
index d29e1e21bf3f..2c8860e406bd 100644
--- a/include/linux/iopoll.h
+++ b/include/linux/iopoll.h
@@ -1,15 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef _LINUX_IOPOLL_H
@@ -23,75 +14,131 @@
#include <linux/io.h>
/**
- * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
- * @op: accessor function (takes @addr as its only argument)
- * @addr: Address to poll
+ * read_poll_timeout - Periodically poll an address until a condition is
+ * met or a timeout occurs
+ * @op: accessor function (takes @args as its arguments)
* @val: Variable to read the value into
* @cond: Break condition (usually involving @val)
* @sleep_us: Maximum time to sleep between reads in us (0
* tight-loops). Should be less than ~20ms since usleep_range
- * is used (see Documentation/timers/timers-howto.txt).
+ * is used (see Documentation/timers/timers-howto.rst).
* @timeout_us: Timeout in us, 0 means never timeout
+ * @sleep_before_read: if true, sleep for @sleep_us before the first read.
+ * @args: arguments for @op poll
*
* Returns 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @addr is stored in @val. Must not
+ * case, the last read value at @args is stored in @val. Must not
* be called from atomic context if sleep_us or timeout_us are used.
*
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
*/
-#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \
+#define read_poll_timeout(op, val, cond, sleep_us, timeout_us, \
+ sleep_before_read, args...) \
({ \
- ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
- might_sleep_if(sleep_us); \
+ u64 __timeout_us = (timeout_us); \
+ unsigned long __sleep_us = (sleep_us); \
+ ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
+ might_sleep_if((__sleep_us) != 0); \
+ if (sleep_before_read && __sleep_us) \
+ usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
for (;;) { \
- (val) = op(addr); \
+ (val) = op(args); \
if (cond) \
break; \
- if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
- (val) = op(addr); \
+ if (__timeout_us && \
+ ktime_compare(ktime_get(), __timeout) > 0) { \
+ (val) = op(args); \
break; \
} \
- if (sleep_us) \
- usleep_range((sleep_us >> 2) + 1, sleep_us); \
+ if (__sleep_us) \
+ usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
} \
(cond) ? 0 : -ETIMEDOUT; \
})
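For example, a hypothetical driver could poll a multi-argument accessor like
this; my_chip, my_chip_read_status() and MY_CHIP_READY are all assumptions,
not real kernel symbols:

        /* Sleep up to 100us between reads, give up after 10ms. */
        static int example_wait_until_ready(struct my_chip *chip)
        {
                u32 status;

                return read_poll_timeout(my_chip_read_status, status,
                                         status & MY_CHIP_READY,
                                         100, 10000, false, chip);
        }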
/**
- * readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs
- * @op: accessor function (takes @addr as its only argument)
- * @addr: Address to poll
+ * read_poll_timeout_atomic - Periodically poll an address until a condition is
+ * met or a timeout occurs
+ * @op: accessor function (takes @args as its arguments)
* @val: Variable to read the value into
* @cond: Break condition (usually involving @val)
* @delay_us: Time to udelay between reads in us (0 tight-loops). Should
* be less than ~10us since udelay is used (see
- * Documentation/timers/timers-howto.txt).
+ * Documentation/timers/timers-howto.rst).
* @timeout_us: Timeout in us, 0 means never timeout
+ * @delay_before_read: if true, delay for @delay_us before the first read.
+ * @args: arguments for @op poll
*
* Returns 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @addr is stored in @val.
+ * case, the last read value at @args is stored in @val.
*
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
*/
-#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
+#define read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, \
+ delay_before_read, args...) \
({ \
- ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
+ u64 __timeout_us = (timeout_us); \
+ unsigned long __delay_us = (delay_us); \
+ ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
+ if (delay_before_read && __delay_us) \
+ udelay(__delay_us); \
for (;;) { \
- (val) = op(addr); \
+ (val) = op(args); \
if (cond) \
break; \
- if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
- (val) = op(addr); \
+ if (__timeout_us && \
+ ktime_compare(ktime_get(), __timeout) > 0) { \
+ (val) = op(args); \
break; \
} \
- if (delay_us) \
- udelay(delay_us); \
+ if (__delay_us) \
+ udelay(__delay_us); \
} \
(cond) ? 0 : -ETIMEDOUT; \
})
+/**
+ * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
+ * @op: accessor function (takes @addr as its only argument)
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0
+ * tight-loops). Should be less than ~20ms since usleep_range
+ * is used (see Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
+ *
+ * When available, you'll probably want to use one of the specialized
+ * macros defined below rather than this macro directly.
+ */
+#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \
+ read_poll_timeout(op, val, cond, sleep_us, timeout_us, false, addr)
+
+/**
+ * readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs
+ * @op: accessor function (takes @addr as its only argument)
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @delay_us: Time to udelay between reads in us (0 tight-loops). Should
+ * be less than ~10us since udelay is used (see
+ * Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val.
+ *
+ * When available, you'll probably want to use one of the specialized
+ * macros defined below rather than this macro directly.
+ */
+#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
+ read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, false, addr)
#define readb_poll_timeout(addr, val, cond, delay_us, timeout_us) \
readx_poll_timeout(readb, addr, val, cond, delay_us, timeout_us)
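A small usage sketch for the specialized form; the register and example_*
name are hypothetical:

        /* Wait up to 1ms for bit 0 of an MMIO status byte to clear. */
        static int example_wait_idle(void __iomem *status_reg)
        {
                u8 val;

                return readb_poll_timeout(status_reg, val, !(val & 0x01),
                                          10, 1000);
        }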
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 6230064d7f95..25d768d48970 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ioport.h Definitions of routines for detecting, reserving and
* allocating system resources.
@@ -9,7 +10,9 @@
#define _LINUX_IOPORT_H
#ifndef __ASSEMBLY__
+#include <linux/bits.h>
#include <linux/compiler.h>
+#include <linux/minmax.h>
#include <linux/types.h>
/*
* Resources are tree-like, allowing
@@ -56,6 +59,10 @@ struct resource {
#define IORESOURCE_EXT_TYPE_BITS 0x01000000 /* Resource extended types */
#define IORESOURCE_SYSRAM 0x01000000 /* System RAM (modifier) */
+/* IORESOURCE_SYSRAM specific bits. */
+#define IORESOURCE_SYSRAM_DRIVER_MANAGED 0x02000000 /* Always detected via a driver. */
+#define IORESOURCE_SYSRAM_MERGEABLE 0x04000000 /* Resource can be merged. */
+
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
#define IORESOURCE_DISABLED 0x10000000
@@ -72,7 +79,8 @@ struct resource {
#define IORESOURCE_IRQ_HIGHLEVEL (1<<2)
#define IORESOURCE_IRQ_LOWLEVEL (1<<3)
#define IORESOURCE_IRQ_SHAREABLE (1<<4)
-#define IORESOURCE_IRQ_OPTIONAL (1<<5)
+#define IORESOURCE_IRQ_OPTIONAL (1<<5)
+#define IORESOURCE_IRQ_WAKECAPABLE (1<<6)
/* PnP DMA specific bits (IORESOURCE_BITS) */
#define IORESOURCE_DMA_TYPE_MASK (3<<0)
@@ -101,6 +109,7 @@ struct resource {
#define IORESOURCE_MEM_32BIT (3<<3)
#define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */
#define IORESOURCE_MEM_EXPANSIONROM (1<<6)
+#define IORESOURCE_MEM_NONPOSTED (1<<7)
/* PnP I/O specific bits (IORESOURCE_BITS) */
#define IORESOURCE_IO_16BIT_ADDR (1<<0)
@@ -130,11 +139,23 @@ enum {
IORES_DESC_ACPI_NV_STORAGE = 3,
IORES_DESC_PERSISTENT_MEMORY = 4,
IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5,
+ IORES_DESC_DEVICE_PRIVATE_MEMORY = 6,
+ IORES_DESC_RESERVED = 7,
+ IORES_DESC_SOFT_RESERVED = 8,
+ IORES_DESC_CXL = 9,
+};
+
+/*
+ * Flags controlling ioremap() behavior.
+ */
+enum {
+ IORES_MAP_SYSTEM_RAM = BIT(0),
+ IORES_MAP_ENCRYPTED = BIT(1),
};
/* helpers to define resources */
#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
- { \
+(struct resource) { \
.start = (_start), \
.end = (_start) + (_size) - 1, \
.name = (_name), \
@@ -152,6 +173,11 @@ enum {
#define DEFINE_RES_MEM(_start, _size) \
DEFINE_RES_MEM_NAMED((_start), (_size), NULL)
+#define DEFINE_RES_REG_NAMED(_start, _size, _name) \
+ DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_REG)
+#define DEFINE_RES_REG(_start, _size) \
+ DEFINE_RES_REG_NAMED((_start), (_size), NULL)
+
#define DEFINE_RES_IRQ_NAMED(_irq, _name) \
DEFINE_RES_NAMED((_irq), 1, (_name), IORESOURCE_IRQ)
#define DEFINE_RES_IRQ(_irq) \
@@ -212,12 +238,39 @@ static inline bool resource_contains(struct resource *r1, struct resource *r2)
return r1->start <= r2->start && r1->end >= r2->end;
}
+/* True if any part of r1 overlaps r2 */
+static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
+{
+ return r1->start <= r2->end && r1->end >= r2->start;
+}
+
+static inline bool
+resource_intersection(struct resource *r1, struct resource *r2, struct resource *r)
+{
+ if (!resource_overlaps(r1, r2))
+ return false;
+ r->start = max(r1->start, r2->start);
+ r->end = min(r1->end, r2->end);
+ return true;
+}
+
+static inline bool
+resource_union(struct resource *r1, struct resource *r2, struct resource *r)
+{
+ if (!resource_overlaps(r1, r2))
+ return false;
+ r->start = min(r1->start, r2->start);
+ r->end = max(r1->end, r2->end);
+ return true;
+}
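A sketch of the two helpers; note that the compound-literal form of
DEFINE_RES_NAMED() added above also lets the DEFINE_RES_* macros be used in
plain assignments. The values here are arbitrary:

        static void example_resource_math(void)
        {
                struct resource a = DEFINE_RES_MEM(0x1000, 0x2000); /* 0x1000-0x2fff */
                struct resource b = DEFINE_RES_MEM(0x2000, 0x2000); /* 0x2000-0x3fff */
                struct resource r;

                if (resource_intersection(&a, &b, &r))
                        pr_info("overlap: %pR\n", &r);  /* 0x2000-0x2fff */
                if (resource_union(&a, &b, &r))
                        pr_info("union: %pR\n", &r);    /* 0x1000-0x3fff */
        }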
/* Convenience shorthand with allocation */
#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0)
#define request_muxed_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED)
#define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl)
#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0)
+#define request_mem_region_muxed(start, n, name) \
+ __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_MUXED)
#define request_mem_region_exclusive(start,n,name) \
__request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE)
#define rename_region(region, newname) do { (region)->name = (newname); } while (0)
@@ -234,8 +287,10 @@ extern struct resource * __request_region(struct resource *,
extern void __release_region(struct resource *, resource_size_t,
resource_size_t);
#ifdef CONFIG_MEMORY_HOTREMOVE
-extern int release_mem_region_adjustable(struct resource *, resource_size_t,
- resource_size_t);
+extern void release_mem_region_adjustable(resource_size_t, resource_size_t);
+#endif
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern void merge_system_ram_resource(struct resource *res);
#endif
/* Wrappers for managed devices */
@@ -262,24 +317,38 @@ extern struct resource * __devm_request_region(struct device *dev,
extern void __devm_release_region(struct device *dev, struct resource *parent,
resource_size_t start, resource_size_t n);
extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
-extern int iomem_is_exclusive(u64 addr);
+extern bool iomem_is_exclusive(u64 addr);
+extern bool resource_is_exclusive(struct resource *resource, u64 addr,
+ resource_size_t size);
extern int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
void *arg, int (*func)(unsigned long, unsigned long, void *));
extern int
+walk_mem_res(u64 start, u64 end, void *arg,
+ int (*func)(struct resource *, void *));
+extern int
walk_system_ram_res(u64 start, u64 end, void *arg,
- int (*func)(u64, u64, void *));
+ int (*func)(struct resource *, void *));
extern int
walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
- void *arg, int (*func)(u64, u64, void *));
+ void *arg, int (*func)(struct resource *, void *));
-/* True if any part of r1 overlaps r2 */
-static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
+struct resource *devm_request_free_mem_region(struct device *dev,
+ struct resource *base, unsigned long size);
+struct resource *request_free_mem_region(struct resource *base,
+ unsigned long size, const char *name);
+struct resource *alloc_free_mem_region(struct resource *base,
+ unsigned long size, unsigned long align, const char *name);
+
+static inline void irqresource_disabled(struct resource *res, u32 irq)
{
- return (r1->start <= r2->end && r1->end >= r2->start);
+ res->start = irq;
+ res->end = irq;
+ res->flags |= IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
}
+extern struct address_space *iomem_get_mapping(void);
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_IOPORT_H */
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 8c1239020d79..7578d4f6a969 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -1,49 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef IOPRIO_H
#define IOPRIO_H
#include <linux/sched.h>
+#include <linux/sched/rt.h>
#include <linux/iocontext.h>
-/*
- * Gives us 8 prio classes with 13-bits of data for each class
- */
-#define IOPRIO_CLASS_SHIFT (13)
-#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1)
-
-#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT)
-#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK)
-#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | data)
-
-#define ioprio_valid(mask) (IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE)
+#include <uapi/linux/ioprio.h>
/*
- * These are the io priority groups as implemented by CFQ. RT is the realtime
- * class, it always gets premium service. BE is the best-effort scheduling
- * class, the default for any process. IDLE is the idle scheduling class, it
- * is only served when no one else is using the disk.
+ * Default IO priority.
*/
-enum {
- IOPRIO_CLASS_NONE,
- IOPRIO_CLASS_RT,
- IOPRIO_CLASS_BE,
- IOPRIO_CLASS_IDLE,
-};
+#define IOPRIO_DEFAULT IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0)
/*
- * 8 best effort priority levels are supported
+ * Check that a priority value has a valid class.
*/
-#define IOPRIO_BE_NR (8)
-
-enum {
- IOPRIO_WHO_PROCESS = 1,
- IOPRIO_WHO_PGRP,
- IOPRIO_WHO_USER,
-};
+static inline bool ioprio_valid(unsigned short ioprio)
+{
+ unsigned short class = IOPRIO_PRIO_CLASS(ioprio);
-/*
- * Fallback BE priority
- */
-#define IOPRIO_NORM (4)
+ return class > IOPRIO_CLASS_NONE && class <= IOPRIO_CLASS_IDLE;
+}
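For instance, composing a priority with the uapi macros now pulled in above
and checking it; the example_* name is hypothetical:

        static unsigned short example_ioprio(void)
        {
                /* Best-effort class, level 4 (the historical default level). */
                unsigned short prio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);

                WARN_ON(!ioprio_valid(prio));
                return prio;
        }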
/*
* if process has set io priority explicitly, use that. if not, convert
@@ -62,17 +40,35 @@ static inline int task_nice_ioclass(struct task_struct *task)
{
if (task->policy == SCHED_IDLE)
return IOPRIO_CLASS_IDLE;
- else if (task->policy == SCHED_FIFO || task->policy == SCHED_RR)
+ else if (task_is_realtime(task))
return IOPRIO_CLASS_RT;
else
return IOPRIO_CLASS_BE;
}
-/*
- * For inheritance, return the highest of the two given priorities
- */
-extern int ioprio_best(unsigned short aprio, unsigned short bprio);
+#ifdef CONFIG_BLOCK
+int __get_task_ioprio(struct task_struct *p);
+#else
+static inline int __get_task_ioprio(struct task_struct *p)
+{
+ return IOPRIO_DEFAULT;
+}
+#endif /* CONFIG_BLOCK */
+
+static inline int get_current_ioprio(void)
+{
+ return __get_task_ioprio(current);
+}
extern int set_task_ioprio(struct task_struct *task, int ioprio);
+#ifdef CONFIG_BLOCK
+extern int ioprio_check_cap(int ioprio);
+#else
+static inline int ioprio_check_cap(int ioprio)
+{
+ return -ENOTBLK;
+}
+#endif /* CONFIG_BLOCK */
+
#endif
diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h
new file mode 100644
index 000000000000..cb71aa616bd3
--- /dev/null
+++ b/include/linux/iosys-map.h
@@ -0,0 +1,516 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Pointer abstraction for IO/system memory
+ */
+
+#ifndef __IOSYS_MAP_H__
+#define __IOSYS_MAP_H__
+
+#include <linux/compiler_types.h>
+#include <linux/io.h>
+#include <linux/string.h>
+
+/**
+ * DOC: overview
+ *
+ * When accessing a memory region, depending on its location, users may have to
+ * access it with I/O operations or memory load/store operations. For example,
+ * copying to system memory could be done with memcpy(), copying to I/O memory
+ * would be done with memcpy_toio().
+ *
+ * .. code-block:: c
+ *
+ * void *vaddr = ...; // pointer to system memory
+ * memcpy(vaddr, src, len);
+ *
+ * void *vaddr_iomem = ...; // pointer to I/O memory
+ * memcpy_toio(vaddr_iomem, src, len);
+ *
+ * The user of such a pointer may not have information about the mapping of that
+ * region, or may want a single code path to handle operations on that
+ * buffer, regardless of whether it's located in system or I/O memory. The type
+ * :c:type:`struct iosys_map <iosys_map>` and its helpers abstract that so the
+ * buffer can be passed around to other drivers or have separate duties inside
+ * the same driver for allocation, read and write operations.
+ *
+ * Open-coding access to :c:type:`struct iosys_map <iosys_map>` is considered
+ * bad style. Rather than accessing its fields directly, use one of the provided
+ * helper functions, or implement your own. For example, instances of
+ * :c:type:`struct iosys_map <iosys_map>` can be initialized statically with
+ * IOSYS_MAP_INIT_VADDR(), or at runtime with iosys_map_set_vaddr(). These
+ * helpers will set an address in system memory.
+ *
+ * .. code-block:: c
+ *
+ *	struct iosys_map map = IOSYS_MAP_INIT_VADDR(0xdeadbeef);
+ *
+ *	iosys_map_set_vaddr(&map, 0xdeadbeef);
+ *
+ * To set an address in I/O memory, use IOSYS_MAP_INIT_VADDR_IOMEM() or
+ * iosys_map_set_vaddr_iomem().
+ *
+ * .. code-block:: c
+ *
+ *	struct iosys_map map = IOSYS_MAP_INIT_VADDR_IOMEM(0xdeadbeef);
+ *
+ *	iosys_map_set_vaddr_iomem(&map, 0xdeadbeef);
+ *
+ * Instances of struct iosys_map do not have to be cleaned up, but
+ * can be cleared to NULL with iosys_map_clear(). Cleared mappings
+ * always refer to system memory.
+ *
+ * .. code-block:: c
+ *
+ * iosys_map_clear(&map);
+ *
+ * Test if a mapping is valid with either iosys_map_is_set() or
+ * iosys_map_is_null().
+ *
+ * .. code-block:: c
+ *
+ * if (iosys_map_is_set(&map) != iosys_map_is_null(&map))
+ * // always true
+ *
+ * Instances of :c:type:`struct iosys_map <iosys_map>` can be compared for
+ * equality with iosys_map_is_equal(). Mappings that point to different memory
+ * spaces, system or I/O, are never equal. That's even true if both spaces are
+ * located in the same address space, both mappings contain the same address
+ * value, or both mappings refer to NULL.
+ *
+ * .. code-block:: c
+ *
+ * struct iosys_map sys_map; // refers to system memory
+ * struct iosys_map io_map; // refers to I/O memory
+ *
+ * if (iosys_map_is_equal(&sys_map, &io_map))
+ * // always false
+ *
+ * A set up instance of struct iosys_map can be used to access or manipulate the
+ * buffer memory. Depending on the location of the memory, the provided helpers
+ * will pick the correct operations. Data can be copied into the memory with
+ * iosys_map_memcpy_to(). The address can be manipulated with iosys_map_incr().
+ *
+ * .. code-block:: c
+ *
+ * const void *src = ...; // source buffer
+ * size_t len = ...; // length of src
+ *
+ * iosys_map_memcpy_to(&map, src, len);
+ * iosys_map_incr(&map, len); // go to first byte after the memcpy
+ */
+
+/**
+ * struct iosys_map - Pointer to IO/system memory
+ * @vaddr_iomem: The buffer's address if in I/O memory
+ * @vaddr: The buffer's address if in system memory
+ * @is_iomem: True if the buffer is located in I/O memory, or false
+ * otherwise.
+ */
+struct iosys_map {
+ union {
+ void __iomem *vaddr_iomem;
+ void *vaddr;
+ };
+ bool is_iomem;
+};
+
+/**
+ * IOSYS_MAP_INIT_VADDR - Initializes struct iosys_map to an address in system memory
+ * @vaddr_: A system-memory address
+ */
+#define IOSYS_MAP_INIT_VADDR(vaddr_) \
+ { \
+ .vaddr = (vaddr_), \
+ .is_iomem = false, \
+ }
+
+/**
+ * IOSYS_MAP_INIT_VADDR_IOMEM - Initializes struct iosys_map to an address in I/O memory
+ * @vaddr_iomem_: An I/O-memory address
+ */
+#define IOSYS_MAP_INIT_VADDR_IOMEM(vaddr_iomem_) \
+ { \
+ .vaddr_iomem = (vaddr_iomem_), \
+ .is_iomem = true, \
+ }
+
+/**
+ * IOSYS_MAP_INIT_OFFSET - Initializes struct iosys_map from another iosys_map
+ * @map_: The iosys_map structure to copy from
+ * @offset_: Offset to add to the other mapping
+ *
+ * Initializes a new iosys_map struct based on another passed as argument. It
+ * does a shallow copy of the struct so it's possible to update the back storage
+ * without changing where the original map points to. It is the equivalent of
+ * doing:
+ *
+ * .. code-block:: c
+ *
+ *	struct iosys_map map = *other_map;
+ *	iosys_map_incr(&map, offset);
+ *
+ * Example usage:
+ *
+ * .. code-block:: c
+ *
+ * void foo(struct device *dev, struct iosys_map *base_map)
+ * {
+ * ...
+ * struct iosys_map map = IOSYS_MAP_INIT_OFFSET(base_map, FIELD_OFFSET);
+ * ...
+ * }
+ *
+ * The advantage of using the initializer over just increasing the offset with
+ * iosys_map_incr() like above is that the new map will always point to the
+ * right place of the buffer during its scope. It reduces the risk of updating
+ * the wrong part of the buffer and having no compiler warning about that. If
+ * the assignment to IOSYS_MAP_INIT_OFFSET() is forgotten, the compiler can warn
+ * about the use of an uninitialized variable.
+ */
+#define IOSYS_MAP_INIT_OFFSET(map_, offset_) ({ \
+ struct iosys_map copy = *map_; \
+ iosys_map_incr(&copy, offset_); \
+ copy; \
+})
+
+/**
+ * iosys_map_set_vaddr - Sets an iosys mapping structure to an address in system memory
+ * @map: The iosys_map structure
+ * @vaddr: A system-memory address
+ *
+ * Sets the address and clears the I/O-memory flag.
+ */
+static inline void iosys_map_set_vaddr(struct iosys_map *map, void *vaddr)
+{
+ map->vaddr = vaddr;
+ map->is_iomem = false;
+}
+
+/**
+ * iosys_map_set_vaddr_iomem - Sets an iosys mapping structure to an address in I/O memory
+ * @map: The iosys_map structure
+ * @vaddr_iomem: An I/O-memory address
+ *
+ * Sets the address and the I/O-memory flag.
+ */
+static inline void iosys_map_set_vaddr_iomem(struct iosys_map *map,
+ void __iomem *vaddr_iomem)
+{
+ map->vaddr_iomem = vaddr_iomem;
+ map->is_iomem = true;
+}
+
+/**
+ * iosys_map_is_equal - Compares two iosys mapping structures for equality
+ * @lhs: The iosys_map structure
+ * @rhs: An iosys_map structure to compare with
+ *
+ * Two iosys mapping structures are equal if they both refer to the same type of memory
+ * and to the same address within that memory.
+ *
+ * Returns:
+ * True if both structures are equal, or false otherwise.
+ */
+static inline bool iosys_map_is_equal(const struct iosys_map *lhs,
+ const struct iosys_map *rhs)
+{
+ if (lhs->is_iomem != rhs->is_iomem)
+ return false;
+ else if (lhs->is_iomem)
+ return lhs->vaddr_iomem == rhs->vaddr_iomem;
+ else
+ return lhs->vaddr == rhs->vaddr;
+}
+
+/**
+ * iosys_map_is_null - Tests whether an iosys mapping is NULL
+ * @map: The iosys_map structure
+ *
+ * Depending on the state of struct iosys_map.is_iomem, tests if the
+ * mapping is NULL.
+ *
+ * Returns:
+ * True if the mapping is NULL, or false otherwise.
+ */
+static inline bool iosys_map_is_null(const struct iosys_map *map)
+{
+ if (map->is_iomem)
+ return !map->vaddr_iomem;
+ return !map->vaddr;
+}
+
+/**
+ * iosys_map_is_set - Tests if the iosys mapping has been set
+ * @map: The iosys_map structure
+ *
+ * Depending on the state of struct iosys_map.is_iomem, tests if the
+ * mapping has been set.
+ *
+ * Returns:
+ * True if the mapping has been set, or false otherwise.
+ */
+static inline bool iosys_map_is_set(const struct iosys_map *map)
+{
+ return !iosys_map_is_null(map);
+}
+
+/**
+ * iosys_map_clear - Clears an iosys mapping structure
+ * @map: The iosys_map structure
+ *
+ * Clears all fields to zero, including struct iosys_map.is_iomem, so
+ * mapping structures that were set to point to I/O memory are reset for
+ * system memory. Pointers are cleared to NULL. This is the default.
+ */
+static inline void iosys_map_clear(struct iosys_map *map)
+{
+ if (map->is_iomem) {
+ map->vaddr_iomem = NULL;
+ map->is_iomem = false;
+ } else {
+ map->vaddr = NULL;
+ }
+}
+
+/**
+ * iosys_map_memcpy_to - Memcpy into offset of iosys_map
+ * @dst: The iosys_map structure
+ * @dst_offset: The offset into @dst at which to copy
+ * @src: The source buffer
+ * @len: The number of bytes to copy from @src
+ *
+ * Copies data into an iosys_map with an offset. The source buffer is in
+ * system memory. Depending on the buffer's location, the helper picks the
+ * correct method of accessing the memory.
+ */
+static inline void iosys_map_memcpy_to(struct iosys_map *dst, size_t dst_offset,
+ const void *src, size_t len)
+{
+ if (dst->is_iomem)
+ memcpy_toio(dst->vaddr_iomem + dst_offset, src, len);
+ else
+ memcpy(dst->vaddr + dst_offset, src, len);
+}
+
+/**
+ * iosys_map_memcpy_from - Memcpy from iosys_map into system memory
+ * @dst: Destination in system memory
+ * @src: The iosys_map structure
+ * @src_offset: The offset from which to copy
+ * @len: The number of bytes to copy into @dst
+ *
+ * Copies data from an iosys_map with an offset. The destination buffer is in
+ * system memory. Depending on the mapping location, the helper picks the
+ * correct method of accessing the memory.
+ */
+static inline void iosys_map_memcpy_from(void *dst, const struct iosys_map *src,
+ size_t src_offset, size_t len)
+{
+ if (src->is_iomem)
+ memcpy_fromio(dst, src->vaddr_iomem + src_offset, len);
+ else
+ memcpy(dst, src->vaddr + src_offset, len);
+}
+
+/**
+ * iosys_map_incr - Increments the address stored in an iosys mapping
+ * @map: The iosys_map structure
+ * @incr: The number of bytes to increment
+ *
+ * Increments the address stored in an iosys mapping. Depending on the
+ * buffer's location, the correct pointer is updated.
+ */
+static inline void iosys_map_incr(struct iosys_map *map, size_t incr)
+{
+ if (map->is_iomem)
+ map->vaddr_iomem += incr;
+ else
+ map->vaddr += incr;
+}
+
+/**
+ * iosys_map_memset - Memset iosys_map
+ * @dst: The iosys_map structure
+ * @offset: Offset into @dst at which to start setting @value
+ * @value: The value to set
+ * @len: The number of bytes to set in dst
+ *
+ * Sets @value in the iosys_map. Depending on the buffer's location, the helper
+ * picks the correct method of accessing the memory.
+ */
+static inline void iosys_map_memset(struct iosys_map *dst, size_t offset,
+ int value, size_t len)
+{
+ if (dst->is_iomem)
+ memset_io(dst->vaddr_iomem + offset, value, len);
+ else
+ memset(dst->vaddr + offset, value, len);
+}
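Putting the helpers together, a hypothetical fill routine that works
regardless of whether the mapping is in system or I/O memory:

        static void example_fill_buffer(struct iosys_map *map, const void *hdr,
                                        size_t hdr_len, size_t buf_len)
        {
                iosys_map_memset(map, 0, 0, buf_len);           /* zero everything */
                iosys_map_memcpy_to(map, 0, hdr, hdr_len);      /* copy in a header */
                iosys_map_incr(map, hdr_len);                   /* point past it */
        }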
+
+#ifdef CONFIG_64BIT
+#define __iosys_map_rd_io_u64_case(val_, vaddr_iomem_) \
+ u64: val_ = readq(vaddr_iomem_)
+#define __iosys_map_wr_io_u64_case(val_, vaddr_iomem_) \
+ u64: writeq(val_, vaddr_iomem_)
+#else
+#define __iosys_map_rd_io_u64_case(val_, vaddr_iomem_) \
+ u64: memcpy_fromio(&(val_), vaddr_iomem_, sizeof(u64))
+#define __iosys_map_wr_io_u64_case(val_, vaddr_iomem_) \
+ u64: memcpy_toio(vaddr_iomem_, &(val_), sizeof(u64))
+#endif
+
+#define __iosys_map_rd_io(val__, vaddr_iomem__, type__) _Generic(val__, \
+ u8: val__ = readb(vaddr_iomem__), \
+ u16: val__ = readw(vaddr_iomem__), \
+ u32: val__ = readl(vaddr_iomem__), \
+ __iosys_map_rd_io_u64_case(val__, vaddr_iomem__))
+
+#define __iosys_map_rd_sys(val__, vaddr__, type__) \
+ val__ = READ_ONCE(*(type__ *)(vaddr__))
+
+#define __iosys_map_wr_io(val__, vaddr_iomem__, type__) _Generic(val__, \
+ u8: writeb(val__, vaddr_iomem__), \
+ u16: writew(val__, vaddr_iomem__), \
+ u32: writel(val__, vaddr_iomem__), \
+ __iosys_map_wr_io_u64_case(val__, vaddr_iomem__))
+
+#define __iosys_map_wr_sys(val__, vaddr__, type__) \
+ WRITE_ONCE(*(type__ *)(vaddr__), val__)
+
+/**
+ * iosys_map_rd - Read a C-type value from the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @offset__: The offset from which to read
+ * @type__: Type of the value being read
+ *
+ * Read a C type value (u8, u16, u32 or u64) from the iosys_map. For other
+ * types, or if the pointer may be unaligned (which some architectures do not
+ * support), use iosys_map_memcpy_from().
+ *
+ * Returns:
+ * The value read from the mapping.
+ */
+#define iosys_map_rd(map__, offset__, type__) ({ \
+ type__ val; \
+ if ((map__)->is_iomem) { \
+ __iosys_map_rd_io(val, (map__)->vaddr_iomem + (offset__), type__);\
+ } else { \
+ __iosys_map_rd_sys(val, (map__)->vaddr + (offset__), type__); \
+ } \
+ val; \
+})
+
+/**
+ * iosys_map_wr - Write a C-type value to the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @offset__: The offset from the mapping to write to
+ * @type__: Type of the value being written
+ * @val__: Value to write
+ *
+ * Write a C type value (u8, u16, u32 or u64) to the iosys_map. For other
+ * types, or if the pointer may be unaligned (which some architectures do not
+ * support), use iosys_map_memcpy_to().
+ */
+#define iosys_map_wr(map__, offset__, type__, val__) ({ \
+ type__ val = (val__); \
+ if ((map__)->is_iomem) { \
+ __iosys_map_wr_io(val, (map__)->vaddr_iomem + (offset__), type__);\
+ } else { \
+ __iosys_map_wr_sys(val, (map__)->vaddr + (offset__), type__); \
+ } \
+})
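A short read-modify-write sketch with the two macros; the doorbell semantics
and example_* name are hypothetical:

        static void example_ring_doorbell(struct iosys_map *map, size_t offset)
        {
                u32 v = iosys_map_rd(map, offset, u32);

                iosys_map_wr(map, offset, u32, v | 0x1);        /* set the go bit */
        }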
+
+/**
+ * iosys_map_rd_field - Read a member from a struct in the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @struct_offset__: Offset from the beginning of the map, where the struct
+ * is located
+ * @struct_type__: The struct describing the layout of the mapping
+ * @field__: Member of the struct to read
+ *
+ * Read a value from iosys_map considering its layout is described by a C struct
+ * starting at @struct_offset__. The field offset and size is calculated and its
+ * value read. If the field access would incur in un-aligned access, then either
+ * iosys_map_memcpy_from() needs to be used or the architecture must support it.
+ * For example: suppose there is a @struct foo defined as below and the value
+ * ``foo.field2.inner2`` needs to be read from the iosys_map:
+ *
+ * .. code-block:: c
+ *
+ * struct foo {
+ * int field1;
+ * struct {
+ * int inner1;
+ * int inner2;
+ * } field2;
+ * int field3;
+ * } __packed;
+ *
+ * This is the expected memory layout of a buffer using iosys_map_rd_field():
+ *
+ * +------------------------------+--------------------------+
+ * | Address | Content |
+ * +==============================+==========================+
+ * | buffer + 0000 | start of mmapped buffer |
+ * | | pointed by iosys_map |
+ * +------------------------------+--------------------------+
+ * | ... | ... |
+ * +------------------------------+--------------------------+
+ * | buffer + ``struct_offset__`` | start of ``struct foo`` |
+ * +------------------------------+--------------------------+
+ * | ... | ... |
+ * +------------------------------+--------------------------+
+ * | buffer + wwww | ``foo.field2.inner2`` |
+ * +------------------------------+--------------------------+
+ * | ... | ... |
+ * +------------------------------+--------------------------+
+ * | buffer + yyyy | end of ``struct foo`` |
+ * +------------------------------+--------------------------+
+ * | ... | ... |
+ * +------------------------------+--------------------------+
+ * | buffer + zzzz                | end of mmapped buffer    |
+ * +------------------------------+--------------------------+
+ *
+ * Values automatically calculated by this macro or not needed are denoted by
+ * wwww, yyyy and zzzz. This is the code to read that value:
+ *
+ * .. code-block:: c
+ *
+ * x = iosys_map_rd_field(&map, offset, struct foo, field2.inner2);
+ *
+ * Returns:
+ * The value read from the mapping.
+ */
+#define iosys_map_rd_field(map__, struct_offset__, struct_type__, field__) ({ \
+ struct_type__ *s; \
+ iosys_map_rd(map__, struct_offset__ + offsetof(struct_type__, field__), \
+ typeof(s->field__)); \
+})
+
+/**
+ * iosys_map_wr_field - Write to a member of a struct in the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @struct_offset__: Offset from the beginning of the map, where the struct
+ * is located
+ * @struct_type__: The struct describing the layout of the mapping
+ * @field__: Member of the struct to write to
+ * @val__: Value to write
+ *
+ * Write a value to the iosys_map, whose layout is described by a C struct
+ * starting at @struct_offset__. The field offset and size are calculated and
+ * @val__ is written. If the field access would incur an unaligned access,
+ * either iosys_map_memcpy_to() needs to be used or the
+ * architecture must support it. Refer to iosys_map_rd_field() for expected
+ * usage and memory layout.
+ */
+#define iosys_map_wr_field(map__, struct_offset__, struct_type__, field__, val__) ({ \
+ struct_type__ *s; \
+ iosys_map_wr(map__, struct_offset__ + offsetof(struct_type__, field__), \
+ typeof(s->field__), val__); \
+})
+
+#endif /* __IOSYS_MAP_H__ */
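
A minimal usage sketch of the two field accessors above, assuming a buffer
that has already been mapped into an iosys_map; the struct layout and the
byte offset 64 are purely illustrative:

    #include <linux/iosys-map.h>

    struct foo {
            int field1;
            struct {
                    int inner1;
                    int inner2;
            } field2;
            int field3;
    } __packed;

    /* Read, bump and write back one field, without caring whether the
     * mapping lives in system or I/O memory. */
    static void foo_bump_inner2(struct iosys_map *map)
    {
            int v;

            v = iosys_map_rd_field(map, 64, struct foo, field2.inner2);
            iosys_map_wr_field(map, 64, struct foo, field2.inner2, v + 1);
    }
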
diff --git a/include/linux/iova.h b/include/linux/iova.h
index e0a892ae45c0..83c00fac2acb 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -1,11 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2006, Intel Corporation.
*
- * This file is released under the GPLv2.
- *
* Copyright (C) 2006-2008 Intel Corporation
* Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
- *
*/
#ifndef _IOVA_H_
@@ -23,28 +21,23 @@ struct iova {
unsigned long pfn_lo; /* Lowest allocated pfn */
};
-struct iova_magazine;
-struct iova_cpu_rcache;
-
-#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */
-#define MAX_GLOBAL_MAGS 32 /* magazines per bin */
-struct iova_rcache {
- spinlock_t lock;
- unsigned long depot_size;
- struct iova_magazine *depot[MAX_GLOBAL_MAGS];
- struct iova_cpu_rcache __percpu *cpu_rcaches;
-};
+struct iova_rcache;
/* holds all the iova translations for a domain */
struct iova_domain {
spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
struct rb_root rbroot; /* iova domain rbtree root */
- struct rb_node *cached32_node; /* Save last alloced node */
+ struct rb_node *cached_node; /* Save last alloced node */
+ struct rb_node *cached32_node; /* Save last 32-bit alloced node */
unsigned long granule; /* pfn granularity for this domain */
unsigned long start_pfn; /* Lower limit for this domain */
unsigned long dma_32bit_pfn;
- struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
+ unsigned long max32_alloc_size; /* Size of last failed allocation */
+ struct iova anchor; /* rbtree lookup anchor */
+
+ struct iova_rcache *rcaches;
+ struct hlist_node cpuhp_dead;
};
static inline unsigned long iova_size(struct iova *iova)
@@ -82,12 +75,12 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
return iova >> iova_shift(iovad);
}
-#if IS_ENABLED(CONFIG_IOMMU_IOVA)
+#if IS_REACHABLE(CONFIG_IOMMU_IOVA)
int iova_cache_get(void);
void iova_cache_put(void);
-struct iova *alloc_iova_mem(void);
-void free_iova_mem(struct iova *iova);
+unsigned long iova_rcache_range(void);
+
void free_iova(struct iova_domain *iovad, unsigned long pfn);
void __free_iova(struct iova_domain *iovad, struct iova *iova);
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
@@ -96,17 +89,14 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
unsigned long size);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
- unsigned long limit_pfn);
+ unsigned long limit_pfn, bool flush_rcache);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi);
-void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
- unsigned long start_pfn, unsigned long pfn_32bit);
+ unsigned long start_pfn);
+int iova_domain_init_rcaches(struct iova_domain *iovad);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
-struct iova *split_and_remove_iova(struct iova_domain *iovad,
- struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
-void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
#else
static inline int iova_cache_get(void)
{
@@ -117,15 +107,6 @@ static inline void iova_cache_put(void)
{
}
-static inline struct iova *alloc_iova_mem(void)
-{
- return NULL;
-}
-
-static inline void free_iova_mem(struct iova *iova)
-{
-}
-
static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
{
}
@@ -150,7 +131,8 @@ static inline void free_iova_fast(struct iova_domain *iovad,
static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
unsigned long size,
- unsigned long limit_pfn)
+ unsigned long limit_pfn,
+ bool flush_rcache)
{
return 0;
}
@@ -162,15 +144,9 @@ static inline struct iova *reserve_iova(struct iova_domain *iovad,
return NULL;
}
-static inline void copy_reserved_iova(struct iova_domain *from,
- struct iova_domain *to)
-{
-}
-
static inline void init_iova_domain(struct iova_domain *iovad,
unsigned long granule,
- unsigned long start_pfn,
- unsigned long pfn_32bit)
+ unsigned long start_pfn)
{
}
@@ -184,18 +160,6 @@ static inline void put_iova_domain(struct iova_domain *iovad)
{
}
-static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
- struct iova *iova,
- unsigned long pfn_lo,
- unsigned long pfn_hi)
-{
- return NULL;
-}
-
-static inline void free_cpu_cached_iovas(unsigned int cpu,
- struct iova_domain *iovad)
-{
-}
#endif
#endif
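
For callers, the reworked iova API splits domain setup into two steps and
adds the flush_rcache argument to the fast allocator. A sketch of the new
calling sequence; the granule, start PFN and allocation size are
illustrative only:

    #include <linux/iova.h>
    #include <linux/sizes.h>

    static int example_iova_setup(struct iova_domain *iovad,
                                  unsigned long limit_pfn)
    {
            int ret;

            ret = iova_cache_get();
            if (ret)
                    return ret;

            /* The old pfn_32bit argument is gone... */
            init_iova_domain(iovad, SZ_4K, 1);
            /* ...and the per-CPU rcaches are now set up explicitly. */
            ret = iova_domain_init_rcaches(iovad);
            if (ret)
                    return ret;

            /* Retry once with flush_rcache=true so that ranges stuck in
             * per-CPU caches are handed back to the rbtree. */
            if (!alloc_iova_fast(iovad, 1, limit_pfn, false) &&
                !alloc_iova_fast(iovad, 1, limit_pfn, true))
                    return -ENOMEM;

            return 0;
    }
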
diff --git a/include/linux/iova_bitmap.h b/include/linux/iova_bitmap.h
new file mode 100644
index 000000000000..c006cf0a25f3
--- /dev/null
+++ b/include/linux/iova_bitmap.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Oracle and/or its affiliates.
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+#ifndef _IOVA_BITMAP_H_
+#define _IOVA_BITMAP_H_
+
+#include <linux/types.h>
+
+struct iova_bitmap;
+
+typedef int (*iova_bitmap_fn_t)(struct iova_bitmap *bitmap,
+ unsigned long iova, size_t length,
+ void *opaque);
+
+struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,
+ unsigned long page_size,
+ u64 __user *data);
+void iova_bitmap_free(struct iova_bitmap *bitmap);
+int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque,
+ iova_bitmap_fn_t fn);
+void iova_bitmap_set(struct iova_bitmap *bitmap,
+ unsigned long iova, size_t length);
+
+#endif
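
The iova_bitmap interface is deliberately small: allocate against a
user-supplied bitmap, iterate, mark, free. A sketch of a dirty-page
reporter, assuming iova_bitmap_alloc() follows the usual ERR_PTR
convention; the fetch_dirty() callback is invented for illustration:

    #include <linux/err.h>
    #include <linux/iova_bitmap.h>

    /* Invented callback: a real driver would query its hardware here. */
    static int fetch_dirty(struct iova_bitmap *bitmap, unsigned long iova,
                           size_t length, void *opaque)
    {
            iova_bitmap_set(bitmap, iova, PAGE_SIZE);
            return 0;
    }

    static int report_dirty(unsigned long iova, size_t length,
                            u64 __user *data)
    {
            struct iova_bitmap *bitmap;
            int ret;

            bitmap = iova_bitmap_alloc(iova, length, PAGE_SIZE, data);
            if (IS_ERR(bitmap))
                    return PTR_ERR(bitmap);

            ret = iova_bitmap_for_each(bitmap, NULL, fetch_dirty);

            iova_bitmap_free(bitmap);
            return ret;
    }
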
diff --git a/include/linux/ip.h b/include/linux/ip.h
index 492bc6513533..d11c25f5030a 100644
--- a/include/linux/ip.h
+++ b/include/linux/ip.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
@@ -8,11 +9,6 @@
* Version: @(#)ip.h 1.0.2 04/28/93
*
* Authors: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_IP_H
#define _LINUX_IP_H
@@ -34,4 +30,30 @@ static inline struct iphdr *ipip_hdr(const struct sk_buff *skb)
{
return (struct iphdr *)skb_transport_header(skb);
}
+
+static inline unsigned int ip_transport_len(const struct sk_buff *skb)
+{
+ return ntohs(ip_hdr(skb)->tot_len) - skb_network_header_len(skb);
+}
+
+static inline unsigned int iph_totlen(const struct sk_buff *skb, const struct iphdr *iph)
+{
+ u32 len = ntohs(iph->tot_len);
+
+ return (len || !skb_is_gso(skb) || !skb_is_gso_tcp(skb)) ?
+ len : skb->len - skb_network_offset(skb);
+}
+
+static inline unsigned int skb_ip_totlen(const struct sk_buff *skb)
+{
+ return iph_totlen(skb, ip_hdr(skb));
+}
+
+/* IPv4 datagram length is stored into 16bit field (tot_len) */
+#define IP_MAX_MTU 0xFFFFU
+
+static inline void iph_set_totlen(struct iphdr *iph, unsigned int len)
+{
+ iph->tot_len = len <= IP_MAX_MTU ? htons(len) : 0;
+}
#endif /* _LINUX_IP_H */
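
The new helpers exist because tot_len is only 16 bits wide: a GSO TCP
packet longer than IP_MAX_MTU is marked with tot_len == 0 and its real
length is recovered from the skb instead. A short sketch of both
directions; the function names are illustrative:

    #include <linux/ip.h>
    #include <linux/skbuff.h>

    /* Receive side: a total length that stays correct for oversized
     * ("BIG TCP" style) GSO packets. */
    static unsigned int example_rx_totlen(const struct sk_buff *skb)
    {
            return skb_ip_totlen(skb);
    }

    /* Transmit side: lengths above IP_MAX_MTU are encoded as 0. */
    static void example_tx_totlen(struct iphdr *iph, unsigned int len)
    {
            iph_set_totlen(iph, len);
    }
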
diff --git a/include/linux/ipack.h b/include/linux/ipack.h
index 8bddc3fbdddf..2c6936b8371f 100644
--- a/include/linux/ipack.h
+++ b/include/linux/ipack.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Industry-pack bus.
*
* Copyright (C) 2011-2012 CERN (www.cern.ch)
* Author: Samuel Iglesias Gonsalvez <siglesias@igalia.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; version 2 of the License.
*/
#include <linux/mod_devicetable.h>
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index 5591f055e13f..e1c9eea6015b 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -1,11 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IPC_H
#define _LINUX_IPC_H
#include <linux/spinlock.h>
#include <linux/uidgid.h>
+#include <linux/rhashtable-types.h>
#include <uapi/linux/ipc.h>
-
-#define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */
+#include <linux/refcount.h>
/* used by in-kernel data structures */
struct kern_ipc_perm {
@@ -21,8 +22,10 @@ struct kern_ipc_perm {
unsigned long seq;
void *security;
+ struct rhash_head khtnode;
+
struct rcu_head rcu;
- atomic_t refcount;
-} ____cacheline_aligned_in_smp;
+ refcount_t refcount;
+} ____cacheline_aligned_in_smp __randomize_layout;
#endif /* _LINUX_IPC_H */
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 848e5796400e..e8240cf2611a 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IPC_NAMESPACE_H__
#define __IPC_NAMESPACE_H__
@@ -7,6 +8,10 @@
#include <linux/notifier.h>
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
+#include <linux/refcount.h>
+#include <linux/rhashtable-types.h>
+#include <linux/sysctl.h>
+#include <linux/percpu_counter.h>
struct user_namespace;
@@ -15,11 +20,15 @@ struct ipc_ids {
unsigned short seq;
struct rw_semaphore rwsem;
struct idr ipcs_idr;
+ int max_idx;
+ int last_idx; /* For wrap around detection */
+#ifdef CONFIG_CHECKPOINT_RESTORE
int next_id;
+#endif
+ struct rhashtable key_ht;
};
struct ipc_namespace {
- atomic_t count;
struct ipc_ids ids[3];
int sem_ctls[4];
@@ -28,8 +37,8 @@ struct ipc_namespace {
unsigned int msg_ctlmax;
unsigned int msg_ctlmnb;
unsigned int msg_ctlmni;
- atomic_t msg_bytes;
- atomic_t msg_hdrs;
+ struct percpu_counter percpu_msg_bytes;
+ struct percpu_counter percpu_msg_hdrs;
size_t shm_ctlmax;
size_t shm_ctlall;
@@ -56,12 +65,20 @@ struct ipc_namespace {
unsigned int mq_msg_default;
unsigned int mq_msgsize_default;
+ struct ctl_table_set mq_set;
+ struct ctl_table_header *mq_sysctls;
+
+ struct ctl_table_set ipc_set;
+ struct ctl_table_header *ipc_sysctls;
+
/* user_ns which owns the ipc ns */
struct user_namespace *user_ns;
struct ucounts *ucounts;
+ struct llist_node mnt_llist;
+
struct ns_common ns;
-};
+} __randomize_layout;
extern struct ipc_namespace init_ipc_ns;
extern spinlock_t mq_lock;
@@ -118,10 +135,20 @@ extern struct ipc_namespace *copy_ipcs(unsigned long flags,
static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
{
if (ns)
- atomic_inc(&ns->count);
+ refcount_inc(&ns->ns.count);
return ns;
}
+static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
+{
+ if (ns) {
+ if (refcount_inc_not_zero(&ns->ns.count))
+ return ns;
+ }
+
+ return NULL;
+}
+
extern void put_ipc_ns(struct ipc_namespace *ns);
#else
static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
@@ -138,6 +165,11 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
return ns;
}
+static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
+{
+ return ns;
+}
+
static inline void put_ipc_ns(struct ipc_namespace *ns)
{
}
@@ -145,15 +177,37 @@ static inline void put_ipc_ns(struct ipc_namespace *ns)
#ifdef CONFIG_POSIX_MQUEUE_SYSCTL
-struct ctl_table_header;
-extern struct ctl_table_header *mq_register_sysctl_table(void);
+void retire_mq_sysctls(struct ipc_namespace *ns);
+bool setup_mq_sysctls(struct ipc_namespace *ns);
#else /* CONFIG_POSIX_MQUEUE_SYSCTL */
-static inline struct ctl_table_header *mq_register_sysctl_table(void)
+static inline void retire_mq_sysctls(struct ipc_namespace *ns)
{
- return NULL;
+}
+
+static inline bool setup_mq_sysctls(struct ipc_namespace *ns)
+{
+ return true;
}
#endif /* CONFIG_POSIX_MQUEUE_SYSCTL */
+
+#ifdef CONFIG_SYSVIPC_SYSCTL
+
+bool setup_ipc_sysctls(struct ipc_namespace *ns);
+void retire_ipc_sysctls(struct ipc_namespace *ns);
+
+#else /* CONFIG_SYSVIPC_SYSCTL */
+
+static inline void retire_ipc_sysctls(struct ipc_namespace *ns)
+{
+}
+
+static inline bool setup_ipc_sysctls(struct ipc_namespace *ns)
+{
+ return true;
+}
+
+#endif /* CONFIG_SYSVIPC_SYSCTL */
#endif
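
get_ipc_ns_not_zero() is for lockless readers that reached the namespace
through another task and can therefore race with the final put_ipc_ns():
unlike get_ipc_ns(), it refuses to take a reference once the count has
dropped to zero. A sketch of the intended pattern:

    #include <linux/ipc_namespace.h>

    static struct ipc_namespace *pin_ipc_ns(struct ipc_namespace *candidate)
    {
            struct ipc_namespace *ns;

            /* NULL means the namespace is already being torn down. */
            ns = get_ipc_ns_not_zero(candidate);
            if (!ns)
                    return NULL;

            /* The caller must balance this with put_ipc_ns(ns). */
            return ns;
    }
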
diff --git a/include/linux/ipmi-fru.h b/include/linux/ipmi-fru.h
deleted file mode 100644
index 4d3a76380e32..000000000000
--- a/include/linux/ipmi-fru.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (C) 2012 CERN (www.cern.ch)
- * Author: Alessandro Rubini <rubini@gnudd.com>
- *
- * Released according to the GNU GPL, version 2 or any later version.
- *
- * This work is part of the White Rabbit project, a research effort led
- * by CERN, the European Institute for Nuclear Research.
- */
-#ifndef __LINUX_IPMI_FRU_H__
-#define __LINUX_IPMI_FRU_H__
-#ifdef __KERNEL__
-# include <linux/types.h>
-# include <linux/string.h>
-#else
-# include <stdint.h>
-# include <string.h>
-#endif
-
-/*
- * These structures match the unaligned crap we have in FRU1011.pdf
- * (http://download.intel.com/design/servers/ipmi/FRU1011.pdf)
- */
-
-/* chapter 8, page 5 */
-struct fru_common_header {
- uint8_t format; /* 0x01 */
- uint8_t internal_use_off; /* multiple of 8 bytes */
- uint8_t chassis_info_off; /* multiple of 8 bytes */
- uint8_t board_area_off; /* multiple of 8 bytes */
- uint8_t product_area_off; /* multiple of 8 bytes */
- uint8_t multirecord_off; /* multiple of 8 bytes */
- uint8_t pad; /* must be 0 */
- uint8_t checksum; /* sum modulo 256 must be 0 */
-};
-
-/* chapter 9, page 5 -- internal_use: not used by us */
-
-/* chapter 10, page 6 -- chassis info: not used by us */
-
-/* chapter 13, page 9 -- used by board_info_area below */
-struct fru_type_length {
- uint8_t type_length;
- uint8_t data[0];
-};
-
-/* chapter 11, page 7 */
-struct fru_board_info_area {
- uint8_t format; /* 0x01 */
- uint8_t area_len; /* multiple of 8 bytes */
- uint8_t language; /* I hope it's 0 */
- uint8_t mfg_date[3]; /* LSB, minutes since 1996-01-01 */
- struct fru_type_length tl[0]; /* type-length stuff follows */
-
- /*
- * the TL there are in order:
- * Board Manufacturer
- * Board Product Name
- * Board Serial Number
- * Board Part Number
- * FRU File ID (may be null)
- * more manufacturer-specific stuff
- * 0xc1 as a terminator
- * 0x00 pad to a multiple of 8 bytes - 1
- * checksum (sum of all stuff module 256 must be zero)
- */
-};
-
-enum fru_type {
- FRU_TYPE_BINARY = 0x00,
- FRU_TYPE_BCDPLUS = 0x40,
- FRU_TYPE_ASCII6 = 0x80,
- FRU_TYPE_ASCII = 0xc0, /* not ascii: depends on language */
-};
-
-/*
- * some helpers
- */
-static inline struct fru_board_info_area *fru_get_board_area(
- const struct fru_common_header *header)
-{
- /* we know for sure that the header is 8 bytes in size */
- return (struct fru_board_info_area *)(header + header->board_area_off);
-}
-
-static inline int fru_type(struct fru_type_length *tl)
-{
- return tl->type_length & 0xc0;
-}
-
-static inline int fru_length(struct fru_type_length *tl)
-{
- return (tl->type_length & 0x3f) + 1; /* len of whole record */
-}
-
-/* assume ascii-latin1 encoding */
-static inline int fru_strlen(struct fru_type_length *tl)
-{
- return fru_length(tl) - 1;
-}
-
-static inline char *fru_strcpy(char *dest, struct fru_type_length *tl)
-{
- int len = fru_strlen(tl);
- memcpy(dest, tl->data, len);
- dest[len] = '\0';
- return dest;
-}
-
-static inline struct fru_type_length *fru_next_tl(struct fru_type_length *tl)
-{
- return tl + fru_length(tl);
-}
-
-static inline int fru_is_eof(struct fru_type_length *tl)
-{
- return tl->type_length == 0xc1;
-}
-
-/*
- * External functions defined in fru-parse.c.
- */
-extern int fru_header_cksum_ok(struct fru_common_header *header);
-extern int fru_bia_cksum_ok(struct fru_board_info_area *bia);
-
-/* All these 4 return allocated strings by calling fru_alloc() */
-extern char *fru_get_board_manufacturer(struct fru_common_header *header);
-extern char *fru_get_product_name(struct fru_common_header *header);
-extern char *fru_get_serial_number(struct fru_common_header *header);
-extern char *fru_get_part_number(struct fru_common_header *header);
-
-/* This must be defined by the caller of the above functions */
-extern void *fru_alloc(size_t size);
-
-#endif /* __LINUX_IMPI_FRU_H__ */
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index f1045b2c6a00..a1c9c0d48ebf 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* ipmi.h
*
@@ -9,26 +10,6 @@
*
* Copyright 2002 MontaVista Software Inc.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_IPMI_H
#define __LINUX_IPMI_H
@@ -42,9 +23,11 @@
struct module;
struct device;
-/* Opaque type for a IPMI message user. One of these is needed to
- send and receive messages. */
-typedef struct ipmi_user *ipmi_user_t;
+/*
+ * Opaque type for an IPMI message user. One of these is needed to
+ * send and receive messages.
+ */
+struct ipmi_user;
/*
* Stuff coming from the receive interface comes as one of these.
@@ -56,83 +39,119 @@ typedef struct ipmi_user *ipmi_user_t;
struct ipmi_recv_msg {
struct list_head link;
- /* The type of message as defined in the "Receive Types"
- defines above. */
+ /*
+ * The type of message as defined in the "Receive Types"
+ * defines above.
+ */
int recv_type;
- ipmi_user_t user;
+ struct ipmi_user *user;
struct ipmi_addr addr;
long msgid;
struct kernel_ipmi_msg msg;
- /* The user_msg_data is the data supplied when a message was
- sent, if this is a response to a sent message. If this is
- not a response to a sent message, then user_msg_data will
- be NULL. If the user above is NULL, then this will be the
- intf. */
+ /*
+ * The user_msg_data is the data supplied when a message was
+ * sent, if this is a response to a sent message. If this is
+ * not a response to a sent message, then user_msg_data will
+ * be NULL. If the user above is NULL, then this will be the
+ * intf.
+ */
void *user_msg_data;
- /* Call this when done with the message. It will presumably free
- the message and do any other necessary cleanup. */
+ /*
+ * Call this when done with the message. It will presumably free
+ * the message and do any other necessary cleanup.
+ */
void (*done)(struct ipmi_recv_msg *msg);
- /* Place-holder for the data, don't make any assumptions about
- the size or existence of this, since it may change. */
+ /*
+ * Place-holder for the data, don't make any assumptions about
+ * the size or existence of this, since it may change.
+ */
unsigned char msg_data[IPMI_MAX_MSG_LENGTH];
};
+#define INIT_IPMI_RECV_MSG(done_handler) \
+{ \
+ .done = done_handler \
+}
+
/* Allocate and free the receive message. */
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg);
struct ipmi_user_hndl {
- /* Routine type to call when a message needs to be routed to
- the upper layer. This will be called with some locks held,
- the only IPMI routines that can be called are ipmi_request
- and the alloc/free operations. The handler_data is the
- variable supplied when the receive handler was registered. */
+ /*
+ * Routine type to call when a message needs to be routed to
+ * the upper layer. This will be called with some locks held,
+ * the only IPMI routines that can be called are ipmi_request
+ * and the alloc/free operations. The handler_data is the
+ * variable supplied when the receive handler was registered.
+ */
void (*ipmi_recv_hndl)(struct ipmi_recv_msg *msg,
void *user_msg_data);
- /* Called when the interface detects a watchdog pre-timeout. If
- this is NULL, it will be ignored for the user. */
+ /*
+ * Called when the interface detects a watchdog pre-timeout. If
+ * this is NULL, it will be ignored for the user.
+ */
void (*ipmi_watchdog_pretimeout)(void *handler_data);
+
+ /*
+ * If not NULL, called at panic time after the interface has
+ * been set up to handle run to completion.
+ */
+ void (*ipmi_panic_handler)(void *handler_data);
+
+ /*
+ * Called when the interface has been removed. After this returns
+ * the user handle will be invalid. The interface may or may
+ * not be usable when this is called, but it will return errors
+ * if it is not usable.
+ */
+ void (*shutdown)(void *handler_data);
};
/* Create a new user of the IPMI layer on the given interface number. */
int ipmi_create_user(unsigned int if_num,
const struct ipmi_user_hndl *handler,
void *handler_data,
- ipmi_user_t *user);
+ struct ipmi_user **user);
-/* Destroy the given user of the IPMI layer. Note that after this
- function returns, the system is guaranteed to not call any
- callbacks for the user. Thus as long as you destroy all the users
- before you unload a module, you will be safe. And if you destroy
- the users before you destroy the callback structures, it should be
- safe, too. */
-int ipmi_destroy_user(ipmi_user_t user);
+/*
+ * Destroy the given user of the IPMI layer. Note that after this
+ * function returns, the system is guaranteed to not call any
+ * callbacks for the user. Thus as long as you destroy all the users
+ * before you unload a module, you will be safe. And if you destroy
+ * the users before you destroy the callback structures, it should be
+ * safe, too.
+ */
+int ipmi_destroy_user(struct ipmi_user *user);
/* Get the IPMI version of the BMC we are talking to. */
-void ipmi_get_version(ipmi_user_t user,
- unsigned char *major,
- unsigned char *minor);
-
-/* Set and get the slave address and LUN that we will use for our
- source messages. Note that this affects the interface, not just
- this user, so it will affect all users of this interface. This is
- so some initialization code can come in and do the OEM-specific
- things it takes to determine your address (if not the BMC) and set
- it for everyone else. Note that each channel can have its own address. */
-int ipmi_set_my_address(ipmi_user_t user,
+int ipmi_get_version(struct ipmi_user *user,
+ unsigned char *major,
+ unsigned char *minor);
+
+/*
+ * Set and get the slave address and LUN that we will use for our
+ * source messages. Note that this affects the interface, not just
+ * this user, so it will affect all users of this interface. This is
+ * so some initialization code can come in and do the OEM-specific
+ * things it takes to determine your address (if not the BMC) and set
+ * it for everyone else. Note that each channel can have its own
+ * address.
+ */
+int ipmi_set_my_address(struct ipmi_user *user,
unsigned int channel,
unsigned char address);
-int ipmi_get_my_address(ipmi_user_t user,
+int ipmi_get_my_address(struct ipmi_user *user,
unsigned int channel,
unsigned char *address);
-int ipmi_set_my_LUN(ipmi_user_t user,
+int ipmi_set_my_LUN(struct ipmi_user *user,
unsigned int channel,
unsigned char LUN);
-int ipmi_get_my_LUN(ipmi_user_t user,
+int ipmi_get_my_LUN(struct ipmi_user *user,
unsigned int channel,
unsigned char *LUN);
@@ -149,7 +168,7 @@ int ipmi_get_my_LUN(ipmi_user_t user,
* it makes no sense to do it here. However, this can be used if you
* have unusual requirements.
*/
-int ipmi_request_settime(ipmi_user_t user,
+int ipmi_request_settime(struct ipmi_user *user,
struct ipmi_addr *addr,
long msgid,
struct kernel_ipmi_msg *msg,
@@ -167,7 +186,7 @@ int ipmi_request_settime(ipmi_user_t user,
* change as the system changes, so don't use it unless you REALLY
* have to.
*/
-int ipmi_request_supply_msgs(ipmi_user_t user,
+int ipmi_request_supply_msgs(struct ipmi_user *user,
struct ipmi_addr *addr,
long msgid,
struct kernel_ipmi_msg *msg,
@@ -183,7 +202,7 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
* way. This is useful if you need to spin waiting for something to
* happen in the IPMI driver.
*/
-void ipmi_poll_interface(ipmi_user_t user);
+void ipmi_poll_interface(struct ipmi_user *user);
/*
* When commands come in to the SMS, the user can register to receive
@@ -194,11 +213,11 @@ void ipmi_poll_interface(ipmi_user_t user);
* error. Channels are specified as a bitfield, use IPMI_CHAN_ALL to
* mean all channels.
*/
-int ipmi_register_for_cmd(ipmi_user_t user,
+int ipmi_register_for_cmd(struct ipmi_user *user,
unsigned char netfn,
unsigned char cmd,
unsigned int chans);
-int ipmi_unregister_for_cmd(ipmi_user_t user,
+int ipmi_unregister_for_cmd(struct ipmi_user *user,
unsigned char netfn,
unsigned char cmd,
unsigned int chans);
@@ -229,8 +248,8 @@ int ipmi_unregister_for_cmd(ipmi_user_t user,
*
* See the IPMI_MAINTENANCE_MODE_xxx defines for what the mode means.
*/
-int ipmi_get_maintenance_mode(ipmi_user_t user);
-int ipmi_set_maintenance_mode(ipmi_user_t user, int mode);
+int ipmi_get_maintenance_mode(struct ipmi_user *user);
+int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode);
/*
* When the user is created, it will not receive IPMI events by
@@ -238,7 +257,7 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode);
* The first user that sets this to TRUE will receive all events that
* have been queued while no one was waiting for events.
*/
-int ipmi_set_gets_events(ipmi_user_t user, bool val);
+int ipmi_set_gets_events(struct ipmi_user *user, bool val);
/*
* Called when a new SMI is registered. This will also be called on
@@ -248,14 +267,18 @@ int ipmi_set_gets_events(ipmi_user_t user, bool val);
struct ipmi_smi_watcher {
struct list_head link;
- /* You must set the owner to the current module, if you are in
- a module (generally just set it to "THIS_MODULE"). */
+ /*
+ * You must set the owner to the current module, if you are in
+ * a module (generally just set it to "THIS_MODULE").
+ */
struct module *owner;
- /* These two are called with read locks held for the interface
- the watcher list. So you can add and remove users from the
- IPMI interface, send messages, etc., but you cannot add
- or remove SMI watchers or SMI interfaces. */
+ /*
+ * These two are called with read locks held for the interface
+ * and the watcher list. So you can add and remove users from the
+ * IPMI interface, send messages, etc., but you cannot add
+ * or remove SMI watchers or SMI interfaces.
+ */
void (*new_smi)(int if_num, struct device *dev);
void (*smi_gone)(int if_num);
};
@@ -263,8 +286,10 @@ struct ipmi_smi_watcher {
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher);
int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher);
-/* The following are various helper functions for dealing with IPMI
- addresses. */
+/*
+ * The following are various helper functions for dealing with IPMI
+ * addresses.
+ */
/* Return the maximum length of an IPMI address given it's type. */
unsigned int ipmi_addr_length(int addr_type);
@@ -277,7 +302,7 @@ int ipmi_validate_addr(struct ipmi_addr *addr, int len);
*/
enum ipmi_addr_src {
SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
- SI_PCI, SI_DEVICETREE, SI_LAST
+ SI_PCI, SI_DEVICETREE, SI_PLATFORM, SI_LAST
};
const char *ipmi_addr_src_to_str(enum ipmi_addr_src src);
@@ -310,7 +335,12 @@ struct ipmi_smi_info {
union ipmi_smi_info_union addr_info;
};
-/* This is to get the private info of ipmi_smi_t */
+/* This is to get the private info of struct ipmi_smi */
extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data);
+#define GET_DEVICE_ID_MAX_RETRY 5
+
+/* Helper function for computing the IPMB checksum of some data. */
+unsigned char ipmb_checksum(unsigned char *data, int size);
+
#endif /* __LINUX_IPMI_H */
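
With the ipmi_user_t typedef gone, clients simply hold a struct ipmi_user
pointer. A minimal sketch of attaching a user to interface 0; the handler
body is illustrative:

    #include <linux/ipmi.h>

    static void example_recv(struct ipmi_recv_msg *msg, void *user_msg_data)
    {
            /* A real handler would look at msg->recv_type and msg->msg. */
            ipmi_free_recv_msg(msg);
    }

    static const struct ipmi_user_hndl example_hndl = {
            .ipmi_recv_hndl = example_recv,
    };

    static struct ipmi_user *example_user;

    static int example_attach(void)
    {
            return ipmi_create_user(0, &example_hndl, NULL, &example_user);
    }

    static void example_detach(void)
    {
            ipmi_destroy_user(example_user);
    }
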
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index f8cea14485dd..5d69820d8b02 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* ipmi_smi.h
*
@@ -9,26 +10,6 @@
*
* Copyright 2002 MontaVista Software Inc.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_IPMI_SMI_H
@@ -41,11 +22,74 @@
struct device;
-/* This files describes the interface for IPMI system management interface
- drivers to bind into the IPMI message handler. */
+/*
+ * This file describes the interface for IPMI system management interface
+ * drivers to bind into the IPMI message handler.
+ */
/* Structure for the low-level drivers. */
-typedef struct ipmi_smi *ipmi_smi_t;
+struct ipmi_smi;
+
+/*
+ * Flags for set_check_watch() below. Tells if the SMI should be
+ * waiting for watchdog timeouts, commands and/or messages.
+ */
+#define IPMI_WATCH_MASK_CHECK_MESSAGES (1 << 0)
+#define IPMI_WATCH_MASK_CHECK_WATCHDOG (1 << 1)
+#define IPMI_WATCH_MASK_CHECK_COMMANDS (1 << 2)
+
+/*
+ * SMI messages
+ *
+ * When communicating with an SMI, messages come in two formats:
+ *
+ * * Normal (to a BMC over a BMC interface)
+ *
+ * * IPMB (over an IPMB to another MC)
+ *
+ * When normal, commands are sent using the format defined by a
+ * standard message over KCS (NetFn must be even):
+ *
+ * +-----------+-----+------+
+ * | NetFn/LUN | Cmd | Data |
+ * +-----------+-----+------+
+ *
+ * And responses, similarly, with a completion code added (NetFn must
+ * be odd):
+ *
+ * +-----------+-----+------+------+
+ * | NetFn/LUN | Cmd | CC | Data |
+ * +-----------+-----+------+------+
+ *
+ * With normal messages, only commands are sent and only responses are
+ * received.
+ *
+ * In IPMB mode, we are acting as an IPMB device. Commands will be in
+ * the following format (NetFn must be even):
+ *
+ * +-------------+------+-------------+-----+------+
+ * | NetFn/rsLUN | Addr | rqSeq/rqLUN | Cmd | Data |
+ * +-------------+------+-------------+-----+------+
+ *
+ * Responses use the following format:
+ *
+ * +-------------+------+-------------+-----+------+------+
+ * | NetFn/rqLUN | Addr | rqSeq/rsLUN | Cmd | CC | Data |
+ * +-------------+------+-------------+-----+------+------+
+ *
+ * This is similar to the format defined in the IPMB manual section
+ * 2.11.1 with the checksums and the first address removed. Also, the
+ * address is always the remote address.
+ *
+ * IPMB messages can be commands and responses in both directions.
+ * Received commands are handled as received commands from the message
+ * queue.
+ */
+
+enum ipmi_smi_msg_type {
+ IPMI_SMI_MSG_TYPE_NORMAL = 0,
+ IPMI_SMI_MSG_TYPE_IPMB_DIRECT
+};
/*
* Messages to/from the lower layer. The smi interface will take one
@@ -63,6 +107,8 @@ typedef struct ipmi_smi *ipmi_smi_t;
struct ipmi_smi_msg {
struct list_head link;
+ enum ipmi_smi_msg_type type;
+
long msgid;
void *user_data;
@@ -72,20 +118,40 @@ struct ipmi_smi_msg {
int rsp_size;
unsigned char rsp[IPMI_MAX_MSG_LENGTH];
- /* Will be called when the system is done with the message
- (presumably to free it). */
+ /*
+ * Will be called when the system is done with the message
+ * (presumably to free it).
+ */
void (*done)(struct ipmi_smi_msg *msg);
};
+#define INIT_IPMI_SMI_MSG(done_handler) \
+{ \
+ .done = done_handler, \
+ .type = IPMI_SMI_MSG_TYPE_NORMAL \
+}
+
struct ipmi_smi_handlers {
struct module *owner;
- /* The low-level interface cannot start sending messages to
- the upper layer until this function is called. This may
- not be NULL, the lower layer must take the interface from
- this call. */
- int (*start_processing)(void *send_info,
- ipmi_smi_t new_intf);
+ /* Capabilities of the SMI. */
+#define IPMI_SMI_CAN_HANDLE_IPMB_DIRECT (1 << 0)
+ unsigned int flags;
+
+ /*
+ * The low-level interface cannot start sending messages to
+ * the upper layer until this function is called. This may
+ * not be NULL, the lower layer must take the interface from
+ * this call.
+ */
+ int (*start_processing)(void *send_info,
+ struct ipmi_smi *new_intf);
+
+ /*
+ * When called, the low-level interface should disable all
+ * processing; it should be completely shut down when it returns.
+ */
+ void (*shutdown)(void *send_info);
/*
* Get the detailed private info of the low level interface and store
@@ -94,56 +160,64 @@ struct ipmi_smi_handlers {
*/
int (*get_smi_info)(void *send_info, struct ipmi_smi_info *data);
- /* Called to enqueue an SMI message to be sent. This
- operation is not allowed to fail. If an error occurs, it
- should report back the error in a received message. It may
- do this in the current call context, since no write locks
- are held when this is run. Message are delivered one at
- a time by the message handler, a new message will not be
- delivered until the previous message is returned. */
+ /*
+ * Called to enqueue an SMI message to be sent. This
+ * operation is not allowed to fail. If an error occurs, it
+ * should report back the error in a received message. It may
+ * do this in the current call context, since no write locks
+ * are held when this is run. Messages are delivered one at
+ * a time by the message handler, a new message will not be
+ * delivered until the previous message is returned.
+ */
void (*sender)(void *send_info,
struct ipmi_smi_msg *msg);
- /* Called by the upper layer to request that we try to get
- events from the BMC we are attached to. */
+ /*
+ * Called by the upper layer to request that we try to get
+ * events from the BMC we are attached to.
+ */
void (*request_events)(void *send_info);
- /* Called by the upper layer when some user requires that the
- interface watch for events, received messages, watchdog
- pretimeouts, or not. Used by the SMI to know if it should
- watch for these. This may be NULL if the SMI does not
- implement it. */
- void (*set_need_watch)(void *send_info, bool enable);
+ /*
+ * Called by the upper layer when some user requires that the
+ * interface watch for received messages and watchdog
+ * pretimeouts (basically do a "Get Flags"), or not. Used by
+ * the SMI to know if it should watch for these. This may be
+ * NULL if the SMI does not implement it. watch_mask is from
+ * IPMI_WATCH_MASK_xxx above. The interface should run slower
+ * timeouts for just watchdog checking or faster timeouts when
+ * waiting for the message queue.
+ */
+ void (*set_need_watch)(void *send_info, unsigned int watch_mask);
/*
* Called when flushing all pending messages.
*/
void (*flush_messages)(void *send_info);
- /* Called when the interface should go into "run to
- completion" mode. If this call sets the value to true, the
- interface should make sure that all messages are flushed
- out and that none are pending, and any new requests are run
- to completion immediately. */
+ /*
+ * Called when the interface should go into "run to
+ * completion" mode. If this call sets the value to true, the
+ * interface should make sure that all messages are flushed
+ * out and that none are pending, and any new requests are run
+ * to completion immediately.
+ */
void (*set_run_to_completion)(void *send_info, bool run_to_completion);
- /* Called to poll for work to do. This is so upper layers can
- poll for operations during things like crash dumps. */
+ /*
+ * Called to poll for work to do. This is so upper layers can
+ * poll for operations during things like crash dumps.
+ */
void (*poll)(void *send_info);
- /* Enable/disable firmware maintenance mode. Note that this
- is *not* the modes defined, this is simply an on/off
- setting. The message handler does the mode handling. Note
- that this is called from interrupt context, so it cannot
- block. */
+ /*
+ * Enable/disable firmware maintenance mode. Note that this
+ * is *not* the modes defined, this is simply an on/off
+ * setting. The message handler does the mode handling. Note
+ * that this is called from interrupt context, so it cannot
+ * block.
+ */
void (*set_maintenance_mode)(void *send_info, bool enable);
-
- /* Tell the handler that we are using it/not using it. The
- message handler get the modules that this handler belongs
- to; this function lets the SMI claim any modules that it
- uses. These may be NULL if this is not required. */
- int (*inc_usecount)(void *send_info);
- void (*dec_usecount)(void *send_info);
};
struct ipmi_device_id {
@@ -162,27 +236,28 @@ struct ipmi_device_id {
#define ipmi_version_major(v) ((v)->ipmi_version & 0xf)
#define ipmi_version_minor(v) ((v)->ipmi_version >> 4)
-/* Take a pointer to a raw data buffer and a length and extract device
- id information from it. The first byte of data must point to the
- netfn << 2, the data should be of the format:
- netfn << 2, cmd, completion code, data
- as normally comes from a device interface. */
-static inline int ipmi_demangle_device_id(const unsigned char *data,
+/*
+ * Take a pointer to an IPMI response and extract device id information from
+ * it. @netfn is in the IPMI_NETFN_ format, so may need to be shifted from
+ * a SI response.
+ */
+static inline int ipmi_demangle_device_id(uint8_t netfn, uint8_t cmd,
+ const unsigned char *data,
unsigned int data_len,
struct ipmi_device_id *id)
{
- if (data_len < 9)
+ if (data_len < 7)
return -EINVAL;
- if (data[0] != IPMI_NETFN_APP_RESPONSE << 2 ||
- data[1] != IPMI_GET_DEVICE_ID_CMD)
+ if (netfn != IPMI_NETFN_APP_RESPONSE || cmd != IPMI_GET_DEVICE_ID_CMD)
/* Strange, didn't get the response we expected. */
return -EINVAL;
- if (data[2] != 0)
+ if (data[0] != 0)
/* That's odd, it shouldn't be able to fail. */
return -EINVAL;
- data += 3;
- data_len -= 3;
+ data++;
+ data_len--;
+
id->device_id = data[0];
id->device_revision = data[1];
id->firmware_revision_1 = data[2];
@@ -206,23 +281,28 @@ static inline int ipmi_demangle_device_id(const unsigned char *data,
return 0;
}
-/* Add a low-level interface to the IPMI driver. Note that if the
- interface doesn't know its slave address, it should pass in zero.
- The low-level interface should not deliver any messages to the
- upper layer until the start_processing() function in the handlers
- is called, and the lower layer must get the interface from that
- call. */
-int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
- void *send_info,
- struct ipmi_device_id *device_id,
- struct device *dev,
- unsigned char slave_addr);
+/*
+ * Add a low-level interface to the IPMI driver. Note that if the
+ * interface doesn't know its slave address, it should pass in zero.
+ * The low-level interface should not deliver any messages to the
+ * upper layer until the start_processing() function in the handlers
+ * is called, and the lower layer must get the interface from that
+ * call.
+ */
+int ipmi_add_smi(struct module *owner,
+ const struct ipmi_smi_handlers *handlers,
+ void *send_info,
+ struct device *dev,
+ unsigned char slave_addr);
+
+#define ipmi_register_smi(handlers, send_info, dev, slave_addr) \
+ ipmi_add_smi(THIS_MODULE, handlers, send_info, dev, slave_addr)
/*
* Remove a low-level interface from the IPMI driver. This will
* return an error if the interface is still in use by a user.
*/
-int ipmi_unregister_smi(ipmi_smi_t intf);
+void ipmi_unregister_smi(struct ipmi_smi *intf);
/*
* The lower layer reports received messages through this interface.
@@ -230,11 +310,11 @@ int ipmi_unregister_smi(ipmi_smi_t intf);
* the lower layer gets an error sending a message, it should format
* an error response in the message response.
*/
-void ipmi_smi_msg_received(ipmi_smi_t intf,
+void ipmi_smi_msg_received(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg);
/* The lower layer received a watchdog pre-timeout on interface. */
-void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf);
+void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf);
struct ipmi_smi_msg *ipmi_alloc_smi_msg(void);
static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg)
@@ -242,11 +322,4 @@ static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg)
msg->done(msg);
}
-/* Allow the lower layer to add things to the proc filesystem
- directory for this interface. Note that the entry will
- automatically be dstroyed when the interface is destroyed. */
-int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
- const struct file_operations *proc_ops,
- void *data);
-
#endif /* __LINUX_IPMI_SMI_H */
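
On the low-level driver side, shutdown() replaces the old
inc_usecount()/dec_usecount() pair and registration now records the owning
module. A skeletal registration under those assumptions, with all hardware
handling stubbed out:

    #include <linux/module.h>
    #include <linux/ipmi_smi.h>

    struct example_smi {
            struct ipmi_smi *intf;
    };

    static int example_start_processing(void *send_info,
                                        struct ipmi_smi *intf)
    {
            struct example_smi *smi = send_info;

            smi->intf = intf;       /* must keep the interface handle */
            return 0;
    }

    static void example_sender(void *send_info, struct ipmi_smi_msg *msg)
    {
            /* Must not fail; errors are reported as received messages. */
    }

    static void example_shutdown(void *send_info)
    {
            /* Stop all processing before returning. */
    }

    static const struct ipmi_smi_handlers example_handlers = {
            .owner                  = THIS_MODULE,
            .start_processing       = example_start_processing,
            .sender                 = example_sender,
            .shutdown               = example_shutdown,
    };

    static int example_register(struct example_smi *smi, struct device *dev)
    {
            /* 0x20 is the conventional BMC slave address; use 0 if unknown. */
            return ipmi_register_smi(&example_handlers, smi, dev, 0x20);
    }
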
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index e1b442996f81..839247a4f48e 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IPV6_H
#define _IPV6_H
@@ -30,6 +31,7 @@ struct ipv6_devconf {
__s32 max_desync_factor;
__s32 max_addresses;
__s32 accept_ra_defrtr;
+ __u32 ra_defrtr_metric;
__s32 accept_ra_min_hop_limit;
__s32 accept_ra_pinfo;
__s32 ignore_routes_with_linkdown;
@@ -49,7 +51,7 @@ struct ipv6_devconf {
__s32 use_optimistic;
#endif
#ifdef CONFIG_IPV6_MROUTE
- __s32 mc_forwarding;
+ atomic_t mc_forwarding;
#endif
__s32 disable_ipv6;
__s32 drop_unicast_in_l2_multicast;
@@ -59,6 +61,7 @@ struct ipv6_devconf {
__s32 suppress_frag_ndisc;
__s32 accept_ra_mtu;
__s32 drop_unsolicited_na;
+ __s32 accept_untracked_na;
struct ipv6_stable_secret {
bool initialized;
struct in6_addr secret;
@@ -72,6 +75,12 @@ struct ipv6_devconf {
__u32 enhanced_dad;
__u32 addr_gen_mode;
__s32 disable_policy;
+ __s32 ndisc_tclass;
+ __s32 rpl_seg_enabled;
+ __u32 ioam6_id;
+ __u32 ioam6_id_wide;
+ __u8 ioam6_enabled;
+ __u8 ndisc_evict_nocarrier;
struct ctl_table_header *sysctl_header;
};
@@ -81,7 +90,6 @@ struct ipv6_params {
__s32 autoconf;
};
extern struct ipv6_params ipv6_defaults;
-#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
@@ -102,6 +110,12 @@ static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb)
return (struct ipv6hdr *)skb_transport_header(skb);
}
+static inline unsigned int ipv6_transport_len(const struct sk_buff *skb)
+{
+ return ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr) -
+ skb_network_header_len(skb);
+}
+
/*
This structure contains results of exthdrs parsing
as offsets from skb->nh.
@@ -120,6 +134,7 @@ struct inet6_skb_parm {
__u16 dsthao;
#endif
__u16 frag_max_size;
+ __u16 srhoff;
#define IP6SKB_XFRM_TRANSFORMED 1
#define IP6SKB_FORWARDED 2
@@ -128,6 +143,9 @@ struct inet6_skb_parm {
#define IP6SKB_FRAGMENTED 16
#define IP6SKB_HOPBYHOP 32
#define IP6SKB_L3SLAVE 64
+#define IP6SKB_JUMBOGRAM 128
+#define IP6SKB_SEG6 256
+#define IP6SKB_FAKEJUMBO 512
};
#if defined(CONFIG_NET_L3_MASTER_DEV)
@@ -152,15 +170,19 @@ static inline int inet6_iif(const struct sk_buff *skb)
return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
}
+static inline bool inet6_is_jumbogram(const struct sk_buff *skb)
+{
+ return !!(IP6CB(skb)->flags & IP6SKB_JUMBOGRAM);
+}
+
/* can not be used in TCP layer after tcp_v6_fill_cb */
-static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
+static inline int inet6_sdif(const struct sk_buff *skb)
{
-#if defined(CONFIG_NET_L3_MASTER_DEV)
- if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
- skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
- return true;
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ if (skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
+ return IP6CB(skb)->iif;
#endif
- return false;
+ return 0;
}
struct tcp6_request_sock {
@@ -198,7 +220,7 @@ struct ipv6_pinfo {
/*
* Packed in 16bits.
- * Omit one shift by by putting the signed field at MSB.
+ * Omit one shift by putting the signed field at MSB.
*/
#if defined(__BIG_ENDIAN_BITFIELD)
__s16 hop_limit:9;
@@ -255,13 +277,16 @@ struct ipv6_pinfo {
* 100: prefer care-of address
*/
dontfrag:1,
- autoflowlabel:1;
+ autoflowlabel:1,
+ autoflowlabel_set:1,
+ mc_all:1,
+ recverr_rfc4884:1,
+ rtalert_isolate:1;
__u8 min_hopcount;
__u8 tclass;
__be32 rcv_flowinfo;
__u32 dst_cookie;
- __u32 rx_dst_cookie;
struct ipv6_mc_socklist __rcu *ipv6_mc_list;
struct ipv6_ac_socklist *ipv6_ac_list;
@@ -311,24 +336,9 @@ static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk)
return sk_fullsock(__sk) ? inet_sk(__sk)->pinet6 : NULL;
}
-static inline struct raw6_sock *raw6_sk(const struct sock *sk)
-{
- return (struct raw6_sock *)sk;
-}
-
-static inline void inet_sk_copy_descendant(struct sock *sk_to,
- const struct sock *sk_from)
-{
- int ancestor_size = sizeof(struct inet_sock);
-
- if (sk_from->sk_family == PF_INET6)
- ancestor_size += sizeof(struct ipv6_pinfo);
-
- __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
-}
+#define raw6_sk(ptr) container_of_const(ptr, struct raw6_sock, inet.sk)
-#define __ipv6_only_sock(sk) (sk->sk_ipv6only)
-#define ipv6_only_sock(sk) (__ipv6_only_sock(sk))
+#define ipv6_only_sock(sk) (sk->sk_ipv6only)
#define ipv6_sk_rxinfo(sk) ((sk)->sk_family == PF_INET6 && \
inet6_sk(sk)->rxopt.bits.rxinfo)
@@ -345,7 +355,6 @@ static inline int inet_v6_ipv6only(const struct sock *sk)
return ipv6_only_sock(sk);
}
#else
-#define __ipv6_only_sock(sk) 0
#define ipv6_only_sock(sk) 0
#define ipv6_sk_rxinfo(sk) 0
@@ -359,19 +368,12 @@ static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
return NULL;
}
-static inline struct inet6_request_sock *
- inet6_rsk(const struct request_sock *rsk)
-{
- return NULL;
-}
-
static inline struct raw6_sock *raw6_sk(const struct sock *sk)
{
return NULL;
}
#define inet6_rcv_saddr(__sk) NULL
-#define tcp_twsk_ipv6only(__sk) 0
#define inet_v6_ipv6only(__sk) 0
#endif /* IS_ENABLED(CONFIG_IPV6) */
#endif /* _IPV6_H */
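
A short sketch of the new IPv6 receive helpers; the jumbogram check matters
because payload_len is zero for (fake) jumbograms, so ipv6_transport_len()
cannot be trusted there. The fallback shown is illustrative:

    #include <linux/ipv6.h>
    #include <linux/skbuff.h>

    static unsigned int example_l4_len(const struct sk_buff *skb)
    {
            /* payload_len is 0 for jumbograms. */
            if (inet6_is_jumbogram(skb))
                    return skb->len - skb_transport_offset(skb);

            return ipv6_transport_len(skb);
    }
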
diff --git a/include/linux/ipv6_route.h b/include/linux/ipv6_route.h
index 25b5f1f5e780..5711e246c39c 100644
--- a/include/linux/ipv6_route.h
+++ b/include/linux/ipv6_route.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Linux INET6 implementation
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_IPV6_ROUTE_H
#define _LINUX_IPV6_ROUTE_H
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 00db35b61e9e..b1b28affb32a 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IRQ_H
#define _LINUX_IRQ_H
@@ -9,18 +10,13 @@
* Thanks. --rmk
*/
-#include <linux/smp.h>
-#include <linux/linkage.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
-#include <linux/gfp.h>
#include <linux/irqhandler.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
-#include <linux/errno.h>
#include <linux/topology.h>
-#include <linux/wait.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -31,6 +27,7 @@
struct seq_file;
struct module;
struct msi_msg;
+struct irq_affinity_desc;
enum irqchip_irq_state;
/*
@@ -74,6 +71,8 @@ enum irqchip_irq_state;
* it from the spurious interrupt detection
* mechanism and from core side polling.
* IRQ_DISABLE_UNLAZY - Disable lazy irq disable
+ * IRQ_HIDDEN - Don't show up in /proc/interrupts
+ * IRQ_NO_DEBUG - Exclude from note_interrupt() debugging
*/
enum {
IRQ_TYPE_NONE = 0x00000000,
@@ -100,13 +99,15 @@ enum {
IRQ_PER_CPU_DEVID = (1 << 17),
IRQ_IS_POLLED = (1 << 18),
IRQ_DISABLE_UNLAZY = (1 << 19),
+ IRQ_HIDDEN = (1 << 20),
+ IRQ_NO_DEBUG = (1 << 21),
};
#define IRQF_MODIFY_MASK \
(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
- IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
+ IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_HIDDEN)
#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
@@ -117,7 +118,7 @@ enum {
 * IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_common_data.affinity
* IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to
* support stacked irqchips, which indicates skipping
- * all descendent irqchips.
+ * all descendant irqchips.
*/
enum {
IRQ_SET_MASK_OK = 0,
@@ -150,7 +151,9 @@ struct irq_common_data {
#endif
void *handler_data;
struct msi_desc *msi_desc;
+#ifdef CONFIG_SMP
cpumask_var_t affinity;
+#endif
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
cpumask_var_t effective_affinity;
#endif
@@ -198,7 +201,7 @@ struct irq_data {
* IRQD_LEVEL - Interrupt is level triggered
* IRQD_WAKEUP_STATE - Interrupt is configured for wakeup
* from suspend
- * IRDQ_MOVE_PCNTXT - Interrupt can be moved in process
+ * IRQD_MOVE_PCNTXT - Interrupt can be moved in process
* context
* IRQD_IRQ_DISABLED - Disabled state of the interrupt
* IRQD_IRQ_MASKED - Masked state of the interrupt
@@ -210,6 +213,16 @@ struct irq_data {
* IRQD_MANAGED_SHUTDOWN - Interrupt was shutdown due to empty affinity
* mask. Applies only to affinity managed irqs.
* IRQD_SINGLE_TARGET - IRQ allows only a single affinity target
+ * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set
+ * IRQD_CAN_RESERVE - Can use reservation mode
+ * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change
+ * required
+ * IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked
+ * from actual interrupt context.
+ * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call
+ * irq_chip::irq_set_affinity() when deactivated.
+ * IRQD_IRQ_ENABLED_ON_SUSPEND - Interrupt is enabled on suspend by irq pm if
+ * irqchip have flag IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND set.
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
@@ -230,6 +243,12 @@ enum {
IRQD_IRQ_STARTED = (1 << 22),
IRQD_MANAGED_SHUTDOWN = (1 << 23),
IRQD_SINGLE_TARGET = (1 << 24),
+ IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
+ IRQD_CAN_RESERVE = (1 << 26),
+ IRQD_MSI_NOMASK_QUIRK = (1 << 27),
+ IRQD_HANDLE_ENFORCE_IRQCTX = (1 << 28),
+ IRQD_AFFINITY_ON_ACTIVATE = (1 << 29),
+ IRQD_IRQ_ENABLED_ON_SUSPEND = (1 << 30),
};
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -259,18 +278,25 @@ static inline void irqd_mark_affinity_was_set(struct irq_data *d)
__irqd_to_state(d) |= IRQD_AFFINITY_SET;
}
+static inline bool irqd_trigger_type_was_set(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_DEFAULT_TRIGGER_SET;
+}
+
static inline u32 irqd_get_trigger_type(struct irq_data *d)
{
return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
}
/*
- * Must only be called inside irq_chip.irq_set_type() functions.
+ * Must only be called inside irq_chip.irq_set_type() functions or
+ * from the DT/ACPI setup code.
*/
static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
{
__irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
__irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
+ __irqd_to_state(d) |= IRQD_DEFAULT_TRIGGER_SET;
}
static inline bool irqd_is_level_type(struct irq_data *d)
@@ -280,7 +306,7 @@ static inline bool irqd_is_level_type(struct irq_data *d)
/*
* Must only be called of irqchip.irq_set_affinity() or low level
- * hieararchy domain allocation functions.
+ * hierarchy domain allocation functions.
*/
static inline void irqd_set_single_target(struct irq_data *d)
{
@@ -292,6 +318,21 @@ static inline bool irqd_is_single_target(struct irq_data *d)
return __irqd_to_state(d) & IRQD_SINGLE_TARGET;
}
+static inline void irqd_set_handle_enforce_irqctx(struct irq_data *d)
+{
+ __irqd_to_state(d) |= IRQD_HANDLE_ENFORCE_IRQCTX;
+}
+
+static inline bool irqd_is_handle_enforce_irqctx(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_HANDLE_ENFORCE_IRQCTX;
+}
+
+static inline bool irqd_is_enabled_on_suspend(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_IRQ_ENABLED_ON_SUSPEND;
+}
+
static inline bool irqd_is_wakeup_set(struct irq_data *d)
{
return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
@@ -367,6 +408,46 @@ static inline bool irqd_is_managed_and_shutdown(struct irq_data *d)
return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN;
}
+static inline void irqd_set_can_reserve(struct irq_data *d)
+{
+ __irqd_to_state(d) |= IRQD_CAN_RESERVE;
+}
+
+static inline void irqd_clr_can_reserve(struct irq_data *d)
+{
+ __irqd_to_state(d) &= ~IRQD_CAN_RESERVE;
+}
+
+static inline bool irqd_can_reserve(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_CAN_RESERVE;
+}
+
+static inline void irqd_set_msi_nomask_quirk(struct irq_data *d)
+{
+ __irqd_to_state(d) |= IRQD_MSI_NOMASK_QUIRK;
+}
+
+static inline void irqd_clr_msi_nomask_quirk(struct irq_data *d)
+{
+ __irqd_to_state(d) &= ~IRQD_MSI_NOMASK_QUIRK;
+}
+
+static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
+}
+
+static inline void irqd_set_affinity_on_activate(struct irq_data *d)
+{
+ __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
+}
+
+static inline bool irqd_affinity_on_activate(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
+}
+
#undef __irqd_to_state
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
@@ -377,7 +458,6 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
/**
* struct irq_chip - hardware interrupt chip descriptor
*
- * @parent_device: pointer to parent device for irqchip
* @name: name for /proc/interrupts
* @irq_startup: start up the interrupt (defaults to ->enable if NULL)
* @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL)
@@ -388,7 +468,12 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @irq_mask_ack: ack and mask an interrupt source
* @irq_unmask: unmask an interrupt source
* @irq_eoi: end of interrupt
- * @irq_set_affinity: set the CPU affinity on SMP machines
+ * @irq_set_affinity: Set the CPU affinity on SMP machines. If the force
+ * argument is true, it tells the driver to
+ * unconditionally apply the affinity setting. Sanity
+ * checks against the supplied affinity mask are not
+ * required. This is used for CPU hotplug where the
+ * target CPU is not yet set in the cpu_online_mask.
* @irq_retrigger: resend an IRQ to the CPU
* @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
* @irq_set_wake: enable/disable power-management wake-on of an IRQ
@@ -414,10 +499,11 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine
* @ipi_send_single: send a single IPI to destination cpus
* @ipi_send_mask: send an IPI to destination cpus in cpumask
+ * @irq_nmi_setup: function called from core code before enabling an NMI
+ * @irq_nmi_teardown: function called from core code after disabling an NMI
* @flags: chip specific flags
*/
struct irq_chip {
- struct device *parent_device;
const char *name;
unsigned int (*irq_startup)(struct irq_data *data);
void (*irq_shutdown)(struct irq_data *data);
@@ -438,9 +524,10 @@ struct irq_chip {
void (*irq_bus_lock)(struct irq_data *data);
void (*irq_bus_sync_unlock)(struct irq_data *data);
+#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
void (*irq_cpu_online)(struct irq_data *data);
void (*irq_cpu_offline)(struct irq_data *data);
-
+#endif
void (*irq_suspend)(struct irq_data *data);
void (*irq_resume)(struct irq_data *data);
void (*irq_pm_shutdown)(struct irq_data *data);
@@ -462,29 +549,43 @@ struct irq_chip {
void (*ipi_send_single)(struct irq_data *data, unsigned int cpu);
void (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest);
+ int (*irq_nmi_setup)(struct irq_data *data);
+ void (*irq_nmi_teardown)(struct irq_data *data);
+
unsigned long flags;
};
/*
* irq_chip specific flags
*
- * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type()
- * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled
- * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path
- * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks
- * when irq enabled
- * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip
- * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask
- * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode
+ * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type()
+ * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled
+ * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path
+ * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks
+ * when irq enabled
+ * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip
+ * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask
+ * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode
+ * IRQCHIP_SUPPORTS_LEVEL_MSI: Chip can provide two doorbells for Level MSIs
+ * IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips
+ * IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND: Invokes __enable_irq()/__disable_irq() for wake irqs
+ * in the suspend path if they are in disabled state
+ * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup
+ * IRQCHIP_IMMUTABLE: Don't ever change anything in this chip
*/
enum {
- IRQCHIP_SET_TYPE_MASKED = (1 << 0),
- IRQCHIP_EOI_IF_HANDLED = (1 << 1),
- IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
- IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
- IRQCHIP_SKIP_SET_WAKE = (1 << 4),
- IRQCHIP_ONESHOT_SAFE = (1 << 5),
- IRQCHIP_EOI_THREADED = (1 << 6),
+ IRQCHIP_SET_TYPE_MASKED = (1 << 0),
+ IRQCHIP_EOI_IF_HANDLED = (1 << 1),
+ IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
+ IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
+ IRQCHIP_SKIP_SET_WAKE = (1 << 4),
+ IRQCHIP_ONESHOT_SAFE = (1 << 5),
+ IRQCHIP_EOI_THREADED = (1 << 6),
+ IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7),
+ IRQCHIP_SUPPORTS_NMI = (1 << 8),
+ IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = (1 << 9),
+ IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10),
+ IRQCHIP_IMMUTABLE = (1 << 11),
};
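
A hedged sketch (chip and callbacks hypothetical) of an irq_chip that
opts into two of the flags documented above:

	static void foo_irq_mask(struct irq_data *d)   { /* mask in HW */ }
	static void foo_irq_unmask(struct irq_data *d) { /* unmask in HW */ }

	static const struct irq_chip foo_chip = {
		.name		= "foo",
		.irq_mask	= foo_irq_mask,
		.irq_unmask	= foo_irq_unmask,
		/* Mask non-wake irqs over suspend; never modify this chip */
		.flags		= IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
	};
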
#include <linux/irqdesc.h>
@@ -505,13 +606,13 @@ enum {
#define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS
struct irqaction;
-extern int setup_irq(unsigned int irq, struct irqaction *new);
-extern void remove_irq(unsigned int irq, struct irqaction *act);
extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
+#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
+#endif
extern int irq_set_affinity_locked(struct irq_data *data,
const struct cpumask *cpumask, bool force);
extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);
@@ -524,7 +625,12 @@ extern int irq_affinity_online_cpu(unsigned int cpu);
#endif
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
-void irq_move_irq(struct irq_data *data);
+void __irq_move_irq(struct irq_data *data);
+static inline void irq_move_irq(struct irq_data *data)
+{
+ if (unlikely(irqd_is_setaffinity_pending(data)))
+ __irq_move_irq(data);
+}
void irq_move_masked_irq(struct irq_data *data);
void irq_force_complete_move(struct irq_desc *desc);
#else
@@ -559,15 +665,27 @@ extern void handle_percpu_devid_irq(struct irq_desc *desc);
extern void handle_bad_irq(struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);
+extern void handle_fasteoi_nmi(struct irq_desc *desc);
+extern void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc);
+
extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
extern int irq_chip_pm_get(struct irq_data *data);
extern int irq_chip_pm_put(struct irq_data *data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
+extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
+extern int irq_chip_set_parent_state(struct irq_data *data,
+ enum irqchip_irq_state which,
+ bool val);
+extern int irq_chip_get_parent_state(struct irq_data *data,
+ enum irqchip_irq_state which,
+ bool *state);
extern void irq_chip_enable_parent(struct irq_data *data);
extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data);
extern int irq_chip_retrigger_hierarchy(struct irq_data *data);
extern void irq_chip_mask_parent(struct irq_data *data);
+extern void irq_chip_mask_ack_parent(struct irq_data *data);
extern void irq_chip_unmask_parent(struct irq_data *data);
extern void irq_chip_eoi_parent(struct irq_data *data);
extern int irq_chip_set_affinity_parent(struct irq_data *data,
@@ -577,6 +695,8 @@ extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
void *vcpu_info);
extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
+extern int irq_chip_request_resources_parent(struct irq_data *data);
+extern void irq_chip_release_resources_parent(struct irq_data *data);
#endif
/* Handling of unhandled and spurious interrupts: */
@@ -594,10 +714,11 @@ extern struct irq_chip no_irq_chip;
extern struct irq_chip dummy_irq_chip;
extern void
-irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip,
irq_flow_handler_t handle, const char *name);
-static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
+static inline void irq_set_chip_and_handler(unsigned int irq,
+ const struct irq_chip *chip,
irq_flow_handler_t handle)
{
irq_set_chip_and_handler_name(irq, chip, handle, NULL);
@@ -687,7 +808,7 @@ static inline void irq_set_percpu_devid_flags(unsigned int irq)
}
/* Set/get chip/data for an IRQ: */
-extern int irq_set_chip(unsigned int irq, struct irq_chip *chip);
+extern int irq_set_chip(unsigned int irq, const struct irq_chip *chip);
extern int irq_set_handler_data(unsigned int irq, void *data);
extern int irq_set_chip_data(unsigned int irq, void *data);
extern int irq_set_irq_type(unsigned int irq, unsigned int type);
@@ -760,21 +881,34 @@ static inline int irq_data_get_node(struct irq_data *d)
return irq_common_data_get_node(d->common);
}
-static inline struct cpumask *irq_get_affinity_mask(int irq)
+static inline
+const struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
{
- struct irq_data *d = irq_get_irq_data(irq);
+#ifdef CONFIG_SMP
+ return d->common->affinity;
+#else
+ return cpumask_of(0);
+#endif
+}
- return d ? d->common->affinity : NULL;
+static inline void irq_data_update_affinity(struct irq_data *d,
+ const struct cpumask *m)
+{
+#ifdef CONFIG_SMP
+ cpumask_copy(d->common->affinity, m);
+#endif
}
-static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
+static inline const struct cpumask *irq_get_affinity_mask(int irq)
{
- return d->common->affinity;
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ return d ? irq_data_get_affinity_mask(d) : NULL;
}
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static inline
-struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
+const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
return d->common->effective_affinity;
}
@@ -789,27 +923,36 @@ static inline void irq_data_update_effective_affinity(struct irq_data *d,
{
}
static inline
-struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
+const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
- return d->common->affinity;
+ return irq_data_get_affinity_mask(d);
}
#endif
+static inline
+const struct cpumask *irq_get_effective_affinity_mask(unsigned int irq)
+{
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ return d ? irq_data_get_effective_affinity_mask(d) : NULL;
+}
+
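
For illustration (caller hypothetical), querying where an interrupt is
actually delivered with the new accessor, which falls back to the
programmed affinity mask when no effective mask is tracked:

	static void show_irq_target(unsigned int irq)
	{
		const struct cpumask *m = irq_get_effective_affinity_mask(irq);

		if (m)
			pr_info("irq %u targets CPUs %*pbl\n",
				irq, cpumask_pr_args(m));
	}
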
unsigned int arch_dynirq_lower_bound(unsigned int from);
int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
- struct module *owner, const struct cpumask *affinity);
+ struct module *owner,
+ const struct irq_affinity_desc *affinity);
int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
unsigned int cnt, int node, struct module *owner,
- const struct cpumask *affinity);
+ const struct irq_affinity_desc *affinity);
/* use macros to avoid needing export.h for THIS_MODULE */
#define irq_alloc_descs(irq, from, cnt, node) \
__irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)
#define irq_alloc_desc(node) \
- irq_alloc_descs(-1, 0, 1, node)
+ irq_alloc_descs(-1, 1, 1, node)
#define irq_alloc_desc_at(at, node) \
irq_alloc_descs(at, at, 1, node)
@@ -824,7 +967,7 @@ int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
__devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL)
#define devm_irq_alloc_desc(dev, node) \
- devm_irq_alloc_descs(dev, -1, 0, 1, node)
+ devm_irq_alloc_descs(dev, -1, 1, 1, node)
#define devm_irq_alloc_desc_at(dev, at, node) \
devm_irq_alloc_descs(dev, at, at, 1, node)
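
A short usage sketch (function name hypothetical; note that dynamic
allocation now searches from descriptor 1, keeping irq 0 out of play):

	static int foo_setup_irqs(void)
	{
		int base = irq_alloc_descs(-1, 1, 4, NUMA_NO_NODE);

		if (base < 0)
			return base;
		/* ... wire up the four descriptors ... */
		irq_free_descs(base, 4);
		return 0;
	}
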
@@ -841,21 +984,6 @@ static inline void irq_free_desc(unsigned int irq)
irq_free_descs(irq, 1);
}
-#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
-unsigned int irq_alloc_hwirqs(int cnt, int node);
-static inline unsigned int irq_alloc_hwirq(int node)
-{
- return irq_alloc_hwirqs(1, node);
-}
-void irq_free_hwirqs(unsigned int from, int cnt);
-static inline void irq_free_hwirq(unsigned int irq)
-{
- return irq_free_hwirqs(irq, 1);
-}
-int arch_setup_hwirq(unsigned int irq, int node);
-void arch_teardown_hwirq(unsigned int irq);
-#endif
-
#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq);
#endif
@@ -955,7 +1083,7 @@ struct irq_chip_generic {
unsigned long unused;
struct irq_domain *domain;
struct list_head list;
- struct irq_chip_type chip_types[0];
+ struct irq_chip_type chip_types[];
};
/**
@@ -991,7 +1119,7 @@ struct irq_domain_chip_generic {
unsigned int irq_flags_to_clear;
unsigned int irq_flags_to_set;
enum irq_gc_flags gc_flags;
- struct irq_chip_generic *gc[0];
+ struct irq_chip_generic *gc[];
};
/* Generic chip callback functions */
@@ -1002,13 +1130,14 @@ void irq_gc_mask_clr_bit(struct irq_data *d);
void irq_gc_unmask_enable_reg(struct irq_data *d);
void irq_gc_ack_set_bit(struct irq_data *d);
void irq_gc_ack_clr_bit(struct irq_data *d);
-void irq_gc_mask_disable_reg_and_ack(struct irq_data *d);
+void irq_gc_mask_disable_and_ack_set(struct irq_data *d);
void irq_gc_eoi(struct irq_data *d);
int irq_gc_set_wake(struct irq_data *d, unsigned int on);
/* Setup functions for irq_chip_generic */
int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw_irq);
+void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq);
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
void __iomem *reg_base, irq_flow_handler_t handler);
@@ -1106,6 +1235,29 @@ static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
return readl(gc->reg_base + reg_offset);
}
+struct irq_matrix;
+struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
+ unsigned int alloc_start,
+ unsigned int alloc_end);
+void irq_matrix_online(struct irq_matrix *m);
+void irq_matrix_offline(struct irq_matrix *m);
+void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
+int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
+void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+ unsigned int *mapped_cpu);
+void irq_matrix_reserve(struct irq_matrix *m);
+void irq_matrix_remove_reserved(struct irq_matrix *m);
+int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
+ bool reserved, unsigned int *mapped_cpu);
+void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
+ unsigned int bit, bool managed);
+void irq_matrix_assign(struct irq_matrix *m, unsigned int bit);
+unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown);
+unsigned int irq_matrix_allocated(struct irq_matrix *m);
+unsigned int irq_matrix_reserved(struct irq_matrix *m);
+void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind);
+
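
A hedged sketch of the matrix allocator's lifecycle, loosely modelled
on the x86 vector domain (bounds and error handling illustrative):

	struct irq_matrix *m;
	unsigned int cpu;
	int bit;

	m = irq_alloc_matrix(256, 32, 236);	/* illustrative bounds */
	irq_matrix_online(m);			/* on each CPU coming up */

	bit = irq_matrix_alloc(m, cpu_online_mask, false, &cpu);
	if (bit >= 0)				/* bit allocated on @cpu */
		irq_matrix_free(m, cpu, bit, false);
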
/* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */
#define INVALID_HWIRQ (~0UL)
irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu);
@@ -1114,4 +1266,34 @@ int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest);
int ipi_send_single(unsigned int virq, unsigned int cpu);
int ipi_send_mask(unsigned int virq, const struct cpumask *dest);
+void ipi_mux_process(void);
+int ipi_mux_create(unsigned int nr_ipi, void (*mux_send)(unsigned int cpu));
+
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
+/*
+ * Registers a generic IRQ handling function as the top-level IRQ handler in
+ * the system; this is generally the first C code called from the
+ * architecture-specific assembly interrupt handler.
+ *
+ * Returns 0 on success, or -EBUSY if an IRQ handler has already been
+ * registered.
+ */
+int __init set_handle_irq(void (*handle_irq)(struct pt_regs *));
+
+/*
+ * Allows interrupt handlers to find the irqchip that's been registered as the
+ * top-level IRQ handler.
+ */
+extern void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;
+asmlinkage void generic_handle_arch_irq(struct pt_regs *regs);
+#else
+#ifndef set_handle_irq
+#define set_handle_irq(handle_irq) \
+ do { \
+ (void)handle_irq; \
+ WARN_ON(1); \
+ } while (0)
+#endif
+#endif
+
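
A sketch (driver names hypothetical) of a root irqchip registering its
entry point via the multi-handler interface above:

	static void foo_handle_irq(struct pt_regs *regs)
	{
		/* ack the controller, then dispatch into the irqdomain */
	}

	static int __init foo_of_init(struct device_node *node,
				      struct device_node *parent)
	{
		return set_handle_irq(foo_handle_irq);
	}
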
#endif /* _LINUX_IRQ_H */
diff --git a/include/linux/irq_cpustat.h b/include/linux/irq_cpustat.h
deleted file mode 100644
index 77e4bac29287..000000000000
--- a/include/linux/irq_cpustat.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __irq_cpustat_h
-#define __irq_cpustat_h
-
-/*
- * Contains default mappings for irq_cpustat_t, used by almost every
- * architecture. Some arch (like s390) have per cpu hardware pages and
- * they define their own mappings for irq_stat.
- *
- * Keith Owens <kaos@ocs.com.au> July 2000.
- */
-
-
-/*
- * Simple wrappers reducing source bloat. Define all irq_stat fields
- * here, even ones that are arch dependent. That way we get common
- * definitions instead of differing sets for each arch.
- */
-
-#ifndef __ARCH_IRQ_STAT
-extern irq_cpustat_t irq_stat[]; /* defined in asm/hardirq.h */
-#define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
-#endif
-
- /* arch independent irq_stat fields */
-#define local_softirq_pending() \
- __IRQ_STAT(smp_processor_id(), __softirq_pending)
-
- /* arch dependent irq_stat fields */
-#define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386 */
-
-#endif /* __irq_cpustat_h */
diff --git a/include/linux/irq_poll.h b/include/linux/irq_poll.h
index 3e8c1b8fb9be..16aaeccb65cb 100644
--- a/include/linux/irq_poll.h
+++ b/include/linux/irq_poll.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef IRQ_POLL_H
#define IRQ_POLL_H
diff --git a/include/linux/irq_sim.h b/include/linux/irq_sim.h
new file mode 100644
index 000000000000..ab831e5ae748
--- /dev/null
+++ b/include/linux/irq_sim.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2017-2018 Bartosz Golaszewski <brgl@bgdev.pl>
+ * Copyright (C) 2020 Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ */
+
+#ifndef _LINUX_IRQ_SIM_H
+#define _LINUX_IRQ_SIM_H
+
+#include <linux/device.h>
+#include <linux/fwnode.h>
+#include <linux/irqdomain.h>
+
+/*
+ * Provides a framework for allocating simulated interrupts which can be
+ * requested like normal irqs and enqueued from process context.
+ */
+
+struct irq_domain *irq_domain_create_sim(struct fwnode_handle *fwnode,
+ unsigned int num_irqs);
+struct irq_domain *devm_irq_domain_create_sim(struct device *dev,
+ struct fwnode_handle *fwnode,
+ unsigned int num_irqs);
+void irq_domain_remove_sim(struct irq_domain *domain);
+
+#endif /* _LINUX_IRQ_SIM_H */
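
A usage sketch (fwnode and handler assumed to exist elsewhere): create
a simulated domain, map one of its lines, and fire it from process
context by marking it pending:

	struct irq_domain *dom = irq_domain_create_sim(fwnode, 8);
	int virq = irq_create_mapping(dom, 3);

	request_irq(virq, test_handler, 0, "irq-sim-test", NULL);
	irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, true);
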
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 47b9ebd4a74f..8cd11a223260 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -1,7 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IRQ_WORK_H
#define _LINUX_IRQ_WORK_H
-#include <linux/llist.h>
+#include <linux/smp_types.h>
+#include <linux/rcuwait.h>
/*
* An entry can be in one of four states:
@@ -12,31 +14,48 @@
* busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed
*/
-#define IRQ_WORK_PENDING 1UL
-#define IRQ_WORK_BUSY 2UL
-#define IRQ_WORK_FLAGS 3UL
-#define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
-
struct irq_work {
- unsigned long flags;
- struct llist_node llnode;
+ struct __call_single_node node;
void (*func)(struct irq_work *);
+ struct rcuwait irqwait;
};
+#define __IRQ_WORK_INIT(_func, _flags) (struct irq_work){ \
+ .node = { .u_flags = (_flags), }, \
+ .func = (_func), \
+ .irqwait = __RCUWAIT_INITIALIZER(irqwait), \
+}
+
+#define IRQ_WORK_INIT(_func) __IRQ_WORK_INIT(_func, 0)
+#define IRQ_WORK_INIT_LAZY(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_LAZY)
+#define IRQ_WORK_INIT_HARD(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_HARD_IRQ)
+
+#define DEFINE_IRQ_WORK(name, _f) \
+ struct irq_work name = IRQ_WORK_INIT(_f)
+
static inline
void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
{
- work->flags = 0;
- work->func = func;
+ *work = IRQ_WORK_INIT(func);
}
-#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }
+static inline bool irq_work_is_pending(struct irq_work *work)
+{
+ return atomic_read(&work->node.a_flags) & IRQ_WORK_PENDING;
+}
-bool irq_work_queue(struct irq_work *work);
+static inline bool irq_work_is_busy(struct irq_work *work)
+{
+ return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY;
+}
-#ifdef CONFIG_SMP
+static inline bool irq_work_is_hard(struct irq_work *work)
+{
+ return atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ;
+}
+
+bool irq_work_queue(struct irq_work *work);
bool irq_work_queue_on(struct irq_work *work, int cpu);
-#endif
void irq_work_tick(void);
void irq_work_sync(struct irq_work *work);
@@ -46,9 +65,11 @@ void irq_work_sync(struct irq_work *work);
void irq_work_run(void);
bool irq_work_needs_cpu(void);
+void irq_work_single(void *arg);
#else
static inline bool irq_work_needs_cpu(void) { return false; }
static inline void irq_work_run(void) { }
+static inline void irq_work_single(void *arg) { }
#endif
#endif /* _LINUX_IRQ_WORK_H */
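
A minimal sketch (callback hypothetical) of deferring work out of a
restricted context, e.g. an NMI, using the reworked initializers:

	static void my_irq_work_cb(struct irq_work *w)
	{
		/* runs in interrupt context on the queueing CPU */
	}
	static DEFINE_IRQ_WORK(my_work, my_irq_work_cb);

	/* from NMI or other restricted context: */
	irq_work_queue(&my_work);
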
diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h
index f0f5d2671509..9bdb2a781841 100644
--- a/include/linux/irqbypass.h
+++ b/include/linux/irqbypass.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* IRQ offload/bypass manager
*
* Copyright (C) 2015 Red Hat, Inc.
* Copyright (c) 2015 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef IRQBYPASS_H
#define IRQBYPASS_H
diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h
index 89c34b200671..d5e6024cb2a8 100644
--- a/include/linux/irqchip.h
+++ b/include/linux/irqchip.h
@@ -12,25 +12,61 @@
#define _LINUX_IRQCHIP_H
#include <linux/acpi.h>
+#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+/* Undefined on purpose */
+extern of_irq_init_cb_t typecheck_irq_init_cb;
+
+#define typecheck_irq_init_cb(fn) \
+ (__typecheck(typecheck_irq_init_cb, &fn) ? fn : fn)
/*
* This macro must be used by the different irqchip drivers to declare
* the association between their DT compatible string and their
* initialization function.
*
- * @name: name that must be unique accross all IRQCHIP_DECLARE of the
+ * @name: name that must be unique across all IRQCHIP_DECLARE of the
* same file.
- * @compstr: compatible string of the irqchip driver
+ * @compat: compatible string of the irqchip driver
* @fn: initialization function
*/
-#define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn)
+#define IRQCHIP_DECLARE(name, compat, fn) \
+ OF_DECLARE_2(irqchip, name, compat, typecheck_irq_init_cb(fn))
+
+extern int platform_irqchip_probe(struct platform_device *pdev);
+
+#define IRQCHIP_PLATFORM_DRIVER_BEGIN(drv_name) \
+static const struct of_device_id drv_name##_irqchip_match_table[] = {
+
+#define IRQCHIP_MATCH(compat, fn) { .compatible = compat, \
+ .data = typecheck_irq_init_cb(fn), },
+
+
+#define IRQCHIP_PLATFORM_DRIVER_END(drv_name, ...) \
+ {}, \
+}; \
+MODULE_DEVICE_TABLE(of, drv_name##_irqchip_match_table); \
+static struct platform_driver drv_name##_driver = { \
+ .probe = IS_ENABLED(CONFIG_IRQCHIP) ? \
+ platform_irqchip_probe : NULL, \
+ .driver = { \
+ .name = #drv_name, \
+ .owner = THIS_MODULE, \
+ .of_match_table = drv_name##_irqchip_match_table, \
+ .suppress_bind_attrs = true, \
+ __VA_ARGS__ \
+ }, \
+}; \
+builtin_platform_driver(drv_name##_driver)
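
How a driver is expected to instantiate the block above (compatible
string and init function hypothetical); the init callback has the
usual of_irq_init_cb_t signature:

	static int foo_intc_init(struct device_node *node,
				 struct device_node *parent)
	{
		/* map registers, create the irqdomain, ... */
		return 0;
	}

	IRQCHIP_PLATFORM_DRIVER_BEGIN(foo_intc)
	IRQCHIP_MATCH("vendor,foo-intc", foo_intc_init)
	IRQCHIP_PLATFORM_DRIVER_END(foo_intc)
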
/*
* This macro must be used by the different irqchip drivers to declare
* the association between their version and their initialization function.
*
- * @name: name that must be unique accross all IRQCHIP_ACPI_DECLARE of the
+ * @name: name that must be unique across all IRQCHIP_ACPI_DECLARE of the
* same file.
* @subtable: Subtable to be identified in MADT
* @validate: Function to be called on that subtable to check its validity.
@@ -39,8 +75,9 @@
* @fn: initialization function
*/
#define IRQCHIP_ACPI_DECLARE(name, subtable, validate, data, fn) \
- ACPI_DECLARE_PROBE_ENTRY(irqchip, name, ACPI_SIG_MADT, \
- subtable, validate, data, fn)
+ ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(irqchip, name, \
+ ACPI_SIG_MADT, subtable, \
+ validate, data, fn)
#ifdef CONFIG_IRQCHIP
void irqchip_init(void);
diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h
index c647b0547bcd..1177f3a1aed5 100644
--- a/include/linux/irqchip/arm-gic-common.h
+++ b/include/linux/irqchip/arm-gic-common.h
@@ -1,34 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/irqchip/arm-gic-common.h
*
* Copyright (C) 2016 ARM Limited, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_IRQCHIP_ARM_GIC_COMMON_H
#define __LINUX_IRQCHIP_ARM_GIC_COMMON_H
-#include <linux/types.h>
-#include <linux/ioport.h>
-
-enum gic_type {
- GIC_V2,
- GIC_V3,
-};
+#include <linux/irqchip/arm-vgic-info.h>
-struct gic_kvm_info {
- /* GIC type */
- enum gic_type type;
- /* Virtual CPU interface */
- struct resource vcpu;
- /* Interrupt number */
- unsigned int maint_irq;
- /* Virtual control interface */
- struct resource vctrl;
-};
+#define GICD_INT_DEF_PRI 0xa0
+#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
+ (GICD_INT_DEF_PRI << 16) |\
+ (GICD_INT_DEF_PRI << 8) |\
+ GICD_INT_DEF_PRI)
-const struct gic_kvm_info *gic_get_kvm_info(void);
+struct irq_domain;
+struct fwnode_handle;
+int gicv2m_init(struct fwnode_handle *parent_handle,
+ struct irq_domain *parent);
#endif /* __LINUX_IRQCHIP_ARM_GIC_COMMON_H */
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 6a1f87ff94e2..728691365464 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
* Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H
#define __LINUX_IRQCHIP_ARM_GIC_V3_H
@@ -25,12 +13,12 @@
#define GICD_CTLR 0x0000
#define GICD_TYPER 0x0004
#define GICD_IIDR 0x0008
+#define GICD_TYPER2 0x000C
#define GICD_STATUSR 0x0010
#define GICD_SETSPI_NSR 0x0040
#define GICD_CLRSPI_NSR 0x0048
#define GICD_SETSPI_SR 0x0050
#define GICD_CLRSPI_SR 0x0058
-#define GICD_SEIR 0x0068
#define GICD_IGROUPR 0x0080
#define GICD_ISENABLER 0x0100
#define GICD_ICENABLER 0x0180
@@ -42,10 +30,22 @@
#define GICD_ICFGR 0x0C00
#define GICD_IGRPMODR 0x0D00
#define GICD_NSACR 0x0E00
+#define GICD_IGROUPRnE 0x1000
+#define GICD_ISENABLERnE 0x1200
+#define GICD_ICENABLERnE 0x1400
+#define GICD_ISPENDRnE 0x1600
+#define GICD_ICPENDRnE 0x1800
+#define GICD_ISACTIVERnE 0x1A00
+#define GICD_ICACTIVERnE 0x1C00
+#define GICD_IPRIORITYRnE 0x2000
+#define GICD_ICFGRnE 0x3000
#define GICD_IROUTER 0x6000
+#define GICD_IROUTERnE 0x8000
#define GICD_IDREGS 0xFFD0
#define GICD_PIDR2 0xFFE8
+#define ESPI_BASE_INTID 4096
+
/*
* Those registers are actually from GICv2, but the spec demands that they
* are implemented as RES0 if ARE is 1 (which we do in KVM's emulated GICv3).
@@ -56,11 +56,22 @@
#define GICD_SPENDSGIR 0x0F20
#define GICD_CTLR_RWP (1U << 31)
+#define GICD_CTLR_nASSGIreq (1U << 8)
#define GICD_CTLR_DS (1U << 6)
#define GICD_CTLR_ARE_NS (1U << 4)
#define GICD_CTLR_ENABLE_G1A (1U << 1)
#define GICD_CTLR_ENABLE_G1 (1U << 0)
+#define GICD_IIDR_IMPLEMENTER_SHIFT 0
+#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT)
+#define GICD_IIDR_REVISION_SHIFT 12
+#define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT)
+#define GICD_IIDR_VARIANT_SHIFT 16
+#define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT)
+#define GICD_IIDR_PRODUCT_ID_SHIFT 24
+#define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT)
+
+
/*
* In systems with a single security state (what we emulate in KVM)
* the meaning of the interrupt group enable bits is slightly different
@@ -68,11 +79,20 @@
#define GICD_CTLR_ENABLE_SS_G1 (1U << 1)
#define GICD_CTLR_ENABLE_SS_G0 (1U << 0)
+#define GICD_TYPER_RSS (1U << 26)
#define GICD_TYPER_LPIS (1U << 17)
#define GICD_TYPER_MBIS (1U << 16)
+#define GICD_TYPER_ESPI (1U << 8)
#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
-#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32)
+#define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1)
+#define GICD_TYPER_SPIS(typer) ((((typer) & 0x1f) + 1) * 32)
+#define GICD_TYPER_ESPIS(typer) \
+ (((typer) & GICD_TYPER_ESPI) ? GICD_TYPER_SPIS((typer) >> 27) : 0)
+
+#define GICD_TYPER2_nASSGIcap (1U << 8)
+#define GICD_TYPER2_VIL (1U << 7)
+#define GICD_TYPER2_VID GENMASK(4, 0)
#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
#define GICD_IROUTER_SPI_MODE_ANY (1U << 31)
@@ -83,6 +103,11 @@
#define GIC_V3_DIST_SIZE 0x10000
+#define GIC_PAGE_SIZE_4K 0ULL
+#define GIC_PAGE_SIZE_16K 1ULL
+#define GIC_PAGE_SIZE_64K 2ULL
+#define GIC_PAGE_SIZE_MASK 3ULL
+
/*
* Re-Distributor registers, offsets from RD_base
*/
@@ -93,21 +118,33 @@
#define GICR_WAKER 0x0014
#define GICR_SETLPIR 0x0040
#define GICR_CLRLPIR 0x0048
-#define GICR_SEIR GICD_SEIR
#define GICR_PROPBASER 0x0070
#define GICR_PENDBASER 0x0078
#define GICR_INVLPIR 0x00A0
#define GICR_INVALLR 0x00B0
#define GICR_SYNCR 0x00C0
-#define GICR_MOVLPIR 0x0100
-#define GICR_MOVALLR 0x0110
#define GICR_IDREGS GICD_IDREGS
#define GICR_PIDR2 GICD_PIDR2
#define GICR_CTLR_ENABLE_LPIS (1UL << 0)
+#define GICR_CTLR_CES (1UL << 1)
+#define GICR_CTLR_IR (1UL << 2)
+#define GICR_CTLR_RWP (1UL << 3)
#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff)
+#define EPPI_BASE_INTID 1056
+
+#define GICR_TYPER_NR_PPIS(r) \
+ ({ \
+ unsigned int __ppinum = ((r) >> 27) & 0x1f; \
+ unsigned int __nr_ppis = 16; \
+ if (__ppinum == 1 || __ppinum == 2) \
+ __nr_ppis += __ppinum * 32; \
+ \
+ __nr_ppis; \
+ })
+
#define GICR_WAKER_ProcessorSleep (1U << 1)
#define GICR_WAKER_ChildrenAsleep (1U << 2)
@@ -152,7 +189,7 @@
#define GICR_PROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nCnB)
#define GICR_PROPBASER_nC GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nC)
#define GICR_PROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt)
-#define GICR_PROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt)
+#define GICR_PROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)
#define GICR_PROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWt)
#define GICR_PROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWb)
#define GICR_PROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWt)
@@ -179,7 +216,7 @@
#define GICR_PENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nCnB)
#define GICR_PENDBASER_nC GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nC)
#define GICR_PENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt)
-#define GICR_PENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt)
+#define GICR_PENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)
#define GICR_PENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWt)
#define GICR_PENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWb)
#define GICR_PENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWt)
@@ -204,7 +241,19 @@
#define GICR_TYPER_PLPIS (1U << 0)
#define GICR_TYPER_VLPIS (1U << 1)
+#define GICR_TYPER_DIRTY (1U << 2)
+#define GICR_TYPER_DirectLPIS (1U << 3)
#define GICR_TYPER_LAST (1U << 4)
+#define GICR_TYPER_RVPEID (1U << 7)
+#define GICR_TYPER_COMMON_LPI_AFF GENMASK_ULL(25, 24)
+#define GICR_TYPER_AFFINITY GENMASK_ULL(63, 32)
+
+#define GICR_INVLPIR_INTID GENMASK_ULL(31, 0)
+#define GICR_INVLPIR_VPEID GENMASK_ULL(47, 32)
+#define GICR_INVLPIR_V GENMASK_ULL(63, 63)
+
+#define GICR_INVALLR_VPEID GICR_INVLPIR_VPEID
+#define GICR_INVALLR_V GICR_INVLPIR_V
#define GIC_V3_REDIST_SIZE 0x20000
@@ -212,11 +261,108 @@
#define LPI_PROP_ENABLED (1 << 0)
/*
+ * Re-Distributor registers, offsets from VLPI_base
+ */
+#define GICR_VPROPBASER 0x0070
+
+#define GICR_VPROPBASER_IDBITS_MASK 0x1f
+
+#define GICR_VPROPBASER_SHAREABILITY_SHIFT (10)
+#define GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT (7)
+#define GICR_VPROPBASER_OUTER_CACHEABILITY_SHIFT (56)
+
+#define GICR_VPROPBASER_SHAREABILITY_MASK \
+ GIC_BASER_SHAREABILITY(GICR_VPROPBASER, SHAREABILITY_MASK)
+#define GICR_VPROPBASER_INNER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, MASK)
+#define GICR_VPROPBASER_OUTER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GICR_VPROPBASER, OUTER, MASK)
+#define GICR_VPROPBASER_CACHEABILITY_MASK \
+ GICR_VPROPBASER_INNER_CACHEABILITY_MASK
+
+#define GICR_VPROPBASER_InnerShareable \
+ GIC_BASER_SHAREABILITY(GICR_VPROPBASER, InnerShareable)
+
+#define GICR_VPROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB)
+#define GICR_VPROPBASER_nC GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC)
+#define GICR_VPROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt)
+#define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWb)
+#define GICR_VPROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt)
+#define GICR_VPROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb)
+#define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt)
+#define GICR_VPROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWb)
+
+/*
+ * GICv4.1 VPROPBASER reinvention: a subtle mix between the old
+ * VPROPBASER and ITS_BASER, though not quite either of the two.
+ */
+#define GICR_VPROPBASER_4_1_VALID (1ULL << 63)
+#define GICR_VPROPBASER_4_1_ENTRY_SIZE GENMASK_ULL(61, 59)
+#define GICR_VPROPBASER_4_1_INDIRECT (1ULL << 55)
+#define GICR_VPROPBASER_4_1_PAGE_SIZE GENMASK_ULL(54, 53)
+#define GICR_VPROPBASER_4_1_Z (1ULL << 52)
+#define GICR_VPROPBASER_4_1_ADDR GENMASK_ULL(51, 12)
+#define GICR_VPROPBASER_4_1_SIZE GENMASK_ULL(6, 0)
+
+#define GICR_VPENDBASER 0x0078
+
+#define GICR_VPENDBASER_SHAREABILITY_SHIFT (10)
+#define GICR_VPENDBASER_INNER_CACHEABILITY_SHIFT (7)
+#define GICR_VPENDBASER_OUTER_CACHEABILITY_SHIFT (56)
+#define GICR_VPENDBASER_SHAREABILITY_MASK \
+ GIC_BASER_SHAREABILITY(GICR_VPENDBASER, SHAREABILITY_MASK)
+#define GICR_VPENDBASER_INNER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, MASK)
+#define GICR_VPENDBASER_OUTER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GICR_VPENDBASER, OUTER, MASK)
+#define GICR_VPENDBASER_CACHEABILITY_MASK \
+ GICR_VPENDBASER_INNER_CACHEABILITY_MASK
+
+#define GICR_VPENDBASER_NonShareable \
+ GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable)
+
+#define GICR_VPENDBASER_InnerShareable \
+ GIC_BASER_SHAREABILITY(GICR_VPENDBASER, InnerShareable)
+
+#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB)
+#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC)
+#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt)
+#define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWb)
+#define GICR_VPENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt)
+#define GICR_VPENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb)
+#define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt)
+#define GICR_VPENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWb)
+
+#define GICR_VPENDBASER_Dirty (1ULL << 60)
+#define GICR_VPENDBASER_PendingLast (1ULL << 61)
+#define GICR_VPENDBASER_IDAI (1ULL << 62)
+#define GICR_VPENDBASER_Valid (1ULL << 63)
+
+/*
+ * GICv4.1 VPENDBASER, used for VPE residency. On top of these fields,
+ * also use the above Valid, PendingLast and Dirty.
+ */
+#define GICR_VPENDBASER_4_1_DB (1ULL << 62)
+#define GICR_VPENDBASER_4_1_VGRP0EN (1ULL << 59)
+#define GICR_VPENDBASER_4_1_VGRP1EN (1ULL << 58)
+#define GICR_VPENDBASER_4_1_VPEID GENMASK_ULL(15, 0)
+
+#define GICR_VSGIR 0x0080
+
+#define GICR_VSGIR_VPEID GENMASK(15, 0)
+
+#define GICR_VSGIPENDR 0x0088
+
+#define GICR_VSGIPENDR_BUSY (1U << 31)
+#define GICR_VSGIPENDR_PENDING GENMASK(15, 0)
+
+/*
* ITS registers, offsets from ITS_base
*/
#define GITS_CTLR 0x0000
#define GITS_IIDR 0x0004
#define GITS_TYPER 0x0008
+#define GITS_MPIDR 0x0018
#define GITS_CBASER 0x0080
#define GITS_CWRITER 0x0088
#define GITS_CREADR 0x0090
@@ -233,16 +379,30 @@
#define GITS_TRANSLATER 0x10040
+#define GITS_SGIR 0x20020
+
+#define GITS_SGIR_VPEID GENMASK_ULL(47, 32)
+#define GITS_SGIR_VINTID GENMASK_ULL(3, 0)
+
#define GITS_CTLR_ENABLE (1U << 0)
+#define GITS_CTLR_ImDe (1U << 1)
+#define GITS_CTLR_ITS_NUMBER_SHIFT 4
+#define GITS_CTLR_ITS_NUMBER (0xFU << GITS_CTLR_ITS_NUMBER_SHIFT)
#define GITS_CTLR_QUIESCENT (1U << 31)
#define GITS_TYPER_PLPIS (1UL << 0)
+#define GITS_TYPER_VLPIS (1UL << 1)
#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4
+#define GITS_TYPER_ITT_ENTRY_SIZE GENMASK_ULL(7, 4)
#define GITS_TYPER_IDBITS_SHIFT 8
#define GITS_TYPER_DEVBITS_SHIFT 13
-#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
+#define GITS_TYPER_DEVBITS GENMASK_ULL(17, 13)
#define GITS_TYPER_PTA (1UL << 19)
-#define GITS_TYPER_HWCOLLCNT_SHIFT 24
+#define GITS_TYPER_HCC_SHIFT 24
+#define GITS_TYPER_HCC(r) (((r) >> GITS_TYPER_HCC_SHIFT) & 0xff)
+#define GITS_TYPER_VMOVP (1ULL << 37)
+#define GITS_TYPER_VMAPP (1ULL << 40)
+#define GITS_TYPER_SVPET GENMASK_ULL(42, 41)
#define GITS_IIDR_REV_SHIFT 12
#define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT)
@@ -267,12 +427,14 @@
#define GITS_CBASER_nCnB GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nCnB)
#define GITS_CBASER_nC GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nC)
#define GITS_CBASER_RaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt)
-#define GITS_CBASER_RaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt)
+#define GITS_CBASER_RaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWb)
#define GITS_CBASER_WaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWt)
#define GITS_CBASER_WaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWb)
#define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt)
#define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb)
+#define GITS_CBASER_ADDRESS(cbaser) ((cbaser) & GENMASK_ULL(51, 12))
+
#define GITS_BASER_NR_REGS 8
#define GITS_BASER_VALID (1ULL << 63)
@@ -291,7 +453,7 @@
#define GITS_BASER_nCnB GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nCnB)
#define GITS_BASER_nC GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nC)
#define GITS_BASER_RaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt)
-#define GITS_BASER_RaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt)
+#define GITS_BASER_RaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)
#define GITS_BASER_WaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWt)
#define GITS_BASER_WaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWb)
#define GITS_BASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWt)
@@ -302,14 +464,20 @@
#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48)
+#define GITS_BASER_PHYS_52_to_48(phys) \
+ (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12)
+#define GITS_BASER_ADDR_48_to_52(baser) \
+ (((baser) & GENMASK_ULL(47, 16)) | (((baser) >> 12) & 0xf) << 48)
+
#define GITS_BASER_SHAREABILITY_SHIFT (10)
#define GITS_BASER_InnerShareable \
GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
#define GITS_BASER_PAGE_SIZE_SHIFT (8)
-#define GITS_BASER_PAGE_SIZE_4K (0ULL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_16K (1ULL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_64K (2ULL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_MASK (3ULL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define __GITS_BASER_PSZ(sz) (GIC_PAGE_SIZE_ ## sz << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_4K __GITS_BASER_PSZ(4K)
+#define GITS_BASER_PAGE_SIZE_16K __GITS_BASER_PSZ(16K)
+#define GITS_BASER_PAGE_SIZE_64K __GITS_BASER_PSZ(64K)
+#define GITS_BASER_PAGE_SIZE_MASK __GITS_BASER_PSZ(MASK)
#define GITS_BASER_PAGES_MAX 256
#define GITS_BASER_PAGES_SHIFT (0)
#define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1)
@@ -342,6 +510,20 @@
#define GITS_CMD_SYNC 0x05
/*
+ * GICv4 ITS specific commands
+ */
+#define GITS_CMD_GICv4(x) ((x) | 0x20)
+#define GITS_CMD_VINVALL GITS_CMD_GICv4(GITS_CMD_INVALL)
+#define GITS_CMD_VMAPP GITS_CMD_GICv4(GITS_CMD_MAPC)
+#define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI)
+#define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI)
+#define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC)
+/* VMOVP, VSGI and INVDB are the odd ones, as they don't have a physical counterpart */
+#define GITS_CMD_VMOVP GITS_CMD_GICv4(2)
+#define GITS_CMD_VSGI GITS_CMD_GICv4(3)
+#define GITS_CMD_INVDB GITS_CMD_GICv4(0xe)
+
+/*
* ITS error numbers
*/
#define E_ITS_MOVI_UNMAPPED_INTERRUPT 0x010107
@@ -369,6 +551,8 @@
#define ICC_CTLR_EL1_EOImode_MASK (1 << ICC_CTLR_EL1_EOImode_SHIFT)
#define ICC_CTLR_EL1_CBPR_SHIFT 0
#define ICC_CTLR_EL1_CBPR_MASK (1 << ICC_CTLR_EL1_CBPR_SHIFT)
+#define ICC_CTLR_EL1_PMHE_SHIFT 6
+#define ICC_CTLR_EL1_PMHE_MASK (1 << ICC_CTLR_EL1_PMHE_SHIFT)
#define ICC_CTLR_EL1_PRI_BITS_SHIFT 8
#define ICC_CTLR_EL1_PRI_BITS_MASK (0x7 << ICC_CTLR_EL1_PRI_BITS_SHIFT)
#define ICC_CTLR_EL1_ID_BITS_SHIFT 11
@@ -377,6 +561,8 @@
#define ICC_CTLR_EL1_SEIS_MASK (0x1 << ICC_CTLR_EL1_SEIS_SHIFT)
#define ICC_CTLR_EL1_A3V_SHIFT 15
#define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT)
+#define ICC_CTLR_EL1_RSS (0x1 << 18)
+#define ICC_CTLR_EL1_ExtRange (0x1 << 19)
#define ICC_PMR_EL1_SHIFT 0
#define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT)
#define ICC_BPR0_EL1_SHIFT 0
@@ -391,66 +577,11 @@
#define ICC_SRE_EL1_DFB (1U << 1)
#define ICC_SRE_EL1_SRE (1U << 0)
-/*
- * Hypervisor interface registers (SRE only)
- */
-#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1)
-
-#define ICH_LR_EOI (1ULL << 41)
-#define ICH_LR_GROUP (1ULL << 60)
-#define ICH_LR_HW (1ULL << 61)
-#define ICH_LR_STATE (3ULL << 62)
-#define ICH_LR_PENDING_BIT (1ULL << 62)
-#define ICH_LR_ACTIVE_BIT (1ULL << 63)
-#define ICH_LR_PHYS_ID_SHIFT 32
-#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT)
-#define ICH_LR_PRIORITY_SHIFT 48
-#define ICH_LR_PRIORITY_MASK (0xffULL << ICH_LR_PRIORITY_SHIFT)
-
/* These are for GICv2 emulation only */
#define GICH_LR_VIRTUALID (0x3ffUL << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
-#define ICH_MISR_EOI (1 << 0)
-#define ICH_MISR_U (1 << 1)
-
-#define ICH_HCR_EN (1 << 0)
-#define ICH_HCR_UIE (1 << 1)
-#define ICH_HCR_TC (1 << 10)
-#define ICH_HCR_TALL0 (1 << 11)
-#define ICH_HCR_TALL1 (1 << 12)
-#define ICH_HCR_EOIcount_SHIFT 27
-#define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT)
-
-#define ICH_VMCR_ACK_CTL_SHIFT 2
-#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT)
-#define ICH_VMCR_FIQ_EN_SHIFT 3
-#define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT)
-#define ICH_VMCR_CBPR_SHIFT 4
-#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT)
-#define ICH_VMCR_EOIM_SHIFT 9
-#define ICH_VMCR_EOIM_MASK (1 << ICH_VMCR_EOIM_SHIFT)
-#define ICH_VMCR_BPR1_SHIFT 18
-#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT)
-#define ICH_VMCR_BPR0_SHIFT 21
-#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT)
-#define ICH_VMCR_PMR_SHIFT 24
-#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT)
-#define ICH_VMCR_ENG0_SHIFT 0
-#define ICH_VMCR_ENG0_MASK (1 << ICH_VMCR_ENG0_SHIFT)
-#define ICH_VMCR_ENG1_SHIFT 1
-#define ICH_VMCR_ENG1_MASK (1 << ICH_VMCR_ENG1_SHIFT)
-
-#define ICH_VTR_PRI_BITS_SHIFT 29
-#define ICH_VTR_PRI_BITS_MASK (7 << ICH_VTR_PRI_BITS_SHIFT)
-#define ICH_VTR_ID_BITS_SHIFT 23
-#define ICH_VTR_ID_BITS_MASK (7 << ICH_VTR_ID_BITS_SHIFT)
-#define ICH_VTR_SEIS_SHIFT 22
-#define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT)
-#define ICH_VTR_A3V_SHIFT 21
-#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT)
-
#define ICC_IAR1_EL1_SPURIOUS 0x3ff
#define ICC_SRE_EL2_SRE (1 << 0)
@@ -465,6 +596,8 @@
#define ICC_SGI1R_AFFINITY_2_SHIFT 32
#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
+#define ICC_SGI1R_RS_SHIFT 44
+#define ICC_SGI1R_RS_MASK (0xfULL << ICC_SGI1R_RS_SHIFT)
#define ICC_SGI1R_AFFINITY_3_SHIFT 48
#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
@@ -480,20 +613,33 @@
struct rdists {
struct {
+ raw_spinlock_t rd_lock;
void __iomem *rd_base;
struct page *pend_page;
phys_addr_t phys_base;
+ u64 flags;
+ cpumask_t *vpe_table_mask;
+ void *vpe_l1_base;
} __percpu *rdist;
- struct page *prop_page;
- int id_bits;
+ phys_addr_t prop_table_pa;
+ void *prop_table_va;
u64 flags;
+ u32 gicd_typer;
+ u32 gicd_typer2;
+ int cpuhp_memreserve_state;
+ bool has_vlpis;
+ bool has_rvpeid;
+ bool has_direct_lpi;
+ bool has_vpend_valid_dirty;
};
struct irq_domain;
struct fwnode_handle;
+int __init its_lpi_memreserve_init(void);
int its_cpu_init(void);
int its_init(struct fwnode_handle *handle, struct rdists *rdists,
struct irq_domain *domain);
+int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent);
static inline bool gic_enable_sre(void)
{
diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
new file mode 100644
index 000000000000..2c63375bbd43
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-v4.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016,2017 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ */
+
+#ifndef __LINUX_IRQCHIP_ARM_GIC_V4_H
+#define __LINUX_IRQCHIP_ARM_GIC_V4_H
+
+struct its_vpe;
+
+/*
+ * Maximum number of ITTs when GITS_TYPER.VMOVP == 0, using the
+ * ITSList mechanism to perform inter-ITS synchronization.
+ */
+#define GICv4_ITS_LIST_MAX 16
+
+/* Embedded in kvm.arch */
+struct its_vm {
+ struct fwnode_handle *fwnode;
+ struct irq_domain *domain;
+ struct page *vprop_page;
+ struct its_vpe **vpes;
+ int nr_vpes;
+ irq_hw_number_t db_lpi_base;
+ unsigned long *db_bitmap;
+ int nr_db_lpis;
+ u32 vlpi_count[GICv4_ITS_LIST_MAX];
+};
+
+/* Embedded in kvm_vcpu.arch */
+struct its_vpe {
+ struct page *vpt_page;
+ struct its_vm *its_vm;
+ /* per-vPE VLPI tracking */
+ atomic_t vlpi_count;
+ /* Doorbell interrupt */
+ int irq;
+ irq_hw_number_t vpe_db_lpi;
+ /* VPE resident */
+ bool resident;
+ /* VPT parse complete */
+ bool ready;
+ union {
+ /* GICv4.0 implementations */
+ struct {
+ /* VPE proxy mapping */
+ int vpe_proxy_event;
+ /* Implementation Defined Area Invalid */
+ bool idai;
+ };
+ /* GICv4.1 implementations */
+ struct {
+ struct fwnode_handle *fwnode;
+ struct irq_domain *sgi_domain;
+ struct {
+ u8 priority;
+ bool enabled;
+ bool group;
+ } sgi_config[16];
+ atomic_t vmapp_count;
+ };
+ };
+
+ /*
+ * Ensures mutual exclusion between affinity setting of the
+ * vPE and vLPI operations using vpe->col_idx.
+ */
+ raw_spinlock_t vpe_lock;
+ /*
+ * This collection ID is used to indirect the target
+ * redistributor for this VPE. The ID itself isn't involved in
+ * programming of the ITS.
+ */
+ u16 col_idx;
+ /* Unique (system-wide) VPE identifier */
+ u16 vpe_id;
+ /* Pending VLPIs on schedule out? */
+ bool pending_last;
+};
+
+/*
+ * struct its_vlpi_map: structure describing the mapping of a
+ * VLPI. Only to be interpreted in the context of a physical interrupt
+ * it complements. To be used as the vcpu_info passed to
+ * irq_set_vcpu_affinity().
+ *
+ * @vm: Pointer to the GICv4 notion of a VM
+ * @vpe: Pointer to the GICv4 notion of a virtual CPU (VPE)
+ * @vintid: Virtual LPI number
+ * @properties: Priority and enable bits (as written in the prop table)
+ * @db_enabled: Is the VPE doorbell to be generated?
+ */
+struct its_vlpi_map {
+ struct its_vm *vm;
+ struct its_vpe *vpe;
+ u32 vintid;
+ u8 properties;
+ bool db_enabled;
+};
+
+enum its_vcpu_info_cmd_type {
+ MAP_VLPI,
+ GET_VLPI,
+ PROP_UPDATE_VLPI,
+ PROP_UPDATE_AND_INV_VLPI,
+ SCHEDULE_VPE,
+ DESCHEDULE_VPE,
+ COMMIT_VPE,
+ INVALL_VPE,
+ PROP_UPDATE_VSGI,
+};
+
+struct its_cmd_info {
+ enum its_vcpu_info_cmd_type cmd_type;
+ union {
+ struct its_vlpi_map *map;
+ u8 config;
+ bool req_db;
+ struct {
+ bool g0en;
+ bool g1en;
+ };
+ struct {
+ u8 priority;
+ bool group;
+ };
+ };
+};
+
+int its_alloc_vcpu_irqs(struct its_vm *vm);
+void its_free_vcpu_irqs(struct its_vm *vm);
+int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en);
+int its_make_vpe_non_resident(struct its_vpe *vpe, bool db);
+int its_commit_vpe(struct its_vpe *vpe);
+int its_invall_vpe(struct its_vpe *vpe);
+int its_map_vlpi(int irq, struct its_vlpi_map *map);
+int its_get_vlpi(int irq, struct its_vlpi_map *map);
+int its_unmap_vlpi(int irq);
+int its_prop_update_vlpi(int irq, u8 config, bool inv);
+int its_prop_update_vsgi(int irq, u8 priority, bool group);
+
+struct irq_domain_ops;
+int its_init_v4(struct irq_domain *domain,
+ const struct irq_domain_ops *vpe_ops,
+ const struct irq_domain_ops *sgi_ops);
+
+bool gic_cpuif_has_vsgi(void);
+
+#endif
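
A hedged sketch (KVM-style caller; identifiers hypothetical) of
forwarding a physical LPI to a guest by mapping it onto a VLPI:

	struct its_vlpi_map map = {
		.vm	    = &its_vm,		/* the guest's its_vm	*/
		.vpe	    = vcpu_vpe,		/* target virtual CPU	*/
		.vintid	    = 8192,		/* virtual LPI number	*/
		.properties = LPI_PROP_ENABLED,
		.db_enabled = true,
	};

	ret = its_map_vlpi(host_irq, &map);
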
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index d3453ee072fc..2223f95079ce 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/irqchip/arm-gic.h
*
* Copyright (C) 2002 ARM Limited, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_IRQCHIP_ARM_GIC_H
#define __LINUX_IRQCHIP_ARM_GIC_H
@@ -65,11 +62,16 @@
#define GICD_INT_EN_CLR_X32 0xffffffff
#define GICD_INT_EN_SET_SGI 0x0000ffff
#define GICD_INT_EN_CLR_PPI 0xffff0000
-#define GICD_INT_DEF_PRI 0xa0
-#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
- (GICD_INT_DEF_PRI << 16) |\
- (GICD_INT_DEF_PRI << 8) |\
- GICD_INT_DEF_PRI)
+
+#define GICD_IIDR_IMPLEMENTER_SHIFT 0
+#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT)
+#define GICD_IIDR_REVISION_SHIFT 12
+#define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT)
+#define GICD_IIDR_VARIANT_SHIFT 16
+#define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT)
+#define GICD_IIDR_PRODUCT_ID_SHIFT 24
+#define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT)
+
#define GICH_HCR 0x0
#define GICH_VTR 0x4
@@ -84,6 +86,7 @@
#define GICH_HCR_EN (1 << 0)
#define GICH_HCR_UIE (1 << 1)
+#define GICH_HCR_NPIE (1 << 3)
#define GICH_LR_VIRTUALID (0x3ff << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
@@ -93,6 +96,7 @@
#define GICH_LR_PENDING_BIT (1 << 28)
#define GICH_LR_ACTIVE_BIT (1 << 29)
#define GICH_LR_EOI (1 << 19)
+#define GICH_LR_GROUP1 (1 << 30)
#define GICH_LR_HW (1 << 31)
#define GICH_VMCR_ENABLE_GRP0_SHIFT 0
@@ -147,16 +151,6 @@ int gic_of_init(struct device_node *node, struct device_node *parent);
*/
int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq);
-/*
- * Legacy platforms not converted to DT yet must use this to init
- * their GIC
- */
-void gic_init(unsigned int nr, int start,
- void __iomem *dist , void __iomem *cpu);
-
-int gicv2m_init(struct fwnode_handle *parent_handle,
- struct irq_domain *parent);
-
void gic_send_sgi(unsigned int cpu_id, unsigned int irq);
int gic_get_cpu_id(unsigned int cpu);
void gic_migrate_target(unsigned int new_cpu_id);
diff --git a/include/linux/irqchip/arm-vgic-info.h b/include/linux/irqchip/arm-vgic-info.h
new file mode 100644
index 000000000000..a75b2c7de69d
--- /dev/null
+++ b/include/linux/irqchip/arm-vgic-info.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * include/linux/irqchip/arm-vgic-info.h
+ *
+ * Copyright (C) 2016 ARM Limited, All Rights Reserved.
+ */
+#ifndef __LINUX_IRQCHIP_ARM_VGIC_INFO_H
+#define __LINUX_IRQCHIP_ARM_VGIC_INFO_H
+
+#include <linux/types.h>
+#include <linux/ioport.h>
+
+enum gic_type {
+ /* Full GICv2 */
+ GIC_V2,
+ /* Full GICv3, optionally with v2 compat */
+ GIC_V3,
+};
+
+struct gic_kvm_info {
+ /* GIC type */
+ enum gic_type type;
+ /* Virtual CPU interface */
+ struct resource vcpu;
+ /* Interrupt number */
+ unsigned int maint_irq;
+ /* No interrupt mask, no need to use the above field */
+ bool no_maint_irq_mask;
+ /* Virtual control interface */
+ struct resource vctrl;
+ /* vlpi support */
+ bool has_v4;
+ /* rvpeid support */
+ bool has_v4_1;
+	/* Deactivation impaired, HW deactivation cannot be used */
+ bool no_hw_deactivation;
+};
+
+#ifdef CONFIG_KVM
+void vgic_set_kvm_info(const struct gic_kvm_info *info);
+#else
+static inline void vgic_set_kvm_info(const struct gic_kvm_info *info) {}
+#endif
+
+#endif
diff --git a/include/linux/irqchip/arm-vic.h b/include/linux/irqchip/arm-vic.h
index ba46c794b4e5..f2b11d1df23d 100644
--- a/include/linux/irqchip/arm-vic.h
+++ b/include/linux/irqchip/arm-vic.h
@@ -1,38 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* arch/arm/include/asm/hardware/vic.h
*
* Copyright (c) ARM Limited 2003. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_ARM_HARDWARE_VIC_H
#define __ASM_ARM_HARDWARE_VIC_H
#include <linux/types.h>
-#define VIC_RAW_STATUS 0x08
-#define VIC_INT_ENABLE 0x10 /* 1 = enable, 0 = disable */
-#define VIC_INT_ENABLE_CLEAR 0x14
-
-struct device_node;
-struct pt_regs;
-
-void __vic_init(void __iomem *base, int parent_irq, int irq_start,
- u32 vic_sources, u32 resume_sources, struct device_node *node);
void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources);
-int vic_init_cascaded(void __iomem *base, unsigned int parent_irq,
- u32 vic_sources, u32 resume_sources);
#endif
diff --git a/include/linux/irqchip/chained_irq.h b/include/linux/irqchip/chained_irq.h
index adf4c30f3af6..dd8b3c476666 100644
--- a/include/linux/irqchip/chained_irq.h
+++ b/include/linux/irqchip/chained_irq.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Chained IRQ handlers support.
*
* Copyright (C) 2011 ARM Ltd.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __IRQCHIP_CHAINED_IRQ_H
#define __IRQCHIP_CHAINED_IRQ_H
diff --git a/include/linux/irqchip/ingenic.h b/include/linux/irqchip/ingenic.h
deleted file mode 100644
index 0ee319a4029d..000000000000
--- a/include/linux/irqchip/ingenic.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#ifndef __LINUX_IRQCHIP_INGENIC_H__
-#define __LINUX_IRQCHIP_INGENIC_H__
-
-#include <linux/irq.h>
-
-extern void ingenic_intc_irq_suspend(struct irq_data *data);
-extern void ingenic_intc_irq_resume(struct irq_data *data);
-
-#endif
diff --git a/include/linux/irqchip/irq-bcm2836.h b/include/linux/irqchip/irq-bcm2836.h
new file mode 100644
index 000000000000..ac5719d8f56b
--- /dev/null
+++ b/include/linux/irqchip/irq-bcm2836.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Root interrupt controller for the BCM2836 (Raspberry Pi 2).
+ *
+ * Copyright 2015 Broadcom
+ */
+
+#define LOCAL_CONTROL 0x000
+#define LOCAL_PRESCALER 0x008
+
+/*
+ * The low 2 bits identify the CPU that the GPU IRQ goes to, and the
+ * next 2 bits identify the CPU that the GPU FIQ goes to.
+ */
+#define LOCAL_GPU_ROUTING 0x00c
+/* When setting bits 0-3, enables PMU interrupts on that CPU. */
+#define LOCAL_PM_ROUTING_SET 0x010
+/* When setting bits 0-3, disables PMU interrupts on that CPU. */
+#define LOCAL_PM_ROUTING_CLR 0x014
+/*
+ * The low 4 bits of this are the CPU's timer IRQ enables, and the
+ * next 4 bits are the CPU's timer FIQ enables (which override the IRQ
+ * bits).
+ */
+#define LOCAL_TIMER_INT_CONTROL0 0x040
+/*
+ * The low 4 bits of this are the CPU's per-mailbox IRQ enables, and
+ * the next 4 bits are the CPU's per-mailbox FIQ enables (which
+ * override the IRQ bits).
+ */
+#define LOCAL_MAILBOX_INT_CONTROL0 0x050
+/*
+ * The CPU's interrupt status register. Bits are defined by the
+ * LOCAL_IRQ_* bits below.
+ */
+#define LOCAL_IRQ_PENDING0 0x060
+/* Same status bits as above, but for FIQ. */
+#define LOCAL_FIQ_PENDING0 0x070
+/*
+ * Mailbox write-to-set bits. There are 16 mailboxes, 4 per CPU, and
+ * these bits are organized by mailbox number and then CPU number. We
+ * use mailbox 0 for IPIs. The mailbox's interrupt is raised while
+ * any bit is set.
+ */
+#define LOCAL_MAILBOX0_SET0 0x080
+#define LOCAL_MAILBOX3_SET0 0x08c
+/* Mailbox write-to-clear bits. */
+#define LOCAL_MAILBOX0_CLR0 0x0c0
+#define LOCAL_MAILBOX3_CLR0 0x0cc
+
+#define LOCAL_IRQ_CNTPSIRQ 0
+#define LOCAL_IRQ_CNTPNSIRQ 1
+#define LOCAL_IRQ_CNTHPIRQ 2
+#define LOCAL_IRQ_CNTVIRQ 3
+#define LOCAL_IRQ_MAILBOX0 4
+#define LOCAL_IRQ_MAILBOX1 5
+#define LOCAL_IRQ_MAILBOX2 6
+#define LOCAL_IRQ_MAILBOX3 7
+#define LOCAL_IRQ_GPU_FAST 8
+#define LOCAL_IRQ_PMU_FAST 9
+#define LAST_IRQ LOCAL_IRQ_PMU_FAST
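
The mailbox set/clear registers above are how the bcm2836 driver implements IPIs. Below is a minimal sketch of raising and acknowledging a mailbox-0 IPI; `intc_base` is a hypothetical ioremapped base for this register block, and the 16-byte per-CPU stride is an assumption read off the SET0/CLR0 offsets (mailboxes 0-3 for one CPU occupy 0x80-0x8c).

#include <linux/io.h>

static void __iomem *intc_base;	/* hypothetical ioremapped register base */

/* Raise mailbox-0 IPI "ipinr" on "cpu"; the IRQ asserts while any bit is set. */
static void bcm2836_ipi_raise(int cpu, int ipinr)
{
	writel(1 << ipinr, intc_base + LOCAL_MAILBOX0_SET0 + 16 * cpu);
}

/* Acknowledge by writing the same bit to the write-to-clear register. */
static void bcm2836_ipi_ack(int cpu, int ipinr)
{
	writel(1 << ipinr, intc_base + LOCAL_MAILBOX0_CLR0 + 16 * cpu);
}
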
diff --git a/include/linux/irqchip/irq-davinci-aintc.h b/include/linux/irqchip/irq-davinci-aintc.h
new file mode 100644
index 000000000000..ea4e087fac98
--- /dev/null
+++ b/include/linux/irqchip/irq-davinci-aintc.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019 Texas Instruments
+ */
+
+#ifndef _LINUX_IRQ_DAVINCI_AINTC_
+#define _LINUX_IRQ_DAVINCI_AINTC_
+
+#include <linux/ioport.h>
+
+/**
+ * struct davinci_aintc_config - configuration data for davinci-aintc driver.
+ *
+ * @reg: register range to map
+ * @num_irqs: number of HW interrupts supported by the controller
+ * @prios: an array of size num_irqs containing priority settings for
+ * each interrupt
+ */
+struct davinci_aintc_config {
+ struct resource reg;
+ unsigned int num_irqs;
+ u8 *prios;
+};
+
+void davinci_aintc_init(const struct davinci_aintc_config *config);
+
+#endif /* _LINUX_IRQ_DAVINCI_AINTC_ */
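
A hedged sketch of how a DaVinci machine file might feed this structure to the driver; the register base, IRQ count and priority table are illustrative rather than taken from a real board. The cp-intc variant in the next hunk is used the same way, minus the priority table.

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/sizes.h>
#include <linux/irqchip/irq-davinci-aintc.h>

static u8 board_irq_prios[64];	/* hypothetical: one priority per hwirq */

static struct davinci_aintc_config board_aintc_config = {
	.reg		= DEFINE_RES_MEM(0x01c48000, SZ_4K),	/* hypothetical base */
	.num_irqs	= 64,
	.prios		= board_irq_prios,
};

void __init board_init_irq(void)
{
	davinci_aintc_init(&board_aintc_config);
}
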
diff --git a/include/linux/irqchip/irq-davinci-cp-intc.h b/include/linux/irqchip/irq-davinci-cp-intc.h
new file mode 100644
index 000000000000..8d71ed5b5a61
--- /dev/null
+++ b/include/linux/irqchip/irq-davinci-cp-intc.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2019 Texas Instruments
+ */
+
+#ifndef _LINUX_IRQ_DAVINCI_CP_INTC_
+#define _LINUX_IRQ_DAVINCI_CP_INTC_
+
+#include <linux/ioport.h>
+
+/**
+ * struct davinci_cp_intc_config - configuration data for davinci-cp-intc
+ * driver.
+ *
+ * @reg: register range to map
+ * @num_irqs: number of HW interrupts supported by the controller
+ */
+struct davinci_cp_intc_config {
+ struct resource reg;
+ unsigned int num_irqs;
+};
+
+int davinci_cp_intc_init(const struct davinci_cp_intc_config *config);
+
+#endif /* _LINUX_IRQ_DAVINCI_CP_INTC_ */
diff --git a/include/linux/irqchip/irq-madera.h b/include/linux/irqchip/irq-madera.h
new file mode 100644
index 000000000000..1160fa3769ae
--- /dev/null
+++ b/include/linux/irqchip/irq-madera.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interrupt support for Cirrus Logic Madera codecs
+ *
+ * Copyright (C) 2016-2018 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef IRQCHIP_MADERA_H
+#define IRQCHIP_MADERA_H
+
+#include <linux/interrupt.h>
+#include <linux/mfd/madera/core.h>
+
+#define MADERA_IRQ_FLL1_LOCK 0
+#define MADERA_IRQ_FLL2_LOCK 1
+#define MADERA_IRQ_FLL3_LOCK 2
+#define MADERA_IRQ_FLLAO_LOCK 3
+#define MADERA_IRQ_CLK_SYS_ERR 4
+#define MADERA_IRQ_CLK_ASYNC_ERR 5
+#define MADERA_IRQ_CLK_DSP_ERR 6
+#define MADERA_IRQ_HPDET 7
+#define MADERA_IRQ_MICDET1 8
+#define MADERA_IRQ_MICDET2 9
+#define MADERA_IRQ_JD1_RISE 10
+#define MADERA_IRQ_JD1_FALL 11
+#define MADERA_IRQ_JD2_RISE 12
+#define MADERA_IRQ_JD2_FALL 13
+#define MADERA_IRQ_MICD_CLAMP_RISE 14
+#define MADERA_IRQ_MICD_CLAMP_FALL 15
+#define MADERA_IRQ_DRC2_SIG_DET 16
+#define MADERA_IRQ_DRC1_SIG_DET 17
+#define MADERA_IRQ_ASRC1_IN1_LOCK 18
+#define MADERA_IRQ_ASRC1_IN2_LOCK 19
+#define MADERA_IRQ_ASRC2_IN1_LOCK 20
+#define MADERA_IRQ_ASRC2_IN2_LOCK 21
+#define MADERA_IRQ_DSP_IRQ1 22
+#define MADERA_IRQ_DSP_IRQ2 23
+#define MADERA_IRQ_DSP_IRQ3 24
+#define MADERA_IRQ_DSP_IRQ4 25
+#define MADERA_IRQ_DSP_IRQ5 26
+#define MADERA_IRQ_DSP_IRQ6 27
+#define MADERA_IRQ_DSP_IRQ7 28
+#define MADERA_IRQ_DSP_IRQ8 29
+#define MADERA_IRQ_DSP_IRQ9 30
+#define MADERA_IRQ_DSP_IRQ10 31
+#define MADERA_IRQ_DSP_IRQ11 32
+#define MADERA_IRQ_DSP_IRQ12 33
+#define MADERA_IRQ_DSP_IRQ13 34
+#define MADERA_IRQ_DSP_IRQ14 35
+#define MADERA_IRQ_DSP_IRQ15 36
+#define MADERA_IRQ_DSP_IRQ16 37
+#define MADERA_IRQ_HP1L_SC 38
+#define MADERA_IRQ_HP1R_SC 39
+#define MADERA_IRQ_HP2L_SC 40
+#define MADERA_IRQ_HP2R_SC 41
+#define MADERA_IRQ_HP3L_SC 42
+#define MADERA_IRQ_HP3R_SC 43
+#define MADERA_IRQ_SPKOUTL_SC 44
+#define MADERA_IRQ_SPKOUTR_SC 45
+#define MADERA_IRQ_HP1L_ENABLE_DONE 46
+#define MADERA_IRQ_HP1R_ENABLE_DONE 47
+#define MADERA_IRQ_HP2L_ENABLE_DONE 48
+#define MADERA_IRQ_HP2R_ENABLE_DONE 49
+#define MADERA_IRQ_HP3L_ENABLE_DONE 50
+#define MADERA_IRQ_HP3R_ENABLE_DONE 51
+#define MADERA_IRQ_SPKOUTL_ENABLE_DONE 52
+#define MADERA_IRQ_SPKOUTR_ENABLE_DONE 53
+#define MADERA_IRQ_SPK_SHUTDOWN 54
+#define MADERA_IRQ_SPK_OVERHEAT 55
+#define MADERA_IRQ_SPK_OVERHEAT_WARN 56
+#define MADERA_IRQ_GPIO1 57
+#define MADERA_IRQ_GPIO2 58
+#define MADERA_IRQ_GPIO3 59
+#define MADERA_IRQ_GPIO4 60
+#define MADERA_IRQ_GPIO5 61
+#define MADERA_IRQ_GPIO6 62
+#define MADERA_IRQ_GPIO7 63
+#define MADERA_IRQ_GPIO8 64
+#define MADERA_IRQ_DSP1_BUS_ERR 65
+#define MADERA_IRQ_DSP2_BUS_ERR 66
+#define MADERA_IRQ_DSP3_BUS_ERR 67
+#define MADERA_IRQ_DSP4_BUS_ERR 68
+#define MADERA_IRQ_DSP5_BUS_ERR 69
+#define MADERA_IRQ_DSP6_BUS_ERR 70
+#define MADERA_IRQ_DSP7_BUS_ERR 71
+
+#define MADERA_NUM_IRQ 72
+
+/*
+ * These wrapper functions are for use by other child drivers of the
+ * same parent MFD.
+ */
+static inline int madera_get_irq_mapping(struct madera *madera, int irq)
+{
+ if (!madera->irq_dev)
+ return -ENODEV;
+
+ return regmap_irq_get_virq(madera->irq_data, irq);
+}
+
+static inline int madera_request_irq(struct madera *madera, int irq,
+ const char *name,
+ irq_handler_t handler, void *data)
+{
+ irq = madera_get_irq_mapping(madera, irq);
+ if (irq < 0)
+ return irq;
+
+ return request_threaded_irq(irq, NULL, handler, IRQF_ONESHOT, name,
+ data);
+}
+
+static inline void madera_free_irq(struct madera *madera, int irq, void *data)
+{
+ irq = madera_get_irq_mapping(madera, irq);
+ if (irq < 0)
+ return;
+
+ free_irq(irq, data);
+}
+
+static inline int madera_set_irq_wake(struct madera *madera, int irq, int on)
+{
+ irq = madera_get_irq_mapping(madera, irq);
+ if (irq < 0)
+ return irq;
+
+ return irq_set_irq_wake(irq, on);
+}
+
+#endif
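
A usage sketch for a sibling child driver of the same MFD, built only on the wrappers defined above; the handler, IRQ name and data pointer are hypothetical, and error unwinding is omitted for brevity.

#include <linux/irqchip/irq-madera.h>

static irqreturn_t my_micdet_handler(int irq, void *data)
{
	/* ... handle the MICDET1 event; may sleep, since the handler is threaded ... */
	return IRQ_HANDLED;
}

static int my_child_request_irqs(struct madera *madera, void *priv)
{
	int ret;

	/* Translates the codec IRQ to a virq, then requests a threaded handler. */
	ret = madera_request_irq(madera, MADERA_IRQ_MICDET1, "MICDET1",
				 my_micdet_handler, priv);
	if (ret)
		return ret;

	/* Let mic detection wake the system from suspend. */
	return madera_set_irq_wake(madera, MADERA_IRQ_MICDET1, 1);
}
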
diff --git a/include/linux/irqchip/irq-omap-intc.h b/include/linux/irqchip/irq-omap-intc.h
index 2e3d1afeb674..dca379c0d7eb 100644
--- a/include/linux/irqchip/irq-omap-intc.h
+++ b/include/linux/irqchip/irq-omap-intc.h
@@ -1,25 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/**
* irq-omap-intc.h - INTC Idle Functions
*
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com
*
* Author: Felipe Balbi <balbi@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
#define __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
-void omap3_init_irq(void);
-
int omap_irq_pending(void);
void omap_intc_save_context(void);
void omap_intc_restore_context(void);
diff --git a/include/linux/irqchip/irq-partition-percpu.h b/include/linux/irqchip/irq-partition-percpu.h
index 87433a5d1285..2f6ae7551748 100644
--- a/include/linux/irqchip/irq-partition-percpu.h
+++ b/include/linux/irqchip/irq-partition-percpu.h
@@ -1,20 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016 ARM Limited, All Rights Reserved.
* Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#ifndef __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H
+#define __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H
+
#include <linux/fwnode.h>
#include <linux/cpumask.h>
#include <linux/irqdomain.h>
@@ -57,3 +49,5 @@ struct irq_domain *partition_get_domain(struct partition_desc *dsc)
return NULL;
}
#endif
+
+#endif /* __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H */
diff --git a/include/linux/irqchip/irq-sa11x0.h b/include/linux/irqchip/irq-sa11x0.h
index 15db6829c1e4..68fd2d73b683 100644
--- a/include/linux/irqchip/irq-sa11x0.h
+++ b/include/linux/irqchip/irq-sa11x0.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Generic IRQ handling for the SA11x0.
*
* Copyright (C) 2015 Dmitry Eremin-Solenikov
* Copyright (C) 1999-2001 Nicolas Pitre
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_SA11x0_H
diff --git a/include/linux/irqchip/metag-ext.h b/include/linux/irqchip/metag-ext.h
deleted file mode 100644
index 697af0fe7c5a..000000000000
--- a/include/linux/irqchip/metag-ext.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2012 Imagination Technologies
- */
-
-#ifndef _LINUX_IRQCHIP_METAG_EXT_H_
-#define _LINUX_IRQCHIP_METAG_EXT_H_
-
-struct irq_data;
-struct platform_device;
-
-/* called from core irq code at init */
-int init_external_IRQ(void);
-
-/*
- * called from SoC init_irq() callback to dynamically indicate the lack of
- * HWMASKEXT registers.
- */
-void meta_intc_no_mask(void);
-
-/*
- * These allow SoCs to specialise the interrupt controller from their init_irq
- * callbacks.
- */
-
-extern struct irq_chip meta_intc_edge_chip;
-extern struct irq_chip meta_intc_level_chip;
-
-/* this should be called in the mask callback */
-void meta_intc_mask_irq_simple(struct irq_data *data);
-/* this should be called in the unmask callback */
-void meta_intc_unmask_irq_simple(struct irq_data *data);
-
-#endif /* _LINUX_IRQCHIP_METAG_EXT_H_ */
diff --git a/include/linux/irqchip/metag.h b/include/linux/irqchip/metag.h
deleted file mode 100644
index 4ebdfb3101ab..000000000000
--- a/include/linux/irqchip/metag.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (C) 2011 Imagination Technologies
- */
-
-#ifndef _LINUX_IRQCHIP_METAG_H_
-#define _LINUX_IRQCHIP_METAG_H_
-
-#include <linux/errno.h>
-
-#ifdef CONFIG_METAG_PERFCOUNTER_IRQS
-extern int init_internal_IRQ(void);
-extern int internal_irq_map(unsigned int hw);
-#else
-static inline int init_internal_IRQ(void)
-{
- return 0;
-}
-static inline int internal_irq_map(unsigned int hw)
-{
- return -EINVAL;
-}
-#endif
-
-#endif /* _LINUX_IRQCHIP_METAG_H_ */
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
deleted file mode 100644
index 2b0e56619e53..000000000000
--- a/include/linux/irqchip/mips-gic.h
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000, 07 MIPS Technologies, Inc.
- */
-#ifndef __LINUX_IRQCHIP_MIPS_GIC_H
-#define __LINUX_IRQCHIP_MIPS_GIC_H
-
-#include <linux/clocksource.h>
-#include <linux/ioport.h>
-
-#define GIC_MAX_INTRS 256
-
-/* Constants */
-#define GIC_POL_POS 1
-#define GIC_POL_NEG 0
-#define GIC_TRIG_EDGE 1
-#define GIC_TRIG_LEVEL 0
-#define GIC_TRIG_DUAL_ENABLE 1
-#define GIC_TRIG_DUAL_DISABLE 0
-
-#define MSK(n) ((1 << (n)) - 1)
-
-/* Accessors */
-#define GIC_REG(segment, offset) (segment##_##SECTION_OFS + offset##_##OFS)
-
-/* GIC Address Space */
-#define SHARED_SECTION_OFS 0x0000
-#define SHARED_SECTION_SIZE 0x8000
-#define VPE_LOCAL_SECTION_OFS 0x8000
-#define VPE_LOCAL_SECTION_SIZE 0x4000
-#define VPE_OTHER_SECTION_OFS 0xc000
-#define VPE_OTHER_SECTION_SIZE 0x4000
-#define USM_VISIBLE_SECTION_OFS 0x10000
-#define USM_VISIBLE_SECTION_SIZE 0x10000
-
-/* Register Map for Shared Section */
-
-#define GIC_SH_CONFIG_OFS 0x0000
-
-/* Shared Global Counter */
-#define GIC_SH_COUNTER_31_00_OFS 0x0010
-/* 64-bit counter register for CM3 */
-#define GIC_SH_COUNTER_OFS GIC_SH_COUNTER_31_00_OFS
-#define GIC_SH_COUNTER_63_32_OFS 0x0014
-#define GIC_SH_REVISIONID_OFS 0x0020
-
-/* Convert an interrupt number to a byte offset/bit for multi-word registers */
-#define GIC_INTR_OFS(intr) ({ \
- unsigned bits = mips_cm_is64 ? 64 : 32; \
- unsigned reg_idx = (intr) / bits; \
- unsigned reg_width = bits / 8; \
- \
- reg_idx * reg_width; \
-})
-#define GIC_INTR_BIT(intr) ((intr) % (mips_cm_is64 ? 64 : 32))
-
-/* Polarity : Reset Value is always 0 */
-#define GIC_SH_SET_POLARITY_OFS 0x0100
-
-/* Triggering : Reset Value is always 0 */
-#define GIC_SH_SET_TRIGGER_OFS 0x0180
-
-/* Dual edge triggering : Reset Value is always 0 */
-#define GIC_SH_SET_DUAL_OFS 0x0200
-
-/* Set/Clear corresponding bit in Edge Detect Register */
-#define GIC_SH_WEDGE_OFS 0x0280
-
-/* Mask manipulation */
-#define GIC_SH_RMASK_OFS 0x0300
-#define GIC_SH_SMASK_OFS 0x0380
-
-/* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */
-#define GIC_SH_MASK_OFS 0x0400
-
-/* Pending Global Interrupts (RO) */
-#define GIC_SH_PEND_OFS 0x0480
-
-/* Maps Interrupt X to a Pin */
-#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500
-#define GIC_SH_MAP_TO_PIN(intr) (4 * (intr))
-
-/* Maps Interrupt X to a VPE */
-#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000
-#define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \
- ((32 * (intr)) + (((vpe) / 32) * 4))
-#define GIC_SH_MAP_TO_VPE_REG_BIT(vpe) (1 << ((vpe) % 32))
-
-/* Register Map for Local Section */
-#define GIC_VPE_CTL_OFS 0x0000
-#define GIC_VPE_PEND_OFS 0x0004
-#define GIC_VPE_MASK_OFS 0x0008
-#define GIC_VPE_RMASK_OFS 0x000c
-#define GIC_VPE_SMASK_OFS 0x0010
-#define GIC_VPE_WD_MAP_OFS 0x0040
-#define GIC_VPE_COMPARE_MAP_OFS 0x0044
-#define GIC_VPE_TIMER_MAP_OFS 0x0048
-#define GIC_VPE_FDC_MAP_OFS 0x004c
-#define GIC_VPE_PERFCTR_MAP_OFS 0x0050
-#define GIC_VPE_SWINT0_MAP_OFS 0x0054
-#define GIC_VPE_SWINT1_MAP_OFS 0x0058
-#define GIC_VPE_OTHER_ADDR_OFS 0x0080
-#define GIC_VP_IDENT_OFS 0x0088
-#define GIC_VPE_WD_CONFIG0_OFS 0x0090
-#define GIC_VPE_WD_COUNT0_OFS 0x0094
-#define GIC_VPE_WD_INITIAL0_OFS 0x0098
-#define GIC_VPE_COMPARE_LO_OFS 0x00a0
-/* 64-bit Compare register on CM3 */
-#define GIC_VPE_COMPARE_OFS GIC_VPE_COMPARE_LO_OFS
-#define GIC_VPE_COMPARE_HI_OFS 0x00a4
-
-#define GIC_VPE_EIC_SHADOW_SET_BASE_OFS 0x0100
-#define GIC_VPE_EIC_SS(intr) (4 * (intr))
-
-#define GIC_VPE_EIC_VEC_BASE_OFS 0x0800
-#define GIC_VPE_EIC_VEC(intr) (4 * (intr))
-
-#define GIC_VPE_TENABLE_NMI_OFS 0x1000
-#define GIC_VPE_TENABLE_YQ_OFS 0x1004
-#define GIC_VPE_TENABLE_INT_31_0_OFS 0x1080
-#define GIC_VPE_TENABLE_INT_63_32_OFS 0x1084
-
-/* User Mode Visible Section Register Map */
-#define GIC_UMV_SH_COUNTER_31_00_OFS 0x0000
-#define GIC_UMV_SH_COUNTER_63_32_OFS 0x0004
-
-/* Masks */
-#define GIC_SH_CONFIG_COUNTSTOP_SHF 28
-#define GIC_SH_CONFIG_COUNTSTOP_MSK (MSK(1) << GIC_SH_CONFIG_COUNTSTOP_SHF)
-
-#define GIC_SH_CONFIG_COUNTBITS_SHF 24
-#define GIC_SH_CONFIG_COUNTBITS_MSK (MSK(4) << GIC_SH_CONFIG_COUNTBITS_SHF)
-
-#define GIC_SH_CONFIG_NUMINTRS_SHF 16
-#define GIC_SH_CONFIG_NUMINTRS_MSK (MSK(8) << GIC_SH_CONFIG_NUMINTRS_SHF)
-
-#define GIC_SH_CONFIG_NUMVPES_SHF 0
-#define GIC_SH_CONFIG_NUMVPES_MSK (MSK(8) << GIC_SH_CONFIG_NUMVPES_SHF)
-
-#define GIC_SH_WEDGE_SET(intr) ((intr) | (0x1 << 31))
-#define GIC_SH_WEDGE_CLR(intr) ((intr) & ~(0x1 << 31))
-
-#define GIC_MAP_TO_PIN_SHF 31
-#define GIC_MAP_TO_PIN_MSK (MSK(1) << GIC_MAP_TO_PIN_SHF)
-#define GIC_MAP_TO_NMI_SHF 30
-#define GIC_MAP_TO_NMI_MSK (MSK(1) << GIC_MAP_TO_NMI_SHF)
-#define GIC_MAP_TO_YQ_SHF 29
-#define GIC_MAP_TO_YQ_MSK (MSK(1) << GIC_MAP_TO_YQ_SHF)
-#define GIC_MAP_SHF 0
-#define GIC_MAP_MSK (MSK(6) << GIC_MAP_SHF)
-
-/* GIC_VPE_CTL Masks */
-#define GIC_VPE_CTL_FDC_RTBL_SHF 4
-#define GIC_VPE_CTL_FDC_RTBL_MSK (MSK(1) << GIC_VPE_CTL_FDC_RTBL_SHF)
-#define GIC_VPE_CTL_SWINT_RTBL_SHF 3
-#define GIC_VPE_CTL_SWINT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_SWINT_RTBL_SHF)
-#define GIC_VPE_CTL_PERFCNT_RTBL_SHF 2
-#define GIC_VPE_CTL_PERFCNT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_PERFCNT_RTBL_SHF)
-#define GIC_VPE_CTL_TIMER_RTBL_SHF 1
-#define GIC_VPE_CTL_TIMER_RTBL_MSK (MSK(1) << GIC_VPE_CTL_TIMER_RTBL_SHF)
-#define GIC_VPE_CTL_EIC_MODE_SHF 0
-#define GIC_VPE_CTL_EIC_MODE_MSK (MSK(1) << GIC_VPE_CTL_EIC_MODE_SHF)
-
-/* GIC_VPE_PEND Masks */
-#define GIC_VPE_PEND_WD_SHF 0
-#define GIC_VPE_PEND_WD_MSK (MSK(1) << GIC_VPE_PEND_WD_SHF)
-#define GIC_VPE_PEND_CMP_SHF 1
-#define GIC_VPE_PEND_CMP_MSK (MSK(1) << GIC_VPE_PEND_CMP_SHF)
-#define GIC_VPE_PEND_TIMER_SHF 2
-#define GIC_VPE_PEND_TIMER_MSK (MSK(1) << GIC_VPE_PEND_TIMER_SHF)
-#define GIC_VPE_PEND_PERFCOUNT_SHF 3
-#define GIC_VPE_PEND_PERFCOUNT_MSK (MSK(1) << GIC_VPE_PEND_PERFCOUNT_SHF)
-#define GIC_VPE_PEND_SWINT0_SHF 4
-#define GIC_VPE_PEND_SWINT0_MSK (MSK(1) << GIC_VPE_PEND_SWINT0_SHF)
-#define GIC_VPE_PEND_SWINT1_SHF 5
-#define GIC_VPE_PEND_SWINT1_MSK (MSK(1) << GIC_VPE_PEND_SWINT1_SHF)
-#define GIC_VPE_PEND_FDC_SHF 6
-#define GIC_VPE_PEND_FDC_MSK (MSK(1) << GIC_VPE_PEND_FDC_SHF)
-
-/* GIC_VPE_RMASK Masks */
-#define GIC_VPE_RMASK_WD_SHF 0
-#define GIC_VPE_RMASK_WD_MSK (MSK(1) << GIC_VPE_RMASK_WD_SHF)
-#define GIC_VPE_RMASK_CMP_SHF 1
-#define GIC_VPE_RMASK_CMP_MSK (MSK(1) << GIC_VPE_RMASK_CMP_SHF)
-#define GIC_VPE_RMASK_TIMER_SHF 2
-#define GIC_VPE_RMASK_TIMER_MSK (MSK(1) << GIC_VPE_RMASK_TIMER_SHF)
-#define GIC_VPE_RMASK_PERFCNT_SHF 3
-#define GIC_VPE_RMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_RMASK_PERFCNT_SHF)
-#define GIC_VPE_RMASK_SWINT0_SHF 4
-#define GIC_VPE_RMASK_SWINT0_MSK (MSK(1) << GIC_VPE_RMASK_SWINT0_SHF)
-#define GIC_VPE_RMASK_SWINT1_SHF 5
-#define GIC_VPE_RMASK_SWINT1_MSK (MSK(1) << GIC_VPE_RMASK_SWINT1_SHF)
-#define GIC_VPE_RMASK_FDC_SHF 6
-#define GIC_VPE_RMASK_FDC_MSK (MSK(1) << GIC_VPE_RMASK_FDC_SHF)
-
-/* GIC_VPE_SMASK Masks */
-#define GIC_VPE_SMASK_WD_SHF 0
-#define GIC_VPE_SMASK_WD_MSK (MSK(1) << GIC_VPE_SMASK_WD_SHF)
-#define GIC_VPE_SMASK_CMP_SHF 1
-#define GIC_VPE_SMASK_CMP_MSK (MSK(1) << GIC_VPE_SMASK_CMP_SHF)
-#define GIC_VPE_SMASK_TIMER_SHF 2
-#define GIC_VPE_SMASK_TIMER_MSK (MSK(1) << GIC_VPE_SMASK_TIMER_SHF)
-#define GIC_VPE_SMASK_PERFCNT_SHF 3
-#define GIC_VPE_SMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_SMASK_PERFCNT_SHF)
-#define GIC_VPE_SMASK_SWINT0_SHF 4
-#define GIC_VPE_SMASK_SWINT0_MSK (MSK(1) << GIC_VPE_SMASK_SWINT0_SHF)
-#define GIC_VPE_SMASK_SWINT1_SHF 5
-#define GIC_VPE_SMASK_SWINT1_MSK (MSK(1) << GIC_VPE_SMASK_SWINT1_SHF)
-#define GIC_VPE_SMASK_FDC_SHF 6
-#define GIC_VPE_SMASK_FDC_MSK (MSK(1) << GIC_VPE_SMASK_FDC_SHF)
-
-/* GIC_VP_IDENT fields */
-#define GIC_VP_IDENT_VCNUM_SHF 0
-#define GIC_VP_IDENT_VCNUM_MSK (MSK(6) << GIC_VP_IDENT_VCNUM_SHF)
-
-/* GIC nomenclature for Core Interrupt Pins. */
-#define GIC_CPU_INT0 0 /* Core Interrupt 2 */
-#define GIC_CPU_INT1 1 /* . */
-#define GIC_CPU_INT2 2 /* . */
-#define GIC_CPU_INT3 3 /* . */
-#define GIC_CPU_INT4 4 /* . */
-#define GIC_CPU_INT5 5 /* Core Interrupt 7 */
-
-/* Add 2 to convert GIC CPU pin to core interrupt */
-#define GIC_CPU_PIN_OFFSET 2
-
-/* Add 2 to convert non-EIC hardware interrupt to EIC vector number. */
-#define GIC_CPU_TO_VEC_OFFSET 2
-
-/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
-#define GIC_PIN_TO_VEC_OFFSET 1
-
-/* Local GIC interrupts. */
-#define GIC_LOCAL_INT_WD 0 /* GIC watchdog */
-#define GIC_LOCAL_INT_COMPARE 1 /* GIC count and compare timer */
-#define GIC_LOCAL_INT_TIMER 2 /* CPU timer interrupt */
-#define GIC_LOCAL_INT_PERFCTR 3 /* CPU performance counter */
-#define GIC_LOCAL_INT_SWINT0 4 /* CPU software interrupt 0 */
-#define GIC_LOCAL_INT_SWINT1 5 /* CPU software interrupt 1 */
-#define GIC_LOCAL_INT_FDC 6 /* CPU fast debug channel */
-#define GIC_NUM_LOCAL_INTRS 7
-
-/* Convert between local/shared IRQ number and GIC HW IRQ number. */
-#define GIC_LOCAL_HWIRQ_BASE 0
-#define GIC_LOCAL_TO_HWIRQ(x) (GIC_LOCAL_HWIRQ_BASE + (x))
-#define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE)
-#define GIC_SHARED_HWIRQ_BASE GIC_NUM_LOCAL_INTRS
-#define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x))
-#define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE)
-
-#ifdef CONFIG_MIPS_GIC
-
-extern unsigned int gic_present;
-
-extern void gic_init(unsigned long gic_base_addr,
- unsigned long gic_addrspace_size, unsigned int cpu_vec,
- unsigned int irqbase);
-extern u64 gic_read_count(void);
-extern unsigned int gic_get_count_width(void);
-extern u64 gic_read_compare(void);
-extern void gic_write_compare(u64 cnt);
-extern void gic_write_cpu_compare(u64 cnt, int cpu);
-extern void gic_start_count(void);
-extern void gic_stop_count(void);
-extern int gic_get_c0_compare_int(void);
-extern int gic_get_c0_perfcount_int(void);
-extern int gic_get_c0_fdc_int(void);
-extern int gic_get_usm_range(struct resource *gic_usm_res);
-
-#else /* CONFIG_MIPS_GIC */
-
-#define gic_present 0
-
-static inline int gic_get_usm_range(struct resource *gic_usm_res)
-{
- /* Shouldn't be called. */
- return -1;
-}
-
-#endif /* CONFIG_MIPS_GIC */
-
-/**
- * gic_read_local_vp_id() - read the local VPs VCNUM
- *
- * Read the VCNUM of the local VP from the GIC_VP_IDENT register and
- * return it to the caller. This ID should be used to refer to the VP
- * via the GICs VP-other region, or when calculating an offset to a
- * bit representing the VP in interrupt masks.
- *
- * Return: The VCNUM value for the local VP.
- */
-extern unsigned gic_read_local_vp_id(void);
-
-#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
diff --git a/include/linux/irqchip/mmp.h b/include/linux/irqchip/mmp.h
index c78a8921185d..aa1813749a4f 100644
--- a/include/linux/irqchip/mmp.h
+++ b/include/linux/irqchip/mmp.h
@@ -1,6 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IRQCHIP_MMP_H
#define __IRQCHIP_MMP_H
extern struct irq_chip icu_irq_chip;
+extern void icu_init_irq(void);
+extern void mmp2_init_icu(void);
+
#endif /* __IRQCHIP_MMP_H */
diff --git a/include/linux/irqchip/mxs.h b/include/linux/irqchip/mxs.h
index 9039a538a919..4f447e3f0f3a 100644
--- a/include/linux/irqchip/mxs.h
+++ b/include/linux/irqchip/mxs.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_IRQCHIP_MXS_H
diff --git a/include/linux/irqchip/versatile-fpga.h b/include/linux/irqchip/versatile-fpga.h
deleted file mode 100644
index 1fac9651d3ca..000000000000
--- a/include/linux/irqchip/versatile-fpga.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef PLAT_FPGA_IRQ_H
-#define PLAT_FPGA_IRQ_H
-
-struct device_node;
-struct pt_regs;
-
-void fpga_handle_irq(struct pt_regs *regs);
-void fpga_irq_init(void __iomem *, const char *, int, int, u32,
- struct device_node *node);
-int fpga_irq_of_init(struct device_node *node,
- struct device_node *parent);
-
-#endif
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 3e90a094798d..844a8e30e6de 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IRQDESC_H
#define _LINUX_IRQDESC_H
@@ -21,17 +22,17 @@ struct pt_regs;
* @irq_common_data: per irq and chip data passed down to chip functions
* @kstat_irqs: irq stats per cpu
* @handle_irq: highlevel irq-events handler
- * @preflow_handler: handler called before the flow handler (currently used by sparc)
* @action: the irq action chain
- * @status: status information
+ * @status_use_accessors: status information
* @core_internal_state__do_not_mess_with_it: core internal status information
* @depth: disable-depth, for nested irq_disable() calls
* @wake_depth: enable depth, for multiple irq_set_irq_wake() callers
+ * @tot_count: stats field for non-percpu irqs
* @irq_count: stats field to detect stalled irqs
* @last_unhandled: aging timer for unhandled count
* @irqs_unhandled: stats field for spurious unhandled interrupts
* @threads_handled: stats field for deferred spurious detection of threaded handlers
- * @threads_handled_last: comparator field for deferred spurious detection of theraded handlers
+ * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
* @lock: locking for SMP
* @affinity_hint: hint to user space for preferred irq affinity
* @affinity_notify: context for notification of affinity changes
@@ -56,14 +57,12 @@ struct irq_desc {
struct irq_data irq_data;
unsigned int __percpu *kstat_irqs;
irq_flow_handler_t handle_irq;
-#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
- irq_preflow_handler_t preflow_handler;
-#endif
struct irqaction *action; /* IRQ action list */
unsigned int status_use_accessors;
unsigned int core_internal_state__do_not_mess_with_it;
unsigned int depth; /* nested irq disables */
unsigned int wake_depth; /* nested wake enables */
+ unsigned int tot_count;
unsigned int irq_count; /* For detecting broken IRQs */
unsigned long last_unhandled; /* Aging timer for unhandled count */
unsigned int irqs_unhandled;
@@ -93,6 +92,7 @@ struct irq_desc {
#endif
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
struct dentry *debugfs_file;
+ const char *dev_name;
#endif
#ifdef CONFIG_SPARSE_IRQ
struct rcu_head rcu;
@@ -113,6 +113,12 @@ static inline void irq_unlock_sparse(void) { }
extern struct irq_desc irq_desc[NR_IRQS];
#endif
+static inline unsigned int irq_desc_kstat_cpu(struct irq_desc *desc,
+ unsigned int cpu)
+{
+ return desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
+}
+
static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
{
return container_of(data->common, struct irq_desc, irq_common_data);
@@ -143,11 +149,6 @@ static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
return desc->irq_common_data.handler_data;
}
-static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
-{
- return desc->irq_common_data.msi_desc;
-}
-
/*
* Architectures call this to let the generic IRQ layer
* handle an interrupt.
@@ -157,34 +158,25 @@ static inline void generic_handle_irq_desc(struct irq_desc *desc)
desc->handle_irq(desc);
}
+int handle_irq_desc(struct irq_desc *desc);
int generic_handle_irq(unsigned int irq);
+int generic_handle_irq_safe(unsigned int irq);
-#ifdef CONFIG_HANDLE_DOMAIN_IRQ
+#ifdef CONFIG_IRQ_DOMAIN
/*
* Convert a HW interrupt number to a logical one using a IRQ domain,
* and handle the result interrupt number. Return -EINVAL if
- * conversion failed. Providing a NULL domain indicates that the
- * conversion has already been done.
+ * conversion failed.
*/
-int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
- bool lookup, struct pt_regs *regs);
-
-static inline int handle_domain_irq(struct irq_domain *domain,
- unsigned int hwirq, struct pt_regs *regs)
-{
- return __handle_domain_irq(domain, hwirq, true, regs);
-}
+int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq);
+int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq);
+int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq);
#endif
/* Test to see if a driver has successfully requested an irq */
static inline int irq_desc_has_action(struct irq_desc *desc)
{
- return desc->action != NULL;
-}
-
-static inline int irq_has_action(unsigned int irq)
-{
- return irq_desc_has_action(irq_to_desc(irq));
+ return desc && desc->action != NULL;
}
/**
@@ -218,50 +210,42 @@ static inline void irq_set_handler_locked(struct irq_data *data,
* Must be called with irq_desc locked and valid parameters.
*/
static inline void
-irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,
+irq_set_chip_handler_name_locked(struct irq_data *data,
+ const struct irq_chip *chip,
irq_flow_handler_t handler, const char *name)
{
struct irq_desc *desc = irq_data_to_desc(data);
desc->handle_irq = handler;
desc->name = name;
- data->chip = chip;
+ data->chip = (struct irq_chip *)chip;
}
-static inline int irq_balancing_disabled(unsigned int irq)
-{
- struct irq_desc *desc;
+bool irq_check_status_bit(unsigned int irq, unsigned int bitmask);
- desc = irq_to_desc(irq);
- return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
+static inline bool irq_balancing_disabled(unsigned int irq)
+{
+ return irq_check_status_bit(irq, IRQ_NO_BALANCING_MASK);
}
-static inline int irq_is_percpu(unsigned int irq)
+static inline bool irq_is_percpu(unsigned int irq)
{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
- return desc->status_use_accessors & IRQ_PER_CPU;
+ return irq_check_status_bit(irq, IRQ_PER_CPU);
}
-static inline void
-irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
+static inline bool irq_is_percpu_devid(unsigned int irq)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
- if (desc)
- lockdep_set_class(&desc->lock, class);
+ return irq_check_status_bit(irq, IRQ_PER_CPU_DEVID);
}
-#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
+void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
+ struct lock_class_key *request_class);
static inline void
-__irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler)
+irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
+ struct lock_class_key *request_class)
{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
- desc->preflow_handler = handler;
+ if (IS_ENABLED(CONFIG_LOCKDEP))
+ __irq_set_lockdep_class(irq, lock_class, request_class);
}
-#endif
#endif
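
With handle_domain_irq() gone, cascaded irqchips resolve child hwirqs through generic_handle_domain_irq() (or the _safe variant outside hardirq context). A hedged demux sketch follows; the pending register, its offset and the private structure are hypothetical.

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>

#define MY_INTC_PENDING	0x10	/* hypothetical pending-status register */

struct my_intc {		/* hypothetical driver state */
	void __iomem *base;
	struct irq_domain *domain;
};

static void my_chained_handler(struct irq_desc *desc)
{
	struct my_intc *priv = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending;
	int hwirq;

	chained_irq_enter(chip, desc);
	pending = readl(priv->base + MY_INTC_PENDING);
	/* Resolve each pending hwirq through the child domain and handle it. */
	for_each_set_bit(hwirq, &pending, 32)
		generic_handle_domain_irq(priv->domain, hwirq);
	chained_irq_exit(chip, desc);
}
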
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index cac77a5c5555..51c254b7fec2 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* irq_domain - IRQ translation domains
*
@@ -30,19 +31,22 @@
#define _LINUX_IRQDOMAIN_H
#include <linux/types.h>
+#include <linux/irqdomain_defs.h>
#include <linux/irqhandler.h>
#include <linux/of.h>
+#include <linux/mutex.h>
#include <linux/radix-tree.h>
struct device_node;
+struct fwnode_handle;
struct irq_domain;
-struct of_device_id;
struct irq_chip;
struct irq_data;
+struct irq_desc;
struct cpumask;
-
-/* Number of irqs reserved for a legacy isa controller */
-#define NUM_ISA_INTERRUPTS 16
+struct seq_file;
+struct irq_affinity_desc;
+struct msi_parent_ops;
#define IRQ_DOMAIN_IRQ_SPEC_PARAMS 16
@@ -62,22 +66,9 @@ struct irq_fwspec {
u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS];
};
-/*
- * Should several domains have the same device node, but serve
- * different purposes (for example one domain is for PCI/MSI, and the
- * other for wired IRQs), they can be distinguished using a
- * bus-specific token. Most domains are expected to only carry
- * DOMAIN_BUS_ANY.
- */
-enum irq_domain_bus_token {
- DOMAIN_BUS_ANY = 0,
- DOMAIN_BUS_WIRED,
- DOMAIN_BUS_PCI_MSI,
- DOMAIN_BUS_PLATFORM_MSI,
- DOMAIN_BUS_NEXUS,
- DOMAIN_BUS_IPI,
- DOMAIN_BUS_FSL_MC_MSI,
-};
+/* Conversion function from of_phandle_args fields to fwspec */
+void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
+ unsigned int count, struct irq_fwspec *fwspec);
/**
* struct irq_domain_ops - Methods for irq_domain objects
@@ -104,75 +95,86 @@ struct irq_domain_ops {
int (*xlate)(struct irq_domain *d, struct device_node *node,
const u32 *intspec, unsigned int intsize,
unsigned long *out_hwirq, unsigned int *out_type);
-
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/* extended V2 interfaces to support hierarchy irq_domains */
int (*alloc)(struct irq_domain *d, unsigned int virq,
unsigned int nr_irqs, void *arg);
void (*free)(struct irq_domain *d, unsigned int virq,
unsigned int nr_irqs);
- void (*activate)(struct irq_domain *d, struct irq_data *irq_data);
+ int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve);
void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
unsigned long *out_hwirq, unsigned int *out_type);
#endif
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+ void (*debug_show)(struct seq_file *m, struct irq_domain *d,
+ struct irq_data *irqd, int ind);
+#endif
};
-extern struct irq_domain_ops irq_generic_chip_ops;
+extern const struct irq_domain_ops irq_generic_chip_ops;
struct irq_domain_chip_generic;
/**
* struct irq_domain - Hardware interrupt number translation object
- * @link: Element in global irq_domain list.
- * @name: Name of interrupt domain
- * @ops: pointer to irq_domain methods
- * @host_data: private data pointer for use by owner. Not touched by irq_domain
- * core code.
- * @flags: host per irq_domain flags
- * @mapcount: The number of mapped interrupts
+ * @link: Element in global irq_domain list.
+ * @name: Name of interrupt domain
+ * @ops: Pointer to irq_domain methods
+ * @host_data: Private data pointer for use by owner. Not touched by irq_domain
+ * core code.
+ * @flags: Per irq_domain flags
+ * @mapcount: The number of mapped interrupts
+ * @mutex: Domain lock, hierarchical domains use root domain's lock
+ * @root: Pointer to root domain, or containing structure if non-hierarchical
*
- * Optional elements
- * @of_node: Pointer to device tree nodes associated with the irq_domain. Used
- * when decoding device tree interrupt specifiers.
- * @gc: Pointer to a list of generic chips. There is a helper function for
- * setting up one or more generic chips for interrupt controllers
- * drivers using the generic chip library which uses this pointer.
- * @parent: Pointer to parent irq_domain to support hierarchy irq_domains
- * @debugfs_file: dentry for the domain debugfs file
+ * Optional elements:
+ * @fwnode: Pointer to firmware node associated with the irq_domain. Pretty easy
+ * to swap it for the of_node via the irq_domain_get_of_node accessor
+ * @gc: Pointer to a list of generic chips. There is a helper function for
+ * setting up one or more generic chips for interrupt controllers
+ * drivers using the generic chip library which uses this pointer.
+ * @dev: Pointer to the device which instantiated the irqdomain
+ * With per device irq domains this is not necessarily the same
+ * as @pm_dev.
+ * @pm_dev: Pointer to a device that can be utilized for power management
+ * purposes related to the irq domain.
+ * @parent: Pointer to parent irq_domain to support hierarchy irq_domains
+ * @msi_parent_ops: Pointer to MSI parent domain methods for per device domain init
*
- * Revmap data, used internally by irq_domain
- * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that
- * support direct mapping
- * @revmap_size: Size of the linear map table @linear_revmap[]
- * @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map
- * @linear_revmap: Linear table of hwirq->virq reverse mappings
+ * Revmap data, used internally by the irq domain code:
+ * @revmap_size: Size of the linear map table @revmap[]
+ * @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map
+ * @revmap: Linear table of irq_data pointers
*/
struct irq_domain {
- struct list_head link;
- const char *name;
- const struct irq_domain_ops *ops;
- void *host_data;
- unsigned int flags;
- unsigned int mapcount;
+ struct list_head link;
+ const char *name;
+ const struct irq_domain_ops *ops;
+ void *host_data;
+ unsigned int flags;
+ unsigned int mapcount;
+ struct mutex mutex;
+ struct irq_domain *root;
/* Optional data */
- struct fwnode_handle *fwnode;
- enum irq_domain_bus_token bus_token;
- struct irq_domain_chip_generic *gc;
+ struct fwnode_handle *fwnode;
+ enum irq_domain_bus_token bus_token;
+ struct irq_domain_chip_generic *gc;
+ struct device *dev;
+ struct device *pm_dev;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
- struct irq_domain *parent;
+ struct irq_domain *parent;
#endif
-#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
- struct dentry *debugfs_file;
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ const struct msi_parent_ops *msi_parent_ops;
#endif
/* reverse map data. The linear map gets appended to the irq_domain */
- irq_hw_number_t hwirq_max;
- unsigned int revmap_direct_max_irq;
- unsigned int revmap_size;
- struct radix_tree_root revmap_tree;
- unsigned int linear_revmap[];
+ irq_hw_number_t hwirq_max;
+ unsigned int revmap_size;
+ struct radix_tree_root revmap_tree;
+ struct irq_data __rcu *revmap[];
};
/* Irq domain flags */
@@ -181,7 +183,7 @@ enum {
IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0),
/* Irq domain name was allocated in __irq_domain_add() */
- IRQ_DOMAIN_NAME_ALLOCATED = (1 << 6),
+ IRQ_DOMAIN_NAME_ALLOCATED = (1 << 1),
/* Irq domain is an IPI domain with virq per cpu */
IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2),
@@ -192,8 +194,19 @@ enum {
/* Irq domain implements MSIs */
IRQ_DOMAIN_FLAG_MSI = (1 << 4),
- /* Irq domain implements MSI remapping */
- IRQ_DOMAIN_FLAG_MSI_REMAP = (1 << 5),
+ /*
+ * Irq domain implements isolated MSI, see msi_device_has_isolated_msi()
+ */
+ IRQ_DOMAIN_FLAG_ISOLATED_MSI = (1 << 5),
+
+ /* Irq domain doesn't translate anything */
+ IRQ_DOMAIN_FLAG_NO_MAP = (1 << 6),
+
+ /* Irq domain is a MSI parent domain */
+ IRQ_DOMAIN_FLAG_MSI_PARENT = (1 << 8),
+
+ /* Irq domain is a MSI device domain */
+ IRQ_DOMAIN_FLAG_MSI_DEVICE = (1 << 9),
/*
* Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
@@ -208,9 +221,16 @@ static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d)
return to_of_node(d->fwnode);
}
+static inline void irq_domain_set_pm_device(struct irq_domain *d,
+ struct device *dev)
+{
+ if (d)
+ d->pm_dev = dev;
+}
+
#ifdef CONFIG_IRQ_DOMAIN
struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
- const char *name, void *data);
+ const char *name, phys_addr_t *pa);
enum {
IRQCHIP_FWNODE_REAL,
@@ -231,43 +251,51 @@ struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id)
NULL);
}
-static inline struct fwnode_handle *irq_domain_alloc_fwnode(void *data)
+static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa)
{
- return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, data);
+ return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, pa);
}
void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
+struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
irq_hw_number_t hwirq_max, int direct_max,
const struct irq_domain_ops *ops,
void *host_data);
-struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
- unsigned int size,
- unsigned int first_irq,
- const struct irq_domain_ops *ops,
- void *host_data);
+struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode,
+ unsigned int size,
+ unsigned int first_irq,
+ const struct irq_domain_ops *ops,
+ void *host_data);
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
unsigned int size,
unsigned int first_irq,
irq_hw_number_t first_hwirq,
const struct irq_domain_ops *ops,
void *host_data);
+struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode,
+ unsigned int size,
+ unsigned int first_irq,
+ irq_hw_number_t first_hwirq,
+ const struct irq_domain_ops *ops,
+ void *host_data);
extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token);
-extern bool irq_domain_check_msi_remap(void);
extern void irq_set_default_host(struct irq_domain *host);
+extern struct irq_domain *irq_get_default_host(void);
extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
irq_hw_number_t hwirq, int node,
- const struct cpumask *affinity);
+ const struct irq_affinity_desc *affinity);
static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
{
return node ? &node->fwnode : NULL;
}
+extern const struct fwnode_operations irqchip_fwnode_ops;
+
static inline bool is_fwnode_irqchip(struct fwnode_handle *fwnode)
{
- return fwnode && fwnode->type == FWNODE_IRQCHIP;
+ return fwnode && fwnode->ops == &irqchip_fwnode_ops;
}
extern void irq_domain_update_bus_token(struct irq_domain *domain,
@@ -292,7 +320,22 @@ static inline struct irq_domain *irq_find_matching_host(struct device_node *node
static inline struct irq_domain *irq_find_host(struct device_node *node)
{
- return irq_find_matching_host(node, DOMAIN_BUS_ANY);
+ struct irq_domain *d;
+
+ d = irq_find_matching_host(node, DOMAIN_BUS_WIRED);
+ if (!d)
+ d = irq_find_matching_host(node, DOMAIN_BUS_ANY);
+
+ return d;
+}
+
+static inline struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
+ unsigned int size,
+ unsigned int first_irq,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ return irq_domain_create_simple(of_node_to_fwnode(of_node), size, first_irq, ops, host_data);
}
/**
@@ -309,6 +352,8 @@ static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_no
{
return __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
}
+
+#ifdef CONFIG_IRQ_DOMAIN_NOMAP
static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
unsigned int max_irq,
const struct irq_domain_ops *ops,
@@ -316,14 +361,10 @@ static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_nod
{
return __irq_domain_add(of_node_to_fwnode(of_node), 0, max_irq, max_irq, ops, host_data);
}
-static inline struct irq_domain *irq_domain_add_legacy_isa(
- struct device_node *of_node,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops,
- host_data);
-}
+
+extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
+#endif
+
static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
const struct irq_domain_ops *ops,
void *host_data)
@@ -353,40 +394,49 @@ extern int irq_domain_associate(struct irq_domain *domain, unsigned int irq,
extern void irq_domain_associate_many(struct irq_domain *domain,
unsigned int irq_base,
irq_hw_number_t hwirq_base, int count);
-extern void irq_domain_disassociate(struct irq_domain *domain,
- unsigned int irq);
-extern unsigned int irq_create_mapping(struct irq_domain *host,
- irq_hw_number_t hwirq);
+extern unsigned int irq_create_mapping_affinity(struct irq_domain *host,
+ irq_hw_number_t hwirq,
+ const struct irq_affinity_desc *affinity);
extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec);
extern void irq_dispose_mapping(unsigned int virq);
+static inline unsigned int irq_create_mapping(struct irq_domain *host,
+ irq_hw_number_t hwirq)
+{
+ return irq_create_mapping_affinity(host, hwirq, NULL);
+}
+
+extern struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq,
+ unsigned int *irq);
+
+static inline struct irq_desc *irq_resolve_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
+{
+ return __irq_resolve_mapping(domain, hwirq, NULL);
+}
+
/**
- * irq_linear_revmap() - Find a linux irq from a hw irq number.
+ * irq_find_mapping() - Find a linux irq from a hw irq number.
* @domain: domain owning this hardware interrupt
* @hwirq: hardware irq number in that domain space
- *
- * This is a fast path alternative to irq_find_mapping() that can be
- * called directly by irq controller code to save a handful of
- * instructions. It is always safe to call, but won't find irqs mapped
- * using the radix tree.
*/
-static inline unsigned int irq_linear_revmap(struct irq_domain *domain,
- irq_hw_number_t hwirq)
+static inline unsigned int irq_find_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
{
- return hwirq < domain->revmap_size ? domain->linear_revmap[hwirq] : 0;
+ unsigned int irq;
+
+ if (__irq_resolve_mapping(domain, hwirq, &irq))
+ return irq;
+
+ return 0;
}
-extern unsigned int irq_find_mapping(struct irq_domain *host,
- irq_hw_number_t hwirq);
-extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
-extern int irq_create_strict_mappings(struct irq_domain *domain,
- unsigned int irq_base,
- irq_hw_number_t hwirq_base, int count);
-static inline int irq_create_identity_mapping(struct irq_domain *host,
- irq_hw_number_t hwirq)
+static inline unsigned int irq_linear_revmap(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
{
- return irq_create_strict_mappings(host, hwirq, hwirq, 1);
+ return irq_find_mapping(domain, hwirq);
}
extern const struct irq_domain_ops irq_domain_simple_ops;
@@ -402,6 +452,16 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_type);
+int irq_domain_translate_twocell(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq,
+ unsigned int *out_type);
+
+int irq_domain_translate_onecell(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq,
+ unsigned int *out_type);
+
/* IPI functions */
int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest);
int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest);
@@ -410,9 +470,11 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest);
extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
unsigned int virq);
extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
- irq_hw_number_t hwirq, struct irq_chip *chip,
+ irq_hw_number_t hwirq,
+ const struct irq_chip *chip,
void *chip_data, irq_flow_handler_t handler,
void *handler_data, const char *handler_name);
+extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
extern struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
unsigned int flags, unsigned int size,
@@ -433,9 +495,10 @@ static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *par
extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
unsigned int nr_irqs, int node, void *arg,
- bool realloc, const struct cpumask *affinity);
+ bool realloc,
+ const struct irq_affinity_desc *affinity);
extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
-extern void irq_domain_activate_irq(struct irq_data *irq_data);
+extern int irq_domain_activate_irq(struct irq_data *irq_data, bool early);
extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
@@ -451,15 +514,17 @@ extern int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
unsigned int virq,
irq_hw_number_t hwirq,
- struct irq_chip *chip,
+ const struct irq_chip *chip,
void *chip_data);
-extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
extern void irq_domain_free_irqs_common(struct irq_domain *domain,
unsigned int virq,
unsigned int nr_irqs);
extern void irq_domain_free_irqs_top(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs);
+extern int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg);
+extern int irq_domain_pop_irq(struct irq_domain *domain, int virq);
+
extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
unsigned int irq_base,
unsigned int nr_irqs, void *arg);
@@ -468,6 +533,9 @@ extern void irq_domain_free_irqs_parent(struct irq_domain *domain,
unsigned int irq_base,
unsigned int nr_irqs);
+extern int irq_domain_disconnect_hierarchy(struct irq_domain *domain,
+ unsigned int virq);
+
static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
{
return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY;
@@ -494,16 +562,17 @@ static inline bool irq_domain_is_msi(struct irq_domain *domain)
return domain->flags & IRQ_DOMAIN_FLAG_MSI;
}
-static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
+static inline bool irq_domain_is_msi_parent(struct irq_domain *domain)
{
- return domain->flags & IRQ_DOMAIN_FLAG_MSI_REMAP;
+ return domain->flags & IRQ_DOMAIN_FLAG_MSI_PARENT;
}
-extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain);
+static inline bool irq_domain_is_msi_device(struct irq_domain *domain)
+{
+ return domain->flags & IRQ_DOMAIN_FLAG_MSI_DEVICE;
+}
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
-static inline void irq_domain_activate_irq(struct irq_data *data) { }
-static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
unsigned int nr_irqs, int node, void *arg)
{
@@ -538,31 +607,25 @@ static inline bool irq_domain_is_msi(struct irq_domain *domain)
return false;
}
-static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
+static inline bool irq_domain_is_msi_parent(struct irq_domain *domain)
{
return false;
}
-static inline bool
-irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
+static inline bool irq_domain_is_msi_device(struct irq_domain *domain)
{
return false;
}
+
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
#else /* CONFIG_IRQ_DOMAIN */
static inline void irq_dispose_mapping(unsigned int virq) { }
-static inline void irq_domain_activate_irq(struct irq_data *data) { }
-static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
static inline struct irq_domain *irq_find_matching_fwnode(
struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token)
{
return NULL;
}
-static inline bool irq_domain_check_msi_remap(void)
-{
- return false;
-}
#endif /* !CONFIG_IRQ_DOMAIN */
#endif /* _LINUX_IRQDOMAIN_H */
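
Pulling the reworked mapping API together: irq_create_mapping() is now a thin wrapper passing a NULL affinity descriptor, and irq_linear_revmap() simply forwards to irq_find_mapping(), both funneling into __irq_resolve_mapping(). A minimal sketch using only symbols this header exports; the domain size and hwirq are arbitrary.

#include <linux/errno.h>
#include <linux/irqdomain.h>

static int my_intc_probe(struct device_node *np)
{
	struct irq_domain *d;
	unsigned int virq;

	d = irq_domain_add_linear(np, 32, &irq_domain_simple_ops, NULL);
	if (!d)
		return -ENOMEM;

	/* Equivalent to irq_create_mapping_affinity(d, 5, NULL). */
	virq = irq_create_mapping(d, 5);
	if (!virq)
		return -EINVAL;

	/* Reverse lookup resolves to the same virq via __irq_resolve_mapping(). */
	if (irq_find_mapping(d, 5) != virq)
		return -EINVAL;

	return 0;
}
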
diff --git a/include/linux/irqdomain_defs.h b/include/linux/irqdomain_defs.h
new file mode 100644
index 000000000000..c29921fd8cd1
--- /dev/null
+++ b/include/linux/irqdomain_defs.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IRQDOMAIN_DEFS_H
+#define _LINUX_IRQDOMAIN_DEFS_H
+
+/*
+ * Should several domains have the same device node, but serve
+ * different purposes (for example one domain is for PCI/MSI, and the
+ * other for wired IRQs), they can be distinguished using a
+ * bus-specific token. Most domains are expected to only carry
+ * DOMAIN_BUS_ANY.
+ */
+enum irq_domain_bus_token {
+ DOMAIN_BUS_ANY = 0,
+ DOMAIN_BUS_WIRED,
+ DOMAIN_BUS_GENERIC_MSI,
+ DOMAIN_BUS_PCI_MSI,
+ DOMAIN_BUS_PLATFORM_MSI,
+ DOMAIN_BUS_NEXUS,
+ DOMAIN_BUS_IPI,
+ DOMAIN_BUS_FSL_MC_MSI,
+ DOMAIN_BUS_TI_SCI_INTA_MSI,
+ DOMAIN_BUS_WAKEUP,
+ DOMAIN_BUS_VMD_MSI,
+ DOMAIN_BUS_PCI_DEVICE_MSI,
+ DOMAIN_BUS_PCI_DEVICE_MSIX,
+ DOMAIN_BUS_DMAR,
+ DOMAIN_BUS_AMDVI,
+ DOMAIN_BUS_PCI_DEVICE_IMS,
+};
+
+#endif /* _LINUX_IRQDOMAIN_DEFS_H */
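
A short hedged sketch of why the token matters: two domains sharing one fwnode (say wired plus PCI/MSI) are disambiguated by tagging them, which is what lets the reworked irq_find_host() above prefer DOMAIN_BUS_WIRED before falling back to DOMAIN_BUS_ANY. The function and parameter names here are placeholders.

#include <linux/irqdomain.h>

static void my_tag_domains(struct irq_domain *wired, struct irq_domain *msi)
{
	irq_domain_update_bus_token(wired, DOMAIN_BUS_WIRED);
	irq_domain_update_bus_token(msi, DOMAIN_BUS_PCI_MSI);
}
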
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 5dd1272d1ab2..5ec0fa71399e 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/irqflags.h
*
@@ -13,35 +14,137 @@
#include <linux/typecheck.h>
#include <asm/irqflags.h>
+#include <asm/percpu.h>
+
+/* Currently lockdep_softirqs_on/off is used only by lockdep */
+#ifdef CONFIG_PROVE_LOCKING
+ extern void lockdep_softirqs_on(unsigned long ip);
+ extern void lockdep_softirqs_off(unsigned long ip);
+ extern void lockdep_hardirqs_on_prepare(void);
+ extern void lockdep_hardirqs_on(unsigned long ip);
+ extern void lockdep_hardirqs_off(unsigned long ip);
+#else
+ static inline void lockdep_softirqs_on(unsigned long ip) { }
+ static inline void lockdep_softirqs_off(unsigned long ip) { }
+ static inline void lockdep_hardirqs_on_prepare(void) { }
+ static inline void lockdep_hardirqs_on(unsigned long ip) { }
+ static inline void lockdep_hardirqs_off(unsigned long ip) { }
+#endif
#ifdef CONFIG_TRACE_IRQFLAGS
- extern void trace_softirqs_on(unsigned long ip);
- extern void trace_softirqs_off(unsigned long ip);
- extern void trace_hardirqs_on(void);
- extern void trace_hardirqs_off(void);
-# define trace_hardirq_context(p) ((p)->hardirq_context)
-# define trace_softirq_context(p) ((p)->softirq_context)
-# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled)
-# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
-# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
-# define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
-# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
-# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
-# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
+
+/* Per-task IRQ trace events information. */
+struct irqtrace_events {
+ unsigned int irq_events;
+ unsigned long hardirq_enable_ip;
+ unsigned long hardirq_disable_ip;
+ unsigned int hardirq_enable_event;
+ unsigned int hardirq_disable_event;
+ unsigned long softirq_disable_ip;
+ unsigned long softirq_enable_ip;
+ unsigned int softirq_disable_event;
+ unsigned int softirq_enable_event;
+};
+
+DECLARE_PER_CPU(int, hardirqs_enabled);
+DECLARE_PER_CPU(int, hardirq_context);
+
+extern void trace_hardirqs_on_prepare(void);
+extern void trace_hardirqs_off_finish(void);
+extern void trace_hardirqs_on(void);
+extern void trace_hardirqs_off(void);
+
+# define lockdep_hardirq_context() (raw_cpu_read(hardirq_context))
+# define lockdep_softirq_context(p) ((p)->softirq_context)
+# define lockdep_hardirqs_enabled() (this_cpu_read(hardirqs_enabled))
+# define lockdep_softirqs_enabled(p) ((p)->softirqs_enabled)
+# define lockdep_hardirq_enter() \
+do { \
+ if (__this_cpu_inc_return(hardirq_context) == 1)\
+ current->hardirq_threaded = 0; \
+} while (0)
+# define lockdep_hardirq_threaded() \
+do { \
+ current->hardirq_threaded = 1; \
+} while (0)
+# define lockdep_hardirq_exit() \
+do { \
+ __this_cpu_dec(hardirq_context); \
+} while (0)
+
+# define lockdep_hrtimer_enter(__hrtimer) \
+({ \
+ bool __expires_hardirq = true; \
+ \
+ if (!__hrtimer->is_hard) { \
+ current->irq_config = 1; \
+ __expires_hardirq = false; \
+ } \
+ __expires_hardirq; \
+})
+
+# define lockdep_hrtimer_exit(__expires_hardirq) \
+ do { \
+ if (!__expires_hardirq) \
+ current->irq_config = 0; \
+ } while (0)
+
+# define lockdep_posixtimer_enter() \
+ do { \
+ current->irq_config = 1; \
+ } while (0)
+
+# define lockdep_posixtimer_exit() \
+ do { \
+ current->irq_config = 0; \
+ } while (0)
+
+# define lockdep_irq_work_enter(_flags) \
+ do { \
+ if (!((_flags) & IRQ_WORK_HARD_IRQ)) \
+ current->irq_config = 1; \
+ } while (0)
+# define lockdep_irq_work_exit(_flags) \
+ do { \
+ if (!((_flags) & IRQ_WORK_HARD_IRQ)) \
+ current->irq_config = 0; \
+ } while (0)
+
+#else
+# define trace_hardirqs_on_prepare() do { } while (0)
+# define trace_hardirqs_off_finish() do { } while (0)
+# define trace_hardirqs_on() do { } while (0)
+# define trace_hardirqs_off() do { } while (0)
+# define lockdep_hardirq_context() 0
+# define lockdep_softirq_context(p) 0
+# define lockdep_hardirqs_enabled() 0
+# define lockdep_softirqs_enabled(p) 0
+# define lockdep_hardirq_enter() do { } while (0)
+# define lockdep_hardirq_threaded() do { } while (0)
+# define lockdep_hardirq_exit() do { } while (0)
+# define lockdep_softirq_enter() do { } while (0)
+# define lockdep_softirq_exit() do { } while (0)
+# define lockdep_hrtimer_enter(__hrtimer) false
+# define lockdep_hrtimer_exit(__context) do { } while (0)
+# define lockdep_posixtimer_enter() do { } while (0)
+# define lockdep_posixtimer_exit() do { } while (0)
+# define lockdep_irq_work_enter(__work) do { } while (0)
+# define lockdep_irq_work_exit(__work) do { } while (0)
+#endif
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT)
+# define lockdep_softirq_enter() \
+do { \
+ current->softirq_context++; \
+} while (0)
+# define lockdep_softirq_exit() \
+do { \
+ current->softirq_context--; \
+} while (0)
+
#else
-# define trace_hardirqs_on() do { } while (0)
-# define trace_hardirqs_off() do { } while (0)
-# define trace_softirqs_on(ip) do { } while (0)
-# define trace_softirqs_off(ip) do { } while (0)
-# define trace_hardirq_context(p) 0
-# define trace_softirq_context(p) 0
-# define trace_hardirqs_enabled(p) 0
-# define trace_softirqs_enabled(p) 0
-# define trace_hardirq_enter() do { } while (0)
-# define trace_hardirq_exit() do { } while (0)
-# define lockdep_softirq_enter() do { } while (0)
-# define lockdep_softirq_exit() do { } while (0)
-# define INIT_TRACE_IRQFLAGS
+# define lockdep_softirq_enter() do { } while (0)
+# define lockdep_softirq_exit() do { } while (0)
#endif
#if defined(CONFIG_IRQSOFF_TRACER) || \
@@ -53,6 +156,17 @@
# define start_critical_timings() do { } while (0)
#endif
+#ifdef CONFIG_DEBUG_IRQFLAGS
+extern void warn_bogus_irq_restore(void);
+#define raw_check_bogus_irq_restore() \
+ do { \
+ if (unlikely(!arch_irqs_disabled())) \
+ warn_bogus_irq_restore(); \
+ } while (0)
+#else
+#define raw_check_bogus_irq_restore() do { } while (0)
+#endif
+
/*
* Wrap the arch provided IRQ routines to provide appropriate checks.
*/
@@ -66,6 +180,7 @@
#define raw_local_irq_restore(flags) \
do { \
typecheck(unsigned long, flags); \
+ raw_check_bogus_irq_restore(); \
arch_local_irq_restore(flags); \
} while (0)
#define raw_local_save_flags(flags) \
@@ -86,26 +201,33 @@
* if !TRACE_IRQFLAGS.
*/
#ifdef CONFIG_TRACE_IRQFLAGS
-#define local_irq_enable() \
- do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
-#define local_irq_disable() \
- do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
+
+#define local_irq_enable() \
+ do { \
+ trace_hardirqs_on(); \
+ raw_local_irq_enable(); \
+ } while (0)
+
+#define local_irq_disable() \
+ do { \
+ bool was_disabled = raw_irqs_disabled();\
+ raw_local_irq_disable(); \
+ if (!was_disabled) \
+ trace_hardirqs_off(); \
+ } while (0)
+
#define local_irq_save(flags) \
do { \
raw_local_irq_save(flags); \
- trace_hardirqs_off(); \
+ if (!raw_irqs_disabled_flags(flags)) \
+ trace_hardirqs_off(); \
} while (0)
-
#define local_irq_restore(flags) \
do { \
- if (raw_irqs_disabled_flags(flags)) { \
- raw_local_irq_restore(flags); \
- trace_hardirqs_off(); \
- } else { \
+ if (!raw_irqs_disabled_flags(flags)) \
trace_hardirqs_on(); \
- raw_local_irq_restore(flags); \
- } \
+ raw_local_irq_restore(flags); \
} while (0)
#define safe_halt() \
@@ -119,10 +241,7 @@
#define local_irq_enable() do { raw_local_irq_enable(); } while (0)
#define local_irq_disable() do { raw_local_irq_disable(); } while (0)
-#define local_irq_save(flags) \
- do { \
- raw_local_irq_save(flags); \
- } while (0)
+#define local_irq_save(flags) do { raw_local_irq_save(flags); } while (0)
#define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0)
#define safe_halt() do { raw_safe_halt(); } while (0)
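
The reworked local_irq_save()/local_irq_restore() above emit trace_hardirqs_off()/trace_hardirqs_on() only when the interrupt state actually changes, and CONFIG_DEBUG_IRQFLAGS adds a sanity check to every restore. A minimal sketch of the pattern these macros guard; the counter and function are hypothetical, not part of the patch:

#include <linux/irqflags.h>

static unsigned long foo_events;

static void foo_count_event(void)
{
        unsigned long flags;

        /*
         * Saves the current state; trace_hardirqs_off() now fires only
         * if interrupts were enabled on entry, so nesting this inside
         * an already-disabled section emits no spurious trace event.
         */
        local_irq_save(flags);
        foo_events++;
        /*
         * With CONFIG_DEBUG_IRQFLAGS, raw_check_bogus_irq_restore()
         * warns here if something re-enabled interrupts inside the
         * critical section before the restore.
         */
        local_irq_restore(flags);
}
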
diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h
index 661bed0ed1f3..c30f454a9518 100644
--- a/include/linux/irqhandler.h
+++ b/include/linux/irqhandler.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IRQHANDLER_H
#define _LINUX_IRQHANDLER_H
@@ -9,6 +10,5 @@
struct irq_desc;
struct irq_data;
typedef void (*irq_flow_handler_t)(struct irq_desc *desc);
-typedef void (*irq_preflow_handler_t)(struct irq_data *data);
#endif
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 9669bf9d4f48..3496baa0b07f 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IRQNR_H
#define _LINUX_IRQNR_H
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h
index eb1bdcf95f2e..d426c7ad92bf 100644
--- a/include/linux/irqreturn.h
+++ b/include/linux/irqreturn.h
@@ -1,11 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IRQRETURN_H
#define _LINUX_IRQRETURN_H
/**
- * enum irqreturn
- * @IRQ_NONE interrupt was not from this device or was not handled
- * @IRQ_HANDLED interrupt was handled by this device
- * @IRQ_WAKE_THREAD handler requests to wake the handler thread
+ * enum irqreturn - irqreturn type values
+ * @IRQ_NONE: interrupt was not from this device or was not handled
+ * @IRQ_HANDLED: interrupt was handled by this device
+ * @IRQ_WAKE_THREAD: handler requests to wake the handler thread
*/
enum irqreturn {
IRQ_NONE = (0 << 0),
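
The corrected kernel-doc above covers all three values an interrupt handler may return. A hedged sketch of a handler exercising them; the device structure, register layout and bit meaning are hypothetical:

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/io.h>

struct foo_dev {
        void __iomem *status_reg;       /* hypothetical status register */
};

static irqreturn_t foo_irq(int irq, void *dev_id)
{
        struct foo_dev *foo = dev_id;
        u32 status = readl(foo->status_reg);

        if (!status)
                return IRQ_NONE;        /* not our device */

        writel(status, foo->status_reg);        /* ack the sources */

        if (status & BIT(0))
                return IRQ_WAKE_THREAD; /* defer heavy work to the thread */

        return IRQ_HANDLED;
}
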
diff --git a/include/linux/isa-dma.h b/include/linux/isa-dma.h
new file mode 100644
index 000000000000..61504a8c1b9e
--- /dev/null
+++ b/include/linux/isa-dma.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_ISA_DMA_H
+#define __LINUX_ISA_DMA_H
+
+#include <asm/dma.h>
+
+#if defined(CONFIG_PCI) && defined(CONFIG_X86_32)
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
+#endif /* __LINUX_ISA_DMA_H */
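
The new header exposes isa_dma_bridge_buggy as a real variable only where it can be set (PCI on x86-32); everywhere else it is the constant 0, so a guard like the hypothetical helper below compiles away entirely:

#include <linux/isa-dma.h>
#include <linux/types.h>

static bool foo_isa_dma_usable(void)
{
        /*
         * Non-zero only on x86-32 systems whose PCI/ISA bridge has the
         * known DMA erratum; constant-folds to true on everything else.
         */
        return !isa_dma_bridge_buggy;
}
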
diff --git a/include/linux/isa.h b/include/linux/isa.h
index f2d0258414cf..4fbbf5e36e08 100644
--- a/include/linux/isa.h
+++ b/include/linux/isa.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ISA bus.
*/
@@ -12,7 +13,7 @@
struct isa_driver {
int (*match)(struct device *, unsigned int);
int (*probe)(struct device *, unsigned int);
- int (*remove)(struct device *, unsigned int);
+ void (*remove)(struct device *, unsigned int);
void (*shutdown)(struct device *, unsigned int);
int (*suspend)(struct device *, unsigned int, pm_message_t);
int (*resume)(struct device *, unsigned int);
@@ -37,6 +38,32 @@ static inline void isa_unregister_driver(struct isa_driver *d)
}
#endif
+#define module_isa_driver_init(__isa_driver, __num_isa_dev) \
+static int __init __isa_driver##_init(void) \
+{ \
+ return isa_register_driver(&(__isa_driver), __num_isa_dev); \
+} \
+module_init(__isa_driver##_init)
+
+#define module_isa_driver_with_irq_init(__isa_driver, __num_isa_dev, __num_irq) \
+static int __init __isa_driver##_init(void) \
+{ \
+ if (__num_irq != __num_isa_dev) { \
+		pr_err("%s: number of IRQs (%u) does not match number of base addresses (%u)\n", \
+ __isa_driver.driver.name, __num_irq, __num_isa_dev); \
+ return -EINVAL; \
+ } \
+ return isa_register_driver(&(__isa_driver), __num_isa_dev); \
+} \
+module_init(__isa_driver##_init)
+
+#define module_isa_driver_exit(__isa_driver) \
+static void __exit __isa_driver##_exit(void) \
+{ \
+ isa_unregister_driver(&(__isa_driver)); \
+} \
+module_exit(__isa_driver##_exit)
+
/**
 * module_isa_driver() - Helper macro for registering an ISA driver
* @__isa_driver: isa_driver struct
@@ -47,16 +74,22 @@ static inline void isa_unregister_driver(struct isa_driver *d)
* use this macro once, and calling it replaces module_init and module_exit.
*/
#define module_isa_driver(__isa_driver, __num_isa_dev) \
-static int __init __isa_driver##_init(void) \
-{ \
- return isa_register_driver(&(__isa_driver), __num_isa_dev); \
-} \
-module_init(__isa_driver##_init); \
-static void __exit __isa_driver##_exit(void) \
-{ \
- isa_unregister_driver(&(__isa_driver)); \
-} \
-module_exit(__isa_driver##_exit);
+module_isa_driver_init(__isa_driver, __num_isa_dev); \
+module_isa_driver_exit(__isa_driver)
+
+/**
+ * module_isa_driver_with_irq() - Helper macro for registering an ISA driver with irq
+ * @__isa_driver: isa_driver struct
+ * @__num_isa_dev: number of devices to register
+ * @__num_irq: number of IRQ to register
+ *
+ * Helper macro for ISA drivers with irq that do not do anything special in
+ * module init/exit. Each module may only use this macro once, and calling it
+ * replaces module_init and module_exit.
+ */
+#define module_isa_driver_with_irq(__isa_driver, __num_isa_dev, __num_irq) \
+module_isa_driver_with_irq_init(__isa_driver, __num_isa_dev, __num_irq); \
+module_isa_driver_exit(__isa_driver)
/**
 * max_num_isa_dev() - Maximum possible number of ISA devices to register
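
Taken together, the new helpers let a driver with paired base-address and IRQ module parameters register in one line, with the _with_irq variant refusing to load when the counts differ. A hedged usage sketch; the driver name, parameters and array size are hypothetical:

#include <linux/isa.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static unsigned int base[4];
static unsigned int num_base;
module_param_hw_array(base, uint, ioport, &num_base, 0);

static unsigned int irq[4];
static unsigned int num_irq;
module_param_hw_array(irq, uint, irq, &num_irq, 0);

static int foo_match(struct device *dev, unsigned int id)
{
        return id < num_base;
}

static int foo_probe(struct device *dev, unsigned int id)
{
        return 0;       /* claim base[id] and irq[id] here */
}

static void foo_remove(struct device *dev, unsigned int id)
{
        /* release resources; note the new void return type above */
}

static struct isa_driver foo_driver = {
        .match  = foo_match,
        .probe  = foo_probe,
        .remove = foo_remove,
        .driver = { .name = "foo_isa" },
};

/* Fails module load with -EINVAL unless num_irq == num_base. */
module_isa_driver_with_irq(foo_driver, num_base, num_irq);
MODULE_LICENSE("GPL");
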
diff --git a/include/linux/isapnp.h b/include/linux/isapnp.h
index 3c77bf9b1efd..dba18c95844b 100644
--- a/include/linux/isapnp.h
+++ b/include/linux/isapnp.h
@@ -1,22 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* ISA Plug & Play support
* Copyright (c) by Jaroslav Kysela <perex@suse.cz>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef LINUX_ISAPNP_H
@@ -90,9 +75,6 @@ static inline int isapnp_proc_done(void) { return 0; }
#endif
/* compat */
-struct pnp_card *pnp_find_card(unsigned short vendor,
- unsigned short device,
- struct pnp_card *from);
struct pnp_dev *pnp_find_dev(struct pnp_card *card,
unsigned short vendor,
unsigned short function,
@@ -107,9 +89,6 @@ static inline int isapnp_cfg_end(void) { return -ENODEV; }
static inline unsigned char isapnp_read_byte(unsigned char idx) { return 0xff; }
static inline void isapnp_write_byte(unsigned char idx, unsigned char val) { ; }
-static inline struct pnp_card *pnp_find_card(unsigned short vendor,
- unsigned short device,
- struct pnp_card *from) { return NULL; }
static inline struct pnp_dev *pnp_find_dev(struct pnp_card *card,
unsigned short vendor,
unsigned short function,
diff --git a/include/linux/iscsi_boot_sysfs.h b/include/linux/iscsi_boot_sysfs.h
index 10923d730486..5f244d3f1472 100644
--- a/include/linux/iscsi_boot_sysfs.h
+++ b/include/linux/iscsi_boot_sysfs.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Export the iSCSI boot info to userland via sysfs.
*
* Copyright (C) 2010 Red Hat, Inc. All rights reserved.
* Copyright (C) 2010 Mike Christie
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _ISCSI_BOOT_SYSFS_
#define _ISCSI_BOOT_SYSFS_
diff --git a/include/linux/iscsi_ibft.h b/include/linux/iscsi_ibft.h
index 605cc5c333d9..790e7fcfc1a6 100644
--- a/include/linux/iscsi_ibft.h
+++ b/include/linux/iscsi_ibft.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2007 Red Hat, Inc.
* by Peter Jones <pjones@redhat.com>
@@ -7,40 +8,27 @@
* by Konrad Rzeszutek <ketuzsezr@darnok.org>
*
* This code exposes the iSCSI Boot Format Table to userland via sysfs.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef ISCSI_IBFT_H
#define ISCSI_IBFT_H
-#include <linux/acpi.h>
+#include <linux/types.h>
/*
- * Logical location of iSCSI Boot Format Table.
- * If the value is NULL there is no iBFT on the machine.
+ * Physical location of iSCSI Boot Format Table.
+ * If the value is 0 there is no iBFT on the machine.
*/
-extern struct acpi_table_ibft *ibft_addr;
+extern phys_addr_t ibft_phys_addr;
/*
* Routine used to find and reserve the iSCSI Boot Format Table. The
- * mapped address is set in the ibft_addr variable.
+ * physical address is set in the ibft_phys_addr variable.
*/
#ifdef CONFIG_ISCSI_IBFT_FIND
-unsigned long find_ibft_region(unsigned long *sizep);
+void reserve_ibft_region(void);
#else
-static inline unsigned long find_ibft_region(unsigned long *sizep)
-{
- *sizep = 0;
- return 0;
-}
+static inline void reserve_ibft_region(void) {}
#endif
#endif /* ISCSI_IBFT_H */
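
With the conversion above, callers deal only in a physical address: reserve_ibft_region() finds and reserves the table during early boot and records it in ibft_phys_addr, which stays 0 when no iBFT is present. A hedged sketch of the calling side; the setup hook is hypothetical, loosely modeled on arch setup code:

#include <linux/init.h>
#include <linux/iscsi_ibft.h>
#include <linux/printk.h>

static void __init foo_reserve_firmware_tables(void)
{
        /* No-op stub when CONFIG_ISCSI_IBFT_FIND is disabled. */
        reserve_ibft_region();

        if (ibft_phys_addr)
                pr_info("iBFT found at %pa\n", &ibft_phys_addr);
}
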
diff --git a/include/linux/isdn.h b/include/linux/isdn.h
deleted file mode 100644
index df97c8444f5d..000000000000
--- a/include/linux/isdn.h
+++ /dev/null
@@ -1,473 +0,0 @@
-/* $Id: isdn.h,v 1.125.2.3 2004/02/10 01:07:14 keil Exp $
- *
- * Main header for the Linux ISDN subsystem (linklevel).
- *
- * Copyright 1994,95,96 by Fritz Elfert (fritz@isdn4linux.de)
- * Copyright 1995,96 by Thinking Objects Software GmbH Wuerzburg
- * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-#ifndef __ISDN_H__
-#define __ISDN_H__
-
-
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/major.h>
-#include <asm/io.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/slab.h>
-#include <linux/timer.h>
-#include <linux/wait.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_reg.h>
-#include <linux/fcntl.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/ip.h>
-#include <linux/in.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/tcp.h>
-#include <linux/mutex.h>
-#include <uapi/linux/isdn.h>
-
-#define ISDN_TTY_MAJOR 43
-#define ISDN_TTYAUX_MAJOR 44
-#define ISDN_MAJOR 45
-
-/* The minor-devicenumbers for Channel 0 and 1 are used as arguments for
- * physical Channel-Mapping, so they MUST NOT be changed without changing
- * the correspondent code in isdn.c
- */
-
-#define ISDN_MINOR_B 0
-#define ISDN_MINOR_BMAX (ISDN_MAX_CHANNELS-1)
-#define ISDN_MINOR_CTRL 64
-#define ISDN_MINOR_CTRLMAX (64 + (ISDN_MAX_CHANNELS-1))
-#define ISDN_MINOR_PPP 128
-#define ISDN_MINOR_PPPMAX (128 + (ISDN_MAX_CHANNELS-1))
-#define ISDN_MINOR_STATUS 255
-
-#ifdef CONFIG_ISDN_PPP
-
-#ifdef CONFIG_ISDN_PPP_VJ
-# include <net/slhc_vj.h>
-#endif
-
-#include <linux/ppp_defs.h>
-#include <linux/ppp-ioctl.h>
-
-#include <linux/isdn_ppp.h>
-#endif
-
-#ifdef CONFIG_ISDN_X25
-# include <linux/concap.h>
-#endif
-
-#include <linux/isdnif.h>
-
-#define ISDN_DRVIOCTL_MASK 0x7f /* Mask for Device-ioctl */
-
-/* Until now unused */
-#define ISDN_SERVICE_VOICE 1
-#define ISDN_SERVICE_AB 1<<1
-#define ISDN_SERVICE_X21 1<<2
-#define ISDN_SERVICE_G4 1<<3
-#define ISDN_SERVICE_BTX 1<<4
-#define ISDN_SERVICE_DFUE 1<<5
-#define ISDN_SERVICE_X25 1<<6
-#define ISDN_SERVICE_TTX 1<<7
-#define ISDN_SERVICE_MIXED 1<<8
-#define ISDN_SERVICE_FW 1<<9
-#define ISDN_SERVICE_GTEL 1<<10
-#define ISDN_SERVICE_BTXN 1<<11
-#define ISDN_SERVICE_BTEL 1<<12
-
-/* Macros checking plain usage */
-#define USG_NONE(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_NONE)
-#define USG_RAW(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_RAW)
-#define USG_MODEM(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_MODEM)
-#define USG_VOICE(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_VOICE)
-#define USG_NET(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_NET)
-#define USG_FAX(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_FAX)
-#define USG_OUTGOING(x) ((x & ISDN_USAGE_OUTGOING)==ISDN_USAGE_OUTGOING)
-#define USG_MODEMORVOICE(x) (((x & ISDN_USAGE_MASK)==ISDN_USAGE_MODEM) || \
- ((x & ISDN_USAGE_MASK)==ISDN_USAGE_VOICE) )
-
-/* Timer-delays and scheduling-flags */
-#define ISDN_TIMER_RES 4 /* Main Timer-Resolution */
-#define ISDN_TIMER_02SEC (HZ/ISDN_TIMER_RES/5) /* Slow-Timer1 .2 sec */
-#define ISDN_TIMER_1SEC (HZ/ISDN_TIMER_RES) /* Slow-Timer2 1 sec */
-#define ISDN_TIMER_RINGING 5 /* tty RINGs = ISDN_TIMER_1SEC * this factor */
-#define ISDN_TIMER_KEEPINT 10 /* Cisco-Keepalive = ISDN_TIMER_1SEC * this factor */
-#define ISDN_TIMER_MODEMREAD 1
-#define ISDN_TIMER_MODEMPLUS 2
-#define ISDN_TIMER_MODEMRING 4
-#define ISDN_TIMER_MODEMXMIT 8
-#define ISDN_TIMER_NETDIAL 16
-#define ISDN_TIMER_NETHANGUP 32
-#define ISDN_TIMER_CARRIER 256 /* Wait for Carrier */
-#define ISDN_TIMER_FAST (ISDN_TIMER_MODEMREAD | ISDN_TIMER_MODEMPLUS | \
- ISDN_TIMER_MODEMXMIT)
-#define ISDN_TIMER_SLOW (ISDN_TIMER_MODEMRING | ISDN_TIMER_NETHANGUP | \
- ISDN_TIMER_NETDIAL | ISDN_TIMER_CARRIER)
-
-/* Timeout-Values for isdn_net_dial() */
-#define ISDN_TIMER_DTIMEOUT10 (10*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1)))
-#define ISDN_TIMER_DTIMEOUT15 (15*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1)))
-#define ISDN_TIMER_DTIMEOUT60 (60*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1)))
-
-/* GLOBAL_FLAGS */
-#define ISDN_GLOBAL_STOPPED 1
-
-/*=================== Start of ip-over-ISDN stuff =========================*/
-
-/* Feature- and status-flags for a net-interface */
-#define ISDN_NET_CONNECTED 0x01 /* Bound to ISDN-Channel */
-#define ISDN_NET_SECURE 0x02 /* Accept calls from phonelist only */
-#define ISDN_NET_CALLBACK 0x04 /* activate callback */
-#define ISDN_NET_CBHUP 0x08 /* hangup before callback */
-#define ISDN_NET_CBOUT 0x10 /* remote machine does callback */
-
-#define ISDN_NET_MAGIC 0x49344C02 /* for paranoia-checking */
-
-/* Phone-list-element */
-typedef struct {
- void *next;
- char num[ISDN_MSNLEN];
-} isdn_net_phone;
-
-/*
- Principles when extending structures for generic encapsulation protocol
- ("concap") support:
- - Stuff which is hardware specific (here i4l-specific) goes in
- the netdev -> local structure (here: isdn_net_local)
- - Stuff which is encapsulation protocol specific goes in the structure
- which holds the linux device structure (here: isdn_net_device)
-*/
-
-/* Local interface-data */
-typedef struct isdn_net_local_s {
- ulong magic;
- struct net_device_stats stats; /* Ethernet Statistics */
- int isdn_device; /* Index to isdn-device */
- int isdn_channel; /* Index to isdn-channel */
- int ppp_slot; /* PPPD device slot number */
- int pre_device; /* Preselected isdn-device */
- int pre_channel; /* Preselected isdn-channel */
- int exclusive; /* If non-zero idx to reserved chan.*/
- int flags; /* Connection-flags */
- int dialretry; /* Counter for Dialout-retries */
- int dialmax; /* Max. Number of Dial-retries */
- int cbdelay; /* Delay before Callback starts */
- int dtimer; /* Timeout-counter for dialing */
- char msn[ISDN_MSNLEN]; /* MSNs/EAZs for this interface */
- u_char cbhup; /* Flag: Reject Call before Callback*/
- u_char dialstate; /* State for dialing */
- u_char p_encap; /* Packet encapsulation */
- /* 0 = Ethernet over ISDN */
- /* 1 = RAW-IP */
- /* 2 = IP with type field */
- u_char l2_proto; /* Layer-2-protocol */
- /* See ISDN_PROTO_L2..-constants in */
- /* isdnif.h */
- /* 0 = X75/LAPB with I-Frames */
- /* 1 = X75/LAPB with UI-Frames */
- /* 2 = X75/LAPB with BUI-Frames */
- /* 3 = HDLC */
- u_char l3_proto; /* Layer-3-protocol */
- /* See ISDN_PROTO_L3..-constants in */
- /* isdnif.h */
- /* 0 = Transparent */
- int huptimer; /* Timeout-counter for auto-hangup */
- int charge; /* Counter for charging units */
- ulong chargetime; /* Timer for Charging info */
- int hupflags; /* Flags for charge-unit-hangup: */
- /* bit0: chargeint is invalid */
- /* bit1: Getting charge-interval */
- /* bit2: Do charge-unit-hangup */
- /* bit3: Do hangup even on incoming */
- int outgoing; /* Flag: outgoing call */
- int onhtime; /* Time to keep link up */
- int chargeint; /* Interval between charge-infos */
- int onum; /* Flag: at least 1 outgoing number */
- int cps; /* current speed of this interface */
- int transcount; /* byte-counter for cps-calculation */
- int sqfull; /* Flag: netdev-queue overloaded */
- ulong sqfull_stamp; /* Start-Time of overload */
- ulong slavedelay; /* Dynamic bundling delaytime */
- int triggercps; /* BogoCPS needed for trigger slave */
- isdn_net_phone *phone[2]; /* List of remote-phonenumbers */
- /* phone[0] = Incoming Numbers */
- /* phone[1] = Outgoing Numbers */
- isdn_net_phone *dial; /* Pointer to dialed number */
- struct net_device *master; /* Ptr to Master device for slaves */
- struct net_device *slave; /* Ptr to Slave device for masters */
- struct isdn_net_local_s *next; /* Ptr to next link in bundle */
- struct isdn_net_local_s *last; /* Ptr to last link in bundle */
- struct isdn_net_dev_s *netdev; /* Ptr to netdev */
- struct sk_buff_head super_tx_queue; /* List of supervisory frames to */
- /* be transmitted asap */
- atomic_t frame_cnt; /* number of frames currently */
- /* queued in HL driver */
- /* Ptr to orig. hard_header_cache */
- spinlock_t xmit_lock; /* used to protect the xmit path of */
- /* a particular channel (including */
- /* the frame_cnt */
-
- int pppbind; /* ippp device for bindings */
- int dialtimeout; /* How long shall we try on dialing? (jiffies) */
- int dialwait; /* How long shall we wait after failed attempt? (jiffies) */
- ulong dialstarted; /* jiffies of first dialing-attempt */
- ulong dialwait_timer; /* jiffies of earliest next dialing-attempt */
- int huptimeout; /* How long will the connection be up? (seconds) */
-#ifdef CONFIG_ISDN_X25
- struct concap_device_ops *dops; /* callbacks used by encapsulator */
-#endif
- /* use an own struct for that in later versions */
- ulong cisco_myseq; /* Local keepalive seq. for Cisco */
- ulong cisco_mineseen; /* returned keepalive seq. from remote */
- ulong cisco_yourseq; /* Remote keepalive seq. for Cisco */
- int cisco_keepalive_period; /* keepalive period */
- ulong cisco_last_slarp_in; /* jiffie of last keepalive packet we received */
- char cisco_line_state; /* state of line according to keepalive packets */
- char cisco_debserint; /* debugging flag of cisco hdlc with slarp */
- struct timer_list cisco_timer;
- struct work_struct tqueue;
-} isdn_net_local;
-
-/* the interface itself */
-typedef struct isdn_net_dev_s {
- isdn_net_local *local;
- isdn_net_local *queue; /* circular list of all bundled
- channels, which are currently
- online */
- spinlock_t queue_lock; /* lock to protect queue */
- void *next; /* Pointer to next isdn-interface */
- struct net_device *dev; /* interface to upper levels */
-#ifdef CONFIG_ISDN_PPP
- ippp_bundle * pb; /* pointer to the common bundle structure
- * with the per-bundle data */
-#endif
-#ifdef CONFIG_ISDN_X25
- struct concap_proto *cprot; /* connection oriented encapsulation protocol */
-#endif
-
-} isdn_net_dev;
-
-/*===================== End of ip-over-ISDN stuff ===========================*/
-
-/*======================= Start of ISDN-tty stuff ===========================*/
-
-#define ISDN_ASYNC_MAGIC 0x49344C01 /* for paranoia-checking */
-#define ISDN_SERIAL_XMIT_SIZE 1024 /* Default bufsize for write */
-#define ISDN_SERIAL_XMIT_MAX 4000 /* Maximum bufsize for write */
-
-#ifdef CONFIG_ISDN_AUDIO
-/* For using sk_buffs with audio we need some private variables
- * within each sk_buff. For this purpose, we declare a struct here,
- * and put it always at the private skb->cb data array. A few macros help
- * accessing the variables.
- */
-typedef struct _isdn_audio_data {
- unsigned short dle_count;
- unsigned char lock;
-} isdn_audio_data_t;
-
-#define ISDN_AUDIO_SKB_DLECOUNT(skb) (((isdn_audio_data_t *)&skb->cb[0])->dle_count)
-#define ISDN_AUDIO_SKB_LOCK(skb) (((isdn_audio_data_t *)&skb->cb[0])->lock)
-#endif
-
-/* Private data of AT-command-interpreter */
-typedef struct atemu {
- u_char profile[ISDN_MODEM_NUMREG]; /* Modem-Regs. Profile 0 */
- u_char mdmreg[ISDN_MODEM_NUMREG]; /* Modem-Registers */
- char pmsn[ISDN_MSNLEN]; /* EAZ/MSNs Profile 0 */
- char msn[ISDN_MSNLEN]; /* EAZ/MSN */
- char plmsn[ISDN_LMSNLEN]; /* Listening MSNs Profile 0 */
- char lmsn[ISDN_LMSNLEN]; /* Listening MSNs */
- char cpn[ISDN_MSNLEN]; /* CalledPartyNumber on incoming call */
- char connmsg[ISDN_CMSGLEN]; /* CONNECT-Msg from HL-Driver */
-#ifdef CONFIG_ISDN_AUDIO
- u_char vpar[10]; /* Voice-parameters */
- int lastDLE; /* Flag for voice-coding: DLE seen */
-#endif
- int mdmcmdl; /* Length of Modem-Commandbuffer */
- int pluscount; /* Counter for +++ sequence */
- u_long lastplus; /* Timestamp of last + */
- int carrierwait; /* Seconds of carrier waiting */
- char mdmcmd[255]; /* Modem-Commandbuffer */
- unsigned int charge; /* Charge units of current connection */
-} atemu;
-
-/* Private data (similar to async_struct in <linux/serial.h>) */
-typedef struct modem_info {
- int magic;
- struct tty_port port;
- int x_char; /* xon/xoff character */
- int mcr; /* Modem control register */
- int msr; /* Modem status register */
- int lsr; /* Line status register */
- int line;
- int online; /* 1 = B-Channel is up, drop data */
- /* 2 = B-Channel is up, deliver d.*/
- int dialing; /* Dial in progress or ATA */
- int closing;
- int rcvsched; /* Receive needs schedule */
- int isdn_driver; /* Index to isdn-driver */
- int isdn_channel; /* Index to isdn-channel */
- int drv_index; /* Index to dev->usage */
- int ncarrier; /* Flag: schedule NO CARRIER */
- unsigned char last_cause[8]; /* Last cause message */
- unsigned char last_num[ISDN_MSNLEN];
- /* Last phone-number */
- unsigned char last_l2; /* Last layer-2 protocol */
- unsigned char last_si; /* Last service */
- unsigned char last_lhup; /* Last hangup local? */
- unsigned char last_dir; /* Last direction (in or out) */
- struct timer_list nc_timer; /* Timer for delayed NO CARRIER */
- int send_outstanding;/* # of outstanding send-requests */
- int xmit_size; /* max. # of chars in xmit_buf */
- int xmit_count; /* # of chars in xmit_buf */
- struct sk_buff_head xmit_queue; /* transmit queue */
- atomic_t xmit_lock; /* Semaphore for isdn_tty_write */
-#ifdef CONFIG_ISDN_AUDIO
- int vonline; /* Voice-channel status */
- /* Bit 0 = recording */
- /* Bit 1 = playback */
- /* Bit 2 = playback, DLE-ETX seen */
- struct sk_buff_head dtmf_queue; /* queue for dtmf results */
- void *adpcms; /* state for adpcm decompression */
- void *adpcmr; /* state for adpcm compression */
- void *dtmf_state; /* state for dtmf decoder */
- void *silence_state; /* state for silence detection */
-#endif
-#ifdef CONFIG_ISDN_TTY_FAX
- struct T30_s *fax; /* T30 Fax Group 3 data/interface */
- int faxonline; /* Fax-channel status */
-#endif
- atemu emu; /* AT-emulator data */
- spinlock_t readlock;
-} modem_info;
-
-#define ISDN_MODEM_WINSIZE 8
-
-/* Description of one ISDN-tty */
-typedef struct _isdn_modem {
- int refcount; /* Number of opens */
- struct tty_driver *tty_modem; /* tty-device */
- struct tty_struct *modem_table[ISDN_MAX_CHANNELS]; /* ?? copied from Orig */
- struct ktermios *modem_termios[ISDN_MAX_CHANNELS];
- struct ktermios *modem_termios_locked[ISDN_MAX_CHANNELS];
- modem_info info[ISDN_MAX_CHANNELS]; /* Private data */
-} isdn_modem_t;
-
-/*======================= End of ISDN-tty stuff ============================*/
-
-/*======================== Start of V.110 stuff ============================*/
-#define V110_BUFSIZE 1024
-
-typedef struct {
- int nbytes; /* 1 Matrixbyte -> nbytes in stream */
- int nbits; /* Number of used bits in streambyte */
- unsigned char key; /* Bitmask in stream eg. 11 (nbits=2) */
- int decodelen; /* Amount of data in decodebuf */
- int SyncInit; /* Number of sync frames to send */
- unsigned char *OnlineFrame; /* Precalculated V110 idle frame */
- unsigned char *OfflineFrame; /* Precalculated V110 sync Frame */
- int framelen; /* Length of frames */
- int skbuser; /* Number of unacked userdata skbs */
- int skbidle; /* Number of unacked idle/sync skbs */
- int introducer; /* Local vars for decoder */
- int dbit;
- unsigned char b;
- int skbres; /* space to reserve in outgoing skb */
- int maxsize; /* maxbufsize of lowlevel driver */
- unsigned char *encodebuf; /* temporary buffer for encoding */
- unsigned char decodebuf[V110_BUFSIZE]; /* incomplete V110 matrices */
-} isdn_v110_stream;
-
-/*========================= End of V.110 stuff =============================*/
-
-/*======================= Start of general stuff ===========================*/
-
-typedef struct {
- char *next;
- char *private;
-} infostruct;
-
-#define DRV_FLAG_RUNNING 1
-#define DRV_FLAG_REJBUS 2
-#define DRV_FLAG_LOADED 4
-
-/* Description of hardware-level-driver */
-typedef struct _isdn_driver {
- ulong online; /* Channel-Online flags */
- ulong flags; /* Misc driver Flags */
- int locks; /* Number of locks for this driver */
- int channels; /* Number of channels */
- wait_queue_head_t st_waitq; /* Wait-Queue for status-read's */
- int maxbufsize; /* Maximum Buffersize supported */
- unsigned long pktcount; /* Until now: unused */
- int stavail; /* Chars avail on Status-device */
- isdn_if *interface; /* Interface to driver */
- int *rcverr; /* Error-counters for B-Ch.-receive */
- int *rcvcount; /* Byte-counters for B-Ch.-receive */
-#ifdef CONFIG_ISDN_AUDIO
- unsigned long DLEflag; /* Flags: Insert DLE at next read */
-#endif
- struct sk_buff_head *rpqueue; /* Pointers to start of Rcv-Queue */
- wait_queue_head_t *rcv_waitq; /* Wait-Queues for B-Channel-Reads */
- wait_queue_head_t *snd_waitq; /* Wait-Queue for B-Channel-Send's */
- char msn2eaz[10][ISDN_MSNLEN]; /* Mapping-Table MSN->EAZ */
-} isdn_driver_t;
-
-/* Main driver-data */
-typedef struct isdn_devt {
- struct module *owner;
- spinlock_t lock;
- unsigned short flags; /* Bitmapped Flags: */
- int drivers; /* Current number of drivers */
- int channels; /* Current number of channels */
- int net_verbose; /* Verbose-Flag */
- int modempoll; /* Flag: tty-read active */
- spinlock_t timerlock;
- int tflags; /* Timer-Flags: */
- /* see ISDN_TIMER_..defines */
- int global_flags;
- infostruct *infochain; /* List of open info-devs. */
- wait_queue_head_t info_waitq; /* Wait-Queue for isdninfo */
- struct timer_list timer; /* Misc.-function Timer */
- int chanmap[ISDN_MAX_CHANNELS]; /* Map minor->device-channel */
- int drvmap[ISDN_MAX_CHANNELS]; /* Map minor->driver-index */
- int usage[ISDN_MAX_CHANNELS]; /* Used by tty/ip/voice */
- char num[ISDN_MAX_CHANNELS][ISDN_MSNLEN];
- /* Remote number of active ch.*/
- int m_idx[ISDN_MAX_CHANNELS]; /* Index for mdm.... */
- isdn_driver_t *drv[ISDN_MAX_DRIVERS]; /* Array of drivers */
- isdn_net_dev *netdev; /* Linked list of net-if's */
- char drvid[ISDN_MAX_DRIVERS][20];/* Driver-ID */
- struct task_struct *profd; /* For iprofd */
- isdn_modem_t mdm; /* tty-driver-data */
- isdn_net_dev *rx_netdev[ISDN_MAX_CHANNELS]; /* rx netdev-pointers */
- isdn_net_dev *st_netdev[ISDN_MAX_CHANNELS]; /* stat netdev-pointers */
- ulong ibytes[ISDN_MAX_CHANNELS]; /* Statistics incoming bytes */
- ulong obytes[ISDN_MAX_CHANNELS]; /* Statistics outgoing bytes */
- int v110emu[ISDN_MAX_CHANNELS]; /* V.110 emulator-mode 0=none */
- atomic_t v110use[ISDN_MAX_CHANNELS]; /* Usage-Semaphore for stream */
- isdn_v110_stream *v110[ISDN_MAX_CHANNELS]; /* V.110 private data */
- struct mutex mtx; /* serialize list access*/
- unsigned long global_features;
-} isdn_dev;
-
-extern isdn_dev *dev;
-
-
-#endif /* __ISDN_H__ */
diff --git a/include/linux/isdn/capilli.h b/include/linux/isdn/capilli.h
index 11b57c485854..12be09b6883b 100644
--- a/include/linux/isdn/capilli.h
+++ b/include/linux/isdn/capilli.h
@@ -50,7 +50,7 @@ struct capi_ctr {
u16 (*send_message)(struct capi_ctr *, struct sk_buff *skb);
char *(*procinfo)(struct capi_ctr *);
- const struct file_operations *proc_fops;
+ int (*proc_show)(struct seq_file *, void *);
/* filled in before calling ready callback */
u8 manu[CAPI_MANUFACTURER_LEN]; /* CAPI_GET_MANUFACTURER */
@@ -69,7 +69,6 @@ struct capi_ctr {
unsigned short state; /* controller state */
int blocked; /* output blocked */
int traceflag; /* capi trace */
- wait_queue_head_t state_wait_queue;
struct proc_dir_entry *procent;
char procfn[128];
@@ -80,8 +79,6 @@ int detach_capi_ctr(struct capi_ctr *);
void capi_ctr_ready(struct capi_ctr * card);
void capi_ctr_down(struct capi_ctr * card);
-void capi_ctr_suspend_output(struct capi_ctr * card);
-void capi_ctr_resume_output(struct capi_ctr * card);
void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb);
// ---------------------------------------------------------------------------
@@ -91,23 +88,8 @@ struct capi_driver {
char name[32]; /* driver name */
char revision[32];
- int (*add_card)(struct capi_driver *driver, capicardparams *data);
-
/* management information for kcapi */
struct list_head list;
};
-void register_capi_driver(struct capi_driver *driver);
-void unregister_capi_driver(struct capi_driver *driver);
-
-// ---------------------------------------------------------------------------
-// library functions for use by hardware controller drivers
-
-void capilib_new_ncci(struct list_head *head, u16 applid, u32 ncci, u32 winsize);
-void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci);
-void capilib_release_appl(struct list_head *head, u16 applid);
-void capilib_release(struct list_head *head);
-void capilib_data_b3_conf(struct list_head *head, u16 applid, u32 ncci, u16 msgid);
-u16 capilib_data_b3_req(struct list_head *head, u16 applid, u32 ncci, u16 msgid);
-
#endif /* __CAPILLI_H__ */
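
After the hunk above, a CAPI hardware driver no longer supplies a full file_operations for its /proc entry; it fills in a single seq_file show callback and the kcapi core creates the entry itself. A hedged sketch of the driver side, assuming the core hands the capi_ctr to the seq_file as its private pointer:

#include <linux/seq_file.h>
#include <linux/isdn/capilli.h>

static int foo_ctr_proc_show(struct seq_file *m, void *v)
{
        struct capi_ctr *ctr = m->private;

        seq_printf(m, "state:\t%hu\n", ctr->state);
        return 0;
}

static void foo_init_ctr(struct capi_ctr *ctr)
{
        ctr->proc_show = foo_ctr_proc_show;     /* replaces ctr->proc_fops */
}
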
diff --git a/include/linux/isdn/capiutil.h b/include/linux/isdn/capiutil.h
index 44bd6046e6e2..953fd500dff7 100644
--- a/include/linux/isdn/capiutil.h
+++ b/include/linux/isdn/capiutil.h
@@ -57,460 +57,4 @@ static inline void capimsg_setu32(void *m, int off, __u32 val)
#define CAPIMSG_SETCONTROL(m, contr) capimsg_setu32(m, 8, contr)
#define CAPIMSG_SETDATALEN(m, len) capimsg_setu16(m, 16, len)
-/*----- basic-type definitions -----*/
-
-typedef __u8 *_cstruct;
-
-typedef enum {
- CAPI_COMPOSE,
- CAPI_DEFAULT
-} _cmstruct;
-
-/*
- The _cmsg structure contains all possible CAPI 2.0 parameter.
- All parameters are stored here first. The function CAPI_CMSG_2_MESSAGE
- assembles the parameter and builds CAPI2.0 conform messages.
- CAPI_MESSAGE_2_CMSG disassembles CAPI 2.0 messages and stores the
- parameter in the _cmsg structure
- */
-
-typedef struct {
- /* Header */
- __u16 ApplId;
- __u8 Command;
- __u8 Subcommand;
- __u16 Messagenumber;
-
- /* Parameter */
- union {
- __u32 adrController;
- __u32 adrPLCI;
- __u32 adrNCCI;
- } adr;
-
- _cmstruct AdditionalInfo;
- _cstruct B1configuration;
- __u16 B1protocol;
- _cstruct B2configuration;
- __u16 B2protocol;
- _cstruct B3configuration;
- __u16 B3protocol;
- _cstruct BC;
- _cstruct BChannelinformation;
- _cmstruct BProtocol;
- _cstruct CalledPartyNumber;
- _cstruct CalledPartySubaddress;
- _cstruct CallingPartyNumber;
- _cstruct CallingPartySubaddress;
- __u32 CIPmask;
- __u32 CIPmask2;
- __u16 CIPValue;
- __u32 Class;
- _cstruct ConnectedNumber;
- _cstruct ConnectedSubaddress;
- __u32 Data;
- __u16 DataHandle;
- __u16 DataLength;
- _cstruct FacilityConfirmationParameter;
- _cstruct Facilitydataarray;
- _cstruct FacilityIndicationParameter;
- _cstruct FacilityRequestParameter;
- __u16 FacilitySelector;
- __u16 Flags;
- __u32 Function;
- _cstruct HLC;
- __u16 Info;
- _cstruct InfoElement;
- __u32 InfoMask;
- __u16 InfoNumber;
- _cstruct Keypadfacility;
- _cstruct LLC;
- _cstruct ManuData;
- __u32 ManuID;
- _cstruct NCPI;
- __u16 Reason;
- __u16 Reason_B3;
- __u16 Reject;
- _cstruct Useruserdata;
-
- /* intern */
- unsigned l, p;
- unsigned char *par;
- __u8 *m;
-
- /* buffer to construct message */
- __u8 buf[180];
-
-} _cmsg;
-
-/*
- * capi_cmsg2message() assembles the parameter from _cmsg to a CAPI 2.0
- * conform message
- */
-unsigned capi_cmsg2message(_cmsg * cmsg, __u8 * msg);
-
-/*
- * capi_message2cmsg disassembles a CAPI message an writes the parameter
- * into _cmsg for easy access
- */
-unsigned capi_message2cmsg(_cmsg * cmsg, __u8 * msg);
-
-/*
- * capi_cmsg_header() fills the _cmsg structure with default values, so only
- * parameter with non default values must be changed before sending the
- * message.
- */
-unsigned capi_cmsg_header(_cmsg * cmsg, __u16 _ApplId,
- __u8 _Command, __u8 _Subcommand,
- __u16 _Messagenumber, __u32 _Controller);
-
-/*-----------------------------------------------------------------------*/
-
-/*
- * Debugging / Tracing functions
- */
-
-char *capi_cmd2str(__u8 cmd, __u8 subcmd);
-
-typedef struct {
- u_char *buf;
- u_char *p;
- size_t size;
- size_t pos;
-} _cdebbuf;
-
-#define CDEBUG_SIZE 1024
-#define CDEBUG_GSIZE 4096
-
-void cdebbuf_free(_cdebbuf *cdb);
-int cdebug_init(void);
-void cdebug_exit(void);
-
-_cdebbuf *capi_cmsg2str(_cmsg *cmsg);
-_cdebbuf *capi_message2str(__u8 *msg);
-
-/*-----------------------------------------------------------------------*/
-
-static inline void capi_cmsg_answer(_cmsg * cmsg)
-{
- cmsg->Subcommand |= 0x01;
-}
-
-/*-----------------------------------------------------------------------*/
-
-static inline void capi_fill_CONNECT_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- _cstruct NCPI)
-{
- capi_cmsg_header(cmsg, ApplId, 0x82, 0x80, Messagenumber, adr);
- cmsg->NCPI = NCPI;
-}
-
-static inline void capi_fill_FACILITY_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u16 FacilitySelector,
- _cstruct FacilityRequestParameter)
-{
- capi_cmsg_header(cmsg, ApplId, 0x80, 0x80, Messagenumber, adr);
- cmsg->FacilitySelector = FacilitySelector;
- cmsg->FacilityRequestParameter = FacilityRequestParameter;
-}
-
-static inline void capi_fill_INFO_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- _cstruct CalledPartyNumber,
- _cstruct BChannelinformation,
- _cstruct Keypadfacility,
- _cstruct Useruserdata,
- _cstruct Facilitydataarray)
-{
- capi_cmsg_header(cmsg, ApplId, 0x08, 0x80, Messagenumber, adr);
- cmsg->CalledPartyNumber = CalledPartyNumber;
- cmsg->BChannelinformation = BChannelinformation;
- cmsg->Keypadfacility = Keypadfacility;
- cmsg->Useruserdata = Useruserdata;
- cmsg->Facilitydataarray = Facilitydataarray;
-}
-
-static inline void capi_fill_LISTEN_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u32 InfoMask,
- __u32 CIPmask,
- __u32 CIPmask2,
- _cstruct CallingPartyNumber,
- _cstruct CallingPartySubaddress)
-{
- capi_cmsg_header(cmsg, ApplId, 0x05, 0x80, Messagenumber, adr);
- cmsg->InfoMask = InfoMask;
- cmsg->CIPmask = CIPmask;
- cmsg->CIPmask2 = CIPmask2;
- cmsg->CallingPartyNumber = CallingPartyNumber;
- cmsg->CallingPartySubaddress = CallingPartySubaddress;
-}
-
-static inline void capi_fill_ALERT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- _cstruct BChannelinformation,
- _cstruct Keypadfacility,
- _cstruct Useruserdata,
- _cstruct Facilitydataarray)
-{
- capi_cmsg_header(cmsg, ApplId, 0x01, 0x80, Messagenumber, adr);
- cmsg->BChannelinformation = BChannelinformation;
- cmsg->Keypadfacility = Keypadfacility;
- cmsg->Useruserdata = Useruserdata;
- cmsg->Facilitydataarray = Facilitydataarray;
-}
-
-static inline void capi_fill_CONNECT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u16 CIPValue,
- _cstruct CalledPartyNumber,
- _cstruct CallingPartyNumber,
- _cstruct CalledPartySubaddress,
- _cstruct CallingPartySubaddress,
- __u16 B1protocol,
- __u16 B2protocol,
- __u16 B3protocol,
- _cstruct B1configuration,
- _cstruct B2configuration,
- _cstruct B3configuration,
- _cstruct BC,
- _cstruct LLC,
- _cstruct HLC,
- _cstruct BChannelinformation,
- _cstruct Keypadfacility,
- _cstruct Useruserdata,
- _cstruct Facilitydataarray)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x02, 0x80, Messagenumber, adr);
- cmsg->CIPValue = CIPValue;
- cmsg->CalledPartyNumber = CalledPartyNumber;
- cmsg->CallingPartyNumber = CallingPartyNumber;
- cmsg->CalledPartySubaddress = CalledPartySubaddress;
- cmsg->CallingPartySubaddress = CallingPartySubaddress;
- cmsg->B1protocol = B1protocol;
- cmsg->B2protocol = B2protocol;
- cmsg->B3protocol = B3protocol;
- cmsg->B1configuration = B1configuration;
- cmsg->B2configuration = B2configuration;
- cmsg->B3configuration = B3configuration;
- cmsg->BC = BC;
- cmsg->LLC = LLC;
- cmsg->HLC = HLC;
- cmsg->BChannelinformation = BChannelinformation;
- cmsg->Keypadfacility = Keypadfacility;
- cmsg->Useruserdata = Useruserdata;
- cmsg->Facilitydataarray = Facilitydataarray;
-}
-
-static inline void capi_fill_DATA_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u32 Data,
- __u16 DataLength,
- __u16 DataHandle,
- __u16 Flags)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x86, 0x80, Messagenumber, adr);
- cmsg->Data = Data;
- cmsg->DataLength = DataLength;
- cmsg->DataHandle = DataHandle;
- cmsg->Flags = Flags;
-}
-
-static inline void capi_fill_DISCONNECT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- _cstruct BChannelinformation,
- _cstruct Keypadfacility,
- _cstruct Useruserdata,
- _cstruct Facilitydataarray)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x04, 0x80, Messagenumber, adr);
- cmsg->BChannelinformation = BChannelinformation;
- cmsg->Keypadfacility = Keypadfacility;
- cmsg->Useruserdata = Useruserdata;
- cmsg->Facilitydataarray = Facilitydataarray;
-}
-
-static inline void capi_fill_DISCONNECT_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- _cstruct NCPI)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x84, 0x80, Messagenumber, adr);
- cmsg->NCPI = NCPI;
-}
-
-static inline void capi_fill_MANUFACTURER_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u32 ManuID,
- __u32 Class,
- __u32 Function,
- _cstruct ManuData)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0xff, 0x80, Messagenumber, adr);
- cmsg->ManuID = ManuID;
- cmsg->Class = Class;
- cmsg->Function = Function;
- cmsg->ManuData = ManuData;
-}
-
-static inline void capi_fill_RESET_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- _cstruct NCPI)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x87, 0x80, Messagenumber, adr);
- cmsg->NCPI = NCPI;
-}
-
-static inline void capi_fill_SELECT_B_PROTOCOL_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u16 B1protocol,
- __u16 B2protocol,
- __u16 B3protocol,
- _cstruct B1configuration,
- _cstruct B2configuration,
- _cstruct B3configuration)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x41, 0x80, Messagenumber, adr);
- cmsg->B1protocol = B1protocol;
- cmsg->B2protocol = B2protocol;
- cmsg->B3protocol = B3protocol;
- cmsg->B1configuration = B1configuration;
- cmsg->B2configuration = B2configuration;
- cmsg->B3configuration = B3configuration;
-}
-
-static inline void capi_fill_CONNECT_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u16 Reject,
- __u16 B1protocol,
- __u16 B2protocol,
- __u16 B3protocol,
- _cstruct B1configuration,
- _cstruct B2configuration,
- _cstruct B3configuration,
- _cstruct ConnectedNumber,
- _cstruct ConnectedSubaddress,
- _cstruct LLC,
- _cstruct BChannelinformation,
- _cstruct Keypadfacility,
- _cstruct Useruserdata,
- _cstruct Facilitydataarray)
-{
- capi_cmsg_header(cmsg, ApplId, 0x02, 0x83, Messagenumber, adr);
- cmsg->Reject = Reject;
- cmsg->B1protocol = B1protocol;
- cmsg->B2protocol = B2protocol;
- cmsg->B3protocol = B3protocol;
- cmsg->B1configuration = B1configuration;
- cmsg->B2configuration = B2configuration;
- cmsg->B3configuration = B3configuration;
- cmsg->ConnectedNumber = ConnectedNumber;
- cmsg->ConnectedSubaddress = ConnectedSubaddress;
- cmsg->LLC = LLC;
- cmsg->BChannelinformation = BChannelinformation;
- cmsg->Keypadfacility = Keypadfacility;
- cmsg->Useruserdata = Useruserdata;
- cmsg->Facilitydataarray = Facilitydataarray;
-}
-
-static inline void capi_fill_CONNECT_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x03, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_CONNECT_B3_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x83, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_CONNECT_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u16 Reject,
- _cstruct NCPI)
-{
- capi_cmsg_header(cmsg, ApplId, 0x82, 0x83, Messagenumber, adr);
- cmsg->Reject = Reject;
- cmsg->NCPI = NCPI;
-}
-
-static inline void capi_fill_CONNECT_B3_T90_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x88, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_DATA_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u16 DataHandle)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x86, 0x83, Messagenumber, adr);
- cmsg->DataHandle = DataHandle;
-}
-
-static inline void capi_fill_DISCONNECT_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x84, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_DISCONNECT_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x04, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_FACILITY_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u16 FacilitySelector)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x80, 0x83, Messagenumber, adr);
- cmsg->FacilitySelector = FacilitySelector;
-}
-
-static inline void capi_fill_INFO_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x08, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_MANUFACTURER_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u32 ManuID,
- __u32 Class,
- __u32 Function,
- _cstruct ManuData)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0xff, 0x83, Messagenumber, adr);
- cmsg->ManuID = ManuID;
- cmsg->Class = Class;
- cmsg->Function = Function;
- cmsg->ManuData = ManuData;
-}
-
-static inline void capi_fill_RESET_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x87, 0x83, Messagenumber, adr);
-}
-
#endif /* __CAPIUTIL_H__ */
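
What remains of capiutil.h after this removal are the flat accessors that read and patch raw CAPI messages in place; the _cmsg assembly/disassembly layer is gone along with its users. A hedged sketch using two of the surviving setters; the function and its arguments are hypothetical:

#include <linux/types.h>
#include <linux/isdn/capiutil.h>

static void foo_stamp_data_b3(u8 *msg, u32 contr, u16 datalen)
{
        CAPIMSG_SETCONTROL(msg, contr);         /* __u32 at offset 8 */
        CAPIMSG_SETDATALEN(msg, datalen);       /* __u16 at offset 16 */
}
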
diff --git a/include/linux/isdn/hdlc.h b/include/linux/isdn/hdlc.h
deleted file mode 100644
index 96521370c782..000000000000
--- a/include/linux/isdn/hdlc.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * hdlc.h -- General purpose ISDN HDLC decoder.
- *
- * Implementation of a HDLC decoder/encoder in software.
- * Necessary because some ISDN devices don't have HDLC
- * controllers.
- *
- * Copyright (C)
- * 2009 Karsten Keil <keil@b1-systems.de>
- * 2002 Wolfgang Mües <wolfgang@iksw-muees.de>
- * 2001 Frode Isaksen <fisaksen@bewan.com>
- * 2001 Kai Germaschewski <kai.germaschewski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __ISDNHDLC_H__
-#define __ISDNHDLC_H__
-
-struct isdnhdlc_vars {
- int bit_shift;
- int hdlc_bits1;
- int data_bits;
- int ffbit_shift; /* encoding only */
- int state;
- int dstpos;
-
- u16 crc;
-
- u8 cbin;
- u8 shift_reg;
- u8 ffvalue;
-
- /* set if transferring data */
- u32 data_received:1;
- /* set if D channel (send idle instead of flags) */
- u32 dchannel:1;
- /* set if 56K adaptation */
- u32 do_adapt56:1;
- /* set if in closing phase (need to send CRC + flag) */
- u32 do_closing:1;
- /* set if data is bitreverse */
- u32 do_bitreverse:1;
-};
-
-/* Feature Flags */
-#define HDLC_56KBIT 0x01
-#define HDLC_DCHANNEL 0x02
-#define HDLC_BITREVERSE 0x04
-
-/*
- The return value from isdnhdlc_decode is
- the frame length, 0 if no complete frame was decoded,
- or a negative error number
-*/
-#define HDLC_FRAMING_ERROR 1
-#define HDLC_CRC_ERROR 2
-#define HDLC_LENGTH_ERROR 3
-
-extern void isdnhdlc_rcv_init(struct isdnhdlc_vars *hdlc, u32 features);
-
-extern int isdnhdlc_decode(struct isdnhdlc_vars *hdlc, const u8 *src,
- int slen, int *count, u8 *dst, int dsize);
-
-extern void isdnhdlc_out_init(struct isdnhdlc_vars *hdlc, u32 features);
-
-extern int isdnhdlc_encode(struct isdnhdlc_vars *hdlc, const u8 *src,
- u16 slen, int *count, u8 *dst, int dsize);
-
-#endif /* __ISDNHDLC_H__ */
diff --git a/include/linux/isdn_divertif.h b/include/linux/isdn_divertif.h
deleted file mode 100644
index 19ab361f9f07..000000000000
--- a/include/linux/isdn_divertif.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* $Id: isdn_divertif.h,v 1.4.6.1 2001/09/23 22:25:05 kai Exp $
- *
- * Header for the diversion supplementary interface for i4l.
- *
- * Author Werner Cornelius (werner@titro.de)
- * Copyright by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-#ifndef _LINUX_ISDN_DIVERTIF_H
-#define _LINUX_ISDN_DIVERTIF_H
-
-#include <linux/isdnif.h>
-#include <linux/types.h>
-#include <uapi/linux/isdn_divertif.h>
-
-/***************************************************************/
-/* structure exchanging data between isdn hl and divert module */
-/***************************************************************/
-typedef struct
- { ulong if_magic; /* magic info and version */
- int cmd; /* command */
- int (*stat_callback)(isdn_ctrl *); /* supplied by divert module when calling */
- int (*ll_cmd)(isdn_ctrl *); /* supplied by hl on return */
- char * (*drv_to_name)(int); /* map a driver id to name, supplied by hl */
- int (*name_to_drv)(char *); /* map a driver id to name, supplied by hl */
- } isdn_divert_if;
-
-/*********************/
-/* function register */
-/*********************/
-extern int DIVERT_REG_NAME(isdn_divert_if *);
-#endif /* _LINUX_ISDN_DIVERTIF_H */
diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h
deleted file mode 100644
index a0070c6dfaf8..000000000000
--- a/include/linux/isdn_ppp.h
+++ /dev/null
@@ -1,194 +0,0 @@
-/* Linux ISDN subsystem, sync PPP, interface to ipppd
- *
- * Copyright 1994-1999 by Fritz Elfert (fritz@isdn4linux.de)
- * Copyright 1995,96 Thinking Objects Software GmbH Wuerzburg
- * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de)
- * Copyright 2000-2002 by Kai Germaschewski (kai@germaschewski.name)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-#ifndef _LINUX_ISDN_PPP_H
-#define _LINUX_ISDN_PPP_H
-
-
-
-
-#ifdef CONFIG_IPPP_FILTER
-#include <linux/filter.h>
-#endif
-#include <uapi/linux/isdn_ppp.h>
-
-#define DECOMP_ERR_NOMEM (-10)
-
-#define MP_END_FRAG 0x40
-#define MP_BEGIN_FRAG 0x80
-
-#define MP_MAX_QUEUE_LEN 16
-
-/*
- * We need a way for the decompressor to influence the generation of CCP
- * Reset-Requests in a variety of ways. The decompressor is already returning
- * a lot of information (generated skb length, error conditions) so we use
- * another parameter. This parameter is a pointer to a structure which is
- * to be marked valid by the decompressor and only in this case is ever used.
- * Furthermore, the only case where this data is used is when the decom-
- * pressor returns DECOMP_ERROR.
- *
- * We use this same struct for the reset entry of the compressor to commu-
- * nicate to its caller how to deal with sending of a Reset Ack. In this
- * case, expra is not used, but other options still apply (suppressing
- * sending with rsend, appending arbitrary data, etc).
- */
-
-#define IPPP_RESET_MAXDATABYTES 32
-
-struct isdn_ppp_resetparams {
- unsigned char valid:1; /* rw Is this structure filled at all ? */
- unsigned char rsend:1; /* rw Should we send one at all ? */
- unsigned char idval:1; /* rw Is the id field valid ? */
- unsigned char dtval:1; /* rw Is the data field valid ? */
- unsigned char expra:1; /* rw Is an Ack expected for this Req ? */
- unsigned char id; /* wo Send CCP ResetReq with this id */
- unsigned short maxdlen; /* ro Max bytes to be stored in data field */
- unsigned short dlen; /* rw Bytes stored in data field */
- unsigned char *data; /* wo Data for ResetReq info field */
-};
-
-/*
- * this is an 'old friend' from ppp-comp.h under a new name
- * check the original include for more information
- */
-struct isdn_ppp_compressor {
- struct isdn_ppp_compressor *next, *prev;
- struct module *owner;
- int num; /* CCP compression protocol number */
-
- void *(*alloc) (struct isdn_ppp_comp_data *);
- void (*free) (void *state);
- int (*init) (void *state, struct isdn_ppp_comp_data *,
- int unit,int debug);
-
- /* The reset entry needs to get more exact information about the
- ResetReq or ResetAck it was called with. The parameters are
- obvious. If reset is called without a Req or Ack frame which
- could be handed into it, code MUST be set to 0. Using rsparm,
- the reset entry can control if and how a ResetAck is returned. */
-
- void (*reset) (void *state, unsigned char code, unsigned char id,
- unsigned char *data, unsigned len,
- struct isdn_ppp_resetparams *rsparm);
-
- int (*compress) (void *state, struct sk_buff *in,
- struct sk_buff *skb_out, int proto);
-
- int (*decompress) (void *state,struct sk_buff *in,
- struct sk_buff *skb_out,
- struct isdn_ppp_resetparams *rsparm);
-
- void (*incomp) (void *state, struct sk_buff *in,int proto);
- void (*stat) (void *state, struct compstat *stats);
-};
-
-extern int isdn_ppp_register_compressor(struct isdn_ppp_compressor *);
-extern int isdn_ppp_unregister_compressor(struct isdn_ppp_compressor *);
-extern int isdn_ppp_dial_slave(char *);
-extern int isdn_ppp_hangup_slave(char *);
-
-typedef struct {
- unsigned long seqerrs;
- unsigned long frame_drops;
- unsigned long overflows;
- unsigned long max_queue_len;
-} isdn_mppp_stats;
-
-typedef struct {
- int mp_mrru; /* unused */
- struct sk_buff * frags; /* fragments sl list -- use skb->next */
- long frames; /* number of frames in the frame list */
- unsigned int seq; /* last processed packet seq #: any packets
- * with smaller seq # will be dropped
- * unconditionally */
- spinlock_t lock;
- int ref_ct;
- /* statistics */
- isdn_mppp_stats stats;
-} ippp_bundle;
-
-#define NUM_RCV_BUFFS 64
-
-struct ippp_buf_queue {
- struct ippp_buf_queue *next;
- struct ippp_buf_queue *last;
- char *buf; /* NULL here indicates end of queue */
- int len;
-};
-
-/* The data structure for one CCP reset transaction */
-enum ippp_ccp_reset_states {
- CCPResetIdle,
- CCPResetSentReq,
- CCPResetRcvdReq,
- CCPResetSentAck,
- CCPResetRcvdAck
-};
-
-struct ippp_ccp_reset_state {
- enum ippp_ccp_reset_states state; /* State of this transaction */
- struct ippp_struct *is; /* Backlink to device stuff */
- unsigned char id; /* Backlink id index */
- unsigned char ta:1; /* The timer is active (flag) */
- unsigned char expra:1; /* We expect a ResetAck at all */
- int dlen; /* Databytes stored in data */
- struct timer_list timer; /* For timeouts/retries */
- /* This is a hack but seems sufficient for the moment. We do not want
- to have this be yet another allocation for some bytes, it is more
- memory management overhead than the whole mess is worth. */
- unsigned char data[IPPP_RESET_MAXDATABYTES];
-};
-
-/* The data structure keeping track of the currently outstanding CCP Reset
- transactions. */
-struct ippp_ccp_reset {
- struct ippp_ccp_reset_state *rs[256]; /* One per possible id */
- unsigned char lastid; /* Last id allocated by the engine */
-};
-
-struct ippp_struct {
- struct ippp_struct *next_link;
- int state;
- spinlock_t buflock;
- struct ippp_buf_queue rq[NUM_RCV_BUFFS]; /* packet queue for isdn_ppp_read() */
- struct ippp_buf_queue *first; /* pointer to (current) first packet */
- struct ippp_buf_queue *last; /* pointer to (current) last used packet in queue */
- wait_queue_head_t wq;
- struct task_struct *tk;
- unsigned int mpppcfg;
- unsigned int pppcfg;
- unsigned int mru;
- unsigned int mpmru;
- unsigned int mpmtu;
- unsigned int maxcid;
- struct isdn_net_local_s *lp;
- int unit;
- int minor;
- unsigned int last_link_seqno;
- long mp_seqno;
-#ifdef CONFIG_ISDN_PPP_VJ
- unsigned char *cbuf;
- struct slcompress *slcomp;
-#endif
-#ifdef CONFIG_IPPP_FILTER
- struct bpf_prog *pass_filter; /* filter for packets to pass */
- struct bpf_prog *active_filter; /* filter for pkts to reset idle */
-#endif
- unsigned long debug;
- struct isdn_ppp_compressor *compressor,*decompressor;
- struct isdn_ppp_compressor *link_compressor,*link_decompressor;
- void *decomp_stat,*comp_stat,*link_decomp_stat,*link_comp_stat;
- struct ippp_ccp_reset *reset; /* Allocated on demand, may never be needed */
- unsigned long compflags;
-};
-
-#endif /* _LINUX_ISDN_PPP_H */
diff --git a/include/linux/isdnif.h b/include/linux/isdnif.h
deleted file mode 100644
index 8d80fdc68647..000000000000
--- a/include/linux/isdnif.h
+++ /dev/null
@@ -1,505 +0,0 @@
-/* $Id: isdnif.h,v 1.43.2.2 2004/01/12 23:08:35 keil Exp $
- *
- * Linux ISDN subsystem
- * Definition of the interface between the subsystem and its low-level drivers.
- *
- * Copyright 1994,95,96 by Fritz Elfert (fritz@isdn4linux.de)
- * Copyright 1995,96 Thinking Objects Software GmbH Wuerzburg
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-#ifndef __ISDNIF_H__
-#define __ISDNIF_H__
-
-
-#include <linux/skbuff.h>
-#include <uapi/linux/isdnif.h>
-
-/***************************************************************************/
-/* Extensions made by Werner Cornelius (werner@ikt.de) */
-/* */
-/* The proceed command holds a incoming call in a state to leave processes */
-/* enough time to check whether ist should be accepted. */
-/* The PROT_IO Command extends the interface to make protocol dependent */
-/* features available (call diversion, call waiting...). */
-/* */
-/* The PROT_IO Command is executed with the desired driver id and the arg */
-/* parameter coded as follows: */
-/* The lower 8 bits of arg contain the desired protocol from ISDN_PTYPE */
-/* definitions. The upper 24 bits represent the protocol specific cmd/stat.*/
-/* Any additional data is protocol and command specific. */
-/* This mechanism also applies to the statcallb callback STAT_PROT. */
-/* */
-/* This suggested extension permits an easy expansion of protocol specific */
-/* handling. Extensions may be added at any time without changing the HL */
-/* driver code and without causing conflicts or requiring recertification.*/
-/* The well known CAPI 2.0 interface handles such extensions in a similar */
-/* way. Perhaps a protocol specific module may be added and separately */
-/* loaded and linked to the basic isdn module for handling. */
-/***************************************************************************/
-
-/*****************/
-/* DSS1 commands */
-/*****************/
-#define DSS1_CMD_INVOKE ((0x00 << 8) | ISDN_PTYPE_EURO) /* invoke a supplementary service */
-#define DSS1_CMD_INVOKE_ABORT ((0x01 << 8) | ISDN_PTYPE_EURO) /* abort an invoke cmd */
-
-/*******************************/
-/* DSS1 Status callback values */
-/*******************************/
-#define DSS1_STAT_INVOKE_RES ((0x80 << 8) | ISDN_PTYPE_EURO) /* Result for invocation */
-#define DSS1_STAT_INVOKE_ERR ((0x81 << 8) | ISDN_PTYPE_EURO) /* Error Return for invocation */
-#define DSS1_STAT_INVOKE_BRD ((0x82 << 8) | ISDN_PTYPE_EURO) /* Deliver invoke broadcast info */
-
-
-/*********************************************************************/
-/* structures for DSS1 commands and callback */
-/* */
-/* An action is invoked by sending a DSS1_CMD_INVOKE. The ll_id, proc, */
-/* timeout, datalen and data fields must be set before calling. */
-/* */
-/* The return value is a positive hl_id value also delivered in the */
-/* hl_id field. A value of zero signals that no hl_id capacity is left.*/
-/* A negative return value signals errors in LL. So if the return */
-/* value is <= 0 no action in LL will be taken -> request ignored */
-/* */
-/* The timeout field must be filled with a positive value specifying */
-/* the amount of time the INVOKED process waits for a reaction from */
-/* the network. */
-/* If a response (either error or result) is received during this     */
-/* interval, a reporting callback is initiated, the process will      */
-/* be deleted, and the hl identifier will be freed.                   */
-/* If no response is received during the specified interval, an error */
-/* callback is initiated with timeout set to -1 and datalen set       */
-/* to 0.                                                              */
-/* If timeout is set to a value <= 0 during INVOCATION, the process is*/
-/* immediately deleted after sending the data. No callback occurs!    */
-/* */
-/* A currently waiting process may be aborted with INVOKE_ABORT. No */
-/* callback will occur when a process has been aborted. */
-/* */
-/* Broadcast invoke frames from the network are reported via the */
-/* STAT_INVOKE_BRD callback. The ll_id is set to 0, the other fields */
-/* are supplied by the network and not by the HL. */
-/*********************************************************************/
-
-/*****************/
-/* NI1 commands */
-/*****************/
-#define NI1_CMD_INVOKE ((0x00 << 8) | ISDN_PTYPE_NI1) /* invoke a supplementary service */
-#define NI1_CMD_INVOKE_ABORT ((0x01 << 8) | ISDN_PTYPE_NI1) /* abort an invoke cmd */
-
-/*******************************/
-/* NI1 Status callback values */
-/*******************************/
-#define NI1_STAT_INVOKE_RES ((0x80 << 8) | ISDN_PTYPE_NI1) /* Result for invocation */
-#define NI1_STAT_INVOKE_ERR ((0x81 << 8) | ISDN_PTYPE_NI1) /* Error Return for invocation */
-#define NI1_STAT_INVOKE_BRD ((0x82 << 8) | ISDN_PTYPE_NI1) /* Deliver invoke broadcast info */
-
-typedef struct
- { ulong ll_id; /* ID supplied by LL when executing */
- /* a command and returned by HL for */
- /* INVOKE_RES and INVOKE_ERR */
- int hl_id; /* ID supplied by HL when called */
- /* for executing a cmd and delivered */
- /* for results and errors */
- /* must be supplied by LL when aborting*/
- int proc; /* invoke procedure used by CMD_INVOKE */
- /* returned by callback and broadcast */
- int timeout; /* timeout for INVOKE CMD in ms */
- /* -1 in stat callback when timed out */
- /* error value when error callback */
- int datalen; /* length of cmd or stat data */
-    u_char *data;/* pointer to data delivered or sent */
- } isdn_cmd_stat;
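
To make the invocation contract above concrete, here is a hedged sketch of issuing a DSS1 invoke through the PROT_IO mechanism (ISDN_CMD_PROT_IO and isdn_ctrl are defined further down in this header; all field values below are placeholders, not prescribed ones):

	/* Sketch only: invoke a DSS1 supplementary service. */
	static int dss1_invoke_example(isdn_if *iface, int driver_id)
	{
		isdn_ctrl c;

		c.driver = driver_id;
		c.command = ISDN_CMD_PROT_IO;	/* protocol-specific command path */
		c.arg = DSS1_CMD_INVOKE;	/* ISDN_PTYPE_EURO in the low 8 bits */
		c.parm.dss1_io.ll_id = 1;	/* chosen by the linklevel */
		c.parm.dss1_io.proc = 0x3d;	/* invoke procedure (placeholder) */
		c.parm.dss1_io.timeout = 4000;	/* wait up to 4 s for the network */
		c.parm.dss1_io.datalen = 0;
		c.parm.dss1_io.data = NULL;
		/* Per the block above: >0 = assigned hl_id, 0 = no hl_id
		 * capacity left, <0 = error in the LL (request ignored). */
		return iface->command(&c);
	}
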
-
-/*
- * Commands from linklevel to lowlevel
- *
- */
-#define ISDN_CMD_IOCTL 0 /* Perform ioctl */
-#define ISDN_CMD_DIAL 1 /* Dial out */
-#define ISDN_CMD_ACCEPTD 2 /* Accept an incoming call on D-Chan. */
-#define ISDN_CMD_ACCEPTB 3 /* Request B-Channel connect. */
-#define ISDN_CMD_HANGUP 4 /* Hangup */
-#define ISDN_CMD_CLREAZ 5 /* Clear EAZ(s) of channel */
-#define ISDN_CMD_SETEAZ 6 /* Set EAZ(s) of channel */
-#define ISDN_CMD_GETEAZ 7 /* Get EAZ(s) of channel */
-#define ISDN_CMD_SETSIL 8 /* Set Service-Indicator-List of channel */
-#define ISDN_CMD_GETSIL 9 /* Get Service-Indicator-List of channel */
-#define ISDN_CMD_SETL2 10 /* Set B-Chan. Layer2-Parameter */
-#define ISDN_CMD_GETL2 11 /* Get B-Chan. Layer2-Parameter */
-#define ISDN_CMD_SETL3 12 /* Set B-Chan. Layer3-Parameter */
-#define ISDN_CMD_GETL3 13 /* Get B-Chan. Layer3-Parameter */
-// #define ISDN_CMD_LOCK 14 /* Signal usage by upper levels */
-// #define ISDN_CMD_UNLOCK 15 /* Release usage-lock */
-#define ISDN_CMD_SUSPEND 16 /* Suspend connection */
-#define ISDN_CMD_RESUME 17 /* Resume connection */
-#define ISDN_CMD_PROCEED 18 /* Proceed with call establishment */
-#define ISDN_CMD_ALERT 19 /* Alert after Proceeding */
-#define ISDN_CMD_REDIR 20 /* Redirect an incoming call */
-#define ISDN_CMD_PROT_IO 21 /* Protocol specific commands */
-#define CAPI_PUT_MESSAGE 22 /* CAPI message send down or up */
-#define ISDN_CMD_FAXCMD 23 /* FAX commands to HL-driver */
-#define ISDN_CMD_AUDIO 24 /* DSP, DTMF, ... settings */
-
-/*
- * Status-Values delivered from lowlevel to linklevel via
- * statcallb().
- *
- */
-#define ISDN_STAT_STAVAIL 256 /* Raw status-data available */
-#define ISDN_STAT_ICALL 257 /* Incoming call detected */
-#define ISDN_STAT_RUN 258 /* Signal protocol-code is running */
-#define ISDN_STAT_STOP 259 /* Signal halt of protocol-code */
-#define ISDN_STAT_DCONN 260 /* Signal D-Channel connect */
-#define ISDN_STAT_BCONN 261 /* Signal B-Channel connect */
-#define ISDN_STAT_DHUP 262 /* Signal D-Channel disconnect */
-#define ISDN_STAT_BHUP 263 /* Signal B-Channel disconnect */
-#define ISDN_STAT_CINF 264 /* Charge-Info */
-#define ISDN_STAT_LOAD 265 /* Signal new lowlevel-driver is loaded */
-#define ISDN_STAT_UNLOAD 266 /* Signal unload of lowlevel-driver */
-#define ISDN_STAT_BSENT 267 /* Signal packet sent */
-#define ISDN_STAT_NODCH 268 /* Signal no D-Channel */
-#define ISDN_STAT_ADDCH 269 /* Add more Channels */
-#define ISDN_STAT_CAUSE 270 /* Cause-Message */
-#define ISDN_STAT_ICALLW 271 /* Incoming call without B-chan waiting */
-#define ISDN_STAT_REDIR 272 /* Redir result */
-#define ISDN_STAT_PROT 273 /* protocol IO specific callback */
-#define ISDN_STAT_DISPLAY 274 /* deliver a received display message */
-#define ISDN_STAT_L1ERR 275 /* Signal Layer-1 Error */
-#define ISDN_STAT_FAXIND 276 /* FAX indications from HL-driver */
-#define ISDN_STAT_AUDIO 277 /* DTMF, DSP indications */
-#define ISDN_STAT_DISCH 278 /* Disable/Enable channel usage */
-
-/*
- * Audio commands
- */
-#define ISDN_AUDIO_SETDD 0 /* Set DTMF detection */
-#define ISDN_AUDIO_DTMF 1 /* Rx/Tx DTMF */
-
-/*
- * Values for errcode field
- */
-#define ISDN_STAT_L1ERR_SEND 1
-#define ISDN_STAT_L1ERR_RECV 2
-
-/*
- * Values for feature-field of interface-struct.
- */
-/* Layer 2 */
-#define ISDN_FEATURE_L2_X75I (0x0001 << ISDN_PROTO_L2_X75I)
-#define ISDN_FEATURE_L2_X75UI (0x0001 << ISDN_PROTO_L2_X75UI)
-#define ISDN_FEATURE_L2_X75BUI (0x0001 << ISDN_PROTO_L2_X75BUI)
-#define ISDN_FEATURE_L2_HDLC (0x0001 << ISDN_PROTO_L2_HDLC)
-#define ISDN_FEATURE_L2_TRANS (0x0001 << ISDN_PROTO_L2_TRANS)
-#define ISDN_FEATURE_L2_X25DTE (0x0001 << ISDN_PROTO_L2_X25DTE)
-#define ISDN_FEATURE_L2_X25DCE (0x0001 << ISDN_PROTO_L2_X25DCE)
-#define ISDN_FEATURE_L2_V11096 (0x0001 << ISDN_PROTO_L2_V11096)
-#define ISDN_FEATURE_L2_V11019 (0x0001 << ISDN_PROTO_L2_V11019)
-#define ISDN_FEATURE_L2_V11038 (0x0001 << ISDN_PROTO_L2_V11038)
-#define ISDN_FEATURE_L2_MODEM (0x0001 << ISDN_PROTO_L2_MODEM)
-#define ISDN_FEATURE_L2_FAX (0x0001 << ISDN_PROTO_L2_FAX)
-#define ISDN_FEATURE_L2_HDLC_56K (0x0001 << ISDN_PROTO_L2_HDLC_56K)
-
-#define ISDN_FEATURE_L2_MASK (0x0FFFF) /* Max. 16 protocols */
-#define ISDN_FEATURE_L2_SHIFT (0)
-
-/* Layer 3 */
-#define ISDN_FEATURE_L3_TRANS (0x10000 << ISDN_PROTO_L3_TRANS)
-#define ISDN_FEATURE_L3_TRANSDSP (0x10000 << ISDN_PROTO_L3_TRANSDSP)
-#define ISDN_FEATURE_L3_FCLASS2 (0x10000 << ISDN_PROTO_L3_FCLASS2)
-#define ISDN_FEATURE_L3_FCLASS1 (0x10000 << ISDN_PROTO_L3_FCLASS1)
-
-#define ISDN_FEATURE_L3_MASK (0x0FF0000) /* Max. 8 Protocols */
-#define ISDN_FEATURE_L3_SHIFT (16)
-
-/* Signaling */
-#define ISDN_FEATURE_P_UNKNOWN (0x1000000 << ISDN_PTYPE_UNKNOWN)
-#define ISDN_FEATURE_P_1TR6 (0x1000000 << ISDN_PTYPE_1TR6)
-#define ISDN_FEATURE_P_EURO (0x1000000 << ISDN_PTYPE_EURO)
-#define ISDN_FEATURE_P_NI1 (0x1000000 << ISDN_PTYPE_NI1)
-
-#define ISDN_FEATURE_P_MASK (0x0FF000000) /* Max. 8 Protocols */
-#define ISDN_FEATURE_P_SHIFT (24)
-
-typedef struct setup_parm {
- unsigned char phone[32]; /* Remote Phone-Number */
- unsigned char eazmsn[32]; /* Local EAZ or MSN */
- unsigned char si1; /* Service Indicator 1 */
- unsigned char si2; /* Service Indicator 2 */
- unsigned char plan; /* Numbering plan */
- unsigned char screen; /* Screening info */
-} setup_parm;
-
-
-#ifdef CONFIG_ISDN_TTY_FAX
-/* T.30 Fax G3 */
-
-#define FAXIDLEN 21
-
-typedef struct T30_s {
- /* session parameters */
- __u8 resolution;
- __u8 rate;
- __u8 width;
- __u8 length;
- __u8 compression;
- __u8 ecm;
- __u8 binary;
- __u8 scantime;
- __u8 id[FAXIDLEN];
- /* additional parameters */
- __u8 phase;
- __u8 direction;
- __u8 code;
- __u8 badlin;
- __u8 badmul;
- __u8 bor;
- __u8 fet;
- __u8 pollid[FAXIDLEN];
- __u8 cq;
- __u8 cr;
- __u8 ctcrty;
- __u8 minsp;
- __u8 phcto;
- __u8 rel;
- __u8 nbc;
- /* remote station parameters */
- __u8 r_resolution;
- __u8 r_rate;
- __u8 r_width;
- __u8 r_length;
- __u8 r_compression;
- __u8 r_ecm;
- __u8 r_binary;
- __u8 r_scantime;
- __u8 r_id[FAXIDLEN];
- __u8 r_code;
-} __packed T30_s;
-
-#define ISDN_TTY_FAX_CONN_IN 0
-#define ISDN_TTY_FAX_CONN_OUT 1
-
-#define ISDN_TTY_FAX_FCON 0
-#define ISDN_TTY_FAX_DIS 1
-#define ISDN_TTY_FAX_FTT 2
-#define ISDN_TTY_FAX_MCF 3
-#define ISDN_TTY_FAX_DCS 4
-#define ISDN_TTY_FAX_TRAIN_OK 5
-#define ISDN_TTY_FAX_EOP 6
-#define ISDN_TTY_FAX_EOM 7
-#define ISDN_TTY_FAX_MPS 8
-#define ISDN_TTY_FAX_DTC 9
-#define ISDN_TTY_FAX_RID 10
-#define ISDN_TTY_FAX_HNG 11
-#define ISDN_TTY_FAX_DT 12
-#define ISDN_TTY_FAX_FCON_I 13
-#define ISDN_TTY_FAX_DR 14
-#define ISDN_TTY_FAX_ET 15
-#define ISDN_TTY_FAX_CFR 16
-#define ISDN_TTY_FAX_PTS 17
-#define ISDN_TTY_FAX_SENT 18
-
-#define ISDN_FAX_PHASE_IDLE 0
-#define ISDN_FAX_PHASE_A 1
-#define ISDN_FAX_PHASE_B 2
-#define ISDN_FAX_PHASE_C 3
-#define ISDN_FAX_PHASE_D 4
-#define ISDN_FAX_PHASE_E 5
-
-#endif /* TTY_FAX */
-
-#define ISDN_FAX_CLASS1_FAE 0
-#define ISDN_FAX_CLASS1_FTS 1
-#define ISDN_FAX_CLASS1_FRS 2
-#define ISDN_FAX_CLASS1_FTM 3
-#define ISDN_FAX_CLASS1_FRM 4
-#define ISDN_FAX_CLASS1_FTH 5
-#define ISDN_FAX_CLASS1_FRH 6
-#define ISDN_FAX_CLASS1_CTRL 7
-
-#define ISDN_FAX_CLASS1_OK 0
-#define ISDN_FAX_CLASS1_CONNECT 1
-#define ISDN_FAX_CLASS1_NOCARR 2
-#define ISDN_FAX_CLASS1_ERROR 3
-#define ISDN_FAX_CLASS1_FCERROR 4
-#define ISDN_FAX_CLASS1_QUERY 5
-
-typedef struct {
- __u8 cmd;
- __u8 subcmd;
- __u8 para[50];
-} aux_s;
-
-#define AT_COMMAND 0
-#define AT_EQ_VALUE 1
-#define AT_QUERY 2
-#define AT_EQ_QUERY 3
-
-/* CAPI structs */
-
-/* this is compatible to the old union size */
-#define MAX_CAPI_PARA_LEN 50
-
-typedef struct {
- /* Header */
- __u16 Length;
- __u16 ApplId;
- __u8 Command;
- __u8 Subcommand;
- __u16 Messagenumber;
-
- /* Parameter */
- union {
- __u32 Controller;
- __u32 PLCI;
- __u32 NCCI;
- } adr;
- __u8 para[MAX_CAPI_PARA_LEN];
-} capi_msg;
-
-/*
- * Structure for exchanging above infos
- *
- */
-typedef struct {
- int driver; /* Lowlevel-Driver-ID */
- int command; /* Command or Status (see above) */
- ulong arg; /* Additional Data */
- union {
- ulong errcode; /* Type of error with STAT_L1ERR */
- int length; /* Amount of bytes sent with STAT_BSENT */
- u_char num[50]; /* Additional Data */
- setup_parm setup;/* For SETUP msg */
- capi_msg cmsg; /* For CAPI like messages */
- char display[85];/* display message data */
- isdn_cmd_stat isdn_io; /* ISDN IO-parameter/result */
- aux_s aux; /* for modem commands/indications */
-#ifdef CONFIG_ISDN_TTY_FAX
- T30_s *fax; /* Pointer to ttys fax struct */
-#endif
- ulong userdata; /* User Data */
- } parm;
-} isdn_ctrl;
-
-#define dss1_io isdn_io
-#define ni1_io isdn_io
-
-/*
- * The interface-struct itself (initialized at load-time of lowlevel-driver)
- *
- * See Documentation/isdn/INTERFACE for a description of how the
- * communication between the ISDN subsystem and its drivers is done.
- *
- */
-typedef struct {
- struct module *owner;
-
- /* Number of channels supported by this driver
- */
- int channels;
-
- /*
- * Maximum Size of transmit/receive-buffer this driver supports.
- */
- int maxbufsize;
-
- /* Feature-Flags for this driver.
- * See defines ISDN_FEATURE_... for Values
- */
- unsigned long features;
-
- /*
- * Needed for calculating
- * dev->hard_header_len = linklayer header + hl_hdrlen;
-	 * Drivers not supporting sk_buff's should set this to 0.
- */
- unsigned short hl_hdrlen;
-
- /*
- * Receive-Callback using sk_buff's
- * Parameters:
- * int Driver-ID
- * int local channel-number (0 ...)
- * struct sk_buff *skb received Data
- */
- void (*rcvcallb_skb)(int, int, struct sk_buff *);
-
- /* Status-Callback
- * Parameters:
- * isdn_ctrl*
- * driver = Driver ID.
- * command = One of above ISDN_STAT_... constants.
- * arg = depending on status-type.
- * num = depending on status-type.
- */
- int (*statcallb)(isdn_ctrl*);
-
- /* Send command
- * Parameters:
- * isdn_ctrl*
- * driver = Driver ID.
- * command = One of above ISDN_CMD_... constants.
- * arg = depending on command.
- * num = depending on command.
- */
- int (*command)(isdn_ctrl*);
-
- /*
- * Send data using sk_buff's
- * Parameters:
- * int driverId
- * int local channel-number (0...)
- * int Flag: Need ACK for this packet.
- * struct sk_buff *skb Data to send
- */
- int (*writebuf_skb) (int, int, int, struct sk_buff *);
-
- /* Send raw D-Channel-Commands
- * Parameters:
- * u_char pointer data
- * int length of data
- * int driverId
- * int local channel-number (0 ...)
- */
- int (*writecmd)(const u_char __user *, int, int, int);
-
- /* Read raw Status replies
- * u_char pointer data (volatile)
- * int length of buffer
- * int driverId
- * int local channel-number (0 ...)
- */
- int (*readstat)(u_char __user *, int, int, int);
-
- char id[20];
-} isdn_if;
-
-/*
- * Function which must be called by lowlevel-driver at loadtime with
- * the following fields of above struct set:
- *
- * channels Number of channels that will be supported.
- * hl_hdrlen    Space to preserve in sk_buff's when sending. Drivers not
- * supporting sk_buff's should set this to 0.
- * command Address of Command-Handler.
- * features Bitwise coded Features of this driver. (use ISDN_FEATURE_...)
- * writebuf_skb Address of Skbuff-Send-Handler.
- * writecmd " " D-Channel " which accepts raw D-Ch-Commands.
- * readstat " " D-Channel " which delivers raw Status-Data.
- *
- * The linklevel-driver fills the following fields:
- *
- * channels Driver-ID assigned to this driver. (Must be used on all
- *              subsequent callbacks.)
- * rcvcallb_skb Address of handler for received Skbuff's.
- * statcallb " " " for status-changes.
- *
- */
-extern int register_isdn(isdn_if*);
-#include <linux/uaccess.h>
-
-#endif /* __ISDNIF_H__ */
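
The registration contract spelled out above can be condensed into a minimal, hypothetical lowlevel driver; my_command/my_writebuf_skb and friends are assumed driver-local handlers, and the channel count and feature mix are arbitrary examples:

	static int my_command(isdn_ctrl *c);
	static int my_writebuf_skb(int drvid, int chan, int ack, struct sk_buff *skb);
	static int my_writecmd(const u_char __user *buf, int len, int drvid, int chan);
	static int my_readstat(u_char __user *buf, int len, int drvid, int chan);

	static isdn_if my_iface;

	static int my_probe(void)
	{
		my_iface.owner        = THIS_MODULE;
		my_iface.channels     = 2;		/* B-channels offered */
		my_iface.maxbufsize   = 4000;
		my_iface.hl_hdrlen    = 0;		/* no extra headroom needed */
		my_iface.features     = ISDN_FEATURE_L2_HDLC |
					ISDN_FEATURE_L2_TRANS |
					ISDN_FEATURE_L3_TRANS |
					ISDN_FEATURE_P_EURO;
		my_iface.command      = my_command;
		my_iface.writebuf_skb = my_writebuf_skb;
		my_iface.writecmd     = my_writecmd;
		my_iface.readstat     = my_readstat;
		strcpy(my_iface.id, "mydrv0");

		if (!register_isdn(&my_iface))
			return -EIO;
		/* On return, .channels holds the assigned driver id and the
		 * linklevel has filled in .rcvcallb_skb and .statcallb. */
		return 0;
	}
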
diff --git a/include/linux/isicom.h b/include/linux/isicom.h
deleted file mode 100644
index b92e05650639..000000000000
--- a/include/linux/isicom.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef _LINUX_ISICOM_H
-#define _LINUX_ISICOM_H
-
-#define YES 1
-#define NO 0
-
-/*
- * ISICOM Driver definitions ...
- *
- */
-
-#define ISICOM_NAME "ISICom"
-
-/*
- * PCI definitions
- */
-
-#define DEVID_COUNT 9
-#define VENDOR_ID 0x10b5
-
-/*
- * These are now officially allocated numbers
- */
-
-#define ISICOM_NMAJOR 112 /* normal */
-#define ISICOM_CMAJOR 113 /* callout */
-#define ISICOM_MAGIC (('M' << 8) | 'T')
-
-#define WAKEUP_CHARS 256 /* hard coded for now */
-#define TX_SIZE 254
-
-#define BOARD_COUNT 4
-#define PORT_COUNT (BOARD_COUNT*16)
-
-/* character sizes */
-
-#define ISICOM_CS5 0x0000
-#define ISICOM_CS6 0x0001
-#define ISICOM_CS7 0x0002
-#define ISICOM_CS8 0x0003
-
-/* stop bits */
-
-#define ISICOM_1SB 0x0000
-#define ISICOM_2SB 0x0004
-
-/* parity */
-
-#define ISICOM_NOPAR 0x0000
-#define ISICOM_ODPAR 0x0008
-#define ISICOM_EVPAR 0x0018
-
-/* flow control */
-
-#define ISICOM_CTSRTS 0x03
-#define ISICOM_INITIATE_XONXOFF 0x04
-#define ISICOM_RESPOND_XONXOFF 0x08
-
-#define BOARD(line) (((line) >> 4) & 0x3)
-
- /* isi kill queue bitmap */
-
-#define ISICOM_KILLTX 0x01
-#define ISICOM_KILLRX 0x02
-
- /* isi_board status bitmap */
-
-#define FIRMWARE_LOADED 0x0001
-#define BOARD_ACTIVE 0x0002
-#define BOARD_INIT 0x0004
-
- /* isi_port status bitmap */
-
-#define ISI_CTS 0x1000
-#define ISI_DSR 0x2000
-#define ISI_RI 0x4000
-#define ISI_DCD 0x8000
-#define ISI_DTR 0x0100
-#define ISI_RTS 0x0200
-
-
-#define ISI_TXOK 0x0001
-
-#endif /* ISICOM_H */
diff --git a/include/linux/ism.h b/include/linux/ism.h
new file mode 100644
index 000000000000..ea2bcdae7401
--- /dev/null
+++ b/include/linux/ism.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Internal Shared Memory
+ *
+ * Definitions for the ISM module
+ *
+ * Copyright IBM Corp. 2022
+ */
+#ifndef _ISM_H
+#define _ISM_H
+
+#include <linux/workqueue.h>
+
+struct ism_dmb {
+ u64 dmb_tok;
+ u64 rgid;
+ u32 dmb_len;
+ u32 sba_idx;
+ u32 vlan_valid;
+ u32 vlan_id;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+};
+
+/* Unless we gain unexpected popularity, this limit should hold for a while */
+#define MAX_CLIENTS 8
+#define ISM_NR_DMBS 1920
+
+struct ism_dev {
+ spinlock_t lock; /* protects the ism device */
+ struct list_head list;
+ struct pci_dev *pdev;
+
+ struct ism_sba *sba;
+ dma_addr_t sba_dma_addr;
+ DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS);
+ u8 *sba_client_arr; /* entries are indices into 'clients' array */
+ void *priv[MAX_CLIENTS];
+
+ struct ism_eq *ieq;
+ dma_addr_t ieq_dma_addr;
+
+ struct device dev;
+ u64 local_gid;
+ int ieq_idx;
+
+ atomic_t free_clients_cnt;
+ atomic_t add_dev_cnt;
+ wait_queue_head_t waitq;
+};
+
+struct ism_event {
+ u32 type;
+ u32 code;
+ u64 tok;
+ u64 time;
+ u64 info;
+};
+
+struct ism_client {
+ const char *name;
+ void (*add)(struct ism_dev *dev);
+ void (*remove)(struct ism_dev *dev);
+ void (*handle_event)(struct ism_dev *dev, struct ism_event *event);
+ /* Parameter dmbemask contains a bit vector with updated DMBEs, if sent
+ * via ism_move_data(). Callback function must handle all active bits
+ * indicated by dmbemask.
+ */
+ void (*handle_irq)(struct ism_dev *dev, unsigned int bit, u16 dmbemask);
+ /* Private area - don't touch! */
+ struct work_struct remove_work;
+ struct work_struct add_work;
+ struct ism_dev *tgt_ism;
+ u8 id;
+};
+
+int ism_register_client(struct ism_client *client);
+int ism_unregister_client(struct ism_client *client);
+static inline void *ism_get_priv(struct ism_dev *dev,
+ struct ism_client *client) {
+ return dev->priv[client->id];
+}
+
+static inline void ism_set_priv(struct ism_dev *dev, struct ism_client *client,
+ void *priv) {
+ dev->priv[client->id] = priv;
+}
+
+int ism_register_dmb(struct ism_dev *dev, struct ism_dmb *dmb,
+ struct ism_client *client);
+int ism_unregister_dmb(struct ism_dev *dev, struct ism_dmb *dmb);
+int ism_move(struct ism_dev *dev, u64 dmb_tok, unsigned int idx, bool sf,
+ unsigned int offset, void *data, unsigned int size);
+u8 *ism_get_seid(void);
+
+const struct smcd_ops *ism_get_smcd_ops(void);
+
+#endif /* _ISM_H */
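
As a usage sketch of this client API (all names below are hypothetical; kzalloc/kfree would need <linux/slab.h>), an SMC-like client registers itself once and stashes per-device state through ism_set_priv() from its add() callback:

	static struct ism_client my_client;	/* forward declaration */

	struct my_state { int dummy; };

	static void my_add(struct ism_dev *dev)
	{
		struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

		ism_set_priv(dev, &my_client, st);	/* per-device private data */
	}

	static void my_remove(struct ism_dev *dev)
	{
		kfree(ism_get_priv(dev, &my_client));
	}

	static void my_event(struct ism_dev *dev, struct ism_event *event) { }
	static void my_irq(struct ism_dev *dev, unsigned int bit, u16 dmbemask) { }

	static struct ism_client my_client = {
		.name		= "my_smc",
		.add		= my_add,
		.remove		= my_remove,
		.handle_event	= my_event,
		.handle_irq	= my_irq,
	};

	/* In module init: if (ism_register_client(&my_client)) goto fail; */
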
diff --git a/include/linux/iversion.h b/include/linux/iversion.h
new file mode 100644
index 000000000000..f174ff1b59ee
--- /dev/null
+++ b/include/linux/iversion.h
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IVERSION_H
+#define _LINUX_IVERSION_H
+
+#include <linux/fs.h>
+
+/*
+ * The inode->i_version field:
+ * ---------------------------
+ * The change attribute (i_version) is mandated by NFSv4 and is mostly for
+ * knfsd, but is also used for other purposes (e.g. IMA). The i_version must
+ * appear larger to observers if there was an explicit change to the inode's
+ * data or metadata since it was last queried.
+ *
+ * An explicit change is one that would ordinarily result in a change to the
+ * inode status change time (aka ctime). i_version must appear to change, even
+ * if the ctime does not (since the whole point is to avoid missing updates due
+ * to timestamp granularity). If POSIX or other relevant spec mandates that the
+ * ctime must change due to an operation, then the i_version counter must be
+ * incremented as well.
+ *
+ * Making the i_version update completely atomic with the operation itself would
+ * be prohibitively expensive. Traditionally the kernel has updated the times on
+ * directories after an operation that changes its contents. For regular files,
+ * the ctime is usually updated before the data is copied into the cache for a
+ * write. This means that there is a window of time when an observer can
+ * associate a new timestamp with old file contents. Since the purpose of the
+ * i_version is to allow for better cache coherency, the i_version must always
+ * be updated after the results of the operation are visible. Updating it before
+ * and after a change is also permitted. (Note that no filesystems currently do
+ * this. Fixing that is a work-in-progress).
+ *
+ * Observers see the i_version as a 64-bit number that never decreases. If it
+ * remains the same since it was last checked, then nothing has changed in the
+ * inode. If it's different then something has changed. Observers cannot infer
+ * anything about the nature or magnitude of the changes from the value, only
+ * that the inode has changed in some fashion.
+ *
+ * Not all filesystems properly implement the i_version counter. Subsystems that
+ * want to use i_version field on an inode should first check whether the
+ * filesystem sets the SB_I_VERSION flag (usually via the IS_I_VERSION macro).
+ *
+ * Those that set SB_I_VERSION will automatically have their i_version counter
+ * incremented on writes to normal files. If the SB_I_VERSION is not set, then
+ * the VFS will not touch it on writes, and the filesystem can use it how it
+ * wishes. Note that the filesystem is always responsible for updating the
+ * i_version on namespace changes in directories (mkdir, rmdir, unlink, etc.).
+ * We consider these sorts of filesystems to have a kernel-managed i_version.
+ *
+ * It may be impractical for filesystems to keep i_version updates atomic with
+ * respect to the changes that cause them. They should, however, guarantee
+ * that i_version updates are never visible before the changes that caused
+ * them. Also, i_version updates should never be delayed longer than it takes
+ * the original change to reach disk.
+ *
+ * This implementation uses the low bit in the i_version field as a flag to
+ * track when the value has been queried. If it has not been queried since it
+ * was last incremented, we can skip the increment in most cases.
+ *
+ * In the event that we're updating the ctime, we will usually go ahead and
+ * bump the i_version anyway. Since that has to go to stable storage in some
+ * fashion, we might as well increment it as well.
+ *
+ * With this implementation, the value should always appear to observers to
+ * increase over time if the file has changed. It's recommended to use the
+ * inode_eq_iversion() helper to compare values.
+ *
+ * Note that some filesystems (e.g. NFS and AFS) just use the field to store
+ * a server-provided value (for the most part). For that reason, those
+ * filesystems do not set SB_I_VERSION. These filesystems are considered to
+ * have a self-managed i_version.
+ *
+ * Persistently storing the i_version
+ * ----------------------------------
+ * Queries of the i_version field are not gated on them hitting the backing
+ * store. It's always possible that the host could crash after allowing
+ * a query of the value but before it has made it to disk.
+ *
+ * To mitigate this problem, filesystems should always use
+ * inode_set_iversion_queried when loading an existing inode from disk. This
+ * ensures that the next attempted inode increment will result in the value
+ * changing.
+ *
+ * Storing the value to disk therefore does not count as a query, so those
+ * filesystems should use inode_peek_iversion to grab the value to be stored.
+ * There is no need to flag the value as having been queried in that case.
+ */
+
+/*
+ * We borrow the lowest bit in the i_version to use as a flag to tell whether
+ * it has been queried since we last incremented it. If it has, then we must
+ * increment it on the next change. After that, we can clear the flag and
+ * avoid incrementing it again until it has again been queried.
+ */
+#define I_VERSION_QUERIED_SHIFT (1)
+#define I_VERSION_QUERIED (1ULL << (I_VERSION_QUERIED_SHIFT - 1))
+#define I_VERSION_INCREMENT (1ULL << I_VERSION_QUERIED_SHIFT)
+
+/**
+ * inode_set_iversion_raw - set i_version to the specified raw value
+ * @inode: inode to set
+ * @val: new i_version value to set
+ *
+ * Set @inode's i_version field to @val. This function is for use by
+ * filesystems that self-manage the i_version.
+ *
+ * For example, the NFS client stores its NFSv4 change attribute in this way,
+ * and the AFS client stores the data_version from the server here.
+ */
+static inline void
+inode_set_iversion_raw(struct inode *inode, u64 val)
+{
+ atomic64_set(&inode->i_version, val);
+}
+
+/**
+ * inode_peek_iversion_raw - grab a "raw" iversion value
+ * @inode: inode from which i_version should be read
+ *
+ * Grab a "raw" inode->i_version value and return it. The i_version is not
+ * flagged or converted in any way. This is mostly used to access a self-managed
+ * i_version.
+ *
+ * With those filesystems, we want to treat the i_version as an entirely
+ * opaque value.
+ */
+static inline u64
+inode_peek_iversion_raw(const struct inode *inode)
+{
+ return atomic64_read(&inode->i_version);
+}
+
+/**
+ * inode_set_max_iversion_raw - update i_version if the new value is larger
+ * @inode: inode to set
+ * @val: new i_version to set
+ *
+ * Some self-managed filesystems (e.g. Ceph) will only update the i_version
+ * value if the new value is larger than the one we already have.
+ */
+static inline void
+inode_set_max_iversion_raw(struct inode *inode, u64 val)
+{
+ u64 cur = inode_peek_iversion_raw(inode);
+
+ do {
+ if (cur > val)
+ break;
+ } while (!atomic64_try_cmpxchg(&inode->i_version, &cur, val));
+}
+
+/**
+ * inode_set_iversion - set i_version to a particular value
+ * @inode: inode to set
+ * @val: new i_version value to set
+ *
+ * Set @inode's i_version field to @val. This function is for filesystems with
+ * a kernel-managed i_version, for initializing a newly-created inode from
+ * scratch.
+ *
+ * In this case, we do not set the QUERIED flag since we know that this value
+ * has never been queried.
+ */
+static inline void
+inode_set_iversion(struct inode *inode, u64 val)
+{
+ inode_set_iversion_raw(inode, val << I_VERSION_QUERIED_SHIFT);
+}
+
+/**
+ * inode_set_iversion_queried - set i_version to a particular value as queried
+ * @inode: inode to set
+ * @val: new i_version value to set
+ *
+ * Set @inode's i_version field to @val, and flag it for increment on the next
+ * change.
+ *
+ * Filesystems that persistently store the i_version on disk should use this
+ * when loading an existing inode from disk.
+ *
+ * When loading in an i_version value from a backing store, we can't be certain
+ * that it wasn't previously viewed before being stored. Thus, we must assume
+ * that it was, to ensure that we don't end up handing out the same value for
+ * different versions of the same inode.
+ */
+static inline void
+inode_set_iversion_queried(struct inode *inode, u64 val)
+{
+ inode_set_iversion_raw(inode, (val << I_VERSION_QUERIED_SHIFT) |
+ I_VERSION_QUERIED);
+}
+
+bool inode_maybe_inc_iversion(struct inode *inode, bool force);
+
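+/*
+ * inode_maybe_inc_iversion() is defined out of line (fs/libfs.c in
+ * mainline). Given the QUERIED-flag scheme documented above, its gist is
+ * roughly the following sketch -- not the authoritative implementation:
+ *
+ *	static bool sketch_maybe_inc_iversion(struct inode *inode, bool force)
+ *	{
+ *		u64 cur = inode_peek_iversion_raw(inode);
+ *		u64 new;
+ *
+ *		do {
+ *			// If not forced and nobody has queried since the
+ *			// last bump, the increment can be skipped entirely.
+ *			if (!force && !(cur & I_VERSION_QUERIED))
+ *				return false;
+ *			// Clear the QUERIED flag and add one full increment.
+ *			new = (cur & ~I_VERSION_QUERIED) + I_VERSION_INCREMENT;
+ *		} while (!atomic64_try_cmpxchg(&inode->i_version, &cur, new));
+ *		return true;
+ *	}
+ */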
+/**
+ * inode_inc_iversion - forcibly increment i_version
+ * @inode: inode that needs to be updated
+ *
+ * Forcibly increment the i_version field. This always results in a change to
+ * the observable value.
+ */
+static inline void
+inode_inc_iversion(struct inode *inode)
+{
+ inode_maybe_inc_iversion(inode, true);
+}
+
+/**
+ * inode_iversion_need_inc - is the i_version in need of being incremented?
+ * @inode: inode to check
+ *
+ * Returns whether the inode->i_version counter needs incrementing on the next
+ * change. Just fetch the value and check the QUERIED flag.
+ */
+static inline bool
+inode_iversion_need_inc(struct inode *inode)
+{
+ return inode_peek_iversion_raw(inode) & I_VERSION_QUERIED;
+}
+
+/**
+ * inode_inc_iversion_raw - forcibly increment raw i_version
+ * @inode: inode that needs to be updated
+ *
+ * Forcibly increment the raw i_version field. This always results in a change
+ * to the raw value.
+ *
+ * NFS will use the i_version field to store the value from the server. It
+ * mostly treats it as opaque, but in the case where it holds a write
+ * delegation, it must increment the value itself. This function does that.
+ */
+static inline void
+inode_inc_iversion_raw(struct inode *inode)
+{
+ atomic64_inc(&inode->i_version);
+}
+
+/**
+ * inode_peek_iversion - read i_version without flagging it to be incremented
+ * @inode: inode from which i_version should be read
+ *
+ * Read the inode i_version counter for an inode without registering it as a
+ * query.
+ *
+ * This is typically used by local filesystems that need to store an i_version
+ * on disk. In that situation, it's not necessary to flag it as having been
+ * viewed, as the result won't be used to gauge changes from that point.
+ */
+static inline u64
+inode_peek_iversion(const struct inode *inode)
+{
+ return inode_peek_iversion_raw(inode) >> I_VERSION_QUERIED_SHIFT;
+}
+
+/*
+ * For filesystems without any sort of change attribute, the best we can
+ * do is fake one up from the ctime:
+ */
+static inline u64 time_to_chattr(struct timespec64 *t)
+{
+ u64 chattr = t->tv_sec;
+
+ chattr <<= 32;
+ chattr += t->tv_nsec;
+ return chattr;
+}
+
+u64 inode_query_iversion(struct inode *inode);
+
+/**
+ * inode_eq_iversion_raw - check whether the raw i_version counter has changed
+ * @inode: inode to check
+ * @old: old value to check against its i_version
+ *
+ * Compare the current raw i_version counter with a previous one. Returns true
+ * if they are the same or false if they are different.
+ */
+static inline bool
+inode_eq_iversion_raw(const struct inode *inode, u64 old)
+{
+ return inode_peek_iversion_raw(inode) == old;
+}
+
+/**
+ * inode_eq_iversion - check whether the i_version counter has changed
+ * @inode: inode to check
+ * @old: old value to check against its i_version
+ *
+ * Compare an i_version counter with a previous one. Returns true if they are
+ * the same, and false if they are different.
+ *
+ * Note that we don't need to set the QUERIED flag in this case, as the value
+ * in the inode is not being recorded for later use.
+ */
+static inline bool
+inode_eq_iversion(const struct inode *inode, u64 old)
+{
+ return inode_peek_iversion(inode) == old;
+}
+#endif
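
Putting the helpers together, the expected call pattern for a filesystem that persists i_version looks roughly like this sketch (raw->disk_version is a hypothetical on-disk field):

	/* When reading an inode from disk: assume it was already queried. */
	inode_set_iversion_queried(inode, le64_to_cpu(raw->disk_version));

	/* When creating a brand-new inode: no QUERIED flag needed. */
	inode_set_iversion(inode, 1);

	/* When writing the inode back: storing is not a query. */
	raw->disk_version = cpu_to_le64(inode_peek_iversion(inode));

	/* An observer (e.g. knfsd) that reports the value must use the
	 * querying variant so that the next change bumps the counter: */
	chattr = inode_query_iversion(inode);
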
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 606b6bce3a5b..f619bae1dcc5 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* linux/include/linux/jbd2.h
*
@@ -5,10 +6,6 @@
*
* Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved
*
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
* Definitions for transaction data structures for the buffer cache
* filesystem journaling support.
*/
@@ -30,6 +27,7 @@
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/bit_spinlock.h>
+#include <linux/blkdev.h>
#include <crypto/hash.h>
#endif
@@ -56,20 +54,20 @@
* CONFIG_JBD2_DEBUG is on.
*/
#define JBD2_EXPENSIVE_CHECKING
-extern ushort jbd2_journal_enable_debug;
void __jbd2_debug(int level, const char *file, const char *func,
unsigned int line, const char *fmt, ...);
-#define jbd_debug(n, fmt, a...) \
+#define jbd2_debug(n, fmt, a...) \
__jbd2_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
#else
-#define jbd_debug(n, fmt, a...) /**/
+#define jbd2_debug(n, fmt, a...) no_printk(fmt, ##a)
#endif
extern void *jbd2_alloc(size_t size, gfp_t flags);
extern void jbd2_free(void *ptr, size_t size);
#define JBD2_MIN_JOURNAL_BLOCKS 1024
+#define JBD2_DEFAULT_FAST_COMMIT_BLOCKS 256
#ifdef __KERNEL__
@@ -265,7 +263,10 @@ typedef struct journal_superblock_s
/* 0x0050 */
__u8 s_checksum_type; /* checksum type */
__u8 s_padding2[3];
- __u32 s_padding[42];
+/* 0x0054 */
+ __be32 s_num_fc_blks; /* Number of fast commit blocks */
+/* 0x0058 */
+ __u32 s_padding[41];
__be32 s_checksum; /* crc32c(superblock) */
/* 0x0100 */
@@ -291,6 +292,7 @@ typedef struct journal_superblock_s
#define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004
#define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008
#define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010
+#define JBD2_FEATURE_INCOMPAT_FAST_COMMIT 0x00000020
/* See "journal feature predicate functions" below */
@@ -301,7 +303,8 @@ typedef struct journal_superblock_s
JBD2_FEATURE_INCOMPAT_64BIT | \
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \
JBD2_FEATURE_INCOMPAT_CSUM_V2 | \
- JBD2_FEATURE_INCOMPAT_CSUM_V3)
+ JBD2_FEATURE_INCOMPAT_CSUM_V3 | \
+ JBD2_FEATURE_INCOMPAT_FAST_COMMIT)
#ifdef __KERNEL__
@@ -316,7 +319,6 @@ enum jbd_state_bits {
BH_Revoked, /* Has been revoked from the log */
BH_RevokeValid, /* Revoked flag is valid */
BH_JBDDirty, /* Is dirty but journaled */
- BH_State, /* Pins most journal_head state */
BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
BH_Shadow, /* IO on shadow buffer is running */
BH_Verified, /* Metadata block has been verified ok */
@@ -345,26 +347,6 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh)
return bh->b_private;
}
-static inline void jbd_lock_bh_state(struct buffer_head *bh)
-{
- bit_spin_lock(BH_State, &bh->b_state);
-}
-
-static inline int jbd_trylock_bh_state(struct buffer_head *bh)
-{
- return bit_spin_trylock(BH_State, &bh->b_state);
-}
-
-static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
-{
- return bit_spin_is_locked(BH_State, &bh->b_state);
-}
-
-static inline void jbd_unlock_bh_state(struct buffer_head *bh)
-{
- bit_spin_unlock(BH_State, &bh->b_state);
-}
-
static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
{
bit_spin_lock(BH_JournalHead, &bh->b_state);
@@ -418,41 +400,83 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
#define JI_WAIT_DATA (1 << __JI_WAIT_DATA)
/**
- * struct jbd_inode is the structure linking inodes in ordered mode
- * present in a transaction so that we can sync them during commit.
+ * struct jbd2_inode - The jbd_inode type is the structure linking inodes in
+ * ordered mode present in a transaction so that we can sync them during commit.
*/
struct jbd2_inode {
- /* Which transaction does this inode belong to? Either the running
- * transaction or the committing one. [j_list_lock] */
+ /**
+ * @i_transaction:
+ *
+ * Which transaction does this inode belong to? Either the running
+ * transaction or the committing one. [j_list_lock]
+ */
transaction_t *i_transaction;
- /* Pointer to the running transaction modifying inode's data in case
- * there is already a committing transaction touching it. [j_list_lock] */
+ /**
+ * @i_next_transaction:
+ *
+ * Pointer to the running transaction modifying inode's data in case
+ * there is already a committing transaction touching it. [j_list_lock]
+ */
transaction_t *i_next_transaction;
- /* List of inodes in the i_transaction [j_list_lock] */
+ /**
+ * @i_list: List of inodes in the i_transaction [j_list_lock]
+ */
struct list_head i_list;
- /* VFS inode this inode belongs to [constant during the lifetime
- * of the structure] */
+ /**
+ * @i_vfs_inode:
+ *
+ * VFS inode this inode belongs to [constant for lifetime of structure]
+ */
struct inode *i_vfs_inode;
- /* Flags of inode [j_list_lock] */
+ /**
+ * @i_flags: Flags of inode [j_list_lock]
+ */
unsigned long i_flags;
+
+ /**
+ * @i_dirty_start:
+ *
+ * Offset in bytes where the dirty range for this inode starts.
+ * [j_list_lock]
+ */
+ loff_t i_dirty_start;
+
+ /**
+ * @i_dirty_end:
+ *
+ * Inclusive offset in bytes where the dirty range for this inode
+ * ends. [j_list_lock]
+ */
+ loff_t i_dirty_end;
};
struct jbd2_revoke_table_s;
/**
- * struct handle_s - The handle_s type is the concrete type associated with
- * handle_t.
+ * struct jbd2_journal_handle - The jbd2_journal_handle type is the concrete
+ * type associated with handle_t.
* @h_transaction: Which compound transaction is this update a part of?
- * @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
- * @h_ref: Reference count on this handle
- * @h_err: Field for caller's use to track errors through large fs operations
- * @h_sync: flag for sync-on-close
- * @h_jdata: flag to force data journaling
- * @h_aborted: flag indicating fatal error on handle
+ * @h_journal: Which journal this handle belongs to - used iff h_reserved is set.
+ * @h_rsv_handle: Handle reserved for finishing the logical operation.
+ * @h_total_credits: Number of remaining buffers we are allowed to add to
+ * journal. These are dirty buffers and revoke descriptor blocks.
+ * @h_revoke_credits: Number of remaining revoke records available for handle
+ * @h_ref: Reference count on this handle.
+ * @h_err: Field for caller's use to track errors through large fs operations.
+ * @h_sync: Flag for sync-on-close.
+ * @h_jdata: Flag to force data journaling.
+ * @h_reserved: Flag for handle for reserved credits.
+ * @h_aborted: Flag indicating fatal error on handle.
+ * @h_type: For handle statistics.
+ * @h_line_no: For handle statistics.
+ * @h_start_jiffies: Handle Start time.
+ * @h_requested_credits: Holds @h_total_credits after handle is started.
+ * @h_revoke_credits_requested: Holds @h_revoke_credits after handle is started.
+ * @saved_alloc_context: Saved context while transaction is open.
**/
/* Docbook can't yet cope with the bit fields, but will leave the documentation
@@ -462,32 +486,25 @@ struct jbd2_revoke_table_s;
struct jbd2_journal_handle
{
union {
- /* Which compound transaction is this update a part of? */
transaction_t *h_transaction;
/* Which journal handle belongs to - used iff h_reserved set */
journal_t *h_journal;
};
- /* Handle reserved for finishing the logical operation */
handle_t *h_rsv_handle;
-
- /* Number of remaining buffers we are allowed to dirty: */
- int h_buffer_credits;
-
- /* Reference count on this handle */
+ int h_total_credits;
+ int h_revoke_credits;
+ int h_revoke_credits_requested;
int h_ref;
-
- /* Field for caller's use to track errors through large fs */
- /* operations */
int h_err;
/* Flags [no locking] */
- unsigned int h_sync: 1; /* sync-on-close */
- unsigned int h_jdata: 1; /* force data journaling */
- unsigned int h_reserved: 1; /* handle with reserved credits */
- unsigned int h_aborted: 1; /* fatal error on handle */
- unsigned int h_type: 8; /* for handle statistics */
- unsigned int h_line_no: 16; /* for handle statistics */
+ unsigned int h_sync: 1;
+ unsigned int h_jdata: 1;
+ unsigned int h_reserved: 1;
+ unsigned int h_aborted: 1;
+ unsigned int h_type: 8;
+ unsigned int h_line_no: 16;
unsigned long h_start_jiffies;
unsigned int h_requested_credits;
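
For orientation, the normal lifecycle of a handle against these fields -- credits are requested at start and tracked in h_total_credits -- looks roughly like the sketch below (error handling elided; jbd2_journal_start() and jbd2_journal_stop() are the long-standing jbd2 entry points):

	handle_t *handle;

	handle = jbd2_journal_start(journal, credits);	/* reserves buffer credits */
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	/* ... jbd2_journal_get_write_access() + dirty metadata buffers ... */
	jbd2_journal_stop(handle);			/* releases unused credits */
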
@@ -520,6 +537,7 @@ struct transaction_chp_stats_s {
* The transaction keeps track of all of the buffers modified by a
* running transaction, and all of the buffers committed but not yet
* flushed to home for finished transactions.
+ * (Locking Documentation improved by LockDoc)
*/
/*
@@ -529,15 +547,12 @@ struct transaction_chp_stats_s {
* ->jbd_lock_bh_journal_head() (This is "innermost")
*
* j_state_lock
- * ->jbd_lock_bh_state()
+ * ->b_state_lock
*
- * jbd_lock_bh_state()
+ * b_state_lock
* ->j_list_lock
*
* j_state_lock
- * ->t_handle_lock
- *
- * j_state_lock
* ->j_list_lock (journal_unmap_buffer)
*
*/
@@ -561,6 +576,7 @@ struct transaction_s
enum {
T_RUNNING,
T_LOCKED,
+ T_SWITCH,
T_FLUSH,
T_COMMIT,
T_COMMIT_DFLUSH,
@@ -574,18 +590,22 @@ struct transaction_s
*/
unsigned long t_log_start;
- /* Number of buffers on the t_buffers list [j_list_lock] */
+ /*
+ * Number of buffers on the t_buffers list [j_list_lock, no locks
+ * needed for jbd2 thread]
+ */
int t_nr_buffers;
/*
* Doubly-linked circular list of all buffers reserved but not yet
- * modified by this transaction [j_list_lock]
+	 * modified by this transaction [j_list_lock, no locks needed for
+ * jbd2 thread]
*/
struct journal_head *t_reserved_list;
/*
* Doubly-linked circular list of all metadata buffers owned by this
- * transaction [j_list_lock]
+ * transaction [j_list_lock, no locks needed for jbd2 thread]
*/
struct journal_head *t_buffers;
@@ -609,14 +629,18 @@ struct transaction_s
struct journal_head *t_checkpoint_io_list;
/*
- * Doubly-linked circular list of metadata buffers being shadowed by log
- * IO. The IO buffers on the iobuf list and the shadow buffers on this
- * list match each other one for one at all times. [j_list_lock]
+ * Doubly-linked circular list of metadata buffers being
+ * shadowed by log IO. The IO buffers on the iobuf list and
+ * the shadow buffers on this list match each other one for
+ * one at all times. [j_list_lock, no locks needed for jbd2
+ * thread]
*/
struct journal_head *t_shadow_list;
/*
- * List of inodes whose data we've modified in data=ordered mode.
+ * List of inodes associated with the transaction; e.g., ext4 uses
+ * this to track inodes in data=ordered and data=journal mode that
+ * need special handling on transaction commit; also used by ocfs2.
* [j_list_lock]
*/
struct list_head t_inode_list;
@@ -637,28 +661,41 @@ struct transaction_s
unsigned long t_start;
/*
- * When commit was requested
+ * When commit was requested [j_state_lock]
*/
unsigned long t_requested;
/*
- * Checkpointing stats [j_checkpoint_sem]
+ * Checkpointing stats [j_list_lock]
*/
struct transaction_chp_stats_s t_chp_stats;
/*
* Number of outstanding updates running on this transaction
- * [t_handle_lock]
+ * [none]
*/
atomic_t t_updates;
/*
- * Number of buffers reserved for use by all handles in this transaction
- * handle but not yet modified. [t_handle_lock]
+ * Number of blocks reserved for this transaction in the journal.
+	 * This includes all credits reserved when starting transaction
+ * handles as well as all journal descriptor blocks needed for this
+ * transaction. [none]
*/
atomic_t t_outstanding_credits;
/*
+ * Number of revoke records for this transaction added by already
+ * stopped handles. [none]
+ */
+ atomic_t t_outstanding_revokes;
+
+ /*
+ * How many handles used this transaction? [none]
+ */
+ atomic_t t_handle_count;
+
+ /*
* Forward and backward links for the circular list of all transactions
* awaiting checkpoint. [j_list_lock]
*/
@@ -676,11 +713,6 @@ struct transaction_s
ktime_t t_start_time;
/*
- * How many handles used this transaction? [t_handle_lock]
- */
- atomic_t t_handle_count;
-
- /*
* This transaction is being forced and some process is
* waiting for it to finish.
*/
@@ -726,231 +758,329 @@ jbd2_time_diff(unsigned long start, unsigned long end)
#define JBD2_NR_BATCH 64
+enum passtype {PASS_SCAN, PASS_REVOKE, PASS_REPLAY};
+
+#define JBD2_FC_REPLAY_STOP 0
+#define JBD2_FC_REPLAY_CONTINUE 1
+
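+
+/*
+ * JBD2_FC_REPLAY_STOP/_CONTINUE are the return values a client
+ * filesystem's fast-commit replay hook uses to drive recovery; the hook
+ * itself is added elsewhere in this series and is not visible in this
+ * hunk, so the signature below is an assumption. A rough sketch:
+ *
+ *	static int my_fc_replay(journal_t *journal, struct buffer_head *bh,
+ *				enum passtype pass, int off, tid_t expected_tid)
+ *	{
+ *		if (pass != PASS_REPLAY)
+ *			return JBD2_FC_REPLAY_CONTINUE;
+ *		if (is_last_fc_block(bh))	// fs-specific, hypothetical
+ *			return JBD2_FC_REPLAY_STOP;	// no more fc blocks
+ *		// ... apply the operation logged in bh->b_data ...
+ *		return JBD2_FC_REPLAY_CONTINUE;	// hand over the next block
+ *	}
+ */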
/**
* struct journal_s - The journal_s type is the concrete type associated with
* journal_t.
- * @j_flags: General journaling state flags
- * @j_errno: Is there an outstanding uncleared error on the journal (from a
- * prior abort)?
- * @j_sb_buffer: First part of superblock buffer
- * @j_superblock: Second part of superblock buffer
- * @j_format_version: Version of the superblock format
- * @j_state_lock: Protect the various scalars in the journal
- * @j_barrier_count: Number of processes waiting to create a barrier lock
- * @j_barrier: The barrier lock itself
- * @j_running_transaction: The current running transaction..
- * @j_committing_transaction: the transaction we are pushing to disk
- * @j_checkpoint_transactions: a linked circular list of all transactions
- * waiting for checkpointing
- * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
- * to start committing, or for a barrier lock to be released
- * @j_wait_done_commit: Wait queue for waiting for commit to complete
- * @j_wait_commit: Wait queue to trigger commit
- * @j_wait_updates: Wait queue to wait for updates to complete
- * @j_wait_reserved: Wait queue to wait for reserved buffer credits to drop
- * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
- * @j_head: Journal head - identifies the first unused block in the journal
- * @j_tail: Journal tail - identifies the oldest still-used block in the
- * journal.
- * @j_free: Journal free - how many free blocks are there in the journal?
- * @j_first: The block number of the first usable block
- * @j_last: The block number one beyond the last usable block
- * @j_dev: Device where we store the journal
- * @j_blocksize: blocksize for the location where we store the journal.
- * @j_blk_offset: starting block offset for into the device where we store the
- * journal
- * @j_fs_dev: Device which holds the client fs. For internal journal this will
- * be equal to j_dev
- * @j_reserved_credits: Number of buffers reserved from the running transaction
- * @j_maxlen: Total maximum capacity of the journal region on disk.
- * @j_list_lock: Protects the buffer lists and internal buffer state.
- * @j_inode: Optional inode where we store the journal. If present, all journal
- * block numbers are mapped into this inode via bmap().
- * @j_tail_sequence: Sequence number of the oldest transaction in the log
- * @j_transaction_sequence: Sequence number of the next transaction to grant
- * @j_commit_sequence: Sequence number of the most recently committed
- * transaction
- * @j_commit_request: Sequence number of the most recent transaction wanting
- * commit
- * @j_uuid: Uuid of client object.
- * @j_task: Pointer to the current commit thread for this journal
- * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a
- * single compound commit transaction
- * @j_commit_interval: What is the maximum transaction lifetime before we begin
- * a commit?
- * @j_commit_timer: The timer used to wakeup the commit thread
- * @j_revoke_lock: Protect the revoke table
- * @j_revoke: The revoke table - maintains the list of revoked blocks in the
- * current transaction.
- * @j_revoke_table: alternate revoke tables for j_revoke
- * @j_wbuf: array of buffer_heads for jbd2_journal_commit_transaction
- * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
- * number that will fit in j_blocksize
- * @j_last_sync_writer: most recent pid which did a synchronous write
- * @j_history_lock: Protect the transactions statistics history
- * @j_proc_entry: procfs entry for the jbd statistics directory
- * @j_stats: Overall statistics
- * @j_private: An opaque pointer to fs-private information.
- * @j_trans_commit_map: Lockdep entity to track transaction commit dependencies
*/
-
struct journal_s
{
- /* General journaling state flags [j_state_lock] */
+ /**
+ * @j_flags: General journaling state flags [j_state_lock,
+ * no lock for quick racy checks]
+ */
unsigned long j_flags;
- /*
+ /**
+ * @j_atomic_flags: Atomic journaling state flags.
+ */
+ unsigned long j_atomic_flags;
+
+ /**
+ * @j_errno:
+ *
* Is there an outstanding uncleared error on the journal (from a prior
* abort)? [j_state_lock]
*/
int j_errno;
- /* The superblock buffer */
+ /**
+ * @j_abort_mutex: Lock the whole aborting procedure.
+ */
+ struct mutex j_abort_mutex;
+
+ /**
+ * @j_sb_buffer: The first part of the superblock buffer.
+ */
struct buffer_head *j_sb_buffer;
+
+ /**
+ * @j_superblock: The second part of the superblock buffer.
+ */
journal_superblock_t *j_superblock;
- /* Version of the superblock format */
+ /**
+ * @j_format_version: Version of the superblock format.
+ */
int j_format_version;
- /*
- * Protect the various scalars in the journal
+ /**
+ * @j_state_lock: Protect the various scalars in the journal.
*/
rwlock_t j_state_lock;
- /*
- * Number of processes waiting to create a barrier lock [j_state_lock]
+ /**
+ * @j_barrier_count:
+ *
+ * Number of processes waiting to create a barrier lock [j_state_lock,
+ * no lock for quick racy checks]
*/
int j_barrier_count;
- /* The barrier lock itself */
+ /**
+ * @j_barrier: The barrier lock itself.
+ */
struct mutex j_barrier;
- /*
+ /**
+ * @j_running_transaction:
+ *
* Transactions: The current running transaction...
- * [j_state_lock] [caller holding open handle]
+ * [j_state_lock, no lock for quick racy checks] [caller holding
+ * open handle]
*/
transaction_t *j_running_transaction;
- /*
+ /**
+ * @j_committing_transaction:
+ *
* the transaction we are pushing to disk
* [j_state_lock] [caller holding open handle]
*/
transaction_t *j_committing_transaction;
- /*
+ /**
+ * @j_checkpoint_transactions:
+ *
* ... and a linked circular list of all transactions waiting for
* checkpointing. [j_list_lock]
*/
transaction_t *j_checkpoint_transactions;
- /*
+ /**
+ * @j_wait_transaction_locked:
+ *
* Wait queue for waiting for a locked transaction to start committing,
- * or for a barrier lock to be released
+ * or for a barrier lock to be released.
*/
wait_queue_head_t j_wait_transaction_locked;
- /* Wait queue for waiting for commit to complete */
+ /**
+ * @j_wait_done_commit: Wait queue for waiting for commit to complete.
+ */
wait_queue_head_t j_wait_done_commit;
- /* Wait queue to trigger commit */
+ /**
+ * @j_wait_commit: Wait queue to trigger commit.
+ */
wait_queue_head_t j_wait_commit;
- /* Wait queue to wait for updates to complete */
+ /**
+ * @j_wait_updates: Wait queue to wait for updates to complete.
+ */
wait_queue_head_t j_wait_updates;
- /* Wait queue to wait for reserved buffer credits to drop */
+ /**
+ * @j_wait_reserved:
+ *
+ * Wait queue to wait for reserved buffer credits to drop.
+ */
wait_queue_head_t j_wait_reserved;
- /* Semaphore for locking against concurrent checkpoints */
+ /**
+ * @j_fc_wait:
+ *
+ * Wait queue to wait for completion of async fast commits.
+ */
+ wait_queue_head_t j_fc_wait;
+
+ /**
+ * @j_checkpoint_mutex:
+ *
+ * Semaphore for locking against concurrent checkpoints.
+ */
struct mutex j_checkpoint_mutex;
- /*
+ /**
+ * @j_chkpt_bhs:
+ *
* List of buffer heads used by the checkpoint routine. This
* was moved from jbd2_log_do_checkpoint() to reduce stack
* usage. Access to this array is controlled by the
- * j_checkpoint_mutex. [j_checkpoint_mutex]
+ * @j_checkpoint_mutex. [j_checkpoint_mutex]
*/
struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH];
-
- /*
+
+ /**
+ * @j_shrinker:
+ *
+	 * Journal head shrinker; reclaims the journal heads of buffers
+	 * that have been written back.
+ */
+ struct shrinker j_shrinker;
+
+ /**
+ * @j_checkpoint_jh_count:
+ *
+ * Number of journal buffers on the checkpoint list. [j_list_lock]
+ */
+ struct percpu_counter j_checkpoint_jh_count;
+
+ /**
+ * @j_shrink_transaction:
+ *
+	 * Records the next transaction to shrink on the checkpoint list.
+ * [j_list_lock]
+ */
+ transaction_t *j_shrink_transaction;
+
+ /**
+ * @j_head:
+ *
* Journal head: identifies the first unused block in the journal.
* [j_state_lock]
*/
unsigned long j_head;
- /*
+ /**
+ * @j_tail:
+ *
* Journal tail: identifies the oldest still-used block in the journal.
* [j_state_lock]
*/
unsigned long j_tail;
- /*
+ /**
+ * @j_free:
+ *
* Journal free: how many free blocks are there in the journal?
* [j_state_lock]
*/
unsigned long j_free;
- /*
- * Journal start and end: the block numbers of the first usable block
- * and one beyond the last usable block in the journal. [j_state_lock]
+ /**
+ * @j_first:
+ *
+ * The block number of the first usable block in the journal
+ * [j_state_lock].
*/
unsigned long j_first;
+
+ /**
+ * @j_last:
+ *
+ * The block number one beyond the last usable block in the journal
+ * [j_state_lock].
+ */
unsigned long j_last;
- /*
- * Device, blocksize and starting block offset for the location where we
- * store the journal.
+ /**
+ * @j_fc_first:
+ *
+ * The block number of the first fast commit block in the journal
+ * [j_state_lock].
+ */
+ unsigned long j_fc_first;
+
+ /**
+ * @j_fc_off:
+ *
+ * Number of fast commit blocks currently allocated. Accessed only
+	 * during fast commit. Currently only one process can do fast commit, so
+ * this field is not protected by any lock.
+ */
+ unsigned long j_fc_off;
+
+ /**
+ * @j_fc_last:
+ *
+ * The block number one beyond the last fast commit block in the journal
+ * [j_state_lock].
+ */
+ unsigned long j_fc_last;
+
+ /**
+ * @j_dev: Device where we store the journal.
*/
struct block_device *j_dev;
+
+ /**
+ * @j_blocksize: Block size for the location where we store the journal.
+ */
int j_blocksize;
+
+ /**
+ * @j_blk_offset:
+ *
+ * Starting block offset into the device where we store the journal.
+ */
unsigned long long j_blk_offset;
+
+ /**
+ * @j_devname: Journal device name.
+ */
char j_devname[BDEVNAME_SIZE+24];
- /*
+ /**
+ * @j_fs_dev:
+ *
* Device which holds the client fs. For internal journal this will be
* equal to j_dev.
*/
struct block_device *j_fs_dev;
- /* Total maximum capacity of the journal region on disk. */
- unsigned int j_maxlen;
+ /**
+ * @j_total_len: Total maximum capacity of the journal region on disk.
+ */
+ unsigned int j_total_len;
- /* Number of buffers reserved from the running transaction */
+ /**
+ * @j_reserved_credits:
+ *
+ * Number of buffers reserved from the running transaction.
+ */
atomic_t j_reserved_credits;
- /*
- * Protects the buffer lists and internal buffer state.
+ /**
+ * @j_list_lock: Protects the buffer lists and internal buffer state.
*/
spinlock_t j_list_lock;
- /* Optional inode where we store the journal. If present, all */
- /* journal block numbers are mapped into this inode via */
- /* bmap(). */
+ /**
+ * @j_inode:
+ *
+ * Optional inode where we store the journal. If present, all
+ * journal block numbers are mapped into this inode via bmap().
+ */
struct inode *j_inode;
- /*
+ /**
+ * @j_tail_sequence:
+ *
* Sequence number of the oldest transaction in the log [j_state_lock]
*/
tid_t j_tail_sequence;
- /*
+ /**
+ * @j_transaction_sequence:
+ *
* Sequence number of the next transaction to grant [j_state_lock]
*/
tid_t j_transaction_sequence;
- /*
+ /**
+ * @j_commit_sequence:
+ *
* Sequence number of the most recently committed transaction
- * [j_state_lock].
+ * [j_state_lock, no lock for quick racy checks]
*/
tid_t j_commit_sequence;
- /*
+ /**
+ * @j_commit_request:
+ *
* Sequence number of the most recent transaction wanting commit
- * [j_state_lock]
+ * [j_state_lock, no lock for quick racy checks]
*/
tid_t j_commit_request;
- /*
+ /**
+ * @j_uuid:
+ *
* Journal uuid: identifies the object (filesystem, LVM volume etc)
* backed by this journal. This will eventually be replaced by an array
* of uuids, allowing us to index multiple devices within a single
@@ -958,85 +1088,193 @@ struct journal_s
*/
__u8 j_uuid[16];
- /* Pointer to the current commit thread for this journal */
+ /**
+ * @j_task: Pointer to the current commit thread for this journal.
+ */
struct task_struct *j_task;
- /*
+ /**
+ * @j_max_transaction_buffers:
+ *
* Maximum number of metadata buffers to allow in a single compound
- * commit transaction
+ * commit transaction.
*/
int j_max_transaction_buffers;
- /*
+ /**
+ * @j_revoke_records_per_block:
+ *
+ * Number of revoke records that fit in one descriptor block.
+ */
+ int j_revoke_records_per_block;
+
+ /**
+ * @j_commit_interval:
+ *
* What is the maximum transaction lifetime before we begin a commit?
*/
unsigned long j_commit_interval;
- /* The timer used to wakeup the commit thread: */
+ /**
+	 * @j_commit_timer: The timer used to wake up the commit thread.
+ */
struct timer_list j_commit_timer;
- /*
- * The revoke table: maintains the list of revoked blocks in the
- * current transaction. [j_revoke_lock]
+ /**
+	 * @j_revoke_lock: Protects the revoke table.
*/
spinlock_t j_revoke_lock;
+
+ /**
+ * @j_revoke:
+ *
+ * The revoke table - maintains the list of revoked blocks in the
+ * current transaction.
+ */
struct jbd2_revoke_table_s *j_revoke;
+
+ /**
+ * @j_revoke_table: Alternate revoke tables for j_revoke.
+ */
struct jbd2_revoke_table_s *j_revoke_table[2];
- /*
- * array of bhs for jbd2_journal_commit_transaction
+ /**
+ * @j_wbuf: Array of bhs for jbd2_journal_commit_transaction.
*/
struct buffer_head **j_wbuf;
+
+ /**
+ * @j_fc_wbuf: Array of fast commit bhs for fast commit. Accessed only
+	 * during a fast commit. Currently only one process can do fast commit,
+	 * so this field is not protected by any lock.
+ */
+ struct buffer_head **j_fc_wbuf;
+
+ /**
+ * @j_wbufsize:
+ *
+ * Size of @j_wbuf array.
+ */
int j_wbufsize;
- /*
- * this is the pid of hte last person to run a synchronous operation
- * through the journal
+ /**
+ * @j_fc_wbufsize:
+ *
+ * Size of @j_fc_wbuf array.
+ */
+ int j_fc_wbufsize;
+
+ /**
+ * @j_last_sync_writer:
+ *
+ * The pid of the last person to run a synchronous operation
+ * through the journal.
*/
pid_t j_last_sync_writer;
- /*
- * the average amount of time in nanoseconds it takes to commit a
+ /**
+ * @j_average_commit_time:
+ *
+ * The average amount of time in nanoseconds it takes to commit a
* transaction to disk. [j_state_lock]
*/
u64 j_average_commit_time;
- /*
- * minimum and maximum times that we should wait for
- * additional filesystem operations to get batched into a
- * synchronous handle in microseconds
+ /**
+ * @j_min_batch_time:
+ *
+ * Minimum time that we should wait for additional filesystem operations
+ * to get batched into a synchronous handle in microseconds.
*/
u32 j_min_batch_time;
+
+ /**
+ * @j_max_batch_time:
+ *
+ * Maximum time that we should wait for additional filesystem operations
+ * to get batched into a synchronous handle in microseconds.
+ */
u32 j_max_batch_time;
- /* This function is called when a transaction is closed */
+ /**
+ * @j_commit_callback:
+ *
+ * This function is called when a transaction is closed.
+ */
void (*j_commit_callback)(journal_t *,
transaction_t *);
+ /**
+ * @j_submit_inode_data_buffers:
+ *
+ * This function is called for all inodes associated with the
+ * committing transaction marked with JI_WRITE_DATA flag
+ * before we start to write out the transaction to the journal.
+ */
+ int (*j_submit_inode_data_buffers)
+ (struct jbd2_inode *);
+
+ /**
+ * @j_finish_inode_data_buffers:
+ *
+ * This function is called for all inodes associated with the
+ * committing transaction marked with JI_WAIT_DATA flag
+ * after we have written the transaction to the journal
+ * but before we write out the commit block.
+ */
+ int (*j_finish_inode_data_buffers)
+ (struct jbd2_inode *);
+
/*
* Journal statistics
*/
+
+ /**
+	 * @j_history_lock: Protects the transaction statistics history.
+ */
spinlock_t j_history_lock;
+
+ /**
+ * @j_proc_entry: procfs entry for the jbd statistics directory.
+ */
struct proc_dir_entry *j_proc_entry;
+
+ /**
+ * @j_stats: Overall statistics.
+ */
struct transaction_stats_s j_stats;
- /* Failed journal commit ID */
+ /**
+ * @j_failed_commit: Failed journal commit ID.
+ */
unsigned int j_failed_commit;
- /*
+ /**
+ * @j_private:
+ *
* An opaque pointer to fs-private information. ext3 puts its
- * superblock pointer here
+ * superblock pointer here.
*/
void *j_private;
- /* Reference to checksum algorithm driver via cryptoapi */
+ /**
+ * @j_chksum_driver:
+ *
+ * Reference to checksum algorithm driver via cryptoapi.
+ */
struct crypto_shash *j_chksum_driver;
- /* Precomputed journal UUID checksum for seeding other checksums */
+ /**
+ * @j_csum_seed:
+ *
+ * Precomputed journal UUID checksum for seeding other checksums.
+ */
__u32 j_csum_seed;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
- /*
+ /**
+ * @j_trans_commit_map:
+ *
* Lockdep entity to track transaction commit dependencies. Handles
* hold this "lock" for read, when we wait for commit, we acquire the
* "lock" for writing. This matches the properties of jbd2 journalling
@@ -1046,12 +1284,44 @@ struct journal_s
*/
struct lockdep_map j_trans_commit_map;
#endif
+
+ /**
+ * @j_fc_cleanup_callback:
+ *
+ * Clean-up after fast commit or full commit. JBD2 calls this function
+ * after every commit operation.
+ */
+ void (*j_fc_cleanup_callback)(struct journal_s *journal, int full, tid_t tid);
+
+ /**
+ * @j_fc_replay_callback:
+ *
+ * File-system specific function that performs replay of a fast
+ * commit. JBD2 calls this function for each fast commit block found in
+ * the journal. This function should return JBD2_FC_REPLAY_CONTINUE
+ * to indicate that the block was processed correctly and more fast
+ * commit replay should continue. Return value of JBD2_FC_REPLAY_STOP
+ * indicates the end of replay (no more blocks remaining). A negative
+ * return value indicates error.
+ */
+ int (*j_fc_replay_callback)(struct journal_s *journal,
+ struct buffer_head *bh,
+ enum passtype pass, int off,
+ tid_t expected_commit_id);
+
+ /**
+ * @j_bmap:
+ *
+ * Bmap function that should be used instead of the generic
+ * VFS bmap function.
+ */
+ int (*j_bmap)(struct journal_s *journal, sector_t *block);
};
#define jbd2_might_wait_for_commit(j) \
do { \
rwsem_acquire(&j->j_trans_commit_map, 0, 0, _THIS_IP_); \
- rwsem_release(&j->j_trans_commit_map, 1, _THIS_IP_); \
+ rwsem_release(&j->j_trans_commit_map, _THIS_IP_); \
} while (0)
/* journal feature predicate functions */
@@ -1116,6 +1386,7 @@ JBD2_FEATURE_INCOMPAT_FUNCS(64bit, 64BIT)
JBD2_FEATURE_INCOMPAT_FUNCS(async_commit, ASYNC_COMMIT)
JBD2_FEATURE_INCOMPAT_FUNCS(csum2, CSUM_V2)
JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3)
+JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT)
/*
* Journal flag definitions
@@ -1129,7 +1400,18 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3)
#define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
* data write error in ordered
* mode */
-#define JBD2_REC_ERR 0x080 /* The errno in the sb has been recorded */
+#define JBD2_FAST_COMMIT_ONGOING 0x100 /* Fast commit is ongoing */
+#define JBD2_FULL_COMMIT_ONGOING 0x200 /* Full commit is ongoing */
+#define JBD2_JOURNAL_FLUSH_DISCARD 0x0001
+#define JBD2_JOURNAL_FLUSH_ZEROOUT 0x0002
+#define JBD2_JOURNAL_FLUSH_VALID (JBD2_JOURNAL_FLUSH_DISCARD | \
+ JBD2_JOURNAL_FLUSH_ZEROOUT)
+
+/*
+ * Journal atomic flag definitions
+ */
+#define JBD2_CHECKPOINT_IO_ERROR 0x001 /* Detect io error while writing
+ * buffer back to disk */
/*
* Function declarations for the journaling transaction and buffer
@@ -1138,12 +1420,10 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3)
/* Filing buffers */
extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
-extern void __jbd2_journal_refile_buffer(struct journal_head *);
+extern bool __jbd2_journal_refile_buffer(struct journal_head *);
extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_free_buffer(struct journal_head *bh);
extern void jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_clean_data_list(transaction_t *transaction);
static inline void jbd2_file_log_bh(struct list_head *head, struct buffer_head *bh)
{
list_add_tail(&bh->b_assoc_buffers, head);
@@ -1167,6 +1447,7 @@ extern void jbd2_journal_commit_transaction(journal_t *);
/* Checkpoint list management */
void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
+unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan);
int __jbd2_journal_remove_checkpoint(struct journal_head *);
void jbd2_journal_destroy_checkpoint(journal_t *journal);
void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
@@ -1207,12 +1488,9 @@ extern int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
struct buffer_head **bh_out,
sector_t blocknr);
-/* Transaction locking */
-extern void __wait_on_journal (journal_t *);
-
/* Transaction cache support */
extern void jbd2_journal_destroy_transaction_cache(void);
-extern int jbd2_journal_init_transaction_cache(void);
+extern int __init jbd2_journal_init_transaction_cache(void);
extern void jbd2_journal_free_transaction(transaction_t *);
/*
@@ -1239,14 +1517,16 @@ static inline handle_t *journal_current_handle(void)
extern handle_t *jbd2_journal_start(journal_t *, int nblocks);
extern handle_t *jbd2__journal_start(journal_t *, int blocks, int rsv_blocks,
- gfp_t gfp_mask, unsigned int type,
- unsigned int line_no);
+ int revoke_records, gfp_t gfp_mask,
+ unsigned int type, unsigned int line_no);
extern int jbd2_journal_restart(handle_t *, int nblocks);
-extern int jbd2__journal_restart(handle_t *, int nblocks, gfp_t gfp_mask);
+extern int jbd2__journal_restart(handle_t *, int nblocks,
+ int revoke_records, gfp_t gfp_mask);
extern int jbd2_journal_start_reserved(handle_t *handle,
unsigned int type, unsigned int line_no);
extern void jbd2_journal_free_reserved(handle_t *handle);
-extern int jbd2_journal_extend (handle_t *, int nblocks);
+extern int jbd2_journal_extend(handle_t *handle, int nblocks,
+ int revoke_records);
extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *);
extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *);
extern int jbd2_journal_get_undo_access(handle_t *, struct buffer_head *);
@@ -1254,15 +1534,16 @@ void jbd2_journal_set_triggers(struct buffer_head *,
struct jbd2_buffer_trigger_type *type);
extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
extern int jbd2_journal_forget (handle_t *, struct buffer_head *);
-extern void journal_sync_buffer (struct buffer_head *);
-extern int jbd2_journal_invalidatepage(journal_t *,
- struct page *, unsigned int, unsigned int);
-extern int jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
+int jbd2_journal_invalidate_folio(journal_t *, struct folio *,
+ size_t offset, size_t length);
+bool jbd2_journal_try_to_free_buffers(journal_t *journal, struct folio *folio);
extern int jbd2_journal_stop(handle_t *);
-extern int jbd2_journal_flush (journal_t *);
+extern int jbd2_journal_flush(journal_t *journal, unsigned int flags);
extern void jbd2_journal_lock_updates (journal_t *);
extern void jbd2_journal_unlock_updates (journal_t *);
+void jbd2_journal_wait_updates(journal_t *);
+
extern journal_t * jbd2_journal_init_dev(struct block_device *bdev,
struct block_device *fs_dev,
unsigned long long start, int len, int bsize);
@@ -1283,8 +1564,7 @@ extern int jbd2_journal_wipe (journal_t *, int);
extern int jbd2_journal_skip_recovery (journal_t *);
extern void jbd2_journal_update_sb_errno(journal_t *);
extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t,
- unsigned long, int);
-extern void __jbd2_journal_abort_hard (journal_t *);
+ unsigned long, blk_opf_t);
extern void jbd2_journal_abort (journal_t *, int);
extern int jbd2_journal_errno (journal_t *);
extern void jbd2_journal_ack_err (journal_t *);
@@ -1292,8 +1572,14 @@ extern int jbd2_journal_clear_err (journal_t *);
extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *);
extern int jbd2_journal_force_commit(journal_t *);
extern int jbd2_journal_force_commit_nested(journal_t *);
-extern int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *inode);
-extern int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *inode);
+extern int jbd2_journal_inode_ranged_write(handle_t *handle,
+ struct jbd2_inode *inode, loff_t start_byte,
+ loff_t length);
+extern int jbd2_journal_inode_ranged_wait(handle_t *handle,
+ struct jbd2_inode *inode, loff_t start_byte,
+ loff_t length);
+extern int jbd2_journal_finish_inode_data_buffers(
+ struct jbd2_inode *jinode);
extern int jbd2_journal_begin_ordered_truncate(journal_t *journal,
struct jbd2_inode *inode, loff_t new_size);
extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode);
@@ -1340,8 +1626,10 @@ static inline void jbd2_free_inode(struct jbd2_inode *jinode)
/* Primary revoke support */
#define JOURNAL_REVOKE_DEFAULT_HASH 256
extern int jbd2_journal_init_revoke(journal_t *, int);
-extern void jbd2_journal_destroy_revoke_caches(void);
-extern int jbd2_journal_init_revoke_caches(void);
+extern void jbd2_journal_destroy_revoke_record_cache(void);
+extern void jbd2_journal_destroy_revoke_table_cache(void);
+extern int __init jbd2_journal_init_revoke_record_cache(void);
+extern int __init jbd2_journal_init_revoke_table_cache(void);
extern void jbd2_journal_destroy_revoke(journal_t *);
extern int jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *);
@@ -1364,9 +1652,9 @@ extern void jbd2_clear_buffer_revoked_flags(journal_t *journal);
*/
int jbd2_log_start_commit(journal_t *journal, tid_t tid);
-int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
+int jbd2_transaction_committed(journal_t *journal, tid_t tid);
int jbd2_complete_transaction(journal_t *journal, tid_t tid);
int jbd2_log_do_checkpoint(journal_t *journal);
int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
@@ -1375,6 +1663,21 @@ void __jbd2_log_wait_for_space(journal_t *journal);
extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
extern int jbd2_cleanup_journal_tail(journal_t *);
+/* Fast commit related APIs */
+int jbd2_fc_begin_commit(journal_t *journal, tid_t tid);
+int jbd2_fc_end_commit(journal_t *journal);
+int jbd2_fc_end_commit_fallback(journal_t *journal);
+int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out);
+int jbd2_submit_inode_data(journal_t *journal, struct jbd2_inode *jinode);
+int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode);
+int jbd2_fc_wait_bufs(journal_t *journal, int num_blks);
+int jbd2_fc_release_bufs(journal_t *journal);
+
+static inline int jbd2_journal_get_max_txn_bufs(journal_t *journal)
+{
+ return (journal->j_total_len - journal->j_fc_wbufsize) / 4;
+}
+
/*
* is_journal_abort
*
@@ -1435,20 +1738,11 @@ static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
return journal->j_chksum_driver != NULL;
}
-/*
- * We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for
- * transaction control blocks.
- */
-#define JBD2_CONTROL_BLOCKS_SHIFT 5
-
-/*
- * Return the minimum number of blocks which must be free in the journal
- * before a new transaction may be started. Must be called under j_state_lock.
- */
-static inline int jbd2_space_needed(journal_t *journal)
+static inline int jbd2_journal_get_num_fc_blks(journal_superblock_t *jsb)
{
- int nblocks = journal->j_max_transaction_buffers;
- return nblocks + (nblocks >> JBD2_CONTROL_BLOCKS_SHIFT);
+ int num_fc_blocks = be32_to_cpu(jsb->s_num_fc_blks);
+
+ return num_fc_blocks ? num_fc_blocks : JBD2_DEFAULT_FAST_COMMIT_BLOCKS;
}
/*
@@ -1457,16 +1751,13 @@ static inline int jbd2_space_needed(journal_t *journal)
static inline unsigned long jbd2_log_space_left(journal_t *journal)
{
/* Allow for rounding errors */
- unsigned long free = journal->j_free - 32;
+ long free = journal->j_free - 32;
if (journal->j_committing_transaction) {
- unsigned long committing = atomic_read(&journal->
- j_committing_transaction->t_outstanding_credits);
-
- /* Transaction + control blocks */
- free -= committing + (committing >> JBD2_CONTROL_BLOCKS_SHIFT);
+ free -= atomic_read(&journal->
+ j_committing_transaction->t_outstanding_credits);
}
- return free;
+ return max_t(long, free, 0);
}
/*
@@ -1481,8 +1772,6 @@ static inline unsigned long jbd2_log_space_left(journal_t *journal)
#define BJ_Reserved 4 /* Buffer is reserved for access by journal */
#define BJ_Types 5
-extern int jbd_blocks_per_page(struct inode *inode);
-
/* JBD uses a CRC32 checksum */
#define JBD_MAX_CHECKSUM_SIZE 4
@@ -1499,7 +1788,6 @@ static inline u32 jbd2_chksum(journal_t *journal, u32 crc,
JBD_MAX_CHECKSUM_SIZE);
desc.shash.tfm = journal->j_chksum_driver;
- desc.shash.flags = 0;
*(u32 *)desc.ctx = crc;
err = crypto_shash_update(&desc.shash, address, length);
@@ -1521,6 +1809,20 @@ static inline tid_t jbd2_get_latest_transaction(journal_t *journal)
return tid;
}
+static inline int jbd2_handle_buffer_credits(handle_t *handle)
+{
+ journal_t *journal;
+
+ if (!handle->h_reserved)
+ journal = handle->h_transaction->t_journal;
+ else
+ journal = handle->h_journal;
+
+ return handle->h_total_credits -
+ DIV_ROUND_UP(handle->h_revoke_credits_requested,
+ journal->j_revoke_records_per_block);
+}
+
#ifdef __KERNEL__
#define buffer_trace_init(bh) do {} while (0)
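
For reference, the fast commit entry points declared above are driven by the client filesystem roughly in the order below. This is a hedged sketch only: error handling is simplified, and the step that fills and submits the buffer is filesystem-specific (ext4 supplies its own), so it is elided.

	/* Hedged sketch of the fast commit flow; not a definitive implementation. */
	static int example_fast_commit(journal_t *journal, tid_t tid)
	{
		struct buffer_head *bh;
		int ret;

		ret = jbd2_fc_begin_commit(journal, tid);	/* serialize vs. full commit */
		if (ret)
			return ret;

		ret = jbd2_fc_get_buf(journal, &bh);		/* one block from the FC area */
		if (ret)
			goto fallback;

		/* ... fill bh->b_data with fs-specific tags and submit the bh ... */

		ret = jbd2_fc_wait_bufs(journal, 1);		/* wait for that block */
		if (ret)
			goto fallback;

		return jbd2_fc_end_commit(journal);

	fallback:
		jbd2_fc_release_bufs(journal);
		return jbd2_fc_end_commit_fallback(journal);	/* degrade to a full commit */
	}
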
diff --git a/include/linux/jhash.h b/include/linux/jhash.h
index 348c6f47e4cc..ab7f8c152b89 100644
--- a/include/linux/jhash.h
+++ b/include/linux/jhash.h
@@ -5,7 +5,7 @@
*
* Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
*
- * http://burtleburtle.net/bob/hash/
+ * https://burtleburtle.net/bob/hash/
*
* These are the credits from Bob's sources:
*
@@ -17,7 +17,7 @@
* if SELF_TEST is defined. You can use this free for any purpose. It's in
* the public domain. It has no warranty.
*
- * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
+ * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@netfilter.org)
*
* I've modified Bob's hash to be useful in the Linux kernel, and
* any bugs present are my fault.
@@ -85,21 +85,21 @@ static inline u32 jhash(const void *key, u32 length, u32 initval)
k += 12;
}
/* Last block: affect all 32 bits of (c) */
- /* All the case statements fall through */
switch (length) {
- case 12: c += (u32)k[11]<<24;
- case 11: c += (u32)k[10]<<16;
- case 10: c += (u32)k[9]<<8;
- case 9: c += k[8];
- case 8: b += (u32)k[7]<<24;
- case 7: b += (u32)k[6]<<16;
- case 6: b += (u32)k[5]<<8;
- case 5: b += k[4];
- case 4: a += (u32)k[3]<<24;
- case 3: a += (u32)k[2]<<16;
- case 2: a += (u32)k[1]<<8;
+ case 12: c += (u32)k[11]<<24; fallthrough;
+ case 11: c += (u32)k[10]<<16; fallthrough;
+ case 10: c += (u32)k[9]<<8; fallthrough;
+ case 9: c += k[8]; fallthrough;
+ case 8: b += (u32)k[7]<<24; fallthrough;
+ case 7: b += (u32)k[6]<<16; fallthrough;
+ case 6: b += (u32)k[5]<<8; fallthrough;
+ case 5: b += k[4]; fallthrough;
+ case 4: a += (u32)k[3]<<24; fallthrough;
+ case 3: a += (u32)k[2]<<16; fallthrough;
+ case 2: a += (u32)k[1]<<8; fallthrough;
case 1: a += k[0];
__jhash_final(a, b, c);
+ break;
case 0: /* Nothing left to add */
break;
}
@@ -131,12 +131,13 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
k += 3;
}
- /* Handle the last 3 u32's: all the case statements fall through */
+ /* Handle the last 3 u32's */
switch (length) {
- case 3: c += k[2];
- case 2: b += k[1];
+ case 3: c += k[2]; fallthrough;
+ case 2: b += k[1]; fallthrough;
case 1: a += k[0];
__jhash_final(a, b, c);
+ break;
case 0: /* Nothing left to add */
break;
}
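
For context, the jhash2() exercised by the second switch above is the word-oriented variant: it hashes an array of u32s rather than bytes. A minimal usage sketch; the flow_key layout is hypothetical:

	#include <linux/jhash.h>

	struct flow_key {
		u32 saddr;
		u32 daddr;
		u32 ports;
	};

	static u32 example_hash_flow(const struct flow_key *key, u32 seed)
	{
		/* The length argument is in u32 words, not bytes */
		return jhash2((const u32 *)key, sizeof(*key) / sizeof(u32), seed);
	}
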
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 734377ad42e9..5e13f801c902 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -1,12 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_JIFFIES_H
#define _LINUX_JIFFIES_H
#include <linux/cache.h>
+#include <linux/limits.h>
#include <linux/math64.h>
-#include <linux/kernel.h>
+#include <linux/minmax.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/timex.h>
+#include <vdso/jiffies.h>
#include <asm/param.h> /* for HZ */
#include <generated/timeconst.h>
@@ -58,11 +61,11 @@
extern int register_refined_jiffies(long clock_tick_rate);
-/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */
-#define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ)
+/* TICK_USEC is the time between ticks in usec assuming SHIFTED_HZ */
+#define TICK_USEC ((USEC_PER_SEC + HZ/2) / HZ)
-/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
-#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
+/* USER_TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
+#define USER_TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
#ifndef __jiffy_arch_data
#define __jiffy_arch_data
@@ -293,6 +296,7 @@ static inline u64 jiffies_to_nsecs(const unsigned long j)
}
extern u64 jiffies64_to_nsecs(u64 j);
+extern u64 jiffies64_to_msecs(u64 j);
extern unsigned long __msecs_to_jiffies(const unsigned int m);
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
@@ -417,32 +421,17 @@ static __always_inline unsigned long usecs_to_jiffies(const unsigned int u)
extern unsigned long timespec64_to_jiffies(const struct timespec64 *value);
extern void jiffies_to_timespec64(const unsigned long jiffies,
struct timespec64 *value);
-static inline unsigned long timespec_to_jiffies(const struct timespec *value)
-{
- struct timespec64 ts = timespec_to_timespec64(*value);
-
- return timespec64_to_jiffies(&ts);
-}
-
-static inline void jiffies_to_timespec(const unsigned long jiffies,
- struct timespec *value)
-{
- struct timespec64 ts;
-
- jiffies_to_timespec64(jiffies, &ts);
- *value = timespec64_to_timespec(ts);
-}
-
-extern unsigned long timeval_to_jiffies(const struct timeval *value);
-extern void jiffies_to_timeval(const unsigned long jiffies,
- struct timeval *value);
-
extern clock_t jiffies_to_clock_t(unsigned long x);
static inline clock_t jiffies_delta_to_clock_t(long delta)
{
return jiffies_to_clock_t(max(0L, delta));
}
+static inline unsigned int jiffies_delta_to_msecs(long delta)
+{
+ return jiffies_to_msecs(max(0L, delta));
+}
+
extern unsigned long clock_t_to_jiffies(unsigned long x);
extern u64 jiffies_64_to_clock_t(u64 x);
extern u64 nsec_to_clock_t(u64 x);
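
The new jiffies_delta_to_msecs() mirrors jiffies_delta_to_clock_t(): it clamps a negative delta to zero before converting. A hedged usage sketch; the deadline bookkeeping is hypothetical:

	#include <linux/jiffies.h>

	/* Milliseconds remaining until @deadline; 0 once it has passed */
	static unsigned int example_ms_until(unsigned long deadline)
	{
		return jiffies_delta_to_msecs(deadline - jiffies);
	}
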
diff --git a/include/linux/journal-head.h b/include/linux/journal-head.h
index 98cd41bb39c8..75bc56109031 100644
--- a/include/linux/journal-head.h
+++ b/include/linux/journal-head.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/journal-head.h
*
@@ -10,6 +11,8 @@
#ifndef JOURNAL_HEAD_H_INCLUDED
#define JOURNAL_HEAD_H_INCLUDED
+#include <linux/spinlock.h>
+
typedef unsigned int tid_t; /* Unique transaction ID */
typedef struct transaction_s transaction_t; /* Compound transaction type */
@@ -23,13 +26,18 @@ struct journal_head {
struct buffer_head *b_bh;
/*
+ * Protect the buffer head state
+ */
+ spinlock_t b_state_lock;
+
+ /*
* Reference count - see description in journal.c
* [jbd_lock_bh_journal_head()]
*/
int b_jcount;
/*
- * Journalling list for this buffer [jbd_lock_bh_state()]
+ * Journalling list for this buffer [b_state_lock]
* NOTE: We *cannot* combine this with b_modified into a bitfield
* as gcc would then (which the C standard allows but which is
* very unuseful) make 64-bit accesses to the bitfield and clobber
@@ -40,20 +48,20 @@ struct journal_head {
/*
* This flag signals the buffer has been modified by
* the currently running transaction
- * [jbd_lock_bh_state()]
+ * [b_state_lock]
*/
unsigned b_modified;
/*
* Copy of the buffer data frozen for writing to the log.
- * [jbd_lock_bh_state()]
+ * [b_state_lock]
*/
char *b_frozen_data;
/*
* Pointer to a saved copy of the buffer containing no uncommitted
* deallocation references, so that allocations can avoid overwriting
- * uncommitted deletes. [jbd_lock_bh_state()]
+ * uncommitted deletes. [b_state_lock]
*/
char *b_committed_data;
@@ -62,7 +70,7 @@ struct journal_head {
* metadata: either the running transaction or the committing
* transaction (if there is one). Only applies to buffers on a
* transaction's data or metadata journaling list.
- * [j_list_lock] [jbd_lock_bh_state()]
+ * [j_list_lock] [b_state_lock]
* Either of these locks is enough for reading, both are needed for
* changes.
*/
@@ -72,13 +80,13 @@ struct journal_head {
* Pointer to the running compound transaction which is currently
* modifying the buffer's metadata, if there was already a transaction
* committing it when the new transaction touched it.
- * [t_list_lock] [jbd_lock_bh_state()]
+ * [t_list_lock] [b_state_lock]
*/
transaction_t *b_next_transaction;
/*
* Doubly-linked list of buffers on a transaction's data, metadata or
- * forget queue. [t_list_lock] [jbd_lock_bh_state()]
+ * forget queue. [t_list_lock] [b_state_lock]
*/
struct journal_head *b_tnext, *b_tprev;
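
These annotations replace the old jbd_lock_bh_state() bit-spinlock: fields tagged [b_state_lock] are now serialized by the journal head's own spinlock. A hedged sketch of the access pattern; the helper is hypothetical:

	/* Writing a [b_state_lock] field such as b_modified */
	static void example_set_modified(struct journal_head *jh)
	{
		spin_lock(&jh->b_state_lock);
		jh->b_modified = 1;
		spin_unlock(&jh->b_state_lock);
	}
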
diff --git a/include/linux/joystick.h b/include/linux/joystick.h
index cbf2aa9e93b9..41b833b012f5 100644
--- a/include/linux/joystick.h
+++ b/include/linux/joystick.h
@@ -1,26 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 1996-2000 Vojtech Pavlik
*
* Sponsored by SuSE
*/
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
- * Vojtech Pavlik, Ucitelska 1576, Prague 8, 182 00 Czech Republic
*/
#ifndef _LINUX_JOYSTICK_H
#define _LINUX_JOYSTICK_H
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 2afd74b9d844..4e968ebadce6 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_JUMP_LABEL_H
#define _LINUX_JUMP_LABEL_H
@@ -67,13 +68,9 @@
* Lacking toolchain and or architecture support, static keys fall back to a
* simple conditional branch.
*
- * Additional babbling in: Documentation/static-keys.txt
+ * Additional babbling in: Documentation/staging/static-keys.rst
*/
-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
-# define HAVE_JUMP_LABEL
-#endif
-
#ifndef __ASSEMBLY__
#include <linux/types.h>
@@ -81,14 +78,13 @@
extern bool static_key_initialized;
-#define STATIC_KEY_CHECK_USE() WARN(!static_key_initialized, \
- "%s used before call to jump_label_init", \
- __func__)
-
-#ifdef HAVE_JUMP_LABEL
+#define STATIC_KEY_CHECK_USE(key) WARN(!static_key_initialized, \
+ "%s(): static key '%pS' used before call to jump_label_init()", \
+ __func__, (key))
struct static_key {
atomic_t enabled;
+#ifdef CONFIG_JUMP_LABEL
/*
* Note:
* To make anonymous unions work with old compilers, the static
@@ -107,17 +103,87 @@ struct static_key {
struct jump_entry *entries;
struct static_key_mod *next;
};
+#endif /* CONFIG_JUMP_LABEL */
};
-#else
-struct static_key {
- atomic_t enabled;
-};
-#endif /* HAVE_JUMP_LABEL */
#endif /* __ASSEMBLY__ */
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
#include <asm/jump_label.h>
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE
+
+struct jump_entry {
+ s32 code;
+ s32 target;
+ long key; // key may be far away from the core kernel under KASLR
+};
+
+static inline unsigned long jump_entry_code(const struct jump_entry *entry)
+{
+ return (unsigned long)&entry->code + entry->code;
+}
+
+static inline unsigned long jump_entry_target(const struct jump_entry *entry)
+{
+ return (unsigned long)&entry->target + entry->target;
+}
+
+static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
+{
+ long offset = entry->key & ~3L;
+
+ return (struct static_key *)((unsigned long)&entry->key + offset);
+}
+
+#else
+
+static inline unsigned long jump_entry_code(const struct jump_entry *entry)
+{
+ return entry->code;
+}
+
+static inline unsigned long jump_entry_target(const struct jump_entry *entry)
+{
+ return entry->target;
+}
+
+static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
+{
+ return (struct static_key *)((unsigned long)entry->key & ~3UL);
+}
+
+#endif
+
+static inline bool jump_entry_is_branch(const struct jump_entry *entry)
+{
+ return (unsigned long)entry->key & 1UL;
+}
+
+static inline bool jump_entry_is_init(const struct jump_entry *entry)
+{
+ return (unsigned long)entry->key & 2UL;
+}
+
+static inline void jump_entry_set_init(struct jump_entry *entry, bool set)
+{
+ if (set)
+ entry->key |= 2;
+ else
+ entry->key &= ~2;
+}
+
+static inline int jump_entry_size(struct jump_entry *entry)
+{
+#ifdef JUMP_LABEL_NOP_SIZE
+ return JUMP_LABEL_NOP_SIZE;
+#else
+ return arch_jump_entry_size(entry);
+#endif
+}
+
+#endif
#endif
#ifndef __ASSEMBLY__
@@ -129,7 +195,7 @@ enum jump_label_type {
struct module;
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
#define JUMP_TYPE_FALSE 0UL
#define JUMP_TYPE_TRUE 1UL
@@ -154,15 +220,21 @@ extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type);
-extern void arch_jump_label_transform_static(struct jump_entry *entry,
- enum jump_label_type type);
+extern bool arch_jump_label_transform_queue(struct jump_entry *entry,
+ enum jump_label_type type);
+extern void arch_jump_label_transform_apply(void);
extern int jump_label_text_reserved(void *start, void *end);
-extern void static_key_slow_inc(struct static_key *key);
+extern bool static_key_slow_inc(struct static_key *key);
+extern bool static_key_fast_inc_not_disabled(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
-extern void jump_label_apply_nops(struct module *mod);
+extern bool static_key_slow_inc_cpuslocked(struct static_key *key);
+extern void static_key_slow_dec_cpuslocked(struct static_key *key);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
extern void static_key_disable(struct static_key *key);
+extern void static_key_enable_cpuslocked(struct static_key *key);
+extern void static_key_disable_cpuslocked(struct static_key *key);
+extern enum jump_label_type jump_label_init_type(struct jump_entry *entry);
/*
* We should be using ATOMIC_INIT() for initializing .enabled, but
@@ -173,19 +245,19 @@ extern void static_key_disable(struct static_key *key);
*/
#define STATIC_KEY_INIT_TRUE \
{ .enabled = { 1 }, \
- { .entries = (void *)JUMP_TYPE_TRUE } }
+ { .type = JUMP_TYPE_TRUE } }
#define STATIC_KEY_INIT_FALSE \
{ .enabled = { 0 }, \
- { .entries = (void *)JUMP_TYPE_FALSE } }
+ { .type = JUMP_TYPE_FALSE } }
-#else /* !HAVE_JUMP_LABEL */
+#else /* !CONFIG_JUMP_LABEL */
#include <linux/atomic.h>
#include <linux/bug.h>
-static inline int static_key_count(struct static_key *key)
+static __always_inline int static_key_count(struct static_key *key)
{
- return atomic_read(&key->enabled);
+ return arch_atomic_read(&key->enabled);
}
static __always_inline void jump_label_init(void)
@@ -195,30 +267,45 @@ static __always_inline void jump_label_init(void)
static __always_inline bool static_key_false(struct static_key *key)
{
- if (unlikely(static_key_count(key) > 0))
+ if (unlikely_notrace(static_key_count(key) > 0))
return true;
return false;
}
static __always_inline bool static_key_true(struct static_key *key)
{
- if (likely(static_key_count(key) > 0))
+ if (likely_notrace(static_key_count(key) > 0))
return true;
return false;
}
-static inline void static_key_slow_inc(struct static_key *key)
+static inline bool static_key_fast_inc_not_disabled(struct static_key *key)
{
- STATIC_KEY_CHECK_USE();
- atomic_inc(&key->enabled);
+ int v;
+
+ STATIC_KEY_CHECK_USE(key);
+ /*
+ * Prevent key->enabled getting negative to follow the same semantics
+ * as for CONFIG_JUMP_LABEL=y, see kernel/jump_label.c comment.
+ */
+ v = atomic_read(&key->enabled);
+ do {
+ if (v < 0 || (v + 1) < 0)
+ return false;
+ } while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
+ return true;
}
+#define static_key_slow_inc(key) static_key_fast_inc_not_disabled(key)
static inline void static_key_slow_dec(struct static_key *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
atomic_dec(&key->enabled);
}
+#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key)
+#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key)
+
static inline int jump_label_text_reserved(void *start, void *end)
{
return 0;
@@ -227,35 +314,35 @@ static inline int jump_label_text_reserved(void *start, void *end)
static inline void jump_label_lock(void) {}
static inline void jump_label_unlock(void) {}
-static inline int jump_label_apply_nops(struct module *mod)
-{
- return 0;
-}
-
static inline void static_key_enable(struct static_key *key)
{
- int count = static_key_count(key);
-
- WARN_ON_ONCE(count < 0 || count > 1);
+ STATIC_KEY_CHECK_USE(key);
- if (!count)
- static_key_slow_inc(key);
+ if (atomic_read(&key->enabled) != 0) {
+ WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
+ return;
+ }
+ atomic_set(&key->enabled, 1);
}
static inline void static_key_disable(struct static_key *key)
{
- int count = static_key_count(key);
-
- WARN_ON_ONCE(count < 0 || count > 1);
+ STATIC_KEY_CHECK_USE(key);
- if (count)
- static_key_slow_dec(key);
+ if (atomic_read(&key->enabled) != 1) {
+ WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
+ return;
+ }
+ atomic_set(&key->enabled, 0);
}
+#define static_key_enable_cpuslocked(k) static_key_enable((k))
+#define static_key_disable_cpuslocked(k) static_key_disable((k))
+
#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
-#endif /* HAVE_JUMP_LABEL */
+#endif /* CONFIG_JUMP_LABEL */
#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
#define jump_label_enabled static_key_enabled
@@ -283,12 +370,18 @@ struct static_key_false {
#define DEFINE_STATIC_KEY_TRUE(name) \
struct static_key_true name = STATIC_KEY_TRUE_INIT
+#define DEFINE_STATIC_KEY_TRUE_RO(name) \
+ struct static_key_true name __ro_after_init = STATIC_KEY_TRUE_INIT
+
#define DECLARE_STATIC_KEY_TRUE(name) \
extern struct static_key_true name
#define DEFINE_STATIC_KEY_FALSE(name) \
struct static_key_false name = STATIC_KEY_FALSE_INIT
+#define DEFINE_STATIC_KEY_FALSE_RO(name) \
+ struct static_key_false name __ro_after_init = STATIC_KEY_FALSE_INIT
+
#define DECLARE_STATIC_KEY_FALSE(name) \
extern struct static_key_false name
@@ -302,6 +395,21 @@ struct static_key_false {
[0 ... (count) - 1] = STATIC_KEY_FALSE_INIT, \
}
+#define _DEFINE_STATIC_KEY_1(name) DEFINE_STATIC_KEY_TRUE(name)
+#define _DEFINE_STATIC_KEY_0(name) DEFINE_STATIC_KEY_FALSE(name)
+#define DEFINE_STATIC_KEY_MAYBE(cfg, name) \
+ __PASTE(_DEFINE_STATIC_KEY_, IS_ENABLED(cfg))(name)
+
+#define _DEFINE_STATIC_KEY_RO_1(name) DEFINE_STATIC_KEY_TRUE_RO(name)
+#define _DEFINE_STATIC_KEY_RO_0(name) DEFINE_STATIC_KEY_FALSE_RO(name)
+#define DEFINE_STATIC_KEY_MAYBE_RO(cfg, name) \
+ __PASTE(_DEFINE_STATIC_KEY_RO_, IS_ENABLED(cfg))(name)
+
+#define _DECLARE_STATIC_KEY_1(name) DECLARE_STATIC_KEY_TRUE(name)
+#define _DECLARE_STATIC_KEY_0(name) DECLARE_STATIC_KEY_FALSE(name)
+#define DECLARE_STATIC_KEY_MAYBE(cfg, name) \
+ __PASTE(_DECLARE_STATIC_KEY_, IS_ENABLED(cfg))(name)
+
extern bool ____wrong_branch_error(void);
#define static_key_enabled(x) \
@@ -313,7 +421,7 @@ extern bool ____wrong_branch_error(void);
static_key_count((struct static_key *)x) > 0; \
})
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
/*
* Combine the right initial value (type) with the right branch order
@@ -380,7 +488,7 @@ extern bool ____wrong_branch_error(void);
branch = !arch_static_branch_jump(&(x)->key, true); \
else \
branch = ____wrong_branch_error(); \
- branch; \
+ likely_notrace(branch); \
})
#define static_branch_unlikely(x) \
@@ -392,15 +500,19 @@ extern bool ____wrong_branch_error(void);
branch = arch_static_branch(&(x)->key, false); \
else \
branch = ____wrong_branch_error(); \
- branch; \
+ unlikely_notrace(branch); \
})
-#else /* !HAVE_JUMP_LABEL */
+#else /* !CONFIG_JUMP_LABEL */
+
+#define static_branch_likely(x) likely_notrace(static_key_enabled(&(x)->key))
+#define static_branch_unlikely(x) unlikely_notrace(static_key_enabled(&(x)->key))
-#define static_branch_likely(x) likely(static_key_enabled(&(x)->key))
-#define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key))
+#endif /* CONFIG_JUMP_LABEL */
-#endif /* HAVE_JUMP_LABEL */
+#define static_branch_maybe(config, x) \
+ (IS_ENABLED(config) ? static_branch_likely(x) \
+ : static_branch_unlikely(x))
/*
* Advanced usage; refcount, branch is enabled when: count != 0
@@ -408,13 +520,17 @@ extern bool ____wrong_branch_error(void);
#define static_branch_inc(x) static_key_slow_inc(&(x)->key)
#define static_branch_dec(x) static_key_slow_dec(&(x)->key)
+#define static_branch_inc_cpuslocked(x) static_key_slow_inc_cpuslocked(&(x)->key)
+#define static_branch_dec_cpuslocked(x) static_key_slow_dec_cpuslocked(&(x)->key)
/*
* Normal usage; boolean enable/disable.
*/
-#define static_branch_enable(x) static_key_enable(&(x)->key)
-#define static_branch_disable(x) static_key_disable(&(x)->key)
+#define static_branch_enable(x) static_key_enable(&(x)->key)
+#define static_branch_disable(x) static_key_disable(&(x)->key)
+#define static_branch_enable_cpuslocked(x) static_key_enable_cpuslocked(&(x)->key)
+#define static_branch_disable_cpuslocked(x) static_key_disable_cpuslocked(&(x)->key)
#endif /* __ASSEMBLY__ */
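
As a usage reference for the macros above: a default-off key compiles the fast path down to a patchable NOP when CONFIG_JUMP_LABEL=y, and falls back to an atomic_read() otherwise. A hedged sketch; the key and call sites are hypothetical:

	#include <linux/jump_label.h>
	#include <linux/printk.h>

	static DEFINE_STATIC_KEY_FALSE(example_feature_key);

	static void example_hot_path(void)
	{
		if (static_branch_unlikely(&example_feature_key))
			pr_info("example feature enabled\n");
	}

	/* Flip from sleepable context, e.g. a sysctl handler */
	static void example_enable_feature(void)
	{
		static_branch_enable(&example_feature_key);
	}
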
diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
index 23da3af459fe..8c3ee291b2d8 100644
--- a/include/linux/jump_label_ratelimit.h
+++ b/include/linux/jump_label_ratelimit.h
@@ -1,41 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_JUMP_LABEL_RATELIMIT_H
#define _LINUX_JUMP_LABEL_RATELIMIT_H
#include <linux/jump_label.h>
#include <linux/workqueue.h>
-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+#if defined(CONFIG_JUMP_LABEL)
struct static_key_deferred {
struct static_key key;
unsigned long timeout;
struct delayed_work work;
};
-#endif
-#ifdef HAVE_JUMP_LABEL
-extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
-extern void static_key_deferred_flush(struct static_key_deferred *key);
+struct static_key_true_deferred {
+ struct static_key_true key;
+ unsigned long timeout;
+ struct delayed_work work;
+};
+
+struct static_key_false_deferred {
+ struct static_key_false key;
+ unsigned long timeout;
+ struct delayed_work work;
+};
+
+#define static_key_slow_dec_deferred(x) \
+ __static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout)
+#define static_branch_slow_dec_deferred(x) \
+ __static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout)
+
+#define static_key_deferred_flush(x) \
+ __static_key_deferred_flush((x), &(x)->work)
+
+extern void
+__static_key_slow_dec_deferred(struct static_key *key,
+ struct delayed_work *work,
+ unsigned long timeout);
+extern void __static_key_deferred_flush(void *key, struct delayed_work *work);
extern void
jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
-#else /* !HAVE_JUMP_LABEL */
+extern void jump_label_update_timeout(struct work_struct *work);
+
+#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl) \
+ struct static_key_true_deferred name = { \
+ .key = { STATIC_KEY_INIT_TRUE }, \
+ .timeout = (rl), \
+ .work = __DELAYED_WORK_INITIALIZER((name).work, \
+ jump_label_update_timeout, \
+ 0), \
+ }
+
+#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl) \
+ struct static_key_false_deferred name = { \
+ .key = { STATIC_KEY_INIT_FALSE }, \
+ .timeout = (rl), \
+ .work = __DELAYED_WORK_INITIALIZER((name).work, \
+ jump_label_update_timeout, \
+ 0), \
+ }
+
+#else /* !CONFIG_JUMP_LABEL */
struct static_key_deferred {
struct static_key key;
};
+struct static_key_true_deferred {
+ struct static_key_true key;
+};
+struct static_key_false_deferred {
+ struct static_key_false key;
+};
+#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl) \
+ struct static_key_true_deferred name = { STATIC_KEY_TRUE_INIT }
+#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl) \
+ struct static_key_false_deferred name = { STATIC_KEY_FALSE_INIT }
+
+#define static_branch_slow_dec_deferred(x) static_branch_dec(&(x)->key)
+
static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
static_key_slow_dec(&key->key);
}
-static inline void static_key_deferred_flush(struct static_key_deferred *key)
+static inline void static_key_deferred_flush(void *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
}
static inline void
jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
}
-#endif /* HAVE_JUMP_LABEL */
+#endif /* CONFIG_JUMP_LABEL */
+
+#define static_branch_deferred_inc(x) static_branch_inc(&(x)->key)
+
#endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */
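
The deferred variants batch the expensive disable side: the actual branch patching runs at most once per timeout. A hedged usage sketch; the key name and callers are hypothetical:

	#include <linux/jump_label_ratelimit.h>

	static DEFINE_STATIC_KEY_DEFERRED_FALSE(example_deferred_key, HZ);

	static void example_get(void)
	{
		static_branch_deferred_inc(&example_deferred_key);
	}

	static void example_put(void)
	{
		/* The underlying slow_dec is deferred by the key's timeout */
		static_branch_slow_dec_deferred(&example_deferred_key);
	}
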
diff --git a/include/linux/jz4740-adc.h b/include/linux/jz4740-adc.h
index 8184578fbfa4..19d995c8bf06 100644
--- a/include/linux/jz4740-adc.h
+++ b/include/linux/jz4740-adc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_JZ4740_ADC
#define __LINUX_JZ4740_ADC
diff --git a/include/linux/jz4780-nemc.h b/include/linux/jz4780-nemc.h
index e7f1cc7a2284..bd7fad910242 100644
--- a/include/linux/jz4780-nemc.h
+++ b/include/linux/jz4780-nemc.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* JZ4780 NAND/external memory controller (NEMC)
*
* Copyright (c) 2015 Imagination Technologies
* Author: Alex Smith <alex@alex-smith.me.uk>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __LINUX_JZ4780_NEMC_H__
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 6883e197acb9..fe3c9993b5bf 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Rewritten and vastly simplified by Rusty Russell for in-kernel
* module loader:
* Copyright 2002 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
@@ -6,24 +7,74 @@
#define _LINUX_KALLSYMS_H
#include <linux/errno.h>
+#include <linux/buildid.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
+#include <linux/mm.h>
+#include <linux/module.h>
-#define KSYM_NAME_LEN 128
-#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
- 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
+#include <asm/sections.h>
+#define KSYM_NAME_LEN 512
+#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s %s]") + \
+ (KSYM_NAME_LEN - 1) + \
+ 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + \
+ (BUILD_ID_SIZE_MAX * 2) + 1)
+
+struct cred;
struct module;
+static inline int is_kernel_text(unsigned long addr)
+{
+ if (__is_kernel_text(addr))
+ return 1;
+ return in_gate_area_no_mm(addr);
+}
+
+static inline int is_kernel(unsigned long addr)
+{
+ if (__is_kernel(addr))
+ return 1;
+ return in_gate_area_no_mm(addr);
+}
+
+static inline int is_ksym_addr(unsigned long addr)
+{
+ if (IS_ENABLED(CONFIG_KALLSYMS_ALL))
+ return is_kernel(addr);
+
+ return is_kernel_text(addr) || is_kernel_inittext(addr);
+}
+
+static inline void *dereference_symbol_descriptor(void *ptr)
+{
+#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS
+ struct module *mod;
+
+ ptr = dereference_kernel_function_descriptor(ptr);
+ if (is_ksym_addr((unsigned long)ptr))
+ return ptr;
+
+ preempt_disable();
+ mod = __module_address((unsigned long)ptr);
+ preempt_enable();
+
+ if (mod)
+ ptr = dereference_module_function_descriptor(mod, ptr);
+#endif
+ return ptr;
+}
+
#ifdef CONFIG_KALLSYMS
+unsigned long kallsyms_sym_address(int idx);
+int kallsyms_on_each_symbol(int (*fn)(void *, const char *, unsigned long),
+ void *data);
+int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long),
+ const char *name, void *data);
+
/* Lookup the address for a symbol. Returns 0 if not found. */
unsigned long kallsyms_lookup_name(const char *name);
-/* Call a function on each kallsyms symbol in the core kernel */
-int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
- unsigned long),
- void *data);
-
extern int kallsyms_lookup_size_offset(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset);
@@ -36,15 +87,17 @@ const char *kallsyms_lookup(unsigned long addr,
/* Look up a kernel symbol and return it in a text buffer. */
extern int sprint_symbol(char *buffer, unsigned long address);
+extern int sprint_symbol_build_id(char *buffer, unsigned long address);
extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
extern int sprint_backtrace(char *buffer, unsigned long address);
-
-/* Look up a kernel symbol and print it to the kernel messages. */
-extern void __print_symbol(const char *fmt, unsigned long address);
+extern int sprint_backtrace_build_id(char *buffer, unsigned long address);
int lookup_symbol_name(unsigned long addr, char *symname);
int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
+/* How and when do we show kallsyms values? */
+extern bool kallsyms_show_value(const struct cred *cred);
+
#else /* !CONFIG_KALLSYMS */
static inline unsigned long kallsyms_lookup_name(const char *name)
@@ -52,14 +105,6 @@ static inline unsigned long kallsyms_lookup_name(const char *name)
return 0;
}
-static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
- struct module *,
- unsigned long),
- void *data)
-{
- return 0;
-}
-
static inline int kallsyms_lookup_size_offset(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset)
@@ -81,6 +126,12 @@ static inline int sprint_symbol(char *buffer, unsigned long addr)
return 0;
}
+static inline int sprint_symbol_build_id(char *buffer, unsigned long address)
+{
+ *buffer = '\0';
+ return 0;
+}
+
static inline int sprint_symbol_no_offset(char *buffer, unsigned long addr)
{
*buffer = '\0';
@@ -93,6 +144,12 @@ static inline int sprint_backtrace(char *buffer, unsigned long addr)
return 0;
}
+static inline int sprint_backtrace_build_id(char *buffer, unsigned long addr)
+{
+ *buffer = '\0';
+ return 0;
+}
+
static inline int lookup_symbol_name(unsigned long addr, char *symname)
{
return -ERANGE;
@@ -103,26 +160,27 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
return -ERANGE;
}
-/* Stupid that this does nothing, but I didn't create this mess. */
-#define __print_symbol(fmt, addr)
-#endif /*CONFIG_KALLSYMS*/
+static inline bool kallsyms_show_value(const struct cred *cred)
+{
+ return false;
+}
-/* This macro allows us to keep printk typechecking */
-static __printf(1, 2)
-void __check_printsym_format(const char *fmt, ...)
+static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *, unsigned long),
+ void *data)
{
+ return -EOPNOTSUPP;
}
-static inline void print_symbol(const char *fmt, unsigned long addr)
+static inline int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long),
+ const char *name, void *data)
{
- __check_printsym_format(fmt, "");
- __print_symbol(fmt, (unsigned long)
- __builtin_extract_return_addr((void *)addr));
+ return -EOPNOTSUPP;
}
+#endif /*CONFIG_KALLSYMS*/
-static inline void print_ip_sym(unsigned long ip)
+static inline void print_ip_sym(const char *loglvl, unsigned long ip)
{
- printk("[<%p>] %pS\n", (void *) ip, (void *) ip);
+ printk("%s[<%px>] %pS\n", loglvl, (void *) ip, (void *) ip);
}
#endif /*_LINUX_KALLSYMS_H*/
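
kallsyms_on_each_symbol() above dropped the struct module argument from its callback. A hedged sketch of a callback against the new signature; the prefix counting is illustrative only:

	#include <linux/kallsyms.h>
	#include <linux/string.h>

	static int example_count_cb(void *data, const char *name, unsigned long addr)
	{
		if (!strncmp(name, "jbd2_", 5))
			(*(unsigned int *)data)++;
		return 0;	/* returning non-zero stops the walk */
	}

	static unsigned int example_count_jbd2_symbols(void)
	{
		unsigned int count = 0;

		/* Returns -EOPNOTSUPP when CONFIG_KALLSYMS is off; count stays 0 */
		kallsyms_on_each_symbol(example_count_cb, &count);
		return count;
	}
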
diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h
index b7f8aced7870..3d6d22a25bdc 100644
--- a/include/linux/kasan-checks.h
+++ b/include/linux/kasan-checks.h
@@ -1,12 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_CHECKS_H
#define _LINUX_KASAN_CHECKS_H
-#ifdef CONFIG_KASAN
-void kasan_check_read(const void *p, unsigned int size);
-void kasan_check_write(const void *p, unsigned int size);
+#include <linux/types.h>
+
+/*
+ * The annotations present in this file are only relevant for the software
+ * KASAN modes that rely on compiler instrumentation, and will be optimized
+ * away for the hardware tag-based KASAN mode. Use kasan_check_byte() instead.
+ */
+
+/*
+ * __kasan_check_*: Always available when KASAN is enabled. This may be used
+ * even in compilation units that selectively disable KASAN, but must use KASAN
+ * to validate access to an address. Never use these in header files!
+ */
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+bool __kasan_check_read(const volatile void *p, unsigned int size);
+bool __kasan_check_write(const volatile void *p, unsigned int size);
+#else
+static inline bool __kasan_check_read(const volatile void *p, unsigned int size)
+{
+ return true;
+}
+static inline bool __kasan_check_write(const volatile void *p, unsigned int size)
+{
+ return true;
+}
+#endif
+
+/*
+ * kasan_check_*: Only available when the particular compilation unit has KASAN
+ * instrumentation enabled. May be used in header files.
+ */
+#ifdef __SANITIZE_ADDRESS__
+#define kasan_check_read __kasan_check_read
+#define kasan_check_write __kasan_check_write
#else
-static inline void kasan_check_read(const void *p, unsigned int size) { }
-static inline void kasan_check_write(const void *p, unsigned int size) { }
+static inline bool kasan_check_read(const volatile void *p, unsigned int size)
+{
+ return true;
+}
+static inline bool kasan_check_write(const volatile void *p, unsigned int size)
+{
+ return true;
+}
#endif
#endif
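
The __kasan_check_*() / kasan_check_*() split above lets a file built with instrumentation disabled (KASAN_SANITIZE_foo.o := n) still validate an access explicitly. A hedged sketch; the wrapper is hypothetical:

	#include <linux/kasan-checks.h>

	/* Always checked when KASAN is enabled, even if this file itself is
	 * compiled without instrumentation; returns true if the access is OK */
	static bool example_validate_read(const void *p, unsigned int size)
	{
		return __kasan_check_read(p, size);
	}
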
diff --git a/include/linux/kasan-enabled.h b/include/linux/kasan-enabled.h
new file mode 100644
index 000000000000..6f612d69ea0c
--- /dev/null
+++ b/include/linux/kasan-enabled.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KASAN_ENABLED_H
+#define _LINUX_KASAN_ENABLED_H
+
+#include <linux/static_key.h>
+
+#ifdef CONFIG_KASAN_HW_TAGS
+
+DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
+
+static __always_inline bool kasan_enabled(void)
+{
+ return static_branch_likely(&kasan_flag_enabled);
+}
+
+static inline bool kasan_hw_tags_enabled(void)
+{
+ return kasan_enabled();
+}
+
+#else /* CONFIG_KASAN_HW_TAGS */
+
+static inline bool kasan_enabled(void)
+{
+ return IS_ENABLED(CONFIG_KASAN);
+}
+
+static inline bool kasan_hw_tags_enabled(void)
+{
+ return false;
+}
+
+#endif /* CONFIG_KASAN_HW_TAGS */
+
+#endif /* LINUX_KASAN_ENABLED_H */
diff --git a/include/linux/kasan-tags.h b/include/linux/kasan-tags.h
new file mode 100644
index 000000000000..4f85f562512c
--- /dev/null
+++ b/include/linux/kasan-tags.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KASAN_TAGS_H
+#define _LINUX_KASAN_TAGS_H
+
+#define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */
+#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */
+#define KASAN_TAG_MAX 0xFD /* maximum value for random tags */
+
+#ifdef CONFIG_KASAN_HW_TAGS
+#define KASAN_TAG_MIN 0xF0 /* minimum value for random tags */
+#else
+#define KASAN_TAG_MIN 0x00 /* minimum value for random tags */
+#endif
+
+#endif /* LINUX_KASAN_TAGS_H */
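
Under the tag-based KASAN modes these values live in the top byte of the pointer (via arm64 TBI for the hardware mode); that placement is an assumption of the sketch below, not something this header spells out:

	#include <linux/kasan-tags.h>
	#include <linux/types.h>

	/* Illustrative only: extract the KASAN tag from a tagged pointer,
	 * assuming the tag sits in bits 63..56 as on arm64 */
	static inline u8 example_ptr_tag(const void *ptr)
	{
		return (u8)((unsigned long)ptr >> 56);
	}
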
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index a5c7046f26b4..f7ef70661ce2 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -1,27 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H
+#include <linux/bug.h>
+#include <linux/kasan-enabled.h>
+#include <linux/kernel.h>
+#include <linux/static_key.h>
#include <linux/types.h>
struct kmem_cache;
struct page;
+struct slab;
struct vm_struct;
struct task_struct;
#ifdef CONFIG_KASAN
-#define KASAN_SHADOW_SCALE_SHIFT 3
-
+#include <linux/linkage.h>
#include <asm/kasan.h>
-#include <asm/pgtable.h>
-extern unsigned char kasan_zero_page[PAGE_SIZE];
-extern pte_t kasan_zero_pte[PTRS_PER_PTE];
-extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
-extern pud_t kasan_zero_pud[PTRS_PER_PUD];
-extern p4d_t kasan_zero_p4d[PTRS_PER_P4D];
+#endif
+
+typedef unsigned int __bitwise kasan_vmalloc_flags_t;
+
+#define KASAN_VMALLOC_NONE ((__force kasan_vmalloc_flags_t)0x00u)
+#define KASAN_VMALLOC_INIT ((__force kasan_vmalloc_flags_t)0x01u)
+#define KASAN_VMALLOC_VM_ALLOC ((__force kasan_vmalloc_flags_t)0x02u)
+#define KASAN_VMALLOC_PROT_NORMAL ((__force kasan_vmalloc_flags_t)0x04u)
+
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+
+#include <linux/pgtable.h>
-void kasan_populate_zero_shadow(const void *shadow_start,
+/* Software KASAN implementations use shadow memory. */
+
+#ifdef CONFIG_KASAN_SW_TAGS
+/* This matches KASAN_TAG_INVALID. */
+#define KASAN_SHADOW_INIT 0xFE
+#else
+#define KASAN_SHADOW_INIT 0
+#endif
+
+#ifndef PTE_HWTABLE_PTRS
+#define PTE_HWTABLE_PTRS 0
+#endif
+
+extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
+extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
+extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
+extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
+extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
+
+int kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end);
static inline void *kasan_mem_to_shadow(const void *addr)
@@ -30,103 +60,414 @@ static inline void *kasan_mem_to_shadow(const void *addr)
+ KASAN_SHADOW_OFFSET;
}
+int kasan_add_zero_shadow(void *start, unsigned long size);
+void kasan_remove_zero_shadow(void *start, unsigned long size);
+
/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);
/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
-void kasan_unpoison_shadow(const void *address, size_t size);
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
-void kasan_unpoison_task_stack(struct task_struct *task);
-void kasan_unpoison_stack_above_sp_to(const void *watermark);
+static inline int kasan_add_zero_shadow(void *start, unsigned long size)
+{
+ return 0;
+}
+static inline void kasan_remove_zero_shadow(void *start,
+ unsigned long size)
+{}
-void kasan_alloc_pages(struct page *page, unsigned int order);
-void kasan_free_pages(struct page *page, unsigned int order);
+static inline void kasan_enable_current(void) {}
+static inline void kasan_disable_current(void) {}
-void kasan_cache_create(struct kmem_cache *cache, size_t *size,
- unsigned long *flags);
-void kasan_cache_shrink(struct kmem_cache *cache);
-void kasan_cache_shutdown(struct kmem_cache *cache);
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
-void kasan_poison_slab(struct page *page);
-void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
-void kasan_poison_object_data(struct kmem_cache *cache, void *object);
-void kasan_init_slab_obj(struct kmem_cache *cache, const void *object);
+#ifdef CONFIG_KASAN_HW_TAGS
-void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
-void kasan_kfree_large(const void *ptr);
-void kasan_poison_kfree(void *ptr);
-void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
- gfp_t flags);
-void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
+#else /* CONFIG_KASAN_HW_TAGS */
-void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
-bool kasan_slab_free(struct kmem_cache *s, void *object);
+#endif /* CONFIG_KASAN_HW_TAGS */
-struct kasan_cache {
- int alloc_meta_offset;
- int free_meta_offset;
-};
+static inline bool kasan_has_integrated_init(void)
+{
+ return kasan_hw_tags_enabled();
+}
+
+#ifdef CONFIG_KASAN
+void __kasan_unpoison_range(const void *addr, size_t size);
+static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
+{
+ if (kasan_enabled())
+ __kasan_unpoison_range(addr, size);
+}
+
+void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline void kasan_poison_pages(struct page *page,
+ unsigned int order, bool init)
+{
+ if (kasan_enabled())
+ __kasan_poison_pages(page, order, init);
+}
+
+bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline bool kasan_unpoison_pages(struct page *page,
+ unsigned int order, bool init)
+{
+ if (kasan_enabled())
+ return __kasan_unpoison_pages(page, order, init);
+ return false;
+}
+
+void __kasan_poison_slab(struct slab *slab);
+static __always_inline void kasan_poison_slab(struct slab *slab)
+{
+ if (kasan_enabled())
+ __kasan_poison_slab(slab);
+}
-int kasan_module_alloc(void *addr, size_t size);
-void kasan_free_shadow(const struct vm_struct *vm);
+void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
+static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+ void *object)
+{
+ if (kasan_enabled())
+ __kasan_unpoison_object_data(cache, object);
+}
+
+void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
+static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
+ void *object)
+{
+ if (kasan_enabled())
+ __kasan_poison_object_data(cache, object);
+}
-size_t ksize(const void *);
-static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
-size_t kasan_metadata_size(struct kmem_cache *cache);
+void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
+ const void *object);
+static __always_inline void * __must_check kasan_init_slab_obj(
+ struct kmem_cache *cache, const void *object)
+{
+ if (kasan_enabled())
+ return __kasan_init_slab_obj(cache, object);
+ return (void *)object;
+}
-bool kasan_save_enable_multi_shot(void);
-void kasan_restore_multi_shot(bool enabled);
+bool __kasan_slab_free(struct kmem_cache *s, void *object,
+ unsigned long ip, bool init);
+static __always_inline bool kasan_slab_free(struct kmem_cache *s,
+ void *object, bool init)
+{
+ if (kasan_enabled())
+ return __kasan_slab_free(s, object, _RET_IP_, init);
+ return false;
+}
+
+void __kasan_kfree_large(void *ptr, unsigned long ip);
+static __always_inline void kasan_kfree_large(void *ptr)
+{
+ if (kasan_enabled())
+ __kasan_kfree_large(ptr, _RET_IP_);
+}
+
+void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
+static __always_inline void kasan_slab_free_mempool(void *ptr)
+{
+ if (kasan_enabled())
+ __kasan_slab_free_mempool(ptr, _RET_IP_);
+}
+
+void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
+ void *object, gfp_t flags, bool init);
+static __always_inline void * __must_check kasan_slab_alloc(
+ struct kmem_cache *s, void *object, gfp_t flags, bool init)
+{
+ if (kasan_enabled())
+ return __kasan_slab_alloc(s, object, flags, init);
+ return object;
+}
+
+void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
+ size_t size, gfp_t flags);
+static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
+ const void *object, size_t size, gfp_t flags)
+{
+ if (kasan_enabled())
+ return __kasan_kmalloc(s, object, size, flags);
+ return (void *)object;
+}
+
+void * __must_check __kasan_kmalloc_large(const void *ptr,
+ size_t size, gfp_t flags);
+static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
+ size_t size, gfp_t flags)
+{
+ if (kasan_enabled())
+ return __kasan_kmalloc_large(ptr, size, flags);
+ return (void *)ptr;
+}
+
+void * __must_check __kasan_krealloc(const void *object,
+ size_t new_size, gfp_t flags);
+static __always_inline void * __must_check kasan_krealloc(const void *object,
+ size_t new_size, gfp_t flags)
+{
+ if (kasan_enabled())
+ return __kasan_krealloc(object, new_size, flags);
+ return (void *)object;
+}
+
+/*
+ * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
+ * the hardware tag-based mode that doesn't rely on compiler instrumentation.
+ */
+bool __kasan_check_byte(const void *addr, unsigned long ip);
+static __always_inline bool kasan_check_byte(const void *addr)
+{
+ if (kasan_enabled())
+ return __kasan_check_byte(addr, _RET_IP_);
+ return true;
+}
#else /* CONFIG_KASAN */
-static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
+static inline void kasan_unpoison_range(const void *address, size_t size) {}
+static inline void kasan_poison_pages(struct page *page, unsigned int order,
+ bool init) {}
+static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
+ bool init)
+{
+ return false;
+}
+static inline void kasan_poison_slab(struct slab *slab) {}
+static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+ void *object) {}
+static inline void kasan_poison_object_data(struct kmem_cache *cache,
+ void *object) {}
+static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
+ const void *object)
+{
+ return (void *)object;
+}
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
+{
+ return false;
+}
+static inline void kasan_kfree_large(void *ptr) {}
+static inline void kasan_slab_free_mempool(void *ptr) {}
+static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
+ gfp_t flags, bool init)
+{
+ return object;
+}
+static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
+ size_t size, gfp_t flags)
+{
+ return (void *)object;
+}
+static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
+{
+ return (void *)ptr;
+}
+static inline void *kasan_krealloc(const void *object, size_t new_size,
+ gfp_t flags)
+{
+ return (void *)object;
+}
+static inline bool kasan_check_byte(const void *address)
+{
+ return true;
+}
+
+#endif /* CONFIG_KASAN */
+#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
+void kasan_unpoison_task_stack(struct task_struct *task);
+#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
-static inline void kasan_unpoison_stack_above_sp_to(const void *watermark) {}
+#endif
-static inline void kasan_enable_current(void) {}
-static inline void kasan_disable_current(void) {}
+#ifdef CONFIG_KASAN_GENERIC
+
+struct kasan_cache {
+ int alloc_meta_offset;
+ int free_meta_offset;
+};
+
+size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
+slab_flags_t kasan_never_merge(void);
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
+ slab_flags_t *flags);
+
+void kasan_cache_shrink(struct kmem_cache *cache);
+void kasan_cache_shutdown(struct kmem_cache *cache);
+void kasan_record_aux_stack(void *ptr);
+void kasan_record_aux_stack_noalloc(void *ptr);
-static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
-static inline void kasan_free_pages(struct page *page, unsigned int order) {}
+#else /* CONFIG_KASAN_GENERIC */
+/* Tag-based KASAN modes do not use per-object metadata. */
+static inline size_t kasan_metadata_size(struct kmem_cache *cache,
+ bool in_object)
+{
+ return 0;
+}
+/* And thus nothing prevents cache merging. */
+static inline slab_flags_t kasan_never_merge(void)
+{
+ return 0;
+}
+/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
- size_t *size,
- unsigned long *flags) {}
+ unsigned int *size,
+ slab_flags_t *flags) {}
+
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
+static inline void kasan_record_aux_stack(void *ptr) {}
+static inline void kasan_record_aux_stack_noalloc(void *ptr) {}
-static inline void kasan_poison_slab(struct page *page) {}
-static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
- void *object) {}
-static inline void kasan_poison_object_data(struct kmem_cache *cache,
- void *object) {}
-static inline void kasan_init_slab_obj(struct kmem_cache *cache,
- const void *object) {}
+#endif /* CONFIG_KASAN_GENERIC */
-static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
-static inline void kasan_kfree_large(const void *ptr) {}
-static inline void kasan_poison_kfree(void *ptr) {}
-static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
- size_t size, gfp_t flags) {}
-static inline void kasan_krealloc(const void *object, size_t new_size,
- gfp_t flags) {}
+#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
-static inline void kasan_slab_alloc(struct kmem_cache *s, void *object,
- gfp_t flags) {}
-static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
+static inline void *kasan_reset_tag(const void *addr)
{
- return false;
+ return (void *)arch_kasan_reset_tag(addr);
}
-static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
-static inline void kasan_free_shadow(const struct vm_struct *vm) {}
+/**
+ * kasan_report - print a report about a bad memory access detected by KASAN
+ * @addr: address of the bad access
+ * @size: size of the bad access
+ * @is_write: whether the bad access is a write or a read
+ * @ip: instruction pointer for the accessibility check or the bad access itself
+ */
+bool kasan_report(unsigned long addr, size_t size,
+ bool is_write, unsigned long ip);
-static inline void kasan_unpoison_slab(const void *ptr) { }
-static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
+#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
-#endif /* CONFIG_KASAN */
+static inline void *kasan_reset_tag(const void *addr)
+{
+ return (void *)addr;
+}
+
+#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
+
+#ifdef CONFIG_KASAN_HW_TAGS
+
+void kasan_report_async(void);
+
+#endif /* CONFIG_KASAN_HW_TAGS */
+
+#ifdef CONFIG_KASAN_SW_TAGS
+void __init kasan_init_sw_tags(void);
+#else
+static inline void kasan_init_sw_tags(void) { }
+#endif
+
+#ifdef CONFIG_KASAN_HW_TAGS
+void kasan_init_hw_tags_cpu(void);
+void __init kasan_init_hw_tags(void);
+#else
+static inline void kasan_init_hw_tags_cpu(void) { }
+static inline void kasan_init_hw_tags(void) { }
+#endif
+
+#ifdef CONFIG_KASAN_VMALLOC
+
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+
+void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
+void kasan_release_vmalloc(unsigned long start, unsigned long end,
+ unsigned long free_region_start,
+ unsigned long free_region_end);
+
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+
+static inline void kasan_populate_early_vm_area_shadow(void *start,
+ unsigned long size)
+{ }
+static inline int kasan_populate_vmalloc(unsigned long start,
+ unsigned long size)
+{
+ return 0;
+}
+static inline void kasan_release_vmalloc(unsigned long start,
+ unsigned long end,
+ unsigned long free_region_start,
+ unsigned long free_region_end) { }
+
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+
+void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
+ kasan_vmalloc_flags_t flags);
+static __always_inline void *kasan_unpoison_vmalloc(const void *start,
+ unsigned long size,
+ kasan_vmalloc_flags_t flags)
+{
+ if (kasan_enabled())
+ return __kasan_unpoison_vmalloc(start, size, flags);
+ return (void *)start;
+}
+
+void __kasan_poison_vmalloc(const void *start, unsigned long size);
+static __always_inline void kasan_poison_vmalloc(const void *start,
+ unsigned long size)
+{
+ if (kasan_enabled())
+ __kasan_poison_vmalloc(start, size);
+}
+
+#else /* CONFIG_KASAN_VMALLOC */
+
+static inline void kasan_populate_early_vm_area_shadow(void *start,
+ unsigned long size) { }
+static inline int kasan_populate_vmalloc(unsigned long start,
+ unsigned long size)
+{
+ return 0;
+}
+static inline void kasan_release_vmalloc(unsigned long start,
+ unsigned long end,
+ unsigned long free_region_start,
+ unsigned long free_region_end) { }
+
+static inline void *kasan_unpoison_vmalloc(const void *start,
+ unsigned long size,
+ kasan_vmalloc_flags_t flags)
+{
+ return (void *)start;
+}
+static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
+{ }
+
+#endif /* CONFIG_KASAN_VMALLOC */
+
+#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+ !defined(CONFIG_KASAN_VMALLOC)
+
+/*
+ * These functions allocate and free shadow memory for kernel modules.
+ * They are only required when KASAN_VMALLOC is not supported, as otherwise
+ * shadow memory is allocated by the generic vmalloc handlers.
+ */
+int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
+void kasan_free_module_shadow(const struct vm_struct *vm);
+
+#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
+
+static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
+static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}
+
+#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
+
+#ifdef CONFIG_KASAN_INLINE
+void kasan_non_canonical_hook(unsigned long addr);
+#else /* CONFIG_KASAN_INLINE */
+static inline void kasan_non_canonical_hook(unsigned long addr) { }
+#endif /* CONFIG_KASAN_INLINE */
#endif /* LINUX_KASAN_H */
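
The reworked kasan.h above follows one pattern throughout: each __kasan_*() function is the out-of-line implementation, and a static __always_inline wrapper guards it with kasan_enabled(), so a kernel built with KASAN but booted with it disabled pays only a static-branch check per call site. A minimal sketch of the pattern, with hypothetical names (feature_enabled(), __do_check(), do_check()):

	bool __do_check(const void *p);		/* out-of-line implementation */

	static __always_inline bool do_check(const void *p)
	{
		if (feature_enabled())		/* e.g. a static key check */
			return __do_check(p);
		return false;			/* cheap no-op when disabled */
	}
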
diff --git a/include/linux/kbd_diacr.h b/include/linux/kbd_diacr.h
index 7274ec68c246..738c7340c151 100644
--- a/include/linux/kbd_diacr.h
+++ b/include/linux/kbd_diacr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _DIACR_H
#define _DIACR_H
#include <linux/kd.h>
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index cbfb171bbcba..c40811d79769 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KBD_KERN_H
#define _KBD_KERN_H
@@ -5,12 +6,7 @@
#include <linux/interrupt.h>
#include <linux/keyboard.h>
-extern struct tasklet_struct keyboard_tasklet;
-
extern char *func_table[MAX_NR_FUNC];
-extern char func_buf[];
-extern char *funcbufptr;
-extern int funcbufsize, funcbufleft;
/*
* kbd->xxx contains the VC-local things (flag settings etc..)
@@ -73,12 +69,6 @@ extern void (*kbd_ledfunc)(unsigned int led);
extern int set_console(int nr);
extern void schedule_console_callback(void);
-/* FIXME: review locking for vt.c callers */
-static inline void set_leds(void)
-{
- tasklet_schedule(&keyboard_tasklet);
-}
-
static inline int vc_kbd_mode(struct kbd_struct * kbd, int flag)
{
return ((kbd->modeflags >> flag) & 1);
@@ -137,7 +127,7 @@ static inline void chg_vc_kbd_led(struct kbd_struct * kbd, int flag)
struct console;
-void compute_shiftstate(void);
+void vt_set_leds_compute_shiftstate(void);
/* defkeymap.c */
diff --git a/include/linux/kbuild.h b/include/linux/kbuild.h
index 4e80f3a9ad58..e7be517aaaf6 100644
--- a/include/linux/kbuild.h
+++ b/include/linux/kbuild.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KBUILD_H
#define __LINUX_KBUILD_H
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index 4d748603e818..20d1079e92b4 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -1,8 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KCONFIG_H
#define __LINUX_KCONFIG_H
#include <generated/autoconf.h>
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define __BIG_ENDIAN 4321
+#else
+#define __LITTLE_ENDIAN 1234
+#endif
+
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
@@ -44,7 +51,8 @@
/*
* IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
- * otherwise.
+ * otherwise. CONFIG_FOO=m results in "#define CONFIG_FOO_MODULE 1" in
+ * autoconf.h.
*/
#define IS_MODULE(option) __is_defined(option##_MODULE)
@@ -59,7 +67,8 @@
/*
* IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
- * 0 otherwise.
+ * 0 otherwise. Note that CONFIG_FOO=y results in "#define CONFIG_FOO 1" in
+ * autoconf.h, while CONFIG_FOO=m results in "#define CONFIG_FOO_MODULE 1".
*/
#define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option))
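
The expanded comments describe how these macros key off autoconf.h. As a worked illustration with a hypothetical option CONFIG_FOO: CONFIG_FOO=y defines CONFIG_FOO, so IS_BUILTIN(CONFIG_FOO) and IS_ENABLED(CONFIG_FOO) evaluate to 1 while IS_MODULE(CONFIG_FOO) is 0; CONFIG_FOO=m defines only CONFIG_FOO_MODULE, flipping IS_BUILTIN to 0 and IS_MODULE to 1 with IS_ENABLED still 1; an unset option leaves all three at 0, and the test stays a compile-time constant either way:

	if (IS_ENABLED(CONFIG_FOO))	/* constant 0 or 1; the dead branch */
		register_foo();		/* is eliminated (hypothetical helper) */
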
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index d92762286645..86c0f1d18998 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* /proc/kcore definitions
*/
@@ -9,7 +10,7 @@ enum kcore_type {
KCORE_VMALLOC,
KCORE_RAM,
KCORE_VMEMMAP,
- KCORE_OTHER,
+ KCORE_USER,
};
struct kcore_list {
@@ -26,8 +27,16 @@ struct vmcore {
loff_t offset;
};
+struct vmcoredd_node {
+ struct list_head list; /* List of dumps */
+ void *buf; /* Buffer containing device's dump */
+ unsigned int size; /* Size of the buffer */
+};
+
#ifdef CONFIG_PROC_KCORE
-extern void kclist_add(struct kcore_list *, void *, size_t, int type);
+void __init kclist_add(struct kcore_list *, void *, size_t, int type);
+
+extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn));
#else
static inline
void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
index 2883ac98c280..ee04256f28af 100644
--- a/include/linux/kcov.h
+++ b/include/linux/kcov.h
@@ -1,29 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KCOV_H
#define _LINUX_KCOV_H
+#include <linux/sched.h>
#include <uapi/linux/kcov.h>
struct task_struct;
#ifdef CONFIG_KCOV
-void kcov_task_init(struct task_struct *t);
-void kcov_task_exit(struct task_struct *t);
-
enum kcov_mode {
/* Coverage collection is not enabled yet. */
KCOV_MODE_DISABLED = 0,
+ /* KCOV was initialized, but tracing mode hasn't been chosen yet. */
+ KCOV_MODE_INIT = 1,
/*
* Tracing coverage collection mode.
* Covered PCs are collected in a per-task buffer.
*/
- KCOV_MODE_TRACE = 1,
+ KCOV_MODE_TRACE_PC = 2,
+ /* Collecting comparison operands mode. */
+ KCOV_MODE_TRACE_CMP = 3,
};
+#define KCOV_IN_CTXSW (1 << 30)
+
+void kcov_task_init(struct task_struct *t);
+void kcov_task_exit(struct task_struct *t);
+
+#define kcov_prepare_switch(t) \
+do { \
+ (t)->kcov_mode |= KCOV_IN_CTXSW; \
+} while (0)
+
+#define kcov_finish_switch(t) \
+do { \
+ (t)->kcov_mode &= ~KCOV_IN_CTXSW; \
+} while (0)
+
+/* See Documentation/dev-tools/kcov.rst for usage details. */
+void kcov_remote_start(u64 handle);
+void kcov_remote_stop(void);
+u64 kcov_common_handle(void);
+
+static inline void kcov_remote_start_common(u64 id)
+{
+ kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, id));
+}
+
+static inline void kcov_remote_start_usb(u64 id)
+{
+ kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_USB, id));
+}
+
+/*
+ * The softirq flavor of the kcov_remote_*() functions is introduced as a
+ * temporary workaround for kcov's lack of support for nested remote coverage
+ * sections in task context. Adding support for nested sections is tracked in:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=210337
+ */
+
+static inline void kcov_remote_start_usb_softirq(u64 id)
+{
+ if (in_serving_softirq())
+ kcov_remote_start_usb(id);
+}
+
+static inline void kcov_remote_stop_softirq(void)
+{
+ if (in_serving_softirq())
+ kcov_remote_stop();
+}
+
#else
static inline void kcov_task_init(struct task_struct *t) {}
static inline void kcov_task_exit(struct task_struct *t) {}
+static inline void kcov_prepare_switch(struct task_struct *t) {}
+static inline void kcov_finish_switch(struct task_struct *t) {}
+static inline void kcov_remote_start(u64 handle) {}
+static inline void kcov_remote_stop(void) {}
+static inline u64 kcov_common_handle(void)
+{
+ return 0;
+}
+static inline void kcov_remote_start_common(u64 id) {}
+static inline void kcov_remote_start_usb(u64 id) {}
+static inline void kcov_remote_start_usb_softirq(u64 id) {}
+static inline void kcov_remote_stop_softirq(void) {}
#endif /* CONFIG_KCOV */
#endif /* _LINUX_KCOV_H */
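
The remote-coverage API added above lets a background kernel thread attribute its coverage to the userspace session that owns a 64-bit handle. A hedged usage sketch in the spirit of Documentation/dev-tools/kcov.rst; the work-item type, its kcov_handle field, and the helpers are made up:

	struct kcov_work *work;			/* hypothetical work item */

	while ((work = get_next_work()) != NULL) {
		kcov_remote_start_common(work->kcov_handle);
		process_work(work);	/* coverage lands in the remote buffer */
		kcov_remote_stop();
	}
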
diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
new file mode 100644
index 000000000000..92f3843d9ebb
--- /dev/null
+++ b/include/linux/kcsan-checks.h
@@ -0,0 +1,533 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KCSAN access checks and modifiers. These can be used to explicitly check
+ * uninstrumented accesses, or change KCSAN checking behaviour of accesses.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
+
+#ifndef _LINUX_KCSAN_CHECKS_H
+#define _LINUX_KCSAN_CHECKS_H
+
+/* Note: Only include what is already included by compiler.h. */
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+
+/* Access types -- if KCSAN_ACCESS_WRITE is not set, the access is a read. */
+#define KCSAN_ACCESS_WRITE (1 << 0) /* Access is a write. */
+#define KCSAN_ACCESS_COMPOUND (1 << 1) /* Compounded read-write instrumentation. */
+#define KCSAN_ACCESS_ATOMIC (1 << 2) /* Access is atomic. */
+/* The following are special, and never due to compiler instrumentation. */
+#define KCSAN_ACCESS_ASSERT (1 << 3) /* Access is an assertion. */
+#define KCSAN_ACCESS_SCOPED (1 << 4) /* Access is a scoped access. */
+
+/*
+ * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used
+ * even in compilation units that selectively disable KCSAN, but must use KCSAN
+ * to validate access to an address. Never use these in header files!
+ */
+#ifdef CONFIG_KCSAN
+/**
+ * __kcsan_check_access - check generic access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ * @type: access type modifier
+ */
+void __kcsan_check_access(const volatile void *ptr, size_t size, int type);
+
+/*
+ * See definition of __tsan_atomic_signal_fence() in kernel/kcsan/core.c.
+ * Note: The mappings are arbitrary, and do not reflect any real mappings of C11
+ * memory orders to the LKMM memory orders and vice-versa!
+ */
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_mb __ATOMIC_SEQ_CST
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_wmb __ATOMIC_ACQ_REL
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_rmb __ATOMIC_ACQUIRE
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_release __ATOMIC_RELEASE
+
+/**
+ * __kcsan_mb - full memory barrier instrumentation
+ */
+void __kcsan_mb(void);
+
+/**
+ * __kcsan_wmb - write memory barrier instrumentation
+ */
+void __kcsan_wmb(void);
+
+/**
+ * __kcsan_rmb - read memory barrier instrumentation
+ */
+void __kcsan_rmb(void);
+
+/**
+ * __kcsan_release - release barrier instrumentation
+ */
+void __kcsan_release(void);
+
+/**
+ * kcsan_disable_current - disable KCSAN for the current context
+ *
+ * Supports nesting.
+ */
+void kcsan_disable_current(void);
+
+/**
+ * kcsan_enable_current - re-enable KCSAN for the current context
+ *
+ * Supports nesting.
+ */
+void kcsan_enable_current(void);
+void kcsan_enable_current_nowarn(void); /* Safe in uaccess regions. */
+
+/**
+ * kcsan_nestable_atomic_begin - begin nestable atomic region
+ *
+ * Accesses within the atomic region may appear to race with other accesses but
+ * should be considered atomic.
+ */
+void kcsan_nestable_atomic_begin(void);
+
+/**
+ * kcsan_nestable_atomic_end - end nestable atomic region
+ */
+void kcsan_nestable_atomic_end(void);
+
+/**
+ * kcsan_flat_atomic_begin - begin flat atomic region
+ *
+ * Accesses within the atomic region may appear to race with other accesses but
+ * should be considered atomic.
+ */
+void kcsan_flat_atomic_begin(void);
+
+/**
+ * kcsan_flat_atomic_end - end flat atomic region
+ */
+void kcsan_flat_atomic_end(void);
+
+/**
+ * kcsan_atomic_next - consider following accesses as atomic
+ *
+ * Force treating the next n memory accesses for the current context as atomic
+ * operations.
+ *
+ * @n: number of following memory accesses to treat as atomic.
+ */
+void kcsan_atomic_next(int n);
+
+/**
+ * kcsan_set_access_mask - set access mask
+ *
+ * Set the access mask for all accesses for the current context if non-zero.
+ * Only value changes to bits set in the mask will be reported.
+ *
+ * @mask: bitmask
+ */
+void kcsan_set_access_mask(unsigned long mask);
+
+/* Scoped access information. */
+struct kcsan_scoped_access {
+ union {
+ struct list_head list; /* scoped_accesses list */
+ /*
+ * Not an entry in scoped_accesses list; stack depth from where
+ * the access was initialized.
+ */
+ int stack_depth;
+ };
+
+ /* Access information. */
+ const volatile void *ptr;
+ size_t size;
+ int type;
+ /* Location where scoped access was set up. */
+ unsigned long ip;
+};
+/*
+ * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
+ * out of scope; relies on attribute "cleanup", which is supported by all
+ * compilers that support KCSAN.
+ */
+#define __kcsan_cleanup_scoped \
+ __maybe_unused __attribute__((__cleanup__(kcsan_end_scoped_access)))
+
+/**
+ * kcsan_begin_scoped_access - begin scoped access
+ *
+ * Begin scoped access and initialize @sa, which will cause KCSAN to
+ * continuously check the memory range in the current thread until
+ * kcsan_end_scoped_access() is called for @sa.
+ *
+ * Scoped accesses are implemented by appending @sa to an internal list for the
+ * current execution context, and then checked on every call into the KCSAN
+ * runtime.
+ *
+ * @ptr: address of access
+ * @size: size of access
+ * @type: access type modifier
+ * @sa: struct kcsan_scoped_access to use for the scope of the access
+ */
+struct kcsan_scoped_access *
+kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
+ struct kcsan_scoped_access *sa);
+
+/**
+ * kcsan_end_scoped_access - end scoped access
+ *
+ * End a scoped access, which will stop KCSAN checking the memory range.
+ * Requires that kcsan_begin_scoped_access() was previously called once for @sa.
+ *
+ * @sa: a previously initialized struct kcsan_scoped_access
+ */
+void kcsan_end_scoped_access(struct kcsan_scoped_access *sa);
+
+
+#else /* CONFIG_KCSAN */
+
+static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
+ int type) { }
+
+static inline void __kcsan_mb(void) { }
+static inline void __kcsan_wmb(void) { }
+static inline void __kcsan_rmb(void) { }
+static inline void __kcsan_release(void) { }
+static inline void kcsan_disable_current(void) { }
+static inline void kcsan_enable_current(void) { }
+static inline void kcsan_enable_current_nowarn(void) { }
+static inline void kcsan_nestable_atomic_begin(void) { }
+static inline void kcsan_nestable_atomic_end(void) { }
+static inline void kcsan_flat_atomic_begin(void) { }
+static inline void kcsan_flat_atomic_end(void) { }
+static inline void kcsan_atomic_next(int n) { }
+static inline void kcsan_set_access_mask(unsigned long mask) { }
+
+struct kcsan_scoped_access { };
+#define __kcsan_cleanup_scoped __maybe_unused
+static inline struct kcsan_scoped_access *
+kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
+ struct kcsan_scoped_access *sa) { return sa; }
+static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { }
+
+#endif /* CONFIG_KCSAN */
+
+#ifdef __SANITIZE_THREAD__
+/*
+ * Only calls into the runtime when the particular compilation unit has KCSAN
+ * instrumentation enabled. May be used in header files.
+ */
+#define kcsan_check_access __kcsan_check_access
+
+/*
+ * Only use these to disable KCSAN for accesses in the current compilation unit;
+ * calls into libraries may still perform KCSAN checks.
+ */
+#define __kcsan_disable_current kcsan_disable_current
+#define __kcsan_enable_current kcsan_enable_current_nowarn
+#else /* __SANITIZE_THREAD__ */
+static inline void kcsan_check_access(const volatile void *ptr, size_t size,
+ int type) { }
+static inline void __kcsan_enable_current(void) { }
+static inline void __kcsan_disable_current(void) { }
+#endif /* __SANITIZE_THREAD__ */
+
+#if defined(CONFIG_KCSAN_WEAK_MEMORY) && defined(__SANITIZE_THREAD__)
+/*
+ * Normal barrier instrumentation is not done via explicit calls, but by mapping
+ * to a repurposed __atomic_signal_fence(), which normally does not generate any
+ * real instructions, but is still intercepted by -fsanitize=thread. This means,
+ * like any other compile-time instrumentation, barrier instrumentation can be
+ * disabled with the __no_kcsan function attribute.
+ *
+ * Also see definition of __tsan_atomic_signal_fence() in kernel/kcsan/core.c.
+ *
+ * These are all macros, like <asm/barrier.h>, since some architectures use them
+ * in non-static inline functions.
+ */
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE(name) \
+ do { \
+ barrier(); \
+ __atomic_signal_fence(__KCSAN_BARRIER_TO_SIGNAL_FENCE_##name); \
+ barrier(); \
+ } while (0)
+#define kcsan_mb() __KCSAN_BARRIER_TO_SIGNAL_FENCE(mb)
+#define kcsan_wmb() __KCSAN_BARRIER_TO_SIGNAL_FENCE(wmb)
+#define kcsan_rmb() __KCSAN_BARRIER_TO_SIGNAL_FENCE(rmb)
+#define kcsan_release() __KCSAN_BARRIER_TO_SIGNAL_FENCE(release)
+#elif defined(CONFIG_KCSAN_WEAK_MEMORY) && defined(__KCSAN_INSTRUMENT_BARRIERS__)
+#define kcsan_mb __kcsan_mb
+#define kcsan_wmb __kcsan_wmb
+#define kcsan_rmb __kcsan_rmb
+#define kcsan_release __kcsan_release
+#else /* CONFIG_KCSAN_WEAK_MEMORY && ... */
+#define kcsan_mb() do { } while (0)
+#define kcsan_wmb() do { } while (0)
+#define kcsan_rmb() do { } while (0)
+#define kcsan_release() do { } while (0)
+#endif /* CONFIG_KCSAN_WEAK_MEMORY && ... */
+
+/**
+ * __kcsan_check_read - check regular read access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0)
+
+/**
+ * __kcsan_check_write - check regular write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_write(ptr, size) \
+ __kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
+
+/**
+ * __kcsan_check_read_write - check regular read-write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_read_write(ptr, size) \
+ __kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
+
+/**
+ * kcsan_check_read - check regular read access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0)
+
+/**
+ * kcsan_check_write - check regular write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
+
+/**
+ * kcsan_check_read_write - check regular read-write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_read_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
+
+/*
+ * Check for atomic accesses: if atomic accesses are not ignored, this simply
+ * aliases to kcsan_check_access(), otherwise becomes a no-op.
+ */
+#ifdef CONFIG_KCSAN_IGNORE_ATOMICS
+#define kcsan_check_atomic_read(...) do { } while (0)
+#define kcsan_check_atomic_write(...) do { } while (0)
+#define kcsan_check_atomic_read_write(...) do { } while (0)
+#else
+#define kcsan_check_atomic_read(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC)
+#define kcsan_check_atomic_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
+#define kcsan_check_atomic_read_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND)
+#endif
+
+/**
+ * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var
+ *
+ * Assert that there are no concurrent writes to @var; other readers are
+ * allowed. This assertion can be used to specify properties of concurrent code,
+ * where violation cannot be detected as a normal data race.
+ *
+ * For example, if we only have a single writer, but multiple concurrent
+ * readers, to avoid data races, all these accesses must be marked; even
+ * concurrent marked writes racing with the single writer are bugs.
+ * Unfortunately, due to being marked, they are no longer data races. For cases
+ * like these, we can use the macro as follows:
+ *
+ * .. code-block:: c
+ *
+ * void writer(void) {
+ * spin_lock(&update_foo_lock);
+ * ASSERT_EXCLUSIVE_WRITER(shared_foo);
+ * WRITE_ONCE(shared_foo, ...);
+ * spin_unlock(&update_foo_lock);
+ * }
+ * void reader(void) {
+ * // update_foo_lock does not need to be held!
+ * ... = READ_ONCE(shared_foo);
+ * }
+ *
+ * Note: ASSERT_EXCLUSIVE_WRITER_SCOPED(), if applicable, performs more thorough
+ * checking if a clear scope where no concurrent writes are expected exists.
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_WRITER(var) \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
+
+/*
+ * Helper macros for the implementation of ASSERT_EXCLUSIVE_*_SCOPED(). @id is
+ * expected to be unique for the scope in which instances of kcsan_scoped_access
+ * are declared.
+ */
+#define __kcsan_scoped_name(c, suffix) __kcsan_scoped_##c##suffix
+#define __ASSERT_EXCLUSIVE_SCOPED(var, type, id) \
+ struct kcsan_scoped_access __kcsan_scoped_name(id, _) \
+ __kcsan_cleanup_scoped; \
+ struct kcsan_scoped_access *__kcsan_scoped_name(id, _dummy_p) \
+ __maybe_unused = kcsan_begin_scoped_access( \
+ &(var), sizeof(var), KCSAN_ACCESS_SCOPED | (type), \
+ &__kcsan_scoped_name(id, _))
+
+/**
+ * ASSERT_EXCLUSIVE_WRITER_SCOPED - assert no concurrent writes to @var in scope
+ *
+ * Scoped variant of ASSERT_EXCLUSIVE_WRITER().
+ *
+ * Assert that there are no concurrent writes to @var for the duration of the
+ * scope in which it is introduced. This provides a better way to fully cover
+ * the enclosing scope, compared to multiple ASSERT_EXCLUSIVE_WRITER(), and
+ * increases the likelihood for KCSAN to detect racing accesses.
+ *
+ * For example, it allows finding race-condition bugs that only occur due to
+ * state changes within the scope itself:
+ *
+ * .. code-block:: c
+ *
+ * void writer(void) {
+ * spin_lock(&update_foo_lock);
+ * {
+ * ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_foo);
+ * WRITE_ONCE(shared_foo, 42);
+ * ...
+ * // shared_foo should still be 42 here!
+ * }
+ * spin_unlock(&update_foo_lock);
+ * }
+ * void buggy(void) {
+ * if (READ_ONCE(shared_foo) == 42)
+ * WRITE_ONCE(shared_foo, 1); // bug!
+ * }
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_WRITER_SCOPED(var) \
+ __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_ASSERT, __COUNTER__)
+
+/**
+ * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var
+ *
+ * Assert that there are no concurrent accesses to @var (no readers nor
+ * writers). This assertion can be used to specify properties of concurrent
+ * code, where violation cannot be detected as a normal data race.
+ *
+ * For example, where exclusive access is expected after determining no other
+ * users of an object are left, but the object is not actually freed. We can
+ * check that this property actually holds as follows:
+ *
+ * .. code-block:: c
+ *
+ * if (refcount_dec_and_test(&obj->refcnt)) {
+ * ASSERT_EXCLUSIVE_ACCESS(*obj);
+ * do_some_cleanup(obj);
+ * release_for_reuse(obj);
+ * }
+ *
+ * Note:
+ *
+ * 1. ASSERT_EXCLUSIVE_ACCESS_SCOPED(), if applicable, performs more thorough
+ * checking if a clear scope where no concurrent accesses are expected exists.
+ *
+ * 2. For cases where the object is freed, `KASAN <kasan.html>`_ is a better
+ * fit to detect use-after-free bugs.
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_ACCESS(var) \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)
+
+/**
+ * ASSERT_EXCLUSIVE_ACCESS_SCOPED - assert no concurrent accesses to @var in scope
+ *
+ * Scoped variant of ASSERT_EXCLUSIVE_ACCESS().
+ *
+ * Assert that there are no concurrent accesses to @var (no readers nor writers)
+ * for the entire duration of the scope in which it is introduced. This provides
+ * a better way to fully cover the enclosing scope, compared to multiple
+ * ASSERT_EXCLUSIVE_ACCESS(), and increases the likelihood for KCSAN to detect
+ * racing accesses.
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_ACCESS_SCOPED(var) \
+ __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, __COUNTER__)
+
+/**
+ * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var
+ *
+ * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER().
+ *
+ * Assert that there are no concurrent writes to a subset of bits in @var;
+ * concurrent readers are permitted. This assertion captures more detailed
+ * bit-level properties, compared to the other (word granularity) assertions.
+ * Only the bits set in @mask are checked for concurrent modifications, while
+ * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~mask bits
+ * are ignored.
+ *
+ * Use this for variables, where some bits must not be modified concurrently,
+ * yet other bits are expected to be modified concurrently.
+ *
+ * For example, variables where, after initialization, some bits are read-only,
+ * but other bits may still be modified concurrently. A reader may wish to
+ * assert that this is true as follows:
+ *
+ * .. code-block:: c
+ *
+ * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
+ * foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
+ *
+ * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is assumed
+ * to access the masked bits only, and KCSAN optimistically assumes it is
+ * therefore safe, even in the presence of data races, and marking it with
+ * READ_ONCE() is optional from KCSAN's point-of-view. We caution, however, that
+ * it may still be advisable to do so, since we cannot reason about all compiler
+ * optimizations when it comes to bit manipulations (on the reader and writer
+ * side). If you are sure nothing can go wrong, we can write the above simply
+ * as:
+ *
+ * .. code-block:: c
+ *
+ * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
+ * foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
+ *
+ * Another example, where this may be used, is when certain bits of @var may
+ * only be modified when holding the appropriate lock, but other bits may still
+ * be modified concurrently. Writers, where other bits may change concurrently,
+ * could use the assertion as follows:
+ *
+ * .. code-block:: c
+ *
+ * spin_lock(&foo_lock);
+ * ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
+ * old_flags = flags;
+ * new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT);
+ * if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... }
+ * spin_unlock(&foo_lock);
+ *
+ * @var: variable to assert on
+ * @mask: only check for modifications to bits set in @mask
+ */
+#define ASSERT_EXCLUSIVE_BITS(var, mask) \
+ do { \
+ kcsan_set_access_mask(mask); \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\
+ kcsan_set_access_mask(0); \
+ kcsan_atomic_next(1); \
+ } while (0)
+
+#endif /* _LINUX_KCSAN_CHECKS_H */
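
Tying the scoped machinery together: __kcsan_cleanup_scoped relies on the "cleanup" attribute, so a scoped assertion declared via __ASSERT_EXCLUSIVE_SCOPED() keeps KCSAN checking until the enclosing scope exits. A minimal sketch, where struct foo and its fields are hypothetical:

	static void teardown(struct foo *obj)
	{
		/* KCSAN keeps checking *obj from here until return */
		ASSERT_EXCLUSIVE_ACCESS_SCOPED(*obj);
		obj->a = 0;		/* any concurrent access is reported */
		obj->b = 0;
	}
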
diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h
new file mode 100644
index 000000000000..c07c71f5ba4f
--- /dev/null
+++ b/include/linux/kcsan.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The Kernel Concurrency Sanitizer (KCSAN) infrastructure. Public interface and
+ * data structures to set up runtime. See kcsan-checks.h for explicit checks and
+ * modifiers. For more info please see Documentation/dev-tools/kcsan.rst.
+ *
+ * Copyright (C) 2019, Google LLC.
+ */
+
+#ifndef _LINUX_KCSAN_H
+#define _LINUX_KCSAN_H
+
+#include <linux/kcsan-checks.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KCSAN
+
+/*
+ * Context for each thread of execution: for tasks, this is stored in
+ * task_struct, and interrupts access internal per-CPU storage.
+ */
+struct kcsan_ctx {
+ int disable_count; /* disable counter */
+ int disable_scoped; /* disable scoped access counter */
+ int atomic_next; /* number of following atomic ops */
+
+ /*
+ * We distinguish between: (a) nestable atomic regions that may contain
+ * other nestable regions; and (b) flat atomic regions that do not keep
+ * track of nesting. Both (a) and (b) are entirely independent of each
+ * other, and a flat region may be started in a nestable region or
+ * vice-versa.
+ *
+ * This is required because, for example, in the annotations for
+ * seqlocks, we declare seqlock writer critical sections as (a) nestable
+ * atomic regions, but reader critical sections as (b) flat atomic
+ * regions, but have encountered cases where seqlock reader critical
+ * sections are contained within writer critical sections (the opposite
+ * may be possible, too).
+ *
+ * To support these cases, we independently track the depth of nesting
+ * for (a), and whether the leaf level is flat for (b).
+ */
+ int atomic_nest_count;
+ bool in_flat_atomic;
+
+ /*
+ * Access mask for all accesses if non-zero.
+ */
+ unsigned long access_mask;
+
+ /* List of scoped accesses; likely to be empty. */
+ struct list_head scoped_accesses;
+
+#ifdef CONFIG_KCSAN_WEAK_MEMORY
+ /*
+ * Scoped access for modeling access reordering to detect missing memory
+ * barriers; only keep 1 to keep fast-path complexity manageable.
+ */
+ struct kcsan_scoped_access reorder_access;
+#endif
+};
+
+/**
+ * kcsan_init - initialize KCSAN runtime
+ */
+void kcsan_init(void);
+
+#else /* CONFIG_KCSAN */
+
+static inline void kcsan_init(void) { }
+
+#endif /* CONFIG_KCSAN */
+
+#endif /* _LINUX_KCSAN_H */
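
The kcsan_ctx comment above distinguishes nestable regions (tracked by atomic_nest_count) from flat ones (tracked by in_flat_atomic) precisely so the seqlock case it describes composes. A hedged sketch of the shape that must work:

	kcsan_nestable_atomic_begin();	/* writer critical section, nestable */
	kcsan_flat_atomic_begin();	/* reader section nested inside it */
	/* ... accesses here are treated as atomic ... */
	kcsan_flat_atomic_end();
	kcsan_nestable_atomic_end();
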
diff --git a/include/linux/kd.h b/include/linux/kd.h
deleted file mode 100644
index 25bd17fad239..000000000000
--- a/include/linux/kd.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _LINUX_KD_H
-#define _LINUX_KD_H
-
-#include <uapi/linux/kd.h>
-
-#define KD_FONT_FLAG_OLD 0x80000000 /* Invoked via old interface [compat] */
-#endif /* _LINUX_KD_H */
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 68bd88223417..07dfb6a20a1c 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -13,6 +13,8 @@
* Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
*/
+#include <linux/list.h>
+
/* Shifted versions of the command enable bits are used if the command
* has no arguments (see kdb_check_flags). This allows commands, such as
* go, to have different permissions depending upon whether it is called
@@ -64,6 +66,17 @@ typedef enum {
typedef int (*kdb_func_t)(int, const char **);
+/* The KDB shell command table */
+typedef struct _kdbtab {
+ char *name; /* Command name */
+ kdb_func_t func; /* Function to execute command */
+ char *usage; /* Usage String for this command */
+ char *help; /* Help message for this command */
+ short minlen; /* Minimum legal # cmd chars required */
+ kdb_cmdflags_t flags; /* Command behaviour flags */
+ struct list_head list_node; /* Command list */
+} kdbtab_t;
+
#ifdef CONFIG_KGDB_KDB
#include <linux/init.h>
#include <linux/sched.h>
@@ -125,7 +138,7 @@ extern const char *kdb_diemsg;
#define KDB_FLAG_NO_I8042 (1 << 7) /* No i8042 chip is available, do
* not use keyboard */
-extern int kdb_flags; /* Global flags, see kdb_state for per cpu state */
+extern unsigned int kdb_flags; /* Global flags, see kdb_state for per cpu state */
extern void kdb_save_flags(void);
extern void kdb_restore_flags(void);
@@ -183,8 +196,6 @@ int kdb_process_cpu(const struct task_struct *p)
return cpu;
}
-/* kdb access to register set for stack dumping */
-extern struct pt_regs *kdb_current_regs;
#ifdef CONFIG_KALLSYMS
extern const char *kdb_walk_kallsyms(loff_t *pos);
#else /* ! CONFIG_KALLSYMS */
@@ -195,19 +206,13 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos)
#endif /* ! CONFIG_KALLSYMS */
/* Dynamic kdb shell command registration */
-extern int kdb_register(char *, kdb_func_t, char *, char *, short);
-extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
- short, kdb_cmdflags_t);
-extern int kdb_unregister(char *);
+extern int kdb_register(kdbtab_t *cmd);
+extern void kdb_unregister(kdbtab_t *cmd);
#else /* ! CONFIG_KGDB_KDB */
static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
static inline void kdb_init(int level) {}
-static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
- char *help, short minlen) { return 0; }
-static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage,
- char *help, short minlen,
- kdb_cmdflags_t flags) { return 0; }
-static inline int kdb_unregister(char *cmd) { return 0; }
+static inline int kdb_register(kdbtab_t *cmd) { return 0; }
+static inline void kdb_unregister(kdbtab_t *cmd) {}
#endif /* CONFIG_KGDB_KDB */
enum {
KDB_NOT_INITIALIZED,
@@ -217,5 +222,6 @@ enum {
extern int kdbgetintenv(const char *, int *);
extern int kdb_set(int, const char **);
+int kdb_lsmod(int argc, const char **argv);
#endif /* !_KDB_H */
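
With the registration interface now struct-based, a caller fills in a kdbtab_t once and passes its address to both kdb_register() and kdb_unregister(). A hedged sketch with a made-up command:

	static int kdb_hello(int argc, const char **argv)
	{
		kdb_printf("hello\n");
		return 0;
	}

	static kdbtab_t hello_cmd = {
		.name	= "hello",
		.func	= kdb_hello,
		.usage	= "hello",
		.help	= "print a greeting",
	};

	/* at init: */ kdb_register(&hello_cmd);
	/* at exit: */ kdb_unregister(&hello_cmd);
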
diff --git a/include/linux/kdebug.h b/include/linux/kdebug.h
index ed815090b3bc..fd311565fabf 100644
--- a/include/linux/kdebug.h
+++ b/include/linux/kdebug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KDEBUG_H
#define _LINUX_KDEBUG_H
diff --git a/include/linux/kdev_t.h b/include/linux/kdev_t.h
index 8e9e288b08c1..4856706fbfeb 100644
--- a/include/linux/kdev_t.h
+++ b/include/linux/kdev_t.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KDEV_T_H
#define _LINUX_KDEV_T_H
@@ -20,61 +21,61 @@
})
/* acceptable for old filesystems */
-static inline bool old_valid_dev(dev_t dev)
+static __always_inline bool old_valid_dev(dev_t dev)
{
return MAJOR(dev) < 256 && MINOR(dev) < 256;
}
-static inline u16 old_encode_dev(dev_t dev)
+static __always_inline u16 old_encode_dev(dev_t dev)
{
return (MAJOR(dev) << 8) | MINOR(dev);
}
-static inline dev_t old_decode_dev(u16 val)
+static __always_inline dev_t old_decode_dev(u16 val)
{
return MKDEV((val >> 8) & 255, val & 255);
}
-static inline u32 new_encode_dev(dev_t dev)
+static __always_inline u32 new_encode_dev(dev_t dev)
{
unsigned major = MAJOR(dev);
unsigned minor = MINOR(dev);
return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
}
-static inline dev_t new_decode_dev(u32 dev)
+static __always_inline dev_t new_decode_dev(u32 dev)
{
unsigned major = (dev & 0xfff00) >> 8;
unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
return MKDEV(major, minor);
}
-static inline u64 huge_encode_dev(dev_t dev)
+static __always_inline u64 huge_encode_dev(dev_t dev)
{
return new_encode_dev(dev);
}
-static inline dev_t huge_decode_dev(u64 dev)
+static __always_inline dev_t huge_decode_dev(u64 dev)
{
return new_decode_dev(dev);
}
-static inline int sysv_valid_dev(dev_t dev)
+static __always_inline int sysv_valid_dev(dev_t dev)
{
return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18);
}
-static inline u32 sysv_encode_dev(dev_t dev)
+static __always_inline u32 sysv_encode_dev(dev_t dev)
{
return MINOR(dev) | (MAJOR(dev) << 18);
}
-static inline unsigned sysv_major(u32 dev)
+static __always_inline unsigned sysv_major(u32 dev)
{
return (dev >> 18) & 0x3fff;
}
-static inline unsigned sysv_minor(u32 dev)
+static __always_inline unsigned sysv_minor(u32 dev)
{
return dev & 0x3ffff;
}
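
The new_*_dev() pair above packs the low byte of the minor into bits 0-7, the major into bits 8-19, and the remaining minor bits into bits 20 and up, so both directions are lossless. A worked round trip using the conventional major 8, minor 1 ("sda1"):

	dev_t d = MKDEV(8, 1);
	u32 enc = new_encode_dev(d);	  /* (1 & 0xff) | (8 << 8) = 0x801 */
	dev_t back = new_decode_dev(enc); /* major 8, minor 1 again */
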
diff --git a/include/linux/kern_levels.h b/include/linux/kern_levels.h
index f282d4e87258..bf2389c26ae3 100644
--- a/include/linux/kern_levels.h
+++ b/include/linux/kern_levels.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KERN_LEVELS_H__
#define __KERN_LEVELS_H__
@@ -13,7 +14,7 @@
#define KERN_INFO KERN_SOH "6" /* informational */
#define KERN_DEBUG KERN_SOH "7" /* debug-level messages */
-#define KERN_DEFAULT KERN_SOH "d" /* the default kernel loglevel */
+#define KERN_DEFAULT "" /* the default kernel loglevel */
/*
* Annotation for a "continued" line of log printout (only done after a
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
index f65ce09784f1..859f4b0c1b2b 100644
--- a/include/linux/kernel-page-flags.h
+++ b/include/linux/kernel-page-flags.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_KERNEL_PAGE_FLAGS_H
#define LINUX_KERNEL_PAGE_FLAGS_H
@@ -16,5 +17,7 @@
#define KPF_ARCH 38
#define KPF_UNCACHED 39
#define KPF_SOFTDIRTY 40
+#define KPF_ARCH_2 41
+#define KPF_ARCH_3 42
#endif /* LINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index bd6d96cf80b1..0d91e0af0125 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -1,166 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * NOTE:
+ *
+ * This header combines a lot of mutually unrelated stuff.
+ * The process of splitting its content is in progress while keeping
+ * backward compatibility. That's why it's highly recommended NOT to
+ * include this header inside another header file, especially under
+ * generic or architectural include/ directory.
+ */
#ifndef _LINUX_KERNEL_H
#define _LINUX_KERNEL_H
-
-#include <stdarg.h>
+#include <linux/stdarg.h>
+#include <linux/align.h>
+#include <linux/limits.h>
#include <linux/linkage.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/container_of.h>
#include <linux/bitops.h>
+#include <linux/hex.h>
+#include <linux/kstrtox.h>
#include <linux/log2.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
#include <linux/typecheck.h>
+#include <linux/panic.h>
#include <linux/printk.h>
#include <linux/build_bug.h>
+#include <linux/static_call_types.h>
+#include <linux/instruction_pointer.h>
#include <asm/byteorder.h>
-#include <uapi/linux/kernel.h>
-#define USHRT_MAX ((u16)(~0U))
-#define SHRT_MAX ((s16)(USHRT_MAX>>1))
-#define SHRT_MIN ((s16)(-SHRT_MAX - 1))
-#define INT_MAX ((int)(~0U>>1))
-#define INT_MIN (-INT_MAX - 1)
-#define UINT_MAX (~0U)
-#define LONG_MAX ((long)(~0UL>>1))
-#define LONG_MIN (-LONG_MAX - 1)
-#define ULONG_MAX (~0UL)
-#define LLONG_MAX ((long long)(~0ULL>>1))
-#define LLONG_MIN (-LLONG_MAX - 1)
-#define ULLONG_MAX (~0ULL)
-#define SIZE_MAX (~(size_t)0)
-
-#define U8_MAX ((u8)~0U)
-#define S8_MAX ((s8)(U8_MAX>>1))
-#define S8_MIN ((s8)(-S8_MAX - 1))
-#define U16_MAX ((u16)~0U)
-#define S16_MAX ((s16)(U16_MAX>>1))
-#define S16_MIN ((s16)(-S16_MAX - 1))
-#define U32_MAX ((u32)~0U)
-#define S32_MAX ((s32)(U32_MAX>>1))
-#define S32_MIN ((s32)(-S32_MAX - 1))
-#define U64_MAX ((u64)~0ULL)
-#define S64_MAX ((s64)(U64_MAX>>1))
-#define S64_MIN ((s64)(-S64_MAX - 1))
+#include <uapi/linux/kernel.h>
#define STACK_MAGIC 0xdeadbeef
+/**
+ * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value
+ * @x: value to repeat
+ *
+ * NOTE: @x is not checked for > 0xff; larger values produce odd results.
+ */
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
-/* @a is a power of 2 value */
-#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
-#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
-#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
-#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
-#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
-
/* generic data direction definitions */
#define READ 0
#define WRITE 1
+/**
+ * ARRAY_SIZE - get the number of elements in array @arr
+ * @arr: array to be sized
+ */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+#define PTR_IF(cond, ptr) ((cond) ? (ptr) : NULL)
+
#define u64_to_user_ptr(x) ( \
{ \
- typecheck(u64, x); \
- (void __user *)(uintptr_t)x; \
+ typecheck(u64, (x)); \
+ (void __user *)(uintptr_t)(x); \
} \
)
-/*
- * This looks more complex than it should be. But we need to
- * get the type for the ~ right in round_down (it needs to be
- * as wide as the result!), and we want to evaluate the macro
- * arguments just once each.
- */
-#define __round_mask(x, y) ((__typeof__(x))((y)-1))
-#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
-#define round_down(x, y) ((x) & ~__round_mask(x, y))
-
-#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
-#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
-#define DIV_ROUND_UP_ULL(ll,d) \
- ({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; })
-
-#if BITS_PER_LONG == 32
-# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
-#else
-# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
-#endif
-
-/* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */
-#define roundup(x, y) ( \
-{ \
- const typeof(y) __y = y; \
- (((x) + (__y - 1)) / __y) * __y; \
-} \
-)
-#define rounddown(x, y) ( \
-{ \
- typeof(x) __x = (x); \
- __x - (__x % (y)); \
-} \
-)
-
-/*
- * Divide positive or negative dividend by positive or negative divisor
- * and round to closest integer. Result is undefined for negative
- * divisors if the dividend variable type is unsigned and for negative
- * dividends if the divisor variable type is unsigned.
- */
-#define DIV_ROUND_CLOSEST(x, divisor)( \
-{ \
- typeof(x) __x = x; \
- typeof(divisor) __d = divisor; \
- (((typeof(x))-1) > 0 || \
- ((typeof(divisor))-1) > 0 || \
- (((__x) > 0) == ((__d) > 0))) ? \
- (((__x) + ((__d) / 2)) / (__d)) : \
- (((__x) - ((__d) / 2)) / (__d)); \
-} \
-)
-/*
- * Same as above but for u64 dividends. divisor must be a 32-bit
- * number.
- */
-#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
-{ \
- typeof(divisor) __d = divisor; \
- unsigned long long _tmp = (x) + (__d) / 2; \
- do_div(_tmp, __d); \
- _tmp; \
-} \
-)
-
-/*
- * Multiplies an integer by a fraction, while avoiding unnecessary
- * overflow or loss of precision.
- */
-#define mult_frac(x, numer, denom)( \
-{ \
- typeof(x) quot = (x) / (denom); \
- typeof(x) rem = (x) % (denom); \
- (quot * (numer)) + ((rem * (numer)) / (denom)); \
-} \
-)
-
-
-#define _RET_IP_ (unsigned long)__builtin_return_address(0)
-#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
-
-#ifdef CONFIG_LBDAF
-# include <asm/div64.h>
-# define sector_div(a, b) do_div(a, b)
-#else
-# define sector_div(n, b)( \
-{ \
- int _res; \
- _res = (n) % (b); \
- (n) /= (b); \
- _res; \
-} \
-)
-#endif
-
/**
* upper_32_bits - return bits 32-63 of a number
* @n: the number we're accessing
@@ -175,87 +78,120 @@
* lower_32_bits - return bits 0-31 of a number
* @n: the number we're accessing
*/
-#define lower_32_bits(n) ((u32)(n))
+#define lower_32_bits(n) ((u32)((n) & 0xffffffff))
+
+/**
+ * upper_16_bits - return bits 16-31 of a number
+ * @n: the number we're accessing
+ */
+#define upper_16_bits(n) ((u16)((n) >> 16))
+
+/**
+ * lower_16_bits - return bits 0-15 of a number
+ * @n: the number we're accessing
+ */
+#define lower_16_bits(n) ((u16)((n) & 0xffff))
struct completion;
-struct pt_regs;
struct user;
-#ifdef CONFIG_PREEMPT_VOLUNTARY
-extern int _cond_resched(void);
-# define might_resched() _cond_resched()
+#ifdef CONFIG_PREEMPT_VOLUNTARY_BUILD
+
+extern int __cond_resched(void);
+# define might_resched() __cond_resched()
+
+#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+
+extern int __cond_resched(void);
+
+DECLARE_STATIC_CALL(might_resched, __cond_resched);
+
+static __always_inline void might_resched(void)
+{
+ static_call_mod(might_resched)();
+}
+
+#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+
+extern int dynamic_might_resched(void);
+# define might_resched() dynamic_might_resched()
+
#else
+
# define might_resched() do { } while (0)
-#endif
+
+#endif /* CONFIG_PREEMPT_* */
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- void ___might_sleep(const char *file, int line, int preempt_offset);
- void __might_sleep(const char *file, int line, int preempt_offset);
+extern void __might_resched(const char *file, int line, unsigned int offsets);
+extern void __might_sleep(const char *file, int line);
+extern void __cant_sleep(const char *file, int line, int preempt_offset);
+extern void __cant_migrate(const char *file, int line);
+
/**
* might_sleep - annotation for functions that can sleep
*
* this macro will print a stack trace if it is executed in an atomic
- * context (spinlock, irq-handler, ...).
+ * context (spinlock, irq-handler, ...). Additional sections where blocking is
+ * not allowed can be annotated with non_block_start() and non_block_end()
+ * pairs.
*
* This is a useful debugging help to be able to catch problems early and not
* be bitten later when the calling function happens to sleep when it is not
* supposed to.
*/
# define might_sleep() \
- do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
+ do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
+/**
+ * cant_sleep - annotation for functions that cannot sleep
+ *
+ * this macro will print a stack trace if it is executed with preemption enabled
+ */
+# define cant_sleep() \
+ do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
-#else
- static inline void ___might_sleep(const char *file, int line,
- int preempt_offset) { }
- static inline void __might_sleep(const char *file, int line,
- int preempt_offset) { }
-# define might_sleep() do { might_resched(); } while (0)
-# define sched_annotate_sleep() do { } while (0)
-#endif
-
-#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
/**
- * abs - return absolute value of an argument
- * @x: the value. If it is unsigned type, it is converted to signed type first.
- * char is treated as if it was signed (regardless of whether it really is)
- * but the macro's return type is preserved as char.
+ * cant_migrate - annotation for functions that cannot migrate
*
- * Return: an absolute value of x.
+ * Will print a stack trace if executed in code which is migratable
*/
-#define abs(x) __abs_choose_expr(x, long long, \
- __abs_choose_expr(x, long, \
- __abs_choose_expr(x, int, \
- __abs_choose_expr(x, short, \
- __abs_choose_expr(x, char, \
- __builtin_choose_expr( \
- __builtin_types_compatible_p(typeof(x), char), \
- (char)({ signed char __x = (x); __x<0?-__x:__x; }), \
- ((void)0)))))))
-
-#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \
- __builtin_types_compatible_p(typeof(x), signed type) || \
- __builtin_types_compatible_p(typeof(x), unsigned type), \
- ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
+# define cant_migrate() \
+ do { \
+ if (IS_ENABLED(CONFIG_SMP)) \
+ __cant_migrate(__FILE__, __LINE__); \
+ } while (0)
/**
- * reciprocal_scale - "scale" a value into range [0, ep_ro)
- * @val: value
- * @ep_ro: right open interval endpoint
+ * non_block_start - annotate the start of section where sleeping is prohibited
*
- * Perform a "reciprocal multiplication" in order to "scale" a value into
- * range [0, ep_ro), where the upper interval endpoint is right-open.
- * This is useful, e.g. for accessing an index of an array containing
- * ep_ro elements, for example. Think of it as sort of modulus, only that
- * the result isn't that of modulo. ;) Note that if initial input is a
- * small value, then result will return 0.
+ * This is on behalf of the oom reaper, specifically when it is calling the mmu
+ * notifiers. The problem is that if the notifier were to block on, for example,
+ * mutex_lock() and if the process which holds that mutex were to perform a
+ * sleeping memory allocation, the oom reaper is now blocked on completion of
+ * that memory allocation. Other blocking calls like wait_event() pose similar
+ * issues.
+ */
+# define non_block_start() (current->non_block_count++)
+/**
+ * non_block_end - annotate the end of section where sleeping is prohibited
*
- * Return: a result based on val in interval [0, ep_ro).
+ * Closes a section opened by non_block_start().
*/
-static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
-{
- return (u32)(((u64) val * ep_ro) >> 32);
-}
+# define non_block_end() WARN_ON(current->non_block_count-- == 0)
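
A sketch of the intended pairing (the helper called in between is a
hypothetical stand-in for the notifier calls):

	non_block_start();
	/* code that must not block, e.g. running the mmu notifiers */
	invoke_mmu_notifiers(mm);	/* hypothetical helper */
	non_block_end();
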
+#else
+ static inline void __might_resched(const char *file, int line,
+ unsigned int offsets) { }
+static inline void __might_sleep(const char *file, int line) { }
+# define might_sleep() do { might_resched(); } while (0)
+# define cant_sleep() do { } while (0)
+# define cant_migrate() do { } while (0)
+# define sched_annotate_sleep() do { } while (0)
+# define non_block_start() do { } while (0)
+# define non_block_end() do { } while (0)
+#endif
+
+#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
#if defined(CONFIG_MMU) && \
(defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
@@ -265,152 +201,10 @@ void __might_fault(const char *file, int line);
static inline void might_fault(void) { }
#endif
-extern struct atomic_notifier_head panic_notifier_list;
-extern long (*panic_blink)(int state);
-__printf(1, 2)
-void panic(const char *fmt, ...) __noreturn __cold;
-void nmi_panic(struct pt_regs *regs, const char *msg);
-extern void oops_enter(void);
-extern void oops_exit(void);
-void print_oops_end_marker(void);
-extern int oops_may_print(void);
void do_exit(long error_code) __noreturn;
-void complete_and_exit(struct completion *, long) __noreturn;
-
-/* Internal, do not use. */
-int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
-int __must_check _kstrtol(const char *s, unsigned int base, long *res);
-
-int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
-int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
-
-/**
- * kstrtoul - convert a string to an unsigned long
- * @s: The start of the string. The string must be null-terminated, and may also
- * include a single newline before its terminating null. The first character
- * may also be a plus sign, but not a minus sign.
- * @base: The number base to use. The maximum supported base is 16. If base is
- * given as 0, then the base of the string is automatically detected with the
- * conventional semantics - If it begins with 0x the number will be parsed as a
- * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
- * parsed as an octal number. Otherwise it will be parsed as a decimal.
- * @res: Where to write the result of the conversion on success.
- *
- * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoul. Return code must
- * be checked.
-*/
-static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
-{
- /*
- * We want to shortcut function call, but
- * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0.
- */
- if (sizeof(unsigned long) == sizeof(unsigned long long) &&
- __alignof__(unsigned long) == __alignof__(unsigned long long))
- return kstrtoull(s, base, (unsigned long long *)res);
- else
- return _kstrtoul(s, base, res);
-}
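
Typical use of the converter (illustrative snippet):

	unsigned long val;

	if (kstrtoul(buf, 10, &val))	/* base 10; 0 is returned on success */
		return -EINVAL;
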
-
-/**
- * kstrtol - convert a string to a long
- * @s: The start of the string. The string must be null-terminated, and may also
- * include a single newline before its terminating null. The first character
- * may also be a plus sign or a minus sign.
- * @base: The number base to use. The maximum supported base is 16. If base is
- * given as 0, then the base of the string is automatically detected with the
- * conventional semantics - If it begins with 0x the number will be parsed as a
- * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
- * parsed as an octal number. Otherwise it will be parsed as a decimal.
- * @res: Where to write the result of the conversion on success.
- *
- * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtol. Return code must
- * be checked.
- */
-static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
-{
- /*
- * We want to shortcut function call, but
- * __builtin_types_compatible_p(long, long long) = 0.
- */
- if (sizeof(long) == sizeof(long long) &&
- __alignof__(long) == __alignof__(long long))
- return kstrtoll(s, base, (long long *)res);
- else
- return _kstrtol(s, base, res);
-}
-
-int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
-int __must_check kstrtoint(const char *s, unsigned int base, int *res);
-
-static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
-{
- return kstrtoull(s, base, res);
-}
-
-static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
-{
- return kstrtoll(s, base, res);
-}
-
-static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
-{
- return kstrtouint(s, base, res);
-}
-
-static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
-{
- return kstrtoint(s, base, res);
-}
-
-int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
-int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
-int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
-int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
-int __must_check kstrtobool(const char *s, bool *res);
-
-int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
-int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
-int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
-int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
-int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
-int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
-int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
-int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
-int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
-int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
-int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
-
-static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
-{
- return kstrtoull_from_user(s, count, base, res);
-}
-
-static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
-{
- return kstrtoll_from_user(s, count, base, res);
-}
-
-static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
-{
- return kstrtouint_from_user(s, count, base, res);
-}
-
-static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
-{
- return kstrtoint_from_user(s, count, base, res);
-}
-
-/* Obsolete, do not use. Use kstrto<foo> instead */
-extern unsigned long simple_strtoul(const char *,char **,unsigned int);
-extern long simple_strtol(const char *,char **,unsigned int);
-extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
-extern long long simple_strtoll(const char *,char **,unsigned int);
-
-extern int num_to_str(char *buf, int size, unsigned long long num);
+extern int num_to_str(char *buf, int size,
+ unsigned long long num, unsigned int width);
/* lib/printf utilities */
@@ -436,6 +230,8 @@ int sscanf(const char *, const char *, ...);
extern __scanf(2, 0)
int vsscanf(const char *, const char *, va_list);
+extern int no_hash_pointers_enable(char *str);
+
extern int get_option(char **str, int *pint);
extern char *get_options(const char *str, int nints, int *ints);
extern unsigned long long memparse(const char *ptr, char **retptr);
@@ -443,50 +239,12 @@ extern bool parse_option_str(const char *str, const char *option);
extern char *next_arg(char *args, char **param, char **val);
extern int core_kernel_text(unsigned long addr);
-extern int core_kernel_data(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
extern int func_ptr_is_kernel_text(void *ptr);
-unsigned long int_sqrt(unsigned long);
-
extern void bust_spinlocks(int yes);
-extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */
-extern int panic_timeout;
-extern int panic_on_oops;
-extern int panic_on_unrecovered_nmi;
-extern int panic_on_io_nmi;
-extern int panic_on_warn;
-extern int sysctl_panic_on_rcu_stall;
-extern int sysctl_panic_on_stackoverflow;
-extern bool crash_kexec_post_notifiers;
-
-/*
- * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It
- * holds a CPU number which is executing panic() currently. A value of
- * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec().
- */
-extern atomic_t panic_cpu;
-#define PANIC_CPU_INVALID -1
-
-/*
- * Only to be used by arch init code. If the user over-wrote the default
- * CONFIG_PANIC_TIMEOUT, honor it.
- */
-static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
-{
- if (panic_timeout == arch_default_timeout)
- panic_timeout = timeout;
-}
-extern const char *print_tainted(void);
-enum lockdep_ok {
- LOCKDEP_STILL_OK,
- LOCKDEP_NOW_UNRELIABLE
-};
-extern void add_taint(unsigned flag, enum lockdep_ok);
-extern int test_taint(unsigned flag);
-extern unsigned long get_taint(void);
extern int root_mountflags;
extern bool early_boot_irqs_disabled;
@@ -498,73 +256,21 @@ extern bool early_boot_irqs_disabled;
extern enum system_states {
SYSTEM_BOOTING,
SYSTEM_SCHEDULING,
+ SYSTEM_FREEING_INITMEM,
SYSTEM_RUNNING,
SYSTEM_HALT,
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
+ SYSTEM_SUSPEND,
} system_state;
-#define TAINT_PROPRIETARY_MODULE 0
-#define TAINT_FORCED_MODULE 1
-#define TAINT_CPU_OUT_OF_SPEC 2
-#define TAINT_FORCED_RMMOD 3
-#define TAINT_MACHINE_CHECK 4
-#define TAINT_BAD_PAGE 5
-#define TAINT_USER 6
-#define TAINT_DIE 7
-#define TAINT_OVERRIDDEN_ACPI_TABLE 8
-#define TAINT_WARN 9
-#define TAINT_CRAP 10
-#define TAINT_FIRMWARE_WORKAROUND 11
-#define TAINT_OOT_MODULE 12
-#define TAINT_UNSIGNED_MODULE 13
-#define TAINT_SOFTLOCKUP 14
-#define TAINT_LIVEPATCH 15
-#define TAINT_FLAGS_COUNT 16
-
-struct taint_flag {
- char c_true; /* character printed when tainted */
- char c_false; /* character printed when not tainted */
- bool module; /* also show as a per-module taint flag */
-};
-
-extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT];
-
-extern const char hex_asc[];
-#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
-#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
-
-static inline char *hex_byte_pack(char *buf, u8 byte)
-{
- *buf++ = hex_asc_hi(byte);
- *buf++ = hex_asc_lo(byte);
- return buf;
-}
-
-extern const char hex_asc_upper[];
-#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
-#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
-
-static inline char *hex_byte_pack_upper(char *buf, u8 byte)
-{
- *buf++ = hex_asc_upper_hi(byte);
- *buf++ = hex_asc_upper_lo(byte);
- return buf;
-}
-
-extern int hex_to_bin(char ch);
-extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
-extern char *bin2hex(char *dst, const void *src, size_t count);
-
-bool mac_pton(const char *s, u8 *mac);
-
/*
* General tracing related utility functions - trace_printk(),
* tracing_on/tracing_off and tracing_start()/tracing_stop
*
* Use tracing_on/tracing_off when you want to quickly turn on or off
* tracing. It simply enables or disables the recording of the trace events.
- * This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on
+ * This also corresponds to the user space /sys/kernel/tracing/tracing_on
* file, which gives a means for the kernel and userspace to interact.
* Place a tracing_off() in the kernel where you want tracing to end.
* From user space, examine the trace, and then echo 1 > tracing_on
@@ -608,8 +314,8 @@ do { \
* trace_printk - printf formatting in the ftrace buffer
* @fmt: the printf format for printing
*
- * Note: __trace_printk is an internal function for trace_printk and
- * the @ip is passed in via the trace_printk macro.
+ * Note: __trace_printk is an internal function for trace_printk() and
+ * the @ip is passed in via the trace_printk() macro.
*
* This function allows a kernel developer to debug fast path sections
* that printk is not appropriate for. By scattering in various
@@ -619,9 +325,9 @@ do { \
* This is intended as a debugging tool for the developer only.
* Please refrain from leaving trace_printks scattered around in
* your code. (Extra memory is used for special buffers that are
- * allocated when trace_printk() is used)
+ * allocated when trace_printk() is used.)
*
- * A little optization trick is done here. If there's only one
+ * A little optimization trick is done here. If there's only one
* argument, there's no need to scan the string for printf formats.
* The trace_puts() will suffice. But how can we take advantage of
* using trace_puts() when trace_printk() has only one argument?
@@ -646,7 +352,7 @@ do { \
#define do_trace_printk(fmt, args...) \
do { \
static const char *trace_printk_fmt __used \
- __attribute__((section("__trace_printk_fmt"))) = \
+ __section("__trace_printk_fmt") = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
__trace_printk_check_format(fmt, ##args); \
@@ -671,7 +377,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
* the @ip is passed in via the trace_puts macro.
*
* This is similar to trace_printk() but is made for those really fast
- * paths that a developer wants the least amount of "Heisenbug" affects,
+ * paths that a developer wants the least amount of "Heisenbug" effects,
* where the processing of the print format is still too much.
*
* This function allows a kernel developer to debug fast path sections
@@ -682,7 +388,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
* This is intended as a debugging tool for the developer only.
* Please refrain from leaving trace_puts scattered around in
* your code. (Extra memory is used for special buffers that are
- * allocated when trace_puts() is used)
+ * allocated when trace_puts() is used.)
*
* Returns: 0 if nothing was written, positive # if string was.
* (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
@@ -690,7 +396,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
#define trace_puts(str) ({ \
static const char *trace_printk_fmt __used \
- __attribute__((section("__trace_printk_fmt"))) = \
+ __section("__trace_printk_fmt") = \
__builtin_constant_p(str) ? str : NULL; \
\
if (__builtin_constant_p(str)) \
@@ -712,7 +418,7 @@ extern void trace_dump_stack(int skip);
do { \
if (__builtin_constant_p(fmt)) { \
static const char *trace_printk_fmt __used \
- __attribute__((section("__trace_printk_fmt"))) = \
+ __section("__trace_printk_fmt") = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
__ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
@@ -751,116 +457,12 @@ ftrace_vprintk(const char *fmt, va_list ap)
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif /* CONFIG_TRACING */
-/*
- * min()/max()/clamp() macros that also do
- * strict type-checking.. See the
- * "unnecessary" pointer comparison.
- */
-#define __min(t1, t2, min1, min2, x, y) ({ \
- t1 min1 = (x); \
- t2 min2 = (y); \
- (void) (&min1 == &min2); \
- min1 < min2 ? min1 : min2; })
-#define min(x, y) \
- __min(typeof(x), typeof(y), \
- __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
- x, y)
-
-#define __max(t1, t2, max1, max2, x, y) ({ \
- t1 max1 = (x); \
- t2 max2 = (y); \
- (void) (&max1 == &max2); \
- max1 > max2 ? max1 : max2; })
-#define max(x, y) \
- __max(typeof(x), typeof(y), \
- __UNIQUE_ID(max1_), __UNIQUE_ID(max2_), \
- x, y)
-
-#define min3(x, y, z) min((typeof(x))min(x, y), z)
-#define max3(x, y, z) max((typeof(x))max(x, y), z)
+/* This counts to 12. Any more, it will return 13th argument. */
+#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
+#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
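
For illustration, the argument counter expands as follows:

	COUNT_ARGS()		/* -> 0 */
	COUNT_ARGS(a)		/* -> 1 */
	COUNT_ARGS(a, b, c)	/* -> 3 */
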
-/**
- * min_not_zero - return the minimum that is _not_ zero, unless both are zero
- * @x: value1
- * @y: value2
- */
-#define min_not_zero(x, y) ({ \
- typeof(x) __x = (x); \
- typeof(y) __y = (y); \
- __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
-
-/**
- * clamp - return a value clamped to a given range with strict typechecking
- * @val: current value
- * @lo: lowest allowable value
- * @hi: highest allowable value
- *
- * This macro does strict typechecking of lo/hi to make sure they are of the
- * same type as val. See the unnecessary pointer comparisons.
- */
-#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
-
-/*
- * ..and if you can't take the strict
- * types, you can specify one yourself.
- *
- * Or not use min/max/clamp at all, of course.
- */
-#define min_t(type, x, y) \
- __min(type, type, \
- __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
- x, y)
-
-#define max_t(type, x, y) \
- __max(type, type, \
- __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
- x, y)
-
-/**
- * clamp_t - return a value clamped to a given range using a given type
- * @type: the type of variable to use
- * @val: current value
- * @lo: minimum allowable value
- * @hi: maximum allowable value
- *
- * This macro does no typechecking and uses temporary variables of type
- * 'type' to make all the comparisons.
- */
-#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
-
-/**
- * clamp_val - return a value clamped to a given range using val's type
- * @val: current value
- * @lo: minimum allowable value
- * @hi: maximum allowable value
- *
- * This macro does no typechecking and uses temporary variables of whatever
- * type the input argument 'val' is. This is useful when val is an unsigned
- * type and min and max are literals that will otherwise be assigned a signed
- * integer type.
- */
-#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
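
A short illustration of the case the clamp_val() comment describes (values
arbitrary):

	u8 v = 200;

	clamp(v, 10, 100);	/* warns: 10 and 100 are int, v is u8 */
	clamp_val(v, 10, 100);	/* ok: comparisons done in u8, yields 100 */
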
-
-
-/*
- * swap - swap value of @a and @b
- */
-#define swap(a, b) \
- do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
-
-/**
- * container_of - cast a member of a structure out to the containing structure
- * @ptr: the pointer to the member.
- * @type: the type of the container struct this is embedded in.
- * @member: the name of the member within the struct.
- *
- */
-#define container_of(ptr, type, member) ({ \
- void *__mptr = (void *)(ptr); \
- BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
- !__same_type(*(ptr), void), \
- "pointer type mismatch in container_of()"); \
- ((type *)(__mptr - offsetof(type, member))); })
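
The classic container_of() pattern, for reference (struct and callback are
hypothetical):

	struct foo_work {
		int state;
		struct work_struct work;
	};

	static void foo_work_fn(struct work_struct *work)
	{
		struct foo_work *fw = container_of(work, struct foo_work, work);
		/* fw now points at the enclosing foo_work */
	}
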
+#define __CONCAT(a, b) a ## b
+#define CONCATENATE(a, b) __CONCAT(a, b)
/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
diff --git a/include/linux/kernel_read_file.h b/include/linux/kernel_read_file.h
new file mode 100644
index 000000000000..90451e2e12bd
--- /dev/null
+++ b/include/linux/kernel_read_file.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KERNEL_READ_FILE_H
+#define _LINUX_KERNEL_READ_FILE_H
+
+#include <linux/file.h>
+#include <linux/types.h>
+
+/* This is a list of *what* is being read, not *how* nor *where*. */
+#define __kernel_read_file_id(id) \
+ id(UNKNOWN, unknown) \
+ id(FIRMWARE, firmware) \
+ id(MODULE, kernel-module) \
+ id(KEXEC_IMAGE, kexec-image) \
+ id(KEXEC_INITRAMFS, kexec-initramfs) \
+ id(POLICY, security-policy) \
+ id(X509_CERTIFICATE, x509-certificate) \
+ id(MAX_ID, )
+
+#define __fid_enumify(ENUM, dummy) READING_ ## ENUM,
+#define __fid_stringify(dummy, str) #str,
+
+enum kernel_read_file_id {
+ __kernel_read_file_id(__fid_enumify)
+};
+
+static const char * const kernel_read_file_str[] = {
+ __kernel_read_file_id(__fid_stringify)
+};
+
+static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id)
+{
+ if ((unsigned int)id >= READING_MAX_ID)
+ return kernel_read_file_str[READING_UNKNOWN];
+
+ return kernel_read_file_str[id];
+}
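
For reference, the x-macro above expands roughly to the following (sketch of
the expansion, not literal preprocessor output):

	enum kernel_read_file_id {
		READING_UNKNOWN,
		READING_FIRMWARE,
		READING_MODULE,
		/* ... */
		READING_MAX_ID,
	};

	static const char * const kernel_read_file_str[] = {
		"unknown", "firmware", "kernel-module", /* ... */ "",
	};
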
+
+ssize_t kernel_read_file(struct file *file, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
+ssize_t kernel_read_file_from_path(const char *path, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
+ssize_t kernel_read_file_from_path_initns(const char *path, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
+ssize_t kernel_read_file_from_fd(int fd, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
+
+#endif /* _LINUX_KERNEL_READ_FILE_H */
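
A hedged usage sketch of the path-based variant (the path is illustrative; on
success the kernel allocates the buffer, which the caller frees with vfree()):

	void *buf = NULL;
	size_t file_size;
	ssize_t ret;

	ret = kernel_read_file_from_path("/lib/firmware/foo.bin", 0, &buf,
					 INT_MAX, &file_size, READING_FIRMWARE);
	if (ret < 0)
		return ret;
	/* ... use file_size bytes at buf ... */
	vfree(buf);
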
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 66be8b6beceb..9935f7ecbfb9 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KERNEL_STAT_H
#define _LINUX_KERNEL_STAT_H
@@ -27,6 +28,9 @@ enum cpu_usage_stat {
CPUTIME_STEAL,
CPUTIME_GUEST,
CPUTIME_GUEST_NICE,
+#ifdef CONFIG_SCHED_CORE
+ CPUTIME_FORCEIDLE,
+#endif
NR_STATS,
};
@@ -48,6 +52,7 @@ DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
+extern unsigned long long nr_context_switches_cpu(int cpu);
extern unsigned long long nr_context_switches(void);
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
@@ -63,20 +68,48 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
return kstat_cpu(cpu).softirqs[irq];
}
+static inline unsigned int kstat_cpu_softirqs_sum(int cpu)
+{
+ int i;
+ unsigned int sum = 0;
+
+ for (i = 0; i < NR_SOFTIRQS; i++)
+ sum += kstat_softirqs_cpu(i, cpu);
+
+ return sum;
+}
+
/*
* Number of interrupts per specific IRQ source, since bootup
*/
-extern unsigned int kstat_irqs(unsigned int irq);
extern unsigned int kstat_irqs_usr(unsigned int irq);
/*
* Number of interrupts per cpu, since bootup
*/
-static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
+static inline unsigned long kstat_cpu_irqs_sum(unsigned int cpu)
{
return kstat_cpu(cpu).irqs_sum;
}
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+extern u64 kcpustat_field(struct kernel_cpustat *kcpustat,
+ enum cpu_usage_stat usage, int cpu);
+extern void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu);
+#else
+static inline u64 kcpustat_field(struct kernel_cpustat *kcpustat,
+ enum cpu_usage_stat usage, int cpu)
+{
+ return kcpustat->cpustat[usage];
+}
+
+static inline void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
+{
+ *dst = kcpustat_cpu(cpu);
+}
+
+#endif
+
extern void account_user_time(struct task_struct *, u64);
extern void account_guest_time(struct task_struct *, u64);
extern void account_system_time(struct task_struct *, int, u64);
@@ -84,6 +117,7 @@ extern void account_system_index_time(struct task_struct *, u64,
enum cpu_usage_stat);
extern void account_steal_time(u64);
extern void account_idle_time(u64);
+extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
static inline void account_process_tick(struct task_struct *tsk, int user)
@@ -96,4 +130,8 @@ extern void account_process_tick(struct task_struct *, int user);
extern void account_idle_ticks(unsigned long ticks);
+#ifdef CONFIG_SCHED_CORE
+extern void __account_forceidle_time(struct task_struct *tsk, u64 delta);
+#endif
+
#endif /* _LINUX_KERNEL_STAT_H */
diff --git a/include/linux/kernelcapi.h b/include/linux/kernelcapi.h
index e985ba679c4a..94ba42bf9da1 100644
--- a/include/linux/kernelcapi.h
+++ b/include/linux/kernelcapi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* $Id: kernelcapi.h,v 1.8.6.2 2001/02/07 11:31:31 kai Exp $
*
@@ -9,46 +10,12 @@
#ifndef __KERNELCAPI_H__
#define __KERNELCAPI_H__
-
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <uapi/linux/kernelcapi.h>
-struct capi20_appl {
- u16 applid;
- capi_register_params rparam;
- void (*recv_message)(struct capi20_appl *ap, struct sk_buff *skb);
- void *private;
-
- /* internal to kernelcapi.o */
- unsigned long nrecvctlpkt;
- unsigned long nrecvdatapkt;
- unsigned long nsentctlpkt;
- unsigned long nsentdatapkt;
- struct mutex recv_mtx;
- struct sk_buff_head recv_queue;
- struct work_struct recv_work;
- int release_in_progress;
-};
-
-u16 capi20_isinstalled(void);
-u16 capi20_register(struct capi20_appl *ap);
-u16 capi20_release(struct capi20_appl *ap);
-u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb);
-u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN]);
-u16 capi20_get_version(u32 contr, struct capi_version *verp);
-u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN]);
-u16 capi20_get_profile(u32 contr, struct capi_profile *profp);
-int capi20_manufacturer(unsigned long cmd, void __user *data);
-
-#define CAPICTR_UP 0
-#define CAPICTR_DOWN 1
-
-int register_capictr_notifier(struct notifier_block *nb);
-int unregister_capictr_notifier(struct notifier_block *nb);
-
#define CAPI_NOERROR 0x0000
#define CAPI_TOOMANYAPPLS 0x1001
@@ -75,45 +42,4 @@ int unregister_capictr_notifier(struct notifier_block *nb);
#define CAPI_MSGCTRLERNOTSUPPORTEXTEQUIP 0x110a
#define CAPI_MSGCTRLERONLYSUPPORTEXTEQUIP 0x110b
-typedef enum {
- CapiMessageNotSupportedInCurrentState = 0x2001,
- CapiIllContrPlciNcci = 0x2002,
- CapiNoPlciAvailable = 0x2003,
- CapiNoNcciAvailable = 0x2004,
- CapiNoListenResourcesAvailable = 0x2005,
- CapiNoFaxResourcesAvailable = 0x2006,
- CapiIllMessageParmCoding = 0x2007,
-} RESOURCE_CODING_PROBLEM;
-
-typedef enum {
- CapiB1ProtocolNotSupported = 0x3001,
- CapiB2ProtocolNotSupported = 0x3002,
- CapiB3ProtocolNotSupported = 0x3003,
- CapiB1ProtocolParameterNotSupported = 0x3004,
- CapiB2ProtocolParameterNotSupported = 0x3005,
- CapiB3ProtocolParameterNotSupported = 0x3006,
- CapiBProtocolCombinationNotSupported = 0x3007,
- CapiNcpiNotSupported = 0x3008,
- CapiCipValueUnknown = 0x3009,
- CapiFlagsNotSupported = 0x300a,
- CapiFacilityNotSupported = 0x300b,
- CapiDataLengthNotSupportedByCurrentProtocol = 0x300c,
- CapiResetProcedureNotSupportedByCurrentProtocol = 0x300d,
- CapiTeiAssignmentFailed = 0x300e,
-} REQUESTED_SERVICES_PROBLEM;
-
-typedef enum {
- CapiSuccess = 0x0000,
- CapiSupplementaryServiceNotSupported = 0x300e,
- CapiRequestNotAllowedInThisState = 0x3010,
-} SUPPLEMENTARY_SERVICE_INFO;
-
-typedef enum {
- CapiProtocolErrorLayer1 = 0x3301,
- CapiProtocolErrorLayer2 = 0x3302,
- CapiProtocolErrorLayer3 = 0x3303,
- CapiTimeOut = 0x3303, // SuppServiceReason
- CapiCallGivenToOtherApplication = 0x3304,
-} CAPI_REASON;
-
#endif /* __KERNELCAPI_H__ */
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index a9b11b8d06f2..73f5c120def8 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -1,13 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* kernfs.h - pseudo filesystem decoupled from vfs locking
- *
- * This file is released under the GPLv2.
*/
#ifndef __LINUX_KERNFS_H
#define __LINUX_KERNFS_H
-#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -15,27 +13,94 @@
#include <linux/lockdep.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/types.h>
+#include <linux/uidgid.h>
#include <linux/wait.h>
+#include <linux/rwsem.h>
+#include <linux/cache.h>
struct file;
struct dentry;
struct iattr;
struct seq_file;
struct vm_area_struct;
+struct vm_operations_struct;
struct super_block;
struct file_system_type;
+struct poll_table_struct;
+struct fs_context;
+struct kernfs_fs_context;
struct kernfs_open_node;
struct kernfs_iattrs;
+/*
+ * NR_KERNFS_LOCK_BITS determines the size (NR_KERNFS_LOCKS) of the
+ * hash table of locks.
+ * A small hash table would hurt scalability, since more and more
+ * kernfs_node objects would end up sharing the same lock, while a
+ * very large hash table would waste memory.
+ *
+ * At the moment size of hash table of locks is being set based on
+ * the number of CPUs as follows:
+ *
+ *   NR_CPU      NR_KERNFS_LOCK_BITS      NR_KERNFS_LOCKS
+ *   1                1                       2
+ *   2-3              2                       4
+ *   4-7              4                      16
+ *   8-15             6                      64
+ *   16-31            8                     256
+ *   32 and more     10                    1024
+ *
+ * The above relation between NR_CPU and the number of locks is based
+ * on some internal experimentation which involved booting qemu
+ * with different values of smp, performing some sysfs operations
+ * on all CPUs and observing how an increase in the number of locks
+ * impacts the completion time of these sysfs operations on each CPU.
+ */
+#ifdef CONFIG_SMP
+#define NR_KERNFS_LOCK_BITS (2 * (ilog2(NR_CPUS < 32 ? NR_CPUS : 32)))
+#else
+#define NR_KERNFS_LOCK_BITS 1
+#endif
+
+#define NR_KERNFS_LOCKS (1 << NR_KERNFS_LOCK_BITS)
+
+/*
+ * There's one kernfs_open_file for each open file and one kernfs_open_node
+ * for each kernfs_node with one or more open files.
+ *
+ * filp->private_data points to seq_file whose ->private points to
+ * kernfs_open_file.
+ *
+ * kernfs_open_files are chained at kernfs_open_node->files, which is
+ * protected by kernfs_global_locks.open_file_mutex[i].
+ *
+ * To reduce possible contention in sysfs access, arising from the use of
+ * single locks, an array of locks (e.g. open_file_mutex) is used, with the
+ * kernfs_node object address as the hash key selecting the lock index.
+ *
+ * Hashed mutexes are safe to use here because operations using these don't
+ * rely on global exclusion.
+ *
+ * In future we intend to replace other global locks with hashed ones as well.
+ * kernfs_global_locks acts as a holder for all such hash tables.
+ */
+struct kernfs_global_locks {
+ struct mutex open_file_mutex[NR_KERNFS_LOCKS];
+};
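
A sketch of the address-hashed lookup described above (the helper name is
illustrative; kernfs keeps an equivalent internally, with kernfs_locks being
the global holder instance):

	static struct mutex *open_file_mutex_ptr(struct kernfs_node *kn)
	{
		int idx = hash_ptr(kn, NR_KERNFS_LOCK_BITS);

		return &kernfs_locks->open_file_mutex[idx];
	}
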
+
enum kernfs_node_type {
KERNFS_DIR = 0x0001,
KERNFS_FILE = 0x0002,
KERNFS_LINK = 0x0004,
};
-#define KERNFS_TYPE_MASK 0x000f
-#define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK
+#define KERNFS_TYPE_MASK 0x000f
+#define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK
+#define KERNFS_MAX_USER_XATTRS 128
+#define KERNFS_USER_XATTR_SIZE_LIMIT (128 << 10)
enum kernfs_node_flag {
KERNFS_ACTIVATED = 0x0010,
@@ -43,10 +108,12 @@ enum kernfs_node_flag {
KERNFS_HAS_SEQ_SHOW = 0x0040,
KERNFS_HAS_MMAP = 0x0080,
KERNFS_LOCKDEP = 0x0100,
+ KERNFS_HIDDEN = 0x0200,
KERNFS_SUICIDAL = 0x0400,
KERNFS_SUICIDED = 0x0800,
KERNFS_EMPTY_DIR = 0x1000,
KERNFS_HAS_RELEASE = 0x2000,
+ KERNFS_REMOVING = 0x4000,
};
/* @flags for kernfs_create_root() */
@@ -60,7 +127,7 @@ enum kernfs_root_flag {
KERNFS_ROOT_CREATE_DEACTIVATED = 0x0001,
/*
- * For regular flies, if the opener has CAP_DAC_OVERRIDE, open(2)
+ * For regular files, if the opener has CAP_DAC_OVERRIDE, open(2)
* succeeds regardless of the RW permissions. sysfs had an extra
* layer of enforcement where open(2) fails with -EACCES regardless
* of CAP_DAC_OVERRIDE if the permission doesn't have the
@@ -69,6 +136,17 @@ enum kernfs_root_flag {
* following flag enables that behavior.
*/
KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 0x0002,
+
+ /*
+ * The filesystem supports exportfs operation, so userspace can use
+ * fhandle to access nodes of the fs.
+ */
+ KERNFS_ROOT_SUPPORT_EXPORTOP = 0x0004,
+
+ /*
+ * Support user xattrs to be written to nodes rooted at this root.
+ */
+ KERNFS_ROOT_SUPPORT_USER_XATTR = 0x0008,
};
/* type-specific structures for kernfs_node union members */
@@ -82,6 +160,11 @@ struct kernfs_elem_dir {
* better directly in kernfs_node but is here to save space.
*/
struct kernfs_root *root;
+ /*
+ * Monotonic revision counter, used to identify if a directory
+ * node has changed during negative dentry revalidation.
+ */
+ unsigned long rev;
};
struct kernfs_elem_symlink {
@@ -90,7 +173,7 @@ struct kernfs_elem_symlink {
struct kernfs_elem_attr {
const struct kernfs_ops *ops;
- struct kernfs_open_node *open;
+ struct kernfs_open_node __rcu *open;
loff_t size;
struct kernfs_node *notify_next; /* for kernfs_notify() */
};
@@ -100,7 +183,7 @@ struct kernfs_elem_attr {
* kernfs node is represented by single kernfs_node. Most fields are
* private to kernfs and shouldn't be accessed directly by kernfs users.
*
- * As long as s_count reference is held, the kernfs_node itself is
+ * As long as count reference is held, the kernfs_node itself is
* accessible. Dereferencing elem or any other outer entity requires
* active reference.
*/
@@ -131,9 +214,14 @@ struct kernfs_node {
void *priv;
+ /*
+ * 64bit unique ID. On 64bit ino setups, id is the ino. On 32bit,
+ * the low 32bits are the ino and the upper 32bits the generation.
+ */
+ u64 id;
+
unsigned short flags;
umode_t mode;
- unsigned int ino;
struct kernfs_iattrs *iattr;
};
@@ -145,7 +233,6 @@ struct kernfs_node {
* kernfs_node parameter.
*/
struct kernfs_syscall_ops {
- int (*remount_fs)(struct kernfs_root *root, int *flags, char *data);
int (*show_options)(struct seq_file *sf, struct kernfs_root *root);
int (*mkdir)(struct kernfs_node *parent, const char *name,
@@ -157,20 +244,7 @@ struct kernfs_syscall_ops {
struct kernfs_root *root);
};
-struct kernfs_root {
- /* published fields */
- struct kernfs_node *kn;
- unsigned int flags; /* KERNFS_ROOT_* flags */
-
- /* private fields, do not use outside kernfs proper */
- struct ida ino_ida;
- struct kernfs_syscall_ops *syscall_ops;
-
- /* list of kernfs_super_info of this root, protected by kernfs_mutex */
- struct list_head supers;
-
- wait_queue_head_t deactivate_waitq;
-};
+struct kernfs_node *kernfs_root_to_node(struct kernfs_root *root);
struct kernfs_open_file {
/* published fields */
@@ -238,11 +312,22 @@ struct kernfs_ops {
ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes,
loff_t off);
+ __poll_t (*poll)(struct kernfs_open_file *of,
+ struct poll_table_struct *pt);
+
int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma);
+};
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lock_class_key lockdep_key;
-#endif
+/*
+ * The kernfs superblock creation/mount parameter context.
+ */
+struct kernfs_fs_context {
+ struct kernfs_root *root; /* Root of the hierarchy being mounted */
+ void *ns_tag; /* Namespace tag of the mount (or NULL) */
+ unsigned long magic; /* File system specific magic number */
+
+ /* The following are set/used by kernfs_mount() */
+ bool new_sb_created; /* Set to T if we allocated a new sb */
};
#ifdef CONFIG_KERNFS
@@ -252,6 +337,34 @@ static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn)
return kn->flags & KERNFS_TYPE_MASK;
}
+static inline ino_t kernfs_id_ino(u64 id)
+{
+ /* id is ino if ino_t is 64bit; otherwise, low 32bits */
+ if (sizeof(ino_t) >= sizeof(u64))
+ return id;
+ else
+ return (u32)id;
+}
+
+static inline u32 kernfs_id_gen(u64 id)
+{
+ /* gen is fixed at 1 if ino_t is 64bit; otherwise, high 32bits */
+ if (sizeof(ino_t) >= sizeof(u64))
+ return 1;
+ else
+ return id >> 32;
+}
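
To make the id packing concrete (value illustrative):

	u64 id = ((u64)2 << 32) | 42;

	kernfs_id_ino(id);	/* 42 with 32-bit ino_t; the full id on 64-bit */
	kernfs_id_gen(id);	/* 2 with 32-bit ino_t; always 1 on 64-bit */
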
+
+static inline ino_t kernfs_ino(struct kernfs_node *kn)
+{
+ return kernfs_id_ino(kn->id);
+}
+
+static inline ino_t kernfs_gen(struct kernfs_node *kn)
+{
+ return kernfs_id_gen(kn->id);
+}
+
/**
* kernfs_enable_ns - enable namespace under a directory
* @kn: directory of interest, should be empty
@@ -303,12 +416,14 @@ void kernfs_destroy_root(struct kernfs_root *root);
struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
const char *name, umode_t mode,
+ kuid_t uid, kgid_t gid,
void *priv, const void *ns);
struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
const char *name);
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
- const char *name,
- umode_t mode, loff_t size,
+ const char *name, umode_t mode,
+ kuid_t uid, kgid_t gid,
+ loff_t size,
const struct kernfs_ops *ops,
void *priv, const void *ns,
struct lock_class_key *key);
@@ -316,6 +431,7 @@ struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
const char *name,
struct kernfs_node *target);
void kernfs_activate(struct kernfs_node *kn);
+void kernfs_show(struct kernfs_node *kn, bool show);
void kernfs_remove(struct kernfs_node *kn);
void kernfs_break_active_protection(struct kernfs_node *kn);
void kernfs_unbreak_active_protection(struct kernfs_node *kn);
@@ -325,17 +441,24 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
const char *new_name, const void *new_ns);
int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr);
+__poll_t kernfs_generic_poll(struct kernfs_open_file *of,
+ struct poll_table_struct *pt);
void kernfs_notify(struct kernfs_node *kn);
+int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
+ void *value, size_t size);
+int kernfs_xattr_set(struct kernfs_node *kn, const char *name,
+ const void *value, size_t size, int flags);
+
const void *kernfs_super_ns(struct super_block *sb);
-struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
- struct kernfs_root *root, unsigned long magic,
- bool *new_sb_created, const void *ns);
+int kernfs_get_tree(struct fs_context *fc);
+void kernfs_free_fs_context(struct fs_context *fc);
void kernfs_kill_sb(struct super_block *sb);
-struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns);
void kernfs_init(void);
+struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
+ u64 id);
#else /* CONFIG_KERNFS */
static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn)
@@ -391,12 +514,14 @@ static inline void kernfs_destroy_root(struct kernfs_root *root) { }
static inline struct kernfs_node *
kernfs_create_dir_ns(struct kernfs_node *parent, const char *name,
- umode_t mode, void *priv, const void *ns)
+ umode_t mode, kuid_t uid, kgid_t gid,
+ void *priv, const void *ns)
{ return ERR_PTR(-ENOSYS); }
static inline struct kernfs_node *
__kernfs_create_file(struct kernfs_node *parent, const char *name,
- umode_t mode, loff_t size, const struct kernfs_ops *ops,
+ umode_t mode, kuid_t uid, kgid_t gid,
+ loff_t size, const struct kernfs_ops *ops,
void *priv, const void *ns, struct lock_class_key *key)
{ return ERR_PTR(-ENOSYS); }
@@ -427,14 +552,21 @@ static inline int kernfs_setattr(struct kernfs_node *kn,
static inline void kernfs_notify(struct kernfs_node *kn) { }
+static inline int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
+ void *value, size_t size)
+{ return -ENOSYS; }
+
+static inline int kernfs_xattr_set(struct kernfs_node *kn, const char *name,
+ const void *value, size_t size, int flags)
+{ return -ENOSYS; }
+
static inline const void *kernfs_super_ns(struct super_block *sb)
{ return NULL; }
-static inline struct dentry *
-kernfs_mount_ns(struct file_system_type *fs_type, int flags,
- struct kernfs_root *root, unsigned long magic,
- bool *new_sb_created, const void *ns)
-{ return ERR_PTR(-ENOSYS); }
+static inline int kernfs_get_tree(struct fs_context *fc)
+{ return -ENOSYS; }
+
+static inline void kernfs_free_fs_context(struct fs_context *fc) { }
static inline void kernfs_kill_sb(struct super_block *sb) { }
@@ -448,10 +580,11 @@ static inline void kernfs_init(void) { }
* @buf: buffer to copy @kn's name into
* @buflen: size of @buf
*
- * Builds and returns the full path of @kn in @buf of @buflen bytes. The
- * path is built from the end of @buf so the returned pointer usually
- * doesn't match @buf. If @buf isn't long enough, @buf is nul terminated
- * and %NULL is returned.
+ * If @kn is NULL result will be "(null)".
+ *
+ * Returns the length of the full path. If the full length is equal to or
+ * greater than @buflen, @buf contains the truncated path with the trailing
+ * '\0'. On error, -errno is returned.
*/
static inline int kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
{
@@ -474,28 +607,9 @@ static inline struct kernfs_node *
kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode,
void *priv)
{
- return kernfs_create_dir_ns(parent, name, mode, priv, NULL);
-}
-
-static inline struct kernfs_node *
-kernfs_create_file_ns(struct kernfs_node *parent, const char *name,
- umode_t mode, loff_t size, const struct kernfs_ops *ops,
- void *priv, const void *ns)
-{
- struct lock_class_key *key = NULL;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- key = (struct lock_class_key *)&ops->lockdep_key;
-#endif
- return __kernfs_create_file(parent, name, mode, size, ops, priv, ns,
- key);
-}
-
-static inline struct kernfs_node *
-kernfs_create_file(struct kernfs_node *parent, const char *name, umode_t mode,
- loff_t size, const struct kernfs_ops *ops, void *priv)
-{
- return kernfs_create_file_ns(parent, name, mode, size, ops, priv, NULL);
+ return kernfs_create_dir_ns(parent, name, mode,
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
+ priv, NULL);
}
static inline int kernfs_remove_by_name(struct kernfs_node *parent,
@@ -511,13 +625,4 @@ static inline int kernfs_rename(struct kernfs_node *kn,
return kernfs_rename_ns(kn, new_parent, new_name, NULL);
}
-static inline struct dentry *
-kernfs_mount(struct file_system_type *fs_type, int flags,
- struct kernfs_root *root, unsigned long magic,
- bool *new_sb_created)
-{
- return kernfs_mount_ns(fs_type, flags, root,
- magic, new_sb_created, NULL);
-}
-
#endif /* __LINUX_KERNFS_H */
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index dd056fab9e35..22b5cd24f581 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_KEXEC_H
#define LINUX_KEXEC_H
@@ -16,8 +17,16 @@
#include <linux/crash_core.h>
#include <asm/io.h>
+#include <linux/range.h>
#include <uapi/linux/kexec.h>
+#include <linux/verification.h>
+
+/* Location of a reserved region to hold the crash kernel.
+ */
+extern struct resource crashk_res;
+extern struct resource crashk_low_res;
+extern note_buf_t __percpu *crash_notes;
#ifdef CONFIG_KEXEC_CORE
#include <linux/list.h>
@@ -98,21 +107,25 @@ struct compat_kexec_segment {
#ifdef CONFIG_KEXEC_FILE
struct purgatory_info {
- /* Pointer to elf header of read only purgatory */
- Elf_Ehdr *ehdr;
-
- /* Pointer to purgatory sechdrs which are modifiable */
+ /*
+ * Pointer to elf header at the beginning of kexec_purgatory.
+ * Note: kexec_purgatory is read only
+ */
+ const Elf_Ehdr *ehdr;
+ /*
+ * Temporary, modifiable buffer for sechdrs used for relocation.
+ * This memory can be freed post image load.
+ */
Elf_Shdr *sechdrs;
/*
- * Temporary buffer location where purgatory is loaded and relocated
- * This memory can be freed post image load
+ * Temporary, modifiable buffer for stripped purgatory used for
+ * relocation. This memory can be freed post image load.
*/
void *purgatory_buf;
-
- /* Address where purgatory is finally loaded and is executed from */
- unsigned long purgatory_load_addr;
};
+struct kimage;
+
typedef int (kexec_probe_t)(const char *kernel_buf, unsigned long kernel_size);
typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf,
unsigned long kernel_len, char *initrd,
@@ -120,7 +133,7 @@ typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf,
unsigned long cmdline_len);
typedef int (kexec_cleanup_t)(void *loader_data);
-#ifdef CONFIG_KEXEC_VERIFY_SIG
+#ifdef CONFIG_KEXEC_SIG
typedef int (kexec_verify_sig_t)(const char *kernel_buf,
unsigned long kernel_len);
#endif
@@ -129,11 +142,25 @@ struct kexec_file_ops {
kexec_probe_t *probe;
kexec_load_t *load;
kexec_cleanup_t *cleanup;
-#ifdef CONFIG_KEXEC_VERIFY_SIG
+#ifdef CONFIG_KEXEC_SIG
kexec_verify_sig_t *verify_sig;
#endif
};
+extern const struct kexec_file_ops * const kexec_file_loaders[];
+
+int kexec_image_probe_default(struct kimage *image, void *buf,
+ unsigned long buf_len);
+int kexec_image_post_load_cleanup_default(struct kimage *image);
+
+/*
+ * If kexec_buf.mem is set to this value, kexec_locate_mem_hole()
+ * will try to allocate free memory. Arch may overwrite it.
+ */
+#ifndef KEXEC_BUF_MEM_UNKNOWN
+#define KEXEC_BUF_MEM_UNKNOWN 0
+#endif
+
/**
* struct kexec_buf - parameters for finding a place for a buffer in memory
* @image: kexec image in which memory to search.
@@ -158,12 +185,128 @@ struct kexec_buf {
bool top_down;
};
-int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
- int (*func)(u64, u64, void *));
+int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf);
+int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
+ void *buf, unsigned int size,
+ bool get_value);
+void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
+
+#ifndef arch_kexec_kernel_image_probe
+static inline int
+arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len)
+{
+ return kexec_image_probe_default(image, buf, buf_len);
+}
+#endif
+
+#ifndef arch_kimage_file_post_load_cleanup
+static inline int arch_kimage_file_post_load_cleanup(struct kimage *image)
+{
+ return kexec_image_post_load_cleanup_default(image);
+}
+#endif
+
+#ifdef CONFIG_KEXEC_SIG
+#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION
+int kexec_kernel_verify_pe_sig(const char *kernel, unsigned long kernel_len);
+#endif
+#endif
+
extern int kexec_add_buffer(struct kexec_buf *kbuf);
int kexec_locate_mem_hole(struct kexec_buf *kbuf);
+
+#ifndef arch_kexec_locate_mem_hole
+/**
+ * arch_kexec_locate_mem_hole - Find free memory to place the segments.
+ * @kbuf: Parameters for the memory search.
+ *
+ * On success, kbuf->mem will have the start address of the memory region found.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static inline int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
+{
+ return kexec_locate_mem_hole(kbuf);
+}
+#endif
+
+/* Alignment required for elf header segment */
+#define ELF_CORE_HEADER_ALIGN 4096
+
+struct crash_mem {
+ unsigned int max_nr_ranges;
+ unsigned int nr_ranges;
+ struct range ranges[];
+};
+
+extern int crash_exclude_mem_range(struct crash_mem *mem,
+ unsigned long long mstart,
+ unsigned long long mend);
+extern int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
+ void **addr, unsigned long *sz);
+
+#ifndef arch_kexec_apply_relocations_add
+/*
+ * arch_kexec_apply_relocations_add - apply relocations of type RELA
+ * @pi: Purgatory to be relocated.
+ * @section: Section relocations applying to.
+ * @relsec: Section containing RELAs.
+ * @symtab: Corresponding symtab.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static inline int
+arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
+ const Elf_Shdr *relsec, const Elf_Shdr *symtab)
+{
+ pr_err("RELA relocation unsupported.\n");
+ return -ENOEXEC;
+}
+#endif
+
+#ifndef arch_kexec_apply_relocations
+/*
+ * arch_kexec_apply_relocations - apply relocations of type REL
+ * @pi: Purgatory to be relocated.
+ * @section: Section relocations applying to.
+ * @relsec: Section containing RELs.
+ * @symtab: Corresponding symtab.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static inline int
+arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
+ const Elf_Shdr *relsec, const Elf_Shdr *symtab)
+{
+ pr_err("REL relocation unsupported.\n");
+ return -ENOEXEC;
+}
+#endif
#endif /* CONFIG_KEXEC_FILE */
+#ifdef CONFIG_KEXEC_ELF
+struct kexec_elf_info {
+ /*
+ * Where the ELF binary contents are kept.
+ * Memory managed by the user of the struct.
+ */
+ const char *buffer;
+
+ const struct elfhdr *ehdr;
+ const struct elf_phdr *proghdrs;
+};
+
+int kexec_build_elf_info(const char *buf, size_t len, struct elfhdr *ehdr,
+ struct kexec_elf_info *elf_info);
+
+int kexec_elf_load(struct kimage *image, struct elfhdr *ehdr,
+ struct kexec_elf_info *elf_info,
+ struct kexec_buf *kbuf,
+ unsigned long *lowest_load_addr);
+
+void kexec_free_elf_info(struct kexec_elf_info *elf_info);
+int kexec_elf_probe(const char *buf, unsigned long len);
+#endif
struct kimage {
kimage_entry_t head;
kimage_entry_t *entry;
@@ -208,7 +351,7 @@ struct kimage {
unsigned long cmdline_buf_len;
/* File operations provided by image loader */
- struct kexec_file_ops *fops;
+ const struct kexec_file_ops *fops;
/* Image loader handling the kernel can store a pointer here */
void *image_loader_data;
@@ -216,27 +359,33 @@ struct kimage {
/* Information for loading purgatory */
struct purgatory_info purgatory_info;
#endif
+
+#ifdef CONFIG_IMA_KEXEC
+ /* Virtual address of IMA measurement buffer for kexec syscall */
+ void *ima_buffer;
+
+ phys_addr_t ima_buffer_addr;
+ size_t ima_buffer_size;
+#endif
+
+ /* Core ELF header buffer */
+ void *elf_headers;
+ unsigned long elf_headers_sz;
+ unsigned long elf_load_addr;
};
/* kexec interface functions */
extern void machine_kexec(struct kimage *image);
extern int machine_kexec_prepare(struct kimage *image);
extern void machine_kexec_cleanup(struct kimage *image);
-extern asmlinkage long sys_kexec_load(unsigned long entry,
- unsigned long nr_segments,
- struct kexec_segment __user *segments,
- unsigned long flags);
extern int kernel_kexec(void);
extern struct page *kimage_alloc_control_pages(struct kimage *image,
unsigned int order);
-extern int kexec_load_purgatory(struct kimage *image, unsigned long min,
- unsigned long max, int top_down,
- unsigned long *load_addr);
-extern int kexec_purgatory_get_set_symbol(struct kimage *image,
- const char *name, void *buf,
- unsigned int size, bool get_value);
-extern void *kexec_purgatory_get_symbol_addr(struct kimage *image,
- const char *name);
+
+#ifndef machine_kexec_post_load
+static inline int machine_kexec_post_load(struct kimage *image) { return 0; }
+#endif
+
extern void __crash_kexec(struct pt_regs *);
extern void crash_kexec(struct pt_regs *);
int kexec_should_crash(struct task_struct *);
@@ -246,7 +395,8 @@ extern int kimage_crash_copy_vmcoreinfo(struct kimage *image);
extern struct kimage *kexec_image;
extern struct kimage *kexec_crash_image;
-extern int kexec_load_disabled;
+
+bool kexec_load_permitted(int kexec_image_type);
#ifndef kexec_flush_icache_page
#define kexec_flush_icache_page(page)
@@ -263,31 +413,26 @@ extern int kexec_load_disabled;
#define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \
KEXEC_FILE_NO_INITRAMFS)
-/* Location of a reserved region to hold the crash kernel.
- */
-extern struct resource crashk_res;
-extern struct resource crashk_low_res;
-extern note_buf_t __percpu *crash_notes;
-
/* flag to track if kexec reboot is in progress */
extern bool kexec_in_progress;
int crash_shrink_memory(unsigned long new_size);
-size_t crash_get_memory_size(void);
-void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
-
-int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
- unsigned long buf_len);
-void * __weak arch_kexec_kernel_image_load(struct kimage *image);
-int __weak arch_kimage_file_post_load_cleanup(struct kimage *image);
-int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
- unsigned long buf_len);
-int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr,
- Elf_Shdr *sechdrs, unsigned int relsec);
-int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
- unsigned int relsec);
-void arch_kexec_protect_crashkres(void);
-void arch_kexec_unprotect_crashkres(void);
+ssize_t crash_get_memory_size(void);
+
+#ifndef arch_kexec_protect_crashkres
+/*
+ * Protection mechanism for crashkernel reserved memory after
+ * the kdump kernel is loaded.
+ *
+ * Provide an empty default implementation here -- architecture
+ * code may override this
+ */
+static inline void arch_kexec_protect_crashkres(void) { }
+#endif
+
+#ifndef arch_kexec_unprotect_crashkres
+static inline void arch_kexec_unprotect_crashkres(void) { }
+#endif
#ifndef page_to_boot_pfn
static inline unsigned long page_to_boot_pfn(struct page *page)
@@ -317,6 +462,16 @@ static inline phys_addr_t boot_phys_to_phys(unsigned long boot_phys)
}
#endif
+#ifndef crash_free_reserved_phys_range
+static inline void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
+{
+ unsigned long addr;
+
+ for (addr = begin; addr < end; addr += PAGE_SIZE)
+ free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
+}
+#endif
+
static inline unsigned long virt_to_boot_phys(void *addr)
{
return phys_to_boot_phys(__pa((unsigned long)addr));
@@ -327,6 +482,14 @@ static inline void *boot_phys_to_virt(unsigned long entry)
return phys_to_virt(boot_phys_to_phys(entry));
}
+#ifndef arch_kexec_post_alloc_pages
+static inline int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp) { return 0; }
+#endif
+
+#ifndef arch_kexec_pre_free_pages
+static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { }
+#endif
+
#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
struct task_struct;
@@ -337,6 +500,12 @@ static inline int kexec_crash_loaded(void) { return 0; }
#define kexec_in_progress false
#endif /* CONFIG_KEXEC_CORE */
+#ifdef CONFIG_KEXEC_SIG
+void set_kexec_sig_enforced(void);
+#else
+static inline void set_kexec_sig_enforced(void) {}
+#endif
+
#endif /* !defined(__ASSEMBLY__) */
#endif /* LINUX_KEXEC_H */
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 8496cf64575c..7d985a1dfe4a 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Definitions for key type implementations
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
*/
#ifndef _LINUX_KEY_TYPE_H
@@ -17,14 +13,8 @@
#ifdef CONFIG_KEYS
-/*
- * key under-construction record
- * - passed to the request_key actor if supplied
- */
-struct key_construction {
- struct key *key; /* key being constructed */
- struct key *authkey;/* authorisation for key being constructed */
-};
+struct kernel_pkey_query;
+struct kernel_pkey_params;
/*
* Pre-parsed payload, used by key add, update and instantiate.
@@ -39,16 +29,16 @@ struct key_construction {
* clear the contents.
*/
struct key_preparsed_payload {
+ const char *orig_description; /* Actual or proposed description (maybe NULL) */
char *description; /* Proposed key description (or NULL) */
union key_payload payload; /* Proposed payload */
const void *data; /* Raw data */
size_t datalen; /* Raw datalen */
size_t quotalen; /* Quota length for proposed payload */
- time_t expiry; /* Expiry time of key */
-};
+ time64_t expiry; /* Expiry time of key */
+} __randomize_layout;
-typedef int (*request_key_actor_t)(struct key_construction *key,
- const char *op, void *aux);
+typedef int (*request_key_actor_t)(struct key *auth_key, void *aux);
/*
* Preparsed matching criterion.
@@ -81,6 +71,9 @@ struct key_type {
*/
size_t def_datalen;
+ unsigned int flags;
+#define KEY_TYPE_NET_DOMAIN 0x00000001 /* Keys of this type have a net namespace domain */
+
/* vet a description */
int (*vet_description)(const char *description);
@@ -135,7 +128,7 @@ struct key_type {
* much is copied into the buffer
* - shouldn't do the copy if the buffer is NULL
*/
- long (*read)(const struct key *key, char __user *buffer, size_t buflen);
+ long (*read)(const struct key *key, char *buffer, size_t buflen);
/* handle request_key() for this type instead of invoking
* /sbin/request-key (optional)
@@ -155,10 +148,18 @@ struct key_type {
*/
struct key_restriction *(*lookup_restriction)(const char *params);
+ /* Asymmetric key accessor functions. */
+ int (*asym_query)(const struct kernel_pkey_params *params,
+ struct kernel_pkey_query *info);
+ int (*asym_eds_op)(struct kernel_pkey_params *params,
+ const void *in, void *out);
+ int (*asym_verify_signature)(struct kernel_pkey_params *params,
+ const void *in, const void *in2);
+
/* internal fields */
struct list_head link; /* link in types list */
struct lock_class_key lock_class; /* key->sem lock class */
-};
+} __randomize_layout;
extern struct key_type key_type_keyring;
@@ -170,20 +171,20 @@ extern int key_instantiate_and_link(struct key *key,
const void *data,
size_t datalen,
struct key *keyring,
- struct key *instkey);
+ struct key *authkey);
extern int key_reject_and_link(struct key *key,
unsigned timeout,
unsigned error,
struct key *keyring,
- struct key *instkey);
-extern void complete_request_key(struct key_construction *cons, int error);
+ struct key *authkey);
+extern void complete_request_key(struct key *authkey, int error);
static inline int key_negate_and_link(struct key *key,
unsigned timeout,
struct key *keyring,
- struct key *instkey)
+ struct key *authkey)
{
- return key_reject_and_link(key, timeout, ENOKEY, keyring, instkey);
+ return key_reject_and_link(key, timeout, ENOKEY, keyring, authkey);
}
extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep);
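To illustrate the reworked key_type API above (kernel-buffer read(), preparse-based payload), here is a minimal sketch of a hypothetical "demo" key type; the names and the payload layout are invented for the example:

#include <linux/key-type.h>
#include <linux/slab.h>
#include <linux/string.h>

static int demo_preparse(struct key_preparsed_payload *prep)
{
	/* Stash a NUL-terminated copy of the payload for instantiate(). */
	prep->payload.data[0] = kmemdup_nul(prep->data, prep->datalen, GFP_KERNEL);
	return prep->payload.data[0] ? 0 : -ENOMEM;
}

static void demo_free_preparse(struct key_preparsed_payload *prep)
{
	kfree(prep->payload.data[0]);
}

static void demo_destroy(struct key *key)
{
	kfree(key->payload.data[0]);
}

/* Note the post-patch signature: a kernel buffer, not char __user *. */
static long demo_read(const struct key *key, char *buffer, size_t buflen)
{
	const char *p = key->payload.data[0];
	size_t len = strlen(p);

	if (buffer && buflen)
		memcpy(buffer, p, min(len, buflen));
	return len;
}

static struct key_type key_type_demo = {
	.name		= "demo",
	.preparse	= demo_preparse,
	.free_preparse	= demo_free_preparse,
	.instantiate	= generic_key_instantiate,
	.destroy	= demo_destroy,
	.read		= demo_read,
};

The type would be registered with register_key_type(&key_type_demo) and torn down with unregister_key_type(&key_type_demo).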
diff --git a/include/linux/key.h b/include/linux/key.h
index 044114185120..8dc7f7c3088b 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -1,14 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Authentication token and access key management
*
* Copyright (C) 2004, 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- *
* See Documentation/security/keys/core.rst for information on keys/keyrings.
*/
@@ -24,6 +19,7 @@
#include <linux/atomic.h>
#include <linux/assoc_array.h>
#include <linux/refcount.h>
+#include <linux/time64.h>
#ifdef __KERNEL__
#include <linux/uidgid.h>
@@ -35,6 +31,7 @@ typedef int32_t key_serial_t;
typedef uint32_t key_perm_t;
struct key;
+struct net;
#ifdef CONFIG_KEYS
@@ -74,6 +71,29 @@ struct key;
#define KEY_PERM_UNDEF 0xffffffff
+/*
+ * The permissions required on a key that we're looking up.
+ */
+enum key_need_perm {
+ KEY_NEED_UNSPECIFIED, /* Needed permission unspecified */
+ KEY_NEED_VIEW, /* Require permission to view attributes */
+ KEY_NEED_READ, /* Require permission to read content */
+ KEY_NEED_WRITE, /* Require permission to update / modify */
+ KEY_NEED_SEARCH, /* Require permission to search (keyring) or find (key) */
+ KEY_NEED_LINK, /* Require permission to link */
+ KEY_NEED_SETATTR, /* Require permission to change attributes */
+ KEY_NEED_UNLINK, /* Require permission to unlink key */
+ KEY_SYSADMIN_OVERRIDE, /* Special: override by CAP_SYS_ADMIN */
+ KEY_AUTHTOKEN_OVERRIDE, /* Special: override by possession of auth token */
+ KEY_DEFER_PERM_CHECK, /* Special: permission check is deferred */
+};
+
+enum key_lookup_flag {
+ KEY_LOOKUP_CREATE = 0x01,
+ KEY_LOOKUP_PARTIAL = 0x02,
+ KEY_LOOKUP_ALL = (KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL),
+};
+
struct seq_file;
struct user_struct;
struct signal_struct;
@@ -81,13 +101,34 @@ struct cred;
struct key_type;
struct key_owner;
+struct key_tag;
struct keyring_list;
struct keyring_name;
+struct key_tag {
+ struct rcu_head rcu;
+ refcount_t usage;
+ bool removed; /* T when subject removed */
+};
+
struct keyring_index_key {
+ /* [!] If this structure is altered, the union in struct key must change too! */
+ unsigned long hash; /* Hash value */
+ union {
+ struct {
+#ifdef __LITTLE_ENDIAN /* Put desc_len at the LSB of x */
+ u16 desc_len;
+ char desc[sizeof(long) - 2]; /* First few chars of description */
+#else
+ char desc[sizeof(long) - 2]; /* First few chars of description */
+ u16 desc_len;
+#endif
+ };
+ unsigned long x;
+ };
struct key_type *type;
+ struct key_tag *domain_tag; /* Domain of operation */
const char *description;
- size_t desc_len;
};
union key_payload {
@@ -138,6 +179,11 @@ struct key_restriction {
struct key_type *keytype;
};
+enum key_state {
+ KEY_IS_UNINSTANTIATED,
+ KEY_IS_POSITIVE, /* Positively instantiated */
+};
+
/*****************************************************************************/
/*
* authentication token / access credential / keyring
@@ -153,14 +199,17 @@ struct key {
struct list_head graveyard_link;
struct rb_node serial_node;
};
+#ifdef CONFIG_KEY_NOTIFICATIONS
+ struct watch_list *watchers; /* Entities watching this key for changes */
+#endif
struct rw_semaphore sem; /* change vs change sem */
struct key_user *user; /* owner of this key */
void *security; /* security data for this key */
union {
- time_t expiry; /* time at which key expires (or 0) */
- time_t revoked_at; /* time at which key was revoked */
+ time64_t expiry; /* time at which key expires (or 0) */
+ time64_t revoked_at; /* time at which key was revoked */
};
- time_t last_used_at; /* last time used for LRU keyring discard */
+ time64_t last_used_at; /* last time used for LRU keyring discard */
kuid_t uid;
kgid_t gid;
key_perm_t perm; /* access permissions */
@@ -169,6 +218,7 @@ struct key {
* - may not match RCU dereferenced payload
* - payload should contain own length
*/
+ short state; /* Key state (+) or rejection error (-) */
#ifdef KEY_DEBUGGING
unsigned magic;
@@ -176,17 +226,16 @@ struct key {
#endif
unsigned long flags; /* status flags (change with bitops) */
-#define KEY_FLAG_INSTANTIATED 0 /* set if key has been instantiated */
-#define KEY_FLAG_DEAD 1 /* set if key type has been deleted */
-#define KEY_FLAG_REVOKED 2 /* set if key had been revoked */
-#define KEY_FLAG_IN_QUOTA 3 /* set if key consumes quota */
-#define KEY_FLAG_USER_CONSTRUCT 4 /* set if key is being constructed in userspace */
-#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */
-#define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */
-#define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */
-#define KEY_FLAG_BUILTIN 8 /* set if key is built in to the kernel */
-#define KEY_FLAG_ROOT_CAN_INVAL 9 /* set if key can be invalidated by root without permission */
-#define KEY_FLAG_KEEP 10 /* set if key should not be removed */
+#define KEY_FLAG_DEAD 0 /* set if key type has been deleted */
+#define KEY_FLAG_REVOKED 1 /* set if key had been revoked */
+#define KEY_FLAG_IN_QUOTA 2 /* set if key consumes quota */
+#define KEY_FLAG_USER_CONSTRUCT 3 /* set if key is being constructed in userspace */
+#define KEY_FLAG_ROOT_CAN_CLEAR 4 /* set if key can be cleared by root without permission */
+#define KEY_FLAG_INVALIDATED 5 /* set if key has been invalidated */
+#define KEY_FLAG_BUILTIN 6 /* set if key is built in to the kernel */
+#define KEY_FLAG_ROOT_CAN_INVAL 7 /* set if key can be invalidated by root without permission */
+#define KEY_FLAG_KEEP 8 /* set if key should not be removed */
+#define KEY_FLAG_UID_KEYRING 9 /* set if key is a user or user session keyring */
/* the key type and key description string
* - the desc is used to match a key against search criteria
@@ -196,7 +245,10 @@ struct key {
union {
struct keyring_index_key index_key;
struct {
+ unsigned long hash;
+ unsigned long len_desc;
struct key_type *type; /* type of key */
+ struct key_tag *domain_tag; /* Domain of operation */
char *description;
};
};
@@ -212,7 +264,6 @@ struct key {
struct list_head name_link;
struct assoc_array keys;
};
- int reject_error;
};
/* This is set on a keyring to restrict the addition of a link to a key
@@ -243,10 +294,14 @@ extern struct key *key_alloc(struct key_type *type,
#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
#define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */
#define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */
+#define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */
+#define KEY_ALLOC_SET_KEEP 0x0020 /* Set the KEEP flag on the key/keyring */
extern void key_revoke(struct key *key);
extern void key_invalidate(struct key *key);
extern void key_put(struct key *key);
+extern bool key_put_tag(struct key_tag *tag);
+extern void key_remove_domain(struct key_tag *domain_tag);
static inline struct key *__key_get(struct key *key)
{
@@ -264,31 +319,81 @@ static inline void key_ref_put(key_ref_t key_ref)
key_put(key_ref_to_ptr(key_ref));
}
-extern struct key *request_key(struct key_type *type,
- const char *description,
- const char *callout_info);
+extern struct key *request_key_tag(struct key_type *type,
+ const char *description,
+ struct key_tag *domain_tag,
+ const char *callout_info);
+
+extern struct key *request_key_rcu(struct key_type *type,
+ const char *description,
+ struct key_tag *domain_tag);
extern struct key *request_key_with_auxdata(struct key_type *type,
const char *description,
+ struct key_tag *domain_tag,
const void *callout_info,
size_t callout_len,
void *aux);
-extern struct key *request_key_async(struct key_type *type,
- const char *description,
- const void *callout_info,
- size_t callout_len);
+/**
+ * request_key - Request a key and wait for construction
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ *
+ * As for request_key_tag(), but with the default global domain tag.
+ */
+static inline struct key *request_key(struct key_type *type,
+ const char *description,
+ const char *callout_info)
+{
+ return request_key_tag(type, description, NULL, callout_info);
+}
-extern struct key *request_key_async_with_auxdata(struct key_type *type,
- const char *description,
- const void *callout_info,
- size_t callout_len,
- void *aux);
+#ifdef CONFIG_NET
+/**
+ * request_key_net - Request a key for a net namespace and wait for construction
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @net: The network namespace that is the key's domain of operation.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ *
+ * As for request_key() except that it does not add the returned key to a
+ * keyring if found, new keys are always allocated in the user's quota, the
+ * callout_info must be a NUL-terminated string and no auxiliary data can be
+ * passed. Only keys that operate in the specified network namespace are used.
+ *
+ * Furthermore, it then works as wait_for_key_construction() to wait for the
+ * completion of keys undergoing construction with a non-interruptible wait.
+ */
+#define request_key_net(type, description, net, callout_info) \
+ request_key_tag(type, description, net->key_domain, callout_info)
+
+/**
+ * request_key_net_rcu - Request a key for a net namespace under RCU conditions
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @net: The network namespace that is the key's domain of operation.
+ *
+ * As for request_key_rcu() except that only keys that operate in the
+ * specified network namespace are used.
+ */
+#define request_key_net_rcu(type, description, net) \
+ request_key_rcu(type, description, net->key_domain)
+#endif /* CONFIG_NET */
extern int wait_for_key_construction(struct key *key, bool intr);
extern int key_validate(const struct key *key);
+extern key_ref_t key_create(key_ref_t keyring,
+ const char *type,
+ const char *description,
+ const void *payload,
+ size_t plen,
+ key_perm_t perm,
+ unsigned long flags);
+
extern key_ref_t key_create_or_update(key_ref_t keyring,
const char *type,
const char *description,
@@ -304,6 +409,11 @@ extern int key_update(key_ref_t key,
extern int key_link(struct key *keyring,
struct key *key);
+extern int key_move(struct key *key,
+ struct key *from_keyring,
+ struct key *to_keyring,
+ unsigned int flags);
+
extern int key_unlink(struct key *keyring,
struct key *key);
@@ -323,7 +433,8 @@ extern int keyring_clear(struct key *keyring);
extern key_ref_t keyring_search(key_ref_t keyring,
struct key_type *type,
- const char *description);
+ const char *description,
+ bool recurse);
extern int keyring_add_key(struct key *keyring,
struct key *key);
@@ -340,28 +451,31 @@ static inline key_serial_t key_serial(const struct key *key)
extern void key_set_timeout(struct key *, unsigned);
-/*
- * The permissions required on a key that we're looking up.
- */
-#define KEY_NEED_VIEW 0x01 /* Require permission to view attributes */
-#define KEY_NEED_READ 0x02 /* Require permission to read content */
-#define KEY_NEED_WRITE 0x04 /* Require permission to update / modify */
-#define KEY_NEED_SEARCH 0x08 /* Require permission to search (keyring) or find (key) */
-#define KEY_NEED_LINK 0x10 /* Require permission to link */
-#define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */
-#define KEY_NEED_ALL 0x3f /* All the above permissions */
+extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
+ enum key_need_perm need_perm);
+extern void key_free_user_ns(struct user_namespace *);
+
+static inline short key_read_state(const struct key *key)
+{
+ /* Barrier versus mark_key_instantiated(). */
+ return smp_load_acquire(&key->state);
+}
/**
- * key_is_instantiated - Determine if a key has been positively instantiated
+ * key_is_positive - Determine if a key has been positively instantiated
* @key: The key to check.
*
* Return true if the specified key has been positively instantiated, false
* otherwise.
*/
-static inline bool key_is_instantiated(const struct key *key)
+static inline bool key_is_positive(const struct key *key)
+{
+ return key_read_state(key) == KEY_IS_POSITIVE;
+}
+
+static inline bool key_is_negative(const struct key *key)
{
- return test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
- !test_bit(KEY_FLAG_NEGATIVE, &key->flags);
+ return key_read_state(key) < 0;
}
#define dereference_key_rcu(KEY) \
@@ -383,8 +497,8 @@ extern struct ctl_table key_sysctls[];
* the userspace interface
*/
extern int install_thread_keyring_to_cred(struct cred *cred);
-extern void key_fsuid_changed(struct task_struct *tsk);
-extern void key_fsgid_changed(struct task_struct *tsk);
+extern void key_fsuid_changed(struct cred *new_cred);
+extern void key_fsgid_changed(struct cred *new_cred);
extern void key_init(void);
#else /* CONFIG_KEYS */
@@ -399,9 +513,11 @@ extern void key_init(void);
#define make_key_ref(k, p) NULL
#define key_ref_to_ptr(k) NULL
#define is_key_possessed(k) 0
-#define key_fsuid_changed(t) do { } while(0)
-#define key_fsgid_changed(t) do { } while(0)
+#define key_fsuid_changed(c) do { } while(0)
+#define key_fsgid_changed(c) do { } while(0)
#define key_init() do { } while(0)
+#define key_free_user_ns(ns) do { } while(0)
+#define key_remove_domain(d) do { } while(0)
#endif /* CONFIG_KEYS */
#endif /* __KERNEL__ */
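As a usage sketch for the request_key() wrappers above, a kernel-side lookup of a "user" key might look like this (the description string is made up; key_type_user comes from <keys/user-type.h>):

#include <linux/key.h>
#include <keys/user-type.h>

static int fetch_service_cred(void)
{
	struct key *k = request_key(&key_type_user, "myservice:cred", NULL);

	if (IS_ERR(k))
		return PTR_ERR(k);
	/* ... read via dereference_key_rcu()/dereference_key_locked() ... */
	key_put(k);
	return 0;
}

A namespace-scoped caller would use request_key_net(&key_type_user, desc, net, NULL) instead, which expands to request_key_tag() with net->key_domain as the domain tag.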
diff --git a/include/linux/keyboard.h b/include/linux/keyboard.h
index 131ed5146521..73d11e4090cf 100644
--- a/include/linux/keyboard.h
+++ b/include/linux/keyboard.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KEYBOARD_H
#define __LINUX_KEYBOARD_H
diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h
new file mode 100644
index 000000000000..5b79847207ef
--- /dev/null
+++ b/include/linux/keyctl.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* keyctl kernel bits
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef __LINUX_KEYCTL_H
+#define __LINUX_KEYCTL_H
+
+#include <uapi/linux/keyctl.h>
+
+struct kernel_pkey_query {
+ __u32 supported_ops; /* Which ops are supported */
+ __u32 key_size; /* Size of the key in bits */
+ __u16 max_data_size; /* Maximum size of raw data to sign in bytes */
+ __u16 max_sig_size; /* Maximum size of signature in bytes */
+ __u16 max_enc_size; /* Maximum size of encrypted blob in bytes */
+ __u16 max_dec_size; /* Maximum size of decrypted blob in bytes */
+};
+
+enum kernel_pkey_operation {
+ kernel_pkey_encrypt,
+ kernel_pkey_decrypt,
+ kernel_pkey_sign,
+ kernel_pkey_verify,
+};
+
+struct kernel_pkey_params {
+ struct key *key;
+ const char *encoding; /* Encoding (eg. "oaep" or "raw" for none) */
+ const char *hash_algo; /* Digest algorithm used (eg. "sha1") or NULL if N/A */
+ char *info; /* Modified info string to be released later */
+ __u32 in_len; /* Input data size */
+ union {
+ __u32 out_len; /* Output buffer size (enc/dec/sign) */
+ __u32 in2_len; /* 2nd input data size (verify) */
+ };
+ enum kernel_pkey_operation op : 8;
+};
+
+#endif /* __LINUX_KEYCTL_H */
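A rough sketch of how the new asym_* key-type hooks consume these structures, approximating what the keyctl_pkey_*() paths do (data, data_len and sig_buf are hypothetical; KEYCTL_SUPPORTS_SIGN comes from the UAPI header):

struct kernel_pkey_params params = {
	.key		= key,			/* an asymmetric-type key */
	.encoding	= "pkcs1",
	.hash_algo	= "sha256",
	.op		= kernel_pkey_sign,
};
struct kernel_pkey_query info;
int ret;

ret = key->type->asym_query(&params, &info);
if (ret == 0 && (info.supported_ops & KEYCTL_SUPPORTS_SIGN)) {
	params.in_len  = data_len;		/* bytes to sign */
	params.out_len = info.max_sig_size;	/* signature buffer size */
	ret = key->type->asym_eds_op(&params, data, sig_buf);
}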
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
new file mode 100644
index 000000000000..726857a4b680
--- /dev/null
+++ b/include/linux/kfence.h
@@ -0,0 +1,250 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Kernel Electric-Fence (KFENCE). Public interface for allocator and fault
+ * handler integration. For more info see Documentation/dev-tools/kfence.rst.
+ *
+ * Copyright (C) 2020, Google LLC.
+ */
+
+#ifndef _LINUX_KFENCE_H
+#define _LINUX_KFENCE_H
+
+#include <linux/mm.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KFENCE
+
+#include <linux/atomic.h>
+#include <linux/static_key.h>
+
+extern unsigned long kfence_sample_interval;
+
+/*
+ * We allocate an even number of pages, as it simplifies calculations to map
+ * address to metadata indices; effectively, the very first page serves as an
+ * extended guard page, but otherwise has no special purpose.
+ */
+#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
+extern char *__kfence_pool;
+
+DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
+extern atomic_t kfence_allocation_gate;
+
+/**
+ * is_kfence_address() - check if an address belongs to KFENCE pool
+ * @addr: address to check
+ *
+ * Return: true or false depending on whether the address is within the KFENCE
+ * object range.
+ *
+ * KFENCE objects live in a separate page range and are not to be intermixed
+ * with regular heap objects (e.g. KFENCE objects must never be added to the
+ * allocator freelists). Failing to do so may and will result in heap
+ * corruptions, therefore is_kfence_address() must be used to check whether
+ * an object requires specific handling.
+ *
+ * Note: This function may be used in fast-paths, and is performance critical.
+ * Future changes should take this into account; for instance, we want to avoid
+ * introducing another load and therefore need to keep KFENCE_POOL_SIZE a
+ * constant (until immediate patching support is added to the kernel).
+ */
+static __always_inline bool is_kfence_address(const void *addr)
+{
+ /*
+ * The __kfence_pool != NULL check is required to deal with the case
+ * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in
+ * the slow-path after the range-check!
+ */
+ return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool);
+}
+
+/**
+ * kfence_alloc_pool() - allocate the KFENCE pool via memblock
+ */
+void __init kfence_alloc_pool(void);
+
+/**
+ * kfence_init() - perform KFENCE initialization at boot time
+ *
+ * Requires that kfence_alloc_pool() was called before. This sets up the
+ * allocation gate timer, and requires that workqueues are available.
+ */
+void __init kfence_init(void);
+
+/**
+ * kfence_shutdown_cache() - handle shutdown_cache() for KFENCE objects
+ * @s: cache being shut down
+ *
+ * Before shutting down a cache, one must ensure there are no remaining objects
+ * allocated from it. Because KFENCE objects are not referenced from the cache
+ * directly, we need to check them here.
+ *
+ * Note that shutdown_cache() is internal to SL*B, and kmem_cache_destroy() does
+ * not return if allocated objects still exist: it prints an error message and
+ * simply aborts destruction of a cache, leaking memory.
+ *
+ * If the only such objects are KFENCE objects, we will not leak the entire
+ * cache, but instead try to provide more useful debug info by making allocated
+ * objects "zombie allocations". Objects may then still be used or freed (which
+ * is handled gracefully), but usage will result in showing KFENCE error reports
+ * which include stack traces to the user of the object, the original allocation
+ * site, and caller to shutdown_cache().
+ */
+void kfence_shutdown_cache(struct kmem_cache *s);
+
+/*
+ * Allocate a KFENCE object. Allocators must not call this function directly,
+ * use kfence_alloc() instead.
+ */
+void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags);
+
+/**
+ * kfence_alloc() - allocate a KFENCE object with a low probability
+ * @s: struct kmem_cache with object requirements
+ * @size: exact size of the object to allocate (can be less than @s->size
+ * e.g. for kmalloc caches)
+ * @flags: GFP flags
+ *
+ * Return:
+ * * NULL - must proceed with allocating as usual,
+ * * non-NULL - pointer to a KFENCE object.
+ *
+ * kfence_alloc() should be inserted into the heap allocation fast path,
+ * allowing it to transparently return KFENCE-allocated objects with a low
+ * probability using a static branch (the probability is controlled by the
+ * kfence.sample_interval boot parameter).
+ */
+static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
+{
+#if defined(CONFIG_KFENCE_STATIC_KEYS) || CONFIG_KFENCE_SAMPLE_INTERVAL == 0
+ if (!static_branch_unlikely(&kfence_allocation_key))
+ return NULL;
+#else
+ if (!static_branch_likely(&kfence_allocation_key))
+ return NULL;
+#endif
+ if (likely(atomic_read(&kfence_allocation_gate)))
+ return NULL;
+ return __kfence_alloc(s, size, flags);
+}
+
+/**
+ * kfence_ksize() - get actual amount of memory allocated for a KFENCE object
+ * @addr: pointer to a heap object
+ *
+ * Return:
+ * * 0 - not a KFENCE object, must call __ksize() instead,
+ * * non-0 - this many bytes can be accessed without causing a memory error.
+ *
+ * kfence_ksize() returns the number of bytes requested for a KFENCE object at
+ * allocation time. This number may be less than the object size of the
+ * corresponding struct kmem_cache.
+ */
+size_t kfence_ksize(const void *addr);
+
+/**
+ * kfence_object_start() - find the beginning of a KFENCE object
+ * @addr: address within a KFENCE-allocated object
+ *
+ * Return: address of the beginning of the object.
+ *
+ * SL[AU]B-allocated objects are laid out within a page one by one, so it is
+ * easy to calculate the beginning of an object given a pointer inside it and
+ * the object size. The same is not true for KFENCE, which places a single
+ * object at either end of the page. This helper function is used to find the
+ * beginning of a KFENCE-allocated object.
+ */
+void *kfence_object_start(const void *addr);
+
+/**
+ * __kfence_free() - release a KFENCE heap object to KFENCE pool
+ * @addr: object to be freed
+ *
+ * Requires: is_kfence_address(addr)
+ *
+ * Release a KFENCE object and mark it as freed.
+ */
+void __kfence_free(void *addr);
+
+/**
+ * kfence_free() - try to release an arbitrary heap object to KFENCE pool
+ * @addr: object to be freed
+ *
+ * Return:
+ * * false - object doesn't belong to KFENCE pool and was ignored,
+ * * true - object was released to KFENCE pool.
+ *
+ * Release a KFENCE object and mark it as freed. May be called on any object,
+ * even non-KFENCE objects, to simplify integration of the hooks into the
+ * allocator's free codepath. The allocator must check the return value to
+ * determine if it was a KFENCE object or not.
+ */
+static __always_inline __must_check bool kfence_free(void *addr)
+{
+ if (!is_kfence_address(addr))
+ return false;
+ __kfence_free(addr);
+ return true;
+}
+
+/**
+ * kfence_handle_page_fault() - perform page fault handling for KFENCE pages
+ * @addr: faulting address
+ * @is_write: is access a write
+ * @regs: current struct pt_regs (can be NULL, but shows full stack trace)
+ *
+ * Return:
+ * * false - address outside KFENCE pool,
+ * * true - page fault handled by KFENCE, no additional handling required.
+ *
+ * A page fault inside KFENCE pool indicates a memory error, such as an
+ * out-of-bounds access, a use-after-free or an invalid memory access. In these
+ * cases KFENCE prints an error message and marks the offending page as
+ * present, so that the kernel can proceed.
+ */
+bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);
+
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+/**
+ * __kfence_obj_info() - fill kmem_obj_info struct
+ * @kpp: kmem_obj_info to be filled
+ * @object: the object
+ *
+ * Return:
+ * * false - not a KFENCE object
+ * * true - a KFENCE object, @kpp has been filled
+ *
+ * Copies information to @kpp for KFENCE objects.
+ */
+bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
+#endif
+
+#else /* CONFIG_KFENCE */
+
+static inline bool is_kfence_address(const void *addr) { return false; }
+static inline void kfence_alloc_pool(void) { }
+static inline void kfence_init(void) { }
+static inline void kfence_shutdown_cache(struct kmem_cache *s) { }
+static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; }
+static inline size_t kfence_ksize(const void *addr) { return 0; }
+static inline void *kfence_object_start(const void *addr) { return NULL; }
+static inline void __kfence_free(void *addr) { }
+static inline bool __must_check kfence_free(void *addr) { return false; }
+static inline bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write,
+ struct pt_regs *regs)
+{
+ return false;
+}
+
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+ return false;
+}
+#endif
+
+#endif
+
+#endif /* _LINUX_KFENCE_H */
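The intended allocator integration of kfence_alloc()/kfence_free() described above boils down to a guard at the top of the fast paths; a simplified sketch, where normal_slab_alloc()/normal_slab_free() are stand-ins for the real SL*B paths:

static void *cache_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
	void *obj = kfence_alloc(s, size, flags);	/* almost always NULL */

	if (unlikely(obj))
		return obj;		/* KFENCE-guarded allocation */
	return normal_slab_alloc(s, size, flags);
}

static void cache_free(void *obj)
{
	if (kfence_free(obj))		/* true: KFENCE owned the object */
		return;
	normal_slab_free(obj);
}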
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 41eb6fdf87a8..0b35a41440ff 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -1,22 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* A generic kernel FIFO implementation
*
* Copyright (C) 2013 Stefani Seibold <stefani@seibold.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef _LINUX_KFIFO_H
@@ -41,11 +27,11 @@
*/
/*
- * Note about locking : There is no locking required until only * one reader
- * and one writer is using the fifo and no kfifo_reset() will be * called
- * kfifo_reset_out() can be safely used, until it will be only called
+ * Note about locking: There is no locking required as long as only one
+ * reader and one writer are using the fifo and kfifo_reset() is not called.
+ * kfifo_reset_out() can be used safely as long as it is only called
 * in the reader thread.
- * For multiple writer and one reader there is only a need to lock the writer.
+ * For multiple writers and one reader there is only a need to lock the writer.
 * And vice versa: for only one writer and multiple readers there is only a need
 * to lock the reader.
*/
@@ -113,7 +99,8 @@ struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void);
* array is a part of the structure and the fifo type where the array is
* outside of the fifo structure.
*/
-#define __is_kfifo_ptr(fifo) (sizeof(*fifo) == sizeof(struct __kfifo))
+#define __is_kfifo_ptr(fifo) \
+ (sizeof(*fifo) == sizeof(STRUCT_KFIFO_PTR(typeof(*(fifo)->type))))
/**
* DECLARE_KFIFO_PTR - macro to declare a fifo pointer object
@@ -260,6 +247,37 @@ __kfifo_int_must_check_helper(int val)
})
/**
+ * kfifo_is_empty_spinlocked - returns true if the fifo is empty using
+ * a spinlock for locking
+ * @fifo: address of the fifo to be used
+ * @lock: spinlock to be used for locking
+ */
+#define kfifo_is_empty_spinlocked(fifo, lock) \
+({ \
+ unsigned long __flags; \
+ bool __ret; \
+ spin_lock_irqsave(lock, __flags); \
+ __ret = kfifo_is_empty(fifo); \
+ spin_unlock_irqrestore(lock, __flags); \
+ __ret; \
+})
+
+/**
+ * kfifo_is_empty_spinlocked_noirqsave - returns true if the fifo is empty
+ * using a spinlock for locking, doesn't disable interrupts
+ * @fifo: address of the fifo to be used
+ * @lock: spinlock to be used for locking
+ */
+#define kfifo_is_empty_spinlocked_noirqsave(fifo, lock) \
+({ \
+ bool __ret; \
+ spin_lock(lock); \
+ __ret = kfifo_is_empty(fifo); \
+ spin_unlock(lock); \
+ __ret; \
+})
+
+/**
* kfifo_is_full - returns true if the fifo is full
* @fifo: address of the fifo to be used
*/
@@ -325,7 +343,7 @@ __kfifo_uint_must_check_helper( \
*
* This macro dynamically allocates a new fifo buffer.
*
- * The numer of elements will be rounded-up to a power of 2.
+ * The number of elements will be rounded-up to a power of 2.
* The fifo will be release with kfifo_free().
* Return 0 if no error, otherwise an error code.
*/
@@ -358,9 +376,9 @@ __kfifo_int_must_check_helper( \
* @buffer: the preallocated buffer to be used
* @size: the size of the internal buffer, this have to be a power of 2
*
- * This macro initialize a fifo using a preallocated buffer.
+ * This macro initializes a fifo using a preallocated buffer.
*
- * The numer of elements will be rounded-up to a power of 2.
+ * The number of elements will be rounded-up to a power of 2.
* Return 0 if no error, otherwise an error code.
*/
#define kfifo_init(fifo, buffer, size) \
@@ -530,6 +548,26 @@ __kfifo_uint_must_check_helper( \
__ret; \
})
+/**
+ * kfifo_in_spinlocked_noirqsave - put data into fifo using a spinlock for
+ * locking, don't disable interrupts
+ * @fifo: address of the fifo to be used
+ * @buf: the data to be added
+ * @n: number of elements to be added
+ * @lock: pointer to the spinlock to use for locking
+ *
+ * This is a variant of kfifo_in_spinlocked() but uses spin_lock/unlock()
+ * for locking and doesn't disable interrupts.
+ */
+#define kfifo_in_spinlocked_noirqsave(fifo, buf, n, lock) \
+({ \
+ unsigned int __ret; \
+ spin_lock(lock); \
+ __ret = kfifo_in(fifo, buf, n); \
+ spin_unlock(lock); \
+ __ret; \
+})
+
/* alias for kfifo_in_spinlocked, will be removed in a future release */
#define kfifo_in_locked(fifo, buf, n, lock) \
kfifo_in_spinlocked(fifo, buf, n, lock)
@@ -582,6 +620,28 @@ __kfifo_uint_must_check_helper( \
}) \
)
+/**
+ * kfifo_out_spinlocked_noirqsave - get data from the fifo using a spinlock
+ * for locking, don't disable interrupts
+ * @fifo: address of the fifo to be used
+ * @buf: pointer to the storage buffer
+ * @n: max. number of elements to get
+ * @lock: pointer to the spinlock to use for locking
+ *
+ * This is a variant of kfifo_out_spinlocked() which uses spin_lock/unlock()
+ * for locking and doesn't disable interrupts.
+ */
+#define kfifo_out_spinlocked_noirqsave(fifo, buf, n, lock) \
+__kfifo_uint_must_check_helper( \
+({ \
+ unsigned int __ret; \
+ spin_lock(lock); \
+ __ret = kfifo_out(fifo, buf, n); \
+ spin_unlock(lock); \
+ __ret; \
+}) \
+)
+
/* alias for kfifo_out_spinlocked, will be removed in a future release */
#define kfifo_out_locked(fifo, buf, n, lock) \
kfifo_out_spinlocked(fifo, buf, n, lock)
@@ -628,7 +688,7 @@ __kfifo_uint_must_check_helper( \
* writer, you don't need extra locking to use these macros.
*/
#define kfifo_to_user(fifo, to, len, copied) \
-__kfifo_uint_must_check_helper( \
+__kfifo_int_must_check_helper( \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
void __user *__to = (to); \
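A usage sketch for the new *_noirqsave variants added above, for a producer that already runs with interrupts disabled (all names hypothetical):

#include <linux/kfifo.h>
#include <linux/spinlock.h>

static DEFINE_KFIFO(evt_fifo, int, 64);		/* size must be a power of 2 */
static DEFINE_SPINLOCK(evt_lock);

static void push_event(int v)			/* caller has IRQs disabled */
{
	kfifo_in_spinlocked_noirqsave(&evt_fifo, &v, 1, &evt_lock);
}

static bool pop_event(int *v)
{
	return kfifo_out_spinlocked_noirqsave(&evt_fifo, v, 1, &evt_lock) == 1;
}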
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index e465bb15912d..258cdde8d356 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -16,6 +16,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/atomic.h>
+#include <linux/kprobes.h>
#ifdef CONFIG_HAVE_ARCH_KGDB
#include <asm/kgdb.h>
#endif
@@ -104,9 +105,9 @@ extern int dbg_set_reg(int regno, void *mem, struct pt_regs *regs);
*/
/**
- * kgdb_arch_init - Perform any architecture specific initalization.
+ * kgdb_arch_init - Perform any architecture specific initialization.
*
- * This function will handle the initalization of any architecture
+ * This function will handle the initialization of any architecture
* specific callbacks.
*/
extern int kgdb_arch_init(void);
@@ -177,22 +178,39 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code,
struct pt_regs *regs);
/**
+ * kgdb_arch_handle_qxfer_pkt - Handle architecture specific GDB XML
+ * packets.
+ * @remcom_in_buffer: The buffer of the packet we have read.
+ * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
+ */
+
+extern void
+kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
+ char *remcom_out_buffer);
+
+/**
+ * kgdb_call_nmi_hook - Call kgdb_nmicallback() on the current CPU
+ * @ignored: This parameter is only here to match the prototype.
+ *
+ * If you're using the default implementation of kgdb_roundup_cpus()
+ * this function will be called per CPU. If you don't implement
+ * kgdb_call_nmi_hook() a default will be used.
+ */
+
+extern void kgdb_call_nmi_hook(void *ignored);
+
+/**
* kgdb_roundup_cpus - Get other CPUs into a holding pattern
- * @flags: Current IRQ state
*
* On SMP systems, we need to get the attention of the other CPUs
* and get them into a known state. This should do what is needed
* to get the other CPUs to call kgdb_wait(). Note that on some arches,
- * the NMI approach is not used for rounding up all the CPUs. For example,
- * in case of MIPS, smp_call_function() is used to roundup CPUs. In
- * this case, we have to make sure that interrupts are enabled before
- * calling smp_call_function(). The argument to this function is
- * the flags that will be used when restoring the interrupts. There is
- * local_irq_save() call before kgdb_roundup_cpus().
+ * the NMI approach is not used for rounding up all the CPUs. Normally
+ * those architectures can just not implement this and get the default.
*
* On non-SMP systems, this is not called.
*/
-extern void kgdb_roundup_cpus(unsigned long flags);
+extern void kgdb_roundup_cpus(void);
/**
* kgdb_arch_set_pc - Generic call back to the program counter
@@ -211,9 +229,9 @@ extern int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt);
extern int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt);
/**
- * kgdb_arch_late - Perform any architecture specific initalization.
+ * kgdb_arch_late - Perform any architecture specific initialization.
*
- * This function will handle the late initalization of any
+ * This function will handle the late initialization of any
* architecture specific callbacks. This is an optional function for
* handling things like late initialization of hw breakpoints. The
* default implementation does nothing.
@@ -263,12 +281,14 @@ struct kgdb_arch {
* @write_char: Pointer to a function that will write one char.
* @flush: Pointer to a function that will flush any pending writes.
* @init: Pointer to a function that will initialize the device.
+ * @deinit: Pointer to a function that will deinit the device. Implies that
+ * this I/O driver is temporary and expects to be replaced. Called when
+ * an I/O driver is replaced or explicitly unregistered.
* @pre_exception: Pointer to a function that will do any prep work for
* the I/O driver.
* @post_exception: Pointer to a function that will do any cleanup work
* for the I/O driver.
- * @is_console: 1 if the end device is a console 0 if the I/O device is
- * not a console
+ * @cons: valid if the I/O device is a console; else NULL.
*/
struct kgdb_io {
const char *name;
@@ -276,12 +296,13 @@ struct kgdb_io {
void (*write_char) (u8);
void (*flush) (void);
int (*init) (void);
+ void (*deinit) (void);
void (*pre_exception) (void);
void (*post_exception) (void);
- int is_console;
+ struct console *cons;
};
-extern struct kgdb_arch arch_kgdb_ops;
+extern const struct kgdb_arch arch_kgdb_ops;
extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
@@ -292,7 +313,7 @@ extern bool kgdb_nmi_poll_knock(void);
#else
static inline int kgdb_register_nmi_console(void) { return 0; }
static inline int kgdb_unregister_nmi_console(void) { return 0; }
-static inline bool kgdb_nmi_poll_knock(void) { return 1; }
+static inline bool kgdb_nmi_poll_knock(void) { return true; }
#endif
extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
@@ -304,7 +325,7 @@ extern char *kgdb_mem2hex(char *mem, char *buf, int count);
extern int kgdb_hex2mem(char *buf, char *mem, int count);
extern int kgdb_isremovedbreak(unsigned long addr);
-extern void kgdb_schedule_breakpoint(void);
+extern int kgdb_has_hit_break(unsigned long addr);
extern int
kgdb_handle_exception(int ex_vector, int signo, int err_code,
@@ -314,14 +335,35 @@ extern int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
atomic_t *snd_rdy);
extern void gdbstub_exit(int status);
+/*
+ * kgdb and kprobes both use the same (kprobe) blocklist (which makes sense
+ * given they are both typically hooked up to the same trap meaning on most
+ * architectures one cannot be used to debug the other)
+ *
+ * However on architectures where kprobes is not (yet) implemented we permit
+ * breakpoints everywhere rather than blocking everything by default.
+ */
+static inline bool kgdb_within_blocklist(unsigned long addr)
+{
+#ifdef CONFIG_KGDB_HONOUR_BLOCKLIST
+ return within_kprobe_blacklist(addr);
+#else
+ return false;
+#endif
+}
+
extern int kgdb_single_step;
extern atomic_t kgdb_active;
#define in_dbg_master() \
- (raw_smp_processor_id() == atomic_read(&kgdb_active))
+ (irqs_disabled() && (smp_processor_id() == atomic_read(&kgdb_active)))
extern bool dbg_is_early;
extern void __init dbg_late_init(void);
+extern void kgdb_panic(const char *msg);
+extern void kgdb_free_init_mem(void);
#else /* ! CONFIG_KGDB */
#define in_dbg_master() (0)
#define dbg_late_init()
+static inline void kgdb_panic(const char *msg) {}
+static inline void kgdb_free_init_mem(void) { }
#endif /* ! CONFIG_KGDB */
#endif /* _KGDB_H_ */
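Against the updated struct kgdb_io (deinit() added, is_console replaced by a console pointer), a registration might be sketched as follows, with hypothetical UART callbacks:

static struct kgdb_io my_uart_kgdb_io = {
	.name		= "my-uart-dbg",
	.read_char	= my_uart_get_char,	/* int (*)(void) */
	.write_char	= my_uart_put_char,	/* void (*)(u8) */
	.deinit		= my_uart_dbg_teardown,	/* new: driver may be replaced */
	.cons		= &my_uart_console,	/* was .is_console = 1 */
};

/* kgdb_register_io_module(&my_uart_kgdb_io); */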
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index f0d7335336cd..f68865e19b0b 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -1,39 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KHUGEPAGED_H
#define _LINUX_KHUGEPAGED_H
#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern struct attribute_group khugepaged_attr_group;
extern int khugepaged_init(void);
extern void khugepaged_destroy(void);
extern int start_stop_khugepaged(void);
-extern int __khugepaged_enter(struct mm_struct *mm);
+extern void __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
-extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
- unsigned long vm_flags);
-
-#define khugepaged_enabled() \
- (transparent_hugepage_flags & \
- ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \
- (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
-#define khugepaged_always() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_FLAG))
-#define khugepaged_req_madv() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
-#define khugepaged_defrag() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
+extern void khugepaged_enter_vma(struct vm_area_struct *vma,
+ unsigned long vm_flags);
+extern void khugepaged_min_free_kbytes_update(void);
+extern bool current_is_khugepaged(void);
+#ifdef CONFIG_SHMEM
+extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
+ bool install_pmd);
+#else
+static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
+ unsigned long addr, bool install_pmd)
+{
+ return 0;
+}
+#endif
-static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
- return __khugepaged_enter(mm);
- return 0;
+ __khugepaged_enter(mm);
}
static inline void khugepaged_exit(struct mm_struct *mm)
@@ -41,37 +38,31 @@ static inline void khugepaged_exit(struct mm_struct *mm)
if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
__khugepaged_exit(mm);
}
-
-static inline int khugepaged_enter(struct vm_area_struct *vma,
- unsigned long vm_flags)
-{
- if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
- if ((khugepaged_always() ||
- (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
- !(vm_flags & VM_NOHUGEPAGE) &&
- !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
- if (__khugepaged_enter(vma->vm_mm))
- return -ENOMEM;
- return 0;
-}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
- return 0;
}
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
-static inline int khugepaged_enter(struct vm_area_struct *vma,
- unsigned long vm_flags)
+static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
+ unsigned long vm_flags)
{
- return 0;
}
-static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
- unsigned long vm_flags)
+static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
+ unsigned long addr, bool install_pmd)
{
return 0;
}
+
+static inline void khugepaged_min_free_kbytes_update(void)
+{
+}
+
+static inline bool current_is_khugepaged(void)
+{
+ return false;
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* _LINUX_KHUGEPAGED_H */
diff --git a/include/linux/klist.h b/include/linux/klist.h
index 953f283f8451..b0f238f20dbb 100644
--- a/include/linux/klist.h
+++ b/include/linux/klist.h
@@ -1,12 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* klist.h - Some generic list helpers, extending struct list_head a bit.
*
* Implementations are found in lib/klist.c
*
- *
* Copyright (C) 2005 Patrick Mochel
- *
- * This file is rleased under the GPL v2.
*/
#ifndef _LINUX_KLIST_H
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
deleted file mode 100644
index 39f8453239f7..000000000000
--- a/include/linux/kmemcheck.h
+++ /dev/null
@@ -1,171 +0,0 @@
-#ifndef LINUX_KMEMCHECK_H
-#define LINUX_KMEMCHECK_H
-
-#include <linux/mm_types.h>
-#include <linux/types.h>
-
-#ifdef CONFIG_KMEMCHECK
-extern int kmemcheck_enabled;
-
-/* The slab-related functions. */
-void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
-void kmemcheck_free_shadow(struct page *page, int order);
-void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
- size_t size);
-void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
-
-void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
- gfp_t gfpflags);
-
-void kmemcheck_show_pages(struct page *p, unsigned int n);
-void kmemcheck_hide_pages(struct page *p, unsigned int n);
-
-bool kmemcheck_page_is_tracked(struct page *p);
-
-void kmemcheck_mark_unallocated(void *address, unsigned int n);
-void kmemcheck_mark_uninitialized(void *address, unsigned int n);
-void kmemcheck_mark_initialized(void *address, unsigned int n);
-void kmemcheck_mark_freed(void *address, unsigned int n);
-
-void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
-void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
-void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
-
-int kmemcheck_show_addr(unsigned long address);
-int kmemcheck_hide_addr(unsigned long address);
-
-bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
-
-/*
- * Bitfield annotations
- *
- * How to use: If you have a struct using bitfields, for example
- *
- * struct a {
- * int x:8, y:8;
- * };
- *
- * then this should be rewritten as
- *
- * struct a {
- * kmemcheck_bitfield_begin(flags);
- * int x:8, y:8;
- * kmemcheck_bitfield_end(flags);
- * };
- *
- * Now the "flags_begin" and "flags_end" members may be used to refer to the
- * beginning and end, respectively, of the bitfield (and things like
- * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
- * fields should be annotated:
- *
- * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
- * kmemcheck_annotate_bitfield(a, flags);
- */
-#define kmemcheck_bitfield_begin(name) \
- int name##_begin[0];
-
-#define kmemcheck_bitfield_end(name) \
- int name##_end[0];
-
-#define kmemcheck_annotate_bitfield(ptr, name) \
- do { \
- int _n; \
- \
- if (!ptr) \
- break; \
- \
- _n = (long) &((ptr)->name##_end) \
- - (long) &((ptr)->name##_begin); \
- BUILD_BUG_ON(_n < 0); \
- \
- kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
- } while (0)
-
-#define kmemcheck_annotate_variable(var) \
- do { \
- kmemcheck_mark_initialized(&(var), sizeof(var)); \
- } while (0) \
-
-#else
-#define kmemcheck_enabled 0
-
-static inline void
-kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
-{
-}
-
-static inline void
-kmemcheck_free_shadow(struct page *page, int order)
-{
-}
-
-static inline void
-kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
- size_t size)
-{
-}
-
-static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
- size_t size)
-{
-}
-
-static inline void kmemcheck_pagealloc_alloc(struct page *p,
- unsigned int order, gfp_t gfpflags)
-{
-}
-
-static inline bool kmemcheck_page_is_tracked(struct page *p)
-{
- return false;
-}
-
-static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
-{
-}
-
-static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
-{
-}
-
-static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
-{
-}
-
-static inline void kmemcheck_mark_freed(void *address, unsigned int n)
-{
-}
-
-static inline void kmemcheck_mark_unallocated_pages(struct page *p,
- unsigned int n)
-{
-}
-
-static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
- unsigned int n)
-{
-}
-
-static inline void kmemcheck_mark_initialized_pages(struct page *p,
- unsigned int n)
-{
-}
-
-static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
-{
- return true;
-}
-
-#define kmemcheck_bitfield_begin(name)
-#define kmemcheck_bitfield_end(name)
-#define kmemcheck_annotate_bitfield(ptr, name) \
- do { \
- } while (0)
-
-#define kmemcheck_annotate_variable(var) \
- do { \
- } while (0)
-
-#endif /* CONFIG_KMEMCHECK */
-
-#endif /* LINUX_KMEMCHECK_H */
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 590343f6c1b1..6a3cd1bf4680 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/kmemleak.h
*
* Copyright (C) 2008 ARM Limited
* Written by Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __KMEMLEAK_H
@@ -41,21 +29,20 @@ extern void kmemleak_not_leak(const void *ptr) __ref;
extern void kmemleak_ignore(const void *ptr) __ref;
extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
extern void kmemleak_no_scan(const void *ptr) __ref;
-extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
+extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
gfp_t gfp) __ref;
extern void kmemleak_free_part_phys(phys_addr_t phys, size_t size) __ref;
-extern void kmemleak_not_leak_phys(phys_addr_t phys) __ref;
extern void kmemleak_ignore_phys(phys_addr_t phys) __ref;
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
- int min_count, unsigned long flags,
+ int min_count, slab_flags_t flags,
gfp_t gfp)
{
if (!(flags & SLAB_NOLEAKTRACE))
kmemleak_alloc(ptr, size, min_count, gfp);
}
-static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
+static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags)
{
if (!(flags & SLAB_NOLEAKTRACE))
kmemleak_free(ptr);
@@ -76,7 +63,7 @@ static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
{
}
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
- int min_count, unsigned long flags,
+ int min_count, slab_flags_t flags,
gfp_t gfp)
{
}
@@ -94,7 +81,7 @@ static inline void kmemleak_free(const void *ptr)
static inline void kmemleak_free_part(const void *ptr, size_t size)
{
}
-static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
+static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags)
{
}
static inline void kmemleak_free_percpu(const void __percpu *ptr)
@@ -119,15 +106,12 @@ static inline void kmemleak_no_scan(const void *ptr)
{
}
static inline void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
- int min_count, gfp_t gfp)
+ gfp_t gfp)
{
}
static inline void kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
}
-static inline void kmemleak_not_leak_phys(phys_addr_t phys)
-{
-}
static inline void kmemleak_ignore_phys(phys_addr_t phys)
{
}
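Typical annotation usage for the API above, sketched for an object whose only reference lives somewhere kmemleak cannot scan (a device register, say):

buf = kmalloc(SZ_4K, GFP_KERNEL);
kmemleak_ignore(buf);	/* never report this object, don't scan it */

/* Post-patch, the physical-range variant drops the min_count argument: */
kmemleak_alloc_phys(phys, SZ_4K, GFP_KERNEL);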
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index c4e441e00db5..68f69362d427 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -1,24 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __LINUX_KMOD_H__
#define __LINUX_KMOD_H__
/*
* include/linux/kmod.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/umh.h>
#include <linux/gfp.h>
#include <linux/stddef.h>
#include <linux/errno.h>
@@ -44,63 +32,4 @@ static inline int request_module_nowait(const char *name, ...) { return -ENOSYS;
#define try_then_request_module(x, mod...) (x)
#endif
-
-struct cred;
-struct file;
-
-#define UMH_NO_WAIT 0 /* don't wait at all */
-#define UMH_WAIT_EXEC 1 /* wait for the exec, but not the process */
-#define UMH_WAIT_PROC 2 /* wait for the process to complete */
-#define UMH_KILLABLE 4 /* wait for EXEC/PROC killable */
-
-struct subprocess_info {
- struct work_struct work;
- struct completion *complete;
- const char *path;
- char **argv;
- char **envp;
- int wait;
- int retval;
- int (*init)(struct subprocess_info *info, struct cred *new);
- void (*cleanup)(struct subprocess_info *info);
- void *data;
-};
-
-extern int
-call_usermodehelper(const char *path, char **argv, char **envp, int wait);
-
-extern struct subprocess_info *
-call_usermodehelper_setup(const char *path, char **argv, char **envp,
- gfp_t gfp_mask,
- int (*init)(struct subprocess_info *info, struct cred *new),
- void (*cleanup)(struct subprocess_info *), void *data);
-
-extern int
-call_usermodehelper_exec(struct subprocess_info *info, int wait);
-
-extern struct ctl_table usermodehelper_table[];
-
-enum umh_disable_depth {
- UMH_ENABLED = 0,
- UMH_FREEZING,
- UMH_DISABLED,
-};
-
-extern int __usermodehelper_disable(enum umh_disable_depth depth);
-extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth);
-
-static inline int usermodehelper_disable(void)
-{
- return __usermodehelper_disable(UMH_DISABLED);
-}
-
-static inline void usermodehelper_enable(void)
-{
- __usermodehelper_set_disable_depth(UMH_ENABLED);
-}
-
-extern int usermodehelper_read_trylock(void);
-extern long usermodehelper_read_lock_wait(long timeout);
-extern void usermodehelper_read_unlock(void);
-
#endif /* __LINUX_KMOD_H__ */
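What survives in this header after the usermodehelper split into <linux/umh.h> is the module-request API; typical callers look like the following sketch (fstype, lookup_driver() and the "drv-%s" alias are made up for the example):

/* Load a module by alias and fail if modprobe cannot provide it. */
if (request_module("fs-%s", fstype) != 0)
	return -ENODEV;

/* Or probe first and only load the module when the lookup fails: */
drv = try_then_request_module(lookup_driver(name), "drv-%s", name);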
diff --git a/include/linux/kmsan-checks.h b/include/linux/kmsan-checks.h
new file mode 100644
index 000000000000..c4cae333deec
--- /dev/null
+++ b/include/linux/kmsan-checks.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KMSAN checks to be used for one-off annotations in subsystems.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+
+#ifndef _LINUX_KMSAN_CHECKS_H
+#define _LINUX_KMSAN_CHECKS_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_KMSAN
+
+/**
+ * kmsan_poison_memory() - Mark the memory range as uninitialized.
+ * @address: address to start with.
+ * @size: size of buffer to poison.
+ * @flags: GFP flags for allocations done by this function.
+ *
+ * Until other data is written to this range, KMSAN will treat it as
+ * uninitialized. Error reports for this memory will reference the call site of
+ * kmsan_poison_memory() as origin.
+ */
+void kmsan_poison_memory(const void *address, size_t size, gfp_t flags);
+
+/**
+ * kmsan_unpoison_memory() - Mark the memory range as initialized.
+ * @address: address to start with.
+ * @size: size of buffer to unpoison.
+ *
+ * Until other data is written to this range, KMSAN will treat it as
+ * initialized.
+ */
+void kmsan_unpoison_memory(const void *address, size_t size);
+
+/**
+ * kmsan_check_memory() - Check the memory range for being initialized.
+ * @address: address to start with.
+ * @size: size of buffer to check.
+ *
+ * If any piece of the given range is marked as uninitialized, KMSAN will report
+ * an error.
+ */
+void kmsan_check_memory(const void *address, size_t size);
+
+/**
+ * kmsan_copy_to_user() - Notify KMSAN about a data transfer to userspace.
+ * @to: destination address in the userspace.
+ * @from: source address in the kernel.
+ * @to_copy: number of bytes to copy.
+ * @left: number of bytes not copied.
+ *
+ * If this is a real userspace data transfer, KMSAN checks the bytes that were
+ * actually copied to ensure there was no information leak. If @to belongs to
+ * the kernel space (which is possible for compat syscalls), KMSAN just copies
+ * the metadata.
+ */
+void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
+ size_t left);
+
+#else
+
+static inline void kmsan_poison_memory(const void *address, size_t size,
+ gfp_t flags)
+{
+}
+static inline void kmsan_unpoison_memory(const void *address, size_t size)
+{
+}
+static inline void kmsan_check_memory(const void *address, size_t size)
+{
+}
+static inline void kmsan_copy_to_user(void __user *to, const void *from,
+ size_t to_copy, size_t left)
+{
+}
+
+#endif
+
+#endif /* _LINUX_KMSAN_CHECKS_H */
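A usage sketch for the checks above: a driver unpoisoning a buffer that was filled by hardware DMA, which compiler instrumentation cannot observe (names hypothetical):

static void rx_complete(void *buf, size_t len)
{
	/* The device has DMA-ed len bytes into buf behind KMSAN's back. */
	kmsan_unpoison_memory(buf, len);
	process_packet(buf, len);	/* no false uninit-value reports */
}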
diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h
new file mode 100644
index 000000000000..e0c23a32cdf0
--- /dev/null
+++ b/include/linux/kmsan.h
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KMSAN API for subsystems.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+#ifndef _LINUX_KMSAN_H
+#define _LINUX_KMSAN_H
+
+#include <linux/dma-direction.h>
+#include <linux/gfp.h>
+#include <linux/kmsan-checks.h>
+#include <linux/types.h>
+
+struct page;
+struct kmem_cache;
+struct task_struct;
+struct scatterlist;
+struct urb;
+
+#ifdef CONFIG_KMSAN
+
+/**
+ * kmsan_task_create() - Initialize KMSAN state for the task.
+ * @task: task to initialize.
+ */
+void kmsan_task_create(struct task_struct *task);
+
+/**
+ * kmsan_task_exit() - Notify KMSAN that a task has exited.
+ * @task: task about to finish.
+ */
+void kmsan_task_exit(struct task_struct *task);
+
+/**
+ * kmsan_init_shadow() - Initialize KMSAN shadow at boot time.
+ *
+ * Allocate and initialize KMSAN metadata for early allocations.
+ */
+void __init kmsan_init_shadow(void);
+
+/**
+ * kmsan_init_runtime() - Initialize KMSAN state and enable KMSAN.
+ */
+void __init kmsan_init_runtime(void);
+
+/**
+ * kmsan_memblock_free_pages() - handle freeing of memblock pages.
+ * @page: struct page to free.
+ * @order: order of @page.
+ *
+ * Freed pages are either returned to the buddy allocator or held back to be used
+ * as metadata pages.
+ */
+bool __init __must_check kmsan_memblock_free_pages(struct page *page,
+ unsigned int order);
+
+/**
+ * kmsan_alloc_page() - Notify KMSAN about an alloc_pages() call.
+ * @page: struct page pointer returned by alloc_pages().
+ * @order: order of allocated struct page.
+ * @flags: GFP flags used by alloc_pages()
+ *
+ * KMSAN marks 1<<@order pages starting at @page as uninitialized, unless
+ * @flags contain __GFP_ZERO.
+ */
+void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags);
+
+/**
+ * kmsan_free_page() - Notify KMSAN about a free_pages() call.
+ * @page: struct page pointer passed to free_pages().
+ * @order: order of deallocated struct page.
+ *
+ * KMSAN marks freed memory as uninitialized.
+ */
+void kmsan_free_page(struct page *page, unsigned int order);
+
+/**
+ * kmsan_copy_page_meta() - Copy KMSAN metadata between two pages.
+ * @dst: destination page.
+ * @src: source page.
+ *
+ * KMSAN copies the contents of metadata pages for @src into the metadata pages
+ * for @dst. If @dst has no associated metadata pages, nothing happens.
+ * If @src has no associated metadata pages, @dst metadata pages are unpoisoned.
+ */
+void kmsan_copy_page_meta(struct page *dst, struct page *src);
+
+/**
+ * kmsan_slab_alloc() - Notify KMSAN about a slab allocation.
+ * @s: slab cache the object belongs to.
+ * @object: object pointer.
+ * @flags: GFP flags passed to the allocator.
+ *
+ * Depending on cache flags and GFP flags, KMSAN sets up the metadata of the
+ * newly created object, marking it as initialized or uninitialized.
+ */
+void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
+
+/**
+ * kmsan_slab_free() - Notify KMSAN about a slab deallocation.
+ * @s: slab cache the object belongs to.
+ * @object: object pointer.
+ *
+ * KMSAN marks the freed object as uninitialized.
+ */
+void kmsan_slab_free(struct kmem_cache *s, void *object);
+
+/**
+ * kmsan_kmalloc_large() - Notify KMSAN about a large slab allocation.
+ * @ptr: object pointer.
+ * @size: object size.
+ * @flags: GFP flags passed to the allocator.
+ *
+ * Similar to kmsan_slab_alloc(), but for large allocations.
+ */
+void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
+
+/**
+ * kmsan_kfree_large() - Notify KMSAN about a large slab deallocation.
+ * @ptr: object pointer.
+ *
+ * Similar to kmsan_slab_free(), but for large allocations.
+ */
+void kmsan_kfree_large(const void *ptr);
+
+/**
+ * kmsan_vmap_pages_range_noflush() - Notify KMSAN about a vmap.
+ * @start: start of vmapped range.
+ * @end: end of vmapped range.
+ * @prot: page protection flags used for vmap.
+ * @pages: array of pages.
+ * @page_shift: page_shift passed to vmap_range_noflush().
+ *
+ * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
+ * vmalloc metadata address range. Returns 0 on success; callers must check
+ * for a non-zero return value.
+ */
+int __must_check kmsan_vmap_pages_range_noflush(unsigned long start,
+ unsigned long end,
+ pgprot_t prot,
+ struct page **pages,
+ unsigned int page_shift);
+
+/**
+ * kmsan_vunmap_range_noflush() - Notify KMSAN about a vunmap.
+ * @start: start of vunmapped range.
+ * @end: end of vunmapped range.
+ *
+ * KMSAN unmaps the contiguous metadata ranges created by
+ * kmsan_vmap_pages_range_noflush().
+ */
+void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
+
+/**
+ * kmsan_ioremap_page_range() - Notify KMSAN about an ioremap_page_range() call.
+ * @addr: range start.
+ * @end: range end.
+ * @phys_addr: physical range start.
+ * @prot: page protection flags used for ioremap_page_range().
+ * @page_shift: page_shift argument passed to vmap_range_noflush().
+ *
+ * KMSAN creates new metadata pages for the physical pages mapped into the
+ * virtual memory. Returns 0 on success; callers must check for a non-zero
+ * return value.
+ */
+int __must_check kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int page_shift);
+
+/**
+ * kmsan_iounmap_page_range() - Notify KMSAN about an iounmap_page_range() call.
+ * @start: range start.
+ * @end: range end.
+ *
+ * KMSAN unmaps the metadata pages for the given range and, unlike for
+ * vunmap_page_range(), also deallocates them.
+ */
+void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
+
+/**
+ * kmsan_handle_dma() - Handle a DMA data transfer.
+ * @page: first page of the buffer.
+ * @offset: offset of the buffer within the first page.
+ * @size: buffer size.
+ * @dir: one of possible dma_data_direction values.
+ *
+ * Depending on @dir, KMSAN:
+ * * checks the buffer, if it is copied to the device;
+ * * initializes the buffer, if it is copied from the device;
+ * * does both, if this is a DMA_BIDIRECTIONAL transfer.
+ */
+void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+ enum dma_data_direction dir);
+
+/**
+ * kmsan_handle_dma_sg() - Handle a DMA transfer using scatterlist.
+ * @sg: scatterlist holding DMA buffers.
+ * @nents: number of scatterlist entries.
+ * @dir: one of possible dma_data_direction values.
+ *
+ * Depending on @dir, KMSAN:
+ * * checks the buffers in the scatterlist, if they are copied to the device;
+ * * initializes the buffers, if they are copied from the device;
+ * * does both, if this is a DMA_BIDIRECTIONAL transfer.
+ */
+void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+
+/**
+ * kmsan_handle_urb() - Handle a USB data transfer.
+ * @urb: struct urb pointer.
+ * @is_out: data transfer direction (true means output to hardware).
+ *
+ * If @is_out is true, KMSAN checks the transfer buffer of @urb. Otherwise,
+ * KMSAN initializes the transfer buffer.
+ */
+void kmsan_handle_urb(const struct urb *urb, bool is_out);
+
+/**
+ * kmsan_unpoison_entry_regs() - Handle pt_regs in low-level entry code.
+ * @regs: struct pt_regs pointer received from assembly code.
+ *
+ * KMSAN unpoisons the contents of the passed pt_regs, preventing potential
+ * false positive reports. Unlike kmsan_unpoison_memory(),
+ * kmsan_unpoison_entry_regs() can be called from the regions where
+ * kmsan_in_runtime() returns true, which is the case in early entry code.
+ */
+void kmsan_unpoison_entry_regs(const struct pt_regs *regs);
+
+#else
+
+static inline void kmsan_init_shadow(void)
+{
+}
+
+static inline void kmsan_init_runtime(void)
+{
+}
+
+static inline bool __must_check kmsan_memblock_free_pages(struct page *page,
+ unsigned int order)
+{
+ return true;
+}
+
+static inline void kmsan_task_create(struct task_struct *task)
+{
+}
+
+static inline void kmsan_task_exit(struct task_struct *task)
+{
+}
+
+static inline void kmsan_alloc_page(struct page *page, unsigned int order,
+ gfp_t flags)
+{
+}
+
+static inline void kmsan_free_page(struct page *page, unsigned int order)
+{
+}
+
+static inline void kmsan_copy_page_meta(struct page *dst, struct page *src)
+{
+}
+
+static inline void kmsan_slab_alloc(struct kmem_cache *s, void *object,
+ gfp_t flags)
+{
+}
+
+static inline void kmsan_slab_free(struct kmem_cache *s, void *object)
+{
+}
+
+static inline void kmsan_kmalloc_large(const void *ptr, size_t size,
+ gfp_t flags)
+{
+}
+
+static inline void kmsan_kfree_large(const void *ptr)
+{
+}
+
+static inline int __must_check kmsan_vmap_pages_range_noflush(
+ unsigned long start, unsigned long end, pgprot_t prot,
+ struct page **pages, unsigned int page_shift)
+{
+ return 0;
+}
+
+static inline void kmsan_vunmap_range_noflush(unsigned long start,
+ unsigned long end)
+{
+}
+
+static inline int __must_check kmsan_ioremap_page_range(unsigned long start,
+ unsigned long end,
+ phys_addr_t phys_addr,
+ pgprot_t prot,
+ unsigned int page_shift)
+{
+ return 0;
+}
+
+static inline void kmsan_iounmap_page_range(unsigned long start,
+ unsigned long end)
+{
+}
+
+static inline void kmsan_handle_dma(struct page *page, size_t offset,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static inline void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+}
+
+static inline void kmsan_handle_urb(const struct urb *urb, bool is_out)
+{
+}
+
+static inline void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
+{
+}
+
+#endif
+
+#endif /* _LINUX_KMSAN_H */
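
Most of these hooks are invoked from core mm and DMA infrastructure rather than by end drivers. As a hedged sketch of the DMA case (heavily simplified; the real dma-mapping code does far more bookkeeping), a streaming-map path would hand the buffer to kmsan_handle_dma(), which checks or unpoisons it according to the direction:

#include <linux/kmsan.h>

/* Illustrative only: a stand-in for a dma_map_page()-style helper. */
static void example_map_for_dma(struct page *page, size_t offset,
				size_t size, enum dma_data_direction dir)
{
	/*
	 * DMA_TO_DEVICE: the buffer is checked for uninitialized bytes.
	 * DMA_FROM_DEVICE: the buffer is marked initialized, since the
	 * device will overwrite it. DMA_BIDIRECTIONAL does both.
	 */
	kmsan_handle_dma(page, offset, size, dir);
}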
diff --git a/include/linux/kmsan_string.h b/include/linux/kmsan_string.h
new file mode 100644
index 000000000000..7287da6f52ef
--- /dev/null
+++ b/include/linux/kmsan_string.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KMSAN string functions API used in other headers.
+ *
+ * Copyright (C) 2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+#ifndef _LINUX_KMSAN_STRING_H
+#define _LINUX_KMSAN_STRING_H
+
+/*
+ * KMSAN overrides the default memcpy/memset/memmove implementations in the
+ * kernel, which requires having __msan_XXX function prototypes in several other
+ * headers. Keep them in one place instead of open-coding.
+ */
+void *__msan_memcpy(void *dst, const void *src, size_t size);
+void *__msan_memset(void *s, int c, size_t n);
+void *__msan_memmove(void *dest, const void *src, size_t len);
+
+#endif /* _LINUX_KMSAN_STRING_H */
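
These prototypes let other headers redirect the string builtins without pulling in the full KMSAN API. A hedged sketch of the pattern (the example_* macro is invented; __SANITIZE_MEMORY__ is assumed here as the define compilers set for instrumented translation units):

#include <linux/kmsan_string.h>

#ifdef __SANITIZE_MEMORY__
/* Instrumented code: go through KMSAN so shadow/origin are propagated. */
#define example_memcpy(dst, src, len)	__msan_memcpy(dst, src, len)
#else
#define example_memcpy(dst, src, len)	memcpy(dst, src, len)
#endif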
diff --git a/include/linux/kmsan_types.h b/include/linux/kmsan_types.h
new file mode 100644
index 000000000000..8bfa6c98176d
--- /dev/null
+++ b/include/linux/kmsan_types.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A minimal header declaring types added by KMSAN to existing kernel structs.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+#ifndef _LINUX_KMSAN_TYPES_H
+#define _LINUX_KMSAN_TYPES_H
+
+/* These constants are defined in the MSan LLVM instrumentation pass. */
+#define KMSAN_RETVAL_SIZE 800
+#define KMSAN_PARAM_SIZE 800
+
+struct kmsan_context_state {
+ char param_tls[KMSAN_PARAM_SIZE];
+ char retval_tls[KMSAN_RETVAL_SIZE];
+ char va_arg_tls[KMSAN_PARAM_SIZE];
+ char va_arg_origin_tls[KMSAN_PARAM_SIZE];
+ u64 va_arg_overflow_size_tls;
+ char param_origin_tls[KMSAN_PARAM_SIZE];
+ u32 retval_origin_tls;
+};
+
+#undef KMSAN_PARAM_SIZE
+#undef KMSAN_RETVAL_SIZE
+
+struct kmsan_ctx {
+ struct kmsan_context_state cstate;
+ int kmsan_in_runtime;
+ bool allow_reporting;
+};
+
+#endif /* _LINUX_KMSAN_TYPES_H */
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index 2e7a1e032c71..906521c2329c 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -25,9 +25,18 @@ enum kmsg_dump_reason {
KMSG_DUMP_PANIC,
KMSG_DUMP_OOPS,
KMSG_DUMP_EMERG,
- KMSG_DUMP_RESTART,
- KMSG_DUMP_HALT,
- KMSG_DUMP_POWEROFF,
+ KMSG_DUMP_SHUTDOWN,
+ KMSG_DUMP_MAX
+};
+
+/**
+ * struct kmsg_dump_iter - iterator for retrieving kernel messages
+ * @cur_seq: Points to the oldest message to dump
+ * @next_seq: Points after the newest message to dump
+ */
+struct kmsg_dump_iter {
+ u64 cur_seq;
+ u64 next_seq;
};
/**
@@ -42,64 +51,43 @@ struct kmsg_dumper {
struct list_head list;
void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason);
enum kmsg_dump_reason max_reason;
- bool active;
bool registered;
-
- /* private state of the kmsg iterator */
- u32 cur_idx;
- u32 next_idx;
- u64 cur_seq;
- u64 next_seq;
};
#ifdef CONFIG_PRINTK
void kmsg_dump(enum kmsg_dump_reason reason);
-bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
- char *line, size_t size, size_t *len);
-
-bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
char *line, size_t size, size_t *len);
-bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
- char *buf, size_t size, size_t *len);
-
-void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper);
+bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
+ char *buf, size_t size, size_t *len_out);
-void kmsg_dump_rewind(struct kmsg_dumper *dumper);
+void kmsg_dump_rewind(struct kmsg_dump_iter *iter);
int kmsg_dump_register(struct kmsg_dumper *dumper);
int kmsg_dump_unregister(struct kmsg_dumper *dumper);
+
+const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason);
#else
static inline void kmsg_dump(enum kmsg_dump_reason reason)
{
}
-static inline bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper,
- bool syslog, const char *line,
- size_t size, size_t *len)
-{
- return false;
-}
-
-static inline bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+static inline bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
const char *line, size_t size, size_t *len)
{
return false;
}
-static inline bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+static inline bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
char *buf, size_t size, size_t *len)
{
return false;
}
-static inline void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
-{
-}
-
-static inline void kmsg_dump_rewind(struct kmsg_dumper *dumper)
+static inline void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
{
}
@@ -112,6 +100,11 @@ static inline int kmsg_dump_unregister(struct kmsg_dumper *dumper)
{
return -EINVAL;
}
+
+static inline const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
+{
+ return "Disabled";
+}
#endif
#endif /* _LINUX_KMSG_DUMP_H */
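
The rework moves the iterator state out of struct kmsg_dumper and onto the caller's stack as a struct kmsg_dump_iter. A minimal dumper against the new interface might look like this (a sketch only; the buffer and its size are invented):

#include <linux/kmsg_dump.h>

static char example_buf[4096];	/* hypothetical persistent store */

static void example_do_dump(struct kmsg_dumper *dumper,
			    enum kmsg_dump_reason reason)
{
	struct kmsg_dump_iter iter;	/* caller-owned now, not in dumper */
	size_t len;

	kmsg_dump_rewind(&iter);
	if (!kmsg_dump_get_buffer(&iter, true, example_buf,
				  sizeof(example_buf), &len))
		return;
	/* ... persist the first len bytes of example_buf ... */
}

static struct kmsg_dumper example_dumper = {
	.dump		= example_do_dump,
	.max_reason	= KMSG_DUMP_PANIC,
};

/* At init time: kmsg_dump_register(&example_dumper); */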
diff --git a/include/linux/kobj_map.h b/include/linux/kobj_map.h
index 18ca75ffcc5a..c9919f8b2293 100644
--- a/include/linux/kobj_map.h
+++ b/include/linux/kobj_map.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* kobj_map.h
*/
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index eeab34b0f589..c392c811d9ad 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* kobject.h - generic kernel object infrastructure.
*
@@ -6,9 +7,7 @@
* Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (c) 2006-2008 Novell Inc.
*
- * This file is released under the GPLv2.
- *
- * Please read Documentation/kobject.txt before using the kobject
+ * Please read Documentation/core-api/kobject.rst before using the kobject
* interface, ESPECIALLY the parts about reference counts and object
* destructors.
*/
@@ -20,16 +19,17 @@
#include <linux/list.h>
#include <linux/sysfs.h>
#include <linux/compiler.h>
+#include <linux/container_of.h>
#include <linux/spinlock.h>
#include <linux/kref.h>
#include <linux/kobject_ns.h>
-#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
+#include <linux/uidgid.h>
#define UEVENT_HELPER_PATH_LEN 256
-#define UEVENT_NUM_ENVP 32 /* number of env pointers */
+#define UEVENT_NUM_ENVP 64 /* number of env pointers */
#define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */
#ifdef CONFIG_UEVENT_HELPER
@@ -57,7 +57,8 @@ enum kobject_action {
KOBJ_MOVE,
KOBJ_ONLINE,
KOBJ_OFFLINE,
- KOBJ_MAX
+ KOBJ_BIND,
+ KOBJ_UNBIND,
};
struct kobject {
@@ -65,7 +66,7 @@ struct kobject {
struct list_head entry;
struct kobject *parent;
struct kset *kset;
- struct kobj_type *ktype;
+ const struct kobj_type *ktype;
struct kernfs_node *sd; /* sysfs directory entry */
struct kref kref;
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
@@ -78,49 +79,45 @@ struct kobject {
unsigned int uevent_suppress:1;
};
-extern __printf(2, 3)
-int kobject_set_name(struct kobject *kobj, const char *name, ...);
-extern __printf(2, 0)
-int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
- va_list vargs);
+__printf(2, 3) int kobject_set_name(struct kobject *kobj, const char *name, ...);
+__printf(2, 0) int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list vargs);
static inline const char *kobject_name(const struct kobject *kobj)
{
return kobj->name;
}
-extern void kobject_init(struct kobject *kobj, struct kobj_type *ktype);
-extern __printf(3, 4) __must_check
-int kobject_add(struct kobject *kobj, struct kobject *parent,
- const char *fmt, ...);
-extern __printf(4, 5) __must_check
-int kobject_init_and_add(struct kobject *kobj,
- struct kobj_type *ktype, struct kobject *parent,
- const char *fmt, ...);
+void kobject_init(struct kobject *kobj, const struct kobj_type *ktype);
+__printf(3, 4) __must_check int kobject_add(struct kobject *kobj,
+ struct kobject *parent,
+ const char *fmt, ...);
+__printf(4, 5) __must_check int kobject_init_and_add(struct kobject *kobj,
+ const struct kobj_type *ktype,
+ struct kobject *parent,
+ const char *fmt, ...);
-extern void kobject_del(struct kobject *kobj);
+void kobject_del(struct kobject *kobj);
-extern struct kobject * __must_check kobject_create(void);
-extern struct kobject * __must_check kobject_create_and_add(const char *name,
- struct kobject *parent);
+struct kobject * __must_check kobject_create_and_add(const char *name, struct kobject *parent);
-extern int __must_check kobject_rename(struct kobject *, const char *new_name);
-extern int __must_check kobject_move(struct kobject *, struct kobject *);
+int __must_check kobject_rename(struct kobject *, const char *new_name);
+int __must_check kobject_move(struct kobject *, struct kobject *);
-extern struct kobject *kobject_get(struct kobject *kobj);
-extern struct kobject * __must_check kobject_get_unless_zero(
- struct kobject *kobj);
-extern void kobject_put(struct kobject *kobj);
+struct kobject *kobject_get(struct kobject *kobj);
+struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj);
+void kobject_put(struct kobject *kobj);
-extern const void *kobject_namespace(struct kobject *kobj);
-extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
+const void *kobject_namespace(const struct kobject *kobj);
+void kobject_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid);
+char *kobject_get_path(const struct kobject *kobj, gfp_t flag);
struct kobj_type {
void (*release)(struct kobject *kobj);
const struct sysfs_ops *sysfs_ops;
- struct attribute **default_attrs;
- const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
- const void *(*namespace)(struct kobject *kobj);
+ const struct attribute_group **default_groups;
+ const struct kobj_ns_type_operations *(*child_ns_type)(const struct kobject *kobj);
+ const void *(*namespace)(const struct kobject *kobj);
+ void (*get_ownership)(const struct kobject *kobj, kuid_t *uid, kgid_t *gid);
};
struct kobj_uevent_env {
@@ -132,10 +129,9 @@ struct kobj_uevent_env {
};
struct kset_uevent_ops {
- int (* const filter)(struct kset *kset, struct kobject *kobj);
- const char *(* const name)(struct kset *kset, struct kobject *kobj);
- int (* const uevent)(struct kset *kset, struct kobject *kobj,
- struct kobj_uevent_env *env);
+ int (* const filter)(const struct kobject *kobj);
+ const char *(* const name)(const struct kobject *kobj);
+ int (* const uevent)(const struct kobject *kobj, struct kobj_uevent_env *env);
};
struct kobj_attribute {
@@ -172,14 +168,13 @@ struct kset {
spinlock_t list_lock;
struct kobject kobj;
const struct kset_uevent_ops *uevent_ops;
-};
+} __randomize_layout;
-extern void kset_init(struct kset *kset);
-extern int __must_check kset_register(struct kset *kset);
-extern void kset_unregister(struct kset *kset);
-extern struct kset * __must_check kset_create_and_add(const char *name,
- const struct kset_uevent_ops *u,
- struct kobject *parent_kobj);
+void kset_init(struct kset *kset);
+int __must_check kset_register(struct kset *kset);
+void kset_unregister(struct kset *kset);
+struct kset * __must_check kset_create_and_add(const char *name, const struct kset_uevent_ops *u,
+ struct kobject *parent_kobj);
static inline struct kset *to_kset(struct kobject *kobj)
{
@@ -196,12 +191,12 @@ static inline void kset_put(struct kset *k)
kobject_put(&k->kobj);
}
-static inline struct kobj_type *get_ktype(struct kobject *kobj)
+static inline const struct kobj_type *get_ktype(const struct kobject *kobj)
{
return kobj->ktype;
}
-extern struct kobject *kset_find_obj(struct kset *, const char *);
+struct kobject *kset_find_obj(struct kset *, const char *);
/* The global /sys/kernel/ kobject for people to chain off of */
extern struct kobject *kernel_kobj;
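
Two of the changes above affect every kobject user: default_attrs is replaced by default_groups, and kobj_type is handled as const throughout. A hedged sketch of a kobj_type written against the updated declarations (all example_* names are invented):

#include <linux/kobject.h>
#include <linux/slab.h>

struct example_obj {
	struct kobject kobj;
	int value;
};

static void example_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct example_obj, kobj));
}

static ssize_t value_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	struct example_obj *obj = container_of(kobj, struct example_obj, kobj);

	return sysfs_emit(buf, "%d\n", obj->value);
}
static struct kobj_attribute value_attr = __ATTR_RO(value);

static struct attribute *example_attrs[] = {
	&value_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);	/* emits example_groups from example_attrs */

static const struct kobj_type example_ktype = {	/* const is now expected */
	.release	= example_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_groups	= example_groups,	/* was .default_attrs */
};

/* kobject_init_and_add(&obj->kobj, &example_ktype, parent, "example"); */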
diff --git a/include/linux/kobject_api.h b/include/linux/kobject_api.h
new file mode 100644
index 000000000000..6e36a054c2d6
--- /dev/null
+++ b/include/linux/kobject_api.h
@@ -0,0 +1 @@
+#include <linux/kobject.h>
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
index df32d2508290..be707748e7ce 100644
--- a/include/linux/kobject_ns.h
+++ b/include/linux/kobject_ns.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Kernel object name space definitions
*
* Copyright (c) 2002-2003 Patrick Mochel
@@ -7,9 +8,7 @@
*
* Split from kobject.h by David Howells (dhowells@redhat.com)
*
- * This file is released under the GPLv2.
- *
- * Please read Documentation/kobject.txt before using the kobject
+ * Please read Documentation/core-api/kobject.rst before using the kobject
* interface, ESPECIALLY the parts about reference counts and object
* destructors.
*/
@@ -48,8 +47,8 @@ struct kobj_ns_type_operations {
int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
int kobj_ns_type_registered(enum kobj_ns_type type);
-const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
-const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
+const struct kobj_ns_type_operations *kobj_child_ns_ops(const struct kobject *parent);
+const struct kobj_ns_type_operations *kobj_ns_ops(const struct kobject *kobj);
bool kobj_ns_current_may_mount(enum kobj_ns_type type);
void *kobj_ns_grab_current(enum kobj_ns_type type);
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index bd2684700b74..85a64cb95d75 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -1,22 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_KPROBES_H
#define _LINUX_KPROBES_H
/*
* Kernel Probes (KProbes)
- * include/linux/kprobes.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2002, 2004
*
@@ -40,6 +26,9 @@
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>
+#include <linux/refcount.h>
+#include <linux/freelist.h>
+#include <linux/rethook.h>
#include <asm/kprobes.h>
#ifdef CONFIG_KPROBES
@@ -50,7 +39,7 @@
#define KPROBE_REENTER 0x00000004
#define KPROBE_HIT_SSDONE 0x00000008
-#else /* CONFIG_KPROBES */
+#else /* !CONFIG_KPROBES */
#include <asm-generic/kprobes.h>
typedef int kprobe_opcode_t;
struct arch_specific_insn {
@@ -63,11 +52,8 @@ struct pt_regs;
struct kretprobe;
struct kretprobe_instance;
typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
-typedef int (*kprobe_break_handler_t) (struct kprobe *, struct pt_regs *);
typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
unsigned long flags);
-typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,
- int trapnr);
typedef int (*kretprobe_handler_t) (struct kretprobe_instance *,
struct pt_regs *);
@@ -95,18 +81,6 @@ struct kprobe {
/* Called after addr is executed, unless... */
kprobe_post_handler_t post_handler;
- /*
- * ... called if executing addr causes a fault (eg. page fault).
- * Return 1 if it handled fault, otherwise kernel will see it.
- */
- kprobe_fault_handler_t fault_handler;
-
- /*
- * ... called if breakpoint trap occurs in probe handler.
- * Return 1 if it handled break, otherwise kernel will see it.
- */
- kprobe_break_handler_t break_handler;
-
/* Saved opcode (which has been replaced with breakpoint) */
kprobe_opcode_t opcode;
@@ -129,50 +103,33 @@ struct kprobe {
* this flag is only for optimized_kprobe.
*/
#define KPROBE_FLAG_FTRACE 8 /* probe is using ftrace */
+#define KPROBE_FLAG_ON_FUNC_ENTRY 16 /* probe is on the function entry */
/* Has this kprobe gone ? */
-static inline int kprobe_gone(struct kprobe *p)
+static inline bool kprobe_gone(struct kprobe *p)
{
return p->flags & KPROBE_FLAG_GONE;
}
/* Is this kprobe disabled ? */
-static inline int kprobe_disabled(struct kprobe *p)
+static inline bool kprobe_disabled(struct kprobe *p)
{
return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
}
/* Is this kprobe really running optimized path ? */
-static inline int kprobe_optimized(struct kprobe *p)
+static inline bool kprobe_optimized(struct kprobe *p)
{
return p->flags & KPROBE_FLAG_OPTIMIZED;
}
/* Does this kprobe use ftrace ? */
-static inline int kprobe_ftrace(struct kprobe *p)
+static inline bool kprobe_ftrace(struct kprobe *p)
{
return p->flags & KPROBE_FLAG_FTRACE;
}
/*
- * Special probe type that uses setjmp-longjmp type tricks to resume
- * execution at a specified entry with a matching prototype corresponding
- * to the probed function - a trick to enable arguments to become
- * accessible seamlessly by probe handling logic.
- * Note:
- * Because of the way compilers allocate stack space for local variables
- * etc upfront, regardless of sub-scopes within a function, this mirroring
- * principle currently works only for probes placed on function entry points.
- */
-struct jprobe {
- struct kprobe kp;
- void *entry; /* probe handling code to jump to */
-};
-
-/* For backward compatibility with old code using JPROBE_ENTRY() */
-#define JPROBE_ENTRY(handler) (handler)
-
-/*
* Function-return probe -
* Note:
* User needs to provide a handler function, and initialize maxactive.
@@ -182,6 +139,11 @@ struct jprobe {
* ignored, due to maxactive being too low.
*
*/
+struct kretprobe_holder {
+ struct kretprobe *rp;
+ refcount_t ref;
+};
+
struct kretprobe {
struct kprobe kp;
kretprobe_handler_t handler;
@@ -189,16 +151,30 @@ struct kretprobe {
int maxactive;
int nmissed;
size_t data_size;
- struct hlist_head free_instances;
- raw_spinlock_t lock;
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+ struct rethook *rh;
+#else
+ struct freelist_head freelist;
+ struct kretprobe_holder *rph;
+#endif
};
+#define KRETPROBE_MAX_DATA_SIZE 4096
+
struct kretprobe_instance {
- struct hlist_node hlist;
- struct kretprobe *rp;
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+ struct rethook_node node;
+#else
+ union {
+ struct freelist_node freelist;
+ struct rcu_head rcu;
+ };
+ struct llist_node llist;
+ struct kretprobe_holder *rph;
kprobe_opcode_t *ret_addr;
- struct task_struct *task;
- char data[0];
+ void *fp;
+#endif
+ char data[];
};
struct kretprobe_blackpoint {
@@ -216,19 +192,77 @@ struct kprobe_blacklist_entry {
DECLARE_PER_CPU(struct kprobe *, current_kprobe);
DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+extern void kprobe_busy_begin(void);
+extern void kprobe_busy_end(void);
+
+#ifdef CONFIG_KRETPROBES
+/* Check whether @p is used for implementing a trampoline. */
+extern int arch_trampoline_kprobe(struct kprobe *p);
+
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
+ "Kretprobe is accessed from instance under preemptive context");
+
+ return (struct kretprobe *)READ_ONCE(ri->node.rethook->data);
+}
+static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
+{
+ return ri->node.ret_addr;
+}
+#else
+extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs);
+void arch_kretprobe_fixup_return(struct pt_regs *regs,
+ kprobe_opcode_t *correct_ret_addr);
+
+void __kretprobe_trampoline(void);
/*
- * For #ifdef avoidance:
+ * Since some architectures use structured function pointers,
+ * use dereference_function_descriptor() to get the real function address.
*/
-static inline int kprobes_built_in(void)
+static nokprobe_inline void *kretprobe_trampoline_addr(void)
{
- return 1;
+ return dereference_kernel_function_descriptor(__kretprobe_trampoline);
}
-#ifdef CONFIG_KRETPROBES
-extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
- struct pt_regs *regs);
-extern int arch_trampoline_kprobe(struct kprobe *p);
-#else /* CONFIG_KRETPROBES */
+/* If the trampoline handler is called from a kprobe, use this version */
+unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
+ void *frame_pointer);
+
+static nokprobe_inline
+unsigned long kretprobe_trampoline_handler(struct pt_regs *regs,
+ void *frame_pointer)
+{
+ unsigned long ret;
+ /*
+ * Set a dummy kprobe to avoid kretprobe recursion.
+ * Since a kretprobe never runs inside a kprobe handler, no kprobe
+ * should be running at this point.
+ */
+ kprobe_busy_begin();
+ ret = __kretprobe_trampoline_handler(regs, frame_pointer);
+ kprobe_busy_end();
+
+ return ret;
+}
+
+static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
+ "Kretprobe is accessed from instance under preemptive context");
+
+ return READ_ONCE(ri->rph->rp);
+}
+
+static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
+{
+ return (unsigned long)ri->ret_addr;
+}
+#endif /* CONFIG_KRETPROBE_ON_RETHOOK */
+
+#else /* !CONFIG_KRETPROBES */
static inline void arch_prepare_kretprobe(struct kretprobe *rp,
struct pt_regs *regs)
{
@@ -239,21 +273,15 @@ static inline int arch_trampoline_kprobe(struct kprobe *p)
}
#endif /* CONFIG_KRETPROBES */
-extern struct kretprobe_blackpoint kretprobe_blacklist[];
+/* Markers of '_kprobe_blacklist' section */
+extern unsigned long __start_kprobe_blacklist[];
+extern unsigned long __stop_kprobe_blacklist[];
-static inline void kretprobe_assert(struct kretprobe_instance *ri,
- unsigned long orig_ret_address, unsigned long trampoline_address)
-{
- if (!orig_ret_address || (orig_ret_address == trampoline_address)) {
- printk("kretprobe BUG!: Processing kretprobe %p @ %p\n",
- ri->rp, ri->rp->kp.addr);
- BUG();
- }
-}
+extern struct kretprobe_blackpoint kretprobe_blacklist[];
#ifdef CONFIG_KPROBES_SANITY_TEST
extern int init_test_probes(void);
-#else
+#else /* !CONFIG_KPROBES_SANITY_TEST */
static inline int init_test_probes(void)
{
return 0;
@@ -264,18 +292,20 @@ extern int arch_prepare_kprobe(struct kprobe *p);
extern void arch_arm_kprobe(struct kprobe *p);
extern void arch_disarm_kprobe(struct kprobe *p);
extern int arch_init_kprobes(void);
-extern void show_registers(struct pt_regs *regs);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
-extern bool arch_kprobe_on_func_entry(unsigned long offset);
-extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
+extern int arch_populate_kprobe_blacklist(void);
+extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
extern bool within_kprobe_blacklist(unsigned long addr);
+extern int kprobe_add_ksym_blacklist(unsigned long entry);
+extern int kprobe_add_area_blacklist(unsigned long start, unsigned long end);
struct kprobe_insn_cache {
struct mutex mutex;
void *(*alloc)(void); /* allocate insn page */
void (*free)(void *); /* free insn page */
+ const char *sym; /* symbol for insn pages */
struct list_head pages; /* list of kprobe_insn_page */
size_t insn_size; /* size of instruction slot */
int nr_garbage;
@@ -306,7 +336,11 @@ static inline bool is_kprobe_##__name##_slot(unsigned long addr) \
{ \
return __is_insn_slot_addr(&kprobe_##__name##_slots, addr); \
}
-#else /* __ARCH_WANT_KPROBES_INSN_SLOT */
+#define KPROBE_INSN_PAGE_SYM "kprobe_insn_page"
+#define KPROBE_OPTINSN_PAGE_SYM "kprobe_optinsn_page"
+int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
+ unsigned long *value, char *type, char *sym);
+#else /* !__ARCH_WANT_KPROBES_INSN_SLOT */
#define DEFINE_INSN_CACHE_OPS(__name) \
static inline bool is_kprobe_##__name##_slot(unsigned long addr) \
{ \
@@ -337,41 +371,37 @@ extern void arch_unoptimize_kprobes(struct list_head *oplist,
struct list_head *done_list);
extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
- unsigned long addr);
+ kprobe_opcode_t *addr);
extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
DEFINE_INSN_CACHE_OPS(optinsn);
-#ifdef CONFIG_SYSCTL
-extern int sysctl_kprobes_optimization;
-extern int proc_kprobes_optimization_handler(struct ctl_table *table,
- int write, void __user *buffer,
- size_t *length, loff_t *ppos);
-#endif
extern void wait_for_kprobe_optimizer(void);
-#else
+bool optprobe_queued_unopt(struct optimized_kprobe *op);
+bool kprobe_disarmed(struct kprobe *p);
+#else /* !CONFIG_OPTPROBES */
static inline void wait_for_kprobe_optimizer(void) { }
#endif /* CONFIG_OPTPROBES */
+
#ifdef CONFIG_KPROBES_ON_FTRACE
extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *ops, struct pt_regs *regs);
+ struct ftrace_ops *ops, struct ftrace_regs *fregs);
extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
-#endif
-
-int arch_check_ftrace_location(struct kprobe *p);
+#else
+static inline int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_KPROBES_ON_FTRACE */
/* Get the kprobe at this addr (if any) - called with preemption disabled */
struct kprobe *get_kprobe(void *addr);
-void kretprobe_hash_lock(struct task_struct *tsk,
- struct hlist_head **head, unsigned long *flags);
-void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags);
-struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk);
/* kprobe_running() will just return the current_kprobe on this CPU */
static inline struct kprobe *kprobe_running(void)
{
- return (__this_cpu_read(current_kprobe));
+ return __this_cpu_read(current_kprobe);
}
static inline void reset_current_kprobe(void)
@@ -385,38 +415,43 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
}
kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset);
+kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset, bool *on_func_entry);
+
int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
int register_kprobes(struct kprobe **kps, int num);
void unregister_kprobes(struct kprobe **kps, int num);
-int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
-int longjmp_break_handler(struct kprobe *, struct pt_regs *);
-int register_jprobe(struct jprobe *p);
-void unregister_jprobe(struct jprobe *p);
-int register_jprobes(struct jprobe **jps, int num);
-void unregister_jprobes(struct jprobe **jps, int num);
-void jprobe_return(void);
-unsigned long arch_deref_entry_point(void *);
int register_kretprobe(struct kretprobe *rp);
void unregister_kretprobe(struct kretprobe *rp);
int register_kretprobes(struct kretprobe **rps, int num);
void unregister_kretprobes(struct kretprobe **rps, int num);
+#if defined(CONFIG_KRETPROBE_ON_RETHOOK) || !defined(CONFIG_KRETPROBES)
+#define kprobe_flush_task(tk) do {} while (0)
+#else
void kprobe_flush_task(struct task_struct *tk);
-void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
+#endif
+
+void kprobe_free_init_mem(void);
int disable_kprobe(struct kprobe *kp);
int enable_kprobe(struct kprobe *kp);
void dump_kprobe(struct kprobe *kp);
+void *alloc_insn_page(void);
+
+void *alloc_optinsn_page(void);
+void free_optinsn_page(void *page);
+
+int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+ char *sym);
+
+int arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
+ char *type, char *sym);
#else /* !CONFIG_KPROBES: */
-static inline int kprobes_built_in(void)
-{
- return 0;
-}
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
return 0;
@@ -429,13 +464,16 @@ static inline struct kprobe *kprobe_running(void)
{
return NULL;
}
+#define kprobe_busy_begin() do {} while (0)
+#define kprobe_busy_end() do {} while (0)
+
static inline int register_kprobe(struct kprobe *p)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int register_kprobes(struct kprobe **kps, int num)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline void unregister_kprobe(struct kprobe *p)
{
@@ -443,30 +481,13 @@ static inline void unregister_kprobe(struct kprobe *p)
static inline void unregister_kprobes(struct kprobe **kps, int num)
{
}
-static inline int register_jprobe(struct jprobe *p)
-{
- return -ENOSYS;
-}
-static inline int register_jprobes(struct jprobe **jps, int num)
-{
- return -ENOSYS;
-}
-static inline void unregister_jprobe(struct jprobe *p)
-{
-}
-static inline void unregister_jprobes(struct jprobe **jps, int num)
-{
-}
-static inline void jprobe_return(void)
-{
-}
static inline int register_kretprobe(struct kretprobe *rp)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int register_kretprobes(struct kretprobe **rps, int num)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline void unregister_kretprobe(struct kretprobe *rp)
{
@@ -477,15 +498,29 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num)
static inline void kprobe_flush_task(struct task_struct *tk)
{
}
+static inline void kprobe_free_init_mem(void)
+{
+}
static inline int disable_kprobe(struct kprobe *kp)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int enable_kprobe(struct kprobe *kp)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
+}
+
+static inline bool within_kprobe_blacklist(unsigned long addr)
+{
+ return true;
+}
+static inline int kprobe_get_kallsym(unsigned int symnum, unsigned long *value,
+ char *type, char *sym)
+{
+ return -ERANGE;
}
#endif /* CONFIG_KPROBES */
+
static inline int disable_kretprobe(struct kretprobe *rp)
{
return disable_kprobe(&rp->kp);
@@ -494,26 +529,74 @@ static inline int enable_kretprobe(struct kretprobe *rp)
{
return enable_kprobe(&rp->kp);
}
-static inline int disable_jprobe(struct jprobe *jp)
-{
- return disable_kprobe(&jp->kp);
-}
-static inline int enable_jprobe(struct jprobe *jp)
-{
- return enable_kprobe(&jp->kp);
-}
#ifndef CONFIG_KPROBES
static inline bool is_kprobe_insn_slot(unsigned long addr)
{
return false;
}
-#endif
+#endif /* !CONFIG_KPROBES */
+
#ifndef CONFIG_OPTPROBES
static inline bool is_kprobe_optinsn_slot(unsigned long addr)
{
return false;
}
+#endif /* !CONFIG_OPTPROBES */
+
+#ifdef CONFIG_KRETPROBES
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
+{
+ return is_rethook_trampoline(addr);
+}
+
+static nokprobe_inline
+unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
+ struct llist_node **cur)
+{
+ return rethook_find_ret_addr(tsk, (unsigned long)fp, cur);
+}
+#else
+static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
+{
+ return (void *)addr == kretprobe_trampoline_addr();
+}
+
+unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
+ struct llist_node **cur);
+#endif
+#else
+static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
+{
+ return false;
+}
+
+static nokprobe_inline
+unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
+ struct llist_node **cur)
+{
+ return 0;
+}
#endif
+/* Returns true if kprobes handled the fault */
+static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs,
+ unsigned int trap)
+{
+ if (!IS_ENABLED(CONFIG_KPROBES))
+ return false;
+ if (user_mode(regs))
+ return false;
+ /*
+ * To be potentially processing a kprobe fault and to be allowed
+ * to call kprobe_running(), we have to be non-preemptible.
+ */
+ if (preemptible())
+ return false;
+ if (!kprobe_running())
+ return false;
+ return kprobe_fault_handler(regs, trap);
+}
+
#endif /* _LINUX_KPROBES_H */
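
With jprobes and the fault/break handler hooks gone, the surviving entry points are register_kprobe() and register_kretprobe(). A hedged sketch of a return probe against this header (the target symbol is only an illustrative placeholder; pick any non-blacklisted function):

#include <linux/kprobes.h>

static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	/* ri->data provides data_size bytes of per-instance scratch space */
	return 0;
}

static struct kretprobe example_krp = {
	.kp.symbol_name	= "kernel_clone",	/* illustrative target */
	.handler	= example_ret_handler,
	.maxactive	= 20,	/* caps concurrent in-flight returns */
};

/*
 * register_kretprobe(&example_krp) returns 0 on success; with kprobes
 * compiled out the stubs above now return -EOPNOTSUPP instead of -ENOSYS.
 */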
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 29220724bf1c..d32e21a2538c 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* kref.h - library routines for handling generic reference counted objects
*
@@ -7,9 +8,6 @@
* based on kobject.h which was:
* Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
* Copyright (C) 2002-2003 Open Source Development Labs
- *
- * This file is released under the GPLv2.
- *
*/
#ifndef _KREF_H_
@@ -53,10 +51,7 @@ static inline void kref_get(struct kref *kref)
* @release: pointer to the function that will clean up the object when the
* last reference to the object is released.
* This pointer is required, and it is not acceptable to pass kfree
- * in as this function. If the caller does pass kfree to this
- * function, you will be publicly mocked mercilessly by the kref
- * maintainer, and anyone else who happens to notice it. You have
- * been warned.
+ * in as this function.
*
* Decrement the refcount, and if 0, call release().
* Return 1 if the object was removed, otherwise return 0. Beware, if this
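
The kref_put() kernel-doc above states the key rule: release() must be a proper destructor, and passing kfree directly is not acceptable. A hedged sketch of the canonical pattern (example_* names are invented):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_res {
	struct kref ref;
	/* ... payload ... */
};

static void example_release(struct kref *kref)
{
	kfree(container_of(kref, struct example_res, ref));
}

static void example_use(struct example_res *res)
{
	kref_get(&res->ref);			/* pin while in use */
	/* ... */
	kref_put(&res->ref, example_release);	/* may free res */
}

/* The owner starts with one reference: kref_init(&res->ref); */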
diff --git a/include/linux/kref_api.h b/include/linux/kref_api.h
new file mode 100644
index 000000000000..d67e554721d2
--- /dev/null
+++ b/include/linux/kref_api.h
@@ -0,0 +1 @@
+#include <linux/kref.h>
diff --git a/include/linux/ks0108.h b/include/linux/ks0108.h
index cb311798e0bc..1a37a664f915 100644
--- a/include/linux/ks0108.h
+++ b/include/linux/ks0108.h
@@ -1,25 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Filename: ks0108.h
* Version: 0.1.0
* Description: ks0108 LCD Controller driver header
- * License: GPLv2
*
- * Author: Copyright (C) Miguel Ojeda Sandonis
+ * Author: Copyright (C) Miguel Ojeda <ojeda@kernel.org>
* Date: 2006-10-31
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _KS0108_H_
diff --git a/include/linux/ks8842.h b/include/linux/ks8842.h
index 14ba4452296e..96ffdf3cbe21 100644
--- a/include/linux/ks8842.h
+++ b/include/linux/ks8842.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ks8842.h KS8842 platform data struct definition
* Copyright (c) 2010 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _LINUX_KS8842_H
diff --git a/include/linux/ks8851_mll.h b/include/linux/ks8851_mll.h
index e9ccfb59ed30..57c0a39ed796 100644
--- a/include/linux/ks8851_mll.h
+++ b/include/linux/ks8851_mll.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ks8861_mll platform data struct definition
* Copyright (c) 2012 BTicino S.p.A.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _LINUX_KS8851_MLL_H
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 78b44a024eaa..899a314bc487 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
@@ -14,19 +15,31 @@
#include <linux/sched.h>
#include <linux/sched/coredump.h>
-struct stable_node;
-struct mem_cgroup;
-
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags);
+
+void ksm_add_vma(struct vm_area_struct *vma);
+int ksm_enable_merge_any(struct mm_struct *mm);
+int ksm_disable_merge_any(struct mm_struct *mm);
+int ksm_disable(struct mm_struct *mm);
+
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
- if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
- return __ksm_enter(mm);
+ int ret;
+
+ if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) {
+ ret = __ksm_enter(mm);
+ if (ret)
+ return ret;
+ }
+
+ if (test_bit(MMF_VM_MERGE_ANY, &oldmm->flags))
+ set_bit(MMF_VM_MERGE_ANY, &mm->flags);
+
return 0;
}
@@ -36,17 +49,6 @@ static inline void ksm_exit(struct mm_struct *mm)
__ksm_exit(mm);
}
-static inline struct stable_node *page_stable_node(struct page *page)
-{
- return PageKsm(page) ? page_rmapping(page) : NULL;
-}
-
-static inline void set_page_stable_node(struct page *page,
- struct stable_node *stable_node)
-{
- page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
-}
-
/*
* When do_swap_page() first faults in from swap what used to be a KSM page,
* no problem, it will be assigned to this vma's anon_vma; but thereafter,
@@ -61,11 +63,29 @@ static inline void set_page_stable_node(struct page *page,
struct page *ksm_might_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address);
-void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
-void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
+void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
+
+#ifdef CONFIG_MEMORY_FAILURE
+void collect_procs_ksm(struct page *page, struct list_head *to_kill,
+ int force_early);
+#endif
+
+#ifdef CONFIG_PROC_FS
+long ksm_process_profit(struct mm_struct *);
+#endif /* CONFIG_PROC_FS */
#else /* !CONFIG_KSM */
+static inline void ksm_add_vma(struct vm_area_struct *vma)
+{
+}
+
+static inline int ksm_disable(struct mm_struct *mm)
+{
+ return 0;
+}
+
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
return 0;
@@ -75,6 +95,13 @@ static inline void ksm_exit(struct mm_struct *mm)
{
}
+#ifdef CONFIG_MEMORY_FAILURE
+static inline void collect_procs_ksm(struct page *page,
+ struct list_head *to_kill, int force_early)
+{
+}
+#endif
+
#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags)
@@ -88,18 +115,12 @@ static inline struct page *ksm_might_need_to_copy(struct page *page,
return page;
}
-static inline int page_referenced_ksm(struct page *page,
- struct mem_cgroup *memcg, unsigned long *vm_flags)
-{
- return 0;
-}
-
-static inline void rmap_walk_ksm(struct page *page,
+static inline void rmap_walk_ksm(struct folio *folio,
struct rmap_walk_control *rwc)
{
}
-static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
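
For context (not part of this patch): per-VMA merging is still opted into from userspace via madvise(), which lands in ksm_madvise() above, while the new MMF_VM_MERGE_ANY flag covers the process-wide opt-in that ksm_fork() now propagates to the child. The userspace side, for illustration:

/* Userspace sketch: opt a mapping into KSM merging. */
#include <sys/mman.h>

static int example_opt_in(void *addr, size_t len)
{
	return madvise(addr, len, MADV_MERGEABLE);
}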
diff --git a/include/linux/kstrtox.h b/include/linux/kstrtox.h
new file mode 100644
index 000000000000..529974e22ea7
--- /dev/null
+++ b/include/linux/kstrtox.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KSTRTOX_H
+#define _LINUX_KSTRTOX_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+/* Internal, do not use. */
+int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
+int __must_check _kstrtol(const char *s, unsigned int base, long *res);
+
+int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
+int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
+
+/**
+ * kstrtoul - convert a string to an unsigned long
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign, but not a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics: if it begins with 0x the number will be parsed as
+ * hexadecimal (case insensitive); if it otherwise begins with 0, it will be
+ * parsed as octal; otherwise it will be parsed as decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Preferred over simple_strtoul(). Return code must be checked.
+ */
+static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
+{
+ /*
+ * We want to shortcut function call, but
+ * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0.
+ */
+ if (sizeof(unsigned long) == sizeof(unsigned long long) &&
+ __alignof__(unsigned long) == __alignof__(unsigned long long))
+ return kstrtoull(s, base, (unsigned long long *)res);
+ else
+ return _kstrtoul(s, base, res);
+}
+
+/**
+ * kstrtol - convert a string to a long
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign or a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics: if it begins with 0x the number will be parsed as
+ * hexadecimal (case insensitive); if it otherwise begins with 0, it will be
+ * parsed as octal; otherwise it will be parsed as decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Preferred over simple_strtol(). Return code must be checked.
+ */
+static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
+{
+ /*
+ * We want to shortcut function call, but
+ * __builtin_types_compatible_p(long, long long) = 0.
+ */
+ if (sizeof(long) == sizeof(long long) &&
+ __alignof__(long) == __alignof__(long long))
+ return kstrtoll(s, base, (long long *)res);
+ else
+ return _kstrtol(s, base, res);
+}
+
+int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
+int __must_check kstrtoint(const char *s, unsigned int base, int *res);
+
+static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
+{
+ return kstrtoull(s, base, res);
+}
+
+static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
+{
+ return kstrtoll(s, base, res);
+}
+
+static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
+{
+ return kstrtouint(s, base, res);
+}
+
+static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
+{
+ return kstrtoint(s, base, res);
+}
+
+int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
+int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
+int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
+int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
+int __must_check kstrtobool(const char *s, bool *res);
+
+int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
+int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
+int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
+int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
+int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
+int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
+int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
+int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
+int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
+int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
+int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
+
+static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
+{
+ return kstrtoull_from_user(s, count, base, res);
+}
+
+static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
+{
+ return kstrtoll_from_user(s, count, base, res);
+}
+
+static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
+{
+ return kstrtouint_from_user(s, count, base, res);
+}
+
+static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
+{
+ return kstrtoint_from_user(s, count, base, res);
+}
+
+/*
+ * Use kstrto<foo> instead.
+ *
+ * NOTE: simple_strto<foo> does not check for the range overflow and,
+ * depending on the input, may give interesting results.
+ *
+ * Use these functions if and only if you cannot use kstrto<foo>: their
+ * conversion stops at the first non-digit character, which may be far
+ * beyond the supported range. That makes them useful for parsing strings
+ * like 10x50 or 12:21 without altering the original string or a temporary
+ * buffer, but keep the overflow caveat above in mind.
+ */
+
+extern unsigned long simple_strtoul(const char *, char **, unsigned int);
+extern long simple_strtol(const char *, char **, unsigned int);
+extern unsigned long long simple_strtoull(const char *, char **, unsigned int);
+extern long long simple_strtoll(const char *, char **, unsigned int);
+
+static inline int strtobool(const char *s, bool *res)
+{
+ return kstrtobool(s, res);
+}
+
+#endif /* _LINUX_KSTRTOX_H */
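
The contrast the comments above draw between kstrto<foo>() and simple_strto<foo>() is easy to show concretely (a hedged sketch; the commented results follow from the documented semantics):

#include <linux/kstrtox.h>

static void example_parse(void)
{
	unsigned long val;
	int ret;

	ret = kstrtoul("0x2a\n", 0, &val);
	/* ret == 0, val == 42: base auto-detected, one trailing '\n' ok */

	ret = kstrtoul("10x50", 0, &val);
	/* ret == -EINVAL: the whole string must convert */

	/*
	 * simple_strtoul("10x50", &end, 0) would instead return 10 and
	 * leave end pointing at "x50" - but it does not check overflow.
	 */
}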
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 4fec8b775895..30e5bec81d2b 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -1,9 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KTHREAD_H
#define _LINUX_KTHREAD_H
/* Simple interface for creating and stopping kernel threads without mess. */
#include <linux/err.h>
#include <linux/sched.h>
+struct mm_struct;
+
__printf(4, 5)
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
void *data,
@@ -15,7 +18,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
* @threadfn: the function to run in the thread
* @data: data pointer for @threadfn()
* @namefmt: printf-style format string for the thread name
- * @...: arguments for @namefmt.
+ * @arg: arguments for @namefmt.
*
* This macro will create a kthread on the current node, leaving it in
* the stopped state. This is just a helper for kthread_create_on_node();
@@ -30,6 +33,12 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
unsigned int cpu,
const char *namefmt);
+void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk);
+bool set_kthread_struct(struct task_struct *p);
+
+void kthread_set_per_cpu(struct task_struct *k, int cpu);
+bool kthread_is_per_cpu(struct task_struct *k);
+
/**
* kthread_run - create and wake a thread.
* @threadfn: the function to run until signal_pending(current).
@@ -48,18 +57,47 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
__k; \
})
+/**
+ * kthread_run_on_cpu - create and wake a cpu bound thread.
+ * @threadfn: the function to run until signal_pending(current).
+ * @data: data ptr for @threadfn.
+ * @cpu: The cpu on which the thread should be bound.
+ * @namefmt: printf-style name for the thread. Format is restricted
+ * to "name.*%u". Code fills in cpu number.
+ *
+ * Description: Convenient wrapper for kthread_create_on_cpu()
+ * followed by wake_up_process(). Returns the kthread or
+ * ERR_PTR(-ENOMEM).
+ */
+static inline struct task_struct *
+kthread_run_on_cpu(int (*threadfn)(void *data), void *data,
+ unsigned int cpu, const char *namefmt)
+{
+ struct task_struct *p;
+
+ p = kthread_create_on_cpu(threadfn, data, cpu, namefmt);
+ if (!IS_ERR(p))
+ wake_up_process(p);
+
+ return p;
+}
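A usage sketch (the names are assumptions, not from the patch): spawn a worker bound to CPU 1; kthread_create_on_cpu() fills the "%u" in the name format with the cpu number.

static int my_threadfn(void *data)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static int start_my_worker(void)
{
	/* Comes up already running, named "mywork/1". */
	struct task_struct *t = kthread_run_on_cpu(my_threadfn, NULL, 1,
						   "mywork/%u");

	return IS_ERR(t) ? PTR_ERR(t) : 0;
}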
+
void free_kthread_struct(struct task_struct *k);
void kthread_bind(struct task_struct *k, unsigned int cpu);
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
int kthread_stop(struct task_struct *k);
bool kthread_should_stop(void);
bool kthread_should_park(void);
+bool __kthread_should_park(struct task_struct *k);
bool kthread_freezable_should_stop(bool *was_frozen);
+void *kthread_func(struct task_struct *k);
void *kthread_data(struct task_struct *k);
void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);
+void kthread_exit(long result) __noreturn;
+void kthread_complete_and_exit(struct completion *, long) __noreturn;
int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
@@ -75,7 +113,7 @@ extern int tsk_fork_get_node(struct task_struct *tsk);
*/
struct kthread_work;
typedef void (*kthread_work_func_t)(struct kthread_work *work);
-void kthread_delayed_work_timer_fn(unsigned long __data);
+void kthread_delayed_work_timer_fn(struct timer_list *t);
enum {
KTW_FREEZABLE = 1 << 0, /* freeze during suspend */
@@ -83,7 +121,7 @@ enum {
struct kthread_worker {
unsigned int flags;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct list_head work_list;
struct list_head delayed_work_list;
struct task_struct *task;
@@ -103,12 +141,6 @@ struct kthread_delayed_work {
struct timer_list timer;
};
-#define KTHREAD_WORKER_INIT(worker) { \
- .lock = __SPIN_LOCK_UNLOCKED((worker).lock), \
- .work_list = LIST_HEAD_INIT((worker).work_list), \
- .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
- }
-
#define KTHREAD_WORK_INIT(work, fn) { \
.node = LIST_HEAD_INIT((work).node), \
.func = (fn), \
@@ -116,14 +148,10 @@ struct kthread_delayed_work {
#define KTHREAD_DELAYED_WORK_INIT(dwork, fn) { \
.work = KTHREAD_WORK_INIT((dwork).work, (fn)), \
- .timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn, \
- 0, (unsigned long)&(dwork), \
+ .timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn,\
TIMER_IRQSAFE), \
}
-#define DEFINE_KTHREAD_WORKER(worker) \
- struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)
-
#define DEFINE_KTHREAD_WORK(work, fn) \
struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
@@ -131,19 +159,6 @@ struct kthread_delayed_work {
struct kthread_delayed_work dwork = \
KTHREAD_DELAYED_WORK_INIT(dwork, fn)
-/*
- * kthread_worker.lock needs its own lockdep class key when defined on
- * stack with lockdep enabled. Use the following macros in such cases.
- */
-#ifdef CONFIG_LOCKDEP
-# define KTHREAD_WORKER_INIT_ONSTACK(worker) \
- ({ kthread_init_worker(&worker); worker; })
-# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \
- struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
-#else
-# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
-#endif
-
extern void __kthread_init_worker(struct kthread_worker *worker,
const char *name, struct lock_class_key *key);
@@ -163,10 +178,9 @@ extern void __kthread_init_worker(struct kthread_worker *worker,
#define kthread_init_delayed_work(dwork, fn) \
do { \
kthread_init_work(&(dwork)->work, (fn)); \
- __setup_timer(&(dwork)->timer, \
- kthread_delayed_work_timer_fn, \
- (unsigned long)(dwork), \
- TIMER_IRQSAFE); \
+ timer_setup(&(dwork)->timer, \
+ kthread_delayed_work_timer_fn, \
+ TIMER_IRQSAFE); \
} while (0)
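With the timer_setup()-based initializer, a minimal sketch of defining and queueing delayed work looks like this (the worker setup and the names are assumptions):

static void my_work_fn(struct kthread_work *work)
{
	/* Runs in the worker thread once the timer fires. */
}

static struct kthread_delayed_work my_dwork;

/* In setup code, with 'worker' a running struct kthread_worker: */
kthread_init_delayed_work(&my_dwork, my_work_fn);
kthread_queue_delayed_work(worker, &my_dwork, msecs_to_jiffies(100));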
int kthread_worker_fn(void *worker_ptr);
@@ -198,4 +212,15 @@ bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);
void kthread_destroy_worker(struct kthread_worker *worker);
+void kthread_use_mm(struct mm_struct *mm);
+void kthread_unuse_mm(struct mm_struct *mm);
+
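A hedged sketch of the intended pairing (the mm, buffers, and length are assumptions): a kernel thread temporarily adopts a user mm so uaccess helpers resolve against that address space, then drops it again.

/*
 * Inside a kthread; 'mm' is a user mm the caller holds a reference on,
 * and 'ubuf' a __user pointer valid in that mm (both assumptions).
 */
kthread_use_mm(mm);
if (copy_from_user(kbuf, ubuf, len))
	ret = -EFAULT;
kthread_unuse_mm(mm);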
+struct cgroup_subsys_state;
+
+#ifdef CONFIG_BLK_CGROUP
+void kthread_associate_blkcg(struct cgroup_subsys_state *css);
+struct cgroup_subsys_state *kthread_blkcg(void);
+#else
+static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { }
+#endif
#endif /* _LINUX_KTHREAD_H */
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 0c8bd45c8206..73f20deb497d 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -23,6 +23,7 @@
#include <linux/time.h>
#include <linux/jiffies.h>
+#include <asm/bug.h>
/* Nanosecond scalar representation for kernel time values */
typedef s64 ktime_t;
@@ -66,35 +67,20 @@ static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
*/
#define ktime_sub_ns(kt, nsval) ((kt) - (nsval))
-/* convert a timespec to ktime_t format: */
-static inline ktime_t timespec_to_ktime(struct timespec ts)
-{
- return ktime_set(ts.tv_sec, ts.tv_nsec);
-}
-
/* convert a timespec64 to ktime_t format: */
static inline ktime_t timespec64_to_ktime(struct timespec64 ts)
{
return ktime_set(ts.tv_sec, ts.tv_nsec);
}
-/* convert a timeval to ktime_t format: */
-static inline ktime_t timeval_to_ktime(struct timeval tv)
-{
- return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
-}
-
-/* Map the ktime_t to timespec conversion to ns_to_timespec function */
-#define ktime_to_timespec(kt) ns_to_timespec((kt))
-
/* Map the ktime_t to timespec conversion to ns_to_timespec function */
#define ktime_to_timespec64(kt) ns_to_timespec64((kt))
-/* Map the ktime_t to timeval conversion to ns_to_timeval function */
-#define ktime_to_timeval(kt) ns_to_timeval((kt))
-
-/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
-#define ktime_to_ns(kt) (kt)
+/* Convert ktime_t to nanoseconds */
+static inline s64 ktime_to_ns(const ktime_t kt)
+{
+ return kt;
+}
/**
* ktime_compare - Compares two ktime_t variables for less, greater or equal
@@ -213,25 +199,6 @@ static inline ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec)
extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
/**
- * ktime_to_timespec_cond - convert a ktime_t variable to timespec
- * format only if the variable contains data
- * @kt: the ktime_t variable to convert
- * @ts: the timespec variable to store the result in
- *
- * Return: %true if there was a successful conversion, %false if kt was 0.
- */
-static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
- struct timespec *ts)
-{
- if (kt) {
- *ts = ktime_to_timespec(kt);
- return true;
- } else {
- return false;
- }
-}
-
-/**
* ktime_to_timespec64_cond - convert a ktime_t variable to timespec64
* format only if the variable contains data
* @kt: the ktime_t variable to convert
@@ -250,14 +217,7 @@ static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
}
}
-/*
- * The resolution of the clocks. The resolution value is returned in
- * the clock_getres() system call to give application programmers an
- * idea of the (in)accuracy of timers. Timer values are rounded up to
- * this resolution values.
- */
-#define LOW_RES_NSEC TICK_NSEC
-#define KTIME_LOW_RES (LOW_RES_NSEC)
+#include <vdso/ktime.h>
static inline ktime_t ns_to_ktime(u64 ns)
{
diff --git a/include/linux/ktime_api.h b/include/linux/ktime_api.h
new file mode 100644
index 000000000000..f697d493960f
--- /dev/null
+++ b/include/linux/ktime_api.h
@@ -0,0 +1 @@
+#include <linux/ktime.h>
diff --git a/include/linux/kvm_dirty_ring.h b/include/linux/kvm_dirty_ring.h
new file mode 100644
index 000000000000..4862c98d80d3
--- /dev/null
+++ b/include/linux/kvm_dirty_ring.h
@@ -0,0 +1,101 @@
+#ifndef KVM_DIRTY_RING_H
+#define KVM_DIRTY_RING_H
+
+#include <linux/kvm.h>
+
+/**
+ * kvm_dirty_ring: KVM internal dirty ring structure
+ *
+ * @dirty_index: free running counter that points to the next slot in
+ * dirty_ring->dirty_gfns, where a new dirty page should go
+ * @reset_index: free running counter that points to the next dirty page
+ * in dirty_ring->dirty_gfns for which dirty trap needs to
+ * be reenabled
+ * @size: size of the compact list, dirty_ring->dirty_gfns
+ * @soft_limit: when the number of dirty pages in the list reaches this
+ *		limit, the vcpu that owns this ring should exit to userspace
+ * to allow userspace to harvest all the dirty pages
+ * @dirty_gfns: the array to keep the dirty gfns
+ * @index: index of this dirty ring
+ */
+struct kvm_dirty_ring {
+ u32 dirty_index;
+ u32 reset_index;
+ u32 size;
+ u32 soft_limit;
+ struct kvm_dirty_gfn *dirty_gfns;
+ int index;
+};
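To make the two free-running counters concrete, an editorial sketch (these helpers are assumptions, not declared here): the distance between the counters is the number of entries userspace has yet to harvest, and crossing the soft limit is what should trigger the exit described above.

/* Entries pushed but not yet reset by userspace. */
static inline u32 example_dirty_ring_used(struct kvm_dirty_ring *ring)
{
	return ring->dirty_index - ring->reset_index;
}

/* Once the soft limit is crossed, the owning vcpu should exit to userspace. */
static inline bool example_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
{
	return example_dirty_ring_used(ring) >= ring->soft_limit;
}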
+
+#ifndef CONFIG_HAVE_KVM_DIRTY_RING
+/*
+ * If CONFIG_HAVE_KVM_DIRTY_RING is not defined, kvm_dirty_ring.o is not
+ * built either, so define these nop functions for the arch.
+ */
+static inline u32 kvm_dirty_ring_get_rsvd_entries(void)
+{
+ return 0;
+}
+
+static inline bool kvm_use_dirty_bitmap(struct kvm *kvm)
+{
+ return true;
+}
+
+static inline int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring,
+ int index, u32 size)
+{
+ return 0;
+}
+
+static inline int kvm_dirty_ring_reset(struct kvm *kvm,
+ struct kvm_dirty_ring *ring)
+{
+ return 0;
+}
+
+static inline void kvm_dirty_ring_push(struct kvm_vcpu *vcpu,
+ u32 slot, u64 offset)
+{
+}
+
+static inline struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring,
+ u32 offset)
+{
+ return NULL;
+}
+
+static inline void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
+{
+}
+
+#else /* CONFIG_HAVE_KVM_DIRTY_RING */
+
+int kvm_cpu_dirty_log_size(void);
+bool kvm_use_dirty_bitmap(struct kvm *kvm);
+bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm);
+u32 kvm_dirty_ring_get_rsvd_entries(void);
+int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size);
+
+/*
+ * called with kvm->slots_lock held, returns the number of
+ * processed pages.
+ */
+int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring);
+
+/*
+ * Push a dirty gfn (identified by @slot and @offset) into the ring.
+ */
+void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset);
+
+bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu);
+
+/* for use in vm_operations_struct */
+struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset);
+
+void kvm_dirty_ring_free(struct kvm_dirty_ring *ring);
+
+#endif /* CONFIG_HAVE_KVM_DIRTY_RING */
+
+#endif /* KVM_DIRTY_RING_H */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 648b34cabb38..0e571e973bc2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_HOST_H
#define __KVM_HOST_H
-/*
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- */
#include <linux/types.h>
#include <linux/hardirq.h>
@@ -13,20 +10,31 @@
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
+#include <linux/sched/stat.h>
#include <linux/bug.h>
+#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
-#include <linux/swait.h>
+#include <linux/rcuwait.h>
#include <linux/refcount.h>
+#include <linux/nospec.h>
+#include <linux/notifier.h>
+#include <linux/ftrace.h>
+#include <linux/hashtable.h>
+#include <linux/instrumentation.h>
+#include <linux/interval_tree.h>
+#include <linux/rbtree.h>
+#include <linux/xarray.h>
#include <asm/signal.h>
#include <linux/kvm.h>
@@ -35,18 +43,40 @@
#include <linux/kvm_types.h>
#include <asm/kvm_host.h>
+#include <linux/kvm_dirty_ring.h>
-#ifndef KVM_MAX_VCPU_ID
-#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
+#ifndef KVM_MAX_VCPU_IDS
+#define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
#endif
/*
- * The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used
- * in kvm, other bits are visible for userspace which are defined in
+ * Bits 16 ~ 31 of kvm_userspace_memory_region::flags are internally
+ * used in kvm; other bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
*/
#define KVM_MEMSLOT_INVALID (1UL << 16)
+/*
+ * Bit 63 of the memslot generation number is an "update in-progress flag",
+ * e.g. is temporarily set for the duration of kvm_swap_active_memslots().
+ * This flag effectively creates a unique generation number that is used to
+ * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
+ * i.e. may (or may not) have come from the previous memslots generation.
+ *
+ * This is necessary because the actual memslots update is not atomic with
+ * respect to the generation number update. Updating the generation number
+ * first would allow a vCPU to cache a spte from the old memslots using the
+ * new generation number, and updating the generation number after switching
+ * to the new memslots would allow cache hits using the old generation number
+ * to reference the defunct memslots.
+ *
+ * This mechanism is used to prevent getting hits in KVM's caches while a
+ * memslot update is in-progress, and to prevent cache hits *after* updating
+ * the actual generation number against accesses that were inserted into the
+ * cache *before* the memslots were updated.
+ */
+#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS BIT_ULL(63)
+
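As a small illustration (editorial, derived from the definition above), a consumer separates the raw generation number from the in-progress bit by masking:

u64 gen = slots->generation;
bool update_in_progress = gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

/* The raw generation number, with the flag stripped. */
gen &= ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;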
/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS 2
@@ -66,6 +96,7 @@
#define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2)
+#define KVM_PFN_ERR_SIGPENDING (KVM_PFN_ERR_MASK + 3)
/*
* error pfns indicate that the gfn is in a slot but failed to
@@ -77,6 +108,15 @@ static inline bool is_error_pfn(kvm_pfn_t pfn)
}
/*
+ * KVM_PFN_ERR_SIGPENDING indicates that fetching the PFN was interrupted
+ * by a pending signal. Note, the signal may or may not be fatal.
+ */
+static inline bool is_sigpending_pfn(kvm_pfn_t pfn)
+{
+ return pfn == KVM_PFN_ERR_SIGPENDING;
+}
+
+/*
* error_noslot pfns indicate that the gfn cannot be
* translated to a pfn - it is not in a slot or it failed to
* translate to a pfn.
@@ -118,28 +158,45 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQUEST_MASK GENMASK(7,0)
#define KVM_REQUEST_NO_WAKEUP BIT(8)
#define KVM_REQUEST_WAIT BIT(9)
+#define KVM_REQUEST_NO_ACTION BIT(10)
/*
* Architecture-independent vcpu->requests bit members
- * Bits 4-7 are reserved for more arch-independent bits.
+ * Bits 3-7 are reserved for more arch-independent bits.
*/
-#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_PENDING_TIMER 2
-#define KVM_REQ_UNHALT 3
-#define KVM_REQUEST_ARCH_BASE 8
+#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_VM_DEAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_UNBLOCK 2
+#define KVM_REQ_DIRTY_RING_SOFT_FULL 3
+#define KVM_REQUEST_ARCH_BASE 8
+
+/*
+ * KVM_REQ_OUTSIDE_GUEST_MODE exists purely as a way to force the vCPU to
+ * OUTSIDE_GUEST_MODE. KVM_REQ_OUTSIDE_GUEST_MODE differs from a vCPU "kick"
+ * in that it ensures the vCPU has reached OUTSIDE_GUEST_MODE before continuing
+ * on. A kick only guarantees that the vCPU is on its way out, e.g. a previous
+ * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no
+ * guarantee the vCPU received an IPI and has actually exited guest mode.
+ */
+#define KVM_REQ_OUTSIDE_GUEST_MODE (KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
- BUILD_BUG_ON((unsigned)(nr) >= 32 - KVM_REQUEST_ARCH_BASE); \
+ BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
(unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0)
+bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
+ unsigned long *vcpu_bitmap);
+bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
+bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
+ struct kvm_vcpu *except);
+bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
+ unsigned long *vcpu_bitmap);
+
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
-extern struct kmem_cache *kvm_vcpu_cache;
-
-extern spinlock_t kvm_lock;
+extern struct mutex kvm_lock;
extern struct list_head vm_list;
struct kvm_io_range {
@@ -172,8 +229,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev);
-void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
- struct kvm_io_device *dev);
+int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
gpa_t addr);
@@ -184,19 +241,34 @@ struct kvm_async_pf {
struct list_head queue;
struct kvm_vcpu *vcpu;
struct mm_struct *mm;
- gva_t gva;
+ gpa_t cr2_or_gpa;
unsigned long addr;
struct kvm_arch_async_pf arch;
bool wakeup_all;
+ bool notpresent_injected;
};
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
- struct kvm_arch_async_pf *arch);
+bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
+#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+struct kvm_gfn_range {
+ struct kvm_memory_slot *slot;
+ gfn_t start;
+ gfn_t end;
+ pte_t pte;
+ bool may_block;
+};
+bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
+bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
+bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
+bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
+#endif
+
enum {
OUTSIDE_GUEST_MODE,
IN_GUEST_MODE,
@@ -204,6 +276,37 @@ enum {
READING_SHADOW_PAGE_TABLES,
};
+#define KVM_UNMAPPED_PAGE ((void *) 0x500 + POISON_POINTER_DELTA)
+
+struct kvm_host_map {
+ /*
+	 * Only valid if the 'pfn' is managed by the host kernel (i.e. there is
+	 * a 'struct page' for it). When using the mem= kernel parameter, some
+	 * memory can be used as guest memory but is not managed by the host
+	 * kernel.
+ * If 'pfn' is not managed by the host kernel, this field is
+ * initialized to KVM_UNMAPPED_PAGE.
+ */
+ struct page *page;
+ void *hva;
+ kvm_pfn_t pfn;
+ kvm_pfn_t gfn;
+};
+
+/*
+ * Used to check if the mapping is valid or not. Never use 'kvm_host_map'
+ * directly to check for that.
+ */
+static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
+{
+ return !!map->hva;
+}
+
+static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
+{
+ return single_task_running() && !need_resched() && ktime_before(cur, stop);
+}
+
/*
* Sometimes a large or cross-page mmio needs to be broken up into separate
* exits for userspace servicing.
@@ -220,24 +323,25 @@ struct kvm_vcpu {
struct preempt_notifier preempt_notifier;
#endif
int cpu;
- int vcpu_id;
- int srcu_idx;
+ int vcpu_id; /* id given by userspace at creation */
+ int vcpu_idx; /* index into kvm->vcpu_array */
+ int ____srcu_idx; /* Don't use this directly. You've been warned. */
+#ifdef CONFIG_PROVE_RCU
+ int srcu_depth;
+#endif
int mode;
- unsigned long requests;
+ u64 requests;
unsigned long guest_debug;
- int pre_pcpu;
- struct list_head blocked_vcpu_list;
-
struct mutex mutex;
struct kvm_run *run;
- int guest_fpu_loaded, guest_xcr0_loaded;
- struct swait_queue_head wq;
+#ifndef __KVM_HAVE_ARCH_WQP
+ struct rcuwait wait;
+#endif
struct pid __rcu *pid;
int sigset_active;
sigset_t sigset;
- struct kvm_vcpu_stat stat;
unsigned int halt_poll_ns;
bool valid_wakeup;
@@ -272,10 +376,171 @@ struct kvm_vcpu {
} spin_loop;
#endif
bool preempted;
+ bool ready;
struct kvm_vcpu_arch arch;
- struct dentry *debugfs_dentry;
+ struct kvm_vcpu_stat stat;
+ char stats_id[KVM_STATS_NAME_SIZE];
+ struct kvm_dirty_ring dirty_ring;
+
+ /*
+ * The most recently used memslot by this vCPU and the slots generation
+ * for which it is valid.
+ * No wraparound protection is needed since generations won't overflow in
+ * thousands of years, even assuming 1M memslot operations per second.
+ */
+ struct kvm_memory_slot *last_used_slot;
+ u64 last_used_slot_gen;
};
+/*
+ * Start accounting time towards a guest.
+ * Must be called before entering guest context.
+ */
+static __always_inline void guest_timing_enter_irqoff(void)
+{
+ /*
+	 * This is running in ioctl context so it's safe to assume that it's the
+ * stime pending cputime to flush.
+ */
+ instrumentation_begin();
+ vtime_account_guest_enter();
+ instrumentation_end();
+}
+
+/*
+ * Enter guest context and enter an RCU extended quiescent state.
+ *
+ * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
+ * unsafe to use any code which may directly or indirectly use RCU, tracing
+ * (including IRQ flag tracing), or lockdep. All code in this period must be
+ * non-instrumentable.
+ */
+static __always_inline void guest_context_enter_irqoff(void)
+{
+ /*
+ * KVM does not hold any references to rcu protected data when it
+ * switches CPU into a guest mode. In fact switching to a guest mode
+	 * is very similar to exiting to userspace from an RCU point of view. In
+	 * addition, the CPU may stay in guest mode for quite a long time (up to
+	 * one time slice). Let's treat guest mode as a quiescent state, just like
+ * we do with user-mode execution.
+ */
+ if (!context_tracking_guest_enter()) {
+ instrumentation_begin();
+ rcu_virt_note_context_switch();
+ instrumentation_end();
+ }
+}
+
+/*
+ * Deprecated. Architectures should move to guest_timing_enter_irqoff() and
+ * guest_state_enter_irqoff().
+ */
+static __always_inline void guest_enter_irqoff(void)
+{
+ guest_timing_enter_irqoff();
+ guest_context_enter_irqoff();
+}
+
+/**
+ * guest_state_enter_irqoff - Fixup state when entering a guest
+ *
+ * Entry to a guest will enable interrupts, but the kernel state is interrupts
+ * disabled when this is invoked. Also tell RCU about it.
+ *
+ * 1) Trace interrupts on state
+ * 2) Invoke context tracking if enabled to adjust RCU state
+ * 3) Tell lockdep that interrupts are enabled
+ *
+ * Invoked from architecture specific code before entering a guest.
+ * Must be called with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke guest_timing_enter_irqoff() before this.
+ *
+ * Note: this is analogous to exit_to_user_mode().
+ */
+static __always_inline void guest_state_enter_irqoff(void)
+{
+ instrumentation_begin();
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare();
+ instrumentation_end();
+
+ guest_context_enter_irqoff();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
+/*
+ * Exit guest context and exit an RCU extended quiescent state.
+ *
+ * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
+ * unsafe to use any code which may directly or indirectly use RCU, tracing
+ * (including IRQ flag tracing), or lockdep. All code in this period must be
+ * non-instrumentable.
+ */
+static __always_inline void guest_context_exit_irqoff(void)
+{
+ context_tracking_guest_exit();
+}
+
+/*
+ * Stop accounting time towards a guest.
+ * Must be called after exiting guest context.
+ */
+static __always_inline void guest_timing_exit_irqoff(void)
+{
+ instrumentation_begin();
+ /* Flush the guest cputime we spent on the guest */
+ vtime_account_guest_exit();
+ instrumentation_end();
+}
+
+/*
+ * Deprecated. Architectures should move to guest_state_exit_irqoff() and
+ * guest_timing_exit_irqoff().
+ */
+static __always_inline void guest_exit_irqoff(void)
+{
+ guest_context_exit_irqoff();
+ guest_timing_exit_irqoff();
+}
+
+static inline void guest_exit(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ guest_exit_irqoff();
+ local_irq_restore(flags);
+}
+
+/**
+ * guest_state_exit_irqoff - Establish state when returning from guest mode
+ *
+ * Entry from a guest disables interrupts, but guest mode is traced as
+ * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
+ *
+ * 1) Tell lockdep that interrupts are disabled
+ * 2) Invoke context tracking if enabled to reactivate RCU
+ * 3) Trace interrupts off state
+ *
+ * Invoked from architecture specific code after exiting a guest.
+ * Must be invoked with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke guest_timing_exit_irqoff() after this.
+ *
+ * Note: this is analogous to enter_from_user_mode().
+ */
+static __always_inline void guest_state_exit_irqoff(void)
+{
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ guest_context_exit_irqoff();
+
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+}
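Putting the four helpers together, the documented ordering for an architecture's run loop is, as a sketch with the world switch elided:

/* Interrupts are disabled across this whole sequence. */
guest_timing_enter_irqoff();	/* 1. start guest time accounting */
guest_state_enter_irqoff();	/* 2. IRQ tracing + RCU for entry */

/* ... architecture-specific world switch into the guest ... */

guest_state_exit_irqoff();	/* 3. IRQ tracing + RCU for exit */
guest_timing_exit_irqoff();	/* 4. stop guest time accounting */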
+
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
/*
@@ -293,7 +558,26 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
*/
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
+/*
+ * Since at idle each memslot belongs to two memslot sets it has to contain
+ * two embedded nodes for each data structure that it forms a part of.
+ *
+ * Two memslot sets (one active and one inactive) are necessary so the VM
+ * continues to run on one memslot set while the other is being modified.
+ *
+ * These two memslot sets normally point to the same set of memslots.
+ * They can, however, be desynchronized when performing a memslot management
+ * operation by replacing the memslot to be modified by its copy.
+ * After the operation is complete, both memslot sets once again point to
+ * the same, common set of memslot data.
+ *
+ * The memslots themselves are independent of each other so they can be
+ * individually added or deleted.
+ */
struct kvm_memory_slot {
+ struct hlist_node id_node[2];
+ struct interval_tree_node hva_node[2];
+ struct rb_node gfn_node[2];
gfn_t base_gfn;
unsigned long npages;
unsigned long *dirty_bitmap;
@@ -301,13 +585,30 @@ struct kvm_memory_slot {
unsigned long userspace_addr;
u32 flags;
short id;
+ u16 as_id;
};
+static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
+{
+ return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
+}
+
static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
+static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
+{
+ unsigned long len = kvm_dirty_bitmap_bytes(memslot);
+
+ return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
+}
+
+#ifndef KVM_DIRTY_LOG_MANUAL_CAPS
+#define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
+#endif
+
struct kvm_s390_adapter_int {
u64 ind_addr;
u64 summary_addr;
@@ -321,6 +622,13 @@ struct kvm_hv_sint {
u32 sint;
};
+struct kvm_xen_evtchn {
+ u32 port;
+ u32 vcpu_id;
+ int vcpu_idx;
+ u32 priority;
+};
+
struct kvm_kernel_irq_routing_entry {
u32 gsi;
u32 type;
@@ -341,6 +649,7 @@ struct kvm_kernel_irq_routing_entry {
} msi;
struct kvm_s390_adapter_int adapter;
struct kvm_hv_sint hv_sint;
+ struct kvm_xen_evtchn xen_evtchn;
};
struct hlist_node link;
};
@@ -353,18 +662,19 @@ struct kvm_irq_routing_table {
* Array indexed by gsi. Each entry contains list of irq chips
* the gsi is connected to.
*/
- struct hlist_head map[0];
+ struct hlist_head map[];
};
#endif
-#ifndef KVM_PRIVATE_MEM_SLOTS
-#define KVM_PRIVATE_MEM_SLOTS 0
-#endif
+bool kvm_arch_irqchip_in_kernel(struct kvm *kvm);
-#ifndef KVM_MEM_SLOTS_NUM
-#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+#ifndef KVM_INTERNAL_MEM_SLOTS
+#define KVM_INTERNAL_MEM_SLOTS 0
#endif
+#define KVM_MEM_SLOTS_NUM SHRT_MAX
+#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS)
+
#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
@@ -372,26 +682,61 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
}
#endif
-/*
- * Note:
- * memslots are not sorted by id anymore, please use id_to_memslot()
- * to get the memslot by its id.
- */
struct kvm_memslots {
u64 generation;
- struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
- /* The mapping table from slot id to the index in memslots[]. */
- short id_to_index[KVM_MEM_SLOTS_NUM];
- atomic_t lru_slot;
- int used_slots;
+ atomic_long_t last_used_slot;
+ struct rb_root_cached hva_tree;
+ struct rb_root gfn_tree;
+ /*
+ * The mapping table from slot id to memslot.
+ *
+ * 7-bit bucket count matches the size of the old id to index array for
+ * 512 slots, while giving good performance with this slot count.
+ * Higher bucket counts bring only small performance improvements but
+ * always result in higher memory usage (even for lower memslot counts).
+ */
+ DECLARE_HASHTABLE(id_hash, 7);
+ int node_idx;
};
struct kvm {
+#ifdef KVM_HAVE_MMU_RWLOCK
+ rwlock_t mmu_lock;
+#else
spinlock_t mmu_lock;
+#endif /* KVM_HAVE_MMU_RWLOCK */
+
struct mutex slots_lock;
+
+ /*
+ * Protects the arch-specific fields of struct kvm_memory_slots in
+ * use by the VM. To be used under the slots_lock (above) or in a
+ * kvm->srcu critical section where acquiring the slots_lock would
+ * lead to deadlock with the synchronize_srcu in
+ * kvm_swap_active_memslots().
+ */
+ struct mutex slots_arch_lock;
struct mm_struct *mm; /* userspace tied to this vm */
+ unsigned long nr_memslot_pages;
+ /* The two memslot sets - active and inactive (per address space) */
+ struct kvm_memslots __memslots[KVM_ADDRESS_SPACE_NUM][2];
+ /* The current active memslot set for each address space */
struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
- struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+ struct xarray vcpu_array;
+ /*
+ * Protected by slots_lock, but can be read outside if an
+ * incorrect answer is acceptable.
+ */
+ atomic_t nr_memslots_dirty_logging;
+
+ /* Used to wait for completion of MMU notifiers. */
+ spinlock_t mn_invalidate_lock;
+ unsigned long mn_active_invalidate_count;
+ struct rcuwait mn_memslots_update_rcuwait;
+
+ /* For management / invalidation of gfn_to_pfn_caches */
+ spinlock_t gpc_lock;
+ struct list_head gpc_list;
/*
* created_vcpus is protected by kvm->lock, and is incremented
@@ -400,6 +745,7 @@ struct kvm {
* and is accessed atomically.
*/
atomic_t online_vcpus;
+ int max_vcpus;
int created_vcpus;
int last_boosted_vcpu;
struct list_head vm_list;
@@ -409,6 +755,7 @@ struct kvm {
struct {
spinlock_t lock;
struct list_head items;
+ /* resampler_list update side is protected by resampler_lock. */
struct list_head resampler_list;
struct mutex resampler_lock;
} irqfds;
@@ -436,15 +783,29 @@ struct kvm {
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
struct mmu_notifier mmu_notifier;
- unsigned long mmu_notifier_seq;
- long mmu_notifier_count;
+ unsigned long mmu_invalidate_seq;
+ long mmu_invalidate_in_progress;
+ unsigned long mmu_invalidate_range_start;
+ unsigned long mmu_invalidate_range_end;
#endif
- long tlbs_dirty;
struct list_head devices;
+ u64 manual_dirty_log_protect;
struct dentry *debugfs_dentry;
struct kvm_stat_data **debugfs_stat_data;
struct srcu_struct srcu;
struct srcu_struct irq_srcu;
+ pid_t userspace_pid;
+ bool override_halt_poll_ns;
+ unsigned int max_halt_poll_ns;
+ u32 dirty_ring_size;
+ bool dirty_ring_with_bitmap;
+ bool vm_bugged;
+ bool vm_dead;
+
+#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
+ struct notifier_block pm_notifier;
+#endif
+ char stats_id[KVM_STATS_NAME_SIZE];
};
#define kvm_err(fmt, ...) \
@@ -473,32 +834,86 @@ struct kvm {
#define vcpu_err(vcpu, fmt, ...) \
kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+static inline void kvm_vm_dead(struct kvm *kvm)
+{
+ kvm->vm_dead = true;
+ kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD);
+}
+
+static inline void kvm_vm_bugged(struct kvm *kvm)
+{
+ kvm->vm_bugged = true;
+ kvm_vm_dead(kvm);
+}
+
+#define KVM_BUG(cond, kvm, fmt...) \
+({ \
+ int __ret = (cond); \
+ \
+ if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \
+ kvm_vm_bugged(kvm); \
+ unlikely(__ret); \
+})
+
+#define KVM_BUG_ON(cond, kvm) \
+({ \
+ int __ret = (cond); \
+ \
+ if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
+ kvm_vm_bugged(kvm); \
+ unlikely(__ret); \
+})
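Illustrative use (the condition and return value are assumptions): the macros evaluate to the condition, so a caller can warn once, mark the VM dead, and bail out in one step.

if (KVM_BUG_ON(vcpu->mode == IN_GUEST_MODE, kvm))
	return -EIO;	/* VM is now bugged and dead; refuse further work */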
+
+static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PROVE_RCU
+ WARN_ONCE(vcpu->srcu_depth++,
+ "KVM: Illegal vCPU srcu_idx LOCK, depth=%d", vcpu->srcu_depth - 1);
+#endif
+ vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+}
+
+static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu)
+{
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx);
+
+#ifdef CONFIG_PROVE_RCU
+ WARN_ONCE(--vcpu->srcu_depth,
+ "KVM: Illegal vCPU srcu_idx UNLOCK, depth=%d", vcpu->srcu_depth);
+#endif
+}
+
+static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
+{
+ return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
+}
+
static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
- lockdep_is_held(&kvm->slots_lock));
+ lockdep_is_held(&kvm->slots_lock) ||
+ !refcount_read(&kvm->users_count));
}
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
- /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
- * the caller has read kvm->online_vcpus before (as is the case
- * for kvm_for_each_vcpu, for example).
- */
+ int num_vcpus = atomic_read(&kvm->online_vcpus);
+ i = array_index_nospec(i, num_vcpus);
+
+ /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
smp_rmb();
- return kvm->vcpus[i];
+ return xa_load(&kvm->vcpu_array, i);
}
-#define kvm_for_each_vcpu(idx, vcpup, kvm) \
- for (idx = 0; \
- idx < atomic_read(&kvm->online_vcpus) && \
- (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
- idx++)
+#define kvm_for_each_vcpu(idx, vcpup, kvm) \
+ xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
+ (atomic_read(&kvm->online_vcpus) - 1))
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
struct kvm_vcpu *vcpu = NULL;
- int i;
+ unsigned long i;
if (id < 0)
return NULL;
@@ -512,26 +927,9 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
return NULL;
}
-static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
-{
- struct kvm_vcpu *tmp;
- int idx;
-
- kvm_for_each_vcpu(idx, tmp, vcpu->kvm)
- if (tmp == vcpu)
- return idx;
- BUG();
-}
-
-#define kvm_for_each_memslot(memslot, slots) \
- for (memslot = &slots->memslots[0]; \
- memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
- memslot++)
-
-int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
-void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
+void kvm_destroy_vcpus(struct kvm *kvm);
-int __must_check vcpu_load(struct kvm_vcpu *vcpu);
+void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
#ifdef __KVM_HAVE_IOAPIC
@@ -559,17 +957,21 @@ static inline void kvm_irqfd_exit(void)
{
}
#endif
-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
- struct module *module);
+int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module);
void kvm_exit(void);
void kvm_get_kvm(struct kvm *kvm);
+bool kvm_get_kvm_safe(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
+bool file_is_kvm(struct file *file);
+void kvm_put_kvm_no_destroy(struct kvm *kvm);
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
+ as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
- lockdep_is_held(&kvm->slots_lock));
+ lockdep_is_held(&kvm->slots_lock) ||
+ !refcount_read(&kvm->users_count));
}
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
@@ -584,18 +986,124 @@ static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
return __kvm_memslots(vcpu->kvm, as_id);
}
-static inline struct kvm_memory_slot *
-id_to_memslot(struct kvm_memslots *slots, int id)
+static inline bool kvm_memslots_empty(struct kvm_memslots *slots)
+{
+ return RB_EMPTY_ROOT(&slots->gfn_tree);
+}
+
+#define kvm_for_each_memslot(memslot, bkt, slots) \
+ hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
+ if (WARN_ON_ONCE(!memslot->npages)) { \
+ } else
+
+static inline
+struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
- int index = slots->id_to_index[id];
struct kvm_memory_slot *slot;
+ int idx = slots->node_idx;
- slot = &slots->memslots[index];
+ hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) {
+ if (slot->id == id)
+ return slot;
+ }
- WARN_ON(slot->id != id);
- return slot;
+ return NULL;
+}
+
+/* Iterator used for walking memslots that overlap a gfn range. */
+struct kvm_memslot_iter {
+ struct kvm_memslots *slots;
+ struct rb_node *node;
+ struct kvm_memory_slot *slot;
+};
+
+static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter)
+{
+ iter->node = rb_next(iter->node);
+ if (!iter->node)
+ return;
+
+ iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]);
}
+static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter,
+ struct kvm_memslots *slots,
+ gfn_t start)
+{
+ int idx = slots->node_idx;
+ struct rb_node *tmp;
+ struct kvm_memory_slot *slot;
+
+ iter->slots = slots;
+
+ /*
+	 * Find the so-called "upper bound" of a key - the first node that has
+ * its key strictly greater than the searched one (the start gfn in our case).
+ */
+ iter->node = NULL;
+ for (tmp = slots->gfn_tree.rb_node; tmp; ) {
+ slot = container_of(tmp, struct kvm_memory_slot, gfn_node[idx]);
+ if (start < slot->base_gfn) {
+ iter->node = tmp;
+ tmp = tmp->rb_left;
+ } else {
+ tmp = tmp->rb_right;
+ }
+ }
+
+ /*
+ * Find the slot with the lowest gfn that can possibly intersect with
+ * the range, so we'll ideally have slot start <= range start
+ */
+ if (iter->node) {
+ /*
+ * A NULL previous node means that the very first slot
+ * already has a higher start gfn.
+ * In this case slot start > range start.
+ */
+ tmp = rb_prev(iter->node);
+ if (tmp)
+ iter->node = tmp;
+ } else {
+ /* a NULL node below means no slots */
+ iter->node = rb_last(&slots->gfn_tree);
+ }
+
+ if (iter->node) {
+ iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]);
+
+ /*
+ * It is possible in the slot start < range start case that the
+ * found slot ends before or at range start (slot end <= range start)
+ * and so it does not overlap the requested range.
+ *
+		 * In such a non-overlapping case, the next slot (if it exists) will
+ * already have slot start > range start, otherwise the logic above
+ * would have found it instead of the current slot.
+ */
+ if (iter->slot->base_gfn + iter->slot->npages <= start)
+ kvm_memslot_iter_next(iter);
+ }
+}
+
+static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end)
+{
+ if (!iter->node)
+ return false;
+
+ /*
+	 * If this slot starts beyond or at the end of the range, so does
+	 * every next one.
+ */
+ return iter->slot->base_gfn < end;
+}
+
+/* Iterate over each memslot at least partially intersecting [start, end) range */
+#define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end) \
+ for (kvm_memslot_iter_start(iter, slots, start); \
+ kvm_memslot_iter_is_valid(iter, end); \
+ kvm_memslot_iter_next(iter))
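A usage sketch (slots, start, and end are assumptions): walk every memslot that at least partially intersects a gfn range.

struct kvm_memslot_iter iter;

kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
	struct kvm_memory_slot *slot = iter.slot;

	/* slot overlaps [start, end) at least partially */
}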
+
/*
* KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
* - create a new memory slot
@@ -618,22 +1126,16 @@ int kvm_set_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem);
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont);
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- unsigned long npages);
-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
+void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change);
-bool kvm_largepages_enabled(void);
-void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
@@ -651,30 +1153,30 @@ unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
-void kvm_set_page_accessed(struct page *page);
-kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
bool *writable);
-kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
-kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
-kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
- bool atomic, bool *async, bool write_fault,
- bool *writable);
+kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
+kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn);
+kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
+ bool atomic, bool interruptible, bool *async,
+ bool write_fault, bool *writable, hva_t *hva);
void kvm_release_pfn_clean(kvm_pfn_t pfn);
+void kvm_release_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
-void kvm_get_pfn(kvm_pfn_t pfn);
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int len);
-int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
- unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len);
+int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, unsigned int offset,
+ unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
@@ -682,21 +1184,67 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
- void *data, int offset, unsigned long len);
+ void *data, unsigned int offset,
+ unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len);
-int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
+
+#define __kvm_get_guest(kvm, gfn, offset, v) \
+({ \
+ unsigned long __addr = gfn_to_hva(kvm, gfn); \
+ typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
+ int __ret = -EFAULT; \
+ \
+ if (!kvm_is_error_hva(__addr)) \
+ __ret = get_user(v, __uaddr); \
+ __ret; \
+})
+
+#define kvm_get_guest(kvm, gpa, v) \
+({ \
+ gpa_t __gpa = gpa; \
+ struct kvm *__kvm = kvm; \
+ \
+ __kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT, \
+ offset_in_page(__gpa), v); \
+})
+
+#define __kvm_put_guest(kvm, gfn, offset, v) \
+({ \
+ unsigned long __addr = gfn_to_hva(kvm, gfn); \
+ typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
+ int __ret = -EFAULT; \
+ \
+ if (!kvm_is_error_hva(__addr)) \
+ __ret = put_user(v, __uaddr); \
+ if (!__ret) \
+ mark_page_dirty(kvm, gfn); \
+ __ret; \
+})
+
+#define kvm_put_guest(kvm, gpa, v) \
+({ \
+ gpa_t __gpa = gpa; \
+ struct kvm *__kvm = kvm; \
+ \
+ __kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \
+ offset_in_page(__gpa), v); \
+})
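A hedged usage sketch (kvm and gpa are assumptions): as with get_user()/put_user(), the type of @v selects the access width, and kvm_put_guest() additionally marks the page dirty on success.

u32 val = 0;

if (!kvm_get_guest(kvm, gpa, val))	/* 32-bit read at gpa */
	pr_debug("guest u32 at %llx = %u\n", gpa, val);

if (kvm_put_guest(kvm, gpa, (u32)0))	/* 32-bit write + dirty log */
	return -EFAULT;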
+
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
-unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
+bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
+void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
-struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
+int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
@@ -711,46 +1259,148 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
-void kvm_vcpu_block(struct kvm_vcpu *vcpu);
+/**
+ * kvm_gpc_init - initialize gfn_to_pfn_cache.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @kvm: pointer to kvm instance.
+ * @vcpu: vCPU to be used for marking pages dirty and to be woken on
+ * invalidation.
+ * @usage: indicates if the resulting host physical PFN is used while
+ * the @vcpu is IN_GUEST_MODE (in which case invalidation of
+ * the cache from MMU notifiers---but not for KVM memslot
+ * changes!---will also force @vcpu to exit the guest and
+ *		refresh the cache); and/or if the PFN is used directly
+ * by KVM (and thus needs a kernel virtual mapping).
+ *
+ * This sets up a gfn_to_pfn_cache by initializing locks and assigning the
+ * immutable attributes. Note, the cache must be zero-allocated (or zeroed by
+ * the caller before init).
+ */
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
+ struct kvm_vcpu *vcpu, enum pfn_cache_usage usage);
+
+/**
+ * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
+ * physical address.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @gpa: guest physical address to map.
+ * @len:	   sanity check; the range being accessed must fit a single page.
+ *
+ * @return: 0 for success.
+ * -EINVAL for a mapping which would cross a page boundary.
+ * -EFAULT for an untranslatable guest physical address.
+ *
+ * This primes a gfn_to_pfn_cache and links it into the @gpc->kvm's list for
+ * invalidations to be processed. Callers are required to use kvm_gpc_check()
+ * to ensure that the cache is valid before accessing the target page.
+ */
+int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
+
+/**
+ * kvm_gpc_check - check validity of a gfn_to_pfn_cache.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @len:	   sanity check; the range being accessed must fit a single page.
+ *
+ * @return: %true if the cache is still valid and the address matches.
+ * %false if the cache is not valid.
+ *
+ * Callers outside IN_GUEST_MODE context should hold a read lock on @gpc->lock
+ * while calling this function, and then continue to hold the lock until the
+ * access is complete.
+ *
+ * Callers in IN_GUEST_MODE may do so without locking, although they should
+ * still hold a read lock on kvm->srcu for the memslot checks.
+ */
+bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len);
+
+/**
+ * kvm_gpc_refresh - update a previously initialized cache.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @len:	   sanity check; the range being accessed must fit a single page.
+ *
+ * @return: 0 for success.
+ * -EINVAL for a mapping which would cross a page boundary.
+ * -EFAULT for an untranslatable guest physical address.
+ *
+ * This will attempt to refresh a gfn_to_pfn_cache. Note that a successful
+ * return from this function does not mean the page can be immediately
+ * accessed because it may have raced with an invalidation. Callers must
+ * still lock and check the cache status, as this function does not return
+ * with the lock still held to permit access.
+ */
+int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len);
+
+/**
+ * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ *
+ * This removes a cache from the VM's list to be processed on MMU notifier
+ * invocation.
+ */
+void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
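Taken together, the documented lifecycle reads roughly as follows (a sketch: the usage flag, the rwlock in the cache, the khva field, and the error handling are assumptions about the gfn_to_pfn_cache layout):

kvm_gpc_init(&gpc, kvm, vcpu, KVM_HOST_USES_PFN);
if (kvm_gpc_activate(&gpc, gpa, sizeof(u64)))
	return;				/* -EINVAL or -EFAULT */

read_lock(&gpc.lock);
while (!kvm_gpc_check(&gpc, sizeof(u64))) {
	read_unlock(&gpc.lock);
	if (kvm_gpc_refresh(&gpc, sizeof(u64)))
		return;			/* cache could not be refreshed */
	read_lock(&gpc.lock);
}
/* ... access up to sizeof(u64) bytes via gpc.khva ... */
read_unlock(&gpc.lock);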
+
+void kvm_sigset_activate(struct kvm_vcpu *vcpu);
+void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
+
+void kvm_vcpu_halt(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
-void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
-void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
-void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
+void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode);
void kvm_flush_remote_tlbs(struct kvm *kvm);
-void kvm_reload_remote_mmus(struct kvm *kvm);
-bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
+
+#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
+int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
+int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min);
+int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
+void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
+void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
+#endif
+
+void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
+ unsigned long end);
+void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
+ unsigned long end);
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
-int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
+vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
-int kvm_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log, int *is_dirty);
-
-int kvm_get_dirty_log_protect(struct kvm *kvm,
- struct kvm_dirty_log *log, bool *is_dirty);
-
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset,
unsigned long mask);
-
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log);
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
+
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot);
+#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
+int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
+ int *is_dirty, struct kvm_memory_slot **memslot);
+#endif
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
bool line_status);
-long kvm_arch_vm_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg);
+int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+ struct kvm_enable_cap *cap);
+int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg);
+long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg);
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
@@ -770,44 +1420,67 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg);
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
-
-int kvm_arch_init(void *opaque);
-void kvm_arch_exit(void);
-
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);
void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
-void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
-bool kvm_arch_has_vcpu_debugfs(void);
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);
+#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
+int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
+#endif
+#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
+void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
+#else
+static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
+#endif
+
+#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
-int kvm_arch_hardware_setup(void);
-void kvm_arch_hardware_unsetup(void);
-void kvm_arch_check_processor_compat(void *rtn);
+#endif
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
+bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
+int kvm_arch_post_init_vm(struct kvm *kvm);
+void kvm_arch_pre_destroy_vm(struct kvm *kvm);
+int kvm_arch_create_vm_debugfs(struct kvm *kvm);
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
+/*
+ * All architectures that want to use vzalloc currently also
+ * need their own kvm_arch_alloc_vm implementation.
+ */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
- return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+ return kzalloc(sizeof(struct kvm), GFP_KERNEL_ACCOUNT);
+}
+#endif
+
+static inline void __kvm_arch_free_vm(struct kvm *kvm)
+{
+ kvfree(kvm);
}
+#ifndef __KVM_HAVE_ARCH_VM_FREE
static inline void kvm_arch_free_vm(struct kvm *kvm)
{
- kfree(kvm);
+ __kvm_arch_free_vm(kvm);
+}
+#endif
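
An architecture that wants a different allocator defines __KVM_HAVE_ARCH_VM_ALLOC and supplies its own hook; because __kvm_arch_free_vm() above uses kvfree(), a vzalloc()-based override pairs correctly with the default free path. A minimal sketch of such an override (assumed to live in the arch's kvm_host.h):

	#define __KVM_HAVE_ARCH_VM_ALLOC
	static inline struct kvm *kvm_arch_alloc_vm(void)
	{
		/* vzalloc'd memory is safe to pass to kvfree() */
		return vzalloc(sizeof(struct kvm));
	}
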
+
+#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
+static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
+{
+ return -ENOTSUPP;
}
#endif
@@ -842,21 +1515,35 @@ static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}
-static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
+static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
return false;
}
#endif
-static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
+static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
- return vcpu->arch.wqp;
+ return vcpu->arch.waitp;
#else
- return &vcpu->wq;
+ return &vcpu->wait;
#endif
}
+/*
+ * Wake a vCPU if necessary, but don't do any stats/metadata updates. Returns
+ * true if the vCPU was blocking and was awakened, false otherwise.
+ */
+static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
+{
+ return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
+}
+
+static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
+{
+ return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu));
+}
+
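A rough sketch of how these helpers pair with the blocking side of the halt path (simplified; kvm_vcpu_check_block() is internal to kvm_main.c and the exact loop varies across kernel versions):

	/* blocking side, roughly what kvm_vcpu_block() does: */
	prepare_to_rcuwait(kvm_arch_vcpu_get_wait(vcpu));
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kvm_vcpu_check_block(vcpu) < 0)
			break;
		schedule();
	}
	finish_rcuwait(kvm_arch_vcpu_get_wait(vcpu));

	/* waking side: */
	if (__kvm_vcpu_wake_up(vcpu))
		++vcpu->stat.generic.halt_wakeup;
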
#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
* returns true if the virtual interrupt controller is initialized and
@@ -871,14 +1558,24 @@ static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
}
#endif
+#ifdef CONFIG_GUEST_PERF_EVENTS
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);
+
+void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void));
+void kvm_unregister_perf_callbacks(void);
+#else
+static inline void kvm_register_perf_callbacks(void *ign) {}
+static inline void kvm_unregister_perf_callbacks(void) {}
+#endif /* CONFIG_GUEST_PERF_EVENTS */
+
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
-bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
+struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn);
+bool kvm_is_zone_device_page(struct page *page);
struct kvm_irq_ack_notifier {
struct hlist_node link;
@@ -906,52 +1603,94 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
+bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
/*
- * search_memslots() and __gfn_to_memslot() are here because they are
- * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
- * gfn_to_memslot() itself isn't here as an inline because that would
- * bloat other code too much.
+ * Returns a pointer to the memslot if it contains gfn.
+ * Otherwise returns NULL.
*/
static inline struct kvm_memory_slot *
-search_memslots(struct kvm_memslots *slots, gfn_t gfn)
+try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
- int start = 0, end = slots->used_slots;
- int slot = atomic_read(&slots->lru_slot);
- struct kvm_memory_slot *memslots = slots->memslots;
-
- if (gfn >= memslots[slot].base_gfn &&
- gfn < memslots[slot].base_gfn + memslots[slot].npages)
- return &memslots[slot];
+ if (!slot)
+ return NULL;
- while (start < end) {
- slot = start + (end - start) / 2;
+ if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
+ return slot;
+ else
+ return NULL;
+}
- if (gfn >= memslots[slot].base_gfn)
- end = slot;
- else
- start = slot + 1;
+/*
+ * Returns a pointer to the memslot that contains gfn. Otherwise returns NULL.
+ *
+ * With "approx" set, the memslot is also returned when the address
+ * falls in a hole between memslots; in that case one of the memslots
+ * bordering the hole is returned.
+ */
+static inline struct kvm_memory_slot *
+search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx)
+{
+ struct kvm_memory_slot *slot;
+ struct rb_node *node;
+ int idx = slots->node_idx;
+
+ slot = NULL;
+ for (node = slots->gfn_tree.rb_node; node; ) {
+ slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]);
+ if (gfn >= slot->base_gfn) {
+ if (gfn < slot->base_gfn + slot->npages)
+ return slot;
+ node = node->rb_right;
+ } else
+ node = node->rb_left;
}
- if (gfn >= memslots[start].base_gfn &&
- gfn < memslots[start].base_gfn + memslots[start].npages) {
- atomic_set(&slots->lru_slot, start);
- return &memslots[start];
+ return approx ? slot : NULL;
+}
+
+static inline struct kvm_memory_slot *
+____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx)
+{
+ struct kvm_memory_slot *slot;
+
+ slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot);
+ slot = try_get_memslot(slot, gfn);
+ if (slot)
+ return slot;
+
+ slot = search_memslots(slots, gfn, approx);
+ if (slot) {
+ atomic_long_set(&slots->last_used_slot, (unsigned long)slot);
+ return slot;
}
return NULL;
}
+/*
+ * __gfn_to_memslot() and its descendants are here to allow arch code to inline
+ * the lookups in hot paths. gfn_to_memslot() itself isn't here as an inline
+ * because that would bloat other code too much.
+ */
static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
- return search_memslots(slots, gfn);
+ return ____gfn_to_memslot(slots, gfn, false);
}
static inline unsigned long
-__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
+__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
- return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+ /*
+ * The index was checked originally in search_memslots. To avoid
+ * that a malicious guest builds a Spectre gadget out of e.g. page
+ * table walks, do not let the processor speculate loads outside
+ * the guest's registered memslots.
+ */
+ unsigned long offset = gfn - slot->base_gfn;
+ offset = array_index_nospec(offset, slot->npages);
+ return slot->userspace_addr + offset * PAGE_SIZE;
}
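
A worked example of the clamp (numbers hypothetical): for a slot with base_gfn = 0x100 and npages = 0x10, looking up gfn 0x105 gives offset 5 and hva = userspace_addr + 5 * PAGE_SIZE; if a mispredicted branch presents a gfn past the slot, array_index_nospec() forces the speculated offset to 0 instead of letting the load stray beyond the slot.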
static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
@@ -995,35 +1734,223 @@ enum kvm_stat_kind {
};
struct kvm_stat_data {
- int offset;
struct kvm *kvm;
+ const struct _kvm_stats_desc *desc;
+ enum kvm_stat_kind kind;
};
-struct kvm_stats_debugfs_item {
- const char *name;
- int offset;
- enum kvm_stat_kind kind;
+struct _kvm_stats_desc {
+ struct kvm_stats_desc desc;
+ char name[KVM_STATS_NAME_SIZE];
};
-extern struct kvm_stats_debugfs_item debugfs_entries[];
+
+#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \
+ .flags = type | unit | base | \
+ BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \
+ BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \
+ BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \
+ .exponent = exp, \
+ .size = sz, \
+ .bucket_size = bsz
+
+#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vm_stat, generic.stat) \
+ }, \
+ .name = #stat, \
+ }
+#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \
+ }, \
+ .name = #stat, \
+ }
+#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vm_stat, stat) \
+ }, \
+ .name = #stat, \
+ }
+#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vcpu_stat, stat) \
+ }, \
+ .name = #stat, \
+ }
+/* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */
+#define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz) \
+ SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz)
+
+#define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE, \
+ unit, base, exponent, 1, 0)
+#define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT, \
+ unit, base, exponent, 1, 0)
+#define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK, \
+ unit, base, exponent, 1, 0)
+#define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST, \
+ unit, base, exponent, sz, bsz)
+#define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST, \
+ unit, base, exponent, sz, 0)
+
+/* Cumulative counter, read/write */
+#define STATS_DESC_COUNTER(SCOPE, name) \
+ STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE, \
+ KVM_STATS_BASE_POW10, 0)
+/* Instantaneous counter, read only */
+#define STATS_DESC_ICOUNTER(SCOPE, name) \
+ STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE, \
+ KVM_STATS_BASE_POW10, 0)
+/* Peak counter, read/write */
+#define STATS_DESC_PCOUNTER(SCOPE, name) \
+ STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE, \
+ KVM_STATS_BASE_POW10, 0)
+
+/* Instantaneous boolean value, read only */
+#define STATS_DESC_IBOOLEAN(SCOPE, name) \
+ STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \
+ KVM_STATS_BASE_POW10, 0)
+/* Peak (sticky) boolean value, read/write */
+#define STATS_DESC_PBOOLEAN(SCOPE, name) \
+ STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \
+ KVM_STATS_BASE_POW10, 0)
+
+/* Cumulative time in nanosecond */
+#define STATS_DESC_TIME_NSEC(SCOPE, name) \
+ STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
+ KVM_STATS_BASE_POW10, -9)
+/* Linear histogram for time in nanosecond */
+#define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz) \
+ STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
+ KVM_STATS_BASE_POW10, -9, sz, bsz)
+/* Logarithmic histogram for time in nanosecond */
+#define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz) \
+ STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
+ KVM_STATS_BASE_POW10, -9, sz)
+
+#define KVM_GENERIC_VM_STATS() \
+ STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush), \
+ STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests)
+
+#define KVM_GENERIC_VCPU_STATS() \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll), \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll), \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid), \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup), \
+ STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns), \
+ STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns), \
+ STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns), \
+ STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist, \
+ HALT_POLL_HIST_COUNT), \
+ STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist, \
+ HALT_POLL_HIST_COUNT), \
+ STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist, \
+ HALT_POLL_HIST_COUNT), \
+ STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking)
+
extern struct dentry *kvm_debugfs_dir;
+ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
+ const struct _kvm_stats_desc *desc,
+ void *stats, size_t size_stats,
+ char __user *user_buffer, size_t size, loff_t *offset);
+
+/**
+ * kvm_stats_linear_hist_update() - Update bucket value for linear histogram
+ * statistics data.
+ *
+ * @data: start address of the stats data
+ * @size: the number of buckets in the stats data
+ * @value: the new value used to update the linear histogram's bucket
+ * @bucket_size: the size (width) of a bucket
+ */
+static inline void kvm_stats_linear_hist_update(u64 *data, size_t size,
+ u64 value, size_t bucket_size)
+{
+ size_t index = div64_u64(value, bucket_size);
+
+ index = min(index, size - 1);
+ ++data[index];
+}
+
+/**
+ * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram
+ * statistics data.
+ *
+ * @data: start address of the stats data
+ * @size: the number of buckets in the stats data
+ * @value: the new value used to update the logarithmic histogram's bucket
+ */
+static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value)
+{
+ size_t index = fls64(value);
+
+ index = min(index, size - 1);
+ ++data[index];
+}
+
+#define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize) \
+ kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize)
+#define KVM_STATS_LOG_HIST_UPDATE(array, value) \
+ kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value)
+
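A worked example of the bucket math (sample values illustrative): with 1000 ns-wide buckets, a 2500 ns sample lands in linear bucket 2500 / 1000 = 2, while the logarithmic variant increments bucket fls64(2500) = 12, since 2^11 <= 2500 < 2^12; both indices are clamped to size - 1:

	u64 hist[HALT_POLL_HIST_COUNT] = { 0 };

	KVM_STATS_LINEAR_HIST_UPDATE(hist, 2500, 1000);	/* ++hist[2] */
	KVM_STATS_LOG_HIST_UPDATE(hist, 2500);		/* ++hist[12] */
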
+
+extern const struct kvm_stats_header kvm_vm_stats_header;
+extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
+extern const struct kvm_stats_header kvm_vcpu_stats_header;
+extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
+
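For orientation, an architecture typically instantiates these tables by expanding the generic macro plus its own entries; a hedged sketch (mmu_flooded is a hypothetical arch field of struct kvm_vm_stat):

	const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
		KVM_GENERIC_VM_STATS(),
		STATS_DESC_COUNTER(VM, mmu_flooded),	/* hypothetical */
	};

	const struct kvm_stats_header kvm_vm_stats_header = {
		.name_size = KVM_STATS_NAME_SIZE,
		.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
		.id_offset = sizeof(struct kvm_stats_header),
		.desc_offset = sizeof(struct kvm_stats_header) +
			       KVM_STATS_NAME_SIZE,
		.data_offset = sizeof(struct kvm_stats_header) +
			       KVM_STATS_NAME_SIZE + sizeof(kvm_vm_stats_desc),
	};
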
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
-static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
+static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
{
- if (unlikely(kvm->mmu_notifier_count))
+ if (unlikely(kvm->mmu_invalidate_in_progress))
return 1;
/*
- * Ensure the read of mmu_notifier_count happens before the read
- * of mmu_notifier_seq. This interacts with the smp_wmb() in
- * mmu_notifier_invalidate_range_end to make sure that the caller
- * either sees the old (non-zero) value of mmu_notifier_count or
- * the new (incremented) value of mmu_notifier_seq.
- * PowerPC Book3s HV KVM calls this under a per-page lock
- * rather than under kvm->mmu_lock, for scalability, so
- * can't rely on kvm->mmu_lock to keep things ordered.
+ * Ensure the read of mmu_invalidate_in_progress happens before
+ * the read of mmu_invalidate_seq. This interacts with the
+ * smp_wmb() in mmu_notifier_invalidate_range_end to make sure
+ * that the caller either sees the old (non-zero) value of
+ * mmu_invalidate_in_progress or the new (incremented) value of
+ * mmu_invalidate_seq.
+ *
+ * PowerPC Book3s HV KVM calls this under a per-page lock rather
+ * than under kvm->mmu_lock, for scalability, so can't rely on
+ * kvm->mmu_lock to keep things ordered.
*/
smp_rmb();
- if (kvm->mmu_notifier_seq != mmu_seq)
+ if (kvm->mmu_invalidate_seq != mmu_seq)
+ return 1;
+ return 0;
+}
+
+static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
+ unsigned long mmu_seq,
+ unsigned long hva)
+{
+ lockdep_assert_held(&kvm->mmu_lock);
+ /*
+ * If mmu_invalidate_in_progress is non-zero, then the range maintained
+ * by kvm_mmu_notifier_invalidate_range_start contains all addresses
+ * that might be being invalidated. Note that it may include some false
+ * positives, due to shortcuts when handling concurrent invalidations.
+ */
+ if (unlikely(kvm->mmu_invalidate_in_progress) &&
+ hva >= kvm->mmu_invalidate_range_start &&
+ hva < kvm->mmu_invalidate_range_end)
+ return 1;
+ if (kvm->mmu_invalidate_seq != mmu_seq)
return 1;
return 0;
}
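
The canonical consumer is a page-fault path that snapshots the sequence count, resolves the pfn without mmu_lock held (possibly sleeping), then rechecks under the lock before installing a mapping; roughly (the locking primitive varies by architecture):

	mmu_seq = kvm->mmu_invalidate_seq;
	smp_rmb();
	pfn = gfn_to_pfn(kvm, gfn);	/* may sleep, no mmu_lock held */

	write_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry_hva(kvm, mmu_seq, hva))
		goto out_retry;		/* raced with an invalidation */
	/* ... install the mapping ... */
	write_unlock(&kvm->mmu_lock);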
@@ -1031,13 +1958,7 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
-#ifdef CONFIG_S390
-#define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that...
-#elif defined(CONFIG_ARM64)
-#define KVM_MAX_IRQ_ROUTES 4096
-#else
-#define KVM_MAX_IRQ_ROUTES 1024
-#endif
+#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */
bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
@@ -1065,6 +1986,9 @@ int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
+bool kvm_notify_irqfd_resampler(struct kvm *kvm,
+ unsigned int irqchip,
+ unsigned int pin);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
@@ -1073,6 +1997,13 @@ static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
}
static inline void kvm_irqfd_release(struct kvm *kvm) {}
+
+static inline bool kvm_notify_irqfd_resampler(struct kvm *kvm,
+ unsigned int irqchip,
+ unsigned int pin)
+{
+ return false;
+}
#endif
#else
@@ -1091,7 +2022,6 @@ static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif
-void kvm_arch_irq_routing_update(struct kvm *kvm);
static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
@@ -1100,14 +2030,29 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
#endif /* CONFIG_HAVE_KVM_EVENTFD */
-static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+void kvm_arch_irq_routing_update(struct kvm *kvm);
+
+static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
/*
* Ensure the rest of the request is published to kvm_check_request's
* caller. Paired with the smp_mb__after_atomic in kvm_check_request.
*/
smp_wmb();
- set_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
+ set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
+}
+
+static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+{
+ /*
+ * Requests that don't require vCPU action should never be logged in
+ * vcpu->requests. The vCPU won't clear the request, so it will stay
+ * logged indefinitely and prevent the vCPU from entering the guest.
+ */
+ BUILD_BUG_ON(!__builtin_constant_p(req) ||
+ (req & KVM_REQUEST_NO_ACTION));
+
+ __kvm_make_request(req, vcpu);
}
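
A typical producer/consumer pairing (KVM_REQ_TLB_FLUSH is a real request; the handler name is illustrative):

	/* producer, possibly on another CPU: */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

	/* consumer, in the vcpu_run loop: */
	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
			flush_guest_tlb(vcpu);	/* hypothetical */
	}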
static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
@@ -1117,12 +2062,12 @@ static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
- return test_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
+ return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}
static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
- clear_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
+ clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}
static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
@@ -1141,14 +2086,17 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
}
}
+#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
extern bool kvm_rebooting;
+#endif
extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
+extern unsigned int halt_poll_ns_grow_start;
extern unsigned int halt_poll_ns_shrink;
struct kvm_device {
- struct kvm_device_ops *ops;
+ const struct kvm_device_ops *ops;
struct kvm *kvm;
void *private;
struct list_head vm_node;
@@ -1181,17 +2129,27 @@ struct kvm_device_ops {
*/
void (*destroy)(struct kvm_device *dev);
+ /*
+ * Release is an alternative method to free the device. It is
+ * called when the device file descriptor is closed. Once
+ * release is called, the destroy method will not be called
+ * anymore as the device is removed from the device list of
+ * the VM. kvm->lock is held.
+ */
+ void (*release)(struct kvm_device *dev);
+
int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
unsigned long arg);
+ int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};
void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
-int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
+int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);
extern struct kvm_device_ops kvm_mpic_ops;
@@ -1220,6 +2178,15 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
+static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
+{
+ return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
+ !(memslot->flags & KVM_MEMSLOT_INVALID));
+}
+
+struct kvm_vcpu *kvm_get_running_vcpu(void);
+struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
+
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
@@ -1230,6 +2197,8 @@ void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
+bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *,
+ struct kvm_kernel_irq_routing_entry *);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
@@ -1246,4 +2215,77 @@ static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
+#ifdef CONFIG_HAVE_KVM_NO_POLL
+/* Callback that tells if we must not poll */
+bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
+#else
+static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+#endif /* CONFIG_HAVE_KVM_NO_POLL */
+
+#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
+long kvm_arch_vcpu_async_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg);
+#else
+static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
+ unsigned int ioctl,
+ unsigned long arg)
+{
+ return -ENOIOCTLCMD;
+}
+#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
+
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+ unsigned long start, unsigned long end);
+
+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
+
+#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
+int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
+#else
+static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */
+
+typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);
+
+int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
+ uintptr_t data, const char *name,
+ struct task_struct **thread_ptr);
+
+#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
+static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
+{
+ vcpu->run->exit_reason = KVM_EXIT_INTR;
+ vcpu->stat.signal_exits++;
+}
+#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
+
+/*
+ * If more than one page is being (un)accounted, @virt must be the address of
+ * the first page of a block of pages that were allocated together (i.e.
+ * accounted together).
+ *
+ * kvm_account_pgtable_pages() is thread-safe because mod_lruvec_page_state()
+ * is thread-safe.
+ */
+static inline void kvm_account_pgtable_pages(void *virt, int nr)
+{
+ mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr);
+}
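
A hedged usage sketch, accounting a page-table page at allocation and un-accounting it before the free (flags and flow illustrative):

	void *pt = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);

	if (pt)
		kvm_account_pgtable_pages(pt, 1);
	/* ... use as a shadow page table ... */
	kvm_account_pgtable_pages(pt, -1);
	free_page((unsigned long)pt);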
+
+/*
+ * This defines how many reserved entries we want to keep before we
+ * kick the vCPU out to userspace, to avoid the dirty ring becoming
+ * full. This value can be tuned higher if e.g. PML is enabled on the
+ * host.
+ */
+#define KVM_DIRTY_RING_RSVD_ENTRIES 64
+
+/* Max number of entries allowed for each kvm dirty ring */
+#define KVM_DIRTY_RING_MAX_ENTRIES 65536
+
#endif
diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h
index 76c2fbc59f35..8ad43692e3bb 100644
--- a/include/linux/kvm_irqfd.h
+++ b/include/linux/kvm_irqfd.h
@@ -1,12 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*
* irqfd: Allows an fd to be used to inject an interrupt to the guest
* Credit goes to Avi Kivity for the original idea.
@@ -38,7 +31,7 @@ struct kvm_kernel_irqfd_resampler {
/*
 * Entry in list of kvm->irqfd.resampler_list. Used for sharing
* resamplers among irqfds on the same gsi.
- * Accessed and modified under kvm->irqfds.resampler_lock
+ * RCU list modified under kvm->irqfds.resampler_lock
*/
struct list_head link;
};
@@ -49,7 +42,7 @@ struct kvm_kernel_irqfd {
wait_queue_entry_t wait;
/* Update side is protected by irqfds.lock */
struct kvm_kernel_irq_routing_entry irq_entry;
- seqcount_t irq_entry_sc;
+ seqcount_spinlock_t irq_entry_sc;
/* Used for level IRQ fast-path */
int gsi;
struct work_struct inject;
diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h
index 35e568f04b1e..f23b90b02898 100644
--- a/include/linux/kvm_para.h
+++ b/include/linux/kvm_para.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KVM_PARA_H
#define __LINUX_KVM_PARA_H
@@ -8,4 +9,9 @@ static inline bool kvm_para_has_feature(unsigned int feature)
{
return !!(kvm_arch_para_features() & (1UL << feature));
}
+
+static inline bool kvm_para_has_hint(unsigned int feature)
+{
+ return !!(kvm_arch_para_hints() & (1UL << feature));
+}
#endif /* __LINUX_KVM_PARA_H */
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 8bf259dae9f6..6f4737d5046a 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -1,18 +1,4 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_TYPES_H__
#define __KVM_TYPES_H__
@@ -32,7 +18,12 @@ struct kvm_memslots;
enum kvm_mr_change;
-#include <asm/types.h>
+#include <linux/bits.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/spinlock_types.h>
+
+#include <asm/kvm_types.h>
/*
* Address types:
@@ -49,12 +40,20 @@ typedef unsigned long gva_t;
typedef u64 gpa_t;
typedef u64 gfn_t;
+#define INVALID_GPA (~(gpa_t)0)
+
typedef unsigned long hva_t;
typedef u64 hpa_t;
typedef u64 hfn_t;
typedef hfn_t kvm_pfn_t;
+enum pfn_cache_usage {
+ KVM_GUEST_USES_PFN = BIT(0),
+ KVM_HOST_USES_PFN = BIT(1),
+ KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN,
+};
+
struct gfn_to_hva_cache {
u64 generation;
gpa_t gpa;
@@ -63,4 +62,65 @@ struct gfn_to_hva_cache {
struct kvm_memory_slot *memslot;
};
+struct gfn_to_pfn_cache {
+ u64 generation;
+ gpa_t gpa;
+ unsigned long uhva;
+ struct kvm_memory_slot *memslot;
+ struct kvm *kvm;
+ struct kvm_vcpu *vcpu;
+ struct list_head list;
+ rwlock_t lock;
+ struct mutex refresh_lock;
+ void *khva;
+ kvm_pfn_t pfn;
+ enum pfn_cache_usage usage;
+ bool active;
+ bool valid;
+};
+
+#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
+/*
+ * Memory caches are used to preallocate memory ahead of various MMU flows,
+ * e.g. page fault handlers. Gracefully handling allocation failures deep in
+ * MMU flows is problematic, as is triggering reclaim, I/O, etc. while
+ * holding MMU locks. Note, these caches act more like prefetch buffers than
+ * classical caches, i.e. objects are not returned to the cache on being freed.
+ *
+ * The @capacity field and @objects array are lazily initialized when the cache
+ * is topped up (__kvm_mmu_topup_memory_cache()).
+ */
+struct kvm_mmu_memory_cache {
+ gfp_t gfp_zero;
+ gfp_t gfp_custom;
+ struct kmem_cache *kmem_cache;
+ int capacity;
+ int nobjs;
+ void **objects;
+};
+#endif
+
+#define HALT_POLL_HIST_COUNT 32
+
+struct kvm_vm_stat_generic {
+ u64 remote_tlb_flush;
+ u64 remote_tlb_flush_requests;
+};
+
+struct kvm_vcpu_stat_generic {
+ u64 halt_successful_poll;
+ u64 halt_attempted_poll;
+ u64 halt_poll_invalid;
+ u64 halt_wakeup;
+ u64 halt_poll_success_ns;
+ u64 halt_poll_fail_ns;
+ u64 halt_wait_ns;
+ u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT];
+ u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT];
+ u64 halt_wait_hist[HALT_POLL_HIST_COUNT];
+ u64 blocking;
+};
+
+#define KVM_STATS_NAME_SIZE 48
+
#endif /* __KVM_TYPES_H__ */
diff --git a/include/linux/l2tp.h b/include/linux/l2tp.h
index bffdb962f1a6..0402eda1a94e 100644
--- a/include/linux/l2tp.h
+++ b/include/linux/l2tp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* L2TP-over-IP socket for L2TPv3.
*
diff --git a/include/linux/lantiq.h b/include/linux/lantiq.h
new file mode 100644
index 000000000000..67921169d84d
--- /dev/null
+++ b/include/linux/lantiq.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_LANTIQ_H
+#define __LINUX_LANTIQ_H
+
+#ifdef CONFIG_LANTIQ
+#include <lantiq_soc.h>
+#else
+
+#ifndef LTQ_EARLY_ASC
+#define LTQ_EARLY_ASC 0
+#endif
+
+#ifndef CPHYSADDR
+#define CPHYSADDR(a) 0
+#endif
+
+static inline struct clk *clk_get_fpi(void)
+{
+ return NULL;
+}
+#endif /* CONFIG_LANTIQ */
+#endif /* __LINUX_LANTIQ_H */
diff --git a/include/linux/lapb.h b/include/linux/lapb.h
index 873c1eb635e4..b5333f9413dc 100644
--- a/include/linux/lapb.h
+++ b/include/linux/lapb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* These are the public elements of the Linux LAPB module.
*/
@@ -5,6 +6,11 @@
#ifndef LAPB_KERNEL_H
#define LAPB_KERNEL_H
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+
+struct net_device;
+
#define LAPB_OK 0
#define LAPB_BADTOKEN 1
#define LAPB_INVALUE 2
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
index 59ccab297ae0..84f1053cf2a8 100644
--- a/include/linux/latencytop.h
+++ b/include/linux/latencytop.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* latencytop.h: Infrastructure for displaying latency
*
@@ -35,10 +36,7 @@ account_scheduler_latency(struct task_struct *task, int usecs, int inter)
__account_scheduler_latency(task, usecs, inter);
}
-void clear_all_latency_tracing(struct task_struct *p);
-
-extern int sysctl_latencytop(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+void clear_tsk_latency_tracing(struct task_struct *p);
#else
@@ -47,7 +45,7 @@ account_scheduler_latency(struct task_struct *task, int usecs, int inter)
{
}
-static inline void clear_all_latency_tracing(struct task_struct *p)
+static inline void clear_tsk_latency_tracing(struct task_struct *p)
{
}
diff --git a/include/linux/lcd.h b/include/linux/lcd.h
index 504f6246f38f..238fb1dfed98 100644
--- a/include/linux/lcd.h
+++ b/include/linux/lcd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* LCD Lowlevel Control Abstraction
*
@@ -40,16 +41,6 @@ struct lcd_ops {
/* Get the LCD panel power status (0: full on, 1..3: controller
power on, flat panel power off, 4: full off), see FB_BLANK_XXX */
int (*get_power)(struct lcd_device *);
- /*
- * Enable or disable power to the LCD(0: on; 4: off, see FB_BLANK_XXX)
- * and this callback would be called proir to fb driver's callback.
- *
- * P.S. note that if early_set_power is not NULL then early fb notifier
- * would be registered.
- */
- int (*early_set_power)(struct lcd_device *, int power);
- /* revert the effects of the early blank event. */
- int (*r_early_set_power)(struct lcd_device *, int power);
/* Enable or disable power to the LCD (0: on; 4: off, see FB_BLANK_XXX) */
int (*set_power)(struct lcd_device *, int power);
/* Get the current contrast setting (0-max_contrast) */
diff --git a/include/linux/lcm.h b/include/linux/lcm.h
index 1ce79a7f1daa..0db3efd56efb 100644
--- a/include/linux/lcm.h
+++ b/include/linux/lcm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LCM_H
#define _LCM_H
diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h
index e97966d1fb8d..612b4cab3819 100644
--- a/include/linux/led-class-flash.h
+++ b/include/linux/led-class-flash.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* LED Flash class interface
*
* Copyright (C) 2015 Samsung Electronics Co., Ltd.
* Author: Jacek Anaszewski <j.anaszewski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __LINUX_FLASH_LEDS_H_INCLUDED
#define __LINUX_FLASH_LEDS_H_INCLUDED
@@ -89,16 +85,19 @@ static inline struct led_classdev_flash *lcdev_to_flcdev(
return container_of(lcdev, struct led_classdev_flash, led_cdev);
}
+#if IS_ENABLED(CONFIG_LEDS_CLASS_FLASH)
/**
- * led_classdev_flash_register - register a new object of led_classdev class
- * with support for flash LEDs
- * @parent: the flash LED to register
+ * led_classdev_flash_register_ext - register a new object of LED class with
+ * init data and with support for flash LEDs
+ * @parent: LED flash controller device this flash LED is driven by
* @fled_cdev: the led_classdev_flash structure for this device
+ * @init_data: the LED class flash device initialization data
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_classdev_flash_register(struct device *parent,
- struct led_classdev_flash *fled_cdev);
+int led_classdev_flash_register_ext(struct device *parent,
+ struct led_classdev_flash *fled_cdev,
+ struct led_init_data *init_data);
/**
* led_classdev_flash_unregister - unregisters an object of led_classdev class
@@ -107,7 +106,50 @@ extern int led_classdev_flash_register(struct device *parent,
*
 * Unregister an object previously registered via led_classdev_flash_register
*/
-extern void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev);
+void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev);
+
+int devm_led_classdev_flash_register_ext(struct device *parent,
+ struct led_classdev_flash *fled_cdev,
+ struct led_init_data *init_data);
+
+
+void devm_led_classdev_flash_unregister(struct device *parent,
+ struct led_classdev_flash *fled_cdev);
+
+#else
+
+static inline int led_classdev_flash_register_ext(struct device *parent,
+ struct led_classdev_flash *fled_cdev,
+ struct led_init_data *init_data)
+{
+ return 0;
+}
+
+static inline void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev) {};
+static inline int devm_led_classdev_flash_register_ext(struct device *parent,
+ struct led_classdev_flash *fled_cdev,
+ struct led_init_data *init_data)
+{
+ return 0;
+}
+
+static inline void devm_led_classdev_flash_unregister(struct device *parent,
+ struct led_classdev_flash *fled_cdev)
+{};
+
+#endif /* IS_ENABLED(CONFIG_LEDS_CLASS_FLASH) */
+
+static inline int led_classdev_flash_register(struct device *parent,
+ struct led_classdev_flash *fled_cdev)
+{
+ return led_classdev_flash_register_ext(parent, fled_cdev, NULL);
+}
+
+static inline int devm_led_classdev_flash_register(struct device *parent,
+ struct led_classdev_flash *fled_cdev)
+{
+ return devm_led_classdev_flash_register_ext(parent, fled_cdev, NULL);
+}
/**
* led_set_flash_strobe - setup flash strobe
@@ -121,6 +163,8 @@ extern void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev);
static inline int led_set_flash_strobe(struct led_classdev_flash *fled_cdev,
bool state)
{
+ if (!fled_cdev)
+ return -EINVAL;
return fled_cdev->ops->strobe_set(fled_cdev, state);
}
@@ -136,6 +180,8 @@ static inline int led_set_flash_strobe(struct led_classdev_flash *fled_cdev,
static inline int led_get_flash_strobe(struct led_classdev_flash *fled_cdev,
bool *state)
{
+ if (!fled_cdev)
+ return -EINVAL;
if (fled_cdev->ops->strobe_get)
return fled_cdev->ops->strobe_get(fled_cdev, state);
@@ -151,8 +197,8 @@ static inline int led_get_flash_strobe(struct led_classdev_flash *fled_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_set_flash_brightness(struct led_classdev_flash *fled_cdev,
- u32 brightness);
+int led_set_flash_brightness(struct led_classdev_flash *fled_cdev,
+ u32 brightness);
/**
* led_update_flash_brightness - update flash LED brightness
@@ -163,7 +209,7 @@ extern int led_set_flash_brightness(struct led_classdev_flash *fled_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_update_flash_brightness(struct led_classdev_flash *fled_cdev);
+int led_update_flash_brightness(struct led_classdev_flash *fled_cdev);
/**
* led_set_flash_timeout - set flash LED timeout
@@ -174,8 +220,7 @@ extern int led_update_flash_brightness(struct led_classdev_flash *fled_cdev);
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_set_flash_timeout(struct led_classdev_flash *fled_cdev,
- u32 timeout);
+int led_set_flash_timeout(struct led_classdev_flash *fled_cdev, u32 timeout);
/**
* led_get_flash_fault - get the flash LED fault
@@ -186,7 +231,6 @@ extern int led_set_flash_timeout(struct led_classdev_flash *fled_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_get_flash_fault(struct led_classdev_flash *fled_cdev,
- u32 *fault);
+int led_get_flash_fault(struct led_classdev_flash *fled_cdev, u32 *fault);
#endif /* __LINUX_FLASH_LEDS_H_INCLUDED */
diff --git a/include/linux/led-class-multicolor.h b/include/linux/led-class-multicolor.h
new file mode 100644
index 000000000000..210d57bcd767
--- /dev/null
+++ b/include/linux/led-class-multicolor.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* LED Multicolor class interface
+ * Copyright (C) 2019-20 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#ifndef _LINUX_MULTICOLOR_LEDS_H_INCLUDED
+#define _LINUX_MULTICOLOR_LEDS_H_INCLUDED
+
+#include <linux/leds.h>
+#include <dt-bindings/leds/common.h>
+
+struct mc_subled {
+ unsigned int color_index;
+ unsigned int brightness;
+ unsigned int intensity;
+ unsigned int channel;
+};
+
+struct led_classdev_mc {
+ /* led class device */
+ struct led_classdev led_cdev;
+ unsigned int num_colors;
+
+ struct mc_subled *subled_info;
+};
+
+static inline struct led_classdev_mc *lcdev_to_mccdev(
+ struct led_classdev *led_cdev)
+{
+ return container_of(led_cdev, struct led_classdev_mc, led_cdev);
+}
+
+#if IS_ENABLED(CONFIG_LEDS_CLASS_MULTICOLOR)
+/**
+ * led_classdev_multicolor_register_ext - register a new object of led_classdev
+ * class with support for multicolor LEDs
+ * @parent: the multicolor LED to register
+ * @mcled_cdev: the led_classdev_mc structure for this device
+ * @init_data: the LED class multicolor device initialization data
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+int led_classdev_multicolor_register_ext(struct device *parent,
+ struct led_classdev_mc *mcled_cdev,
+ struct led_init_data *init_data);
+
+/**
+ * led_classdev_multicolor_unregister - unregisters an object of led_classdev
+ * class with support for multicolor LEDs
+ * @mcled_cdev: the multicolor LED to unregister
+ *
+ * Unregister an object previously registered via
+ * led_classdev_multicolor_register
+ */
+void led_classdev_multicolor_unregister(struct led_classdev_mc *mcled_cdev);
+
+/* Calculate brightness for the monochrome LED cluster */
+int led_mc_calc_color_components(struct led_classdev_mc *mcled_cdev,
+ enum led_brightness brightness);
+
+int devm_led_classdev_multicolor_register_ext(struct device *parent,
+ struct led_classdev_mc *mcled_cdev,
+ struct led_init_data *init_data);
+
+void devm_led_classdev_multicolor_unregister(struct device *parent,
+ struct led_classdev_mc *mcled_cdev);
+#else
+
+static inline int led_classdev_multicolor_register_ext(struct device *parent,
+ struct led_classdev_mc *mcled_cdev,
+ struct led_init_data *init_data)
+{
+ return 0;
+}
+
+static inline void led_classdev_multicolor_unregister(struct led_classdev_mc *mcled_cdev) {};
+static inline int led_mc_calc_color_components(struct led_classdev_mc *mcled_cdev,
+ enum led_brightness brightness)
+{
+ return 0;
+}
+
+static inline int devm_led_classdev_multicolor_register_ext(struct device *parent,
+ struct led_classdev_mc *mcled_cdev,
+ struct led_init_data *init_data)
+{
+ return 0;
+}
+
+static inline void devm_led_classdev_multicolor_unregister(struct device *parent,
+ struct led_classdev_mc *mcled_cdev)
+{};
+
+#endif /* IS_ENABLED(CONFIG_LEDS_CLASS_MULTICOLOR) */
+
+static inline int led_classdev_multicolor_register(struct device *parent,
+ struct led_classdev_mc *mcled_cdev)
+{
+ return led_classdev_multicolor_register_ext(parent, mcled_cdev, NULL);
+}
+
+static inline int devm_led_classdev_multicolor_register(struct device *parent,
+ struct led_classdev_mc *mcled_cdev)
+{
+ return devm_led_classdev_multicolor_register_ext(parent, mcled_cdev,
+ NULL);
+}
+
+#endif /* _LINUX_MULTICOLOR_LEDS_H_INCLUDED */
diff --git a/include/linux/led-lm3530.h b/include/linux/led-lm3530.h
index 4b133479d6ea..811f7ce4e218 100644
--- a/include/linux/led-lm3530.h
+++ b/include/linux/led-lm3530.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 ST-Ericsson SA.
* Copyright (C) 2009 Motorola, Inc.
*
- * License Terms: GNU General Public License v2
- *
 * Simple driver for National Semiconductor LM3530 Backlight driver chip
*
* Author: Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>
diff --git a/include/linux/leds-bd2802.h b/include/linux/leds-bd2802.h
index 42f854a1a199..ec577f5f8707 100644
--- a/include/linux/leds-bd2802.h
+++ b/include/linux/leds-bd2802.h
@@ -1,21 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* leds-bd2802.h - RGB LED Driver
*
* Copyright (C) 2009 Samsung Electronics
* Kim Kyuwon <q1.kim@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Datasheet: http://www.rohm.com/products/databook/driver/pdf/bd2802gu-e.pdf
- *
*/
#ifndef _LEDS_BD2802_H_
#define _LEDS_BD2802_H_
struct bd2802_led_platform_data{
- int reset_gpio;
u8 rgb_time;
};
diff --git a/include/linux/leds-lp3944.h b/include/linux/leds-lp3944.h
index 2618aa9063bc..f681fefff281 100644
--- a/include/linux/leds-lp3944.h
+++ b/include/linux/leds-lp3944.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* leds-lp3944.h - platform data structure for lp3944 led controller
*
* Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __LINUX_LEDS_LP3944_H
diff --git a/include/linux/leds-lp3952.h b/include/linux/leds-lp3952.h
index 49b37ed8d456..937ae5f2eac9 100644
--- a/include/linux/leds-lp3952.h
+++ b/include/linux/leds-lp3952.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* LED driver for TI lp3952 controller
*
* Copyright (C) 2016, DAQRI, LLC.
* Author: Tony Makkiel <tony.makkiel@daqri.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef LEDS_LP3952_H_
diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h
index 5e240b2b4d58..f4796d333974 100644
--- a/include/linux/leds-pca9532.h
+++ b/include/linux/leds-pca9532.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* pca9532.h - platform data structure for pca9532 led controller
*
* Copyright (C) 2008 Riku Voipio <riku.voipio@movial.fi>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
* Datasheet: http://www.nxp.com/acrobat/datasheets/PCA9532_3.pdf
- *
*/
#ifndef __LINUX_PCA9532_H
diff --git a/include/linux/leds-regulator.h b/include/linux/leds-regulator.h
index e2337a8c90b0..899f816073a1 100644
--- a/include/linux/leds-regulator.h
+++ b/include/linux/leds-regulator.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* leds-regulator.h - platform data structure for regulator driven LEDs.
*
* Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __LINUX_LEDS_REGULATOR_H
diff --git a/include/linux/leds-tca6507.h b/include/linux/leds-tca6507.h
deleted file mode 100644
index dcabf4fa2aef..000000000000
--- a/include/linux/leds-tca6507.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * TCA6507 LED chip driver.
- *
- * Copyright (C) 2011 Neil Brown <neil@brown.name>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#ifndef __LINUX_TCA6507_H
-#define __LINUX_TCA6507_H
-#include <linux/leds.h>
-
-struct tca6507_platform_data {
- struct led_platform_data leds;
-#ifdef CONFIG_GPIOLIB
- int gpio_base;
- void (*setup)(unsigned gpio_base, unsigned ngpio);
-#endif
-};
-
-#define TCA6507_MAKE_GPIO 1
-#endif /* __LINUX_TCA6507_H*/
diff --git a/include/linux/leds-ti-lmu-common.h b/include/linux/leds-ti-lmu-common.h
new file mode 100644
index 000000000000..420b61e5a213
--- /dev/null
+++ b/include/linux/leds-ti-lmu-common.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// TI LMU Common Core
+// Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/
+
+#ifndef _TI_LMU_COMMON_H_
+#define _TI_LMU_COMMON_H_
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <uapi/linux/uleds.h>
+
+#define LMU_11BIT_LSB_MASK (BIT(0) | BIT(1) | BIT(2))
+#define LMU_11BIT_MSB_SHIFT 3
+
+#define MAX_BRIGHTNESS_8BIT 255
+#define MAX_BRIGHTNESS_11BIT 2047
+
+struct ti_lmu_bank {
+ struct regmap *regmap;
+
+ int max_brightness;
+
+ u8 lsb_brightness_reg;
+ u8 msb_brightness_reg;
+
+ u8 runtime_ramp_reg;
+ u32 ramp_up_usec;
+ u32 ramp_down_usec;
+};
+
+int ti_lmu_common_set_brightness(struct ti_lmu_bank *lmu_bank, int brightness);
+
+int ti_lmu_common_set_ramp(struct ti_lmu_bank *lmu_bank);
+
+int ti_lmu_common_get_ramp_params(struct device *dev,
+ struct fwnode_handle *child,
+ struct ti_lmu_bank *lmu_data);
+
+int ti_lmu_common_get_brt_res(struct device *dev, struct fwnode_handle *child,
+ struct ti_lmu_bank *lmu_data);
+
+#endif /* _TI_LMU_COMMON_H_ */
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 64c56d454f7d..c39bbf17a25b 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -1,31 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Driver model for leds and led triggers
*
* Copyright (C) 2005 John Lenz <lenz@cs.wisc.edu>
* Copyright (C) 2005 Richard Purdie <rpurdie@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __LINUX_LEDS_H_INCLUDED
#define __LINUX_LEDS_H_INCLUDED
+#include <dt-bindings/leds/common.h>
#include <linux/device.h>
-#include <linux/kernfs.h>
-#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
+#include <linux/types.h>
#include <linux/workqueue.h>
-struct device;
+struct attribute_group;
+struct device_node;
+struct fwnode_handle;
+struct gpio_desc;
+struct kernfs_node;
+struct led_pattern;
+struct platform_device;
+
/*
* LED Core
*/
+/* This is obsolete/useless. We now support variable maximum brightness. */
enum led_brightness {
LED_OFF = 0,
LED_ON = 1,
@@ -33,22 +37,83 @@ enum led_brightness {
LED_FULL = 255,
};
+enum led_default_state {
+ LEDS_DEFSTATE_OFF = 0,
+ LEDS_DEFSTATE_ON = 1,
+ LEDS_DEFSTATE_KEEP = 2,
+};
+
+/**
+ * struct led_lookup_data - represents a single LED lookup entry
+ *
+ * @list: internal list of all LED lookup entries
+ * @provider: name of led_classdev providing the LED
+ * @dev_id: name of the device associated with this LED
+ * @con_id: name of the LED from the device's point of view
+ */
+struct led_lookup_data {
+ struct list_head list;
+ const char *provider;
+ const char *dev_id;
+ const char *con_id;
+};
+
+struct led_init_data {
+ /* device fwnode handle */
+ struct fwnode_handle *fwnode;
+ /*
+ * default <color:function> tuple, for backward compatibility
+ * with in-driver hard-coded LED names used as a fallback when
+ * DT "label" property is absent; it should be set to NULL
+ * in new LED class drivers.
+ */
+ const char *default_label;
+ /*
+ * string to be used for devicename section of LED class device
+ * either for label based LED name composition path or for fwnode
+ * based when devname_mandatory is true
+ */
+ const char *devicename;
+ /*
+ * indicates if LED name should always comprise devicename section;
+ * only LEDs exposed by drivers of hot-pluggable devices should
+ * set it to true
+ */
+ bool devname_mandatory;
+};
+
+#if IS_ENABLED(CONFIG_NEW_LEDS)
+enum led_default_state led_init_default_state_get(struct fwnode_handle *fwnode);
+#else
+static inline enum led_default_state
+led_init_default_state_get(struct fwnode_handle *fwnode)
+{
+ return LEDS_DEFSTATE_OFF;
+}
+#endif
+
+struct led_hw_trigger_type {
+ int dummy;
+};
+
struct led_classdev {
const char *name;
- enum led_brightness brightness;
- enum led_brightness max_brightness;
+ unsigned int brightness;
+ unsigned int max_brightness;
int flags;
/* Lower 16 bits reflect status */
-#define LED_SUSPENDED (1 << 0)
-#define LED_UNREGISTERING (1 << 1)
+#define LED_SUSPENDED BIT(0)
+#define LED_UNREGISTERING BIT(1)
/* Upper 16 bits reflect control information */
-#define LED_CORE_SUSPENDRESUME (1 << 16)
-#define LED_SYSFS_DISABLE (1 << 17)
-#define LED_DEV_CAP_FLASH (1 << 18)
-#define LED_HW_PLUGGABLE (1 << 19)
-#define LED_PANIC_INDICATOR (1 << 20)
-#define LED_BRIGHT_HW_CHANGED (1 << 21)
+#define LED_CORE_SUSPENDRESUME BIT(16)
+#define LED_SYSFS_DISABLE BIT(17)
+#define LED_DEV_CAP_FLASH BIT(18)
+#define LED_HW_PLUGGABLE BIT(19)
+#define LED_PANIC_INDICATOR BIT(20)
+#define LED_BRIGHT_HW_CHANGED BIT(21)
+#define LED_RETAIN_AT_SHUTDOWN BIT(22)
+#define LED_INIT_DEFAULT_TRIGGER BIT(23)
/* set_brightness_work / blink_timer flags, atomic, private. */
unsigned long work_flags;
@@ -87,6 +152,10 @@ struct led_classdev {
unsigned long *delay_on,
unsigned long *delay_off);
+ int (*pattern_set)(struct led_classdev *led_cdev,
+ struct led_pattern *pattern, u32 len, int repeat);
+ int (*pattern_clear)(struct led_classdev *led_cdev);
+
struct device *dev;
const struct attribute_group **groups;
@@ -111,6 +180,9 @@ struct led_classdev {
void *trigger_data;
/* true if activated - deactivate routine uses it to do cleanup */
bool activated;
+
+ /* LEDs that have private triggers have this set */
+ struct led_hw_trigger_type *trigger_type;
#endif
#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
@@ -122,21 +194,72 @@ struct led_classdev {
struct mutex led_access;
};
-extern int of_led_classdev_register(struct device *parent,
- struct device_node *np,
- struct led_classdev *led_cdev);
-#define led_classdev_register(parent, led_cdev) \
- of_led_classdev_register(parent, NULL, led_cdev)
-extern int devm_of_led_classdev_register(struct device *parent,
- struct device_node *np,
- struct led_classdev *led_cdev);
-#define devm_led_classdev_register(parent, led_cdev) \
- devm_of_led_classdev_register(parent, NULL, led_cdev)
-extern void led_classdev_unregister(struct led_classdev *led_cdev);
-extern void devm_led_classdev_unregister(struct device *parent,
- struct led_classdev *led_cdev);
-extern void led_classdev_suspend(struct led_classdev *led_cdev);
-extern void led_classdev_resume(struct led_classdev *led_cdev);
+/**
+ * led_classdev_register_ext - register a new object of LED class with
+ * init data
+ * @parent: LED controller device this LED is driven by
+ * @led_cdev: the led_classdev structure for this device
+ * @init_data: the LED class device initialization data
+ *
+ * Register a new object of LED class, with name derived from init_data.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+int led_classdev_register_ext(struct device *parent,
+ struct led_classdev *led_cdev,
+ struct led_init_data *init_data);
+
+/**
+ * led_classdev_register - register a new object of LED class
+ * @parent: LED controller device this LED is driven by
+ * @led_cdev: the led_classdev structure for this device
+ *
+ * Register a new object of LED class, with name derived from the name property
+ * of passed led_cdev argument.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+static inline int led_classdev_register(struct device *parent,
+ struct led_classdev *led_cdev)
+{
+ return led_classdev_register_ext(parent, led_cdev, NULL);
+}
+
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
+int devm_led_classdev_register_ext(struct device *parent,
+ struct led_classdev *led_cdev,
+ struct led_init_data *init_data);
+#else
+static inline int
+devm_led_classdev_register_ext(struct device *parent,
+ struct led_classdev *led_cdev,
+ struct led_init_data *init_data)
+{
+ return 0;
+}
+#endif
+
+static inline int devm_led_classdev_register(struct device *parent,
+ struct led_classdev *led_cdev)
+{
+ return devm_led_classdev_register_ext(parent, led_cdev, NULL);
+}
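
A minimal registration sketch using the extended API (the probe wiring and the foo_brightness_set callback are hypothetical):

	static int foo_led_probe(struct device *dev, struct fwnode_handle *fwnode)
	{
		struct led_classdev *cdev;
		struct led_init_data init_data = { .fwnode = fwnode };

		cdev = devm_kzalloc(dev, sizeof(*cdev), GFP_KERNEL);
		if (!cdev)
			return -ENOMEM;

		cdev->max_brightness = 255;
		cdev->brightness_set = foo_brightness_set;	/* hypothetical */

		return devm_led_classdev_register_ext(dev, cdev, &init_data);
	}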
+void led_classdev_unregister(struct led_classdev *led_cdev);
+void devm_led_classdev_unregister(struct device *parent,
+ struct led_classdev *led_cdev);
+void led_classdev_suspend(struct led_classdev *led_cdev);
+void led_classdev_resume(struct led_classdev *led_cdev);
+
+void led_add_lookup(struct led_lookup_data *led_lookup);
+void led_remove_lookup(struct led_lookup_data *led_lookup);
+
+struct led_classdev *__must_check led_get(struct device *dev, char *con_id);
+struct led_classdev *__must_check devm_led_get(struct device *dev, char *con_id);
+
+extern struct led_classdev *of_led_get(struct device_node *np, int index);
+extern void led_put(struct led_classdev *led_cdev);
+struct led_classdev *__must_check devm_of_led_get(struct device *dev,
+ int index);
/**
* led_blink_set - set blinking with software fallback
@@ -151,11 +274,10 @@ extern void led_classdev_resume(struct led_classdev *led_cdev);
*
* Note that if software blinking is active, simply calling
* led_cdev->brightness_set() will not stop the blinking,
- * use led_classdev_brightness_set() instead.
+ * use led_set_brightness() instead.
*/
-extern void led_blink_set(struct led_classdev *led_cdev,
- unsigned long *delay_on,
- unsigned long *delay_off);
+void led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
+ unsigned long *delay_off);
/**
* led_blink_set_oneshot - do a oneshot software blink
* @led_cdev: the LED to start blinking
@@ -170,10 +292,9 @@ extern void led_blink_set(struct led_classdev *led_cdev,
* If invert is set, led blinks for delay_off first, then for
 * delay_on, and leaves the LED on after the on-off cycle.
*/
-extern void led_blink_set_oneshot(struct led_classdev *led_cdev,
- unsigned long *delay_on,
- unsigned long *delay_off,
- int invert);
+void led_blink_set_oneshot(struct led_classdev *led_cdev,
+ unsigned long *delay_on, unsigned long *delay_off,
+ int invert);
/**
* led_set_brightness - set LED brightness
* @led_cdev: the LED to set
@@ -183,13 +304,12 @@ extern void led_blink_set_oneshot(struct led_classdev *led_cdev,
* software blink timer that implements blinking when the
* hardware doesn't. This function is guaranteed not to sleep.
*/
-extern void led_set_brightness(struct led_classdev *led_cdev,
- enum led_brightness brightness);
+void led_set_brightness(struct led_classdev *led_cdev, unsigned int brightness);
/**
* led_set_brightness_sync - set LED brightness synchronously
* @led_cdev: the LED to set
- * @brightness: the brightness to set it to
+ * @value: the brightness to set it to
*
* Set an LED's brightness immediately. This function will block
* the caller for the time required for accessing device registers,
@@ -197,8 +317,7 @@ extern void led_set_brightness(struct led_classdev *led_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_set_brightness_sync(struct led_classdev *led_cdev,
- enum led_brightness value);
+int led_set_brightness_sync(struct led_classdev *led_cdev, unsigned int value);
/**
* led_update_brightness - update LED brightness
@@ -209,7 +328,19 @@ extern int led_set_brightness_sync(struct led_classdev *led_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_update_brightness(struct led_classdev *led_cdev);
+int led_update_brightness(struct led_classdev *led_cdev);
+
+/**
+ * led_get_default_pattern - return default pattern
+ *
+ * @led_cdev: the LED to get default pattern for
+ * @size: pointer for storing the number of elements in returned array,
+ * modified only if return != NULL
+ *
+ * Return: Allocated array of integers with default pattern from device tree
+ * or NULL. Caller is responsible for kfree().
+ */
+u32 *led_get_default_pattern(struct led_classdev *led_cdev, unsigned int *size);
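/*
 * Sketch of the ownership contract documented above: the returned
 * array is allocated for the caller, who must kfree() it.
 */
unsigned int size;
u32 *pattern = led_get_default_pattern(led_cdev, &size);

if (pattern) {
	/* ... program the hardware with 'size' entries ... */
	kfree(pattern);
}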
/**
* led_sysfs_disable - disable LED sysfs interface
@@ -217,7 +348,7 @@ extern int led_update_brightness(struct led_classdev *led_cdev);
*
* Disable the led_cdev's sysfs interface.
*/
-extern void led_sysfs_disable(struct led_classdev *led_cdev);
+void led_sysfs_disable(struct led_classdev *led_cdev);
/**
* led_sysfs_enable - enable LED sysfs interface
@@ -225,7 +356,22 @@ extern void led_sysfs_disable(struct led_classdev *led_cdev);
*
* Enable the led_cdev's sysfs interface.
*/
-extern void led_sysfs_enable(struct led_classdev *led_cdev);
+void led_sysfs_enable(struct led_classdev *led_cdev);
+
+/**
+ * led_compose_name - compose LED class device name
+ * @dev: LED controller device object
+ * @init_data: the LED class device initialization data
+ * @led_classdev_name: composed LED class device name
+ *
+ * Create an LED class device name based on the provided init_data argument.
+ * The name can have <devicename:color:function> or <color:function>
+ * form, depending on the init_data configuration.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+int led_compose_name(struct device *dev, struct led_init_data *init_data,
+ char *led_classdev_name);
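/*
 * Sketch (buffer size is an assumption): the composed name, e.g.
 * "mydev:green:status", lands in the caller-supplied buffer.
 */
char name[64];
int ret = led_compose_name(dev, init_data, name);

if (ret)
	return ret;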
/**
* led_sysfs_is_disabled - check if LED sysfs interface is disabled
@@ -252,44 +398,56 @@ static inline bool led_sysfs_is_disabled(struct led_classdev *led_cdev)
struct led_trigger {
/* Trigger Properties */
const char *name;
- void (*activate)(struct led_classdev *led_cdev);
+ int (*activate)(struct led_classdev *led_cdev);
void (*deactivate)(struct led_classdev *led_cdev);
+ /* LED-private triggers have this set */
+ struct led_hw_trigger_type *trigger_type;
+
/* LEDs under control by this trigger (for simple triggers) */
- rwlock_t leddev_list_lock;
+ spinlock_t leddev_list_lock;
struct list_head led_cdevs;
/* Link to next registered trigger */
struct list_head next_trig;
+
+ const struct attribute_group **groups;
};
-ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count);
-ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
- char *buf);
+/*
+ * Currently the attributes in struct led_trigger::groups are added directly to
+ * the LED device. As this might change in the future, the following
+ * macros abstract getting the LED device and its trigger_data from the dev
+ * parameter passed to the attribute accessor functions.
+ */
+#define led_trigger_get_led(dev) ((struct led_classdev *)dev_get_drvdata((dev)))
+#define led_trigger_get_drvdata(dev) (led_get_trigger_data(led_trigger_get_led(dev)))
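/*
 * Sketch: an attribute accessor written against the macros above, so it
 * keeps working if the attributes move off the LED device later;
 * struct foo_trig_data and its 'interval' field are hypothetical.
 */
static ssize_t interval_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct foo_trig_data *data = led_trigger_get_drvdata(dev);

	return sprintf(buf, "%u\n", data->interval);
}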
/* Registration functions for complex triggers */
-extern int led_trigger_register(struct led_trigger *trigger);
-extern void led_trigger_unregister(struct led_trigger *trigger);
-extern int devm_led_trigger_register(struct device *dev,
+int led_trigger_register(struct led_trigger *trigger);
+void led_trigger_unregister(struct led_trigger *trigger);
+int devm_led_trigger_register(struct device *dev,
struct led_trigger *trigger);
-extern void led_trigger_register_simple(const char *name,
+void led_trigger_register_simple(const char *name,
struct led_trigger **trigger);
-extern void led_trigger_unregister_simple(struct led_trigger *trigger);
-extern void led_trigger_event(struct led_trigger *trigger,
- enum led_brightness event);
-extern void led_trigger_blink(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off);
-extern void led_trigger_blink_oneshot(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off,
- int invert);
-extern void led_trigger_set_default(struct led_classdev *led_cdev);
-extern void led_trigger_set(struct led_classdev *led_cdev,
- struct led_trigger *trigger);
-extern void led_trigger_remove(struct led_classdev *led_cdev);
+void led_trigger_unregister_simple(struct led_trigger *trigger);
+void led_trigger_event(struct led_trigger *trigger, enum led_brightness event);
+void led_trigger_blink(struct led_trigger *trigger, unsigned long *delay_on,
+ unsigned long *delay_off);
+void led_trigger_blink_oneshot(struct led_trigger *trigger,
+ unsigned long *delay_on,
+ unsigned long *delay_off,
+ int invert);
+void led_trigger_set_default(struct led_classdev *led_cdev);
+int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger);
+void led_trigger_remove(struct led_classdev *led_cdev);
+
+static inline void led_set_trigger_data(struct led_classdev *led_cdev,
+ void *trigger_data)
+{
+ led_cdev->trigger_data = trigger_data;
+}
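/*
 * Sketch: an activate() hook under the new int-returning signature,
 * stashing per-LED state via led_set_trigger_data(); foo_trig_data is
 * hypothetical.
 */
static int foo_trig_activate(struct led_classdev *led_cdev)
{
	struct foo_trig_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;
	led_set_trigger_data(led_cdev, data);
	return 0;
}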
static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
{
@@ -311,8 +469,11 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
* This is meant to be used on triggers with statically
* allocated name.
*/
-extern void led_trigger_rename_static(const char *name,
- struct led_trigger *trig);
+void led_trigger_rename_static(const char *name, struct led_trigger *trig);
+
+#define module_led_trigger(__led_trigger) \
+ module_driver(__led_trigger, led_trigger_register, \
+ led_trigger_unregister)
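/*
 * Sketch: module_led_trigger() collapses the usual module_init/exit
 * register/unregister boilerplate into one line.
 */
static struct led_trigger foo_trigger = {
	.name		= "foo",
	.activate	= foo_trig_activate,
	.deactivate	= foo_trig_deactivate,	/* placeholder */
};
module_led_trigger(foo_trigger);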
#else
@@ -333,9 +494,14 @@ static inline void led_trigger_blink_oneshot(struct led_trigger *trigger,
unsigned long *delay_off,
int invert) {}
static inline void led_trigger_set_default(struct led_classdev *led_cdev) {}
-static inline void led_trigger_set(struct led_classdev *led_cdev,
- struct led_trigger *trigger) {}
+static inline int led_trigger_set(struct led_classdev *led_cdev,
+ struct led_trigger *trigger)
+{
+ return 0;
+}
+
static inline void led_trigger_remove(struct led_classdev *led_cdev) {}
+static inline void led_set_trigger_data(struct led_classdev *led_cdev,
+					void *trigger_data) {}
static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
{
return NULL;
@@ -345,20 +511,20 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
/* Trigger specific functions */
#ifdef CONFIG_LEDS_TRIGGER_DISK
-extern void ledtrig_disk_activity(void);
+void ledtrig_disk_activity(bool write);
#else
-static inline void ledtrig_disk_activity(void) {}
+static inline void ledtrig_disk_activity(bool write) {}
#endif
#ifdef CONFIG_LEDS_TRIGGER_MTD
-extern void ledtrig_mtd_activity(void);
+void ledtrig_mtd_activity(void);
#else
static inline void ledtrig_mtd_activity(void) {}
#endif
#if defined(CONFIG_LEDS_TRIGGER_CAMERA) || defined(CONFIG_LEDS_TRIGGER_CAMERA_MODULE)
-extern void ledtrig_flash_ctrl(bool on);
-extern void ledtrig_torch_ctrl(bool on);
+void ledtrig_flash_ctrl(bool on);
+void ledtrig_torch_ctrl(bool on);
#else
static inline void ledtrig_flash_ctrl(bool on) {}
static inline void ledtrig_torch_ctrl(bool on) {}
@@ -378,7 +544,15 @@ struct led_platform_data {
struct led_info *leds;
};
-struct gpio_desc;
+struct led_properties {
+ u32 color;
+ bool color_present;
+ const char *function;
+ u32 func_enum;
+ bool func_enum_present;
+ const char *label;
+};
+
typedef int (*gpio_blink_set_t)(struct gpio_desc *desc, int state,
unsigned long *delay_on,
unsigned long *delay_off);
@@ -392,12 +566,13 @@ struct gpio_led {
unsigned retain_state_suspended : 1;
unsigned panic_indicator : 1;
unsigned default_state : 2;
+ unsigned retain_state_shutdown : 1;
/* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */
struct gpio_desc *gpiod;
};
-#define LEDS_GPIO_DEFSTATE_OFF 0
-#define LEDS_GPIO_DEFSTATE_ON 1
-#define LEDS_GPIO_DEFSTATE_KEEP 2
+#define LEDS_GPIO_DEFSTATE_OFF LEDS_DEFSTATE_OFF
+#define LEDS_GPIO_DEFSTATE_ON LEDS_DEFSTATE_ON
+#define LEDS_GPIO_DEFSTATE_KEEP LEDS_DEFSTATE_KEEP
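/*
 * Board-file sketch: a gpio_led table using the now-shared default-state
 * constants; the name and trigger are illustrative.
 */
static const struct gpio_led board_leds[] = {
	{
		.name = "board:green:status",
		.default_trigger = "heartbeat",
		.default_state = LEDS_GPIO_DEFSTATE_OFF,
	},
};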
struct gpio_led_platform_data {
int num_leds;
@@ -428,7 +603,7 @@ enum cpu_led_event {
CPU_LED_HALTED, /* Machine shutdown */
};
#ifdef CONFIG_LEDS_TRIGGER_CPU
-extern void ledtrig_cpu(enum cpu_led_event evt);
+void ledtrig_cpu(enum cpu_led_event evt);
#else
static inline void ledtrig_cpu(enum cpu_led_event evt)
{
@@ -437,11 +612,41 @@ static inline void ledtrig_cpu(enum cpu_led_event evt)
#endif
#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
-extern void led_classdev_notify_brightness_hw_changed(
- struct led_classdev *led_cdev, enum led_brightness brightness);
+void led_classdev_notify_brightness_hw_changed(
+ struct led_classdev *led_cdev, unsigned int brightness);
#else
static inline void led_classdev_notify_brightness_hw_changed(
struct led_classdev *led_cdev, enum led_brightness brightness) { }
#endif
+/**
+ * struct led_pattern - pattern interval settings
+ * @delta_t: pattern interval delay, in milliseconds
+ * @brightness: pattern interval brightness
+ */
+struct led_pattern {
+ u32 delta_t;
+ int brightness;
+};
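/*
 * Sketch: a three-step brightness ramp expressed as pattern intervals,
 * 250ms each.
 */
static struct led_pattern ramp[] = {
	{ .delta_t = 250, .brightness = 0 },
	{ .delta_t = 250, .brightness = 128 },
	{ .delta_t = 250, .brightness = 255 },
};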
+
+enum led_audio {
+ LED_AUDIO_MUTE, /* master mute LED */
+ LED_AUDIO_MICMUTE, /* mic mute LED */
+ NUM_AUDIO_LEDS
+};
+
+#if IS_ENABLED(CONFIG_LEDS_TRIGGER_AUDIO)
+enum led_brightness ledtrig_audio_get(enum led_audio type);
+void ledtrig_audio_set(enum led_audio type, enum led_brightness state);
+#else
+static inline enum led_brightness ledtrig_audio_get(enum led_audio type)
+{
+ return LED_OFF;
+}
+static inline void ledtrig_audio_set(enum led_audio type,
+ enum led_brightness state)
+{
+}
+#endif
+
#endif /* __LINUX_LEDS_H_INCLUDED */
diff --git a/include/linux/leds_pwm.h b/include/linux/leds_pwm.h
deleted file mode 100644
index a65e9646e4b1..000000000000
--- a/include/linux/leds_pwm.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * PWM LED driver data - see drivers/leds/leds-pwm.c
- */
-#ifndef __LINUX_LEDS_PWM_H
-#define __LINUX_LEDS_PWM_H
-
-struct led_pwm {
- const char *name;
- const char *default_trigger;
- unsigned pwm_id __deprecated;
- u8 active_low;
- unsigned max_brightness;
- unsigned pwm_period_ns;
-};
-
-struct led_pwm_platform_data {
- int num_leds;
- struct led_pwm *leds;
-};
-
-#endif
diff --git a/include/linux/lguest.h b/include/linux/lguest.h
deleted file mode 100644
index 6db19f35f7c5..000000000000
--- a/include/linux/lguest.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Things the lguest guest needs to know. Note: like all lguest interfaces,
- * this is subject to wild and random change between versions.
- */
-#ifndef _LINUX_LGUEST_H
-#define _LINUX_LGUEST_H
-
-#ifndef __ASSEMBLY__
-#include <linux/time.h>
-#include <asm/irq.h>
-#include <asm/lguest_hcall.h>
-
-#define LG_CLOCK_MIN_DELTA 100UL
-#define LG_CLOCK_MAX_DELTA ULONG_MAX
-
-/*G:031
- * The second method of communicating with the Host is to via "struct
- * lguest_data". Once the Guest's initialization hypercall tells the Host where
- * this is, the Guest and Host both publish information in it.
-:*/
-struct lguest_data {
- /*
- * 512 == enabled (same as eflags in normal hardware). The Guest
- * changes interrupts so often that a hypercall is too slow.
- */
- unsigned int irq_enabled;
- /* Fine-grained interrupt disabling by the Guest */
- DECLARE_BITMAP(blocked_interrupts, LGUEST_IRQS);
-
- /*
- * The Host writes the virtual address of the last page fault here,
- * which saves the Guest a hypercall. CR2 is the native register where
- * this address would normally be found.
- */
- unsigned long cr2;
-
- /* Wallclock time set by the Host. */
- struct timespec time;
-
- /*
- * Interrupt pending set by the Host. The Guest should do a hypercall
- * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF).
- */
- int irq_pending;
-
- /*
- * Async hypercall ring. Instead of directly making hypercalls, we can
- * place them in here for processing the next time the Host wants.
- * This batching can be quite efficient.
- */
-
- /* 0xFF == done (set by Host), 0 == pending (set by Guest). */
- u8 hcall_status[LHCALL_RING_SIZE];
- /* The actual registers for the hypercalls. */
- struct hcall_args hcalls[LHCALL_RING_SIZE];
-
-/* Fields initialized by the Host at boot: */
- /* Memory not to try to access */
- unsigned long reserve_mem;
- /* KHz for the TSC clock. */
- u32 tsc_khz;
-
-/* Fields initialized by the Guest at boot: */
- /* Instruction to suppress interrupts even if enabled */
- unsigned long noirq_iret;
- /* Address above which page tables are all identical. */
- unsigned long kernel_address;
- /* The vector to try to use for system calls (0x40 or 0x80). */
- unsigned int syscall_vec;
-};
-extern struct lguest_data lguest_data;
-#endif /* __ASSEMBLY__ */
-#endif /* _LINUX_LGUEST_H */
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h
deleted file mode 100644
index acd5b12565cc..000000000000
--- a/include/linux/lguest_launcher.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _LINUX_LGUEST_LAUNCHER
-#define _LINUX_LGUEST_LAUNCHER
-/* Everything the "lguest" userspace program needs to know. */
-#include <linux/types.h>
-
-/*D:010
- * Drivers
- *
- * The Guest needs devices to do anything useful. Since we don't let it touch
- * real devices (think of the damage it could do!) we provide virtual devices.
- * We emulate a PCI bus with virtio devices on it; we used to have our own
- * lguest bus which was far simpler, but this tests the virtio 1.0 standard.
- *
- * Virtio devices are also used by kvm, so we can simply reuse their optimized
- * device drivers. And one day when everyone uses virtio, my plan will be
- * complete. Bwahahahah!
- */
-
-/* Write command first word is a request. */
-enum lguest_req
-{
- LHREQ_INITIALIZE, /* + base, pfnlimit, start */
- LHREQ_GETDMA, /* No longer used */
- LHREQ_IRQ, /* + irq */
- LHREQ_BREAK, /* No longer used */
- LHREQ_EVENTFD, /* No longer used. */
- LHREQ_GETREG, /* + offset within struct pt_regs (then read value). */
- LHREQ_SETREG, /* + offset within struct pt_regs, value. */
- LHREQ_TRAP, /* + trap number to deliver to guest. */
-};
-
-/*
- * This is what read() of the lguest fd populates. trap ==
- * LGUEST_TRAP_ENTRY for an LHCALL_NOTIFY (addr is the
- * argument), 14 for a page fault in the MMIO region (addr is
- * the trap address, insn is the instruction), or 13 for a GPF
- * (insn is the instruction).
- */
-struct lguest_pending {
- __u8 trap;
- __u8 insn[7];
- __u32 addr;
-};
-#endif /* _LINUX_LGUEST_LAUNCHER */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 55de3da58b1c..311cd93377c7 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1,26 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2003-2005 Red Hat, Inc. All rights reserved.
* Copyright 2003-2005 Jeff Garzik
*
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/driver-api/libata.rst
- *
*/
#ifndef __LINUX_LIBATA_H__
@@ -38,6 +22,7 @@
#include <linux/acpi.h>
#include <linux/cdrom.h>
#include <linux/sched.h>
+#include <linux/async.h>
/*
* Define if arch has non-standard setup. This is a _PCI_ standard
@@ -54,27 +39,9 @@
* compile-time options: to be removed as soon as all the drivers are
* converted to the new debugging mechanism
*/
-#undef ATA_DEBUG /* debugging output */
-#undef ATA_VERBOSE_DEBUG /* yet more debugging output */
#undef ATA_IRQ_TRAP /* define to ack screaming irqs */
-#undef ATA_NDEBUG /* define to disable quick runtime checks */
-/* note: prints function name for you */
-#ifdef ATA_DEBUG
-#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-#ifdef ATA_VERBOSE_DEBUG
-#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-#else
-#define VPRINTK(fmt, args...)
-#endif /* ATA_VERBOSE_DEBUG */
-#else
-#define DPRINTK(fmt, args...)
-#define VPRINTK(fmt, args...)
-#endif /* ATA_DEBUG */
-
-#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-
#define ata_print_version_once(dev, version) \
({ \
static bool __print_once; \
@@ -85,38 +52,6 @@
} \
})
-/* NEW: debug levels */
-#define HAVE_LIBATA_MSG 1
-
-enum {
- ATA_MSG_DRV = 0x0001,
- ATA_MSG_INFO = 0x0002,
- ATA_MSG_PROBE = 0x0004,
- ATA_MSG_WARN = 0x0008,
- ATA_MSG_MALLOC = 0x0010,
- ATA_MSG_CTL = 0x0020,
- ATA_MSG_INTR = 0x0040,
- ATA_MSG_ERR = 0x0080,
-};
-
-#define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV)
-#define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO)
-#define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE)
-#define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN)
-#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
-#define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL)
-#define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR)
-#define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR)
-
-static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
-{
- if (dval < 0 || dval >= (sizeof(u32) * 8))
- return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
- if (!dval)
- return 0;
- return (1 << dval) - 1;
-}
-
/* defines only for the constants which don't work well as enums */
#define ATA_TAG_POISON 0xfafbfcfdU
@@ -125,9 +60,8 @@ enum {
LIBATA_MAX_PRD = ATA_MAX_PRD / 2,
LIBATA_DUMB_MAX_PRD = ATA_MAX_PRD / 4, /* Worst case */
ATA_DEF_QUEUE = 1,
- /* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
ATA_MAX_QUEUE = 32,
- ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1,
+ ATA_TAG_INTERNAL = ATA_MAX_QUEUE,
ATA_SHORT_PAUSE = 16,
ATAPI_MAX_DRAIN = 16 << 10,
@@ -136,7 +70,6 @@ enum {
ATA_SHT_EMULATED = 1,
ATA_SHT_THIS_ID = -1,
- ATA_SHT_USE_CLUSTERING = 1,
/* struct ata_taskfile flags */
ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */
@@ -157,29 +90,33 @@ enum {
ATA_DFLAG_ACPI_FAILED = (1 << 6), /* ACPI on devcfg has failed */
ATA_DFLAG_AN = (1 << 7), /* AN configured */
ATA_DFLAG_TRUSTED = (1 << 8), /* device supports trusted send/recv */
+ ATA_DFLAG_FUA = (1 << 9), /* device supports FUA */
ATA_DFLAG_DMADIR = (1 << 10), /* device requires DMADIR */
- ATA_DFLAG_CFG_MASK = (1 << 12) - 1,
+ ATA_DFLAG_NCQ_SEND_RECV = (1 << 11), /* device supports NCQ SEND and RECV */
+ ATA_DFLAG_NCQ_PRIO = (1 << 12), /* device supports NCQ priority */
+ ATA_DFLAG_CFG_MASK = (1 << 13) - 1,
- ATA_DFLAG_PIO = (1 << 12), /* device limited to PIO mode */
- ATA_DFLAG_NCQ_OFF = (1 << 13), /* device limited to non-NCQ mode */
+ ATA_DFLAG_PIO = (1 << 13), /* device limited to PIO mode */
+ ATA_DFLAG_NCQ_OFF = (1 << 14), /* device limited to non-NCQ mode */
ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */
ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */
ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */
ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */
- ATA_DFLAG_NCQ_SEND_RECV = (1 << 19), /* device supports NCQ SEND and RECV */
- ATA_DFLAG_NCQ_PRIO = (1 << 20), /* device supports NCQ priority */
- ATA_DFLAG_NCQ_PRIO_ENABLE = (1 << 21), /* Priority cmds sent to dev */
- ATA_DFLAG_INIT_MASK = (1 << 24) - 1,
+ ATA_DFLAG_INIT_MASK = (1 << 19) - 1,
+ ATA_DFLAG_NCQ_PRIO_ENABLED = (1 << 19), /* Priority cmds sent to dev */
ATA_DFLAG_DETACH = (1 << 24),
ATA_DFLAG_DETACHED = (1 << 25),
-
ATA_DFLAG_DA = (1 << 26), /* device supports Device Attention */
ATA_DFLAG_DEVSLP = (1 << 27), /* device supports Device Sleep */
ATA_DFLAG_ACPI_DISABLED = (1 << 28), /* ACPI for the device is disabled */
ATA_DFLAG_D_SENSE = (1 << 29), /* Descriptor sense requested */
ATA_DFLAG_ZAC = (1 << 30), /* ZAC device */
+ ATA_DFLAG_FEATURES_MASK = (ATA_DFLAG_TRUSTED | ATA_DFLAG_DA | \
+ ATA_DFLAG_DEVSLP | ATA_DFLAG_NCQ_SEND_RECV | \
+ ATA_DFLAG_NCQ_PRIO | ATA_DFLAG_FUA),
+
ATA_DEV_UNKNOWN = 0, /* unknown device */
ATA_DEV_ATA = 1, /* ATA device */
ATA_DEV_ATA_UNSUP = 2, /* ATA device (unsupported) */
@@ -194,6 +131,7 @@ enum {
ATA_DEV_NONE = 11, /* no device */
/* struct ata_link flags */
+ /* NOTE: struct ata_force_param currently stores lflags in u16 */
ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */
ATA_LFLAG_NO_SRST = (1 << 2), /* avoid softreset */
ATA_LFLAG_ASSUME_ATA = (1 << 3), /* assume ATA class */
@@ -205,12 +143,13 @@ enum {
ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */
ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */
ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */
- ATA_LFLAG_NO_DB_DELAY = (1 << 11), /* no debounce delay on link resume */
+ ATA_LFLAG_NO_DEBOUNCE_DELAY = (1 << 11), /* no debounce delay on link resume */
/* struct ata_port flags */
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
/* (doesn't imply presence) */
ATA_FLAG_SATA = (1 << 1),
+ ATA_FLAG_NO_LPM = (1 << 2), /* host not happy with LPM */
ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */
ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
@@ -261,13 +200,14 @@ enum {
/* struct ata_queued_cmd flags */
ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */
ATA_QCFLAG_DMAMAP = (1 << 1), /* SG table is DMA mapped */
+ ATA_QCFLAG_RTF_FILLED = (1 << 2), /* result TF has been filled */
ATA_QCFLAG_IO = (1 << 3), /* standard IO command */
ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */
ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */
ATA_QCFLAG_QUIET = (1 << 6), /* don't report device error */
ATA_QCFLAG_RETRY = (1 << 7), /* retry after failure */
- ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */
+ ATA_QCFLAG_EH = (1 << 16), /* cmd aborted and owned by EH */
ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */
ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
@@ -336,7 +276,7 @@ enum {
PORT_DISABLED = 2,
/* encoding various smaller bitmaps into a single
- * unsigned long bitmap
+ * unsigned int bitmap
*/
ATA_NR_PIO_MODES = 7,
ATA_NR_MWDMA_MODES = 5,
@@ -407,7 +347,7 @@ enum {
/* This should match the actual table size of
* ata_eh_cmd_timeout_table in libata-eh.c.
*/
- ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 6,
+ ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 7,
/* Horkage types. May be set by libata or controller on drives
(some horkage may be drive/controller pair dependent */
@@ -435,9 +375,14 @@ enum {
ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
- ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */
+ ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */
ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
+ ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */
+ ATA_HORKAGE_NO_NCQ_ON_ATI = (1 << 27), /* Disable NCQ on ATI chipset */
+ ATA_HORKAGE_NO_ID_DEV_LOG = (1 << 28), /* Identify device log missing */
+ ATA_HORKAGE_NO_LOG_DIR = (1 << 29), /* Do not read log directory */
+ ATA_HORKAGE_NO_FUA = (1 << 30), /* Do not use FUA */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
@@ -483,12 +428,9 @@ enum {
};
enum ata_xfer_mask {
- ATA_MASK_PIO = ((1LU << ATA_NR_PIO_MODES) - 1)
- << ATA_SHIFT_PIO,
- ATA_MASK_MWDMA = ((1LU << ATA_NR_MWDMA_MODES) - 1)
- << ATA_SHIFT_MWDMA,
- ATA_MASK_UDMA = ((1LU << ATA_NR_UDMA_MODES) - 1)
- << ATA_SHIFT_UDMA,
+ ATA_MASK_PIO = ((1U << ATA_NR_PIO_MODES) - 1) << ATA_SHIFT_PIO,
+ ATA_MASK_MWDMA = ((1U << ATA_NR_MWDMA_MODES) - 1) << ATA_SHIFT_MWDMA,
+ ATA_MASK_UDMA = ((1U << ATA_NR_UDMA_MODES) - 1) << ATA_SHIFT_UDMA,
};
enum hsm_task_states {
@@ -501,6 +443,7 @@ enum hsm_task_states {
};
enum ata_completion_errors {
+ AC_ERR_OK = 0, /* no error */
AC_ERR_DEV = (1 << 0), /* device reported error */
AC_ERR_HSM = (1 << 1), /* host state machine violation */
AC_ERR_TIMEOUT = (1 << 2), /* timeout */
@@ -522,7 +465,9 @@ enum ata_lpm_policy {
ATA_LPM_UNKNOWN,
ATA_LPM_MAX_POWER,
ATA_LPM_MED_POWER,
- ATA_LPM_MIN_POWER,
+ ATA_LPM_MED_POWER_WITH_DIPM, /* Med power + DIPM as Windows IRST does */
+ ATA_LPM_MIN_POWER_WITH_PARTIAL, /* Min Power + partial and slumber */
+ ATA_LPM_MIN_POWER, /* Min power + no partial (slumber only) */
};
enum ata_lpm_hints {
@@ -545,12 +490,15 @@ typedef int (*ata_reset_fn_t)(struct ata_link *link, unsigned int *classes,
unsigned long deadline);
typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes);
-extern struct device_attribute dev_attr_link_power_management_policy;
extern struct device_attribute dev_attr_unload_heads;
+#ifdef CONFIG_SATA_HOST
+extern struct device_attribute dev_attr_link_power_management_policy;
+extern struct device_attribute dev_attr_ncq_prio_supported;
extern struct device_attribute dev_attr_ncq_prio_enable;
extern struct device_attribute dev_attr_em_message_type;
extern struct device_attribute dev_attr_em_message;
extern struct device_attribute dev_attr_sw_activity;
+#endif
enum sw_activity {
OFF,
@@ -570,7 +518,10 @@ struct ata_taskfile {
u8 hob_lbam;
u8 hob_lbah;
- u8 feature;
+ union {
+ u8 error;
+ u8 feature;
+ };
u8 nsect;
u8 lbal;
u8 lbam;
@@ -578,7 +529,10 @@ struct ata_taskfile {
u8 device;
- u8 command; /* IO operation */
+ union {
+ u8 status;
+ u8 command;
+ };
u32 auxiliary; /* auxiliary field */
/* from SATA 3.1 and */
@@ -616,12 +570,13 @@ struct ata_host {
void *private_data;
struct ata_port_operations *ops;
unsigned long flags;
+ struct kref kref;
struct mutex eh_mutex;
struct task_struct *eh_owner;
struct ata_port *simplex_claimed; /* channel owning the DMA */
- struct ata_port *ports[0];
+ struct ata_port *ports[];
};
struct ata_queued_cmd {
@@ -635,7 +590,8 @@ struct ata_queued_cmd {
u8 cdb[ATAPI_CDB_LEN];
unsigned long flags; /* ATA_QCFLAG_xxx */
- unsigned int tag;
+ unsigned int tag; /* libata core tag */
+ unsigned int hw_tag; /* driver tag */
unsigned int n_elem;
unsigned int orig_n_elem;
@@ -679,6 +635,18 @@ struct ata_ering {
struct ata_ering_entry ring[ATA_ERING_SIZE];
};
+struct ata_cpr {
+ u8 num;
+ u8 num_storage_elements;
+ u64 start_lba;
+ u64 num_lbas;
+};
+
+struct ata_cpr_log {
+ u8 nr_cpr;
+ struct ata_cpr cpr[];
+};
+
struct ata_device {
struct ata_link *link;
unsigned int devno; /* 0 or 1 */
@@ -711,9 +679,9 @@ struct ata_device {
unsigned int cdb_len;
/* per-dev xfer mask */
- unsigned long pio_mask;
- unsigned long mwdma_mask;
- unsigned long udma_mask;
+ unsigned int pio_mask;
+ unsigned int mwdma_mask;
+ unsigned int udma_mask;
/* for CHS addressing */
u16 cylinders; /* Number of cylinders */
@@ -738,6 +706,9 @@ struct ata_device {
u32 zac_zones_optimal_nonseq;
u32 zac_zones_max_open;
+ /* Concurrent positioning ranges */
+ struct ata_cpr_log *cpr_log;
+
/* error history */
int spdn_cnt;
/* ering is CLEAR_END, read comment above CLEAR_END */
@@ -847,11 +818,9 @@ struct ata_port {
unsigned int udma_mask;
unsigned int cbl; /* cable type; ATA_CBL_xxx */
- struct ata_queued_cmd qcmd[ATA_MAX_QUEUE];
- unsigned long sas_tag_allocated; /* for sas tag allocation only */
- unsigned int qc_active;
+ struct ata_queued_cmd qcmd[ATA_MAX_QUEUE + 1];
+ u64 qc_active;
int nr_active_links; /* #links with active qcs */
- unsigned int sas_last_tag; /* track next tag hw expects */
struct ata_link link; /* host default link */
struct ata_link *slave_link; /* see ata_slave_link_init() */
@@ -871,7 +840,6 @@ struct ata_port {
unsigned int hsm_task_state;
- u32 msg_enable;
struct list_head eh_done_q;
wait_queue_head_t eh_wait_q;
int eh_tries;
@@ -881,7 +849,9 @@ struct ata_port {
enum ata_lpm_policy target_lpm_policy;
struct timer_list fastdrain_timer;
- unsigned long fastdrain_cnt;
+ unsigned int fastdrain_cnt;
+
+ async_cookie_t cookie;
int em_message_type;
void *private_data;
@@ -904,21 +874,23 @@ struct ata_port_operations {
/*
* Command execution
*/
- int (*qc_defer)(struct ata_queued_cmd *qc);
- int (*check_atapi_dma)(struct ata_queued_cmd *qc);
- void (*qc_prep)(struct ata_queued_cmd *qc);
+ int (*qc_defer)(struct ata_queued_cmd *qc);
+ int (*check_atapi_dma)(struct ata_queued_cmd *qc);
+ enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc);
unsigned int (*qc_issue)(struct ata_queued_cmd *qc);
- bool (*qc_fill_rtf)(struct ata_queued_cmd *qc);
+ void (*qc_fill_rtf)(struct ata_queued_cmd *qc);
+ void (*qc_ncq_fill_rtf)(struct ata_port *ap, u64 done_mask);
/*
* Configuration and exception handling
*/
int (*cable_detect)(struct ata_port *ap);
- unsigned long (*mode_filter)(struct ata_device *dev, unsigned long xfer_mask);
+ unsigned int (*mode_filter)(struct ata_device *dev, unsigned int xfer_mask);
void (*set_piomode)(struct ata_port *ap, struct ata_device *dev);
void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev);
int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev);
- unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf, u16 *id);
+ unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf,
+ __le16 *id);
void (*dev_config)(struct ata_device *dev);
@@ -1009,9 +981,9 @@ struct ata_port_operations {
struct ata_port_info {
unsigned long flags;
unsigned long link_flags;
- unsigned long pio_mask;
- unsigned long mwdma_mask;
- unsigned long udma_mask;
+ unsigned int pio_mask;
+ unsigned int mwdma_mask;
+ unsigned int udma_mask;
struct ata_port_operations *port_ops;
void *private_data;
};
@@ -1032,10 +1004,6 @@ struct ata_timing {
/*
* Core layer - drivers/ata/libata-core.c
*/
-extern const unsigned long sata_deb_timing_normal[];
-extern const unsigned long sata_deb_timing_hotplug[];
-extern const unsigned long sata_deb_timing_long[];
-
extern struct ata_port_operations ata_dummy_port_ops;
extern const struct ata_port_info ata_dummy_port_info;
@@ -1073,33 +1041,19 @@ static inline int is_multi_taskfile(struct ata_taskfile *tf)
(tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT);
}
-static inline const unsigned long *
-sata_ehc_deb_timing(struct ata_eh_context *ehc)
+static inline int ata_port_is_dummy(struct ata_port *ap)
{
- if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
- return sata_deb_timing_hotplug;
- else
- return sata_deb_timing_normal;
+ return ap->ops == &ata_dummy_port_ops;
}
-static inline int ata_port_is_dummy(struct ata_port *ap)
+static inline bool ata_port_is_frozen(const struct ata_port *ap)
{
- return ap->ops == &ata_dummy_port_ops;
+ return ap->pflags & ATA_PFLAG_FROZEN;
}
-extern int sata_set_spd(struct ata_link *link);
extern int ata_std_prereset(struct ata_link *link, unsigned long deadline);
extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link));
-extern int sata_link_debounce(struct ata_link *link,
- const unsigned long *params, unsigned long deadline);
-extern int sata_link_resume(struct ata_link *link, const unsigned long *params,
- unsigned long deadline);
-extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
- bool spm_wakeup);
-extern int sata_link_hardreset(struct ata_link *link,
- const unsigned long *timing, unsigned long deadline,
- bool *online, int (*check_ready)(struct ata_link *));
extern int sata_std_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
extern void ata_std_postreset(struct ata_link *link, unsigned int *classes);
@@ -1107,39 +1061,36 @@ extern void ata_std_postreset(struct ata_link *link, unsigned int *classes);
extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
const struct ata_port_info * const * ppi, int n_ports);
-extern int ata_slave_link_init(struct ata_port *ap);
+extern void ata_host_get(struct ata_host *host);
+extern void ata_host_put(struct ata_host *host);
extern int ata_host_start(struct ata_host *host);
extern int ata_host_register(struct ata_host *host,
- struct scsi_host_template *sht);
+ const struct scsi_host_template *sht);
extern int ata_host_activate(struct ata_host *host, int irq,
irq_handler_t irq_handler, unsigned long irq_flags,
- struct scsi_host_template *sht);
+ const struct scsi_host_template *sht);
extern void ata_host_detach(struct ata_host *host);
extern void ata_host_init(struct ata_host *, struct device *, struct ata_port_operations *);
extern int ata_scsi_detect(struct scsi_host_template *sht);
-extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
+extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd,
+ void __user *arg);
+#ifdef CONFIG_COMPAT
+#define ATA_SCSI_COMPAT_IOCTL .compat_ioctl = ata_scsi_ioctl,
+#else
+#define ATA_SCSI_COMPAT_IOCTL /* empty */
+#endif
extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd);
+#if IS_REACHABLE(CONFIG_ATA)
+bool ata_scsi_dma_need_drain(struct request *rq);
+#else
+#define ata_scsi_dma_need_drain NULL
+#endif
extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev,
- int cmd, void __user *arg);
-extern void ata_sas_port_destroy(struct ata_port *);
-extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
- struct ata_port_info *, struct Scsi_Host *);
-extern void ata_sas_async_probe(struct ata_port *ap);
-extern int ata_sas_sync_probe(struct ata_port *ap);
-extern int ata_sas_port_init(struct ata_port *);
-extern int ata_sas_port_start(struct ata_port *ap);
-extern void ata_sas_port_stop(struct ata_port *ap);
-extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
-extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
-extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
-extern int sata_scr_valid(struct ata_link *link);
-extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
-extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
-extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
+ unsigned int cmd, void __user *arg);
extern bool ata_link_online(struct ata_link *link);
extern bool ata_link_offline(struct ata_link *link);
#ifdef CONFIG_PM
-extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
+extern void ata_host_suspend(struct ata_host *host, pm_message_t mesg);
extern void ata_host_resume(struct ata_host *host);
extern void ata_sas_port_suspend(struct ata_port *ap);
extern void ata_sas_port_resume(struct ata_port *ap);
@@ -1156,33 +1107,34 @@ extern void ata_msleep(struct ata_port *ap, unsigned int msecs);
extern u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask,
u32 val, unsigned long interval, unsigned long timeout);
extern int atapi_cmd_type(u8 opcode);
-extern void ata_tf_to_fis(const struct ata_taskfile *tf,
- u8 pmp, int is_cmd, u8 *fis);
-extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
-extern unsigned long ata_pack_xfermask(unsigned long pio_mask,
- unsigned long mwdma_mask, unsigned long udma_mask);
-extern void ata_unpack_xfermask(unsigned long xfer_mask,
- unsigned long *pio_mask, unsigned long *mwdma_mask,
- unsigned long *udma_mask);
-extern u8 ata_xfer_mask2mode(unsigned long xfer_mask);
-extern unsigned long ata_xfer_mode2mask(u8 xfer_mode);
-extern int ata_xfer_mode2shift(unsigned long xfer_mode);
-extern const char *ata_mode_string(unsigned long xfer_mask);
-extern unsigned long ata_id_xfermask(const u16 *id);
+extern unsigned int ata_pack_xfermask(unsigned int pio_mask,
+ unsigned int mwdma_mask,
+ unsigned int udma_mask);
+extern void ata_unpack_xfermask(unsigned int xfer_mask,
+ unsigned int *pio_mask,
+ unsigned int *mwdma_mask,
+ unsigned int *udma_mask);
+extern u8 ata_xfer_mask2mode(unsigned int xfer_mask);
+extern unsigned int ata_xfer_mode2mask(u8 xfer_mode);
+extern int ata_xfer_mode2shift(u8 xfer_mode);
+extern const char *ata_mode_string(unsigned int xfer_mask);
+extern unsigned int ata_id_xfermask(const u16 *id);
extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
-extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
+extern unsigned int ata_port_classify(struct ata_port *ap,
+ const struct ata_taskfile *tf);
extern void ata_dev_disable(struct ata_device *adev);
extern void ata_id_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len);
extern void ata_id_c_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len);
extern unsigned int ata_do_dev_read_id(struct ata_device *dev,
- struct ata_taskfile *tf, u16 *id);
+ struct ata_taskfile *tf, __le16 *id);
extern void ata_qc_complete(struct ata_queued_cmd *qc);
-extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active);
+extern u64 ata_qc_get_active(struct ata_port *ap);
extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
extern int ata_std_bios_param(struct scsi_device *sdev,
struct block_device *bdev,
@@ -1192,13 +1144,102 @@ extern int ata_scsi_slave_config(struct scsi_device *sdev);
extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
int queue_depth);
-extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
- int queue_depth);
+extern int ata_change_queue_depth(struct ata_port *ap, struct ata_device *dev,
+ struct scsi_device *sdev, int queue_depth);
extern struct ata_device *ata_dev_pair(struct ata_device *adev);
extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q);
+
+/*
+ * SATA specific code - drivers/ata/libata-sata.c
+ */
+#ifdef CONFIG_SATA_HOST
+extern const unsigned long sata_deb_timing_normal[];
+extern const unsigned long sata_deb_timing_hotplug[];
+extern const unsigned long sata_deb_timing_long[];
+
+static inline const unsigned long *
+sata_ehc_deb_timing(struct ata_eh_context *ehc)
+{
+ if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
+ return sata_deb_timing_hotplug;
+ else
+ return sata_deb_timing_normal;
+}
+
+extern int sata_scr_valid(struct ata_link *link);
+extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
+extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
+extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
+extern int sata_set_spd(struct ata_link *link);
+extern int sata_link_hardreset(struct ata_link *link,
+ const unsigned long *timing, unsigned long deadline,
+ bool *online, int (*check_ready)(struct ata_link *));
+extern int sata_link_resume(struct ata_link *link, const unsigned long *params,
+ unsigned long deadline);
+extern void ata_eh_analyze_ncq_error(struct ata_link *link);
+#else
+static inline const unsigned long *
+sata_ehc_deb_timing(struct ata_eh_context *ehc)
+{
+ return NULL;
+}
+static inline int sata_scr_valid(struct ata_link *link) { return 0; }
+static inline int sata_scr_read(struct ata_link *link, int reg, u32 *val)
+{
+ return -EOPNOTSUPP;
+}
+static inline int sata_scr_write(struct ata_link *link, int reg, u32 val)
+{
+ return -EOPNOTSUPP;
+}
+static inline int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
+{
+ return -EOPNOTSUPP;
+}
+static inline int sata_set_spd(struct ata_link *link) { return -EOPNOTSUPP; }
+static inline int sata_link_hardreset(struct ata_link *link,
+ const unsigned long *timing,
+ unsigned long deadline,
+ bool *online,
+ int (*check_ready)(struct ata_link *))
+{
+ if (online)
+ *online = false;
+ return -EOPNOTSUPP;
+}
+static inline int sata_link_resume(struct ata_link *link,
+ const unsigned long *params,
+ unsigned long deadline)
+{
+ return -EOPNOTSUPP;
+}
+static inline void ata_eh_analyze_ncq_error(struct ata_link *link) { }
+#endif
+extern int sata_link_debounce(struct ata_link *link,
+ const unsigned long *params, unsigned long deadline);
+extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
+ bool spm_wakeup);
+extern int ata_slave_link_init(struct ata_port *ap);
+extern void ata_sas_port_destroy(struct ata_port *);
+extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
+ struct ata_port_info *, struct Scsi_Host *);
+extern void ata_sas_async_probe(struct ata_port *ap);
+extern int ata_sas_sync_probe(struct ata_port *ap);
+extern int ata_sas_port_init(struct ata_port *);
+extern int ata_sas_port_start(struct ata_port *ap);
+extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
+extern void ata_sas_tport_delete(struct ata_port *ap);
+extern void ata_sas_port_stop(struct ata_port *ap);
+extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
+extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
+extern void ata_tf_to_fis(const struct ata_taskfile *tf,
+ u8 pmp, int is_cmd, u8 *fis);
+extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
+extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active);
extern bool sata_lpm_ignore_phy_events(struct ata_link *link);
+extern int sata_async_notification(struct ata_port *ap);
extern int ata_cable_40wire(struct ata_port *ap);
extern int ata_cable_80wire(struct ata_port *ap);
@@ -1208,12 +1249,6 @@ extern int ata_cable_unknown(struct ata_port *ap);
/* Timing helpers */
extern unsigned int ata_pio_need_iordy(const struct ata_device *);
-extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode);
-extern int ata_timing_compute(struct ata_device *, unsigned short,
- struct ata_timing *, int, int);
-extern void ata_timing_merge(const struct ata_timing *,
- const struct ata_timing *, struct ata_timing *,
- unsigned int);
extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle);
/* PCI */
@@ -1228,6 +1263,7 @@ struct pci_bits {
};
extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
+extern void ata_pci_shutdown_one(struct pci_dev *pdev);
extern void ata_pci_remove_one(struct pci_dev *pdev);
#ifdef CONFIG_PM
@@ -1254,8 +1290,8 @@ static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
}
int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm);
int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm);
-unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev,
- const struct ata_acpi_gtm *gtm);
+unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
+ const struct ata_acpi_gtm *gtm);
int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm);
#else
static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
@@ -1296,14 +1332,12 @@ extern void ata_port_wait_eh(struct ata_port *ap);
extern int ata_link_abort(struct ata_link *link);
extern int ata_port_abort(struct ata_port *ap);
extern int ata_port_freeze(struct ata_port *ap);
-extern int sata_async_notification(struct ata_port *ap);
extern void ata_eh_freeze_port(struct ata_port *ap);
extern void ata_eh_thaw_port(struct ata_port *ap);
extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
-extern void ata_eh_analyze_ncq_error(struct ata_link *link);
extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
@@ -1336,7 +1370,7 @@ extern int ata_link_nr_enabled(struct ata_link *link);
*/
extern const struct ata_port_operations ata_base_port_ops;
extern const struct ata_port_operations sata_port_ops;
-extern struct device_attribute *ata_common_sdev_attrs[];
+extern const struct attribute_group *ata_common_sdev_groups[];
/*
* All sht initializers (BASE, PIO, BMDMA, NCQ) must be instantiated
@@ -1344,27 +1378,50 @@ extern struct device_attribute *ata_common_sdev_attrs[];
* edge driver's module reference, otherwise the driver can be unloaded
* even if the scsi_device is being accessed.
*/
-#define ATA_BASE_SHT(drv_name) \
+#define __ATA_BASE_SHT(drv_name) \
.module = THIS_MODULE, \
.name = drv_name, \
.ioctl = ata_scsi_ioctl, \
+ ATA_SCSI_COMPAT_IOCTL \
.queuecommand = ata_scsi_queuecmd, \
- .can_queue = ATA_DEF_QUEUE, \
- .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
+ .dma_need_drain = ata_scsi_dma_need_drain, \
.this_id = ATA_SHT_THIS_ID, \
.emulated = ATA_SHT_EMULATED, \
- .use_clustering = ATA_SHT_USE_CLUSTERING, \
.proc_name = drv_name, \
- .slave_configure = ata_scsi_slave_config, \
.slave_destroy = ata_scsi_slave_destroy, \
- .eh_timed_out = ata_scsi_timed_out, \
.bios_param = ata_std_bios_param, \
- .unlock_native_capacity = ata_scsi_unlock_native_capacity, \
- .sdev_attrs = ata_common_sdev_attrs
+ .unlock_native_capacity = ata_scsi_unlock_native_capacity,\
+ .max_sectors = ATA_MAX_SECTORS_LBA48
+
+#define ATA_SUBBASE_SHT(drv_name) \
+ __ATA_BASE_SHT(drv_name), \
+ .can_queue = ATA_DEF_QUEUE, \
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
+ .slave_configure = ata_scsi_slave_config
+
+#define ATA_SUBBASE_SHT_QD(drv_name, drv_qd) \
+ __ATA_BASE_SHT(drv_name), \
+ .can_queue = drv_qd, \
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
+ .slave_configure = ata_scsi_slave_config
+
+#define ATA_BASE_SHT(drv_name) \
+ ATA_SUBBASE_SHT(drv_name), \
+ .sdev_groups = ata_common_sdev_groups
+
+#ifdef CONFIG_SATA_HOST
+extern const struct attribute_group *ata_ncq_sdev_groups[];
#define ATA_NCQ_SHT(drv_name) \
- ATA_BASE_SHT(drv_name), \
+ ATA_SUBBASE_SHT(drv_name), \
+ .sdev_groups = ata_ncq_sdev_groups, \
+ .change_queue_depth = ata_scsi_change_queue_depth
+
+#define ATA_NCQ_SHT_QD(drv_name, drv_qd) \
+ ATA_SUBBASE_SHT_QD(drv_name, drv_qd), \
+ .sdev_groups = ata_ncq_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth
+#endif
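/*
 * Driver-side sketch: building a host template from the NCQ helper;
 * "foo" is a placeholder driver name.
 */
static const struct scsi_host_template foo_sht = {
	ATA_NCQ_SHT("foo"),
	.sg_tablesize	= LIBATA_MAX_PRD,
	.dma_boundary	= ATA_DMA_BOUNDARY,
};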
/*
* PMP helpers
@@ -1397,7 +1454,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap)
static inline bool ata_is_host_link(const struct ata_link *link)
{
- return 1;
+ return true;
}
#endif /* CONFIG_SATA_PMP */
@@ -1408,51 +1465,61 @@ static inline int sata_srst_pmp(struct ata_link *link)
return link->pmp;
}
-/*
- * printk helpers
- */
-__printf(3, 4)
-void ata_port_printk(const struct ata_port *ap, const char *level,
- const char *fmt, ...);
-__printf(3, 4)
-void ata_link_printk(const struct ata_link *link, const char *level,
- const char *fmt, ...);
-__printf(3, 4)
-void ata_dev_printk(const struct ata_device *dev, const char *level,
- const char *fmt, ...);
+#define ata_port_printk(level, ap, fmt, ...) \
+ pr_ ## level ("ata%u: " fmt, (ap)->print_id, ##__VA_ARGS__)
#define ata_port_err(ap, fmt, ...) \
- ata_port_printk(ap, KERN_ERR, fmt, ##__VA_ARGS__)
+ ata_port_printk(err, ap, fmt, ##__VA_ARGS__)
#define ata_port_warn(ap, fmt, ...) \
- ata_port_printk(ap, KERN_WARNING, fmt, ##__VA_ARGS__)
+ ata_port_printk(warn, ap, fmt, ##__VA_ARGS__)
#define ata_port_notice(ap, fmt, ...) \
- ata_port_printk(ap, KERN_NOTICE, fmt, ##__VA_ARGS__)
+ ata_port_printk(notice, ap, fmt, ##__VA_ARGS__)
#define ata_port_info(ap, fmt, ...) \
- ata_port_printk(ap, KERN_INFO, fmt, ##__VA_ARGS__)
+ ata_port_printk(info, ap, fmt, ##__VA_ARGS__)
#define ata_port_dbg(ap, fmt, ...) \
- ata_port_printk(ap, KERN_DEBUG, fmt, ##__VA_ARGS__)
+ ata_port_printk(debug, ap, fmt, ##__VA_ARGS__)
+
+#define ata_link_printk(level, link, fmt, ...) \
+do { \
+ if (sata_pmp_attached((link)->ap) || \
+ (link)->ap->slave_link) \
+ pr_ ## level ("ata%u.%02u: " fmt, \
+ (link)->ap->print_id, \
+ (link)->pmp, \
+ ##__VA_ARGS__); \
+ else \
+ pr_ ## level ("ata%u: " fmt, \
+ (link)->ap->print_id, \
+ ##__VA_ARGS__); \
+} while (0)
#define ata_link_err(link, fmt, ...) \
- ata_link_printk(link, KERN_ERR, fmt, ##__VA_ARGS__)
+ ata_link_printk(err, link, fmt, ##__VA_ARGS__)
#define ata_link_warn(link, fmt, ...) \
- ata_link_printk(link, KERN_WARNING, fmt, ##__VA_ARGS__)
+ ata_link_printk(warn, link, fmt, ##__VA_ARGS__)
#define ata_link_notice(link, fmt, ...) \
- ata_link_printk(link, KERN_NOTICE, fmt, ##__VA_ARGS__)
+ ata_link_printk(notice, link, fmt, ##__VA_ARGS__)
#define ata_link_info(link, fmt, ...) \
- ata_link_printk(link, KERN_INFO, fmt, ##__VA_ARGS__)
+ ata_link_printk(info, link, fmt, ##__VA_ARGS__)
#define ata_link_dbg(link, fmt, ...) \
- ata_link_printk(link, KERN_DEBUG, fmt, ##__VA_ARGS__)
+ ata_link_printk(debug, link, fmt, ##__VA_ARGS__)
+
+#define ata_dev_printk(level, dev, fmt, ...) \
+ pr_ ## level("ata%u.%02u: " fmt, \
+ (dev)->link->ap->print_id, \
+ (dev)->link->pmp + (dev)->devno, \
+ ##__VA_ARGS__)
#define ata_dev_err(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_ERR, fmt, ##__VA_ARGS__)
+ ata_dev_printk(err, dev, fmt, ##__VA_ARGS__)
#define ata_dev_warn(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_WARNING, fmt, ##__VA_ARGS__)
+ ata_dev_printk(warn, dev, fmt, ##__VA_ARGS__)
#define ata_dev_notice(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_NOTICE, fmt, ##__VA_ARGS__)
+ ata_dev_printk(notice, dev, fmt, ##__VA_ARGS__)
#define ata_dev_info(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
+ ata_dev_printk(info, dev, fmt, ##__VA_ARGS__)
#define ata_dev_dbg(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_DEBUG, fmt, ##__VA_ARGS__)
+ ata_dev_printk(debug, dev, fmt, ##__VA_ARGS__)
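/*
 * Sketch: callers now encode the level in the macro name instead of
 * passing a KERN_* argument.
 */
ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n", qc->tf.command);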
void ata_print_version(const struct device *dev, const char *version);
@@ -1483,16 +1550,39 @@ extern void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
const char *name);
#endif
-static inline unsigned int ata_tag_valid(unsigned int tag)
+static inline bool ata_tag_internal(unsigned int tag)
{
- return (tag < ATA_MAX_QUEUE) ? 1 : 0;
+ return tag == ATA_TAG_INTERNAL;
}
-static inline unsigned int ata_tag_internal(unsigned int tag)
+static inline bool ata_tag_valid(unsigned int tag)
{
- return tag == ATA_TAG_INTERNAL;
+ return tag < ATA_MAX_QUEUE || ata_tag_internal(tag);
}
+#define __ata_qc_for_each(ap, qc, tag, max_tag, fn) \
+ for ((tag) = 0; (tag) < (max_tag) && \
+	     ({ qc = fn((ap), (tag)); 1; }); (tag)++)
+
+/*
+ * Internal use only, iterate commands ignoring error handling and
+ * status of 'qc'.
+ */
+#define ata_qc_for_each_raw(ap, qc, tag) \
+ __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, __ata_qc_from_tag)
+
+/*
+ * Iterate all potential commands that can be queued
+ */
+#define ata_qc_for_each(ap, qc, tag) \
+ __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, ata_qc_from_tag)
+
+/*
+ * Like ata_qc_for_each, but with the internal tag included
+ */
+#define ata_qc_for_each_with_internal(ap, qc, tag) \
+ __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE + 1, ata_qc_from_tag)
+
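/*
 * Iteration sketch: complete every still-active command, including the
 * internal one; ata_qc_from_tag() yields NULL for inactive slots.
 */
struct ata_queued_cmd *qc;
unsigned int tag;

ata_qc_for_each_with_internal(ap, qc, tag) {
	if (qc)
		ata_qc_complete(qc);
}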
/*
* device helpers
*/
@@ -1603,19 +1693,35 @@ extern struct ata_device *ata_dev_next(struct ata_device *dev,
(dev) = ata_dev_next((dev), (link), ATA_DITER_##mode))
/**
+ * ata_ncq_supported - Test whether NCQ is supported
+ * @dev: ATA device to test
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * true if @dev supports NCQ, false otherwise.
+ */
+static inline bool ata_ncq_supported(struct ata_device *dev)
+{
+ if (!IS_ENABLED(CONFIG_SATA_HOST))
+ return false;
+ return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ;
+}
+
+/**
* ata_ncq_enabled - Test whether NCQ is enabled
- * @dev: ATA device to test for
+ * @dev: ATA device to test
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
- * 1 if NCQ is enabled for @dev, 0 otherwise.
+ * true if NCQ is enabled for @dev, false otherwise.
*/
-static inline int ata_ncq_enabled(struct ata_device *dev)
+static inline bool ata_ncq_enabled(struct ata_device *dev)
{
- return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
- ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ;
+ return ata_ncq_supported(dev) && !(dev->flags & ATA_DFLAG_NCQ_OFF);
}
static inline bool ata_fpdma_dsm_supported(struct ata_device *dev)
@@ -1653,7 +1759,7 @@ static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
unsigned int tag)
{
- if (likely(ata_tag_valid(tag)))
+ if (ata_tag_valid(tag))
return &ap->qcmd[tag];
return NULL;
}
@@ -1667,7 +1773,7 @@ static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
return qc;
if ((qc->flags & (ATA_QCFLAG_ACTIVE |
- ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
+ ATA_QCFLAG_EH)) == ATA_QCFLAG_ACTIVE)
return qc;
return NULL;
@@ -1783,6 +1889,16 @@ static inline int ata_dma_enabled(struct ata_device *adev)
}
/**************************************************************************
+ * PATA timings - drivers/ata/libata-pata-timings.c
+ */
+extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode);
+extern int ata_timing_compute(struct ata_device *, unsigned short,
+ struct ata_timing *, int, int);
+extern void ata_timing_merge(const struct ata_timing *,
+ const struct ata_timing *, struct ata_timing *,
+ unsigned int);
+
+/**************************************************************************
* PMP - drivers/ata/libata-pmp.c
*/
#ifdef CONFIG_SATA_PMP
@@ -1819,8 +1935,6 @@ extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device);
extern u8 ata_sff_check_status(struct ata_port *ap);
extern void ata_sff_pause(struct ata_port *ap);
extern void ata_sff_dma_pause(struct ata_port *ap);
-extern int ata_sff_busy_sleep(struct ata_port *ap,
- unsigned long timeout_pat, unsigned long timeout);
extern int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline);
extern void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
extern void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
@@ -1830,8 +1944,6 @@ extern unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw);
extern unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw);
-extern unsigned int ata_sff_data_xfer_noirq(struct ata_queued_cmd *qc,
- unsigned char *buf, unsigned int buflen, int rw);
extern void ata_sff_irq_on(struct ata_port *ap);
extern void ata_sff_irq_clear(struct ata_port *ap);
extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
@@ -1841,7 +1953,7 @@ extern void ata_sff_queue_delayed_work(struct delayed_work *dwork,
unsigned long delay);
extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay);
extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
-extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
+extern void ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
extern unsigned int ata_sff_port_intr(struct ata_port *ap,
struct ata_queued_cmd *qc);
extern irqreturn_t ata_sff_interrupt(int irq, void *dev_instance);
@@ -1868,10 +1980,10 @@ extern int ata_pci_sff_prepare_host(struct pci_dev *pdev,
struct ata_host **r_host);
extern int ata_pci_sff_activate_host(struct ata_host *host,
irq_handler_t irq_handler,
- struct scsi_host_template *sht);
+ const struct scsi_host_template *sht);
extern int ata_pci_sff_init_one(struct pci_dev *pdev,
const struct ata_port_info * const * ppi,
- struct scsi_host_template *sht, void *host_priv, int hflags);
+ const struct scsi_host_template *sht, void *host_priv, int hflags);
#endif /* CONFIG_PCI */
#ifdef CONFIG_ATA_BMDMA
@@ -1883,9 +1995,9 @@ extern const struct ata_port_operations ata_bmdma_port_ops;
.sg_tablesize = LIBATA_MAX_PRD, \
.dma_boundary = ATA_DMA_BOUNDARY
-extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc);
-extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_bmdma_port_intr(struct ata_port *ap,
struct ata_queued_cmd *qc);
extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance);
@@ -1907,7 +2019,7 @@ extern int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
struct ata_host **r_host);
extern int ata_pci_bmdma_init_one(struct pci_dev *pdev,
const struct ata_port_info * const * ppi,
- struct scsi_host_template *sht,
+ const struct scsi_host_template *sht,
void *host_priv, int hflags);
#endif /* CONFIG_PCI */
#endif /* CONFIG_ATA_BMDMA */
@@ -1953,11 +2065,8 @@ static inline u8 ata_wait_idle(struct ata_port *ap)
{
u8 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
-#ifdef ATA_DEBUG
if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ)))
- ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n",
- status);
-#endif
+ ata_port_dbg(ap, "abnormal Status 0x%X\n", status);
return status;
}
diff --git a/include/linux/libfdt.h b/include/linux/libfdt.h
index 4c0306c69b4e..90ed4ebfa692 100644
--- a/include/linux/libfdt.h
+++ b/include/linux/libfdt.h
@@ -1,8 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _INCLUDE_LIBFDT_H_
#define _INCLUDE_LIBFDT_H_
#include <linux/libfdt_env.h>
-#include "../../scripts/dtc/libfdt/fdt.h"
#include "../../scripts/dtc/libfdt/libfdt.h"
#endif /* _INCLUDE_LIBFDT_H_ */
diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h
index 2a663c6bb428..cea8574a29b1 100644
--- a/include/linux/libfdt_env.h
+++ b/include/linux/libfdt_env.h
@@ -1,10 +1,15 @@
-#ifndef _LIBFDT_ENV_H
-#define _LIBFDT_ENV_H
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LIBFDT_ENV_H
+#define LIBFDT_ENV_H
+#include <linux/limits.h> /* For INT_MAX */
#include <linux/string.h>
#include <asm/byteorder.h>
+#define INT32_MAX S32_MAX
+#define UINT32_MAX U32_MAX
+
typedef __be16 fdt16_t;
typedef __be32 fdt32_t;
typedef __be64 fdt64_t;
@@ -14,4 +19,4 @@ typedef __be64 fdt64_t;
#define fdt64_to_cpu(x) be64_to_cpu(x)
#define cpu_to_fdt64(x) cpu_to_be64(x)
-#endif /* _LIBFDT_ENV_H */
+#endif /* LIBFDT_ENV_H */
diff --git a/include/linux/libgcc.h b/include/linux/libgcc.h
new file mode 100644
index 000000000000..fc388da6a027
--- /dev/null
+++ b/include/linux/libgcc.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * include/linux/libgcc.h
+ */
+
+#ifndef __LIB_LIBGCC_H
+#define __LIB_LIBGCC_H
+
+#include <asm/byteorder.h>
+
+typedef int word_type __attribute__ ((mode (__word__)));
+
+#ifdef __BIG_ENDIAN
+struct DWstruct {
+ int high, low;
+};
+#elif defined(__LITTLE_ENDIAN)
+struct DWstruct {
+ int low, high;
+};
+#else
+#error I feel sick.
+#endif
+
+typedef union {
+ struct DWstruct s;
+ long long ll;
+} DWunion;
+
+long long notrace __ashldi3(long long u, word_type b);
+long long notrace __ashrdi3(long long u, word_type b);
+word_type notrace __cmpdi2(long long a, long long b);
+long long notrace __lshrdi3(long long u, word_type b);
+long long notrace __muldi3(long long u, long long v);
+word_type notrace __ucmpdi2(unsigned long long a, unsigned long long b);
+
+#endif /* __LIB_LIBGCC_H */
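The DWstruct/DWunion pair above is what lets the 64-bit helpers operate on the two 32-bit halves of a value with the correct endianness. Below is a minimal standalone user-space sketch of that technique, assuming a little-endian host; my_lshrdi3 is a made-up name, not the kernel's lib/lshrdi3.c implementation, and word_type is simplified to plain int.

#include <stdio.h>

typedef int word_type;				/* demo stand-in for mode(__word__) */

struct DWstruct { int low, high; };		/* little-endian half order */
typedef union { struct DWstruct s; long long ll; } DWunion;

static long long my_lshrdi3(long long u, word_type b)
{
	DWunion uu = { .ll = u }, w;
	const word_type bm = 32 - b;

	if (b == 0)
		return u;
	if (bm <= 0) {
		/* shift count >= 32: the high half vanishes entirely */
		w.s.high = 0;
		w.s.low = (unsigned int)uu.s.high >> -bm;
	} else {
		/* low b bits of the high half carry into the low half */
		const unsigned int carries = (unsigned int)uu.s.high << bm;

		w.s.high = (unsigned int)uu.s.high >> b;
		w.s.low = ((unsigned int)uu.s.low >> b) | carries;
	}
	return w.ll;
}

int main(void)
{
	/* prints 123456789abcde */
	printf("%llx\n", my_lshrdi3(0x123456789abcdef0LL, 8));
	return 0;
}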
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index f3d3e6af8838..e772aae71843 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* libnvdimm - Non-volatile-memory Devices Subsystem
*
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef __LIBNVDIMM_H__
#define __LIBNVDIMM_H__
@@ -18,14 +10,39 @@
#include <linux/sizes.h>
#include <linux/types.h>
#include <linux/uuid.h>
+#include <linux/spinlock.h>
+#include <linux/bio.h>
+
+struct badrange_entry {
+ u64 start;
+ u64 length;
+ struct list_head list;
+};
+
+struct badrange {
+ struct list_head list;
+ spinlock_t lock;
+};
enum {
- /* when a dimm supports both PMEM and BLK access a label is required */
- NDD_ALIASING = 0,
/* unarmed memory devices may not persist writes */
NDD_UNARMED = 1,
/* locked memory devices should not be accessed */
NDD_LOCKED = 2,
+ /* memory under security wipes should not be accessed */
+ NDD_SECURITY_OVERWRITE = 3,
+ /* tracking whether or not there is a pending device reference */
+ NDD_WORK_PENDING = 4,
+ /* dimm supports namespace labels */
+ NDD_LABELING = 6,
+ /*
+ * dimm contents have changed, requiring invalidation of CPU caches prior
+ * to activation of a region that includes this device
+ */
+ NDD_INCOHERENT = 7,
+
+ /* dimm provider wants synchronous registration by __nvdimm_create() */
+ NDD_REGISTER_SYNC = 8,
/* need to set a limit somewhere, but yes, this is likely overkill */
ND_IOCTL_MAX_BUFLEN = SZ_4M,
@@ -35,34 +52,48 @@ enum {
/* region flag indicating to direct-map persistent memory by default */
ND_REGION_PAGEMAP = 0,
+ /*
+ * Platform ensures entire CPU store data path is flushed to pmem on
+ * system power loss.
+ */
+ ND_REGION_PERSIST_CACHE = 1,
+ /*
+ * Platform provides mechanisms to automatically flush outstanding
+ * write data from memory controller to pmem on system power loss.
+ * (ADR)
+ */
+ ND_REGION_PERSIST_MEMCTRL = 2,
+
+ /* Platform provides asynchronous flush mechanism */
+ ND_REGION_ASYNC = 3,
+
+ /* Region was created by CXL subsystem */
+ ND_REGION_CXL = 4,
/* mark newly adjusted resources as requiring a label update */
DPA_RESOURCE_ADJUSTED = 1 << 0,
};
-extern struct attribute_group nvdimm_bus_attribute_group;
-extern struct attribute_group nvdimm_attribute_group;
-extern struct attribute_group nd_device_attribute_group;
-extern struct attribute_group nd_numa_attribute_group;
-extern struct attribute_group nd_region_attribute_group;
-extern struct attribute_group nd_mapping_attribute_group;
-
struct nvdimm;
struct nvdimm_bus_descriptor;
typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd, void *buf,
unsigned int buf_len, int *cmd_rc);
+struct device_node;
struct nvdimm_bus_descriptor {
const struct attribute_group **attr_groups;
- unsigned long bus_dsm_mask;
unsigned long cmd_mask;
+ unsigned long dimm_family_mask;
+ unsigned long bus_family_mask;
struct module *module;
char *provider_name;
+ struct device_node *of_node;
ndctl_fn ndctl;
int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc);
int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc,
- struct nvdimm *nvdimm, unsigned int cmd);
+ struct nvdimm *nvdimm, unsigned int cmd, void *data);
+ const struct nvdimm_bus_fw_ops *fw_ops;
};
struct nd_cmd_desc {
@@ -87,8 +118,10 @@ struct nd_mapping_desc {
struct nvdimm *nvdimm;
u64 start;
u64 size;
+ int position;
};
+struct nd_region;
struct nd_region_desc {
struct resource *res;
struct nd_mapping_desc *mapping;
@@ -98,7 +131,11 @@ struct nd_region_desc {
void *provider_data;
int num_lanes;
int numa_node;
+ int target_node;
unsigned long flags;
+ int memregion;
+ struct device_node *of_node;
+ int (*flush)(struct nd_region *nd_region, struct bio *bio);
};
struct device;
@@ -111,43 +148,133 @@ static inline void __iomem *devm_nvdimm_ioremap(struct device *dev,
}
struct nvdimm_bus;
-struct module;
-struct device;
-struct nd_blk_region;
-struct nd_blk_region_desc {
- int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
- int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
- void *iobuf, u64 len, int rw);
- struct nd_region_desc ndr_desc;
+
+/*
+ * Note that separate bits for locked + unlocked are defined so that
+ * 'flags == 0' corresponds to an error / not-supported state.
+ */
+enum nvdimm_security_bits {
+ NVDIMM_SECURITY_DISABLED,
+ NVDIMM_SECURITY_UNLOCKED,
+ NVDIMM_SECURITY_LOCKED,
+ NVDIMM_SECURITY_FROZEN,
+ NVDIMM_SECURITY_OVERWRITE,
};
-static inline struct nd_blk_region_desc *to_blk_region_desc(
- struct nd_region_desc *ndr_desc)
-{
- return container_of(ndr_desc, struct nd_blk_region_desc, ndr_desc);
-}
+#define NVDIMM_PASSPHRASE_LEN 32
+#define NVDIMM_KEY_DESC_LEN 22
+struct nvdimm_key_data {
+ u8 data[NVDIMM_PASSPHRASE_LEN];
+};
+
+enum nvdimm_passphrase_type {
+ NVDIMM_USER,
+ NVDIMM_MASTER,
+};
+
+struct nvdimm_security_ops {
+ unsigned long (*get_flags)(struct nvdimm *nvdimm,
+ enum nvdimm_passphrase_type pass_type);
+ int (*freeze)(struct nvdimm *nvdimm);
+ int (*change_key)(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *old_data,
+ const struct nvdimm_key_data *new_data,
+ enum nvdimm_passphrase_type pass_type);
+ int (*unlock)(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *key_data);
+ int (*disable)(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *key_data);
+ int (*erase)(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *key_data,
+ enum nvdimm_passphrase_type pass_type);
+ int (*overwrite)(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *key_data);
+ int (*query_overwrite)(struct nvdimm *nvdimm);
+ int (*disable_master)(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *key_data);
+};
+
+enum nvdimm_fwa_state {
+ NVDIMM_FWA_INVALID,
+ NVDIMM_FWA_IDLE,
+ NVDIMM_FWA_ARMED,
+ NVDIMM_FWA_BUSY,
+ NVDIMM_FWA_ARM_OVERFLOW,
+};
-int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length);
-void nvdimm_forget_poison(struct nvdimm_bus *nvdimm_bus,
- phys_addr_t start, unsigned int len);
+enum nvdimm_fwa_trigger {
+ NVDIMM_FWA_ARM,
+ NVDIMM_FWA_DISARM,
+};
+
+enum nvdimm_fwa_capability {
+ NVDIMM_FWA_CAP_INVALID,
+ NVDIMM_FWA_CAP_NONE,
+ NVDIMM_FWA_CAP_QUIESCE,
+ NVDIMM_FWA_CAP_LIVE,
+};
+
+enum nvdimm_fwa_result {
+ NVDIMM_FWA_RESULT_INVALID,
+ NVDIMM_FWA_RESULT_NONE,
+ NVDIMM_FWA_RESULT_SUCCESS,
+ NVDIMM_FWA_RESULT_NOTSTAGED,
+ NVDIMM_FWA_RESULT_NEEDRESET,
+ NVDIMM_FWA_RESULT_FAIL,
+};
+
+struct nvdimm_bus_fw_ops {
+ enum nvdimm_fwa_state (*activate_state)
+ (struct nvdimm_bus_descriptor *nd_desc);
+ enum nvdimm_fwa_capability (*capability)
+ (struct nvdimm_bus_descriptor *nd_desc);
+ int (*activate)(struct nvdimm_bus_descriptor *nd_desc);
+};
+
+struct nvdimm_fw_ops {
+ enum nvdimm_fwa_state (*activate_state)(struct nvdimm *nvdimm);
+ enum nvdimm_fwa_result (*activate_result)(struct nvdimm *nvdimm);
+ int (*arm)(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arg);
+};
+
+void badrange_init(struct badrange *badrange);
+int badrange_add(struct badrange *badrange, u64 addr, u64 length);
+void badrange_forget(struct badrange *badrange, phys_addr_t start,
+ unsigned int len);
+int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr,
+ u64 length);
struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
struct nvdimm_bus_descriptor *nfit_desc);
void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus);
struct nvdimm_bus *to_nvdimm_bus(struct device *dev);
+struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm);
struct nvdimm *to_nvdimm(struct device *dev);
struct nd_region *to_nd_region(struct device *dev);
-struct nd_blk_region *to_nd_blk_region(struct device *dev);
+struct device *nd_region_dev(struct nd_region *nd_region);
struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus);
struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus);
const char *nvdimm_name(struct nvdimm *nvdimm);
struct kobject *nvdimm_kobj(struct nvdimm *nvdimm);
unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm);
void *nvdimm_provider_data(struct nvdimm *nvdimm);
-struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
- const struct attribute_group **groups, unsigned long flags,
- unsigned long cmd_mask, int num_flush,
- struct resource *flush_wpq);
+struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
+ void *provider_data, const struct attribute_group **groups,
+ unsigned long flags, unsigned long cmd_mask, int num_flush,
+ struct resource *flush_wpq, const char *dimm_id,
+ const struct nvdimm_security_ops *sec_ops,
+ const struct nvdimm_fw_ops *fw_ops);
+static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus,
+ void *provider_data, const struct attribute_group **groups,
+ unsigned long flags, unsigned long cmd_mask, int num_flush,
+ struct resource *flush_wpq)
+{
+ return __nvdimm_create(nvdimm_bus, provider_data, groups, flags,
+ cmd_mask, num_flush, flush_wpq, NULL, NULL, NULL);
+}
+void nvdimm_delete(struct nvdimm *nvdimm);
+void nvdimm_region_delete(struct nd_region *nd_region);
+
const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
@@ -163,14 +290,37 @@ struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region_desc *ndr_desc);
void *nd_region_provider_data(struct nd_region *nd_region);
-void *nd_blk_region_provider_data(struct nd_blk_region *ndbr);
-void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data);
-struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr);
-unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr);
unsigned int nd_region_acquire_lane(struct nd_region *nd_region);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
u64 nd_fletcher64(void *addr, size_t len, bool le);
-void nvdimm_flush(struct nd_region *nd_region);
+int nvdimm_flush(struct nd_region *nd_region, struct bio *bio);
+int generic_nvdimm_flush(struct nd_region *nd_region);
int nvdimm_has_flush(struct nd_region *nd_region);
int nvdimm_has_cache(struct nd_region *nd_region);
+int nvdimm_in_overwrite(struct nvdimm *nvdimm);
+bool is_nvdimm_sync(struct nd_region *nd_region);
+
+static inline int nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd, void *buf,
+ unsigned int buf_len, int *cmd_rc)
+{
+ struct nvdimm_bus *nvdimm_bus = nvdimm_to_bus(nvdimm);
+ struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+
+ return nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, cmd_rc);
+}
+
+#ifdef CONFIG_ARCH_HAS_PMEM_API
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
+void arch_wb_cache_pmem(void *addr, size_t size);
+void arch_invalidate_pmem(void *addr, size_t size);
+#else
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
+static inline void arch_wb_cache_pmem(void *addr, size_t size)
+{
+}
+static inline void arch_invalidate_pmem(void *addr, size_t size)
+{
+}
+#endif
+
#endif /* __LIBNVDIMM_H__ */
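The badrange_* calls above replace the old nvdimm_bus_add_poison()/nvdimm_forget_poison() pair, and the pattern a bus provider follows is short enough to sketch. A hypothetical kernel-context sketch based only on the prototypes above (the demo_* names are invented; error handling elided):

#include <linux/libnvdimm.h>

static struct badrange demo_badrange;

static void demo_track_bad_media(void)
{
	/* set up the empty list and its spinlock */
	badrange_init(&demo_badrange);

	/* record two damaged spans: (start address, length in bytes) */
	badrange_add(&demo_badrange, 0x1000, 512);
	badrange_add(&demo_badrange, 0x8000, 4096);

	/* the first span was cleared by a scrub: drop it again */
	badrange_forget(&demo_badrange, 0x1000, 512);
}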
diff --git a/include/linux/libps2.h b/include/linux/libps2.h
index 4ad06e824f76..53f7e4d0f4b7 100644
--- a/include/linux/libps2.h
+++ b/include/linux/libps2.h
@@ -1,16 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LIBPS2_H
#define _LIBPS2_H
/*
* Copyright (C) 1999-2002 Vojtech Pavlik
* Copyright (C) 2004 Dmitry Torokhov
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#define PS2_CMD_SETSCALE11 0x00e6
+#define PS2_CMD_SETRES 0x10e8
#define PS2_CMD_GETID 0x02f2
#define PS2_CMD_RESET_BAT 0x02ff
@@ -20,11 +23,12 @@
#define PS2_RET_NAK 0xfe
#define PS2_RET_ERR 0xfc
-#define PS2_FLAG_ACK 1 /* Waiting for ACK/NAK */
-#define PS2_FLAG_CMD 2 /* Waiting for command to finish */
-#define PS2_FLAG_CMD1 4 /* Waiting for the first byte of command response */
-#define PS2_FLAG_WAITID 8 /* Command execiting is GET ID */
-#define PS2_FLAG_NAK 16 /* Last transmission was NAKed */
+#define PS2_FLAG_ACK BIT(0) /* Waiting for ACK/NAK */
+#define PS2_FLAG_CMD BIT(1) /* Waiting for a command to finish */
+#define PS2_FLAG_CMD1 BIT(2) /* Waiting for the first byte of command response */
+#define PS2_FLAG_WAITID BIT(3) /* Command executing is GET ID */
+#define PS2_FLAG_NAK BIT(4) /* Last transmission was NAKed */
+#define PS2_FLAG_ACK_CMD BIT(5) /* Waiting to ACK the command (first) byte */
struct ps2dev {
struct serio *serio;
@@ -36,21 +40,22 @@ struct ps2dev {
wait_queue_head_t wait;
unsigned long flags;
- unsigned char cmdbuf[8];
- unsigned char cmdcnt;
- unsigned char nak;
+ u8 cmdbuf[8];
+ u8 cmdcnt;
+ u8 nak;
};
void ps2_init(struct ps2dev *ps2dev, struct serio *serio);
-int ps2_sendbyte(struct ps2dev *ps2dev, unsigned char byte, int timeout);
-void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout);
+int ps2_sendbyte(struct ps2dev *ps2dev, u8 byte, unsigned int timeout);
+void ps2_drain(struct ps2dev *ps2dev, size_t maxbytes, unsigned int timeout);
void ps2_begin_command(struct ps2dev *ps2dev);
void ps2_end_command(struct ps2dev *ps2dev);
-int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command);
-int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command);
-int ps2_handle_ack(struct ps2dev *ps2dev, unsigned char data);
-int ps2_handle_response(struct ps2dev *ps2dev, unsigned char data);
+int __ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command);
+int ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command);
+int ps2_sliced_command(struct ps2dev *ps2dev, u8 command);
+bool ps2_handle_ack(struct ps2dev *ps2dev, u8 data);
+bool ps2_handle_response(struct ps2dev *ps2dev, u8 data);
void ps2_cmd_aborted(struct ps2dev *ps2dev);
-int ps2_is_keyboard_id(char id);
+bool ps2_is_keyboard_id(u8 id);
#endif /* _LIBPS2_H */
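As I read the constants above, the upper byte of a PS2_CMD_* value packs the parameter traffic: bits 15:12 give the number of bytes sent from param[] and bits 11:8 the number received into it, so PS2_CMD_GETID (0x02f2) sends none and reads two ID bytes, while PS2_CMD_SETRES (0x10e8) sends one. A hedged sketch of a caller (demo_query_id is a made-up helper, assuming an already-initialized ps2dev):

#include <linux/libps2.h>

/* Hypothetical helper: fetch the 2-byte PS/2 device ID. The 0x02 in
 * PS2_CMD_GETID tells libps2 to receive two bytes into id[]. */
static int demo_query_id(struct ps2dev *ps2dev, u8 id[2])
{
	return ps2_command(ps2dev, id, PS2_CMD_GETID);
}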
diff --git a/include/linux/license.h b/include/linux/license.h
index decdbf43cb5c..7cce390f120b 100644
--- a/include/linux/license.h
+++ b/include/linux/license.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __LICENSE_H
#define __LICENSE_H
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
deleted file mode 100644
index 7dfa56ebbc6d..000000000000
--- a/include/linux/lightnvm.h
+++ /dev/null
@@ -1,510 +0,0 @@
-#ifndef NVM_H
-#define NVM_H
-
-#include <linux/blkdev.h>
-#include <linux/types.h>
-#include <uapi/linux/lightnvm.h>
-
-enum {
- NVM_IO_OK = 0,
- NVM_IO_REQUEUE = 1,
- NVM_IO_DONE = 2,
- NVM_IO_ERR = 3,
-
- NVM_IOTYPE_NONE = 0,
- NVM_IOTYPE_GC = 1,
-};
-
-#define NVM_BLK_BITS (16)
-#define NVM_PG_BITS (16)
-#define NVM_SEC_BITS (8)
-#define NVM_PL_BITS (8)
-#define NVM_LUN_BITS (8)
-#define NVM_CH_BITS (7)
-
-struct ppa_addr {
- /* Generic structure for all addresses */
- union {
- struct {
- u64 blk : NVM_BLK_BITS;
- u64 pg : NVM_PG_BITS;
- u64 sec : NVM_SEC_BITS;
- u64 pl : NVM_PL_BITS;
- u64 lun : NVM_LUN_BITS;
- u64 ch : NVM_CH_BITS;
- u64 reserved : 1;
- } g;
-
- struct {
- u64 line : 63;
- u64 is_cached : 1;
- } c;
-
- u64 ppa;
- };
-};
-
-struct nvm_rq;
-struct nvm_id;
-struct nvm_dev;
-struct nvm_tgt_dev;
-
-typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
-typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
-typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
- nvm_l2p_update_fn *, void *);
-typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
-typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
-typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
-typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
-typedef void (nvm_destroy_dma_pool_fn)(void *);
-typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
- dma_addr_t *);
-typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
-
-struct nvm_dev_ops {
- nvm_id_fn *identity;
- nvm_get_l2p_tbl_fn *get_l2p_tbl;
- nvm_op_bb_tbl_fn *get_bb_tbl;
- nvm_op_set_bb_fn *set_bb_tbl;
-
- nvm_submit_io_fn *submit_io;
-
- nvm_create_dma_pool_fn *create_dma_pool;
- nvm_destroy_dma_pool_fn *destroy_dma_pool;
- nvm_dev_dma_alloc_fn *dev_dma_alloc;
- nvm_dev_dma_free_fn *dev_dma_free;
-
- unsigned int max_phys_sect;
-};
-
-#ifdef CONFIG_NVM
-
-#include <linux/blkdev.h>
-#include <linux/file.h>
-#include <linux/dmapool.h>
-#include <uapi/linux/lightnvm.h>
-
-enum {
- /* HW Responsibilities */
- NVM_RSP_L2P = 1 << 0,
- NVM_RSP_ECC = 1 << 1,
-
- /* Physical Adressing Mode */
- NVM_ADDRMODE_LINEAR = 0,
- NVM_ADDRMODE_CHANNEL = 1,
-
- /* Plane programming mode for LUN */
- NVM_PLANE_SINGLE = 1,
- NVM_PLANE_DOUBLE = 2,
- NVM_PLANE_QUAD = 4,
-
- /* Status codes */
- NVM_RSP_SUCCESS = 0x0,
- NVM_RSP_NOT_CHANGEABLE = 0x1,
- NVM_RSP_ERR_FAILWRITE = 0x40ff,
- NVM_RSP_ERR_EMPTYPAGE = 0x42ff,
- NVM_RSP_ERR_FAILECC = 0x4281,
- NVM_RSP_ERR_FAILCRC = 0x4004,
- NVM_RSP_WARN_HIGHECC = 0x4700,
-
- /* Device opcodes */
- NVM_OP_HBREAD = 0x02,
- NVM_OP_HBWRITE = 0x81,
- NVM_OP_PWRITE = 0x91,
- NVM_OP_PREAD = 0x92,
- NVM_OP_ERASE = 0x90,
-
- /* PPA Command Flags */
- NVM_IO_SNGL_ACCESS = 0x0,
- NVM_IO_DUAL_ACCESS = 0x1,
- NVM_IO_QUAD_ACCESS = 0x2,
-
- /* NAND Access Modes */
- NVM_IO_SUSPEND = 0x80,
- NVM_IO_SLC_MODE = 0x100,
- NVM_IO_SCRAMBLE_ENABLE = 0x200,
-
- /* Block Types */
- NVM_BLK_T_FREE = 0x0,
- NVM_BLK_T_BAD = 0x1,
- NVM_BLK_T_GRWN_BAD = 0x2,
- NVM_BLK_T_DEV = 0x4,
- NVM_BLK_T_HOST = 0x8,
-
- /* Memory capabilities */
- NVM_ID_CAP_SLC = 0x1,
- NVM_ID_CAP_CMD_SUSPEND = 0x2,
- NVM_ID_CAP_SCRAMBLE = 0x4,
- NVM_ID_CAP_ENCRYPT = 0x8,
-
- /* Memory types */
- NVM_ID_FMTYPE_SLC = 0,
- NVM_ID_FMTYPE_MLC = 1,
-
- /* Device capabilities */
- NVM_ID_DCAP_BBLKMGMT = 0x1,
- NVM_UD_DCAP_ECC = 0x2,
-};
-
-struct nvm_id_lp_mlc {
- u16 num_pairs;
- u8 pairs[886];
-};
-
-struct nvm_id_lp_tbl {
- __u8 id[8];
- struct nvm_id_lp_mlc mlc;
-};
-
-struct nvm_id_group {
- u8 mtype;
- u8 fmtype;
- u8 num_ch;
- u8 num_lun;
- u8 num_pln;
- u16 num_blk;
- u16 num_pg;
- u16 fpg_sz;
- u16 csecs;
- u16 sos;
- u32 trdt;
- u32 trdm;
- u32 tprt;
- u32 tprm;
- u32 tbet;
- u32 tbem;
- u32 mpos;
- u32 mccap;
- u16 cpar;
-
- struct nvm_id_lp_tbl lptbl;
-};
-
-struct nvm_addr_format {
- u8 ch_offset;
- u8 ch_len;
- u8 lun_offset;
- u8 lun_len;
- u8 pln_offset;
- u8 pln_len;
- u8 blk_offset;
- u8 blk_len;
- u8 pg_offset;
- u8 pg_len;
- u8 sect_offset;
- u8 sect_len;
-};
-
-struct nvm_id {
- u8 ver_id;
- u8 vmnt;
- u32 cap;
- u32 dom;
- struct nvm_addr_format ppaf;
- struct nvm_id_group grp;
-} __packed;
-
-struct nvm_target {
- struct list_head list;
- struct nvm_tgt_dev *dev;
- struct nvm_tgt_type *type;
- struct gendisk *disk;
-};
-
-#define ADDR_EMPTY (~0ULL)
-
-#define NVM_VERSION_MAJOR 1
-#define NVM_VERSION_MINOR 0
-#define NVM_VERSION_PATCH 0
-
-struct nvm_rq;
-typedef void (nvm_end_io_fn)(struct nvm_rq *);
-
-struct nvm_rq {
- struct nvm_tgt_dev *dev;
-
- struct bio *bio;
-
- union {
- struct ppa_addr ppa_addr;
- dma_addr_t dma_ppa_list;
- };
-
- struct ppa_addr *ppa_list;
-
- void *meta_list;
- dma_addr_t dma_meta_list;
-
- struct completion *wait;
- nvm_end_io_fn *end_io;
-
- uint8_t opcode;
- uint16_t nr_ppas;
- uint16_t flags;
-
- u64 ppa_status; /* ppa media status */
- int error;
-
- void *private;
-};
-
-static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
-{
- return pdu - sizeof(struct nvm_rq);
-}
-
-static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
-{
- return rqdata + 1;
-}
-
-enum {
- NVM_BLK_ST_FREE = 0x1, /* Free block */
- NVM_BLK_ST_TGT = 0x2, /* Block in use by target */
- NVM_BLK_ST_BAD = 0x8, /* Bad block */
-};
-
-/* Device generic information */
-struct nvm_geo {
- int nr_chnls;
- int nr_luns;
- int luns_per_chnl; /* -1 if channels are not symmetric */
- int nr_planes;
- int sec_per_pg; /* only sectors for a single page */
- int pgs_per_blk;
- int blks_per_lun;
- int fpg_size;
- int pfpg_size; /* size of buffer if all pages are to be read */
- int sec_size;
- int oob_size;
- int mccap;
- struct nvm_addr_format ppaf;
-
- /* Calculated/Cached values. These do not reflect the actual usable
- * blocks at run-time.
- */
- int max_rq_size;
- int plane_mode; /* drive device in single, double or quad mode */
-
- int sec_per_pl; /* all sectors across planes */
- int sec_per_blk;
- int sec_per_lun;
-};
-
-/* sub-device structure */
-struct nvm_tgt_dev {
- /* Device information */
- struct nvm_geo geo;
-
- /* Base ppas for target LUNs */
- struct ppa_addr *luns;
-
- sector_t total_secs;
-
- struct nvm_id identity;
- struct request_queue *q;
-
- struct nvm_dev *parent;
- void *map;
-};
-
-struct nvm_dev {
- struct nvm_dev_ops *ops;
-
- struct list_head devices;
-
- /* Device information */
- struct nvm_geo geo;
-
- /* lower page table */
- int lps_per_blk;
- int *lptbl;
-
- unsigned long total_secs;
-
- unsigned long *lun_map;
- void *dma_pool;
-
- struct nvm_id identity;
-
- /* Backend device */
- struct request_queue *q;
- char name[DISK_NAME_LEN];
- void *private_data;
-
- void *rmap;
-
- struct mutex mlock;
- spinlock_t lock;
-
- /* target management */
- struct list_head area_list;
- struct list_head targets;
-};
-
-static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo,
- u64 pba)
-{
- struct ppa_addr l;
- int secs, pgs, blks, luns;
- sector_t ppa = pba;
-
- l.ppa = 0;
-
- div_u64_rem(ppa, geo->sec_per_pg, &secs);
- l.g.sec = secs;
-
- sector_div(ppa, geo->sec_per_pg);
- div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
- l.g.pg = pgs;
-
- sector_div(ppa, geo->pgs_per_blk);
- div_u64_rem(ppa, geo->blks_per_lun, &blks);
- l.g.blk = blks;
-
- sector_div(ppa, geo->blks_per_lun);
- div_u64_rem(ppa, geo->luns_per_chnl, &luns);
- l.g.lun = luns;
-
- sector_div(ppa, geo->luns_per_chnl);
- l.g.ch = ppa;
-
- return l;
-}
-
-static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev,
- struct ppa_addr r)
-{
- struct nvm_geo *geo = &tgt_dev->geo;
- struct ppa_addr l;
-
- l.ppa = ((u64)r.g.blk) << geo->ppaf.blk_offset;
- l.ppa |= ((u64)r.g.pg) << geo->ppaf.pg_offset;
- l.ppa |= ((u64)r.g.sec) << geo->ppaf.sect_offset;
- l.ppa |= ((u64)r.g.pl) << geo->ppaf.pln_offset;
- l.ppa |= ((u64)r.g.lun) << geo->ppaf.lun_offset;
- l.ppa |= ((u64)r.g.ch) << geo->ppaf.ch_offset;
-
- return l;
-}
-
-static inline struct ppa_addr dev_to_generic_addr(struct nvm_tgt_dev *tgt_dev,
- struct ppa_addr r)
-{
- struct nvm_geo *geo = &tgt_dev->geo;
- struct ppa_addr l;
-
- l.ppa = 0;
- /*
- * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc.
- */
- l.g.blk = (r.ppa >> geo->ppaf.blk_offset) &
- (((1 << geo->ppaf.blk_len) - 1));
- l.g.pg |= (r.ppa >> geo->ppaf.pg_offset) &
- (((1 << geo->ppaf.pg_len) - 1));
- l.g.sec |= (r.ppa >> geo->ppaf.sect_offset) &
- (((1 << geo->ppaf.sect_len) - 1));
- l.g.pl |= (r.ppa >> geo->ppaf.pln_offset) &
- (((1 << geo->ppaf.pln_len) - 1));
- l.g.lun |= (r.ppa >> geo->ppaf.lun_offset) &
- (((1 << geo->ppaf.lun_len) - 1));
- l.g.ch |= (r.ppa >> geo->ppaf.ch_offset) &
- (((1 << geo->ppaf.ch_len) - 1));
-
- return l;
-}
-
-static inline int ppa_empty(struct ppa_addr ppa_addr)
-{
- return (ppa_addr.ppa == ADDR_EMPTY);
-}
-
-static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
-{
- ppa_addr->ppa = ADDR_EMPTY;
-}
-
-static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2)
-{
- if (ppa_empty(ppa1) || ppa_empty(ppa2))
- return 0;
-
- return ((ppa1.g.ch == ppa2.g.ch) && (ppa1.g.lun == ppa2.g.lun) &&
- (ppa1.g.blk == ppa2.g.blk));
-}
-
-typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
-typedef sector_t (nvm_tgt_capacity_fn)(void *);
-typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
- int flags);
-typedef void (nvm_tgt_exit_fn)(void *);
-typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
-typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);
-
-struct nvm_tgt_type {
- const char *name;
- unsigned int version[3];
-
- /* target entry points */
- nvm_tgt_make_rq_fn *make_rq;
- nvm_tgt_capacity_fn *capacity;
-
- /* module-specific init/teardown */
- nvm_tgt_init_fn *init;
- nvm_tgt_exit_fn *exit;
-
- /* sysfs */
- nvm_tgt_sysfs_init_fn *sysfs_init;
- nvm_tgt_sysfs_exit_fn *sysfs_exit;
-
- /* For internal use */
- struct list_head list;
-};
-
-extern struct nvm_tgt_type *nvm_find_target_type(const char *, int);
-
-extern int nvm_register_tgt_type(struct nvm_tgt_type *);
-extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);
-
-extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
-extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);
-
-extern struct nvm_dev *nvm_alloc_dev(int);
-extern int nvm_register(struct nvm_dev *);
-extern void nvm_unregister(struct nvm_dev *);
-
-extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
- int, int);
-extern int nvm_max_phys_sects(struct nvm_tgt_dev *);
-extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
-extern int nvm_erase_sync(struct nvm_tgt_dev *, struct ppa_addr *, int);
-extern int nvm_set_rqd_ppalist(struct nvm_tgt_dev *, struct nvm_rq *,
- const struct ppa_addr *, int, int);
-extern void nvm_free_rqd_ppalist(struct nvm_tgt_dev *, struct nvm_rq *);
-extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *,
- void *);
-extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t);
-extern void nvm_put_area(struct nvm_tgt_dev *, sector_t);
-extern void nvm_end_io(struct nvm_rq *);
-extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
-extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *);
-
-extern int nvm_dev_factory(struct nvm_dev *, int flags);
-
-extern void nvm_part_to_tgt(struct nvm_dev *, sector_t *, int);
-
-#else /* CONFIG_NVM */
-struct nvm_dev_ops;
-
-static inline struct nvm_dev *nvm_alloc_dev(int node)
-{
- return ERR_PTR(-EINVAL);
-}
-static inline int nvm_register(struct nvm_dev *dev)
-{
- return -EINVAL;
-}
-static inline void nvm_unregister(struct nvm_dev *dev) {}
-#endif /* CONFIG_NVM */
-#endif /* LIGHTNVM.H */
diff --git a/include/linux/limits.h b/include/linux/limits.h
new file mode 100644
index 000000000000..f6bcc9369010
--- /dev/null
+++ b/include/linux/limits.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_LIMITS_H
+#define _LINUX_LIMITS_H
+
+#include <uapi/linux/limits.h>
+#include <linux/types.h>
+#include <vdso/limits.h>
+
+#define SIZE_MAX (~(size_t)0)
+#define SSIZE_MAX ((ssize_t)(SIZE_MAX >> 1))
+#define PHYS_ADDR_MAX (~(phys_addr_t)0)
+
+#define U8_MAX ((u8)~0U)
+#define S8_MAX ((s8)(U8_MAX >> 1))
+#define S8_MIN ((s8)(-S8_MAX - 1))
+#define U16_MAX ((u16)~0U)
+#define S16_MAX ((s16)(U16_MAX >> 1))
+#define S16_MIN ((s16)(-S16_MAX - 1))
+#define U32_MAX ((u32)~0U)
+#define U32_MIN ((u32)0)
+#define S32_MAX ((s32)(U32_MAX >> 1))
+#define S32_MIN ((s32)(-S32_MAX - 1))
+#define U64_MAX ((u64)~0ULL)
+#define S64_MAX ((s64)(U64_MAX >> 1))
+#define S64_MIN ((s64)(-S64_MAX - 1))
+
+#endif /* _LINUX_LIMITS_H */
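The pattern above derives every signed maximum from the unsigned one by a single shift, and every minimum as -MAX - 1, which is exact in two's complement. A standalone user-space check of that derivation (the DEMO_* names are mine, with stdint types standing in for the kernel's u8/s8):

#include <stdint.h>
#include <stdio.h>

#define DEMO_U8_MAX ((uint8_t)~0U)			/* 0xff = 255 */
#define DEMO_S8_MAX ((int8_t)(DEMO_U8_MAX >> 1))	/* 0x7f = 127 */
#define DEMO_S8_MIN ((int8_t)(-DEMO_S8_MAX - 1))	/* -128 */

int main(void)
{
	/* prints: 255 127 -128 */
	printf("%d %d %d\n", DEMO_U8_MAX, DEMO_S8_MAX, DEMO_S8_MIN);
	return 0;
}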
diff --git a/include/linux/linear_range.h b/include/linux/linear_range.h
new file mode 100644
index 000000000000..2e4f4c3539c0
--- /dev/null
+++ b/include/linux/linear_range.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 ROHM Semiconductors */
+
+#ifndef LINEAR_RANGE_H
+#define LINEAR_RANGE_H
+
+#include <linux/types.h>
+
+/**
+ * struct linear_range - table of selector - value pairs
+ *
+ * Define a lookup table for a range of values. Intended to help when looking
+ * for a register value matching a certain physical measure (like voltage).
+ * Usable when an increment of one in the register always results in a
+ * constant increment of the physical measure (like voltage).
+ *
+ * @min: Lowest value in range
+ * @min_sel: Lowest selector for range
+ * @max_sel: Highest selector for range
+ * @step: Value step size
+ */
+struct linear_range {
+ unsigned int min;
+ unsigned int min_sel;
+ unsigned int max_sel;
+ unsigned int step;
+};
+
+#define LINEAR_RANGE(_min, _min_sel, _max_sel, _step) \
+ { \
+ .min = _min, \
+ .min_sel = _min_sel, \
+ .max_sel = _max_sel, \
+ .step = _step, \
+ }
+
+#define LINEAR_RANGE_IDX(_idx, _min, _min_sel, _max_sel, _step) \
+ [_idx] = LINEAR_RANGE(_min, _min_sel, _max_sel, _step)
+
+unsigned int linear_range_values_in_range(const struct linear_range *r);
+unsigned int linear_range_values_in_range_array(const struct linear_range *r,
+ int ranges);
+unsigned int linear_range_get_max_value(const struct linear_range *r);
+
+int linear_range_get_value(const struct linear_range *r, unsigned int selector,
+ unsigned int *val);
+int linear_range_get_value_array(const struct linear_range *r, int ranges,
+ unsigned int selector, unsigned int *val);
+int linear_range_get_selector_low(const struct linear_range *r,
+ unsigned int val, unsigned int *selector,
+ bool *found);
+int linear_range_get_selector_high(const struct linear_range *r,
+ unsigned int val, unsigned int *selector,
+ bool *found);
+void linear_range_get_selector_within(const struct linear_range *r,
+ unsigned int val, unsigned int *selector);
+int linear_range_get_selector_low_array(const struct linear_range *r,
+ int ranges, unsigned int val,
+ unsigned int *selector, bool *found);
+
+#endif
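As a concrete reading of the API above, consider a regulator whose selectors 0..15 map linearly onto 1.8 V to 2.55 V in 50 mV steps. A hypothetical kernel-context sketch (the demo_* names and the numbers are invented for illustration):

#include <linux/linear_range.h>

/* selectors 0..15 -> 1800000 + sel * 50000 microvolts */
static const struct linear_range demo_volts =
	LINEAR_RANGE(1800000, 0, 15, 50000);

static int demo_selector_to_uv(unsigned int sel, unsigned int *uv)
{
	/* fills *uv, or returns an error for sel outside [0, 15] */
	return linear_range_get_value(&demo_volts, sel, uv);
}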
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index a6a42dd02466..5c8865bb59d9 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -1,7 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LINKAGE_H
#define _LINUX_LINKAGE_H
-#include <linux/compiler.h>
+#include <linux/compiler_types.h>
#include <linux/stringify.h>
#include <linux/export.h>
#include <asm/linkage.h>
@@ -23,20 +24,20 @@
#ifndef cond_syscall
#define cond_syscall(x) asm( \
- ".weak " VMLINUX_SYMBOL_STR(x) "\n\t" \
- ".set " VMLINUX_SYMBOL_STR(x) "," \
- VMLINUX_SYMBOL_STR(sys_ni_syscall))
+ ".weak " __stringify(x) "\n\t" \
+ ".set " __stringify(x) "," \
+ __stringify(sys_ni_syscall))
#endif
#ifndef SYSCALL_ALIAS
#define SYSCALL_ALIAS(alias, name) asm( \
- ".globl " VMLINUX_SYMBOL_STR(alias) "\n\t" \
- ".set " VMLINUX_SYMBOL_STR(alias) "," \
- VMLINUX_SYMBOL_STR(name))
+ ".globl " __stringify(alias) "\n\t" \
+ ".set " __stringify(alias) "," \
+ __stringify(name))
#endif
-#define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
-#define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
+#define __page_aligned_data __section(".data..page_aligned") __aligned(PAGE_SIZE)
+#define __page_aligned_bss __section(".bss..page_aligned") __aligned(PAGE_SIZE)
/*
* For assembly routines.
@@ -68,31 +69,67 @@
#endif
#ifndef __ALIGN
-#define __ALIGN .align 4,0x90
-#define __ALIGN_STR ".align 4,0x90"
+#define __ALIGN .balign CONFIG_FUNCTION_ALIGNMENT
+#define __ALIGN_STR __stringify(__ALIGN)
#endif
#ifdef __ASSEMBLY__
+/* SYM_T_FUNC -- type used by assembler to mark functions */
+#ifndef SYM_T_FUNC
+#define SYM_T_FUNC STT_FUNC
+#endif
+
+/* SYM_T_OBJECT -- type used by assembler to mark data */
+#ifndef SYM_T_OBJECT
+#define SYM_T_OBJECT STT_OBJECT
+#endif
+
+/* SYM_T_NONE -- type used by assembler to mark entries of unknown type */
+#ifndef SYM_T_NONE
+#define SYM_T_NONE STT_NOTYPE
+#endif
+
+/* SYM_A_* -- align the symbol? */
+#define SYM_A_ALIGN ALIGN
+#define SYM_A_NONE /* nothing */
+
+/* SYM_L_* -- linkage of symbols */
+#define SYM_L_GLOBAL(name) .globl name
+#define SYM_L_WEAK(name) .weak name
+#define SYM_L_LOCAL(name) /* nothing */
+
#ifndef LINKER_SCRIPT
#define ALIGN __ALIGN
#define ALIGN_STR __ALIGN_STR
-#ifndef ENTRY
-#define ENTRY(name) \
+/* === DEPRECATED annotations === */
+
+#ifndef CONFIG_ARCH_USE_SYM_ANNOTATIONS
+#ifndef GLOBAL
+/* deprecated, use SYM_DATA*, SYM_ENTRY, or similar */
+#define GLOBAL(name) \
.globl name ASM_NL \
- ALIGN ASM_NL \
name:
#endif
+
+#ifndef ENTRY
+/* deprecated, use SYM_FUNC_START */
+#define ENTRY(name) \
+ SYM_FUNC_START(name)
+#endif
+#endif /* CONFIG_ARCH_USE_SYM_ANNOTATIONS */
#endif /* LINKER_SCRIPT */
+#ifndef CONFIG_ARCH_USE_SYM_ANNOTATIONS
#ifndef WEAK
+/* deprecated, use SYM_FUNC_START_WEAK* */
#define WEAK(name) \
- .weak name ASM_NL \
- name:
+ SYM_FUNC_START_WEAK(name)
#endif
#ifndef END
+/* deprecated, use SYM_FUNC_END, SYM_DATA_END, or SYM_END */
#define END(name) \
.size name, .-name
#endif
@@ -102,11 +139,222 @@
* static analysis tools such as stack depth analyzer.
*/
#ifndef ENDPROC
+/* deprecated, use SYM_FUNC_END */
#define ENDPROC(name) \
- .type name, @function ASM_NL \
- END(name)
+ SYM_FUNC_END(name)
+#endif
+#endif /* CONFIG_ARCH_USE_SYM_ANNOTATIONS */
+
+/* === generic annotations === */
+
+/* SYM_ENTRY -- use only if you have to for non-paired symbols */
+#ifndef SYM_ENTRY
+#define SYM_ENTRY(name, linkage, align...) \
+ linkage(name) ASM_NL \
+ align ASM_NL \
+ name:
+#endif
+
+/* SYM_START -- use only if you have to */
+#ifndef SYM_START
+#define SYM_START(name, linkage, align...) \
+ SYM_ENTRY(name, linkage, align)
+#endif
+
+/* SYM_END -- use only if you have to */
+#ifndef SYM_END
+#define SYM_END(name, sym_type) \
+ .type name sym_type ASM_NL \
+ .set .L__sym_size_##name, .-name ASM_NL \
+ .size name, .L__sym_size_##name
+#endif
+
+/* SYM_ALIAS -- use only if you have to */
+#ifndef SYM_ALIAS
+#define SYM_ALIAS(alias, name, linkage) \
+ linkage(alias) ASM_NL \
+ .set alias, name ASM_NL
+#endif
+
+/* === code annotations === */
+
+/*
+ * FUNC -- C-like functions (proper stack frame etc.)
+ * CODE -- non-C code (e.g. irq handlers with different, special stack etc.)
+ *
+ * Objtool validates stack for FUNC, but not for CODE.
+ * Objtool generates debug info for both FUNC & CODE, but needs special
+ * annotations for each CODE's start (to describe the actual stack frame).
+ *
+ * Objtool requires that all code must be contained in an ELF symbol. Symbol
+ * names that have a .L prefix do not emit symbol table entries. .L
+ * prefixed symbols can be used within a code region, but should be avoided for
+ * denoting a range of code via ``SYM_*_START/END`` annotations.
+ *
+ * ALIAS -- does not generate debug info -- the aliased function will
+ */
+
+/* SYM_INNER_LABEL_ALIGN -- only for labels in the middle of code */
+#ifndef SYM_INNER_LABEL_ALIGN
+#define SYM_INNER_LABEL_ALIGN(name, linkage) \
+ .type name SYM_T_NONE ASM_NL \
+ SYM_ENTRY(name, linkage, SYM_A_ALIGN)
+#endif
+
+/* SYM_INNER_LABEL -- only for labels in the middle of code */
+#ifndef SYM_INNER_LABEL
+#define SYM_INNER_LABEL(name, linkage) \
+ .type name SYM_T_NONE ASM_NL \
+ SYM_ENTRY(name, linkage, SYM_A_NONE)
+#endif
+
+/* SYM_FUNC_START -- use for global functions */
+#ifndef SYM_FUNC_START
+#define SYM_FUNC_START(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_START_NOALIGN -- use for global functions, w/o alignment */
+#ifndef SYM_FUNC_START_NOALIGN
+#define SYM_FUNC_START_NOALIGN(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)
+#endif
+
+/* SYM_FUNC_START_LOCAL -- use for local functions */
+#ifndef SYM_FUNC_START_LOCAL
+#define SYM_FUNC_START_LOCAL(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_START_LOCAL_NOALIGN -- use for local functions, w/o alignment */
+#ifndef SYM_FUNC_START_LOCAL_NOALIGN
+#define SYM_FUNC_START_LOCAL_NOALIGN(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
+#endif
+
+/* SYM_FUNC_START_WEAK -- use for weak functions */
+#ifndef SYM_FUNC_START_WEAK
+#define SYM_FUNC_START_WEAK(name) \
+ SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_START_WEAK_NOALIGN -- use for weak functions, w/o alignment */
+#ifndef SYM_FUNC_START_WEAK_NOALIGN
+#define SYM_FUNC_START_WEAK_NOALIGN(name) \
+ SYM_START(name, SYM_L_WEAK, SYM_A_NONE)
+#endif
+
+/*
+ * SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START,
+ * SYM_FUNC_START_WEAK, ...
+ */
+#ifndef SYM_FUNC_END
+#define SYM_FUNC_END(name) \
+ SYM_END(name, SYM_T_FUNC)
+#endif
+
+/*
+ * SYM_FUNC_ALIAS -- define a global alias for an existing function
+ */
+#ifndef SYM_FUNC_ALIAS
+#define SYM_FUNC_ALIAS(alias, name) \
+ SYM_ALIAS(alias, name, SYM_L_GLOBAL)
+#endif
+
+/*
+ * SYM_FUNC_ALIAS_LOCAL -- define a local alias for an existing function
+ */
+#ifndef SYM_FUNC_ALIAS_LOCAL
+#define SYM_FUNC_ALIAS_LOCAL(alias, name) \
+ SYM_ALIAS(alias, name, SYM_L_LOCAL)
+#endif
+
+/*
+ * SYM_FUNC_ALIAS_WEAK -- define a weak global alias for an existing function
+ */
+#ifndef SYM_FUNC_ALIAS_WEAK
+#define SYM_FUNC_ALIAS_WEAK(alias, name) \
+ SYM_ALIAS(alias, name, SYM_L_WEAK)
+#endif
+
+/* SYM_CODE_START -- use for non-C (special) functions */
+#ifndef SYM_CODE_START
+#define SYM_CODE_START(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_CODE_START_NOALIGN -- use for non-C (special) functions, w/o alignment */
+#ifndef SYM_CODE_START_NOALIGN
+#define SYM_CODE_START_NOALIGN(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)
+#endif
+
+/* SYM_CODE_START_LOCAL -- use for local non-C (special) functions */
+#ifndef SYM_CODE_START_LOCAL
+#define SYM_CODE_START_LOCAL(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
+#endif
+
+/*
+ * SYM_CODE_START_LOCAL_NOALIGN -- use for local non-C (special) functions,
+ * w/o alignment
+ */
+#ifndef SYM_CODE_START_LOCAL_NOALIGN
+#define SYM_CODE_START_LOCAL_NOALIGN(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
+#endif
+
+/* SYM_CODE_END -- the end of SYM_CODE_START_LOCAL, SYM_CODE_START, ... */
+#ifndef SYM_CODE_END
+#define SYM_CODE_END(name) \
+ SYM_END(name, SYM_T_NONE)
#endif
+/* === data annotations === */
+
+/* SYM_DATA_START -- global data symbol */
+#ifndef SYM_DATA_START
+#define SYM_DATA_START(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)
+#endif
+
+/* SYM_DATA_START_LOCAL -- local data symbol */
+#ifndef SYM_DATA_START_LOCAL
+#define SYM_DATA_START_LOCAL(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
#endif
+/* SYM_DATA_END -- the end of SYM_DATA_START symbol */
+#ifndef SYM_DATA_END
+#define SYM_DATA_END(name) \
+ SYM_END(name, SYM_T_OBJECT)
#endif
+
+/* SYM_DATA_END_LABEL -- the labeled end of SYM_DATA_START symbol */
+#ifndef SYM_DATA_END_LABEL
+#define SYM_DATA_END_LABEL(name, linkage, label) \
+ linkage(label) ASM_NL \
+ .type label SYM_T_OBJECT ASM_NL \
+ label: \
+ SYM_END(name, SYM_T_OBJECT)
+#endif
+
+/* SYM_DATA -- start+end wrapper around simple global data */
+#ifndef SYM_DATA
+#define SYM_DATA(name, data...) \
+ SYM_DATA_START(name) ASM_NL \
+ data ASM_NL \
+ SYM_DATA_END(name)
+#endif
+
+/* SYM_DATA_LOCAL -- start+end wrapper around simple local data */
+#ifndef SYM_DATA_LOCAL
+#define SYM_DATA_LOCAL(name, data...) \
+ SYM_DATA_START_LOCAL(name) ASM_NL \
+ data ASM_NL \
+ SYM_DATA_END(name)
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _LINUX_LINKAGE_H */
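The layering above means SYM_FUNC_START bottoms out in a .globl, an alignment directive, and the label itself, while SYM_FUNC_END emits the .type/.size pair that objtool and debuggers rely on. A hedged sketch of the intended pairing in a .S file (demo_nop is a made-up symbol and the ret is x86-flavored):

SYM_FUNC_START(demo_nop)	/* expands to: .globl demo_nop ; ALIGN ; demo_nop: */
	ret			/* function body goes here */
SYM_FUNC_END(demo_nop)		/* expands to: .type demo_nop STT_FUNC ; .size demo_nop, .-demo_nop */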
diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h
new file mode 100644
index 000000000000..15e0e0209da4
--- /dev/null
+++ b/include/linux/linkmode.h
@@ -0,0 +1,98 @@
+#ifndef __LINKMODE_H
+#define __LINKMODE_H
+
+#include <linux/bitmap.h>
+#include <linux/ethtool.h>
+#include <uapi/linux/ethtool.h>
+
+static inline void linkmode_zero(unsigned long *dst)
+{
+ bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline void linkmode_copy(unsigned long *dst, const unsigned long *src)
+{
+ bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline void linkmode_and(unsigned long *dst, const unsigned long *a,
+ const unsigned long *b)
+{
+ bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline void linkmode_or(unsigned long *dst, const unsigned long *a,
+ const unsigned long *b)
+{
+ bitmap_or(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline bool linkmode_empty(const unsigned long *src)
+{
+ return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline int linkmode_andnot(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2)
+{
+ return bitmap_andnot(dst, src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline void linkmode_set_bit(int nr, volatile unsigned long *addr)
+{
+ __set_bit(nr, addr);
+}
+
+static inline void linkmode_set_bit_array(const int *array, int array_size,
+ unsigned long *addr)
+{
+ int i;
+
+ for (i = 0; i < array_size; i++)
+ linkmode_set_bit(array[i], addr);
+}
+
+static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr)
+{
+ __clear_bit(nr, addr);
+}
+
+static inline void linkmode_mod_bit(int nr, volatile unsigned long *addr,
+ int set)
+{
+ if (set)
+ linkmode_set_bit(nr, addr);
+ else
+ linkmode_clear_bit(nr, addr);
+}
+
+static inline int linkmode_test_bit(int nr, const volatile unsigned long *addr)
+{
+ return test_bit(nr, addr);
+}
+
+static inline int linkmode_equal(const unsigned long *src1,
+ const unsigned long *src2)
+{
+ return bitmap_equal(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline int linkmode_intersects(const unsigned long *src1,
+ const unsigned long *src2)
+{
+ return bitmap_intersects(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline int linkmode_subset(const unsigned long *src1,
+ const unsigned long *src2)
+{
+ return bitmap_subset(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+void linkmode_resolve_pause(const unsigned long *local_adv,
+ const unsigned long *partner_adv,
+ bool *tx_pause, bool *rx_pause);
+
+void linkmode_set_pause(unsigned long *advertisement, bool tx, bool rx);
+
+#endif /* __LINKMODE_H */
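Since these helpers are thin wrappers over bitmap operations sized to __ETHTOOL_LINK_MODE_MASK_NBITS, the typical use is building and constraining advertisement masks. A hypothetical sketch (demo_advertise_gige is invented; the ETHTOOL_LINK_MODE_*_BIT numbers and __ETHTOOL_DECLARE_LINK_MODE_MASK come from linux/ethtool.h, which this header pulls in):

#include <linux/linkmode.h>

static void demo_advertise_gige(unsigned long *advertising)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);

	/* start from nothing, then request autoneg + 1000BASE-T full duplex */
	linkmode_zero(advertising);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising);
	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertising);

	/* only advertise what the MAC actually supports */
	linkmode_zero(supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, supported);
	linkmode_and(advertising, advertising, supported);
}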
diff --git a/include/linux/linux_logo.h b/include/linux/linux_logo.h
index ca5bd91d12e1..d4d5b93efe84 100644
--- a/include/linux/linux_logo.h
+++ b/include/linux/linux_logo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LINUX_LOGO_H
#define _LINUX_LINUX_LOGO_H
@@ -35,8 +36,6 @@ struct linux_logo {
extern const struct linux_logo logo_linux_mono;
extern const struct linux_logo logo_linux_vga16;
extern const struct linux_logo logo_linux_clut224;
-extern const struct linux_logo logo_blackfin_vga16;
-extern const struct linux_logo logo_blackfin_clut224;
extern const struct linux_logo logo_dec_clut224;
extern const struct linux_logo logo_mac_clut224;
extern const struct linux_logo logo_parisc_clut224;
@@ -45,7 +44,6 @@ extern const struct linux_logo logo_sun_clut224;
extern const struct linux_logo logo_superh_mono;
extern const struct linux_logo logo_superh_vga16;
extern const struct linux_logo logo_superh_clut224;
-extern const struct linux_logo logo_m32r_clut224;
extern const struct linux_logo logo_spe_clut224;
extern const struct linux_logo *fb_find_logo(int depth);
diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h
index f1664c636af0..b72b8cdba765 100644
--- a/include/linux/lis3lv02d.h
+++ b/include/linux/lis3lv02d.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LIS3LV02D_H_
#define __LIS3LV02D_H_
diff --git a/include/linux/list.h b/include/linux/list.h
index ae537fa46216..f10344dbad4d 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -1,14 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H
+#include <linux/container_of.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/poison.h>
#include <linux/const.h>
-#include <linux/kernel.h>
+
+#include <asm/barrier.h>
/*
- * Simple doubly linked list implementation.
+ * Circular doubly linked list implementation.
*
* Some of the internal functions ("__xxx") are useful when
* manipulating whole lists rather than single entries, as
@@ -22,10 +25,17 @@
#define LIST_HEAD(name) \
struct list_head name = LIST_HEAD_INIT(name)
+/**
+ * INIT_LIST_HEAD - Initialize a list_head structure
+ * @list: list_head structure to be initialized.
+ *
+ * Initializes the list_head to point to itself. If it is a list header,
+ * the result is an empty list.
+ */
static inline void INIT_LIST_HEAD(struct list_head *list)
{
WRITE_ONCE(list->next, list);
- list->prev = list;
+ WRITE_ONCE(list->prev, list);
}
#ifdef CONFIG_DEBUG_LIST
@@ -105,12 +115,20 @@ static inline void __list_del(struct list_head * prev, struct list_head * next)
WRITE_ONCE(prev->next, next);
}
-/**
- * list_del - deletes entry from list.
- * @entry: the element to delete from the list.
- * Note: list_empty() on entry does not return true after this, the entry is
- * in an undefined state.
+/*
+ * Delete a list entry and clear the 'prev' pointer.
+ *
+ * This is a special-purpose list clearing method used in the networking code
+ * for lists allocated as per-cpu, where we don't want to incur the extra
+ * WRITE_ONCE() overhead of a regular list_del_init(). The code that uses this
+ * needs to check the node 'prev' pointer instead of calling list_empty().
*/
+static inline void __list_del_clearprev(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ entry->prev = NULL;
+}
+
static inline void __list_del_entry(struct list_head *entry)
{
if (!__list_del_entry_valid(entry))
@@ -119,6 +137,12 @@ static inline void __list_del_entry(struct list_head *entry)
__list_del(entry->prev, entry->next);
}
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty() on entry does not return true after this, the entry is
+ * in an undefined state.
+ */
static inline void list_del(struct list_head *entry)
{
__list_del_entry(entry);
@@ -142,14 +166,38 @@ static inline void list_replace(struct list_head *old,
new->prev->next = new;
}
+/**
+ * list_replace_init - replace old entry by new one and initialize the old one
+ * @old : the element to be replaced
+ * @new : the new element to insert
+ *
+ * If @old was empty, it will be overwritten.
+ */
static inline void list_replace_init(struct list_head *old,
- struct list_head *new)
+ struct list_head *new)
{
list_replace(old, new);
INIT_LIST_HEAD(old);
}
/**
+ * list_swap - replace entry1 with entry2 and re-add entry1 at entry2's position
+ * @entry1: the location to place entry2
+ * @entry2: the location to place entry1
+ */
+static inline void list_swap(struct list_head *entry1,
+ struct list_head *entry2)
+{
+ struct list_head *pos = entry2->prev;
+
+ list_del(entry2);
+ list_replace(entry1, entry2);
+ if (pos == entry1)
+ pos = entry2;
+ list_add(entry1, pos);
+}
+
+/**
* list_del_init - deletes entry from list and reinitialize it.
* @entry: the element to delete from the list.
*/
@@ -183,17 +231,59 @@ static inline void list_move_tail(struct list_head *list,
}
/**
+ * list_bulk_move_tail - move a subsection of a list to its tail
+ * @head: the head that will follow our entry
+ * @first: first entry to move
+ * @last: last entry to move, can be the same as first
+ *
+ * Move all entries between @first and including @last before @head.
+ * All three entries must belong to the same linked list.
+ */
+static inline void list_bulk_move_tail(struct list_head *head,
+ struct list_head *first,
+ struct list_head *last)
+{
+ first->prev->next = last->next;
+ last->next->prev = first->prev;
+
+ head->prev->next = first;
+ first->prev = head->prev;
+
+ last->next = head;
+ head->prev = last;
+}
+
+/**
+ * list_is_first -- tests whether @list is the first entry in list @head
+ * @list: the entry to test
+ * @head: the head of the list
+ */
+static inline int list_is_first(const struct list_head *list, const struct list_head *head)
+{
+ return list->prev == head;
+}
+
+/**
* list_is_last - tests whether @list is the last entry in list @head
* @list: the entry to test
* @head: the head of the list
*/
-static inline int list_is_last(const struct list_head *list,
- const struct list_head *head)
+static inline int list_is_last(const struct list_head *list, const struct list_head *head)
{
return list->next == head;
}
/**
+ * list_is_head - tests whether @list is the list @head
+ * @list: the entry to test
+ * @head: the head of the list
+ */
+static inline int list_is_head(const struct list_head *list, const struct list_head *head)
+{
+ return list == head;
+}
+
+/**
* list_empty - tests whether a list is empty
* @head: the list to test.
*/
@@ -203,6 +293,24 @@ static inline int list_empty(const struct list_head *head)
}
/**
+ * list_del_init_careful - deletes entry from list and reinitialize it.
+ * @entry: the element to delete from the list.
+ *
+ * This is the same as list_del_init(), except designed to be used
+ * together with list_empty_careful() in a way to guarantee ordering
+ * of other memory operations.
+ *
+ * Any memory operations done before a list_del_init_careful() are
+ * guaranteed to be visible after a list_empty_careful() test.
+ */
+static inline void list_del_init_careful(struct list_head *entry)
+{
+ __list_del_entry(entry);
+ WRITE_ONCE(entry->prev, entry);
+ smp_store_release(&entry->next, entry);
+}
+
+/**
* list_empty_careful - tests whether a list is empty and not being modified
* @head: the list to test
*
@@ -217,8 +325,8 @@ static inline int list_empty(const struct list_head *head)
*/
static inline int list_empty_careful(const struct list_head *head)
{
- struct list_head *next = head->next;
- return (next == head) && (next == head->prev);
+ struct list_head *next = smp_load_acquire(&head->next);
+ return list_is_head(next, head) && (next == READ_ONCE(head->prev));
}
/**
@@ -236,6 +344,24 @@ static inline void list_rotate_left(struct list_head *head)
}
/**
+ * list_rotate_to_front() - Rotate list to specific item.
+ * @list: The desired new front of the list.
+ * @head: The head of the list.
+ *
+ * Rotates list so that @list becomes the new front of the list.
+ */
+static inline void list_rotate_to_front(struct list_head *list,
+ struct list_head *head)
+{
+ /*
+ * Deletes the list head from the list denoted by @head and
+ * places it as the tail of @list; this effectively rotates the
+ * list so that @list is at the front.
+ */
+ list_move_tail(head, list);
+}
+
+/**
* list_is_singular - tests whether a list has just one entry.
* @head: the list to test.
*/
@@ -275,15 +401,44 @@ static inline void list_cut_position(struct list_head *list,
{
if (list_empty(head))
return;
- if (list_is_singular(head) &&
- (head->next != entry && head != entry))
+ if (list_is_singular(head) && !list_is_head(entry, head) && (entry != head->next))
return;
- if (entry == head)
+ if (list_is_head(entry, head))
INIT_LIST_HEAD(list);
else
__list_cut_position(list, head, entry);
}
+/**
+ * list_cut_before - cut a list into two, before given entry
+ * @list: a new list to add all removed entries
+ * @head: a list with entries
+ * @entry: an entry within head, could be the head itself
+ *
+ * This helper moves the initial part of @head, up to but
+ * excluding @entry, from @head to @list. You should pass
+ * in @entry an element you know is on @head. @list should
+ * be an empty list or a list you do not care about losing
+ * its data.
+ * If @entry == @head, all entries on @head are moved to
+ * @list.
+ */
+static inline void list_cut_before(struct list_head *list,
+ struct list_head *head,
+ struct list_head *entry)
+{
+ if (head->next == entry) {
+ INIT_LIST_HEAD(list);
+ return;
+ }
+ list->next = head->next;
+ list->next->prev = list;
+ list->prev = entry->prev;
+ list->prev->next = list;
+ head->next = entry;
+ entry->prev = head;
+}
+
static inline void __list_splice(const struct list_head *list,
struct list_head *prev,
struct list_head *next)
@@ -409,6 +564,19 @@ static inline void list_splice_tail_init(struct list_head *list,
list_entry((pos)->member.next, typeof(*(pos)), member)
/**
+ * list_next_entry_circular - get the next element in list
+ * @pos: the type * to cursor.
+ * @head: the list head to take the element from.
+ * @member: the name of the list_head within the struct.
+ *
+ * Wraps around if @pos is the last element (returns the first element).
+ * Note that the list is expected to be non-empty.
+ */
+#define list_next_entry_circular(pos, head, member) \
+ (list_is_last(&(pos)->member, head) ? \
+ list_first_entry(head, typeof(*(pos)), member) : list_next_entry(pos, member))
+
+/**
* list_prev_entry - get the prev element in list
* @pos: the type * to cursor
* @member: the name of the list_head within the struct.
@@ -417,12 +585,45 @@ static inline void list_splice_tail_init(struct list_head *list,
list_entry((pos)->member.prev, typeof(*(pos)), member)
/**
+ * list_prev_entry_circular - get the prev element in list
+ * @pos: the type * to cursor.
+ * @head: the list head to take the element from.
+ * @member: the name of the list_head within the struct.
+ *
+ * Wraps around if @pos is the first element (returns the last element).
+ * Note that the list is expected to be non-empty.
+ */
+#define list_prev_entry_circular(pos, head, member) \
+ (list_is_first(&(pos)->member, head) ? \
+ list_last_entry(head, typeof(*(pos)), member) : list_prev_entry(pos, member))
+
+/**
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*/
#define list_for_each(pos, head) \
- for (pos = (head)->next; pos != (head); pos = pos->next)
+ for (pos = (head)->next; !list_is_head(pos, (head)); pos = pos->next)
+
+/**
+ * list_for_each_rcu - Iterate over a list in an RCU-safe fashion
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ */
+#define list_for_each_rcu(pos, head) \
+ for (pos = rcu_dereference((head)->next); \
+ !list_is_head(pos, (head)); \
+ pos = rcu_dereference(pos->next))
+
+/**
+ * list_for_each_continue - continue iteration over a list
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ *
+ * Continue to iterate over a list, continuing after the current position.
+ */
+#define list_for_each_continue(pos, head) \
+ for (pos = pos->next; !list_is_head(pos, (head)); pos = pos->next)
/**
* list_for_each_prev - iterate over a list backwards
@@ -430,7 +631,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @head: the head for your list.
*/
#define list_for_each_prev(pos, head) \
- for (pos = (head)->prev; pos != (head); pos = pos->prev)
+ for (pos = (head)->prev; !list_is_head(pos, (head)); pos = pos->prev)
/**
* list_for_each_safe - iterate over a list safe against removal of list entry
@@ -439,8 +640,9 @@ static inline void list_splice_tail_init(struct list_head *list,
* @head: the head for your list.
*/
#define list_for_each_safe(pos, n, head) \
- for (pos = (head)->next, n = pos->next; pos != (head); \
- pos = n, n = pos->next)
+ for (pos = (head)->next, n = pos->next; \
+ !list_is_head(pos, (head)); \
+ pos = n, n = pos->next)
/**
* list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
@@ -450,10 +652,34 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_prev_safe(pos, n, head) \
for (pos = (head)->prev, n = pos->prev; \
- pos != (head); \
+ !list_is_head(pos, (head)); \
pos = n, n = pos->prev)
/**
+ * list_count_nodes - count nodes in the list
+ * @head: the head for your list.
+ */
+static inline size_t list_count_nodes(struct list_head *head)
+{
+ struct list_head *pos;
+ size_t count = 0;
+
+ list_for_each(pos, head)
+ count++;
+
+ return count;
+}
+
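The obvious use is diagnostics; note the helper visits every node, so it is O(n) and best kept out of hot paths (@queue hypothetical):

	pr_debug("pending: %zu entries\n", list_count_nodes(&queue));
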
+/**
+ * list_entry_is_head - test if the entry points to the head of the list
+ * @pos: the type * to cursor
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ */
+#define list_entry_is_head(pos, head, member) \
+ (&pos->member == (head))
+
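This macro supports the classic search idiom where the loop cursor is examined after the loop; a sketch with a hypothetical struct item and @items list:

	struct item *it;

	list_for_each_entry(it, &items, node)
		if (it->key == key)
			break;
	if (list_entry_is_head(it, &items, node))
		return NULL;	/* ran off the end: no match */
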
+/**
* list_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
@@ -461,7 +687,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry(pos, head, member) \
for (pos = list_first_entry(head, typeof(*pos), member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = list_next_entry(pos, member))
/**
@@ -472,7 +698,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry_reverse(pos, head, member) \
for (pos = list_last_entry(head, typeof(*pos), member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = list_prev_entry(pos, member))
/**
@@ -497,7 +723,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry_continue(pos, head, member) \
for (pos = list_next_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = list_next_entry(pos, member))
/**
@@ -511,7 +737,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry_continue_reverse(pos, head, member) \
for (pos = list_prev_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = list_prev_entry(pos, member))
/**
@@ -523,7 +749,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* Iterate over list of given type, continuing from current position.
*/
#define list_for_each_entry_from(pos, head, member) \
- for (; &pos->member != (head); \
+ for (; !list_entry_is_head(pos, head, member); \
pos = list_next_entry(pos, member))
/**
@@ -536,7 +762,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* Iterate backwards over list of given type, continuing from current position.
*/
#define list_for_each_entry_from_reverse(pos, head, member) \
- for (; &pos->member != (head); \
+ for (; !list_entry_is_head(pos, head, member); \
pos = list_prev_entry(pos, member))
/**
@@ -549,7 +775,7 @@ static inline void list_splice_tail_init(struct list_head *list,
#define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_first_entry(head, typeof(*pos), member), \
n = list_next_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = n, n = list_next_entry(n, member))
/**
@@ -565,7 +791,7 @@ static inline void list_splice_tail_init(struct list_head *list,
#define list_for_each_entry_safe_continue(pos, n, head, member) \
for (pos = list_next_entry(pos, member), \
n = list_next_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = n, n = list_next_entry(n, member))
/**
@@ -580,7 +806,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_entry_safe_from(pos, n, head, member) \
for (n = list_next_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = n, n = list_next_entry(n, member))
/**
@@ -596,7 +822,7 @@ static inline void list_splice_tail_init(struct list_head *list,
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
for (pos = list_last_entry(head, typeof(*pos), member), \
n = list_prev_entry(pos, member); \
- &pos->member != (head); \
+ !list_entry_is_head(pos, head, member); \
pos = n, n = list_prev_entry(n, member))
/**
@@ -630,11 +856,36 @@ static inline void INIT_HLIST_NODE(struct hlist_node *h)
h->pprev = NULL;
}
+/**
+ * hlist_unhashed - Has node been removed from list and reinitialized?
+ * @h: Node to be checked
+ *
+ * Note that not all removal functions will leave a node in unhashed
+ * state. For example, hlist_nulls_del_init_rcu() does leave the
+ * node in unhashed state, but hlist_nulls_del() does not.
+ */
static inline int hlist_unhashed(const struct hlist_node *h)
{
return !h->pprev;
}
+/**
+ * hlist_unhashed_lockless - Version of hlist_unhashed for lockless use
+ * @h: Node to be checked
+ *
+ * This variant of hlist_unhashed() must be used in lockless contexts
+ * to avoid potential load-tearing. The READ_ONCE() is paired with the
+ * various WRITE_ONCE() in hlist helpers that are defined below.
+ */
+static inline int hlist_unhashed_lockless(const struct hlist_node *h)
+{
+ return !READ_ONCE(h->pprev);
+}
+
+/**
+ * hlist_empty - Is the specified hlist_head structure an empty hlist?
+ * @h: Structure to check.
+ */
static inline int hlist_empty(const struct hlist_head *h)
{
return !READ_ONCE(h->first);
@@ -647,9 +898,16 @@ static inline void __hlist_del(struct hlist_node *n)
WRITE_ONCE(*pprev, next);
if (next)
- next->pprev = pprev;
+ WRITE_ONCE(next->pprev, pprev);
}
+/**
+ * hlist_del - Delete the specified hlist_node from its list
+ * @n: Node to delete.
+ *
+ * Note that this function leaves the node in hashed state. Use
+ * hlist_del_init() or similar instead to unhash @n.
+ */
static inline void hlist_del(struct hlist_node *n)
{
__hlist_del(n);
@@ -657,6 +915,12 @@ static inline void hlist_del(struct hlist_node *n)
n->pprev = LIST_POISON2;
}
+/**
+ * hlist_del_init - Delete the specified hlist_node from its list and initialize
+ * @n: Node to delete.
+ *
+ * Note that this function leaves the node in unhashed state.
+ */
static inline void hlist_del_init(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
@@ -665,51 +929,83 @@ static inline void hlist_del_init(struct hlist_node *n)
}
}
+/**
+ * hlist_add_head - add a new entry at the beginning of the hlist
+ * @n: new entry to be added
+ * @h: hlist head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
struct hlist_node *first = h->first;
- n->next = first;
+ WRITE_ONCE(n->next, first);
if (first)
- first->pprev = &n->next;
+ WRITE_ONCE(first->pprev, &n->next);
WRITE_ONCE(h->first, n);
- n->pprev = &h->first;
+ WRITE_ONCE(n->pprev, &h->first);
}
-/* next must be != NULL */
+/**
+ * hlist_add_before - add a new entry before the one specified
+ * @n: new entry to be added
+ * @next: hlist node to add it before, which must be non-NULL
+ */
static inline void hlist_add_before(struct hlist_node *n,
- struct hlist_node *next)
+ struct hlist_node *next)
{
- n->pprev = next->pprev;
- n->next = next;
- next->pprev = &n->next;
+ WRITE_ONCE(n->pprev, next->pprev);
+ WRITE_ONCE(n->next, next);
+ WRITE_ONCE(next->pprev, &n->next);
WRITE_ONCE(*(n->pprev), n);
}
+/**
+ * hlist_add_behind - add a new entry after the one specified
+ * @n: new entry to be added
+ * @prev: hlist node to add it after, which must be non-NULL
+ */
static inline void hlist_add_behind(struct hlist_node *n,
struct hlist_node *prev)
{
- n->next = prev->next;
+ WRITE_ONCE(n->next, prev->next);
WRITE_ONCE(prev->next, n);
- n->pprev = &prev->next;
+ WRITE_ONCE(n->pprev, &prev->next);
if (n->next)
- n->next->pprev = &n->next;
+ WRITE_ONCE(n->next->pprev, &n->next);
}
-/* after that we'll appear to be on some hlist and hlist_del will work */
+/**
+ * hlist_add_fake - create a fake hlist consisting of a single headless node
+ * @n: Node to make a fake list out of
+ *
+ * This makes @n appear to be its own predecessor on a headless hlist.
+ * The point of this is to allow things like hlist_del() to work correctly
+ * in cases where there is no list.
+ */
static inline void hlist_add_fake(struct hlist_node *n)
{
n->pprev = &n->next;
}
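A sketch of the intended pattern: a node that was never inserted anywhere can still be passed to hlist_del() safely once it is faked.

	struct hlist_node n;

	INIT_HLIST_NODE(&n);
	hlist_add_fake(&n);	/* n.pprev = &n.next: looks hashed */
	hlist_del(&n);		/* safe: only rewrites n.next, no list touched */
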
+/**
+ * hlist_fake - Is this node a fake hlist?
+ * @h: Node to check for being a self-referential fake hlist.
+ */
static inline bool hlist_fake(struct hlist_node *h)
{
return h->pprev == &h->next;
}
-/*
+/**
+ * hlist_is_singular_node - is node the only element of the specified hlist?
+ * @n: Node to check for singularity.
+ * @h: Header for potentially singular list.
+ *
* Check whether the node is the only node of the head without
- * accessing head:
+ * accessing head, thus avoiding unnecessary cache misses.
*/
static inline bool
hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
@@ -717,7 +1013,11 @@ hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
return !n->next && n->pprev == &h->first;
}
-/*
+/**
+ * hlist_move_list - Move an hlist
+ * @old: hlist_head for old list.
+ * @new: hlist_head for new list.
+ *
* Move a list from one list head to another. Fixup the pprev
* reference of the first entry if it exists.
*/
@@ -777,7 +1077,7 @@ static inline void hlist_move_list(struct hlist_head *old,
/**
* hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
* @pos: the type * to use as a loop cursor.
- * @n: another &struct hlist_node to use as temporary storage
+ * @n: a &struct hlist_node to use as temporary storage
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*/
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index cb483305e1f5..ae1b541446c9 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LIST_BL_H
#define _LINUX_LIST_BL_H
@@ -85,6 +86,32 @@ static inline void hlist_bl_add_head(struct hlist_bl_node *n,
hlist_bl_set_first(h, n);
}
+static inline void hlist_bl_add_before(struct hlist_bl_node *n,
+ struct hlist_bl_node *next)
+{
+ struct hlist_bl_node **pprev = next->pprev;
+
+ n->pprev = pprev;
+ n->next = next;
+ next->pprev = &n->next;
+
+ /* pprev may be `first`, so be careful not to lose the lock bit */
+ WRITE_ONCE(*pprev,
+ (struct hlist_bl_node *)
+ ((uintptr_t)n | ((uintptr_t)*pprev & LIST_BL_LOCKMASK)));
+}
+
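A sketch of the invariant the masking preserves: with LIST_BL_LOCKMASK == 1UL, bit 0 of the head's first pointer doubles as the list lock, so when @pprev aliases the head a plain store would silently drop the lock.

	/* what the WRITE_ONCE() above computes, spelled out */
	uintptr_t oldval = (uintptr_t)*pprev;	/* possibly ptr | 1 while locked */
	uintptr_t newval = (uintptr_t)n | (oldval & LIST_BL_LOCKMASK);
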
+static inline void hlist_bl_add_behind(struct hlist_bl_node *n,
+ struct hlist_bl_node *prev)
+{
+ n->next = prev->next;
+ n->pprev = &prev->next;
+ prev->next = n;
+
+ if (n->next)
+ n->next->pprev = &n->next;
+}
+
static inline void __hlist_bl_del(struct hlist_bl_node *n)
{
struct hlist_bl_node *next = n->next;
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index fa7fd03cb5f9..b35968ee9fb5 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
* Authors: David Chinner and Glauber Costa
@@ -10,6 +11,7 @@
#include <linux/list.h>
#include <linux/nodemask.h>
#include <linux/shrinker.h>
+#include <linux/xarray.h>
struct mem_cgroup;
@@ -31,8 +33,9 @@ struct list_lru_one {
};
struct list_lru_memcg {
- /* array of per cgroup lists, indexed by memcg_cache_id */
- struct list_lru_one *lru[0];
+ struct rcu_head rcu;
+ /* array of per cgroup per node lists, indexed by node id */
+ struct list_lru_one node[];
};
struct list_lru_node {
@@ -40,30 +43,33 @@ struct list_lru_node {
spinlock_t lock;
/* global list, used for the root cgroup in cgroup aware lrus */
struct list_lru_one lru;
-#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
- /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
- struct list_lru_memcg *memcg_lrus;
-#endif
- long nr_items;
+ long nr_items;
} ____cacheline_aligned_in_smp;
struct list_lru {
struct list_lru_node *node;
-#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
+#ifdef CONFIG_MEMCG_KMEM
struct list_head list;
+ int shrinker_id;
+ bool memcg_aware;
+ struct xarray xa;
#endif
};
void list_lru_destroy(struct list_lru *lru);
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
- struct lock_class_key *key);
+ struct lock_class_key *key, struct shrinker *shrinker);
-#define list_lru_init(lru) __list_lru_init((lru), false, NULL)
-#define list_lru_init_key(lru, key) __list_lru_init((lru), false, (key))
-#define list_lru_init_memcg(lru) __list_lru_init((lru), true, NULL)
+#define list_lru_init(lru) \
+ __list_lru_init((lru), false, NULL, NULL)
+#define list_lru_init_key(lru, key) \
+ __list_lru_init((lru), false, (key), NULL)
+#define list_lru_init_memcg(lru, shrinker) \
+ __list_lru_init((lru), true, NULL, shrinker)
-int memcg_update_all_list_lrus(int num_memcgs);
-void memcg_drain_all_list_lrus(int src_idx, int dst_idx);
+int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
+ gfp_t gfp);
+void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent);
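A setup sketch under assumed names: a memcg-aware LRU is now paired with its shrinker at init time.

	static struct shrinker my_shrinker;	/* .count_objects/.scan_objects set elsewhere */
	static struct list_lru my_lru;

	static int __init my_cache_init(void)
	{
		return list_lru_init_memcg(&my_lru, &my_shrinker);
	}
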
/**
* list_lru_add: add an element to the lru list's tail
@@ -139,7 +145,7 @@ typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
* @lru: the lru pointer.
* @nid: the node id to scan from.
* @memcg: the cgroup to scan from.
- * @isolate: callback function that is resposible for deciding what to do with
+ * @isolate: callback function that is responsible for deciding what to do with
* the item currently being scanned
* @cb_arg: opaque type that will be passed to @isolate
* @nr_to_walk: how many items to scan.
@@ -160,6 +166,23 @@ unsigned long list_lru_walk_one(struct list_lru *lru,
int nid, struct mem_cgroup *memcg,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk);
+/**
+ * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items.
+ * @lru: the lru pointer.
+ * @nid: the node id to scan from.
+ * @memcg: the cgroup to scan from.
+ * @isolate: callback function that is responsible for deciding what to do with
+ * the item currently being scanned
+ * @cb_arg: opaque type that will be passed to @isolate
+ * @nr_to_walk: how many items to scan.
+ *
+ * Same as list_lru_walk_one() except that the spinlock is acquired with
+ * spin_lock_irq().
+ */
+unsigned long list_lru_walk_one_irq(struct list_lru *lru,
+ int nid, struct mem_cgroup *memcg,
+ list_lru_walk_cb isolate, void *cb_arg,
+ unsigned long *nr_to_walk);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk);
@@ -173,6 +196,14 @@ list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
}
static inline unsigned long
+list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc,
+ list_lru_walk_cb isolate, void *cb_arg)
+{
+ return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,
+ &sc->nr_to_scan);
+}
+
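A sketch of an @isolate callback following the list_lru_walk_cb typedef above (error handling elided; list_lru_isolate() is declared alongside in this header):

	static enum lru_status my_isolate(struct list_head *item,
					  struct list_lru_one *list,
					  spinlock_t *lock, void *cb_arg)
	{
		list_lru_isolate(list, item);
		return LRU_REMOVED;
	}
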
+static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
void *cb_arg, unsigned long nr_to_walk)
{
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
index 87ff4f58a2f0..fa6e8471bd22 100644
--- a/include/linux/list_nulls.h
+++ b/include/linux/list_nulls.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LIST_NULLS_H
#define _LINUX_LIST_NULLS_H
@@ -55,11 +56,33 @@ static inline unsigned long get_nulls_value(const struct hlist_nulls_node *ptr)
return ((unsigned long)ptr) >> 1;
}
+/**
+ * hlist_nulls_unhashed - Has node been removed and reinitialized?
+ * @h: Node to be checked
+ *
+ * Note that not all removal functions will leave a node in unhashed state.
+ * For example, hlist_del_init_rcu() leaves the node in unhashed state,
+ * but hlist_nulls_del() does not.
+ */
static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h)
{
return !h->pprev;
}
+/**
+ * hlist_nulls_unhashed_lockless - Has node been removed and reinitialized?
+ * @h: Node to be checked
+ *
+ * Note that not all removal functions will leave a node in unhashed state.
+ * For example, hlist_del_init_rcu() leaves the node in unhashed state,
+ * but hlist_nulls_del() does not. Unlike hlist_nulls_unhashed(), this
+ * function may be used locklessly.
+ */
+static inline int hlist_nulls_unhashed_lockless(const struct hlist_nulls_node *h)
+{
+ return !READ_ONCE(h->pprev);
+}
+
static inline int hlist_nulls_empty(const struct hlist_nulls_head *h)
{
return is_a_nulls(READ_ONCE(h->first));
@@ -71,10 +94,10 @@ static inline void hlist_nulls_add_head(struct hlist_nulls_node *n,
struct hlist_nulls_node *first = h->first;
n->next = first;
- n->pprev = &h->first;
+ WRITE_ONCE(n->pprev, &h->first);
h->first = n;
if (!is_a_nulls(first))
- first->pprev = &n->next;
+ WRITE_ONCE(first->pprev, &n->next);
}
static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
@@ -84,13 +107,13 @@ static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
WRITE_ONCE(*pprev, next);
if (!is_a_nulls(next))
- next->pprev = pprev;
+ WRITE_ONCE(next->pprev, pprev);
}
static inline void hlist_nulls_del(struct hlist_nulls_node *n)
{
__hlist_nulls_del(n);
- n->pprev = LIST_POISON2;
+ WRITE_ONCE(n->pprev, LIST_POISON2);
}
/**
diff --git a/include/linux/list_sort.h b/include/linux/list_sort.h
index 1a2df2efb771..453105f74e05 100644
--- a/include/linux/list_sort.h
+++ b/include/linux/list_sort.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LIST_SORT_H
#define _LINUX_LIST_SORT_H
@@ -5,7 +6,9 @@
struct list_head;
-void list_sort(void *priv, struct list_head *head,
- int (*cmp)(void *priv, struct list_head *a,
- struct list_head *b));
+typedef int __attribute__((nonnull(2,3))) (*list_cmp_func_t)(void *,
+ const struct list_head *, const struct list_head *);
+
+__attribute__((nonnull(2,3)))
+void list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp);
#endif
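A comparator sketch matching the new list_cmp_func_t, sorting a hypothetical struct item ascending by ->key:

	struct item {
		struct list_head node;
		int key;
	};

	static int item_cmp(void *priv, const struct list_head *a,
			    const struct list_head *b)
	{
		int ka = list_entry(a, struct item, node)->key;
		int kb = list_entry(b, struct item, node)->key;

		return (ka > kb) - (ka < kb);	/* <0, 0, >0 */
	}

	/* usage: list_sort(NULL, &items, item_cmp); */
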
diff --git a/include/linux/litex.h b/include/linux/litex.h
new file mode 100644
index 000000000000..f2edb86d5f44
--- /dev/null
+++ b/include/linux/litex.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common LiteX header providing
+ * helper functions for accessing CSRs.
+ *
+ * Copyright (C) 2019-2020 Antmicro <www.antmicro.com>
+ */
+
+#ifndef _LINUX_LITEX_H
+#define _LINUX_LITEX_H
+
+#include <linux/io.h>
+
+static inline void _write_litex_subregister(u32 val, void __iomem *addr)
+{
+ writel((u32 __force)cpu_to_le32(val), addr);
+}
+
+static inline u32 _read_litex_subregister(void __iomem *addr)
+{
+ return le32_to_cpu((__le32 __force)readl(addr));
+}
+
+/*
+ * The LiteX SoC Generator can, depending on its configuration, split a
+ * single logical CSR (Control & Status Register) into a series of
+ * consecutive physical registers.
+ *
+ * For example, in the configuration with 8-bit CSR Bus, a 32-bit aligned,
+ * 32-bit wide logical CSR will be laid out as four 32-bit physical
+ * subregisters, each one containing one byte of meaningful data.
+ *
+ * For Linux support, upstream LiteX enforces a 32-bit wide CSR bus, which
+ * means that only larger-than-32-bit CSRs will be split across multiple
+ * subregisters (e.g., a 64-bit CSR will be spread across two consecutive
+ * 32-bit subregisters).
+ *
+ * For details see: https://github.com/enjoy-digital/litex/wiki/CSR-Bus
+ */
+
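A layout sketch for the one case that still spans subregisters on the 32-bit bus: a 64-bit CSR at @base occupies two words, most-significant first, which is exactly what litex_write64()/litex_read64() below encode.

	base + 0x0: bits [63:32]
	base + 0x4: bits [31:0]
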
+static inline void litex_write8(void __iomem *reg, u8 val)
+{
+ _write_litex_subregister(val, reg);
+}
+
+static inline void litex_write16(void __iomem *reg, u16 val)
+{
+ _write_litex_subregister(val, reg);
+}
+
+static inline void litex_write32(void __iomem *reg, u32 val)
+{
+ _write_litex_subregister(val, reg);
+}
+
+static inline void litex_write64(void __iomem *reg, u64 val)
+{
+ _write_litex_subregister(val >> 32, reg);
+ _write_litex_subregister(val, reg + 4);
+}
+
+static inline u8 litex_read8(void __iomem *reg)
+{
+ return _read_litex_subregister(reg);
+}
+
+static inline u16 litex_read16(void __iomem *reg)
+{
+ return _read_litex_subregister(reg);
+}
+
+static inline u32 litex_read32(void __iomem *reg)
+{
+ return _read_litex_subregister(reg);
+}
+
+static inline u64 litex_read64(void __iomem *reg)
+{
+ return ((u64)_read_litex_subregister(reg) << 32) |
+ _read_litex_subregister(reg + 4);
+}
+
+#endif /* _LINUX_LITEX_H */
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 194991ef9347..9b9b38e89563 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* livepatch.h - Kernel Live Patching Core
*
* Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
* Copyright (C) 2014 SUSE
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _LINUX_LIVEPATCH_H_
@@ -24,11 +12,11 @@
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/livepatch_sched.h>
#if IS_ENABLED(CONFIG_LIVEPATCH)
-#include <asm/livepatch.h>
-
/* task patch states */
#define KLP_UNDEFINED -1
#define KLP_UNPATCHED 0
@@ -40,12 +28,13 @@
* @new_func: pointer to the patched function code
* @old_sympos: a hint indicating which symbol position the old function
* can be found (optional)
- * @immediate: patch the func immediately, bypassing safety mechanisms
- * @old_addr: the address of the function being patched
+ * @old_func: pointer to the function being patched
* @kobj: kobject for sysfs resources
+ * @node: list node for klp_object func_list
* @stack_node: list node for klp_ops func_stack list
* @old_size: size of the old function
* @new_size: size of the new function
+ * @nop: temporary patch to use the original code again; dynamically allocated
* @patched: the func has been added to the klp_ops list
* @transition: the func is currently being applied or reverted
*
@@ -76,75 +65,134 @@ struct klp_func {
* in kallsyms for the given object is used.
*/
unsigned long old_sympos;
- bool immediate;
/* internal */
- unsigned long old_addr;
+ void *old_func;
struct kobject kobj;
+ struct list_head node;
struct list_head stack_node;
unsigned long old_size, new_size;
+ bool nop;
bool patched;
bool transition;
};
+struct klp_object;
+
+/**
+ * struct klp_callbacks - pre/post live-(un)patch callback structure
+ * @pre_patch: executed before code patching
+ * @post_patch: executed after code patching
+ * @pre_unpatch: executed before code unpatching
+ * @post_unpatch: executed after code unpatching
+ * @post_unpatch_enabled: flag indicating if post-unpatch callback
+ * should run
+ *
+ * All callbacks are optional. Only the pre-patch callback, if provided,
+ * will be unconditionally executed. If the parent klp_object fails to
+ * patch for any reason, including a non-zero error status returned from
+ * the pre-patch callback, no further callbacks will be executed.
+ */
+struct klp_callbacks {
+ int (*pre_patch)(struct klp_object *obj);
+ void (*post_patch)(struct klp_object *obj);
+ void (*pre_unpatch)(struct klp_object *obj);
+ void (*post_unpatch)(struct klp_object *obj);
+ bool post_unpatch_enabled;
+};
+
/**
* struct klp_object - kernel object structure for live patching
* @name: module name (or NULL for vmlinux)
* @funcs: function entries for functions to be patched in the object
+ * @callbacks: functions to be executed pre/post (un)patching
* @kobj: kobject for sysfs resources
+ * @func_list: dynamic list of the function entries
+ * @node: list node for klp_patch obj_list
* @mod: kernel module associated with the patched object
* (NULL for vmlinux)
+ * @dynamic: temporary object for nop functions; dynamically allocated
* @patched: the object's funcs have been added to the klp_ops list
*/
struct klp_object {
/* external */
const char *name;
struct klp_func *funcs;
+ struct klp_callbacks callbacks;
/* internal */
struct kobject kobj;
+ struct list_head func_list;
+ struct list_head node;
struct module *mod;
+ bool dynamic;
bool patched;
};
/**
+ * struct klp_state - state of the system modified by the livepatch
+ * @id: system state identifier (non-zero)
+ * @version: version of the change
+ * @data: custom data
+ */
+struct klp_state {
+ unsigned long id;
+ unsigned int version;
+ void *data;
+};
+
+/**
* struct klp_patch - patch structure for live patching
* @mod: reference to the live patch module
* @objs: object entries for kernel objects to be patched
- * @immediate: patch all funcs immediately, bypassing safety mechanisms
- * @list: list node for global list of registered patches
+ * @states: system states that can get modified
+ * @replace: replace all actively used patches
+ * @list: list node for global list of actively used patches
* @kobj: kobject for sysfs resources
+ * @obj_list: dynamic list of the object entries
* @enabled: the patch is enabled (but operation may be incomplete)
+ * @forced: was involved in a forced transition
+ * @free_work: patch cleanup from workqueue context
* @finish: for waiting till it is safe to remove the patch module
*/
struct klp_patch {
/* external */
struct module *mod;
struct klp_object *objs;
- bool immediate;
+ struct klp_state *states;
+ bool replace;
/* internal */
struct list_head list;
struct kobject kobj;
+ struct list_head obj_list;
bool enabled;
+ bool forced;
+ struct work_struct free_work;
struct completion finish;
};
-#define klp_for_each_object(patch, obj) \
+#define klp_for_each_object_static(patch, obj) \
for (obj = patch->objs; obj->funcs || obj->name; obj++)
-#define klp_for_each_func(obj, func) \
+#define klp_for_each_object_safe(patch, obj, tmp_obj) \
+ list_for_each_entry_safe(obj, tmp_obj, &patch->obj_list, node)
+
+#define klp_for_each_object(patch, obj) \
+ list_for_each_entry(obj, &patch->obj_list, node)
+
+#define klp_for_each_func_static(obj, func) \
for (func = obj->funcs; \
func->old_name || func->new_func || func->old_sympos; \
func++)
-int klp_register_patch(struct klp_patch *);
-int klp_unregister_patch(struct klp_patch *);
-int klp_enable_patch(struct klp_patch *);
-int klp_disable_patch(struct klp_patch *);
+#define klp_for_each_func_safe(obj, func, tmp_func) \
+ list_for_each_entry_safe(func, tmp_func, &obj->func_list, node)
-void arch_klp_init_object_loaded(struct klp_patch *patch,
- struct klp_object *obj);
+#define klp_for_each_func(obj, func) \
+ list_for_each_entry(func, &obj->func_list, node)
+
+int klp_enable_patch(struct klp_patch *);
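A skeleton of a patch definition in the new API, modeled on samples/livepatch/livepatch-sample.c (the replacement function is defined elsewhere in the patch module):

	static struct klp_func funcs[] = {
		{
			.old_name = "cmdline_proc_show",
			.new_func = livepatch_cmdline_proc_show,
		}, { }
	};

	static struct klp_object objs[] = {
		{
			/* NULL name means vmlinux */
			.funcs = funcs,
		}, { }
	};

	static struct klp_patch patch = {
		.mod = THIS_MODULE,
		.objs = objs,
	};

	/* module init simply does: return klp_enable_patch(&patch); */
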
/* Called from the module loader during module coming/going states */
int klp_module_coming(struct module *mod);
@@ -164,6 +212,29 @@ static inline bool klp_have_reliable_stack(void)
IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE);
}
+typedef int (*klp_shadow_ctor_t)(void *obj,
+ void *shadow_data,
+ void *ctor_data);
+typedef void (*klp_shadow_dtor_t)(void *obj, void *shadow_data);
+
+void *klp_shadow_get(void *obj, unsigned long id);
+void *klp_shadow_alloc(void *obj, unsigned long id,
+ size_t size, gfp_t gfp_flags,
+ klp_shadow_ctor_t ctor, void *ctor_data);
+void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
+ size_t size, gfp_t gfp_flags,
+ klp_shadow_ctor_t ctor, void *ctor_data);
+void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor);
+void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);
+
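A shadow-variable sketch: attach a per-object int under a hypothetical id, with a constructor zeroing it (the pattern follows Documentation/livepatch/shadow-vars.rst):

	static int sv_ctor(void *obj, void *shadow_data, void *ctor_data)
	{
		*(int *)shadow_data = 0;
		return 0;
	}

	int *count = klp_shadow_get_or_alloc(obj, 1, sizeof(int), GFP_KERNEL,
					     sv_ctor, NULL);
	/* ... from the unpatch path: */
	klp_shadow_free(obj, 1, NULL);
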
+struct klp_state *klp_get_state(struct klp_patch *patch, unsigned long id);
+struct klp_state *klp_get_prev_state(unsigned long id);
+
+int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
+ const char *shstrtab, const char *strtab,
+ unsigned int symindex, unsigned int secindex,
+ const char *objname);
+
#else /* !CONFIG_LIVEPATCH */
static inline int klp_module_coming(struct module *mod) { return 0; }
@@ -172,6 +243,15 @@ static inline bool klp_patch_pending(struct task_struct *task) { return false; }
static inline void klp_update_patch_state(struct task_struct *task) {}
static inline void klp_copy_process(struct task_struct *child) {}
+static inline
+int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
+ const char *shstrtab, const char *strtab,
+ unsigned int symindex, unsigned int secindex,
+ const char *objname)
+{
+ return 0;
+}
+
#endif /* CONFIG_LIVEPATCH */
#endif /* _LINUX_LIVEPATCH_H_ */
diff --git a/include/linux/livepatch_sched.h b/include/linux/livepatch_sched.h
new file mode 100644
index 000000000000..013794fb5da0
--- /dev/null
+++ b/include/linux/livepatch_sched.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_LIVEPATCH_SCHED_H_
+#define _LINUX_LIVEPATCH_SCHED_H_
+
+#include <linux/jump_label.h>
+#include <linux/static_call_types.h>
+
+#ifdef CONFIG_LIVEPATCH
+
+void __klp_sched_try_switch(void);
+
+#if !defined(CONFIG_PREEMPT_DYNAMIC) || !defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+
+DECLARE_STATIC_KEY_FALSE(klp_sched_try_switch_key);
+
+static __always_inline void klp_sched_try_switch(void)
+{
+ if (static_branch_unlikely(&klp_sched_try_switch_key))
+ __klp_sched_try_switch();
+}
+
+#endif /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
+
+#else /* !CONFIG_LIVEPATCH */
+static inline void klp_sched_try_switch(void) {}
+static inline void __klp_sched_try_switch(void) {}
+#endif /* CONFIG_LIVEPATCH */
+
+#endif /* _LINUX_LIVEPATCH_SCHED_H_ */
diff --git a/include/linux/llist.h b/include/linux/llist.h
index d11738110a7a..85bda2d02d65 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef LLIST_H
#define LLIST_H
/*
@@ -45,23 +46,12 @@
*
* Copyright 2010,2011 Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation;
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/atomic.h>
-#include <linux/kernel.h>
+#include <linux/container_of.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
struct llist_head {
struct llist_node *first;
@@ -93,6 +83,23 @@ static inline void init_llist_head(struct llist_head *list)
container_of(ptr, type, member)
/**
+ * member_address_is_nonnull - check whether the member address is not NULL
+ * @ptr: the object pointer (struct type * that contains the llist_node)
+ * @member: the name of the llist_node within the struct.
+ *
+ * This macro is conceptually the same as
+ * &ptr->member != NULL
+ * but it works around the fact that compilers can decide that taking a member
+ * address is never a NULL pointer.
+ *
+ * Real objects that start at a high address and have a member at NULL are
+ * unlikely to exist, but such pointers may be returned e.g. by the
+ * container_of() macro.
+ */
+#define member_address_is_nonnull(ptr, member) \
+ ((uintptr_t)(ptr) + offsetof(typeof(*(ptr)), member) != 0)
+
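Spelled out, the failure mode this avoids: the iterators below terminate when @pos equals llist_entry(NULL, type, member), i.e. when (uintptr_t)pos + offsetof(type, member) == 0. A literal "&pos->member != NULL" test lets the compiler assume a member address is never NULL and delete the check; doing the arithmetic on uintptr_t keeps the comparison intact.
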
+/**
* llist_for_each - iterate over some deleted entries of a lock-less list
* @pos: the &struct llist_node to use as a loop cursor
* @node: the first entry of deleted list entries
@@ -145,7 +152,7 @@ static inline void init_llist_head(struct llist_head *list)
*/
#define llist_for_each_entry(pos, node, member) \
for ((pos) = llist_entry((node), typeof(*(pos)), member); \
- &(pos)->member != NULL; \
+ member_address_is_nonnull(pos, member); \
(pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
/**
@@ -167,7 +174,7 @@ static inline void init_llist_head(struct llist_head *list)
*/
#define llist_for_each_entry_safe(pos, n, node, member) \
for (pos = llist_entry((node), typeof(*pos), member); \
- &pos->member != NULL && \
+ member_address_is_nonnull(pos, member) && \
(n = llist_entry(pos->member.next, typeof(*n), member), true); \
pos = n)
@@ -181,7 +188,7 @@ static inline void init_llist_head(struct llist_head *list)
*/
static inline bool llist_empty(const struct llist_head *head)
{
- return ACCESS_ONCE(head->first) == NULL;
+ return READ_ONCE(head->first) == NULL;
}
static inline struct llist_node *llist_next(struct llist_node *node)
@@ -192,6 +199,16 @@ static inline struct llist_node *llist_next(struct llist_node *node)
extern bool llist_add_batch(struct llist_node *new_first,
struct llist_node *new_last,
struct llist_head *head);
+
+static inline bool __llist_add_batch(struct llist_node *new_first,
+ struct llist_node *new_last,
+ struct llist_head *head)
+{
+ new_last->next = head->first;
+ head->first = new_first;
+ return new_last->next == NULL;
+}
+
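The double-underscore variants skip the atomic cmpxchg, so they are only safe when the caller already excludes concurrency (a context-private list, or an outer lock). A sketch with a hypothetical request type:

	LLIST_HEAD(pending);	/* private to this context */

	bool was_empty = __llist_add(&req->llnode, &pending);
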
/**
* llist_add - add a new entry
* @new: new entry to be added
@@ -204,6 +221,11 @@ static inline bool llist_add(struct llist_node *new, struct llist_head *head)
return llist_add_batch(new, new, head);
}
+static inline bool __llist_add(struct llist_node *new, struct llist_head *head)
+{
+ return __llist_add_batch(new, new, head);
+}
+
/**
* llist_del_all - delete all entries from lock-less list
* @head: the head of lock-less list to delete all entries
@@ -217,6 +239,14 @@ static inline struct llist_node *llist_del_all(struct llist_head *head)
return xchg(&head->first, NULL);
}
+static inline struct llist_node *__llist_del_all(struct llist_head *head)
+{
+ struct llist_node *first = head->first;
+
+ head->first = NULL;
+ return first;
+}
+
extern struct llist_node *llist_del_first(struct llist_head *head);
struct llist_node *llist_reverse_order(struct llist_node *head);
diff --git a/include/linux/llist_api.h b/include/linux/llist_api.h
new file mode 100644
index 000000000000..625bec0393a1
--- /dev/null
+++ b/include/linux/llist_api.h
@@ -0,0 +1 @@
+#include <linux/llist.h>
diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
new file mode 100644
index 000000000000..e55010fa7329
--- /dev/null
+++ b/include/linux/local_lock.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_LOCAL_LOCK_H
+#define _LINUX_LOCAL_LOCK_H
+
+#include <linux/local_lock_internal.h>
+
+/**
+ * local_lock_init - Runtime initialize a lock instance
+ * @lock: The lock variable
+ */
+#define local_lock_init(lock) __local_lock_init(lock)
+
+/**
+ * local_lock - Acquire a per CPU local lock
+ * @lock: The lock variable
+ */
+#define local_lock(lock) __local_lock(lock)
+
+/**
+ * local_lock_irq - Acquire a per CPU local lock and disable interrupts
+ * @lock: The lock variable
+ */
+#define local_lock_irq(lock) __local_lock_irq(lock)
+
+/**
+ * local_lock_irqsave - Acquire a per CPU local lock, save and disable
+ * interrupts
+ * @lock: The lock variable
+ * @flags: Storage for interrupt flags
+ */
+#define local_lock_irqsave(lock, flags) \
+ __local_lock_irqsave(lock, flags)
+
+/**
+ * local_unlock - Release a per CPU local lock
+ * @lock: The lock variable
+ */
+#define local_unlock(lock) __local_unlock(lock)
+
+/**
+ * local_unlock_irq - Release a per CPU local lock and enable interrupts
+ * @lock: The lock variable
+ */
+#define local_unlock_irq(lock) __local_unlock_irq(lock)
+
+/**
+ * local_unlock_irqrestore - Release a per CPU local lock and restore
+ * interrupt flags
+ * @lock: The lock variable
+ * @flags: Interrupt flags to restore
+ */
+#define local_unlock_irqrestore(lock, flags) \
+ __local_unlock_irqrestore(lock, flags)
+
+#endif
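A usage sketch guarding hypothetical per-CPU scratch state that is also touched from IRQ context:

	struct scratch {
		local_lock_t lock;
		unsigned long data;
	};

	static DEFINE_PER_CPU(struct scratch, scratch) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void scratch_update(unsigned long v)
	{
		unsigned long flags;

		local_lock_irqsave(&scratch.lock, flags);
		__this_cpu_write(scratch.data, v);
		local_unlock_irqrestore(&scratch.lock, flags);
	}

On !PREEMPT_RT this compiles down to local_irq_save() plus lockdep annotations; on PREEMPT_RT it becomes a per-CPU spinlock, so the section stays preemptible, as the internal header below shows.
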
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
new file mode 100644
index 000000000000..975e33b793a7
--- /dev/null
+++ b/include/linux/local_lock_internal.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_LOCAL_LOCK_H
+# error "Do not include directly, include linux/local_lock.h"
+#endif
+
+#include <linux/percpu-defs.h>
+#include <linux/lockdep.h>
+
+#ifndef CONFIG_PREEMPT_RT
+
+typedef struct {
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+ struct task_struct *owner;
+#endif
+} local_lock_t;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCAL_LOCK_DEBUG_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_CONFIG, \
+ .lock_type = LD_LOCK_PERCPU, \
+ }, \
+ .owner = NULL,
+
+static inline void local_lock_acquire(local_lock_t *l)
+{
+ lock_map_acquire(&l->dep_map);
+ DEBUG_LOCKS_WARN_ON(l->owner);
+ l->owner = current;
+}
+
+static inline void local_lock_release(local_lock_t *l)
+{
+ DEBUG_LOCKS_WARN_ON(l->owner != current);
+ l->owner = NULL;
+ lock_map_release(&l->dep_map);
+}
+
+static inline void local_lock_debug_init(local_lock_t *l)
+{
+ l->owner = NULL;
+}
+#else /* CONFIG_DEBUG_LOCK_ALLOC */
+# define LOCAL_LOCK_DEBUG_INIT(lockname)
+static inline void local_lock_acquire(local_lock_t *l) { }
+static inline void local_lock_release(local_lock_t *l) { }
+static inline void local_lock_debug_init(local_lock_t *l) { }
+#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
+
+#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) }
+
+#define __local_lock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
+ lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
+ 0, LD_WAIT_CONFIG, LD_WAIT_INV, \
+ LD_LOCK_PERCPU); \
+ local_lock_debug_init(lock); \
+} while (0)
+
+#define __local_lock(lock) \
+ do { \
+ preempt_disable(); \
+ local_lock_acquire(this_cpu_ptr(lock)); \
+ } while (0)
+
+#define __local_lock_irq(lock) \
+ do { \
+ local_irq_disable(); \
+ local_lock_acquire(this_cpu_ptr(lock)); \
+ } while (0)
+
+#define __local_lock_irqsave(lock, flags) \
+ do { \
+ local_irq_save(flags); \
+ local_lock_acquire(this_cpu_ptr(lock)); \
+ } while (0)
+
+#define __local_unlock(lock) \
+ do { \
+ local_lock_release(this_cpu_ptr(lock)); \
+ preempt_enable(); \
+ } while (0)
+
+#define __local_unlock_irq(lock) \
+ do { \
+ local_lock_release(this_cpu_ptr(lock)); \
+ local_irq_enable(); \
+ } while (0)
+
+#define __local_unlock_irqrestore(lock, flags) \
+ do { \
+ local_lock_release(this_cpu_ptr(lock)); \
+ local_irq_restore(flags); \
+ } while (0)
+
+#else /* !CONFIG_PREEMPT_RT */
+
+/*
+ * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
+ * critical section while staying preemptible.
+ */
+typedef spinlock_t local_lock_t;
+
+#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
+
+#define __local_lock_init(l) \
+ do { \
+ local_spin_lock_init((l)); \
+ } while (0)
+
+#define __local_lock(__lock) \
+ do { \
+ migrate_disable(); \
+ spin_lock(this_cpu_ptr((__lock))); \
+ } while (0)
+
+#define __local_lock_irq(lock) __local_lock(lock)
+
+#define __local_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __local_lock(lock); \
+ } while (0)
+
+#define __local_unlock(__lock) \
+ do { \
+ spin_unlock(this_cpu_ptr((__lock))); \
+ migrate_enable(); \
+ } while (0)
+
+#define __local_unlock_irq(lock) __local_unlock(lock)
+
+#define __local_unlock_irqrestore(lock, flags) __local_unlock(lock)
+
+#endif /* CONFIG_PREEMPT_RT */
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
index 05728396a1a1..3bc9f7410e21 100644
--- a/include/linux/lockd/bind.h
+++ b/include/linux/lockd/bind.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/include/linux/lockd/bind.h
*
@@ -26,7 +27,8 @@ struct rpc_task;
struct nlmsvc_binding {
__be32 (*fopen)(struct svc_rqst *,
struct nfs_fh *,
- struct file **);
+ struct file **,
+ int mode);
void (*fclose)(struct file *);
};
@@ -45,6 +47,7 @@ struct nlmclnt_initdata {
int noresvport;
struct net *net;
const struct nlmclnt_operations *nlmclnt_ops;
+ const struct cred *cred;
};
/*
@@ -74,7 +77,7 @@ struct nlmclnt_operations {
};
extern int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data);
-extern int lockd_up(struct net *net);
+extern int lockd_up(struct net *net, const struct cred *cred);
extern void lockd_down(struct net *net);
#endif /* LINUX_LOCKD_BIND_H */
diff --git a/include/linux/lockd/debug.h b/include/linux/lockd/debug.h
index 0ca8109934e4..eede2ab5246f 100644
--- a/include/linux/lockd/debug.h
+++ b/include/linux/lockd/debug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/include/linux/lockd/debug.h
*
@@ -9,8 +10,6 @@
#ifndef LINUX_LOCKD_DEBUG_H
#define LINUX_LOCKD_DEBUG_H
-#ifdef __KERNEL__
-
#include <linux/sunrpc/debug.h>
/*
@@ -24,8 +23,6 @@
# define ifdebug(flag) if (0)
#endif
-#endif /* __KERNEL__ */
-
/*
* Debug flags
*/
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 3eca67728366..f42594a9efe0 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/include/linux/lockd/lockd.h
*
@@ -9,13 +10,14 @@
#ifndef LINUX_LOCKD_LOCKD_H
#define LINUX_LOCKD_LOCKD_H
-#ifdef __KERNEL__
+/* XXX: a lot of this should really be under fs/lockd. */
#include <linux/in.h>
#include <linux/in6.h>
#include <net/ipv6.h>
#include <linux/fs.h>
#include <linux/kref.h>
+#include <linux/refcount.h>
#include <linux/utsname.h>
#include <linux/lockd/bind.h>
#include <linux/lockd/xdr.h>
@@ -57,7 +59,7 @@ struct nlm_host {
u32 h_state; /* pseudo-state counter */
u32 h_nsmstate; /* true remote NSM state */
u32 h_pidcount; /* Pseudopids */
- atomic_t h_count; /* reference count */
+ refcount_t h_count; /* reference count */
struct mutex h_mutex; /* mutex for pmap binding */
unsigned long h_nextrebind; /* next portmap call */
unsigned long h_expires; /* eligible for GC */
@@ -68,6 +70,7 @@ struct nlm_host {
struct nsm_handle *h_nsmhandle; /* NSM status handle */
char *h_addrbuf; /* address eyecatcher */
struct net *net; /* host net */
+ const struct cred *h_cred;
char nodename[UNX_MAXNODENAME + 1];
const struct nlmclnt_operations *h_nlmclnt_ops; /* Callback ops for NLM users */
};
@@ -82,7 +85,7 @@ struct nlm_host {
struct nsm_handle {
struct list_head sm_link;
- atomic_t sm_count;
+ refcount_t sm_count;
char *sm_mon_name;
char *sm_name;
struct sockaddr_storage sm_addr;
@@ -96,21 +99,11 @@ struct nsm_handle {
/*
* Rigorous type checking on sockaddr type conversions
*/
-static inline struct sockaddr_in *nlm_addr_in(const struct nlm_host *host)
-{
- return (struct sockaddr_in *)&host->h_addr;
-}
-
static inline struct sockaddr *nlm_addr(const struct nlm_host *host)
{
return (struct sockaddr *)&host->h_addr;
}
-static inline struct sockaddr_in *nlm_srcaddr_in(const struct nlm_host *host)
-{
- return (struct sockaddr_in *)&host->h_srcaddr;
-}
-
static inline struct sockaddr *nlm_srcaddr(const struct nlm_host *host)
{
return (struct sockaddr *)&host->h_srcaddr;
@@ -121,21 +114,30 @@ static inline struct sockaddr *nlm_srcaddr(const struct nlm_host *host)
*/
struct nlm_lockowner {
struct list_head list;
- atomic_t count;
+ refcount_t count;
struct nlm_host *host;
fl_owner_t owner;
uint32_t pid;
};
-struct nlm_wait;
+/*
+ * This is the representation of a blocked client lock.
+ */
+struct nlm_wait {
+ struct list_head b_list; /* linked list */
+ wait_queue_head_t b_wait; /* where to wait on */
+ struct nlm_host *b_host;
+ struct file_lock *b_lock; /* local file lock */
+ __be32 b_status; /* grant callback status */
+};
/*
* Memory chunk for NLM client RPC request.
*/
#define NLMCLNT_OHSIZE ((__NEW_UTS_LEN) + 10u)
struct nlm_rqst {
- atomic_t a_count;
+ refcount_t a_count;
unsigned int a_flags; /* initial RPC task flags */
struct nlm_host * a_host; /* host handle */
struct nlm_args a_args; /* arguments */
@@ -153,7 +155,8 @@ struct nlm_rqst {
struct nlm_file {
struct hlist_node f_list; /* linked list */
struct nfs_fh f_handle; /* NFS file handle */
- struct file * f_file; /* VFS file pointer */
+ struct file * f_file[2]; /* VFS file pointers,
+ indexed by O_ flags */
struct nlm_share * f_shares; /* DOS shares */
struct list_head f_blocks; /* blocked locks */
unsigned int f_locks; /* guesstimate # of locks */
@@ -192,9 +195,9 @@ struct nlm_block {
* Global variables
*/
extern const struct rpc_program nlm_program;
-extern const struct svc_procedure nlmsvc_procedures[];
+extern const struct svc_procedure nlmsvc_procedures[24];
#ifdef CONFIG_LOCKD_V4
-extern const struct svc_procedure nlmsvc_procedures4[];
+extern const struct svc_procedure nlmsvc_procedures4[24];
#endif
extern int nlmsvc_grace_period;
extern unsigned long nlmsvc_timeout;
@@ -208,9 +211,11 @@ struct nlm_rqst * nlm_alloc_call(struct nlm_host *host);
int nlm_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *);
int nlm_async_reply(struct nlm_rqst *, u32, const struct rpc_call_ops *);
void nlmclnt_release_call(struct nlm_rqst *);
-struct nlm_wait * nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl);
-void nlmclnt_finish_block(struct nlm_wait *block);
-int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout);
+void nlmclnt_prepare_block(struct nlm_wait *block, struct nlm_host *host,
+ struct file_lock *fl);
+void nlmclnt_queue_block(struct nlm_wait *block);
+__be32 nlmclnt_dequeue_block(struct nlm_wait *block);
+int nlmclnt_wait(struct nlm_wait *block, struct nlm_rqst *req, long timeout);
__be32 nlmclnt_grant(const struct sockaddr *addr,
const struct nlm_lock *lock);
void nlmclnt_recovery(struct nlm_host *);
@@ -227,7 +232,8 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
const u32 version,
const char *hostname,
int noresvport,
- struct net *net);
+ struct net *net,
+ const struct cred *cred);
void nlmclnt_release_host(struct nlm_host *);
struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
const char *hostname,
@@ -265,6 +271,7 @@ typedef int (*nlm_host_match_fn_t)(void *cur, struct nlm_host *ref);
/*
* Server-side lock handling
*/
+int lock_to_openmode(struct file_lock *);
__be32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *,
struct nlm_host *, struct nlm_lock *, int,
struct nlm_cookie *, int);
@@ -278,13 +285,16 @@ void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
nlm_host_match_fn_t match);
void nlmsvc_grant_reply(struct nlm_cookie *, __be32);
void nlmsvc_release_call(struct nlm_rqst *);
+void nlmsvc_locks_init_private(struct file_lock *, struct nlm_host *, pid_t);
/*
* File handling for the server personality
*/
__be32 nlm_lookup_file(struct svc_rqst *, struct nlm_file **,
- struct nfs_fh *);
+ struct nlm_lock *);
void nlm_release_file(struct nlm_file *);
+void nlmsvc_put_lockowner(struct nlm_lockowner *);
+void nlmsvc_release_lockowner(struct nlm_lock *);
void nlmsvc_mark_resources(struct net *);
void nlmsvc_free_host_resources(struct nlm_host *);
void nlmsvc_invalidate_all(void);
@@ -295,9 +305,15 @@ void nlmsvc_invalidate_all(void);
int nlmsvc_unlock_all_by_sb(struct super_block *sb);
int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr);
+static inline struct file *nlmsvc_file_file(struct nlm_file *file)
+{
+ return file->f_file[O_RDONLY] ?
+ file->f_file[O_RDONLY] : file->f_file[O_WRONLY];
+}
+
static inline struct inode *nlmsvc_file_inode(struct nlm_file *file)
{
- return file_inode(file->f_file);
+ return file_inode(nlmsvc_file_file(file));
}
static inline int __nlm_privileged_request4(const struct sockaddr *sap)
@@ -367,6 +383,4 @@ static inline int nlm_compare_locks(const struct file_lock *fl1,
extern const struct lock_manager_operations nlmsvc_lock_operations;
-#endif /* __KERNEL__ */
-
#endif /* LINUX_LOCKD_LOCKD_H */
diff --git a/include/linux/lockd/nlm.h b/include/linux/lockd/nlm.h
index d9d46e442538..6e343ef760dc 100644
--- a/include/linux/lockd/nlm.h
+++ b/include/linux/lockd/nlm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/include/linux/lockd/nlm.h
*
diff --git a/include/linux/lockd/share.h b/include/linux/lockd/share.h
index 630c5bf69b07..1f18a9faf645 100644
--- a/include/linux/lockd/share.h
+++ b/include/linux/lockd/share.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/include/linux/lockd/share.h
*
diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h
index 7acbecc21a40..b60fbcd8cdfa 100644
--- a/include/linux/lockd/xdr.h
+++ b/include/linux/lockd/xdr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/include/linux/lockd/xdr.h
*
@@ -10,6 +11,7 @@
#define LOCKD_XDR_H
#include <linux/fs.h>
+#include <linux/filelock.h>
#include <linux/nfs.h>
#include <linux/sunrpc/xdr.h>
@@ -40,6 +42,8 @@ struct nlm_lock {
struct nfs_fh fh;
struct xdr_netobj oh;
u32 svid;
+ u64 lock_start;
+ u64 lock_len;
struct file_lock fl;
};
@@ -95,24 +99,19 @@ struct nlm_reboot {
*/
#define NLMSVC_XDRSIZE sizeof(struct nlm_args)
-int nlmsvc_decode_testargs(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_testres(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_lockargs(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_cancargs(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_unlockargs(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_res(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_res(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_void(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_void(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_shareargs(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_shareres(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_notify(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_reboot(struct svc_rqst *, __be32 *);
-/*
-int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
- */
+bool nlmsvc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_cancargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_reboot(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_shareargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_notify(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+
+bool nlmsvc_encode_testres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_encode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_encode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_encode_shareres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
#endif /* LOCKD_XDR_H */
diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h
index bf1645609225..72831e35dca3 100644
--- a/include/linux/lockd/xdr4.h
+++ b/include/linux/lockd/xdr4.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/include/linux/lockd/xdr4.h
*
@@ -21,27 +22,22 @@
#define nlm4_fbig cpu_to_be32(NLM_FBIG)
#define nlm4_failed cpu_to_be32(NLM_FAILED)
+void nlm4svc_set_file_lock_range(struct file_lock *fl, u64 off, u64 len);
+bool nlm4svc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_cancargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_reboot(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_shareargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_notify(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_encode_testres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_encode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_encode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_encode_shareres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
-int nlm4svc_decode_testargs(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_testres(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_lockargs(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_cancargs(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_unlockargs(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_res(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_res(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_void(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_void(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_shareargs(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_shareres(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_notify(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_reboot(struct svc_rqst *, __be32 *);
-/*
-int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *);
-int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
- */
extern const struct rpc_version nlm_version4;
#endif /* LOCKD_XDR4_H */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index fffe49f188e6..b32256e9e944 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -1,22 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Runtime locking correctness validator
*
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
- * see Documentation/locking/lockdep-design.txt for more details.
+ * see Documentation/locking/lockdep-design.rst for more details.
*/
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H
-struct task_struct;
-struct lockdep_map;
-
-/* for sysctl */
-extern int prove_locking;
-extern int lock_stat;
+#include <linux/lockdep_types.h>
+#include <linux/smp.h>
+#include <asm/percpu.h>
-#define MAX_LOCKDEP_SUBCLASSES 8UL
+struct task_struct;
#ifdef CONFIG_LOCKDEP
@@ -25,138 +23,6 @@ extern int lock_stat;
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>
-/*
- * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
- * the total number of states... :-(
- */
-#define XXX_LOCK_USAGE_STATES (1+3*4)
-
-/*
- * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
- * cached in the instance of lockdep_map
- *
- * Currently main class (subclass == 0) and signle depth subclass
- * are cached in lockdep_map. This optimization is mainly targeting
- * on rq->lock. double_rq_lock() acquires this highly competitive with
- * single depth.
- */
-#define NR_LOCKDEP_CACHING_CLASSES 2
-
-/*
- * Lock-classes are keyed via unique addresses, by embedding the
- * lockclass-key into the kernel (or module) .data section. (For
- * static locks we use the lock address itself as the key.)
- */
-struct lockdep_subclass_key {
- char __one_byte;
-} __attribute__ ((__packed__));
-
-struct lock_class_key {
- struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
-};
-
-extern struct lock_class_key __lockdep_no_validate__;
-
-#define LOCKSTAT_POINTS 4
-
-/*
- * The lock-class itself:
- */
-struct lock_class {
- /*
- * class-hash:
- */
- struct hlist_node hash_entry;
-
- /*
- * global list of all lock-classes:
- */
- struct list_head lock_entry;
-
- struct lockdep_subclass_key *key;
- unsigned int subclass;
- unsigned int dep_gen_id;
-
- /*
- * IRQ/softirq usage tracking bits:
- */
- unsigned long usage_mask;
- struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES];
-
- /*
- * These fields represent a directed graph of lock dependencies,
- * to every node we attach a list of "forward" and a list of
- * "backward" graph nodes.
- */
- struct list_head locks_after, locks_before;
-
- /*
- * Generation counter, when doing certain classes of graph walking,
- * to ensure that we check one node only once:
- */
- unsigned int version;
-
- /*
- * Statistics counter:
- */
- unsigned long ops;
-
- const char *name;
- int name_version;
-
-#ifdef CONFIG_LOCK_STAT
- unsigned long contention_point[LOCKSTAT_POINTS];
- unsigned long contending_point[LOCKSTAT_POINTS];
-#endif
-};
-
-#ifdef CONFIG_LOCK_STAT
-struct lock_time {
- s64 min;
- s64 max;
- s64 total;
- unsigned long nr;
-};
-
-enum bounce_type {
- bounce_acquired_write,
- bounce_acquired_read,
- bounce_contended_write,
- bounce_contended_read,
- nr_bounce_types,
-
- bounce_acquired = bounce_acquired_write,
- bounce_contended = bounce_contended_write,
-};
-
-struct lock_class_stats {
- unsigned long contention_point[LOCKSTAT_POINTS];
- unsigned long contending_point[LOCKSTAT_POINTS];
- struct lock_time read_waittime;
- struct lock_time write_waittime;
- struct lock_time read_holdtime;
- struct lock_time write_holdtime;
- unsigned long bounces[nr_bounce_types];
-};
-
-struct lock_class_stats lock_stats(struct lock_class *class);
-void clear_lock_stats(struct lock_class *class);
-#endif
-
-/*
- * Map the lock object (the lock instance) to the lock-class object.
- * This is embedded into specific lock instances:
- */
-struct lockdep_map {
- struct lock_class_key *key;
- struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
- const char *name;
-#ifdef CONFIG_LOCK_STAT
- int cpu;
- unsigned long ip;
-#endif
-};
-
static inline void lockdep_copy_map(struct lockdep_map *to,
struct lockdep_map *from)
{
@@ -182,8 +48,13 @@ static inline void lockdep_copy_map(struct lockdep_map *to,
struct lock_list {
struct list_head entry;
struct lock_class *class;
- struct stack_trace trace;
- int distance;
+ struct lock_class *links_to;
+ const struct lock_trace *trace;
+ u16 distance;
+ /* bitmap of different dependencies from head to this */
+ u8 dep;
+ /* used by BFS to record whether "prev -> this" only has -(*R)-> */
+ u8 only_xr;
/*
* The parent field is used to implement breadth-first search, and the
@@ -192,11 +63,17 @@ struct lock_list {
struct lock_list *parent;
};
-/*
- * We record lock dependency chains, so that we can cache them:
+/**
+ * struct lock_chain - lock dependency chain record
+ *
+ * @irq_context: the same as irq_context in held_lock below
+ * @depth: the number of held locks in this chain
+ * @base: the index in chain_hlocks for this chain
+ * @entry: the collided lock chains in lock_chain hash list
+ * @chain_key: the hash key of this lock_chain
*/
struct lock_chain {
- /* see BUILD_BUG_ON()s in lookup_chain_cache() */
+ /* see BUILD_BUG_ON()s in add_chain_cache() */
unsigned int irq_context : 2,
depth : 6,
base : 24;
@@ -206,12 +83,8 @@ struct lock_chain {
};
#define MAX_LOCKDEP_KEYS_BITS 13
-/*
- * Subtract one because we offset hlock->class_idx by 1 in order
- * to make 0 mean no class. This avoids overflowing the class_idx
- * bitfield and hitting the BUG in hlock_class().
- */
-#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
+#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS)
+#define INITIAL_CHAIN_KEY -1
struct held_lock {
/*
@@ -236,6 +109,11 @@ struct held_lock {
u64 waittime_stamp;
u64 holdtime_stamp;
#endif
+ /*
+ * class_idx is zero-indexed; it points to the element in
+ * lock_classes this held lock instance belongs to. class_idx is in
+ * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
+ */
unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
/*
* The lock-stack is unified in that the lock chains of interrupt
@@ -256,21 +134,47 @@ struct held_lock {
unsigned int read:2; /* see lock_acquire() comment */
unsigned int check:1; /* see lock_acquire() comment */
unsigned int hardirqs_off:1;
- unsigned int references:12; /* 32 bits */
+ unsigned int sync:1;
+ unsigned int references:11; /* 32 bits */
unsigned int pin_count;
};
/*
* Initialization, self-test and debugging-output methods:
*/
-extern void lockdep_info(void);
+extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
+extern void lockdep_set_selftest_task(struct task_struct *task);
+
+extern void lockdep_init_task(struct task_struct *task);
-extern void lockdep_off(void);
-extern void lockdep_on(void);
+/*
+ * Split the recursion counter in two to readily detect 'off' vs recursion.
+ */
+#define LOCKDEP_RECURSION_BITS 16
+#define LOCKDEP_OFF (1U << LOCKDEP_RECURSION_BITS)
+#define LOCKDEP_RECURSION_MASK (LOCKDEP_OFF - 1)
+
+/*
+ * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
+ * to header dependencies.
+ */
+
+#define lockdep_off() \
+do { \
+ current->lockdep_recursion += LOCKDEP_OFF; \
+} while (0)
+
+#define lockdep_on() \
+do { \
+ current->lockdep_recursion -= LOCKDEP_OFF; \
+} while (0)
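
The split counter keeps both states in one word: lockdep_off() bumps the high 16 bits while genuine lockdep recursion lives in the low 16, so either condition can be read back with a shift or a mask. A minimal user-space sketch of the arithmetic (not kernel code):

    #include <assert.h>

    #define LOCKDEP_RECURSION_BITS  16
    #define LOCKDEP_OFF             (1U << LOCKDEP_RECURSION_BITS)
    #define LOCKDEP_RECURSION_MASK  (LOCKDEP_OFF - 1)

    int main(void)
    {
        unsigned int lockdep_recursion = 0;

        lockdep_recursion += LOCKDEP_OFF;  /* lockdep_off() */
        lockdep_recursion += 1;            /* one level of real recursion */

        /* 'off' depth and recursion depth stay independently readable */
        assert(lockdep_recursion >> LOCKDEP_RECURSION_BITS == 1);
        assert((lockdep_recursion & LOCKDEP_RECURSION_MASK) == 1);
        return 0;
    }
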
+
+extern void lockdep_register_key(struct lock_class_key *key);
+extern void lockdep_unregister_key(struct lock_class_key *key);
/*
* These methods are used by specific locking variants (spinlocks,
@@ -278,15 +182,28 @@ extern void lockdep_on(void);
* to lockdep:
*/
-extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
- struct lock_class_key *key, int subclass);
+extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);
-/*
- * To initialize a lockdep_map statically use this macro.
- * Note that _name must not be NULL.
- */
-#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
- { .name = (_name), .key = (void *)(_key), }
+static inline void
+lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass, u8 inner, u8 outer)
+{
+ lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
+}
+
+static inline void
+lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass, u8 inner)
+{
+ lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
+}
+
+static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass)
+{
+ lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
+}
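
The three wrappers above simply layer defaults onto lockdep_init_map_type(). A hedged sketch of how a lock primitive typically calls into this; "struct foo_lock" and foo_lock_init() are illustrative names only:

    #include <linux/lockdep.h>

    struct foo_lock {
        int                     owner;
        struct lockdep_map      dep_map;
    };

    /* one static key per initialization site => one lock class per site */
    #define foo_lock_init(l)                                            \
    do {                                                                \
        static struct lock_class_key __key;                             \
                                                                        \
        (l)->owner = 0;                                                 \
        /* wait types default to LD_WAIT_INV, type to LD_LOCK_NORMAL */ \
        lockdep_init_map(&(l)->dep_map, #l, &__key, 0);                 \
    } while (0)
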
/*
* Reinitialize a lock key - for cases where there is special locking or
@@ -294,18 +211,33 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
* of dependencies wrong: they are either too broad (they need a class-split)
* or they are too narrow (they suffer from a false class-split):
*/
-#define lockdep_set_class(lock, key) \
- lockdep_init_map(&(lock)->dep_map, #key, key, 0)
-#define lockdep_set_class_and_name(lock, key, name) \
- lockdep_init_map(&(lock)->dep_map, name, key, 0)
-#define lockdep_set_class_and_subclass(lock, key, sub) \
- lockdep_init_map(&(lock)->dep_map, #key, key, sub)
-#define lockdep_set_subclass(lock, sub) \
- lockdep_init_map(&(lock)->dep_map, #lock, \
- (lock)->dep_map.key, sub)
+#define lockdep_set_class(lock, key) \
+ lockdep_init_map_type(&(lock)->dep_map, #key, key, 0, \
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer, \
+ (lock)->dep_map.lock_type)
+
+#define lockdep_set_class_and_name(lock, key, name) \
+ lockdep_init_map_type(&(lock)->dep_map, name, key, 0, \
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer, \
+ (lock)->dep_map.lock_type)
+
+#define lockdep_set_class_and_subclass(lock, key, sub) \
+ lockdep_init_map_type(&(lock)->dep_map, #key, key, sub, \
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer, \
+ (lock)->dep_map.lock_type)
+
+#define lockdep_set_subclass(lock, sub) \
+ lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer, \
+ (lock)->dep_map.lock_type)
#define lockdep_set_novalidate_class(lock) \
lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
+
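A sketch of the class-split case the comment above describes: two nested locks of the same type get distinct keys so a parent->child nesting is not reported as a self-deadlock ("parent"/"child" are illustrative names):

    static struct lock_class_key parent_class;
    static struct lock_class_key child_class;

    static void foo_init_pair(spinlock_t *parent, spinlock_t *child)
    {
        spin_lock_init(parent);
        spin_lock_init(child);
        /* re-key after init; wait/lock types are preserved */
        lockdep_set_class(parent, &parent_class);
        lockdep_set_class(child, &child_class);
    }
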
/*
* Compare locking classes
*/
@@ -335,15 +267,23 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check,
struct lockdep_map *nest_lock, unsigned long ip);
-extern void lock_release(struct lockdep_map *lock, int nested,
- unsigned long ip);
+extern void lock_release(struct lockdep_map *lock, unsigned long ip);
+
+extern void lock_sync(struct lockdep_map *lock, unsigned int subclass,
+ int read, int check, struct lockdep_map *nest_lock,
+ unsigned long ip);
+
+/* lock_is_held_type() returns */
+#define LOCK_STATE_UNKNOWN -1
+#define LOCK_STATE_NOT_HELD 0
+#define LOCK_STATE_HELD 1
/*
* Same "read" as for lock_acquire(), except -1 means any.
*/
-extern int lock_is_held_type(struct lockdep_map *lock, int read);
+extern int lock_is_held_type(const struct lockdep_map *lock, int read);
-static inline int lock_is_held(struct lockdep_map *lock)
+static inline int lock_is_held(const struct lockdep_map *lock)
{
return lock_is_held_type(lock, -1);
}
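
Note that the result is effectively tri-state: once lockdep has shut itself off, lock_is_held() reports LOCK_STATE_UNKNOWN (-1), which is truthy, so callers should compare against the state they want to rule out rather than test the raw value. A sketch with a hypothetical struct foo:

    static void foo_update(struct foo *f)
    {
        /* fires only if the lock is positively known to be unheld */
        WARN_ON(lockdep_is_held(&f->lock) == LOCK_STATE_NOT_HELD);
    }
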
@@ -355,6 +295,9 @@ extern void lock_set_class(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, unsigned int subclass,
unsigned long ip);
+#define lock_set_novalidate_class(l, n, i) \
+ lock_set_class(l, n, &__lockdep_no_validate__, 0, i)
+
static inline void lock_set_subclass(struct lockdep_map *lock,
unsigned int subclass, unsigned long ip)
{
@@ -363,37 +306,37 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);
-extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
-extern void lockdep_clear_current_reclaim_state(void);
-extern void lockdep_trace_alloc(gfp_t mask);
-
-struct pin_cookie { unsigned int val; };
-
#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }
extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
-# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
-
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
-#define lockdep_assert_held(l) do { \
- WARN_ON(debug_locks && !lockdep_is_held(l)); \
- } while (0)
+#define lockdep_assert(cond) \
+ do { WARN_ON(debug_locks && !(cond)); } while (0)
+
+#define lockdep_assert_once(cond) \
+ do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0)
-#define lockdep_assert_held_exclusive(l) do { \
- WARN_ON(debug_locks && !lockdep_is_held_type(l, 0)); \
- } while (0)
+#define lockdep_assert_held(l) \
+ lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
-#define lockdep_assert_held_read(l) do { \
- WARN_ON(debug_locks && !lockdep_is_held_type(l, 1)); \
- } while (0)
+#define lockdep_assert_not_held(l) \
+ lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)
-#define lockdep_assert_held_once(l) do { \
- WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \
- } while (0)
+#define lockdep_assert_held_write(l) \
+ lockdep_assert(lockdep_is_held_type(l, 0))
+
+#define lockdep_assert_held_read(l) \
+ lockdep_assert(lockdep_is_held_type(l, 1))
+
+#define lockdep_assert_held_once(l) \
+ lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
+
+#define lockdep_assert_none_held_once() \
+ lockdep_assert_once(!current->lockdep_depth)
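
Usage sketch for the assertion family, with hypothetical names; the point is to encode a function's locking contract at its entry:

    static void foo_queue_work(struct foo *f)
    {
        lockdep_assert_held(&f->lock);      /* caller must hold f->lock */
        list_add_tail(&f->work, &f->queue);
    }
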
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
@@ -403,6 +346,10 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
#else /* !CONFIG_LOCKDEP */
+static inline void lockdep_init_task(struct task_struct *task)
+{
+}
+
static inline void lockdep_off(void)
{
}
@@ -411,15 +358,23 @@ static inline void lockdep_on(void)
{
}
+static inline void lockdep_set_selftest_task(struct task_struct *task)
+{
+}
+
# define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
-# define lock_release(l, n, i) do { } while (0)
+# define lock_release(l, i) do { } while (0)
# define lock_downgrade(l, i) do { } while (0)
-# define lock_set_class(l, n, k, s, i) do { } while (0)
+# define lock_set_class(l, n, key, s, i) do { (void)(key); } while (0)
+# define lock_set_novalidate_class(l, n, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
-# define lockdep_set_current_reclaim_state(g) do { } while (0)
-# define lockdep_clear_current_reclaim_state() do { } while (0)
-# define lockdep_trace_alloc(g) do { } while (0)
-# define lockdep_info() do { } while (0)
+# define lockdep_init() do { } while (0)
+# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
+ do { (void)(name); (void)(key); } while (0)
+# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
+ do { (void)(name); (void)(key); } while (0)
+# define lockdep_init_map_wait(lock, name, key, sub, inner) \
+ do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
@@ -437,36 +392,64 @@ static inline void lockdep_on(void)
 * #ifdef the call themselves.
*/
-# define INIT_LOCKDEP
# define lockdep_reset() do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size) do { } while (0)
# define lockdep_sys_exit() do { } while (0)
-/*
- * The class key takes no space if lockdep is disabled:
- */
-struct lock_class_key { };
+
+static inline void lockdep_register_key(struct lock_class_key *key)
+{
+}
+
+static inline void lockdep_unregister_key(struct lock_class_key *key)
+{
+}
#define lockdep_depth(tsk) (0)
+/*
+ * Dummy forward declarations, allow users to write less ifdef-y code
+ * and depend on dead code elimination.
+ */
+extern int lock_is_held(const void *);
+extern int lockdep_is_held(const void *);
#define lockdep_is_held_type(l, r) (1)
+#define lockdep_assert(c) do { } while (0)
+#define lockdep_assert_once(c) do { } while (0)
+
#define lockdep_assert_held(l) do { (void)(l); } while (0)
-#define lockdep_assert_held_exclusive(l) do { (void)(l); } while (0)
+#define lockdep_assert_not_held(l) do { (void)(l); } while (0)
+#define lockdep_assert_held_write(l) do { (void)(l); } while (0)
#define lockdep_assert_held_read(l) do { (void)(l); } while (0)
#define lockdep_assert_held_once(l) do { (void)(l); } while (0)
+#define lockdep_assert_none_held_once() do { } while (0)
#define lockdep_recursing(tsk) (0)
-struct pin_cookie { };
-
#define NIL_COOKIE (struct pin_cookie){ }
-#define lockdep_pin_lock(l) ({ struct pin_cookie cookie; cookie; })
+#define lockdep_pin_lock(l) ({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0)
#endif /* !LOCKDEP */
+enum xhlock_context_t {
+ XHLOCK_HARD,
+ XHLOCK_SOFT,
+ XHLOCK_CTX_NR,
+};
+
+/*
+ * To initialize a lockdep_map statically use this macro.
+ * Note that _name must not be NULL.
+ */
+#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
+ { .name = (_name), .key = (void *)(_key), }
+
+static inline void lockdep_invariant_state(bool force) {}
+static inline void lockdep_free_task(struct task_struct *task) {}
+
#ifdef CONFIG_LOCK_STAT
extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
@@ -506,24 +489,7 @@ do { \
#endif /* CONFIG_LOCK_STAT */
-#ifdef CONFIG_LOCKDEP
-
-/*
- * On lockdep we dont want the hand-coded irq-enable of
- * _raw_*_lock_flags() code, because lockdep assumes
- * that interrupts are not re-enabled during lock-acquire:
- */
-#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
- LOCK_CONTENDED((_lock), (try), (lock))
-
-#else /* CONFIG_LOCKDEP */
-
-#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
- lockfl((_lock), (flags))
-
-#endif /* CONFIG_LOCKDEP */
-
-#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
@@ -531,6 +497,20 @@ static inline void print_irqtrace_events(struct task_struct *curr)
}
#endif
+/* Variable used to make lockdep treat read_lock() as recursive in selftests */
+#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
+extern unsigned int force_read_lock_recursive;
+#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
+#define force_read_lock_recursive 0
+#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
+
+#ifdef CONFIG_LOCKDEP
+extern bool read_lock_is_recursive(void);
+#else /* CONFIG_LOCKDEP */
+/* If !LOCKDEP, the value is meaningless */
+#define read_lock_is_recursive() 0
+#endif
+
/*
* For trivial one-depth nesting of a lock-class, the following
* global define can be used. (Subsystems with multiple levels
@@ -549,46 +529,133 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
-#define spin_release(l, n, i) lock_release(l, n, i)
+#define spin_release(l, i) lock_release(l, i)
#define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
-#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
-#define rwlock_release(l, n, i) lock_release(l, n, i)
+#define rwlock_acquire_read(l, s, t, i) \
+do { \
+ if (read_lock_is_recursive()) \
+ lock_acquire_shared_recursive(l, s, t, NULL, i); \
+ else \
+ lock_acquire_shared(l, s, t, NULL, i); \
+} while (0)
+
+#define rwlock_release(l, i) lock_release(l, i)
#define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
-#define seqcount_release(l, n, i) lock_release(l, n, i)
+#define seqcount_release(l, i) lock_release(l, i)
#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
-#define mutex_release(l, n, i) lock_release(l, n, i)
+#define mutex_release(l, i) lock_release(l, i)
#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i)
-#define rwsem_release(l, n, i) lock_release(l, n, i)
+#define rwsem_release(l, i) lock_release(l, i)
#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
-#define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
+#define lock_map_release(l) lock_release(l, _THIS_IP_)
+#define lock_map_sync(l) lock_sync(l, 0, 0, 1, NULL, _THIS_IP_)
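
STATIC_LOCKDEP_MAP_INIT (defined above) together with lock_map_acquire()/lock_map_release() lets a construct that is not a lock participate in dependency tracking; the workqueue flush annotations use this shape. A hedged sketch with illustrative names:

    static struct lock_class_key foo_flush_key;
    static struct lockdep_map foo_flush_map =
        STATIC_LOCKDEP_MAP_INIT("foo_flush", &foo_flush_key);

    static void foo_flush(void)
    {
        lock_map_acquire(&foo_flush_map);   /* pseudo-lock: waiting side */
        /* ... wait for all outstanding foo work to drain ... */
        lock_map_release(&foo_flush_map);
    }
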
#ifdef CONFIG_PROVE_LOCKING
-# define might_lock(lock) \
+# define might_lock(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \
- lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
+ lock_release(&(lock)->dep_map, _THIS_IP_); \
} while (0)
-# define might_lock_read(lock) \
+# define might_lock_read(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
- lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
+ lock_release(&(lock)->dep_map, _THIS_IP_); \
+} while (0)
+# define might_lock_nested(lock, subclass) \
+do { \
+ typecheck(struct lockdep_map *, &(lock)->dep_map); \
+ lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL, \
+ _THIS_IP_); \
+ lock_release(&(lock)->dep_map, _THIS_IP_); \
+} while (0)
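
might_lock*() let a fast path that usually avoids a lock still declare the dependency unconditionally, so lockdep validates every caller, not just the ones that happen to hit the slow path. Sketch with hypothetical names:

    static void foo_get(struct foo *f)
    {
        might_lock(&f->mutex);          /* dependency recorded even here */

        if (atomic_inc_not_zero(&f->refs))
            return;                     /* fast path: lock avoided */

        mutex_lock(&f->mutex);
        /* ... slow path under the mutex ... */
        mutex_unlock(&f->mutex);
    }
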
+
+DECLARE_PER_CPU(int, hardirqs_enabled);
+DECLARE_PER_CPU(int, hardirq_context);
+DECLARE_PER_CPU(unsigned int, lockdep_recursion);
+
+#define __lockdep_enabled (debug_locks && !this_cpu_read(lockdep_recursion))
+
+#define lockdep_assert_irqs_enabled() \
+do { \
+ WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
+} while (0)
+
+#define lockdep_assert_irqs_disabled() \
+do { \
+ WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)
+
+#define lockdep_assert_in_irq() \
+do { \
+ WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
+} while (0)
+
+#define lockdep_assert_preemption_enabled() \
+do { \
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
+ __lockdep_enabled && \
+ (preempt_count() != 0 || \
+ !this_cpu_read(hardirqs_enabled))); \
+} while (0)
+
+#define lockdep_assert_preemption_disabled() \
+do { \
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
+ __lockdep_enabled && \
+ (preempt_count() == 0 && \
+ this_cpu_read(hardirqs_enabled))); \
+} while (0)
+
+/*
+ * Acceptable for protecting per-CPU resources accessed from BH.
+ * Much like in_softirq() - semantics are ambiguous, use carefully.
+ */
+#define lockdep_assert_in_softirq() \
+do { \
+ WARN_ON_ONCE(__lockdep_enabled && \
+ (!in_softirq() || in_irq() || in_nmi())); \
+} while (0)
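
Usage sketch for the context assertions (a hypothetical timer callback); all of them gate on __lockdep_enabled, so they stay quiet once lockdep has turned itself off:

    static void foo_timer_fn(struct timer_list *t)
    {
        lockdep_assert_in_softirq();        /* timer callbacks run in BH */
        lockdep_assert_preemption_disabled();
    }
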
+
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
+# define might_lock_nested(lock, subclass) do { } while (0)
+
+# define lockdep_assert_irqs_enabled() do { } while (0)
+# define lockdep_assert_irqs_disabled() do { } while (0)
+# define lockdep_assert_in_irq() do { } while (0)
+
+# define lockdep_assert_preemption_enabled() do { } while (0)
+# define lockdep_assert_preemption_disabled() do { } while (0)
+# define lockdep_assert_in_softirq() do { } while (0)
+#endif
+
+#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
+
+# define lockdep_assert_RT_in_threaded_ctx() do { \
+ WARN_ONCE(debug_locks && !current->lockdep_recursion && \
+ lockdep_hardirq_context() && \
+ !(current->hardirq_threaded || current->irq_config), \
+ "Not in threaded context on PREEMPT_RT as expected\n"); \
+} while (0)
+
+#else
+
+# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)
+
#endif
#ifdef CONFIG_LOCKDEP
diff --git a/include/linux/lockdep_api.h b/include/linux/lockdep_api.h
new file mode 100644
index 000000000000..907e66979ab2
--- /dev/null
+++ b/include/linux/lockdep_api.h
@@ -0,0 +1 @@
+#include <linux/lockdep.h>
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
new file mode 100644
index 000000000000..d22430840b53
--- /dev/null
+++ b/include/linux/lockdep_types.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Runtime locking correctness validator
+ *
+ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
+ *
+ * see Documentation/locking/lockdep-design.rst for more details.
+ */
+#ifndef __LINUX_LOCKDEP_TYPES_H
+#define __LINUX_LOCKDEP_TYPES_H
+
+#include <linux/types.h>
+
+#define MAX_LOCKDEP_SUBCLASSES 8UL
+
+enum lockdep_wait_type {
+ LD_WAIT_INV = 0, /* not checked, catch all */
+
+ LD_WAIT_FREE, /* wait free, rcu etc.. */
+ LD_WAIT_SPIN, /* spin loops, raw_spinlock_t etc.. */
+
+#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
+ LD_WAIT_CONFIG, /* preemptible in PREEMPT_RT, spinlock_t etc.. */
+#else
+ LD_WAIT_CONFIG = LD_WAIT_SPIN,
+#endif
+ LD_WAIT_SLEEP, /* sleeping locks, mutex_t etc.. */
+
+ LD_WAIT_MAX, /* must be last */
+};
+
+enum lockdep_lock_type {
+ LD_LOCK_NORMAL = 0, /* normal, catch all */
+ LD_LOCK_PERCPU, /* percpu */
+ LD_LOCK_MAX,
+};
+
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
+ * the total number of states... :-(
+ *
+ * XXX_LOCK_USAGE_STATES is the number of lines in lockdep_states.h, for each
+ * of those we generate 4 states. Additionally we report on USED and USED_READ.
+ */
+#define XXX_LOCK_USAGE_STATES 2
+#define LOCK_TRACE_STATES (XXX_LOCK_USAGE_STATES*4 + 2)
+
+/*
+ * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
+ * cached in the instance of lockdep_map
+ *
+ * Currently main class (subclass == 0) and single depth subclass
+ * are cached in lockdep_map. This optimization mainly targets
+ * rq->lock: double_rq_lock() acquires it at single depth under
+ * heavy contention.
+ */
+#define NR_LOCKDEP_CACHING_CLASSES 2
+
+/*
+ * A lockdep key is associated with each lock object. For static locks we use
+ * the lock address itself as the key. Dynamically allocated lock objects can
+ * have a statically or dynamically allocated key. Dynamically allocated lock
+ * keys must be registered before being used and must be unregistered before
+ * the key memory is freed.
+ */
+struct lockdep_subclass_key {
+ char __one_byte;
+} __attribute__ ((__packed__));
+
+/* hash_entry is used to keep track of dynamically allocated keys. */
+struct lock_class_key {
+ union {
+ struct hlist_node hash_entry;
+ struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
+ };
+};
+
+extern struct lock_class_key __lockdep_no_validate__;
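+
A sketch of the dynamic-key protocol spelled out above: a key embedded in heap memory is registered before first use and unregistered before the memory is freed (struct foo_dev and its helpers are illustrative):

    struct foo_dev {
        spinlock_t              lock;
        struct lock_class_key   key;
    };

    static struct foo_dev *foo_dev_alloc(void)
    {
        struct foo_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);

        if (!d)
            return NULL;
        lockdep_register_key(&d->key);
        spin_lock_init(&d->lock);
        lockdep_set_class(&d->lock, &d->key);
        return d;
    }

    static void foo_dev_free(struct foo_dev *d)
    {
        lockdep_unregister_key(&d->key);    /* before the key memory dies */
        kfree(d);
    }
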
+
+struct lock_trace;
+
+#define LOCKSTAT_POINTS 4
+
+/*
+ * The lock-class itself. The order of the structure members matters.
+ * reinit_class() zeroes the key member and all subsequent members.
+ */
+struct lock_class {
+ /*
+ * class-hash:
+ */
+ struct hlist_node hash_entry;
+
+ /*
+ * Entry in all_lock_classes when in use. Entry in free_lock_classes
+ * when not in use. Instances that are being freed are on one of the
+ * zapped_classes lists.
+ */
+ struct list_head lock_entry;
+
+ /*
+ * These fields represent a directed graph of lock dependencies,
+ * to every node we attach a list of "forward" and a list of
+ * "backward" graph nodes.
+ */
+ struct list_head locks_after, locks_before;
+
+ const struct lockdep_subclass_key *key;
+ unsigned int subclass;
+ unsigned int dep_gen_id;
+
+ /*
+ * IRQ/softirq usage tracking bits:
+ */
+ unsigned long usage_mask;
+ const struct lock_trace *usage_traces[LOCK_TRACE_STATES];
+
+ /*
+ * Generation counter, when doing certain classes of graph walking,
+ * to ensure that we check one node only once:
+ */
+ int name_version;
+ const char *name;
+
+ u8 wait_type_inner;
+ u8 wait_type_outer;
+ u8 lock_type;
+ /* u8 hole; */
+
+#ifdef CONFIG_LOCK_STAT
+ unsigned long contention_point[LOCKSTAT_POINTS];
+ unsigned long contending_point[LOCKSTAT_POINTS];
+#endif
+} __no_randomize_layout;
+
+#ifdef CONFIG_LOCK_STAT
+struct lock_time {
+ s64 min;
+ s64 max;
+ s64 total;
+ unsigned long nr;
+};
+
+enum bounce_type {
+ bounce_acquired_write,
+ bounce_acquired_read,
+ bounce_contended_write,
+ bounce_contended_read,
+ nr_bounce_types,
+
+ bounce_acquired = bounce_acquired_write,
+ bounce_contended = bounce_contended_write,
+};
+
+struct lock_class_stats {
+ unsigned long contention_point[LOCKSTAT_POINTS];
+ unsigned long contending_point[LOCKSTAT_POINTS];
+ struct lock_time read_waittime;
+ struct lock_time write_waittime;
+ struct lock_time read_holdtime;
+ struct lock_time write_holdtime;
+ unsigned long bounces[nr_bounce_types];
+};
+
+struct lock_class_stats lock_stats(struct lock_class *class);
+void clear_lock_stats(struct lock_class *class);
+#endif
+
+/*
+ * Map the lock object (the lock instance) to the lock-class object.
+ * This is embedded into specific lock instances:
+ */
+struct lockdep_map {
+ struct lock_class_key *key;
+ struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
+ const char *name;
+ u8 wait_type_outer; /* can be taken in this context */
+ u8 wait_type_inner; /* presents this context */
+ u8 lock_type;
+ /* u8 hole; */
+#ifdef CONFIG_LOCK_STAT
+ int cpu;
+ unsigned long ip;
+#endif
+};
+
+struct pin_cookie { unsigned int val; };
+
+#else /* !CONFIG_LOCKDEP */
+
+/*
+ * The class key takes no space if lockdep is disabled:
+ */
+struct lock_class_key { };
+
+/*
+ * The lockdep_map takes no space if lockdep is disabled:
+ */
+struct lockdep_map { };
+
+struct pin_cookie { };
+
+#endif /* !LOCKDEP */
+
+#endif /* __LINUX_LOCKDEP_TYPES_H */
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index b10b122dd099..c3a1f78bc884 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H
@@ -36,14 +37,14 @@ struct lockref {
extern void lockref_get(struct lockref *);
extern int lockref_put_return(struct lockref *);
extern int lockref_get_not_zero(struct lockref *);
-extern int lockref_get_or_lock(struct lockref *);
+extern int lockref_put_not_zero(struct lockref *);
extern int lockref_put_or_lock(struct lockref *);
extern void lockref_mark_dead(struct lockref *);
extern int lockref_get_not_dead(struct lockref *);
/* Must be called under spinlock for reliable results */
-static inline int __lockref_is_dead(const struct lockref *l)
+static inline bool __lockref_is_dead(const struct lockref *l)
{
return ((int)l->count < 0);
}
diff --git a/include/linux/log2.h b/include/linux/log2.h
index c373295f359f..9f30d087a128 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Integer base 2 logarithm calculation
*
* Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_LOG2_H
@@ -22,7 +18,7 @@
* - the arch is not required to handle n==0 if implementing the fallback
*/
#ifndef CONFIG_ARCH_HAS_ILOG2_U32
-static inline __attribute__((const))
+static __always_inline __attribute__((const))
int __ilog2_u32(u32 n)
{
return fls(n) - 1;
@@ -30,26 +26,30 @@ int __ilog2_u32(u32 n)
#endif
#ifndef CONFIG_ARCH_HAS_ILOG2_U64
-static inline __attribute__((const))
+static __always_inline __attribute__((const))
int __ilog2_u64(u64 n)
{
return fls64(n) - 1;
}
#endif
-/*
- * Determine whether some value is a power of two, where zero is
+/**
+ * is_power_of_2() - check if a value is a power of two
+ * @n: the value to check
+ *
+ * Determine whether some value is a power of two, where zero is
* *not* considered a power of two.
+ * Return: true if @n is a power of 2, otherwise false.
*/
-
static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
return (n != 0 && ((n & (n - 1)) == 0));
}
-/*
- * round up to nearest power of two
+/**
+ * __roundup_pow_of_two() - round up to nearest power of two
+ * @n: value to round up
*/
static inline __attribute__((const))
unsigned long __roundup_pow_of_two(unsigned long n)
@@ -57,8 +57,9 @@ unsigned long __roundup_pow_of_two(unsigned long n)
return 1UL << fls_long(n - 1);
}
-/*
- * round down to nearest power of two
+/**
+ * __rounddown_pow_of_two() - round down to nearest power of two
+ * @n: value to round down
*/
static inline __attribute__((const))
unsigned long __rounddown_pow_of_two(unsigned long n)
@@ -67,16 +68,13 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
}
/**
- * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
- * @n - parameter
- *
- * constant-capable log of base 2 calculation
- * - this can be used to initialise global variables from constant data, hence
- * the massive ternary operator construction
+ * const_ilog2 - log base 2 of 32-bit or a 64-bit constant unsigned value
+ * @n: parameter
*
- * selects the appropriately-sized optimised version depending on sizeof(n)
+ * Use this where sparse expects a true constant expression, e.g. for array
+ * indices.
*/
-#define ilog2(n) \
+#define const_ilog2(n) \
( \
__builtin_constant_p(n) ? ( \
(n) < 2 ? 0 : \
@@ -142,15 +140,32 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
(n) & (1ULL << 4) ? 4 : \
(n) & (1ULL << 3) ? 3 : \
(n) & (1ULL << 2) ? 2 : \
- 1 ) : \
- (sizeof(n) <= 4) ? \
- __ilog2_u32(n) : \
- __ilog2_u64(n) \
+ 1) : \
+ -1)
+
+/**
+ * ilog2 - log base 2 of 32-bit or a 64-bit unsigned value
+ * @n: parameter
+ *
+ * constant-capable log of base 2 calculation
+ * - this can be used to initialise global variables from constant data, hence
+ * the massive ternary operator construction
+ *
+ * selects the appropriately-sized optimised version depending on sizeof(n)
+ */
+#define ilog2(n) \
+( \
+ __builtin_constant_p(n) ? \
+ ((n) < 2 ? 0 : \
+ 63 - __builtin_clzll(n)) : \
+ (sizeof(n) <= 4) ? \
+ __ilog2_u32(n) : \
+ __ilog2_u64(n) \
)
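
Because the constant branch folds at compile time, ilog2() can initialize globals, and const_ilog2() can appear where sparse demands a true constant expression. A short sketch (RING_SIZE is illustrative):

    #define RING_SIZE   64
    static unsigned int ring_shift = ilog2(RING_SIZE);      /* folds to 6 */
    static u8 level_name[const_ilog2(RING_SIZE) + 1];       /* bound is 7 */
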
/**
* roundup_pow_of_two - round the given value up to nearest power of two
- * @n - parameter
+ * @n: parameter
*
* round the given value up to the nearest power of two
* - the result is undefined when n == 0
@@ -159,7 +174,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
#define roundup_pow_of_two(n) \
( \
__builtin_constant_p(n) ? ( \
- (n == 1) ? 1 : \
+ ((n) == 1) ? 1 : \
(1UL << (ilog2((n) - 1) + 1)) \
) : \
__roundup_pow_of_two(n) \
@@ -167,7 +182,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
/**
* rounddown_pow_of_two - round the given value down to nearest power of two
- * @n - parameter
+ * @n: parameter
*
* round the given value down to the nearest power of two
* - the result is undefined when n == 0
@@ -180,6 +195,12 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
__rounddown_pow_of_two(n) \
)
+static inline __attribute_const__
+int __order_base_2(unsigned long n)
+{
+ return n > 1 ? ilog2(n - 1) + 1 : 0;
+}
+
/**
* order_base_2 - calculate the (rounded up) base 2 order of the argument
* @n: parameter
@@ -193,13 +214,6 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
* ob2(5) = 3
* ... and so on.
*/
-
-static inline __attribute_const__
-int __order_base_2(unsigned long n)
-{
- return n > 1 ? ilog2(n - 1) + 1 : 0;
-}
-
#define order_base_2(n) \
( \
__builtin_constant_p(n) ? ( \
@@ -207,4 +221,38 @@ int __order_base_2(unsigned long n)
ilog2((n) - 1) + 1) : \
__order_base_2(n) \
)
+
+static inline __attribute__((const))
+int __bits_per(unsigned long n)
+{
+ if (n < 2)
+ return 1;
+ if (is_power_of_2(n))
+ return order_base_2(n) + 1;
+ return order_base_2(n);
+}
+
+/**
+ * bits_per - calculate the number of bits required for the argument
+ * @n: parameter
+ *
+ * This is constant-capable and can be used for compile time
+ * initializations, e.g bitfields.
+ *
+ * The first few values calculated by this routine:
+ * bf(0) = 1
+ * bf(1) = 1
+ * bf(2) = 2
+ * bf(3) = 2
+ * bf(4) = 3
+ * ... and so on.
+ */
+#define bits_per(n) \
+( \
+ __builtin_constant_p(n) ? ( \
+ ((n) == 0 || (n) == 1) \
+ ? 1 : ilog2(n) + 1 \
+ ) : \
+ __bits_per(n) \
+)
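
Compile-time spot checks of the documented values, assuming the kernel's static_assert() from <linux/build_bug.h>:

    static_assert(order_base_2(4) == 2 && order_base_2(5) == 3);
    static_assert(bits_per(0) == 1 && bits_per(1) == 1);
    static_assert(bits_per(4) == 3 && bits_per(255) == 8 && bits_per(256) == 9);
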
#endif /* _LINUX_LOG2_H */
diff --git a/include/linux/logic_iomem.h b/include/linux/logic_iomem.h
new file mode 100644
index 000000000000..3fa65c964379
--- /dev/null
+++ b/include/linux/logic_iomem.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: johannes@sipsolutions.net
+ */
+#ifndef __LOGIC_IOMEM_H
+#define __LOGIC_IOMEM_H
+#include <linux/types.h>
+#include <linux/ioport.h>
+
+/**
+ * struct logic_iomem_ops - emulated IO memory ops
+ * @read: read an 8, 16, 32 or 64 bit quantity from the given offset,
+ * size is given in bytes (1, 2, 4 or 8)
+ * (64-bit only necessary if CONFIG_64BIT is set)
+ * @write: write an 8, 16, 32 or 64 bit quantity to the given offset,
+ * size is given in bytes (1, 2, 4 or 8)
+ * (64-bit only necessary if CONFIG_64BIT is set)
+ * @set: optional, for memset_io()
+ * @copy_from: optional, for memcpy_fromio()
+ * @copy_to: optional, for memcpy_toio()
+ * @unmap: optional, this region is getting unmapped
+ */
+struct logic_iomem_ops {
+ unsigned long (*read)(void *priv, unsigned int offset, int size);
+ void (*write)(void *priv, unsigned int offset, int size,
+ unsigned long val);
+
+ void (*set)(void *priv, unsigned int offset, u8 value, int size);
+ void (*copy_from)(void *priv, void *buffer, unsigned int offset,
+ int size);
+ void (*copy_to)(void *priv, unsigned int offset, const void *buffer,
+ int size);
+
+ void (*unmap)(void *priv);
+};
+
+/**
+ * struct logic_iomem_region_ops - ops for an IO memory handler
+ * @map: map a range in the registered IO memory region, must
+ * fill *ops with the ops and may fill *priv to be passed
+ * to the ops. The offset is given as the offset into the
+ * registered resource region.
+ * The return value is negative for errors, or >= 0 for
+ * success. On success, the return value is added to the
+ * offset for later ops, to allow for partial mappings.
+ */
+struct logic_iomem_region_ops {
+ long (*map)(unsigned long offset, size_t size,
+ const struct logic_iomem_ops **ops,
+ void **priv);
+};
+
+/**
+ * logic_iomem_add_region - register an IO memory region
+ * @resource: the resource description for this region
+ * @ops: the IO memory mapping ops for this resource
+ */
+int logic_iomem_add_region(struct resource *resource,
+ const struct logic_iomem_region_ops *ops);
+
+#endif /* __LOGIC_IOMEM_H */
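
A hedged sketch of a trivial backend for this API: a region whose reads and writes land in a plain memory buffer. Names are illustrative; locking, bounds checks and endianness handling are omitted:

    static u8 backing_buffer[0x10000];

    static unsigned long buf_read(void *priv, unsigned int offset, int size)
    {
        unsigned long val = 0;

        memcpy(&val, (u8 *)priv + offset, size);   /* size is 1, 2, 4 or 8 */
        return val;
    }

    static void buf_write(void *priv, unsigned int offset, int size,
                          unsigned long val)
    {
        memcpy((u8 *)priv + offset, &val, size);
    }

    static const struct logic_iomem_ops buf_ops = {
        .read   = buf_read,
        .write  = buf_write,
        /* .set/.copy_from/.copy_to/.unmap are optional, see kernel-doc */
    };

    static long buf_map(unsigned long offset, size_t size,
                        const struct logic_iomem_ops **ops, void **priv)
    {
        *ops  = &buf_ops;
        *priv = backing_buffer + offset;
        return 0;       /* no extra offset added for later ops */
    }

    static const struct logic_iomem_region_ops buf_region_ops = {
        .map = buf_map,
    };
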
diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h
new file mode 100644
index 000000000000..54945aa824b4
--- /dev/null
+++ b/include/linux/logic_pio.h
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 HiSilicon Limited, All Rights Reserved.
+ * Author: Gabriele Paoloni <gabriele.paoloni@huawei.com>
+ * Author: Zhichang Yuan <yuanzhichang@hisilicon.com>
+ */
+
+#ifndef __LINUX_LOGIC_PIO_H
+#define __LINUX_LOGIC_PIO_H
+
+#include <linux/fwnode.h>
+
+enum {
+ LOGIC_PIO_INDIRECT, /* Indirect IO flag */
+ LOGIC_PIO_CPU_MMIO, /* Memory-mapped IO flag */
+};
+
+struct logic_pio_hwaddr {
+ struct list_head list;
+ struct fwnode_handle *fwnode;
+ resource_size_t hw_start;
+ resource_size_t io_start;
+ resource_size_t size; /* range size populated */
+ unsigned long flags;
+
+ void *hostdata;
+ const struct logic_pio_host_ops *ops;
+};
+
+struct logic_pio_host_ops {
+ u32 (*in)(void *hostdata, unsigned long addr, size_t dwidth);
+ void (*out)(void *hostdata, unsigned long addr, u32 val,
+ size_t dwidth);
+ u32 (*ins)(void *hostdata, unsigned long addr, void *buffer,
+ size_t dwidth, unsigned int count);
+ void (*outs)(void *hostdata, unsigned long addr, const void *buffer,
+ size_t dwidth, unsigned int count);
+};
+
+#ifdef CONFIG_INDIRECT_PIO
+u8 logic_inb(unsigned long addr);
+void logic_outb(u8 value, unsigned long addr);
+void logic_outw(u16 value, unsigned long addr);
+void logic_outl(u32 value, unsigned long addr);
+u16 logic_inw(unsigned long addr);
+u32 logic_inl(unsigned long addr);
+void logic_insb(unsigned long addr, void *buffer, unsigned int count);
+void logic_insl(unsigned long addr, void *buffer, unsigned int count);
+void logic_insw(unsigned long addr, void *buffer, unsigned int count);
+void logic_outsb(unsigned long addr, const void *buffer, unsigned int count);
+void logic_outsw(unsigned long addr, const void *buffer, unsigned int count);
+void logic_outsl(unsigned long addr, const void *buffer, unsigned int count);
+
+#ifndef inb
+#define inb logic_inb
+#endif
+
+#ifndef inw
+#define inw logic_inw
+#endif
+
+#ifndef inl
+#define inl logic_inl
+#endif
+
+#ifndef outb
+#define outb logic_outb
+#endif
+
+#ifndef outw
+#define outw logic_outw
+#endif
+
+#ifndef outl
+#define outl logic_outl
+#endif
+
+#ifndef insb
+#define insb logic_insb
+#endif
+
+#ifndef insw
+#define insw logic_insw
+#endif
+
+#ifndef insl
+#define insl logic_insl
+#endif
+
+#ifndef outsb
+#define outsb logic_outsb
+#endif
+
+#ifndef outsw
+#define outsw logic_outsw
+#endif
+
+#ifndef outsl
+#define outsl logic_outsl
+#endif
+
+/*
+ * We reserve 0x4000 bytes for Indirect IO; so far, this library is only
+ * used by the HiSilicon LPC Host. If needed, we can reserve a wider IO
+ * area by redefining the macro below.
+ */
+#define PIO_INDIRECT_SIZE 0x4000
+#else
+#define PIO_INDIRECT_SIZE 0
+#endif /* CONFIG_INDIRECT_PIO */
+#define MMIO_UPPER_LIMIT (IO_SPACE_LIMIT - PIO_INDIRECT_SIZE)
+
+struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode);
+unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
+ resource_size_t hw_addr, resource_size_t size);
+int logic_pio_register_range(struct logic_pio_hwaddr *newrange);
+void logic_pio_unregister_range(struct logic_pio_hwaddr *range);
+resource_size_t logic_pio_to_hwaddr(unsigned long pio);
+unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr);
+
+#endif /* __LINUX_LOGIC_PIO_H */
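
Registration sketch: a host controller fills in a logic_pio_hwaddr for its indirect-IO window and registers it, after which logical PIO tokens route in()/out() accesses to its callbacks. struct foo_host and foo_pio_ops are illustrative (the HiSilicon LPC driver is the in-tree model):

    static int foo_host_probe(struct foo_host *host)
    {
        struct logic_pio_hwaddr *range;

        range = devm_kzalloc(host->dev, sizeof(*range), GFP_KERNEL);
        if (!range)
            return -ENOMEM;

        range->fwnode   = host->dev->fwnode;
        range->flags    = LOGIC_PIO_INDIRECT;
        range->size     = PIO_INDIRECT_SIZE;
        range->hostdata = host;
        range->ops      = &foo_pio_ops;     /* in/out/ins/outs callbacks */

        return logic_pio_register_range(range);
    }
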
diff --git a/include/linux/lp.h b/include/linux/lp.h
index 0dd276af9e4e..be8a07eb2083 100644
--- a/include/linux/lp.h
+++ b/include/linux/lp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* usr/include/linux/lp.h c.1991-1992 James Wiegand
* many modifications copyright (C) 1992 Michael K. Johnson
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
index 04fc6e6c7ff0..c9afcdd9324c 100644
--- a/include/linux/lru_cache.h
+++ b/include/linux/lru_cache.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
lru_cache.c
@@ -7,19 +8,6 @@
Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
- drbd is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- drbd is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with drbd; see the file COPYING. If not, write to
- the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
@@ -44,7 +32,7 @@ This header file (and its .c file; kernel-doc of functions see there)
Because of this later property, it is called "lru_cache".
As it actually Tracks Objects in an Active SeT, we could also call it
toast (incidentally that is what may happen to the data on the
- backend storage uppon next resync, if we don't get it right).
+ backend storage upon next resync, if we don't get it right).
What for?
@@ -164,7 +152,7 @@ struct lc_element {
* for paranoia, and for "lc_element_to_index" */
unsigned lc_index;
/* if we want to track a larger set of objects,
- * it needs to become arch independend u64 */
+ * it needs to become an architecture independent u64 */
unsigned lc_number;
/* special label when on free list */
#define LC_FREE (~0U)
@@ -211,7 +199,6 @@ struct lru_cache {
unsigned long flags;
- void *lc_private;
const char *name;
/* nr_elements there */
@@ -253,7 +240,6 @@ extern struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
unsigned e_count, size_t e_size, size_t e_off);
extern void lc_reset(struct lru_cache *lc);
extern void lc_destroy(struct lru_cache *lc);
-extern void lc_set(struct lru_cache *lc, unsigned int enr, int index);
extern void lc_del(struct lru_cache *lc, struct lc_element *element);
extern struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr);
@@ -275,7 +261,7 @@ extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char
*
* Allows (expects) the set to be "dirty". Note that the reference counts and
* order on the active and lru lists may still change. Used to serialize
- * changing transactions. Returns true if we aquired the lock.
+ * changing transactions. Returns true if we acquired the lock.
*/
static inline int lc_try_lock_for_transaction(struct lru_cache *lc)
{
@@ -287,7 +273,7 @@ static inline int lc_try_lock_for_transaction(struct lru_cache *lc)
* @lc: the lru cache to operate on
*
* Note that the reference counts and order on the active and lru lists may
- * still change. Only works on a "clean" set. Returns true if we aquired the
+ * still change. Only works on a "clean" set. Returns true if we acquired the
* lock, which means there are no pending changes, and any further attempt to
* change the set will not succeed until the next lc_unlock().
*/
@@ -309,6 +295,5 @@ extern bool lc_is_used(struct lru_cache *lc, unsigned int enr);
container_of(ptr, type, member)
extern struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i);
-extern unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e);
#endif
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index 22b5d4e687ce..97a8b21eb033 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -1,10 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Common LSM logging functions
* Heavily borrowed from selinux/avc.h
*
* Author : Etienne BASSET <etienne.basset@ensta.org>
*
- * All credits to : Stephen Smalley, <sds@epoch.ncsc.mil>
+ * All credits to : Stephen Smalley, <sds@tycho.nsa.gov>
* All BUGS to : Etienne BASSET <etienne.basset@ensta.org>
*/
#ifndef _LSM_COMMON_LOGGING_
@@ -25,7 +26,7 @@
struct lsm_network_audit {
int netif;
- struct sock *sk;
+ const struct sock *sk;
u16 family;
__be16 dport;
__be16 sport;
@@ -47,13 +48,13 @@ struct lsm_ioctlop_audit {
};
struct lsm_ibpkey_audit {
- u64 subnet_prefix;
- u16 pkey;
+ u64 subnet_prefix;
+ u16 pkey;
};
struct lsm_ibendport_audit {
- char dev_name[IB_DEVICE_NAME_MAX];
- u8 port;
+ const char *dev_name;
+ u8 port;
};
/* Auxiliary data to use in generating the audit record. */
@@ -73,6 +74,9 @@ struct common_audit_data {
#define LSM_AUDIT_DATA_FILE 12
#define LSM_AUDIT_DATA_IBPKEY 13
#define LSM_AUDIT_DATA_IBENDPORT 14
+#define LSM_AUDIT_DATA_LOCKDOWN 15
+#define LSM_AUDIT_DATA_NOTIFICATION 16
+#define LSM_AUDIT_DATA_ANONINODE 17
union {
struct path path;
struct dentry *dentry;
@@ -92,6 +96,8 @@ struct common_audit_data {
struct file *file;
struct lsm_ibpkey_audit *ibpkey;
struct lsm_ibendport_audit *ibendport;
+ int reason;
+ const char *anonclass;
} u;
/* this union contains LSM specific data */
union {
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
new file mode 100644
index 000000000000..6bb55e61e8e8
--- /dev/null
+++ b/include/linux/lsm_hook_defs.h
@@ -0,0 +1,419 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Linux Security Module Hook declarations.
+ *
+ * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com>
+ * Copyright (C) 2001 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com>
+ * Copyright (C) 2001 James Morris <jmorris@intercode.com.au>
+ * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group)
+ * Copyright (C) 2015 Intel Corporation.
+ * Copyright (C) 2015 Casey Schaufler <casey@schaufler-ca.com>
+ * Copyright (C) 2016 Mellanox Technologies
+ * Copyright (C) 2020 Google LLC.
+ */
+
+/*
+ * The macro LSM_HOOK is used to define the data structures required by
+ * the LSM framework using the pattern:
+ *
+ * LSM_HOOK(<return_type>, <default_value>, <hook_name>, args...)
+ *
+ * struct security_hook_heads {
+ * #define LSM_HOOK(RET, DEFAULT, NAME, ...) struct hlist_head NAME;
+ * #include <linux/lsm_hook_defs.h>
+ * #undef LSM_HOOK
+ * };
+ */
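
The same list is expanded again elsewhere (e.g. in lsm_hooks.h) with a different LSM_HOOK definition to stamp out the matching union of hook pointers:

    union security_list_options {
        #define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
        #include <linux/lsm_hook_defs.h>
        #undef LSM_HOOK
    };
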
+LSM_HOOK(int, 0, binder_set_context_mgr, const struct cred *mgr)
+LSM_HOOK(int, 0, binder_transaction, const struct cred *from,
+ const struct cred *to)
+LSM_HOOK(int, 0, binder_transfer_binder, const struct cred *from,
+ const struct cred *to)
+LSM_HOOK(int, 0, binder_transfer_file, const struct cred *from,
+ const struct cred *to, struct file *file)
+LSM_HOOK(int, 0, ptrace_access_check, struct task_struct *child,
+ unsigned int mode)
+LSM_HOOK(int, 0, ptrace_traceme, struct task_struct *parent)
+LSM_HOOK(int, 0, capget, struct task_struct *target, kernel_cap_t *effective,
+ kernel_cap_t *inheritable, kernel_cap_t *permitted)
+LSM_HOOK(int, 0, capset, struct cred *new, const struct cred *old,
+ const kernel_cap_t *effective, const kernel_cap_t *inheritable,
+ const kernel_cap_t *permitted)
+LSM_HOOK(int, 0, capable, const struct cred *cred, struct user_namespace *ns,
+ int cap, unsigned int opts)
+LSM_HOOK(int, 0, quotactl, int cmds, int type, int id, struct super_block *sb)
+LSM_HOOK(int, 0, quota_on, struct dentry *dentry)
+LSM_HOOK(int, 0, syslog, int type)
+LSM_HOOK(int, 0, settime, const struct timespec64 *ts,
+ const struct timezone *tz)
+LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages)
+LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm)
+LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, struct file *file)
+LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
+LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm)
+LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm)
+LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc,
+ struct fs_context *src_sc)
+LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc,
+ struct fs_parameter *param)
+LSM_HOOK(int, 0, sb_alloc_security, struct super_block *sb)
+LSM_HOOK(void, LSM_RET_VOID, sb_delete, struct super_block *sb)
+LSM_HOOK(void, LSM_RET_VOID, sb_free_security, struct super_block *sb)
+LSM_HOOK(void, LSM_RET_VOID, sb_free_mnt_opts, void *mnt_opts)
+LSM_HOOK(int, 0, sb_eat_lsm_opts, char *orig, void **mnt_opts)
+LSM_HOOK(int, 0, sb_mnt_opts_compat, struct super_block *sb, void *mnt_opts)
+LSM_HOOK(int, 0, sb_remount, struct super_block *sb, void *mnt_opts)
+LSM_HOOK(int, 0, sb_kern_mount, struct super_block *sb)
+LSM_HOOK(int, 0, sb_show_options, struct seq_file *m, struct super_block *sb)
+LSM_HOOK(int, 0, sb_statfs, struct dentry *dentry)
+LSM_HOOK(int, 0, sb_mount, const char *dev_name, const struct path *path,
+ const char *type, unsigned long flags, void *data)
+LSM_HOOK(int, 0, sb_umount, struct vfsmount *mnt, int flags)
+LSM_HOOK(int, 0, sb_pivotroot, const struct path *old_path,
+ const struct path *new_path)
+LSM_HOOK(int, 0, sb_set_mnt_opts, struct super_block *sb, void *mnt_opts,
+ unsigned long kern_flags, unsigned long *set_kern_flags)
+LSM_HOOK(int, 0, sb_clone_mnt_opts, const struct super_block *oldsb,
+ struct super_block *newsb, unsigned long kern_flags,
+ unsigned long *set_kern_flags)
+LSM_HOOK(int, 0, move_mount, const struct path *from_path,
+ const struct path *to_path)
+LSM_HOOK(int, -EOPNOTSUPP, dentry_init_security, struct dentry *dentry,
+ int mode, const struct qstr *name, const char **xattr_name,
+ void **ctx, u32 *ctxlen)
+LSM_HOOK(int, 0, dentry_create_files_as, struct dentry *dentry, int mode,
+ struct qstr *name, const struct cred *old, struct cred *new)
+
+#ifdef CONFIG_SECURITY_PATH
+LSM_HOOK(int, 0, path_unlink, const struct path *dir, struct dentry *dentry)
+LSM_HOOK(int, 0, path_mkdir, const struct path *dir, struct dentry *dentry,
+ umode_t mode)
+LSM_HOOK(int, 0, path_rmdir, const struct path *dir, struct dentry *dentry)
+LSM_HOOK(int, 0, path_mknod, const struct path *dir, struct dentry *dentry,
+ umode_t mode, unsigned int dev)
+LSM_HOOK(int, 0, path_truncate, const struct path *path)
+LSM_HOOK(int, 0, path_symlink, const struct path *dir, struct dentry *dentry,
+ const char *old_name)
+LSM_HOOK(int, 0, path_link, struct dentry *old_dentry,
+ const struct path *new_dir, struct dentry *new_dentry)
+LSM_HOOK(int, 0, path_rename, const struct path *old_dir,
+ struct dentry *old_dentry, const struct path *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
+LSM_HOOK(int, 0, path_chmod, const struct path *path, umode_t mode)
+LSM_HOOK(int, 0, path_chown, const struct path *path, kuid_t uid, kgid_t gid)
+LSM_HOOK(int, 0, path_chroot, const struct path *path)
+#endif /* CONFIG_SECURITY_PATH */
+
+/* Needed for inode based security check */
+LSM_HOOK(int, 0, path_notify, const struct path *path, u64 mask,
+ unsigned int obj_type)
+LSM_HOOK(int, 0, inode_alloc_security, struct inode *inode)
+LSM_HOOK(void, LSM_RET_VOID, inode_free_security, struct inode *inode)
+LSM_HOOK(int, 0, inode_init_security, struct inode *inode,
+ struct inode *dir, const struct qstr *qstr, const char **name,
+ void **value, size_t *len)
+LSM_HOOK(int, 0, inode_init_security_anon, struct inode *inode,
+ const struct qstr *name, const struct inode *context_inode)
+LSM_HOOK(int, 0, inode_create, struct inode *dir, struct dentry *dentry,
+ umode_t mode)
+LSM_HOOK(int, 0, inode_link, struct dentry *old_dentry, struct inode *dir,
+ struct dentry *new_dentry)
+LSM_HOOK(int, 0, inode_unlink, struct inode *dir, struct dentry *dentry)
+LSM_HOOK(int, 0, inode_symlink, struct inode *dir, struct dentry *dentry,
+ const char *old_name)
+LSM_HOOK(int, 0, inode_mkdir, struct inode *dir, struct dentry *dentry,
+ umode_t mode)
+LSM_HOOK(int, 0, inode_rmdir, struct inode *dir, struct dentry *dentry)
+LSM_HOOK(int, 0, inode_mknod, struct inode *dir, struct dentry *dentry,
+ umode_t mode, dev_t dev)
+LSM_HOOK(int, 0, inode_rename, struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+LSM_HOOK(int, 0, inode_readlink, struct dentry *dentry)
+LSM_HOOK(int, 0, inode_follow_link, struct dentry *dentry, struct inode *inode,
+ bool rcu)
+LSM_HOOK(int, 0, inode_permission, struct inode *inode, int mask)
+LSM_HOOK(int, 0, inode_setattr, struct dentry *dentry, struct iattr *attr)
+LSM_HOOK(int, 0, inode_getattr, const struct path *path)
+LSM_HOOK(int, 0, inode_setxattr, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags)
+LSM_HOOK(void, LSM_RET_VOID, inode_post_setxattr, struct dentry *dentry,
+ const char *name, const void *value, size_t size, int flags)
+LSM_HOOK(int, 0, inode_getxattr, struct dentry *dentry, const char *name)
+LSM_HOOK(int, 0, inode_listxattr, struct dentry *dentry)
+LSM_HOOK(int, 0, inode_removexattr, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *name)
+LSM_HOOK(int, 0, inode_set_acl, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name, struct posix_acl *kacl)
+LSM_HOOK(int, 0, inode_get_acl, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name)
+LSM_HOOK(int, 0, inode_remove_acl, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name)
+LSM_HOOK(int, 0, inode_need_killpriv, struct dentry *dentry)
+LSM_HOOK(int, 0, inode_killpriv, struct mnt_idmap *idmap,
+ struct dentry *dentry)
+LSM_HOOK(int, -EOPNOTSUPP, inode_getsecurity, struct mnt_idmap *idmap,
+ struct inode *inode, const char *name, void **buffer, bool alloc)
+LSM_HOOK(int, -EOPNOTSUPP, inode_setsecurity, struct inode *inode,
+ const char *name, const void *value, size_t size, int flags)
+LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer,
+ size_t buffer_size)
+LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid)
+LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new)
+LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, const char *name)
+LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir,
+ struct kernfs_node *kn)
+LSM_HOOK(int, 0, file_permission, struct file *file, int mask)
+LSM_HOOK(int, 0, file_alloc_security, struct file *file)
+LSM_HOOK(void, LSM_RET_VOID, file_free_security, struct file *file)
+LSM_HOOK(int, 0, file_ioctl, struct file *file, unsigned int cmd,
+ unsigned long arg)
+LSM_HOOK(int, 0, mmap_addr, unsigned long addr)
+LSM_HOOK(int, 0, mmap_file, struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags)
+LSM_HOOK(int, 0, file_mprotect, struct vm_area_struct *vma,
+ unsigned long reqprot, unsigned long prot)
+LSM_HOOK(int, 0, file_lock, struct file *file, unsigned int cmd)
+LSM_HOOK(int, 0, file_fcntl, struct file *file, unsigned int cmd,
+ unsigned long arg)
+LSM_HOOK(void, LSM_RET_VOID, file_set_fowner, struct file *file)
+LSM_HOOK(int, 0, file_send_sigiotask, struct task_struct *tsk,
+ struct fown_struct *fown, int sig)
+LSM_HOOK(int, 0, file_receive, struct file *file)
+LSM_HOOK(int, 0, file_open, struct file *file)
+LSM_HOOK(int, 0, file_truncate, struct file *file)
+LSM_HOOK(int, 0, task_alloc, struct task_struct *task,
+ unsigned long clone_flags)
+LSM_HOOK(void, LSM_RET_VOID, task_free, struct task_struct *task)
+LSM_HOOK(int, 0, cred_alloc_blank, struct cred *cred, gfp_t gfp)
+LSM_HOOK(void, LSM_RET_VOID, cred_free, struct cred *cred)
+LSM_HOOK(int, 0, cred_prepare, struct cred *new, const struct cred *old,
+ gfp_t gfp)
+LSM_HOOK(void, LSM_RET_VOID, cred_transfer, struct cred *new,
+ const struct cred *old)
+LSM_HOOK(void, LSM_RET_VOID, cred_getsecid, const struct cred *c, u32 *secid)
+LSM_HOOK(int, 0, kernel_act_as, struct cred *new, u32 secid)
+LSM_HOOK(int, 0, kernel_create_files_as, struct cred *new, struct inode *inode)
+LSM_HOOK(int, 0, kernel_module_request, char *kmod_name)
+LSM_HOOK(int, 0, kernel_load_data, enum kernel_load_data_id id, bool contents)
+LSM_HOOK(int, 0, kernel_post_load_data, char *buf, loff_t size,
+ enum kernel_load_data_id id, char *description)
+LSM_HOOK(int, 0, kernel_read_file, struct file *file,
+ enum kernel_read_file_id id, bool contents)
+LSM_HOOK(int, 0, kernel_post_read_file, struct file *file, char *buf,
+ loff_t size, enum kernel_read_file_id id)
+LSM_HOOK(int, 0, task_fix_setuid, struct cred *new, const struct cred *old,
+ int flags)
+LSM_HOOK(int, 0, task_fix_setgid, struct cred *new, const struct cred *old,
+ int flags)
+LSM_HOOK(int, 0, task_fix_setgroups, struct cred *new, const struct cred *old)
+LSM_HOOK(int, 0, task_setpgid, struct task_struct *p, pid_t pgid)
+LSM_HOOK(int, 0, task_getpgid, struct task_struct *p)
+LSM_HOOK(int, 0, task_getsid, struct task_struct *p)
+LSM_HOOK(void, LSM_RET_VOID, current_getsecid_subj, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, task_getsecid_obj,
+ struct task_struct *p, u32 *secid)
+LSM_HOOK(int, 0, task_setnice, struct task_struct *p, int nice)
+LSM_HOOK(int, 0, task_setioprio, struct task_struct *p, int ioprio)
+LSM_HOOK(int, 0, task_getioprio, struct task_struct *p)
+LSM_HOOK(int, 0, task_prlimit, const struct cred *cred,
+ const struct cred *tcred, unsigned int flags)
+LSM_HOOK(int, 0, task_setrlimit, struct task_struct *p, unsigned int resource,
+ struct rlimit *new_rlim)
+LSM_HOOK(int, 0, task_setscheduler, struct task_struct *p)
+LSM_HOOK(int, 0, task_getscheduler, struct task_struct *p)
+LSM_HOOK(int, 0, task_movememory, struct task_struct *p)
+LSM_HOOK(int, 0, task_kill, struct task_struct *p, struct kernel_siginfo *info,
+ int sig, const struct cred *cred)
+LSM_HOOK(int, -ENOSYS, task_prctl, int option, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4, unsigned long arg5)
+LSM_HOOK(void, LSM_RET_VOID, task_to_inode, struct task_struct *p,
+ struct inode *inode)
+LSM_HOOK(int, 0, userns_create, const struct cred *cred)
+LSM_HOOK(int, 0, ipc_permission, struct kern_ipc_perm *ipcp, short flag)
+LSM_HOOK(void, LSM_RET_VOID, ipc_getsecid, struct kern_ipc_perm *ipcp,
+ u32 *secid)
+LSM_HOOK(int, 0, msg_msg_alloc_security, struct msg_msg *msg)
+LSM_HOOK(void, LSM_RET_VOID, msg_msg_free_security, struct msg_msg *msg)
+LSM_HOOK(int, 0, msg_queue_alloc_security, struct kern_ipc_perm *perm)
+LSM_HOOK(void, LSM_RET_VOID, msg_queue_free_security,
+ struct kern_ipc_perm *perm)
+LSM_HOOK(int, 0, msg_queue_associate, struct kern_ipc_perm *perm, int msqflg)
+LSM_HOOK(int, 0, msg_queue_msgctl, struct kern_ipc_perm *perm, int cmd)
+LSM_HOOK(int, 0, msg_queue_msgsnd, struct kern_ipc_perm *perm,
+ struct msg_msg *msg, int msqflg)
+LSM_HOOK(int, 0, msg_queue_msgrcv, struct kern_ipc_perm *perm,
+ struct msg_msg *msg, struct task_struct *target, long type, int mode)
+LSM_HOOK(int, 0, shm_alloc_security, struct kern_ipc_perm *perm)
+LSM_HOOK(void, LSM_RET_VOID, shm_free_security, struct kern_ipc_perm *perm)
+LSM_HOOK(int, 0, shm_associate, struct kern_ipc_perm *perm, int shmflg)
+LSM_HOOK(int, 0, shm_shmctl, struct kern_ipc_perm *perm, int cmd)
+LSM_HOOK(int, 0, shm_shmat, struct kern_ipc_perm *perm, char __user *shmaddr,
+ int shmflg)
+LSM_HOOK(int, 0, sem_alloc_security, struct kern_ipc_perm *perm)
+LSM_HOOK(void, LSM_RET_VOID, sem_free_security, struct kern_ipc_perm *perm)
+LSM_HOOK(int, 0, sem_associate, struct kern_ipc_perm *perm, int semflg)
+LSM_HOOK(int, 0, sem_semctl, struct kern_ipc_perm *perm, int cmd)
+LSM_HOOK(int, 0, sem_semop, struct kern_ipc_perm *perm, struct sembuf *sops,
+ unsigned int nsops, int alter)
+LSM_HOOK(int, 0, netlink_send, struct sock *sk, struct sk_buff *skb)
+LSM_HOOK(void, LSM_RET_VOID, d_instantiate, struct dentry *dentry,
+ struct inode *inode)
+LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, const char *name,
+ char **value)
+LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size)
+LSM_HOOK(int, 0, ismaclabel, const char *name)
+LSM_HOOK(int, -EOPNOTSUPP, secid_to_secctx, u32 secid, char **secdata,
+ u32 *seclen)
+LSM_HOOK(int, 0, secctx_to_secid, const char *secdata, u32 seclen, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen)
+LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode)
+LSM_HOOK(int, 0, inode_notifysecctx, struct inode *inode, void *ctx, u32 ctxlen)
+LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen)
+LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx,
+ u32 *ctxlen)
+
+#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
+LSM_HOOK(int, 0, post_notification, const struct cred *w_cred,
+ const struct cred *cred, struct watch_notification *n)
+#endif /* CONFIG_SECURITY && CONFIG_WATCH_QUEUE */
+
+#if defined(CONFIG_SECURITY) && defined(CONFIG_KEY_NOTIFICATIONS)
+LSM_HOOK(int, 0, watch_key, struct key *key)
+#endif /* CONFIG_SECURITY && CONFIG_KEY_NOTIFICATIONS */
+
+#ifdef CONFIG_SECURITY_NETWORK
+LSM_HOOK(int, 0, unix_stream_connect, struct sock *sock, struct sock *other,
+ struct sock *newsk)
+LSM_HOOK(int, 0, unix_may_send, struct socket *sock, struct socket *other)
+LSM_HOOK(int, 0, socket_create, int family, int type, int protocol, int kern)
+LSM_HOOK(int, 0, socket_post_create, struct socket *sock, int family, int type,
+ int protocol, int kern)
+LSM_HOOK(int, 0, socket_socketpair, struct socket *socka, struct socket *sockb)
+LSM_HOOK(int, 0, socket_bind, struct socket *sock, struct sockaddr *address,
+ int addrlen)
+LSM_HOOK(int, 0, socket_connect, struct socket *sock, struct sockaddr *address,
+ int addrlen)
+LSM_HOOK(int, 0, socket_listen, struct socket *sock, int backlog)
+LSM_HOOK(int, 0, socket_accept, struct socket *sock, struct socket *newsock)
+LSM_HOOK(int, 0, socket_sendmsg, struct socket *sock, struct msghdr *msg,
+ int size)
+LSM_HOOK(int, 0, socket_recvmsg, struct socket *sock, struct msghdr *msg,
+ int size, int flags)
+LSM_HOOK(int, 0, socket_getsockname, struct socket *sock)
+LSM_HOOK(int, 0, socket_getpeername, struct socket *sock)
+LSM_HOOK(int, 0, socket_getsockopt, struct socket *sock, int level, int optname)
+LSM_HOOK(int, 0, socket_setsockopt, struct socket *sock, int level, int optname)
+LSM_HOOK(int, 0, socket_shutdown, struct socket *sock, int how)
+LSM_HOOK(int, 0, socket_sock_rcv_skb, struct sock *sk, struct sk_buff *skb)
+LSM_HOOK(int, 0, socket_getpeersec_stream, struct socket *sock,
+ sockptr_t optval, sockptr_t optlen, unsigned int len)
+LSM_HOOK(int, 0, socket_getpeersec_dgram, struct socket *sock,
+ struct sk_buff *skb, u32 *secid)
+LSM_HOOK(int, 0, sk_alloc_security, struct sock *sk, int family, gfp_t priority)
+LSM_HOOK(void, LSM_RET_VOID, sk_free_security, struct sock *sk)
+LSM_HOOK(void, LSM_RET_VOID, sk_clone_security, const struct sock *sk,
+ struct sock *newsk)
+LSM_HOOK(void, LSM_RET_VOID, sk_getsecid, struct sock *sk, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, sock_graft, struct sock *sk, struct socket *parent)
+LSM_HOOK(int, 0, inet_conn_request, const struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req)
+LSM_HOOK(void, LSM_RET_VOID, inet_csk_clone, struct sock *newsk,
+ const struct request_sock *req)
+LSM_HOOK(void, LSM_RET_VOID, inet_conn_established, struct sock *sk,
+ struct sk_buff *skb)
+LSM_HOOK(int, 0, secmark_relabel_packet, u32 secid)
+LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_inc, void)
+LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_dec, void)
+LSM_HOOK(void, LSM_RET_VOID, req_classify_flow, const struct request_sock *req,
+ struct flowi_common *flic)
+LSM_HOOK(int, 0, tun_dev_alloc_security, void **security)
+LSM_HOOK(void, LSM_RET_VOID, tun_dev_free_security, void *security)
+LSM_HOOK(int, 0, tun_dev_create, void)
+LSM_HOOK(int, 0, tun_dev_attach_queue, void *security)
+LSM_HOOK(int, 0, tun_dev_attach, struct sock *sk, void *security)
+LSM_HOOK(int, 0, tun_dev_open, void *security)
+LSM_HOOK(int, 0, sctp_assoc_request, struct sctp_association *asoc,
+ struct sk_buff *skb)
+LSM_HOOK(int, 0, sctp_bind_connect, struct sock *sk, int optname,
+ struct sockaddr *address, int addrlen)
+LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_association *asoc,
+ struct sock *sk, struct sock *newsk)
+LSM_HOOK(int, 0, sctp_assoc_established, struct sctp_association *asoc,
+ struct sk_buff *skb)
+#endif /* CONFIG_SECURITY_NETWORK */
+
+#ifdef CONFIG_SECURITY_INFINIBAND
+LSM_HOOK(int, 0, ib_pkey_access, void *sec, u64 subnet_prefix, u16 pkey)
+LSM_HOOK(int, 0, ib_endport_manage_subnet, void *sec, const char *dev_name,
+ u8 port_num)
+LSM_HOOK(int, 0, ib_alloc_security, void **sec)
+LSM_HOOK(void, LSM_RET_VOID, ib_free_security, void *sec)
+#endif /* CONFIG_SECURITY_INFINIBAND */
+
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+LSM_HOOK(int, 0, xfrm_policy_alloc_security, struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp)
+LSM_HOOK(int, 0, xfrm_policy_clone_security, struct xfrm_sec_ctx *old_ctx,
+ struct xfrm_sec_ctx **new_ctx)
+LSM_HOOK(void, LSM_RET_VOID, xfrm_policy_free_security,
+ struct xfrm_sec_ctx *ctx)
+LSM_HOOK(int, 0, xfrm_policy_delete_security, struct xfrm_sec_ctx *ctx)
+LSM_HOOK(int, 0, xfrm_state_alloc, struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *sec_ctx)
+LSM_HOOK(int, 0, xfrm_state_alloc_acquire, struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec, u32 secid)
+LSM_HOOK(void, LSM_RET_VOID, xfrm_state_free_security, struct xfrm_state *x)
+LSM_HOOK(int, 0, xfrm_state_delete_security, struct xfrm_state *x)
+LSM_HOOK(int, 0, xfrm_policy_lookup, struct xfrm_sec_ctx *ctx, u32 fl_secid)
+LSM_HOOK(int, 1, xfrm_state_pol_flow_match, struct xfrm_state *x,
+ struct xfrm_policy *xp, const struct flowi_common *flic)
+LSM_HOOK(int, 0, xfrm_decode_session, struct sk_buff *skb, u32 *secid,
+ int ckall)
+#endif /* CONFIG_SECURITY_NETWORK_XFRM */
+
+/* key management security hooks */
+#ifdef CONFIG_KEYS
+LSM_HOOK(int, 0, key_alloc, struct key *key, const struct cred *cred,
+ unsigned long flags)
+LSM_HOOK(void, LSM_RET_VOID, key_free, struct key *key)
+LSM_HOOK(int, 0, key_permission, key_ref_t key_ref, const struct cred *cred,
+ enum key_need_perm need_perm)
+LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **buffer)
+#endif /* CONFIG_KEYS */
+
+#ifdef CONFIG_AUDIT
+LSM_HOOK(int, 0, audit_rule_init, u32 field, u32 op, char *rulestr,
+ void **lsmrule)
+LSM_HOOK(int, 0, audit_rule_known, struct audit_krule *krule)
+LSM_HOOK(int, 0, audit_rule_match, u32 secid, u32 field, u32 op, void *lsmrule)
+LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule)
+#endif /* CONFIG_AUDIT */
+
+#ifdef CONFIG_BPF_SYSCALL
+LSM_HOOK(int, 0, bpf, int cmd, union bpf_attr *attr, unsigned int size)
+LSM_HOOK(int, 0, bpf_map, struct bpf_map *map, fmode_t fmode)
+LSM_HOOK(int, 0, bpf_prog, struct bpf_prog *prog)
+LSM_HOOK(int, 0, bpf_map_alloc_security, struct bpf_map *map)
+LSM_HOOK(void, LSM_RET_VOID, bpf_map_free_security, struct bpf_map *map)
+LSM_HOOK(int, 0, bpf_prog_alloc_security, struct bpf_prog_aux *aux)
+LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free_security, struct bpf_prog_aux *aux)
+#endif /* CONFIG_BPF_SYSCALL */
+
+LSM_HOOK(int, 0, locked_down, enum lockdown_reason what)
+
+#ifdef CONFIG_PERF_EVENTS
+LSM_HOOK(int, 0, perf_event_open, struct perf_event_attr *attr, int type)
+LSM_HOOK(int, 0, perf_event_alloc, struct perf_event *event)
+LSM_HOOK(void, LSM_RET_VOID, perf_event_free, struct perf_event *event)
+LSM_HOOK(int, 0, perf_event_read, struct perf_event *event)
+LSM_HOOK(int, 0, perf_event_write, struct perf_event *event)
+#endif /* CONFIG_PERF_EVENTS */
+
+#ifdef CONFIG_IO_URING
+LSM_HOOK(int, 0, uring_override_creds, const struct cred *new)
+LSM_HOOK(int, 0, uring_sqpoll, void)
+LSM_HOOK(int, 0, uring_cmd, struct io_uring_cmd *ioucmd)
+#endif /* CONFIG_IO_URING */
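
For reference, these LSM_HOOK() entries are consumed X-macro style: each site that needs the full hook list defines LSM_HOOK() to emit the shape it wants and then includes this header. A simplified sketch of the two expansions performed by the reworked lsm_hooks.h (the real header adds kernel-doc, and security/security.c adds a further expansion for the per-hook default return values):

union security_list_options {
	#define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
	#include "lsm_hook_defs.h"
	#undef LSM_HOOK
};

struct security_hook_heads {
	#define LSM_HOOK(RET, DEFAULT, NAME, ...) struct hlist_head NAME;
	#include "lsm_hook_defs.h"
	#undef LSM_HOOK
};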
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 7a86925ba8f3..ab2b2fafa4a4 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -29,1903 +29,49 @@
#include <linux/init.h>
#include <linux/rculist.h>
-/**
- * union security_list_options - Linux Security Module hook function list
- *
- * Security hooks for program execution operations.
- *
- * @bprm_set_creds:
- * Save security information in the bprm->security field, typically based
- * on information about the bprm->file, for later use by the apply_creds
- * hook. This hook may also optionally check permissions (e.g. for
- * transitions between security domains).
- * This hook may be called multiple times during a single execve, e.g. for
- * interpreters. The hook can tell whether it has already been called by
- * checking to see if @bprm->security is non-NULL. If so, then the hook
- * may decide either to retain the security information saved earlier or
- * to replace it.
- * @bprm contains the linux_binprm structure.
- * Return 0 if the hook is successful and permission is granted.
- * @bprm_check_security:
- * This hook mediates the point when a search for a binary handler will
- * begin. It allows a check of the @bprm->security value, which is set in
- * the preceding set_creds call. The primary difference from set_creds is
- * that the argv list and envp list are reliably available in @bprm. This
- * hook may be called multiple times during a single execve; and in each
- * pass set_creds is called first.
- * @bprm contains the linux_binprm structure.
- * Return 0 if the hook is successful and permission is granted.
- * @bprm_committing_creds:
- * Prepare to install the new security attributes of a process being
- * transformed by an execve operation, based on the old credentials
- * pointed to by @current->cred and the information set in @bprm->cred by
- * the bprm_set_creds hook. @bprm points to the linux_binprm structure.
- * This hook is a good place to perform state changes on the process such
- * as closing open file descriptors to which access will no longer be
- * granted when the attributes are changed. This is called immediately
- * before commit_creds().
- * @bprm_committed_creds:
- * Tidy up after the installation of the new security attributes of a
- * process being transformed by an execve operation. The new credentials
- * have, by this point, been set to @current->cred. @bprm points to the
- * linux_binprm structure. This hook is a good place to perform state
- * changes on the process such as clearing out non-inheritable signal
- * state. This is called immediately after commit_creds().
- * @bprm_secureexec:
- * Return a boolean value (0 or 1) indicating whether a "secure exec"
- * is required. The flag is passed in the auxiliary table
- * on the initial stack to the ELF interpreter to indicate whether libc
- * should enable secure mode.
- * @bprm contains the linux_binprm structure.
- *
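
For orientation, an LSM wires an implementation to any hook documented here via LSM_HOOK_INIT() and security_add_hooks(). A minimal sketch, assuming a hypothetical module named "demo" with a no-op exec policy:

#include <linux/binprm.h>
#include <linux/lsm_hooks.h>

/* Hypothetical policy: allow every exec; a real LSM would inspect @bprm. */
static int demo_bprm_check_security(struct linux_binprm *bprm)
{
	return 0;		/* 0 grants permission; -EPERM would deny */
}

static struct security_hook_list demo_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(bprm_check_security, demo_bprm_check_security),
};

static int __init demo_init(void)
{
	security_add_hooks(demo_hooks, ARRAY_SIZE(demo_hooks), "demo");
	return 0;
}

DEFINE_LSM(demo) = {
	.name = "demo",
	.init = demo_init,
};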
- * Security hooks for filesystem operations.
- *
- * @sb_alloc_security:
- * Allocate and attach a security structure to the sb->s_security field.
- * The s_security field is initialized to NULL when the structure is
- * allocated.
- * @sb contains the super_block structure to be modified.
- * Return 0 if operation was successful.
- * @sb_free_security:
- * Deallocate and clear the sb->s_security field.
- * @sb contains the super_block structure to be modified.
- * @sb_statfs:
- * Check permission before obtaining filesystem statistics for the @mnt
- * mountpoint.
- * @dentry is a handle on the superblock for the filesystem.
- * Return 0 if permission is granted.
- * @sb_mount:
- * Check permission before an object specified by @dev_name is mounted on
- * the mount point named by @nd. For an ordinary mount, @dev_name
- * identifies a device if the file system type requires a device. For a
- * remount (@flags & MS_REMOUNT), @dev_name is irrelevant. For a
- * loopback/bind mount (@flags & MS_BIND), @dev_name identifies the
- * pathname of the object being mounted.
- * @dev_name contains the name for object being mounted.
- * @path contains the path for mount point object.
- * @type contains the filesystem type.
- * @flags contains the mount flags.
- * @data contains the filesystem-specific data.
- * Return 0 if permission is granted.
- * @sb_copy_data:
- * Allow mount option data to be copied prior to parsing by the filesystem,
- * so that the security module can extract security-specific mount
- * options cleanly (a filesystem may modify the data e.g. with strsep()).
- * This also allows the original mount data to be stripped of security-
- * specific options to avoid having to make filesystems aware of them.
- * @type the type of filesystem being mounted.
- * @orig the original mount data copied from userspace.
- * @copy copied data which will be passed to the security module.
- * Returns 0 if the copy was successful.
- * @sb_remount:
- * Extracts security system specific mount options and verifies no changes
- * are being made to those options.
- * @sb superblock being remounted
- * @data contains the filesystem-specific data.
- * Return 0 if permission is granted.
- * @sb_umount:
- * Check permission before the @mnt file system is unmounted.
- * @mnt contains the mounted file system.
- * @flags contains the unmount flags, e.g. MNT_FORCE.
- * Return 0 if permission is granted.
- * @sb_pivotroot:
- * Check permission before pivoting the root filesystem.
- * @old_path contains the path for the new location of the
- * current root (put_old).
- * @new_path contains the path for the new root (new_root).
- * Return 0 if permission is granted.
- * @sb_set_mnt_opts:
- * Set the security relevant mount options used for a superblock
- * @sb the superblock to set security mount options for
- * @opts binary data structure containing all lsm mount data
- * @sb_clone_mnt_opts:
- * Copy all security options from a given superblock to another
- * @oldsb old superblock which contain information to clone
- * @newsb new superblock which needs to be filled in
- * @sb_parse_opts_str:
- * Parse a string of security data filling in the opts structure
- * @options string containing all mount options known by the LSM
- * @opts binary data structure usable by the LSM
- * @dentry_init_security:
- * Compute a context for a dentry as the inode is not yet available
- * since NFSv4 has no label backed by an EA anyway.
- * @dentry dentry to use in calculating the context.
- * @mode mode used to determine resource type.
- * @name name of the last path component used to create file
- * @ctx pointer to place the pointer to the resulting context in.
- * @ctxlen point to place the length of the resulting context.
- * @dentry_create_files_as:
- * Compute a context for a dentry as the inode is not yet available
- * and set that context in passed in creds so that new files are
- * created using that context. Context is calculated using the
- * passed in creds and not the creds of the caller.
- * @dentry dentry to use in calculating the context.
- * @mode mode used to determine resource type.
- * @name name of the last path component used to create file
- * @old creds which should be used for context calculation
- * @new creds to modify
- *
- *
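
As a concrete shape for the superblock hooks above, a minimal, hypothetical sb_mount implementation ("demofs" is an invented filesystem type used only for illustration):

static int demo_sb_mount(const char *dev_name, const struct path *path,
			 const char *type, unsigned long flags, void *data)
{
	/* Invented policy: refuse to mount the "demofs" filesystem type. */
	if (type && !strcmp(type, "demofs"))
		return -EPERM;
	return 0;		/* grant everything else */
}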
- * Security hooks for inode operations.
- *
- * @inode_alloc_security:
- * Allocate and attach a security structure to @inode->i_security. The
- * i_security field is initialized to NULL when the inode structure is
- * allocated.
- * @inode contains the inode structure.
- * Return 0 if operation was successful.
- * @inode_free_security:
- * @inode contains the inode structure.
- * Deallocate the inode security structure and set @inode->i_security to
- * NULL.
- * @inode_init_security:
- * Obtain the security attribute name suffix and value to set on a newly
- * created inode and set up the incore security field for the new inode.
- * This hook is called by the fs code as part of the inode creation
- * transaction and provides for atomic labeling of the inode, unlike
- * the post_create/mkdir/... hooks called by the VFS. The hook function
- * is expected to allocate the name and value via kmalloc, with the caller
- * being responsible for calling kfree after using them.
- * If the security module does not use security attributes or does
- * not wish to put a security attribute on this particular inode,
- * then it should return -EOPNOTSUPP to skip this processing.
- * @inode contains the inode structure of the newly created inode.
- * @dir contains the inode structure of the parent directory.
- * @qstr contains the last path component of the new object
- * @name will be set to the allocated name suffix (e.g. selinux).
- * @value will be set to the allocated attribute value.
- * @len will be set to the length of the value.
- * Returns 0 if @name and @value have been successfully set,
- * -EOPNOTSUPP if no security attribute is needed, or
- * -ENOMEM on memory allocation failure.
- * @inode_create:
- * Check permission to create a regular file.
- * @dir contains inode structure of the parent of the new file.
- * @dentry contains the dentry structure for the file to be created.
- * @mode contains the file mode of the file to be created.
- * Return 0 if permission is granted.
- * @inode_link:
- * Check permission before creating a new hard link to a file.
- * @old_dentry contains the dentry structure for an existing
- * link to the file.
- * @dir contains the inode structure of the parent directory
- * of the new link.
- * @new_dentry contains the dentry structure for the new link.
- * Return 0 if permission is granted.
- * @path_link:
- * Check permission before creating a new hard link to a file.
- * @old_dentry contains the dentry structure for an existing link
- * to the file.
- * @new_dir contains the path structure of the parent directory of
- * the new link.
- * @new_dentry contains the dentry structure for the new link.
- * Return 0 if permission is granted.
- * @inode_unlink:
- * Check the permission to remove a hard link to a file.
- * @dir contains the inode structure of parent directory of the file.
- * @dentry contains the dentry structure for file to be unlinked.
- * Return 0 if permission is granted.
- * @path_unlink:
- * Check the permission to remove a hard link to a file.
- * @dir contains the path structure of parent directory of the file.
- * @dentry contains the dentry structure for file to be unlinked.
- * Return 0 if permission is granted.
- * @inode_symlink:
- * Check the permission to create a symbolic link to a file.
- * @dir contains the inode structure of parent directory of
- * the symbolic link.
- * @dentry contains the dentry structure of the symbolic link.
- * @old_name contains the pathname of file.
- * Return 0 if permission is granted.
- * @path_symlink:
- * Check the permission to create a symbolic link to a file.
- * @dir contains the path structure of parent directory of
- * the symbolic link.
- * @dentry contains the dentry structure of the symbolic link.
- * @old_name contains the pathname of file.
- * Return 0 if permission is granted.
- * @inode_mkdir:
- * Check permissions to create a new directory in the existing directory
- * associated with inode structure @dir.
- * @dir contains the inode structure of parent of the directory
- * to be created.
- * @dentry contains the dentry structure of new directory.
- * @mode contains the mode of new directory.
- * Return 0 if permission is granted.
- * @path_mkdir:
- * Check permissions to create a new directory in the existing directory
- * associated with path structure @path.
- * @dir contains the path structure of parent of the directory
- * to be created.
- * @dentry contains the dentry structure of new directory.
- * @mode contains the mode of new directory.
- * Return 0 if permission is granted.
- * @inode_rmdir:
- * Check the permission to remove a directory.
- * @dir contains the inode structure of parent of the directory
- * to be removed.
- * @dentry contains the dentry structure of directory to be removed.
- * Return 0 if permission is granted.
- * @path_rmdir:
- * Check the permission to remove a directory.
- * @dir contains the path structure of parent of the directory to be
- * removed.
- * @dentry contains the dentry structure of directory to be removed.
- * Return 0 if permission is granted.
- * @inode_mknod:
- * Check permissions when creating a special file (or a socket or a fifo
- * file created via the mknod system call). Note that if the mknod operation
- * is being done for a regular file, then the create hook will be called
- * and not this hook.
- * @dir contains the inode structure of parent of the new file.
- * @dentry contains the dentry structure of the new file.
- * @mode contains the mode of the new file.
- * @dev contains the device number.
- * Return 0 if permission is granted.
- * @path_mknod:
- * Check permissions when creating a file. Note that this hook is called
- * even if the mknod operation is being done for a regular file.
- * @dir contains the path structure of parent of the new file.
- * @dentry contains the dentry structure of the new file.
- * @mode contains the mode of the new file.
- * @dev contains the undecoded device number. Use new_decode_dev() to get
- * the decoded device number.
- * Return 0 if permission is granted.
- * @inode_rename:
- * Check for permission to rename a file or directory.
- * @old_dir contains the inode structure for parent of the old link.
- * @old_dentry contains the dentry structure of the old link.
- * @new_dir contains the inode structure for parent of the new link.
- * @new_dentry contains the dentry structure of the new link.
- * Return 0 if permission is granted.
- * @path_rename:
- * Check for permission to rename a file or directory.
- * @old_dir contains the path structure for parent of the old link.
- * @old_dentry contains the dentry structure of the old link.
- * @new_dir contains the path structure for parent of the new link.
- * @new_dentry contains the dentry structure of the new link.
- * Return 0 if permission is granted.
- * @path_chmod:
- * Check for permission to change the DAC permissions of a file or
- * directory.
- * @dentry contains the dentry structure.
- * @mnt contains the vfsmnt structure.
- * @mode contains the new DAC mode.
- * Return 0 if permission is granted.
- * @path_chown:
- * Check for permission to change owner/group of a file or directory.
- * @path contains the path structure.
- * @uid contains new owner's ID.
- * @gid contains new group's ID.
- * Return 0 if permission is granted.
- * @path_chroot:
- * Check for permission to change root directory.
- * @path contains the path structure.
- * Return 0 if permission is granted.
- * @inode_readlink:
- * Check the permission to read the symbolic link.
- * @dentry contains the dentry structure for the file link.
- * Return 0 if permission is granted.
- * @inode_follow_link:
- * Check permission to follow a symbolic link when looking up a pathname.
- * @dentry contains the dentry structure for the link.
- * @inode contains the inode, which itself is not stable in RCU-walk
- * @rcu indicates whether we are in RCU-walk mode.
- * Return 0 if permission is granted.
- * @inode_permission:
- * Check permission before accessing an inode. This hook is called by the
- * existing Linux permission function, so a security module can use it to
- * provide additional checking for existing Linux permission checks.
- * Notice that this hook is called when a file is opened (as well as many
- * other operations), whereas the file_security_ops permission hook is
- * called when the actual read/write operations are performed.
- * @inode contains the inode structure to check.
- * @mask contains the permission mask.
- * Return 0 if permission is granted.
- * @inode_setattr:
- * Check permission before setting file attributes. Note that the kernel
- * call to notify_change is performed from several locations, whenever
- * file attributes change (such as when a file is truncated, chown/chmod
- * operations, transferring disk quotas, etc).
- * @dentry contains the dentry structure for the file.
- * @attr is the iattr structure containing the new file attributes.
- * Return 0 if permission is granted.
- * @path_truncate:
- * Check permission before truncating a file.
- * @path contains the path structure for the file.
- * Return 0 if permission is granted.
- * @inode_getattr:
- * Check permission before obtaining file attributes.
- * @path contains the path structure for the file.
- * Return 0 if permission is granted.
- * @inode_setxattr:
- * Check permission before setting the extended attributes
- * @value identified by @name for @dentry.
- * Return 0 if permission is granted.
- * @inode_post_setxattr:
- * Update inode security field after successful setxattr operation.
- * @value identified by @name for @dentry.
- * @inode_getxattr:
- * Check permission before obtaining the extended attributes
- * identified by @name for @dentry.
- * Return 0 if permission is granted.
- * @inode_listxattr:
- * Check permission before obtaining the list of extended attribute
- * names for @dentry.
- * Return 0 if permission is granted.
- * @inode_removexattr:
- * Check permission before removing the extended attribute
- * identified by @name for @dentry.
- * Return 0 if permission is granted.
- * @inode_getsecurity:
- * Retrieve a copy of the extended attribute representation of the
- * security label associated with @name for @inode via @buffer. Note that
- * @name is the remainder of the attribute name after the security prefix
- * has been removed. @alloc is used to specify if the call should return a
- * value via the buffer or just the value length. Return size of buffer on
- * success.
- * @inode_setsecurity:
- * Set the security label associated with @name for @inode from the
- * extended attribute value @value. @size indicates the size of the
- * @value in bytes. @flags may be XATTR_CREATE, XATTR_REPLACE, or 0.
- * Note that @name is the remainder of the attribute name after the
- * "security." prefix has been removed.
- * Return 0 on success.
- * @inode_listsecurity:
- * Copy the extended attribute names for the security labels
- * associated with @inode into @buffer. The maximum size of @buffer
- * is specified by @buffer_size. @buffer may be NULL to request
- * the size of the buffer required.
- * Returns number of bytes used/required on success.
- * @inode_need_killpriv:
- * Called when an inode has been changed.
- * @dentry is the dentry being changed.
- * Return <0 on error to abort the inode change operation.
- * Return 0 if inode_killpriv does not need to be called.
- * Return >0 if inode_killpriv does need to be called.
- * @inode_killpriv:
- * The setuid bit is being removed. Remove similar security labels.
- * Called with the dentry->d_inode->i_mutex held.
- * @dentry is the dentry being changed.
- * Return 0 on success. If error is returned, then the operation
- * causing setuid bit removal is failed.
- * @inode_getsecid:
- * Get the secid associated with the node.
- * @inode contains a pointer to the inode.
- * @secid contains a pointer to the location where result will be saved.
- * In case of failure, @secid will be set to zero.
- * @inode_copy_up:
- * A file is about to be copied up from lower layer to upper layer of
- * overlay filesystem. The security module can prepare a set of new
- * creds, modify them as needed, and return the new creds. The caller
- * will switch to the new creds temporarily to create the new file and
- * release the newly allocated creds.
- * @src indicates the union dentry of file that is being copied up.
- * @new pointer to pointer to return newly allocated creds.
- * Returns 0 on success or a negative error code on error.
- * @inode_copy_up_xattr:
- * Filter the xattrs being copied up when a unioned file is copied
- * up from a lower layer to the union/overlay layer.
- * @name indicates the name of the xattr.
- * Returns 0 to accept the xattr, 1 to discard the xattr, -EOPNOTSUPP if
- * security module does not know about attribute or a negative error code
- * to abort the copy up. Note that the caller is responsible for reading
- * and writing the xattrs as this hook is merely a filter.
- *
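
To make the inode hook conventions above concrete, a minimal, hypothetical inode_permission implementation (demo_inode_is_protected() is an invented predicate standing in for real policy state):

static int demo_inode_permission(struct inode *inode, int mask)
{
	/* Deny writes to inodes the invented policy marks as protected. */
	if ((mask & MAY_WRITE) && demo_inode_is_protected(inode))
		return -EACCES;
	return 0;
}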
- * Security hooks for file operations
- *
- * @file_permission:
- * Check file permissions before accessing an open file. This hook is
- * called by various operations that read or write files. A security
- * module can use this hook to perform additional checking on these
- * operations, e.g. to revalidate permissions on use to support privilege
- * bracketing or policy changes. Notice that this hook is used when the
- * actual read/write operations are performed, whereas the
- * inode_security_ops hook is called when a file is opened (as well as
- * many other operations).
- * Caveat: Although this hook can be used to revalidate permissions for
- * various system call operations that read or write files, it does not
- * address the revalidation of permissions for memory-mapped files.
- * Security modules must handle this separately if they need such
- * revalidation.
- * @file contains the file structure being accessed.
- * @mask contains the requested permissions.
- * Return 0 if permission is granted.
- * @file_alloc_security:
- * Allocate and attach a security structure to the file->f_security field.
- * The security field is initialized to NULL when the structure is first
- * created.
- * @file contains the file structure to secure.
- * Return 0 if the hook is successful and permission is granted.
- * @file_free_security:
- * Deallocate and free any security structures stored in file->f_security.
- * @file contains the file structure being modified.
- * @file_ioctl:
- * @file contains the file structure.
- * @cmd contains the operation to perform.
- * @arg contains the operational arguments.
- * Check permission for an ioctl operation on @file. Note that @arg
- * sometimes represents a user space pointer; in other cases, it may be a
- * simple integer value. When @arg represents a user space pointer, it
- * should never be used by the security module.
- * Return 0 if permission is granted.
- * @mmap_addr:
- * Check permissions for a mmap operation at @addr.
- * @addr contains virtual address that will be used for the operation.
- * Return 0 if permission is granted.
- * @mmap_file:
- * Check permissions for a mmap operation. The @file may be NULL, e.g.
- * if mapping anonymous memory.
- * @file contains the file structure for file to map (may be NULL).
- * @reqprot contains the protection requested by the application.
- * @prot contains the protection that will be applied by the kernel.
- * @flags contains the operational flags.
- * Return 0 if permission is granted.
- * @file_mprotect:
- * Check permissions before changing memory access permissions.
- * @vma contains the memory region to modify.
- * @reqprot contains the protection requested by the application.
- * @prot contains the protection that will be applied by the kernel.
- * Return 0 if permission is granted.
- * @file_lock:
- * Check permission before performing file locking operations.
- * Note: this hook mediates both flock and fcntl style locks.
- * @file contains the file structure.
- * @cmd contains the posix-translated lock operation to perform
- * (e.g. F_RDLCK, F_WRLCK).
- * Return 0 if permission is granted.
- * @file_fcntl:
- * Check permission before allowing the file operation specified by @cmd
- * from being performed on the file @file. Note that @arg sometimes
- * represents a user space pointer; in other cases, it may be a simple
- * integer value. When @arg represents a user space pointer, it should
- * never be used by the security module.
- * @file contains the file structure.
- * @cmd contains the operation to be performed.
- * @arg contains the operational arguments.
- * Return 0 if permission is granted.
- * @file_set_fowner:
- * Save owner security information (typically from current->security) in
- * file->f_security for later use by the send_sigiotask hook.
- * @file contains the file structure to update.
- * @file_send_sigiotask:
- * Check permission for the file owner @fown to send SIGIO or SIGURG to the
- * process @tsk. Note that this hook is sometimes called from interrupt.
- * Note that the fown_struct, @fown, is never outside the context of a
- * struct file, so the file structure (and associated security information)
- * can always be obtained: container_of(fown, struct file, f_owner)
- * @tsk contains the structure of task receiving signal.
- * @fown contains the file owner information.
- * @sig is the signal that will be sent. When 0, kernel sends SIGIO.
- * Return 0 if permission is granted.
- * @file_receive:
- * This hook allows security modules to control the ability of a process
- * to receive an open file descriptor via socket IPC.
- * @file contains the file structure being received.
- * Return 0 if permission is granted.
- * @file_open:
- * Save open-time permission checking state for later use upon
- * file_permission, and recheck access if anything has changed
- * since inode_permission.
- *
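
Reflecting the @arg caveat documented above, a minimal, hypothetical file_ioctl implementation (DEMO_BLOCKED_IOCTL is an invented command number):

static int demo_file_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	/* @arg may be a userspace pointer; it must never be dereferenced here. */
	if (cmd == DEMO_BLOCKED_IOCTL)	/* invented command */
		return -EPERM;
	return 0;
}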
- * Security hooks for task operations.
- *
- * @task_create:
- * Check permission before creating a child process. See the clone(2)
- * manual page for definitions of the @clone_flags.
- * @clone_flags contains the flags indicating what should be shared.
- * Return 0 if permission is granted.
- * @task_alloc:
- * @task task being allocated.
- * @clone_flags contains the flags indicating what should be shared.
- * Handle allocation of task-related resources.
- * Returns 0 on success, negative values on failure.
- * @task_free:
- * @task task about to be freed.
- * Handle release of task-related resources. (Note that this can be called
- * from interrupt context.)
- * @cred_alloc_blank:
- * @cred points to the credentials.
- * @gfp indicates the atomicity of any memory allocations.
- * Only allocate sufficient memory and attach to @cred such that
- * cred_transfer() will not get ENOMEM.
- * @cred_free:
- * @cred points to the credentials.
- * Deallocate and clear the cred->security field in a set of credentials.
- * @cred_prepare:
- * @new points to the new credentials.
- * @old points to the original credentials.
- * @gfp indicates the atomicity of any memory allocations.
- * Prepare a new set of credentials by copying the data from the old set.
- * @cred_transfer:
- * @new points to the new credentials.
- * @old points to the original credentials.
- * Transfer data from original creds to new creds
- * @kernel_act_as:
- * Set the credentials for a kernel service to act as (subjective context).
- * @new points to the credentials to be modified.
- * @secid specifies the security ID to be set
- * The current task must be the one that nominated @secid.
- * Return 0 if successful.
- * @kernel_create_files_as:
- * Set the file creation context in a set of credentials to be the same as
- * the objective context of the specified inode.
- * @new points to the credentials to be modified.
- * @inode points to the inode to use as a reference.
- * The current task must be the one that nominated @inode.
- * Return 0 if successful.
- * @kernel_module_request:
- * Ability to trigger the kernel to automatically upcall to userspace for
- * userspace to load a kernel module with the given name.
- * @kmod_name name of the module requested by the kernel
- * Return 0 if successful.
- * @kernel_read_file:
- * Read a file specified by userspace.
- * @file contains the file structure pointing to the file being read
- * by the kernel.
- * @id kernel read file identifier
- * Return 0 if permission is granted.
- * @kernel_post_read_file:
- * Read a file specified by userspace.
- * @file contains the file structure pointing to the file being read
- * by the kernel.
- * @buf pointer to buffer containing the file contents.
- * @size length of the file contents.
- * @id kernel read file identifier
- * Return 0 if permission is granted.
- * @task_fix_setuid:
- * Update the module's state after setting one or more of the user
- * identity attributes of the current process. The @flags parameter
- * indicates which of the set*uid system calls invoked this hook.
- * @new is the set of credentials that will be installed. Modifications
- * should be made to this rather than to @current->cred.
- * @old is the set of credentials that are being replaced.
- * @flags contains one of the LSM_SETID_* values.
- * Return 0 on success.
- * @task_setpgid:
- * Check permission before setting the process group identifier of the
- * process @p to @pgid.
- * @p contains the task_struct for process being modified.
- * @pgid contains the new pgid.
- * Return 0 if permission is granted.
- * @task_getpgid:
- * Check permission before getting the process group identifier of the
- * process @p.
- * @p contains the task_struct for the process.
- * Return 0 if permission is granted.
- * @task_getsid:
- * Check permission before getting the session identifier of the process
- * @p.
- * @p contains the task_struct for the process.
- * Return 0 if permission is granted.
- * @task_getsecid:
- * Retrieve the security identifier of the process @p.
- * @p contains the task_struct for the process; the result is placed in @secid.
- * In case of failure, @secid will be set to zero.
- *
- * @task_setnice:
- * Check permission before setting the nice value of @p to @nice.
- * @p contains the task_struct of process.
- * @nice contains the new nice value.
- * Return 0 if permission is granted.
- * @task_setioprio:
- * Check permission before setting the ioprio value of @p to @ioprio.
- * @p contains the task_struct of process.
- * @ioprio contains the new ioprio value
- * Return 0 if permission is granted.
- * @task_getioprio:
- * Check permission before getting the ioprio value of @p.
- * @p contains the task_struct of process.
- * Return 0 if permission is granted.
- * @task_prlimit:
- * Check permission before getting and/or setting the resource limits of
- * another task.
- * @cred points to the cred structure for the current task.
- * @tcred points to the cred structure for the target task.
- * @flags contains the LSM_PRLIMIT_* flag bits indicating whether the
- * resource limits are being read, modified, or both.
- * Return 0 if permission is granted.
- * @task_setrlimit:
- * Check permission before setting the resource limits of process @p
- * for @resource to @new_rlim. The old resource limit values can
- * be examined by dereferencing (p->signal->rlim + resource).
- * @p points to the task_struct for the target task's group leader.
- * @resource contains the resource whose limit is being set.
- * @new_rlim contains the new limits for @resource.
- * Return 0 if permission is granted.
- * @task_setscheduler:
- * Check permission before setting scheduling policy and/or parameters of
- * process @p based on @policy and @lp.
- * @p contains the task_struct for process.
- * @policy contains the scheduling policy.
- * @lp contains the scheduling parameters.
- * Return 0 if permission is granted.
- * @task_getscheduler:
- * Check permission before obtaining scheduling information for process
- * @p.
- * @p contains the task_struct for process.
- * Return 0 if permission is granted.
- * @task_movememory:
- * Check permission before moving memory owned by process @p.
- * @p contains the task_struct for process.
- * Return 0 if permission is granted.
- * @task_kill:
- * Check permission before sending signal @sig to @p. @info can be NULL,
- * the constant 1, or a pointer to a siginfo structure. If @info is 1 or
- * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming
- * from the kernel and should typically be permitted.
- * SIGIO signals are handled separately by the send_sigiotask hook in
- * file_security_ops.
- * @p contains the task_struct for process.
- * @info contains the signal information.
- * @sig contains the signal value.
- * @secid contains the sid of the process where the signal originated
- * Return 0 if permission is granted.
- * @task_prctl:
- * Check permission before performing a process control operation on the
- * current process.
- * @option contains the operation.
- * @arg2 contains an argument.
- * @arg3 contains an argument.
- * @arg4 contains an argument.
- * @arg5 contains an argument.
- * Return -ENOSYS if no-one wanted to handle this op, any other value to
- * cause prctl() to return immediately with that value.
- * @task_to_inode:
- * Set the security attributes for an inode based on an associated task's
- * security attributes, e.g. for /proc/pid inodes.
- * @p contains the task_struct for the task.
- * @inode contains the inode structure for the inode.
- *
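
The -ENOSYS convention for task_prctl described above looks like this in practice (a hypothetical sketch; PR_DEMO is an invented prctl option):

static int demo_task_prctl(int option, unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, unsigned long arg5)
{
	if (option != PR_DEMO)		/* invented option */
		return -ENOSYS;		/* not ours: let other handlers run */
	return 0;			/* handled: prctl() returns 0 */
}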
- * Security hooks for Netlink messaging.
- *
- * @netlink_send:
- * Save security information for a netlink message so that permission
- * checking can be performed when the message is processed. The security
- * information can be saved using the eff_cap field of the
- * netlink_skb_parms structure. Also may be used to provide fine
- * grained control over message transmission.
- * @sk associated sock of task sending the message.
- * @skb contains the sk_buff structure for the netlink message.
- * Return 0 if the information was successfully saved and message
- * is allowed to be transmitted.
- *
- * Security hooks for Unix domain networking.
- *
- * @unix_stream_connect:
- * Check permissions before establishing a Unix domain stream connection
- * between @sock and @other.
- * @sock contains the sock structure.
- * @other contains the peer sock structure.
- * @newsk contains the new sock structure.
- * Return 0 if permission is granted.
- * @unix_may_send:
- * Check permissions before connecting or sending datagrams from @sock to
- * @other.
- * @sock contains the socket structure.
- * @other contains the peer socket structure.
- * Return 0 if permission is granted.
- *
- * The @unix_stream_connect and @unix_may_send hooks were necessary because
- * Linux provides an alternative to the conventional file name space for Unix
- * domain sockets. Whereas binding and connecting to sockets in the file name
- * space is mediated by the typical file permissions (and caught by the mknod
- * and permission hooks in inode_security_ops), binding and connecting to
- * sockets in the abstract name space is completely unmediated. Sufficient
- * control of Unix domain sockets in the abstract name space isn't possible
- * using only the socket layer hooks, since we need to know the actual target
- * socket, which is not looked up until we are inside the af_unix code.
- *
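
The socket hooks that follow share the same shape; a minimal, hypothetical socket_connect implementation (the port-25 rule is an invented policy):

#include <linux/in.h>

static int demo_socket_connect(struct socket *sock, struct sockaddr *address,
			       int addrlen)
{
	/* Invented policy: block IPv4 connects to port 25. */
	if (address->sa_family == AF_INET &&
	    addrlen >= (int)sizeof(struct sockaddr_in)) {
		struct sockaddr_in *sin = (struct sockaddr_in *)address;

		if (sin->sin_port == htons(25))
			return -EPERM;
	}
	return 0;
}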
- * Security hooks for socket operations.
- *
- * @socket_create:
- * Check permissions prior to creating a new socket.
- * @family contains the requested protocol family.
- * @type contains the requested communications type.
- * @protocol contains the requested protocol.
- * @kern set to 1 if a kernel socket.
- * Return 0 if permission is granted.
- * @socket_post_create:
- * This hook allows a module to update or allocate a per-socket security
- * structure. Note that the security field was not added directly to the
- * socket structure, but rather, the socket security information is stored
- * in the associated inode. Typically, the inode alloc_security hook will
- * allocate and attach security information to
- * sock->inode->i_security. This hook may be used to update the
- * sock->inode->i_security field with additional information that wasn't
- * available when the inode was allocated.
- * @sock contains the newly created socket structure.
- * @family contains the requested protocol family.
- * @type contains the requested communications type.
- * @protocol contains the requested protocol.
- * @kern set to 1 if a kernel socket.
- * @socket_bind:
- * Check permission before socket protocol layer bind operation is
- * performed and the socket @sock is bound to the address specified in the
- * @address parameter.
- * @sock contains the socket structure.
- * @address contains the address to bind to.
- * @addrlen contains the length of address.
- * Return 0 if permission is granted.
- * @socket_connect:
- * Check permission before socket protocol layer connect operation
- * attempts to connect socket @sock to a remote address, @address.
- * @sock contains the socket structure.
- * @address contains the address of remote endpoint.
- * @addrlen contains the length of address.
- * Return 0 if permission is granted.
- * @socket_listen:
- * Check permission before socket protocol layer listen operation.
- * @sock contains the socket structure.
- * @backlog contains the maximum length for the pending connection queue.
- * Return 0 if permission is granted.
- * @socket_accept:
- * Check permission before accepting a new connection. Note that the new
- * socket, @newsock, has been created and some information copied to it,
- * but the accept operation has not actually been performed.
- * @sock contains the listening socket structure.
- * @newsock contains the newly created server socket for connection.
- * Return 0 if permission is granted.
- * @socket_sendmsg:
- * Check permission before transmitting a message to another socket.
- * @sock contains the socket structure.
- * @msg contains the message to be transmitted.
- * @size contains the size of message.
- * Return 0 if permission is granted.
- * @socket_recvmsg:
- * Check permission before receiving a message from a socket.
- * @sock contains the socket structure.
- * @msg contains the message structure.
- * @size contains the size of message structure.
- * @flags contains the operational flags.
- * Return 0 if permission is granted.
- * @socket_getsockname:
- * Check permission before the local address (name) of the socket object
- * @sock is retrieved.
- * @sock contains the socket structure.
- * Return 0 if permission is granted.
- * @socket_getpeername:
- * Check permission before the remote address (name) of a socket object
- * @sock is retrieved.
- * @sock contains the socket structure.
- * Return 0 if permission is granted.
- * @socket_getsockopt:
- * Check permissions before retrieving the options associated with socket
- * @sock.
- * @sock contains the socket structure.
- * @level contains the protocol level to retrieve option from.
- * @optname contains the name of option to retrieve.
- * Return 0 if permission is granted.
- * @socket_setsockopt:
- * Check permissions before setting the options associated with socket
- * @sock.
- * @sock contains the socket structure.
- * @level contains the protocol level to set options for.
- * @optname contains the name of the option to set.
- * Return 0 if permission is granted.
- * @socket_shutdown:
- * Checks permission before all or part of a connection on the socket
- * @sock is shut down.
- * @sock contains the socket structure.
- * @how contains the flag indicating how future sends and receives
- * are handled.
- * Return 0 if permission is granted.
- * @socket_sock_rcv_skb:
- * Check permissions on incoming network packets. This hook is distinct
- * from Netfilter's IP input hooks since it is the first time that the
- * incoming sk_buff @skb has been associated with a particular socket, @sk.
- * Must not sleep inside this hook because some callers hold spinlocks.
- * @sk contains the sock (not socket) associated with the incoming sk_buff.
- * @skb contains the incoming network data.
- * @socket_getpeersec_stream:
- * This hook allows the security module to provide peer socket security
- * state for unix or connected tcp sockets to userspace via getsockopt
- * SO_PEERSEC. For tcp sockets this can be meaningful if the
- * socket is associated with an ipsec SA.
- * @sock is the local socket.
- * @optval userspace memory where the security state is to be copied.
- * @optlen userspace int where the module should copy the actual length
- * of the security state.
- * @len as input is the maximum length to copy to userspace provided
- * by the caller.
- * Return 0 if all is well, otherwise, typical getsockopt return
- * values.
- * @socket_getpeersec_dgram:
- * This hook allows the security module to provide peer socket security
- * state for udp sockets on a per-packet basis to userspace via
- * getsockopt SO_PEERSEC. The application must first have indicated
- * the IP_PASSSEC option via setsockopt. It can then retrieve the
- * security state returned by this hook for a packet via the SCM_SECURITY
- * ancillary message type.
- * @skb is the skbuff for the packet being queried
- * @secdata is a pointer to a buffer in which to copy the security data
- * @seclen is the maximum length for @secdata
- * Return 0 on success, error on failure.
- * @sk_alloc_security:
- * Allocate and attach a security structure to the sk->sk_security field,
- * which is used to copy security attributes between local stream sockets.
- * @sk_free_security:
- * Deallocate security structure.
- * @sk_clone_security:
- * Clone/copy security structure.
- * @sk_getsecid:
- * Retrieve the LSM-specific secid for the sock to enable caching
- * of network authorizations.
- * @sock_graft:
- * Sets the socket's isec sid to the sock's sid.
- * @inet_conn_request:
- * Sets the openreq's sid to socket's sid with MLS portion taken
- * from peer sid.
- * @inet_csk_clone:
- * Sets the new child socket's sid to the openreq sid.
- * @inet_conn_established:
- * Sets the connection's peersid to the secmark on skb.
- * @secmark_relabel_packet:
- * check if the process should be allowed to relabel packets to
- * the given secid
- * @secmark_refcount_inc:
- * tells the LSM to increment the number of secmark labeling rules loaded
- * @secmark_refcount_dec:
- * tells the LSM to decrement the number of secmark labeling rules loaded
- * @req_classify_flow:
- * Sets the flow's sid to the openreq sid.
- * @tun_dev_alloc_security:
- * This hook allows a module to allocate a security structure for a TUN
- * device.
- * @security pointer to a security structure pointer.
- * Returns 0 on success, negative values on failure.
- * @tun_dev_free_security:
- * This hook allows a module to free the security structure for a TUN
- * device.
- * @security pointer to the TUN device's security structure
- * @tun_dev_create:
- * Check permissions prior to creating a new TUN device.
- * @tun_dev_attach_queue:
- * Check permissions prior to attaching to a TUN device queue.
- * @security pointer to the TUN device's security structure.
- * @tun_dev_attach:
- * This hook can be used by the module to update any security state
- * associated with the TUN device's sock structure.
- * @sk contains the existing sock structure.
- * @security pointer to the TUN device's security structure.
- * @tun_dev_open:
- * This hook can be used by the module to update any security state
- * associated with the TUN device's security structure.
- * @security pointer to the TUN device's security structure.
- *
- * Security hooks for Infiniband
- *
- * @ib_pkey_access:
- * Check permission to access a pkey when modifying a QP.
- * @subnet_prefix the subnet prefix of the port being used.
- * @pkey the pkey to be accessed.
- * @sec pointer to a security structure.
- * @ib_endport_manage_subnet:
- * Check permissions to send and receive SMPs on an end port.
- * @dev_name the IB device name (e.g. mlx4_0).
- * @port_num the port number.
- * @sec pointer to a security structure.
- * @ib_alloc_security:
- * Allocate a security structure for Infiniband objects.
- * @sec pointer to a security structure pointer.
- * Returns 0 on success, non-zero on failure
- * @ib_free_security:
- * Deallocate an Infiniband security structure.
- * @sec contains the security structure to be freed.
- *
- * Security hooks for XFRM operations.
- *
- * @xfrm_policy_alloc_security:
- * @ctxp is a pointer to the xfrm_sec_ctx being added to the Security
- * Policy Database used by the XFRM system.
- * @sec_ctx contains the security context information being provided by
- * the user-level policy update program (e.g., setkey).
- * @gfp specifies the context for the allocation.
- * Allocate a security structure to the xp->security field; the security
- * field is initialized to NULL when the xfrm_policy is allocated.
- * Return 0 if operation was successful (memory to allocate, legal context).
- * @xfrm_policy_clone_security:
- * @old_ctx contains an existing xfrm_sec_ctx.
- * @new_ctxp contains a new xfrm_sec_ctx being cloned from old.
- * Allocate a security structure in new_ctxp that contains the
- * information from the old_ctx structure.
- * Return 0 if operation was successful (memory to allocate).
- * @xfrm_policy_free_security:
- * @ctx contains the xfrm_sec_ctx
- * Deallocate xp->security.
- * @xfrm_policy_delete_security:
- * @ctx contains the xfrm_sec_ctx.
- * Authorize deletion of xp->security.
- * @xfrm_state_alloc:
- * @x contains the xfrm_state being added to the Security Association
- * Database by the XFRM system.
- * @sec_ctx contains the security context information being provided by
- * the user-level SA generation program (e.g., setkey or racoon).
- * Allocate a security structure to the x->security field; the security
- * field is initialized to NULL when the xfrm_state is allocated. Set the
- * context to correspond to sec_ctx. Return 0 if operation was successful
- * (memory to allocate, legal context).
- * @xfrm_state_alloc_acquire:
- * @x contains the xfrm_state being added to the Security Association
- * Database by the XFRM system.
- * @polsec contains the policy's security context.
- * @secid contains the secid from which to take the mls portion of the
- * context.
- * Allocate a security structure to the x->security field; the security
- * field is initialized to NULL when the xfrm_state is allocated. Set the
- * context to correspond to secid. Return 0 if operation was successful
- * (memory to allocate, legal context).
- * @xfrm_state_free_security:
- * @x contains the xfrm_state.
- * Deallocate x->security.
- * @xfrm_state_delete_security:
- * @x contains the xfrm_state.
- * Authorize deletion of x->security.
- * @xfrm_policy_lookup:
- * @ctx contains the xfrm_sec_ctx for which the access control is being
- * checked.
- * @fl_secid contains the flow security label that is used to authorize
- * access to the policy xp.
- * @dir contains the direction of the flow (input or output).
- * Check permission when a flow selects an xfrm_policy for processing
- * XFRMs on a packet. The hook is called when selecting either a
- * per-socket policy or a generic xfrm policy.
- * Return 0 if permission is granted, -ESRCH otherwise, or -errno
- * on other errors.
- * @xfrm_state_pol_flow_match:
- * @x contains the state to match.
- * @xp contains the policy to check for a match.
- * @fl contains the flow to check for a match.
- * Return 1 if there is a match.
- * @xfrm_decode_session:
- * @skb points to skb to decode.
- * @secid points to the flow key secid to set.
- * @ckall says whether all xfrms used should be checked for the same secid.
- * Return 0 if ckall is zero or all xfrms used have the same secid.
- *
- * Security hooks affecting all Key Management operations
- *
- * @key_alloc:
- * Permit allocation of a key and assign security data. Note that key does
- * not have a serial number assigned at this point.
- * @key points to the key.
- * @flags is the allocation flags
- * Return 0 if permission is granted, -ve error otherwise.
- * @key_free:
- * Notification of destruction; free security data.
- * @key points to the key.
- * No return value.
- * @key_permission:
- * See whether a specific operational right is granted to a process on a
- * key.
- * @key_ref refers to the key (key pointer + possession attribute bit).
- * @cred points to the credentials to provide the context against which to
- * evaluate the security data on the key.
- * @perm describes the combination of permissions required of this key.
- * Return 0 if permission is granted, -ve error otherwise.
- * @key_getsecurity:
- * Get a textual representation of the security context attached to a key
- * for the purposes of honouring KEYCTL_GETSECURITY. This function
- * allocates the storage for the NUL-terminated string and the caller
- * should free it.
- * @key points to the key to be queried.
- * @_buffer points to a pointer that should be set to point to the
- * resulting string (left unset if there is no label or an error occurs).
- * Return the length of the string (including the terminating NUL) or -ve
- * on error.
- * May also return 0 (and a NULL buffer pointer) if there is no label.
- *
- * Security hooks affecting all System V IPC operations.
- *
- * @ipc_permission:
- * Check permissions for access to IPC
- * @ipcp contains the kernel IPC permission structure
- * @flag contains the desired (requested) permission set
- * Return 0 if permission is granted.
- * @ipc_getsecid:
- * Get the secid associated with the ipc object.
- * @ipcp contains the kernel IPC permission structure.
- * @secid contains a pointer to the location where result will be saved.
- * In case of failure, @secid will be set to zero.
- *
- * Security hooks for individual messages held in System V IPC message queues
- * @msg_msg_alloc_security:
- * Allocate and attach a security structure to the msg->security field.
- * The security field is initialized to NULL when the structure is first
- * created.
- * @msg contains the message structure to be modified.
- * Return 0 if operation was successful and permission is granted.
- * @msg_msg_free_security:
- * Deallocate the security structure for this message.
- * @msg contains the message structure to be modified.
- *
- * Security hooks for System V IPC Message Queues
- *
- * @msg_queue_alloc_security:
- * Allocate and attach a security structure to the
- * msq->q_perm.security field. The security field is initialized to
- * NULL when the structure is first created.
- * @msq contains the message queue structure to be modified.
- * Return 0 if operation was successful and permission is granted.
- * @msg_queue_free_security:
- * Deallocate security structure for this message queue.
- * @msq contains the message queue structure to be modified.
- * @msg_queue_associate:
- * Check permission when a message queue is requested through the
- * msgget system call. This hook is only called when returning the
- * message queue identifier for an existing message queue, not when a
- * new message queue is created.
- * @msq contains the message queue to act upon.
- * @msqflg contains the operation control flags.
- * Return 0 if permission is granted.
- * @msg_queue_msgctl:
- * Check permission when a message control operation specified by @cmd
- * is to be performed on the message queue @msq.
- * The @msq may be NULL, e.g. for IPC_INFO or MSG_INFO.
- * @msq contains the message queue to act upon. May be NULL.
- * @cmd contains the operation to be performed.
- * Return 0 if permission is granted.
- * @msg_queue_msgsnd:
- * Check permission before a message, @msg, is enqueued on the message
- * queue, @msq.
- * @msq contains the message queue to send message to.
- * @msg contains the message to be enqueued.
- * @msqflg contains operational flags.
- * Return 0 if permission is granted.
- * @msg_queue_msgrcv:
- * Check permission before a message, @msg, is removed from the message
- * queue, @msq. The @target task structure contains a pointer to the
- * process that will be receiving the message (not equal to the current
- * process when inline receives are being performed).
- * @msq contains the message queue to retrieve message from.
- * @msg contains the message destination.
- * @target contains the task structure for recipient process.
- * @type contains the type of message requested.
- * @mode contains the operational flags.
- * Return 0 if permission is granted.
- *
- * Security hooks for System V Shared Memory Segments
- *
- * @shm_alloc_security:
- * Allocate and attach a security structure to the shp->shm_perm.security
- * field. The security field is initialized to NULL when the structure is
- * first created.
- * @shp contains the shared memory structure to be modified.
- * Return 0 if operation was successful and permission is granted.
- * @shm_free_security:
- * Deallocate the security struct for this memory segment.
- * @shp contains the shared memory structure to be modified.
- * @shm_associate:
- * Check permission when a shared memory region is requested through the
- * shmget system call. This hook is only called when returning the shared
- * memory region identifier for an existing region, not when a new shared
- * memory region is created.
- * @shp contains the shared memory structure to be modified.
- * @shmflg contains the operation control flags.
- * Return 0 if permission is granted.
- * @shm_shmctl:
- * Check permission when a shared memory control operation specified by
- * @cmd is to be performed on the shared memory region @shp.
- * The @shp may be NULL, e.g. for IPC_INFO or SHM_INFO.
- * @shp contains shared memory structure to be modified.
- * @cmd contains the operation to be performed.
- * Return 0 if permission is granted.
- * @shm_shmat:
- * Check permissions prior to allowing the shmat system call to attach the
- * shared memory segment @shp to the data segment of the calling process.
- * The attaching address is specified by @shmaddr.
- * @shp contains the shared memory structure to be modified.
- * @shmaddr contains the address to attach memory region to.
- * @shmflg contains the operational flags.
- * Return 0 if permission is granted.
- *
- * Security hooks for System V Semaphores
- *
- * @sem_alloc_security:
- * Allocate and attach a security structure to the sma->sem_perm.security
- * field. The security field is initialized to NULL when the structure is
- * first created.
- * @sma contains the semaphore structure
- * Return 0 if operation was successful and permission is granted.
- * @sem_free_security:
- * deallocate security struct for this semaphore
- * @sma contains the semaphore structure.
- * @sem_associate:
- * Check permission when a semaphore is requested through the semget
- * system call. This hook is only called when returning the semaphore
- * identifier for an existing semaphore, not when a new one must be
- * created.
- * @sma contains the semaphore structure.
- * @semflg contains the operation control flags.
- * Return 0 if permission is granted.
- * @sem_semctl:
- * Check permission when a semaphore operation specified by @cmd is to be
- * performed on the semaphore @sma. The @sma may be NULL, e.g. for
- * IPC_INFO or SEM_INFO.
- * @sma contains the semaphore structure. May be NULL.
- * @cmd contains the operation to be performed.
- * Return 0 if permission is granted.
- * @sem_semop:
- * Check permissions before performing operations on members of the
- * semaphore set @sma. If the @alter flag is nonzero, the semaphore set
- * may be modified.
- * @sma contains the semaphore structure.
- * @sops contains the operations to perform.
- * @nsops contains the number of operations to perform.
- * @alter contains the flag indicating whether changes are to be made.
- * Return 0 if permission is granted.
- *
- * @binder_set_context_mgr:
- * Check whether @mgr is allowed to be the binder context manager.
- * @mgr contains the task_struct for the task being registered.
- * Return 0 if permission is granted.
- * @binder_transaction:
- * Check whether @from is allowed to invoke a binder transaction call
- * to @to.
- * @from contains the task_struct for the sending task.
- * @to contains the task_struct for the receiving task.
- * @binder_transfer_binder:
- * Check whether @from is allowed to transfer a binder reference to @to.
- * @from contains the task_struct for the sending task.
- * @to contains the task_struct for the receiving task.
- * @binder_transfer_file:
- * Check whether @from is allowed to transfer @file to @to.
- * @from contains the task_struct for the sending task.
- * @file contains the struct file being transferred.
- * @to contains the task_struct for the receiving task.
- *
- * @ptrace_access_check:
- * Check permission before allowing the current process to trace the
- * @child process.
- * Security modules may also want to perform a process tracing check
- * during an execve in the bprm_set_creds hook of binprm_security_ops
- * if the process is being traced and its security attributes would
- * be changed by the execve.
- * @child contains the task_struct structure for the target process.
- * @mode contains the PTRACE_MODE flags indicating the form of access.
- * Return 0 if permission is granted.
- * @ptrace_traceme:
- * Check that the @parent process has sufficient permission to trace the
- * current process before allowing the current process to present itself
- * to the @parent process for tracing.
- * @parent contains the task_struct structure for debugger process.
- * Return 0 if permission is granted.
- * @capget:
- * Get the @effective, @inheritable, and @permitted capability sets for
- * the @target process. The hook may also perform permission checking to
- * determine if the current process is allowed to see the capability sets
- * of the @target process.
- * @target contains the task_struct structure for target process.
- * @effective contains the effective capability set.
- * @inheritable contains the inheritable capability set.
- * @permitted contains the permitted capability set.
- * Return 0 if the capability sets were successfully obtained.
- * @capset:
- * Set the @effective, @inheritable, and @permitted capability sets for
- * the current process.
- * @new contains the new credentials structure for target process.
- * @old contains the current credentials structure for target process.
- * @effective contains the effective capability set.
- * @inheritable contains the inheritable capability set.
- * @permitted contains the permitted capability set.
- * Return 0 and update @new if permission is granted.
- * @capable:
- * Check whether the @cred credentials have the @cap capability in the
- * indicated user namespace.
- * @cred contains the credentials to use.
- * @ns contains the user namespace we want the capability in.
- * @cap contains the capability <include/linux/capability.h>.
- * @audit contains whether to write an audit message or not.
- * Return 0 if the capability is granted.
- * @syslog:
- * Check permission before accessing the kernel message ring or changing
- * logging to the console.
- * See the syslog(2) manual page for an explanation of the @type values.
- * @type contains the type of action.
- * @from_file indicates the context of action (if it came from /proc).
- * Return 0 if permission is granted.
- * @settime:
- * Check permission to change the system time.
- * struct timespec64 is defined in include/linux/time64.h and timezone
- * is defined in include/linux/time.h
- * @ts contains new time
- * @tz contains new timezone
- * Return 0 if permission is granted.
- * @vm_enough_memory:
- * Check permissions for allocating a new virtual mapping.
- * @mm contains the mm struct it is being added to.
- * @pages contains the number of pages.
- * Return 0 if permission is granted.
- *
- * @ismaclabel:
- * Check if the extended attribute specified by @name
- * represents a MAC label. Returns 1 if @name is a MAC
- * attribute, otherwise returns 0.
- * @name full extended attribute name to check against
- * LSM as a MAC label.
- *
- * @secid_to_secctx:
- * Convert secid to security context. If secdata is NULL the length of
- * the result will be returned in seclen, but no secdata will be returned.
- * This does mean that the length could change between calls to check the
- * length and the next call which actually allocates and returns the
- * secdata.
- * @secid contains the security ID.
- * @secdata contains the pointer that stores the converted security
- * context.
- * @seclen pointer which contains the length of the data
- * @secctx_to_secid:
- * Convert security context to secid.
- * @secid contains the pointer to the generated security ID.
- * @secdata contains the security context.
- *
- * @release_secctx:
- * Release the security context.
- * @secdata contains the security context.
- * @seclen contains the length of the security context.
- *
- * Security hooks for Audit
- *
- * @audit_rule_init:
- * Allocate and initialize an LSM audit rule structure.
- * @field contains the required Audit action.
- * Field flags are defined in include/linux/audit.h.
- * @op contains the operator the rule uses.
- * @rulestr contains the context to which the rule will be applied.
- * @lsmrule contains a pointer to receive the result.
- * Return 0 if @lsmrule has been successfully set,
- * -EINVAL in case of an invalid rule.
- *
- * @audit_rule_known:
- * Specifies whether the given @rule contains any fields related to
- * the current LSM.
- * @rule contains the audit rule of interest.
- * Return 1 in case of relation found, 0 otherwise.
- *
- * @audit_rule_match:
- * Determine if given @secid matches a rule previously approved
- * by @audit_rule_known.
- * @secid contains the security id in question.
- * @field contains the field which relates to the current LSM.
- * @op contains the operator that will be used for matching.
- * @rule points to the audit rule that will be checked against.
- * @actx points to the audit context associated with the check.
- * Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure.
- *
- * @audit_rule_free:
- * Deallocate the LSM audit rule structure previously allocated by
- * audit_rule_init.
- * @rule contains the allocated rule
- *
- * @inode_invalidate_secctx:
- * Notify the security module that it must revalidate the security context
- * of an inode.
- *
- * @inode_notifysecctx:
- * Notify the security module of what the security context of an inode
- * should be. Initializes the incore security context managed by the
- * security module for this inode. Example usage: NFS client invokes
- * this hook to initialize the security context in its incore inode to the
- * value provided by the server for the file when the server returned the
- * file's attributes to the client.
- *
- * Must be called with inode->i_mutex locked.
- *
- * @inode we wish to set the security context of.
- * @ctx contains the string which we wish to set in the inode.
- * @ctxlen contains the length of @ctx.
- *
- * @inode_setsecctx:
- * Change the security context of an inode. Updates the
- * incore security context managed by the security module and invokes the
- * fs code as needed (via __vfs_setxattr_noperm) to update any backing
- * xattrs that represent the context. Example usage: NFS server invokes
- * this hook to change the security context in its incore inode and on the
- * backing filesystem to a value provided by the client on a SETATTR
- * operation.
- *
- * Must be called with inode->i_mutex locked.
- *
- * @dentry contains the inode we wish to set the security context of.
- * @ctx contains the string which we wish to set in the inode.
- * @ctxlen contains the length of @ctx.
- *
- * @inode_getsecctx:
- * On success, returns 0 and fills out @ctx and @ctxlen with the security
- * context for the given @inode.
- *
- * @inode we wish to get the security context of.
- * @ctx is a pointer in which to place the allocated security context.
- * @ctxlen points to the place to put the length of @ctx.
- */
union security_list_options {
- int (*binder_set_context_mgr)(struct task_struct *mgr);
- int (*binder_transaction)(struct task_struct *from,
- struct task_struct *to);
- int (*binder_transfer_binder)(struct task_struct *from,
- struct task_struct *to);
- int (*binder_transfer_file)(struct task_struct *from,
- struct task_struct *to,
- struct file *file);
-
- int (*ptrace_access_check)(struct task_struct *child,
- unsigned int mode);
- int (*ptrace_traceme)(struct task_struct *parent);
- int (*capget)(struct task_struct *target, kernel_cap_t *effective,
- kernel_cap_t *inheritable, kernel_cap_t *permitted);
- int (*capset)(struct cred *new, const struct cred *old,
- const kernel_cap_t *effective,
- const kernel_cap_t *inheritable,
- const kernel_cap_t *permitted);
- int (*capable)(const struct cred *cred, struct user_namespace *ns,
- int cap, int audit);
- int (*quotactl)(int cmds, int type, int id, struct super_block *sb);
- int (*quota_on)(struct dentry *dentry);
- int (*syslog)(int type);
- int (*settime)(const struct timespec64 *ts, const struct timezone *tz);
- int (*vm_enough_memory)(struct mm_struct *mm, long pages);
-
- int (*bprm_set_creds)(struct linux_binprm *bprm);
- int (*bprm_check_security)(struct linux_binprm *bprm);
- int (*bprm_secureexec)(struct linux_binprm *bprm);
- void (*bprm_committing_creds)(struct linux_binprm *bprm);
- void (*bprm_committed_creds)(struct linux_binprm *bprm);
-
- int (*sb_alloc_security)(struct super_block *sb);
- void (*sb_free_security)(struct super_block *sb);
- int (*sb_copy_data)(char *orig, char *copy);
- int (*sb_remount)(struct super_block *sb, void *data);
- int (*sb_kern_mount)(struct super_block *sb, int flags, void *data);
- int (*sb_show_options)(struct seq_file *m, struct super_block *sb);
- int (*sb_statfs)(struct dentry *dentry);
- int (*sb_mount)(const char *dev_name, const struct path *path,
- const char *type, unsigned long flags, void *data);
- int (*sb_umount)(struct vfsmount *mnt, int flags);
- int (*sb_pivotroot)(const struct path *old_path, const struct path *new_path);
- int (*sb_set_mnt_opts)(struct super_block *sb,
- struct security_mnt_opts *opts,
- unsigned long kern_flags,
- unsigned long *set_kern_flags);
- int (*sb_clone_mnt_opts)(const struct super_block *oldsb,
- struct super_block *newsb,
- unsigned long kern_flags,
- unsigned long *set_kern_flags);
- int (*sb_parse_opts_str)(char *options, struct security_mnt_opts *opts);
- int (*dentry_init_security)(struct dentry *dentry, int mode,
- const struct qstr *name, void **ctx,
- u32 *ctxlen);
- int (*dentry_create_files_as)(struct dentry *dentry, int mode,
- struct qstr *name,
- const struct cred *old,
- struct cred *new);
-
-
-#ifdef CONFIG_SECURITY_PATH
- int (*path_unlink)(const struct path *dir, struct dentry *dentry);
- int (*path_mkdir)(const struct path *dir, struct dentry *dentry,
- umode_t mode);
- int (*path_rmdir)(const struct path *dir, struct dentry *dentry);
- int (*path_mknod)(const struct path *dir, struct dentry *dentry,
- umode_t mode, unsigned int dev);
- int (*path_truncate)(const struct path *path);
- int (*path_symlink)(const struct path *dir, struct dentry *dentry,
- const char *old_name);
- int (*path_link)(struct dentry *old_dentry, const struct path *new_dir,
- struct dentry *new_dentry);
- int (*path_rename)(const struct path *old_dir, struct dentry *old_dentry,
- const struct path *new_dir,
- struct dentry *new_dentry);
- int (*path_chmod)(const struct path *path, umode_t mode);
- int (*path_chown)(const struct path *path, kuid_t uid, kgid_t gid);
- int (*path_chroot)(const struct path *path);
-#endif
-
- int (*inode_alloc_security)(struct inode *inode);
- void (*inode_free_security)(struct inode *inode);
- int (*inode_init_security)(struct inode *inode, struct inode *dir,
- const struct qstr *qstr,
- const char **name, void **value,
- size_t *len);
- int (*inode_create)(struct inode *dir, struct dentry *dentry,
- umode_t mode);
- int (*inode_link)(struct dentry *old_dentry, struct inode *dir,
- struct dentry *new_dentry);
- int (*inode_unlink)(struct inode *dir, struct dentry *dentry);
- int (*inode_symlink)(struct inode *dir, struct dentry *dentry,
- const char *old_name);
- int (*inode_mkdir)(struct inode *dir, struct dentry *dentry,
- umode_t mode);
- int (*inode_rmdir)(struct inode *dir, struct dentry *dentry);
- int (*inode_mknod)(struct inode *dir, struct dentry *dentry,
- umode_t mode, dev_t dev);
- int (*inode_rename)(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir,
- struct dentry *new_dentry);
- int (*inode_readlink)(struct dentry *dentry);
- int (*inode_follow_link)(struct dentry *dentry, struct inode *inode,
- bool rcu);
- int (*inode_permission)(struct inode *inode, int mask);
- int (*inode_setattr)(struct dentry *dentry, struct iattr *attr);
- int (*inode_getattr)(const struct path *path);
- int (*inode_setxattr)(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags);
- void (*inode_post_setxattr)(struct dentry *dentry, const char *name,
- const void *value, size_t size,
- int flags);
- int (*inode_getxattr)(struct dentry *dentry, const char *name);
- int (*inode_listxattr)(struct dentry *dentry);
- int (*inode_removexattr)(struct dentry *dentry, const char *name);
- int (*inode_need_killpriv)(struct dentry *dentry);
- int (*inode_killpriv)(struct dentry *dentry);
- int (*inode_getsecurity)(struct inode *inode, const char *name,
- void **buffer, bool alloc);
- int (*inode_setsecurity)(struct inode *inode, const char *name,
- const void *value, size_t size,
- int flags);
- int (*inode_listsecurity)(struct inode *inode, char *buffer,
- size_t buffer_size);
- void (*inode_getsecid)(struct inode *inode, u32 *secid);
- int (*inode_copy_up)(struct dentry *src, struct cred **new);
- int (*inode_copy_up_xattr)(const char *name);
-
- int (*file_permission)(struct file *file, int mask);
- int (*file_alloc_security)(struct file *file);
- void (*file_free_security)(struct file *file);
- int (*file_ioctl)(struct file *file, unsigned int cmd,
- unsigned long arg);
- int (*mmap_addr)(unsigned long addr);
- int (*mmap_file)(struct file *file, unsigned long reqprot,
- unsigned long prot, unsigned long flags);
- int (*file_mprotect)(struct vm_area_struct *vma, unsigned long reqprot,
- unsigned long prot);
- int (*file_lock)(struct file *file, unsigned int cmd);
- int (*file_fcntl)(struct file *file, unsigned int cmd,
- unsigned long arg);
- void (*file_set_fowner)(struct file *file);
- int (*file_send_sigiotask)(struct task_struct *tsk,
- struct fown_struct *fown, int sig);
- int (*file_receive)(struct file *file);
- int (*file_open)(struct file *file, const struct cred *cred);
-
- int (*task_create)(unsigned long clone_flags);
- int (*task_alloc)(struct task_struct *task, unsigned long clone_flags);
- void (*task_free)(struct task_struct *task);
- int (*cred_alloc_blank)(struct cred *cred, gfp_t gfp);
- void (*cred_free)(struct cred *cred);
- int (*cred_prepare)(struct cred *new, const struct cred *old,
- gfp_t gfp);
- void (*cred_transfer)(struct cred *new, const struct cred *old);
- int (*kernel_act_as)(struct cred *new, u32 secid);
- int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
- int (*kernel_module_request)(char *kmod_name);
- int (*kernel_read_file)(struct file *file, enum kernel_read_file_id id);
- int (*kernel_post_read_file)(struct file *file, char *buf, loff_t size,
- enum kernel_read_file_id id);
- int (*task_fix_setuid)(struct cred *new, const struct cred *old,
- int flags);
- int (*task_setpgid)(struct task_struct *p, pid_t pgid);
- int (*task_getpgid)(struct task_struct *p);
- int (*task_getsid)(struct task_struct *p);
- void (*task_getsecid)(struct task_struct *p, u32 *secid);
- int (*task_setnice)(struct task_struct *p, int nice);
- int (*task_setioprio)(struct task_struct *p, int ioprio);
- int (*task_getioprio)(struct task_struct *p);
- int (*task_prlimit)(const struct cred *cred, const struct cred *tcred,
- unsigned int flags);
- int (*task_setrlimit)(struct task_struct *p, unsigned int resource,
- struct rlimit *new_rlim);
- int (*task_setscheduler)(struct task_struct *p);
- int (*task_getscheduler)(struct task_struct *p);
- int (*task_movememory)(struct task_struct *p);
- int (*task_kill)(struct task_struct *p, struct siginfo *info,
- int sig, u32 secid);
- int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3,
- unsigned long arg4, unsigned long arg5);
- void (*task_to_inode)(struct task_struct *p, struct inode *inode);
-
- int (*ipc_permission)(struct kern_ipc_perm *ipcp, short flag);
- void (*ipc_getsecid)(struct kern_ipc_perm *ipcp, u32 *secid);
-
- int (*msg_msg_alloc_security)(struct msg_msg *msg);
- void (*msg_msg_free_security)(struct msg_msg *msg);
-
- int (*msg_queue_alloc_security)(struct msg_queue *msq);
- void (*msg_queue_free_security)(struct msg_queue *msq);
- int (*msg_queue_associate)(struct msg_queue *msq, int msqflg);
- int (*msg_queue_msgctl)(struct msg_queue *msq, int cmd);
- int (*msg_queue_msgsnd)(struct msg_queue *msq, struct msg_msg *msg,
- int msqflg);
- int (*msg_queue_msgrcv)(struct msg_queue *msq, struct msg_msg *msg,
- struct task_struct *target, long type,
- int mode);
-
- int (*shm_alloc_security)(struct shmid_kernel *shp);
- void (*shm_free_security)(struct shmid_kernel *shp);
- int (*shm_associate)(struct shmid_kernel *shp, int shmflg);
- int (*shm_shmctl)(struct shmid_kernel *shp, int cmd);
- int (*shm_shmat)(struct shmid_kernel *shp, char __user *shmaddr,
- int shmflg);
-
- int (*sem_alloc_security)(struct sem_array *sma);
- void (*sem_free_security)(struct sem_array *sma);
- int (*sem_associate)(struct sem_array *sma, int semflg);
- int (*sem_semctl)(struct sem_array *sma, int cmd);
- int (*sem_semop)(struct sem_array *sma, struct sembuf *sops,
- unsigned nsops, int alter);
-
- int (*netlink_send)(struct sock *sk, struct sk_buff *skb);
-
- void (*d_instantiate)(struct dentry *dentry, struct inode *inode);
-
- int (*getprocattr)(struct task_struct *p, char *name, char **value);
- int (*setprocattr)(const char *name, void *value, size_t size);
- int (*ismaclabel)(const char *name);
- int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen);
- int (*secctx_to_secid)(const char *secdata, u32 seclen, u32 *secid);
- void (*release_secctx)(char *secdata, u32 seclen);
-
- void (*inode_invalidate_secctx)(struct inode *inode);
- int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen);
- int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen);
- int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen);
-
-#ifdef CONFIG_SECURITY_NETWORK
- int (*unix_stream_connect)(struct sock *sock, struct sock *other,
- struct sock *newsk);
- int (*unix_may_send)(struct socket *sock, struct socket *other);
-
- int (*socket_create)(int family, int type, int protocol, int kern);
- int (*socket_post_create)(struct socket *sock, int family, int type,
- int protocol, int kern);
- int (*socket_bind)(struct socket *sock, struct sockaddr *address,
- int addrlen);
- int (*socket_connect)(struct socket *sock, struct sockaddr *address,
- int addrlen);
- int (*socket_listen)(struct socket *sock, int backlog);
- int (*socket_accept)(struct socket *sock, struct socket *newsock);
- int (*socket_sendmsg)(struct socket *sock, struct msghdr *msg,
- int size);
- int (*socket_recvmsg)(struct socket *sock, struct msghdr *msg,
- int size, int flags);
- int (*socket_getsockname)(struct socket *sock);
- int (*socket_getpeername)(struct socket *sock);
- int (*socket_getsockopt)(struct socket *sock, int level, int optname);
- int (*socket_setsockopt)(struct socket *sock, int level, int optname);
- int (*socket_shutdown)(struct socket *sock, int how);
- int (*socket_sock_rcv_skb)(struct sock *sk, struct sk_buff *skb);
- int (*socket_getpeersec_stream)(struct socket *sock,
- char __user *optval,
- int __user *optlen, unsigned len);
- int (*socket_getpeersec_dgram)(struct socket *sock,
- struct sk_buff *skb, u32 *secid);
- int (*sk_alloc_security)(struct sock *sk, int family, gfp_t priority);
- void (*sk_free_security)(struct sock *sk);
- void (*sk_clone_security)(const struct sock *sk, struct sock *newsk);
- void (*sk_getsecid)(struct sock *sk, u32 *secid);
- void (*sock_graft)(struct sock *sk, struct socket *parent);
- int (*inet_conn_request)(struct sock *sk, struct sk_buff *skb,
- struct request_sock *req);
- void (*inet_csk_clone)(struct sock *newsk,
- const struct request_sock *req);
- void (*inet_conn_established)(struct sock *sk, struct sk_buff *skb);
- int (*secmark_relabel_packet)(u32 secid);
- void (*secmark_refcount_inc)(void);
- void (*secmark_refcount_dec)(void);
- void (*req_classify_flow)(const struct request_sock *req,
- struct flowi *fl);
- int (*tun_dev_alloc_security)(void **security);
- void (*tun_dev_free_security)(void *security);
- int (*tun_dev_create)(void);
- int (*tun_dev_attach_queue)(void *security);
- int (*tun_dev_attach)(struct sock *sk, void *security);
- int (*tun_dev_open)(void *security);
-#endif /* CONFIG_SECURITY_NETWORK */
-
-#ifdef CONFIG_SECURITY_INFINIBAND
- int (*ib_pkey_access)(void *sec, u64 subnet_prefix, u16 pkey);
- int (*ib_endport_manage_subnet)(void *sec, const char *dev_name,
- u8 port_num);
- int (*ib_alloc_security)(void **sec);
- void (*ib_free_security)(void *sec);
-#endif /* CONFIG_SECURITY_INFINIBAND */
-
-#ifdef CONFIG_SECURITY_NETWORK_XFRM
- int (*xfrm_policy_alloc_security)(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *sec_ctx,
- gfp_t gfp);
- int (*xfrm_policy_clone_security)(struct xfrm_sec_ctx *old_ctx,
- struct xfrm_sec_ctx **new_ctx);
- void (*xfrm_policy_free_security)(struct xfrm_sec_ctx *ctx);
- int (*xfrm_policy_delete_security)(struct xfrm_sec_ctx *ctx);
- int (*xfrm_state_alloc)(struct xfrm_state *x,
- struct xfrm_user_sec_ctx *sec_ctx);
- int (*xfrm_state_alloc_acquire)(struct xfrm_state *x,
- struct xfrm_sec_ctx *polsec,
- u32 secid);
- void (*xfrm_state_free_security)(struct xfrm_state *x);
- int (*xfrm_state_delete_security)(struct xfrm_state *x);
- int (*xfrm_policy_lookup)(struct xfrm_sec_ctx *ctx, u32 fl_secid,
- u8 dir);
- int (*xfrm_state_pol_flow_match)(struct xfrm_state *x,
- struct xfrm_policy *xp,
- const struct flowi *fl);
- int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall);
-#endif /* CONFIG_SECURITY_NETWORK_XFRM */
-
- /* key management security hooks */
-#ifdef CONFIG_KEYS
- int (*key_alloc)(struct key *key, const struct cred *cred,
- unsigned long flags);
- void (*key_free)(struct key *key);
- int (*key_permission)(key_ref_t key_ref, const struct cred *cred,
- unsigned perm);
- int (*key_getsecurity)(struct key *key, char **_buffer);
-#endif /* CONFIG_KEYS */
-
-#ifdef CONFIG_AUDIT
- int (*audit_rule_init)(u32 field, u32 op, char *rulestr,
- void **lsmrule);
- int (*audit_rule_known)(struct audit_krule *krule);
- int (*audit_rule_match)(u32 secid, u32 field, u32 op, void *lsmrule,
- struct audit_context *actx);
- void (*audit_rule_free)(void *lsmrule);
-#endif /* CONFIG_AUDIT */
+ #define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
+ #include "lsm_hook_defs.h"
+ #undef LSM_HOOK
};
struct security_hook_heads {
- struct list_head binder_set_context_mgr;
- struct list_head binder_transaction;
- struct list_head binder_transfer_binder;
- struct list_head binder_transfer_file;
- struct list_head ptrace_access_check;
- struct list_head ptrace_traceme;
- struct list_head capget;
- struct list_head capset;
- struct list_head capable;
- struct list_head quotactl;
- struct list_head quota_on;
- struct list_head syslog;
- struct list_head settime;
- struct list_head vm_enough_memory;
- struct list_head bprm_set_creds;
- struct list_head bprm_check_security;
- struct list_head bprm_secureexec;
- struct list_head bprm_committing_creds;
- struct list_head bprm_committed_creds;
- struct list_head sb_alloc_security;
- struct list_head sb_free_security;
- struct list_head sb_copy_data;
- struct list_head sb_remount;
- struct list_head sb_kern_mount;
- struct list_head sb_show_options;
- struct list_head sb_statfs;
- struct list_head sb_mount;
- struct list_head sb_umount;
- struct list_head sb_pivotroot;
- struct list_head sb_set_mnt_opts;
- struct list_head sb_clone_mnt_opts;
- struct list_head sb_parse_opts_str;
- struct list_head dentry_init_security;
- struct list_head dentry_create_files_as;
-#ifdef CONFIG_SECURITY_PATH
- struct list_head path_unlink;
- struct list_head path_mkdir;
- struct list_head path_rmdir;
- struct list_head path_mknod;
- struct list_head path_truncate;
- struct list_head path_symlink;
- struct list_head path_link;
- struct list_head path_rename;
- struct list_head path_chmod;
- struct list_head path_chown;
- struct list_head path_chroot;
-#endif
- struct list_head inode_alloc_security;
- struct list_head inode_free_security;
- struct list_head inode_init_security;
- struct list_head inode_create;
- struct list_head inode_link;
- struct list_head inode_unlink;
- struct list_head inode_symlink;
- struct list_head inode_mkdir;
- struct list_head inode_rmdir;
- struct list_head inode_mknod;
- struct list_head inode_rename;
- struct list_head inode_readlink;
- struct list_head inode_follow_link;
- struct list_head inode_permission;
- struct list_head inode_setattr;
- struct list_head inode_getattr;
- struct list_head inode_setxattr;
- struct list_head inode_post_setxattr;
- struct list_head inode_getxattr;
- struct list_head inode_listxattr;
- struct list_head inode_removexattr;
- struct list_head inode_need_killpriv;
- struct list_head inode_killpriv;
- struct list_head inode_getsecurity;
- struct list_head inode_setsecurity;
- struct list_head inode_listsecurity;
- struct list_head inode_getsecid;
- struct list_head inode_copy_up;
- struct list_head inode_copy_up_xattr;
- struct list_head file_permission;
- struct list_head file_alloc_security;
- struct list_head file_free_security;
- struct list_head file_ioctl;
- struct list_head mmap_addr;
- struct list_head mmap_file;
- struct list_head file_mprotect;
- struct list_head file_lock;
- struct list_head file_fcntl;
- struct list_head file_set_fowner;
- struct list_head file_send_sigiotask;
- struct list_head file_receive;
- struct list_head file_open;
- struct list_head task_create;
- struct list_head task_alloc;
- struct list_head task_free;
- struct list_head cred_alloc_blank;
- struct list_head cred_free;
- struct list_head cred_prepare;
- struct list_head cred_transfer;
- struct list_head kernel_act_as;
- struct list_head kernel_create_files_as;
- struct list_head kernel_read_file;
- struct list_head kernel_post_read_file;
- struct list_head kernel_module_request;
- struct list_head task_fix_setuid;
- struct list_head task_setpgid;
- struct list_head task_getpgid;
- struct list_head task_getsid;
- struct list_head task_getsecid;
- struct list_head task_setnice;
- struct list_head task_setioprio;
- struct list_head task_getioprio;
- struct list_head task_prlimit;
- struct list_head task_setrlimit;
- struct list_head task_setscheduler;
- struct list_head task_getscheduler;
- struct list_head task_movememory;
- struct list_head task_kill;
- struct list_head task_prctl;
- struct list_head task_to_inode;
- struct list_head ipc_permission;
- struct list_head ipc_getsecid;
- struct list_head msg_msg_alloc_security;
- struct list_head msg_msg_free_security;
- struct list_head msg_queue_alloc_security;
- struct list_head msg_queue_free_security;
- struct list_head msg_queue_associate;
- struct list_head msg_queue_msgctl;
- struct list_head msg_queue_msgsnd;
- struct list_head msg_queue_msgrcv;
- struct list_head shm_alloc_security;
- struct list_head shm_free_security;
- struct list_head shm_associate;
- struct list_head shm_shmctl;
- struct list_head shm_shmat;
- struct list_head sem_alloc_security;
- struct list_head sem_free_security;
- struct list_head sem_associate;
- struct list_head sem_semctl;
- struct list_head sem_semop;
- struct list_head netlink_send;
- struct list_head d_instantiate;
- struct list_head getprocattr;
- struct list_head setprocattr;
- struct list_head ismaclabel;
- struct list_head secid_to_secctx;
- struct list_head secctx_to_secid;
- struct list_head release_secctx;
- struct list_head inode_invalidate_secctx;
- struct list_head inode_notifysecctx;
- struct list_head inode_setsecctx;
- struct list_head inode_getsecctx;
-#ifdef CONFIG_SECURITY_NETWORK
- struct list_head unix_stream_connect;
- struct list_head unix_may_send;
- struct list_head socket_create;
- struct list_head socket_post_create;
- struct list_head socket_bind;
- struct list_head socket_connect;
- struct list_head socket_listen;
- struct list_head socket_accept;
- struct list_head socket_sendmsg;
- struct list_head socket_recvmsg;
- struct list_head socket_getsockname;
- struct list_head socket_getpeername;
- struct list_head socket_getsockopt;
- struct list_head socket_setsockopt;
- struct list_head socket_shutdown;
- struct list_head socket_sock_rcv_skb;
- struct list_head socket_getpeersec_stream;
- struct list_head socket_getpeersec_dgram;
- struct list_head sk_alloc_security;
- struct list_head sk_free_security;
- struct list_head sk_clone_security;
- struct list_head sk_getsecid;
- struct list_head sock_graft;
- struct list_head inet_conn_request;
- struct list_head inet_csk_clone;
- struct list_head inet_conn_established;
- struct list_head secmark_relabel_packet;
- struct list_head secmark_refcount_inc;
- struct list_head secmark_refcount_dec;
- struct list_head req_classify_flow;
- struct list_head tun_dev_alloc_security;
- struct list_head tun_dev_free_security;
- struct list_head tun_dev_create;
- struct list_head tun_dev_attach_queue;
- struct list_head tun_dev_attach;
- struct list_head tun_dev_open;
-#endif /* CONFIG_SECURITY_NETWORK */
-#ifdef CONFIG_SECURITY_INFINIBAND
- struct list_head ib_pkey_access;
- struct list_head ib_endport_manage_subnet;
- struct list_head ib_alloc_security;
- struct list_head ib_free_security;
-#endif /* CONFIG_SECURITY_INFINIBAND */
-#ifdef CONFIG_SECURITY_NETWORK_XFRM
- struct list_head xfrm_policy_alloc_security;
- struct list_head xfrm_policy_clone_security;
- struct list_head xfrm_policy_free_security;
- struct list_head xfrm_policy_delete_security;
- struct list_head xfrm_state_alloc;
- struct list_head xfrm_state_alloc_acquire;
- struct list_head xfrm_state_free_security;
- struct list_head xfrm_state_delete_security;
- struct list_head xfrm_policy_lookup;
- struct list_head xfrm_state_pol_flow_match;
- struct list_head xfrm_decode_session;
-#endif /* CONFIG_SECURITY_NETWORK_XFRM */
-#ifdef CONFIG_KEYS
- struct list_head key_alloc;
- struct list_head key_free;
- struct list_head key_permission;
- struct list_head key_getsecurity;
-#endif /* CONFIG_KEYS */
-#ifdef CONFIG_AUDIT
- struct list_head audit_rule_init;
- struct list_head audit_rule_known;
- struct list_head audit_rule_match;
- struct list_head audit_rule_free;
-#endif /* CONFIG_AUDIT */
-};
+ #define LSM_HOOK(RET, DEFAULT, NAME, ...) struct hlist_head NAME;
+ #include "lsm_hook_defs.h"
+ #undef LSM_HOOK
+} __randomize_layout;
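
Both generated bodies above come from the same X-macro list in lsm_hook_defs.h. As a rough sketch of the mechanism (the two entries below are illustrative, not quoted from this patch), each entry expands once into a function pointer and once into a list head:

/* Illustrative lsm_hook_defs.h entries (RET, DEFAULT, NAME, args...): */
LSM_HOOK(int, 0, inode_permission, struct inode *inode, int mask)
LSM_HOOK(void, LSM_RET_VOID, task_free, struct task_struct *task)

/*
 * Under the union's LSM_HOOK definition these become:
 *	int (*inode_permission)(struct inode *inode, int mask);
 *	void (*task_free)(struct task_struct *task);
 * and under the struct's definition:
 *	struct hlist_head inode_permission;
 *	struct hlist_head task_free;
 */

The DEFAULT column is ignored by these two expansions; it supplies the fallback return value consumed by the hook dispatch code in security.c.
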
/*
* Security module hook list structure.
* For use with generic list macros for common operations.
*/
struct security_hook_list {
- struct list_head list;
- struct list_head *head;
+ struct hlist_node list;
+ struct hlist_head *head;
union security_list_options hook;
- char *lsm;
+ const char *lsm;
+} __randomize_layout;
+
+/*
+ * Security blob size or offset data.
+ */
+struct lsm_blob_sizes {
+ int lbs_cred;
+ int lbs_file;
+ int lbs_inode;
+ int lbs_superblock;
+ int lbs_ipc;
+ int lbs_msg_msg;
+ int lbs_task;
};
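
A minimal sketch of how a module might fill this in; the per-object security structs and their sizes here are hypothetical, not part of the patch:

/* Hypothetical per-object security structs for a module "my_lsm". */
struct my_lsm_cred_sec  { u32 sid; };
struct my_lsm_inode_sec { u32 sid; u16 sclass; };

static struct lsm_blob_sizes my_lsm_blob_sizes = {
	.lbs_cred  = sizeof(struct my_lsm_cred_sec),
	.lbs_inode = sizeof(struct my_lsm_inode_sec),
};

The infrastructure adds up the sizes requested by every registered module, allocates one shared blob per object, and each module ends up addressing its own offset within it.
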
/*
+ * LSM_RET_VOID is used as the default value in LSM_HOOK definitions for void
+ * LSM hooks (in include/linux/lsm_hook_defs.h).
+ */
+#define LSM_RET_VOID ((void) 0)
+
+/*
* Initializing a security_hook_list structure takes
* up a lot of space in a source file. This macro takes
* care of the common case and reduces the amount of
@@ -1938,49 +84,39 @@ extern struct security_hook_heads security_hook_heads;
extern char *lsm_names;
extern void security_add_hooks(struct security_hook_list *hooks, int count,
- char *lsm);
+ const char *lsm);
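
Registration itself keeps its old shape; a minimal sketch using the LSM_HOOK_INIT() helper that the comment above introduces (module name and the trivial hook body are hypothetical):

static int my_lsm_ptrace_access_check(struct task_struct *child,
				      unsigned int mode)
{
	return 0;	/* this sketch allows everything */
}

static struct security_hook_list my_lsm_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(ptrace_access_check, my_lsm_ptrace_access_check),
};

static int __init my_lsm_init(void)
{
	security_add_hooks(my_lsm_hooks, ARRAY_SIZE(my_lsm_hooks), "my_lsm");
	return 0;
}
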
-#ifdef CONFIG_SECURITY_SELINUX_DISABLE
-/*
- * Assuring the safety of deleting a security module is up to
- * the security module involved. This may entail ordering the
- * module's hook list in a particular way, refusing to disable
- * the module once a policy is loaded or any number of other
- * actions better imagined than described.
- *
- * The name of the configuration option reflects the only module
- * that currently uses the mechanism. Any developer who thinks
- * disabling their module is a good idea needs to be at least as
- * careful as the SELinux team.
- */
-static inline void security_delete_hooks(struct security_hook_list *hooks,
- int count)
-{
- int i;
+#define LSM_FLAG_LEGACY_MAJOR BIT(0)
+#define LSM_FLAG_EXCLUSIVE BIT(1)
+
+enum lsm_order {
+ LSM_ORDER_FIRST = -1, /* This is only for capabilities. */
+ LSM_ORDER_MUTABLE = 0,
+ LSM_ORDER_LAST = 1, /* This is only for integrity. */
+};
+
+struct lsm_info {
+ const char *name; /* Required. */
+ enum lsm_order order; /* Optional: default is LSM_ORDER_MUTABLE */
+ unsigned long flags; /* Optional: flags describing LSM */
+ int *enabled; /* Optional: controlled by CONFIG_LSM */
+ int (*init)(void); /* Required. */
+ struct lsm_blob_sizes *blobs; /* Optional: for blob sharing. */
+};
+
+extern struct lsm_info __start_lsm_info[], __end_lsm_info[];
+extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[];
- for (i = 0; i < count; i++)
- list_del_rcu(&hooks[i].list);
-}
-#endif /* CONFIG_SECURITY_SELINUX_DISABLE */
+#define DEFINE_LSM(lsm) \
+ static struct lsm_info __lsm_##lsm \
+ __used __section(".lsm_info.init") \
+ __aligned(sizeof(unsigned long))
-/* Currently required to handle SELinux runtime hook disable. */
-#ifdef CONFIG_SECURITY_WRITABLE_HOOKS
-#define __lsm_ro_after_init
-#else
-#define __lsm_ro_after_init __ro_after_init
-#endif /* CONFIG_SECURITY_WRITABLE_HOOKS */
+#define DEFINE_EARLY_LSM(lsm) \
+ static struct lsm_info __early_lsm_##lsm \
+ __used __section(".early_lsm_info.init") \
+ __aligned(sizeof(unsigned long))
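
Tying the earlier sketches together, the descriptor a module places in the .lsm_info.init section might look like this (still hypothetical names):

DEFINE_LSM(my_lsm) = {
	.name  = "my_lsm",
	.order = LSM_ORDER_MUTABLE,
	.init  = my_lsm_init,
	.blobs = &my_lsm_blob_sizes,
};

At boot the LSM core walks the __start_lsm_info..__end_lsm_info region and calls each selected module's init(), which is why the hard-coded yama_add_hooks()/loadpin_add_hooks() calls below can go away.
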
-extern int __init security_module_enable(const char *module);
-extern void __init capability_add_hooks(void);
-#ifdef CONFIG_SECURITY_YAMA
-extern void __init yama_add_hooks(void);
-#else
-static inline void __init yama_add_hooks(void) { }
-#endif
-#ifdef CONFIG_SECURITY_LOADPIN
-void __init loadpin_add_hooks(void);
-#else
-static inline void loadpin_add_hooks(void) { };
-#endif
+extern int lsm_inode_alloc(struct inode *inode);
#endif /* ! __LINUX_LSM_HOOKS_H */
diff --git a/include/linux/lz4.h b/include/linux/lz4.h
index 394e3d9213b8..b16e15b9587a 100644
--- a/include/linux/lz4.h
+++ b/include/linux/lz4.h
@@ -278,7 +278,7 @@ int LZ4_decompress_fast(const char *source, char *dest, int originalSize);
* @compressedSize: is the precise full size of the compressed block
* @maxDecompressedSize: is the size of 'dest' buffer
*
- * Decompresses data fom 'source' into 'dest'.
+ * Decompresses data from 'source' into 'dest'.
* If the source stream is detected malformed, the function will
* stop decoding and return a negative result.
* This function is protected against buffer overflow exploits,
@@ -522,7 +522,7 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
const char *dictionary, int dictSize);
/**
- * LZ4_decompress_fast_continue() - Decompress blocks in streaming mode
+ * LZ4_decompress_safe_continue() - Decompress blocks in streaming mode
* @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
* @source: source address of the compressed data
* @dest: output buffer address of the uncompressed data
@@ -530,7 +530,7 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
* @compressedSize: is the precise full size of the compressed block
* @maxDecompressedSize: is the size of 'dest' buffer
*
- * These decoding function allows decompression of multiple blocks
+ * This decoding function allows decompression of multiple blocks
* in "streaming" mode.
* Previously decoded blocks *must* remain available at the memory position
* where they were decoded (up to 64 KB)
@@ -569,7 +569,7 @@ int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
* which must be already allocated with 'originalSize' bytes
* @originalSize: is the original and therefore uncompressed size
*
- * These decoding function allows decompression of multiple blocks
+ * This decoding function allows decompression of multiple blocks
* in "streaming" mode.
* Previously decoded blocks *must* remain available at the memory position
* where they were decoded (up to 64 KB)
@@ -610,10 +610,10 @@ int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
* @dictStart: pointer to the start of the dictionary in memory
* @dictSize: size of dictionary
*
- * These decoding function works the same as
+ * This decoding function works the same as
* a combination of LZ4_setStreamDecode() followed by
* LZ4_decompress_safe_continue()
- * It is stand-alone, and don'tn eed a LZ4_streamDecode_t structure.
+ * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure.
*
* Return: number of bytes decompressed into destination buffer
* (necessarily <= maxDecompressedSize)
@@ -633,10 +633,10 @@ int LZ4_decompress_safe_usingDict(const char *source, char *dest,
* @dictStart: pointer to the start of the dictionary in memory
* @dictSize: size of dictionary
*
- * These decoding function works the same as
+ * This decoding function works the same as
* a combination of LZ4_setStreamDecode() followed by
- * LZ4_decompress_safe_continue()
- * It is stand-alone, and don'tn eed a LZ4_streamDecode_t structure.
+ * LZ4_decompress_fast_continue()
+ * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure.
*
* Return: number of bytes decompressed into destination buffer
* (necessarily <= maxDecompressedSize)
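
As a usage sketch of the stand-alone variant documented above (the wrapper and its error convention are assumptions; only the LZ4_decompress_safe_usingDict() call comes from the header):

#include <linux/lz4.h>

/* Decompress one block against an external dictionary. */
static int my_decompress_block(const char *src, int src_len,
			       char *dst, int dst_max,
			       const char *dict, int dict_len)
{
	int n = LZ4_decompress_safe_usingDict(src, dst, src_len, dst_max,
					      dict, dict_len);

	return n < 0 ? -EINVAL : n;	/* n = bytes written to dst */
}
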
diff --git a/include/linux/lzo.h b/include/linux/lzo.h
index a0848d9377e5..e95c7d1092b2 100644
--- a/include/linux/lzo.h
+++ b/include/linux/lzo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LZO_H__
#define __LZO_H__
/*
@@ -17,12 +18,16 @@
#define LZO1X_1_MEM_COMPRESS (8192 * sizeof(unsigned short))
#define LZO1X_MEM_COMPRESS LZO1X_1_MEM_COMPRESS
-#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)
+#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3 + 2)
/* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */
int lzo1x_1_compress(const unsigned char *src, size_t src_len,
unsigned char *dst, size_t *dst_len, void *wrkmem);
+/* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */
+int lzorle1x_1_compress(const unsigned char *src, size_t src_len,
+ unsigned char *dst, size_t *dst_len, void *wrkmem);
+
/* safe decompression with overrun testing */
int lzo1x_decompress_safe(const unsigned char *src, size_t src_len,
unsigned char *dst, size_t *dst_len);
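
The extra "+ 2" in lzo1x_worst_compress() feeds straight into destination-buffer sizing; a sketch of the calling convention (the allocation strategy is an assumption):

#include <linux/lzo.h>
#include <linux/vmalloc.h>

static int my_compress(const unsigned char *src, size_t src_len)
{
	size_t dst_len = lzo1x_worst_compress(src_len);
	unsigned char *dst = vmalloc(dst_len);
	void *wrkmem = vmalloc(LZO1X_1_MEM_COMPRESS);
	int ret = -ENOMEM;

	if (dst && wrkmem)	/* lzorle1x_1_compress() takes the same args */
		ret = lzo1x_1_compress(src, src_len, dst, &dst_len, wrkmem);

	vfree(wrkmem);
	vfree(dst);
	return ret;	/* LZO_E_OK on success; output discarded in this sketch */
}
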
diff --git a/include/linux/mISDNdsp.h b/include/linux/mISDNdsp.h
index 41d1eeb9b3bd..00758f45fddc 100644
--- a/include/linux/mISDNdsp.h
+++ b/include/linux/mISDNdsp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __mISDNdsp_H__
#define __mISDNdsp_H__
diff --git a/include/linux/mISDNhw.h b/include/linux/mISDNhw.h
index 9d96d5d4dfed..ef4f8eb02eac 100644
--- a/include/linux/mISDNhw.h
+++ b/include/linux/mISDNhw.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
*
* Author Karsten Keil <kkeil@novell.com>
@@ -5,16 +6,6 @@
* Basic declarations for the mISDN HW channels
*
* Copyright 2008 by Karsten Keil <kkeil@novell.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef MISDNHW_H
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h
index a7330eb3ec64..7aab4a769736 100644
--- a/include/linux/mISDNif.h
+++ b/include/linux/mISDNif.h
@@ -18,7 +18,6 @@
#ifndef mISDNIF_H
#define mISDNIF_H
-#include <stdarg.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/socket.h>
@@ -587,7 +586,7 @@ extern struct mISDNclock *mISDN_register_clock(char *, int, clockctl_func_t *,
void *);
extern void mISDN_unregister_clock(struct mISDNclock *);
-static inline struct mISDNdevice *dev_to_mISDN(struct device *dev)
+static inline struct mISDNdevice *dev_to_mISDN(const struct device *dev)
{
if (dev)
return dev_get_drvdata(dev);
diff --git a/include/linux/mailbox/arm_mhuv2_message.h b/include/linux/mailbox/arm_mhuv2_message.h
new file mode 100644
index 000000000000..821b9d96daa4
--- /dev/null
+++ b/include/linux/mailbox/arm_mhuv2_message.h
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM MHUv2 Mailbox Message
+ *
+ * Copyright (C) 2020 Arm Ltd.
+ * Copyright (C) 2020 Linaro Ltd.
+ */
+
+#ifndef _LINUX_ARM_MHUV2_MESSAGE_H_
+#define _LINUX_ARM_MHUV2_MESSAGE_H_
+
+#include <linux/types.h>
+
+/* Data structure for data-transfer protocol */
+struct arm_mhuv2_mbox_msg {
+ void *data;
+ size_t len;
+};
+
+#endif /* _LINUX_ARM_MHUV2_MESSAGE_H_ */
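
A client-side sketch of wrapping a payload for the MHUv2 data-transfer protocol (the helper name and how the channel was obtained are assumptions):

#include <linux/mailbox_client.h>
#include <linux/mailbox/arm_mhuv2_message.h>

static int my_mhuv2_send(struct mbox_chan *chan, void *buf, size_t len)
{
	struct arm_mhuv2_mbox_msg msg = {
		.data = buf,
		.len  = len,
	};

	return mbox_send_message(chan, &msg);
}
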
diff --git a/include/linux/mailbox/brcm-message.h b/include/linux/mailbox/brcm-message.h
index c20b4843fc2d..18da82115476 100644
--- a/include/linux/mailbox/brcm-message.h
+++ b/include/linux/mailbox/brcm-message.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016 Broadcom
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Common header for Broadcom mailbox messages which is shared across
* Broadcom SoCs and Broadcom mailbox client drivers.
*/
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
new file mode 100644
index 000000000000..a8f0070c7aa9
--- /dev/null
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ *
+ */
+
+#ifndef __MTK_CMDQ_MAILBOX_H__
+#define __MTK_CMDQ_MAILBOX_H__
+
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#define CMDQ_INST_SIZE 8 /* instruction is 64-bit */
+#define CMDQ_SUBSYS_SHIFT 16
+#define CMDQ_OP_CODE_SHIFT 24
+#define CMDQ_JUMP_PASS CMDQ_INST_SIZE
+
+#define CMDQ_WFE_UPDATE BIT(31)
+#define CMDQ_WFE_UPDATE_VALUE BIT(16)
+#define CMDQ_WFE_WAIT BIT(15)
+#define CMDQ_WFE_WAIT_VALUE 0x1
+
+/*
+ * WFE arg_b
+ * bit 0-11: wait value
+ * bit 15: 1 - wait, 0 - no wait
+ * bit 16-27: update value
+ * bit 31: 1 - update, 0 - no update
+ */
+#define CMDQ_WFE_OPTION (CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE)
+
+/* cmdq event maximum */
+#define CMDQ_MAX_EVENT 0x3ff
+
+/*
+ * CMDQ_CODE_MASK:
+ * set write mask
+ * format: op mask
+ * CMDQ_CODE_WRITE:
+ * write value into target register
+ * format: op subsys address value
+ * CMDQ_CODE_JUMP:
+ * jump by offset
+ * format: op offset
+ * CMDQ_CODE_WFE:
+ * wait for event and clear
+ * it only clears the event if no wait is requested
+ * format: [wait] op event update:1 to_wait:1 wait:1
+ * [clear] op event update:1 to_wait:0 wait:0
+ * CMDQ_CODE_EOC:
+ * end of command
+ * format: op irq_flag
+ */
+enum cmdq_code {
+ CMDQ_CODE_MASK = 0x02,
+ CMDQ_CODE_WRITE = 0x04,
+ CMDQ_CODE_POLL = 0x08,
+ CMDQ_CODE_JUMP = 0x10,
+ CMDQ_CODE_WFE = 0x20,
+ CMDQ_CODE_EOC = 0x40,
+ CMDQ_CODE_READ_S = 0x80,
+ CMDQ_CODE_WRITE_S = 0x90,
+ CMDQ_CODE_WRITE_S_MASK = 0x91,
+ CMDQ_CODE_LOGIC = 0xa0,
+};
+
+struct cmdq_cb_data {
+ int sta;
+ struct cmdq_pkt *pkt;
+};
+
+struct cmdq_pkt {
+ void *va_base;
+ dma_addr_t pa_base;
+ size_t cmd_buf_size; /* command occupied size */
+ size_t buf_size; /* real buffer size */
+ void *cl;
+};
+
+u8 cmdq_get_shift_pa(struct mbox_chan *chan);
+
+#endif /* __MTK_CMDQ_MAILBOX_H__ */
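
Following the arg_b layout documented in the header, the two WFE forms could be composed like this (a sketch; the real driver builds these words internally):

/* [wait]  update:1 to_wait:1 wait:1 -- wait for the event, then clear it */
static inline u32 my_wfe_wait_arg_b(void)
{
	return CMDQ_WFE_UPDATE | CMDQ_WFE_OPTION;
}

/* [clear] update:1 to_wait:0 wait:0 -- just clear the event */
static inline u32 my_wfe_clear_arg_b(void)
{
	return CMDQ_WFE_UPDATE;
}
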
diff --git a/include/linux/mailbox/zynqmp-ipi-message.h b/include/linux/mailbox/zynqmp-ipi-message.h
new file mode 100644
index 000000000000..31d8046d945e
--- /dev/null
+++ b/include/linux/mailbox/zynqmp-ipi-message.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_ZYNQMP_IPI_MESSAGE_H_
+#define _LINUX_ZYNQMP_IPI_MESSAGE_H_
+
+/**
+ * struct zynqmp_ipi_message - ZynqMP IPI message structure
+ * @len: Length of message
+ * @data: message payload
+ *
+ * This is the structure for data used in mbox_send_message.
+ * The maximum length of the data buffer is fixed to 32 bytes.
+ * The client is supposed to be aware of this.
+ */
+struct zynqmp_ipi_message {
+ size_t len;
+ u8 data[];
+};
+
+#endif /* _LINUX_ZYNQMP_IPI_MESSAGE_H_ */
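
Since @data is a flexible array member, header and payload are allocated together; a sketch (the helper name and kzalloc strategy are assumptions):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mailbox/zynqmp-ipi-message.h>

static struct zynqmp_ipi_message *my_alloc_ipi_msg(const u8 *buf, size_t len)
{
	struct zynqmp_ipi_message *msg;

	if (len > 32)	/* fixed cap noted in the kernel-doc above */
		return NULL;

	msg = kzalloc(struct_size(msg, data, len), GFP_KERNEL);
	if (!msg)
		return NULL;

	msg->len = len;
	memcpy(msg->data, buf, len);
	return msg;
}
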
diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h
index 44348710953f..734694912ef7 100644
--- a/include/linux/mailbox_client.h
+++ b/include/linux/mailbox_client.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013-2014 Linaro Ltd.
* Author: Jassi Brar <jassisinghbrar@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __MAILBOX_CLIENT_H
@@ -40,10 +37,12 @@ struct mbox_client {
void (*tx_done)(struct mbox_client *cl, void *mssg, int r);
};
+int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl);
struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
const char *name);
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index);
int mbox_send_message(struct mbox_chan *chan, void *mssg);
+int mbox_flush(struct mbox_chan *chan, unsigned long timeout);
void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */
bool mbox_client_peek_data(struct mbox_chan *chan); /* atomic */
void mbox_free_channel(struct mbox_chan *chan); /* may sleep */
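
A sketch of a client pairing the send with the new mbox_flush() for contexts that must block but cannot sleep (the timeout value is arbitrary here):

#include <linux/mailbox_client.h>

static int my_send_and_flush(struct mbox_chan *chan, void *mssg)
{
	int ret = mbox_send_message(chan, mssg);

	if (ret < 0)
		return ret;

	/* Busy-wait until the controller reports the data flushed out. */
	return mbox_flush(chan, 100);
}
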
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
index 74deadb42d76..6fee33cb52f5 100644
--- a/include/linux/mailbox_controller.h
+++ b/include/linux/mailbox_controller.h
@@ -1,8 +1,4 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __MAILBOX_CONTROLLER_H
#define __MAILBOX_CONTROLLER_H
@@ -24,6 +20,9 @@ struct mbox_chan;
* transmission of data is reported by the controller via
* mbox_chan_txdone (if it has some TX ACK irq). It must not
* sleep.
+ * @flush: Called when a client requests transmissions to be blocking but
+ * the context doesn't allow sleeping. Typically the controller
+ * will implement a busy loop waiting for the data to flush out.
* @startup: Called when a client requests the chan. The controller
* could ask clients for additional parameters of communication
* to be provided via client's chan_data. This call may
@@ -46,6 +45,7 @@ struct mbox_chan;
*/
struct mbox_chan_ops {
int (*send_data)(struct mbox_chan *chan, void *data);
+ int (*flush)(struct mbox_chan *chan, unsigned long timeout);
int (*startup)(struct mbox_chan *chan);
void (*shutdown)(struct mbox_chan *chan);
bool (*last_tx_done)(struct mbox_chan *chan);
@@ -83,6 +83,7 @@ struct mbox_controller {
const struct of_phandle_args *sp);
/* Internal to API */
struct hrtimer poll_hrt;
+ spinlock_t poll_hrt_lock;
struct list_head node;
};
@@ -131,4 +132,9 @@ void mbox_controller_unregister(struct mbox_controller *mbox); /* can sleep */
void mbox_chan_received_data(struct mbox_chan *chan, void *data); /* atomic */
void mbox_chan_txdone(struct mbox_chan *chan, int r); /* atomic */
+int devm_mbox_controller_register(struct device *dev,
+ struct mbox_controller *mbox);
+void devm_mbox_controller_unregister(struct device *dev,
+ struct mbox_controller *mbox);
+
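+/*
+ * Example: registering a controller from probe with managed unwinding
+ * (a minimal sketch; "my_chan_ops", "my_chans" and the surrounding
+ * driver are hypothetical):
+ *
+ *	static int my_probe(struct platform_device *pdev)
+ *	{
+ *		struct mbox_controller *mbox;
+ *
+ *		mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
+ *		if (!mbox)
+ *			return -ENOMEM;
+ *		mbox->dev = &pdev->dev;
+ *		mbox->ops = &my_chan_ops;
+ *		mbox->chans = my_chans;
+ *		mbox->num_chans = ARRAY_SIZE(my_chans);
+ *		return devm_mbox_controller_register(&pdev->dev, mbox);
+ *	}
+ */
+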
#endif /* __MAILBOX_CONTROLLER_H */
diff --git a/include/linux/map_benchmark.h b/include/linux/map_benchmark.h
new file mode 100644
index 000000000000..62674c83bde4
--- /dev/null
+++ b/include/linux/map_benchmark.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 HiSilicon Limited.
+ */
+
+#ifndef _KERNEL_DMA_BENCHMARK_H
+#define _KERNEL_DMA_BENCHMARK_H
+
+#define DMA_MAP_BENCHMARK _IOWR('d', 1, struct map_benchmark)
+#define DMA_MAP_MAX_THREADS 1024
+#define DMA_MAP_MAX_SECONDS 300
+#define DMA_MAP_MAX_TRANS_DELAY (10 * NSEC_PER_MSEC)
+
+#define DMA_MAP_BIDIRECTIONAL 0
+#define DMA_MAP_TO_DEVICE 1
+#define DMA_MAP_FROM_DEVICE 2
+
+struct map_benchmark {
+ __u64 avg_map_100ns; /* average map latency in 100ns */
+ __u64 map_stddev; /* standard deviation of map latency */
+ __u64 avg_unmap_100ns; /* as above */
+ __u64 unmap_stddev;
+ __u32 threads; /* how many threads will do map/unmap in parallel */
+ __u32 seconds; /* how long the test will last */
+ __s32 node; /* which numa node this benchmark will run on */
+ __u32 dma_bits; /* DMA addressing capability */
+ __u32 dma_dir; /* DMA data direction */
+ __u32 dma_trans_ns; /* time for DMA transmission in ns */
+ __u32 granule; /* how many PAGE_SIZE-sized units to map/unmap at a time */
+};
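+
+/*
+ * Example: driving the benchmark from userspace (a minimal sketch; the
+ * debugfs path is an assumption based on where the benchmark driver
+ * creates its file):
+ *
+ *	struct map_benchmark map = {
+ *		.threads = 8,
+ *		.seconds = 10,
+ *		.node = -1,
+ *		.dma_bits = 32,
+ *		.dma_dir = DMA_MAP_BIDIRECTIONAL,
+ *	};
+ *	int fd = open("/sys/kernel/debug/dma_map_benchmark", O_RDWR);
+ *
+ *	if (ioctl(fd, DMA_MAP_BENCHMARK, &map) == 0)
+ *		printf("avg map latency: %llu00ns\n", map.avg_map_100ns);
+ */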
+#endif /* _KERNEL_DMA_BENCHMARK_H */
diff --git a/include/linux/maple.h b/include/linux/maple.h
index c37288b23e0c..9b140272ee16 100644
--- a/include/linux/maple.h
+++ b/include/linux/maple.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MAPLE_H
#define __LINUX_MAPLE_H
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
new file mode 100644
index 000000000000..1fadb5f5978b
--- /dev/null
+++ b/include/linux/maple_tree.h
@@ -0,0 +1,697 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _LINUX_MAPLE_TREE_H
+#define _LINUX_MAPLE_TREE_H
+/*
+ * Maple Tree - An RCU-safe adaptive tree for storing ranges
+ * Copyright (c) 2018-2022 Oracle
+ * Authors: Liam R. Howlett <Liam.Howlett@Oracle.com>
+ * Matthew Wilcox <willy@infradead.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+/* #define CONFIG_MAPLE_RCU_DISABLED */
+
+/*
+ * Allocated nodes are mutable until they have been inserted into the tree,
+ * at which time they cannot change their type until they have been removed
+ * from the tree and an RCU grace period has passed.
+ *
+ * Removed nodes have their ->parent set to point to themselves. RCU readers
+ * check ->parent before relying on the value that they loaded from the
+ * slots array. This lets us reuse the slots array for the RCU head.
+ *
+ * Nodes in the tree point to their parent unless bit 0 is set.
+ */
+#if defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
+/* 64bit sizes */
+#define MAPLE_NODE_SLOTS 31 /* 256 bytes including ->parent */
+#define MAPLE_RANGE64_SLOTS 16 /* 256 bytes */
+#define MAPLE_ARANGE64_SLOTS 10 /* 240 bytes */
+#define MAPLE_ARANGE64_META_MAX 15 /* Out of range for metadata */
+#define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 1)
+#else
+/* 32bit sizes */
+#define MAPLE_NODE_SLOTS 63 /* 256 bytes including ->parent */
+#define MAPLE_RANGE64_SLOTS 32 /* 256 bytes */
+#define MAPLE_ARANGE64_SLOTS 21 /* 240 bytes */
+#define MAPLE_ARANGE64_META_MAX 31 /* Out of range for metadata */
+#define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 2)
+#endif /* defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64) */
+
+#define MAPLE_NODE_MASK 255UL
+
+/*
+ * The node->parent of the root node has bit 0 set and the rest of the pointer
+ * is a pointer to the tree itself. No more bits are available in this pointer
+ * (on m68k, the data structure may only be 2-byte aligned).
+ *
+ * Internal non-root nodes can only have maple_range_* nodes as parents. The
+ * parent pointer is 256B aligned like all other tree nodes. When storing 32
+ * or 64 bit values, the offset can fit into 4 bits. The 16 bit values need an
+ * extra bit to store the offset. This extra bit comes from a reuse of the last
+ * bit in the node type. This is possible by using bit 1 to indicate if bit 2
+ * is part of the type or the slot.
+ *
+ * Once the type is decided, the choice between an allocation range type and a
+ * plain range type is made by examining the immutable MAPLE_ALLOC_RANGE flag
+ * in the tree flags.
+ *
+ * Node types:
+ * 0x??1 = Root
+ * 0x?00 = 16 bit nodes
+ * 0x010 = 32 bit nodes
+ * 0x110 = 64 bit nodes
+ *
+ * Slot size and location in the parent pointer:
+ * type : slot location
+ * 0x??1 : Root
+ * 0x?00 : 16 bit values, type in 0-1, slot in 2-6
+ * 0x010 : 32 bit values, type in 0-2, slot in 3-6
+ * 0x110 : 64 bit values, type in 0-2, slot in 3-6
+ */
+
+/*
+ * This metadata is used to optimize the gap updating code and in reverse
+ * searching for gaps or any other code that needs to find the end of the data.
+ */
+struct maple_metadata {
+ unsigned char end;
+ unsigned char gap;
+};
+
+/*
+ * Leaf nodes do not store pointers to nodes, they store user data. Users may
+ * store almost any bit pattern. As noted above, the optimisation of storing an
+ * entry at 0 in the root pointer cannot be done for data which have the bottom
+ * two bits set to '10'. We also reserve values with the bottom two bits set to
+ * '10' which are below 4096 (ie 2, 6, 10 .. 4094) for internal use. Some APIs
+ * return errnos as a negative errno shifted right by two bits and the bottom
+ * two bits set to '10', and while choosing to store these values in the array
+ * is not an error, it may lead to confusion if you're testing for an error with
+ * mas_is_err().
+ *
+ * Non-leaf nodes store the type of the node pointed to (enum maple_type in bits
+ * 3-6), bit 2 is reserved. That leaves bits 0-1 unused for now.
+ *
+ * In regular B-Tree terms, pivots are called keys. The term pivot is used to
+ * indicate that the tree is specifying ranges. Pivots may appear in the
+ * subtree with an entry attached to the value, whereas keys are unique to a
+ * specific position of a B-tree. Pivot values are inclusive of the slot with
+ * the same index.
+ */
+
+struct maple_range_64 {
+ struct maple_pnode *parent;
+ unsigned long pivot[MAPLE_RANGE64_SLOTS - 1];
+ union {
+ void __rcu *slot[MAPLE_RANGE64_SLOTS];
+ struct {
+ void __rcu *pad[MAPLE_RANGE64_SLOTS - 1];
+ struct maple_metadata meta;
+ };
+ };
+};
+
+/*
+ * At tree creation time, the user can specify that they're willing to trade off
+ * storing fewer entries in a tree in return for storing more information in
+ * each node.
+ *
+ * The maple tree supports recording the largest range of NULL entries available
+ * in this node, also called gaps. This optimises the tree for allocating a
+ * range.
+ */
+struct maple_arange_64 {
+ struct maple_pnode *parent;
+ unsigned long pivot[MAPLE_ARANGE64_SLOTS - 1];
+ void __rcu *slot[MAPLE_ARANGE64_SLOTS];
+ unsigned long gap[MAPLE_ARANGE64_SLOTS];
+ struct maple_metadata meta;
+};
+
+struct maple_alloc {
+ unsigned long total;
+ unsigned char node_count;
+ unsigned int request_count;
+ struct maple_alloc *slot[MAPLE_ALLOC_SLOTS];
+};
+
+struct maple_topiary {
+ struct maple_pnode *parent;
+ struct maple_enode *next; /* Overlaps the pivot */
+};
+
+enum maple_type {
+ maple_dense,
+ maple_leaf_64,
+ maple_range_64,
+ maple_arange_64,
+};
+
+
+/**
+ * DOC: Maple tree flags
+ *
+ * * MT_FLAGS_ALLOC_RANGE - Track gaps in this tree
+ * * MT_FLAGS_USE_RCU - Operate in RCU mode
+ * * MT_FLAGS_HEIGHT_OFFSET - The position of the tree height in the flags
+ * * MT_FLAGS_HEIGHT_MASK - The mask for the maple tree height value
+ * * MT_FLAGS_LOCK_MASK - How the mt_lock is used
+ * * MT_FLAGS_LOCK_IRQ - Acquired irq-safe
+ * * MT_FLAGS_LOCK_BH - Acquired bh-safe
+ * * MT_FLAGS_LOCK_EXTERN - mt_lock is not used
+ *
+ * * MAPLE_HEIGHT_MAX - The largest height that can be stored
+ */
+#define MT_FLAGS_ALLOC_RANGE 0x01
+#define MT_FLAGS_USE_RCU 0x02
+#define MT_FLAGS_HEIGHT_OFFSET 0x02
+#define MT_FLAGS_HEIGHT_MASK 0x7C
+#define MT_FLAGS_LOCK_MASK 0x300
+#define MT_FLAGS_LOCK_IRQ 0x100
+#define MT_FLAGS_LOCK_BH 0x200
+#define MT_FLAGS_LOCK_EXTERN 0x300
+
+#define MAPLE_HEIGHT_MAX 31
+
+
+#define MAPLE_NODE_TYPE_MASK 0x0F
+#define MAPLE_NODE_TYPE_SHIFT 0x03
+
+#define MAPLE_RESERVED_RANGE 4096
+
+#ifdef CONFIG_LOCKDEP
+typedef struct lockdep_map *lockdep_map_p;
+#define mt_lock_is_held(mt) lock_is_held(mt->ma_external_lock)
+#define mt_set_external_lock(mt, lock) \
+ (mt)->ma_external_lock = &(lock)->dep_map
+#else
+typedef struct { /* nothing */ } lockdep_map_p;
+#define mt_lock_is_held(mt) 1
+#define mt_set_external_lock(mt, lock) do { } while (0)
+#endif
+
+/*
+ * If the tree contains a single entry at index 0, it is usually stored in
+ * tree->ma_root. To optimise for the page cache, an entry which ends in '00',
+ * '01' or '11' is stored in the root, but an entry which ends in '10' will be
+ * stored in a node. Bits 3-6 are used to store enum maple_type.
+ *
+ * The flags are used both to store some immutable information about this tree
+ * (set at tree creation time) and dynamic information set under the spinlock.
+ *
+ * Another use of the flags is to indicate global states of the tree. This is the
+ * case with the MAPLE_USE_RCU flag, which indicates the tree is currently in
+ * RCU mode. This mode was added to allow the tree to reuse nodes instead of
+ * re-allocating and RCU freeing nodes when there is a single user.
+ */
+struct maple_tree {
+ union {
+ spinlock_t ma_lock;
+ lockdep_map_p ma_external_lock;
+ };
+ void __rcu *ma_root;
+ unsigned int ma_flags;
+};
+
+/**
+ * MTREE_INIT() - Initialize a maple tree
+ * @name: The maple tree name
+ * @__flags: The maple tree flags
+ *
+ */
+#define MTREE_INIT(name, __flags) { \
+ .ma_lock = __SPIN_LOCK_UNLOCKED((name).ma_lock), \
+ .ma_flags = __flags, \
+ .ma_root = NULL, \
+}
+
+/**
+ * MTREE_INIT_EXT() - Initialize a maple tree with an external lock.
+ * @name: The tree name
+ * @__flags: The maple tree flags
+ * @__lock: The external lock
+ */
+#ifdef CONFIG_LOCKDEP
+#define MTREE_INIT_EXT(name, __flags, __lock) { \
+ .ma_external_lock = &(__lock).dep_map, \
+ .ma_flags = (__flags), \
+ .ma_root = NULL, \
+}
+#else
+#define MTREE_INIT_EXT(name, __flags, __lock) MTREE_INIT(name, __flags)
+#endif
+
+#define DEFINE_MTREE(name) \
+ struct maple_tree name = MTREE_INIT(name, 0)
+
+#define mtree_lock(mt) spin_lock((&(mt)->ma_lock))
+#define mtree_unlock(mt) spin_unlock((&(mt)->ma_lock))
+
+/*
+ * The Maple Tree squeezes various bits in at various points which aren't
+ * necessarily obvious. Usually, this is done by observing that pointers are
+ * N-byte aligned and thus the bottom log_2(N) bits are available for use. We
+ * don't use the high bits of pointers to store additional information because
+ * we don't know what bits are unused on any given architecture.
+ *
+ * Nodes are 256 bytes in size and are also aligned to 256 bytes, giving us 8
+ * low bits for our own purposes. Nodes are currently of 4 types:
+ * 1. Single pointer (Range is 0-0)
+ * 2. Non-leaf Allocation Range nodes
+ * 3. Non-leaf Range nodes
+ * 4. Leaf Range nodes
+ *
+ * All nodes consist of a number of node slots, pivots, and a parent pointer.
+ */
+
+struct maple_node {
+ union {
+ struct {
+ struct maple_pnode *parent;
+ void __rcu *slot[MAPLE_NODE_SLOTS];
+ };
+ struct {
+ void *pad;
+ struct rcu_head rcu;
+ struct maple_enode *piv_parent;
+ unsigned char parent_slot;
+ enum maple_type type;
+ unsigned char slot_len;
+ unsigned int ma_flags;
+ };
+ struct maple_range_64 mr64;
+ struct maple_arange_64 ma64;
+ struct maple_alloc alloc;
+ };
+};
+
+/*
+ * More complicated stores can cause two nodes to become one or three and
+ * potentially alter the height of the tree. Either half of the tree may need
+ * to be rebalanced against the other. The ma_topiary struct is used to track
+ * which nodes have been 'cut' from the tree so that the change can be done
+ * safely at a later date. This is done to support RCU.
+ */
+struct ma_topiary {
+ struct maple_enode *head;
+ struct maple_enode *tail;
+ struct maple_tree *mtree;
+};
+
+void *mtree_load(struct maple_tree *mt, unsigned long index);
+
+int mtree_insert(struct maple_tree *mt, unsigned long index,
+ void *entry, gfp_t gfp);
+int mtree_insert_range(struct maple_tree *mt, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp);
+int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long size, unsigned long min,
+ unsigned long max, gfp_t gfp);
+int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long size, unsigned long min,
+ unsigned long max, gfp_t gfp);
+
+int mtree_store_range(struct maple_tree *mt, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp);
+int mtree_store(struct maple_tree *mt, unsigned long index,
+ void *entry, gfp_t gfp);
+void *mtree_erase(struct maple_tree *mt, unsigned long index);
+
+void mtree_destroy(struct maple_tree *mt);
+void __mt_destroy(struct maple_tree *mt);
+
+/**
+ * mtree_empty() - Determine if a tree has any present entries.
+ * @mt: Maple Tree.
+ *
+ * Context: Any context.
+ * Return: %true if the tree contains only NULL pointers.
+ */
+static inline bool mtree_empty(const struct maple_tree *mt)
+{
+ return mt->ma_root == NULL;
+}
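+
+/*
+ * Example: basic use of the normal API (a minimal sketch; "my_tree"
+ * and "ptr" are hypothetical):
+ *
+ *	DEFINE_MTREE(my_tree);
+ *	void *entry;
+ *
+ *	mtree_store(&my_tree, 3, ptr, GFP_KERNEL);
+ *	entry = mtree_load(&my_tree, 3);
+ *	mtree_erase(&my_tree, 3);
+ *	mtree_destroy(&my_tree);
+ */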
+
+/* Advanced API */
+
+/*
+ * The maple state is defined in the struct ma_state and is used to keep track
+ * of information during operations, and even between operations when using the
+ * advanced API.
+ *
+ * If state->node has bit 0 set then it references a tree location which is not
+ * a node (eg the root). If bit 1 is set, the rest of the bits are a negative
+ * errno. Bit 2 (the 'unallocated slots' bit) is clear. Bits 3-6 indicate the
+ * node type.
+ *
+ * state->alloc either has a requested number of nodes or an allocated node. If
+ * state->alloc has a requested number of nodes, the first bit will be set (0x1)
+ * and the remaining bits are the value. If state->alloc is a node, then the
+ * node will be of type maple_alloc. maple_alloc has MAPLE_NODE_SLOTS - 1 for
+ * storing more allocated nodes, a total number of nodes allocated, and the
+ * node_count in this node. node_count is the number of allocated nodes in this
+ * node. The scaling beyond MAPLE_NODE_SLOTS - 1 is handled by storing further
+ * nodes into state->alloc->slot[0]'s node. Nodes are taken from state->alloc
+ * by removing a node from the state->alloc node until state->alloc->node_count
+ * is 1, when state->alloc is returned and the state->alloc->slot[0] is promoted
+ * to state->alloc. Nodes are pushed onto state->alloc by putting the current
+ * state->alloc into the pushed node's slot[0].
+ *
+ * The state also contains the implied min/max of the state->node, the depth of
+ * this search, and the offset. The implied min/max are either from the parent
+ * node or are 0-oo for the root node. The depth is incremented or decremented
+ * every time a node is walked down or up. The offset is the slot/pivot of
+ * interest in the node - either for reading or writing.
+ *
+ * When returning a value the maple state index and last respectively contain
+ * the start and end of the range for the entry. Ranges are inclusive in the
+ * Maple Tree.
+ */
+struct ma_state {
+ struct maple_tree *tree; /* The tree we're operating in */
+ unsigned long index; /* The index we're operating on - range start */
+ unsigned long last; /* The last index we're operating on - range end */
+ struct maple_enode *node; /* The node containing this entry */
+ unsigned long min; /* The minimum index of this node - implied pivot min */
+ unsigned long max; /* The maximum index of this node - implied pivot max */
+ struct maple_alloc *alloc; /* Allocated nodes for this operation */
+ unsigned char depth; /* depth of tree descent during write */
+ unsigned char offset;
+ unsigned char mas_flags;
+};
+
+struct ma_wr_state {
+ struct ma_state *mas;
+ struct maple_node *node; /* Decoded mas->node */
+ unsigned long r_min; /* range min */
+ unsigned long r_max; /* range max */
+ enum maple_type type; /* mas->node type */
+ unsigned char offset_end; /* The offset where the write ends */
+ unsigned char node_end; /* mas->node end */
+ unsigned long *pivots; /* mas->node->pivots pointer */
+ unsigned long end_piv; /* The pivot at the offset end */
+ void __rcu **slots; /* mas->node->slots pointer */
+ void *entry; /* The entry to write */
+ void *content; /* The existing entry that is being overwritten */
+};
+
+#define mas_lock(mas) spin_lock(&((mas)->tree->ma_lock))
+#define mas_unlock(mas) spin_unlock(&((mas)->tree->ma_lock))
+
+
+/*
+ * Special values for ma_state.node.
+ * MAS_START means we have not searched the tree.
+ * MAS_ROOT means we have searched the tree and the entry we found lives in
+ * the root of the tree (ie it has index 0, length 1 and is the only entry in
+ * the tree).
+ * MAS_NONE means we have searched the tree and there is no node in the
+ * tree for this entry. For example, we searched for index 1 in an empty
+ * tree. Or we have a tree which points to a full leaf node and we
+ * searched for an entry which is larger than can be contained in that
+ * leaf node.
+ * MA_ERROR represents an errno. After dropping the lock and attempting
+ * to resolve the error, the walk would have to be restarted from the
+ * top of the tree as the tree may have been modified.
+ */
+#define MAS_START ((struct maple_enode *)1UL)
+#define MAS_ROOT ((struct maple_enode *)5UL)
+#define MAS_NONE ((struct maple_enode *)9UL)
+#define MAS_PAUSE ((struct maple_enode *)17UL)
+#define MA_ERROR(err) \
+ ((struct maple_enode *)(((unsigned long)err << 2) | 2UL))
+
+#define MA_STATE(name, mt, first, end) \
+ struct ma_state name = { \
+ .tree = mt, \
+ .index = first, \
+ .last = end, \
+ .node = MAS_START, \
+ .min = 0, \
+ .max = ULONG_MAX, \
+ .alloc = NULL, \
+ .mas_flags = 0, \
+ }
+
+#define MA_WR_STATE(name, ma_state, wr_entry) \
+ struct ma_wr_state name = { \
+ .mas = ma_state, \
+ .content = NULL, \
+ .entry = wr_entry, \
+ }
+
+#define MA_TOPIARY(name, tree) \
+ struct ma_topiary name = { \
+ .head = NULL, \
+ .tail = NULL, \
+ .mtree = tree, \
+ }
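+
+/*
+ * Example: storing a range through the advanced API (a minimal sketch;
+ * "tree" and "ptr" are hypothetical, error handling omitted):
+ *
+ *	MA_STATE(mas, &tree, 10, 19);
+ *
+ *	mas_lock(&mas);
+ *	mas_store_gfp(&mas, ptr, GFP_KERNEL);
+ *	mas_unlock(&mas);
+ */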
+
+void *mas_walk(struct ma_state *mas);
+void *mas_store(struct ma_state *mas, void *entry);
+void *mas_erase(struct ma_state *mas);
+int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp);
+void mas_store_prealloc(struct ma_state *mas, void *entry);
+void *mas_find(struct ma_state *mas, unsigned long max);
+void *mas_find_rev(struct ma_state *mas, unsigned long min);
+int mas_preallocate(struct ma_state *mas, gfp_t gfp);
+bool mas_is_err(struct ma_state *mas);
+
+bool mas_nomem(struct ma_state *mas, gfp_t gfp);
+void mas_pause(struct ma_state *mas);
+void maple_tree_init(void);
+void mas_destroy(struct ma_state *mas);
+int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries);
+
+void *mas_prev(struct ma_state *mas, unsigned long min);
+void *mas_next(struct ma_state *mas, unsigned long max);
+
+int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned long max,
+ unsigned long size);
+
+static inline void mas_init(struct ma_state *mas, struct maple_tree *tree,
+ unsigned long addr)
+{
+ memset(mas, 0, sizeof(struct ma_state));
+ mas->tree = tree;
+ mas->index = mas->last = addr;
+ mas->max = ULONG_MAX;
+ mas->node = MAS_START;
+}
+
+/* Checks if a mas has not found anything */
+static inline bool mas_is_none(struct ma_state *mas)
+{
+ return mas->node == MAS_NONE;
+}
+
+/* Checks if a mas has been paused */
+static inline bool mas_is_paused(struct ma_state *mas)
+{
+ return mas->node == MAS_PAUSE;
+}
+
+/*
+ * This finds an empty area from the highest address to the lowest.
+ * AKA the "top-down" version.
+ */
+int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
+ unsigned long max, unsigned long size);
+/**
+ * mas_reset() - Reset a Maple Tree operation state.
+ * @mas: Maple Tree operation state.
+ *
+ * Resets the error or walk state of the @mas so future walks of the
+ * array will start from the root. Use this if you have dropped the
+ * lock and want to reuse the ma_state.
+ *
+ * Context: Any context.
+ */
+static inline void mas_reset(struct ma_state *mas)
+{
+ mas->node = MAS_START;
+}
+
+/**
+ * mas_for_each() - Iterate over a range of the maple tree.
+ * @__mas: Maple Tree operation state (maple_state)
+ * @__entry: Entry retrieved from the tree
+ * @__max: maximum index to retrieve from the tree
+ *
+ * When returned, mas->index and mas->last will hold the entire range for the
+ * entry.
+ *
+ * Note: may return the zero entry.
+ */
+#define mas_for_each(__mas, __entry, __max) \
+ while (((__entry) = mas_find((__mas), (__max))) != NULL)
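+
+/*
+ * Example: walking every entry under RCU (a minimal sketch; "tree" is
+ * a hypothetical name):
+ *
+ *	MA_STATE(mas, &tree, 0, 0);
+ *	void *entry;
+ *
+ *	rcu_read_lock();
+ *	mas_for_each(&mas, entry, ULONG_MAX)
+ *		pr_info("%lu-%lu -> %p\n", mas.index, mas.last, entry);
+ *	rcu_read_unlock();
+ */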
+
+
+/**
+ * mas_set_range() - Set up Maple Tree operation state for a different index.
+ * @mas: Maple Tree operation state.
+ * @start: New start of range in the Maple Tree.
+ * @last: New end of range in the Maple Tree.
+ *
+ * Move the operation state to refer to a different range. This will
+ * have the effect of starting a walk from the top; see mas_next()
+ * to move to an adjacent index.
+ */
+static inline
+void mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last)
+{
+ mas->index = start;
+ mas->last = last;
+ mas->node = MAS_START;
+}
+
+/**
+ * mas_set() - Set up Maple Tree operation state for a different index.
+ * @mas: Maple Tree operation state.
+ * @index: New index into the Maple Tree.
+ *
+ * Move the operation state to refer to a different index. This will
+ * have the effect of starting a walk from the top; see mas_next()
+ * to move to an adjacent index.
+ */
+static inline void mas_set(struct ma_state *mas, unsigned long index)
+{
+
+ mas_set_range(mas, index, index);
+}
+
+static inline bool mt_external_lock(const struct maple_tree *mt)
+{
+ return (mt->ma_flags & MT_FLAGS_LOCK_MASK) == MT_FLAGS_LOCK_EXTERN;
+}
+
+/**
+ * mt_init_flags() - Initialise an empty maple tree with flags.
+ * @mt: Maple Tree
+ * @flags: maple tree flags.
+ *
+ * If you need to initialise a Maple Tree with special flags (eg, an
+ * allocation tree), use this function.
+ *
+ * Context: Any context.
+ */
+static inline void mt_init_flags(struct maple_tree *mt, unsigned int flags)
+{
+ mt->ma_flags = flags;
+ if (!mt_external_lock(mt))
+ spin_lock_init(&mt->ma_lock);
+ rcu_assign_pointer(mt->ma_root, NULL);
+}
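+
+/*
+ * Example: an allocation tree that tracks gaps (a minimal sketch;
+ * "tree" and "ptr" are hypothetical):
+ *
+ *	struct maple_tree tree;
+ *	unsigned long start;
+ *
+ *	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ *	mtree_alloc_range(&tree, &start, ptr, 16, 0, 1023, GFP_KERNEL);
+ *
+ * On success the 16-index range [start, start + 15] now holds ptr.
+ */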
+
+/**
+ * mt_init() - Initialise an empty maple tree.
+ * @mt: Maple Tree
+ *
+ * An empty Maple Tree.
+ *
+ * Context: Any context.
+ */
+static inline void mt_init(struct maple_tree *mt)
+{
+ mt_init_flags(mt, 0);
+}
+
+static inline bool mt_in_rcu(struct maple_tree *mt)
+{
+#ifdef CONFIG_MAPLE_RCU_DISABLED
+ return false;
+#endif
+ return mt->ma_flags & MT_FLAGS_USE_RCU;
+}
+
+/**
+ * mt_clear_in_rcu() - Switch the tree to non-RCU mode.
+ * @mt: The Maple Tree
+ */
+static inline void mt_clear_in_rcu(struct maple_tree *mt)
+{
+ if (!mt_in_rcu(mt))
+ return;
+
+ if (mt_external_lock(mt)) {
+ BUG_ON(!mt_lock_is_held(mt));
+ mt->ma_flags &= ~MT_FLAGS_USE_RCU;
+ } else {
+ mtree_lock(mt);
+ mt->ma_flags &= ~MT_FLAGS_USE_RCU;
+ mtree_unlock(mt);
+ }
+}
+
+/**
+ * mt_set_in_rcu() - Switch the tree to RCU safe mode.
+ * @mt: The Maple Tree
+ */
+static inline void mt_set_in_rcu(struct maple_tree *mt)
+{
+ if (mt_in_rcu(mt))
+ return;
+
+ if (mt_external_lock(mt)) {
+ BUG_ON(!mt_lock_is_held(mt));
+ mt->ma_flags |= MT_FLAGS_USE_RCU;
+ } else {
+ mtree_lock(mt);
+ mt->ma_flags |= MT_FLAGS_USE_RCU;
+ mtree_unlock(mt);
+ }
+}
+
+static inline unsigned int mt_height(const struct maple_tree *mt)
+{
+ return (mt->ma_flags & MT_FLAGS_HEIGHT_MASK) >> MT_FLAGS_HEIGHT_OFFSET;
+}
+
+void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max);
+void *mt_find_after(struct maple_tree *mt, unsigned long *index,
+ unsigned long max);
+void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min);
+void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max);
+
+/**
+ * mt_for_each - Iterate over each entry starting at index until max.
+ * @__tree: The Maple Tree
+ * @__entry: The current entry
+ * @__index: The index to update to track the location in the tree
+ * @__max: The maximum limit for @index
+ *
+ * Note: Will not return the zero entry.
+ */
+#define mt_for_each(__tree, __entry, __index, __max) \
+ for (__entry = mt_find(__tree, &(__index), __max); \
+ __entry; __entry = mt_find_after(__tree, &(__index), __max))
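+
+/*
+ * Example (a minimal sketch; "tree" is a hypothetical name):
+ *
+ *	unsigned long index = 0;
+ *	void *entry;
+ *
+ *	mt_for_each(&tree, entry, index, ULONG_MAX)
+ *		pr_info("%lu -> %p\n", index, entry);
+ */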
+
+
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+extern atomic_t maple_tree_tests_run;
+extern atomic_t maple_tree_tests_passed;
+
+void mt_dump(const struct maple_tree *mt);
+void mt_validate(struct maple_tree *mt);
+void mt_cache_shrink(void);
+#define MT_BUG_ON(__tree, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mt_dump(__tree); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+#else
+#define MT_BUG_ON(__tree, __x) BUG_ON(__x)
+#endif /* CONFIG_DEBUG_MAPLE_TREE */
+
+#endif /*_LINUX_MAPLE_TREE_H */
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 4055cf8cc978..0f06c2287b52 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MARVELL_PHY_H
#define _MARVELL_PHY_H
@@ -14,20 +15,33 @@
#define MARVELL_PHY_ID_88E1149R 0x01410e50
#define MARVELL_PHY_ID_88E1240 0x01410e30
#define MARVELL_PHY_ID_88E1318S 0x01410e90
+#define MARVELL_PHY_ID_88E1340S 0x01410dc0
#define MARVELL_PHY_ID_88E1116R 0x01410e40
#define MARVELL_PHY_ID_88E1510 0x01410dd0
#define MARVELL_PHY_ID_88E1540 0x01410eb0
#define MARVELL_PHY_ID_88E1545 0x01410ea0
+#define MARVELL_PHY_ID_88E1548P 0x01410ec0
#define MARVELL_PHY_ID_88E3016 0x01410e60
+#define MARVELL_PHY_ID_88X3310 0x002b09a0
+#define MARVELL_PHY_ID_88E2110 0x002b09b0
+#define MARVELL_PHY_ID_88X2222 0x01410f10
-/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do
+/* Marvell 88E1111 in Finisar SFP module with modified PHY ID */
+#define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0
+
+/* These Ethernet switch families contain embedded PHYs, but they do
* not have a model ID. So the switch driver traps reads to the ID2
* register and returns the switch family ID
*/
-#define MARVELL_PHY_ID_88E6390 0x01410f90
+#define MARVELL_PHY_ID_88E6341_FAMILY 0x01410f41
+#define MARVELL_PHY_ID_88E6390_FAMILY 0x01410f90
+#define MARVELL_PHY_ID_88E6393_FAMILY 0x002b0b9b
+
+#define MARVELL_PHY_FAMILY_ID(id) ((id) >> 4)
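+
+/*
+ * Example: matching any revision within a switch family (a minimal
+ * sketch; "id" stands for a value read back from the trapped ID
+ * registers and "handle_88e6390_phy()" is hypothetical):
+ *
+ *	if (MARVELL_PHY_FAMILY_ID(id) ==
+ *	    MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E6390_FAMILY))
+ *		handle_88e6390_phy();
+ */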
/* struct phy_device dev_flags definitions */
#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001
#define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002
+#define MARVELL_PHY_LED0_LINK_LED1_ACTIVE 0x00000004
#endif /* _MARVELL_PHY_H */
diff --git a/include/linux/math.h b/include/linux/math.h
new file mode 100644
index 000000000000..439b8f0b9ebd
--- /dev/null
+++ b/include/linux/math.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MATH_H
+#define _LINUX_MATH_H
+
+#include <linux/types.h>
+#include <asm/div64.h>
+#include <uapi/linux/kernel.h>
+
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+
+/**
+ * round_up - round up to next specified power of 2
+ * @x: the value to round
+ * @y: multiple to round up to (must be a power of 2)
+ *
+ * Rounds @x up to next multiple of @y (which must be a power of 2).
+ * To perform arbitrary rounding up, use roundup() below.
+ */
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+
+/**
+ * round_down - round down to next specified power of 2
+ * @x: the value to round
+ * @y: multiple to round down to (must be a power of 2)
+ *
+ * Rounds @x down to next multiple of @y (which must be a power of 2).
+ * To perform arbitrary rounding down, use rounddown() below.
+ */
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
+#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
+
+#define DIV_ROUND_DOWN_ULL(ll, d) \
+ ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
+
+#define DIV_ROUND_UP_ULL(ll, d) \
+ DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d))
+
+#if BITS_PER_LONG == 32
+# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
+#else
+# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
+#endif
+
+/**
+ * roundup - round up to the next specified multiple
+ * @x: the value to round up
+ * @y: multiple to round up to
+ *
+ * Rounds @x up to next multiple of @y. If @y will always be a power
+ * of 2, consider using the faster round_up().
+ */
+#define roundup(x, y) ( \
+{ \
+ typeof(y) __y = y; \
+ (((x) + (__y - 1)) / __y) * __y; \
+} \
+)
+/**
+ * rounddown - round down to next specified multiple
+ * @x: the value to round
+ * @y: multiple to round down to
+ *
+ * Rounds @x down to next multiple of @y. If @y will always be a power
+ * of 2, consider using the faster round_down().
+ */
+#define rounddown(x, y) ( \
+{ \
+ typeof(x) __x = (x); \
+ __x - (__x % (y)); \
+} \
+)
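+
+/*
+ * Example values:
+ *
+ *	round_up(10, 8)   == 16		(@y must be a power of 2)
+ *	round_down(10, 8) == 8
+ *	roundup(10, 6)    == 12		(any multiple)
+ *	rounddown(10, 6)  == 6
+ */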
+
+/*
+ * Divide positive or negative dividend by positive or negative divisor
+ * and round to closest integer. Result is undefined for negative
+ * divisors if the dividend variable type is unsigned and for negative
+ * dividends if the divisor variable type is unsigned.
+ */
+#define DIV_ROUND_CLOSEST(x, divisor)( \
+{ \
+ typeof(x) __x = x; \
+ typeof(divisor) __d = divisor; \
+ (((typeof(x))-1) > 0 || \
+ ((typeof(divisor))-1) > 0 || \
+ (((__x) > 0) == ((__d) > 0))) ? \
+ (((__x) + ((__d) / 2)) / (__d)) : \
+ (((__x) - ((__d) / 2)) / (__d)); \
+} \
+)
+/*
+ * Same as above but for u64 dividends. divisor must be a 32-bit
+ * number.
+ */
+#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
+{ \
+ typeof(divisor) __d = divisor; \
+ unsigned long long _tmp = (x) + (__d) / 2; \
+ do_div(_tmp, __d); \
+ _tmp; \
+} \
+)
+
+#define __STRUCT_FRACT(type) \
+struct type##_fract { \
+ __##type numerator; \
+ __##type denominator; \
+};
+__STRUCT_FRACT(s16)
+__STRUCT_FRACT(u16)
+__STRUCT_FRACT(s32)
+__STRUCT_FRACT(u32)
+#undef __STRUCT_FRACT
+
+/*
+ * Multiplies an integer by a fraction, while avoiding unnecessary
+ * overflow or loss of precision.
+ */
+#define mult_frac(x, numer, denom)( \
+{ \
+ typeof(x) quot = (x) / (denom); \
+ typeof(x) rem = (x) % (denom); \
+ (quot * (numer)) + ((rem * (numer)) / (denom)); \
+} \
+)
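+
+/*
+ * Example: mult_frac() splits the value into quotient and remainder
+ * before multiplying, so scaling a large 32-bit value does not
+ * overflow the way a naive (x * numer) / denom can:
+ *
+ *	u32 x = 0xf0000000;
+ *	u32 y = mult_frac(x, 2, 3);	y == 0xa0000000; (x * 2) would wrap
+ */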
+
+#define sector_div(a, b) do_div(a, b)
+
+/**
+ * abs - return absolute value of an argument
+ * @x: the value. If it is unsigned type, it is converted to signed type first.
+ * char is treated as if it were signed (regardless of whether it really is)
+ * but the macro's return type is preserved as char.
+ *
+ * Return: an absolute value of x.
+ */
+#define abs(x) __abs_choose_expr(x, long long, \
+ __abs_choose_expr(x, long, \
+ __abs_choose_expr(x, int, \
+ __abs_choose_expr(x, short, \
+ __abs_choose_expr(x, char, \
+ __builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(x), char), \
+ (char)({ signed char __x = (x); __x<0?-__x:__x; }), \
+ ((void)0)))))))
+
+#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(x), signed type) || \
+ __builtin_types_compatible_p(typeof(x), unsigned type), \
+ ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
+
+/**
+ * reciprocal_scale - "scale" a value into range [0, ep_ro)
+ * @val: value
+ * @ep_ro: right open interval endpoint
+ *
+ * Perform a "reciprocal multiplication" in order to "scale" a value into
+ * range [0, @ep_ro), where the upper interval endpoint is right-open.
+ * This is useful, e.g., for accessing an index of an array containing
+ * @ep_ro elements. Think of it as a sort of modulus, only that the
+ * result isn't that of modulo. ;) Note that if the initial input is a
+ * small value, the result will be 0.
+ *
+ * Return: a result based on @val in interval [0, @ep_ro).
+ */
+static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
+{
+ return (u32)(((u64) val * ep_ro) >> 32);
+}
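+
+/*
+ * Example: picking a hash bucket without a modulo (a minimal sketch;
+ * "key", "len" and "nr_buckets" are hypothetical):
+ *
+ *	u32 bucket = reciprocal_scale(jhash(key, len, 0), nr_buckets);
+ */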
+
+u64 int_pow(u64 base, unsigned int exp);
+unsigned long int_sqrt(unsigned long);
+
+#if BITS_PER_LONG < 64
+u32 int_sqrt64(u64 x);
+#else
+static inline u32 int_sqrt64(u64 x)
+{
+ return (u32)int_sqrt(x);
+}
+#endif
+
+#endif /* _LINUX_MATH_H */
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 80690c96c734..8b9191a2849e 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -1,7 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H
#include <linux/types.h>
+#include <linux/math.h>
+#include <vdso/math64.h>
#include <asm/div64.h>
#if BITS_PER_LONG == 64
@@ -11,6 +14,11 @@
/**
* div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 32bit divisor
+ * @remainder: pointer to unsigned 32bit remainder
+ *
+ * Return: sets ``*remainder``, then returns dividend / divisor
*
* This is commonly provided by 32bit archs to provide an optimized 64bit
* divide.
@@ -23,6 +31,11 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
/**
* div_s64_rem - signed 64bit divide with 32bit divisor with remainder
+ * @dividend: signed 64bit dividend
+ * @divisor: signed 32bit divisor
+ * @remainder: pointer to signed 32bit remainder
+ *
+ * Return: sets ``*remainder``, then returns dividend / divisor
*/
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
@@ -32,6 +45,11 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
/**
* div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 64bit divisor
+ * @remainder: pointer to unsigned 64bit remainder
+ *
+ * Return: sets ``*remainder``, then returns dividend / divisor
*/
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
@@ -41,6 +59,10 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
/**
* div64_u64 - unsigned 64bit divide with 64bit divisor
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 64bit divisor
+ *
+ * Return: dividend / divisor
*/
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
@@ -49,6 +71,10 @@ static inline u64 div64_u64(u64 dividend, u64 divisor)
/**
* div64_s64 - signed 64bit divide with 64bit divisor
+ * @dividend: signed 64bit dividend
+ * @divisor: signed 64bit divisor
+ *
+ * Return: dividend / divisor
*/
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
@@ -88,10 +114,14 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
/**
* div_u64 - unsigned 64bit divide with 32bit divisor
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 32bit divisor
*
* This is the most common 64bit divide and should be used if possible,
* as many 32bit archs can optimize this variant better than a full 64bit
* divide.
+ *
+ * Return: dividend / divisor
*/
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
@@ -103,6 +133,10 @@ static inline u64 div_u64(u64 dividend, u32 divisor)
/**
* div_s64 - signed 64bit divide with 32bit divisor
+ * @dividend: signed 64bit dividend
+ * @divisor: signed 32bit divisor
+ *
+ * Return: dividend / divisor
*/
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
@@ -114,25 +148,6 @@ static inline s64 div_s64(s64 dividend, s32 divisor)
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
-static __always_inline u32
-__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
-{
- u32 ret = 0;
-
- while (dividend >= divisor) {
- /* The following asm() prevents the compiler from
- optimising this loop into a modulo operation. */
- asm("" : "+rm"(dividend));
-
- dividend -= divisor;
- ret++;
- }
-
- *remainder = dividend;
-
- return ret;
-}
-
#ifndef mul_u32_u32
/*
* Many a GCC version messes this up and generates a 64x64 mult :-(
@@ -146,7 +161,7 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
#ifndef mul_u64_u32_shr
-static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
return (u64)(((unsigned __int128)a * mul) >> shift);
}
@@ -162,7 +177,7 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
#else
#ifndef mul_u64_u32_shr
-static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
u32 ah, al;
u64 ret;
@@ -224,6 +239,24 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
#endif
+#ifndef mul_s64_u64_shr
+static inline u64 mul_s64_u64_shr(s64 a, u64 b, unsigned int shift)
+{
+ u64 ret;
+
+ /*
+ * Extract the sign before the multiplication and put it back
+ * afterwards if needed.
+ */
+ ret = mul_u64_u64_shr(abs(a), b, shift);
+
+ if (a < 0)
+ ret = -((s64) ret);
+
+ return ret;
+}
+#endif /* mul_s64_u64_shr */
+
#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
@@ -253,4 +286,64 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
}
#endif /* mul_u64_u32_div */
+u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
+
+/**
+ * DIV64_U64_ROUND_UP - unsigned 64bit divide with 64bit divisor rounded up
+ * @ll: unsigned 64bit dividend
+ * @d: unsigned 64bit divisor
+ *
+ * Divide unsigned 64bit dividend by unsigned 64bit divisor
+ * and round up.
+ *
+ * Return: dividend / divisor rounded up
+ */
+#define DIV64_U64_ROUND_UP(ll, d) \
+ ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
+
+/**
+ * DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 64bit divisor
+ *
+ * Divide unsigned 64bit dividend by unsigned 64bit divisor
+ * and round to closest integer.
+ *
+ * Return: dividend / divisor rounded to nearest integer
+ */
+#define DIV64_U64_ROUND_CLOSEST(dividend, divisor) \
+ ({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); })
+
+/**
+ * DIV_U64_ROUND_CLOSEST - unsigned 64bit divide with 32bit divisor rounded to nearest integer
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 32bit divisor
+ *
+ * Divide unsigned 64bit dividend by unsigned 32bit divisor
+ * and round to closest integer.
+ *
+ * Return: dividend / divisor rounded to nearest integer
+ */
+#define DIV_U64_ROUND_CLOSEST(dividend, divisor) \
+ ({ u32 _tmp = (divisor); div_u64((u64)(dividend) + _tmp / 2, _tmp); })
+
+/**
+ * DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer
+ * @dividend: signed 64bit dividend
+ * @divisor: signed 32bit divisor
+ *
+ * Divide signed 64bit dividend by signed 32bit divisor
+ * and round to closest integer.
+ *
+ * Return: dividend / divisor rounded to nearest integer
+ */
+#define DIV_S64_ROUND_CLOSEST(dividend, divisor)( \
+{ \
+ s64 __x = (dividend); \
+ s32 __d = (divisor); \
+ ((__x > 0) == (__d > 0)) ? \
+ div_s64((__x + (__d / 2)), __d) : \
+ div_s64((__x - (__d / 2)), __d); \
+} \
+)
#endif /* _LINUX_MATH64_H */
diff --git a/include/linux/max17040_battery.h b/include/linux/max17040_battery.h
deleted file mode 100644
index ad97b06cf930..000000000000
--- a/include/linux/max17040_battery.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (C) 2009 Samsung Electronics
- * Minkyu Kang <mk7.kang@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MAX17040_BATTERY_H_
-#define __MAX17040_BATTERY_H_
-
-struct max17040_platform_data {
- int (*battery_online)(void);
- int (*charger_online)(void);
- int (*charger_enable)(void);
-};
-
-#endif
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index e1bc73414983..97e64184767d 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MBCACHE_H
#define _LINUX_MBCACHE_H
@@ -9,16 +10,29 @@
struct mb_cache;
+/* Cache entry flags */
+enum {
+ MBE_REFERENCED_B = 0,
+ MBE_REUSABLE_B
+};
+
struct mb_cache_entry {
/* List of entries in cache - protected by cache->c_list_lock */
struct list_head e_list;
- /* Hash table list - protected by hash chain bitlock */
+ /*
+ * Hash table list - protected by hash chain bitlock. The entry is
+ * guaranteed to be hashed while e_refcnt > 0.
+ */
struct hlist_bl_node e_hash_list;
+ /*
+ * Entry refcount. Once it reaches zero, entry is unhashed and freed.
+ * While refcount > 0, the entry is guaranteed to stay in the hash and
+ * e.g. mb_cache_entry_try_delete() will fail.
+ */
atomic_t e_refcnt;
/* Key in hash - stable during lifetime of the entry */
u32 e_key;
- u32 e_referenced:1;
- u32 e_reusable:1;
+ unsigned long e_flags;
/* User provided value - stable during lifetime of the entry */
u64 e_value;
};
@@ -28,17 +42,24 @@ void mb_cache_destroy(struct mb_cache *cache);
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
u64 value, bool reusable);
-void __mb_cache_entry_free(struct mb_cache_entry *entry);
-static inline int mb_cache_entry_put(struct mb_cache *cache,
- struct mb_cache_entry *entry)
+void __mb_cache_entry_free(struct mb_cache *cache,
+ struct mb_cache_entry *entry);
+void mb_cache_entry_wait_unused(struct mb_cache_entry *entry);
+static inline void mb_cache_entry_put(struct mb_cache *cache,
+ struct mb_cache_entry *entry)
{
- if (!atomic_dec_and_test(&entry->e_refcnt))
- return 0;
- __mb_cache_entry_free(entry);
- return 1;
+ unsigned int cnt = atomic_dec_return(&entry->e_refcnt);
+
+ if (cnt > 0) {
+ if (cnt <= 2)
+ wake_up_var(&entry->e_refcnt);
+ return;
+ }
+ __mb_cache_entry_free(cache, entry);
}
-void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value);
+struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
+ u32 key, u64 value);
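+
+/*
+ * Example: removing an entry that may still be in use (a minimal
+ * sketch; "cache", "key" and "value" are hypothetical):
+ *
+ *	struct mb_cache_entry *entry;
+ *
+ *	entry = mb_cache_entry_delete_or_get(cache, key, value);
+ *	if (entry) {
+ *		mb_cache_entry_wait_unused(entry);
+ *		mb_cache_entry_put(cache, entry);
+ *	}
+ */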
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
u64 value);
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index 0d3f14fd2621..4773145246ed 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -31,8 +31,8 @@ struct mbus_dram_target_info
struct mbus_dram_window {
u8 cs_index;
u8 mbus_attr;
- u32 base;
- u32 size;
+ u64 base;
+ u64 size;
} cs[4];
};
diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
index 0661af17a758..b0da04fe087b 100644
--- a/include/linux/mc146818rtc.h
+++ b/include/linux/mc146818rtc.h
@@ -86,6 +86,8 @@ struct cmos_rtc_board_info {
/* 2 values for divider stage reset, others for "testing purposes only" */
# define RTC_DIV_RESET1 0x60
# define RTC_DIV_RESET2 0x70
+ /* In AMD BKDG bit 5 and 6 are reserved, bit 4 is for select dv0 bank */
+# define RTC_AMD_BANK_SELECT 0x10
/* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */
# define RTC_RATE_SELECT 0x0F
@@ -123,7 +125,11 @@ struct cmos_rtc_board_info {
#define RTC_IO_EXTENT_USED RTC_IO_EXTENT
#endif /* ARCH_RTC_LOCATION */
-unsigned int mc146818_get_time(struct rtc_time *time);
+bool mc146818_does_rtc_work(void);
+int mc146818_get_time(struct rtc_time *time);
int mc146818_set_time(struct rtc_time *time);
+bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
+ void *param);
+
#endif /* _MC146818RTC_H */
diff --git a/include/linux/mc6821.h b/include/linux/mc6821.h
index 28e301e295da..8dffab19b4ac 100644
--- a/include/linux/mc6821.h
+++ b/include/linux/mc6821.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MC6821_H_
#define _MC6821_H_
diff --git a/include/linux/mcb.h b/include/linux/mcb.h
index 4097ac9ea13a..1e5893138afe 100644
--- a/include/linux/mcb.h
+++ b/include/linux/mcb.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* MEN Chameleon Bus.
*
* Copyright (C) 2014 MEN Mikroelektronik GmbH (www.men.de)
* Author: Johannes Thumshirn <johannes.thumshirn@men.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; version 2 of the License.
*/
#ifndef _LINUX_MCB_H
#define _LINUX_MCB_H
@@ -79,10 +76,7 @@ struct mcb_device {
struct device *dma_dev;
};
-static inline struct mcb_device *to_mcb_device(struct device *dev)
-{
- return container_of(dev, struct mcb_device, dev);
-}
+#define to_mcb_device(__dev) container_of_const(__dev, struct mcb_device, dev)
/**
* struct mcb_driver - MEN Chameleon Bus device driver
@@ -123,7 +117,7 @@ extern int __must_check __mcb_register_driver(struct mcb_driver *drv,
__mcb_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
extern void mcb_unregister_driver(struct mcb_driver *driver);
#define module_mcb_driver(__mcb_driver) \
- module_driver(__mcb_driver, mcb_register_driver, mcb_unregister_driver);
+ module_driver(__mcb_driver, mcb_register_driver, mcb_unregister_driver)
extern void mcb_bus_add_devices(const struct mcb_bus *bus);
extern int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev);
extern struct mcb_bus *mcb_alloc_bus(struct device *carrier);
@@ -136,5 +130,7 @@ extern struct resource *mcb_request_mem(struct mcb_device *dev,
const char *name);
extern void mcb_release_mem(struct resource *mem);
extern int mcb_get_irq(struct mcb_device *dev);
+extern struct resource *mcb_get_resource(struct mcb_device *dev,
+ unsigned int type);
#endif /* _LINUX_MCB_H */
diff --git a/include/linux/mdev.h b/include/linux/mdev.h
index b6e048e1045f..139d05b26f82 100644
--- a/include/linux/mdev.h
+++ b/include/linux/mdev.h
@@ -1,138 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Mediated device definition
*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
* Author: Neo Jia <cjia@nvidia.com>
* Kirti Wankhede <kwankhede@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef MDEV_H
#define MDEV_H
-struct mdev_device;
+#include <linux/device.h>
+#include <linux/uuid.h>
-/**
- * struct mdev_parent_ops - Structure to be registered for each parent device to
- * register the device to mdev module.
- *
- * @owner: The module owner.
- * @dev_attr_groups: Attributes of the parent device.
- * @mdev_attr_groups: Attributes of the mediated device.
- * @supported_type_groups: Attributes to define supported types. It is mandatory
- * to provide supported types.
- * @create: Called to allocate basic resources in parent device's
- * driver for a particular mediated device. It is
- * mandatory to provide create ops.
- * @kobj: kobject of type for which 'create' is called.
- * @mdev: mdev_device structure on of mediated device
- * that is being created
- * Returns integer: success (0) or error (< 0)
- * @remove: Called to free resources in parent device's driver for a
- * a mediated device. It is mandatory to provide 'remove'
- * ops.
- * @mdev: mdev_device device structure which is being
- * destroyed
- * Returns integer: success (0) or error (< 0)
- * @open: Open mediated device.
- * @mdev: mediated device.
- * Returns integer: success (0) or error (< 0)
- * @release: release mediated device
- * @mdev: mediated device.
- * @read: Read emulation callback
- * @mdev: mediated device structure
- * @buf: read buffer
- * @count: number of bytes to read
- * @ppos: address.
- * Retuns number on bytes read on success or error.
- * @write: Write emulation callback
- * @mdev: mediated device structure
- * @buf: write buffer
- * @count: number of bytes to be written
- * @ppos: address.
- * Retuns number on bytes written on success or error.
- * @ioctl: IOCTL callback
- * @mdev: mediated device structure
- * @cmd: ioctl command
- * @arg: arguments to ioctl
- * @mmap: mmap callback
- * @mdev: mediated device structure
- * @vma: vma structure
- * Parent device that support mediated device should be registered with mdev
- * module with mdev_parent_ops structure.
- **/
-struct mdev_parent_ops {
- struct module *owner;
- const struct attribute_group **dev_attr_groups;
- const struct attribute_group **mdev_attr_groups;
- struct attribute_group **supported_type_groups;
+struct mdev_type;
+
+struct mdev_device {
+ struct device dev;
+ guid_t uuid;
+ struct list_head next;
+ struct mdev_type *type;
+ bool active;
+};
- int (*create)(struct kobject *kobj, struct mdev_device *mdev);
- int (*remove)(struct mdev_device *mdev);
- int (*open)(struct mdev_device *mdev);
- void (*release)(struct mdev_device *mdev);
- ssize_t (*read)(struct mdev_device *mdev, char __user *buf,
- size_t count, loff_t *ppos);
- ssize_t (*write)(struct mdev_device *mdev, const char __user *buf,
- size_t count, loff_t *ppos);
- long (*ioctl)(struct mdev_device *mdev, unsigned int cmd,
- unsigned long arg);
- int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma);
+struct mdev_type {
+ /* set by the driver before calling mdev_register_parent(): */
+ const char *sysfs_name;
+ const char *pretty_name;
+
+ /* set by the core, can be used by drivers */
+ struct mdev_parent *parent;
+
+ /* internal only */
+ struct kobject kobj;
+ struct kobject *devices_kobj;
};
-/* interface for exporting mdev supported type attributes */
-struct mdev_type_attribute {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj, struct device *dev, char *buf);
- ssize_t (*store)(struct kobject *kobj, struct device *dev,
- const char *buf, size_t count);
+/* embedded into the struct device that the mdev devices hang off */
+struct mdev_parent {
+ struct device *dev;
+ struct mdev_driver *mdev_driver;
+ struct kset *mdev_types_kset;
+ /* Synchronize device creation/removal with parent unregistration */
+ struct rw_semaphore unreg_sem;
+ struct mdev_type **types;
+ unsigned int nr_types;
+ atomic_t available_instances;
};
-#define MDEV_TYPE_ATTR(_name, _mode, _show, _store) \
-struct mdev_type_attribute mdev_type_attr_##_name = \
- __ATTR(_name, _mode, _show, _store)
-#define MDEV_TYPE_ATTR_RW(_name) \
- struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RW(_name)
-#define MDEV_TYPE_ATTR_RO(_name) \
- struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RO(_name)
-#define MDEV_TYPE_ATTR_WO(_name) \
- struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_WO(_name)
+static inline struct mdev_device *to_mdev_device(struct device *dev)
+{
+ return container_of(dev, struct mdev_device, dev);
+}
/**
* struct mdev_driver - Mediated device driver
- * @name: driver name
+ * @device_api: string to return for the device_api sysfs
+ * @max_instances: maximum number of instances supported (optional)
* @probe: called when new device created
* @remove: called when device removed
+ * @get_available: Return the max number of instances that can be created
+ * @show_description: Print a description of the mtype
* @driver: device driver structure
- *
**/
struct mdev_driver {
- const char *name;
- int (*probe)(struct device *dev);
- void (*remove)(struct device *dev);
+ const char *device_api;
+ unsigned int max_instances;
+ int (*probe)(struct mdev_device *dev);
+ void (*remove)(struct mdev_device *dev);
+ unsigned int (*get_available)(struct mdev_type *mtype);
+ ssize_t (*show_description)(struct mdev_type *mtype, char *buf);
struct device_driver driver;
};
-#define to_mdev_driver(drv) container_of(drv, struct mdev_driver, driver)
-
-extern void *mdev_get_drvdata(struct mdev_device *mdev);
-extern void mdev_set_drvdata(struct mdev_device *mdev, void *data);
-extern uuid_le mdev_uuid(struct mdev_device *mdev);
-
-extern struct bus_type mdev_bus_type;
-
-extern int mdev_register_device(struct device *dev,
- const struct mdev_parent_ops *ops);
-extern void mdev_unregister_device(struct device *dev);
+int mdev_register_parent(struct mdev_parent *parent, struct device *dev,
+ struct mdev_driver *mdev_driver, struct mdev_type **types,
+ unsigned int nr_types);
+void mdev_unregister_parent(struct mdev_parent *parent);
-extern int mdev_register_driver(struct mdev_driver *drv, struct module *owner);
-extern void mdev_unregister_driver(struct mdev_driver *drv);
+int mdev_register_driver(struct mdev_driver *drv);
+void mdev_unregister_driver(struct mdev_driver *drv);
-extern struct device *mdev_parent_dev(struct mdev_device *mdev);
-extern struct device *mdev_dev(struct mdev_device *mdev);
-extern struct mdev_device *mdev_from_dev(struct device *dev);
+static inline struct device *mdev_dev(struct mdev_device *mdev)
+{
+ return &mdev->dev;
+}
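+
+/*
+ * Example: skeleton of a driver registration (a minimal sketch; every
+ * "my_*" name and "parent_dev" is hypothetical, and the device_api
+ * string depends on the device type being exposed):
+ *
+ *	static struct mdev_driver my_mdev_driver = {
+ *		.device_api	= "vfio-pci",
+ *		.probe		= my_probe,
+ *		.remove		= my_remove,
+ *		.driver = {
+ *			.name	= "my_mdev",
+ *			.owner	= THIS_MODULE,
+ *		},
+ *	};
+ *	static struct mdev_parent my_parent;
+ *
+ *	mdev_register_driver(&my_mdev_driver);
+ *	mdev_register_parent(&my_parent, parent_dev, &my_mdev_driver,
+ *			     my_types, ARRAY_SIZE(my_types));
+ */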
#endif /* MDEV_H */
diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h
index 76f52bbbb2f4..cffabdbce075 100644
--- a/include/linux/mdio-bitbang.h
+++ b/include/linux/mdio-bitbang.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MDIO_BITBANG_H
#define __LINUX_MDIO_BITBANG_H
@@ -32,10 +33,16 @@ struct mdiobb_ops {
struct mdiobb_ctrl {
const struct mdiobb_ops *ops;
- /* reset callback */
- int (*reset)(struct mii_bus *bus);
+ unsigned int override_op_c22;
+ u8 op_c22_read;
+ u8 op_c22_write;
};
+int mdiobb_read_c22(struct mii_bus *bus, int phy, int reg);
+int mdiobb_write_c22(struct mii_bus *bus, int phy, int reg, u16 val);
+int mdiobb_read_c45(struct mii_bus *bus, int devad, int phy, int reg);
+int mdiobb_write_c45(struct mii_bus *bus, int devad, int phy, int reg, u16 val);
+
/* The returned bus is not yet registered with the phy layer. */
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl);
diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h
new file mode 100644
index 000000000000..cea443a672cb
--- /dev/null
+++ b/include/linux/mdio-gpio.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_MDIO_GPIO_H
+#define __LINUX_MDIO_GPIO_H
+
+#define MDIO_GPIO_MDC 0
+#define MDIO_GPIO_MDIO 1
+#define MDIO_GPIO_MDO 2
+
+#endif
diff --git a/include/linux/mdio-mux.h b/include/linux/mdio-mux.h
index 61f5b21b31c7..a5d58f221939 100644
--- a/include/linux/mdio-mux.h
+++ b/include/linux/mdio-mux.h
@@ -12,7 +12,16 @@
#include <linux/device.h>
#include <linux/phy.h>
+/* mdio_mux_init() - Initialize an MDIO mux
+ * @dev The device owning the MDIO mux
+ * @mux_node The device node of the MDIO mux
+ * @switch_fn The function called for switching the target MDIO child bus
+ * @mux_handle A pointer to a (void *) used internally by mdio-mux
+ * @data Private data used by switch_fn()
+ * @mux_bus An optional parent bus (otherwise the parent_bus property is used)
+ */
+ */
int mdio_mux_init(struct device *dev,
+ struct device_node *mux_node,
int (*switch_fn) (int cur, int desired, void *data),
void **mux_handle,
void *data,
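
A short usage sketch of the new signature, which now takes the mux device node explicitly; the register write inside switch_fn() is a stand-in for whatever selects the child bus on real hardware.

#include <linux/io.h>
#include <linux/mdio-mux.h>

/* Select the desired child bus by writing to a mux register. */
static int my_mux_switch(int cur, int desired, void *data)
{
        void __iomem *mux_reg = data;

        if (cur != desired)
                writel(desired, mux_reg);
        return 0;
}

static int my_mux_probe(struct device *dev, void __iomem *mux_reg)
{
        void *mux_handle;

        /* pass the device node explicitly, per the new signature */
        return mdio_mux_init(dev, dev->of_node, my_mux_switch,
                             &mux_handle, mux_reg, NULL);
}
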
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index ca08ab16ecdc..27013d6bf24a 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -1,18 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/mdio.h: definitions for MDIO (clause 45) transceivers
* Copyright 2006-2009 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef __LINUX_MDIO_H__
#define __LINUX_MDIO_H__
#include <uapi/linux/mdio.h>
+#include <linux/bitfield.h>
#include <linux/mod_devicetable.h>
+struct gpio_desc;
struct mii_bus;
+struct reset_control;
/* Multiple levels of nesting are possible. However typically this is
* limited to nested DSA like layer, a MUX layer, and the normal
@@ -28,7 +28,6 @@ enum mdio_mutex_lock_class {
struct mdio_device {
struct device dev;
- const struct dev_pm_ops *pm_ops;
struct mii_bus *bus;
char modalias[MDIO_NAME_SIZE];
@@ -39,8 +38,16 @@ struct mdio_device {
/* Bus address of the MDIO device (0-31) */
int addr;
int flags;
+ struct gpio_desc *reset_gpio;
+ struct reset_control *reset_ctrl;
+ unsigned int reset_assert_delay;
+ unsigned int reset_deassert_delay;
};
-#define to_mdio_device(d) container_of(d, struct mdio_device, dev)
+
+static inline struct mdio_device *to_mdio_device(const struct device *dev)
+{
+ return container_of(dev, struct mdio_device, dev);
+}
/* struct mdio_driver_common: Common to all MDIO drivers */
struct mdio_driver_common {
@@ -48,8 +55,12 @@ struct mdio_driver_common {
int flags;
};
#define MDIO_DEVICE_FLAG_PHY 1
-#define to_mdio_common_driver(d) \
- container_of(d, struct mdio_driver_common, driver)
+
+static inline struct mdio_driver_common *
+to_mdio_common_driver(const struct device_driver *driver)
+{
+ return container_of(driver, struct mdio_driver_common, driver);
+}
/* struct mdio_driver: Generic MDIO driver */
struct mdio_driver {
@@ -63,14 +74,34 @@ struct mdio_driver {
/* Clears up any memory if needed */
void (*remove)(struct mdio_device *mdiodev);
+
+ /* Quiesces the device on system shutdown, turns off interrupts etc */
+ void (*shutdown)(struct mdio_device *mdiodev);
};
-#define to_mdio_driver(d) \
- container_of(to_mdio_common_driver(d), struct mdio_driver, mdiodrv)
+
+static inline struct mdio_driver *
+to_mdio_driver(const struct device_driver *driver)
+{
+ return container_of(to_mdio_common_driver(driver), struct mdio_driver,
+ mdiodrv);
+}
+
+/* device driver data */
+static inline void mdiodev_set_drvdata(struct mdio_device *mdio, void *data)
+{
+ dev_set_drvdata(&mdio->dev, data);
+}
+
+static inline void *mdiodev_get_drvdata(struct mdio_device *mdio)
+{
+ return dev_get_drvdata(&mdio->dev);
+}
void mdio_device_free(struct mdio_device *mdiodev);
struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr);
int mdio_device_register(struct mdio_device *mdiodev);
void mdio_device_remove(struct mdio_device *mdiodev);
+void mdio_device_reset(struct mdio_device *mdiodev, int value);
int mdio_driver_register(struct mdio_driver *drv);
void mdio_driver_unregister(struct mdio_driver *drv);
int mdio_device_bus_match(struct device *dev, struct device_driver *drv);
@@ -257,10 +288,284 @@ static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv)
return reg;
}
+/**
+ * linkmode_adv_to_mii_10gbt_adv_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the C45
+ * 10GBASE-T AN CONTROL (7.32) register.
+ */
+static inline u32 linkmode_adv_to_mii_10gbt_adv_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ advertising))
+ result |= MDIO_AN_10GBT_CTRL_ADV2_5G;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ advertising))
+ result |= MDIO_AN_10GBT_CTRL_ADV5G;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ advertising))
+ result |= MDIO_AN_10GBT_CTRL_ADV10G;
+
+ return result;
+}
+
+/**
+ * mii_10gbt_stat_mod_linkmode_lpa_t
+ * @advertising: target the linkmode advertisement settings
+ * @lpa: value of the C45 10GBASE-T AN STATUS register
+ *
+ * A small helper function that translates C45 10GBASE-T AN STATUS register bits
+ * to linkmode advertisement settings. Other bits in advertising aren't changed.
+ */
+static inline void mii_10gbt_stat_mod_linkmode_lpa_t(unsigned long *advertising,
+ u32 lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ advertising, lpa & MDIO_AN_10GBT_STAT_LP2_5G);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ advertising, lpa & MDIO_AN_10GBT_STAT_LP5G);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ advertising, lpa & MDIO_AN_10GBT_STAT_LP10G);
+}
+
+/**
+ * mii_t1_adv_l_mod_linkmode_t
+ * @advertising: target the linkmode advertisement settings
+ * @lpa: value of the BASE-T1 Autonegotiation Advertisement [15:0] Register
+ *
+ * A small helper function that translates BASE-T1 Autonegotiation
+ * Advertisement [15:0] Register bits to linkmode advertisement settings.
+ * Other bits in advertising aren't changed.
+ */
+static inline void mii_t1_adv_l_mod_linkmode_t(unsigned long *advertising, u32 lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising,
+ lpa & MDIO_AN_T1_ADV_L_PAUSE_CAP);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising,
+ lpa & MDIO_AN_T1_ADV_L_PAUSE_ASYM);
+}
+
+/**
+ * mii_t1_adv_m_mod_linkmode_t
+ * @advertising: target the linkmode advertisement settings
+ * @lpa: value of the BASE-T1 Autonegotiation Advertisement [31:16] Register
+ *
+ * A small helper function that translates BASE-T1 Autonegotiation
+ * Advertisement [31:16] Register bits to linkmode advertisement settings.
+ * Other bits in advertising aren't changed.
+ */
+static inline void mii_t1_adv_m_mod_linkmode_t(unsigned long *advertising, u32 lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ advertising, lpa & MDIO_AN_T1_ADV_M_B10L);
+}
+
+/**
+ * linkmode_adv_to_mii_t1_adv_l_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the
+ * BASE-T1 Autonegotiation Advertisement [15:0] Register.
+ */
+static inline u32 linkmode_adv_to_mii_t1_adv_l_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_L_PAUSE_CAP;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_L_PAUSE_ASYM;
+
+ return result;
+}
+
+/**
+ * linkmode_adv_to_mii_t1_adv_m_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the
+ * BASE-T1 Autonegotiation Advertisement [31:16] Register.
+ */
+static inline u32 linkmode_adv_to_mii_t1_adv_m_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_M_B10L;
+
+ return result;
+}
+
+/**
+ * mii_eee_cap1_mod_linkmode_t()
+ * @adv: target the linkmode advertisement settings
+ * @val: register value
+ *
+ * A function that translates the value of the following registers to the
+ * linkmode:
+ * IEEE 802.3-2018 45.2.3.10 "EEE control and capability 1" register (3.20)
+ * IEEE 802.3-2018 45.2.7.13 "EEE advertisement 1" register (7.60)
+ * IEEE 802.3-2018 45.2.7.14 "EEE link partner ability 1" register (7.61)
+ */
+static inline void mii_eee_cap1_mod_linkmode_t(unsigned long *adv, u32 val)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ adv, val & MDIO_EEE_100TX);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ adv, val & MDIO_EEE_1000T);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ adv, val & MDIO_EEE_10GT);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ adv, val & MDIO_EEE_1000KX);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ adv, val & MDIO_EEE_10GKX4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ adv, val & MDIO_EEE_10GKR);
+}
+
+/**
+ * linkmode_to_mii_eee_cap1_t()
+ * @adv: the linkmode advertisement settings
+ *
+ * A function that translates linkmode to value for IEEE 802.3-2018 45.2.7.13
+ * "EEE advertisement 1" register (7.60)
+ */
+static inline u32 linkmode_to_mii_eee_cap1_t(unsigned long *adv)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, adv))
+ result |= MDIO_EEE_100TX;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, adv))
+ result |= MDIO_EEE_1000T;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, adv))
+ result |= MDIO_EEE_10GT;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, adv))
+ result |= MDIO_EEE_1000KX;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, adv))
+ result |= MDIO_EEE_10GKX4;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, adv))
+ result |= MDIO_EEE_10GKR;
+
+ return result;
+}
+
+/**
+ * mii_10base_t1_adv_mod_linkmode_t()
+ * @adv: linkmode advertisement settings
+ * @val: register value
+ *
+ * A function that translates IEEE 802.3cg-2019 45.2.7.26 "10BASE-T1 AN status"
+ * register (7.527) value to the linkmode.
+ */
+static inline void mii_10base_t1_adv_mod_linkmode_t(unsigned long *adv, u16 val)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ adv, val & MDIO_AN_10BT1_AN_CTRL_ADV_EEE_T1L);
+}
+
+/**
+ * linkmode_adv_to_mii_10base_t1_t()
+ * @adv: linkmode advertisement settings
+ *
+ * A function that translates the linkmode to IEEE 802.3cg-2019 45.2.7.25
+ * "10BASE-T1 AN control" register (7.526) value.
+ */
+static inline u32 linkmode_adv_to_mii_10base_t1_t(unsigned long *adv)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, adv))
+ result |= MDIO_AN_10BT1_AN_CTRL_ADV_EEE_T1L;
+
+ return result;
+}
+
+int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
+int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+int __mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
+ u16 mask, u16 set);
+
int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum);
int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask,
+ u16 set);
+int mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
+ u16 mask, u16 set);
+int __mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum);
+int mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum);
+int mdiobus_c45_read_nested(struct mii_bus *bus, int addr, int devad,
+ u32 regnum);
+int __mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 val);
+int mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 val);
+int mdiobus_c45_write_nested(struct mii_bus *bus, int addr, int devad,
+ u32 regnum, u16 val);
+int mdiobus_c45_modify(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 mask, u16 set);
+
+int mdiobus_c45_modify_changed(struct mii_bus *bus, int addr, int devad,
+ u32 regnum, u16 mask, u16 set);
+
+static inline int mdiodev_read(struct mdio_device *mdiodev, u32 regnum)
+{
+ return mdiobus_read(mdiodev->bus, mdiodev->addr, regnum);
+}
+
+static inline int mdiodev_write(struct mdio_device *mdiodev, u32 regnum,
+ u16 val)
+{
+ return mdiobus_write(mdiodev->bus, mdiodev->addr, regnum, val);
+}
+
+static inline int mdiodev_modify(struct mdio_device *mdiodev, u32 regnum,
+ u16 mask, u16 set)
+{
+ return mdiobus_modify(mdiodev->bus, mdiodev->addr, regnum, mask, set);
+}
+
+static inline int mdiodev_modify_changed(struct mdio_device *mdiodev,
+ u32 regnum, u16 mask, u16 set)
+{
+ return mdiobus_modify_changed(mdiodev->bus, mdiodev->addr, regnum,
+ mask, set);
+}
+
+static inline int mdiodev_c45_modify(struct mdio_device *mdiodev, int devad,
+ u32 regnum, u16 mask, u16 set)
+{
+ return mdiobus_c45_modify(mdiodev->bus, mdiodev->addr, devad, regnum,
+ mask, set);
+}
+
+static inline int mdiodev_c45_modify_changed(struct mdio_device *mdiodev,
+ int devad, u32 regnum, u16 mask,
+ u16 set)
+{
+ return mdiobus_c45_modify_changed(mdiodev->bus, mdiodev->addr, devad,
+ regnum, mask, set);
+}
+
+static inline int mdiodev_c45_read(struct mdio_device *mdiodev, int devad,
+ u16 regnum)
+{
+ return mdiobus_c45_read(mdiodev->bus, mdiodev->addr, devad, regnum);
+}
+
+static inline int mdiodev_c45_write(struct mdio_device *mdiodev, u32 devad,
+ u16 regnum, u16 val)
+{
+ return mdiobus_c45_write(mdiodev->bus, mdiodev->addr, devad, regnum,
+ val);
+}
int mdiobus_register_device(struct mdio_device *mdiodev);
int mdiobus_unregister_device(struct mdio_device *mdiodev);
@@ -269,6 +574,7 @@ struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr);
/**
* mdio_module_driver() - Helper macro for registering mdio drivers
+ * @_mdio_driver: driver to register
*
* Helper macro for MDIO drivers which do not do anything special in module
* init/exit. Each module may only use this macro once, and calling it
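
Taken together, the new mdiodev_* wrappers and the linkmode converters let a driver talk to its own device without repeating the bus/addr pair everywhere. A hedged sketch; the register choices are illustrative, not from this diff.

#include <linux/linkmode.h>
#include <linux/mdio.h>
#include <linux/mii.h>

static int my_config_aneg(struct mdio_device *mdiodev,
                          unsigned long *advertising)
{
        u32 adv = linkmode_adv_to_mii_10gbt_adv_t(advertising);
        int ret;

        /* clause 22 modify: clear power-down via the device's own bus/addr */
        ret = mdiodev_modify(mdiodev, MII_BMCR, BMCR_PDOWN, 0);
        if (ret < 0)
                return ret;

        /* clause 45 write of the 10GBASE-T AN control register (7.32) */
        return mdiodev_c45_write(mdiodev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
                                 adv);
}
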
diff --git a/include/linux/mdio/mdio-i2c.h b/include/linux/mdio/mdio-i2c.h
new file mode 100644
index 000000000000..65b550a6fc32
--- /dev/null
+++ b/include/linux/mdio/mdio-i2c.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MDIO I2C bridge
+ *
+ * Copyright (C) 2015 Russell King
+ */
+#ifndef MDIO_I2C_H
+#define MDIO_I2C_H
+
+struct device;
+struct i2c_adapter;
+struct mii_bus;
+
+enum mdio_i2c_proto {
+ MDIO_I2C_NONE,
+ MDIO_I2C_MARVELL_C22,
+ MDIO_I2C_C45,
+ MDIO_I2C_ROLLBALL,
+};
+
+struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c,
+ enum mdio_i2c_proto protocol);
+
+#endif
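
A brief usage sketch, assuming the allocator returns an ERR_PTR on failure (as its callers in the SFP code expect); the bus still has to be registered with the phy layer afterwards.

#include <linux/err.h>
#include <linux/mdio/mdio-i2c.h>
#include <linux/phy.h>

static int my_attach_i2c_mdio(struct device *dev, struct i2c_adapter *i2c)
{
        struct mii_bus *bus;

        bus = mdio_i2c_alloc(dev, i2c, MDIO_I2C_MARVELL_C22);
        if (IS_ERR(bus))
                return PTR_ERR(bus);

        /* registration with the phy layer is the caller's job */
        return mdiobus_register(bus);
}
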
diff --git a/include/linux/mdio/mdio-mscc-miim.h b/include/linux/mdio/mdio-mscc-miim.h
new file mode 100644
index 000000000000..1ce699740af6
--- /dev/null
+++ b/include/linux/mdio/mdio-mscc-miim.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Driver for the MDIO interface of Microsemi network switches.
+ *
+ * Author: Colin Foster <colin.foster@in-advantage.com>
+ * Copyright (C) 2021 Innovative Advantage
+ */
+#ifndef MDIO_MSCC_MIIM_H
+#define MDIO_MSCC_MIIM_H
+
+#include <linux/device.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+
+int mscc_miim_setup(struct device *device, struct mii_bus **bus,
+ const char *name, struct regmap *mii_regmap,
+ int status_offset, bool ignore_read_errors);
+
+#endif
diff --git a/include/linux/mdio/mdio-xgene.h b/include/linux/mdio/mdio-xgene.h
new file mode 100644
index 000000000000..9e588965dc83
--- /dev/null
+++ b/include/linux/mdio/mdio-xgene.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Applied Micro X-Gene SoC MDIO Driver
+ *
+ * Copyright (c) 2016, Applied Micro Circuits Corporation
+ * Author: Iyappan Subramanian <isubramanian@apm.com>
+ */
+
+#ifndef __MDIO_XGENE_H__
+#define __MDIO_XGENE_H__
+
+#include <linux/bits.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define BLOCK_XG_MDIO_CSR_OFFSET 0x5000
+#define BLOCK_DIAG_CSR_OFFSET 0xd000
+#define XGENET_CONFIG_REG_ADDR 0x20
+
+#define MAC_ADDR_REG_OFFSET 0x00
+#define MAC_COMMAND_REG_OFFSET 0x04
+#define MAC_WRITE_REG_OFFSET 0x08
+#define MAC_READ_REG_OFFSET 0x0c
+#define MAC_COMMAND_DONE_REG_OFFSET 0x10
+
+#define CLKEN_OFFSET 0x08
+#define SRST_OFFSET 0x00
+
+#define MENET_CFG_MEM_RAM_SHUTDOWN_ADDR 0x70
+#define MENET_BLOCK_MEM_RDY_ADDR 0x74
+
+#define MAC_CONFIG_1_ADDR 0x00
+#define MII_MGMT_COMMAND_ADDR 0x24
+#define MII_MGMT_ADDRESS_ADDR 0x28
+#define MII_MGMT_CONTROL_ADDR 0x2c
+#define MII_MGMT_STATUS_ADDR 0x30
+#define MII_MGMT_INDICATORS_ADDR 0x34
+#define SOFT_RESET BIT(31)
+
+#define MII_MGMT_CONFIG_ADDR 0x20
+#define MII_MGMT_COMMAND_ADDR 0x24
+#define MII_MGMT_ADDRESS_ADDR 0x28
+#define MII_MGMT_CONTROL_ADDR 0x2c
+#define MII_MGMT_STATUS_ADDR 0x30
+#define MII_MGMT_INDICATORS_ADDR 0x34
+
+#define MIIM_COMMAND_ADDR 0x20
+#define MIIM_FIELD_ADDR 0x24
+#define MIIM_CONFIGURATION_ADDR 0x28
+#define MIIM_LINKFAILVECTOR_ADDR 0x2c
+#define MIIM_INDICATOR_ADDR 0x30
+#define MIIMRD_FIELD_ADDR 0x34
+
+#define MDIO_CSR_OFFSET 0x5000
+
+#define REG_ADDR_POS 0
+#define REG_ADDR_LEN 5
+#define PHY_ADDR_POS 8
+#define PHY_ADDR_LEN 5
+
+#define HSTMIIMWRDAT_POS 0
+#define HSTMIIMWRDAT_LEN 16
+#define HSTPHYADX_POS 23
+#define HSTPHYADX_LEN 5
+#define HSTREGADX_POS 18
+#define HSTREGADX_LEN 5
+#define HSTLDCMD BIT(3)
+#define HSTMIIMCMD_POS 0
+#define HSTMIIMCMD_LEN 3
+
+#define BUSY_MASK BIT(0)
+#define READ_CYCLE_MASK BIT(0)
+
+enum xgene_enet_cmd {
+ XGENE_ENET_WR_CMD = BIT(31),
+ XGENE_ENET_RD_CMD = BIT(30)
+};
+
+enum {
+ MIIM_CMD_IDLE,
+ MIIM_CMD_LEGACY_WRITE,
+ MIIM_CMD_LEGACY_READ,
+};
+
+enum xgene_mdio_id {
+ XGENE_MDIO_RGMII = 1,
+ XGENE_MDIO_XFI
+};
+
+struct xgene_mdio_pdata {
+ struct clk *clk;
+ struct device *dev;
+ void __iomem *mac_csr_addr;
+ void __iomem *diag_csr_addr;
+ void __iomem *mdio_csr_addr;
+ struct mii_bus *mdio_bus;
+ int mdio_id;
+ spinlock_t mac_lock; /* mac lock */
+};
+
+/* Set the specified value into a bit-field defined by its starting position
+ * and length within a single u64.
+ */
+static inline u64 xgene_enet_set_field_value(int pos, int len, u64 val)
+{
+ return (val & ((1ULL << len) - 1)) << pos;
+}
+
+#define SET_VAL(field, val) \
+ xgene_enet_set_field_value(field ## _POS, field ## _LEN, val)
+
+#define SET_BIT(field) \
+ xgene_enet_set_field_value(field ## _POS, 1, 1)
+
+/* Get the value from a bit-field defined by its starting position
+ * and length within the specified u64.
+ */
+static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src)
+{
+ return (src >> pos) & ((1ULL << len) - 1);
+}
+
+#define GET_VAL(field, src) \
+ xgene_enet_get_field_value(field ## _POS, field ## _LEN, src)
+
+#define GET_BIT(field, src) \
+ xgene_enet_get_field_value(field ## _POS, 1, src)
+
+u32 xgene_mdio_rd_mac(struct xgene_mdio_pdata *pdata, u32 rd_addr);
+void xgene_mdio_wr_mac(struct xgene_mdio_pdata *pdata, u32 wr_addr, u32 data);
+int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg);
+int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data);
+struct phy_device *xgene_enet_phy_register(struct mii_bus *bus, int phy_addr);
+
+#endif /* __MDIO_XGENE_H__ */
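
A worked example of the SET_VAL/GET_VAL helpers above: packing a MIIM field word (phy -> bits 27:23, reg -> bits 22:18, data -> bits 15:0) and picking the data field back out. The function names are made up; the field layout follows the _POS/_LEN defines in this header.

#include <linux/mdio/mdio-xgene.h>

static u64 my_pack_miim_field(u32 phy, u32 reg, u16 data)
{
        return SET_VAL(HSTPHYADX, phy) |
               SET_VAL(HSTREGADX, reg) |
               SET_VAL(HSTMIIMWRDAT, data);
}

static u16 my_unpack_miim_data(u64 field)
{
        /* recovers the 16-bit write data placed by SET_VAL above */
        return GET_VAL(HSTMIIMWRDAT, field);
}
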
diff --git a/include/linux/mei_aux.h b/include/linux/mei_aux.h
new file mode 100644
index 000000000000..506912ad363b
--- /dev/null
+++ b/include/linux/mei_aux.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Intel Corporation. All rights reserved.
+ */
+#ifndef _LINUX_MEI_AUX_H
+#define _LINUX_MEI_AUX_H
+
+#include <linux/auxiliary_bus.h>
+
+/**
+ * struct mei_aux_device - mei auxiliary device
+ * @aux_dev: - auxiliary device object
+ * @irq: interrupt driving the mei auxiliary device
+ * @bar: mmio resource bar reserved to mei auxiliary device
+ * @ext_op_mem: resource for extended operational memory
+ * used in graphics PXP mode.
+ * @slow_firmware: The device has slow underlying firmware.
+ * Such firmware requires larger operation timeouts.
+ */
+struct mei_aux_device {
+ struct auxiliary_device aux_dev;
+ int irq;
+ struct resource bar;
+ struct resource ext_op_mem;
+ bool slow_firmware;
+};
+
+#define auxiliary_dev_to_mei_aux_dev(auxiliary_dev) \
+ container_of(auxiliary_dev, struct mei_aux_device, aux_dev)
+
+#endif /* _LINUX_MEI_AUX_H */
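
In an auxiliary driver bound to this device, the wrapper macro recovers the full mei_aux_device from the generic auxiliary_device passed to probe(); a minimal sketch.

#include <linux/auxiliary_bus.h>
#include <linux/mei_aux.h>

static int my_aux_probe(struct auxiliary_device *aux_dev,
                        const struct auxiliary_device_id *id)
{
        struct mei_aux_device *adev = auxiliary_dev_to_mei_aux_dev(aux_dev);

        dev_info(&aux_dev->dev, "irq %d, bar %pR\n", adev->irq, &adev->bar);
        return 0;
}
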
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index a0d274fe08f1..fd6e0620658d 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -1,3 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2013-2016, Intel Corporation. All rights reserved.
+ */
#ifndef _LINUX_MEI_CL_BUS_H
#define _LINUX_MEI_CL_BUS_H
@@ -7,6 +11,7 @@
struct mei_cl_device;
struct mei_device;
+struct scatterlist;
typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev);
@@ -54,6 +59,8 @@ struct mei_cl_device {
void *priv_data;
};
+#define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev)
+
struct mei_cl_driver {
struct device_driver driver;
const char *name;
@@ -62,7 +69,7 @@ struct mei_cl_driver {
int (*probe)(struct mei_cl_device *cldev,
const struct mei_cl_device_id *id);
- int (*remove)(struct mei_cl_device *cldev);
+ void (*remove)(struct mei_cl_device *cldev);
};
int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
@@ -85,10 +92,17 @@ void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv);
mei_cldev_driver_register,\
mei_cldev_driver_unregister)
-ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length);
+ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length);
ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length);
ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
size_t length);
+ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length, u8 vtag);
+ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
+ u8 *vtag);
+ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
+ size_t length, u8 *vtag);
int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb);
int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
@@ -102,6 +116,14 @@ void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data);
int mei_cldev_enable(struct mei_cl_device *cldev);
int mei_cldev_disable(struct mei_cl_device *cldev);
-bool mei_cldev_enabled(struct mei_cl_device *cldev);
+bool mei_cldev_enabled(const struct mei_cl_device *cldev);
+ssize_t mei_cldev_send_gsc_command(struct mei_cl_device *cldev,
+ u8 client_id, u32 fence_id,
+ struct scatterlist *sg_in,
+ size_t total_in_len,
+ struct scatterlist *sg_out);
+
+void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size);
+int mei_cldev_dma_unmap(struct mei_cl_device *cldev);
#endif /* _LINUX_MEI_CL_BUS_H */
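
A client driver against the updated API looks roughly like this; note that remove() now returns void, so teardown failures can only be logged. The id table is omitted for brevity, and module_mei_cl_driver() is assumed to be the registration macro whose body appears in the hunk above.

#include <linux/mei_cl_bus.h>
#include <linux/module.h>

static int my_cl_probe(struct mei_cl_device *cldev,
                       const struct mei_cl_device_id *id)
{
        return mei_cldev_enable(cldev);
}

static void my_cl_remove(struct mei_cl_device *cldev)
{
        if (mei_cldev_disable(cldev))
                dev_warn(&cldev->dev, "disable failed\n");
}

static struct mei_cl_driver my_cl_driver = {
        .name   = "my_mei_client",
        .probe  = my_cl_probe,
        .remove = my_cl_remove,
};
module_mei_cl_driver(my_cl_driver);
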
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
new file mode 100644
index 000000000000..ae4526389261
--- /dev/null
+++ b/include/linux/mem_encrypt.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Memory Encryption Support
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ */
+
+#ifndef __MEM_ENCRYPT_H__
+#define __MEM_ENCRYPT_H__
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_ARCH_HAS_MEM_ENCRYPT
+
+#include <asm/mem_encrypt.h>
+
+#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+/*
+ * The __sme_set() and __sme_clr() macros are useful for adding or removing
+ * the encryption mask from a value (e.g. when dealing with pagetable
+ * entries).
+ */
+#define __sme_set(x) ((x) | sme_me_mask)
+#define __sme_clr(x) ((x) & ~sme_me_mask)
+#else
+#define __sme_set(x) (x)
+#define __sme_clr(x) (x)
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __MEM_ENCRYPT_H__ */
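
A sketch of what the mask helpers are for: sme_me_mask itself comes from the architecture's <asm/mem_encrypt.h>, and the raw page-table entry values here are arbitrary u64s.

#include <linux/mem_encrypt.h>
#include <linux/types.h>

/* OR the SME encryption bit into a raw page-table entry value */
static u64 my_mk_encrypted(u64 pte_val)
{
        return __sme_set(pte_val);
}

/* strip the encryption bit, e.g. for shared/decrypted mappings */
static u64 my_mk_decrypted(u64 pte_val)
{
        return __sme_clr(pte_val);
}
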
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 77d427974f57..f82ee3fac1cd 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -1,87 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
-#ifdef __KERNEL__
-#ifdef CONFIG_HAVE_MEMBLOCK
/*
* Logical memory blocks.
*
* Copyright (C) 2001 Peter Bergner, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/init.h>
#include <linux/mm.h>
+#include <asm/dma.h>
+
+extern unsigned long max_low_pfn;
+extern unsigned long min_low_pfn;
-#define INIT_MEMBLOCK_REGIONS 128
-#define INIT_PHYSMEM_REGIONS 4
+/*
+ * highest page
+ */
+extern unsigned long max_pfn;
+/*
+ * highest possible page
+ */
+extern unsigned long long max_possible_pfn;
-/* Definition of memblock flags. */
-enum {
+/**
+ * enum memblock_flags - definition of memory region attributes
+ * @MEMBLOCK_NONE: no special request
+ * @MEMBLOCK_HOTPLUG: memory region indicated in the firmware-provided memory
+ * map during early boot as hot(un)pluggable system RAM (e.g., memory range
+ * that might get hotunplugged later). With "movable_node" set on the kernel
+ * command line, try keeping this memory region hotunpluggable. Does not apply
+ * to memblocks added ("hotplugged") after early boot.
+ * @MEMBLOCK_MIRROR: mirrored region
+ * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as
+ * reserved in the memory map; refer to memblock_mark_nomap() description
+ * for further details
+ * @MEMBLOCK_DRIVER_MANAGED: memory region that is always detected and added
+ * via a driver, and never indicated in the firmware-provided memory map as
+ * system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the
+ * kernel resource tree.
+ */
+enum memblock_flags {
MEMBLOCK_NONE = 0x0, /* No special request */
MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */
MEMBLOCK_MIRROR = 0x2, /* mirrored region */
MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */
+ MEMBLOCK_DRIVER_MANAGED = 0x8, /* always detected via a driver */
};
+/**
+ * struct memblock_region - represents a memory region
+ * @base: base address of the region
+ * @size: size of the region
+ * @flags: memory region attributes
+ * @nid: NUMA node id
+ */
struct memblock_region {
phys_addr_t base;
phys_addr_t size;
- unsigned long flags;
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+ enum memblock_flags flags;
+#ifdef CONFIG_NUMA
int nid;
#endif
};
+/**
+ * struct memblock_type - collection of memory regions of certain type
+ * @cnt: number of regions
+ * @max: size of the allocated array
+ * @total_size: size of all regions
+ * @regions: array of regions
+ * @name: the memory type symbolic name
+ */
struct memblock_type {
- unsigned long cnt; /* number of regions */
- unsigned long max; /* size of the allocated array */
- phys_addr_t total_size; /* size of all regions */
+ unsigned long cnt;
+ unsigned long max;
+ phys_addr_t total_size;
struct memblock_region *regions;
char *name;
};
+/**
+ * struct memblock - memblock allocator metadata
+ * @bottom_up: whether memory is allocated in the bottom-up direction
+ * @current_limit: physical address of the current allocation limit
+ * @memory: usable memory regions
+ * @reserved: reserved memory regions
+ */
struct memblock {
bool bottom_up; /* is bottom up direction? */
phys_addr_t current_limit;
struct memblock_type memory;
struct memblock_type reserved;
-#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
- struct memblock_type physmem;
-#endif
};
extern struct memblock memblock;
-extern int memblock_debug;
-#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
+void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
+static inline void memblock_discard(void) {}
#endif
-#define memblock_dbg(fmt, ...) \
- if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
-
-phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
- phys_addr_t start, phys_addr_t end,
- int nid, ulong flags);
-phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
- phys_addr_t size, phys_addr_t align);
-phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
-phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr);
void memblock_allow_resize(void);
-int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
+int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid,
+ enum memblock_flags flags);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
-int memblock_free(phys_addr_t base, phys_addr_t size);
+int memblock_phys_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
+#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
+int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
+#endif
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
phys_addr_t base, phys_addr_t size);
@@ -90,28 +125,51 @@ int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
-ulong choose_memblock_flags(void);
-/* Low level functions */
-int memblock_add_range(struct memblock_type *type,
- phys_addr_t base, phys_addr_t size,
- int nid, unsigned long flags);
+void memblock_free_all(void);
+void memblock_free(void *ptr, size_t size);
+void reset_node_managed_pages(pg_data_t *pgdat);
+void reset_all_zones_managed_pages(void);
-void __next_mem_range(u64 *idx, int nid, ulong flags,
+/* Low level functions */
+void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
struct memblock_type *type_a,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid);
-void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
+void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
struct memblock_type *type_a,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid);
-void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
- phys_addr_t *out_end);
+void memblock_free_late(phys_addr_t base, phys_addr_t size);
+
+#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
+static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
+ phys_addr_t *out_start,
+ phys_addr_t *out_end)
+{
+ extern struct memblock_type physmem;
+
+ __next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
+ out_start, out_end, NULL);
+}
/**
- * for_each_mem_range - iterate through memblock areas from type_a and not
+ * for_each_physmem_range - iterate through physmem areas not included in type.
+ * @i: u64 used as loop variable
+ * @type: ptr to memblock_type to exclude from the iteration, can be %NULL
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ */
+#define for_each_physmem_range(i, type, p_start, p_end) \
+ for (i = 0, __next_physmem_range(&i, type, p_start, p_end); \
+ i != (u64)ULLONG_MAX; \
+ __next_physmem_range(&i, type, p_start, p_end))
+#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */
+
+/**
+ * __for_each_mem_range - iterate through memblock areas from type_a and not
* included in type_b. Or just type_a if type_b is NULL.
* @i: u64 used as loop variable
* @type_a: ptr to memblock_type to iterate
@@ -122,7 +180,7 @@ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
*/
-#define for_each_mem_range(i, type_a, type_b, nid, flags, \
+#define __for_each_mem_range(i, type_a, type_b, nid, flags, \
p_start, p_end, p_nid) \
for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid); \
@@ -131,7 +189,7 @@ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
p_start, p_end, p_nid))
/**
- * for_each_mem_range_rev - reverse iterate through memblock areas from
+ * __for_each_mem_range_rev - reverse iterate through memblock areas from
* type_a and not included in type_b. Or just type_a if type_b is NULL.
* @i: u64 used as loop variable
* @type_a: ptr to memblock_type to iterate
@@ -142,17 +200,40 @@ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL
*/
-#define for_each_mem_range_rev(i, type_a, type_b, nid, flags, \
- p_start, p_end, p_nid) \
+#define __for_each_mem_range_rev(i, type_a, type_b, nid, flags, \
+ p_start, p_end, p_nid) \
for (i = (u64)ULLONG_MAX, \
- __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
+ __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid); \
i != (u64)ULLONG_MAX; \
__next_mem_range_rev(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid))
/**
- * for_each_reserved_mem_region - iterate over all reserved memblock areas
+ * for_each_mem_range - iterate through memory areas.
+ * @i: u64 used as loop variable
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ */
+#define for_each_mem_range(i, p_start, p_end) \
+ __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, \
+ MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED, \
+ p_start, p_end, NULL)
+
+/**
+ * for_each_mem_range_rev - reverse iterate through memory areas.
+ * @i: u64 used as loop variable
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ */
+#define for_each_mem_range_rev(i, p_start, p_end) \
+ __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
+ MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED,\
+ p_start, p_end, NULL)
+
+/**
+ * for_each_reserved_mem_range - iterate over all reserved memblock areas
* @i: u64 used as loop variable
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
@@ -160,10 +241,9 @@ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
* Walks over reserved areas of memblock. Available as soon as memblock
* is initialized.
*/
-#define for_each_reserved_mem_region(i, p_start, p_end) \
- for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end); \
- i != (u64)ULLONG_MAX; \
- __next_reserved_mem_region(&i, p_start, p_end))
+#define for_each_reserved_mem_range(i, p_start, p_end) \
+ __for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE, \
+ MEMBLOCK_NONE, p_start, p_end, NULL)
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
@@ -180,12 +260,15 @@ static inline bool memblock_is_nomap(struct memblock_region *m)
return m->flags & MEMBLOCK_NOMAP;
}
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+static inline bool memblock_is_driver_managed(struct memblock_region *m)
+{
+ return m->flags & MEMBLOCK_DRIVER_MANAGED;
+}
+
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
unsigned long *out_end_pfn, int *out_nid);
-unsigned long memblock_next_valid_pfn(unsigned long pfn, unsigned long max_pfn);
/**
* for_each_mem_pfn_range - early memory pfn range iterator
@@ -200,7 +283,50 @@ unsigned long memblock_next_valid_pfn(unsigned long pfn, unsigned long max_pfn);
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \
for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
+ unsigned long *out_spfn,
+ unsigned long *out_epfn);
+/**
+ * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
+ * memblock areas
+ * @i: u64 used as loop variable
+ * @zone: zone in which all of the memory blocks reside
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ *
+ * Walks over free (memory && !reserved) areas of memblock in a specific
+ * zone. Available once memblock and an empty zone are initialized. The main
+ * assumption is that the zone start, end, and pgdat have been associated.
+ * This way we can use the zone to determine NUMA node, and if a given part
+ * of the memblock is valid for the zone.
+ */
+#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end) \
+ for (i = 0, \
+ __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end); \
+ i != U64_MAX; \
+ __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
+
+/**
+ * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
+ * free memblock areas from a given point
+ * @i: u64 used as loop variable
+ * @zone: zone in which all of the memory blocks reside
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ *
+ * Walks over free (memory && !reserved) areas of memblock in a specific
+ * zone, continuing from current position. Available as soon as memblock is
+ * initialized.
+ */
+#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
+ for (; i != U64_MAX; \
+ __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
+
+int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);
+
+#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
/**
* for_each_free_mem_range - iterate through free memblock areas
@@ -215,8 +341,8 @@ unsigned long memblock_next_valid_pfn(unsigned long pfn, unsigned long max_pfn);
* soon as memblock is initialized.
*/
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid) \
- for_each_mem_range(i, &memblock.memory, &memblock.reserved, \
- nid, flags, p_start, p_end, p_nid)
+ __for_each_mem_range(i, &memblock.memory, &memblock.reserved, \
+ nid, flags, p_start, p_end, p_nid)
/**
* for_each_free_mem_range_reverse - rev-iterate through free memblock areas
@@ -232,25 +358,13 @@ unsigned long memblock_next_valid_pfn(unsigned long pfn, unsigned long max_pfn);
*/
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end, \
p_nid) \
- for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
- nid, flags, p_start, p_end, p_nid)
-
-static inline void memblock_set_region_flags(struct memblock_region *r,
- unsigned long flags)
-{
- r->flags |= flags;
-}
+ __for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
+ nid, flags, p_start, p_end, p_nid)
-static inline void memblock_clear_region_flags(struct memblock_region *r,
- unsigned long flags)
-{
- r->flags &= ~flags;
-}
-
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
struct memblock_type *type, int nid);
+#ifdef CONFIG_NUMA
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
r->nid = nid;
@@ -269,17 +383,84 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
{
return 0;
}
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+#endif /* CONFIG_NUMA */
+
+/* Flags for memblock allocation APIs */
+#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
+#define MEMBLOCK_ALLOC_ACCESSIBLE 0
+#define MEMBLOCK_ALLOC_NOLEAKTRACE 1
+
+/* We are using top down, so it is safe to use 0 here */
+#define MEMBLOCK_LOW_LIMIT 0
+
+#ifndef ARCH_LOW_ADDRESS_LIMIT
+#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
+#endif
+
+phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
+ phys_addr_t start, phys_addr_t end);
+phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
+ phys_addr_t align, phys_addr_t start,
+ phys_addr_t end, int nid, bool exact_nid);
+phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+
+static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
+ phys_addr_t align)
+{
+ return memblock_phys_alloc_range(size, align, 0,
+ MEMBLOCK_ALLOC_ACCESSIBLE);
+}
+
+void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr,
+ int nid);
+void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr,
+ int nid);
+void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr,
+ int nid);
+
+static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
+{
+ return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+ MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
+}
-phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
-phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+static inline void *memblock_alloc_raw(phys_addr_t size,
+ phys_addr_t align)
+{
+ return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ NUMA_NO_NODE);
+}
+
+static inline void *memblock_alloc_from(phys_addr_t size,
+ phys_addr_t align,
+ phys_addr_t min_addr)
+{
+ return memblock_alloc_try_nid(size, align, min_addr,
+ MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
+}
-phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
+static inline void *memblock_alloc_low(phys_addr_t size,
+ phys_addr_t align)
+{
+ return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+ ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
+}
+
+static inline void *memblock_alloc_node(phys_addr_t size,
+ phys_addr_t align, int nid)
+{
+ return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+ MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+}
/*
* Set the allocation direction to bottom-up or top-down.
*/
-static inline void __init memblock_set_bottom_up(bool enable)
+static inline __init_memblock void memblock_set_bottom_up(bool enable)
{
memblock.bottom_up = enable;
}
@@ -289,43 +470,25 @@ static inline void __init memblock_set_bottom_up(bool enable)
* if this is true, that said, memblock will allocate memory
* in bottom-up direction.
*/
-static inline bool memblock_bottom_up(void)
+static inline __init_memblock bool memblock_bottom_up(void)
{
return memblock.bottom_up;
}
-/* Flags for memblock_alloc_base() amd __memblock_alloc_base() */
-#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
-#define MEMBLOCK_ALLOC_ACCESSIBLE 0
-
-phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
- phys_addr_t start, phys_addr_t end,
- ulong flags);
-phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
- phys_addr_t max_addr);
-phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
- phys_addr_t max_addr);
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
-phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
-int memblock_is_map_memory(phys_addr_t addr);
-int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
+bool memblock_is_map_memory(phys_addr_t addr);
+bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
-extern void __memblock_dump_all(void);
-
-static inline void memblock_dump_all(void)
-{
- if (memblock_debug)
- __memblock_dump_all();
-}
+void memblock_dump_all(void);
/**
* memblock_set_current_limit - Set the current allocation limit to allow
@@ -347,8 +510,10 @@ phys_addr_t memblock_get_current_limit(void);
*/
/**
- * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
+ * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
* @reg: memblock_region structure
+ *
+ * Return: the lowest pfn intersecting with the memory region
*/
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
@@ -356,8 +521,10 @@ static inline unsigned long memblock_region_memory_base_pfn(const struct membloc
}
/**
- * memblock_region_memory_end_pfn - Return the end_pfn this region
+ * memblock_region_memory_end_pfn - get the end pfn of the memory region
* @reg: memblock_region structure
+ *
+ * Return: the end_pfn of the memory region
*/
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
@@ -365,8 +532,10 @@ static inline unsigned long memblock_region_memory_end_pfn(const struct memblock
}
/**
- * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
+ * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
* @reg: memblock_region structure
+ *
+ * Return: the lowest pfn intersecting with the reserved region
*/
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
@@ -374,25 +543,62 @@ static inline unsigned long memblock_region_reserved_base_pfn(const struct membl
}
/**
- * memblock_region_reserved_end_pfn - Return the end_pfn this region
+ * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
* @reg: memblock_region structure
+ *
+ * Return: the end_pfn of the reserved region
*/
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
return PFN_UP(reg->base + reg->size);
}
-#define for_each_memblock(memblock_type, region) \
- for (region = memblock.memblock_type.regions; \
- region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
+/**
+ * for_each_mem_region - iterate over memory regions
+ * @region: loop variable
+ */
+#define for_each_mem_region(region) \
+ for (region = memblock.memory.regions; \
+ region < (memblock.memory.regions + memblock.memory.cnt); \
region++)
-#define for_each_memblock_type(memblock_type, rgn) \
- for (idx = 0, rgn = &memblock_type->regions[0]; \
- idx < memblock_type->cnt; \
- idx++, rgn = &memblock_type->regions[idx])
+/**
+ * for_each_reserved_mem_region - iterate over reserved memory regions
+ * @region: loop variable
+ */
+#define for_each_reserved_mem_region(region) \
+ for (region = memblock.reserved.regions; \
+ region < (memblock.reserved.regions + memblock.reserved.cnt); \
+ region++)
+
+extern void *alloc_large_system_hash(const char *tablename,
+ unsigned long bucketsize,
+ unsigned long numentries,
+ int scale,
+ int flags,
+ unsigned int *_hash_shift,
+ unsigned int *_hash_mask,
+ unsigned long low_limit,
+ unsigned long high_limit);
+
+#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
+#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min
+ * shift passed via *_hash_shift */
+#define HASH_ZERO 0x00000004 /* Zero allocated hash table */
+
+/* Only NUMA needs hash distribution. 64bit NUMA architectures have
+ * sufficient vmalloc space.
+ */
+#ifdef CONFIG_NUMA
+#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
+extern int hashdist; /* Distribute hashes across NUMA nodes? */
+#else
+#define hashdist (0)
+#endif
#ifdef CONFIG_MEMTEST
+extern phys_addr_t early_memtest_bad_size; /* Size of faulty ram found by memtest */
+extern bool early_memtest_done; /* Was early memtest done? */
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
@@ -400,22 +606,5 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
}
#endif
-extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
- phys_addr_t end_addr);
-#else
-static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
-{
- return 0;
-}
-
-static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
- phys_addr_t end_addr)
-{
- return 0;
-}
-
-#endif /* CONFIG_HAVE_MEMBLOCK */
-
-#endif /* __KERNEL__ */
#endif /* _LINUX_MEMBLOCK_H */
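
A hedged early-boot sketch tying the reworked APIs together: memblock_alloc() returns zeroed memory or NULL, and the new for_each_mem_range() takes just a loop variable and the output range. This would run from arch setup code, before the page allocator exists; the function name is made up.

#include <linux/cache.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

static void __init my_early_setup(void)
{
        phys_addr_t start, end;
        void *buf;
        u64 i;

        /* zeroed scratch buffer from any accessible node */
        buf = memblock_alloc(SZ_1M, SMP_CACHE_BYTES);
        if (!buf)
                panic("%s: out of early memory\n", __func__);

        /* walk usable memory, skipping hotplug/driver-managed ranges */
        for_each_mem_range(i, &start, &end)
                pr_info("usable: %pa..%pa\n", &start, &end);
}
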
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 3914e3dd6168..222d7370134c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
*
* Copyright IBM Corporation, 2007
@@ -5,16 +6,6 @@
*
* Copyright 2007 OpenVZ SWsoft Inc
* Author: Pavel Emelianov <xemul@openvz.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _LINUX_MEMCONTROL_H
@@ -32,34 +23,38 @@
#include <linux/page-flags.h>
struct mem_cgroup;
+struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;
/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
- MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
- MEMCG_RSS,
- MEMCG_RSS_HUGE,
- MEMCG_SWAP,
+ MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
MEMCG_SOCK,
- /* XXX: why are these zone and not node counters? */
- MEMCG_KERNEL_STACK_KB,
+ MEMCG_PERCPU_B,
+ MEMCG_VMALLOC,
+ MEMCG_KMEM,
+ MEMCG_ZSWAP_B,
+ MEMCG_ZSWAPPED,
MEMCG_NR_STAT,
};
-/* Cgroup-specific events, on top of universal VM events */
-enum memcg_event_item {
- MEMCG_LOW = NR_VM_EVENT_ITEMS,
+enum memcg_memory_event {
+ MEMCG_LOW,
MEMCG_HIGH,
MEMCG_MAX,
MEMCG_OOM,
- MEMCG_NR_EVENTS,
+ MEMCG_OOM_KILL,
+ MEMCG_OOM_GROUP_KILL,
+ MEMCG_SWAP_HIGH,
+ MEMCG_SWAP_MAX,
+ MEMCG_SWAP_FAIL,
+ MEMCG_NR_MEMORY_EVENTS,
};
struct mem_cgroup_reclaim_cookie {
pg_data_t *pgdat;
- int priority;
unsigned int generation;
};
@@ -70,28 +65,23 @@ struct mem_cgroup_reclaim_cookie {
struct mem_cgroup_id {
int id;
- atomic_t ref;
+ refcount_t ref;
};
/*
* Per memcg event counter is incremented at every pagein/pageout. With THP,
- * it will be incremated by the number of pages. This counter is used for
- * for trigger some periodic events. This is straightforward and better
+ * it will be incremented by the number of pages. This counter is used
+ * to trigger some periodic events. This is straightforward and better
* than using jiffies etc. to handle periodic memcg event.
*/
enum mem_cgroup_events_target {
MEM_CGROUP_TARGET_THRESH,
MEM_CGROUP_TARGET_SOFTLIMIT,
- MEM_CGROUP_TARGET_NUMAINFO,
MEM_CGROUP_NTARGETS,
};
-struct mem_cgroup_stat_cpu {
- long count[MEMCG_NR_STAT];
- unsigned long events[MEMCG_NR_EVENTS];
- unsigned long nr_page_events;
- unsigned long targets[MEM_CGROUP_NTARGETS];
-};
+struct memcg_vmstats_percpu;
+struct memcg_vmstats;
struct mem_cgroup_reclaim_iter {
struct mem_cgroup *position;
@@ -99,19 +89,47 @@ struct mem_cgroup_reclaim_iter {
unsigned int generation;
};
-struct lruvec_stat {
- long count[NR_VM_NODE_STAT_ITEMS];
+/*
+ * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
+ * shrinkers, which have elements charged to this memcg.
+ */
+struct shrinker_info {
+ struct rcu_head rcu;
+ atomic_long_t *nr_deferred;
+ unsigned long *map;
+ int map_nr_max;
+};
+
+struct lruvec_stats_percpu {
+ /* Local (CPU and cgroup) state */
+ long state[NR_VM_NODE_STAT_ITEMS];
+
+ /* Delta calculation for lockless upward propagation */
+ long state_prev[NR_VM_NODE_STAT_ITEMS];
+};
+
+struct lruvec_stats {
+ /* Aggregated (CPU and subtree) state */
+ long state[NR_VM_NODE_STAT_ITEMS];
+
+ /* Pending child counts during tree propagation */
+ long state_pending[NR_VM_NODE_STAT_ITEMS];
};
/*
- * per-zone information in memory controller.
+ * per-node information in memory controller.
*/
struct mem_cgroup_per_node {
struct lruvec lruvec;
- struct lruvec_stat __percpu *lruvec_stat;
+
+ struct lruvec_stats_percpu __percpu *lruvec_stats_percpu;
+ struct lruvec_stats lruvec_stats;
+
unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
- struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
+ struct mem_cgroup_reclaim_iter iter;
+
+ struct shrinker_info __rcu *shrinker_info;
struct rb_node tree_node; /* RB tree node */
unsigned long usage_in_excess;/* Set to the value by which */
@@ -133,7 +151,7 @@ struct mem_cgroup_threshold_ary {
/* Size of entries[] */
unsigned int size;
/* Array of thresholds */
- struct mem_cgroup_threshold entries[0];
+ struct mem_cgroup_threshold entries[];
};
struct mem_cgroup_thresholds {
@@ -147,10 +165,37 @@ struct mem_cgroup_thresholds {
struct mem_cgroup_threshold_ary *spare;
};
-enum memcg_kmem_state {
- KMEM_NONE,
- KMEM_ALLOCATED,
- KMEM_ONLINE,
+/*
+ * Remember four most recent foreign writebacks with dirty pages in this
+ * cgroup. Inode sharing is expected to be uncommon and, even if we miss
+ * one in a given round, we're likely to catch it later if it keeps
+ * foreign-dirtying, so a fairly low count should be enough.
+ *
+ * See mem_cgroup_track_foreign_dirty_slowpath() for details.
+ */
+#define MEMCG_CGWB_FRN_CNT 4
+
+struct memcg_cgwb_frn {
+ u64 bdi_id; /* bdi->id of the foreign inode */
+ int memcg_id; /* memcg->css.id of foreign inode */
+ u64 at; /* jiffies_64 at the time of dirtying */
+ struct wb_completion done; /* tracks in-flight foreign writebacks */
+};
+
+/*
+ * Bucket for arbitrarily byte-sized objects charged to a memory
+ * cgroup. The bucket can be reparented in one piece when the cgroup
+ * is destroyed, without having to round up the individual references
+ * of all live memory objects in the wild.
+ */
+struct obj_cgroup {
+ struct percpu_ref refcnt;
+ struct mem_cgroup *memcg;
+ atomic_t nr_charged_bytes;
+ union {
+ struct list_head list; /* protected by objcg_lock */
+ struct rcu_head rcu;
+ };
};
/*
@@ -166,30 +211,33 @@ struct mem_cgroup {
struct mem_cgroup_id id;
/* Accounted resources */
- struct page_counter memory;
- struct page_counter swap;
+ struct page_counter memory; /* Both v1 & v2 */
- /* Legacy consumer-oriented counters */
- struct page_counter memsw;
- struct page_counter kmem;
- struct page_counter tcpmem;
+ union {
+ struct page_counter swap; /* v2 only */
+ struct page_counter memsw; /* v1 only */
+ };
- /* Normal memory consumption range */
- unsigned long low;
- unsigned long high;
+ /* Legacy consumer-oriented counters */
+ struct page_counter kmem; /* v1 only */
+ struct page_counter tcpmem; /* v1 only */
/* Range enforcement for interrupt charges */
struct work_struct high_work;
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+ unsigned long zswap_max;
+#endif
+
unsigned long soft_limit;
/* vmpressure notifications */
struct vmpressure vmpressure;
/*
- * Should the accounting and control be hierarchical, per subtree?
+ * Should the OOM killer kill all tasks in this cgroup when it kills one?
*/
- bool use_hierarchy;
+ bool oom_group;
/* protected by memcg_oom_lock */
bool oom_lock;
@@ -199,8 +247,12 @@ struct mem_cgroup {
/* OOM-Killer disable */
int oom_kill_disable;
- /* handle for "memory.events" */
+ /* memory.events and memory.events.local */
struct cgroup_file events_file;
+ struct cgroup_file events_local_file;
+
+ /* handle for "memory.swap.events" */
+ struct cgroup_file swap_events_file;
/* protect arrays of thresholds */
struct mutex thresholds_lock;
@@ -219,18 +271,18 @@ struct mem_cgroup {
* mem_cgroup ? And what type of charges should we move ?
*/
unsigned long move_charge_at_immigrate;
- /*
- * set > 0 if pages under this cgroup are moving to other cgroup.
- */
- atomic_t moving_account;
/* taken only while moving_account > 0 */
spinlock_t move_lock;
- struct task_struct *move_lock_task;
unsigned long move_lock_flags;
- /*
- * percpu counter.
- */
- struct mem_cgroup_stat_cpu __percpu *stat;
+
+ CACHELINE_PADDING(_pad1_);
+
+ /* memory.stat */
+ struct memcg_vmstats *vmstats;
+
+ /* memory.events */
+ atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
+ atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];
unsigned long socket_pressure;
@@ -238,88 +290,448 @@ struct mem_cgroup {
bool tcpmem_active;
int tcpmem_pressure;
-#ifndef CONFIG_SLOB
- /* Index in the kmem_cache->memcg_params.memcg_caches array */
+#ifdef CONFIG_MEMCG_KMEM
int kmemcg_id;
- enum memcg_kmem_state kmem_state;
- struct list_head kmem_caches;
+ struct obj_cgroup __rcu *objcg;
+ /* list of inherited objcgs, protected by objcg_lock */
+ struct list_head objcg_list;
#endif
- int last_scanned_node;
-#if MAX_NUMNODES > 1
- nodemask_t scan_nodes;
- atomic_t numainfo_events;
- atomic_t numainfo_updating;
-#endif
+ CACHELINE_PADDING(_pad2_);
+
+ /*
+ * set > 0 if pages under this cgroup are moving to other cgroup.
+ */
+ atomic_t moving_account;
+ struct task_struct *move_lock_task;
+
+ struct memcg_vmstats_percpu __percpu *vmstats_percpu;
#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
+ struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif
/* List of events which userspace want to receive */
struct list_head event_list;
spinlock_t event_list_lock;
- struct mem_cgroup_per_node *nodeinfo[0];
- /* WARNING: nodeinfo must be the last member here */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ struct deferred_split deferred_split_queue;
+#endif
+
+#ifdef CONFIG_LRU_GEN
+ /* per-memcg mm_struct list */
+ struct lru_gen_mm_list mm_list;
+#endif
+
+ struct mem_cgroup_per_node *nodeinfo[];
};
+/*
+ * Size of the first charge trial.
+ * TODO: it may be necessary to use larger batches on big machines, or to
+ * size the batch dynamically based on the workload.
+ */
+#define MEMCG_CHARGE_BATCH 64U
+
extern struct mem_cgroup *root_mem_cgroup;
+enum page_memcg_data_flags {
+ /* page->memcg_data is a pointer to an objcgs vector */
+ MEMCG_DATA_OBJCGS = (1UL << 0),
+ /* page has been accounted as a non-slab kernel page */
+ MEMCG_DATA_KMEM = (1UL << 1),
+ /* the next bit after the last actual flag */
+ __NR_MEMCG_DATA_FLAGS = (1UL << 2),
+};
+
+#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)
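/*
 * Illustrative sketch (editorial, not part of this patch): how the flag bits
 * and a pointer share folio->memcg_data. The low bits covered by
 * MEMCG_DATA_FLAGS_MASK hold the flags; masking them off recovers the
 * pointer, exactly as the accessors below do. example_unpack_memcg() is
 * hypothetical.
 */
static inline struct mem_cgroup *example_unpack_memcg(unsigned long memcg_data)
{
	if (memcg_data & MEMCG_DATA_OBJCGS)	/* slab: objcg vector, no memcg */
		return NULL;
	if (memcg_data & MEMCG_DATA_KMEM)	/* pointer is an obj_cgroup */
		return NULL;
	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}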
+
+static inline bool folio_memcg_kmem(struct folio *folio);
+
+/*
+ * After initialization, objcg->memcg always points at a valid memcg,
+ * but it can be atomically swapped to the parent memcg.
+ *
+ * The caller must ensure that the returned memcg won't be released:
+ * e.g. acquire the rcu_read_lock or css_set_lock.
+ */
+static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
+{
+ return READ_ONCE(objcg->memcg);
+}
+
+/*
+ * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
+ * @folio: Pointer to the folio.
+ *
+ * Returns a pointer to the memory cgroup associated with the folio,
+ * or NULL. This function assumes that the folio is known to have a
+ * proper memory cgroup pointer. It's not safe to call this function
+ * against some types of folios, e.g. slab folios or ex-slab folios or
+ * kmem folios.
+ */
+static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
+{
+ unsigned long memcg_data = folio->memcg_data;
+
+ VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
+ VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
+ VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);
+
+ return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+}
+
+/*
+ * __folio_objcg - get the object cgroup associated with a kmem folio.
+ * @folio: Pointer to the folio.
+ *
+ * Returns a pointer to the object cgroup associated with the folio,
+ * or NULL. This function assumes that the folio is known to have a
+ * proper object cgroup pointer. It's not safe to call this function
+ * against some types of folios, e.g. slab folios or ex-slab folios or
+ * LRU folios.
+ */
+static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
+{
+ unsigned long memcg_data = folio->memcg_data;
+
+ VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
+ VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
+ VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);
+
+ return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+}
+
+/*
+ * folio_memcg - Get the memory cgroup associated with a folio.
+ * @folio: Pointer to the folio.
+ *
+ * Returns a pointer to the memory cgroup associated with the folio,
+ * or NULL. This function assumes that the folio is known to have a
+ * proper memory cgroup pointer. It's not safe to call this function
+ * against some types of folios, e.g. slab folios or ex-slab folios.
+ *
+ * For a non-kmem folio any of the following ensures folio and memcg binding
+ * stability:
+ *
+ * - the folio lock
+ * - LRU isolation
+ * - lock_page_memcg()
+ * - exclusive reference
+ * - mem_cgroup_trylock_pages()
+ *
+ * For a kmem folio a caller should hold an rcu read lock to protect memcg
+ * associated with a kmem folio from being released.
+ */
+static inline struct mem_cgroup *folio_memcg(struct folio *folio)
+{
+ if (folio_memcg_kmem(folio))
+ return obj_cgroup_memcg(__folio_objcg(folio));
+ return __folio_memcg(folio);
+}
+
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+ return folio_memcg(page_folio(page));
+}
+
+/**
+ * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
+ * @folio: Pointer to the folio.
+ *
+ * This function assumes that the folio is known to have a
+ * proper memory cgroup pointer. It's not safe to call this function
+ * against some types of folios, e.g. slab folios or ex-slab folios.
+ *
+ * Return: A pointer to the memory cgroup associated with the folio,
+ * or NULL.
+ */
+static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
+{
+ unsigned long memcg_data = READ_ONCE(folio->memcg_data);
+
+ VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (memcg_data & MEMCG_DATA_KMEM) {
+ struct obj_cgroup *objcg;
+
+ objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+ return obj_cgroup_memcg(objcg);
+ }
+
+ return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+}
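/*
 * Illustrative sketch (editorial, not part of this patch): folio_memcg_rcu()
 * requires the caller to be inside an RCU read-side critical section, so a
 * hypothetical lockless comparison looks like this.
 */
static inline bool example_folio_in_memcg(struct folio *folio,
					  struct mem_cgroup *memcg)
{
	bool match;

	rcu_read_lock();
	match = folio_memcg_rcu(folio) == memcg;
	rcu_read_unlock();

	return match;
}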
+
+/*
+ * folio_memcg_check - Get the memory cgroup associated with a folio.
+ * @folio: Pointer to the folio.
+ *
+ * Returns a pointer to the memory cgroup associated with the folio,
+ * or NULL. This function, unlike folio_memcg(), can take any folio
+ * as an argument. It has to be used when it's not known whether a folio
+ * has an associated memory cgroup pointer or an object cgroups vector or
+ * an object cgroup.
+ *
+ * For a non-kmem folio any of the following ensures folio and memcg binding
+ * stability:
+ *
+ * - the folio lock
+ * - LRU isolation
+ * - folio_memcg_lock()
+ * - exclusive reference
+ * - mem_cgroup_trylock_pages()
+ *
+ * For a kmem folio a caller should hold an rcu read lock to protect memcg
+ * associated with a kmem folio from being released.
+ */
+static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
+{
+ /*
+ * Because folio->memcg_data might be changed asynchronously
+ * for slabs, READ_ONCE() should be used here.
+ */
+ unsigned long memcg_data = READ_ONCE(folio->memcg_data);
+
+ if (memcg_data & MEMCG_DATA_OBJCGS)
+ return NULL;
+
+ if (memcg_data & MEMCG_DATA_KMEM) {
+ struct obj_cgroup *objcg;
+
+ objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+ return obj_cgroup_memcg(objcg);
+ }
+
+ return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+}
+
+static inline struct mem_cgroup *page_memcg_check(struct page *page)
+{
+ if (PageTail(page))
+ return NULL;
+ return folio_memcg_check((struct folio *)page);
+}
+
+static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
+{
+ struct mem_cgroup *memcg;
+
+ rcu_read_lock();
+retry:
+ memcg = obj_cgroup_memcg(objcg);
+ if (unlikely(!css_tryget(&memcg->css)))
+ goto retry;
+ rcu_read_unlock();
+
+ return memcg;
+}
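/*
 * Editorial note on the retry loop above: css_tryget() fails only while the
 * memcg that objcg->memcg pointed at is being released; in that case the
 * objcg has already been reparented, so rereading obj_cgroup_memcg() under
 * the same RCU read lock observes an ancestor whose css can be pinned.
 */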
+
+#ifdef CONFIG_MEMCG_KMEM
+/*
+ * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
+ * @folio: Pointer to the folio.
+ *
+ * Checks if the folio has the MemcgKmem flag set. The caller must ensure
+ * that the folio has an associated memory cgroup. It's not safe to call
+ * this function against some types of folios, e.g. slab folios.
+ */
+static inline bool folio_memcg_kmem(struct folio *folio)
+{
+ VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
+ VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
+ return folio->memcg_data & MEMCG_DATA_KMEM;
+}
+
+#else
+static inline bool folio_memcg_kmem(struct folio *folio)
+{
+ return false;
+}
+
+#endif
+
+static inline bool PageMemcgKmem(struct page *page)
+{
+ return folio_memcg_kmem(page_folio(page));
+}
+
+static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
+{
+ return (memcg == root_mem_cgroup);
+}
+
static inline bool mem_cgroup_disabled(void)
{
return !cgroup_subsys_enabled(memory_cgrp_subsys);
}
-static inline void mem_cgroup_event(struct mem_cgroup *memcg,
- enum memcg_event_item event)
+static inline void mem_cgroup_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg,
+ unsigned long *min,
+ unsigned long *low)
+{
+ *min = *low = 0;
+
+ if (mem_cgroup_disabled())
+ return;
+
+ /*
+ * There is no reclaim protection applied to a targeted reclaim.
+	 * We special-case it here because the mem_cgroup_protected
+	 * calculation is not robust enough to preserve the protection
+	 * invariant when effective values are computed by parallel
+	 * reclaimers with different reclaim targets. This is especially
+	 * a problem for tail memcgs (as they have pages on the LRU),
+	 * which want effective values of 0 for targeted reclaim but a
+	 * different value for external reclaim.
+ *
+ * Example
+ * Let's have global and A's reclaim in parallel:
+ * |
+ * A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
+ * |\
+ * | C (low = 1G, usage = 2.5G)
+ * B (low = 1G, usage = 0.5G)
+ *
+ * For the global reclaim
+ * A.elow = A.low
+ * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
+ * C.elow = min(C.usage, C.low)
+ *
+ * With the effective values resetting we have A reclaim
+ * A.elow = 0
+ * B.elow = B.low
+ * C.elow = C.low
+ *
+ * If the global reclaim races with A's reclaim then
+	 * B.elow = C.elow = 0 (because children_low_usage > A.elow)
+	 * is possible and reclaiming B would violate the protection.
+ *
+ */
+ if (root == memcg)
+ return;
+
+ *min = READ_ONCE(memcg->memory.emin);
+ *low = READ_ONCE(memcg->memory.elow);
+}
+
+void mem_cgroup_calculate_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg);
+
+static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
+{
+ /*
+ * The root memcg doesn't account charges, and doesn't support
+ * protection. The target memcg's protection is ignored, see
+ * mem_cgroup_calculate_protection() and mem_cgroup_protection()
+ */
+ return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
+ memcg == target;
+}
+
+static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
+{
+ if (mem_cgroup_unprotected(target, memcg))
+ return false;
+
+ return READ_ONCE(memcg->memory.elow) >=
+ page_counter_read(&memcg->memory);
+}
+
+static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
+{
+ if (mem_cgroup_unprotected(target, memcg))
+ return false;
+
+ return READ_ONCE(memcg->memory.emin) >=
+ page_counter_read(&memcg->memory);
+}
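/*
 * Illustrative sketch (editorial, not part of this patch): how a
 * hypothetical reclaim loop is expected to consume the helpers above -
 * skip memcgs below their min, and skip memcgs below low unless low
 * protection is deliberately being overridden.
 */
static inline bool example_skip_memcg(struct mem_cgroup *target,
				      struct mem_cgroup *memcg,
				      bool ignore_low)
{
	if (mem_cgroup_below_min(target, memcg))
		return true;			/* hard protection */
	if (mem_cgroup_below_low(target, memcg))
		return !ignore_low;		/* best-effort protection */
	return false;
}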
+
+int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
+
+/**
+ * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
+ * @folio: Folio to charge.
+ * @mm: mm context of the allocating task.
+ * @gfp: Reclaim mode.
+ *
+ * Try to charge @folio to the memcg that @mm belongs to, reclaiming
+ * pages according to @gfp if necessary. If @mm is NULL, try to
+ * charge to the active memcg.
+ *
+ * Do not use this for folios allocated for swapin.
+ *
+ * Return: 0 on success. Otherwise, an error code is returned.
+ */
+static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
+ gfp_t gfp)
{
- this_cpu_inc(memcg->stat->events[event]);
- cgroup_file_notify(&memcg->events_file);
+ if (mem_cgroup_disabled())
+ return 0;
+ return __mem_cgroup_charge(folio, mm, gfp);
}
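/*
 * Illustrative sketch (editorial, not part of this patch): the expected
 * pairing for a newly allocated folio - charge before the folio becomes
 * visible, and rely on mem_cgroup_uncharge() at release time. Error
 * handling beyond the charge itself is elided; example_charge_new_folio()
 * is hypothetical.
 */
static inline int example_charge_new_folio(struct folio *folio,
					   struct mm_struct *mm, gfp_t gfp)
{
	int ret = mem_cgroup_charge(folio, mm, gfp);

	if (ret)
		return ret;	/* over limit and reclaim did not help */
	/* ... insert the folio into the page cache or page tables ... */
	return 0;
}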
-bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
+int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
+ gfp_t gfp, swp_entry_t entry);
+void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
-int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask, struct mem_cgroup **memcgp,
- bool compound);
-void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
- bool lrucare, bool compound);
-void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
- bool compound);
-void mem_cgroup_uncharge(struct page *page);
-void mem_cgroup_uncharge_list(struct list_head *page_list);
+void __mem_cgroup_uncharge(struct folio *folio);
-void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
+/**
+ * mem_cgroup_uncharge - Uncharge a folio.
+ * @folio: Folio to uncharge.
+ *
+ * Uncharge a folio previously charged with mem_cgroup_charge().
+ */
+static inline void mem_cgroup_uncharge(struct folio *folio)
+{
+ if (mem_cgroup_disabled())
+ return;
+ __mem_cgroup_uncharge(folio);
+}
-static struct mem_cgroup_per_node *
-mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
+void __mem_cgroup_uncharge_list(struct list_head *page_list);
+static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
- return memcg->nodeinfo[nid];
+ if (mem_cgroup_disabled())
+ return;
+ __mem_cgroup_uncharge_list(page_list);
}
+void mem_cgroup_migrate(struct folio *old, struct folio *new);
+
/**
- * mem_cgroup_lruvec - get the lru list vector for a node or a memcg zone
- * @node: node of the wanted lruvec
+ * mem_cgroup_lruvec - get the lru list vector for a memcg & node
* @memcg: memcg of the wanted lruvec
+ * @pgdat: pglist_data of the wanted node
*
- * Returns the lru list vector holding pages for a given @node or a given
- * @memcg and @zone. This can be the node lruvec, if the memory controller
- * is disabled.
+ * Returns the lru list vector holding pages for a given @memcg &
+ * @pgdat combination. This can be the node lruvec, if the memory
+ * controller is disabled.
*/
-static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
- struct mem_cgroup *memcg)
+static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
+ struct pglist_data *pgdat)
{
struct mem_cgroup_per_node *mz;
struct lruvec *lruvec;
if (mem_cgroup_disabled()) {
- lruvec = node_lruvec(pgdat);
+ lruvec = &pgdat->__lruvec;
goto out;
}
- mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
+ if (!memcg)
+ memcg = root_mem_cgroup;
+
+ mz = memcg->nodeinfo[pgdat->node_id];
lruvec = &mz->lruvec;
out:
/*
@@ -332,16 +744,75 @@ out:
return lruvec;
}
-struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
+/**
+ * folio_lruvec - return lruvec for isolating/putting an LRU folio
+ * @folio: Pointer to the folio.
+ *
+ * This function relies on the folio's memcg binding being stable.
+ */
+static inline struct lruvec *folio_lruvec(struct folio *folio)
+{
+ struct mem_cgroup *memcg = folio_memcg(folio);
+
+ VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
+ return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
+}
-bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
+struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
+
+struct lruvec *folio_lruvec_lock(struct folio *folio);
+struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
+struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
+ unsigned long *flags);
+
+#ifdef CONFIG_DEBUG_VM
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
+#else
+static inline
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
+{
+}
+#endif
+
static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
return css ? container_of(css, struct mem_cgroup, css) : NULL;
}
+static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
+{
+ return percpu_ref_tryget(&objcg->refcnt);
+}
+
+static inline void obj_cgroup_get(struct obj_cgroup *objcg)
+{
+ percpu_ref_get(&objcg->refcnt);
+}
+
+static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
+ unsigned long nr)
+{
+ percpu_ref_get_many(&objcg->refcnt, nr);
+}
+
+static inline void obj_cgroup_put(struct obj_cgroup *objcg)
+{
+ percpu_ref_put(&objcg->refcnt);
+}
+
+static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
+{
+ return !memcg || css_tryget(&memcg->css);
+}
+
+static inline void mem_cgroup_put(struct mem_cgroup *memcg)
+{
+ if (memcg)
+ css_put(&memcg->css);
+}
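/*
 * Illustrative sketch (editorial, not part of this patch): both helpers
 * above tolerate NULL, so a hypothetical caller can pin an optional memcg
 * without special-casing it.
 */
static inline void example_pin_memcg(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_tryget(memcg))
		return;			/* memcg is being destroyed */
	/* ... memcg (possibly NULL) can be used safely here ... */
	mem_cgroup_put(memcg);
}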
+
#define mem_cgroup_from_counter(counter, member) \
container_of(counter, struct mem_cgroup, member)
@@ -361,6 +832,20 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
+#ifdef CONFIG_SHRINKER_DEBUG
+static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
+{
+ return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
+}
+
+struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
+#endif
+
+static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
+{
+ return mem_cgroup_from_css(seq_css(m));
+}
+
static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
struct mem_cgroup_per_node *mz;
@@ -381,9 +866,7 @@ static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
*/
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
- if (!memcg->memory.parent)
- return NULL;
- return mem_cgroup_from_counter(memcg->memory.parent, memory);
+ return mem_cgroup_from_css(memcg->css.parent);
}
static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
@@ -391,8 +874,6 @@ static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
{
if (root == memcg)
return true;
- if (!root->use_hierarchy)
- return false;
return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}
@@ -410,7 +891,7 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
return match;
}
-struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
+struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
ino_t page_cgroup_ino(struct page *page);
static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
@@ -420,30 +901,9 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
return !!(memcg->css.flags & CSS_ONLINE);
}
-/*
- * For memory reclaim.
- */
-int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
-
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
int zid, int nr_pages);
-unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
- int nid, unsigned int lru_mask);
-
-static inline
-unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
-{
- struct mem_cgroup_per_node *mz;
- unsigned long nr_pages = 0;
- int zid;
-
- mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- for (zid = 0; zid < MAX_NR_ZONES; zid++)
- nr_pages += mz->lru_zone_size[zid][lru];
- return nr_pages;
-}
-
static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
enum lru_list lru, int zone_idx)
@@ -451,26 +911,30 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
struct mem_cgroup_per_node *mz;
mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- return mz->lru_zone_size[zone_idx][lru];
+ return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}
void mem_cgroup_handle_over_high(void);
-unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg);
+unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
+
+unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
-void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
+void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
struct task_struct *p);
-static inline void mem_cgroup_oom_enable(void)
+void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
+
+static inline void mem_cgroup_enter_user_fault(void)
{
- WARN_ON(current->memcg_may_oom);
- current->memcg_may_oom = 1;
+ WARN_ON(current->in_user_fault);
+ current->in_user_fault = 1;
}
-static inline void mem_cgroup_oom_disable(void)
+static inline void mem_cgroup_exit_user_fault(void)
{
- WARN_ON(!current->memcg_may_oom);
- current->memcg_may_oom = 0;
+ WARN_ON(!current->in_user_fault);
+ current->in_user_fault = 0;
}
static inline bool task_in_memcg_oom(struct task_struct *p)
@@ -479,163 +943,158 @@ static inline bool task_in_memcg_oom(struct task_struct *p)
}
bool mem_cgroup_oom_synchronize(bool wait);
+struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
+ struct mem_cgroup *oom_domain);
+void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
-#ifdef CONFIG_MEMCG_SWAP
-extern int do_swap_account;
-#endif
-
+void folio_memcg_lock(struct folio *folio);
+void folio_memcg_unlock(struct folio *folio);
void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);
-static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
-{
- long val = 0;
- int cpu;
+void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
- for_each_possible_cpu(cpu)
- val += per_cpu(memcg->stat->count[idx], cpu);
+/* try to stabilize folio_memcg() for all the pages in a memcg */
+static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
+{
+ rcu_read_lock();
- if (val < 0)
- val = 0;
+ if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
+ return true;
- return val;
+ rcu_read_unlock();
+ return false;
}
-static inline void __mod_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx, int val)
+static inline void mem_cgroup_unlock_pages(void)
{
- if (!mem_cgroup_disabled())
- __this_cpu_add(memcg->stat->count[idx], val);
+ rcu_read_unlock();
}
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx, int val)
+ int idx, int val)
{
- if (!mem_cgroup_disabled())
- this_cpu_add(memcg->stat->count[idx], val);
-}
+ unsigned long flags;
-/**
- * mod_memcg_page_state - update page state statistics
- * @page: the page
- * @idx: page state item to account
- * @val: number of pages (positive or negative)
- *
- * The @page must be locked or the caller must use lock_page_memcg()
- * to prevent double accounting when the page is concurrently being
- * moved to another memcg:
- *
- * lock_page(page) or lock_page_memcg(page)
- * if (TestClearPageState(page))
- * mod_memcg_page_state(page, state, -1);
- * unlock_page(page) or unlock_page_memcg(page)
- *
- * Kernel pages are an exception to this, since they'll never move.
- */
-static inline void __mod_memcg_page_state(struct page *page,
- enum memcg_stat_item idx, int val)
-{
- if (page->mem_cgroup)
- __mod_memcg_state(page->mem_cgroup, idx, val);
+ local_irq_save(flags);
+ __mod_memcg_state(memcg, idx, val);
+ local_irq_restore(flags);
}
static inline void mod_memcg_page_state(struct page *page,
- enum memcg_stat_item idx, int val)
+ int idx, int val)
{
- if (page->mem_cgroup)
- mod_memcg_state(page->mem_cgroup, idx, val);
+ struct mem_cgroup *memcg;
+
+ if (mem_cgroup_disabled())
+ return;
+
+ rcu_read_lock();
+ memcg = page_memcg(page);
+ if (memcg)
+ mod_memcg_state(memcg, idx, val);
+ rcu_read_unlock();
}
+unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
+
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
enum node_stat_item idx)
{
struct mem_cgroup_per_node *pn;
- long val = 0;
- int cpu;
+ long x;
if (mem_cgroup_disabled())
return node_page_state(lruvec_pgdat(lruvec), idx);
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- for_each_possible_cpu(cpu)
- val += per_cpu(pn->lruvec_stat->count[idx], cpu);
-
- if (val < 0)
- val = 0;
-
- return val;
+ x = READ_ONCE(pn->lruvec_stats.state[idx]);
+#ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
}
-static inline void __mod_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx, int val)
+static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
+ enum node_stat_item idx)
{
struct mem_cgroup_per_node *pn;
+ long x = 0;
+ int cpu;
- __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
if (mem_cgroup_disabled())
- return;
+ return node_page_state(lruvec_pgdat(lruvec), idx);
+
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- __mod_memcg_state(pn->memcg, idx, val);
- __this_cpu_add(pn->lruvec_stat->count[idx], val);
+ for_each_possible_cpu(cpu)
+ x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
+#ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
}
-static inline void mod_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx, int val)
-{
- struct mem_cgroup_per_node *pn;
+void mem_cgroup_flush_stats(void);
+void mem_cgroup_flush_stats_atomic(void);
+void mem_cgroup_flush_stats_ratelimited(void);
- mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
- if (mem_cgroup_disabled())
- return;
- pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- mod_memcg_state(pn->memcg, idx, val);
- this_cpu_add(pn->lruvec_stat->count[idx], val);
-}
+void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+ int val);
+void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
-static inline void __mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
+static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
+ int val)
{
- struct mem_cgroup_per_node *pn;
+ unsigned long flags;
- __mod_node_page_state(page_pgdat(page), idx, val);
- if (mem_cgroup_disabled() || !page->mem_cgroup)
- return;
- __mod_memcg_state(page->mem_cgroup, idx, val);
- pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
- __this_cpu_add(pn->lruvec_stat->count[idx], val);
+ local_irq_save(flags);
+ __mod_lruvec_kmem_state(p, idx, val);
+ local_irq_restore(flags);
}
-static inline void mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
+static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
{
- struct mem_cgroup_per_node *pn;
+ unsigned long flags;
- mod_node_page_state(page_pgdat(page), idx, val);
- if (mem_cgroup_disabled() || !page->mem_cgroup)
- return;
- mod_memcg_state(page->mem_cgroup, idx, val);
- pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
- this_cpu_add(pn->lruvec_stat->count[idx], val);
+ local_irq_save(flags);
+ __mod_memcg_lruvec_state(lruvec, idx, val);
+ local_irq_restore(flags);
}
-unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
- gfp_t gfp_mask,
- unsigned long *total_scanned);
+void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
+ unsigned long count);
static inline void count_memcg_events(struct mem_cgroup *memcg,
enum vm_event_item idx,
unsigned long count)
{
- if (!mem_cgroup_disabled())
- this_cpu_add(memcg->stat->events[idx], count);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __count_memcg_events(memcg, idx, count);
+ local_irq_restore(flags);
}
static inline void count_memcg_page_event(struct page *page,
- enum memcg_stat_item idx)
+ enum vm_event_item idx)
{
- if (page->mem_cgroup)
- count_memcg_events(page->mem_cgroup, idx, 1);
+ struct mem_cgroup *memcg = page_memcg(page);
+
+ if (memcg)
+ count_memcg_events(memcg, idx, 1);
+}
+
+static inline void count_memcg_folio_events(struct folio *folio,
+ enum vm_event_item idx, unsigned long nr)
+{
+ struct mem_cgroup *memcg = folio_memcg(folio);
+
+ if (memcg)
+ count_memcg_events(memcg, idx, nr);
}
static inline void count_memcg_event_mm(struct mm_struct *mm,
@@ -648,62 +1107,165 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
rcu_read_lock();
memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
- if (likely(memcg)) {
- this_cpu_inc(memcg->stat->events[idx]);
- if (idx == OOM_KILL)
+ if (likely(memcg))
+ count_memcg_events(memcg, idx, 1);
+ rcu_read_unlock();
+}
+
+static inline void memcg_memory_event(struct mem_cgroup *memcg,
+ enum memcg_memory_event event)
+{
+ bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
+ event == MEMCG_SWAP_FAIL;
+
+ atomic_long_inc(&memcg->memory_events_local[event]);
+ if (!swap_event)
+ cgroup_file_notify(&memcg->events_local_file);
+
+ do {
+ atomic_long_inc(&memcg->memory_events[event]);
+ if (swap_event)
+ cgroup_file_notify(&memcg->swap_events_file);
+ else
cgroup_file_notify(&memcg->events_file);
- }
+
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ break;
+ if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
+ break;
+ } while ((memcg = parent_mem_cgroup(memcg)) &&
+ !mem_cgroup_is_root(memcg));
+}
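/*
 * Editorial note on the loop above: the event is counted hierarchically in
 * every ancestor below the root, while memory_events_local and the
 * memory.events.local notification stay confined to the memcg where the
 * event happened; cgroup1 and the memory_localevents mount option stop the
 * upward notification after the first level.
 */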
+
+static inline void memcg_memory_event_mm(struct mm_struct *mm,
+ enum memcg_memory_event event)
+{
+ struct mem_cgroup *memcg;
+
+ if (mem_cgroup_disabled())
+ return;
+
+ rcu_read_lock();
+ memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (likely(memcg))
+ memcg_memory_event(memcg, event);
rcu_read_unlock();
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-void mem_cgroup_split_huge_fixup(struct page *head);
-#endif
+
+void split_page_memcg(struct page *head, unsigned int nr);
+
+unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
+ gfp_t gfp_mask,
+ unsigned long *total_scanned);
#else /* CONFIG_MEMCG */
#define MEM_CGROUP_ID_SHIFT 0
#define MEM_CGROUP_ID_MAX 0
-struct mem_cgroup;
+static inline struct mem_cgroup *folio_memcg(struct folio *folio)
+{
+ return NULL;
+}
+
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+ return NULL;
+}
+
+static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
+{
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ return NULL;
+}
+
+static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
+{
+ return NULL;
+}
+
+static inline struct mem_cgroup *page_memcg_check(struct page *page)
+{
+ return NULL;
+}
+
+static inline bool folio_memcg_kmem(struct folio *folio)
+{
+ return false;
+}
+
+static inline bool PageMemcgKmem(struct page *page)
+{
+ return false;
+}
+
+static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
+{
+ return true;
+}
static inline bool mem_cgroup_disabled(void)
{
return true;
}
-static inline void mem_cgroup_event(struct mem_cgroup *memcg,
- enum memcg_event_item event)
+static inline void memcg_memory_event(struct mem_cgroup *memcg,
+ enum memcg_memory_event event)
+{
+}
+
+static inline void memcg_memory_event_mm(struct mm_struct *mm,
+ enum memcg_memory_event event)
{
}
-static inline bool mem_cgroup_low(struct mem_cgroup *root,
- struct mem_cgroup *memcg)
+static inline void mem_cgroup_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg,
+ unsigned long *min,
+ unsigned long *low)
+{
+ *min = *low = 0;
+}
+
+static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg)
+{
+}
+
+static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
+{
+ return true;
+}
+static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
+{
+ return false;
+}
+
+static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
{
return false;
}
-static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask,
- struct mem_cgroup **memcgp,
- bool compound)
+static inline int mem_cgroup_charge(struct folio *folio,
+ struct mm_struct *mm, gfp_t gfp)
{
- *memcgp = NULL;
return 0;
}
-static inline void mem_cgroup_commit_charge(struct page *page,
- struct mem_cgroup *memcg,
- bool lrucare, bool compound)
+static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
+ struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
+ return 0;
}
-static inline void mem_cgroup_cancel_charge(struct page *page,
- struct mem_cgroup *memcg,
- bool compound)
+static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}
-static inline void mem_cgroup_uncharge(struct page *page)
+static inline void mem_cgroup_uncharge(struct folio *folio)
{
}
@@ -711,20 +1273,30 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}
-static inline void mem_cgroup_migrate(struct page *old, struct page *new)
+static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}
-static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
- struct mem_cgroup *memcg)
+static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
+ struct pglist_data *pgdat)
{
- return node_lruvec(pgdat);
+ return &pgdat->__lruvec;
}
-static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
- struct pglist_data *pgdat)
+static inline struct lruvec *folio_lruvec(struct folio *folio)
+{
+ struct pglist_data *pgdat = folio_pgdat(folio);
+ return &pgdat->__lruvec;
+}
+
+static inline
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
+{
+}
+
+static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
- return &pgdat->lruvec;
+ return NULL;
}
static inline bool mm_match_cgroup(struct mm_struct *mm,
@@ -733,12 +1305,55 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
return true;
}
-static inline bool task_in_mem_cgroup(struct task_struct *task,
- const struct mem_cgroup *memcg)
+static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
+{
+ return NULL;
+}
+
+static inline
+struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
+{
+ return NULL;
+}
+
+static inline void obj_cgroup_put(struct obj_cgroup *objcg)
+{
+}
+
+static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
return true;
}
+static inline void mem_cgroup_put(struct mem_cgroup *memcg)
+{
+}
+
+static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
+{
+ struct pglist_data *pgdat = folio_pgdat(folio);
+
+ spin_lock(&pgdat->__lruvec.lru_lock);
+ return &pgdat->__lruvec;
+}
+
+static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
+{
+ struct pglist_data *pgdat = folio_pgdat(folio);
+
+ spin_lock_irq(&pgdat->__lruvec.lru_lock);
+ return &pgdat->__lruvec;
+}
+
+static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
+ unsigned long *flagsp)
+{
+ struct pglist_data *pgdat = folio_pgdat(folio);
+
+ spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
+ return &pgdat->__lruvec;
+}
+
static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
struct mem_cgroup *prev,
@@ -770,6 +1385,23 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
return NULL;
}
+#ifdef CONFIG_SHRINKER_DEBUG
+static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
+{
+ return 0;
+}
+
+static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
+{
+ return NULL;
+}
+#endif
+
+static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
+{
+ return NULL;
+}
+
static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
return NULL;
@@ -780,11 +1412,6 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
return true;
}
-static inline unsigned long
-mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
-{
- return 0;
-}
static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
enum lru_list lru, int zone_idx)
@@ -792,20 +1419,23 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
return 0;
}
-static inline unsigned long
-mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
- int nid, unsigned int lru_mask)
+static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
return 0;
}
-static inline unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
+static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
return 0;
}
static inline void
-mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
+mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
+{
+}
+
+static inline void
+mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}
@@ -817,15 +1447,35 @@ static inline void unlock_page_memcg(struct page *page)
{
}
+static inline void folio_memcg_lock(struct folio *folio)
+{
+}
+
+static inline void folio_memcg_unlock(struct folio *folio)
+{
+}
+
+static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
+{
+ /* to match folio_memcg_rcu() */
+ rcu_read_lock();
+ return true;
+}
+
+static inline void mem_cgroup_unlock_pages(void)
+{
+ rcu_read_unlock();
+}
+
static inline void mem_cgroup_handle_over_high(void)
{
}
-static inline void mem_cgroup_oom_enable(void)
+static inline void mem_cgroup_enter_user_fault(void)
{
}
-static inline void mem_cgroup_oom_disable(void)
+static inline void mem_cgroup_exit_user_fault(void)
{
}
@@ -839,34 +1489,36 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
return false;
}
-static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+static inline struct mem_cgroup *mem_cgroup_get_oom_group(
+ struct task_struct *victim, struct mem_cgroup *oom_domain)
+{
+ return NULL;
+}
+
+static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
- return 0;
}
static inline void __mod_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx,
+ int idx,
int nr)
{
}
static inline void mod_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx,
+ int idx,
int nr)
{
}
-static inline void __mod_memcg_page_state(struct page *page,
- enum memcg_stat_item idx,
- int nr)
+static inline void mod_memcg_page_state(struct page *page,
+ int idx, int val)
{
}
-static inline void mod_memcg_page_state(struct page *page,
- enum memcg_stat_item idx,
- int nr)
+static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
+ return 0;
}
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
@@ -875,40 +1527,43 @@ static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
return node_page_state(lruvec_pgdat(lruvec), idx);
}
-static inline void __mod_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx, int val)
+static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
+ enum node_stat_item idx)
{
- __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+ return node_page_state(lruvec_pgdat(lruvec), idx);
}
-static inline void mod_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx, int val)
+static inline void mem_cgroup_flush_stats(void)
{
- mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}
-static inline void __mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
+static inline void mem_cgroup_flush_stats_atomic(void)
{
- __mod_node_page_state(page_pgdat(page), idx, val);
}
-static inline void mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
+static inline void mem_cgroup_flush_stats_ratelimited(void)
{
- mod_node_page_state(page_pgdat(page), idx, val);
}
-static inline
-unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
- gfp_t gfp_mask,
- unsigned long *total_scanned)
+static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
{
- return 0;
}
-static inline void mem_cgroup_split_huge_fixup(struct page *head)
+static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
+ int val)
+{
+ struct page *page = virt_to_head_page(p);
+
+ __mod_node_page_state(page_pgdat(page), idx, val);
+}
+
+static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
+ int val)
{
+ struct page *page = virt_to_head_page(p);
+
+ mod_node_page_state(page_pgdat(page), idx, val);
}
static inline void count_memcg_events(struct mem_cgroup *memcg,
@@ -917,121 +1572,140 @@ static inline void count_memcg_events(struct mem_cgroup *memcg,
{
}
-static inline void count_memcg_page_event(struct page *page,
- enum memcg_stat_item idx)
+static inline void __count_memcg_events(struct mem_cgroup *memcg,
+ enum vm_event_item idx,
+ unsigned long count)
{
}
-static inline
-void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
+static inline void count_memcg_page_event(struct page *page,
+ int idx)
{
}
-#endif /* CONFIG_MEMCG */
-static inline void __inc_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+static inline void count_memcg_folio_events(struct folio *folio,
+ enum vm_event_item idx, unsigned long nr)
{
- __mod_memcg_state(memcg, idx, 1);
}
-static inline void __dec_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+static inline
+void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
- __mod_memcg_state(memcg, idx, -1);
}
-static inline void __inc_memcg_page_state(struct page *page,
- enum memcg_stat_item idx)
+static inline void split_page_memcg(struct page *head, unsigned int nr)
{
- __mod_memcg_page_state(page, idx, 1);
}
-static inline void __dec_memcg_page_state(struct page *page,
- enum memcg_stat_item idx)
+static inline
+unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
+ gfp_t gfp_mask,
+ unsigned long *total_scanned)
{
- __mod_memcg_page_state(page, idx, -1);
+ return 0;
}
+#endif /* CONFIG_MEMCG */
-static inline void __inc_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx)
+static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
- __mod_lruvec_state(lruvec, idx, 1);
+ __mod_lruvec_kmem_state(p, idx, 1);
}
-static inline void __dec_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx)
+static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
- __mod_lruvec_state(lruvec, idx, -1);
+ __mod_lruvec_kmem_state(p, idx, -1);
}
-static inline void __inc_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
+static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
- __mod_lruvec_page_state(page, idx, 1);
-}
+ struct mem_cgroup *memcg;
-static inline void __dec_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
-{
- __mod_lruvec_page_state(page, idx, -1);
+ memcg = lruvec_memcg(lruvec);
+ if (!memcg)
+ return NULL;
+ memcg = parent_mem_cgroup(memcg);
+ if (!memcg)
+ return NULL;
+ return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}
-static inline void inc_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
- mod_memcg_state(memcg, idx, 1);
+ spin_unlock(&lruvec->lru_lock);
}
-static inline void dec_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
- mod_memcg_state(memcg, idx, -1);
+ spin_unlock_irq(&lruvec->lru_lock);
}
-static inline void inc_memcg_page_state(struct page *page,
- enum memcg_stat_item idx)
+static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
+ unsigned long flags)
{
- mod_memcg_page_state(page, idx, 1);
+ spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}
-static inline void dec_memcg_page_state(struct page *page,
- enum memcg_stat_item idx)
+/* Test requires a stable page->memcg binding, see page_memcg() */
+static inline bool folio_matches_lruvec(struct folio *folio,
+ struct lruvec *lruvec)
{
- mod_memcg_page_state(page, idx, -1);
+ return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
+ lruvec_memcg(lruvec) == folio_memcg(folio);
}
-static inline void inc_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx)
+/* Don't lock again if the folio's lruvec is already locked */
+static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
+ struct lruvec *locked_lruvec)
{
- mod_lruvec_state(lruvec, idx, 1);
-}
+ if (locked_lruvec) {
+ if (folio_matches_lruvec(folio, locked_lruvec))
+ return locked_lruvec;
-static inline void dec_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx)
-{
- mod_lruvec_state(lruvec, idx, -1);
-}
+ unlock_page_lruvec_irq(locked_lruvec);
+ }
-static inline void inc_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
-{
- mod_lruvec_page_state(page, idx, 1);
+ return folio_lruvec_lock_irq(folio);
}
-static inline void dec_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
+/* Don't lock again if the folio's lruvec is already locked */
+static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
+ struct lruvec *locked_lruvec, unsigned long *flags)
{
- mod_lruvec_page_state(page, idx, -1);
+ if (locked_lruvec) {
+ if (folio_matches_lruvec(folio, locked_lruvec))
+ return locked_lruvec;
+
+ unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
+ }
+
+ return folio_lruvec_lock_irqsave(folio, flags);
}
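/*
 * Illustrative sketch (editorial, not part of this patch): the relock
 * helpers above exist for batched LRU walks, where consecutive folios
 * usually share a lruvec and the lock only has to be switched on a
 * boundary. example_walk() is hypothetical.
 */
static inline void example_walk(struct folio **folios, unsigned int nr)
{
	struct lruvec *lruvec = NULL;
	unsigned long flags;
	unsigned int i;

	for (i = 0; i < nr; i++) {
		lruvec = folio_lruvec_relock_irqsave(folios[i], lruvec, &flags);
		/* ... operate on folios[i] under lruvec->lru_lock ... */
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);
}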
#ifdef CONFIG_CGROUP_WRITEBACK
-struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
unsigned long *pheadroom, unsigned long *pdirty,
unsigned long *pwriteback);
+void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
+ struct bdi_writeback *wb);
+
+static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
+ struct bdi_writeback *wb)
+{
+ struct mem_cgroup *memcg;
+
+ if (mem_cgroup_disabled())
+ return;
+
+ memcg = folio_memcg(folio);
+ if (unlikely(memcg && &memcg->css != wb->memcg_css))
+ mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
+}
+
+void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
+
#else /* CONFIG_CGROUP_WRITEBACK */
static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
@@ -1047,10 +1721,20 @@ static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
{
}
+static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
+ struct bdi_writeback *wb)
+{
+}
+
+static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
+{
+}
+
#endif /* CONFIG_CGROUP_WRITEBACK */
struct sock;
-bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
+bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
+ gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
@@ -1062,11 +1746,16 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
return true;
do {
- if (time_before(jiffies, memcg->socket_pressure))
+ if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
return true;
} while ((memcg = parent_mem_cgroup(memcg)));
return false;
}
+
+int alloc_shrinker_info(struct mem_cgroup *memcg);
+void free_shrinker_info(struct mem_cgroup *memcg);
+void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
+void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
@@ -1075,68 +1764,157 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
return false;
}
+
+static inline void set_shrinker_bit(struct mem_cgroup *memcg,
+ int nid, int shrinker_id)
+{
+}
#endif
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
-void memcg_kmem_put_cache(struct kmem_cache *cachep);
-int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
- struct mem_cgroup *memcg);
-int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
-void memcg_kmem_uncharge(struct page *page, int order);
+#ifdef CONFIG_MEMCG_KMEM
+bool mem_cgroup_kmem_disabled(void);
+int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
+void __memcg_kmem_uncharge_page(struct page *page, int order);
-#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
-extern struct static_key_false memcg_kmem_enabled_key;
-extern struct workqueue_struct *memcg_kmem_cache_wq;
+struct obj_cgroup *get_obj_cgroup_from_current(void);
+struct obj_cgroup *get_obj_cgroup_from_page(struct page *page);
-extern int memcg_nr_cache_ids;
-void memcg_get_cache_ids(void);
-void memcg_put_cache_ids(void);
+int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
+void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
-/*
- * Helper macro to loop through all memcg-specific caches. Callers must still
- * check if the cache is valid (it is either valid or NULL).
- * the slab_mutex must be held when looping through those caches
- */
-#define for_each_memcg_cache_index(_idx) \
- for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
+extern struct static_key_false memcg_bpf_enabled_key;
+static inline bool memcg_bpf_enabled(void)
+{
+ return static_branch_likely(&memcg_bpf_enabled_key);
+}
+
+extern struct static_key_false memcg_kmem_online_key;
-static inline bool memcg_kmem_enabled(void)
+static inline bool memcg_kmem_online(void)
{
- return static_branch_unlikely(&memcg_kmem_enabled_key);
+ return static_branch_likely(&memcg_kmem_online_key);
+}
+
+static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
+ int order)
+{
+ if (memcg_kmem_online())
+ return __memcg_kmem_charge_page(page, gfp, order);
+ return 0;
+}
+
+static inline void memcg_kmem_uncharge_page(struct page *page, int order)
+{
+ if (memcg_kmem_online())
+ __memcg_kmem_uncharge_page(page, order);
}
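/*
 * Illustrative sketch (editorial, not part of this patch): the expected
 * pairing of the wrappers above around a raw page allocation.
 * example_kmem_alloc() is hypothetical; alloc_pages()/__free_pages() come
 * from the page allocator.
 */
static inline struct page *example_kmem_alloc(gfp_t gfp, int order)
{
	struct page *page = alloc_pages(gfp, order);

	if (page && memcg_kmem_charge_page(page, gfp, order)) {
		__free_pages(page, order);	/* charge failed: back out */
		return NULL;
	}
	/* pair with memcg_kmem_uncharge_page(page, order) before freeing */
	return page;
}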
/*
- * helper for accessing a memcg's index. It will be used as an index in the
- * child cache array in kmem_cache, and also to derive its name. This function
- * will return -1 when this is not a kmem-limited memcg.
+ * A helper for accessing memcg's kmem_id, used for getting
+ * corresponding LRU lists.
*/
-static inline int memcg_cache_id(struct mem_cgroup *memcg)
+static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
return memcg ? memcg->kmemcg_id : -1;
}
+struct mem_cgroup *mem_cgroup_from_obj(void *p);
+struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
+
+static inline void count_objcg_event(struct obj_cgroup *objcg,
+ enum vm_event_item idx)
+{
+ struct mem_cgroup *memcg;
+
+ if (!memcg_kmem_online())
+ return;
+
+ rcu_read_lock();
+ memcg = obj_cgroup_memcg(objcg);
+ count_memcg_events(memcg, idx, 1);
+ rcu_read_unlock();
+}
+
#else
-#define for_each_memcg_cache_index(_idx) \
- for (; NULL; )
+static inline bool mem_cgroup_kmem_disabled(void)
+{
+ return true;
+}
-static inline bool memcg_kmem_enabled(void)
+static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
+ int order)
+{
+ return 0;
+}
+
+static inline void memcg_kmem_uncharge_page(struct page *page, int order)
+{
+}
+
+static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
+ int order)
+{
+ return 0;
+}
+
+static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
+{
+}
+
+static inline struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
+{
+ return NULL;
+}
+
+static inline bool memcg_bpf_enabled(void)
{
return false;
}
-static inline int memcg_cache_id(struct mem_cgroup *memcg)
+static inline bool memcg_kmem_online(void)
+{
+ return false;
+}
+
+static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
return -1;
}
-static inline void memcg_get_cache_ids(void)
+static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
+ return NULL;
}
-static inline void memcg_put_cache_ids(void)
+static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
+ return NULL;
}
-#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
+static inline void count_objcg_event(struct obj_cgroup *objcg,
+ enum vm_event_item idx)
+{
+}
+
+#endif /* CONFIG_MEMCG_KMEM */
+
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
+void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
+void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
+#else
+static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
+{
+ return true;
+}
+static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
+ size_t size)
+{
+}
+static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
+ size_t size)
+{
+}
+#endif
#endif /* _LINUX_MEMCONTROL_H */
diff --git a/include/linux/memfd.h b/include/linux/memfd.h
new file mode 100644
index 000000000000..e7abf6fa4c52
--- /dev/null
+++ b/include/linux/memfd.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_MEMFD_H
+#define __LINUX_MEMFD_H
+
+#include <linux/file.h>
+
+#ifdef CONFIG_MEMFD_CREATE
+extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg);
+#else
+static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* __LINUX_MEMFD_H */
diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h
new file mode 100644
index 000000000000..fc9647b1b4f9
--- /dev/null
+++ b/include/linux/memory-tiers.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MEMORY_TIERS_H
+#define _LINUX_MEMORY_TIERS_H
+
+#include <linux/types.h>
+#include <linux/nodemask.h>
+#include <linux/kref.h>
+#include <linux/mmzone.h>
+/*
+ * Each tier covers an abstract distance chunk of size 128.
+ */
+#define MEMTIER_CHUNK_BITS 7
+#define MEMTIER_CHUNK_SIZE (1 << MEMTIER_CHUNK_BITS)
+/*
+ * Smaller abstract distance values imply faster (higher) memory tiers. Offset
+ * the DRAM adistance so that we can accommodate devices with a slightly lower
+ * adistance value (slightly faster) than default DRAM adistance to be part of
+ * the same memory tier.
+ */
+#define MEMTIER_ADISTANCE_DRAM ((4 * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1))
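/*
 * Worked example (editorial note): with MEMTIER_CHUNK_SIZE == 128 this
 * evaluates to 4 * 128 + 64 = 576, the middle of the chunk [512, 640),
 * leaving half a chunk of headroom below DRAM for slightly faster
 * DRAM-like devices to land in the same tier.
 */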
+
+struct memory_tier;
+struct memory_dev_type {
+	/* list of memory types that are part of the same tier as this type */
+ struct list_head tier_sibiling;
+ /* abstract distance for this specific memory type */
+ int adistance;
+ /* Nodes of same abstract distance */
+ nodemask_t nodes;
+ struct kref kref;
+};
+
+#ifdef CONFIG_NUMA
+extern bool numa_demotion_enabled;
+struct memory_dev_type *alloc_memory_type(int adistance);
+void destroy_memory_type(struct memory_dev_type *memtype);
+void init_node_memory_type(int node, struct memory_dev_type *default_type);
+void clear_node_memory_type(int node, struct memory_dev_type *memtype);
+#ifdef CONFIG_MIGRATION
+int next_demotion_node(int node);
+void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
+bool node_is_toptier(int node);
+#else
+static inline int next_demotion_node(int node)
+{
+ return NUMA_NO_NODE;
+}
+
+static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
+{
+ *targets = NODE_MASK_NONE;
+}
+
+static inline bool node_is_toptier(int node)
+{
+ return true;
+}
+#endif
+
+#else
+
+#define numa_demotion_enabled false
+/*
+ * The CONFIG_NUMA implementation reports failure with a non-NULL error
+ * pointer; these !CONFIG_NUMA stubs return NULL.
+ */
+static inline struct memory_dev_type *alloc_memory_type(int adistance)
+{
+ return NULL;
+}
+
+static inline void destroy_memory_type(struct memory_dev_type *memtype)
+{
+}
+
+static inline void init_node_memory_type(int node, struct memory_dev_type *default_type)
+{
+}
+
+static inline void clear_node_memory_type(int node, struct memory_dev_type *memtype)
+{
+}
+
+static inline int next_demotion_node(int node)
+{
+ return NUMA_NO_NODE;
+}
+
+static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
+{
+ *targets = NODE_MASK_NONE;
+}
+
+static inline bool node_is_toptier(int node)
+{
+ return true;
+}
+#endif /* CONFIG_NUMA */
+#endif /* _LINUX_MEMORY_TIERS_H */
diff --git a/include/linux/memory.h b/include/linux/memory.h
index b723a686fc10..31343566c221 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/memory.h - generic memory definition
*
@@ -18,24 +19,79 @@
#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/mutex.h>
-#include <linux/notifier.h>
#define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS)
+/**
+ * struct memory_group - a logical group of memory blocks
+ * @nid: The node id for all memory blocks inside the memory group.
+ * @blocks: List of all memory blocks belonging to this memory group.
+ * @present_kernel_pages: Present (online) memory outside ZONE_MOVABLE of this
+ * memory group.
+ * @present_movable_pages: Present (online) memory in ZONE_MOVABLE of this
+ * memory group.
+ * @is_dynamic: The memory group type: static vs. dynamic
+ * @s.max_pages: Valid with &memory_group.is_dynamic == false. The maximum
+ * number of pages we'll have in this static memory group.
+ * @d.unit_pages: Valid with &memory_group.is_dynamic == true. Unit in pages
+ * in which memory is added/removed in this dynamic memory group.
+ * This granularity defines the alignment of a unit in physical
+ * address space; it has to be at least as big as a single
+ * memory block.
+ *
+ * A memory group logically groups memory blocks; each memory block
+ * belongs to at most one memory group. A memory group corresponds to
+ * a memory device, such as a DIMM or a NUMA node, which spans multiple
+ * memory blocks and might even span multiple non-contiguous physical memory
+ * ranges.
+ *
+ * Modification of members after registration is serialized by memory
+ * hot(un)plug code.
+ */
+struct memory_group {
+ int nid;
+ struct list_head memory_blocks;
+ unsigned long present_kernel_pages;
+ unsigned long present_movable_pages;
+ bool is_dynamic;
+ union {
+ struct {
+ unsigned long max_pages;
+ } s;
+ struct {
+ unsigned long unit_pages;
+ } d;
+ };
+};
+
struct memory_block {
unsigned long start_section_nr;
- unsigned long end_section_nr;
unsigned long state; /* serialized by the dev->lock */
- int section_count; /* serialized by mem_sysfs_mutex */
int online_type; /* for passing data to online routine */
- int phys_device; /* to which fru does this belong? */
- void *hw; /* optional pointer to fw/hw data */
- int (*phys_callback)(struct memory_block *);
+ int nid; /* NID for this memory block */
+ /*
+ * The single zone of this memory block if all PFNs of this memory block
+ * that are System RAM (not a memory hole, not ZONE_DEVICE ranges) are
+ * managed by a single zone. NULL if multiple zones (including nodes)
+ * apply.
+ */
+ struct zone *zone;
struct device dev;
+ /*
+ * Number of vmemmap pages. These pages
+	 * lie at the beginning of the memory block.
+ */
+ unsigned long nr_vmemmap_pages;
+ struct memory_group *group; /* group (if any) for this block */
+ struct list_head group_next; /* next block inside memory group */
+#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
+ atomic_long_t nr_hwpoison;
+#endif
};
int arch_get_memory_phys_device(unsigned long start_pfn);
unsigned long memory_block_size_bytes(void);
+int set_memory_block_size_order(unsigned int order);
/* These states are exposed to userspace as text strings in sysfs */
#define MEM_ONLINE (1<<0) /* exposed to userspace */
@@ -49,23 +105,9 @@ struct memory_notify {
unsigned long start_pfn;
unsigned long nr_pages;
int status_change_nid_normal;
- int status_change_nid_high;
int status_change_nid;
};
-/*
- * During pageblock isolation, count the number of pages within the
- * range [start_pfn, start_pfn + nr_pages) which are owned by code
- * in the notifier chain.
- */
-#define MEM_ISOLATE_COUNT (1<<0)
-
-struct memory_isolate_notify {
- unsigned long start_pfn; /* Start of range to check */
- unsigned int nr_pages; /* # pages in range to check */
- unsigned int pages_found; /* # pages owned found by callbacks */
-};
-
struct notifier_block;
struct mem_section;
@@ -73,13 +115,18 @@ struct mem_section;
* Priorities for the hotplug memory callback routines (stored in decreasing
* order in the callback chain)
*/
-#define SLAB_CALLBACK_PRI 1
-#define IPC_CALLBACK_PRI 10
+#define DEFAULT_CALLBACK_PRI 0
+#define SLAB_CALLBACK_PRI 1
+#define HMAT_CALLBACK_PRI 2
+#define MM_COMPUTE_BATCH_PRI 10
+#define CPUSET_CALLBACK_PRI 10
+#define MEMTIER_HOTPLUG_PRI 100
+#define KSM_CALLBACK_PRI 100
-#ifndef CONFIG_MEMORY_HOTPLUG_SPARSE
-static inline int memory_dev_init(void)
+#ifndef CONFIG_MEMORY_HOTPLUG
+static inline void memory_dev_init(void)
{
- return 0;
+ return;
}
static inline int register_memory_notifier(struct notifier_block *nb)
{
@@ -92,49 +139,43 @@ static inline int memory_notify(unsigned long val, void *v)
{
return 0;
}
-static inline int register_memory_isolate_notifier(struct notifier_block *nb)
-{
- return 0;
-}
-static inline void unregister_memory_isolate_notifier(struct notifier_block *nb)
-{
-}
-static inline int memory_isolate_notify(unsigned long val, void *v)
+static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri)
{
return 0;
}
-#else
+#else /* CONFIG_MEMORY_HOTPLUG */
extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
-extern int register_memory_isolate_notifier(struct notifier_block *nb);
-extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
-extern int register_new_memory(int, struct mem_section *);
-#ifdef CONFIG_MEMORY_HOTREMOVE
-extern int unregister_memory_section(struct mem_section *);
-#endif
-extern int memory_dev_init(void);
+int create_memory_block_devices(unsigned long start, unsigned long size,
+ unsigned long vmemmap_pages,
+ struct memory_group *group);
+void remove_memory_block_devices(unsigned long start, unsigned long size);
+extern void memory_dev_init(void);
extern int memory_notify(unsigned long val, void *v);
-extern int memory_isolate_notify(unsigned long val, void *v);
-extern struct memory_block *find_memory_block_hinted(struct mem_section *,
- struct memory_block *);
-extern struct memory_block *find_memory_block(struct mem_section *);
-#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
-#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
+extern struct memory_block *find_memory_block(unsigned long section_nr);
+typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
+extern int walk_memory_blocks(unsigned long start, unsigned long size,
+ void *arg, walk_memory_blocks_func_t func);
+extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func);
-#ifdef CONFIG_MEMORY_HOTPLUG
+extern int memory_group_register_static(int nid, unsigned long max_pages);
+extern int memory_group_register_dynamic(int nid, unsigned long unit_pages);
+extern int memory_group_unregister(int mgid);
+struct memory_group *memory_group_find_by_id(int mgid);
+typedef int (*walk_memory_groups_func_t)(struct memory_group *, void *);
+int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
+ struct memory_group *excluded, void *arg);
#define hotplug_memory_notifier(fn, pri) ({ \
static __meminitdata struct notifier_block fn##_mem_nb =\
{ .notifier_call = fn, .priority = pri };\
register_memory_notifier(&fn##_mem_nb); \
})
-#define register_hotmemory_notifier(nb) register_memory_notifier(nb)
-#define unregister_hotmemory_notifier(nb) unregister_memory_notifier(nb)
-#else
-#define hotplug_memory_notifier(fn, pri) ({ 0; })
-/* These aren't inline functions due to a GCC bug. */
-#define register_hotmemory_notifier(nb) ({ (void)(nb); 0; })
-#define unregister_hotmemory_notifier(nb) ({ (void)(nb); })
-#endif
+
+#ifdef CONFIG_NUMA
+void memory_block_add_nid(struct memory_block *mem, int nid,
+ enum meminit_context context);
+#endif /* CONFIG_NUMA */
+#endif /* CONFIG_MEMORY_HOTPLUG */
/*
* Kernel text modification mutex, used for code patching. Users of this lock
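As a usage sketch (not from the patch): subsystems hook this chain with the hotplug_memory_notifier() macro declared above. The callback body and names here are illustrative; MEM_OFFLINE is one of the MEM_* states exposed by this header:

static int example_mem_callback(struct notifier_block *nb,
				unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;

	switch (action) {
	case MEM_ONLINE:
		pr_info("onlined %lu pages starting at PFN %lu\n",
			mn->nr_pages, mn->start_pfn);
		break;
	case MEM_OFFLINE:
		/* e.g. shrink per-node state for mn->status_change_nid */
		break;
	}
	return NOTIFY_OK;
}

static int __init example_init(void)
{
	return hotplug_memory_notifier(example_mem_callback,
				       DEFAULT_CALLBACK_PRI);
}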
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index c8a5056a5ae0..9fcbf5706595 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H
@@ -10,63 +11,115 @@ struct page;
struct zone;
struct pglist_data;
struct mem_section;
-struct memory_block;
+struct memory_group;
struct resource;
+struct vmem_altmap;
+struct dev_pagemap;
-#ifdef CONFIG_MEMORY_HOTPLUG
+#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
- * Return page for the valid pfn only if the page is online. All pfn
- * walkers which rely on the fully initialized page->flags and others
- * should use this rather than pfn_valid && pfn_to_page
+ * To support node hot-add, we have to allocate a new pgdat.
+ *
+ * If an arch has a generic-style NODE_DATA(),
+ * node_data[nid] = kzalloc() works well, but this depends on the
+ * architecture.
+ *
+ * In general, generic_alloc_nodedata() is used.
+ *
*/
-#define pfn_to_online_page(pfn) \
-({ \
- struct page *___page = NULL; \
- unsigned long ___nr = pfn_to_section_nr(pfn); \
- \
- if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr))\
- ___page = pfn_to_page(pfn); \
- ___page; \
-})
+extern pg_data_t *arch_alloc_nodedata(int nid);
+extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
+#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
+
+#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid)
+
+#ifdef CONFIG_NUMA
/*
- * Types for free bootmem stored in page->lru.next. These have to be in
- * some random range in unsigned long space for debugging purposes.
+ * XXX: node-aware allocation can't be used to get the new node's memory
+ *   at this time, because the pgdat for the new node is not
+ *   allocated/initialized yet. Using the new node's own memory will
+ *   need more consideration.
*/
-enum {
- MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
- SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
- MIX_SECTION_INFO,
- NODE_INFO,
- MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
-};
+#define generic_alloc_nodedata(nid) \
+({ \
+ memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES); \
+})
+
+extern pg_data_t *node_data[];
+static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
+{
+ node_data[nid] = pgdat;
+}
+
+#else /* !CONFIG_NUMA */
+
+/* never called */
+static inline pg_data_t *generic_alloc_nodedata(int nid)
+{
+ BUG();
+ return NULL;
+}
+static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
+{
+}
+#endif /* CONFIG_NUMA */
+#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+struct page *pfn_to_online_page(unsigned long pfn);
/* Types for control the zone type of onlined and offlined memory */
enum {
- MMOP_OFFLINE = -1,
- MMOP_ONLINE_KEEP,
+ /* Offline the memory. */
+ MMOP_OFFLINE = 0,
+	/* Online the memory. The zone depends on context; see default_zone_for_pfn(). */
+ MMOP_ONLINE,
+ /* Online the memory to ZONE_NORMAL. */
MMOP_ONLINE_KERNEL,
+ /* Online the memory to ZONE_MOVABLE. */
MMOP_ONLINE_MOVABLE,
};
+/* Flags for add_memory() and friends to specify memory hotplug details. */
+typedef int __bitwise mhp_t;
+
+/* No special request */
+#define MHP_NONE ((__force mhp_t)0)
/*
- * pgdat resizing functions
+ * Allow merging of the added System RAM resource with adjacent,
+ * mergeable resources. After a successful call to add_memory_resource()
+ * with this flag set, the resource pointer must no longer be used as it
+ * might be stale, or the resource might have changed.
*/
-static inline
-void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
-{
- spin_lock_irqsave(&pgdat->node_size_lock, *flags);
-}
-static inline
-void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
-{
- spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
-}
-static inline
-void pgdat_resize_init(struct pglist_data *pgdat)
-{
- spin_lock_init(&pgdat->node_size_lock);
-}
+#define MHP_MERGE_RESOURCE ((__force mhp_t)BIT(0))
+
+/*
+ * We want memmap (struct page array) to be self-contained.
+ * To do so, we will use the beginning of the hot-added range to build
+ * the page tables for the memmap array that describes the entire range.
+ * Only selected architectures support it with SPARSE_VMEMMAP.
+ */
+#define MHP_MEMMAP_ON_MEMORY ((__force mhp_t)BIT(1))
+/*
+ * The nid field specifies a memory group id (mgid) instead. The memory group
+ * implies the node id (nid).
+ */
+#define MHP_NID_IS_MGID ((__force mhp_t)BIT(2))
+
+/*
+ * Extended parameters for memory hotplug:
+ * altmap: alternative allocator for memmap array (optional)
+ * pgprot: page protection flags to apply to newly created page tables
+ * (required)
+ */
+struct mhp_params {
+ struct vmem_altmap *altmap;
+ pgprot_t pgprot;
+ struct dev_pagemap *pgmap;
+};
+
+bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
+struct range mhp_get_pluggable_range(bool need_mapping);
+
/*
* Zone resizing functions
*
@@ -94,27 +147,34 @@ static inline void zone_seqlock_init(struct zone *zone)
{
seqlock_init(&zone->span_seqlock);
}
-extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
-extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
-extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
+extern void adjust_present_page_count(struct page *page,
+ struct memory_group *group,
+ long nr_pages);
/* VM interface that may be used by firmware interface */
-extern int online_pages(unsigned long, unsigned long, int);
-extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
- unsigned long *valid_start, unsigned long *valid_end);
-extern void __offline_isolated_pages(unsigned long, unsigned long);
+extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
+ struct zone *zone);
+extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
+extern int online_pages(unsigned long pfn, unsigned long nr_pages,
+ struct zone *zone, struct memory_group *group);
+extern void __offline_isolated_pages(unsigned long start_pfn,
+ unsigned long end_pfn);
-typedef void (*online_page_callback_t)(struct page *page);
+typedef void (*online_page_callback_t)(struct page *page, unsigned int order);
+extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);
-extern void __online_page_set_limits(struct page *page);
-extern void __online_page_increment_counters(struct page *page);
-extern void __online_page_free(struct page *page);
-
extern int try_online_node(int nid);
-extern bool memhp_auto_online;
+extern int arch_add_memory(int nid, u64 start, u64 size,
+ struct mhp_params *params);
+extern u64 max_mem_size;
+
+extern int mhp_online_type_from_str(const char *str);
+
+/* Default online_type (MMOP_*) when new memory blocks are added. */
+extern int mhp_default_online_type;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
@@ -122,105 +182,46 @@ static inline bool movable_node_is_enabled(void)
return movable_node_enabled;
}
-#ifdef CONFIG_MEMORY_HOTREMOVE
-extern bool is_pageblock_removable_nolock(struct page *page);
-extern int arch_remove_memory(u64 start, u64 size);
-extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
- unsigned long nr_pages);
-#endif /* CONFIG_MEMORY_HOTREMOVE */
+extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
+extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct vmem_altmap *altmap);
/* reasonably generic interface to expand the physical pages */
-extern int __add_pages(int nid, unsigned long start_pfn,
- unsigned long nr_pages, bool want_memblock);
+extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
+ struct mhp_params *params);
-#ifdef CONFIG_NUMA
-extern int memory_add_physaddr_to_nid(u64 start);
-#else
-static inline int memory_add_physaddr_to_nid(u64 start)
+#ifndef CONFIG_ARCH_HAS_ADD_PAGES
+static inline int add_pages(int nid, unsigned long start_pfn,
+ unsigned long nr_pages, struct mhp_params *params)
{
- return 0;
+ return __add_pages(nid, start_pfn, nr_pages, params);
}
-#endif
-
-#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
-/*
- * For supporting node-hotadd, we have to allocate a new pgdat.
- *
- * If an arch has generic style NODE_DATA(),
- * node_data[nid] = kzalloc() works well. But it depends on the architecture.
- *
- * In general, generic_alloc_nodedata() is used.
- * Now, arch_free_nodedata() is just defined for error path of node_hot_add.
- *
- */
-extern pg_data_t *arch_alloc_nodedata(int nid);
-extern void arch_free_nodedata(pg_data_t *pgdat);
-extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
-
-#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
+#else /* ARCH_HAS_ADD_PAGES */
+int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
+ struct mhp_params *params);
+#endif /* ARCH_HAS_ADD_PAGES */
-#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid)
-#define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat)
+void get_online_mems(void);
+void put_online_mems(void);
-#ifdef CONFIG_NUMA
-/*
- * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat.
- * XXX: kmalloc_node() can't work well to get new node's memory at this time.
- * Because, pgdat for the new node is not allocated/initialized yet itself.
- * To use new node's memory, more consideration will be necessary.
- */
-#define generic_alloc_nodedata(nid) \
-({ \
- kzalloc(sizeof(pg_data_t), GFP_KERNEL); \
-})
-/*
- * This definition is just for error path in node hotadd.
- * For node hotremove, we have to replace this.
- */
-#define generic_free_nodedata(pgdat) kfree(pgdat)
+void mem_hotplug_begin(void);
+void mem_hotplug_done(void);
-extern pg_data_t *node_data[];
-static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
+/* See kswapd_is_running() */
+static inline void pgdat_kswapd_lock(pg_data_t *pgdat)
{
- node_data[nid] = pgdat;
+ mutex_lock(&pgdat->kswapd_lock);
}
-#else /* !CONFIG_NUMA */
-
-/* never called */
-static inline pg_data_t *generic_alloc_nodedata(int nid)
-{
- BUG();
- return NULL;
-}
-static inline void generic_free_nodedata(pg_data_t *pgdat)
-{
-}
-static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
+static inline void pgdat_kswapd_unlock(pg_data_t *pgdat)
{
+ mutex_unlock(&pgdat->kswapd_lock);
}
-#endif /* CONFIG_NUMA */
-#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
-#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
-extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
-#else
-static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
+static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat)
{
+ mutex_init(&pgdat->kswapd_lock);
}
-#endif
-extern void put_page_bootmem(struct page *page);
-extern void get_page_bootmem(unsigned long ingo, struct page *page,
- unsigned long type);
-
-void get_online_mems(void);
-void put_online_mems(void);
-
-void mem_hotplug_begin(void);
-void mem_hotplug_done(void);
-
-extern void set_zone_contiguous(struct zone *zone);
-extern void clear_zone_contiguous(struct zone *zone);
#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn) \
@@ -231,13 +232,6 @@ extern void clear_zone_contiguous(struct zone *zone);
___page; \
})
-/*
- * Stub functions for when hotplug is off
- */
-static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
-static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
-static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
-
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
return 0;
@@ -250,17 +244,6 @@ static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}
-static inline int mhp_notimplemented(const char *func)
-{
- printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
- dump_stack();
- return -ENOSYS;
-}
-
-static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
-{
-}
-
static inline int try_online_node(int nid)
{
return 0;
@@ -276,49 +259,106 @@ static inline bool movable_node_is_enabled(void)
{
return false;
}
+
+static inline void pgdat_kswapd_lock(pg_data_t *pgdat) {}
+static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) {}
+static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) {}
#endif /* ! CONFIG_MEMORY_HOTPLUG */
+/*
+ * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
+ * platforms might override and use arch_get_mappable_range()
+ * for internal, non-memory-hotplug purposes.
+ */
+struct range arch_get_mappable_range(void);
+
+#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
+/*
+ * pgdat resizing functions
+ */
+static inline
+void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
+{
+ spin_lock_irqsave(&pgdat->node_size_lock, *flags);
+}
+static inline
+void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
+{
+ spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
+}
+static inline
+void pgdat_resize_init(struct pglist_data *pgdat)
+{
+ spin_lock_init(&pgdat->node_size_lock);
+}
+#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
+/*
+ * Stub functions for when hotplug is off
+ */
+static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
+static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
+static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
+#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
+
#ifdef CONFIG_MEMORY_HOTREMOVE
-extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
-extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
-extern void remove_memory(int nid, u64 start, u64 size);
+extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct zone *zone, struct memory_group *group);
+extern int remove_memory(u64 start, u64 size);
+extern void __remove_memory(u64 start, u64 size);
+extern int offline_and_remove_memory(u64 start, u64 size);
#else
-static inline bool is_mem_section_removable(unsigned long pfn,
- unsigned long nr_pages)
-{
- return false;
-}
-
static inline void try_offline_node(int nid) {}
-static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
+static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct zone *zone, struct memory_group *group)
{
return -EINVAL;
}
-static inline void remove_memory(int nid, u64 start, u64 size) {}
+static inline int remove_memory(u64 start, u64 size)
+{
+ return -EBUSY;
+}
+
+static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
-extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
- void *arg, int (*func)(struct memory_block *, void *));
-extern int add_memory(int nid, u64 start, u64 size);
-extern int add_memory_resource(int nid, struct resource *resource, bool online);
-extern int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock);
+extern void set_zone_contiguous(struct zone *zone);
+extern void clear_zone_contiguous(struct zone *zone);
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat);
+extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
+extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
+extern int add_memory_resource(int nid, struct resource *resource,
+ mhp_t mhp_flags);
+extern int add_memory_driver_managed(int nid, u64 start, u64 size,
+ const char *resource_name,
+ mhp_t mhp_flags);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
- unsigned long nr_pages);
-extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
-extern bool is_memblock_offlined(struct memory_block *mem);
-extern void remove_memory(int nid, u64 start, u64 size);
-extern int sparse_add_one_section(struct pglist_data *pgdat, unsigned long start_pfn);
-extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
- unsigned long map_offset);
+ unsigned long nr_pages,
+ struct vmem_altmap *altmap, int migratetype);
+extern void remove_pfn_range_from_zone(struct zone *zone,
+ unsigned long start_pfn,
+ unsigned long nr_pages);
+extern int sparse_add_section(int nid, unsigned long pfn,
+ unsigned long nr_pages, struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap);
+extern void sparse_remove_section(struct mem_section *ms,
+ unsigned long pfn, unsigned long nr_pages,
+ unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
-extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
- int online_type);
-extern struct zone *default_zone_for_pfn(int nid, unsigned long pfn,
+extern struct zone *zone_for_pfn_range(int online_type, int nid,
+ struct memory_group *group, unsigned long start_pfn,
unsigned long nr_pages);
+extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
+ struct mhp_params *params);
+void arch_remove_linear_mapping(u64 start, u64 size);
+extern bool mhp_supports_memmap_on_memory(unsigned long size);
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
#endif /* __LINUX_MEMORY_HOTPLUG_H */
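To show how the reworked hot-add entry points fit together, a hedged sketch of a driver exposing a device-backed range as driver-managed System RAM; the range check mirrors what in-tree callers do, and all names and values are illustrative:

static int example_hotadd(int nid, u64 start, u64 size)
{
	if (!mhp_range_allowed(start, size, true))
		return -ERANGE;

	/* shows up as "System RAM (example)" in /proc/iomem */
	return add_memory_driver_managed(nid, start, size,
					 "System RAM (example)",
					 MHP_MERGE_RESOURCE);
}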
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 3a58b4be1b0c..d232de7cdc56 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* NUMA memory policies for Linux.
* Copyright 2003,2004 Andi Kleen SuSE Labs
@@ -5,9 +6,8 @@
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1
-
+#include <linux/sched.h>
#include <linux/mmzone.h>
-#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
@@ -27,10 +27,10 @@ struct mm_struct;
* the process policy is used. Interrupts ignore the memory policy
* of the current process.
*
- * Locking policy for interlave:
+ * Locking policy for interleave:
* In process context there is no locking because only the process accesses
* its own state. All vma manipulation is somewhat protected by a down_read on
- * mmap_sem.
+ * mmap_lock.
*
* Freeing policy:
* Mempolicy objects are reference counted. A mempolicy will be freed when
@@ -45,11 +45,9 @@ struct mempolicy {
atomic_t refcnt;
unsigned short mode; /* See MPOL_* above */
unsigned short flags; /* See set_mempolicy() MPOL_F_* above */
- union {
- short preferred_node; /* preferred */
- nodemask_t nodes; /* interleave/bind */
- /* undefined for default */
- } v;
+	nodemask_t nodes;	/* interleave/bind/prefer */
+ int home_node; /* Home node to use for MPOL_BIND and MPOL_PREFERRED_MANY */
+
union {
nodemask_t cpuset_mems_allowed; /* relative to these nodes */
nodemask_t user_nodemask; /* nodemask passed by user */
@@ -149,8 +147,10 @@ extern int huge_node(struct vm_area_struct *vma,
unsigned long addr, gfp_t gfp_flags,
struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
-extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
+extern bool mempolicy_in_oom_domain(struct task_struct *tsk,
const nodemask_t *mask);
+extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);
+
extern unsigned int mempolicy_slab_node(void);
extern enum zone_type policy_zone;
@@ -172,38 +172,18 @@ extern int mpol_parse_str(char *str, struct mempolicy **mpol);
extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
/* Check if a vma is migratable */
-static inline bool vma_migratable(struct vm_area_struct *vma)
-{
- if (vma->vm_flags & (VM_IO | VM_PFNMAP))
- return false;
-
- /*
- * DAX device mappings require predictable access latency, so avoid
- * incurring periodic faults.
- */
- if (vma_is_dax(vma))
- return false;
-
-#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
- if (vma->vm_flags & VM_HUGETLB)
- return false;
-#endif
-
- /*
- * Migration allocates pages in the highest zone. If we cannot
- * do so then migration (at least from node to node) is not
- * possible.
- */
- if (vma->vm_file &&
- gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
- < policy_zone)
- return false;
- return true;
-}
+extern bool vma_migratable(struct vm_area_struct *vma);
extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);
+static inline bool mpol_is_preferred_many(struct mempolicy *pol)
+{
+ return (pol->mode == MPOL_PREFERRED_MANY);
+}
+
+extern bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone);
+
#else
struct mempolicy {};
@@ -307,5 +287,11 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
static inline void mpol_put_task_policy(struct task_struct *task)
{
}
+
+static inline bool mpol_is_preferred_many(struct mempolicy *pol)
+{
+ return false;
+}
+
#endif /* CONFIG_NUMA */
#endif
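A sketch of how allocator-side code can use the MPOL_PREFERRED_MANY helpers added above. __alloc_pages() stands in for the core page allocator entry point, and the try-preferred-then-fallback shape is only indicative:

static struct page *example_alloc(gfp_t gfp, unsigned int order,
				  struct mempolicy *pol)
{
	struct page *page = NULL;

	if (mpol_is_preferred_many(pol))
		/* first pass: only the preferred nodes, without warnings */
		page = __alloc_pages(gfp | __GFP_NOWARN, order,
				     first_node(pol->nodes), &pol->nodes);
	if (!page)
		page = alloc_pages(gfp, order);
	return page;
}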
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index b1086c936507..4aae6c06c5f2 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* memory buffer pool support
*/
@@ -24,6 +25,23 @@ typedef struct mempool_s {
wait_queue_head_t wait;
} mempool_t;
+static inline bool mempool_initialized(mempool_t *pool)
+{
+ return pool->elements != NULL;
+}
+
+static inline bool mempool_is_saturated(mempool_t *pool)
+{
+ return READ_ONCE(pool->curr_nr) >= pool->min_nr;
+}
+
+void mempool_exit(mempool_t *pool);
+int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
+ mempool_free_t *free_fn, void *pool_data,
+ gfp_t gfp_mask, int node_id);
+int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
+ mempool_free_t *free_fn, void *pool_data);
+
extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data);
extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
@@ -42,6 +60,14 @@ extern void mempool_free(void *element, mempool_t *pool);
*/
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
void mempool_free_slab(void *element, void *pool_data);
+
+static inline int
+mempool_init_slab_pool(mempool_t *pool, int min_nr, struct kmem_cache *kc)
+{
+ return mempool_init(pool, min_nr, mempool_alloc_slab,
+ mempool_free_slab, (void *) kc);
+}
+
static inline mempool_t *
mempool_create_slab_pool(int min_nr, struct kmem_cache *kc)
{
@@ -55,6 +81,13 @@ mempool_create_slab_pool(int min_nr, struct kmem_cache *kc)
*/
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
void mempool_kfree(void *element, void *pool_data);
+
+static inline int mempool_init_kmalloc_pool(mempool_t *pool, int min_nr, size_t size)
+{
+ return mempool_init(pool, min_nr, mempool_kmalloc,
+ mempool_kfree, (void *) size);
+}
+
static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size)
{
return mempool_create(min_nr, mempool_kmalloc, mempool_kfree,
@@ -67,6 +100,13 @@ static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size)
*/
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
void mempool_free_pages(void *element, void *pool_data);
+
+static inline int mempool_init_page_pool(mempool_t *pool, int min_nr, int order)
+{
+ return mempool_init(pool, min_nr, mempool_alloc_pages,
+ mempool_free_pages, (void *)(long)order);
+}
+
static inline mempool_t *mempool_create_page_pool(int min_nr, int order)
{
return mempool_create(min_nr, mempool_alloc_pages, mempool_free_pages,
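The mempool_init_*() variants above exist so that a pool can be embedded in a caller-owned structure instead of being allocated separately by mempool_create(). A small sketch, with all example_* names invented:

struct example_ctx {
	mempool_t page_pool;
};

static int example_ctx_setup(struct example_ctx *ctx)
{
	/* keep 4 order-0 pages in reserve for forward progress */
	return mempool_init_page_pool(&ctx->page_pool, 4, 0);
}

static void example_ctx_teardown(struct example_ctx *ctx)
{
	if (mempool_initialized(&ctx->page_pool))
		mempool_exit(&ctx->page_pool);
}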
diff --git a/include/linux/memregion.h b/include/linux/memregion.h
new file mode 100644
index 000000000000..c01321467789
--- /dev/null
+++ b/include/linux/memregion.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MEMREGION_H_
+#define _MEMREGION_H_
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/range.h>
+#include <linux/bug.h>
+
+struct memregion_info {
+ int target_node;
+ struct range range;
+};
+
+#ifdef CONFIG_MEMREGION
+int memregion_alloc(gfp_t gfp);
+void memregion_free(int id);
+#else
+static inline int memregion_alloc(gfp_t gfp)
+{
+ return -ENOMEM;
+}
+static inline void memregion_free(int id)
+{
+}
+#endif
+
+/**
+ * cpu_cache_invalidate_memregion - drop any CPU cached data for
+ * memregions described by @res_desc
+ * @res_desc: one of the IORES_DESC_* types
+ *
+ * Perform cache maintenance after a memory event / operation that
+ * changes the contents of physical memory in a cache-incoherent manner.
+ * For example, device memory technologies like NVDIMM and CXL have
+ * device secure erase, and dynamic region provision that can replace
+ * the memory mapped to a given physical address.
+ *
+ * Limit the functionality to architectures that have an efficient way
+ * to writeback and invalidate potentially terabytes of address space at
+ * once. Note that this routine may or may not write back any dirty
+ * contents while performing the invalidation. It is only exported for
+ * the explicit usage of the NVDIMM and CXL modules in the 'DEVMEM'
+ * symbol namespace on bare metal platforms.
+ *
+ * Returns 0 on success or negative error code on a failure to perform
+ * the cache maintenance.
+ */
+#ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
+int cpu_cache_invalidate_memregion(int res_desc);
+bool cpu_cache_has_invalidate_memregion(void);
+#else
+static inline bool cpu_cache_has_invalidate_memregion(void)
+{
+ return false;
+}
+
+static inline int cpu_cache_invalidate_memregion(int res_desc)
+{
+ WARN_ON_ONCE("CPU cache invalidation required");
+ return -ENXIO;
+}
+#endif
+#endif /* _MEMREGION_H_ */
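Usage is deliberately tiny: memregion_alloc()/memregion_free() only hand out unique region ids. A sketch, with the naming convention purely illustrative:

static int example_create_region(void)
{
	int id = memregion_alloc(GFP_KERNEL);

	if (id < 0)
		return id;

	/* e.g. register a "region%d" device keyed by @id; on teardown: */
	memregion_free(id);
	return 0;
}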
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 93416196ba64..1314d9c5f05b 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -1,6 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_
-#include <linux/mm.h>
+
+#include <linux/mmzone.h>
+#include <linux/range.h>
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>
@@ -16,47 +19,189 @@ struct device;
* @alloc: track pages consumed, private to vmemmap_populate()
*/
struct vmem_altmap {
- const unsigned long base_pfn;
+ unsigned long base_pfn;
+ const unsigned long end_pfn;
const unsigned long reserve;
unsigned long free;
unsigned long align;
unsigned long alloc;
};
-unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
-void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
+/*
+ * Specialize ZONE_DEVICE memory into multiple types, each with a
+ * different usage.
+ *
+ * MEMORY_DEVICE_PRIVATE:
+ * Device memory that is not directly addressable by the CPU: CPU can neither
+ * read nor write private memory. In this case, we do still have struct pages
+ * backing the device memory. Doing so simplifies the implementation, but it is
+ * important to remember that there are certain points at which the struct page
+ * must be treated as an opaque object, rather than a "normal" struct page.
+ *
+ * A more complete discussion of unaddressable memory may be found in
+ * include/linux/hmm.h and Documentation/mm/hmm.rst.
+ *
+ * MEMORY_DEVICE_COHERENT:
+ * Device memory that is cache coherent from device and CPU point of view. This
+ * is used on platforms that have an advanced system bus (like CAPI or CXL). A
+ * driver can hotplug the device memory using ZONE_DEVICE and with that memory
+ * type. Any page of a process can be migrated to such memory. However, no
+ * one should be allowed to pin such memory, so that it can always be evicted.
+ *
+ * MEMORY_DEVICE_FS_DAX:
+ * Host memory that has similar access semantics as System RAM i.e. DMA
+ * coherent and supports page pinning. In support of coordinating page
+ * pinning vs other operations MEMORY_DEVICE_FS_DAX arranges for a
+ * wakeup event whenever a page is unpinned and becomes idle. This
+ * wakeup is used to coordinate physical address space management (ex:
+ * fs truncate/hole punch) vs pinned pages (ex: device dma).
+ *
+ * MEMORY_DEVICE_GENERIC:
+ * Host memory that has similar access semantics as System RAM i.e. DMA
+ * coherent and supports page pinning. This is for example used by DAX devices
+ * that expose memory using a character device.
+ *
+ * MEMORY_DEVICE_PCI_P2PDMA:
+ * Device memory residing in a PCI BAR intended for use with Peer-to-Peer
+ * transactions.
+ */
+enum memory_type {
+ /* 0 is reserved to catch uninitialized type fields */
+ MEMORY_DEVICE_PRIVATE = 1,
+ MEMORY_DEVICE_COHERENT,
+ MEMORY_DEVICE_FS_DAX,
+ MEMORY_DEVICE_GENERIC,
+ MEMORY_DEVICE_PCI_P2PDMA,
+};
-#ifdef CONFIG_ZONE_DEVICE
-struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start);
-#else
-static inline struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
-{
- return NULL;
-}
-#endif
+struct dev_pagemap_ops {
+ /*
+ * Called once the page refcount reaches 0. The reference count will be
+ * reset to one by the core code after the method is called to prepare
+ * for handing out the page again.
+ */
+ void (*page_free)(struct page *page);
+
+ /*
+ * Used for private (un-addressable) device memory only. Must migrate
+ * the page back to a CPU accessible page.
+ */
+ vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
+
+ /*
+ * Handle the memory failure happens on a range of pfns. Notify the
+ * processes who are using these pfns, and try to recover the data on
+	 * Handle a memory failure that happens on a range of pfns. Notify
+	 * the processes that are using these pfns, and try to recover the
+	 * data on them if necessary. The mf_flags are finally passed to the
+	 * recovery function through the whole notify routine.
+ * will fall back to a common handler called mf_generic_kill_procs().
+ */
+ int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn,
+ unsigned long nr_pages, int mf_flags);
+};
+
+#define PGMAP_ALTMAP_VALID (1 << 0)
/**
* struct dev_pagemap - metadata for ZONE_DEVICE mappings
* @altmap: pre-allocated/reserved memory for vmemmap allocations
- * @res: physical address range covered by @ref
* @ref: reference count that pins the devm_memremap_pages() mapping
- * @dev: host device of the mapping for debug
+ * @done: completion for @ref
+ * @type: memory type: see MEMORY_DEVICE_* above
+ * @flags: PGMAP_* flags to specify detailed behavior
+ * @vmemmap_shift: structural definition of how the vmemmap page metadata
+ * is populated, specifically the metadata page order.
+ * A zero value (default) uses base pages as the vmemmap metadata
+ * representation. A bigger value will set up compound struct pages
+ * of the requested order value.
+ * @ops: method table
+ * @owner: an opaque pointer identifying the entity that manages this
+ * instance. Used by various helpers to make sure that no
+ * foreign ZONE_DEVICE memory is accessed.
+ * @nr_range: number of ranges to be mapped
+ * @range: range to be mapped when nr_range == 1
+ * @ranges: array of ranges to be mapped when nr_range > 1
*/
struct dev_pagemap {
- struct vmem_altmap *altmap;
- const struct resource *res;
- struct percpu_ref *ref;
- struct device *dev;
+ struct vmem_altmap altmap;
+ struct percpu_ref ref;
+ struct completion done;
+ enum memory_type type;
+ unsigned int flags;
+ unsigned long vmemmap_shift;
+ const struct dev_pagemap_ops *ops;
+ void *owner;
+ int nr_range;
+ union {
+ struct range range;
+ DECLARE_FLEX_ARRAY(struct range, ranges);
+ };
};
+static inline bool pgmap_has_memory_failure(struct dev_pagemap *pgmap)
+{
+ return pgmap->ops && pgmap->ops->memory_failure;
+}
+
+static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
+{
+ if (pgmap->flags & PGMAP_ALTMAP_VALID)
+ return &pgmap->altmap;
+ return NULL;
+}
+
+static inline unsigned long pgmap_vmemmap_nr(struct dev_pagemap *pgmap)
+{
+ return 1 << pgmap->vmemmap_shift;
+}
+
+static inline bool is_device_private_page(const struct page *page)
+{
+ return IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
+ is_zone_device_page(page) &&
+ page->pgmap->type == MEMORY_DEVICE_PRIVATE;
+}
+
+static inline bool folio_is_device_private(const struct folio *folio)
+{
+ return is_device_private_page(&folio->page);
+}
+
+static inline bool is_pci_p2pdma_page(const struct page *page)
+{
+ return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
+ is_zone_device_page(page) &&
+ page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
+}
+
+static inline bool is_device_coherent_page(const struct page *page)
+{
+ return is_zone_device_page(page) &&
+ page->pgmap->type == MEMORY_DEVICE_COHERENT;
+}
+
+static inline bool folio_is_device_coherent(const struct folio *folio)
+{
+ return is_device_coherent_page(&folio->page);
+}
+
#ifdef CONFIG_ZONE_DEVICE
-void *devm_memremap_pages(struct device *dev, struct resource *res,
- struct percpu_ref *ref, struct vmem_altmap *altmap);
-struct dev_pagemap *find_dev_pagemap(resource_size_t phys);
+void zone_device_page_init(struct page *page);
+void *memremap_pages(struct dev_pagemap *pgmap, int nid);
+void memunmap_pages(struct dev_pagemap *pgmap);
+void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
+void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
+struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
+ struct dev_pagemap *pgmap);
+bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
+
+unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
+void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
+unsigned long memremap_compat_align(void);
#else
static inline void *devm_memremap_pages(struct device *dev,
- struct resource *res, struct percpu_ref *ref,
- struct vmem_altmap *altmap)
+ struct dev_pagemap *pgmap)
{
/*
* Fail attempts to call devm_memremap_pages() without
@@ -67,48 +212,43 @@ static inline void *devm_memremap_pages(struct device *dev,
return ERR_PTR(-ENXIO);
}
-static inline struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
+static inline void devm_memunmap_pages(struct device *dev,
+ struct dev_pagemap *pgmap)
{
- return NULL;
}
-#endif
-/**
- * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
- * @pfn: page frame number to lookup page_map
- * @pgmap: optional known pgmap that already has a reference
- *
- * @pgmap allows the overhead of a lookup to be bypassed when @pfn lands in the
- * same mapping.
- */
static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
struct dev_pagemap *pgmap)
{
- const struct resource *res = pgmap ? pgmap->res : NULL;
- resource_size_t phys = PFN_PHYS(pfn);
+ return NULL;
+}
- /*
- * In the cached case we're already holding a live reference so
- * we can simply do a blind increment
- */
- if (res && phys >= res->start && phys <= res->end) {
- percpu_ref_get(pgmap->ref);
- return pgmap;
- }
+static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
+{
+ return false;
+}
- /* fall back to slow path lookup */
- rcu_read_lock();
- pgmap = find_dev_pagemap(phys);
- if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
- pgmap = NULL;
- rcu_read_unlock();
+static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+{
+ return 0;
+}
- return pgmap;
+static inline void vmem_altmap_free(struct vmem_altmap *altmap,
+ unsigned long nr_pfns)
+{
}
+/* when memremap_pages() is disabled all archs can remap a single page */
+static inline unsigned long memremap_compat_align(void)
+{
+ return PAGE_SIZE;
+}
+#endif /* CONFIG_ZONE_DEVICE */
+
static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
if (pgmap)
- percpu_ref_put(pgmap->ref);
+ percpu_ref_put(&pgmap->ref);
}
+
#endif /* _LINUX_MEMREMAP_H_ */
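The net effect of this rework is that a caller now describes its memory in a dev_pagemap it owns, rather than passing res/ref/altmap separately. A hedged sketch for the simplest MEMORY_DEVICE_GENERIC case (the static pgmap and start/size are illustrative; device-private memory would additionally need pgmap->ops providing migrate_to_ram and page_free):

static struct dev_pagemap example_pgmap;

static void *example_map(struct device *dev, u64 start, u64 size)
{
	example_pgmap.type = MEMORY_DEVICE_GENERIC;
	example_pgmap.nr_range = 1;
	example_pgmap.range.start = start;
	example_pgmap.range.end = start + size - 1;

	/* returns the kernel mapping, or an ERR_PTR() on failure */
	return devm_memremap_pages(dev, &example_pgmap);
}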
diff --git a/include/linux/memstick.h b/include/linux/memstick.h
index 690c35a9d4cc..ebf73d4ee969 100644
--- a/include/linux/memstick.h
+++ b/include/linux/memstick.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Sony MemoryStick support
*
* Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef _MEMSTICK_H
@@ -285,6 +281,7 @@ struct memstick_host {
struct memstick_dev *card;
unsigned int retries;
+ bool removing;
/* Notify the host that some requests are pending. */
void (*request)(struct memstick_host *host);
@@ -292,7 +289,7 @@ struct memstick_host {
int (*set_param)(struct memstick_host *host,
enum memstick_param param,
int value);
- unsigned long private[0] ____cacheline_aligned;
+ unsigned long private[] ____cacheline_aligned;
};
struct memstick_driver {
diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h
index c118a7ec94d6..def5df6e74bf 100644
--- a/include/linux/mfd/88pm80x.h
+++ b/include/linux/mfd/88pm80x.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Marvell 88PM80x Interface
*
* Copyright (C) 2012 Marvell International Ltd.
* Qiao Zhou <zhouqiao@marvell.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MFD_88PM80X_H
diff --git a/include/linux/mfd/88pm860x.h b/include/linux/mfd/88pm860x.h
index cd97530205c2..473545a2c425 100644
--- a/include/linux/mfd/88pm860x.h
+++ b/include/linux/mfd/88pm860x.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Marvell 88PM860x Interface
*
* Copyright (C) 2009 Marvell International Ltd.
* Haojian Zhuang <haojian.zhuang@marvell.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MFD_88PM860X_H
diff --git a/include/linux/mfd/aat2870.h b/include/linux/mfd/aat2870.h
index f7316c29bdec..2445842d482d 100644
--- a/include/linux/mfd/aat2870.h
+++ b/include/linux/mfd/aat2870.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/linux/mfd/aat2870.h
*
* Copyright (c) 2011, NVIDIA Corporation.
* Author: Jin Park <jinyoungp@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef __LINUX_MFD_AAT2870_H
@@ -149,7 +136,6 @@ struct aat2870_data {
/* for debugfs */
struct dentry *dentry_root;
- struct dentry *dentry_reg;
};
struct aat2870_subdev_info {
diff --git a/include/linux/mfd/ab3100.h b/include/linux/mfd/ab3100.h
deleted file mode 100644
index afd3080bde24..000000000000
--- a/include/linux/mfd/ab3100.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Copyright (C) 2007-2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
- * AB3100 core access functions
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- *
- */
-
-#include <linux/regulator/machine.h>
-
-struct device;
-
-#ifndef MFD_AB3100_H
-#define MFD_AB3100_H
-
-
-#define AB3100_P1A 0xc0
-#define AB3100_P1B 0xc1
-#define AB3100_P1C 0xc2
-#define AB3100_P1D 0xc3
-#define AB3100_P1E 0xc4
-#define AB3100_P1F 0xc5
-#define AB3100_P1G 0xc6
-#define AB3100_R2A 0xc7
-#define AB3100_R2B 0xc8
-
-/*
- * AB3100, EVENTA1, A2 and A3 event register flags
- * these are catenated into a single 32-bit flag in the code
- * for event notification broadcasts.
- */
-#define AB3100_EVENTA1_ONSWA (0x01<<16)
-#define AB3100_EVENTA1_ONSWB (0x02<<16)
-#define AB3100_EVENTA1_ONSWC (0x04<<16)
-#define AB3100_EVENTA1_DCIO (0x08<<16)
-#define AB3100_EVENTA1_OVER_TEMP (0x10<<16)
-#define AB3100_EVENTA1_SIM_OFF (0x20<<16)
-#define AB3100_EVENTA1_VBUS (0x40<<16)
-#define AB3100_EVENTA1_VSET_USB (0x80<<16)
-
-#define AB3100_EVENTA2_READY_TX (0x01<<8)
-#define AB3100_EVENTA2_READY_RX (0x02<<8)
-#define AB3100_EVENTA2_OVERRUN_ERROR (0x04<<8)
-#define AB3100_EVENTA2_FRAMING_ERROR (0x08<<8)
-#define AB3100_EVENTA2_CHARG_OVERCURRENT (0x10<<8)
-#define AB3100_EVENTA2_MIDR (0x20<<8)
-#define AB3100_EVENTA2_BATTERY_REM (0x40<<8)
-#define AB3100_EVENTA2_ALARM (0x80<<8)
-
-#define AB3100_EVENTA3_ADC_TRIG5 (0x01)
-#define AB3100_EVENTA3_ADC_TRIG4 (0x02)
-#define AB3100_EVENTA3_ADC_TRIG3 (0x04)
-#define AB3100_EVENTA3_ADC_TRIG2 (0x08)
-#define AB3100_EVENTA3_ADC_TRIGVBAT (0x10)
-#define AB3100_EVENTA3_ADC_TRIGVTX (0x20)
-#define AB3100_EVENTA3_ADC_TRIG1 (0x40)
-#define AB3100_EVENTA3_ADC_TRIG0 (0x80)
-
-/* AB3100, STR register flags */
-#define AB3100_STR_ONSWA (0x01)
-#define AB3100_STR_ONSWB (0x02)
-#define AB3100_STR_ONSWC (0x04)
-#define AB3100_STR_DCIO (0x08)
-#define AB3100_STR_BOOT_MODE (0x10)
-#define AB3100_STR_SIM_OFF (0x20)
-#define AB3100_STR_BATT_REMOVAL (0x40)
-#define AB3100_STR_VBUS (0x80)
-
-/*
- * AB3100 contains 8 regulators, one external regulator controller
- * and a buck converter, further the LDO E and buck converter can
- * have separate settings if they are in sleep mode, this is
- * modeled as a separate regulator.
- */
-#define AB3100_NUM_REGULATORS 10
-
-/**
- * struct ab3100
- * @access_mutex: lock out concurrent accesses to the AB3100 registers
- * @dev: pointer to the containing device
- * @i2c_client: I2C client for this chip
- * @testreg_client: secondary client for test registers
- * @chip_name: name of this chip variant
- * @chip_id: 8 bit chip ID for this chip variant
- * @event_subscribers: event subscribers are listed here
- * @startup_events: a copy of the first reading of the event registers
- * @startup_events_read: whether the first events have been read
- *
- * This struct is PRIVATE and devices using it should NOT
- * access ANY fields. It is used as a token for calling the
- * AB3100 functions.
- */
-struct ab3100 {
- struct mutex access_mutex;
- struct device *dev;
- struct i2c_client *i2c_client;
- struct i2c_client *testreg_client;
- char chip_name[32];
- u8 chip_id;
- struct blocking_notifier_head event_subscribers;
- u8 startup_events[3];
- bool startup_events_read;
-};
-
-/**
- * struct ab3100_platform_data
- * Data supplied to initialize board connections to the AB3100
- * @reg_constraints: regulator constraints for target board
- * the order of these constraints are: LDO A, C, D, E,
- * F, G, H, K, EXT and BUCK.
- * @reg_initvals: initial values for the regulator registers
- * plus two sleep settings for LDO E and the BUCK converter.
- * exactly AB3100_NUM_REGULATORS+2 values must be sent in.
- * Order: LDO A, C, E, E sleep, F, G, H, K, EXT, BUCK,
- * BUCK sleep, LDO D. (LDO D need to be initialized last.)
- * @external_voltage: voltage level of the external regulator.
- */
-struct ab3100_platform_data {
- struct regulator_init_data reg_constraints[AB3100_NUM_REGULATORS];
- u8 reg_initvals[AB3100_NUM_REGULATORS+2];
- int external_voltage;
-};
-
-int ab3100_event_register(struct ab3100 *ab3100,
- struct notifier_block *nb);
-int ab3100_event_unregister(struct ab3100 *ab3100,
- struct notifier_block *nb);
-
-#endif /* MFD_AB3100_H */
diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
index 44412c9d26e1..7f07cfe44753 100644
--- a/include/linux/mfd/abx500.h
+++ b/include/linux/mfd/abx500.h
@@ -1,6 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2007-2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
*
* ABX500 core access functions.
* The abx500 interface is used for the Analog Baseband chips.
@@ -28,283 +28,6 @@ struct abx500_init_settings {
u8 setting;
};
-/* Battery driver related data */
-/*
- * ADC for the battery thermistor.
- * When using the ABx500_ADC_THERM_BATCTRL the battery ID resistor is combined
- * with a NTC resistor to both identify the battery and to measure its
- * temperature. Different phone manufactures uses different techniques to both
- * identify the battery and to read its temperature.
- */
-enum abx500_adc_therm {
- ABx500_ADC_THERM_BATCTRL,
- ABx500_ADC_THERM_BATTEMP,
-};
-
-/**
- * struct abx500_res_to_temp - defines one point in a temp to res curve. To
- * be used in battery packs that combines the identification resistor with a
- * NTC resistor.
- * @temp: battery pack temperature in Celsius
- * @resist: NTC resistor net total resistance
- */
-struct abx500_res_to_temp {
- int temp;
- int resist;
-};
-
-/**
- * struct abx500_v_to_cap - Table for translating voltage to capacity
- * @voltage: Voltage in mV
- * @capacity: Capacity in percent
- */
-struct abx500_v_to_cap {
- int voltage;
- int capacity;
-};
-
-/* Forward declaration */
-struct abx500_fg;
-
-/**
- * struct abx500_fg_parameters - Fuel gauge algorithm parameters, in seconds
- * if not specified
- * @recovery_sleep_timer: Time between measurements while recovering
- * @recovery_total_time: Total recovery time
- * @init_timer: Measurement interval during startup
- * @init_discard_time: Time we discard voltage measurement at startup
- * @init_total_time: Total init time during startup
- * @high_curr_time: Time current has to be high to go to recovery
- * @accu_charging: FG accumulation time while charging
- * @accu_high_curr: FG accumulation time in high current mode
- * @high_curr_threshold: High current threshold, in mA
- * @lowbat_threshold: Low battery threshold, in mV
- * @overbat_threshold: Over battery threshold, in mV
- * @battok_falling_th_sel0 Threshold in mV for battOk signal sel0
- * Resolution in 50 mV step.
- * @battok_raising_th_sel1 Threshold in mV for battOk signal sel1
- * Resolution in 50 mV step.
- * @user_cap_limit Capacity reported from user must be within this
- * limit to be considered as sane, in percentage
- * points.
- * @maint_thres This is the threshold where we stop reporting
- * battery full while in maintenance, in per cent
- * @pcut_enable: Enable power cut feature in ab8505
- * @pcut_max_time: Max time threshold
- * @pcut_flag_time: Flagtime threshold
- * @pcut_max_restart: Max number of restarts
- * @pcut_debounce_time: Sets battery debounce time
- */
-struct abx500_fg_parameters {
- int recovery_sleep_timer;
- int recovery_total_time;
- int init_timer;
- int init_discard_time;
- int init_total_time;
- int high_curr_time;
- int accu_charging;
- int accu_high_curr;
- int high_curr_threshold;
- int lowbat_threshold;
- int overbat_threshold;
- int battok_falling_th_sel0;
- int battok_raising_th_sel1;
- int user_cap_limit;
- int maint_thres;
- bool pcut_enable;
- u8 pcut_max_time;
- u8 pcut_flag_time;
- u8 pcut_max_restart;
- u8 pcut_debounce_time;
-};
-
-/**
- * struct abx500_charger_maximization - struct used by the board config.
- * @use_maxi: Enable maximization for this battery type
- * @maxi_chg_curr: Maximum charger current allowed
- * @maxi_wait_cycles: cycles to wait before setting charger current
- * @charger_curr_step delta between two charger current settings (mA)
- */
-struct abx500_maxim_parameters {
- bool ena_maxi;
- int chg_curr;
- int wait_cycles;
- int charger_curr_step;
-};
-
-/**
- * struct abx500_battery_type - different batteries supported
- * @name: battery technology
- * @resis_high: battery upper resistance limit
- * @resis_low: battery lower resistance limit
- * @charge_full_design: Maximum battery capacity in mAh
- * @nominal_voltage: Nominal voltage of the battery in mV
- * @termination_vol: max voltage upto which battery can be charged
- * @termination_curr battery charging termination current in mA
- * @recharge_cap battery capacity limit that will trigger a new
- * full charging cycle in the case where maintenan-
- * -ce charging has been disabled
- * @normal_cur_lvl: charger current in normal state in mA
- * @normal_vol_lvl: charger voltage in normal state in mV
- * @maint_a_cur_lvl: charger current in maintenance A state in mA
- * @maint_a_vol_lvl: charger voltage in maintenance A state in mV
- * @maint_a_chg_timer_h: charge time in maintenance A state
- * @maint_b_cur_lvl: charger current in maintenance B state in mA
- * @maint_b_vol_lvl: charger voltage in maintenance B state in mV
- * @maint_b_chg_timer_h: charge time in maintenance B state
- * @low_high_cur_lvl: charger current in temp low/high state in mA
- * @low_high_vol_lvl: charger voltage in temp low/high state in mV'
- * @battery_resistance: battery inner resistance in mOhm.
- * @n_r_t_tbl_elements: number of elements in r_to_t_tbl
- * @r_to_t_tbl: table containing resistance to temp points
- * @n_v_cap_tbl_elements: number of elements in v_to_cap_tbl
- * @v_to_cap_tbl: Voltage to capacity (in %) table
- * @n_batres_tbl_elements number of elements in the batres_tbl
- * @batres_tbl battery internal resistance vs temperature table
- */
-struct abx500_battery_type {
- int name;
- int resis_high;
- int resis_low;
- int charge_full_design;
- int nominal_voltage;
- int termination_vol;
- int termination_curr;
- int recharge_cap;
- int normal_cur_lvl;
- int normal_vol_lvl;
- int maint_a_cur_lvl;
- int maint_a_vol_lvl;
- int maint_a_chg_timer_h;
- int maint_b_cur_lvl;
- int maint_b_vol_lvl;
- int maint_b_chg_timer_h;
- int low_high_cur_lvl;
- int low_high_vol_lvl;
- int battery_resistance;
- int n_temp_tbl_elements;
- const struct abx500_res_to_temp *r_to_t_tbl;
- int n_v_cap_tbl_elements;
- const struct abx500_v_to_cap *v_to_cap_tbl;
- int n_batres_tbl_elements;
- const struct batres_vs_temp *batres_tbl;
-};
-
-/**
- * struct abx500_bm_capacity_levels - abx500 capacity level data
- * @critical: critical capacity level in percent
- * @low: low capacity level in percent
- * @normal: normal capacity level in percent
- * @high: high capacity level in percent
- * @full: full capacity level in percent
- */
-struct abx500_bm_capacity_levels {
- int critical;
- int low;
- int normal;
- int high;
- int full;
-};
-
-/**
- * struct abx500_bm_charger_parameters - Charger specific parameters
- * @usb_volt_max: maximum allowed USB charger voltage in mV
- * @usb_curr_max: maximum allowed USB charger current in mA
- * @ac_volt_max: maximum allowed AC charger voltage in mV
- * @ac_curr_max: maximum allowed AC charger current in mA
- */
-struct abx500_bm_charger_parameters {
- int usb_volt_max;
- int usb_curr_max;
- int ac_volt_max;
- int ac_curr_max;
-};
-
-/**
- * struct abx500_bm_data - abx500 battery management data
- * @temp_under under this temp, charging is stopped
- * @temp_low between this temp and temp_under charging is reduced
- * @temp_high between this temp and temp_over charging is reduced
- * @temp_over over this temp, charging is stopped
- * @temp_now present battery temperature
- * @temp_interval_chg temperature measurement interval in s when charging
- * @temp_interval_nochg temperature measurement interval in s when not charging
- * @main_safety_tmr_h safety timer for main charger
- * @usb_safety_tmr_h safety timer for usb charger
- * @bkup_bat_v voltage which we charge the backup battery with
- * @bkup_bat_i current which we charge the backup battery with
- * @no_maintenance indicates that maintenance charging is disabled
- * @capacity_scaling indicates whether capacity scaling is to be used
- * @adc_therm placement of thermistor, batctrl or battemp adc
- * @chg_unknown_bat flag to enable charging of unknown batteries
- * @enable_overshoot flag to enable VBAT overshoot control
- * @auto_trig flag to enable auto adc trigger
- * @fg_res resistance of FG resistor in 0.1mOhm
- * @n_btypes number of elements in array bat_type
- * @batt_id index of the identified battery in array bat_type
- * @interval_charging charge alg cycle period time when charging (sec)
- * @interval_not_charging charge alg cycle period time when not charging (sec)
- * @temp_hysteresis temperature hysteresis
- * @gnd_lift_resistance Battery ground to phone ground resistance (mOhm)
- * @n_chg_out_curr number of elements in array chg_output_curr
- * @n_chg_in_curr number of elements in array chg_input_curr
- * @chg_output_curr charger output current level map
- * @chg_input_curr charger input current level map
- * @maxi maximization parameters
- * @cap_levels capacity in percent for the different capacity levels
- * @bat_type table of supported battery types
- * @chg_params charger parameters
- * @fg_params fuel gauge parameters
- */
-struct abx500_bm_data {
- int temp_under;
- int temp_low;
- int temp_high;
- int temp_over;
- int temp_now;
- int temp_interval_chg;
- int temp_interval_nochg;
- int main_safety_tmr_h;
- int usb_safety_tmr_h;
- int bkup_bat_v;
- int bkup_bat_i;
- bool autopower_cfg;
- bool ac_enabled;
- bool usb_enabled;
- bool usb_power_path;
- bool no_maintenance;
- bool capacity_scaling;
- bool chg_unknown_bat;
- bool enable_overshoot;
- bool auto_trig;
- enum abx500_adc_therm adc_therm;
- int fg_res;
- int n_btypes;
- int batt_id;
- int interval_charging;
- int interval_not_charging;
- int temp_hysteresis;
- int gnd_lift_resistance;
- int n_chg_out_curr;
- int n_chg_in_curr;
- int *chg_output_curr;
- int *chg_input_curr;
- const struct abx500_maxim_parameters *maxi;
- const struct abx500_bm_capacity_levels *cap_levels;
- struct abx500_battery_type *bat_type;
- const struct abx500_bm_charger_parameters *chg_params;
- const struct abx500_fg_parameters *fg_params;
-};
-
-enum {
- NTC_EXTERNAL = 0,
- NTC_INTERNAL,
-};
-
-int ab8500_bm_of_probe(struct device *dev,
- struct device_node *np,
- struct abx500_bm_data *bm);
-
int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
u8 value);
int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg,
diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/include/linux/mfd/abx500/ab8500-bm.h
deleted file mode 100644
index e63681eb6c62..000000000000
--- a/include/linux/mfd/abx500/ab8500-bm.h
+++ /dev/null
@@ -1,478 +0,0 @@
-/*
- * Copyright ST-Ericsson 2012.
- *
- * Author: Arun Murthy <arun.murthy@stericsson.com>
- * Licensed under GPLv2.
- */
-
-#ifndef _AB8500_BM_H
-#define _AB8500_BM_H
-
-#include <linux/kernel.h>
-#include <linux/mfd/abx500.h>
-
-/*
- * System control 2 register offsets.
- * bank = 0x02
- */
-#define AB8500_MAIN_WDOG_CTRL_REG 0x01
-#define AB8500_LOW_BAT_REG 0x03
-#define AB8500_BATT_OK_REG 0x04
-/*
- * USB/ULPI register offsets
- * Bank : 0x5
- */
-#define AB8500_USB_LINE_STAT_REG 0x80
-#define AB8500_USB_LINE_CTRL2_REG 0x82
-#define AB8500_USB_LINK1_STAT_REG 0x94
-
-/*
- * Charger / status register offsets
- * Bank : 0x0B
- */
-#define AB8500_CH_STATUS1_REG 0x00
-#define AB8500_CH_STATUS2_REG 0x01
-#define AB8500_CH_USBCH_STAT1_REG 0x02
-#define AB8500_CH_USBCH_STAT2_REG 0x03
-#define AB8540_CH_USBCH_STAT3_REG 0x04
-#define AB8500_CH_STAT_REG 0x05
-
-/*
- * Charger / control register offsets
- * Bank : 0x0B
- */
-#define AB8500_CH_VOLT_LVL_REG 0x40
-#define AB8500_CH_VOLT_LVL_MAX_REG 0x41 /*Only in Cut2.0*/
-#define AB8500_CH_OPT_CRNTLVL_REG 0x42
-#define AB8500_CH_OPT_CRNTLVL_MAX_REG 0x43 /*Only in Cut2.0*/
-#define AB8500_CH_WD_TIMER_REG 0x50
-#define AB8500_CHARG_WD_CTRL 0x51
-#define AB8500_BTEMP_HIGH_TH 0x52
-#define AB8500_LED_INDICATOR_PWM_CTRL 0x53
-#define AB8500_LED_INDICATOR_PWM_DUTY 0x54
-#define AB8500_BATT_OVV 0x55
-#define AB8500_CHARGER_CTRL 0x56
-#define AB8500_BAT_CTRL_CURRENT_SOURCE 0x60 /*Only in Cut2.0*/
-
-/*
- * Charger / main control register offsets
- * Bank : 0x0B
- */
-#define AB8500_MCH_CTRL1 0x80
-#define AB8500_MCH_CTRL2 0x81
-#define AB8500_MCH_IPT_CURLVL_REG 0x82
-#define AB8500_CH_WD_REG 0x83
-
-/*
- * Charger / USB control register offsets
- * Bank : 0x0B
- */
-#define AB8500_USBCH_CTRL1_REG 0xC0
-#define AB8500_USBCH_CTRL2_REG 0xC1
-#define AB8500_USBCH_IPT_CRNTLVL_REG 0xC2
-#define AB8540_USB_PP_MODE_REG 0xC5
-#define AB8540_USB_PP_CHR_REG 0xC6
-
-/*
- * Gas Gauge register offsets
- * Bank : 0x0C
- */
-#define AB8500_GASG_CC_CTRL_REG 0x00
-#define AB8500_GASG_CC_ACCU1_REG 0x01
-#define AB8500_GASG_CC_ACCU2_REG 0x02
-#define AB8500_GASG_CC_ACCU3_REG 0x03
-#define AB8500_GASG_CC_ACCU4_REG 0x04
-#define AB8500_GASG_CC_SMPL_CNTRL_REG 0x05
-#define AB8500_GASG_CC_SMPL_CNTRH_REG 0x06
-#define AB8500_GASG_CC_SMPL_CNVL_REG 0x07
-#define AB8500_GASG_CC_SMPL_CNVH_REG 0x08
-#define AB8500_GASG_CC_CNTR_AVGOFF_REG 0x09
-#define AB8500_GASG_CC_OFFSET_REG 0x0A
-#define AB8500_GASG_CC_NCOV_ACCU 0x10
-#define AB8500_GASG_CC_NCOV_ACCU_CTRL 0x11
-#define AB8500_GASG_CC_NCOV_ACCU_LOW 0x12
-#define AB8500_GASG_CC_NCOV_ACCU_MED 0x13
-#define AB8500_GASG_CC_NCOV_ACCU_HIGH 0x14
-
-/*
- * Interrupt register offsets
- * Bank : 0x0E
- */
-#define AB8500_IT_SOURCE2_REG 0x01
-#define AB8500_IT_SOURCE21_REG 0x14
-
-/*
- * RTC register offsets
- * Bank: 0x0F
- */
-#define AB8500_RTC_BACKUP_CHG_REG 0x0C
-#define AB8500_RTC_CC_CONF_REG 0x01
-#define AB8500_RTC_CTRL_REG 0x0B
-#define AB8500_RTC_CTRL1_REG 0x11
-
-/*
- * OTP register offsets
- * Bank : 0x15
- */
-#define AB8500_OTP_CONF_15 0x0E
-
-/* GPADC constants from AB8500 spec, UM0836 */
-#define ADC_RESOLUTION 1024
-#define ADC_CH_MAIN_MIN 0
-#define ADC_CH_MAIN_MAX 20030
-#define ADC_CH_VBUS_MIN 0
-#define ADC_CH_VBUS_MAX 20030
-#define ADC_CH_VBAT_MIN 2300
-#define ADC_CH_VBAT_MAX 4800
-#define ADC_CH_BKBAT_MIN 0
-#define ADC_CH_BKBAT_MAX 3200
-
-/* Main charge i/p current */
-#define MAIN_CH_IP_CUR_0P9A 0x80
-#define MAIN_CH_IP_CUR_1P0A 0x90
-#define MAIN_CH_IP_CUR_1P1A 0xA0
-#define MAIN_CH_IP_CUR_1P2A 0xB0
-#define MAIN_CH_IP_CUR_1P3A 0xC0
-#define MAIN_CH_IP_CUR_1P4A 0xD0
-#define MAIN_CH_IP_CUR_1P5A 0xE0
-
-/* ChVoltLevel */
-#define CH_VOL_LVL_3P5 0x00
-#define CH_VOL_LVL_4P0 0x14
-#define CH_VOL_LVL_4P05 0x16
-#define CH_VOL_LVL_4P1 0x1B
-#define CH_VOL_LVL_4P15 0x20
-#define CH_VOL_LVL_4P2 0x25
-#define CH_VOL_LVL_4P6 0x4D
-
-/* ChOutputCurrentLevel */
-#define CH_OP_CUR_LVL_0P1 0x00
-#define CH_OP_CUR_LVL_0P2 0x01
-#define CH_OP_CUR_LVL_0P3 0x02
-#define CH_OP_CUR_LVL_0P4 0x03
-#define CH_OP_CUR_LVL_0P5 0x04
-#define CH_OP_CUR_LVL_0P6 0x05
-#define CH_OP_CUR_LVL_0P7 0x06
-#define CH_OP_CUR_LVL_0P8 0x07
-#define CH_OP_CUR_LVL_0P9 0x08
-#define CH_OP_CUR_LVL_1P4 0x0D
-#define CH_OP_CUR_LVL_1P5 0x0E
-#define CH_OP_CUR_LVL_1P6 0x0F
-#define CH_OP_CUR_LVL_2P 0x3F
-
-/* BTEMP High thermal limits */
-#define BTEMP_HIGH_TH_57_0 0x00
-#define BTEMP_HIGH_TH_52 0x01
-#define BTEMP_HIGH_TH_57_1 0x02
-#define BTEMP_HIGH_TH_62 0x03
-
-/* current is mA */
-#define USB_0P1A 100
-#define USB_0P2A 200
-#define USB_0P3A 300
-#define USB_0P4A 400
-#define USB_0P5A 500
-
-#define LOW_BAT_3P1V 0x20
-#define LOW_BAT_2P3V 0x00
-#define LOW_BAT_RESET 0x01
-#define LOW_BAT_ENABLE 0x01
-
-/* Backup battery constants */
-#define BUP_ICH_SEL_50UA 0x00
-#define BUP_ICH_SEL_150UA 0x04
-#define BUP_ICH_SEL_300UA 0x08
-#define BUP_ICH_SEL_700UA 0x0C
-
-enum bup_vch_sel {
- BUP_VCH_SEL_2P5V,
- BUP_VCH_SEL_2P6V,
- BUP_VCH_SEL_2P8V,
- BUP_VCH_SEL_3P1V,
- /*
- * Note that the following 5 values 2.7v, 2.9v, 3.0v, 3.2v, 3.3v
- * are only available on ab8540. You can't choose these 5
- * voltages on ab8500/ab8505/ab9540.
- */
- BUP_VCH_SEL_2P7V,
- BUP_VCH_SEL_2P9V,
- BUP_VCH_SEL_3P0V,
- BUP_VCH_SEL_3P2V,
- BUP_VCH_SEL_3P3V,
-};
-
-#define BUP_VCH_RANGE 0x02
-#define VBUP33_VRTCN 0x01
-
-/* Battery OVV constants */
-#define BATT_OVV_ENA 0x02
-#define BATT_OVV_TH_3P7 0x00
-#define BATT_OVV_TH_4P75 0x01
-
-/* A value to indicate over voltage */
-#define BATT_OVV_VALUE 4750
-
-/* VBUS OVV constants */
-#define VBUS_OVV_SELECT_MASK 0x78
-#define VBUS_OVV_SELECT_5P6V 0x00
-#define VBUS_OVV_SELECT_5P7V 0x08
-#define VBUS_OVV_SELECT_5P8V 0x10
-#define VBUS_OVV_SELECT_5P9V 0x18
-#define VBUS_OVV_SELECT_6P0V 0x20
-#define VBUS_OVV_SELECT_6P1V 0x28
-#define VBUS_OVV_SELECT_6P2V 0x30
-#define VBUS_OVV_SELECT_6P3V 0x38
-
-#define VBUS_AUTO_IN_CURR_LIM_ENA 0x04
-
-/* Fuel Gauge constants */
-#define RESET_ACCU 0x02
-#define READ_REQ 0x01
-#define CC_DEEP_SLEEP_ENA 0x02
-#define CC_PWR_UP_ENA 0x01
-#define CC_SAMPLES_40 0x28
-#define RD_NCONV_ACCU_REQ 0x01
-#define CC_CALIB 0x08
-#define CC_INTAVGOFFSET_ENA 0x10
-#define CC_MUXOFFSET 0x80
-#define CC_INT_CAL_N_AVG_MASK 0x60
-#define CC_INT_CAL_SAMPLES_16 0x40
-#define CC_INT_CAL_SAMPLES_8 0x20
-#define CC_INT_CAL_SAMPLES_4 0x00
-
-/* RTC constants */
-#define RTC_BUP_CH_ENA 0x10
-
-/* BatCtrl Current Source Constants */
-#define BAT_CTRL_7U_ENA 0x01
-#define BAT_CTRL_20U_ENA 0x02
-#define BAT_CTRL_18U_ENA 0x01
-#define BAT_CTRL_16U_ENA 0x02
-#define BAT_CTRL_60U_ENA 0x01
-#define BAT_CTRL_120U_ENA 0x02
-#define BAT_CTRL_CMP_ENA 0x04
-#define FORCE_BAT_CTRL_CMP_HIGH 0x08
-#define BAT_CTRL_PULL_UP_ENA 0x10
-
-/* Battery type */
-#define BATTERY_UNKNOWN 00
-
-/* Registers for pcut feature in ab8505 and ab9540 */
-#define AB8505_RTC_PCUT_CTL_STATUS_REG 0x12
-#define AB8505_RTC_PCUT_TIME_REG 0x13
-#define AB8505_RTC_PCUT_MAX_TIME_REG 0x14
-#define AB8505_RTC_PCUT_FLAG_TIME_REG 0x15
-#define AB8505_RTC_PCUT_RESTART_REG 0x16
-#define AB8505_RTC_PCUT_DEBOUNCE_REG 0x17
-
-/* USB Power Path constants for ab8540 */
-#define BUS_VSYS_VOL_SELECT_MASK 0x06
-#define BUS_VSYS_VOL_SELECT_3P6V 0x00
-#define BUS_VSYS_VOL_SELECT_3P325V 0x02
-#define BUS_VSYS_VOL_SELECT_3P9V 0x04
-#define BUS_VSYS_VOL_SELECT_4P3V 0x06
-#define BUS_POWER_PATH_MODE_ENA 0x01
-#define BUS_PP_PRECHG_CURRENT_MASK 0x0E
-#define BUS_POWER_PATH_PRECHG_ENA 0x01
-
-/**
- * struct res_to_temp - defines one point in a temp to res curve. To
- * be used in battery packs that combine the identification resistor with an
- * NTC resistor.
- * @temp: battery pack temperature in Celsius
- * @resist: NTC resistor net total resistance
- */
-struct res_to_temp {
- int temp;
- int resist;
-};
-
-/**
- * struct batres_vs_temp - defines one point in a temp vs battery internal
- * resistance curve.
- * @temp: battery pack temperature in Celsius
- * @resist: battery internal resistance in mOhm
- */
-struct batres_vs_temp {
- int temp;
- int resist;
-};
-
-/* Forward declaration */
-struct ab8500_fg;
-
-/**
- * struct ab8500_fg_parameters - Fuel gauge algorithm parameters, in seconds
- * if not specified
- * @recovery_sleep_timer: Time between measurements while recovering
- * @recovery_total_time: Total recovery time
- * @init_timer: Measurement interval during startup
- * @init_discard_time: Time we discard voltage measurement at startup
- * @init_total_time: Total init time during startup
- * @high_curr_time: Time current has to be high to go to recovery
- * @accu_charging: FG accumulation time while charging
- * @accu_high_curr: FG accumulation time in high current mode
- * @high_curr_threshold: High current threshold, in mA
- * @lowbat_threshold: Low battery threshold, in mV
- * @battok_falling_th_sel0: Threshold in mV for battOk signal sel0.
- * Resolution in 50 mV steps.
- * @battok_raising_th_sel1: Threshold in mV for battOk signal sel1.
- * Resolution in 50 mV steps.
- * @user_cap_limit: Capacity reported from user must be within this
- * limit to be considered as sane, in percentage
- * points.
- * @maint_thres: This is the threshold where we stop reporting
- * battery full while in maintenance, in percent
- * @pcut_enable: Enable power cut feature in ab8505
- * @pcut_max_time: Max time threshold
- * @pcut_flag_time: Flagtime threshold
- * @pcut_max_restart: Max number of restarts
- * @pcut_debunce_time: Sets battery debounce time
- */
-struct ab8500_fg_parameters {
- int recovery_sleep_timer;
- int recovery_total_time;
- int init_timer;
- int init_discard_time;
- int init_total_time;
- int high_curr_time;
- int accu_charging;
- int accu_high_curr;
- int high_curr_threshold;
- int lowbat_threshold;
- int battok_falling_th_sel0;
- int battok_raising_th_sel1;
- int user_cap_limit;
- int maint_thres;
- bool pcut_enable;
- u8 pcut_max_time;
- u8 pcut_flag_time;
- u8 pcut_max_restart;
- u8 pcut_debunce_time;
-};
-
-/**
- * struct ab8500_maxim_parameters - parameters used by the board config.
- * @ena_maxi: Enable maximization for this battery type
- * @chg_curr: Maximum charger current allowed
- * @wait_cycles: cycles to wait before setting charger current
- * @charger_curr_step: delta between two charger current settings (mA)
- */
-struct ab8500_maxim_parameters {
- bool ena_maxi;
- int chg_curr;
- int wait_cycles;
- int charger_curr_step;
-};
-
-/**
- * struct ab8500_bm_capacity_levels - ab8500 capacity level data
- * @critical: critical capacity level in percent
- * @low: low capacity level in percent
- * @normal: normal capacity level in percent
- * @high: high capacity level in percent
- * @full: full capacity level in percent
- */
-struct ab8500_bm_capacity_levels {
- int critical;
- int low;
- int normal;
- int high;
- int full;
-};
-
-/**
- * struct ab8500_bm_charger_parameters - Charger specific parameters
- * @usb_volt_max: maximum allowed USB charger voltage in mV
- * @usb_curr_max: maximum allowed USB charger current in mA
- * @ac_volt_max: maximum allowed AC charger voltage in mV
- * @ac_curr_max: maximum allowed AC charger current in mA
- */
-struct ab8500_bm_charger_parameters {
- int usb_volt_max;
- int usb_curr_max;
- int ac_volt_max;
- int ac_curr_max;
-};
-
-/**
- * struct ab8500_bm_data - ab8500 battery management data
- * @temp_under under this temp, charging is stopped
- * @temp_low between this temp and temp_under charging is reduced
- * @temp_high between this temp and temp_over charging is reduced
- * @temp_over over this temp, charging is stopped
- * @temp_interval_chg temperature measurement interval in s when charging
- * @temp_interval_nochg temperature measurement interval in s when not charging
- * @main_safety_tmr_h safety timer for main charger
- * @usb_safety_tmr_h safety timer for usb charger
- * @bkup_bat_v voltage which we charge the backup battery with
- * @bkup_bat_i current which we charge the backup battery with
- * @no_maintenance indicates that maintenance charging is disabled
- * @capacity_scaling indicates whether capacity scaling is to be used
- * @adc_therm placement of thermistor, batctrl or battemp adc
- * @chg_unknown_bat flag to enable charging of unknown batteries
- * @enable_overshoot flag to enable VBAT overshoot control
- * @fg_res resistance of FG resistor in 0.1mOhm
- * @n_btypes number of elements in array bat_type
- * @batt_id index of the identified battery in array bat_type
- * @interval_charging charge alg cycle period time when charging (sec)
- * @interval_not_charging charge alg cycle period time when not charging (sec)
- * @temp_hysteresis temperature hysteresis
- * @gnd_lift_resistance Battery ground to phone ground resistance (mOhm)
- * @maxi: maximization parameters
- * @cap_levels capacity in percent for the different capacity levels
- * @bat_type table of supported battery types
- * @chg_params charger parameters
- * @fg_params fuel gauge parameters
- */
-struct ab8500_bm_data {
- int temp_under;
- int temp_low;
- int temp_high;
- int temp_over;
- int temp_interval_chg;
- int temp_interval_nochg;
- int main_safety_tmr_h;
- int usb_safety_tmr_h;
- int bkup_bat_v;
- int bkup_bat_i;
- bool no_maintenance;
- bool capacity_scaling;
- bool chg_unknown_bat;
- bool enable_overshoot;
- enum abx500_adc_therm adc_therm;
- int fg_res;
- int n_btypes;
- int batt_id;
- int interval_charging;
- int interval_not_charging;
- int temp_hysteresis;
- int gnd_lift_resistance;
- const struct ab8500_maxim_parameters *maxi;
- const struct ab8500_bm_capacity_levels *cap_levels;
- const struct ab8500_bm_charger_parameters *chg_params;
- const struct ab8500_fg_parameters *fg_params;
-};
-
-struct ab8500_btemp;
-struct ab8500_gpadc;
-struct ab8500_fg;
-
-#ifdef CONFIG_AB8500_BM
-extern struct abx500_bm_data ab8500_bm_data;
-
-void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA);
-struct ab8500_btemp *ab8500_btemp_get(void);
-int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp);
-int ab8500_btemp_get_temp(struct ab8500_btemp *btemp);
-struct ab8500_fg *ab8500_fg_get(void);
-int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev);
-int ab8500_fg_inst_curr_start(struct ab8500_fg *di);
-int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res);
-int ab8500_fg_inst_curr_started(struct ab8500_fg *di);
-int ab8500_fg_inst_curr_done(struct ab8500_fg *di);
-
-#else
-static struct abx500_bm_data ab8500_bm_data;
-#endif
-#endif /* _AB8500_BM_H */
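For reference, a minimal sketch (not part of the kernel tree) of how a caller could take an instantaneous battery-current sample through the split start/finalize interface declared in the removed header above. The polling interval and the assumption that ab8500_fg_inst_curr_done() returns non-zero once the sample is ready are illustrative, not the driver's documented contract.

static int example_read_inst_curr(void)
{
	struct ab8500_fg *fg;
	int curr, ret;

	fg = ab8500_fg_get();		/* singleton accessor declared above */
	if (!fg)
		return -ENODEV;		/* assumed failure convention */

	ret = ab8500_fg_inst_curr_start(fg);
	if (ret)
		return ret;

	while (!ab8500_fg_inst_curr_done(fg))	/* assumed: non-zero when done */
		msleep(10);			/* assumed poll interval */

	ret = ab8500_fg_inst_curr_finalize(fg, &curr);
	return ret ? ret : curr;	/* current sample on success */
}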
diff --git a/include/linux/mfd/abx500/ab8500-codec.h b/include/linux/mfd/abx500/ab8500-codec.h
index d7079413def0..c19f505122ac 100644
--- a/include/linux/mfd/abx500/ab8500-codec.h
+++ b/include/linux/mfd/abx500/ab8500-codec.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) ST-Ericsson SA 2012
*
@@ -5,10 +6,6 @@
* for ST-Ericsson.
*
* License terms:
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
*/
#ifndef AB8500_CORE_CODEC_H
diff --git a/include/linux/mfd/abx500/ab8500-gpadc.h b/include/linux/mfd/abx500/ab8500-gpadc.h
deleted file mode 100644
index 49ded001049b..000000000000
--- a/include/linux/mfd/abx500/ab8500-gpadc.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (C) 2010 ST-Ericsson SA
- * Licensed under GPLv2.
- *
- * Author: Arun R Murthy <arun.murthy@stericsson.com>
- * Author: Daniel Willerud <daniel.willerud@stericsson.com>
- * Author: M'boumba Cedric Madianga <cedric.madianga@stericsson.com>
- */
-
-#ifndef _AB8500_GPADC_H
-#define _AB8500_GPADC_H
-
-/* GPADC source: from datasheet (ADCSwSel[4:0] in GPADCCtrl2
- * and ADCHwSel[4:0] in GPADCCtrl3) */
-#define BAT_CTRL 0x01
-#define BTEMP_BALL 0x02
-#define MAIN_CHARGER_V 0x03
-#define ACC_DETECT1 0x04
-#define ACC_DETECT2 0x05
-#define ADC_AUX1 0x06
-#define ADC_AUX2 0x07
-#define MAIN_BAT_V 0x08
-#define VBUS_V 0x09
-#define MAIN_CHARGER_C 0x0A
-#define USB_CHARGER_C 0x0B
-#define BK_BAT_V 0x0C
-#define DIE_TEMP 0x0D
-#define USB_ID 0x0E
-#define XTAL_TEMP 0x12
-#define VBAT_TRUE_MEAS 0x13
-#define BAT_CTRL_AND_IBAT 0x1C
-#define VBAT_MEAS_AND_IBAT 0x1D
-#define VBAT_TRUE_MEAS_AND_IBAT 0x1E
-#define BAT_TEMP_AND_IBAT 0x1F
-
-/* Virtual channel used only for ibat conversion to ampere.
- * Battery current conversion (ibat) cannot be requested as a single
- * conversion but is always combined with other input requests.
- */
-#define IBAT_VIRTUAL_CHANNEL 0xFF
-
-#define SAMPLE_1 1
-#define SAMPLE_4 4
-#define SAMPLE_8 8
-#define SAMPLE_16 16
-#define RISING_EDGE 0
-#define FALLING_EDGE 1
-
-/* Arbitrary ADC conversion type constants */
-#define ADC_SW 0
-#define ADC_HW 1
-
-struct ab8500_gpadc;
-
-struct ab8500_gpadc *ab8500_gpadc_get(char *name);
-int ab8500_gpadc_sw_hw_convert(struct ab8500_gpadc *gpadc, u8 channel,
- u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type);
-static inline int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 channel)
-{
- return ab8500_gpadc_sw_hw_convert(gpadc, channel,
- SAMPLE_16, 0, 0, ADC_SW);
-}
-
-int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel,
- u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type);
-int ab8500_gpadc_double_read_raw(struct ab8500_gpadc *gpadc, u8 channel,
- u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type,
- int *ibat);
-int ab8500_gpadc_ad_to_voltage(struct ab8500_gpadc *gpadc,
- u8 channel, int ad_value);
-void ab8540_gpadc_get_otp(struct ab8500_gpadc *gpadc,
- u16 *vmain_l, u16 *vmain_h, u16 *btemp_l, u16 *btemp_h,
- u16 *vbat_l, u16 *vbat_h, u16 *ibat_l, u16 *ibat_h);
-
-#endif /* _AB8500_GPADC_H */
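A minimal sketch of reading the main battery voltage through the GPADC interface removed above. Channel and sample constants come from the header; the device name passed to ab8500_gpadc_get() and the IS_ERR() failure convention are assumptions.

static int example_read_vbat_mv(void)
{
	struct ab8500_gpadc *gpadc;
	int raw;

	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");	/* assumed device name */
	if (IS_ERR(gpadc))
		return PTR_ERR(gpadc);

	/* 16-sample software-triggered conversion, as in the inline helper */
	raw = ab8500_gpadc_read_raw(gpadc, MAIN_BAT_V, SAMPLE_16, 0, 0, ADC_SW);
	if (raw < 0)
		return raw;

	/* scale the raw ADC value to mV using the per-channel ranges */
	return ab8500_gpadc_ad_to_voltage(gpadc, MAIN_BAT_V, raw);
}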
diff --git a/include/linux/mfd/abx500/ab8500-sysctrl.h b/include/linux/mfd/abx500/ab8500-sysctrl.h
index 01024d1aed0e..825f6059d4e3 100644
--- a/include/linux/mfd/abx500/ab8500-sysctrl.h
+++ b/include/linux/mfd/abx500/ab8500-sysctrl.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com> for ST Ericsson.
- * License terms: GNU General Public License (GPL) version 2
*/
#ifndef __AB8500_SYSCTRL_H
#define __AB8500_SYSCTRL_H
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index d33c245e75ca..302a330c5c84 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) ST-Ericsson SA 2010
*
- * License Terms: GNU General Public License v2
* Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
*/
#ifndef MFD_AB8500_H
@@ -368,7 +368,6 @@ struct ab8500 {
int it_latchhier_num;
};
-struct ab8500_regulator_platform_data;
struct ab8500_codec_platform_data;
struct ab8500_sysctrl_platform_data;
@@ -376,11 +375,9 @@ struct ab8500_sysctrl_platform_data;
* struct ab8500_platform_data - AB8500 platform data
* @irq_base: start of AB8500 IRQs, AB8500_NR_IRQS will be used
* @init: board-specific initialization after detection of ab8500
- * @regulator: machine-specific constraints for regulators
*/
struct ab8500_platform_data {
void (*init) (struct ab8500 *);
- struct ab8500_regulator_platform_data *regulator;
struct ab8500_codec_platform_data *codec;
struct ab8500_sysctrl_platform_data *sysctrl;
};
diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
deleted file mode 100644
index 67703f23e7ba..000000000000
--- a/include/linux/mfd/abx500/ux500_chargalg.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2012
- * Author: Johan Gardsmark <johan.gardsmark@stericsson.com> for ST-Ericsson.
- * License terms: GNU General Public License (GPL), version 2
- */
-
-#ifndef _UX500_CHARGALG_H
-#define _UX500_CHARGALG_H
-
-#include <linux/power_supply.h>
-
-/*
- * Valid only for supplies of type:
- * - POWER_SUPPLY_TYPE_MAINS,
- * - POWER_SUPPLY_TYPE_USB,
- * because only they store a pointer to struct ux500_charger as drv_data.
- */
-#define psy_to_ux500_charger(x) power_supply_get_drvdata(x)
-
-/* Forward declaration */
-struct ux500_charger;
-
-struct ux500_charger_ops {
- int (*enable) (struct ux500_charger *, int, int, int);
- int (*check_enable) (struct ux500_charger *, int, int);
- int (*kick_wd) (struct ux500_charger *);
- int (*update_curr) (struct ux500_charger *, int);
- int (*pp_enable) (struct ux500_charger *, bool);
- int (*pre_chg_enable) (struct ux500_charger *, bool);
-};
-
-/**
- * struct ux500_charger - power supply ux500 charger sub class
- * @psy power supply base class
- * @ops ux500 charger operations
- * @max_out_volt maximum output charger voltage in mV
- * @max_out_curr maximum output charger current in mA
- * @enabled indicates if this charger is used or not
- * @external external charger unit (pm2xxx)
- * @power_path USB power path support
- */
-struct ux500_charger {
- struct power_supply *psy;
- struct ux500_charger_ops ops;
- int max_out_volt;
- int max_out_curr;
- int wdt_refresh;
- bool enabled;
- bool external;
- bool power_path;
-};
-
-extern struct blocking_notifier_head charger_notifier_list;
-
-#endif
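A sketch of how a charger driver would embed struct ux500_charger and populate the ops table from the removed header. The callback bodies and the voltage/current limits are placeholders, not values from any real board.

static int my_chg_enable(struct ux500_charger *ch, int en, int vset, int iset)
{
	/* program the charger voltage/current registers here */
	return 0;
}

static int my_chg_kick_wd(struct ux500_charger *ch)
{
	/* refresh the charger watchdog */
	return 0;
}

static struct ux500_charger my_charger = {
	.ops = {
		.enable  = my_chg_enable,
		.kick_wd = my_chg_kick_wd,
	},
	.max_out_volt = 4350,	/* mV, example limit */
	.max_out_curr = 950,	/* mA, example limit */
};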
diff --git a/include/linux/mfd/ac100.h b/include/linux/mfd/ac100.h
index 3c148f196b9f..88005c3a1b2d 100644
--- a/include/linux/mfd/ac100.h
+++ b/include/linux/mfd/ac100.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Functions and registers to access AC100 codec / RTC combo IC.
*
* Copyright (C) 2016 Chen-Yu Tsai
*
* Chen-Yu Tsai <wens@csie.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MFD_AC100_H
diff --git a/include/linux/mfd/adp5520.h b/include/linux/mfd/adp5520.h
index ac37558a4673..9a14f80ec4ad 100644
--- a/include/linux/mfd/adp5520.h
+++ b/include/linux/mfd/adp5520.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Definitions and platform data for Analog Devices
* ADP5520/ADP5501 MFD PMICs (Backlight, LED, GPIO and Keys)
*
* Copyright 2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
diff --git a/include/linux/mfd/altera-a10sr.h b/include/linux/mfd/altera-a10sr.h
index 45a5e6e7db54..d616da4b3c4c 100644
--- a/include/linux/mfd/altera-a10sr.h
+++ b/include/linux/mfd/altera-a10sr.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright Intel Corporation (C) 2014-2016. All Rights Reserved
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- *
* Declarations for Altera Arria10 MAX5 System Resource Chip
*
* Adapted from DA9052
diff --git a/include/linux/mfd/altera-sysmgr.h b/include/linux/mfd/altera-sysmgr.h
new file mode 100644
index 000000000000..b1ef11a83872
--- /dev/null
+++ b/include/linux/mfd/altera-sysmgr.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Linaro Ltd.
+ */
+
+#ifndef __LINUX_MFD_ALTERA_SYSMGR_H__
+#define __LINUX_MFD_ALTERA_SYSMGR_H__
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/firmware/intel/stratix10-smc.h>
+
+struct device_node;
+
+#ifdef CONFIG_MFD_ALTERA_SYSMGR
+struct regmap *altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np,
+ const char *property);
+#else
+static inline struct regmap *
+altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np,
+ const char *property)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+#endif
+
+#endif /* __LINUX_MFD_ALTERA_SYSMGR_H__ */
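A sketch of the typical consumer of the new helper: a peripheral driver resolves the System Manager regmap through a device-tree phandle and then uses ordinary regmap accessors. The property name, register offset and bit are illustrative assumptions.

static int example_probe(struct platform_device *pdev)
{
	struct regmap *sysmgr;

	sysmgr = altr_sysmgr_regmap_lookup_by_phandle(pdev->dev.of_node,
						      "altr,sysmgr-syscon");
	if (IS_ERR(sysmgr))
		return PTR_ERR(sysmgr);

	/* example access: set bit 0 of an assumed control register */
	return regmap_update_bits(sysmgr, 0x10, BIT(0), BIT(0));
}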
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index b31b3be7f8c9..6d6f96b2b29f 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Arizona MFD internals
*
* Copyright 2012 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _WM_ARIZONA_CORE_H
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index bfeecf179895..2d13bbea4f3a 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Platform data for Arizona devices
*
* Copyright 2012 Wolfson Microelectronics. PLC.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ARIZONA_PDATA_H
@@ -56,6 +53,7 @@
#define ARIZONA_MAX_PDM_SPK 2
struct regulator_init_data;
+struct gpio_desc;
struct arizona_micbias {
int mV; /** Regulated voltage */
@@ -77,7 +75,7 @@ struct arizona_micd_range {
};
struct arizona_pdata {
- int reset; /** GPIO controlling /RESET, if any */
+ struct gpio_desc *reset; /** GPIO controlling /RESET, if any */
/** Regulator configuration for MICVDD */
struct arizona_micsupp_pdata micvdd;
@@ -174,6 +172,9 @@ struct arizona_pdata {
/** Mode for outputs */
int out_mono[ARIZONA_MAX_OUTPUT];
+ /** Limit output volumes */
+ unsigned int out_vol_limit[2 * ARIZONA_MAX_OUTPUT];
+
/** PDM speaker mute setting */
unsigned int spk_mute[ARIZONA_MAX_PDM_SPK];
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index 0d06c5d0af93..49e24d1de8d4 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ARIZONA register definitions
*
* Copyright 2012 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ARIZONA_REGISTERS_H
@@ -1189,13 +1186,6 @@
#define ARIZONA_DSP4_SCRATCH_1 0x1441
#define ARIZONA_DSP4_SCRATCH_2 0x1442
#define ARIZONA_DSP4_SCRATCH_3 0x1443
-#define ARIZONA_FRF_COEFF_1 0x1700
-#define ARIZONA_FRF_COEFF_2 0x1701
-#define ARIZONA_FRF_COEFF_3 0x1702
-#define ARIZONA_FRF_COEFF_4 0x1703
-#define ARIZONA_V2_DAC_COMP_1 0x1704
-#define ARIZONA_V2_DAC_COMP_2 0x1705
-
/*
* Field Definitions.
diff --git a/include/linux/mfd/as3711.h b/include/linux/mfd/as3711.h
index 34cc85864be5..4be16b4d2c8a 100644
--- a/include/linux/mfd/as3711.h
+++ b/include/linux/mfd/as3711.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* AS3711 PMIC MFC driver header
*
* Copyright (C) 2012 Renesas Electronics Corporation
* Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the version 2 of the GNU General Public License as
- * published by the Free Software Foundation
*/
#ifndef MFD_AS3711_H
@@ -108,9 +105,9 @@ struct as3711_regulator_pdata {
};
struct as3711_bl_pdata {
- const char *su1_fb;
+ bool su1_fb;
int su1_max_uA;
- const char *su2_fb;
+ bool su2_fb;
int su2_max_uA;
enum as3711_su2_feedback su2_feedback;
enum as3711_su2_fbprot su2_fbprot;
diff --git a/include/linux/mfd/as3722.h b/include/linux/mfd/as3722.h
index 51e6f9414575..5162dfc7c24b 100644
--- a/include/linux/mfd/as3722.h
+++ b/include/linux/mfd/as3722.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* as3722 definitions
*
@@ -6,21 +7,6 @@
*
* Author: Florian Lobmaier <florian.lobmaier@ams.com>
* Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __LINUX_MFD_AS3722_H__
@@ -296,6 +282,8 @@
#define AS3722_ADC1_CONV_NOTREADY BIT(7)
#define AS3722_ADC1_SOURCE_SELECT_MASK 0x1F
+#define AS3722_CTRL_SEQU1_AC_OK_PWR_ON BIT(0)
+
/* GPIO modes */
#define AS3722_GPIO_MODE_MASK 0x07
#define AS3722_GPIO_MODE_INPUT 0x00
@@ -391,6 +379,7 @@ struct as3722 {
unsigned long irq_flags;
bool en_intern_int_pullup;
bool en_intern_i2c_pullup;
+ bool en_ac_ok_pwr_on;
struct regmap_irq_chip_data *irq_data;
};
diff --git a/include/linux/mfd/asic3.h b/include/linux/mfd/asic3.h
deleted file mode 100644
index e1148d037e7b..000000000000
--- a/include/linux/mfd/asic3.h
+++ /dev/null
@@ -1,316 +0,0 @@
-/*
- * include/linux/mfd/asic3.h
- *
- * Compaq ASIC3 headers.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Copyright 2001 Compaq Computer Corporation.
- * Copyright 2007-2008 OpenedHand Ltd.
- */
-
-#ifndef __ASIC3_H__
-#define __ASIC3_H__
-
-#include <linux/types.h>
-
-struct led_classdev;
-struct asic3_led {
- const char *name;
- const char *default_trigger;
- struct led_classdev *cdev;
-};
-
-struct asic3_platform_data {
- u16 *gpio_config;
- unsigned int gpio_config_num;
-
- unsigned int irq_base;
-
- unsigned int gpio_base;
-
- unsigned int clock_rate;
-
- struct asic3_led *leds;
-};
-
-#define ASIC3_NUM_GPIO_BANKS 4
-#define ASIC3_GPIOS_PER_BANK 16
-#define ASIC3_NUM_GPIOS 64
-#define ASIC3_NR_IRQS (ASIC3_NUM_GPIOS + 6)
-
-#define ASIC3_IRQ_LED0 64
-#define ASIC3_IRQ_LED1 65
-#define ASIC3_IRQ_LED2 66
-#define ASIC3_IRQ_SPI 67
-#define ASIC3_IRQ_SMBUS 68
-#define ASIC3_IRQ_OWM 69
-
-#define ASIC3_TO_GPIO(gpio) (NR_BUILTIN_GPIO + (gpio))
-
-#define ASIC3_GPIO_BANK_A 0
-#define ASIC3_GPIO_BANK_B 1
-#define ASIC3_GPIO_BANK_C 2
-#define ASIC3_GPIO_BANK_D 3
-
-#define ASIC3_GPIO(bank, gpio) \
- ((ASIC3_GPIOS_PER_BANK * ASIC3_GPIO_BANK_##bank) + (gpio))
-#define ASIC3_GPIO_bit(gpio) (1 << (gpio & 0xf))
-/* All offsets below are specified with this address bus shift */
-#define ASIC3_DEFAULT_ADDR_SHIFT 2
-
-#define ASIC3_OFFSET(base, reg) (ASIC3_##base##_BASE + ASIC3_##base##_##reg)
-#define ASIC3_GPIO_OFFSET(base, reg) \
- (ASIC3_GPIO_##base##_BASE + ASIC3_GPIO_##reg)
-
-#define ASIC3_GPIO_A_BASE 0x0000
-#define ASIC3_GPIO_B_BASE 0x0100
-#define ASIC3_GPIO_C_BASE 0x0200
-#define ASIC3_GPIO_D_BASE 0x0300
-
-#define ASIC3_GPIO_TO_BANK(gpio) ((gpio) >> 4)
-#define ASIC3_GPIO_TO_BIT(gpio) ((gpio) - \
- (ASIC3_GPIOS_PER_BANK * ((gpio) >> 4)))
-#define ASIC3_GPIO_TO_MASK(gpio) (1 << ASIC3_GPIO_TO_BIT(gpio))
-#define ASIC3_GPIO_TO_BASE(gpio) (ASIC3_GPIO_A_BASE + (((gpio) >> 4) * 0x0100))
-#define ASIC3_BANK_TO_BASE(bank) (ASIC3_GPIO_A_BASE + ((bank) * 0x100))
-
-#define ASIC3_GPIO_MASK 0x00 /* R/W 0:don't mask */
-#define ASIC3_GPIO_DIRECTION 0x04 /* R/W 0:input */
-#define ASIC3_GPIO_OUT 0x08 /* R/W 0:output low */
-#define ASIC3_GPIO_TRIGGER_TYPE 0x0c /* R/W 0:level */
-#define ASIC3_GPIO_EDGE_TRIGGER 0x10 /* R/W 0:falling */
-#define ASIC3_GPIO_LEVEL_TRIGGER 0x14 /* R/W 0:low level detect */
-#define ASIC3_GPIO_SLEEP_MASK 0x18 /* R/W 0:don't mask in sleep mode */
-#define ASIC3_GPIO_SLEEP_OUT 0x1c /* R/W level 0:low in sleep mode */
-#define ASIC3_GPIO_BAT_FAULT_OUT 0x20 /* R/W level 0:low in batt_fault */
-#define ASIC3_GPIO_INT_STATUS 0x24 /* R/W 0:none, 1:detect */
-#define ASIC3_GPIO_ALT_FUNCTION 0x28 /* R/W 1:LED register control */
-#define ASIC3_GPIO_SLEEP_CONF 0x2c /*
- * R/W bit 1: autosleep
- * 0: disable gposlpout in normal mode,
- * enable gposlpout in sleep mode.
- */
-#define ASIC3_GPIO_STATUS 0x30 /* R Pin status */
-
-/*
- * ASIC3 GPIO config
- *
- * Bits 0..6 gpio number
- * Bits 7..13 Alternate function
- * Bit 14 Direction
- * Bit 15 Initial value
- *
- */
-#define ASIC3_CONFIG_GPIO_PIN(config) ((config) & 0x7f)
-#define ASIC3_CONFIG_GPIO_ALT(config) (((config) & (0x7f << 7)) >> 7)
-#define ASIC3_CONFIG_GPIO_DIR(config) ((config & (1 << 14)) >> 14)
-#define ASIC3_CONFIG_GPIO_INIT(config) ((config & (1 << 15)) >> 15)
-#define ASIC3_CONFIG_GPIO(gpio, alt, dir, init) (((gpio) & 0x7f) \
- | (((alt) & 0x7f) << 7) | (((dir) & 0x1) << 14) \
- | (((init) & 0x1) << 15))
-#define ASIC3_CONFIG_GPIO_DEFAULT(gpio, dir, init) \
- ASIC3_CONFIG_GPIO((gpio), 0, (dir), (init))
-#define ASIC3_CONFIG_GPIO_DEFAULT_OUT(gpio, init) \
- ASIC3_CONFIG_GPIO((gpio), 0, 1, (init))
-
-/*
- * Alternate functions
- */
-#define ASIC3_GPIOA11_PWM0 ASIC3_CONFIG_GPIO(11, 1, 1, 0)
-#define ASIC3_GPIOA12_PWM1 ASIC3_CONFIG_GPIO(12, 1, 1, 0)
-#define ASIC3_GPIOA15_CONTROL_CX ASIC3_CONFIG_GPIO(15, 1, 1, 0)
-#define ASIC3_GPIOC0_LED0 ASIC3_CONFIG_GPIO(32, 1, 0, 0)
-#define ASIC3_GPIOC1_LED1 ASIC3_CONFIG_GPIO(33, 1, 0, 0)
-#define ASIC3_GPIOC2_LED2 ASIC3_CONFIG_GPIO(34, 1, 0, 0)
-#define ASIC3_GPIOC3_SPI_RXD ASIC3_CONFIG_GPIO(35, 1, 0, 0)
-#define ASIC3_GPIOC4_CF_nCD ASIC3_CONFIG_GPIO(36, 1, 0, 0)
-#define ASIC3_GPIOC4_SPI_TXD ASIC3_CONFIG_GPIO(36, 1, 1, 0)
-#define ASIC3_GPIOC5_SPI_CLK ASIC3_CONFIG_GPIO(37, 1, 1, 0)
-#define ASIC3_GPIOC5_nCIOW ASIC3_CONFIG_GPIO(37, 1, 1, 0)
-#define ASIC3_GPIOC6_nCIOR ASIC3_CONFIG_GPIO(38, 1, 1, 0)
-#define ASIC3_GPIOC7_nPCE_1 ASIC3_CONFIG_GPIO(39, 1, 0, 0)
-#define ASIC3_GPIOC8_nPCE_2 ASIC3_CONFIG_GPIO(40, 1, 0, 0)
-#define ASIC3_GPIOC9_nPOE ASIC3_CONFIG_GPIO(41, 1, 0, 0)
-#define ASIC3_GPIOC10_nPWE ASIC3_CONFIG_GPIO(42, 1, 0, 0)
-#define ASIC3_GPIOC11_PSKTSEL ASIC3_CONFIG_GPIO(43, 1, 0, 0)
-#define ASIC3_GPIOC12_nPREG ASIC3_CONFIG_GPIO(44, 1, 0, 0)
-#define ASIC3_GPIOC13_nPWAIT ASIC3_CONFIG_GPIO(45, 1, 1, 0)
-#define ASIC3_GPIOC14_nPIOIS16 ASIC3_CONFIG_GPIO(46, 1, 1, 0)
-#define ASIC3_GPIOC15_nPIOR ASIC3_CONFIG_GPIO(47, 1, 0, 0)
-#define ASIC3_GPIOD4_CF_nCD ASIC3_CONFIG_GPIO(52, 1, 0, 0)
-#define ASIC3_GPIOD11_nCIOIS16 ASIC3_CONFIG_GPIO(59, 1, 0, 0)
-#define ASIC3_GPIOD12_nCWAIT ASIC3_CONFIG_GPIO(60, 1, 0, 0)
-#define ASIC3_GPIOD15_nPIOW ASIC3_CONFIG_GPIO(63, 1, 0, 0)
-
-
-#define ASIC3_SPI_Base 0x0400
-#define ASIC3_SPI_Control 0x0000
-#define ASIC3_SPI_TxData 0x0004
-#define ASIC3_SPI_RxData 0x0008
-#define ASIC3_SPI_Int 0x000c
-#define ASIC3_SPI_Status 0x0010
-
-#define SPI_CONTROL_SPR(clk) ((clk) & 0x0f) /* Clock rate */
-
-#define ASIC3_PWM_0_Base 0x0500
-#define ASIC3_PWM_1_Base 0x0600
-#define ASIC3_PWM_TimeBase 0x0000
-#define ASIC3_PWM_PeriodTime 0x0004
-#define ASIC3_PWM_DutyTime 0x0008
-
-#define PWM_TIMEBASE_VALUE(x) ((x)&0xf) /* Low 4 bits sets time base */
-#define PWM_TIMEBASE_ENABLE (1 << 4) /* Enable clock */
-
-#define ASIC3_NUM_LEDS 3
-#define ASIC3_LED_0_Base 0x0700
-#define ASIC3_LED_1_Base 0x0800
-#define ASIC3_LED_2_Base 0x0900
-#define ASIC3_LED_TimeBase 0x0000 /* R/W 7 bits */
-#define ASIC3_LED_PeriodTime 0x0004 /* R/W 12 bits */
-#define ASIC3_LED_DutyTime 0x0008 /* R/W 12 bits */
-#define ASIC3_LED_AutoStopCount 0x000c /* R/W 16 bits */
-
-/* LED TimeBase bits - match ASIC2 */
-#define LED_TBS 0x0f /* Low 4 bits sets time base, max = 13 */
- /* Note: max = 5 on hx4700 */
- /* 0: maximum time base */
- /* 1: maximum time base / 2 */
- /* n: maximum time base / 2^n */
-
-#define LED_EN (1 << 4) /* LED ON/OFF 0:off, 1:on */
-#define LED_AUTOSTOP (1 << 5) /* LED ON/OFF auto stop 0:disable, 1:enable */
-#define LED_ALWAYS (1 << 6) /* LED Interrupt Mask 0:No mask, 1:mask */
-
-#define ASIC3_CLOCK_BASE 0x0A00
-#define ASIC3_CLOCK_CDEX 0x00
-#define ASIC3_CLOCK_SEL 0x04
-
-#define CLOCK_CDEX_SOURCE (1 << 0) /* 2 bits */
-#define CLOCK_CDEX_SOURCE0 (1 << 0)
-#define CLOCK_CDEX_SOURCE1 (1 << 1)
-#define CLOCK_CDEX_SPI (1 << 2)
-#define CLOCK_CDEX_OWM (1 << 3)
-#define CLOCK_CDEX_PWM0 (1 << 4)
-#define CLOCK_CDEX_PWM1 (1 << 5)
-#define CLOCK_CDEX_LED0 (1 << 6)
-#define CLOCK_CDEX_LED1 (1 << 7)
-#define CLOCK_CDEX_LED2 (1 << 8)
-
-/* Clocks settings: 1 for 24.576 MHz, 0 for 12.288Mhz */
-#define CLOCK_CDEX_SD_HOST (1 << 9) /* R/W: SD host clock source */
-#define CLOCK_CDEX_SD_BUS (1 << 10) /* R/W: SD bus clock source ctrl */
-#define CLOCK_CDEX_SMBUS (1 << 11)
-#define CLOCK_CDEX_CONTROL_CX (1 << 12)
-
-#define CLOCK_CDEX_EX0 (1 << 13) /* R/W: 32.768 kHz crystal */
-#define CLOCK_CDEX_EX1 (1 << 14) /* R/W: 24.576 MHz crystal */
-
-#define CLOCK_SEL_SD_HCLK_SEL (1 << 0) /* R/W: SDIO host clock select */
-#define CLOCK_SEL_SD_BCLK_SEL (1 << 1) /* R/W: SDIO bus clock select */
-
-/* R/W: INT clock source control (32.768 kHz) */
-#define CLOCK_SEL_CX (1 << 2)
-
-
-#define ASIC3_INTR_BASE 0x0B00
-
-#define ASIC3_INTR_INT_MASK 0x00 /* Interrupt mask control */
-#define ASIC3_INTR_P_INT_STAT 0x04 /* Peripheral interrupt status */
-#define ASIC3_INTR_INT_CPS 0x08 /* Interrupt timer clock pre-scale */
-#define ASIC3_INTR_INT_TBS 0x0c /* Interrupt timer set */
-
-#define ASIC3_INTMASK_GINTMASK (1 << 0) /* Global INTs mask 1:enable */
-#define ASIC3_INTMASK_GINTEL (1 << 1) /* 1: rising edge, 0: hi level */
-#define ASIC3_INTMASK_MASK0 (1 << 2)
-#define ASIC3_INTMASK_MASK1 (1 << 3)
-#define ASIC3_INTMASK_MASK2 (1 << 4)
-#define ASIC3_INTMASK_MASK3 (1 << 5)
-#define ASIC3_INTMASK_MASK4 (1 << 6)
-#define ASIC3_INTMASK_MASK5 (1 << 7)
-
-#define ASIC3_INTR_PERIPHERAL_A (1 << 0)
-#define ASIC3_INTR_PERIPHERAL_B (1 << 1)
-#define ASIC3_INTR_PERIPHERAL_C (1 << 2)
-#define ASIC3_INTR_PERIPHERAL_D (1 << 3)
-#define ASIC3_INTR_LED0 (1 << 4)
-#define ASIC3_INTR_LED1 (1 << 5)
-#define ASIC3_INTR_LED2 (1 << 6)
-#define ASIC3_INTR_SPI (1 << 7)
-#define ASIC3_INTR_SMBUS (1 << 8)
-#define ASIC3_INTR_OWM (1 << 9)
-
-#define ASIC3_INTR_CPS(x) ((x)&0x0f) /* 4 bits, max 14 */
-#define ASIC3_INTR_CPS_SET (1 << 4) /* Time base enable */
-
-
-/* Basic control of the SD ASIC */
-#define ASIC3_SDHWCTRL_BASE 0x0E00
-#define ASIC3_SDHWCTRL_SDCONF 0x00
-
-#define ASIC3_SDHWCTRL_SUSPEND (1 << 0) /* 1=suspend all SD operations */
-#define ASIC3_SDHWCTRL_CLKSEL (1 << 1) /* 1=SDICK, 0=HCLK */
-#define ASIC3_SDHWCTRL_PCLR (1 << 2) /* All registers of SDIO cleared */
-#define ASIC3_SDHWCTRL_LEVCD (1 << 3) /* SD card detection: 0:low */
-
-/* SD card write protection: 0=high */
-#define ASIC3_SDHWCTRL_LEVWP (1 << 4)
-#define ASIC3_SDHWCTRL_SDLED (1 << 5) /* SD card LED signal 0=disable */
-
-/* SD card power supply ctrl 1=enable */
-#define ASIC3_SDHWCTRL_SDPWR (1 << 6)
-
-#define ASIC3_EXTCF_BASE 0x1100
-
-#define ASIC3_EXTCF_SELECT 0x00
-#define ASIC3_EXTCF_RESET 0x04
-
-#define ASIC3_EXTCF_SMOD0 (1 << 0) /* slot number of mode 0 */
-#define ASIC3_EXTCF_SMOD1 (1 << 1) /* slot number of mode 1 */
-#define ASIC3_EXTCF_SMOD2 (1 << 2) /* slot number of mode 2 */
-#define ASIC3_EXTCF_OWM_EN (1 << 4) /* enable onewire module */
-#define ASIC3_EXTCF_OWM_SMB (1 << 5) /* OWM bus selection */
-#define ASIC3_EXTCF_OWM_RESET (1 << 6) /* ?? used by OWM and CF */
-#define ASIC3_EXTCF_CF0_SLEEP_MODE (1 << 7) /* CF0 sleep state */
-#define ASIC3_EXTCF_CF1_SLEEP_MODE (1 << 8) /* CF1 sleep state */
-#define ASIC3_EXTCF_CF0_PWAIT_EN (1 << 10) /* CF0 PWAIT_n control */
-#define ASIC3_EXTCF_CF1_PWAIT_EN (1 << 11) /* CF1 PWAIT_n control */
-#define ASIC3_EXTCF_CF0_BUF_EN (1 << 12) /* CF0 buffer control */
-#define ASIC3_EXTCF_CF1_BUF_EN (1 << 13) /* CF1 buffer control */
-#define ASIC3_EXTCF_SD_MEM_ENABLE (1 << 14)
-#define ASIC3_EXTCF_CF_SLEEP (1 << 15) /* CF sleep mode control */
-
-/*********************************************
- * The Onewire interface (DS1WM) is handled
- * by the ds1wm driver.
- *
- *********************************************/
-
-#define ASIC3_OWM_BASE 0xC00
-
-/*****************************************************************************
- * The SD configuration registers are at a completely different location
- * in memory. They are divided into three sets of registers:
- *
- * SD_CONFIG Core configuration register
- * SD_CTRL Control registers for SD operations
- * SDIO_CTRL Control registers for SDIO operations
- *
- *****************************************************************************/
-#define ASIC3_SD_CONFIG_BASE 0x0400 /* Assumes 32 bit addressing */
-#define ASIC3_SD_CONFIG_SIZE 0x0200 /* Assumes 32 bit addressing */
-#define ASIC3_SD_CTRL_BASE 0x1000
-#define ASIC3_SDIO_CTRL_BASE 0x1200
-
-#define ASIC3_MAP_SIZE_32BIT 0x2000
-#define ASIC3_MAP_SIZE_16BIT 0x1000
-
-/* Functions needed by leds-asic3 */
-
-struct asic3;
-extern void asic3_write_register(struct asic3 *asic, unsigned int reg, u32 val);
-extern u32 asic3_read_register(struct asic3 *asic, unsigned int reg);
-
-#endif /* __ASIC3_H__ */
diff --git a/include/linux/mfd/atc260x/atc2603c.h b/include/linux/mfd/atc260x/atc2603c.h
new file mode 100644
index 000000000000..07ac640ef3e1
--- /dev/null
+++ b/include/linux/mfd/atc260x/atc2603c.h
@@ -0,0 +1,281 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ATC2603C PMIC register definitions
+ *
+ * Copyright (C) 2020 Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
+ */
+
+#ifndef __LINUX_MFD_ATC260X_ATC2603C_H
+#define __LINUX_MFD_ATC260X_ATC2603C_H
+
+enum atc2603c_irq_def {
+ ATC2603C_IRQ_AUDIO = 0,
+ ATC2603C_IRQ_OV,
+ ATC2603C_IRQ_OC,
+ ATC2603C_IRQ_OT,
+ ATC2603C_IRQ_UV,
+ ATC2603C_IRQ_ALARM,
+ ATC2603C_IRQ_ONOFF,
+ ATC2603C_IRQ_SGPIO,
+ ATC2603C_IRQ_IR,
+ ATC2603C_IRQ_REMCON,
+ ATC2603C_IRQ_POWER_IN,
+};
+
+/* PMU Registers */
+#define ATC2603C_PMU_SYS_CTL0 0x00
+#define ATC2603C_PMU_SYS_CTL1 0x01
+#define ATC2603C_PMU_SYS_CTL2 0x02
+#define ATC2603C_PMU_SYS_CTL3 0x03
+#define ATC2603C_PMU_SYS_CTL4 0x04
+#define ATC2603C_PMU_SYS_CTL5 0x05
+#define ATC2603C_PMU_SYS_CTL6 0x06
+#define ATC2603C_PMU_SYS_CTL7 0x07
+#define ATC2603C_PMU_SYS_CTL8 0x08
+#define ATC2603C_PMU_SYS_CTL9 0x09
+#define ATC2603C_PMU_BAT_CTL0 0x0A
+#define ATC2603C_PMU_BAT_CTL1 0x0B
+#define ATC2603C_PMU_VBUS_CTL0 0x0C
+#define ATC2603C_PMU_VBUS_CTL1 0x0D
+#define ATC2603C_PMU_WALL_CTL0 0x0E
+#define ATC2603C_PMU_WALL_CTL1 0x0F
+#define ATC2603C_PMU_SYS_PENDING 0x10
+#define ATC2603C_PMU_DC1_CTL0 0x11
+#define ATC2603C_PMU_DC1_CTL1 0x12 // Undocumented
+#define ATC2603C_PMU_DC1_CTL2 0x13 // Undocumented
+#define ATC2603C_PMU_DC2_CTL0 0x14
+#define ATC2603C_PMU_DC2_CTL1 0x15 // Undocumented
+#define ATC2603C_PMU_DC2_CTL2 0x16 // Undocumented
+#define ATC2603C_PMU_DC3_CTL0 0x17
+#define ATC2603C_PMU_DC3_CTL1 0x18 // Undocumented
+#define ATC2603C_PMU_DC3_CTL2 0x19 // Undocumented
+#define ATC2603C_PMU_DC4_CTL0 0x1A // Undocumented
+#define ATC2603C_PMU_DC4_CTL1 0x1B // Undocumented
+#define ATC2603C_PMU_DC5_CTL0 0x1C // Undocumented
+#define ATC2603C_PMU_DC5_CTL1 0x1D // Undocumented
+#define ATC2603C_PMU_LDO1_CTL 0x1E
+#define ATC2603C_PMU_LDO2_CTL 0x1F
+#define ATC2603C_PMU_LDO3_CTL 0x20
+#define ATC2603C_PMU_LDO4_CTL 0x21 // Undocumented
+#define ATC2603C_PMU_LDO5_CTL 0x22
+#define ATC2603C_PMU_LDO6_CTL 0x23
+#define ATC2603C_PMU_LDO7_CTL 0x24
+#define ATC2603C_PMU_LDO8_CTL 0x25 // Undocumented
+#define ATC2603C_PMU_LDO9_CTL 0x26 // Undocumented
+#define ATC2603C_PMU_LDO10_CTL 0x27 // Undocumented
+#define ATC2603C_PMU_LDO11_CTL 0x28
+#define ATC2603C_PMU_SWITCH_CTL 0x29
+#define ATC2603C_PMU_OV_CTL0 0x2A
+#define ATC2603C_PMU_OV_CTL1 0x2B
+#define ATC2603C_PMU_OV_STATUS 0x2C
+#define ATC2603C_PMU_OV_EN 0x2D
+#define ATC2603C_PMU_OV_INT_EN 0x2E
+#define ATC2603C_PMU_OC_CTL 0x2F
+#define ATC2603C_PMU_OC_STATUS 0x30
+#define ATC2603C_PMU_OC_EN 0x31
+#define ATC2603C_PMU_OC_INT_EN 0x32
+#define ATC2603C_PMU_UV_CTL0 0x33
+#define ATC2603C_PMU_UV_CTL1 0x34
+#define ATC2603C_PMU_UV_STATUS 0x35
+#define ATC2603C_PMU_UV_EN 0x36
+#define ATC2603C_PMU_UV_INT_EN 0x37
+#define ATC2603C_PMU_OT_CTL 0x38
+#define ATC2603C_PMU_CHARGER_CTL0 0x39
+#define ATC2603C_PMU_CHARGER_CTL1 0x3A
+#define ATC2603C_PMU_CHARGER_CTL2 0x3B
+#define ATC2603C_PMU_BAKCHARGER_CTL 0x3C // Undocumented
+#define ATC2603C_PMU_APDS_CTL 0x3D
+#define ATC2603C_PMU_AUXADC_CTL0 0x3E
+#define ATC2603C_PMU_AUXADC_CTL1 0x3F
+#define ATC2603C_PMU_BATVADC 0x40
+#define ATC2603C_PMU_BATIADC 0x41
+#define ATC2603C_PMU_WALLVADC 0x42
+#define ATC2603C_PMU_WALLIADC 0x43
+#define ATC2603C_PMU_VBUSVADC 0x44
+#define ATC2603C_PMU_VBUSIADC 0x45
+#define ATC2603C_PMU_SYSPWRADC 0x46
+#define ATC2603C_PMU_REMCONADC 0x47
+#define ATC2603C_PMU_SVCCADC 0x48
+#define ATC2603C_PMU_CHGIADC 0x49
+#define ATC2603C_PMU_IREFADC 0x4A
+#define ATC2603C_PMU_BAKBATADC 0x4B
+#define ATC2603C_PMU_ICTEMPADC 0x4C
+#define ATC2603C_PMU_AUXADC0 0x4D
+#define ATC2603C_PMU_AUXADC1 0x4E
+#define ATC2603C_PMU_AUXADC2 0x4F
+#define ATC2603C_PMU_ICMADC 0x50
+#define ATC2603C_PMU_BDG_CTL 0x51 // Undocumented
+#define ATC2603C_RTC_CTL 0x52
+#define ATC2603C_RTC_MSALM 0x53
+#define ATC2603C_RTC_HALM 0x54
+#define ATC2603C_RTC_YMDALM 0x55
+#define ATC2603C_RTC_MS 0x56
+#define ATC2603C_RTC_H 0x57
+#define ATC2603C_RTC_DC 0x58
+#define ATC2603C_RTC_YMD 0x59
+#define ATC2603C_EFUSE_DAT 0x5A // Undocumented
+#define ATC2603C_EFUSECRTL1 0x5B // Undocumented
+#define ATC2603C_EFUSECRTL2 0x5C // Undocumented
+#define ATC2603C_PMU_FW_USE0 0x5D // Undocumented
+#define ATC2603C_PMU_FW_USE1 0x5E // Undocumented
+#define ATC2603C_PMU_FW_USE2 0x5F // Undocumented
+#define ATC2603C_PMU_FW_USE3 0x60 // Undocumented
+#define ATC2603C_PMU_FW_USE4 0x61 // Undocumented
+#define ATC2603C_PMU_ABNORMAL_STATUS 0x62
+#define ATC2603C_PMU_WALL_APDS_CTL 0x63
+#define ATC2603C_PMU_REMCON_CTL0 0x64
+#define ATC2603C_PMU_REMCON_CTL1 0x65
+#define ATC2603C_PMU_MUX_CTL0 0x66
+#define ATC2603C_PMU_SGPIO_CTL0 0x67
+#define ATC2603C_PMU_SGPIO_CTL1 0x68
+#define ATC2603C_PMU_SGPIO_CTL2 0x69
+#define ATC2603C_PMU_SGPIO_CTL3 0x6A
+#define ATC2603C_PMU_SGPIO_CTL4 0x6B
+#define ATC2603C_PWMCLK_CTL 0x6C
+#define ATC2603C_PWM0_CTL 0x6D
+#define ATC2603C_PWM1_CTL 0x6E
+#define ATC2603C_PMU_ADC_DBG0 0x70
+#define ATC2603C_PMU_ADC_DBG1 0x71
+#define ATC2603C_PMU_ADC_DBG2 0x72
+#define ATC2603C_PMU_ADC_DBG3 0x73
+#define ATC2603C_PMU_ADC_DBG4 0x74
+#define ATC2603C_IRC_CTL 0x80
+#define ATC2603C_IRC_STAT 0x81
+#define ATC2603C_IRC_CC 0x82
+#define ATC2603C_IRC_KDC 0x83
+#define ATC2603C_IRC_WK 0x84
+#define ATC2603C_IRC_RCC 0x85
+#define ATC2603C_IRC_FILTER 0x86
+
+/* AUDIO_OUT Registers */
+#define ATC2603C_AUDIOINOUT_CTL 0xA0
+#define ATC2603C_AUDIO_DEBUGOUTCTL 0xA1
+#define ATC2603C_DAC_DIGITALCTL 0xA2
+#define ATC2603C_DAC_VOLUMECTL0 0xA3
+#define ATC2603C_DAC_ANALOG0 0xA4
+#define ATC2603C_DAC_ANALOG1 0xA5
+#define ATC2603C_DAC_ANALOG2 0xA6
+#define ATC2603C_DAC_ANALOG3 0xA7
+
+/* AUDIO_IN Registers */
+#define ATC2603C_ADC_DIGITALCTL 0xA8
+#define ATC2603C_ADC_HPFCTL 0xA9
+#define ATC2603C_ADC_CTL 0xAA
+#define ATC2603C_AGC_CTL0 0xAB
+#define ATC2603C_AGC_CTL1 0xAC // Undocumented
+#define ATC2603C_AGC_CTL2 0xAD
+#define ATC2603C_ADC_ANALOG0 0xAE
+#define ATC2603C_ADC_ANALOG1 0xAF
+
+/* PCM_IF Registers */
+#define ATC2603C_PCM0_CTL 0xB0 // Undocumented
+#define ATC2603C_PCM1_CTL 0xB1 // Undocumented
+#define ATC2603C_PCM2_CTL 0xB2 // Undocumented
+#define ATC2603C_PCMIF_CTL 0xB3 // Undocumented
+
+/* CMU_CONTROL Registers */
+#define ATC2603C_CMU_DEVRST 0xC1 // Undocumented
+
+/* INTS Registers */
+#define ATC2603C_INTS_PD 0xC8
+#define ATC2603C_INTS_MSK 0xC9
+
+/* MFP Registers */
+#define ATC2603C_MFP_CTL 0xD0
+#define ATC2603C_PAD_VSEL 0xD1 // Undocumented
+#define ATC2603C_GPIO_OUTEN 0xD2
+#define ATC2603C_GPIO_INEN 0xD3
+#define ATC2603C_GPIO_DAT 0xD4
+#define ATC2603C_PAD_DRV 0xD5
+#define ATC2603C_PAD_EN 0xD6
+#define ATC2603C_DEBUG_SEL 0xD7 // Undocumented
+#define ATC2603C_DEBUG_IE 0xD8 // Undocumented
+#define ATC2603C_DEBUG_OE 0xD9 // Undocumented
+#define ATC2603C_BIST_START 0x0A // Undocumented
+#define ATC2603C_BIST_RESULT 0x0B // Undocumented
+#define ATC2603C_CHIP_VER 0xDC
+
+/* TWSI Registers */
+#define ATC2603C_SADDR 0xFF
+
+/* PMU_SYS_CTL0 Register Mask Bits */
+#define ATC2603C_PMU_SYS_CTL0_IR_WK_EN BIT(5)
+#define ATC2603C_PMU_SYS_CTL0_RESET_WK_EN BIT(6)
+#define ATC2603C_PMU_SYS_CTL0_HDSW_WK_EN BIT(7)
+#define ATC2603C_PMU_SYS_CTL0_ALARM_WK_EN BIT(8)
+#define ATC2603C_PMU_SYS_CTL0_REM_CON_WK_EN BIT(9)
+#define ATC2603C_PMU_SYS_CTL0_RESTART_EN BIT(10)
+#define ATC2603C_PMU_SYS_CTL0_SGPIOIRQ_WK_EN BIT(11)
+#define ATC2603C_PMU_SYS_CTL0_ONOFF_SHORT_WK_EN BIT(12)
+#define ATC2603C_PMU_SYS_CTL0_ONOFF_LONG_WK_EN BIT(13)
+#define ATC2603C_PMU_SYS_CTL0_WALL_WK_EN BIT(14)
+#define ATC2603C_PMU_SYS_CTL0_USB_WK_EN BIT(15)
+#define ATC2603C_PMU_SYS_CTL0_WK_ALL (GENMASK(15, 5) & (~BIT(10)))
+
+/* PMU_SYS_CTL1 Register Mask Bits */
+#define ATC2603C_PMU_SYS_CTL1_EN_S1 BIT(0)
+#define ATC2603C_PMU_SYS_CTL1_LB_S4_EN BIT(2)
+#define ATC2603C_PMU_SYS_CTL1_LB_S4 GENMASK(4, 3)
+#define ATC2603C_PMU_SYS_CTL1_LB_S4_3_1V BIT(4)
+#define ATC2603C_PMU_SYS_CTL1_IR_WK_FLAG BIT(5)
+#define ATC2603C_PMU_SYS_CTL1_RESET_WK_FLAG BIT(6)
+#define ATC2603C_PMU_SYS_CTL1_HDSW_WK_FLAG BIT(7)
+#define ATC2603C_PMU_SYS_CTL1_ALARM_WK_FLAG BIT(8)
+#define ATC2603C_PMU_SYS_CTL1_REM_CON_WK_FLAG BIT(9)
+#define ATC2603C_PMU_SYS_CTL1_ONOFF_PRESS_RESET_IRQ_PD BIT(10)
+#define ATC2603C_PMU_SYS_CTL1_SGPIOIRQ_WK_FLAG BIT(11)
+#define ATC2603C_PMU_SYS_CTL1_ONOFF_SHORT_WK_FLAG BIT(12)
+#define ATC2603C_PMU_SYS_CTL1_ONOFF_LONG_WK_FLAG BIT(13)
+#define ATC2603C_PMU_SYS_CTL1_WALL_WK_FLAG BIT(14)
+#define ATC2603C_PMU_SYS_CTL1_USB_WK_FLAG BIT(15)
+
+/* PMU_SYS_CTL2 Register Mask Bits */
+#define ATC2603C_PMU_SYS_CTL2_PMU_A_EN BIT(0)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_INT_EN BIT(1)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_PD BIT(2)
+#define ATC2603C_PMU_SYS_CTL2_S2TIMER GENMASK(5, 3)
+#define ATC2603C_PMU_SYS_CTL2_S2_TIMER_EN BIT(6)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_RESET_TIME_SEL GENMASK(8, 7)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_RESET_EN BIT(9)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_TIME GENMASK(11, 10)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_INT_EN BIT(12)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_LONG_PRESS BIT(13)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_SHORT_PRESS BIT(14)
+#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS BIT(15)
+
+/* PMU_SYS_CTL3 Register Mask Bits */
+#define ATC2603C_PMU_SYS_CTL3_S2S3TOS1_TIMER GENMASK(8, 7)
+#define ATC2603C_PMU_SYS_CTL3_S2S3TOS1_TIMER_EN BIT(9)
+#define ATC2603C_PMU_SYS_CTL3_S3_TIMER GENMASK(12, 10)
+#define ATC2603C_PMU_SYS_CTL3_S3_TIMER_EN BIT(13)
+#define ATC2603C_PMU_SYS_CTL3_EN_S3 BIT(14)
+#define ATC2603C_PMU_SYS_CTL3_EN_S2 BIT(15)
+
+/* PMU_SYS_CTL5 Register Mask Bits */
+#define ATC2603C_PMU_SYS_CTL5_WALLWKDTEN BIT(7)
+#define ATC2603C_PMU_SYS_CTL5_VBUSWKDTEN BIT(8)
+#define ATC2603C_PMU_SYS_CTL5_REMCON_DECT_EN BIT(9)
+#define ATC2603C_PMU_SYS_CTL5_ONOFF_8S_SEL BIT(10)
+
+/* INTS_MSK Register Mask Bits */
+#define ATC2603C_INTS_MSK_AUDIO BIT(0)
+#define ATC2603C_INTS_MSK_OV BIT(1)
+#define ATC2603C_INTS_MSK_OC BIT(2)
+#define ATC2603C_INTS_MSK_OT BIT(3)
+#define ATC2603C_INTS_MSK_UV BIT(4)
+#define ATC2603C_INTS_MSK_ALARM BIT(5)
+#define ATC2603C_INTS_MSK_ONOFF BIT(6)
+#define ATC2603C_INTS_MSK_SGPIO BIT(7)
+#define ATC2603C_INTS_MSK_IR BIT(8)
+#define ATC2603C_INTS_MSK_REMCON BIT(9)
+#define ATC2603C_INTS_MSK_POWERIN BIT(10)
+
+/* CMU_DEVRST Register Mask Bits */
+#define ATC2603C_CMU_DEVRST_MFP BIT(1)
+#define ATC2603C_CMU_DEVRST_INTS BIT(2)
+#define ATC2603C_CMU_DEVRST_AUDIO BIT(4)
+
+/* PAD_EN Register Mask Bits */
+#define ATC2603C_PAD_EN_EXTIRQ BIT(0)
+
+#endif /* __LINUX_MFD_ATC260X_ATC2603C_H */
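The BIT()/GENMASK() definitions above are meant to be combined with regmap accessors; assuming the caller holds the PMIC's regmap, enabling the ONOFF long-press wakeup source could look like the following sketch (the choice of register and bit is taken from the definitions above, the surrounding driver context is assumed).

static int example_enable_onoff_wakeup(struct regmap *regmap)
{
	/* set the long-press wakeup enable bit in PMU_SYS_CTL0 */
	return regmap_update_bits(regmap, ATC2603C_PMU_SYS_CTL0,
				  ATC2603C_PMU_SYS_CTL0_ONOFF_LONG_WK_EN,
				  ATC2603C_PMU_SYS_CTL0_ONOFF_LONG_WK_EN);
}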
diff --git a/include/linux/mfd/atc260x/atc2609a.h b/include/linux/mfd/atc260x/atc2609a.h
new file mode 100644
index 000000000000..b957d7bd73e9
--- /dev/null
+++ b/include/linux/mfd/atc260x/atc2609a.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ATC2609A PMIC register definitions
+ *
+ * Copyright (C) 2019 Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#ifndef __LINUX_MFD_ATC260X_ATC2609A_H
+#define __LINUX_MFD_ATC260X_ATC2609A_H
+
+enum atc2609a_irq_def {
+ ATC2609A_IRQ_AUDIO = 0,
+ ATC2609A_IRQ_OV,
+ ATC2609A_IRQ_OC,
+ ATC2609A_IRQ_OT,
+ ATC2609A_IRQ_UV,
+ ATC2609A_IRQ_ALARM,
+ ATC2609A_IRQ_ONOFF,
+ ATC2609A_IRQ_WKUP,
+ ATC2609A_IRQ_IR,
+ ATC2609A_IRQ_REMCON,
+ ATC2609A_IRQ_POWER_IN,
+};
+
+/* PMU Registers */
+#define ATC2609A_PMU_SYS_CTL0 0x00
+#define ATC2609A_PMU_SYS_CTL1 0x01
+#define ATC2609A_PMU_SYS_CTL2 0x02
+#define ATC2609A_PMU_SYS_CTL3 0x03
+#define ATC2609A_PMU_SYS_CTL4 0x04
+#define ATC2609A_PMU_SYS_CTL5 0x05
+#define ATC2609A_PMU_SYS_CTL6 0x06
+#define ATC2609A_PMU_SYS_CTL7 0x07
+#define ATC2609A_PMU_SYS_CTL8 0x08
+#define ATC2609A_PMU_SYS_CTL9 0x09
+#define ATC2609A_PMU_BAT_CTL0 0x0A
+#define ATC2609A_PMU_BAT_CTL1 0x0B
+#define ATC2609A_PMU_VBUS_CTL0 0x0C
+#define ATC2609A_PMU_VBUS_CTL1 0x0D
+#define ATC2609A_PMU_WALL_CTL0 0x0E
+#define ATC2609A_PMU_WALL_CTL1 0x0F
+#define ATC2609A_PMU_SYS_PENDING 0x10
+#define ATC2609A_PMU_APDS_CTL0 0x11
+#define ATC2609A_PMU_APDS_CTL1 0x12
+#define ATC2609A_PMU_APDS_CTL2 0x13
+#define ATC2609A_PMU_CHARGER_CTL 0x14
+#define ATC2609A_PMU_BAKCHARGER_CTL 0x15
+#define ATC2609A_PMU_SWCHG_CTL0 0x16
+#define ATC2609A_PMU_SWCHG_CTL1 0x17
+#define ATC2609A_PMU_SWCHG_CTL2 0x18
+#define ATC2609A_PMU_SWCHG_CTL3 0x19
+#define ATC2609A_PMU_SWCHG_CTL4 0x1A
+#define ATC2609A_PMU_DC_OSC 0x1B
+#define ATC2609A_PMU_DC0_CTL0 0x1C
+#define ATC2609A_PMU_DC0_CTL1 0x1D
+#define ATC2609A_PMU_DC0_CTL2 0x1E
+#define ATC2609A_PMU_DC0_CTL3 0x1F
+#define ATC2609A_PMU_DC0_CTL4 0x20
+#define ATC2609A_PMU_DC0_CTL5 0x21
+#define ATC2609A_PMU_DC0_CTL6 0x22
+#define ATC2609A_PMU_DC1_CTL0 0x23
+#define ATC2609A_PMU_DC1_CTL1 0x24
+#define ATC2609A_PMU_DC1_CTL2 0x25
+#define ATC2609A_PMU_DC1_CTL3 0x26
+#define ATC2609A_PMU_DC1_CTL4 0x27
+#define ATC2609A_PMU_DC1_CTL5 0x28
+#define ATC2609A_PMU_DC1_CTL6 0x29
+#define ATC2609A_PMU_DC2_CTL0 0x2A
+#define ATC2609A_PMU_DC2_CTL1 0x2B
+#define ATC2609A_PMU_DC2_CTL2 0x2C
+#define ATC2609A_PMU_DC2_CTL3 0x2D
+#define ATC2609A_PMU_DC2_CTL4 0x2E
+#define ATC2609A_PMU_DC2_CTL5 0x2F
+#define ATC2609A_PMU_DC2_CTL6 0x30
+#define ATC2609A_PMU_DC3_CTL0 0x31
+#define ATC2609A_PMU_DC3_CTL1 0x32
+#define ATC2609A_PMU_DC3_CTL2 0x33
+#define ATC2609A_PMU_DC3_CTL3 0x34
+#define ATC2609A_PMU_DC3_CTL4 0x35
+#define ATC2609A_PMU_DC3_CTL5 0x36
+#define ATC2609A_PMU_DC3_CTL6 0x37
+#define ATC2609A_PMU_DC_ZR 0x38
+#define ATC2609A_PMU_LDO0_CTL0 0x39
+#define ATC2609A_PMU_LDO0_CTL1 0x3A
+#define ATC2609A_PMU_LDO1_CTL0 0x3B
+#define ATC2609A_PMU_LDO1_CTL1 0x3C
+#define ATC2609A_PMU_LDO2_CTL0 0x3D
+#define ATC2609A_PMU_LDO2_CTL1 0x3E
+#define ATC2609A_PMU_LDO3_CTL0 0x3F
+#define ATC2609A_PMU_LDO3_CTL1 0x40
+#define ATC2609A_PMU_LDO4_CTL0 0x41
+#define ATC2609A_PMU_LDO4_CTL1 0x42
+#define ATC2609A_PMU_LDO5_CTL0 0x43
+#define ATC2609A_PMU_LDO5_CTL1 0x44
+#define ATC2609A_PMU_LDO6_CTL0 0x45
+#define ATC2609A_PMU_LDO6_CTL1 0x46
+#define ATC2609A_PMU_LDO7_CTL0 0x47
+#define ATC2609A_PMU_LDO7_CTL1 0x48
+#define ATC2609A_PMU_LDO8_CTL0 0x49
+#define ATC2609A_PMU_LDO8_CTL1 0x4A
+#define ATC2609A_PMU_LDO9_CTL 0x4B
+#define ATC2609A_PMU_OV_INT_EN 0x4C
+#define ATC2609A_PMU_OV_STATUS 0x4D
+#define ATC2609A_PMU_UV_INT_EN 0x4E
+#define ATC2609A_PMU_UV_STATUS 0x4F
+#define ATC2609A_PMU_OC_INT_EN 0x50
+#define ATC2609A_PMU_OC_STATUS 0x51
+#define ATC2609A_PMU_OT_CTL 0x52
+#define ATC2609A_PMU_CM_CTL0 0x53
+#define ATC2609A_PMU_FW_USE0 0x54
+#define ATC2609A_PMU_FW_USE1 0x55
+#define ATC2609A_PMU_ADC12B_I 0x56
+#define ATC2609A_PMU_ADC12B_V 0x57
+#define ATC2609A_PMU_ADC12B_DUMMY 0x58
+#define ATC2609A_PMU_AUXADC_CTL0 0x59
+#define ATC2609A_PMU_AUXADC_CTL1 0x5A
+#define ATC2609A_PMU_BATVADC 0x5B
+#define ATC2609A_PMU_BATIADC 0x5C
+#define ATC2609A_PMU_WALLVADC 0x5D
+#define ATC2609A_PMU_WALLIADC 0x5E
+#define ATC2609A_PMU_VBUSVADC 0x5F
+#define ATC2609A_PMU_VBUSIADC 0x60
+#define ATC2609A_PMU_SYSPWRADC 0x61
+#define ATC2609A_PMU_REMCONADC 0x62
+#define ATC2609A_PMU_SVCCADC 0x63
+#define ATC2609A_PMU_CHGIADC 0x64
+#define ATC2609A_PMU_IREFADC 0x65
+#define ATC2609A_PMU_BAKBATADC 0x66
+#define ATC2609A_PMU_ICTEMPADC 0x67
+#define ATC2609A_PMU_AUXADC0 0x68
+#define ATC2609A_PMU_AUXADC1 0x69
+#define ATC2609A_PMU_AUXADC2 0x6A
+#define ATC2609A_PMU_AUXADC3 0x6B
+#define ATC2609A_PMU_ICTEMPADC_ADJ 0x6C
+#define ATC2609A_PMU_BDG_CTL 0x6D
+#define ATC2609A_RTC_CTL 0x6E
+#define ATC2609A_RTC_MSALM 0x6F
+#define ATC2609A_RTC_HALM 0x70
+#define ATC2609A_RTC_YMDALM 0x71
+#define ATC2609A_RTC_MS 0x72
+#define ATC2609A_RTC_H 0x73
+#define ATC2609A_RTC_DC 0x74
+#define ATC2609A_RTC_YMD 0x75
+#define ATC2609A_EFUSE_DAT 0x76
+#define ATC2609A_EFUSECRTL1 0x77
+#define ATC2609A_EFUSECRTL2 0x78
+#define ATC2609A_PMU_DC4_CTL0 0x79
+#define ATC2609A_PMU_DC4_CTL1 0x7A
+#define ATC2609A_PMU_DC4_CTL2 0x7B
+#define ATC2609A_PMU_DC4_CTL3 0x7C
+#define ATC2609A_PMU_DC4_CTL4 0x7D
+#define ATC2609A_PMU_DC4_CTL5 0x7E
+#define ATC2609A_PMU_DC4_CTL6 0x7F
+#define ATC2609A_PMU_PWR_STATUS 0x80
+#define ATC2609A_PMU_S2_PWR 0x81
+#define ATC2609A_CLMT_CTL0 0x82
+#define ATC2609A_CLMT_DATA0 0x83
+#define ATC2609A_CLMT_DATA1 0x84
+#define ATC2609A_CLMT_DATA2 0x85
+#define ATC2609A_CLMT_DATA3 0x86
+#define ATC2609A_CLMT_ADD0 0x87
+#define ATC2609A_CLMT_ADD1 0x88
+#define ATC2609A_CLMT_OCV_TABLE 0x89
+#define ATC2609A_CLMT_R_TABLE 0x8A
+#define ATC2609A_PMU_PWRON_CTL0 0x8D
+#define ATC2609A_PMU_PWRON_CTL1 0x8E
+#define ATC2609A_PMU_PWRON_CTL2 0x8F
+#define ATC2609A_IRC_CTL 0x90
+#define ATC2609A_IRC_STAT 0x91
+#define ATC2609A_IRC_CC 0x92
+#define ATC2609A_IRC_KDC 0x93
+#define ATC2609A_IRC_WK 0x94
+#define ATC2609A_IRC_RCC 0x95
+
+/* AUDIO_OUT Registers */
+#define ATC2609A_AUDIOINOUT_CTL 0xA0
+#define ATC2609A_AUDIO_DEBUGOUTCTL 0xA1
+#define ATC2609A_DAC_DIGITALCTL 0xA2
+#define ATC2609A_DAC_VOLUMECTL0 0xA3
+#define ATC2609A_DAC_ANALOG0 0xA4
+#define ATC2609A_DAC_ANALOG1 0xA5
+#define ATC2609A_DAC_ANALOG2 0xA6
+#define ATC2609A_DAC_ANALOG3 0xA7
+
+/* AUDIO_IN Registers */
+#define ATC2609A_ADC_DIGITALCTL 0xA8
+#define ATC2609A_ADC_HPFCTL 0xA9
+#define ATC2609A_ADC_CTL 0xAA
+#define ATC2609A_AGC_CTL0 0xAB
+#define ATC2609A_AGC_CTL1 0xAC
+#define ATC2609A_AGC_CTL2 0xAD
+#define ATC2609A_ADC_ANALOG0 0xAE
+#define ATC2609A_ADC_ANALOG1 0xAF
+
+/* PCM_IF Registers */
+#define ATC2609A_PCM0_CTL 0xB0
+#define ATC2609A_PCM1_CTL 0xB1
+#define ATC2609A_PCM2_CTL 0xB2
+#define ATC2609A_PCMIF_CTL 0xB3
+
+/* CMU_CONTROL Registers */
+#define ATC2609A_CMU_DEVRST 0xC1
+
+/* INTS Registers */
+#define ATC2609A_INTS_PD 0xC8
+#define ATC2609A_INTS_MSK 0xC9
+
+/* MFP Registers */
+#define ATC2609A_MFP_CTL 0xD0
+#define ATC2609A_PAD_VSEL 0xD1
+#define ATC2609A_GPIO_OUTEN 0xD2
+#define ATC2609A_GPIO_INEN 0xD3
+#define ATC2609A_GPIO_DAT 0xD4
+#define ATC2609A_PAD_DRV 0xD5
+#define ATC2609A_PAD_EN 0xD6
+#define ATC2609A_DEBUG_SEL 0xD7
+#define ATC2609A_DEBUG_IE 0xD8
+#define ATC2609A_DEBUG_OE 0xD9
+#define ATC2609A_CHIP_VER 0xDC
+
+/* PWSI Registers */
+#define ATC2609A_PWSI_CTL 0xF0
+#define ATC2609A_PWSI_STATUS 0xF1
+
+/* TWSI Registers */
+#define ATC2609A_SADDR 0xFF
+
+/* PMU_SYS_CTL0 Register Mask Bits */
+#define ATC2609A_PMU_SYS_CTL0_IR_WK_EN BIT(5)
+#define ATC2609A_PMU_SYS_CTL0_RESET_WK_EN BIT(6)
+#define ATC2609A_PMU_SYS_CTL0_HDSW_WK_EN BIT(7)
+#define ATC2609A_PMU_SYS_CTL0_ALARM_WK_EN BIT(8)
+#define ATC2609A_PMU_SYS_CTL0_REM_CON_WK_EN BIT(9)
+#define ATC2609A_PMU_SYS_CTL0_RESTART_EN BIT(10)
+#define ATC2609A_PMU_SYS_CTL0_WKIRQ_WK_EN BIT(11)
+#define ATC2609A_PMU_SYS_CTL0_ONOFF_SHORT_WK_EN BIT(12)
+#define ATC2609A_PMU_SYS_CTL0_ONOFF_LONG_WK_EN BIT(13)
+#define ATC2609A_PMU_SYS_CTL0_WALL_WK_EN BIT(14)
+#define ATC2609A_PMU_SYS_CTL0_USB_WK_EN BIT(15)
+#define ATC2609A_PMU_SYS_CTL0_WK_ALL (GENMASK(15, 5) & (~BIT(10)))
+
+/* PMU_SYS_CTL1 Register Mask Bits */
+#define ATC2609A_PMU_SYS_CTL1_EN_S1 BIT(0)
+#define ATC2609A_PMU_SYS_CTL1_LB_S4_EN BIT(2)
+#define ATC2609A_PMU_SYS_CTL1_LB_S4 GENMASK(4, 3)
+#define ATC2609A_PMU_SYS_CTL1_LB_S4_3_1V BIT(4)
+#define ATC2609A_PMU_SYS_CTL1_IR_WK_FLAG BIT(5)
+#define ATC2609A_PMU_SYS_CTL1_RESET_WK_FLAG BIT(6)
+#define ATC2609A_PMU_SYS_CTL1_HDSW_WK_FLAG BIT(7)
+#define ATC2609A_PMU_SYS_CTL1_ALARM_WK_FLAG BIT(8)
+#define ATC2609A_PMU_SYS_CTL1_REM_CON_WK_FLAG BIT(9)
+#define ATC2609A_PMU_SYS_CTL1_RESTART_WK_FLAG BIT(10)
+#define ATC2609A_PMU_SYS_CTL1_WKIRQ_WK_FLAG BIT(11)
+#define ATC2609A_PMU_SYS_CTL1_ONOFF_SHORT_WK_FLAG BIT(12)
+#define ATC2609A_PMU_SYS_CTL1_ONOFF_LONG_WK_FLAG BIT(13)
+#define ATC2609A_PMU_SYS_CTL1_WALL_WK_FLAG BIT(14)
+#define ATC2609A_PMU_SYS_CTL1_USB_WK_FLAG BIT(15)
+
+/* PMU_SYS_CTL2 Register Mask Bits */
+#define ATC2609A_PMU_SYS_CTL2_PMU_A_EN BIT(0)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS_INT_EN BIT(1)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS_PD BIT(2)
+#define ATC2609A_PMU_SYS_CTL2_S2TIMER GENMASK(5, 3)
+#define ATC2609A_PMU_SYS_CTL2_S2_TIMER_EN BIT(6)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_RESET_TIME_SEL GENMASK(8, 7)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_RESET_EN BIT(9)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS_TIME GENMASK(11, 10)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_LSP_INT_EN BIT(12)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_LONG_PRESS BIT(13)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_SHORT_PRESS BIT(14)
+#define ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS BIT(15)
+
+/* PMU_SYS_CTL3 Register Mask Bits */
+#define ATC2609A_PMU_SYS_CTL3_S2S3TOS1_TIMER GENMASK(8, 7)
+#define ATC2609A_PMU_SYS_CTL3_S2S3TOS1_TIMER_EN BIT(9)
+#define ATC2609A_PMU_SYS_CTL3_S3_TIMER GENMASK(12, 10)
+#define ATC2609A_PMU_SYS_CTL3_S3_TIMER_EN BIT(13)
+#define ATC2609A_PMU_SYS_CTL3_EN_S3 BIT(14)
+#define ATC2609A_PMU_SYS_CTL3_EN_S2 BIT(15)
+
+/* PMU_SYS_CTL5 Register Mask Bits */
+#define ATC2609A_PMU_SYS_CTL5_WALLWKDTEN BIT(7)
+#define ATC2609A_PMU_SYS_CTL5_VBUSWKDTEN BIT(8)
+#define ATC2609A_PMU_SYS_CTL5_REMCON_DECT_EN BIT(9)
+#define ATC2609A_PMU_SYS_CTL5_ONOFF_8S_SEL BIT(10)
+
+/* INTS_MSK Register Mask Bits */
+#define ATC2609A_INTS_MSK_AUDIO BIT(0)
+#define ATC2609A_INTS_MSK_OV BIT(1)
+#define ATC2609A_INTS_MSK_OC BIT(2)
+#define ATC2609A_INTS_MSK_OT BIT(3)
+#define ATC2609A_INTS_MSK_UV BIT(4)
+#define ATC2609A_INTS_MSK_ALARM BIT(5)
+#define ATC2609A_INTS_MSK_ONOFF BIT(6)
+#define ATC2609A_INTS_MSK_WKUP BIT(7)
+#define ATC2609A_INTS_MSK_IR BIT(8)
+#define ATC2609A_INTS_MSK_REMCON BIT(9)
+#define ATC2609A_INTS_MSK_POWERIN BIT(10)
+
+/* CMU_DEVRST Register Mask Bits */
+#define ATC2609A_CMU_DEVRST_AUDIO BIT(0)
+#define ATC2609A_CMU_DEVRST_MFP BIT(1)
+#define ATC2609A_CMU_DEVRST_INTS BIT(2)
+
+/* PAD_EN Register Mask Bits */
+#define ATC2609A_PAD_EN_EXTIRQ BIT(0)
+
+#endif /* __LINUX_MFD_ATC260X_ATC2609A_H */
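The SYS_CTL1 flag bits above encode the wakeup source; a minimal sketch of how a consumer might decode them follows, assuming the regmap handle comes from the shared ATC260x MFD state. Whether the flags are write-one-to-clear is not specified here, so the sketch only reads them.

#include <linux/device.h>
#include <linux/regmap.h>
#include <linux/mfd/atc260x/atc2609a.h>

/* Minimal sketch: report which ATC2609A wakeup flag fired (read-only). */
static int atc2609a_report_wakeup(struct device *dev, struct regmap *regmap)
{
	unsigned int ctl1;
	int ret;

	ret = regmap_read(regmap, ATC2609A_PMU_SYS_CTL1, &ctl1);
	if (ret)
		return ret;

	if (ctl1 & ATC2609A_PMU_SYS_CTL1_ONOFF_LONG_WK_FLAG)
		dev_info(dev, "wakeup: long ONOFF press\n");
	if (ctl1 & ATC2609A_PMU_SYS_CTL1_ALARM_WK_FLAG)
		dev_info(dev, "wakeup: RTC alarm\n");
	if (ctl1 & ATC2609A_PMU_SYS_CTL1_WALL_WK_FLAG)
		dev_info(dev, "wakeup: wall adapter\n");

	return 0;
}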
diff --git a/include/linux/mfd/atc260x/core.h b/include/linux/mfd/atc260x/core.h
new file mode 100644
index 000000000000..777b6c345d44
--- /dev/null
+++ b/include/linux/mfd/atc260x/core.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Core MFD defines for ATC260x PMICs
+ *
+ * Copyright (C) 2019 Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ * Copyright (C) 2020 Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
+ */
+
+#ifndef __LINUX_MFD_ATC260X_CORE_H
+#define __LINUX_MFD_ATC260X_CORE_H
+
+#include <linux/mfd/atc260x/atc2603c.h>
+#include <linux/mfd/atc260x/atc2609a.h>
+
+enum atc260x_type {
+ ATC2603A = 0,
+ ATC2603C,
+ ATC2609A,
+};
+
+enum atc260x_ver {
+ ATC260X_A = 0,
+ ATC260X_B,
+ ATC260X_C,
+ ATC260X_D,
+ ATC260X_E,
+ ATC260X_F,
+ ATC260X_G,
+ ATC260X_H,
+};
+
+struct atc260x {
+ struct device *dev;
+
+ struct regmap *regmap;
+ const struct regmap_irq_chip *regmap_irq_chip;
+ struct regmap_irq_chip_data *irq_data;
+
+ struct mutex *regmap_mutex; /* mutex for custom regmap locking */
+
+ const struct mfd_cell *cells;
+ int nr_cells;
+ int irq;
+
+ enum atc260x_type ic_type;
+ enum atc260x_ver ic_ver;
+ const char *type_name;
+ unsigned int rev_reg;
+
+ const struct atc260x_init_regs *init_regs; /* regs for device init */
+};
+
+struct regmap_config;
+
+int atc260x_match_device(struct atc260x *atc260x, struct regmap_config *regmap_cfg);
+int atc260x_device_probe(struct atc260x *atc260x);
+
+#endif /* __LINUX_MFD_ATC260X_CORE_H */
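core.h only declares the shared state and two entry points, leaving the transport glue to bus drivers. Below is a minimal sketch of an I2C front end, assuming a single-argument probe and 8-bit registers with 16-bit values; atc260x_match_device() presumably finalizes the regmap_config, which would explain why it is passed writable.

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/mfd/atc260x/core.h>

static struct regmap_config atc260x_i2c_regmap_cfg = {
	.reg_bits = 8,	/* assumed register address width */
	.val_bits = 16,	/* assumed register value width */
};

static int atc260x_i2c_probe(struct i2c_client *client)
{
	struct atc260x *atc260x;
	int ret;

	atc260x = devm_kzalloc(&client->dev, sizeof(*atc260x), GFP_KERNEL);
	if (!atc260x)
		return -ENOMEM;

	atc260x->dev = &client->dev;
	atc260x->irq = client->irq;

	/* Identify the chip and let the core adjust the regmap config. */
	ret = atc260x_match_device(atc260x, &atc260x_i2c_regmap_cfg);
	if (ret)
		return ret;

	atc260x->regmap = devm_regmap_init_i2c(client, &atc260x_i2c_regmap_cfg);
	if (IS_ERR(atc260x->regmap))
		return PTR_ERR(atc260x->regmap);

	return atc260x_device_probe(atc260x);
}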
diff --git a/include/linux/mfd/atmel-hlcdc.h b/include/linux/mfd/atmel-hlcdc.h
index 1279ab1644b5..a186119a49b5 100644
--- a/include/linux/mfd/atmel-hlcdc.h
+++ b/include/linux/mfd/atmel-hlcdc.h
@@ -1,20 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2014 Free Electrons
* Copyright (C) 2014 Atmel
*
* Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __LINUX_MFD_HLCDC_H
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 965b027e31b3..beb3f44f85c5 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Functions and registers to access AXP20X power management chip.
*
* Copyright (C) 2013, Carlo Caione <carlo@caione.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MFD_AXP20X_H
@@ -23,6 +20,8 @@ enum axp20x_variants {
AXP803_ID,
AXP806_ID,
AXP809_ID,
+ AXP813_ID,
+ AXP15060_ID,
NR_AXP20X_VARIANTS,
};
@@ -34,7 +33,7 @@ enum axp20x_variants {
#define AXP152_ALDO_OP_MODE 0x13
#define AXP152_LDO0_CTRL 0x15
#define AXP152_DCDC2_V_OUT 0x23
-#define AXP152_DCDC2_V_SCAL 0x25
+#define AXP152_DCDC2_V_RAMP 0x25
#define AXP152_DCDC1_V_OUT 0x26
#define AXP152_DCDC3_V_OUT 0x27
#define AXP152_ALDO12_V_OUT 0x28
@@ -52,7 +51,7 @@ enum axp20x_variants {
#define AXP20X_USB_OTG_STATUS 0x02
#define AXP20X_PWR_OUT_CTRL 0x12
#define AXP20X_DCDC2_V_OUT 0x23
-#define AXP20X_DCDC2_LDO3_V_SCAL 0x25
+#define AXP20X_DCDC2_LDO3_V_RAMP 0x25
#define AXP20X_DCDC3_V_OUT 0x27
#define AXP20X_LDO24_V_OUT 0x28
#define AXP20X_LDO3_V_OUT 0x29
@@ -130,6 +129,42 @@ enum axp20x_variants {
#define AXP803_DCDC6_V_OUT 0x25
#define AXP803_DCDC_FREQ_CTRL 0x3b
+/* Other DCDC regulator control registers are the same as AXP803 */
+#define AXP813_DCDC7_V_OUT 0x26
+
+#define AXP15060_STARTUP_SRC 0x00
+#define AXP15060_PWR_OUT_CTRL1 0x10
+#define AXP15060_PWR_OUT_CTRL2 0x11
+#define AXP15060_PWR_OUT_CTRL3 0x12
+#define AXP15060_DCDC1_V_CTRL 0x13
+#define AXP15060_DCDC2_V_CTRL 0x14
+#define AXP15060_DCDC3_V_CTRL 0x15
+#define AXP15060_DCDC4_V_CTRL 0x16
+#define AXP15060_DCDC5_V_CTRL 0x17
+#define AXP15060_DCDC6_V_CTRL 0x18
+#define AXP15060_ALDO1_V_CTRL 0x19
+#define AXP15060_DCDC_MODE_CTRL1 0x1a
+#define AXP15060_DCDC_MODE_CTRL2 0x1b
+#define AXP15060_OUTPUT_MONITOR_DISCHARGE 0x1e
+#define AXP15060_IRQ_PWROK_VOFF 0x1f
+#define AXP15060_ALDO2_V_CTRL 0x20
+#define AXP15060_ALDO3_V_CTRL 0x21
+#define AXP15060_ALDO4_V_CTRL 0x22
+#define AXP15060_ALDO5_V_CTRL 0x23
+#define AXP15060_BLDO1_V_CTRL 0x24
+#define AXP15060_BLDO2_V_CTRL 0x25
+#define AXP15060_BLDO3_V_CTRL 0x26
+#define AXP15060_BLDO4_V_CTRL 0x27
+#define AXP15060_BLDO5_V_CTRL 0x28
+#define AXP15060_CLDO1_V_CTRL 0x29
+#define AXP15060_CLDO2_V_CTRL 0x2a
+#define AXP15060_CLDO3_V_CTRL 0x2b
+#define AXP15060_CLDO4_V_CTRL 0x2d
+#define AXP15060_CPUSLDO_V_CTRL 0x2e
+#define AXP15060_PWR_WAKEUP_CTRL 0x31
+#define AXP15060_PWR_DISABLE_DOWN_SEQ 0x32
+#define AXP15060_PEK_KEY 0x36
+
/* Interrupt */
#define AXP152_IRQ1_EN 0x40
#define AXP152_IRQ2_EN 0x41
@@ -151,6 +186,11 @@ enum axp20x_variants {
#define AXP20X_IRQ5_STATE 0x4c
#define AXP20X_IRQ6_STATE 0x4d
+#define AXP15060_IRQ1_EN 0x40
+#define AXP15060_IRQ2_EN 0x41
+#define AXP15060_IRQ1_STATE 0x48
+#define AXP15060_IRQ2_STATE 0x49
+
/* ADC */
#define AXP20X_ACIN_V_ADC_H 0x56
#define AXP20X_ACIN_V_ADC_L 0x57
@@ -221,6 +261,8 @@ enum axp20x_variants {
#define AXP22X_GPIO_STATE 0x94
#define AXP22X_GPIO_PULL_DOWN 0x95
+#define AXP15060_CLDO4_GPIO2_MODESET 0x2c
+
/* Battery */
#define AXP20X_CHRG_CC_31_24 0xb0
#define AXP20X_CHRG_CC_23_16 0xb1
@@ -262,6 +304,9 @@ enum axp20x_variants {
#define AXP288_RT_BATT_V_H 0xa0
#define AXP288_RT_BATT_V_L 0xa1
+#define AXP813_ACIN_PATH_CTRL 0x3a
+#define AXP813_ADC_RATE 0x85
+
/* Fuel Gauge */
#define AXP288_FG_RDC1_REG 0xba
#define AXP288_FG_RDC0_REG 0xbb
@@ -387,6 +432,61 @@ enum {
AXP803_REG_ID_MAX,
};
+enum {
+ AXP813_DCDC1 = 0,
+ AXP813_DCDC2,
+ AXP813_DCDC3,
+ AXP813_DCDC4,
+ AXP813_DCDC5,
+ AXP813_DCDC6,
+ AXP813_DCDC7,
+ AXP813_ALDO1,
+ AXP813_ALDO2,
+ AXP813_ALDO3,
+ AXP813_DLDO1,
+ AXP813_DLDO2,
+ AXP813_DLDO3,
+ AXP813_DLDO4,
+ AXP813_ELDO1,
+ AXP813_ELDO2,
+ AXP813_ELDO3,
+ AXP813_FLDO1,
+ AXP813_FLDO2,
+ AXP813_FLDO3,
+ AXP813_RTC_LDO,
+ AXP813_LDO_IO0,
+ AXP813_LDO_IO1,
+ AXP813_SW,
+ AXP813_REG_ID_MAX,
+};
+
+enum {
+ AXP15060_DCDC1 = 0,
+ AXP15060_DCDC2,
+ AXP15060_DCDC3,
+ AXP15060_DCDC4,
+ AXP15060_DCDC5,
+ AXP15060_DCDC6,
+ AXP15060_ALDO1,
+ AXP15060_ALDO2,
+ AXP15060_ALDO3,
+ AXP15060_ALDO4,
+ AXP15060_ALDO5,
+ AXP15060_BLDO1,
+ AXP15060_BLDO2,
+ AXP15060_BLDO3,
+ AXP15060_BLDO4,
+ AXP15060_BLDO5,
+ AXP15060_CLDO1,
+ AXP15060_CLDO2,
+ AXP15060_CLDO3,
+ AXP15060_CLDO4,
+ AXP15060_CPUSLDO,
+ AXP15060_SW,
+ AXP15060_RTC_LDO,
+ AXP15060_REG_ID_MAX,
+};
+
/* IRQs */
enum {
AXP152_IRQ_LDO0IN_CONNECT = 1,
@@ -400,8 +500,9 @@ enum {
AXP152_IRQ_PEK_SHORT,
AXP152_IRQ_PEK_LONG,
AXP152_IRQ_TIMER,
- AXP152_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP152_IRQ_PEK_FAL_EDGE,
+ AXP152_IRQ_PEK_RIS_EDGE,
AXP152_IRQ_GPIO3_INPUT,
AXP152_IRQ_GPIO2_INPUT,
AXP152_IRQ_GPIO1_INPUT,
@@ -440,8 +541,9 @@ enum {
AXP20X_IRQ_LOW_PWR_LVL1,
AXP20X_IRQ_LOW_PWR_LVL2,
AXP20X_IRQ_TIMER,
- AXP20X_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP20X_IRQ_PEK_FAL_EDGE,
+ AXP20X_IRQ_PEK_RIS_EDGE,
AXP20X_IRQ_GPIO3_INPUT,
AXP20X_IRQ_GPIO2_INPUT,
AXP20X_IRQ_GPIO1_INPUT,
@@ -470,8 +572,9 @@ enum axp22x_irqs {
AXP22X_IRQ_LOW_PWR_LVL1,
AXP22X_IRQ_LOW_PWR_LVL2,
AXP22X_IRQ_TIMER,
- AXP22X_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP22X_IRQ_PEK_FAL_EDGE,
+ AXP22X_IRQ_PEK_RIS_EDGE,
AXP22X_IRQ_GPIO1_INPUT,
AXP22X_IRQ_GPIO0_INPUT,
};
@@ -539,8 +642,9 @@ enum axp803_irqs {
AXP803_IRQ_LOW_PWR_LVL1,
AXP803_IRQ_LOW_PWR_LVL2,
AXP803_IRQ_TIMER,
- AXP803_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP803_IRQ_PEK_FAL_EDGE,
+ AXP803_IRQ_PEK_RIS_EDGE,
AXP803_IRQ_PEK_SHORT,
AXP803_IRQ_PEK_LONG,
AXP803_IRQ_PEK_OVER_OFF,
@@ -558,11 +662,11 @@ enum axp806_irqs {
AXP806_IRQ_DCDCC_V_LOW,
AXP806_IRQ_DCDCD_V_LOW,
AXP806_IRQ_DCDCE_V_LOW,
- AXP806_IRQ_PWROK_LONG,
- AXP806_IRQ_PWROK_SHORT,
+ AXP806_IRQ_POK_LONG,
+ AXP806_IRQ_POK_SHORT,
AXP806_IRQ_WAKEUP,
- AXP806_IRQ_PWROK_FALL,
- AXP806_IRQ_PWROK_RISE,
+ AXP806_IRQ_POK_FALL,
+ AXP806_IRQ_POK_RISE,
};
enum axp809_irqs {
@@ -591,8 +695,9 @@ enum axp809_irqs {
AXP809_IRQ_LOW_PWR_LVL1,
AXP809_IRQ_LOW_PWR_LVL2,
AXP809_IRQ_TIMER,
- AXP809_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP809_IRQ_PEK_FAL_EDGE,
+ AXP809_IRQ_PEK_RIS_EDGE,
AXP809_IRQ_PEK_SHORT,
AXP809_IRQ_PEK_LONG,
AXP809_IRQ_PEK_OVER_OFF,
@@ -600,6 +705,23 @@ enum axp809_irqs {
AXP809_IRQ_GPIO0_INPUT,
};
+enum axp15060_irqs {
+ AXP15060_IRQ_DIE_TEMP_HIGH_LV1 = 1,
+ AXP15060_IRQ_DIE_TEMP_HIGH_LV2,
+ AXP15060_IRQ_DCDC1_V_LOW,
+ AXP15060_IRQ_DCDC2_V_LOW,
+ AXP15060_IRQ_DCDC3_V_LOW,
+ AXP15060_IRQ_DCDC4_V_LOW,
+ AXP15060_IRQ_DCDC5_V_LOW,
+ AXP15060_IRQ_DCDC6_V_LOW,
+ AXP15060_IRQ_PEK_LONG,
+ AXP15060_IRQ_PEK_SHORT,
+ AXP15060_IRQ_GPIO1_INPUT,
+ AXP15060_IRQ_PEK_FAL_EDGE,
+ AXP15060_IRQ_PEK_RIS_EDGE,
+ AXP15060_IRQ_GPIO2_INPUT,
+};
+
struct axp20x_dev {
struct device *dev;
int irq;
@@ -608,16 +730,11 @@ struct axp20x_dev {
struct regmap_irq_chip_data *regmap_irqc;
long variant;
int nr_cells;
- struct mfd_cell *cells;
+ const struct mfd_cell *cells;
const struct regmap_config *regmap_cfg;
const struct regmap_irq_chip *regmap_irq_chip;
};
-struct axp288_extcon_pdata {
- /* GPIO pin control to switch D+/D- lines b/w PMIC and SOC */
- struct gpio_desc *gpio_mux_cntl;
-};
-
/* generic helper function for reading 9-16 bit wide regs */
static inline int axp20x_read_variable_width(struct regmap *regmap,
unsigned int reg, unsigned int width)
@@ -669,6 +786,6 @@ int axp20x_device_probe(struct axp20x_dev *axp20x);
*
* This tells the axp20x core to remove the associated mfd devices
*/
-int axp20x_device_remove(struct axp20x_dev *axp20x);
+void axp20x_device_remove(struct axp20x_dev *axp20x);
#endif /* __LINUX_MFD_AXP20X_H */
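The hunk above keeps the variable-width read helper for 9-16 bit wide registers; a small usage sketch follows, taking the 12-bit ACIN voltage ADC (AXP20X_ACIN_V_ADC_H from this header) as the example and assuming the helper returns the combined reading on success or a negative errno on failure.

#include <linux/mfd/axp20x.h>
#include <linux/regmap.h>

/* Sketch: fetch the raw 12-bit ACIN voltage reading. */
static int axp20x_get_acin_voltage_raw(struct axp20x_dev *axp20x,
					unsigned int *val)
{
	int ret = axp20x_read_variable_width(axp20x->regmap,
					     AXP20X_ACIN_V_ADC_H, 12);

	if (ret < 0)
		return ret;

	*val = ret;
	return 0;
}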
diff --git a/include/linux/mfd/bcm2835-pm.h b/include/linux/mfd/bcm2835-pm.h
new file mode 100644
index 000000000000..f70a810c55f7
--- /dev/null
+++ b/include/linux/mfd/bcm2835-pm.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef BCM2835_MFD_PM_H
+#define BCM2835_MFD_PM_H
+
+#include <linux/regmap.h>
+
+struct bcm2835_pm {
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *asb;
+ void __iomem *rpivid_asb;
+};
+
+#endif /* BCM2835_MFD_PM_H */
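struct bcm2835_pm only carries ioremapped bases, so the natural consumer is an MFD child device. A sketch, assuming the parent driver publishes the structure through its drvdata:

#include <linux/platform_device.h>
#include <linux/mfd/bcm2835-pm.h>

static int bcm2835_pm_child_probe(struct platform_device *pdev)
{
	/* Assumption: the MFD parent stored the shared state in drvdata. */
	struct bcm2835_pm *pm = dev_get_drvdata(pdev->dev.parent);

	if (!pm || !pm->base)
		return -ENODEV;

	/* pm->base and pm->asb are ready-to-use __iomem mappings. */
	platform_set_drvdata(pdev, pm);
	return 0;
}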
diff --git a/include/linux/mfd/bcm590xx.h b/include/linux/mfd/bcm590xx.h
index 267aedee1c7a..6b8791da6119 100644
--- a/include/linux/mfd/bcm590xx.h
+++ b/include/linux/mfd/bcm590xx.h
@@ -1,14 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Broadcom BCM590xx PMU
*
* Copyright 2014 Linaro Limited
* Author: Matt Porter <mporter@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_BCM590XX_H
diff --git a/include/linux/mfd/bd9571mwv.h b/include/linux/mfd/bd9571mwv.h
new file mode 100644
index 000000000000..8efd99d07c9e
--- /dev/null
+++ b/include/linux/mfd/bd9571mwv.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ROHM BD9571MWV-M and BD9574MWF-M driver
+ *
+ * Copyright (C) 2017 Marek Vasut <marek.vasut+renesas@gmail.com>
+ * Copyright (C) 2020 Renesas Electronics Corporation
+ *
+ * Based on the TPS65086 driver
+ */
+
+#ifndef __LINUX_MFD_BD9571MWV_H
+#define __LINUX_MFD_BD9571MWV_H
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+/* List of registers for BD9571MWV and BD9574MWF */
+#define BD9571MWV_VENDOR_CODE 0x00
+#define BD9571MWV_VENDOR_CODE_VAL 0xdb
+#define BD9571MWV_PRODUCT_CODE 0x01
+#define BD9571MWV_PRODUCT_CODE_BD9571MWV 0x60
+#define BD9571MWV_PRODUCT_CODE_BD9574MWF 0x74
+#define BD9571MWV_PRODUCT_REVISION 0x02
+
+#define BD9571MWV_I2C_FUSA_MODE 0x10
+#define BD9571MWV_I2C_MD2_E1_BIT_1 0x11
+#define BD9571MWV_I2C_MD2_E1_BIT_2 0x12
+
+#define BD9571MWV_BKUP_MODE_CNT 0x20
+#define BD9571MWV_BKUP_MODE_CNT_KEEPON_MASK GENMASK(3, 0)
+#define BD9571MWV_BKUP_MODE_CNT_KEEPON_DDR0 BIT(0)
+#define BD9571MWV_BKUP_MODE_CNT_KEEPON_DDR1 BIT(1)
+#define BD9571MWV_BKUP_MODE_CNT_KEEPON_DDR0C BIT(2)
+#define BD9571MWV_BKUP_MODE_CNT_KEEPON_DDR1C BIT(3)
+#define BD9571MWV_BKUP_MODE_STATUS 0x21
+#define BD9571MWV_BKUP_RECOVERY_CNT 0x22
+#define BD9571MWV_BKUP_CTRL_TIM_CNT 0x23
+#define BD9571MWV_WAITBKUP_WDT_CNT 0x24
+#define BD9571MWV_128H_TIM_CNT 0x26
+#define BD9571MWV_QLLM_CNT 0x27
+
+#define BD9571MWV_AVS_SET_MONI 0x31
+#define BD9571MWV_AVS_SET_MONI_MASK 0x3
+#define BD9571MWV_AVS_VD09_VID(n) (0x32 + (n))
+#define BD9571MWV_AVS_DVFS_VID(n) (0x36 + (n))
+
+#define BD9571MWV_VD18_VID 0x42
+#define BD9571MWV_VD25_VID 0x43
+#define BD9571MWV_VD33_VID 0x44
+
+#define BD9571MWV_DVFS_VINIT 0x50
+#define BD9574MWF_VD09_VINIT 0x51
+#define BD9571MWV_DVFS_SETVMAX 0x52
+#define BD9571MWV_DVFS_BOOSTVID 0x53
+#define BD9571MWV_DVFS_SETVID 0x54
+#define BD9571MWV_DVFS_MONIVDAC 0x55
+#define BD9571MWV_DVFS_PGD_CNT 0x56
+
+#define BD9571MWV_GPIO_DIR 0x60
+#define BD9571MWV_GPIO_OUT 0x61
+#define BD9571MWV_GPIO_IN 0x62
+#define BD9571MWV_GPIO_DEB 0x63
+#define BD9571MWV_GPIO_INT_SET 0x64
+#define BD9571MWV_GPIO_INT 0x65
+#define BD9571MWV_GPIO_INTMASK 0x66
+#define BD9574MWF_GPIO_MUX 0x67
+
+#define BD9571MWV_REG_KEEP(n) (0x70 + (n))
+
+#define BD9571MWV_PMIC_INTERNAL_STATUS 0x80
+#define BD9571MWV_PROT_ERROR_STATUS0 0x81
+#define BD9571MWV_PROT_ERROR_STATUS1 0x82
+#define BD9571MWV_PROT_ERROR_STATUS2 0x83
+#define BD9571MWV_PROT_ERROR_STATUS3 0x84
+#define BD9571MWV_PROT_ERROR_STATUS4 0x85
+#define BD9574MWF_PROT_ERROR_STATUS5 0x86
+#define BD9574MWF_SYSTEM_ERROR_STATUS 0x87
+
+#define BD9571MWV_INT_INTREQ 0x90
+#define BD9571MWV_INT_INTREQ_MD1_INT BIT(0)
+#define BD9571MWV_INT_INTREQ_MD2_E1_INT BIT(1)
+#define BD9571MWV_INT_INTREQ_MD2_E2_INT BIT(2)
+#define BD9571MWV_INT_INTREQ_PROT_ERR_INT BIT(3)
+#define BD9571MWV_INT_INTREQ_GP_INT BIT(4)
+#define BD9571MWV_INT_INTREQ_128H_OF_INT BIT(5)
+#define BD9571MWV_INT_INTREQ_WDT_OF_INT BIT(6)
+#define BD9571MWV_INT_INTREQ_BKUP_TRG_INT BIT(7)
+#define BD9571MWV_INT_INTMASK 0x91
+
+#define BD9574MWF_SSCG_CNT 0xA0
+#define BD9574MWF_POFFB_MRB 0xA1
+#define BD9574MWF_SMRB_WR_PROT 0xA2
+#define BD9574MWF_SMRB_ASSERT 0xA3
+#define BD9574MWF_SMRB_STATUS 0xA4
+
+#define BD9571MWV_ACCESS_KEY 0xff
+
+/* Define the BD9571MWV IRQ numbers */
+enum bd9571mwv_irqs {
+ BD9571MWV_IRQ_MD1,
+ BD9571MWV_IRQ_MD2_E1,
+ BD9571MWV_IRQ_MD2_E2,
+ BD9571MWV_IRQ_PROT_ERR,
+ BD9571MWV_IRQ_GP,
+ BD9571MWV_IRQ_128H_OF, /* BKUP_HOLD on BD9574MWF */
+ BD9571MWV_IRQ_WDT_OF,
+ BD9571MWV_IRQ_BKUP_TRG,
+};
+#endif /* __LINUX_MFD_BD9571MWV_H */
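As a usage sketch for the KEEPON bits above, the following keeps the DDR0/DDR1 rails powered through backup mode via regmap_update_bits(); obtaining the regmap from the parent device is assumed.

#include <linux/regmap.h>
#include <linux/mfd/bd9571mwv.h>

/* Sketch: request DDR0/DDR1 rail retention across backup mode. */
static int bd9571mwv_keep_ddr_rails(struct regmap *regmap)
{
	return regmap_update_bits(regmap, BD9571MWV_BKUP_MODE_CNT,
				  BD9571MWV_BKUP_MODE_CNT_KEEPON_MASK,
				  BD9571MWV_BKUP_MODE_CNT_KEEPON_DDR0 |
				  BD9571MWV_BKUP_MODE_CNT_KEEPON_DDR1);
}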
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index 99c0395fe1f9..47e7a3a61ce6 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* drivers/mfd/mfd-core.h
*
* core MFD support
* Copyright (c) 2006 Ian Molton
* Copyright (c) 2007 Dmitry Baryshkov
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef MFD_CORE_H
@@ -16,8 +12,45 @@
#include <linux/platform_device.h>
+#define MFD_RES_SIZE(arr) (sizeof(arr) / sizeof(struct resource))
+
+#define MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg, _use_of_reg, _match) \
+ { \
+ .name = (_name), \
+ .resources = (_res), \
+ .num_resources = MFD_RES_SIZE((_res)), \
+ .platform_data = (_pdata), \
+ .pdata_size = (_pdsize), \
+ .of_compatible = (_compat), \
+ .of_reg = (_of_reg), \
+ .use_of_reg = (_use_of_reg), \
+ .acpi_match = (_match), \
+ .id = (_id), \
+ }
+
+#define MFD_CELL_OF_REG(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg) \
+ MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg, true, NULL)
+
+#define MFD_CELL_OF(_name, _res, _pdata, _pdsize, _id, _compat) \
+ MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, 0, false, NULL)
+
+#define MFD_CELL_ACPI(_name, _res, _pdata, _pdsize, _id, _match) \
+ MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, 0, false, _match)
+
+#define MFD_CELL_BASIC(_name, _res, _pdata, _pdsize, _id) \
+ MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, 0, false, NULL)
+
+#define MFD_CELL_RES(_name, _res) \
+ MFD_CELL_ALL(_name, _res, NULL, 0, 0, NULL, 0, false, NULL)
+
+#define MFD_CELL_NAME(_name) \
+ MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, 0, false, NULL)
+
+#define MFD_DEP_LEVEL_NORMAL 0
+#define MFD_DEP_LEVEL_HIGH 1
+
struct irq_domain;
-struct property_entry;
+struct software_node;
/* Matches ACPI PNP id, either _HID or _CID, or ACPI _ADR */
struct mfd_cell_acpi_match {
@@ -33,11 +66,7 @@ struct mfd_cell_acpi_match {
struct mfd_cell {
const char *name;
int id;
-
- /* refcounting for multiple drivers to use a single cell */
- atomic_t *usage_count;
- int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
+ int level;
int (*suspend)(struct platform_device *dev);
int (*resume)(struct platform_device *dev);
@@ -46,17 +75,27 @@ struct mfd_cell {
void *platform_data;
size_t pdata_size;
- /* device properties passed to the sub devices drivers */
- struct property_entry *properties;
+ /* Matches ACPI */
+ const struct mfd_cell_acpi_match *acpi_match;
+
+ /* Software node for the device. */
+ const struct software_node *swnode;
/*
* Device Tree compatible string
- * See: Documentation/devicetree/usage-model.txt Chapter 2.2 for details
+ * See: Documentation/devicetree/usage-model.rst Chapter 2.2 for details
*/
const char *of_compatible;
- /* Matches ACPI */
- const struct mfd_cell_acpi_match *acpi_match;
+ /*
+ * Address as defined in Device Tree. Used to complement 'of_compatible'
+ * (above) when matching OF nodes with devices that have identical
+ * compatible strings
+ */
+ const u64 of_reg;
+
+ /* Set to 'true' to use 'of_reg' (above) - allows for of_reg=0 */
+ bool use_of_reg;
/*
* These resources can be specified relative to the parent device.
@@ -77,38 +116,11 @@ struct mfd_cell {
/* A list of regulator supplies that should be mapped to the MFD
* device rather than the child device when requested
*/
- const char * const *parent_supplies;
int num_parent_supplies;
+ const char * const *parent_supplies;
};
/*
- * Convenience functions for clients using shared cells. Refcounting
- * happens automatically, with the cell's enable/disable callbacks
- * being called only when a device is first being enabled or no other
- * clients are making use of it.
- */
-extern int mfd_cell_enable(struct platform_device *pdev);
-extern int mfd_cell_disable(struct platform_device *pdev);
-
-/*
- * "Clone" multiple platform devices for a single cell. This is to be used
- * for devices that have multiple users of a cell. For example, if an mfd
- * driver wants the cell "foo" to be used by a GPIO driver, an MTD driver,
- * and a platform driver, the following bit of code would be use after first
- * calling mfd_add_devices():
- *
- * const char *fclones[] = { "foo-gpio", "foo-mtd" };
- * err = mfd_clone_cells("foo", fclones, ARRAY_SIZE(fclones));
- *
- * Each driver (MTD, GPIO, and platform driver) would then register
- * platform_drivers for "foo-mtd", "foo-gpio", and "foo", respectively.
- * The cell's .enable/.disable hooks should be used to deal with hardware
- * resource contention.
- */
-extern int mfd_clone_cell(const char *cell, const char **clones,
- size_t n_clones);
-
-/*
* Given a platform device that's been created by mfd_add_devices(), fetch
* the mfd_cell that created it.
*/
@@ -130,6 +142,7 @@ static inline int mfd_add_hotplug_devices(struct device *parent,
}
extern void mfd_remove_devices(struct device *parent);
+extern void mfd_remove_devices_late(struct device *parent);
extern int devm_mfd_add_devices(struct device *dev, int id,
const struct mfd_cell *cells, int n_devs,
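The new MFD_CELL_* initializers above replace open-coded struct mfd_cell literals. A sketch of declaring two cells and registering them with devm_mfd_add_devices() follows; the cell names and compatible string are purely illustrative.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>

/* Hypothetical sub-devices: one plain cell, one matched by OF compatible. */
static const struct mfd_cell example_cells[] = {
	MFD_CELL_NAME("example-gpio"),
	MFD_CELL_OF("example-rtc", NULL, NULL, 0, 0, "vendor,example-rtc"),
};

static int example_register_cells(struct device *dev)
{
	return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE,
				    example_cells, ARRAY_SIZE(example_cells),
				    NULL, 0, NULL);
}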
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
deleted file mode 100644
index 4e887ba22635..000000000000
--- a/include/linux/mfd/cros_ec.h
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- * ChromeOS EC multi-function device
- *
- * Copyright (C) 2012 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __LINUX_MFD_CROS_EC_H
-#define __LINUX_MFD_CROS_EC_H
-
-#include <linux/cdev.h>
-#include <linux/device.h>
-#include <linux/notifier.h>
-#include <linux/mfd/cros_ec_commands.h>
-#include <linux/mutex.h>
-
-#define CROS_EC_DEV_NAME "cros_ec"
-#define CROS_EC_DEV_PD_NAME "cros_pd"
-
-/*
- * The EC is unresponsive for a time after a reboot command. Add a
- * simple delay to make sure that the bus stays locked.
- */
-#define EC_REBOOT_DELAY_MS 50
-
-/*
- * Max bus-specific overhead incurred by request/responses.
- * I2C requires 1 additional byte for requests.
- * I2C requires 2 additional bytes for responses.
- * SPI requires up to 32 additional bytes for responses.
- */
-#define EC_PROTO_VERSION_UNKNOWN 0
-#define EC_MAX_REQUEST_OVERHEAD 1
-#define EC_MAX_RESPONSE_OVERHEAD 32
-
-/*
- * Command interface between EC and AP, for LPC, I2C and SPI interfaces.
- */
-enum {
- EC_MSG_TX_HEADER_BYTES = 3,
- EC_MSG_TX_TRAILER_BYTES = 1,
- EC_MSG_TX_PROTO_BYTES = EC_MSG_TX_HEADER_BYTES +
- EC_MSG_TX_TRAILER_BYTES,
- EC_MSG_RX_PROTO_BYTES = 3,
-
-	/* Max length of messages for proto 2 */
- EC_PROTO2_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE +
- EC_MSG_TX_PROTO_BYTES,
-
- EC_MAX_MSG_BYTES = 64 * 1024,
-};
-
-/*
- * @version: Command version number (often 0)
- * @command: Command to send (EC_CMD_...)
- * @outsize: Outgoing length in bytes
- * @insize: Max number of bytes to accept from EC
- * @result: EC's response to the command (separate from communication failure)
- * @data: Where to put the incoming data from EC and outgoing data to EC
- */
-struct cros_ec_command {
- uint32_t version;
- uint32_t command;
- uint32_t outsize;
- uint32_t insize;
- uint32_t result;
- uint8_t data[0];
-};
-
-/**
- * struct cros_ec_device - Information about a ChromeOS EC device
- *
- * @phys_name: name of physical comms layer (e.g. 'i2c-4')
- * @dev: Device pointer for physical comms device
- * @was_wake_device: true if this device was set to wake the system from
- * sleep at the last suspend
- * @cmd_readmem: direct read of the EC memory-mapped region, if supported
- * @offset is within EC_LPC_ADDR_MEMMAP region.
- * @bytes: number of bytes to read. zero means "read a string" (including
- * the trailing '\0'). At most only EC_MEMMAP_SIZE bytes can be read.
- * Caller must ensure that the buffer is large enough for the result when
- * reading a string.
- *
- * @priv: Private data
- * @irq: Interrupt to use
- * @id: Device id
- * @din: input buffer (for data from EC)
- * @dout: output buffer (for data to EC)
- * Note:
- * These two buffers will always be dword-aligned and include enough
- * space for up to 7 word-alignment bytes also, so we can ensure that
- * the body of the message is always dword-aligned (64-bit).
- * We use this alignment to keep ARM and x86 happy. Probably word
- * alignment would be OK, there might be a small performance advantage
- * to using dword.
- * @din_size: size of din buffer to allocate (zero to use static din)
- * @dout_size: size of dout buffer to allocate (zero to use static dout)
- * @wake_enabled: true if this device can wake the system from sleep
- * @suspended: true if this device had been suspended
- * @cmd_xfer: send command to EC and get response
- * Returns the number of bytes received if the communication succeeded, but
- * that doesn't mean the EC was happy with the command. The caller
- * should check msg.result for the EC's result code.
- * @pkt_xfer: send packet to EC and get response
- * @lock: one transaction at a time
- * @mkbp_event_supported: true if this EC supports the MKBP event protocol.
- * @event_notifier: interrupt event notifier for transport devices.
- * @event_data: raw payload transferred with the MKBP event.
- * @event_size: size in bytes of the event data.
- */
-struct cros_ec_device {
-
- /* These are used by other drivers that want to talk to the EC */
- const char *phys_name;
- struct device *dev;
- bool was_wake_device;
- struct class *cros_class;
- int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset,
- unsigned int bytes, void *dest);
-
- /* These are used to implement the platform-specific interface */
- u16 max_request;
- u16 max_response;
- u16 max_passthru;
- u16 proto_version;
- void *priv;
- int irq;
- u8 *din;
- u8 *dout;
- int din_size;
- int dout_size;
- bool wake_enabled;
- bool suspended;
- int (*cmd_xfer)(struct cros_ec_device *ec,
- struct cros_ec_command *msg);
- int (*pkt_xfer)(struct cros_ec_device *ec,
- struct cros_ec_command *msg);
- struct mutex lock;
- bool mkbp_event_supported;
- struct blocking_notifier_head event_notifier;
-
- struct ec_response_get_next_event event_data;
- int event_size;
- u32 host_event_wake_mask;
-};
-
-/**
- * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information
- *
- * @sensor_num: Id of the sensor, as reported by the EC.
- */
-struct cros_ec_sensor_platform {
- u8 sensor_num;
-};
-
-/* struct cros_ec_platform - ChromeOS EC platform information
- *
- * @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
- * used in /dev/ and sysfs.
- * @cmd_offset: offset to apply for each command. Set when
- * registering a device behind another one.
- */
-struct cros_ec_platform {
- const char *ec_name;
- u16 cmd_offset;
-};
-
-struct cros_ec_debugfs;
-
-/**
- * struct cros_ec_dev - ChromeOS EC device entry point
- *
- * @class_dev: Device structure used in sysfs
- * @cdev: Character device structure in /dev
- * @ec_dev: cros_ec_device structure to talk to the physical device
- * @dev: pointer to the platform device
- * @debug_info: cros_ec_debugfs structure for debugging information
- * @cmd_offset: offset to apply for each command.
- */
-struct cros_ec_dev {
- struct device class_dev;
- struct cdev cdev;
- struct cros_ec_device *ec_dev;
- struct device *dev;
- struct cros_ec_debugfs *debug_info;
- u16 cmd_offset;
- u32 features[2];
-};
-
-/**
- * cros_ec_suspend - Handle a suspend operation for the ChromeOS EC device
- *
- * This can be called by drivers to handle a suspend event.
- *
- * @ec_dev: Device to suspend
- * @return 0 if ok, -ve on error
- */
-int cros_ec_suspend(struct cros_ec_device *ec_dev);
-
-/**
- * cros_ec_resume - Handle a resume operation for the ChromeOS EC device
- *
- * This can be called by drivers to handle a resume event.
- *
- * @ec_dev: Device to resume
- * @return 0 if ok, -ve on error
- */
-int cros_ec_resume(struct cros_ec_device *ec_dev);
-
-/**
- * cros_ec_prepare_tx - Prepare an outgoing message in the output buffer
- *
- * This is intended to be used by all ChromeOS EC drivers, but at present
- * only SPI uses it. Once LPC uses the same protocol it can start using it.
- * I2C could use it now, with a refactor of the existing code.
- *
- * @ec_dev: Device to register
- * @msg: Message to write
- */
-int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg);
-
-/**
- * cros_ec_check_result - Check ec_msg->result
- *
- * This is used by ChromeOS EC drivers to check the ec_msg->result for
- * errors and to warn about them.
- *
- * @ec_dev: EC device
- * @msg: Message to check
- */
-int cros_ec_check_result(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg);
-
-/**
- * cros_ec_cmd_xfer - Send a command to the ChromeOS EC
- *
- * Call this to send a command to the ChromeOS EC. This should be used
- * instead of calling the EC's cmd_xfer() callback directly.
- *
- * @ec_dev: EC device
- * @msg: Message to write
- */
-int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg);
-
-/**
- * cros_ec_cmd_xfer_status - Send a command to the ChromeOS EC
- *
- * This function is identical to cros_ec_cmd_xfer, except it returns success
- * status only if both the command was transmitted successfully and the EC
- * replied with success status. It's not necessary to check msg->result when
- * using this function.
- *
- * @ec_dev: EC device
- * @msg: Message to write
- * @return: Num. of bytes transferred on success, <0 on failure
- */
-int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg);
-
-/**
- * cros_ec_remove - Remove a ChromeOS EC
- *
- * Call this to deregister a ChromeOS EC, then clean up any private data.
- *
- * @ec_dev: Device to register
- * @return 0 if ok, -ve on error
- */
-int cros_ec_remove(struct cros_ec_device *ec_dev);
-
-/**
- * cros_ec_register - Register a new ChromeOS EC, using the provided info
- *
- * Before calling this, allocate a pointer to a new device and then fill
- * in all the fields up to the --private-- marker.
- *
- * @ec_dev: Device to register
- * @return 0 if ok, -ve on error
- */
-int cros_ec_register(struct cros_ec_device *ec_dev);
-
-/**
- * cros_ec_query_all - Query the protocol version supported by the ChromeOS EC
- *
- * @ec_dev: Device to register
- * @return 0 if ok, -ve on error
- */
-int cros_ec_query_all(struct cros_ec_device *ec_dev);
-
-/**
- * cros_ec_get_next_event - Fetch next event from the ChromeOS EC
- *
- * @ec_dev: Device to fetch event from
- * @wake_event: Pointer to a bool set to true upon return if the event might be
- * treated as a wake event. Ignored if null.
- *
- * Returns: 0 on success, Linux error number on failure
- */
-int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event);
-
-/**
- * cros_ec_get_host_event - Return a mask of event set by the EC.
- *
- * When MKBP is supported and the EC raises an interrupt,
- * we collect the raised events and call the functions in the ec notifier.
- *
- * This function is a helper to know which events are raised.
- */
-u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev);
-
-/* sysfs stuff */
-extern struct attribute_group cros_ec_attr_group;
-extern struct attribute_group cros_ec_lightbar_attr_group;
-extern struct attribute_group cros_ec_vbc_attr_group;
-
-/* ACPI GPE handler */
-#ifdef CONFIG_ACPI
-
-int cros_ec_acpi_install_gpe_handler(struct device *dev);
-void cros_ec_acpi_remove_gpe_handler(void);
-void cros_ec_acpi_clear_gpe(void);
-
-#else /* CONFIG_ACPI */
-
-static inline int cros_ec_acpi_install_gpe_handler(struct device *dev)
-{
- return -ENODEV;
-}
-static inline void cros_ec_acpi_remove_gpe_handler(void) {}
-static inline void cros_ec_acpi_clear_gpe(void) {}
-
-#endif /* CONFIG_ACPI */
-
-#endif /* __LINUX_MFD_CROS_EC_H */
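For reference, the host-command flow documented in the header removed above looks roughly like this sketch: allocate a cros_ec_command with room for the response, fill version/command/outsize/insize, and prefer cros_ec_cmd_xfer_status() so EC-side errors are folded into the return value. The command code 0x02 and the response handling are illustrative assumptions.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mfd/cros_ec.h>

static int example_ec_query(struct cros_ec_device *ec_dev,
			    void *resp, size_t resp_len)
{
	struct cros_ec_command *msg;
	int ret;

	/* msg->data is a flexible array; allocate room for the response. */
	msg = kzalloc(sizeof(*msg) + resp_len, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->version = 0;		/* command version, often 0 */
	msg->command = 0x02;		/* hypothetical command code */
	msg->outsize = 0;		/* no parameters sent */
	msg->insize = resp_len;		/* max bytes accepted from the EC */

	ret = cros_ec_cmd_xfer_status(ec_dev, msg);
	if (ret >= 0)
		memcpy(resp, msg->data, min_t(size_t, ret, resp_len));

	kfree(msg);
	return ret < 0 ? ret : 0;
}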
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
deleted file mode 100644
index 190c8f4afa02..000000000000
--- a/include/linux/mfd/cros_ec_commands.h
+++ /dev/null
@@ -1,2946 +0,0 @@
-/*
- * Host communication command constants for ChromeOS EC
- *
- * Copyright (C) 2012 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * The ChromeOS EC multi function device is used to mux all the requests
- * to the EC device for its multiple features: keyboard controller,
- * battery charging and regulator control, firmware update.
- *
- * NOTE: This file is copied verbatim from the ChromeOS EC Open Source
- * project in an attempt to make future updates easy to make.
- */
-
-#ifndef __CROS_EC_COMMANDS_H
-#define __CROS_EC_COMMANDS_H
-
-/*
- * Current version of this protocol
- *
- * TODO(crosbug.com/p/11223): This is effectively useless; protocol is
- * determined in other ways. Remove this once the kernel code no longer
- * depends on it.
- */
-#define EC_PROTO_VERSION 0x00000002
-
-/* Command version mask */
-#define EC_VER_MASK(version) (1UL << (version))
-
-/* I/O addresses for ACPI commands */
-#define EC_LPC_ADDR_ACPI_DATA 0x62
-#define EC_LPC_ADDR_ACPI_CMD 0x66
-
-/* I/O addresses for host command */
-#define EC_LPC_ADDR_HOST_DATA 0x200
-#define EC_LPC_ADDR_HOST_CMD 0x204
-
-/* I/O addresses for host command args and params */
-/* Protocol version 2 */
-#define EC_LPC_ADDR_HOST_ARGS 0x800 /* And 0x801, 0x802, 0x803 */
-#define EC_LPC_ADDR_HOST_PARAM 0x804 /* For version 2 params; size is
- * EC_PROTO2_MAX_PARAM_SIZE */
-/* Protocol version 3 */
-#define EC_LPC_ADDR_HOST_PACKET 0x800 /* Offset of version 3 packet */
-#define EC_LPC_HOST_PACKET_SIZE 0x100 /* Max size of version 3 packet */
-
-/* The actual block is 0x800-0x8ff, but some BIOSes think it's 0x880-0x8ff
- * and they tell the kernel that so we have to think of it as two parts. */
-#define EC_HOST_CMD_REGION0 0x800
-#define EC_HOST_CMD_REGION1 0x880
-#define EC_HOST_CMD_REGION_SIZE 0x80
-
-/* EC command register bit functions */
-#define EC_LPC_CMDR_DATA (1 << 0) /* Data ready for host to read */
-#define EC_LPC_CMDR_PENDING (1 << 1) /* Write pending to EC */
-#define EC_LPC_CMDR_BUSY (1 << 2) /* EC is busy processing a command */
-#define EC_LPC_CMDR_CMD (1 << 3) /* Last host write was a command */
-#define EC_LPC_CMDR_ACPI_BRST (1 << 4) /* Burst mode (not used) */
-#define EC_LPC_CMDR_SCI (1 << 5) /* SCI event is pending */
-#define EC_LPC_CMDR_SMI (1 << 6) /* SMI event is pending */
-
-#define EC_LPC_ADDR_MEMMAP 0x900
-#define EC_MEMMAP_SIZE 255 /* ACPI IO buffer max is 255 bytes */
-#define EC_MEMMAP_TEXT_MAX 8 /* Size of a string in the memory map */
-
-/* The offset address of each type of data in mapped memory. */
-#define EC_MEMMAP_TEMP_SENSOR 0x00 /* Temp sensors 0x00 - 0x0f */
-#define EC_MEMMAP_FAN 0x10 /* Fan speeds 0x10 - 0x17 */
-#define EC_MEMMAP_TEMP_SENSOR_B 0x18 /* More temp sensors 0x18 - 0x1f */
-#define EC_MEMMAP_ID 0x20 /* 0x20 == 'E', 0x21 == 'C' */
-#define EC_MEMMAP_ID_VERSION 0x22 /* Version of data in 0x20 - 0x2f */
-#define EC_MEMMAP_THERMAL_VERSION 0x23 /* Version of data in 0x00 - 0x1f */
-#define EC_MEMMAP_BATTERY_VERSION 0x24 /* Version of data in 0x40 - 0x7f */
-#define EC_MEMMAP_SWITCHES_VERSION 0x25 /* Version of data in 0x30 - 0x33 */
-#define EC_MEMMAP_EVENTS_VERSION 0x26 /* Version of data in 0x34 - 0x3f */
-#define EC_MEMMAP_HOST_CMD_FLAGS 0x27 /* Host cmd interface flags (8 bits) */
-/* Unused 0x28 - 0x2f */
-#define EC_MEMMAP_SWITCHES 0x30 /* 8 bits */
-/* Unused 0x31 - 0x33 */
-#define EC_MEMMAP_HOST_EVENTS 0x34 /* 32 bits */
-/* Reserve 0x38 - 0x3f for additional host event-related stuff */
-/* Battery values are all 32 bits */
-#define EC_MEMMAP_BATT_VOLT 0x40 /* Battery Present Voltage */
-#define EC_MEMMAP_BATT_RATE 0x44 /* Battery Present Rate */
-#define EC_MEMMAP_BATT_CAP 0x48 /* Battery Remaining Capacity */
-#define EC_MEMMAP_BATT_FLAG 0x4c /* Battery State, defined below */
-#define EC_MEMMAP_BATT_DCAP 0x50 /* Battery Design Capacity */
-#define EC_MEMMAP_BATT_DVLT 0x54 /* Battery Design Voltage */
-#define EC_MEMMAP_BATT_LFCC 0x58 /* Battery Last Full Charge Capacity */
-#define EC_MEMMAP_BATT_CCNT 0x5c /* Battery Cycle Count */
-/* Strings are all 8 bytes (EC_MEMMAP_TEXT_MAX) */
-#define EC_MEMMAP_BATT_MFGR 0x60 /* Battery Manufacturer String */
-#define EC_MEMMAP_BATT_MODEL 0x68 /* Battery Model Number String */
-#define EC_MEMMAP_BATT_SERIAL 0x70 /* Battery Serial Number String */
-#define EC_MEMMAP_BATT_TYPE 0x78 /* Battery Type String */
-#define EC_MEMMAP_ALS 0x80 /* ALS readings in lux (2 X 16 bits) */
-/* Unused 0x84 - 0x8f */
-#define EC_MEMMAP_ACC_STATUS 0x90 /* Accelerometer status (8 bits) */
-/* Unused 0x91 */
-#define EC_MEMMAP_ACC_DATA 0x92 /* Accelerometer data 0x92 - 0x9f */
-#define EC_MEMMAP_GYRO_DATA 0xa0 /* Gyroscope data 0xa0 - 0xa5 */
-/* Unused 0xa6 - 0xfe (remember, 0xff is NOT part of the memmap region) */
-
-
-/* Define the format of the accelerometer mapped memory status byte. */
-#define EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK 0x0f
-#define EC_MEMMAP_ACC_STATUS_BUSY_BIT (1 << 4)
-#define EC_MEMMAP_ACC_STATUS_PRESENCE_BIT (1 << 7)
-
-/* Number of temp sensors at EC_MEMMAP_TEMP_SENSOR */
-#define EC_TEMP_SENSOR_ENTRIES 16
-/*
- * Number of temp sensors at EC_MEMMAP_TEMP_SENSOR_B.
- *
- * Valid only if EC_MEMMAP_THERMAL_VERSION returns >= 2.
- */
-#define EC_TEMP_SENSOR_B_ENTRIES 8
-
-/* Special values for mapped temperature sensors */
-#define EC_TEMP_SENSOR_NOT_PRESENT 0xff
-#define EC_TEMP_SENSOR_ERROR 0xfe
-#define EC_TEMP_SENSOR_NOT_POWERED 0xfd
-#define EC_TEMP_SENSOR_NOT_CALIBRATED 0xfc
-/*
- * The offset of temperature value stored in mapped memory. This allows
- * reporting a temperature range of 200K to 454K = -73C to 181C.
- */
-#define EC_TEMP_SENSOR_OFFSET 200
-
-/*
- * Number of ALS readings at EC_MEMMAP_ALS
- */
-#define EC_ALS_ENTRIES 2
-
-/*
- * The default value a temperature sensor will return when it is present but
- * has not been read this boot. This is a reasonable number to avoid
- * triggering alarms on the host.
- */
-#define EC_TEMP_SENSOR_DEFAULT (296 - EC_TEMP_SENSOR_OFFSET)
-
-#define EC_FAN_SPEED_ENTRIES 4 /* Number of fans at EC_MEMMAP_FAN */
-#define EC_FAN_SPEED_NOT_PRESENT 0xffff /* Entry not present */
-#define EC_FAN_SPEED_STALLED 0xfffe /* Fan stalled */
-
-/* Battery bit flags at EC_MEMMAP_BATT_FLAG. */
-#define EC_BATT_FLAG_AC_PRESENT 0x01
-#define EC_BATT_FLAG_BATT_PRESENT 0x02
-#define EC_BATT_FLAG_DISCHARGING 0x04
-#define EC_BATT_FLAG_CHARGING 0x08
-#define EC_BATT_FLAG_LEVEL_CRITICAL 0x10
-
-/* Switch flags at EC_MEMMAP_SWITCHES */
-#define EC_SWITCH_LID_OPEN 0x01
-#define EC_SWITCH_POWER_BUTTON_PRESSED 0x02
-#define EC_SWITCH_WRITE_PROTECT_DISABLED 0x04
-/* Was recovery requested via keyboard; now unused. */
-#define EC_SWITCH_IGNORE1 0x08
-/* Recovery requested via dedicated signal (from servo board) */
-#define EC_SWITCH_DEDICATED_RECOVERY 0x10
-/* Was fake developer mode switch; now unused. Remove in next refactor. */
-#define EC_SWITCH_IGNORE0 0x20
-
-/* Host command interface flags */
-/* Host command interface supports LPC args (LPC interface only) */
-#define EC_HOST_CMD_FLAG_LPC_ARGS_SUPPORTED 0x01
-/* Host command interface supports version 3 protocol */
-#define EC_HOST_CMD_FLAG_VERSION_3 0x02
-
-/* Wireless switch flags */
-#define EC_WIRELESS_SWITCH_ALL ~0x00 /* All flags */
-#define EC_WIRELESS_SWITCH_WLAN 0x01 /* WLAN radio */
-#define EC_WIRELESS_SWITCH_BLUETOOTH 0x02 /* Bluetooth radio */
-#define EC_WIRELESS_SWITCH_WWAN 0x04 /* WWAN power */
-#define EC_WIRELESS_SWITCH_WLAN_POWER 0x08 /* WLAN power */
-
-/*
- * This header file is used in coreboot both in C and ACPI code. The ACPI code
- * is pre-processed to handle constants but the ASL compiler is unable to
- * handle actual C code so keep it separate.
- */
-#ifndef __ACPI__
-
-/*
- * Define __packed if someone hasn't beat us to it. Linux kernel style
- * checking prefers __packed over __attribute__((packed)).
- */
-#ifndef __packed
-#define __packed __attribute__((packed))
-#endif
-
-/* LPC command status byte masks */
-/* EC has written a byte in the data register and host hasn't read it yet */
-#define EC_LPC_STATUS_TO_HOST 0x01
-/* Host has written a command/data byte and the EC hasn't read it yet */
-#define EC_LPC_STATUS_FROM_HOST 0x02
-/* EC is processing a command */
-#define EC_LPC_STATUS_PROCESSING 0x04
-/* Last write to EC was a command, not data */
-#define EC_LPC_STATUS_LAST_CMD 0x08
-/* EC is in burst mode. Unsupported by Chrome EC, so this bit is never set */
-#define EC_LPC_STATUS_BURST_MODE 0x10
-/* SCI event is pending (requesting SCI query) */
-#define EC_LPC_STATUS_SCI_PENDING 0x20
-/* SMI event is pending (requesting SMI query) */
-#define EC_LPC_STATUS_SMI_PENDING 0x40
-/* (reserved) */
-#define EC_LPC_STATUS_RESERVED 0x80
-
-/*
- * EC is busy. This covers both the EC processing a command, and the host has
- * written a new command but the EC hasn't picked it up yet.
- */
-#define EC_LPC_STATUS_BUSY_MASK \
- (EC_LPC_STATUS_FROM_HOST | EC_LPC_STATUS_PROCESSING)
-
-/* Host command response codes */
-enum ec_status {
- EC_RES_SUCCESS = 0,
- EC_RES_INVALID_COMMAND = 1,
- EC_RES_ERROR = 2,
- EC_RES_INVALID_PARAM = 3,
- EC_RES_ACCESS_DENIED = 4,
- EC_RES_INVALID_RESPONSE = 5,
- EC_RES_INVALID_VERSION = 6,
- EC_RES_INVALID_CHECKSUM = 7,
- EC_RES_IN_PROGRESS = 8, /* Accepted, command in progress */
- EC_RES_UNAVAILABLE = 9, /* No response available */
- EC_RES_TIMEOUT = 10, /* We got a timeout */
- EC_RES_OVERFLOW = 11, /* Table / data overflow */
- EC_RES_INVALID_HEADER = 12, /* Header contains invalid data */
- EC_RES_REQUEST_TRUNCATED = 13, /* Didn't get the entire request */
- EC_RES_RESPONSE_TOO_BIG = 14 /* Response was too big to handle */
-};
-
-/*
- * Host event codes. Note these are 1-based, not 0-based, because ACPI query
- * EC command uses code 0 to mean "no event pending". We explicitly specify
- * each value in the enum listing so they won't change if we delete/insert an
- * item or rearrange the list (it needs to be stable across platforms, not
- * just within a single compiled instance).
- */
-enum host_event_code {
- EC_HOST_EVENT_LID_CLOSED = 1,
- EC_HOST_EVENT_LID_OPEN = 2,
- EC_HOST_EVENT_POWER_BUTTON = 3,
- EC_HOST_EVENT_AC_CONNECTED = 4,
- EC_HOST_EVENT_AC_DISCONNECTED = 5,
- EC_HOST_EVENT_BATTERY_LOW = 6,
- EC_HOST_EVENT_BATTERY_CRITICAL = 7,
- EC_HOST_EVENT_BATTERY = 8,
- EC_HOST_EVENT_THERMAL_THRESHOLD = 9,
- EC_HOST_EVENT_THERMAL_OVERLOAD = 10,
- EC_HOST_EVENT_THERMAL = 11,
- EC_HOST_EVENT_USB_CHARGER = 12,
- EC_HOST_EVENT_KEY_PRESSED = 13,
- /*
- * EC has finished initializing the host interface. The host can check
- * for this event after sending an EC_CMD_REBOOT_EC command to
- * determine when the EC is ready to accept subsequent commands.
- */
- EC_HOST_EVENT_INTERFACE_READY = 14,
- /* Keyboard recovery combo has been pressed */
- EC_HOST_EVENT_KEYBOARD_RECOVERY = 15,
-
- /* Shutdown due to thermal overload */
- EC_HOST_EVENT_THERMAL_SHUTDOWN = 16,
- /* Shutdown due to battery level too low */
- EC_HOST_EVENT_BATTERY_SHUTDOWN = 17,
-
- /* Suggest that the AP throttle itself */
- EC_HOST_EVENT_THROTTLE_START = 18,
- /* Suggest that the AP resume normal speed */
- EC_HOST_EVENT_THROTTLE_STOP = 19,
-
- /* Hang detect logic detected a hang and host event timeout expired */
- EC_HOST_EVENT_HANG_DETECT = 20,
- /* Hang detect logic detected a hang and warm rebooted the AP */
- EC_HOST_EVENT_HANG_REBOOT = 21,
-
- /*
- * The high bit of the event mask is not used as a host event code. If
- * it reads back as set, then the entire event mask should be
- * considered invalid by the host. This can happen when reading the
- * raw event status via EC_MEMMAP_HOST_EVENTS but the LPC interface is
- * not initialized on the EC, or improperly configured on the host.
- */
- EC_HOST_EVENT_INVALID = 32
-};
-/* Host event mask */
-#define EC_HOST_EVENT_MASK(event_code) (1UL << ((event_code) - 1))
-
-/* Arguments at EC_LPC_ADDR_HOST_ARGS */
-struct ec_lpc_host_args {
- uint8_t flags;
- uint8_t command_version;
- uint8_t data_size;
- /*
- * Checksum; sum of command + flags + command_version + data_size +
- * all params/response data bytes.
- */
- uint8_t checksum;
-} __packed;
-
-/* Flags for ec_lpc_host_args.flags */
-/*
- * Args are from host. Data area at EC_LPC_ADDR_HOST_PARAM contains command
- * params.
- *
- * If EC gets a command and this flag is not set, this is an old-style command.
- * Command version is 0 and params from host are at EC_LPC_ADDR_OLD_PARAM with
- * unknown length. EC must respond with an old-style response (that is,
- * without setting EC_HOST_ARGS_FLAG_TO_HOST).
- */
-#define EC_HOST_ARGS_FLAG_FROM_HOST 0x01
-/*
- * Args are from EC. Data area at EC_LPC_ADDR_HOST_PARAM contains response.
- *
- * If EC responds to a command and this flag is not set, this is an old-style
- * response. Command version is 0 and response data from EC is at
- * EC_LPC_ADDR_OLD_PARAM with unknown length.
- */
-#define EC_HOST_ARGS_FLAG_TO_HOST 0x02
-
-/*****************************************************************************/
-/*
- * Byte codes returned by EC over SPI interface.
- *
- * These can be used by the AP to debug the EC interface, and to determine
- * when the EC is not in a state where it will ever get around to responding
- * to the AP.
- *
- * Example of sequence of bytes read from EC for a current good transfer:
- * 1. - - AP asserts chip select (CS#)
- * 2. EC_SPI_OLD_READY - AP sends first byte(s) of request
- * 3. - - EC starts handling CS# interrupt
- * 4. EC_SPI_RECEIVING - AP sends remaining byte(s) of request
- * 5. EC_SPI_PROCESSING - EC starts processing request; AP is clocking in
- * bytes looking for EC_SPI_FRAME_START
- * 6. - - EC finishes processing and sets up response
- * 7. EC_SPI_FRAME_START - AP reads frame byte
- * 8. (response packet) - AP reads response packet
- * 9. EC_SPI_PAST_END - Any additional bytes read by AP
- * 10 - - AP deasserts chip select
- * 11 - - EC processes CS# interrupt and sets up DMA for
- * next request
- *
- * If the AP is waiting for EC_SPI_FRAME_START and sees any value other than
- * the following byte values:
- * EC_SPI_OLD_READY
- * EC_SPI_RX_READY
- * EC_SPI_RECEIVING
- * EC_SPI_PROCESSING
- *
- * Then the EC found an error in the request, or was not ready for the request
- * and lost data. The AP should give up waiting for EC_SPI_FRAME_START,
- * because the EC is unable to tell when the AP is done sending its request.
- */
-
-/*
- * Framing byte which precedes a response packet from the EC. After sending a
- * request, the AP will clock in bytes until it sees the framing byte, then
- * clock in the response packet.
- */
-#define EC_SPI_FRAME_START 0xec
-
-/*
- * Padding bytes which are clocked out after the end of a response packet.
- */
-#define EC_SPI_PAST_END 0xed
-
-/*
- * EC is ready to receive, and has ignored the byte sent by the AP. EC expects
- * that the AP will send a valid packet header (starting with
- * EC_COMMAND_PROTOCOL_3) in the next 32 bytes.
- */
-#define EC_SPI_RX_READY 0xf8
-
-/*
- * EC has started receiving the request from the AP, but hasn't started
- * processing it yet.
- */
-#define EC_SPI_RECEIVING 0xf9
-
-/* EC has received the entire request from the AP and is processing it. */
-#define EC_SPI_PROCESSING 0xfa
-
-/*
- * EC received bad data from the AP, such as a packet header with an invalid
- * length. EC will ignore all data until chip select deasserts.
- */
-#define EC_SPI_RX_BAD_DATA 0xfb
-
-/*
- * EC received data from the AP before it was ready. That is, the AP asserted
- * chip select and started clocking data before the EC was ready to receive it.
- * EC will ignore all data until chip select deasserts.
- */
-#define EC_SPI_NOT_READY 0xfc
-
-/*
- * EC was ready to receive a request from the AP. EC has treated the byte sent
- * by the AP as part of a request packet, or (for old-style ECs) is processing
- * a fully received packet but is not ready to respond yet.
- */
-#define EC_SPI_OLD_READY 0xfd
-
-/*****************************************************************************/
-
-/*
- * Protocol version 2 for I2C and SPI send a request this way:
- *
- * 0 EC_CMD_VERSION0 + (command version)
- * 1 Command number
- * 2 Length of params = N
- * 3..N+2 Params, if any
- * N+3 8-bit checksum of bytes 0..N+2
- *
- * The corresponding response is:
- *
- * 0 Result code (EC_RES_*)
- * 1 Length of params = M
- * 2..M+1 Params, if any
- * M+2 8-bit checksum of bytes 0..M+1
- */
-#define EC_PROTO2_REQUEST_HEADER_BYTES 3
-#define EC_PROTO2_REQUEST_TRAILER_BYTES 1
-#define EC_PROTO2_REQUEST_OVERHEAD (EC_PROTO2_REQUEST_HEADER_BYTES + \
- EC_PROTO2_REQUEST_TRAILER_BYTES)
-
-#define EC_PROTO2_RESPONSE_HEADER_BYTES 2
-#define EC_PROTO2_RESPONSE_TRAILER_BYTES 1
-#define EC_PROTO2_RESPONSE_OVERHEAD (EC_PROTO2_RESPONSE_HEADER_BYTES + \
- EC_PROTO2_RESPONSE_TRAILER_BYTES)
-
-/* Parameter length was limited by the LPC interface */
-#define EC_PROTO2_MAX_PARAM_SIZE 0xfc
-
-/* Maximum request and response packet sizes for protocol version 2 */
-#define EC_PROTO2_MAX_REQUEST_SIZE (EC_PROTO2_REQUEST_OVERHEAD + \
- EC_PROTO2_MAX_PARAM_SIZE)
-#define EC_PROTO2_MAX_RESPONSE_SIZE (EC_PROTO2_RESPONSE_OVERHEAD + \
- EC_PROTO2_MAX_PARAM_SIZE)
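-
-/*
- * Illustrative sketch (not part of the original header): filling a protocol
- * version 2 request buffer with the layout above. The caller provides at
- * least EC_PROTO2_MAX_REQUEST_SIZE bytes; EC_CMD_VERSION0 is defined
- * earlier in this header.
- */
-static int ec_proto2_build_request(uint8_t *out, uint8_t version,
-				   uint8_t command, const uint8_t *params,
-				   uint8_t param_len)
-{
-	uint8_t csum = 0;
-	int i;
-
-	out[0] = EC_CMD_VERSION0 + version;
-	out[1] = command;
-	out[2] = param_len;
-	for (i = 0; i < param_len; i++)
-		out[EC_PROTO2_REQUEST_HEADER_BYTES + i] = params[i];
-
-	/* 8-bit checksum of bytes 0..N+2 goes in the trailer */
-	for (i = 0; i < EC_PROTO2_REQUEST_HEADER_BYTES + param_len; i++)
-		csum += out[i];
-	out[EC_PROTO2_REQUEST_HEADER_BYTES + param_len] = csum;
-
-	return EC_PROTO2_REQUEST_OVERHEAD + param_len;
-}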
-
-/*****************************************************************************/
-
-/*
- * Value written to legacy command port / prefix byte to indicate protocol
- * 3+ structs are being used. Usage is bus-dependent.
- */
-#define EC_COMMAND_PROTOCOL_3 0xda
-
-#define EC_HOST_REQUEST_VERSION 3
-
-/* Version 3 request from host */
-struct ec_host_request {
- /* Struct version (=3)
- *
- * EC will return EC_RES_INVALID_HEADER if it receives a header with a
- * version it doesn't know how to parse.
- */
- uint8_t struct_version;
-
- /*
- * Checksum of request and data; sum of all bytes including checksum
- * should total to 0.
- */
- uint8_t checksum;
-
- /* Command code */
- uint16_t command;
-
- /* Command version */
- uint8_t command_version;
-
- /* Unused byte in current protocol version; set to 0 */
- uint8_t reserved;
-
- /* Length of data which follows this header */
- uint16_t data_len;
-} __packed;
-
-#define EC_HOST_RESPONSE_VERSION 3
-
-/* Version 3 response from EC */
-struct ec_host_response {
- /* Struct version (=3) */
- uint8_t struct_version;
-
- /*
- * Checksum of response and data; sum of all bytes including checksum
- * should total to 0.
- */
- uint8_t checksum;
-
- /* Result code (EC_RES_*) */
- uint16_t result;
-
- /* Length of data which follows this header */
- uint16_t data_len;
-
- /* Unused bytes in current protocol version; set to 0 */
- uint16_t reserved;
-} __packed;
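-
-/*
- * Illustrative sketch (not part of the original header): choosing the
- * version 3 checksum byte so that all header and data bytes, including the
- * checksum itself, sum to 0 (mod 256).
- */
-static uint8_t ec_host_request_checksum(struct ec_host_request *rq,
-					const uint8_t *data)
-{
-	const uint8_t *p = (const uint8_t *)rq;
-	uint8_t sum = 0;
-	unsigned int i;
-
-	rq->checksum = 0;
-	for (i = 0; i < sizeof(*rq); i++)
-		sum += p[i];
-	for (i = 0; i < rq->data_len; i++)
-		sum += data[i];
-
-	return (uint8_t)(0 - sum);	/* makes the total wrap to zero */
-}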
-
-/*****************************************************************************/
-/*
- * Notes on commands:
- *
- * Each command is a 16-bit command value. Commands which take params or
- * return response data specify structs for that data. If no struct is
- * specified, the command does not input or output data, respectively.
- * Parameter/response length is implicit in the structs. Some underlying
- * communication protocols (I2C, SPI) may add length or checksum headers, but
- * those are implementation-dependent and not defined here.
- */
-
-/*****************************************************************************/
-/* General / test commands */
-
-/*
- * Get protocol version, used to deal with non-backward compatible protocol
- * changes.
- */
-#define EC_CMD_PROTO_VERSION 0x00
-
-struct ec_response_proto_version {
- uint32_t version;
-} __packed;
-
-/*
- * Hello. This is a simple command to test that the EC is responsive to
- * commands.
- */
-#define EC_CMD_HELLO 0x01
-
-struct ec_params_hello {
- uint32_t in_data; /* Pass anything here */
-} __packed;
-
-struct ec_response_hello {
- uint32_t out_data; /* Output will be in_data + 0x01020304 */
-} __packed;
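-
-/*
- * Illustrative usage (hypothetical helper, not part of the original
- * header): verifying a hello round trip.
- */
-static inline int ec_hello_ok(const struct ec_params_hello *p,
-			      const struct ec_response_hello *r)
-{
-	return r->out_data == p->in_data + 0x01020304;
-}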
-
-/* Get version number */
-#define EC_CMD_GET_VERSION 0x02
-
-enum ec_current_image {
- EC_IMAGE_UNKNOWN = 0,
- EC_IMAGE_RO,
- EC_IMAGE_RW
-};
-
-struct ec_response_get_version {
- /* Null-terminated version strings for RO, RW */
- char version_string_ro[32];
- char version_string_rw[32];
- char reserved[32]; /* Was previously RW-B string */
- uint32_t current_image; /* One of ec_current_image */
-} __packed;
-
-/* Read test */
-#define EC_CMD_READ_TEST 0x03
-
-struct ec_params_read_test {
- uint32_t offset; /* Starting value for read buffer */
- uint32_t size; /* Size to read in bytes */
-} __packed;
-
-struct ec_response_read_test {
- uint32_t data[32];
-} __packed;
-
-/*
- * Get build information
- *
- * Response is null-terminated string.
- */
-#define EC_CMD_GET_BUILD_INFO 0x04
-
-/* Get chip info */
-#define EC_CMD_GET_CHIP_INFO 0x05
-
-struct ec_response_get_chip_info {
- /* Null-terminated strings */
- char vendor[32];
- char name[32];
- char revision[32]; /* Mask version */
-} __packed;
-
-/* Get board HW version */
-#define EC_CMD_GET_BOARD_VERSION 0x06
-
-struct ec_response_board_version {
- uint16_t board_version; /* A monotonically increasing number. */
-} __packed;
-
-/*
- * Read memory-mapped data.
- *
- * This is an alternate interface to memory-mapped data for bus protocols
- * which don't support direct-mapped memory - I2C, SPI, etc.
- *
- * Response is params.size bytes of data.
- */
-#define EC_CMD_READ_MEMMAP 0x07
-
-struct ec_params_read_memmap {
- uint8_t offset; /* Offset in memmap (EC_MEMMAP_*) */
- uint8_t size; /* Size to read in bytes */
-} __packed;
-
-/* Read versions supported for a command */
-#define EC_CMD_GET_CMD_VERSIONS 0x08
-
-struct ec_params_get_cmd_versions {
- uint8_t cmd; /* Command to check */
-} __packed;
-
-struct ec_params_get_cmd_versions_v1 {
- uint16_t cmd; /* Command to check */
-} __packed;
-
-struct ec_response_get_cmd_versions {
- /*
- * Mask of supported versions; use EC_VER_MASK() to compare with a
- * desired version.
- */
- uint32_t version_mask;
-} __packed;
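-
-/*
- * Illustrative sketch (not part of the original header): checking for a
- * command version, assuming EC_VER_MASK(v), defined earlier in this
- * header, expands to (1UL << (v)).
- */
-static inline int ec_cmd_version_supported(
-	const struct ec_response_get_cmd_versions *r, int version)
-{
-	return (r->version_mask & EC_VER_MASK(version)) != 0;
-}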
-
-/*
- * Check EC communications status (busy). This is needed on i2c/spi but not
- * on lpc since it has its own out-of-band busy indicator.
- *
- * lpc must read the status from the command register. Attempting this on
- * lpc will overwrite the args/parameter space and corrupt its data.
- */
-#define EC_CMD_GET_COMMS_STATUS 0x09
-
-/* Avoid using ec_status which is for return values */
-enum ec_comms_status {
- EC_COMMS_STATUS_PROCESSING = 1 << 0, /* Processing cmd */
-};
-
-struct ec_response_get_comms_status {
- uint32_t flags; /* Mask of enum ec_comms_status */
-} __packed;
-
-/* Fake a variety of responses, purely for testing purposes. */
-#define EC_CMD_TEST_PROTOCOL 0x0a
-
-/* Tell the EC what to send back to us. */
-struct ec_params_test_protocol {
- uint32_t ec_result;
- uint32_t ret_len;
- uint8_t buf[32];
-} __packed;
-
-/* Here it comes... */
-struct ec_response_test_protocol {
- uint8_t buf[32];
-} __packed;
-
-/* Get protocol information */
-#define EC_CMD_GET_PROTOCOL_INFO 0x0b
-
-/* Flags for ec_response_get_protocol_info.flags */
-/* EC_RES_IN_PROGRESS may be returned if a command is slow */
-#define EC_PROTOCOL_INFO_IN_PROGRESS_SUPPORTED (1 << 0)
-
-struct ec_response_get_protocol_info {
- /* Fields which exist if at least protocol version 3 is supported */
-
- /* Bitmask of protocol versions supported (1 << n means version n) */
- uint32_t protocol_versions;
-
- /* Maximum request packet size, in bytes */
- uint16_t max_request_packet_size;
-
- /* Maximum response packet size, in bytes */
- uint16_t max_response_packet_size;
-
- /* Flags; see EC_PROTOCOL_INFO_* */
- uint32_t flags;
-} __packed;
-
-
-/*****************************************************************************/
-/* Get/Set miscellaneous values */
-
-/* The upper byte of .flags tells what to do (nothing means "get") */
-#define EC_GSV_SET 0x80000000
-
-/* The lower three bytes of .flags identify the parameter, if that has
-   meaning for an individual command. */
-#define EC_GSV_PARAM_MASK 0x00ffffff
-
-struct ec_params_get_set_value {
- uint32_t flags;
- uint32_t value;
-} __packed;
-
-struct ec_response_get_set_value {
- uint32_t flags;
- uint32_t value;
-} __packed;
-
-/* More than one command can use these structs to get/set parameters. */
-#define EC_CMD_GSV_PAUSE_IN_S5 0x0c
-
-/*****************************************************************************/
-/* List the features supported by the firmware */
-#define EC_CMD_GET_FEATURES 0x0d
-
-/* Supported features */
-enum ec_feature_code {
- /*
- * This image contains a limited set of features. Another image
- * in RW partition may support more features.
- */
- EC_FEATURE_LIMITED = 0,
- /*
- * Commands for probing/reading/writing/erasing the flash in the
- * EC are present.
- */
- EC_FEATURE_FLASH = 1,
- /*
- * Can control the fan speed directly.
- */
- EC_FEATURE_PWM_FAN = 2,
- /*
- * Can control the intensity of the keyboard backlight.
- */
- EC_FEATURE_PWM_KEYB = 3,
- /*
- * Support Google lightbar, introduced on Pixel.
- */
- EC_FEATURE_LIGHTBAR = 4,
- /* Control of LEDs */
- EC_FEATURE_LED = 5,
- /* Exposes an interface to control gyro and sensors.
- * The host goes through the EC to access these sensors.
- * In addition, the EC may provide composite sensors, like lid angle.
- */
- EC_FEATURE_MOTION_SENSE = 6,
- /* The keyboard is controlled by the EC */
- EC_FEATURE_KEYB = 7,
- /* The AP can use part of the EC flash as persistent storage. */
- EC_FEATURE_PSTORE = 8,
- /* The EC monitors BIOS port 80h, and can return POST codes. */
- EC_FEATURE_PORT80 = 9,
- /*
- * Thermal management: includes TMP-specific commands.
- * Higher level than direct fan control.
- */
- EC_FEATURE_THERMAL = 10,
- /* Can switch the screen backlight on/off */
- EC_FEATURE_BKLIGHT_SWITCH = 11,
- /* Can switch the wifi module on/off */
- EC_FEATURE_WIFI_SWITCH = 12,
- /* Monitor host events, through for example SMI or SCI */
- EC_FEATURE_HOST_EVENTS = 13,
- /* The EC exposes GPIO commands to control/monitor connected devices. */
- EC_FEATURE_GPIO = 14,
- /* The EC can send i2c messages to downstream devices. */
- EC_FEATURE_I2C = 15,
- /* Commands to control the charger are included */
- EC_FEATURE_CHARGER = 16,
- /* Simple battery support. */
- EC_FEATURE_BATTERY = 17,
- /*
- * Support Smart battery protocol
- * (Common Smart Battery System Interface Specification)
- */
- EC_FEATURE_SMART_BATTERY = 18,
- /* EC can detect when the host hangs. */
- EC_FEATURE_HANG_DETECT = 19,
- /* Report power information, for pit only */
- EC_FEATURE_PMU = 20,
- /* Another Cros EC device is present downstream of this one */
- EC_FEATURE_SUB_MCU = 21,
- /* Support USB Power delivery (PD) commands */
- EC_FEATURE_USB_PD = 22,
- /* Control USB multiplexer, for audio through USB port for instance. */
- EC_FEATURE_USB_MUX = 23,
- /* Motion Sensor code has an internal software FIFO */
- EC_FEATURE_MOTION_SENSE_FIFO = 24,
-};
-
-#define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32))
-#define EC_FEATURE_MASK_1(event_code) (1UL << (event_code - 32))
-struct ec_response_get_features {
- uint32_t flags[2];
-} __packed;
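-
-/*
- * Illustrative sketch (not part of the original header): testing one
- * feature bit against the two-word mask returned by EC_CMD_GET_FEATURES.
- */
-static inline int ec_has_feature(const struct ec_response_get_features *r,
-				 enum ec_feature_code f)
-{
-	return f < 32 ? !!(r->flags[0] & EC_FEATURE_MASK_0(f))
-		      : !!(r->flags[1] & EC_FEATURE_MASK_1(f));
-}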
-
-/*****************************************************************************/
-/* Flash commands */
-
-/* Get flash info */
-#define EC_CMD_FLASH_INFO 0x10
-
-/* Version 0 returns these fields */
-struct ec_response_flash_info {
- /* Usable flash size, in bytes */
- uint32_t flash_size;
- /*
- * Write block size. Write offset and size must be a multiple
- * of this.
- */
- uint32_t write_block_size;
- /*
- * Erase block size. Erase offset and size must be a multiple
- * of this.
- */
- uint32_t erase_block_size;
- /*
- * Protection block size. Protection offset and size must be a
- * multiple of this.
- */
- uint32_t protect_block_size;
-} __packed;
-
-/* Flags for version 1+ flash info command */
-/* EC flash erases bits to 0 instead of 1 */
-#define EC_FLASH_INFO_ERASE_TO_0 (1 << 0)
-
-/*
- * Version 1 returns the same initial fields as version 0, with additional
- * fields following.
- *
- * gcc anonymous structs don't seem to get along with the __packed directive;
- * if they did we'd define the version 0 struct as a sub-struct of this one.
- */
-struct ec_response_flash_info_1 {
- /* Version 0 fields; see above for description */
- uint32_t flash_size;
- uint32_t write_block_size;
- uint32_t erase_block_size;
- uint32_t protect_block_size;
-
- /* Version 1 adds these fields: */
- /*
- * Ideal write size in bytes. Writes will be fastest if size is
- * exactly this and offset is a multiple of this. For example, an EC
- * may have a write buffer which can do half-page operations if data is
- * aligned, and a slower word-at-a-time write mode.
- */
- uint32_t write_ideal_size;
-
- /* Flags; see EC_FLASH_INFO_* */
- uint32_t flags;
-} __packed;
-
-/*
- * Read flash
- *
- * Response is params.size bytes of data.
- */
-#define EC_CMD_FLASH_READ 0x11
-
-struct ec_params_flash_read {
- uint32_t offset; /* Byte offset to read */
- uint32_t size; /* Size to read in bytes */
-} __packed;
-
-/* Write flash */
-#define EC_CMD_FLASH_WRITE 0x12
-#define EC_VER_FLASH_WRITE 1
-
-/* Version 0 of the flash command supported only 64 bytes of data */
-#define EC_FLASH_WRITE_VER0_SIZE 64
-
-struct ec_params_flash_write {
- uint32_t offset; /* Byte offset to write */
- uint32_t size; /* Size to write in bytes */
- /* Followed by data to write */
-} __packed;
-
-/* Erase flash */
-#define EC_CMD_FLASH_ERASE 0x13
-
-struct ec_params_flash_erase {
- uint32_t offset; /* Byte offset to erase */
- uint32_t size; /* Size to erase in bytes */
-} __packed;
-
-/*
- * Get/set flash protection.
- *
- * If mask!=0, sets/clears the requested bits of flags. Depending on the
- * firmware write protect GPIO, not all flags will take effect immediately;
- * some flags require a subsequent hard reset to take effect. Check the
- * returned flags bits to see what actually happened.
- *
- * If mask=0, simply returns the current flags state.
- */
-#define EC_CMD_FLASH_PROTECT 0x15
-#define EC_VER_FLASH_PROTECT 1 /* Command version 1 */
-
-/* Flags for flash protection */
-/* RO flash code protected when the EC boots */
-#define EC_FLASH_PROTECT_RO_AT_BOOT (1 << 0)
-/*
- * RO flash code protected now. If this bit is set, at-boot status cannot
- * be changed.
- */
-#define EC_FLASH_PROTECT_RO_NOW (1 << 1)
-/* Entire flash code protected now, until reboot. */
-#define EC_FLASH_PROTECT_ALL_NOW (1 << 2)
-/* Flash write protect GPIO is asserted now */
-#define EC_FLASH_PROTECT_GPIO_ASSERTED (1 << 3)
-/* Error - at least one bank of flash is stuck locked, and cannot be unlocked */
-#define EC_FLASH_PROTECT_ERROR_STUCK (1 << 4)
-/*
- * Error - flash protection is in inconsistent state. At least one bank of
- * flash which should be protected is not protected. Usually fixed by
- * re-requesting the desired flags, or by a hard reset if that fails.
- */
-#define EC_FLASH_PROTECT_ERROR_INCONSISTENT (1 << 5)
-/* Entire flash code protected when the EC boots */
-#define EC_FLASH_PROTECT_ALL_AT_BOOT (1 << 6)
-
-struct ec_params_flash_protect {
- uint32_t mask; /* Bits in flags to apply */
- uint32_t flags; /* New flags to apply */
-} __packed;
-
-struct ec_response_flash_protect {
- /* Current value of flash protect flags */
- uint32_t flags;
- /*
- * Flags which are valid on this platform. This allows the caller
- * to distinguish between flags which aren't set vs. flags which can't
- * be set on this platform.
- */
- uint32_t valid_flags;
- /* Flags which can be changed given the current protection state */
- uint32_t writable_flags;
-} __packed;
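-
-/*
- * Illustrative sketch (not part of the original header; ec_cmd() is a
- * hypothetical transport): enabling RO-at-boot protection, then checking
- * the returned flags since some bits only latch after a hard reset.
- */
-static int ec_protect_ro_at_boot(void)
-{
-	struct ec_params_flash_protect p = {
-		.mask = EC_FLASH_PROTECT_RO_AT_BOOT,
-		.flags = EC_FLASH_PROTECT_RO_AT_BOOT,
-	};
-	struct ec_response_flash_protect r;
-	int rv = ec_cmd(EC_CMD_FLASH_PROTECT, EC_VER_FLASH_PROTECT,
-			&p, sizeof(p), &r, sizeof(r));
-
-	if (rv)
-		return rv;
-
-	return (r.flags & EC_FLASH_PROTECT_RO_AT_BOOT) ? 0 : -1;
-}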
-
-/*
- * Note: commands 0x14 - 0x19 version 0 were old commands to get/set flash
- * write protect. These commands may be reused with version > 0.
- */
-
-/* Get the region offset/size */
-#define EC_CMD_FLASH_REGION_INFO 0x16
-#define EC_VER_FLASH_REGION_INFO 1
-
-enum ec_flash_region {
- /* Region which holds read-only EC image */
- EC_FLASH_REGION_RO = 0,
- /* Region which holds rewritable EC image */
- EC_FLASH_REGION_RW,
- /*
- * Region which should be write-protected in the factory (a superset of
- * EC_FLASH_REGION_RO)
- */
- EC_FLASH_REGION_WP_RO,
- /* Number of regions */
- EC_FLASH_REGION_COUNT,
-};
-
-struct ec_params_flash_region_info {
- uint32_t region; /* enum ec_flash_region */
-} __packed;
-
-struct ec_response_flash_region_info {
- uint32_t offset;
- uint32_t size;
-} __packed;
-
-/* Read/write VbNvContext */
-#define EC_CMD_VBNV_CONTEXT 0x17
-#define EC_VER_VBNV_CONTEXT 1
-#define EC_VBNV_BLOCK_SIZE 16
-
-enum ec_vbnvcontext_op {
- EC_VBNV_CONTEXT_OP_READ,
- EC_VBNV_CONTEXT_OP_WRITE,
-};
-
-struct ec_params_vbnvcontext {
- uint32_t op;
- uint8_t block[EC_VBNV_BLOCK_SIZE];
-} __packed;
-
-struct ec_response_vbnvcontext {
- uint8_t block[EC_VBNV_BLOCK_SIZE];
-} __packed;
-
-/*****************************************************************************/
-/* PWM commands */
-
-/* Get fan target RPM */
-#define EC_CMD_PWM_GET_FAN_TARGET_RPM 0x20
-
-struct ec_response_pwm_get_fan_rpm {
- uint32_t rpm;
-} __packed;
-
-/* Set target fan RPM */
-#define EC_CMD_PWM_SET_FAN_TARGET_RPM 0x21
-
-struct ec_params_pwm_set_fan_target_rpm {
- uint32_t rpm;
-} __packed;
-
-/* Get keyboard backlight */
-#define EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT 0x22
-
-struct ec_response_pwm_get_keyboard_backlight {
- uint8_t percent;
- uint8_t enabled;
-} __packed;
-
-/* Set keyboard backlight */
-#define EC_CMD_PWM_SET_KEYBOARD_BACKLIGHT 0x23
-
-struct ec_params_pwm_set_keyboard_backlight {
- uint8_t percent;
-} __packed;
-
-/* Set target fan PWM duty cycle */
-#define EC_CMD_PWM_SET_FAN_DUTY 0x24
-
-struct ec_params_pwm_set_fan_duty {
- uint32_t percent;
-} __packed;
-
-#define EC_CMD_PWM_SET_DUTY 0x25
-/* 16 bit duty cycle, 0xffff = 100% */
-#define EC_PWM_MAX_DUTY 0xffff
-
-enum ec_pwm_type {
- /* All types, indexed by board-specific enum pwm_channel */
- EC_PWM_TYPE_GENERIC = 0,
- /* Keyboard backlight */
- EC_PWM_TYPE_KB_LIGHT,
- /* Display backlight */
- EC_PWM_TYPE_DISPLAY_LIGHT,
- EC_PWM_TYPE_COUNT,
-};
-
-struct ec_params_pwm_set_duty {
- uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */
- uint8_t pwm_type; /* ec_pwm_type */
- uint8_t index; /* Type-specific index, or 0 if unique */
-} __packed;
-
-#define EC_CMD_PWM_GET_DUTY 0x26
-
-struct ec_params_pwm_get_duty {
- uint8_t pwm_type; /* ec_pwm_type */
- uint8_t index; /* Type-specific index, or 0 if unique */
-} __packed;
-
-struct ec_response_pwm_get_duty {
- uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */
-} __packed;
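-
-/*
- * Illustrative usage (not part of the original header): parameters asking
- * for a 50% keyboard backlight duty cycle; EC_PWM_MAX_DUTY scales to 100%.
- */
-static const struct ec_params_pwm_set_duty kb_half_brightness = {
-	.duty = EC_PWM_MAX_DUTY / 2,
-	.pwm_type = EC_PWM_TYPE_KB_LIGHT,
-	.index = 0,
-};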
-
-/*****************************************************************************/
-/*
- * Lightbar commands. This looks worse than it is. Since we only use one HOST
- * command to say "talk to the lightbar", we put the "and tell it to do X" part
- * into a subcommand. We'll make separate structs for subcommands with
- * different input args, so that we know how much to expect.
- */
-#define EC_CMD_LIGHTBAR_CMD 0x28
-
-struct rgb_s {
- uint8_t r, g, b;
-};
-
-#define LB_BATTERY_LEVELS 4
-/* List of tweakable parameters. NOTE: It's __packed so it can be sent in a
- * host command, but the alignment is the same regardless. Keep it that way.
- */
-struct lightbar_params_v0 {
- /* Timing */
- int32_t google_ramp_up;
- int32_t google_ramp_down;
- int32_t s3s0_ramp_up;
- int32_t s0_tick_delay[2]; /* AC=0/1 */
- int32_t s0a_tick_delay[2]; /* AC=0/1 */
- int32_t s0s3_ramp_down;
- int32_t s3_sleep_for;
- int32_t s3_ramp_up;
- int32_t s3_ramp_down;
-
- /* Oscillation */
- uint8_t new_s0;
- uint8_t osc_min[2]; /* AC=0/1 */
- uint8_t osc_max[2]; /* AC=0/1 */
- uint8_t w_ofs[2]; /* AC=0/1 */
-
- /* Brightness limits based on the backlight and AC. */
- uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */
- uint8_t bright_bl_on_min[2]; /* AC=0/1 */
- uint8_t bright_bl_on_max[2]; /* AC=0/1 */
-
- /* Battery level thresholds */
- uint8_t battery_threshold[LB_BATTERY_LEVELS - 1];
-
- /* Map [AC][battery_level] to color index */
- uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */
- uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */
-
- /* Color palette */
- struct rgb_s color[8]; /* 0-3 are Google colors */
-} __packed;
-
-struct lightbar_params_v1 {
- /* Timing */
- int32_t google_ramp_up;
- int32_t google_ramp_down;
- int32_t s3s0_ramp_up;
- int32_t s0_tick_delay[2]; /* AC=0/1 */
- int32_t s0a_tick_delay[2]; /* AC=0/1 */
- int32_t s0s3_ramp_down;
- int32_t s3_sleep_for;
- int32_t s3_ramp_up;
- int32_t s3_ramp_down;
- int32_t tap_tick_delay;
- int32_t tap_display_time;
-
- /* Tap-for-battery params */
- uint8_t tap_pct_red;
- uint8_t tap_pct_green;
- uint8_t tap_seg_min_on;
- uint8_t tap_seg_max_on;
- uint8_t tap_seg_osc;
- uint8_t tap_idx[3];
-
- /* Oscillation */
- uint8_t osc_min[2]; /* AC=0/1 */
- uint8_t osc_max[2]; /* AC=0/1 */
- uint8_t w_ofs[2]; /* AC=0/1 */
-
- /* Brightness limits based on the backlight and AC. */
- uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */
- uint8_t bright_bl_on_min[2]; /* AC=0/1 */
- uint8_t bright_bl_on_max[2]; /* AC=0/1 */
-
- /* Battery level thresholds */
- uint8_t battery_threshold[LB_BATTERY_LEVELS - 1];
-
- /* Map [AC][battery_level] to color index */
- uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */
- uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */
-
- /* Color palette */
- struct rgb_s color[8]; /* 0-3 are Google colors */
-} __packed;
-
-/* Lightbar program */
-#define EC_LB_PROG_LEN 192
-struct lightbar_program {
- uint8_t size;
- uint8_t data[EC_LB_PROG_LEN];
-};
-
-struct ec_params_lightbar {
- uint8_t cmd; /* Command (see enum lightbar_command) */
- union {
- struct {
- /* no args */
- } dump, off, on, init, get_seq, get_params_v0, get_params_v1,
- version, get_brightness, get_demo, suspend, resume;
-
- struct {
- uint8_t num;
- } set_brightness, seq, demo;
-
- struct {
- uint8_t ctrl, reg, value;
- } reg;
-
- struct {
- uint8_t led, red, green, blue;
- } set_rgb;
-
- struct {
- uint8_t led;
- } get_rgb;
-
- struct {
- uint8_t enable;
- } manual_suspend_ctrl;
-
- struct lightbar_params_v0 set_params_v0;
- struct lightbar_params_v1 set_params_v1;
- struct lightbar_program set_program;
- };
-} __packed;
-
-struct ec_response_lightbar {
- union {
- struct {
- struct {
- uint8_t reg;
- uint8_t ic0;
- uint8_t ic1;
- } vals[23];
- } dump;
-
- struct {
- uint8_t num;
- } get_seq, get_brightness, get_demo;
-
- struct lightbar_params_v0 get_params_v0;
- struct lightbar_params_v1 get_params_v1;
-
- struct {
- uint32_t num;
- uint32_t flags;
- } version;
-
- struct {
- uint8_t red, green, blue;
- } get_rgb;
-
- struct {
- /* no return params */
- } off, on, init, set_brightness, seq, reg, set_rgb,
- demo, set_params_v0, set_params_v1,
- set_program, manual_suspend_ctrl, suspend, resume;
- };
-} __packed;
-
-/* Lightbar commands */
-enum lightbar_command {
- LIGHTBAR_CMD_DUMP = 0,
- LIGHTBAR_CMD_OFF = 1,
- LIGHTBAR_CMD_ON = 2,
- LIGHTBAR_CMD_INIT = 3,
- LIGHTBAR_CMD_SET_BRIGHTNESS = 4,
- LIGHTBAR_CMD_SEQ = 5,
- LIGHTBAR_CMD_REG = 6,
- LIGHTBAR_CMD_SET_RGB = 7,
- LIGHTBAR_CMD_GET_SEQ = 8,
- LIGHTBAR_CMD_DEMO = 9,
- LIGHTBAR_CMD_GET_PARAMS_V0 = 10,
- LIGHTBAR_CMD_SET_PARAMS_V0 = 11,
- LIGHTBAR_CMD_VERSION = 12,
- LIGHTBAR_CMD_GET_BRIGHTNESS = 13,
- LIGHTBAR_CMD_GET_RGB = 14,
- LIGHTBAR_CMD_GET_DEMO = 15,
- LIGHTBAR_CMD_GET_PARAMS_V1 = 16,
- LIGHTBAR_CMD_SET_PARAMS_V1 = 17,
- LIGHTBAR_CMD_SET_PROGRAM = 18,
- LIGHTBAR_CMD_MANUAL_SUSPEND_CTRL = 19,
- LIGHTBAR_CMD_SUSPEND = 20,
- LIGHTBAR_CMD_RESUME = 21,
- LIGHTBAR_NUM_CMDS
-};
-
-/*****************************************************************************/
-/* LED control commands */
-
-#define EC_CMD_LED_CONTROL 0x29
-
-enum ec_led_id {
- /* LED to indicate battery state of charge */
- EC_LED_ID_BATTERY_LED = 0,
- /*
- * LED to indicate system power state (on or in suspend).
- * May be on power button or on C-panel.
- */
- EC_LED_ID_POWER_LED,
- /* LED on power adapter or its plug */
- EC_LED_ID_ADAPTER_LED,
-
- EC_LED_ID_COUNT
-};
-
-/* LED control flags */
-#define EC_LED_FLAGS_QUERY (1 << 0) /* Query LED capability only */
-#define EC_LED_FLAGS_AUTO (1 << 1) /* Switch LED back to automatic control */
-
-enum ec_led_colors {
- EC_LED_COLOR_RED = 0,
- EC_LED_COLOR_GREEN,
- EC_LED_COLOR_BLUE,
- EC_LED_COLOR_YELLOW,
- EC_LED_COLOR_WHITE,
-
- EC_LED_COLOR_COUNT
-};
-
-struct ec_params_led_control {
- uint8_t led_id; /* Which LED to control */
- uint8_t flags; /* Control flags */
-
- uint8_t brightness[EC_LED_COLOR_COUNT];
-} __packed;
-
-struct ec_response_led_control {
- /*
- * Available brightness value range.
- *
- * Range 0 means color channel not present.
- * Range 1 means on/off control.
- * Other values mean the LED is controlled by PWM.
- */
- uint8_t brightness_range[EC_LED_COLOR_COUNT];
-} __packed;
-
-/*****************************************************************************/
-/* Verified boot commands */
-
-/*
- * Note: command code 0x29 version 0 was VBOOT_CMD in Link EVT; it may be
- * reused for other purposes with version > 0.
- */
-
-/* Verified boot hash command */
-#define EC_CMD_VBOOT_HASH 0x2A
-
-struct ec_params_vboot_hash {
- uint8_t cmd; /* enum ec_vboot_hash_cmd */
- uint8_t hash_type; /* enum ec_vboot_hash_type */
- uint8_t nonce_size; /* Nonce size; may be 0 */
- uint8_t reserved0; /* Reserved; set 0 */
- uint32_t offset; /* Offset in flash to hash */
- uint32_t size; /* Number of bytes to hash */
- uint8_t nonce_data[64]; /* Nonce data; ignored if nonce_size=0 */
-} __packed;
-
-struct ec_response_vboot_hash {
- uint8_t status; /* enum ec_vboot_hash_status */
- uint8_t hash_type; /* enum ec_vboot_hash_type */
- uint8_t digest_size; /* Size of hash digest in bytes */
- uint8_t reserved0; /* Ignore; will be 0 */
- uint32_t offset; /* Offset in flash which was hashed */
- uint32_t size; /* Number of bytes hashed */
- uint8_t hash_digest[64]; /* Hash digest data */
-} __packed;
-
-enum ec_vboot_hash_cmd {
- EC_VBOOT_HASH_GET = 0, /* Get current hash status */
- EC_VBOOT_HASH_ABORT = 1, /* Abort calculating current hash */
- EC_VBOOT_HASH_START = 2, /* Start computing a new hash */
- EC_VBOOT_HASH_RECALC = 3, /* Synchronously compute a new hash */
-};
-
-enum ec_vboot_hash_type {
- EC_VBOOT_HASH_TYPE_SHA256 = 0, /* SHA-256 */
-};
-
-enum ec_vboot_hash_status {
- EC_VBOOT_HASH_STATUS_NONE = 0, /* No hash (not started, or aborted) */
- EC_VBOOT_HASH_STATUS_DONE = 1, /* Finished computing a hash */
- EC_VBOOT_HASH_STATUS_BUSY = 2, /* Busy computing a hash */
-};
-
-/*
- * Special values for offset for EC_VBOOT_HASH_START and EC_VBOOT_HASH_RECALC.
- * If one of these is specified, the EC will automatically update offset and
- * size to the correct values for the specified image (RO or RW).
- */
-#define EC_VBOOT_HASH_OFFSET_RO 0xfffffffe
-#define EC_VBOOT_HASH_OFFSET_RW 0xfffffffd
-
-/*****************************************************************************/
-/*
- * Motion sense commands. We'll make separate structs for sub-commands with
- * different input args, so that we know how much to expect.
- */
-#define EC_CMD_MOTION_SENSE_CMD 0x2B
-
-/* Motion sense commands */
-enum motionsense_command {
- /*
- * Dump command returns all motion sensor data including motion sense
- * module flags and individual sensor flags.
- */
- MOTIONSENSE_CMD_DUMP = 0,
-
- /*
- * Info command returns data describing the details of a given sensor,
- * including enum motionsensor_type, enum motionsensor_location, and
- * enum motionsensor_chip.
- */
- MOTIONSENSE_CMD_INFO = 1,
-
- /*
- * EC Rate command is a setter/getter command for the EC sampling rate
- * of all motion sensors in milliseconds.
- */
- MOTIONSENSE_CMD_EC_RATE = 2,
-
- /*
- * Sensor ODR command is a setter/getter command for the output data
- * rate of a specific motion sensor in millihertz.
- */
- MOTIONSENSE_CMD_SENSOR_ODR = 3,
-
- /*
- * Sensor range command is a setter/getter command for the range of
- * a specified motion sensor in +/-G's or +/- deg/s.
- */
- MOTIONSENSE_CMD_SENSOR_RANGE = 4,
-
- /*
- * Setter/getter command for the keyboard wake angle. When the lid
- * angle is greater than this value, keyboard wake is disabled in S3,
- * and when the lid angle goes less than this value, keyboard wake is
- * enabled. Note, the lid angle measurement is an approximate,
- * un-calibrated value, hence the wake angle isn't exact.
- */
- MOTIONSENSE_CMD_KB_WAKE_ANGLE = 5,
-
- /*
- * Returns data for a single sensor.
- */
- MOTIONSENSE_CMD_DATA = 6,
-
- /*
- * Perform low-level calibration. On sensors that support it, ask to
- * do offset calibration.
- */
- MOTIONSENSE_CMD_PERFORM_CALIB = 10,
-
- /*
- * Sensor Offset command is a setter/getter command for the offset used
- * for calibration. The offsets can be calculated by the host, or via
- * PERFORM_CALIB command.
- */
- MOTIONSENSE_CMD_SENSOR_OFFSET = 11,
-
- /* Number of motionsense sub-commands. */
- MOTIONSENSE_NUM_CMDS
-};
-
-enum motionsensor_id {
- EC_MOTION_SENSOR_ACCEL_BASE = 0,
- EC_MOTION_SENSOR_ACCEL_LID = 1,
- EC_MOTION_SENSOR_GYRO = 2,
-
- /*
- * Note, if more sensors are added and this count changes, the padding
- * in ec_response_motion_sense dump command must be modified.
- */
- EC_MOTION_SENSOR_COUNT = 3
-};
-
-/* List of motion sensor types. */
-enum motionsensor_type {
- MOTIONSENSE_TYPE_ACCEL = 0,
- MOTIONSENSE_TYPE_GYRO = 1,
- MOTIONSENSE_TYPE_MAG = 2,
- MOTIONSENSE_TYPE_PROX = 3,
- MOTIONSENSE_TYPE_LIGHT = 4,
- MOTIONSENSE_TYPE_ACTIVITY = 5,
- MOTIONSENSE_TYPE_BARO = 6,
- MOTIONSENSE_TYPE_MAX,
-};
-
-/* List of motion sensor locations. */
-enum motionsensor_location {
- MOTIONSENSE_LOC_BASE = 0,
- MOTIONSENSE_LOC_LID = 1,
- MOTIONSENSE_LOC_MAX,
-};
-
-/* List of motion sensor chips. */
-enum motionsensor_chip {
- MOTIONSENSE_CHIP_KXCJ9 = 0,
-};
-
-/* Module flag masks used for the dump sub-command. */
-#define MOTIONSENSE_MODULE_FLAG_ACTIVE (1<<0)
-
-/* Sensor flag masks used for the dump sub-command. */
-#define MOTIONSENSE_SENSOR_FLAG_PRESENT (1<<0)
-
-/*
- * Send this value for the data element to only perform a read. If you
- * send any other value, the EC will interpret it as data to set and will
- * return the actual value set.
- */
-#define EC_MOTION_SENSE_NO_VALUE -1
-
-#define EC_MOTION_SENSE_INVALID_CALIB_TEMP 0x8000
-
-/* Set Calibration information */
-#define MOTION_SENSE_SET_OFFSET 1
-
-struct ec_response_motion_sensor_data {
- /* Flags for each sensor. */
- uint8_t flags;
- /* Sensor number the data comes from */
- uint8_t sensor_num;
- /* Each sensor is up to 3-axis. */
- union {
- int16_t data[3];
- struct {
- uint16_t rsvd;
- uint32_t timestamp;
- } __packed;
- struct {
- uint8_t activity; /* motionsensor_activity */
- uint8_t state;
- int16_t add_info[2];
- };
- };
-} __packed;
-
-struct ec_params_motion_sense {
- uint8_t cmd;
- union {
- /* Used for MOTIONSENSE_CMD_DUMP. */
- struct {
- /* no args */
- } dump;
-
- /*
- * Used for MOTIONSENSE_CMD_EC_RATE and
- * MOTIONSENSE_CMD_KB_WAKE_ANGLE.
- */
- struct {
- /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. */
- int16_t data;
- } ec_rate, kb_wake_angle;
-
- /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */
- struct {
- uint8_t sensor_num;
-
- /*
- * bit 0: If set (MOTION_SENSE_SET_OFFSET), set
- * the calibration information in the EC.
- * If unset, just retrieve calibration information.
- */
- uint16_t flags;
-
- /*
- * Temperature at calibration, in units of 0.01 C
- * 0x8000: invalid / unknown.
- * 0x0: 0C
- * 0x7fff: +327.67C
- */
- int16_t temp;
-
- /*
- * Offset for calibration.
- * Unit:
- * Accelerometer: 1/1024 g
- * Gyro: 1/1024 deg/s
- * Compass: 1/16 uT
- */
- int16_t offset[3];
- } __packed sensor_offset;
-
- /* Used for MOTIONSENSE_CMD_INFO. */
- struct {
- uint8_t sensor_num;
- } info;
-
- /*
- * Used for MOTIONSENSE_CMD_SENSOR_ODR and
- * MOTIONSENSE_CMD_SENSOR_RANGE.
- */
- struct {
- /* Should be element of enum motionsensor_id. */
- uint8_t sensor_num;
-
- /* Rounding flag, true for round-up, false for down. */
- uint8_t roundup;
-
- uint16_t reserved;
-
- /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. */
- int32_t data;
- } sensor_odr, sensor_range;
- };
-} __packed;
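-
-/*
- * Illustrative usage (not part of the original header): reading the lid
- * accelerometer's current ODR without changing it, by passing
- * EC_MOTION_SENSE_NO_VALUE as the data element.
- */
-static const struct ec_params_motion_sense read_lid_odr = {
-	.cmd = MOTIONSENSE_CMD_SENSOR_ODR,
-	.sensor_odr = {
-		.sensor_num = EC_MOTION_SENSOR_ACCEL_LID,
-		.roundup = 0,
-		.data = EC_MOTION_SENSE_NO_VALUE,
-	},
-};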
-
-struct ec_response_motion_sense {
- union {
- /* Used for MOTIONSENSE_CMD_DUMP. */
- struct {
- /* Flags representing the motion sensor module. */
- uint8_t module_flags;
-
- /* Number of sensors managed directly by the EC. */
- uint8_t sensor_count;
-
- /*
- * Sensor data is truncated if response_max is too small
- * to hold all the data.
- */
- struct ec_response_motion_sensor_data sensor[0];
- } dump;
-
- /* Used for MOTIONSENSE_CMD_INFO. */
- struct {
- /* Should be element of enum motionsensor_type. */
- uint8_t type;
-
- /* Should be element of enum motionsensor_location. */
- uint8_t location;
-
- /* Should be element of enum motionsensor_chip. */
- uint8_t chip;
- } info;
-
- /* Used for MOTIONSENSE_CMD_DATA */
- struct ec_response_motion_sensor_data data;
-
- /*
- * Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR,
- * MOTIONSENSE_CMD_SENSOR_RANGE, and
- * MOTIONSENSE_CMD_KB_WAKE_ANGLE.
- */
- struct {
- /* Current value of the parameter queried. */
- int32_t ret;
- } ec_rate, sensor_odr, sensor_range, kb_wake_angle;
-
- /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */
- struct {
- int16_t temp;
- int16_t offset[3];
- } sensor_offset, perform_calib;
- };
-} __packed;
-
-/*****************************************************************************/
-/* USB charging control commands */
-
-/* Set USB port charging mode */
-#define EC_CMD_USB_CHARGE_SET_MODE 0x30
-
-struct ec_params_usb_charge_set_mode {
- uint8_t usb_port_id;
- uint8_t mode;
-} __packed;
-
-/*****************************************************************************/
-/* Persistent storage for host */
-
-/* Maximum bytes that can be read/written in a single command */
-#define EC_PSTORE_SIZE_MAX 64
-
-/* Get persistent storage info */
-#define EC_CMD_PSTORE_INFO 0x40
-
-struct ec_response_pstore_info {
- /* Persistent storage size, in bytes */
- uint32_t pstore_size;
- /* Access size; read/write offset and size must be a multiple of this */
- uint32_t access_size;
-} __packed;
-
-/*
- * Read persistent storage
- *
- * Response is params.size bytes of data.
- */
-#define EC_CMD_PSTORE_READ 0x41
-
-struct ec_params_pstore_read {
- uint32_t offset; /* Byte offset to read */
- uint32_t size; /* Size to read in bytes */
-} __packed;
-
-/* Write persistent storage */
-#define EC_CMD_PSTORE_WRITE 0x42
-
-struct ec_params_pstore_write {
- uint32_t offset; /* Byte offset to write */
- uint32_t size; /* Size to write in bytes */
- uint8_t data[EC_PSTORE_SIZE_MAX];
-} __packed;
-
-/*****************************************************************************/
-/* Real-time clock */
-
-/* RTC params and response structures */
-struct ec_params_rtc {
- uint32_t time;
-} __packed;
-
-struct ec_response_rtc {
- uint32_t time;
-} __packed;
-
-/* These use ec_response_rtc */
-#define EC_CMD_RTC_GET_VALUE 0x44
-#define EC_CMD_RTC_GET_ALARM 0x45
-
-/* These all use ec_params_rtc */
-#define EC_CMD_RTC_SET_VALUE 0x46
-#define EC_CMD_RTC_SET_ALARM 0x47
-
-/*****************************************************************************/
-/* Port80 log access */
-
-/* Maximum entries that can be read/written in a single command */
-#define EC_PORT80_SIZE_MAX 32
-
-/* Get last port80 code from previous boot */
-#define EC_CMD_PORT80_LAST_BOOT 0x48
-#define EC_CMD_PORT80_READ 0x48
-
-enum ec_port80_subcmd {
- EC_PORT80_GET_INFO = 0,
- EC_PORT80_READ_BUFFER,
-};
-
-struct ec_params_port80_read {
- uint16_t subcmd;
- union {
- struct {
- uint32_t offset;
- uint32_t num_entries;
- } read_buffer;
- };
-} __packed;
-
-struct ec_response_port80_read {
- union {
- struct {
- uint32_t writes;
- uint32_t history_size;
- uint32_t last_boot;
- } get_info;
- struct {
- uint16_t codes[EC_PORT80_SIZE_MAX];
- } data;
- };
-} __packed;
-
-struct ec_response_port80_last_boot {
- uint16_t code;
-} __packed;
-
-/*****************************************************************************/
-/* Thermal engine commands. Note that there are two implementations. We'll
- * reuse the command number, but the data and behavior are incompatible.
- * Version 0 is what originally shipped on Link.
- * Version 1 separates the CPU thermal limits from the fan control.
- */
-
-#define EC_CMD_THERMAL_SET_THRESHOLD 0x50
-#define EC_CMD_THERMAL_GET_THRESHOLD 0x51
-
-/* The version 0 structs are opaque. You have to know what they are for
- * the get/set commands to make any sense.
- */
-
-/* Version 0 - set */
-struct ec_params_thermal_set_threshold {
- uint8_t sensor_type;
- uint8_t threshold_id;
- uint16_t value;
-} __packed;
-
-/* Version 0 - get */
-struct ec_params_thermal_get_threshold {
- uint8_t sensor_type;
- uint8_t threshold_id;
-} __packed;
-
-struct ec_response_thermal_get_threshold {
- uint16_t value;
-} __packed;
-
-
-/* The version 1 structs are visible. */
-enum ec_temp_thresholds {
- EC_TEMP_THRESH_WARN = 0,
- EC_TEMP_THRESH_HIGH,
- EC_TEMP_THRESH_HALT,
-
- EC_TEMP_THRESH_COUNT
-};
-
-/* Thermal configuration for one temperature sensor. Temps are in Kelvin.
- * Zero values will be silently ignored by the thermal task.
- */
-struct ec_thermal_config {
- uint32_t temp_host[EC_TEMP_THRESH_COUNT]; /* levels of hotness */
- uint32_t temp_fan_off; /* no active cooling needed */
- uint32_t temp_fan_max; /* max active cooling needed */
-} __packed;
-
-/* Version 1 - get config for one sensor. */
-struct ec_params_thermal_get_threshold_v1 {
- uint32_t sensor_num;
-} __packed;
-/* This returns a struct ec_thermal_config */
-
-/* Version 1 - set config for one sensor.
- * Use read-modify-write for best results! */
-struct ec_params_thermal_set_threshold_v1 {
- uint32_t sensor_num;
- struct ec_thermal_config cfg;
-} __packed;
-/* This returns no data */
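-
-/*
- * Illustrative sketch (not part of the original header; ec_cmd() is a
- * hypothetical transport): the read-modify-write pattern recommended
- * above, changing only the warn threshold for one sensor.
- */
-static int ec_set_warn_thresh(uint32_t sensor, uint32_t temp_k)
-{
-	struct ec_params_thermal_get_threshold_v1 gp = { .sensor_num = sensor };
-	struct ec_params_thermal_set_threshold_v1 sp = { .sensor_num = sensor };
-	int rv = ec_cmd(EC_CMD_THERMAL_GET_THRESHOLD, 1,
-			&gp, sizeof(gp), &sp.cfg, sizeof(sp.cfg));
-
-	if (rv)
-		return rv;
-
-	sp.cfg.temp_host[EC_TEMP_THRESH_WARN] = temp_k;
-	return ec_cmd(EC_CMD_THERMAL_SET_THRESHOLD, 1, &sp, sizeof(sp), NULL, 0);
-}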
-
-/****************************************************************************/
-
-/* Toggle automatic fan control */
-#define EC_CMD_THERMAL_AUTO_FAN_CTRL 0x52
-
-/* Get TMP006 calibration data */
-#define EC_CMD_TMP006_GET_CALIBRATION 0x53
-
-struct ec_params_tmp006_get_calibration {
- uint8_t index;
-} __packed;
-
-struct ec_response_tmp006_get_calibration {
- float s0;
- float b0;
- float b1;
- float b2;
-} __packed;
-
-/* Set TMP006 calibration data */
-#define EC_CMD_TMP006_SET_CALIBRATION 0x54
-
-struct ec_params_tmp006_set_calibration {
- uint8_t index;
- uint8_t reserved[3]; /* Reserved; set 0 */
- float s0;
- float b0;
- float b1;
- float b2;
-} __packed;
-
-/* Read raw TMP006 data */
-#define EC_CMD_TMP006_GET_RAW 0x55
-
-struct ec_params_tmp006_get_raw {
- uint8_t index;
-} __packed;
-
-struct ec_response_tmp006_get_raw {
- int32_t t; /* In 1/100 K */
- int32_t v; /* In nV */
-};
-
-/*****************************************************************************/
-/* MKBP - Matrix KeyBoard Protocol */
-
-/*
- * Read key state
- *
- * Returns raw data for keyboard cols; see ec_response_mkbp_info.cols for
- * expected response size.
- *
- * NOTE: This has been superseded by EC_CMD_MKBP_GET_NEXT_EVENT. If you wish
- * to obtain the instantaneous state, use EC_CMD_MKBP_INFO with the type
- * EC_MKBP_INFO_CURRENT and event EC_MKBP_EVENT_KEY_MATRIX.
- */
-#define EC_CMD_MKBP_STATE 0x60
-
-/*
- * Provide information about various MKBP things. See enum ec_mkbp_info_type.
- */
-#define EC_CMD_MKBP_INFO 0x61
-
-struct ec_response_mkbp_info {
- uint32_t rows;
- uint32_t cols;
- /* Formerly "switches", which was 0. */
- uint8_t reserved;
-} __packed;
-
-struct ec_params_mkbp_info {
- uint8_t info_type;
- uint8_t event_type;
-} __packed;
-
-enum ec_mkbp_info_type {
- /*
- * Info about the keyboard matrix: number of rows and columns.
- *
- * Returns struct ec_response_mkbp_info.
- */
- EC_MKBP_INFO_KBD = 0,
-
- /*
- * For buttons and switches, info about which specifically are
- * supported. event_type must be set to one of the values in enum
- * ec_mkbp_event.
- *
- * For EC_MKBP_EVENT_BUTTON and EC_MKBP_EVENT_SWITCH, returns a 4 byte
- * bitmask indicating which buttons or switches are present. See the
- * bit indices below.
- */
- EC_MKBP_INFO_SUPPORTED = 1,
-
- /*
- * Instantaneous state of buttons and switches.
- *
- * event_type must be set to one of the values in enum ec_mkbp_event.
- *
- * For EC_MKBP_EVENT_KEY_MATRIX, returns uint8_t key_matrix[13]
- * indicating the current state of the keyboard matrix.
- *
- * For EC_MKBP_EVENT_HOST_EVENT, returns uint32_t host_event, the raw
- * event state.
- *
- * For EC_MKBP_EVENT_BUTTON, returns uint32_t buttons, indicating the
- * state of supported buttons.
- *
- * For EC_MKBP_EVENT_SWITCH, returns uint32_t switches, indicating the
- * state of supported switches.
- */
- EC_MKBP_INFO_CURRENT = 2,
-};
-
-/* Simulate key press */
-#define EC_CMD_MKBP_SIMULATE_KEY 0x62
-
-struct ec_params_mkbp_simulate_key {
- uint8_t col;
- uint8_t row;
- uint8_t pressed;
-} __packed;
-
-/* Configure keyboard scanning */
-#define EC_CMD_MKBP_SET_CONFIG 0x64
-#define EC_CMD_MKBP_GET_CONFIG 0x65
-
-/* flags */
-enum mkbp_config_flags {
- EC_MKBP_FLAGS_ENABLE = 1, /* Enable keyboard scanning */
-};
-
-enum mkbp_config_valid {
- EC_MKBP_VALID_SCAN_PERIOD = 1 << 0,
- EC_MKBP_VALID_POLL_TIMEOUT = 1 << 1,
- EC_MKBP_VALID_MIN_POST_SCAN_DELAY = 1 << 3,
- EC_MKBP_VALID_OUTPUT_SETTLE = 1 << 4,
- EC_MKBP_VALID_DEBOUNCE_DOWN = 1 << 5,
- EC_MKBP_VALID_DEBOUNCE_UP = 1 << 6,
- EC_MKBP_VALID_FIFO_MAX_DEPTH = 1 << 7,
-};
-
-/* Configuration for our key scanning algorithm */
-struct ec_mkbp_config {
- uint32_t valid_mask; /* valid fields */
- uint8_t flags; /* some flags (enum mkbp_config_flags) */
- uint8_t valid_flags; /* which flags are valid */
- uint16_t scan_period_us; /* period between start of scans */
- /* revert to interrupt mode after no activity for this long */
- uint32_t poll_timeout_us;
- /*
- * minimum post-scan relax time. Once we finish a scan we check
- * the time until we are due to start the next one. If this time is
- * shorter than this field, we use this field instead.
- */
- uint16_t min_post_scan_delay_us;
- /* delay between setting up output and waiting for it to settle */
- uint16_t output_settle_us;
- uint16_t debounce_down_us; /* time for debounce on key down */
- uint16_t debounce_up_us; /* time for debounce on key up */
- /* maximum depth to allow for fifo (0 = no keyscan output) */
- uint8_t fifo_max_depth;
-} __packed;
-
-struct ec_params_mkbp_set_config {
- struct ec_mkbp_config config;
-} __packed;
-
-struct ec_response_mkbp_get_config {
- struct ec_mkbp_config config;
-} __packed;
-
-/* Run the key scan emulation */
-#define EC_CMD_KEYSCAN_SEQ_CTRL 0x66
-
-enum ec_keyscan_seq_cmd {
- EC_KEYSCAN_SEQ_STATUS = 0, /* Get status information */
- EC_KEYSCAN_SEQ_CLEAR = 1, /* Clear sequence */
- EC_KEYSCAN_SEQ_ADD = 2, /* Add item to sequence */
- EC_KEYSCAN_SEQ_START = 3, /* Start running sequence */
- EC_KEYSCAN_SEQ_COLLECT = 4, /* Collect sequence summary data */
-};
-
-enum ec_collect_flags {
- /*
- * Indicates this scan was processed by the EC. Due to timing, some
- * scans may be skipped.
- */
- EC_KEYSCAN_SEQ_FLAG_DONE = 1 << 0,
-};
-
-struct ec_collect_item {
- uint8_t flags; /* some flags (enum ec_collect_flags) */
-};
-
-struct ec_params_keyscan_seq_ctrl {
- uint8_t cmd; /* Command to send (enum ec_keyscan_seq_cmd) */
- union {
- struct {
- uint8_t active; /* still active */
- uint8_t num_items; /* number of items */
- /* Current item being presented */
- uint8_t cur_item;
- } status;
- struct {
- /*
- * Absolute time for this scan, measured from the
- * start of the sequence.
- */
- uint32_t time_us;
- uint8_t scan[0]; /* keyscan data */
- } add;
- struct {
- uint8_t start_item; /* First item to return */
- uint8_t num_items; /* Number of items to return */
- } collect;
- };
-} __packed;
-
-struct ec_result_keyscan_seq_ctrl {
- union {
- struct {
- uint8_t num_items; /* Number of items */
- /* Data for each item */
- struct ec_collect_item item[0];
- } collect;
- };
-} __packed;
-
-/*
- * Command for retrieving the next pending MKBP event from the EC device
- *
- * The device replies with EC_RES_UNAVAILABLE if there aren't any pending
- * events.
- */
-#define EC_CMD_GET_NEXT_EVENT 0x67
-
-enum ec_mkbp_event {
- /* Keyboard matrix changed. The event data is the new matrix state. */
- EC_MKBP_EVENT_KEY_MATRIX = 0,
-
- /* New host event. The event data is 4 bytes of host event flags. */
- EC_MKBP_EVENT_HOST_EVENT = 1,
-
- /* New Sensor FIFO data. The event data is fifo_info structure. */
- EC_MKBP_EVENT_SENSOR_FIFO = 2,
-
- /* The state of the non-matrixed buttons has changed. */
- EC_MKBP_EVENT_BUTTON = 3,
-
- /* The state of the switches has changed. */
- EC_MKBP_EVENT_SWITCH = 4,
-
- /* EC sent a sysrq command */
- EC_MKBP_EVENT_SYSRQ = 6,
-
- /* Number of MKBP events */
- EC_MKBP_EVENT_COUNT,
-};
-
-union ec_response_get_next_data {
- uint8_t key_matrix[13];
-
- /* Unaligned */
- uint32_t host_event;
-
- uint32_t buttons;
- uint32_t switches;
- uint32_t sysrq;
-} __packed;
-
-struct ec_response_get_next_event {
- uint8_t event_type;
- /* Followed by event data if any */
- union ec_response_get_next_data data;
-} __packed;
-
-/* Bit indices for buttons and switches. */
-/* Buttons */
-#define EC_MKBP_POWER_BUTTON 0
-#define EC_MKBP_VOL_UP 1
-#define EC_MKBP_VOL_DOWN 2
-
-/* Switches */
-#define EC_MKBP_LID_OPEN 0
-#define EC_MKBP_TABLET_MODE 1
-
-/*****************************************************************************/
-/* Temperature sensor commands */
-
-/* Read temperature sensor info */
-#define EC_CMD_TEMP_SENSOR_GET_INFO 0x70
-
-struct ec_params_temp_sensor_get_info {
- uint8_t id;
-} __packed;
-
-struct ec_response_temp_sensor_get_info {
- char sensor_name[32];
- uint8_t sensor_type;
-} __packed;
-
-/*****************************************************************************/
-
-/*
- * Note: host commands 0x80 - 0x87 are reserved to avoid conflict with ACPI
- * commands accidentally sent to the wrong interface. See the ACPI section
- * below.
- */
-
-/*****************************************************************************/
-/* Host event commands */
-
-/*
- * Host event mask params and response structures, shared by all of the host
- * event commands below.
- */
-struct ec_params_host_event_mask {
- uint32_t mask;
-} __packed;
-
-struct ec_response_host_event_mask {
- uint32_t mask;
-} __packed;
-
-/* These all use ec_response_host_event_mask */
-#define EC_CMD_HOST_EVENT_GET_B 0x87
-#define EC_CMD_HOST_EVENT_GET_SMI_MASK 0x88
-#define EC_CMD_HOST_EVENT_GET_SCI_MASK 0x89
-#define EC_CMD_HOST_EVENT_GET_WAKE_MASK 0x8d
-
-/* These all use ec_params_host_event_mask */
-#define EC_CMD_HOST_EVENT_SET_SMI_MASK 0x8a
-#define EC_CMD_HOST_EVENT_SET_SCI_MASK 0x8b
-#define EC_CMD_HOST_EVENT_CLEAR 0x8c
-#define EC_CMD_HOST_EVENT_SET_WAKE_MASK 0x8e
-#define EC_CMD_HOST_EVENT_CLEAR_B 0x8f
-
-/*****************************************************************************/
-/* Switch commands */
-
-/* Enable/disable LCD backlight */
-#define EC_CMD_SWITCH_ENABLE_BKLIGHT 0x90
-
-struct ec_params_switch_enable_backlight {
- uint8_t enabled;
-} __packed;
-
-/* Enable/disable WLAN/Bluetooth */
-#define EC_CMD_SWITCH_ENABLE_WIRELESS 0x91
-#define EC_VER_SWITCH_ENABLE_WIRELESS 1
-
-/* Version 0 params; no response */
-struct ec_params_switch_enable_wireless_v0 {
- uint8_t enabled;
-} __packed;
-
-/* Version 1 params */
-struct ec_params_switch_enable_wireless_v1 {
- /* Flags to enable now */
- uint8_t now_flags;
-
- /* Which flags to copy from now_flags */
- uint8_t now_mask;
-
- /*
- * Flags to leave enabled in S3, if they're on at the S0->S3
- * transition. (Other flags will be disabled by the S0->S3
- * transition.)
- */
- uint8_t suspend_flags;
-
- /* Which flags to copy from suspend_flags */
- uint8_t suspend_mask;
-} __packed;
-
-/* Version 1 response */
-struct ec_response_switch_enable_wireless_v1 {
- /* Flags to enable now */
- uint8_t now_flags;
-
- /* Flags to leave enabled in S3 */
- uint8_t suspend_flags;
-} __packed;
-
-/*****************************************************************************/
-/* GPIO commands. Only available on EC if write protect has been disabled. */
-
-/* Set GPIO output value */
-#define EC_CMD_GPIO_SET 0x92
-
-struct ec_params_gpio_set {
- char name[32];
- uint8_t val;
-} __packed;
-
-/* Get GPIO value */
-#define EC_CMD_GPIO_GET 0x93
-
-/* Version 0 of input params and response */
-struct ec_params_gpio_get {
- char name[32];
-} __packed;
-struct ec_response_gpio_get {
- uint8_t val;
-} __packed;
-
-/* Version 1 of input params and response */
-struct ec_params_gpio_get_v1 {
- uint8_t subcmd;
- union {
- struct {
- char name[32];
- } get_value_by_name;
- struct {
- uint8_t index;
- } get_info;
- };
-} __packed;
-
-struct ec_response_gpio_get_v1 {
- union {
- struct {
- uint8_t val;
- } get_value_by_name, get_count;
- struct {
- uint8_t val;
- char name[32];
- uint32_t flags;
- } get_info;
- };
-} __packed;
-
-enum gpio_get_subcmd {
- EC_GPIO_GET_BY_NAME = 0,
- EC_GPIO_GET_COUNT = 1,
- EC_GPIO_GET_INFO = 2,
-};
-
-/*****************************************************************************/
-/* I2C commands. Only available when flash write protect is unlocked. */
-
-/*
- * TODO(crosbug.com/p/23570): These commands are deprecated, and will be
- * removed soon. Use EC_CMD_I2C_XFER instead.
- */
-
-/* Read I2C bus */
-#define EC_CMD_I2C_READ 0x94
-
-struct ec_params_i2c_read {
- uint16_t addr; /* 8-bit address (7-bit shifted << 1) */
- uint8_t read_size; /* Either 8 or 16. */
- uint8_t port;
- uint8_t offset;
-} __packed;
-struct ec_response_i2c_read {
- uint16_t data;
-} __packed;
-
-/* Write I2C bus */
-#define EC_CMD_I2C_WRITE 0x95
-
-struct ec_params_i2c_write {
- uint16_t data;
- uint16_t addr; /* 8-bit address (7-bit shifted << 1) */
- uint8_t write_size; /* Either 8 or 16. */
- uint8_t port;
- uint8_t offset;
-} __packed;
-
-/*****************************************************************************/
-/* Charge state commands. Only available when flash write protect unlocked. */
-
-/* Force charge state machine to stop charging the battery or force it to
- * discharge the battery.
- */
-#define EC_CMD_CHARGE_CONTROL 0x96
-#define EC_VER_CHARGE_CONTROL 1
-
-enum ec_charge_control_mode {
- CHARGE_CONTROL_NORMAL = 0,
- CHARGE_CONTROL_IDLE,
- CHARGE_CONTROL_DISCHARGE,
-};
-
-struct ec_params_charge_control {
- uint32_t mode; /* enum charge_control_mode */
-} __packed;
-
-/*****************************************************************************/
-/* Console commands. Only available when flash write protect is unlocked. */
-
-/* Snapshot console output buffer for use by EC_CMD_CONSOLE_READ. */
-#define EC_CMD_CONSOLE_SNAPSHOT 0x97
-
-/*
- * Read data from the saved snapshot. If the subcmd parameter is
- * CONSOLE_READ_NEXT, this will return data starting from the beginning of
- * the latest snapshot. If it is CONSOLE_READ_RECENT, it will start from the
- * end of the previous snapshot.
- *
- * The params are only looked at in version >= 1 of this command. Prior
- * versions will just default to CONSOLE_READ_NEXT behavior.
- *
- * Response is a null-terminated string; an empty string if there is no
- * more output remaining.
- */
-#define EC_CMD_CONSOLE_READ 0x98
-
-enum ec_console_read_subcmd {
- CONSOLE_READ_NEXT = 0,
- CONSOLE_READ_RECENT
-};
-
-struct ec_params_console_read_v1 {
- uint8_t subcmd; /* enum ec_console_read_subcmd */
-} __packed;
-
-/*****************************************************************************/
-
-/*
- * Cut off battery power immediately or after the host has shut down.
- *
- * Returns EC_RES_INVALID_COMMAND if unsupported by a board/battery.
- * EC_RES_SUCCESS if the command was successful.
- * EC_RES_ERROR if the cut off command failed.
- */
-
-#define EC_CMD_BATTERY_CUT_OFF 0x99
-
-#define EC_BATTERY_CUTOFF_FLAG_AT_SHUTDOWN (1 << 0)
-
-struct ec_params_battery_cutoff {
- uint8_t flags;
-} __packed;
-
-/*****************************************************************************/
-/* USB port mux control. */
-
-/*
- * Switch USB mux or return to automatic switching.
- */
-#define EC_CMD_USB_MUX 0x9a
-
-struct ec_params_usb_mux {
- uint8_t mux;
-} __packed;
-
-/*****************************************************************************/
-/* LDOs / FETs control. */
-
-enum ec_ldo_state {
- EC_LDO_STATE_OFF = 0, /* the LDO / FET is shut down */
- EC_LDO_STATE_ON = 1, /* the LDO / FET is ON / providing power */
-};
-
-/*
- * Switch on/off a LDO.
- */
-#define EC_CMD_LDO_SET 0x9b
-
-struct ec_params_ldo_set {
- uint8_t index;
- uint8_t state;
-} __packed;
-
-/*
- * Get LDO state.
- */
-#define EC_CMD_LDO_GET 0x9c
-
-struct ec_params_ldo_get {
- uint8_t index;
-} __packed;
-
-struct ec_response_ldo_get {
- uint8_t state;
-} __packed;
-
-/*****************************************************************************/
-/* Power info. */
-
-/*
- * Get power info.
- */
-#define EC_CMD_POWER_INFO 0x9d
-
-struct ec_response_power_info {
- uint32_t usb_dev_type;
- uint16_t voltage_ac;
- uint16_t voltage_system;
- uint16_t current_system;
- uint16_t usb_current_limit;
-} __packed;
-
-/*****************************************************************************/
-/* I2C passthru command */
-
-#define EC_CMD_I2C_PASSTHRU 0x9e
-
-/* Read data; if not present, message is a write */
-#define EC_I2C_FLAG_READ (1 << 15)
-
-/* Mask for address */
-#define EC_I2C_ADDR_MASK 0x3ff
-
-#define EC_I2C_STATUS_NAK (1 << 0) /* Transfer was not acknowledged */
-#define EC_I2C_STATUS_TIMEOUT (1 << 1) /* Timeout during transfer */
-
-/* Any error */
-#define EC_I2C_STATUS_ERROR (EC_I2C_STATUS_NAK | EC_I2C_STATUS_TIMEOUT)
-
-struct ec_params_i2c_passthru_msg {
- uint16_t addr_flags; /* I2C slave address (7 or 10 bits) and flags */
- uint16_t len; /* Number of bytes to read or write */
-} __packed;
-
-struct ec_params_i2c_passthru {
- uint8_t port; /* I2C port number */
- uint8_t num_msgs; /* Number of messages */
- struct ec_params_i2c_passthru_msg msg[];
- /* Data to write for all messages is concatenated here */
-} __packed;
-
-struct ec_response_i2c_passthru {
- uint8_t i2c_status; /* Status flags (EC_I2C_STATUS_...) */
- uint8_t num_msgs; /* Number of messages processed */
- uint8_t data[]; /* Data read by messages concatenated here */
-} __packed;
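-
-/*
- * Illustrative sketch (not part of the original header): building a
- * write-then-read transaction to a hypothetical 7-bit peripheral at 0x50.
- * Write payloads for all messages follow the message array; returns the
- * total request size.
- */
-static int ec_build_i2c_read_reg(uint8_t *buf, uint8_t reg, uint16_t len)
-{
-	struct ec_params_i2c_passthru *p = (struct ec_params_i2c_passthru *)buf;
-	uint8_t *wdata = (uint8_t *)&p->msg[2];
-
-	p->port = 0;
-	p->num_msgs = 2;
-	p->msg[0].addr_flags = 0x50;		/* write: register offset */
-	p->msg[0].len = 1;
-	p->msg[1].addr_flags = 0x50 | EC_I2C_FLAG_READ;
-	p->msg[1].len = len;
-	wdata[0] = reg;				/* data for msg[0] */
-
-	return (int)(wdata + 1 - buf);
-}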
-
-/*****************************************************************************/
-/* Power button hang detect */
-
-#define EC_CMD_HANG_DETECT 0x9f
-
-/* Reasons to start hang detection timer */
-/* Power button pressed */
-#define EC_HANG_START_ON_POWER_PRESS (1 << 0)
-
-/* Lid closed */
-#define EC_HANG_START_ON_LID_CLOSE (1 << 1)
-
-/* Lid opened */
-#define EC_HANG_START_ON_LID_OPEN (1 << 2)
-
-/* Start of AP S3->S0 transition (booting or resuming from suspend) */
-#define EC_HANG_START_ON_RESUME (1 << 3)
-
-/* Reasons to cancel hang detection */
-
-/* Power button released */
-#define EC_HANG_STOP_ON_POWER_RELEASE (1 << 8)
-
-/* Any host command from AP received */
-#define EC_HANG_STOP_ON_HOST_COMMAND (1 << 9)
-
-/* Stop on end of AP S0->S3 transition (suspending or shutting down) */
-#define EC_HANG_STOP_ON_SUSPEND (1 << 10)
-
-/*
- * If this flag is set, all the other fields are ignored, and the hang detect
- * timer is started. This provides the AP a way to start the hang timer
- * without reconfiguring any of the other hang detect settings. Note that
- * you must previously have configured the timeouts.
- */
-#define EC_HANG_START_NOW (1 << 30)
-
-/*
- * If this flag is set, all the other fields are ignored (including
- * EC_HANG_START_NOW). This provides the AP a way to stop the hang timer
- * without reconfiguring any of the other hang detect settings.
- */
-#define EC_HANG_STOP_NOW (1 << 31)
-
-struct ec_params_hang_detect {
- /* Flags; see EC_HANG_* */
- uint32_t flags;
-
- /* Timeout in msec before generating host event, if enabled */
- uint16_t host_event_timeout_msec;
-
- /* Timeout in msec before generating warm reboot, if enabled */
- uint16_t warm_reboot_timeout_msec;
-} __packed;
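
As a sketch of how these flags combine (same hypothetical send_ec_command() helper as in the I2C passthru example, with illustrative timeout values): configure the reasons and timeouts once, then later re-arm without touching the configuration via EC_HANG_START_NOW.

static void hang_detect_example(void)
{
	/* Arm on power-button press; disarm when it is released. */
	struct ec_params_hang_detect cfg = {
		.flags = EC_HANG_START_ON_POWER_PRESS |
			 EC_HANG_STOP_ON_POWER_RELEASE,
		.host_event_timeout_msec = 8000,	/* example values */
		.warm_reboot_timeout_msec = 10000,
	};
	struct ec_params_hang_detect rearm = { .flags = EC_HANG_START_NOW };

	send_ec_command(EC_CMD_HANG_DETECT, &cfg, sizeof(cfg), NULL, 0);
	/* ...later: restart the already-configured timer. */
	send_ec_command(EC_CMD_HANG_DETECT, &rearm, sizeof(rearm), NULL, 0);
}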
-
-/*****************************************************************************/
-/* Commands for battery charging */
-
-/*
- * This is the single catch-all host command to exchange data regarding the
- * charge state machine (v2 and up).
- */
-#define EC_CMD_CHARGE_STATE 0xa0
-
-/* Subcommands for this host command */
-enum charge_state_command {
- CHARGE_STATE_CMD_GET_STATE,
- CHARGE_STATE_CMD_GET_PARAM,
- CHARGE_STATE_CMD_SET_PARAM,
- CHARGE_STATE_NUM_CMDS
-};
-
-/*
- * Known param numbers are defined here. Ranges are reserved for board-specific
- * params, which are handled by the particular implementations.
- */
-enum charge_state_params {
- CS_PARAM_CHG_VOLTAGE, /* charger voltage limit */
- CS_PARAM_CHG_CURRENT, /* charger current limit */
- CS_PARAM_CHG_INPUT_CURRENT, /* charger input current limit */
- CS_PARAM_CHG_STATUS, /* charger-specific status */
- CS_PARAM_CHG_OPTION, /* charger-specific options */
- /* How many so far? */
- CS_NUM_BASE_PARAMS,
-
- /* Range for CONFIG_CHARGER_PROFILE_OVERRIDE params */
- CS_PARAM_CUSTOM_PROFILE_MIN = 0x10000,
- CS_PARAM_CUSTOM_PROFILE_MAX = 0x1ffff,
-
- /* Other custom param ranges go here... */
-};
-
-struct ec_params_charge_state {
- uint8_t cmd; /* enum charge_state_command */
- union {
- struct {
- /* no args */
- } get_state;
-
- struct {
- uint32_t param; /* enum charge_state_params */
- } get_param;
-
- struct {
- uint32_t param; /* param to set */
- uint32_t value; /* value to set */
- } set_param;
- };
-} __packed;
-
-struct ec_response_charge_state {
- union {
- struct {
- int ac;
- int chg_voltage;
- int chg_current;
- int chg_input_current;
- int batt_state_of_charge;
- } get_state;
-
- struct {
- uint32_t value;
- } get_param;
- struct {
- /* no return values */
- } set_param;
- };
-} __packed;
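
A hedged sketch of the subcommand-plus-union pattern, querying a single parameter (same hypothetical transport helper as above):

static int get_charger_current_limit(uint32_t *value)
{
	struct ec_params_charge_state p = {
		.cmd = CHARGE_STATE_CMD_GET_PARAM,
		.get_param.param = CS_PARAM_CHG_CURRENT,
	};
	struct ec_response_charge_state r;

	if (send_ec_command(EC_CMD_CHARGE_STATE, &p, sizeof(p),
			    &r, sizeof(r)))
		return -1;
	*value = r.get_param.value;	/* only get_param is meaningful here */
	return 0;
}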
-
-
-/*
- * Set maximum battery charging current.
- */
-#define EC_CMD_CHARGE_CURRENT_LIMIT 0xa1
-
-struct ec_params_current_limit {
- uint32_t limit; /* in mA */
-} __packed;
-
-/*
- * Set maximum external power current.
- */
-#define EC_CMD_EXT_POWER_CURRENT_LIMIT 0xa2
-
-struct ec_params_ext_power_current_limit {
- uint32_t limit; /* in mA */
-} __packed;
-
-/* Inform the EC when entering a sleep state */
-#define EC_CMD_HOST_SLEEP_EVENT 0xa9
-
-enum host_sleep_event {
- HOST_SLEEP_EVENT_S3_SUSPEND = 1,
- HOST_SLEEP_EVENT_S3_RESUME = 2,
- HOST_SLEEP_EVENT_S0IX_SUSPEND = 3,
- HOST_SLEEP_EVENT_S0IX_RESUME = 4
-};
-
-struct ec_params_host_sleep_event {
- uint8_t sleep_event;
-} __packed;
-
-/*****************************************************************************/
-/* Smart battery pass-through */
-
-/* Get / Set 16-bit smart battery registers */
-#define EC_CMD_SB_READ_WORD 0xb0
-#define EC_CMD_SB_WRITE_WORD 0xb1
-
-/* Get / Set string smart battery parameters
- * formatted as SMBUS "block".
- */
-#define EC_CMD_SB_READ_BLOCK 0xb2
-#define EC_CMD_SB_WRITE_BLOCK 0xb3
-
-struct ec_params_sb_rd {
- uint8_t reg;
-} __packed;
-
-struct ec_response_sb_rd_word {
- uint16_t value;
-} __packed;
-
-struct ec_params_sb_wr_word {
- uint8_t reg;
- uint16_t value;
-} __packed;
-
-struct ec_response_sb_rd_block {
- uint8_t data[32];
-} __packed;
-
-struct ec_params_sb_wr_block {
- uint8_t reg;
- uint16_t data[32];
-} __packed;
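
For illustration, reading the battery voltage: in the Smart Battery spec the Voltage() register is command code 0x09 (a property of SBS batteries, not something this header defines), with the same hypothetical transport helper assumed:

static int sb_read_voltage_mv(uint16_t *mv)
{
	struct ec_params_sb_rd p = { .reg = 0x09 };	/* SBS Voltage() */
	struct ec_response_sb_rd_word r;

	if (send_ec_command(EC_CMD_SB_READ_WORD, &p, sizeof(p),
			    &r, sizeof(r)))
		return -1;
	*mv = r.value;		/* SBS reports millivolts */
	return 0;
}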
-
-/*****************************************************************************/
-/* Battery vendor parameters
- *
- * Get or set vendor-specific parameters in the battery. Implementations may
- * differ between boards or batteries. On a set operation, the response
- * contains the actual value set, which may be rounded or clipped from the
- * requested value.
- */
-
-#define EC_CMD_BATTERY_VENDOR_PARAM 0xb4
-
-enum ec_battery_vendor_param_mode {
- BATTERY_VENDOR_PARAM_MODE_GET = 0,
- BATTERY_VENDOR_PARAM_MODE_SET,
-};
-
-struct ec_params_battery_vendor_param {
- uint32_t param;
- uint32_t value;
- uint8_t mode;
-} __packed;
-
-struct ec_response_battery_vendor_param {
- uint32_t value;
-} __packed;
-
-/*****************************************************************************/
-/* System commands */
-
-/*
- * TODO(crosbug.com/p/23747): This is a confusing name, since it doesn't
- * necessarily reboot the EC. Rename to "image" or something similar?
- */
-#define EC_CMD_REBOOT_EC 0xd2
-
-/* Command */
-enum ec_reboot_cmd {
- EC_REBOOT_CANCEL = 0, /* Cancel a pending reboot */
- EC_REBOOT_JUMP_RO = 1, /* Jump to RO without rebooting */
- EC_REBOOT_JUMP_RW = 2, /* Jump to RW without rebooting */
- /* (command 3 was jump to RW-B) */
- EC_REBOOT_COLD = 4, /* Cold-reboot */
- EC_REBOOT_DISABLE_JUMP = 5, /* Disable jump until next reboot */
- EC_REBOOT_HIBERNATE = 6 /* Hibernate EC */
-};
-
-/* Flags for ec_params_reboot_ec.reboot_flags */
-#define EC_REBOOT_FLAG_RESERVED0 (1 << 0) /* Was recovery request */
-#define EC_REBOOT_FLAG_ON_AP_SHUTDOWN (1 << 1) /* Reboot after AP shutdown */
-
-struct ec_params_reboot_ec {
- uint8_t cmd; /* enum ec_reboot_cmd */
- uint8_t flags; /* See EC_REBOOT_FLAG_* */
-} __packed;
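
A small sketch combining command and flag to defer a cold reboot until the AP shuts down (hypothetical transport helper again):

static void reboot_ec_after_ap_shutdown(void)
{
	struct ec_params_reboot_ec req = {
		.cmd = EC_REBOOT_COLD,
		.flags = EC_REBOOT_FLAG_ON_AP_SHUTDOWN,
	};

	send_ec_command(EC_CMD_REBOOT_EC, &req, sizeof(req), NULL, 0);
}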
-
-/*
- * Get information on last EC panic.
- *
- * Returns variable-length platform-dependent panic information. See panic.h
- * for details.
- */
-#define EC_CMD_GET_PANIC_INFO 0xd3
-
-/*****************************************************************************/
-/*
- * ACPI commands
- *
- * These are valid ONLY on the ACPI command/data port.
- */
-
-/*
- * ACPI Read Embedded Controller
- *
- * This reads from ACPI memory space on the EC (EC_ACPI_MEM_*).
- *
- * Use the following sequence:
- *
- * - Write EC_CMD_ACPI_READ to EC_LPC_ADDR_ACPI_CMD
- * - Wait for EC_LPC_CMDR_PENDING bit to clear
- * - Write address to EC_LPC_ADDR_ACPI_DATA
- * - Wait for EC_LPC_CMDR_DATA bit to set
- * - Read value from EC_LPC_ADDR_ACPI_DATA
- */
-#define EC_CMD_ACPI_READ 0x80
-
-/*
- * ACPI Write Embedded Controller
- *
- * This writes to ACPI memory space on the EC (EC_ACPI_MEM_*).
- *
- * Use the following sequence:
- *
- * - Write EC_CMD_ACPI_WRITE to EC_LPC_ADDR_ACPI_CMD
- * - Wait for EC_LPC_CMDR_PENDING bit to clear
- * - Write address to EC_LPC_ADDR_ACPI_DATA
- * - Wait for EC_LPC_CMDR_PENDING bit to clear
- * - Write value to EC_LPC_ADDR_ACPI_DATA
- */
-#define EC_CMD_ACPI_WRITE 0x81
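
Both sequences poll status bits between port accesses. A minimal sketch, assuming x86 port I/O (inb/outb with the kernel's value-first outb convention) and that the EC_LPC_CMDR_* status bits read back from the command port; EC_LPC_ADDR_ACPI_* and EC_LPC_CMDR_* are defined elsewhere in this header:

static uint8_t ec_acpi_read(uint8_t addr)
{
	outb(EC_CMD_ACPI_READ, EC_LPC_ADDR_ACPI_CMD);
	while (inb(EC_LPC_ADDR_ACPI_CMD) & EC_LPC_CMDR_PENDING)
		;			/* wait for PENDING to clear */
	outb(addr, EC_LPC_ADDR_ACPI_DATA);
	while (!(inb(EC_LPC_ADDR_ACPI_CMD) & EC_LPC_CMDR_DATA))
		;			/* wait for DATA to be set */
	return inb(EC_LPC_ADDR_ACPI_DATA);
}

static void ec_acpi_write(uint8_t addr, uint8_t val)
{
	outb(EC_CMD_ACPI_WRITE, EC_LPC_ADDR_ACPI_CMD);
	while (inb(EC_LPC_ADDR_ACPI_CMD) & EC_LPC_CMDR_PENDING)
		;
	outb(addr, EC_LPC_ADDR_ACPI_DATA);
	while (inb(EC_LPC_ADDR_ACPI_CMD) & EC_LPC_CMDR_PENDING)
		;
	outb(val, EC_LPC_ADDR_ACPI_DATA);
}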
-
-/*
- * ACPI Query Embedded Controller
- *
- * This clears the lowest-order bit in the currently pending host events, and
- * sets the result code to the 1-based index of the bit (event 0x00000001 = 1,
- * event 0x80000000 = 32), or 0 if no event was pending.
- */
-#define EC_CMD_ACPI_QUERY_EVENT 0x84
-
-/* Valid addresses in ACPI memory space, for read/write commands */
-
-/* Memory space version; set to EC_ACPI_MEM_VERSION_CURRENT */
-#define EC_ACPI_MEM_VERSION 0x00
-/*
- * Test location; writing a value here updates the test complement byte to
- * (0xff - value).
- */
-#define EC_ACPI_MEM_TEST 0x01
-/* Test complement; writes here are ignored. */
-#define EC_ACPI_MEM_TEST_COMPLIMENT 0x02
-
-/* Keyboard backlight brightness percent (0 - 100) */
-#define EC_ACPI_MEM_KEYBOARD_BACKLIGHT 0x03
-/* DPTF Target Fan Duty (0-100, 0xff for auto/none) */
-#define EC_ACPI_MEM_FAN_DUTY 0x04
-
-/*
- * DPTF temp thresholds. Any of the EC's temp sensors can have up to two
- * independent thresholds attached to them. The current value of the ID
- * register determines which sensor is affected by the THRESHOLD and COMMIT
- * registers. The THRESHOLD register uses the same EC_TEMP_SENSOR_OFFSET scheme
- * as the memory-mapped sensors. The COMMIT register applies those settings.
- *
- * The spec does not mandate any way to read back the threshold settings
- * themselves, but when a threshold is crossed the AP needs a way to determine
- * which sensor(s) are responsible. Each reading of the ID register clears and
- * returns one sensor ID that has crossed one of its threshold (in either
- * direction) since the last read. A value of 0xFF means "no new thresholds
- * have tripped". Setting or enabling the thresholds for a sensor will clear
- * the unread event count for that sensor.
- */
-#define EC_ACPI_MEM_TEMP_ID 0x05
-#define EC_ACPI_MEM_TEMP_THRESHOLD 0x06
-#define EC_ACPI_MEM_TEMP_COMMIT 0x07
-/*
- * Here are the bits for the COMMIT register:
- * bit 0 selects the threshold index for the chosen sensor (0/1)
- * bit 1 enables/disables the selected threshold (0 = off, 1 = on)
- * Each write to the commit register affects one threshold.
- */
-#define EC_ACPI_MEM_TEMP_COMMIT_SELECT_MASK (1 << 0)
-#define EC_ACPI_MEM_TEMP_COMMIT_ENABLE_MASK (1 << 1)
-/*
- * Example:
- *
- * Set the thresholds for sensor 2 to 50 C and 60 C:
- * write 2 to [0x05] -- select temp sensor 2
- * write 0x7b to [0x06] -- C_TO_K(50) - EC_TEMP_SENSOR_OFFSET
- * write 0x2 to [0x07] -- enable threshold 0 with this value
- * write 0x85 to [0x06] -- C_TO_K(60) - EC_TEMP_SENSOR_OFFSET
- * write 0x3 to [0x07] -- enable threshold 1 with this value
- *
- * Disable the 60 C threshold, leaving the 50 C threshold unchanged:
- * write 2 to [0x05] -- select temp sensor 2
- * write 0x1 to [0x07] -- disable threshold 1
- */
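
The same example in code, reusing the ec_acpi_write() sketch from the ACPI write command above; C_TO_K() and EC_TEMP_SENSOR_OFFSET are assumed from elsewhere in this header:

static void set_sensor2_thresholds(void)
{
	ec_acpi_write(EC_ACPI_MEM_TEMP_ID, 2);	/* select temp sensor 2 */
	ec_acpi_write(EC_ACPI_MEM_TEMP_THRESHOLD,
		      C_TO_K(50) - EC_TEMP_SENSOR_OFFSET);	/* 0x7b */
	ec_acpi_write(EC_ACPI_MEM_TEMP_COMMIT, 0x2);	/* threshold 0 on */
	ec_acpi_write(EC_ACPI_MEM_TEMP_THRESHOLD,
		      C_TO_K(60) - EC_TEMP_SENSOR_OFFSET);	/* 0x85 */
	ec_acpi_write(EC_ACPI_MEM_TEMP_COMMIT, 0x3);	/* threshold 1 on */
}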
-
-/* DPTF battery charging current limit */
-#define EC_ACPI_MEM_CHARGING_LIMIT 0x08
-
-/* Charging limit is specified in 64 mA steps */
-#define EC_ACPI_MEM_CHARGING_LIMIT_STEP_MA 64
-/* Value to disable DPTF battery charging limit */
-#define EC_ACPI_MEM_CHARGING_LIMIT_DISABLED 0xff
-
-/* Current version of ACPI memory address space */
-#define EC_ACPI_MEM_VERSION_CURRENT 1
-
-
-/*****************************************************************************/
-/*
- * Special commands
- *
- * These do not follow the normal rules for commands. See each command for
- * details.
- */
-
-/*
- * Reboot NOW
- *
- * This command will work even when the EC LPC interface is busy, because the
- * reboot command is processed at interrupt level. Note that when the EC
- * reboots, the host will reboot too, so there is no response to this command.
- *
- * Use EC_CMD_REBOOT_EC to reboot the EC more politely.
- */
-#define EC_CMD_REBOOT 0xd1 /* Think "die" */
-
-/*
- * Resend last response (not supported on LPC).
- *
- * Returns EC_RES_UNAVAILABLE if there is no response available - for example,
- * there was no previous command, or the previous command's response was too
- * big to save.
- */
-#define EC_CMD_RESEND_RESPONSE 0xdb
-
-/*
- * This header byte on a command indicates version 0. Any header byte less
- * than this means that we are talking to an old EC which doesn't support
- * versioning. In that case, we assume version 0.
- *
- * Header bytes greater than this indicate a later version. For example,
- * EC_CMD_VERSION0 + 1 means we are using version 1.
- *
- * The old EC interface must not use commands 0xdc or higher.
- */
-#define EC_CMD_VERSION0 0xdc
-
-#endif /* !__ACPI__ */
-
-/*****************************************************************************/
-/*
- * PD commands
- *
- * These commands are for PD MCU communication.
- */
-
-/* EC to PD MCU exchange status command */
-#define EC_CMD_PD_EXCHANGE_STATUS 0x100
-
-/* Status of EC being sent to PD */
-struct ec_params_pd_status {
- int8_t batt_soc; /* battery state of charge */
-} __packed;
-
-/* Status of PD being sent back to EC */
-struct ec_response_pd_status {
- int8_t status; /* PD MCU status */
- uint32_t curr_lim_ma; /* input current limit */
-} __packed;
-
-/* Set USB type-C port role and muxes */
-#define EC_CMD_USB_PD_CONTROL 0x101
-
-enum usb_pd_control_role {
- USB_PD_CTRL_ROLE_NO_CHANGE = 0,
- USB_PD_CTRL_ROLE_TOGGLE_ON = 1, /* == AUTO */
- USB_PD_CTRL_ROLE_TOGGLE_OFF = 2,
- USB_PD_CTRL_ROLE_FORCE_SINK = 3,
- USB_PD_CTRL_ROLE_FORCE_SOURCE = 4,
-};
-
-enum usb_pd_control_mux {
- USB_PD_CTRL_MUX_NO_CHANGE = 0,
- USB_PD_CTRL_MUX_NONE = 1,
- USB_PD_CTRL_MUX_USB = 2,
- USB_PD_CTRL_MUX_DP = 3,
- USB_PD_CTRL_MUX_DOCK = 4,
- USB_PD_CTRL_MUX_AUTO = 5,
-};
-
-struct ec_params_usb_pd_control {
- uint8_t port;
- uint8_t role;
- uint8_t mux;
-} __packed;
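
A sketch tying the two enums to the request, forcing port 0 to sink power while leaving the mux alone (hypothetical transport helper as before):

static void force_sink_example(void)
{
	struct ec_params_usb_pd_control req = {
		.port = 0,
		.role = USB_PD_CTRL_ROLE_FORCE_SINK,
		.mux = USB_PD_CTRL_MUX_NO_CHANGE,
	};

	send_ec_command(EC_CMD_USB_PD_CONTROL, &req, sizeof(req), NULL, 0);
}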
-
-/*****************************************************************************/
-/*
- * Passthru commands
- *
- * Some platforms have sub-processors chained to each other. For example:
- *
- * AP <--> EC <--> PD MCU
- *
- * The top 2 bits of the command number are used to indicate which device the
- * command is intended for. Device 0 is always the device receiving the
- * command; other device mapping is board-specific.
- *
- * When a device receives a command to be passed to a sub-processor, it passes
- * it on with the device number set back to 0. This allows the sub-processor
- * to remain blissfully unaware of whether the command originated on the next
- * device up the chain, or was passed through from the AP.
- *
- * In the above example, if the AP wants to send command 0x0002 to the PD MCU,
- * AP sends command 0x4002 to the EC
- * EC sends command 0x0002 to the PD MCU
- * EC forwards PD MCU response back to the AP
- */
-
-/* Offset and max command number for sub-device n */
-#define EC_CMD_PASSTHRU_OFFSET(n) (0x4000 * (n))
-#define EC_CMD_PASSTHRU_MAX(n) (EC_CMD_PASSTHRU_OFFSET(n) + 0x3fff)
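
A worked instance of the numbering described above, using the macros (pure arithmetic on command numbers):

/* Send command 0x0002 to sub-device 1 (the PD MCU in the example). */
uint16_t cmd_for_pd_mcu = EC_CMD_PASSTHRU_OFFSET(1) + 0x0002;	/* 0x4002 */
/* Device 1's passthru range tops out at EC_CMD_PASSTHRU_MAX(1) == 0x7fff. */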
-
-/*****************************************************************************/
-/*
- * Deprecated constants. These constants have been renamed for clarity. The
- * meaning and size have not changed. Programs that use the old names should
- * switch to the new names soon, as the old names may not be carried forward
- * forever.
- */
-#define EC_HOST_PARAM_SIZE EC_PROTO2_MAX_PARAM_SIZE
-#define EC_LPC_ADDR_OLD_PARAM EC_HOST_CMD_REGION1
-#define EC_OLD_PARAM_SIZE EC_HOST_CMD_REGION_SIZE
-
-#endif /* __CROS_EC_COMMANDS_H */
diff --git a/include/linux/mfd/cros_ec_lpc_mec.h b/include/linux/mfd/cros_ec_lpc_mec.h
deleted file mode 100644
index 176496ddc66c..000000000000
--- a/include/linux/mfd/cros_ec_lpc_mec.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * cros_ec_lpc_mec - LPC variant I/O for Microchip EC
- *
- * Copyright (C) 2016 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This driver uses the Chrome OS EC byte-level message-based protocol for
- * communicating the keyboard state (which keys are pressed) from a keyboard EC
- * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing,
- * but everything else (including deghosting) is done here. The main
- * motivation for this is to keep the EC firmware as simple as possible, since
- * it cannot be easily upgraded and EC flash/IRAM space is relatively
- * expensive.
- */
-
-#ifndef __LINUX_MFD_CROS_EC_MEC_H
-#define __LINUX_MFD_CROS_EC_MEC_H
-
-#include <linux/mfd/cros_ec_commands.h>
-
-enum cros_ec_lpc_mec_emi_access_mode {
- /* 8-bit access */
- ACCESS_TYPE_BYTE = 0x0,
- /* 16-bit access */
- ACCESS_TYPE_WORD = 0x1,
- /* 32-bit access */
- ACCESS_TYPE_LONG = 0x2,
- /*
- * 32-bit access, read or write of MEC_EMI_EC_DATA_B3 causes the
- * EC data register to be incremented.
- */
- ACCESS_TYPE_LONG_AUTO_INCREMENT = 0x3,
-};
-
-enum cros_ec_lpc_mec_io_type {
- MEC_IO_READ,
- MEC_IO_WRITE,
-};
-
-/* Access I/O ranges 0x800 through 0x9ff using the EMI interface instead of LPC */
-#define MEC_EMI_RANGE_START EC_HOST_CMD_REGION0
-#define MEC_EMI_RANGE_END (EC_LPC_ADDR_MEMMAP + EC_MEMMAP_SIZE)
-
-/* EMI registers are relative to base */
-#define MEC_EMI_BASE 0x800
-#define MEC_EMI_HOST_TO_EC (MEC_EMI_BASE + 0)
-#define MEC_EMI_EC_TO_HOST (MEC_EMI_BASE + 1)
-#define MEC_EMI_EC_ADDRESS_B0 (MEC_EMI_BASE + 2)
-#define MEC_EMI_EC_ADDRESS_B1 (MEC_EMI_BASE + 3)
-#define MEC_EMI_EC_DATA_B0 (MEC_EMI_BASE + 4)
-#define MEC_EMI_EC_DATA_B1 (MEC_EMI_BASE + 5)
-#define MEC_EMI_EC_DATA_B2 (MEC_EMI_BASE + 6)
-#define MEC_EMI_EC_DATA_B3 (MEC_EMI_BASE + 7)
-
-/*
- * cros_ec_lpc_mec_init
- *
- * Initialize MEC I/O.
- */
-void cros_ec_lpc_mec_init(void);
-
-/*
- * cros_ec_lpc_mec_destroy
- *
- * Cleanup MEC I/O.
- */
-void cros_ec_lpc_mec_destroy(void);
-
-/**
- * cros_ec_lpc_io_bytes_mec - Read / write bytes to MEC EMI port
- *
- * @io_type: MEC_IO_READ or MEC_IO_WRITE, depending on request
- * @offset: Base read / write address
- * @length: Number of bytes to read / write
- * @buf: Destination / source buffer
- *
- * @return 8-bit checksum of all bytes read / written
- */
-u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
- unsigned int offset, unsigned int length, u8 *buf);
-
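
A hedged usage sketch: probing for the EC's memory-mapped ID bytes through the EMI window. The offset is assumed to be the absolute LPC address inside the EMI range, and EC_MEMMAP_ID (the 'E', 'C' signature in the memmap) is assumed from cros_ec_commands.h:

static bool mec_ec_present(void)
{
	u8 id[2];

	cros_ec_lpc_io_bytes_mec(MEC_IO_READ,
				 EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID,
				 sizeof(id), id);
	return id[0] == 'E' && id[1] == 'C';
}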
-#endif /* __LINUX_MFD_CROS_EC_MEC_H */
diff --git a/include/linux/mfd/cros_ec_lpc_reg.h b/include/linux/mfd/cros_ec_lpc_reg.h
deleted file mode 100644
index 5560bef63c2b..000000000000
--- a/include/linux/mfd/cros_ec_lpc_reg.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * cros_ec_lpc_reg - LPC access to the Chrome OS Embedded Controller
- *
- * Copyright (C) 2016 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This driver uses the Chrome OS EC byte-level message-based protocol for
- * communicating the keyboard state (which keys are pressed) from a keyboard EC
- * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing,
- * but everything else (including deghosting) is done here. The main
- * motivation for this is to keep the EC firmware as simple as possible, since
- * it cannot be easily upgraded and EC flash/IRAM space is relatively
- * expensive.
- */
-
-#ifndef __LINUX_MFD_CROS_EC_REG_H
-#define __LINUX_MFD_CROS_EC_REG_H
-
-/**
- * cros_ec_lpc_read_bytes - Read bytes from a given LPC-mapped address.
- * Returns 8-bit checksum of all bytes read.
- *
- * @offset: Base read address
- * @length: Number of bytes to read
- * @dest: Destination buffer
- */
-u8 cros_ec_lpc_read_bytes(unsigned int offset, unsigned int length, u8 *dest);
-
-/**
- * cros_ec_lpc_write_bytes - Write bytes to a given LPC-mapped address.
- * Returns 8-bit checksum of all bytes written.
- *
- * @offset: Base write address
- * @length: Number of bytes to write
- * @msg: Write data buffer
- */
-u8 cros_ec_lpc_write_bytes(unsigned int offset, unsigned int length, u8 *msg);
-
-/**
- * cros_ec_lpc_reg_init
- *
- * Initialize register I/O.
- */
-void cros_ec_lpc_reg_init(void);
-
-/**
- * cros_ec_lpc_reg_destroy
- *
- * Cleanup reg I/O.
- */
-void cros_ec_lpc_reg_destroy(void);
-
-#endif /* __LINUX_MFD_CROS_EC_REG_H */
diff --git a/include/linux/mfd/da8xx-cfgchip.h b/include/linux/mfd/da8xx-cfgchip.h
index 304985e288d2..93bbfc2c1d54 100644
--- a/include/linux/mfd/da8xx-cfgchip.h
+++ b/include/linux/mfd/da8xx-cfgchip.h
@@ -1,17 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* TI DaVinci DA8xx CHIPCFGx registers for syscon consumers.
*
* Copyright (C) 2016 David Lechner <david@lechnology.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_MFD_DA8XX_CFGCHIP_H
diff --git a/include/linux/mfd/da903x.h b/include/linux/mfd/da903x.h
index 0aa3a1a49ee3..d1c57b8dbba4 100644
--- a/include/linux/mfd/da903x.h
+++ b/include/linux/mfd/da903x.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PMIC_DA903X_H
#define __LINUX_PMIC_DA903X_H
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index ce9230af09c2..76feb3a7066d 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -1,24 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* da9052 declarations for DA9052 PMICs.
*
* Copyright(c) 2011 Dialog Semiconductor Ltd.
*
* Author: David Dajun Chen <dchen@diasemi.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef __MFD_DA9052_DA9052_H
@@ -45,6 +31,12 @@
#define DA9052_ADC_TJUNC 8
#define DA9052_ADC_VBBAT 9
+/* TSI channel has its own 4 channel mux */
+#define DA9052_ADC_TSI_XP 70
+#define DA9052_ADC_TSI_XN 71
+#define DA9052_ADC_TSI_YP 72
+#define DA9052_ADC_TSI_YN 73
+
#define DA9052_IRQ_DCIN 0
#define DA9052_IRQ_VBUS 1
#define DA9052_IRQ_DCINREM 2
diff --git a/include/linux/mfd/da9052/pdata.h b/include/linux/mfd/da9052/pdata.h
index 62c5c3c2992e..60fcab32542d 100644
--- a/include/linux/mfd/da9052/pdata.h
+++ b/include/linux/mfd/da9052/pdata.h
@@ -1,24 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Platform data declarations for DA9052 PMICs.
*
* Copyright(c) 2011 Dialog Semiconductor Ltd.
*
* Author: David Dajun Chen <dchen@diasemi.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef __MFD_DA9052_PDATA_H__
diff --git a/include/linux/mfd/da9052/reg.h b/include/linux/mfd/da9052/reg.h
index 5010f978725c..752b20b16dc3 100644
--- a/include/linux/mfd/da9052/reg.h
+++ b/include/linux/mfd/da9052/reg.h
@@ -1,24 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Register declarations for DA9052 PMICs.
*
* Copyright(c) 2011 Dialog Semiconductor Ltd.
*
* Author: David Dajun Chen <dchen@diasemi.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef __LINUX_MFD_DA9052_REG_H
@@ -690,7 +676,10 @@
/* TSI CONTROL REGISTER B BITS */
#define DA9052_TSICONTB_ADCREF 0X80
#define DA9052_TSICONTB_TSIMAN 0X40
-#define DA9052_TSICONTB_TSIMUX 0X30
+#define DA9052_TSICONTB_TSIMUX_XP 0X00
+#define DA9052_TSICONTB_TSIMUX_YP 0X10
+#define DA9052_TSICONTB_TSIMUX_XN 0X20
+#define DA9052_TSICONTB_TSIMUX_YN 0X30
#define DA9052_TSICONTB_TSISEL3 0X08
#define DA9052_TSICONTB_TSISEL2 0X04
#define DA9052_TSICONTB_TSISEL1 0X02
@@ -705,8 +694,14 @@
/* TSI CO-ORDINATE LSB RESULT REGISTER BITS */
#define DA9052_TSILSB_PENDOWN 0X40
#define DA9052_TSILSB_TSIZL 0X30
+#define DA9052_TSILSB_TSIZL_SHIFT 4
+#define DA9052_TSILSB_TSIZL_BITS 2
#define DA9052_TSILSB_TSIYL 0X0C
+#define DA9052_TSILSB_TSIYL_SHIFT 2
+#define DA9052_TSILSB_TSIYL_BITS 2
#define DA9052_TSILSB_TSIXL 0X03
+#define DA9052_TSILSB_TSIXL_SHIFT 0
+#define DA9052_TSILSB_TSIXL_BITS 2
/* TSI Z MEASUREMENT MSB RESULT REGISTER BIT */
#define DA9052_TSIZMSB_TSIZM 0XFF
diff --git a/include/linux/mfd/da9055/core.h b/include/linux/mfd/da9055/core.h
index 5dc743fd63a6..a96eba52c4d6 100644
--- a/include/linux/mfd/da9055/core.h
+++ b/include/linux/mfd/da9055/core.h
@@ -1,24 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* da9055 declarations for DA9055 PMICs.
*
* Copyright(c) 2012 Dialog Semiconductor Ltd.
*
* Author: David Dajun Chen <dchen@diasemi.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef __DA9055_CORE_H
diff --git a/include/linux/mfd/da9055/pdata.h b/include/linux/mfd/da9055/pdata.h
index 04e092be4b07..d3f126990ad0 100644
--- a/include/linux/mfd/da9055/pdata.h
+++ b/include/linux/mfd/da9055/pdata.h
@@ -1,10 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Copyright (C) 2012 Dialog Semiconductor Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#ifndef __DA9055_PDATA_H
#define __DA9055_PDATA_H
@@ -12,6 +7,7 @@
#define DA9055_MAX_REGULATORS 8
struct da9055;
+struct gpio_desc;
enum gpio_select {
NO_GPIO = 0,
@@ -39,7 +35,7 @@ struct da9055_pdata {
int *gpio_rsel;
/*
* Regulator mode control bits value (GPI offset) that
- * that controls the regulator state, 0 if not available.
+ * controls the regulator state, 0 if not available.
*/
enum gpio_select *reg_ren;
/*
@@ -47,7 +43,7 @@ struct da9055_pdata {
* controls the regulator set A/B, 0 if not available.
*/
enum gpio_select *reg_rsel;
- /* GPIOs to enable regulator, 0 if not available */
- int *ena_gpio;
+ /* GPIO descriptors to enable regulator, NULL if not available */
+ struct gpio_desc **ena_gpiods;
};
#endif /* __DA9055_PDATA_H */
diff --git a/include/linux/mfd/da9055/reg.h b/include/linux/mfd/da9055/reg.h
index 2b592e072dbf..54a717b6c3de 100644
--- a/include/linux/mfd/da9055/reg.h
+++ b/include/linux/mfd/da9055/reg.h
@@ -1,24 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* DA9055 declarations for DA9055 PMICs.
*
* Copyright(c) 2012 Dialog Semiconductor Ltd.
*
* Author: David Dajun Chen <dchen@diasemi.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef __DA9055_REG_H
diff --git a/include/linux/mfd/da9062/core.h b/include/linux/mfd/da9062/core.h
index 74d33a01ddae..ea0c670992de 100644
--- a/include/linux/mfd/da9062/core.h
+++ b/include/linux/mfd/da9062/core.h
@@ -1,15 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2015-2017 Dialog Semiconductor
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __MFD_DA9062_CORE_H__
diff --git a/include/linux/mfd/da9062/registers.h b/include/linux/mfd/da9062/registers.h
index 18d576aed902..2906bf6160fb 100644
--- a/include/linux/mfd/da9062/registers.h
+++ b/include/linux/mfd/da9062/registers.h
@@ -1,15 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2015-2017 Dialog Semiconductor
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DA9062_H__
@@ -806,6 +797,9 @@
#define DA9062AA_BUCK3_SL_A_SHIFT 7
#define DA9062AA_BUCK3_SL_A_MASK BIT(7)
+/* DA9062AA_VLDO[1-4]_A common */
+#define DA9062AA_VLDO_A_MIN_SEL 2
+
/* DA9062AA_VLDO1_A = 0x0A9 */
#define DA9062AA_VLDO1_A_SHIFT 0
#define DA9062AA_VLDO1_A_MASK 0x3f
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
index f3ae65db4c86..8db52324f416 100644
--- a/include/linux/mfd/da9063/core.h
+++ b/include/linux/mfd/da9063/core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Definitions for DA9063 MFD driver
*
@@ -5,12 +6,6 @@
*
* Author: Michal Hajduk, Dialog Semiconductor
* Author: Krystian Garbaciak, Dialog Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_DA9063_CORE_H__
@@ -29,14 +24,19 @@
#define DA9063_DRVNAME_RTC "da9063-rtc"
#define DA9063_DRVNAME_VIBRATION "da9063-vibration"
-enum da9063_models {
- PMIC_DA9063 = 0x61,
+#define PMIC_CHIP_ID_DA9063 0x61
+
+enum da9063_type {
+ PMIC_TYPE_DA9063 = 0,
+ PMIC_TYPE_DA9063L,
};
enum da9063_variant_codes {
PMIC_DA9063_AD = 0x3,
PMIC_DA9063_BB = 0x5,
PMIC_DA9063_CA = 0x6,
+ PMIC_DA9063_DA = 0x7,
+ PMIC_DA9063_EA = 0x8,
};
/* Interrupts */
@@ -72,13 +72,10 @@ enum da9063_irqs {
DA9063_IRQ_GPI15,
};
-#define DA9063_IRQ_BASE_OFFSET 0
-#define DA9063_NUM_IRQ (DA9063_IRQ_GPI15 + 1 - DA9063_IRQ_BASE_OFFSET)
-
struct da9063 {
/* Device */
struct device *dev;
- unsigned short model;
+ enum da9063_type type;
unsigned char variant_code;
unsigned int flags;
@@ -94,7 +91,4 @@ struct da9063 {
int da9063_device_init(struct da9063 *da9063, unsigned int irq);
int da9063_irq_init(struct da9063 *da9063);
-void da9063_device_exit(struct da9063 *da9063);
-void da9063_irq_exit(struct da9063 *da9063);
-
#endif /* __MFD_DA9063_CORE_H__ */
diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h
deleted file mode 100644
index 8a125701ef7b..000000000000
--- a/include/linux/mfd/da9063/pdata.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Platform configuration options for DA9063
- *
- * Copyright 2012 Dialog Semiconductor Ltd.
- *
- * Author: Michal Hajduk, Dialog Semiconductor
- * Author: Krystian Garbaciak, Dialog Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#ifndef __MFD_DA9063_PDATA_H__
-#define __MFD_DA9063_PDATA_H__
-
-#include <linux/regulator/machine.h>
-
-/*
- * Regulator configuration
- */
-/* DA9063 regulator IDs */
-enum {
- /* BUCKs */
- DA9063_ID_BCORE1,
- DA9063_ID_BCORE2,
- DA9063_ID_BPRO,
- DA9063_ID_BMEM,
- DA9063_ID_BIO,
- DA9063_ID_BPERI,
-
- /* BCORE1 and BCORE2 in merged mode */
- DA9063_ID_BCORES_MERGED,
- /* BMEM and BIO in merged mode */
- DA9063_ID_BMEM_BIO_MERGED,
- /* When two BUCKs are merged, they cannot be reused separately */
-
- /* LDOs */
- DA9063_ID_LDO1,
- DA9063_ID_LDO2,
- DA9063_ID_LDO3,
- DA9063_ID_LDO4,
- DA9063_ID_LDO5,
- DA9063_ID_LDO6,
- DA9063_ID_LDO7,
- DA9063_ID_LDO8,
- DA9063_ID_LDO9,
- DA9063_ID_LDO10,
- DA9063_ID_LDO11,
-};
-
-/* Regulators platform data */
-struct da9063_regulator_data {
- int id;
- struct regulator_init_data *initdata;
-};
-
-struct da9063_regulators_pdata {
- unsigned n_regulators;
- struct da9063_regulator_data *regulator_data;
-};
-
-
-/*
- * RGB LED configuration
- */
-/* LED IDs for flags in struct led_info. */
-enum {
- DA9063_GPIO11_LED,
- DA9063_GPIO14_LED,
- DA9063_GPIO15_LED,
-
- DA9063_LED_NUM
-};
-#define DA9063_LED_ID_MASK 0x3
-
-/* LED polarity for flags in struct led_info. */
-#define DA9063_LED_HIGH_LEVEL_ACTIVE 0x0
-#define DA9063_LED_LOW_LEVEL_ACTIVE 0x4
-
-
-/*
- * General PMIC configuration
- */
-/* HWMON ADC channels configuration */
-#define DA9063_FLG_FORCE_IN0_MANUAL_MODE 0x0010
-#define DA9063_FLG_FORCE_IN0_AUTO_MODE 0x0020
-#define DA9063_FLG_FORCE_IN1_MANUAL_MODE 0x0040
-#define DA9063_FLG_FORCE_IN1_AUTO_MODE 0x0080
-#define DA9063_FLG_FORCE_IN2_MANUAL_MODE 0x0100
-#define DA9063_FLG_FORCE_IN2_AUTO_MODE 0x0200
-#define DA9063_FLG_FORCE_IN3_MANUAL_MODE 0x0400
-#define DA9063_FLG_FORCE_IN3_AUTO_MODE 0x0800
-
-/* Disable register caching. */
-#define DA9063_FLG_NO_CACHE 0x0008
-
-struct da9063;
-
-/* DA9063 platform data */
-struct da9063_pdata {
- int (*init)(struct da9063 *da9063);
- int irq_base;
- bool key_power;
- unsigned flags;
- struct da9063_regulators_pdata *regulators_pdata;
- struct led_platform_data *leds_pdata;
-};
-
-#endif /* __MFD_DA9063_PDATA_H__ */
diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
index 5d42859cb441..7b8364bd08a0 100644
--- a/include/linux/mfd/da9063/registers.h
+++ b/include/linux/mfd/da9063/registers.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Registers definition for DA9063 modules
*
@@ -5,12 +6,6 @@
*
* Author: Michal Hajduk, Dialog Semiconductor
* Author: Krystian Garbaciak, Dialog Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef _DA9063_REG_H
@@ -215,9 +210,9 @@
/* DA9063 Configuration registers */
/* OTP */
-#define DA9063_REG_OPT_COUNT 0x101
-#define DA9063_REG_OPT_ADDR 0x102
-#define DA9063_REG_OPT_DATA 0x103
+#define DA9063_REG_OTP_CONT 0x101
+#define DA9063_REG_OTP_ADDR 0x102
+#define DA9063_REG_OTP_DATA 0x103
/* Customer Trim and Configuration */
#define DA9063_REG_T_OFFSET 0x104
@@ -297,8 +292,10 @@
#define DA9063_BB_REG_GP_ID_19 0x134
/* Chip ID and variant */
-#define DA9063_REG_CHIP_ID 0x181
-#define DA9063_REG_CHIP_VARIANT 0x182
+#define DA9063_REG_DEVICE_ID 0x181
+#define DA9063_REG_VARIANT_ID 0x182
+#define DA9063_REG_CUSTOMER_ID 0x183
+#define DA9063_REG_CONFIG_ID 0x184
/*
* PMIC registers bits
@@ -934,9 +931,6 @@
#define DA9063_RTC_CLOCK 0x40
#define DA9063_OUT_32K_EN 0x80
-/* DA9063_REG_CHIP_VARIANT */
-#define DA9063_CHIP_VARIANT_SHIFT 4
-
/* DA9063_REG_BUCK_ILIM_A (addr=0x9A) */
#define DA9063_BIO_ILIM_MASK 0x0F
#define DA9063_BMEM_ILIM_MASK 0xF0
@@ -1043,6 +1037,32 @@
#define DA9063_NONKEY_PIN_AUTODOWN 0x02
#define DA9063_NONKEY_PIN_AUTOFLPRT 0x03
+/* DA9063_REG_CONFIG_J (addr=0x10F) */
+#define DA9063_TWOWIRE_TO 0x40
+
+/* DA9063_REG_MON_REG_2 (addr=0x115) */
+#define DA9063_LDO1_MON_EN 0x01
+#define DA9063_LDO2_MON_EN 0x02
+#define DA9063_LDO3_MON_EN 0x04
+#define DA9063_LDO4_MON_EN 0x08
+#define DA9063_LDO5_MON_EN 0x10
+#define DA9063_LDO6_MON_EN 0x20
+#define DA9063_LDO7_MON_EN 0x40
+#define DA9063_LDO8_MON_EN 0x80
+
+/* DA9063_REG_MON_REG_3 (addr=0x116) */
+#define DA9063_LDO9_MON_EN 0x01
+#define DA9063_LDO10_MON_EN 0x02
+#define DA9063_LDO11_MON_EN 0x04
+
+/* DA9063_REG_MON_REG_4 (addr=0x117) */
+#define DA9063_BCORE1_MON_EN 0x04
+#define DA9063_BCORE2_MON_EN 0x08
+#define DA9063_BPRO_MON_EN 0x10
+#define DA9063_BIO_MON_EN 0x20
+#define DA9063_BMEM_MON_EN 0x40
+#define DA9063_BPERI_MON_EN 0x80
+
/* DA9063_REG_MON_REG_5 (addr=0x116) */
#define DA9063_MON_A8_IDX_MASK 0x07
#define DA9063_MON_A8_IDX_NONE 0x00
@@ -1070,4 +1090,10 @@
#define DA9063_MON_A10_IDX_LDO9 0x04
#define DA9063_MON_A10_IDX_LDO10 0x05
+/* DA9063_REG_VARIANT_ID (addr=0x182) */
+#define DA9063_VARIANT_ID_VRC_SHIFT 0
+#define DA9063_VARIANT_ID_VRC_MASK 0x0F
+#define DA9063_VARIANT_ID_MRC_SHIFT 4
+#define DA9063_VARIANT_ID_MRC_MASK 0xF0
+
#endif /* _DA9063_REG_H */
diff --git a/include/linux/mfd/da9150/core.h b/include/linux/mfd/da9150/core.h
index 1bf50caeb9fa..d116d5f3ef56 100644
--- a/include/linux/mfd/da9150/core.h
+++ b/include/linux/mfd/da9150/core.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* DA9150 MFD Driver - Core Data
*
* Copyright (c) 2014 Dialog Semiconductor
*
* Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __DA9150_CORE_H
diff --git a/include/linux/mfd/da9150/registers.h b/include/linux/mfd/da9150/registers.h
index 27ca6ee4d840..1fd8f5968817 100644
--- a/include/linux/mfd/da9150/registers.h
+++ b/include/linux/mfd/da9150/registers.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* DA9150 MFD Driver - Registers
*
* Copyright (c) 2014 Dialog Semiconductor
*
* Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __DA9150_REGISTERS_H
diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h
index 2c0127cb06c5..556375b91316 100644
--- a/include/linux/mfd/davinci_voicecodec.h
+++ b/include/linux/mfd/davinci_voicecodec.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* DaVinci Voice Codec Core Interface for TI platforms
*
* Copyright (C) 2010 Texas Instruments, Inc
*
* Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_DAVINCI_VOICECODEC_H_
diff --git a/include/linux/mfd/db8500-prcmu.h b/include/linux/mfd/db8500-prcmu.h
index 7ba67b55b312..a62de3d155ed 100644
--- a/include/linux/mfd/db8500-prcmu.h
+++ b/include/linux/mfd/db8500-prcmu.h
@@ -1,8 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) STMicroelectronics 2009
* Copyright (C) ST-Ericsson SA 2010
*
- * License Terms: GNU General Public License v2
* Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
*
* PRCMU f/w APIs
@@ -489,7 +489,7 @@ struct prcmu_auto_pm_config {
#ifdef CONFIG_MFD_DB8500_PRCMU
-void db8500_prcmu_early_init(u32 phy_base, u32 size);
+void db8500_prcmu_early_init(void);
int prcmu_set_rc_a2p(enum romcode_write);
enum romcode_read prcmu_get_rc_p2a(void);
enum ap_pwrst prcmu_get_xp70_current_state(void);
@@ -525,9 +525,6 @@ u8 db8500_prcmu_get_power_state_result(void);
void db8500_prcmu_enable_wakeups(u32 wakeups);
int db8500_prcmu_set_epod(u16 epod_id, u8 epod_state);
int db8500_prcmu_request_clock(u8 clock, bool enable);
-int db8500_prcmu_set_display_clocks(void);
-int db8500_prcmu_disable_dsipll(void);
-int db8500_prcmu_enable_dsipll(void);
void db8500_prcmu_config_abb_event_readout(u32 abb_events);
void db8500_prcmu_get_abb_event_buffer(void __iomem **buf);
int db8500_prcmu_config_esram0_deep_sleep(u8 state);
@@ -546,7 +543,7 @@ void db8500_prcmu_write_masked(unsigned int reg, u32 mask, u32 value);
#else /* !CONFIG_MFD_DB8500_PRCMU */
-static inline void db8500_prcmu_early_init(u32 phy_base, u32 size) {}
+static inline void db8500_prcmu_early_init(void) {}
static inline int prcmu_set_rc_a2p(enum romcode_write code)
{
@@ -682,21 +679,6 @@ static inline int db8500_prcmu_request_clock(u8 clock, bool enable)
return 0;
}
-static inline int db8500_prcmu_set_display_clocks(void)
-{
- return 0;
-}
-
-static inline int db8500_prcmu_disable_dsipll(void)
-{
- return 0;
-}
-
-static inline int db8500_prcmu_enable_dsipll(void)
-{
- return 0;
-}
-
static inline int db8500_prcmu_config_esram0_deep_sleep(u8 state)
{
return 0;
@@ -738,7 +720,7 @@ static inline int db8500_prcmu_load_a9wdog(u8 id, u32 val)
static inline bool db8500_prcmu_is_ac_wake_requested(void)
{
- return 0;
+ return false;
}
static inline int db8500_prcmu_set_arm_opp(u8 opp)
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index 2e2c6a63a065..e7a7e70fdb38 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) ST Ericsson SA 2011
*
- * License Terms: GNU General Public License v2
- *
* STE Ux500 PRCMU API
*/
#ifndef __MACH_PRCMU_H
@@ -187,10 +186,12 @@ enum ddr_pwrst {
#define PRCMU_FW_PROJECT_U8500_C3 8
#define PRCMU_FW_PROJECT_U8500_C4 9
#define PRCMU_FW_PROJECT_U9500_MBL 10
-#define PRCMU_FW_PROJECT_U8500_MBL 11 /* Customer specific */
+#define PRCMU_FW_PROJECT_U8500_SSG1 11 /* Samsung specific */
#define PRCMU_FW_PROJECT_U8500_MBL2 12 /* Customer specific */
#define PRCMU_FW_PROJECT_U8520 13
#define PRCMU_FW_PROJECT_U8420 14
+#define PRCMU_FW_PROJECT_U8500_SSG2 15 /* Samsung specific */
+#define PRCMU_FW_PROJECT_U8420_SYSCLK 17
#define PRCMU_FW_PROJECT_A9420 20
/* [32..63] 9540 and derivatives */
#define PRCMU_FW_PROJECT_U9540 32
@@ -212,9 +213,9 @@ struct prcmu_fw_version {
#if defined(CONFIG_UX500_SOC_DB8500)
-static inline void prcmu_early_init(u32 phy_base, u32 size)
+static inline void prcmu_early_init(void)
{
- return db8500_prcmu_early_init(phy_base, size);
+ return db8500_prcmu_early_init();
}
static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk,
@@ -321,21 +322,6 @@ static inline bool prcmu_is_ac_wake_requested(void)
return db8500_prcmu_is_ac_wake_requested();
}
-static inline int prcmu_set_display_clocks(void)
-{
- return db8500_prcmu_set_display_clocks();
-}
-
-static inline int prcmu_disable_dsipll(void)
-{
- return db8500_prcmu_disable_dsipll();
-}
-
-static inline int prcmu_enable_dsipll(void)
-{
- return db8500_prcmu_enable_dsipll();
-}
-
static inline int prcmu_config_esram0_deep_sleep(u8 state)
{
return db8500_prcmu_config_esram0_deep_sleep(state);
@@ -402,7 +388,7 @@ static inline int prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
}
#else
-static inline void prcmu_early_init(u32 phy_base, u32 size) {}
+static inline void prcmu_early_init(void) {}
static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk,
bool keep_ap_pll)
@@ -511,21 +497,6 @@ static inline bool prcmu_is_ac_wake_requested(void)
return false;
}
-static inline int prcmu_set_display_clocks(void)
-{
- return 0;
-}
-
-static inline int prcmu_disable_dsipll(void)
-{
- return 0;
-}
-
-static inline int prcmu_enable_dsipll(void)
-{
- return 0;
-}
-
static inline int prcmu_config_esram0_deep_sleep(u8 state)
{
return 0;
@@ -585,31 +556,11 @@ static inline void prcmu_clear(unsigned int reg, u32 bits)
#define PRCMU_QOS_ARM_OPP 3
#define PRCMU_QOS_DEFAULT_VALUE -1
-#ifdef CONFIG_DBX500_PRCMU_QOS_POWER
-
-unsigned long prcmu_qos_get_cpufreq_opp_delay(void);
-void prcmu_qos_set_cpufreq_opp_delay(unsigned long);
-void prcmu_qos_force_opp(int, s32);
-int prcmu_qos_requirement(int pm_qos_class);
-int prcmu_qos_add_requirement(int pm_qos_class, char *name, s32 value);
-int prcmu_qos_update_requirement(int pm_qos_class, char *name, s32 new_value);
-void prcmu_qos_remove_requirement(int pm_qos_class, char *name);
-int prcmu_qos_add_notifier(int prcmu_qos_class,
- struct notifier_block *notifier);
-int prcmu_qos_remove_notifier(int prcmu_qos_class,
- struct notifier_block *notifier);
-
-#else
-
static inline unsigned long prcmu_qos_get_cpufreq_opp_delay(void)
{
return 0;
}
-static inline void prcmu_qos_set_cpufreq_opp_delay(unsigned long n) {}
-
-static inline void prcmu_qos_force_opp(int prcmu_qos_class, s32 i) {}
-
static inline int prcmu_qos_requirement(int prcmu_qos_class)
{
return 0;
@@ -642,6 +593,4 @@ static inline int prcmu_qos_remove_notifier(int prcmu_qos_class,
return 0;
}
-#endif
-
#endif /* __MACH_PRCMU_H */
diff --git a/include/linux/mfd/dln2.h b/include/linux/mfd/dln2.h
index 004b24576da8..4cade9aa8edb 100644
--- a/include/linux/mfd/dln2.h
+++ b/include/linux/mfd/dln2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_USB_DLN2_H
#define __LINUX_USB_DLN2_H
diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h
index 38a372a0e285..43dfca1c9702 100644
--- a/include/linux/mfd/ds1wm.h
+++ b/include/linux/mfd/ds1wm.h
@@ -1,13 +1,29 @@
-/* MFD cell driver data for the DS1WM driver */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* MFD cell driver data for the DS1WM driver
+ *
+ * to be defined by the MFD device that is
+ * using this driver for one of its sub-devices
+ */
struct ds1wm_driver_data {
int active_high;
int clock_rate;
- /* in milliseconds, the amount of time to */
- /* sleep following a reset pulse. Zero */
- /* should work if your bus devices recover*/
- /* time respects the 1-wire spec since the*/
- /* ds1wm implements the precise timings of*/
- /* a reset pulse/presence detect sequence.*/
+ /* in milliseconds, the amount of time to
+ * sleep following a reset pulse. Zero
+ * should work if your bus devices' recovery
+ * time respects the 1-wire spec, since the
+ * ds1wm implements the precise timings of
+ * a reset pulse/presence detect sequence.
+ */
unsigned int reset_recover_delay;
+
+ /* Say true here for big-endian hardware
+ * (only relevant with bus_shift > 0).
+ */
+ bool is_hw_big_endian;
+
+ /* left shift of the register number to get the register address offset.
+ * Only 0, 1 or 2 is allowed, for 8-, 16- or 32-bit bus widths respectively.
+ */
+ unsigned int bus_shift;
};
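
An illustrative driver-data block (the values are made up) for a little-endian host mapping the DS1WM registers on a 16-bit bus, so register n sits at byte offset n << 1:

static struct ds1wm_driver_data ds1wm_pdata = {
	.active_high		= 0,
	.clock_rate		= 4000000,	/* Hz fed to the DS1WM */
	.reset_recover_delay	= 1,		/* ms; see comment above */
	.is_hw_big_endian	= false,
	.bus_shift		= 1,		/* 16-bit bus */
};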
diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
index 32a1b5cfeba1..ffde195e12b7 100644
--- a/include/linux/mfd/ezx-pcap.h
+++ b/include/linux/mfd/ezx-pcap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2009 Daniel Ribeiro <drwyrm@gmail.com>
*
diff --git a/include/linux/mfd/gsc.h b/include/linux/mfd/gsc.h
new file mode 100644
index 000000000000..6bd639c285b4
--- /dev/null
+++ b/include/linux/mfd/gsc.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Gateworks Corporation
+ */
+#ifndef __LINUX_MFD_GSC_H_
+#define __LINUX_MFD_GSC_H_
+
+#include <linux/regmap.h>
+
+/* Device Addresses */
+#define GSC_MISC 0x20
+#define GSC_UPDATE 0x21
+#define GSC_GPIO 0x23
+#define GSC_HWMON 0x29
+#define GSC_EEPROM0 0x50
+#define GSC_EEPROM1 0x51
+#define GSC_EEPROM2 0x52
+#define GSC_EEPROM3 0x53
+#define GSC_RTC 0x68
+
+/* Register offsets */
+enum {
+ GSC_CTRL_0 = 0x00,
+ GSC_CTRL_1 = 0x01,
+ GSC_TIME = 0x02,
+ GSC_TIME_ADD = 0x06,
+ GSC_IRQ_STATUS = 0x0A,
+ GSC_IRQ_ENABLE = 0x0B,
+ GSC_FW_CRC = 0x0C,
+ GSC_FW_VER = 0x0E,
+ GSC_WP = 0x0F,
+};
+
+/* Bit definitions */
+#define GSC_CTRL_0_PB_HARD_RESET 0
+#define GSC_CTRL_0_PB_CLEAR_SECURE_KEY 1
+#define GSC_CTRL_0_PB_SOFT_POWER_DOWN 2
+#define GSC_CTRL_0_PB_BOOT_ALTERNATE 3
+#define GSC_CTRL_0_PERFORM_CRC 4
+#define GSC_CTRL_0_TAMPER_DETECT 5
+#define GSC_CTRL_0_SWITCH_HOLD 6
+
+#define GSC_CTRL_1_SLEEP_ENABLE 0
+#define GSC_CTRL_1_SLEEP_ACTIVATE 1
+#define GSC_CTRL_1_SLEEP_ADD 2
+#define GSC_CTRL_1_SLEEP_NOWAKEPB 3
+#define GSC_CTRL_1_WDT_TIME 4
+#define GSC_CTRL_1_WDT_ENABLE 5
+#define GSC_CTRL_1_SWITCH_BOOT_ENABLE 6
+#define GSC_CTRL_1_SWITCH_BOOT_CLEAR 7
+
+#define GSC_IRQ_PB 0
+#define GSC_IRQ_KEY_ERASED 1
+#define GSC_IRQ_EEPROM_WP 2
+#define GSC_IRQ_RESV 3
+#define GSC_IRQ_GPIO 4
+#define GSC_IRQ_TAMPER 5
+#define GSC_IRQ_WDT_TIMEOUT 6
+#define GSC_IRQ_SWITCH_HOLD 7
+
+int gsc_read(void *context, unsigned int reg, unsigned int *val);
+int gsc_write(void *context, unsigned int reg, unsigned int val);
+
+struct gsc_dev {
+ struct device *dev;
+
+ struct i2c_client *i2c; /* 0x20: interrupt controller, WDT */
+ struct i2c_client *i2c_hwmon; /* 0x29: hwmon, fan controller */
+
+ struct regmap *regmap;
+
+ unsigned int fwver;
+ unsigned short fwcrc;
+};
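
A hedged sketch of the accessors in use; "context" is whatever handle the regmap was initialised with (the I2C client, in the core driver) and the two-byte CRC is assumed to be a little-endian pair:

static int gsc_cache_fw_info(struct gsc_dev *gsc, void *context)
{
	unsigned int ver, crcl, crch;

	if (gsc_read(context, GSC_FW_VER, &ver) ||
	    gsc_read(context, GSC_FW_CRC, &crcl) ||
	    gsc_read(context, GSC_FW_CRC + 1, &crch))
		return -EIO;

	gsc->fwver = ver;
	gsc->fwcrc = crcl | (crch << 8);
	return 0;
}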
+
+#endif /* __LINUX_MFD_GSC_H_ */
diff --git a/include/linux/mfd/hi6421-pmic.h b/include/linux/mfd/hi6421-pmic.h
index 587273e35acf..2cadf8897c64 100644
--- a/include/linux/mfd/hi6421-pmic.h
+++ b/include/linux/mfd/hi6421-pmic.h
@@ -1,16 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Header file for device driver Hi6421 PMIC
*
* Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd.
* http://www.hisilicon.com
* Copyright (c) <2013-2014> Linaro Ltd.
- * http://www.linaro.org
+ * https://www.linaro.org
*
* Author: Guodong Xu <guodong.xu@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __HI6421_PMIC_H
@@ -38,4 +35,9 @@ struct hi6421_pmic {
struct regmap *regmap;
};
+enum hi6421_type {
+ HI6421 = 0,
+ HI6421_V530,
+};
+
#endif /* __HI6421_PMIC_H */
diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h
index 62f03c2b1bb0..6a012784dd1b 100644
--- a/include/linux/mfd/hi655x-pmic.h
+++ b/include/linux/mfd/hi655x-pmic.h
@@ -1,20 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Device driver for regulators in hi655x IC
*
- * Copyright (c) 2016 Hisilicon.
+ * Copyright (c) 2016 HiSilicon Ltd.
*
* Authors:
* Chen Feng <puck.chen@hisilicon.com>
* Fei Wang <w.f@huawei.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __HI655X_PMIC_H
#define __HI655X_PMIC_H
+#include <linux/gpio/consumer.h>
+
/* Hi655x registers are mapped to memory bus in 4 bytes stride */
#define HI655X_STRIDE 4
#define HI655X_BUS_ADDR(x) ((x) << 2)
@@ -56,7 +55,7 @@ struct hi655x_pmic {
struct resource *res;
struct device *dev;
struct regmap *regmap;
- int gpio;
+ struct gpio_desc *gpio;
unsigned int ver;
struct regmap_irq_chip_data *irq_data;
};
diff --git a/include/linux/mfd/htc-pasic3.h b/include/linux/mfd/htc-pasic3.h
deleted file mode 100644
index 3d3ed67bd969..000000000000
--- a/include/linux/mfd/htc-pasic3.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * HTC PASIC3 driver - LEDs and DS1WM
- *
- * Copyright (c) 2007 Philipp Zabel <philipp.zabel@gmail.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- *
- */
-
-#ifndef __PASIC3_H
-#define __PASIC3_H
-
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-
-extern void pasic3_write_register(struct device *dev, u32 reg, u8 val);
-extern u8 pasic3_read_register(struct device *dev, u32 reg);
-
-/*
- * mask for registers 0x20,0x21,0x22
- */
-#define PASIC3_MASK_LED0 0x04
-#define PASIC3_MASK_LED1 0x08
-#define PASIC3_MASK_LED2 0x40
-
-/*
- * bits in register 0x06
- */
-#define PASIC3_BIT2_LED0 0x08
-#define PASIC3_BIT2_LED1 0x10
-#define PASIC3_BIT2_LED2 0x20
-
-struct pasic3_led {
- struct led_classdev led;
- unsigned int hw_num;
- unsigned int bit2;
- unsigned int mask;
- struct pasic3_leds_machinfo *pdata;
-};
-
-struct pasic3_leds_machinfo {
- unsigned int num_leds;
- unsigned int power_gpio;
- struct pasic3_led *leds;
-};
-
-struct pasic3_platform_data {
- struct pasic3_leds_machinfo *led_pdata;
- unsigned int clock_rate;
-};
-
-#endif
diff --git a/include/linux/mfd/idt82p33_reg.h b/include/linux/mfd/idt82p33_reg.h
new file mode 100644
index 000000000000..1db532feeb91
--- /dev/null
+++ b/include/linux/mfd/idt82p33_reg.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Register Map - Based on AN888_SMUforIEEE_SynchEther_82P33xxx_RevH.pdf
+ *
+ * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef HAVE_IDT82P33_REG
+#define HAVE_IDT82P33_REG
+
+#define REG_ADDR(page, offset) (((page) << 0x7) | ((offset) & 0x7f))
+
+/* Register address */
+#define DPLL1_TOD_CNFG 0x134
+#define DPLL2_TOD_CNFG 0x1B4
+
+#define DPLL1_TOD_STS 0x10B
+#define DPLL2_TOD_STS 0x18B
+
+#define DPLL1_TOD_TRIGGER 0x115
+#define DPLL2_TOD_TRIGGER 0x195
+
+#define DPLL1_OPERATING_MODE_CNFG 0x120
+#define DPLL2_OPERATING_MODE_CNFG 0x1A0
+
+#define DPLL1_HOLDOVER_FREQ_CNFG 0x12C
+#define DPLL2_HOLDOVER_FREQ_CNFG 0x1AC
+
+#define DPLL1_PHASE_OFFSET_CNFG 0x143
+#define DPLL2_PHASE_OFFSET_CNFG 0x1C3
+
+#define DPLL1_SYNC_EDGE_CNFG 0x140
+#define DPLL2_SYNC_EDGE_CNFG 0x1C0
+
+#define DPLL1_INPUT_MODE_CNFG 0x116
+#define DPLL2_INPUT_MODE_CNFG 0x196
+
+#define DPLL1_OPERATING_STS 0x102
+#define DPLL2_OPERATING_STS 0x182
+
+#define DPLL1_CURRENT_FREQ_STS 0x103
+#define DPLL2_CURRENT_FREQ_STS 0x183
+
+#define REG_SOFT_RESET 0X381
+
+#define OUT_MUX_CNFG(outn) REG_ADDR(0x6, (0xC * (outn)))
+#define TOD_TRIGGER(wr_trig, rd_trig)	(((wr_trig) & 0xf) << 4 | ((rd_trig) & 0xf))
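
Two worked examples of the helpers above, checked against the register list: DPLL1 registers sit on page 2 and DPLL2 on page 3, and the trigger byte packs both selectors into one register write.

uint16_t dpll1_tod_cnfg = REG_ADDR(2, 0x34);	/* (2 << 7) | 0x34 == 0x134 */

uint8_t tod_trig = TOD_TRIGGER(HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
			       HW_TOD_RD_TRIG_SEL_LSB_TOD_STS);	/* 0x99 */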
+
+/* Register bit definitions */
+#define SYNC_TOD BIT(1)
+#define PH_OFFSET_EN BIT(7)
+#define SQUELCH_ENABLE BIT(5)
+
+/* Bit definitions for the DPLL_MODE register */
+#define PLL_MODE_SHIFT (0)
+#define PLL_MODE_MASK (0x1F)
+#define COMBO_MODE_EN BIT(5)
+#define COMBO_MODE_SHIFT (6)
+#define COMBO_MODE_MASK (0x3)
+
+/* Bit definitions for DPLL_OPERATING_STS register */
+#define OPERATING_STS_MASK (0x7)
+#define OPERATING_STS_SHIFT (0x0)
+
+/* Bit definitions for DPLL_TOD_TRIGGER register */
+#define READ_TRIGGER_MASK (0xF)
+#define READ_TRIGGER_SHIFT (0x0)
+#define WRITE_TRIGGER_MASK (0xF0)
+#define WRITE_TRIGGER_SHIFT (0x4)
+
+/* Bit definitions for REG_SOFT_RESET register */
+#define SOFT_RESET_EN BIT(7)
+
+enum pll_mode {
+ PLL_MODE_MIN = 0,
+ PLL_MODE_AUTOMATIC = PLL_MODE_MIN,
+ PLL_MODE_FORCE_FREERUN = 1,
+ PLL_MODE_FORCE_HOLDOVER = 2,
+ PLL_MODE_FORCE_LOCKED = 4,
+ PLL_MODE_FORCE_PRE_LOCKED2 = 5,
+ PLL_MODE_FORCE_PRE_LOCKED = 6,
+ PLL_MODE_FORCE_LOST_PHASE = 7,
+ PLL_MODE_DCO = 10,
+ PLL_MODE_WPH = 18,
+ PLL_MODE_MAX = PLL_MODE_WPH,
+};
+
+enum hw_tod_trig_sel {
+ HW_TOD_TRIG_SEL_MIN = 0,
+ HW_TOD_TRIG_SEL_NO_WRITE = HW_TOD_TRIG_SEL_MIN,
+ HW_TOD_TRIG_SEL_NO_READ = HW_TOD_TRIG_SEL_MIN,
+ HW_TOD_TRIG_SEL_SYNC_SEL = 1,
+ HW_TOD_TRIG_SEL_IN12 = 2,
+ HW_TOD_TRIG_SEL_IN13 = 3,
+ HW_TOD_TRIG_SEL_IN14 = 4,
+ HW_TOD_TRIG_SEL_TOD_PPS = 5,
+ HW_TOD_TRIG_SEL_TIMER_INTERVAL = 6,
+ HW_TOD_TRIG_SEL_MSB_PHASE_OFFSET_CNFG = 7,
+ HW_TOD_TRIG_SEL_MSB_HOLDOVER_FREQ_CNFG = 8,
+ HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG = 9,
+ HW_TOD_RD_TRIG_SEL_LSB_TOD_STS = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+ WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+};
+
+/* Enumerated type listing DPLL operational modes */
+enum dpll_state {
+ DPLL_STATE_FREERUN = 1,
+ DPLL_STATE_HOLDOVER = 2,
+ DPLL_STATE_LOCKED = 4,
+ DPLL_STATE_PRELOCKED2 = 5,
+ DPLL_STATE_PRELOCKED = 6,
+ DPLL_STATE_LOSTPHASE = 7,
+ DPLL_STATE_MAX
+};
+
+#endif
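
For orientation, a minimal sketch of how these definitions compose; the read_reg() accessor is hypothetical and stands in for whatever bus access the driver provides:

#include <linux/types.h>

extern u8 read_reg(u16 addr);	/* hypothetical bus accessor */

static void idt82p33_sketch(void)
{
	/* Page 6, output 2: OUT_MUX_CNFG(2) == REG_ADDR(0x6, 0x18). */
	u16 mux_addr = OUT_MUX_CNFG(2);

	/* Compose a TOD trigger byte: write trigger in the high nibble,
	 * read trigger in the low nibble. */
	u8 trig = TOD_TRIGGER(HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
			      HW_TOD_RD_TRIG_SEL_LSB_TOD_STS);

	/* Decode DPLL1's operating state from its status register. */
	enum dpll_state state = (read_reg(DPLL1_OPERATING_STS) &
				 OPERATING_STS_MASK) >> OPERATING_STS_SHIFT;
}
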
diff --git a/include/linux/mfd/idt8a340_reg.h b/include/linux/mfd/idt8a340_reg.h
new file mode 100644
index 000000000000..0c706085c205
--- /dev/null
+++ b/include/linux/mfd/idt8a340_reg.h
@@ -0,0 +1,768 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Based on 5.2.0, Family Programming Guide (Sept 30, 2020)
+ *
+ * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef HAVE_IDT8A340_REG
+#define HAVE_IDT8A340_REG
+
+#define PAGE_ADDR_BASE 0x0000
+#define PAGE_ADDR 0x00fc
+
+#define HW_REVISION 0x8180
+#define REV_ID 0x007a
+
+#define HW_DPLL_0 (0x8a00)
+#define HW_DPLL_1 (0x8b00)
+#define HW_DPLL_2 (0x8c00)
+#define HW_DPLL_3 (0x8d00)
+#define HW_DPLL_4 (0x8e00)
+#define HW_DPLL_5 (0x8f00)
+#define HW_DPLL_6 (0x9000)
+#define HW_DPLL_7 (0x9100)
+
+#define HW_DPLL_TOD_SW_TRIG_ADDR__0 (0x080)
+#define HW_DPLL_TOD_CTRL_1 (0x089)
+#define HW_DPLL_TOD_CTRL_2 (0x08A)
+#define HW_DPLL_TOD_OVR__0 (0x098)
+#define HW_DPLL_TOD_OUT_0__0 (0x0B0)
+
+#define HW_Q0_Q1_CH_SYNC_CTRL_0 (0xa740)
+#define HW_Q0_Q1_CH_SYNC_CTRL_1 (0xa741)
+#define HW_Q2_Q3_CH_SYNC_CTRL_0 (0xa742)
+#define HW_Q2_Q3_CH_SYNC_CTRL_1 (0xa743)
+#define HW_Q4_Q5_CH_SYNC_CTRL_0 (0xa744)
+#define HW_Q4_Q5_CH_SYNC_CTRL_1 (0xa745)
+#define HW_Q6_Q7_CH_SYNC_CTRL_0 (0xa746)
+#define HW_Q6_Q7_CH_SYNC_CTRL_1 (0xa747)
+#define HW_Q8_CH_SYNC_CTRL_0 (0xa748)
+#define HW_Q8_CH_SYNC_CTRL_1 (0xa749)
+#define HW_Q9_CH_SYNC_CTRL_0 (0xa74a)
+#define HW_Q9_CH_SYNC_CTRL_1 (0xa74b)
+#define HW_Q10_CH_SYNC_CTRL_0 (0xa74c)
+#define HW_Q10_CH_SYNC_CTRL_1 (0xa74d)
+#define HW_Q11_CH_SYNC_CTRL_0 (0xa74e)
+#define HW_Q11_CH_SYNC_CTRL_1 (0xa74f)
+
+#define SYNC_SOURCE_DPLL0_TOD_PPS 0x14
+#define SYNC_SOURCE_DPLL1_TOD_PPS 0x15
+#define SYNC_SOURCE_DPLL2_TOD_PPS 0x16
+#define SYNC_SOURCE_DPLL3_TOD_PPS 0x17
+
+#define SYNCTRL1_MASTER_SYNC_RST BIT(7)
+#define SYNCTRL1_MASTER_SYNC_TRIG BIT(5)
+#define SYNCTRL1_TOD_SYNC_TRIG BIT(4)
+#define SYNCTRL1_FBDIV_FRAME_SYNC_TRIG BIT(3)
+#define SYNCTRL1_FBDIV_SYNC_TRIG BIT(2)
+#define SYNCTRL1_Q1_DIV_SYNC_TRIG BIT(1)
+#define SYNCTRL1_Q0_DIV_SYNC_TRIG BIT(0)
+
+#define HW_Q8_CTRL_SPARE (0xa7d4)
+#define HW_Q11_CTRL_SPARE (0xa7ec)
+
+/*
+ * Select FOD5 as sync_trigger for Q8 divider.
+ * Transition from logic zero to one
+ * sets trigger to sync Q8 divider.
+ *
+ * Unused when FOD4 is driving Q8 divider (normal operation).
+ */
+#define Q9_TO_Q8_SYNC_TRIG BIT(1)
+
+/*
+ * Enable FOD5 as driver for clock and sync for Q8 divider.
+ * Enable fanout buffer for FOD5.
+ *
+ * Unused when FOD4 is driving Q8 divider (normal operation).
+ */
+#define Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
+
+/*
+ * Select FOD6 as sync_trigger for Q11 divider.
+ * Transition from logic zero to one
+ * sets trigger to sync Q11 divider.
+ *
+ * Unused when FOD7 is driving Q11 divider (normal operation).
+ */
+#define Q10_TO_Q11_SYNC_TRIG BIT(1)
+
+/*
+ * Enable FOD6 as driver for clock and sync for Q11 divider.
+ * Enable fanout buffer for FOD6.
+ *
+ * Unused when FOD7 is driving Q11 divider (normal operation).
+ */
+#define Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
+
+#define RESET_CTRL 0xc000
+#define SM_RESET 0x0012
+#define SM_RESET_V520 0x0013
+#define SM_RESET_CMD 0x5A
+
+#define GENERAL_STATUS 0xc014
+#define BOOT_STATUS 0x0000
+#define HW_REV_ID 0x000A
+#define BOND_ID 0x000B
+#define HW_CSR_ID 0x000C
+#define HW_IRQ_ID 0x000E
+#define MAJ_REL 0x0010
+#define MIN_REL 0x0011
+#define HOTFIX_REL 0x0012
+#define PIPELINE_ID 0x0014
+#define BUILD_ID 0x0018
+#define JTAG_DEVICE_ID 0x001c
+#define PRODUCT_ID 0x001e
+#define OTP_SCSR_CONFIG_SELECT 0x0022
+
+#define STATUS 0xc03c
+#define DPLL0_STATUS 0x0018
+#define DPLL1_STATUS 0x0019
+#define DPLL2_STATUS 0x001a
+#define DPLL3_STATUS 0x001b
+#define DPLL4_STATUS 0x001c
+#define DPLL5_STATUS 0x001d
+#define DPLL6_STATUS 0x001e
+#define DPLL7_STATUS 0x001f
+#define DPLL_SYS_STATUS 0x0020
+#define DPLL_SYS_APLL_STATUS 0x0021
+#define DPLL0_FILTER_STATUS 0x0044
+#define DPLL1_FILTER_STATUS 0x004c
+#define DPLL2_FILTER_STATUS 0x0054
+#define DPLL3_FILTER_STATUS 0x005c
+#define DPLL4_FILTER_STATUS 0x0064
+#define DPLL5_FILTER_STATUS 0x006c
+#define DPLL6_FILTER_STATUS 0x0074
+#define DPLL7_FILTER_STATUS 0x007c
+#define DPLLSYS_FILTER_STATUS 0x0084
+#define USER_GPIO0_TO_7_STATUS 0x008a
+#define USER_GPIO8_TO_15_STATUS 0x008b
+
+#define GPIO_USER_CONTROL 0xc160
+#define GPIO0_TO_7_OUT 0x0000
+#define GPIO8_TO_15_OUT 0x0001
+#define GPIO0_TO_7_OUT_V520 0x0002
+#define GPIO8_TO_15_OUT_V520 0x0003
+
+#define STICKY_STATUS_CLEAR 0xc164
+
+#define GPIO_TOD_NOTIFICATION_CLEAR 0xc16c
+
+#define ALERT_CFG 0xc188
+
+#define SYS_DPLL_XO 0xc194
+
+#define SYS_APLL 0xc19c
+
+#define INPUT_0 0xc1b0
+#define INPUT_1 0xc1c0
+#define INPUT_2 0xc1d0
+#define INPUT_3 0xc200
+#define INPUT_4 0xc210
+#define INPUT_5 0xc220
+#define INPUT_6 0xc230
+#define INPUT_7 0xc240
+#define INPUT_8 0xc250
+#define INPUT_9 0xc260
+#define INPUT_10 0xc280
+#define INPUT_11 0xc290
+#define INPUT_12 0xc2a0
+#define INPUT_13 0xc2b0
+#define INPUT_14 0xc2c0
+#define INPUT_15 0xc2d0
+
+#define REF_MON_0 0xc2e0
+#define REF_MON_1 0xc2ec
+#define REF_MON_2 0xc300
+#define REF_MON_3 0xc30c
+#define REF_MON_4 0xc318
+#define REF_MON_5 0xc324
+#define REF_MON_6 0xc330
+#define REF_MON_7 0xc33c
+#define REF_MON_8 0xc348
+#define REF_MON_9 0xc354
+#define REF_MON_10 0xc360
+#define REF_MON_11 0xc36c
+#define REF_MON_12 0xc380
+#define REF_MON_13 0xc38c
+#define REF_MON_14 0xc398
+#define REF_MON_15 0xc3a4
+
+#define DPLL_0 0xc3b0
+#define DPLL_CTRL_REG_0 0x0002
+#define DPLL_CTRL_REG_1 0x0003
+#define DPLL_CTRL_REG_2 0x0004
+#define DPLL_TOD_SYNC_CFG 0x0031
+#define DPLL_COMBO_SLAVE_CFG_0 0x0032
+#define DPLL_COMBO_SLAVE_CFG_1 0x0033
+#define DPLL_SLAVE_REF_CFG 0x0034
+#define DPLL_REF_MODE 0x0035
+#define DPLL_PHASE_MEASUREMENT_CFG 0x0036
+#define DPLL_MODE 0x0037
+#define DPLL_MODE_V520 0x003B
+#define DPLL_1 0xc400
+#define DPLL_2 0xc438
+#define DPLL_2_V520 0xc43c
+#define DPLL_3 0xc480
+#define DPLL_4 0xc4b8
+#define DPLL_4_V520 0xc4bc
+#define DPLL_5 0xc500
+#define DPLL_6 0xc538
+#define DPLL_6_V520 0xc53c
+#define DPLL_7 0xc580
+#define SYS_DPLL 0xc5b8
+#define SYS_DPLL_V520 0xc5bc
+
+#define DPLL_CTRL_0 0xc600
+#define DPLL_CTRL_DPLL_MANU_REF_CFG 0x0001
+#define DPLL_CTRL_DPLL_FOD_FREQ 0x001c
+#define DPLL_CTRL_COMBO_MASTER_CFG 0x003a
+#define DPLL_CTRL_1 0xc63c
+#define DPLL_CTRL_2 0xc680
+#define DPLL_CTRL_3 0xc6bc
+#define DPLL_CTRL_4 0xc700
+#define DPLL_CTRL_5 0xc73c
+#define DPLL_CTRL_6 0xc780
+#define DPLL_CTRL_7 0xc7bc
+#define SYS_DPLL_CTRL 0xc800
+
+#define DPLL_PHASE_0 0xc818
+/* Signed 42-bit FFO in units of 2^(-53) */
+#define DPLL_WR_PHASE 0x0000
+#define DPLL_PHASE_1 0xc81c
+#define DPLL_PHASE_2 0xc820
+#define DPLL_PHASE_3 0xc824
+#define DPLL_PHASE_4 0xc828
+#define DPLL_PHASE_5 0xc82c
+#define DPLL_PHASE_6 0xc830
+#define DPLL_PHASE_7 0xc834
+
+#define DPLL_FREQ_0 0xc838
+/* Signed 42-bit FFO in units of 2^(-53) */
+#define DPLL_WR_FREQ 0x0000
+#define DPLL_FREQ_1 0xc840
+#define DPLL_FREQ_2 0xc848
+#define DPLL_FREQ_3 0xc850
+#define DPLL_FREQ_4 0xc858
+#define DPLL_FREQ_5 0xc860
+#define DPLL_FREQ_6 0xc868
+#define DPLL_FREQ_7 0xc870
+
+#define DPLL_PHASE_PULL_IN_0 0xc880
+#define PULL_IN_OFFSET 0x0000 /* Signed 32 bit */
+#define PULL_IN_SLOPE_LIMIT 0x0004 /* Unsigned 24 bit */
+#define PULL_IN_CTRL 0x0007
+#define DPLL_PHASE_PULL_IN_1 0xc888
+#define DPLL_PHASE_PULL_IN_2 0xc890
+#define DPLL_PHASE_PULL_IN_3 0xc898
+#define DPLL_PHASE_PULL_IN_4 0xc8a0
+#define DPLL_PHASE_PULL_IN_5 0xc8a8
+#define DPLL_PHASE_PULL_IN_6 0xc8b0
+#define DPLL_PHASE_PULL_IN_7 0xc8b8
+
+#define GPIO_CFG 0xc8c0
+#define GPIO_CFG_GBL 0x0000
+#define GPIO_0 0xc8c2
+#define GPIO_DCO_INC_DEC 0x0000
+#define GPIO_OUT_CTRL_0 0x0001
+#define GPIO_OUT_CTRL_1 0x0002
+#define GPIO_TOD_TRIG 0x0003
+#define GPIO_DPLL_INDICATOR 0x0004
+#define GPIO_LOS_INDICATOR 0x0005
+#define GPIO_REF_INPUT_DSQ_0 0x0006
+#define GPIO_REF_INPUT_DSQ_1 0x0007
+#define GPIO_REF_INPUT_DSQ_2 0x0008
+#define GPIO_REF_INPUT_DSQ_3 0x0009
+#define GPIO_MAN_CLK_SEL_0 0x000a
+#define GPIO_MAN_CLK_SEL_1 0x000b
+#define GPIO_MAN_CLK_SEL_2 0x000c
+#define GPIO_SLAVE 0x000d
+#define GPIO_ALERT_OUT_CFG 0x000e
+#define GPIO_TOD_NOTIFICATION_CFG 0x000f
+#define GPIO_CTRL 0x0010
+#define GPIO_CTRL_V520 0x0011
+#define GPIO_1 0xc8d4
+#define GPIO_2 0xc8e6
+#define GPIO_3 0xc900
+#define GPIO_4 0xc912
+#define GPIO_5 0xc924
+#define GPIO_6 0xc936
+#define GPIO_7 0xc948
+#define GPIO_8 0xc95a
+#define GPIO_9 0xc980
+#define GPIO_10 0xc992
+#define GPIO_11 0xc9a4
+#define GPIO_12 0xc9b6
+#define GPIO_13 0xc9c8
+#define GPIO_14 0xc9da
+#define GPIO_15 0xca00
+
+#define OUT_DIV_MUX 0xca12
+#define OUTPUT_0 0xca14
+#define OUTPUT_0_V520 0xca20
+/* FOD frequency output divider value */
+#define OUT_DIV 0x0000
+#define OUT_DUTY_CYCLE_HIGH 0x0004
+#define OUT_CTRL_0 0x0008
+#define OUT_CTRL_1 0x0009
+/* Phase adjustment in FOD cycles */
+#define OUT_PHASE_ADJ 0x000c
+#define OUTPUT_1 0xca24
+#define OUTPUT_1_V520 0xca30
+#define OUTPUT_2 0xca34
+#define OUTPUT_2_V520 0xca40
+#define OUTPUT_3 0xca44
+#define OUTPUT_3_V520 0xca50
+#define OUTPUT_4 0xca54
+#define OUTPUT_4_V520 0xca60
+#define OUTPUT_5 0xca64
+#define OUTPUT_5_V520 0xca80
+#define OUTPUT_6 0xca80
+#define OUTPUT_6_V520 0xca90
+#define OUTPUT_7 0xca90
+#define OUTPUT_7_V520 0xcaa0
+#define OUTPUT_8 0xcaa0
+#define OUTPUT_8_V520 0xcab0
+#define OUTPUT_9 0xcab0
+#define OUTPUT_9_V520 0xcac0
+#define OUTPUT_10 0xcac0
+#define OUTPUT_10_V520 0xcad0
+#define OUTPUT_11 0xcad0
+#define OUTPUT_11_V520 0xcae0
+
+#define SERIAL 0xcae0
+#define SERIAL_V520 0xcaf0
+
+#define PWM_ENCODER_0 0xcb00
+#define PWM_ENCODER_1 0xcb08
+#define PWM_ENCODER_2 0xcb10
+#define PWM_ENCODER_3 0xcb18
+#define PWM_ENCODER_4 0xcb20
+#define PWM_ENCODER_5 0xcb28
+#define PWM_ENCODER_6 0xcb30
+#define PWM_ENCODER_7 0xcb38
+#define PWM_DECODER_0 0xcb40
+#define PWM_DECODER_1 0xcb48
+#define PWM_DECODER_1_V520 0xcb4a
+#define PWM_DECODER_2 0xcb50
+#define PWM_DECODER_2_V520 0xcb54
+#define PWM_DECODER_3 0xcb58
+#define PWM_DECODER_3_V520 0xcb5e
+#define PWM_DECODER_4 0xcb60
+#define PWM_DECODER_4_V520 0xcb68
+#define PWM_DECODER_5 0xcb68
+#define PWM_DECODER_5_V520 0xcb80
+#define PWM_DECODER_6 0xcb70
+#define PWM_DECODER_6_V520 0xcb8a
+#define PWM_DECODER_7 0xcb80
+#define PWM_DECODER_7_V520 0xcb94
+#define PWM_DECODER_8 0xcb88
+#define PWM_DECODER_8_V520 0xcb9e
+#define PWM_DECODER_9 0xcb90
+#define PWM_DECODER_9_V520 0xcba8
+#define PWM_DECODER_10 0xcb98
+#define PWM_DECODER_10_V520 0xcbb2
+#define PWM_DECODER_11 0xcba0
+#define PWM_DECODER_11_V520 0xcbbc
+#define PWM_DECODER_12 0xcba8
+#define PWM_DECODER_12_V520 0xcbc6
+#define PWM_DECODER_13 0xcbb0
+#define PWM_DECODER_13_V520 0xcbd0
+#define PWM_DECODER_14 0xcbb8
+#define PWM_DECODER_14_V520 0xcbda
+#define PWM_DECODER_15 0xcbc0
+#define PWM_DECODER_15_V520 0xcbe4
+#define PWM_USER_DATA 0xcbc8
+#define PWM_USER_DATA_V520 0xcbf0
+
+#define TOD_0 0xcbcc
+#define TOD_0_V520 0xcc00
+/* Enable TOD counter, output channel sync and even-PPS mode */
+#define TOD_CFG 0x0000
+#define TOD_CFG_V520 0x0001
+#define TOD_1 0xcbce
+#define TOD_1_V520 0xcc02
+#define TOD_2 0xcbd0
+#define TOD_2_V520 0xcc04
+#define TOD_3 0xcbd2
+#define TOD_3_V520 0xcc06
+
+#define TOD_WRITE_0 0xcc00
+#define TOD_WRITE_0_V520 0xcc10
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_WRITE 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_WRITE_COUNTER 0x000c
+/* TOD write trigger configuration */
+#define TOD_WRITE_SELECT_CFG_0 0x000d
+/* TOD write trigger selection */
+#define TOD_WRITE_CMD 0x000f
+#define TOD_WRITE_1 0xcc10
+#define TOD_WRITE_1_V520 0xcc20
+#define TOD_WRITE_2 0xcc20
+#define TOD_WRITE_2_V520 0xcc30
+#define TOD_WRITE_3 0xcc30
+#define TOD_WRITE_3_V520 0xcc40
+
+#define TOD_READ_PRIMARY_0 0xcc40
+#define TOD_READ_PRIMARY_0_V520 0xcc50
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_READ_PRIMARY_BASE 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_READ_PRIMARY_COUNTER 0x000b
+/* Read trigger configuration */
+#define TOD_READ_PRIMARY_SEL_CFG_0 0x000c
+/* Read trigger selection */
+#define TOD_READ_PRIMARY_CMD 0x000e
+#define TOD_READ_PRIMARY_CMD_V520 0x000f
+#define TOD_READ_PRIMARY_1 0xcc50
+#define TOD_READ_PRIMARY_1_V520 0xcc60
+#define TOD_READ_PRIMARY_2 0xcc60
+#define TOD_READ_PRIMARY_2_V520 0xcc80
+#define TOD_READ_PRIMARY_3 0xcc80
+#define TOD_READ_PRIMARY_3_V520 0xcc90
+
+#define TOD_READ_SECONDARY_0 0xcc90
+#define TOD_READ_SECONDARY_0_V520 0xcca0
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_READ_SECONDARY_BASE 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_READ_SECONDARY_COUNTER 0x000b
+/* Read trigger configuration */
+#define TOD_READ_SECONDARY_SEL_CFG_0 0x000c
+/* Read trigger selection */
+#define TOD_READ_SECONDARY_CMD 0x000e
+#define TOD_READ_SECONDARY_CMD_V520 0x000f
+
+#define TOD_READ_SECONDARY_1 0xcca0
+#define TOD_READ_SECONDARY_1_V520 0xccb0
+#define TOD_READ_SECONDARY_2 0xccb0
+#define TOD_READ_SECONDARY_2_V520 0xccc0
+#define TOD_READ_SECONDARY_3 0xccc0
+#define TOD_READ_SECONDARY_3_V520 0xccd0
+
+#define OUTPUT_TDC_CFG 0xccd0
+#define OUTPUT_TDC_CFG_V520 0xcce0
+#define OUTPUT_TDC_0 0xcd00
+#define OUTPUT_TDC_1 0xcd08
+#define OUTPUT_TDC_2 0xcd10
+#define OUTPUT_TDC_3 0xcd18
+#define INPUT_TDC 0xcd20
+
+#define SCRATCH 0xcf50
+#define SCRATCH_V520 0xcf4c
+
+#define EEPROM 0xcf68
+#define EEPROM_V520 0xcf64
+
+#define OTP 0xcf70
+
+#define BYTE 0xcf80
+
+/* Bit definitions for the MAJ_REL register */
+#define MAJOR_SHIFT (1)
+#define MAJOR_MASK (0x7f)
+#define PR_BUILD BIT(0)
+
+/* Bit definitions for the USER_GPIO0_TO_7_STATUS register */
+#define GPIO0_LEVEL BIT(0)
+#define GPIO1_LEVEL BIT(1)
+#define GPIO2_LEVEL BIT(2)
+#define GPIO3_LEVEL BIT(3)
+#define GPIO4_LEVEL BIT(4)
+#define GPIO5_LEVEL BIT(5)
+#define GPIO6_LEVEL BIT(6)
+#define GPIO7_LEVEL BIT(7)
+
+/* Bit definitions for the USER_GPIO8_TO_15_STATUS register */
+#define GPIO8_LEVEL BIT(0)
+#define GPIO9_LEVEL BIT(1)
+#define GPIO10_LEVEL BIT(2)
+#define GPIO11_LEVEL BIT(3)
+#define GPIO12_LEVEL BIT(4)
+#define GPIO13_LEVEL BIT(5)
+#define GPIO14_LEVEL BIT(6)
+#define GPIO15_LEVEL BIT(7)
+
+/* Bit definitions for the GPIO0_TO_7_OUT register */
+#define GPIO0_DRIVE_LEVEL BIT(0)
+#define GPIO1_DRIVE_LEVEL BIT(1)
+#define GPIO2_DRIVE_LEVEL BIT(2)
+#define GPIO3_DRIVE_LEVEL BIT(3)
+#define GPIO4_DRIVE_LEVEL BIT(4)
+#define GPIO5_DRIVE_LEVEL BIT(5)
+#define GPIO6_DRIVE_LEVEL BIT(6)
+#define GPIO7_DRIVE_LEVEL BIT(7)
+
+/* Bit definitions for the GPIO8_TO_15_OUT register */
+#define GPIO8_DRIVE_LEVEL BIT(0)
+#define GPIO9_DRIVE_LEVEL BIT(1)
+#define GPIO10_DRIVE_LEVEL BIT(2)
+#define GPIO11_DRIVE_LEVEL BIT(3)
+#define GPIO12_DRIVE_LEVEL BIT(4)
+#define GPIO13_DRIVE_LEVEL BIT(5)
+#define GPIO14_DRIVE_LEVEL BIT(6)
+#define GPIO15_DRIVE_LEVEL BIT(7)
+
+/* Bit definitions for the DPLL_TOD_SYNC_CFG register */
+#define TOD_SYNC_SOURCE_SHIFT (1)
+#define TOD_SYNC_SOURCE_MASK (0x3)
+#define TOD_SYNC_EN BIT(0)
+
+/* Bit definitions for the DPLL_MODE register */
+#define WRITE_TIMER_MODE BIT(6)
+#define PLL_MODE_SHIFT (3)
+#define PLL_MODE_MASK (0x7)
+#define STATE_MODE_SHIFT (0)
+#define STATE_MODE_MASK (0x7)
+
+/* Bit definitions for the DPLL_MANU_REF_CFG register */
+#define MANUAL_REFERENCE_SHIFT (0)
+#define MANUAL_REFERENCE_MASK (0x1f)
+
+/* Bit definitions for the GPIO_CFG_GBL register */
+#define SUPPLY_MODE_SHIFT (0)
+#define SUPPLY_MODE_MASK (0x3)
+
+/* Bit definitions for the GPIO_DCO_INC_DEC register */
+#define INCDEC_DPLL_INDEX_SHIFT (0)
+#define INCDEC_DPLL_INDEX_MASK (0x7)
+
+/* Bit definitions for the GPIO_OUT_CTRL_0 register */
+#define CTRL_OUT_0 BIT(0)
+#define CTRL_OUT_1 BIT(1)
+#define CTRL_OUT_2 BIT(2)
+#define CTRL_OUT_3 BIT(3)
+#define CTRL_OUT_4 BIT(4)
+#define CTRL_OUT_5 BIT(5)
+#define CTRL_OUT_6 BIT(6)
+#define CTRL_OUT_7 BIT(7)
+
+/* Bit definitions for the GPIO_OUT_CTRL_1 register */
+#define CTRL_OUT_8 BIT(0)
+#define CTRL_OUT_9 BIT(1)
+#define CTRL_OUT_10 BIT(2)
+#define CTRL_OUT_11 BIT(3)
+#define CTRL_OUT_12 BIT(4)
+#define CTRL_OUT_13 BIT(5)
+#define CTRL_OUT_14 BIT(6)
+#define CTRL_OUT_15 BIT(7)
+
+/* Bit definitions for the GPIO_TOD_TRIG register */
+#define TOD_TRIG_0 BIT(0)
+#define TOD_TRIG_1 BIT(1)
+#define TOD_TRIG_2 BIT(2)
+#define TOD_TRIG_3 BIT(3)
+
+/* Bit definitions for the GPIO_DPLL_INDICATOR register */
+#define IND_DPLL_INDEX_SHIFT (0)
+#define IND_DPLL_INDEX_MASK (0x7)
+
+/* Bit definitions for the GPIO_LOS_INDICATOR register */
+#define REFMON_INDEX_SHIFT (0)
+#define REFMON_INDEX_MASK (0xf)
+/* Active level of LOS indicator, 0=low 1=high */
+#define ACTIVE_LEVEL BIT(4)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_0 register */
+#define DSQ_INP_0 BIT(0)
+#define DSQ_INP_1 BIT(1)
+#define DSQ_INP_2 BIT(2)
+#define DSQ_INP_3 BIT(3)
+#define DSQ_INP_4 BIT(4)
+#define DSQ_INP_5 BIT(5)
+#define DSQ_INP_6 BIT(6)
+#define DSQ_INP_7 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_1 register */
+#define DSQ_INP_8 BIT(0)
+#define DSQ_INP_9 BIT(1)
+#define DSQ_INP_10 BIT(2)
+#define DSQ_INP_11 BIT(3)
+#define DSQ_INP_12 BIT(4)
+#define DSQ_INP_13 BIT(5)
+#define DSQ_INP_14 BIT(6)
+#define DSQ_INP_15 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_2 register */
+#define DSQ_DPLL_0 BIT(0)
+#define DSQ_DPLL_1 BIT(1)
+#define DSQ_DPLL_2 BIT(2)
+#define DSQ_DPLL_3 BIT(3)
+#define DSQ_DPLL_4 BIT(4)
+#define DSQ_DPLL_5 BIT(5)
+#define DSQ_DPLL_6 BIT(6)
+#define DSQ_DPLL_7 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_3 register */
+#define DSQ_DPLL_SYS BIT(0)
+#define GPIO_DSQ_LEVEL BIT(1)
+
+/* Bit definitions for the GPIO_TOD_NOTIFICATION_CFG register */
+#define DPLL_TOD_SHIFT (0)
+#define DPLL_TOD_MASK (0x3)
+#define TOD_READ_SECONDARY BIT(2)
+#define GPIO_ASSERT_LEVEL BIT(3)
+
+/* Bit definitions for the GPIO_CTRL register */
+#define GPIO_FUNCTION_EN BIT(0)
+#define GPIO_CMOS_OD_MODE BIT(1)
+#define GPIO_CONTROL_DIR BIT(2)
+#define GPIO_PU_PD_MODE BIT(3)
+#define GPIO_FUNCTION_SHIFT (4)
+#define GPIO_FUNCTION_MASK (0xf)
+
+/* Bit definitions for the OUT_CTRL_1 register */
+#define OUT_SYNC_DISABLE BIT(7)
+#define SQUELCH_VALUE BIT(6)
+#define SQUELCH_DISABLE BIT(5)
+#define PAD_VDDO_SHIFT (2)
+#define PAD_VDDO_MASK (0x7)
+#define PAD_CMOSDRV_SHIFT (0)
+#define PAD_CMOSDRV_MASK (0x3)
+
+/* Bit definitions for the TOD_CFG register */
+#define TOD_EVEN_PPS_MODE BIT(2)
+#define TOD_OUT_SYNC_ENABLE BIT(1)
+#define TOD_ENABLE BIT(0)
+
+/* Bit definitions for the TOD_WRITE_SELECT_CFG_0 register */
+#define WR_PWM_DECODER_INDEX_SHIFT (4)
+#define WR_PWM_DECODER_INDEX_MASK (0xf)
+#define WR_REF_INDEX_SHIFT (0)
+#define WR_REF_INDEX_MASK (0xf)
+
+/* Bit definitions for the TOD_WRITE_CMD register */
+#define TOD_WRITE_SELECTION_SHIFT (0)
+#define TOD_WRITE_SELECTION_MASK (0xf)
+/* 4.8.7 */
+#define TOD_WRITE_TYPE_SHIFT (4)
+#define TOD_WRITE_TYPE_MASK (0x3)
+
+/* Bit definitions for the TOD_READ_PRIMARY_SEL_CFG_0 register */
+#define RD_PWM_DECODER_INDEX_SHIFT (4)
+#define RD_PWM_DECODER_INDEX_MASK (0xf)
+#define RD_REF_INDEX_SHIFT (0)
+#define RD_REF_INDEX_MASK (0xf)
+
+/* Bit definitions for the TOD_READ_PRIMARY_CMD register */
+#define TOD_READ_TRIGGER_MODE BIT(4)
+#define TOD_READ_TRIGGER_SHIFT (0)
+#define TOD_READ_TRIGGER_MASK (0xf)
+
+/* Bit definitions for the DPLL_CTRL_COMBO_MASTER_CFG register */
+#define COMBO_MASTER_HOLD BIT(0)
+
+/* Bit definitions for DPLL_SYS_STATUS register */
+#define DPLL_SYS_STATE_MASK (0xf)
+
+/* Bit definitions for SYS_APLL_STATUS register */
+#define SYS_APLL_LOSS_LOCK_LIVE_MASK BIT(0)
+#define SYS_APLL_LOSS_LOCK_LIVE_LOCKED 0
+#define SYS_APLL_LOSS_LOCK_LIVE_UNLOCKED 1
+
+/* Bit definitions for the DPLL0_STATUS register */
+#define DPLL_STATE_MASK (0xf)
+#define DPLL_STATE_SHIFT (0x0)
+
+/* Values of DPLL_N.DPLL_MODE.PLL_MODE */
+enum pll_mode {
+ PLL_MODE_MIN = 0,
+ PLL_MODE_PLL = PLL_MODE_MIN,
+ PLL_MODE_WRITE_PHASE = 1,
+ PLL_MODE_WRITE_FREQUENCY = 2,
+ PLL_MODE_GPIO_INC_DEC = 3,
+ PLL_MODE_SYNTHESIS = 4,
+ PLL_MODE_PHASE_MEASUREMENT = 5,
+ PLL_MODE_DISABLED = 6,
+ PLL_MODE_MAX = PLL_MODE_DISABLED,
+};
+
+/* Values of DPLL_CTRL_n.DPLL_MANU_REF_CFG.MANUAL_REFERENCE */
+enum manual_reference {
+ MANU_REF_MIN = 0,
+ MANU_REF_CLK0 = MANU_REF_MIN,
+ MANU_REF_CLK1,
+ MANU_REF_CLK2,
+ MANU_REF_CLK3,
+ MANU_REF_CLK4,
+ MANU_REF_CLK5,
+ MANU_REF_CLK6,
+ MANU_REF_CLK7,
+ MANU_REF_CLK8,
+ MANU_REF_CLK9,
+ MANU_REF_CLK10,
+ MANU_REF_CLK11,
+ MANU_REF_CLK12,
+ MANU_REF_CLK13,
+ MANU_REF_CLK14,
+ MANU_REF_CLK15,
+ MANU_REF_WRITE_PHASE,
+ MANU_REF_WRITE_FREQUENCY,
+ MANU_REF_XO_DPLL,
+ MANU_REF_MAX = MANU_REF_XO_DPLL,
+};
+
+enum hw_tod_write_trig_sel {
+ HW_TOD_WR_TRIG_SEL_MIN = 0,
+ HW_TOD_WR_TRIG_SEL_MSB = HW_TOD_WR_TRIG_SEL_MIN,
+ HW_TOD_WR_TRIG_SEL_RESERVED = 1,
+ HW_TOD_WR_TRIG_SEL_TOD_PPS = 2,
+ HW_TOD_WR_TRIG_SEL_IRIGB_PPS = 3,
+ HW_TOD_WR_TRIG_SEL_PWM_PPS = 4,
+ HW_TOD_WR_TRIG_SEL_GPIO = 5,
+ HW_TOD_WR_TRIG_SEL_FOD_SYNC = 6,
+ WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_FOD_SYNC,
+};
+
+enum scsr_read_trig_sel {
+ /* CANCEL CURRENT TOD READ; MODULE BECOMES IDLE - NO TRIGGER OCCURS */
+ SCSR_TOD_READ_TRIG_SEL_DISABLE = 0,
+ /* TRIGGER IMMEDIATELY */
+ SCSR_TOD_READ_TRIG_SEL_IMMEDIATE = 1,
+ /* TRIGGER ON RISING EDGE OF INTERNAL TOD PPS SIGNAL */
+ SCSR_TOD_READ_TRIG_SEL_TODPPS = 2,
+ /* TRIGGER ON RISING EDGE OF SELECTED REFERENCE INPUT */
+ SCSR_TOD_READ_TRIG_SEL_REFCLK = 3,
+ /* TRIGGER ON RISING EDGE OF SELECTED PWM DECODER 1PPS OUTPUT */
+ SCSR_TOD_READ_TRIG_SEL_PWMPPS = 4,
+ SCSR_TOD_READ_TRIG_SEL_RESERVED = 5,
+ /* TRIGGER WHEN WRITE FREQUENCY EVENT OCCURS */
+ SCSR_TOD_READ_TRIG_SEL_WRITEFREQUENCYEVENT = 6,
+ /* TRIGGER ON SELECTED GPIO */
+ SCSR_TOD_READ_TRIG_SEL_GPIO = 7,
+ SCSR_TOD_READ_TRIG_SEL_MAX = SCSR_TOD_READ_TRIG_SEL_GPIO,
+};
+
+/* Values STATUS.DPLL_SYS_STATUS.DPLL_SYS_STATE */
+enum dpll_state {
+ DPLL_STATE_MIN = 0,
+ DPLL_STATE_FREERUN = DPLL_STATE_MIN,
+ DPLL_STATE_LOCKACQ = 1,
+ DPLL_STATE_LOCKREC = 2,
+ DPLL_STATE_LOCKED = 3,
+ DPLL_STATE_HOLDOVER = 4,
+ DPLL_STATE_OPEN_LOOP = 5,
+ DPLL_STATE_MAX = DPLL_STATE_OPEN_LOOP,
+};
+
+/* 4.8.7 only */
+enum scsr_tod_write_trig_sel {
+ SCSR_TOD_WR_TRIG_SEL_DISABLE = 0,
+ SCSR_TOD_WR_TRIG_SEL_IMMEDIATE = 1,
+ SCSR_TOD_WR_TRIG_SEL_REFCLK = 2,
+ SCSR_TOD_WR_TRIG_SEL_PWMPPS = 3,
+ SCSR_TOD_WR_TRIG_SEL_TODPPS = 4,
+ SCSR_TOD_WR_TRIG_SEL_SYNCFOD = 5,
+ SCSR_TOD_WR_TRIG_SEL_GPIO = 6,
+ SCSR_TOD_WR_TRIG_SEL_MAX = SCSR_TOD_WR_TRIG_SEL_GPIO,
+};
+
+/* 4.8.7 only */
+enum scsr_tod_write_type_sel {
+ SCSR_TOD_WR_TYPE_SEL_ABSOLUTE = 0,
+ SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS = 1,
+ SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS = 2,
+ SCSR_TOD_WR_TYPE_SEL_MAX = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS,
+};
+#endif
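
The layout above follows a base-plus-offset scheme: each module macro (DPLL_0, GPIO_0, TOD_WRITE_0, and so on) is a base address to which the per-module register offsets are added, with the _V520 variants covering the 5.2.0 register map. A minimal sketch of decoding the mode register, with the bus read itself elided:

static void idt8a340_mode_sketch(u8 val)
{
	/* Register address = module base + offset; use the _V520
	 * offset on parts with the 5.2.0 layout. */
	u16 addr = DPLL_0 + DPLL_MODE;	/* or DPLL_0 + DPLL_MODE_V520 */

	/* val is the byte read from addr (access elided). PLL_MODE
	 * occupies bits 5:3, the state machine mode bits 2:0. */
	enum pll_mode mode = (val >> PLL_MODE_SHIFT) & PLL_MODE_MASK;
	u8 state_mode = (val >> STATE_MODE_SHIFT) & STATE_MODE_MASK;
}
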
diff --git a/include/linux/mfd/imx25-tsadc.h b/include/linux/mfd/imx25-tsadc.h
index 7fe4b8c3baac..21f8adfefd1d 100644
--- a/include/linux/mfd/imx25-tsadc.h
+++ b/include/linux/mfd/imx25-tsadc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_INCLUDE_MFD_IMX25_TSADC_H_
#define _LINUX_INCLUDE_MFD_IMX25_TSADC_H_
diff --git a/include/linux/mfd/ingenic-tcu.h b/include/linux/mfd/ingenic-tcu.h
new file mode 100644
index 000000000000..2083fa20821d
--- /dev/null
+++ b/include/linux/mfd/ingenic-tcu.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for the Ingenic JZ47xx TCU (Timer/Counter Unit) driver
+ */
+#ifndef __LINUX_MFD_INGENIC_TCU_H_
+#define __LINUX_MFD_INGENIC_TCU_H_
+
+#include <linux/bitops.h>
+
+#define TCU_REG_WDT_TDR 0x00
+#define TCU_REG_WDT_TCER 0x04
+#define TCU_REG_WDT_TCNT 0x08
+#define TCU_REG_WDT_TCSR 0x0c
+#define TCU_REG_TER 0x10
+#define TCU_REG_TESR 0x14
+#define TCU_REG_TECR 0x18
+#define TCU_REG_TSR 0x1c
+#define TCU_REG_TFR 0x20
+#define TCU_REG_TFSR 0x24
+#define TCU_REG_TFCR 0x28
+#define TCU_REG_TSSR 0x2c
+#define TCU_REG_TMR 0x30
+#define TCU_REG_TMSR 0x34
+#define TCU_REG_TMCR 0x38
+#define TCU_REG_TSCR 0x3c
+#define TCU_REG_TDFR0 0x40
+#define TCU_REG_TDHR0 0x44
+#define TCU_REG_TCNT0 0x48
+#define TCU_REG_TCSR0 0x4c
+#define TCU_REG_OST_DR 0xe0
+#define TCU_REG_OST_CNTL 0xe4
+#define TCU_REG_OST_CNTH 0xe8
+#define TCU_REG_OST_TCSR 0xec
+#define TCU_REG_TSTR 0xf0
+#define TCU_REG_TSTSR 0xf4
+#define TCU_REG_TSTCR 0xf8
+#define TCU_REG_OST_CNTHBUF 0xfc
+
+#define TCU_TCSR_RESERVED_BITS 0x3f
+#define TCU_TCSR_PARENT_CLOCK_MASK 0x07
+#define TCU_TCSR_PRESCALE_LSB 3
+#define TCU_TCSR_PRESCALE_MASK 0x38
+
+#define TCU_TCSR_PWM_SD BIT(9) /* 0: shut down gracefully, 1: abruptly */
+#define TCU_TCSR_PWM_INITL_HIGH BIT(8) /* Sets the initial output level */
+#define TCU_TCSR_PWM_EN BIT(7) /* PWM pin output enable */
+
+#define TCU_WDT_TCER_TCEN BIT(0) /* Watchdog timer enable */
+
+#define TCU_CHANNEL_STRIDE 0x10
+#define TCU_REG_TDFRc(c) (TCU_REG_TDFR0 + ((c) * TCU_CHANNEL_STRIDE))
+#define TCU_REG_TDHRc(c) (TCU_REG_TDHR0 + ((c) * TCU_CHANNEL_STRIDE))
+#define TCU_REG_TCNTc(c) (TCU_REG_TCNT0 + ((c) * TCU_CHANNEL_STRIDE))
+#define TCU_REG_TCSRc(c) (TCU_REG_TCSR0 + ((c) * TCU_CHANNEL_STRIDE))
+
+#endif /* __LINUX_MFD_INGENIC_TCU_H_ */
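
The per-channel registers repeat at a fixed 0x10 stride from the channel-0 addresses, which is all the TCU_REG_*c() helpers encode. A brief sketch of reprogramming a channel's prescaler; the mapped base and the 32-bit access width are assumptions of this sketch:

#include <linux/io.h>

static void tcu_set_prescale(void __iomem *base, unsigned int channel,
			     unsigned int prescale)
{
	/* E.g. TCU_REG_TCSRc(3) == 0x4c + 3 * 0x10 == 0x7c. */
	u32 tcsr = readl(base + TCU_REG_TCSRc(channel));

	/* Replace the prescaler field (bits 5:3) without disturbing
	 * the parent clock selection or the PWM bits. */
	tcsr &= ~TCU_TCSR_PRESCALE_MASK;
	tcsr |= prescale << TCU_TCSR_PRESCALE_LSB;
	writel(tcsr, base + TCU_REG_TCSRc(channel));
}
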
diff --git a/include/linux/mfd/intel-m10-bmc.h b/include/linux/mfd/intel-m10-bmc.h
new file mode 100644
index 000000000000..1812ebfa11a8
--- /dev/null
+++ b/include/linux/mfd/intel-m10-bmc.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel MAX 10 Board Management Controller chip.
+ *
+ * Copyright (C) 2018-2020 Intel Corporation, Inc.
+ */
+#ifndef __MFD_INTEL_M10_BMC_H
+#define __MFD_INTEL_M10_BMC_H
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/regmap.h>
+
+#define M10BMC_N3000_LEGACY_BUILD_VER 0x300468
+#define M10BMC_N3000_SYS_BASE 0x300800
+#define M10BMC_N3000_SYS_END 0x300fff
+#define M10BMC_N3000_FLASH_BASE 0x10000000
+#define M10BMC_N3000_FLASH_END 0x1fffffff
+#define M10BMC_N3000_MEM_END M10BMC_N3000_FLASH_END
+
+#define M10BMC_STAGING_BASE 0x18000000
+#define M10BMC_STAGING_SIZE 0x3800000
+
+/* Register offset of system registers */
+#define NIOS2_N3000_FW_VERSION 0x0
+#define M10BMC_N3000_MAC_LOW 0x10
+#define M10BMC_N3000_MAC_BYTE4 GENMASK(7, 0)
+#define M10BMC_N3000_MAC_BYTE3 GENMASK(15, 8)
+#define M10BMC_N3000_MAC_BYTE2 GENMASK(23, 16)
+#define M10BMC_N3000_MAC_BYTE1 GENMASK(31, 24)
+#define M10BMC_N3000_MAC_HIGH 0x14
+#define M10BMC_N3000_MAC_BYTE6 GENMASK(7, 0)
+#define M10BMC_N3000_MAC_BYTE5 GENMASK(15, 8)
+#define M10BMC_N3000_MAC_COUNT GENMASK(23, 16)
+#define M10BMC_N3000_TEST_REG 0x3c
+#define M10BMC_N3000_BUILD_VER 0x68
+#define M10BMC_N3000_VER_MAJOR_MSK GENMASK(23, 16)
+#define M10BMC_N3000_VER_PCB_INFO_MSK GENMASK(31, 24)
+#define M10BMC_N3000_VER_LEGACY_INVALID 0xffffffff
+
+/* Secure update doorbell register, in system register region */
+#define M10BMC_N3000_DOORBELL 0x400
+
+/* Authorization Result register, in system register region */
+#define M10BMC_N3000_AUTH_RESULT 0x404
+
+/* Doorbell register fields */
+#define DRBL_RSU_REQUEST BIT(0)
+#define DRBL_RSU_PROGRESS GENMASK(7, 4)
+#define DRBL_HOST_STATUS GENMASK(11, 8)
+#define DRBL_RSU_STATUS GENMASK(23, 16)
+#define DRBL_PKVL_EEPROM_LOAD_SEC BIT(24)
+#define DRBL_PKVL1_POLL_EN BIT(25)
+#define DRBL_PKVL2_POLL_EN BIT(26)
+#define DRBL_CONFIG_SEL BIT(28)
+#define DRBL_REBOOT_REQ BIT(29)
+#define DRBL_REBOOT_DISABLED BIT(30)
+
+/* Progress states */
+#define RSU_PROG_IDLE 0x0
+#define RSU_PROG_PREPARE 0x1
+#define RSU_PROG_READY 0x3
+#define RSU_PROG_AUTHENTICATING 0x4
+#define RSU_PROG_COPYING 0x5
+#define RSU_PROG_UPDATE_CANCEL 0x6
+#define RSU_PROG_PROGRAM_KEY_HASH 0x7
+#define RSU_PROG_RSU_DONE 0x8
+#define RSU_PROG_PKVL_PROM_DONE 0x9
+
+/* Device and error states */
+#define RSU_STAT_NORMAL 0x0
+#define RSU_STAT_TIMEOUT 0x1
+#define RSU_STAT_AUTH_FAIL 0x2
+#define RSU_STAT_COPY_FAIL 0x3
+#define RSU_STAT_FATAL 0x4
+#define RSU_STAT_PKVL_REJECT 0x5
+#define RSU_STAT_NON_INC 0x6
+#define RSU_STAT_ERASE_FAIL 0x7
+#define RSU_STAT_WEAROUT 0x8
+#define RSU_STAT_NIOS_OK 0x80
+#define RSU_STAT_USER_OK 0x81
+#define RSU_STAT_FACTORY_OK 0x82
+#define RSU_STAT_USER_FAIL 0x83
+#define RSU_STAT_FACTORY_FAIL 0x84
+#define RSU_STAT_NIOS_FLASH_ERR 0x85
+#define RSU_STAT_FPGA_FLASH_ERR 0x86
+
+#define HOST_STATUS_IDLE 0x0
+#define HOST_STATUS_WRITE_DONE 0x1
+#define HOST_STATUS_ABORT_RSU 0x2
+
+#define rsu_prog(doorbell) FIELD_GET(DRBL_RSU_PROGRESS, doorbell)
+
+/* interval 100ms and timeout 5s */
+#define NIOS_HANDSHAKE_INTERVAL_US (100 * 1000)
+#define NIOS_HANDSHAKE_TIMEOUT_US (5 * 1000 * 1000)
+
+/* RSU PREP Timeout (2 minutes) to erase flash staging area */
+#define RSU_PREP_INTERVAL_MS 100
+#define RSU_PREP_TIMEOUT_MS (2 * 60 * 1000)
+
+/* RSU Complete Timeout (40 minutes) for full flash update */
+#define RSU_COMPLETE_INTERVAL_MS 1000
+#define RSU_COMPLETE_TIMEOUT_MS (40 * 60 * 1000)
+
+/* Addresses for security related data in FLASH */
+#define M10BMC_N3000_BMC_REH_ADDR 0x17ffc004
+#define M10BMC_N3000_BMC_PROG_ADDR 0x17ffc000
+#define M10BMC_N3000_BMC_PROG_MAGIC 0x5746
+
+#define M10BMC_N3000_SR_REH_ADDR 0x17ffd004
+#define M10BMC_N3000_SR_PROG_ADDR 0x17ffd000
+#define M10BMC_N3000_SR_PROG_MAGIC 0x5253
+
+#define M10BMC_N3000_PR_REH_ADDR 0x17ffe004
+#define M10BMC_N3000_PR_PROG_ADDR 0x17ffe000
+#define M10BMC_N3000_PR_PROG_MAGIC 0x5250
+
+/* Address of 4KB inverted bit vector containing staging area FLASH count */
+#define M10BMC_N3000_STAGING_FLASH_COUNT 0x17ffb000
+
+#define M10BMC_N6000_INDIRECT_BASE 0x400
+
+#define M10BMC_N6000_SYS_BASE 0x0
+#define M10BMC_N6000_SYS_END 0xfff
+
+#define M10BMC_N6000_DOORBELL 0x1c0
+#define M10BMC_N6000_AUTH_RESULT 0x1c4
+#define AUTH_RESULT_RSU_STATUS GENMASK(23, 16)
+
+#define M10BMC_N6000_BUILD_VER 0x0
+#define NIOS2_N6000_FW_VERSION 0x4
+#define M10BMC_N6000_MAC_LOW 0x20
+#define M10BMC_N6000_MAC_HIGH (M10BMC_N6000_MAC_LOW + 4)
+
+/* Addresses for security related data in FLASH */
+#define M10BMC_N6000_BMC_REH_ADDR 0x7ffc004
+#define M10BMC_N6000_BMC_PROG_ADDR 0x7ffc000
+#define M10BMC_N6000_BMC_PROG_MAGIC 0x5746
+
+#define M10BMC_N6000_SR_REH_ADDR 0x7ffd004
+#define M10BMC_N6000_SR_PROG_ADDR 0x7ffd000
+#define M10BMC_N6000_SR_PROG_MAGIC 0x5253
+
+#define M10BMC_N6000_PR_REH_ADDR 0x7ffe004
+#define M10BMC_N6000_PR_PROG_ADDR 0x7ffe000
+#define M10BMC_N6000_PR_PROG_MAGIC 0x5250
+
+#define M10BMC_N6000_STAGING_FLASH_COUNT 0x7ff5000
+
+#define M10BMC_N6000_FLASH_MUX_CTRL 0x1d0
+#define M10BMC_N6000_FLASH_MUX_SELECTION GENMASK(2, 0)
+#define M10BMC_N6000_FLASH_MUX_IDLE 0
+#define M10BMC_N6000_FLASH_MUX_NIOS 1
+#define M10BMC_N6000_FLASH_MUX_HOST 2
+#define M10BMC_N6000_FLASH_MUX_PFL 4
+#define get_flash_mux(mux) FIELD_GET(M10BMC_N6000_FLASH_MUX_SELECTION, mux)
+
+#define M10BMC_N6000_FLASH_NIOS_REQUEST BIT(4)
+#define M10BMC_N6000_FLASH_HOST_REQUEST BIT(5)
+
+#define M10BMC_N6000_FLASH_CTRL 0x40
+#define M10BMC_N6000_FLASH_WR_MODE BIT(0)
+#define M10BMC_N6000_FLASH_RD_MODE BIT(1)
+#define M10BMC_N6000_FLASH_BUSY BIT(2)
+#define M10BMC_N6000_FLASH_FIFO_SPACE GENMASK(13, 4)
+#define M10BMC_N6000_FLASH_READ_COUNT GENMASK(25, 16)
+
+#define M10BMC_N6000_FLASH_ADDR 0x44
+#define M10BMC_N6000_FLASH_FIFO 0x800
+#define M10BMC_N6000_READ_BLOCK_SIZE 0x800
+#define M10BMC_N6000_FIFO_MAX_BYTES 0x800
+#define M10BMC_N6000_FIFO_WORD_SIZE 4
+#define M10BMC_N6000_FIFO_MAX_WORDS (M10BMC_N6000_FIFO_MAX_BYTES / \
+ M10BMC_N6000_FIFO_WORD_SIZE)
+
+#define M10BMC_FLASH_INT_US 1
+#define M10BMC_FLASH_TIMEOUT_US 10000
+
+/**
+ * struct m10bmc_csr_map - Intel MAX 10 BMC CSR register map
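+ * @base: base of the system register region
+ * @build_version: offset of the build version register
+ * @fw_version: offset of the NIOS firmware version register
+ * @mac_low: offset of the low MAC address register
+ * @mac_high: offset of the high MAC address register
+ * @doorbell: offset of the secure update doorbell register
+ * @auth_result: offset of the authorization result register
+ * @bmc_prog_addr: BMC image program address in flash
+ * @bmc_reh_addr: BMC root entry hash address in flash
+ * @bmc_magic: BMC program magic number
+ * @sr_prog_addr: static region image program address in flash
+ * @sr_reh_addr: static region root entry hash address in flash
+ * @sr_magic: static region program magic number
+ * @pr_prog_addr: partial reconfiguration image program address in flash
+ * @pr_reh_addr: partial reconfiguration root entry hash address in flash
+ * @pr_magic: partial reconfiguration program magic number
+ * @rsu_update_counter: address of the staging area flash update counter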
+ */
+struct m10bmc_csr_map {
+ unsigned int base;
+ unsigned int build_version;
+ unsigned int fw_version;
+ unsigned int mac_low;
+ unsigned int mac_high;
+ unsigned int doorbell;
+ unsigned int auth_result;
+ unsigned int bmc_prog_addr;
+ unsigned int bmc_reh_addr;
+ unsigned int bmc_magic;
+ unsigned int sr_prog_addr;
+ unsigned int sr_reh_addr;
+ unsigned int sr_magic;
+ unsigned int pr_prog_addr;
+ unsigned int pr_reh_addr;
+ unsigned int pr_magic;
+ unsigned int rsu_update_counter;
+};
+
+/**
+ * struct intel_m10bmc_platform_info - Intel MAX 10 BMC platform specific information
+ * @cells: MFD cells
+ * @n_cells: number of MFD cells, i.e. ARRAY_SIZE() of @cells
+ * @csr_map: register definition mapping of the MAX10 BMC
+ */
+struct intel_m10bmc_platform_info {
+ struct mfd_cell *cells;
+ int n_cells;
+ const struct m10bmc_csr_map *csr_map;
+};
+
+struct intel_m10bmc;
+
+/**
+ * struct intel_m10bmc_flash_bulk_ops - device specific operations for flash R/W
+ * @read: read a block of data from flash
+ * @write: write a block of data to flash
+ * @lock_write: locks flash access for erase+write
+ * @unlock_write: unlock flash access
+ *
+ * Write must be protected with @lock_write and @unlock_write. While the flash
+ * is locked, @read returns -EBUSY.
+ */
+struct intel_m10bmc_flash_bulk_ops {
+ int (*read)(struct intel_m10bmc *m10bmc, u8 *buf, u32 addr, u32 size);
+ int (*write)(struct intel_m10bmc *m10bmc, const u8 *buf, u32 offset, u32 size);
+ int (*lock_write)(struct intel_m10bmc *m10bmc);
+ void (*unlock_write)(struct intel_m10bmc *m10bmc);
+};
+
+/**
+ * struct intel_m10bmc - Intel MAX 10 BMC parent driver data structure
+ * @dev: this device
+ * @regmap: the regmap used to access registers by m10bmc itself
+ * @info: the platform information for MAX10 BMC
+ * @flash_bulk_ops: optional device specific operations for flash R/W
+ */
+struct intel_m10bmc {
+ struct device *dev;
+ struct regmap *regmap;
+ const struct intel_m10bmc_platform_info *info;
+ const struct intel_m10bmc_flash_bulk_ops *flash_bulk_ops;
+};
+
+/*
+ * Register access helper functions.
+ *
+ * m10bmc_raw_read - read an m10bmc register at a raw address
+ * m10bmc_sys_read - read an m10bmc system register at an offset from the base
+ */
+static inline int
+m10bmc_raw_read(struct intel_m10bmc *m10bmc, unsigned int addr,
+ unsigned int *val)
+{
+ int ret;
+
+ ret = regmap_read(m10bmc->regmap, addr, val);
+ if (ret)
+ dev_err(m10bmc->dev, "failed to read raw reg %x: %d\n",
+ addr, ret);
+
+ return ret;
+}
+
+/*
+ * The base of the system registers is configurable by the hardware
+ * developers, and in the hardware spec the base is not included in the
+ * addresses of the system registers.
+ *
+ * This helper simplifies access to the system registers: if the base is
+ * reconfigured in hardware, only the csr_map's base needs to change in
+ * software.
+ */
+static inline int m10bmc_sys_read(struct intel_m10bmc *m10bmc, unsigned int offset,
+ unsigned int *val)
+{
+ const struct m10bmc_csr_map *csr_map = m10bmc->info->csr_map;
+
+ return m10bmc_raw_read(m10bmc, csr_map->base + offset, val);
+}
+
+/*
+ * MAX10 BMC Core support
+ */
+int m10bmc_dev_init(struct intel_m10bmc *m10bmc, const struct intel_m10bmc_platform_info *info);
+extern const struct attribute_group *m10bmc_dev_groups[];
+
+#endif /* __MFD_INTEL_M10_BMC_H */
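
To show how the csr_map indirection, m10bmc_sys_read() and the doorbell field helpers fit together, a minimal sketch (the function name is illustrative):

static int m10bmc_rsu_is_idle(struct intel_m10bmc *m10bmc)
{
	const struct m10bmc_csr_map *csr_map = m10bmc->info->csr_map;
	unsigned int doorbell;
	int ret;

	/* Offset-based read; the csr_map base is added internally. */
	ret = m10bmc_sys_read(m10bmc, csr_map->doorbell, &doorbell);
	if (ret)
		return ret;

	/* rsu_prog() extracts DRBL_RSU_PROGRESS via FIELD_GET(). */
	return rsu_prog(doorbell) == RSU_PROG_IDLE;
}
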
diff --git a/include/linux/mfd/intel_msic.h b/include/linux/mfd/intel_msic.h
deleted file mode 100644
index 439a7a617bc9..000000000000
--- a/include/linux/mfd/intel_msic.h
+++ /dev/null
@@ -1,456 +0,0 @@
-/*
- * include/linux/mfd/intel_msic.h - Core interface for Intel MSIC
- *
- * Copyright (C) 2011, Intel Corporation
- * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __LINUX_MFD_INTEL_MSIC_H__
-#define __LINUX_MFD_INTEL_MSIC_H__
-
-/* ID */
-#define INTEL_MSIC_ID0 0x000 /* RO */
-#define INTEL_MSIC_ID1 0x001 /* RO */
-
-/* IRQ */
-#define INTEL_MSIC_IRQLVL1 0x002
-#define INTEL_MSIC_ADC1INT 0x003
-#define INTEL_MSIC_CCINT 0x004
-#define INTEL_MSIC_PWRSRCINT 0x005
-#define INTEL_MSIC_PWRSRCINT1 0x006
-#define INTEL_MSIC_CHRINT 0x007
-#define INTEL_MSIC_CHRINT1 0x008
-#define INTEL_MSIC_RTCIRQ 0x009
-#define INTEL_MSIC_GPIO0LVIRQ 0x00a
-#define INTEL_MSIC_GPIO1LVIRQ 0x00b
-#define INTEL_MSIC_GPIOHVIRQ 0x00c
-#define INTEL_MSIC_VRINT 0x00d
-#define INTEL_MSIC_OCAUDIO 0x00e
-#define INTEL_MSIC_ACCDET 0x00f
-#define INTEL_MSIC_RESETIRQ1 0x010
-#define INTEL_MSIC_RESETIRQ2 0x011
-#define INTEL_MSIC_MADC1INT 0x012
-#define INTEL_MSIC_MCCINT 0x013
-#define INTEL_MSIC_MPWRSRCINT 0x014
-#define INTEL_MSIC_MPWRSRCINT1 0x015
-#define INTEL_MSIC_MCHRINT 0x016
-#define INTEL_MSIC_MCHRINT1 0x017
-#define INTEL_MSIC_RTCIRQMASK 0x018
-#define INTEL_MSIC_GPIO0LVIRQMASK 0x019
-#define INTEL_MSIC_GPIO1LVIRQMASK 0x01a
-#define INTEL_MSIC_GPIOHVIRQMASK 0x01b
-#define INTEL_MSIC_VRINTMASK 0x01c
-#define INTEL_MSIC_OCAUDIOMASK 0x01d
-#define INTEL_MSIC_ACCDETMASK 0x01e
-#define INTEL_MSIC_RESETIRQ1MASK 0x01f
-#define INTEL_MSIC_RESETIRQ2MASK 0x020
-#define INTEL_MSIC_IRQLVL1MSK 0x021
-#define INTEL_MSIC_PBCONFIG 0x03e
-#define INTEL_MSIC_PBSTATUS 0x03f /* RO */
-
-/* GPIO */
-#define INTEL_MSIC_GPIO0LV7CTLO 0x040
-#define INTEL_MSIC_GPIO0LV6CTLO 0x041
-#define INTEL_MSIC_GPIO0LV5CTLO 0x042
-#define INTEL_MSIC_GPIO0LV4CTLO 0x043
-#define INTEL_MSIC_GPIO0LV3CTLO 0x044
-#define INTEL_MSIC_GPIO0LV2CTLO 0x045
-#define INTEL_MSIC_GPIO0LV1CTLO 0x046
-#define INTEL_MSIC_GPIO0LV0CTLO 0x047
-#define INTEL_MSIC_GPIO1LV7CTLOS 0x048
-#define INTEL_MSIC_GPIO1LV6CTLO 0x049
-#define INTEL_MSIC_GPIO1LV5CTLO 0x04a
-#define INTEL_MSIC_GPIO1LV4CTLO 0x04b
-#define INTEL_MSIC_GPIO1LV3CTLO 0x04c
-#define INTEL_MSIC_GPIO1LV2CTLO 0x04d
-#define INTEL_MSIC_GPIO1LV1CTLO 0x04e
-#define INTEL_MSIC_GPIO1LV0CTLO 0x04f
-#define INTEL_MSIC_GPIO0LV7CTLI 0x050
-#define INTEL_MSIC_GPIO0LV6CTLI 0x051
-#define INTEL_MSIC_GPIO0LV5CTLI 0x052
-#define INTEL_MSIC_GPIO0LV4CTLI 0x053
-#define INTEL_MSIC_GPIO0LV3CTLI 0x054
-#define INTEL_MSIC_GPIO0LV2CTLI 0x055
-#define INTEL_MSIC_GPIO0LV1CTLI 0x056
-#define INTEL_MSIC_GPIO0LV0CTLI 0x057
-#define INTEL_MSIC_GPIO1LV7CTLIS 0x058
-#define INTEL_MSIC_GPIO1LV6CTLI 0x059
-#define INTEL_MSIC_GPIO1LV5CTLI 0x05a
-#define INTEL_MSIC_GPIO1LV4CTLI 0x05b
-#define INTEL_MSIC_GPIO1LV3CTLI 0x05c
-#define INTEL_MSIC_GPIO1LV2CTLI 0x05d
-#define INTEL_MSIC_GPIO1LV1CTLI 0x05e
-#define INTEL_MSIC_GPIO1LV0CTLI 0x05f
-#define INTEL_MSIC_PWM0CLKDIV1 0x061
-#define INTEL_MSIC_PWM0CLKDIV0 0x062
-#define INTEL_MSIC_PWM1CLKDIV1 0x063
-#define INTEL_MSIC_PWM1CLKDIV0 0x064
-#define INTEL_MSIC_PWM2CLKDIV1 0x065
-#define INTEL_MSIC_PWM2CLKDIV0 0x066
-#define INTEL_MSIC_PWM0DUTYCYCLE 0x067
-#define INTEL_MSIC_PWM1DUTYCYCLE 0x068
-#define INTEL_MSIC_PWM2DUTYCYCLE 0x069
-#define INTEL_MSIC_GPIO0HV3CTLO 0x06d
-#define INTEL_MSIC_GPIO0HV2CTLO 0x06e
-#define INTEL_MSIC_GPIO0HV1CTLO 0x06f
-#define INTEL_MSIC_GPIO0HV0CTLO 0x070
-#define INTEL_MSIC_GPIO1HV3CTLO 0x071
-#define INTEL_MSIC_GPIO1HV2CTLO 0x072
-#define INTEL_MSIC_GPIO1HV1CTLO 0x073
-#define INTEL_MSIC_GPIO1HV0CTLO 0x074
-#define INTEL_MSIC_GPIO0HV3CTLI 0x075
-#define INTEL_MSIC_GPIO0HV2CTLI 0x076
-#define INTEL_MSIC_GPIO0HV1CTLI 0x077
-#define INTEL_MSIC_GPIO0HV0CTLI 0x078
-#define INTEL_MSIC_GPIO1HV3CTLI 0x079
-#define INTEL_MSIC_GPIO1HV2CTLI 0x07a
-#define INTEL_MSIC_GPIO1HV1CTLI 0x07b
-#define INTEL_MSIC_GPIO1HV0CTLI 0x07c
-
-/* SVID */
-#define INTEL_MSIC_SVIDCTRL0 0x080
-#define INTEL_MSIC_SVIDCTRL1 0x081
-#define INTEL_MSIC_SVIDCTRL2 0x082
-#define INTEL_MSIC_SVIDTXLASTPKT3 0x083 /* RO */
-#define INTEL_MSIC_SVIDTXLASTPKT2 0x084 /* RO */
-#define INTEL_MSIC_SVIDTXLASTPKT1 0x085 /* RO */
-#define INTEL_MSIC_SVIDTXLASTPKT0 0x086 /* RO */
-#define INTEL_MSIC_SVIDPKTOUTBYTE3 0x087
-#define INTEL_MSIC_SVIDPKTOUTBYTE2 0x088
-#define INTEL_MSIC_SVIDPKTOUTBYTE1 0x089
-#define INTEL_MSIC_SVIDPKTOUTBYTE0 0x08a
-#define INTEL_MSIC_SVIDRXVPDEBUG1 0x08b
-#define INTEL_MSIC_SVIDRXVPDEBUG0 0x08c
-#define INTEL_MSIC_SVIDRXLASTPKT3 0x08d /* RO */
-#define INTEL_MSIC_SVIDRXLASTPKT2 0x08e /* RO */
-#define INTEL_MSIC_SVIDRXLASTPKT1 0x08f /* RO */
-#define INTEL_MSIC_SVIDRXLASTPKT0 0x090 /* RO */
-#define INTEL_MSIC_SVIDRXCHKSTATUS3 0x091 /* RO */
-#define INTEL_MSIC_SVIDRXCHKSTATUS2 0x092 /* RO */
-#define INTEL_MSIC_SVIDRXCHKSTATUS1 0x093 /* RO */
-#define INTEL_MSIC_SVIDRXCHKSTATUS0 0x094 /* RO */
-
-/* VREG */
-#define INTEL_MSIC_VCCLATCH 0x0c0
-#define INTEL_MSIC_VNNLATCH 0x0c1
-#define INTEL_MSIC_VCCCNT 0x0c2
-#define INTEL_MSIC_SMPSRAMP 0x0c3
-#define INTEL_MSIC_VNNCNT 0x0c4
-#define INTEL_MSIC_VNNAONCNT 0x0c5
-#define INTEL_MSIC_VCC122AONCNT 0x0c6
-#define INTEL_MSIC_V180AONCNT 0x0c7
-#define INTEL_MSIC_V500CNT 0x0c8
-#define INTEL_MSIC_VIHFCNT 0x0c9
-#define INTEL_MSIC_LDORAMP1 0x0ca
-#define INTEL_MSIC_LDORAMP2 0x0cb
-#define INTEL_MSIC_VCC108AONCNT 0x0cc
-#define INTEL_MSIC_VCC108ASCNT 0x0cd
-#define INTEL_MSIC_VCC108CNT 0x0ce
-#define INTEL_MSIC_VCCA100ASCNT 0x0cf
-#define INTEL_MSIC_VCCA100CNT 0x0d0
-#define INTEL_MSIC_VCC180AONCNT 0x0d1
-#define INTEL_MSIC_VCC180CNT 0x0d2
-#define INTEL_MSIC_VCC330CNT 0x0d3
-#define INTEL_MSIC_VUSB330CNT 0x0d4
-#define INTEL_MSIC_VCCSDIOCNT 0x0d5
-#define INTEL_MSIC_VPROG1CNT 0x0d6
-#define INTEL_MSIC_VPROG2CNT 0x0d7
-#define INTEL_MSIC_VEMMCSCNT 0x0d8
-#define INTEL_MSIC_VEMMC1CNT 0x0d9
-#define INTEL_MSIC_VEMMC2CNT 0x0da
-#define INTEL_MSIC_VAUDACNT 0x0db
-#define INTEL_MSIC_VHSPCNT 0x0dc
-#define INTEL_MSIC_VHSNCNT 0x0dd
-#define INTEL_MSIC_VHDMICNT 0x0de
-#define INTEL_MSIC_VOTGCNT 0x0df
-#define INTEL_MSIC_V1P35CNT 0x0e0
-#define INTEL_MSIC_V330AONCNT 0x0e1
-
-/* RESET */
-#define INTEL_MSIC_CHIPCNTRL 0x100 /* WO */
-#define INTEL_MSIC_ERCONFIG 0x101
-
-/* BURST */
-#define INTEL_MSIC_BATCURRENTLIMIT12 0x102
-#define INTEL_MSIC_BATTIMELIMIT12 0x103
-#define INTEL_MSIC_BATTIMELIMIT3 0x104
-#define INTEL_MSIC_BATTIMEDB 0x105
-#define INTEL_MSIC_BRSTCONFIGOUTPUTS 0x106
-#define INTEL_MSIC_BRSTCONFIGACTIONS 0x107
-#define INTEL_MSIC_BURSTCONTROLSTATUS 0x108
-
-/* RTC */
-#define INTEL_MSIC_RTCB1 0x140 /* RO */
-#define INTEL_MSIC_RTCB2 0x141 /* RO */
-#define INTEL_MSIC_RTCB3 0x142 /* RO */
-#define INTEL_MSIC_RTCB4 0x143 /* RO */
-#define INTEL_MSIC_RTCOB1 0x144
-#define INTEL_MSIC_RTCOB2 0x145
-#define INTEL_MSIC_RTCOB3 0x146
-#define INTEL_MSIC_RTCOB4 0x147
-#define INTEL_MSIC_RTCAB1 0x148
-#define INTEL_MSIC_RTCAB2 0x149
-#define INTEL_MSIC_RTCAB3 0x14a
-#define INTEL_MSIC_RTCAB4 0x14b
-#define INTEL_MSIC_RTCWAB1 0x14c
-#define INTEL_MSIC_RTCWAB2 0x14d
-#define INTEL_MSIC_RTCWAB3 0x14e
-#define INTEL_MSIC_RTCWAB4 0x14f
-#define INTEL_MSIC_RTCSC1 0x150
-#define INTEL_MSIC_RTCSC2 0x151
-#define INTEL_MSIC_RTCSC3 0x152
-#define INTEL_MSIC_RTCSC4 0x153
-#define INTEL_MSIC_RTCSTATUS 0x154 /* RO */
-#define INTEL_MSIC_RTCCONFIG1 0x155
-#define INTEL_MSIC_RTCCONFIG2 0x156
-
-/* CHARGER */
-#define INTEL_MSIC_BDTIMER 0x180
-#define INTEL_MSIC_BATTRMV 0x181
-#define INTEL_MSIC_VBUSDET 0x182
-#define INTEL_MSIC_VBUSDET1 0x183
-#define INTEL_MSIC_ADPHVDET 0x184
-#define INTEL_MSIC_ADPLVDET 0x185
-#define INTEL_MSIC_ADPDETDBDM 0x186
-#define INTEL_MSIC_LOWBATTDET 0x187
-#define INTEL_MSIC_CHRCTRL 0x188
-#define INTEL_MSIC_CHRCVOLTAGE 0x189
-#define INTEL_MSIC_CHRCCURRENT 0x18a
-#define INTEL_MSIC_SPCHARGER 0x18b
-#define INTEL_MSIC_CHRTTIME 0x18c
-#define INTEL_MSIC_CHRCTRL1 0x18d
-#define INTEL_MSIC_PWRSRCLMT 0x18e
-#define INTEL_MSIC_CHRSTWDT 0x18f
-#define INTEL_MSIC_WDTWRITE 0x190 /* WO */
-#define INTEL_MSIC_CHRSAFELMT 0x191
-#define INTEL_MSIC_SPWRSRCINT 0x192 /* RO */
-#define INTEL_MSIC_SPWRSRCINT1 0x193 /* RO */
-#define INTEL_MSIC_CHRLEDPWM 0x194
-#define INTEL_MSIC_CHRLEDCTRL 0x195
-
-/* ADC */
-#define INTEL_MSIC_ADC1CNTL1 0x1c0
-#define INTEL_MSIC_ADC1CNTL2 0x1c1
-#define INTEL_MSIC_ADC1CNTL3 0x1c2
-#define INTEL_MSIC_ADC1OFFSETH 0x1c3 /* RO */
-#define INTEL_MSIC_ADC1OFFSETL 0x1c4 /* RO */
-#define INTEL_MSIC_ADC1ADDR0 0x1c5
-#define INTEL_MSIC_ADC1ADDR1 0x1c6
-#define INTEL_MSIC_ADC1ADDR2 0x1c7
-#define INTEL_MSIC_ADC1ADDR3 0x1c8
-#define INTEL_MSIC_ADC1ADDR4 0x1c9
-#define INTEL_MSIC_ADC1ADDR5 0x1ca
-#define INTEL_MSIC_ADC1ADDR6 0x1cb
-#define INTEL_MSIC_ADC1ADDR7 0x1cc
-#define INTEL_MSIC_ADC1ADDR8 0x1cd
-#define INTEL_MSIC_ADC1ADDR9 0x1ce
-#define INTEL_MSIC_ADC1ADDR10 0x1cf
-#define INTEL_MSIC_ADC1ADDR11 0x1d0
-#define INTEL_MSIC_ADC1ADDR12 0x1d1
-#define INTEL_MSIC_ADC1ADDR13 0x1d2
-#define INTEL_MSIC_ADC1ADDR14 0x1d3
-#define INTEL_MSIC_ADC1SNS0H 0x1d4 /* RO */
-#define INTEL_MSIC_ADC1SNS0L 0x1d5 /* RO */
-#define INTEL_MSIC_ADC1SNS1H 0x1d6 /* RO */
-#define INTEL_MSIC_ADC1SNS1L 0x1d7 /* RO */
-#define INTEL_MSIC_ADC1SNS2H 0x1d8 /* RO */
-#define INTEL_MSIC_ADC1SNS2L 0x1d9 /* RO */
-#define INTEL_MSIC_ADC1SNS3H 0x1da /* RO */
-#define INTEL_MSIC_ADC1SNS3L 0x1db /* RO */
-#define INTEL_MSIC_ADC1SNS4H 0x1dc /* RO */
-#define INTEL_MSIC_ADC1SNS4L 0x1dd /* RO */
-#define INTEL_MSIC_ADC1SNS5H 0x1de /* RO */
-#define INTEL_MSIC_ADC1SNS5L 0x1df /* RO */
-#define INTEL_MSIC_ADC1SNS6H 0x1e0 /* RO */
-#define INTEL_MSIC_ADC1SNS6L 0x1e1 /* RO */
-#define INTEL_MSIC_ADC1SNS7H 0x1e2 /* RO */
-#define INTEL_MSIC_ADC1SNS7L 0x1e3 /* RO */
-#define INTEL_MSIC_ADC1SNS8H 0x1e4 /* RO */
-#define INTEL_MSIC_ADC1SNS8L 0x1e5 /* RO */
-#define INTEL_MSIC_ADC1SNS9H 0x1e6 /* RO */
-#define INTEL_MSIC_ADC1SNS9L 0x1e7 /* RO */
-#define INTEL_MSIC_ADC1SNS10H 0x1e8 /* RO */
-#define INTEL_MSIC_ADC1SNS10L 0x1e9 /* RO */
-#define INTEL_MSIC_ADC1SNS11H 0x1ea /* RO */
-#define INTEL_MSIC_ADC1SNS11L 0x1eb /* RO */
-#define INTEL_MSIC_ADC1SNS12H 0x1ec /* RO */
-#define INTEL_MSIC_ADC1SNS12L 0x1ed /* RO */
-#define INTEL_MSIC_ADC1SNS13H 0x1ee /* RO */
-#define INTEL_MSIC_ADC1SNS13L 0x1ef /* RO */
-#define INTEL_MSIC_ADC1SNS14H 0x1f0 /* RO */
-#define INTEL_MSIC_ADC1SNS14L 0x1f1 /* RO */
-#define INTEL_MSIC_ADC1BV0H 0x1f2 /* RO */
-#define INTEL_MSIC_ADC1BV0L 0x1f3 /* RO */
-#define INTEL_MSIC_ADC1BV1H 0x1f4 /* RO */
-#define INTEL_MSIC_ADC1BV1L 0x1f5 /* RO */
-#define INTEL_MSIC_ADC1BV2H 0x1f6 /* RO */
-#define INTEL_MSIC_ADC1BV2L 0x1f7 /* RO */
-#define INTEL_MSIC_ADC1BV3H 0x1f8 /* RO */
-#define INTEL_MSIC_ADC1BV3L 0x1f9 /* RO */
-#define INTEL_MSIC_ADC1BI0H 0x1fa /* RO */
-#define INTEL_MSIC_ADC1BI0L 0x1fb /* RO */
-#define INTEL_MSIC_ADC1BI1H 0x1fc /* RO */
-#define INTEL_MSIC_ADC1BI1L 0x1fd /* RO */
-#define INTEL_MSIC_ADC1BI2H 0x1fe /* RO */
-#define INTEL_MSIC_ADC1BI2L 0x1ff /* RO */
-#define INTEL_MSIC_ADC1BI3H 0x200 /* RO */
-#define INTEL_MSIC_ADC1BI3L 0x201 /* RO */
-#define INTEL_MSIC_CCCNTL 0x202
-#define INTEL_MSIC_CCOFFSETH 0x203 /* RO */
-#define INTEL_MSIC_CCOFFSETL 0x204 /* RO */
-#define INTEL_MSIC_CCADCHA 0x205 /* RO */
-#define INTEL_MSIC_CCADCLA 0x206 /* RO */
-
-/* AUDIO */
-#define INTEL_MSIC_AUDPLLCTRL 0x240
-#define INTEL_MSIC_DMICBUF0123 0x241
-#define INTEL_MSIC_DMICBUF45 0x242
-#define INTEL_MSIC_DMICGPO 0x244
-#define INTEL_MSIC_DMICMUX 0x245
-#define INTEL_MSIC_DMICCLK 0x246
-#define INTEL_MSIC_MICBIAS 0x247
-#define INTEL_MSIC_ADCCONFIG 0x248
-#define INTEL_MSIC_MICAMP1 0x249
-#define INTEL_MSIC_MICAMP2 0x24a
-#define INTEL_MSIC_NOISEMUX 0x24b
-#define INTEL_MSIC_AUDIOMUX12 0x24c
-#define INTEL_MSIC_AUDIOMUX34 0x24d
-#define INTEL_MSIC_AUDIOSINC 0x24e
-#define INTEL_MSIC_AUDIOTXEN 0x24f
-#define INTEL_MSIC_HSEPRXCTRL 0x250
-#define INTEL_MSIC_IHFRXCTRL 0x251
-#define INTEL_MSIC_VOICETXVOL 0x252
-#define INTEL_MSIC_SIDETONEVOL 0x253
-#define INTEL_MSIC_MUSICSHARVOL 0x254
-#define INTEL_MSIC_VOICETXCTRL 0x255
-#define INTEL_MSIC_HSMIXER 0x256
-#define INTEL_MSIC_DACCONFIG 0x257
-#define INTEL_MSIC_SOFTMUTE 0x258
-#define INTEL_MSIC_HSLVOLCTRL 0x259
-#define INTEL_MSIC_HSRVOLCTRL 0x25a
-#define INTEL_MSIC_IHFLVOLCTRL 0x25b
-#define INTEL_MSIC_IHFRVOLCTRL 0x25c
-#define INTEL_MSIC_DRIVEREN 0x25d
-#define INTEL_MSIC_LINEOUTCTRL 0x25e
-#define INTEL_MSIC_VIB1CTRL1 0x25f
-#define INTEL_MSIC_VIB1CTRL2 0x260
-#define INTEL_MSIC_VIB1CTRL3 0x261
-#define INTEL_MSIC_VIB1SPIPCM_1 0x262
-#define INTEL_MSIC_VIB1SPIPCM_2 0x263
-#define INTEL_MSIC_VIB1CTRL5 0x264
-#define INTEL_MSIC_VIB2CTRL1 0x265
-#define INTEL_MSIC_VIB2CTRL2 0x266
-#define INTEL_MSIC_VIB2CTRL3 0x267
-#define INTEL_MSIC_VIB2SPIPCM_1 0x268
-#define INTEL_MSIC_VIB2SPIPCM_2 0x269
-#define INTEL_MSIC_VIB2CTRL5 0x26a
-#define INTEL_MSIC_BTNCTRL1 0x26b
-#define INTEL_MSIC_BTNCTRL2 0x26c
-#define INTEL_MSIC_PCM1TXSLOT01 0x26d
-#define INTEL_MSIC_PCM1TXSLOT23 0x26e
-#define INTEL_MSIC_PCM1TXSLOT45 0x26f
-#define INTEL_MSIC_PCM1RXSLOT0123 0x270
-#define INTEL_MSIC_PCM1RXSLOT045 0x271
-#define INTEL_MSIC_PCM2TXSLOT01 0x272
-#define INTEL_MSIC_PCM2TXSLOT23 0x273
-#define INTEL_MSIC_PCM2TXSLOT45 0x274
-#define INTEL_MSIC_PCM2RXSLOT01 0x275
-#define INTEL_MSIC_PCM2RXSLOT23 0x276
-#define INTEL_MSIC_PCM2RXSLOT45 0x277
-#define INTEL_MSIC_PCM1CTRL1 0x278
-#define INTEL_MSIC_PCM1CTRL2 0x279
-#define INTEL_MSIC_PCM1CTRL3 0x27a
-#define INTEL_MSIC_PCM2CTRL1 0x27b
-#define INTEL_MSIC_PCM2CTRL2 0x27c
-
-/* HDMI */
-#define INTEL_MSIC_HDMIPUEN 0x280
-#define INTEL_MSIC_HDMISTATUS 0x281 /* RO */
-
-/* Physical address of the start of the MSIC interrupt tree in SRAM */
-#define INTEL_MSIC_IRQ_PHYS_BASE 0xffff7fc0
-
-/**
- * struct intel_msic_gpio_pdata - platform data for the MSIC GPIO driver
- * @gpio_base: base number for the GPIOs
- */
-struct intel_msic_gpio_pdata {
- unsigned gpio_base;
-};
-
-/**
- * struct intel_msic_ocd_pdata - platform data for the MSIC OCD driver
- * @gpio: GPIO number used for OCD interrupts
- *
- * The MSIC MFD driver converts @gpio into an IRQ number and passes it to
- * the OCD driver as %IORESOURCE_IRQ.
- */
-struct intel_msic_ocd_pdata {
- unsigned gpio;
-};
-
-/* MSIC embedded blocks (subdevices) */
-enum intel_msic_block {
- INTEL_MSIC_BLOCK_TOUCH,
- INTEL_MSIC_BLOCK_ADC,
- INTEL_MSIC_BLOCK_BATTERY,
- INTEL_MSIC_BLOCK_GPIO,
- INTEL_MSIC_BLOCK_AUDIO,
- INTEL_MSIC_BLOCK_HDMI,
- INTEL_MSIC_BLOCK_THERMAL,
- INTEL_MSIC_BLOCK_POWER_BTN,
- INTEL_MSIC_BLOCK_OCD,
-
- INTEL_MSIC_BLOCK_LAST,
-};
-
-/**
- * struct intel_msic_platform_data - platform data for the MSIC driver
- * @irq: array of interrupt numbers, one per device. If @irq is set to %0
- * for a given block, the corresponding platform device is not
- * created. For devices which don't have an interrupt, use %0xff
- * (this is same as in SFI spec).
- * @gpio: platform data for the MSIC GPIO driver
- * @ocd: platform data for the MSIC OCD driver
- *
- * Once the MSIC driver is initialized, the register interface is ready to
- * use. All the platform devices for subdevices are created after the
- * register interface is ready so that we can guarantee its availability to
- * the subdevice drivers.
- *
- * Interrupt numbers are passed to the subdevices via %IORESOURCE_IRQ
- * resources of the created platform device.
- */
-struct intel_msic_platform_data {
- int irq[INTEL_MSIC_BLOCK_LAST];
- struct intel_msic_gpio_pdata *gpio;
- struct intel_msic_ocd_pdata *ocd;
-};
-
-struct intel_msic;
-
-extern int intel_msic_reg_read(unsigned short reg, u8 *val);
-extern int intel_msic_reg_write(unsigned short reg, u8 val);
-extern int intel_msic_reg_update(unsigned short reg, u8 val, u8 mask);
-extern int intel_msic_bulk_read(unsigned short *reg, u8 *buf, size_t count);
-extern int intel_msic_bulk_write(unsigned short *reg, u8 *buf, size_t count);
-
-/*
- * pdev_to_intel_msic - gets an MSIC instance from the platform device
- * @pdev: platform device pointer
- *
- * The client drivers need to have pointer to the MSIC instance if they
- * want to call intel_msic_irq_read(). This macro can be used for
- * convenience to get the MSIC pointer from @pdev where needed. This is
- * _only_ valid for devices which are managed by the MSIC.
- */
-#define pdev_to_intel_msic(pdev) (dev_get_drvdata(pdev->dev.parent))
-
-extern int intel_msic_irq_read(struct intel_msic *msic, unsigned short reg,
- u8 *val);
-
-#endif /* __LINUX_MFD_INTEL_MSIC_H__ */
diff --git a/include/linux/mfd/intel_pmc_bxt.h b/include/linux/mfd/intel_pmc_bxt.h
new file mode 100644
index 000000000000..f51a43d25ffd
--- /dev/null
+++ b/include/linux/mfd/intel_pmc_bxt.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef MFD_INTEL_PMC_BXT_H
+#define MFD_INTEL_PMC_BXT_H
+
+/* GCR reg offsets from GCR base */
+#define PMC_GCR_PMC_CFG_REG 0x08
+#define PMC_GCR_TELEM_DEEP_S0IX_REG 0x78
+#define PMC_GCR_TELEM_SHLW_S0IX_REG 0x80
+
+/* PMC_CFG_REG bit masks */
+#define PMC_CFG_NO_REBOOT_EN BIT(4)
+
+/**
+ * struct intel_pmc_dev - Intel PMC device structure
+ * @dev: Pointer to the parent PMC device
+ * @scu: Pointer to the SCU IPC device data structure
+ * @gcr_mem_base: Virtual base address of GCR (Global Configuration Registers)
+ * @gcr_lock: Lock used to serialize access to GCR registers
+ * @telem_base: Pointer to telemetry SSRAM base resource or %NULL if not
+ * available
+ */
+struct intel_pmc_dev {
+ struct device *dev;
+ struct intel_scu_ipc_dev *scu;
+ void __iomem *gcr_mem_base;
+ spinlock_t gcr_lock;
+ struct resource *telem_base;
+};
+
+#if IS_ENABLED(CONFIG_MFD_INTEL_PMC_BXT)
+int intel_pmc_gcr_read64(struct intel_pmc_dev *pmc, u32 offset, u64 *data);
+int intel_pmc_gcr_update(struct intel_pmc_dev *pmc, u32 offset, u32 mask, u32 val);
+int intel_pmc_s0ix_counter_read(struct intel_pmc_dev *pmc, u64 *data);
+#else
+static inline int intel_pmc_gcr_read64(struct intel_pmc_dev *pmc, u32 offset,
+ u64 *data)
+{
+ return -ENOTSUPP;
+}
+
+static inline int intel_pmc_gcr_update(struct intel_pmc_dev *pmc, u32 offset,
+ u32 mask, u32 val)
+{
+ return -ENOTSUPP;
+}
+
+static inline int intel_pmc_s0ix_counter_read(struct intel_pmc_dev *pmc, u64 *data)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+#endif /* MFD_INTEL_PMC_BXT_H */
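
As an illustration of the GCR interface, a watchdog-style user could toggle the no-reboot bit like this (a sketch; the wrapper name is hypothetical):

/* intel_pmc_gcr_update() does the read-modify-write under gcr_lock. */
static int pmc_set_no_reboot(struct intel_pmc_dev *pmc, bool set)
{
	return intel_pmc_gcr_update(pmc, PMC_GCR_PMC_CFG_REG,
				    PMC_CFG_NO_REBOOT_EN,
				    set ? PMC_CFG_NO_REBOOT_EN : 0);
}
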
diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h
index 5aacdb017a9f..9ba2c1a8d836 100644
--- a/include/linux/mfd/intel_soc_pmic.h
+++ b/include/linux/mfd/intel_soc_pmic.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * intel_soc_pmic.h - Intel SoC PMIC Driver
+ * Intel SoC PMIC Driver
*
* Copyright (C) 2012-2014 Intel Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Author: Yang, Bin <bin.yang@intel.com>
* Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
*/
@@ -21,16 +13,44 @@
#include <linux/regmap.h>
+enum intel_cht_wc_models {
+ INTEL_CHT_WC_UNKNOWN,
+ INTEL_CHT_WC_GPD_WIN_POCKET,
+ INTEL_CHT_WC_XIAOMI_MIPAD2,
+ INTEL_CHT_WC_LENOVO_YOGABOOK1,
+ INTEL_CHT_WC_LENOVO_YT3_X90,
+};
+
+/**
+ * struct intel_soc_pmic - Intel SoC PMIC data
+ * @irq: Master interrupt number of the parent PMIC device
+ * @regmap: Pointer to the parent PMIC device regmap structure
+ * @irq_chip_data: IRQ chip data for the PMIC itself
+ * @irq_chip_data_pwrbtn: Chained IRQ chip data for the Power Button
+ * @irq_chip_data_tmu: Chained IRQ chip data for the Time Management Unit
+ * @irq_chip_data_bcu: Chained IRQ chip data for the Burst Control Unit
+ * @irq_chip_data_adc: Chained IRQ chip data for the General Purpose ADC
+ * @irq_chip_data_chgr: Chained IRQ chip data for the External Charger
+ * @irq_chip_data_crit: Chained IRQ chip data for the Critical Event Handler
+ * @dev: Pointer to the parent PMIC device
+ * @scu: Pointer to the SCU IPC device data structure
+ */
struct intel_soc_pmic {
int irq;
struct regmap *regmap;
struct regmap_irq_chip_data *irq_chip_data;
+ struct regmap_irq_chip_data *irq_chip_data_pwrbtn;
struct regmap_irq_chip_data *irq_chip_data_tmu;
struct regmap_irq_chip_data *irq_chip_data_bcu;
struct regmap_irq_chip_data *irq_chip_data_adc;
struct regmap_irq_chip_data *irq_chip_data_chgr;
struct regmap_irq_chip_data *irq_chip_data_crit;
struct device *dev;
+ struct intel_scu_ipc_dev *scu;
+ enum intel_cht_wc_models cht_wc_model;
};
+int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address,
+ u32 value, u32 mask);
+
#endif /* __INTEL_SOC_PMIC_H__ */
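
The new export performs a masked PMIC register write on behalf of the graphics driver's MIPI sequences; a hypothetical call, with every address purely illustrative:

/* Set bit 0 of register 0x2d on the PMIC at I2C address 0x6e,
 * leaving the other bits untouched. */
int ret = intel_soc_pmic_exec_mipi_pmic_seq_element(0x6e, 0x2d, 0x1, 0x1);
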
diff --git a/include/linux/mfd/intel_soc_pmic_bxtwc.h b/include/linux/mfd/intel_soc_pmic_bxtwc.h
index 0c351bc85d2d..9be566cc58c6 100644
--- a/include/linux/mfd/intel_soc_pmic_bxtwc.h
+++ b/include/linux/mfd/intel_soc_pmic_bxtwc.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Header file for Intel Broxton Whiskey Cove PMIC
*
* Copyright (C) 2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef __INTEL_BXTWC_H__
diff --git a/include/linux/mfd/intel_soc_pmic_mrfld.h b/include/linux/mfd/intel_soc_pmic_mrfld.h
new file mode 100644
index 000000000000..4daecd682275
--- /dev/null
+++ b/include/linux/mfd/intel_soc_pmic_mrfld.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for Intel Merrifield Basin Cove PMIC
+ *
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_SOC_PMIC_MRFLD_H__
+#define __INTEL_SOC_PMIC_MRFLD_H__
+
+#include <linux/bits.h>
+
+#define BCOVE_ID 0x00
+
+#define BCOVE_ID_MINREV0 GENMASK(2, 0)
+#define BCOVE_ID_MAJREV0 GENMASK(5, 3)
+#define BCOVE_ID_VENDID0 GENMASK(7, 6)
+
+#define BCOVE_MINOR(x) (unsigned int)(((x) & BCOVE_ID_MINREV0) >> 0)
+#define BCOVE_MAJOR(x) (unsigned int)(((x) & BCOVE_ID_MAJREV0) >> 3)
+#define BCOVE_VENDOR(x) (unsigned int)(((x) & BCOVE_ID_VENDID0) >> 6)
+
+#define BCOVE_IRQLVL1 0x01
+
+#define BCOVE_PBIRQ 0x02
+#define BCOVE_TMUIRQ 0x03
+#define BCOVE_THRMIRQ 0x04
+#define BCOVE_BCUIRQ 0x05
+#define BCOVE_ADCIRQ 0x06
+#define BCOVE_CHGRIRQ0 0x07
+#define BCOVE_CHGRIRQ1 0x08
+#define BCOVE_GPIOIRQ 0x09
+#define BCOVE_CRITIRQ 0x0B
+
+#define BCOVE_MIRQLVL1 0x0C
+
+#define BCOVE_MPBIRQ 0x0D
+#define BCOVE_MTMUIRQ 0x0E
+#define BCOVE_MTHRMIRQ 0x0F
+#define BCOVE_MBCUIRQ 0x10
+#define BCOVE_MADCIRQ 0x11
+#define BCOVE_MCHGRIRQ0 0x12
+#define BCOVE_MCHGRIRQ1 0x13
+#define BCOVE_MGPIOIRQ 0x14
+#define BCOVE_MCRITIRQ 0x16
+
+#define BCOVE_SCHGRIRQ0 0x4E
+#define BCOVE_SCHGRIRQ1 0x4F
+
+/* Level 1 IRQs */
+#define BCOVE_LVL1_PWRBTN BIT(0) /* power button */
+#define BCOVE_LVL1_TMU BIT(1) /* time management unit */
+#define BCOVE_LVL1_THRM BIT(2) /* thermal */
+#define BCOVE_LVL1_BCU BIT(3) /* burst control unit */
+#define BCOVE_LVL1_ADC BIT(4) /* ADC */
+#define BCOVE_LVL1_CHGR BIT(5) /* charger */
+#define BCOVE_LVL1_GPIO BIT(6) /* GPIO */
+#define BCOVE_LVL1_CRIT BIT(7) /* critical event */
+
+/* Level 2 IRQs: power button */
+#define BCOVE_PBIRQ_PBTN BIT(0)
+#define BCOVE_PBIRQ_UBTN BIT(1)
+
+/* Level 2 IRQs: ADC */
+#define BCOVE_ADCIRQ_BATTEMP BIT(2)
+#define BCOVE_ADCIRQ_SYSTEMP BIT(3)
+#define BCOVE_ADCIRQ_BATTID BIT(4)
+#define BCOVE_ADCIRQ_VIBATT BIT(5)
+#define BCOVE_ADCIRQ_CCTICK BIT(7)
+
+/* Level 2 IRQs: charger */
+#define BCOVE_CHGRIRQ_BAT0ALRT BIT(4)
+#define BCOVE_CHGRIRQ_BAT1ALRT BIT(5)
+#define BCOVE_CHGRIRQ_BATCRIT BIT(6)
+
+#define BCOVE_CHGRIRQ_VBUSDET BIT(0)
+#define BCOVE_CHGRIRQ_DCDET BIT(1)
+#define BCOVE_CHGRIRQ_BATTDET BIT(2)
+#define BCOVE_CHGRIRQ_USBIDDET BIT(3)
+
+#endif /* __INTEL_SOC_PMIC_MRFLD_H__ */
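
The ID register above packs three fields into one byte; a hedged sketch of decoding it with the field macros (the regmap handle is assumed to come from the parent MFD driver):

/* Decode the Basin Cove ID register using BCOVE_* field macros */
static int example_bcove_report_id(struct regmap *regmap)
{
	unsigned int id;
	int ret;

	ret = regmap_read(regmap, BCOVE_ID, &id);
	if (ret)
		return ret;

	pr_info("Basin Cove: vendor %u, revision %u.%u\n",
		BCOVE_VENDOR(id), BCOVE_MAJOR(id), BCOVE_MINOR(id));
	return 0;
}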
diff --git a/include/linux/mfd/ipaq-micro.h b/include/linux/mfd/ipaq-micro.h
index 5c4d29f6674f..d5caa4c86ecc 100644
--- a/include/linux/mfd/ipaq-micro.h
+++ b/include/linux/mfd/ipaq-micro.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Header file for the Compaq Micro MFD
*/
@@ -74,8 +75,8 @@ struct ipaq_micro_rxdev {
* @id: 4-bit ID of the message
* @tx_len: length of TX data
* @tx_data: TX data to send
- * @rx_len: length of receieved RX data
- * @rx_data: RX data to recieve
+ * @rx_len: length of received RX data
+ * @rx_data: RX data to receive
* @ack: a completion that will be completed when RX is complete
* @node: list node if message gets queued
*/
diff --git a/include/linux/mfd/iqs62x.h b/include/linux/mfd/iqs62x.h
new file mode 100644
index 000000000000..ffc86010af74
--- /dev/null
+++ b/include/linux/mfd/iqs62x.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Azoteq IQS620A/621/622/624/625 Multi-Function Sensors
+ *
+ * Copyright (C) 2019 Jeff LaBundy <jeff@labundy.com>
+ */
+
+#ifndef __LINUX_MFD_IQS62X_H
+#define __LINUX_MFD_IQS62X_H
+
+#define IQS620_PROD_NUM 0x41
+#define IQS621_PROD_NUM 0x46
+#define IQS622_PROD_NUM 0x42
+#define IQS624_PROD_NUM 0x43
+#define IQS625_PROD_NUM 0x4E
+
+#define IQS620_HW_NUM_V0 0x82
+#define IQS620_HW_NUM_V1 IQS620_HW_NUM_V0
+#define IQS620_HW_NUM_V2 IQS620_HW_NUM_V0
+#define IQS620_HW_NUM_V3 0x92
+
+#define IQS621_ALS_FLAGS 0x16
+#define IQS622_ALS_FLAGS 0x14
+
+#define IQS624_HALL_UI 0x70
+#define IQS624_HALL_UI_WHL_EVENT BIT(4)
+#define IQS624_HALL_UI_INT_EVENT BIT(3)
+#define IQS624_HALL_UI_AUTO_CAL BIT(2)
+
+#define IQS624_INTERVAL_DIV 0x7D
+
+#define IQS620_GLBL_EVENT_MASK 0xD7
+#define IQS620_GLBL_EVENT_MASK_PMU BIT(6)
+
+#define IQS62X_NUM_KEYS 16
+#define IQS62X_NUM_EVENTS (IQS62X_NUM_KEYS + 6)
+
+#define IQS62X_EVENT_SIZE 10
+
+enum iqs62x_ui_sel {
+ IQS62X_UI_PROX,
+ IQS62X_UI_SAR1,
+};
+
+enum iqs62x_event_reg {
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_SYS,
+ IQS62X_EVENT_PROX,
+ IQS62X_EVENT_HYST,
+ IQS62X_EVENT_HALL,
+ IQS62X_EVENT_ALS,
+ IQS62X_EVENT_IR,
+ IQS62X_EVENT_WHEEL,
+ IQS62X_EVENT_INTER,
+ IQS62X_EVENT_UI_LO,
+ IQS62X_EVENT_UI_HI,
+};
+
+enum iqs62x_event_flag {
+ /* keys */
+ IQS62X_EVENT_PROX_CH0_T,
+ IQS62X_EVENT_PROX_CH0_P,
+ IQS62X_EVENT_PROX_CH1_T,
+ IQS62X_EVENT_PROX_CH1_P,
+ IQS62X_EVENT_PROX_CH2_T,
+ IQS62X_EVENT_PROX_CH2_P,
+ IQS62X_EVENT_HYST_POS_T,
+ IQS62X_EVENT_HYST_POS_P,
+ IQS62X_EVENT_HYST_NEG_T,
+ IQS62X_EVENT_HYST_NEG_P,
+ IQS62X_EVENT_SAR1_ACT,
+ IQS62X_EVENT_SAR1_QRD,
+ IQS62X_EVENT_SAR1_MOVE,
+ IQS62X_EVENT_SAR1_HALT,
+ IQS62X_EVENT_WHEEL_UP,
+ IQS62X_EVENT_WHEEL_DN,
+
+ /* switches */
+ IQS62X_EVENT_HALL_N_T,
+ IQS62X_EVENT_HALL_N_P,
+ IQS62X_EVENT_HALL_S_T,
+ IQS62X_EVENT_HALL_S_P,
+
+ /* everything else */
+ IQS62X_EVENT_SYS_RESET,
+ IQS62X_EVENT_SYS_ATI,
+};
+
+struct iqs62x_event_data {
+ u16 ui_data;
+ u8 als_flags;
+ u8 ir_flags;
+ u8 interval;
+};
+
+struct iqs62x_event_desc {
+ enum iqs62x_event_reg reg;
+ u8 mask;
+ u8 val;
+};
+
+struct iqs62x_dev_desc {
+ const char *dev_name;
+ const struct mfd_cell *sub_devs;
+ int num_sub_devs;
+ u8 prod_num;
+ u8 sw_num;
+ const u8 *cal_regs;
+ int num_cal_regs;
+ u8 prox_mask;
+ u8 sar_mask;
+ u8 hall_mask;
+ u8 hyst_mask;
+ u8 temp_mask;
+ u8 als_mask;
+ u8 ir_mask;
+ u8 prox_settings;
+ u8 als_flags;
+ u8 hall_flags;
+ u8 hyst_shift;
+ u8 interval;
+ u8 interval_div;
+ const char *fw_name;
+ const enum iqs62x_event_reg (*event_regs)[IQS62X_EVENT_SIZE];
+};
+
+struct iqs62x_core {
+ const struct iqs62x_dev_desc *dev_desc;
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct blocking_notifier_head nh;
+ struct list_head fw_blk_head;
+ struct completion ati_done;
+ struct completion fw_done;
+ enum iqs62x_ui_sel ui_sel;
+ unsigned long event_cache;
+ u8 sw_num;
+ u8 hw_num;
+};
+
+extern const struct iqs62x_event_desc iqs62x_events[IQS62X_NUM_EVENTS];
+
+#endif /* __LINUX_MFD_IQS62X_H */
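
Sub-devices are expected to consume events through the blocking notifier head in struct iqs62x_core. A minimal consumer sketch, assuming the core passes the event bitmap as the notifier action and a struct iqs62x_event_data pointer as the payload:

static int example_iqs62x_notifier(struct notifier_block *nb,
				   unsigned long event_flags, void *context)
{
	struct iqs62x_event_data *event_data = context;

	if (event_flags & BIT(IQS62X_EVENT_SYS_RESET))
		return NOTIFY_OK;	/* core re-initialised; cached state is stale */

	if (event_flags & BIT(IQS62X_EVENT_WHEEL_UP))
		pr_debug("iqs62x: wheel up, interval %u\n", event_data->interval);

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_iqs62x_notifier,
};

/* Registration, given a struct iqs62x_core *iqs62x from the parent MFD:
 *	blocking_notifier_chain_register(&iqs62x->nh, &example_nb);
 */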
diff --git a/include/linux/mfd/janz.h b/include/linux/mfd/janz.h
index e9994c469803..90dea65fd733 100644
--- a/include/linux/mfd/janz.h
+++ b/include/linux/mfd/janz.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Common Definitions for Janz MODULbus devices
*
* Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef JANZ_H
diff --git a/include/linux/mfd/kempld.h b/include/linux/mfd/kempld.h
index 26e0b469e567..643c096b93ac 100644
--- a/include/linux/mfd/kempld.h
+++ b/include/linux/mfd/kempld.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Kontron PLD driver definitions
*
* Copyright (c) 2010-2012 Kontron Europe GmbH
* Author: Michael Brunner <michael.brunner@kontron.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License 2 as published
- * by the Free Software Foundation.
*/
#ifndef _LINUX_MFD_KEMPLD_H_
diff --git a/include/linux/mfd/khadas-mcu.h b/include/linux/mfd/khadas-mcu.h
new file mode 100644
index 000000000000..a99ba2ed0e4e
--- /dev/null
+++ b/include/linux/mfd/khadas-mcu.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Khadas System Control Microcontroller register map
+ *
+ * Copyright (C) 2020 BayLibre SAS
+ *
+ * Author(s): Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef MFD_KHADAS_MCU_H
+#define MFD_KHADAS_MCU_H
+
+#define KHADAS_MCU_PASSWD_VEN_0_REG 0x00 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_1_REG 0x01 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_2_REG 0x02 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_3_REG 0x03 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_4_REG 0x04 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_5_REG 0x05 /* RO */
+#define KHADAS_MCU_MAC_0_REG 0x06 /* RO */
+#define KHADAS_MCU_MAC_1_REG 0x07 /* RO */
+#define KHADAS_MCU_MAC_2_REG 0x08 /* RO */
+#define KHADAS_MCU_MAC_3_REG 0x09 /* RO */
+#define KHADAS_MCU_MAC_4_REG 0x0a /* RO */
+#define KHADAS_MCU_MAC_5_REG 0x0b /* RO */
+#define KHADAS_MCU_USID_0_REG 0x0c /* RO */
+#define KHADAS_MCU_USID_1_REG 0x0d /* RO */
+#define KHADAS_MCU_USID_2_REG 0x0e /* RO */
+#define KHADAS_MCU_USID_3_REG 0x0f /* RO */
+#define KHADAS_MCU_USID_4_REG 0x10 /* RO */
+#define KHADAS_MCU_USID_5_REG 0x11 /* RO */
+#define KHADAS_MCU_VERSION_0_REG 0x12 /* RO */
+#define KHADAS_MCU_VERSION_1_REG 0x13 /* RO */
+#define KHADAS_MCU_DEVICE_NO_0_REG 0x14 /* RO */
+#define KHADAS_MCU_DEVICE_NO_1_REG 0x15 /* RO */
+#define KHADAS_MCU_FACTORY_TEST_REG 0x16 /* R */
+#define KHADAS_MCU_BOOT_MODE_REG 0x20 /* RW */
+#define KHADAS_MCU_BOOT_EN_WOL_REG 0x21 /* RW */
+#define KHADAS_MCU_BOOT_EN_RTC_REG 0x22 /* RW */
+#define KHADAS_MCU_BOOT_EN_EXP_REG 0x23 /* RW */
+#define KHADAS_MCU_BOOT_EN_IR_REG 0x24 /* RW */
+#define KHADAS_MCU_BOOT_EN_DCIN_REG 0x25 /* RW */
+#define KHADAS_MCU_BOOT_EN_KEY_REG 0x26 /* RW */
+#define KHADAS_MCU_KEY_MODE_REG 0x27 /* RW */
+#define KHADAS_MCU_LED_MODE_ON_REG 0x28 /* RW */
+#define KHADAS_MCU_LED_MODE_OFF_REG 0x29 /* RW */
+#define KHADAS_MCU_SHUTDOWN_NORMAL_REG 0x2c /* RW */
+#define KHADAS_MCU_MAC_SWITCH_REG 0x2d /* RW */
+#define KHADAS_MCU_MCU_SLEEP_MODE_REG 0x2e /* RW */
+#define KHADAS_MCU_IR_CODE1_0_REG 0x2f /* RW */
+#define KHADAS_MCU_IR_CODE1_1_REG 0x30 /* RW */
+#define KHADAS_MCU_IR_CODE1_2_REG 0x31 /* RW */
+#define KHADAS_MCU_IR_CODE1_3_REG 0x32 /* RW */
+#define KHADAS_MCU_USB_PCIE_SWITCH_REG 0x33 /* RW */
+#define KHADAS_MCU_IR_CODE2_0_REG 0x34 /* RW */
+#define KHADAS_MCU_IR_CODE2_1_REG 0x35 /* RW */
+#define KHADAS_MCU_IR_CODE2_2_REG 0x36 /* RW */
+#define KHADAS_MCU_IR_CODE2_3_REG 0x37 /* RW */
+#define KHADAS_MCU_PASSWD_USER_0_REG 0x40 /* RW */
+#define KHADAS_MCU_PASSWD_USER_1_REG 0x41 /* RW */
+#define KHADAS_MCU_PASSWD_USER_2_REG 0x42 /* RW */
+#define KHADAS_MCU_PASSWD_USER_3_REG 0x43 /* RW */
+#define KHADAS_MCU_PASSWD_USER_4_REG 0x44 /* RW */
+#define KHADAS_MCU_PASSWD_USER_5_REG 0x45 /* RW */
+#define KHADAS_MCU_USER_DATA_0_REG 0x46 /* RW 56 bytes */
+#define KHADAS_MCU_PWR_OFF_CMD_REG 0x80 /* WO */
+#define KHADAS_MCU_PASSWD_START_REG 0x81 /* WO */
+#define KHADAS_MCU_CHECK_VEN_PASSWD_REG 0x82 /* WO */
+#define KHADAS_MCU_CHECK_USER_PASSWD_REG 0x83 /* WO */
+#define KHADAS_MCU_SHUTDOWN_NORMAL_STATUS_REG 0x86 /* RO */
+#define KHADAS_MCU_WOL_INIT_START_REG 0x87 /* WO */
+#define KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG 0x88 /* WO */
+
+enum {
+ KHADAS_BOARD_VIM1 = 0x1,
+ KHADAS_BOARD_VIM2,
+ KHADAS_BOARD_VIM3,
+ KHADAS_BOARD_EDGE = 0x11,
+ KHADAS_BOARD_EDGE_V,
+};
+
+/**
+ * struct khadas_mcu - Khadas MCU structure
+ * @dev: device reference used for logs
+ * @regmap: register map
+ */
+struct khadas_mcu {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+#endif /* MFD_KHADAS_MCU_H */
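
Since the version registers sit at consecutive offsets, a sub-driver can fetch them in one bulk read; a hedged sketch, with the struct khadas_mcu handle assumed to come from the parent MFD:

static int example_khadas_mcu_version(struct khadas_mcu *mcu)
{
	u8 ver[2];
	int ret;

	/* VERSION_0/VERSION_1 are adjacent, so one bulk read suffices */
	ret = regmap_bulk_read(mcu->regmap, KHADAS_MCU_VERSION_0_REG,
			       ver, sizeof(ver));
	if (ret)
		return ret;

	dev_info(mcu->dev, "MCU version %u.%u\n", ver[0], ver[1]);
	return 0;
}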
diff --git a/include/linux/mfd/lm3533.h b/include/linux/mfd/lm3533.h
index 594bc591f256..77092f6363ad 100644
--- a/include/linux/mfd/lm3533.h
+++ b/include/linux/mfd/lm3533.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* lm3533.h -- LM3533 interface
*
* Copyright (C) 2011-2012 Texas Instruments
*
* Author: Johan Hovold <jhovold@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __LINUX_MFD_LM3533_H
diff --git a/include/linux/mfd/lochnagar.h b/include/linux/mfd/lochnagar.h
new file mode 100644
index 000000000000..ff9e64cfc9fb
--- /dev/null
+++ b/include/linux/mfd/lochnagar.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Lochnagar internals
+ *
+ * Copyright (c) 2013-2018 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ *
+ * Author: Charles Keepax <ckeepax@opensource.cirrus.com>
+ */
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+
+#ifndef CIRRUS_LOCHNAGAR_H
+#define CIRRUS_LOCHNAGAR_H
+
+enum lochnagar_type {
+ LOCHNAGAR1,
+ LOCHNAGAR2,
+};
+
+/**
+ * struct lochnagar - Core data for the Lochnagar audio board driver.
+ *
+ * @type: The type of Lochnagar device connected.
+ * @dev: A pointer to the struct device for the main MFD.
+ * @regmap: The device's main register map.
+ * @analogue_config_lock: Lock used to protect updates to the analogue
+ * configuration, as these must not be changed whilst the hardware is
+ * processing the last update.
+ */
+struct lochnagar {
+ enum lochnagar_type type;
+ struct device *dev;
+ struct regmap *regmap;
+
+ /* Lock to protect updates to the analogue configuration */
+ struct mutex analogue_config_lock;
+};
+
+/* Register Addresses */
+#define LOCHNAGAR_SOFTWARE_RESET 0x00
+#define LOCHNAGAR_FIRMWARE_ID1 0x01
+#define LOCHNAGAR_FIRMWARE_ID2 0x02
+
+/* (0x0000) Software Reset */
+#define LOCHNAGAR_DEVICE_ID_MASK 0xFFFC
+#define LOCHNAGAR_DEVICE_ID_SHIFT 2
+#define LOCHNAGAR_REV_ID_MASK 0x0003
+#define LOCHNAGAR_REV_ID_SHIFT 0
+
+int lochnagar_update_config(struct lochnagar *lochnagar);
+
+#endif
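
The device and revision IDs share the software reset register; a short sketch of decoding them with the masks and shifts above (the struct lochnagar pointer is assumed context from the MFD probe):

static int example_lochnagar_id(struct lochnagar *lochnagar)
{
	unsigned int id;
	int ret;

	ret = regmap_read(lochnagar->regmap, LOCHNAGAR_SOFTWARE_RESET, &id);
	if (ret)
		return ret;

	dev_info(lochnagar->dev, "Lochnagar device %#x revision %u\n",
		 (id & LOCHNAGAR_DEVICE_ID_MASK) >> LOCHNAGAR_DEVICE_ID_SHIFT,
		 (id & LOCHNAGAR_REV_ID_MASK) >> LOCHNAGAR_REV_ID_SHIFT);
	return 0;
}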
diff --git a/include/linux/mfd/lochnagar1_regs.h b/include/linux/mfd/lochnagar1_regs.h
new file mode 100644
index 000000000000..114b846245d9
--- /dev/null
+++ b/include/linux/mfd/lochnagar1_regs.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Lochnagar1 register definitions
+ *
+ * Copyright (c) 2017-2018 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ *
+ * Author: Charles Keepax <ckeepax@opensource.cirrus.com>
+ */
+
+#ifndef LOCHNAGAR1_REGISTERS_H
+#define LOCHNAGAR1_REGISTERS_H
+
+/* Register Addresses */
+#define LOCHNAGAR1_CDC_AIF1_SEL 0x0008
+#define LOCHNAGAR1_CDC_AIF2_SEL 0x0009
+#define LOCHNAGAR1_CDC_AIF3_SEL 0x000A
+#define LOCHNAGAR1_CDC_MCLK1_SEL 0x000B
+#define LOCHNAGAR1_CDC_MCLK2_SEL 0x000C
+#define LOCHNAGAR1_CDC_AIF_CTRL1 0x000D
+#define LOCHNAGAR1_CDC_AIF_CTRL2 0x000E
+#define LOCHNAGAR1_EXT_AIF_CTRL 0x000F
+#define LOCHNAGAR1_DSP_AIF1_SEL 0x0010
+#define LOCHNAGAR1_DSP_AIF2_SEL 0x0011
+#define LOCHNAGAR1_DSP_CLKIN_SEL 0x0012
+#define LOCHNAGAR1_DSP_AIF 0x0013
+#define LOCHNAGAR1_GF_AIF1 0x0014
+#define LOCHNAGAR1_GF_AIF2 0x0015
+#define LOCHNAGAR1_PSIA_AIF 0x0016
+#define LOCHNAGAR1_PSIA1_SEL 0x0017
+#define LOCHNAGAR1_PSIA2_SEL 0x0018
+#define LOCHNAGAR1_SPDIF_AIF_SEL 0x0019
+#define LOCHNAGAR1_GF_AIF3_SEL 0x001C
+#define LOCHNAGAR1_GF_AIF4_SEL 0x001D
+#define LOCHNAGAR1_GF_CLKOUT1_SEL 0x001E
+#define LOCHNAGAR1_GF_AIF1_SEL 0x001F
+#define LOCHNAGAR1_GF_AIF2_SEL 0x0020
+#define LOCHNAGAR1_GF_GPIO2 0x0026
+#define LOCHNAGAR1_GF_GPIO3 0x0027
+#define LOCHNAGAR1_GF_GPIO7 0x0028
+#define LOCHNAGAR1_RST 0x0029
+#define LOCHNAGAR1_LED1 0x002A
+#define LOCHNAGAR1_LED2 0x002B
+#define LOCHNAGAR1_I2C_CTRL 0x0046
+
+/*
+ * (0x0008 - 0x000C, 0x0010 - 0x0012, 0x0017 - 0x0020)
+ * CDC_AIF1_SEL - GF_AIF2_SEL
+ */
+#define LOCHNAGAR1_SRC_MASK 0xFF
+#define LOCHNAGAR1_SRC_SHIFT 0
+
+/* (0x000D) CDC_AIF_CTRL1 */
+#define LOCHNAGAR1_CDC_AIF2_LRCLK_DIR_MASK 0x40
+#define LOCHNAGAR1_CDC_AIF2_LRCLK_DIR_SHIFT 6
+#define LOCHNAGAR1_CDC_AIF2_BCLK_DIR_MASK 0x20
+#define LOCHNAGAR1_CDC_AIF2_BCLK_DIR_SHIFT 5
+#define LOCHNAGAR1_CDC_AIF2_ENA_MASK 0x10
+#define LOCHNAGAR1_CDC_AIF2_ENA_SHIFT 4
+#define LOCHNAGAR1_CDC_AIF1_LRCLK_DIR_MASK 0x04
+#define LOCHNAGAR1_CDC_AIF1_LRCLK_DIR_SHIFT 2
+#define LOCHNAGAR1_CDC_AIF1_BCLK_DIR_MASK 0x02
+#define LOCHNAGAR1_CDC_AIF1_BCLK_DIR_SHIFT 1
+#define LOCHNAGAR1_CDC_AIF1_ENA_MASK 0x01
+#define LOCHNAGAR1_CDC_AIF1_ENA_SHIFT 0
+
+/* (0x000E) CDC_AIF_CTRL2 */
+#define LOCHNAGAR1_CDC_AIF3_LRCLK_DIR_MASK 0x40
+#define LOCHNAGAR1_CDC_AIF3_LRCLK_DIR_SHIFT 6
+#define LOCHNAGAR1_CDC_AIF3_BCLK_DIR_MASK 0x20
+#define LOCHNAGAR1_CDC_AIF3_BCLK_DIR_SHIFT 5
+#define LOCHNAGAR1_CDC_AIF3_ENA_MASK 0x10
+#define LOCHNAGAR1_CDC_AIF3_ENA_SHIFT 4
+#define LOCHNAGAR1_CDC_MCLK1_ENA_MASK 0x02
+#define LOCHNAGAR1_CDC_MCLK1_ENA_SHIFT 1
+#define LOCHNAGAR1_CDC_MCLK2_ENA_MASK 0x01
+#define LOCHNAGAR1_CDC_MCLK2_ENA_SHIFT 0
+
+/* (0x000F) EXT_AIF_CTRL */
+#define LOCHNAGAR1_SPDIF_AIF_LRCLK_DIR_MASK 0x20
+#define LOCHNAGAR1_SPDIF_AIF_LRCLK_DIR_SHIFT 5
+#define LOCHNAGAR1_SPDIF_AIF_BCLK_DIR_MASK 0x10
+#define LOCHNAGAR1_SPDIF_AIF_BCLK_DIR_SHIFT 4
+#define LOCHNAGAR1_SPDIF_AIF_ENA_MASK 0x08
+#define LOCHNAGAR1_SPDIF_AIF_ENA_SHIFT 3
+
+/* (0x0013) DSP_AIF */
+#define LOCHNAGAR1_DSP_AIF2_LRCLK_DIR_MASK 0x40
+#define LOCHNAGAR1_DSP_AIF2_LRCLK_DIR_SHIFT 6
+#define LOCHNAGAR1_DSP_AIF2_BCLK_DIR_MASK 0x20
+#define LOCHNAGAR1_DSP_AIF2_BCLK_DIR_SHIFT 5
+#define LOCHNAGAR1_DSP_AIF2_ENA_MASK 0x10
+#define LOCHNAGAR1_DSP_AIF2_ENA_SHIFT 4
+#define LOCHNAGAR1_DSP_CLKIN_ENA_MASK 0x08
+#define LOCHNAGAR1_DSP_CLKIN_ENA_SHIFT 3
+#define LOCHNAGAR1_DSP_AIF1_LRCLK_DIR_MASK 0x04
+#define LOCHNAGAR1_DSP_AIF1_LRCLK_DIR_SHIFT 2
+#define LOCHNAGAR1_DSP_AIF1_BCLK_DIR_MASK 0x02
+#define LOCHNAGAR1_DSP_AIF1_BCLK_DIR_SHIFT 1
+#define LOCHNAGAR1_DSP_AIF1_ENA_MASK 0x01
+#define LOCHNAGAR1_DSP_AIF1_ENA_SHIFT 0
+
+/* (0x0014) GF_AIF1 */
+#define LOCHNAGAR1_GF_CLKOUT1_ENA_MASK 0x40
+#define LOCHNAGAR1_GF_CLKOUT1_ENA_SHIFT 6
+#define LOCHNAGAR1_GF_AIF3_LRCLK_DIR_MASK 0x20
+#define LOCHNAGAR1_GF_AIF3_LRCLK_DIR_SHIFT 5
+#define LOCHNAGAR1_GF_AIF3_BCLK_DIR_MASK 0x10
+#define LOCHNAGAR1_GF_AIF3_BCLK_DIR_SHIFT 4
+#define LOCHNAGAR1_GF_AIF3_ENA_MASK 0x08
+#define LOCHNAGAR1_GF_AIF3_ENA_SHIFT 3
+#define LOCHNAGAR1_GF_AIF1_LRCLK_DIR_MASK 0x04
+#define LOCHNAGAR1_GF_AIF1_LRCLK_DIR_SHIFT 2
+#define LOCHNAGAR1_GF_AIF1_BCLK_DIR_MASK 0x02
+#define LOCHNAGAR1_GF_AIF1_BCLK_DIR_SHIFT 1
+#define LOCHNAGAR1_GF_AIF1_ENA_MASK 0x01
+#define LOCHNAGAR1_GF_AIF1_ENA_SHIFT 0
+
+/* (0x0015) GF_AIF2 */
+#define LOCHNAGAR1_GF_AIF4_LRCLK_DIR_MASK 0x20
+#define LOCHNAGAR1_GF_AIF4_LRCLK_DIR_SHIFT 5
+#define LOCHNAGAR1_GF_AIF4_BCLK_DIR_MASK 0x10
+#define LOCHNAGAR1_GF_AIF4_BCLK_DIR_SHIFT 4
+#define LOCHNAGAR1_GF_AIF4_ENA_MASK 0x08
+#define LOCHNAGAR1_GF_AIF4_ENA_SHIFT 3
+#define LOCHNAGAR1_GF_AIF2_LRCLK_DIR_MASK 0x04
+#define LOCHNAGAR1_GF_AIF2_LRCLK_DIR_SHIFT 2
+#define LOCHNAGAR1_GF_AIF2_BCLK_DIR_MASK 0x02
+#define LOCHNAGAR1_GF_AIF2_BCLK_DIR_SHIFT 1
+#define LOCHNAGAR1_GF_AIF2_ENA_MASK 0x01
+#define LOCHNAGAR1_GF_AIF2_ENA_SHIFT 0
+
+/* (0x0016) PSIA_AIF */
+#define LOCHNAGAR1_PSIA2_LRCLK_DIR_MASK 0x40
+#define LOCHNAGAR1_PSIA2_LRCLK_DIR_SHIFT 6
+#define LOCHNAGAR1_PSIA2_BCLK_DIR_MASK 0x20
+#define LOCHNAGAR1_PSIA2_BCLK_DIR_SHIFT 5
+#define LOCHNAGAR1_PSIA2_ENA_MASK 0x10
+#define LOCHNAGAR1_PSIA2_ENA_SHIFT 4
+#define LOCHNAGAR1_PSIA1_LRCLK_DIR_MASK 0x04
+#define LOCHNAGAR1_PSIA1_LRCLK_DIR_SHIFT 2
+#define LOCHNAGAR1_PSIA1_BCLK_DIR_MASK 0x02
+#define LOCHNAGAR1_PSIA1_BCLK_DIR_SHIFT 1
+#define LOCHNAGAR1_PSIA1_ENA_MASK 0x01
+#define LOCHNAGAR1_PSIA1_ENA_SHIFT 0
+
+/* (0x0029) RST */
+#define LOCHNAGAR1_DSP_RESET_MASK 0x02
+#define LOCHNAGAR1_DSP_RESET_SHIFT 1
+#define LOCHNAGAR1_CDC_RESET_MASK 0x01
+#define LOCHNAGAR1_CDC_RESET_SHIFT 0
+
+/* (0x0046) I2C_CTRL */
+#define LOCHNAGAR1_CDC_CIF_MODE_MASK 0x01
+#define LOCHNAGAR1_CDC_CIF_MODE_SHIFT 0
+
+#endif
diff --git a/include/linux/mfd/lochnagar2_regs.h b/include/linux/mfd/lochnagar2_regs.h
new file mode 100644
index 000000000000..419b25a332fd
--- /dev/null
+++ b/include/linux/mfd/lochnagar2_regs.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Lochnagar2 register definitions
+ *
+ * Copyright (c) 2017-2018 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ *
+ * Author: Charles Keepax <ckeepax@opensource.cirrus.com>
+ */
+
+#ifndef LOCHNAGAR2_REGISTERS_H
+#define LOCHNAGAR2_REGISTERS_H
+
+/* Register Addresses */
+#define LOCHNAGAR2_CDC_AIF1_CTRL 0x000D
+#define LOCHNAGAR2_CDC_AIF2_CTRL 0x000E
+#define LOCHNAGAR2_CDC_AIF3_CTRL 0x000F
+#define LOCHNAGAR2_DSP_AIF1_CTRL 0x0010
+#define LOCHNAGAR2_DSP_AIF2_CTRL 0x0011
+#define LOCHNAGAR2_PSIA1_CTRL 0x0012
+#define LOCHNAGAR2_PSIA2_CTRL 0x0013
+#define LOCHNAGAR2_GF_AIF3_CTRL 0x0014
+#define LOCHNAGAR2_GF_AIF4_CTRL 0x0015
+#define LOCHNAGAR2_GF_AIF1_CTRL 0x0016
+#define LOCHNAGAR2_GF_AIF2_CTRL 0x0017
+#define LOCHNAGAR2_SPDIF_AIF_CTRL 0x0018
+#define LOCHNAGAR2_USB_AIF1_CTRL 0x0019
+#define LOCHNAGAR2_USB_AIF2_CTRL 0x001A
+#define LOCHNAGAR2_ADAT_AIF_CTRL 0x001B
+#define LOCHNAGAR2_CDC_MCLK1_CTRL 0x001E
+#define LOCHNAGAR2_CDC_MCLK2_CTRL 0x001F
+#define LOCHNAGAR2_DSP_CLKIN_CTRL 0x0020
+#define LOCHNAGAR2_PSIA1_MCLK_CTRL 0x0021
+#define LOCHNAGAR2_PSIA2_MCLK_CTRL 0x0022
+#define LOCHNAGAR2_SPDIF_MCLK_CTRL 0x0023
+#define LOCHNAGAR2_GF_CLKOUT1_CTRL 0x0024
+#define LOCHNAGAR2_GF_CLKOUT2_CTRL 0x0025
+#define LOCHNAGAR2_ADAT_MCLK_CTRL 0x0026
+#define LOCHNAGAR2_SOUNDCARD_MCLK_CTRL 0x0027
+#define LOCHNAGAR2_GPIO_FPGA_GPIO1 0x0031
+#define LOCHNAGAR2_GPIO_FPGA_GPIO2 0x0032
+#define LOCHNAGAR2_GPIO_FPGA_GPIO3 0x0033
+#define LOCHNAGAR2_GPIO_FPGA_GPIO4 0x0034
+#define LOCHNAGAR2_GPIO_FPGA_GPIO5 0x0035
+#define LOCHNAGAR2_GPIO_FPGA_GPIO6 0x0036
+#define LOCHNAGAR2_GPIO_CDC_GPIO1 0x0037
+#define LOCHNAGAR2_GPIO_CDC_GPIO2 0x0038
+#define LOCHNAGAR2_GPIO_CDC_GPIO3 0x0039
+#define LOCHNAGAR2_GPIO_CDC_GPIO4 0x003A
+#define LOCHNAGAR2_GPIO_CDC_GPIO5 0x003B
+#define LOCHNAGAR2_GPIO_CDC_GPIO6 0x003C
+#define LOCHNAGAR2_GPIO_CDC_GPIO7 0x003D
+#define LOCHNAGAR2_GPIO_CDC_GPIO8 0x003E
+#define LOCHNAGAR2_GPIO_DSP_GPIO1 0x003F
+#define LOCHNAGAR2_GPIO_DSP_GPIO2 0x0040
+#define LOCHNAGAR2_GPIO_DSP_GPIO3 0x0041
+#define LOCHNAGAR2_GPIO_DSP_GPIO4 0x0042
+#define LOCHNAGAR2_GPIO_DSP_GPIO5 0x0043
+#define LOCHNAGAR2_GPIO_DSP_GPIO6 0x0044
+#define LOCHNAGAR2_GPIO_GF_GPIO2 0x0045
+#define LOCHNAGAR2_GPIO_GF_GPIO3 0x0046
+#define LOCHNAGAR2_GPIO_GF_GPIO7 0x0047
+#define LOCHNAGAR2_GPIO_CDC_AIF1_BCLK 0x0048
+#define LOCHNAGAR2_GPIO_CDC_AIF1_RXDAT 0x0049
+#define LOCHNAGAR2_GPIO_CDC_AIF1_LRCLK 0x004A
+#define LOCHNAGAR2_GPIO_CDC_AIF1_TXDAT 0x004B
+#define LOCHNAGAR2_GPIO_CDC_AIF2_BCLK 0x004C
+#define LOCHNAGAR2_GPIO_CDC_AIF2_RXDAT 0x004D
+#define LOCHNAGAR2_GPIO_CDC_AIF2_LRCLK 0x004E
+#define LOCHNAGAR2_GPIO_CDC_AIF2_TXDAT 0x004F
+#define LOCHNAGAR2_GPIO_CDC_AIF3_BCLK 0x0050
+#define LOCHNAGAR2_GPIO_CDC_AIF3_RXDAT 0x0051
+#define LOCHNAGAR2_GPIO_CDC_AIF3_LRCLK 0x0052
+#define LOCHNAGAR2_GPIO_CDC_AIF3_TXDAT 0x0053
+#define LOCHNAGAR2_GPIO_DSP_AIF1_BCLK 0x0054
+#define LOCHNAGAR2_GPIO_DSP_AIF1_RXDAT 0x0055
+#define LOCHNAGAR2_GPIO_DSP_AIF1_LRCLK 0x0056
+#define LOCHNAGAR2_GPIO_DSP_AIF1_TXDAT 0x0057
+#define LOCHNAGAR2_GPIO_DSP_AIF2_BCLK 0x0058
+#define LOCHNAGAR2_GPIO_DSP_AIF2_RXDAT 0x0059
+#define LOCHNAGAR2_GPIO_DSP_AIF2_LRCLK 0x005A
+#define LOCHNAGAR2_GPIO_DSP_AIF2_TXDAT 0x005B
+#define LOCHNAGAR2_GPIO_PSIA1_BCLK 0x005C
+#define LOCHNAGAR2_GPIO_PSIA1_RXDAT 0x005D
+#define LOCHNAGAR2_GPIO_PSIA1_LRCLK 0x005E
+#define LOCHNAGAR2_GPIO_PSIA1_TXDAT 0x005F
+#define LOCHNAGAR2_GPIO_PSIA2_BCLK 0x0060
+#define LOCHNAGAR2_GPIO_PSIA2_RXDAT 0x0061
+#define LOCHNAGAR2_GPIO_PSIA2_LRCLK 0x0062
+#define LOCHNAGAR2_GPIO_PSIA2_TXDAT 0x0063
+#define LOCHNAGAR2_GPIO_GF_AIF3_BCLK 0x0064
+#define LOCHNAGAR2_GPIO_GF_AIF3_RXDAT 0x0065
+#define LOCHNAGAR2_GPIO_GF_AIF3_LRCLK 0x0066
+#define LOCHNAGAR2_GPIO_GF_AIF3_TXDAT 0x0067
+#define LOCHNAGAR2_GPIO_GF_AIF4_BCLK 0x0068
+#define LOCHNAGAR2_GPIO_GF_AIF4_RXDAT 0x0069
+#define LOCHNAGAR2_GPIO_GF_AIF4_LRCLK 0x006A
+#define LOCHNAGAR2_GPIO_GF_AIF4_TXDAT 0x006B
+#define LOCHNAGAR2_GPIO_GF_AIF1_BCLK 0x006C
+#define LOCHNAGAR2_GPIO_GF_AIF1_RXDAT 0x006D
+#define LOCHNAGAR2_GPIO_GF_AIF1_LRCLK 0x006E
+#define LOCHNAGAR2_GPIO_GF_AIF1_TXDAT 0x006F
+#define LOCHNAGAR2_GPIO_GF_AIF2_BCLK 0x0070
+#define LOCHNAGAR2_GPIO_GF_AIF2_RXDAT 0x0071
+#define LOCHNAGAR2_GPIO_GF_AIF2_LRCLK 0x0072
+#define LOCHNAGAR2_GPIO_GF_AIF2_TXDAT 0x0073
+#define LOCHNAGAR2_GPIO_DSP_UART1_RX 0x0074
+#define LOCHNAGAR2_GPIO_DSP_UART1_TX 0x0075
+#define LOCHNAGAR2_GPIO_DSP_UART2_RX 0x0076
+#define LOCHNAGAR2_GPIO_DSP_UART2_TX 0x0077
+#define LOCHNAGAR2_GPIO_GF_UART2_RX 0x0078
+#define LOCHNAGAR2_GPIO_GF_UART2_TX 0x0079
+#define LOCHNAGAR2_GPIO_USB_UART_RX 0x007A
+#define LOCHNAGAR2_GPIO_CDC_PDMCLK1 0x007C
+#define LOCHNAGAR2_GPIO_CDC_PDMDAT1 0x007D
+#define LOCHNAGAR2_GPIO_CDC_PDMCLK2 0x007E
+#define LOCHNAGAR2_GPIO_CDC_PDMDAT2 0x007F
+#define LOCHNAGAR2_GPIO_CDC_DMICCLK1 0x0080
+#define LOCHNAGAR2_GPIO_CDC_DMICDAT1 0x0081
+#define LOCHNAGAR2_GPIO_CDC_DMICCLK2 0x0082
+#define LOCHNAGAR2_GPIO_CDC_DMICDAT2 0x0083
+#define LOCHNAGAR2_GPIO_CDC_DMICCLK3 0x0084
+#define LOCHNAGAR2_GPIO_CDC_DMICDAT3 0x0085
+#define LOCHNAGAR2_GPIO_CDC_DMICCLK4 0x0086
+#define LOCHNAGAR2_GPIO_CDC_DMICDAT4 0x0087
+#define LOCHNAGAR2_GPIO_DSP_DMICCLK1 0x0088
+#define LOCHNAGAR2_GPIO_DSP_DMICDAT1 0x0089
+#define LOCHNAGAR2_GPIO_DSP_DMICCLK2 0x008A
+#define LOCHNAGAR2_GPIO_DSP_DMICDAT2 0x008B
+#define LOCHNAGAR2_GPIO_I2C2_SCL 0x008C
+#define LOCHNAGAR2_GPIO_I2C2_SDA 0x008D
+#define LOCHNAGAR2_GPIO_I2C3_SCL 0x008E
+#define LOCHNAGAR2_GPIO_I2C3_SDA 0x008F
+#define LOCHNAGAR2_GPIO_I2C4_SCL 0x0090
+#define LOCHNAGAR2_GPIO_I2C4_SDA 0x0091
+#define LOCHNAGAR2_GPIO_DSP_STANDBY 0x0092
+#define LOCHNAGAR2_GPIO_CDC_MCLK1 0x0093
+#define LOCHNAGAR2_GPIO_CDC_MCLK2 0x0094
+#define LOCHNAGAR2_GPIO_DSP_CLKIN 0x0095
+#define LOCHNAGAR2_GPIO_PSIA1_MCLK 0x0096
+#define LOCHNAGAR2_GPIO_PSIA2_MCLK 0x0097
+#define LOCHNAGAR2_GPIO_GF_GPIO1 0x0098
+#define LOCHNAGAR2_GPIO_GF_GPIO5 0x0099
+#define LOCHNAGAR2_GPIO_DSP_GPIO20 0x009A
+#define LOCHNAGAR2_GPIO_CHANNEL1 0x00B9
+#define LOCHNAGAR2_GPIO_CHANNEL2 0x00BA
+#define LOCHNAGAR2_GPIO_CHANNEL3 0x00BB
+#define LOCHNAGAR2_GPIO_CHANNEL4 0x00BC
+#define LOCHNAGAR2_GPIO_CHANNEL5 0x00BD
+#define LOCHNAGAR2_GPIO_CHANNEL6 0x00BE
+#define LOCHNAGAR2_GPIO_CHANNEL7 0x00BF
+#define LOCHNAGAR2_GPIO_CHANNEL8 0x00C0
+#define LOCHNAGAR2_GPIO_CHANNEL9 0x00C1
+#define LOCHNAGAR2_GPIO_CHANNEL10 0x00C2
+#define LOCHNAGAR2_GPIO_CHANNEL11 0x00C3
+#define LOCHNAGAR2_GPIO_CHANNEL12 0x00C4
+#define LOCHNAGAR2_GPIO_CHANNEL13 0x00C5
+#define LOCHNAGAR2_GPIO_CHANNEL14 0x00C6
+#define LOCHNAGAR2_GPIO_CHANNEL15 0x00C7
+#define LOCHNAGAR2_GPIO_CHANNEL16 0x00C8
+#define LOCHNAGAR2_MINICARD_RESETS 0x00DF
+#define LOCHNAGAR2_ANALOGUE_PATH_CTRL1 0x00E3
+#define LOCHNAGAR2_ANALOGUE_PATH_CTRL2 0x00E4
+#define LOCHNAGAR2_COMMS_CTRL4 0x00F0
+#define LOCHNAGAR2_SPDIF_CTRL 0x00FE
+#define LOCHNAGAR2_IMON_CTRL1 0x0108
+#define LOCHNAGAR2_IMON_CTRL2 0x0109
+#define LOCHNAGAR2_IMON_CTRL3 0x010A
+#define LOCHNAGAR2_IMON_CTRL4 0x010B
+#define LOCHNAGAR2_IMON_DATA1 0x010C
+#define LOCHNAGAR2_IMON_DATA2 0x010D
+#define LOCHNAGAR2_POWER_CTRL 0x0116
+#define LOCHNAGAR2_MICVDD_CTRL1 0x0119
+#define LOCHNAGAR2_MICVDD_CTRL2 0x011B
+#define LOCHNAGAR2_VDDCORE_CDC_CTRL1 0x011E
+#define LOCHNAGAR2_VDDCORE_CDC_CTRL2 0x0120
+#define LOCHNAGAR2_SOUNDCARD_AIF_CTRL 0x0180
+
+/* (0x000D-0x001B, 0x0180) CDC_AIF1_CTRL - SOUNDCARD_AIF_CTRL */
+#define LOCHNAGAR2_AIF_ENA_MASK 0x8000
+#define LOCHNAGAR2_AIF_ENA_SHIFT 15
+#define LOCHNAGAR2_AIF_LRCLK_DIR_MASK 0x4000
+#define LOCHNAGAR2_AIF_LRCLK_DIR_SHIFT 14
+#define LOCHNAGAR2_AIF_BCLK_DIR_MASK 0x2000
+#define LOCHNAGAR2_AIF_BCLK_DIR_SHIFT 13
+#define LOCHNAGAR2_AIF_SRC_MASK 0x00FF
+#define LOCHNAGAR2_AIF_SRC_SHIFT 0
+
+/* (0x001E - 0x0027) CDC_MCLK1_CTRL - SOUNDCARD_MCLK_CTRL */
+#define LOCHNAGAR2_CLK_ENA_MASK 0x8000
+#define LOCHNAGAR2_CLK_ENA_SHIFT 15
+#define LOCHNAGAR2_CLK_SRC_MASK 0x00FF
+#define LOCHNAGAR2_CLK_SRC_SHIFT 0
+
+/* (0x0031 - 0x009A) GPIO_FPGA_GPIO1 - GPIO_DSP_GPIO20 */
+#define LOCHNAGAR2_GPIO_SRC_MASK 0x00FF
+#define LOCHNAGAR2_GPIO_SRC_SHIFT 0
+
+/* (0x00B9 - 0x00C8) GPIO_CHANNEL1 - GPIO_CHANNEL16 */
+#define LOCHNAGAR2_GPIO_CHANNEL_STS_MASK 0x8000
+#define LOCHNAGAR2_GPIO_CHANNEL_STS_SHIFT 15
+#define LOCHNAGAR2_GPIO_CHANNEL_SRC_MASK 0x00FF
+#define LOCHNAGAR2_GPIO_CHANNEL_SRC_SHIFT 0
+
+/* (0x00DF) MINICARD_RESETS */
+#define LOCHNAGAR2_DSP_RESET_MASK 0x0002
+#define LOCHNAGAR2_DSP_RESET_SHIFT 1
+#define LOCHNAGAR2_CDC_RESET_MASK 0x0001
+#define LOCHNAGAR2_CDC_RESET_SHIFT 0
+
+/* (0x00E3) ANALOGUE_PATH_CTRL1 */
+#define LOCHNAGAR2_ANALOGUE_PATH_UPDATE_MASK 0x8000
+#define LOCHNAGAR2_ANALOGUE_PATH_UPDATE_SHIFT 15
+#define LOCHNAGAR2_ANALOGUE_PATH_UPDATE_STS_MASK 0x4000
+#define LOCHNAGAR2_ANALOGUE_PATH_UPDATE_STS_SHIFT 14
+
+/* (0x00E4) ANALOGUE_PATH_CTRL2 */
+#define LOCHNAGAR2_P2_INPUT_BIAS_ENA_MASK 0x0080
+#define LOCHNAGAR2_P2_INPUT_BIAS_ENA_SHIFT 7
+#define LOCHNAGAR2_P1_INPUT_BIAS_ENA_MASK 0x0040
+#define LOCHNAGAR2_P1_INPUT_BIAS_ENA_SHIFT 6
+#define LOCHNAGAR2_P2_MICBIAS_SRC_MASK 0x0038
+#define LOCHNAGAR2_P2_MICBIAS_SRC_SHIFT 3
+#define LOCHNAGAR2_P1_MICBIAS_SRC_MASK 0x0007
+#define LOCHNAGAR2_P1_MICBIAS_SRC_SHIFT 0
+
+/* (0x00F0) COMMS_CTRL4 */
+#define LOCHNAGAR2_CDC_CIF1MODE_MASK 0x0001
+#define LOCHNAGAR2_CDC_CIF1MODE_SHIFT 0
+
+/* (0x00FE) SPDIF_CTRL */
+#define LOCHNAGAR2_SPDIF_HWMODE_MASK 0x0008
+#define LOCHNAGAR2_SPDIF_HWMODE_SHIFT 3
+#define LOCHNAGAR2_SPDIF_RESET_MASK 0x0001
+#define LOCHNAGAR2_SPDIF_RESET_SHIFT 0
+
+/* (0x0108) IMON_CTRL1 */
+#define LOCHNAGAR2_IMON_ENA_MASK 0x8000
+#define LOCHNAGAR2_IMON_ENA_SHIFT 15
+#define LOCHNAGAR2_IMON_MEASURED_CHANNELS_MASK 0x03FC
+#define LOCHNAGAR2_IMON_MEASURED_CHANNELS_SHIFT 2
+#define LOCHNAGAR2_IMON_MODE_SEL_MASK 0x0003
+#define LOCHNAGAR2_IMON_MODE_SEL_SHIFT 0
+
+/* (0x0109) IMON_CTRL2 */
+#define LOCHNAGAR2_IMON_FSR_MASK 0x03FF
+#define LOCHNAGAR2_IMON_FSR_SHIFT 0
+
+/* (0x010A) IMON_CTRL3 */
+#define LOCHNAGAR2_IMON_DONE_MASK 0x0004
+#define LOCHNAGAR2_IMON_DONE_SHIFT 2
+#define LOCHNAGAR2_IMON_CONFIGURE_MASK 0x0002
+#define LOCHNAGAR2_IMON_CONFIGURE_SHIFT 1
+#define LOCHNAGAR2_IMON_MEASURE_MASK 0x0001
+#define LOCHNAGAR2_IMON_MEASURE_SHIFT 0
+
+/* (0x010B) IMON_CTRL4 */
+#define LOCHNAGAR2_IMON_DATA_REQ_MASK 0x0080
+#define LOCHNAGAR2_IMON_DATA_REQ_SHIFT 7
+#define LOCHNAGAR2_IMON_CH_SEL_MASK 0x0070
+#define LOCHNAGAR2_IMON_CH_SEL_SHIFT 4
+#define LOCHNAGAR2_IMON_DATA_RDY_MASK 0x0008
+#define LOCHNAGAR2_IMON_DATA_RDY_SHIFT 3
+#define LOCHNAGAR2_IMON_CH_SRC_MASK 0x0007
+#define LOCHNAGAR2_IMON_CH_SRC_SHIFT 0
+
+/* (0x010C, 0x010D) IMON_DATA1, IMON_DATA2 */
+#define LOCHNAGAR2_IMON_DATA_MASK 0xFFFF
+#define LOCHNAGAR2_IMON_DATA_SHIFT 0
+
+/* (0x0116) POWER_CTRL */
+#define LOCHNAGAR2_PWR_ENA_MASK 0x0001
+#define LOCHNAGAR2_PWR_ENA_SHIFT 0
+
+/* (0x0119) MICVDD_CTRL1 */
+#define LOCHNAGAR2_MICVDD_REG_ENA_MASK 0x8000
+#define LOCHNAGAR2_MICVDD_REG_ENA_SHIFT 15
+
+/* (0x011B) MICVDD_CTRL2 */
+#define LOCHNAGAR2_MICVDD_VSEL_MASK 0x001F
+#define LOCHNAGAR2_MICVDD_VSEL_SHIFT 0
+
+/* (0x011E) VDDCORE_CDC_CTRL1 */
+#define LOCHNAGAR2_VDDCORE_CDC_REG_ENA_MASK 0x8000
+#define LOCHNAGAR2_VDDCORE_CDC_REG_ENA_SHIFT 15
+
+/* (0x0120) VDDCORE_CDC_CTRL2 */
+#define LOCHNAGAR2_VDDCORE_CDC_VSEL_MASK 0x007F
+#define LOCHNAGAR2_VDDCORE_CDC_VSEL_SHIFT 0
+
+#endif
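
The ANALOGUE_PATH_CTRL1 bits imply a pulse-and-poll handshake, presumably what the exported lochnagar_update_config() performs. A hedged sketch under that assumption; the status-flag polarity and timeout values are guesses, not taken from this patch:

static int example_analogue_update(struct lochnagar *lochnagar)
{
	unsigned int val;
	int ret;

	/* The analogue path must not change whilst an update is in flight */
	mutex_lock(&lochnagar->analogue_config_lock);

	ret = regmap_update_bits(lochnagar->regmap,
				 LOCHNAGAR2_ANALOGUE_PATH_CTRL1,
				 LOCHNAGAR2_ANALOGUE_PATH_UPDATE_MASK,
				 LOCHNAGAR2_ANALOGUE_PATH_UPDATE_MASK);
	if (!ret)
		ret = regmap_read_poll_timeout(lochnagar->regmap,
				LOCHNAGAR2_ANALOGUE_PATH_CTRL1, val,
				val & LOCHNAGAR2_ANALOGUE_PATH_UPDATE_STS_MASK,
				1000, 100000);

	mutex_unlock(&lochnagar->analogue_config_lock);

	return ret;
}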
diff --git a/include/linux/mfd/lp3943.h b/include/linux/mfd/lp3943.h
index 3490db782988..020a339f96e8 100644
--- a/include/linux/mfd/lp3943.h
+++ b/include/linux/mfd/lp3943.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI/National Semiconductor LP3943 Device
*
* Copyright 2013 Texas Instruments
*
* Author: Milo Kim <milo.kim@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __MFD_LP3943_H__
diff --git a/include/linux/mfd/lp873x.h b/include/linux/mfd/lp873x.h
index edbec8350a49..fe8174cc8637 100644
--- a/include/linux/mfd/lp873x.h
+++ b/include/linux/mfd/lp873x.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Functions to access LP873X power management chip.
*
- * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __LINUX_MFD_LP873X_H
diff --git a/include/linux/mfd/lp87565.h b/include/linux/mfd/lp87565.h
index d0c91ba65525..4c895072d91b 100644
--- a/include/linux/mfd/lp87565.h
+++ b/include/linux/mfd/lp87565.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Functions to access LP87565 power management chip.
*
- * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
+ * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __LINUX_MFD_LP87565_H
@@ -17,6 +14,8 @@
enum lp87565_device_type {
LP87565_DEVICE_TYPE_UNKNOWN = 0,
+ LP87565_DEVICE_TYPE_LP87524_Q1,
+ LP87565_DEVICE_TYPE_LP87561_Q1,
LP87565_DEVICE_TYPE_LP87565_Q1,
};
@@ -223,33 +222,20 @@ enum lp87565_device_type {
#define LP87565_GPIO2_SEL BIT(1)
#define LP87565_GPIO1_SEL BIT(0)
-#define LP87565_GOIO3_OD BIT(6)
-#define LP87565_GOIO2_OD BIT(5)
-#define LP87565_GOIO1_OD BIT(4)
-#define LP87565_GOIO3_DIR BIT(2)
-#define LP87565_GOIO2_DIR BIT(1)
-#define LP87565_GOIO1_DIR BIT(0)
-
-#define LP87565_GOIO3_IN BIT(2)
-#define LP87565_GOIO2_IN BIT(1)
-#define LP87565_GOIO1_IN BIT(0)
-
-#define LP87565_GOIO3_OUT BIT(2)
-#define LP87565_GOIO2_OUT BIT(1)
-#define LP87565_GOIO1_OUT BIT(0)
-
-/* Number of step-down converters available */
-#define LP87565_NUM_BUCK 6
-
-enum LP87565_regulator_id {
- /* BUCK's */
- LP87565_BUCK_0,
- LP87565_BUCK_1,
- LP87565_BUCK_2,
- LP87565_BUCK_3,
- LP87565_BUCK_10,
- LP87565_BUCK_23,
-};
+#define LP87565_GPIO3_OD BIT(6)
+#define LP87565_GPIO2_OD BIT(5)
+#define LP87565_GPIO1_OD BIT(4)
+#define LP87565_GPIO3_DIR BIT(2)
+#define LP87565_GPIO2_DIR BIT(1)
+#define LP87565_GPIO1_DIR BIT(0)
+
+#define LP87565_GPIO3_IN BIT(2)
+#define LP87565_GPIO2_IN BIT(1)
+#define LP87565_GPIO1_IN BIT(0)
+
+#define LP87565_GPIO3_OUT BIT(2)
+#define LP87565_GPIO2_OUT BIT(1)
+#define LP87565_GPIO1_OUT BIT(0)
/**
 * struct lp87565 - state holder for the LP87565 driver
@@ -266,5 +252,6 @@ struct lp87565 {
u8 rev;
u8 dev_type;
struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
};
#endif /* __LINUX_MFD_LP87565_H */
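
To show the corrected GPIO bit names in use, a purely illustrative sketch of configuring GPIO3 as a push-pull output; LP87565_REG_GPIO_CONFIG is assumed from the full header, as the register list is not part of this hunk:

static int example_lp87565_gpio3_out(struct lp87565 *lp87565)
{
	/* Set GPIO3 as output (DIR=1), push-pull rather than open-drain */
	return regmap_update_bits(lp87565->regmap, LP87565_REG_GPIO_CONFIG,
				  LP87565_GPIO3_DIR | LP87565_GPIO3_OD,
				  LP87565_GPIO3_DIR);
}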
diff --git a/include/linux/mfd/lp8788-isink.h b/include/linux/mfd/lp8788-isink.h
index f38262d21ff1..464dc4c937e4 100644
--- a/include/linux/mfd/lp8788-isink.h
+++ b/include/linux/mfd/lp8788-isink.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI LP8788 MFD - common definitions for current sinks
*
* Copyright 2012 Texas Instruments
*
* Author: Milo(Woogyom) Kim <milo.kim@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __ISINK_LP8788_H__
diff --git a/include/linux/mfd/lp8788.h b/include/linux/mfd/lp8788.h
index 786bf6679a28..3d5c480d58ea 100644
--- a/include/linux/mfd/lp8788.h
+++ b/include/linux/mfd/lp8788.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI LP8788 MFD Device
*
* Copyright 2012 Texas Instruments
*
* Author: Milo(Woogyom) Kim <milo.kim@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __MFD_LP8788_H__
@@ -182,20 +178,6 @@ struct lp8788_buck2_dvs {
};
/*
- * struct lp8788_ldo_enable_pin
- *
- * Basically, all LDOs are enabled through the I2C commands.
- * But ALDO 1 ~ 5, 7, DLDO 7, 9, 11 can be enabled by external gpio pins.
- *
- * @gpio : gpio number which is used for enabling ldos
- * @init_state : initial gpio state (ex. GPIOF_OUT_INIT_LOW)
- */
-struct lp8788_ldo_enable_pin {
- int gpio;
- int init_state;
-};
-
-/*
* struct lp8788_chg_param
* @addr : charging control register address (range : 0x11 ~ 0x1C)
* @val : charging parameter value
@@ -288,7 +270,6 @@ struct lp8788_vib_platform_data {
* @aldo_data : regulator initial data for analog ldo
* @buck1_dvs : gpio configurations for buck1 dvs
* @buck2_dvs : gpio configurations for buck2 dvs
- * @ldo_pin : gpio configurations for enabling LDOs
* @chg_pdata : platform data for charger driver
* @alarm_sel : rtc alarm selection (1 or 2)
* @bl_pdata : configurable data for backlight driver
@@ -306,7 +287,6 @@ struct lp8788_platform_data {
struct regulator_init_data *aldo_data[LP8788_NUM_ALDOS];
struct lp8788_buck1_dvs *buck1_dvs;
struct lp8788_buck2_dvs *buck2_dvs;
- struct lp8788_ldo_enable_pin *ldo_pin[EN_LDOS_MAX];
/* charger */
struct lp8788_charger_platform_data *chg_pdata;
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
index fba8fcb54f8c..ea4a4b1b246a 100644
--- a/include/linux/mfd/lpc_ich.h
+++ b/include/linux/mfd/lpc_ich.h
@@ -1,26 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/drivers/mfd/lpc_ich.h
*
* Copyright (c) 2012 Extreme Engineering Solution, Inc.
* Author: Aaron Sierra <asierra@xes-inc.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef LPC_ICH_H
#define LPC_ICH_H
-#include <linux/platform_data/intel-spi.h>
+#include <linux/platform_data/x86/spi-intel.h>
/* GPIO resources */
#define ICH_RES_GPIO 0
diff --git a/include/linux/mfd/madera/core.h b/include/linux/mfd/madera/core.h
new file mode 100644
index 000000000000..03a8a788424a
--- /dev/null
+++ b/include/linux/mfd/madera/core.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * MFD internals for Cirrus Logic Madera codecs
+ *
+ * Copyright (C) 2015-2018 Cirrus Logic
+ */
+
+#ifndef MADERA_CORE_H
+#define MADERA_CORE_H
+
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/madera/pdata.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+enum madera_type {
+ /* 0 is reserved for indicating failure to identify */
+ CS47L35 = 1,
+ CS47L85 = 2,
+ CS47L90 = 3,
+ CS47L91 = 4,
+ CS47L92 = 5,
+ CS47L93 = 6,
+ WM1840 = 7,
+ CS47L15 = 8,
+ CS42L92 = 9,
+};
+
+enum {
+ MADERA_MCLK1,
+ MADERA_MCLK2,
+ MADERA_MCLK3,
+ MADERA_NUM_MCLK
+};
+
+#define MADERA_MAX_CORE_SUPPLIES 2
+#define MADERA_MAX_GPIOS 40
+
+#define CS47L15_NUM_GPIOS 15
+#define CS47L35_NUM_GPIOS 16
+#define CS47L85_NUM_GPIOS 40
+#define CS47L90_NUM_GPIOS 38
+#define CS47L92_NUM_GPIOS 16
+
+#define MADERA_MAX_MICBIAS 4
+
+#define MADERA_MAX_HP_OUTPUT 3
+
+/* Notifier events */
+#define MADERA_NOTIFY_VOICE_TRIGGER 0x1
+#define MADERA_NOTIFY_HPDET 0x2
+#define MADERA_NOTIFY_MICDET 0x4
+
+/* GPIO Function Definitions */
+#define MADERA_GP_FN_ALTERNATE 0x00
+#define MADERA_GP_FN_GPIO 0x01
+#define MADERA_GP_FN_DSP_GPIO 0x02
+#define MADERA_GP_FN_IRQ1 0x03
+#define MADERA_GP_FN_IRQ2 0x04
+#define MADERA_GP_FN_FLL1_CLOCK 0x10
+#define MADERA_GP_FN_FLL2_CLOCK 0x11
+#define MADERA_GP_FN_FLL3_CLOCK 0x12
+#define MADERA_GP_FN_FLLAO_CLOCK 0x13
+#define MADERA_GP_FN_FLL1_LOCK 0x18
+#define MADERA_GP_FN_FLL2_LOCK 0x19
+#define MADERA_GP_FN_FLL3_LOCK 0x1A
+#define MADERA_GP_FN_FLLAO_LOCK 0x1B
+#define MADERA_GP_FN_OPCLK_OUT 0x40
+#define MADERA_GP_FN_OPCLK_ASYNC_OUT 0x41
+#define MADERA_GP_FN_PWM1 0x48
+#define MADERA_GP_FN_PWM2 0x49
+#define MADERA_GP_FN_SPDIF_OUT 0x4C
+#define MADERA_GP_FN_HEADPHONE_DET 0x50
+#define MADERA_GP_FN_MIC_DET 0x58
+#define MADERA_GP_FN_DRC1_SIGNAL_DETECT 0x80
+#define MADERA_GP_FN_DRC2_SIGNAL_DETECT 0x81
+#define MADERA_GP_FN_ASRC1_IN1_LOCK 0x88
+#define MADERA_GP_FN_ASRC1_IN2_LOCK 0x89
+#define MADERA_GP_FN_ASRC2_IN1_LOCK 0x8A
+#define MADERA_GP_FN_ASRC2_IN2_LOCK 0x8B
+#define MADERA_GP_FN_DSP_IRQ1 0xA0
+#define MADERA_GP_FN_DSP_IRQ2 0xA1
+#define MADERA_GP_FN_DSP_IRQ3 0xA2
+#define MADERA_GP_FN_DSP_IRQ4 0xA3
+#define MADERA_GP_FN_DSP_IRQ5 0xA4
+#define MADERA_GP_FN_DSP_IRQ6 0xA5
+#define MADERA_GP_FN_DSP_IRQ7 0xA6
+#define MADERA_GP_FN_DSP_IRQ8 0xA7
+#define MADERA_GP_FN_DSP_IRQ9 0xA8
+#define MADERA_GP_FN_DSP_IRQ10 0xA9
+#define MADERA_GP_FN_DSP_IRQ11 0xAA
+#define MADERA_GP_FN_DSP_IRQ12 0xAB
+#define MADERA_GP_FN_DSP_IRQ13 0xAC
+#define MADERA_GP_FN_DSP_IRQ14 0xAD
+#define MADERA_GP_FN_DSP_IRQ15 0xAE
+#define MADERA_GP_FN_DSP_IRQ16 0xAF
+#define MADERA_GP_FN_HPOUT1L_SC 0xB0
+#define MADERA_GP_FN_HPOUT1R_SC 0xB1
+#define MADERA_GP_FN_HPOUT2L_SC 0xB2
+#define MADERA_GP_FN_HPOUT2R_SC 0xB3
+#define MADERA_GP_FN_HPOUT3L_SC 0xB4
+#define MADERA_GP_FN_HPOUT4R_SC 0xB5
+#define MADERA_GP_FN_SPKOUTL_SC 0xB6
+#define MADERA_GP_FN_SPKOUTR_SC 0xB7
+#define MADERA_GP_FN_HPOUT1L_ENA 0xC0
+#define MADERA_GP_FN_HPOUT1R_ENA 0xC1
+#define MADERA_GP_FN_HPOUT2L_ENA 0xC2
+#define MADERA_GP_FN_HPOUT2R_ENA 0xC3
+#define MADERA_GP_FN_HPOUT3L_ENA 0xC4
+#define MADERA_GP_FN_HPOUT4R_ENA 0xC5
+#define MADERA_GP_FN_SPKOUTL_ENA 0xC6
+#define MADERA_GP_FN_SPKOUTR_ENA 0xC7
+#define MADERA_GP_FN_HPOUT1L_DIS 0xD0
+#define MADERA_GP_FN_HPOUT1R_DIS 0xD1
+#define MADERA_GP_FN_HPOUT2L_DIS 0xD2
+#define MADERA_GP_FN_HPOUT2R_DIS 0xD3
+#define MADERA_GP_FN_HPOUT3L_DIS 0xD4
+#define MADERA_GP_FN_HPOUT4R_DIS 0xD5
+#define MADERA_GP_FN_SPKOUTL_DIS 0xD6
+#define MADERA_GP_FN_SPKOUTR_DIS 0xD7
+#define MADERA_GP_FN_SPK_SHUTDOWN 0xE0
+#define MADERA_GP_FN_SPK_OVH_SHUTDOWN 0xE1
+#define MADERA_GP_FN_SPK_OVH_WARN 0xE2
+#define MADERA_GP_FN_TIMER1_STATUS 0x140
+#define MADERA_GP_FN_TIMER2_STATUS 0x141
+#define MADERA_GP_FN_TIMER3_STATUS 0x142
+#define MADERA_GP_FN_TIMER4_STATUS 0x143
+#define MADERA_GP_FN_TIMER5_STATUS 0x144
+#define MADERA_GP_FN_TIMER6_STATUS 0x145
+#define MADERA_GP_FN_TIMER7_STATUS 0x146
+#define MADERA_GP_FN_TIMER8_STATUS 0x147
+#define MADERA_GP_FN_EVENTLOG1_FIFO_STS 0x150
+#define MADERA_GP_FN_EVENTLOG2_FIFO_STS 0x151
+#define MADERA_GP_FN_EVENTLOG3_FIFO_STS 0x152
+#define MADERA_GP_FN_EVENTLOG4_FIFO_STS 0x153
+#define MADERA_GP_FN_EVENTLOG5_FIFO_STS 0x154
+#define MADERA_GP_FN_EVENTLOG6_FIFO_STS 0x155
+#define MADERA_GP_FN_EVENTLOG7_FIFO_STS 0x156
+#define MADERA_GP_FN_EVENTLOG8_FIFO_STS 0x157
+
+struct snd_soc_dapm_context;
+
+/*
+ * struct madera - internal data shared by the set of Madera drivers
+ *
+ * This should not be used by anything except child drivers of the Madera MFD
+ *
+ * @regmap: pointer to the regmap instance for 16-bit registers
+ * @regmap_32bit: pointer to the regmap instance for 32-bit registers
+ * @dev: pointer to the MFD device
+ * @type: type of codec
+ * @rev: silicon revision
+ * @type_name: display name of this codec
+ * @num_core_supplies: number of core supply regulators
+ * @core_supplies: list of core supplies that are always required
+ * @dcvdd: pointer to DCVDD regulator
+ * @internal_dcvdd: true if DCVDD is supplied from the internal LDO1
+ * @pdata: our pdata
+ * @irq_dev: the irqchip child driver device
+ * @irq_data: pointer to irqchip data for the child irqchip driver
+ * @irq: host irq number from SPI or I2C configuration
+ * @mclk: Structure holding clock supplies
+ * @out_clamp: indicates output clamp state for each analogue output
+ * @out_shorted: indicates short circuit state for each analogue output
+ * @hp_ena: bitflags of enable state for the headphone outputs
+ * @num_micbias: number of MICBIAS outputs
+ * @num_childbias: number of child biases for each MICBIAS
+ * @dapm: pointer to codec driver DAPM context
+ * @notifier: notifier for signalling events to ASoC machine driver
+ */
+struct madera {
+ struct regmap *regmap;
+ struct regmap *regmap_32bit;
+
+ struct device *dev;
+
+ enum madera_type type;
+ unsigned int rev;
+ const char *type_name;
+
+ int num_core_supplies;
+ struct regulator_bulk_data core_supplies[MADERA_MAX_CORE_SUPPLIES];
+ struct regulator *dcvdd;
+ bool internal_dcvdd;
+ bool reset_errata;
+
+ struct madera_pdata pdata;
+
+ struct device *irq_dev;
+ struct regmap_irq_chip_data *irq_data;
+ int irq;
+
+ struct clk_bulk_data mclk[MADERA_NUM_MCLK];
+
+ unsigned int num_micbias;
+ unsigned int num_childbias[MADERA_MAX_MICBIAS];
+
+ struct snd_soc_dapm_context *dapm;
+ struct mutex dapm_ptr_lock;
+ unsigned int hp_ena;
+ bool out_clamp[MADERA_MAX_HP_OUTPUT];
+ bool out_shorted[MADERA_MAX_HP_OUTPUT];
+
+ struct blocking_notifier_head notifier;
+};
+#endif
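
A minimal sketch of a machine-driver consumer of the @notifier head above, assuming events arrive as the notifier action carrying the MADERA_NOTIFY_* flag values:

static int example_madera_notify(struct notifier_block *nb,
				 unsigned long event, void *data)
{
	if (event & MADERA_NOTIFY_HPDET)
		pr_debug("madera: headphone detect event\n");
	if (event & MADERA_NOTIFY_VOICE_TRIGGER)
		pr_debug("madera: voice trigger event\n");

	return NOTIFY_OK;
}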
diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h
new file mode 100644
index 000000000000..32e3470708ed
--- /dev/null
+++ b/include/linux/mfd/madera/pdata.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Platform data for Cirrus Logic Madera codecs
+ *
+ * Copyright (C) 2015-2018 Cirrus Logic
+ */
+
+#ifndef MADERA_PDATA_H
+#define MADERA_PDATA_H
+
+#include <linux/kernel.h>
+#include <linux/regulator/arizona-ldo1.h>
+#include <linux/regulator/arizona-micsupp.h>
+#include <linux/regulator/machine.h>
+#include <sound/madera-pdata.h>
+
+#define MADERA_MAX_MICBIAS 4
+#define MADERA_MAX_CHILD_MICBIAS 4
+
+#define MADERA_MAX_GPSW 2
+
+struct gpio_desc;
+struct pinctrl_map;
+
+/**
+ * struct madera_pdata - Configuration data for Madera devices
+ *
+ * @reset: GPIO controlling /RESET (NULL = none)
+ * @ldo1: Substruct of pdata for the LDO1 regulator
+ * @micvdd: Substruct of pdata for the MICVDD regulator
+ * @irq_flags: Mode for primary IRQ (defaults to active low)
+ * @gpio_base: Base GPIO number
+ * @gpio_configs: Array of GPIO configurations (See
+ * Documentation/driver-api/pin-control.rst)
+ * @n_gpio_configs: Number of entries in gpio_configs
+ * @gpsw: General purpose switch mode setting. Depends on the external
+ * hardware connected to the switch. (See the SW1_MODE field
+ * in the datasheet for the available values for your codec)
+ * @codec: Substruct of pdata for the ASoC codec driver
+ */
+struct madera_pdata {
+ struct gpio_desc *reset;
+
+ struct arizona_ldo1_pdata ldo1;
+ struct arizona_micsupp_pdata micvdd;
+
+ unsigned int irq_flags;
+ int gpio_base;
+
+ const struct pinctrl_map *gpio_configs;
+ int n_gpio_configs;
+
+ u32 gpsw[MADERA_MAX_GPSW];
+
+ struct madera_codec_pdata codec;
+};
+
+#endif
diff --git a/include/linux/mfd/madera/registers.h b/include/linux/mfd/madera/registers.h
new file mode 100644
index 000000000000..b44aeb461d0c
--- /dev/null
+++ b/include/linux/mfd/madera/registers.h
@@ -0,0 +1,3449 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Madera register definitions
+ *
+ * Copyright (C) 2015-2018 Cirrus Logic
+ */
+
+#ifndef MADERA_REGISTERS_H
+#define MADERA_REGISTERS_H
+
+/*
+ * Register Addresses.
+ */
+#define MADERA_SOFTWARE_RESET 0x00
+#define MADERA_HARDWARE_REVISION 0x01
+#define MADERA_CTRL_IF_CFG_1 0x08
+#define MADERA_CTRL_IF_CFG_2 0x09
+#define MADERA_CTRL_IF_CFG_3 0x0A
+#define MADERA_WRITE_SEQUENCER_CTRL_0 0x16
+#define MADERA_WRITE_SEQUENCER_CTRL_1 0x17
+#define MADERA_WRITE_SEQUENCER_CTRL_2 0x18
+#define MADERA_TONE_GENERATOR_1 0x20
+#define MADERA_TONE_GENERATOR_2 0x21
+#define MADERA_TONE_GENERATOR_3 0x22
+#define MADERA_TONE_GENERATOR_4 0x23
+#define MADERA_TONE_GENERATOR_5 0x24
+#define MADERA_PWM_DRIVE_1 0x30
+#define MADERA_PWM_DRIVE_2 0x31
+#define MADERA_PWM_DRIVE_3 0x32
+#define MADERA_SEQUENCE_CONTROL 0x41
+#define MADERA_SAMPLE_RATE_SEQUENCE_SELECT_1 0x61
+#define MADERA_SAMPLE_RATE_SEQUENCE_SELECT_2 0x62
+#define MADERA_SAMPLE_RATE_SEQUENCE_SELECT_3 0x63
+#define MADERA_SAMPLE_RATE_SEQUENCE_SELECT_4 0x64
+#define MADERA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1 0x66
+#define MADERA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2 0x67
+#define MADERA_HAPTICS_CONTROL_1 0x90
+#define MADERA_HAPTICS_CONTROL_2 0x91
+#define MADERA_HAPTICS_PHASE_1_INTENSITY 0x92
+#define MADERA_HAPTICS_PHASE_1_DURATION 0x93
+#define MADERA_HAPTICS_PHASE_2_INTENSITY 0x94
+#define MADERA_HAPTICS_PHASE_2_DURATION 0x95
+#define MADERA_HAPTICS_PHASE_3_INTENSITY 0x96
+#define MADERA_HAPTICS_PHASE_3_DURATION 0x97
+#define MADERA_HAPTICS_STATUS 0x98
+#define MADERA_COMFORT_NOISE_GENERATOR 0xA0
+#define MADERA_CLOCK_32K_1 0x100
+#define MADERA_SYSTEM_CLOCK_1 0x101
+#define MADERA_SAMPLE_RATE_1 0x102
+#define MADERA_SAMPLE_RATE_2 0x103
+#define MADERA_SAMPLE_RATE_3 0x104
+#define MADERA_SAMPLE_RATE_1_STATUS 0x10A
+#define MADERA_SAMPLE_RATE_2_STATUS 0x10B
+#define MADERA_SAMPLE_RATE_3_STATUS 0x10C
+#define MADERA_ASYNC_CLOCK_1 0x112
+#define MADERA_ASYNC_SAMPLE_RATE_1 0x113
+#define MADERA_ASYNC_SAMPLE_RATE_2 0x114
+#define MADERA_ASYNC_SAMPLE_RATE_1_STATUS 0x11B
+#define MADERA_ASYNC_SAMPLE_RATE_2_STATUS 0x11C
+#define MADERA_DSP_CLOCK_1 0x120
+#define MADERA_DSP_CLOCK_2 0x122
+#define MADERA_OUTPUT_SYSTEM_CLOCK 0x149
+#define MADERA_OUTPUT_ASYNC_CLOCK 0x14A
+#define MADERA_RATE_ESTIMATOR_1 0x152
+#define MADERA_RATE_ESTIMATOR_2 0x153
+#define MADERA_RATE_ESTIMATOR_3 0x154
+#define MADERA_RATE_ESTIMATOR_4 0x155
+#define MADERA_RATE_ESTIMATOR_5 0x156
+#define MADERA_FLL1_CONTROL_1 0x171
+#define MADERA_FLL1_CONTROL_2 0x172
+#define MADERA_FLL1_CONTROL_3 0x173
+#define MADERA_FLL1_CONTROL_4 0x174
+#define MADERA_FLL1_CONTROL_5 0x175
+#define MADERA_FLL1_CONTROL_6 0x176
+#define CS47L92_FLL1_CONTROL_7 0x177
+#define CS47L92_FLL1_CONTROL_8 0x178
+#define MADERA_FLL1_CONTROL_7 0x179
+#define CS47L92_FLL1_CONTROL_9 0x179
+#define MADERA_FLL1_EFS_2 0x17A
+#define CS47L92_FLL1_CONTROL_10 0x17A
+#define MADERA_FLL1_CONTROL_11 0x17B
+#define MADERA_FLL1_DIGITAL_TEST_1 0x17D
+#define CS47L35_FLL1_SYNCHRONISER_1 0x17F
+#define CS47L35_FLL1_SYNCHRONISER_2 0x180
+#define CS47L35_FLL1_SYNCHRONISER_3 0x181
+#define CS47L35_FLL1_SYNCHRONISER_4 0x182
+#define CS47L35_FLL1_SYNCHRONISER_5 0x183
+#define CS47L35_FLL1_SYNCHRONISER_6 0x184
+#define CS47L35_FLL1_SYNCHRONISER_7 0x185
+#define CS47L35_FLL1_SPREAD_SPECTRUM 0x187
+#define CS47L35_FLL1_GPIO_CLOCK 0x188
+#define MADERA_FLL1_SYNCHRONISER_1 0x181
+#define MADERA_FLL1_SYNCHRONISER_2 0x182
+#define MADERA_FLL1_SYNCHRONISER_3 0x183
+#define MADERA_FLL1_SYNCHRONISER_4 0x184
+#define MADERA_FLL1_SYNCHRONISER_5 0x185
+#define MADERA_FLL1_SYNCHRONISER_6 0x186
+#define MADERA_FLL1_SYNCHRONISER_7 0x187
+#define MADERA_FLL1_SPREAD_SPECTRUM 0x189
+#define MADERA_FLL1_GPIO_CLOCK 0x18A
+#define CS47L92_FLL1_GPIO_CLOCK 0x18E
+#define MADERA_FLL2_CONTROL_1 0x191
+#define MADERA_FLL2_CONTROL_2 0x192
+#define MADERA_FLL2_CONTROL_3 0x193
+#define MADERA_FLL2_CONTROL_4 0x194
+#define MADERA_FLL2_CONTROL_5 0x195
+#define MADERA_FLL2_CONTROL_6 0x196
+#define CS47L92_FLL2_CONTROL_7 0x197
+#define CS47L92_FLL2_CONTROL_8 0x198
+#define MADERA_FLL2_CONTROL_7 0x199
+#define CS47L92_FLL2_CONTROL_9 0x199
+#define MADERA_FLL2_EFS_2 0x19A
+#define CS47L92_FLL2_CONTROL_10 0x19A
+#define MADERA_FLL2_CONTROL_11 0x19B
+#define MADERA_FLL2_DIGITAL_TEST_1 0x19D
+#define MADERA_FLL2_SYNCHRONISER_1 0x1A1
+#define MADERA_FLL2_SYNCHRONISER_2 0x1A2
+#define MADERA_FLL2_SYNCHRONISER_3 0x1A3
+#define MADERA_FLL2_SYNCHRONISER_4 0x1A4
+#define MADERA_FLL2_SYNCHRONISER_5 0x1A5
+#define MADERA_FLL2_SYNCHRONISER_6 0x1A6
+#define MADERA_FLL2_SYNCHRONISER_7 0x1A7
+#define MADERA_FLL2_SPREAD_SPECTRUM 0x1A9
+#define MADERA_FLL2_GPIO_CLOCK 0x1AA
+#define CS47L92_FLL2_GPIO_CLOCK 0x1AE
+#define MADERA_FLL3_CONTROL_1 0x1B1
+#define MADERA_FLL3_CONTROL_2 0x1B2
+#define MADERA_FLL3_CONTROL_3 0x1B3
+#define MADERA_FLL3_CONTROL_4 0x1B4
+#define MADERA_FLL3_CONTROL_5 0x1B5
+#define MADERA_FLL3_CONTROL_6 0x1B6
+#define MADERA_FLL3_CONTROL_7 0x1B9
+#define MADERA_FLL3_SYNCHRONISER_1 0x1C1
+#define MADERA_FLL3_SYNCHRONISER_2 0x1C2
+#define MADERA_FLL3_SYNCHRONISER_3 0x1C3
+#define MADERA_FLL3_SYNCHRONISER_4 0x1C4
+#define MADERA_FLL3_SYNCHRONISER_5 0x1C5
+#define MADERA_FLL3_SYNCHRONISER_6 0x1C6
+#define MADERA_FLL3_SYNCHRONISER_7 0x1C7
+#define MADERA_FLL3_SPREAD_SPECTRUM 0x1C9
+#define MADERA_FLL3_GPIO_CLOCK 0x1CA
+#define MADERA_FLLAO_CONTROL_1 0x1D1
+#define MADERA_FLLAO_CONTROL_2 0x1D2
+#define MADERA_FLLAO_CONTROL_3 0x1D3
+#define MADERA_FLLAO_CONTROL_4 0x1D4
+#define MADERA_FLLAO_CONTROL_5 0x1D5
+#define MADERA_FLLAO_CONTROL_6 0x1D6
+#define MADERA_FLLAO_CONTROL_7 0x1D8
+#define MADERA_FLLAO_CONTROL_8 0x1DA
+#define MADERA_FLLAO_CONTROL_9 0x1DB
+#define MADERA_FLLAO_CONTROL_10 0x1DC
+#define MADERA_FLLAO_CONTROL_11 0x1DD
+#define MADERA_MIC_CHARGE_PUMP_1 0x200
+#define MADERA_HP_CHARGE_PUMP_8 0x20B
+#define MADERA_LDO1_CONTROL_1 0x210
+#define MADERA_LDO2_CONTROL_1 0x213
+#define MADERA_MIC_BIAS_CTRL_1 0x218
+#define MADERA_MIC_BIAS_CTRL_2 0x219
+#define MADERA_MIC_BIAS_CTRL_3 0x21A
+#define MADERA_MIC_BIAS_CTRL_4 0x21B
+#define MADERA_MIC_BIAS_CTRL_5 0x21C
+#define MADERA_MIC_BIAS_CTRL_6 0x21E
+#define MADERA_HP_CTRL_1L 0x225
+#define MADERA_HP_CTRL_1R 0x226
+#define MADERA_HP_CTRL_2L 0x227
+#define MADERA_HP_CTRL_2R 0x228
+#define MADERA_HP_CTRL_3L 0x229
+#define MADERA_HP_CTRL_3R 0x22A
+#define MADERA_DCS_HP1L_CONTROL 0x232
+#define MADERA_DCS_HP1R_CONTROL 0x238
+#define MADERA_EDRE_HP_STEREO_CONTROL 0x27E
+#define MADERA_ACCESSORY_DETECT_MODE_1 0x293
+#define MADERA_HEADPHONE_DETECT_0 0x299
+#define MADERA_HEADPHONE_DETECT_1 0x29B
+#define MADERA_HEADPHONE_DETECT_2 0x29C
+#define MADERA_HEADPHONE_DETECT_3 0x29D
+#define MADERA_HEADPHONE_DETECT_4 0x29E
+#define MADERA_HEADPHONE_DETECT_5 0x29F
+#define MADERA_MIC_DETECT_1_CONTROL_0 0x2A2
+#define MADERA_MIC_DETECT_1_CONTROL_1 0x2A3
+#define MADERA_MIC_DETECT_1_CONTROL_2 0x2A4
+#define MADERA_MIC_DETECT_1_CONTROL_3 0x2A5
+#define MADERA_MIC_DETECT_1_LEVEL_1 0x2A6
+#define MADERA_MIC_DETECT_1_LEVEL_2 0x2A7
+#define MADERA_MIC_DETECT_1_LEVEL_3 0x2A8
+#define MADERA_MIC_DETECT_1_LEVEL_4 0x2A9
+#define MADERA_MIC_DETECT_1_CONTROL_4 0x2AB
+#define MADERA_MIC_DETECT_2_CONTROL_0 0x2B2
+#define MADERA_MIC_DETECT_2_CONTROL_1 0x2B3
+#define MADERA_MIC_DETECT_2_CONTROL_2 0x2B4
+#define MADERA_MIC_DETECT_2_CONTROL_3 0x2B5
+#define MADERA_MIC_DETECT_2_LEVEL_1 0x2B6
+#define MADERA_MIC_DETECT_2_LEVEL_2 0x2B7
+#define MADERA_MIC_DETECT_2_LEVEL_3 0x2B8
+#define MADERA_MIC_DETECT_2_LEVEL_4 0x2B9
+#define MADERA_MIC_DETECT_2_CONTROL_4 0x2BB
+#define MADERA_MICD_CLAMP_CONTROL 0x2C6
+#define MADERA_GP_SWITCH_1 0x2C8
+#define MADERA_JACK_DETECT_ANALOGUE 0x2D3
+#define MADERA_INPUT_ENABLES 0x300
+#define MADERA_INPUT_ENABLES_STATUS 0x301
+#define MADERA_INPUT_RATE 0x308
+#define MADERA_INPUT_VOLUME_RAMP 0x309
+#define MADERA_HPF_CONTROL 0x30C
+#define MADERA_IN1L_CONTROL 0x310
+#define MADERA_ADC_DIGITAL_VOLUME_1L 0x311
+#define MADERA_DMIC1L_CONTROL 0x312
+#define MADERA_IN1L_RATE_CONTROL 0x313
+#define MADERA_IN1R_CONTROL 0x314
+#define MADERA_ADC_DIGITAL_VOLUME_1R 0x315
+#define MADERA_DMIC1R_CONTROL 0x316
+#define MADERA_IN1R_RATE_CONTROL 0x317
+#define MADERA_IN2L_CONTROL 0x318
+#define MADERA_ADC_DIGITAL_VOLUME_2L 0x319
+#define MADERA_DMIC2L_CONTROL 0x31A
+#define MADERA_IN2L_RATE_CONTROL 0x31B
+#define MADERA_IN2R_CONTROL 0x31C
+#define MADERA_ADC_DIGITAL_VOLUME_2R 0x31D
+#define MADERA_DMIC2R_CONTROL 0x31E
+#define MADERA_IN2R_RATE_CONTROL 0x31F
+#define MADERA_IN3L_CONTROL 0x320
+#define MADERA_ADC_DIGITAL_VOLUME_3L 0x321
+#define MADERA_DMIC3L_CONTROL 0x322
+#define MADERA_IN3L_RATE_CONTROL 0x323
+#define MADERA_IN3R_CONTROL 0x324
+#define MADERA_ADC_DIGITAL_VOLUME_3R 0x325
+#define MADERA_DMIC3R_CONTROL 0x326
+#define MADERA_IN3R_RATE_CONTROL 0x327
+#define MADERA_IN4L_CONTROL 0x328
+#define MADERA_ADC_DIGITAL_VOLUME_4L 0x329
+#define MADERA_DMIC4L_CONTROL 0x32A
+#define MADERA_IN4L_RATE_CONTROL 0x32B
+#define MADERA_IN4R_CONTROL 0x32C
+#define MADERA_ADC_DIGITAL_VOLUME_4R 0x32D
+#define MADERA_DMIC4R_CONTROL 0x32E
+#define MADERA_IN4R_RATE_CONTROL 0x32F
+#define MADERA_IN5L_CONTROL 0x330
+#define MADERA_ADC_DIGITAL_VOLUME_5L 0x331
+#define MADERA_DMIC5L_CONTROL 0x332
+#define MADERA_IN5L_RATE_CONTROL 0x333
+#define MADERA_IN5R_CONTROL 0x334
+#define MADERA_ADC_DIGITAL_VOLUME_5R 0x335
+#define MADERA_DMIC5R_CONTROL 0x336
+#define MADERA_IN5R_RATE_CONTROL 0x337
+#define MADERA_IN6L_CONTROL 0x338
+#define MADERA_ADC_DIGITAL_VOLUME_6L 0x339
+#define MADERA_DMIC6L_CONTROL 0x33A
+#define MADERA_IN6R_CONTROL 0x33C
+#define MADERA_ADC_DIGITAL_VOLUME_6R 0x33D
+#define MADERA_DMIC6R_CONTROL 0x33E
+#define CS47L15_ADC_INT_BIAS 0x3A8
+#define CS47L15_PGA_BIAS_SEL 0x3C4
+#define MADERA_OUTPUT_ENABLES_1 0x400
+#define MADERA_OUTPUT_STATUS_1 0x401
+#define MADERA_RAW_OUTPUT_STATUS_1 0x406
+#define MADERA_OUTPUT_RATE_1 0x408
+#define MADERA_OUTPUT_VOLUME_RAMP 0x409
+#define MADERA_OUTPUT_PATH_CONFIG_1L 0x410
+#define MADERA_DAC_DIGITAL_VOLUME_1L 0x411
+#define MADERA_OUTPUT_PATH_CONFIG_1 0x412
+#define MADERA_NOISE_GATE_SELECT_1L 0x413
+#define MADERA_OUTPUT_PATH_CONFIG_1R 0x414
+#define MADERA_DAC_DIGITAL_VOLUME_1R 0x415
+#define MADERA_NOISE_GATE_SELECT_1R 0x417
+#define MADERA_OUTPUT_PATH_CONFIG_2L 0x418
+#define MADERA_DAC_DIGITAL_VOLUME_2L 0x419
+#define MADERA_OUTPUT_PATH_CONFIG_2 0x41A
+#define MADERA_NOISE_GATE_SELECT_2L 0x41B
+#define MADERA_OUTPUT_PATH_CONFIG_2R 0x41C
+#define MADERA_DAC_DIGITAL_VOLUME_2R 0x41D
+#define MADERA_NOISE_GATE_SELECT_2R 0x41F
+#define MADERA_OUTPUT_PATH_CONFIG_3L 0x420
+#define MADERA_DAC_DIGITAL_VOLUME_3L 0x421
+#define MADERA_OUTPUT_PATH_CONFIG_3 0x422
+#define MADERA_NOISE_GATE_SELECT_3L 0x423
+#define MADERA_OUTPUT_PATH_CONFIG_3R 0x424
+#define MADERA_DAC_DIGITAL_VOLUME_3R 0x425
+#define MADERA_NOISE_GATE_SELECT_3R 0x427
+#define MADERA_OUTPUT_PATH_CONFIG_4L 0x428
+#define MADERA_DAC_DIGITAL_VOLUME_4L 0x429
+#define MADERA_NOISE_GATE_SELECT_4L 0x42B
+#define MADERA_OUTPUT_PATH_CONFIG_4R 0x42C
+#define MADERA_DAC_DIGITAL_VOLUME_4R 0x42D
+#define MADERA_NOISE_GATE_SELECT_4R 0x42F
+#define MADERA_OUTPUT_PATH_CONFIG_5L 0x430
+#define MADERA_DAC_DIGITAL_VOLUME_5L 0x431
+#define MADERA_NOISE_GATE_SELECT_5L 0x433
+#define MADERA_OUTPUT_PATH_CONFIG_5R 0x434
+#define MADERA_DAC_DIGITAL_VOLUME_5R 0x435
+#define MADERA_NOISE_GATE_SELECT_5R 0x437
+#define MADERA_OUTPUT_PATH_CONFIG_6L 0x438
+#define MADERA_DAC_DIGITAL_VOLUME_6L 0x439
+#define MADERA_NOISE_GATE_SELECT_6L 0x43B
+#define MADERA_OUTPUT_PATH_CONFIG_6R 0x43C
+#define MADERA_DAC_DIGITAL_VOLUME_6R 0x43D
+#define MADERA_NOISE_GATE_SELECT_6R 0x43F
+#define MADERA_DAC_AEC_CONTROL_1 0x450
+#define MADERA_DAC_AEC_CONTROL_2 0x451
+#define MADERA_NOISE_GATE_CONTROL 0x458
+#define MADERA_PDM_SPK1_CTRL_1 0x490
+#define MADERA_PDM_SPK1_CTRL_2 0x491
+#define MADERA_PDM_SPK2_CTRL_1 0x492
+#define MADERA_PDM_SPK2_CTRL_2 0x493
+#define MADERA_HP1_SHORT_CIRCUIT_CTRL 0x4A0
+#define MADERA_HP2_SHORT_CIRCUIT_CTRL 0x4A1
+#define MADERA_HP3_SHORT_CIRCUIT_CTRL 0x4A2
+#define MADERA_HP_TEST_CTRL_1 0x4A4
+#define MADERA_HP_TEST_CTRL_5 0x4A8
+#define MADERA_HP_TEST_CTRL_6 0x4A9
+#define MADERA_AIF1_BCLK_CTRL 0x500
+#define MADERA_AIF1_TX_PIN_CTRL 0x501
+#define MADERA_AIF1_RX_PIN_CTRL 0x502
+#define MADERA_AIF1_RATE_CTRL 0x503
+#define MADERA_AIF1_FORMAT 0x504
+#define MADERA_AIF1_RX_BCLK_RATE 0x506
+#define MADERA_AIF1_FRAME_CTRL_1 0x507
+#define MADERA_AIF1_FRAME_CTRL_2 0x508
+#define MADERA_AIF1_FRAME_CTRL_3 0x509
+#define MADERA_AIF1_FRAME_CTRL_4 0x50A
+#define MADERA_AIF1_FRAME_CTRL_5 0x50B
+#define MADERA_AIF1_FRAME_CTRL_6 0x50C
+#define MADERA_AIF1_FRAME_CTRL_7 0x50D
+#define MADERA_AIF1_FRAME_CTRL_8 0x50E
+#define MADERA_AIF1_FRAME_CTRL_9 0x50F
+#define MADERA_AIF1_FRAME_CTRL_10 0x510
+#define MADERA_AIF1_FRAME_CTRL_11 0x511
+#define MADERA_AIF1_FRAME_CTRL_12 0x512
+#define MADERA_AIF1_FRAME_CTRL_13 0x513
+#define MADERA_AIF1_FRAME_CTRL_14 0x514
+#define MADERA_AIF1_FRAME_CTRL_15 0x515
+#define MADERA_AIF1_FRAME_CTRL_16 0x516
+#define MADERA_AIF1_FRAME_CTRL_17 0x517
+#define MADERA_AIF1_FRAME_CTRL_18 0x518
+#define MADERA_AIF1_TX_ENABLES 0x519
+#define MADERA_AIF1_RX_ENABLES 0x51A
+#define MADERA_AIF1_FORCE_WRITE 0x51B
+#define MADERA_AIF2_BCLK_CTRL 0x540
+#define MADERA_AIF2_TX_PIN_CTRL 0x541
+#define MADERA_AIF2_RX_PIN_CTRL 0x542
+#define MADERA_AIF2_RATE_CTRL 0x543
+#define MADERA_AIF2_FORMAT 0x544
+#define MADERA_AIF2_RX_BCLK_RATE 0x546
+#define MADERA_AIF2_FRAME_CTRL_1 0x547
+#define MADERA_AIF2_FRAME_CTRL_2 0x548
+#define MADERA_AIF2_FRAME_CTRL_3 0x549
+#define MADERA_AIF2_FRAME_CTRL_4 0x54A
+#define MADERA_AIF2_FRAME_CTRL_5 0x54B
+#define MADERA_AIF2_FRAME_CTRL_6 0x54C
+#define MADERA_AIF2_FRAME_CTRL_7 0x54D
+#define MADERA_AIF2_FRAME_CTRL_8 0x54E
+#define MADERA_AIF2_FRAME_CTRL_9 0x54F
+#define MADERA_AIF2_FRAME_CTRL_10 0x550
+#define MADERA_AIF2_FRAME_CTRL_11 0x551
+#define MADERA_AIF2_FRAME_CTRL_12 0x552
+#define MADERA_AIF2_FRAME_CTRL_13 0x553
+#define MADERA_AIF2_FRAME_CTRL_14 0x554
+#define MADERA_AIF2_FRAME_CTRL_15 0x555
+#define MADERA_AIF2_FRAME_CTRL_16 0x556
+#define MADERA_AIF2_FRAME_CTRL_17 0x557
+#define MADERA_AIF2_FRAME_CTRL_18 0x558
+#define MADERA_AIF2_TX_ENABLES 0x559
+#define MADERA_AIF2_RX_ENABLES 0x55A
+#define MADERA_AIF2_FORCE_WRITE 0x55B
+#define MADERA_AIF3_BCLK_CTRL 0x580
+#define MADERA_AIF3_TX_PIN_CTRL 0x581
+#define MADERA_AIF3_RX_PIN_CTRL 0x582
+#define MADERA_AIF3_RATE_CTRL 0x583
+#define MADERA_AIF3_FORMAT 0x584
+#define MADERA_AIF3_RX_BCLK_RATE 0x586
+#define MADERA_AIF3_FRAME_CTRL_1 0x587
+#define MADERA_AIF3_FRAME_CTRL_2 0x588
+#define MADERA_AIF3_FRAME_CTRL_3 0x589
+#define MADERA_AIF3_FRAME_CTRL_4 0x58A
+#define MADERA_AIF3_FRAME_CTRL_5 0x58B
+#define MADERA_AIF3_FRAME_CTRL_6 0x58C
+#define MADERA_AIF3_FRAME_CTRL_7 0x58D
+#define MADERA_AIF3_FRAME_CTRL_8 0x58E
+#define MADERA_AIF3_FRAME_CTRL_9 0x58F
+#define MADERA_AIF3_FRAME_CTRL_10 0x590
+#define MADERA_AIF3_FRAME_CTRL_11 0x591
+#define MADERA_AIF3_FRAME_CTRL_12 0x592
+#define MADERA_AIF3_FRAME_CTRL_13 0x593
+#define MADERA_AIF3_FRAME_CTRL_14 0x594
+#define MADERA_AIF3_FRAME_CTRL_15 0x595
+#define MADERA_AIF3_FRAME_CTRL_16 0x596
+#define MADERA_AIF3_FRAME_CTRL_17 0x597
+#define MADERA_AIF3_FRAME_CTRL_18 0x598
+#define MADERA_AIF3_TX_ENABLES 0x599
+#define MADERA_AIF3_RX_ENABLES 0x59A
+#define MADERA_AIF3_FORCE_WRITE 0x59B
+#define MADERA_AIF4_BCLK_CTRL 0x5A0
+#define MADERA_AIF4_TX_PIN_CTRL 0x5A1
+#define MADERA_AIF4_RX_PIN_CTRL 0x5A2
+#define MADERA_AIF4_RATE_CTRL 0x5A3
+#define MADERA_AIF4_FORMAT 0x5A4
+#define MADERA_AIF4_RX_BCLK_RATE 0x5A6
+#define MADERA_AIF4_FRAME_CTRL_1 0x5A7
+#define MADERA_AIF4_FRAME_CTRL_2 0x5A8
+#define MADERA_AIF4_FRAME_CTRL_3 0x5A9
+#define MADERA_AIF4_FRAME_CTRL_4 0x5AA
+#define MADERA_AIF4_FRAME_CTRL_11 0x5B1
+#define MADERA_AIF4_FRAME_CTRL_12 0x5B2
+#define MADERA_AIF4_TX_ENABLES 0x5B9
+#define MADERA_AIF4_RX_ENABLES 0x5BA
+#define MADERA_AIF4_FORCE_WRITE 0x5BB
+#define MADERA_SPD1_TX_CONTROL 0x5C2
+#define MADERA_SPD1_TX_CHANNEL_STATUS_1 0x5C3
+#define MADERA_SPD1_TX_CHANNEL_STATUS_2 0x5C4
+#define MADERA_SPD1_TX_CHANNEL_STATUS_3 0x5C5
+#define MADERA_SLIMBUS_FRAMER_REF_GEAR 0x5E3
+#define MADERA_SLIMBUS_RATES_1 0x5E5
+#define MADERA_SLIMBUS_RATES_2 0x5E6
+#define MADERA_SLIMBUS_RATES_3 0x5E7
+#define MADERA_SLIMBUS_RATES_4 0x5E8
+#define MADERA_SLIMBUS_RATES_5 0x5E9
+#define MADERA_SLIMBUS_RATES_6 0x5EA
+#define MADERA_SLIMBUS_RATES_7 0x5EB
+#define MADERA_SLIMBUS_RATES_8 0x5EC
+#define MADERA_SLIMBUS_RX_CHANNEL_ENABLE 0x5F5
+#define MADERA_SLIMBUS_TX_CHANNEL_ENABLE 0x5F6
+#define MADERA_SLIMBUS_RX_PORT_STATUS 0x5F7
+#define MADERA_SLIMBUS_TX_PORT_STATUS 0x5F8
+#define MADERA_PWM1MIX_INPUT_1_SOURCE 0x640
+#define MADERA_PWM1MIX_INPUT_1_VOLUME 0x641
+#define MADERA_PWM1MIX_INPUT_2_SOURCE 0x642
+#define MADERA_PWM1MIX_INPUT_2_VOLUME 0x643
+#define MADERA_PWM1MIX_INPUT_3_SOURCE 0x644
+#define MADERA_PWM1MIX_INPUT_3_VOLUME 0x645
+#define MADERA_PWM1MIX_INPUT_4_SOURCE 0x646
+#define MADERA_PWM1MIX_INPUT_4_VOLUME 0x647
+#define MADERA_PWM2MIX_INPUT_1_SOURCE 0x648
+#define MADERA_PWM2MIX_INPUT_1_VOLUME 0x649
+#define MADERA_PWM2MIX_INPUT_2_SOURCE 0x64A
+#define MADERA_PWM2MIX_INPUT_2_VOLUME 0x64B
+#define MADERA_PWM2MIX_INPUT_3_SOURCE 0x64C
+#define MADERA_PWM2MIX_INPUT_3_VOLUME 0x64D
+#define MADERA_PWM2MIX_INPUT_4_SOURCE 0x64E
+#define MADERA_PWM2MIX_INPUT_4_VOLUME 0x64F
+#define MADERA_OUT1LMIX_INPUT_1_SOURCE 0x680
+#define MADERA_OUT1LMIX_INPUT_1_VOLUME 0x681
+#define MADERA_OUT1LMIX_INPUT_2_SOURCE 0x682
+#define MADERA_OUT1LMIX_INPUT_2_VOLUME 0x683
+#define MADERA_OUT1LMIX_INPUT_3_SOURCE 0x684
+#define MADERA_OUT1LMIX_INPUT_3_VOLUME 0x685
+#define MADERA_OUT1LMIX_INPUT_4_SOURCE 0x686
+#define MADERA_OUT1LMIX_INPUT_4_VOLUME 0x687
+#define MADERA_OUT1RMIX_INPUT_1_SOURCE 0x688
+#define MADERA_OUT1RMIX_INPUT_1_VOLUME 0x689
+#define MADERA_OUT1RMIX_INPUT_2_SOURCE 0x68A
+#define MADERA_OUT1RMIX_INPUT_2_VOLUME 0x68B
+#define MADERA_OUT1RMIX_INPUT_3_SOURCE 0x68C
+#define MADERA_OUT1RMIX_INPUT_3_VOLUME 0x68D
+#define MADERA_OUT1RMIX_INPUT_4_SOURCE 0x68E
+#define MADERA_OUT1RMIX_INPUT_4_VOLUME 0x68F
+#define MADERA_OUT2LMIX_INPUT_1_SOURCE 0x690
+#define MADERA_OUT2LMIX_INPUT_1_VOLUME 0x691
+#define MADERA_OUT2LMIX_INPUT_2_SOURCE 0x692
+#define MADERA_OUT2LMIX_INPUT_2_VOLUME 0x693
+#define MADERA_OUT2LMIX_INPUT_3_SOURCE 0x694
+#define MADERA_OUT2LMIX_INPUT_3_VOLUME 0x695
+#define MADERA_OUT2LMIX_INPUT_4_SOURCE 0x696
+#define MADERA_OUT2LMIX_INPUT_4_VOLUME 0x697
+#define MADERA_OUT2RMIX_INPUT_1_SOURCE 0x698
+#define MADERA_OUT2RMIX_INPUT_1_VOLUME 0x699
+#define MADERA_OUT2RMIX_INPUT_2_SOURCE 0x69A
+#define MADERA_OUT2RMIX_INPUT_2_VOLUME 0x69B
+#define MADERA_OUT2RMIX_INPUT_3_SOURCE 0x69C
+#define MADERA_OUT2RMIX_INPUT_3_VOLUME 0x69D
+#define MADERA_OUT2RMIX_INPUT_4_SOURCE 0x69E
+#define MADERA_OUT2RMIX_INPUT_4_VOLUME 0x69F
+#define MADERA_OUT3LMIX_INPUT_1_SOURCE 0x6A0
+#define MADERA_OUT3LMIX_INPUT_1_VOLUME 0x6A1
+#define MADERA_OUT3LMIX_INPUT_2_SOURCE 0x6A2
+#define MADERA_OUT3LMIX_INPUT_2_VOLUME 0x6A3
+#define MADERA_OUT3LMIX_INPUT_3_SOURCE 0x6A4
+#define MADERA_OUT3LMIX_INPUT_3_VOLUME 0x6A5
+#define MADERA_OUT3LMIX_INPUT_4_SOURCE 0x6A6
+#define MADERA_OUT3LMIX_INPUT_4_VOLUME 0x6A7
+#define MADERA_OUT3RMIX_INPUT_1_SOURCE 0x6A8
+#define MADERA_OUT3RMIX_INPUT_1_VOLUME 0x6A9
+#define MADERA_OUT3RMIX_INPUT_2_SOURCE 0x6AA
+#define MADERA_OUT3RMIX_INPUT_2_VOLUME 0x6AB
+#define MADERA_OUT3RMIX_INPUT_3_SOURCE 0x6AC
+#define MADERA_OUT3RMIX_INPUT_3_VOLUME 0x6AD
+#define MADERA_OUT3RMIX_INPUT_4_SOURCE 0x6AE
+#define MADERA_OUT3RMIX_INPUT_4_VOLUME 0x6AF
+#define MADERA_OUT4LMIX_INPUT_1_SOURCE 0x6B0
+#define MADERA_OUT4LMIX_INPUT_1_VOLUME 0x6B1
+#define MADERA_OUT4LMIX_INPUT_2_SOURCE 0x6B2
+#define MADERA_OUT4LMIX_INPUT_2_VOLUME 0x6B3
+#define MADERA_OUT4LMIX_INPUT_3_SOURCE 0x6B4
+#define MADERA_OUT4LMIX_INPUT_3_VOLUME 0x6B5
+#define MADERA_OUT4LMIX_INPUT_4_SOURCE 0x6B6
+#define MADERA_OUT4LMIX_INPUT_4_VOLUME 0x6B7
+#define MADERA_OUT4RMIX_INPUT_1_SOURCE 0x6B8
+#define MADERA_OUT4RMIX_INPUT_1_VOLUME 0x6B9
+#define MADERA_OUT4RMIX_INPUT_2_SOURCE 0x6BA
+#define MADERA_OUT4RMIX_INPUT_2_VOLUME 0x6BB
+#define MADERA_OUT4RMIX_INPUT_3_SOURCE 0x6BC
+#define MADERA_OUT4RMIX_INPUT_3_VOLUME 0x6BD
+#define MADERA_OUT4RMIX_INPUT_4_SOURCE 0x6BE
+#define MADERA_OUT4RMIX_INPUT_4_VOLUME 0x6BF
+#define MADERA_OUT5LMIX_INPUT_1_SOURCE 0x6C0
+#define MADERA_OUT5LMIX_INPUT_1_VOLUME 0x6C1
+#define MADERA_OUT5LMIX_INPUT_2_SOURCE 0x6C2
+#define MADERA_OUT5LMIX_INPUT_2_VOLUME 0x6C3
+#define MADERA_OUT5LMIX_INPUT_3_SOURCE 0x6C4
+#define MADERA_OUT5LMIX_INPUT_3_VOLUME 0x6C5
+#define MADERA_OUT5LMIX_INPUT_4_SOURCE 0x6C6
+#define MADERA_OUT5LMIX_INPUT_4_VOLUME 0x6C7
+#define MADERA_OUT5RMIX_INPUT_1_SOURCE 0x6C8
+#define MADERA_OUT5RMIX_INPUT_1_VOLUME 0x6C9
+#define MADERA_OUT5RMIX_INPUT_2_SOURCE 0x6CA
+#define MADERA_OUT5RMIX_INPUT_2_VOLUME 0x6CB
+#define MADERA_OUT5RMIX_INPUT_3_SOURCE 0x6CC
+#define MADERA_OUT5RMIX_INPUT_3_VOLUME 0x6CD
+#define MADERA_OUT5RMIX_INPUT_4_SOURCE 0x6CE
+#define MADERA_OUT5RMIX_INPUT_4_VOLUME 0x6CF
+#define MADERA_OUT6LMIX_INPUT_1_SOURCE 0x6D0
+#define MADERA_OUT6LMIX_INPUT_1_VOLUME 0x6D1
+#define MADERA_OUT6LMIX_INPUT_2_SOURCE 0x6D2
+#define MADERA_OUT6LMIX_INPUT_2_VOLUME 0x6D3
+#define MADERA_OUT6LMIX_INPUT_3_SOURCE 0x6D4
+#define MADERA_OUT6LMIX_INPUT_3_VOLUME 0x6D5
+#define MADERA_OUT6LMIX_INPUT_4_SOURCE 0x6D6
+#define MADERA_OUT6LMIX_INPUT_4_VOLUME 0x6D7
+#define MADERA_OUT6RMIX_INPUT_1_SOURCE 0x6D8
+#define MADERA_OUT6RMIX_INPUT_1_VOLUME 0x6D9
+#define MADERA_OUT6RMIX_INPUT_2_SOURCE 0x6DA
+#define MADERA_OUT6RMIX_INPUT_2_VOLUME 0x6DB
+#define MADERA_OUT6RMIX_INPUT_3_SOURCE 0x6DC
+#define MADERA_OUT6RMIX_INPUT_3_VOLUME 0x6DD
+#define MADERA_OUT6RMIX_INPUT_4_SOURCE 0x6DE
+#define MADERA_OUT6RMIX_INPUT_4_VOLUME 0x6DF
+#define MADERA_AIF1TX1MIX_INPUT_1_SOURCE 0x700
+#define MADERA_AIF1TX1MIX_INPUT_1_VOLUME 0x701
+#define MADERA_AIF1TX1MIX_INPUT_2_SOURCE 0x702
+#define MADERA_AIF1TX1MIX_INPUT_2_VOLUME 0x703
+#define MADERA_AIF1TX1MIX_INPUT_3_SOURCE 0x704
+#define MADERA_AIF1TX1MIX_INPUT_3_VOLUME 0x705
+#define MADERA_AIF1TX1MIX_INPUT_4_SOURCE 0x706
+#define MADERA_AIF1TX1MIX_INPUT_4_VOLUME 0x707
+#define MADERA_AIF1TX2MIX_INPUT_1_SOURCE 0x708
+#define MADERA_AIF1TX2MIX_INPUT_1_VOLUME 0x709
+#define MADERA_AIF1TX2MIX_INPUT_2_SOURCE 0x70A
+#define MADERA_AIF1TX2MIX_INPUT_2_VOLUME 0x70B
+#define MADERA_AIF1TX2MIX_INPUT_3_SOURCE 0x70C
+#define MADERA_AIF1TX2MIX_INPUT_3_VOLUME 0x70D
+#define MADERA_AIF1TX2MIX_INPUT_4_SOURCE 0x70E
+#define MADERA_AIF1TX2MIX_INPUT_4_VOLUME 0x70F
+#define MADERA_AIF1TX3MIX_INPUT_1_SOURCE 0x710
+#define MADERA_AIF1TX3MIX_INPUT_1_VOLUME 0x711
+#define MADERA_AIF1TX3MIX_INPUT_2_SOURCE 0x712
+#define MADERA_AIF1TX3MIX_INPUT_2_VOLUME 0x713
+#define MADERA_AIF1TX3MIX_INPUT_3_SOURCE 0x714
+#define MADERA_AIF1TX3MIX_INPUT_3_VOLUME 0x715
+#define MADERA_AIF1TX3MIX_INPUT_4_SOURCE 0x716
+#define MADERA_AIF1TX3MIX_INPUT_4_VOLUME 0x717
+#define MADERA_AIF1TX4MIX_INPUT_1_SOURCE 0x718
+#define MADERA_AIF1TX4MIX_INPUT_1_VOLUME 0x719
+#define MADERA_AIF1TX4MIX_INPUT_2_SOURCE 0x71A
+#define MADERA_AIF1TX4MIX_INPUT_2_VOLUME 0x71B
+#define MADERA_AIF1TX4MIX_INPUT_3_SOURCE 0x71C
+#define MADERA_AIF1TX4MIX_INPUT_3_VOLUME 0x71D
+#define MADERA_AIF1TX4MIX_INPUT_4_SOURCE 0x71E
+#define MADERA_AIF1TX4MIX_INPUT_4_VOLUME 0x71F
+#define MADERA_AIF1TX5MIX_INPUT_1_SOURCE 0x720
+#define MADERA_AIF1TX5MIX_INPUT_1_VOLUME 0x721
+#define MADERA_AIF1TX5MIX_INPUT_2_SOURCE 0x722
+#define MADERA_AIF1TX5MIX_INPUT_2_VOLUME 0x723
+#define MADERA_AIF1TX5MIX_INPUT_3_SOURCE 0x724
+#define MADERA_AIF1TX5MIX_INPUT_3_VOLUME 0x725
+#define MADERA_AIF1TX5MIX_INPUT_4_SOURCE 0x726
+#define MADERA_AIF1TX5MIX_INPUT_4_VOLUME 0x727
+#define MADERA_AIF1TX6MIX_INPUT_1_SOURCE 0x728
+#define MADERA_AIF1TX6MIX_INPUT_1_VOLUME 0x729
+#define MADERA_AIF1TX6MIX_INPUT_2_SOURCE 0x72A
+#define MADERA_AIF1TX6MIX_INPUT_2_VOLUME 0x72B
+#define MADERA_AIF1TX6MIX_INPUT_3_SOURCE 0x72C
+#define MADERA_AIF1TX6MIX_INPUT_3_VOLUME 0x72D
+#define MADERA_AIF1TX6MIX_INPUT_4_SOURCE 0x72E
+#define MADERA_AIF1TX6MIX_INPUT_4_VOLUME 0x72F
+#define MADERA_AIF1TX7MIX_INPUT_1_SOURCE 0x730
+#define MADERA_AIF1TX7MIX_INPUT_1_VOLUME 0x731
+#define MADERA_AIF1TX7MIX_INPUT_2_SOURCE 0x732
+#define MADERA_AIF1TX7MIX_INPUT_2_VOLUME 0x733
+#define MADERA_AIF1TX7MIX_INPUT_3_SOURCE 0x734
+#define MADERA_AIF1TX7MIX_INPUT_3_VOLUME 0x735
+#define MADERA_AIF1TX7MIX_INPUT_4_SOURCE 0x736
+#define MADERA_AIF1TX7MIX_INPUT_4_VOLUME 0x737
+#define MADERA_AIF1TX8MIX_INPUT_1_SOURCE 0x738
+#define MADERA_AIF1TX8MIX_INPUT_1_VOLUME 0x739
+#define MADERA_AIF1TX8MIX_INPUT_2_SOURCE 0x73A
+#define MADERA_AIF1TX8MIX_INPUT_2_VOLUME 0x73B
+#define MADERA_AIF1TX8MIX_INPUT_3_SOURCE 0x73C
+#define MADERA_AIF1TX8MIX_INPUT_3_VOLUME 0x73D
+#define MADERA_AIF1TX8MIX_INPUT_4_SOURCE 0x73E
+#define MADERA_AIF1TX8MIX_INPUT_4_VOLUME 0x73F
+#define MADERA_AIF2TX1MIX_INPUT_1_SOURCE 0x740
+#define MADERA_AIF2TX1MIX_INPUT_1_VOLUME 0x741
+#define MADERA_AIF2TX1MIX_INPUT_2_SOURCE 0x742
+#define MADERA_AIF2TX1MIX_INPUT_2_VOLUME 0x743
+#define MADERA_AIF2TX1MIX_INPUT_3_SOURCE 0x744
+#define MADERA_AIF2TX1MIX_INPUT_3_VOLUME 0x745
+#define MADERA_AIF2TX1MIX_INPUT_4_SOURCE 0x746
+#define MADERA_AIF2TX1MIX_INPUT_4_VOLUME 0x747
+#define MADERA_AIF2TX2MIX_INPUT_1_SOURCE 0x748
+#define MADERA_AIF2TX2MIX_INPUT_1_VOLUME 0x749
+#define MADERA_AIF2TX2MIX_INPUT_2_SOURCE 0x74A
+#define MADERA_AIF2TX2MIX_INPUT_2_VOLUME 0x74B
+#define MADERA_AIF2TX2MIX_INPUT_3_SOURCE 0x74C
+#define MADERA_AIF2TX2MIX_INPUT_3_VOLUME 0x74D
+#define MADERA_AIF2TX2MIX_INPUT_4_SOURCE 0x74E
+#define MADERA_AIF2TX2MIX_INPUT_4_VOLUME 0x74F
+#define MADERA_AIF2TX3MIX_INPUT_1_SOURCE 0x750
+#define MADERA_AIF2TX3MIX_INPUT_1_VOLUME 0x751
+#define MADERA_AIF2TX3MIX_INPUT_2_SOURCE 0x752
+#define MADERA_AIF2TX3MIX_INPUT_2_VOLUME 0x753
+#define MADERA_AIF2TX3MIX_INPUT_3_SOURCE 0x754
+#define MADERA_AIF2TX3MIX_INPUT_3_VOLUME 0x755
+#define MADERA_AIF2TX3MIX_INPUT_4_SOURCE 0x756
+#define MADERA_AIF2TX3MIX_INPUT_4_VOLUME 0x757
+#define MADERA_AIF2TX4MIX_INPUT_1_SOURCE 0x758
+#define MADERA_AIF2TX4MIX_INPUT_1_VOLUME 0x759
+#define MADERA_AIF2TX4MIX_INPUT_2_SOURCE 0x75A
+#define MADERA_AIF2TX4MIX_INPUT_2_VOLUME 0x75B
+#define MADERA_AIF2TX4MIX_INPUT_3_SOURCE 0x75C
+#define MADERA_AIF2TX4MIX_INPUT_3_VOLUME 0x75D
+#define MADERA_AIF2TX4MIX_INPUT_4_SOURCE 0x75E
+#define MADERA_AIF2TX4MIX_INPUT_4_VOLUME 0x75F
+#define MADERA_AIF2TX5MIX_INPUT_1_SOURCE 0x760
+#define MADERA_AIF2TX5MIX_INPUT_1_VOLUME 0x761
+#define MADERA_AIF2TX5MIX_INPUT_2_SOURCE 0x762
+#define MADERA_AIF2TX5MIX_INPUT_2_VOLUME 0x763
+#define MADERA_AIF2TX5MIX_INPUT_3_SOURCE 0x764
+#define MADERA_AIF2TX5MIX_INPUT_3_VOLUME 0x765
+#define MADERA_AIF2TX5MIX_INPUT_4_SOURCE 0x766
+#define MADERA_AIF2TX5MIX_INPUT_4_VOLUME 0x767
+#define MADERA_AIF2TX6MIX_INPUT_1_SOURCE 0x768
+#define MADERA_AIF2TX6MIX_INPUT_1_VOLUME 0x769
+#define MADERA_AIF2TX6MIX_INPUT_2_SOURCE 0x76A
+#define MADERA_AIF2TX6MIX_INPUT_2_VOLUME 0x76B
+#define MADERA_AIF2TX6MIX_INPUT_3_SOURCE 0x76C
+#define MADERA_AIF2TX6MIX_INPUT_3_VOLUME 0x76D
+#define MADERA_AIF2TX6MIX_INPUT_4_SOURCE 0x76E
+#define MADERA_AIF2TX6MIX_INPUT_4_VOLUME 0x76F
+#define MADERA_AIF2TX7MIX_INPUT_1_SOURCE 0x770
+#define MADERA_AIF2TX7MIX_INPUT_1_VOLUME 0x771
+#define MADERA_AIF2TX7MIX_INPUT_2_SOURCE 0x772
+#define MADERA_AIF2TX7MIX_INPUT_2_VOLUME 0x773
+#define MADERA_AIF2TX7MIX_INPUT_3_SOURCE 0x774
+#define MADERA_AIF2TX7MIX_INPUT_3_VOLUME 0x775
+#define MADERA_AIF2TX7MIX_INPUT_4_SOURCE 0x776
+#define MADERA_AIF2TX7MIX_INPUT_4_VOLUME 0x777
+#define MADERA_AIF2TX8MIX_INPUT_1_SOURCE 0x778
+#define MADERA_AIF2TX8MIX_INPUT_1_VOLUME 0x779
+#define MADERA_AIF2TX8MIX_INPUT_2_SOURCE 0x77A
+#define MADERA_AIF2TX8MIX_INPUT_2_VOLUME 0x77B
+#define MADERA_AIF2TX8MIX_INPUT_3_SOURCE 0x77C
+#define MADERA_AIF2TX8MIX_INPUT_3_VOLUME 0x77D
+#define MADERA_AIF2TX8MIX_INPUT_4_SOURCE 0x77E
+#define MADERA_AIF2TX8MIX_INPUT_4_VOLUME 0x77F
+#define MADERA_AIF3TX1MIX_INPUT_1_SOURCE 0x780
+#define MADERA_AIF3TX1MIX_INPUT_1_VOLUME 0x781
+#define MADERA_AIF3TX1MIX_INPUT_2_SOURCE 0x782
+#define MADERA_AIF3TX1MIX_INPUT_2_VOLUME 0x783
+#define MADERA_AIF3TX1MIX_INPUT_3_SOURCE 0x784
+#define MADERA_AIF3TX1MIX_INPUT_3_VOLUME 0x785
+#define MADERA_AIF3TX1MIX_INPUT_4_SOURCE 0x786
+#define MADERA_AIF3TX1MIX_INPUT_4_VOLUME 0x787
+#define MADERA_AIF3TX2MIX_INPUT_1_SOURCE 0x788
+#define MADERA_AIF3TX2MIX_INPUT_1_VOLUME 0x789
+#define MADERA_AIF3TX2MIX_INPUT_2_SOURCE 0x78A
+#define MADERA_AIF3TX2MIX_INPUT_2_VOLUME 0x78B
+#define MADERA_AIF3TX2MIX_INPUT_3_SOURCE 0x78C
+#define MADERA_AIF3TX2MIX_INPUT_3_VOLUME 0x78D
+#define MADERA_AIF3TX2MIX_INPUT_4_SOURCE 0x78E
+#define MADERA_AIF3TX2MIX_INPUT_4_VOLUME 0x78F
+#define MADERA_AIF3TX3MIX_INPUT_1_SOURCE 0x790
+#define MADERA_AIF3TX3MIX_INPUT_1_VOLUME 0x791
+#define MADERA_AIF3TX3MIX_INPUT_2_SOURCE 0x792
+#define MADERA_AIF3TX3MIX_INPUT_2_VOLUME 0x793
+#define MADERA_AIF3TX3MIX_INPUT_3_SOURCE 0x794
+#define MADERA_AIF3TX3MIX_INPUT_3_VOLUME 0x795
+#define MADERA_AIF3TX3MIX_INPUT_4_SOURCE 0x796
+#define MADERA_AIF3TX3MIX_INPUT_4_VOLUME 0x797
+#define MADERA_AIF3TX4MIX_INPUT_1_SOURCE 0x798
+#define MADERA_AIF3TX4MIX_INPUT_1_VOLUME 0x799
+#define MADERA_AIF3TX4MIX_INPUT_2_SOURCE 0x79A
+#define MADERA_AIF3TX4MIX_INPUT_2_VOLUME 0x79B
+#define MADERA_AIF3TX4MIX_INPUT_3_SOURCE 0x79C
+#define MADERA_AIF3TX4MIX_INPUT_3_VOLUME 0x79D
+#define MADERA_AIF3TX4MIX_INPUT_4_SOURCE 0x79E
+#define MADERA_AIF3TX4MIX_INPUT_4_VOLUME 0x79F
+#define CS47L92_AIF3TX5MIX_INPUT_1_SOURCE 0x7A0
+#define CS47L92_AIF3TX5MIX_INPUT_1_VOLUME 0x7A1
+#define CS47L92_AIF3TX5MIX_INPUT_2_SOURCE 0x7A2
+#define CS47L92_AIF3TX5MIX_INPUT_2_VOLUME 0x7A3
+#define CS47L92_AIF3TX5MIX_INPUT_3_SOURCE 0x7A4
+#define CS47L92_AIF3TX5MIX_INPUT_3_VOLUME 0x7A5
+#define CS47L92_AIF3TX5MIX_INPUT_4_SOURCE 0x7A6
+#define CS47L92_AIF3TX5MIX_INPUT_4_VOLUME 0x7A7
+#define CS47L92_AIF3TX6MIX_INPUT_1_SOURCE 0x7A8
+#define CS47L92_AIF3TX6MIX_INPUT_1_VOLUME 0x7A9
+#define CS47L92_AIF3TX6MIX_INPUT_2_SOURCE 0x7AA
+#define CS47L92_AIF3TX6MIX_INPUT_2_VOLUME 0x7AB
+#define CS47L92_AIF3TX6MIX_INPUT_3_SOURCE 0x7AC
+#define CS47L92_AIF3TX6MIX_INPUT_3_VOLUME 0x7AD
+#define CS47L92_AIF3TX6MIX_INPUT_4_SOURCE 0x7AE
+#define CS47L92_AIF3TX6MIX_INPUT_4_VOLUME 0x7AF
+#define CS47L92_AIF3TX7MIX_INPUT_1_SOURCE 0x7B0
+#define CS47L92_AIF3TX7MIX_INPUT_1_VOLUME 0x7B1
+#define CS47L92_AIF3TX7MIX_INPUT_2_SOURCE 0x7B2
+#define CS47L92_AIF3TX7MIX_INPUT_2_VOLUME 0x7B3
+#define CS47L92_AIF3TX7MIX_INPUT_3_SOURCE 0x7B4
+#define CS47L92_AIF3TX7MIX_INPUT_3_VOLUME 0x7B5
+#define CS47L92_AIF3TX7MIX_INPUT_4_SOURCE 0x7B6
+#define CS47L92_AIF3TX7MIX_INPUT_4_VOLUME 0x7B7
+#define CS47L92_AIF3TX8MIX_INPUT_1_SOURCE 0x7B8
+#define CS47L92_AIF3TX8MIX_INPUT_1_VOLUME 0x7B9
+#define CS47L92_AIF3TX8MIX_INPUT_2_SOURCE 0x7BA
+#define CS47L92_AIF3TX8MIX_INPUT_2_VOLUME 0x7BB
+#define CS47L92_AIF3TX8MIX_INPUT_3_SOURCE 0x7BC
+#define CS47L92_AIF3TX8MIX_INPUT_3_VOLUME 0x7BD
+#define CS47L92_AIF3TX8MIX_INPUT_4_SOURCE 0x7BE
+#define CS47L92_AIF3TX8MIX_INPUT_4_VOLUME 0x7BF
+#define MADERA_AIF4TX1MIX_INPUT_1_SOURCE 0x7A0
+#define MADERA_AIF4TX1MIX_INPUT_1_VOLUME 0x7A1
+#define MADERA_AIF4TX1MIX_INPUT_2_SOURCE 0x7A2
+#define MADERA_AIF4TX1MIX_INPUT_2_VOLUME 0x7A3
+#define MADERA_AIF4TX1MIX_INPUT_3_SOURCE 0x7A4
+#define MADERA_AIF4TX1MIX_INPUT_3_VOLUME 0x7A5
+#define MADERA_AIF4TX1MIX_INPUT_4_SOURCE 0x7A6
+#define MADERA_AIF4TX1MIX_INPUT_4_VOLUME 0x7A7
+#define MADERA_AIF4TX2MIX_INPUT_1_SOURCE 0x7A8
+#define MADERA_AIF4TX2MIX_INPUT_1_VOLUME 0x7A9
+#define MADERA_AIF4TX2MIX_INPUT_2_SOURCE 0x7AA
+#define MADERA_AIF4TX2MIX_INPUT_2_VOLUME 0x7AB
+#define MADERA_AIF4TX2MIX_INPUT_3_SOURCE 0x7AC
+#define MADERA_AIF4TX2MIX_INPUT_3_VOLUME 0x7AD
+#define MADERA_AIF4TX2MIX_INPUT_4_SOURCE 0x7AE
+#define MADERA_AIF4TX2MIX_INPUT_4_VOLUME 0x7AF
+#define MADERA_SLIMTX1MIX_INPUT_1_SOURCE 0x7C0
+#define MADERA_SLIMTX1MIX_INPUT_1_VOLUME 0x7C1
+#define MADERA_SLIMTX1MIX_INPUT_2_SOURCE 0x7C2
+#define MADERA_SLIMTX1MIX_INPUT_2_VOLUME 0x7C3
+#define MADERA_SLIMTX1MIX_INPUT_3_SOURCE 0x7C4
+#define MADERA_SLIMTX1MIX_INPUT_3_VOLUME 0x7C5
+#define MADERA_SLIMTX1MIX_INPUT_4_SOURCE 0x7C6
+#define MADERA_SLIMTX1MIX_INPUT_4_VOLUME 0x7C7
+#define MADERA_SLIMTX2MIX_INPUT_1_SOURCE 0x7C8
+#define MADERA_SLIMTX2MIX_INPUT_1_VOLUME 0x7C9
+#define MADERA_SLIMTX2MIX_INPUT_2_SOURCE 0x7CA
+#define MADERA_SLIMTX2MIX_INPUT_2_VOLUME 0x7CB
+#define MADERA_SLIMTX2MIX_INPUT_3_SOURCE 0x7CC
+#define MADERA_SLIMTX2MIX_INPUT_3_VOLUME 0x7CD
+#define MADERA_SLIMTX2MIX_INPUT_4_SOURCE 0x7CE
+#define MADERA_SLIMTX2MIX_INPUT_4_VOLUME 0x7CF
+#define MADERA_SLIMTX3MIX_INPUT_1_SOURCE 0x7D0
+#define MADERA_SLIMTX3MIX_INPUT_1_VOLUME 0x7D1
+#define MADERA_SLIMTX3MIX_INPUT_2_SOURCE 0x7D2
+#define MADERA_SLIMTX3MIX_INPUT_2_VOLUME 0x7D3
+#define MADERA_SLIMTX3MIX_INPUT_3_SOURCE 0x7D4
+#define MADERA_SLIMTX3MIX_INPUT_3_VOLUME 0x7D5
+#define MADERA_SLIMTX3MIX_INPUT_4_SOURCE 0x7D6
+#define MADERA_SLIMTX3MIX_INPUT_4_VOLUME 0x7D7
+#define MADERA_SLIMTX4MIX_INPUT_1_SOURCE 0x7D8
+#define MADERA_SLIMTX4MIX_INPUT_1_VOLUME 0x7D9
+#define MADERA_SLIMTX4MIX_INPUT_2_SOURCE 0x7DA
+#define MADERA_SLIMTX4MIX_INPUT_2_VOLUME 0x7DB
+#define MADERA_SLIMTX4MIX_INPUT_3_SOURCE 0x7DC
+#define MADERA_SLIMTX4MIX_INPUT_3_VOLUME 0x7DD
+#define MADERA_SLIMTX4MIX_INPUT_4_SOURCE 0x7DE
+#define MADERA_SLIMTX4MIX_INPUT_4_VOLUME 0x7DF
+#define MADERA_SLIMTX5MIX_INPUT_1_SOURCE 0x7E0
+#define MADERA_SLIMTX5MIX_INPUT_1_VOLUME 0x7E1
+#define MADERA_SLIMTX5MIX_INPUT_2_SOURCE 0x7E2
+#define MADERA_SLIMTX5MIX_INPUT_2_VOLUME 0x7E3
+#define MADERA_SLIMTX5MIX_INPUT_3_SOURCE 0x7E4
+#define MADERA_SLIMTX5MIX_INPUT_3_VOLUME 0x7E5
+#define MADERA_SLIMTX5MIX_INPUT_4_SOURCE 0x7E6
+#define MADERA_SLIMTX5MIX_INPUT_4_VOLUME 0x7E7
+#define MADERA_SLIMTX6MIX_INPUT_1_SOURCE 0x7E8
+#define MADERA_SLIMTX6MIX_INPUT_1_VOLUME 0x7E9
+#define MADERA_SLIMTX6MIX_INPUT_2_SOURCE 0x7EA
+#define MADERA_SLIMTX6MIX_INPUT_2_VOLUME 0x7EB
+#define MADERA_SLIMTX6MIX_INPUT_3_SOURCE 0x7EC
+#define MADERA_SLIMTX6MIX_INPUT_3_VOLUME 0x7ED
+#define MADERA_SLIMTX6MIX_INPUT_4_SOURCE 0x7EE
+#define MADERA_SLIMTX6MIX_INPUT_4_VOLUME 0x7EF
+#define MADERA_SLIMTX7MIX_INPUT_1_SOURCE 0x7F0
+#define MADERA_SLIMTX7MIX_INPUT_1_VOLUME 0x7F1
+#define MADERA_SLIMTX7MIX_INPUT_2_SOURCE 0x7F2
+#define MADERA_SLIMTX7MIX_INPUT_2_VOLUME 0x7F3
+#define MADERA_SLIMTX7MIX_INPUT_3_SOURCE 0x7F4
+#define MADERA_SLIMTX7MIX_INPUT_3_VOLUME 0x7F5
+#define MADERA_SLIMTX7MIX_INPUT_4_SOURCE 0x7F6
+#define MADERA_SLIMTX7MIX_INPUT_4_VOLUME 0x7F7
+#define MADERA_SLIMTX8MIX_INPUT_1_SOURCE 0x7F8
+#define MADERA_SLIMTX8MIX_INPUT_1_VOLUME 0x7F9
+#define MADERA_SLIMTX8MIX_INPUT_2_SOURCE 0x7FA
+#define MADERA_SLIMTX8MIX_INPUT_2_VOLUME 0x7FB
+#define MADERA_SLIMTX8MIX_INPUT_3_SOURCE 0x7FC
+#define MADERA_SLIMTX8MIX_INPUT_3_VOLUME 0x7FD
+#define MADERA_SLIMTX8MIX_INPUT_4_SOURCE 0x7FE
+#define MADERA_SLIMTX8MIX_INPUT_4_VOLUME 0x7FF
+#define MADERA_SPDIF1TX1MIX_INPUT_1_SOURCE 0x800
+#define MADERA_SPDIF1TX1MIX_INPUT_1_VOLUME 0x801
+#define MADERA_SPDIF1TX2MIX_INPUT_1_SOURCE 0x808
+#define MADERA_SPDIF1TX2MIX_INPUT_1_VOLUME 0x809
+#define MADERA_EQ1MIX_INPUT_1_SOURCE 0x880
+#define MADERA_EQ1MIX_INPUT_1_VOLUME 0x881
+#define MADERA_EQ1MIX_INPUT_2_SOURCE 0x882
+#define MADERA_EQ1MIX_INPUT_2_VOLUME 0x883
+#define MADERA_EQ1MIX_INPUT_3_SOURCE 0x884
+#define MADERA_EQ1MIX_INPUT_3_VOLUME 0x885
+#define MADERA_EQ1MIX_INPUT_4_SOURCE 0x886
+#define MADERA_EQ1MIX_INPUT_4_VOLUME 0x887
+#define MADERA_EQ2MIX_INPUT_1_SOURCE 0x888
+#define MADERA_EQ2MIX_INPUT_1_VOLUME 0x889
+#define MADERA_EQ2MIX_INPUT_2_SOURCE 0x88A
+#define MADERA_EQ2MIX_INPUT_2_VOLUME 0x88B
+#define MADERA_EQ2MIX_INPUT_3_SOURCE 0x88C
+#define MADERA_EQ2MIX_INPUT_3_VOLUME 0x88D
+#define MADERA_EQ2MIX_INPUT_4_SOURCE 0x88E
+#define MADERA_EQ2MIX_INPUT_4_VOLUME 0x88F
+#define MADERA_EQ3MIX_INPUT_1_SOURCE 0x890
+#define MADERA_EQ3MIX_INPUT_1_VOLUME 0x891
+#define MADERA_EQ3MIX_INPUT_2_SOURCE 0x892
+#define MADERA_EQ3MIX_INPUT_2_VOLUME 0x893
+#define MADERA_EQ3MIX_INPUT_3_SOURCE 0x894
+#define MADERA_EQ3MIX_INPUT_3_VOLUME 0x895
+#define MADERA_EQ3MIX_INPUT_4_SOURCE 0x896
+#define MADERA_EQ3MIX_INPUT_4_VOLUME 0x897
+#define MADERA_EQ4MIX_INPUT_1_SOURCE 0x898
+#define MADERA_EQ4MIX_INPUT_1_VOLUME 0x899
+#define MADERA_EQ4MIX_INPUT_2_SOURCE 0x89A
+#define MADERA_EQ4MIX_INPUT_2_VOLUME 0x89B
+#define MADERA_EQ4MIX_INPUT_3_SOURCE 0x89C
+#define MADERA_EQ4MIX_INPUT_3_VOLUME 0x89D
+#define MADERA_EQ4MIX_INPUT_4_SOURCE 0x89E
+#define MADERA_EQ4MIX_INPUT_4_VOLUME 0x89F
+#define MADERA_DRC1LMIX_INPUT_1_SOURCE 0x8C0
+#define MADERA_DRC1LMIX_INPUT_1_VOLUME 0x8C1
+#define MADERA_DRC1LMIX_INPUT_2_SOURCE 0x8C2
+#define MADERA_DRC1LMIX_INPUT_2_VOLUME 0x8C3
+#define MADERA_DRC1LMIX_INPUT_3_SOURCE 0x8C4
+#define MADERA_DRC1LMIX_INPUT_3_VOLUME 0x8C5
+#define MADERA_DRC1LMIX_INPUT_4_SOURCE 0x8C6
+#define MADERA_DRC1LMIX_INPUT_4_VOLUME 0x8C7
+#define MADERA_DRC1RMIX_INPUT_1_SOURCE 0x8C8
+#define MADERA_DRC1RMIX_INPUT_1_VOLUME 0x8C9
+#define MADERA_DRC1RMIX_INPUT_2_SOURCE 0x8CA
+#define MADERA_DRC1RMIX_INPUT_2_VOLUME 0x8CB
+#define MADERA_DRC1RMIX_INPUT_3_SOURCE 0x8CC
+#define MADERA_DRC1RMIX_INPUT_3_VOLUME 0x8CD
+#define MADERA_DRC1RMIX_INPUT_4_SOURCE 0x8CE
+#define MADERA_DRC1RMIX_INPUT_4_VOLUME 0x8CF
+#define MADERA_DRC2LMIX_INPUT_1_SOURCE 0x8D0
+#define MADERA_DRC2LMIX_INPUT_1_VOLUME 0x8D1
+#define MADERA_DRC2LMIX_INPUT_2_SOURCE 0x8D2
+#define MADERA_DRC2LMIX_INPUT_2_VOLUME 0x8D3
+#define MADERA_DRC2LMIX_INPUT_3_SOURCE 0x8D4
+#define MADERA_DRC2LMIX_INPUT_3_VOLUME 0x8D5
+#define MADERA_DRC2LMIX_INPUT_4_SOURCE 0x8D6
+#define MADERA_DRC2LMIX_INPUT_4_VOLUME 0x8D7
+#define MADERA_DRC2RMIX_INPUT_1_SOURCE 0x8D8
+#define MADERA_DRC2RMIX_INPUT_1_VOLUME 0x8D9
+#define MADERA_DRC2RMIX_INPUT_2_SOURCE 0x8DA
+#define MADERA_DRC2RMIX_INPUT_2_VOLUME 0x8DB
+#define MADERA_DRC2RMIX_INPUT_3_SOURCE 0x8DC
+#define MADERA_DRC2RMIX_INPUT_3_VOLUME 0x8DD
+#define MADERA_DRC2RMIX_INPUT_4_SOURCE 0x8DE
+#define MADERA_DRC2RMIX_INPUT_4_VOLUME 0x8DF
+#define MADERA_HPLP1MIX_INPUT_1_SOURCE 0x900
+#define MADERA_HPLP1MIX_INPUT_1_VOLUME 0x901
+#define MADERA_HPLP1MIX_INPUT_2_SOURCE 0x902
+#define MADERA_HPLP1MIX_INPUT_2_VOLUME 0x903
+#define MADERA_HPLP1MIX_INPUT_3_SOURCE 0x904
+#define MADERA_HPLP1MIX_INPUT_3_VOLUME 0x905
+#define MADERA_HPLP1MIX_INPUT_4_SOURCE 0x906
+#define MADERA_HPLP1MIX_INPUT_4_VOLUME 0x907
+#define MADERA_HPLP2MIX_INPUT_1_SOURCE 0x908
+#define MADERA_HPLP2MIX_INPUT_1_VOLUME 0x909
+#define MADERA_HPLP2MIX_INPUT_2_SOURCE 0x90A
+#define MADERA_HPLP2MIX_INPUT_2_VOLUME 0x90B
+#define MADERA_HPLP2MIX_INPUT_3_SOURCE 0x90C
+#define MADERA_HPLP2MIX_INPUT_3_VOLUME 0x90D
+#define MADERA_HPLP2MIX_INPUT_4_SOURCE 0x90E
+#define MADERA_HPLP2MIX_INPUT_4_VOLUME 0x90F
+#define MADERA_HPLP3MIX_INPUT_1_SOURCE 0x910
+#define MADERA_HPLP3MIX_INPUT_1_VOLUME 0x911
+#define MADERA_HPLP3MIX_INPUT_2_SOURCE 0x912
+#define MADERA_HPLP3MIX_INPUT_2_VOLUME 0x913
+#define MADERA_HPLP3MIX_INPUT_3_SOURCE 0x914
+#define MADERA_HPLP3MIX_INPUT_3_VOLUME 0x915
+#define MADERA_HPLP3MIX_INPUT_4_SOURCE 0x916
+#define MADERA_HPLP3MIX_INPUT_4_VOLUME 0x917
+#define MADERA_HPLP4MIX_INPUT_1_SOURCE 0x918
+#define MADERA_HPLP4MIX_INPUT_1_VOLUME 0x919
+#define MADERA_HPLP4MIX_INPUT_2_SOURCE 0x91A
+#define MADERA_HPLP4MIX_INPUT_2_VOLUME 0x91B
+#define MADERA_HPLP4MIX_INPUT_3_SOURCE 0x91C
+#define MADERA_HPLP4MIX_INPUT_3_VOLUME 0x91D
+#define MADERA_HPLP4MIX_INPUT_4_SOURCE 0x91E
+#define MADERA_HPLP4MIX_INPUT_4_VOLUME 0x91F
+#define MADERA_DSP1LMIX_INPUT_1_SOURCE 0x940
+#define MADERA_DSP1LMIX_INPUT_1_VOLUME 0x941
+#define MADERA_DSP1LMIX_INPUT_2_SOURCE 0x942
+#define MADERA_DSP1LMIX_INPUT_2_VOLUME 0x943
+#define MADERA_DSP1LMIX_INPUT_3_SOURCE 0x944
+#define MADERA_DSP1LMIX_INPUT_3_VOLUME 0x945
+#define MADERA_DSP1LMIX_INPUT_4_SOURCE 0x946
+#define MADERA_DSP1LMIX_INPUT_4_VOLUME 0x947
+#define MADERA_DSP1RMIX_INPUT_1_SOURCE 0x948
+#define MADERA_DSP1RMIX_INPUT_1_VOLUME 0x949
+#define MADERA_DSP1RMIX_INPUT_2_SOURCE 0x94A
+#define MADERA_DSP1RMIX_INPUT_2_VOLUME 0x94B
+#define MADERA_DSP1RMIX_INPUT_3_SOURCE 0x94C
+#define MADERA_DSP1RMIX_INPUT_3_VOLUME 0x94D
+#define MADERA_DSP1RMIX_INPUT_4_SOURCE 0x94E
+#define MADERA_DSP1RMIX_INPUT_4_VOLUME 0x94F
+#define MADERA_DSP1AUX1MIX_INPUT_1_SOURCE 0x950
+#define MADERA_DSP1AUX2MIX_INPUT_1_SOURCE 0x958
+#define MADERA_DSP1AUX3MIX_INPUT_1_SOURCE 0x960
+#define MADERA_DSP1AUX4MIX_INPUT_1_SOURCE 0x968
+#define MADERA_DSP1AUX5MIX_INPUT_1_SOURCE 0x970
+#define MADERA_DSP1AUX6MIX_INPUT_1_SOURCE 0x978
+#define MADERA_DSP2LMIX_INPUT_1_SOURCE 0x980
+#define MADERA_DSP2LMIX_INPUT_1_VOLUME 0x981
+#define MADERA_DSP2LMIX_INPUT_2_SOURCE 0x982
+#define MADERA_DSP2LMIX_INPUT_2_VOLUME 0x983
+#define MADERA_DSP2LMIX_INPUT_3_SOURCE 0x984
+#define MADERA_DSP2LMIX_INPUT_3_VOLUME 0x985
+#define MADERA_DSP2LMIX_INPUT_4_SOURCE 0x986
+#define MADERA_DSP2LMIX_INPUT_4_VOLUME 0x987
+#define MADERA_DSP2RMIX_INPUT_1_SOURCE 0x988
+#define MADERA_DSP2RMIX_INPUT_1_VOLUME 0x989
+#define MADERA_DSP2RMIX_INPUT_2_SOURCE 0x98A
+#define MADERA_DSP2RMIX_INPUT_2_VOLUME 0x98B
+#define MADERA_DSP2RMIX_INPUT_3_SOURCE 0x98C
+#define MADERA_DSP2RMIX_INPUT_3_VOLUME 0x98D
+#define MADERA_DSP2RMIX_INPUT_4_SOURCE 0x98E
+#define MADERA_DSP2RMIX_INPUT_4_VOLUME 0x98F
+#define MADERA_DSP2AUX1MIX_INPUT_1_SOURCE 0x990
+#define MADERA_DSP2AUX2MIX_INPUT_1_SOURCE 0x998
+#define MADERA_DSP2AUX3MIX_INPUT_1_SOURCE 0x9A0
+#define MADERA_DSP2AUX4MIX_INPUT_1_SOURCE 0x9A8
+#define MADERA_DSP2AUX5MIX_INPUT_1_SOURCE 0x9B0
+#define MADERA_DSP2AUX6MIX_INPUT_1_SOURCE 0x9B8
+#define MADERA_DSP3LMIX_INPUT_1_SOURCE 0x9C0
+#define MADERA_DSP3LMIX_INPUT_1_VOLUME 0x9C1
+#define MADERA_DSP3LMIX_INPUT_2_SOURCE 0x9C2
+#define MADERA_DSP3LMIX_INPUT_2_VOLUME 0x9C3
+#define MADERA_DSP3LMIX_INPUT_3_SOURCE 0x9C4
+#define MADERA_DSP3LMIX_INPUT_3_VOLUME 0x9C5
+#define MADERA_DSP3LMIX_INPUT_4_SOURCE 0x9C6
+#define MADERA_DSP3LMIX_INPUT_4_VOLUME 0x9C7
+#define MADERA_DSP3RMIX_INPUT_1_SOURCE 0x9C8
+#define MADERA_DSP3RMIX_INPUT_1_VOLUME 0x9C9
+#define MADERA_DSP3RMIX_INPUT_2_SOURCE 0x9CA
+#define MADERA_DSP3RMIX_INPUT_2_VOLUME 0x9CB
+#define MADERA_DSP3RMIX_INPUT_3_SOURCE 0x9CC
+#define MADERA_DSP3RMIX_INPUT_3_VOLUME 0x9CD
+#define MADERA_DSP3RMIX_INPUT_4_SOURCE 0x9CE
+#define MADERA_DSP3RMIX_INPUT_4_VOLUME 0x9CF
+#define MADERA_DSP3AUX1MIX_INPUT_1_SOURCE 0x9D0
+#define MADERA_DSP3AUX2MIX_INPUT_1_SOURCE 0x9D8
+#define MADERA_DSP3AUX3MIX_INPUT_1_SOURCE 0x9E0
+#define MADERA_DSP3AUX4MIX_INPUT_1_SOURCE 0x9E8
+#define MADERA_DSP3AUX5MIX_INPUT_1_SOURCE 0x9F0
+#define MADERA_DSP3AUX6MIX_INPUT_1_SOURCE 0x9F8
+#define MADERA_DSP4LMIX_INPUT_1_SOURCE 0xA00
+#define MADERA_DSP4LMIX_INPUT_1_VOLUME 0xA01
+#define MADERA_DSP4LMIX_INPUT_2_SOURCE 0xA02
+#define MADERA_DSP4LMIX_INPUT_2_VOLUME 0xA03
+#define MADERA_DSP4LMIX_INPUT_3_SOURCE 0xA04
+#define MADERA_DSP4LMIX_INPUT_3_VOLUME 0xA05
+#define MADERA_DSP4LMIX_INPUT_4_SOURCE 0xA06
+#define MADERA_DSP4LMIX_INPUT_4_VOLUME 0xA07
+#define MADERA_DSP4RMIX_INPUT_1_SOURCE 0xA08
+#define MADERA_DSP4RMIX_INPUT_1_VOLUME 0xA09
+#define MADERA_DSP4RMIX_INPUT_2_SOURCE 0xA0A
+#define MADERA_DSP4RMIX_INPUT_2_VOLUME 0xA0B
+#define MADERA_DSP4RMIX_INPUT_3_SOURCE 0xA0C
+#define MADERA_DSP4RMIX_INPUT_3_VOLUME 0xA0D
+#define MADERA_DSP4RMIX_INPUT_4_SOURCE 0xA0E
+#define MADERA_DSP4RMIX_INPUT_4_VOLUME 0xA0F
+#define MADERA_DSP4AUX1MIX_INPUT_1_SOURCE 0xA10
+#define MADERA_DSP4AUX2MIX_INPUT_1_SOURCE 0xA18
+#define MADERA_DSP4AUX3MIX_INPUT_1_SOURCE 0xA20
+#define MADERA_DSP4AUX4MIX_INPUT_1_SOURCE 0xA28
+#define MADERA_DSP4AUX5MIX_INPUT_1_SOURCE 0xA30
+#define MADERA_DSP4AUX6MIX_INPUT_1_SOURCE 0xA38
+#define MADERA_DSP5LMIX_INPUT_1_SOURCE 0xA40
+#define MADERA_DSP5LMIX_INPUT_1_VOLUME 0xA41
+#define MADERA_DSP5LMIX_INPUT_2_SOURCE 0xA42
+#define MADERA_DSP5LMIX_INPUT_2_VOLUME 0xA43
+#define MADERA_DSP5LMIX_INPUT_3_SOURCE 0xA44
+#define MADERA_DSP5LMIX_INPUT_3_VOLUME 0xA45
+#define MADERA_DSP5LMIX_INPUT_4_SOURCE 0xA46
+#define MADERA_DSP5LMIX_INPUT_4_VOLUME 0xA47
+#define MADERA_DSP5RMIX_INPUT_1_SOURCE 0xA48
+#define MADERA_DSP5RMIX_INPUT_1_VOLUME 0xA49
+#define MADERA_DSP5RMIX_INPUT_2_SOURCE 0xA4A
+#define MADERA_DSP5RMIX_INPUT_2_VOLUME 0xA4B
+#define MADERA_DSP5RMIX_INPUT_3_SOURCE 0xA4C
+#define MADERA_DSP5RMIX_INPUT_3_VOLUME 0xA4D
+#define MADERA_DSP5RMIX_INPUT_4_SOURCE 0xA4E
+#define MADERA_DSP5RMIX_INPUT_4_VOLUME 0xA4F
+#define MADERA_DSP5AUX1MIX_INPUT_1_SOURCE 0xA50
+#define MADERA_DSP5AUX2MIX_INPUT_1_SOURCE 0xA58
+#define MADERA_DSP5AUX3MIX_INPUT_1_SOURCE 0xA60
+#define MADERA_DSP5AUX4MIX_INPUT_1_SOURCE 0xA68
+#define MADERA_DSP5AUX5MIX_INPUT_1_SOURCE 0xA70
+#define MADERA_DSP5AUX6MIX_INPUT_1_SOURCE 0xA78
+#define MADERA_ASRC1_1LMIX_INPUT_1_SOURCE 0xA80
+#define MADERA_ASRC1_1RMIX_INPUT_1_SOURCE 0xA88
+#define MADERA_ASRC1_2LMIX_INPUT_1_SOURCE 0xA90
+#define MADERA_ASRC1_2RMIX_INPUT_1_SOURCE 0xA98
+#define MADERA_ASRC2_1LMIX_INPUT_1_SOURCE 0xAA0
+#define MADERA_ASRC2_1RMIX_INPUT_1_SOURCE 0xAA8
+#define MADERA_ASRC2_2LMIX_INPUT_1_SOURCE 0xAB0
+#define MADERA_ASRC2_2RMIX_INPUT_1_SOURCE 0xAB8
+#define MADERA_ISRC1DEC1MIX_INPUT_1_SOURCE 0xB00
+#define MADERA_ISRC1DEC2MIX_INPUT_1_SOURCE 0xB08
+#define MADERA_ISRC1DEC3MIX_INPUT_1_SOURCE 0xB10
+#define MADERA_ISRC1DEC4MIX_INPUT_1_SOURCE 0xB18
+#define MADERA_ISRC1INT1MIX_INPUT_1_SOURCE 0xB20
+#define MADERA_ISRC1INT2MIX_INPUT_1_SOURCE 0xB28
+#define MADERA_ISRC1INT3MIX_INPUT_1_SOURCE 0xB30
+#define MADERA_ISRC1INT4MIX_INPUT_1_SOURCE 0xB38
+#define MADERA_ISRC2DEC1MIX_INPUT_1_SOURCE 0xB40
+#define MADERA_ISRC2DEC2MIX_INPUT_1_SOURCE 0xB48
+#define MADERA_ISRC2DEC3MIX_INPUT_1_SOURCE 0xB50
+#define MADERA_ISRC2DEC4MIX_INPUT_1_SOURCE 0xB58
+#define MADERA_ISRC2INT1MIX_INPUT_1_SOURCE 0xB60
+#define MADERA_ISRC2INT2MIX_INPUT_1_SOURCE 0xB68
+#define MADERA_ISRC2INT3MIX_INPUT_1_SOURCE 0xB70
+#define MADERA_ISRC2INT4MIX_INPUT_1_SOURCE 0xB78
+#define MADERA_ISRC3DEC1MIX_INPUT_1_SOURCE 0xB80
+#define MADERA_ISRC3DEC2MIX_INPUT_1_SOURCE 0xB88
+#define MADERA_ISRC3DEC3MIX_INPUT_1_SOURCE 0xB90
+#define MADERA_ISRC3DEC4MIX_INPUT_1_SOURCE 0xB98
+#define MADERA_ISRC3INT1MIX_INPUT_1_SOURCE 0xBA0
+#define MADERA_ISRC3INT2MIX_INPUT_1_SOURCE 0xBA8
+#define MADERA_ISRC3INT3MIX_INPUT_1_SOURCE 0xBB0
+#define MADERA_ISRC3INT4MIX_INPUT_1_SOURCE 0xBB8
+#define MADERA_ISRC4DEC1MIX_INPUT_1_SOURCE 0xBC0
+#define MADERA_ISRC4DEC2MIX_INPUT_1_SOURCE 0xBC8
+#define MADERA_ISRC4INT1MIX_INPUT_1_SOURCE 0xBE0
+#define MADERA_ISRC4INT2MIX_INPUT_1_SOURCE 0xBE8
+#define MADERA_DSP6LMIX_INPUT_1_SOURCE 0xC00
+#define MADERA_DSP6LMIX_INPUT_1_VOLUME 0xC01
+#define MADERA_DSP6LMIX_INPUT_2_SOURCE 0xC02
+#define MADERA_DSP6LMIX_INPUT_2_VOLUME 0xC03
+#define MADERA_DSP6LMIX_INPUT_3_SOURCE 0xC04
+#define MADERA_DSP6LMIX_INPUT_3_VOLUME 0xC05
+#define MADERA_DSP6LMIX_INPUT_4_SOURCE 0xC06
+#define MADERA_DSP6LMIX_INPUT_4_VOLUME 0xC07
+#define MADERA_DSP6RMIX_INPUT_1_SOURCE 0xC08
+#define MADERA_DSP6RMIX_INPUT_1_VOLUME 0xC09
+#define MADERA_DSP6RMIX_INPUT_2_SOURCE 0xC0A
+#define MADERA_DSP6RMIX_INPUT_2_VOLUME 0xC0B
+#define MADERA_DSP6RMIX_INPUT_3_SOURCE 0xC0C
+#define MADERA_DSP6RMIX_INPUT_3_VOLUME 0xC0D
+#define MADERA_DSP6RMIX_INPUT_4_SOURCE 0xC0E
+#define MADERA_DSP6RMIX_INPUT_4_VOLUME 0xC0F
+#define MADERA_DSP6AUX1MIX_INPUT_1_SOURCE 0xC10
+#define MADERA_DSP6AUX2MIX_INPUT_1_SOURCE 0xC18
+#define MADERA_DSP6AUX3MIX_INPUT_1_SOURCE 0xC20
+#define MADERA_DSP6AUX4MIX_INPUT_1_SOURCE 0xC28
+#define MADERA_DSP6AUX5MIX_INPUT_1_SOURCE 0xC30
+#define MADERA_DSP6AUX6MIX_INPUT_1_SOURCE 0xC38
+#define MADERA_DSP7LMIX_INPUT_1_SOURCE 0xC40
+#define MADERA_DSP7LMIX_INPUT_1_VOLUME 0xC41
+#define MADERA_DSP7LMIX_INPUT_2_SOURCE 0xC42
+#define MADERA_DSP7LMIX_INPUT_2_VOLUME 0xC43
+#define MADERA_DSP7LMIX_INPUT_3_SOURCE 0xC44
+#define MADERA_DSP7LMIX_INPUT_3_VOLUME 0xC45
+#define MADERA_DSP7LMIX_INPUT_4_SOURCE 0xC46
+#define MADERA_DSP7LMIX_INPUT_4_VOLUME 0xC47
+#define MADERA_DSP7RMIX_INPUT_1_SOURCE 0xC48
+#define MADERA_DSP7RMIX_INPUT_1_VOLUME 0xC49
+#define MADERA_DSP7RMIX_INPUT_2_SOURCE 0xC4A
+#define MADERA_DSP7RMIX_INPUT_2_VOLUME 0xC4B
+#define MADERA_DSP7RMIX_INPUT_3_SOURCE 0xC4C
+#define MADERA_DSP7RMIX_INPUT_3_VOLUME 0xC4D
+#define MADERA_DSP7RMIX_INPUT_4_SOURCE 0xC4E
+#define MADERA_DSP7RMIX_INPUT_4_VOLUME 0xC4F
+#define MADERA_DSP7AUX1MIX_INPUT_1_SOURCE 0xC50
+#define MADERA_DSP7AUX2MIX_INPUT_1_SOURCE 0xC58
+#define MADERA_DSP7AUX3MIX_INPUT_1_SOURCE 0xC60
+#define MADERA_DSP7AUX4MIX_INPUT_1_SOURCE 0xC68
+#define MADERA_DSP7AUX5MIX_INPUT_1_SOURCE 0xC70
+#define MADERA_DSP7AUX6MIX_INPUT_1_SOURCE 0xC78
+#define MADERA_DFC1MIX_INPUT_1_SOURCE 0xDC0
+#define MADERA_DFC2MIX_INPUT_1_SOURCE 0xDC8
+#define MADERA_DFC3MIX_INPUT_1_SOURCE 0xDD0
+#define MADERA_DFC4MIX_INPUT_1_SOURCE 0xDD8
+#define MADERA_DFC5MIX_INPUT_1_SOURCE 0xDE0
+#define MADERA_DFC6MIX_INPUT_1_SOURCE 0xDE8
+#define MADERA_DFC7MIX_INPUT_1_SOURCE 0xDF0
+#define MADERA_DFC8MIX_INPUT_1_SOURCE 0xDF8
+#define MADERA_FX_CTRL1 0xE00
+#define MADERA_FX_CTRL2 0xE01
+#define MADERA_EQ1_1 0xE10
+#define MADERA_EQ1_2 0xE11
+#define MADERA_EQ1_21 0xE24
+#define MADERA_EQ2_1 0xE26
+#define MADERA_EQ2_2 0xE27
+#define MADERA_EQ2_21 0xE3A
+#define MADERA_EQ3_1 0xE3C
+#define MADERA_EQ3_2 0xE3D
+#define MADERA_EQ3_21 0xE50
+#define MADERA_EQ4_1 0xE52
+#define MADERA_EQ4_2 0xE53
+#define MADERA_EQ4_21 0xE66
+#define MADERA_DRC1_CTRL1 0xE80
+#define MADERA_DRC1_CTRL2 0xE81
+#define MADERA_DRC1_CTRL3 0xE82
+#define MADERA_DRC1_CTRL4 0xE83
+#define MADERA_DRC1_CTRL5 0xE84
+#define MADERA_DRC2_CTRL1 0xE88
+#define MADERA_DRC2_CTRL2 0xE89
+#define MADERA_DRC2_CTRL3 0xE8A
+#define MADERA_DRC2_CTRL4 0xE8B
+#define MADERA_DRC2_CTRL5 0xE8C
+#define MADERA_HPLPF1_1 0xEC0
+#define MADERA_HPLPF1_2 0xEC1
+#define MADERA_HPLPF2_1 0xEC4
+#define MADERA_HPLPF2_2 0xEC5
+#define MADERA_HPLPF3_1 0xEC8
+#define MADERA_HPLPF3_2 0xEC9
+#define MADERA_HPLPF4_1 0xECC
+#define MADERA_HPLPF4_2 0xECD
+#define MADERA_ASRC2_ENABLE 0xED0
+#define MADERA_ASRC2_STATUS 0xED1
+#define MADERA_ASRC2_RATE1 0xED2
+#define MADERA_ASRC2_RATE2 0xED3
+#define MADERA_ASRC1_ENABLE 0xEE0
+#define MADERA_ASRC1_STATUS 0xEE1
+#define MADERA_ASRC1_RATE1 0xEE2
+#define MADERA_ASRC1_RATE2 0xEE3
+#define MADERA_ISRC_1_CTRL_1 0xEF0
+#define MADERA_ISRC_1_CTRL_2 0xEF1
+#define MADERA_ISRC_1_CTRL_3 0xEF2
+#define MADERA_ISRC_2_CTRL_1 0xEF3
+#define MADERA_ISRC_2_CTRL_2 0xEF4
+#define MADERA_ISRC_2_CTRL_3 0xEF5
+#define MADERA_ISRC_3_CTRL_1 0xEF6
+#define MADERA_ISRC_3_CTRL_2 0xEF7
+#define MADERA_ISRC_3_CTRL_3 0xEF8
+#define MADERA_ISRC_4_CTRL_1 0xEF9
+#define MADERA_ISRC_4_CTRL_2 0xEFA
+#define MADERA_ISRC_4_CTRL_3 0xEFB
+#define MADERA_CLOCK_CONTROL 0xF00
+#define MADERA_ANC_SRC 0xF01
+#define MADERA_DSP_STATUS 0xF02
+#define MADERA_ANC_COEFF_START 0xF08
+#define MADERA_ANC_COEFF_END 0xF12
+#define MADERA_FCL_FILTER_CONTROL 0xF15
+#define MADERA_FCL_ADC_REFORMATTER_CONTROL 0xF17
+#define MADERA_FCL_COEFF_START 0xF18
+#define MADERA_FCL_COEFF_END 0xF69
+#define MADERA_FCR_FILTER_CONTROL 0xF71
+#define MADERA_FCR_ADC_REFORMATTER_CONTROL 0xF73
+#define MADERA_FCR_COEFF_START 0xF74
+#define MADERA_FCR_COEFF_END 0xFC5
+#define MADERA_AUXPDM1_CTRL_0 0x10C0
+#define MADERA_AUXPDM1_CTRL_1 0x10C1
+#define MADERA_DFC1_CTRL 0x1480
+#define MADERA_DFC1_RX 0x1482
+#define MADERA_DFC1_TX 0x1484
+#define MADERA_DFC2_CTRL 0x1486
+#define MADERA_DFC2_RX 0x1488
+#define MADERA_DFC2_TX 0x148A
+#define MADERA_DFC3_CTRL 0x148C
+#define MADERA_DFC3_RX 0x148E
+#define MADERA_DFC3_TX 0x1490
+#define MADERA_DFC4_CTRL 0x1492
+#define MADERA_DFC4_RX 0x1494
+#define MADERA_DFC4_TX 0x1496
+#define MADERA_DFC5_CTRL 0x1498
+#define MADERA_DFC5_RX 0x149A
+#define MADERA_DFC5_TX 0x149C
+#define MADERA_DFC6_CTRL 0x149E
+#define MADERA_DFC6_RX 0x14A0
+#define MADERA_DFC6_TX 0x14A2
+#define MADERA_DFC7_CTRL 0x14A4
+#define MADERA_DFC7_RX 0x14A6
+#define MADERA_DFC7_TX 0x14A8
+#define MADERA_DFC8_CTRL 0x14AA
+#define MADERA_DFC8_RX 0x14AC
+#define MADERA_DFC8_TX 0x14AE
+#define MADERA_DFC_STATUS 0x14B6
+#define MADERA_ADSP2_IRQ0 0x1600
+#define MADERA_ADSP2_IRQ1 0x1601
+#define MADERA_ADSP2_IRQ2 0x1602
+#define MADERA_ADSP2_IRQ3 0x1603
+#define MADERA_ADSP2_IRQ4 0x1604
+#define MADERA_ADSP2_IRQ5 0x1605
+#define MADERA_ADSP2_IRQ6 0x1606
+#define MADERA_ADSP2_IRQ7 0x1607
+#define MADERA_GPIO1_CTRL_1 0x1700
+#define MADERA_GPIO1_CTRL_2 0x1701
+#define MADERA_GPIO2_CTRL_1 0x1702
+#define MADERA_GPIO2_CTRL_2 0x1703
+#define MADERA_GPIO15_CTRL_1 0x171C
+#define MADERA_GPIO15_CTRL_2 0x171D
+#define MADERA_GPIO16_CTRL_1 0x171E
+#define MADERA_GPIO16_CTRL_2 0x171F
+#define MADERA_GPIO38_CTRL_1 0x174A
+#define MADERA_GPIO38_CTRL_2 0x174B
+#define MADERA_GPIO40_CTRL_1 0x174E
+#define MADERA_GPIO40_CTRL_2 0x174F
+#define MADERA_IRQ1_STATUS_1 0x1800
+#define MADERA_IRQ1_STATUS_2 0x1801
+#define MADERA_IRQ1_STATUS_6 0x1805
+#define MADERA_IRQ1_STATUS_7 0x1806
+#define MADERA_IRQ1_STATUS_9 0x1808
+#define MADERA_IRQ1_STATUS_11 0x180A
+#define MADERA_IRQ1_STATUS_12 0x180B
+#define MADERA_IRQ1_STATUS_15 0x180E
+#define MADERA_IRQ1_STATUS_33 0x1820
+#define MADERA_IRQ1_MASK_1 0x1840
+#define MADERA_IRQ1_MASK_2 0x1841
+#define MADERA_IRQ1_MASK_6 0x1845
+#define MADERA_IRQ1_MASK_33 0x1860
+#define MADERA_IRQ1_RAW_STATUS_1 0x1880
+#define MADERA_IRQ1_RAW_STATUS_2 0x1881
+#define MADERA_IRQ1_RAW_STATUS_7 0x1886
+#define MADERA_IRQ1_RAW_STATUS_15 0x188E
+#define MADERA_IRQ1_RAW_STATUS_33 0x18A0
+#define MADERA_INTERRUPT_DEBOUNCE_7 0x1A06
+#define MADERA_INTERRUPT_DEBOUNCE_15 0x1A0E
+#define MADERA_IRQ1_CTRL 0x1A80
+#define MADERA_IRQ2_CTRL 0x1A82
+#define MADERA_INTERRUPT_RAW_STATUS_1 0x1AA0
+#define MADERA_WSEQ_SEQUENCE_1 0x3000
+#define MADERA_WSEQ_SEQUENCE_225 0x31C0
+#define MADERA_WSEQ_SEQUENCE_252 0x31F6
+#define CS47L35_OTP_HPDET_CAL_1 0x31F8
+#define CS47L35_OTP_HPDET_CAL_2 0x31FA
+#define MADERA_WSEQ_SEQUENCE_508 0x33F6
+#define CS47L85_OTP_HPDET_CAL_1 0x33F8
+#define CS47L85_OTP_HPDET_CAL_2 0x33FA
+#define MADERA_OTP_HPDET_CAL_1 0x20004
+#define MADERA_OTP_HPDET_CAL_2 0x20006
+#define MADERA_DSP1_CONFIG_1 0x0FFE00
+#define MADERA_DSP1_CONFIG_2 0x0FFE02
+#define MADERA_DSP1_SCRATCH_1 0x0FFE40
+#define MADERA_DSP1_SCRATCH_2 0x0FFE42
+#define MADERA_DSP1_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x0FFE7C
+#define MADERA_DSP2_CONFIG_1 0x17FE00
+#define MADERA_DSP2_CONFIG_2 0x17FE02
+#define MADERA_DSP2_SCRATCH_1 0x17FE40
+#define MADERA_DSP2_SCRATCH_2 0x17FE42
+#define MADERA_DSP2_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x17FE7C
+#define MADERA_DSP3_CONFIG_1 0x1FFE00
+#define MADERA_DSP3_CONFIG_2 0x1FFE02
+#define MADERA_DSP3_SCRATCH_1 0x1FFE40
+#define MADERA_DSP3_SCRATCH_2 0x1FFE42
+#define MADERA_DSP3_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x1FFE7C
+#define MADERA_DSP4_CONFIG_1 0x27FE00
+#define MADERA_DSP4_CONFIG_2 0x27FE02
+#define MADERA_DSP4_SCRATCH_1 0x27FE40
+#define MADERA_DSP4_SCRATCH_2 0x27FE42
+#define MADERA_DSP4_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x27FE7C
+#define MADERA_DSP5_CONFIG_1 0x2FFE00
+#define MADERA_DSP5_CONFIG_2 0x2FFE02
+#define MADERA_DSP5_SCRATCH_1 0x2FFE40
+#define MADERA_DSP5_SCRATCH_2 0x2FFE42
+#define MADERA_DSP5_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x2FFE7C
+#define MADERA_DSP6_CONFIG_1 0x37FE00
+#define MADERA_DSP6_CONFIG_2 0x37FE02
+#define MADERA_DSP6_SCRATCH_1 0x37FE40
+#define MADERA_DSP6_SCRATCH_2 0x37FE42
+#define MADERA_DSP6_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x37FE7C
+#define MADERA_DSP7_CONFIG_1 0x3FFE00
+#define MADERA_DSP7_CONFIG_2 0x3FFE02
+#define MADERA_DSP7_SCRATCH_1 0x3FFE40
+#define MADERA_DSP7_SCRATCH_2 0x3FFE42
+#define MADERA_DSP7_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x3FFE7C
+
+/* (0x0000) Software_Reset */
+#define MADERA_SW_RST_DEV_ID1_MASK 0xFFFF
+#define MADERA_SW_RST_DEV_ID1_SHIFT 0
+
+/* (0x0001) Hardware_Revision */
+#define MADERA_HW_REVISION_MASK 0x00FF
+#define MADERA_HW_REVISION_SHIFT 0
+
+/* (0x0020) Tone_Generator_1 */
+#define MADERA_TONE2_ENA 0x0002
+#define MADERA_TONE2_ENA_MASK 0x0002
+#define MADERA_TONE2_ENA_SHIFT 1
+#define MADERA_TONE1_ENA 0x0001
+#define MADERA_TONE1_ENA_MASK 0x0001
+#define MADERA_TONE1_ENA_SHIFT 0
+
+/* (0x0021) Tone_Generator_2 */
+#define MADERA_TONE1_LVL_0_MASK 0xFFFF
+#define MADERA_TONE1_LVL_0_SHIFT 0
+
+/* (0x0022) Tone_Generator_3 */
+#define MADERA_TONE1_LVL_MASK 0x00FF
+#define MADERA_TONE1_LVL_SHIFT 0
+
+/* (0x0023) Tone_Generator_4 */
+#define MADERA_TONE2_LVL_0_MASK 0xFFFF
+#define MADERA_TONE2_LVL_0_SHIFT 0
+
+/* (0x0024) Tone_Generator_5 */
+#define MADERA_TONE2_LVL_MASK 0x00FF
+#define MADERA_TONE2_LVL_SHIFT 0
+
+/* (0x0030) PWM_Drive_1 */
+#define MADERA_PWM2_ENA 0x0002
+#define MADERA_PWM2_ENA_MASK 0x0002
+#define MADERA_PWM2_ENA_SHIFT 1
+#define MADERA_PWM1_ENA 0x0001
+#define MADERA_PWM1_ENA_MASK 0x0001
+#define MADERA_PWM1_ENA_SHIFT 0
+
+/* (0x00A0) Comfort_Noise_Generator */
+#define MADERA_NOISE_GEN_ENA 0x0020
+#define MADERA_NOISE_GEN_ENA_MASK 0x0020
+#define MADERA_NOISE_GEN_ENA_SHIFT 5
+#define MADERA_NOISE_GEN_GAIN_MASK 0x001F
+#define MADERA_NOISE_GEN_GAIN_SHIFT 0
+
+/* (0x0100) Clock_32k_1 */
+#define MADERA_CLK_32K_ENA 0x0040
+#define MADERA_CLK_32K_ENA_MASK 0x0040
+#define MADERA_CLK_32K_ENA_SHIFT 6
+#define MADERA_CLK_32K_SRC_MASK 0x0003
+#define MADERA_CLK_32K_SRC_SHIFT 0
+
+/* (0x0101) System_Clock_1 */
+#define MADERA_SYSCLK_FRAC 0x8000
+#define MADERA_SYSCLK_FRAC_MASK 0x8000
+#define MADERA_SYSCLK_FRAC_SHIFT 15
+#define MADERA_SYSCLK_FREQ_MASK 0x0700
+#define MADERA_SYSCLK_FREQ_SHIFT 8
+#define MADERA_SYSCLK_ENA 0x0040
+#define MADERA_SYSCLK_ENA_MASK 0x0040
+#define MADERA_SYSCLK_ENA_SHIFT 6
+#define MADERA_SYSCLK_SRC_MASK 0x000F
+#define MADERA_SYSCLK_SRC_SHIFT 0
+
+/* (0x0102) Sample_rate_1 */
+#define MADERA_SAMPLE_RATE_1_MASK 0x001F
+#define MADERA_SAMPLE_RATE_1_SHIFT 0
+
+/* (0x0103) Sample_rate_2 */
+#define MADERA_SAMPLE_RATE_2_MASK 0x001F
+#define MADERA_SAMPLE_RATE_2_SHIFT 0
+
+/* (0x0104) Sample_rate_3 */
+#define MADERA_SAMPLE_RATE_3_MASK 0x001F
+#define MADERA_SAMPLE_RATE_3_SHIFT 0
+
+/* (0x0112) Async_clock_1 */
+#define MADERA_ASYNC_CLK_FREQ_MASK 0x0700
+#define MADERA_ASYNC_CLK_FREQ_SHIFT 8
+#define MADERA_ASYNC_CLK_ENA 0x0040
+#define MADERA_ASYNC_CLK_ENA_MASK 0x0040
+#define MADERA_ASYNC_CLK_ENA_SHIFT 6
+#define MADERA_ASYNC_CLK_SRC_MASK 0x000F
+#define MADERA_ASYNC_CLK_SRC_SHIFT 0
+
+/* (0x0113) Async_sample_rate_1 */
+#define MADERA_ASYNC_SAMPLE_RATE_1_MASK 0x001F
+#define MADERA_ASYNC_SAMPLE_RATE_1_SHIFT 0
+
+/* (0x0114) Async_sample_rate_2 */
+#define MADERA_ASYNC_SAMPLE_RATE_2_MASK 0x001F
+#define MADERA_ASYNC_SAMPLE_RATE_2_SHIFT 0
+
+/* (0x0120) DSP_Clock_1 */
+#define MADERA_DSP_CLK_FREQ_LEGACY 0x0700
+#define MADERA_DSP_CLK_FREQ_LEGACY_MASK 0x0700
+#define MADERA_DSP_CLK_FREQ_LEGACY_SHIFT 8
+#define MADERA_DSP_CLK_ENA 0x0040
+#define MADERA_DSP_CLK_ENA_MASK 0x0040
+#define MADERA_DSP_CLK_ENA_SHIFT 6
+#define MADERA_DSP_CLK_SRC 0x000F
+#define MADERA_DSP_CLK_SRC_MASK 0x000F
+#define MADERA_DSP_CLK_SRC_SHIFT 0
+
+/* (0x0122) DSP_Clock_2 */
+#define MADERA_DSP_CLK_FREQ_MASK 0x03FF
+#define MADERA_DSP_CLK_FREQ_SHIFT 0
+
+/* (0x0149) Output_system_clock */
+#define MADERA_OPCLK_ENA 0x8000
+#define MADERA_OPCLK_ENA_MASK 0x8000
+#define MADERA_OPCLK_ENA_SHIFT 15
+#define MADERA_OPCLK_DIV_MASK 0x00F8
+#define MADERA_OPCLK_DIV_SHIFT 3
+#define MADERA_OPCLK_SEL_MASK 0x0007
+#define MADERA_OPCLK_SEL_SHIFT 0
+
+/* (0x014A) Output_async_clock */
+#define MADERA_OPCLK_ASYNC_ENA 0x8000
+#define MADERA_OPCLK_ASYNC_ENA_MASK 0x8000
+#define MADERA_OPCLK_ASYNC_ENA_SHIFT 15
+#define MADERA_OPCLK_ASYNC_DIV_MASK 0x00F8
+#define MADERA_OPCLK_ASYNC_DIV_SHIFT 3
+#define MADERA_OPCLK_ASYNC_SEL_MASK 0x0007
+#define MADERA_OPCLK_ASYNC_SEL_SHIFT 0
+
+/* (0x0171) FLL1_Control_1 */
+#define CS47L92_FLL1_REFCLK_SRC_MASK 0xF000
+#define CS47L92_FLL1_REFCLK_SRC_SHIFT 12
+#define MADERA_FLL1_HOLD_MASK 0x0004
+#define MADERA_FLL1_HOLD_SHIFT 2
+#define MADERA_FLL1_FREERUN 0x0002
+#define MADERA_FLL1_FREERUN_MASK 0x0002
+#define MADERA_FLL1_FREERUN_SHIFT 1
+#define MADERA_FLL1_ENA 0x0001
+#define MADERA_FLL1_ENA_MASK 0x0001
+#define MADERA_FLL1_ENA_SHIFT 0
+
+/* (0x0172) FLL1_Control_2 */
+#define MADERA_FLL1_CTRL_UPD 0x8000
+#define MADERA_FLL1_CTRL_UPD_MASK 0x8000
+#define MADERA_FLL1_CTRL_UPD_SHIFT 15
+#define MADERA_FLL1_N_MASK 0x03FF
+#define MADERA_FLL1_N_SHIFT 0
+
+/* (0x0173) FLL1_Control_3 */
+#define MADERA_FLL1_THETA_MASK 0xFFFF
+#define MADERA_FLL1_THETA_SHIFT 0
+
+/* (0x0174) FLL1_Control_4 */
+#define MADERA_FLL1_LAMBDA_MASK 0xFFFF
+#define MADERA_FLL1_LAMBDA_SHIFT 0
+
+/* (0x0175) FLL1_Control_5 */
+#define MADERA_FLL1_FRATIO_MASK 0x0F00
+#define MADERA_FLL1_FRATIO_SHIFT 8
+#define MADERA_FLL1_FB_DIV_MASK 0x03FF
+#define MADERA_FLL1_FB_DIV_SHIFT 0
+
+/* (0x0176) FLL1_Control_6 */
+#define MADERA_FLL1_REFCLK_DIV_MASK 0x00C0
+#define MADERA_FLL1_REFCLK_DIV_SHIFT 6
+#define MADERA_FLL1_REFCLK_SRC_MASK 0x000F
+#define MADERA_FLL1_REFCLK_SRC_SHIFT 0
+
+/* (0x0179) FLL1_Control_7 */
+#define MADERA_FLL1_GAIN_MASK 0x003C
+#define MADERA_FLL1_GAIN_SHIFT 2
+
+/* (0x017A) FLL1_EFS_2 */
+#define MADERA_FLL1_PHASE_GAIN_MASK 0xF000
+#define MADERA_FLL1_PHASE_GAIN_SHIFT 12
+#define MADERA_FLL1_PHASE_ENA_MASK 0x0800
+#define MADERA_FLL1_PHASE_ENA_SHIFT 11
+
+/* (0x017A) FLL1_Control_10 */
+#define MADERA_FLL1_HP_MASK 0xC000
+#define MADERA_FLL1_HP_SHIFT 14
+#define MADERA_FLL1_PHASEDET_ENA_MASK 0x1000
+#define MADERA_FLL1_PHASEDET_ENA_SHIFT 12
+
+/* (0x017B) FLL1_Control_11 */
+#define MADERA_FLL1_LOCKDET_THR_MASK 0x001E
+#define MADERA_FLL1_LOCKDET_THR_SHIFT 1
+#define MADERA_FLL1_LOCKDET_MASK 0x0001
+#define MADERA_FLL1_LOCKDET_SHIFT 0
+
+/* (0x017D) FLL1_Digital_Test_1 */
+#define MADERA_FLL1_SYNC_EFS_ENA_MASK 0x0100
+#define MADERA_FLL1_SYNC_EFS_ENA_SHIFT 8
+#define MADERA_FLL1_CLK_VCO_FAST_SRC_MASK 0x0003
+#define MADERA_FLL1_CLK_VCO_FAST_SRC_SHIFT 0
+
+/* (0x0181) FLL1_Synchroniser_1 */
+#define MADERA_FLL1_SYNC_ENA 0x0001
+#define MADERA_FLL1_SYNC_ENA_MASK 0x0001
+#define MADERA_FLL1_SYNC_ENA_SHIFT 0
+
+/* (0x0182) FLL1_Synchroniser_2 */
+#define MADERA_FLL1_SYNC_N_MASK 0x03FF
+#define MADERA_FLL1_SYNC_N_SHIFT 0
+
+/* (0x0183) FLL1_Synchroniser_3 */
+#define MADERA_FLL1_SYNC_THETA_MASK 0xFFFF
+#define MADERA_FLL1_SYNC_THETA_SHIFT 0
+
+/* (0x0184) FLL1_Synchroniser_4 */
+#define MADERA_FLL1_SYNC_LAMBDA_MASK 0xFFFF
+#define MADERA_FLL1_SYNC_LAMBDA_SHIFT 0
+
+/* (0x0185) FLL1_Synchroniser_5 */
+#define MADERA_FLL1_SYNC_FRATIO_MASK 0x0700
+#define MADERA_FLL1_SYNC_FRATIO_SHIFT 8
+
+/* (0x0186) FLL1_Synchroniser_6 */
+#define MADERA_FLL1_SYNCCLK_DIV_MASK 0x00C0
+#define MADERA_FLL1_SYNCCLK_DIV_SHIFT 6
+#define MADERA_FLL1_SYNCCLK_SRC_MASK 0x000F
+#define MADERA_FLL1_SYNCCLK_SRC_SHIFT 0
+
+/* (0x0187) FLL1_Synchroniser_7 */
+#define MADERA_FLL1_SYNC_GAIN_MASK 0x003C
+#define MADERA_FLL1_SYNC_GAIN_SHIFT 2
+#define MADERA_FLL1_SYNC_DFSAT 0x0001
+#define MADERA_FLL1_SYNC_DFSAT_MASK 0x0001
+#define MADERA_FLL1_SYNC_DFSAT_SHIFT 0
+
+/* (0x01D1) FLL_AO_Control_1 */
+#define MADERA_FLL_AO_HOLD 0x0004
+#define MADERA_FLL_AO_HOLD_MASK 0x0004
+#define MADERA_FLL_AO_HOLD_SHIFT 2
+#define MADERA_FLL_AO_FREERUN 0x0002
+#define MADERA_FLL_AO_FREERUN_MASK 0x0002
+#define MADERA_FLL_AO_FREERUN_SHIFT 1
+#define MADERA_FLL_AO_ENA 0x0001
+#define MADERA_FLL_AO_ENA_MASK 0x0001
+#define MADERA_FLL_AO_ENA_SHIFT 0
+
+/* (0x01D2) FLL_AO_Control_2 */
+#define MADERA_FLL_AO_CTRL_UPD 0x8000
+#define MADERA_FLL_AO_CTRL_UPD_MASK 0x8000
+#define MADERA_FLL_AO_CTRL_UPD_SHIFT 15
+
+/* (0x01D6) FLL_AO_Control_6 */
+#define MADERA_FLL_AO_REFCLK_SRC_MASK 0x000F
+#define MADERA_FLL_AO_REFCLK_SRC_SHIFT 0
+
+/* (0x0200) Mic_Charge_Pump_1 */
+#define MADERA_CPMIC_BYPASS 0x0002
+#define MADERA_CPMIC_BYPASS_MASK 0x0002
+#define MADERA_CPMIC_BYPASS_SHIFT 1
+#define MADERA_CPMIC_ENA 0x0001
+#define MADERA_CPMIC_ENA_MASK 0x0001
+#define MADERA_CPMIC_ENA_SHIFT 0
+
+/* (0x0210) LDO1_Control_1 */
+#define MADERA_LDO1_VSEL_MASK 0x07E0
+#define MADERA_LDO1_VSEL_SHIFT 5
+#define MADERA_LDO1_FAST 0x0010
+#define MADERA_LDO1_FAST_MASK 0x0010
+#define MADERA_LDO1_FAST_SHIFT 4
+#define MADERA_LDO1_DISCH 0x0004
+#define MADERA_LDO1_DISCH_MASK 0x0004
+#define MADERA_LDO1_DISCH_SHIFT 2
+#define MADERA_LDO1_BYPASS 0x0002
+#define MADERA_LDO1_BYPASS_MASK 0x0002
+#define MADERA_LDO1_BYPASS_SHIFT 1
+#define MADERA_LDO1_ENA 0x0001
+#define MADERA_LDO1_ENA_MASK 0x0001
+#define MADERA_LDO1_ENA_SHIFT 0
+
+/* (0x0213) LDO2_Control_1 */
+#define MADERA_LDO2_VSEL_MASK 0x07E0
+#define MADERA_LDO2_VSEL_SHIFT 5
+#define MADERA_LDO2_FAST 0x0010
+#define MADERA_LDO2_FAST_MASK 0x0010
+#define MADERA_LDO2_FAST_SHIFT 4
+#define MADERA_LDO2_DISCH 0x0004
+#define MADERA_LDO2_DISCH_MASK 0x0004
+#define MADERA_LDO2_DISCH_SHIFT 2
+#define MADERA_LDO2_BYPASS 0x0002
+#define MADERA_LDO2_BYPASS_MASK 0x0002
+#define MADERA_LDO2_BYPASS_SHIFT 1
+#define MADERA_LDO2_ENA 0x0001
+#define MADERA_LDO2_ENA_MASK 0x0001
+#define MADERA_LDO2_ENA_SHIFT 0
+
+/* (0x0218) Mic_Bias_Ctrl_1 */
+#define MADERA_MICB1_EXT_CAP 0x8000
+#define MADERA_MICB1_EXT_CAP_MASK 0x8000
+#define MADERA_MICB1_EXT_CAP_SHIFT 15
+#define MADERA_MICB1_LVL_MASK 0x01E0
+#define MADERA_MICB1_LVL_SHIFT 5
+#define MADERA_MICB1_ENA 0x0001
+#define MADERA_MICB1_ENA_MASK 0x0001
+#define MADERA_MICB1_ENA_SHIFT 0
+
+/* (0x021C) Mic_Bias_Ctrl_5 */
+#define MADERA_MICB1D_ENA 0x1000
+#define MADERA_MICB1D_ENA_MASK 0x1000
+#define MADERA_MICB1D_ENA_SHIFT 12
+#define MADERA_MICB1C_ENA 0x0100
+#define MADERA_MICB1C_ENA_MASK 0x0100
+#define MADERA_MICB1C_ENA_SHIFT 8
+#define MADERA_MICB1B_ENA 0x0010
+#define MADERA_MICB1B_ENA_MASK 0x0010
+#define MADERA_MICB1B_ENA_SHIFT 4
+#define MADERA_MICB1A_ENA 0x0001
+#define MADERA_MICB1A_ENA_MASK 0x0001
+#define MADERA_MICB1A_ENA_SHIFT 0
+
+/* (0x021E) Mic_Bias_Ctrl_6 */
+#define MADERA_MICB2D_ENA 0x1000
+#define MADERA_MICB2D_ENA_MASK 0x1000
+#define MADERA_MICB2D_ENA_SHIFT 12
+#define MADERA_MICB2C_ENA 0x0100
+#define MADERA_MICB2C_ENA_MASK 0x0100
+#define MADERA_MICB2C_ENA_SHIFT 8
+#define MADERA_MICB2B_ENA 0x0010
+#define MADERA_MICB2B_ENA_MASK 0x0010
+#define MADERA_MICB2B_ENA_SHIFT 4
+#define MADERA_MICB2A_ENA 0x0001
+#define MADERA_MICB2A_ENA_MASK 0x0001
+#define MADERA_MICB2A_ENA_SHIFT 0
+
+/* (0x0225) HP_Ctrl_1L */
+#define MADERA_RMV_SHRT_HP1L 0x4000
+#define MADERA_RMV_SHRT_HP1L_MASK 0x4000
+#define MADERA_RMV_SHRT_HP1L_SHIFT 14
+#define MADERA_HP1L_FLWR 0x0004
+#define MADERA_HP1L_FLWR_MASK 0x0004
+#define MADERA_HP1L_FLWR_SHIFT 2
+#define MADERA_HP1L_SHRTI 0x0002
+#define MADERA_HP1L_SHRTI_MASK 0x0002
+#define MADERA_HP1L_SHRTI_SHIFT 1
+#define MADERA_HP1L_SHRTO 0x0001
+#define MADERA_HP1L_SHRTO_MASK 0x0001
+#define MADERA_HP1L_SHRTO_SHIFT 0
+
+/* (0x0226) HP_Ctrl_1R */
+#define MADERA_RMV_SHRT_HP1R 0x4000
+#define MADERA_RMV_SHRT_HP1R_MASK 0x4000
+#define MADERA_RMV_SHRT_HP1R_SHIFT 14
+#define MADERA_HP1R_FLWR 0x0004
+#define MADERA_HP1R_FLWR_MASK 0x0004
+#define MADERA_HP1R_FLWR_SHIFT 2
+#define MADERA_HP1R_SHRTI 0x0002
+#define MADERA_HP1R_SHRTI_MASK 0x0002
+#define MADERA_HP1R_SHRTI_SHIFT 1
+#define MADERA_HP1R_SHRTO 0x0001
+#define MADERA_HP1R_SHRTO_MASK 0x0001
+#define MADERA_HP1R_SHRTO_SHIFT 0
+
+/* (0x0293) Accessory_Detect_Mode_1 */
+#define MADERA_ACCDET_SRC 0x2000
+#define MADERA_ACCDET_SRC_MASK 0x2000
+#define MADERA_ACCDET_SRC_SHIFT 13
+#define MADERA_ACCDET_POLARITY_INV_ENA 0x0080
+#define MADERA_ACCDET_POLARITY_INV_ENA_MASK 0x0080
+#define MADERA_ACCDET_POLARITY_INV_ENA_SHIFT 7
+#define MADERA_ACCDET_MODE_MASK 0x0007
+#define MADERA_ACCDET_MODE_SHIFT 0
+
+/* (0x0299) Headphone_Detect_0 */
+#define MADERA_HPD_GND_SEL 0x0007
+#define MADERA_HPD_GND_SEL_MASK 0x0007
+#define MADERA_HPD_GND_SEL_SHIFT 0
+#define MADERA_HPD_SENSE_SEL 0x00F0
+#define MADERA_HPD_SENSE_SEL_MASK 0x00F0
+#define MADERA_HPD_SENSE_SEL_SHIFT 4
+#define MADERA_HPD_FRC_SEL 0x0F00
+#define MADERA_HPD_FRC_SEL_MASK 0x0F00
+#define MADERA_HPD_FRC_SEL_SHIFT 8
+#define MADERA_HPD_OUT_SEL 0x7000
+#define MADERA_HPD_OUT_SEL_MASK 0x7000
+#define MADERA_HPD_OUT_SEL_SHIFT 12
+#define MADERA_HPD_OVD_ENA_SEL 0x8000
+#define MADERA_HPD_OVD_ENA_SEL_MASK 0x8000
+#define MADERA_HPD_OVD_ENA_SEL_SHIFT 15
+
+/* (0x029B) Headphone_Detect_1 */
+#define MADERA_HP_IMPEDANCE_RANGE_MASK 0x0600
+#define MADERA_HP_IMPEDANCE_RANGE_SHIFT 9
+#define MADERA_HP_STEP_SIZE 0x0100
+#define MADERA_HP_STEP_SIZE_MASK 0x0100
+#define MADERA_HP_STEP_SIZE_SHIFT 8
+#define MADERA_HP_CLK_DIV_MASK 0x0018
+#define MADERA_HP_CLK_DIV_SHIFT 3
+#define MADERA_HP_RATE_MASK 0x0006
+#define MADERA_HP_RATE_SHIFT 1
+#define MADERA_HP_POLL 0x0001
+#define MADERA_HP_POLL_MASK 0x0001
+#define MADERA_HP_POLL_SHIFT 0
+
+/* (0x029C) Headphone_Detect_2 */
+#define MADERA_HP_DONE_MASK 0x8000
+#define MADERA_HP_DONE_SHIFT 15
+#define MADERA_HP_LVL_MASK 0x7FFF
+#define MADERA_HP_LVL_SHIFT 0
+
+/* (0x029D) Headphone_Detect_3 */
+#define MADERA_HP_DACVAL_MASK 0x03FF
+#define MADERA_HP_DACVAL_SHIFT 0
+
+/* (0x029F) Headphone_Detect_5 */
+#define MADERA_HP_DACVAL_DOWN_MASK 0x03FF
+#define MADERA_HP_DACVAL_DOWN_SHIFT 0
+
+/* (0x02A2) Mic_Detect_1_Control_0 */
+#define MADERA_MICD1_GND_MASK 0x0007
+#define MADERA_MICD1_GND_SHIFT 0
+#define MADERA_MICD1_SENSE_MASK 0x00F0
+#define MADERA_MICD1_SENSE_SHIFT 4
+#define MADERA_MICD1_ADC_MODE_MASK 0x8000
+#define MADERA_MICD1_ADC_MODE_SHIFT 15
+
+/* (0x02A3) Mic_Detect_1_Control_1 */
+#define MADERA_MICD_BIAS_STARTTIME_MASK 0xF000
+#define MADERA_MICD_BIAS_STARTTIME_SHIFT 12
+#define MADERA_MICD_RATE_MASK 0x0F00
+#define MADERA_MICD_RATE_SHIFT 8
+#define MADERA_MICD_BIAS_SRC_MASK 0x00F0
+#define MADERA_MICD_BIAS_SRC_SHIFT 4
+#define MADERA_MICD_DBTIME 0x0002
+#define MADERA_MICD_DBTIME_MASK 0x0002
+#define MADERA_MICD_DBTIME_SHIFT 1
+#define MADERA_MICD_ENA 0x0001
+#define MADERA_MICD_ENA_MASK 0x0001
+#define MADERA_MICD_ENA_SHIFT 0
+
+/* (0x02A4) Mic_Detect_1_Control_2 */
+#define MADERA_MICD_LVL_SEL_MASK 0x00FF
+#define MADERA_MICD_LVL_SEL_SHIFT 0
+
+/* (0x02A5) Mic_Detect_1_Control_3 */
+#define MADERA_MICD_LVL_0 0x0004
+#define MADERA_MICD_LVL_1 0x0008
+#define MADERA_MICD_LVL_2 0x0010
+#define MADERA_MICD_LVL_3 0x0020
+#define MADERA_MICD_LVL_4 0x0040
+#define MADERA_MICD_LVL_5 0x0080
+#define MADERA_MICD_LVL_6 0x0100
+#define MADERA_MICD_LVL_7 0x0200
+#define MADERA_MICD_LVL_8 0x0400
+#define MADERA_MICD_LVL_MASK 0x07FC
+#define MADERA_MICD_LVL_SHIFT 2
+#define MADERA_MICD_VALID 0x0002
+#define MADERA_MICD_VALID_MASK 0x0002
+#define MADERA_MICD_VALID_SHIFT 1
+#define MADERA_MICD_STS 0x0001
+#define MADERA_MICD_STS_MASK 0x0001
+#define MADERA_MICD_STS_SHIFT 0
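+
+/*
+ * Illustrative sketch, not part of this patch: the detected mic level is
+ * recovered by masking and shifting the word read back from this status
+ * register (the regmap handle is assumed for the example):
+ *
+ *   regmap_read(regmap, MADERA_MIC_DETECT_1_CONTROL_3, &val);
+ *   if (val & MADERA_MICD_VALID)
+ *           lvl = (val & MADERA_MICD_LVL_MASK) >> MADERA_MICD_LVL_SHIFT;
+ */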
+
+/* (0x02AB) Mic_Detect_1_Control_4 */
+#define MADERA_MICDET_ADCVAL_DIFF_MASK 0xFF00
+#define MADERA_MICDET_ADCVAL_DIFF_SHIFT 8
+#define MADERA_MICDET_ADCVAL_MASK 0x007F
+#define MADERA_MICDET_ADCVAL_SHIFT 0
+
+/* (0x02C6) Micd_Clamp_control */
+#define MADERA_MICD_CLAMP_OVD 0x0010
+#define MADERA_MICD_CLAMP_OVD_MASK 0x0010
+#define MADERA_MICD_CLAMP_OVD_SHIFT 4
+#define MADERA_MICD_CLAMP_MODE_MASK 0x000F
+#define MADERA_MICD_CLAMP_MODE_SHIFT 0
+
+/* (0x02C8) GP_Switch_1 */
+#define MADERA_SW2_MODE_MASK 0x000C
+#define MADERA_SW2_MODE_SHIFT 2
+#define MADERA_SW1_MODE_MASK 0x0003
+#define MADERA_SW1_MODE_SHIFT 0
+
+/* (0x02D3) Jack_detect_analogue */
+#define MADERA_JD2_ENA 0x0002
+#define MADERA_JD2_ENA_MASK 0x0002
+#define MADERA_JD2_ENA_SHIFT 1
+#define MADERA_JD1_ENA 0x0001
+#define MADERA_JD1_ENA_MASK 0x0001
+#define MADERA_JD1_ENA_SHIFT 0
+
+/* (0x0300) Input_Enables */
+#define MADERA_IN6L_ENA 0x0800
+#define MADERA_IN6L_ENA_MASK 0x0800
+#define MADERA_IN6L_ENA_SHIFT 11
+#define MADERA_IN6R_ENA 0x0400
+#define MADERA_IN6R_ENA_MASK 0x0400
+#define MADERA_IN6R_ENA_SHIFT 10
+#define MADERA_IN5L_ENA 0x0200
+#define MADERA_IN5L_ENA_MASK 0x0200
+#define MADERA_IN5L_ENA_SHIFT 9
+#define MADERA_IN5R_ENA 0x0100
+#define MADERA_IN5R_ENA_MASK 0x0100
+#define MADERA_IN5R_ENA_SHIFT 8
+#define MADERA_IN4L_ENA 0x0080
+#define MADERA_IN4L_ENA_MASK 0x0080
+#define MADERA_IN4L_ENA_SHIFT 7
+#define MADERA_IN4R_ENA 0x0040
+#define MADERA_IN4R_ENA_MASK 0x0040
+#define MADERA_IN4R_ENA_SHIFT 6
+#define MADERA_IN3L_ENA 0x0020
+#define MADERA_IN3L_ENA_MASK 0x0020
+#define MADERA_IN3L_ENA_SHIFT 5
+#define MADERA_IN3R_ENA 0x0010
+#define MADERA_IN3R_ENA_MASK 0x0010
+#define MADERA_IN3R_ENA_SHIFT 4
+#define MADERA_IN2L_ENA 0x0008
+#define MADERA_IN2L_ENA_MASK 0x0008
+#define MADERA_IN2L_ENA_SHIFT 3
+#define MADERA_IN2R_ENA 0x0004
+#define MADERA_IN2R_ENA_MASK 0x0004
+#define MADERA_IN2R_ENA_SHIFT 2
+#define MADERA_IN1L_ENA 0x0002
+#define MADERA_IN1L_ENA_MASK 0x0002
+#define MADERA_IN1L_ENA_SHIFT 1
+#define MADERA_IN1R_ENA 0x0001
+#define MADERA_IN1R_ENA_MASK 0x0001
+#define MADERA_IN1R_ENA_SHIFT 0
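+
+/*
+ * Usage sketch (illustrative; the regmap handle and register address
+ * constant are assumed to come from elsewhere in the header/driver): an
+ * input path is enabled by read-modify-writing its _MASK/_SHIFT pair:
+ *
+ *   regmap_update_bits(regmap, MADERA_INPUT_ENABLES,
+ *                      MADERA_IN1L_ENA_MASK, 1 << MADERA_IN1L_ENA_SHIFT);
+ */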
+
+/* (0x0308) Input_Rate */
+#define MADERA_IN_RATE_MASK 0xF800
+#define MADERA_IN_RATE_SHIFT 11
+#define MADERA_IN_MODE_MASK 0x0400
+#define MADERA_IN_MODE_SHIFT 10
+
+/* (0x0309) Input_Volume_Ramp */
+#define MADERA_IN_VD_RAMP_MASK 0x0070
+#define MADERA_IN_VD_RAMP_SHIFT 4
+#define MADERA_IN_VI_RAMP_MASK 0x0007
+#define MADERA_IN_VI_RAMP_SHIFT 0
+
+/* (0x030C) HPF_Control */
+#define MADERA_IN_HPF_CUT_MASK 0x0007
+#define MADERA_IN_HPF_CUT_SHIFT 0
+
+/* (0x0310) IN1L_Control */
+#define MADERA_IN1L_HPF_MASK 0x8000
+#define MADERA_IN1L_HPF_SHIFT 15
+#define MADERA_IN1_DMIC_SUP_MASK 0x1800
+#define MADERA_IN1_DMIC_SUP_SHIFT 11
+#define MADERA_IN1_MODE_MASK 0x0400
+#define MADERA_IN1_MODE_SHIFT 10
+#define MADERA_IN1L_PGA_VOL_MASK 0x00FE
+#define MADERA_IN1L_PGA_VOL_SHIFT 1
+
+/* (0x0311) ADC_Digital_Volume_1L */
+#define MADERA_IN1L_SRC_MASK 0x4000
+#define MADERA_IN1L_SRC_SHIFT 14
+#define MADERA_IN1L_SRC_SE_MASK 0x2000
+#define MADERA_IN1L_SRC_SE_SHIFT 13
+#define MADERA_IN1L_LP_MODE 0x0800
+#define MADERA_IN1L_LP_MODE_MASK 0x0800
+#define MADERA_IN1L_LP_MODE_SHIFT 11
+#define MADERA_IN_VU 0x0200
+#define MADERA_IN_VU_MASK 0x0200
+#define MADERA_IN_VU_SHIFT 9
+#define MADERA_IN1L_MUTE 0x0100
+#define MADERA_IN1L_MUTE_MASK 0x0100
+#define MADERA_IN1L_MUTE_SHIFT 8
+#define MADERA_IN1L_DIG_VOL_MASK 0x00FF
+#define MADERA_IN1L_DIG_VOL_SHIFT 0
+
+/* (0x0312) DMIC1L_Control */
+#define MADERA_IN1_OSR_MASK 0x0700
+#define MADERA_IN1_OSR_SHIFT 8
+
+/* (0x0313) IN1L_Rate_Control */
+#define MADERA_IN1L_RATE_MASK 0xF800
+#define MADERA_IN1L_RATE_SHIFT 11
+
+/* (0x0314) IN1R_Control */
+#define MADERA_IN1R_HPF_MASK 0x8000
+#define MADERA_IN1R_HPF_SHIFT 15
+#define MADERA_IN1R_PGA_VOL_MASK 0x00FE
+#define MADERA_IN1R_PGA_VOL_SHIFT 1
+#define MADERA_IN1_DMICCLK_SRC_MASK 0x1800
+#define MADERA_IN1_DMICCLK_SRC_SHIFT 11
+
+/* (0x0315) ADC_Digital_Volume_1R */
+#define MADERA_IN1R_SRC_MASK 0x4000
+#define MADERA_IN1R_SRC_SHIFT 14
+#define MADERA_IN1R_SRC_SE_MASK 0x2000
+#define MADERA_IN1R_SRC_SE_SHIFT 13
+#define MADERA_IN1R_LP_MODE 0x0800
+#define MADERA_IN1R_LP_MODE_MASK 0x0800
+#define MADERA_IN1R_LP_MODE_SHIFT 11
+#define MADERA_IN1R_MUTE 0x0100
+#define MADERA_IN1R_MUTE_MASK 0x0100
+#define MADERA_IN1R_MUTE_SHIFT 8
+#define MADERA_IN1R_DIG_VOL_MASK 0x00FF
+#define MADERA_IN1R_DIG_VOL_SHIFT 0
+
+/* (0x0317) IN1R_Rate_Control */
+#define MADERA_IN1R_RATE_MASK 0xF800
+#define MADERA_IN1R_RATE_SHIFT 11
+
+/* (0x0318) IN2L_Control */
+#define MADERA_IN2L_HPF_MASK 0x8000
+#define MADERA_IN2L_HPF_SHIFT 15
+#define MADERA_IN2_DMIC_SUP_MASK 0x1800
+#define MADERA_IN2_DMIC_SUP_SHIFT 11
+#define MADERA_IN2_MODE_MASK 0x0400
+#define MADERA_IN2_MODE_SHIFT 10
+#define MADERA_IN2L_PGA_VOL_MASK 0x00FE
+#define MADERA_IN2L_PGA_VOL_SHIFT 1
+
+/* (0x0319) ADC_Digital_Volume_2L */
+#define MADERA_IN2L_SRC_MASK 0x4000
+#define MADERA_IN2L_SRC_SHIFT 14
+#define MADERA_IN2L_SRC_SE_MASK 0x2000
+#define MADERA_IN2L_SRC_SE_SHIFT 13
+#define MADERA_IN2L_LP_MODE 0x0800
+#define MADERA_IN2L_LP_MODE_MASK 0x0800
+#define MADERA_IN2L_LP_MODE_SHIFT 11
+#define MADERA_IN2L_MUTE 0x0100
+#define MADERA_IN2L_MUTE_MASK 0x0100
+#define MADERA_IN2L_MUTE_SHIFT 8
+#define MADERA_IN2L_DIG_VOL_MASK 0x00FF
+#define MADERA_IN2L_DIG_VOL_SHIFT 0
+
+/* (0x031A) DMIC2L_Control */
+#define MADERA_IN2_OSR_MASK 0x0700
+#define MADERA_IN2_OSR_SHIFT 8
+
+/* (0x031C) IN2R_Control */
+#define MADERA_IN2R_HPF_MASK 0x8000
+#define MADERA_IN2R_HPF_SHIFT 15
+#define MADERA_IN2R_PGA_VOL_MASK 0x00FE
+#define MADERA_IN2R_PGA_VOL_SHIFT 1
+#define MADERA_IN2_DMICCLK_SRC_MASK 0x1800
+#define MADERA_IN2_DMICCLK_SRC_SHIFT 11
+
+/* (0x031D) ADC_Digital_Volume_2R */
+#define MADERA_IN2R_SRC_MASK 0x4000
+#define MADERA_IN2R_SRC_SHIFT 14
+#define MADERA_IN2R_SRC_SE_MASK 0x2000
+#define MADERA_IN2R_SRC_SE_SHIFT 13
+#define MADERA_IN2R_LP_MODE 0x0800
+#define MADERA_IN2R_LP_MODE_MASK 0x0800
+#define MADERA_IN2R_LP_MODE_SHIFT 11
+#define MADERA_IN2R_MUTE 0x0100
+#define MADERA_IN2R_MUTE_MASK 0x0100
+#define MADERA_IN2R_MUTE_SHIFT 8
+#define MADERA_IN2R_DIG_VOL_MASK 0x00FF
+#define MADERA_IN2R_DIG_VOL_SHIFT 0
+
+/* (0x0320) IN3L_Control */
+#define MADERA_IN3L_HPF_MASK 0x8000
+#define MADERA_IN3L_HPF_SHIFT 15
+#define MADERA_IN3_DMIC_SUP_MASK 0x1800
+#define MADERA_IN3_DMIC_SUP_SHIFT 11
+#define MADERA_IN3_MODE_MASK 0x0400
+#define MADERA_IN3_MODE_SHIFT 10
+#define MADERA_IN3L_PGA_VOL_MASK 0x00FE
+#define MADERA_IN3L_PGA_VOL_SHIFT 1
+
+/* (0x0321) ADC_Digital_Volume_3L */
+#define MADERA_IN3L_MUTE 0x0100
+#define MADERA_IN3L_MUTE_MASK 0x0100
+#define MADERA_IN3L_MUTE_SHIFT 8
+#define MADERA_IN3L_DIG_VOL_MASK 0x00FF
+#define MADERA_IN3L_DIG_VOL_SHIFT 0
+
+/* (0x0322) DMIC3L_Control */
+#define MADERA_IN3_OSR_MASK 0x0700
+#define MADERA_IN3_OSR_SHIFT 8
+
+/* (0x0324) IN3R_Control */
+#define MADERA_IN3R_HPF_MASK 0x8000
+#define MADERA_IN3R_HPF_SHIFT 15
+#define MADERA_IN3R_PGA_VOL_MASK 0x00FE
+#define MADERA_IN3R_PGA_VOL_SHIFT 1
+#define MADERA_IN3_DMICCLK_SRC_MASK 0x1800
+#define MADERA_IN3_DMICCLK_SRC_SHIFT 11
+
+/* (0x0325) ADC_Digital_Volume_3R */
+#define MADERA_IN3R_MUTE 0x0100
+#define MADERA_IN3R_MUTE_MASK 0x0100
+#define MADERA_IN3R_MUTE_SHIFT 8
+#define MADERA_IN3R_DIG_VOL_MASK 0x00FF
+#define MADERA_IN3R_DIG_VOL_SHIFT 0
+
+/* (0x0328) IN4L_Control */
+#define MADERA_IN4L_HPF_MASK 0x8000
+#define MADERA_IN4L_HPF_SHIFT 15
+#define MADERA_IN4_DMIC_SUP_MASK 0x1800
+#define MADERA_IN4_DMIC_SUP_SHIFT 11
+
+/* (0x0329) ADC_Digital_Volume_4L */
+#define MADERA_IN4L_MUTE 0x0100
+#define MADERA_IN4L_MUTE_MASK 0x0100
+#define MADERA_IN4L_MUTE_SHIFT 8
+#define MADERA_IN4L_DIG_VOL_MASK 0x00FF
+#define MADERA_IN4L_DIG_VOL_SHIFT 0
+
+/* (0x032A) DMIC4L_Control */
+#define MADERA_IN4_OSR_MASK 0x0700
+#define MADERA_IN4_OSR_SHIFT 8
+
+/* (0x032C) IN4R_Control */
+#define MADERA_IN4R_HPF_MASK 0x8000
+#define MADERA_IN4R_HPF_SHIFT 15
+#define MADERA_IN4_DMICCLK_SRC_MASK 0x1800
+#define MADERA_IN4_DMICCLK_SRC_SHIFT 11
+
+/* (0x032D) ADC_Digital_Volume_4R */
+#define MADERA_IN4R_MUTE 0x0100
+#define MADERA_IN4R_MUTE_MASK 0x0100
+#define MADERA_IN4R_MUTE_SHIFT 8
+#define MADERA_IN4R_DIG_VOL_MASK 0x00FF
+#define MADERA_IN4R_DIG_VOL_SHIFT 0
+
+/* (0x0330) IN5L_Control */
+#define MADERA_IN5L_HPF_MASK 0x8000
+#define MADERA_IN5L_HPF_SHIFT 15
+#define MADERA_IN5_DMIC_SUP_MASK 0x1800
+#define MADERA_IN5_DMIC_SUP_SHIFT 11
+
+/* (0x0331) ADC_Digital_Volume_5L */
+#define MADERA_IN5L_MUTE 0x0100
+#define MADERA_IN5L_MUTE_MASK 0x0100
+#define MADERA_IN5L_MUTE_SHIFT 8
+#define MADERA_IN5L_DIG_VOL_MASK 0x00FF
+#define MADERA_IN5L_DIG_VOL_SHIFT 0
+
+/* (0x0332) DMIC5L_Control */
+#define MADERA_IN5_OSR_MASK 0x0700
+#define MADERA_IN5_OSR_SHIFT 8
+
+/* (0x0334) IN5R_Control */
+#define MADERA_IN5R_HPF_MASK 0x8000
+#define MADERA_IN5R_HPF_SHIFT 15
+#define MADERA_IN5_DMICCLK_SRC_MASK 0x1800
+#define MADERA_IN5_DMICCLK_SRC_SHIFT 11
+
+/* (0x0335) ADC_Digital_Volume_5R */
+#define MADERA_IN5R_MUTE 0x0100
+#define MADERA_IN5R_MUTE_MASK 0x0100
+#define MADERA_IN5R_MUTE_SHIFT 8
+#define MADERA_IN5R_DIG_VOL_MASK 0x00FF
+#define MADERA_IN5R_DIG_VOL_SHIFT 0
+
+/* (0x0338) IN6L_Control */
+#define MADERA_IN6L_HPF_MASK 0x8000
+#define MADERA_IN6L_HPF_SHIFT 15
+#define MADERA_IN6_DMIC_SUP_MASK 0x1800
+#define MADERA_IN6_DMIC_SUP_SHIFT 11
+
+/* (0x0339) ADC_Digital_Volume_6L */
+#define MADERA_IN6L_MUTE 0x0100
+#define MADERA_IN6L_MUTE_MASK 0x0100
+#define MADERA_IN6L_MUTE_SHIFT 8
+#define MADERA_IN6L_DIG_VOL_MASK 0x00FF
+#define MADERA_IN6L_DIG_VOL_SHIFT 0
+
+/* (0x033A) DMIC6L_Control */
+#define MADERA_IN6_OSR_MASK 0x0700
+#define MADERA_IN6_OSR_SHIFT 8
+
+/* (0x033C) IN6R_Control */
+#define MADERA_IN6R_HPF_MASK 0x8000
+#define MADERA_IN6R_HPF_SHIFT 15
+
+/* (0x033D) ADC_Digital_Volume_6R */
+#define MADERA_IN6R_MUTE 0x0100
+#define MADERA_IN6R_MUTE_MASK 0x0100
+#define MADERA_IN6R_MUTE_SHIFT 8
+#define MADERA_IN6R_DIG_VOL_MASK 0x00FF
+#define MADERA_IN6R_DIG_VOL_SHIFT 0
+
+/* (0x033E) DMIC6R_Control */
+#define MADERA_IN6_DMICCLK_SRC_MASK 0x1800
+#define MADERA_IN6_DMICCLK_SRC_SHIFT 11
+
+/* (0x0400) Output_Enables_1 */
+#define MADERA_EP_SEL 0x8000
+#define MADERA_EP_SEL_MASK 0x8000
+#define MADERA_EP_SEL_SHIFT 15
+#define MADERA_OUT6L_ENA 0x0800
+#define MADERA_OUT6L_ENA_MASK 0x0800
+#define MADERA_OUT6L_ENA_SHIFT 11
+#define MADERA_OUT6R_ENA 0x0400
+#define MADERA_OUT6R_ENA_MASK 0x0400
+#define MADERA_OUT6R_ENA_SHIFT 10
+#define MADERA_OUT5L_ENA 0x0200
+#define MADERA_OUT5L_ENA_MASK 0x0200
+#define MADERA_OUT5L_ENA_SHIFT 9
+#define MADERA_OUT5R_ENA 0x0100
+#define MADERA_OUT5R_ENA_MASK 0x0100
+#define MADERA_OUT5R_ENA_SHIFT 8
+#define MADERA_OUT4L_ENA 0x0080
+#define MADERA_OUT4L_ENA_MASK 0x0080
+#define MADERA_OUT4L_ENA_SHIFT 7
+#define MADERA_OUT4R_ENA 0x0040
+#define MADERA_OUT4R_ENA_MASK 0x0040
+#define MADERA_OUT4R_ENA_SHIFT 6
+#define MADERA_OUT3L_ENA 0x0020
+#define MADERA_OUT3L_ENA_MASK 0x0020
+#define MADERA_OUT3L_ENA_SHIFT 5
+#define MADERA_OUT3R_ENA 0x0010
+#define MADERA_OUT3R_ENA_MASK 0x0010
+#define MADERA_OUT3R_ENA_SHIFT 4
+#define MADERA_OUT2L_ENA 0x0008
+#define MADERA_OUT2L_ENA_MASK 0x0008
+#define MADERA_OUT2L_ENA_SHIFT 3
+#define MADERA_OUT2R_ENA 0x0004
+#define MADERA_OUT2R_ENA_MASK 0x0004
+#define MADERA_OUT2R_ENA_SHIFT 2
+#define MADERA_OUT1L_ENA 0x0002
+#define MADERA_OUT1L_ENA_MASK 0x0002
+#define MADERA_OUT1L_ENA_SHIFT 1
+#define MADERA_OUT1R_ENA 0x0001
+#define MADERA_OUT1R_ENA_MASK 0x0001
+#define MADERA_OUT1R_ENA_SHIFT 0
+
+/* (0x0408) Output_Rate_1 */
+#define MADERA_CP_DAC_MODE_MASK 0x0040
+#define MADERA_CP_DAC_MODE_SHIFT 6
+#define MADERA_OUT_EXT_CLK_DIV_MASK 0x0030
+#define MADERA_OUT_EXT_CLK_DIV_SHIFT 4
+#define MADERA_OUT_CLK_SRC_MASK 0x0007
+#define MADERA_OUT_CLK_SRC_SHIFT 0
+
+/* (0x0409) Output_Volume_Ramp */
+#define MADERA_OUT_VD_RAMP_MASK 0x0070
+#define MADERA_OUT_VD_RAMP_SHIFT 4
+#define MADERA_OUT_VI_RAMP_MASK 0x0007
+#define MADERA_OUT_VI_RAMP_SHIFT 0
+
+/* (0x0410) Output_Path_Config_1L */
+#define MADERA_OUT1_MONO 0x1000
+#define MADERA_OUT1_MONO_MASK 0x1000
+#define MADERA_OUT1_MONO_SHIFT 12
+#define MADERA_OUT1L_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT1L_ANC_SRC_SHIFT 10
+
+/* (0x0411) DAC_Digital_Volume_1L */
+#define MADERA_OUT1L_VU 0x0200
+#define MADERA_OUT1L_VU_MASK 0x0200
+#define MADERA_OUT1L_VU_SHIFT 9
+#define MADERA_OUT1L_MUTE 0x0100
+#define MADERA_OUT1L_MUTE_MASK 0x0100
+#define MADERA_OUT1L_MUTE_SHIFT 8
+#define MADERA_OUT1L_VOL_MASK 0x00FF
+#define MADERA_OUT1L_VOL_SHIFT 0
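+
+/*
+ * Illustrative sketch (assumed usage, not defined by this header): a new
+ * gain is normally written together with the volume-update strobe so the
+ * value is latched cleanly:
+ *
+ *   regmap_update_bits(regmap, MADERA_DAC_DIGITAL_VOLUME_1L,
+ *                      MADERA_OUT1L_VU_MASK | MADERA_OUT1L_VOL_MASK,
+ *                      MADERA_OUT1L_VU | (vol << MADERA_OUT1L_VOL_SHIFT));
+ */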
+
+/* (0x0412) Output_Path_Config_1 */
+#define MADERA_HP1_GND_SEL_MASK 0x0007
+#define MADERA_HP1_GND_SEL_SHIFT 0
+
+/* (0x0414) Output_Path_Config_1R */
+#define MADERA_OUT1R_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT1R_ANC_SRC_SHIFT 10
+
+/* (0x0415) DAC_Digital_Volume_1R */
+#define MADERA_OUT1R_MUTE 0x0100
+#define MADERA_OUT1R_MUTE_MASK 0x0100
+#define MADERA_OUT1R_MUTE_SHIFT 8
+#define MADERA_OUT1R_VOL_MASK 0x00FF
+#define MADERA_OUT1R_VOL_SHIFT 0
+
+/* (0x0418) Output_Path_Config_2L */
+#define MADERA_OUT2L_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT2L_ANC_SRC_SHIFT 10
+
+/* (0x0419) DAC_Digital_Volume_2L */
+#define MADERA_OUT2L_MUTE 0x0100
+#define MADERA_OUT2L_MUTE_MASK 0x0100
+#define MADERA_OUT2L_MUTE_SHIFT 8
+#define MADERA_OUT2L_VOL_MASK 0x00FF
+#define MADERA_OUT2L_VOL_SHIFT 0
+
+/* (0x041A) Output_Path_Config_2 */
+#define MADERA_HP2_GND_SEL_MASK 0x0007
+#define MADERA_HP2_GND_SEL_SHIFT 0
+
+/* (0x041C) Output_Path_Config_2R */
+#define MADERA_OUT2R_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT2R_ANC_SRC_SHIFT 10
+
+/* (0x041D) DAC_Digital_Volume_2R */
+#define MADERA_OUT2R_MUTE 0x0100
+#define MADERA_OUT2R_MUTE_MASK 0x0100
+#define MADERA_OUT2R_MUTE_SHIFT 8
+#define MADERA_OUT2R_VOL_MASK 0x00FF
+#define MADERA_OUT2R_VOL_SHIFT 0
+
+/* (0x0420) Output_Path_Config_3L */
+#define MADERA_OUT3L_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT3L_ANC_SRC_SHIFT 10
+
+/* (0x0421) DAC_Digital_Volume_3L */
+#define MADERA_OUT3L_MUTE 0x0100
+#define MADERA_OUT3L_MUTE_MASK 0x0100
+#define MADERA_OUT3L_MUTE_SHIFT 8
+#define MADERA_OUT3L_VOL_MASK 0x00FF
+#define MADERA_OUT3L_VOL_SHIFT 0
+
+/* (0x0424) Output_Path_Config_3R */
+#define MADERA_OUT3R_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT3R_ANC_SRC_SHIFT 10
+
+/* (0x0425) DAC_Digital_Volume_3R */
+#define MADERA_OUT3R_MUTE 0x0100
+#define MADERA_OUT3R_MUTE_MASK 0x0100
+#define MADERA_OUT3R_MUTE_SHIFT 8
+#define MADERA_OUT3R_VOL_MASK 0x00FF
+#define MADERA_OUT3R_VOL_SHIFT 0
+
+/* (0x0428) Output_Path_Config_4L */
+#define MADERA_OUT4L_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT4L_ANC_SRC_SHIFT 10
+
+/* (0x0429) DAC_Digital_Volume_4L */
+#define MADERA_OUT4L_MUTE 0x0100
+#define MADERA_OUT4L_MUTE_MASK 0x0100
+#define MADERA_OUT4L_MUTE_SHIFT 8
+#define MADERA_OUT4L_VOL_MASK 0x00FF
+#define MADERA_OUT4L_VOL_SHIFT 0
+
+/* (0x042C) Output_Path_Config_4R */
+#define MADERA_OUT4R_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT4R_ANC_SRC_SHIFT 10
+
+/* (0x042D) DAC_Digital_Volume_4R */
+#define MADERA_OUT4R_MUTE 0x0100
+#define MADERA_OUT4R_MUTE_MASK 0x0100
+#define MADERA_OUT4R_MUTE_SHIFT 8
+#define MADERA_OUT4R_VOL_MASK 0x00FF
+#define MADERA_OUT4R_VOL_SHIFT 0
+
+/* (0x0430) Output_Path_Config_5L */
+#define MADERA_OUT5_OSR 0x2000
+#define MADERA_OUT5_OSR_MASK 0x2000
+#define MADERA_OUT5_OSR_SHIFT 13
+#define MADERA_OUT5L_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT5L_ANC_SRC_SHIFT 10
+
+/* (0x0431) DAC_Digital_Volume_5L */
+#define MADERA_OUT5L_MUTE 0x0100
+#define MADERA_OUT5L_MUTE_MASK 0x0100
+#define MADERA_OUT5L_MUTE_SHIFT 8
+#define MADERA_OUT5L_VOL_MASK 0x00FF
+#define MADERA_OUT5L_VOL_SHIFT 0
+
+/* (0x0434) Output_Path_Config_5R */
+#define MADERA_OUT5R_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT5R_ANC_SRC_SHIFT 10
+
+/* (0x0435) DAC_Digital_Volume_5R */
+#define MADERA_OUT5R_MUTE 0x0100
+#define MADERA_OUT5R_MUTE_MASK 0x0100
+#define MADERA_OUT5R_MUTE_SHIFT 8
+#define MADERA_OUT5R_VOL_MASK 0x00FF
+#define MADERA_OUT5R_VOL_SHIFT 0
+
+/* (0x0438) Output_Path_Config_6L */
+#define MADERA_OUT6_OSR 0x2000
+#define MADERA_OUT6_OSR_MASK 0x2000
+#define MADERA_OUT6_OSR_SHIFT 13
+#define MADERA_OUT6L_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT6L_ANC_SRC_SHIFT 10
+
+/* (0x0439) DAC_Digital_Volume_6L */
+#define MADERA_OUT6L_MUTE 0x0100
+#define MADERA_OUT6L_MUTE_MASK 0x0100
+#define MADERA_OUT6L_MUTE_SHIFT 8
+#define MADERA_OUT6L_VOL_MASK 0x00FF
+#define MADERA_OUT6L_VOL_SHIFT 0
+
+/* (0x043C) Output_Path_Config_6R */
+#define MADERA_OUT6R_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT6R_ANC_SRC_SHIFT 10
+
+/* (0x043D) DAC_Digital_Volume_6R */
+#define MADERA_OUT6R_MUTE 0x0100
+#define MADERA_OUT6R_MUTE_MASK 0x0100
+#define MADERA_OUT6R_MUTE_SHIFT 8
+#define MADERA_OUT6R_VOL_MASK 0x00FF
+#define MADERA_OUT6R_VOL_SHIFT 0
+
+/* (0x0450) DAC_AEC_Control_1 */
+#define MADERA_AEC1_LOOPBACK_SRC_MASK 0x003C
+#define MADERA_AEC1_LOOPBACK_SRC_SHIFT 2
+#define MADERA_AEC1_ENA_STS 0x0002
+#define MADERA_AEC1_ENA_STS_MASK 0x0002
+#define MADERA_AEC1_ENA_STS_SHIFT 1
+#define MADERA_AEC1_LOOPBACK_ENA 0x0001
+#define MADERA_AEC1_LOOPBACK_ENA_MASK 0x0001
+#define MADERA_AEC1_LOOPBACK_ENA_SHIFT 0
+
+/* (0x0451) DAC_AEC_Control_2 */
+#define MADERA_AEC2_LOOPBACK_SRC_MASK 0x003C
+#define MADERA_AEC2_LOOPBACK_SRC_SHIFT 2
+#define MADERA_AEC2_ENA_STS 0x0002
+#define MADERA_AEC2_ENA_STS_MASK 0x0002
+#define MADERA_AEC2_ENA_STS_SHIFT 1
+#define MADERA_AEC2_LOOPBACK_ENA 0x0001
+#define MADERA_AEC2_LOOPBACK_ENA_MASK 0x0001
+#define MADERA_AEC2_LOOPBACK_ENA_SHIFT 0
+
+/* (0x0458) Noise_Gate_Control */
+#define MADERA_NGATE_HOLD_MASK 0x0030
+#define MADERA_NGATE_HOLD_SHIFT 4
+#define MADERA_NGATE_THR_MASK 0x000E
+#define MADERA_NGATE_THR_SHIFT 1
+#define MADERA_NGATE_ENA 0x0001
+#define MADERA_NGATE_ENA_MASK 0x0001
+#define MADERA_NGATE_ENA_SHIFT 0
+
+/* (0x0490) PDM_SPK1_CTRL_1 */
+#define MADERA_SPK1R_MUTE 0x2000
+#define MADERA_SPK1R_MUTE_MASK 0x2000
+#define MADERA_SPK1R_MUTE_SHIFT 13
+#define MADERA_SPK1L_MUTE 0x1000
+#define MADERA_SPK1L_MUTE_MASK 0x1000
+#define MADERA_SPK1L_MUTE_SHIFT 12
+#define MADERA_SPK1_MUTE_ENDIAN 0x0100
+#define MADERA_SPK1_MUTE_ENDIAN_MASK 0x0100
+#define MADERA_SPK1_MUTE_ENDIAN_SHIFT 8
+#define MADERA_SPK1_MUTE_SEQ1_MASK 0x00FF
+#define MADERA_SPK1_MUTE_SEQ1_SHIFT 0
+
+/* (0x0491) PDM_SPK1_CTRL_2 */
+#define MADERA_SPK1_FMT 0x0001
+#define MADERA_SPK1_FMT_MASK 0x0001
+#define MADERA_SPK1_FMT_SHIFT 0
+
+/* (0x0492) PDM_SPK2_CTRL_1 */
+#define MADERA_SPK2R_MUTE 0x2000
+#define MADERA_SPK2R_MUTE_MASK 0x2000
+#define MADERA_SPK2R_MUTE_SHIFT 13
+#define MADERA_SPK2L_MUTE 0x1000
+#define MADERA_SPK2L_MUTE_MASK 0x1000
+#define MADERA_SPK2L_MUTE_SHIFT 12
+
+/* (0x04A0) HP1_Short_Circuit_Ctrl */
+#define MADERA_HP1_SC_ENA 0x1000
+#define MADERA_HP1_SC_ENA_MASK 0x1000
+#define MADERA_HP1_SC_ENA_SHIFT 12
+
+/* (0x04A1) HP2_Short_Circuit_Ctrl */
+#define MADERA_HP2_SC_ENA 0x1000
+#define MADERA_HP2_SC_ENA_MASK 0x1000
+#define MADERA_HP2_SC_ENA_SHIFT 12
+
+/* (0x04A2) HP3_Short_Circuit_Ctrl */
+#define MADERA_HP3_SC_ENA 0x1000
+#define MADERA_HP3_SC_ENA_MASK 0x1000
+#define MADERA_HP3_SC_ENA_SHIFT 12
+
+/* (0x04A8) HP_Test_Ctrl_5 */
+#define MADERA_HP1L_ONEFLT 0x0100
+#define MADERA_HP1L_ONEFLT_MASK 0x0100
+#define MADERA_HP1L_ONEFLT_SHIFT 8
+
+/* (0x04A9) HP_Test_Ctrl_6 */
+#define MADERA_HP1R_ONEFLT 0x0100
+#define MADERA_HP1R_ONEFLT_MASK 0x0100
+#define MADERA_HP1R_ONEFLT_SHIFT 8
+
+/* (0x0500) AIF1_BCLK_Ctrl */
+#define MADERA_AIF1_BCLK_INV 0x0080
+#define MADERA_AIF1_BCLK_INV_MASK 0x0080
+#define MADERA_AIF1_BCLK_INV_SHIFT 7
+#define MADERA_AIF1_BCLK_MSTR 0x0020
+#define MADERA_AIF1_BCLK_MSTR_MASK 0x0020
+#define MADERA_AIF1_BCLK_MSTR_SHIFT 5
+#define MADERA_AIF1_BCLK_FREQ_MASK 0x001F
+#define MADERA_AIF1_BCLK_FREQ_SHIFT 0
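+
+/*
+ * Illustrative sketch (assumed usage; "bclk_freq" is a hypothetical value
+ * already encoded for the FREQ field): master mode and BCLK frequency can
+ * be programmed in a single read-modify-write:
+ *
+ *   regmap_update_bits(regmap, MADERA_AIF1_BCLK_CTRL,
+ *                      MADERA_AIF1_BCLK_MSTR_MASK |
+ *                      MADERA_AIF1_BCLK_FREQ_MASK,
+ *                      MADERA_AIF1_BCLK_MSTR | bclk_freq);
+ */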
+
+/* (0x0501) AIF1_Tx_Pin_Ctrl */
+#define MADERA_AIF1TX_LRCLK_SRC 0x0008
+#define MADERA_AIF1TX_LRCLK_SRC_MASK 0x0008
+#define MADERA_AIF1TX_LRCLK_SRC_SHIFT 3
+#define MADERA_AIF1TX_LRCLK_INV 0x0004
+#define MADERA_AIF1TX_LRCLK_INV_MASK 0x0004
+#define MADERA_AIF1TX_LRCLK_INV_SHIFT 2
+#define MADERA_AIF1TX_LRCLK_MSTR 0x0001
+#define MADERA_AIF1TX_LRCLK_MSTR_MASK 0x0001
+#define MADERA_AIF1TX_LRCLK_MSTR_SHIFT 0
+
+/* (0x0502) AIF1_Rx_Pin_Ctrl */
+#define MADERA_AIF1RX_LRCLK_INV 0x0004
+#define MADERA_AIF1RX_LRCLK_INV_MASK 0x0004
+#define MADERA_AIF1RX_LRCLK_INV_SHIFT 2
+#define MADERA_AIF1RX_LRCLK_FRC 0x0002
+#define MADERA_AIF1RX_LRCLK_FRC_MASK 0x0002
+#define MADERA_AIF1RX_LRCLK_FRC_SHIFT 1
+#define MADERA_AIF1RX_LRCLK_MSTR 0x0001
+#define MADERA_AIF1RX_LRCLK_MSTR_MASK 0x0001
+#define MADERA_AIF1RX_LRCLK_MSTR_SHIFT 0
+
+/* (0x0503) AIF1_Rate_Ctrl */
+#define MADERA_AIF1_RATE_MASK 0xF800
+#define MADERA_AIF1_RATE_SHIFT 11
+#define MADERA_AIF1_TRI 0x0040
+#define MADERA_AIF1_TRI_MASK 0x0040
+#define MADERA_AIF1_TRI_SHIFT 6
+
+/* (0x0504) AIF1_Format */
+#define MADERA_AIF1_FMT_MASK 0x0007
+#define MADERA_AIF1_FMT_SHIFT 0
+
+/* (0x0506) AIF1_Rx_BCLK_Rate */
+#define MADERA_AIF1RX_BCPF_MASK 0x1FFF
+#define MADERA_AIF1RX_BCPF_SHIFT 0
+
+/* (0x0507) AIF1_Frame_Ctrl_1 */
+#define MADERA_AIF1TX_WL_MASK 0x3F00
+#define MADERA_AIF1TX_WL_SHIFT 8
+#define MADERA_AIF1TX_SLOT_LEN_MASK 0x00FF
+#define MADERA_AIF1TX_SLOT_LEN_SHIFT 0
+
+/* (0x0508) AIF1_Frame_Ctrl_2 */
+#define MADERA_AIF1RX_WL_MASK 0x3F00
+#define MADERA_AIF1RX_WL_SHIFT 8
+#define MADERA_AIF1RX_SLOT_LEN_MASK 0x00FF
+#define MADERA_AIF1RX_SLOT_LEN_SHIFT 0
+
+/* (0x0509) AIF1_Frame_Ctrl_3 */
+#define MADERA_AIF1TX1_SLOT_MASK 0x003F
+#define MADERA_AIF1TX1_SLOT_SHIFT 0
+
+/* (0x0519) AIF1_Tx_Enables */
+#define MADERA_AIF1TX8_ENA 0x0080
+#define MADERA_AIF1TX8_ENA_MASK 0x0080
+#define MADERA_AIF1TX8_ENA_SHIFT 7
+#define MADERA_AIF1TX7_ENA 0x0040
+#define MADERA_AIF1TX7_ENA_MASK 0x0040
+#define MADERA_AIF1TX7_ENA_SHIFT 6
+#define MADERA_AIF1TX6_ENA 0x0020
+#define MADERA_AIF1TX6_ENA_MASK 0x0020
+#define MADERA_AIF1TX6_ENA_SHIFT 5
+#define MADERA_AIF1TX5_ENA 0x0010
+#define MADERA_AIF1TX5_ENA_MASK 0x0010
+#define MADERA_AIF1TX5_ENA_SHIFT 4
+#define MADERA_AIF1TX4_ENA 0x0008
+#define MADERA_AIF1TX4_ENA_MASK 0x0008
+#define MADERA_AIF1TX4_ENA_SHIFT 3
+#define MADERA_AIF1TX3_ENA 0x0004
+#define MADERA_AIF1TX3_ENA_MASK 0x0004
+#define MADERA_AIF1TX3_ENA_SHIFT 2
+#define MADERA_AIF1TX2_ENA 0x0002
+#define MADERA_AIF1TX2_ENA_MASK 0x0002
+#define MADERA_AIF1TX2_ENA_SHIFT 1
+#define MADERA_AIF1TX1_ENA 0x0001
+#define MADERA_AIF1TX1_ENA_MASK 0x0001
+#define MADERA_AIF1TX1_ENA_SHIFT 0
+
+/* (0x051A) AIF1_Rx_Enables */
+#define MADERA_AIF1RX8_ENA 0x0080
+#define MADERA_AIF1RX8_ENA_MASK 0x0080
+#define MADERA_AIF1RX8_ENA_SHIFT 7
+#define MADERA_AIF1RX7_ENA 0x0040
+#define MADERA_AIF1RX7_ENA_MASK 0x0040
+#define MADERA_AIF1RX7_ENA_SHIFT 6
+#define MADERA_AIF1RX6_ENA 0x0020
+#define MADERA_AIF1RX6_ENA_MASK 0x0020
+#define MADERA_AIF1RX6_ENA_SHIFT 5
+#define MADERA_AIF1RX5_ENA 0x0010
+#define MADERA_AIF1RX5_ENA_MASK 0x0010
+#define MADERA_AIF1RX5_ENA_SHIFT 4
+#define MADERA_AIF1RX4_ENA 0x0008
+#define MADERA_AIF1RX4_ENA_MASK 0x0008
+#define MADERA_AIF1RX4_ENA_SHIFT 3
+#define MADERA_AIF1RX3_ENA 0x0004
+#define MADERA_AIF1RX3_ENA_MASK 0x0004
+#define MADERA_AIF1RX3_ENA_SHIFT 2
+#define MADERA_AIF1RX2_ENA 0x0002
+#define MADERA_AIF1RX2_ENA_MASK 0x0002
+#define MADERA_AIF1RX2_ENA_SHIFT 1
+#define MADERA_AIF1RX1_ENA 0x0001
+#define MADERA_AIF1RX1_ENA_MASK 0x0001
+#define MADERA_AIF1RX1_ENA_SHIFT 0
+
+/* (0x0559) AIF2_Tx_Enables */
+#define MADERA_AIF2TX8_ENA 0x0080
+#define MADERA_AIF2TX8_ENA_MASK 0x0080
+#define MADERA_AIF2TX8_ENA_SHIFT 7
+#define MADERA_AIF2TX7_ENA 0x0040
+#define MADERA_AIF2TX7_ENA_MASK 0x0040
+#define MADERA_AIF2TX7_ENA_SHIFT 6
+#define MADERA_AIF2TX6_ENA 0x0020
+#define MADERA_AIF2TX6_ENA_MASK 0x0020
+#define MADERA_AIF2TX6_ENA_SHIFT 5
+#define MADERA_AIF2TX5_ENA 0x0010
+#define MADERA_AIF2TX5_ENA_MASK 0x0010
+#define MADERA_AIF2TX5_ENA_SHIFT 4
+#define MADERA_AIF2TX4_ENA 0x0008
+#define MADERA_AIF2TX4_ENA_MASK 0x0008
+#define MADERA_AIF2TX4_ENA_SHIFT 3
+#define MADERA_AIF2TX3_ENA 0x0004
+#define MADERA_AIF2TX3_ENA_MASK 0x0004
+#define MADERA_AIF2TX3_ENA_SHIFT 2
+#define MADERA_AIF2TX2_ENA 0x0002
+#define MADERA_AIF2TX2_ENA_MASK 0x0002
+#define MADERA_AIF2TX2_ENA_SHIFT 1
+#define MADERA_AIF2TX1_ENA 0x0001
+#define MADERA_AIF2TX1_ENA_MASK 0x0001
+#define MADERA_AIF2TX1_ENA_SHIFT 0
+
+/* (0x055A) AIF2_Rx_Enables */
+#define MADERA_AIF2RX8_ENA 0x0080
+#define MADERA_AIF2RX8_ENA_MASK 0x0080
+#define MADERA_AIF2RX8_ENA_SHIFT 7
+#define MADERA_AIF2RX7_ENA 0x0040
+#define MADERA_AIF2RX7_ENA_MASK 0x0040
+#define MADERA_AIF2RX7_ENA_SHIFT 6
+#define MADERA_AIF2RX6_ENA 0x0020
+#define MADERA_AIF2RX6_ENA_MASK 0x0020
+#define MADERA_AIF2RX6_ENA_SHIFT 5
+#define MADERA_AIF2RX5_ENA 0x0010
+#define MADERA_AIF2RX5_ENA_MASK 0x0010
+#define MADERA_AIF2RX5_ENA_SHIFT 4
+#define MADERA_AIF2RX4_ENA 0x0008
+#define MADERA_AIF2RX4_ENA_MASK 0x0008
+#define MADERA_AIF2RX4_ENA_SHIFT 3
+#define MADERA_AIF2RX3_ENA 0x0004
+#define MADERA_AIF2RX3_ENA_MASK 0x0004
+#define MADERA_AIF2RX3_ENA_SHIFT 2
+#define MADERA_AIF2RX2_ENA 0x0002
+#define MADERA_AIF2RX2_ENA_MASK 0x0002
+#define MADERA_AIF2RX2_ENA_SHIFT 1
+#define MADERA_AIF2RX1_ENA 0x0001
+#define MADERA_AIF2RX1_ENA_MASK 0x0001
+#define MADERA_AIF2RX1_ENA_SHIFT 0
+
+/* (0x0599) AIF3_Tx_Enables */
+#define MADERA_AIF3TX8_ENA 0x0080
+#define MADERA_AIF3TX8_ENA_MASK 0x0080
+#define MADERA_AIF3TX8_ENA_SHIFT 7
+#define MADERA_AIF3TX7_ENA 0x0040
+#define MADERA_AIF3TX7_ENA_MASK 0x0040
+#define MADERA_AIF3TX7_ENA_SHIFT 6
+#define MADERA_AIF3TX6_ENA 0x0020
+#define MADERA_AIF3TX6_ENA_MASK 0x0020
+#define MADERA_AIF3TX6_ENA_SHIFT 5
+#define MADERA_AIF3TX5_ENA 0x0010
+#define MADERA_AIF3TX5_ENA_MASK 0x0010
+#define MADERA_AIF3TX5_ENA_SHIFT 4
+#define MADERA_AIF3TX4_ENA 0x0008
+#define MADERA_AIF3TX4_ENA_MASK 0x0008
+#define MADERA_AIF3TX4_ENA_SHIFT 3
+#define MADERA_AIF3TX3_ENA 0x0004
+#define MADERA_AIF3TX3_ENA_MASK 0x0004
+#define MADERA_AIF3TX3_ENA_SHIFT 2
+#define MADERA_AIF3TX2_ENA 0x0002
+#define MADERA_AIF3TX2_ENA_MASK 0x0002
+#define MADERA_AIF3TX2_ENA_SHIFT 1
+#define MADERA_AIF3TX1_ENA 0x0001
+#define MADERA_AIF3TX1_ENA_MASK 0x0001
+#define MADERA_AIF3TX1_ENA_SHIFT 0
+
+/* (0x059A) AIF3_Rx_Enables */
+#define MADERA_AIF3RX8_ENA 0x0080
+#define MADERA_AIF3RX8_ENA_MASK 0x0080
+#define MADERA_AIF3RX8_ENA_SHIFT 7
+#define MADERA_AIF3RX7_ENA 0x0040
+#define MADERA_AIF3RX7_ENA_MASK 0x0040
+#define MADERA_AIF3RX7_ENA_SHIFT 6
+#define MADERA_AIF3RX6_ENA 0x0020
+#define MADERA_AIF3RX6_ENA_MASK 0x0020
+#define MADERA_AIF3RX6_ENA_SHIFT 5
+#define MADERA_AIF3RX5_ENA 0x0010
+#define MADERA_AIF3RX5_ENA_MASK 0x0010
+#define MADERA_AIF3RX5_ENA_SHIFT 4
+#define MADERA_AIF3RX4_ENA 0x0008
+#define MADERA_AIF3RX4_ENA_MASK 0x0008
+#define MADERA_AIF3RX4_ENA_SHIFT 3
+#define MADERA_AIF3RX3_ENA 0x0004
+#define MADERA_AIF3RX3_ENA_MASK 0x0004
+#define MADERA_AIF3RX3_ENA_SHIFT 2
+#define MADERA_AIF3RX2_ENA 0x0002
+#define MADERA_AIF3RX2_ENA_MASK 0x0002
+#define MADERA_AIF3RX2_ENA_SHIFT 1
+#define MADERA_AIF3RX1_ENA 0x0001
+#define MADERA_AIF3RX1_ENA_MASK 0x0001
+#define MADERA_AIF3RX1_ENA_SHIFT 0
+
+/* (0x05B9) AIF4_Tx_Enables */
+#define MADERA_AIF4TX2_ENA 0x0002
+#define MADERA_AIF4TX2_ENA_MASK 0x0002
+#define MADERA_AIF4TX2_ENA_SHIFT 1
+#define MADERA_AIF4TX1_ENA 0x0001
+#define MADERA_AIF4TX1_ENA_MASK 0x0001
+#define MADERA_AIF4TX1_ENA_SHIFT 0
+
+/* (0x05BA) AIF4_Rx_Enables */
+#define MADERA_AIF4RX2_ENA 0x0002
+#define MADERA_AIF4RX2_ENA_MASK 0x0002
+#define MADERA_AIF4RX2_ENA_SHIFT 1
+#define MADERA_AIF4RX1_ENA 0x0001
+#define MADERA_AIF4RX1_ENA_MASK 0x0001
+#define MADERA_AIF4RX1_ENA_SHIFT 0
+
+/* (0x05C2) SPD1_TX_Control */
+#define MADERA_SPD1_VAL2 0x2000
+#define MADERA_SPD1_VAL2_MASK 0x2000
+#define MADERA_SPD1_VAL2_SHIFT 13
+#define MADERA_SPD1_VAL1 0x1000
+#define MADERA_SPD1_VAL1_MASK 0x1000
+#define MADERA_SPD1_VAL1_SHIFT 12
+#define MADERA_SPD1_RATE_MASK 0x00F0
+#define MADERA_SPD1_RATE_SHIFT 4
+#define MADERA_SPD1_ENA 0x0001
+#define MADERA_SPD1_ENA_MASK 0x0001
+#define MADERA_SPD1_ENA_SHIFT 0
+
+/* (0x05F5) SLIMbus_RX_Channel_Enable */
+#define MADERA_SLIMRX8_ENA 0x0080
+#define MADERA_SLIMRX8_ENA_MASK 0x0080
+#define MADERA_SLIMRX8_ENA_SHIFT 7
+#define MADERA_SLIMRX7_ENA 0x0040
+#define MADERA_SLIMRX7_ENA_MASK 0x0040
+#define MADERA_SLIMRX7_ENA_SHIFT 6
+#define MADERA_SLIMRX6_ENA 0x0020
+#define MADERA_SLIMRX6_ENA_MASK 0x0020
+#define MADERA_SLIMRX6_ENA_SHIFT 5
+#define MADERA_SLIMRX5_ENA 0x0010
+#define MADERA_SLIMRX5_ENA_MASK 0x0010
+#define MADERA_SLIMRX5_ENA_SHIFT 4
+#define MADERA_SLIMRX4_ENA 0x0008
+#define MADERA_SLIMRX4_ENA_MASK 0x0008
+#define MADERA_SLIMRX4_ENA_SHIFT 3
+#define MADERA_SLIMRX3_ENA 0x0004
+#define MADERA_SLIMRX3_ENA_MASK 0x0004
+#define MADERA_SLIMRX3_ENA_SHIFT 2
+#define MADERA_SLIMRX2_ENA 0x0002
+#define MADERA_SLIMRX2_ENA_MASK 0x0002
+#define MADERA_SLIMRX2_ENA_SHIFT 1
+#define MADERA_SLIMRX1_ENA 0x0001
+#define MADERA_SLIMRX1_ENA_MASK 0x0001
+#define MADERA_SLIMRX1_ENA_SHIFT 0
+
+/* (0x05F6) SLIMbus_TX_Channel_Enable */
+#define MADERA_SLIMTX8_ENA 0x0080
+#define MADERA_SLIMTX8_ENA_MASK 0x0080
+#define MADERA_SLIMTX8_ENA_SHIFT 7
+#define MADERA_SLIMTX7_ENA 0x0040
+#define MADERA_SLIMTX7_ENA_MASK 0x0040
+#define MADERA_SLIMTX7_ENA_SHIFT 6
+#define MADERA_SLIMTX6_ENA 0x0020
+#define MADERA_SLIMTX6_ENA_MASK 0x0020
+#define MADERA_SLIMTX6_ENA_SHIFT 5
+#define MADERA_SLIMTX5_ENA 0x0010
+#define MADERA_SLIMTX5_ENA_MASK 0x0010
+#define MADERA_SLIMTX5_ENA_SHIFT 4
+#define MADERA_SLIMTX4_ENA 0x0008
+#define MADERA_SLIMTX4_ENA_MASK 0x0008
+#define MADERA_SLIMTX4_ENA_SHIFT 3
+#define MADERA_SLIMTX3_ENA 0x0004
+#define MADERA_SLIMTX3_ENA_MASK 0x0004
+#define MADERA_SLIMTX3_ENA_SHIFT 2
+#define MADERA_SLIMTX2_ENA 0x0002
+#define MADERA_SLIMTX2_ENA_MASK 0x0002
+#define MADERA_SLIMTX2_ENA_SHIFT 1
+#define MADERA_SLIMTX1_ENA 0x0001
+#define MADERA_SLIMTX1_ENA_MASK 0x0001
+#define MADERA_SLIMTX1_ENA_SHIFT 0
+
+/* (0x0E10) EQ1_1 */
+#define MADERA_EQ1_B1_GAIN_MASK 0xF800
+#define MADERA_EQ1_B1_GAIN_SHIFT 11
+#define MADERA_EQ1_B2_GAIN_MASK 0x07C0
+#define MADERA_EQ1_B2_GAIN_SHIFT 6
+#define MADERA_EQ1_B3_GAIN_MASK 0x003E
+#define MADERA_EQ1_B3_GAIN_SHIFT 1
+#define MADERA_EQ1_ENA 0x0001
+#define MADERA_EQ1_ENA_MASK 0x0001
+#define MADERA_EQ1_ENA_SHIFT 0
+
+/* (0x0E11) EQ1_2 */
+#define MADERA_EQ1_B4_GAIN_MASK 0xF800
+#define MADERA_EQ1_B4_GAIN_SHIFT 11
+#define MADERA_EQ1_B5_GAIN_MASK 0x07C0
+#define MADERA_EQ1_B5_GAIN_SHIFT 6
+#define MADERA_EQ1_B1_MODE 0x0001
+#define MADERA_EQ1_B1_MODE_MASK 0x0001
+#define MADERA_EQ1_B1_MODE_SHIFT 0
+
+/* (0x0E26) EQ2_1 */
+#define MADERA_EQ2_B1_GAIN_MASK 0xF800
+#define MADERA_EQ2_B1_GAIN_SHIFT 11
+#define MADERA_EQ2_B2_GAIN_MASK 0x07C0
+#define MADERA_EQ2_B2_GAIN_SHIFT 6
+#define MADERA_EQ2_B3_GAIN_MASK 0x003E
+#define MADERA_EQ2_B3_GAIN_SHIFT 1
+#define MADERA_EQ2_ENA 0x0001
+#define MADERA_EQ2_ENA_MASK 0x0001
+#define MADERA_EQ2_ENA_SHIFT 0
+
+/* (0x0E27) EQ2_2 */
+#define MADERA_EQ2_B4_GAIN_MASK 0xF800
+#define MADERA_EQ2_B4_GAIN_SHIFT 11
+#define MADERA_EQ2_B5_GAIN_MASK 0x07C0
+#define MADERA_EQ2_B5_GAIN_SHIFT 6
+#define MADERA_EQ2_B1_MODE 0x0001
+#define MADERA_EQ2_B1_MODE_MASK 0x0001
+#define MADERA_EQ2_B1_MODE_SHIFT 0
+
+/* (0x0E3C) EQ3_1 */
+#define MADERA_EQ3_B1_GAIN_MASK 0xF800
+#define MADERA_EQ3_B1_GAIN_SHIFT 11
+#define MADERA_EQ3_B2_GAIN_MASK 0x07C0
+#define MADERA_EQ3_B2_GAIN_SHIFT 6
+#define MADERA_EQ3_B3_GAIN_MASK 0x003E
+#define MADERA_EQ3_B3_GAIN_SHIFT 1
+#define MADERA_EQ3_ENA 0x0001
+#define MADERA_EQ3_ENA_MASK 0x0001
+#define MADERA_EQ3_ENA_SHIFT 0
+
+/* (0x0E3D) EQ3_2 */
+#define MADERA_EQ3_B4_GAIN_MASK 0xF800
+#define MADERA_EQ3_B4_GAIN_SHIFT 11
+#define MADERA_EQ3_B5_GAIN_MASK 0x07C0
+#define MADERA_EQ3_B5_GAIN_SHIFT 6
+#define MADERA_EQ3_B1_MODE 0x0001
+#define MADERA_EQ3_B1_MODE_MASK 0x0001
+#define MADERA_EQ3_B1_MODE_SHIFT 0
+
+/* (0x0E52) EQ4_1 */
+#define MADERA_EQ4_B1_GAIN_MASK 0xF800
+#define MADERA_EQ4_B1_GAIN_SHIFT 11
+#define MADERA_EQ4_B2_GAIN_MASK 0x07C0
+#define MADERA_EQ4_B2_GAIN_SHIFT 6
+#define MADERA_EQ4_B3_GAIN_MASK 0x003E
+#define MADERA_EQ4_B3_GAIN_SHIFT 1
+#define MADERA_EQ4_ENA 0x0001
+#define MADERA_EQ4_ENA_MASK 0x0001
+#define MADERA_EQ4_ENA_SHIFT 0
+
+/* (0x0E53) EQ4_2 */
+#define MADERA_EQ4_B4_GAIN_MASK 0xF800
+#define MADERA_EQ4_B4_GAIN_SHIFT 11
+#define MADERA_EQ4_B5_GAIN_MASK 0x07C0
+#define MADERA_EQ4_B5_GAIN_SHIFT 6
+#define MADERA_EQ4_B1_MODE 0x0001
+#define MADERA_EQ4_B1_MODE_MASK 0x0001
+#define MADERA_EQ4_B1_MODE_SHIFT 0
+
+/* (0x0E80) DRC1_ctrl1 */
+#define MADERA_DRC1L_ENA 0x0002
+#define MADERA_DRC1L_ENA_MASK 0x0002
+#define MADERA_DRC1L_ENA_SHIFT 1
+#define MADERA_DRC1R_ENA 0x0001
+#define MADERA_DRC1R_ENA_MASK 0x0001
+#define MADERA_DRC1R_ENA_SHIFT 0
+
+/* (0x0E88) DRC2_ctrl1 */
+#define MADERA_DRC2L_ENA 0x0002
+#define MADERA_DRC2L_ENA_MASK 0x0002
+#define MADERA_DRC2L_ENA_SHIFT 1
+#define MADERA_DRC2R_ENA 0x0001
+#define MADERA_DRC2R_ENA_MASK 0x0001
+#define MADERA_DRC2R_ENA_SHIFT 0
+
+/* (0x0EC0) HPLPF1_1 */
+#define MADERA_LHPF1_MODE 0x0002
+#define MADERA_LHPF1_MODE_MASK 0x0002
+#define MADERA_LHPF1_MODE_SHIFT 1
+#define MADERA_LHPF1_ENA 0x0001
+#define MADERA_LHPF1_ENA_MASK 0x0001
+#define MADERA_LHPF1_ENA_SHIFT 0
+
+/* (0x0EC1) HPLPF1_2 */
+#define MADERA_LHPF1_COEFF_MASK 0xFFFF
+#define MADERA_LHPF1_COEFF_SHIFT 0
+
+/* (0x0EC4) HPLPF2_1 */
+#define MADERA_LHPF2_MODE 0x0002
+#define MADERA_LHPF2_MODE_MASK 0x0002
+#define MADERA_LHPF2_MODE_SHIFT 1
+#define MADERA_LHPF2_ENA 0x0001
+#define MADERA_LHPF2_ENA_MASK 0x0001
+#define MADERA_LHPF2_ENA_SHIFT 0
+
+/* (0x0EC5) HPLPF2_2 */
+#define MADERA_LHPF2_COEFF_MASK 0xFFFF
+#define MADERA_LHPF2_COEFF_SHIFT 0
+
+/* (0x0EC8) HPLPF3_1 */
+#define MADERA_LHPF3_MODE 0x0002
+#define MADERA_LHPF3_MODE_MASK 0x0002
+#define MADERA_LHPF3_MODE_SHIFT 1
+#define MADERA_LHPF3_ENA 0x0001
+#define MADERA_LHPF3_ENA_MASK 0x0001
+#define MADERA_LHPF3_ENA_SHIFT 0
+
+/* (0x0EC9) HPLPF3_2 */
+#define MADERA_LHPF3_COEFF_MASK 0xFFFF
+#define MADERA_LHPF3_COEFF_SHIFT 0
+
+/* (0x0ECC) HPLPF4_1 */
+#define MADERA_LHPF4_MODE 0x0002
+#define MADERA_LHPF4_MODE_MASK 0x0002
+#define MADERA_LHPF4_MODE_SHIFT 1
+#define MADERA_LHPF4_ENA 0x0001
+#define MADERA_LHPF4_ENA_MASK 0x0001
+#define MADERA_LHPF4_ENA_SHIFT 0
+
+/* (0x0ECD) HPLPF4_2 */
+#define MADERA_LHPF4_COEFF_MASK 0xFFFF
+#define MADERA_LHPF4_COEFF_SHIFT 0
+
+/* (0x0ED0) ASRC2_ENABLE */
+#define MADERA_ASRC2_IN2L_ENA 0x0008
+#define MADERA_ASRC2_IN2L_ENA_MASK 0x0008
+#define MADERA_ASRC2_IN2L_ENA_SHIFT 3
+#define MADERA_ASRC2_IN2R_ENA 0x0004
+#define MADERA_ASRC2_IN2R_ENA_MASK 0x0004
+#define MADERA_ASRC2_IN2R_ENA_SHIFT 2
+#define MADERA_ASRC2_IN1L_ENA 0x0002
+#define MADERA_ASRC2_IN1L_ENA_MASK 0x0002
+#define MADERA_ASRC2_IN1L_ENA_SHIFT 1
+#define MADERA_ASRC2_IN1R_ENA 0x0001
+#define MADERA_ASRC2_IN1R_ENA_MASK 0x0001
+#define MADERA_ASRC2_IN1R_ENA_SHIFT 0
+
+/* (0x0ED2) ASRC2_RATE1 */
+#define MADERA_ASRC2_RATE1_MASK 0xF800
+#define MADERA_ASRC2_RATE1_SHIFT 11
+
+/* (0x0ED3) ASRC2_RATE2 */
+#define MADERA_ASRC2_RATE2_MASK 0xF800
+#define MADERA_ASRC2_RATE2_SHIFT 11
+
+/* (0x0EE0) ASRC1_ENABLE */
+#define MADERA_ASRC1_IN2L_ENA 0x0008
+#define MADERA_ASRC1_IN2L_ENA_MASK 0x0008
+#define MADERA_ASRC1_IN2L_ENA_SHIFT 3
+#define MADERA_ASRC1_IN2R_ENA 0x0004
+#define MADERA_ASRC1_IN2R_ENA_MASK 0x0004
+#define MADERA_ASRC1_IN2R_ENA_SHIFT 2
+#define MADERA_ASRC1_IN1L_ENA 0x0002
+#define MADERA_ASRC1_IN1L_ENA_MASK 0x0002
+#define MADERA_ASRC1_IN1L_ENA_SHIFT 1
+#define MADERA_ASRC1_IN1R_ENA 0x0001
+#define MADERA_ASRC1_IN1R_ENA_MASK 0x0001
+#define MADERA_ASRC1_IN1R_ENA_SHIFT 0
+
+/* (0x0EE2) ASRC1_RATE1 */
+#define MADERA_ASRC1_RATE1_MASK 0xF800
+#define MADERA_ASRC1_RATE1_SHIFT 11
+
+/* (0x0EE3) ASRC1_RATE2 */
+#define MADERA_ASRC1_RATE2_MASK 0xF800
+#define MADERA_ASRC1_RATE2_SHIFT 11
+
+/* (0x0EF0) ISRC1_CTRL_1 */
+#define MADERA_ISRC1_FSH_MASK 0xF800
+#define MADERA_ISRC1_FSH_SHIFT 11
+#define MADERA_ISRC1_CLK_SEL_MASK 0x0700
+#define MADERA_ISRC1_CLK_SEL_SHIFT 8
+
+/* (0x0EF1) ISRC1_CTRL_2 */
+#define MADERA_ISRC1_FSL_MASK 0xF800
+#define MADERA_ISRC1_FSL_SHIFT 11
+
+/* (0x0EF2) ISRC1_CTRL_3 */
+#define MADERA_ISRC1_INT1_ENA 0x8000
+#define MADERA_ISRC1_INT1_ENA_MASK 0x8000
+#define MADERA_ISRC1_INT1_ENA_SHIFT 15
+#define MADERA_ISRC1_INT2_ENA 0x4000
+#define MADERA_ISRC1_INT2_ENA_MASK 0x4000
+#define MADERA_ISRC1_INT2_ENA_SHIFT 14
+#define MADERA_ISRC1_INT3_ENA 0x2000
+#define MADERA_ISRC1_INT3_ENA_MASK 0x2000
+#define MADERA_ISRC1_INT3_ENA_SHIFT 13
+#define MADERA_ISRC1_INT4_ENA 0x1000
+#define MADERA_ISRC1_INT4_ENA_MASK 0x1000
+#define MADERA_ISRC1_INT4_ENA_SHIFT 12
+#define MADERA_ISRC1_DEC1_ENA 0x0200
+#define MADERA_ISRC1_DEC1_ENA_MASK 0x0200
+#define MADERA_ISRC1_DEC1_ENA_SHIFT 9
+#define MADERA_ISRC1_DEC2_ENA 0x0100
+#define MADERA_ISRC1_DEC2_ENA_MASK 0x0100
+#define MADERA_ISRC1_DEC2_ENA_SHIFT 8
+#define MADERA_ISRC1_DEC3_ENA 0x0080
+#define MADERA_ISRC1_DEC3_ENA_MASK 0x0080
+#define MADERA_ISRC1_DEC3_ENA_SHIFT 7
+#define MADERA_ISRC1_DEC4_ENA 0x0040
+#define MADERA_ISRC1_DEC4_ENA_MASK 0x0040
+#define MADERA_ISRC1_DEC4_ENA_SHIFT 6
+#define MADERA_ISRC1_NOTCH_ENA 0x0001
+#define MADERA_ISRC1_NOTCH_ENA_MASK 0x0001
+#define MADERA_ISRC1_NOTCH_ENA_SHIFT 0
+
+/* (0x0EF3) ISRC2_CTRL_1 */
+#define MADERA_ISRC2_FSH_MASK 0xF800
+#define MADERA_ISRC2_FSH_SHIFT 11
+#define MADERA_ISRC2_CLK_SEL_MASK 0x0700
+#define MADERA_ISRC2_CLK_SEL_SHIFT 8
+
+/* (0x0EF4) ISRC2_CTRL_2 */
+#define MADERA_ISRC2_FSL_MASK 0xF800
+#define MADERA_ISRC2_FSL_SHIFT 11
+
+/* (0x0EF5) ISRC2_CTRL_3 */
+#define MADERA_ISRC2_INT1_ENA 0x8000
+#define MADERA_ISRC2_INT1_ENA_MASK 0x8000
+#define MADERA_ISRC2_INT1_ENA_SHIFT 15
+#define MADERA_ISRC2_INT2_ENA 0x4000
+#define MADERA_ISRC2_INT2_ENA_MASK 0x4000
+#define MADERA_ISRC2_INT2_ENA_SHIFT 14
+#define MADERA_ISRC2_INT3_ENA 0x2000
+#define MADERA_ISRC2_INT3_ENA_MASK 0x2000
+#define MADERA_ISRC2_INT3_ENA_SHIFT 13
+#define MADERA_ISRC2_INT4_ENA 0x1000
+#define MADERA_ISRC2_INT4_ENA_MASK 0x1000
+#define MADERA_ISRC2_INT4_ENA_SHIFT 12
+#define MADERA_ISRC2_DEC1_ENA 0x0200
+#define MADERA_ISRC2_DEC1_ENA_MASK 0x0200
+#define MADERA_ISRC2_DEC1_ENA_SHIFT 9
+#define MADERA_ISRC2_DEC2_ENA 0x0100
+#define MADERA_ISRC2_DEC2_ENA_MASK 0x0100
+#define MADERA_ISRC2_DEC2_ENA_SHIFT 8
+#define MADERA_ISRC2_DEC3_ENA 0x0080
+#define MADERA_ISRC2_DEC3_ENA_MASK 0x0080
+#define MADERA_ISRC2_DEC3_ENA_SHIFT 7
+#define MADERA_ISRC2_DEC4_ENA 0x0040
+#define MADERA_ISRC2_DEC4_ENA_MASK 0x0040
+#define MADERA_ISRC2_DEC4_ENA_SHIFT 6
+#define MADERA_ISRC2_NOTCH_ENA 0x0001
+#define MADERA_ISRC2_NOTCH_ENA_MASK 0x0001
+#define MADERA_ISRC2_NOTCH_ENA_SHIFT 0
+
+/* (0x0EF6) ISRC3_CTRL_1 */
+#define MADERA_ISRC3_FSH_MASK 0xF800
+#define MADERA_ISRC3_FSH_SHIFT 11
+#define MADERA_ISRC3_CLK_SEL_MASK 0x0700
+#define MADERA_ISRC3_CLK_SEL_SHIFT 8
+
+/* (0x0EF7) ISRC3_CTRL_2 */
+#define MADERA_ISRC3_FSL_MASK 0xF800
+#define MADERA_ISRC3_FSL_SHIFT 11
+
+/* (0x0EF8) ISRC3_CTRL_3 */
+#define MADERA_ISRC3_INT1_ENA 0x8000
+#define MADERA_ISRC3_INT1_ENA_MASK 0x8000
+#define MADERA_ISRC3_INT1_ENA_SHIFT 15
+#define MADERA_ISRC3_INT2_ENA 0x4000
+#define MADERA_ISRC3_INT2_ENA_MASK 0x4000
+#define MADERA_ISRC3_INT2_ENA_SHIFT 14
+#define MADERA_ISRC3_INT3_ENA 0x2000
+#define MADERA_ISRC3_INT3_ENA_MASK 0x2000
+#define MADERA_ISRC3_INT3_ENA_SHIFT 13
+#define MADERA_ISRC3_INT4_ENA 0x1000
+#define MADERA_ISRC3_INT4_ENA_MASK 0x1000
+#define MADERA_ISRC3_INT4_ENA_SHIFT 12
+#define MADERA_ISRC3_DEC1_ENA 0x0200
+#define MADERA_ISRC3_DEC1_ENA_MASK 0x0200
+#define MADERA_ISRC3_DEC1_ENA_SHIFT 9
+#define MADERA_ISRC3_DEC2_ENA 0x0100
+#define MADERA_ISRC3_DEC2_ENA_MASK 0x0100
+#define MADERA_ISRC3_DEC2_ENA_SHIFT 8
+#define MADERA_ISRC3_DEC3_ENA 0x0080
+#define MADERA_ISRC3_DEC3_ENA_MASK 0x0080
+#define MADERA_ISRC3_DEC3_ENA_SHIFT 7
+#define MADERA_ISRC3_DEC4_ENA 0x0040
+#define MADERA_ISRC3_DEC4_ENA_MASK 0x0040
+#define MADERA_ISRC3_DEC4_ENA_SHIFT 6
+#define MADERA_ISRC3_NOTCH_ENA 0x0001
+#define MADERA_ISRC3_NOTCH_ENA_MASK 0x0001
+#define MADERA_ISRC3_NOTCH_ENA_SHIFT 0
+
+/* (0x0EF9) ISRC4_CTRL_1 */
+#define MADERA_ISRC4_FSH_MASK 0xF800
+#define MADERA_ISRC4_FSH_SHIFT 11
+#define MADERA_ISRC4_CLK_SEL_MASK 0x0700
+#define MADERA_ISRC4_CLK_SEL_SHIFT 8
+
+/* (0x0EFA) ISRC4_CTRL_2 */
+#define MADERA_ISRC4_FSL_MASK 0xF800
+#define MADERA_ISRC4_FSL_SHIFT 11
+
+/* (0x0EFB) ISRC4_CTRL_3 */
+#define MADERA_ISRC4_INT1_ENA 0x8000
+#define MADERA_ISRC4_INT1_ENA_MASK 0x8000
+#define MADERA_ISRC4_INT1_ENA_SHIFT 15
+#define MADERA_ISRC4_INT2_ENA 0x4000
+#define MADERA_ISRC4_INT2_ENA_MASK 0x4000
+#define MADERA_ISRC4_INT2_ENA_SHIFT 14
+#define MADERA_ISRC4_INT3_ENA 0x2000
+#define MADERA_ISRC4_INT3_ENA_MASK 0x2000
+#define MADERA_ISRC4_INT3_ENA_SHIFT 13
+#define MADERA_ISRC4_INT4_ENA 0x1000
+#define MADERA_ISRC4_INT4_ENA_MASK 0x1000
+#define MADERA_ISRC4_INT4_ENA_SHIFT 12
+#define MADERA_ISRC4_DEC1_ENA 0x0200
+#define MADERA_ISRC4_DEC1_ENA_MASK 0x0200
+#define MADERA_ISRC4_DEC1_ENA_SHIFT 9
+#define MADERA_ISRC4_DEC2_ENA 0x0100
+#define MADERA_ISRC4_DEC2_ENA_MASK 0x0100
+#define MADERA_ISRC4_DEC2_ENA_SHIFT 8
+#define MADERA_ISRC4_DEC3_ENA 0x0080
+#define MADERA_ISRC4_DEC3_ENA_MASK 0x0080
+#define MADERA_ISRC4_DEC3_ENA_SHIFT 7
+#define MADERA_ISRC4_DEC4_ENA 0x0040
+#define MADERA_ISRC4_DEC4_ENA_MASK 0x0040
+#define MADERA_ISRC4_DEC4_ENA_SHIFT 6
+#define MADERA_ISRC4_NOTCH_ENA 0x0001
+#define MADERA_ISRC4_NOTCH_ENA_MASK 0x0001
+#define MADERA_ISRC4_NOTCH_ENA_SHIFT 0
+
+/* (0x0F00) Clock_Control */
+#define MADERA_EXT_NG_SEL_CLR 0x0080
+#define MADERA_EXT_NG_SEL_CLR_MASK 0x0080
+#define MADERA_EXT_NG_SEL_CLR_SHIFT 7
+#define MADERA_EXT_NG_SEL_SET 0x0040
+#define MADERA_EXT_NG_SEL_SET_MASK 0x0040
+#define MADERA_EXT_NG_SEL_SET_SHIFT 6
+#define MADERA_CLK_R_ENA_CLR 0x0020
+#define MADERA_CLK_R_ENA_CLR_MASK 0x0020
+#define MADERA_CLK_R_ENA_CLR_SHIFT 5
+#define MADERA_CLK_R_ENA_SET 0x0010
+#define MADERA_CLK_R_ENA_SET_MASK 0x0010
+#define MADERA_CLK_R_ENA_SET_SHIFT 4
+#define MADERA_CLK_NG_ENA_CLR 0x0008
+#define MADERA_CLK_NG_ENA_CLR_MASK 0x0008
+#define MADERA_CLK_NG_ENA_CLR_SHIFT 3
+#define MADERA_CLK_NG_ENA_SET 0x0004
+#define MADERA_CLK_NG_ENA_SET_MASK 0x0004
+#define MADERA_CLK_NG_ENA_SET_SHIFT 2
+#define MADERA_CLK_L_ENA_CLR 0x0002
+#define MADERA_CLK_L_ENA_CLR_MASK 0x0002
+#define MADERA_CLK_L_ENA_CLR_SHIFT 1
+#define MADERA_CLK_L_ENA_SET 0x0001
+#define MADERA_CLK_L_ENA_SET_MASK 0x0001
+#define MADERA_CLK_L_ENA_SET_SHIFT 0
+
+/* (0x0F01) ANC_SRC */
+#define MADERA_IN_RXANCR_SEL_MASK 0x0070
+#define MADERA_IN_RXANCR_SEL_SHIFT 4
+#define MADERA_IN_RXANCL_SEL_MASK 0x0007
+#define MADERA_IN_RXANCL_SEL_SHIFT 0
+
+/* (0x0F17) FCL_ADC_reformatter_control */
+#define MADERA_FCL_MIC_MODE_SEL 0x000C
+#define MADERA_FCL_MIC_MODE_SEL_SHIFT 2
+
+/* (0x0F73) FCR_ADC_reformatter_control */
+#define MADERA_FCR_MIC_MODE_SEL 0x000C
+#define MADERA_FCR_MIC_MODE_SEL_SHIFT 2
+
+/* (0x10C0) AUXPDM1_CTRL_0 */
+#define MADERA_AUXPDM1_SRC_MASK 0x0F00
+#define MADERA_AUXPDM1_SRC_SHIFT 8
+#define MADERA_AUXPDM1_TXEDGE_MASK 0x0010
+#define MADERA_AUXPDM1_TXEDGE_SHIFT 4
+#define MADERA_AUXPDM1_MSTR_MASK 0x0008
+#define MADERA_AUXPDM1_MSTR_SHIFT 3
+#define MADERA_AUXPDM1_ENABLE_MASK 0x0001
+#define MADERA_AUXPDM1_ENABLE_SHIFT 0
+
+/* (0x10C1) AUXPDM1_CTRL_1 */
+#define MADERA_AUXPDM1_CLK_FREQ_MASK 0xC000
+#define MADERA_AUXPDM1_CLK_FREQ_SHIFT 14
+
+/* (0x1480) DFC1_CTRL_W0 */
+#define MADERA_DFC1_RATE_MASK 0x007C
+#define MADERA_DFC1_RATE_SHIFT 2
+#define MADERA_DFC1_DITH_ENA 0x0002
+#define MADERA_DFC1_DITH_ENA_MASK 0x0002
+#define MADERA_DFC1_DITH_ENA_SHIFT 1
+#define MADERA_DFC1_ENA 0x0001
+#define MADERA_DFC1_ENA_MASK 0x0001
+#define MADERA_DFC1_ENA_SHIFT 0
+
+/* (0x1482) DFC1_RX_W0 */
+#define MADERA_DFC1_RX_DATA_WIDTH_MASK 0x1F00
+#define MADERA_DFC1_RX_DATA_WIDTH_SHIFT 8
+
+#define MADERA_DFC1_RX_DATA_TYPE_MASK 0x0007
+#define MADERA_DFC1_RX_DATA_TYPE_SHIFT 0
+
+/* (0x1484) DFC1_TX_W0 */
+#define MADERA_DFC1_TX_DATA_WIDTH_MASK 0x1F00
+#define MADERA_DFC1_TX_DATA_WIDTH_SHIFT 8
+
+#define MADERA_DFC1_TX_DATA_TYPE_MASK 0x0007
+#define MADERA_DFC1_TX_DATA_TYPE_SHIFT 0
+
+/* (0x1600) ADSP2_IRQ0 */
+#define MADERA_DSP_IRQ2 0x0002
+#define MADERA_DSP_IRQ1 0x0001
+
+/* (0x1601) ADSP2_IRQ1 */
+#define MADERA_DSP_IRQ4 0x0002
+#define MADERA_DSP_IRQ3 0x0001
+
+/* (0x1602) ADSP2_IRQ2 */
+#define MADERA_DSP_IRQ6 0x0002
+#define MADERA_DSP_IRQ5 0x0001
+
+/* (0x1603) ADSP2_IRQ3 */
+#define MADERA_DSP_IRQ8 0x0002
+#define MADERA_DSP_IRQ7 0x0001
+
+/* (0x1604) ADSP2_IRQ4 */
+#define MADERA_DSP_IRQ10 0x0002
+#define MADERA_DSP_IRQ9 0x0001
+
+/* (0x1605) ADSP2_IRQ5 */
+#define MADERA_DSP_IRQ12 0x0002
+#define MADERA_DSP_IRQ11 0x0001
+
+/* (0x1606) ADSP2_IRQ6 */
+#define MADERA_DSP_IRQ14 0x0002
+#define MADERA_DSP_IRQ13 0x0001
+
+/* (0x1607) ADSP2_IRQ7 */
+#define MADERA_DSP_IRQ16 0x0002
+#define MADERA_DSP_IRQ15 0x0001
+
+/* (0x1700) GPIO1_CTRL_1 */
+#define MADERA_GP1_LVL 0x8000
+#define MADERA_GP1_LVL_MASK 0x8000
+#define MADERA_GP1_LVL_SHIFT 15
+#define MADERA_GP1_OP_CFG 0x4000
+#define MADERA_GP1_OP_CFG_MASK 0x4000
+#define MADERA_GP1_OP_CFG_SHIFT 14
+#define MADERA_GP1_DB 0x2000
+#define MADERA_GP1_DB_MASK 0x2000
+#define MADERA_GP1_DB_SHIFT 13
+#define MADERA_GP1_POL 0x1000
+#define MADERA_GP1_POL_MASK 0x1000
+#define MADERA_GP1_POL_SHIFT 12
+#define MADERA_GP1_IP_CFG 0x0800
+#define MADERA_GP1_IP_CFG_MASK 0x0800
+#define MADERA_GP1_IP_CFG_SHIFT 11
+#define MADERA_GP1_FN_MASK 0x03FF
+#define MADERA_GP1_FN_SHIFT 0
+
+/* (0x1701) GPIO1_CTRL_2 */
+#define MADERA_GP1_DIR 0x8000
+#define MADERA_GP1_DIR_MASK 0x8000
+#define MADERA_GP1_DIR_SHIFT 15
+#define MADERA_GP1_PU 0x4000
+#define MADERA_GP1_PU_MASK 0x4000
+#define MADERA_GP1_PU_SHIFT 14
+#define MADERA_GP1_PD 0x2000
+#define MADERA_GP1_PD_MASK 0x2000
+#define MADERA_GP1_PD_SHIFT 13
+#define MADERA_GP1_DRV_STR_MASK 0x1800
+#define MADERA_GP1_DRV_STR_SHIFT 11
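+
+/*
+ * Illustrative sketch (assumed usage; "fn" is a hypothetical function
+ * code): the pin function is selected via the FN field of CTRL_1, with
+ * direction and pulls configured in CTRL_2:
+ *
+ *   regmap_update_bits(regmap, MADERA_GPIO1_CTRL_1,
+ *                      MADERA_GP1_FN_MASK, fn << MADERA_GP1_FN_SHIFT);
+ *   regmap_update_bits(regmap, MADERA_GPIO1_CTRL_2,
+ *                      MADERA_GP1_DIR_MASK, 0);
+ */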
+
+/* (0x1800) IRQ1_Status_1 */
+#define MADERA_CTRLIF_ERR_EINT1 0x1000
+#define MADERA_CTRLIF_ERR_EINT1_MASK 0x1000
+#define MADERA_CTRLIF_ERR_EINT1_SHIFT 12
+#define MADERA_SYSCLK_FAIL_EINT1 0x0200
+#define MADERA_SYSCLK_FAIL_EINT1_MASK 0x0200
+#define MADERA_SYSCLK_FAIL_EINT1_SHIFT 9
+#define MADERA_CLOCK_DETECT_EINT1 0x0100
+#define MADERA_CLOCK_DETECT_EINT1_MASK 0x0100
+#define MADERA_CLOCK_DETECT_EINT1_SHIFT 8
+#define MADERA_BOOT_DONE_EINT1 0x0080
+#define MADERA_BOOT_DONE_EINT1_MASK 0x0080
+#define MADERA_BOOT_DONE_EINT1_SHIFT 7
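+
+/*
+ * Illustrative sketch (assumed usage): an interrupt status bit is tested
+ * after a read and acknowledged by writing the same bit back:
+ *
+ *   regmap_read(regmap, MADERA_IRQ1_STATUS_1, &val);
+ *   if (val & MADERA_BOOT_DONE_EINT1)
+ *           regmap_write(regmap, MADERA_IRQ1_STATUS_1,
+ *                        MADERA_BOOT_DONE_EINT1);
+ */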
+
+/* (0x1801) IRQ1_Status_2 */
+#define MADERA_FLLAO_LOCK_EINT1 0x0800
+#define MADERA_FLLAO_LOCK_EINT1_MASK 0x0800
+#define MADERA_FLLAO_LOCK_EINT1_SHIFT 11
+#define MADERA_FLL3_LOCK_EINT1 0x0400
+#define MADERA_FLL3_LOCK_EINT1_MASK 0x0400
+#define MADERA_FLL3_LOCK_EINT1_SHIFT 10
+#define MADERA_FLL2_LOCK_EINT1 0x0200
+#define MADERA_FLL2_LOCK_EINT1_MASK 0x0200
+#define MADERA_FLL2_LOCK_EINT1_SHIFT 9
+#define MADERA_FLL1_LOCK_EINT1 0x0100
+#define MADERA_FLL1_LOCK_EINT1_MASK 0x0100
+#define MADERA_FLL1_LOCK_EINT1_SHIFT 8
+
+/* (0x1805) IRQ1_Status_6 */
+#define MADERA_MICDET2_EINT1 0x0200
+#define MADERA_MICDET2_EINT1_MASK 0x0200
+#define MADERA_MICDET2_EINT1_SHIFT 9
+#define MADERA_MICDET1_EINT1 0x0100
+#define MADERA_MICDET1_EINT1_MASK 0x0100
+#define MADERA_MICDET1_EINT1_SHIFT 8
+#define MADERA_HPDET_EINT1 0x0001
+#define MADERA_HPDET_EINT1_MASK 0x0001
+#define MADERA_HPDET_EINT1_SHIFT 0
+
+/* (0x1806) IRQ1_Status_7 */
+#define MADERA_MICD_CLAMP_FALL_EINT1 0x0020
+#define MADERA_MICD_CLAMP_FALL_EINT1_MASK 0x0020
+#define MADERA_MICD_CLAMP_FALL_EINT1_SHIFT 5
+#define MADERA_MICD_CLAMP_RISE_EINT1 0x0010
+#define MADERA_MICD_CLAMP_RISE_EINT1_MASK 0x0010
+#define MADERA_MICD_CLAMP_RISE_EINT1_SHIFT 4
+#define MADERA_JD2_FALL_EINT1 0x0008
+#define MADERA_JD2_FALL_EINT1_MASK 0x0008
+#define MADERA_JD2_FALL_EINT1_SHIFT 3
+#define MADERA_JD2_RISE_EINT1 0x0004
+#define MADERA_JD2_RISE_EINT1_MASK 0x0004
+#define MADERA_JD2_RISE_EINT1_SHIFT 2
+#define MADERA_JD1_FALL_EINT1 0x0002
+#define MADERA_JD1_FALL_EINT1_MASK 0x0002
+#define MADERA_JD1_FALL_EINT1_SHIFT 1
+#define MADERA_JD1_RISE_EINT1 0x0001
+#define MADERA_JD1_RISE_EINT1_MASK 0x0001
+#define MADERA_JD1_RISE_EINT1_SHIFT 0
+
+/* (0x1808) IRQ1_Status_9 */
+#define MADERA_ASRC2_IN2_LOCK_EINT1 0x0800
+#define MADERA_ASRC2_IN2_LOCK_EINT1_MASK 0x0800
+#define MADERA_ASRC2_IN2_LOCK_EINT1_SHIFT 11
+#define MADERA_ASRC2_IN1_LOCK_EINT1 0x0400
+#define MADERA_ASRC2_IN1_LOCK_EINT1_MASK 0x0400
+#define MADERA_ASRC2_IN1_LOCK_EINT1_SHIFT 10
+#define MADERA_ASRC1_IN2_LOCK_EINT1 0x0200
+#define MADERA_ASRC1_IN2_LOCK_EINT1_MASK 0x0200
+#define MADERA_ASRC1_IN2_LOCK_EINT1_SHIFT 9
+#define MADERA_ASRC1_IN1_LOCK_EINT1 0x0100
+#define MADERA_ASRC1_IN1_LOCK_EINT1_MASK 0x0100
+#define MADERA_ASRC1_IN1_LOCK_EINT1_SHIFT 8
+#define MADERA_DRC2_SIG_DET_EINT1 0x0002
+#define MADERA_DRC2_SIG_DET_EINT1_MASK 0x0002
+#define MADERA_DRC2_SIG_DET_EINT1_SHIFT 1
+#define MADERA_DRC1_SIG_DET_EINT1 0x0001
+#define MADERA_DRC1_SIG_DET_EINT1_MASK 0x0001
+#define MADERA_DRC1_SIG_DET_EINT1_SHIFT 0
+
+/* (0x180A) IRQ1_Status_11 */
+#define MADERA_DSP_IRQ16_EINT1 0x8000
+#define MADERA_DSP_IRQ16_EINT1_MASK 0x8000
+#define MADERA_DSP_IRQ16_EINT1_SHIFT 15
+#define MADERA_DSP_IRQ15_EINT1 0x4000
+#define MADERA_DSP_IRQ15_EINT1_MASK 0x4000
+#define MADERA_DSP_IRQ15_EINT1_SHIFT 14
+#define MADERA_DSP_IRQ14_EINT1 0x2000
+#define MADERA_DSP_IRQ14_EINT1_MASK 0x2000
+#define MADERA_DSP_IRQ14_EINT1_SHIFT 13
+#define MADERA_DSP_IRQ13_EINT1 0x1000
+#define MADERA_DSP_IRQ13_EINT1_MASK 0x1000
+#define MADERA_DSP_IRQ13_EINT1_SHIFT 12
+#define MADERA_DSP_IRQ12_EINT1 0x0800
+#define MADERA_DSP_IRQ12_EINT1_MASK 0x0800
+#define MADERA_DSP_IRQ12_EINT1_SHIFT 11
+#define MADERA_DSP_IRQ11_EINT1 0x0400
+#define MADERA_DSP_IRQ11_EINT1_MASK 0x0400
+#define MADERA_DSP_IRQ11_EINT1_SHIFT 10
+#define MADERA_DSP_IRQ10_EINT1 0x0200
+#define MADERA_DSP_IRQ10_EINT1_MASK 0x0200
+#define MADERA_DSP_IRQ10_EINT1_SHIFT 9
+#define MADERA_DSP_IRQ9_EINT1 0x0100
+#define MADERA_DSP_IRQ9_EINT1_MASK 0x0100
+#define MADERA_DSP_IRQ9_EINT1_SHIFT 8
+#define MADERA_DSP_IRQ8_EINT1 0x0080
+#define MADERA_DSP_IRQ8_EINT1_MASK 0x0080
+#define MADERA_DSP_IRQ8_EINT1_SHIFT 7
+#define MADERA_DSP_IRQ7_EINT1 0x0040
+#define MADERA_DSP_IRQ7_EINT1_MASK 0x0040
+#define MADERA_DSP_IRQ7_EINT1_SHIFT 6
+#define MADERA_DSP_IRQ6_EINT1 0x0020
+#define MADERA_DSP_IRQ6_EINT1_MASK 0x0020
+#define MADERA_DSP_IRQ6_EINT1_SHIFT 5
+#define MADERA_DSP_IRQ5_EINT1 0x0010
+#define MADERA_DSP_IRQ5_EINT1_MASK 0x0010
+#define MADERA_DSP_IRQ5_EINT1_SHIFT 4
+#define MADERA_DSP_IRQ4_EINT1 0x0008
+#define MADERA_DSP_IRQ4_EINT1_MASK 0x0008
+#define MADERA_DSP_IRQ4_EINT1_SHIFT 3
+#define MADERA_DSP_IRQ3_EINT1 0x0004
+#define MADERA_DSP_IRQ3_EINT1_MASK 0x0004
+#define MADERA_DSP_IRQ3_EINT1_SHIFT 2
+#define MADERA_DSP_IRQ2_EINT1 0x0002
+#define MADERA_DSP_IRQ2_EINT1_MASK 0x0002
+#define MADERA_DSP_IRQ2_EINT1_SHIFT 1
+#define MADERA_DSP_IRQ1_EINT1 0x0001
+#define MADERA_DSP_IRQ1_EINT1_MASK 0x0001
+#define MADERA_DSP_IRQ1_EINT1_SHIFT 0
+
+/* (0x180B) IRQ1_Status_12 */
+#define MADERA_SPKOUTR_SC_EINT1 0x0080
+#define MADERA_SPKOUTR_SC_EINT1_MASK 0x0080
+#define MADERA_SPKOUTR_SC_EINT1_SHIFT 7
+#define MADERA_SPKOUTL_SC_EINT1 0x0040
+#define MADERA_SPKOUTL_SC_EINT1_MASK 0x0040
+#define MADERA_SPKOUTL_SC_EINT1_SHIFT 6
+#define MADERA_HP3R_SC_EINT1 0x0020
+#define MADERA_HP3R_SC_EINT1_MASK 0x0020
+#define MADERA_HP3R_SC_EINT1_SHIFT 5
+#define MADERA_HP3L_SC_EINT1 0x0010
+#define MADERA_HP3L_SC_EINT1_MASK 0x0010
+#define MADERA_HP3L_SC_EINT1_SHIFT 4
+#define MADERA_HP2R_SC_EINT1 0x0008
+#define MADERA_HP2R_SC_EINT1_MASK 0x0008
+#define MADERA_HP2R_SC_EINT1_SHIFT 3
+#define MADERA_HP2L_SC_EINT1 0x0004
+#define MADERA_HP2L_SC_EINT1_MASK 0x0004
+#define MADERA_HP2L_SC_EINT1_SHIFT 2
+#define MADERA_HP1R_SC_EINT1 0x0002
+#define MADERA_HP1R_SC_EINT1_MASK 0x0002
+#define MADERA_HP1R_SC_EINT1_SHIFT 1
+#define MADERA_HP1L_SC_EINT1 0x0001
+#define MADERA_HP1L_SC_EINT1_MASK 0x0001
+#define MADERA_HP1L_SC_EINT1_SHIFT 0
+
+/* (0x180E) IRQ1_Status_15 */
+#define MADERA_SPK_OVERHEAT_WARN_EINT1 0x0004
+#define MADERA_SPK_OVERHEAT_WARN_EINT1_MASK 0x0004
+#define MADERA_SPK_OVERHEAT_WARN_EINT1_SHIFT 2
+#define MADERA_SPK_OVERHEAT_EINT1 0x0002
+#define MADERA_SPK_OVERHEAT_EINT1_MASK 0x0002
+#define MADERA_SPK_OVERHEAT_EINT1_SHIFT 1
+#define MADERA_SPK_SHUTDOWN_EINT1 0x0001
+#define MADERA_SPK_SHUTDOWN_EINT1_MASK 0x0001
+#define MADERA_SPK_SHUTDOWN_EINT1_SHIFT 0
+
+/* (0x1820) IRQ1_Status_33 */
+#define MADERA_DSP7_BUS_ERR_EINT1 0x0040
+#define MADERA_DSP7_BUS_ERR_EINT1_MASK 0x0040
+#define MADERA_DSP7_BUS_ERR_EINT1_SHIFT 6
+#define MADERA_DSP6_BUS_ERR_EINT1 0x0020
+#define MADERA_DSP6_BUS_ERR_EINT1_MASK 0x0020
+#define MADERA_DSP6_BUS_ERR_EINT1_SHIFT 5
+#define MADERA_DSP5_BUS_ERR_EINT1 0x0010
+#define MADERA_DSP5_BUS_ERR_EINT1_MASK 0x0010
+#define MADERA_DSP5_BUS_ERR_EINT1_SHIFT 4
+#define MADERA_DSP4_BUS_ERR_EINT1 0x0008
+#define MADERA_DSP4_BUS_ERR_EINT1_MASK 0x0008
+#define MADERA_DSP4_BUS_ERR_EINT1_SHIFT 3
+#define MADERA_DSP3_BUS_ERR_EINT1 0x0004
+#define MADERA_DSP3_BUS_ERR_EINT1_MASK 0x0004
+#define MADERA_DSP3_BUS_ERR_EINT1_SHIFT 2
+#define MADERA_DSP2_BUS_ERR_EINT1 0x0002
+#define MADERA_DSP2_BUS_ERR_EINT1_MASK 0x0002
+#define MADERA_DSP2_BUS_ERR_EINT1_SHIFT 1
+#define MADERA_DSP1_BUS_ERR_EINT1 0x0001
+#define MADERA_DSP1_BUS_ERR_EINT1_MASK 0x0001
+#define MADERA_DSP1_BUS_ERR_EINT1_SHIFT 0
+
+/* (0x1845) IRQ1_Mask_6 */
+#define MADERA_IM_MICDET2_EINT1 0x0200
+#define MADERA_IM_MICDET2_EINT1_MASK 0x0200
+#define MADERA_IM_MICDET2_EINT1_SHIFT 9
+#define MADERA_IM_MICDET1_EINT1 0x0100
+#define MADERA_IM_MICDET1_EINT1_MASK 0x0100
+#define MADERA_IM_MICDET1_EINT1_SHIFT 8
+#define MADERA_IM_HPDET_EINT1 0x0001
+#define MADERA_IM_HPDET_EINT1_MASK 0x0001
+#define MADERA_IM_HPDET_EINT1_SHIFT 0
+/* (0x184E) IRQ1_Mask_15 */
+#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1 0x0004
+#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1_MASK 0x0004
+#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1_SHIFT 2
+#define MADERA_IM_SPK_OVERHEAT_EINT1 0x0002
+#define MADERA_IM_SPK_OVERHEAT_EINT1_MASK 0x0002
+#define MADERA_IM_SPK_OVERHEAT_EINT1_SHIFT 1
+#define MADERA_IM_SPK_SHUTDOWN_EINT1 0x0001
+#define MADERA_IM_SPK_SHUTDOWN_EINT1_MASK 0x0001
+#define MADERA_IM_SPK_SHUTDOWN_EINT1_SHIFT 0
+
+/* (0x1880) - IRQ1 Raw Status 1 */
+#define MADERA_CTRLIF_ERR_STS1 0x1000
+#define MADERA_CTRLIF_ERR_STS1_MASK 0x1000
+#define MADERA_CTRLIF_ERR_STS1_SHIFT 12
+#define MADERA_SYSCLK_FAIL_STS1 0x0200
+#define MADERA_SYSCLK_FAIL_STS1_MASK 0x0200
+#define MADERA_SYSCLK_FAIL_STS1_SHIFT 9
+#define MADERA_CLOCK_DETECT_STS1 0x0100
+#define MADERA_CLOCK_DETECT_STS1_MASK 0x0100
+#define MADERA_CLOCK_DETECT_STS1_SHIFT 8
+#define MADERA_BOOT_DONE_STS1 0x0080
+#define MADERA_BOOT_DONE_STS1_MASK 0x0080
+#define MADERA_BOOT_DONE_STS1_SHIFT 7
+
+/* (0x1881) - IRQ1 Raw Status 2 */
+#define MADERA_FLL3_LOCK_STS1 0x0400
+#define MADERA_FLL3_LOCK_STS1_MASK 0x0400
+#define MADERA_FLL3_LOCK_STS1_SHIFT 10
+#define MADERA_FLL2_LOCK_STS1 0x0200
+#define MADERA_FLL2_LOCK_STS1_MASK 0x0200
+#define MADERA_FLL2_LOCK_STS1_SHIFT 9
+#define MADERA_FLL1_LOCK_STS1 0x0100
+#define MADERA_FLL1_LOCK_STS1_MASK 0x0100
+#define MADERA_FLL1_LOCK_STS1_SHIFT 8
+
+/* (0x1886) - IRQ1 Raw Status 7 */
+#define MADERA_MICD_CLAMP_FALL_STS1 0x0020
+#define MADERA_MICD_CLAMP_FALL_STS1_MASK 0x0020
+#define MADERA_MICD_CLAMP_FALL_STS1_SHIFT 5
+#define MADERA_MICD_CLAMP_RISE_STS1 0x0010
+#define MADERA_MICD_CLAMP_RISE_STS1_MASK 0x0010
+#define MADERA_MICD_CLAMP_RISE_STS1_SHIFT 4
+#define MADERA_JD2_FALL_STS1 0x0008
+#define MADERA_JD2_FALL_STS1_MASK 0x0008
+#define MADERA_JD2_FALL_STS1_SHIFT 3
+#define MADERA_JD2_RISE_STS1 0x0004
+#define MADERA_JD2_RISE_STS1_MASK 0x0004
+#define MADERA_JD2_RISE_STS1_SHIFT 2
+#define MADERA_JD1_FALL_STS1 0x0002
+#define MADERA_JD1_FALL_STS1_MASK 0x0002
+#define MADERA_JD1_FALL_STS1_SHIFT 1
+#define MADERA_JD1_RISE_STS1 0x0001
+#define MADERA_JD1_RISE_STS1_MASK 0x0001
+#define MADERA_JD1_RISE_STS1_SHIFT 0
+
+/* (0x188E) - IRQ1 Raw Status 15 */
+#define MADERA_SPK_OVERHEAT_WARN_STS1 0x0004
+#define MADERA_SPK_OVERHEAT_WARN_STS1_MASK 0x0004
+#define MADERA_SPK_OVERHEAT_WARN_STS1_SHIFT 2
+#define MADERA_SPK_OVERHEAT_STS1 0x0002
+#define MADERA_SPK_OVERHEAT_STS1_MASK 0x0002
+#define MADERA_SPK_OVERHEAT_STS1_SHIFT 1
+#define MADERA_SPK_SHUTDOWN_STS1 0x0001
+#define MADERA_SPK_SHUTDOWN_STS1_MASK 0x0001
+#define MADERA_SPK_SHUTDOWN_STS1_SHIFT 0
+
+/* (0x1A06) Interrupt_Debounce_7 */
+#define MADERA_MICD_CLAMP_DB 0x0010
+#define MADERA_MICD_CLAMP_DB_MASK 0x0010
+#define MADERA_MICD_CLAMP_DB_SHIFT 4
+#define MADERA_JD2_DB 0x0004
+#define MADERA_JD2_DB_MASK 0x0004
+#define MADERA_JD2_DB_SHIFT 2
+#define MADERA_JD1_DB 0x0001
+#define MADERA_JD1_DB_MASK 0x0001
+#define MADERA_JD1_DB_SHIFT 0
+
+/* (0x1A0E) Interrupt_Debounce_15 */
+#define MADERA_SPK_OVERHEAT_WARN_DB 0x0004
+#define MADERA_SPK_OVERHEAT_WARN_DB_MASK 0x0004
+#define MADERA_SPK_OVERHEAT_WARN_DB_SHIFT 2
+#define MADERA_SPK_OVERHEAT_DB 0x0002
+#define MADERA_SPK_OVERHEAT_DB_MASK 0x0002
+#define MADERA_SPK_OVERHEAT_DB_SHIFT 1
+
+/* (0x1A80) IRQ1_CTRL */
+#define MADERA_IM_IRQ1 0x0800
+#define MADERA_IM_IRQ1_MASK 0x0800
+#define MADERA_IM_IRQ1_SHIFT 11
+#define MADERA_IRQ_POL 0x0400
+#define MADERA_IRQ_POL_MASK 0x0400
+#define MADERA_IRQ_POL_SHIFT 10
+
+/* (0x20004) OTP_HPDET_Cal_1 */
+#define MADERA_OTP_HPDET_CALIB_OFFSET_11 0xFF000000
+#define MADERA_OTP_HPDET_CALIB_OFFSET_11_MASK 0xFF000000
+#define MADERA_OTP_HPDET_CALIB_OFFSET_11_SHIFT 24
+#define MADERA_OTP_HPDET_CALIB_OFFSET_10 0x00FF0000
+#define MADERA_OTP_HPDET_CALIB_OFFSET_10_MASK 0x00FF0000
+#define MADERA_OTP_HPDET_CALIB_OFFSET_10_SHIFT 16
+#define MADERA_OTP_HPDET_CALIB_OFFSET_01 0x0000FF00
+#define MADERA_OTP_HPDET_CALIB_OFFSET_01_MASK 0x0000FF00
+#define MADERA_OTP_HPDET_CALIB_OFFSET_01_SHIFT 8
+#define MADERA_OTP_HPDET_CALIB_OFFSET_00 0x000000FF
+#define MADERA_OTP_HPDET_CALIB_OFFSET_00_MASK 0x000000FF
+#define MADERA_OTP_HPDET_CALIB_OFFSET_00_SHIFT 0
+
+/* (0x20006) OTP_HPDET_Cal_2 */
+#define MADERA_OTP_HPDET_GRADIENT_1X 0x0000FF00
+#define MADERA_OTP_HPDET_GRADIENT_1X_MASK 0x0000FF00
+#define MADERA_OTP_HPDET_GRADIENT_1X_SHIFT 8
+#define MADERA_OTP_HPDET_GRADIENT_0X 0x000000FF
+#define MADERA_OTP_HPDET_GRADIENT_0X_MASK 0x000000FF
+#define MADERA_OTP_HPDET_GRADIENT_0X_SHIFT 0
+
+#endif
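
Every field in the Madera header above follows the same VALUE/MASK/SHIFT triplet convention. As a quick illustration — a standalone sketch, not driver code, with an invented raw register value — this is how a MASK/SHIFT pair decodes one of the OTP HPDET calibration bytes:

#include <stdint.h>
#include <stdio.h>

/* Copied from the header above */
#define MADERA_OTP_HPDET_CALIB_OFFSET_00_MASK	0x000000FF
#define MADERA_OTP_HPDET_CALIB_OFFSET_00_SHIFT	0
#define MADERA_OTP_HPDET_CALIB_OFFSET_10_MASK	0x00FF0000
#define MADERA_OTP_HPDET_CALIB_OFFSET_10_SHIFT	16

/* Extract a field from a raw register value via its _MASK/_SHIFT pair. */
static uint32_t field_get(uint32_t reg, uint32_t mask, unsigned int shift)
{
	return (reg & mask) >> shift;
}

int main(void)
{
	uint32_t raw = 0x1A2B3C4D;	/* hypothetical OTP_HPDET_Cal_1 readout */

	printf("offset_00 = 0x%02x\n",
	       (unsigned int)field_get(raw, MADERA_OTP_HPDET_CALIB_OFFSET_00_MASK,
				       MADERA_OTP_HPDET_CALIB_OFFSET_00_SHIFT));
	printf("offset_10 = 0x%02x\n",
	       (unsigned int)field_get(raw, MADERA_OTP_HPDET_CALIB_OFFSET_10_MASK,
				       MADERA_OTP_HPDET_CALIB_OFFSET_10_SHIFT));
	return 0;
}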
diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h
index df75234f979d..a21374f8ad26 100644
--- a/include/linux/mfd/max14577-private.h
+++ b/include/linux/mfd/max14577-private.h
@@ -1,19 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max14577-private.h - Common API for the Maxim 14577/77836 internal sub chip
*
 * Copyright (C) 2014 Samsung Electronics
* Chanwoo Choi <cw00.choi@samsung.com>
* Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __MAX14577_PRIVATE_H__
diff --git a/include/linux/mfd/max14577.h b/include/linux/mfd/max14577.h
index d81b52bb8bee..8b3ef891ba42 100644
--- a/include/linux/mfd/max14577.h
+++ b/include/linux/mfd/max14577.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max14577.h - Driver for the Maxim 14577/77836
*
@@ -5,16 +6,6 @@
* Chanwoo Choi <cw00.choi@samsung.com>
* Krzysztof Kozlowski <krzk@kernel.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* This driver is based on max8997.h
*
* MAX14577 has MUIC, Charger devices.
diff --git a/include/linux/mfd/max597x.h b/include/linux/mfd/max597x.h
new file mode 100644
index 000000000000..a850b2e02e6a
--- /dev/null
+++ b/include/linux/mfd/max597x.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Device driver for regulators in the MAX5970 and MAX5978 ICs
+ *
+ * Copyright (c) 2022 9elements GmbH
+ *
+ * Author: Patrick Rudolph <patrick.rudolph@9elements.com>
+ */
+
+#ifndef _MFD_MAX597X_H
+#define _MFD_MAX597X_H
+
+#include <linux/regmap.h>
+
+#define MAX5970_NUM_SWITCHES 2
+#define MAX5978_NUM_SWITCHES 1
+#define MAX597X_NUM_LEDS 4
+
+struct max597x_data {
+ int num_switches;
+ u32 irng[MAX5970_NUM_SWITCHES];
+ u32 mon_rng[MAX5970_NUM_SWITCHES];
+ u32 shunt_micro_ohms[MAX5970_NUM_SWITCHES];
+};
+
+enum max597x_chip_type {
+ MAX597x_TYPE_MAX5978 = 1,
+ MAX597x_TYPE_MAX5970,
+};
+
+#define MAX5970_REG_CURRENT_L(ch) (0x01 + (ch) * 4)
+#define MAX5970_REG_CURRENT_H(ch) (0x00 + (ch) * 4)
+#define MAX5970_REG_VOLTAGE_L(ch) (0x03 + (ch) * 4)
+#define MAX5970_REG_VOLTAGE_H(ch) (0x02 + (ch) * 4)
+#define MAX5970_REG_MON_RANGE 0x18
+#define MAX5970_MON_MASK 0x3
+#define MAX5970_MON(reg, ch) (((reg) >> ((ch) * 2)) & MAX5970_MON_MASK)
+#define MAX5970_MON_MAX_RANGE_UV 16000000
+
+#define MAX5970_REG_CH_UV_WARN_H(ch) (0x1A + (ch) * 10)
+#define MAX5970_REG_CH_UV_WARN_L(ch) (0x1B + (ch) * 10)
+#define MAX5970_REG_CH_UV_CRIT_H(ch) (0x1C + (ch) * 10)
+#define MAX5970_REG_CH_UV_CRIT_L(ch) (0x1D + (ch) * 10)
+#define MAX5970_REG_CH_OV_WARN_H(ch) (0x1E + (ch) * 10)
+#define MAX5970_REG_CH_OV_WARN_L(ch) (0x1F + (ch) * 10)
+#define MAX5970_REG_CH_OV_CRIT_H(ch) (0x20 + (ch) * 10)
+#define MAX5970_REG_CH_OV_CRIT_L(ch) (0x21 + (ch) * 10)
+
+#define MAX5970_VAL2REG_H(x) (((x) >> 2) & 0xFF)
+#define MAX5970_VAL2REG_L(x) ((x) & 0x3)
+
+#define MAX5970_REG_DAC_FAST(ch) (0x2E + (ch))
+
+#define MAX5970_FAST2SLOW_RATIO 200
+
+#define MAX5970_REG_STATUS0 0x31
+#define MAX5970_CB_IFAULTF(ch) (1 << (ch))
+#define MAX5970_CB_IFAULTS(ch) (1 << ((ch) + 4))
+
+#define MAX5970_REG_STATUS1 0x32
+#define STATUS1_PROT_MASK 0x3
+#define STATUS1_PROT(reg) \
+ (((reg) >> 6) & STATUS1_PROT_MASK)
+#define STATUS1_PROT_SHUTDOWN 0
+#define STATUS1_PROT_CLEAR_PG 1
+#define STATUS1_PROT_ALERT_ONLY 2
+
+#define MAX5970_REG_STATUS2 0x33
+#define MAX5970_IRNG_MASK 0x3
+#define MAX5970_IRNG(reg, ch) \
+ (((reg) >> ((ch) * 2)) & MAX5970_IRNG_MASK)
+
+#define MAX5970_REG_STATUS3 0x34
+#define MAX5970_STATUS3_ALERT BIT(4)
+#define MAX5970_STATUS3_PG(ch) BIT(ch)
+
+#define MAX5970_REG_FAULT0 0x35
+#define UV_STATUS_WARN(ch) (1 << (ch))
+#define UV_STATUS_CRIT(ch) (1 << ((ch) + 4))
+
+#define MAX5970_REG_FAULT1 0x36
+#define OV_STATUS_WARN(ch) (1 << (ch))
+#define OV_STATUS_CRIT(ch) (1 << ((ch) + 4))
+
+#define MAX5970_REG_FAULT2 0x37
+#define OC_STATUS_WARN(ch) (1 << (ch))
+
+#define MAX5970_REG_CHXEN 0x3b
+#define CHXEN(ch) (3 << ((ch) * 2))
+
+#define MAX5970_REG_LED_FLASH 0x43
+
+#define MAX_REGISTERS 0x49
+#define ADC_MASK 0x3FF
+
+#endif /* _MFD_MAX597X_H */
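
The MAX5970 splits each 10-bit ADC result across an H register (upper eight bits) and an L register (two LSBs), and the per-channel MON field halves the 16 V full-scale range for each step. Below is a minimal userspace sketch of the reassembly arithmetic; the register readouts are invented, and a real driver would obtain them through regmap:

#include <stdint.h>
#include <stdio.h>

/* Copied from the header above */
#define MAX5970_REG_VOLTAGE_L(ch)	(0x03 + (ch) * 4)
#define MAX5970_REG_VOLTAGE_H(ch)	(0x02 + (ch) * 4)
#define MAX5970_MON_MASK		0x3
#define MAX5970_MON(reg, ch)		(((reg) >> ((ch) * 2)) & MAX5970_MON_MASK)
#define MAX5970_MON_MAX_RANGE_UV	16000000
#define ADC_MASK			0x3FF

int main(void)
{
	int ch = 0;
	uint8_t hi = 0xAB, lo = 0x02;	/* hypothetical H/L register readouts */
	unsigned int mon_reg = 0x1;	/* hypothetical MON_RANGE readout */

	/* 10-bit sample: eight bits from H, two LSBs from L */
	unsigned int adc = (((unsigned int)hi << 2) | (lo & 0x3)) & ADC_MASK;
	/* Full scale halves for each step of the channel's MON field */
	unsigned long long range_uv =
		(unsigned long long)MAX5970_MON_MAX_RANGE_UV >>
		MAX5970_MON(mon_reg, ch);
	unsigned long long uv = adc * range_uv / (ADC_MASK + 1);

	printf("ch%d (regs 0x%02x/0x%02x): adc=%u, ~%llu uV\n",
	       ch, MAX5970_REG_VOLTAGE_H(ch), MAX5970_REG_VOLTAGE_L(ch),
	       adc, uv);
	return 0;
}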
diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h
index ad2a9a852aea..f552ef5b1100 100644
--- a/include/linux/mfd/max77620.h
+++ b/include/linux/mfd/max77620.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Defining registers address and its bit definitions of MAX77620 and MAX20024
*
* Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
*/
#ifndef _MFD_MAX77620_H_
@@ -136,8 +133,8 @@
#define MAX77620_FPS_PERIOD_MIN_US 40
#define MAX20024_FPS_PERIOD_MIN_US 20
-#define MAX77620_FPS_PERIOD_MAX_US 2560
-#define MAX20024_FPS_PERIOD_MAX_US 5120
+#define MAX20024_FPS_PERIOD_MAX_US 2560
+#define MAX77620_FPS_PERIOD_MAX_US 5120
#define MAX77620_REG_FPS_GPIO1 0x54
#define MAX77620_REG_FPS_GPIO2 0x55
@@ -324,6 +321,7 @@ enum max77620_fps_src {
enum max77620_chip_id {
MAX77620,
MAX20024,
+ MAX77663,
};
struct max77620_chip {
@@ -331,7 +329,6 @@ struct max77620_chip {
struct regmap *rmap;
int chip_irq;
- int irq_base;
/* chip id */
enum max77620_chip_id chip_id;
diff --git a/include/linux/mfd/max77650.h b/include/linux/mfd/max77650.h
new file mode 100644
index 000000000000..c809e211a8cd
--- /dev/null
+++ b/include/linux/mfd/max77650.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 BayLibre SAS
+ * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ *
+ * Common definitions for MAXIM 77650/77651 charger/power-supply.
+ */
+
+#ifndef MAX77650_H
+#define MAX77650_H
+
+#include <linux/bits.h>
+
+#define MAX77650_REG_INT_GLBL 0x00
+#define MAX77650_REG_INT_CHG 0x01
+#define MAX77650_REG_STAT_CHG_A 0x02
+#define MAX77650_REG_STAT_CHG_B 0x03
+#define MAX77650_REG_ERCFLAG 0x04
+#define MAX77650_REG_STAT_GLBL 0x05
+#define MAX77650_REG_INTM_GLBL 0x06
+#define MAX77650_REG_INTM_CHG 0x07
+#define MAX77650_REG_CNFG_GLBL 0x10
+#define MAX77650_REG_CID 0x11
+#define MAX77650_REG_CNFG_GPIO 0x12
+#define MAX77650_REG_CNFG_CHG_A 0x18
+#define MAX77650_REG_CNFG_CHG_B 0x19
+#define MAX77650_REG_CNFG_CHG_C 0x1a
+#define MAX77650_REG_CNFG_CHG_D 0x1b
+#define MAX77650_REG_CNFG_CHG_E 0x1c
+#define MAX77650_REG_CNFG_CHG_F 0x1d
+#define MAX77650_REG_CNFG_CHG_G 0x1e
+#define MAX77650_REG_CNFG_CHG_H 0x1f
+#define MAX77650_REG_CNFG_CHG_I 0x20
+#define MAX77650_REG_CNFG_SBB_TOP 0x28
+#define MAX77650_REG_CNFG_SBB0_A 0x29
+#define MAX77650_REG_CNFG_SBB0_B 0x2a
+#define MAX77650_REG_CNFG_SBB1_A 0x2b
+#define MAX77650_REG_CNFG_SBB1_B 0x2c
+#define MAX77650_REG_CNFG_SBB2_A 0x2d
+#define MAX77650_REG_CNFG_SBB2_B 0x2e
+#define MAX77650_REG_CNFG_LDO_A 0x38
+#define MAX77650_REG_CNFG_LDO_B 0x39
+#define MAX77650_REG_CNFG_LED0_A 0x40
+#define MAX77650_REG_CNFG_LED1_A 0x41
+#define MAX77650_REG_CNFG_LED2_A 0x42
+#define MAX77650_REG_CNFG_LED0_B 0x43
+#define MAX77650_REG_CNFG_LED1_B 0x44
+#define MAX77650_REG_CNFG_LED2_B 0x45
+#define MAX77650_REG_CNFG_LED_TOP 0x46
+
+#define MAX77650_CID_MASK GENMASK(3, 0)
+#define MAX77650_CID_BITS(_reg) ((_reg) & MAX77650_CID_MASK)
+
+#define MAX77650_CID_77650A 0x03
+#define MAX77650_CID_77650C 0x0a
+#define MAX77650_CID_77651A 0x06
+#define MAX77650_CID_77651B 0x08
+
+#endif /* MAX77650_H */
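
The CID register identifies which die variant is fitted; masking the readout with MAX77650_CID_BITS() and comparing against the CID constants is all the detection logic needs. A standalone sketch, with an invented register value (a driver would read MAX77650_REG_CID over regmap):

#include <stdio.h>

/* Copied from the header above; GENMASK(3, 0) expands to 0x0f */
#define MAX77650_CID_MASK	0x0f
#define MAX77650_CID_BITS(_reg)	((_reg) & MAX77650_CID_MASK)
#define MAX77650_CID_77650A	0x03
#define MAX77650_CID_77651A	0x06

int main(void)
{
	unsigned int reg = 0x43;	/* hypothetical CID register readout */

	switch (MAX77650_CID_BITS(reg)) {
	case MAX77650_CID_77650A:
		puts("MAX77650A detected");
		break;
	case MAX77650_CID_77651A:
		puts("MAX77651A detected");
		break;
	default:
		puts("unknown chip ID");
	}
	return 0;
}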
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
index 643dae777b43..3acceeedbaba 100644
--- a/include/linux/mfd/max77686-private.h
+++ b/include/linux/mfd/max77686-private.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max77686-private.h - Voltage regulator driver for the Maxim 77686/802
*
 * Copyright (C) 2012 Samsung Electronics
* Chiwoong Byun <woong.byun@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX77686_PRIV_H
@@ -146,35 +133,35 @@ enum max77686_pmic_reg {
/* Reserved: 0x7A-0x7D */
MAX77686_REG_BBAT_CHG = 0x7E,
- MAX77686_REG_32KHZ = 0x7F,
+ MAX77686_REG_32KHZ = 0x7F,
MAX77686_REG_PMIC_END = 0x80,
};
enum max77686_rtc_reg {
- MAX77686_RTC_INT = 0x00,
- MAX77686_RTC_INTM = 0x01,
+ MAX77686_RTC_INT = 0x00,
+ MAX77686_RTC_INTM = 0x01,
MAX77686_RTC_CONTROLM = 0x02,
MAX77686_RTC_CONTROL = 0x03,
MAX77686_RTC_UPDATE0 = 0x04,
/* Reserved: 0x5 */
MAX77686_WTSR_SMPL_CNTL = 0x06,
- MAX77686_RTC_SEC = 0x07,
- MAX77686_RTC_MIN = 0x08,
- MAX77686_RTC_HOUR = 0x09,
+ MAX77686_RTC_SEC = 0x07,
+ MAX77686_RTC_MIN = 0x08,
+ MAX77686_RTC_HOUR = 0x09,
MAX77686_RTC_WEEKDAY = 0x0A,
- MAX77686_RTC_MONTH = 0x0B,
- MAX77686_RTC_YEAR = 0x0C,
- MAX77686_RTC_DATE = 0x0D,
- MAX77686_ALARM1_SEC = 0x0E,
- MAX77686_ALARM1_MIN = 0x0F,
+ MAX77686_RTC_MONTH = 0x0B,
+ MAX77686_RTC_YEAR = 0x0C,
+ MAX77686_RTC_MONTHDAY = 0x0D,
+ MAX77686_ALARM1_SEC = 0x0E,
+ MAX77686_ALARM1_MIN = 0x0F,
MAX77686_ALARM1_HOUR = 0x10,
MAX77686_ALARM1_WEEKDAY = 0x11,
MAX77686_ALARM1_MONTH = 0x12,
MAX77686_ALARM1_YEAR = 0x13,
MAX77686_ALARM1_DATE = 0x14,
- MAX77686_ALARM2_SEC = 0x15,
- MAX77686_ALARM2_MIN = 0x16,
+ MAX77686_ALARM2_SEC = 0x15,
+ MAX77686_ALARM2_MIN = 0x16,
MAX77686_ALARM2_HOUR = 0x17,
MAX77686_ALARM2_WEEKDAY = 0x18,
MAX77686_ALARM2_MONTH = 0x19,
@@ -365,7 +352,7 @@ enum max77802_rtc_reg {
MAX77802_RTC_WEEKDAY = 0xCA,
MAX77802_RTC_MONTH = 0xCB,
MAX77802_RTC_YEAR = 0xCC,
- MAX77802_RTC_DATE = 0xCD,
+ MAX77802_RTC_MONTHDAY = 0xCD,
MAX77802_RTC_AE1 = 0xCE,
MAX77802_ALARM1_SEC = 0xCF,
MAX77802_ALARM1_MIN = 0xD0,
diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h
index d4b72d519115..d0fb510875e6 100644
--- a/include/linux/mfd/max77686.h
+++ b/include/linux/mfd/max77686.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max77686.h - Driver for the Maxim 77686/802
*
 * Copyright (C) 2012 Samsung Electronics
* Chiwoong Byun <woong.byun@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* This driver is based on max8997.h
*
* MAX77686 has PMIC, RTC devices.
diff --git a/include/linux/mfd/max77693-common.h b/include/linux/mfd/max77693-common.h
index 095b121aa725..a5bce099f1ed 100644
--- a/include/linux/mfd/max77693-common.h
+++ b/include/linux/mfd/max77693-common.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Common data shared between Maxim 77693 and 77843 drivers
*
* Copyright (C) 2015 Samsung Electronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_MFD_MAX77693_COMMON_H
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 3c7a63b98ad6..311f7d3d2323 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max77693-private.h - Voltage regulator driver for the Maxim 77693
*
@@ -5,20 +6,6 @@
* SangYoung Son <hello.son@samsung.com>
*
* This program is not provided / owned by Maxim Integrated Products.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX77693_PRIV_H
@@ -144,7 +131,7 @@ enum max77693_pmic_reg {
#define FLASH_INT_FLED1_SHORT BIT(3)
#define FLASH_INT_OVER_CURRENT BIT(4)
-/* Fast charge timer in in hours */
+/* Fast charge timer in hours */
#define DEFAULT_FAST_CHARGE_TIMER 4
/* microamps */
#define DEFAULT_TOP_OFF_THRESHOLD_CURRENT 150000
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
index d450f687301b..c67c16ba8649 100644
--- a/include/linux/mfd/max77693.h
+++ b/include/linux/mfd/max77693.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max77693.h - Driver for the Maxim 77693
*
@@ -6,20 +7,6 @@
*
* This program is not provided / owned by Maxim Integrated Products.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* This driver is based on max8997.h
*
* MAX77693 has PMIC, Charger, Flash LED, Haptic, MUIC devices.
diff --git a/include/linux/mfd/max77714.h b/include/linux/mfd/max77714.h
new file mode 100644
index 000000000000..7947e0d697a5
--- /dev/null
+++ b/include/linux/mfd/max77714.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Maxim MAX77714 Register and data structures definition.
+ *
+ * Copyright (C) 2022 Luca Ceresoli
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
+ */
+
+#ifndef __LINUX_MFD_MAX77714_H_
+#define __LINUX_MFD_MAX77714_H_
+
+#include <linux/bits.h>
+
+#define MAX77714_INT_TOP 0x00
+#define MAX77714_INT_TOPM 0x07 /* Datasheet says "read only", but it is RW */
+
+#define MAX77714_INT_TOP_ONOFF BIT(1)
+#define MAX77714_INT_TOP_RTC BIT(3)
+#define MAX77714_INT_TOP_GPIO BIT(4)
+#define MAX77714_INT_TOP_LDO BIT(5)
+#define MAX77714_INT_TOP_SD BIT(6)
+#define MAX77714_INT_TOP_GLBL BIT(7)
+
+#define MAX77714_32K_STATUS 0x30
+#define MAX77714_32K_STATUS_SIOSCOK BIT(5)
+#define MAX77714_32K_STATUS_XOSCOK BIT(4)
+#define MAX77714_32K_STATUS_32KSOURCE BIT(3)
+#define MAX77714_32K_STATUS_32KLOAD_MSK 0x3
+#define MAX77714_32K_STATUS_32KLOAD_SHF 1
+#define MAX77714_32K_STATUS_CRYSTAL_CFG BIT(0)
+
+#define MAX77714_32K_CONFIG 0x31
+#define MAX77714_32K_CONFIG_XOSC_RETRY BIT(4)
+
+#define MAX77714_CNFG_GLBL2 0x91
+#define MAX77714_WDTEN BIT(2)
+#define MAX77714_WDTSLPC BIT(3)
+#define MAX77714_TWD_MASK 0x3
+#define MAX77714_TWD_2s 0x0
+#define MAX77714_TWD_16s 0x1
+#define MAX77714_TWD_64s 0x2
+#define MAX77714_TWD_128s 0x3
+
+#define MAX77714_CNFG_GLBL3 0x92
+#define MAX77714_WDTC BIT(0)
+
+#define MAX77714_CNFG2_ONOFF 0x94
+#define MAX77714_WD_RST_WK BIT(5)
+
+/* Interrupts */
+enum {
+ MAX77714_IRQ_TOP_ONOFF,
+ MAX77714_IRQ_TOP_RTC, /* Real-time clock */
+ MAX77714_IRQ_TOP_GPIO, /* GPIOs */
+ MAX77714_IRQ_TOP_LDO, /* Low-dropout regulators */
+ MAX77714_IRQ_TOP_SD, /* Step-down regulators */
+ MAX77714_IRQ_TOP_GLBL, /* "Global resources": Low-Battery, overtemp... */
+};
+
+#endif /* __LINUX_MFD_MAX77714_H_ */
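
The MAX77714 watchdog is controlled from CNFG_GLBL2: the two-bit TWD field selects the timeout and WDTEN arms it. A sketch of the read-modify-write a driver would perform — via regmap_update_bits() in practice; here the register is just a local variable with invented contents:

#include <stdint.h>
#include <stdio.h>

/* Copied from the header above; BIT(2) expands to (1u << 2) */
#define MAX77714_CNFG_GLBL2	0x91
#define MAX77714_WDTEN		(1u << 2)
#define MAX77714_TWD_MASK	0x3
#define MAX77714_TWD_16s	0x1

int main(void)
{
	uint8_t cnfg = 0x00;	/* hypothetical current register value */

	cnfg &= ~MAX77714_TWD_MASK;	/* clear the timeout field */
	cnfg |= MAX77714_TWD_16s;	/* select a 16 s timeout */
	cnfg |= MAX77714_WDTEN;		/* arm the watchdog */

	printf("CNFG_GLBL2 (0x%02x) <- 0x%02x\n", MAX77714_CNFG_GLBL2, cnfg);
	return 0;
}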
diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h
index c19303b0ccfd..0bc7454c4dbe 100644
--- a/include/linux/mfd/max77843-private.h
+++ b/include/linux/mfd/max77843-private.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Common variables for the Maxim MAX77843 driver
*
* Copyright (C) 2015 Samsung Electronics
* Author: Jaewon Kim <jaewon02.kim@samsung.com>
* Author: Beomho Seo <beomho.seo@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __MAX77843_PRIVATE_H_
@@ -245,10 +241,13 @@ enum max77843_irq_muic {
#define MAX77843_CHG_OVER_CURRENT_BAT (0x06 << 4)
/* MAX77843 CHG_CNFG_00 register */
+#define MAX77843_CHG_MODE_MASK 0x0f
#define MAX77843_CHG_DISABLE 0x00
#define MAX77843_CHG_ENABLE 0x05
#define MAX77843_CHG_MASK 0x01
+#define MAX77843_CHG_OTG_MASK 0x02
#define MAX77843_CHG_BUCK_MASK 0x04
+#define MAX77843_CHG_BOOST_MASK 0x08
/* MAX77843 CHG_CNFG_01 register */
#define MAX77843_CHG_RESTART_THRESHOLD_100 0x00
@@ -347,6 +346,7 @@ enum max77843_irq_muic {
/* MAX77843 CONTROL register */
#define MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT 0
#define MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT 3
+#define MAX77843_MUIC_CONTROL1_NOBCCOMP_SHIFT 6
#define MAX77843_MUIC_CONTROL1_IDBEN_SHIFT 7
#define MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT 0
#define MAX77843_MUIC_CONTROL2_ADCEN_SHIFT 1
@@ -363,6 +363,7 @@ enum max77843_irq_muic {
#define MAX77843_MUIC_CONTROL1_COMP1SW_MASK (0x7 << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT)
#define MAX77843_MUIC_CONTROL1_COMP2SW_MASK (0x7 << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT)
#define MAX77843_MUIC_CONTROL1_IDBEN_MASK BIT(MAX77843_MUIC_CONTROL1_IDBEN_SHIFT)
+#define MAX77843_MUIC_CONTROL1_NOBCCOMP_MASK BIT(MAX77843_MUIC_CONTROL1_NOBCCOMP_SHIFT)
#define MAX77843_MUIC_CONTROL2_LOWPWR_MASK BIT(MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT)
#define MAX77843_MUIC_CONTROL2_ADCEN_MASK BIT(MAX77843_MUIC_CONTROL2_ADCEN_SHIFT)
#define MAX77843_MUIC_CONTROL2_CPEN_MASK BIT(MAX77843_MUIC_CONTROL2_CPEN_SHIFT)
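
Following the CHG_CNFG_00 additions above: the low four bits form a single mode field, so the charger is switched on and off by rewriting that field under MAX77843_CHG_MODE_MASK rather than toggling individual bits. A hedged sketch of the read-modify-write, with invented register contents (a driver would use regmap_update_bits()):

#include <stdint.h>
#include <stdio.h>

/* Copied from the hunk above */
#define MAX77843_CHG_MODE_MASK	0x0f
#define MAX77843_CHG_DISABLE	0x00
#define MAX77843_CHG_ENABLE	0x05

int main(void)
{
	uint8_t cnfg00 = 0xa0;	/* hypothetical CHG_CNFG_00 readout */

	/* Replace the 4-bit mode field, leaving the upper bits intact */
	cnfg00 = (cnfg00 & ~MAX77843_CHG_MODE_MASK) | MAX77843_CHG_ENABLE;
	printf("CHG_CNFG_00 -> 0x%02x\n", cnfg00);
	return 0;
}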
diff --git a/include/linux/mfd/max8907.h b/include/linux/mfd/max8907.h
index b06f7a6a1e80..4be3c2370e2a 100644
--- a/include/linux/mfd/max8907.h
+++ b/include/linux/mfd/max8907.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Functions to access MAX8907 power management chip.
*
* Copyright (C) 2010 Gyungoh Yoo <jack.yoo@maxim-ic.com>
* Copyright (C) 2012, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MFD_MAX8907_H
diff --git a/include/linux/mfd/max8925.h b/include/linux/mfd/max8925.h
index ce8502e9e7dc..07f9af579fb9 100644
--- a/include/linux/mfd/max8925.h
+++ b/include/linux/mfd/max8925.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Maxim8925 Interface
*
* Copyright (C) 2009 Marvell International Ltd.
* Haojian Zhuang <haojian.zhuang@marvell.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MFD_MAX8925_H
diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h
index 78c76cd4d37b..a10cd6945232 100644
--- a/include/linux/mfd/max8997-private.h
+++ b/include/linux/mfd/max8997-private.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max8997-private.h - Voltage regulator driver for the Maxim 8997
*
 * Copyright (C) 2010 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX8997_PRIV_H
diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h
index cf815577bd68..6193905abbb5 100644
--- a/include/linux/mfd/max8997.h
+++ b/include/linux/mfd/max8997.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max8997.h - Driver for the Maxim 8997/8966
*
 * Copyright (C) 2009-2010 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* This driver is based on max8998.h
*
* MAX8997 has PMIC, MUIC, HAPTIC, RTC, FLASH, and Fuel Gauge devices.
@@ -27,13 +14,13 @@
* others and b) it can be enabled simply by using MAX17042 driver.
*/
-#ifndef __LINUX_MFD_MAX8998_H
-#define __LINUX_MFD_MAX8998_H
+#ifndef __LINUX_MFD_MAX8997_H
+#define __LINUX_MFD_MAX8997_H
#include <linux/regulator/consumer.h>
/* MAX8997/8966 regulator IDs */
-enum max8998_regulators {
+enum max8997_regulators {
MAX8997_LDO1 = 0,
MAX8997_LDO2,
MAX8997_LDO3,
@@ -123,8 +110,6 @@ enum max8997_haptic_pwm_divisor {
/**
* max8997_haptic_platform_data
- * @pwm_channel_id: channel number of PWM device
- * valid for MAX8997_EXTERNAL_MODE
 * @pwm_period: period in nanoseconds for PWM device
* valid for MAX8997_EXTERNAL_MODE
* @type: motor type
@@ -141,7 +126,6 @@ enum max8997_haptic_pwm_divisor {
* [0 - 255]: available period
*/
struct max8997_haptic_platform_data {
- unsigned int pwm_channel_id;
unsigned int pwm_period;
enum max8997_haptic_motor_type type;
@@ -178,7 +162,6 @@ struct max8997_led_platform_data {
struct max8997_platform_data {
/* IRQ */
int ono;
- int wakeup;
/* ---- PMIC ---- */
struct max8997_regulator_data *regulators;
@@ -221,4 +204,4 @@ struct max8997_platform_data {
struct max8997_led_platform_data *led_pdata;
};
-#endif /* __LINUX_MFD_MAX8998_H */
+#endif /* __LINUX_MFD_MAX8997_H */
diff --git a/include/linux/mfd/max8998-private.h b/include/linux/mfd/max8998-private.h
index d68ada502ff3..6deb5f577602 100644
--- a/include/linux/mfd/max8998-private.h
+++ b/include/linux/mfd/max8998-private.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max8998-private.h - Voltage regulator driver for the Maxim 8998
*
 * Copyright (C) 2009-2010 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
* Marek Szyprowski <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX8998_PRIV_H
diff --git a/include/linux/mfd/max8998.h b/include/linux/mfd/max8998.h
index e3956a654cbc..79c020bd0c70 100644
--- a/include/linux/mfd/max8998.h
+++ b/include/linux/mfd/max8998.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max8998.h - Voltage regulator driver for the Maxim 8998
*
 * Copyright (C) 2009-2010 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
* Marek Szyprowski <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX8998_H
@@ -52,6 +39,7 @@ enum {
MAX8998_ENVICHG,
MAX8998_ESAFEOUT1,
MAX8998_ESAFEOUT2,
+ MAX8998_CHARGER,
};
/**
diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h
index 4ff6137d8d67..c25b1676741b 100644
--- a/include/linux/mfd/mc13783.h
+++ b/include/linux/mfd/mc13783.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2010 Yong Shen <yong.shen@linaro.org>
* Copyright 2009-2010 Pengutronix
* Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
*/
#ifndef __LINUX_MFD_MC13783_H
#define __LINUX_MFD_MC13783_H
diff --git a/include/linux/mfd/mc13892.h b/include/linux/mfd/mc13892.h
index a00f2bec178c..880cd949d12a 100644
--- a/include/linux/mfd/mc13892.h
+++ b/include/linux/mfd/mc13892.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2010 Yong Shen <yong.shen@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
*/
#ifndef __LINUX_MFD_MC13892_H
diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h
index 638222e43e48..f372926d5894 100644
--- a/include/linux/mfd/mc13xxx.h
+++ b/include/linux/mfd/mc13xxx.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2009-2010 Pengutronix
* Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
*/
#ifndef __LINUX_MFD_MC13XXX_H
#define __LINUX_MFD_MC13XXX_H
@@ -243,10 +240,13 @@ struct mc13xxx_platform_data {
#define MC13XXX_ADC0_LICELLCON (1 << 0)
#define MC13XXX_ADC0_CHRGICON (1 << 1)
#define MC13XXX_ADC0_BATICON (1 << 2)
+#define MC13XXX_ADC0_ADIN7SEL_DIE (1 << 4)
+#define MC13XXX_ADC0_ADIN7SEL_UID (2 << 4)
#define MC13XXX_ADC0_ADREFEN (1 << 10)
#define MC13XXX_ADC0_TSMOD0 (1 << 12)
#define MC13XXX_ADC0_TSMOD1 (1 << 13)
#define MC13XXX_ADC0_TSMOD2 (1 << 14)
+#define MC13XXX_ADC0_CHRGRAWDIV (1 << 15)
#define MC13XXX_ADC0_ADINC1 (1 << 16)
#define MC13XXX_ADC0_ADINC2 (1 << 17)
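
The two new ADIN7SEL values above occupy a two-bit mux field at bits 5:4 of ADC0, steering the shared ADIN7 input to either the die-temperature sensor or the UID pin, while CHRGRAWDIV at bit 15 presumably enables the divider on the charger-voltage input. A small sketch of composing an ADC0 value with the mux set for die temperature:

#include <stdio.h>

/* Copied from the defines above */
#define MC13XXX_ADC0_ADIN7SEL_DIE	(1 << 4)
#define MC13XXX_ADC0_CHRGRAWDIV		(1 << 15)

int main(void)
{
	unsigned int adc0 = 0;

	adc0 |= MC13XXX_ADC0_ADIN7SEL_DIE;	/* ADIN7 <- die temperature */
	adc0 |= MC13XXX_ADC0_CHRGRAWDIV;	/* divided charger-voltage path */

	printf("ADC0 = 0x%06x\n", adc0);
	return 0;
}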
diff --git a/include/linux/mfd/mcp.h b/include/linux/mfd/mcp.h
index f682953043ba..fd5cafc77e8a 100644
--- a/include/linux/mfd/mcp.h
+++ b/include/linux/mfd/mcp.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/drivers/mfd/mcp.h
*
* Copyright (C) 2001 Russell King, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
*/
#ifndef MCP_H
#define MCP_H
diff --git a/include/linux/mfd/menelaus.h b/include/linux/mfd/menelaus.h
index 9e85ac06da89..ce489aba88ec 100644
--- a/include/linux/mfd/menelaus.h
+++ b/include/linux/mfd/menelaus.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Functions to access Menelaus power management chip
*/
diff --git a/include/linux/mfd/motorola-cpcap.h b/include/linux/mfd/motorola-cpcap.h
index aefc49cb7ba9..981e5777deb7 100644
--- a/include/linux/mfd/motorola-cpcap.h
+++ b/include/linux/mfd/motorola-cpcap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * The register defines are based on an earlier cpcap.h in the Motorola
 * Linux kernel tree.
@@ -8,10 +9,6 @@
* to make the defines usable with Linux kernel regmap support
*
* Copyright (C) 2016 Tony Lindgren <tony@atomide.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/device.h>
diff --git a/include/linux/mfd/mp2629.h b/include/linux/mfd/mp2629.h
new file mode 100644
index 000000000000..89b706900b57
--- /dev/null
+++ b/include/linux/mfd/mp2629.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2020 Monolithic Power Systems, Inc
+ */
+
+#ifndef __MP2629_H__
+#define __MP2629_H__
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+struct mp2629_data {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+enum mp2629_adc_chan {
+ MP2629_BATT_VOLT,
+ MP2629_SYSTEM_VOLT,
+ MP2629_INPUT_VOLT,
+ MP2629_BATT_CURRENT,
+ MP2629_INPUT_CURRENT,
+ MP2629_ADC_CHAN_END
+};
+
+#endif
diff --git a/include/linux/mfd/mt6323/core.h b/include/linux/mfd/mt6323/core.h
index 06d0ec3b1f8f..2becc3443179 100644
--- a/include/linux/mfd/mt6323/core.h
+++ b/include/linux/mfd/mt6323/core.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016 Chen Zhong <chen.zhong@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __MFD_MT6323_CORE_H__
diff --git a/include/linux/mfd/mt6323/registers.h b/include/linux/mfd/mt6323/registers.h
index 160f3c0e2589..4455e57544eb 100644
--- a/include/linux/mfd/mt6323/registers.h
+++ b/include/linux/mfd/mt6323/registers.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016 Chen Zhong <chen.zhong@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __MFD_MT6323_REGISTERS_H__
diff --git a/include/linux/mfd/mt6331/core.h b/include/linux/mfd/mt6331/core.h
new file mode 100644
index 000000000000..df8e6b1e4bc1
--- /dev/null
+++ b/include/linux/mfd/mt6331/core.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6331_CORE_H__
+#define __MFD_MT6331_CORE_H__
+
+enum mt6331_irq_status_numbers {
+ MT6331_IRQ_STATUS_PWRKEY = 0,
+ MT6331_IRQ_STATUS_HOMEKEY,
+ MT6331_IRQ_STATUS_CHRDET,
+ MT6331_IRQ_STATUS_THR_H,
+ MT6331_IRQ_STATUS_THR_L,
+ MT6331_IRQ_STATUS_BAT_H,
+ MT6331_IRQ_STATUS_BAT_L,
+ MT6331_IRQ_STATUS_RTC,
+ MT6331_IRQ_STATUS_AUDIO,
+ MT6331_IRQ_STATUS_MAD,
+ MT6331_IRQ_STATUS_ACCDET,
+ MT6331_IRQ_STATUS_ACCDET_EINT,
+ MT6331_IRQ_STATUS_ACCDET_NEGV = 12,
+ MT6331_IRQ_STATUS_VDVFS11_OC = 16,
+ MT6331_IRQ_STATUS_VDVFS12_OC,
+ MT6331_IRQ_STATUS_VDVFS13_OC,
+ MT6331_IRQ_STATUS_VDVFS14_OC,
+ MT6331_IRQ_STATUS_GPU_OC,
+ MT6331_IRQ_STATUS_VCORE1_OC,
+ MT6331_IRQ_STATUS_VCORE2_OC,
+ MT6331_IRQ_STATUS_VIO18_OC,
+ MT6331_IRQ_STATUS_LDO_OC,
+ MT6331_IRQ_STATUS_NR,
+};
+
+#define MT6331_IRQ_CON0_BASE MT6331_IRQ_STATUS_PWRKEY
+#define MT6331_IRQ_CON0_BITS (MT6331_IRQ_STATUS_ACCDET_NEGV + 1)
+#define MT6331_IRQ_CON1_BASE MT6331_IRQ_STATUS_VDVFS11_OC
+#define MT6331_IRQ_CON1_BITS (MT6331_IRQ_STATUS_LDO_OC - MT6331_IRQ_STATUS_VDVFS11_OC + 1)
+
+#endif /* __MFD_MT6331_CORE_H__ */
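
The MT6331 status numbers split across two interrupt banks: INT_CON0 covers bits 0-12 (PWRKEY through ACCDET_NEGV) and INT_CON1 covers the over-current sources starting at 16. A standalone sketch of the bank/bit arithmetic a chained IRQ handler would use, with values copied from the enum above:

#include <stdio.h>

/* Values copied from the enum above */
#define MT6331_IRQ_STATUS_PWRKEY	0
#define MT6331_IRQ_STATUS_VDVFS11_OC	16

#define MT6331_IRQ_CON0_BASE	MT6331_IRQ_STATUS_PWRKEY
#define MT6331_IRQ_CON1_BASE	MT6331_IRQ_STATUS_VDVFS11_OC

/* Map an MT6331 status number to its INT_CONx bank and bit position. */
int main(void)
{
	int hwirq = 21;		/* MT6331_IRQ_STATUS_VCORE1_OC */
	int bank = hwirq >= MT6331_IRQ_CON1_BASE ? 1 : 0;
	int bit = hwirq - (bank ? MT6331_IRQ_CON1_BASE : MT6331_IRQ_CON0_BASE);

	printf("hwirq %d -> INT_CON%d bit %d\n", hwirq, bank, bit);
	return 0;
}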
diff --git a/include/linux/mfd/mt6331/registers.h b/include/linux/mfd/mt6331/registers.h
new file mode 100644
index 000000000000..e2be6bccd1a7
--- /dev/null
+++ b/include/linux/mfd/mt6331/registers.h
@@ -0,0 +1,584 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6331_REGISTERS_H__
+#define __MFD_MT6331_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6331_STRUP_CON0 0x0
+#define MT6331_STRUP_CON2 0x2
+#define MT6331_STRUP_CON3 0x4
+#define MT6331_STRUP_CON4 0x6
+#define MT6331_STRUP_CON5 0x8
+#define MT6331_STRUP_CON6 0xA
+#define MT6331_STRUP_CON7 0xC
+#define MT6331_STRUP_CON8 0xE
+#define MT6331_STRUP_CON9 0x10
+#define MT6331_STRUP_CON10 0x12
+#define MT6331_STRUP_CON11 0x14
+#define MT6331_STRUP_CON12 0x16
+#define MT6331_STRUP_CON13 0x18
+#define MT6331_STRUP_CON14 0x1A
+#define MT6331_STRUP_CON15 0x1C
+#define MT6331_STRUP_CON16 0x1E
+#define MT6331_STRUP_CON17 0x20
+#define MT6331_STRUP_CON18 0x22
+#define MT6331_HWCID 0x100
+#define MT6331_SWCID 0x102
+#define MT6331_EXT_PMIC_STATUS 0x104
+#define MT6331_TOP_CON 0x106
+#define MT6331_TEST_OUT 0x108
+#define MT6331_TEST_CON0 0x10A
+#define MT6331_TEST_CON1 0x10C
+#define MT6331_TESTMODE_SW 0x10E
+#define MT6331_EN_STATUS0 0x110
+#define MT6331_EN_STATUS1 0x112
+#define MT6331_EN_STATUS2 0x114
+#define MT6331_OCSTATUS0 0x116
+#define MT6331_OCSTATUS1 0x118
+#define MT6331_OCSTATUS2 0x11A
+#define MT6331_PGSTATUS 0x11C
+#define MT6331_TOPSTATUS 0x11E
+#define MT6331_TDSEL_CON 0x120
+#define MT6331_RDSEL_CON 0x122
+#define MT6331_SMT_CON0 0x124
+#define MT6331_SMT_CON1 0x126
+#define MT6331_SMT_CON2 0x128
+#define MT6331_DRV_CON0 0x12A
+#define MT6331_DRV_CON1 0x12C
+#define MT6331_DRV_CON2 0x12E
+#define MT6331_DRV_CON3 0x130
+#define MT6331_TOP_STATUS 0x132
+#define MT6331_TOP_STATUS_SET 0x134
+#define MT6331_TOP_STATUS_CLR 0x136
+#define MT6331_TOP_CKPDN_CON0 0x138
+#define MT6331_TOP_CKPDN_CON0_SET 0x13A
+#define MT6331_TOP_CKPDN_CON0_CLR 0x13C
+#define MT6331_TOP_CKPDN_CON1 0x13E
+#define MT6331_TOP_CKPDN_CON1_SET 0x140
+#define MT6331_TOP_CKPDN_CON1_CLR 0x142
+#define MT6331_TOP_CKPDN_CON2 0x144
+#define MT6331_TOP_CKPDN_CON2_SET 0x146
+#define MT6331_TOP_CKPDN_CON2_CLR 0x148
+#define MT6331_TOP_CKSEL_CON 0x14A
+#define MT6331_TOP_CKSEL_CON_SET 0x14C
+#define MT6331_TOP_CKSEL_CON_CLR 0x14E
+#define MT6331_TOP_CKHWEN_CON 0x150
+#define MT6331_TOP_CKHWEN_CON_SET 0x152
+#define MT6331_TOP_CKHWEN_CON_CLR 0x154
+#define MT6331_TOP_CKTST_CON0 0x156
+#define MT6331_TOP_CKTST_CON1 0x158
+#define MT6331_TOP_CLKSQ 0x15A
+#define MT6331_TOP_CLKSQ_SET 0x15C
+#define MT6331_TOP_CLKSQ_CLR 0x15E
+#define MT6331_TOP_RST_CON 0x160
+#define MT6331_TOP_RST_CON_SET 0x162
+#define MT6331_TOP_RST_CON_CLR 0x164
+#define MT6331_TOP_RST_MISC 0x166
+#define MT6331_TOP_RST_MISC_SET 0x168
+#define MT6331_TOP_RST_MISC_CLR 0x16A
+#define MT6331_INT_CON0 0x16C
+#define MT6331_INT_CON0_SET 0x16E
+#define MT6331_INT_CON0_CLR 0x170
+#define MT6331_INT_CON1 0x172
+#define MT6331_INT_CON1_SET 0x174
+#define MT6331_INT_CON1_CLR 0x176
+#define MT6331_INT_MISC_CON 0x178
+#define MT6331_INT_MISC_CON_SET 0x17A
+#define MT6331_INT_MISC_CON_CLR 0x17C
+#define MT6331_INT_STATUS_CON0 0x17E
+#define MT6331_INT_STATUS_CON1 0x180
+#define MT6331_OC_GEAR_0 0x182
+#define MT6331_FQMTR_CON0 0x184
+#define MT6331_FQMTR_CON1 0x186
+#define MT6331_FQMTR_CON2 0x188
+#define MT6331_RG_SPI_CON 0x18A
+#define MT6331_DEW_DIO_EN 0x18C
+#define MT6331_DEW_READ_TEST 0x18E
+#define MT6331_DEW_WRITE_TEST 0x190
+#define MT6331_DEW_CRC_SWRST 0x192
+#define MT6331_DEW_CRC_EN 0x194
+#define MT6331_DEW_CRC_VAL 0x196
+#define MT6331_DEW_DBG_MON_SEL 0x198
+#define MT6331_DEW_CIPHER_KEY_SEL 0x19A
+#define MT6331_DEW_CIPHER_IV_SEL 0x19C
+#define MT6331_DEW_CIPHER_EN 0x19E
+#define MT6331_DEW_CIPHER_RDY 0x1A0
+#define MT6331_DEW_CIPHER_MODE 0x1A2
+#define MT6331_DEW_CIPHER_SWRST 0x1A4
+#define MT6331_DEW_RDDMY_NO 0x1A6
+#define MT6331_INT_TYPE_CON0 0x1A8
+#define MT6331_INT_TYPE_CON0_SET 0x1AA
+#define MT6331_INT_TYPE_CON0_CLR 0x1AC
+#define MT6331_INT_TYPE_CON1 0x1AE
+#define MT6331_INT_TYPE_CON1_SET 0x1B0
+#define MT6331_INT_TYPE_CON1_CLR 0x1B2
+#define MT6331_INT_STA 0x1B4
+#define MT6331_BUCK_ALL_CON0 0x200
+#define MT6331_BUCK_ALL_CON1 0x202
+#define MT6331_BUCK_ALL_CON2 0x204
+#define MT6331_BUCK_ALL_CON3 0x206
+#define MT6331_BUCK_ALL_CON4 0x208
+#define MT6331_BUCK_ALL_CON5 0x20A
+#define MT6331_BUCK_ALL_CON6 0x20C
+#define MT6331_BUCK_ALL_CON7 0x20E
+#define MT6331_BUCK_ALL_CON8 0x210
+#define MT6331_BUCK_ALL_CON9 0x212
+#define MT6331_BUCK_ALL_CON10 0x214
+#define MT6331_BUCK_ALL_CON11 0x216
+#define MT6331_BUCK_ALL_CON12 0x218
+#define MT6331_BUCK_ALL_CON13 0x21A
+#define MT6331_BUCK_ALL_CON14 0x21C
+#define MT6331_BUCK_ALL_CON15 0x21E
+#define MT6331_BUCK_ALL_CON16 0x220
+#define MT6331_BUCK_ALL_CON17 0x222
+#define MT6331_BUCK_ALL_CON18 0x224
+#define MT6331_BUCK_ALL_CON19 0x226
+#define MT6331_BUCK_ALL_CON20 0x228
+#define MT6331_BUCK_ALL_CON21 0x22A
+#define MT6331_BUCK_ALL_CON22 0x22C
+#define MT6331_BUCK_ALL_CON23 0x22E
+#define MT6331_BUCK_ALL_CON24 0x230
+#define MT6331_BUCK_ALL_CON25 0x232
+#define MT6331_BUCK_ALL_CON26 0x234
+#define MT6331_VDVFS11_CON0 0x236
+#define MT6331_VDVFS11_CON1 0x238
+#define MT6331_VDVFS11_CON2 0x23A
+#define MT6331_VDVFS11_CON3 0x23C
+#define MT6331_VDVFS11_CON4 0x23E
+#define MT6331_VDVFS11_CON5 0x240
+#define MT6331_VDVFS11_CON6 0x242
+#define MT6331_VDVFS11_CON7 0x244
+#define MT6331_VDVFS11_CON8 0x246
+#define MT6331_VDVFS11_CON9 0x248
+#define MT6331_VDVFS11_CON10 0x24A
+#define MT6331_VDVFS11_CON11 0x24C
+#define MT6331_VDVFS11_CON12 0x24E
+#define MT6331_VDVFS11_CON13 0x250
+#define MT6331_VDVFS11_CON14 0x252
+#define MT6331_VDVFS11_CON18 0x25A
+#define MT6331_VDVFS11_CON19 0x25C
+#define MT6331_VDVFS11_CON20 0x25E
+#define MT6331_VDVFS11_CON21 0x260
+#define MT6331_VDVFS11_CON22 0x262
+#define MT6331_VDVFS11_CON23 0x264
+#define MT6331_VDVFS11_CON24 0x266
+#define MT6331_VDVFS11_CON25 0x268
+#define MT6331_VDVFS11_CON26 0x26A
+#define MT6331_VDVFS11_CON27 0x26C
+#define MT6331_VDVFS12_CON0 0x26E
+#define MT6331_VDVFS12_CON1 0x270
+#define MT6331_VDVFS12_CON2 0x272
+#define MT6331_VDVFS12_CON3 0x274
+#define MT6331_VDVFS12_CON4 0x276
+#define MT6331_VDVFS12_CON5 0x278
+#define MT6331_VDVFS12_CON6 0x27A
+#define MT6331_VDVFS12_CON7 0x27C
+#define MT6331_VDVFS12_CON8 0x27E
+#define MT6331_VDVFS12_CON9 0x280
+#define MT6331_VDVFS12_CON10 0x282
+#define MT6331_VDVFS12_CON11 0x284
+#define MT6331_VDVFS12_CON12 0x286
+#define MT6331_VDVFS12_CON13 0x288
+#define MT6331_VDVFS12_CON14 0x28A
+#define MT6331_VDVFS12_CON18 0x292
+#define MT6331_VDVFS12_CON19 0x294
+#define MT6331_VDVFS12_CON20 0x296
+#define MT6331_VDVFS13_CON0 0x298
+#define MT6331_VDVFS13_CON1 0x29A
+#define MT6331_VDVFS13_CON2 0x29C
+#define MT6331_VDVFS13_CON3 0x29E
+#define MT6331_VDVFS13_CON4 0x2A0
+#define MT6331_VDVFS13_CON5 0x2A2
+#define MT6331_VDVFS13_CON6 0x2A4
+#define MT6331_VDVFS13_CON7 0x2A6
+#define MT6331_VDVFS13_CON8 0x2A8
+#define MT6331_VDVFS13_CON9 0x2AA
+#define MT6331_VDVFS13_CON10 0x2AC
+#define MT6331_VDVFS13_CON11 0x2AE
+#define MT6331_VDVFS13_CON12 0x2B0
+#define MT6331_VDVFS13_CON13 0x2B2
+#define MT6331_VDVFS13_CON14 0x2B4
+#define MT6331_VDVFS13_CON18 0x2BC
+#define MT6331_VDVFS13_CON19 0x2BE
+#define MT6331_VDVFS13_CON20 0x2C0
+#define MT6331_VDVFS14_CON0 0x2C2
+#define MT6331_VDVFS14_CON1 0x2C4
+#define MT6331_VDVFS14_CON2 0x2C6
+#define MT6331_VDVFS14_CON3 0x2C8
+#define MT6331_VDVFS14_CON4 0x2CA
+#define MT6331_VDVFS14_CON5 0x2CC
+#define MT6331_VDVFS14_CON6 0x2CE
+#define MT6331_VDVFS14_CON7 0x2D0
+#define MT6331_VDVFS14_CON8 0x2D2
+#define MT6331_VDVFS14_CON9 0x2D4
+#define MT6331_VDVFS14_CON10 0x2D6
+#define MT6331_VDVFS14_CON11 0x2D8
+#define MT6331_VDVFS14_CON12 0x2DA
+#define MT6331_VDVFS14_CON13 0x2DC
+#define MT6331_VDVFS14_CON14 0x2DE
+#define MT6331_VDVFS14_CON18 0x2E6
+#define MT6331_VDVFS14_CON19 0x2E8
+#define MT6331_VDVFS14_CON20 0x2EA
+#define MT6331_VGPU_CON0 0x300
+#define MT6331_VGPU_CON1 0x302
+#define MT6331_VGPU_CON2 0x304
+#define MT6331_VGPU_CON3 0x306
+#define MT6331_VGPU_CON4 0x308
+#define MT6331_VGPU_CON5 0x30A
+#define MT6331_VGPU_CON6 0x30C
+#define MT6331_VGPU_CON7 0x30E
+#define MT6331_VGPU_CON8 0x310
+#define MT6331_VGPU_CON9 0x312
+#define MT6331_VGPU_CON10 0x314
+#define MT6331_VGPU_CON11 0x316
+#define MT6331_VGPU_CON12 0x318
+#define MT6331_VGPU_CON13 0x31A
+#define MT6331_VGPU_CON14 0x31C
+#define MT6331_VGPU_CON15 0x31E
+#define MT6331_VGPU_CON16 0x320
+#define MT6331_VGPU_CON17 0x322
+#define MT6331_VGPU_CON18 0x324
+#define MT6331_VGPU_CON19 0x326
+#define MT6331_VGPU_CON20 0x328
+#define MT6331_VCORE1_CON0 0x32A
+#define MT6331_VCORE1_CON1 0x32C
+#define MT6331_VCORE1_CON2 0x32E
+#define MT6331_VCORE1_CON3 0x330
+#define MT6331_VCORE1_CON4 0x332
+#define MT6331_VCORE1_CON5 0x334
+#define MT6331_VCORE1_CON6 0x336
+#define MT6331_VCORE1_CON7 0x338
+#define MT6331_VCORE1_CON8 0x33A
+#define MT6331_VCORE1_CON9 0x33C
+#define MT6331_VCORE1_CON10 0x33E
+#define MT6331_VCORE1_CON11 0x340
+#define MT6331_VCORE1_CON12 0x342
+#define MT6331_VCORE1_CON13 0x344
+#define MT6331_VCORE1_CON14 0x346
+#define MT6331_VCORE1_CON15 0x348
+#define MT6331_VCORE1_CON16 0x34A
+#define MT6331_VCORE1_CON17 0x34C
+#define MT6331_VCORE1_CON18 0x34E
+#define MT6331_VCORE1_CON19 0x350
+#define MT6331_VCORE1_CON20 0x352
+#define MT6331_VCORE2_CON0 0x354
+#define MT6331_VCORE2_CON1 0x356
+#define MT6331_VCORE2_CON2 0x358
+#define MT6331_VCORE2_CON3 0x35A
+#define MT6331_VCORE2_CON4 0x35C
+#define MT6331_VCORE2_CON5 0x35E
+#define MT6331_VCORE2_CON6 0x360
+#define MT6331_VCORE2_CON7 0x362
+#define MT6331_VCORE2_CON8 0x364
+#define MT6331_VCORE2_CON9 0x366
+#define MT6331_VCORE2_CON10 0x368
+#define MT6331_VCORE2_CON11 0x36A
+#define MT6331_VCORE2_CON12 0x36C
+#define MT6331_VCORE2_CON13 0x36E
+#define MT6331_VCORE2_CON14 0x370
+#define MT6331_VCORE2_CON15 0x372
+#define MT6331_VCORE2_CON16 0x374
+#define MT6331_VCORE2_CON17 0x376
+#define MT6331_VCORE2_CON18 0x378
+#define MT6331_VCORE2_CON19 0x37A
+#define MT6331_VCORE2_CON20 0x37C
+#define MT6331_VCORE2_CON21 0x37E
+#define MT6331_VIO18_CON0 0x380
+#define MT6331_VIO18_CON1 0x382
+#define MT6331_VIO18_CON2 0x384
+#define MT6331_VIO18_CON3 0x386
+#define MT6331_VIO18_CON4 0x388
+#define MT6331_VIO18_CON5 0x38A
+#define MT6331_VIO18_CON6 0x38C
+#define MT6331_VIO18_CON7 0x38E
+#define MT6331_VIO18_CON8 0x390
+#define MT6331_VIO18_CON9 0x392
+#define MT6331_VIO18_CON10 0x394
+#define MT6331_VIO18_CON11 0x396
+#define MT6331_VIO18_CON12 0x398
+#define MT6331_VIO18_CON13 0x39A
+#define MT6331_VIO18_CON14 0x39C
+#define MT6331_VIO18_CON15 0x39E
+#define MT6331_VIO18_CON16 0x3A0
+#define MT6331_VIO18_CON17 0x3A2
+#define MT6331_VIO18_CON18 0x3A4
+#define MT6331_VIO18_CON19 0x3A6
+#define MT6331_VIO18_CON20 0x3A8
+#define MT6331_BUCK_K_CON0 0x3AA
+#define MT6331_BUCK_K_CON1 0x3AC
+#define MT6331_BUCK_K_CON2 0x3AE
+#define MT6331_BUCK_K_CON3 0x3B0
+#define MT6331_ZCD_CON0 0x400
+#define MT6331_ZCD_CON1 0x402
+#define MT6331_ZCD_CON2 0x404
+#define MT6331_ZCD_CON3 0x406
+#define MT6331_ZCD_CON4 0x408
+#define MT6331_ZCD_CON5 0x40A
+#define MT6331_ISINK0_CON0 0x40C
+#define MT6331_ISINK0_CON1 0x40E
+#define MT6331_ISINK0_CON2 0x410
+#define MT6331_ISINK0_CON3 0x412
+#define MT6331_ISINK0_CON4 0x414
+#define MT6331_ISINK1_CON0 0x416
+#define MT6331_ISINK1_CON1 0x418
+#define MT6331_ISINK1_CON2 0x41A
+#define MT6331_ISINK1_CON3 0x41C
+#define MT6331_ISINK1_CON4 0x41E
+#define MT6331_ISINK2_CON0 0x420
+#define MT6331_ISINK2_CON1 0x422
+#define MT6331_ISINK2_CON2 0x424
+#define MT6331_ISINK2_CON3 0x426
+#define MT6331_ISINK2_CON4 0x428
+#define MT6331_ISINK3_CON0 0x42A
+#define MT6331_ISINK3_CON1 0x42C
+#define MT6331_ISINK3_CON2 0x42E
+#define MT6331_ISINK3_CON3 0x430
+#define MT6331_ISINK3_CON4 0x432
+#define MT6331_ISINK_ANA0 0x434
+#define MT6331_ISINK_ANA1 0x436
+#define MT6331_ISINK_PHASE_DLY 0x438
+#define MT6331_ISINK_EN_CTRL 0x43A
+#define MT6331_ANALDO_CON0 0x500
+#define MT6331_ANALDO_CON1 0x502
+#define MT6331_ANALDO_CON2 0x504
+#define MT6331_ANALDO_CON3 0x506
+#define MT6331_ANALDO_CON4 0x508
+#define MT6331_ANALDO_CON5 0x50A
+#define MT6331_ANALDO_CON6 0x50C
+#define MT6331_ANALDO_CON7 0x50E
+#define MT6331_ANALDO_CON8 0x510
+#define MT6331_ANALDO_CON9 0x512
+#define MT6331_ANALDO_CON10 0x514
+#define MT6331_ANALDO_CON11 0x516
+#define MT6331_ANALDO_CON12 0x518
+#define MT6331_ANALDO_CON13 0x51A
+#define MT6331_SYSLDO_CON0 0x51C
+#define MT6331_SYSLDO_CON1 0x51E
+#define MT6331_SYSLDO_CON2 0x520
+#define MT6331_SYSLDO_CON3 0x522
+#define MT6331_SYSLDO_CON4 0x524
+#define MT6331_SYSLDO_CON5 0x526
+#define MT6331_SYSLDO_CON6 0x528
+#define MT6331_SYSLDO_CON7 0x52A
+#define MT6331_SYSLDO_CON8 0x52C
+#define MT6331_SYSLDO_CON9 0x52E
+#define MT6331_SYSLDO_CON10 0x530
+#define MT6331_SYSLDO_CON11 0x532
+#define MT6331_SYSLDO_CON12 0x534
+#define MT6331_SYSLDO_CON13 0x536
+#define MT6331_SYSLDO_CON14 0x538
+#define MT6331_SYSLDO_CON15 0x53A
+#define MT6331_SYSLDO_CON16 0x53C
+#define MT6331_SYSLDO_CON17 0x53E
+#define MT6331_SYSLDO_CON18 0x540
+#define MT6331_SYSLDO_CON19 0x542
+#define MT6331_SYSLDO_CON20 0x544
+#define MT6331_SYSLDO_CON21 0x546
+#define MT6331_DIGLDO_CON0 0x548
+#define MT6331_DIGLDO_CON1 0x54A
+#define MT6331_DIGLDO_CON2 0x54C
+#define MT6331_DIGLDO_CON3 0x54E
+#define MT6331_DIGLDO_CON4 0x550
+#define MT6331_DIGLDO_CON5 0x552
+#define MT6331_DIGLDO_CON6 0x554
+#define MT6331_DIGLDO_CON7 0x556
+#define MT6331_DIGLDO_CON8 0x558
+#define MT6331_DIGLDO_CON9 0x55A
+#define MT6331_DIGLDO_CON10 0x55C
+#define MT6331_DIGLDO_CON11 0x55E
+#define MT6331_DIGLDO_CON12 0x560
+#define MT6331_DIGLDO_CON13 0x562
+#define MT6331_DIGLDO_CON14 0x564
+#define MT6331_DIGLDO_CON15 0x566
+#define MT6331_DIGLDO_CON16 0x568
+#define MT6331_DIGLDO_CON17 0x56A
+#define MT6331_DIGLDO_CON18 0x56C
+#define MT6331_DIGLDO_CON19 0x56E
+#define MT6331_DIGLDO_CON20 0x570
+#define MT6331_DIGLDO_CON21 0x572
+#define MT6331_DIGLDO_CON22 0x574
+#define MT6331_DIGLDO_CON23 0x576
+#define MT6331_DIGLDO_CON24 0x578
+#define MT6331_DIGLDO_CON25 0x57A
+#define MT6331_DIGLDO_CON26 0x57C
+#define MT6331_DIGLDO_CON27 0x57E
+#define MT6331_DIGLDO_CON28 0x580
+#define MT6331_OTP_CON0 0x600
+#define MT6331_OTP_CON1 0x602
+#define MT6331_OTP_CON2 0x604
+#define MT6331_OTP_CON3 0x606
+#define MT6331_OTP_CON4 0x608
+#define MT6331_OTP_CON5 0x60A
+#define MT6331_OTP_CON6 0x60C
+#define MT6331_OTP_CON7 0x60E
+#define MT6331_OTP_CON8 0x610
+#define MT6331_OTP_CON9 0x612
+#define MT6331_OTP_CON10 0x614
+#define MT6331_OTP_CON11 0x616
+#define MT6331_OTP_CON12 0x618
+#define MT6331_OTP_CON13 0x61A
+#define MT6331_OTP_CON14 0x61C
+#define MT6331_OTP_DOUT_0_15 0x61E
+#define MT6331_OTP_DOUT_16_31 0x620
+#define MT6331_OTP_DOUT_32_47 0x622
+#define MT6331_OTP_DOUT_48_63 0x624
+#define MT6331_OTP_DOUT_64_79 0x626
+#define MT6331_OTP_DOUT_80_95 0x628
+#define MT6331_OTP_DOUT_96_111 0x62A
+#define MT6331_OTP_DOUT_112_127 0x62C
+#define MT6331_OTP_DOUT_128_143 0x62E
+#define MT6331_OTP_DOUT_144_159 0x630
+#define MT6331_OTP_DOUT_160_175 0x632
+#define MT6331_OTP_DOUT_176_191 0x634
+#define MT6331_OTP_DOUT_192_207 0x636
+#define MT6331_OTP_DOUT_208_223 0x638
+#define MT6331_OTP_DOUT_224_239 0x63A
+#define MT6331_OTP_DOUT_240_255 0x63C
+#define MT6331_OTP_VAL_0_15 0x63E
+#define MT6331_OTP_VAL_16_31 0x640
+#define MT6331_OTP_VAL_32_47 0x642
+#define MT6331_OTP_VAL_48_63 0x644
+#define MT6331_OTP_VAL_64_79 0x646
+#define MT6331_OTP_VAL_80_95 0x648
+#define MT6331_OTP_VAL_96_111 0x64A
+#define MT6331_OTP_VAL_112_127 0x64C
+#define MT6331_OTP_VAL_128_143 0x64E
+#define MT6331_OTP_VAL_144_159 0x650
+#define MT6331_OTP_VAL_160_175 0x652
+#define MT6331_OTP_VAL_176_191 0x654
+#define MT6331_OTP_VAL_192_207 0x656
+#define MT6331_OTP_VAL_208_223 0x658
+#define MT6331_OTP_VAL_224_239 0x65A
+#define MT6331_OTP_VAL_240_255 0x65C
+#define MT6331_RTC_MIX_CON0 0x65E
+#define MT6331_RTC_MIX_CON1 0x660
+#define MT6331_AUDDAC_CFG0 0x662
+#define MT6331_AUDBUF_CFG0 0x664
+#define MT6331_AUDBUF_CFG1 0x666
+#define MT6331_AUDBUF_CFG2 0x668
+#define MT6331_AUDBUF_CFG3 0x66A
+#define MT6331_AUDBUF_CFG4 0x66C
+#define MT6331_AUDBUF_CFG5 0x66E
+#define MT6331_AUDBUF_CFG6 0x670
+#define MT6331_AUDBUF_CFG7 0x672
+#define MT6331_AUDBUF_CFG8 0x674
+#define MT6331_IBIASDIST_CFG0 0x676
+#define MT6331_AUDCLKGEN_CFG0 0x678
+#define MT6331_AUDLDO_CFG0 0x67A
+#define MT6331_AUDDCDC_CFG0 0x67C
+#define MT6331_AUDDCDC_CFG1 0x67E
+#define MT6331_AUDNVREGGLB_CFG0 0x680
+#define MT6331_AUD_NCP0 0x682
+#define MT6331_AUD_ZCD_CFG0 0x684
+#define MT6331_AUDPREAMP_CFG0 0x686
+#define MT6331_AUDPREAMP_CFG1 0x688
+#define MT6331_AUDPREAMP_CFG2 0x68A
+#define MT6331_AUDADC_CFG0 0x68C
+#define MT6331_AUDADC_CFG1 0x68E
+#define MT6331_AUDADC_CFG2 0x690
+#define MT6331_AUDADC_CFG3 0x692
+#define MT6331_AUDADC_CFG4 0x694
+#define MT6331_AUDADC_CFG5 0x696
+#define MT6331_AUDDIGMI_CFG0 0x698
+#define MT6331_AUDDIGMI_CFG1 0x69A
+#define MT6331_AUDMICBIAS_CFG0 0x69C
+#define MT6331_AUDMICBIAS_CFG1 0x69E
+#define MT6331_AUDENCSPARE_CFG0 0x6A0
+#define MT6331_AUDPREAMPGAIN_CFG0 0x6A2
+#define MT6331_AUDMADPLL_CFG0 0x6A4
+#define MT6331_AUDMADPLL_CFG1 0x6A6
+#define MT6331_AUDMADPLL_CFG2 0x6A8
+#define MT6331_AUDLDO_NVREG_CFG0 0x6AA
+#define MT6331_AUDLDO_NVREG_CFG1 0x6AC
+#define MT6331_AUDLDO_NVREG_CFG2 0x6AE
+#define MT6331_AUXADC_ADC0 0x700
+#define MT6331_AUXADC_ADC1 0x702
+#define MT6331_AUXADC_ADC2 0x704
+#define MT6331_AUXADC_ADC3 0x706
+#define MT6331_AUXADC_ADC4 0x708
+#define MT6331_AUXADC_ADC5 0x70A
+#define MT6331_AUXADC_ADC6 0x70C
+#define MT6331_AUXADC_ADC7 0x70E
+#define MT6331_AUXADC_ADC8 0x710
+#define MT6331_AUXADC_ADC9 0x712
+#define MT6331_AUXADC_ADC10 0x714
+#define MT6331_AUXADC_ADC11 0x716
+#define MT6331_AUXADC_ADC12 0x718
+#define MT6331_AUXADC_ADC13 0x71A
+#define MT6331_AUXADC_ADC14 0x71C
+#define MT6331_AUXADC_ADC15 0x71E
+#define MT6331_AUXADC_ADC16 0x720
+#define MT6331_AUXADC_ADC17 0x722
+#define MT6331_AUXADC_ADC18 0x724
+#define MT6331_AUXADC_ADC19 0x726
+#define MT6331_AUXADC_STA0 0x728
+#define MT6331_AUXADC_STA1 0x72A
+#define MT6331_AUXADC_RQST0 0x72C
+#define MT6331_AUXADC_RQST0_SET 0x72E
+#define MT6331_AUXADC_RQST0_CLR 0x730
+#define MT6331_AUXADC_RQST1 0x732
+#define MT6331_AUXADC_RQST1_SET 0x734
+#define MT6331_AUXADC_RQST1_CLR 0x736
+#define MT6331_AUXADC_CON0 0x738
+#define MT6331_AUXADC_CON1 0x73A
+#define MT6331_AUXADC_CON2 0x73C
+#define MT6331_AUXADC_CON3 0x73E
+#define MT6331_AUXADC_CON4 0x740
+#define MT6331_AUXADC_CON5 0x742
+#define MT6331_AUXADC_CON6 0x744
+#define MT6331_AUXADC_CON7 0x746
+#define MT6331_AUXADC_CON8 0x748
+#define MT6331_AUXADC_CON9 0x74A
+#define MT6331_AUXADC_CON10 0x74C
+#define MT6331_AUXADC_CON11 0x74E
+#define MT6331_AUXADC_CON12 0x750
+#define MT6331_AUXADC_CON13 0x752
+#define MT6331_AUXADC_CON14 0x754
+#define MT6331_AUXADC_CON15 0x756
+#define MT6331_AUXADC_CON16 0x758
+#define MT6331_AUXADC_CON17 0x75A
+#define MT6331_AUXADC_CON18 0x75C
+#define MT6331_AUXADC_CON19 0x75E
+#define MT6331_AUXADC_CON20 0x760
+#define MT6331_AUXADC_CON21 0x762
+#define MT6331_AUXADC_CON22 0x764
+#define MT6331_AUXADC_CON23 0x766
+#define MT6331_AUXADC_CON24 0x768
+#define MT6331_AUXADC_CON25 0x76A
+#define MT6331_AUXADC_CON26 0x76C
+#define MT6331_AUXADC_CON27 0x76E
+#define MT6331_AUXADC_CON28 0x770
+#define MT6331_AUXADC_CON29 0x772
+#define MT6331_AUXADC_CON30 0x774
+#define MT6331_AUXADC_CON31 0x776
+#define MT6331_AUXADC_CON32 0x778
+#define MT6331_ACCDET_CON0 0x77A
+#define MT6331_ACCDET_CON1 0x77C
+#define MT6331_ACCDET_CON2 0x77E
+#define MT6331_ACCDET_CON3 0x780
+#define MT6331_ACCDET_CON4 0x782
+#define MT6331_ACCDET_CON5 0x784
+#define MT6331_ACCDET_CON6 0x786
+#define MT6331_ACCDET_CON7 0x788
+#define MT6331_ACCDET_CON8 0x78A
+#define MT6331_ACCDET_CON9 0x78C
+#define MT6331_ACCDET_CON10 0x78E
+#define MT6331_ACCDET_CON11 0x790
+#define MT6331_ACCDET_CON12 0x792
+#define MT6331_ACCDET_CON13 0x794
+#define MT6331_ACCDET_CON14 0x796
+#define MT6331_ACCDET_CON15 0x798
+#define MT6331_ACCDET_CON16 0x79A
+#define MT6331_ACCDET_CON17 0x79C
+#define MT6331_ACCDET_CON18 0x79E
+#define MT6331_ACCDET_CON19 0x7A0
+#define MT6331_ACCDET_CON20 0x7A2
+#define MT6331_ACCDET_CON21 0x7A4
+#define MT6331_ACCDET_CON22 0x7A6
+#define MT6331_ACCDET_CON23 0x7A8
+#define MT6331_ACCDET_CON24 0x7AA
+
+#endif /* __MFD_MT6331_REGISTERS_H__ */
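
(Reviewer note, not part of the patch: a minimal sketch of how a sub-driver
would consume the MT6331_* offsets above through the parent PMIC's shared
regmap. The function and the BIT(0) write target are illustrative
assumptions; only regmap_read()/regmap_write() and the register names are
real.)

	#include <linux/bits.h>
	#include <linux/mfd/mt6331/registers.h>
	#include <linux/regmap.h>

	static int mt6331_example_poke(struct regmap *regmap)
	{
		unsigned int val;
		int ret;

		/* Read the first accessory-detect control register. */
		ret = regmap_read(regmap, MT6331_ACCDET_CON0, &val);
		if (ret)
			return ret;

		/* Kick AUXADC channel 0 via the write-1-to-set request port. */
		return regmap_write(regmap, MT6331_AUXADC_RQST0_SET, BIT(0));
	}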
diff --git a/include/linux/mfd/mt6332/core.h b/include/linux/mfd/mt6332/core.h
new file mode 100644
index 000000000000..cd6013eb82d9
--- /dev/null
+++ b/include/linux/mfd/mt6332/core.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6332_CORE_H__
+#define __MFD_MT6332_CORE_H__
+
+enum mt6332_irq_status_numbers {
+ MT6332_IRQ_STATUS_CHR_COMPLETE = 0,
+ MT6332_IRQ_STATUS_THERMAL_SD,
+ MT6332_IRQ_STATUS_THERMAL_REG_IN,
+ MT6332_IRQ_STATUS_THERMAL_REG_OUT,
+ MT6332_IRQ_STATUS_OTG_OC,
+ MT6332_IRQ_STATUS_CHR_OC,
+ MT6332_IRQ_STATUS_OTG_THERMAL,
+ MT6332_IRQ_STATUS_CHRIN_SHORT,
+ MT6332_IRQ_STATUS_DRVCDT_SHORT,
+ MT6332_IRQ_STATUS_PLUG_IN_FLASH,
+ MT6332_IRQ_STATUS_CHRWDT_FLAG,
+ MT6332_IRQ_STATUS_FLASH_EN_TIMEOUT,
+ MT6332_IRQ_STATUS_FLASH_VLED1_SHORT,
+ MT6332_IRQ_STATUS_FLASH_VLED1_OPEN = 13,
+ MT6332_IRQ_STATUS_OV = 16,
+ MT6332_IRQ_STATUS_BVALID_DET,
+ MT6332_IRQ_STATUS_VBATON_UNDET,
+ MT6332_IRQ_STATUS_CHR_PLUG_IN,
+ MT6332_IRQ_STATUS_CHR_PLUG_OUT,
+ MT6332_IRQ_STATUS_BC11_TIMEOUT,
+ MT6332_IRQ_STATUS_FLASH_VLED2_SHORT,
+ MT6332_IRQ_STATUS_FLASH_VLED2_OPEN = 23,
+ MT6332_IRQ_STATUS_THR_H = 32,
+ MT6332_IRQ_STATUS_THR_L,
+ MT6332_IRQ_STATUS_BAT_H,
+ MT6332_IRQ_STATUS_BAT_L,
+ MT6332_IRQ_STATUS_M3_H,
+ MT6332_IRQ_STATUS_M3_L,
+ MT6332_IRQ_STATUS_FG_BAT_H,
+ MT6332_IRQ_STATUS_FG_BAT_L,
+ MT6332_IRQ_STATUS_FG_CUR_H,
+ MT6332_IRQ_STATUS_FG_CUR_L,
+ MT6332_IRQ_STATUS_SPKL_D,
+ MT6332_IRQ_STATUS_SPKL_AB,
+ MT6332_IRQ_STATUS_BIF,
+ MT6332_IRQ_STATUS_VWLED_OC = 45,
+ MT6332_IRQ_STATUS_VDRAM_OC = 48,
+ MT6332_IRQ_STATUS_VDVFS2_OC,
+ MT6332_IRQ_STATUS_VRF1_OC,
+ MT6332_IRQ_STATUS_VRF2_OC,
+ MT6332_IRQ_STATUS_VPA_OC,
+ MT6332_IRQ_STATUS_VSBST_OC,
+ MT6332_IRQ_STATUS_LDO_OC,
+ MT6332_IRQ_STATUS_NR,
+};
+
+#define MT6332_IRQ_CON0_BASE MT6332_IRQ_STATUS_CHR_COMPLETE
+#define MT6332_IRQ_CON0_BITS (MT6332_IRQ_STATUS_FLASH_VLED1_OPEN + 1)
+#define MT6332_IRQ_CON1_BASE MT6332_IRQ_STATUS_OV
+#define MT6332_IRQ_CON1_BITS (MT6332_IRQ_STATUS_FLASH_VLED2_OPEN - MT6332_IRQ_STATUS_OV + 1)
+#define MT6332_IRQ_CON2_BASE MT6332_IRQ_STATUS_THR_H
+#define MT6332_IRQ_CON2_BITS (MT6332_IRQ_STATUS_VWLED_OC - MT6332_IRQ_STATUS_THR_H + 1)
+#define MT6332_IRQ_CON3_BASE MT6332_IRQ_STATUS_VDRAM_OC
+#define MT6332_IRQ_CON3_BITS (MT6332_IRQ_STATUS_LDO_OC - MT6332_IRQ_STATUS_VDRAM_OC + 1)
+
+#endif /* __MFD_MT6332_CORE_H__ */
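
(Reviewer note, not part of the patch: the four BASE/BITS pairs above slice
the sparse mt6332_irq_status_numbers space into the INT_CON0..INT_CON3
banks. A hypothetical helper showing how an hwirq maps to its bank index
and bit position:)

	#include <linux/errno.h>
	#include <linux/kernel.h>

	static int mt6332_irq_to_bank(unsigned int hwirq, unsigned int *bit)
	{
		static const struct { unsigned int base, bits; } bank[] = {
			{ MT6332_IRQ_CON0_BASE, MT6332_IRQ_CON0_BITS },
			{ MT6332_IRQ_CON1_BASE, MT6332_IRQ_CON1_BITS },
			{ MT6332_IRQ_CON2_BASE, MT6332_IRQ_CON2_BITS },
			{ MT6332_IRQ_CON3_BASE, MT6332_IRQ_CON3_BITS },
		};
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(bank); i++) {
			if (hwirq >= bank[i].base &&
			    hwirq < bank[i].base + bank[i].bits) {
				*bit = hwirq - bank[i].base; /* bit inside INT_CONi */
				return i;                    /* selects MT6332_INT_CONi */
			}
		}
		return -EINVAL;
	}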
diff --git a/include/linux/mfd/mt6332/registers.h b/include/linux/mfd/mt6332/registers.h
new file mode 100644
index 000000000000..65e0b86fceac
--- /dev/null
+++ b/include/linux/mfd/mt6332/registers.h
@@ -0,0 +1,642 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6332_REGISTERS_H__
+#define __MFD_MT6332_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6332_HWCID 0x8000
+#define MT6332_SWCID 0x8002
+#define MT6332_TOP_CON 0x8004
+#define MT6332_DDR_VREF_AP_CON 0x8006
+#define MT6332_DDR_VREF_DQ_CON 0x8008
+#define MT6332_DDR_VREF_CA_CON 0x800A
+#define MT6332_TEST_OUT 0x800C
+#define MT6332_TEST_CON0 0x800E
+#define MT6332_TEST_CON1 0x8010
+#define MT6332_TESTMODE_SW 0x8012
+#define MT6332_TESTMODE_ANA 0x8014
+#define MT6332_TDSEL_CON 0x8016
+#define MT6332_RDSEL_CON 0x8018
+#define MT6332_SMT_CON0 0x801A
+#define MT6332_SMT_CON1 0x801C
+#define MT6332_DRV_CON0 0x801E
+#define MT6332_DRV_CON1 0x8020
+#define MT6332_DRV_CON2 0x8022
+#define MT6332_EN_STATUS0 0x8024
+#define MT6332_OCSTATUS0 0x8026
+#define MT6332_TOP_STATUS 0x8028
+#define MT6332_TOP_STATUS_SET 0x802A
+#define MT6332_TOP_STATUS_CLR 0x802C
+#define MT6332_FLASH_CON0 0x802E
+#define MT6332_FLASH_CON1 0x8030
+#define MT6332_FLASH_CON2 0x8032
+#define MT6332_CORE_CON0 0x8034
+#define MT6332_CORE_CON1 0x8036
+#define MT6332_CORE_CON2 0x8038
+#define MT6332_CORE_CON3 0x803A
+#define MT6332_CORE_CON4 0x803C
+#define MT6332_CORE_CON5 0x803E
+#define MT6332_CORE_CON6 0x8040
+#define MT6332_CORE_CON7 0x8042
+#define MT6332_CORE_CON8 0x8044
+#define MT6332_CORE_CON9 0x8046
+#define MT6332_CORE_CON10 0x8048
+#define MT6332_CORE_CON11 0x804A
+#define MT6332_CORE_CON12 0x804C
+#define MT6332_CORE_CON13 0x804E
+#define MT6332_CORE_CON14 0x8050
+#define MT6332_CORE_CON15 0x8052
+#define MT6332_STA_CON0 0x8054
+#define MT6332_STA_CON1 0x8056
+#define MT6332_STA_CON2 0x8058
+#define MT6332_STA_CON3 0x805A
+#define MT6332_STA_CON4 0x805C
+#define MT6332_STA_CON5 0x805E
+#define MT6332_STA_CON6 0x8060
+#define MT6332_STA_CON7 0x8062
+#define MT6332_CHR_CON0 0x8064
+#define MT6332_CHR_CON1 0x8066
+#define MT6332_CHR_CON2 0x8068
+#define MT6332_CHR_CON3 0x806A
+#define MT6332_CHR_CON4 0x806C
+#define MT6332_CHR_CON5 0x806E
+#define MT6332_CHR_CON6 0x8070
+#define MT6332_CHR_CON7 0x8072
+#define MT6332_CHR_CON8 0x8074
+#define MT6332_CHR_CON9 0x8076
+#define MT6332_CHR_CON10 0x8078
+#define MT6332_CHR_CON11 0x807A
+#define MT6332_CHR_CON12 0x807C
+#define MT6332_CHR_CON13 0x807E
+#define MT6332_CHR_CON14 0x8080
+#define MT6332_CHR_CON15 0x8082
+#define MT6332_BOOST_CON0 0x8084
+#define MT6332_BOOST_CON1 0x8086
+#define MT6332_BOOST_CON2 0x8088
+#define MT6332_BOOST_CON3 0x808A
+#define MT6332_BOOST_CON4 0x808C
+#define MT6332_BOOST_CON5 0x808E
+#define MT6332_BOOST_CON6 0x8090
+#define MT6332_BOOST_CON7 0x8092
+#define MT6332_TOP_CKPDN_CON0 0x8094
+#define MT6332_TOP_CKPDN_CON0_SET 0x8096
+#define MT6332_TOP_CKPDN_CON0_CLR 0x8098
+#define MT6332_TOP_CKPDN_CON1 0x809A
+#define MT6332_TOP_CKPDN_CON1_SET 0x809C
+#define MT6332_TOP_CKPDN_CON1_CLR 0x809E
+#define MT6332_TOP_CKPDN_CON2 0x80A0
+#define MT6332_TOP_CKPDN_CON2_SET 0x80A2
+#define MT6332_TOP_CKPDN_CON2_CLR 0x80A4
+#define MT6332_TOP_CKSEL_CON0 0x80A6
+#define MT6332_TOP_CKSEL_CON0_SET 0x80A8
+#define MT6332_TOP_CKSEL_CON0_CLR 0x80AA
+#define MT6332_TOP_CKSEL_CON1 0x80AC
+#define MT6332_TOP_CKSEL_CON1_SET 0x80AE
+#define MT6332_TOP_CKSEL_CON1_CLR 0x80B0
+#define MT6332_TOP_CKHWEN_CON 0x80B2
+#define MT6332_TOP_CKHWEN_CON_SET 0x80B4
+#define MT6332_TOP_CKHWEN_CON_CLR 0x80B6
+#define MT6332_TOP_CKTST_CON0 0x80B8
+#define MT6332_TOP_CKTST_CON1 0x80BA
+#define MT6332_TOP_RST_CON 0x80BC
+#define MT6332_TOP_RST_CON_SET 0x80BE
+#define MT6332_TOP_RST_CON_CLR 0x80C0
+#define MT6332_TOP_RST_MISC 0x80C2
+#define MT6332_TOP_RST_MISC_SET 0x80C4
+#define MT6332_TOP_RST_MISC_CLR 0x80C6
+#define MT6332_INT_CON0 0x80C8
+#define MT6332_INT_CON0_SET 0x80CA
+#define MT6332_INT_CON0_CLR 0x80CC
+#define MT6332_INT_CON1 0x80CE
+#define MT6332_INT_CON1_SET 0x80D0
+#define MT6332_INT_CON1_CLR 0x80D2
+#define MT6332_INT_CON2 0x80D4
+#define MT6332_INT_CON2_SET 0x80D6
+#define MT6332_INT_CON2_CLR 0x80D8
+#define MT6332_INT_CON3 0x80DA
+#define MT6332_INT_CON3_SET 0x80DC
+#define MT6332_INT_CON3_CLR 0x80DE
+#define MT6332_CHRWDT_CON0 0x80E0
+#define MT6332_CHRWDT_STATUS0 0x80E2
+#define MT6332_INT_STATUS0 0x80E4
+#define MT6332_INT_STATUS1 0x80E6
+#define MT6332_INT_STATUS2 0x80E8
+#define MT6332_INT_STATUS3 0x80EA
+#define MT6332_OC_GEAR_0 0x80EC
+#define MT6332_OC_GEAR_1 0x80EE
+#define MT6332_OC_GEAR_2 0x80F0
+#define MT6332_INT_MISC_CON 0x80F2
+#define MT6332_RG_SPI_CON 0x80F4
+#define MT6332_DEW_DIO_EN 0x80F6
+#define MT6332_DEW_READ_TEST 0x80F8
+#define MT6332_DEW_WRITE_TEST 0x80FA
+#define MT6332_DEW_CRC_SWRST 0x80FC
+#define MT6332_DEW_CRC_EN 0x80FE
+#define MT6332_DEW_CRC_VAL 0x8100
+#define MT6332_DEW_DBG_MON_SEL 0x8102
+#define MT6332_DEW_CIPHER_KEY_SEL 0x8104
+#define MT6332_DEW_CIPHER_IV_SEL 0x8106
+#define MT6332_DEW_CIPHER_EN 0x8108
+#define MT6332_DEW_CIPHER_RDY 0x810A
+#define MT6332_DEW_CIPHER_MODE 0x810C
+#define MT6332_DEW_CIPHER_SWRST 0x810E
+#define MT6332_DEW_RDDMY_NO 0x8110
+#define MT6332_INT_STA 0x8112
+#define MT6332_BIF_CON0 0x8114
+#define MT6332_BIF_CON1 0x8116
+#define MT6332_BIF_CON2 0x8118
+#define MT6332_BIF_CON3 0x811A
+#define MT6332_BIF_CON4 0x811C
+#define MT6332_BIF_CON5 0x811E
+#define MT6332_BIF_CON6 0x8120
+#define MT6332_BIF_CON7 0x8122
+#define MT6332_BIF_CON8 0x8124
+#define MT6332_BIF_CON9 0x8126
+#define MT6332_BIF_CON10 0x8128
+#define MT6332_BIF_CON11 0x812A
+#define MT6332_BIF_CON12 0x812C
+#define MT6332_BIF_CON13 0x812E
+#define MT6332_BIF_CON14 0x8130
+#define MT6332_BIF_CON15 0x8132
+#define MT6332_BIF_CON16 0x8134
+#define MT6332_BIF_CON17 0x8136
+#define MT6332_BIF_CON18 0x8138
+#define MT6332_BIF_CON19 0x813A
+#define MT6332_BIF_CON20 0x813C
+#define MT6332_BIF_CON21 0x813E
+#define MT6332_BIF_CON22 0x8140
+#define MT6332_BIF_CON23 0x8142
+#define MT6332_BIF_CON24 0x8144
+#define MT6332_BIF_CON25 0x8146
+#define MT6332_BIF_CON26 0x8148
+#define MT6332_BIF_CON27 0x814A
+#define MT6332_BIF_CON28 0x814C
+#define MT6332_BIF_CON29 0x814E
+#define MT6332_BIF_CON30 0x8150
+#define MT6332_BIF_CON31 0x8152
+#define MT6332_BIF_CON32 0x8154
+#define MT6332_BIF_CON33 0x8156
+#define MT6332_BIF_CON34 0x8158
+#define MT6332_BIF_CON35 0x815A
+#define MT6332_BIF_CON36 0x815C
+#define MT6332_BATON_CON0 0x815E
+#define MT6332_BIF_CON37 0x8160
+#define MT6332_BIF_CON38 0x8162
+#define MT6332_CHR_CON16 0x8164
+#define MT6332_CHR_CON17 0x8166
+#define MT6332_CHR_CON18 0x8168
+#define MT6332_CHR_CON19 0x816A
+#define MT6332_CHR_CON20 0x816C
+#define MT6332_CHR_CON21 0x816E
+#define MT6332_CHR_CON22 0x8170
+#define MT6332_CHR_CON23 0x8172
+#define MT6332_CHR_CON24 0x8174
+#define MT6332_CHR_CON25 0x8176
+#define MT6332_STA_CON8 0x8178
+#define MT6332_BUCK_ALL_CON0 0x8400
+#define MT6332_BUCK_ALL_CON1 0x8402
+#define MT6332_BUCK_ALL_CON2 0x8404
+#define MT6332_BUCK_ALL_CON3 0x8406
+#define MT6332_BUCK_ALL_CON4 0x8408
+#define MT6332_BUCK_ALL_CON5 0x840A
+#define MT6332_BUCK_ALL_CON6 0x840C
+#define MT6332_BUCK_ALL_CON7 0x840E
+#define MT6332_BUCK_ALL_CON8 0x8410
+#define MT6332_BUCK_ALL_CON9 0x8412
+#define MT6332_BUCK_ALL_CON10 0x8414
+#define MT6332_BUCK_ALL_CON11 0x8416
+#define MT6332_BUCK_ALL_CON12 0x8418
+#define MT6332_BUCK_ALL_CON13 0x841A
+#define MT6332_BUCK_ALL_CON14 0x841C
+#define MT6332_BUCK_ALL_CON15 0x841E
+#define MT6332_BUCK_ALL_CON16 0x8420
+#define MT6332_BUCK_ALL_CON17 0x8422
+#define MT6332_BUCK_ALL_CON18 0x8424
+#define MT6332_BUCK_ALL_CON19 0x8426
+#define MT6332_BUCK_ALL_CON20 0x8428
+#define MT6332_BUCK_ALL_CON21 0x842A
+#define MT6332_BUCK_ALL_CON22 0x842C
+#define MT6332_BUCK_ALL_CON23 0x842E
+#define MT6332_BUCK_ALL_CON24 0x8430
+#define MT6332_BUCK_ALL_CON25 0x8432
+#define MT6332_BUCK_ALL_CON26 0x8434
+#define MT6332_BUCK_ALL_CON27 0x8436
+#define MT6332_VDRAM_CON0 0x8438
+#define MT6332_VDRAM_CON1 0x843A
+#define MT6332_VDRAM_CON2 0x843C
+#define MT6332_VDRAM_CON3 0x843E
+#define MT6332_VDRAM_CON4 0x8440
+#define MT6332_VDRAM_CON5 0x8442
+#define MT6332_VDRAM_CON6 0x8444
+#define MT6332_VDRAM_CON7 0x8446
+#define MT6332_VDRAM_CON8 0x8448
+#define MT6332_VDRAM_CON9 0x844A
+#define MT6332_VDRAM_CON10 0x844C
+#define MT6332_VDRAM_CON11 0x844E
+#define MT6332_VDRAM_CON12 0x8450
+#define MT6332_VDRAM_CON13 0x8452
+#define MT6332_VDRAM_CON14 0x8454
+#define MT6332_VDRAM_CON15 0x8456
+#define MT6332_VDRAM_CON16 0x8458
+#define MT6332_VDRAM_CON17 0x845A
+#define MT6332_VDRAM_CON18 0x845C
+#define MT6332_VDRAM_CON19 0x845E
+#define MT6332_VDRAM_CON20 0x8460
+#define MT6332_VDRAM_CON21 0x8462
+#define MT6332_VDVFS2_CON0 0x8464
+#define MT6332_VDVFS2_CON1 0x8466
+#define MT6332_VDVFS2_CON2 0x8468
+#define MT6332_VDVFS2_CON3 0x846A
+#define MT6332_VDVFS2_CON4 0x846C
+#define MT6332_VDVFS2_CON5 0x846E
+#define MT6332_VDVFS2_CON6 0x8470
+#define MT6332_VDVFS2_CON7 0x8472
+#define MT6332_VDVFS2_CON8 0x8474
+#define MT6332_VDVFS2_CON9 0x8476
+#define MT6332_VDVFS2_CON10 0x8478
+#define MT6332_VDVFS2_CON11 0x847A
+#define MT6332_VDVFS2_CON12 0x847C
+#define MT6332_VDVFS2_CON13 0x847E
+#define MT6332_VDVFS2_CON14 0x8480
+#define MT6332_VDVFS2_CON15 0x8482
+#define MT6332_VDVFS2_CON16 0x8484
+#define MT6332_VDVFS2_CON17 0x8486
+#define MT6332_VDVFS2_CON18 0x8488
+#define MT6332_VDVFS2_CON19 0x848A
+#define MT6332_VDVFS2_CON20 0x848C
+#define MT6332_VDVFS2_CON21 0x848E
+#define MT6332_VDVFS2_CON22 0x8490
+#define MT6332_VDVFS2_CON23 0x8492
+#define MT6332_VDVFS2_CON24 0x8494
+#define MT6332_VDVFS2_CON25 0x8496
+#define MT6332_VDVFS2_CON26 0x8498
+#define MT6332_VDVFS2_CON27 0x849A
+#define MT6332_VRF1_CON0 0x849C
+#define MT6332_VRF1_CON1 0x849E
+#define MT6332_VRF1_CON2 0x84A0
+#define MT6332_VRF1_CON3 0x84A2
+#define MT6332_VRF1_CON4 0x84A4
+#define MT6332_VRF1_CON5 0x84A6
+#define MT6332_VRF1_CON6 0x84A8
+#define MT6332_VRF1_CON7 0x84AA
+#define MT6332_VRF1_CON8 0x84AC
+#define MT6332_VRF1_CON9 0x84AE
+#define MT6332_VRF1_CON10 0x84B0
+#define MT6332_VRF1_CON11 0x84B2
+#define MT6332_VRF1_CON12 0x84B4
+#define MT6332_VRF1_CON13 0x84B6
+#define MT6332_VRF1_CON14 0x84B8
+#define MT6332_VRF1_CON15 0x84BA
+#define MT6332_VRF1_CON16 0x84BC
+#define MT6332_VRF1_CON17 0x84BE
+#define MT6332_VRF1_CON18 0x84C0
+#define MT6332_VRF1_CON19 0x84C2
+#define MT6332_VRF1_CON20 0x84C4
+#define MT6332_VRF1_CON21 0x84C6
+#define MT6332_VRF2_CON0 0x84C8
+#define MT6332_VRF2_CON1 0x84CA
+#define MT6332_VRF2_CON2 0x84CC
+#define MT6332_VRF2_CON3 0x84CE
+#define MT6332_VRF2_CON4 0x84D0
+#define MT6332_VRF2_CON5 0x84D2
+#define MT6332_VRF2_CON6 0x84D4
+#define MT6332_VRF2_CON7 0x84D6
+#define MT6332_VRF2_CON8 0x84D8
+#define MT6332_VRF2_CON9 0x84DA
+#define MT6332_VRF2_CON10 0x84DC
+#define MT6332_VRF2_CON11 0x84DE
+#define MT6332_VRF2_CON12 0x84E0
+#define MT6332_VRF2_CON13 0x84E2
+#define MT6332_VRF2_CON14 0x84E4
+#define MT6332_VRF2_CON15 0x84E6
+#define MT6332_VRF2_CON16 0x84E8
+#define MT6332_VRF2_CON17 0x84EA
+#define MT6332_VRF2_CON18 0x84EC
+#define MT6332_VRF2_CON19 0x84EE
+#define MT6332_VRF2_CON20 0x84F0
+#define MT6332_VRF2_CON21 0x84F2
+#define MT6332_VPA_CON0 0x84F4
+#define MT6332_VPA_CON1 0x84F6
+#define MT6332_VPA_CON2 0x84F8
+#define MT6332_VPA_CON3 0x84FC
+#define MT6332_VPA_CON4 0x84FE
+#define MT6332_VPA_CON5 0x8500
+#define MT6332_VPA_CON6 0x8502
+#define MT6332_VPA_CON7 0x8504
+#define MT6332_VPA_CON8 0x8506
+#define MT6332_VPA_CON9 0x8508
+#define MT6332_VPA_CON10 0x850A
+#define MT6332_VPA_CON11 0x850C
+#define MT6332_VPA_CON12 0x850E
+#define MT6332_VPA_CON13 0x8510
+#define MT6332_VPA_CON14 0x8512
+#define MT6332_VPA_CON15 0x8514
+#define MT6332_VPA_CON16 0x8516
+#define MT6332_VPA_CON17 0x8518
+#define MT6332_VPA_CON18 0x851A
+#define MT6332_VPA_CON19 0x851C
+#define MT6332_VPA_CON20 0x851E
+#define MT6332_VPA_CON21 0x8520
+#define MT6332_VPA_CON22 0x8522
+#define MT6332_VPA_CON23 0x8524
+#define MT6332_VPA_CON24 0x8526
+#define MT6332_VPA_CON25 0x8528
+#define MT6332_VSBST_CON0 0x852A
+#define MT6332_VSBST_CON1 0x852C
+#define MT6332_VSBST_CON2 0x852E
+#define MT6332_VSBST_CON3 0x8530
+#define MT6332_VSBST_CON4 0x8532
+#define MT6332_VSBST_CON5 0x8534
+#define MT6332_VSBST_CON6 0x8536
+#define MT6332_VSBST_CON7 0x8538
+#define MT6332_VSBST_CON8 0x853A
+#define MT6332_VSBST_CON9 0x853C
+#define MT6332_VSBST_CON10 0x853E
+#define MT6332_VSBST_CON11 0x8540
+#define MT6332_VSBST_CON12 0x8542
+#define MT6332_VSBST_CON13 0x8544
+#define MT6332_VSBST_CON14 0x8546
+#define MT6332_VSBST_CON15 0x8548
+#define MT6332_VSBST_CON16 0x854A
+#define MT6332_VSBST_CON17 0x854C
+#define MT6332_VSBST_CON18 0x854E
+#define MT6332_VSBST_CON19 0x8550
+#define MT6332_VSBST_CON20 0x8552
+#define MT6332_VSBST_CON21 0x8554
+#define MT6332_BUCK_K_CON0 0x8556
+#define MT6332_BUCK_K_CON1 0x8558
+#define MT6332_BUCK_K_CON2 0x855A
+#define MT6332_BUCK_K_CON3 0x855C
+#define MT6332_BUCK_K_CON4 0x855E
+#define MT6332_BUCK_K_CON5 0x8560
+#define MT6332_AUXADC_ADC0 0x8800
+#define MT6332_AUXADC_ADC1 0x8802
+#define MT6332_AUXADC_ADC2 0x8804
+#define MT6332_AUXADC_ADC3 0x8806
+#define MT6332_AUXADC_ADC4 0x8808
+#define MT6332_AUXADC_ADC5 0x880A
+#define MT6332_AUXADC_ADC6 0x880C
+#define MT6332_AUXADC_ADC7 0x880E
+#define MT6332_AUXADC_ADC8 0x8810
+#define MT6332_AUXADC_ADC9 0x8812
+#define MT6332_AUXADC_ADC10 0x8814
+#define MT6332_AUXADC_ADC11 0x8816
+#define MT6332_AUXADC_ADC12 0x8818
+#define MT6332_AUXADC_ADC13 0x881A
+#define MT6332_AUXADC_ADC14 0x881C
+#define MT6332_AUXADC_ADC15 0x881E
+#define MT6332_AUXADC_ADC16 0x8820
+#define MT6332_AUXADC_ADC17 0x8822
+#define MT6332_AUXADC_ADC18 0x8824
+#define MT6332_AUXADC_ADC19 0x8826
+#define MT6332_AUXADC_ADC20 0x8828
+#define MT6332_AUXADC_ADC21 0x882A
+#define MT6332_AUXADC_ADC22 0x882C
+#define MT6332_AUXADC_ADC23 0x882E
+#define MT6332_AUXADC_ADC24 0x8830
+#define MT6332_AUXADC_ADC25 0x8832
+#define MT6332_AUXADC_ADC26 0x8834
+#define MT6332_AUXADC_ADC27 0x8836
+#define MT6332_AUXADC_ADC28 0x8838
+#define MT6332_AUXADC_ADC29 0x883A
+#define MT6332_AUXADC_ADC30 0x883C
+#define MT6332_AUXADC_ADC31 0x883E
+#define MT6332_AUXADC_ADC32 0x8840
+#define MT6332_AUXADC_ADC33 0x8842
+#define MT6332_AUXADC_ADC34 0x8844
+#define MT6332_AUXADC_ADC35 0x8846
+#define MT6332_AUXADC_ADC36 0x8848
+#define MT6332_AUXADC_ADC37 0x884A
+#define MT6332_AUXADC_ADC38 0x884C
+#define MT6332_AUXADC_ADC39 0x884E
+#define MT6332_AUXADC_ADC40 0x8850
+#define MT6332_AUXADC_ADC41 0x8852
+#define MT6332_AUXADC_ADC42 0x8854
+#define MT6332_AUXADC_ADC43 0x8856
+#define MT6332_AUXADC_STA0 0x8858
+#define MT6332_AUXADC_STA1 0x885A
+#define MT6332_AUXADC_RQST0 0x885C
+#define MT6332_AUXADC_RQST0_SET 0x885E
+#define MT6332_AUXADC_RQST0_CLR 0x8860
+#define MT6332_AUXADC_RQST1 0x8862
+#define MT6332_AUXADC_RQST1_SET 0x8864
+#define MT6332_AUXADC_RQST1_CLR 0x8866
+#define MT6332_AUXADC_CON0 0x8868
+#define MT6332_AUXADC_CON1 0x886A
+#define MT6332_AUXADC_CON2 0x886C
+#define MT6332_AUXADC_CON3 0x886E
+#define MT6332_AUXADC_CON4 0x8870
+#define MT6332_AUXADC_CON5 0x8872
+#define MT6332_AUXADC_CON6 0x8874
+#define MT6332_AUXADC_CON7 0x8876
+#define MT6332_AUXADC_CON8 0x8878
+#define MT6332_AUXADC_CON9 0x887A
+#define MT6332_AUXADC_CON10 0x887C
+#define MT6332_AUXADC_CON11 0x887E
+#define MT6332_AUXADC_CON12 0x8880
+#define MT6332_AUXADC_CON13 0x8882
+#define MT6332_AUXADC_CON14 0x8884
+#define MT6332_AUXADC_CON15 0x8886
+#define MT6332_AUXADC_CON16 0x8888
+#define MT6332_AUXADC_CON17 0x888A
+#define MT6332_AUXADC_CON18 0x888C
+#define MT6332_AUXADC_CON19 0x888E
+#define MT6332_AUXADC_CON20 0x8890
+#define MT6332_AUXADC_CON21 0x8892
+#define MT6332_AUXADC_CON22 0x8894
+#define MT6332_AUXADC_CON23 0x8896
+#define MT6332_AUXADC_CON24 0x8898
+#define MT6332_AUXADC_CON25 0x889A
+#define MT6332_AUXADC_CON26 0x889C
+#define MT6332_AUXADC_CON27 0x889E
+#define MT6332_AUXADC_CON28 0x88A0
+#define MT6332_AUXADC_CON29 0x88A2
+#define MT6332_AUXADC_CON30 0x88A4
+#define MT6332_AUXADC_CON31 0x88A6
+#define MT6332_AUXADC_CON32 0x88A8
+#define MT6332_AUXADC_CON33 0x88AA
+#define MT6332_AUXADC_CON34 0x88AC
+#define MT6332_AUXADC_CON35 0x88AE
+#define MT6332_AUXADC_CON36 0x88B0
+#define MT6332_AUXADC_CON37 0x88B2
+#define MT6332_AUXADC_CON38 0x88B4
+#define MT6332_AUXADC_CON39 0x88B6
+#define MT6332_AUXADC_CON40 0x88B8
+#define MT6332_AUXADC_CON41 0x88BA
+#define MT6332_AUXADC_CON42 0x88BC
+#define MT6332_AUXADC_CON43 0x88BE
+#define MT6332_AUXADC_CON44 0x88C0
+#define MT6332_AUXADC_CON45 0x88C2
+#define MT6332_AUXADC_CON46 0x88C4
+#define MT6332_AUXADC_CON47 0x88C6
+#define MT6332_STRUP_CONA0 0x8C00
+#define MT6332_STRUP_CONA1 0x8C02
+#define MT6332_STRUP_CONA2 0x8C04
+#define MT6332_STRUP_CON0 0x8C06
+#define MT6332_STRUP_CON2 0x8C08
+#define MT6332_STRUP_CON3 0x8C0A
+#define MT6332_STRUP_CON4 0x8C0C
+#define MT6332_STRUP_CON5 0x8C0E
+#define MT6332_STRUP_CON6 0x8C10
+#define MT6332_STRUP_CON7 0x8C12
+#define MT6332_STRUP_CON8 0x8C14
+#define MT6332_STRUP_CON9 0x8C16
+#define MT6332_STRUP_CON10 0x8C18
+#define MT6332_STRUP_CON11 0x8C1A
+#define MT6332_STRUP_CON12 0x8C1C
+#define MT6332_STRUP_CON13 0x8C1E
+#define MT6332_STRUP_CON14 0x8C20
+#define MT6332_STRUP_CON15 0x8C22
+#define MT6332_STRUP_CON16 0x8C24
+#define MT6332_STRUP_CON17 0x8C26
+#define MT6332_FGADC_CON0 0x8C28
+#define MT6332_FGADC_CON1 0x8C2A
+#define MT6332_FGADC_CON2 0x8C2C
+#define MT6332_FGADC_CON3 0x8C2E
+#define MT6332_FGADC_CON4 0x8C30
+#define MT6332_FGADC_CON5 0x8C32
+#define MT6332_FGADC_CON6 0x8C34
+#define MT6332_FGADC_CON7 0x8C36
+#define MT6332_FGADC_CON8 0x8C38
+#define MT6332_FGADC_CON9 0x8C3A
+#define MT6332_FGADC_CON10 0x8C3C
+#define MT6332_FGADC_CON11 0x8C3E
+#define MT6332_FGADC_CON12 0x8C40
+#define MT6332_FGADC_CON13 0x8C42
+#define MT6332_FGADC_CON14 0x8C44
+#define MT6332_FGADC_CON15 0x8C46
+#define MT6332_FGADC_CON16 0x8C48
+#define MT6332_FGADC_CON17 0x8C4A
+#define MT6332_FGADC_CON18 0x8C4C
+#define MT6332_FGADC_CON19 0x8C4E
+#define MT6332_FGADC_CON20 0x8C50
+#define MT6332_FGADC_CON21 0x8C52
+#define MT6332_FGADC_CON22 0x8C54
+#define MT6332_OTP_CON0 0x8C56
+#define MT6332_OTP_CON1 0x8C58
+#define MT6332_OTP_CON2 0x8C5A
+#define MT6332_OTP_CON3 0x8C5C
+#define MT6332_OTP_CON4 0x8C5E
+#define MT6332_OTP_CON5 0x8C60
+#define MT6332_OTP_CON6 0x8C62
+#define MT6332_OTP_CON7 0x8C64
+#define MT6332_OTP_CON8 0x8C66
+#define MT6332_OTP_CON9 0x8C68
+#define MT6332_OTP_CON10 0x8C6A
+#define MT6332_OTP_CON11 0x8C6C
+#define MT6332_OTP_CON12 0x8C6E
+#define MT6332_OTP_CON13 0x8C70
+#define MT6332_OTP_CON14 0x8C72
+#define MT6332_OTP_DOUT_0_15 0x8C74
+#define MT6332_OTP_DOUT_16_31 0x8C76
+#define MT6332_OTP_DOUT_32_47 0x8C78
+#define MT6332_OTP_DOUT_48_63 0x8C7A
+#define MT6332_OTP_DOUT_64_79 0x8C7C
+#define MT6332_OTP_DOUT_80_95 0x8C7E
+#define MT6332_OTP_DOUT_96_111 0x8C80
+#define MT6332_OTP_DOUT_112_127 0x8C82
+#define MT6332_OTP_DOUT_128_143 0x8C84
+#define MT6332_OTP_DOUT_144_159 0x8C86
+#define MT6332_OTP_DOUT_160_175 0x8C88
+#define MT6332_OTP_DOUT_176_191 0x8C8A
+#define MT6332_OTP_DOUT_192_207 0x8C8C
+#define MT6332_OTP_DOUT_208_223 0x8C8E
+#define MT6332_OTP_DOUT_224_239 0x8C90
+#define MT6332_OTP_DOUT_240_255 0x8C92
+#define MT6332_OTP_VAL_0_15 0x8C94
+#define MT6332_OTP_VAL_16_31 0x8C96
+#define MT6332_OTP_VAL_32_47 0x8C98
+#define MT6332_OTP_VAL_48_63 0x8C9A
+#define MT6332_OTP_VAL_64_79 0x8C9C
+#define MT6332_OTP_VAL_80_95 0x8C9E
+#define MT6332_OTP_VAL_96_111 0x8CA0
+#define MT6332_OTP_VAL_112_127 0x8CA2
+#define MT6332_OTP_VAL_128_143 0x8CA4
+#define MT6332_OTP_VAL_144_159 0x8CA6
+#define MT6332_OTP_VAL_160_175 0x8CA8
+#define MT6332_OTP_VAL_176_191 0x8CAA
+#define MT6332_OTP_VAL_192_207 0x8CAC
+#define MT6332_OTP_VAL_208_223 0x8CAE
+#define MT6332_OTP_VAL_224_239 0x8CB0
+#define MT6332_OTP_VAL_240_255 0x8CB2
+#define MT6332_LDO_CON0 0x8CB4
+#define MT6332_LDO_CON1 0x8CB6
+#define MT6332_LDO_CON2 0x8CB8
+#define MT6332_LDO_CON3 0x8CBA
+#define MT6332_LDO_CON5 0x8CBC
+#define MT6332_LDO_CON6 0x8CBE
+#define MT6332_LDO_CON7 0x8CC0
+#define MT6332_LDO_CON8 0x8CC2
+#define MT6332_LDO_CON9 0x8CC4
+#define MT6332_LDO_CON10 0x8CC6
+#define MT6332_LDO_CON11 0x8CC8
+#define MT6332_LDO_CON12 0x8CCA
+#define MT6332_LDO_CON13 0x8CCC
+#define MT6332_FQMTR_CON0 0x8CCE
+#define MT6332_FQMTR_CON1 0x8CD0
+#define MT6332_FQMTR_CON2 0x8CD2
+#define MT6332_IWLED_CON0 0x8CD4
+#define MT6332_IWLED_DEG 0x8CD6
+#define MT6332_IWLED_STATUS 0x8CD8
+#define MT6332_IWLED_EN_CTRL 0x8CDA
+#define MT6332_IWLED_CON1 0x8CDC
+#define MT6332_IWLED_CON2 0x8CDE
+#define MT6332_IWLED_TRIM0 0x8CE0
+#define MT6332_IWLED_TRIM1 0x8CE2
+#define MT6332_IWLED_CON3 0x8CE4
+#define MT6332_IWLED_CON4 0x8CE6
+#define MT6332_IWLED_CON5 0x8CE8
+#define MT6332_IWLED_CON6 0x8CEA
+#define MT6332_IWLED_CON7 0x8CEC
+#define MT6332_IWLED_CON8 0x8CEE
+#define MT6332_IWLED_CON9 0x8CF0
+#define MT6332_SPK_CON0 0x8CF2
+#define MT6332_SPK_CON1 0x8CF4
+#define MT6332_SPK_CON2 0x8CF6
+#define MT6332_SPK_CON3 0x8CF8
+#define MT6332_SPK_CON4 0x8CFA
+#define MT6332_SPK_CON5 0x8CFC
+#define MT6332_SPK_CON6 0x8CFE
+#define MT6332_SPK_CON7 0x8D00
+#define MT6332_SPK_CON8 0x8D02
+#define MT6332_SPK_CON9 0x8D04
+#define MT6332_SPK_CON10 0x8D06
+#define MT6332_SPK_CON11 0x8D08
+#define MT6332_SPK_CON12 0x8D0A
+#define MT6332_SPK_CON13 0x8D0C
+#define MT6332_SPK_CON14 0x8D0E
+#define MT6332_SPK_CON15 0x8D10
+#define MT6332_SPK_CON16 0x8D12
+#define MT6332_TESTI_CON0 0x8D14
+#define MT6332_TESTI_CON1 0x8D16
+#define MT6332_TESTI_CON2 0x8D18
+#define MT6332_TESTI_CON3 0x8D1A
+#define MT6332_TESTI_CON4 0x8D1C
+#define MT6332_TESTI_CON5 0x8D1E
+#define MT6332_TESTI_CON6 0x8D20
+#define MT6332_TESTI_MUX_CON0 0x8D22
+#define MT6332_TESTI_MUX_CON1 0x8D24
+#define MT6332_TESTI_MUX_CON2 0x8D26
+#define MT6332_TESTI_MUX_CON3 0x8D28
+#define MT6332_TESTI_MUX_CON4 0x8D2A
+#define MT6332_TESTI_MUX_CON5 0x8D2C
+#define MT6332_TESTI_MUX_CON6 0x8D2E
+#define MT6332_TESTO_CON0 0x8D30
+#define MT6332_TESTO_CON1 0x8D32
+#define MT6332_TEST_OMUX_CON0 0x8D34
+#define MT6332_TEST_OMUX_CON1 0x8D36
+#define MT6332_DEBUG_CON0 0x8D38
+#define MT6332_DEBUG_CON1 0x8D3A
+#define MT6332_DEBUG_CON2 0x8D3C
+#define MT6332_FGADC_CON23 0x8D3E
+#define MT6332_FGADC_CON24 0x8D40
+#define MT6332_FGADC_CON25 0x8D42
+#define MT6332_TOP_RST_STATUS 0x8D44
+#define MT6332_TOP_RST_STATUS_SET 0x8D46
+#define MT6332_TOP_RST_STATUS_CLR 0x8D48
+#define MT6332_VDVFS2_CON28 0x8D4A
+
+#endif /* __MFD_MT6332_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6357/core.h b/include/linux/mfd/mt6357/core.h
new file mode 100644
index 000000000000..2441611264fd
--- /dev/null
+++ b/include/linux/mfd/mt6357/core.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 BayLibre, SAS
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+
+#ifndef __MFD_MT6357_CORE_H__
+#define __MFD_MT6357_CORE_H__
+
+enum mt6357_irq_top_status_shift {
+ MT6357_BUCK_TOP = 0,
+ MT6357_LDO_TOP,
+ MT6357_PSC_TOP,
+ MT6357_SCK_TOP,
+ MT6357_BM_TOP,
+ MT6357_HK_TOP,
+ MT6357_XPP_TOP,
+ MT6357_AUD_TOP,
+ MT6357_MISC_TOP,
+};
+
+enum mt6357_irq_numbers {
+ MT6357_IRQ_VPROC_OC = 0,
+ MT6357_IRQ_VCORE_OC,
+ MT6357_IRQ_VMODEM_OC,
+ MT6357_IRQ_VS1_OC,
+ MT6357_IRQ_VPA_OC,
+ MT6357_IRQ_VCORE_PREOC,
+ MT6357_IRQ_VFE28_OC = 16,
+ MT6357_IRQ_VXO22_OC,
+ MT6357_IRQ_VRF18_OC,
+ MT6357_IRQ_VRF12_OC,
+ MT6357_IRQ_VEFUSE_OC,
+ MT6357_IRQ_VCN33_OC,
+ MT6357_IRQ_VCN28_OC,
+ MT6357_IRQ_VCN18_OC,
+ MT6357_IRQ_VCAMA_OC,
+ MT6357_IRQ_VCAMD_OC,
+ MT6357_IRQ_VCAMIO_OC,
+ MT6357_IRQ_VLDO28_OC,
+ MT6357_IRQ_VUSB33_OC,
+ MT6357_IRQ_VAUX18_OC,
+ MT6357_IRQ_VAUD28_OC,
+ MT6357_IRQ_VIO28_OC,
+ MT6357_IRQ_VIO18_OC,
+ MT6357_IRQ_VSRAM_PROC_OC,
+ MT6357_IRQ_VSRAM_OTHERS_OC,
+ MT6357_IRQ_VIBR_OC,
+ MT6357_IRQ_VDRAM_OC,
+ MT6357_IRQ_VMC_OC,
+ MT6357_IRQ_VMCH_OC,
+ MT6357_IRQ_VEMC_OC,
+ MT6357_IRQ_VSIM1_OC,
+ MT6357_IRQ_VSIM2_OC,
+ MT6357_IRQ_PWRKEY = 48,
+ MT6357_IRQ_HOMEKEY,
+ MT6357_IRQ_PWRKEY_R,
+ MT6357_IRQ_HOMEKEY_R,
+ MT6357_IRQ_NI_LBAT_INT,
+ MT6357_IRQ_CHRDET,
+ MT6357_IRQ_CHRDET_EDGE,
+ MT6357_IRQ_VCDT_HV_DET,
+ MT6357_IRQ_WATCHDOG,
+ MT6357_IRQ_VBATON_UNDET,
+ MT6357_IRQ_BVALID_DET,
+ MT6357_IRQ_OV,
+ MT6357_IRQ_RTC = 64,
+ MT6357_IRQ_FG_BAT0_H = 80,
+ MT6357_IRQ_FG_BAT0_L,
+ MT6357_IRQ_FG_CUR_H,
+ MT6357_IRQ_FG_CUR_L,
+ MT6357_IRQ_FG_ZCV,
+ MT6357_IRQ_BATON_LV = 96,
+ MT6357_IRQ_BATON_HT,
+ MT6357_IRQ_BAT_H = 112,
+ MT6357_IRQ_BAT_L,
+ MT6357_IRQ_AUXADC_IMP,
+ MT6357_IRQ_NAG_C_DLTV,
+ MT6357_IRQ_AUDIO = 128,
+ MT6357_IRQ_ACCDET = 133,
+ MT6357_IRQ_ACCDET_EINT0,
+ MT6357_IRQ_ACCDET_EINT1,
+ MT6357_IRQ_SPI_CMD_ALERT = 144,
+ MT6357_IRQ_NR,
+};
+
+#define MT6357_IRQ_BUCK_BASE MT6357_IRQ_VPROC_OC
+#define MT6357_IRQ_LDO_BASE MT6357_IRQ_VFE28_OC
+#define MT6357_IRQ_PSC_BASE MT6357_IRQ_PWRKEY
+#define MT6357_IRQ_SCK_BASE MT6357_IRQ_RTC
+#define MT6357_IRQ_BM_BASE MT6357_IRQ_FG_BAT0_H
+#define MT6357_IRQ_HK_BASE MT6357_IRQ_BAT_H
+#define MT6357_IRQ_AUD_BASE MT6357_IRQ_AUDIO
+#define MT6357_IRQ_MISC_BASE MT6357_IRQ_SPI_CMD_ALERT
+
+#define MT6357_IRQ_BUCK_BITS (MT6357_IRQ_VCORE_PREOC - MT6357_IRQ_BUCK_BASE + 1)
+#define MT6357_IRQ_LDO_BITS (MT6357_IRQ_VSIM2_OC - MT6357_IRQ_LDO_BASE + 1)
+#define MT6357_IRQ_PSC_BITS (MT6357_IRQ_VCDT_HV_DET - MT6357_IRQ_PSC_BASE + 1)
+#define MT6357_IRQ_SCK_BITS (MT6357_IRQ_RTC - MT6357_IRQ_SCK_BASE + 1)
+#define MT6357_IRQ_BM_BITS (MT6357_IRQ_BATON_HT - MT6357_IRQ_BM_BASE + 1)
+#define MT6357_IRQ_HK_BITS (MT6357_IRQ_NAG_C_DLTV - MT6357_IRQ_HK_BASE + 1)
+#define MT6357_IRQ_AUD_BITS (MT6357_IRQ_ACCDET_EINT1 - MT6357_IRQ_AUD_BASE + 1)
+#define MT6357_IRQ_MISC_BITS \
+ (MT6357_IRQ_SPI_CMD_ALERT - MT6357_IRQ_MISC_BASE + 1)
+
+#define MT6357_TOP_GEN(sp) \
+{ \
+ .hwirq_base = MT6357_IRQ_##sp##_BASE, \
+ .num_int_regs = \
+ ((MT6357_IRQ_##sp##_BITS - 1) / \
+ MTK_PMIC_REG_WIDTH) + 1, \
+ .en_reg = MT6357_##sp##_TOP_INT_CON0, \
+ .en_reg_shift = 0x6, \
+ .sta_reg = MT6357_##sp##_TOP_INT_STATUS0, \
+ .sta_reg_shift = 0x2, \
+ .top_offset = MT6357_##sp##_TOP, \
+}
+
+#endif /* __MFD_MT6357_CORE_H__ */
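
(Reviewer note, not part of the patch: MT6357_TOP_GEN() emits a designated
initializer whose fields follow the irq_top_t layout used by the existing
mt6358 interrupt code, where MTK_PMIC_REG_WIDTH is 16. A sketch of the
intended use, assuming that driver is extended to cover MT6357:)

	static const struct irq_top_t mt6357_ints[] = {
		MT6357_TOP_GEN(BUCK),
		MT6357_TOP_GEN(LDO),
		MT6357_TOP_GEN(PSC),
		MT6357_TOP_GEN(SCK),
		MT6357_TOP_GEN(BM),
		MT6357_TOP_GEN(HK),
		MT6357_TOP_GEN(AUD),
		MT6357_TOP_GEN(MISC),
	};

	/*
	 * MT6357_TOP_GEN(BUCK), for instance, expands to:
	 *   { .hwirq_base = MT6357_IRQ_VPROC_OC, .num_int_regs = 1,
	 *     .en_reg = MT6357_BUCK_TOP_INT_CON0, .en_reg_shift = 0x6,
	 *     .sta_reg = MT6357_BUCK_TOP_INT_STATUS0, .sta_reg_shift = 0x2,
	 *     .top_offset = MT6357_BUCK_TOP }
	 * since MT6357_IRQ_BUCK_BITS is 6 and fits in one 16-bit register.
	 */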
diff --git a/include/linux/mfd/mt6357/registers.h b/include/linux/mfd/mt6357/registers.h
new file mode 100644
index 000000000000..e24af83b618d
--- /dev/null
+++ b/include/linux/mfd/mt6357/registers.h
@@ -0,0 +1,1574 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6357_REGISTERS_H__
+#define __MFD_MT6357_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6357_TOP0_ID 0x0
+#define MT6357_TOP0_REV0 0x2
+#define MT6357_TOP0_DSN_DBI 0x4
+#define MT6357_TOP0_DSN_DXI 0x6
+#define MT6357_HWCID 0x8
+#define MT6357_SWCID 0xa
+#define MT6357_PONSTS 0xc
+#define MT6357_POFFSTS 0xe
+#define MT6357_PSTSCTL 0x10
+#define MT6357_PG_DEB_STS0 0x12
+#define MT6357_PG_SDN_STS0 0x14
+#define MT6357_OC_SDN_STS0 0x16
+#define MT6357_THERMALSTATUS 0x18
+#define MT6357_TOP_CON 0x1a
+#define MT6357_TEST_OUT 0x1c
+#define MT6357_TEST_CON0 0x1e
+#define MT6357_TEST_CON1 0x20
+#define MT6357_TESTMODE_SW 0x22
+#define MT6357_TOPSTATUS 0x24
+#define MT6357_TDSEL_CON 0x26
+#define MT6357_RDSEL_CON 0x28
+#define MT6357_SMT_CON0 0x2a
+#define MT6357_SMT_CON1 0x2c
+#define MT6357_TOP_RSV0 0x2e
+#define MT6357_TOP_RSV1 0x30
+#define MT6357_DRV_CON0 0x32
+#define MT6357_DRV_CON1 0x34
+#define MT6357_DRV_CON2 0x36
+#define MT6357_DRV_CON3 0x38
+#define MT6357_FILTER_CON0 0x3a
+#define MT6357_FILTER_CON1 0x3c
+#define MT6357_FILTER_CON2 0x3e
+#define MT6357_FILTER_CON3 0x40
+#define MT6357_TOP_STATUS 0x42
+#define MT6357_TOP_STATUS_SET 0x44
+#define MT6357_TOP_STATUS_CLR 0x46
+#define MT6357_TOP_TRAP 0x48
+#define MT6357_TOP1_ID 0x80
+#define MT6357_TOP1_REV0 0x82
+#define MT6357_TOP1_DSN_DBI 0x84
+#define MT6357_TOP1_DSN_DXI 0x86
+#define MT6357_GPIO_DIR0 0x88
+#define MT6357_GPIO_DIR0_SET 0x8a
+#define MT6357_GPIO_DIR0_CLR 0x8c
+#define MT6357_GPIO_PULLEN0 0x8e
+#define MT6357_GPIO_PULLEN0_SET 0x90
+#define MT6357_GPIO_PULLEN0_CLR 0x92
+#define MT6357_GPIO_PULLSEL0 0x94
+#define MT6357_GPIO_PULLSEL0_SET 0x96
+#define MT6357_GPIO_PULLSEL0_CLR 0x98
+#define MT6357_GPIO_DINV0 0x9a
+#define MT6357_GPIO_DINV0_SET 0x9c
+#define MT6357_GPIO_DINV0_CLR 0x9e
+#define MT6357_GPIO_DOUT0 0xa0
+#define MT6357_GPIO_DOUT0_SET 0xa2
+#define MT6357_GPIO_DOUT0_CLR 0xa4
+#define MT6357_GPIO_PI0 0xa6
+#define MT6357_GPIO_POE0 0xa8
+#define MT6357_GPIO_MODE0 0xaa
+#define MT6357_GPIO_MODE0_SET 0xac
+#define MT6357_GPIO_MODE0_CLR 0xae
+#define MT6357_GPIO_MODE1 0xb0
+#define MT6357_GPIO_MODE1_SET 0xb2
+#define MT6357_GPIO_MODE1_CLR 0xb4
+#define MT6357_GPIO_MODE2 0xb6
+#define MT6357_GPIO_MODE2_SET 0xb8
+#define MT6357_GPIO_MODE2_CLR 0xba
+#define MT6357_GPIO_MODE3 0xbc
+#define MT6357_GPIO_MODE3_SET 0xbe
+#define MT6357_GPIO_MODE3_CLR 0xc0
+#define MT6357_GPIO_RSV 0xc2
+#define MT6357_TOP2_ID 0x100
+#define MT6357_TOP2_REV0 0x102
+#define MT6357_TOP2_DSN_DBI 0x104
+#define MT6357_TOP2_DSN_DXI 0x106
+#define MT6357_TOP_PAM0 0x108
+#define MT6357_TOP_PAM1 0x10a
+#define MT6357_TOP_CKPDN_CON0 0x10c
+#define MT6357_TOP_CKPDN_CON0_SET 0x10e
+#define MT6357_TOP_CKPDN_CON0_CLR 0x110
+#define MT6357_TOP_CKPDN_CON1 0x112
+#define MT6357_TOP_CKPDN_CON1_SET 0x114
+#define MT6357_TOP_CKPDN_CON1_CLR 0x116
+#define MT6357_TOP_CKSEL_CON0 0x118
+#define MT6357_TOP_CKSEL_CON0_SET 0x11a
+#define MT6357_TOP_CKSEL_CON0_CLR 0x11c
+#define MT6357_TOP_CKSEL_CON1 0x11e
+#define MT6357_TOP_CKSEL_CON1_SET 0x120
+#define MT6357_TOP_CKSEL_CON1_CLR 0x122
+#define MT6357_TOP_CKDIVSEL_CON0 0x124
+#define MT6357_TOP_CKDIVSEL_CON0_SET 0x126
+#define MT6357_TOP_CKDIVSEL_CON0_CLR 0x128
+#define MT6357_TOP_CKHWEN_CON0 0x12a
+#define MT6357_TOP_CKHWEN_CON0_SET 0x12c
+#define MT6357_TOP_CKHWEN_CON0_CLR 0x12e
+#define MT6357_TOP_CKTST_CON0 0x130
+#define MT6357_TOP_CKTST_CON1 0x132
+#define MT6357_TOP_CLK_CON0 0x134
+#define MT6357_TOP_CLK_CON0_SET 0x136
+#define MT6357_TOP_CLK_CON0_CLR 0x138
+#define MT6357_TOP_DCM_CON0 0x13a
+#define MT6357_TOP_HANDOVER_DEBUG0 0x13c
+#define MT6357_TOP_RST_CON0 0x13e
+#define MT6357_TOP_RST_CON0_SET 0x140
+#define MT6357_TOP_RST_CON0_CLR 0x142
+#define MT6357_TOP_RST_CON1 0x144
+#define MT6357_TOP_RST_CON1_SET 0x146
+#define MT6357_TOP_RST_CON1_CLR 0x148
+#define MT6357_TOP_RST_CON2 0x14a
+#define MT6357_TOP_RST_MISC 0x14c
+#define MT6357_TOP_RST_MISC_SET 0x14e
+#define MT6357_TOP_RST_MISC_CLR 0x150
+#define MT6357_TOP_RST_STATUS 0x152
+#define MT6357_TOP_RST_STATUS_SET 0x154
+#define MT6357_TOP_RST_STATUS_CLR 0x156
+#define MT6357_TOP2_ELR_NUM 0x158
+#define MT6357_TOP2_ELR0 0x15a
+#define MT6357_TOP2_ELR1 0x15c
+#define MT6357_TOP3_ID 0x180
+#define MT6357_TOP3_REV0 0x182
+#define MT6357_TOP3_DSN_DBI 0x184
+#define MT6357_TOP3_DSN_DXI 0x186
+#define MT6357_MISC_TOP_INT_CON0 0x188
+#define MT6357_MISC_TOP_INT_CON0_SET 0x18a
+#define MT6357_MISC_TOP_INT_CON0_CLR 0x18c
+#define MT6357_MISC_TOP_INT_MASK_CON0 0x18e
+#define MT6357_MISC_TOP_INT_MASK_CON0_SET 0x190
+#define MT6357_MISC_TOP_INT_MASK_CON0_CLR 0x192
+#define MT6357_MISC_TOP_INT_STATUS0 0x194
+#define MT6357_MISC_TOP_INT_RAW_STATUS0 0x196
+#define MT6357_TOP_INT_MASK_CON0 0x198
+#define MT6357_TOP_INT_MASK_CON0_SET 0x19a
+#define MT6357_TOP_INT_MASK_CON0_CLR 0x19c
+#define MT6357_TOP_INT_STATUS0 0x19e
+#define MT6357_TOP_INT_RAW_STATUS0 0x1a0
+#define MT6357_TOP_INT_CON0 0x1a2
+#define MT6357_PLT0_ID 0x380
+#define MT6357_PLT0_REV0 0x382
+#define MT6357_PLT0_REV1 0x384
+#define MT6357_PLT0_DSN_DXI 0x386
+#define MT6357_FQMTR_CON0 0x388
+#define MT6357_FQMTR_CON1 0x38a
+#define MT6357_FQMTR_CON2 0x38c
+#define MT6357_TOP_CLK_TRIM 0x38e
+#define MT6357_OTP_CON0 0x390
+#define MT6357_OTP_CON1 0x392
+#define MT6357_OTP_CON2 0x394
+#define MT6357_OTP_CON3 0x396
+#define MT6357_OTP_CON4 0x398
+#define MT6357_OTP_CON5 0x39a
+#define MT6357_OTP_CON6 0x39c
+#define MT6357_OTP_CON7 0x39e
+#define MT6357_OTP_CON8 0x3a0
+#define MT6357_OTP_CON9 0x3a2
+#define MT6357_OTP_CON10 0x3a4
+#define MT6357_OTP_CON11 0x3a6
+#define MT6357_OTP_CON12 0x3a8
+#define MT6357_OTP_CON13 0x3aa
+#define MT6357_OTP_CON14 0x3ac
+#define MT6357_TOP_TMA_KEY 0x3ae
+#define MT6357_TOP_MDB_CONF0 0x3b0
+#define MT6357_TOP_MDB_CONF1 0x3b2
+#define MT6357_TOP_MDB_CONF2 0x3b4
+#define MT6357_PLT0_ELR_NUM 0x3b6
+#define MT6357_PLT0_ELR0 0x3b8
+#define MT6357_PLT0_ELR1 0x3ba
+#define MT6357_SPISLV_ID 0x400
+#define MT6357_SPISLV_REV0 0x402
+#define MT6357_SPISLV_REV1 0x404
+#define MT6357_SPISLV_DSN_DXI 0x406
+#define MT6357_RG_SPI_CON0 0x408
+#define MT6357_DEW_DIO_EN 0x40a
+#define MT6357_DEW_READ_TEST 0x40c
+#define MT6357_DEW_WRITE_TEST 0x40e
+#define MT6357_DEW_CRC_SWRST 0x410
+#define MT6357_DEW_CRC_EN 0x412
+#define MT6357_DEW_CRC_VAL 0x414
+#define MT6357_DEW_DBG_MON_SEL 0x416
+#define MT6357_DEW_CIPHER_KEY_SEL 0x418
+#define MT6357_DEW_CIPHER_IV_SEL 0x41a
+#define MT6357_DEW_CIPHER_EN 0x41c
+#define MT6357_DEW_CIPHER_RDY 0x41e
+#define MT6357_DEW_CIPHER_MODE 0x420
+#define MT6357_DEW_CIPHER_SWRST 0x422
+#define MT6357_DEW_RDDMY_NO 0x424
+#define MT6357_INT_TYPE_CON0 0x426
+#define MT6357_INT_TYPE_CON0_SET 0x428
+#define MT6357_INT_TYPE_CON0_CLR 0x42a
+#define MT6357_INT_STA 0x42c
+#define MT6357_RG_SPI_CON1 0x42e
+#define MT6357_RG_SPI_CON2 0x430
+#define MT6357_RG_SPI_CON3 0x432
+#define MT6357_RG_SPI_CON4 0x434
+#define MT6357_RG_SPI_CON5 0x436
+#define MT6357_RG_SPI_CON6 0x438
+#define MT6357_RG_SPI_CON7 0x43a
+#define MT6357_RG_SPI_CON8 0x43c
+#define MT6357_RG_SPI_CON9 0x43e
+#define MT6357_RG_SPI_CON10 0x440
+#define MT6357_RG_SPI_CON11 0x442
+#define MT6357_RG_SPI_CON12 0x444
+#define MT6357_RG_SPI_CON13 0x446
+#define MT6357_TOP_SPI_CON0 0x448
+#define MT6357_TOP_SPI_CON1 0x44a
+#define MT6357_SCK_TOP_DSN_ID 0x500
+#define MT6357_SCK_TOP_DSN_REV0 0x502
+#define MT6357_SCK_TOP_DBI 0x504
+#define MT6357_SCK_TOP_DXI 0x506
+#define MT6357_SCK_TOP_TPM0 0x508
+#define MT6357_SCK_TOP_TPM1 0x50a
+#define MT6357_SCK_TOP_CON0 0x50c
+#define MT6357_SCK_TOP_CON1 0x50e
+#define MT6357_SCK_TOP_TEST_OUT 0x510
+#define MT6357_SCK_TOP_TEST_CON0 0x512
+#define MT6357_SCK_TOP_CKPDN_CON0 0x514
+#define MT6357_SCK_TOP_CKPDN_CON0_SET 0x516
+#define MT6357_SCK_TOP_CKPDN_CON0_CLR 0x518
+#define MT6357_SCK_TOP_CKHWEN_CON0 0x51a
+#define MT6357_SCK_TOP_CKHWEN_CON0_SET 0x51c
+#define MT6357_SCK_TOP_CKHWEN_CON0_CLR 0x51e
+#define MT6357_SCK_TOP_CKTST_CON 0x520
+#define MT6357_SCK_TOP_RST_CON0 0x522
+#define MT6357_SCK_TOP_RST_CON0_SET 0x524
+#define MT6357_SCK_TOP_RST_CON0_CLR 0x526
+#define MT6357_SCK_TOP_INT_CON0 0x528
+#define MT6357_SCK_TOP_INT_CON0_SET 0x52a
+#define MT6357_SCK_TOP_INT_CON0_CLR 0x52c
+#define MT6357_SCK_TOP_INT_MASK_CON0 0x52e
+#define MT6357_SCK_TOP_INT_MASK_CON0_SET 0x530
+#define MT6357_SCK_TOP_INT_MASK_CON0_CLR 0x532
+#define MT6357_SCK_TOP_INT_STATUS0 0x534
+#define MT6357_SCK_TOP_INT_RAW_STATUS0 0x536
+#define MT6357_SCK_TOP_INT_MISC_CON 0x538
+#define MT6357_EOSC_CALI_CON0 0x53a
+#define MT6357_EOSC_CALI_CON1 0x53c
+#define MT6357_RTC_MIX_CON0 0x53e
+#define MT6357_RTC_MIX_CON1 0x540
+#define MT6357_RTC_MIX_CON2 0x542
+#define MT6357_RTC_DSN_ID 0x580
+#define MT6357_RTC_DSN_REV0 0x582
+#define MT6357_RTC_DBI 0x584
+#define MT6357_RTC_DXI 0x586
+#define MT6357_RTC_BBPU 0x588
+#define MT6357_RTC_IRQ_STA 0x58a
+#define MT6357_RTC_IRQ_EN 0x58c
+#define MT6357_RTC_CII_EN 0x58e
+#define MT6357_RTC_AL_MASK 0x590
+#define MT6357_RTC_TC_SEC 0x592
+#define MT6357_RTC_TC_MIN 0x594
+#define MT6357_RTC_TC_HOU 0x596
+#define MT6357_RTC_TC_DOM 0x598
+#define MT6357_RTC_TC_DOW 0x59a
+#define MT6357_RTC_TC_MTH 0x59c
+#define MT6357_RTC_TC_YEA 0x59e
+#define MT6357_RTC_AL_SEC 0x5a0
+#define MT6357_RTC_AL_MIN 0x5a2
+#define MT6357_RTC_AL_HOU 0x5a4
+#define MT6357_RTC_AL_DOM 0x5a6
+#define MT6357_RTC_AL_DOW 0x5a8
+#define MT6357_RTC_AL_MTH 0x5aa
+#define MT6357_RTC_AL_YEA 0x5ac
+#define MT6357_RTC_OSC32CON 0x5ae
+#define MT6357_RTC_POWERKEY1 0x5b0
+#define MT6357_RTC_POWERKEY2 0x5b2
+#define MT6357_RTC_PDN1 0x5b4
+#define MT6357_RTC_PDN2 0x5b6
+#define MT6357_RTC_SPAR0 0x5b8
+#define MT6357_RTC_SPAR1 0x5ba
+#define MT6357_RTC_PROT 0x5bc
+#define MT6357_RTC_DIFF 0x5be
+#define MT6357_RTC_CALI 0x5c0
+#define MT6357_RTC_WRTGR 0x5c2
+#define MT6357_RTC_CON 0x5c4
+#define MT6357_RTC_SEC_CTRL 0x5c6
+#define MT6357_RTC_INT_CNT 0x5c8
+#define MT6357_RTC_SEC_DAT0 0x5ca
+#define MT6357_RTC_SEC_DAT1 0x5cc
+#define MT6357_RTC_SEC_DAT2 0x5ce
+#define MT6357_RTC_SEC_DSN_ID 0x600
+#define MT6357_RTC_SEC_DSN_REV0 0x602
+#define MT6357_RTC_SEC_DBI 0x604
+#define MT6357_RTC_SEC_DXI 0x606
+#define MT6357_RTC_TC_SEC_SEC 0x608
+#define MT6357_RTC_TC_MIN_SEC 0x60a
+#define MT6357_RTC_TC_HOU_SEC 0x60c
+#define MT6357_RTC_TC_DOM_SEC 0x60e
+#define MT6357_RTC_TC_DOW_SEC 0x610
+#define MT6357_RTC_TC_MTH_SEC 0x612
+#define MT6357_RTC_TC_YEA_SEC 0x614
+#define MT6357_RTC_SEC_CK_PDN 0x616
+#define MT6357_RTC_SEC_WRTGR 0x618
+#define MT6357_DCXO_DSN_ID 0x780
+#define MT6357_DCXO_DSN_REV0 0x782
+#define MT6357_DCXO_DSN_DBI 0x784
+#define MT6357_DCXO_DSN_DXI 0x786
+#define MT6357_DCXO_CW00 0x788
+#define MT6357_DCXO_CW00_SET 0x78a
+#define MT6357_DCXO_CW00_CLR 0x78c
+#define MT6357_DCXO_CW01 0x78e
+#define MT6357_DCXO_CW02 0x790
+#define MT6357_DCXO_CW03 0x792
+#define MT6357_DCXO_CW04 0x794
+#define MT6357_DCXO_CW05 0x796
+#define MT6357_DCXO_CW06 0x798
+#define MT6357_DCXO_CW07 0x79a
+#define MT6357_DCXO_CW08 0x79c
+#define MT6357_DCXO_CW09 0x79e
+#define MT6357_DCXO_CW10 0x7a0
+#define MT6357_DCXO_CW11 0x7a2
+#define MT6357_DCXO_CW11_SET 0x7a4
+#define MT6357_DCXO_CW11_CLR 0x7a6
+#define MT6357_DCXO_CW12 0x7a8
+#define MT6357_DCXO_CW13 0x7aa
+#define MT6357_DCXO_CW14 0x7ac
+#define MT6357_DCXO_CW15 0x7ae
+#define MT6357_DCXO_CW16 0x7b0
+#define MT6357_DCXO_CW17 0x7b2
+#define MT6357_DCXO_CW18 0x7b4
+#define MT6357_DCXO_CW19 0x7b6
+#define MT6357_DCXO_CW20 0x7b8
+#define MT6357_DCXO_CW21 0x7ba
+#define MT6357_DCXO_CW22 0x7bc
+#define MT6357_DCXO_ELR_NUM 0x7be
+#define MT6357_DCXO_ELR0 0x7c0
+#define MT6357_PSC_TOP_ID 0x900
+#define MT6357_PSC_TOP_REV0 0x902
+#define MT6357_PSC_TOP_DBI 0x904
+#define MT6357_PSC_TOP_DXI 0x906
+#define MT6357_PSC_TPM0 0x908
+#define MT6357_PSC_TPM1 0x90a
+#define MT6357_PSC_TOP_RSTCTL_0 0x90c
+#define MT6357_PSC_TOP_INT_CON0 0x90e
+#define MT6357_PSC_TOP_INT_CON0_SET 0x910
+#define MT6357_PSC_TOP_INT_CON0_CLR 0x912
+#define MT6357_PSC_TOP_INT_MASK_CON0 0x914
+#define MT6357_PSC_TOP_INT_MASK_CON0_SET 0x916
+#define MT6357_PSC_TOP_INT_MASK_CON0_CLR 0x918
+#define MT6357_PSC_TOP_INT_STATUS0 0x91a
+#define MT6357_PSC_TOP_INT_RAW_STATUS0 0x91c
+#define MT6357_PSC_TOP_INT_MISC_CON 0x91e
+#define MT6357_PSC_TOP_INT_MISC_CON_SET 0x920
+#define MT6357_PSC_TOP_INT_MISC_CON_CLR 0x922
+#define MT6357_PSC_TOP_MON_CTL 0x924
+#define MT6357_STRUP_ID 0x980
+#define MT6357_STRUP_REV0 0x982
+#define MT6357_STRUP_DBI 0x984
+#define MT6357_STRUP_DXI 0x986
+#define MT6357_STRUP_ANA_CON0 0x988
+#define MT6357_STRUP_ANA_CON1 0x98a
+#define MT6357_STRUP_ANA_CON2 0x98c
+#define MT6357_STRUP_ELR_NUM 0x98e
+#define MT6357_STRUP_ELR_0 0x990
+#define MT6357_PSEQ_ID 0xa00
+#define MT6357_PSEQ_REV0 0xa02
+#define MT6357_PSEQ_DBI 0xa04
+#define MT6357_PSEQ_DXI 0xa06
+#define MT6357_PPCCTL0 0xa08
+#define MT6357_PPCCTL1 0xa0a
+#define MT6357_PPCCTL2 0xa0c
+#define MT6357_PPCCFG0 0xa0e
+#define MT6357_PPCTST0 0xa10
+#define MT6357_PORFLAG 0xa12
+#define MT6357_STRUP_CON0 0xa14
+#define MT6357_STRUP_CON1 0xa16
+#define MT6357_STRUP_CON2 0xa18
+#define MT6357_STRUP_CON3 0xa1a
+#define MT6357_STRUP_CON4 0xa1c
+#define MT6357_STRUP_CON5 0xa1e
+#define MT6357_STRUP_CON6 0xa20
+#define MT6357_STRUP_CON7 0xa22
+#define MT6357_CPSCFG0 0xa24
+#define MT6357_STRUP_CON9 0xa26
+#define MT6357_STRUP_CON10 0xa28
+#define MT6357_STRUP_CON11 0xa2a
+#define MT6357_STRUP_CON12 0xa2c
+#define MT6357_STRUP_CON13 0xa2e
+#define MT6357_STRUP_CON14 0xa30
+#define MT6357_STRUP_CON15 0xa32
+#define MT6357_STRUP_CON16 0xa34
+#define MT6357_STRUP_CON19 0xa36
+#define MT6357_PSEQ_ELR_NUM 0xa38
+#define MT6357_PSEQ_ELR7 0xa3a
+#define MT6357_PSEQ_ELR8 0xa3c
+#define MT6357_PCHR_DIG_DSN_ID 0xa80
+#define MT6357_PCHR_DIG_DSN_REV0 0xa82
+#define MT6357_PCHR_DIG_DSN_DBI 0xa84
+#define MT6357_PCHR_DIG_DSN_DXI 0xa86
+#define MT6357_CHR_TOP_CON0 0xa88
+#define MT6357_CHR_TOP_CON1 0xa8a
+#define MT6357_CHR_TOP_CON2 0xa8c
+#define MT6357_CHR_TOP_CON3 0xa8e
+#define MT6357_CHR_TOP_CON4 0xa90
+#define MT6357_CHR_TOP_CON5 0xa92
+#define MT6357_CHR_TOP_CON6 0xa94
+#define MT6357_PCHR_DIG_ELR_NUM 0xa96
+#define MT6357_PCHR_ELR0 0xa98
+#define MT6357_PCHR_ELR1 0xa9a
+#define MT6357_PCHR_MACRO_DSN_ID 0xb80
+#define MT6357_PCHR_MACRO_DSN_REV0 0xb82
+#define MT6357_PCHR_MACRO_DSN_DBI 0xb84
+#define MT6357_PCHR_MACRO_DSN_DXI 0xb86
+#define MT6357_CHR_CON0 0xb88
+#define MT6357_CHR_CON1 0xb8a
+#define MT6357_CHR_CON2 0xb8c
+#define MT6357_CHR_CON3 0xb8e
+#define MT6357_CHR_CON4 0xb90
+#define MT6357_CHR_CON5 0xb92
+#define MT6357_CHR_CON6 0xb94
+#define MT6357_CHR_CON7 0xb96
+#define MT6357_CHR_CON8 0xb98
+#define MT6357_CHR_CON9 0xb9a
+#define MT6357_BM_TOP_DSN_ID 0xc00
+#define MT6357_BM_TOP_DSN_REV0 0xc02
+#define MT6357_BM_TOP_DBI 0xc04
+#define MT6357_BM_TOP_DXI 0xc06
+#define MT6357_BM_TPM0 0xc08
+#define MT6357_BM_TPM1 0xc0a
+#define MT6357_BM_TOP_CKPDN_CON0 0xc0c
+#define MT6357_BM_TOP_CKPDN_CON0_SET 0xc0e
+#define MT6357_BM_TOP_CKPDN_CON0_CLR 0xc10
+#define MT6357_BM_TOP_CKSEL_CON0 0xc12
+#define MT6357_BM_TOP_CKSEL_CON0_SET 0xc14
+#define MT6357_BM_TOP_CKSEL_CON0_CLR 0xc16
+#define MT6357_BM_TOP_CKTST_CON0 0xc18
+#define MT6357_BM_TOP_RST_CON0 0xc1a
+#define MT6357_BM_TOP_RST_CON0_SET 0xc1c
+#define MT6357_BM_TOP_RST_CON0_CLR 0xc1e
+#define MT6357_BM_TOP_INT_CON0 0xc20
+#define MT6357_BM_TOP_INT_CON0_SET 0xc22
+#define MT6357_BM_TOP_INT_CON0_CLR 0xc24
+#define MT6357_BM_TOP_INT_CON1 0xc26
+#define MT6357_BM_TOP_INT_CON1_SET 0xc28
+#define MT6357_BM_TOP_INT_CON1_CLR 0xc2a
+#define MT6357_BM_TOP_INT_MASK_CON0 0xc2c
+#define MT6357_BM_TOP_INT_MASK_CON0_SET 0xc2e
+#define MT6357_BM_TOP_INT_MASK_CON0_CLR 0xc30
+#define MT6357_BM_TOP_INT_MASK_CON1 0xc32
+#define MT6357_BM_TOP_INT_MASK_CON1_SET 0xc34
+#define MT6357_BM_TOP_INT_MASK_CON1_CLR 0xc36
+#define MT6357_BM_TOP_INT_STATUS0 0xc38
+#define MT6357_BM_TOP_INT_STATUS1 0xc3a
+#define MT6357_BM_TOP_INT_RAW_STATUS0 0xc3c
+#define MT6357_BM_TOP_INT_RAW_STATUS1 0xc3e
+#define MT6357_BM_TOP_INT_MISC_CON 0xc40
+#define MT6357_BM_TOP_DBG_CON 0xc42
+#define MT6357_BM_TOP_RSV0 0xc44
+#define MT6357_FGADC_ANA_DSN_ID 0xc80
+#define MT6357_FGADC_ANA_DSN_REV0 0xc82
+#define MT6357_FGADC_ANA_DSN_DBI 0xc84
+#define MT6357_FGADC_ANA_DSN_DXI 0xc86
+#define MT6357_FGADC_ANA_CON0 0xc88
+#define MT6357_FGADC_ANA_TEST_CON0 0xc8a
+#define MT6357_FGADC_ANA_ELR_NUM 0xc8c
+#define MT6357_FGADC_ANA_ELR0 0xc8e
+#define MT6357_FGADC_ANA_ELR1 0xc90
+#define MT6357_FGADC0_DSN_ID 0xd00
+#define MT6357_FGADC0_DSN_REV0 0xd02
+#define MT6357_FGADC0_DSN_DBI 0xd04
+#define MT6357_FGADC0_DSN_DXI 0xd06
+#define MT6357_FGADC_CON0 0xd08
+#define MT6357_FGADC_CON1 0xd0a
+#define MT6357_FGADC_CON2 0xd0c
+#define MT6357_FGADC_CON3 0xd0e
+#define MT6357_FGADC_CON4 0xd10
+#define MT6357_FGADC_CAR_CON0 0xd12
+#define MT6357_FGADC_CAR_CON1 0xd14
+#define MT6357_FGADC_CAR_CON2 0xd16
+#define MT6357_FGADC_CARTH_CON0 0xd18
+#define MT6357_FGADC_CARTH_CON1 0xd1a
+#define MT6357_FGADC_CARTH_CON2 0xd1c
+#define MT6357_FGADC_CARTH_CON3 0xd1e
+#define MT6357_FGADC_NTER_CON0 0xd20
+#define MT6357_FGADC_NTER_CON1 0xd22
+#define MT6357_FGADC_NTER_CON2 0xd24
+#define MT6357_FGADC_SON_CON0 0xd26
+#define MT6357_FGADC_SON_CON1 0xd28
+#define MT6357_FGADC_SON_CON2 0xd2a
+#define MT6357_FGADC_SON_CON3 0xd2c
+#define MT6357_FGADC_ZCV_CON0 0xd2e
+#define MT6357_FGADC_ZCV_CON1 0xd30
+#define MT6357_FGADC_ZCV_CON2 0xd32
+#define MT6357_FGADC_ZCV_CON3 0xd34
+#define MT6357_FGADC_ZCV_CON4 0xd36
+#define MT6357_FGADC_ZCVTH_CON0 0xd38
+#define MT6357_FGADC_ZCVTH_CON1 0xd3a
+#define MT6357_FGADC_ZCVTH_CON2 0xd3c
+#define MT6357_FGADC1_DSN_ID 0xd80
+#define MT6357_FGADC1_DSN_REV0 0xd82
+#define MT6357_FGADC1_DSN_DBI 0xd84
+#define MT6357_FGADC1_DSN_DXI 0xd86
+#define MT6357_FGADC_R_CON0 0xd88
+#define MT6357_FGADC_CUR_CON0 0xd8a
+#define MT6357_FGADC_CUR_CON1 0xd8c
+#define MT6357_FGADC_CUR_CON2 0xd8e
+#define MT6357_FGADC_CUR_CON3 0xd90
+#define MT6357_FGADC_OFFSET_CON0 0xd92
+#define MT6357_FGADC_OFFSET_CON1 0xd94
+#define MT6357_FGADC_GAIN_CON0 0xd96
+#define MT6357_FGADC_TEST_CON0 0xd98
+#define MT6357_SYSTEM_INFO_CON0 0xd9a
+#define MT6357_SYSTEM_INFO_CON1 0xd9c
+#define MT6357_SYSTEM_INFO_CON2 0xd9e
+#define MT6357_SYSTEM_INFO_CON3 0xda0
+#define MT6357_SYSTEM_INFO_CON4 0xda2
+#define MT6357_BATON_ANA_DSN_ID 0xe00
+#define MT6357_BATON_ANA_DSN_REV0 0xe02
+#define MT6357_BATON_ANA_DSN_DBI 0xe04
+#define MT6357_BATON_ANA_DSN_DXI 0xe06
+#define MT6357_BATON_ANA_CON0 0xe08
+#define MT6357_BATON_ANA_ELR_NUM 0xe0a
+#define MT6357_BATON_ANA_ELR0 0xe0c
+#define MT6357_HK_TOP_ID 0xf80
+#define MT6357_HK_TOP_REV0 0xf82
+#define MT6357_HK_TOP_DBI 0xf84
+#define MT6357_HK_TOP_DXI 0xf86
+#define MT6357_HK_TPM0 0xf88
+#define MT6357_HK_TPM1 0xf8a
+#define MT6357_HK_TOP_CLK_CON0 0xf8c
+#define MT6357_HK_TOP_CLK_CON1 0xf8e
+#define MT6357_HK_TOP_RST_CON0 0xf90
+#define MT6357_HK_TOP_INT_CON0 0xf92
+#define MT6357_HK_TOP_INT_CON0_SET 0xf94
+#define MT6357_HK_TOP_INT_CON0_CLR 0xf96
+#define MT6357_HK_TOP_INT_MASK_CON0 0xf98
+#define MT6357_HK_TOP_INT_MASK_CON0_SET 0xf9a
+#define MT6357_HK_TOP_INT_MASK_CON0_CLR 0xf9c
+#define MT6357_HK_TOP_INT_STATUS0 0xf9e
+#define MT6357_HK_TOP_INT_RAW_STATUS0 0xfa0
+#define MT6357_HK_TOP_MON_CON0 0xfa2
+#define MT6357_HK_TOP_MON_CON1 0xfa4
+#define MT6357_HK_TOP_MON_CON2 0xfa6
+#define MT6357_AUXADC_DSN_ID 0x1000
+#define MT6357_AUXADC_DSN_REV0 0x1002
+#define MT6357_AUXADC_DSN_DBI 0x1004
+#define MT6357_AUXADC_DSN_DXI 0x1006
+#define MT6357_AUXADC_ANA_CON0 0x1008
+#define MT6357_AUXADC_DIG_1_DSN_ID 0x1080
+#define MT6357_AUXADC_DIG_1_DSN_REV0 0x1082
+#define MT6357_AUXADC_DIG_1_DSN_DBI 0x1084
+#define MT6357_AUXADC_DIG_1_DSN_DXI 0x1086
+#define MT6357_AUXADC_ADC0 0x1088
+#define MT6357_AUXADC_ADC1 0x108a
+#define MT6357_AUXADC_ADC2 0x108c
+#define MT6357_AUXADC_ADC3 0x108e
+#define MT6357_AUXADC_ADC4 0x1090
+#define MT6357_AUXADC_ADC5 0x1092
+#define MT6357_AUXADC_ADC6 0x1094
+#define MT6357_AUXADC_ADC7 0x1096
+#define MT6357_AUXADC_ADC8 0x1098
+#define MT6357_AUXADC_ADC9 0x109a
+#define MT6357_AUXADC_ADC10 0x109c
+#define MT6357_AUXADC_ADC11 0x109e
+#define MT6357_AUXADC_ADC12 0x10a0
+#define MT6357_AUXADC_ADC14 0x10a2
+#define MT6357_AUXADC_ADC16 0x10a4
+#define MT6357_AUXADC_ADC17 0x10a6
+#define MT6357_AUXADC_ADC18 0x10a8
+#define MT6357_AUXADC_ADC19 0x10aa
+#define MT6357_AUXADC_ADC20 0x10ac
+#define MT6357_AUXADC_ADC21 0x10ae
+#define MT6357_AUXADC_ADC22 0x10b0
+#define MT6357_AUXADC_ADC23 0x10b2
+#define MT6357_AUXADC_ADC24 0x10b4
+#define MT6357_AUXADC_ADC25 0x10b6
+#define MT6357_AUXADC_ADC26 0x10b8
+#define MT6357_AUXADC_ADC27 0x10ba
+#define MT6357_AUXADC_ADC29 0x10bc
+#define MT6357_AUXADC_ADC30 0x10be
+#define MT6357_AUXADC_ADC31 0x10c0
+#define MT6357_AUXADC_ADC32 0x10c2
+#define MT6357_AUXADC_ADC33 0x10c4
+#define MT6357_AUXADC_ADC34 0x10c6
+#define MT6357_AUXADC_ADC35 0x10c8
+#define MT6357_AUXADC_ADC36 0x10ca
+#define MT6357_AUXADC_ADC38 0x10cc
+#define MT6357_AUXADC_ADC39 0x10ce
+#define MT6357_AUXADC_ADC40 0x10d0
+#define MT6357_AUXADC_ADC41 0x10d2
+#define MT6357_AUXADC_ADC42 0x10d4
+#define MT6357_AUXADC_ADC43 0x10d6
+#define MT6357_AUXADC_ADC46 0x10d8
+#define MT6357_AUXADC_ADC47 0x10da
+#define MT6357_AUXADC_DIG_1_ELR_NUM 0x10dc
+#define MT6357_AUXADC_DIG_1_ELR0 0x10de
+#define MT6357_AUXADC_DIG_1_ELR1 0x10e0
+#define MT6357_AUXADC_DIG_2_DSN_ID 0x1100
+#define MT6357_AUXADC_DIG_2_DSN_REV0 0x1102
+#define MT6357_AUXADC_DIG_2_DSN_DBI 0x1104
+#define MT6357_AUXADC_DIG_2_DSN_DXI 0x1106
+#define MT6357_AUXADC_STA0 0x1108
+#define MT6357_AUXADC_STA1 0x110a
+#define MT6357_AUXADC_STA2 0x110c
+#define MT6357_AUXADC_RQST0 0x110e
+#define MT6357_AUXADC_RQST0_SET 0x1110
+#define MT6357_AUXADC_RQST0_CLR 0x1112
+#define MT6357_AUXADC_RQST2 0x1114
+#define MT6357_AUXADC_RQST2_SET 0x1116
+#define MT6357_AUXADC_RQST2_CLR 0x1118
+#define MT6357_AUXADC_RQST1 0x111a
+#define MT6357_AUXADC_RQST1_SET 0x111c
+#define MT6357_AUXADC_RQST1_CLR 0x111e
+#define MT6357_AUXADC_CON0 0x1120
+#define MT6357_AUXADC_CON0_SET 0x1122
+#define MT6357_AUXADC_CON0_CLR 0x1124
+#define MT6357_AUXADC_CON1 0x1126
+#define MT6357_AUXADC_CON2 0x1128
+#define MT6357_AUXADC_CON3 0x112a
+#define MT6357_AUXADC_CON4 0x112c
+#define MT6357_AUXADC_CON5 0x112e
+#define MT6357_AUXADC_CON6 0x1130
+#define MT6357_AUXADC_CON7 0x1132
+#define MT6357_AUXADC_CON8 0x1134
+#define MT6357_AUXADC_CON9 0x1136
+#define MT6357_AUXADC_CON10 0x1138
+#define MT6357_AUXADC_CON11 0x113a
+#define MT6357_AUXADC_CON12 0x113c
+#define MT6357_AUXADC_CON13 0x113e
+#define MT6357_AUXADC_CON14 0x1140
+#define MT6357_AUXADC_CON15 0x1142
+#define MT6357_AUXADC_CON16 0x1144
+#define MT6357_AUXADC_CON17 0x1146
+#define MT6357_AUXADC_CON18 0x1148
+#define MT6357_AUXADC_CON19 0x114a
+#define MT6357_AUXADC_CON20 0x114c
+#define MT6357_AUXADC_DIG_3_DSN_ID 0x1180
+#define MT6357_AUXADC_DIG_3_DSN_REV0 0x1182
+#define MT6357_AUXADC_DIG_3_DSN_DBI 0x1184
+#define MT6357_AUXADC_DIG_3_DSN_DXI 0x1186
+#define MT6357_AUXADC_AUTORPT0 0x1188
+#define MT6357_AUXADC_LBAT0 0x118a
+#define MT6357_AUXADC_LBAT1 0x118c
+#define MT6357_AUXADC_LBAT2 0x118e
+#define MT6357_AUXADC_LBAT3 0x1190
+#define MT6357_AUXADC_LBAT4 0x1192
+#define MT6357_AUXADC_LBAT5 0x1194
+#define MT6357_AUXADC_LBAT6 0x1196
+#define MT6357_AUXADC_ACCDET 0x1198
+#define MT6357_AUXADC_DBG0 0x119a
+#define MT6357_AUXADC_IMP0 0x119c
+#define MT6357_AUXADC_IMP1 0x119e
+#define MT6357_AUXADC_DIG_3_ELR_NUM 0x11a0
+#define MT6357_AUXADC_DIG_3_ELR0 0x11a2
+#define MT6357_AUXADC_DIG_3_ELR1 0x11a4
+#define MT6357_AUXADC_DIG_3_ELR2 0x11a6
+#define MT6357_AUXADC_DIG_3_ELR3 0x11a8
+#define MT6357_AUXADC_DIG_3_ELR4 0x11aa
+#define MT6357_AUXADC_DIG_3_ELR5 0x11ac
+#define MT6357_AUXADC_DIG_3_ELR6 0x11ae
+#define MT6357_AUXADC_DIG_3_ELR7 0x11b0
+#define MT6357_AUXADC_DIG_3_ELR8 0x11b2
+#define MT6357_AUXADC_DIG_3_ELR9 0x11b4
+#define MT6357_AUXADC_DIG_3_ELR10 0x11b6
+#define MT6357_AUXADC_DIG_3_ELR11 0x11b8
+#define MT6357_AUXADC_DIG_4_DSN_ID 0x1200
+#define MT6357_AUXADC_DIG_4_DSN_REV0 0x1202
+#define MT6357_AUXADC_DIG_4_DSN_DBI 0x1204
+#define MT6357_AUXADC_DIG_4_DSN_DXI 0x1206
+#define MT6357_AUXADC_MDRT_0 0x1208
+#define MT6357_AUXADC_MDRT_1 0x120a
+#define MT6357_AUXADC_MDRT_2 0x120c
+#define MT6357_AUXADC_MDRT_3 0x120e
+#define MT6357_AUXADC_MDRT_4 0x1210
+#define MT6357_AUXADC_DCXO_MDRT_0 0x1212
+#define MT6357_AUXADC_DCXO_MDRT_1 0x1214
+#define MT6357_AUXADC_DCXO_MDRT_2 0x1216
+#define MT6357_AUXADC_NAG_0 0x1218
+#define MT6357_AUXADC_NAG_1 0x121a
+#define MT6357_AUXADC_NAG_2 0x121c
+#define MT6357_AUXADC_NAG_3 0x121e
+#define MT6357_AUXADC_NAG_4 0x1220
+#define MT6357_AUXADC_NAG_5 0x1222
+#define MT6357_AUXADC_NAG_6 0x1224
+#define MT6357_AUXADC_NAG_7 0x1226
+#define MT6357_AUXADC_NAG_8 0x1228
+#define MT6357_AUXADC_RSV_1 0x122a
+#define MT6357_AUXADC_ANA_0 0x122c
+#define MT6357_AUXADC_IMP_CG0 0x122e
+#define MT6357_AUXADC_LBAT_CG0 0x1230
+#define MT6357_AUXADC_NAG_CG0 0x1232
+#define MT6357_AUXADC_PRI_NEW 0x1234
+#define MT6357_AUXADC_CHR_TOP_CON2 0x1236
+#define MT6357_BUCK_TOP_DSN_ID 0x1400
+#define MT6357_BUCK_TOP_DSN_REV0 0x1402
+#define MT6357_BUCK_TOP_DBI 0x1404
+#define MT6357_BUCK_TOP_DXI 0x1406
+#define MT6357_BUCK_TOP_PAM0 0x1408
+#define MT6357_BUCK_TOP_PAM1 0x140a
+#define MT6357_BUCK_TOP_CLK_CON0 0x140c
+#define MT6357_BUCK_TOP_CLK_CON0_SET 0x140e
+#define MT6357_BUCK_TOP_CLK_CON0_CLR 0x1410
+#define MT6357_BUCK_TOP_CLK_HWEN_CON0 0x1412
+#define MT6357_BUCK_TOP_CLK_HWEN_CON0_SET 0x1414
+#define MT6357_BUCK_TOP_CLK_HWEN_CON0_CLR 0x1416
+#define MT6357_BUCK_TOP_CLK_MISC_CON0 0x1418
+#define MT6357_BUCK_TOP_INT_CON0 0x141a
+#define MT6357_BUCK_TOP_INT_CON0_SET 0x141c
+#define MT6357_BUCK_TOP_INT_CON0_CLR 0x141e
+#define MT6357_BUCK_TOP_INT_MASK_CON0 0x1420
+#define MT6357_BUCK_TOP_INT_MASK_CON0_SET 0x1422
+#define MT6357_BUCK_TOP_INT_MASK_CON0_CLR 0x1424
+#define MT6357_BUCK_TOP_INT_STATUS0 0x1426
+#define MT6357_BUCK_TOP_INT_RAW_STATUS0 0x1428
+#define MT6357_BUCK_TOP_STB_CON 0x142a
+#define MT6357_BUCK_TOP_SLP_CON0 0x142c
+#define MT6357_BUCK_TOP_SLP_CON1 0x142e
+#define MT6357_BUCK_TOP_SLP_CON2 0x1430
+#define MT6357_BUCK_TOP_MINFREQ_CON 0x1432
+#define MT6357_BUCK_TOP_OC_CON0 0x1434
+#define MT6357_BUCK_TOP_K_CON0 0x1436
+#define MT6357_BUCK_TOP_K_CON1 0x1438
+#define MT6357_BUCK_TOP_K_CON2 0x143a
+#define MT6357_BUCK_TOP_WDTDBG0 0x143c
+#define MT6357_BUCK_TOP_WDTDBG1 0x143e
+#define MT6357_BUCK_TOP_WDTDBG2 0x1440
+#define MT6357_BUCK_TOP_ELR_NUM 0x1442
+#define MT6357_BUCK_TOP_ELR0 0x1444
+#define MT6357_BUCK_TOP_ELR1 0x1446
+#define MT6357_BUCK_VPROC_DSN_ID 0x1480
+#define MT6357_BUCK_VPROC_DSN_REV0 0x1482
+#define MT6357_BUCK_VPROC_DSN_DBI 0x1484
+#define MT6357_BUCK_VPROC_DSN_DXI 0x1486
+#define MT6357_BUCK_VPROC_CON0 0x1488
+#define MT6357_BUCK_VPROC_CON1 0x148a
+#define MT6357_BUCK_VPROC_CFG0 0x148c
+#define MT6357_BUCK_VPROC_CFG1 0x148e
+#define MT6357_BUCK_VPROC_OP_EN 0x1490
+#define MT6357_BUCK_VPROC_OP_EN_SET 0x1492
+#define MT6357_BUCK_VPROC_OP_EN_CLR 0x1494
+#define MT6357_BUCK_VPROC_OP_CFG 0x1496
+#define MT6357_BUCK_VPROC_OP_CFG_SET 0x1498
+#define MT6357_BUCK_VPROC_OP_CFG_CLR 0x149a
+#define MT6357_BUCK_VPROC_SP_CON 0x149c
+#define MT6357_BUCK_VPROC_SP_CFG 0x149e
+#define MT6357_BUCK_VPROC_OC_CFG 0x14a0
+#define MT6357_BUCK_VPROC_DBG0 0x14a2
+#define MT6357_BUCK_VPROC_DBG1 0x14a4
+#define MT6357_BUCK_VPROC_DBG2 0x14a6
+#define MT6357_BUCK_VPROC_ELR_NUM 0x14a8
+#define MT6357_BUCK_VPROC_ELR0 0x14aa
+#define MT6357_BUCK_VCORE_DSN_ID 0x1500
+#define MT6357_BUCK_VCORE_DSN_REV0 0x1502
+#define MT6357_BUCK_VCORE_DSN_DBI 0x1504
+#define MT6357_BUCK_VCORE_DSN_DXI 0x1506
+#define MT6357_BUCK_VCORE_CON0 0x1508
+#define MT6357_BUCK_VCORE_CON1 0x150a
+#define MT6357_BUCK_VCORE_CFG0 0x150c
+#define MT6357_BUCK_VCORE_CFG1 0x150e
+#define MT6357_BUCK_VCORE_OP_EN 0x1510
+#define MT6357_BUCK_VCORE_OP_EN_SET 0x1512
+#define MT6357_BUCK_VCORE_OP_EN_CLR 0x1514
+#define MT6357_BUCK_VCORE_OP_CFG 0x1516
+#define MT6357_BUCK_VCORE_OP_CFG_SET 0x1518
+#define MT6357_BUCK_VCORE_OP_CFG_CLR 0x151a
+#define MT6357_BUCK_VCORE_SP_CON 0x151c
+#define MT6357_BUCK_VCORE_SP_CFG 0x151e
+#define MT6357_BUCK_VCORE_OC_CFG 0x1520
+#define MT6357_BUCK_VCORE_DBG0 0x1522
+#define MT6357_BUCK_VCORE_DBG1 0x1524
+#define MT6357_BUCK_VCORE_DBG2 0x1526
+#define MT6357_BUCK_VCORE_ELR_NUM 0x1528
+#define MT6357_BUCK_VCORE_ELR0 0x152a
+#define MT6357_BUCK_VMODEM_DSN_ID 0x1580
+#define MT6357_BUCK_VMODEM_DSN_REV0 0x1582
+#define MT6357_BUCK_VMODEM_DSN_DBI 0x1584
+#define MT6357_BUCK_VMODEM_DSN_DXI 0x1586
+#define MT6357_BUCK_VMODEM_CON0 0x1588
+#define MT6357_BUCK_VMODEM_CON1 0x158a
+#define MT6357_BUCK_VMODEM_CFG0 0x158c
+#define MT6357_BUCK_VMODEM_CFG1 0x158e
+#define MT6357_BUCK_VMODEM_OP_EN 0x1590
+#define MT6357_BUCK_VMODEM_OP_EN_SET 0x1592
+#define MT6357_BUCK_VMODEM_OP_EN_CLR 0x1594
+#define MT6357_BUCK_VMODEM_OP_CFG 0x1596
+#define MT6357_BUCK_VMODEM_OP_CFG_SET 0x1598
+#define MT6357_BUCK_VMODEM_OP_CFG_CLR 0x159a
+#define MT6357_BUCK_VMODEM_SP_CON 0x159c
+#define MT6357_BUCK_VMODEM_SP_CFG 0x159e
+#define MT6357_BUCK_VMODEM_OC_CFG 0x15a0
+#define MT6357_BUCK_VMODEM_DBG0 0x15a2
+#define MT6357_BUCK_VMODEM_DBG1 0x15a4
+#define MT6357_BUCK_VMODEM_DBG2 0x15a6
+#define MT6357_BUCK_VMODEM_ELR_NUM 0x15a8
+#define MT6357_BUCK_VMODEM_ELR0 0x15aa
+#define MT6357_BUCK_VS1_DSN_ID 0x1600
+#define MT6357_BUCK_VS1_DSN_REV0 0x1602
+#define MT6357_BUCK_VS1_DSN_DBI 0x1604
+#define MT6357_BUCK_VS1_DSN_DXI 0x1606
+#define MT6357_BUCK_VS1_CON0 0x1608
+#define MT6357_BUCK_VS1_CON1 0x160a
+#define MT6357_BUCK_VS1_CFG0 0x160c
+#define MT6357_BUCK_VS1_CFG1 0x160e
+#define MT6357_BUCK_VS1_OP_EN 0x1610
+#define MT6357_BUCK_VS1_OP_EN_SET 0x1612
+#define MT6357_BUCK_VS1_OP_EN_CLR 0x1614
+#define MT6357_BUCK_VS1_OP_CFG 0x1616
+#define MT6357_BUCK_VS1_OP_CFG_SET 0x1618
+#define MT6357_BUCK_VS1_OP_CFG_CLR 0x161a
+#define MT6357_BUCK_VS1_SP_CON 0x161c
+#define MT6357_BUCK_VS1_SP_CFG 0x161e
+#define MT6357_BUCK_VS1_OC_CFG 0x1620
+#define MT6357_BUCK_VS1_DBG0 0x1622
+#define MT6357_BUCK_VS1_DBG1 0x1624
+#define MT6357_BUCK_VS1_DBG2 0x1626
+#define MT6357_BUCK_VS1_VOTER 0x1628
+#define MT6357_BUCK_VS1_VOTER_SET 0x162a
+#define MT6357_BUCK_VS1_VOTER_CLR 0x162c
+#define MT6357_BUCK_VS1_VOTER_CFG 0x162e
+#define MT6357_BUCK_VS1_ELR_NUM 0x1630
+#define MT6357_BUCK_VS1_ELR0 0x1632
+#define MT6357_BUCK_VPA_DSN_ID 0x1680
+#define MT6357_BUCK_VPA_DSN_REV0 0x1682
+#define MT6357_BUCK_VPA_DSN_DBI 0x1684
+#define MT6357_BUCK_VPA_DSN_DXI 0x1686
+#define MT6357_BUCK_VPA_CON0 0x1688
+#define MT6357_BUCK_VPA_CON1 0x168a
+#define MT6357_BUCK_VPA_CFG0 0x168c
+#define MT6357_BUCK_VPA_CFG1 0x168e
+#define MT6357_BUCK_VPA_OC_CFG 0x1690
+#define MT6357_BUCK_VPA_DBG0 0x1692
+#define MT6357_BUCK_VPA_DBG1 0x1694
+#define MT6357_BUCK_VPA_DBG2 0x1696
+#define MT6357_BUCK_VPA_DLC_CON0 0x1698
+#define MT6357_BUCK_VPA_DLC_CON1 0x169a
+#define MT6357_BUCK_VPA_DLC_CON2 0x169c
+#define MT6357_BUCK_VPA_MSFG_CON0 0x169e
+#define MT6357_BUCK_VPA_MSFG_CON1 0x16a0
+#define MT6357_BUCK_VPA_MSFG_RRATE0 0x16a2
+#define MT6357_BUCK_VPA_MSFG_RRATE1 0x16a4
+#define MT6357_BUCK_VPA_MSFG_RRATE2 0x16a6
+#define MT6357_BUCK_VPA_MSFG_RTHD0 0x16a8
+#define MT6357_BUCK_VPA_MSFG_RTHD1 0x16aa
+#define MT6357_BUCK_VPA_MSFG_RTHD2 0x16ac
+#define MT6357_BUCK_VPA_MSFG_FRATE0 0x16ae
+#define MT6357_BUCK_VPA_MSFG_FRATE1 0x16b0
+#define MT6357_BUCK_VPA_MSFG_FRATE2 0x16b2
+#define MT6357_BUCK_VPA_MSFG_FTHD0 0x16b4
+#define MT6357_BUCK_VPA_MSFG_FTHD1 0x16b6
+#define MT6357_BUCK_VPA_MSFG_FTHD2 0x16b8
+#define MT6357_BUCK_ANA_DSN_ID 0x1700
+#define MT6357_BUCK_ANA_DSN_REV0 0x1702
+#define MT6357_BUCK_ANA_DSN_DBI 0x1704
+#define MT6357_BUCK_ANA_DSN_FPI 0x1706
+#define MT6357_SMPS_ANA_CON0 0x1708
+#define MT6357_SMPS_ANA_CON1 0x170a
+#define MT6357_SMPS_ANA_CON2 0x170c
+#define MT6357_VCORE_VPROC_ANA_CON0 0x170e
+#define MT6357_VCORE_VPROC_ANA_CON1 0x1710
+#define MT6357_VCORE_VPROC_ANA_CON2 0x1712
+#define MT6357_VCORE_VPROC_ANA_CON3 0x1714
+#define MT6357_VCORE_VPROC_ANA_CON4 0x1716
+#define MT6357_VCORE_VPROC_ANA_CON5 0x1718
+#define MT6357_VCORE_VPROC_ANA_CON6 0x171a
+#define MT6357_VCORE_VPROC_ANA_CON7 0x171c
+#define MT6357_VCORE_VPROC_ANA_CON8 0x171e
+#define MT6357_VCORE_VPROC_ANA_CON9 0x1720
+#define MT6357_VCORE_VPROC_ANA_CON10 0x1722
+#define MT6357_VCORE_VPROC_ANA_CON11 0x1724
+#define MT6357_VMODEM_ANA_CON0 0x1726
+#define MT6357_VMODEM_ANA_CON1 0x1728
+#define MT6357_VMODEM_ANA_CON2 0x172a
+#define MT6357_VMODEM_ANA_CON3 0x172c
+#define MT6357_VMODEM_ANA_CON4 0x172e
+#define MT6357_VMODEM_ANA_CON5 0x1730
+#define MT6357_VS1_ANA_CON0 0x1732
+#define MT6357_VS1_ANA_CON1 0x1734
+#define MT6357_VS1_ANA_CON2 0x1736
+#define MT6357_VS1_ANA_CON3 0x1738
+#define MT6357_VS1_ANA_CON4 0x173a
+#define MT6357_VS1_ANA_CON5 0x173c
+#define MT6357_VPA_ANA_CON0 0x173e
+#define MT6357_VPA_ANA_CON1 0x1740
+#define MT6357_VPA_ANA_CON2 0x1742
+#define MT6357_VPA_ANA_CON3 0x1744
+#define MT6357_VPA_ANA_CON4 0x1746
+#define MT6357_VPA_ANA_CON5 0x1748
+#define MT6357_BUCK_ANA_ELR_NUM 0x174a
+#define MT6357_SMPS_ELR_0 0x174c
+#define MT6357_SMPS_ELR_1 0x174e
+#define MT6357_SMPS_ELR_2 0x1750
+#define MT6357_SMPS_ELR_3 0x1752
+#define MT6357_SMPS_ELR_4 0x1754
+#define MT6357_SMPS_ELR_5 0x1756
+#define MT6357_VCORE_VPROC_ELR_0 0x1758
+#define MT6357_VCORE_VPROC_ELR_1 0x175a
+#define MT6357_VCORE_VPROC_ELR_2 0x175c
+#define MT6357_VCORE_VPROC_ELR_3 0x175e
+#define MT6357_VCORE_VPROC_ELR_4 0x1760
+#define MT6357_VMODEM_ELR_0 0x1762
+#define MT6357_VMODEM_ELR_1 0x1764
+#define MT6357_VMODEM_ELR_2 0x1766
+#define MT6357_VS1_ELR_0 0x1768
+#define MT6357_VS1_ELR_1 0x176a
+#define MT6357_VPA_ELR_0 0x176c
+#define MT6357_LDO_TOP_ID 0x1880
+#define MT6357_LDO_TOP_REV0 0x1882
+#define MT6357_LDO_TOP_DBI 0x1884
+#define MT6357_LDO_TOP_DXI 0x1886
+#define MT6357_LDO_TPM0 0x1888
+#define MT6357_LDO_TPM1 0x188a
+#define MT6357_LDO_TOP_CLK_DCM_CON0 0x188c
+#define MT6357_LDO_TOP_CLK_VIO28_CON0 0x188e
+#define MT6357_LDO_TOP_CLK_VIO18_CON0 0x1890
+#define MT6357_LDO_TOP_CLK_VAUD28_CON0 0x1892
+#define MT6357_LDO_TOP_CLK_VDRAM_CON0 0x1894
+#define MT6357_LDO_TOP_CLK_VSRAM_PROC_CON0 0x1896
+#define MT6357_LDO_TOP_CLK_VSRAM_OTHERS_CON0 0x1898
+#define MT6357_LDO_TOP_CLK_VAUX18_CON0 0x189a
+#define MT6357_LDO_TOP_CLK_VUSB33_CON0 0x189c
+#define MT6357_LDO_TOP_CLK_VEMC_CON0 0x189e
+#define MT6357_LDO_TOP_CLK_VXO22_CON0 0x18a0
+#define MT6357_LDO_TOP_CLK_VSIM1_CON0 0x18a2
+#define MT6357_LDO_TOP_CLK_VSIM2_CON0 0x18a4
+#define MT6357_LDO_TOP_CLK_VCAMD_CON0 0x18a6
+#define MT6357_LDO_TOP_CLK_VCAMIO_CON0 0x18a8
+#define MT6357_LDO_TOP_CLK_VEFUSE_CON0 0x18aa
+#define MT6357_LDO_TOP_CLK_VCN33_CON0 0x18ac
+#define MT6357_LDO_TOP_CLK_VCN18_CON0 0x18ae
+#define MT6357_LDO_TOP_CLK_VCN28_CON0 0x18b0
+#define MT6357_LDO_TOP_CLK_VIBR_CON0 0x18b2
+#define MT6357_LDO_TOP_CLK_VFE28_CON0 0x18b4
+#define MT6357_LDO_TOP_CLK_VMCH_CON0 0x18b6
+#define MT6357_LDO_TOP_CLK_VMC_CON0 0x18b8
+#define MT6357_LDO_TOP_CLK_VRF18_CON0 0x18ba
+#define MT6357_LDO_TOP_CLK_VLDO28_CON0 0x18bc
+#define MT6357_LDO_TOP_CLK_VRF12_CON0 0x18be
+#define MT6357_LDO_TOP_CLK_VCAMA_CON0 0x18c0
+#define MT6357_LDO_TOP_CLK_TREF_CON0 0x18c2
+#define MT6357_LDO_TOP_INT_CON0 0x18c4
+#define MT6357_LDO_TOP_INT_CON0_SET 0x18c6
+#define MT6357_LDO_TOP_INT_CON0_CLR 0x18c8
+#define MT6357_LDO_TOP_INT_CON1 0x18ca
+#define MT6357_LDO_TOP_INT_CON1_SET 0x18cc
+#define MT6357_LDO_TOP_INT_CON1_CLR 0x18ce
+#define MT6357_LDO_TOP_INT_MASK_CON0 0x18d0
+#define MT6357_LDO_TOP_INT_MASK_CON0_SET 0x18d2
+#define MT6357_LDO_TOP_INT_MASK_CON0_CLR 0x18d4
+#define MT6357_LDO_TOP_INT_MASK_CON1 0x18d6
+#define MT6357_LDO_TOP_INT_MASK_CON1_SET 0x18d8
+#define MT6357_LDO_TOP_INT_MASK_CON1_CLR 0x18da
+#define MT6357_LDO_TOP_INT_STATUS0 0x18dc
+#define MT6357_LDO_TOP_INT_STATUS1 0x18de
+#define MT6357_LDO_TOP_INT_RAW_STATUS0 0x18e0
+#define MT6357_LDO_TOP_INT_RAW_STATUS1 0x18e2
+#define MT6357_LDO_TEST_CON0 0x18e4
+#define MT6357_LDO_TOP_WDT_CON0 0x18e6
+#define MT6357_LDO_TOP_RSV_CON0 0x18e8
+#define MT6357_LDO_TOP_RSV_CON1 0x18ea
+#define MT6357_LDO_OCFB0 0x18ec
+#define MT6357_LDO_LP_PROTECTION 0x18ee
+#define MT6357_LDO_DUMMY_LOAD_GATED 0x18f0
+#define MT6357_LDO_GON0_DSN_ID 0x1900
+#define MT6357_LDO_GON0_DSN_REV0 0x1902
+#define MT6357_LDO_GON0_DSN_DBI 0x1904
+#define MT6357_LDO_GON0_DSN_DXI 0x1906
+#define MT6357_LDO_VXO22_CON0 0x1908
+#define MT6357_LDO_VXO22_OP_EN 0x190a
+#define MT6357_LDO_VXO22_OP_EN_SET 0x190c
+#define MT6357_LDO_VXO22_OP_EN_CLR 0x190e
+#define MT6357_LDO_VXO22_OP_CFG 0x1910
+#define MT6357_LDO_VXO22_OP_CFG_SET 0x1912
+#define MT6357_LDO_VXO22_OP_CFG_CLR 0x1914
+#define MT6357_LDO_VXO22_CON1 0x1916
+#define MT6357_LDO_VXO22_CON2 0x1918
+#define MT6357_LDO_VXO22_CON3 0x191a
+#define MT6357_LDO_VAUX18_CON0 0x191c
+#define MT6357_LDO_VAUX18_OP_EN 0x191e
+#define MT6357_LDO_VAUX18_OP_EN_SET 0x1920
+#define MT6357_LDO_VAUX18_OP_EN_CLR 0x1922
+#define MT6357_LDO_VAUX18_OP_CFG 0x1924
+#define MT6357_LDO_VAUX18_OP_CFG_SET 0x1926
+#define MT6357_LDO_VAUX18_OP_CFG_CLR 0x1928
+#define MT6357_LDO_VAUX18_CON1 0x192a
+#define MT6357_LDO_VAUX18_CON2 0x192c
+#define MT6357_LDO_VAUX18_CON3 0x192e
+#define MT6357_LDO_VAUD28_CON0 0x1930
+#define MT6357_LDO_VAUD28_OP_EN 0x1932
+#define MT6357_LDO_VAUD28_OP_EN_SET 0x1934
+#define MT6357_LDO_VAUD28_OP_EN_CLR 0x1936
+#define MT6357_LDO_VAUD28_OP_CFG 0x1938
+#define MT6357_LDO_VAUD28_OP_CFG_SET 0x193a
+#define MT6357_LDO_VAUD28_OP_CFG_CLR 0x193c
+#define MT6357_LDO_VAUD28_CON1 0x193e
+#define MT6357_LDO_VAUD28_CON2 0x1940
+#define MT6357_LDO_VAUD28_CON3 0x1942
+#define MT6357_LDO_VIO28_CON0 0x1944
+#define MT6357_LDO_VIO28_OP_EN 0x1946
+#define MT6357_LDO_VIO28_OP_EN_SET 0x1948
+#define MT6357_LDO_VIO28_OP_EN_CLR 0x194a
+#define MT6357_LDO_VIO28_OP_CFG 0x194c
+#define MT6357_LDO_VIO28_OP_CFG_SET 0x194e
+#define MT6357_LDO_VIO28_OP_CFG_CLR 0x1950
+#define MT6357_LDO_VIO28_CON1 0x1952
+#define MT6357_LDO_VIO28_CON2 0x1954
+#define MT6357_LDO_VIO28_CON3 0x1956
+#define MT6357_LDO_VIO18_CON0 0x1958
+#define MT6357_LDO_VIO18_OP_EN 0x195a
+#define MT6357_LDO_VIO18_OP_EN_SET 0x195c
+#define MT6357_LDO_VIO18_OP_EN_CLR 0x195e
+#define MT6357_LDO_VIO18_OP_CFG 0x1960
+#define MT6357_LDO_VIO18_OP_CFG_SET 0x1962
+#define MT6357_LDO_VIO18_OP_CFG_CLR 0x1964
+#define MT6357_LDO_VIO18_CON1 0x1966
+#define MT6357_LDO_VIO18_CON2 0x1968
+#define MT6357_LDO_VIO18_CON3 0x196a
+#define MT6357_LDO_VDRAM_CON0 0x196c
+#define MT6357_LDO_VDRAM_OP_EN 0x196e
+#define MT6357_LDO_VDRAM_OP_EN_SET 0x1970
+#define MT6357_LDO_VDRAM_OP_EN_CLR 0x1972
+#define MT6357_LDO_VDRAM_OP_CFG 0x1974
+#define MT6357_LDO_VDRAM_OP_CFG_SET 0x1976
+#define MT6357_LDO_VDRAM_OP_CFG_CLR 0x1978
+#define MT6357_LDO_VDRAM_CON1 0x197a
+#define MT6357_LDO_VDRAM_CON2 0x197c
+#define MT6357_LDO_VDRAM_CON3 0x197e
+#define MT6357_LDO_GON1_DSN_ID 0x1980
+#define MT6357_LDO_GON1_DSN_REV0 0x1982
+#define MT6357_LDO_GON1_DSN_DBI 0x1984
+#define MT6357_LDO_GON1_DSN_DXI 0x1986
+#define MT6357_LDO_VEMC_CON0 0x1988
+#define MT6357_LDO_VEMC_OP_EN 0x198a
+#define MT6357_LDO_VEMC_OP_EN_SET 0x198c
+#define MT6357_LDO_VEMC_OP_EN_CLR 0x198e
+#define MT6357_LDO_VEMC_OP_CFG 0x1990
+#define MT6357_LDO_VEMC_OP_CFG_SET 0x1992
+#define MT6357_LDO_VEMC_OP_CFG_CLR 0x1994
+#define MT6357_LDO_VEMC_CON1 0x1996
+#define MT6357_LDO_VEMC_CON2 0x1998
+#define MT6357_LDO_VEMC_CON3 0x199a
+#define MT6357_LDO_VUSB33_CON0_0 0x199c
+#define MT6357_LDO_VUSB33_OP_EN 0x199e
+#define MT6357_LDO_VUSB33_OP_EN_SET 0x19a0
+#define MT6357_LDO_VUSB33_OP_EN_CLR 0x19a2
+#define MT6357_LDO_VUSB33_OP_CFG 0x19a4
+#define MT6357_LDO_VUSB33_OP_CFG_SET 0x19a6
+#define MT6357_LDO_VUSB33_OP_CFG_CLR 0x19a8
+#define MT6357_LDO_VUSB33_CON0_1 0x19aa
+#define MT6357_LDO_VUSB33_CON1 0x19ac
+#define MT6357_LDO_VUSB33_CON2 0x19ae
+#define MT6357_LDO_VUSB33_CON3 0x19b0
+#define MT6357_LDO_VSRAM_PROC_CON0 0x19b2
+#define MT6357_LDO_VSRAM_PROC_CON2 0x19b4
+#define MT6357_LDO_VSRAM_PROC_CFG0 0x19b6
+#define MT6357_LDO_VSRAM_PROC_CFG1 0x19b8
+#define MT6357_LDO_VSRAM_PROC_OP_EN 0x19ba
+#define MT6357_LDO_VSRAM_PROC_OP_EN_SET 0x19bc
+#define MT6357_LDO_VSRAM_PROC_OP_EN_CLR 0x19be
+#define MT6357_LDO_VSRAM_PROC_OP_CFG 0x19c0
+#define MT6357_LDO_VSRAM_PROC_OP_CFG_SET 0x19c2
+#define MT6357_LDO_VSRAM_PROC_OP_CFG_CLR 0x19c4
+#define MT6357_LDO_VSRAM_PROC_CON3 0x19c6
+#define MT6357_LDO_VSRAM_PROC_CON4 0x19c8
+#define MT6357_LDO_VSRAM_PROC_CON5 0x19ca
+#define MT6357_LDO_VSRAM_PROC_DBG0 0x19cc
+#define MT6357_LDO_VSRAM_PROC_DBG1 0x19ce
+#define MT6357_LDO_VSRAM_OTHERS_CON0 0x19d0
+#define MT6357_LDO_VSRAM_OTHERS_CON2 0x19d2
+#define MT6357_LDO_VSRAM_OTHERS_CFG0 0x19d4
+#define MT6357_LDO_VSRAM_OTHERS_CFG1 0x19d6
+#define MT6357_LDO_VSRAM_OTHERS_OP_EN 0x19d8
+#define MT6357_LDO_VSRAM_OTHERS_OP_EN_SET 0x19da
+#define MT6357_LDO_VSRAM_OTHERS_OP_EN_CLR 0x19dc
+#define MT6357_LDO_VSRAM_OTHERS_OP_CFG 0x19de
+#define MT6357_LDO_VSRAM_OTHERS_OP_CFG_SET 0x19e0
+#define MT6357_LDO_VSRAM_OTHERS_OP_CFG_CLR 0x19e2
+#define MT6357_LDO_VSRAM_OTHERS_CON3 0x19e4
+#define MT6357_LDO_VSRAM_OTHERS_CON4 0x19e6
+#define MT6357_LDO_VSRAM_OTHERS_CON5 0x19e8
+#define MT6357_LDO_VSRAM_OTHERS_DBG0 0x19ea
+#define MT6357_LDO_VSRAM_OTHERS_DBG1 0x19ec
+#define MT6357_LDO_VSRAM_PROC_SP 0x19ee
+#define MT6357_LDO_VSRAM_OTHERS_SP 0x19f0
+#define MT6357_LDO_VSRAM_PROC_R2R_PDN_DIS 0x19f2
+#define MT6357_LDO_VSRAM_OTHERS_R2R_PDN_DIS 0x19f4
+#define MT6357_LDO_VSRAM_WDT_DBG0 0x19f6
+#define MT6357_LDO_GON1_ELR_NUM 0x19f8
+#define MT6357_LDO_VSRAM_CON0 0x19fa
+#define MT6357_LDO_VSRAM_CON1 0x19fc
+#define MT6357_LDO_VSRAM_CON2 0x19fe
+#define MT6357_LDO_GOFF0_DSN_ID 0x1a00
+#define MT6357_LDO_GOFF0_DSN_REV0 0x1a02
+#define MT6357_LDO_GOFF0_DSN_DBI 0x1a04
+#define MT6357_LDO_GOFF0_DSN_DXI 0x1a06
+#define MT6357_LDO_VFE28_CON0 0x1a08
+#define MT6357_LDO_VFE28_OP_EN 0x1a0a
+#define MT6357_LDO_VFE28_OP_EN_SET 0x1a0c
+#define MT6357_LDO_VFE28_OP_EN_CLR 0x1a0e
+#define MT6357_LDO_VFE28_OP_CFG 0x1a10
+#define MT6357_LDO_VFE28_OP_CFG_SET 0x1a12
+#define MT6357_LDO_VFE28_OP_CFG_CLR 0x1a14
+#define MT6357_LDO_VFE28_CON1 0x1a16
+#define MT6357_LDO_VFE28_CON2 0x1a18
+#define MT6357_LDO_VFE28_CON3 0x1a1a
+#define MT6357_LDO_VRF18_CON0 0x1a1c
+#define MT6357_LDO_VRF18_OP_EN 0x1a1e
+#define MT6357_LDO_VRF18_OP_EN_SET 0x1a20
+#define MT6357_LDO_VRF18_OP_EN_CLR 0x1a22
+#define MT6357_LDO_VRF18_OP_CFG 0x1a24
+#define MT6357_LDO_VRF18_OP_CFG_SET 0x1a26
+#define MT6357_LDO_VRF18_OP_CFG_CLR 0x1a28
+#define MT6357_LDO_VRF18_CON1 0x1a2a
+#define MT6357_LDO_VRF18_CON2 0x1a2c
+#define MT6357_LDO_VRF18_CON3 0x1a2e
+#define MT6357_LDO_VRF12_CON0 0x1a30
+#define MT6357_LDO_VRF12_OP_EN 0x1a32
+#define MT6357_LDO_VRF12_OP_EN_SET 0x1a34
+#define MT6357_LDO_VRF12_OP_EN_CLR 0x1a36
+#define MT6357_LDO_VRF12_OP_CFG 0x1a38
+#define MT6357_LDO_VRF12_OP_CFG_SET 0x1a3a
+#define MT6357_LDO_VRF12_OP_CFG_CLR 0x1a3c
+#define MT6357_LDO_VRF12_CON1 0x1a3e
+#define MT6357_LDO_VRF12_CON2 0x1a40
+#define MT6357_LDO_VRF12_CON3 0x1a42
+#define MT6357_LDO_VEFUSE_CON0 0x1a44
+#define MT6357_LDO_VEFUSE_OP_EN 0x1a46
+#define MT6357_LDO_VEFUSE_OP_EN_SET 0x1a48
+#define MT6357_LDO_VEFUSE_OP_EN_CLR 0x1a4a
+#define MT6357_LDO_VEFUSE_OP_CFG 0x1a4c
+#define MT6357_LDO_VEFUSE_OP_CFG_SET 0x1a4e
+#define MT6357_LDO_VEFUSE_OP_CFG_CLR 0x1a50
+#define MT6357_LDO_VEFUSE_CON1 0x1a52
+#define MT6357_LDO_VEFUSE_CON2 0x1a54
+#define MT6357_LDO_VEFUSE_CON3 0x1a56
+#define MT6357_LDO_VCN18_CON0 0x1a58
+#define MT6357_LDO_VCN18_OP_EN 0x1a5a
+#define MT6357_LDO_VCN18_OP_EN_SET 0x1a5c
+#define MT6357_LDO_VCN18_OP_EN_CLR 0x1a5e
+#define MT6357_LDO_VCN18_OP_CFG 0x1a60
+#define MT6357_LDO_VCN18_OP_CFG_SET 0x1a62
+#define MT6357_LDO_VCN18_OP_CFG_CLR 0x1a64
+#define MT6357_LDO_VCN18_CON1 0x1a66
+#define MT6357_LDO_VCN18_CON2 0x1a68
+#define MT6357_LDO_VCN18_CON3 0x1a6a
+#define MT6357_LDO_VCAMA_CON0 0x1a6c
+#define MT6357_LDO_VCAMA_OP_EN 0x1a6e
+#define MT6357_LDO_VCAMA_OP_EN_SET 0x1a70
+#define MT6357_LDO_VCAMA_OP_EN_CLR 0x1a72
+#define MT6357_LDO_VCAMA_OP_CFG 0x1a74
+#define MT6357_LDO_VCAMA_OP_CFG_SET 0x1a76
+#define MT6357_LDO_VCAMA_OP_CFG_CLR 0x1a78
+#define MT6357_LDO_VCAMA_CON1 0x1a7a
+#define MT6357_LDO_VCAMA_CON2 0x1a7c
+#define MT6357_LDO_VCAMA_CON3 0x1a7e
+#define MT6357_LDO_GOFF1_DSN_ID 0x1a80
+#define MT6357_LDO_GOFF1_DSN_REV0 0x1a82
+#define MT6357_LDO_GOFF1_DSN_DBI 0x1a84
+#define MT6357_LDO_GOFF1_DSN_DXI 0x1a86
+#define MT6357_LDO_VCAMD_CON0 0x1a88
+#define MT6357_LDO_VCAMD_OP_EN 0x1a8a
+#define MT6357_LDO_VCAMD_OP_EN_SET 0x1a8c
+#define MT6357_LDO_VCAMD_OP_EN_CLR 0x1a8e
+#define MT6357_LDO_VCAMD_OP_CFG 0x1a90
+#define MT6357_LDO_VCAMD_OP_CFG_SET 0x1a92
+#define MT6357_LDO_VCAMD_OP_CFG_CLR 0x1a94
+#define MT6357_LDO_VCAMD_CON1 0x1a96
+#define MT6357_LDO_VCAMD_CON2 0x1a98
+#define MT6357_LDO_VCAMD_CON3 0x1a9a
+#define MT6357_LDO_VCAMIO_CON0 0x1a9c
+#define MT6357_LDO_VCAMIO_OP_EN 0x1a9e
+#define MT6357_LDO_VCAMIO_OP_EN_SET 0x1aa0
+#define MT6357_LDO_VCAMIO_OP_EN_CLR 0x1aa2
+#define MT6357_LDO_VCAMIO_OP_CFG 0x1aa4
+#define MT6357_LDO_VCAMIO_OP_CFG_SET 0x1aa6
+#define MT6357_LDO_VCAMIO_OP_CFG_CLR 0x1aa8
+#define MT6357_LDO_VCAMIO_CON1 0x1aaa
+#define MT6357_LDO_VCAMIO_CON2 0x1aac
+#define MT6357_LDO_VCAMIO_CON3 0x1aae
+#define MT6357_LDO_VMC_CON0 0x1ab0
+#define MT6357_LDO_VMC_OP_EN 0x1ab2
+#define MT6357_LDO_VMC_OP_EN_SET 0x1ab4
+#define MT6357_LDO_VMC_OP_EN_CLR 0x1ab6
+#define MT6357_LDO_VMC_OP_CFG 0x1ab8
+#define MT6357_LDO_VMC_OP_CFG_SET 0x1aba
+#define MT6357_LDO_VMC_OP_CFG_CLR 0x1abc
+#define MT6357_LDO_VMC_CON1 0x1abe
+#define MT6357_LDO_VMC_CON2 0x1ac0
+#define MT6357_LDO_VMC_CON3 0x1ac2
+#define MT6357_LDO_VMCH_CON0 0x1ac4
+#define MT6357_LDO_VMCH_OP_EN 0x1ac6
+#define MT6357_LDO_VMCH_OP_EN_SET 0x1ac8
+#define MT6357_LDO_VMCH_OP_EN_CLR 0x1aca
+#define MT6357_LDO_VMCH_OP_CFG 0x1acc
+#define MT6357_LDO_VMCH_OP_CFG_SET 0x1ace
+#define MT6357_LDO_VMCH_OP_CFG_CLR 0x1ad0
+#define MT6357_LDO_VMCH_CON1 0x1ad2
+#define MT6357_LDO_VMCH_CON2 0x1ad4
+#define MT6357_LDO_VMCH_CON3 0x1ad6
+#define MT6357_LDO_VSIM1_CON0 0x1ad8
+#define MT6357_LDO_VSIM1_OP_EN 0x1ada
+#define MT6357_LDO_VSIM1_OP_EN_SET 0x1adc
+#define MT6357_LDO_VSIM1_OP_EN_CLR 0x1ade
+#define MT6357_LDO_VSIM1_OP_CFG 0x1ae0
+#define MT6357_LDO_VSIM1_OP_CFG_SET 0x1ae2
+#define MT6357_LDO_VSIM1_OP_CFG_CLR 0x1ae4
+#define MT6357_LDO_VSIM1_CON1 0x1ae6
+#define MT6357_LDO_VSIM1_CON2 0x1ae8
+#define MT6357_LDO_VSIM1_CON3 0x1aea
+#define MT6357_LDO_VSIM2_CON0 0x1aec
+#define MT6357_LDO_VSIM2_OP_EN 0x1aee
+#define MT6357_LDO_VSIM2_OP_EN_SET 0x1af0
+#define MT6357_LDO_VSIM2_OP_EN_CLR 0x1af2
+#define MT6357_LDO_VSIM2_OP_CFG 0x1af4
+#define MT6357_LDO_VSIM2_OP_CFG_SET 0x1af6
+#define MT6357_LDO_VSIM2_OP_CFG_CLR 0x1af8
+#define MT6357_LDO_VSIM2_CON1 0x1afa
+#define MT6357_LDO_VSIM2_CON2 0x1afc
+#define MT6357_LDO_VSIM2_CON3 0x1afe
+#define MT6357_LDO_GOFF2_DSN_ID 0x1b00
+#define MT6357_LDO_GOFF2_DSN_REV0 0x1b02
+#define MT6357_LDO_GOFF2_DSN_DBI 0x1b04
+#define MT6357_LDO_GOFF2_DSN_DXI 0x1b06
+#define MT6357_LDO_VIBR_CON0 0x1b08
+#define MT6357_LDO_VIBR_OP_EN 0x1b0a
+#define MT6357_LDO_VIBR_OP_EN_SET 0x1b0c
+#define MT6357_LDO_VIBR_OP_EN_CLR 0x1b0e
+#define MT6357_LDO_VIBR_OP_CFG 0x1b10
+#define MT6357_LDO_VIBR_OP_CFG_SET 0x1b12
+#define MT6357_LDO_VIBR_OP_CFG_CLR 0x1b14
+#define MT6357_LDO_VIBR_CON1 0x1b16
+#define MT6357_LDO_VIBR_CON2 0x1b18
+#define MT6357_LDO_VIBR_CON3 0x1b1a
+#define MT6357_LDO_VCN33_CON0_0 0x1b1c
+#define MT6357_LDO_VCN33_OP_EN 0x1b1e
+#define MT6357_LDO_VCN33_OP_EN_SET 0x1b20
+#define MT6357_LDO_VCN33_OP_EN_CLR 0x1b22
+#define MT6357_LDO_VCN33_OP_CFG 0x1b24
+#define MT6357_LDO_VCN33_OP_CFG_SET 0x1b26
+#define MT6357_LDO_VCN33_OP_CFG_CLR 0x1b28
+#define MT6357_LDO_VCN33_CON0_1 0x1b2a
+#define MT6357_LDO_VCN33_CON1 0x1b2c
+#define MT6357_LDO_VCN33_CON2 0x1b2e
+#define MT6357_LDO_VCN33_CON3 0x1b30
+#define MT6357_LDO_VLDO28_CON0_0 0x1b32
+#define MT6357_LDO_VLDO28_OP_EN 0x1b34
+#define MT6357_LDO_VLDO28_OP_EN_SET 0x1b36
+#define MT6357_LDO_VLDO28_OP_EN_CLR 0x1b38
+#define MT6357_LDO_VLDO28_OP_CFG 0x1b3a
+#define MT6357_LDO_VLDO28_OP_CFG_SET 0x1b3c
+#define MT6357_LDO_VLDO28_OP_CFG_CLR 0x1b3e
+#define MT6357_LDO_VLDO28_CON0_1 0x1b40
+#define MT6357_LDO_VLDO28_CON1 0x1b42
+#define MT6357_LDO_VLDO28_CON2 0x1b44
+#define MT6357_LDO_VLDO28_CON3 0x1b46
+#define MT6357_LDO_GOFF2_RSV_CON0 0x1b48
+#define MT6357_LDO_GOFF2_RSV_CON1 0x1b4a
+#define MT6357_LDO_GOFF3_DSN_ID 0x1b80
+#define MT6357_LDO_GOFF3_DSN_REV0 0x1b82
+#define MT6357_LDO_GOFF3_DSN_DBI 0x1b84
+#define MT6357_LDO_GOFF3_DSN_DXI 0x1b86
+#define MT6357_LDO_VCN28_CON0 0x1b88
+#define MT6357_LDO_VCN28_OP_EN 0x1b8a
+#define MT6357_LDO_VCN28_OP_EN_SET 0x1b8c
+#define MT6357_LDO_VCN28_OP_EN_CLR 0x1b8e
+#define MT6357_LDO_VCN28_OP_CFG 0x1b90
+#define MT6357_LDO_VCN28_OP_CFG_SET 0x1b92
+#define MT6357_LDO_VCN28_OP_CFG_CLR 0x1b94
+#define MT6357_LDO_VCN28_CON1 0x1b96
+#define MT6357_LDO_VCN28_CON2 0x1b98
+#define MT6357_LDO_VCN28_CON3 0x1b9a
+#define MT6357_VRTC_CON0 0x1b9c
+#define MT6357_LDO_TREF_CON0 0x1b9e
+#define MT6357_LDO_TREF_OP_EN 0x1ba0
+#define MT6357_LDO_TREF_OP_EN_SET 0x1ba2
+#define MT6357_LDO_TREF_OP_EN_CLR 0x1ba4
+#define MT6357_LDO_TREF_OP_CFG 0x1ba6
+#define MT6357_LDO_TREF_OP_CFG_SET 0x1ba8
+#define MT6357_LDO_TREF_OP_CFG_CLR 0x1baa
+#define MT6357_LDO_TREF_CON1 0x1bac
+#define MT6357_LDO_GOFF3_RSV_CON0 0x1bae
+#define MT6357_LDO_GOFF3_RSV_CON1 0x1bb0
+#define MT6357_LDO_ANA0_DSN_ID 0x1c00
+#define MT6357_LDO_ANA0_DSN_REV0 0x1c02
+#define MT6357_LDO_ANA0_DSN_DBI 0x1c04
+#define MT6357_LDO_ANA0_DSN_DXI 0x1c06
+#define MT6357_VFE28_ANA_CON0 0x1c08
+#define MT6357_VFE28_ANA_CON1 0x1c0a
+#define MT6357_VCN28_ANA_CON0 0x1c0c
+#define MT6357_VCN28_ANA_CON1 0x1c0e
+#define MT6357_VAUD28_ANA_CON0 0x1c10
+#define MT6357_VAUD28_ANA_CON1 0x1c12
+#define MT6357_VAUX18_ANA_CON0 0x1c14
+#define MT6357_VAUX18_ANA_CON1 0x1c16
+#define MT6357_VXO22_ANA_CON0 0x1c18
+#define MT6357_VXO22_ANA_CON1 0x1c1a
+#define MT6357_VCN33_ANA_CON0 0x1c1c
+#define MT6357_VCN33_ANA_CON1 0x1c1e
+#define MT6357_VEMC_ANA_CON0 0x1c20
+#define MT6357_VEMC_ANA_CON1 0x1c22
+#define MT6357_VLDO28_ANA_CON0 0x1c24
+#define MT6357_VLDO28_ANA_CON1 0x1c26
+#define MT6357_VIO28_ANA_CON0 0x1c28
+#define MT6357_VIO28_ANA_CON1 0x1c2a
+#define MT6357_VIBR_ANA_CON0 0x1c2c
+#define MT6357_VIBR_ANA_CON1 0x1c2e
+#define MT6357_VSIM1_ANA_CON0 0x1c30
+#define MT6357_VSIM1_ANA_CON1 0x1c32
+#define MT6357_VSIM2_ANA_CON0 0x1c34
+#define MT6357_VSIM2_ANA_CON1 0x1c36
+#define MT6357_VMCH_ANA_CON0 0x1c38
+#define MT6357_VMCH_ANA_CON1 0x1c3a
+#define MT6357_VMC_ANA_CON0 0x1c3c
+#define MT6357_VMC_ANA_CON1 0x1c3e
+#define MT6357_VCAMIO_ANA_CON0 0x1c40
+#define MT6357_VCAMIO_ANA_CON1 0x1c42
+#define MT6357_VCN18_ANA_CON0 0x1c44
+#define MT6357_VCN18_ANA_CON1 0x1c46
+#define MT6357_VRF18_ANA_CON0 0x1c48
+#define MT6357_VRF18_ANA_CON1 0x1c4a
+#define MT6357_VIO18_ANA_CON0 0x1c4c
+#define MT6357_VIO18_ANA_CON1 0x1c4e
+#define MT6357_VDRAM_ANA_CON1 0x1c50
+#define MT6357_VRF12_ANA_CON0 0x1c52
+#define MT6357_VRF12_ANA_CON1 0x1c54
+#define MT6357_VSRAM_PROC_ANA_CON0 0x1c56
+#define MT6357_VSRAM_OTHERS_ANA_CON0 0x1c58
+#define MT6357_LDO_ANA0_ELR_NUM 0x1c5a
+#define MT6357_VFE28_ELR_0 0x1c5c
+#define MT6357_VCN28_ELR_0 0x1c5e
+#define MT6357_VAUD28_ELR_0 0x1c60
+#define MT6357_VAUX18_ELR_0 0x1c62
+#define MT6357_VXO22_ELR_0 0x1c64
+#define MT6357_VCN33_ELR_0 0x1c66
+#define MT6357_VEMC_ELR_0 0x1c68
+#define MT6357_VLDO28_ELR_0 0x1c6a
+#define MT6357_VIO28_ELR_0 0x1c6c
+#define MT6357_VIBR_ELR_0 0x1c6e
+#define MT6357_VSIM1_ELR_0 0x1c70
+#define MT6357_VSIM2_ELR_0 0x1c72
+#define MT6357_VMCH_ELR_0 0x1c74
+#define MT6357_VMC_ELR_0 0x1c76
+#define MT6357_VCAMIO_ELR_0 0x1c78
+#define MT6357_VCN18_ELR_0 0x1c7a
+#define MT6357_VRF18_ELR_0 0x1c7c
+#define MT6357_LDO_ANA1_DSN_ID 0x1c80
+#define MT6357_LDO_ANA1_DSN_REV0 0x1c82
+#define MT6357_LDO_ANA1_DSN_DBI 0x1c84
+#define MT6357_LDO_ANA1_DSN_DXI 0x1c86
+#define MT6357_VUSB33_ANA_CON0 0x1c88
+#define MT6357_VUSB33_ANA_CON1 0x1c8a
+#define MT6357_VCAMA_ANA_CON0 0x1c8c
+#define MT6357_VCAMA_ANA_CON1 0x1c8e
+#define MT6357_VEFUSE_ANA_CON0 0x1c90
+#define MT6357_VEFUSE_ANA_CON1 0x1c92
+#define MT6357_VCAMD_ANA_CON0 0x1c94
+#define MT6357_VCAMD_ANA_CON1 0x1c96
+#define MT6357_LDO_ANA1_ELR_NUM 0x1c98
+#define MT6357_VUSB33_ELR_0 0x1c9a
+#define MT6357_VCAMA_ELR_0 0x1c9c
+#define MT6357_VEFUSE_ELR_0 0x1c9e
+#define MT6357_VCAMD_ELR_0 0x1ca0
+#define MT6357_VIO18_ELR_0 0x1ca2
+#define MT6357_VDRAM_ELR_0 0x1ca4
+#define MT6357_VRF12_ELR_0 0x1ca6
+#define MT6357_VRTC_ELR_0 0x1ca8
+#define MT6357_VDRAM_ELR_1 0x1caa
+#define MT6357_VDRAM_ELR_2 0x1cac
+#define MT6357_XPP_TOP_ID 0x1e00
+#define MT6357_XPP_TOP_REV0 0x1e02
+#define MT6357_XPP_TOP_DBI 0x1e04
+#define MT6357_XPP_TOP_DXI 0x1e06
+#define MT6357_XPP_TPM0 0x1e08
+#define MT6357_XPP_TPM1 0x1e0a
+#define MT6357_XPP_TOP_TEST_OUT 0x1e0c
+#define MT6357_XPP_TOP_TEST_CON0 0x1e0e
+#define MT6357_XPP_TOP_CKPDN_CON0 0x1e10
+#define MT6357_XPP_TOP_CKPDN_CON0_SET 0x1e12
+#define MT6357_XPP_TOP_CKPDN_CON0_CLR 0x1e14
+#define MT6357_XPP_TOP_CKSEL_CON0 0x1e16
+#define MT6357_XPP_TOP_CKSEL_CON0_SET 0x1e18
+#define MT6357_XPP_TOP_CKSEL_CON0_CLR 0x1e1a
+#define MT6357_XPP_TOP_RST_CON0 0x1e1c
+#define MT6357_XPP_TOP_RST_CON0_SET 0x1e1e
+#define MT6357_XPP_TOP_RST_CON0_CLR 0x1e20
+#define MT6357_XPP_TOP_RST_BANK_CON0 0x1e22
+#define MT6357_XPP_TOP_RST_BANK_CON0_SET 0x1e24
+#define MT6357_XPP_TOP_RST_BANK_CON0_CLR 0x1e26
+#define MT6357_DRIVER_BL_DSN_ID 0x1e80
+#define MT6357_DRIVER_BL_DSN_REV0 0x1e82
+#define MT6357_DRIVER_BL_DSN_DBI 0x1e84
+#define MT6357_DRIVER_BL_DSN_DXI 0x1e86
+#define MT6357_ISINK1_CON0 0x1e88
+#define MT6357_ISINK1_CON1 0x1e8a
+#define MT6357_ISINK1_CON2 0x1e8c
+#define MT6357_ISINK1_CON3 0x1e8e
+#define MT6357_ISINK_ANA1 0x1e90
+#define MT6357_ISINK_PHASE_DLY 0x1e92
+#define MT6357_ISINK_SFSTR 0x1e94
+#define MT6357_ISINK_EN_CTRL 0x1e96
+#define MT6357_ISINK_MODE_CTRL 0x1e98
+#define MT6357_DRIVER_ANA_CON0 0x1e9a
+#define MT6357_ISINK_ANA_CON0 0x1e9c
+#define MT6357_ISINK_ANA_CON1 0x1e9e
+#define MT6357_DRIVER_BL_ELR_NUM 0x1ea0
+#define MT6357_DRIVER_BL_ELR_0 0x1ea2
+#define MT6357_DRIVER_CI_DSN_ID 0x1f00
+#define MT6357_DRIVER_CI_DSN_REV0 0x1f02
+#define MT6357_DRIVER_CI_DSN_DBI 0x1f04
+#define MT6357_DRIVER_CI_DSN_DXI 0x1f06
+#define MT6357_CHRIND_CON0 0x1f08
+#define MT6357_CHRIND_CON1 0x1f0a
+#define MT6357_CHRIND_CON2 0x1f0c
+#define MT6357_CHRIND_CON3 0x1f0e
+#define MT6357_CHRIND_CON4 0x1f10
+#define MT6357_CHRIND_EN_CTRL 0x1f12
+#define MT6357_CHRIND_ANA_CON0 0x1f14
+#define MT6357_DRIVER_DL_DSN_ID 0x1f80
+#define MT6357_DRIVER_DL_DSN_REV0 0x1f82
+#define MT6357_DRIVER_DL_DSN_DBI 0x1f84
+#define MT6357_DRIVER_DL_DSN_DXI 0x1f86
+#define MT6357_ISINK2_CON0 0x1f88
+#define MT6357_ISINK3_CON0 0x1f8a
+#define MT6357_ISINK_EN_CTRL_SMPL 0x1f8c
+#define MT6357_AUD_TOP_ID 0x2080
+#define MT6357_AUD_TOP_REV0 0x2082
+#define MT6357_AUD_TOP_DBI 0x2084
+#define MT6357_AUD_TOP_DXI 0x2086
+#define MT6357_AUD_TOP_CKPDN_TPM0 0x2088
+#define MT6357_AUD_TOP_CKPDN_TPM1 0x208a
+#define MT6357_AUD_TOP_CKPDN_CON0 0x208c
+#define MT6357_AUD_TOP_CKPDN_CON0_SET 0x208e
+#define MT6357_AUD_TOP_CKPDN_CON0_CLR 0x2090
+#define MT6357_AUD_TOP_CKSEL_CON0 0x2092
+#define MT6357_AUD_TOP_CKSEL_CON0_SET 0x2094
+#define MT6357_AUD_TOP_CKSEL_CON0_CLR 0x2096
+#define MT6357_AUD_TOP_CKTST_CON0 0x2098
+#define MT6357_AUD_TOP_RST_CON0 0x209a
+#define MT6357_AUD_TOP_RST_CON0_SET 0x209c
+#define MT6357_AUD_TOP_RST_CON0_CLR 0x209e
+#define MT6357_AUD_TOP_RST_BANK_CON0 0x20a0
+#define MT6357_AUD_TOP_INT_CON0 0x20a2
+#define MT6357_AUD_TOP_INT_CON0_SET 0x20a4
+#define MT6357_AUD_TOP_INT_CON0_CLR 0x20a6
+#define MT6357_AUD_TOP_INT_MASK_CON0 0x20a8
+#define MT6357_AUD_TOP_INT_MASK_CON0_SET 0x20aa
+#define MT6357_AUD_TOP_INT_MASK_CON0_CLR 0x20ac
+#define MT6357_AUD_TOP_INT_STATUS0 0x20ae
+#define MT6357_AUD_TOP_INT_RAW_STATUS0 0x20b0
+#define MT6357_AUD_TOP_INT_MISC_CON0 0x20b2
+#define MT6357_AUDNCP_CLKDIV_CON0 0x20b4
+#define MT6357_AUDNCP_CLKDIV_CON1 0x20b6
+#define MT6357_AUDNCP_CLKDIV_CON2 0x20b8
+#define MT6357_AUDNCP_CLKDIV_CON3 0x20ba
+#define MT6357_AUDNCP_CLKDIV_CON4 0x20bc
+#define MT6357_AUD_TOP_MON_CON0 0x20be
+#define MT6357_AUDIO_DIG_DSN_ID 0x2100
+#define MT6357_AUDIO_DIG_DSN_REV0 0x2102
+#define MT6357_AUDIO_DIG_DSN_DBI 0x2104
+#define MT6357_AUDIO_DIG_DSN_DXI 0x2106
+#define MT6357_AFE_UL_DL_CON0 0x2108
+#define MT6357_AFE_DL_SRC2_CON0_L 0x210a
+#define MT6357_AFE_UL_SRC_CON0_H 0x210c
+#define MT6357_AFE_UL_SRC_CON0_L 0x210e
+#define MT6357_AFE_TOP_CON0 0x2110
+#define MT6357_AUDIO_TOP_CON0 0x2112
+#define MT6357_AFE_MON_DEBUG0 0x2114
+#define MT6357_AFUNC_AUD_CON0 0x2116
+#define MT6357_AFUNC_AUD_CON1 0x2118
+#define MT6357_AFUNC_AUD_CON2 0x211a
+#define MT6357_AFUNC_AUD_CON3 0x211c
+#define MT6357_AFUNC_AUD_CON4 0x211e
+#define MT6357_AFUNC_AUD_CON5 0x2120
+#define MT6357_AFUNC_AUD_CON6 0x2122
+#define MT6357_AFUNC_AUD_MON0 0x2124
+#define MT6357_AUDRC_TUNE_MON0 0x2126
+#define MT6357_AFE_ADDA_MTKAIF_FIFO_CFG0 0x2128
+#define MT6357_AFE_ADDA_MTKAIF_FIFO_LOG_MON1 0x212a
+#define MT6357_AFE_ADDA_MTKAIF_MON0 0x212c
+#define MT6357_AFE_ADDA_MTKAIF_MON1 0x212e
+#define MT6357_AFE_ADDA_MTKAIF_MON2 0x2130
+#define MT6357_AFE_ADDA_MTKAIF_MON3 0x2132
+#define MT6357_AFE_ADDA_MTKAIF_CFG0 0x2134
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG0 0x2136
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG1 0x2138
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG2 0x213a
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG3 0x213c
+#define MT6357_AFE_ADDA_MTKAIF_TX_CFG1 0x213e
+#define MT6357_AFE_SGEN_CFG0 0x2140
+#define MT6357_AFE_SGEN_CFG1 0x2142
+#define MT6357_AFE_ADC_ASYNC_FIFO_CFG 0x2144
+#define MT6357_AFE_DCCLK_CFG0 0x2146
+#define MT6357_AFE_DCCLK_CFG1 0x2148
+#define MT6357_AUDIO_DIG_CFG 0x214a
+#define MT6357_AFE_AUD_PAD_TOP 0x214c
+#define MT6357_AFE_AUD_PAD_TOP_MON 0x214e
+#define MT6357_AFE_AUD_PAD_TOP_MON1 0x2150
+#define MT6357_AUDENC_DSN_ID 0x2180
+#define MT6357_AUDENC_DSN_REV0 0x2182
+#define MT6357_AUDENC_DSN_DBI 0x2184
+#define MT6357_AUDENC_DSN_FPI 0x2186
+#define MT6357_AUDENC_ANA_CON0 0x2188
+#define MT6357_AUDENC_ANA_CON1 0x218a
+#define MT6357_AUDENC_ANA_CON2 0x218c
+#define MT6357_AUDENC_ANA_CON3 0x218e
+#define MT6357_AUDENC_ANA_CON4 0x2190
+#define MT6357_AUDENC_ANA_CON5 0x2192
+#define MT6357_AUDENC_ANA_CON6 0x2194
+#define MT6357_AUDENC_ANA_CON7 0x2196
+#define MT6357_AUDENC_ANA_CON8 0x2198
+#define MT6357_AUDENC_ANA_CON9 0x219a
+#define MT6357_AUDENC_ANA_CON10 0x219c
+#define MT6357_AUDENC_ANA_CON11 0x219e
+#define MT6357_AUDDEC_DSN_ID 0x2200
+#define MT6357_AUDDEC_DSN_REV0 0x2202
+#define MT6357_AUDDEC_DSN_DBI 0x2204
+#define MT6357_AUDDEC_DSN_FPI 0x2206
+#define MT6357_AUDDEC_ANA_CON0 0x2208
+#define MT6357_AUDDEC_ANA_CON1 0x220a
+#define MT6357_AUDDEC_ANA_CON2 0x220c
+#define MT6357_AUDDEC_ANA_CON3 0x220e
+#define MT6357_AUDDEC_ANA_CON4 0x2210
+#define MT6357_AUDDEC_ANA_CON5 0x2212
+#define MT6357_AUDDEC_ANA_CON6 0x2214
+#define MT6357_AUDDEC_ANA_CON7 0x2216
+#define MT6357_AUDDEC_ANA_CON8 0x2218
+#define MT6357_AUDDEC_ANA_CON9 0x221a
+#define MT6357_AUDDEC_ANA_CON10 0x221c
+#define MT6357_AUDDEC_ANA_CON11 0x221e
+#define MT6357_AUDDEC_ANA_CON12 0x2220
+#define MT6357_AUDDEC_ANA_CON13 0x2222
+#define MT6357_AUDDEC_ELR_NUM 0x2224
+#define MT6357_AUDDEC_ELR_0 0x2226
+#define MT6357_AUDZCD_DSN_ID 0x2280
+#define MT6357_AUDZCD_DSN_REV0 0x2282
+#define MT6357_AUDZCD_DSN_DBI 0x2284
+#define MT6357_AUDZCD_DSN_FPI 0x2286
+#define MT6357_ZCD_CON0 0x2288
+#define MT6357_ZCD_CON1 0x228a
+#define MT6357_ZCD_CON2 0x228c
+#define MT6357_ZCD_CON3 0x228e
+#define MT6357_ZCD_CON4 0x2290
+#define MT6357_ZCD_CON5 0x2292
+#define MT6357_ACCDET_DSN_DIG_ID 0x2300
+#define MT6357_ACCDET_DSN_DIG_REV0 0x2302
+#define MT6357_ACCDET_DSN_DBI 0x2304
+#define MT6357_ACCDET_DSN_FPI 0x2306
+#define MT6357_ACCDET_CON0 0x2308
+#define MT6357_ACCDET_CON1 0x230a
+#define MT6357_ACCDET_CON2 0x230c
+#define MT6357_ACCDET_CON3 0x230e
+#define MT6357_ACCDET_CON4 0x2310
+#define MT6357_ACCDET_CON5 0x2312
+#define MT6357_ACCDET_CON6 0x2314
+#define MT6357_ACCDET_CON7 0x2316
+#define MT6357_ACCDET_CON8 0x2318
+#define MT6357_ACCDET_CON9 0x231a
+#define MT6357_ACCDET_CON10 0x231c
+#define MT6357_ACCDET_CON11 0x231e
+#define MT6357_ACCDET_CON12 0x2320
+#define MT6357_ACCDET_CON13 0x2322
+#define MT6357_ACCDET_CON14 0x2324
+#define MT6357_ACCDET_CON15 0x2326
+#define MT6357_ACCDET_CON16 0x2328
+#define MT6357_ACCDET_CON17 0x232a
+#define MT6357_ACCDET_CON18 0x232c
+#define MT6357_ACCDET_CON19 0x232e
+#define MT6357_ACCDET_CON20 0x2330
+#define MT6357_ACCDET_CON21 0x2332
+#define MT6357_ACCDET_CON22 0x2334
+#define MT6357_ACCDET_CON23 0x2336
+#define MT6357_ACCDET_CON24 0x2338
+#define MT6357_ACCDET_CON25 0x233a
+#define MT6357_ACCDET_CON26 0x233c
+#define MT6357_ACCDET_CON27 0x233e
+#define MT6357_ACCDET_CON28 0x2340
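+
+/*
+ * These offsets are normally accessed through the PMIC wrapper's
+ * regmap, e.g. regmap_read(regmap, MT6357_ACCDET_CON0, &val).
+ */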
+
+#endif /* __MFD_MT6357_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6358/core.h b/include/linux/mfd/mt6358/core.h
new file mode 100644
index 000000000000..68578e2019b0
--- /dev/null
+++ b/include/linux/mfd/mt6358/core.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6358_CORE_H__
+#define __MFD_MT6358_CORE_H__
+
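+/*
+ * Describes one interrupt group ("top"): its first hardware IRQ, how
+ * many enable/status registers it spans, and where they are located.
+ */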
+struct irq_top_t {
+ int hwirq_base;
+ unsigned int num_int_regs;
+ unsigned int en_reg;
+ unsigned int en_reg_shift;
+ unsigned int sta_reg;
+ unsigned int sta_reg_shift;
+ unsigned int top_offset;
+};
+
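+/*
+ * Driver-private interrupt bookkeeping: the group table (@pmic_ints),
+ * the total IRQ count, and per-hwirq enable state plus its cache.
+ */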
+struct pmic_irq_data {
+ unsigned int num_top;
+ unsigned int num_pmic_irqs;
+ unsigned short top_int_status_reg;
+ bool *enable_hwirq;
+ bool *cache_hwirq;
+ const struct irq_top_t *pmic_ints;
+};
+
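+/* Bit position of each group in the top-level interrupt status register. */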
+enum mt6358_irq_top_status_shift {
+ MT6358_BUCK_TOP = 0,
+ MT6358_LDO_TOP,
+ MT6358_PSC_TOP,
+ MT6358_SCK_TOP,
+ MT6358_BM_TOP,
+ MT6358_HK_TOP,
+ MT6358_AUD_TOP,
+ MT6358_MISC_TOP,
+};
+
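+/* Hardware IRQ numbers; explicit values align each group to a register boundary. */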
+enum mt6358_irq_numbers {
+ MT6358_IRQ_VPROC11_OC = 0,
+ MT6358_IRQ_VPROC12_OC,
+ MT6358_IRQ_VCORE_OC,
+ MT6358_IRQ_VGPU_OC,
+ MT6358_IRQ_VMODEM_OC,
+ MT6358_IRQ_VDRAM1_OC,
+ MT6358_IRQ_VS1_OC,
+ MT6358_IRQ_VS2_OC,
+ MT6358_IRQ_VPA_OC,
+ MT6358_IRQ_VCORE_PREOC,
+ MT6358_IRQ_VFE28_OC = 16,
+ MT6358_IRQ_VXO22_OC,
+ MT6358_IRQ_VRF18_OC,
+ MT6358_IRQ_VRF12_OC,
+ MT6358_IRQ_VEFUSE_OC,
+ MT6358_IRQ_VCN33_OC,
+ MT6358_IRQ_VCN28_OC,
+ MT6358_IRQ_VCN18_OC,
+ MT6358_IRQ_VCAMA1_OC,
+ MT6358_IRQ_VCAMA2_OC,
+ MT6358_IRQ_VCAMD_OC,
+ MT6358_IRQ_VCAMIO_OC,
+ MT6358_IRQ_VLDO28_OC,
+ MT6358_IRQ_VA12_OC,
+ MT6358_IRQ_VAUX18_OC,
+ MT6358_IRQ_VAUD28_OC,
+ MT6358_IRQ_VIO28_OC,
+ MT6358_IRQ_VIO18_OC,
+ MT6358_IRQ_VSRAM_PROC11_OC,
+ MT6358_IRQ_VSRAM_PROC12_OC,
+ MT6358_IRQ_VSRAM_OTHERS_OC,
+ MT6358_IRQ_VSRAM_GPU_OC,
+ MT6358_IRQ_VDRAM2_OC,
+ MT6358_IRQ_VMC_OC,
+ MT6358_IRQ_VMCH_OC,
+ MT6358_IRQ_VEMC_OC,
+ MT6358_IRQ_VSIM1_OC,
+ MT6358_IRQ_VSIM2_OC,
+ MT6358_IRQ_VIBR_OC,
+ MT6358_IRQ_VUSB_OC,
+ MT6358_IRQ_VBIF28_OC,
+ MT6358_IRQ_PWRKEY = 48,
+ MT6358_IRQ_HOMEKEY,
+ MT6358_IRQ_PWRKEY_R,
+ MT6358_IRQ_HOMEKEY_R,
+ MT6358_IRQ_NI_LBAT_INT,
+ MT6358_IRQ_CHRDET,
+ MT6358_IRQ_CHRDET_EDGE,
+ MT6358_IRQ_VCDT_HV_DET,
+ MT6358_IRQ_RTC = 64,
+ MT6358_IRQ_FG_BAT0_H = 80,
+ MT6358_IRQ_FG_BAT0_L,
+ MT6358_IRQ_FG_CUR_H,
+ MT6358_IRQ_FG_CUR_L,
+ MT6358_IRQ_FG_ZCV,
+ MT6358_IRQ_FG_BAT1_H,
+ MT6358_IRQ_FG_BAT1_L,
+ MT6358_IRQ_FG_N_CHARGE_L,
+ MT6358_IRQ_FG_IAVG_H,
+ MT6358_IRQ_FG_IAVG_L,
+ MT6358_IRQ_FG_TIME_H,
+ MT6358_IRQ_FG_DISCHARGE,
+ MT6358_IRQ_FG_CHARGE,
+ MT6358_IRQ_BATON_LV = 96,
+ MT6358_IRQ_BATON_HT,
+ MT6358_IRQ_BATON_BAT_IN,
+ MT6358_IRQ_BATON_BAT_OUT,
+ MT6358_IRQ_BIF,
+ MT6358_IRQ_BAT_H = 112,
+ MT6358_IRQ_BAT_L,
+ MT6358_IRQ_BAT2_H,
+ MT6358_IRQ_BAT2_L,
+ MT6358_IRQ_BAT_TEMP_H,
+ MT6358_IRQ_BAT_TEMP_L,
+ MT6358_IRQ_AUXADC_IMP,
+ MT6358_IRQ_NAG_C_DLTV,
+ MT6358_IRQ_AUDIO = 128,
+ MT6358_IRQ_ACCDET = 133,
+ MT6358_IRQ_ACCDET_EINT0,
+ MT6358_IRQ_ACCDET_EINT1,
+ MT6358_IRQ_SPI_CMD_ALERT = 144,
+ MT6358_IRQ_NR,
+};
+
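+/* First hardware IRQ of each group, consumed by MT6358_TOP_GEN() below. */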
+#define MT6358_IRQ_BUCK_BASE MT6358_IRQ_VPROC11_OC
+#define MT6358_IRQ_LDO_BASE MT6358_IRQ_VFE28_OC
+#define MT6358_IRQ_PSC_BASE MT6358_IRQ_PWRKEY
+#define MT6358_IRQ_SCK_BASE MT6358_IRQ_RTC
+#define MT6358_IRQ_BM_BASE MT6358_IRQ_FG_BAT0_H
+#define MT6358_IRQ_HK_BASE MT6358_IRQ_BAT_H
+#define MT6358_IRQ_AUD_BASE MT6358_IRQ_AUDIO
+#define MT6358_IRQ_MISC_BASE MT6358_IRQ_SPI_CMD_ALERT
+
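+/* Number of interrupt bits per group: last IRQ minus base, plus one. */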
+#define MT6358_IRQ_BUCK_BITS (MT6358_IRQ_VCORE_PREOC - MT6358_IRQ_BUCK_BASE + 1)
+#define MT6358_IRQ_LDO_BITS (MT6358_IRQ_VBIF28_OC - MT6358_IRQ_LDO_BASE + 1)
+#define MT6358_IRQ_PSC_BITS (MT6358_IRQ_VCDT_HV_DET - MT6358_IRQ_PSC_BASE + 1)
+#define MT6358_IRQ_SCK_BITS (MT6358_IRQ_RTC - MT6358_IRQ_SCK_BASE + 1)
+#define MT6358_IRQ_BM_BITS (MT6358_IRQ_BIF - MT6358_IRQ_BM_BASE + 1)
+#define MT6358_IRQ_HK_BITS (MT6358_IRQ_NAG_C_DLTV - MT6358_IRQ_HK_BASE + 1)
+#define MT6358_IRQ_AUD_BITS (MT6358_IRQ_ACCDET_EINT1 - MT6358_IRQ_AUD_BASE + 1)
+#define MT6358_IRQ_MISC_BITS \
+ (MT6358_IRQ_SPI_CMD_ALERT - MT6358_IRQ_MISC_BASE + 1)
+
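+/*
+ * Expands to an irq_top_t initializer for group @sp; num_int_regs is
+ * the group's bit count divided by MTK_PMIC_REG_WIDTH, rounded up.
+ */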
+#define MT6358_TOP_GEN(sp) \
+{ \
+ .hwirq_base = MT6358_IRQ_##sp##_BASE, \
+ .num_int_regs = \
+ ((MT6358_IRQ_##sp##_BITS - 1) / \
+ MTK_PMIC_REG_WIDTH) + 1, \
+ .en_reg = MT6358_##sp##_TOP_INT_CON0, \
+ .en_reg_shift = 0x6, \
+ .sta_reg = MT6358_##sp##_TOP_INT_STATUS0, \
+ .sta_reg_shift = 0x2, \
+ .top_offset = MT6358_##sp##_TOP, \
+}
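+
+/*
+ * Typical use is one initializer per group in the IRQ chip driver,
+ * e.g. { MT6358_TOP_GEN(BUCK), MT6358_TOP_GEN(LDO), ... }.
+ */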
+
+#endif /* __MFD_MT6358_CORE_H__ */
diff --git a/include/linux/mfd/mt6358/registers.h b/include/linux/mfd/mt6358/registers.h
new file mode 100644
index 000000000000..3d33517f178c
--- /dev/null
+++ b/include/linux/mfd/mt6358/registers.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6358_REGISTERS_H__
+#define __MFD_MT6358_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6358_SWCID 0xa
+#define MT6358_TOPSTATUS 0x28
+#define MT6358_TOP_RST_MISC 0x14c
+#define MT6358_MISC_TOP_INT_CON0 0x188
+#define MT6358_MISC_TOP_INT_STATUS0 0x194
+#define MT6358_TOP_INT_STATUS0 0x19e
+#define MT6358_SCK_TOP_INT_CON0 0x52e
+#define MT6358_SCK_TOP_INT_STATUS0 0x53a
+#define MT6358_EOSC_CALI_CON0 0x540
+#define MT6358_EOSC_CALI_CON1 0x542
+#define MT6358_RTC_MIX_CON0 0x544
+#define MT6358_RTC_MIX_CON1 0x546
+#define MT6358_RTC_MIX_CON2 0x548
+#define MT6358_RTC_DSN_ID 0x580
+#define MT6358_RTC_DSN_REV0 0x582
+#define MT6358_RTC_DBI 0x584
+#define MT6358_RTC_DXI 0x586
+#define MT6358_RTC_BBPU 0x588
+#define MT6358_RTC_IRQ_STA 0x58a
+#define MT6358_RTC_IRQ_EN 0x58c
+#define MT6358_RTC_CII_EN 0x58e
+#define MT6358_RTC_AL_MASK 0x590
+#define MT6358_RTC_TC_SEC 0x592
+#define MT6358_RTC_TC_MIN 0x594
+#define MT6358_RTC_TC_HOU 0x596
+#define MT6358_RTC_TC_DOM 0x598
+#define MT6358_RTC_TC_DOW 0x59a
+#define MT6358_RTC_TC_MTH 0x59c
+#define MT6358_RTC_TC_YEA 0x59e
+#define MT6358_RTC_AL_SEC 0x5a0
+#define MT6358_RTC_AL_MIN 0x5a2
+#define MT6358_RTC_AL_HOU 0x5a4
+#define MT6358_RTC_AL_DOM 0x5a6
+#define MT6358_RTC_AL_DOW 0x5a8
+#define MT6358_RTC_AL_MTH 0x5aa
+#define MT6358_RTC_AL_YEA 0x5ac
+#define MT6358_RTC_OSC32CON 0x5ae
+#define MT6358_RTC_POWERKEY1 0x5b0
+#define MT6358_RTC_POWERKEY2 0x5b2
+#define MT6358_RTC_PDN1 0x5b4
+#define MT6358_RTC_PDN2 0x5b6
+#define MT6358_RTC_SPAR0 0x5b8
+#define MT6358_RTC_SPAR1 0x5ba
+#define MT6358_RTC_PROT 0x5bc
+#define MT6358_RTC_DIFF 0x5be
+#define MT6358_RTC_CALI 0x5c0
+#define MT6358_RTC_WRTGR 0x5c2
+#define MT6358_RTC_CON 0x5c4
+#define MT6358_RTC_SEC_CTRL 0x5c6
+#define MT6358_RTC_INT_CNT 0x5c8
+#define MT6358_RTC_SEC_DAT0 0x5ca
+#define MT6358_RTC_SEC_DAT1 0x5cc
+#define MT6358_RTC_SEC_DAT2 0x5ce
+#define MT6358_RTC_SEC_DSN_ID 0x600
+#define MT6358_RTC_SEC_DSN_REV0 0x602
+#define MT6358_RTC_SEC_DBI 0x604
+#define MT6358_RTC_SEC_DXI 0x606
+#define MT6358_RTC_TC_SEC_SEC 0x608
+#define MT6358_RTC_TC_MIN_SEC 0x60a
+#define MT6358_RTC_TC_HOU_SEC 0x60c
+#define MT6358_RTC_TC_DOM_SEC 0x60e
+#define MT6358_RTC_TC_DOW_SEC 0x610
+#define MT6358_RTC_TC_MTH_SEC 0x612
+#define MT6358_RTC_TC_YEA_SEC 0x614
+#define MT6358_RTC_SEC_CK_PDN 0x616
+#define MT6358_RTC_SEC_WRTGR 0x618
+#define MT6358_PSC_TOP_INT_CON0 0x910
+#define MT6358_PSC_TOP_INT_STATUS0 0x91c
+#define MT6358_BM_TOP_INT_CON0 0xc32
+#define MT6358_BM_TOP_INT_CON1 0xc38
+#define MT6358_BM_TOP_INT_STATUS0 0xc4a
+#define MT6358_BM_TOP_INT_STATUS1 0xc4c
+#define MT6358_HK_TOP_INT_CON0 0xf92
+#define MT6358_HK_TOP_INT_STATUS0 0xf9e
+#define MT6358_BUCK_TOP_INT_CON0 0x1318
+#define MT6358_BUCK_TOP_INT_STATUS0 0x1324
+#define MT6358_BUCK_VPROC11_CON0 0x1388
+#define MT6358_BUCK_VPROC11_DBG0 0x139e
+#define MT6358_BUCK_VPROC11_DBG1 0x13a0
+#define MT6358_BUCK_VPROC11_ELR0 0x13a6
+#define MT6358_BUCK_VPROC12_CON0 0x1408
+#define MT6358_BUCK_VPROC12_DBG0 0x141e
+#define MT6358_BUCK_VPROC12_DBG1 0x1420
+#define MT6358_BUCK_VPROC12_ELR0 0x1426
+#define MT6358_BUCK_VCORE_CON0 0x1488
+#define MT6358_BUCK_VCORE_DBG0 0x149e
+#define MT6358_BUCK_VCORE_DBG1 0x14a0
+#define MT6358_BUCK_VCORE_SSHUB_CON0 0x14a4
+#define MT6358_BUCK_VCORE_SSHUB_CON1 0x14a6
+#define MT6358_BUCK_VCORE_SSHUB_ELR0 MT6358_BUCK_VCORE_SSHUB_CON1
+#define MT6358_BUCK_VCORE_SSHUB_DBG1 MT6358_BUCK_VCORE_DBG1
+#define MT6358_BUCK_VCORE_ELR0 0x14aa
+#define MT6358_BUCK_VGPU_CON0 0x1508
+#define MT6358_BUCK_VGPU_DBG0 0x151e
+#define MT6358_BUCK_VGPU_DBG1 0x1520
+#define MT6358_BUCK_VGPU_ELR0 0x1526
+#define MT6358_BUCK_VMODEM_CON0 0x1588
+#define MT6358_BUCK_VMODEM_DBG0 0x159e
+#define MT6358_BUCK_VMODEM_DBG1 0x15a0
+#define MT6358_BUCK_VMODEM_ELR0 0x15a6
+#define MT6358_BUCK_VDRAM1_CON0 0x1608
+#define MT6358_BUCK_VDRAM1_DBG0 0x161e
+#define MT6358_BUCK_VDRAM1_DBG1 0x1620
+#define MT6358_BUCK_VDRAM1_ELR0 0x1626
+#define MT6358_BUCK_VS1_CON0 0x1688
+#define MT6358_BUCK_VS1_DBG0 0x169e
+#define MT6358_BUCK_VS1_DBG1 0x16a0
+#define MT6358_BUCK_VS1_ELR0 0x16ae
+#define MT6358_BUCK_VS2_CON0 0x1708
+#define MT6358_BUCK_VS2_DBG0 0x171e
+#define MT6358_BUCK_VS2_DBG1 0x1720
+#define MT6358_BUCK_VS2_ELR0 0x172e
+#define MT6358_BUCK_VPA_CON0 0x1788
+#define MT6358_BUCK_VPA_CON1 0x178a
+#define MT6358_BUCK_VPA_ELR0 MT6358_BUCK_VPA_CON1
+#define MT6358_BUCK_VPA_DBG0 0x1792
+#define MT6358_BUCK_VPA_DBG1 0x1794
+#define MT6358_VPROC_ANA_CON0 0x180c
+#define MT6358_VCORE_VGPU_ANA_CON0 0x1828
+#define MT6358_VMODEM_ANA_CON0 0x1888
+#define MT6358_VDRAM1_ANA_CON0 0x1896
+#define MT6358_VS1_ANA_CON0 0x18a2
+#define MT6358_VS2_ANA_CON0 0x18ae
+#define MT6358_VPA_ANA_CON0 0x18ba
+#define MT6358_LDO_TOP_INT_CON0 0x1a50
+#define MT6358_LDO_TOP_INT_CON1 0x1a56
+#define MT6358_LDO_TOP_INT_STATUS0 0x1a68
+#define MT6358_LDO_TOP_INT_STATUS1 0x1a6a
+#define MT6358_LDO_VXO22_CON0 0x1a88
+#define MT6358_LDO_VXO22_CON1 0x1a96
+#define MT6358_LDO_VA12_CON0 0x1a9c
+#define MT6358_LDO_VA12_CON1 0x1aaa
+#define MT6358_LDO_VAUX18_CON0 0x1ab0
+#define MT6358_LDO_VAUX18_CON1 0x1abe
+#define MT6358_LDO_VAUD28_CON0 0x1ac4
+#define MT6358_LDO_VAUD28_CON1 0x1ad2
+#define MT6358_LDO_VIO28_CON0 0x1ad8
+#define MT6358_LDO_VIO28_CON1 0x1ae6
+#define MT6358_LDO_VIO18_CON0 0x1aec
+#define MT6358_LDO_VIO18_CON1 0x1afa
+#define MT6358_LDO_VDRAM2_CON0 0x1b08
+#define MT6358_LDO_VDRAM2_CON1 0x1b16
+#define MT6358_LDO_VEMC_CON0 0x1b1c
+#define MT6358_LDO_VEMC_CON1 0x1b2a
+#define MT6358_LDO_VUSB_CON0_0 0x1b30
+#define MT6358_LDO_VUSB_CON1 0x1b40
+#define MT6358_LDO_VSRAM_PROC11_CON0 0x1b46
+#define MT6358_LDO_VSRAM_PROC11_DBG0 0x1b60
+#define MT6358_LDO_VSRAM_PROC11_DBG1 0x1b62
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON0 0x1b64
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON1 0x1b66
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON2 0x1b68
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON3 0x1b6a
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON0 0x1b6c
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON1 0x1b6e
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON2 0x1b70
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON3 0x1b72
+#define MT6358_LDO_VSRAM_WAKEUP_CON0 0x1b74
+#define MT6358_LDO_GON1_ELR_NUM 0x1b76
+#define MT6358_LDO_VDRAM2_ELR0 0x1b78
+#define MT6358_LDO_VSRAM_PROC12_CON0 0x1b88
+#define MT6358_LDO_VSRAM_PROC12_DBG0 0x1ba2
+#define MT6358_LDO_VSRAM_PROC12_DBG1 0x1ba4
+#define MT6358_LDO_VSRAM_OTHERS_CON0 0x1ba6
+#define MT6358_LDO_VSRAM_OTHERS_DBG0 0x1bc0
+#define MT6358_LDO_VSRAM_OTHERS_DBG1 0x1bc2
+#define MT6358_LDO_VSRAM_OTHERS_SSHUB_CON0 0x1bc4
+#define MT6358_LDO_VSRAM_OTHERS_SSHUB_CON1 0x1bc6
+#define MT6358_LDO_VSRAM_OTHERS_SSHUB_DBG1 MT6358_LDO_VSRAM_OTHERS_DBG1
+#define MT6358_LDO_VSRAM_GPU_CON0 0x1bc8
+#define MT6358_LDO_VSRAM_GPU_DBG0 0x1be2
+#define MT6358_LDO_VSRAM_GPU_DBG1 0x1be4
+#define MT6358_LDO_VSRAM_CON0 0x1bee
+#define MT6358_LDO_VSRAM_CON1 0x1bf0
+#define MT6358_LDO_VSRAM_CON2 0x1bf2
+#define MT6358_LDO_VSRAM_CON3 0x1bf4
+#define MT6358_LDO_VFE28_CON0 0x1c08
+#define MT6358_LDO_VFE28_CON1 0x1c16
+#define MT6358_LDO_VFE28_CON2 0x1c18
+#define MT6358_LDO_VFE28_CON3 0x1c1a
+#define MT6358_LDO_VRF18_CON0 0x1c1c
+#define MT6358_LDO_VRF18_CON1 0x1c2a
+#define MT6358_LDO_VRF18_CON2 0x1c2c
+#define MT6358_LDO_VRF18_CON3 0x1c2e
+#define MT6358_LDO_VRF12_CON0 0x1c30
+#define MT6358_LDO_VRF12_CON1 0x1c3e
+#define MT6358_LDO_VRF12_CON2 0x1c40
+#define MT6358_LDO_VRF12_CON3 0x1c42
+#define MT6358_LDO_VEFUSE_CON0 0x1c44
+#define MT6358_LDO_VEFUSE_CON1 0x1c52
+#define MT6358_LDO_VEFUSE_CON2 0x1c54
+#define MT6358_LDO_VEFUSE_CON3 0x1c56
+#define MT6358_LDO_VCN18_CON0 0x1c58
+#define MT6358_LDO_VCN18_CON1 0x1c66
+#define MT6358_LDO_VCN18_CON2 0x1c68
+#define MT6358_LDO_VCN18_CON3 0x1c6a
+#define MT6358_LDO_VCAMA1_CON0 0x1c6c
+#define MT6358_LDO_VCAMA1_CON1 0x1c7a
+#define MT6358_LDO_VCAMA1_CON2 0x1c7c
+#define MT6358_LDO_VCAMA1_CON3 0x1c7e
+#define MT6358_LDO_VCAMA2_CON0 0x1c88
+#define MT6358_LDO_VCAMA2_CON1 0x1c96
+#define MT6358_LDO_VCAMA2_CON2 0x1c98
+#define MT6358_LDO_VCAMA2_CON3 0x1c9a
+#define MT6358_LDO_VCAMD_CON0 0x1c9c
+#define MT6358_LDO_VCAMD_CON1 0x1caa
+#define MT6358_LDO_VCAMD_CON2 0x1cac
+#define MT6358_LDO_VCAMD_CON3 0x1cae
+#define MT6358_LDO_VCAMIO_CON0 0x1cb0
+#define MT6358_LDO_VCAMIO_CON1 0x1cbe
+#define MT6358_LDO_VCAMIO_CON2 0x1cc0
+#define MT6358_LDO_VCAMIO_CON3 0x1cc2
+#define MT6358_LDO_VMC_CON0 0x1cc4
+#define MT6358_LDO_VMC_CON1 0x1cd2
+#define MT6358_LDO_VMC_CON2 0x1cd4
+#define MT6358_LDO_VMC_CON3 0x1cd6
+#define MT6358_LDO_VMCH_CON0 0x1cd8
+#define MT6358_LDO_VMCH_CON1 0x1ce6
+#define MT6358_LDO_VMCH_CON2 0x1ce8
+#define MT6358_LDO_VMCH_CON3 0x1cea
+#define MT6358_LDO_VIBR_CON0 0x1d08
+#define MT6358_LDO_VIBR_CON1 0x1d16
+#define MT6358_LDO_VIBR_CON2 0x1d18
+#define MT6358_LDO_VIBR_CON3 0x1d1a
+#define MT6358_LDO_VCN33_CON0_0 0x1d1c
+#define MT6358_LDO_VCN33_CON0_1 0x1d2a
+#define MT6358_LDO_VCN33_CON1 0x1d2c
+#define MT6358_LDO_VCN33_BT_CON1 MT6358_LDO_VCN33_CON1
+#define MT6358_LDO_VCN33_WIFI_CON1 MT6358_LDO_VCN33_CON1
+#define MT6358_LDO_VCN33_CON2 0x1d2e
+#define MT6358_LDO_VCN33_CON3 0x1d30
+#define MT6358_LDO_VLDO28_CON0_0 0x1d32
+#define MT6358_LDO_VLDO28_CON0_1 0x1d40
+#define MT6358_LDO_VLDO28_CON1 0x1d42
+#define MT6358_LDO_VLDO28_CON2 0x1d44
+#define MT6358_LDO_VLDO28_CON3 0x1d46
+#define MT6358_LDO_VSIM1_CON0 0x1d48
+#define MT6358_LDO_VSIM1_CON1 0x1d56
+#define MT6358_LDO_VSIM1_CON2 0x1d58
+#define MT6358_LDO_VSIM1_CON3 0x1d5a
+#define MT6358_LDO_VSIM2_CON0 0x1d5c
+#define MT6358_LDO_VSIM2_CON1 0x1d6a
+#define MT6358_LDO_VSIM2_CON2 0x1d6c
+#define MT6358_LDO_VSIM2_CON3 0x1d6e
+#define MT6358_LDO_VCN28_CON0 0x1d88
+#define MT6358_LDO_VCN28_CON1 0x1d96
+#define MT6358_LDO_VCN28_CON2 0x1d98
+#define MT6358_LDO_VCN28_CON3 0x1d9a
+#define MT6358_VRTC28_CON0 0x1d9c
+#define MT6358_LDO_VBIF28_CON0 0x1d9e
+#define MT6358_LDO_VBIF28_CON1 0x1dac
+#define MT6358_LDO_VBIF28_CON2 0x1dae
+#define MT6358_LDO_VBIF28_CON3 0x1db0
+#define MT6358_VCAMA1_ANA_CON0 0x1e08
+#define MT6358_VCAMA2_ANA_CON0 0x1e0c
+#define MT6358_VCN33_ANA_CON0 0x1e28
+#define MT6358_VSIM1_ANA_CON0 0x1e2c
+#define MT6358_VSIM2_ANA_CON0 0x1e30
+#define MT6358_VUSB_ANA_CON0 0x1e34
+#define MT6358_VEMC_ANA_CON0 0x1e38
+#define MT6358_VLDO28_ANA_CON0 0x1e3c
+#define MT6358_VIO28_ANA_CON0 0x1e40
+#define MT6358_VIBR_ANA_CON0 0x1e44
+#define MT6358_VMCH_ANA_CON0 0x1e48
+#define MT6358_VMC_ANA_CON0 0x1e4c
+#define MT6358_VRF18_ANA_CON0 0x1e88
+#define MT6358_VCN18_ANA_CON0 0x1e8c
+#define MT6358_VCAMIO_ANA_CON0 0x1e90
+#define MT6358_VIO18_ANA_CON0 0x1e94
+#define MT6358_VEFUSE_ANA_CON0 0x1e98
+#define MT6358_VRF12_ANA_CON0 0x1e9c
+#define MT6358_VSRAM_PROC11_ANA_CON0 0x1ea0
+#define MT6358_VSRAM_PROC12_ANA_CON0 0x1ea4
+#define MT6358_VSRAM_OTHERS_ANA_CON0 0x1ea6
+#define MT6358_VSRAM_GPU_ANA_CON0 0x1ea8
+#define MT6358_VDRAM2_ANA_CON0 0x1eaa
+#define MT6358_VCAMD_ANA_CON0 0x1eae
+#define MT6358_VA12_ANA_CON0 0x1eb2
+#define MT6358_AUD_TOP_INT_CON0 0x2228
+#define MT6358_AUD_TOP_INT_STATUS0 0x2234
+
+#endif /* __MFD_MT6358_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6359/core.h b/include/linux/mfd/mt6359/core.h
new file mode 100644
index 000000000000..8d298868126d
--- /dev/null
+++ b/include/linux/mfd/mt6359/core.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6359_CORE_H__
+#define __MFD_MT6359_CORE_H__
+
+enum mt6359_irq_top_status_shift {
+ MT6359_BUCK_TOP = 0,
+ MT6359_LDO_TOP,
+ MT6359_PSC_TOP,
+ MT6359_SCK_TOP,
+ MT6359_BM_TOP,
+ MT6359_HK_TOP,
+ MT6359_AUD_TOP = 7, /* shift 6 is skipped */
+ MT6359_MISC_TOP,
+};
+
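+/* Hardware IRQ numbers; hwirq 0 of the BUCK group is unused. */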
+enum mt6359_irq_numbers {
+ MT6359_IRQ_VCORE_OC = 1,
+ MT6359_IRQ_VGPU11_OC,
+ MT6359_IRQ_VGPU12_OC,
+ MT6359_IRQ_VMODEM_OC,
+ MT6359_IRQ_VPROC1_OC,
+ MT6359_IRQ_VPROC2_OC,
+ MT6359_IRQ_VS1_OC,
+ MT6359_IRQ_VS2_OC,
+ MT6359_IRQ_VPA_OC = 9,
+ MT6359_IRQ_VFE28_OC = 16,
+ MT6359_IRQ_VXO22_OC,
+ MT6359_IRQ_VRF18_OC,
+ MT6359_IRQ_VRF12_OC,
+ MT6359_IRQ_VEFUSE_OC,
+ MT6359_IRQ_VCN33_1_OC,
+ MT6359_IRQ_VCN33_2_OC,
+ MT6359_IRQ_VCN13_OC,
+ MT6359_IRQ_VCN18_OC,
+ MT6359_IRQ_VA09_OC,
+ MT6359_IRQ_VCAMIO_OC,
+ MT6359_IRQ_VA12_OC,
+ MT6359_IRQ_VAUX18_OC,
+ MT6359_IRQ_VAUD18_OC,
+ MT6359_IRQ_VIO18_OC,
+ MT6359_IRQ_VSRAM_PROC1_OC,
+ MT6359_IRQ_VSRAM_PROC2_OC,
+ MT6359_IRQ_VSRAM_OTHERS_OC,
+ MT6359_IRQ_VSRAM_MD_OC,
+ MT6359_IRQ_VEMC_OC,
+ MT6359_IRQ_VSIM1_OC,
+ MT6359_IRQ_VSIM2_OC,
+ MT6359_IRQ_VUSB_OC,
+ MT6359_IRQ_VRFCK_OC,
+ MT6359_IRQ_VBBCK_OC,
+ MT6359_IRQ_VBIF28_OC,
+ MT6359_IRQ_VIBR_OC,
+ MT6359_IRQ_VIO28_OC,
+ MT6359_IRQ_VM18_OC,
+ MT6359_IRQ_VUFS_OC = 45,
+ MT6359_IRQ_PWRKEY = 48,
+ MT6359_IRQ_HOMEKEY,
+ MT6359_IRQ_PWRKEY_R,
+ MT6359_IRQ_HOMEKEY_R,
+ MT6359_IRQ_NI_LBAT_INT,
+ MT6359_IRQ_CHRDET_EDGE = 53,
+ MT6359_IRQ_RTC = 64,
+ MT6359_IRQ_FG_BAT_H = 80,
+ MT6359_IRQ_FG_BAT_L,
+ MT6359_IRQ_FG_CUR_H,
+ MT6359_IRQ_FG_CUR_L,
+ MT6359_IRQ_FG_ZCV = 84,
+ MT6359_IRQ_FG_N_CHARGE_L = 87,
+ MT6359_IRQ_FG_IAVG_H,
+ MT6359_IRQ_FG_IAVG_L = 89,
+ MT6359_IRQ_FG_DISCHARGE = 91,
+ MT6359_IRQ_FG_CHARGE,
+ MT6359_IRQ_BATON_LV = 96,
+ MT6359_IRQ_BATON_BAT_IN = 98,
+ MT6359_IRQ_BATON_BAT_OU,
+ MT6359_IRQ_BIF = 100,
+ MT6359_IRQ_BAT_H = 112,
+ MT6359_IRQ_BAT_L,
+ MT6359_IRQ_BAT2_H,
+ MT6359_IRQ_BAT2_L,
+ MT6359_IRQ_BAT_TEMP_H,
+ MT6359_IRQ_BAT_TEMP_L,
+ MT6359_IRQ_THR_H,
+ MT6359_IRQ_THR_L,
+ MT6359_IRQ_AUXADC_IMP,
+ MT6359_IRQ_NAG_C_DLTV = 121,
+ MT6359_IRQ_AUDIO = 128,
+ MT6359_IRQ_ACCDET = 133,
+ MT6359_IRQ_ACCDET_EINT0,
+ MT6359_IRQ_ACCDET_EINT1,
+ MT6359_IRQ_SPI_CMD_ALERT = 144,
+ MT6359_IRQ_NR,
+};
+
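+/* Group bases and bit counts, mirroring the MT6358 definitions above. */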
+#define MT6359_IRQ_BUCK_BASE MT6359_IRQ_VCORE_OC
+#define MT6359_IRQ_LDO_BASE MT6359_IRQ_VFE28_OC
+#define MT6359_IRQ_PSC_BASE MT6359_IRQ_PWRKEY
+#define MT6359_IRQ_SCK_BASE MT6359_IRQ_RTC
+#define MT6359_IRQ_BM_BASE MT6359_IRQ_FG_BAT_H
+#define MT6359_IRQ_HK_BASE MT6359_IRQ_BAT_H
+#define MT6359_IRQ_AUD_BASE MT6359_IRQ_AUDIO
+#define MT6359_IRQ_MISC_BASE MT6359_IRQ_SPI_CMD_ALERT
+
+#define MT6359_IRQ_BUCK_BITS (MT6359_IRQ_VPA_OC - MT6359_IRQ_BUCK_BASE + 1)
+#define MT6359_IRQ_LDO_BITS (MT6359_IRQ_VUFS_OC - MT6359_IRQ_LDO_BASE + 1)
+#define MT6359_IRQ_PSC_BITS \
+ (MT6359_IRQ_CHRDET_EDGE - MT6359_IRQ_PSC_BASE + 1)
+#define MT6359_IRQ_SCK_BITS (MT6359_IRQ_RTC - MT6359_IRQ_SCK_BASE + 1)
+#define MT6359_IRQ_BM_BITS (MT6359_IRQ_BIF - MT6359_IRQ_BM_BASE + 1)
+#define MT6359_IRQ_HK_BITS (MT6359_IRQ_NAG_C_DLTV - MT6359_IRQ_HK_BASE + 1)
+#define MT6359_IRQ_AUD_BITS \
+ (MT6359_IRQ_ACCDET_EINT1 - MT6359_IRQ_AUD_BASE + 1)
+#define MT6359_IRQ_MISC_BITS \
+ (MT6359_IRQ_SPI_CMD_ALERT - MT6359_IRQ_MISC_BASE + 1)
+
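+/* Expands to an irq_top_t initializer for group @sp, as MT6358_TOP_GEN(). */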
+#define MT6359_TOP_GEN(sp) \
+{ \
+ .hwirq_base = MT6359_IRQ_##sp##_BASE, \
+ .num_int_regs = \
+ ((MT6359_IRQ_##sp##_BITS - 1) / \
+ MTK_PMIC_REG_WIDTH) + 1, \
+ .en_reg = MT6359_##sp##_TOP_INT_CON0, \
+ .en_reg_shift = 0x6, \
+ .sta_reg = MT6359_##sp##_TOP_INT_STATUS0, \
+ .sta_reg_shift = 0x2, \
+ .top_offset = MT6359_##sp##_TOP, \
+}
+
+#endif /* __MFD_MT6359_CORE_H__ */
diff --git a/include/linux/mfd/mt6359/registers.h b/include/linux/mfd/mt6359/registers.h
new file mode 100644
index 000000000000..2a4394a27b1c
--- /dev/null
+++ b/include/linux/mfd/mt6359/registers.h
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6359_REGISTERS_H__
+#define __MFD_MT6359_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6359_SWCID 0xa
+#define MT6359_TOPSTATUS 0x2a
+#define MT6359_TOP_RST_MISC 0x14c
+#define MT6359_MISC_TOP_INT_CON0 0x188
+#define MT6359_MISC_TOP_INT_STATUS0 0x194
+#define MT6359_TOP_INT_STATUS0 0x19e
+#define MT6359_SCK_TOP_INT_CON0 0x528
+#define MT6359_SCK_TOP_INT_STATUS0 0x534
+#define MT6359_EOSC_CALI_CON0 0x53a
+#define MT6359_EOSC_CALI_CON1 0x53c
+#define MT6359_RTC_MIX_CON0 0x53e
+#define MT6359_RTC_MIX_CON1 0x540
+#define MT6359_RTC_MIX_CON2 0x542
+#define MT6359_RTC_DSN_ID 0x580
+#define MT6359_RTC_DSN_REV0 0x582
+#define MT6359_RTC_DBI 0x584
+#define MT6359_RTC_DXI 0x586
+#define MT6359_RTC_BBPU 0x588
+#define MT6359_RTC_IRQ_STA 0x58a
+#define MT6359_RTC_IRQ_EN 0x58c
+#define MT6359_RTC_CII_EN 0x58e
+#define MT6359_RTC_AL_MASK 0x590
+#define MT6359_RTC_TC_SEC 0x592
+#define MT6359_RTC_TC_MIN 0x594
+#define MT6359_RTC_TC_HOU 0x596
+#define MT6359_RTC_TC_DOM 0x598
+#define MT6359_RTC_TC_DOW 0x59a
+#define MT6359_RTC_TC_MTH 0x59c
+#define MT6359_RTC_TC_YEA 0x59e
+#define MT6359_RTC_AL_SEC 0x5a0
+#define MT6359_RTC_AL_MIN 0x5a2
+#define MT6359_RTC_AL_HOU 0x5a4
+#define MT6359_RTC_AL_DOM 0x5a6
+#define MT6359_RTC_AL_DOW 0x5a8
+#define MT6359_RTC_AL_MTH 0x5aa
+#define MT6359_RTC_AL_YEA 0x5ac
+#define MT6359_RTC_OSC32CON 0x5ae
+#define MT6359_RTC_POWERKEY1 0x5b0
+#define MT6359_RTC_POWERKEY2 0x5b2
+#define MT6359_RTC_PDN1 0x5b4
+#define MT6359_RTC_PDN2 0x5b6
+#define MT6359_RTC_SPAR0 0x5b8
+#define MT6359_RTC_SPAR1 0x5ba
+#define MT6359_RTC_PROT 0x5bc
+#define MT6359_RTC_DIFF 0x5be
+#define MT6359_RTC_CALI 0x5c0
+#define MT6359_RTC_WRTGR 0x5c2
+#define MT6359_RTC_CON 0x5c4
+#define MT6359_RTC_SEC_CTRL 0x5c6
+#define MT6359_RTC_INT_CNT 0x5c8
+#define MT6359_RTC_SEC_DAT0 0x5ca
+#define MT6359_RTC_SEC_DAT1 0x5cc
+#define MT6359_RTC_SEC_DAT2 0x5ce
+#define MT6359_RTC_SEC_DSN_ID 0x600
+#define MT6359_RTC_SEC_DSN_REV0 0x602
+#define MT6359_RTC_SEC_DBI 0x604
+#define MT6359_RTC_SEC_DXI 0x606
+#define MT6359_RTC_TC_SEC_SEC 0x608
+#define MT6359_RTC_TC_MIN_SEC 0x60a
+#define MT6359_RTC_TC_HOU_SEC 0x60c
+#define MT6359_RTC_TC_DOM_SEC 0x60e
+#define MT6359_RTC_TC_DOW_SEC 0x610
+#define MT6359_RTC_TC_MTH_SEC 0x612
+#define MT6359_RTC_TC_YEA_SEC 0x614
+#define MT6359_RTC_SEC_CK_PDN 0x616
+#define MT6359_RTC_SEC_WRTGR 0x618
+#define MT6359_PSC_TOP_INT_CON0 0x910
+#define MT6359_PSC_TOP_INT_STATUS0 0x91c
+#define MT6359_BM_TOP_INT_CON0 0xc32
+#define MT6359_BM_TOP_INT_CON1 0xc38
+#define MT6359_BM_TOP_INT_STATUS0 0xc4a
+#define MT6359_BM_TOP_INT_STATUS1 0xc4c
+#define MT6359_HK_TOP_INT_CON0 0xf92
+#define MT6359_HK_TOP_INT_STATUS0 0xf9e
+#define MT6359_BUCK_TOP_INT_CON0 0x1418
+#define MT6359_BUCK_TOP_INT_STATUS0 0x1424
+#define MT6359_BUCK_VPU_CON0 0x1488
+#define MT6359_BUCK_VPU_DBG0 0x14a6
+#define MT6359_BUCK_VPU_DBG1 0x14a8
+#define MT6359_BUCK_VPU_ELR0 0x14ac
+#define MT6359_BUCK_VCORE_CON0 0x1508
+#define MT6359_BUCK_VCORE_DBG0 0x1526
+#define MT6359_BUCK_VCORE_DBG1 0x1528
+#define MT6359_BUCK_VCORE_SSHUB_CON0 0x152a
+#define MT6359_BUCK_VCORE_ELR0 0x1534
+#define MT6359_BUCK_VGPU11_CON0 0x1588
+#define MT6359_BUCK_VGPU11_DBG0 0x15a6
+#define MT6359_BUCK_VGPU11_DBG1 0x15a8
+#define MT6359_BUCK_VGPU11_ELR0 0x15ac
+#define MT6359_BUCK_VMODEM_CON0 0x1688
+#define MT6359_BUCK_VMODEM_DBG0 0x16a6
+#define MT6359_BUCK_VMODEM_DBG1 0x16a8
+#define MT6359_BUCK_VMODEM_ELR0 0x16ae
+#define MT6359_BUCK_VPROC1_CON0 0x1708
+#define MT6359_BUCK_VPROC1_DBG0 0x1726
+#define MT6359_BUCK_VPROC1_DBG1 0x1728
+#define MT6359_BUCK_VPROC1_ELR0 0x172e
+#define MT6359_BUCK_VPROC2_CON0 0x1788
+#define MT6359_BUCK_VPROC2_DBG0 0x17a6
+#define MT6359_BUCK_VPROC2_DBG1 0x17a8
+#define MT6359_BUCK_VPROC2_ELR0 0x17b2
+#define MT6359_BUCK_VS1_CON0 0x1808
+#define MT6359_BUCK_VS1_DBG0 0x1826
+#define MT6359_BUCK_VS1_DBG1 0x1828
+#define MT6359_BUCK_VS1_ELR0 0x1834
+#define MT6359_BUCK_VS2_CON0 0x1888
+#define MT6359_BUCK_VS2_DBG0 0x18a6
+#define MT6359_BUCK_VS2_DBG1 0x18a8
+#define MT6359_BUCK_VS2_ELR0 0x18b4
+#define MT6359_BUCK_VPA_CON0 0x1908
+#define MT6359_BUCK_VPA_CON1 0x190e
+#define MT6359_BUCK_VPA_CFG0 0x1910
+#define MT6359_BUCK_VPA_CFG1 0x1912
+#define MT6359_BUCK_VPA_DBG0 0x1914
+#define MT6359_BUCK_VPA_DBG1 0x1916
+#define MT6359_VGPUVCORE_ANA_CON2 0x198e
+#define MT6359_VGPUVCORE_ANA_CON13 0x19a4
+#define MT6359_VPROC1_ANA_CON3 0x19b2
+#define MT6359_VPROC2_ANA_CON3 0x1a0e
+#define MT6359_VMODEM_ANA_CON3 0x1a1a
+#define MT6359_VPU_ANA_CON3 0x1a26
+#define MT6359_VS1_ANA_CON0 0x1a2c
+#define MT6359_VS2_ANA_CON0 0x1a34
+#define MT6359_VPA_ANA_CON0 0x1a3c
+#define MT6359_LDO_TOP_INT_CON0 0x1b14
+#define MT6359_LDO_TOP_INT_CON1 0x1b1a
+#define MT6359_LDO_TOP_INT_STATUS0 0x1b28
+#define MT6359_LDO_TOP_INT_STATUS1 0x1b2a
+#define MT6359_LDO_VSRAM_PROC1_ELR 0x1b40
+#define MT6359_LDO_VSRAM_PROC2_ELR 0x1b42
+#define MT6359_LDO_VSRAM_OTHERS_ELR 0x1b44
+#define MT6359_LDO_VSRAM_MD_ELR 0x1b46
+#define MT6359_LDO_VFE28_CON0 0x1b88
+#define MT6359_LDO_VFE28_MON 0x1b8a
+#define MT6359_LDO_VXO22_CON0 0x1b98
+#define MT6359_LDO_VXO22_MON 0x1b9a
+#define MT6359_LDO_VRF18_CON0 0x1ba8
+#define MT6359_LDO_VRF18_MON 0x1baa
+#define MT6359_LDO_VRF12_CON0 0x1bb8
+#define MT6359_LDO_VRF12_MON 0x1bba
+#define MT6359_LDO_VEFUSE_CON0 0x1bc8
+#define MT6359_LDO_VEFUSE_MON 0x1bca
+#define MT6359_LDO_VCN33_1_CON0 0x1bd8
+#define MT6359_LDO_VCN33_1_MON 0x1bda
+#define MT6359_LDO_VCN33_1_MULTI_SW 0x1be8
+#define MT6359_LDO_VCN33_2_CON0 0x1c08
+#define MT6359_LDO_VCN33_2_MON 0x1c0a
+#define MT6359_LDO_VCN33_2_MULTI_SW 0x1c18
+#define MT6359_LDO_VCN13_CON0 0x1c1a
+#define MT6359_LDO_VCN13_MON 0x1c1c
+#define MT6359_LDO_VCN18_CON0 0x1c2a
+#define MT6359_LDO_VCN18_MON 0x1c2c
+#define MT6359_LDO_VA09_CON0 0x1c3a
+#define MT6359_LDO_VA09_MON 0x1c3c
+#define MT6359_LDO_VCAMIO_CON0 0x1c4a
+#define MT6359_LDO_VCAMIO_MON 0x1c4c
+#define MT6359_LDO_VA12_CON0 0x1c5a
+#define MT6359_LDO_VA12_MON 0x1c5c
+#define MT6359_LDO_VAUX18_CON0 0x1c88
+#define MT6359_LDO_VAUX18_MON 0x1c8a
+#define MT6359_LDO_VAUD18_CON0 0x1c98
+#define MT6359_LDO_VAUD18_MON 0x1c9a
+#define MT6359_LDO_VIO18_CON0 0x1ca8
+#define MT6359_LDO_VIO18_MON 0x1caa
+#define MT6359_LDO_VEMC_CON0 0x1cb8
+#define MT6359_LDO_VEMC_MON 0x1cba
+#define MT6359_LDO_VSIM1_CON0 0x1cc8
+#define MT6359_LDO_VSIM1_MON 0x1cca
+#define MT6359_LDO_VSIM2_CON0 0x1cd8
+#define MT6359_LDO_VSIM2_MON 0x1cda
+#define MT6359_LDO_VUSB_CON0 0x1d08
+#define MT6359_LDO_VUSB_MON 0x1d0a
+#define MT6359_LDO_VUSB_MULTI_SW 0x1d18
+#define MT6359_LDO_VRFCK_CON0 0x1d1a
+#define MT6359_LDO_VRFCK_MON 0x1d1c
+#define MT6359_LDO_VBBCK_CON0 0x1d2a
+#define MT6359_LDO_VBBCK_MON 0x1d2c
+#define MT6359_LDO_VBIF28_CON0 0x1d3a
+#define MT6359_LDO_VBIF28_MON 0x1d3c
+#define MT6359_LDO_VIBR_CON0 0x1d4a
+#define MT6359_LDO_VIBR_MON 0x1d4c
+#define MT6359_LDO_VIO28_CON0 0x1d5a
+#define MT6359_LDO_VIO28_MON 0x1d5c
+#define MT6359_LDO_VM18_CON0 0x1d88
+#define MT6359_LDO_VM18_MON 0x1d8a
+#define MT6359_LDO_VUFS_CON0 0x1d98
+#define MT6359_LDO_VUFS_MON 0x1d9a
+#define MT6359_LDO_VSRAM_PROC1_CON0 0x1e88
+#define MT6359_LDO_VSRAM_PROC1_MON 0x1e8a
+#define MT6359_LDO_VSRAM_PROC1_VOSEL1 0x1e8e
+#define MT6359_LDO_VSRAM_PROC2_CON0 0x1ea6
+#define MT6359_LDO_VSRAM_PROC2_MON 0x1ea8
+#define MT6359_LDO_VSRAM_PROC2_VOSEL1 0x1eac
+#define MT6359_LDO_VSRAM_OTHERS_CON0 0x1f08
+#define MT6359_LDO_VSRAM_OTHERS_MON 0x1f0a
+#define MT6359_LDO_VSRAM_OTHERS_VOSEL1 0x1f0e
+#define MT6359_LDO_VSRAM_OTHERS_SSHUB 0x1f26
+#define MT6359_LDO_VSRAM_MD_CON0 0x1f2c
+#define MT6359_LDO_VSRAM_MD_MON 0x1f2e
+#define MT6359_LDO_VSRAM_MD_VOSEL1 0x1f32
+#define MT6359_VFE28_ANA_CON0 0x1f88
+#define MT6359_VAUX18_ANA_CON0 0x1f8c
+#define MT6359_VUSB_ANA_CON0 0x1f90
+#define MT6359_VBIF28_ANA_CON0 0x1f94
+#define MT6359_VCN33_1_ANA_CON0 0x1f98
+#define MT6359_VCN33_2_ANA_CON0 0x1f9c
+#define MT6359_VEMC_ANA_CON0 0x1fa0
+#define MT6359_VSIM1_ANA_CON0 0x1fa4
+#define MT6359_VSIM2_ANA_CON0 0x1fa8
+#define MT6359_VIO28_ANA_CON0 0x1fac
+#define MT6359_VIBR_ANA_CON0 0x1fb0
+#define MT6359_VRF18_ANA_CON0 0x2008
+#define MT6359_VEFUSE_ANA_CON0 0x200c
+#define MT6359_VCN18_ANA_CON0 0x2010
+#define MT6359_VCAMIO_ANA_CON0 0x2014
+#define MT6359_VAUD18_ANA_CON0 0x2018
+#define MT6359_VIO18_ANA_CON0 0x201c
+#define MT6359_VM18_ANA_CON0 0x2020
+#define MT6359_VUFS_ANA_CON0 0x2024
+#define MT6359_VRF12_ANA_CON0 0x202a
+#define MT6359_VCN13_ANA_CON0 0x202e
+#define MT6359_VA09_ANA_CON0 0x2032
+#define MT6359_VA12_ANA_CON0 0x2036
+#define MT6359_VXO22_ANA_CON0 0x2088
+#define MT6359_VRFCK_ANA_CON0 0x208c
+#define MT6359_VBBCK_ANA_CON0 0x2094
+#define MT6359_AUD_TOP_INT_CON0 0x2328
+#define MT6359_AUD_TOP_INT_STATUS0 0x2334
+
+#define MT6359_RG_BUCK_VPU_EN_ADDR MT6359_BUCK_VPU_CON0
+#define MT6359_RG_BUCK_VPU_LP_ADDR MT6359_BUCK_VPU_CON0
+#define MT6359_RG_BUCK_VPU_LP_SHIFT 1
+#define MT6359_DA_VPU_VOSEL_ADDR MT6359_BUCK_VPU_DBG0
+#define MT6359_DA_VPU_VOSEL_MASK 0x7F
+#define MT6359_DA_VPU_VOSEL_SHIFT 0
+#define MT6359_DA_VPU_EN_ADDR MT6359_BUCK_VPU_DBG1
+#define MT6359_RG_BUCK_VPU_VOSEL_ADDR MT6359_BUCK_VPU_ELR0
+#define MT6359_RG_BUCK_VPU_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VPU_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VCORE_EN_ADDR MT6359_BUCK_VCORE_CON0
+#define MT6359_RG_BUCK_VCORE_LP_ADDR MT6359_BUCK_VCORE_CON0
+#define MT6359_RG_BUCK_VCORE_LP_SHIFT 1
+#define MT6359_DA_VCORE_VOSEL_ADDR MT6359_BUCK_VCORE_DBG0
+#define MT6359_DA_VCORE_VOSEL_MASK 0x7F
+#define MT6359_DA_VCORE_VOSEL_SHIFT 0
+#define MT6359_DA_VCORE_EN_ADDR MT6359_BUCK_VCORE_DBG1
+#define MT6359_RG_BUCK_VCORE_SSHUB_EN_ADDR MT6359_BUCK_VCORE_SSHUB_CON0
+#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_ADDR MT6359_BUCK_VCORE_SSHUB_CON0
+#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_SHIFT 4
+#define MT6359_RG_BUCK_VCORE_VOSEL_ADDR MT6359_BUCK_VCORE_ELR0
+#define MT6359_RG_BUCK_VCORE_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VCORE_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VGPU11_EN_ADDR MT6359_BUCK_VGPU11_CON0
+#define MT6359_RG_BUCK_VGPU11_LP_ADDR MT6359_BUCK_VGPU11_CON0
+#define MT6359_RG_BUCK_VGPU11_LP_SHIFT 1
+#define MT6359_DA_VGPU11_VOSEL_ADDR MT6359_BUCK_VGPU11_DBG0
+#define MT6359_DA_VGPU11_VOSEL_MASK 0x7F
+#define MT6359_DA_VGPU11_VOSEL_SHIFT 0
+#define MT6359_DA_VGPU11_EN_ADDR MT6359_BUCK_VGPU11_DBG1
+#define MT6359_RG_BUCK_VGPU11_VOSEL_ADDR MT6359_BUCK_VGPU11_ELR0
+#define MT6359_RG_BUCK_VGPU11_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VGPU11_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VMODEM_EN_ADDR MT6359_BUCK_VMODEM_CON0
+#define MT6359_RG_BUCK_VMODEM_LP_ADDR MT6359_BUCK_VMODEM_CON0
+#define MT6359_RG_BUCK_VMODEM_LP_SHIFT 1
+#define MT6359_DA_VMODEM_VOSEL_ADDR MT6359_BUCK_VMODEM_DBG0
+#define MT6359_DA_VMODEM_VOSEL_MASK 0x7F
+#define MT6359_DA_VMODEM_VOSEL_SHIFT 0
+#define MT6359_DA_VMODEM_EN_ADDR MT6359_BUCK_VMODEM_DBG1
+#define MT6359_RG_BUCK_VMODEM_VOSEL_ADDR MT6359_BUCK_VMODEM_ELR0
+#define MT6359_RG_BUCK_VMODEM_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VMODEM_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VPROC1_EN_ADDR MT6359_BUCK_VPROC1_CON0
+#define MT6359_RG_BUCK_VPROC1_LP_ADDR MT6359_BUCK_VPROC1_CON0
+#define MT6359_RG_BUCK_VPROC1_LP_SHIFT 1
+#define MT6359_DA_VPROC1_VOSEL_ADDR MT6359_BUCK_VPROC1_DBG0
+#define MT6359_DA_VPROC1_VOSEL_MASK 0x7F
+#define MT6359_DA_VPROC1_VOSEL_SHIFT 0
+#define MT6359_DA_VPROC1_EN_ADDR MT6359_BUCK_VPROC1_DBG1
+#define MT6359_RG_BUCK_VPROC1_VOSEL_ADDR MT6359_BUCK_VPROC1_ELR0
+#define MT6359_RG_BUCK_VPROC1_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VPROC1_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VPROC2_EN_ADDR MT6359_BUCK_VPROC2_CON0
+#define MT6359_RG_BUCK_VPROC2_LP_ADDR MT6359_BUCK_VPROC2_CON0
+#define MT6359_RG_BUCK_VPROC2_LP_SHIFT 1
+#define MT6359_DA_VPROC2_VOSEL_ADDR MT6359_BUCK_VPROC2_DBG0
+#define MT6359_DA_VPROC2_VOSEL_MASK 0x7F
+#define MT6359_DA_VPROC2_VOSEL_SHIFT 0
+#define MT6359_DA_VPROC2_EN_ADDR MT6359_BUCK_VPROC2_DBG1
+#define MT6359_RG_BUCK_VPROC2_VOSEL_ADDR MT6359_BUCK_VPROC2_ELR0
+#define MT6359_RG_BUCK_VPROC2_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VPROC2_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VS1_EN_ADDR MT6359_BUCK_VS1_CON0
+#define MT6359_RG_BUCK_VS1_LP_ADDR MT6359_BUCK_VS1_CON0
+#define MT6359_RG_BUCK_VS1_LP_SHIFT 1
+#define MT6359_DA_VS1_VOSEL_ADDR MT6359_BUCK_VS1_DBG0
+#define MT6359_DA_VS1_VOSEL_MASK 0x7F
+#define MT6359_DA_VS1_VOSEL_SHIFT 0
+#define MT6359_DA_VS1_EN_ADDR MT6359_BUCK_VS1_DBG1
+#define MT6359_RG_BUCK_VS1_VOSEL_ADDR MT6359_BUCK_VS1_ELR0
+#define MT6359_RG_BUCK_VS1_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VS1_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VS2_EN_ADDR MT6359_BUCK_VS2_CON0
+#define MT6359_RG_BUCK_VS2_LP_ADDR MT6359_BUCK_VS2_CON0
+#define MT6359_RG_BUCK_VS2_LP_SHIFT 1
+#define MT6359_DA_VS2_VOSEL_ADDR MT6359_BUCK_VS2_DBG0
+#define MT6359_DA_VS2_VOSEL_MASK 0x7F
+#define MT6359_DA_VS2_VOSEL_SHIFT 0
+#define MT6359_DA_VS2_EN_ADDR MT6359_BUCK_VS2_DBG1
+#define MT6359_RG_BUCK_VS2_VOSEL_ADDR MT6359_BUCK_VS2_ELR0
+#define MT6359_RG_BUCK_VS2_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VS2_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VPA_EN_ADDR MT6359_BUCK_VPA_CON0
+#define MT6359_RG_BUCK_VPA_LP_ADDR MT6359_BUCK_VPA_CON0
+#define MT6359_RG_BUCK_VPA_LP_SHIFT 1
+#define MT6359_RG_BUCK_VPA_VOSEL_ADDR MT6359_BUCK_VPA_CON1
+#define MT6359_RG_BUCK_VPA_VOSEL_MASK 0x3F
+#define MT6359_RG_BUCK_VPA_VOSEL_SHIFT 0
+#define MT6359_DA_VPA_VOSEL_ADDR MT6359_BUCK_VPA_DBG0
+#define MT6359_DA_VPA_VOSEL_MASK 0x3F
+#define MT6359_DA_VPA_VOSEL_SHIFT 0
+#define MT6359_DA_VPA_EN_ADDR MT6359_BUCK_VPA_DBG1
+#define MT6359_RG_VGPU11_FCCM_ADDR MT6359_VGPUVCORE_ANA_CON2
+#define MT6359_RG_VGPU11_FCCM_SHIFT 9
+#define MT6359_RG_VCORE_FCCM_ADDR MT6359_VGPUVCORE_ANA_CON13
+#define MT6359_RG_VCORE_FCCM_SHIFT 5
+#define MT6359_RG_VPROC1_FCCM_ADDR MT6359_VPROC1_ANA_CON3
+#define MT6359_RG_VPROC1_FCCM_SHIFT 1
+#define MT6359_RG_VPROC2_FCCM_ADDR MT6359_VPROC2_ANA_CON3
+#define MT6359_RG_VPROC2_FCCM_SHIFT 1
+#define MT6359_RG_VMODEM_FCCM_ADDR MT6359_VMODEM_ANA_CON3
+#define MT6359_RG_VMODEM_FCCM_SHIFT 1
+#define MT6359_RG_VPU_FCCM_ADDR MT6359_VPU_ANA_CON3
+#define MT6359_RG_VPU_FCCM_SHIFT 1
+#define MT6359_RG_VS1_FPWM_ADDR MT6359_VS1_ANA_CON0
+#define MT6359_RG_VS1_FPWM_SHIFT 3
+#define MT6359_RG_VS2_FPWM_ADDR MT6359_VS2_ANA_CON0
+#define MT6359_RG_VS2_FPWM_SHIFT 3
+#define MT6359_RG_VPA_MODESET_ADDR MT6359_VPA_ANA_CON0
+#define MT6359_RG_VPA_MODESET_SHIFT 1
+#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_ADDR MT6359_LDO_VSRAM_PROC1_ELR
+#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_ADDR MT6359_LDO_VSRAM_PROC2_ELR
+#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_ELR
+#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VSRAM_MD_VOSEL_ADDR MT6359_LDO_VSRAM_MD_ELR
+#define MT6359_RG_LDO_VSRAM_MD_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_MD_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VFE28_EN_ADDR MT6359_LDO_VFE28_CON0
+#define MT6359_DA_VFE28_B_EN_ADDR MT6359_LDO_VFE28_MON
+#define MT6359_RG_LDO_VXO22_EN_ADDR MT6359_LDO_VXO22_CON0
+#define MT6359_RG_LDO_VXO22_EN_SHIFT 0
+#define MT6359_DA_VXO22_B_EN_ADDR MT6359_LDO_VXO22_MON
+#define MT6359_RG_LDO_VRF18_EN_ADDR MT6359_LDO_VRF18_CON0
+#define MT6359_RG_LDO_VRF18_EN_SHIFT 0
+#define MT6359_DA_VRF18_B_EN_ADDR MT6359_LDO_VRF18_MON
+#define MT6359_RG_LDO_VRF12_EN_ADDR MT6359_LDO_VRF12_CON0
+#define MT6359_RG_LDO_VRF12_EN_SHIFT 0
+#define MT6359_DA_VRF12_B_EN_ADDR MT6359_LDO_VRF12_MON
+#define MT6359_RG_LDO_VEFUSE_EN_ADDR MT6359_LDO_VEFUSE_CON0
+#define MT6359_RG_LDO_VEFUSE_EN_SHIFT 0
+#define MT6359_DA_VEFUSE_B_EN_ADDR MT6359_LDO_VEFUSE_MON
+#define MT6359_RG_LDO_VCN33_1_EN_0_ADDR MT6359_LDO_VCN33_1_CON0
+#define MT6359_RG_LDO_VCN33_1_EN_0_MASK 0x1
+#define MT6359_RG_LDO_VCN33_1_EN_0_SHIFT 0
+#define MT6359_DA_VCN33_1_B_EN_ADDR MT6359_LDO_VCN33_1_MON
+#define MT6359_RG_LDO_VCN33_1_EN_1_ADDR MT6359_LDO_VCN33_1_MULTI_SW
+#define MT6359_RG_LDO_VCN33_1_EN_1_SHIFT 15
+#define MT6359_RG_LDO_VCN33_2_EN_0_ADDR MT6359_LDO_VCN33_2_CON0
+#define MT6359_RG_LDO_VCN33_2_EN_0_SHIFT 0
+#define MT6359_DA_VCN33_2_B_EN_ADDR MT6359_LDO_VCN33_2_MON
+#define MT6359_RG_LDO_VCN33_2_EN_1_ADDR MT6359_LDO_VCN33_2_MULTI_SW
+#define MT6359_RG_LDO_VCN33_2_EN_1_MASK 0x1
+#define MT6359_RG_LDO_VCN33_2_EN_1_SHIFT 15
+#define MT6359_RG_LDO_VCN13_EN_ADDR MT6359_LDO_VCN13_CON0
+#define MT6359_RG_LDO_VCN13_EN_SHIFT 0
+#define MT6359_DA_VCN13_B_EN_ADDR MT6359_LDO_VCN13_MON
+#define MT6359_RG_LDO_VCN18_EN_ADDR MT6359_LDO_VCN18_CON0
+#define MT6359_DA_VCN18_B_EN_ADDR MT6359_LDO_VCN18_MON
+#define MT6359_RG_LDO_VA09_EN_ADDR MT6359_LDO_VA09_CON0
+#define MT6359_RG_LDO_VA09_EN_SHIFT 0
+#define MT6359_DA_VA09_B_EN_ADDR MT6359_LDO_VA09_MON
+#define MT6359_RG_LDO_VCAMIO_EN_ADDR MT6359_LDO_VCAMIO_CON0
+#define MT6359_RG_LDO_VCAMIO_EN_SHIFT 0
+#define MT6359_DA_VCAMIO_B_EN_ADDR MT6359_LDO_VCAMIO_MON
+#define MT6359_RG_LDO_VA12_EN_ADDR MT6359_LDO_VA12_CON0
+#define MT6359_RG_LDO_VA12_EN_SHIFT 0
+#define MT6359_DA_VA12_B_EN_ADDR MT6359_LDO_VA12_MON
+#define MT6359_RG_LDO_VAUX18_EN_ADDR MT6359_LDO_VAUX18_CON0
+#define MT6359_DA_VAUX18_B_EN_ADDR MT6359_LDO_VAUX18_MON
+#define MT6359_RG_LDO_VAUD18_EN_ADDR MT6359_LDO_VAUD18_CON0
+#define MT6359_DA_VAUD18_B_EN_ADDR MT6359_LDO_VAUD18_MON
+#define MT6359_RG_LDO_VIO18_EN_ADDR MT6359_LDO_VIO18_CON0
+#define MT6359_RG_LDO_VIO18_EN_SHIFT 0
+#define MT6359_DA_VIO18_B_EN_ADDR MT6359_LDO_VIO18_MON
+#define MT6359_RG_LDO_VEMC_EN_ADDR MT6359_LDO_VEMC_CON0
+#define MT6359_RG_LDO_VEMC_EN_SHIFT 0
+#define MT6359_DA_VEMC_B_EN_ADDR MT6359_LDO_VEMC_MON
+#define MT6359_RG_LDO_VSIM1_EN_ADDR MT6359_LDO_VSIM1_CON0
+#define MT6359_RG_LDO_VSIM1_EN_SHIFT 0
+#define MT6359_DA_VSIM1_B_EN_ADDR MT6359_LDO_VSIM1_MON
+#define MT6359_RG_LDO_VSIM2_EN_ADDR MT6359_LDO_VSIM2_CON0
+#define MT6359_RG_LDO_VSIM2_EN_SHIFT 0
+#define MT6359_DA_VSIM2_B_EN_ADDR MT6359_LDO_VSIM2_MON
+#define MT6359_RG_LDO_VUSB_EN_0_ADDR MT6359_LDO_VUSB_CON0
+#define MT6359_RG_LDO_VUSB_EN_0_MASK 0x1
+#define MT6359_RG_LDO_VUSB_EN_0_SHIFT 0
+#define MT6359_DA_VUSB_B_EN_ADDR MT6359_LDO_VUSB_MON
+#define MT6359_RG_LDO_VUSB_EN_1_ADDR MT6359_LDO_VUSB_MULTI_SW
+#define MT6359_RG_LDO_VUSB_EN_1_MASK 0x1
+#define MT6359_RG_LDO_VUSB_EN_1_SHIFT 15
+#define MT6359_RG_LDO_VRFCK_EN_ADDR MT6359_LDO_VRFCK_CON0
+#define MT6359_RG_LDO_VRFCK_EN_SHIFT 0
+#define MT6359_DA_VRFCK_B_EN_ADDR MT6359_LDO_VRFCK_MON
+#define MT6359_RG_LDO_VBBCK_EN_ADDR MT6359_LDO_VBBCK_CON0
+#define MT6359_RG_LDO_VBBCK_EN_SHIFT 0
+#define MT6359_DA_VBBCK_B_EN_ADDR MT6359_LDO_VBBCK_MON
+#define MT6359_RG_LDO_VBIF28_EN_ADDR MT6359_LDO_VBIF28_CON0
+#define MT6359_DA_VBIF28_B_EN_ADDR MT6359_LDO_VBIF28_MON
+#define MT6359_RG_LDO_VIBR_EN_ADDR MT6359_LDO_VIBR_CON0
+#define MT6359_RG_LDO_VIBR_EN_SHIFT 0
+#define MT6359_DA_VIBR_B_EN_ADDR MT6359_LDO_VIBR_MON
+#define MT6359_RG_LDO_VIO28_EN_ADDR MT6359_LDO_VIO28_CON0
+#define MT6359_RG_LDO_VIO28_EN_SHIFT 0
+#define MT6359_DA_VIO28_B_EN_ADDR MT6359_LDO_VIO28_MON
+#define MT6359_RG_LDO_VM18_EN_ADDR MT6359_LDO_VM18_CON0
+#define MT6359_RG_LDO_VM18_EN_SHIFT 0
+#define MT6359_DA_VM18_B_EN_ADDR MT6359_LDO_VM18_MON
+#define MT6359_RG_LDO_VUFS_EN_ADDR MT6359_LDO_VUFS_CON0
+#define MT6359_RG_LDO_VUFS_EN_SHIFT 0
+#define MT6359_DA_VUFS_B_EN_ADDR MT6359_LDO_VUFS_MON
+#define MT6359_RG_LDO_VSRAM_PROC1_EN_ADDR MT6359_LDO_VSRAM_PROC1_CON0
+#define MT6359_DA_VSRAM_PROC1_B_EN_ADDR MT6359_LDO_VSRAM_PROC1_MON
+#define MT6359_DA_VSRAM_PROC1_VOSEL_ADDR MT6359_LDO_VSRAM_PROC1_VOSEL1
+#define MT6359_DA_VSRAM_PROC1_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_PROC1_VOSEL_SHIFT 8
+#define MT6359_RG_LDO_VSRAM_PROC2_EN_ADDR MT6359_LDO_VSRAM_PROC2_CON0
+#define MT6359_DA_VSRAM_PROC2_B_EN_ADDR MT6359_LDO_VSRAM_PROC2_MON
+#define MT6359_DA_VSRAM_PROC2_VOSEL_ADDR MT6359_LDO_VSRAM_PROC2_VOSEL1
+#define MT6359_DA_VSRAM_PROC2_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_PROC2_VOSEL_SHIFT 8
+#define MT6359_RG_LDO_VSRAM_OTHERS_EN_ADDR MT6359_LDO_VSRAM_OTHERS_CON0
+#define MT6359_DA_VSRAM_OTHERS_B_EN_ADDR MT6359_LDO_VSRAM_OTHERS_MON
+#define MT6359_DA_VSRAM_OTHERS_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_VOSEL1
+#define MT6359_DA_VSRAM_OTHERS_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_OTHERS_VOSEL_SHIFT 8
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_EN_ADDR MT6359_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_SHIFT 1
+#define MT6359_RG_LDO_VSRAM_MD_EN_ADDR MT6359_LDO_VSRAM_MD_CON0
+#define MT6359_DA_VSRAM_MD_B_EN_ADDR MT6359_LDO_VSRAM_MD_MON
+#define MT6359_DA_VSRAM_MD_VOSEL_ADDR MT6359_LDO_VSRAM_MD_VOSEL1
+#define MT6359_DA_VSRAM_MD_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_MD_VOSEL_SHIFT 8
+#define MT6359_RG_VCN33_1_VOSEL_ADDR MT6359_VCN33_1_ANA_CON0
+#define MT6359_RG_VCN33_1_VOSEL_MASK 0xF
+#define MT6359_RG_VCN33_1_VOSEL_SHIFT 8
+#define MT6359_RG_VCN33_2_VOSEL_ADDR MT6359_VCN33_2_ANA_CON0
+#define MT6359_RG_VCN33_2_VOSEL_MASK 0xF
+#define MT6359_RG_VCN33_2_VOSEL_SHIFT 8
+#define MT6359_RG_VEMC_VOSEL_ADDR MT6359_VEMC_ANA_CON0
+#define MT6359_RG_VEMC_VOSEL_MASK 0xF
+#define MT6359_RG_VEMC_VOSEL_SHIFT 8
+#define MT6359_RG_VSIM1_VOSEL_ADDR MT6359_VSIM1_ANA_CON0
+#define MT6359_RG_VSIM1_VOSEL_MASK 0xF
+#define MT6359_RG_VSIM1_VOSEL_SHIFT 8
+#define MT6359_RG_VSIM2_VOSEL_ADDR MT6359_VSIM2_ANA_CON0
+#define MT6359_RG_VSIM2_VOSEL_MASK 0xF
+#define MT6359_RG_VSIM2_VOSEL_SHIFT 8
+#define MT6359_RG_VIO28_VOSEL_ADDR MT6359_VIO28_ANA_CON0
+#define MT6359_RG_VIO28_VOSEL_MASK 0xF
+#define MT6359_RG_VIO28_VOSEL_SHIFT 8
+#define MT6359_RG_VIBR_VOSEL_ADDR MT6359_VIBR_ANA_CON0
+#define MT6359_RG_VIBR_VOSEL_MASK 0xF
+#define MT6359_RG_VIBR_VOSEL_SHIFT 8
+#define MT6359_RG_VRF18_VOSEL_ADDR MT6359_VRF18_ANA_CON0
+#define MT6359_RG_VRF18_VOSEL_MASK 0xF
+#define MT6359_RG_VRF18_VOSEL_SHIFT 8
+#define MT6359_RG_VEFUSE_VOSEL_ADDR MT6359_VEFUSE_ANA_CON0
+#define MT6359_RG_VEFUSE_VOSEL_MASK 0xF
+#define MT6359_RG_VEFUSE_VOSEL_SHIFT 8
+#define MT6359_RG_VCAMIO_VOSEL_ADDR MT6359_VCAMIO_ANA_CON0
+#define MT6359_RG_VCAMIO_VOSEL_MASK 0xF
+#define MT6359_RG_VCAMIO_VOSEL_SHIFT 8
+#define MT6359_RG_VIO18_VOSEL_ADDR MT6359_VIO18_ANA_CON0
+#define MT6359_RG_VIO18_VOSEL_MASK 0xF
+#define MT6359_RG_VIO18_VOSEL_SHIFT 8
+#define MT6359_RG_VM18_VOSEL_ADDR MT6359_VM18_ANA_CON0
+#define MT6359_RG_VM18_VOSEL_MASK 0xF
+#define MT6359_RG_VM18_VOSEL_SHIFT 8
+#define MT6359_RG_VUFS_VOSEL_ADDR MT6359_VUFS_ANA_CON0
+#define MT6359_RG_VUFS_VOSEL_MASK 0xF
+#define MT6359_RG_VUFS_VOSEL_SHIFT 8
+#define MT6359_RG_VRF12_VOSEL_ADDR MT6359_VRF12_ANA_CON0
+#define MT6359_RG_VRF12_VOSEL_MASK 0xF
+#define MT6359_RG_VRF12_VOSEL_SHIFT 8
+#define MT6359_RG_VCN13_VOSEL_ADDR MT6359_VCN13_ANA_CON0
+#define MT6359_RG_VCN13_VOSEL_MASK 0xF
+#define MT6359_RG_VCN13_VOSEL_SHIFT 8
+#define MT6359_RG_VA09_VOSEL_ADDR MT6359_VA09_ANA_CON0
+#define MT6359_RG_VA09_VOSEL_MASK 0xF
+#define MT6359_RG_VA09_VOSEL_SHIFT 8
+#define MT6359_RG_VA12_VOSEL_ADDR MT6359_VA12_ANA_CON0
+#define MT6359_RG_VA12_VOSEL_MASK 0xF
+#define MT6359_RG_VA12_VOSEL_SHIFT 8
+#define MT6359_RG_VXO22_VOSEL_ADDR MT6359_VXO22_ANA_CON0
+#define MT6359_RG_VXO22_VOSEL_MASK 0xF
+#define MT6359_RG_VXO22_VOSEL_SHIFT 8
+#define MT6359_RG_VRFCK_VOSEL_ADDR MT6359_VRFCK_ANA_CON0
+#define MT6359_RG_VRFCK_VOSEL_MASK 0xF
+#define MT6359_RG_VRFCK_VOSEL_SHIFT 8
+#define MT6359_RG_VBBCK_VOSEL_ADDR MT6359_VBBCK_ANA_CON0
+#define MT6359_RG_VBBCK_VOSEL_MASK 0xF
+#define MT6359_RG_VBBCK_VOSEL_SHIFT 8
+
+#endif /* __MFD_MT6359_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6359p/registers.h b/include/linux/mfd/mt6359p/registers.h
new file mode 100644
index 000000000000..3d97c1885171
--- /dev/null
+++ b/include/linux/mfd/mt6359p/registers.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6359P_REGISTERS_H__
+#define __MFD_MT6359P_REGISTERS_H__
+
+#define MT6359P_CHIP_VER 0x5930
+
+/* PMIC Registers */
+#define MT6359P_HWCID 0x8
+#define MT6359P_TOP_TRAP 0x50
+#define MT6359P_TOP_TMA_KEY 0x3a8
+#define MT6359P_BUCK_VCORE_ELR_NUM 0x152a
+#define MT6359P_BUCK_VCORE_ELR0 0x152c
+#define MT6359P_BUCK_VGPU11_SSHUB_CON0 0x15aa
+#define MT6359P_BUCK_VGPU11_ELR0 0x15b4
+#define MT6359P_LDO_VSRAM_PROC1_ELR 0x1b44
+#define MT6359P_LDO_VSRAM_PROC2_ELR 0x1b46
+#define MT6359P_LDO_VSRAM_OTHERS_ELR 0x1b48
+#define MT6359P_LDO_VSRAM_MD_ELR 0x1b4a
+#define MT6359P_LDO_VEMC_ELR_0 0x1b4c
+#define MT6359P_LDO_VFE28_CON0 0x1b88
+#define MT6359P_LDO_VFE28_MON 0x1b8c
+#define MT6359P_LDO_VXO22_CON0 0x1b9a
+#define MT6359P_LDO_VXO22_MON 0x1b9e
+#define MT6359P_LDO_VRF18_CON0 0x1bac
+#define MT6359P_LDO_VRF18_MON 0x1bb0
+#define MT6359P_LDO_VRF12_CON0 0x1bbe
+#define MT6359P_LDO_VRF12_MON 0x1bc2
+#define MT6359P_LDO_VEFUSE_CON0 0x1bd0
+#define MT6359P_LDO_VEFUSE_MON 0x1bd4
+#define MT6359P_LDO_VCN33_1_CON0 0x1be2
+#define MT6359P_LDO_VCN33_1_MON 0x1be6
+#define MT6359P_LDO_VCN33_1_MULTI_SW 0x1bf4
+#define MT6359P_LDO_VCN33_2_CON0 0x1c08
+#define MT6359P_LDO_VCN33_2_MON 0x1c0c
+#define MT6359P_LDO_VCN33_2_MULTI_SW 0x1c1a
+#define MT6359P_LDO_VCN13_CON0 0x1c1c
+#define MT6359P_LDO_VCN13_MON 0x1c20
+#define MT6359P_LDO_VCN18_CON0 0x1c2e
+#define MT6359P_LDO_VCN18_MON 0x1c32
+#define MT6359P_LDO_VA09_CON0 0x1c40
+#define MT6359P_LDO_VA09_MON 0x1c44
+#define MT6359P_LDO_VCAMIO_CON0 0x1c52
+#define MT6359P_LDO_VCAMIO_MON 0x1c56
+#define MT6359P_LDO_VA12_CON0 0x1c64
+#define MT6359P_LDO_VA12_MON 0x1c68
+#define MT6359P_LDO_VAUX18_CON0 0x1c88
+#define MT6359P_LDO_VAUX18_MON 0x1c8c
+#define MT6359P_LDO_VAUD18_CON0 0x1c9a
+#define MT6359P_LDO_VAUD18_MON 0x1c9e
+#define MT6359P_LDO_VIO18_CON0 0x1cac
+#define MT6359P_LDO_VIO18_MON 0x1cb0
+#define MT6359P_LDO_VEMC_CON0 0x1cbe
+#define MT6359P_LDO_VEMC_MON 0x1cc2
+#define MT6359P_LDO_VSIM1_CON0 0x1cd0
+#define MT6359P_LDO_VSIM1_MON 0x1cd4
+#define MT6359P_LDO_VSIM2_CON0 0x1ce2
+#define MT6359P_LDO_VSIM2_MON 0x1ce6
+#define MT6359P_LDO_VUSB_CON0 0x1d08
+#define MT6359P_LDO_VUSB_MON 0x1d0c
+#define MT6359P_LDO_VUSB_MULTI_SW 0x1d1a
+#define MT6359P_LDO_VRFCK_CON0 0x1d1c
+#define MT6359P_LDO_VRFCK_MON 0x1d20
+#define MT6359P_LDO_VBBCK_CON0 0x1d2e
+#define MT6359P_LDO_VBBCK_MON 0x1d32
+#define MT6359P_LDO_VBIF28_CON0 0x1d40
+#define MT6359P_LDO_VBIF28_MON 0x1d44
+#define MT6359P_LDO_VIBR_CON0 0x1d52
+#define MT6359P_LDO_VIBR_MON 0x1d56
+#define MT6359P_LDO_VIO28_CON0 0x1d64
+#define MT6359P_LDO_VIO28_MON 0x1d68
+#define MT6359P_LDO_VM18_CON0 0x1d88
+#define MT6359P_LDO_VM18_MON 0x1d8c
+#define MT6359P_LDO_VUFS_CON0 0x1d9a
+#define MT6359P_LDO_VUFS_MON 0x1d9e
+#define MT6359P_LDO_VSRAM_PROC1_CON0 0x1e88
+#define MT6359P_LDO_VSRAM_PROC1_MON 0x1e8c
+#define MT6359P_LDO_VSRAM_PROC1_VOSEL1 0x1e90
+#define MT6359P_LDO_VSRAM_PROC2_CON0 0x1ea8
+#define MT6359P_LDO_VSRAM_PROC2_MON 0x1eac
+#define MT6359P_LDO_VSRAM_PROC2_VOSEL1 0x1eb0
+#define MT6359P_LDO_VSRAM_OTHERS_CON0 0x1f08
+#define MT6359P_LDO_VSRAM_OTHERS_MON 0x1f0c
+#define MT6359P_LDO_VSRAM_OTHERS_VOSEL1 0x1f10
+#define MT6359P_LDO_VSRAM_OTHERS_SSHUB 0x1f28
+#define MT6359P_LDO_VSRAM_MD_CON0 0x1f2e
+#define MT6359P_LDO_VSRAM_MD_MON 0x1f32
+#define MT6359P_LDO_VSRAM_MD_VOSEL1 0x1f36
+#define MT6359P_VFE28_ANA_CON0 0x1f88
+#define MT6359P_VAUX18_ANA_CON0 0x1f8c
+#define MT6359P_VUSB_ANA_CON0 0x1f90
+#define MT6359P_VBIF28_ANA_CON0 0x1f94
+#define MT6359P_VCN33_1_ANA_CON0 0x1f98
+#define MT6359P_VCN33_2_ANA_CON0 0x1f9c
+#define MT6359P_VEMC_ANA_CON0 0x1fa0
+#define MT6359P_VSIM1_ANA_CON0 0x1fa2
+#define MT6359P_VSIM2_ANA_CON0 0x1fa6
+#define MT6359P_VIO28_ANA_CON0 0x1faa
+#define MT6359P_VIBR_ANA_CON0 0x1fae
+#define MT6359P_VFE28_ELR_4 0x1fc0
+#define MT6359P_VRF18_ANA_CON0 0x2008
+#define MT6359P_VEFUSE_ANA_CON0 0x200c
+#define MT6359P_VCN18_ANA_CON0 0x2010
+#define MT6359P_VCAMIO_ANA_CON0 0x2014
+#define MT6359P_VAUD18_ANA_CON0 0x2018
+#define MT6359P_VIO18_ANA_CON0 0x201c
+#define MT6359P_VM18_ANA_CON0 0x2020
+#define MT6359P_VUFS_ANA_CON0 0x2024
+#define MT6359P_VRF12_ANA_CON0 0x202a
+#define MT6359P_VCN13_ANA_CON0 0x202e
+#define MT6359P_VA09_ANA_CON0 0x2032
+#define MT6359P_VRF18_ELR_3 0x204e
+#define MT6359P_VXO22_ANA_CON0 0x2088
+#define MT6359P_VRFCK_ANA_CON0 0x208c
+#define MT6359P_VBBCK_ANA_CON0 0x2096
+
+#define MT6359P_RG_BUCK_VCORE_VOSEL_ADDR MT6359P_BUCK_VCORE_ELR0
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_EN_ADDR MT6359P_BUCK_VGPU11_SSHUB_CON0
+#define MT6359P_RG_BUCK_VGPU11_VOSEL_ADDR MT6359P_BUCK_VGPU11_ELR0
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_ADDR MT6359P_BUCK_VGPU11_SSHUB_CON0
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_MASK 0x7F
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_SHIFT 4
+#define MT6359P_RG_LDO_VSRAM_PROC1_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC1_ELR
+#define MT6359P_RG_LDO_VSRAM_PROC2_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC2_ELR
+#define MT6359P_RG_LDO_VSRAM_OTHERS_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_ELR
+#define MT6359P_RG_LDO_VSRAM_MD_VOSEL_ADDR MT6359P_LDO_VSRAM_MD_ELR
+#define MT6359P_RG_LDO_VEMC_VOSEL_0_ADDR MT6359P_LDO_VEMC_ELR_0
+#define MT6359P_RG_LDO_VEMC_VOSEL_0_MASK 0xF
+#define MT6359P_RG_LDO_VEMC_VOSEL_0_SHIFT 0
+#define MT6359P_RG_LDO_VFE28_EN_ADDR MT6359P_LDO_VFE28_CON0
+#define MT6359P_DA_VFE28_B_EN_ADDR MT6359P_LDO_VFE28_MON
+#define MT6359P_RG_LDO_VXO22_EN_ADDR MT6359P_LDO_VXO22_CON0
+#define MT6359P_RG_LDO_VXO22_EN_SHIFT 0
+#define MT6359P_DA_VXO22_B_EN_ADDR MT6359P_LDO_VXO22_MON
+#define MT6359P_RG_LDO_VRF18_EN_ADDR MT6359P_LDO_VRF18_CON0
+#define MT6359P_RG_LDO_VRF18_EN_SHIFT 0
+#define MT6359P_DA_VRF18_B_EN_ADDR MT6359P_LDO_VRF18_MON
+#define MT6359P_RG_LDO_VRF12_EN_ADDR MT6359P_LDO_VRF12_CON0
+#define MT6359P_RG_LDO_VRF12_EN_SHIFT 0
+#define MT6359P_DA_VRF12_B_EN_ADDR MT6359P_LDO_VRF12_MON
+#define MT6359P_RG_LDO_VEFUSE_EN_ADDR MT6359P_LDO_VEFUSE_CON0
+#define MT6359P_RG_LDO_VEFUSE_EN_SHIFT 0
+#define MT6359P_DA_VEFUSE_B_EN_ADDR MT6359P_LDO_VEFUSE_MON
+#define MT6359P_RG_LDO_VCN33_1_EN_0_ADDR MT6359P_LDO_VCN33_1_CON0
+#define MT6359P_DA_VCN33_1_B_EN_ADDR MT6359P_LDO_VCN33_1_MON
+#define MT6359P_RG_LDO_VCN33_1_EN_1_ADDR MT6359P_LDO_VCN33_1_MULTI_SW
+#define MT6359P_RG_LDO_VCN33_1_EN_1_SHIFT 15
+#define MT6359P_RG_LDO_VCN33_2_EN_0_ADDR MT6359P_LDO_VCN33_2_CON0
+#define MT6359P_RG_LDO_VCN33_2_EN_0_SHIFT 0
+#define MT6359P_DA_VCN33_2_B_EN_ADDR MT6359P_LDO_VCN33_2_MON
+#define MT6359P_RG_LDO_VCN33_2_EN_1_ADDR MT6359P_LDO_VCN33_2_MULTI_SW
+#define MT6359P_RG_LDO_VCN13_EN_ADDR MT6359P_LDO_VCN13_CON0
+#define MT6359P_RG_LDO_VCN13_EN_SHIFT 0
+#define MT6359P_DA_VCN13_B_EN_ADDR MT6359P_LDO_VCN13_MON
+#define MT6359P_RG_LDO_VCN18_EN_ADDR MT6359P_LDO_VCN18_CON0
+#define MT6359P_DA_VCN18_B_EN_ADDR MT6359P_LDO_VCN18_MON
+#define MT6359P_RG_LDO_VA09_EN_ADDR MT6359P_LDO_VA09_CON0
+#define MT6359P_RG_LDO_VA09_EN_SHIFT 0
+#define MT6359P_DA_VA09_B_EN_ADDR MT6359P_LDO_VA09_MON
+#define MT6359P_RG_LDO_VCAMIO_EN_ADDR MT6359P_LDO_VCAMIO_CON0
+#define MT6359P_RG_LDO_VCAMIO_EN_SHIFT 0
+#define MT6359P_DA_VCAMIO_B_EN_ADDR MT6359P_LDO_VCAMIO_MON
+#define MT6359P_RG_LDO_VA12_EN_ADDR MT6359P_LDO_VA12_CON0
+#define MT6359P_RG_LDO_VA12_EN_SHIFT 0
+#define MT6359P_DA_VA12_B_EN_ADDR MT6359P_LDO_VA12_MON
+#define MT6359P_RG_LDO_VAUX18_EN_ADDR MT6359P_LDO_VAUX18_CON0
+#define MT6359P_DA_VAUX18_B_EN_ADDR MT6359P_LDO_VAUX18_MON
+#define MT6359P_RG_LDO_VAUD18_EN_ADDR MT6359P_LDO_VAUD18_CON0
+#define MT6359P_DA_VAUD18_B_EN_ADDR MT6359P_LDO_VAUD18_MON
+#define MT6359P_RG_LDO_VIO18_EN_ADDR MT6359P_LDO_VIO18_CON0
+#define MT6359P_RG_LDO_VIO18_EN_SHIFT 0
+#define MT6359P_DA_VIO18_B_EN_ADDR MT6359P_LDO_VIO18_MON
+#define MT6359P_RG_LDO_VEMC_EN_ADDR MT6359P_LDO_VEMC_CON0
+#define MT6359P_RG_LDO_VEMC_EN_SHIFT 0
+#define MT6359P_DA_VEMC_B_EN_ADDR MT6359P_LDO_VEMC_MON
+#define MT6359P_RG_LDO_VSIM1_EN_ADDR MT6359P_LDO_VSIM1_CON0
+#define MT6359P_RG_LDO_VSIM1_EN_SHIFT 0
+#define MT6359P_DA_VSIM1_B_EN_ADDR MT6359P_LDO_VSIM1_MON
+#define MT6359P_RG_LDO_VSIM2_EN_ADDR MT6359P_LDO_VSIM2_CON0
+#define MT6359P_RG_LDO_VSIM2_EN_SHIFT 0
+#define MT6359P_DA_VSIM2_B_EN_ADDR MT6359P_LDO_VSIM2_MON
+#define MT6359P_RG_LDO_VUSB_EN_0_ADDR MT6359P_LDO_VUSB_CON0
+#define MT6359P_DA_VUSB_B_EN_ADDR MT6359P_LDO_VUSB_MON
+#define MT6359P_RG_LDO_VUSB_EN_1_ADDR MT6359P_LDO_VUSB_MULTI_SW
+#define MT6359P_RG_LDO_VRFCK_EN_ADDR MT6359P_LDO_VRFCK_CON0
+#define MT6359P_RG_LDO_VRFCK_EN_SHIFT 0
+#define MT6359P_DA_VRFCK_B_EN_ADDR MT6359P_LDO_VRFCK_MON
+#define MT6359P_RG_LDO_VBBCK_EN_ADDR MT6359P_LDO_VBBCK_CON0
+#define MT6359P_RG_LDO_VBBCK_EN_SHIFT 0
+#define MT6359P_DA_VBBCK_B_EN_ADDR MT6359P_LDO_VBBCK_MON
+#define MT6359P_RG_LDO_VBIF28_EN_ADDR MT6359P_LDO_VBIF28_CON0
+#define MT6359P_DA_VBIF28_B_EN_ADDR MT6359P_LDO_VBIF28_MON
+#define MT6359P_RG_LDO_VIBR_EN_ADDR MT6359P_LDO_VIBR_CON0
+#define MT6359P_RG_LDO_VIBR_EN_SHIFT 0
+#define MT6359P_DA_VIBR_B_EN_ADDR MT6359P_LDO_VIBR_MON
+#define MT6359P_RG_LDO_VIO28_EN_ADDR MT6359P_LDO_VIO28_CON0
+#define MT6359P_RG_LDO_VIO28_EN_SHIFT 0
+#define MT6359P_DA_VIO28_B_EN_ADDR MT6359P_LDO_VIO28_MON
+#define MT6359P_RG_LDO_VM18_EN_ADDR MT6359P_LDO_VM18_CON0
+#define MT6359P_RG_LDO_VM18_EN_SHIFT 0
+#define MT6359P_DA_VM18_B_EN_ADDR MT6359P_LDO_VM18_MON
+#define MT6359P_RG_LDO_VUFS_EN_ADDR MT6359P_LDO_VUFS_CON0
+#define MT6359P_RG_LDO_VUFS_EN_SHIFT 0
+#define MT6359P_DA_VUFS_B_EN_ADDR MT6359P_LDO_VUFS_MON
+#define MT6359P_RG_LDO_VSRAM_PROC1_EN_ADDR MT6359P_LDO_VSRAM_PROC1_CON0
+#define MT6359P_DA_VSRAM_PROC1_B_EN_ADDR MT6359P_LDO_VSRAM_PROC1_MON
+#define MT6359P_DA_VSRAM_PROC1_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC1_VOSEL1
+#define MT6359P_RG_LDO_VSRAM_PROC2_EN_ADDR MT6359P_LDO_VSRAM_PROC2_CON0
+#define MT6359P_DA_VSRAM_PROC2_B_EN_ADDR MT6359P_LDO_VSRAM_PROC2_MON
+#define MT6359P_DA_VSRAM_PROC2_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC2_VOSEL1
+#define MT6359P_RG_LDO_VSRAM_OTHERS_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_CON0
+#define MT6359P_DA_VSRAM_OTHERS_B_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_MON
+#define MT6359P_DA_VSRAM_OTHERS_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_VOSEL1
+#define MT6359P_RG_LDO_VSRAM_OTHERS_SSHUB_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359P_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359P_RG_LDO_VSRAM_MD_EN_ADDR MT6359P_LDO_VSRAM_MD_CON0
+#define MT6359P_DA_VSRAM_MD_B_EN_ADDR MT6359P_LDO_VSRAM_MD_MON
+#define MT6359P_DA_VSRAM_MD_VOSEL_ADDR MT6359P_LDO_VSRAM_MD_VOSEL1
+#define MT6359P_RG_VCN33_1_VOSEL_ADDR MT6359P_VCN33_1_ANA_CON0
+#define MT6359P_RG_VCN33_2_VOSEL_ADDR MT6359P_VCN33_2_ANA_CON0
+#define MT6359P_RG_VEMC_VOSEL_ADDR MT6359P_VEMC_ANA_CON0
+#define MT6359P_RG_VSIM1_VOSEL_ADDR MT6359P_VSIM1_ANA_CON0
+#define MT6359P_RG_VSIM2_VOSEL_ADDR MT6359P_VSIM2_ANA_CON0
+#define MT6359P_RG_VIO28_VOSEL_ADDR MT6359P_VIO28_ANA_CON0
+#define MT6359P_RG_VIBR_VOSEL_ADDR MT6359P_VIBR_ANA_CON0
+#define MT6359P_RG_VRF18_VOSEL_ADDR MT6359P_VRF18_ANA_CON0
+#define MT6359P_RG_VEFUSE_VOSEL_ADDR MT6359P_VEFUSE_ANA_CON0
+#define MT6359P_RG_VCAMIO_VOSEL_ADDR MT6359P_VCAMIO_ANA_CON0
+#define MT6359P_RG_VIO18_VOSEL_ADDR MT6359P_VIO18_ANA_CON0
+#define MT6359P_RG_VM18_VOSEL_ADDR MT6359P_VM18_ANA_CON0
+#define MT6359P_RG_VUFS_VOSEL_ADDR MT6359P_VUFS_ANA_CON0
+#define MT6359P_RG_VRF12_VOSEL_ADDR MT6359P_VRF12_ANA_CON0
+#define MT6359P_RG_VCN13_VOSEL_ADDR MT6359P_VCN13_ANA_CON0
+#define MT6359P_RG_VA09_VOSEL_ADDR MT6359P_VRF18_ELR_3
+#define MT6359P_RG_VA12_VOSEL_ADDR MT6359P_VFE28_ELR_4
+#define MT6359P_RG_VXO22_VOSEL_ADDR MT6359P_VXO22_ANA_CON0
+#define MT6359P_RG_VRFCK_VOSEL_ADDR MT6359P_VRFCK_ANA_CON0
+#define MT6359P_RG_VBBCK_VOSEL_ADDR MT6359P_VBBCK_ANA_CON0
+#define MT6359P_RG_VBBCK_VOSEL_MASK 0xF
+#define MT6359P_RG_VBBCK_VOSEL_SHIFT 4
+#define MT6359P_VM_MODE_ADDR MT6359P_TOP_TRAP
+#define MT6359P_TMA_KEY_ADDR MT6359P_TOP_TMA_KEY
+
+#define TMA_KEY 0x9CA6
+
+#endif /* __MFD_MT6359P_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index d678f526e498..627487e26287 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -1,20 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2014 MediaTek Inc.
* Author: Flora Fu, MediaTek
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __MFD_MT6397_CORE_H__
#define __MFD_MT6397_CORE_H__
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+
+enum chip_id {
+ MT6323_CHIP_ID = 0x23,
+ MT6331_CHIP_ID = 0x20,
+ MT6332_CHIP_ID = 0x20,
+ MT6357_CHIP_ID = 0x57,
+ MT6358_CHIP_ID = 0x58,
+ MT6359_CHIP_ID = 0x59,
+ MT6366_CHIP_ID = 0x66,
+ MT6391_CHIP_ID = 0x91,
+ MT6397_CHIP_ID = 0x97,
+};
+
enum mt6397_irq_numbers {
MT6397_IRQ_SPKL_AB = 0,
MT6397_IRQ_SPKR_AB,
@@ -54,6 +61,7 @@ enum mt6397_irq_numbers {
struct mt6397_chip {
struct device *dev;
struct regmap *regmap;
+ struct notifier_block pm_nb;
int irq;
struct irq_domain *irq_domain;
struct mutex irqlock;
@@ -62,6 +70,11 @@ struct mt6397_chip {
u16 irq_masks_cache[2];
u16 int_con[2];
u16 int_status[2];
+ u16 chip_id;
+ void *irq_data;
};
+int mt6358_irq_init(struct mt6397_chip *chip);
+int mt6397_irq_init(struct mt6397_chip *chip);
+
#endif /* __MFD_MT6397_CORE_H__ */
diff --git a/include/linux/mfd/mt6397/registers.h b/include/linux/mfd/mt6397/registers.h
index f23a0a60a877..34d140627a27 100644
--- a/include/linux/mfd/mt6397/registers.h
+++ b/include/linux/mfd/mt6397/registers.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2014 MediaTek Inc.
* Author: Flora Fu, MediaTek
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __MFD_MT6397_REGISTERS_H__
diff --git a/include/linux/mfd/mt6397/rtc.h b/include/linux/mfd/mt6397/rtc.h
new file mode 100644
index 000000000000..068ae1c0f0e8
--- /dev/null
+++ b/include/linux/mfd/mt6397/rtc.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2014-2019 MediaTek Inc.
+ *
+ * Author: Tianping.Fang <tianping.fang@mediatek.com>
+ * Sean Wang <sean.wang@mediatek.com>
+ */
+
+#ifndef _LINUX_MFD_MT6397_RTC_H_
+#define _LINUX_MFD_MT6397_RTC_H_
+
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+
+#define RTC_BBPU 0x0000
+#define RTC_BBPU_CBUSY BIT(6)
+#define RTC_BBPU_KEY (0x43 << 8)
+
+#define RTC_WRTGR_MT6358 0x003a
+#define RTC_WRTGR_MT6397 0x003c
+#define RTC_WRTGR_MT6323 RTC_WRTGR_MT6397
+
+#define RTC_IRQ_STA 0x0002
+#define RTC_IRQ_STA_AL BIT(0)
+#define RTC_IRQ_STA_LP BIT(3)
+
+#define RTC_IRQ_EN 0x0004
+#define RTC_IRQ_EN_AL BIT(0)
+#define RTC_IRQ_EN_ONESHOT BIT(2)
+#define RTC_IRQ_EN_LP BIT(3)
+#define RTC_IRQ_EN_ONESHOT_AL (RTC_IRQ_EN_ONESHOT | RTC_IRQ_EN_AL)
+
+#define RTC_AL_MASK 0x0008
+#define RTC_AL_MASK_DOW BIT(4)
+
+#define RTC_TC_SEC 0x000a
+#define RTC_TC_MTH_MASK 0x000f
+/* Min, Hour, Dom... register offsets relative to RTC_TC_SEC */
+#define RTC_OFFSET_SEC 0
+#define RTC_OFFSET_MIN 1
+#define RTC_OFFSET_HOUR 2
+#define RTC_OFFSET_DOM 3
+#define RTC_OFFSET_DOW 4
+#define RTC_OFFSET_MTH 5
+#define RTC_OFFSET_YEAR 6
+#define RTC_OFFSET_COUNT 7
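+
+/*
+ * Illustrative sketch, assuming the regmap API: the seven time
+ * registers sit back to back starting at RTC_TC_SEC, so a driver can
+ * fetch them in one bulk read, e.g.
+ *
+ *	u16 data[RTC_OFFSET_COUNT];
+ *
+ *	regmap_bulk_read(rtc->regmap, rtc->addr_base + RTC_TC_SEC,
+ *			 data, RTC_OFFSET_COUNT);
+ *	tm->tm_sec = data[RTC_OFFSET_SEC];
+ *	tm->tm_min = data[RTC_OFFSET_MIN];
+ */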
+
+#define RTC_AL_SEC 0x0018
+
+#define RTC_AL_SEC_MASK 0x003f
+#define RTC_AL_MIN_MASK 0x003f
+#define RTC_AL_HOU_MASK 0x001f
+#define RTC_AL_DOM_MASK 0x001f
+#define RTC_AL_DOW_MASK 0x0007
+#define RTC_AL_MTH_MASK 0x000f
+#define RTC_AL_YEA_MASK 0x007f
+
+#define RTC_PDN2 0x002e
+#define RTC_PDN2_PWRON_ALARM BIT(4)
+
+#define RTC_MIN_YEAR 1968
+#define RTC_BASE_YEAR 1900
+#define RTC_NUM_YEARS 128
+#define RTC_MIN_YEAR_OFFSET (RTC_MIN_YEAR - RTC_BASE_YEAR)
+
+#define MTK_RTC_POLL_DELAY_US 10
+#define MTK_RTC_POLL_TIMEOUT (jiffies_to_usecs(HZ))
+
+struct mtk_rtc_data {
+ u32 wrtgr;
+};
+
+struct mt6397_rtc {
+ struct rtc_device *rtc_dev;
+
+ /* Protect register access from multiple tasks */
+ struct mutex lock;
+ struct regmap *regmap;
+ int irq;
+ u32 addr_base;
+ const struct mtk_rtc_data *data;
+};
+
+#endif /* _LINUX_MFD_MT6397_RTC_H_ */
diff --git a/include/linux/mfd/mxs-lradc.h b/include/linux/mfd/mxs-lradc.h
index 661a4521f723..ada3d81ee277 100644
--- a/include/linux/mfd/mxs-lradc.h
+++ b/include/linux/mfd/mxs-lradc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Freescale MXS Low Resolution Analog-to-Digital Converter driver
*
@@ -5,16 +6,6 @@
* Copyright (c) 2016 Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
*
* Author: Marek Vasut <marex@denx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __MFD_MXS_LRADC_H
diff --git a/include/linux/mfd/ntxec.h b/include/linux/mfd/ntxec.h
new file mode 100644
index 000000000000..e5880c346da9
--- /dev/null
+++ b/include/linux/mfd/ntxec.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Jonathan Neuschäfer
+ *
+ * Register access and version information for the Netronix embedded
+ * controller.
+ */
+
+#ifndef NTXEC_H
+#define NTXEC_H
+
+#include <linux/types.h>
+
+struct device;
+struct regmap;
+
+struct ntxec {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+/*
+ * Some registers, such as the battery status register (0x41), are in
+ * big-endian, but others only have eight significant bits, which are in the
+ * first byte transmitted over I2C (the MSB of the big-endian value).
+ * This convenience function converts an 8-bit value to the 16-bit form
+ * expected by the second kind of register.
+ */
+static inline u16 ntxec_reg8(u8 value)
+{
+ return value << 8;
+}
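+
+/*
+ * Illustrative usage, with NTXEC_SOME_REG standing in for a real
+ * register address (assumed, not defined in this header):
+ *
+ *	regmap_write(ec->regmap, NTXEC_SOME_REG, ntxec_reg8(0x2a));
+ */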
+
+/* Known firmware versions */
+#define NTXEC_VERSION_KOBO_AURA 0xd726 /* found in Kobo Aura */
+#define NTXEC_VERSION_TOLINO_SHINE2 0xf110 /* found in Tolino Shine 2 HD */
+#define NTXEC_VERSION_TOLINO_VISION 0xe135 /* found in Tolino Vision, contains RTC, ADC, PWM, home pad */
+#endif
diff --git a/include/linux/mfd/ocelot.h b/include/linux/mfd/ocelot.h
new file mode 100644
index 000000000000..dd72073d2d4f
--- /dev/null
+++ b/include/linux/mfd/ocelot.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2022 Innovative Advantage Inc. */
+
+#ifndef _LINUX_MFD_OCELOT_H
+#define _LINUX_MFD_OCELOT_H
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+struct resource;
+
+static inline struct regmap *
+ocelot_regmap_from_resource_optional(struct platform_device *pdev,
+ unsigned int index,
+ const struct regmap_config *config)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *regs;
+
+ /*
+ * Don't use _get_and_ioremap_resource() here, since that will invoke
+	 * prints of "invalid resource", which will simply add confusion.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, index);
+ if (res) {
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return ERR_CAST(regs);
+ return devm_regmap_init_mmio(dev, regs, config);
+ }
+
+ /*
+ * Fall back to using REG and getting the resource from the parent
+ * device, which is possible in an MFD configuration
+	 * device, which is possible in an MFD configuration.
+ if (dev->parent) {
+ res = platform_get_resource(pdev, IORESOURCE_REG, index);
+ if (!res)
+ return NULL;
+
+ return dev_get_regmap(dev->parent, res->name);
+ }
+
+ return NULL;
+}
+
+static inline struct regmap *
+ocelot_regmap_from_resource(struct platform_device *pdev, unsigned int index,
+ const struct regmap_config *config)
+{
+ struct regmap *map;
+
+ map = ocelot_regmap_from_resource_optional(pdev, index, config);
+ return map ?: ERR_PTR(-ENOENT);
+}
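+
+/*
+ * Illustrative usage from a child driver's probe (a sketch; "priv" and
+ * "config" are assumed to exist in the caller):
+ *
+ *	priv->map = ocelot_regmap_from_resource(pdev, 0, &config);
+ *	if (IS_ERR(priv->map))
+ *		return PTR_ERR(priv->map);
+ *
+ * The _optional variant instead returns NULL when the resource is
+ * absent, letting callers treat the regmap as optional.
+ */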
+
+#endif
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index 6dec43826303..eda1ffd99c1a 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* TI Palmas
*
@@ -5,12 +6,6 @@
*
* Author: Graeme Gregory <gg@slimlogic.co.uk>
* Author: Ian Lartey <ian@slimlogic.co.uk>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_PALMAS_H
@@ -20,8 +15,7 @@
#include <linux/leds.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
-#include <linux/extcon.h>
-#include <linux/of_gpio.h>
+#include <linux/extcon-provider.h>
#include <linux/usb/phy_companion.h>
#define PALMAS_NUM_CLIENTS 3
@@ -134,12 +128,6 @@ struct palmas_pmic_driver_data {
struct regulator_config config);
};
-struct palmas_adc_wakeup_property {
- int adc_channel_number;
- int adc_high_threshold;
- int adc_low_threshold;
-};
-
struct palmas_gpadc_platform_data {
/* Channel 3 current source is only enabled during conversion */
int ch3_current; /* 0: off; 1: 10uA; 2: 400uA; 3: 800 uA */
@@ -158,8 +146,6 @@ struct palmas_gpadc_platform_data {
int start_polarity;
int auto_conversion_period_ms;
- struct palmas_adc_wakeup_property *adc_wakeup1_data;
- struct palmas_adc_wakeup_property *adc_wakeup2_data;
};
struct palmas_reg_init {
@@ -553,7 +539,6 @@ struct palmas_pmic {
struct palmas *palmas;
struct device *dev;
struct regulator_desc desc[PALMAS_NUM_REGS];
- struct regulator_dev *rdev[PALMAS_NUM_REGS];
struct mutex mutex;
int smps123;
@@ -3733,6 +3718,9 @@ enum usb_irq_events {
#define TPS65917_REGEN3_CTRL_MODE_ACTIVE 0x01
#define TPS65917_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0x00
+/* POWERHOLD Mask field for PRIMARY_SECONDARY_PAD2 register */
+#define TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK 0xC
+
/* Registers for function RESOURCE */
#define TPS65917_REGEN1_CTRL 0x2
#define TPS65917_PLLEN_CTRL 0x3
diff --git a/include/linux/mfd/pcf50633/adc.h b/include/linux/mfd/pcf50633/adc.h
index b35e62801ffa..6a81896d4889 100644
--- a/include/linux/mfd/pcf50633/adc.h
+++ b/include/linux/mfd/pcf50633/adc.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* adc.h -- Driver for NXP PCF50633 ADC
*
* (C) 2006-2008 by Openmoko, Inc.
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __LINUX_MFD_PCF50633_ADC_H
diff --git a/include/linux/mfd/pcf50633/backlight.h b/include/linux/mfd/pcf50633/backlight.h
index 83747e217b27..fd4a4f8d6c13 100644
--- a/include/linux/mfd/pcf50633/backlight.h
+++ b/include/linux/mfd/pcf50633/backlight.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
* PCF50633 backlight device driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef __LINUX_MFD_PCF50633_BACKLIGHT
diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h
index a80840752b4c..539f27f8bd89 100644
--- a/include/linux/mfd/pcf50633/core.h
+++ b/include/linux/mfd/pcf50633/core.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* core.h -- Core driver for NXP PCF50633
*
* (C) 2006-2008 by Openmoko, Inc.
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __LINUX_MFD_PCF50633_CORE_H
@@ -17,6 +13,7 @@
#include <linux/workqueue.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/pm.h>
#include <linux/power_supply.h>
#include <linux/mfd/pcf50633/backlight.h>
@@ -230,9 +227,6 @@ static inline struct pcf50633 *dev_to_pcf50633(struct device *dev)
int pcf50633_irq_init(struct pcf50633 *pcf, int irq);
void pcf50633_irq_free(struct pcf50633 *pcf);
-#ifdef CONFIG_PM
-int pcf50633_irq_suspend(struct pcf50633 *pcf);
-int pcf50633_irq_resume(struct pcf50633 *pcf);
-#endif
+extern const struct dev_pm_ops pcf50633_pm;
#endif
diff --git a/include/linux/mfd/pcf50633/gpio.h b/include/linux/mfd/pcf50633/gpio.h
index a42b845efc54..f589e35795f1 100644
--- a/include/linux/mfd/pcf50633/gpio.h
+++ b/include/linux/mfd/pcf50633/gpio.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* gpio.h -- GPIO driver for NXP PCF50633
*
* (C) 2006-2008 by Openmoko, Inc.
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __LINUX_MFD_PCF50633_GPIO_H
diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h
index df4f5fa88de3..fa5cb9256d99 100644
--- a/include/linux/mfd/pcf50633/mbc.h
+++ b/include/linux/mfd/pcf50633/mbc.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* mbc.h -- Driver for NXP PCF50633 Main Battery Charger
*
* (C) 2006-2008 by Openmoko, Inc.
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __LINUX_MFD_PCF50633_MBC_H
diff --git a/include/linux/mfd/pcf50633/pmic.h b/include/linux/mfd/pcf50633/pmic.h
index 2d3dbe53b235..eac0c3d8e984 100644
--- a/include/linux/mfd/pcf50633/pmic.h
+++ b/include/linux/mfd/pcf50633/pmic.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MFD_PCF50633_PMIC_H
#define __LINUX_MFD_PCF50633_PMIC_H
diff --git a/include/linux/mfd/qcom_rpm.h b/include/linux/mfd/qcom_rpm.h
index 742ebf1b76ca..4b6b644f1108 100644
--- a/include/linux/mfd/qcom_rpm.h
+++ b/include/linux/mfd/qcom_rpm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __QCOM_RPM_H__
#define __QCOM_RPM_H__
diff --git a/include/linux/mfd/rave-sp.h b/include/linux/mfd/rave-sp.h
new file mode 100644
index 000000000000..11eef77ef976
--- /dev/null
+++ b/include/linux/mfd/rave-sp.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+/*
+ * Core definitions for RAVE SP MFD driver.
+ *
+ * Copyright (C) 2017 Zodiac Inflight Innovations
+ */
+
+#ifndef _LINUX_RAVE_SP_H_
+#define _LINUX_RAVE_SP_H_
+
+#include <linux/notifier.h>
+
+enum rave_sp_command {
+ RAVE_SP_CMD_GET_FIRMWARE_VERSION = 0x20,
+ RAVE_SP_CMD_GET_BOOTLOADER_VERSION = 0x21,
+ RAVE_SP_CMD_BOOT_SOURCE = 0x26,
+ RAVE_SP_CMD_GET_BOARD_COPPER_REV = 0x2B,
+ RAVE_SP_CMD_GET_GPIO_STATE = 0x2F,
+
+ RAVE_SP_CMD_STATUS = 0xA0,
+ RAVE_SP_CMD_SW_WDT = 0xA1,
+ RAVE_SP_CMD_PET_WDT = 0xA2,
+ RAVE_SP_CMD_RMB_EEPROM = 0xA4,
+ RAVE_SP_CMD_SET_BACKLIGHT = 0xA6,
+ RAVE_SP_CMD_RESET = 0xA7,
+ RAVE_SP_CMD_RESET_REASON = 0xA8,
+
+ RAVE_SP_CMD_REQ_COPPER_REV = 0xB6,
+ RAVE_SP_CMD_GET_I2C_DEVICE_STATUS = 0xBA,
+ RAVE_SP_CMD_GET_SP_SILICON_REV = 0xB9,
+ RAVE_SP_CMD_CONTROL_EVENTS = 0xBB,
+
+ RAVE_SP_EVNT_BASE = 0xE0,
+};
+
+struct rave_sp;
+
+static inline unsigned long rave_sp_action_pack(u8 event, u8 value)
+{
+ return ((unsigned long)value << 8) | event;
+}
+
+static inline u8 rave_sp_action_unpack_event(unsigned long action)
+{
+ return action;
+}
+
+static inline u8 rave_sp_action_unpack_value(unsigned long action)
+{
+ return action >> 8;
+}
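+
+/*
+ * The pack/unpack helpers above are inverses of each other:
+ *
+ *	unsigned long action = rave_sp_action_pack(event, value);
+ *
+ *	rave_sp_action_unpack_event(action) == event;
+ *	rave_sp_action_unpack_value(action) == value;
+ *
+ * which is how a notifier callback registered via
+ * devm_rave_sp_register_event_notifier() (declared below) is expected
+ * to decode its "action" argument.
+ */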
+
+int rave_sp_exec(struct rave_sp *sp,
+ void *__data, size_t data_size,
+ void *reply_data, size_t reply_data_size);
+
+struct device;
+int devm_rave_sp_register_event_notifier(struct device *dev,
+ struct notifier_block *nb);
+
+#endif /* _LINUX_RAVE_SP_H_ */
diff --git a/include/linux/mfd/rc5t583.h b/include/linux/mfd/rc5t583.h
index 8d0a392e0a7f..4f220146cc02 100644
--- a/include/linux/mfd/rc5t583.h
+++ b/include/linux/mfd/rc5t583.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Core driver interface to access RICOH_RC5T583 power management chip.
*
@@ -6,19 +7,6 @@
*
* Based on code
* Copyright (C) 2011 RICOH COMPANY,LTD
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
*/
#ifndef __LINUX_MFD_RC5T583_H
diff --git a/include/linux/mfd/rdc321x.h b/include/linux/mfd/rdc321x.h
index 442743a8f915..697933b2227b 100644
--- a/include/linux/mfd/rdc321x.h
+++ b/include/linux/mfd/rdc321x.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __RDC321X_MFD_H
#define __RDC321X_MFD_H
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
index 83701ef7d3c7..9af1f3105f80 100644
--- a/include/linux/mfd/rk808.h
+++ b/include/linux/mfd/rk808.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Register definitions for Rockchip's RK808/RK818 PMIC
*
@@ -9,15 +10,6 @@
* Copyright (C) 2016 PHYTEC Messtechnik GmbH
*
* Author: Wadim Egorov <w.egorov@phytec.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef __LINUX_REGULATOR_RK808_H
@@ -206,6 +198,97 @@ enum rk818_reg {
#define RK818_USB_ILMIN_2000MA 0x7
#define RK818_USB_CHG_SD_VSEL_MASK 0x70
+/* RK805 */
+enum rk805_reg {
+ RK805_ID_DCDC1,
+ RK805_ID_DCDC2,
+ RK805_ID_DCDC3,
+ RK805_ID_DCDC4,
+ RK805_ID_LDO1,
+ RK805_ID_LDO2,
+ RK805_ID_LDO3,
+};
+
+/* CONFIG REGISTER */
+#define RK805_VB_MON_REG 0x21
+#define RK805_THERMAL_REG 0x22
+
+/* POWER CHANNELS ENABLE REGISTER */
+#define RK805_DCDC_EN_REG 0x23
+#define RK805_SLP_DCDC_EN_REG 0x25
+#define RK805_SLP_LDO_EN_REG 0x26
+#define RK805_LDO_EN_REG 0x27
+
+/* BUCK AND LDO CONFIG REGISTER */
+#define RK805_BUCK_LDO_SLP_LP_EN_REG 0x2A
+#define RK805_BUCK1_CONFIG_REG 0x2E
+#define RK805_BUCK1_ON_VSEL_REG 0x2F
+#define RK805_BUCK1_SLP_VSEL_REG 0x30
+#define RK805_BUCK2_CONFIG_REG 0x32
+#define RK805_BUCK2_ON_VSEL_REG 0x33
+#define RK805_BUCK2_SLP_VSEL_REG 0x34
+#define RK805_BUCK3_CONFIG_REG 0x36
+#define RK805_BUCK4_CONFIG_REG 0x37
+#define RK805_BUCK4_ON_VSEL_REG 0x38
+#define RK805_BUCK4_SLP_VSEL_REG 0x39
+#define RK805_LDO1_ON_VSEL_REG 0x3B
+#define RK805_LDO1_SLP_VSEL_REG 0x3C
+#define RK805_LDO2_ON_VSEL_REG 0x3D
+#define RK805_LDO2_SLP_VSEL_REG 0x3E
+#define RK805_LDO3_ON_VSEL_REG 0x3F
+#define RK805_LDO3_SLP_VSEL_REG 0x40
+
+/* INTERRUPT REGISTER */
+#define RK805_PWRON_LP_INT_TIME_REG 0x47
+#define RK805_PWRON_DB_REG 0x48
+#define RK805_DEV_CTRL_REG 0x4B
+#define RK805_INT_STS_REG 0x4C
+#define RK805_INT_STS_MSK_REG 0x4D
+#define RK805_GPIO_IO_POL_REG 0x50
+#define RK805_OUT_REG 0x52
+#define RK805_ON_SOURCE_REG 0xAE
+#define RK805_OFF_SOURCE_REG 0xAF
+
+#define RK805_NUM_REGULATORS 7
+
+#define RK805_PWRON_FALL_RISE_INT_EN 0x0
+#define RK805_PWRON_FALL_RISE_INT_MSK 0x81
+
+/* RK805 IRQ Definitions */
+#define RK805_IRQ_PWRON_RISE 0
+#define RK805_IRQ_VB_LOW 1
+#define RK805_IRQ_PWRON 2
+#define RK805_IRQ_PWRON_LP 3
+#define RK805_IRQ_HOTDIE 4
+#define RK805_IRQ_RTC_ALARM 5
+#define RK805_IRQ_RTC_PERIOD 6
+#define RK805_IRQ_PWRON_FALL 7
+
+#define RK805_IRQ_PWRON_RISE_MSK BIT(0)
+#define RK805_IRQ_VB_LOW_MSK BIT(1)
+#define RK805_IRQ_PWRON_MSK BIT(2)
+#define RK805_IRQ_PWRON_LP_MSK BIT(3)
+#define RK805_IRQ_HOTDIE_MSK BIT(4)
+#define RK805_IRQ_RTC_ALARM_MSK BIT(5)
+#define RK805_IRQ_RTC_PERIOD_MSK BIT(6)
+#define RK805_IRQ_PWRON_FALL_MSK BIT(7)
+
+#define RK805_PWR_RISE_INT_STATUS BIT(0)
+#define RK805_VB_LOW_INT_STATUS BIT(1)
+#define RK805_PWRON_INT_STATUS BIT(2)
+#define RK805_PWRON_LP_INT_STATUS BIT(3)
+#define RK805_HOTDIE_INT_STATUS BIT(4)
+#define RK805_ALARM_INT_STATUS BIT(5)
+#define RK805_PERIOD_INT_STATUS BIT(6)
+#define RK805_PWR_FALL_INT_STATUS BIT(7)
+
+#define RK805_BUCK1_2_ILMAX_MASK (3 << 6)
+#define RK805_BUCK3_4_ILMAX_MASK (3 << 3)
+#define RK805_RTC_PERIOD_INT_MASK (1 << 6)
+#define RK805_RTC_ALARM_INT_MASK (1 << 5)
+#define RK805_INT_ALARM_EN (1 << 3)
+#define RK805_INT_TIMER_EN (1 << 2)
+
/* RK808 IRQ Definitions */
#define RK808_IRQ_VOUT_LO 0
#define RK808_IRQ_VB_LO 1
@@ -290,7 +373,9 @@ enum rk818_reg {
#define SWITCH2_EN BIT(6)
#define SWITCH1_EN BIT(5)
#define DEV_OFF_RST BIT(3)
+#define DEV_RST BIT(2)
#define DEV_OFF BIT(0)
+#define RTC_STOP BIT(0)
#define VB_LO_ACT BIT(4)
#define VB_LO_SEL_3500MV (7 << 0)
@@ -298,6 +383,358 @@ enum rk818_reg {
#define VOUT_LO_INT BIT(0)
#define CLK32KOUT2_EN BIT(0)
+#define TEMP115C 0x0c
+#define TEMP_HOTDIE_MSK 0x0c
+#define SLP_SD_MSK (0x3 << 2)
+#define SHUTDOWN_FUN (0x2 << 2)
+#define SLEEP_FUN (0x1 << 2)
+#define RK8XX_ID_MSK 0xfff0
+#define PWM_MODE_MSK BIT(7)
+#define FPWM_MODE BIT(7)
+#define AUTO_PWM_MODE 0
+
+enum rk817_reg_id {
+ RK817_ID_DCDC1 = 0,
+ RK817_ID_DCDC2,
+ RK817_ID_DCDC3,
+ RK817_ID_DCDC4,
+ RK817_ID_LDO1,
+ RK817_ID_LDO2,
+ RK817_ID_LDO3,
+ RK817_ID_LDO4,
+ RK817_ID_LDO5,
+ RK817_ID_LDO6,
+ RK817_ID_LDO7,
+ RK817_ID_LDO8,
+ RK817_ID_LDO9,
+ RK817_ID_BOOST,
+ RK817_ID_BOOST_OTG_SW,
+ RK817_NUM_REGULATORS
+};
+
+enum rk809_reg_id {
+ RK809_ID_DCDC5 = RK817_ID_BOOST,
+ RK809_ID_SW1,
+ RK809_ID_SW2,
+ RK809_NUM_REGULATORS
+};
+
+#define RK817_SECONDS_REG 0x00
+#define RK817_MINUTES_REG 0x01
+#define RK817_HOURS_REG 0x02
+#define RK817_DAYS_REG 0x03
+#define RK817_MONTHS_REG 0x04
+#define RK817_YEARS_REG 0x05
+#define RK817_WEEKS_REG 0x06
+#define RK817_ALARM_SECONDS_REG 0x07
+#define RK817_ALARM_MINUTES_REG 0x08
+#define RK817_ALARM_HOURS_REG 0x09
+#define RK817_ALARM_DAYS_REG 0x0a
+#define RK817_ALARM_MONTHS_REG 0x0b
+#define RK817_ALARM_YEARS_REG 0x0c
+#define RK817_RTC_CTRL_REG 0xd
+#define RK817_RTC_STATUS_REG 0xe
+#define RK817_RTC_INT_REG 0xf
+#define RK817_RTC_COMP_LSB_REG 0x10
+#define RK817_RTC_COMP_MSB_REG 0x11
+
+/* RK817 Codec Registers */
+#define RK817_CODEC_DTOP_VUCTL 0x12
+#define RK817_CODEC_DTOP_VUCTIME 0x13
+#define RK817_CODEC_DTOP_LPT_SRST 0x14
+#define RK817_CODEC_DTOP_DIGEN_CLKE 0x15
+#define RK817_CODEC_AREF_RTCFG0 0x16
+#define RK817_CODEC_AREF_RTCFG1 0x17
+#define RK817_CODEC_AADC_CFG0 0x18
+#define RK817_CODEC_AADC_CFG1 0x19
+#define RK817_CODEC_DADC_VOLL 0x1a
+#define RK817_CODEC_DADC_VOLR 0x1b
+#define RK817_CODEC_DADC_SR_ACL0 0x1e
+#define RK817_CODEC_DADC_ALC1 0x1f
+#define RK817_CODEC_DADC_ALC2 0x20
+#define RK817_CODEC_DADC_NG 0x21
+#define RK817_CODEC_DADC_HPF 0x22
+#define RK817_CODEC_DADC_RVOLL 0x23
+#define RK817_CODEC_DADC_RVOLR 0x24
+#define RK817_CODEC_AMIC_CFG0 0x27
+#define RK817_CODEC_AMIC_CFG1 0x28
+#define RK817_CODEC_DMIC_PGA_GAIN 0x29
+#define RK817_CODEC_DMIC_LMT1 0x2a
+#define RK817_CODEC_DMIC_LMT2 0x2b
+#define RK817_CODEC_DMIC_NG1 0x2c
+#define RK817_CODEC_DMIC_NG2 0x2d
+#define RK817_CODEC_ADAC_CFG0 0x2e
+#define RK817_CODEC_ADAC_CFG1 0x2f
+#define RK817_CODEC_DDAC_POPD_DACST 0x30
+#define RK817_CODEC_DDAC_VOLL 0x31
+#define RK817_CODEC_DDAC_VOLR 0x32
+#define RK817_CODEC_DDAC_SR_LMT0 0x35
+#define RK817_CODEC_DDAC_LMT1 0x36
+#define RK817_CODEC_DDAC_LMT2 0x37
+#define RK817_CODEC_DDAC_MUTE_MIXCTL 0x38
+#define RK817_CODEC_DDAC_RVOLL 0x39
+#define RK817_CODEC_DDAC_RVOLR 0x3a
+#define RK817_CODEC_AHP_ANTI0 0x3b
+#define RK817_CODEC_AHP_ANTI1 0x3c
+#define RK817_CODEC_AHP_CFG0 0x3d
+#define RK817_CODEC_AHP_CFG1 0x3e
+#define RK817_CODEC_AHP_CP 0x3f
+#define RK817_CODEC_ACLASSD_CFG1 0x40
+#define RK817_CODEC_ACLASSD_CFG2 0x41
+#define RK817_CODEC_APLL_CFG0 0x42
+#define RK817_CODEC_APLL_CFG1 0x43
+#define RK817_CODEC_APLL_CFG2 0x44
+#define RK817_CODEC_APLL_CFG3 0x45
+#define RK817_CODEC_APLL_CFG4 0x46
+#define RK817_CODEC_APLL_CFG5 0x47
+#define RK817_CODEC_DI2S_CKM 0x48
+#define RK817_CODEC_DI2S_RSD 0x49
+#define RK817_CODEC_DI2S_RXCR1 0x4a
+#define RK817_CODEC_DI2S_RXCR2 0x4b
+#define RK817_CODEC_DI2S_RXCMD_TSD 0x4c
+#define RK817_CODEC_DI2S_TXCR1 0x4d
+#define RK817_CODEC_DI2S_TXCR2 0x4e
+#define RK817_CODEC_DI2S_TXCR3_TXCMD 0x4f
+
+/* RK817_CODEC_DI2S_CKM */
+#define RK817_I2S_MODE_MASK (0x1 << 0)
+#define RK817_I2S_MODE_MST (0x1 << 0)
+#define RK817_I2S_MODE_SLV (0x0 << 0)
+
+/* RK817_CODEC_DDAC_MUTE_MIXCTL */
+#define DACMT_MASK (0x1 << 0)
+#define DACMT_ENABLE (0x1 << 0)
+#define DACMT_DISABLE (0x0 << 0)
+
+/* RK817_CODEC_DI2S_RXCR2 */
+#define VDW_RX_24BITS (0x17)
+#define VDW_RX_16BITS (0x0f)
+
+/* RK817_CODEC_DI2S_TXCR2 */
+#define VDW_TX_24BITS (0x17)
+#define VDW_TX_16BITS (0x0f)
+
+/* RK817_CODEC_AMIC_CFG0 */
+#define MIC_DIFF_MASK (0x1 << 7)
+#define MIC_DIFF_DIS (0x0 << 7)
+#define MIC_DIFF_EN (0x1 << 7)
+
+/* RK817 Battery Registers */
+#define RK817_GAS_GAUGE_ADC_CONFIG0 0x50
+#define RK817_GG_EN (0x1 << 7)
+#define RK817_SYS_VOL_ADC_EN (0x1 << 6)
+#define RK817_TS_ADC_EN (0x1 << 5)
+#define RK817_USB_VOL_ADC_EN (0x1 << 4)
+#define RK817_BAT_VOL_ADC_EN (0x1 << 3)
+#define RK817_BAT_CUR_ADC_EN (0x1 << 2)
+
+#define RK817_GAS_GAUGE_ADC_CONFIG1 0x55
+
+#define RK817_VOL_CUR_CALIB_UPD BIT(7)
+
+#define RK817_GAS_GAUGE_GG_CON 0x56
+#define RK817_GAS_GAUGE_GG_STS 0x57
+
+#define RK817_BAT_CON (0x1 << 4)
+#define RK817_RELAX_VOL_UPD (0x3 << 2)
+#define RK817_RELAX_STS (0x1 << 1)
+
+#define RK817_GAS_GAUGE_RELAX_THRE_H 0x58
+#define RK817_GAS_GAUGE_RELAX_THRE_L 0x59
+#define RK817_GAS_GAUGE_OCV_THRE_VOL 0x62
+#define RK817_GAS_GAUGE_OCV_VOL_H 0x63
+#define RK817_GAS_GAUGE_OCV_VOL_L 0x64
+#define RK817_GAS_GAUGE_PWRON_VOL_H 0x6b
+#define RK817_GAS_GAUGE_PWRON_VOL_L 0x6c
+#define RK817_GAS_GAUGE_PWRON_CUR_H 0x6d
+#define RK817_GAS_GAUGE_PWRON_CUR_L 0x6e
+#define RK817_GAS_GAUGE_OFF_CNT 0x6f
+#define RK817_GAS_GAUGE_Q_INIT_H3 0x70
+#define RK817_GAS_GAUGE_Q_INIT_H2 0x71
+#define RK817_GAS_GAUGE_Q_INIT_L1 0x72
+#define RK817_GAS_GAUGE_Q_INIT_L0 0x73
+#define RK817_GAS_GAUGE_Q_PRES_H3 0x74
+#define RK817_GAS_GAUGE_Q_PRES_H2 0x75
+#define RK817_GAS_GAUGE_Q_PRES_L1 0x76
+#define RK817_GAS_GAUGE_Q_PRES_L0 0x77
+#define RK817_GAS_GAUGE_BAT_VOL_H 0x78
+#define RK817_GAS_GAUGE_BAT_VOL_L 0x79
+#define RK817_GAS_GAUGE_BAT_CUR_H 0x7a
+#define RK817_GAS_GAUGE_BAT_CUR_L 0x7b
+#define RK817_GAS_GAUGE_USB_VOL_H 0x7e
+#define RK817_GAS_GAUGE_USB_VOL_L 0x7f
+#define RK817_GAS_GAUGE_SYS_VOL_H 0x80
+#define RK817_GAS_GAUGE_SYS_VOL_L 0x81
+#define RK817_GAS_GAUGE_Q_MAX_H3 0x82
+#define RK817_GAS_GAUGE_Q_MAX_H2 0x83
+#define RK817_GAS_GAUGE_Q_MAX_L1 0x84
+#define RK817_GAS_GAUGE_Q_MAX_L0 0x85
+#define RK817_GAS_GAUGE_SLEEP_CON_SAMP_CUR_H 0x8f
+#define RK817_GAS_GAUGE_SLEEP_CON_SAMP_CUR_L 0x90
+#define RK817_GAS_GAUGE_CAL_OFFSET_H 0x91
+#define RK817_GAS_GAUGE_CAL_OFFSET_L 0x92
+#define RK817_GAS_GAUGE_VCALIB0_H 0x93
+#define RK817_GAS_GAUGE_VCALIB0_L 0x94
+#define RK817_GAS_GAUGE_VCALIB1_H 0x95
+#define RK817_GAS_GAUGE_VCALIB1_L 0x96
+#define RK817_GAS_GAUGE_IOFFSET_H 0x97
+#define RK817_GAS_GAUGE_IOFFSET_L 0x98
+#define RK817_GAS_GAUGE_BAT_R1 0x9a
+#define RK817_GAS_GAUGE_BAT_R2 0x9b
+#define RK817_GAS_GAUGE_BAT_R3 0x9c
+#define RK817_GAS_GAUGE_DATA0 0x9d
+#define RK817_GAS_GAUGE_DATA1 0x9e
+#define RK817_GAS_GAUGE_DATA2 0x9f
+#define RK817_GAS_GAUGE_DATA3 0xa0
+#define RK817_GAS_GAUGE_DATA4 0xa1
+#define RK817_GAS_GAUGE_DATA5 0xa2
+#define RK817_GAS_GAUGE_CUR_ADC_K0 0xb0
+
+#define RK817_POWER_EN_REG(i) (0xb1 + (i))
+#define RK817_POWER_SLP_EN_REG(i) (0xb5 + (i))
+
+#define RK817_POWER_CONFIG (0xb9)
+
+#define RK817_BUCK_CONFIG_REG(i) (0xba + (i) * 3)
+
+#define RK817_BUCK1_ON_VSEL_REG 0xBB
+#define RK817_BUCK1_SLP_VSEL_REG 0xBC
+
+#define RK817_BUCK2_CONFIG_REG 0xBD
+#define RK817_BUCK2_ON_VSEL_REG 0xBE
+#define RK817_BUCK2_SLP_VSEL_REG 0xBF
+
+#define RK817_BUCK3_CONFIG_REG 0xC0
+#define RK817_BUCK3_ON_VSEL_REG 0xC1
+#define RK817_BUCK3_SLP_VSEL_REG 0xC2
+
+#define RK817_BUCK4_CONFIG_REG 0xC3
+#define RK817_BUCK4_ON_VSEL_REG 0xC4
+#define RK817_BUCK4_SLP_VSEL_REG 0xC5
+
+#define RK817_LDO_ON_VSEL_REG(idx) (0xcc + (idx) * 2)
+#define RK817_BOOST_OTG_CFG (0xde)
+
+#define RK817_PMIC_CHRG_OUT 0xe4
+#define RK817_CHRG_VOL_SEL (0x07 << 4)
+#define RK817_CHRG_CUR_SEL (0x07 << 0)
+
+#define RK817_PMIC_CHRG_IN 0xe5
+#define RK817_USB_VLIM_EN (0x01 << 7)
+#define RK817_USB_VLIM_SEL (0x07 << 4)
+#define RK817_USB_ILIM_EN (0x01 << 3)
+#define RK817_USB_ILIM_SEL (0x07 << 0)
+#define RK817_PMIC_CHRG_TERM 0xe6
+#define RK817_CHRG_TERM_ANA_DIG (0x01 << 2)
+#define RK817_CHRG_TERM_ANA_SEL (0x03 << 0)
+#define RK817_CHRG_EN (0x01 << 6)
+
+#define RK817_PMIC_CHRG_STS 0xeb
+#define RK817_BAT_EXS BIT(7)
+#define RK817_CHG_STS (0x07 << 4)
+
+#define RK817_ID_MSB 0xed
+#define RK817_ID_LSB 0xee
+
+#define RK817_SYS_STS 0xf0
+#define RK817_PLUG_IN_STS (0x1 << 6)
+
+#define RK817_SYS_CFG(i) (0xf1 + (i))
+
+#define RK817_ON_SOURCE_REG 0xf5
+#define RK817_OFF_SOURCE_REG 0xf6
+
+/* INTERRUPT REGISTER */
+#define RK817_INT_STS_REG0 0xf8
+#define RK817_INT_STS_MSK_REG0 0xf9
+#define RK817_INT_STS_REG1 0xfa
+#define RK817_INT_STS_MSK_REG1 0xfb
+#define RK817_INT_STS_REG2 0xfc
+#define RK817_INT_STS_MSK_REG2 0xfd
+#define RK817_GPIO_INT_CFG 0xfe
+
+/* IRQ Definitions */
+#define RK817_IRQ_PWRON_FALL 0
+#define RK817_IRQ_PWRON_RISE 1
+#define RK817_IRQ_PWRON 2
+#define RK817_IRQ_PWMON_LP 3
+#define RK817_IRQ_HOTDIE 4
+#define RK817_IRQ_RTC_ALARM 5
+#define RK817_IRQ_RTC_PERIOD 6
+#define RK817_IRQ_VB_LO 7
+#define RK817_IRQ_PLUG_IN 8
+#define RK817_IRQ_PLUG_OUT 9
+#define RK817_IRQ_CHRG_TERM 10
+#define RK817_IRQ_CHRG_TIME 11
+#define RK817_IRQ_CHRG_TS 12
+#define RK817_IRQ_USB_OV 13
+#define RK817_IRQ_CHRG_IN_CLMP 14
+#define RK817_IRQ_BAT_DIS_ILIM 15
+#define RK817_IRQ_GATE_GPIO 16
+#define RK817_IRQ_TS_GPIO 17
+#define RK817_IRQ_CODEC_PD 18
+#define RK817_IRQ_CODEC_PO 19
+#define RK817_IRQ_CLASSD_MUTE_DONE 20
+#define RK817_IRQ_CLASSD_OCP 21
+#define RK817_IRQ_BAT_OVP 22
+#define RK817_IRQ_CHRG_BAT_HI 23
+#define RK817_IRQ_END (RK817_IRQ_CHRG_BAT_HI + 1)
+
+/*
+ * rtc_ctrl 0xd
 * same as the RK808, except bit 4
+ */
+#define RK817_RTC_CTRL_RSV4 BIT(4)
+
+/* power config 0xb9 */
+#define RK817_BUCK3_FB_RES_MSK BIT(6)
+#define RK817_BUCK3_FB_RES_INTER BIT(6)
+#define RK817_BUCK3_FB_RES_EXT 0
+
+/* buck config 0xba */
+#define RK817_RAMP_RATE_OFFSET 6
+#define RK817_RAMP_RATE_MASK (0x3 << RK817_RAMP_RATE_OFFSET)
+#define RK817_RAMP_RATE_3MV_PER_US (0x0 << RK817_RAMP_RATE_OFFSET)
+#define RK817_RAMP_RATE_6_3MV_PER_US (0x1 << RK817_RAMP_RATE_OFFSET)
+#define RK817_RAMP_RATE_12_5MV_PER_US (0x2 << RK817_RAMP_RATE_OFFSET)
+#define RK817_RAMP_RATE_25MV_PER_US (0x3 << RK817_RAMP_RATE_OFFSET)
+
+/* sys_cfg1 0xf2 */
+#define RK817_HOTDIE_TEMP_MSK (0x3 << 4)
+#define RK817_HOTDIE_85 (0x0 << 4)
+#define RK817_HOTDIE_95 (0x1 << 4)
+#define RK817_HOTDIE_105 (0x2 << 4)
+#define RK817_HOTDIE_115 (0x3 << 4)
+
+#define RK817_TSD_TEMP_MSK BIT(6)
+#define RK817_TSD_140 0
+#define RK817_TSD_160 BIT(6)
+
+#define RK817_CLK32KOUT2_EN BIT(7)
+
+/* sys_cfg3 0xf4 */
+#define RK817_SLPPIN_FUNC_MSK (0x3 << 3)
+#define SLPPIN_NULL_FUN (0x0 << 3)
+#define SLPPIN_SLP_FUN (0x1 << 3)
+#define SLPPIN_DN_FUN (0x2 << 3)
+#define SLPPIN_RST_FUN (0x3 << 3)
+
+#define RK817_RST_FUNC_MSK (0x3 << 6)
+#define RK817_RST_FUNC_SFT (6)
+#define RK817_RST_FUNC_CNT (3)
+#define RK817_RST_FUNC_DEV (0) /* reset the dev */
+#define RK817_RST_FUNC_REG (0x1 << 6) /* reset the reg only */
+
+#define RK817_SLPPOL_MSK BIT(5)
+#define RK817_SLPPOL_H BIT(5)
+#define RK817_SLPPOL_L (0)
+
+/* gpio&int 0xfe */
+#define RK817_INT_POL_MSK BIT(1)
+#define RK817_INT_POL_H BIT(1)
+#define RK817_INT_POL_L 0
+#define RK809_BUCK5_CONFIG(i) (RK817_BOOST_OTG_CFG + (i) * 1)
+
enum {
BUCK_ILMIN_50MA,
BUCK_ILMIN_100MA,
@@ -321,8 +758,32 @@ enum {
};
enum {
+ RK805_BUCK1_2_ILMAX_2500MA,
+ RK805_BUCK1_2_ILMAX_3000MA,
+ RK805_BUCK1_2_ILMAX_3500MA,
+ RK805_BUCK1_2_ILMAX_4000MA,
+};
+
+enum {
+ RK805_BUCK3_ILMAX_1500MA,
+ RK805_BUCK3_ILMAX_2000MA,
+ RK805_BUCK3_ILMAX_2500MA,
+ RK805_BUCK3_ILMAX_3000MA,
+};
+
+enum {
+ RK805_BUCK4_ILMAX_2000MA,
+ RK805_BUCK4_ILMAX_2500MA,
+ RK805_BUCK4_ILMAX_3000MA,
+ RK805_BUCK4_ILMAX_3500MA,
+};
+
+enum {
+ RK805_ID = 0x8050,
RK808_ID = 0x0000,
- RK818_ID = 0x8181,
+ RK809_ID = 0x8090,
+ RK817_ID = 0x8170,
+ RK818_ID = 0x8180,
};
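
/*
 * Illustrative sketch (not part of this diff): how a driver might combine
 * the chip ID values above with RK8XX_ID_MSK to detect the PMIC variant.
 * Reading the ID through RK817_ID_MSB/RK817_ID_LSB is an assumption here
 * (other variants may expose the ID elsewhere), and the helper name is
 * hypothetical.
 */
static int rk8xx_read_variant(struct regmap *regmap, unsigned int *variant)
{
	unsigned int msb, lsb;
	int ret;

	ret = regmap_read(regmap, RK817_ID_MSB, &msb);
	if (ret)
		return ret;

	ret = regmap_read(regmap, RK817_ID_LSB, &lsb);
	if (ret)
		return ret;

	/* Mask off the silicon revision bits before matching e.g. RK817_ID */
	*variant = ((msb << 8) | lsb) & RK8XX_ID_MSK;

	return 0;
}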
struct rk808 {
diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h
index e5a6cdeb77db..aacb6d51e99c 100644
--- a/include/linux/mfd/rn5t618.h
+++ b/include/linux/mfd/rn5t618.h
@@ -1,14 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* MFD core driver for Ricoh RN5T618 PMIC
*
* Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __LINUX_MFD_RN5T618_H
@@ -145,6 +139,17 @@
#define RN5T618_INTPOL 0x9c
#define RN5T618_INTEN 0x9d
#define RN5T618_INTMON 0x9e
+
+#define RN5T618_RTC_SECONDS 0xA0
+#define RN5T618_RTC_MDAY 0xA4
+#define RN5T618_RTC_MONTH 0xA5
+#define RN5T618_RTC_YEAR 0xA6
+#define RN5T618_RTC_ADJUST 0xA7
+#define RN5T618_RTC_ALARM_Y_SEC 0xA8
+#define RN5T618_RTC_DAL_MONTH 0xAC
+#define RN5T618_RTC_CTRL1 0xAE
+#define RN5T618_RTC_CTRL2 0xAF
+
#define RN5T618_PREVINDAC 0xb0
#define RN5T618_BATDAC 0xb1
#define RN5T618_CHGCTL1 0xb3
@@ -183,6 +188,7 @@
#define RN5T618_CHGOSCSCORESET3 0xd7
#define RN5T618_CHGOSCFREQSET1 0xd8
#define RN5T618_CHGOSCFREQSET2 0xd9
+#define RN5T618_GCHGDET 0xda
#define RN5T618_CONTROL 0xe0
#define RN5T618_SOC 0xe1
#define RN5T618_RE_CAP_H 0xe2
@@ -221,16 +227,31 @@
#define RN5T618_WATCHDOG_WDOGTIM_S 0
#define RN5T618_PWRIRQ_IR_WDOG BIT(6)
+#define RN5T618_POFFHIS_PWRON BIT(0)
+#define RN5T618_POFFHIS_TSHUT BIT(1)
+#define RN5T618_POFFHIS_VINDET BIT(2)
+#define RN5T618_POFFHIS_IODET BIT(3)
+#define RN5T618_POFFHIS_CPU BIT(4)
+#define RN5T618_POFFHIS_WDG BIT(5)
+#define RN5T618_POFFHIS_DCLIM BIT(6)
+#define RN5T618_POFFHIS_N_OE BIT(7)
+
enum {
RN5T618_DCDC1,
RN5T618_DCDC2,
RN5T618_DCDC3,
RN5T618_DCDC4,
+ RN5T618_DCDC5,
RN5T618_LDO1,
RN5T618_LDO2,
RN5T618_LDO3,
RN5T618_LDO4,
RN5T618_LDO5,
+ RN5T618_LDO6,
+ RN5T618_LDO7,
+ RN5T618_LDO8,
+ RN5T618_LDO9,
+ RN5T618_LDO10,
RN5T618_LDORTC1,
RN5T618_LDORTC2,
RN5T618_REG_NUM,
@@ -242,9 +263,24 @@ enum {
RC5T619,
};
+/* RN5T618 IRQ definitions */
+enum {
+ RN5T618_IRQ_SYS = 0,
+ RN5T618_IRQ_DCDC,
+ RN5T618_IRQ_RTC,
+ RN5T618_IRQ_ADC,
+ RN5T618_IRQ_GPIO,
+ RN5T618_IRQ_CHG,
+ RN5T618_NR_IRQS,
+};
+
struct rn5t618 {
struct regmap *regmap;
+ struct device *dev;
long variant;
+
+ int irq;
+ struct regmap_irq_chip_data *irq_data;
};
#endif /* __LINUX_MFD_RN5T618_H */
diff --git a/include/linux/mfd/rohm-bd71815.h b/include/linux/mfd/rohm-bd71815.h
new file mode 100644
index 000000000000..ec6d9612bebe
--- /dev/null
+++ b/include/linux/mfd/rohm-bd71815.h
@@ -0,0 +1,562 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2021 ROHM Semiconductors.
+ *
+ * Author: Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+ *
+ * Copyright 2014 Embest Technology Co. Ltd. Inc.
+ *
+ * Author: yanglsh@embest-tech.com
+ */
+
+#ifndef _MFD_BD71815_H
+#define _MFD_BD71815_H
+
+#include <linux/regmap.h>
+
+enum {
+ BD71815_BUCK1 = 0,
+ BD71815_BUCK2,
+ BD71815_BUCK3,
+ BD71815_BUCK4,
+ BD71815_BUCK5,
+ /* General Purpose */
+ BD71815_LDO1,
+ BD71815_LDO2,
+ BD71815_LDO3,
+ /* LDOs for SD Card and SD Card Interface */
+ BD71815_LDO4,
+ BD71815_LDO5,
+ /* LDO for DDR Reference Voltage */
+ BD71815_LDODVREF,
+ /* LDO for Low-Power State Retention */
+ BD71815_LDOLPSR,
+ BD71815_WLED,
+ BD71815_REGULATOR_CNT,
+};
+
+#define BD71815_SUPPLY_STATE_ENABLED 0x1
+
+enum {
+ BD71815_REG_DEVICE = 0,
+ BD71815_REG_PWRCTRL,
+ BD71815_REG_BUCK1_MODE,
+ BD71815_REG_BUCK2_MODE,
+ BD71815_REG_BUCK3_MODE,
+ BD71815_REG_BUCK4_MODE,
+ BD71815_REG_BUCK5_MODE,
+ BD71815_REG_BUCK1_VOLT_H,
+ BD71815_REG_BUCK1_VOLT_L,
+ BD71815_REG_BUCK2_VOLT_H,
+ BD71815_REG_BUCK2_VOLT_L,
+ BD71815_REG_BUCK3_VOLT,
+ BD71815_REG_BUCK4_VOLT,
+ BD71815_REG_BUCK5_VOLT,
+ BD71815_REG_LED_CTRL,
+ BD71815_REG_LED_DIMM,
+ BD71815_REG_LDO_MODE1,
+ BD71815_REG_LDO_MODE2,
+ BD71815_REG_LDO_MODE3,
+ BD71815_REG_LDO_MODE4,
+ BD71815_REG_LDO1_VOLT,
+ BD71815_REG_LDO2_VOLT,
+ BD71815_REG_LDO3_VOLT,
+ BD71815_REG_LDO4_VOLT,
+ BD71815_REG_LDO5_VOLT_H,
+ BD71815_REG_LDO5_VOLT_L,
+ BD71815_REG_BUCK_PD_DIS,
+ BD71815_REG_LDO_PD_DIS,
+ BD71815_REG_GPO,
+ BD71815_REG_OUT32K,
+ BD71815_REG_SEC,
+ BD71815_REG_MIN,
+ BD71815_REG_HOUR,
+ BD71815_REG_WEEK,
+ BD71815_REG_DAY,
+ BD71815_REG_MONTH,
+ BD71815_REG_YEAR,
+ BD71815_REG_ALM0_SEC,
+
+ BD71815_REG_ALM1_SEC = 0x2C,
+
+ BD71815_REG_ALM0_MASK = 0x33,
+ BD71815_REG_ALM1_MASK,
+ BD71815_REG_ALM2,
+ BD71815_REG_TRIM,
+ BD71815_REG_CONF,
+ BD71815_REG_SYS_INIT,
+ BD71815_REG_CHG_STATE,
+ BD71815_REG_CHG_LAST_STATE,
+ BD71815_REG_BAT_STAT,
+ BD71815_REG_DCIN_STAT,
+ BD71815_REG_VSYS_STAT,
+ BD71815_REG_CHG_STAT,
+ BD71815_REG_CHG_WDT_STAT,
+ BD71815_REG_BAT_TEMP,
+ BD71815_REG_IGNORE_0,
+ BD71815_REG_INHIBIT_0,
+ BD71815_REG_DCIN_CLPS,
+ BD71815_REG_VSYS_REG,
+ BD71815_REG_VSYS_MAX,
+ BD71815_REG_VSYS_MIN,
+ BD71815_REG_CHG_SET1,
+ BD71815_REG_CHG_SET2,
+ BD71815_REG_CHG_WDT_PRE,
+ BD71815_REG_CHG_WDT_FST,
+ BD71815_REG_CHG_IPRE,
+ BD71815_REG_CHG_IFST,
+ BD71815_REG_CHG_IFST_TERM,
+ BD71815_REG_CHG_VPRE,
+ BD71815_REG_CHG_VBAT_1,
+ BD71815_REG_CHG_VBAT_2,
+ BD71815_REG_CHG_VBAT_3,
+ BD71815_REG_CHG_LED_1,
+ BD71815_REG_VF_TH,
+ BD71815_REG_BAT_SET_1,
+ BD71815_REG_BAT_SET_2,
+ BD71815_REG_BAT_SET_3,
+ BD71815_REG_ALM_VBAT_TH_U,
+ BD71815_REG_ALM_VBAT_TH_L,
+ BD71815_REG_ALM_DCIN_TH,
+ BD71815_REG_ALM_VSYS_TH,
+ BD71815_REG_VM_IBAT_U,
+ BD71815_REG_VM_IBAT_L,
+ BD71815_REG_VM_VBAT_U,
+ BD71815_REG_VM_VBAT_L,
+ BD71815_REG_VM_BTMP,
+ BD71815_REG_VM_VTH,
+ BD71815_REG_VM_DCIN_U,
+ BD71815_REG_VM_DCIN_L,
+ BD71815_REG_VM_VSYS,
+ BD71815_REG_VM_VF,
+ BD71815_REG_VM_OCI_PRE_U,
+ BD71815_REG_VM_OCI_PRE_L,
+ BD71815_REG_VM_OCV_PRE_U,
+ BD71815_REG_VM_OCV_PRE_L,
+ BD71815_REG_VM_OCI_PST_U,
+ BD71815_REG_VM_OCI_PST_L,
+ BD71815_REG_VM_OCV_PST_U,
+ BD71815_REG_VM_OCV_PST_L,
+ BD71815_REG_VM_SA_VBAT_U,
+ BD71815_REG_VM_SA_VBAT_L,
+ BD71815_REG_VM_SA_IBAT_U,
+ BD71815_REG_VM_SA_IBAT_L,
+ BD71815_REG_CC_CTRL,
+ BD71815_REG_CC_BATCAP1_TH_U,
+ BD71815_REG_CC_BATCAP1_TH_L,
+ BD71815_REG_CC_BATCAP2_TH_U,
+ BD71815_REG_CC_BATCAP2_TH_L,
+ BD71815_REG_CC_BATCAP3_TH_U,
+ BD71815_REG_CC_BATCAP3_TH_L,
+ BD71815_REG_CC_STAT,
+ BD71815_REG_CC_CCNTD_3,
+ BD71815_REG_CC_CCNTD_2,
+ BD71815_REG_CC_CCNTD_1,
+ BD71815_REG_CC_CCNTD_0,
+ BD71815_REG_CC_CURCD_U,
+ BD71815_REG_CC_CURCD_L,
+ BD71815_REG_VM_OCUR_THR_1,
+ BD71815_REG_VM_OCUR_DUR_1,
+ BD71815_REG_VM_OCUR_THR_2,
+ BD71815_REG_VM_OCUR_DUR_2,
+ BD71815_REG_VM_OCUR_THR_3,
+ BD71815_REG_VM_OCUR_DUR_3,
+ BD71815_REG_VM_OCUR_MON,
+ BD71815_REG_VM_BTMP_OV_THR,
+ BD71815_REG_VM_BTMP_OV_DUR,
+ BD71815_REG_VM_BTMP_LO_THR,
+ BD71815_REG_VM_BTMP_LO_DUR,
+ BD71815_REG_VM_BTMP_MON,
+ BD71815_REG_INT_EN_01,
+
+ BD71815_REG_INT_EN_11 = 0x95,
+ BD71815_REG_INT_EN_12,
+ BD71815_REG_INT_STAT,
+ BD71815_REG_INT_STAT_01,
+ BD71815_REG_INT_STAT_02,
+ BD71815_REG_INT_STAT_03,
+ BD71815_REG_INT_STAT_04,
+ BD71815_REG_INT_STAT_05,
+ BD71815_REG_INT_STAT_06,
+ BD71815_REG_INT_STAT_07,
+ BD71815_REG_INT_STAT_08,
+ BD71815_REG_INT_STAT_09,
+ BD71815_REG_INT_STAT_10,
+ BD71815_REG_INT_STAT_11,
+ BD71815_REG_INT_STAT_12,
+ BD71815_REG_INT_UPDATE,
+
+ BD71815_REG_VM_VSYS_U = 0xC0,
+ BD71815_REG_VM_VSYS_L,
+ BD71815_REG_VM_SA_VSYS_U,
+ BD71815_REG_VM_SA_VSYS_L,
+
+ BD71815_REG_VM_SA_IBAT_MIN_U = 0xD0,
+ BD71815_REG_VM_SA_IBAT_MIN_L,
+ BD71815_REG_VM_SA_IBAT_MAX_U,
+ BD71815_REG_VM_SA_IBAT_MAX_L,
+ BD71815_REG_VM_SA_VBAT_MIN_U,
+ BD71815_REG_VM_SA_VBAT_MIN_L,
+ BD71815_REG_VM_SA_VBAT_MAX_U,
+ BD71815_REG_VM_SA_VBAT_MAX_L,
+ BD71815_REG_VM_SA_VSYS_MIN_U,
+ BD71815_REG_VM_SA_VSYS_MIN_L,
+ BD71815_REG_VM_SA_VSYS_MAX_U,
+ BD71815_REG_VM_SA_VSYS_MAX_L,
+ BD71815_REG_VM_SA_MINMAX_CLR,
+
+ BD71815_REG_REX_CCNTD_3 = 0xE0,
+ BD71815_REG_REX_CCNTD_2,
+ BD71815_REG_REX_CCNTD_1,
+ BD71815_REG_REX_CCNTD_0,
+ BD71815_REG_REX_SA_VBAT_U,
+ BD71815_REG_REX_SA_VBAT_L,
+ BD71815_REG_REX_CTRL_1,
+ BD71815_REG_REX_CTRL_2,
+ BD71815_REG_FULL_CCNTD_3,
+ BD71815_REG_FULL_CCNTD_2,
+ BD71815_REG_FULL_CCNTD_1,
+ BD71815_REG_FULL_CCNTD_0,
+ BD71815_REG_FULL_CTRL,
+
+ BD71815_REG_CCNTD_CHG_3 = 0xF0,
+ BD71815_REG_CCNTD_CHG_2,
+
+ BD71815_REG_TEST_MODE = 0xFE,
+ BD71815_MAX_REGISTER,
+};
+
+/* BD71815_REG_BUCK1_MODE bits */
+#define BD71815_BUCK_RAMPRATE_MASK 0xC0
+#define BD71815_BUCK_RAMPRATE_10P00MV 0x0
+#define BD71815_BUCK_RAMPRATE_5P00MV 0x01
+#define BD71815_BUCK_RAMPRATE_2P50MV 0x02
+#define BD71815_BUCK_RAMPRATE_1P25MV 0x03
+
+#define BD71815_BUCK_PWM_FIXED BIT(4)
+#define BD71815_BUCK_SNVS_ON BIT(3)
+#define BD71815_BUCK_RUN_ON BIT(2)
+#define BD71815_BUCK_LPSR_ON BIT(1)
+#define BD71815_BUCK_SUSP_ON BIT(0)
+
+/* BD71815_REG_BUCK1_VOLT_H bits */
+#define BD71815_BUCK_DVSSEL BIT(7)
+#define BD71815_BUCK_STBY_DVS BIT(6)
+#define BD71815_VOLT_MASK 0x3F
+#define BD71815_BUCK1_H_DEFAULT 0x14
+#define BD71815_BUCK1_L_DEFAULT 0x14
+
+/* BD71815_REG_BUCK2_VOLT_H bits */
+#define BD71815_BUCK2_H_DEFAULT 0x14
+#define BD71815_BUCK2_L_DEFAULT 0x14
+
+/* WLED output */
+/* current register mask */
+#define LED_DIMM_MASK 0x3f
+/* LED enable bits at LED_CTRL reg */
+#define LED_CHGDONE_EN BIT(4)
+#define LED_RUN_ON BIT(2)
+#define LED_LPSR_ON BIT(1)
+#define LED_SUSP_ON BIT(0)
+
+/* BD71815_REG_LDO1_CTRL bits */
+#define LDO1_EN BIT(0)
+#define LDO2_EN BIT(1)
+#define LDO3_EN BIT(2)
+#define DVREF_EN BIT(3)
+#define VOSNVS_SW_EN BIT(4)
+
+/* LDO_MODE1_register */
+#define LDO1_SNVS_ON BIT(7)
+#define LDO1_RUN_ON BIT(6)
+#define LDO1_LPSR_ON BIT(5)
+#define LDO1_SUSP_ON BIT(4)
+/* set => register control, unset => GPIO control */
+#define LDO4_MODE_MASK BIT(3)
+#define LDO4_MODE_I2C BIT(3)
+#define LDO4_MODE_GPIO 0
+/* set => register control, unset => start when DCIN connected */
+#define LDO3_MODE_MASK BIT(2)
+#define LDO3_MODE_I2C BIT(2)
+#define LDO3_MODE_DCIN 0
+
+/* LDO_MODE2 register */
+#define LDO3_SNVS_ON BIT(7)
+#define LDO3_RUN_ON BIT(6)
+#define LDO3_LPSR_ON BIT(5)
+#define LDO3_SUSP_ON BIT(4)
+#define LDO2_SNVS_ON BIT(3)
+#define LDO2_RUN_ON BIT(2)
+#define LDO2_LPSR_ON BIT(1)
+#define LDO2_SUSP_ON BIT(0)
+
+
+/* LDO_MODE3 register */
+#define LDO5_SNVS_ON BIT(7)
+#define LDO5_RUN_ON BIT(6)
+#define LDO5_LPSR_ON BIT(5)
+#define LDO5_SUSP_ON BIT(4)
+#define LDO4_SNVS_ON BIT(3)
+#define LDO4_RUN_ON BIT(2)
+#define LDO4_LPSR_ON BIT(1)
+#define LDO4_SUSP_ON BIT(0)
+
+/* LDO_MODE4 register */
+#define DVREF_SNVS_ON BIT(7)
+#define DVREF_RUN_ON BIT(6)
+#define DVREF_LPSR_ON BIT(5)
+#define DVREF_SUSP_ON BIT(4)
+#define LDO_LPSR_SNVS_ON BIT(3)
+#define LDO_LPSR_RUN_ON BIT(2)
+#define LDO_LPSR_LPSR_ON BIT(1)
+#define LDO_LPSR_SUSP_ON BIT(0)
+
+/* BD71815_REG_OUT32K bits */
+#define OUT32K_EN BIT(0)
+#define OUT32K_MODE BIT(1)
+#define OUT32K_MODE_CMOS BIT(1)
+#define OUT32K_MODE_OPEN_DRAIN 0
+
+/* BD71815_REG_BAT_STAT bits */
+#define BAT_DET BIT(5)
+#define BAT_DET_OFFSET 5
+#define BAT_DET_DONE BIT(4)
+#define VBAT_OV BIT(3)
+#define DBAT_DET BIT(0)
+
+/* BD71815_REG_VBUS_STAT bits */
+#define VBUS_DET BIT(0)
+
+#define BD71815_REG_RTC_START BD71815_REG_SEC
+#define BD71815_REG_RTC_ALM_START BD71815_REG_ALM0_SEC
+
+/* BD71815_REG_ALM0_MASK bits */
+#define A0_ONESEC BIT(7)
+
+/* BD71815_REG_INT_EN_00 bits */
+#define ALMALE BIT(0)
+
+/* BD71815_REG_INT_STAT_03 bits */
+#define DCIN_MON_DET BIT(1)
+#define DCIN_MON_RES BIT(0)
+#define POWERON_LONG BIT(2)
+#define POWERON_MID BIT(3)
+#define POWERON_SHORT BIT(4)
+#define POWERON_PRESS BIT(5)
+
+/* BD71805_REG_INT_STAT_08 bits */
+#define VBAT_MON_DET BIT(1)
+#define VBAT_MON_RES BIT(0)
+
+/* BD71805_REG_INT_STAT_11 bits */
+#define INT_STAT_11_VF_DET BIT(7)
+#define INT_STAT_11_VF_RES BIT(6)
+#define INT_STAT_11_VF125_DET BIT(5)
+#define INT_STAT_11_VF125_RES BIT(4)
+#define INT_STAT_11_OVTMP_DET BIT(3)
+#define INT_STAT_11_OVTMP_RES BIT(2)
+#define INT_STAT_11_LOTMP_DET BIT(1)
+#define INT_STAT_11_LOTMP_RES BIT(0)
+
+#define VBAT_MON_DET BIT(1)
+#define VBAT_MON_RES BIT(0)
+
+/* BD71815_REG_PWRCTRL bits */
+#define RESTARTEN BIT(0)
+
+/* BD71815_REG_GPO bits */
+#define READY_FORCE_LOW BIT(2)
+#define BD71815_GPIO_DRIVE_MASK BIT(4)
+#define BD71815_GPIO_OPEN_DRAIN 0
+#define BD71815_GPIO_CMOS BIT(4)
+
+/* BD71815 interrupt masks */
+enum {
+ BD71815_INT_EN_01_BUCKAST_MASK = 0x0F,
+ BD71815_INT_EN_02_DCINAST_MASK = 0x3E,
+ BD71815_INT_EN_03_DCINAST_MASK = 0x3F,
+ BD71815_INT_EN_04_VSYSAST_MASK = 0xCF,
+ BD71815_INT_EN_05_CHGAST_MASK = 0xFC,
+ BD71815_INT_EN_06_BATAST_MASK = 0xF3,
+ BD71815_INT_EN_07_BMONAST_MASK = 0xFE,
+ BD71815_INT_EN_08_BMONAST_MASK = 0x03,
+ BD71815_INT_EN_09_BMONAST_MASK = 0x07,
+ BD71815_INT_EN_10_BMONAST_MASK = 0x3F,
+ BD71815_INT_EN_11_TMPAST_MASK = 0xFF,
+ BD71815_INT_EN_12_ALMAST_MASK = 0x07,
+};
+/* BD71815 interrupt irqs */
+enum {
+ /* BUCK reg interrupts */
+ BD71815_INT_BUCK1_OCP,
+ BD71815_INT_BUCK2_OCP,
+ BD71815_INT_BUCK3_OCP,
+ BD71815_INT_BUCK4_OCP,
+ BD71815_INT_BUCK5_OCP,
+ BD71815_INT_LED_OVP,
+ BD71815_INT_LED_OCP,
+ BD71815_INT_LED_SCP,
+ /* DCIN1 interrupts */
+ BD71815_INT_DCIN_RMV,
+ BD71815_INT_CLPS_OUT,
+ BD71815_INT_CLPS_IN,
+ BD71815_INT_DCIN_OVP_RES,
+ BD71815_INT_DCIN_OVP_DET,
+ /* DCIN2 interrupts */
+ BD71815_INT_DCIN_MON_RES,
+ BD71815_INT_DCIN_MON_DET,
+ BD71815_INT_WDOG,
+ /* Vsys INT_STAT_04 */
+ BD71815_INT_VSYS_UV_RES,
+ BD71815_INT_VSYS_UV_DET,
+ BD71815_INT_VSYS_LOW_RES,
+ BD71815_INT_VSYS_LOW_DET,
+ BD71815_INT_VSYS_MON_RES,
+ BD71815_INT_VSYS_MON_DET,
+ /* Charger INT_STAT_05 */
+ BD71815_INT_CHG_WDG_TEMP,
+ BD71815_INT_CHG_WDG_TIME,
+ BD71815_INT_CHG_RECHARGE_RES,
+ BD71815_INT_CHG_RECHARGE_DET,
+ BD71815_INT_CHG_RANGED_TEMP_TRANSITION,
+ BD71815_INT_CHG_STATE_TRANSITION,
+ /* Battery INT_STAT_06 */
+ BD71815_INT_BAT_TEMP_NORMAL,
+ BD71815_INT_BAT_TEMP_ERANGE,
+ BD71815_INT_BAT_REMOVED,
+ BD71815_INT_BAT_DETECTED,
+ BD71815_INT_THERM_REMOVED,
+ BD71815_INT_THERM_DETECTED,
+ /* Battery Mon 1 INT_STAT_07 */
+ BD71815_INT_BAT_DEAD,
+ BD71815_INT_BAT_SHORTC_RES,
+ BD71815_INT_BAT_SHORTC_DET,
+ BD71815_INT_BAT_LOW_VOLT_RES,
+ BD71815_INT_BAT_LOW_VOLT_DET,
+ BD71815_INT_BAT_OVER_VOLT_RES,
+ BD71815_INT_BAT_OVER_VOLT_DET,
+ /* Battery Mon 2 INT_STAT_08 */
+ BD71815_INT_BAT_MON_RES,
+ BD71815_INT_BAT_MON_DET,
+ /* Battery Mon 3 (Coulomb counter) INT_STAT_09 */
+ BD71815_INT_BAT_CC_MON1,
+ BD71815_INT_BAT_CC_MON2,
+ BD71815_INT_BAT_CC_MON3,
+ /* Battery Mon 4 INT_STAT_10 */
+ BD71815_INT_BAT_OVER_CURR_1_RES,
+ BD71815_INT_BAT_OVER_CURR_1_DET,
+ BD71815_INT_BAT_OVER_CURR_2_RES,
+ BD71815_INT_BAT_OVER_CURR_2_DET,
+ BD71815_INT_BAT_OVER_CURR_3_RES,
+ BD71815_INT_BAT_OVER_CURR_3_DET,
+ /* Temperature INT_STAT_11 */
+ BD71815_INT_TEMP_BAT_LOW_RES,
+ BD71815_INT_TEMP_BAT_LOW_DET,
+ BD71815_INT_TEMP_BAT_HI_RES,
+ BD71815_INT_TEMP_BAT_HI_DET,
+ BD71815_INT_TEMP_CHIP_OVER_125_RES,
+ BD71815_INT_TEMP_CHIP_OVER_125_DET,
+ BD71815_INT_TEMP_CHIP_OVER_VF_RES,
+ BD71815_INT_TEMP_CHIP_OVER_VF_DET,
+ /* RTC Alarm INT_STAT_12 */
+ BD71815_INT_RTC0,
+ BD71815_INT_RTC1,
+ BD71815_INT_RTC2,
+};
+
+#define BD71815_INT_BUCK1_OCP_MASK BIT(0)
+#define BD71815_INT_BUCK2_OCP_MASK BIT(1)
+#define BD71815_INT_BUCK3_OCP_MASK BIT(2)
+#define BD71815_INT_BUCK4_OCP_MASK BIT(3)
+#define BD71815_INT_BUCK5_OCP_MASK BIT(4)
+#define BD71815_INT_LED_OVP_MASK BIT(5)
+#define BD71815_INT_LED_OCP_MASK BIT(6)
+#define BD71815_INT_LED_SCP_MASK BIT(7)
+
+#define BD71815_INT_DCIN_RMV_MASK BIT(1)
+#define BD71815_INT_CLPS_OUT_MASK BIT(2)
+#define BD71815_INT_CLPS_IN_MASK BIT(3)
+#define BD71815_INT_DCIN_OVP_RES_MASK BIT(4)
+#define BD71815_INT_DCIN_OVP_DET_MASK BIT(5)
+
+#define BD71815_INT_DCIN_MON_RES_MASK BIT(0)
+#define BD71815_INT_DCIN_MON_DET_MASK BIT(1)
+#define BD71815_INT_WDOG_MASK BIT(6)
+
+#define BD71815_INT_VSYS_UV_RES_MASK BIT(0)
+#define BD71815_INT_VSYS_UV_DET_MASK BIT(1)
+#define BD71815_INT_VSYS_LOW_RES_MASK BIT(2)
+#define BD71815_INT_VSYS_LOW_DET_MASK BIT(3)
+#define BD71815_INT_VSYS_MON_RES_MASK BIT(6)
+#define BD71815_INT_VSYS_MON_DET_MASK BIT(7)
+
+#define BD71815_INT_CHG_WDG_TEMP_MASK BIT(2)
+#define BD71815_INT_CHG_WDG_TIME_MASK BIT(3)
+#define BD71815_INT_CHG_RECHARGE_RES_MASK BIT(4)
+#define BD71815_INT_CHG_RECHARGE_DET_MASK BIT(5)
+#define BD71815_INT_CHG_RANGED_TEMP_TRANSITION_MASK BIT(6)
+#define BD71815_INT_CHG_STATE_TRANSITION_MASK BIT(7)
+
+#define BD71815_INT_BAT_TEMP_NORMAL_MASK BIT(0)
+#define BD71815_INT_BAT_TEMP_ERANGE_MASK BIT(1)
+#define BD71815_INT_BAT_REMOVED_MASK BIT(4)
+#define BD71815_INT_BAT_DETECTED_MASK BIT(5)
+#define BD71815_INT_THERM_REMOVED_MASK BIT(6)
+#define BD71815_INT_THERM_DETECTED_MASK BIT(7)
+
+#define BD71815_INT_BAT_DEAD_MASK BIT(1)
+#define BD71815_INT_BAT_SHORTC_RES_MASK BIT(2)
+#define BD71815_INT_BAT_SHORTC_DET_MASK BIT(3)
+#define BD71815_INT_BAT_LOW_VOLT_RES_MASK BIT(4)
+#define BD71815_INT_BAT_LOW_VOLT_DET_MASK BIT(5)
+#define BD71815_INT_BAT_OVER_VOLT_RES_MASK BIT(6)
+#define BD71815_INT_BAT_OVER_VOLT_DET_MASK BIT(7)
+
+#define BD71815_INT_BAT_MON_RES_MASK BIT(0)
+#define BD71815_INT_BAT_MON_DET_MASK BIT(1)
+
+#define BD71815_INT_BAT_CC_MON1_MASK BIT(0)
+#define BD71815_INT_BAT_CC_MON2_MASK BIT(1)
+#define BD71815_INT_BAT_CC_MON3_MASK BIT(2)
+
+#define BD71815_INT_BAT_OVER_CURR_1_RES_MASK BIT(0)
+#define BD71815_INT_BAT_OVER_CURR_1_DET_MASK BIT(1)
+#define BD71815_INT_BAT_OVER_CURR_2_RES_MASK BIT(2)
+#define BD71815_INT_BAT_OVER_CURR_2_DET_MASK BIT(3)
+#define BD71815_INT_BAT_OVER_CURR_3_RES_MASK BIT(4)
+#define BD71815_INT_BAT_OVER_CURR_3_DET_MASK BIT(5)
+
+#define BD71815_INT_TEMP_BAT_LOW_RES_MASK BIT(0)
+#define BD71815_INT_TEMP_BAT_LOW_DET_MASK BIT(1)
+#define BD71815_INT_TEMP_BAT_HI_RES_MASK BIT(2)
+#define BD71815_INT_TEMP_BAT_HI_DET_MASK BIT(3)
+#define BD71815_INT_TEMP_CHIP_OVER_125_RES_MASK BIT(4)
+#define BD71815_INT_TEMP_CHIP_OVER_125_DET_MASK BIT(5)
+#define BD71815_INT_TEMP_CHIP_OVER_VF_RES_MASK BIT(6)
+#define BD71815_INT_TEMP_CHIP_OVER_VF_DET_MASK BIT(7)
+
+#define BD71815_INT_RTC0_MASK BIT(0)
+#define BD71815_INT_RTC1_MASK BIT(1)
+#define BD71815_INT_RTC2_MASK BIT(2)
+
+/* BD71815_REG_CC_CTRL bits */
+#define CCNTRST 0x80
+#define CCNTENB 0x40
+#define CCCALIB 0x20
+
+/* BD71815_REG_CC_CURCD */
+#define CURDIR_Discharging 0x8000
+
+/* BD71815_REG_VM_SA_IBAT */
+#define IBAT_SA_DIR_Discharging 0x8000
+
+/* BD71815_REG_REX_CTRL_1 bits */
+#define REX_CLR BIT(4)
+
+/* BD71815_REG_REX_CTRL_1 bits */
+#define REX_PMU_STATE_MASK BIT(2)
+
+/* BD71815_REG_LED_CTRL bits */
+#define CHGDONE_LED_EN BIT(4)
+
+#endif /* __LINUX_MFD_BD71815_H */
diff --git a/include/linux/mfd/rohm-bd71828.h b/include/linux/mfd/rohm-bd71828.h
new file mode 100644
index 000000000000..3b5f3a7db4bd
--- /dev/null
+++ b/include/linux/mfd/rohm-bd71828.h
@@ -0,0 +1,426 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2019 ROHM Semiconductors */
+
+#ifndef __LINUX_MFD_BD71828_H__
+#define __LINUX_MFD_BD71828_H__
+
+#include <linux/mfd/rohm-generic.h>
+#include <linux/mfd/rohm-shared.h>
+
+/* Regulator IDs */
+enum {
+ BD71828_BUCK1,
+ BD71828_BUCK2,
+ BD71828_BUCK3,
+ BD71828_BUCK4,
+ BD71828_BUCK5,
+ BD71828_BUCK6,
+ BD71828_BUCK7,
+ BD71828_LDO1,
+ BD71828_LDO2,
+ BD71828_LDO3,
+ BD71828_LDO4,
+ BD71828_LDO5,
+ BD71828_LDO6,
+ BD71828_LDO_SNVS,
+ BD71828_REGULATOR_AMOUNT,
+};
+
+#define BD71828_BUCK1267_VOLTS 0x100
+#define BD71828_BUCK3_VOLTS 0x20
+#define BD71828_BUCK4_VOLTS 0x40
+#define BD71828_BUCK5_VOLTS 0x20
+#define BD71828_LDO_VOLTS 0x40
+/* LDO6 has a fixed 1.8V output voltage */
+#define BD71828_LDO_6_VOLTAGE 1800000
+
+/* Registers and masks */
+
+/* MODE control */
+#define BD71828_REG_PS_CTRL_1 0x04
+#define BD71828_REG_PS_CTRL_2 0x05
+#define BD71828_REG_PS_CTRL_3 0x06
+
+//#define BD71828_REG_SWRESET 0x06
+#define BD71828_MASK_RUN_LVL_CTRL 0x30
+
+/* Regulator control masks */
+
+#define BD71828_MASK_RAMP_DELAY 0x6
+
+#define BD71828_MASK_RUN_EN 0x08
+#define BD71828_MASK_SUSP_EN 0x04
+#define BD71828_MASK_IDLE_EN 0x02
+#define BD71828_MASK_LPSR_EN 0x01
+
+#define BD71828_MASK_RUN0_EN 0x01
+#define BD71828_MASK_RUN1_EN 0x02
+#define BD71828_MASK_RUN2_EN 0x04
+#define BD71828_MASK_RUN3_EN 0x08
+
+#define BD71828_MASK_DVS_BUCK1_CTRL 0x10
+#define BD71828_DVS_BUCK1_CTRL_I2C 0
+#define BD71828_DVS_BUCK1_USE_RUNLVL 0x10
+
+#define BD71828_MASK_DVS_BUCK2_CTRL 0x20
+#define BD71828_DVS_BUCK2_CTRL_I2C 0
+#define BD71828_DVS_BUCK2_USE_RUNLVL 0x20
+
+#define BD71828_MASK_DVS_BUCK6_CTRL 0x40
+#define BD71828_DVS_BUCK6_CTRL_I2C 0
+#define BD71828_DVS_BUCK6_USE_RUNLVL 0x40
+
+#define BD71828_MASK_DVS_BUCK7_CTRL 0x80
+#define BD71828_DVS_BUCK7_CTRL_I2C 0
+#define BD71828_DVS_BUCK7_USE_RUNLVL 0x80
+
+#define BD71828_MASK_BUCK1267_VOLT 0xff
+#define BD71828_MASK_BUCK3_VOLT 0x1f
+#define BD71828_MASK_BUCK4_VOLT 0x3f
+#define BD71828_MASK_BUCK5_VOLT 0x1f
+#define BD71828_MASK_LDO_VOLT 0x3f
+
+/* Regulator control regs */
+#define BD71828_REG_BUCK1_EN 0x08
+#define BD71828_REG_BUCK1_CTRL 0x09
+#define BD71828_REG_BUCK1_MODE 0x0a
+#define BD71828_REG_BUCK1_IDLE_VOLT 0x0b
+#define BD71828_REG_BUCK1_SUSP_VOLT 0x0c
+#define BD71828_REG_BUCK1_VOLT 0x0d
+
+#define BD71828_REG_BUCK2_EN 0x12
+#define BD71828_REG_BUCK2_CTRL 0x13
+#define BD71828_REG_BUCK2_MODE 0x14
+#define BD71828_REG_BUCK2_IDLE_VOLT 0x15
+#define BD71828_REG_BUCK2_SUSP_VOLT 0x16
+#define BD71828_REG_BUCK2_VOLT 0x17
+
+#define BD71828_REG_BUCK3_EN 0x1c
+#define BD71828_REG_BUCK3_MODE 0x1d
+#define BD71828_REG_BUCK3_VOLT 0x1e
+
+#define BD71828_REG_BUCK4_EN 0x1f
+#define BD71828_REG_BUCK4_MODE 0x20
+#define BD71828_REG_BUCK4_VOLT 0x21
+
+#define BD71828_REG_BUCK5_EN 0x22
+#define BD71828_REG_BUCK5_MODE 0x23
+#define BD71828_REG_BUCK5_VOLT 0x24
+
+#define BD71828_REG_BUCK6_EN 0x25
+#define BD71828_REG_BUCK6_CTRL 0x26
+#define BD71828_REG_BUCK6_MODE 0x27
+#define BD71828_REG_BUCK6_IDLE_VOLT 0x28
+#define BD71828_REG_BUCK6_SUSP_VOLT 0x29
+#define BD71828_REG_BUCK6_VOLT 0x2a
+
+#define BD71828_REG_BUCK7_EN 0x2f
+#define BD71828_REG_BUCK7_CTRL 0x30
+#define BD71828_REG_BUCK7_MODE 0x31
+#define BD71828_REG_BUCK7_IDLE_VOLT 0x32
+#define BD71828_REG_BUCK7_SUSP_VOLT 0x33
+#define BD71828_REG_BUCK7_VOLT 0x34
+
+#define BD71828_REG_LDO1_EN 0x39
+#define BD71828_REG_LDO1_VOLT 0x3a
+#define BD71828_REG_LDO2_EN 0x3b
+#define BD71828_REG_LDO2_VOLT 0x3c
+#define BD71828_REG_LDO3_EN 0x3d
+#define BD71828_REG_LDO3_VOLT 0x3e
+#define BD71828_REG_LDO4_EN 0x3f
+#define BD71828_REG_LDO4_VOLT 0x40
+#define BD71828_REG_LDO5_EN 0x41
+#define BD71828_REG_LDO5_VOLT 0x43
+#define BD71828_REG_LDO5_VOLT_OPT 0x42
+#define BD71828_REG_LDO6_EN 0x44
+//#define BD71828_REG_LDO6_VOLT 0x4
+#define BD71828_REG_LDO7_EN 0x45
+#define BD71828_REG_LDO7_VOLT 0x46
+
+/* GPIO */
+
+#define BD71828_GPIO_DRIVE_MASK 0x2
+#define BD71828_GPIO_OPEN_DRAIN 0x0
+#define BD71828_GPIO_PUSH_PULL 0x2
+#define BD71828_GPIO_OUT_HI 0x1
+#define BD71828_GPIO_OUT_LO 0x0
+#define BD71828_GPIO_OUT_MASK 0x1
+
+#define BD71828_REG_GPIO_CTRL1 0x47
+#define BD71828_REG_GPIO_CTRL2 0x48
+#define BD71828_REG_GPIO_CTRL3 0x49
+#define BD71828_REG_IO_STAT 0xed
+
+/* clk */
+#define BD71828_REG_OUT32K 0x4b
+
+/* RTC */
+#define BD71828_REG_RTC_SEC 0x4c
+#define BD71828_REG_RTC_MINUTE 0x4d
+#define BD71828_REG_RTC_HOUR 0x4e
+#define BD71828_REG_RTC_WEEK 0x4f
+#define BD71828_REG_RTC_DAY 0x50
+#define BD71828_REG_RTC_MONTH 0x51
+#define BD71828_REG_RTC_YEAR 0x52
+
+#define BD71828_REG_RTC_ALM0_SEC 0x53
+#define BD71828_REG_RTC_ALM_START BD71828_REG_RTC_ALM0_SEC
+#define BD71828_REG_RTC_ALM0_MINUTE 0x54
+#define BD71828_REG_RTC_ALM0_HOUR 0x55
+#define BD71828_REG_RTC_ALM0_WEEK 0x56
+#define BD71828_REG_RTC_ALM0_DAY 0x57
+#define BD71828_REG_RTC_ALM0_MONTH 0x58
+#define BD71828_REG_RTC_ALM0_YEAR 0x59
+#define BD71828_REG_RTC_ALM0_MASK 0x61
+
+#define BD71828_REG_RTC_ALM1_SEC 0x5a
+#define BD71828_REG_RTC_ALM1_MINUTE 0x5b
+#define BD71828_REG_RTC_ALM1_HOUR 0x5c
+#define BD71828_REG_RTC_ALM1_WEEK 0x5d
+#define BD71828_REG_RTC_ALM1_DAY 0x5e
+#define BD71828_REG_RTC_ALM1_MONTH 0x5f
+#define BD71828_REG_RTC_ALM1_YEAR 0x60
+#define BD71828_REG_RTC_ALM1_MASK 0x62
+
+#define BD71828_REG_RTC_ALM2 0x63
+#define BD71828_REG_RTC_START BD71828_REG_RTC_SEC
+
+/* Charger/Battery */
+#define BD71828_REG_CHG_STATE 0x65
+#define BD71828_REG_CHG_FULL 0xd2
+
+/* LEDs */
+#define BD71828_REG_LED_CTRL 0x4A
+#define BD71828_MASK_LED_AMBER 0x80
+#define BD71828_MASK_LED_GREEN 0x40
+#define BD71828_LED_ON 0xff
+#define BD71828_LED_OFF 0x0
+
+/* IRQ registers */
+#define BD71828_REG_INT_MASK_BUCK 0xd3
+#define BD71828_REG_INT_MASK_DCIN1 0xd4
+#define BD71828_REG_INT_MASK_DCIN2 0xd5
+#define BD71828_REG_INT_MASK_VSYS 0xd6
+#define BD71828_REG_INT_MASK_CHG 0xd7
+#define BD71828_REG_INT_MASK_BAT 0xd8
+#define BD71828_REG_INT_MASK_BAT_MON1 0xd9
+#define BD71828_REG_INT_MASK_BAT_MON2 0xda
+#define BD71828_REG_INT_MASK_BAT_MON3 0xdb
+#define BD71828_REG_INT_MASK_BAT_MON4 0xdc
+#define BD71828_REG_INT_MASK_TEMP 0xdd
+#define BD71828_REG_INT_MASK_RTC 0xde
+
+#define BD71828_REG_INT_MAIN 0xdf
+#define BD71828_REG_INT_BUCK 0xe0
+#define BD71828_REG_INT_DCIN1 0xe1
+#define BD71828_REG_INT_DCIN2 0xe2
+#define BD71828_REG_INT_VSYS 0xe3
+#define BD71828_REG_INT_CHG 0xe4
+#define BD71828_REG_INT_BAT 0xe5
+#define BD71828_REG_INT_BAT_MON1 0xe6
+#define BD71828_REG_INT_BAT_MON2 0xe7
+#define BD71828_REG_INT_BAT_MON3 0xe8
+#define BD71828_REG_INT_BAT_MON4 0xe9
+#define BD71828_REG_INT_TEMP 0xea
+#define BD71828_REG_INT_RTC 0xeb
+#define BD71828_REG_INT_UPDATE 0xec
+
+#define BD71828_MAX_REGISTER BD71828_REG_IO_STAT
+
+/* Masks for main IRQ register bits */
+enum {
+ BD71828_INT_BUCK,
+#define BD71828_INT_BUCK_MASK BIT(BD71828_INT_BUCK)
+ BD71828_INT_DCIN,
+#define BD71828_INT_DCIN_MASK BIT(BD71828_INT_DCIN)
+ BD71828_INT_VSYS,
+#define BD71828_INT_VSYS_MASK BIT(BD71828_INT_VSYS)
+ BD71828_INT_CHG,
+#define BD71828_INT_CHG_MASK BIT(BD71828_INT_CHG)
+ BD71828_INT_BAT,
+#define BD71828_INT_BAT_MASK BIT(BD71828_INT_BAT)
+ BD71828_INT_BAT_MON,
+#define BD71828_INT_BAT_MON_MASK BIT(BD71828_INT_BAT_MON)
+ BD71828_INT_TEMP,
+#define BD71828_INT_TEMP_MASK BIT(BD71828_INT_TEMP)
+ BD71828_INT_RTC,
+#define BD71828_INT_RTC_MASK BIT(BD71828_INT_RTC)
+};
+
+/* Interrupts */
+enum {
+ /* BUCK reg interrupts */
+ BD71828_INT_BUCK1_OCP,
+ BD71828_INT_BUCK2_OCP,
+ BD71828_INT_BUCK3_OCP,
+ BD71828_INT_BUCK4_OCP,
+ BD71828_INT_BUCK5_OCP,
+ BD71828_INT_BUCK6_OCP,
+ BD71828_INT_BUCK7_OCP,
+ BD71828_INT_PGFAULT,
+ /* DCIN1 interrupts */
+ BD71828_INT_DCIN_DET,
+ BD71828_INT_DCIN_RMV,
+ BD71828_INT_CLPS_OUT,
+ BD71828_INT_CLPS_IN,
+ /* DCIN2 interrupts */
+ BD71828_INT_DCIN_MON_RES,
+ BD71828_INT_DCIN_MON_DET,
+ BD71828_INT_LONGPUSH,
+ BD71828_INT_MIDPUSH,
+ BD71828_INT_SHORTPUSH,
+ BD71828_INT_PUSH,
+ BD71828_INT_WDOG,
+ BD71828_INT_SWRESET,
+ /* Vsys */
+ BD71828_INT_VSYS_UV_RES,
+ BD71828_INT_VSYS_UV_DET,
+ BD71828_INT_VSYS_LOW_RES,
+ BD71828_INT_VSYS_LOW_DET,
+ BD71828_INT_VSYS_HALL_IN,
+ BD71828_INT_VSYS_HALL_TOGGLE,
+ BD71828_INT_VSYS_MON_RES,
+ BD71828_INT_VSYS_MON_DET,
+ /* Charger */
+ BD71828_INT_CHG_DCIN_ILIM,
+ BD71828_INT_CHG_TOPOFF_TO_DONE,
+ BD71828_INT_CHG_WDG_TEMP,
+ BD71828_INT_CHG_WDG_TIME,
+ BD71828_INT_CHG_RECHARGE_RES,
+ BD71828_INT_CHG_RECHARGE_DET,
+ BD71828_INT_CHG_RANGED_TEMP_TRANSITION,
+ BD71828_INT_CHG_STATE_TRANSITION,
+ /* Battery */
+ BD71828_INT_BAT_TEMP_NORMAL,
+ BD71828_INT_BAT_TEMP_ERANGE,
+ BD71828_INT_BAT_TEMP_WARN,
+ BD71828_INT_BAT_REMOVED,
+ BD71828_INT_BAT_DETECTED,
+ BD71828_INT_THERM_REMOVED,
+ BD71828_INT_THERM_DETECTED,
+ /* Battery Mon 1 */
+ BD71828_INT_BAT_DEAD,
+ BD71828_INT_BAT_SHORTC_RES,
+ BD71828_INT_BAT_SHORTC_DET,
+ BD71828_INT_BAT_LOW_VOLT_RES,
+ BD71828_INT_BAT_LOW_VOLT_DET,
+ BD71828_INT_BAT_OVER_VOLT_RES,
+ BD71828_INT_BAT_OVER_VOLT_DET,
+ /* Battery Mon 2 */
+ BD71828_INT_BAT_MON_RES,
+ BD71828_INT_BAT_MON_DET,
+ /* Battery Mon 3 (Coulomb counter) */
+ BD71828_INT_BAT_CC_MON1,
+ BD71828_INT_BAT_CC_MON2,
+ BD71828_INT_BAT_CC_MON3,
+ /* Battery Mon 4 */
+ BD71828_INT_BAT_OVER_CURR_1_RES,
+ BD71828_INT_BAT_OVER_CURR_1_DET,
+ BD71828_INT_BAT_OVER_CURR_2_RES,
+ BD71828_INT_BAT_OVER_CURR_2_DET,
+ BD71828_INT_BAT_OVER_CURR_3_RES,
+ BD71828_INT_BAT_OVER_CURR_3_DET,
+ /* Temperature */
+ BD71828_INT_TEMP_BAT_LOW_RES,
+ BD71828_INT_TEMP_BAT_LOW_DET,
+ BD71828_INT_TEMP_BAT_HI_RES,
+ BD71828_INT_TEMP_BAT_HI_DET,
+ BD71828_INT_TEMP_CHIP_OVER_125_RES,
+ BD71828_INT_TEMP_CHIP_OVER_125_DET,
+ BD71828_INT_TEMP_CHIP_OVER_VF_DET,
+ BD71828_INT_TEMP_CHIP_OVER_VF_RES,
+ /* RTC Alarm */
+ BD71828_INT_RTC0,
+ BD71828_INT_RTC1,
+ BD71828_INT_RTC2,
+};
+
+#define BD71828_INT_BUCK1_OCP_MASK 0x1
+#define BD71828_INT_BUCK2_OCP_MASK 0x2
+#define BD71828_INT_BUCK3_OCP_MASK 0x4
+#define BD71828_INT_BUCK4_OCP_MASK 0x8
+#define BD71828_INT_BUCK5_OCP_MASK 0x10
+#define BD71828_INT_BUCK6_OCP_MASK 0x20
+#define BD71828_INT_BUCK7_OCP_MASK 0x40
+#define BD71828_INT_PGFAULT_MASK 0x80
+
+#define BD71828_INT_DCIN_DET_MASK 0x1
+#define BD71828_INT_DCIN_RMV_MASK 0x2
+#define BD71828_INT_CLPS_OUT_MASK 0x4
+#define BD71828_INT_CLPS_IN_MASK 0x8
+ /* DCIN2 interrupts */
+#define BD71828_INT_DCIN_MON_RES_MASK 0x1
+#define BD71828_INT_DCIN_MON_DET_MASK 0x2
+#define BD71828_INT_LONGPUSH_MASK 0x4
+#define BD71828_INT_MIDPUSH_MASK 0x8
+#define BD71828_INT_SHORTPUSH_MASK 0x10
+#define BD71828_INT_PUSH_MASK 0x20
+#define BD71828_INT_WDOG_MASK 0x40
+#define BD71828_INT_SWRESET_MASK 0x80
+ /* Vsys */
+#define BD71828_INT_VSYS_UV_RES_MASK 0x1
+#define BD71828_INT_VSYS_UV_DET_MASK 0x2
+#define BD71828_INT_VSYS_LOW_RES_MASK 0x4
+#define BD71828_INT_VSYS_LOW_DET_MASK 0x8
+#define BD71828_INT_VSYS_HALL_IN_MASK 0x10
+#define BD71828_INT_VSYS_HALL_TOGGLE_MASK 0x20
+#define BD71828_INT_VSYS_MON_RES_MASK 0x40
+#define BD71828_INT_VSYS_MON_DET_MASK 0x80
+ /* Charger */
+#define BD71828_INT_CHG_DCIN_ILIM_MASK 0x1
+#define BD71828_INT_CHG_TOPOFF_TO_DONE_MASK 0x2
+#define BD71828_INT_CHG_WDG_TEMP_MASK 0x4
+#define BD71828_INT_CHG_WDG_TIME_MASK 0x8
+#define BD71828_INT_CHG_RECHARGE_RES_MASK 0x10
+#define BD71828_INT_CHG_RECHARGE_DET_MASK 0x20
+#define BD71828_INT_CHG_RANGED_TEMP_TRANSITION_MASK 0x40
+#define BD71828_INT_CHG_STATE_TRANSITION_MASK 0x80
+ /* Battery */
+#define BD71828_INT_BAT_TEMP_NORMAL_MASK 0x1
+#define BD71828_INT_BAT_TEMP_ERANGE_MASK 0x2
+#define BD71828_INT_BAT_TEMP_WARN_MASK 0x4
+#define BD71828_INT_BAT_REMOVED_MASK 0x10
+#define BD71828_INT_BAT_DETECTED_MASK 0x20
+#define BD71828_INT_THERM_REMOVED_MASK 0x40
+#define BD71828_INT_THERM_DETECTED_MASK 0x80
+ /* Battery Mon 1 */
+#define BD71828_INT_BAT_DEAD_MASK 0x2
+#define BD71828_INT_BAT_SHORTC_RES_MASK 0x4
+#define BD71828_INT_BAT_SHORTC_DET_MASK 0x8
+#define BD71828_INT_BAT_LOW_VOLT_RES_MASK 0x10
+#define BD71828_INT_BAT_LOW_VOLT_DET_MASK 0x20
+#define BD71828_INT_BAT_OVER_VOLT_RES_MASK 0x40
+#define BD71828_INT_BAT_OVER_VOLT_DET_MASK 0x80
+ /* Battery Mon 2 */
+#define BD71828_INT_BAT_MON_RES_MASK 0x1
+#define BD71828_INT_BAT_MON_DET_MASK 0x2
+ /* Battery Mon 3 (Coulomb counter) */
+#define BD71828_INT_BAT_CC_MON1_MASK 0x1
+#define BD71828_INT_BAT_CC_MON2_MASK 0x2
+#define BD71828_INT_BAT_CC_MON3_MASK 0x4
+ /* Battery Mon 4 */
+#define BD71828_INT_BAT_OVER_CURR_1_RES_MASK 0x1
+#define BD71828_INT_BAT_OVER_CURR_1_DET_MASK 0x2
+#define BD71828_INT_BAT_OVER_CURR_2_RES_MASK 0x4
+#define BD71828_INT_BAT_OVER_CURR_2_DET_MASK 0x8
+#define BD71828_INT_BAT_OVER_CURR_3_RES_MASK 0x10
+#define BD71828_INT_BAT_OVER_CURR_3_DET_MASK 0x20
+ /* Temperature */
+#define BD71828_INT_TEMP_BAT_LOW_RES_MASK 0x1
+#define BD71828_INT_TEMP_BAT_LOW_DET_MASK 0x2
+#define BD71828_INT_TEMP_BAT_HI_RES_MASK 0x4
+#define BD71828_INT_TEMP_BAT_HI_DET_MASK 0x8
+#define BD71828_INT_TEMP_CHIP_OVER_125_RES_MASK 0x10
+#define BD71828_INT_TEMP_CHIP_OVER_125_DET_MASK 0x20
+#define BD71828_INT_TEMP_CHIP_OVER_VF_RES_MASK 0x40
+#define BD71828_INT_TEMP_CHIP_OVER_VF_DET_MASK 0x80
+ /* RTC Alarm */
+#define BD71828_INT_RTC0_MASK 0x1
+#define BD71828_INT_RTC1_MASK 0x2
+#define BD71828_INT_RTC2_MASK 0x4
+
+#define BD71828_OUT_TYPE_MASK 0x2
+#define BD71828_OUT_TYPE_OPEN_DRAIN 0x0
+#define BD71828_OUT_TYPE_CMOS 0x2
+
+#endif /* __LINUX_MFD_BD71828_H__ */
diff --git a/include/linux/mfd/rohm-bd718x7.h b/include/linux/mfd/rohm-bd718x7.h
new file mode 100644
index 000000000000..df2918198d37
--- /dev/null
+++ b/include/linux/mfd/rohm-bd718x7.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2018 ROHM Semiconductors */
+
+#ifndef __LINUX_MFD_BD718XX_H__
+#define __LINUX_MFD_BD718XX_H__
+
+#include <linux/mfd/rohm-generic.h>
+#include <linux/regmap.h>
+
+enum {
+ BD718XX_BUCK1 = 0,
+ BD718XX_BUCK2,
+ BD718XX_BUCK3,
+ BD718XX_BUCK4,
+ BD718XX_BUCK5,
+ BD718XX_BUCK6,
+ BD718XX_BUCK7,
+ BD718XX_BUCK8,
+ BD718XX_LDO1,
+ BD718XX_LDO2,
+ BD718XX_LDO3,
+ BD718XX_LDO4,
+ BD718XX_LDO5,
+ BD718XX_LDO6,
+ BD718XX_LDO7,
+ BD718XX_REGULATOR_AMOUNT,
+};
+
+/* Common voltage configurations */
+#define BD718XX_DVS_BUCK_VOLTAGE_NUM 0x3D
+#define BD718XX_4TH_NODVS_BUCK_VOLTAGE_NUM 0x3D
+
+#define BD718XX_LDO1_VOLTAGE_NUM 0x08
+#define BD718XX_LDO2_VOLTAGE_NUM 0x02
+#define BD718XX_LDO3_VOLTAGE_NUM 0x10
+#define BD718XX_LDO4_VOLTAGE_NUM 0x0A
+#define BD718XX_LDO6_VOLTAGE_NUM 0x0A
+
+/* BD71837 specific voltage configurations */
+#define BD71837_BUCK5_VOLTAGE_NUM 0x10
+#define BD71837_BUCK6_VOLTAGE_NUM 0x04
+#define BD71837_BUCK7_VOLTAGE_NUM 0x08
+#define BD71837_LDO5_VOLTAGE_NUM 0x10
+#define BD71837_LDO7_VOLTAGE_NUM 0x10
+
+/* BD71847 specific voltage configurations */
+#define BD71847_BUCK3_VOLTAGE_NUM 0x18
+#define BD71847_BUCK4_VOLTAGE_NUM 0x08
+#define BD71847_LDO5_VOLTAGE_NUM 0x20
+
+/* Registers specific to BD71837 */
+enum {
+ BD71837_REG_BUCK3_CTRL = 0x07,
+ BD71837_REG_BUCK4_CTRL = 0x08,
+ BD71837_REG_BUCK3_VOLT_RUN = 0x12,
+ BD71837_REG_BUCK4_VOLT_RUN = 0x13,
+ BD71837_REG_LDO7_VOLT = 0x1E,
+};
+
+/* Registers common for BD71837 and BD71847 */
+enum {
+ BD718XX_REG_REV = 0x00,
+ BD718XX_REG_SWRESET = 0x01,
+ BD718XX_REG_I2C_DEV = 0x02,
+ BD718XX_REG_PWRCTRL0 = 0x03,
+ BD718XX_REG_PWRCTRL1 = 0x04,
+ BD718XX_REG_BUCK1_CTRL = 0x05,
+ BD718XX_REG_BUCK2_CTRL = 0x06,
+ BD718XX_REG_1ST_NODVS_BUCK_CTRL = 0x09,
+ BD718XX_REG_2ND_NODVS_BUCK_CTRL = 0x0A,
+ BD718XX_REG_3RD_NODVS_BUCK_CTRL = 0x0B,
+ BD718XX_REG_4TH_NODVS_BUCK_CTRL = 0x0C,
+ BD718XX_REG_BUCK1_VOLT_RUN = 0x0D,
+ BD718XX_REG_BUCK1_VOLT_IDLE = 0x0E,
+ BD718XX_REG_BUCK1_VOLT_SUSP = 0x0F,
+ BD718XX_REG_BUCK2_VOLT_RUN = 0x10,
+ BD718XX_REG_BUCK2_VOLT_IDLE = 0x11,
+ BD718XX_REG_1ST_NODVS_BUCK_VOLT = 0x14,
+ BD718XX_REG_2ND_NODVS_BUCK_VOLT = 0x15,
+ BD718XX_REG_3RD_NODVS_BUCK_VOLT = 0x16,
+ BD718XX_REG_4TH_NODVS_BUCK_VOLT = 0x17,
+ BD718XX_REG_LDO1_VOLT = 0x18,
+ BD718XX_REG_LDO2_VOLT = 0x19,
+ BD718XX_REG_LDO3_VOLT = 0x1A,
+ BD718XX_REG_LDO4_VOLT = 0x1B,
+ BD718XX_REG_LDO5_VOLT = 0x1C,
+ BD718XX_REG_LDO6_VOLT = 0x1D,
+ BD718XX_REG_TRANS_COND0 = 0x1F,
+ BD718XX_REG_TRANS_COND1 = 0x20,
+ BD718XX_REG_VRFAULTEN = 0x21,
+ BD718XX_REG_MVRFLTMASK0 = 0x22,
+ BD718XX_REG_MVRFLTMASK1 = 0x23,
+ BD718XX_REG_MVRFLTMASK2 = 0x24,
+ BD718XX_REG_RCVCFG = 0x25,
+ BD718XX_REG_RCVNUM = 0x26,
+ BD718XX_REG_PWRONCONFIG0 = 0x27,
+ BD718XX_REG_PWRONCONFIG1 = 0x28,
+ BD718XX_REG_RESETSRC = 0x29,
+ BD718XX_REG_MIRQ = 0x2A,
+ BD718XX_REG_IRQ = 0x2B,
+ BD718XX_REG_IN_MON = 0x2C,
+ BD718XX_REG_POW_STATE = 0x2D,
+ BD718XX_REG_OUT32K = 0x2E,
+ BD718XX_REG_REGLOCK = 0x2F,
+ BD718XX_REG_OTPVER = 0xFF,
+ BD718XX_MAX_REGISTER = 0x100,
+};
+
+#define REGLOCK_PWRSEQ 0x1
+#define REGLOCK_VREG 0x10
+
+/* Generic BUCK control masks */
+#define BD718XX_BUCK_SEL 0x02
+#define BD718XX_BUCK_EN 0x01
+#define BD718XX_BUCK_RUN_ON 0x04
+
+/* Generic LDO masks */
+#define BD718XX_LDO_SEL 0x80
+#define BD718XX_LDO_EN 0x40
+
+/* BD71837 BUCK ramp rate CTRL reg bits */
+#define BUCK_RAMPRATE_MASK 0xC0
+#define BUCK_RAMPRATE_10P00MV 0x0
+#define BUCK_RAMPRATE_5P00MV 0x1
+#define BUCK_RAMPRATE_2P50MV 0x2
+#define BUCK_RAMPRATE_1P25MV 0x3
+
+#define DVS_BUCK_RUN_MASK 0x3F
+#define DVS_BUCK_SUSP_MASK 0x3F
+#define DVS_BUCK_IDLE_MASK 0x3F
+
+#define BD718XX_1ST_NODVS_BUCK_MASK 0x07
+#define BD718XX_3RD_NODVS_BUCK_MASK 0x07
+#define BD718XX_4TH_NODVS_BUCK_MASK 0x3F
+
+#define BD71847_BUCK3_MASK 0x07
+#define BD71847_BUCK3_RANGE_MASK 0xC0
+#define BD71847_BUCK4_MASK 0x03
+#define BD71847_BUCK4_RANGE_MASK 0x40
+
+#define BD71837_BUCK5_MASK 0x07
+#define BD71837_BUCK5_RANGE_MASK 0x80
+#define BD71837_BUCK6_MASK 0x03
+
+#define BD718XX_LDO1_MASK 0x03
+#define BD718XX_LDO1_RANGE_MASK 0x20
+#define BD718XX_LDO2_MASK 0x20
+#define BD718XX_LDO3_MASK 0x0F
+#define BD718XX_LDO4_MASK 0x0F
+#define BD718XX_LDO6_MASK 0x0F
+
+#define BD71837_LDO5_MASK 0x0F
+#define BD71847_LDO5_MASK 0x0F
+#define BD71847_LDO5_RANGE_MASK 0x20
+
+#define BD71837_LDO7_MASK 0x0F
+
+/* BD718XX Voltage monitoring masks */
+#define BD718XX_BUCK1_VRMON80 0x1
+#define BD718XX_BUCK1_VRMON130 0x2
+#define BD718XX_BUCK2_VRMON80 0x4
+#define BD718XX_BUCK2_VRMON130 0x8
+#define BD718XX_1ST_NODVS_BUCK_VRMON80 0x1
+#define BD718XX_1ST_NODVS_BUCK_VRMON130 0x2
+#define BD718XX_2ND_NODVS_BUCK_VRMON80 0x4
+#define BD718XX_2ND_NODVS_BUCK_VRMON130 0x8
+#define BD718XX_3RD_NODVS_BUCK_VRMON80 0x10
+#define BD718XX_3RD_NODVS_BUCK_VRMON130 0x20
+#define BD718XX_4TH_NODVS_BUCK_VRMON80 0x40
+#define BD718XX_4TH_NODVS_BUCK_VRMON130 0x80
+#define BD718XX_LDO1_VRMON80 0x1
+#define BD718XX_LDO2_VRMON80 0x2
+#define BD718XX_LDO3_VRMON80 0x4
+#define BD718XX_LDO4_VRMON80 0x8
+#define BD718XX_LDO5_VRMON80 0x10
+#define BD718XX_LDO6_VRMON80 0x20
+
+/* BD71837 specific voltage monitoring masks */
+#define BD71837_BUCK3_VRMON80 0x10
+#define BD71837_BUCK3_VRMON130 0x20
+#define BD71837_BUCK4_VRMON80 0x40
+#define BD71837_BUCK4_VRMON130 0x80
+#define BD71837_LDO7_VRMON80 0x40
+
+/* BD718XX_REG_IRQ bits */
+#define IRQ_SWRST 0x40
+#define IRQ_PWRON_S 0x20
+#define IRQ_PWRON_L 0x10
+#define IRQ_PWRON 0x08
+#define IRQ_WDOG 0x04
+#define IRQ_ON_REQ 0x02
+#define IRQ_STBY_REQ 0x01
+
+/* ROHM BD718XX irqs */
+enum {
+ BD718XX_INT_STBY_REQ,
+ BD718XX_INT_ON_REQ,
+ BD718XX_INT_WDOG,
+ BD718XX_INT_PWRBTN,
+ BD718XX_INT_PWRBTN_L,
+ BD718XX_INT_PWRBTN_S,
+ BD718XX_INT_SWRST
+};
+
+/* ROHM BD718XX interrupt masks */
+#define BD718XX_INT_SWRST_MASK 0x40
+#define BD718XX_INT_PWRBTN_S_MASK 0x20
+#define BD718XX_INT_PWRBTN_L_MASK 0x10
+#define BD718XX_INT_PWRBTN_MASK 0x8
+#define BD718XX_INT_WDOG_MASK 0x4
+#define BD718XX_INT_ON_REQ_MASK 0x2
+#define BD718XX_INT_STBY_REQ_MASK 0x1
+
+/* Register write induced reset settings */
+
+/*
+ * Even though bit zero is not part of the SWRESET type field, we still want
+ * to write zero to it when changing the type. Bit zero is the 'SWRESET'
+ * trigger bit, and if we write 1 to it we trigger the action. So always
+ * write 0 to it when changing the SWRESET type - no matter what we read
+ * from it.
+ */
+#define BD718XX_SWRESET_TYPE_MASK 7
+#define BD718XX_SWRESET_TYPE_DISABLED 0
+#define BD718XX_SWRESET_TYPE_COLD 4
+#define BD718XX_SWRESET_TYPE_WARM 6
+
+#define BD718XX_SWRESET_RESET_MASK 1
+#define BD718XX_SWRESET_RESET 1
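
/*
 * Illustrative sketch (not part of this diff): changing the SWRESET type as
 * the comment above prescribes. Including BD718XX_SWRESET_RESET_MASK in the
 * update mask while leaving the bit clear in the value forces the trigger
 * bit to 0, so the reset action is never fired accidentally during a
 * read-modify-write. The helper name is hypothetical.
 */
static int bd718xx_set_swreset_type(struct regmap *regmap, unsigned int type)
{
	return regmap_update_bits(regmap, BD718XX_REG_SWRESET,
				  BD718XX_SWRESET_TYPE_MASK |
				  BD718XX_SWRESET_RESET_MASK,
				  type);
}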
+
+/* Poweroff state transition conditions */
+
+#define BD718XX_ON_REQ_POWEROFF_MASK 1
+#define BD718XX_SWRESET_POWEROFF_MASK 2
+#define BD718XX_WDOG_POWEROFF_MASK 4
+#define BD718XX_KEY_L_POWEROFF_MASK 8
+
+#define BD718XX_POWOFF_TO_SNVS 0
+#define BD718XX_POWOFF_TO_RDY 0xF
+
+#define BD718XX_POWOFF_TIME_MASK 0xF0
+enum {
+ BD718XX_POWOFF_TIME_5MS = 0,
+ BD718XX_POWOFF_TIME_10MS,
+ BD718XX_POWOFF_TIME_15MS,
+ BD718XX_POWOFF_TIME_20MS,
+ BD718XX_POWOFF_TIME_25MS,
+ BD718XX_POWOFF_TIME_30MS,
+ BD718XX_POWOFF_TIME_35MS,
+ BD718XX_POWOFF_TIME_40MS,
+ BD718XX_POWOFF_TIME_45MS,
+ BD718XX_POWOFF_TIME_50MS,
+ BD718XX_POWOFF_TIME_75MS,
+ BD718XX_POWOFF_TIME_100MS,
+ BD718XX_POWOFF_TIME_250MS,
+ BD718XX_POWOFF_TIME_500MS,
+ BD718XX_POWOFF_TIME_750MS,
+ BD718XX_POWOFF_TIME_1500MS
+};
+
+/* Poweron sequence state transition conditions */
+#define BD718XX_RDY_TO_SNVS_MASK 0xF
+#define BD718XX_SNVS_TO_RUN_MASK 0xF0
+
+#define BD718XX_PWR_TRIG_KEY_L 1
+#define BD718XX_PWR_TRIG_KEY_S 2
+#define BD718XX_PWR_TRIG_PMIC_ON 4
+#define BD718XX_PWR_TRIG_VSYS_UVLO 8
+#define BD718XX_RDY_TO_SNVS_SIFT 0
+#define BD718XX_SNVS_TO_RUN_SIFT 4
+
+#define BD718XX_PWRBTN_PRESS_DURATION_MASK 0xF
+
+/* Timeout value for detecting short press */
+enum {
+ BD718XX_PWRBTN_SHORT_PRESS_10MS = 0,
+ BD718XX_PWRBTN_SHORT_PRESS_500MS,
+ BD718XX_PWRBTN_SHORT_PRESS_1000MS,
+ BD718XX_PWRBTN_SHORT_PRESS_1500MS,
+ BD718XX_PWRBTN_SHORT_PRESS_2000MS,
+ BD718XX_PWRBTN_SHORT_PRESS_2500MS,
+ BD718XX_PWRBTN_SHORT_PRESS_3000MS,
+ BD718XX_PWRBTN_SHORT_PRESS_3500MS,
+ BD718XX_PWRBTN_SHORT_PRESS_4000MS,
+ BD718XX_PWRBTN_SHORT_PRESS_4500MS,
+ BD718XX_PWRBTN_SHORT_PRESS_5000MS,
+ BD718XX_PWRBTN_SHORT_PRESS_5500MS,
+ BD718XX_PWRBTN_SHORT_PRESS_6000MS,
+ BD718XX_PWRBTN_SHORT_PRESS_6500MS,
+ BD718XX_PWRBTN_SHORT_PRESS_7000MS,
+ BD718XX_PWRBTN_SHORT_PRESS_7500MS
+};
+
+/* Timeout value for detecting LONG press */
+enum {
+ BD718XX_PWRBTN_LONG_PRESS_10MS = 0,
+ BD718XX_PWRBTN_LONG_PRESS_1S,
+ BD718XX_PWRBTN_LONG_PRESS_2S,
+ BD718XX_PWRBTN_LONG_PRESS_3S,
+ BD718XX_PWRBTN_LONG_PRESS_4S,
+ BD718XX_PWRBTN_LONG_PRESS_5S,
+ BD718XX_PWRBTN_LONG_PRESS_6S,
+ BD718XX_PWRBTN_LONG_PRESS_7S,
+ BD718XX_PWRBTN_LONG_PRESS_8S,
+ BD718XX_PWRBTN_LONG_PRESS_9S,
+ BD718XX_PWRBTN_LONG_PRESS_10S,
+ BD718XX_PWRBTN_LONG_PRESS_11S,
+ BD718XX_PWRBTN_LONG_PRESS_12S,
+ BD718XX_PWRBTN_LONG_PRESS_13S,
+ BD718XX_PWRBTN_LONG_PRESS_14S,
+ BD718XX_PWRBTN_LONG_PRESS_15S
+};
+
+#endif /* __LINUX_MFD_BD718XX_H__ */
diff --git a/include/linux/mfd/rohm-bd957x.h b/include/linux/mfd/rohm-bd957x.h
new file mode 100644
index 000000000000..acc920b64f75
--- /dev/null
+++ b/include/linux/mfd/rohm-bd957x.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2021 ROHM Semiconductors */
+
+#ifndef __LINUX_MFD_BD957X_H__
+#define __LINUX_MFD_BD957X_H__
+
+enum {
+ BD957X_VD50,
+ BD957X_VD18,
+ BD957X_VDDDR,
+ BD957X_VD10,
+ BD957X_VOUTL1,
+ BD957X_VOUTS1,
+};
+
+/*
+ * The BD9576 has its own IRQ 'blocks' for:
+ * - I2C/thermal,
+ * - Over voltage protection
+ * - Short-circuit protection
+ * - Over current protection
+ * - Over voltage detection
+ * - Under voltage detection
+ * - Under voltage protection
+ * - 'system interrupt'.
+ *
+ * Each of the blocks has a status register giving more accurate IRQ source
+ * information - for example, which of the regulators has an over-voltage
+ * condition.
+ *
+ * On top of this, there is a "main IRQ" status register where each bit
+ * indicates which of the sub-blocks have active IRQs. Fine. That would fit
+ * regmap-irq main status handling. Except that:
+ * - Only some sub-IRQs can be masked.
+ * - The IRQ informs us about a fault condition, not about fault state
+ * changes. The IRQ line is kept asserted until the detected condition is
+ * acked AND cleared in HW. This is annoying for IRQs like the one signalling
+ * high temperature, because if the IRQ is not disabled it keeps the CPU in
+ * an IRQ handling loop.
+ *
+ * For now we just use the main IRQ register as the source of our IRQ
+ * information and bind the regmap-irq to it. We leave fine-grained sub-IRQ
+ * register handling to the handlers in the sub-devices. The regulator driver
+ * shall read which regulators are the source of the problem - or whether the
+ * detected error is a regulator temperature error. The sub-drivers also
+ * handle masking of "sub-IRQs" where this is supported/needed.
+ *
+ * To overcome the problem of the HW keeping the IRQ asserted, we call
+ * disable_irq_nosync() from the sub-device handler and add delayed work to
+ * re-enable the IRQ roughly 1 second later. This should keep our CPU out of
+ * a busy loop.
+ */
+#define IRQS_SILENT_MS 1000
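
/*
 * Illustrative sketch (not part of this diff): the silencing pattern the
 * comment above describes. A sub-device handler backs off from the
 * level-asserted IRQ with disable_irq_nosync() and queues delayed work to
 * re-enable it after IRQS_SILENT_MS. The struct and function names are
 * hypothetical; the delayed work is assumed to have been set up with
 * INIT_DELAYED_WORK() at probe time.
 */
struct bd9576_sub_irq {
	int irq;
	struct delayed_work silence_work;
};

static void bd9576_reenable_irq(struct work_struct *work)
{
	struct bd9576_sub_irq *s = container_of(work, struct bd9576_sub_irq,
						silence_work.work);

	enable_irq(s->irq);
}

static irqreturn_t bd9576_sub_handler(int irq, void *data)
{
	struct bd9576_sub_irq *s = data;

	/* ... read the sub-block status register and notify consumers ... */

	/* HW keeps the line asserted; back off instead of busy-looping */
	disable_irq_nosync(irq);
	schedule_delayed_work(&s->silence_work,
			      msecs_to_jiffies(IRQS_SILENT_MS));

	return IRQ_HANDLED;
}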
+
+enum {
+ BD9576_INT_THERM,
+ BD9576_INT_OVP,
+ BD9576_INT_SCP,
+ BD9576_INT_OCP,
+ BD9576_INT_OVD,
+ BD9576_INT_UVD,
+ BD9576_INT_UVP,
+ BD9576_INT_SYS,
+};
+
+#define BD957X_REG_SMRB_ASSERT 0x15
+#define BD957X_REG_PMIC_INTERNAL_STAT 0x20
+#define BD957X_REG_INT_THERM_STAT 0x23
+#define BD957X_REG_INT_THERM_MASK 0x24
+#define BD957X_REG_INT_OVP_STAT 0x25
+#define BD957X_REG_INT_SCP_STAT 0x26
+#define BD957X_REG_INT_OCP_STAT 0x27
+#define BD957X_REG_INT_OVD_STAT 0x28
+#define BD957X_REG_INT_UVD_STAT 0x29
+#define BD957X_REG_INT_UVP_STAT 0x2a
+#define BD957X_REG_INT_SYS_STAT 0x2b
+#define BD957X_REG_INT_SYS_MASK 0x2c
+#define BD957X_REG_INT_MAIN_STAT 0x30
+#define BD957X_REG_INT_MAIN_MASK 0x31
+
+#define UVD_IRQ_VALID_MASK 0x6F
+#define OVD_IRQ_VALID_MASK 0x2F
+
+#define BD957X_MASK_INT_MAIN_THERM BIT(0)
+#define BD957X_MASK_INT_MAIN_OVP BIT(1)
+#define BD957X_MASK_INT_MAIN_SCP BIT(2)
+#define BD957X_MASK_INT_MAIN_OCP BIT(3)
+#define BD957X_MASK_INT_MAIN_OVD BIT(4)
+#define BD957X_MASK_INT_MAIN_UVD BIT(5)
+#define BD957X_MASK_INT_MAIN_UVP BIT(6)
+#define BD957X_MASK_INT_MAIN_SYS BIT(7)
+#define BD957X_MASK_INT_ALL 0xff
+
+#define BD957X_REG_WDT_CONF 0x16
+
+#define BD957X_REG_POW_TRIGGER1 0x41
+#define BD957X_REG_POW_TRIGGER2 0x42
+#define BD957X_REG_POW_TRIGGER3 0x43
+#define BD957X_REG_POW_TRIGGER4 0x44
+#define BD957X_REG_POW_TRIGGERL1 0x45
+#define BD957X_REG_POW_TRIGGERS1 0x46
+
+#define BD957X_REGULATOR_EN_MASK 0xff
+#define BD957X_REGULATOR_DIS_VAL 0xff
+
+#define BD957X_VSEL_REG_MASK 0xff
+
+#define BD957X_MASK_VOUT1_TUNE 0x87
+#define BD957X_MASK_VOUT2_TUNE 0x87
+#define BD957X_MASK_VOUT3_TUNE 0x1f
+#define BD957X_MASK_VOUT4_TUNE 0x1f
+#define BD957X_MASK_VOUTL1_TUNE 0x87
+
+#define BD957X_REG_VOUT1_TUNE 0x50
+#define BD957X_REG_VOUT2_TUNE 0x53
+#define BD957X_REG_VOUT3_TUNE 0x56
+#define BD957X_REG_VOUT4_TUNE 0x59
+#define BD957X_REG_VOUTL1_TUNE 0x5c
+
+#define BD9576_REG_VOUT1_OVD 0x51
+#define BD9576_REG_VOUT1_UVD 0x52
+#define BD9576_REG_VOUT2_OVD 0x54
+#define BD9576_REG_VOUT2_UVD 0x55
+#define BD9576_REG_VOUT3_OVD 0x57
+#define BD9576_REG_VOUT3_UVD 0x58
+#define BD9576_REG_VOUT4_OVD 0x5a
+#define BD9576_REG_VOUT4_UVD 0x5b
+#define BD9576_REG_VOUTL1_OVD 0x5d
+#define BD9576_REG_VOUTL1_UVD 0x5e
+
+#define BD9576_MASK_XVD 0x7f
+
+#define BD9576_REG_VOUT1S_OCW 0x5f
+#define BD9576_REG_VOUT1S_OCP 0x60
+
+#define BD9576_MASK_VOUT1S_OCW 0x3f
+#define BD9576_MASK_VOUT1S_OCP 0x3f
+
+#define BD957X_MAX_REGISTER 0x61
+
+#endif
diff --git a/include/linux/mfd/rohm-generic.h b/include/linux/mfd/rohm-generic.h
new file mode 100644
index 000000000000..4eeb22876bad
--- /dev/null
+++ b/include/linux/mfd/rohm-generic.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2018 ROHM Semiconductors */
+
+#ifndef __LINUX_MFD_ROHM_H__
+#define __LINUX_MFD_ROHM_H__
+
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+enum rohm_chip_type {
+ ROHM_CHIP_TYPE_BD9571,
+ ROHM_CHIP_TYPE_BD9573,
+ ROHM_CHIP_TYPE_BD9574,
+ ROHM_CHIP_TYPE_BD9576,
+ ROHM_CHIP_TYPE_BD71815,
+ ROHM_CHIP_TYPE_BD71828,
+ ROHM_CHIP_TYPE_BD71837,
+ ROHM_CHIP_TYPE_BD71847,
+ ROHM_CHIP_TYPE_AMOUNT
+};
+
+struct rohm_regmap_dev {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+#define ROHM_DVS_LEVEL_RUN BIT(0)
+#define ROHM_DVS_LEVEL_IDLE BIT(1)
+#define ROHM_DVS_LEVEL_SUSPEND BIT(2)
+#define ROHM_DVS_LEVEL_LPSR BIT(3)
+#define ROHM_DVS_LEVEL_SNVS BIT(4)
+#define ROHM_DVS_LEVEL_VALID_AMOUNT 5
+#define ROHM_DVS_LEVEL_UNKNOWN 0
+
+/**
+ * struct rohm_dvs_config - dynamic voltage scaling register descriptions
+ *
+ * @level_map: bitmap representing supported run-levels for this
+ * regulator
+ * @run_reg: register address for regulator config at 'run' state
+ * @run_mask: value mask for regulator voltages at 'run' state
+ * @run_on_mask: enable mask for regulator at 'run' state
+ * @idle_reg: register address for regulator config at 'idle' state
+ * @idle_mask: value mask for regulator voltages at 'idle' state
+ * @idle_on_mask: enable mask for regulator at 'idle' state
+ * @suspend_reg: register address for regulator config at 'suspend' state
+ * @suspend_mask: value mask for regulator voltages at 'suspend' state
+ * @suspend_on_mask: enable mask for regulator at 'suspend' state
+ * @lpsr_reg: register address for regulator config at 'lpsr' state
+ * @lpsr_mask: value mask for regulator voltages at 'lpsr' state
+ * @lpsr_on_mask: enable mask for regulator at 'lpsr' state
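+ * @snvs_reg: register address for regulator config at 'snvs' state
+ * @snvs_mask: value mask for regulator voltages at 'snvs' state
+ * @snvs_on_mask: enable mask for regulator at 'snvs' state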
+ *
+ * Description of ROHM PMICs' voltage configuration registers for different
+ * system states. This is used to correctly configure the PMIC at startup
+ * based on values read from DT.
+ */
+struct rohm_dvs_config {
+ uint64_t level_map;
+ unsigned int run_reg;
+ unsigned int run_mask;
+ unsigned int run_on_mask;
+ unsigned int idle_reg;
+ unsigned int idle_mask;
+ unsigned int idle_on_mask;
+ unsigned int suspend_reg;
+ unsigned int suspend_mask;
+ unsigned int suspend_on_mask;
+ unsigned int lpsr_reg;
+ unsigned int lpsr_mask;
+ unsigned int lpsr_on_mask;
+ unsigned int snvs_reg;
+ unsigned int snvs_mask;
+ unsigned int snvs_on_mask;
+};
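+
+/*
+ * Example (sketch only; the register addresses and masks below are
+ * hypothetical): a regulator driver describes where its 'run' and 'idle'
+ * voltages live and lets rohm_regulator_set_dvs_levels() below populate
+ * them from device-tree:
+ *
+ *	static const struct rohm_dvs_config dvs = {
+ *		.level_map	= ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE,
+ *		.run_reg	= 0x21,
+ *		.run_mask	= 0x3f,
+ *		.idle_reg	= 0x22,
+ *		.idle_mask	= 0x3f,
+ *	};
+ *
+ *	ret = rohm_regulator_set_dvs_levels(&dvs, np, desc, regmap);
+ */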
+
+#if IS_ENABLED(CONFIG_REGULATOR_ROHM)
+int rohm_regulator_set_dvs_levels(const struct rohm_dvs_config *dvs,
+ struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regmap *regmap);
+
+int rohm_regulator_set_voltage_sel_restricted(struct regulator_dev *rdev,
+ unsigned int sel);
+#endif
+
+#endif
diff --git a/include/linux/mfd/rohm-shared.h b/include/linux/mfd/rohm-shared.h
new file mode 100644
index 000000000000..53dd7f638bfd
--- /dev/null
+++ b/include/linux/mfd/rohm-shared.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2020 ROHM Semiconductors */
+
+
+#ifndef __LINUX_MFD_ROHM_SHARED_H__
+#define __LINUX_MFD_ROHM_SHARED_H__
+
+/* RTC definitions shared between BD70528 and BD71828 */
+
+#define BD70528_MASK_RTC_SEC 0x7f
+#define BD70528_MASK_RTC_MINUTE 0x7f
+#define BD70528_MASK_RTC_HOUR_24H 0x80
+#define BD70528_MASK_RTC_HOUR_PM 0x20
+#define BD70528_MASK_RTC_HOUR 0x3f
+#define BD70528_MASK_RTC_DAY 0x3f
+#define BD70528_MASK_RTC_WEEK 0x07
+#define BD70528_MASK_RTC_MONTH 0x1f
+#define BD70528_MASK_RTC_YEAR 0xff
+#define BD70528_MASK_ALM_EN 0x7
+
+#endif /* __LINUX_MFD_ROHM_SHARED_H__ */
diff --git a/include/linux/mfd/rsmu.h b/include/linux/mfd/rsmu.h
new file mode 100644
index 000000000000..0379aa207428
--- /dev/null
+++ b/include/linux/mfd/rsmu.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Core interface for Renesas Synchronization Management Unit (SMU) devices.
+ *
+ * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company.
+ */
+
+#ifndef __LINUX_MFD_RSMU_H
+#define __LINUX_MFD_RSMU_H
+
+#define RSMU_MAX_WRITE_COUNT (255)
+#define RSMU_MAX_READ_COUNT (255)
+
+/* The supported devices are ClockMatrix, Sabre and SnowLotus */
+enum rsmu_type {
+ RSMU_CM = 0x34000,
+ RSMU_SABRE = 0x33810,
+ RSMU_SL = 0x19850,
+};
+
+/**
+ * struct rsmu_ddata - device data structure for sub-devices.
+ *
+ * @dev: i2c/spi device.
+ * @regmap: i2c/spi bus access.
+ * @lock: mutex used by sub devices to make sure a series of
+ * bus access requests are not interrupted.
+ * @type: RSMU device type.
+ * @page: i2c/spi bus driver internal use only.
+ */
+struct rsmu_ddata {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex lock;
+ enum rsmu_type type;
+ u32 page;
+};
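+
+/*
+ * Sub-devices are expected to hold @lock around sequences of bus accesses
+ * that must not be interleaved, e.g. (sketch only; page_reg and offset are
+ * hypothetical register addresses):
+ *
+ *	mutex_lock(&rsmu->lock);
+ *	regmap_write(rsmu->regmap, page_reg, page);
+ *	regmap_read(rsmu->regmap, offset, &val);
+ *	mutex_unlock(&rsmu->lock);
+ */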
+#endif /* __LINUX_MFD_RSMU_H */
diff --git a/include/linux/mfd/rt5033-private.h b/include/linux/mfd/rt5033-private.h
index 1b63fc2f42d1..6bb432f6a96c 100644
--- a/include/linux/mfd/rt5033-private.h
+++ b/include/linux/mfd/rt5033-private.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* MFD core driver for Richtek RT5033
*
* Copyright (C) 2014 Samsung Electronics, Co., Ltd.
* Author: Beomho Seo <beomho.seo@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published bythe Free Software Foundation.
*/
#ifndef __RT5033_PRIVATE_H__
@@ -94,14 +91,14 @@ enum rt5033_reg {
#define RT5033_RT_HZ_MASK 0x01
/* RT5033 control register */
-#define RT5033_CTRL_FCCM_BUCK_MASK 0x00
-#define RT5033_CTRL_BUCKOMS_MASK 0x01
-#define RT5033_CTRL_LDOOMS_MASK 0x02
-#define RT5033_CTRL_SLDOOMS_MASK 0x03
-#define RT5033_CTRL_EN_BUCK_MASK 0x04
-#define RT5033_CTRL_EN_LDO_MASK 0x05
-#define RT5033_CTRL_EN_SAFE_LDO_MASK 0x06
-#define RT5033_CTRL_LDO_SLEEP_MASK 0x07
+#define RT5033_CTRL_FCCM_BUCK_MASK BIT(0)
+#define RT5033_CTRL_BUCKOMS_MASK BIT(1)
+#define RT5033_CTRL_LDOOMS_MASK BIT(2)
+#define RT5033_CTRL_SLDOOMS_MASK BIT(3)
+#define RT5033_CTRL_EN_BUCK_MASK BIT(4)
+#define RT5033_CTRL_EN_LDO_MASK BIT(5)
+#define RT5033_CTRL_EN_SAFE_LDO_MASK BIT(6)
+#define RT5033_CTRL_LDO_SLEEP_MASK BIT(7)
/* RT5033 BUCK control register */
#define RT5033_BUCK_CTRL_MASK 0x1f
@@ -110,14 +107,13 @@ enum rt5033_reg {
#define RT5033_LDO_CTRL_MASK 0x1f
/* RT5033 charger property - model, manufacturer */
-
#define RT5033_CHARGER_MODEL "RT5033WSC Charger"
#define RT5033_MANUFACTURER "Richtek Technology Corporation"
/*
- * RT5033 charger fast-charge current lmits (as in CHGCTRL1 register),
- * AICR mode limits the input current for example,
- * the AIRC 100 mode limits the input current to 100 mA.
+ * While RT5033 charger can limit the fast-charge current (as in CHGCTRL1
+ * register), AICR mode limits the input current. For example, the AICR 100
+ * mode limits the input current to 100 mA.
*/
#define RT5033_AICR_100_MODE 0x20
#define RT5033_AICR_500_MODE 0x40
@@ -142,10 +138,9 @@ enum rt5033_reg {
#define RT5033_TE_ENABLE_MASK 0x08
/*
- * RT5033 charger opa mode. RT50300 have two opa mode charger mode
- * and boost mode for OTG
+ * RT5033 charger opa mode. RT5033 has two opa modes for OTG: charger mode
+ * and boost mode.
*/
-
#define RT5033_CHARGER_MODE 0x00
#define RT5033_BOOST_MODE 0x01
@@ -184,18 +179,17 @@ enum rt5033_reg {
* RT5033 charger pre-charge threshold volt limits
* (as in CHGCTRL5 register), uV
*/
-
#define RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MIN 2300000U
#define RT5033_CHARGER_PRE_THRESHOLD_STEP_NUM 100000U
#define RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MAX 3800000U
/*
- * RT5033 charger enable UUG, If UUG enable MOS auto control by H/W charger
+ * RT5033 charger UUG. It enables MOS auto control by the H/W charger
+ * circuit.
*/
#define RT5033_CHARGER_UUG_ENABLE 0x02
-/* RT5033 charger High impedance mode */
+/* RT5033 charger high impedance mode */
#define RT5033_CHARGER_HZ_DISABLE 0x00
#define RT5033_CHARGER_HZ_ENABLE 0x01
@@ -250,11 +244,11 @@ enum rt5033_fuel_reg {
#define RT5033_FUEL_BAT_PRESENT 0x02
/* RT5033 PMIC interrupts */
-#define RT5033_PMIC_IRQ_BUCKOCP 2
-#define RT5033_PMIC_IRQ_BUCKLV 3
-#define RT5033_PMIC_IRQ_SAFELDOLV 4
-#define RT5033_PMIC_IRQ_LDOLV 5
-#define RT5033_PMIC_IRQ_OT 6
-#define RT5033_PMIC_IRQ_VDDA_UV 7
+#define RT5033_PMIC_IRQ_BUCKOCP BIT(2)
+#define RT5033_PMIC_IRQ_BUCKLV BIT(3)
+#define RT5033_PMIC_IRQ_SAFELDOLV BIT(4)
+#define RT5033_PMIC_IRQ_LDOLV BIT(5)
+#define RT5033_PMIC_IRQ_OT BIT(6)
+#define RT5033_PMIC_IRQ_VDDA_UV BIT(7)
#endif /* __RT5033_PRIVATE_H__ */
diff --git a/include/linux/mfd/rt5033.h b/include/linux/mfd/rt5033.h
index 6cff5cf458d2..8f306ac15a27 100644
--- a/include/linux/mfd/rt5033.h
+++ b/include/linux/mfd/rt5033.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* MFD core driver for the RT5033
*
* Copyright (C) 2014 Samsung Electronics
* Author: Beomho Seo <beomho.seo@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published bythe Free Software Foundation.
*/
#ifndef __RT5033_H__
@@ -52,10 +49,9 @@ struct rt5033_charger_data {
};
struct rt5033_charger {
- struct device *dev;
- struct rt5033_dev *rt5033;
- struct power_supply psy;
-
+ struct device *dev;
+ struct rt5033_dev *rt5033;
+ struct power_supply psy;
struct rt5033_charger_data *chg;
};
diff --git a/include/linux/mfd/rz-mtu3.h b/include/linux/mfd/rz-mtu3.h
new file mode 100644
index 000000000000..c5173bc06270
--- /dev/null
+++ b/include/linux/mfd/rz-mtu3.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ */
+#ifndef __MFD_RZ_MTU3_H__
+#define __MFD_RZ_MTU3_H__
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+
+/* 8-bit shared register offsets macros */
+#define RZ_MTU3_TSTRA 0x080 /* Timer start register A */
+#define RZ_MTU3_TSTRB 0x880 /* Timer start register B */
+
+/* 16-bit shared register offset macros */
+#define RZ_MTU3_TDDRA 0x016 /* Timer dead time data register A */
+#define RZ_MTU3_TDDRB 0x816 /* Timer dead time data register B */
+#define RZ_MTU3_TCDRA 0x014 /* Timer cycle data register A */
+#define RZ_MTU3_TCDRB 0x814 /* Timer cycle data register B */
+#define RZ_MTU3_TCBRA 0x022 /* Timer cycle buffer register A */
+#define RZ_MTU3_TCBRB 0x822 /* Timer cycle buffer register B */
+#define RZ_MTU3_TCNTSA 0x020 /* Timer subcounter A */
+#define RZ_MTU3_TCNTSB 0x820 /* Timer subcounter B */
+
+/*
+ * MTU5 contains 3 timer counter registers and is totally different
+ * from the other channels, so its register offsets must be kept separate.
+ */
+
+/* 8-bit register offset macros of MTU3 channels except MTU5 */
+#define RZ_MTU3_TIER 0 /* Timer interrupt register */
+#define RZ_MTU3_NFCR 1 /* Noise filter control register */
+#define RZ_MTU3_TSR 2 /* Timer status register */
+#define RZ_MTU3_TCR 3 /* Timer control register */
+#define RZ_MTU3_TCR2 4 /* Timer control register 2 */
+
+/* Timer mode register 1 */
+#define RZ_MTU3_TMDR1 5
+#define RZ_MTU3_TMDR1_MD GENMASK(3, 0)
+#define RZ_MTU3_TMDR1_MD_NORMAL FIELD_PREP(RZ_MTU3_TMDR1_MD, 0)
+#define RZ_MTU3_TMDR1_MD_PWMMODE1 FIELD_PREP(RZ_MTU3_TMDR1_MD, 2)
+
+#define RZ_MTU3_TIOR 6 /* Timer I/O control register */
+#define RZ_MTU3_TIORH 6 /* Timer I/O control register H */
+#define RZ_MTU3_TIORL 7 /* Timer I/O control register L */
+/* Only MTU3/4/6/7 have TBTM registers */
+#define RZ_MTU3_TBTM 8 /* Timer buffer operation transfer mode register */
+
+/* 8-bit MTU5 register offset macros */
+#define RZ_MTU3_TSTR 2 /* MTU5 Timer start register */
+#define RZ_MTU3_TCNTCMPCLR 3 /* MTU5 Timer compare match clear register */
+#define RZ_MTU3_TCRU 4 /* Timer control register U */
+#define RZ_MTU3_TCR2U 5 /* Timer control register 2U */
+#define RZ_MTU3_TIORU 6 /* Timer I/O control register U */
+#define RZ_MTU3_TCRV 7 /* Timer control register V */
+#define RZ_MTU3_TCR2V 8 /* Timer control register 2V */
+#define RZ_MTU3_TIORV 9 /* Timer I/O control register V */
+#define RZ_MTU3_TCRW 10 /* Timer control register W */
+#define RZ_MTU3_TCR2W 11 /* Timer control register 2W */
+#define RZ_MTU3_TIORW 12 /* Timer I/O control register W */
+
+/* 16-bit register offset macros of MTU3 channels except MTU5 */
+#define RZ_MTU3_TCNT 0 /* Timer counter */
+#define RZ_MTU3_TGRA 1 /* Timer general register A */
+#define RZ_MTU3_TGRB 2 /* Timer general register B */
+#define RZ_MTU3_TGRC 3 /* Timer general register C */
+#define RZ_MTU3_TGRD 4 /* Timer general register D */
+#define RZ_MTU3_TGRE 5 /* Timer general register E */
+#define RZ_MTU3_TGRF 6 /* Timer general register F */
+/* Timer A/D converter start request registers */
+#define RZ_MTU3_TADCR 7 /* control register */
+#define RZ_MTU3_TADCORA 8 /* cycle set register A */
+#define RZ_MTU3_TADCORB 9 /* cycle set register B */
+#define RZ_MTU3_TADCOBRA 10 /* cycle set buffer register A */
+#define RZ_MTU3_TADCOBRB 11 /* cycle set buffer register B */
+
+/* 16-bit MTU5 register offset macros */
+#define RZ_MTU3_TCNTU 0 /* MTU5 Timer counter U */
+#define RZ_MTU3_TGRU 1 /* MTU5 Timer general register U */
+#define RZ_MTU3_TCNTV 2 /* MTU5 Timer counter V */
+#define RZ_MTU3_TGRV 3 /* MTU5 Timer general register V */
+#define RZ_MTU3_TCNTW 4 /* MTU5 Timer counter W */
+#define RZ_MTU3_TGRW 5 /* MTU5 Timer general register W */
+
+/* 32-bit register offset */
+#define RZ_MTU3_TCNTLW 0 /* Timer longword counter */
+#define RZ_MTU3_TGRALW 1 /* Timer longword general register A */
+#define RZ_MTU3_TGRBLW 2 /* Timer longword general register B */
+
+#define RZ_MTU3_TMDR3 0x191 /* MTU1 Timer Mode Register 3 */
+
+/* Macros for setting registers */
+#define RZ_MTU3_TCR_CCLR GENMASK(7, 5)
+#define RZ_MTU3_TCR_CKEG GENMASK(4, 3)
+#define RZ_MTU3_TCR_TPCS GENMASK(2, 0)
+#define RZ_MTU3_TCR_CCLR_TGRA BIT(5)
+#define RZ_MTU3_TCR_CCLR_TGRC FIELD_PREP(RZ_MTU3_TCR_CCLR, 5)
+#define RZ_MTU3_TCR_CKEG_RISING FIELD_PREP(RZ_MTU3_TCR_CKEG, 0)
+
+#define RZ_MTU3_TIOR_IOB GENMASK(7, 4)
+#define RZ_MTU3_TIOR_IOA GENMASK(3, 0)
+#define RZ_MTU3_TIOR_OC_RETAIN 0
+#define RZ_MTU3_TIOR_OC_INIT_OUT_LO_HI_OUT 2
+#define RZ_MTU3_TIOR_OC_INIT_OUT_HI_TOGGLE_OUT 7
+
+#define RZ_MTU3_TIOR_OC_IOA_H_COMP_MATCH \
+ FIELD_PREP(RZ_MTU3_TIOR_IOA, RZ_MTU3_TIOR_OC_INIT_OUT_LO_HI_OUT)
+#define RZ_MTU3_TIOR_OC_IOB_TOGGLE \
+ FIELD_PREP(RZ_MTU3_TIOR_IOB, RZ_MTU3_TIOR_OC_INIT_OUT_HI_TOGGLE_OUT)
+
+enum rz_mtu3_channels {
+ RZ_MTU3_CHAN_0,
+ RZ_MTU3_CHAN_1,
+ RZ_MTU3_CHAN_2,
+ RZ_MTU3_CHAN_3,
+ RZ_MTU3_CHAN_4,
+ RZ_MTU3_CHAN_5,
+ RZ_MTU3_CHAN_6,
+ RZ_MTU3_CHAN_7,
+ RZ_MTU3_CHAN_8,
+ RZ_MTU_NUM_CHANNELS
+};
+
+/**
+ * struct rz_mtu3_channel - MTU3 channel private data
+ *
+ * @dev: device handle
+ * @channel_number: channel number
+ * @lock: Lock to protect channel state
+ * @is_busy: channel state
+ */
+struct rz_mtu3_channel {
+ struct device *dev;
+ unsigned int channel_number;
+ struct mutex lock;
+ bool is_busy;
+};
+
+/**
+ * struct rz_mtu3 - MTU3 core private data
+ *
+ * @clk: MTU3 module clock
+ * @channels: HW channels
+ * @priv_data: MTU3 core driver private data
+ */
+struct rz_mtu3 {
+ struct clk *clk;
+ struct rz_mtu3_channel channels[RZ_MTU_NUM_CHANNELS];
+
+ void *priv_data;
+};
+
+#if IS_ENABLED(CONFIG_RZ_MTU3)
+static inline bool rz_mtu3_request_channel(struct rz_mtu3_channel *ch)
+{
+ mutex_lock(&ch->lock);
+ if (ch->is_busy) {
+ mutex_unlock(&ch->lock);
+ return false;
+ }
+
+ ch->is_busy = true;
+ mutex_unlock(&ch->lock);
+
+ return true;
+}
+
+static inline void rz_mtu3_release_channel(struct rz_mtu3_channel *ch)
+{
+ mutex_lock(&ch->lock);
+ ch->is_busy = false;
+ mutex_unlock(&ch->lock);
+}
+
+bool rz_mtu3_is_enabled(struct rz_mtu3_channel *ch);
+void rz_mtu3_disable(struct rz_mtu3_channel *ch);
+int rz_mtu3_enable(struct rz_mtu3_channel *ch);
+
+u8 rz_mtu3_8bit_ch_read(struct rz_mtu3_channel *ch, u16 off);
+u16 rz_mtu3_16bit_ch_read(struct rz_mtu3_channel *ch, u16 off);
+u32 rz_mtu3_32bit_ch_read(struct rz_mtu3_channel *ch, u16 off);
+u16 rz_mtu3_shared_reg_read(struct rz_mtu3_channel *ch, u16 off);
+
+void rz_mtu3_8bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u8 val);
+void rz_mtu3_16bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u16 val);
+void rz_mtu3_32bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u32 val);
+void rz_mtu3_shared_reg_write(struct rz_mtu3_channel *ch, u16 off, u16 val);
+void rz_mtu3_shared_reg_update_bit(struct rz_mtu3_channel *ch, u16 off,
+ u16 pos, u8 val);
+#else
+static inline bool rz_mtu3_request_channel(struct rz_mtu3_channel *ch)
+{
+ return false;
+}
+
+static inline void rz_mtu3_release_channel(struct rz_mtu3_channel *ch)
+{
+}
+
+static inline bool rz_mtu3_is_enabled(struct rz_mtu3_channel *ch)
+{
+ return false;
+}
+
+static inline void rz_mtu3_disable(struct rz_mtu3_channel *ch)
+{
+}
+
+static inline int rz_mtu3_enable(struct rz_mtu3_channel *ch)
+{
+ return 0;
+}
+
+static inline u8 rz_mtu3_8bit_ch_read(struct rz_mtu3_channel *ch, u16 off)
+{
+ return 0;
+}
+
+static inline u16 rz_mtu3_16bit_ch_read(struct rz_mtu3_channel *ch, u16 off)
+{
+ return 0;
+}
+
+static inline u32 rz_mtu3_32bit_ch_read(struct rz_mtu3_channel *ch, u16 off)
+{
+ return 0;
+}
+
+static inline u16 rz_mtu3_shared_reg_read(struct rz_mtu3_channel *ch, u16 off)
+{
+ return 0;
+}
+
+static inline void rz_mtu3_8bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u8 val)
+{
+}
+
+static inline void rz_mtu3_16bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u16 val)
+{
+}
+
+static inline void rz_mtu3_32bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u32 val)
+{
+}
+
+static inline void rz_mtu3_shared_reg_write(struct rz_mtu3_channel *ch, u16 off, u16 val)
+{
+}
+
+static inline void rz_mtu3_shared_reg_update_bit(struct rz_mtu3_channel *ch,
+ u16 off, u16 pos, u8 val)
+{
+}
+#endif
+
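+/*
+ * Typical consumer flow (sketch only; the TCR/TGRA programming shown is
+ * illustrative, not a complete configuration):
+ *
+ *	if (!rz_mtu3_request_channel(ch))
+ *		return -EBUSY;
+ *
+ *	rz_mtu3_8bit_ch_write(ch, RZ_MTU3_TCR, RZ_MTU3_TCR_CKEG_RISING);
+ *	rz_mtu3_16bit_ch_write(ch, RZ_MTU3_TGRA, period);
+ *	rz_mtu3_enable(ch);
+ *	...
+ *	rz_mtu3_disable(ch);
+ *	rz_mtu3_release_channel(ch);
+ */
+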
+#endif /* __MFD_RZ_MTU3_H__ */
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 5a23dd4df432..a212b9f72bc9 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -1,14 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * core.h
- *
- * copyright (c) 2011 Samsung Electronics Co., Ltd
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_SEC_CORE_H
@@ -27,6 +20,7 @@
#define MIN_850_MV 850000
#define MIN_800_MV 800000
#define MIN_750_MV 750000
+#define MIN_650_MV 650000
#define MIN_600_MV 600000
#define MIN_500_MV 500000
@@ -39,9 +33,9 @@
#define STEP_12_5_MV 12500
#define STEP_6_25_MV 6250
+struct gpio_desc;
+
enum sec_device_type {
- S5M8751X,
- S5M8763X,
S5M8767X,
S2MPA01,
S2MPS11X,
@@ -71,11 +65,8 @@ struct sec_pmic_dev {
struct i2c_client *i2c;
unsigned long device_type;
- int irq_base;
int irq;
struct regmap_irq_chip_data *irq_data;
-
- bool wakeup;
};
int sec_irq_init(struct sec_pmic_dev *sec_pmic);
@@ -85,15 +76,8 @@ int sec_irq_resume(struct sec_pmic_dev *sec_pmic);
struct sec_platform_data {
struct sec_regulator_data *regulators;
struct sec_opmode_data *opmode;
- int device_type;
int num_regulators;
- int irq_base;
- int (*cfg_pmic_irq)(void);
-
- bool wakeup;
- bool buck_voltage_lock;
-
int buck_gpios[3];
int buck_ds[3];
unsigned int buck2_voltage[8];
@@ -103,35 +87,12 @@ struct sec_platform_data {
unsigned int buck4_voltage[8];
bool buck4_gpiodvs;
- int buck_set1;
- int buck_set2;
- int buck_set3;
- int buck2_enable;
- int buck3_enable;
- int buck4_enable;
int buck_default_idx;
- int buck2_default_idx;
- int buck3_default_idx;
- int buck4_default_idx;
-
int buck_ramp_delay;
- int buck2_ramp_delay;
- int buck34_ramp_delay;
- int buck5_ramp_delay;
- int buck16_ramp_delay;
- int buck7810_ramp_delay;
- int buck9_ramp_delay;
- int buck24_ramp_delay;
- int buck3_ramp_delay;
- int buck7_ramp_delay;
- int buck8910_ramp_delay;
-
- bool buck1_ramp_enable;
bool buck2_ramp_enable;
bool buck3_ramp_enable;
bool buck4_ramp_enable;
- bool buck6_ramp_enable;
int buck2_init;
int buck3_init;
@@ -151,7 +112,7 @@ struct sec_regulator_data {
int id;
struct regulator_init_data *initdata;
struct device_node *reg_node;
- int ext_control_gpio;
+ struct gpio_desc *ext_control_gpiod;
};
/*
diff --git a/include/linux/mfd/samsung/irq.h b/include/linux/mfd/samsung/irq.h
index 667aa40486dd..3fd2775eb9bb 100644
--- a/include/linux/mfd/samsung/irq.h
+++ b/include/linux/mfd/samsung/irq.h
@@ -1,13 +1,7 @@
-/* irq.h
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* Copyright (c) 2012 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_SEC_IRQ_H
@@ -200,54 +194,4 @@ enum s5m8767_irq {
#define S5M8767_IRQ_RTC1S_MASK (1 << 4)
#define S5M8767_IRQ_WTSR_MASK (1 << 5)
-enum s5m8763_irq {
- S5M8763_IRQ_DCINF,
- S5M8763_IRQ_DCINR,
- S5M8763_IRQ_JIGF,
- S5M8763_IRQ_JIGR,
- S5M8763_IRQ_PWRONF,
- S5M8763_IRQ_PWRONR,
-
- S5M8763_IRQ_WTSREVNT,
- S5M8763_IRQ_SMPLEVNT,
- S5M8763_IRQ_ALARM1,
- S5M8763_IRQ_ALARM0,
-
- S5M8763_IRQ_ONKEY1S,
- S5M8763_IRQ_TOPOFFR,
- S5M8763_IRQ_DCINOVPR,
- S5M8763_IRQ_CHGRSTF,
- S5M8763_IRQ_DONER,
- S5M8763_IRQ_CHGFAULT,
-
- S5M8763_IRQ_LOBAT1,
- S5M8763_IRQ_LOBAT2,
-
- S5M8763_IRQ_NR,
-};
-
-#define S5M8763_IRQ_DCINF_MASK (1 << 2)
-#define S5M8763_IRQ_DCINR_MASK (1 << 3)
-#define S5M8763_IRQ_JIGF_MASK (1 << 4)
-#define S5M8763_IRQ_JIGR_MASK (1 << 5)
-#define S5M8763_IRQ_PWRONF_MASK (1 << 6)
-#define S5M8763_IRQ_PWRONR_MASK (1 << 7)
-
-#define S5M8763_IRQ_WTSREVNT_MASK (1 << 0)
-#define S5M8763_IRQ_SMPLEVNT_MASK (1 << 1)
-#define S5M8763_IRQ_ALARM1_MASK (1 << 2)
-#define S5M8763_IRQ_ALARM0_MASK (1 << 3)
-
-#define S5M8763_IRQ_ONKEY1S_MASK (1 << 0)
-#define S5M8763_IRQ_TOPOFFR_MASK (1 << 2)
-#define S5M8763_IRQ_DCINOVPR_MASK (1 << 3)
-#define S5M8763_IRQ_CHGRSTF_MASK (1 << 4)
-#define S5M8763_IRQ_DONER_MASK (1 << 5)
-#define S5M8763_IRQ_CHGFAULT_MASK (1 << 7)
-
-#define S5M8763_IRQ_LOBAT1_MASK (1 << 0)
-#define S5M8763_IRQ_LOBAT2_MASK (1 << 1)
-
-#define S5M8763_ENRAMP (1 << 4)
-
#endif /* __LINUX_MFD_SEC_IRQ_H */
diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h
index 48c3c5be7eb1..0204decfc9aa 100644
--- a/include/linux/mfd/samsung/rtc.h
+++ b/include/linux/mfd/samsung/rtc.h
@@ -1,18 +1,7 @@
-/* rtc.h
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* Copyright (c) 2011-2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_MFD_SEC_RTC_H
@@ -141,15 +130,4 @@ enum s2mps_rtc_reg {
#define WTSR_ENABLE_SHIFT 6
#define WTSR_ENABLE_MASK (1 << WTSR_ENABLE_SHIFT)
-enum {
- RTC_SEC = 0,
- RTC_MIN,
- RTC_HOUR,
- RTC_WEEKDAY,
- RTC_DATE,
- RTC_MONTH,
- RTC_YEAR1,
- RTC_YEAR2,
-};
-
#endif /* __LINUX_MFD_SEC_RTC_H */
diff --git a/include/linux/mfd/samsung/s2mpa01.h b/include/linux/mfd/samsung/s2mpa01.h
index 2766108bca2f..0762e9de6f2f 100644
--- a/include/linux/mfd/samsung/s2mpa01.h
+++ b/include/linux/mfd/samsung/s2mpa01.h
@@ -1,12 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2013 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_S2MPA01_H
diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
index 2c14eeca46f0..4805c90609c4 100644
--- a/include/linux/mfd/samsung/s2mps11.h
+++ b/include/linux/mfd/samsung/s2mps11.h
@@ -1,14 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * s2mps11.h
- *
* Copyright (c) 2012 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_S2MPS11_H
@@ -177,7 +170,9 @@ enum s2mps11_regulators {
#define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT)
#define S2MPS11_ENABLE_SHIFT 0x06
#define S2MPS11_LDO_N_VOLTAGES (S2MPS11_LDO_VSEL_MASK + 1)
-#define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
+#define S2MPS11_BUCK12346_N_VOLTAGES 153
+#define S2MPS11_BUCK5_N_VOLTAGES 216
+#define S2MPS11_BUCK7810_N_VOLTAGES 225
#define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1)
#define S2MPS11_RAMP_DELAY 25000 /* uV/us */
@@ -195,4 +190,9 @@ enum s2mps11_regulators {
#define S2MPS11_BUCK6_RAMP_EN_SHIFT 0
#define S2MPS11_PMIC_EN_SHIFT 6
+/*
+ * Bits for "enable suspend" (On/Off controlled by PWREN)
+ * are the same as in S2MPS14: S2MPS14_ENABLE_SUSPEND
+ */
+
#endif /* __LINUX_MFD_S2MPS11_H */
diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h
index 239e977ba45d..b96d8a11dcd3 100644
--- a/include/linux/mfd/samsung/s2mps13.h
+++ b/include/linux/mfd/samsung/s2mps13.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * s2mps13.h
- *
* Copyright (c) 2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_MFD_S2MPS13_H
diff --git a/include/linux/mfd/samsung/s2mps14.h b/include/linux/mfd/samsung/s2mps14.h
index c92f4782afb5..f4afa0cfc24f 100644
--- a/include/linux/mfd/samsung/s2mps14.h
+++ b/include/linux/mfd/samsung/s2mps14.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * s2mps14.h
- *
* Copyright (c) 2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_MFD_S2MPS14_H
diff --git a/include/linux/mfd/samsung/s2mps15.h b/include/linux/mfd/samsung/s2mps15.h
index 36d35287c3c0..eac6bf74b72e 100644
--- a/include/linux/mfd/samsung/s2mps15.h
+++ b/include/linux/mfd/samsung/s2mps15.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2015 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_MFD_S2MPS15_H
diff --git a/include/linux/mfd/samsung/s2mpu02.h b/include/linux/mfd/samsung/s2mpu02.h
index 47ae9bc583a7..76cd5380cf0f 100644
--- a/include/linux/mfd/samsung/s2mpu02.h
+++ b/include/linux/mfd/samsung/s2mpu02.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * s2mpu02.h
- *
* Copyright (c) 2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_MFD_S2MPU02_H
diff --git a/include/linux/mfd/samsung/s5m8763.h b/include/linux/mfd/samsung/s5m8763.h
deleted file mode 100644
index e025418e5589..000000000000
--- a/include/linux/mfd/samsung/s5m8763.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/* s5m8763.h
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#ifndef __LINUX_MFD_S5M8763_H
-#define __LINUX_MFD_S5M8763_H
-
-/* S5M8763 registers */
-enum s5m8763_reg {
- S5M8763_REG_IRQ1,
- S5M8763_REG_IRQ2,
- S5M8763_REG_IRQ3,
- S5M8763_REG_IRQ4,
- S5M8763_REG_IRQM1,
- S5M8763_REG_IRQM2,
- S5M8763_REG_IRQM3,
- S5M8763_REG_IRQM4,
- S5M8763_REG_STATUS1,
- S5M8763_REG_STATUS2,
- S5M8763_REG_STATUSM1,
- S5M8763_REG_STATUSM2,
- S5M8763_REG_CHGR1,
- S5M8763_REG_CHGR2,
- S5M8763_REG_LDO_ACTIVE_DISCHARGE1,
- S5M8763_REG_LDO_ACTIVE_DISCHARGE2,
- S5M8763_REG_BUCK_ACTIVE_DISCHARGE3,
- S5M8763_REG_ONOFF1,
- S5M8763_REG_ONOFF2,
- S5M8763_REG_ONOFF3,
- S5M8763_REG_ONOFF4,
- S5M8763_REG_BUCK1_VOLTAGE1,
- S5M8763_REG_BUCK1_VOLTAGE2,
- S5M8763_REG_BUCK1_VOLTAGE3,
- S5M8763_REG_BUCK1_VOLTAGE4,
- S5M8763_REG_BUCK2_VOLTAGE1,
- S5M8763_REG_BUCK2_VOLTAGE2,
- S5M8763_REG_BUCK3,
- S5M8763_REG_BUCK4,
- S5M8763_REG_LDO1_LDO2,
- S5M8763_REG_LDO3,
- S5M8763_REG_LDO4,
- S5M8763_REG_LDO5,
- S5M8763_REG_LDO6,
- S5M8763_REG_LDO7,
- S5M8763_REG_LDO7_LDO8,
- S5M8763_REG_LDO9_LDO10,
- S5M8763_REG_LDO11,
- S5M8763_REG_LDO12,
- S5M8763_REG_LDO13,
- S5M8763_REG_LDO14,
- S5M8763_REG_LDO15,
- S5M8763_REG_LDO16,
- S5M8763_REG_BKCHR,
- S5M8763_REG_LBCNFG1,
- S5M8763_REG_LBCNFG2,
-};
-
-/* S5M8763 regulator ids */
-enum s5m8763_regulators {
- S5M8763_LDO1,
- S5M8763_LDO2,
- S5M8763_LDO3,
- S5M8763_LDO4,
- S5M8763_LDO5,
- S5M8763_LDO6,
- S5M8763_LDO7,
- S5M8763_LDO8,
- S5M8763_LDO9,
- S5M8763_LDO10,
- S5M8763_LDO11,
- S5M8763_LDO12,
- S5M8763_LDO13,
- S5M8763_LDO14,
- S5M8763_LDO15,
- S5M8763_LDO16,
- S5M8763_BUCK1,
- S5M8763_BUCK2,
- S5M8763_BUCK3,
- S5M8763_BUCK4,
- S5M8763_AP_EN32KHZ,
- S5M8763_CP_EN32KHZ,
- S5M8763_ENCHGVI,
- S5M8763_ESAFEUSB1,
- S5M8763_ESAFEUSB2,
-};
-
-#define S5M8763_ENRAMP (1 << 4)
-#endif /* __LINUX_MFD_S5M8763_H */
diff --git a/include/linux/mfd/samsung/s5m8767.h b/include/linux/mfd/samsung/s5m8767.h
index 243b58fec33d..704f8d80e96e 100644
--- a/include/linux/mfd/samsung/s5m8767.h
+++ b/include/linux/mfd/samsung/s5m8767.h
@@ -1,13 +1,7 @@
-/* s5m8767.h
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* Copyright (c) 2011 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_S5M8767_H
diff --git a/include/linux/mfd/sc27xx-pmic.h b/include/linux/mfd/sc27xx-pmic.h
new file mode 100644
index 000000000000..57e45c0b3ae2
--- /dev/null
+++ b/include/linux/mfd/sc27xx-pmic.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_MFD_SC27XX_PMIC_H
+#define __LINUX_MFD_SC27XX_PMIC_H
+
+extern enum usb_charger_type sprd_pmic_detect_charger_type(struct device *dev);
+
+#endif /* __LINUX_MFD_SC27XX_PMIC_H */
diff --git a/include/linux/mfd/si476x-core.h b/include/linux/mfd/si476x-core.h
index 674b45d5a757..dd95c37ca134 100644
--- a/include/linux/mfd/si476x-core.h
+++ b/include/linux/mfd/si476x-core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/media/si476x-core.h -- Common definitions for si476x core
* device
@@ -6,16 +7,6 @@
* Copyright (C) 2013 Andrey Smirnov
*
* Author: Andrey Smirnov <andrew.smirnov@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
*/
#ifndef SI476X_CORE_H
@@ -66,7 +57,7 @@ enum si476x_mfd_cells {
* @SI476X_POWER_DOWN: In this state all regulators are turned off
* and the reset line is pulled low. The device is completely
* inactive.
- * @SI476X_POWER_UP_FULL: In this state all the power regualtors are
+ * @SI476X_POWER_UP_FULL: In this state all the power regulators are
 * turned on, reset line pulled high, IRQ line is enabled (polling is
* active for polling use scenario) and device is turned on with
* POWER_UP command. The device is ready to be used.
diff --git a/include/linux/mfd/si476x-platform.h b/include/linux/mfd/si476x-platform.h
index 88bb93b7a9d5..18363b773d07 100644
--- a/include/linux/mfd/si476x-platform.h
+++ b/include/linux/mfd/si476x-platform.h
@@ -1,19 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/media/si476x-platform.h -- Platform data specific definitions
*
* Copyright (C) 2013 Andrey Smirnov
*
* Author: Andrey Smirnov <andrew.smirnov@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
*/
#ifndef __SI476X_PLATFORM_H__
diff --git a/include/linux/mfd/si476x-reports.h b/include/linux/mfd/si476x-reports.h
index e0b9455a79c0..93b34184699d 100644
--- a/include/linux/mfd/si476x-reports.h
+++ b/include/linux/mfd/si476x-reports.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * include/media/si476x-reports.h -- Definitions of the data formats
* returned by debugfs hooks
@@ -5,16 +6,6 @@
* Copyright (C) 2013 Andrey Smirnov
*
* Author: Andrey Smirnov <andrew.smirnov@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
*/
#ifndef __SI476X_REPORTS_H__
diff --git a/include/linux/mfd/sky81452.h b/include/linux/mfd/sky81452.h
index b0925fa3e9ef..b08570ff34df 100644
--- a/include/linux/mfd/sky81452.h
+++ b/include/linux/mfd/sky81452.h
@@ -1,30 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* sky81452.h SKY81452 MFD driver
*
* Copyright 2014 Skyworks Solutions Inc.
* Author : Gyungoh Yoo <jack.yoo@skyworksinc.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SKY81452_H
#define _SKY81452_H
-#include <linux/platform_data/sky81452-backlight.h>
#include <linux/regulator/machine.h>
struct sky81452_platform_data {
- struct sky81452_bl_platform_data *bl_pdata;
struct regulator_init_data *regulator_init_data;
};
diff --git a/include/linux/mfd/smsc.h b/include/linux/mfd/smsc.h
deleted file mode 100644
index 9747b29f356f..000000000000
--- a/include/linux/mfd/smsc.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * SMSC ECE1099
- *
- * Copyright 2012 Texas Instruments Inc.
- *
- * Author: Sourav Poddar <sourav.poddar@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#ifndef __LINUX_MFD_SMSC_H
-#define __LINUX_MFD_SMSC_H
-
-#include <linux/regmap.h>
-
-#define SMSC_ID_ECE1099 1
-#define SMSC_NUM_CLIENTS 2
-
-#define SMSC_BASE_ADDR 0x38
-#define OMAP_GPIO_SMSC_IRQ 151
-
-#define SMSC_MAXGPIO 32
-#define SMSC_BANK(offs) ((offs) >> 3)
-#define SMSC_BIT(offs) (1u << ((offs) & 0x7))
-
-struct smsc {
- struct device *dev;
- struct i2c_client *i2c_clients[SMSC_NUM_CLIENTS];
- struct regmap *regmap;
- int clk;
- /* Stored chip id */
- int id;
-};
-
-struct smsc_gpio;
-struct smsc_keypad;
-
-static inline int smsc_read(struct device *child, unsigned int reg,
- unsigned int *dest)
-{
- struct smsc *smsc = dev_get_drvdata(child->parent);
-
- return regmap_read(smsc->regmap, reg, dest);
-}
-
-static inline int smsc_write(struct device *child, unsigned int reg,
- unsigned int value)
-{
- struct smsc *smsc = dev_get_drvdata(child->parent);
-
- return regmap_write(smsc->regmap, reg, value);
-}
-
-/* Registers for SMSC */
-#define SMSC_RESET 0xF5
-#define SMSC_GRP_INT 0xF9
-#define SMSC_CLK_CTRL 0xFA
-#define SMSC_WKUP_CTRL 0xFB
-#define SMSC_DEV_ID 0xFC
-#define SMSC_DEV_REV 0xFD
-#define SMSC_VEN_ID_L 0xFE
-#define SMSC_VEN_ID_H 0xFF
-
-/* CLK VALUE */
-#define SMSC_CLK_VALUE 0x13
-
-/* Registers for function GPIO INPUT */
-#define SMSC_GPIO_DATA_IN_START 0x00
-
-/* Registers for function GPIO OUPUT */
-#define SMSC_GPIO_DATA_OUT_START 0x05
-
-/* Definitions for SMSC GPIO CONFIGURATION REGISTER*/
-#define SMSC_GPIO_INPUT_LOW 0x01
-#define SMSC_GPIO_INPUT_RISING 0x09
-#define SMSC_GPIO_INPUT_FALLING 0x11
-#define SMSC_GPIO_INPUT_BOTH_EDGE 0x19
-#define SMSC_GPIO_OUTPUT_PP 0x21
-#define SMSC_GPIO_OUTPUT_OP 0x31
-
-#define GRP_INT_STAT 0xf9
-#define SMSC_GPI_INT 0x0f
-#define SMSC_CFG_START 0x0A
-
-/* Registers for SMSC GPIO INTERRUPT STATUS REGISTER*/
-#define SMSC_GPIO_INT_STAT_START 0x32
-
-/* Registers for SMSC GPIO INTERRUPT MASK REGISTER*/
-#define SMSC_GPIO_INT_MASK_START 0x37
-
-/* Registers for SMSC function KEYPAD*/
-#define SMSC_KP_OUT 0x40
-#define SMSC_KP_IN 0x41
-#define SMSC_KP_INT_STAT 0x42
-#define SMSC_KP_INT_MASK 0x43
-
-/* Definitions for keypad */
-#define SMSC_KP_KSO 0x70
-#define SMSC_KP_KSI 0x51
-#define SMSC_KSO_ALL_LOW 0x20
-#define SMSC_KP_SET_LOW_PWR 0x0B
-#define SMSC_KP_SET_HIGH 0xFF
-#define SMSC_KSO_EVAL 0x00
-
-#endif /* __LINUX_MFD_SMSC_H */
diff --git a/include/linux/mfd/sta2x11-mfd.h b/include/linux/mfd/sta2x11-mfd.h
index 9a855ac11cbf..2001ca5c44a9 100644
--- a/include/linux/mfd/sta2x11-mfd.h
+++ b/include/linux/mfd/sta2x11-mfd.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2009-2011 Wind River Systems, Inc.
* Copyright (c) 2011 ST Microelectronics (Alessandro Rubini)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* The STMicroelectronics ConneXt (STA2X11) chip has several unrelated
 * functions in one PCI endpoint function. This driver simply
 * registers the platform devices in this iomem region and exports a few
diff --git a/include/linux/mfd/stm32-lptimer.h b/include/linux/mfd/stm32-lptimer.h
new file mode 100644
index 000000000000..06d3f11dc3c9
--- /dev/null
+++ b/include/linux/mfd/stm32-lptimer.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * STM32 Low-Power Timer parent driver.
+ * Copyright (C) STMicroelectronics 2017
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>
+ * Inspired by Benjamin Gaignard's stm32-timers driver
+ */
+
+#ifndef _LINUX_STM32_LPTIMER_H_
+#define _LINUX_STM32_LPTIMER_H_
+
+#include <linux/clk.h>
+#include <linux/regmap.h>
+
+#define STM32_LPTIM_ISR 0x00 /* Interrupt and Status Reg */
+#define STM32_LPTIM_ICR 0x04 /* Interrupt Clear Reg */
+#define STM32_LPTIM_IER 0x08 /* Interrupt Enable Reg */
+#define STM32_LPTIM_CFGR 0x0C /* Configuration Reg */
+#define STM32_LPTIM_CR 0x10 /* Control Reg */
+#define STM32_LPTIM_CMP 0x14 /* Compare Reg */
+#define STM32_LPTIM_ARR 0x18 /* Autoreload Reg */
+#define STM32_LPTIM_CNT 0x1C /* Counter Reg */
+
+/* STM32_LPTIM_ISR - bit fields */
+#define STM32_LPTIM_CMPOK_ARROK GENMASK(4, 3)
+#define STM32_LPTIM_ARROK BIT(4)
+#define STM32_LPTIM_CMPOK BIT(3)
+
+/* STM32_LPTIM_ICR - bit fields */
+#define STM32_LPTIM_ARRMCF BIT(1)
+#define STM32_LPTIM_CMPOKCF_ARROKCF GENMASK(4, 3)
+
+/* STM32_LPTIM_IER - bit fields */
+#define STM32_LPTIM_ARRMIE BIT(1)
+
+/* STM32_LPTIM_CR - bit fields */
+#define STM32_LPTIM_CNTSTRT BIT(2)
+#define STM32_LPTIM_SNGSTRT BIT(1)
+#define STM32_LPTIM_ENABLE BIT(0)
+
+/* STM32_LPTIM_CFGR - bit fields */
+#define STM32_LPTIM_ENC BIT(24)
+#define STM32_LPTIM_COUNTMODE BIT(23)
+#define STM32_LPTIM_WAVPOL BIT(21)
+#define STM32_LPTIM_PRESC GENMASK(11, 9)
+#define STM32_LPTIM_CKPOL GENMASK(2, 1)
+
+/* STM32_LPTIM_CKPOL */
+#define STM32_LPTIM_CKPOL_RISING_EDGE 0
+#define STM32_LPTIM_CKPOL_FALLING_EDGE 1
+#define STM32_LPTIM_CKPOL_BOTH_EDGES 2
+
+/* STM32_LPTIM_ARR */
+#define STM32_LPTIM_MAX_ARR 0xFFFF
+
+/**
+ * struct stm32_lptimer - STM32 Low-Power Timer data assigned by parent device
+ * @clk: clock reference for this instance
+ * @regmap: register map reference for this instance
+ * @has_encoder: indicates this Low-Power Timer supports encoder mode
+ */
+struct stm32_lptimer {
+ struct clk *clk;
+ struct regmap *regmap;
+ bool has_encoder;
+};
+
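+/*
+ * A child driver would typically enable the timer before programming ARR
+ * (which is only writable while the timer is enabled) and then start
+ * continuous counting, roughly (sketch only):
+ *
+ *	regmap_update_bits(priv->regmap, STM32_LPTIM_CR,
+ *			   STM32_LPTIM_ENABLE, STM32_LPTIM_ENABLE);
+ *	regmap_write(priv->regmap, STM32_LPTIM_ARR, STM32_LPTIM_MAX_ARR);
+ *	regmap_update_bits(priv->regmap, STM32_LPTIM_CR,
+ *			   STM32_LPTIM_CNTSTRT, STM32_LPTIM_CNTSTRT);
+ */
+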
+#endif
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
index ce7346e7f77a..1b94325febb3 100644
--- a/include/linux/mfd/stm32-timers.h
+++ b/include/linux/mfd/stm32-timers.h
@@ -1,15 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STMicroelectronics 2016
- *
* Author: Benjamin Gaignard <benjamin.gaignard@st.com>
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _LINUX_STM32_GPTIMER_H_
#define _LINUX_STM32_GPTIMER_H_
#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/regmap.h>
#define TIM_CR1 0x00 /* Control Register 1 */
@@ -29,6 +29,9 @@
#define TIM_CCR3 0x3C /* Capt/Comp Register 3 */
#define TIM_CCR4 0x40 /* Capt/Comp Register 4 */
#define TIM_BDTR 0x44 /* Break and Dead-Time Reg */
+#define TIM_DCR 0x48 /* DMA control register */
+#define TIM_DMAR 0x4C /* DMA register for transfer */
+#define TIM_TISEL 0x68 /* Input Selection */
#define TIM_CR1_CEN BIT(0) /* Counter Enable */
#define TIM_CR1_DIR BIT(4) /* Counter Direction */
@@ -38,38 +41,104 @@
#define TIM_SMCR_SMS (BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */
#define TIM_SMCR_TS (BIT(4) | BIT(5) | BIT(6)) /* Trigger selection */
#define TIM_DIER_UIE BIT(0) /* Update interrupt */
+#define TIM_DIER_UDE BIT(8) /* Update DMA request Enable */
+#define TIM_DIER_CC1DE BIT(9) /* CC1 DMA request Enable */
+#define TIM_DIER_CC2DE BIT(10) /* CC2 DMA request Enable */
+#define TIM_DIER_CC3DE BIT(11) /* CC3 DMA request Enable */
+#define TIM_DIER_CC4DE BIT(12) /* CC4 DMA request Enable */
+#define TIM_DIER_COMDE BIT(13) /* COM DMA request Enable */
+#define TIM_DIER_TDE BIT(14) /* Trigger DMA request Enable */
#define TIM_SR_UIF BIT(0) /* Update interrupt flag */
#define TIM_EGR_UG BIT(0) /* Update Generation */
#define TIM_CCMR_PE BIT(3) /* Channel Preload Enable */
#define TIM_CCMR_M1 (BIT(6) | BIT(5)) /* Channel PWM Mode 1 */
+#define TIM_CCMR_CC1S (BIT(0) | BIT(1)) /* Capture/compare 1 sel */
+#define TIM_CCMR_IC1PSC GENMASK(3, 2) /* Input capture 1 prescaler */
+#define TIM_CCMR_CC2S (BIT(8) | BIT(9)) /* Capture/compare 2 sel */
+#define TIM_CCMR_IC2PSC GENMASK(11, 10) /* Input capture 2 prescaler */
+#define TIM_CCMR_CC1S_TI1 BIT(0) /* IC1/IC3 selects TI1/TI3 */
+#define TIM_CCMR_CC1S_TI2 BIT(1) /* IC1/IC3 selects TI2/TI4 */
+#define TIM_CCMR_CC2S_TI2 BIT(8) /* IC2/IC4 selects TI2/TI4 */
+#define TIM_CCMR_CC2S_TI1 BIT(9) /* IC2/IC4 selects TI1/TI3 */
#define TIM_CCER_CC1E BIT(0) /* Capt/Comp 1 out Ena */
#define TIM_CCER_CC1P BIT(1) /* Capt/Comp 1 Polarity */
#define TIM_CCER_CC1NE BIT(2) /* Capt/Comp 1N out Ena */
#define TIM_CCER_CC1NP BIT(3) /* Capt/Comp 1N Polarity */
#define TIM_CCER_CC2E BIT(4) /* Capt/Comp 2 out Ena */
+#define TIM_CCER_CC2P BIT(5) /* Capt/Comp 2 Polarity */
#define TIM_CCER_CC3E BIT(8) /* Capt/Comp 3 out Ena */
+#define TIM_CCER_CC3P BIT(9) /* Capt/Comp 3 Polarity */
#define TIM_CCER_CC4E BIT(12) /* Capt/Comp 4 out Ena */
+#define TIM_CCER_CC4P BIT(13) /* Capt/Comp 4 Polarity */
#define TIM_CCER_CCXE (BIT(0) | BIT(4) | BIT(8) | BIT(12))
-#define TIM_BDTR_BKE BIT(12) /* Break input enable */
-#define TIM_BDTR_BKP BIT(13) /* Break input polarity */
+#define TIM_BDTR_BKE(x) BIT(12 + (x) * 12) /* Break input enable */
+#define TIM_BDTR_BKP(x) BIT(13 + (x) * 12) /* Break input polarity */
#define TIM_BDTR_AOE BIT(14) /* Automatic Output Enable */
#define TIM_BDTR_MOE BIT(15) /* Main Output Enable */
-#define TIM_BDTR_BKF (BIT(16) | BIT(17) | BIT(18) | BIT(19))
-#define TIM_BDTR_BK2F (BIT(20) | BIT(21) | BIT(22) | BIT(23))
-#define TIM_BDTR_BK2E BIT(24) /* Break 2 input enable */
-#define TIM_BDTR_BK2P BIT(25) /* Break 2 input polarity */
+#define TIM_BDTR_BKF(x) (0xf << (16 + (x) * 4))
+#define TIM_DCR_DBA GENMASK(4, 0) /* DMA base addr */
+#define TIM_DCR_DBL GENMASK(12, 8) /* DMA burst len */
#define MAX_TIM_PSC 0xFFFF
+#define MAX_TIM_ICPSC 0x3
#define TIM_CR2_MMS_SHIFT 4
#define TIM_CR2_MMS2_SHIFT 20
+#define TIM_SMCR_SMS_SLAVE_MODE_DISABLED 0 /* counts on internal clock when CEN=1 */
+#define TIM_SMCR_SMS_ENCODER_MODE_1 1 /* counts TI1FP1 edges, depending on TI2FP2 level */
+#define TIM_SMCR_SMS_ENCODER_MODE_2 2 /* counts TI2FP2 edges, depending on TI1FP1 level */
+#define TIM_SMCR_SMS_ENCODER_MODE_3 3 /* counts on both TI1FP1 and TI2FP2 edges */
#define TIM_SMCR_TS_SHIFT 4
#define TIM_BDTR_BKF_MASK 0xF
-#define TIM_BDTR_BKF_SHIFT 16
-#define TIM_BDTR_BK2F_SHIFT 20
+#define TIM_BDTR_BKF_SHIFT(x) (16 + (x) * 4)
+
+enum stm32_timers_dmas {
+ STM32_TIMERS_DMA_CH1,
+ STM32_TIMERS_DMA_CH2,
+ STM32_TIMERS_DMA_CH3,
+ STM32_TIMERS_DMA_CH4,
+ STM32_TIMERS_DMA_UP,
+ STM32_TIMERS_DMA_TRIG,
+ STM32_TIMERS_DMA_COM,
+ STM32_TIMERS_MAX_DMAS,
+};
+
+/**
+ * struct stm32_timers_dma - STM32 timer DMA handling.
+ * @completion: end of DMA transfer completion
+ * @phys_base: control registers physical base address
+ * @lock: protect DMA access
+ * @chan: DMA channel in use
+ * @chans: DMA channels available for this timer instance
+ */
+struct stm32_timers_dma {
+ struct completion completion;
+ phys_addr_t phys_base;
+ struct mutex lock;
+ struct dma_chan *chan;
+ struct dma_chan *chans[STM32_TIMERS_MAX_DMAS];
+};
struct stm32_timers {
struct clk *clk;
struct regmap *regmap;
u32 max_arr;
+ struct stm32_timers_dma dma; /* Only to be used by the parent */
};
+
+#if IS_REACHABLE(CONFIG_MFD_STM32_TIMERS)
+int stm32_timers_dma_burst_read(struct device *dev, u32 *buf,
+ enum stm32_timers_dmas id, u32 reg,
+ unsigned int num_reg, unsigned int bursts,
+ unsigned long tmo_ms);
+#else
+static inline int stm32_timers_dma_burst_read(struct device *dev, u32 *buf,
+ enum stm32_timers_dmas id,
+ u32 reg,
+ unsigned int num_reg,
+ unsigned int bursts,
+ unsigned long tmo_ms)
+{
+ return -ENODEV;
+}
+#endif
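+
+/*
+ * Example (sketch only): a child driver capturing two snapshots of the two
+ * consecutive registers CCR1 and CCR2 on each channel 1 DMA request, with a
+ * one second timeout:
+ *
+ *	u32 buf[2 * 2];
+ *
+ *	ret = stm32_timers_dma_burst_read(dev->parent, buf,
+ *					  STM32_TIMERS_DMA_CH1, TIM_CCR1,
+ *					  2, 2, 1000);
+ */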
#endif
diff --git a/include/linux/mfd/stmfx.h b/include/linux/mfd/stmfx.h
new file mode 100644
index 000000000000..967a2e486800
--- /dev/null
+++ b/include/linux/mfd/stmfx.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 STMicroelectronics
+ * Author(s): Amelie Delaunay <amelie.delaunay@st.com>.
+ */
+
+#ifndef MFD_STMFX_H
+#define MFD_STMFX_H
+
+#include <linux/regmap.h>
+
+/* General */
+#define STMFX_REG_CHIP_ID 0x00 /* R */
+#define STMFX_REG_FW_VERSION_MSB 0x01 /* R */
+#define STMFX_REG_FW_VERSION_LSB 0x02 /* R */
+#define STMFX_REG_SYS_CTRL 0x40 /* RW */
+/* IRQ output management */
+#define STMFX_REG_IRQ_OUT_PIN 0x41 /* RW */
+#define STMFX_REG_IRQ_SRC_EN 0x42 /* RW */
+#define STMFX_REG_IRQ_PENDING 0x08 /* R */
+#define STMFX_REG_IRQ_ACK 0x44 /* RW */
+/* GPIO management */
+#define STMFX_REG_IRQ_GPI_PENDING1 0x0C /* R */
+#define STMFX_REG_IRQ_GPI_PENDING2 0x0D /* R */
+#define STMFX_REG_IRQ_GPI_PENDING3 0x0E /* R */
+#define STMFX_REG_GPIO_STATE1 0x10 /* R */
+#define STMFX_REG_GPIO_STATE2 0x11 /* R */
+#define STMFX_REG_GPIO_STATE3 0x12 /* R */
+#define STMFX_REG_IRQ_GPI_SRC1 0x48 /* RW */
+#define STMFX_REG_IRQ_GPI_SRC2 0x49 /* RW */
+#define STMFX_REG_IRQ_GPI_SRC3 0x4A /* RW */
+#define STMFX_REG_IRQ_GPI_EVT1 0x4C /* RW */
+#define STMFX_REG_IRQ_GPI_EVT2 0x4D /* RW */
+#define STMFX_REG_IRQ_GPI_EVT3 0x4E /* RW */
+#define STMFX_REG_IRQ_GPI_TYPE1 0x50 /* RW */
+#define STMFX_REG_IRQ_GPI_TYPE2 0x51 /* RW */
+#define STMFX_REG_IRQ_GPI_TYPE3 0x52 /* RW */
+#define STMFX_REG_IRQ_GPI_ACK1 0x54 /* RW */
+#define STMFX_REG_IRQ_GPI_ACK2 0x55 /* RW */
+#define STMFX_REG_IRQ_GPI_ACK3 0x56 /* RW */
+#define STMFX_REG_GPIO_DIR1 0x60 /* RW */
+#define STMFX_REG_GPIO_DIR2 0x61 /* RW */
+#define STMFX_REG_GPIO_DIR3 0x62 /* RW */
+#define STMFX_REG_GPIO_TYPE1 0x64 /* RW */
+#define STMFX_REG_GPIO_TYPE2 0x65 /* RW */
+#define STMFX_REG_GPIO_TYPE3 0x66 /* RW */
+#define STMFX_REG_GPIO_PUPD1 0x68 /* RW */
+#define STMFX_REG_GPIO_PUPD2 0x69 /* RW */
+#define STMFX_REG_GPIO_PUPD3 0x6A /* RW */
+#define STMFX_REG_GPO_SET1 0x6C /* RW */
+#define STMFX_REG_GPO_SET2 0x6D /* RW */
+#define STMFX_REG_GPO_SET3 0x6E /* RW */
+#define STMFX_REG_GPO_CLR1 0x70 /* RW */
+#define STMFX_REG_GPO_CLR2 0x71 /* RW */
+#define STMFX_REG_GPO_CLR3 0x72 /* RW */
+
+#define STMFX_REG_MAX 0xB0
+
+/* MFX boot time is around 10ms, so after reset, we have to wait for this delay */
+#define STMFX_BOOT_TIME_MS 10
+
+/* STMFX_REG_CHIP_ID bitfields */
+#define STMFX_REG_CHIP_ID_MASK GENMASK(7, 0)
+
+/* STMFX_REG_SYS_CTRL bitfields */
+#define STMFX_REG_SYS_CTRL_GPIO_EN BIT(0)
+#define STMFX_REG_SYS_CTRL_TS_EN BIT(1)
+#define STMFX_REG_SYS_CTRL_IDD_EN BIT(2)
+#define STMFX_REG_SYS_CTRL_ALTGPIO_EN BIT(3)
+#define STMFX_REG_SYS_CTRL_SWRST BIT(7)
+
+/* STMFX_REG_IRQ_OUT_PIN bitfields */
+#define STMFX_REG_IRQ_OUT_PIN_TYPE BIT(0) /* 0-OD 1-PP */
+#define STMFX_REG_IRQ_OUT_PIN_POL BIT(1) /* 0-active LOW 1-active HIGH */
+
+/* STMFX_REG_IRQ_(SRC_EN/PENDING/ACK) bit shift */
+enum stmfx_irqs {
+ STMFX_REG_IRQ_SRC_EN_GPIO = 0,
+ STMFX_REG_IRQ_SRC_EN_IDD,
+ STMFX_REG_IRQ_SRC_EN_ERROR,
+ STMFX_REG_IRQ_SRC_EN_TS_DET,
+ STMFX_REG_IRQ_SRC_EN_TS_NE,
+ STMFX_REG_IRQ_SRC_EN_TS_TH,
+ STMFX_REG_IRQ_SRC_EN_TS_FULL,
+ STMFX_REG_IRQ_SRC_EN_TS_OVF,
+ STMFX_REG_IRQ_SRC_MAX,
+};
+
+enum stmfx_functions {
+ STMFX_FUNC_GPIO = BIT(0), /* GPIO[15:0] */
+ STMFX_FUNC_ALTGPIO_LOW = BIT(1), /* aGPIO[3:0] */
+ STMFX_FUNC_ALTGPIO_HIGH = BIT(2), /* aGPIO[7:4] */
+ STMFX_FUNC_TS = BIT(3),
+ STMFX_FUNC_IDD = BIT(4),
+};
+
+/**
+ * struct stmfx - STMFX MFD structure
+ * @dev: device reference used for logs
+ * @map: register map
+ * @vdd: STMFX power supply
+ * @irq: STMFX interrupt line
+ * @irq_domain: IRQ domain
+ * @lock: IRQ bus lock
+ * @irq_src: cache of IRQ_SRC_EN register for bus_lock
+ * @bkp_sysctrl: backup of SYS_CTRL register for suspend/resume
+ * @bkp_irqoutpin: backup of IRQ_OUT_PIN register for suspend/resume
+ */
+struct stmfx {
+ struct device *dev;
+ struct regmap *map;
+ struct regulator *vdd;
+ int irq;
+ struct irq_domain *irq_domain;
+ struct mutex lock; /* IRQ bus lock */
+ u8 irq_src;
+ u8 bkp_sysctrl;
+ u8 bkp_irqoutpin;
+};
+
+int stmfx_function_enable(struct stmfx *stmfx, u32 func);
+int stmfx_function_disable(struct stmfx *stmfx, u32 func);
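+
+/*
+ * Child drivers get the core instance from their parent and toggle the
+ * function they implement, e.g. (sketch only):
+ *
+ *	struct stmfx *stmfx = dev_get_drvdata(pdev->dev.parent);
+ *
+ *	ret = stmfx_function_enable(stmfx, STMFX_FUNC_GPIO);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	stmfx_function_disable(stmfx, STMFX_FUNC_GPIO);
+ */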
+#endif
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index 4a827af17e59..87e29d561e22 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) ST-Ericsson SA 2010
*
- * License Terms: GNU General Public License, version 2
* Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
*/
@@ -10,6 +10,20 @@
#include <linux/mutex.h>
+#define STMPE_SAMPLE_TIME(x) (((x) & 0xf) << 4)
+#define STMPE_MOD_12B(x) (((x) & 0x1) << 3)
+#define STMPE_REF_SEL(x) (((x) & 0x1) << 1)
+#define STMPE_ADC_FREQ(x) ((x) & 0x3)
+#define STMPE_AVE_CTRL(x) (((x) & 0x3) << 6)
+#define STMPE_DET_DELAY(x) (((x) & 0x7) << 3)
+#define STMPE_SETTLING(x) ((x) & 0x7)
+#define STMPE_FRACTION_Z(x) ((x) & 0x7)
+#define STMPE_I_DRIVE(x) ((x) & 0x1)
+#define STMPE_OP_MODE(x) (((x) & 0x7) << 1)
+
+#define STMPE811_REG_ADC_CTRL1 0x20
+#define STMPE811_REG_ADC_CTRL2 0x21
+
struct device;
struct regulator;
@@ -123,6 +137,12 @@ struct stmpe {
u8 ier[2];
u8 oldier[2];
struct stmpe_platform_data *pdata;
+
+ /* For devices that use an ADC */
+ u8 sample_time;
+ u8 mod_12b;
+ u8 ref_sel;
+ u8 adc_freq;
};
extern int stmpe_reg_write(struct stmpe *stmpe, u8 reg, u8 data);
@@ -136,6 +156,7 @@ extern int stmpe_set_altfunc(struct stmpe *stmpe, u32 pins,
enum stmpe_block block);
extern int stmpe_enable(struct stmpe *stmpe, unsigned int blocks);
extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks);
+extern int stmpe811_adc_common_init(struct stmpe *stmpe);
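+
+/*
+ * The ADC control macros above compose the STMPE811_REG_ADC_CTRL1 value,
+ * e.g. (sketch only):
+ *
+ *	u8 adc_ctrl1 = STMPE_SAMPLE_TIME(stmpe->sample_time) |
+ *		       STMPE_MOD_12B(stmpe->mod_12b) |
+ *		       STMPE_REF_SEL(stmpe->ref_sel);
+ *
+ *	ret = stmpe_reg_write(stmpe, STMPE811_REG_ADC_CTRL1, adc_ctrl1);
+ */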
#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0)
diff --git a/include/linux/mfd/stpmic1.h b/include/linux/mfd/stpmic1.h
new file mode 100644
index 000000000000..fa3f99f7e9a1
--- /dev/null
+++ b/include/linux/mfd/stpmic1.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Author: Philippe Peurichard <philippe.peurichard@st.com>,
+ * Pascal Paillet <p.paillet@st.com> for STMicroelectronics.
+ */
+
+#ifndef __LINUX_MFD_STPMIC1_H
+#define __LINUX_MFD_STPMIC1_H
+
+#define TURN_ON_SR 0x1
+#define TURN_OFF_SR 0x2
+#define ICC_LDO_TURN_OFF_SR 0x3
+#define ICC_BUCK_TURN_OFF_SR 0x4
+#define RREQ_STATE_SR 0x5
+#define VERSION_SR 0x6
+
+#define SWOFF_PWRCTRL_CR 0x10
+#define PADS_PULL_CR 0x11
+#define BUCKS_PD_CR 0x12
+#define LDO14_PD_CR 0x13
+#define LDO56_VREF_PD_CR 0x14
+#define VBUS_DET_VIN_CR 0x15
+#define PKEY_TURNOFF_CR 0x16
+#define BUCKS_MASK_RANK_CR 0x17
+#define BUCKS_MASK_RESET_CR 0x18
+#define LDOS_MASK_RANK_CR 0x19
+#define LDOS_MASK_RESET_CR 0x1A
+#define WCHDG_CR 0x1B
+#define WCHDG_TIMER_CR 0x1C
+#define BUCKS_ICCTO_CR 0x1D
+#define LDOS_ICCTO_CR 0x1E
+
+#define BUCK1_ACTIVE_CR 0x20
+#define BUCK2_ACTIVE_CR 0x21
+#define BUCK3_ACTIVE_CR 0x22
+#define BUCK4_ACTIVE_CR 0x23
+#define VREF_DDR_ACTIVE_CR 0x24
+#define LDO1_ACTIVE_CR 0x25
+#define LDO2_ACTIVE_CR 0x26
+#define LDO3_ACTIVE_CR 0x27
+#define LDO4_ACTIVE_CR 0x28
+#define LDO5_ACTIVE_CR 0x29
+#define LDO6_ACTIVE_CR 0x2A
+
+#define BUCK1_STDBY_CR 0x30
+#define BUCK2_STDBY_CR 0x31
+#define BUCK3_STDBY_CR 0x32
+#define BUCK4_STDBY_CR 0x33
+#define VREF_DDR_STDBY_CR 0x34
+#define LDO1_STDBY_CR 0x35
+#define LDO2_STDBY_CR 0x36
+#define LDO3_STDBY_CR 0x37
+#define LDO4_STDBY_CR 0x38
+#define LDO5_STDBY_CR 0x39
+#define LDO6_STDBY_CR 0x3A
+
+#define BST_SW_CR 0x40
+
+#define INT_PENDING_R1 0x50
+#define INT_PENDING_R2 0x51
+#define INT_PENDING_R3 0x52
+#define INT_PENDING_R4 0x53
+
+#define INT_DBG_LATCH_R1 0x60
+#define INT_DBG_LATCH_R2 0x61
+#define INT_DBG_LATCH_R3 0x62
+#define INT_DBG_LATCH_R4 0x63
+
+#define INT_CLEAR_R1 0x70
+#define INT_CLEAR_R2 0x71
+#define INT_CLEAR_R3 0x72
+#define INT_CLEAR_R4 0x73
+
+#define INT_MASK_R1 0x80
+#define INT_MASK_R2 0x81
+#define INT_MASK_R3 0x82
+#define INT_MASK_R4 0x83
+
+#define INT_SET_MASK_R1 0x90
+#define INT_SET_MASK_R2 0x91
+#define INT_SET_MASK_R3 0x92
+#define INT_SET_MASK_R4 0x93
+
+#define INT_CLEAR_MASK_R1 0xA0
+#define INT_CLEAR_MASK_R2 0xA1
+#define INT_CLEAR_MASK_R3 0xA2
+#define INT_CLEAR_MASK_R4 0xA3
+
+#define INT_SRC_R1 0xB0
+#define INT_SRC_R2 0xB1
+#define INT_SRC_R3 0xB2
+#define INT_SRC_R4 0xB3
+
+#define PMIC_MAX_REGISTER_ADDRESS INT_SRC_R4
+
+#define STPMIC1_PMIC_NUM_IRQ_REGS 4
+
+#define TURN_OFF_SR_ICC_EVENT 0x08
+
+#define LDO_VOLTAGE_MASK GENMASK(6, 2)
+#define BUCK_VOLTAGE_MASK GENMASK(7, 2)
+#define LDO_BUCK_VOLTAGE_SHIFT 2
+
+#define LDO_ENABLE_MASK BIT(0)
+#define BUCK_ENABLE_MASK BIT(0)
+
+#define BUCK_HPLP_ENABLE_MASK BIT(1)
+#define BUCK_HPLP_SHIFT 1
+
+#define STDBY_ENABLE_MASK BIT(0)
+
+#define BUCKS_PD_CR_REG_MASK GENMASK(7, 0)
+#define BUCK_MASK_RANK_REGISTER_MASK GENMASK(3, 0)
+#define BUCK_MASK_RESET_REGISTER_MASK GENMASK(3, 0)
+#define LDO1234_PULL_DOWN_REGISTER_MASK GENMASK(7, 0)
+#define LDO56_VREF_PD_CR_REG_MASK GENMASK(5, 0)
+#define LDO_MASK_RANK_REGISTER_MASK GENMASK(5, 0)
+#define LDO_MASK_RESET_REGISTER_MASK GENMASK(5, 0)
+
+#define BUCK1_PULL_DOWN_REG BUCKS_PD_CR
+#define BUCK1_PULL_DOWN_MASK BIT(0)
+#define BUCK2_PULL_DOWN_REG BUCKS_PD_CR
+#define BUCK2_PULL_DOWN_MASK BIT(2)
+#define BUCK3_PULL_DOWN_REG BUCKS_PD_CR
+#define BUCK3_PULL_DOWN_MASK BIT(4)
+#define BUCK4_PULL_DOWN_REG BUCKS_PD_CR
+#define BUCK4_PULL_DOWN_MASK BIT(6)
+
+#define LDO1_PULL_DOWN_REG LDO14_PD_CR
+#define LDO1_PULL_DOWN_MASK BIT(0)
+#define LDO2_PULL_DOWN_REG LDO14_PD_CR
+#define LDO2_PULL_DOWN_MASK BIT(2)
+#define LDO3_PULL_DOWN_REG LDO14_PD_CR
+#define LDO3_PULL_DOWN_MASK BIT(4)
+#define LDO4_PULL_DOWN_REG LDO14_PD_CR
+#define LDO4_PULL_DOWN_MASK BIT(6)
+#define LDO5_PULL_DOWN_REG LDO56_VREF_PD_CR
+#define LDO5_PULL_DOWN_MASK BIT(0)
+#define LDO6_PULL_DOWN_REG LDO56_VREF_PD_CR
+#define LDO6_PULL_DOWN_MASK BIT(2)
+#define VREF_DDR_PULL_DOWN_REG LDO56_VREF_PD_CR
+#define VREF_DDR_PULL_DOWN_MASK BIT(4)
+
+#define BUCKS_ICCTO_CR_REG_MASK GENMASK(6, 0)
+#define LDOS_ICCTO_CR_REG_MASK GENMASK(5, 0)
+
+#define LDO_BYPASS_MASK BIT(7)
+
+/* Main PMIC Control Register
+ * SWOFF_PWRCTRL_CR
+ * Address : 0x10
+ */
+#define ICC_EVENT_ENABLED BIT(4)
+#define PWRCTRL_POLARITY_HIGH BIT(3)
+#define PWRCTRL_PIN_VALID BIT(2)
+#define RESTART_REQUEST_ENABLED BIT(1)
+#define SOFTWARE_SWITCH_OFF_ENABLED BIT(0)
+
+/* Main PMIC PADS Control Register
+ * PADS_PULL_CR
+ * Address : 0x11
+ */
+#define WAKEUP_DETECTOR_DISABLED BIT(4)
+#define PWRCTRL_PD_ACTIVE BIT(3)
+#define PWRCTRL_PU_ACTIVE BIT(2)
+#define WAKEUP_PD_ACTIVE BIT(1)
+#define PONKEY_PU_INACTIVE BIT(0)
+
+/* Main PMIC VINLOW Control Register
+ * VBUS_DET_VIN_CR
+ * Address : 0x15
+ */
+#define SWIN_DETECTOR_ENABLED BIT(7)
+#define SWOUT_DETECTOR_ENABLED BIT(6)
+#define VINLOW_ENABLED BIT(0)
+#define VINLOW_CTRL_REG_MASK GENMASK(7, 0)
+
+/* USB Control Register
+ * Address : 0x40
+ */
+#define BOOST_OVP_DISABLED BIT(7)
+#define VBUS_OTG_DETECTION_DISABLED BIT(6)
+#define SW_OUT_DISCHARGE BIT(5)
+#define VBUS_OTG_DISCHARGE BIT(4)
+#define OCP_LIMIT_HIGH BIT(3)
+#define SWIN_SWOUT_ENABLED BIT(2)
+#define USBSW_OTG_SWITCH_ENABLED BIT(1)
+#define BOOST_ENABLED BIT(0)
+
+/* PKEY_TURNOFF_CR
+ * Address : 0x16
+ */
+#define PONKEY_PWR_OFF BIT(7)
+#define PONKEY_CC_FLAG_CLEAR BIT(6)
+#define PONKEY_TURNOFF_TIMER_MASK GENMASK(3, 0)
+#define PONKEY_TURNOFF_MASK GENMASK(7, 0)
+
+/**
+ * struct stpmic1 - stpmic1 master device for sub-drivers
+ * @dev: master device of the chip (can be used to access platform data)
+ * @regmap: register map
+ * @irq: main IRQ number
+ * @irq_data: regmap IRQ chip data
+ */
+struct stpmic1 {
+ struct device *dev;
+ struct regmap *regmap;
+ int irq;
+ struct regmap_irq_chip_data *irq_data;
+};
+
+#endif /* __LINUX_MFD_STPMIC1_H */
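
As a usage note, a minimal sketch of a sub-driver reading a status register through the shared regmap; VERSION_SR and struct stpmic1 come from this header, the logging is illustrative.

#include <linux/device.h>
#include <linux/mfd/stpmic1.h>
#include <linux/regmap.h>

static int example_show_version(struct stpmic1 *pmic)
{
	unsigned int version;
	int ret;

	ret = regmap_read(pmic->regmap, VERSION_SR, &version);
	if (ret)
		return ret;

	dev_info(pmic->dev, "PMIC version %#x\n", version);
	return 0;
}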
diff --git a/include/linux/mfd/stw481x.h b/include/linux/mfd/stw481x.h
index 833074b766bd..5312804666b3 100644
--- a/include/linux/mfd/stw481x.h
+++ b/include/linux/mfd/stw481x.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 ST-Ericsson SA
* Written on behalf of Linaro for ST-Ericsson
*
* Author: Linus Walleij <linus.walleij@linaro.org>
- *
- * License terms: GNU General Public License (GPL) version 2
*/
#ifndef MFD_STW481X_H
#define MFD_STW481X_H
diff --git a/include/linux/mfd/sun4i-gpadc.h b/include/linux/mfd/sun4i-gpadc.h
index 139872c2e0fe..ea0ccf33a459 100644
--- a/include/linux/mfd/sun4i-gpadc.h
+++ b/include/linux/mfd/sun4i-gpadc.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Header of ADC MFD core driver for sunxi platforms
*
* Copyright (c) 2016 Quentin Schulz <quentin.schulz@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
*/
#ifndef __SUN4I_GPADC__H__
diff --git a/include/linux/mfd/sy7636a.h b/include/linux/mfd/sy7636a.h
new file mode 100644
index 000000000000..22f03b2f851e
--- /dev/null
+++ b/include/linux/mfd/sy7636a.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Functions to access SY7636A power management chip.
+ *
+ * Copyright (C) 2021 reMarkable AS - http://www.remarkable.com/
+ */
+
+#ifndef __MFD_SY7636A_H
+#define __MFD_SY7636A_H
+
+#define SY7636A_REG_OPERATION_MODE_CRL 0x00
+/* It is set if a gpio is used to control the regulator */
+#define SY7636A_OPERATION_MODE_CRL_VCOMCTL BIT(6)
+#define SY7636A_OPERATION_MODE_CRL_ONOFF BIT(7)
+#define SY7636A_REG_VCOM_ADJUST_CTRL_L 0x01
+#define SY7636A_REG_VCOM_ADJUST_CTRL_H 0x02
+#define SY7636A_REG_VCOM_ADJUST_CTRL_MASK 0x01ff
+#define SY7636A_REG_VLDO_VOLTAGE_ADJULST_CTRL 0x03
+#define SY7636A_REG_POWER_ON_DELAY_TIME 0x06
+#define SY7636A_REG_FAULT_FLAG 0x07
+#define SY7636A_FAULT_FLAG_PG BIT(0)
+#define SY7636A_REG_TERMISTOR_READOUT 0x08
+
+#define SY7636A_REG_MAX 0x08
+
+#define VCOM_ADJUST_CTRL_MASK 0x1ff
+/* Used to shift the high byte */
+#define VCOM_ADJUST_CTRL_SHIFT 8
+/* Used to scale from VCOM_ADJUST_CTRL to mV */
+#define VCOM_ADJUST_CTRL_SCAL 10000
+
+#define FAULT_FLAG_SHIFT 1
+
+#endif /* __MFD_SY7636A_H */
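
A sketch of combining the split VCOM adjust registers with the mask/shift/scale macros above. That the scaled result is in microvolts is an assumption read off the header comments, not something this header guarantees.

#include <linux/mfd/sy7636a.h>
#include <linux/regmap.h>

static int example_get_vcom(struct regmap *regmap, unsigned int *uv)
{
	unsigned int lo, hi, val;
	int ret;

	ret = regmap_read(regmap, SY7636A_REG_VCOM_ADJUST_CTRL_L, &lo);
	if (ret)
		return ret;
	ret = regmap_read(regmap, SY7636A_REG_VCOM_ADJUST_CTRL_H, &hi);
	if (ret)
		return ret;

	val = ((hi << VCOM_ADJUST_CTRL_SHIFT) | lo) & VCOM_ADJUST_CTRL_MASK;
	*uv = val * VCOM_ADJUST_CTRL_SCAL;	/* assumed to yield uV */
	return 0;
}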
diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h
index 40a76b97b7ab..fecc2fa2a364 100644
--- a/include/linux/mfd/syscon.h
+++ b/include/linux/mfd/syscon.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* System Control Driver
*
@@ -5,11 +6,6 @@
* Copyright (C) 2012 Linaro Ltd.
*
* Author: Dong Aisheng <dong.aisheng@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_MFD_SYSCON_H__
@@ -21,24 +17,32 @@
struct device_node;
#ifdef CONFIG_MFD_SYSCON
+extern struct regmap *device_node_to_regmap(struct device_node *np);
extern struct regmap *syscon_node_to_regmap(struct device_node *np);
extern struct regmap *syscon_regmap_lookup_by_compatible(const char *s);
-extern struct regmap *syscon_regmap_lookup_by_pdevname(const char *s);
extern struct regmap *syscon_regmap_lookup_by_phandle(
struct device_node *np,
const char *property);
+extern struct regmap *syscon_regmap_lookup_by_phandle_args(
+ struct device_node *np,
+ const char *property,
+ int arg_count,
+ unsigned int *out_args);
+extern struct regmap *syscon_regmap_lookup_by_phandle_optional(
+ struct device_node *np,
+ const char *property);
#else
-static inline struct regmap *syscon_node_to_regmap(struct device_node *np)
+static inline struct regmap *device_node_to_regmap(struct device_node *np)
{
return ERR_PTR(-ENOTSUPP);
}
-static inline struct regmap *syscon_regmap_lookup_by_compatible(const char *s)
+static inline struct regmap *syscon_node_to_regmap(struct device_node *np)
{
return ERR_PTR(-ENOTSUPP);
}
-static inline struct regmap *syscon_regmap_lookup_by_pdevname(const char *s)
+static inline struct regmap *syscon_regmap_lookup_by_compatible(const char *s)
{
return ERR_PTR(-ENOTSUPP);
}
@@ -49,6 +53,23 @@ static inline struct regmap *syscon_regmap_lookup_by_phandle(
{
return ERR_PTR(-ENOTSUPP);
}
+
+static inline struct regmap *syscon_regmap_lookup_by_phandle_args(
+ struct device_node *np,
+ const char *property,
+ int arg_count,
+ unsigned int *out_args)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
+static inline struct regmap *syscon_regmap_lookup_by_phandle_optional(
+ struct device_node *np,
+ const char *property)
+{
+ return NULL;
+}
+
#endif
#endif /* __LINUX_MFD_SYSCON_H__ */
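
A minimal sketch of the two lookup variants added above: the *_optional() form returns NULL rather than an error when the property is absent, and the *_args() form extracts phandle arguments (typically a register offset). The property names and the single-argument layout are hypothetical.

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

static int example_lookup(struct device_node *np)
{
	struct regmap *map;
	unsigned int args[1];

	map = syscon_regmap_lookup_by_phandle_optional(np, "example,syscon");
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!map)
		return 0;	/* property absent: not an error */

	/* e.g. a phandle written as <&gpr 0x4> in the device tree */
	map = syscon_regmap_lookup_by_phandle_args(np, "example,syscon-args",
						   1, args);
	if (IS_ERR(map))
		return PTR_ERR(map);

	return regmap_update_bits(map, args[0], BIT(0), BIT(0));
}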
diff --git a/include/linux/mfd/syscon/atmel-matrix.h b/include/linux/mfd/syscon/atmel-matrix.h
index 8293c3e2a82a..20c25665216a 100644
--- a/include/linux/mfd/syscon/atmel-matrix.h
+++ b/include/linux/mfd/syscon/atmel-matrix.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2014 Atmel Corporation.
*
* Memory Controllers (MATRIX, EBI) - System peripherals registers.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _LINUX_MFD_SYSCON_ATMEL_MATRIX_H
@@ -110,7 +106,6 @@
#define AT91_MATRIX_DDR_IOSR BIT(18)
#define AT91_MATRIX_NFD0_SELECT BIT(24)
#define AT91_MATRIX_DDR_MP_EN BIT(25)
-#define AT91_MATRIX_EBI_NUM_CS 8
#define AT91_MATRIX_USBPUCR_PUON BIT(30)
diff --git a/include/linux/mfd/syscon/atmel-mc.h b/include/linux/mfd/syscon/atmel-mc.h
index afd9b8f1e363..99c56205c410 100644
--- a/include/linux/mfd/syscon/atmel-mc.h
+++ b/include/linux/mfd/syscon/atmel-mc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2005 Ivan Kokshaysky
* Copyright (C) SAN People
@@ -5,11 +6,6 @@
* Memory Controllers (MC, EBI, SMC, SDRAMC, BFC) - System peripherals
* registers.
* Based on AT91RM9200 datasheet revision E.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _LINUX_MFD_SYSCON_ATMEL_MC_H_
diff --git a/include/linux/mfd/syscon/atmel-smc.h b/include/linux/mfd/syscon/atmel-smc.h
index afa266169800..e9e24f4c4578 100644
--- a/include/linux/mfd/syscon/atmel-smc.h
+++ b/include/linux/mfd/syscon/atmel-smc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Atmel SMC (Static Memory Controller) register offsets and bit definitions.
*
@@ -5,31 +6,32 @@
* Copyright (C) 2014 Free Electrons
*
* Author: Boris Brezillon <boris.brezillon@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LINUX_MFD_SYSCON_ATMEL_SMC_H_
#define _LINUX_MFD_SYSCON_ATMEL_SMC_H_
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#define ATMEL_SMC_SETUP(cs) (((cs) * 0x10))
-#define ATMEL_HSMC_SETUP(cs) (0x600 + ((cs) * 0x14))
+#define ATMEL_HSMC_SETUP(layout, cs) \
+ ((layout)->timing_regs_offset + ((cs) * 0x14))
#define ATMEL_SMC_PULSE(cs) (((cs) * 0x10) + 0x4)
-#define ATMEL_HSMC_PULSE(cs) (0x600 + ((cs) * 0x14) + 0x4)
+#define ATMEL_HSMC_PULSE(layout, cs) \
+ ((layout)->timing_regs_offset + ((cs) * 0x14) + 0x4)
#define ATMEL_SMC_CYCLE(cs) (((cs) * 0x10) + 0x8)
-#define ATMEL_HSMC_CYCLE(cs) (0x600 + ((cs) * 0x14) + 0x8)
+#define ATMEL_HSMC_CYCLE(layout, cs) \
+ ((layout)->timing_regs_offset + ((cs) * 0x14) + 0x8)
#define ATMEL_SMC_NWE_SHIFT 0
#define ATMEL_SMC_NCS_WR_SHIFT 8
#define ATMEL_SMC_NRD_SHIFT 16
#define ATMEL_SMC_NCS_RD_SHIFT 24
#define ATMEL_SMC_MODE(cs) (((cs) * 0x10) + 0xc)
-#define ATMEL_HSMC_MODE(cs) (0x600 + ((cs) * 0x14) + 0x10)
+#define ATMEL_HSMC_MODE(layout, cs) \
+ ((layout)->timing_regs_offset + ((cs) * 0x14) + 0x10)
#define ATMEL_SMC_MODE_READMODE_MASK BIT(0)
#define ATMEL_SMC_MODE_READMODE_NCS (0 << 0)
#define ATMEL_SMC_MODE_READMODE_NRD (1 << 0)
@@ -59,7 +61,8 @@
#define ATMEL_SMC_MODE_PS_16 (2 << 28)
#define ATMEL_SMC_MODE_PS_32 (3 << 28)
-#define ATMEL_HSMC_TIMINGS(cs) (0x600 + ((cs) * 0x14) + 0xc)
+#define ATMEL_HSMC_TIMINGS(layout, cs) \
+ ((layout)->timing_regs_offset + ((cs) * 0x14) + 0xc)
#define ATMEL_HSMC_TIMINGS_OCMS BIT(12)
#define ATMEL_HSMC_TIMINGS_RBNSEL(x) ((x) << 28)
#define ATMEL_HSMC_TIMINGS_NFSEL BIT(31)
@@ -69,6 +72,10 @@
#define ATMEL_HSMC_TIMINGS_TRR_SHIFT 16
#define ATMEL_HSMC_TIMINGS_TWB_SHIFT 24
+struct atmel_hsmc_reg_layout {
+ unsigned int timing_regs_offset;
+};
+
/**
* struct atmel_smc_cs_conf - SMC CS config as described in the datasheet.
* @setup: NCS/NWE/NRD setup timings (not applicable to at91rm9200)
@@ -98,11 +105,15 @@ int atmel_smc_cs_conf_set_cycle(struct atmel_smc_cs_conf *conf,
unsigned int shift, unsigned int ncycles);
void atmel_smc_cs_conf_apply(struct regmap *regmap, int cs,
const struct atmel_smc_cs_conf *conf);
-void atmel_hsmc_cs_conf_apply(struct regmap *regmap, int cs,
- const struct atmel_smc_cs_conf *conf);
+void atmel_hsmc_cs_conf_apply(struct regmap *regmap,
+ const struct atmel_hsmc_reg_layout *reglayout,
+ int cs, const struct atmel_smc_cs_conf *conf);
void atmel_smc_cs_conf_get(struct regmap *regmap, int cs,
struct atmel_smc_cs_conf *conf);
-void atmel_hsmc_cs_conf_get(struct regmap *regmap, int cs,
- struct atmel_smc_cs_conf *conf);
+void atmel_hsmc_cs_conf_get(struct regmap *regmap,
+ const struct atmel_hsmc_reg_layout *reglayout,
+ int cs, struct atmel_smc_cs_conf *conf);
+const struct atmel_hsmc_reg_layout *
+atmel_hsmc_get_reg_layout(struct device_node *np);
#endif /* _LINUX_MFD_SYSCON_ATMEL_SMC_H_ */
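
A sketch of the reworked HSMC flow: the per-SoC register layout is looked up once from the device tree and threaded through the apply helper. It assumes the lookup reports failure via ERR_PTR; the chip-select number and the empty timing config are placeholders.

#include <linux/err.h>
#include <linux/mfd/syscon/atmel-smc.h>

static int example_apply_timings(struct regmap *regmap,
				 struct device_node *np, int cs)
{
	const struct atmel_hsmc_reg_layout *layout;
	struct atmel_smc_cs_conf conf = {};

	layout = atmel_hsmc_get_reg_layout(np);
	if (IS_ERR(layout))
		return PTR_ERR(layout);

	/* ...fill conf via atmel_smc_cs_conf_set_cycle() and friends... */
	atmel_hsmc_cs_conf_apply(regmap, layout, cs, &conf);
	return 0;
}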
diff --git a/include/linux/mfd/syscon/atmel-st.h b/include/linux/mfd/syscon/atmel-st.h
index 8acf1ec1fa32..5b6013d0c440 100644
--- a/include/linux/mfd/syscon/atmel-st.h
+++ b/include/linux/mfd/syscon/atmel-st.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2005 Ivan Kokshaysky
* Copyright (C) SAN People
*
* System Timer (ST) - System peripherals registers.
* Based on AT91RM9200 datasheet revision E.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _LINUX_MFD_SYSCON_ATMEL_ST_H
diff --git a/include/linux/mfd/syscon/clps711x.h b/include/linux/mfd/syscon/clps711x.h
index 26355abae515..4c12850dec89 100644
--- a/include/linux/mfd/syscon/clps711x.h
+++ b/include/linux/mfd/syscon/clps711x.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* CLPS711X system register bits definitions
*
* Copyright (C) 2013 Alexander Shiyan <shc_work@mail.ru>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _LINUX_MFD_SYSCON_CLPS711X_H_
diff --git a/include/linux/mfd/syscon/exynos4-pmu.h b/include/linux/mfd/syscon/exynos4-pmu.h
deleted file mode 100644
index 278b1b1549e9..000000000000
--- a/include/linux/mfd/syscon/exynos4-pmu.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (C) 2015 Samsung Electronics Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_
-#define _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_
-
-/* Exynos4 PMU register definitions */
-
-/* MIPI_PHYn_CONTROL register offset: n = 0..1 */
-#define EXYNOS4_MIPI_PHY_CONTROL(n) (0x710 + (n) * 4)
-#define EXYNOS4_MIPI_PHY_ENABLE (1 << 0)
-#define EXYNOS4_MIPI_PHY_SRESETN (1 << 1)
-#define EXYNOS4_MIPI_PHY_MRESETN (1 << 2)
-#define EXYNOS4_MIPI_PHY_RESET_MASK (3 << 1)
-
-#endif /* _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_ */
diff --git a/include/linux/mfd/syscon/exynos5-pmu.h b/include/linux/mfd/syscon/exynos5-pmu.h
deleted file mode 100644
index b4942a32b81d..000000000000
--- a/include/linux/mfd/syscon/exynos5-pmu.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Exynos5 SoC series Power Management Unit (PMU) register offsets
- * and bit definitions.
- *
- * Copyright (C) 2014 Samsung Electronics Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_
-#define _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_
-
-#define EXYNOS5_PHY_ENABLE BIT(0)
-#define EXYNOS5_MIPI_PHY_S_RESETN BIT(1)
-#define EXYNOS5_MIPI_PHY_M_RESETN BIT(2)
-
-#endif /* _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_ */
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index c8e0164c5423..09c6b3184bb0 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_IMX6Q_IOMUXC_GPR_H
@@ -243,6 +240,8 @@
#define IMX6Q_GPR4_IPU_RD_CACHE_CTL BIT(0)
#define IMX6Q_GPR5_L2_CLK_STOP BIT(8)
+#define IMX6Q_GPR5_SATA_SW_PD BIT(10)
+#define IMX6Q_GPR5_SATA_SW_RST BIT(11)
#define IMX6Q_GPR6_IPU1_ID00_WR_QOS_MASK (0xf << 0)
#define IMX6Q_GPR6_IPU1_ID01_WR_QOS_MASK (0xf << 4)
@@ -408,6 +407,15 @@
#define IMX6SX_GPR1_FEC_CLOCK_PAD_DIR_MASK (0x3 << 17)
#define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_EXT (0x3 << 13)
+#define IMX6SX_GPR2_MQS_OVERSAMPLE_MASK (0x1 << 26)
+#define IMX6SX_GPR2_MQS_OVERSAMPLE_SHIFT (26)
+#define IMX6SX_GPR2_MQS_EN_MASK (0x1 << 25)
+#define IMX6SX_GPR2_MQS_EN_SHIFT (25)
+#define IMX6SX_GPR2_MQS_SW_RST_MASK (0x1 << 24)
+#define IMX6SX_GPR2_MQS_SW_RST_SHIFT (24)
+#define IMX6SX_GPR2_MQS_CLK_DIV_MASK (0xFF << 16)
+#define IMX6SX_GPR2_MQS_CLK_DIV_SHIFT (16)
+
#define IMX6SX_GPR4_FEC_ENET1_STOP_REQ (0x1 << 3)
#define IMX6SX_GPR4_FEC_ENET2_STOP_REQ (0x1 << 4)
@@ -438,12 +446,15 @@
#define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1)
#define IMX6SX_GPR12_PCIE_TEST_POWERDOWN BIT(30)
+#define IMX6SX_GPR12_PCIE_PM_TURN_OFF BIT(16)
#define IMX6SX_GPR12_PCIE_RX_EQ_MASK (0x7 << 0)
#define IMX6SX_GPR12_PCIE_RX_EQ_2 (0x2 << 0)
/* For imx6ul iomux gpr register field define */
-#define IMX6UL_GPR1_ENET1_CLK_DIR (0x1 << 17)
-#define IMX6UL_GPR1_ENET2_CLK_DIR (0x1 << 18)
+#define IMX6UL_GPR1_ENET2_TX_CLK_DIR BIT(18)
+#define IMX6UL_GPR1_ENET1_TX_CLK_DIR BIT(17)
+#define IMX6UL_GPR1_ENET2_CLK_SEL BIT(14)
+#define IMX6UL_GPR1_ENET1_CLK_SEL BIT(13)
#define IMX6UL_GPR1_ENET1_CLK_OUTPUT (0x1 << 17)
#define IMX6UL_GPR1_ENET2_CLK_OUTPUT (0x1 << 18)
#define IMX6UL_GPR1_ENET_CLK_DIR (0x3 << 17)
@@ -455,4 +466,7 @@
#define MCLK_DIR(x) (x == 1 ? IMX6UL_GPR1_SAI1_MCLK_DIR : x == 2 ? \
IMX6UL_GPR1_SAI2_MCLK_DIR : IMX6UL_GPR1_SAI3_MCLK_DIR)
+/* For imx6sll iomux gpr register field define */
+#define IMX6SLL_GPR5_AFCG_X_BYPASS_MASK (0x1f << 11)
+
#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
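
An illustrative sketch of driving the new i.MX6SX MQS bits through the IOMUXC GPR syscon. The bits live in GPR2; IOMUXC_GPR2 and the "fsl,imx6sx-iomuxc-gpr" compatible are assumed from elsewhere in this header and the DT bindings, and the divider value is a placeholder.

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/regmap.h>

static int example_mqs_enable(unsigned int div)
{
	struct regmap *gpr;

	gpr = syscon_regmap_lookup_by_compatible("fsl,imx6sx-iomuxc-gpr");
	if (IS_ERR(gpr))
		return PTR_ERR(gpr);

	/* IOMUXC_GPR2 is assumed from near the top of this header */
	regmap_update_bits(gpr, IOMUXC_GPR2, IMX6SX_GPR2_MQS_CLK_DIV_MASK,
			   div << IMX6SX_GPR2_MQS_CLK_DIV_SHIFT);
	return regmap_update_bits(gpr, IOMUXC_GPR2, IMX6SX_GPR2_MQS_EN_MASK,
				  1 << IMX6SX_GPR2_MQS_EN_SHIFT);
}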
diff --git a/include/linux/mfd/syscon/imx7-iomuxc-gpr.h b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
index abbd52466573..3d46907bab89 100644
--- a/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2015 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_IMX7_IOMUXC_GPR_H
diff --git a/include/linux/mfd/syscon/xlnx-vcu.h b/include/linux/mfd/syscon/xlnx-vcu.h
new file mode 100644
index 000000000000..ff7bc3656f6e
--- /dev/null
+++ b/include/linux/mfd/syscon/xlnx-vcu.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Pengutronix, Michael Tretter <kernel@pengutronix.de>
+ */
+
+#ifndef __XLNX_VCU_H
+#define __XLNX_VCU_H
+
+#define VCU_ECODER_ENABLE 0x00
+#define VCU_DECODER_ENABLE 0x04
+#define VCU_MEMORY_DEPTH 0x08
+#define VCU_ENC_COLOR_DEPTH 0x0c
+#define VCU_ENC_VERTICAL_RANGE 0x10
+#define VCU_ENC_FRAME_SIZE_X 0x14
+#define VCU_ENC_FRAME_SIZE_Y 0x18
+#define VCU_ENC_COLOR_FORMAT 0x1c
+#define VCU_ENC_FPS 0x20
+#define VCU_MCU_CLK 0x24
+#define VCU_CORE_CLK 0x28
+#define VCU_PLL_BYPASS 0x2c
+#define VCU_ENC_CLK 0x30
+#define VCU_PLL_CLK 0x34
+#define VCU_ENC_VIDEO_STANDARD 0x38
+#define VCU_STATUS 0x3c
+#define VCU_AXI_ENC_CLK 0x40
+#define VCU_AXI_DEC_CLK 0x44
+#define VCU_AXI_MCU_CLK 0x48
+#define VCU_DEC_VIDEO_STANDARD 0x4c
+#define VCU_DEC_FRAME_SIZE_X 0x50
+#define VCU_DEC_FRAME_SIZE_Y 0x54
+#define VCU_DEC_FPS 0x58
+#define VCU_BUFFER_B_FRAME 0x5c
+#define VCU_WPP_EN 0x60
+#define VCU_PLL_CLK_DEC 0x64
+#define VCU_NUM_CORE 0x6c
+#define VCU_GASKET_INIT 0x74
+#define VCU_GASKET_VALUE 0x03
+
+#endif /* __XLNX_VCU_H */
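
These offsets are consumed through a regmap/syscon handle; a trivial hedged sketch follows, with the interpretation of the status bits left to the consumer driver.

#include <linux/mfd/syscon/xlnx-vcu.h>
#include <linux/regmap.h>

static int example_read_vcu_status(struct regmap *vcu, unsigned int *status)
{
	/* Only the VCU_STATUS offset comes from this header */
	return regmap_read(vcu, VCU_STATUS, status);
}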
diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h
deleted file mode 100644
index b4629818aea5..000000000000
--- a/include/linux/mfd/t7l66xb.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * This file contains the definitions for the T7L66XB
- *
- * (C) Copyright 2005 Ian Molton <spyro@f2s.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-#ifndef MFD_T7L66XB_H
-#define MFD_T7L66XB_H
-
-#include <linux/mfd/core.h>
-#include <linux/mfd/tmio.h>
-
-struct t7l66xb_platform_data {
- int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
- int (*suspend)(struct platform_device *dev);
- int (*resume)(struct platform_device *dev);
-
- int irq_base; /* The base for subdevice irqs */
-
- struct tmio_nand_data *nand_data;
-};
-
-
-#define IRQ_T7L66XB_MMC (1)
-#define IRQ_T7L66XB_NAND (3)
-
-#define T7L66XB_NR_IRQS 8
-
-#endif
diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h
index 468c31a27fcf..b84955410e03 100644
--- a/include/linux/mfd/tc3589x.h
+++ b/include/linux/mfd/tc3589x.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) ST-Ericsson SA 2010
- *
- * License Terms: GNU General Public License, version 2
*/
#ifndef __LINUX_MFD_TC3589x_H
@@ -20,6 +19,9 @@ enum tx3589x_block {
#define TC3589x_RSTCTRL_KBDRST (1 << 1)
#define TC3589x_RSTCTRL_GPIRST (1 << 0)
+#define TC3589x_DKBDMSK_ELINT (1 << 1)
+#define TC3589x_DKBDMSK_EINT (1 << 0)
+
/* Keyboard Configuration Registers */
#define TC3589x_KBDSETTLE_REG 0x01
#define TC3589x_KBDBOUNCE 0x02
@@ -102,6 +104,9 @@ enum tx3589x_block {
#define TC3589x_GPIOODM2 0xE4
#define TC3589x_GPIOODE2 0xE5
+#define TC3589x_DIRECT0 0xEC
+#define TC3589x_DKBDMSK 0xF3
+
#define TC3589x_INT_GPIIRQ 0
#define TC3589x_INT_TI0IRQ 1
#define TC3589x_INT_TI1IRQ 2
diff --git a/include/linux/mfd/tc6387xb.h b/include/linux/mfd/tc6387xb.h
deleted file mode 100644
index b4888209494a..000000000000
--- a/include/linux/mfd/tc6387xb.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * This file contains the definitions for the TC6387XB
- *
- * (C) Copyright 2005 Ian Molton <spyro@f2s.com>
- *
- * May be copied or modified under the terms of the GNU General Public
- * License. See linux/COPYING for more information.
- *
- */
-#ifndef MFD_TC6387XB_H
-#define MFD_TC6387XB_H
-
-struct tc6387xb_platform_data {
- int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
- int (*suspend)(struct platform_device *dev);
- int (*resume)(struct platform_device *dev);
-};
-
-#endif
diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h
deleted file mode 100644
index 626e448205c5..000000000000
--- a/include/linux/mfd/tc6393xb.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Toshiba TC6393XB SoC support
- *
- * Copyright(c) 2005-2006 Chris Humbert
- * Copyright(c) 2005 Dirk Opfer
- * Copyright(c) 2005 Ian Molton <spyro@f2s.com>
- * Copyright(c) 2007 Dmitry Baryshkov
- *
- * Based on code written by Sharp/Lineo for 2.4 kernels
- * Based on locomo.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef MFD_TC6393XB_H
-#define MFD_TC6393XB_H
-
-#include <linux/fb.h>
-
-/* Also one should provide the CK3P6MI clock */
-struct tc6393xb_platform_data {
- u16 scr_pll2cr; /* PLL2 Control */
- u16 scr_gper; /* GP Enable */
-
- int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
- int (*suspend)(struct platform_device *dev);
- int (*resume)(struct platform_device *dev);
-
- int irq_base; /* base for subdevice irqs */
- int gpio_base;
- int (*setup)(struct platform_device *dev);
- void (*teardown)(struct platform_device *dev);
-
- struct tmio_nand_data *nand_data;
- struct tmio_fb_data *fb_data;
-
- unsigned resume_restore : 1; /* make special actions
- to preserve the state
- on suspend/resume */
-};
-
-extern int tc6393xb_lcd_mode(struct platform_device *fb,
- const struct fb_videomode *mode);
-extern int tc6393xb_lcd_set_power(struct platform_device *fb, bool on);
-
-/*
- * Relative to irq_base
- */
-#define IRQ_TC6393_NAND 0
-#define IRQ_TC6393_MMC 1
-#define IRQ_TC6393_OHCI 2
-#define IRQ_TC6393_FB 4
-
-#define TC6393XB_NR_IRQS 8
-
-#endif
diff --git a/include/linux/mfd/ti-lmu-register.h b/include/linux/mfd/ti-lmu-register.h
index 2125c7c02818..116a749e0302 100644
--- a/include/linux/mfd/ti-lmu-register.h
+++ b/include/linux/mfd/ti-lmu-register.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI LMU (Lighting Management Unit) Device Register Map
*
* Copyright 2017 Texas Instruments
*
* Author: Milo Kim <milo.kim@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __MFD_TI_LMU_REGISTER_H__
@@ -15,50 +12,6 @@
#include <linux/bitops.h>
-/* LM3532 */
-#define LM3532_REG_OUTPUT_CFG 0x10
-#define LM3532_ILED1_CFG_MASK 0x03
-#define LM3532_ILED2_CFG_MASK 0x0C
-#define LM3532_ILED3_CFG_MASK 0x30
-#define LM3532_ILED1_CFG_SHIFT 0
-#define LM3532_ILED2_CFG_SHIFT 2
-#define LM3532_ILED3_CFG_SHIFT 4
-
-#define LM3532_REG_RAMPUP 0x12
-#define LM3532_REG_RAMPDN LM3532_REG_RAMPUP
-#define LM3532_RAMPUP_MASK 0x07
-#define LM3532_RAMPUP_SHIFT 0
-#define LM3532_RAMPDN_MASK 0x38
-#define LM3532_RAMPDN_SHIFT 3
-
-#define LM3532_REG_ENABLE 0x1D
-
-#define LM3532_REG_PWM_A_CFG 0x13
-#define LM3532_PWM_A_MASK 0x05 /* zone 0 */
-#define LM3532_PWM_ZONE_0 BIT(2)
-
-#define LM3532_REG_PWM_B_CFG 0x14
-#define LM3532_PWM_B_MASK 0x09 /* zone 1 */
-#define LM3532_PWM_ZONE_1 BIT(3)
-
-#define LM3532_REG_PWM_C_CFG 0x15
-#define LM3532_PWM_C_MASK 0x11 /* zone 2 */
-#define LM3532_PWM_ZONE_2 BIT(4)
-
-#define LM3532_REG_ZONE_CFG_A 0x16
-#define LM3532_REG_ZONE_CFG_B 0x18
-#define LM3532_REG_ZONE_CFG_C 0x1A
-#define LM3532_ZONE_MASK (BIT(2) | BIT(3) | BIT(4))
-#define LM3532_ZONE_0 0
-#define LM3532_ZONE_1 BIT(2)
-#define LM3532_ZONE_2 BIT(3)
-
-#define LM3532_REG_BRT_A 0x70 /* zone 0 */
-#define LM3532_REG_BRT_B 0x76 /* zone 1 */
-#define LM3532_REG_BRT_C 0x7C /* zone 2 */
-
-#define LM3532_MAX_REG 0x7E
-
/* LM3631 */
#define LM3631_REG_DEVCTRL 0x00
#define LM3631_LCD_EN_MASK BIT(1)
@@ -234,47 +187,26 @@
#define LM3695_MAX_REG 0x14
-/* LM3697 */
-#define LM3697_REG_HVLED_OUTPUT_CFG 0x10
-#define LM3697_HVLED1_CFG_MASK BIT(0)
-#define LM3697_HVLED2_CFG_MASK BIT(1)
-#define LM3697_HVLED3_CFG_MASK BIT(2)
-#define LM3697_HVLED1_CFG_SHIFT 0
-#define LM3697_HVLED2_CFG_SHIFT 1
-#define LM3697_HVLED3_CFG_SHIFT 2
-
-#define LM3697_REG_BL0_RAMP 0x11
-#define LM3697_REG_BL1_RAMP 0x12
-#define LM3697_RAMPUP_MASK 0xF0
-#define LM3697_RAMPUP_SHIFT 4
-#define LM3697_RAMPDN_MASK 0x0F
-#define LM3697_RAMPDN_SHIFT 0
-
-#define LM3697_REG_RAMP_CONF 0x14
-#define LM3697_RAMP_MASK 0x0F
-#define LM3697_RAMP_EACH 0x05
-
-#define LM3697_REG_PWM_CFG 0x1C
-#define LM3697_PWM_A_MASK BIT(0)
-#define LM3697_PWM_B_MASK BIT(1)
-
-#define LM3697_REG_IMAX_A 0x17
-#define LM3697_REG_IMAX_B 0x18
-
-#define LM3697_REG_FEEDBACK_ENABLE 0x19
-
-#define LM3697_REG_BRT_A_LSB 0x20
-#define LM3697_REG_BRT_A_MSB 0x21
-#define LM3697_REG_BRT_B_LSB 0x22
-#define LM3697_REG_BRT_B_MSB 0x23
-
-#define LM3697_REG_ENABLE 0x24
+/* LM36274 */
+#define LM36274_REG_REV 0x01
+#define LM36274_REG_BL_CFG_1 0x02
+#define LM36274_REG_BL_CFG_2 0x03
+#define LM36274_REG_BRT_LSB 0x04
+#define LM36274_REG_BRT_MSB 0x05
+#define LM36274_REG_BL_EN 0x08
-#define LM3697_REG_OPEN_FAULT_STATUS 0xB0
+#define LM36274_REG_BIAS_CONFIG_1 0x09
+#define LM36274_EXT_EN_MASK BIT(0)
+#define LM36274_EN_VNEG_MASK BIT(1)
+#define LM36274_EN_VPOS_MASK BIT(2)
-#define LM3697_REG_SHORT_FAULT_STATUS 0xB2
+#define LM36274_REG_BIAS_CONFIG_2 0x0a
+#define LM36274_REG_BIAS_CONFIG_3 0x0b
+#define LM36274_REG_VOUT_BOOST 0x0c
+#define LM36274_REG_VOUT_POS 0x0d
+#define LM36274_REG_VOUT_NEG 0x0e
+#define LM36274_VOUT_MASK 0x3F
-#define LM3697_REG_MONITOR_ENABLE 0xB4
+#define LM36274_MAX_REG 0x13
-#define LM3697_MAX_REG 0xB4
#endif
diff --git a/include/linux/mfd/ti-lmu.h b/include/linux/mfd/ti-lmu.h
index 09d5f30384e5..0bc0e8199798 100644
--- a/include/linux/mfd/ti-lmu.h
+++ b/include/linux/mfd/ti-lmu.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI LMU (Lighting Management Unit) Devices
*
* Copyright 2017 Texas Instruments
*
* Author: Milo Kim <milo.kim@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __MFD_TI_LMU_H__
@@ -16,17 +13,17 @@
#include <linux/gpio.h>
#include <linux/notifier.h>
#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
/* Notifier event */
#define LMU_EVENT_MONITOR_DONE 0x01
enum ti_lmu_id {
- LM3532,
LM3631,
LM3632,
LM3633,
LM3695,
- LM3697,
+ LM36274,
LMU_MAX_ID,
};
@@ -68,6 +65,9 @@ enum lm363x_regulator_id {
LM3632_BOOST, /* Boost output */
LM3632_LDO_POS, /* Positive display bias output */
LM3632_LDO_NEG, /* Negative display bias output */
+ LM36274_BOOST, /* Boost output */
+ LM36274_LDO_POS, /* Positive display bias output */
+ LM36274_LDO_NEG, /* Negative display bias output */
};
/**
@@ -81,7 +81,7 @@ enum lm363x_regulator_id {
struct ti_lmu {
struct device *dev;
struct regmap *regmap;
- int en_gpio;
+ struct gpio_desc *en_gpio;
struct blocking_notifier_head notifier;
};
#endif
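
A sketch of the en_gpio migration above: the driver now holds a GPIO descriptor instead of a legacy number. The "enable" consumer name maps to an enable-gpios DT property by convention, but is an assumption here.

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/mfd/ti-lmu.h>

static int example_lmu_enable(struct device *dev, struct ti_lmu *lmu)
{
	/* "enable" is an assumed consumer name (enable-gpios property) */
	lmu->en_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH);
	if (IS_ERR(lmu->en_gpio))
		return PTR_ERR(lmu->en_gpio);

	gpiod_set_value(lmu->en_gpio, 1);
	return 0;
}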
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index b9a53e013bff..4063b0614d90 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -1,22 +1,16 @@
-#ifndef __LINUX_TI_AM335X_TSCADC_MFD_H
-#define __LINUX_TI_AM335X_TSCADC_MFD_H
-
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI Touch Screen / ADC MFD driver
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
*/
+#ifndef __LINUX_TI_AM335X_TSCADC_MFD_H
+#define __LINUX_TI_AM335X_TSCADC_MFD_H
+
+#include <linux/bitfield.h>
#include <linux/mfd/core.h>
+#include <linux/units.h>
#define REG_RAWIRQSTATUS 0x024
#define REG_IRQSTATUS 0x028
@@ -46,13 +40,6 @@
/* IRQ wakeup enable */
#define IRQWKUP_ENB BIT(0)
-/* Step Enable */
-#define STEPENB_MASK (0x1FFFF << 0)
-#define STEPENB(val) ((val) << 0)
-#define ENB(val) (1 << (val))
-#define STPENB_STEPENB STEPENB(0x1FFFF)
-#define STPENB_STEPENB_TC STEPENB(0x1FFF)
-
/* IRQ enable */
#define IRQENB_HW_PEN BIT(0)
#define IRQENB_EOS BIT(1)
@@ -65,12 +52,10 @@
#define IRQENB_PENUP BIT(9)
/* Step Configuration */
-#define STEPCONFIG_MODE_MASK (3 << 0)
-#define STEPCONFIG_MODE(val) ((val) << 0)
+#define STEPCONFIG_MODE(val) FIELD_PREP(GENMASK(1, 0), (val))
#define STEPCONFIG_MODE_SWCNT STEPCONFIG_MODE(1)
#define STEPCONFIG_MODE_HWSYNC STEPCONFIG_MODE(2)
-#define STEPCONFIG_AVG_MASK (7 << 2)
-#define STEPCONFIG_AVG(val) ((val) << 2)
+#define STEPCONFIG_AVG(val) FIELD_PREP(GENMASK(4, 2), (val))
#define STEPCONFIG_AVG_16 STEPCONFIG_AVG(4)
#define STEPCONFIG_XPP BIT(5)
#define STEPCONFIG_XNN BIT(6)
@@ -78,66 +63,67 @@
#define STEPCONFIG_YNN BIT(8)
#define STEPCONFIG_XNP BIT(9)
#define STEPCONFIG_YPN BIT(10)
-#define STEPCONFIG_INM_MASK (0xF << 15)
-#define STEPCONFIG_INM(val) ((val) << 15)
+#define STEPCONFIG_RFP(val) FIELD_PREP(GENMASK(13, 12), (val))
+#define STEPCONFIG_RFP_VREFP STEPCONFIG_RFP(3)
+#define STEPCONFIG_INM(val) FIELD_PREP(GENMASK(18, 15), (val))
#define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8)
-#define STEPCONFIG_INP_MASK (0xF << 19)
-#define STEPCONFIG_INP(val) ((val) << 19)
+#define STEPCONFIG_INP(val) FIELD_PREP(GENMASK(22, 19), (val))
#define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4)
#define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8)
#define STEPCONFIG_FIFO1 BIT(26)
+#define STEPCONFIG_RFM(val) FIELD_PREP(GENMASK(24, 23), (val))
+#define STEPCONFIG_RFM_VREFN STEPCONFIG_RFM(3)
/* Delay register */
-#define STEPDELAY_OPEN_MASK (0x3FFFF << 0)
-#define STEPDELAY_OPEN(val) ((val) << 0)
+#define STEPDELAY_OPEN(val) FIELD_PREP(GENMASK(17, 0), (val))
#define STEPCONFIG_OPENDLY STEPDELAY_OPEN(0x098)
-#define STEPDELAY_SAMPLE_MASK (0xFF << 24)
-#define STEPDELAY_SAMPLE(val) ((val) << 24)
+#define STEPCONFIG_MAX_OPENDLY GENMASK(17, 0)
+#define STEPDELAY_SAMPLE(val) FIELD_PREP(GENMASK(31, 24), (val))
#define STEPCONFIG_SAMPLEDLY STEPDELAY_SAMPLE(0)
+#define STEPCONFIG_MAX_SAMPLE GENMASK(7, 0)
/* Charge Config */
-#define STEPCHARGE_RFP_MASK (7 << 12)
-#define STEPCHARGE_RFP(val) ((val) << 12)
+#define STEPCHARGE_RFP(val) FIELD_PREP(GENMASK(14, 12), (val))
#define STEPCHARGE_RFP_XPUL STEPCHARGE_RFP(1)
-#define STEPCHARGE_INM_MASK (0xF << 15)
-#define STEPCHARGE_INM(val) ((val) << 15)
+#define STEPCHARGE_INM(val) FIELD_PREP(GENMASK(18, 15), (val))
#define STEPCHARGE_INM_AN1 STEPCHARGE_INM(1)
-#define STEPCHARGE_INP_MASK (0xF << 19)
-#define STEPCHARGE_INP(val) ((val) << 19)
-#define STEPCHARGE_RFM_MASK (3 << 23)
-#define STEPCHARGE_RFM(val) ((val) << 23)
+#define STEPCHARGE_INP(val) FIELD_PREP(GENMASK(22, 19), (val))
+#define STEPCHARGE_RFM(val) FIELD_PREP(GENMASK(24, 23), (val))
#define STEPCHARGE_RFM_XNUR STEPCHARGE_RFM(1)
/* Charge delay */
-#define CHARGEDLY_OPEN_MASK (0x3FFFF << 0)
-#define CHARGEDLY_OPEN(val) ((val) << 0)
+#define CHARGEDLY_OPEN(val) FIELD_PREP(GENMASK(17, 0), (val))
#define CHARGEDLY_OPENDLY CHARGEDLY_OPEN(0x400)
/* Control register */
-#define CNTRLREG_TSCSSENB BIT(0)
+#define CNTRLREG_SSENB BIT(0)
#define CNTRLREG_STEPID BIT(1)
-#define CNTRLREG_STEPCONFIGWRT BIT(2)
+#define CNTRLREG_TSC_STEPCONFIGWRT BIT(2)
#define CNTRLREG_POWERDOWN BIT(4)
-#define CNTRLREG_AFE_CTRL_MASK (3 << 5)
-#define CNTRLREG_AFE_CTRL(val) ((val) << 5)
-#define CNTRLREG_4WIRE CNTRLREG_AFE_CTRL(1)
-#define CNTRLREG_5WIRE CNTRLREG_AFE_CTRL(2)
-#define CNTRLREG_8WIRE CNTRLREG_AFE_CTRL(3)
-#define CNTRLREG_TSCENB BIT(7)
+#define CNTRLREG_TSC_AFE_CTRL(val) FIELD_PREP(GENMASK(6, 5), (val))
+#define CNTRLREG_TSC_4WIRE CNTRLREG_TSC_AFE_CTRL(1)
+#define CNTRLREG_TSC_5WIRE CNTRLREG_TSC_AFE_CTRL(2)
+#define CNTRLREG_TSC_ENB BIT(7)
+
+/* Control register bitfields for MAGADC IP */
+#define CNTRLREG_MAGADCENB BIT(0)
+#define CNTRLREG_MAG_PREAMP_PWRDOWN BIT(5)
+#define CNTRLREG_MAG_PREAMP_BYPASS BIT(6)
/* FIFO READ Register */
-#define FIFOREAD_DATA_MASK (0xfff << 0)
-#define FIFOREAD_CHNLID_MASK (0xf << 16)
+#define FIFOREAD_DATA_MASK GENMASK(11, 0)
+#define FIFOREAD_CHNLID_MASK GENMASK(19, 16)
/* DMA ENABLE/CLEAR Register */
#define DMA_FIFO0 BIT(0)
#define DMA_FIFO1 BIT(1)
/* Sequencer Status */
-#define SEQ_STATUS BIT(5)
+#define SEQ_STATUS BIT(5)
#define CHARGE_STEP 0x11
-#define ADC_CLK 3000000
+#define TSC_ADC_CLK (3 * HZ_PER_MHZ)
+#define MAG_ADC_CLK (13 * HZ_PER_MHZ)
#define TOTAL_STEPS 16
#define TOTAL_CHANNELS 8
#define FIFO1_THRESHOLD 19
@@ -154,21 +140,27 @@
*
* max processing time: 266431 * 308ns = 83ms(approx)
*/
-#define IDLE_TIMEOUT 83 /* milliseconds */
+#define IDLE_TIMEOUT_MS 83 /* milliseconds */
#define TSCADC_CELLS 2
+struct ti_tscadc_data {
+ char *adc_feature_name;
+ char *adc_feature_compatible;
+ char *secondary_feature_name;
+ char *secondary_feature_compatible;
+ unsigned int target_clk_rate;
+};
+
struct ti_tscadc_dev {
struct device *dev;
struct regmap *regmap;
void __iomem *tscadc_base;
phys_addr_t tscadc_phys_base;
+ const struct ti_tscadc_data *data;
int irq;
- int used_cells; /* 1-2 */
- int tsc_wires;
- int tsc_cell; /* -1 if not used */
- int adc_cell; /* -1 if not used */
struct mfd_cell cells[TSCADC_CELLS];
+ u32 ctrl;
u32 reg_se_cache;
bool adc_waiting;
bool adc_in_use;
@@ -190,6 +182,12 @@ static inline struct ti_tscadc_dev *ti_tscadc_dev_get(struct platform_device *p)
return *tscadc_dev;
}
+static inline bool ti_adc_with_touchscreen(struct ti_tscadc_dev *tscadc)
+{
+ return of_device_is_compatible(tscadc->dev->of_node,
+ "ti,am3359-tscadc");
+}
+
void am335x_tsc_se_set_cache(struct ti_tscadc_dev *tsadc, u32 val);
void am335x_tsc_se_set_once(struct ti_tscadc_dev *tsadc, u32 val);
void am335x_tsc_se_clr(struct ti_tscadc_dev *tsadc, u32 val);
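
A worked sketch of the FIELD_PREP-based rewrite above, composing a step configuration the way the old shift-and-mask macros did. REG_STEPCONFIG()/REG_STEPDELAY() are assumed from elsewhere in this header, and the chosen values are placeholders rather than recommendations.

#include <linux/mfd/ti_am335x_tscadc.h>
#include <linux/regmap.h>

static void example_program_step(struct ti_tscadc_dev *tscadc, int step)
{
	u32 config = STEPCONFIG_MODE_SWCNT | STEPCONFIG_AVG_16 |
		     STEPCONFIG_INP_AN4 | STEPCONFIG_FIFO1;
	u32 delay = STEPCONFIG_OPENDLY | STEPDELAY_SAMPLE(2);

	/* REG_STEPCONFIG()/REG_STEPDELAY() live elsewhere in this header */
	regmap_write(tscadc->regmap, REG_STEPCONFIG(step), config);
	regmap_write(tscadc->regmap, REG_STEPDELAY(step), delay);
}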
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 357b6cfdf34a..eace8ea6cda0 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MFD_TMIO_H
#define MFD_TMIO_H
@@ -24,26 +25,6 @@
writew((val) >> 16, (addr) + 2); \
} while (0)
-#define CNF_CMD 0x04
-#define CNF_CTL_BASE 0x10
-#define CNF_INT_PIN 0x3d
-#define CNF_STOP_CLK_CTL 0x40
-#define CNF_GCLK_CTL 0x41
-#define CNF_SD_CLK_MODE 0x42
-#define CNF_PIN_STATUS 0x44
-#define CNF_PWR_CTL_1 0x48
-#define CNF_PWR_CTL_2 0x49
-#define CNF_PWR_CTL_3 0x4a
-#define CNF_CARD_DETECT_MODE 0x4c
-#define CNF_SD_SLOT 0x50
-#define CNF_EXT_GCLK_CTL_1 0xf0
-#define CNF_EXT_GCLK_CTL_2 0xf1
-#define CNF_EXT_GCLK_CTL_3 0xf9
-#define CNF_SD_LED_EN_1 0xfa
-#define CNF_SD_LED_EN_2 0xfe
-
-#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
-
#define sd_config_write8(base, shift, reg, val) \
tmio_iowrite8((val), (base) + ((reg) << (shift)))
#define sd_config_write16(base, shift, reg, val) \
@@ -55,7 +36,6 @@
} while (0)
/* tmio MMC platform flags */
-#define TMIO_MMC_WRPROTECT_DISABLE BIT(0)
/*
* Some controllers can support a 2-byte block size when the bus width
* is configured in 4-bit mode.
@@ -74,19 +54,13 @@
* idle before writing to some registers.
*/
#define TMIO_MMC_HAS_IDLE_WAIT BIT(4)
-/*
- * A GPIO is used for card hotplug detection. We need an extra flag for this,
- * because 0 is a valid GPIO number too, and requiring users to specify
- * cd_gpio < 0 to disable GPIO hotplug would break backwards compatibility.
- */
-#define TMIO_MMC_USE_GPIO_CD BIT(5)
/*
- * Some controllers doesn't have over 0x100 register.
- * it is used to checking accessibility of
- * CTL_SD_CARD_CLK_CTL / CTL_CLK_AND_WAIT_CTL
+ * Use the busy timeout feature. Probably all TMIO versions support it. Yet,
+ * we don't have documentation for old variants, so we enable only known good
+ * variants with this flag. Can be removed once all variants are known good.
*/
-#define TMIO_MMC_HAVE_HIGH_REG BIT(6)
+#define TMIO_MMC_USE_BUSY_TIMEOUT BIT(5)
/*
* Some controllers have CMD12 automatically
@@ -107,10 +81,8 @@
*/
#define TMIO_MMC_CLK_ACTUAL BIT(10)
-int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
-int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
-void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state);
-void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state);
+/* Some controllers have a CBSY bit */
+#define TMIO_MMC_HAVE_CBSY BIT(11)
struct dma_chan;
@@ -125,9 +97,9 @@ struct tmio_mmc_data {
unsigned long capabilities2;
unsigned long flags;
u32 ocr_mask; /* available voltages */
- unsigned int cd_gpio;
- int alignment_shift;
dma_addr_t dma_rx_offset;
+ unsigned int max_blk_count;
+ unsigned short max_segs;
void (*set_pwr)(struct platform_device *host, int state);
void (*set_clk_div)(struct platform_device *host, int state);
};
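
For completeness, a sketch of platform code opting a known-good controller into the busy-timeout handling introduced above; the pairing with TMIO_MMC_HAS_IDLE_WAIT is an example, not a requirement.

#include <linux/mfd/tmio.h>

static struct tmio_mmc_data example_tmio_pdata = {
	.flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_BUSY_TIMEOUT,
};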
diff --git a/include/linux/mfd/tps6105x.h b/include/linux/mfd/tps6105x.h
index 8bc51180800a..b1313411ef09 100644
--- a/include/linux/mfd/tps6105x.h
+++ b/include/linux/mfd/tps6105x.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 ST-Ericsson SA
* Written on behalf of Linaro for ST-Ericsson
*
* Author: Linus Walleij <linus.walleij@linaro.org>
- *
- * License terms: GNU General Public License (GPL) version 2
*/
#ifndef MFD_TPS6105X_H
#define MFD_TPS6105X_H
diff --git a/include/linux/i2c/tps65010.h b/include/linux/mfd/tps65010.h
index 08aa92278d71..a1fb9bc5311d 100644
--- a/include/linux/i2c/tps65010.h
+++ b/include/linux/mfd/tps65010.h
@@ -1,4 +1,4 @@
-/* linux/i2c/tps65010.h
+/* linux/mfd/tps65010.h
*
* Functions to access TPS65010 power management device.
*
diff --git a/include/linux/mfd/tps65086.h b/include/linux/mfd/tps65086.h
index a228ae4c88d9..16f87cccc003 100644
--- a/include/linux/mfd/tps65086.h
+++ b/include/linux/mfd/tps65086.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the TPS65912 driver
*/
diff --git a/include/linux/mfd/tps65090.h b/include/linux/mfd/tps65090.h
index 67d144b3b8f9..44ebcc4d8f01 100644
--- a/include/linux/mfd/tps65090.h
+++ b/include/linux/mfd/tps65090.h
@@ -1,22 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Core driver interface for TI TPS65090 PMIC family
*
* Copyright (C) 2012 NVIDIA Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
*/
#ifndef __LINUX_MFD_TPS65090_H
@@ -83,6 +69,8 @@ enum {
#define TPS65090_MAX_REG TPS65090_REG_AD_OUT2
#define TPS65090_NUM_REGS (TPS65090_MAX_REG + 1)
+struct gpio_desc;
+
struct tps65090 {
struct device *dev;
struct regmap *rmap;
@@ -95,8 +83,8 @@ struct tps65090 {
* @reg_init_data: The regulator init data.
* @enable_ext_control: Enable extrenal control or not. Only available for
* DCDC1, DCDC2 and DCDC3.
- * @gpio: Gpio number if external control is enabled and controlled through
- * gpio.
+ * @gpiod: GPIO descriptor if external control is enabled and controlled
+ * through a GPIO.
* @overcurrent_wait_valid: True if the overcurrent_wait should be applied.
* @overcurrent_wait: Value to set as the overcurrent wait time. This is the
* actual bitfield value, not a time in ms (valid value are 0 - 3).
@@ -104,7 +92,7 @@ struct tps65090 {
struct tps65090_regulator_plat_data {
struct regulator_init_data *reg_init_data;
bool enable_ext_control;
- int gpio;
+ struct gpio_desc *gpiod;
bool overcurrent_wait_valid;
int overcurrent_wait;
};
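
A sketch of the platform-data side of the gpio -> gpiod conversion above; how the descriptor is obtained (board file, gpiod lookup table) is left open, and the helper name is hypothetical.

#include <linux/gpio/consumer.h>
#include <linux/mfd/tps65090.h>

static void example_fill_pdata(struct tps65090_regulator_plat_data *pdata,
			       struct gpio_desc *gpiod)
{
	pdata->enable_ext_control = true;
	pdata->gpiod = gpiod;	/* formerly an integer GPIO number */
}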
diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h
index eac285756b37..877d9c41c53d 100644
--- a/include/linux/mfd/tps65217.h
+++ b/include/linux/mfd/tps65217.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/mfd/tps65217.h
*
* Functions to access TPS65217 power management chip.
*
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __LINUX_MFD_TPS65217_H
@@ -263,7 +255,6 @@ struct tps65217_board {
struct tps65217 {
struct device *dev;
struct tps65217_board *pdata;
- unsigned long id;
struct regulator_desc desc[TPS65217_NUM_REGULATOR];
struct regmap *regmap;
u8 *strobes;
@@ -278,11 +269,6 @@ static inline struct tps65217 *dev_to_tps65217(struct device *dev)
return dev_get_drvdata(dev);
}
-static inline unsigned long tps65217_chip_id(struct tps65217 *tps65217)
-{
- return tps65217->id;
-}
-
int tps65217_reg_read(struct tps65217 *tps, unsigned int reg,
unsigned int *val);
int tps65217_reg_write(struct tps65217 *tps, unsigned int reg,
diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h
index bccd2d68b1e3..2946be2f15f3 100644
--- a/include/linux/mfd/tps65218.h
+++ b/include/linux/mfd/tps65218.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/mfd/tps65218.h
*
- * Functions to access TPS65219 power management chip.
+ * Functions to access TPS65218 power management chip.
*
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __LINUX_MFD_TPS65218_H
@@ -137,6 +129,10 @@
#define TPS65218_CONFIG1_PGDLY_MASK 0x18
#define TPS65218_CONFIG1_STRICT BIT(2)
#define TPS65218_CONFIG1_UVLO_MASK 0x3
+#define TPS65218_CONFIG1_UVLO_2750000 0x0
+#define TPS65218_CONFIG1_UVLO_2950000 0x1
+#define TPS65218_CONFIG1_UVLO_3250000 0x2
+#define TPS65218_CONFIG1_UVLO_3350000 0x3
#define TPS65218_CONFIG2_DC12_RST BIT(7)
#define TPS65218_CONFIG2_UVLOHYS BIT(6)
@@ -205,10 +201,11 @@ enum tps65218_regulator_id {
TPS65218_DCDC_4,
TPS65218_DCDC_5,
TPS65218_DCDC_6,
- /* LS's */
- TPS65218_LS_3,
/* LDOs */
TPS65218_LDO_1,
+ /* LS's */
+ TPS65218_LS_2,
+ TPS65218_LS_3,
};
#define TPS65218_MAX_REG_ID TPS65218_LDO_1
@@ -218,7 +215,7 @@ enum tps65218_regulator_id {
/* Number of LDO voltage regulators available */
#define TPS65218_NUM_LDO 1
/* Number of total LS current regulators available */
-#define TPS65218_NUM_LS 1
+#define TPS65218_NUM_LS 2
/* Number of total regulators available */
#define TPS65218_NUM_REGULATOR (TPS65218_NUM_DCDC + TPS65218_NUM_LDO \
+ TPS65218_NUM_LS)
@@ -246,24 +243,6 @@ enum tps65218_irqs {
};
/**
- * struct tps_info - packages regulator constraints
- * @id: Id of the regulator
- * @name: Voltage regulator name
- * @min_uV: minimum micro volts
- * @max_uV: minimum micro volts
- * @strobe: sequencing strobe value for the regulator
- *
- * This data is used to check the regualtor voltage limits while setting.
- */
-struct tps_info {
- int id;
- const char *name;
- int min_uV;
- int max_uV;
- int strobe;
-};
-
-/**
* struct tps65218 - tps65218 sub-driver chip access routines
*
* Device data may be used to access the TPS65218 chip
@@ -280,7 +259,6 @@ struct tps65218 {
u32 irq_mask;
struct regmap_irq_chip_data *irq_data;
struct regulator_desc desc[TPS65218_NUM_REGULATOR];
- struct tps_info *info[TPS65218_NUM_REGULATOR];
struct regmap *regmap;
u8 *strobes;
};
diff --git a/include/linux/mfd/tps65219.h b/include/linux/mfd/tps65219.h
new file mode 100644
index 000000000000..e6826e34e2a6
--- /dev/null
+++ b/include/linux/mfd/tps65219.h
@@ -0,0 +1,345 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Functions to access TPS65219 Power Management IC.
+ *
+ * Copyright (C) 2022 BayLibre Incorporated - https://www.baylibre.com/
+ */
+
+#ifndef MFD_TPS65219_H
+#define MFD_TPS65219_H
+
+#include <linux/bitops.h>
+#include <linux/notifier.h>
+#include <linux/regulator/driver.h>
+
+struct regmap;
+struct regmap_irq_chip_data;
+
+#define TPS65219_1V35 1350000
+#define TPS65219_1V8 1800000
+
+/* TPS chip id list */
+#define TPS65219 0xF0
+
+/* I2C ID for TPS65219 part */
+#define TPS65219_I2C_ID 0x24
+
+/* All register addresses */
+#define TPS65219_REG_TI_DEV_ID 0x00
+#define TPS65219_REG_NVM_ID 0x01
+#define TPS65219_REG_ENABLE_CTRL 0x02
+#define TPS65219_REG_BUCKS_CONFIG 0x03
+#define TPS65219_REG_LDO4_VOUT 0x04
+#define TPS65219_REG_LDO3_VOUT 0x05
+#define TPS65219_REG_LDO2_VOUT 0x06
+#define TPS65219_REG_LDO1_VOUT 0x07
+#define TPS65219_REG_BUCK3_VOUT 0x08
+#define TPS65219_REG_BUCK2_VOUT 0x09
+#define TPS65219_REG_BUCK1_VOUT 0x0A
+#define TPS65219_REG_LDO4_SEQUENCE_SLOT 0x0B
+#define TPS65219_REG_LDO3_SEQUENCE_SLOT 0x0C
+#define TPS65219_REG_LDO2_SEQUENCE_SLOT 0x0D
+#define TPS65219_REG_LDO1_SEQUENCE_SLOT 0x0E
+#define TPS65219_REG_BUCK3_SEQUENCE_SLOT 0xF
+#define TPS65219_REG_BUCK2_SEQUENCE_SLOT 0x10
+#define TPS65219_REG_BUCK1_SEQUENCE_SLOT 0x11
+#define TPS65219_REG_nRST_SEQUENCE_SLOT 0x12
+#define TPS65219_REG_GPIO_SEQUENCE_SLOT 0x13
+#define TPS65219_REG_GPO2_SEQUENCE_SLOT 0x14
+#define TPS65219_REG_GPO1_SEQUENCE_SLOT 0x15
+#define TPS65219_REG_POWER_UP_SLOT_DURATION_1 0x16
+#define TPS65219_REG_POWER_UP_SLOT_DURATION_2 0x17
+#define TPS65219_REG_POWER_UP_SLOT_DURATION_3 0x18
+#define TPS65219_REG_POWER_UP_SLOT_DURATION_4 0x19
+#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_1 0x1A
+#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_2 0x1B
+#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_3 0x1C
+#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_4 0x1D
+#define TPS65219_REG_GENERAL_CONFIG 0x1E
+#define TPS65219_REG_MFP_1_CONFIG 0x1F
+#define TPS65219_REG_MFP_2_CONFIG 0x20
+#define TPS65219_REG_STBY_1_CONFIG 0x21
+#define TPS65219_REG_STBY_2_CONFIG 0x22
+#define TPS65219_REG_OC_DEGL_CONFIG 0x23
+/* 'sub irq' MASK registers */
+#define TPS65219_REG_INT_MASK_UV 0x24
+#define TPS65219_REG_MASK_CONFIG 0x25
+
+#define TPS65219_REG_I2C_ADDRESS_REG 0x26
+#define TPS65219_REG_USER_GENERAL_NVM_STORAGE 0x27
+#define TPS65219_REG_MANUFACTURING_VER 0x28
+#define TPS65219_REG_MFP_CTRL 0x29
+#define TPS65219_REG_DISCHARGE_CONFIG 0x2A
+/* main irq registers */
+#define TPS65219_REG_INT_SOURCE 0x2B
+/* 'sub irq' registers */
+#define TPS65219_REG_INT_LDO_3_4 0x2C
+#define TPS65219_REG_INT_LDO_1_2 0x2D
+#define TPS65219_REG_INT_BUCK_3 0x2E
+#define TPS65219_REG_INT_BUCK_1_2 0x2F
+#define TPS65219_REG_INT_SYSTEM 0x30
+#define TPS65219_REG_INT_RV 0x31
+#define TPS65219_REG_INT_TIMEOUT_RV_SD 0x32
+#define TPS65219_REG_INT_PB 0x33
+
+#define TPS65219_REG_INT_LDO_3_4_POS 0
+#define TPS65219_REG_INT_LDO_1_2_POS 1
+#define TPS65219_REG_INT_BUCK_3_POS 2
+#define TPS65219_REG_INT_BUCK_1_2_POS 3
+#define TPS65219_REG_INT_SYS_POS 4
+#define TPS65219_REG_INT_RV_POS 5
+#define TPS65219_REG_INT_TO_RV_POS 6
+#define TPS65219_REG_INT_PB_POS 7
+
+#define TPS65219_REG_USER_NVM_CMD 0x34
+#define TPS65219_REG_POWER_UP_STATUS 0x35
+#define TPS65219_REG_SPARE_2 0x36
+#define TPS65219_REG_SPARE_3 0x37
+#define TPS65219_REG_FACTORY_CONFIG_2 0x41
+
+/* Register field definitions */
+#define TPS65219_DEVID_REV_MASK GENMASK(7, 0)
+#define TPS65219_BUCKS_LDOS_VOUT_VSET_MASK GENMASK(5, 0)
+#define TPS65219_BUCKS_UV_THR_SEL_MASK BIT(6)
+#define TPS65219_BUCKS_BW_SEL_MASK BIT(7)
+#define LDO_BYP_SHIFT 6
+#define TPS65219_LDOS_BYP_CONFIG_MASK BIT(LDO_BYP_SHIFT)
+#define TPS65219_LDOS_LSW_CONFIG_MASK BIT(7)
+/* Regulators enable control */
+#define TPS65219_ENABLE_BUCK1_EN_MASK BIT(0)
+#define TPS65219_ENABLE_BUCK2_EN_MASK BIT(1)
+#define TPS65219_ENABLE_BUCK3_EN_MASK BIT(2)
+#define TPS65219_ENABLE_LDO1_EN_MASK BIT(3)
+#define TPS65219_ENABLE_LDO2_EN_MASK BIT(4)
+#define TPS65219_ENABLE_LDO3_EN_MASK BIT(5)
+#define TPS65219_ENABLE_LDO4_EN_MASK BIT(6)
+/* power ON-OFF sequence slot */
+#define TPS65219_BUCKS_LDOS_SEQUENCE_OFF_SLOT_MASK GENMASK(3, 0)
+#define TPS65219_BUCKS_LDOS_SEQUENCE_ON_SLOT_MASK GENMASK(7, 4)
+/* TODO: Not needed, same mapping as TPS65219_ENABLE_REGNAME_EN, factorize */
+#define TPS65219_STBY1_BUCK1_STBY_EN_MASK BIT(0)
+#define TPS65219_STBY1_BUCK2_STBY_EN_MASK BIT(1)
+#define TPS65219_STBY1_BUCK3_STBY_EN_MASK BIT(2)
+#define TPS65219_STBY1_LDO1_STBY_EN_MASK BIT(3)
+#define TPS65219_STBY1_LDO2_STBY_EN_MASK BIT(4)
+#define TPS65219_STBY1_LDO3_STBY_EN_MASK BIT(5)
+#define TPS65219_STBY1_LDO4_STBY_EN_MASK BIT(6)
+/* STBY_2 config */
+#define TPS65219_STBY2_GPO1_STBY_EN_MASK BIT(0)
+#define TPS65219_STBY2_GPO2_STBY_EN_MASK BIT(1)
+#define TPS65219_STBY2_GPIO_STBY_EN_MASK BIT(2)
+/* MFP Control */
+#define TPS65219_MFP_I2C_OFF_REQ_MASK BIT(0)
+#define TPS65219_MFP_STBY_I2C_CTRL_MASK BIT(1)
+#define TPS65219_MFP_COLD_RESET_I2C_CTRL_MASK BIT(2)
+#define TPS65219_MFP_WARM_RESET_I2C_CTRL_MASK BIT(3)
+#define TPS65219_MFP_GPIO_STATUS_MASK BIT(4)
+/* MFP_1 Config */
+#define TPS65219_MFP_1_VSEL_DDR_SEL_MASK BIT(0)
+#define TPS65219_MFP_1_VSEL_SD_POL_MASK BIT(1)
+#define TPS65219_MFP_1_VSEL_RAIL_MASK BIT(2)
+/* MFP_2 Config */
+#define TPS65219_MFP_2_MODE_STBY_MASK GENMASK(1, 0)
+#define TPS65219_MFP_2_MODE_RESET_MASK BIT(2)
+#define TPS65219_MFP_2_EN_PB_VSENSE_DEGL_MASK BIT(3)
+#define TPS65219_MFP_2_EN_PB_VSENSE_MASK GENMASK(5, 4)
+#define TPS65219_MFP_2_WARM_COLD_RESET_MASK BIT(6)
+#define TPS65219_MFP_2_PU_ON_FSD_MASK BIT(7)
+#define TPS65219_MFP_2_EN 0
+#define TPS65219_MFP_2_PB BIT(4)
+#define TPS65219_MFP_2_VSENSE BIT(5)
+/* MASK_UV Config */
+#define TPS65219_REG_MASK_UV_LDO1_UV_MASK BIT(0)
+#define TPS65219_REG_MASK_UV_LDO2_UV_MASK BIT(1)
+#define TPS65219_REG_MASK_UV_LDO3_UV_MASK BIT(2)
+#define TPS65219_REG_MASK_UV_LDO4_UV_MASK BIT(3)
+#define TPS65219_REG_MASK_UV_BUCK1_UV_MASK BIT(4)
+#define TPS65219_REG_MASK_UV_BUCK2_UV_MASK BIT(5)
+#define TPS65219_REG_MASK_UV_BUCK3_UV_MASK BIT(6)
+#define TPS65219_REG_MASK_UV_RETRY_MASK BIT(7)
+/* MASK Config */
+/* SENSOR_N_WARM_MASK already defined in the Thermal section */
+#define TPS65219_REG_MASK_INT_FOR_RV_MASK BIT(4)
+#define TPS65219_REG_MASK_EFFECT_MASK GENMASK(2, 1)
+#define TPS65219_REG_MASK_INT_FOR_PB_MASK BIT(7)
+/* UnderVoltage - Short to GND - OverCurrent */
+/* LDO3-4 */
+#define TPS65219_INT_LDO3_SCG_MASK BIT(0)
+#define TPS65219_INT_LDO3_OC_MASK BIT(1)
+#define TPS65219_INT_LDO3_UV_MASK BIT(2)
+#define TPS65219_INT_LDO4_SCG_MASK BIT(3)
+#define TPS65219_INT_LDO4_OC_MASK BIT(4)
+#define TPS65219_INT_LDO4_UV_MASK BIT(5)
+/* LDO1-2 */
+#define TPS65219_INT_LDO1_SCG_MASK BIT(0)
+#define TPS65219_INT_LDO1_OC_MASK BIT(1)
+#define TPS65219_INT_LDO1_UV_MASK BIT(2)
+#define TPS65219_INT_LDO2_SCG_MASK BIT(3)
+#define TPS65219_INT_LDO2_OC_MASK BIT(4)
+#define TPS65219_INT_LDO2_UV_MASK BIT(5)
+/* BUCK3 */
+#define TPS65219_INT_BUCK3_SCG_MASK BIT(0)
+#define TPS65219_INT_BUCK3_OC_MASK BIT(1)
+#define TPS65219_INT_BUCK3_NEG_OC_MASK BIT(2)
+#define TPS65219_INT_BUCK3_UV_MASK BIT(3)
+/* BUCK1-2 */
+#define TPS65219_INT_BUCK1_SCG_MASK BIT(0)
+#define TPS65219_INT_BUCK1_OC_MASK BIT(1)
+#define TPS65219_INT_BUCK1_NEG_OC_MASK BIT(2)
+#define TPS65219_INT_BUCK1_UV_MASK BIT(3)
+#define TPS65219_INT_BUCK2_SCG_MASK BIT(4)
+#define TPS65219_INT_BUCK2_OC_MASK BIT(5)
+#define TPS65219_INT_BUCK2_NEG_OC_MASK BIT(6)
+#define TPS65219_INT_BUCK2_UV_MASK BIT(7)
+/* Thermal Sensor */
+#define TPS65219_INT_SENSOR_3_WARM_MASK BIT(0)
+#define TPS65219_INT_SENSOR_2_WARM_MASK BIT(1)
+#define TPS65219_INT_SENSOR_1_WARM_MASK BIT(2)
+#define TPS65219_INT_SENSOR_0_WARM_MASK BIT(3)
+#define TPS65219_INT_SENSOR_3_HOT_MASK BIT(4)
+#define TPS65219_INT_SENSOR_2_HOT_MASK BIT(5)
+#define TPS65219_INT_SENSOR_1_HOT_MASK BIT(6)
+#define TPS65219_INT_SENSOR_0_HOT_MASK BIT(7)
+/* Residual Voltage */
+#define TPS65219_INT_BUCK1_RV_MASK BIT(0)
+#define TPS65219_INT_BUCK2_RV_MASK BIT(1)
+#define TPS65219_INT_BUCK3_RV_MASK BIT(2)
+#define TPS65219_INT_LDO1_RV_MASK BIT(3)
+#define TPS65219_INT_LDO2_RV_MASK BIT(4)
+#define TPS65219_INT_LDO3_RV_MASK BIT(5)
+#define TPS65219_INT_LDO4_RV_MASK BIT(6)
+/* Residual Voltage ShutDown */
+#define TPS65219_INT_BUCK1_RV_SD_MASK BIT(0)
+#define TPS65219_INT_BUCK2_RV_SD_MASK BIT(1)
+#define TPS65219_INT_BUCK3_RV_SD_MASK BIT(2)
+#define TPS65219_INT_LDO1_RV_SD_MASK BIT(3)
+#define TPS65219_INT_LDO2_RV_SD_MASK BIT(4)
+#define TPS65219_INT_LDO3_RV_SD_MASK BIT(5)
+#define TPS65219_INT_LDO4_RV_SD_MASK BIT(6)
+#define TPS65219_INT_TIMEOUT_MASK BIT(7)
+/* Power Button */
+#define TPS65219_INT_PB_FALLING_EDGE_DETECT_MASK BIT(0)
+#define TPS65219_INT_PB_RISING_EDGE_DETECT_MASK BIT(1)
+#define TPS65219_INT_PB_REAL_TIME_STATUS_MASK BIT(2)
+
+#define TPS65219_PB_POS 7
+#define TPS65219_TO_RV_POS 6
+#define TPS65219_RV_POS 5
+#define TPS65219_SYS_POS 4
+#define TPS65219_BUCK_1_2_POS 3
+#define TPS65219_BUCK_3_POS 2
+#define TPS65219_LDO_1_2_POS 1
+#define TPS65219_LDO_3_4_POS 0
+
+/* IRQs */
+enum {
+ /* LDO3-4 register IRQs */
+ TPS65219_INT_LDO3_SCG,
+ TPS65219_INT_LDO3_OC,
+ TPS65219_INT_LDO3_UV,
+ TPS65219_INT_LDO4_SCG,
+ TPS65219_INT_LDO4_OC,
+ TPS65219_INT_LDO4_UV,
+ /* LDO1-2 */
+ TPS65219_INT_LDO1_SCG,
+ TPS65219_INT_LDO1_OC,
+ TPS65219_INT_LDO1_UV,
+ TPS65219_INT_LDO2_SCG,
+ TPS65219_INT_LDO2_OC,
+ TPS65219_INT_LDO2_UV,
+ /* BUCK3 */
+ TPS65219_INT_BUCK3_SCG,
+ TPS65219_INT_BUCK3_OC,
+ TPS65219_INT_BUCK3_NEG_OC,
+ TPS65219_INT_BUCK3_UV,
+ /* BUCK1-2 */
+ TPS65219_INT_BUCK1_SCG,
+ TPS65219_INT_BUCK1_OC,
+ TPS65219_INT_BUCK1_NEG_OC,
+ TPS65219_INT_BUCK1_UV,
+ TPS65219_INT_BUCK2_SCG,
+ TPS65219_INT_BUCK2_OC,
+ TPS65219_INT_BUCK2_NEG_OC,
+ TPS65219_INT_BUCK2_UV,
+ /* Thermal Sensor */
+ TPS65219_INT_SENSOR_3_WARM,
+ TPS65219_INT_SENSOR_2_WARM,
+ TPS65219_INT_SENSOR_1_WARM,
+ TPS65219_INT_SENSOR_0_WARM,
+ TPS65219_INT_SENSOR_3_HOT,
+ TPS65219_INT_SENSOR_2_HOT,
+ TPS65219_INT_SENSOR_1_HOT,
+ TPS65219_INT_SENSOR_0_HOT,
+ /* Residual Voltage */
+ TPS65219_INT_BUCK1_RV,
+ TPS65219_INT_BUCK2_RV,
+ TPS65219_INT_BUCK3_RV,
+ TPS65219_INT_LDO1_RV,
+ TPS65219_INT_LDO2_RV,
+ TPS65219_INT_LDO3_RV,
+ TPS65219_INT_LDO4_RV,
+ /* Residual Voltage ShutDown */
+ TPS65219_INT_BUCK1_RV_SD,
+ TPS65219_INT_BUCK2_RV_SD,
+ TPS65219_INT_BUCK3_RV_SD,
+ TPS65219_INT_LDO1_RV_SD,
+ TPS65219_INT_LDO2_RV_SD,
+ TPS65219_INT_LDO3_RV_SD,
+ TPS65219_INT_LDO4_RV_SD,
+ TPS65219_INT_TIMEOUT,
+ /* Power Button */
+ TPS65219_INT_PB_FALLING_EDGE_DETECT,
+ TPS65219_INT_PB_RISING_EDGE_DETECT,
+};
+
+enum tps65219_regulator_id {
+ /* DCDCs */
+ TPS65219_BUCK_1,
+ TPS65219_BUCK_2,
+ TPS65219_BUCK_3,
+ /* LDOs */
+ TPS65219_LDO_1,
+ TPS65219_LDO_2,
+ TPS65219_LDO_3,
+ TPS65219_LDO_4,
+};
+
+/* Number of step-down converters available */
+#define TPS65219_NUM_DCDC 3
+/* Number of LDO voltage regulators available */
+#define TPS65219_NUM_LDO 4
+/* Number of total regulators available */
+#define TPS65219_NUM_REGULATOR (TPS65219_NUM_DCDC + TPS65219_NUM_LDO)
+
+/* Define the TPS65219 IRQ numbers */
+enum tps65219_irqs {
+ /* INT source registers */
+ TPS65219_TO_RV_SD_SET_IRQ,
+ TPS65219_RV_SET_IRQ,
+ TPS65219_SYS_SET_IRQ,
+ TPS65219_BUCK_1_2_SET_IRQ,
+ TPS65219_BUCK_3_SET_IRQ,
+ TPS65219_LDO_1_2_SET_IRQ,
+ TPS65219_LDO_3_4_SET_IRQ,
+ TPS65219_PB_SET_IRQ,
+};
+
+/**
+ * struct tps65219 - tps65219 sub-driver chip access routines
+ *
+ * Device data used by sub-drivers to access the TPS65219 chip
+ *
+ * @dev: MFD device
+ * @regmap: Regmap for accessing the device registers
+ * @irq_data: Regmap irq data used for the irq chip
+ * @nb: notifier block for the restart handler
+ */
+struct tps65219 {
+ struct device *dev;
+ struct regmap *regmap;
+
+ struct regmap_irq_chip_data *irq_data;
+ struct notifier_block nb;
+};
+
+#endif /* MFD_TPS65219_H */
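For orientation, here is a minimal, hypothetical sketch of how a sub-driver
could decode the two-level interrupt layout above by hand: each set bit in the
main INT_SOURCE register (0x2B) selects one of the 'sub irq' registers at
0x2C..0x33. The tps65219_dump_ldo34_irqs() helper below is invented purely for
illustration; the real driver delegates all of this to regmap-irq.

#include <linux/device.h>
#include <linux/mfd/tps65219.h>
#include <linux/regmap.h>

static int tps65219_dump_ldo34_irqs(struct tps65219 *tps)
{
	unsigned int src, sub;
	int ret;

	ret = regmap_read(tps->regmap, TPS65219_REG_INT_SOURCE, &src);
	if (ret)
		return ret;

	/* Bit 0 of INT_SOURCE flags a pending LDO3/LDO4 event */
	if (src & BIT(TPS65219_REG_INT_LDO_3_4_POS)) {
		ret = regmap_read(tps->regmap, TPS65219_REG_INT_LDO_3_4, &sub);
		if (ret)
			return ret;
		if (sub & TPS65219_INT_LDO3_UV_MASK)
			dev_warn(tps->dev, "LDO3 under-voltage\n");
	}

	return 0;
}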
diff --git a/include/linux/mfd/tps6586x.h b/include/linux/mfd/tps6586x.h
index 96187ed9f9bb..b19c2801a30e 100644
--- a/include/linux/mfd/tps6586x.h
+++ b/include/linux/mfd/tps6586x.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MFD_TPS6586X_H
#define __LINUX_MFD_TPS6586X_H
@@ -17,6 +18,7 @@
#define TPS658621A 0x15
#define TPS658621CD 0x2c
#define TPS658623 0x1b
+#define TPS658624 0x0a
#define TPS658640 0x01
#define TPS658640v2 0x02
#define TPS658643 0x03
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
index deffdcd0236f..701925db75b3 100644
--- a/include/linux/mfd/tps65910.h
+++ b/include/linux/mfd/tps65910.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* tps65910.h -- TI TPS6591x
*
@@ -6,12 +7,6 @@
* Author: Graeme Gregory <gg@slimlogic.co.uk>
* Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
* Author: Arnaud Deconinck <a-deconinck@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_TPS65910_H
@@ -895,11 +890,6 @@ struct tps65910 {
struct regmap *regmap;
unsigned long id;
- /* Client devices */
- struct tps65910_pmic *pmic;
- struct tps65910_rtc *rtc;
- struct tps65910_power *power;
-
/* Device node parsed board data */
struct tps65910_board *of_plat_data;
@@ -918,39 +908,4 @@ static inline int tps65910_chip_id(struct tps65910 *tps65910)
return tps65910->id;
}
-static inline int tps65910_reg_read(struct tps65910 *tps65910, u8 reg,
- unsigned int *val)
-{
- return regmap_read(tps65910->regmap, reg, val);
-}
-
-static inline int tps65910_reg_write(struct tps65910 *tps65910, u8 reg,
- unsigned int val)
-{
- return regmap_write(tps65910->regmap, reg, val);
-}
-
-static inline int tps65910_reg_set_bits(struct tps65910 *tps65910, u8 reg,
- u8 mask)
-{
- return regmap_update_bits(tps65910->regmap, reg, mask, mask);
-}
-
-static inline int tps65910_reg_clear_bits(struct tps65910 *tps65910, u8 reg,
- u8 mask)
-{
- return regmap_update_bits(tps65910->regmap, reg, mask, 0);
-}
-
-static inline int tps65910_reg_update_bits(struct tps65910 *tps65910, u8 reg,
- u8 mask, u8 val)
-{
- return regmap_update_bits(tps65910->regmap, reg, mask, val);
-}
-
-static inline int tps65910_irq_get_virq(struct tps65910 *tps65910, int irq)
-{
- return regmap_irq_get_virq(tps65910->irq_data, irq);
-}
-
#endif /* __LINUX_MFD_TPS65910_H */
diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h
index b25d0297ba88..860ec0a16c96 100644
--- a/include/linux/mfd/tps65912.h
+++ b/include/linux/mfd/tps65912.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the TPS65218 driver and the previous TPS65912 driver by
* Margarita Olaya Cabrera <magi@slimlogic.co.uk>
*/
@@ -322,6 +314,6 @@ struct tps65912 {
extern const struct regmap_config tps65912_regmap_config;
int tps65912_device_init(struct tps65912 *tps);
-int tps65912_device_exit(struct tps65912 *tps);
+void tps65912_device_exit(struct tps65912 *tps);
#endif /* __LINUX_MFD_TPS65912_H */
diff --git a/include/linux/mfd/tps68470.h b/include/linux/mfd/tps68470.h
new file mode 100644
index 000000000000..7807fa329db0
--- /dev/null
+++ b/include/linux/mfd/tps68470.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Intel Corporation */
+/* Functions to access TPS68470 power management chip. */
+
+#ifndef __LINUX_MFD_TPS68470_H
+#define __LINUX_MFD_TPS68470_H
+
+/* Register addresses */
+#define TPS68470_REG_POSTDIV2 0x06
+#define TPS68470_REG_BOOSTDIV 0x07
+#define TPS68470_REG_BUCKDIV 0x08
+#define TPS68470_REG_PLLSWR 0x09
+#define TPS68470_REG_XTALDIV 0x0A
+#define TPS68470_REG_PLLDIV 0x0B
+#define TPS68470_REG_POSTDIV 0x0C
+#define TPS68470_REG_PLLCTL 0x0D
+#define TPS68470_REG_PLLCTL2 0x0E
+#define TPS68470_REG_CLKCFG1 0x0F
+#define TPS68470_REG_CLKCFG2 0x10
+#define TPS68470_REG_GPCTL0A 0x14
+#define TPS68470_REG_GPCTL0B 0x15
+#define TPS68470_REG_GPCTL1A 0x16
+#define TPS68470_REG_GPCTL1B 0x17
+#define TPS68470_REG_GPCTL2A 0x18
+#define TPS68470_REG_GPCTL2B 0x19
+#define TPS68470_REG_GPCTL3A 0x1A
+#define TPS68470_REG_GPCTL3B 0x1B
+#define TPS68470_REG_GPCTL4A 0x1C
+#define TPS68470_REG_GPCTL4B 0x1D
+#define TPS68470_REG_GPCTL5A 0x1E
+#define TPS68470_REG_GPCTL5B 0x1F
+#define TPS68470_REG_GPCTL6A 0x20
+#define TPS68470_REG_GPCTL6B 0x21
+#define TPS68470_REG_SGPO 0x22
+#define TPS68470_REG_GPDI 0x26
+#define TPS68470_REG_GPDO 0x27
+#define TPS68470_REG_VCMVAL 0x3C
+#define TPS68470_REG_VAUX1VAL 0x3D
+#define TPS68470_REG_VAUX2VAL 0x3E
+#define TPS68470_REG_VIOVAL 0x3F
+#define TPS68470_REG_VSIOVAL 0x40
+#define TPS68470_REG_VAVAL 0x41
+#define TPS68470_REG_VDVAL 0x42
+#define TPS68470_REG_S_I2C_CTL 0x43
+#define TPS68470_REG_VCMCTL 0x44
+#define TPS68470_REG_VAUX1CTL 0x45
+#define TPS68470_REG_VAUX2CTL 0x46
+#define TPS68470_REG_VACTL 0x47
+#define TPS68470_REG_VDCTL 0x48
+#define TPS68470_REG_RESET 0x50
+#define TPS68470_REG_REVID 0xFF
+
+#define TPS68470_REG_MAX TPS68470_REG_REVID
+
+/* Register field definitions */
+
+#define TPS68470_REG_RESET_MASK GENMASK(7, 0)
+#define TPS68470_VAVAL_AVOLT_MASK GENMASK(6, 0)
+
+#define TPS68470_VDVAL_DVOLT_MASK GENMASK(5, 0)
+#define TPS68470_VCMVAL_VCVOLT_MASK GENMASK(6, 0)
+#define TPS68470_VIOVAL_IOVOLT_MASK GENMASK(6, 0)
+#define TPS68470_VSIOVAL_IOVOLT_MASK GENMASK(6, 0)
+#define TPS68470_VAUX1VAL_AUX1VOLT_MASK GENMASK(6, 0)
+#define TPS68470_VAUX2VAL_AUX2VOLT_MASK GENMASK(6, 0)
+
+#define TPS68470_VACTL_EN_MASK GENMASK(0, 0)
+#define TPS68470_VDCTL_EN_MASK GENMASK(0, 0)
+#define TPS68470_VCMCTL_EN_MASK GENMASK(0, 0)
+#define TPS68470_S_I2C_CTL_EN_MASK GENMASK(1, 0)
+#define TPS68470_VAUX1CTL_EN_MASK GENMASK(0, 0)
+#define TPS68470_VAUX2CTL_EN_MASK GENMASK(0, 0)
+#define TPS68470_PLL_EN_MASK GENMASK(0, 0)
+
+#define TPS68470_CLKCFG1_MODE_A_MASK GENMASK(1, 0)
+#define TPS68470_CLKCFG1_MODE_B_MASK GENMASK(3, 2)
+
+#define TPS68470_CLKCFG2_DRV_STR_2MA 0x05
+#define TPS68470_PLL_OUTPUT_ENABLE 0x02
+#define TPS68470_CLK_SRC_XTAL BIT(0)
+#define TPS68470_PLLSWR_DEFAULT GENMASK(1, 0)
+#define TPS68470_OSC_EXT_CAP_DEFAULT 0x05
+
+#define TPS68470_OUTPUT_A_SHIFT 0x00
+#define TPS68470_OUTPUT_B_SHIFT 0x02
+#define TPS68470_CLK_SRC_SHIFT GENMASK(2, 0)
+#define TPS68470_OSC_EXT_CAP_SHIFT BIT(2)
+
+#define TPS68470_GPIO_CTL_REG_A(x) (TPS68470_REG_GPCTL0A + (x) * 2)
+#define TPS68470_GPIO_CTL_REG_B(x) (TPS68470_REG_GPCTL0B + (x) * 2)
+#define TPS68470_GPIO_MODE_MASK GENMASK(1, 0)
+#define TPS68470_GPIO_MODE_IN 0
+#define TPS68470_GPIO_MODE_IN_PULLUP 1
+#define TPS68470_GPIO_MODE_OUT_CMOS 2
+#define TPS68470_GPIO_MODE_OUT_ODRAIN 3
+
+#endif /* __LINUX_MFD_TPS68470_H */
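A quick note on the GPIO control registers above: the GPCTLxA/GPCTLxB pairs are
interleaved at consecutive offsets, which is why TPS68470_GPIO_CTL_REG_A(x)
resolves to GPCTL0A + 2 * x. A hedged sketch of putting a pin into CMOS output
mode (the helper name is made up for illustration):

#include <linux/mfd/tps68470.h>
#include <linux/regmap.h>

static int tps68470_gpio_out_cmos(struct regmap *map, unsigned int pin)
{
	/* Program the mode field in the pin's A control register */
	return regmap_update_bits(map, TPS68470_GPIO_CTL_REG_A(pin),
				  TPS68470_GPIO_MODE_MASK,
				  TPS68470_GPIO_MODE_OUT_CMOS);
}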
diff --git a/include/linux/mfd/tps80031.h b/include/linux/mfd/tps80031.h
deleted file mode 100644
index 2c75c9c9318f..000000000000
--- a/include/linux/mfd/tps80031.h
+++ /dev/null
@@ -1,637 +0,0 @@
-/*
- * tps80031.h -- TI TPS80031 and TI TPS80032 PMIC driver.
- *
- * Copyright (c) 2012, NVIDIA Corporation.
- *
- * Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307, USA
- */
-
-#ifndef __LINUX_MFD_TPS80031_H
-#define __LINUX_MFD_TPS80031_H
-
-#include <linux/device.h>
-#include <linux/regmap.h>
-
-/* Pull-ups/Pull-downs */
-#define TPS80031_CFG_INPUT_PUPD1 0xF0
-#define TPS80031_CFG_INPUT_PUPD2 0xF1
-#define TPS80031_CFG_INPUT_PUPD3 0xF2
-#define TPS80031_CFG_INPUT_PUPD4 0xF3
-#define TPS80031_CFG_LDO_PD1 0xF4
-#define TPS80031_CFG_LDO_PD2 0xF5
-#define TPS80031_CFG_SMPS_PD 0xF6
-
-/* Real Time Clock */
-#define TPS80031_SECONDS_REG 0x00
-#define TPS80031_MINUTES_REG 0x01
-#define TPS80031_HOURS_REG 0x02
-#define TPS80031_DAYS_REG 0x03
-#define TPS80031_MONTHS_REG 0x04
-#define TPS80031_YEARS_REG 0x05
-#define TPS80031_WEEKS_REG 0x06
-#define TPS80031_ALARM_SECONDS_REG 0x08
-#define TPS80031_ALARM_MINUTES_REG 0x09
-#define TPS80031_ALARM_HOURS_REG 0x0A
-#define TPS80031_ALARM_DAYS_REG 0x0B
-#define TPS80031_ALARM_MONTHS_REG 0x0C
-#define TPS80031_ALARM_YEARS_REG 0x0D
-#define TPS80031_RTC_CTRL_REG 0x10
-#define TPS80031_RTC_STATUS_REG 0x11
-#define TPS80031_RTC_INTERRUPTS_REG 0x12
-#define TPS80031_RTC_COMP_LSB_REG 0x13
-#define TPS80031_RTC_COMP_MSB_REG 0x14
-#define TPS80031_RTC_RESET_STATUS_REG 0x16
-
-/*PMC Master Module */
-#define TPS80031_PHOENIX_START_CONDITION 0x1F
-#define TPS80031_PHOENIX_MSK_TRANSITION 0x20
-#define TPS80031_STS_HW_CONDITIONS 0x21
-#define TPS80031_PHOENIX_LAST_TURNOFF_STS 0x22
-#define TPS80031_VSYSMIN_LO_THRESHOLD 0x23
-#define TPS80031_VSYSMIN_HI_THRESHOLD 0x24
-#define TPS80031_PHOENIX_DEV_ON 0x25
-#define TPS80031_STS_PWR_GRP_STATE 0x27
-#define TPS80031_PH_CFG_VSYSLOW 0x28
-#define TPS80031_PH_STS_BOOT 0x29
-#define TPS80031_PHOENIX_SENS_TRANSITION 0x2A
-#define TPS80031_PHOENIX_SEQ_CFG 0x2B
-#define TPS80031_PRIMARY_WATCHDOG_CFG 0X2C
-#define TPS80031_KEY_PRESS_DUR_CFG 0X2D
-#define TPS80031_SMPS_LDO_SHORT_STS 0x2E
-
-/* PMC Slave Module - Broadcast */
-#define TPS80031_BROADCAST_ADDR_ALL 0x31
-#define TPS80031_BROADCAST_ADDR_REF 0x32
-#define TPS80031_BROADCAST_ADDR_PROV 0x33
-#define TPS80031_BROADCAST_ADDR_CLK_RST 0x34
-
-/* PMC Slave Module SMPS Regulators */
-#define TPS80031_SMPS4_CFG_TRANS 0x41
-#define TPS80031_SMPS4_CFG_STATE 0x42
-#define TPS80031_SMPS4_CFG_VOLTAGE 0x44
-#define TPS80031_VIO_CFG_TRANS 0x47
-#define TPS80031_VIO_CFG_STATE 0x48
-#define TPS80031_VIO_CFG_FORCE 0x49
-#define TPS80031_VIO_CFG_VOLTAGE 0x4A
-#define TPS80031_VIO_CFG_STEP 0x48
-#define TPS80031_SMPS1_CFG_TRANS 0x53
-#define TPS80031_SMPS1_CFG_STATE 0x54
-#define TPS80031_SMPS1_CFG_FORCE 0x55
-#define TPS80031_SMPS1_CFG_VOLTAGE 0x56
-#define TPS80031_SMPS1_CFG_STEP 0x57
-#define TPS80031_SMPS2_CFG_TRANS 0x59
-#define TPS80031_SMPS2_CFG_STATE 0x5A
-#define TPS80031_SMPS2_CFG_FORCE 0x5B
-#define TPS80031_SMPS2_CFG_VOLTAGE 0x5C
-#define TPS80031_SMPS2_CFG_STEP 0x5D
-#define TPS80031_SMPS3_CFG_TRANS 0x65
-#define TPS80031_SMPS3_CFG_STATE 0x66
-#define TPS80031_SMPS3_CFG_VOLTAGE 0x68
-
-/* PMC Slave Module LDO Regulators */
-#define TPS80031_VANA_CFG_TRANS 0x81
-#define TPS80031_VANA_CFG_STATE 0x82
-#define TPS80031_VANA_CFG_VOLTAGE 0x83
-#define TPS80031_LDO2_CFG_TRANS 0x85
-#define TPS80031_LDO2_CFG_STATE 0x86
-#define TPS80031_LDO2_CFG_VOLTAGE 0x87
-#define TPS80031_LDO4_CFG_TRANS 0x89
-#define TPS80031_LDO4_CFG_STATE 0x8A
-#define TPS80031_LDO4_CFG_VOLTAGE 0x8B
-#define TPS80031_LDO3_CFG_TRANS 0x8D
-#define TPS80031_LDO3_CFG_STATE 0x8E
-#define TPS80031_LDO3_CFG_VOLTAGE 0x8F
-#define TPS80031_LDO6_CFG_TRANS 0x91
-#define TPS80031_LDO6_CFG_STATE 0x92
-#define TPS80031_LDO6_CFG_VOLTAGE 0x93
-#define TPS80031_LDOLN_CFG_TRANS 0x95
-#define TPS80031_LDOLN_CFG_STATE 0x96
-#define TPS80031_LDOLN_CFG_VOLTAGE 0x97
-#define TPS80031_LDO5_CFG_TRANS 0x99
-#define TPS80031_LDO5_CFG_STATE 0x9A
-#define TPS80031_LDO5_CFG_VOLTAGE 0x9B
-#define TPS80031_LDO1_CFG_TRANS 0x9D
-#define TPS80031_LDO1_CFG_STATE 0x9E
-#define TPS80031_LDO1_CFG_VOLTAGE 0x9F
-#define TPS80031_LDOUSB_CFG_TRANS 0xA1
-#define TPS80031_LDOUSB_CFG_STATE 0xA2
-#define TPS80031_LDOUSB_CFG_VOLTAGE 0xA3
-#define TPS80031_LDO7_CFG_TRANS 0xA5
-#define TPS80031_LDO7_CFG_STATE 0xA6
-#define TPS80031_LDO7_CFG_VOLTAGE 0xA7
-
-/* PMC Slave Module External Control */
-#define TPS80031_REGEN1_CFG_TRANS 0xAE
-#define TPS80031_REGEN1_CFG_STATE 0xAF
-#define TPS80031_REGEN2_CFG_TRANS 0xB1
-#define TPS80031_REGEN2_CFG_STATE 0xB2
-#define TPS80031_SYSEN_CFG_TRANS 0xB4
-#define TPS80031_SYSEN_CFG_STATE 0xB5
-
-/* PMC Slave Module Internal Control */
-#define TPS80031_NRESPWRON_CFG_TRANS 0xB7
-#define TPS80031_NRESPWRON_CFG_STATE 0xB8
-#define TPS80031_CLK32KAO_CFG_TRANS 0xBA
-#define TPS80031_CLK32KAO_CFG_STATE 0xBB
-#define TPS80031_CLK32KG_CFG_TRANS 0xBD
-#define TPS80031_CLK32KG_CFG_STATE 0xBE
-#define TPS80031_CLK32KAUDIO_CFG_TRANS 0xC0
-#define TPS80031_CLK32KAUDIO_CFG_STATE 0xC1
-#define TPS80031_VRTC_CFG_TRANS 0xC3
-#define TPS80031_VRTC_CFG_STATE 0xC4
-#define TPS80031_BIAS_CFG_TRANS 0xC6
-#define TPS80031_BIAS_CFG_STATE 0xC7
-#define TPS80031_VSYSMIN_HI_CFG_TRANS 0xC9
-#define TPS80031_VSYSMIN_HI_CFG_STATE 0xCA
-#define TPS80031_RC6MHZ_CFG_TRANS 0xCC
-#define TPS80031_RC6MHZ_CFG_STATE 0xCD
-#define TPS80031_TMP_CFG_TRANS 0xCF
-#define TPS80031_TMP_CFG_STATE 0xD0
-
-/* PMC Slave Module resources assignment */
-#define TPS80031_PREQ1_RES_ASS_A 0xD7
-#define TPS80031_PREQ1_RES_ASS_B 0xD8
-#define TPS80031_PREQ1_RES_ASS_C 0xD9
-#define TPS80031_PREQ2_RES_ASS_A 0xDA
-#define TPS80031_PREQ2_RES_ASS_B 0xDB
-#define TPS80031_PREQ2_RES_ASS_C 0xDC
-#define TPS80031_PREQ3_RES_ASS_A 0xDD
-#define TPS80031_PREQ3_RES_ASS_B 0xDE
-#define TPS80031_PREQ3_RES_ASS_C 0xDF
-
-/* PMC Slave Module Miscellaneous */
-#define TPS80031_SMPS_OFFSET 0xE0
-#define TPS80031_SMPS_MULT 0xE3
-#define TPS80031_MISC1 0xE4
-#define TPS80031_MISC2 0xE5
-#define TPS80031_BBSPOR_CFG 0xE6
-#define TPS80031_TMP_CFG 0xE7
-
-/* Battery Charging Controller and Indicator LED */
-#define TPS80031_CONTROLLER_CTRL2 0xDA
-#define TPS80031_CONTROLLER_VSEL_COMP 0xDB
-#define TPS80031_CHARGERUSB_VSYSREG 0xDC
-#define TPS80031_CHARGERUSB_VICHRG_PC 0xDD
-#define TPS80031_LINEAR_CHRG_STS 0xDE
-#define TPS80031_CONTROLLER_INT_MASK 0xE0
-#define TPS80031_CONTROLLER_CTRL1 0xE1
-#define TPS80031_CONTROLLER_WDG 0xE2
-#define TPS80031_CONTROLLER_STAT1 0xE3
-#define TPS80031_CHARGERUSB_INT_STATUS 0xE4
-#define TPS80031_CHARGERUSB_INT_MASK 0xE5
-#define TPS80031_CHARGERUSB_STATUS_INT1 0xE6
-#define TPS80031_CHARGERUSB_STATUS_INT2 0xE7
-#define TPS80031_CHARGERUSB_CTRL1 0xE8
-#define TPS80031_CHARGERUSB_CTRL2 0xE9
-#define TPS80031_CHARGERUSB_CTRL3 0xEA
-#define TPS80031_CHARGERUSB_STAT1 0xEB
-#define TPS80031_CHARGERUSB_VOREG 0xEC
-#define TPS80031_CHARGERUSB_VICHRG 0xED
-#define TPS80031_CHARGERUSB_CINLIMIT 0xEE
-#define TPS80031_CHARGERUSB_CTRLLIMIT1 0xEF
-#define TPS80031_CHARGERUSB_CTRLLIMIT2 0xF0
-#define TPS80031_LED_PWM_CTRL1 0xF4
-#define TPS80031_LED_PWM_CTRL2 0xF5
-
-/* USB On-The-Go */
-#define TPS80031_BACKUP_REG 0xFA
-#define TPS80031_USB_VENDOR_ID_LSB 0x00
-#define TPS80031_USB_VENDOR_ID_MSB 0x01
-#define TPS80031_USB_PRODUCT_ID_LSB 0x02
-#define TPS80031_USB_PRODUCT_ID_MSB 0x03
-#define TPS80031_USB_VBUS_CTRL_SET 0x04
-#define TPS80031_USB_VBUS_CTRL_CLR 0x05
-#define TPS80031_USB_ID_CTRL_SET 0x06
-#define TPS80031_USB_ID_CTRL_CLR 0x07
-#define TPS80031_USB_VBUS_INT_SRC 0x08
-#define TPS80031_USB_VBUS_INT_LATCH_SET 0x09
-#define TPS80031_USB_VBUS_INT_LATCH_CLR 0x0A
-#define TPS80031_USB_VBUS_INT_EN_LO_SET 0x0B
-#define TPS80031_USB_VBUS_INT_EN_LO_CLR 0x0C
-#define TPS80031_USB_VBUS_INT_EN_HI_SET 0x0D
-#define TPS80031_USB_VBUS_INT_EN_HI_CLR 0x0E
-#define TPS80031_USB_ID_INT_SRC 0x0F
-#define TPS80031_USB_ID_INT_LATCH_SET 0x10
-#define TPS80031_USB_ID_INT_LATCH_CLR 0x11
-#define TPS80031_USB_ID_INT_EN_LO_SET 0x12
-#define TPS80031_USB_ID_INT_EN_LO_CLR 0x13
-#define TPS80031_USB_ID_INT_EN_HI_SET 0x14
-#define TPS80031_USB_ID_INT_EN_HI_CLR 0x15
-#define TPS80031_USB_OTG_ADP_CTRL 0x16
-#define TPS80031_USB_OTG_ADP_HIGH 0x17
-#define TPS80031_USB_OTG_ADP_LOW 0x18
-#define TPS80031_USB_OTG_ADP_RISE 0x19
-#define TPS80031_USB_OTG_REVISION 0x1A
-
-/* Gas Gauge */
-#define TPS80031_FG_REG_00 0xC0
-#define TPS80031_FG_REG_01 0xC1
-#define TPS80031_FG_REG_02 0xC2
-#define TPS80031_FG_REG_03 0xC3
-#define TPS80031_FG_REG_04 0xC4
-#define TPS80031_FG_REG_05 0xC5
-#define TPS80031_FG_REG_06 0xC6
-#define TPS80031_FG_REG_07 0xC7
-#define TPS80031_FG_REG_08 0xC8
-#define TPS80031_FG_REG_09 0xC9
-#define TPS80031_FG_REG_10 0xCA
-#define TPS80031_FG_REG_11 0xCB
-
-/* General Purpose ADC */
-#define TPS80031_GPADC_CTRL 0x2E
-#define TPS80031_GPADC_CTRL2 0x2F
-#define TPS80031_RTSELECT_LSB 0x32
-#define TPS80031_RTSELECT_ISB 0x33
-#define TPS80031_RTSELECT_MSB 0x34
-#define TPS80031_GPSELECT_ISB 0x35
-#define TPS80031_CTRL_P1 0x36
-#define TPS80031_RTCH0_LSB 0x37
-#define TPS80031_RTCH0_MSB 0x38
-#define TPS80031_RTCH1_LSB 0x39
-#define TPS80031_RTCH1_MSB 0x3A
-#define TPS80031_GPCH0_LSB 0x3B
-#define TPS80031_GPCH0_MSB 0x3C
-
-/* SIM, MMC and Battery Detection */
-#define TPS80031_SIMDEBOUNCING 0xEB
-#define TPS80031_SIMCTRL 0xEC
-#define TPS80031_MMCDEBOUNCING 0xED
-#define TPS80031_MMCCTRL 0xEE
-#define TPS80031_BATDEBOUNCING 0xEF
-
-/* Vibrator Driver and PWMs */
-#define TPS80031_VIBCTRL 0x9B
-#define TPS80031_VIBMODE 0x9C
-#define TPS80031_PWM1ON 0xBA
-#define TPS80031_PWM1OFF 0xBB
-#define TPS80031_PWM2ON 0xBD
-#define TPS80031_PWM2OFF 0xBE
-
-/* Control Interface */
-#define TPS80031_INT_STS_A 0xD0
-#define TPS80031_INT_STS_B 0xD1
-#define TPS80031_INT_STS_C 0xD2
-#define TPS80031_INT_MSK_LINE_A 0xD3
-#define TPS80031_INT_MSK_LINE_B 0xD4
-#define TPS80031_INT_MSK_LINE_C 0xD5
-#define TPS80031_INT_MSK_STS_A 0xD6
-#define TPS80031_INT_MSK_STS_B 0xD7
-#define TPS80031_INT_MSK_STS_C 0xD8
-#define TPS80031_TOGGLE1 0x90
-#define TPS80031_TOGGLE2 0x91
-#define TPS80031_TOGGLE3 0x92
-#define TPS80031_PWDNSTATUS1 0x93
-#define TPS80031_PWDNSTATUS2 0x94
-#define TPS80031_VALIDITY0 0x17
-#define TPS80031_VALIDITY1 0x18
-#define TPS80031_VALIDITY2 0x19
-#define TPS80031_VALIDITY3 0x1A
-#define TPS80031_VALIDITY4 0x1B
-#define TPS80031_VALIDITY5 0x1C
-#define TPS80031_VALIDITY6 0x1D
-#define TPS80031_VALIDITY7 0x1E
-
-/* Version number related register */
-#define TPS80031_JTAGVERNUM 0x87
-#define TPS80031_EPROM_REV 0xDF
-
-/* GPADC Trimming Bits. */
-#define TPS80031_GPADC_TRIM0 0xCC
-#define TPS80031_GPADC_TRIM1 0xCD
-#define TPS80031_GPADC_TRIM2 0xCE
-#define TPS80031_GPADC_TRIM3 0xCF
-#define TPS80031_GPADC_TRIM4 0xD0
-#define TPS80031_GPADC_TRIM5 0xD1
-#define TPS80031_GPADC_TRIM6 0xD2
-#define TPS80031_GPADC_TRIM7 0xD3
-#define TPS80031_GPADC_TRIM8 0xD4
-#define TPS80031_GPADC_TRIM9 0xD5
-#define TPS80031_GPADC_TRIM10 0xD6
-#define TPS80031_GPADC_TRIM11 0xD7
-#define TPS80031_GPADC_TRIM12 0xD8
-#define TPS80031_GPADC_TRIM13 0xD9
-#define TPS80031_GPADC_TRIM14 0xDA
-#define TPS80031_GPADC_TRIM15 0xDB
-#define TPS80031_GPADC_TRIM16 0xDC
-#define TPS80031_GPADC_TRIM17 0xDD
-#define TPS80031_GPADC_TRIM18 0xDE
-
-/* TPS80031_CONTROLLER_STAT1 bit fields */
-#define TPS80031_CONTROLLER_STAT1_BAT_TEMP 0
-#define TPS80031_CONTROLLER_STAT1_BAT_REMOVED 1
-#define TPS80031_CONTROLLER_STAT1_VBUS_DET 2
-#define TPS80031_CONTROLLER_STAT1_VAC_DET 3
-#define TPS80031_CONTROLLER_STAT1_FAULT_WDG 4
-#define TPS80031_CONTROLLER_STAT1_LINCH_GATED 6
-/* TPS80031_CONTROLLER_INT_MASK bit filed */
-#define TPS80031_CONTROLLER_INT_MASK_MVAC_DET 0
-#define TPS80031_CONTROLLER_INT_MASK_MVBUS_DET 1
-#define TPS80031_CONTROLLER_INT_MASK_MBAT_TEMP 2
-#define TPS80031_CONTROLLER_INT_MASK_MFAULT_WDG 3
-#define TPS80031_CONTROLLER_INT_MASK_MBAT_REMOVED 4
-#define TPS80031_CONTROLLER_INT_MASK_MLINCH_GATED 5
-
-#define TPS80031_CHARGE_CONTROL_SUB_INT_MASK 0x3F
-
-/* TPS80031_PHOENIX_DEV_ON bit field */
-#define TPS80031_DEVOFF 0x1
-
-#define TPS80031_EXT_CONTROL_CFG_TRANS 0
-#define TPS80031_EXT_CONTROL_CFG_STATE 1
-
-/* State register field */
-#define TPS80031_STATE_OFF 0x00
-#define TPS80031_STATE_ON 0x01
-#define TPS80031_STATE_MASK 0x03
-
-/* Trans register field */
-#define TPS80031_TRANS_ACTIVE_OFF 0x00
-#define TPS80031_TRANS_ACTIVE_ON 0x01
-#define TPS80031_TRANS_ACTIVE_MASK 0x03
-#define TPS80031_TRANS_SLEEP_OFF 0x00
-#define TPS80031_TRANS_SLEEP_ON 0x04
-#define TPS80031_TRANS_SLEEP_MASK 0x0C
-#define TPS80031_TRANS_OFF_OFF 0x00
-#define TPS80031_TRANS_OFF_ACTIVE 0x10
-#define TPS80031_TRANS_OFF_MASK 0x30
-
-#define TPS80031_EXT_PWR_REQ (TPS80031_PWR_REQ_INPUT_PREQ1 | \
- TPS80031_PWR_REQ_INPUT_PREQ2 | \
- TPS80031_PWR_REQ_INPUT_PREQ3)
-
-/* TPS80031_BBSPOR_CFG bit field */
-#define TPS80031_BBSPOR_CHG_EN 0x8
-#define TPS80031_MAX_REGISTER 0xFF
-
-struct i2c_client;
-
-/* Supported chips */
-enum chips {
- TPS80031 = 0x00000001,
- TPS80032 = 0x00000002,
-};
-
-enum {
- TPS80031_INT_PWRON,
- TPS80031_INT_RPWRON,
- TPS80031_INT_SYS_VLOW,
- TPS80031_INT_RTC_ALARM,
- TPS80031_INT_RTC_PERIOD,
- TPS80031_INT_HOT_DIE,
- TPS80031_INT_VXX_SHORT,
- TPS80031_INT_SPDURATION,
- TPS80031_INT_WATCHDOG,
- TPS80031_INT_BAT,
- TPS80031_INT_SIM,
- TPS80031_INT_MMC,
- TPS80031_INT_RES,
- TPS80031_INT_GPADC_RT,
- TPS80031_INT_GPADC_SW2_EOC,
- TPS80031_INT_CC_AUTOCAL,
- TPS80031_INT_ID_WKUP,
- TPS80031_INT_VBUSS_WKUP,
- TPS80031_INT_ID,
- TPS80031_INT_VBUS,
- TPS80031_INT_CHRG_CTRL,
- TPS80031_INT_EXT_CHRG,
- TPS80031_INT_INT_CHRG,
- TPS80031_INT_RES2,
- TPS80031_INT_BAT_TEMP_OVRANGE,
- TPS80031_INT_BAT_REMOVED,
- TPS80031_INT_VBUS_DET,
- TPS80031_INT_VAC_DET,
- TPS80031_INT_FAULT_WDG,
- TPS80031_INT_LINCH_GATED,
-
- /* Last interrupt id to get the end number */
- TPS80031_INT_NR,
-};
-
-/* TPS80031 Slave IDs */
-#define TPS80031_NUM_SLAVES 4
-#define TPS80031_SLAVE_ID0 0
-#define TPS80031_SLAVE_ID1 1
-#define TPS80031_SLAVE_ID2 2
-#define TPS80031_SLAVE_ID3 3
-
-/* TPS80031 I2C addresses */
-#define TPS80031_I2C_ID0_ADDR 0x12
-#define TPS80031_I2C_ID1_ADDR 0x48
-#define TPS80031_I2C_ID2_ADDR 0x49
-#define TPS80031_I2C_ID3_ADDR 0x4A
-
-enum {
- TPS80031_REGULATOR_VIO,
- TPS80031_REGULATOR_SMPS1,
- TPS80031_REGULATOR_SMPS2,
- TPS80031_REGULATOR_SMPS3,
- TPS80031_REGULATOR_SMPS4,
- TPS80031_REGULATOR_VANA,
- TPS80031_REGULATOR_LDO1,
- TPS80031_REGULATOR_LDO2,
- TPS80031_REGULATOR_LDO3,
- TPS80031_REGULATOR_LDO4,
- TPS80031_REGULATOR_LDO5,
- TPS80031_REGULATOR_LDO6,
- TPS80031_REGULATOR_LDO7,
- TPS80031_REGULATOR_LDOLN,
- TPS80031_REGULATOR_LDOUSB,
- TPS80031_REGULATOR_VBUS,
- TPS80031_REGULATOR_REGEN1,
- TPS80031_REGULATOR_REGEN2,
- TPS80031_REGULATOR_SYSEN,
- TPS80031_REGULATOR_MAX,
-};
-
-/* Different configurations for the rails */
-enum {
- /* USBLDO input selection */
- TPS80031_USBLDO_INPUT_VSYS = 0x00000001,
- TPS80031_USBLDO_INPUT_PMID = 0x00000002,
-
- /* LDO3 output mode */
- TPS80031_LDO3_OUTPUT_VIB = 0x00000004,
-
- /* VBUS configuration */
- TPS80031_VBUS_DISCHRG_EN_PDN = 0x00000004,
- TPS80031_VBUS_SW_ONLY = 0x00000008,
- TPS80031_VBUS_SW_N_ID = 0x00000010,
-};
-
-/* External controls requests */
-enum tps80031_ext_control {
- TPS80031_PWR_REQ_INPUT_NONE = 0x00000000,
- TPS80031_PWR_REQ_INPUT_PREQ1 = 0x00000001,
- TPS80031_PWR_REQ_INPUT_PREQ2 = 0x00000002,
- TPS80031_PWR_REQ_INPUT_PREQ3 = 0x00000004,
- TPS80031_PWR_OFF_ON_SLEEP = 0x00000008,
- TPS80031_PWR_ON_ON_SLEEP = 0x00000010,
-};
-
-enum tps80031_pupd_pins {
- TPS80031_PREQ1 = 0,
- TPS80031_PREQ2A,
- TPS80031_PREQ2B,
- TPS80031_PREQ2C,
- TPS80031_PREQ3,
- TPS80031_NRES_WARM,
- TPS80031_PWM_FORCE,
- TPS80031_CHRG_EXT_CHRG_STATZ,
- TPS80031_SIM,
- TPS80031_MMC,
- TPS80031_GPADC_START,
- TPS80031_DVSI2C_SCL,
- TPS80031_DVSI2C_SDA,
- TPS80031_CTLI2C_SCL,
- TPS80031_CTLI2C_SDA,
-};
-
-enum tps80031_pupd_settings {
- TPS80031_PUPD_NORMAL,
- TPS80031_PUPD_PULLDOWN,
- TPS80031_PUPD_PULLUP,
-};
-
-struct tps80031 {
- struct device *dev;
- unsigned long chip_info;
- int es_version;
- struct i2c_client *clients[TPS80031_NUM_SLAVES];
- struct regmap *regmap[TPS80031_NUM_SLAVES];
- struct regmap_irq_chip_data *irq_data;
-};
-
-struct tps80031_pupd_init_data {
- int input_pin;
- int setting;
-};
-
-/*
- * struct tps80031_regulator_platform_data - tps80031 regulator platform data.
- *
- * @reg_init_data: The regulator init data.
- * @ext_ctrl_flag: External control flag for sleep/power request control.
- * @config_flags: Configuration flag to configure the rails.
- * It should be ORed of config enums.
- */
-
-struct tps80031_regulator_platform_data {
- struct regulator_init_data *reg_init_data;
- unsigned int ext_ctrl_flag;
- unsigned int config_flags;
-};
-
-struct tps80031_platform_data {
- int irq_base;
- bool use_power_off;
- struct tps80031_pupd_init_data *pupd_init_data;
- int pupd_init_data_size;
- struct tps80031_regulator_platform_data
- *regulator_pdata[TPS80031_REGULATOR_MAX];
-};
-
-static inline int tps80031_write(struct device *dev, int sid,
- int reg, uint8_t val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_write(tps80031->regmap[sid], reg, val);
-}
-
-static inline int tps80031_writes(struct device *dev, int sid, int reg,
- int len, uint8_t *val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_bulk_write(tps80031->regmap[sid], reg, val, len);
-}
-
-static inline int tps80031_read(struct device *dev, int sid,
- int reg, uint8_t *val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
- unsigned int ival;
- int ret;
-
- ret = regmap_read(tps80031->regmap[sid], reg, &ival);
- if (ret < 0) {
- dev_err(dev, "failed reading from reg 0x%02x\n", reg);
- return ret;
- }
-
- *val = ival;
- return ret;
-}
-
-static inline int tps80031_reads(struct device *dev, int sid,
- int reg, int len, uint8_t *val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_bulk_read(tps80031->regmap[sid], reg, val, len);
-}
-
-static inline int tps80031_set_bits(struct device *dev, int sid,
- int reg, uint8_t bit_mask)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_update_bits(tps80031->regmap[sid], reg,
- bit_mask, bit_mask);
-}
-
-static inline int tps80031_clr_bits(struct device *dev, int sid,
- int reg, uint8_t bit_mask)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_update_bits(tps80031->regmap[sid], reg, bit_mask, 0);
-}
-
-static inline int tps80031_update(struct device *dev, int sid,
- int reg, uint8_t val, uint8_t mask)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_update_bits(tps80031->regmap[sid], reg, mask, val);
-}
-
-static inline unsigned long tps80031_get_chip_info(struct device *dev)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return tps80031->chip_info;
-}
-
-static inline int tps80031_get_pmu_version(struct device *dev)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return tps80031->es_version;
-}
-
-static inline int tps80031_irq_get_virq(struct device *dev, int irq)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_irq_get_virq(tps80031->irq_data, irq);
-}
-
-extern int tps80031_ext_power_req_config(struct device *dev,
- unsigned long ext_ctrl_flag, int preq_bit,
- int state_reg_add, int trans_reg_add);
-#endif /*__LINUX_MFD_TPS80031_H */
diff --git a/include/linux/i2c/twl.h b/include/linux/mfd/twl.h
index 9ad7828d9d34..6e3d99b7a0ee 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/mfd/twl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* twl4030.h - header for TWL4030 PM and audio CODEC device
*
@@ -5,21 +6,6 @@
*
* Based on tlv320aic23.c:
* Copyright (c) by Kai Svahn <kai.svahn@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __TWL_H_
@@ -83,6 +69,8 @@ enum twl6030_module_ids {
TWL6030_MODULE_GPADC,
TWL6030_MODULE_GASGAUGE,
+ /* A few extra registers before the registers shared with the 6030 */
+ TWL6032_MODULE_CHARGE,
TWL6030_MODULE_LAST,
};
@@ -195,14 +183,18 @@ static inline int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg) {
}
static inline int twl_i2c_write_u16(u8 mod_no, u16 val, u8 reg) {
- val = cpu_to_le16(val);
- return twl_i2c_write(mod_no, (u8*) &val, reg, 2);
+ __le16 value;
+
+ value = cpu_to_le16(val);
+ return twl_i2c_write(mod_no, (u8 *) &value, reg, 2);
}
static inline int twl_i2c_read_u16(u8 mod_no, u16 *val, u8 reg) {
int ret;
- ret = twl_i2c_read(mod_no, (u8*) val, reg, 2);
- *val = le16_to_cpu(*val);
+ __le16 value;
+
+ ret = twl_i2c_read(mod_no, (u8 *) &value, reg, 2);
+ *val = le16_to_cpu(value);
return ret;
}
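The hunk above deserves a remark: the old helpers stored a little-endian value
in a native-endian u16 and passed it through a cast, which works on the wire
but defeats sparse's endianness checking. Staging through a local __le16 makes
the conversion explicit, e.g. on the write path:

	__le16 value = cpu_to_le16(val);	/* CPU order -> bus order */
	twl_i2c_write(mod_no, (u8 *)&value, reg, 2);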
@@ -604,8 +596,6 @@ struct twl4030_gpio_platform_data {
int (*setup)(struct device *dev,
unsigned gpio, unsigned ngpio);
- int (*teardown)(struct device *dev,
- unsigned gpio, unsigned ngpio);
};
struct twl4030_madc_platform_data {
@@ -704,61 +694,6 @@ struct twl4030_audio_data {
unsigned int irq_base;
};
-struct twl4030_platform_data {
- struct twl4030_clock_init_data *clock;
- struct twl4030_bci_platform_data *bci;
- struct twl4030_gpio_platform_data *gpio;
- struct twl4030_madc_platform_data *madc;
- struct twl4030_keypad_data *keypad;
- struct twl4030_usb_data *usb;
- struct twl4030_power_data *power;
- struct twl4030_audio_data *audio;
-
- /* Common LDO regulators for TWL4030/TWL6030 */
- struct regulator_init_data *vdac;
- struct regulator_init_data *vaux1;
- struct regulator_init_data *vaux2;
- struct regulator_init_data *vaux3;
- struct regulator_init_data *vdd1;
- struct regulator_init_data *vdd2;
- struct regulator_init_data *vdd3;
- /* TWL4030 LDO regulators */
- struct regulator_init_data *vpll1;
- struct regulator_init_data *vpll2;
- struct regulator_init_data *vmmc1;
- struct regulator_init_data *vmmc2;
- struct regulator_init_data *vsim;
- struct regulator_init_data *vaux4;
- struct regulator_init_data *vio;
- struct regulator_init_data *vintana1;
- struct regulator_init_data *vintana2;
- struct regulator_init_data *vintdig;
- /* TWL6030 LDO regulators */
- struct regulator_init_data *vmmc;
- struct regulator_init_data *vpp;
- struct regulator_init_data *vusim;
- struct regulator_init_data *vana;
- struct regulator_init_data *vcxio;
- struct regulator_init_data *vusb;
- struct regulator_init_data *clk32kg;
- struct regulator_init_data *v1v8;
- struct regulator_init_data *v2v1;
- /* TWL6032 LDO regulators */
- struct regulator_init_data *ldo1;
- struct regulator_init_data *ldo2;
- struct regulator_init_data *ldo3;
- struct regulator_init_data *ldo4;
- struct regulator_init_data *ldo5;
- struct regulator_init_data *ldo6;
- struct regulator_init_data *ldo7;
- struct regulator_init_data *ldoln;
- struct regulator_init_data *ldousb;
- /* TWL6032 DCDC regulators */
- struct regulator_init_data *smps3;
- struct regulator_init_data *smps4;
- struct regulator_init_data *vio6025;
-};
-
struct twl_regulator_driver_data {
int (*set_voltage)(void *data, int target_uV);
int (*get_voltage)(void *data);
@@ -791,8 +726,6 @@ int twl4030_sih_setup(struct device *dev, int module, int irq_base);
#define TWL4030_VAUX3_DEV_GRP 0x1F
#define TWL4030_VAUX3_DEDICATED 0x22
-static inline int twl4030charger_usb_en(int enable) { return 0; }
-
/*----------------------------------------------------------------------*/
/* Linux-specific regulator identifiers ... for now, we only support
diff --git a/include/linux/mfd/twl4030-audio.h b/include/linux/mfd/twl4030-audio.h
index 3d22b72df076..1c28605dfda8 100644
--- a/include/linux/mfd/twl4030-audio.h
+++ b/include/linux/mfd/twl4030-audio.h
@@ -1,24 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* MFD driver for twl4030 audio submodule
*
* Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
*
* Copyright: (C) 2009 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
*/
#ifndef __TWL4030_CODEC_H__
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index a2e88761c09f..286a724e379a 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* MFD driver for twl6040
*
@@ -5,21 +6,6 @@
* Misael Lopez Cruz <misael.lopez@ti.com>
*
* Copyright: (C) 2011 Texas Instruments, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
*/
#ifndef __TWL6040_CODEC_H__
@@ -188,35 +174,7 @@
#define TWL6040_GPO_MAX 3
-/* TODO: All platform data struct can be removed */
-struct twl6040_codec_data {
- u16 hs_left_step;
- u16 hs_right_step;
- u16 hf_left_step;
- u16 hf_right_step;
-};
-
-struct twl6040_vibra_data {
- unsigned int vibldrv_res; /* left driver resistance */
- unsigned int vibrdrv_res; /* right driver resistance */
- unsigned int viblmotor_res; /* left motor resistance */
- unsigned int vibrmotor_res; /* right motor resistance */
- int vddvibl_uV; /* VDDVIBL volt, set 0 for fixed reg */
- int vddvibr_uV; /* VDDVIBR volt, set 0 for fixed reg */
-};
-
-struct twl6040_gpo_data {
- int gpio_base;
-};
-
-struct twl6040_platform_data {
- int audpwron_gpio; /* audio power-on gpio */
-
- struct twl6040_codec_data *codec;
- struct twl6040_vibra_data *vibra;
- struct twl6040_gpo_data *gpo;
-};
-
+struct gpio_desc;
struct regmap;
struct regmap_irq_chip_data;
@@ -232,7 +190,7 @@ struct twl6040 {
struct mfd_cell cells[TWL6040_CELLS];
struct completion ready;
- int audpwron;
+ struct gpio_desc *audpwron;
int power_count;
int rev;
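The audpwron change above converts a legacy integer GPIO number into a GPIO
descriptor. For code touching the field, that roughly means the following
(a sketch, not the driver's actual call sites):

	/* before: numeric GPIO, polarity handled by the caller */
	gpio_set_value_cansleep(twl6040->audpwron, 1);
	/* after: descriptor API, polarity comes from the firmware node */
	gpiod_set_value_cansleep(twl6040->audpwron, 1);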
diff --git a/include/linux/mfd/ucb1x00.h b/include/linux/mfd/ucb1x00.h
index 88f90cbf8e6a..ede237384723 100644
--- a/include/linux/mfd/ucb1x00.h
+++ b/include/linux/mfd/ucb1x00.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/mfd/ucb1x00.h
*
* Copyright (C) 2001 Russell King, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
*/
#ifndef UCB1200_H
#define UCB1200_H
@@ -13,6 +10,7 @@
#include <linux/device.h>
#include <linux/mfd/mcp.h>
#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/mutex.h>
#define UCB_IO_DATA 0x00
diff --git a/include/linux/mfd/viperboard.h b/include/linux/mfd/viperboard.h
index 193452848c04..0557667fe544 100644
--- a/include/linux/mfd/viperboard.h
+++ b/include/linux/mfd/viperboard.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/viperboard.h
*
@@ -6,12 +7,6 @@
* (C) 2012 by Lemonage GmbH
* Author: Lars Poeschel <poeschel@lemonage.de>
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_VIPERBOARD_H__
diff --git a/include/linux/mfd/wcd934x/registers.h b/include/linux/mfd/wcd934x/registers.h
new file mode 100644
index 000000000000..76a943c83c63
--- /dev/null
+++ b/include/linux/mfd/wcd934x/registers.h
@@ -0,0 +1,588 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _WCD934X_REGISTERS_H
+#define _WCD934X_REGISTERS_H
+
+#define WCD934X_CODEC_RPM_CLK_GATE 0x0002
+#define WCD934X_CODEC_RPM_CLK_GATE_MASK GENMASK(1, 0)
+#define WCD934X_CODEC_RPM_CLK_MCLK_CFG 0x0003
+#define WCD934X_CODEC_RPM_CLK_MCLK_CFG_9P6MHZ BIT(0)
+#define WCD934X_CODEC_RPM_CLK_MCLK_CFG_12P288MHZ BIT(1)
+#define WCD934X_CODEC_RPM_CLK_MCLK_CFG_MCLK_MASK GENMASK(1, 0)
+#define WCD934X_CODEC_RPM_RST_CTL 0x0009
+#define WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL 0x0011
+#define WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0 0x0021
+#define WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE2 0x0023
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_CTL 0x0025
+#define WCD934X_EFUSE_SENSE_STATE_MASK GENMASK(4, 1)
+#define WCD934X_EFUSE_SENSE_STATE_DEF 0x10
+#define WCD934X_EFUSE_SENSE_EN_MASK BIT(0)
+#define WCD934X_EFUSE_SENSE_ENABLE BIT(0)
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT1 0x002a
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT2 0x002b
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT14 0x0037
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT15 0x0038
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_STATUS 0x0039
+#define WCD934X_DATA_HUB_SB_TX10_INP_CFG 0x006b
+#define WCD934X_DATA_HUB_SB_TX11_INP_CFG 0x006c
+#define WCD934X_DATA_HUB_SB_TX13_INP_CFG 0x006e
+#define WCD934X_CPE_FLL_CONFIG_CTL_2 0x0111
+#define WCD934X_CPE_SS_CPARMAD_BUFRDY_INT_PERIOD 0x0213
+#define WCD934X_CPE_SS_SVA_CFG 0x0214
+#define WCD934X_CPE_SS_DMIC0_CTL 0x0218
+#define WCD934X_CPE_SS_DMIC1_CTL 0x0219
+#define WCD934X_DMIC_RATE_MASK GENMASK(3, 1)
+#define WCD934X_CPE_SS_DMIC2_CTL 0x021a
+#define WCD934X_CPE_SS_DMIC_CFG 0x021b
+#define WCD934X_CPE_SS_CPAR_CFG 0x021c
+#define WCD934X_INTR_PIN1_MASK0 0x0409
+#define WCD934X_INTR_PIN1_STATUS0 0x0411
+#define WCD934X_INTR_PIN1_CLEAR0 0x0419
+#define WCD934X_INTR_PIN2_CLEAR3 0x0434
+#define WCD934X_INTR_LEVEL0 0x0461
+/* INTR_REG 0 */
+#define WCD934X_IRQ_SLIMBUS 0
+#define WCD934X_IRQ_MISC 1
+#define WCD934X_IRQ_HPH_PA_OCPL_FAULT 2
+#define WCD934X_IRQ_HPH_PA_OCPR_FAULT 3
+#define WCD934X_IRQ_EAR_PA_OCP_FAULT 4
+#define WCD934X_IRQ_HPH_PA_CNPL_COMPLETE 5
+#define WCD934X_IRQ_HPH_PA_CNPR_COMPLETE 6
+#define WCD934X_IRQ_EAR_PA_CNP_COMPLETE 7
+/* INTR_REG 1 */
+#define WCD934X_IRQ_MBHC_SW_DET 8
+#define WCD934X_IRQ_MBHC_ELECT_INS_REM_DET 9
+#define WCD934X_IRQ_MBHC_BUTTON_PRESS_DET 10
+#define WCD934X_IRQ_MBHC_BUTTON_RELEASE_DET 11
+#define WCD934X_IRQ_MBHC_ELECT_INS_REM_LEG_DET 12
+#define WCD934X_IRQ_RESERVED_0 13
+#define WCD934X_IRQ_RESERVED_1 14
+#define WCD934X_IRQ_RESERVED_2 15
+/* INTR_REG 2 */
+#define WCD934X_IRQ_LINE_PA1_CNP_COMPLETE 16
+#define WCD934X_IRQ_LINE_PA2_CNP_COMPLETE 17
+#define WCD934X_IRQ_SLNQ_ANALOG_ERROR 18
+#define WCD934X_IRQ_RESERVED_3 19
+#define WCD934X_IRQ_SOUNDWIRE 20
+#define WCD934X_IRQ_VDD_DIG_RAMP_COMPLETE 21
+#define WCD934X_IRQ_RCO_ERROR 22
+#define WCD934X_IRQ_CPE_ERROR 23
+/* INTR_REG 3 */
+#define WCD934X_IRQ_MAD_AUDIO 24
+#define WCD934X_IRQ_MAD_BEACON 25
+#define WCD934X_IRQ_MAD_ULTRASOUND 26
+#define WCD934X_IRQ_VBAT_ATTACK 27
+#define WCD934X_IRQ_VBAT_RESTORE 28
+#define WCD934X_IRQ_CPE1_INTR 29
+#define WCD934X_IRQ_RESERVED_4 30
+#define WCD934X_IRQ_SLNQ_DIGITAL 31
+#define WCD934X_NUM_IRQS 32
+#define WCD934X_ANA_BIAS 0x0601
+#define WCD934X_ANA_BIAS_EN_MASK BIT(7)
+#define WCD934X_ANA_BIAS_EN BIT(7)
+#define WCD934X_ANA_PRECHRG_EN_MASK BIT(6)
+#define WCD934X_ANA_PRECHRG_EN BIT(6)
+#define WCD934X_ANA_PRECHRG_MODE_MASK BIT(5)
+#define WCD934X_ANA_PRECHRG_MODE_AUTO BIT(5)
+#define WCD934X_ANA_RCO 0x0603
+#define WCD934X_ANA_RCO_BG_EN_MASK BIT(7)
+#define WCD934X_ANA_RCO_BG_ENABLE BIT(7)
+#define WCD934X_ANA_BUCK_CTL 0x0606
+#define WCD934X_ANA_BUCK_HI_ACCU_PRE_ENX_MASK GENMASK(1, 0)
+#define WCD934X_ANA_BUCK_PRE_EN2_MASK BIT(0)
+#define WCD934X_ANA_BUCK_PRE_EN2_ENABLE BIT(0)
+#define WCD934X_ANA_BUCK_PRE_EN1_MASK BIT(1)
+#define WCD934X_ANA_BUCK_PRE_EN1_ENABLE BIT(1)
+#define WCD934X_ANA_BUCK_HI_ACCU_EN_MASK BIT(2)
+#define WCD934X_ANA_BUCK_HI_ACCU_ENABLE BIT(2)
+#define WCD934X_ANA_RX_SUPPLIES 0x0608
+#define WCD934X_ANA_HPH 0x0609
+#define WCD934X_ANA_EAR 0x060a
+#define WCD934X_ANA_LO_1_2 0x060b
+#define WCD934X_ANA_AMIC1 0x060e
+#define WCD934X_ANA_AMIC2 0x060f
+#define WCD934X_ANA_AMIC3 0x0610
+#define WCD934X_ANA_AMIC4 0x0611
+#define WCD934X_ANA_MBHC_MECH 0x0614
+#define WCD934X_MBHC_L_DET_EN_MASK BIT(7)
+#define WCD934X_MBHC_L_DET_EN BIT(7)
+#define WCD934X_MBHC_GND_DET_EN_MASK BIT(6)
+#define WCD934X_MBHC_MECH_DETECT_TYPE_MASK BIT(5)
+#define WCD934X_MBHC_MECH_DETECT_TYPE_INS 1
+#define WCD934X_MBHC_HPHL_PLUG_TYPE_MASK BIT(4)
+#define WCD934X_MBHC_HPHL_PLUG_TYPE_NO 1
+#define WCD934X_MBHC_GND_PLUG_TYPE_MASK BIT(3)
+#define WCD934X_MBHC_GND_PLUG_TYPE_NO 1
+#define WCD934X_MBHC_HSL_PULLUP_COMP_EN BIT(2)
+#define WCD934X_MBHC_HSG_PULLUP_COMP_EN BIT(1)
+#define WCD934X_MBHC_HPHL_100K_TO_GND_EN BIT(0)
+#define WCD934X_ANA_MBHC_ELECT 0x0615
+#define WCD934X_ANA_MBHC_BIAS_EN_MASK BIT(0)
+#define WCD934X_ANA_MBHC_BIAS_EN BIT(0)
+#define WCD934X_ANA_MBHC_ZDET 0x0616
+#define WCD934X_ANA_MBHC_RESULT_1 0x0617
+#define WCD934X_ANA_MBHC_RESULT_2 0x0618
+#define WCD934X_ANA_MBHC_RESULT_3 0x0619
+#define WCD934X_ANA_MBHC_BTN0 0x061a
+#define WCD934X_VTH_MASK GENMASK(7, 2)
+#define WCD934X_ANA_MBHC_BTN1 0x061b
+#define WCD934X_ANA_MBHC_BTN2 0x061c
+#define WCD934X_ANA_MBHC_BTN3 0x061d
+#define WCD934X_ANA_MBHC_BTN4 0x061e
+#define WCD934X_ANA_MBHC_BTN5 0x061f
+#define WCD934X_ANA_MBHC_BTN6 0x0620
+#define WCD934X_ANA_MBHC_BTN7 0x0621
+#define WCD934X_MBHC_BTN_VTH_MASK GENMASK(7, 2)
+#define WCD934X_ANA_MICB1 0x0622
+#define WCD934X_MICB_VAL_MASK GENMASK(5, 0)
+#define WCD934X_ANA_MICB_EN_MASK GENMASK(7, 6)
+#define WCD934X_MICB_DISABLE 0
+#define WCD934X_MICB_ENABLE 1
+#define WCD934X_MICB_PULL_UP 2
+#define WCD934X_MICB_PULL_DOWN 3
+#define WCD934X_ANA_MICB_PULL_UP 0x80
+#define WCD934X_ANA_MICB_ENABLE 0x40
+#define WCD934X_ANA_MICB_DISABLE 0x0
+#define WCD934X_ANA_MICB2 0x0623
+#define WCD934X_ANA_MICB2_ENABLE BIT(6)
+#define WCD934X_ANA_MICB2_ENABLE_MASK GENMASK(7, 6)
+#define WCD934X_ANA_MICB2_VOUT_MASK GENMASK(5, 0)
+#define WCD934X_ANA_MICB2_RAMP 0x0624
+#define WCD934X_RAMP_EN_MASK BIT(7)
+#define WCD934X_RAMP_SHIFT_CTRL_MASK GENMASK(4, 2)
+#define WCD934X_ANA_MICB3 0x0625
+#define WCD934X_ANA_MICB4 0x0626
+#define WCD934X_BIAS_VBG_FINE_ADJ 0x0629
+#define WCD934X_MBHC_CTL_CLK 0x0656
+#define WCD934X_MBHC_CTL_BCS 0x065a
+#define WCD934X_MBHC_STATUS_SPARE_1 0x065b
+#define WCD934X_MICB1_TEST_CTL_1 0x066b
+#define WCD934X_MICB1_TEST_CTL_2 0x066c
+#define WCD934X_MICB2_TEST_CTL_1 0x066e
+#define WCD934X_MICB3_TEST_CTL_1 0x0671
+#define WCD934X_MICB4_TEST_CTL_1 0x0674
+#define WCD934X_CLASSH_MODE_1 0x0697
+#define WCD934X_CLASSH_MODE_2 0x0698
+#define WCD934X_CLASSH_MODE_3 0x0699
+#define WCD934X_CLASSH_CTRL_VCL_1 0x069a
+#define WCD934X_CLASSH_CTRL_VCL_2 0x069b
+#define WCD934X_CLASSH_CTRL_CCL_1 0x069c
+#define WCD934X_CLASSH_CTRL_CCL_2 0x069d
+#define WCD934X_CLASSH_CTRL_CCL_3 0x069e
+#define WCD934X_CLASSH_CTRL_CCL_4 0x069f
+#define WCD934X_CLASSH_CTRL_CCL_5 0x06a0
+#define WCD934X_CLASSH_BUCK_TMUX_A_D 0x06a1
+#define WCD934X_CLASSH_BUCK_SW_DRV_CNTL 0x06a2
+#define WCD934X_RX_OCP_CTL 0x06b6
+#define WCD934X_RX_OCP_COUNT 0x06b7
+#define WCD934X_HPH_CNP_EN 0x06cb
+#define WCD934X_HPH_CNP_WG_CTL 0x06cc
+#define WCD934X_HPH_GM3_BOOST_EN_MASK BIT(7)
+#define WCD934X_HPH_GM3_BOOST_ENABLE BIT(7)
+#define WCD934X_HPH_CNP_WG_TIME 0x06cd
+#define WCD934X_HPH_OCP_CTL 0x06ce
+#define WCD934X_HPH_PA_CTL2 0x06d2
+#define WCD934X_HPHPA_GND_R_MASK BIT(6)
+#define WCD934X_HPHPA_GND_L_MASK BIT(4)
+#define WCD934X_HPH_L_EN 0x06d3
+#define WCD934X_HPH_GAIN_SRC_SEL_MASK BIT(5)
+#define WCD934X_HPH_GAIN_SRC_SEL_COMPANDER 0
+#define WCD934X_HPH_GAIN_SRC_SEL_REGISTER BIT(5)
+#define WCD934X_HPH_L_TEST 0x06d4
+#define WCD934X_HPH_R_EN 0x06d6
+#define WCD934X_HPH_R_TEST 0x06d7
+#define WCD934X_HPH_OCP_DET_MASK BIT(0)
+#define WCD934X_HPH_OCP_DET_ENABLE BIT(0)
+#define WCD934X_HPH_OCP_DET_DISABLE 0
+#define WCD934X_HPH_R_ATEST 0x06d8
+#define WCD934X_HPHPA_GND_OVR_MASK BIT(1)
+#define WCD934X_DIFF_LO_LO2_COMPANDER 0x06ea
+#define WCD934X_DIFF_LO_LO1_COMPANDER 0x06eb
+#define WCD934X_CLK_SYS_MCLK_PRG 0x0711
+#define WCD934X_EXT_CLK_BUF_EN_MASK BIT(7)
+#define WCD934X_EXT_CLK_BUF_EN BIT(7)
+#define WCD934X_EXT_CLK_DIV_RATIO_MASK GENMASK(5, 4)
+#define WCD934X_EXT_CLK_DIV_BY_2 0x10
+#define WCD934X_MCLK_SRC_MASK BIT(1)
+#define WCD934X_MCLK_SRC_EXT_CLK 0
+#define WCD934X_MCLK_EN_MASK BIT(0)
+#define WCD934X_MCLK_EN BIT(0)
+#define WCD934X_CLK_SYS_MCLK2_PRG1 0x0712
+#define WCD934X_CLK_SYS_MCLK2_PRG2 0x0713
+#define WCD934X_SIDO_NEW_VOUT_A_STARTUP 0x071b
+#define WCD934X_SIDO_NEW_VOUT_D_STARTUP 0x071c
+#define WCD934X_SIDO_NEW_VOUT_D_FREQ1 0x071d
+#define WCD934X_SIDO_NEW_VOUT_D_FREQ2 0x071e
+#define WCD934X_SIDO_RIPPLE_FREQ_EN_MASK BIT(0)
+#define WCD934X_SIDO_RIPPLE_FREQ_ENABLE BIT(0)
+#define WCD934X_MBHC_NEW_CTL_1 0x0720
+#define WCD934X_MBHC_CTL_RCO_EN_MASK BIT(7)
+#define WCD934X_MBHC_CTL_RCO_EN BIT(7)
+#define WCD934X_MBHC_NEW_CTL_2 0x0721
+#define WCD934X_M_RTH_CTL_MASK GENMASK(3, 2)
+#define WCD934X_MBHC_NEW_PLUG_DETECT_CTL 0x0722
+#define WCD934X_HSDET_PULLUP_C_MASK GENMASK(7, 6)
+#define WCD934X_MBHC_NEW_ZDET_ANA_CTL 0x0723
+#define WCD934X_ZDET_RANGE_CTL_MASK GENMASK(3, 0)
+#define WCD934X_ZDET_MAXV_CTL_MASK GENMASK(6, 4)
+#define WCD934X_MBHC_NEW_ZDET_RAMP_CTL 0x0724
+#define WCD934X_MBHC_NEW_FSM_STATUS 0x0725
+#define WCD934X_MBHC_NEW_ADC_RESULT 0x0726
+#define WCD934X_TX_NEW_AMIC_4_5_SEL 0x0727
+#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_L 0x0733
+#define WCD934X_HPH_NEW_INT_RDAC_OVERRIDE_CTL 0x0735
+#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_R 0x0736
+#define WCD934X_HPH_NEW_INT_HPH_TIMER1 0x073a
+#define WCD934X_HPH_AUTOCHOP_TIMER_EN_MASK BIT(1)
+#define WCD934X_HPH_AUTOCHOP_TIMER_ENABLE BIT(1)
+#define WCD934X_CDC_TX0_TX_PATH_CTL 0x0a31
+#define WCD934X_CDC_TX_PATH_CTL_PCM_RATE_MASK GENMASK(3, 0)
+#define WCD934X_CDC_TX_PATH_CTL(dec) (0xa31 + (dec) * 0x10)
+#define WCD934X_CDC_TX0_TX_PATH_CFG0 0x0a32
+#define WCD934X_CDC_TX0_TX_PATH_CFG1 0x0a33
+#define WCD934X_CDC_TX0_TX_VOL_CTL 0x0a34
+#define WCD934X_CDC_TX0_TX_PATH_192_CTL 0x0a35
+#define WCD934X_CDC_TX0_TX_PATH_192_CFG 0x0a36
+#define WCD934X_CDC_TX0_TX_PATH_SEC2 0x0a39
+#define WCD934X_HPH_CUTOFF_FREQ_CHANGE_REQ_MASK BIT(1)
+#define WCD934X_HPH_CUTOFF_FREQ_CHANGE_REQ BIT(1)
+#define WCD934X_CDC_TX1_TX_PATH_CTL 0x0a41
+#define WCD934X_CDC_TX1_TX_PATH_CFG0 0x0a42
+#define WCD934X_CDC_TX1_TX_PATH_CFG1 0x0a43
+#define WCD934X_CDC_TX1_TX_VOL_CTL 0x0a44
+#define WCD934X_CDC_TX2_TX_PATH_CTL 0x0a51
+#define WCD934X_CDC_TX2_TX_PATH_CFG0 0x0a52
+#define WCD934X_CDC_TX2_TX_PATH_CFG1 0x0a53
+#define WCD934X_CDC_TX2_TX_VOL_CTL 0x0a54
+#define WCD934X_CDC_TX3_TX_PATH_CTL 0x0a61
+#define WCD934X_CDC_TX3_TX_PATH_CFG0 0x0a62
+#define WCD934X_CDC_TX3_TX_PATH_CFG1 0x0a63
+#define WCD934X_CDC_TX3_TX_VOL_CTL 0x0a64
+#define WCD934X_CDC_TX3_TX_PATH_192_CTL 0x0a65
+#define WCD934X_CDC_TX3_TX_PATH_192_CFG 0x0a66
+#define WCD934X_CDC_TX4_TX_PATH_CTL 0x0a71
+#define WCD934X_CDC_TX4_TX_PATH_CFG0 0x0a72
+#define WCD934X_CDC_TX4_TX_PATH_CFG1 0x0a73
+#define WCD934X_CDC_TX4_TX_VOL_CTL 0x0a74
+#define WCD934X_CDC_TX4_TX_PATH_192_CTL 0x0a75
+#define WCD934X_CDC_TX4_TX_PATH_192_CFG 0x0a76
+#define WCD934X_CDC_TX5_TX_PATH_CTL 0x0a81
+#define WCD934X_CDC_TX5_TX_PATH_CFG0 0x0a82
+#define WCD934X_CDC_TX5_TX_PATH_CFG1 0x0a83
+#define WCD934X_CDC_TX5_TX_VOL_CTL 0x0a84
+#define WCD934X_CDC_TX5_TX_PATH_192_CTL 0x0a85
+#define WCD934X_CDC_TX5_TX_PATH_192_CFG 0x0a86
+#define WCD934X_CDC_TX6_TX_PATH_CTL 0x0a91
+#define WCD934X_CDC_TX6_TX_PATH_CFG0 0x0a92
+#define WCD934X_CDC_TX6_TX_PATH_CFG1 0x0a93
+#define WCD934X_CDC_TX6_TX_VOL_CTL 0x0a94
+#define WCD934X_CDC_TX6_TX_PATH_192_CTL 0x0a95
+#define WCD934X_CDC_TX6_TX_PATH_192_CFG 0x0a96
+#define WCD934X_CDC_TX7_TX_PATH_CTL 0x0aa1
+#define WCD934X_CDC_TX7_TX_PATH_CFG0 0x0aa2
+#define WCD934X_CDC_TX7_TX_PATH_CFG1 0x0aa3
+#define WCD934X_CDC_TX7_TX_VOL_CTL 0x0aa4
+#define WCD934X_CDC_TX7_TX_PATH_192_CTL 0x0aa5
+#define WCD934X_CDC_TX7_TX_PATH_192_CFG 0x0aa6
+#define WCD934X_CDC_TX8_TX_PATH_CTL 0x0ab1
+#define WCD934X_CDC_TX8_TX_PATH_CFG0 0x0ab2
+#define WCD934X_CDC_TX8_TX_PATH_CFG1 0x0ab3
+#define WCD934X_CDC_TX8_TX_VOL_CTL 0x0ab4
+#define WCD934X_CDC_TX8_TX_PATH_192_CTL 0x0ab5
+#define WCD934X_CDC_TX8_TX_PATH_192_CFG 0x0ab6
+#define WCD934X_CDC_TX9_SPKR_PROT_PATH_CFG0 0x0ac3
+#define WCD934X_CDC_TX10_SPKR_PROT_PATH_CFG0 0x0ac7
+#define WCD934X_CDC_TX11_SPKR_PROT_PATH_CFG0 0x0acb
+#define WCD934X_CDC_TX12_SPKR_PROT_PATH_CFG0 0x0acf
+#define WCD934X_CDC_COMPANDER1_CTL0 0x0b01
+#define WCD934X_COMP_CLK_EN_MASK BIT(0)
+#define WCD934X_COMP_CLK_ENABLE BIT(0)
+#define WCD934X_COMP_SOFT_RST_MASK BIT(1)
+#define WCD934X_COMP_SOFT_RST_ENABLE BIT(1)
+#define WCD934X_COMP_HALT_MASK BIT(2)
+#define WCD934X_COMP_HALT BIT(2)
+#define WCD934X_COMP_SOFT_RST_DISABLE 0
+#define WCD934X_CDC_COMPANDER1_CTL7 0x0b08
+#define WCD934X_HPH_LOW_PWR_MODE_EN_MASK BIT(5)
+#define WCD934X_CDC_COMPANDER2_CTL7 0x0b10
+#define WCD934X_CDC_COMPANDER7_CTL3 0x0b34
+#define WCD934X_CDC_COMPANDER7_CTL7 0x0b38
+#define WCD934X_CDC_COMPANDER8_CTL3 0x0b3c
+#define WCD934X_CDC_COMPANDER8_CTL7 0x0b40
+#define WCD934X_CDC_RX0_RX_PATH_CTL 0x0b41
+#define WCD934X_CDC_RX_PGA_MUTE_EN_MASK BIT(4)
+#define WCD934X_CDC_RX_PGA_MUTE_ENABLE BIT(4)
+#define WCD934X_CDC_RX_PGA_MUTE_DISABLE 0
+#define WCD934X_RX_CLK_EN_MASK BIT(5)
+#define WCD934X_RX_CLK_ENABLE BIT(5)
+#define WCD934X_RX_RESET_MASK BIT(6)
+#define WCD934X_RX_RESET_ENABLE BIT(6)
+#define WCD934X_RX_RESET_DISABLE 0
+#define WCD934X_RX_PCM_RATE_MASK GENMASK(3, 0)
+#define WCD934X_RX_PCM_RATE_F_48K 0x04
+#define WCD934X_CDC_RX_PATH_CTL(rx) (0xb41 + rx * 0x14)
+#define WCD934X_CDC_MIX_PCM_RATE_MASK GENMASK(3, 0)
+#define WCD934X_CDC_RX0_RX_PATH_CFG0 0x0b42
+#define WCD934X_RX_DLY_ZN_EN_MASK BIT(3)
+#define WCD934X_RX_DLY_ZN_ENABLE BIT(3)
+#define WCD934X_RX_DLY_ZN_DISABLE 0
+#define WCD934X_CDC_RX0_RX_PATH_CFG1 0x0b43
+#define WCD934X_CDC_RX0_RX_PATH_CFG2 0x0b44
+#define WCD934X_CDC_RX0_RX_VOL_CTL 0x0b45
+#define WCD934X_CDC_RX0_RX_PATH_MIX_CTL 0x0b46
+#define WCD934X_CDC_RX_MIX_CLK_EN_MASK BIT(5)
+#define WCD934X_CDC_RX_MIX_CLK_ENABLE BIT(5)
+#define WCD934X_CDC_RX_PATH_MIX_CTL(rx) (0xb46 + rx * 0x14)
+#define WCD934X_CDC_RX0_RX_PATH_MIX_CFG 0x0b47
+#define WCD934X_CDC_RX0_RX_VOL_MIX_CTL 0x0b48
+#define WCD934X_CDC_RX0_RX_PATH_SEC0 0x0b49
+#define WCD934X_CDC_RX0_RX_PATH_DSMDEM_CTL 0x0b53
+#define WCD934X_CDC_RX1_RX_PATH_CTL 0x0b55
+#define WCD934X_RX_PATH_PGA_MUTE_EN_MASK BIT(4)
+#define WCD934X_RX_PATH_PGA_MUTE_ENABLE BIT(4)
+#define WCD934X_CDC_RX_PATH_PGA_MUTE_DISABLE 0
+#define WCD934X_CDC_RX_PATH_CLK_EN_MASK BIT(5)
+#define WCD934X_CDC_RX_PATH_CLK_ENABLE BIT(5)
+#define WCD934X_CDC_RX_PATH_CLK_DISABLE 0
+#define WCD934X_CDC_RX1_RX_PATH_CFG0 0x0b56
+#define WCD934X_HPH_CMP_EN_MASK BIT(1)
+#define WCD934X_HPH_CMP_ENABLE BIT(1)
+#define WCD934X_HPH_CMP_DISABLE 0
+#define WCD934X_CDC_RX1_RX_PATH_CFG2 0x0b58
+#define WCD934X_CDC_RX1_RX_VOL_CTL 0x0b59
+#define WCD934X_CDC_RX1_RX_PATH_MIX_CTL 0x0b5a
+#define WCD934X_CDC_RX1_RX_PATH_MIX_CFG 0x0b5b
+#define WCD934X_CDC_RX1_RX_VOL_MIX_CTL 0x0b5c
+#define WCD934X_CDC_RX1_RX_PATH_SEC0 0x0b5d
+#define WCD934X_CDC_RX1_RX_PATH_SEC3 0x0b60
+#define WCD934X_CDC_RX_PATH_SEC_HD2_ALPHA_MASK GENMASK(5, 2)
+#define WCD934X_CDC_RX_PATH_SEC_HD2_ALPHA_0P3125 0x14
+#define WCD934X_CDC_RX_PATH_SEC_HD2_ALPHA_0P0000 0
+#define WCD934X_CDC_RX1_RX_PATH_DSMDEM_CTL 0x0b67
+#define WCD934X_CDC_RX2_RX_PATH_CTL 0x0b69
+#define WCD934X_CDC_RX2_RX_PATH_CFG0 0x0b6a
+#define WCD934X_CDC_RX_PATH_CFG_HD2_EN_MASK BIT(2)
+#define WCD934X_CDC_RX_PATH_CFG_HD2_ENABLE BIT(2)
+#define WCD934X_CDC_RX_PATH_CFG_HD2_DISABLE 0
+#define WCD934X_CDC_RX2_RX_PATH_CFG2 0x0b6c
+#define WCD934X_CDC_RX2_RX_VOL_CTL 0x0b6d
+#define WCD934X_CDC_RX2_RX_PATH_MIX_CTL 0x0b6e
+#define WCD934X_CDC_RX2_RX_PATH_MIX_CFG 0x0b6f
+#define WCD934X_CDC_RX2_RX_VOL_MIX_CTL 0x0b70
+#define WCD934X_CDC_RX2_RX_PATH_SEC0 0x0b71
+#define WCD934X_CDC_RX2_RX_PATH_SEC3 0x0b74
+#define WCD934X_CDC_RX2_RX_PATH_DSMDEM_CTL 0x0b7b
+#define WCD934X_CDC_RX3_RX_PATH_CTL 0x0b7d
+#define WCD934X_CDC_RX3_RX_PATH_CFG0			0x0b7e
+#define WCD934X_CDC_RX3_RX_PATH_CFG2 0x0b80
+#define WCD934X_CDC_RX3_RX_VOL_CTL 0x0b81
+#define WCD934X_CDC_RX3_RX_PATH_MIX_CTL 0x0b82
+#define WCD934X_CDC_RX3_RX_PATH_MIX_CFG 0x0b83
+#define WCD934X_CDC_RX3_RX_VOL_MIX_CTL 0x0b84
+#define WCD934X_CDC_RX3_RX_PATH_SEC0 0x0b85
+#define WCD934X_CDC_RX3_RX_PATH_DSMDEM_CTL 0x0b8f
+#define WCD934X_CDC_RX4_RX_PATH_CTL 0x0b91
+#define WCD934X_CDC_RX4_RX_PATH_CFG0 0x0b92
+#define WCD934X_CDC_RX4_RX_PATH_CFG2 0x0b94
+#define WCD934X_CDC_RX4_RX_VOL_CTL 0x0b95
+#define WCD934X_CDC_RX4_RX_PATH_MIX_CTL 0x0b96
+#define WCD934X_CDC_RX4_RX_PATH_MIX_CFG 0x0b97
+#define WCD934X_CDC_RX4_RX_VOL_MIX_CTL 0x0b98
+#define WCD934X_CDC_RX4_RX_PATH_SEC0 0x0b99
+#define WCD934X_CDC_RX4_RX_PATH_DSMDEM_CTL 0x0ba3
+#define WCD934X_CDC_RX7_RX_PATH_CTL 0x0bcd
+#define WCD934X_CDC_RX7_RX_PATH_CFG0 0x0bce
+#define WCD934X_CDC_RX7_RX_PATH_CFG1 0x0bcf
+#define WCD934X_CDC_RX7_RX_PATH_CFG2 0x0bd0
+#define WCD934X_CDC_RX7_RX_VOL_CTL 0x0bd1
+#define WCD934X_CDC_RX7_RX_PATH_MIX_CTL 0x0bd2
+#define WCD934X_CDC_RX7_RX_PATH_MIX_CFG 0x0bd3
+#define WCD934X_CDC_RX7_RX_VOL_MIX_CTL 0x0bd4
+#define WCD934X_CDC_RX7_RX_PATH_SEC1 0x0bd6
+#define WCD934X_CDC_RX7_RX_PATH_MIX_SEC0 0x0bdd
+#define WCD934X_CDC_RX7_RX_PATH_DSMDEM_CTL 0x0bdf
+#define WCD934X_CDC_RX8_RX_PATH_CTL 0x0be1
+#define WCD934X_CDC_RX8_RX_PATH_CFG0 0x0be2
+#define WCD934X_CDC_RX8_RX_PATH_CFG1 0x0be3
+#define WCD934X_RX_SMART_BOOST_EN_MASK BIT(0)
+#define WCD934X_RX_SMART_BOOST_ENABLE BIT(0)
+#define WCD934X_RX_SMART_BOOST_DISABLE 0
+#define WCD934X_CDC_RX8_RX_PATH_CFG2 0x0be4
+#define WCD934X_CDC_RX8_RX_VOL_CTL 0x0be5
+#define WCD934X_CDC_RX8_RX_PATH_MIX_CTL 0x0be6
+#define WCD934X_CDC_RX8_RX_PATH_MIX_CFG 0x0be7
+#define WCD934X_CDC_RX8_RX_VOL_MIX_CTL 0x0be8
+#define WCD934X_CDC_RX8_RX_PATH_SEC1 0x0bea
+#define WCD934X_CDC_RX8_RX_PATH_MIX_SEC0 0x0bf1
+#define WCD934X_CDC_RX8_RX_PATH_DSMDEM_CTL 0x0bf3
+#define WCD934X_CDC_CLSH_DECAY_CTRL 0x0c03
+#define WCD934X_CDC_CLSH_K2_MSB 0x0c0a
+#define WCD934X_CDC_CLSH_K2_LSB 0x0c0b
+#define WCD934X_CDC_CLSH_TEST0 0x0c0f
+#define WCD934X_CDC_BOOST0_BOOST_PATH_CTL 0x0c19
+#define WCD934X_BOOST_PATH_CLK_EN_MASK BIT(4)
+#define WCD934X_BOOST_PATH_CLK_ENABLE BIT(4)
+#define WCD934X_BOOST_PATH_CLK_DISABLE 0
+#define WCD934X_CDC_BOOST0_BOOST_CTL 0x0c1a
+#define WCD934X_CDC_BOOST0_BOOST_CFG1 0x0c1b
+#define WCD934X_CDC_BOOST0_BOOST_CFG2 0x0c1c
+#define WCD934X_CDC_BOOST1_BOOST_PATH_CTL 0x0c21
+#define WCD934X_CDC_BOOST1_BOOST_CTL 0x0c22
+#define WCD934X_CDC_BOOST1_BOOST_CFG1 0x0c23
+#define WCD934X_CDC_BOOST1_BOOST_CFG2 0x0c24
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_0 0x0c91
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_1 0x0c92
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_2 0x0c93
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_3 0x0c94
+#define WCD934X_SWR_AHB_BRIDGE_ACCESS_STATUS 0x0c96
+#define WCD934X_CDC_SIDETONE_SRC0_ST_SRC_PATH_CTL 0x0cb5
+#define WCD934X_CDC_SIDETONE_SRC1_ST_SRC_PATH_CTL 0x0cb9
+#define WCD934X_CDC_RX_INP_MUX_RX_INT0_CFG0 0x0d01
+#define WCD934X_CDC_RX_INP_MUX_RX_INT_CFG0(i) (0xd01 + i * 0x2)
+#define WCD934X_CDC_RX_INP_MUX_RX_INT_SEL_MASK GENMASK(3, 0)
+#define WCD934X_CDC_RX_INP_MUX_RX_INT0_CFG1 0x0d02
+#define WCD934X_CDC_RX_INP_MUX_RX_INT_CFG1(i) (0xd02 + i * 0x2)
+#define WCD934X_CDC_RX_INP_MUX_RX_INT1_CFG0 0x0d03
+#define WCD934X_CDC_RX_INP_MUX_RX_INT1_CFG1 0x0d04
+#define WCD934X_CDC_RX_INP_MUX_RX_INT2_CFG0 0x0d05
+#define WCD934X_CDC_RX_INP_MUX_RX_INT2_CFG1 0x0d06
+#define WCD934X_CDC_RX_INP_MUX_RX_INT3_CFG0 0x0d07
+#define WCD934X_CDC_RX_INP_MUX_RX_INT3_CFG1 0x0d08
+#define WCD934X_CDC_RX_INP_MUX_RX_INT4_CFG0 0x0d09
+#define WCD934X_CDC_RX_INP_MUX_RX_INT4_CFG1 0x0d0a
+#define WCD934X_CDC_RX_INP_MUX_RX_INT7_CFG0 0x0d0f
+#define WCD934X_CDC_RX_INP_MUX_RX_INT7_CFG1 0x0d10
+#define WCD934X_CDC_RX_INP_MUX_RX_INT8_CFG0 0x0d11
+#define WCD934X_CDC_RX_INP_MUX_RX_INT8_CFG1 0x0d12
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG0 0x0d13
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG1 0x0d14
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG2 0x0d15
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG3 0x0d16
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG4 0x0d17
+#define WCD934X_CDC_RX_INP_MUX_SIDETONE_SRC_CFG0 0x0d18
+#define WCD934X_CDC_RX_INP_MUX_SIDETONE_SRC_CFG1 0x0d19
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG0 0x0d1d
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG1 0x0d1e
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX1_CFG0 0x0d1f
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX1_CFG1 0x0d20
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX2_CFG0 0x0d21
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX2_CFG1 0x0d22
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG0 0x0d23
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG1 0x0d25
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX4_CFG0 0x0d26
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX5_CFG0 0x0d27
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX6_CFG0 0x0d28
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX7_CFG0 0x0d29
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX8_CFG0 0x0d2a
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX10_CFG0 0x0d2b
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX11_CFG0 0x0d2c
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX12_CFG0 0x0d2d
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX13_CFG0 0x0d2e
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG0 0x0d31
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG1 0x0d32
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG2 0x0d33
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG3 0x0d34
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG0 0x0d35
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG1 0x0d36
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG2 0x0d37
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG3 0x0d38
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG0 0x0d3a
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG1 0x0d3b
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG2 0x0d3c
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG3 0x0d3d
+#define WCD934X_CDC_CLK_RST_CTRL_MCLK_CONTROL 0x0d41
+#define WCD934X_CDC_MCLK_EN_MASK BIT(0)
+#define WCD934X_CDC_MCLK_EN_ENABLE BIT(0)
+#define WCD934X_CDC_CLK_RST_CTRL_FS_CNT_CONTROL 0x0d42
+#define WCD934X_CDC_FS_MCLK_CNT_EN_MASK BIT(0)
+#define WCD934X_CDC_FS_MCLK_CNT_ENABLE BIT(0)
+#define WCD934X_CDC_CLK_RST_CTRL_SWR_CONTROL 0x0d43
+#define WCD934X_CDC_SWR_CLK_EN_MASK BIT(0)
+#define WCD934X_CDC_SWR_CLK_ENABLE BIT(0)
+#define WCD934X_CDC_CLK_RST_CTRL_DSD_CONTROL 0x0d44
+#define WCD934X_CDC_CLK_RST_CTRL_ASRC_SHARE_CONTROL 0x0d45
+#define WCD934X_CDC_CLK_RST_CTRL_GFM_CONTROL 0x0d46
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_PATH_CTL 0x0d55
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B1_CTL 0x0d56
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B2_CTL 0x0d57
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B3_CTL 0x0d58
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B4_CTL 0x0d59
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B5_CTL 0x0d5a
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B6_CTL 0x0d5b
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B7_CTL 0x0d5c
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B8_CTL 0x0d5d
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_CTL 0x0d5e
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_TIMER_CTL 0x0d5f
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL 0x0d60
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL 0x0d61
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_PATH_CTL 0x0d65
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B1_CTL 0x0d66
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B2_CTL 0x0d67
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B3_CTL 0x0d68
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B4_CTL 0x0d69
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B5_CTL 0x0d6a
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B6_CTL 0x0d6b
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B7_CTL 0x0d6c
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B8_CTL 0x0d6d
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_CTL 0x0d6e
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_TIMER_CTL 0x0d6f
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_COEF_B1_CTL 0x0d70
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_COEF_B2_CTL 0x0d71
+#define WCD934X_CDC_TOP_TOP_CFG1 0x0d82
+#define WCD934X_CDC_TOP_TOP_CFG7 0x0d88
+#define WCD934X_CDC_TOP_HPHL_COMP_LUT 0x0d8b
+#define WCD934X_CDC_TOP_HPHR_COMP_LUT 0x0d90
+#define WCD934X_HPH_LUT_BYPASS_MASK BIT(7)
+#define WCD934X_HPH_LUT_BYPASS_ENABLE BIT(7)
+#define WCD934X_HPH_LUT_BYPASS_DISABLE 0
+#define WCD934X_CODEC_CPR_WR_DATA_0 0x5001
+#define WCD934X_CODEC_CPR_WR_ADDR_0 0x5005
+#define WCD934X_CODEC_CPR_SVS_CX_VDD 0x5022
+#define WCD934X_CODEC_CPR_SVS2_CX_VDD 0x5023
+#define WCD934X_CODEC_CPR_SVS2_MIN_CX_VDD 0x5027
+#define WCD934X_TLMM_DMIC1_CLK_PINCFG 0x8015
+#define WCD934X_TLMM_DMIC1_DATA_PINCFG 0x8016
+#define WCD934X_TLMM_DMIC2_CLK_PINCFG 0x8017
+#define WCD934X_TLMM_DMIC2_DATA_PINCFG 0x8018
+#define WCD934X_TLMM_DMIC3_CLK_PINCFG 0x8019
+#define WCD934X_TLMM_DMIC3_DATA_PINCFG 0x801a
+#define WCD934X_TEST_DEBUG_PAD_DRVCTL_0 0x803b
+#define WCD934X_TEST_DEBUG_NPL_DLY_TEST_1 0x803e
+
+#define WCD934X_MAX_REGISTER 0xffff
+#define WCD934X_SEL_REGISTER 0x800
+#define WCD934X_SEL_MASK 0xff
+#define WCD934X_SEL_SHIFT 0x0
+#define WCD934X_WINDOW_START 0x800
+#define WCD934X_WINDOW_LENGTH 0x100
+
+/* SLIMBUS Slave Registers */
+#define WCD934X_SLIM_PGD_PORT_INT_EN0 0x30
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_RX_0 0x34
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_RX_1 0x35
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_TX_0 0x36
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_TX_1 0x37
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_RX_0 0x38
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_RX_1 0x39
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_TX_0 0x3A
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_TX_1 0x3B
+#define WCD934X_SLIM_PGD_PORT_INT_RX_SOURCE0 0x60
+#define WCD934X_SLIM_PGD_PORT_INT_TX_SOURCE0 0x70
+#define WCD934X_SLIM_PGD_RX_PORT_CFG(p) (0x30 + p)
+#define WCD934X_SLIM_PGD_PORT_CFG(p) (0x40 + p)
+#define WCD934X_SLIM_PGD_TX_PORT_CFG(p) (0x50 + p)
+#define WCD934X_SLIM_PGD_PORT_INT_SRC(p) (0x60 + p)
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS(p) (0x80 + p)
+#define WCD934X_SLIM_PGD_TX_PORT_MULTI_CHNL_0(p) (0x100 + 4 * p)
+/* ports range from 10-16 */
+#define WCD934X_SLIM_PGD_TX_PORT_MULTI_CHNL_1(p) (0x101 + 4 * p)
+#define WCD934X_SLIM_PGD_RX_PORT_MULTI_CHNL_0(p) (0x140 + 4 * p)
+
+#define SLIM_MANF_ID_QCOM 0x217
+#define SLIM_PROD_CODE_WCD9340 0x250
+#define SLIM_DEV_IDX_WCD9340 0x1
+#define SLIM_DEV_INSTANCE_ID_WCD9340 0
+
+#endif
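
The indexed helpers above, WCD934X_CDC_RX_PATH_CTL(rx) and WCD934X_CDC_TX_PATH_CTL(dec), encode the fixed 0x14 and 0x10 strides between path blocks, so WCD934X_CDC_RX_PATH_CTL(1) resolves to 0xb55, the same address as WCD934X_CDC_RX1_RX_PATH_CTL. A minimal sketch of how a codec driver might drive them through regmap; the helper name is hypothetical and only the happy path is shown:

#include <linux/regmap.h>

/* Hypothetical helper: enable the clock of RX path 'rx'.
 * WCD934X_CDC_RX_PATH_CTL(1) expands to 0xb41 + 1 * 0x14 = 0xb55,
 * i.e. WCD934X_CDC_RX1_RX_PATH_CTL defined above.
 */
static int wcd934x_rx_path_clk_enable(struct regmap *regmap, unsigned int rx)
{
        return regmap_update_bits(regmap, WCD934X_CDC_RX_PATH_CTL(rx),
                                  WCD934X_CDC_RX_PATH_CLK_EN_MASK,
                                  WCD934X_CDC_RX_PATH_CLK_ENABLE);
}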
diff --git a/include/linux/mfd/wcd934x/wcd934x.h b/include/linux/mfd/wcd934x/wcd934x.h
new file mode 100644
index 000000000000..f3c65a035150
--- /dev/null
+++ b/include/linux/mfd/wcd934x/wcd934x.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __WCD934X_H__
+#define __WCD934X_H__
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
+#include <linux/slimbus.h>
+
+#define WCD934X_MAX_SUPPLY 5
+
+/**
+ * struct wcd934x_ddata - wcd934x driver data
+ *
+ * @supplies: wcd934x regulator supplies
+ * @irq_data: wcd934x irq_chip data
+ * @regmap: wcd934x regmap pointer
+ * @extclk: External clock
+ * @dev: device instance of wcd934x slim device
+ * @irq: irq for wcd934x.
+ */
+struct wcd934x_ddata {
+ struct regulator_bulk_data supplies[WCD934X_MAX_SUPPLY];
+ struct regmap_irq_chip_data *irq_data;
+ struct regmap *regmap;
+ struct clk *extclk;
+ struct device *dev;
+ int irq;
+};
+
+#endif /* __WCD934X_H__ */
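
Since the wcd934x is an MFD whose children share this state, a child device typically reaches the ddata through its parent. A hedged sketch, assuming the core driver publishes the structure with dev_set_drvdata() on the parent device, as MFD cores commonly do; the probe function name is illustrative:

#include <linux/errno.h>
#include <linux/mfd/wcd934x/wcd934x.h>
#include <linux/platform_device.h>

/* Hypothetical child-driver probe: fetch the shared driver data from
 * the parent wcd934x core device.
 */
static int wcd934x_child_probe(struct platform_device *pdev)
{
        struct wcd934x_ddata *ddata = dev_get_drvdata(pdev->dev.parent);

        if (!ddata || !ddata->regmap)
                return -EINVAL;

        /* ddata->regmap, ddata->extclk and ddata->irq_data are now usable */
        return 0;
}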
diff --git a/include/linux/mfd/wl1273-core.h b/include/linux/mfd/wl1273-core.h
index db2f3f454a1b..c28cf76d5c31 100644
--- a/include/linux/mfd/wl1273-core.h
+++ b/include/linux/mfd/wl1273-core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/mfd/wl1273-core.h
*
@@ -5,20 +6,6 @@
*
* Copyright (C) 2010 Nokia Corporation
* Author: Matti J. Aaltonen <matti.j.aaltonen@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef WL1273_CORE_H
diff --git a/include/linux/mfd/wm831x/auxadc.h b/include/linux/mfd/wm831x/auxadc.h
index 867aa23f9370..02ddb4fe1608 100644
--- a/include/linux/mfd/wm831x/auxadc.h
+++ b/include/linux/mfd/wm831x/auxadc.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm831x/auxadc.h -- Auxiliary ADC interface for WM831x
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM831X_AUXADC_H__
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
index b49fa67612f1..511bcad876f0 100644
--- a/include/linux/mfd/wm831x/core.h
+++ b/include/linux/mfd/wm831x/core.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm831x/core.h -- Core interface for WM831x
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM831X_CORE_H__
@@ -418,7 +413,6 @@ int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg,
int count, u16 *buf);
int wm831x_device_init(struct wm831x *wm831x, int irq);
-void wm831x_device_exit(struct wm831x *wm831x);
int wm831x_device_suspend(struct wm831x *wm831x);
void wm831x_device_shutdown(struct wm831x *wm831x);
int wm831x_irq_init(struct wm831x *wm831x, int irq);
diff --git a/include/linux/mfd/wm831x/gpio.h b/include/linux/mfd/wm831x/gpio.h
index 9b163c58865f..70587a4ec634 100644
--- a/include/linux/mfd/wm831x/gpio.h
+++ b/include/linux/mfd/wm831x/gpio.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm831x/gpio.h -- GPIO for WM831x
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM831X_GPIO_H__
diff --git a/include/linux/mfd/wm831x/irq.h b/include/linux/mfd/wm831x/irq.h
index 3a8c97656fda..ab2d1524e729 100644
--- a/include/linux/mfd/wm831x/irq.h
+++ b/include/linux/mfd/wm831x/irq.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm831x/irq.h -- Interrupt controller for WM831x
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM831X_IRQ_H__
diff --git a/include/linux/mfd/wm831x/otp.h b/include/linux/mfd/wm831x/otp.h
index ce1f81a39bfc..bc244456ad56 100644
--- a/include/linux/mfd/wm831x/otp.h
+++ b/include/linux/mfd/wm831x/otp.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm831x/otp.h -- OTP interface for WM831x
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM831X_OTP_H__
diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h
index dcc9631b3052..75aa94dadf1c 100644
--- a/include/linux/mfd/wm831x/pdata.h
+++ b/include/linux/mfd/wm831x/pdata.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm831x/pdata.h -- Platform data for WM831x
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM831X_PDATA_H__
@@ -52,7 +47,6 @@ struct wm831x_battery_pdata {
* I2C or SPI buses.
*/
struct wm831x_buckv_pdata {
- int dvs_gpio; /** CPU GPIO to use for DVS switching */
int dvs_control_src; /** Hardware DVS source to use (1 or 2) */
int dvs_init_state; /** DVS state to expect on startup */
int dvs_state_gpio; /** CPU GPIO to use for monitoring status */
@@ -95,7 +89,6 @@ enum wm831x_watchdog_action {
struct wm831x_watchdog_pdata {
enum wm831x_watchdog_action primary, secondary;
- int update_gpio;
unsigned int software:1;
};
diff --git a/include/linux/mfd/wm831x/pmu.h b/include/linux/mfd/wm831x/pmu.h
index b18cbb027bc3..77187fcaf226 100644
--- a/include/linux/mfd/wm831x/pmu.h
+++ b/include/linux/mfd/wm831x/pmu.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm831x/pmu.h -- PMU for WM831x
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM831X_PMU_H__
diff --git a/include/linux/mfd/wm831x/regulator.h b/include/linux/mfd/wm831x/regulator.h
index 955d30fc6a27..233b3017954a 100644
--- a/include/linux/mfd/wm831x/regulator.h
+++ b/include/linux/mfd/wm831x/regulator.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* linux/mfd/wm831x/regulator.h -- Regulator definitons for wm831x
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM831X_REGULATOR_H__
@@ -1213,6 +1208,6 @@
#define WM831X_LDO1_OK_WIDTH 1 /* LDO1_OK */
#define WM831X_ISINK_MAX_ISEL 55
-extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1];
+extern const unsigned int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1];
#endif
diff --git a/include/linux/mfd/wm831x/status.h b/include/linux/mfd/wm831x/status.h
index 6bc090d0e3ac..0d263577d21d 100644
--- a/include/linux/mfd/wm831x/status.h
+++ b/include/linux/mfd/wm831x/status.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm831x/status.h -- Status LEDs for WM831x
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM831X_STATUS_H__
diff --git a/include/linux/mfd/wm831x/watchdog.h b/include/linux/mfd/wm831x/watchdog.h
index 97a99b52956f..c997c792946c 100644
--- a/include/linux/mfd/wm831x/watchdog.h
+++ b/include/linux/mfd/wm831x/watchdog.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm831x/watchdog.h -- Watchdog for WM831x
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM831X_WATCHDOG_H__
diff --git a/include/linux/mfd/wm8350/audio.h b/include/linux/mfd/wm8350/audio.h
index bd581c6fa085..ec01ec84d495 100644
--- a/include/linux/mfd/wm8350/audio.h
+++ b/include/linux/mfd/wm8350/audio.h
@@ -1,13 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* audio.h -- Audio Driver for Wolfson WM8350 PMIC
*
* Copyright 2007, 2008 Wolfson Microelectronics PLC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_WM8350_AUDIO_H_
@@ -617,11 +612,8 @@ struct wm8350_audio_platform_data {
u32 codec_current_charge:2; /* codec current @ vmid charge */
};
-struct snd_soc_codec;
-
struct wm8350_codec {
struct platform_device *pdev;
- struct snd_soc_codec *codec;
struct wm8350_audio_platform_data *platform_data;
};
diff --git a/include/linux/mfd/wm8350/comparator.h b/include/linux/mfd/wm8350/comparator.h
index 54bc5d0fd502..250d89239528 100644
--- a/include/linux/mfd/wm8350/comparator.h
+++ b/include/linux/mfd/wm8350/comparator.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* comparator.h -- Comparator Aux ADC for Wolfson WM8350 PMIC
*
* Copyright 2007 Wolfson Microelectronics PLC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __LINUX_MFD_WM8350_COMPARATOR_H_
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index 509481d9cf19..a3241e4d7548 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -1,13 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* core.h -- Core Driver for Wolfson WM8350 PMIC
*
* Copyright 2007 Wolfson Microelectronics PLC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_WM8350_CORE_H_
@@ -643,7 +638,6 @@ struct wm8350_platform_data {
*/
int wm8350_device_init(struct wm8350 *wm8350, int irq,
struct wm8350_platform_data *pdata);
-void wm8350_device_exit(struct wm8350 *wm8350);
/*
* WM8350 device IO
diff --git a/include/linux/mfd/wm8350/gpio.h b/include/linux/mfd/wm8350/gpio.h
index d657bcd6d955..e831b30dde3e 100644
--- a/include/linux/mfd/wm8350/gpio.h
+++ b/include/linux/mfd/wm8350/gpio.h
@@ -1,13 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* gpio.h -- GPIO Driver for Wolfson WM8350 PMIC
*
* Copyright 2007 Wolfson Microelectronics PLC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_WM8350_GPIO_H_
diff --git a/include/linux/mfd/wm8350/pmic.h b/include/linux/mfd/wm8350/pmic.h
index 7a09e7f1f984..04b09a2ddb28 100644
--- a/include/linux/mfd/wm8350/pmic.h
+++ b/include/linux/mfd/wm8350/pmic.h
@@ -1,13 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* pmic.h -- Power Management Driver for Wolfson WM8350 PMIC
*
* Copyright 2007 Wolfson Microelectronics PLC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_WM8350_PMIC_H
diff --git a/include/linux/mfd/wm8350/rtc.h b/include/linux/mfd/wm8350/rtc.h
index ebd72ffc62d1..b2f58359b2eb 100644
--- a/include/linux/mfd/wm8350/rtc.h
+++ b/include/linux/mfd/wm8350/rtc.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* rtc.h -- RTC driver for Wolfson WM8350 PMIC
*
* Copyright 2007 Wolfson Microelectronics PLC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __LINUX_MFD_WM8350_RTC_H
diff --git a/include/linux/mfd/wm8350/supply.h b/include/linux/mfd/wm8350/supply.h
index 8dc93673e34a..d7a91e26177c 100644
--- a/include/linux/mfd/wm8350/supply.h
+++ b/include/linux/mfd/wm8350/supply.h
@@ -1,13 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* supply.h -- Power Supply Driver for Wolfson WM8350 PMIC
*
* Copyright 2007 Wolfson Microelectronics PLC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_WM8350_SUPPLY_H_
diff --git a/include/linux/mfd/wm8350/wdt.h b/include/linux/mfd/wm8350/wdt.h
index f6135b5e5ef4..97454aa8c436 100644
--- a/include/linux/mfd/wm8350/wdt.h
+++ b/include/linux/mfd/wm8350/wdt.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* wdt.h -- Watchdog Driver for Wolfson WM8350 PMIC
*
* Copyright 2007, 2008 Wolfson Microelectronics PLC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __LINUX_MFD_WM8350_WDT_H_
diff --git a/include/linux/mfd/wm8400-audio.h b/include/linux/mfd/wm8400-audio.h
index e06ed3eb1d0a..d47bdcc7a765 100644
--- a/include/linux/mfd/wm8400-audio.h
+++ b/include/linux/mfd/wm8400-audio.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* wm8400 private definitions for audio
*
* Copyright 2008 Wolfson Microelectronics plc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_MFD_WM8400_AUDIO_H
diff --git a/include/linux/mfd/wm8400-private.h b/include/linux/mfd/wm8400-private.h
index 4ee908f5b834..bc8c2ca6dc70 100644
--- a/include/linux/mfd/wm8400-private.h
+++ b/include/linux/mfd/wm8400-private.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* wm8400 private definitions.
*
* Copyright 2008 Wolfson Microelectronics plc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_MFD_WM8400_PRIV_H
@@ -923,12 +910,4 @@ struct wm8400 {
#define WM8400_LINE_CMP_VTHD_SHIFT 0 /* LINE_CMP_VTHD - [3:0] */
#define WM8400_LINE_CMP_VTHD_WIDTH 4 /* LINE_CMP_VTHD - [3:0] */
-int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data);
-
-static inline int wm8400_set_bits(struct wm8400 *wm8400, u8 reg,
- u16 mask, u16 val)
-{
- return regmap_update_bits(wm8400->regmap, reg, mask, val);
-}
-
#endif
diff --git a/include/linux/mfd/wm8400.h b/include/linux/mfd/wm8400.h
index b46b566ac1ac..a812d89e7cb3 100644
--- a/include/linux/mfd/wm8400.h
+++ b/include/linux/mfd/wm8400.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* wm8400 client interface
*
* Copyright 2008 Wolfson Microelectronics plc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_MFD_WM8400_H
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
index eefafa62d304..e8b093522ffd 100644
--- a/include/linux/mfd/wm8994/core.h
+++ b/include/linux/mfd/wm8994/core.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm8994/core.h -- Core interface for WM8994
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM8994_CORE_H__
diff --git a/include/linux/mfd/wm8994/gpio.h b/include/linux/mfd/wm8994/gpio.h
index 0c79b5ff4b5a..723fa331776e 100644
--- a/include/linux/mfd/wm8994/gpio.h
+++ b/include/linux/mfd/wm8994/gpio.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm8994/gpio.h - GPIO configuration for WM8994
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM8994_GPIO_H__
diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h
index 90c60524a496..6e2962ef5b81 100644
--- a/include/linux/mfd/wm8994/pdata.h
+++ b/include/linux/mfd/wm8994/pdata.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm8994/pdata.h -- Platform data for WM8994
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM8994_PDATA_H__
@@ -20,9 +15,6 @@
#define WM8994_NUM_AIF 3
struct wm8994_ldo_pdata {
- /** GPIOs to enable regulator, 0 or less if not available */
- int enable;
-
const struct regulator_init_data *init_data;
};
@@ -41,7 +33,7 @@ struct wm8994_ldo_pdata {
* DRC configurations are specified with a label and a set of register
* values to write (the enable bits will be ignored). At runtime an
* enumerated control will be presented for each DRC block allowing
- * the user to choose the configration to use.
+ * the user to choose the configuration to use.
*
* Configurations may be generated by hand or by using the DRC control
* panel provided by the WISCE - see http://www.wolfsonmicro.com/wisce/
@@ -222,6 +214,12 @@ struct wm8994_pdata {
*/
bool spkmode_pu;
+ /*
+ * CS/ADDR must be pulled internally by the device on this
+ * system.
+ */
+ bool csnaddr_pd;
+
/**
* Maximum number of channels clocks will be generated for,
* useful for systems where and I2S bus with multiple data
diff --git a/include/linux/mfd/wm8994/registers.h b/include/linux/mfd/wm8994/registers.h
index db8cef3d5321..8782a207faf7 100644
--- a/include/linux/mfd/wm8994/registers.h
+++ b/include/linux/mfd/wm8994/registers.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mfd/wm8994/registers.h -- Register definitions for WM8994
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_WM8994_REGISTERS_H__
diff --git a/include/linux/mfd/wm97xx.h b/include/linux/mfd/wm97xx.h
new file mode 100644
index 000000000000..446a5546ceca
--- /dev/null
+++ b/include/linux/mfd/wm97xx.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * wm97xx client interface
+ *
+ * Copyright (C) 2017 Robert Jarzmik
+ */
+
+#ifndef __LINUX_MFD_WM97XX_H
+#define __LINUX_MFD_WM97XX_H
+
+struct regmap;
+struct wm97xx_batt_pdata;
+struct snd_ac97;
+
+struct wm97xx_platform_data {
+ struct snd_ac97 *ac97;
+ struct regmap *regmap;
+ struct wm97xx_batt_pdata *batt_pdata;
+};
+
+#endif
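
The struct is meant to be handed to the wm97xx sub-drivers by the core. A small sketch of a child probe consuming it, assuming the core passes it as ordinary platform data; the function name is illustrative:

#include <linux/errno.h>
#include <linux/mfd/wm97xx.h>
#include <linux/platform_device.h>

/* Hypothetical sub-driver probe: read the platform data supplied by
 * the wm97xx core when it registered this child device.
 */
static int wm97xx_child_probe(struct platform_device *pdev)
{
        struct wm97xx_platform_data *pdata = dev_get_platdata(&pdev->dev);

        if (!pdata)
                return -ENODEV;

        /* pdata->ac97, pdata->regmap and pdata->batt_pdata describe the chip */
        return 0;
}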
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
new file mode 100644
index 000000000000..f6de4b6ecfc7
--- /dev/null
+++ b/include/linux/mhi.h
@@ -0,0 +1,810 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ *
+ */
+#ifndef _MHI_H_
+#define _MHI_H_
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#define MHI_MAX_OEM_PK_HASH_SEGMENTS 16
+
+struct mhi_chan;
+struct mhi_event;
+struct mhi_ctxt;
+struct mhi_cmd;
+struct mhi_buf_info;
+
+/**
+ * enum mhi_callback - MHI callback
+ * @MHI_CB_IDLE: MHI entered idle state
+ * @MHI_CB_PENDING_DATA: New data available for client to process
+ * @MHI_CB_LPM_ENTER: MHI host entered low power mode
+ * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
+ * @MHI_CB_EE_RDDM: MHI device entered RDDM exec env
+ * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env
+ * @MHI_CB_SYS_ERROR: MHI device entered error state (may recover)
+ * @MHI_CB_FATAL_ERROR: MHI device entered fatal error state
+ * @MHI_CB_BW_REQ: Received a bandwidth switch request from device
+ */
+enum mhi_callback {
+ MHI_CB_IDLE,
+ MHI_CB_PENDING_DATA,
+ MHI_CB_LPM_ENTER,
+ MHI_CB_LPM_EXIT,
+ MHI_CB_EE_RDDM,
+ MHI_CB_EE_MISSION_MODE,
+ MHI_CB_SYS_ERROR,
+ MHI_CB_FATAL_ERROR,
+ MHI_CB_BW_REQ,
+};
+
+/**
+ * enum mhi_flags - Transfer flags
+ * @MHI_EOB: End of buffer for bulk transfer
+ * @MHI_EOT: End of transfer
+ * @MHI_CHAIN: Linked transfer
+ */
+enum mhi_flags {
+ MHI_EOB = BIT(0),
+ MHI_EOT = BIT(1),
+ MHI_CHAIN = BIT(2),
+};
+
+/**
+ * enum mhi_device_type - Device types
+ * @MHI_DEVICE_XFER: Handles data transfer
+ * @MHI_DEVICE_CONTROLLER: Control device
+ */
+enum mhi_device_type {
+ MHI_DEVICE_XFER,
+ MHI_DEVICE_CONTROLLER,
+};
+
+/**
+ * enum mhi_ch_type - Channel types
+ * @MHI_CH_TYPE_INVALID: Invalid channel type
+ * @MHI_CH_TYPE_OUTBOUND: Outbound channel to the device
+ * @MHI_CH_TYPE_INBOUND: Inbound channel from the device
+ * @MHI_CH_TYPE_INBOUND_COALESCED: Coalesced channel for the device to combine
+ * multiple packets and send them as a single
+ * large packet to reduce CPU consumption
+ */
+enum mhi_ch_type {
+ MHI_CH_TYPE_INVALID = 0,
+ MHI_CH_TYPE_OUTBOUND = DMA_TO_DEVICE,
+ MHI_CH_TYPE_INBOUND = DMA_FROM_DEVICE,
+ MHI_CH_TYPE_INBOUND_COALESCED = 3,
+};
+
+/**
+ * struct image_info - Firmware and RDDM table
+ * @mhi_buf: Buffer for firmware and RDDM table
+ * @entries: # of entries in table
+ */
+struct image_info {
+ struct mhi_buf *mhi_buf;
+ /* private: from internal.h */
+ struct bhi_vec_entry *bhi_vec;
+ /* public: */
+ u32 entries;
+};
+
+/**
+ * struct mhi_link_info - BW requirement
+ * @target_link_speed: Link speed as defined by TLS bits in LinkControl reg
+ * @target_link_width: Link width as defined by NLW bits in LinkStatus reg
+ */
+struct mhi_link_info {
+ unsigned int target_link_speed;
+ unsigned int target_link_width;
+};
+
+/**
+ * enum mhi_ee_type - Execution environment types
+ * @MHI_EE_PBL: Primary Bootloader
+ * @MHI_EE_SBL: Secondary Bootloader
+ * @MHI_EE_AMSS: Modem, aka the primary runtime EE
+ * @MHI_EE_RDDM: Ram dump download mode
+ * @MHI_EE_WFW: WLAN firmware mode
+ * @MHI_EE_PTHRU: Passthrough
+ * @MHI_EE_EDL: Embedded downloader
+ * @MHI_EE_FP: Flash Programmer Environment
+ */
+enum mhi_ee_type {
+ MHI_EE_PBL,
+ MHI_EE_SBL,
+ MHI_EE_AMSS,
+ MHI_EE_RDDM,
+ MHI_EE_WFW,
+ MHI_EE_PTHRU,
+ MHI_EE_EDL,
+ MHI_EE_FP,
+ MHI_EE_MAX_SUPPORTED = MHI_EE_FP,
+ MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
+ MHI_EE_NOT_SUPPORTED,
+ MHI_EE_MAX,
+};
+
+/**
+ * enum mhi_state - MHI states
+ * @MHI_STATE_RESET: Reset state
+ * @MHI_STATE_READY: Ready state
+ * @MHI_STATE_M0: M0 state
+ * @MHI_STATE_M1: M1 state
+ * @MHI_STATE_M2: M2 state
+ * @MHI_STATE_M3: M3 state
+ * @MHI_STATE_M3_FAST: M3 Fast state
+ * @MHI_STATE_BHI: BHI state
+ * @MHI_STATE_SYS_ERR: System Error state
+ */
+enum mhi_state {
+ MHI_STATE_RESET = 0x0,
+ MHI_STATE_READY = 0x1,
+ MHI_STATE_M0 = 0x2,
+ MHI_STATE_M1 = 0x3,
+ MHI_STATE_M2 = 0x4,
+ MHI_STATE_M3 = 0x5,
+ MHI_STATE_M3_FAST = 0x6,
+ MHI_STATE_BHI = 0x7,
+ MHI_STATE_SYS_ERR = 0xFF,
+ MHI_STATE_MAX,
+};
+
+/**
+ * enum mhi_ch_ee_mask - Execution environment mask for channel
+ * @MHI_CH_EE_PBL: Allow channel to be used in PBL EE
+ * @MHI_CH_EE_SBL: Allow channel to be used in SBL EE
+ * @MHI_CH_EE_AMSS: Allow channel to be used in AMSS EE
+ * @MHI_CH_EE_RDDM: Allow channel to be used in RDDM EE
+ * @MHI_CH_EE_PTHRU: Allow channel to be used in PTHRU EE
+ * @MHI_CH_EE_WFW: Allow channel to be used in WFW EE
+ * @MHI_CH_EE_EDL: Allow channel to be used in EDL EE
+ */
+enum mhi_ch_ee_mask {
+ MHI_CH_EE_PBL = BIT(MHI_EE_PBL),
+ MHI_CH_EE_SBL = BIT(MHI_EE_SBL),
+ MHI_CH_EE_AMSS = BIT(MHI_EE_AMSS),
+ MHI_CH_EE_RDDM = BIT(MHI_EE_RDDM),
+ MHI_CH_EE_PTHRU = BIT(MHI_EE_PTHRU),
+ MHI_CH_EE_WFW = BIT(MHI_EE_WFW),
+ MHI_CH_EE_EDL = BIT(MHI_EE_EDL),
+};
+
+/**
+ * enum mhi_er_data_type - Event ring data types
+ * @MHI_ER_DATA: Only client data over this ring
+ * @MHI_ER_CTRL: MHI control data and client data
+ */
+enum mhi_er_data_type {
+ MHI_ER_DATA,
+ MHI_ER_CTRL,
+};
+
+/**
+ * enum mhi_db_brst_mode - Doorbell mode
+ * @MHI_DB_BRST_DISABLE: Burst mode disable
+ * @MHI_DB_BRST_ENABLE: Burst mode enable
+ */
+enum mhi_db_brst_mode {
+ MHI_DB_BRST_DISABLE = 0x2,
+ MHI_DB_BRST_ENABLE = 0x3,
+};
+
+/**
+ * struct mhi_channel_config - Channel configuration structure for controller
+ * @name: The name of this channel
+ * @num: The number assigned to this channel
+ * @num_elements: The number of elements that can be queued to this channel
+ * @local_elements: The local ring length of the channel
+ * @event_ring: The event ring index that services this channel
+ * @dir: Direction that data may flow on this channel
+ * @type: Channel type
+ * @ee_mask: Execution Environment mask for this channel
+ * @pollcfg: Polling configuration for burst mode. 0 is default; value is in
+ *           milliseconds for UL channels, multiples of 8 ring elements for DL
+ *           channels
+ * @doorbell: Doorbell mode
+ * @lpm_notify: The channel master requires low power mode notifications
+ * @offload_channel: The client manages the channel completely
+ * @doorbell_mode_switch: Channel switches to doorbell mode on M0 transition
+ * @auto_queue: Framework will automatically queue buffers for DL traffic
+ * @wake_capable: Channel capable of waking up the system
+ */
+struct mhi_channel_config {
+ char *name;
+ u32 num;
+ u32 num_elements;
+ u32 local_elements;
+ u32 event_ring;
+ enum dma_data_direction dir;
+ enum mhi_ch_type type;
+ u32 ee_mask;
+ u32 pollcfg;
+ enum mhi_db_brst_mode doorbell;
+ bool lpm_notify;
+ bool offload_channel;
+ bool doorbell_mode_switch;
+ bool auto_queue;
+ bool wake_capable;
+};
+
+/**
+ * struct mhi_event_config - Event ring configuration structure for controller
+ * @num_elements: The number of elements that can be queued to this ring
+ * @irq_moderation_ms: Delay irq for additional events to be aggregated
+ * @irq: IRQ associated with this ring
+ * @channel: Dedicated channel number. U32_MAX indicates a non-dedicated ring
+ * @priority: Priority of this ring. Use 1 for now
+ * @mode: Doorbell mode
+ * @data_type: Type of data this ring will process
+ * @hardware_event: This ring is associated with hardware channels
+ * @client_managed: This ring is client managed
+ * @offload_channel: This ring is associated with an offloaded channel
+ */
+struct mhi_event_config {
+ u32 num_elements;
+ u32 irq_moderation_ms;
+ u32 irq;
+ u32 channel;
+ u32 priority;
+ enum mhi_db_brst_mode mode;
+ enum mhi_er_data_type data_type;
+ bool hardware_event;
+ bool client_managed;
+ bool offload_channel;
+};
+
+/**
+ * struct mhi_controller_config - Root MHI controller configuration
+ * @max_channels: Maximum number of channels supported
+ * @timeout_ms: Timeout value for operations. 0 means use default
+ * @buf_len: Size of automatically allocated buffers. 0 means use default
+ * @num_channels: Number of channels defined in @ch_cfg
+ * @ch_cfg: Array of defined channels
+ * @num_events: Number of event rings defined in @event_cfg
+ * @event_cfg: Array of defined event rings
+ * @use_bounce_buf: Use a bounce buffer pool due to limited DDR access
+ * @m2_no_db: Host is not allowed to ring DB in M2 state
+ */
+struct mhi_controller_config {
+ u32 max_channels;
+ u32 timeout_ms;
+ u32 buf_len;
+ u32 num_channels;
+ const struct mhi_channel_config *ch_cfg;
+ u32 num_events;
+ struct mhi_event_config *event_cfg;
+ bool use_bounce_buf;
+ bool m2_no_db;
+};
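
Putting the three structures together, a controller driver describes its channels and event rings in static tables and points the root config at them. The sketch below is illustrative only; channel names, numbers and ring sizes are invented, not taken from any real controller:

/* Illustrative configuration tables for a hypothetical controller. */
static const struct mhi_channel_config example_channels[] = {
        {
                .name = "EXAMPLE_UL",
                .num = 0,
                .num_elements = 64,
                .event_ring = 0,
                .dir = DMA_TO_DEVICE,
                .ee_mask = MHI_CH_EE_AMSS,
                .doorbell = MHI_DB_BRST_DISABLE,
        },
        {
                .name = "EXAMPLE_DL",
                .num = 1,
                .num_elements = 64,
                .event_ring = 0,
                .dir = DMA_FROM_DEVICE,
                .ee_mask = MHI_CH_EE_AMSS,
                .doorbell = MHI_DB_BRST_DISABLE,
                .auto_queue = true,	/* framework queues DL buffers */
        },
};

static struct mhi_event_config example_events[] = {
        {
                .num_elements = 128,
                .irq = 1,
                .channel = U32_MAX,	/* not dedicated to one channel */
                .priority = 1,
                .mode = MHI_DB_BRST_DISABLE,
                .data_type = MHI_ER_CTRL,
        },
};

static const struct mhi_controller_config example_config = {
        .max_channels = 128,
        .timeout_ms = 2000,
        .num_channels = ARRAY_SIZE(example_channels),
        .ch_cfg = example_channels,
        .num_events = ARRAY_SIZE(example_events),
        .event_cfg = example_events,
};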
+
+/**
+ * struct mhi_controller - Master MHI controller structure
+ * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
+ * controller (required)
+ * @mhi_dev: MHI device instance for the controller
+ * @debugfs_dentry: MHI controller debugfs directory
+ * @regs: Base address of MHI MMIO register space (required)
+ * @bhi: Points to base of MHI BHI register space
+ * @bhie: Points to base of MHI BHIe register space
+ * @wake_db: MHI WAKE doorbell register address
+ * @iova_start: IOMMU starting address for data (required)
+ * @iova_stop: IOMMU stop address for data (required)
+ * @fw_image: Firmware image name for normal booting (optional)
+ * @edl_image: Firmware image name for emergency download mode (optional)
+ * @rddm_size: RAM dump size that host should allocate for debugging purpose
+ * @sbl_size: SBL image size downloaded through BHIe (optional)
+ * @seg_len: BHIe vector size (optional)
+ * @reg_len: Length of the MHI MMIO region (required)
+ * @fbc_image: Points to firmware image buffer
+ * @rddm_image: Points to RAM dump buffer
+ * @mhi_chan: Points to the channel configuration table
+ * @lpm_chans: List of channels that require LPM notifications
+ * @irq: base irq # to request (required)
+ * @max_chan: Maximum number of channels the controller supports
+ * @total_ev_rings: Total # of event rings allocated
+ * @hw_ev_rings: Number of hardware event rings
+ * @sw_ev_rings: Number of software event rings
+ * @nr_irqs: Number of IRQ allocated by bus master (required)
+ * @family_number: MHI controller family number
+ * @device_number: MHI controller device number
+ * @major_version: MHI controller major revision number
+ * @minor_version: MHI controller minor revision number
+ * @serial_number: MHI controller serial number obtained from BHI
+ * @oem_pk_hash: MHI controller OEM PK Hash obtained from BHI
+ * @mhi_event: MHI event ring configurations table
+ * @mhi_cmd: MHI command ring configurations table
+ * @mhi_ctxt: MHI device context, shared memory between host and device
+ * @pm_mutex: Mutex for suspend/resume operation
+ * @pm_lock: Lock for protecting MHI power management state
+ * @timeout_ms: Timeout in ms for state transitions
+ * @pm_state: MHI power management state
+ * @db_access: DB access states
+ * @ee: MHI device execution environment
+ * @dev_state: MHI device state
+ * @dev_wake: Device wakeup count
+ * @pending_pkts: Pending packets for the controller
+ * @M0, M2, M3: Counters to track number of device MHI state changes
+ * @transition_list: List of MHI state transitions
+ * @transition_lock: Lock for protecting MHI state transition list
+ * @wlock: Lock for protecting device wakeup
+ * @mhi_link_info: Device bandwidth info
+ * @st_worker: State transition worker
+ * @hiprio_wq: High priority workqueue for MHI work such as state transitions
+ * @state_event: State change event
+ * @status_cb: CB function to notify power states of the device (required)
+ * @wake_get: CB function to assert device wake (optional)
+ * @wake_put: CB function to de-assert device wake (optional)
+ * @wake_toggle: CB function to assert and de-assert device wake (optional)
+ * @runtime_get: CB function to request controller runtime resume (required)
+ * @runtime_put: CB function to decrement pm usage (required)
+ * @map_single: CB function to create TRE buffer
+ * @unmap_single: CB function to destroy TRE buffer
+ * @read_reg: Read a MHI register via the physical link (required)
+ * @write_reg: Write a MHI register via the physical link (required)
+ * @reset: Controller specific reset function (optional)
+ * @buffer_len: Bounce buffer length
+ * @index: Index of the MHI controller instance
+ * @bounce_buf: Use of bounce buffer
+ * @fbc_download: MHI host needs to do complete image transfer (optional)
+ * @wake_set: Device wakeup set flag
+ * @irq_flags: irq flags passed to request_irq (optional)
+ * @mru: the default MRU for the MHI device
+ *
+ * Fields marked as (required) need to be populated by the controller driver
+ * before calling mhi_register_controller(). For the fields marked as (optional)
+ * they can be populated depending on the usecase.
+ *
+ * The following fields are present for the purpose of implementing any device
+ * specific quirks or customizations for specific MHI revisions used in device
+ * by the controller drivers. The MHI stack will just populate these fields
+ * during mhi_register_controller():
+ * family_number
+ * device_number
+ * major_version
+ * minor_version
+ */
+struct mhi_controller {
+ struct device *cntrl_dev;
+ struct mhi_device *mhi_dev;
+ struct dentry *debugfs_dentry;
+ void __iomem *regs;
+ void __iomem *bhi;
+ void __iomem *bhie;
+ void __iomem *wake_db;
+
+ dma_addr_t iova_start;
+ dma_addr_t iova_stop;
+ const char *fw_image;
+ const char *edl_image;
+ size_t rddm_size;
+ size_t sbl_size;
+ size_t seg_len;
+ size_t reg_len;
+ struct image_info *fbc_image;
+ struct image_info *rddm_image;
+ struct mhi_chan *mhi_chan;
+ struct list_head lpm_chans;
+ int *irq;
+ u32 max_chan;
+ u32 total_ev_rings;
+ u32 hw_ev_rings;
+ u32 sw_ev_rings;
+ u32 nr_irqs;
+ u32 family_number;
+ u32 device_number;
+ u32 major_version;
+ u32 minor_version;
+ u32 serial_number;
+ u32 oem_pk_hash[MHI_MAX_OEM_PK_HASH_SEGMENTS];
+
+ struct mhi_event *mhi_event;
+ struct mhi_cmd *mhi_cmd;
+ struct mhi_ctxt *mhi_ctxt;
+
+ struct mutex pm_mutex;
+ rwlock_t pm_lock;
+ u32 timeout_ms;
+ u32 pm_state;
+ u32 db_access;
+ enum mhi_ee_type ee;
+ enum mhi_state dev_state;
+ atomic_t dev_wake;
+ atomic_t pending_pkts;
+ u32 M0, M2, M3;
+ struct list_head transition_list;
+ spinlock_t transition_lock;
+ spinlock_t wlock;
+ struct mhi_link_info mhi_link_info;
+ struct work_struct st_worker;
+ struct workqueue_struct *hiprio_wq;
+ wait_queue_head_t state_event;
+
+ void (*status_cb)(struct mhi_controller *mhi_cntrl,
+ enum mhi_callback cb);
+ void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override);
+ void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override);
+ void (*wake_toggle)(struct mhi_controller *mhi_cntrl);
+ int (*runtime_get)(struct mhi_controller *mhi_cntrl);
+ void (*runtime_put)(struct mhi_controller *mhi_cntrl);
+ int (*map_single)(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf);
+ void (*unmap_single)(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf);
+ int (*read_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
+ u32 *out);
+ void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
+ u32 val);
+ void (*reset)(struct mhi_controller *mhi_cntrl);
+
+ size_t buffer_len;
+ int index;
+ bool bounce_buf;
+ bool fbc_download;
+ bool wake_set;
+ unsigned long irq_flags;
+ u32 mru;
+};
+
+/**
+ * struct mhi_device - Structure representing an MHI device which binds
+ * to channels or is associated with controllers
+ * @id: Pointer to MHI device ID struct
+ * @name: Name of the associated MHI device
+ * @mhi_cntrl: Controller the device belongs to
+ * @ul_chan: UL channel for the device
+ * @dl_chan: DL channel for the device
+ * @dev: Driver model device node for the MHI device
+ * @dev_type: MHI device type
+ * @ul_chan_id: MHI channel id for UL transfer
+ * @dl_chan_id: MHI channel id for DL transfer
+ * @dev_wake: Device wakeup counter
+ */
+struct mhi_device {
+ const struct mhi_device_id *id;
+ const char *name;
+ struct mhi_controller *mhi_cntrl;
+ struct mhi_chan *ul_chan;
+ struct mhi_chan *dl_chan;
+ struct device dev;
+ enum mhi_device_type dev_type;
+ int ul_chan_id;
+ int dl_chan_id;
+ u32 dev_wake;
+};
+
+/**
+ * struct mhi_result - Completed buffer information
+ * @buf_addr: Address of data buffer
+ * @bytes_xferd: # of bytes transferred
+ * @dir: Channel direction
+ * @transaction_status: Status of last transaction
+ */
+struct mhi_result {
+ void *buf_addr;
+ size_t bytes_xferd;
+ enum dma_data_direction dir;
+ int transaction_status;
+};
+
+/**
+ * struct mhi_buf - MHI Buffer description
+ * @buf: Virtual address of the buffer
+ * @name: Buffer label. For offload channel configurations, the name must be:
+ * ECA - Event context array data
+ * CCA - Channel context array data
+ * @dma_addr: IOMMU address of the buffer
+ * @len: # of bytes
+ */
+struct mhi_buf {
+ void *buf;
+ const char *name;
+ dma_addr_t dma_addr;
+ size_t len;
+};
+
+/**
+ * struct mhi_driver - Structure representing an MHI client driver
+ * @id_table: Pointer to MHI client driver ID table
+ * @probe: CB function for client driver probe function
+ * @remove: CB function for client driver remove function
+ * @ul_xfer_cb: CB function for UL data transfer
+ * @dl_xfer_cb: CB function for DL data transfer
+ * @status_cb: CB functions for asynchronous status
+ * @driver: Device driver model driver
+ */
+struct mhi_driver {
+ const struct mhi_device_id *id_table;
+ int (*probe)(struct mhi_device *mhi_dev,
+ const struct mhi_device_id *id);
+ void (*remove)(struct mhi_device *mhi_dev);
+ void (*ul_xfer_cb)(struct mhi_device *mhi_dev,
+ struct mhi_result *result);
+ void (*dl_xfer_cb)(struct mhi_device *mhi_dev,
+ struct mhi_result *result);
+ void (*status_cb)(struct mhi_device *mhi_dev, enum mhi_callback mhi_cb);
+ struct device_driver driver;
+};
+
+#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver)
+#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
+
+/**
+ * mhi_alloc_controller - Allocate the MHI Controller structure
+ * Allocate the mhi_controller structure using zero-initialized memory
+ */
+struct mhi_controller *mhi_alloc_controller(void);
+
+/**
+ * mhi_free_controller - Free the MHI Controller structure
+ * Free the mhi_controller structure which was previously allocated
+ */
+void mhi_free_controller(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_register_controller - Register MHI controller
+ * @mhi_cntrl: MHI controller to register
+ * @config: Configuration to use for the controller
+ */
+int mhi_register_controller(struct mhi_controller *mhi_cntrl,
+ const struct mhi_controller_config *config);
+
+/**
+ * mhi_unregister_controller - Unregister MHI controller
+ * @mhi_cntrl: MHI controller to unregister
+ */
+void mhi_unregister_controller(struct mhi_controller *mhi_cntrl);
+
+/*
+ * module_mhi_driver() - Helper macro for drivers that don't do
+ * anything special other than using default mhi_driver_register() and
+ * mhi_driver_unregister(). This eliminates a lot of boilerplate.
+ * Each module may only use this macro once.
+ */
+#define module_mhi_driver(mhi_drv) \
+ module_driver(mhi_drv, mhi_driver_register, \
+ mhi_driver_unregister)
+
+/*
+ * Macro to avoid include chaining to get THIS_MODULE
+ */
+#define mhi_driver_register(mhi_drv) \
+ __mhi_driver_register(mhi_drv, THIS_MODULE)
+
+/**
+ * __mhi_driver_register - Register driver with MHI framework
+ * @mhi_drv: Driver associated with the device
+ * @owner: The module owner
+ */
+int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner);
+
+/**
+ * mhi_driver_unregister - Unregister a driver for mhi_devices
+ * @mhi_drv: Driver associated with the device
+ */
+void mhi_driver_unregister(struct mhi_driver *mhi_drv);
+
+/**
+ * mhi_set_mhi_state - Set MHI device state
+ * @mhi_cntrl: MHI controller
+ * @state: State to set
+ */
+void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
+ enum mhi_state state);
+
+/**
+ * mhi_notify - Notify the MHI client driver about client device status
+ * @mhi_dev: MHI device instance
+ * @cb_reason: MHI callback reason
+ */
+void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason);
+
+/**
+ * mhi_get_free_desc_count - Get transfer ring length
+ * Get the number of TDs available for queueing buffers
+ * @mhi_dev: Device associated with the channels
+ * @dir: Direction of the channel
+ */
+int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
+ enum dma_data_direction dir);
+
+/**
+ * mhi_prepare_for_power_up - Do pre-initialization before power up.
+ * This is optional; call this before power up if
+ * the controller does not want the bus framework to
+ * automatically free any allocated memory during the
+ * shutdown process.
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_async_power_up - Start MHI power up sequence
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_async_power_up(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_sync_power_up - Start MHI power up sequence and wait until the device
+ * enters a valid EE state
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_power_down - Start MHI power down sequence
+ * @mhi_cntrl: MHI controller
+ * @graceful: Link is still accessible, so do a graceful shutdown process
+ */
+void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);
+
+/**
+ * mhi_unprepare_after_power_down - Free any allocated memory after power down
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_pm_suspend - Move MHI into a suspended state
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_pm_resume - Resume MHI from suspended state
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_pm_resume_force - Force resume MHI from suspended state
+ * @mhi_cntrl: MHI controller
+ *
+ * Resume the device irrespective of its MHI state. As per the MHI spec, devices
+ * have to be in M3 state during resume. But some devices are seen in an MHI
+ * state other than M3, yet they continue to work fine if resume is allowed.
+ * This API is intended to be used for such devices.
+ *
+ * Return: 0 if the resume succeeds, a negative error code otherwise
+ */
+int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_download_rddm_image - Download ramdump image from device for
+ * debugging purposes.
+ * @mhi_cntrl: MHI controller
+ * @in_panic: Download rddm image during kernel panic
+ */
+int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic);
+
+/**
+ * mhi_force_rddm_mode - Force device into rddm mode
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_get_exec_env - Get BHI execution environment of the device
+ * @mhi_cntrl: MHI controller
+ */
+enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_get_mhi_state - Get MHI state of the device
+ * @mhi_cntrl: MHI controller
+ */
+enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_soc_reset - Trigger a device reset. This can be used as a last resort
+ * to reset and recover a device.
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_soc_reset(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_device_get - Disable device low power mode
+ * @mhi_dev: Device associated with the channel
+ */
+void mhi_device_get(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_device_get_sync - Disable device low power mode. Synchronously
+ * take the controller out of suspended state
+ * @mhi_dev: Device associated with the channel
+ */
+int mhi_device_get_sync(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_device_put - Re-enable device low power mode
+ * @mhi_dev: Device associated with the channel
+ */
+void mhi_device_put(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_prepare_for_transfer - Setup UL and DL channels for data transfer.
+ * @mhi_dev: Device associated with the channels
+ *
+ * Allocate and initialize the channel context and also issue the START channel
+ * command to both channels. Channels can be started only if both host and
+ * device execution environments match and channels are in a DISABLED state.
+ */
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_prepare_for_transfer_autoqueue - Setup UL and DL channels with auto queue
+ * buffers for DL traffic
+ * @mhi_dev: Device associated with the channels
+ *
+ * Allocate and initialize the channel context and also issue the START channel
+ * command to both channels. Channels can be started only if both host and
+ * device execution environments match and channels are in a DISABLED state.
+ * The MHI core will automatically allocate and queue buffers for the DL traffic.
+ */
+int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer.
+ * Issue the RESET channel command and let the
+ * device clean up the context so no incoming
+ * transfers are seen on the host. Free memory
+ * associated with the context on the host. If the
+ * device is unresponsive, only perform a host-side
+ * clean-up. Channels can be reset only if both
+ * host and device execution environments match
+ * and channels are in an ENABLED, STOPPED or
+ * SUSPENDED state.
+ * @mhi_dev: Device associated with the channels
+ */
+void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_queue_dma - Send or receive DMA mapped buffers from client device
+ * over MHI channel
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ * @mhi_buf: Buffer for holding the DMA mapped data
+ * @len: Buffer length
+ * @mflags: MHI transfer flags used for the transfer
+ */
+int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags);
+
+/**
+ * mhi_queue_buf - Send or receive raw buffers from client device over MHI
+ * channel
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ * @buf: Buffer for holding the data
+ * @len: Buffer length
+ * @mflags: MHI transfer flags used for the transfer
+ */
+int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ void *buf, size_t len, enum mhi_flags mflags);
+
+/**
+ * mhi_queue_skb - Send or receive SKBs from client device over MHI channel
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ * @skb: Buffer for holding SKBs
+ * @len: Buffer length
+ * @mflags: MHI transfer flags used for the transfer
+ */
+int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+ struct sk_buff *skb, size_t len, enum mhi_flags mflags);
+
+/**
+ * mhi_queue_is_full - Determine whether queueing new elements is possible
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ */
+bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir);
+
+#endif /* _MHI_H_ */
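
For illustration, a minimal client driver against the interface above might look as follows. The "SANDBOX" channel name and all sandbox_* identifiers are hypothetical, and error handling is pared down to the essentials:

#include <linux/mhi.h>
#include <linux/module.h>

static int sandbox_probe(struct mhi_device *mhi_dev,
                         const struct mhi_device_id *id)
{
        /* Start both channels; buffers may be queued afterwards */
        return mhi_prepare_for_transfer(mhi_dev);
}

static void sandbox_remove(struct mhi_device *mhi_dev)
{
        mhi_unprepare_from_transfer(mhi_dev);
}

static void sandbox_ul_xfer_cb(struct mhi_device *mhi_dev,
                               struct mhi_result *result)
{
        /* The queued UL buffer was consumed; free or recycle it here */
}

static void sandbox_dl_xfer_cb(struct mhi_device *mhi_dev,
                               struct mhi_result *result)
{
        /* result->buf_addr holds result->bytes_xferd bytes of DL data */
}

static const struct mhi_device_id sandbox_id_table[] = {
        { .chan = "SANDBOX" },          /* hypothetical channel name */
        {}
};

static struct mhi_driver sandbox_driver = {
        .id_table = sandbox_id_table,
        .probe = sandbox_probe,
        .remove = sandbox_remove,
        .ul_xfer_cb = sandbox_ul_xfer_cb,
        .dl_xfer_cb = sandbox_dl_xfer_cb,
        .driver = {
                .name = "mhi_sandbox",
        },
};
module_mhi_driver(sandbox_driver);
MODULE_LICENSE("GPL");

module_mhi_driver() expands to the usual module init/exit boilerplate, so the driver body stays free of registration code.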
diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
new file mode 100644
index 000000000000..f198a8ac7ee7
--- /dev/null
+++ b/include/linux/mhi_ep.h
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Linaro Ltd.
+ *
+ */
+#ifndef _MHI_EP_H_
+#define _MHI_EP_H_
+
+#include <linux/dma-direction.h>
+#include <linux/mhi.h>
+
+#define MHI_EP_DEFAULT_MTU 0x8000
+
+/**
+ * struct mhi_ep_channel_config - Channel configuration structure for controller
+ * @name: The name of this channel
+ * @num: The number assigned to this channel
+ * @num_elements: The number of elements that can be queued to this channel
+ * @dir: Direction that data may flow on this channel
+ */
+struct mhi_ep_channel_config {
+ char *name;
+ u32 num;
+ u32 num_elements;
+ enum dma_data_direction dir;
+};
+
+/**
+ * struct mhi_ep_cntrl_config - MHI Endpoint controller configuration
+ * @mhi_version: MHI spec version supported by the controller
+ * @max_channels: Maximum number of channels supported
+ * @num_channels: Number of channels defined in @ch_cfg
+ * @ch_cfg: Array of defined channels
+ */
+struct mhi_ep_cntrl_config {
+ u32 mhi_version;
+ u32 max_channels;
+ u32 num_channels;
+ const struct mhi_ep_channel_config *ch_cfg;
+};
+
+/**
+ * struct mhi_ep_db_info - MHI Endpoint doorbell info
+ * @mask: Mask of the doorbell interrupt
+ * @status: Status of the doorbell interrupt
+ */
+struct mhi_ep_db_info {
+ u32 mask;
+ u32 status;
+};
+
+/**
+ * struct mhi_ep_cntrl - MHI Endpoint controller structure
+ * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
+ * Endpoint controller
+ * @mhi_dev: MHI Endpoint device instance for the controller
+ * @mmio: MMIO region containing the MHI registers
+ * @mhi_chan: Points to the channel configuration table
+ * @mhi_event: Points to the event ring configurations table
+ * @mhi_cmd: Points to the command ring configurations table
+ * @sm: MHI Endpoint state machine
+ * @ch_ctx_cache: Cache of host channel context data structure
+ * @ev_ctx_cache: Cache of host event context data structure
+ * @cmd_ctx_cache: Cache of host command context data structure
+ * @ch_ctx_host_pa: Physical address of host channel context data structure
+ * @ev_ctx_host_pa: Physical address of host event context data structure
+ * @cmd_ctx_host_pa: Physical address of host command context data structure
+ * @ch_ctx_cache_phys: Physical address of the host channel context cache
+ * @ev_ctx_cache_phys: Physical address of the host event context cache
+ * @cmd_ctx_cache_phys: Physical address of the host command context cache
+ * @chdb: Array of channel doorbell interrupt info
+ * @event_lock: Lock for protecting event rings
+ * @state_lock: Lock for protecting state transitions
+ * @list_lock: Lock for protecting state transition and channel doorbell lists
+ * @st_transition_list: List of state transitions
+ * @ch_db_list: List of queued channel doorbells
+ * @wq: Dedicated workqueue for handling rings and state changes
+ * @state_work: State transition worker
+ * @reset_work: Worker for MHI Endpoint reset
+ * @cmd_ring_work: Worker for processing command rings
+ * @ch_ring_work: Worker for processing channel rings
+ * @raise_irq: CB function for raising IRQ to the host
+ * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it
+ * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context
+ * @read_from_host: CB function for reading from host memory from endpoint
+ * @write_to_host: CB function for writing to host memory from endpoint
+ * @mhi_state: MHI Endpoint state
+ * @max_chan: Maximum channels supported by the endpoint controller
+ * @mru: MRU (Maximum Receive Unit) value of the endpoint controller
+ * @event_rings: Number of event rings supported by the endpoint controller
+ * @hw_event_rings: Number of hardware event rings supported by the endpoint controller
+ * @chdb_offset: Channel doorbell offset set by the host
+ * @erdb_offset: Event ring doorbell offset set by the host
+ * @index: MHI Endpoint controller index
+ * @irq: IRQ used by the endpoint controller
+ * @enabled: Flag indicating whether the endpoint controller is enabled
+ */
+struct mhi_ep_cntrl {
+ struct device *cntrl_dev;
+ struct mhi_ep_device *mhi_dev;
+ void __iomem *mmio;
+
+ struct mhi_ep_chan *mhi_chan;
+ struct mhi_ep_event *mhi_event;
+ struct mhi_ep_cmd *mhi_cmd;
+ struct mhi_ep_sm *sm;
+
+ struct mhi_chan_ctxt *ch_ctx_cache;
+ struct mhi_event_ctxt *ev_ctx_cache;
+ struct mhi_cmd_ctxt *cmd_ctx_cache;
+ u64 ch_ctx_host_pa;
+ u64 ev_ctx_host_pa;
+ u64 cmd_ctx_host_pa;
+ phys_addr_t ch_ctx_cache_phys;
+ phys_addr_t ev_ctx_cache_phys;
+ phys_addr_t cmd_ctx_cache_phys;
+
+ struct mhi_ep_db_info chdb[4];
+ struct mutex event_lock;
+ struct mutex state_lock;
+ spinlock_t list_lock;
+
+ struct list_head st_transition_list;
+ struct list_head ch_db_list;
+
+ struct workqueue_struct *wq;
+ struct work_struct state_work;
+ struct work_struct reset_work;
+ struct work_struct cmd_ring_work;
+ struct work_struct ch_ring_work;
+
+ void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector);
+ int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr,
+ void __iomem **virt, size_t size);
+ void (*unmap_free)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t phys,
+ void __iomem *virt, size_t size);
+ int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, u64 from, void *to, size_t size);
+ int (*write_to_host)(struct mhi_ep_cntrl *mhi_cntrl, void *from, u64 to, size_t size);
+
+ enum mhi_state mhi_state;
+
+ u32 max_chan;
+ u32 mru;
+ u32 event_rings;
+ u32 hw_event_rings;
+ u32 chdb_offset;
+ u32 erdb_offset;
+ u32 index;
+ int irq;
+ bool enabled;
+};
+
+/**
+ * struct mhi_ep_device - Structure representing an MHI Endpoint device that binds
+ * to channels or is associated with controllers
+ * @dev: Driver model device node for the MHI Endpoint device
+ * @mhi_cntrl: Controller the device belongs to
+ * @id: Pointer to MHI Endpoint device ID struct
+ * @name: Name of the associated MHI Endpoint device
+ * @ul_chan: UL (from host to endpoint) channel for the device
+ * @dl_chan: DL (from endpoint to host) channel for the device
+ * @dev_type: MHI device type
+ */
+struct mhi_ep_device {
+ struct device dev;
+ struct mhi_ep_cntrl *mhi_cntrl;
+ const struct mhi_device_id *id;
+ const char *name;
+ struct mhi_ep_chan *ul_chan;
+ struct mhi_ep_chan *dl_chan;
+ enum mhi_device_type dev_type;
+};
+
+/**
+ * struct mhi_ep_driver - Structure representing a MHI Endpoint client driver
+ * @id_table: Pointer to MHI Endpoint device ID table
+ * @driver: Device driver model driver
+ * @probe: CB function for client driver probe function
+ * @remove: CB function for client driver remove function
+ * @ul_xfer_cb: CB function for UL (from host to endpoint) data transfer
+ * @dl_xfer_cb: CB function for DL (from endpoint to host) data transfer
+ */
+struct mhi_ep_driver {
+ const struct mhi_device_id *id_table;
+ struct device_driver driver;
+ int (*probe)(struct mhi_ep_device *mhi_ep,
+ const struct mhi_device_id *id);
+ void (*remove)(struct mhi_ep_device *mhi_ep);
+ void (*ul_xfer_cb)(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *result);
+ void (*dl_xfer_cb)(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *result);
+};
+
+#define to_mhi_ep_device(dev) container_of(dev, struct mhi_ep_device, dev)
+#define to_mhi_ep_driver(drv) container_of(drv, struct mhi_ep_driver, driver)
+
+/*
+ * module_mhi_ep_driver() - Helper macro for drivers that don't do
+ * anything special other than using default mhi_ep_driver_register() and
+ * mhi_ep_driver_unregister(). This eliminates a lot of boilerplate.
+ * Each module may only use this macro once.
+ */
+#define module_mhi_ep_driver(mhi_drv) \
+ module_driver(mhi_drv, mhi_ep_driver_register, \
+ mhi_ep_driver_unregister)
+
+/*
+ * Macro to avoid include chaining to get THIS_MODULE
+ */
+#define mhi_ep_driver_register(mhi_drv) \
+ __mhi_ep_driver_register(mhi_drv, THIS_MODULE)
+
+/**
+ * __mhi_ep_driver_register - Register a driver with MHI Endpoint bus
+ * @mhi_drv: Driver to be associated with the device
+ * @owner: The module owner
+ *
+ * Return: 0 if driver registration succeeds, a negative error code otherwise.
+ */
+int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner);
+
+/**
+ * mhi_ep_driver_unregister - Unregister a driver from MHI Endpoint bus
+ * @mhi_drv: Driver associated with the device
+ */
+void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv);
+
+/**
+ * mhi_ep_register_controller - Register MHI Endpoint controller
+ * @mhi_cntrl: MHI Endpoint controller to register
+ * @config: Configuration to use for the controller
+ *
+ * Return: 0 if controller registration succeeds, a negative error code otherwise.
+ */
+int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+ const struct mhi_ep_cntrl_config *config);
+
+/**
+ * mhi_ep_unregister_controller - Unregister MHI Endpoint controller
+ * @mhi_cntrl: MHI Endpoint controller to unregister
+ */
+void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_power_up - Power up the MHI endpoint stack
+ * @mhi_cntrl: MHI Endpoint controller
+ *
+ * Return: 0 if power up succeeds, a negative error code otherwise.
+ */
+int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_power_down - Power down the MHI endpoint stack
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_queue_is_empty - Determine whether the transfer queue is empty
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ *
+ * Return: true if the queue is empty, false otherwise.
+ */
+bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir);
+
+/**
+ * mhi_ep_queue_skb - Send an SKB to the host over MHI Endpoint
+ * @mhi_dev: Device associated with the DL channel
+ * @skb: SKB to be queued
+ *
+ * Return: 0 if the SKB has been sent successfully, a negative error code otherwise.
+ */
+int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb);
+
+#endif
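
For illustration, registering an endpoint controller against this header might look as follows. The channel table, the version encoding and the transport callbacks (raise_irq, alloc_map, unmap_free, read_from_host, write_to_host) are assumptions; a real controller driver wires the callbacks to its bus before registering:

#include <linux/dma-direction.h>
#include <linux/kernel.h>
#include <linux/mhi_ep.h>

static const struct mhi_ep_channel_config sandbox_ep_channels[] = {
        {
                .name = "SANDBOX",      /* hypothetical UL channel */
                .num = 0,
                .num_elements = 32,
                .dir = DMA_TO_DEVICE,   /* direction convention assumed */
        },
        {
                .name = "SANDBOX",      /* hypothetical DL channel */
                .num = 1,
                .num_elements = 32,
                .dir = DMA_FROM_DEVICE,
        },
};

static const struct mhi_ep_cntrl_config sandbox_ep_config = {
        .mhi_version = 0x1000000,       /* encoding for MHI v1.0, assumed */
        .max_channels = 128,
        .num_channels = ARRAY_SIZE(sandbox_ep_channels),
        .ch_cfg = sandbox_ep_channels,
};

static int sandbox_ep_setup(struct mhi_ep_cntrl *mhi_cntrl)
{
        int ret;

        /* The transport callbacks must already be wired up at this point */
        ret = mhi_ep_register_controller(mhi_cntrl, &sandbox_ep_config);
        if (ret)
                return ret;

        /* Bring up the stack once the host side link is ready */
        return mhi_ep_power_up(mhi_cntrl);
}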
diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h
deleted file mode 100644
index 504d54c71bdb..000000000000
--- a/include/linux/mic_bus.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Intel MIC Bus driver.
- *
- * This implementation is very similar to the the virtio bus driver
- * implementation @ include/linux/virtio.h.
- */
-#ifndef _MIC_BUS_H_
-#define _MIC_BUS_H_
-/*
- * Everything a mbus driver needs to work with any particular mbus
- * implementation.
- */
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
-
-struct mbus_device_id {
- __u32 device;
- __u32 vendor;
-};
-
-#define MBUS_DEV_DMA_HOST 2
-#define MBUS_DEV_DMA_MIC 3
-#define MBUS_DEV_ANY_ID 0xffffffff
-
-/**
- * mbus_device - representation of a device using mbus
- * @mmio_va: virtual address of mmio space
- * @hw_ops: the hardware ops supported by this device.
- * @id: the device type identification (used to match it with a driver).
- * @dev: underlying device.
- * be used to communicate with.
- * @index: unique position on the mbus bus
- */
-struct mbus_device {
- void __iomem *mmio_va;
- struct mbus_hw_ops *hw_ops;
- struct mbus_device_id id;
- struct device dev;
- int index;
-};
-
-/**
- * mbus_driver - operations for a mbus I/O driver
- * @driver: underlying device driver (populate name and owner).
- * @id_table: the ids serviced by this driver.
- * @probe: the function to call when a device is found. Returns 0 or -errno.
- * @remove: the function to call when a device is removed.
- */
-struct mbus_driver {
- struct device_driver driver;
- const struct mbus_device_id *id_table;
- int (*probe)(struct mbus_device *dev);
- void (*scan)(struct mbus_device *dev);
- void (*remove)(struct mbus_device *dev);
-};
-
-/**
- * struct mic_irq - opaque pointer used as cookie
- */
-struct mic_irq;
-
-/**
- * mbus_hw_ops - Hardware operations for accessing a MIC device on the MIC bus.
- */
-struct mbus_hw_ops {
- struct mic_irq* (*request_threaded_irq)(struct mbus_device *mbdev,
- irq_handler_t handler,
- irq_handler_t thread_fn,
- const char *name, void *data,
- int intr_src);
- void (*free_irq)(struct mbus_device *mbdev,
- struct mic_irq *cookie, void *data);
- void (*ack_interrupt)(struct mbus_device *mbdev, int num);
-};
-
-struct mbus_device *
-mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops,
- struct mbus_hw_ops *hw_ops, int index,
- void __iomem *mmio_va);
-void mbus_unregister_device(struct mbus_device *mbdev);
-
-int mbus_register_driver(struct mbus_driver *drv);
-void mbus_unregister_driver(struct mbus_driver *drv);
-
-static inline struct mbus_device *dev_to_mbus(struct device *_dev)
-{
- return container_of(_dev, struct mbus_device, dev);
-}
-
-static inline struct mbus_driver *drv_to_mbus(struct device_driver *drv)
-{
- return container_of(drv, struct mbus_driver, driver);
-}
-
-#endif /* _MIC_BUS_H */
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 472fa4d4ea62..8bef1ab62bba 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -1,18 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/micrel_phy.h
*
* Micrel PHY IDs
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef _MICREL_PHY_H
#define _MICREL_PHY_H
+#define MICREL_OUI 0x0885
+
#define MICREL_PHY_ID_MASK 0x00fffff0
#define PHY_ID_KSZ8873MLL 0x000e7237
@@ -31,21 +28,41 @@
#define PHY_ID_KSZ8081 0x00221560
#define PHY_ID_KSZ8061 0x00221570
#define PHY_ID_KSZ9031 0x00221620
+#define PHY_ID_KSZ9131 0x00221640
+#define PHY_ID_LAN8814 0x00221660
+#define PHY_ID_LAN8804 0x00221670
+#define PHY_ID_LAN8841 0x00221650
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
-#define PHY_ID_KSZ8795 0x00221550
+#define PHY_ID_KSZ87XX 0x00221550
#define PHY_ID_KSZ9477 0x00221631
/* struct phy_device dev_flags definitions */
#define MICREL_PHY_50MHZ_CLK 0x00000001
#define MICREL_PHY_FXEN 0x00000002
+#define MICREL_KSZ8_P1_ERRATA 0x00000003
#define MICREL_KSZ9021_EXTREG_CTRL 0xB
#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC
#define MICREL_KSZ9021_RGMII_CLK_CTRL_PAD_SCEW 0x104
#define MICREL_KSZ9021_RGMII_RX_DATA_PAD_SCEW 0x105
+/* Device specific MII_BMCR (Reg 0) bits */
+/* 1 = HP Auto MDI/MDI-X mode, 0 = Microchip Auto MDI/MDI-X mode */
+#define KSZ886X_BMCR_HP_MDIX BIT(5)
+/* 1 = Force MDI (transmit on RXP/RXM pins), 0 = Normal operation
+ * (transmit on TXP/TXM pins)
+ */
+#define KSZ886X_BMCR_FORCE_MDI BIT(4)
+/* 1 = Disable auto MDI-X */
+#define KSZ886X_BMCR_DISABLE_AUTO_MDIX BIT(3)
+#define KSZ886X_BMCR_DISABLE_FAR_END_FAULT BIT(2)
+#define KSZ886X_BMCR_DISABLE_TRANSMIT BIT(1)
+#define KSZ886X_BMCR_DISABLE_LED BIT(0)
+
+#define KSZ886X_CTRL_MDIX_STAT BIT(4)
+
#endif /* _MICREL_PHY_H */
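
For illustration, the new KSZ886X BMCR bits could be driven from phylib roughly as follows; phy_modify() and MII_BMCR come from the generic PHY layer, and forcing MDI here is purely an example policy:

#include <linux/micrel_phy.h>
#include <linux/mii.h>
#include <linux/phy.h>

static int sandbox_ksz886x_force_mdi(struct phy_device *phydev)
{
        /* Clear HP auto MDI/MDI-X mode, disable auto MDI-X and force
         * transmission onto the RXP/RXM pins.
         */
        return phy_modify(phydev, MII_BMCR,
                          KSZ886X_BMCR_HP_MDIX |
                          KSZ886X_BMCR_DISABLE_AUTO_MDIX |
                          KSZ886X_BMCR_FORCE_MDI,
                          KSZ886X_BMCR_DISABLE_AUTO_MDIX |
                          KSZ886X_BMCR_FORCE_MDI);
}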
diff --git a/include/linux/microchipphy.h b/include/linux/microchipphy.h
index eb492d47f717..517288da19fd 100644
--- a/include/linux/microchipphy.h
+++ b/include/linux/microchipphy.h
@@ -1,18 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2015 Microchip Technology
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _MICROCHIPPHY_H
@@ -70,4 +58,15 @@
#define LAN88XX_MMD3_CHIP_ID (32877)
#define LAN88XX_MMD3_CHIP_REV (32878)
+/* Registers specific to the LAN7800/LAN7850 embedded phy */
+#define LAN78XX_PHY_LED_MODE_SELECT (0x1D)
+
+/* DSP registers */
+#define PHY_ARDENNES_MMD_DEV_3_PHY_CFG (0x806A)
+#define PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_ (0x2000)
+#define LAN88XX_EXT_PAGE_ACCESS_TR (0x52B5)
+#define LAN88XX_EXT_PAGE_TR_CR 16
+#define LAN88XX_EXT_PAGE_TR_LOW_DATA 17
+#define LAN88XX_EXT_PAGE_TR_HIGH_DATA 18
+
#endif /* _MICROCHIPPHY_H */
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 3e0d405dc842..6241a1596a75 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H
@@ -6,84 +7,96 @@
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>
-typedef struct page *new_page_t(struct page *page, unsigned long private,
- int **reason);
+typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);
+struct migration_target_control;
+
/*
 * Return values from address_space_operations.migratepage():
* - negative errno on page migration failure;
* - zero on page migration success;
*/
#define MIGRATEPAGE_SUCCESS 0
-
-enum migrate_reason {
- MR_COMPACTION,
- MR_MEMORY_FAILURE,
- MR_MEMORY_HOTPLUG,
- MR_SYSCALL, /* also applies to cpusets */
- MR_MEMPOLICY_MBIND,
- MR_NUMA_MISPLACED,
- MR_CMA,
- MR_TYPES
+#define MIGRATEPAGE_UNMAP 1
+
+/**
+ * struct movable_operations - Driver page migration
+ * @isolate_page:
+ * The VM calls this function to prepare the page to be moved. The page
+ * is locked and the driver should not unlock it. The driver should
+ * return ``true`` if the page is movable and ``false`` if it is not
+ * currently movable. After this function returns, the VM uses the
+ * page->lru field, so the driver must preserve any information which
+ * is usually stored here.
+ *
+ * @migrate_page:
+ * After isolation, the VM calls this function with the isolated
+ * @src page. The driver should copy the contents of the
+ * @src page to the @dst page and set up the fields of @dst page.
+ * Both pages are locked.
+ * If page migration is successful, the driver should call
+ * __ClearPageMovable(@src) and return MIGRATEPAGE_SUCCESS.
+ * If the driver cannot migrate the page at the moment, it can return
+ * -EAGAIN. The VM interprets this as a temporary migration failure and
+ * will retry it later. Any other error value is a permanent migration
+ * failure and migration will not be retried.
+ * The driver shouldn't touch the @src->lru field while in the
+ * migrate_page() function. It may write to @dst->lru.
+ *
+ * @putback_page:
+ * If migration fails on the isolated page, the VM informs the driver
+ * that the page is no longer a candidate for migration by calling
+ * this function. The driver should put the isolated page back into
+ * its own data structure.
+ */
+struct movable_operations {
+ bool (*isolate_page)(struct page *, isolate_mode_t);
+ int (*migrate_page)(struct page *dst, struct page *src,
+ enum migrate_mode);
+ void (*putback_page)(struct page *);
};
-/* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
-extern char *migrate_reason_names[MR_TYPES];
-
-static inline struct page *new_page_nodemask(struct page *page,
- int preferred_nid, nodemask_t *nodemask)
-{
- gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
-
- if (PageHuge(page))
- return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
- preferred_nid, nodemask);
-
- if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
- gfp_mask |= __GFP_HIGHMEM;
-
- return __alloc_pages_nodemask(gfp_mask, 0, preferred_nid, nodemask);
-}
+/* Defined in mm/debug.c: */
+extern const char *migrate_reason_names[MR_TYPES];
#ifdef CONFIG_MIGRATION
-extern void putback_movable_pages(struct list_head *l);
-extern int migrate_page(struct address_space *mapping,
- struct page *newpage, struct page *page,
- enum migrate_mode mode);
-extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
- unsigned long private, enum migrate_mode mode, int reason);
-extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
-extern void putback_movable_page(struct page *page);
-
-extern int migrate_prep(void);
-extern int migrate_prep_local(void);
-extern void migrate_page_copy(struct page *newpage, struct page *page);
-extern int migrate_huge_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page);
-extern int migrate_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page,
- struct buffer_head *head, enum migrate_mode mode,
- int extra_count);
+void putback_movable_pages(struct list_head *l);
+int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
+ struct folio *src, enum migrate_mode mode, int extra_count);
+int migrate_folio(struct address_space *mapping, struct folio *dst,
+ struct folio *src, enum migrate_mode mode);
+int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
+ unsigned long private, enum migrate_mode mode, int reason,
+ unsigned int *ret_succeeded);
+struct page *alloc_migration_target(struct page *page, unsigned long private);
+bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+
+int migrate_huge_page_move_mapping(struct address_space *mapping,
+ struct folio *dst, struct folio *src);
+void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
+ spinlock_t *ptl);
+void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
+void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
+int folio_migrate_mapping(struct address_space *mapping,
+ struct folio *newfolio, struct folio *folio, int extra_count);
+
#else
static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
free_page_t free, unsigned long private, enum migrate_mode mode,
- int reason)
+ int reason, unsigned int *ret_succeeded)
{ return -ENOSYS; }
-static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
- { return -EBUSY; }
-
-static inline int migrate_prep(void) { return -ENOSYS; }
-static inline int migrate_prep_local(void) { return -ENOSYS; }
-
-static inline void migrate_page_copy(struct page *newpage,
- struct page *page) {}
+static inline struct page *alloc_migration_target(struct page *page,
+ unsigned long private)
+ { return NULL; }
+static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+ { return false; }
static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page)
+ struct folio *dst, struct folio *src)
{
return -ENOSYS;
}
@@ -91,13 +104,13 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_COMPACTION
-extern int PageMovable(struct page *page);
-extern void __SetPageMovable(struct page *page, struct address_space *mapping);
-extern void __ClearPageMovable(struct page *page);
+bool PageMovable(struct page *page);
+void __SetPageMovable(struct page *page, const struct movable_operations *ops);
+void __ClearPageMovable(struct page *page);
#else
-static inline int PageMovable(struct page *page) { return 0; };
+static inline bool PageMovable(struct page *page) { return false; }
static inline void __SetPageMovable(struct page *page,
- struct address_space *mapping)
+ const struct movable_operations *ops)
{
}
static inline void __ClearPageMovable(struct page *page)
@@ -105,15 +118,33 @@ static inline void __ClearPageMovable(struct page *page)
}
#endif
-#ifdef CONFIG_NUMA_BALANCING
-extern bool pmd_trans_migrating(pmd_t pmd);
-extern int migrate_misplaced_page(struct page *page,
- struct vm_area_struct *vma, int node);
-#else
-static inline bool pmd_trans_migrating(pmd_t pmd)
+static inline bool folio_test_movable(struct folio *folio)
+{
+ return PageMovable(&folio->page);
+}
+
+static inline
+const struct movable_operations *folio_movable_ops(struct folio *folio)
{
- return false;
+ VM_BUG_ON(!__folio_test_movable(folio));
+
+ return (const struct movable_operations *)
+ ((unsigned long)folio->mapping - PAGE_MAPPING_MOVABLE);
}
+
+static inline
+const struct movable_operations *page_movable_ops(struct page *page)
+{
+ VM_BUG_ON(!__PageMovable(page));
+
+ return (const struct movable_operations *)
+ ((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
+}
+
+#ifdef CONFIG_NUMA_BALANCING
+int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
+ int node);
+#else
static inline int migrate_misplaced_page(struct page *page,
struct vm_area_struct *vma, int node)
{
@@ -121,21 +152,81 @@ static inline int migrate_misplaced_page(struct page *page,
}
#endif /* CONFIG_NUMA_BALANCING */
-#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
-extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
- struct vm_area_struct *vma,
- pmd_t *pmd, pmd_t entry,
- unsigned long address,
- struct page *page, int node);
-#else
-static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
- struct vm_area_struct *vma,
- pmd_t *pmd, pmd_t entry,
- unsigned long address,
- struct page *page, int node)
+#ifdef CONFIG_MIGRATION
+
+/*
+ * Watch out for PAE architectures, where an unsigned long might not have
+ * enough bits to store the full physical address and the flags. So far we
+ * have enough room for all our flags.
+ */
+#define MIGRATE_PFN_VALID (1UL << 0)
+#define MIGRATE_PFN_MIGRATE (1UL << 1)
+#define MIGRATE_PFN_WRITE (1UL << 3)
+#define MIGRATE_PFN_SHIFT 6
+
+static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
- return -EAGAIN;
+ if (!(mpfn & MIGRATE_PFN_VALID))
+ return NULL;
+ return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}
-#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE*/
+
+static inline unsigned long migrate_pfn(unsigned long pfn)
+{
+ return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
+}
+
+enum migrate_vma_direction {
+ MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
+ MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
+ MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2,
+};
+
+struct migrate_vma {
+ struct vm_area_struct *vma;
+ /*
+ * Both the src and dst arrays must be big enough for
+ * (end - start) >> PAGE_SHIFT entries.
+ *
+ * The caller must not modify the src array after
+ * migrate_vma_setup() returns, and must not modify the dst array
+ * after migrate_vma_pages() returns.
+ */
+ unsigned long *dst;
+ unsigned long *src;
+ unsigned long cpages;
+ unsigned long npages;
+ unsigned long start;
+ unsigned long end;
+
+ /*
+ * Set to the owner value also stored in page->pgmap->owner for
+ * migrating out of device private memory. The flags also need to
+ * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
+ * The caller should always set this field when using mmu notifier
+ * callbacks to avoid device MMU invalidations for device private
+ * pages that are not being migrated.
+ */
+ void *pgmap_owner;
+ unsigned long flags;
+
+ /*
+ * Set to vmf->page if this is being called to migrate a page as part of
+ * a migrate_to_ram() callback.
+ */
+ struct page *fault_page;
+};
+
+int migrate_vma_setup(struct migrate_vma *args);
+void migrate_vma_pages(struct migrate_vma *migrate);
+void migrate_vma_finalize(struct migrate_vma *migrate);
+int migrate_device_range(unsigned long *src_pfns, unsigned long start,
+ unsigned long npages);
+void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
+ unsigned long npages);
+void migrate_device_finalize(unsigned long *src_pfns,
+ unsigned long *dst_pfns, unsigned long npages);
+
+#endif /* CONFIG_MIGRATION */
#endif /* _LINUX_MIGRATE_H */
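
Putting the pieces above together, the migrate_vma flow for a driver is setup, per-page copy, pages, finalize. A condensed sketch, with hypothetical sandbox_* helpers for destination allocation and copying, and with the page locking a real driver needs omitted:

#include <linux/kernel.h>
#include <linux/migrate.h>
#include <linux/mm.h>

/* Hypothetical helpers: allocate a destination page, copy the payload */
static struct page *sandbox_alloc_dst_page(void);
static void sandbox_copy_page(struct page *dst, struct page *src);

static int sandbox_migrate_range(struct vm_area_struct *vma,
                                 unsigned long start, unsigned long end,
                                 void *pgmap_owner)
{
        unsigned long npages = (end - start) >> PAGE_SHIFT;
        unsigned long src[32], dst[32];
        struct migrate_vma args = {
                .vma            = vma,
                .src            = src,
                .dst            = dst,
                .start          = start,
                .end            = end,
                .flags          = MIGRATE_VMA_SELECT_SYSTEM,
                .pgmap_owner    = pgmap_owner,
        };
        unsigned long i;
        int ret;

        if (npages > ARRAY_SIZE(src))
                return -EINVAL;         /* keep the sketch simple */

        ret = migrate_vma_setup(&args); /* collect and isolate source pages */
        if (ret)
                return ret;

        for (i = 0; i < npages; i++) {
                struct page *spage = migrate_pfn_to_page(args.src[i]);
                struct page *dpage;

                if (!spage || !(args.src[i] & MIGRATE_PFN_MIGRATE))
                        continue;       /* this entry cannot be migrated */

                dpage = sandbox_alloc_dst_page();
                if (!dpage)
                        continue;       /* leave the page where it is */

                sandbox_copy_page(dpage, spage);
                args.dst[i] = migrate_pfn(page_to_pfn(dpage));
        }

        migrate_vma_pages(&args);       /* switch the page table entries */
        migrate_vma_finalize(&args);    /* release the source pages */
        return 0;
}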
diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h
index ebf3d89a3919..f37cc03f9369 100644
--- a/include/linux/migrate_mode.h
+++ b/include/linux/migrate_mode.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MIGRATE_MODE_H_INCLUDED
#define MIGRATE_MODE_H_INCLUDED
/*
@@ -6,11 +7,29 @@
* on most operations but not ->writepage as the potential stall time
* is too significant
* MIGRATE_SYNC will block when migrating pages
+ * MIGRATE_SYNC_NO_COPY will block when migrating pages but will not copy pages
+ * with the CPU. Instead, page copy happens outside the migratepage()
+ * callback and is likely using a DMA engine. See migrate_vma() and HMM
+ * (mm/hmm.c) for users of this mode.
*/
enum migrate_mode {
MIGRATE_ASYNC,
MIGRATE_SYNC_LIGHT,
MIGRATE_SYNC,
+ MIGRATE_SYNC_NO_COPY,
+};
+
+enum migrate_reason {
+ MR_COMPACTION,
+ MR_MEMORY_FAILURE,
+ MR_MEMORY_HOTPLUG,
+ MR_SYSCALL, /* also applies to cpusets */
+ MR_MEMPOLICY_MBIND,
+ MR_NUMA_MISPLACED,
+ MR_CONTIG_RANGE,
+ MR_LONGTERM_PIN,
+ MR_DEMOTION,
+ MR_TYPES
};
#endif /* MIGRATE_MODE_H_INCLUDED */
diff --git a/include/linux/mii.h b/include/linux/mii.h
index e870bfa6abfe..d5a959ce4877 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/mii.h: definitions for MII-compatible transceivers
* Originally drivers/net/sunhme.h.
@@ -9,6 +10,7 @@
#include <linux/if.h>
+#include <linux/linkmode.h>
#include <uapi/linux/mii.h>
struct ethtool_cmd;
@@ -30,7 +32,7 @@ struct mii_if_info {
extern int mii_link_ok (struct mii_if_info *mii);
extern int mii_nway_restart (struct mii_if_info *mii);
-extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
+extern void mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
extern void mii_ethtool_get_link_ksettings(
struct mii_if_info *mii, struct ethtool_link_ksettings *cmd);
extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
@@ -131,6 +133,34 @@ static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv)
}
/**
+ * linkmode_adv_to_mii_adv_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_ADVERTISE register.
+ */
+static inline u32 linkmode_adv_to_mii_adv_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, advertising))
+ result |= ADVERTISE_10HALF;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, advertising))
+ result |= ADVERTISE_10FULL;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, advertising))
+ result |= ADVERTISE_100HALF;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, advertising))
+ result |= ADVERTISE_100FULL;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising))
+ result |= ADVERTISE_PAUSE_CAP;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising))
+ result |= ADVERTISE_PAUSE_ASYM;
+
+ return result;
+}
+
+/**
* mii_adv_to_ethtool_adv_t
* @adv: value of the MII_ADVERTISE register
*
@@ -178,6 +208,28 @@ static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv)
}
/**
+ * linkmode_adv_to_mii_ctrl1000_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_CTRL1000 register when in 1000T mode.
+ */
+static inline u32 linkmode_adv_to_mii_ctrl1000_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ advertising))
+ result |= ADVERTISE_1000HALF;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ advertising))
+ result |= ADVERTISE_1000FULL;
+
+ return result;
+}
+
+/**
* mii_ctrl1000_to_ethtool_adv_t
* @adv: value of the MII_CTRL1000 register
*
@@ -236,6 +288,25 @@ static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa)
}
/**
+ * mii_stat1000_mod_linkmode_lpa_t
+ * @advertising: target the linkmode advertisement settings
+ * @adv: value of the MII_STAT1000 register
+ *
+ * A small helper function that translates MII_STAT1000 bits, when in
+ * 1000Base-T mode, to linkmode advertisement settings. Other bits in
+ * advertising are not changed.
+ */
+static inline void mii_stat1000_mod_linkmode_lpa_t(unsigned long *advertising,
+ u32 lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ advertising, lpa & LPA_1000HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ advertising, lpa & LPA_1000FULL);
+}
+
+/**
* ethtool_adv_to_mii_adv_x
* @ethadv: the ethtool advertisement settings
*
@@ -284,21 +355,155 @@ static inline u32 mii_adv_to_ethtool_adv_x(u32 adv)
}
/**
- * mii_lpa_to_ethtool_lpa_x
+ * mii_adv_mod_linkmode_adv_t
+ * @advertising: pointer to destination link mode.
+ * @adv: value of the MII_ADVERTISE register
+ *
+ * A small helper function that translates MII_ADVERTISE bits to
+ * linkmode advertisement settings. Leaves other bits unchanged.
+ */
+static inline void mii_adv_mod_linkmode_adv_t(unsigned long *advertising,
+ u32 adv)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ advertising, adv & ADVERTISE_10HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ advertising, adv & ADVERTISE_10FULL);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ advertising, adv & ADVERTISE_100HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ advertising, adv & ADVERTISE_100FULL);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising,
+ adv & ADVERTISE_PAUSE_CAP);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ advertising, adv & ADVERTISE_PAUSE_ASYM);
+}
+
+/**
+ * mii_adv_to_linkmode_adv_t
+ * @advertising: pointer to destination link mode.
+ * @adv: value of the MII_ADVERTISE register
+ *
+ * A small helper function that translates MII_ADVERTISE bits
+ * to linkmode advertisement settings. Clears the old value
+ * of advertising.
+ */
+static inline void mii_adv_to_linkmode_adv_t(unsigned long *advertising,
+ u32 adv)
+{
+ linkmode_zero(advertising);
+
+ mii_adv_mod_linkmode_adv_t(advertising, adv);
+}
+
+/**
+ * mii_lpa_to_linkmode_lpa_t
* @adv: value of the MII_LPA register
*
- * A small helper function that translates MII_LPA
- * bits, when in 1000Base-X mode, to ethtool
- * LP advertisement settings.
+ * A small helper function that translates MII_LPA bits, when in
+ * 1000Base-T mode, to linkmode LP advertisement settings. Clears the
+ * old value of advertising.
*/
-static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
+static inline void mii_lpa_to_linkmode_lpa_t(unsigned long *lp_advertising,
+ u32 lpa)
{
- u32 result = 0;
+ mii_adv_to_linkmode_adv_t(lp_advertising, lpa);
if (lpa & LPA_LPACK)
- result |= ADVERTISED_Autoneg;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ lp_advertising);
+
+}
+
+/**
+ * mii_lpa_mod_linkmode_lpa_t
+ * @lp_advertising: pointer to destination link mode
+ * @lpa: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA bits, when in
+ * 1000Base-T mode, to linkmode LP advertisement settings. Leaves
+ * other bits unchanged.
+ */
+static inline void mii_lpa_mod_linkmode_lpa_t(unsigned long *lp_advertising,
+ u32 lpa)
+{
+ mii_adv_mod_linkmode_adv_t(lp_advertising, lpa);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ lp_advertising, lpa & LPA_LPACK);
+}
+
+static inline void mii_ctrl1000_mod_linkmode_adv_t(unsigned long *advertising,
+ u32 ctrl1000)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, advertising,
+ ctrl1000 & ADVERTISE_1000HALF);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertising,
+ ctrl1000 & ADVERTISE_1000FULL);
+}
+
+/**
+ * linkmode_adv_to_lcl_adv_t
+ * @advertising: pointer to linkmode advertising
+ *
+ * A small helper function that translates linkmode advertising to the
+ * local advertisement (lcl_adv) pause capabilities.
+ */
+static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising)
+{
+ u32 lcl_adv = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ advertising))
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ advertising))
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
- return result | mii_adv_to_ethtool_adv_x(lpa);
+ return lcl_adv;
+}
+
+/**
+ * mii_lpa_mod_linkmode_x - decode the link partner's config_reg to linkmodes
+ * @linkmodes: link modes array
+ * @lpa: config_reg word from link partner
+ * @fd_bit: link mode for 1000XFULL bit
+ */
+static inline void mii_lpa_mod_linkmode_x(unsigned long *linkmodes, u16 lpa,
+ int fd_bit)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, linkmodes,
+ lpa & LPA_LPACK);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes,
+ lpa & LPA_1000XPAUSE);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes,
+ lpa & LPA_1000XPAUSE_ASYM);
+ linkmode_mod_bit(fd_bit, linkmodes,
+ lpa & LPA_1000XFULL);
+}
+
+/**
+ * linkmode_adv_to_mii_adv_x - encode a linkmode to config_reg
+ * @linkmodes: linkmodes
+ * @fd_bit: full duplex bit
+ */
+static inline u16 linkmode_adv_to_mii_adv_x(const unsigned long *linkmodes,
+ int fd_bit)
+{
+ u16 adv = 0;
+
+ if (linkmode_test_bit(fd_bit, linkmodes))
+ adv |= ADVERTISE_1000XFULL;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes))
+ adv |= ADVERTISE_1000XPAUSE;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes))
+ adv |= ADVERTISE_1000XPSE_ASYM;
+
+ return adv;
}
/**
@@ -340,4 +545,39 @@ static inline u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv)
return cap;
}
+/**
+ * mii_bmcr_encode_fixed - encode fixed speed/duplex settings to a BMCR value
+ * @speed: a SPEED_* value
+ * @duplex: a DUPLEX_* value
+ *
+ * Encode the speed and duplex to a BMCR value. 2500, 1000, 100 and 10 Mbps are
+ * supported. 2500Mbps is encoded to 1000Mbps. Other speeds are encoded as 10
+ * Mbps. Unknown duplex values are encoded to half-duplex.
+ */
+static inline u16 mii_bmcr_encode_fixed(int speed, int duplex)
+{
+ u16 bmcr;
+
+ switch (speed) {
+ case SPEED_2500:
+ case SPEED_1000:
+ bmcr = BMCR_SPEED1000;
+ break;
+
+ case SPEED_100:
+ bmcr = BMCR_SPEED100;
+ break;
+
+ case SPEED_10:
+ default:
+ bmcr = BMCR_SPEED10;
+ break;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ bmcr |= BMCR_FULLDPLX;
+
+ return bmcr;
+}
+
#endif /* __LINUX_MII_H__ */
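
For illustration, a hypothetical PHY driver could tie these helpers together as below; a real driver would normally use phy_modify() so as not to clobber reserved register bits, so the plain phy_write() calls are a simplification:

#include <linux/mii.h>
#include <linux/phy.h>

/* Push the advertised linkmodes out to the MII registers */
static void sandbox_write_advertisement(struct phy_device *phydev)
{
        phy_write(phydev, MII_ADVERTISE,
                  linkmode_adv_to_mii_adv_t(phydev->advertising));
        phy_write(phydev, MII_CTRL1000,
                  linkmode_adv_to_mii_ctrl1000_t(phydev->advertising));
}

/* Decode the link partner's abilities back into a linkmode bitmap */
static void sandbox_read_link_partner(struct phy_device *phydev)
{
        int lpa = phy_read(phydev, MII_LPA);
        int stat1000 = phy_read(phydev, MII_STAT1000);

        if (lpa >= 0)
                mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa);
        if (stat1000 >= 0)
                mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising,
                                                stat1000);
}

/* Program a fixed 100 Mbps full-duplex link without autonegotiation */
static int sandbox_set_fixed_link(struct phy_device *phydev)
{
        return phy_write(phydev, MII_BMCR,
                         mii_bmcr_encode_fixed(SPEED_100, DUPLEX_FULL));
}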
diff --git a/include/linux/mii_timestamper.h b/include/linux/mii_timestamper.h
new file mode 100644
index 000000000000..fa940bbaf8ae
--- /dev/null
+++ b/include/linux/mii_timestamper.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for generic time stamping devices on MII buses.
+ * Copyright (C) 2018 Richard Cochran <richardcochran@gmail.com>
+ */
+#ifndef _LINUX_MII_TIMESTAMPER_H
+#define _LINUX_MII_TIMESTAMPER_H
+
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+
+struct phy_device;
+
+/**
+ * struct mii_timestamper - Callback interface to MII time stamping devices.
+ *
+ * @rxtstamp: Requests a Rx timestamp for 'skb'. If the skb is accepted,
+ * the MII time stamping device promises to deliver it using
+ * netif_rx() as soon as a timestamp becomes available. One of
+ * the PTP_CLASS_ values is passed in 'type'. The function
+ * must return true if the skb is accepted for delivery.
+ *
+ * @txtstamp: Requests a Tx timestamp for 'skb'. The MII time stamping
+ * device promises to deliver it using skb_complete_tx_timestamp()
+ * as soon as a timestamp becomes available. One of the PTP_CLASS_
+ * values is passed in 'type'.
+ *
+ * @hwtstamp: Handles SIOCSHWTSTAMP ioctl for hardware time stamping.
+ *
+ * @link_state: Allows the device to respond to changes in the link
+ * state. The caller invokes this function while holding
+ * the phy_device mutex.
+ *
+ * @ts_info: Handles ethtool queries for hardware time stamping.
+ * @device: Remembers the device to which the instance belongs.
+ *
+ * Drivers for PHY time stamping devices should embed their
+ * mii_timestamper within a private structure, obtaining a reference
+ * to it using container_of().
+ *
+ * Drivers for non-PHY time stamping devices should return a pointer
+ * to a mii_timestamper from the probe_channel() callback of their
+ * mii_timestamping_ctrl interface.
+ */
+struct mii_timestamper {
+ bool (*rxtstamp)(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type);
+
+ void (*txtstamp)(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type);
+
+ int (*hwtstamp)(struct mii_timestamper *mii_ts,
+ struct ifreq *ifreq);
+
+ void (*link_state)(struct mii_timestamper *mii_ts,
+ struct phy_device *phydev);
+
+ int (*ts_info)(struct mii_timestamper *mii_ts,
+ struct ethtool_ts_info *ts_info);
+
+ struct device *device;
+};
+
+/**
+ * struct mii_timestamping_ctrl - MII time stamping controller interface.
+ *
+ * @probe_channel: Callback into the controller driver announcing the
+ * presence of the 'port' channel. The 'device' field
+ * is the one that was passed to register_mii_tstamp_controller().
+ * The driver must return either a pointer to a valid
+ * MII timestamper instance or PTR_ERR.
+ *
+ * @release_channel: Releases an instance obtained via .probe_channel.
+ */
+struct mii_timestamping_ctrl {
+ struct mii_timestamper *(*probe_channel)(struct device *device,
+ unsigned int port);
+ void (*release_channel)(struct device *device,
+ struct mii_timestamper *mii_ts);
+};
+
+#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
+
+int register_mii_tstamp_controller(struct device *device,
+ struct mii_timestamping_ctrl *ctrl);
+
+void unregister_mii_tstamp_controller(struct device *device);
+
+struct mii_timestamper *register_mii_timestamper(struct device_node *node,
+ unsigned int port);
+
+void unregister_mii_timestamper(struct mii_timestamper *mii_ts);
+
+#else
+
+static inline
+int register_mii_tstamp_controller(struct device *device,
+ struct mii_timestamping_ctrl *ctrl)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void unregister_mii_tstamp_controller(struct device *device)
+{
+}
+
+static inline
+struct mii_timestamper *register_mii_timestamper(struct device_node *node,
+ unsigned int port)
+{
+ return NULL;
+}
+
+static inline void unregister_mii_timestamper(struct mii_timestamper *mii_ts)
+{
+}
+
+#endif
+
+#endif
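
For illustration, a PHY time stamping driver would embed the mii_timestamper as the kernel-doc above describes; sandbox_defer_rx_skb() is a hypothetical helper that parks the skb until its timestamp arrives:

#include <linux/mii_timestamper.h>

struct sandbox_phy_ts {
        struct mii_timestamper mii_ts;
        /* device-specific state would live here */
};

/* Hypothetical helper that parks the skb until its timestamp arrives */
static bool sandbox_defer_rx_skb(struct sandbox_phy_ts *ts,
                                 struct sk_buff *skb, int type);

static bool sandbox_rxtstamp(struct mii_timestamper *mii_ts,
                             struct sk_buff *skb, int type)
{
        struct sandbox_phy_ts *ts =
                container_of(mii_ts, struct sandbox_phy_ts, mii_ts);

        /* If accepted, the skb must later be delivered via netif_rx() */
        return sandbox_defer_rx_skb(ts, skb, type);
}

static void sandbox_ts_init(struct sandbox_phy_ts *ts)
{
        ts->mii_ts.rxtstamp = sandbox_rxtstamp;
        /* .txtstamp, .hwtstamp and .ts_info would be wired up likewise */
}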
diff --git a/include/linux/min_heap.h b/include/linux/min_heap.h
new file mode 100644
index 000000000000..44077837385f
--- /dev/null
+++ b/include/linux/min_heap.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MIN_HEAP_H
+#define _LINUX_MIN_HEAP_H
+
+#include <linux/bug.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+/**
+ * struct min_heap - Data structure to hold a min-heap.
+ * @data: Start of array holding the heap elements.
+ * @nr: Number of elements currently in the heap.
+ * @size: Maximum number of elements that can be held in current storage.
+ */
+struct min_heap {
+ void *data;
+ int nr;
+ int size;
+};
+
+/**
+ * struct min_heap_callbacks - Data/functions to customise the min_heap.
+ * @elem_size: The size of each element in bytes.
+ * @less: Partial order function for this heap.
+ * @swp: Swap elements function.
+ */
+struct min_heap_callbacks {
+ int elem_size;
+ bool (*less)(const void *lhs, const void *rhs);
+ void (*swp)(void *lhs, void *rhs);
+};
+
+/* Sift the element at pos down the heap. */
+static __always_inline
+void min_heapify(struct min_heap *heap, int pos,
+ const struct min_heap_callbacks *func)
+{
+ void *left, *right, *parent, *smallest;
+ void *data = heap->data;
+
+ for (;;) {
+ if (pos * 2 + 1 >= heap->nr)
+ break;
+
+ left = data + ((pos * 2 + 1) * func->elem_size);
+ parent = data + (pos * func->elem_size);
+ smallest = parent;
+ if (func->less(left, smallest))
+ smallest = left;
+
+ if (pos * 2 + 2 < heap->nr) {
+ right = data + ((pos * 2 + 2) * func->elem_size);
+ if (func->less(right, smallest))
+ smallest = right;
+ }
+ if (smallest == parent)
+ break;
+ func->swp(smallest, parent);
+ if (smallest == left)
+ pos = (pos * 2) + 1;
+ else
+ pos = (pos * 2) + 2;
+ }
+}
+
+/* Floyd's approach to heapification that is O(nr). */
+static __always_inline
+void min_heapify_all(struct min_heap *heap,
+ const struct min_heap_callbacks *func)
+{
+ int i;
+
+ for (i = heap->nr / 2; i >= 0; i--)
+ min_heapify(heap, i, func);
+}
+
+/* Remove minimum element from the heap, O(log2(nr)). */
+static __always_inline
+void min_heap_pop(struct min_heap *heap,
+ const struct min_heap_callbacks *func)
+{
+ void *data = heap->data;
+
+ if (WARN_ONCE(heap->nr <= 0, "Popping an empty heap"))
+ return;
+
+ /* Place last element at the root (position 0) and then sift down. */
+ heap->nr--;
+ memcpy(data, data + (heap->nr * func->elem_size), func->elem_size);
+ min_heapify(heap, 0, func);
+}
+
+/*
+ * Remove the minimum element and then push the given element. The
+ * implementation performs 1 sift (O(log2(nr))) and is therefore more
+ * efficient than a pop followed by a push that does 2.
+ */
+static __always_inline
+void min_heap_pop_push(struct min_heap *heap,
+ const void *element,
+ const struct min_heap_callbacks *func)
+{
+ memcpy(heap->data, element, func->elem_size);
+ min_heapify(heap, 0, func);
+}
+
+/* Push an element on to the heap, O(log2(nr)). */
+static __always_inline
+void min_heap_push(struct min_heap *heap, const void *element,
+ const struct min_heap_callbacks *func)
+{
+ void *data = heap->data;
+ void *child, *parent;
+ int pos;
+
+ if (WARN_ONCE(heap->nr >= heap->size, "Pushing on a full heap"))
+ return;
+
+ /* Place at the end of data. */
+ pos = heap->nr;
+ memcpy(data + (pos * func->elem_size), element, func->elem_size);
+ heap->nr++;
+
+ /* Sift child at pos up. */
+ for (; pos > 0; pos = (pos - 1) / 2) {
+ child = data + (pos * func->elem_size);
+ parent = data + ((pos - 1) / 2) * func->elem_size;
+ if (func->less(parent, child))
+ break;
+ func->swp(parent, child);
+ }
+}
+
+#endif /* _LINUX_MIN_HEAP_H */
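
For illustration, a caller drives this API with its own storage and callbacks, for example a heap of ints:

#include <linux/kernel.h>
#include <linux/min_heap.h>

static bool sandbox_less(const void *lhs, const void *rhs)
{
        return *(const int *)lhs < *(const int *)rhs;
}

static void sandbox_swp(void *lhs, void *rhs)
{
        int tmp = *(int *)lhs;

        *(int *)lhs = *(int *)rhs;
        *(int *)rhs = tmp;
}

static const struct min_heap_callbacks sandbox_heap_cb = {
        .elem_size = sizeof(int),
        .less = sandbox_less,
        .swp = sandbox_swp,
};

static int sandbox_heap_demo(void)
{
        int storage[8] = { 5, 2, 7, 1 };
        struct min_heap heap = {
                .data = storage,
                .nr = 4,                /* four elements already present */
                .size = ARRAY_SIZE(storage),
        };
        int three = 3;

        min_heapify_all(&heap, &sandbox_heap_cb);  /* establish heap order */
        min_heap_push(&heap, &three, &sandbox_heap_cb);

        return *(int *)heap.data;       /* the minimum element, here 1 */
}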
diff --git a/include/linux/minmax.h b/include/linux/minmax.h
new file mode 100644
index 000000000000..396df1121bff
--- /dev/null
+++ b/include/linux/minmax.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MINMAX_H
+#define _LINUX_MINMAX_H
+
+#include <linux/const.h>
+
+/*
+ * min()/max()/clamp() macros must accomplish three things:
+ *
+ * - avoid multiple evaluations of the arguments (so side-effects like
+ * "x++" happen only once) when non-constant.
+ * - perform strict type-checking (to generate warnings instead of
+ * nasty runtime surprises). See the "unnecessary" pointer comparison
+ * in __typecheck().
+ * - retain the result as a constant expression when called with only
+ * constant expressions (to avoid tripping VLA warnings in stack
+ * allocation usage).
+ */
+#define __typecheck(x, y) \
+ (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
+
+#define __no_side_effects(x, y) \
+ (__is_constexpr(x) && __is_constexpr(y))
+
+#define __safe_cmp(x, y) \
+ (__typecheck(x, y) && __no_side_effects(x, y))
+
+#define __cmp(x, y, op) ((x) op (y) ? (x) : (y))
+
+#define __cmp_once(x, y, unique_x, unique_y, op) ({ \
+ typeof(x) unique_x = (x); \
+ typeof(y) unique_y = (y); \
+ __cmp(unique_x, unique_y, op); })
+
+#define __careful_cmp(x, y, op) \
+ __builtin_choose_expr(__safe_cmp(x, y), \
+ __cmp(x, y, op), \
+ __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op))
+
+#define __clamp(val, lo, hi) \
+ ((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))
+
+#define __clamp_once(val, lo, hi, unique_val, unique_lo, unique_hi) ({ \
+ typeof(val) unique_val = (val); \
+ typeof(lo) unique_lo = (lo); \
+ typeof(hi) unique_hi = (hi); \
+ __clamp(unique_val, unique_lo, unique_hi); })
+
+#define __clamp_input_check(lo, hi) \
+ (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
+ __is_constexpr((lo) > (hi)), (lo) > (hi), false)))
+
+#define __careful_clamp(val, lo, hi) ({ \
+ __clamp_input_check(lo, hi) + \
+ __builtin_choose_expr(__typecheck(val, lo) && __typecheck(val, hi) && \
+ __typecheck(hi, lo) && __is_constexpr(val) && \
+ __is_constexpr(lo) && __is_constexpr(hi), \
+ __clamp(val, lo, hi), \
+ __clamp_once(val, lo, hi, __UNIQUE_ID(__val), \
+ __UNIQUE_ID(__lo), __UNIQUE_ID(__hi))); })
+
+/**
+ * min - return minimum of two values of the same or compatible types
+ * @x: first value
+ * @y: second value
+ */
+#define min(x, y) __careful_cmp(x, y, <)
+
+/**
+ * max - return maximum of two values of the same or compatible types
+ * @x: first value
+ * @y: second value
+ */
+#define max(x, y) __careful_cmp(x, y, >)
+
+/**
+ * min3 - return minimum of three values
+ * @x: first value
+ * @y: second value
+ * @z: third value
+ */
+#define min3(x, y, z) min((typeof(x))min(x, y), z)
+
+/**
+ * max3 - return maximum of three values
+ * @x: first value
+ * @y: second value
+ * @z: third value
+ */
+#define max3(x, y, z) max((typeof(x))max(x, y), z)
+
+/**
+ * min_not_zero - return the minimum that is _not_ zero, unless both are zero
+ * @x: first value
+ * @y: second value
+ */
+#define min_not_zero(x, y) ({ \
+ typeof(x) __x = (x); \
+ typeof(y) __y = (y); \
+ __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
+
+/**
+ * clamp - return a value clamped to a given range with strict typechecking
+ * @val: current value
+ * @lo: lowest allowable value
+ * @hi: highest allowable value
+ *
+ * This macro does strict typechecking of @lo/@hi to make sure they are of the
+ * same type as @val. See the unnecessary pointer comparisons.
+ */
+#define clamp(val, lo, hi) __careful_clamp(val, lo, hi)
+
+/*
+ * ..and if you can't take the strict
+ * types, you can specify one yourself.
+ *
+ * Or not use min/max/clamp at all, of course.
+ */
+
+/**
+ * min_t - return minimum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
+#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <)
+
+/**
+ * max_t - return maximum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
+#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >)
+
+/**
+ * clamp_t - return a value clamped to a given range using a given type
+ * @type: the type of variable to use
+ * @val: current value
+ * @lo: minimum allowable value
+ * @hi: maximum allowable value
+ *
+ * This macro does no typechecking and uses temporary variables of type
+ * @type to make all the comparisons.
+ */
+#define clamp_t(type, val, lo, hi) __careful_clamp((type)(val), (type)(lo), (type)(hi))
+
+/**
+ * clamp_val - return a value clamped to a given range using val's type
+ * @val: current value
+ * @lo: minimum allowable value
+ * @hi: maximum allowable value
+ *
+ * This macro does no typechecking and uses temporary variables of whatever
+ * type the input argument @val is. This is useful when @val is an unsigned
+ * type and @lo and @hi are literals that will otherwise be assigned a signed
+ * integer type.
+ */
+#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
+
+/**
+ * swap - swap values of @a and @b
+ * @a: first value
+ * @b: second value
+ */
+#define swap(a, b) \
+ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+
+#endif /* _LINUX_MINMAX_H */
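A short sketch of how these macros behave in practice (illustrative only; the locals are hypothetical):

static void example_minmax(void)
{
	int a = 3, b = 7;
	u32 len = 100;
	int lo, hi, c;
	u32 n;

	lo = min(a, b);			/* 3; each argument evaluated once */
	hi = max3(a, b, 10);		/* 10 */
	c = clamp(b, 1, 5);		/* 5; 1 and 5 must match b's type */

	/* min(a, len) would warn: int vs u32. Pick the type explicitly: */
	n = min_t(u32, a, len);		/* 3, both compared as u32 */

	swap(a, b);			/* now a == 7, b == 3 */
}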
diff --git a/include/linux/misc_cgroup.h b/include/linux/misc_cgroup.h
new file mode 100644
index 000000000000..c238207d1615
--- /dev/null
+++ b/include/linux/misc_cgroup.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Miscellaneous cgroup controller.
+ *
+ * Copyright 2020 Google LLC
+ * Author: Vipin Sharma <vipinsh@google.com>
+ */
+#ifndef _MISC_CGROUP_H_
+#define _MISC_CGROUP_H_
+
+/**
+ * enum misc_res_type - Types of misc cgroup entries supported by the host.
+ */
+enum misc_res_type {
+#ifdef CONFIG_KVM_AMD_SEV
+ /* AMD SEV ASIDs resource */
+ MISC_CG_RES_SEV,
+ /* AMD SEV-ES ASIDs resource */
+ MISC_CG_RES_SEV_ES,
+#endif
+ MISC_CG_RES_TYPES
+};
+
+struct misc_cg;
+
+#ifdef CONFIG_CGROUP_MISC
+
+#include <linux/cgroup.h>
+
+/**
+ * struct misc_res: Per cgroup per misc type resource
+ * @max: Maximum limit on the resource.
+ * @usage: Current usage of the resource.
+ * @events: Number of times the resource's max limit has been hit.
+ */
+struct misc_res {
+ unsigned long max;
+ atomic_long_t usage;
+ atomic_long_t events;
+};
+
+/**
+ * struct misc_cg - Miscellaneous controller's cgroup structure.
+ * @css: cgroup subsys state object.
+ * @events_file: Handle for the misc resources events file.
+ * @res: Array of misc resources usage in the cgroup.
+ */
+struct misc_cg {
+ struct cgroup_subsys_state css;
+
+ /* misc.events */
+ struct cgroup_file events_file;
+
+ struct misc_res res[MISC_CG_RES_TYPES];
+};
+
+unsigned long misc_cg_res_total_usage(enum misc_res_type type);
+int misc_cg_set_capacity(enum misc_res_type type, unsigned long capacity);
+int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg,
+ unsigned long amount);
+void misc_cg_uncharge(enum misc_res_type type, struct misc_cg *cg,
+ unsigned long amount);
+
+/**
+ * css_misc() - Get misc cgroup from the css.
+ * @css: cgroup subsys state object.
+ *
+ * Context: Any context.
+ * Return:
+ * * %NULL - If @css is null.
+ * * struct misc_cg* - misc cgroup pointer of the passed css.
+ */
+static inline struct misc_cg *css_misc(struct cgroup_subsys_state *css)
+{
+ return css ? container_of(css, struct misc_cg, css) : NULL;
+}
+
+/*
+ * get_current_misc_cg() - Find and get the misc cgroup of the current task.
+ *
+ * Returned cgroup has its ref count increased by 1. Caller must call
+ * put_misc_cg() to return the reference.
+ *
+ * Return: Misc cgroup to which the current task belongs.
+ */
+static inline struct misc_cg *get_current_misc_cg(void)
+{
+ return css_misc(task_get_css(current, misc_cgrp_id));
+}
+
+/*
+ * put_misc_cg() - Put the misc cgroup and reduce its ref count.
+ * @cg: cgroup to put.
+ */
+static inline void put_misc_cg(struct misc_cg *cg)
+{
+ if (cg)
+ css_put(&cg->css);
+}
+
+#else /* !CONFIG_CGROUP_MISC */
+
+static inline unsigned long misc_cg_res_total_usage(enum misc_res_type type)
+{
+ return 0;
+}
+
+static inline int misc_cg_set_capacity(enum misc_res_type type,
+ unsigned long capacity)
+{
+ return 0;
+}
+
+static inline int misc_cg_try_charge(enum misc_res_type type,
+ struct misc_cg *cg,
+ unsigned long amount)
+{
+ return 0;
+}
+
+static inline void misc_cg_uncharge(enum misc_res_type type,
+ struct misc_cg *cg,
+ unsigned long amount)
+{
+}
+
+static inline struct misc_cg *get_current_misc_cg(void)
+{
+ return NULL;
+}
+
+static inline void put_misc_cg(struct misc_cg *cg)
+{
+}
+
+#endif /* CONFIG_CGROUP_MISC */
+#endif /* _MISC_CGROUP_H_ */
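The intended charge/uncharge pattern, sketched for a hypothetical consumer of one SEV ASID (MISC_CG_RES_SEV exists only under CONFIG_KVM_AMD_SEV; the function names are illustrative):

static struct misc_cg *example_charge_one_asid(void)
{
	struct misc_cg *cg = get_current_misc_cg();

	if (misc_cg_try_charge(MISC_CG_RES_SEV, cg, 1)) {
		put_misc_cg(cg);	/* over the cgroup's misc.max limit */
		return NULL;
	}
	return cg;	/* keep the reference until the charge is returned */
}

static void example_uncharge_one_asid(struct misc_cg *cg)
{
	misc_cg_uncharge(MISC_CG_RES_SEV, cg, 1);
	put_misc_cg(cg);
}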
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 58751eae5f77..c0fea6ca5076 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MISCDEVICE_H
#define _LINUX_MISCDEVICE_H
#include <linux/major.h>
@@ -6,9 +7,9 @@
#include <linux/device.h>
/*
- * These allocations are managed by device@lanana.org. If you use an
- * entry that is not in assigned your entry may well be moved and
- * reassigned, or set dynamic if a fixed value is not justified.
+ * These allocations are managed by device@lanana.org. If you need
+ * an entry that is not assigned here, it can be moved and
+ * reassigned or dynamically set if a fixed value is not justified.
*/
#define PSMOUSE_MINOR 1
@@ -24,18 +25,31 @@
#define TEMP_MINOR 131 /* Temperature Sensor */
#define APM_MINOR_DEV 134
#define RTC_MINOR 135
-#define EFI_RTC_MINOR 136 /* EFI Time services */
+/*#define EFI_RTC_MINOR 136 was EFI Time services */
#define VHCI_MINOR 137
#define SUN_OPENPROM_MINOR 139
#define DMAPI_MINOR 140 /* unused */
#define NVRAM_MINOR 144
+#define SBUS_FLASH_MINOR 152
#define SGI_MMTIMER 153
+#define PMU_MINOR 154
#define STORE_QUEUE_MINOR 155 /* unused */
+#define LCD_MINOR 156
+#define AC_MINOR 157
+#define BUTTON_MINOR 158 /* Major 10, Minor 158, /dev/nwbutton */
+#define NWFLASH_MINOR 160 /* MAJOR is 10 - miscdevice */
+#define ENVCTRL_MINOR 162
#define I2O_MINOR 166
+#define UCTRL_MINOR 174
+#define AGPGART_MINOR 175
+#define TOSH_MINOR_DEV 181
#define HWRNG_MINOR 183
-#define MICROCODE_MINOR 184
+/*#define MICROCODE_MINOR 184 unused */
+#define KEYPAD_MINOR 185
#define IRNET_MINOR 187
+#define D7S_MINOR 193
#define VFIO_MINOR 196
+#define PXA3XX_GCU_MINOR 197
#define TUN_MINOR 200
#define CUSE_MINOR 203
#define MWAVE_MINOR 219 /* ACP/Mwave Modem */
@@ -46,6 +60,7 @@
#define MISC_MCELOG_MINOR 227
#define HPET_MINOR 228
#define FUSE_MINOR 229
+#define SNAPSHOT_MINOR 231
#define KVM_MINOR 232
#define BTRFS_MINOR 234
#define AUTOFS_MINOR 235
@@ -55,6 +70,7 @@
#define UHID_MINOR 239
#define USERIO_MINOR 240
#define VHOST_VSOCK_MINOR 241
+#define RFKILL_MINOR 242
#define MISC_DYNAMIC_MINOR 255
struct device;
@@ -77,14 +93,14 @@ extern void misc_deregister(struct miscdevice *misc);
/*
* Helper macro for drivers that don't do anything special in the initcall.
- * This helps in eleminating of boilerplate code.
+ * This helps to eliminate boilerplate code.
*/
#define builtin_misc_device(__misc_device) \
builtin_driver(__misc_device, misc_register)
/*
* Helper macro for drivers that don't do anything special in module init / exit
- * call. This helps in eleminating of boilerplate code.
+ * call. This helps to eliminate boilerplate code.
*/
#define module_misc_device(__misc_device) \
module_driver(__misc_device, misc_register, misc_deregister)
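For reference, module_misc_device() lets a trivial driver reduce to the following sketch (the "example" names and the resulting /dev node are hypothetical):

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice example_misc = {
	.minor = MISC_DYNAMIC_MINOR,	/* the core picks a free minor */
	.name  = "example",		/* udev creates /dev/example */
	.fops  = &example_fops,
};

/* Expands to module_init()/module_exit() wrappers around
 * misc_register()/misc_deregister(). */
module_misc_device(example_misc);

MODULE_LICENSE("GPL");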
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index 09cebe528488..653d2a0aa44c 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -130,12 +130,20 @@ enum {
MLX4_CQE_STATUS_IPOK = 1 << 12,
};
+/* L4_CSUM is logically part of status, but has to be checked against badfcs_enc */
+enum {
+ MLX4_CQE_STATUS_L4_CSUM = 1 << 2,
+};
+
enum {
MLX4_CQE_LLC = 1,
MLX4_CQE_SNAP = 1 << 1,
MLX4_CQE_BAD_FCS = 1 << 4,
};
+#define MLX4_MAX_CQ_PERIOD (BIT(16) - 1)
+#define MLX4_MAX_CQ_COUNT (BIT(16) - 1)
+
static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd,
void __iomem *uar_page,
spinlock_t *doorbell_lock)
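Per the comment on MLX4_CQE_STATUS_L4_CSUM above, a sketch of the expected receive-path check; the struct mlx4_cqe field names are assumed from this header and the helper name is hypothetical:

static bool example_l4_csum_ok(const struct mlx4_cqe *cqe)
{
	/* L4_CSUM is only meaningful when the frame's FCS was good. */
	return (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_L4_CSUM)) &&
	       !(cqe->badfcs_enc & MLX4_CQE_BAD_FCS);
}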
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index d5bed0875d30..6646634a0b9d 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -40,14 +40,13 @@
#include <linux/cpu_rmap.h>
#include <linux/crash_dump.h>
-#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/timecounter.h>
#define DEFAULT_UAR_PAGE_SHIFT 12
-#define MAX_MSIX_P_PORT 17
-#define MAX_MSIX 64
+#define MAX_MSIX 128
#define MIN_MSIX_P_PORT 5
#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \
(dev_cap).num_ports * MIN_MSIX_P_PORT)
@@ -224,6 +223,9 @@ enum {
MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT = 1ULL << 35,
MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP = 1ULL << 36,
MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT = 1ULL << 37,
+ MLX4_DEV_CAP_FLAG2_USER_MAC_EN = 1ULL << 38,
+ MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW = 1ULL << 39,
+ MLX4_DEV_CAP_FLAG2_SW_CQ_INIT = 1ULL << 40,
};
enum {
@@ -256,10 +258,6 @@ enum {
};
enum {
- MLX4_USER_DEV_CAP_LARGE_CQE = 1L << 0
-};
-
-enum {
MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0,
MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1,
MLX4_FUNC_CAP_DMFS_A0_STATIC = 1L << 2
@@ -428,6 +426,12 @@ enum mlx4_steer_type {
MLX4_NUM_STEERS
};
+enum mlx4_resource_usage {
+ MLX4_RES_USAGE_NONE,
+ MLX4_RES_USAGE_DRIVER,
+ MLX4_RES_USAGE_USER_VERBS,
+};
+
enum {
MLX4_NUM_FEXCH = 64 * 1024,
};
@@ -518,6 +522,14 @@ struct mlx4_phys_caps {
u32 base_tunnel_sqpn;
};
+struct mlx4_spec_qps {
+ u32 qp0_qkey;
+ u32 qp0_proxy;
+ u32 qp0_tunnel;
+ u32 qp1_proxy;
+ u32 qp1_tunnel;
+};
+
struct mlx4_caps {
u64 fw_ver;
u32 function;
@@ -547,11 +559,7 @@ struct mlx4_caps {
int max_qp_init_rdma;
int max_qp_dest_rdma;
int max_tc_eth;
- u32 *qp0_qkey;
- u32 *qp0_proxy;
- u32 *qp1_proxy;
- u32 *qp0_tunnel;
- u32 *qp1_tunnel;
+ struct mlx4_spec_qps *spec_qps;
int num_srqs;
int max_srq_wqes;
int max_srq_sge;
@@ -564,7 +572,6 @@ struct mlx4_caps {
int reserved_eqs;
int num_comp_vectors;
int num_mpts;
- int max_fmr_maps;
int num_mtts;
int fmr_reserved_mtts;
int reserved_mtts;
@@ -620,7 +627,10 @@ struct mlx4_caps {
u32 dmfs_high_rate_qpn_base;
u32 dmfs_high_rate_qpn_range;
u32 vf_caps;
+ bool wol_port[MLX4_MAX_PORTS + 1];
struct mlx4_rate_limit_caps rl_caps;
+ u32 health_buffer_addrs;
+ bool map_clock_to_user;
};
struct mlx4_buf_list {
@@ -696,17 +706,6 @@ struct mlx4_mw {
int enabled;
};
-struct mlx4_fmr {
- struct mlx4_mr mr;
- struct mlx4_mpt_entry *mpt;
- __be64 *mtts;
- dma_addr_t dma_handle;
- int max_pages;
- int max_maps;
- int maps;
- u8 page_shift;
-};
-
struct mlx4_uar {
unsigned long pfn;
int index;
@@ -739,7 +738,7 @@ struct mlx4_cq {
int cqn;
unsigned vector;
- atomic_t refcount;
+ refcount_t refcount;
struct completion free;
struct {
struct list_head list;
@@ -748,6 +747,7 @@ struct mlx4_cq {
} tasklet_ctx;
int reset_notify_added;
struct list_head reset_notify;
+ u8 usage;
};
struct mlx4_qp {
@@ -755,8 +755,9 @@ struct mlx4_qp {
int qpn;
- atomic_t refcount;
+ refcount_t refcount;
struct completion free;
+ u8 usage;
};
struct mlx4_srq {
@@ -767,7 +768,7 @@ struct mlx4_srq {
int max_gs;
int wqe_shift;
- atomic_t refcount;
+ refcount_t refcount;
struct completion free;
};
@@ -840,6 +841,12 @@ struct mlx4_vf_dev {
u8 n_ports;
};
+struct mlx4_fw_crdump {
+ bool snapshot_enable;
+ struct devlink_region *region_crspace;
+ struct devlink_region *region_fw_health;
+};
+
enum mlx4_pci_status {
MLX4_PCI_STATUS_DISABLED,
MLX4_PCI_STATUS_ENABLED,
@@ -860,6 +867,7 @@ struct mlx4_dev_persistent {
u8 interface_state;
struct mutex pci_status_mutex; /* sync pci state */
enum mlx4_pci_status pci_status;
+ struct mlx4_fw_crdump crdump;
};
struct mlx4_dev {
@@ -1068,7 +1076,7 @@ static inline int mlx4_is_eth(struct mlx4_dev *dev, int port)
}
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
- struct mlx4_buf *buf, gfp_t gfp);
+ struct mlx4_buf *buf);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
{
@@ -1105,10 +1113,9 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw);
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
- struct mlx4_buf *buf, gfp_t gfp);
+ struct mlx4_buf *buf);
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order,
- gfp_t gfp);
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
@@ -1118,14 +1125,14 @@ void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
- unsigned vector, int collapsed, int timestamp_en);
+ unsigned int vector, int collapsed, int timestamp_en,
+ void *buf_addr, bool user_cq);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
- int *base, u8 flags);
+ int *base, u8 flags, u8 usage);
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
-int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp,
- gfp_t gfp);
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);
int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn,
@@ -1374,6 +1381,7 @@ int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port);
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
+int mlx4_SET_PORT_user_mac(struct mlx4_dev *dev, u8 port, u8 *user_mac);
int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu);
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
u8 promisc);
@@ -1392,14 +1400,6 @@ int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
-int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
- int npages, u64 iova, u32 *lkey, u32 *rkey);
-int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
- int max_maps, u8 page_shift, struct mlx4_fmr *fmr);
-int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
-void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
- u32 *lkey, u32 *rkey);
-int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
int mlx4_test_interrupt(struct mlx4_dev *dev, int vector);
int mlx4_test_async(struct mlx4_dev *dev);
@@ -1419,7 +1419,7 @@ int mlx4_get_phys_port_id(struct mlx4_dev *dev);
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
-int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
+int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage);
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port);
@@ -1436,7 +1436,7 @@ int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
enum mlx4_net_trans_rule_id id);
int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
-int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, const unsigned char *addr,
int port, int qpn, u16 prio, u64 *reg_id);
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
@@ -1502,6 +1502,8 @@ int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port);
int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port);
int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
int enable);
+
+struct mlx4_mpt_entry;
int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
struct mlx4_mpt_entry ***mpt_entry);
int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
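With the atomic_t to refcount_t conversion above, the release side of the CQ/QP/SRQ lifetime follows the usual saturating pattern; a sketch (the helper name is hypothetical):

static void example_cq_put(struct mlx4_cq *cq)
{
	/* refcount_t saturates and warns on over/underflow instead of
	 * silently wrapping like atomic_t. */
	if (refcount_dec_and_test(&cq->refcount))
		complete(&cq->free);	/* lets the destroy path proceed */
}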
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index a858bcb6220b..1834c8fad12e 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -92,26 +92,4 @@ void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int
struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port);
-static inline u64 mlx4_mac_to_u64(u8 *addr)
-{
- u64 mac = 0;
- int i;
-
- for (i = 0; i < ETH_ALEN; i++) {
- mac <<= 8;
- mac |= addr[i];
- }
- return mac;
-}
-
-static inline void mlx4_u64_to_mac(u8 *addr, u64 mac)
-{
- int i;
-
- for (i = ETH_ALEN; i > 0; i--) {
- addr[i - 1] = mac & 0xFF;
- mac >>= 8;
- }
-}
-
#endif /* MLX4_DRIVER_H */
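The removed helpers have generic equivalents in <linux/etherdevice.h>, which callers are expected to use instead; a sketch:

#include <linux/etherdevice.h>

static void example_mac_roundtrip(const u8 *mac_in, u8 *mac_out)
{
	u64 mac = ether_addr_to_u64(mac_in);	/* replaces mlx4_mac_to_u64() */

	u64_to_ether_addr(mac, mac_out);	/* replaces mlx4_u64_to_mac() */
}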
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 8e2828d48d7f..b9a7b1319f5d 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -362,7 +362,7 @@ struct mlx4_wqe_datagram_seg {
struct mlx4_wqe_lso_seg {
__be32 mss_hdr_size;
- __be32 header[0];
+ __be32 header[];
};
enum mlx4_wqe_bind_seg_flags2 {
@@ -446,6 +446,7 @@ enum {
struct mlx4_wqe_inline_seg {
__be32 byte_count;
+ __u8 data[];
};
enum mlx4_update_qp_attr {
@@ -503,4 +504,5 @@ static inline u16 folded_qp(u32 q)
u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn);
+void mlx4_put_qp(struct mlx4_qp *qp);
#endif /* MLX4_QP_H */
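With header[0] converted to a C99 flexible array member, allocation sizing can use the overflow-checked struct_size() helper; a sketch (the function name is hypothetical):

#include <linux/overflow.h>

static size_t example_lso_seg_bytes(const struct mlx4_wqe_lso_seg *seg,
				    size_t n_header_dwords)
{
	/* sizeof(*seg) + n_header_dwords * sizeof(seg->header[0]),
	 * with overflow checking. */
	return struct_size(seg, header, n_header_dwords);
}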
diff --git a/include/linux/mlx5/cmd.h b/include/linux/mlx5/cmd.h
deleted file mode 100644
index 68cd08f02c2f..000000000000
--- a/include/linux/mlx5/cmd.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX5_CMD_H
-#define MLX5_CMD_H
-
-#include <linux/types.h>
-
-struct manage_pages_layout {
- u64 ptr;
- u32 reserved;
- u16 num_entries;
- u16 func_id;
-};
-
-
-struct mlx5_cmd_alloc_uar_imm_out {
- u32 rsvd[3];
- u32 uarn;
-};
-
-#endif /* MLX5_CMD_H */
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 95898847c7d4..cb15308b5cb0 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -33,9 +33,8 @@
#ifndef MLX5_CORE_CQ_H
#define MLX5_CORE_CQ_H
-#include <rdma/ib_verbs.h>
#include <linux/mlx5/driver.h>
-
+#include <linux/refcount.h>
struct mlx5_core_cq {
u32 cqn;
@@ -43,11 +42,11 @@ struct mlx5_core_cq {
__be32 *set_ci_db;
__be32 *arm_db;
struct mlx5_uars_page *uar;
- atomic_t refcount;
+ refcount_t refcount;
struct completion free;
unsigned vector;
unsigned int irqn;
- void (*comp) (struct mlx5_core_cq *);
+ void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
void (*event) (struct mlx5_core_cq *, enum mlx5_event);
u32 cons_index;
unsigned arm_sn;
@@ -55,11 +54,13 @@ struct mlx5_core_cq {
int pid;
struct {
struct list_head list;
- void (*comp)(struct mlx5_core_cq *);
+ void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
void *priv;
} tasklet_ctx;
int reset_notify_added;
struct list_head reset_notify;
+ struct mlx5_eq_comp *eq;
+ u16 uid;
};
@@ -123,13 +124,18 @@ struct mlx5_cq_modify_params {
};
enum {
- CQE_SIZE_64 = 0,
- CQE_SIZE_128 = 1,
+ CQE_STRIDE_64 = 0,
+ CQE_STRIDE_128 = 1,
+ CQE_STRIDE_128_PAD = 2,
};
-static inline int cqe_sz_to_mlx_sz(u8 size)
+#define MLX5_MAX_CQ_PERIOD (BIT(__mlx5_bit_sz(cqc, cq_period)) - 1)
+#define MLX5_MAX_CQ_COUNT (BIT(__mlx5_bit_sz(cqc, cq_max_count)) - 1)
+
+static inline int cqe_sz_to_mlx_sz(u8 size, int padding_128_en)
{
- return size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
+ return padding_128_en ? CQE_STRIDE_128_PAD :
+ size == 64 ? CQE_STRIDE_64 : CQE_STRIDE_128;
}
static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
@@ -163,21 +169,38 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
doorbell[1] = cpu_to_be32(cq->cqn);
- mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, NULL);
+ mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL);
+}
+
+static inline void mlx5_cq_hold(struct mlx5_core_cq *cq)
+{
+ refcount_inc(&cq->refcount);
}
-int mlx5_init_cq_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
+static inline void mlx5_cq_put(struct mlx5_core_cq *cq)
+{
+ if (refcount_dec_and_test(&cq->refcount))
+ complete(&cq->free);
+}
+
+int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u32 *in, int inlen, u32 *out, int outlen);
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- u32 *in, int inlen);
+ u32 *in, int inlen, u32 *out, int outlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- u32 *out, int outlen);
+ u32 *out);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen);
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
struct mlx5_core_cq *cq, u16 cq_period,
u16 cq_max_count);
+static inline void mlx5_dump_err_cqe(struct mlx5_core_dev *dev,
+ struct mlx5_err_cqe *err_cqe)
+{
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe,
+ sizeof(*err_cqe), false);
+}
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
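The new mlx5_cq_hold()/mlx5_cq_put() helpers encode the pairing that keeps a CQ alive across asynchronous completion handling; a sketch (the function name is hypothetical):

static void example_defer_cq_work(struct mlx5_core_cq *cq)
{
	mlx5_cq_hold(cq);	/* pin the CQ before handing it off */
	/* ... queue cq to a tasklet or workqueue ... */
	mlx5_cq_put(cq);	/* the final put completes cq->free, which
				 * mlx5_core_destroy_cq() waits on */
}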
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index f31a0b5377e1..c0af74efd3cb 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -36,6 +36,7 @@
#include <linux/types.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/bitfield.h>
#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS 0
@@ -48,12 +49,16 @@
/* helper macros */
#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
-#define __mlx5_bit_off(typ, fld) ((unsigned)(unsigned long)(&(__mlx5_nullp(typ)->fld)))
+#define __mlx5_bit_off(typ, fld) (offsetof(struct mlx5_ifc_##typ##_bits, fld))
+#define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
+#define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0xf))
#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
+#define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
+#define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << __mlx5_16_bit_off(typ, fld))
#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
@@ -63,7 +68,7 @@
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
-#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
+#define MLX5_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + MLX5_BYTE_OFF(typ, fld)))
/* insert a value to a struct */
#define MLX5_SET(typ, p, fld, v) do { \
@@ -75,6 +80,11 @@
<< __mlx5_dw_bit_off(typ, fld))); \
} while (0)
+#define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \
+ BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \
+ MLX5_SET(typ, p, fld[idx], v); \
+} while (0)
+
#define MLX5_SET_TO_ONES(typ, p, fld) do { \
BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
@@ -116,6 +126,19 @@ __mlx5_mask(typ, fld))
___t; \
})
+#define MLX5_GET16(typ, p, fld) ((be16_to_cpu(*((__be16 *)(p) +\
+__mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
+__mlx5_mask16(typ, fld))
+
+#define MLX5_SET16(typ, p, fld, v) do { \
+ u16 _v = v; \
+ BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 16); \
+ *((__be16 *)(p) + __mlx5_16_off(typ, fld)) = \
+ cpu_to_be16((be16_to_cpu(*((__be16 *)(p) + __mlx5_16_off(typ, fld))) & \
+ (~__mlx5_16_mask(typ, fld))) | (((_v) & __mlx5_mask16(typ, fld)) \
+ << __mlx5_16_bit_off(typ, fld))); \
+} while (0)
+
/* Big endian getters */
#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
__mlx5_64_off(typ, fld)))
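The 16-bit accessors added above mirror MLX5_SET()/MLX5_GET() but address big-endian 16-bit words; a sketch, where "some_ctx" and "some_fld" stand in for real mlx5_ifc structure and field names:

static void example_set16(void *ctx)
{
	u16 v;

	MLX5_SET16(some_ctx, ctx, some_fld, 3);
	v = MLX5_GET16(some_ctx, ctx, some_fld);	/* reads back 3 */
}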
@@ -190,6 +213,13 @@ enum {
MLX5_PFAULT_SUBTYPE_RDMA = 1,
};
+enum wqe_page_fault_type {
+ MLX5_WQE_PF_TYPE_RMP = 0,
+ MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE = 1,
+ MLX5_WQE_PF_TYPE_RESP = 2,
+ MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC = 3,
+};
+
enum {
MLX5_PERM_LOCAL_READ = 1 << 2,
MLX5_PERM_LOCAL_WRITE = 1 << 3,
@@ -227,6 +257,8 @@ enum {
MLX5_NON_FP_BFREGS_PER_UAR,
MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
+ MLX5_MIN_DYN_BFREGS = 512,
+ MLX5_MAX_DYN_BFREGS = 1024,
};
enum {
@@ -245,7 +277,9 @@ enum {
MLX5_MKEY_MASK_RW = 1ull << 20,
MLX5_MKEY_MASK_A = 1ull << 21,
MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23,
- MLX5_MKEY_MASK_FREE = 1ull << 29,
+ MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE = 1ull << 25,
+ MLX5_MKEY_MASK_FREE = 1ull << 29,
+ MLX5_MKEY_MASK_RELAXED_ORDERING_READ = 1ull << 47,
};
enum {
@@ -257,9 +291,9 @@ enum {
MLX5_UMR_INLINE = (1 << 7),
};
-#define MLX5_UMR_MTT_ALIGNMENT 0x40
-#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
-#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
+#define MLX5_UMR_FLEX_ALIGNMENT 0x40
+#define MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_mtt))
+#define MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_klm))
#define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)
@@ -267,11 +301,18 @@ enum {
MLX5_EVENT_QUEUE_TYPE_QP = 0,
MLX5_EVENT_QUEUE_TYPE_RQ = 1,
MLX5_EVENT_QUEUE_TYPE_SQ = 2,
+ MLX5_EVENT_QUEUE_TYPE_DCT = 6,
};
+/* mlx5 components can subscribe to any one of these events via the
+ * mlx5_eq_notifier_register API.
+ */
enum mlx5_event {
+ /* Special value to subscribe to any event */
+ MLX5_EVENT_TYPE_NOTIFY_ANY = 0x0,
+ /* HW events enum start: comp events are not subscribable */
MLX5_EVENT_TYPE_COMP = 0x0,
-
+ /* HW Async events enum start: subscribable events */
MLX5_EVENT_TYPE_PATH_MIG = 0x01,
MLX5_EVENT_TYPE_COMM_EST = 0x02,
MLX5_EVENT_TYPE_SQ_DRAINED = 0x03,
@@ -284,12 +325,17 @@ enum mlx5_event {
MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
+ MLX5_EVENT_TYPE_OBJECT_CHANGE = 0x27,
MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08,
MLX5_EVENT_TYPE_PORT_CHANGE = 0x09,
MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
+ MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17,
+ MLX5_EVENT_TYPE_XRQ_ERROR = 0x18,
MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
+ MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22,
+ MLX5_EVENT_TYPE_MONITOR_COUNTER = 0x24,
MLX5_EVENT_TYPE_PPS_EVENT = 0x25,
MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
@@ -301,7 +347,36 @@ enum mlx5_event {
MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
+ MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe,
+ MLX5_EVENT_TYPE_VHCA_STATE_CHANGE = 0xf,
+
+ MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
+ MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d,
+
MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
+ MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21,
+
+ MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26,
+
+ MLX5_EVENT_TYPE_MAX = 0x100,
+};
+
+enum mlx5_driver_event {
+ MLX5_DRIVER_EVENT_TYPE_TRAP = 0,
+ MLX5_DRIVER_EVENT_UPLINK_NETDEV,
+};
+
+enum {
+ MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
+ MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
+ MLX5_TRACER_SUBTYPE_STRINGS_DB_UPDATE = 0x2,
+};
+
+enum {
+ MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
+ MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT = 0x5,
+ MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT = 0x7,
+ MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT = 0x8,
};
enum {
@@ -315,21 +390,6 @@ enum {
};
enum {
- MLX5_DEV_CAP_FLAG_XRC = 1LL << 3,
- MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
- MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
- MLX5_DEV_CAP_FLAG_APM = 1LL << 17,
- MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18,
- MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23,
- MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24,
- MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29,
- MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30,
- MLX5_DEV_CAP_FLAG_DCT = 1LL << 37,
- MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
- MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
-};
-
-enum {
MLX5_ROCE_VERSION_1 = 0,
MLX5_ROCE_VERSION_2 = 2,
};
@@ -364,6 +424,7 @@ enum {
MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15,
MLX5_OPCODE_BIND_MW = 0x18,
MLX5_OPCODE_CONFIG_CMD = 0x1f,
+ MLX5_OPCODE_ENHANCED_MPSW = 0x29,
MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
MLX5_RECV_OPCODE_SEND = 0x01,
@@ -376,11 +437,34 @@ enum {
MLX5_OPCODE_SET_PSV = 0x20,
MLX5_OPCODE_GET_PSV = 0x21,
MLX5_OPCODE_CHECK_PSV = 0x22,
+ MLX5_OPCODE_DUMP = 0x23,
MLX5_OPCODE_RGET_PSV = 0x26,
MLX5_OPCODE_RCHECK_PSV = 0x27,
MLX5_OPCODE_UMR = 0x25,
+ MLX5_OPCODE_FLOW_TBL_ACCESS = 0x2c,
+
+ MLX5_OPCODE_ACCESS_ASO = 0x2d,
+};
+
+enum {
+ MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1,
+ MLX5_OPC_MOD_TLS_TIR_STATIC_PARAMS = 0x2,
+};
+
+enum {
+ MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1,
+ MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS = 0x2,
+};
+
+struct mlx5_wqe_tls_static_params_seg {
+ u8 ctx[MLX5_ST_SZ_BYTES(tls_static_params)];
+};
+
+struct mlx5_wqe_tls_progress_params_seg {
+ __be32 tis_tir_num;
+ u8 ctx[MLX5_ST_SZ_BYTES(tls_progress_params)];
};
enum {
@@ -403,10 +487,6 @@ enum {
};
enum {
- MLX5_CAP_OFF_CMDIF_CSUM = 46,
-};
-
-enum {
/*
* Max wqe size for rdma read is 512 bytes, so this
* limits our max_sge_rd as the wqe needs to fit:
@@ -449,20 +529,34 @@ struct mlx5_cmd_layout {
u8 status_own;
};
+enum mlx5_rfr_severity_bit_offsets {
+ MLX5_RFR_BIT_OFFSET = 0x7,
+};
+
struct health_buffer {
- __be32 assert_var[5];
- __be32 rsvd0[3];
+ __be32 assert_var[6];
+ __be32 rsvd0[2];
__be32 assert_exit_ptr;
__be32 assert_callra;
- __be32 rsvd1[2];
+ __be32 rsvd1[1];
+ __be32 time;
__be32 fw_ver;
__be32 hw_id;
- __be32 rsvd2;
+ u8 rfr_severity;
+ u8 rsvd2[3];
u8 irisc_index;
u8 synd;
__be16 ext_synd;
};
+enum mlx5_initializing_bit_offsets {
+ MLX5_FW_RESET_SUPPORTED_OFFSET = 30,
+};
+
+enum mlx5_cmd_addr_l_sz_offset {
+ MLX5_NIC_IFC_OFFSET = 8,
+};
+
struct mlx5_init_seg {
__be32 fw_rev;
__be32 cmdif_rev_fw_sub;
@@ -473,12 +567,17 @@ struct mlx5_init_seg {
__be32 rsvd1[120];
__be32 initializing;
struct health_buffer health;
- __be32 rsvd2[880];
+ __be32 rsvd2[878];
+ __be32 cmd_exec_to;
+ __be32 cmd_q_init_to;
__be32 internal_timer_h;
__be32 internal_timer_l;
__be32 rsvd3[2];
__be32 health_counter;
- __be32 rsvd4[1019];
+ __be32 rsvd4[11];
+ __be32 real_time_h;
+ __be32 real_time_l;
+ __be32 rsvd5[1006];
__be64 ieee1588_clk;
__be32 ieee1588_clk_type;
__be32 clr_intx;
@@ -502,6 +601,12 @@ struct mlx5_eqe_cq_err {
u8 syndrome;
};
+struct mlx5_eqe_xrq_err {
+ __be32 reserved1[5];
+ __be32 type_xrqn;
+ __be32 reserved2;
+};
+
struct mlx5_eqe_port_state {
u8 reserved0[8];
u8 port;
@@ -529,7 +634,7 @@ struct mlx5_eqe_cmd {
};
struct mlx5_eqe_page_req {
- u8 rsvd0[2];
+ __be16 ec_function;
__be16 func_id;
__be32 num_pages;
__be32 rsvd1[5];
@@ -589,6 +694,40 @@ struct mlx5_eqe_pps {
u8 rsvd2[12];
} __packed;
+struct mlx5_eqe_dct {
+ __be32 reserved[6];
+ __be32 dctn;
+};
+
+struct mlx5_eqe_temp_warning {
+ __be64 sensor_warning_msb;
+ __be64 sensor_warning_lsb;
+} __packed;
+
+struct mlx5_eqe_obj_change {
+ u8 rsvd0[2];
+ __be16 obj_type;
+ __be32 obj_id;
+} __packed;
+
+#define SYNC_RST_STATE_MASK 0xf
+
+enum sync_rst_state_type {
+ MLX5_SYNC_RST_STATE_RESET_REQUEST = 0x0,
+ MLX5_SYNC_RST_STATE_RESET_NOW = 0x1,
+ MLX5_SYNC_RST_STATE_RESET_ABORT = 0x2,
+};
+
+struct mlx5_eqe_sync_fw_update {
+ u8 reserved_at_0[3];
+ u8 sync_rst_state;
+};
+
+struct mlx5_eqe_vhca_state {
+ __be16 ec_function;
+ __be16 function_id;
+} __packed;
+
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -604,6 +743,12 @@ union ev_data {
struct mlx5_eqe_vport_change vport_change;
struct mlx5_eqe_port_module port_module;
struct mlx5_eqe_pps pps;
+ struct mlx5_eqe_dct dct;
+ struct mlx5_eqe_temp_warning temp_warning;
+ struct mlx5_eqe_xrq_err xrq_err;
+ struct mlx5_eqe_sync_fw_update sync_fw_update;
+ struct mlx5_eqe_vhca_state vhca_state;
+ struct mlx5_eqe_obj_change obj_change;
} __packed;
struct mlx5_eqe {
@@ -646,13 +791,26 @@ struct mlx5_err_cqe {
};
struct mlx5_cqe64 {
- u8 outer_l3_tunneled;
+ u8 tls_outer_l3_tunneled;
u8 rsvd0;
__be16 wqe_id;
- u8 lro_tcppsh_abort_dupack;
- u8 lro_min_ttl;
- __be16 lro_tcp_win;
- __be32 lro_ack_seq_num;
+ union {
+ struct {
+ u8 tcppsh_abort_dupack;
+ u8 min_ttl;
+ __be16 tcp_win;
+ __be32 ack_seq_num;
+ } lro;
+ struct {
+ u8 reserved0:1;
+ u8 match:1;
+ u8 flush:1;
+ u8 reserved3:5;
+ u8 header_size;
+ __be16 header_entry_index;
+ __be32 data_offset;
+ } shampo;
+ };
__be32 rss_hash_result;
u8 rss_hash_type;
u8 ml_path;
@@ -664,14 +822,22 @@ struct mlx5_cqe64 {
u8 l4_l3_hdr_type;
__be16 vlan_info;
__be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
- __be32 imm_inval_pkey;
+ union {
+ __be32 immediate;
+ __be32 inval_rkey;
+ __be32 pkey;
+ __be32 ft_metadata;
+ };
u8 rsvd40[4];
__be32 byte_cnt;
__be32 timestamp_h;
__be32 timestamp_l;
__be32 sop_drop_qpn;
__be16 wqe_counter;
- u8 signature;
+ union {
+ u8 signature;
+ u8 validity_iteration_count;
+ };
u8 op_own;
};
@@ -680,7 +846,7 @@ struct mlx5_mini_cqe8 {
__be32 rx_hash_result;
struct {
__be16 checksum;
- __be16 rsvd;
+ __be16 stridx;
};
struct {
__be16 wqe_counter;
@@ -700,18 +866,35 @@ enum {
enum {
MLX5_CQE_FORMAT_CSUM = 0x1,
+ MLX5_CQE_FORMAT_CSUM_STRIDX = 0x3,
+};
+
+enum {
+ MLX5_CQE_COMPRESS_LAYOUT_BASIC = 0,
+ MLX5_CQE_COMPRESS_LAYOUT_ENHANCED = 1,
};
#define MLX5_MINI_CQE_ARRAY_SIZE 8
-static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
+static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
{
return (cqe->op_own >> 2) & 0x3;
}
-static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
+static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
+{
+ return cqe->op_own >> 4;
+}
+
+static inline u8 get_cqe_enhanced_num_mini_cqes(struct mlx5_cqe64 *cqe)
+{
+	/* num_of_mini_cqes is zero-based */
+ return get_cqe_opcode(cqe) + 1;
+}
+
+static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
- return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
+ return (cqe->lro.tcppsh_abort_dupack >> 6) & 1;
}
static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
@@ -719,19 +902,19 @@ static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
return (cqe->l4_l3_hdr_type >> 4) & 0x7;
}
-static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
+static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
{
- return (cqe->l4_l3_hdr_type >> 2) & 0x3;
+ return cqe->tls_outer_l3_tunneled & 0x1;
}
-static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe)
+static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe)
{
- return cqe->outer_l3_tunneled & 0x1;
+ return (cqe->tls_outer_l3_tunneled >> 3) & 0x3;
}
-static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
+static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
{
- return !!(cqe->l4_l3_hdr_type & 0x1);
+ return cqe->l4_l3_hdr_type & 0x1;
}
static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
@@ -744,6 +927,17 @@ static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
return (u64)lo | ((u64)hi << 32);
}
+static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe)
+{
+ return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF;
+}
+
+#define MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE 3
+#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE 9
+#define MLX5_MPWQE_LOG_NUM_STRIDES_MAX 16
+#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE 6
+#define MLX5_MPWQE_LOG_STRIDE_SZ_MAX 13
+
struct mpwrq_cqe_bc {
__be16 filler_consumed_strides;
__be16 byte_cnt;
@@ -789,14 +983,23 @@ enum {
};
enum {
- CQE_RSS_HTYPE_IP = 0x3 << 2,
+ CQE_RSS_HTYPE_IP = GENMASK(3, 2),
/* cqe->rss_hash_type[3:2] - IP destination selected for hash
* (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved)
*/
- CQE_RSS_HTYPE_L4 = 0x3 << 6,
+ CQE_RSS_IP_NONE = 0x0,
+ CQE_RSS_IPV4 = 0x1,
+ CQE_RSS_IPV6 = 0x2,
+ CQE_RSS_RESERVED = 0x3,
+
+ CQE_RSS_HTYPE_L4 = GENMASK(7, 6),
/* cqe->rss_hash_type[7:6] - L4 destination selected for hash
	 * (00 = none, 01 = TCP, 10 = UDP, 11 = IPSEC.SPI)
*/
+ CQE_RSS_L4_NONE = 0x0,
+ CQE_RSS_L4_TCP = 0x1,
+ CQE_RSS_L4_UDP = 0x2,
+ CQE_RSS_L4_IPSEC = 0x3,
};
enum {
@@ -811,6 +1014,13 @@ enum {
CQE_L4_OK = 1 << 2,
};
+enum {
+ CQE_TLS_OFFLOAD_NOT_DECRYPTED = 0x0,
+ CQE_TLS_OFFLOAD_DECRYPTED = 0x1,
+ CQE_TLS_OFFLOAD_RESYNC = 0x2,
+ CQE_TLS_OFFLOAD_ERROR = 0x3,
+};
+
struct mlx5_sig_err_cqe {
u8 rsvd0[16];
__be32 expected_trans_sig;
@@ -853,13 +1063,12 @@ enum {
MLX5_MKEY_REMOTE_INVAL = 1 << 24,
MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
MLX5_MKEY_BSF_EN = 1 << 30,
- MLX5_MKEY_LEN64 = 1 << 31,
};
struct mlx5_mkey_seg {
/* This is a two bit field occupying bits 31-30.
* bit 31 is always 0,
- * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have tanslation
+	 * bit 30 is zero for regular MRs and 1 (e.g. free) for UMRs that do not have translation
*/
u8 status;
u8 pcie_control;
@@ -890,9 +1099,14 @@ enum {
};
enum {
- MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0,
- MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1,
- MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2,
+ MLX5_VPORT_ADMIN_STATE_DOWN = 0x0,
+ MLX5_VPORT_ADMIN_STATE_UP = 0x1,
+ MLX5_VPORT_ADMIN_STATE_AUTO = 0x2,
+};
+
+enum {
+ MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN = 0x1,
+ MLX5_VPORT_CVLAN_INSERT_ALWAYS = 0x3,
};
enum {
@@ -917,7 +1131,10 @@ enum {
MLX5_MATCH_OUTER_HEADERS = 1 << 0,
MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
MLX5_MATCH_INNER_HEADERS = 1 << 2,
-
+ MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3,
+ MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4,
+ MLX5_MATCH_MISC_PARAMETERS_4 = 1 << 5,
+ MLX5_MATCH_MISC_PARAMETERS_5 = 1 << 6,
};
enum {
@@ -953,6 +1170,21 @@ enum mlx5_wol_mode {
MLX5_WOL_PHY_ACTIVITY = 1 << 7,
};
+enum mlx5_mpls_supported_fields {
+ MLX5_FIELD_SUPPORT_MPLS_LABEL = 1 << 0,
+ MLX5_FIELD_SUPPORT_MPLS_EXP = 1 << 1,
+ MLX5_FIELD_SUPPORT_MPLS_S_BOS = 1 << 2,
+ MLX5_FIELD_SUPPORT_MPLS_TTL = 1 << 3
+};
+
+enum mlx5_flex_parser_protos {
+ MLX5_FLEX_PROTO_GENEVE = 1 << 3,
+ MLX5_FLEX_PROTO_CW_MPLS_GRE = 1 << 4,
+ MLX5_FLEX_PROTO_CW_MPLS_UDP = 1 << 5,
+ MLX5_FLEX_PROTO_ICMP = 1 << 8,
+ MLX5_FLEX_PROTO_ICMPV6 = 1 << 9,
+};
+
/* MLX5 DEV CAPs */
/* TODO: EAT.ME */
@@ -961,6 +1193,9 @@ enum mlx5_cap_mode {
HCA_CAP_OPMOD_GET_CUR = 1,
};
+/* Any new cap addition must update mlx5_hca_caps_alloc() to allocate
+ * capability memory.
+ */
enum mlx5_cap_type {
MLX5_CAP_GENERAL = 0,
MLX5_CAP_ETHERNET_OFFLOADS,
@@ -968,14 +1203,27 @@ enum mlx5_cap_type {
MLX5_CAP_ATOMIC,
MLX5_CAP_ROCE,
MLX5_CAP_IPOIB_OFFLOADS,
- MLX5_CAP_EOIB_OFFLOADS,
+ MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
MLX5_CAP_FLOW_TABLE,
MLX5_CAP_ESWITCH_FLOW_TABLE,
MLX5_CAP_ESWITCH,
MLX5_CAP_RESERVED,
MLX5_CAP_VECTOR_CALC,
MLX5_CAP_QOS,
- MLX5_CAP_FPGA,
+ MLX5_CAP_DEBUG,
+ MLX5_CAP_RESERVED_14,
+ MLX5_CAP_DEV_MEM,
+ MLX5_CAP_RESERVED_16,
+ MLX5_CAP_TLS,
+ MLX5_CAP_VDPA_EMULATION = 0x13,
+ MLX5_CAP_DEV_EVENT = 0x14,
+ MLX5_CAP_IPSEC,
+ MLX5_CAP_CRYPTO = 0x1a,
+ MLX5_CAP_DEV_SHAMPO = 0x1d,
+ MLX5_CAP_MACSEC = 0x1f,
+ MLX5_CAP_GENERAL_2 = 0x20,
+ MLX5_CAP_PORT_SELECTION = 0x25,
+ MLX5_CAP_ADV_VIRTUALIZATION = 0x26,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -990,44 +1238,74 @@ enum mlx5_pcam_feature_groups {
enum mlx5_mcam_reg_groups {
MLX5_MCAM_REGS_FIRST_128 = 0x0,
+ MLX5_MCAM_REGS_0x9080_0x90FF = 0x1,
+ MLX5_MCAM_REGS_0x9100_0x917F = 0x2,
+ MLX5_MCAM_REGS_NUM = 0x3,
};
enum mlx5_mcam_feature_groups {
MLX5_MCAM_FEATURE_ENHANCED_FEATURES = 0x0,
};
+enum mlx5_qcam_reg_groups {
+ MLX5_QCAM_REGS_FIRST_128 = 0x0,
+};
+
+enum mlx5_qcam_feature_groups {
+ MLX5_QCAM_FEATURE_ENHANCED_FEATURES = 0x0,
+};
+
/* GET Dev Caps macros */
#define MLX5_CAP_GEN(mdev, cap) \
- MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
+ MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)
+
+#define MLX5_CAP_GEN_64(mdev, cap) \
+ MLX5_GET64(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)
#define MLX5_CAP_GEN_MAX(mdev, cap) \
- MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)
+ MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->max, cap)
+
+#define MLX5_CAP_GEN_2(mdev, cap) \
+ MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)
+
+#define MLX5_CAP_GEN_2_64(mdev, cap) \
+ MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)
+
+#define MLX5_CAP_GEN_2_MAX(mdev, cap) \
+ MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->max, cap)
#define MLX5_CAP_ETH(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+ mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->cur, cap)
#define MLX5_CAP_ETH_MAX(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+ mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->max, cap)
+
+#define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
+ MLX5_GET(per_protocol_networking_offload_caps,\
+ mdev->caps.hca[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS]->cur, cap)
#define MLX5_CAP_ROCE(mdev, cap) \
- MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)
+ MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->cur, cap)
#define MLX5_CAP_ROCE_MAX(mdev, cap) \
- MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap)
+ MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->max, cap)
#define MLX5_CAP_ATOMIC(mdev, cap) \
- MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap)
+ MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->cur, cap)
#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
- MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap)
+ MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->max, cap)
#define MLX5_CAP_FLOWTABLE(mdev, cap) \
- MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
+ MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)
+
+#define MLX5_CAP64_FLOWTABLE(mdev, cap) \
+ MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)
#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
- MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap)
+ MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->max, cap)
#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
@@ -1035,6 +1313,12 @@ enum mlx5_mcam_feature_groups {
#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
+#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)
+
+#define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap)
+
#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
@@ -1047,13 +1331,25 @@ enum mlx5_mcam_feature_groups {
#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
+#define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)
+
+#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap)
+
+#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)
+
+#define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap)
+
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
- mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+ mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
- mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+ mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->max, cap)
#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
@@ -1073,38 +1369,126 @@ enum mlx5_mcam_feature_groups {
#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
+#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE(mdev, ft_field_support_2_esw_fdb.cap)
+
+#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2_MAX(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, ft_field_support_2_esw_fdb.cap)
+
#define MLX5_CAP_ESW(mdev, cap) \
MLX5_GET(e_switch_cap, \
- mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap)
+ mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap)
+
+#define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \
+ MLX5_GET64(flow_table_eswitch_cap, \
+ (mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
#define MLX5_CAP_ESW_MAX(mdev, cap) \
MLX5_GET(e_switch_cap, \
- mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap)
+ mdev->caps.hca[MLX5_CAP_ESWITCH]->max, cap)
+
+#define MLX5_CAP_PORT_SELECTION(mdev, cap) \
+ MLX5_GET(port_selection_cap, \
+ mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, cap)
+
+#define MLX5_CAP_PORT_SELECTION_MAX(mdev, cap) \
+ MLX5_GET(port_selection_cap, \
+ mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->max, cap)
+
+#define MLX5_CAP_ADV_VIRTUALIZATION(mdev, cap) \
+ MLX5_GET(adv_virtualization_cap, \
+ mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->cur, cap)
+
+#define MLX5_CAP_ADV_VIRTUALIZATION_MAX(mdev, cap) \
+ MLX5_GET(adv_virtualization_cap, \
+ mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->max, cap)
+
+#define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
+ MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
+
+#define MLX5_CAP_FLOWTABLE_PORT_SELECTION_MAX(mdev, cap) \
+ MLX5_CAP_PORT_SELECTION_MAX(mdev, flow_table_properties_port_selection.cap)
#define MLX5_CAP_ODP(mdev, cap)\
- MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap)
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
+
+#define MLX5_CAP_ODP_MAX(mdev, cap)\
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap)
#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
MLX5_GET(vector_calc_cap, \
- mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap)
+ mdev->caps.hca[MLX5_CAP_VECTOR_CALC]->cur, cap)
#define MLX5_CAP_QOS(mdev, cap)\
- MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)
+ MLX5_GET(qos_cap, mdev->caps.hca[MLX5_CAP_QOS]->cur, cap)
+
+#define MLX5_CAP_DEBUG(mdev, cap)\
+ MLX5_GET(debug_cap, mdev->caps.hca[MLX5_CAP_DEBUG]->cur, cap)
#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
+#define MLX5_CAP_PCAM_REG(mdev, reg) \
+ MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg)
+
#define MLX5_CAP_MCAM_REG(mdev, reg) \
- MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_access_reg_cap_mask.access_regs.reg)
+ MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_FIRST_128], \
+ mng_access_reg_cap_mask.access_regs.reg)
+
+#define MLX5_CAP_MCAM_REG1(mdev, reg) \
+ MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9080_0x90FF], \
+ mng_access_reg_cap_mask.access_regs1.reg)
+
+#define MLX5_CAP_MCAM_REG2(mdev, reg) \
+ MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \
+ mng_access_reg_cap_mask.access_regs2.reg)
#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
+#define MLX5_CAP_QCAM_REG(mdev, fld) \
+ MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_access_reg_cap_mask.reg_cap.fld)
+
+#define MLX5_CAP_QCAM_FEATURE(mdev, fld) \
+ MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_feature_cap_mask.feature_cap.fld)
+
#define MLX5_CAP_FPGA(mdev, cap) \
- MLX5_GET(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap)
+ MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)
#define MLX5_CAP64_FPGA(mdev, cap) \
- MLX5_GET64(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap)
+ MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)
+
+#define MLX5_CAP_DEV_MEM(mdev, cap)\
+ MLX5_GET(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)
+
+#define MLX5_CAP64_DEV_MEM(mdev, cap)\
+ MLX5_GET64(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)
+
+#define MLX5_CAP_TLS(mdev, cap) \
+ MLX5_GET(tls_cap, (mdev)->caps.hca[MLX5_CAP_TLS]->cur, cap)
+
+#define MLX5_CAP_DEV_EVENT(mdev, cap)\
+ MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca[MLX5_CAP_DEV_EVENT]->cur, cap)
+
+#define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
+ MLX5_GET(virtio_emulation_cap, \
+ (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)
+
+#define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
+ MLX5_GET64(virtio_emulation_cap, \
+ (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)
+
+#define MLX5_CAP_IPSEC(mdev, cap)\
+ MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap)
+
+#define MLX5_CAP_CRYPTO(mdev, cap)\
+ MLX5_GET(crypto_cap, (mdev)->caps.hca[MLX5_CAP_CRYPTO]->cur, cap)
+
+#define MLX5_CAP_DEV_SHAMPO(mdev, cap)\
+	MLX5_GET(shampo_cap, mdev->caps.hca[MLX5_CAP_DEV_SHAMPO]->cur, cap)
+
+#define MLX5_CAP_MACSEC(mdev, cap)\
+ MLX5_GET(macsec_cap, (mdev)->caps.hca[MLX5_CAP_MACSEC]->cur, cap)
enum {
MLX5_CMD_STAT_OK = 0x0,
@@ -1134,6 +1518,7 @@ enum {
MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
+ MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP = 0x13,
MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
};
@@ -1149,8 +1534,10 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}
-#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8
-#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8
+#define MLX5_RDMA_RX_NUM_COUNTERS_PRIOS 2
+#define MLX5_RDMA_TX_NUM_COUNTERS_PRIOS 1
+#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
+#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
diff --git a/include/linux/mlx5/doorbell.h b/include/linux/mlx5/doorbell.h
index 0787de28f2fc..5c267707e1df 100644
--- a/include/linux/mlx5/doorbell.h
+++ b/include/linux/mlx5/doorbell.h
@@ -36,46 +36,25 @@
#define MLX5_BF_OFFSET 0x800
#define MLX5_CQ_DOORBELL 0x20
-#if BITS_PER_LONG == 64
/* Assume that we can just write a 64-bit doorbell atomically. s390
* actually doesn't have writeq() but S/390 systems don't even have
* PCI so we won't worry about it.
+ *
+ * Note that the write is not atomic on 32-bit systems! In contrast to 64-bit
+ * ones, it requires proper locking. mlx5_write64 doesn't do any locking, so on
+ * 32-bit systems the caller must protect it with a lock of its own.
+ *
+ * TODO: use write{q,l}_relaxed()
*/
-#define MLX5_DECLARE_DOORBELL_LOCK(name)
-#define MLX5_INIT_DOORBELL_LOCK(ptr) do { } while (0)
-#define MLX5_GET_DOORBELL_LOCK(ptr) (NULL)
-
-static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
- spinlock_t *doorbell_lock)
+static inline void mlx5_write64(__be32 val[2], void __iomem *dest)
{
+#if BITS_PER_LONG == 64
__raw_writeq(*(u64 *)val, dest);
-}
-
#else
-
-/* Just fall back to a spinlock to protect the doorbell if
- * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit
- * MMIO writes.
- */
-
-#define MLX5_DECLARE_DOORBELL_LOCK(name) spinlock_t name;
-#define MLX5_INIT_DOORBELL_LOCK(ptr) spin_lock_init(ptr)
-#define MLX5_GET_DOORBELL_LOCK(ptr) (ptr)
-
-static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
- spinlock_t *doorbell_lock)
-{
- unsigned long flags;
-
- if (doorbell_lock)
- spin_lock_irqsave(doorbell_lock, flags);
__raw_writel((__force u32) val[0], dest);
__raw_writel((__force u32) val[1], dest + 4);
- if (doorbell_lock)
- spin_unlock_irqrestore(doorbell_lock, flags);
-}
-
#endif
+}
#endif /* MLX5_DOORBELL_H */
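Since mlx5_write64() no longer takes a lock, a 32-bit caller must serialize the two 32-bit MMIO writes itself; a sketch with a hypothetical caller-owned spinlock:

static void example_ring_doorbell(__be32 db[2], void __iomem *dest,
				  spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);		/* required on 32-bit only */
	mlx5_write64(db, dest);
	spin_unlock_irqrestore(lock, flags);
}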
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index df6ce59a1f95..a4c4f737f9c1 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -36,30 +36,37 @@
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
+#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/radix-tree.h>
+#include <linux/xarray.h>
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/idr.h>
+#include <linux/notifier.h>
+#include <linux/refcount.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/mutex.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
-#include <linux/mlx5/srq.h>
+#include <linux/mlx5/eq.h>
+#include <linux/timecounter.h>
+#include <linux/ptp_clock_kernel.h>
+#include <net/devlink.h>
+
+#define MLX5_ADEV_NAME "mlx5_core"
+
+#define MLX5_IRQ_EQ_CTRL (U8_MAX)
enum {
MLX5_BOARD_ID_LEN = 64,
- MLX5_MAX_NAME_LEN = 16,
};
enum {
- /* one minute for the sake of bringup. Generally, commands must always
- * complete and we may need to increase this timeout value
- */
- MLX5_CMD_TIMEOUT_MSEC = 60 * 1000,
MLX5_CMD_WQ_MAX_NAME = 32,
};
@@ -78,65 +85,84 @@ enum mlx5_sqp_t {
};
enum {
- MLX5_MAX_PORTS = 2,
-};
-
-enum {
- MLX5_EQ_VEC_PAGES = 0,
- MLX5_EQ_VEC_CMD = 1,
- MLX5_EQ_VEC_ASYNC = 2,
- MLX5_EQ_VEC_PFAULT = 3,
- MLX5_EQ_VEC_COMP_BASE,
-};
-
-enum {
- MLX5_MAX_IRQ_NAME = 32
+ MLX5_MAX_PORTS = 4,
};
enum {
- MLX5_ATOMIC_MODE_IB_COMP = 1 << 16,
- MLX5_ATOMIC_MODE_CX = 2 << 16,
- MLX5_ATOMIC_MODE_8B = 3 << 16,
- MLX5_ATOMIC_MODE_16B = 4 << 16,
- MLX5_ATOMIC_MODE_32B = 5 << 16,
- MLX5_ATOMIC_MODE_64B = 6 << 16,
- MLX5_ATOMIC_MODE_128B = 7 << 16,
- MLX5_ATOMIC_MODE_256B = 8 << 16,
+ MLX5_ATOMIC_MODE_OFFSET = 16,
+ MLX5_ATOMIC_MODE_IB_COMP = 1,
+ MLX5_ATOMIC_MODE_CX = 2,
+ MLX5_ATOMIC_MODE_8B = 3,
+ MLX5_ATOMIC_MODE_16B = 4,
+ MLX5_ATOMIC_MODE_32B = 5,
+ MLX5_ATOMIC_MODE_64B = 6,
+ MLX5_ATOMIC_MODE_128B = 7,
+ MLX5_ATOMIC_MODE_256B = 8,
};
enum {
+ MLX5_REG_SBPR = 0xb001,
+ MLX5_REG_SBCM = 0xb002,
+ MLX5_REG_QPTS = 0x4002,
MLX5_REG_QETCR = 0x4005,
MLX5_REG_QTCT = 0x400a,
+ MLX5_REG_QPDPM = 0x4013,
+ MLX5_REG_QCAM = 0x4019,
MLX5_REG_DCBX_PARAM = 0x4020,
MLX5_REG_DCBX_APP = 0x4021,
MLX5_REG_FPGA_CAP = 0x4022,
MLX5_REG_FPGA_CTRL = 0x4023,
MLX5_REG_FPGA_ACCESS_REG = 0x4024,
+ MLX5_REG_CORE_DUMP = 0x402e,
MLX5_REG_PCAP = 0x5001,
MLX5_REG_PMTU = 0x5003,
MLX5_REG_PTYS = 0x5004,
MLX5_REG_PAOS = 0x5006,
MLX5_REG_PFCC = 0x5007,
MLX5_REG_PPCNT = 0x5008,
+ MLX5_REG_PPTB = 0x500b,
+ MLX5_REG_PBMC = 0x500c,
MLX5_REG_PMAOS = 0x5012,
MLX5_REG_PUDE = 0x5009,
MLX5_REG_PMPE = 0x5010,
MLX5_REG_PELC = 0x500e,
MLX5_REG_PVLC = 0x500f,
MLX5_REG_PCMR = 0x5041,
+ MLX5_REG_PDDR = 0x5031,
MLX5_REG_PMLP = 0x5002,
+ MLX5_REG_PPLM = 0x5023,
MLX5_REG_PCAM = 0x507f,
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
+ MLX5_REG_MTMP = 0x900A,
MLX5_REG_MCIA = 0x9014,
+ MLX5_REG_MFRL = 0x9028,
MLX5_REG_MLCR = 0x902b,
+ MLX5_REG_MRTC = 0x902d,
+ MLX5_REG_MTRC_CAP = 0x9040,
+ MLX5_REG_MTRC_CONF = 0x9041,
+ MLX5_REG_MTRC_STDB = 0x9042,
+ MLX5_REG_MTRC_CTRL = 0x9043,
+ MLX5_REG_MPEIN = 0x9050,
MLX5_REG_MPCNT = 0x9051,
MLX5_REG_MTPPS = 0x9053,
MLX5_REG_MTPPSE = 0x9054,
+ MLX5_REG_MTUTC = 0x9055,
+ MLX5_REG_MPEGC = 0x9056,
+ MLX5_REG_MCQS = 0x9060,
MLX5_REG_MCQI = 0x9061,
MLX5_REG_MCC = 0x9062,
MLX5_REG_MCDA = 0x9063,
MLX5_REG_MCAM = 0x907f,
+ MLX5_REG_MIRC = 0x9162,
+ MLX5_REG_SBCAM = 0xB01F,
+ MLX5_REG_RESOURCE_DUMP = 0xC000,
+ MLX5_REG_DTOR = 0xC00E,
+};
+
+enum mlx5_qpts_trust_state {
+ MLX5_QPTS_TRUST_PCP = 1,
+ MLX5_QPTS_TRUST_DSCP = 2,
};
enum mlx5_dcbx_oper_mode {
@@ -147,6 +173,8 @@ enum mlx5_dcbx_oper_mode {
enum {
MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
+ MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP = 1 << 2,
+ MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD = 1 << 3,
};
enum mlx5_page_fault_resume_flags {
@@ -162,8 +190,20 @@ enum dbg_rsc_type {
MLX5_DBG_RSC_CQ,
};
+enum port_state_policy {
+ MLX5_POLICY_DOWN = 0,
+ MLX5_POLICY_UP = 1,
+ MLX5_POLICY_FOLLOW = 2,
+ MLX5_POLICY_INVALID = 0xffffffff
+};
+
+enum mlx5_coredev_type {
+ MLX5_COREDEV_PF,
+ MLX5_COREDEV_VF,
+ MLX5_COREDEV_SF,
+};
+
struct mlx5_field_desc {
- struct dentry *dent;
int i;
};
@@ -172,19 +212,13 @@ struct mlx5_rsc_debug {
void *object;
enum dbg_rsc_type type;
struct dentry *root;
- struct mlx5_field_desc fields[0];
+ struct mlx5_field_desc fields[];
};
enum mlx5_dev_event {
- MLX5_DEV_EVENT_SYS_ERROR,
- MLX5_DEV_EVENT_PORT_UP,
- MLX5_DEV_EVENT_PORT_DOWN,
- MLX5_DEV_EVENT_PORT_INITIALIZED,
- MLX5_DEV_EVENT_LID_CHANGE,
- MLX5_DEV_EVENT_PKEY_CHANGE,
- MLX5_DEV_EVENT_GUID_CHANGE,
- MLX5_DEV_EVENT_CLIENT_REREG,
- MLX5_DEV_EVENT_PPS,
+ MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */
+ MLX5_DEV_EVENT_PORT_AFFINITY = 129,
+ MLX5_DEV_EVENT_MULTIPORT_ESW = 130,
};
enum mlx5_port_status {
@@ -192,26 +226,10 @@ enum mlx5_port_status {
MLX5_PORT_DOWN = 2,
};
-enum mlx5_eq_type {
- MLX5_EQ_TYPE_COMP,
- MLX5_EQ_TYPE_ASYNC,
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- MLX5_EQ_TYPE_PF,
-#endif
-};
-
-struct mlx5_bfreg_info {
- u32 *sys_pages;
- int num_low_latency_bfregs;
- unsigned int *count;
-
- /*
- * protect bfreg allocation data structs
- */
- struct mutex lock;
- u32 ver;
- bool lib_uar_4k;
- u32 num_sys_pages;
+enum mlx5_cmdif_state {
+ MLX5_CMDIF_STATE_UNINITIALIZED,
+ MLX5_CMDIF_STATE_UP,
+ MLX5_CMDIF_STATE_DOWN,
};
struct mlx5_cmd_first {
@@ -228,11 +246,6 @@ struct mlx5_cmd_msg {
struct mlx5_cmd_debug {
struct dentry *dbg_root;
- struct dentry *dbg_in;
- struct dentry *dbg_out;
- struct dentry *dbg_outlen;
- struct dentry *dbg_status;
- struct dentry *dbg_run;
void *in_msg;
void *out_msg;
u8 status;
@@ -256,14 +269,25 @@ enum {
struct mlx5_cmd_stats {
u64 sum;
u64 n;
+ /* number of times command failed */
+ u64 failed;
+ /* number of times command failed on bad status returned by FW */
+ u64 failed_mbox_status;
+ /* errno returned by the last failed command */
+ u32 last_failed_errno;
+ /* last bad status returned by FW */
+ u8 last_failed_mbox_status;
+ /* syndrome of the last failed command, as returned by FW */
+ u32 last_failed_syndrome;
struct dentry *root;
- struct dentry *avg;
- struct dentry *count;
/* protect command average calculations */
spinlock_t lock;
};
struct mlx5_cmd {
+ struct mlx5_nb nb;
+
+ enum mlx5_cmdif_state state;
void *cmd_alloc_buf;
dma_addr_t alloc_dma;
int alloc_size;
@@ -289,22 +313,17 @@ struct mlx5_cmd {
struct workqueue_struct *wq;
struct semaphore sem;
struct semaphore pages_sem;
+ struct semaphore throttle_sem;
int mode;
+ u16 allowed_opcode;
struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
- struct pci_pool *pool;
+ struct dma_pool *pool;
struct mlx5_cmd_debug dbg;
struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
int checksum_disabled;
struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};
-struct mlx5_port_caps {
- int gid_table_len;
- int pkey_table_len;
- u8 ext_port_cap;
- bool has_smi;
-};
-
struct mlx5_cmd_mailbox {
void *buf;
dma_addr_t dma;
@@ -316,13 +335,6 @@ struct mlx5_buf_list {
dma_addr_t map;
};
-struct mlx5_buf {
- struct mlx5_buf_list direct;
- int npages;
- int size;
- u8 page_shift;
-};
-
struct mlx5_frag_buf {
struct mlx5_buf_list *frags;
int npages;
@@ -330,42 +342,14 @@ struct mlx5_frag_buf {
u8 page_shift;
};
-struct mlx5_eq_tasklet {
- struct list_head list;
- struct list_head process_list;
- struct tasklet_struct task;
- /* lock on completion tasklet list */
- spinlock_t lock;
-};
-
-struct mlx5_eq_pagefault {
- struct work_struct work;
- /* Pagefaults lock */
- spinlock_t lock;
- struct workqueue_struct *wq;
- mempool_t *pool;
-};
-
-struct mlx5_eq {
- struct mlx5_core_dev *dev;
- __be32 __iomem *doorbell;
- u32 cons_index;
- struct mlx5_buf buf;
- int size;
- unsigned int irqn;
- u8 eqn;
- int nent;
- u64 mask;
- struct list_head list;
- int index;
- struct mlx5_rsc_debug *dbg;
- enum mlx5_eq_type type;
- union {
- struct mlx5_eq_tasklet tasklet_ctx;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- struct mlx5_eq_pagefault pf_ctx;
-#endif
- };
+struct mlx5_frag_buf_ctrl {
+ struct mlx5_buf_list *frags;
+ u32 sz_m1;
+ u16 frag_sz_m1;
+ u16 strides_offset;
+ u8 log_sz;
+ u8 log_stride;
+ u8 log_frag_strides;
};
struct mlx5_core_psv {
@@ -389,19 +373,6 @@ struct mlx5_core_sig_ctx {
u32 sigerr_count;
};
-enum {
- MLX5_MKEY_MR = 1,
- MLX5_MKEY_MW,
-};
-
-struct mlx5_core_mkey {
- u64 iova;
- u64 size;
- u32 key;
- u32 pd;
- u32 type;
-};
-
#define MLX5_24BIT_MASK ((1 << 24) - 1)
enum mlx5_res_type {
@@ -410,43 +381,16 @@ enum mlx5_res_type {
MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ,
MLX5_RES_SRQ = 3,
MLX5_RES_XSRQ = 4,
+ MLX5_RES_XRQ = 5,
+ MLX5_RES_DCT = MLX5_EVENT_QUEUE_TYPE_DCT,
};
struct mlx5_core_rsc_common {
enum mlx5_res_type res;
- atomic_t refcount;
- struct completion free;
-};
-
-struct mlx5_core_srq {
- struct mlx5_core_rsc_common common; /* must be first */
- u32 srqn;
- int max;
- int max_gs;
- int max_avail_gather;
- int wqe_shift;
- void (*event) (struct mlx5_core_srq *, enum mlx5_event);
-
- atomic_t refcount;
+ refcount_t refcount;
struct completion free;
};
-struct mlx5_eq_table {
- void __iomem *update_ci;
- void __iomem *update_arm_ci;
- struct list_head comp_eqs_list;
- struct mlx5_eq pages_eq;
- struct mlx5_eq async_eq;
- struct mlx5_eq cmd_eq;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- struct mlx5_eq pfault_eq;
-#endif
- int num_comp_vectors;
- /* protect EQs list
- */
- spinlock_t lock;
-};
-
struct mlx5_uars_page {
void __iomem *map;
bool wc;
@@ -486,78 +430,108 @@ struct mlx5_core_health {
struct timer_list timer;
u32 prev;
int miss_counter;
- bool sick;
- /* wq spinlock to synchronize draining */
- spinlock_t wq_lock;
+ u8 synd;
+ u32 fatal_error;
+ u32 crdump_size;
struct workqueue_struct *wq;
unsigned long flags;
- struct work_struct work;
- struct delayed_work recover_work;
-};
-
-struct mlx5_cq_table {
- /* protect radix tree
- */
- spinlock_t lock;
- struct radix_tree_root tree;
+ struct work_struct fatal_report_work;
+ struct work_struct report_work;
+ struct devlink_health_reporter *fw_reporter;
+ struct devlink_health_reporter *fw_fatal_reporter;
+ struct devlink_health_reporter *vnic_reporter;
+ struct delayed_work update_fw_log_ts_work;
};
struct mlx5_qp_table {
- /* protect radix tree
- */
- spinlock_t lock;
- struct radix_tree_root tree;
-};
+ struct notifier_block nb;
-struct mlx5_srq_table {
/* protect radix tree
*/
spinlock_t lock;
struct radix_tree_root tree;
};
-struct mlx5_mkey_table {
- /* protect radix tree
- */
- rwlock_t lock;
- struct radix_tree_root tree;
+enum {
+ MLX5_PF_NOTIFY_DISABLE_VF,
+ MLX5_PF_NOTIFY_ENABLE_VF,
};
struct mlx5_vf_context {
int enabled;
+ u64 port_guid;
+ u64 node_guid;
+ /* Valid bits are used to validate administrative guid only.
+ * Enabled after ndo_set_vf_guid
+ */
+ u8 port_guid_valid:1;
+ u8 node_guid_valid:1;
+ enum port_state_policy policy;
+ struct blocking_notifier_head notifier;
};
struct mlx5_core_sriov {
struct mlx5_vf_context *vfs_ctx;
int num_vfs;
- int enabled_vfs;
+ u16 max_vfs;
};
-struct mlx5_irq_info {
- cpumask_var_t mask;
- char name[MLX5_MAX_IRQ_NAME];
+struct mlx5_fc_pool {
+ struct mlx5_core_dev *dev;
+ struct mutex pool_lock; /* protects pool lists */
+ struct list_head fully_used;
+ struct list_head partially_used;
+ struct list_head unused;
+ int available_fcs;
+ int used_fcs;
+ int threshold;
};
struct mlx5_fc_stats {
- struct rb_root counters;
- struct list_head addlist;
- /* protect addlist add/splice operations */
- spinlock_t addlist_lock;
+ spinlock_t counters_idr_lock; /* protects counters_idr */
+ struct idr counters_idr;
+ struct list_head counters;
+ struct llist_head addlist;
+ struct llist_head dellist;
struct workqueue_struct *wq;
struct delayed_work work;
unsigned long next_query;
unsigned long sampling_interval; /* jiffies */
+ u32 *bulk_query_out;
+ int bulk_query_len;
+ size_t num_counters;
+ bool bulk_query_alloc_failed;
+ unsigned long next_bulk_query_alloc;
+ struct mlx5_fc_pool fc_pool;
};
+struct mlx5_events;
+struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
-struct mlx5_pagefault;
+struct mlx5_devcom;
+struct mlx5_fw_reset;
+struct mlx5_eq_table;
+struct mlx5_irq_table;
+struct mlx5_vhca_state_notifier;
+struct mlx5_sf_dev_table;
+struct mlx5_sf_hw_table;
+struct mlx5_sf_table;
+struct mlx5_crypto_dek_priv;
+
+struct mlx5_rate_limit {
+ u32 rate;
+ u32 max_burst_sz;
+ u16 typical_pkt_sz;
+};
struct mlx5_rl_entry {
- u32 rate;
- u16 index;
- u16 refcount;
+ u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)];
+ u64 refcount;
+ u16 index;
+ u16 uid;
+ u8 dedicated : 1;
};
struct mlx5_rl_table {
@@ -567,115 +541,119 @@ struct mlx5_rl_table {
u32 max_rate;
u32 min_rate;
struct mlx5_rl_entry *rl_entry;
+ u64 refcount;
+};
+
+struct mlx5_core_roce {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_handle *allow_rule;
+};
+
+enum {
+ MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0,
+ MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1,
+ /* Set during device detach to block any further device
+ * creation/deletion on driver rescan. Unset during device attach.
+ */
+ MLX5_PRIV_FLAGS_DETACH = 1 << 2,
};
-enum port_module_event_status_type {
- MLX5_MODULE_STATUS_PLUGGED = 0x1,
- MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
- MLX5_MODULE_STATUS_ERROR = 0x3,
- MLX5_MODULE_STATUS_NUM = 0x3,
+struct mlx5_adev {
+ struct auxiliary_device adev;
+ struct mlx5_core_dev *mdev;
+ int idx;
};
-enum port_module_event_error_type {
- MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
- MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
- MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
- MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
- MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
- MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
- MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
- MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
- MLX5_MODULE_EVENT_ERROR_UNKNOWN,
- MLX5_MODULE_EVENT_ERROR_NUM,
+struct mlx5_debugfs_entries {
+ struct dentry *dbg_root;
+ struct dentry *qp_debugfs;
+ struct dentry *eq_debugfs;
+ struct dentry *cq_debugfs;
+ struct dentry *cmdif_debugfs;
+ struct dentry *pages_debugfs;
+ struct dentry *lag_debugfs;
};
-struct mlx5_port_module_event_stats {
- u64 status_counters[MLX5_MODULE_STATUS_NUM];
- u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
+enum mlx5_func_type {
+ MLX5_PF,
+ MLX5_VF,
+ MLX5_SF,
+ MLX5_HOST_PF,
+ MLX5_FUNC_TYPE_NUM,
};
+struct mlx5_ft_pool;
struct mlx5_priv {
- char name[MLX5_MAX_NAME_LEN];
- struct mlx5_eq_table eq_table;
- struct msix_entry *msix_arr;
- struct mlx5_irq_info *irq_info;
+ /* IRQ table is valid only for real PCI devices (PF or VF) */
+ struct mlx5_irq_table *irq_table;
+ struct mlx5_eq_table *eq_table;
/* pages stuff */
+ struct mlx5_nb pg_nb;
struct workqueue_struct *pg_wq;
- struct rb_root page_root;
- int fw_pages;
+ struct xarray page_root_xa;
atomic_t reg_pages;
struct list_head free_list;
- int vfs_pages;
+ u32 fw_pages;
+ u32 page_counters[MLX5_FUNC_TYPE_NUM];
+ u32 fw_pages_alloc_failed;
+ u32 give_pages_dropped;
+ u32 reclaim_pages_discard;
struct mlx5_core_health health;
+ struct list_head traps;
- struct mlx5_srq_table srq_table;
-
- /* start: qp staff */
- struct mlx5_qp_table qp_table;
- struct dentry *qp_debugfs;
- struct dentry *eq_debugfs;
- struct dentry *cq_debugfs;
- struct dentry *cmdif_debugfs;
- /* end: qp staff */
-
- /* start: cq staff */
- struct mlx5_cq_table cq_table;
- /* end: cq staff */
-
- /* start: mkey staff */
- struct mlx5_mkey_table mkey_table;
- /* end: mkey staff */
+ struct mlx5_debugfs_entries dbg;
/* start: alloc stuff */
- /* protect buffer alocation according to numa node */
+ /* protect buffer allocation according to numa node */
struct mutex alloc_mutex;
int numa_node;
struct mutex pgdir_mutex;
struct list_head pgdir_list;
/* end: alloc stuff */
- struct dentry *dbg_root;
-
- /* protect mkey key part */
- spinlock_t mkey_lock;
- u8 mkey_key;
- struct list_head dev_list;
- struct list_head ctx_list;
- spinlock_t ctx_lock;
+ struct mlx5_adev **adev;
+ int adev_idx;
+ int sw_vhca_id;
+ struct mlx5_events *events;
struct mlx5_flow_steering *steering;
+ struct mlx5_mpfs *mpfs;
struct mlx5_eswitch *eswitch;
struct mlx5_core_sriov sriov;
struct mlx5_lag *lag;
- unsigned long pci_dev_data;
+ u32 flags;
+ struct mlx5_devcom *devcom;
+ struct mlx5_fw_reset *fw_reset;
+ struct mlx5_core_roce roce;
struct mlx5_fc_stats fc_stats;
struct mlx5_rl_table rl_table;
+ struct mlx5_ft_pool *ft_pool;
- struct mlx5_port_module_event_stats pme_stats;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- void (*pfault)(struct mlx5_core_dev *dev,
- void *context,
- struct mlx5_pagefault *pfault);
- void *pfault_ctx;
- struct srcu_struct pfault_srcu;
-#endif
struct mlx5_bfreg_data bfregs;
struct mlx5_uars_page *uar;
+#ifdef CONFIG_MLX5_SF
+ struct mlx5_vhca_state_notifier *vhca_state_notifier;
+ struct mlx5_sf_dev_table *sf_dev_table;
+ struct mlx5_core_dev *parent_mdev;
+#endif
+#ifdef CONFIG_MLX5_SF_MANAGER
+ struct mlx5_sf_hw_table *sf_hw_table;
+ struct mlx5_sf_table *sf_table;
+#endif
};
enum mlx5_device_state {
- MLX5_DEVICE_STATE_UP,
+ MLX5_DEVICE_STATE_UP = 1,
MLX5_DEVICE_STATE_INTERNAL_ERROR,
};
enum mlx5_interface_state {
- MLX5_INTERFACE_STATE_DOWN = BIT(0),
- MLX5_INTERFACE_STATE_UP = BIT(1),
- MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
+ MLX5_INTERFACE_STATE_UP = BIT(0),
+ MLX5_BREAK_FW_WAIT = BIT(1),
};
enum mlx5_pci_status {
@@ -689,54 +667,29 @@ enum mlx5_pagefault_type_flags {
MLX5_PFAULT_RDMA = 1 << 2,
};
-/* Contains the details of a pagefault. */
-struct mlx5_pagefault {
- u32 bytes_committed;
- u32 token;
- u8 event_subtype;
- u8 type;
- union {
- /* Initiator or send message responder pagefault details. */
- struct {
- /* Received packet size, only valid for responders. */
- u32 packet_size;
- /*
- * Number of resource holding WQE, depends on type.
- */
- u32 wq_num;
- /*
- * WQE index. Refers to either the send queue or
- * receive queue, according to event_subtype.
- */
- u16 wqe_index;
- } wqe;
- /* RDMA responder pagefault details */
- struct {
- u32 r_key;
- /*
- * Received packet size, minimal size page fault
- * resolution required for forward progress.
- */
- u32 packet_size;
- u32 rdma_op_len;
- u64 rdma_va;
- } rdma;
- };
-
- struct mlx5_eq *eq;
- struct work_struct work;
-};
-
struct mlx5_td {
+ /* protects the tirs list against changes during tirs refresh */
+ struct mutex list_lock;
struct list_head tirs_list;
u32 tdn;
};
struct mlx5e_resources {
- u32 pdn;
- struct mlx5_td td;
- struct mlx5_core_mkey mkey;
- struct mlx5_sq_bfreg bfreg;
+ struct mlx5e_hw_objs {
+ u32 pdn;
+ struct mlx5_td td;
+ u32 mkey;
+ struct mlx5_sq_bfreg bfreg;
+ } hw_objs;
+ struct net_device *uplink_netdev;
+ struct mutex uplink_netdev_lock;
+ struct mlx5_crypto_dek_priv *dek_priv;
+};
+
+enum mlx5_sw_icm_type {
+ MLX5_SW_ICM_TYPE_STEERING,
+ MLX5_SW_ICM_TYPE_HEADER_MODIFY,
+ MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN,
};
#define MLX5_MAX_RESERVED_GIDS 8
@@ -747,7 +700,73 @@ struct mlx5_rsvd_gids {
struct ida ida;
};
+#define MAX_PIN_NUM 8
+struct mlx5_pps {
+ u8 pin_caps[MAX_PIN_NUM];
+ struct work_struct out_work;
+ u64 start[MAX_PIN_NUM];
+ u8 enabled;
+ u64 min_npps_period;
+ u64 min_out_pulse_duration_ns;
+};
+
+struct mlx5_timer {
+ struct cyclecounter cycles;
+ struct timecounter tc;
+ u32 nominal_c_mult;
+ unsigned long overflow_period;
+ struct delayed_work overflow_work;
+};
+
+struct mlx5_clock {
+ struct mlx5_nb pps_nb;
+ seqlock_t lock;
+ struct hwtstamp_config hwtstamp_config;
+ struct ptp_clock *ptp;
+ struct ptp_clock_info ptp_info;
+ struct mlx5_pps pps_info;
+ struct mlx5_timer timer;
+};
+
+struct mlx5_dm;
+struct mlx5_fw_tracer;
+struct mlx5_vxlan;
+struct mlx5_geneve;
+struct mlx5_hv_vhca;
+struct mlx5_thermal;
+
+#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
+#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
+
+enum {
+ MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
+ MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
+};
+
+enum {
+ MKEY_CACHE_LAST_STD_ENTRY = 20,
+ MLX5_IMR_KSM_CACHE_ENTRY,
+ MAX_MKEY_CACHE_ENTRIES
+};
+
+struct mlx5_profile {
+ u64 mask;
+ u8 log_max_qp;
+ u8 num_cmd_caches;
+ struct {
+ int size;
+ int limit;
+ } mr_cache[MAX_MKEY_CACHE_ENTRIES];
+};
+
+struct mlx5_hca_cap {
+ u32 cur[MLX5_UN_SZ_DW(hca_cap_union)];
+ u32 max[MLX5_UN_SZ_DW(hca_cap_union)];
+};
+
struct mlx5_core_dev {
+ struct device *device;
+ enum mlx5_coredev_type coredev_type;
struct pci_dev *pdev;
/* sync pci state */
struct mutex pci_status_mutex;
@@ -755,37 +774,45 @@ struct mlx5_core_dev {
u8 rev_id;
char board_id[MLX5_BOARD_ID_LEN];
struct mlx5_cmd cmd;
- struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
struct {
- u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
- u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ struct mlx5_hca_cap *hca[MLX5_CAP_NUM];
u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
- u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
+ u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)];
+ u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
+ u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
+ u8 embedded_cpu;
} caps;
+ struct mlx5_timeouts *timeouts;
+ u64 sys_image_guid;
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
+ phys_addr_t bar_addr;
enum mlx5_device_state state;
/* sync interface state */
struct mutex intf_state_mutex;
+ struct lock_class_key lock_key;
unsigned long intf_state;
- void (*event) (struct mlx5_core_dev *dev,
- enum mlx5_dev_event event,
- unsigned long param);
struct mlx5_priv priv;
- struct mlx5_profile *profile;
- atomic_t num_qps;
+ struct mlx5_profile profile;
u32 issi;
struct mlx5e_resources mlx5e_res;
+ struct mlx5_dm *dm;
+ struct mlx5_vxlan *vxlan;
+ struct mlx5_geneve *geneve;
struct {
struct mlx5_rsvd_gids reserved_gids;
- atomic_t roce_en;
+ u32 roce_en;
} roce;
#ifdef CONFIG_MLX5_FPGA
struct mlx5_fpga_device *fpga;
#endif
-#ifdef CONFIG_RFS_ACCEL
- struct cpu_rmap *rmap;
-#endif
+ struct mlx5_clock clock;
+ struct mlx5_ib_clock_info *clock_info;
+ struct mlx5_fw_tracer *tracer;
+ struct mlx5_rsc_dump *rsc_dump;
+ u32 vsc_addr;
+ struct mlx5_hv_vhca *hv_vhca;
+ struct mlx5_thermal *thermal;
};
struct mlx5_db {
@@ -823,6 +850,7 @@ struct mlx5_cmd_work_ent {
struct delayed_work cb_timeout_work;
void *context;
int idx;
+ struct completion handling;
struct completion done;
struct mlx5_cmd *cmd;
struct work_struct work;
@@ -835,18 +863,8 @@ struct mlx5_cmd_work_ent {
u64 ts2;
u16 op;
bool polling;
-};
-
-struct mlx5_pas {
- u64 pa;
- u8 log_sz;
-};
-
-enum port_state_policy {
- MLX5_POLICY_DOWN = 0,
- MLX5_POLICY_UP = 1,
- MLX5_POLICY_FOLLOW = 2,
- MLX5_POLICY_INVALID = 0xffffffff
+ /* Track the max comp handlers */
+ refcount_t refcnt;
};
enum phy_port_state {
@@ -867,8 +885,8 @@ struct mlx5_hca_vport_context {
u64 node_guid;
u32 cap_mask1;
u32 cap_mask1_perm;
- u32 cap_mask2;
- u32 cap_mask2_perm;
+ u16 cap_mask2;
+ u16 cap_mask2_perm;
u16 lid;
u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
u8 lmc;
@@ -880,22 +898,10 @@ struct mlx5_hca_vport_context {
bool grh_required;
};
-static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
-{
- return buf->direct.buf + offset;
-}
-
-extern struct workqueue_struct *mlx5_core_wq;
-
#define STRUCT_FIELD(header, field) \
.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
.struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field
-static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
-{
- return pci_get_drvdata(pdev);
-}
-
extern struct dentry *mlx5_debugfs_root;
static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
@@ -913,44 +919,121 @@ static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}
-static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
+static inline u32 mlx5_base_mkey(const u32 key)
{
- return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
+ return key & 0xffffff00u;
}
-static inline u32 mlx5_base_mkey(const u32 key)
+static inline u32 wq_get_byte_sz(u8 log_sz, u8 log_stride)
{
- return key & 0xffffff00u;
+ return ((u32)1 << log_sz) << log_stride;
+}
+
+static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
+ u8 log_stride, u8 log_sz,
+ u16 strides_offset,
+ struct mlx5_frag_buf_ctrl *fbc)
+{
+ fbc->frags = frags;
+ fbc->log_stride = log_stride;
+ fbc->log_sz = log_sz;
+ fbc->sz_m1 = (1 << fbc->log_sz) - 1;
+ fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
+ fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1;
+ fbc->strides_offset = strides_offset;
+}
+
+static inline void mlx5_init_fbc(struct mlx5_buf_list *frags,
+ u8 log_stride, u8 log_sz,
+ struct mlx5_frag_buf_ctrl *fbc)
+{
+ mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc);
+}
+
+static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
+ u32 ix)
+{
+ unsigned int frag;
+
+ ix += fbc->strides_offset;
+ frag = ix >> fbc->log_frag_strides;
+
+ return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
}
-int mlx5_cmd_init(struct mlx5_core_dev *dev);
-void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
+static inline u32
+mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
+{
+ u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1;
+
+ return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
+}
+
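/* Editor's worked example (not part of the patch): with PAGE_SHIFT = 12
 * and log_stride = 6 (64-byte strides), log_frag_strides = 12 - 6 = 6,
 * so each page-sized fragment holds 64 strides and frag_sz_m1 = 63.
 * Looking up ix = 130 with strides_offset = 0 in mlx5_frag_buf_get_wqe():
 * frag = 130 >> 6 = 2, and the byte offset inside that fragment is
 * (130 & 63) << 6 = 2 << 6 = 128, i.e. the WQE is at frags[2].buf + 128.
 */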
+enum {
+ CMD_ALLOWED_OPCODE_ALL,
+};
+
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
+void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);
+
+struct mlx5_async_ctx {
+ struct mlx5_core_dev *dev;
+ atomic_t num_inflight;
+ struct completion inflight_done;
+};
+struct mlx5_async_work;
+
+typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);
+
+struct mlx5_async_work {
+ struct mlx5_async_ctx *ctx;
+ mlx5_async_cbk_t user_callback;
+ u16 opcode; /* cmd opcode */
+ u16 op_mod; /* cmd op_mod */
+ void *out; /* pointer to the cmd output buffer */
+};
+
+void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
+ struct mlx5_async_ctx *ctx);
+void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
+int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
+ void *out, int out_size, mlx5_async_cbk_t callback,
+ struct mlx5_async_work *work);
+void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out);
+int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size);
+int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
-int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
- void *out, int out_size, mlx5_cmd_cbk_t callback,
- void *context);
+
+#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out) \
+ ({ \
+ mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out, \
+ MLX5_ST_SZ_BYTES(ifc_cmd##_out)); \
+ })
+
+#define mlx5_cmd_exec_in(dev, ifc_cmd, in) \
+ ({ \
+ u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {}; \
+ mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out); \
+ })
+
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
void *out, int out_size);
-void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
+bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
+
+void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev);
+void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *mdev);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
-int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
-int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
-void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
+void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
-void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
-int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
- struct mlx5_buf *buf, int node);
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
@@ -958,85 +1041,54 @@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *head);
-int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *in);
-int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
-int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *out);
-int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- u16 lwm, int is_srq);
-void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
-int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey,
- u32 *in, int inlen,
- u32 *out, int outlen,
- mlx5_cmd_cbk_t callback, void *context);
-int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey,
- u32 *in, int inlen);
-int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey);
-int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
- u32 *out, int outlen);
-int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
- u32 *mkey);
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in,
+ int inlen);
+int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey);
+int mlx5_core_query_mkey(struct mlx5_core_dev *dev, u32 mkey, u32 *out,
+ int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
-int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
- u16 opmod, u8 port);
-void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
+int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
-int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
+void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
+void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
- s32 npages);
+ s32 npages, bool ec_function);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
-int mlx5_eq_init(struct mlx5_core_dev *dev);
-void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
-void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
+
+void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
-void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
-void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
-struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
-void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
-int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
- int nent, u64 mask, const char *name,
- enum mlx5_eq_type type);
-int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-int mlx5_start_eqs(struct mlx5_core_dev *dev);
-int mlx5_stop_eqs(struct mlx5_core_dev *dev);
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
- unsigned int *irqn);
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
-int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
+struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev);
+void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in,
+ void *data_out, int size_out, u16 reg_id, int arg,
+ int write, bool verbose);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_num, int arg, int write);
-int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
- u32 *out, int outlen);
-int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
-void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
-int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
-void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
-int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
int node);
+
+static inline int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+{
+ return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
+}
+
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
const char *mlx5_command_str(int command);
-int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
int npsvs, u32 *sig_index);
@@ -1044,31 +1096,29 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
struct mlx5_odp_caps *odp_caps);
-int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
- u8 port_num, void *out, size_t sz);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
- u32 wq_num, u8 type, int error);
-#endif
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
-int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
-void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
+int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
+ struct mlx5_rate_limit *rl);
+void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
+int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
+ bool dedicated_entry, u16 *index);
+void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index);
+bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
+ struct mlx5_rate_limit *rl_1);
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
+unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
+struct cpumask *
+mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
u8 roce_version, u8 roce_l3_type, const u8 *gid,
- const u8 *mac, bool vlan, u16 vlan_id);
-
-static inline int fw_initializing(struct mlx5_core_dev *dev)
-{
- return ioread32be(&dev->iseg->initializing) >> 31;
-}
+ const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
@@ -1085,85 +1135,106 @@ static inline u8 mlx5_mkey_variant(u32 mkey)
return mkey & 0xff;
}
-enum {
- MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
- MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
-};
+/* Async-atomic event notifier used by mlx5 core to forward FW
+ * events received from the event queue to mlx5 consumers.
+ * Optimises event queue dispatching.
+ */
+int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
-enum {
- MAX_UMR_CACHE_ENTRY = 20,
- MLX5_IMR_MTT_CACHE_ENTRY,
- MLX5_IMR_KSM_CACHE_ENTRY,
- MAX_MR_CACHE_ENTRIES
-};
+/* Async-atomic event notifier used for forwarding
+ * events from the event queue into the mlx5 events dispatcher,
+ * eswitch, clock and others.
+ */
+int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
+int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
-enum {
- MLX5_INTERFACE_PROTOCOL_IB = 0,
- MLX5_INTERFACE_PROTOCOL_ETH = 1,
-};
-
-struct mlx5_interface {
- void * (*add)(struct mlx5_core_dev *dev);
- void (*remove)(struct mlx5_core_dev *dev, void *context);
- int (*attach)(struct mlx5_core_dev *dev, void *context);
- void (*detach)(struct mlx5_core_dev *dev, void *context);
- void (*event)(struct mlx5_core_dev *dev, void *context,
- enum mlx5_dev_event event, unsigned long param);
- void (*pfault)(struct mlx5_core_dev *dev,
- void *context,
- struct mlx5_pagefault *pfault);
- void * (*get_dev)(void *context);
- int protocol;
- struct list_head list;
-};
+/* Blocking event notifier used to forward SW events, used for slow path */
+int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event,
+ void *data);
-void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
-int mlx5_register_interface(struct mlx5_interface *intf);
-void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
+bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
+u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
+ struct net_device *slave);
+int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
+ u64 *values,
+ int num_counters,
+ size_t *offsets);
+struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev);
+u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
-
-#ifndef CONFIG_MLX5_CORE_IPOIB
-static inline
-struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
- struct ib_device *ibdev,
- const char *name,
- void (*setup)(struct net_device *))
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void mlx5_rdma_netdev_free(struct net_device *netdev) {}
-#else
+int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
+ u64 length, u32 log_alignment, u16 uid,
+ phys_addr_t *addr, u32 *obj_id);
+int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
+ u64 length, u16 uid, phys_addr_t addr, u32 obj_id);
+
+struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev);
+void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev);
+
+int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev,
+ int vf_id,
+ struct notifier_block *nb);
+void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev,
+ int vf_id,
+ struct notifier_block *nb);
+#ifdef CONFIG_MLX5_CORE_IPOIB
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
struct ib_device *ibdev,
const char *name,
void (*setup)(struct net_device *));
-void mlx5_rdma_netdev_free(struct net_device *netdev);
#endif /* CONFIG_MLX5_CORE_IPOIB */
-
-struct mlx5_profile {
- u64 mask;
- u8 log_max_qp;
- struct {
- int size;
- int limit;
- } mr_cache[MAX_MR_CACHE_ENTRIES];
-};
+int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
+ struct ib_device *device,
+ struct rdma_netdev_alloc_params *params);
enum {
MLX5_PCI_DEV_IS_VF = 1 << 0,
};
-static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
+static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev)
+{
+ return dev->coredev_type == MLX5_COREDEV_PF;
+}
+
+static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
{
- return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
+ return dev->coredev_type == MLX5_COREDEV_VF;
+}
+
+static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
+{
+ return dev->caps.embedded_cpu;
+}
+
+static inline bool
+mlx5_core_is_ecpf_esw_manager(const struct mlx5_core_dev *dev)
+{
+ return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager);
+}
+
+static inline bool mlx5_ecpf_vport_exists(const struct mlx5_core_dev *dev)
+{
+ return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists);
+}
+
+static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev)
+{
+ return dev->priv.sriov.max_vfs;
}
static inline int mlx5_get_gid_table_len(u16 param)
@@ -1181,8 +1252,66 @@ static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
return !!(dev->priv.rl_table.max_size);
}
+static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) &&
+ MLX5_CAP_GEN(dev, num_vhca_ports) <= 1;
+}
+
+static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;
+}
+
+static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev)
+{
+ return mlx5_core_is_mp_slave(dev) ||
+ mlx5_core_is_mp_master(dev);
+}
+
+static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
+{
+ if (!mlx5_core_mp_enabled(dev))
+ return 1;
+
+ return MLX5_CAP_GEN(dev, native_port_num);
+}
+
+static inline int mlx5_get_dev_index(struct mlx5_core_dev *dev)
+{
+ int idx = MLX5_CAP_GEN(dev, native_port_num);
+
+ if (idx >= 1 && idx <= MLX5_MAX_PORTS)
+ return idx - 1;
+ else
+ return PCI_FUNC(dev->pdev->devfn);
+}
+
enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
+bool mlx5_is_roce_on(struct mlx5_core_dev *dev);
+
+static inline bool mlx5_get_roce_state(struct mlx5_core_dev *dev)
+{
+ if (MLX5_CAP_GEN(dev, roce_rw_supported))
+ return MLX5_CAP_GEN(dev, roce);
+
+ /* If RoCE cap is read-only in FW, get RoCE state from devlink
+ * in order to support RoCE enable/disable feature
+ */
+ return mlx5_is_roce_on(dev);
+}
+
+enum {
+ MLX5_OCTWORD = 16,
+};
+
+struct msi_map mlx5_msix_alloc(struct mlx5_core_dev *dev,
+ irqreturn_t (*handler)(int, void *),
+ const struct irq_affinity_desc *affdesc,
+ const char *name);
+void mlx5_msix_free(struct mlx5_core_dev *dev, struct msi_map map);
+
#endif /* MLX5_DRIVER_H */
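The mlx5_cmd_exec_in()/mlx5_cmd_exec_inout() wrappers above size the command mailboxes from the mlx5_ifc layout name alone. A sketch of a PD allocation written with them; this mirrors how mlx5_core_alloc_pd() is expected to look, but the function here is only an illustration:

static int example_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
{
	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
	int err;

	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
	/* in/out sizes are derived from the "alloc_pd" layout name */
	err = mlx5_cmd_exec_inout(dev, alloc_pd, in, out);
	if (!err)
		*pdn = MLX5_GET(alloc_pd_out, out, pd);
	return err;
}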
diff --git a/include/linux/mlx5/eq.h b/include/linux/mlx5/eq.h
new file mode 100644
index 000000000000..3705a382276b
--- /dev/null
+++ b/include/linux/mlx5/eq.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#ifndef MLX5_CORE_EQ_H
+#define MLX5_CORE_EQ_H
+
+#define MLX5_NUM_CMD_EQE (32)
+#define MLX5_NUM_ASYNC_EQE (0x1000)
+#define MLX5_NUM_SPARE_EQE (0x80)
+
+struct mlx5_eq;
+struct mlx5_irq;
+struct mlx5_core_dev;
+
+struct mlx5_eq_param {
+ int nent;
+ u64 mask[4];
+ struct mlx5_irq *irq;
+};
+
+struct mlx5_eq *
+mlx5_eq_create_generic(struct mlx5_core_dev *dev, struct mlx5_eq_param *param);
+int
+mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+ struct notifier_block *nb);
+void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+ struct notifier_block *nb);
+
+struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc);
+void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm);
+
+/* The HCA will think the queue has overflowed if we
+ * don't tell it we've been processing events. We
+ * create EQs with MLX5_NUM_SPARE_EQE extra entries,
+ * so we must update our consumer index at
+ * least that often.
+ *
+ * mlx5_eq_update_cc() must be called for every EQE processed in the EQ's irq handler
+ */
+static inline u32 mlx5_eq_update_cc(struct mlx5_eq *eq, u32 cc)
+{
+ if (unlikely(cc >= MLX5_NUM_SPARE_EQE)) {
+ mlx5_eq_update_ci(eq, cc, 0);
+ cc = 0;
+ }
+ return cc;
+}
+
+struct mlx5_nb {
+ struct notifier_block nb;
+ u8 event_type;
+};
+
+#define mlx5_nb_cof(ptr, type, member) \
+ (container_of(container_of(ptr, struct mlx5_nb, nb), type, member))
+
+#define MLX5_NB_INIT(name, handler, event) do { \
+ (name)->nb.notifier_call = handler; \
+ (name)->event_type = MLX5_EVENT_TYPE_##event; \
+} while (0)
+
+#endif /* MLX5_CORE_EQ_H */
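The spare-EQE rule above translates into a polling loop of the following shape; handle_eqe() is a hypothetical per-event handler, but the real completion and async EQ handlers in mlx5_core follow the same pattern:

static void handle_eqe(struct mlx5_eqe *eqe)
{
	/* hypothetical per-EQE work, e.g. dispatch on eqe->type */
}

static void example_poll_eq(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	u32 cc = 0;

	while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
		handle_eqe(eqe);
		++cc;
		/* folds cc back to 0 before MLX5_NUM_SPARE_EQE is exceeded */
		cc = mlx5_eq_update_cc(eq, cc);
	}
	mlx5_eq_update_ci(eq, cc, true);	/* final update, re-arm the EQ */
}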
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
new file mode 100644
index 000000000000..e2701ed0200e
--- /dev/null
+++ b/include/linux/mlx5/eswitch.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef _MLX5_ESWITCH_
+#define _MLX5_ESWITCH_
+
+#include <linux/mlx5/driver.h>
+#include <net/devlink.h>
+
+#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
+
+enum {
+ MLX5_ESWITCH_LEGACY,
+ MLX5_ESWITCH_OFFLOADS
+};
+
+enum {
+ REP_ETH,
+ REP_IB,
+ NUM_REP_TYPES,
+};
+
+enum {
+ REP_UNREGISTERED,
+ REP_REGISTERED,
+ REP_LOADED,
+};
+
+enum mlx5_switchdev_event {
+ MLX5_SWITCHDEV_EVENT_PAIR,
+ MLX5_SWITCHDEV_EVENT_UNPAIR,
+};
+
+struct mlx5_eswitch_rep;
+struct mlx5_eswitch_rep_ops {
+ int (*load)(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep);
+ void (*unload)(struct mlx5_eswitch_rep *rep);
+ void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
+ int (*event)(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep,
+ enum mlx5_switchdev_event event,
+ void *data);
+};
+
+struct mlx5_eswitch_rep_data {
+ void *priv;
+ atomic_t state;
+};
+
+struct mlx5_eswitch_rep {
+ struct mlx5_eswitch_rep_data rep_data[NUM_REP_TYPES];
+ u16 vport;
+ u16 vlan;
+ /* Only the IB rep uses vport_index */
+ u16 vport_index;
+ u32 vlan_refcount;
+ struct mlx5_eswitch *esw;
+};
+
+void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
+ const struct mlx5_eswitch_rep_ops *ops,
+ u8 rep_type);
+void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type);
+void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
+ u16 vport_num,
+ u8 rep_type);
+struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
+ u16 vport_num);
+void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type);
+struct mlx5_flow_handle *
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
+ struct mlx5_eswitch *from_esw,
+ struct mlx5_eswitch_rep *rep, u32 sqn);
+
+#ifdef CONFIG_MLX5_ESWITCH
+enum devlink_eswitch_encap_mode
+mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev);
+
+bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw);
+bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw);
+
+/* Reg C0 usage:
+ * Reg C0 = < ESW_PFNUM_BITS(4) | ESW_VPORT_BITS(12) | ESW_REG_C0_OBJ(16) >
+ *
+ * The highest 4 bits of reg c0 are the PF_NUM (range 0-15), the next 12 bits
+ * are a unique non-zero vport id (range 1-4095). The rest (lowest 16 bits) is
+ * left for user data objects managed by a common mapping context.
+ * PFNUM + VPORT comprise the SOURCE_PORT matching.
+ */
+#define ESW_VPORT_BITS 12
+#define ESW_PFNUM_BITS 4
+#define ESW_SOURCE_PORT_METADATA_BITS (ESW_PFNUM_BITS + ESW_VPORT_BITS)
+#define ESW_SOURCE_PORT_METADATA_OFFSET (32 - ESW_SOURCE_PORT_METADATA_BITS)
+#define ESW_REG_C0_USER_DATA_METADATA_BITS (32 - ESW_SOURCE_PORT_METADATA_BITS)
+#define ESW_REG_C0_USER_DATA_METADATA_MASK GENMASK(ESW_REG_C0_USER_DATA_METADATA_BITS - 1, 0)
+
+static inline u32 mlx5_eswitch_get_vport_metadata_mask(void)
+{
+ return GENMASK(31, 32 - ESW_SOURCE_PORT_METADATA_BITS);
+}
+
+u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
+ u16 vport_num);
+u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
+ u16 vport_num);
+
+/* Reg C1 usage:
+ * Reg C1 = < Reserved(1) | ESW_TUN_ID(12) | ESW_TUN_OPTS(11) | ESW_ZONE_ID(8) >
+ *
+ * The highest bit is reserved for other offloads as a marker bit, the next
+ * 12 bits of reg c1 are the encapsulation tunnel id, the next 11 bits are
+ * the encapsulation tunnel options,
+ * and the lowest 8 bits are used for zone id.
+ *
+ * Zone id is used to restore CT flow when packet misses on chain.
+ *
+ * Tunnel id and options are used together to restore the tunnel info metadata
+ * on miss and to support inner header rewrite by means of implicit chain 0
+ * flows.
+ */
+#define ESW_RESERVED_BITS 1
+#define ESW_ZONE_ID_BITS 8
+#define ESW_TUN_OPTS_BITS 11
+#define ESW_TUN_ID_BITS 12
+#define ESW_TUN_OPTS_OFFSET ESW_ZONE_ID_BITS
+#define ESW_TUN_OFFSET ESW_TUN_OPTS_OFFSET
+#define ESW_ZONE_ID_MASK GENMASK(ESW_ZONE_ID_BITS - 1, 0)
+#define ESW_TUN_OPTS_MASK GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, ESW_TUN_OPTS_OFFSET)
+#define ESW_TUN_MASK GENMASK(31 - ESW_RESERVED_BITS, ESW_TUN_OFFSET)
+#define ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT 0 /* 0 is not a valid tunnel id */
+#define ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT
+/* 0x7FF is a reserved mapping */
+#define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
+#define ESW_TUN_SLOW_TABLE_GOTO_VPORT ((ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT << ESW_TUN_OPTS_BITS) | \
+ ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT)
+#define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK
+/* 0x7FE is a reserved mapping for bridge ingress push vlan mark */
+#define ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN (ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT - 1)
+#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN ((ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN << \
+ ESW_TUN_OPTS_BITS) | \
+ ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN)
+#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK \
+ GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \
+ ESW_TUN_OPTS_OFFSET + 1)
+
+u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev);
+u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
+struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw);
+
+#else /* CONFIG_MLX5_ESWITCH */
+
+static inline u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
+{
+ return MLX5_ESWITCH_LEGACY;
+}
+
+static inline enum devlink_eswitch_encap_mode
+mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
+{
+ return DEVLINK_ESWITCH_ENCAP_MODE_NONE;
+}
+
+static inline bool
+mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
+{
+ return false;
+};
+
+static inline bool
+mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
+{
+ return false;
+};
+
+static inline u32
+mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return 0;
+};
+
+static inline u32
+mlx5_eswitch_get_vport_metadata_mask(void)
+{
+ return 0;
+}
+
+static inline u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
+static inline struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_MLX5_ESWITCH */
+
+static inline bool is_mdev_legacy_mode(struct mlx5_core_dev *dev)
+{
+ return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_LEGACY;
+}
+
+static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev)
+{
+ return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS;
+}
+
+#endif
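A worked example of the reg c0 layout, re-derived from the comment above; the helper is illustrative only, and in-tree users go through mlx5_eswitch_get_vport_metadata_for_match()/_for_set():

/* Packs pf_num and vport per the documented layout and shifts the pair
 * into the SOURCE_PORT position. E.g. pf_num = 1, vport = 5 gives
 * md = (1 << 12) | 5 = 0x1005, and the match value 0x1005 << 16 =
 * 0x10050000, to be matched under mlx5_eswitch_get_vport_metadata_mask().
 */
static u32 example_source_port_metadata(u16 pf_num, u16 vport)
{
	u32 md = (pf_num << ESW_VPORT_BITS) | vport;

	return md << ESW_SOURCE_PORT_METADATA_OFFSET;
}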
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index b25e7baa273e..2cb404c7ea13 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -38,12 +38,35 @@
#define MLX5_FS_DEFAULT_FLOW_TAG 0x0
+#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+
+enum mlx5_flow_destination_type {
+ MLX5_FLOW_DESTINATION_TYPE_NONE,
+ MLX5_FLOW_DESTINATION_TYPE_VPORT,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
+ MLX5_FLOW_DESTINATION_TYPE_TIR,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER,
+ MLX5_FLOW_DESTINATION_TYPE_UPLINK,
+ MLX5_FLOW_DESTINATION_TYPE_PORT,
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM,
+ MLX5_FLOW_DESTINATION_TYPE_RANGE,
+ MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE,
+};
+
enum {
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16,
+ MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17,
+ MLX5_FLOW_CONTEXT_ACTION_DECRYPT = 1 << 18,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS = 1 << 19,
};
enum {
- MLX5_FLOW_TABLE_TUNNEL_EN = BIT(0),
+ MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0),
+ MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
+ MLX5_FLOW_TABLE_TERMINATION = BIT(2),
+ MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
+ MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4),
};
#define LEFTOVERS_RULE_NUM 2
@@ -58,57 +81,127 @@ static inline void build_leftovers_ft_param(int *priority,
enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_BYPASS,
+ MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
MLX5_FLOW_NAMESPACE_LAG,
MLX5_FLOW_NAMESPACE_OFFLOADS,
MLX5_FLOW_NAMESPACE_ETHTOOL,
MLX5_FLOW_NAMESPACE_KERNEL,
MLX5_FLOW_NAMESPACE_LEFTOVERS,
MLX5_FLOW_NAMESPACE_ANCHOR,
+ MLX5_FLOW_NAMESPACE_FDB_BYPASS,
MLX5_FLOW_NAMESPACE_FDB,
MLX5_FLOW_NAMESPACE_ESW_EGRESS,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
MLX5_FLOW_NAMESPACE_SNIFFER_RX,
MLX5_FLOW_NAMESPACE_SNIFFER_TX,
+ MLX5_FLOW_NAMESPACE_EGRESS,
+ MLX5_FLOW_NAMESPACE_EGRESS_IPSEC,
+ MLX5_FLOW_NAMESPACE_EGRESS_MACSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_RX,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL,
+ MLX5_FLOW_NAMESPACE_RDMA_TX,
+ MLX5_FLOW_NAMESPACE_PORT_SEL,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS,
+ MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC,
+};
+
+enum {
+ FDB_BYPASS_PATH,
+ FDB_TC_OFFLOAD,
+ FDB_FT_OFFLOAD,
+ FDB_TC_MISS,
+ FDB_BR_OFFLOAD,
+ FDB_SLOW_PATH,
+ FDB_PER_VPORT,
};
+struct mlx5_pkt_reformat;
+struct mlx5_modify_hdr;
+struct mlx5_flow_definer;
struct mlx5_flow_table;
struct mlx5_flow_group;
struct mlx5_flow_namespace;
struct mlx5_flow_handle;
+enum {
+ FLOW_CONTEXT_HAS_TAG = BIT(0),
+};
+
+struct mlx5_flow_context {
+ u32 flags;
+ u32 flow_tag;
+ u32 flow_source;
+};
+
struct mlx5_flow_spec {
u8 match_criteria_enable;
u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
u32 match_value[MLX5_ST_SZ_DW(fte_match_param)];
+ struct mlx5_flow_context flow_context;
+};
+
+enum {
+ MLX5_FLOW_DEST_VPORT_VHCA_ID = BIT(0),
+ MLX5_FLOW_DEST_VPORT_REFORMAT_ID = BIT(1),
+};
+
+enum mlx5_flow_dest_range_field {
+ MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN = 0,
};
struct mlx5_flow_destination {
enum mlx5_flow_destination_type type;
union {
u32 tir_num;
+ u32 ft_num;
struct mlx5_flow_table *ft;
- u32 vport_num;
- struct mlx5_fc *counter;
+ u32 counter_id;
+ struct {
+ u16 num;
+ u16 vhca_id;
+ struct mlx5_pkt_reformat *pkt_reformat;
+ u8 flags;
+ } vport;
+ struct {
+ struct mlx5_flow_table *hit_ft;
+ struct mlx5_flow_table *miss_ft;
+ enum mlx5_flow_dest_range_field field;
+ u32 min;
+ u32 max;
+ } range;
+ u32 sampler_id;
};
};
+struct mod_hdr_tbl {
+ struct mutex lock; /* protects hlist */
+ DECLARE_HASHTABLE(hlist, 8);
+};
+
+struct mlx5_flow_namespace *
+mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, int n);
struct mlx5_flow_namespace *
mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type);
-
-struct mlx5_flow_table *
-mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- int num_flow_table_entries,
- int max_num_groups,
- u32 level,
- u32 flags);
+struct mlx5_flow_namespace *
+mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type,
+ int vport);
struct mlx5_flow_table_attr {
int prio;
int max_fte;
u32 level;
u32 flags;
+ u16 uid;
+ struct mlx5_flow_table *next_ft;
+
+ struct {
+ int max_num_groups;
+ int num_reserved_entries;
+ } autogroup;
};
struct mlx5_flow_table *
@@ -116,10 +209,12 @@ mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table_attr *ft_attr);
struct mlx5_flow_table *
+mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
+ struct mlx5_flow_table_attr *ft_attr);
+
+struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- int num_flow_table_entries,
- u32 level, u16 vport);
+ struct mlx5_flow_table_attr *ft_attr, u16 vport);
struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
struct mlx5_flow_namespace *ns,
int prio, u32 level);
@@ -135,38 +230,110 @@ struct mlx5_flow_group *
mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
+struct mlx5_exe_aso {
+ u32 object_id;
+ u8 type;
+ u8 return_reg_id;
+ union {
+ u32 ctrl_data;
+ struct {
+ u8 meter_idx;
+ u8 init_color;
+ } flow_meter;
+ };
+};
+
+struct mlx5_fs_vlan {
+ u16 ethtype;
+ u16 vid;
+ u8 prio;
+};
+
+#define MLX5_FS_VLAN_DEPTH 2
+
+enum {
+ FLOW_ACT_NO_APPEND = BIT(0),
+ FLOW_ACT_IGNORE_FLOW_LEVEL = BIT(1),
+};
+
struct mlx5_flow_act {
u32 action;
- u32 flow_tag;
- u32 encap_id;
- u32 modify_id;
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_pkt_reformat *pkt_reformat;
+ struct mlx5_flow_act_crypto_params {
+ u8 type;
+ u32 obj_id;
+ } crypto;
+ u32 flags;
+ struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
+ struct ib_counters *counters;
+ struct mlx5_flow_group *fg;
+ struct mlx5_exe_aso exe_aso;
};
#define MLX5_DECLARE_FLOW_ACT(name) \
- struct mlx5_flow_act name = {MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\
- MLX5_FS_DEFAULT_FLOW_TAG, 0, 0}
+ struct mlx5_flow_act name = { .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\
+ .flags = 0, }
/* Single destination per rule.
* Group ID is implied by the match criteria.
*/
struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
- struct mlx5_flow_spec *spec,
+ const struct mlx5_flow_spec *spec,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
- int dest_num);
+ int num_dest);
void mlx5_del_flow_rules(struct mlx5_flow_handle *fr);
int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
struct mlx5_flow_destination *new_dest,
struct mlx5_flow_destination *old_dest);
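
[Editor's note] The spec is now const, the last parameter is renamed num_dest, and per-rule metadata (flow tag, source) moved from mlx5_flow_act into spec->flow_context. A hedged sketch of the common forward-and-count pattern; "ft", "next_ft" and the counter "fc" are assumed context, and the destination-type constants are assumed from the driver's enum:

	/* Hypothetical sketch: tag the flow, forward to next_ft, count. */
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT,
	};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	spec->flow_context.flags    = FLOW_CONTEXT_HAS_TAG;
	spec->flow_context.flow_tag = 0x1234;

	dest[0].type       = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[0].ft         = next_ft;
	dest[1].type       = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter_id = mlx5_fc_id(fc);

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 2);
	kvfree(spec);
	if (IS_ERR(rule))
		return PTR_ERR(rule);
	/* teardown: mlx5_del_flow_rules(rule); */
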
-struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler);
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
+
+/* As mlx5_fc_create(), but doesn't queue the stats refresh thread. */
+struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging);
+
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
+u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse);
+void mlx5_fc_query_cached_raw(struct mlx5_fc *counter,
+ u64 *bytes, u64 *packets, u64 *lastuse);
+int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
+ u64 *packets, u64 *bytes);
+u32 mlx5_fc_id(struct mlx5_fc *counter);
+
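
[Editor's note] With mlx5_flow_rule_counter() removed, counters are referenced by ID (mlx5_fc_id()) and read through the cached-statistics API, which the driver refreshes periodically in the background. A hedged lifecycle sketch, assuming a probed mlx5_core_dev "dev":

	/* Hypothetical sketch: standalone aging counter plus a cached read. */
	u64 bytes, packets, lastuse;
	struct mlx5_fc *fc;

	fc = mlx5_fc_create(dev, true);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	mlx5_fc_query_cached(fc, &bytes, &packets, &lastuse);
	mlx5_fc_destroy(dev, fc);

mlx5_fc_query() is the direct (non-cached) firmware query for callers that need up-to-the-moment values.
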
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
+struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
+ u8 ns_type, u8 num_actions,
+ void *modify_actions);
+void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
+ struct mlx5_modify_hdr *modify_hdr);
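
[Editor's note] The allocator consumes an array of set_action_in-style action words built with the MLX5_SET accessors. A hedged sketch; the union size macro, action-type and field constants are assumed from mlx5_ifc.h and device.h:

	/* Hypothetical sketch: one "set TTL = 1" action, allocated for
	 * the kernel RX namespace. */
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_modify_hdr *modify_hdr;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_OUT_IP_TTL);
	MLX5_SET(set_action_in, action, offset, 0);
	MLX5_SET(set_action_in, action, length, 8);
	MLX5_SET(set_action_in, action, data, 1);

	modify_hdr = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
					      1, action);
	if (IS_ERR(modify_hdr))
		return PTR_ERR(modify_hdr);
	/* pair with flow_act.modify_hdr + MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	 * free with mlx5_modify_header_dealloc(). */
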
+struct mlx5_flow_definer *
+mlx5_create_match_definer(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type ns_type, u16 format_id,
+ u32 *match_mask);
+void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
+ struct mlx5_flow_definer *definer);
+int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer);
+
+struct mlx5_pkt_reformat_params {
+ int type;
+ u8 param_0;
+ u8 param_1;
+ size_t size;
+ void *data;
+};
+
+struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
+ struct mlx5_pkt_reformat_params *params,
+ enum mlx5_flow_namespace_type ns_type);
+void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
+ struct mlx5_pkt_reformat *reformat);
+
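
[Editor's note] Packet reformat contexts replace the old encap-header IDs: allocate once, reference from flow_act.pkt_reformat, free on teardown. A hedged sketch; the reformat-type constant is assumed from the driver's reformat enum, and "encap_hdr"/"encap_sz" hold a prebuilt header:

	/* Hypothetical sketch: allocate an L2-to-VXLAN encap context
	 * for the FDB namespace. */
	struct mlx5_pkt_reformat_params params = {
		.type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN,
		.size = encap_sz,
		.data = encap_hdr,
	};
	struct mlx5_pkt_reformat *reformat;

	reformat = mlx5_packet_reformat_alloc(dev, &params,
					      MLX5_FLOW_NAMESPACE_FDB);
	if (IS_ERR(reformat))
		return PTR_ERR(reformat);
	/* attach via flow_act.pkt_reformat together with
	 * MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; release with
	 * mlx5_packet_reformat_dealloc(dev, reformat). */
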
+u32 mlx5_flow_table_id(struct mlx5_flow_table *ft);
#endif
diff --git a/include/linux/mlx5/fs_helpers.h b/include/linux/mlx5/fs_helpers.h
new file mode 100644
index 000000000000..bc5125bc0561
--- /dev/null
+++ b/include/linux/mlx5/fs_helpers.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _MLX5_FS_HELPERS_
+#define _MLX5_FS_HELPERS_
+
+#include <linux/mlx5/mlx5_ifc.h>
+
+#define MLX5_FS_IPV4_VERSION 4
+#define MLX5_FS_IPV6_VERSION 6
+
+static inline bool _mlx5_fs_is_outer_ipv_flow(struct mlx5_core_dev *mdev,
+ const u32 *match_c,
+ const u32 *match_v, int version)
+{
+ int match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_ip_version);
+ const void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ outer_headers);
+ const void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ outer_headers);
+
+ if (!match_ipv) {
+ u16 ethertype;
+
+ switch (version) {
+ case MLX5_FS_IPV4_VERSION:
+ ethertype = ETH_P_IP;
+ break;
+ case MLX5_FS_IPV6_VERSION:
+ ethertype = ETH_P_IPV6;
+ break;
+ default:
+ return false;
+ }
+
+ return MLX5_GET(fte_match_set_lyr_2_4, headers_c,
+ ethertype) == 0xffff &&
+ MLX5_GET(fte_match_set_lyr_2_4, headers_v,
+ ethertype) == ethertype;
+ }
+
+ return MLX5_GET(fte_match_set_lyr_2_4, headers_c,
+ ip_version) == 0xf &&
+ MLX5_GET(fte_match_set_lyr_2_4, headers_v,
+ ip_version) == version;
+}
+
+static inline bool
+mlx5_fs_is_outer_ipv4_flow(struct mlx5_core_dev *mdev, const u32 *match_c,
+ const u32 *match_v)
+{
+ return _mlx5_fs_is_outer_ipv_flow(mdev, match_c, match_v,
+ MLX5_FS_IPV4_VERSION);
+}
+
+static inline bool
+mlx5_fs_is_outer_ipv6_flow(struct mlx5_core_dev *mdev, const u32 *match_c,
+ const u32 *match_v)
+{
+ return _mlx5_fs_is_outer_ipv_flow(mdev, match_c, match_v,
+ MLX5_FS_IPV6_VERSION);
+}
+
+#endif
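
[Editor's note] These helpers hide a capability split: on parts without the outer_ip_version match field they fall back to an exact ethertype match, so callers get one predicate either way. A hedged usage sketch, assuming a verbs-style spec with match_criteria/match_value arrays:

	/* Hypothetical sketch: only offload when the match pair
	 * unambiguously pins the outer header to IPv4. */
	if (!mlx5_fs_is_outer_ipv4_flow(mdev, spec->match_criteria,
					spec->match_value))
		return -EOPNOTSUPP;
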
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 87869c04849a..dc5e2cb302a5 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -60,18 +60,56 @@ enum {
MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION = 0xa,
MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb,
MLX5_EVENT_TYPE_CODING_FPGA_ERROR = 0x20,
+ MLX5_EVENT_TYPE_CODING_FPGA_QP_ERROR = 0x21
};
enum {
- MLX5_MODIFY_TIR_BITMASK_LRO = 0x0,
- MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1,
- MLX5_MODIFY_TIR_BITMASK_HASH = 0x2,
- MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0,
+ MLX5_SET_HCA_CAP_OP_MOD_ODP = 0x2,
+ MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3,
+ MLX5_SET_HCA_CAP_OP_MOD_ROCE = 0x4,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2 = 0x20,
+ MLX5_SET_HCA_CAP_OP_MOD_PORT_SELECTION = 0x25,
};
enum {
- MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0,
- MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3,
+ MLX5_SHARED_RESOURCE_UID = 0xffff,
+};
+
+enum {
+ MLX5_OBJ_TYPE_SW_ICM = 0x0008,
+ MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT = 0x23,
+};
+
+enum {
+ MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM),
+ MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11),
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13),
+ MLX5_GENERAL_OBJ_TYPES_CAP_HEADER_MODIFY_ARGUMENT =
+ (1ULL << MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT),
+ MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD = (1ULL << 39),
+};
+
+enum {
+ MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
+ MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
+ MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS = 0x001c,
+ MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018,
+ MLX5_OBJ_TYPE_PAGE_TRACK = 0x46,
+ MLX5_OBJ_TYPE_MKEY = 0xff01,
+ MLX5_OBJ_TYPE_QP = 0xff02,
+ MLX5_OBJ_TYPE_PSV = 0xff03,
+ MLX5_OBJ_TYPE_RMP = 0xff04,
+ MLX5_OBJ_TYPE_XRC_SRQ = 0xff05,
+ MLX5_OBJ_TYPE_RQ = 0xff06,
+ MLX5_OBJ_TYPE_SQ = 0xff07,
+ MLX5_OBJ_TYPE_TIR = 0xff08,
+ MLX5_OBJ_TYPE_TIS = 0xff09,
+ MLX5_OBJ_TYPE_DCT = 0xff0a,
+ MLX5_OBJ_TYPE_XRQ = 0xff0b,
+ MLX5_OBJ_TYPE_RQT = 0xff0e,
+ MLX5_OBJ_TYPE_FLOW_COUNTER = 0xff0f,
+ MLX5_OBJ_TYPE_CQ = 0xff10,
};
enum {
@@ -87,11 +125,22 @@ enum {
MLX5_CMD_OP_QUERY_ISSI = 0x10a,
MLX5_CMD_OP_SET_ISSI = 0x10b,
MLX5_CMD_OP_SET_DRIVER_VERSION = 0x10d,
+ MLX5_CMD_OP_QUERY_SF_PARTITION = 0x111,
+ MLX5_CMD_OP_ALLOC_SF = 0x113,
+ MLX5_CMD_OP_DEALLOC_SF = 0x114,
+ MLX5_CMD_OP_SUSPEND_VHCA = 0x115,
+ MLX5_CMD_OP_RESUME_VHCA = 0x116,
+ MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE = 0x117,
+ MLX5_CMD_OP_SAVE_VHCA_STATE = 0x118,
+ MLX5_CMD_OP_LOAD_VHCA_STATE = 0x119,
MLX5_CMD_OP_CREATE_MKEY = 0x200,
MLX5_CMD_OP_QUERY_MKEY = 0x201,
MLX5_CMD_OP_DESTROY_MKEY = 0x202,
MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203,
MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204,
+ MLX5_CMD_OP_ALLOC_MEMIC = 0x205,
+ MLX5_CMD_OP_DEALLOC_MEMIC = 0x206,
+ MLX5_CMD_OP_MODIFY_MEMIC = 0x207,
MLX5_CMD_OP_CREATE_EQ = 0x301,
MLX5_CMD_OP_DESTROY_EQ = 0x302,
MLX5_CMD_OP_QUERY_EQ = 0x303,
@@ -131,6 +180,12 @@ enum {
MLX5_CMD_OP_DESTROY_XRQ = 0x718,
MLX5_CMD_OP_QUERY_XRQ = 0x719,
MLX5_CMD_OP_ARM_XRQ = 0x71a,
+ MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY = 0x725,
+ MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY = 0x726,
+ MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727,
+ MLX5_CMD_OP_RELEASE_XRQ_ERROR = 0x729,
+ MLX5_CMD_OP_MODIFY_XRQ = 0x72a,
+ MLX5_CMD_OP_QUERY_ESW_FUNCTIONS = 0x740,
MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750,
MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751,
MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752,
@@ -143,11 +198,14 @@ enum {
MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763,
MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764,
MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765,
+ MLX5_CMD_OP_QUERY_VNIC_ENV = 0x76f,
MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770,
MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
- MLX5_CMD_OP_SET_RATE_LIMIT = 0x780,
+ MLX5_CMD_OP_SET_MONITOR_COUNTER = 0x774,
+ MLX5_CMD_OP_ARM_MONITOR_COUNTER = 0x775,
+ MLX5_CMD_OP_SET_PP_RATE_LIMIT = 0x780,
MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781,
MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782,
MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783,
@@ -200,6 +258,7 @@ enum {
MLX5_CMD_OP_QUERY_SQ = 0x907,
MLX5_CMD_OP_CREATE_RQ = 0x908,
MLX5_CMD_OP_MODIFY_RQ = 0x909,
+ MLX5_CMD_OP_SET_DELAY_DROP_PARAMS = 0x910,
MLX5_CMD_OP_DESTROY_RQ = 0x90a,
MLX5_CMD_OP_QUERY_RQ = 0x90b,
MLX5_CMD_OP_CREATE_RMP = 0x90c,
@@ -228,18 +287,47 @@ enum {
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a,
MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
- MLX5_CMD_OP_ALLOC_ENCAP_HEADER = 0x93d,
- MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT = 0x93d,
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT = 0x93e,
+ MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT = 0x93f,
MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940,
MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941,
+ MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT = 0x942,
MLX5_CMD_OP_FPGA_CREATE_QP = 0x960,
MLX5_CMD_OP_FPGA_MODIFY_QP = 0x961,
MLX5_CMD_OP_FPGA_QUERY_QP = 0x962,
MLX5_CMD_OP_FPGA_DESTROY_QP = 0x963,
MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS = 0x964,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT = 0xa00,
+ MLX5_CMD_OP_MODIFY_GENERAL_OBJECT = 0xa01,
+ MLX5_CMD_OP_QUERY_GENERAL_OBJECT = 0xa02,
+ MLX5_CMD_OP_DESTROY_GENERAL_OBJECT = 0xa03,
+ MLX5_CMD_OP_CREATE_UCTX = 0xa04,
+ MLX5_CMD_OP_DESTROY_UCTX = 0xa06,
+ MLX5_CMD_OP_CREATE_UMEM = 0xa08,
+ MLX5_CMD_OP_DESTROY_UMEM = 0xa0a,
+ MLX5_CMD_OP_SYNC_STEERING = 0xb00,
+ MLX5_CMD_OP_QUERY_VHCA_STATE = 0xb0d,
+ MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e,
+ MLX5_CMD_OP_SYNC_CRYPTO = 0xb12,
MLX5_CMD_OP_MAX
};
+/* Valid range for general commands that don't operate on an object */
+enum {
+ MLX5_CMD_OP_GENERAL_START = 0xb00,
+ MLX5_CMD_OP_GENERAL_END = 0xd00,
+};
+
+enum {
+ MLX5_FT_NIC_RX_2_NIC_RX_RDMA = BIT(0),
+ MLX5_FT_NIC_TX_RDMA_2_NIC_TX = BIT(1),
+};
+
+enum {
+ MLX5_CMD_OP_MOD_UPDATE_HEADER_MODIFY_ARGUMENT = 0x1,
+};
+
struct mlx5_ifc_flow_table_fields_supported_bits {
u8 outer_dmac[0x1];
u8 outer_smac[0x1];
@@ -267,7 +355,11 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 outer_gre_protocol[0x1];
u8 outer_gre_key[0x1];
u8 outer_vxlan_vni[0x1];
- u8 reserved_at_1a[0x5];
+ u8 outer_geneve_vni[0x1];
+ u8 outer_geneve_oam[0x1];
+ u8 outer_geneve_protocol_type[0x1];
+ u8 outer_geneve_opt_len[0x1];
+ u8 source_vhca_port[0x1];
u8 source_eswitch_port[0x1];
u8 inner_dmac[0x1];
@@ -295,7 +387,39 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 inner_tcp_flags[0x1];
u8 reserved_at_37[0x9];
- u8 reserved_at_40[0x40];
+ u8 geneve_tlv_option_0_data[0x1];
+ u8 geneve_tlv_option_0_exist[0x1];
+ u8 reserved_at_42[0x3];
+ u8 outer_first_mpls_over_udp[0x4];
+ u8 outer_first_mpls_over_gre[0x4];
+ u8 inner_first_mpls[0x4];
+ u8 outer_first_mpls[0x4];
+ u8 reserved_at_55[0x2];
+ u8 outer_esp_spi[0x1];
+ u8 reserved_at_58[0x2];
+ u8 bth_dst_qp[0x1];
+ u8 reserved_at_5b[0x5];
+
+ u8 reserved_at_60[0x18];
+ u8 metadata_reg_c_7[0x1];
+ u8 metadata_reg_c_6[0x1];
+ u8 metadata_reg_c_5[0x1];
+ u8 metadata_reg_c_4[0x1];
+ u8 metadata_reg_c_3[0x1];
+ u8 metadata_reg_c_2[0x1];
+ u8 metadata_reg_c_1[0x1];
+ u8 metadata_reg_c_0[0x1];
+};
+
+/* Table 2170 - Flow Table Fields Supported 2 Format */
+struct mlx5_ifc_flow_table_fields_supported_2_bits {
+ u8 reserved_at_0[0xe];
+ u8 bth_opcode[0x1];
+ u8 reserved_at_f[0x1];
+ u8 tunnel_header_0_1[0x1];
+ u8 reserved_at_11[0xf];
+
+ u8 reserved_at_20[0x60];
};
struct mlx5_ifc_flow_table_prop_layout_bits {
@@ -306,22 +430,60 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 modify_root[0x1];
u8 identified_miss_table_mode[0x1];
u8 flow_table_modify[0x1];
- u8 encap[0x1];
+ u8 reformat[0x1];
u8 decap[0x1];
- u8 reserved_at_9[0x17];
+ u8 reserved_at_9[0x1];
+ u8 pop_vlan[0x1];
+ u8 push_vlan[0x1];
+ u8 reserved_at_c[0x1];
+ u8 pop_vlan_2[0x1];
+ u8 push_vlan_2[0x1];
+ u8 reformat_and_vlan_action[0x1];
+ u8 reserved_at_10[0x1];
+ u8 sw_owner[0x1];
+ u8 reformat_l3_tunnel_to_l2[0x1];
+ u8 reformat_l2_to_l3_tunnel[0x1];
+ u8 reformat_and_modify_action[0x1];
+ u8 ignore_flow_level[0x1];
+ u8 reserved_at_16[0x1];
+ u8 table_miss_action_domain[0x1];
+ u8 termination_table[0x1];
+ u8 reformat_and_fwd_to_table[0x1];
+ u8 reserved_at_1a[0x2];
+ u8 ipsec_encrypt[0x1];
+ u8 ipsec_decrypt[0x1];
+ u8 sw_owner_v2[0x1];
+ u8 reserved_at_1f[0x1];
- u8 reserved_at_20[0x2];
+ u8 termination_table_raw_traffic[0x1];
+ u8 reserved_at_21[0x1];
u8 log_max_ft_size[0x6];
u8 log_max_modify_header_context[0x8];
u8 max_modify_header_actions[0x8];
u8 max_ft_level[0x8];
- u8 reserved_at_40[0x20];
+ u8 reformat_add_esp_trasport[0x1];
+ u8 reformat_l2_to_l3_esp_tunnel[0x1];
+ u8 reserved_at_42[0x1];
+ u8 reformat_del_esp_trasport[0x1];
+ u8 reformat_l3_esp_tunnel_to_l2[0x1];
+ u8 reserved_at_45[0x1];
+ u8 execute_aso[0x1];
+ u8 reserved_at_47[0x19];
- u8 reserved_at_60[0x18];
+ u8 reserved_at_60[0x2];
+ u8 reformat_insert[0x1];
+ u8 reformat_remove[0x1];
+ u8 macsec_encrypt[0x1];
+ u8 macsec_decrypt[0x1];
+ u8 reserved_at_66[0x2];
+ u8 reformat_add_macsec[0x1];
+ u8 reformat_remove_macsec[0x1];
+ u8 reserved_at_6a[0xe];
u8 log_max_ft_num[0x8];
- u8 reserved_at_80[0x18];
+ u8 reserved_at_80[0x10];
+ u8 log_max_flow_counter[0x8];
u8 log_max_destination[0x8];
u8 reserved_at_a0[0x18];
@@ -385,7 +547,10 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 tcp_sport[0x10];
u8 tcp_dport[0x10];
- u8 reserved_at_c0[0x18];
+ u8 reserved_at_c0[0x10];
+ u8 ipv4_ihl[0x4];
+ u8 reserved_at_c4[0x4];
+
u8 ttl_hoplimit[0x8];
u8 udp_sport[0x10];
@@ -396,11 +561,25 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
};
+struct mlx5_ifc_nvgre_key_bits {
+ u8 hi[0x18];
+ u8 lo[0x8];
+};
+
+union mlx5_ifc_gre_key_bits {
+ struct mlx5_ifc_nvgre_key_bits nvgre;
+ u8 key[0x20];
+};
+
struct mlx5_ifc_fte_match_set_misc_bits {
- u8 reserved_at_0[0x8];
+ u8 gre_c_present[0x1];
+ u8 reserved_at_1[0x1];
+ u8 gre_k_present[0x1];
+ u8 gre_s_present[0x1];
+ u8 source_vhca_port[0x4];
u8 source_sqn[0x18];
- u8 reserved_at_20[0x10];
+ u8 source_eswitch_owner_vhca_id[0x10];
u8 source_port[0x10];
u8 outer_second_prio[0x3];
@@ -417,13 +596,15 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_64[0xc];
u8 gre_protocol[0x10];
- u8 gre_key_h[0x18];
- u8 gre_key_l[0x8];
+ union mlx5_ifc_gre_key_bits gre_key;
u8 vxlan_vni[0x18];
- u8 reserved_at_b8[0x8];
+ u8 bth_opcode[0x8];
- u8 reserved_at_c0[0x20];
+ u8 geneve_vni[0x18];
+ u8 reserved_at_d8[0x6];
+ u8 geneve_tlv_option_0_exist[0x1];
+ u8 geneve_oam[0x1];
u8 reserved_at_e0[0xc];
u8 outer_ipv6_flow_label[0x14];
@@ -431,7 +612,140 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_100[0xc];
u8 inner_ipv6_flow_label[0x14];
- u8 reserved_at_120[0xe0];
+ u8 reserved_at_120[0xa];
+ u8 geneve_opt_len[0x6];
+ u8 geneve_protocol_type[0x10];
+
+ u8 reserved_at_140[0x8];
+ u8 bth_dst_qp[0x18];
+ u8 reserved_at_160[0x20];
+ u8 outer_esp_spi[0x20];
+ u8 reserved_at_1a0[0x60];
+};
+
+struct mlx5_ifc_fte_match_mpls_bits {
+ u8 mpls_label[0x14];
+ u8 mpls_exp[0x3];
+ u8 mpls_s_bos[0x1];
+ u8 mpls_ttl[0x8];
+};
+
+struct mlx5_ifc_fte_match_set_misc2_bits {
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls;
+
+ struct mlx5_ifc_fte_match_mpls_bits inner_first_mpls;
+
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_gre;
+
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_udp;
+
+ u8 metadata_reg_c_7[0x20];
+
+ u8 metadata_reg_c_6[0x20];
+
+ u8 metadata_reg_c_5[0x20];
+
+ u8 metadata_reg_c_4[0x20];
+
+ u8 metadata_reg_c_3[0x20];
+
+ u8 metadata_reg_c_2[0x20];
+
+ u8 metadata_reg_c_1[0x20];
+
+ u8 metadata_reg_c_0[0x20];
+
+ u8 metadata_reg_a[0x20];
+
+ u8 reserved_at_1a0[0x8];
+
+ u8 macsec_syndrome[0x8];
+ u8 ipsec_syndrome[0x8];
+ u8 reserved_at_1b8[0x8];
+
+ u8 reserved_at_1c0[0x40];
+};
+
+struct mlx5_ifc_fte_match_set_misc3_bits {
+ u8 inner_tcp_seq_num[0x20];
+
+ u8 outer_tcp_seq_num[0x20];
+
+ u8 inner_tcp_ack_num[0x20];
+
+ u8 outer_tcp_ack_num[0x20];
+
+ u8 reserved_at_80[0x8];
+ u8 outer_vxlan_gpe_vni[0x18];
+
+ u8 outer_vxlan_gpe_next_protocol[0x8];
+ u8 outer_vxlan_gpe_flags[0x8];
+ u8 reserved_at_b0[0x10];
+
+ u8 icmp_header_data[0x20];
+
+ u8 icmpv6_header_data[0x20];
+
+ u8 icmp_type[0x8];
+ u8 icmp_code[0x8];
+ u8 icmpv6_type[0x8];
+ u8 icmpv6_code[0x8];
+
+ u8 geneve_tlv_option_0_data[0x20];
+
+ u8 gtpu_teid[0x20];
+
+ u8 gtpu_msg_type[0x8];
+ u8 gtpu_msg_flags[0x8];
+ u8 reserved_at_170[0x10];
+
+ u8 gtpu_dw_2[0x20];
+
+ u8 gtpu_first_ext_dw_0[0x20];
+
+ u8 gtpu_dw_0[0x20];
+
+ u8 reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_fte_match_set_misc4_bits {
+ u8 prog_sample_field_value_0[0x20];
+
+ u8 prog_sample_field_id_0[0x20];
+
+ u8 prog_sample_field_value_1[0x20];
+
+ u8 prog_sample_field_id_1[0x20];
+
+ u8 prog_sample_field_value_2[0x20];
+
+ u8 prog_sample_field_id_2[0x20];
+
+ u8 prog_sample_field_value_3[0x20];
+
+ u8 prog_sample_field_id_3[0x20];
+
+ u8 reserved_at_100[0x100];
+};
+
+struct mlx5_ifc_fte_match_set_misc5_bits {
+ u8 macsec_tag_0[0x20];
+
+ u8 macsec_tag_1[0x20];
+
+ u8 macsec_tag_2[0x20];
+
+ u8 macsec_tag_3[0x20];
+
+ u8 tunnel_header_0[0x20];
+
+ u8 tunnel_header_1[0x20];
+
+ u8 tunnel_header_2[0x20];
+
+ u8 tunnel_header_3[0x20];
+
+ u8 reserved_at_100[0x100];
};
struct mlx5_ifc_cmd_pas_bits {
@@ -496,7 +810,7 @@ struct mlx5_ifc_ads_bits {
u8 dei_cfi[0x1];
u8 eth_prio[0x3];
u8 sl[0x4];
- u8 port[0x8];
+ u8 vhca_port_num[0x8];
u8 rmac_47_32[0x10];
u8 rmac_31_0[0x20];
@@ -506,25 +820,91 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
u8 nic_rx_multi_path_tirs[0x1];
u8 nic_rx_multi_path_tirs_fts[0x1];
u8 allow_sniffer_and_nic_rx_shared_tir[0x1];
- u8 reserved_at_3[0x1fd];
+ u8 reserved_at_3[0x4];
+ u8 sw_owner_reformat_supported[0x1];
+ u8 reserved_at_8[0x18];
+
+ u8 encap_general_header[0x1];
+ u8 reserved_at_21[0xa];
+ u8 log_max_packet_reformat_context[0x5];
+ u8 reserved_at_30[0x6];
+ u8 max_encap_header_size[0xa];
+ u8 reserved_at_40[0x1c0];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
- u8 reserved_at_400[0x200];
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_rdma;
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer;
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit;
- u8 reserved_at_a00[0x200];
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_rdma;
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
- u8 reserved_at_e00[0x7200];
+ u8 reserved_at_e00[0x700];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive_rdma;
+
+ u8 reserved_at_1580[0x280];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_transmit_rdma;
+
+ u8 reserved_at_1880[0x780];
+
+ u8 sw_steering_nic_rx_action_drop_icm_address[0x40];
+
+ u8 sw_steering_nic_tx_action_drop_icm_address[0x40];
+
+ u8 sw_steering_nic_tx_action_allow_icm_address[0x40];
+
+ u8 reserved_at_20c0[0x5f40];
+};
+
+struct mlx5_ifc_port_selection_cap_bits {
+ u8 reserved_at_0[0x10];
+ u8 port_select_flow_table[0x1];
+ u8 reserved_at_11[0x1];
+ u8 port_select_flow_table_bypass[0x1];
+ u8 reserved_at_13[0xd];
+
+ u8 reserved_at_20[0x1e0];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_port_selection;
+
+ u8 reserved_at_400[0x7c00];
+};
+
+enum {
+ MLX5_FDB_TO_VPORT_REG_C_0 = 0x01,
+ MLX5_FDB_TO_VPORT_REG_C_1 = 0x02,
+ MLX5_FDB_TO_VPORT_REG_C_2 = 0x04,
+ MLX5_FDB_TO_VPORT_REG_C_3 = 0x08,
+ MLX5_FDB_TO_VPORT_REG_C_4 = 0x10,
+ MLX5_FDB_TO_VPORT_REG_C_5 = 0x20,
+ MLX5_FDB_TO_VPORT_REG_C_6 = 0x40,
+ MLX5_FDB_TO_VPORT_REG_C_7 = 0x80,
};
struct mlx5_ifc_flow_table_eswitch_cap_bits {
- u8 reserved_at_0[0x200];
+ u8 fdb_to_vport_reg_c_id[0x8];
+ u8 reserved_at_8[0x5];
+ u8 fdb_uplink_hairpin[0x1];
+ u8 fdb_multi_path_any_table_limit_regc[0x1];
+ u8 reserved_at_f[0x3];
+ u8 fdb_multi_path_any_table[0x1];
+ u8 reserved_at_13[0x2];
+ u8 fdb_modify_header_fwd_to_table[0x1];
+ u8 fdb_ipv4_ttl_modify[0x1];
+ u8 flow_source[0x1];
+ u8 reserved_at_18[0x2];
+ u8 multi_fdb_encap[0x1];
+ u8 egress_acl_forward_to_vport[0x1];
+ u8 fdb_multi_path_to_table[0x1];
+ u8 reserved_at_1d[0x3];
+
+ u8 reserved_at_20[0x1e0];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
@@ -532,7 +912,28 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress;
- u8 reserved_at_800[0x7800];
+ u8 reserved_at_800[0xC00];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_esw_fdb;
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_bitmask_support_2_esw_fdb;
+
+ u8 reserved_at_1500[0x300];
+
+ u8 sw_steering_fdb_action_drop_icm_address_rx[0x40];
+
+ u8 sw_steering_fdb_action_drop_icm_address_tx[0x40];
+
+ u8 sw_steering_uplink_icm_address_rx[0x40];
+
+ u8 sw_steering_uplink_icm_address_tx[0x40];
+
+ u8 reserved_at_1900[0x6700];
+};
+
+enum {
+ MLX5_COUNTER_SOURCE_ESWITCH = 0x0,
+ MLX5_COUNTER_FLOW_ESWITCH = 0x1,
};
struct mlx5_ifc_e_switch_cap_bits {
@@ -541,18 +942,34 @@ struct mlx5_ifc_e_switch_cap_bits {
u8 vport_svlan_insert[0x1];
u8 vport_cvlan_insert_if_not_exist[0x1];
u8 vport_cvlan_insert_overwrite[0x1];
- u8 reserved_at_5[0x19];
+ u8 reserved_at_5[0x1];
+ u8 vport_cvlan_insert_always[0x1];
+ u8 esw_shared_ingress_acl[0x1];
+ u8 esw_uplink_ingress_acl[0x1];
+ u8 root_ft_on_other_esw[0x1];
+ u8 reserved_at_a[0xf];
+ u8 esw_functions_changed[0x1];
+ u8 reserved_at_1a[0x1];
+ u8 ecpf_vport_exists[0x1];
+ u8 counter_eswitch_affinity[0x1];
+ u8 merged_eswitch[0x1];
u8 nic_vport_node_guid_modify[0x1];
u8 nic_vport_port_guid_modify[0x1];
u8 vxlan_encap_decap[0x1];
u8 nvgre_encap_decap[0x1];
- u8 reserved_at_22[0x9];
- u8 log_max_encap_headers[0x5];
+ u8 reserved_at_22[0x1];
+ u8 log_max_fdb_encap_uplink[0x5];
+ u8 reserved_at_21[0x3];
+ u8 log_max_packet_reformat_context[0x5];
u8 reserved_2b[0x6];
u8 max_encap_header_size[0xa];
- u8 reserved_40[0x7c0];
+ u8 reserved_at_40[0xb];
+ u8 log_max_esw_sf[0x5];
+ u8 esw_sf_base_id[0x10];
+
+ u8 reserved_at_60[0x7a0];
};
@@ -561,9 +978,20 @@ struct mlx5_ifc_qos_cap_bits {
u8 esw_scheduling[0x1];
u8 esw_bw_share[0x1];
u8 esw_rate_limit[0x1];
- u8 reserved_at_4[0x1c];
-
- u8 reserved_at_20[0x20];
+ u8 reserved_at_4[0x1];
+ u8 packet_pacing_burst_bound[0x1];
+ u8 packet_pacing_typical_size[0x1];
+ u8 reserved_at_7[0x1];
+ u8 nic_sq_scheduling[0x1];
+ u8 nic_bw_share[0x1];
+ u8 nic_rate_limit[0x1];
+ u8 packet_pacing_uid[0x1];
+ u8 log_esw_max_sched_depth[0x4];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0xb];
+ u8 log_max_qos_nic_queue_group[0x5];
+ u8 reserved_at_30[0x10];
u8 packet_pacing_max_rate[0x20];
@@ -580,7 +1008,31 @@ struct mlx5_ifc_qos_cap_bits {
u8 max_tsar_bw_share[0x20];
- u8 reserved_at_100[0x700];
+ u8 reserved_at_100[0x20];
+
+ u8 reserved_at_120[0x3];
+ u8 log_meter_aso_granularity[0x5];
+ u8 reserved_at_128[0x3];
+ u8 log_meter_aso_max_alloc[0x5];
+ u8 reserved_at_130[0x3];
+ u8 log_max_num_meter_aso[0x5];
+ u8 reserved_at_138[0x8];
+
+ u8 reserved_at_140[0x6c0];
+};
+
+struct mlx5_ifc_debug_cap_bits {
+ u8 core_dump_general[0x1];
+ u8 core_dump_qp[0x1];
+ u8 reserved_at_2[0x7];
+ u8 resource_dump[0x1];
+ u8 reserved_at_a[0x16];
+
+ u8 reserved_at_20[0x2];
+ u8 stall_detect[0x1];
+ u8 reserved_at_23[0x1d];
+
+ u8 reserved_at_40[0x7c0];
};
struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
@@ -599,16 +1051,32 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 rss_ind_tbl_cap[0x4];
u8 reg_umr_sq[0x1];
u8 scatter_fcs[0x1];
- u8 reserved_at_1a[0x1];
+ u8 enhanced_multi_pkt_send_wqe[0x1];
u8 tunnel_lso_const_out_ip_id[0x1];
- u8 reserved_at_1c[0x2];
- u8 tunnel_statless_gre[0x1];
+ u8 tunnel_lro_gre[0x1];
+ u8 tunnel_lro_vxlan[0x1];
+ u8 tunnel_stateless_gre[0x1];
u8 tunnel_stateless_vxlan[0x1];
u8 swp[0x1];
u8 swp_csum[0x1];
u8 swp_lso[0x1];
- u8 reserved_at_23[0x1d];
+ u8 cqe_checksum_full[0x1];
+ u8 tunnel_stateless_geneve_tx[0x1];
+ u8 tunnel_stateless_mpls_over_udp[0x1];
+ u8 tunnel_stateless_mpls_over_gre[0x1];
+ u8 tunnel_stateless_vxlan_gpe[0x1];
+ u8 tunnel_stateless_ipv4_over_vxlan[0x1];
+ u8 tunnel_stateless_ip_over_ip[0x1];
+ u8 insert_trailer[0x1];
+ u8 reserved_at_2b[0x1];
+ u8 tunnel_stateless_ip_over_ip_rx[0x1];
+ u8 tunnel_stateless_ip_over_ip_tx[0x1];
+ u8 reserved_at_2e[0x2];
+ u8 max_vxlan_udp_ports[0x8];
+ u8 reserved_at_38[0x6];
+ u8 max_geneve_opt_len[0x1];
+ u8 tunnel_stateless_geneve_rx[0x1];
u8 reserved_at_40[0x10];
u8 lro_min_mss_size[0x10];
@@ -620,9 +1088,22 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 reserved_at_200[0x600];
};
+enum {
+ MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0,
+ MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1,
+ MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2,
+};
+
struct mlx5_ifc_roce_cap_bits {
u8 roce_apm[0x1];
- u8 reserved_at_1[0x1f];
+ u8 reserved_at_1[0x3];
+ u8 sw_r_roce_src_udp_port[0x1];
+ u8 fl_rc_qp_when_roce_disabled[0x1];
+ u8 fl_rc_qp_when_roce_enabled[0x1];
+ u8 reserved_at_7[0x1];
+ u8 qp_ooo_transmit_default[0x1];
+ u8 reserved_at_9[0x15];
+ u8 qp_ts_format[0x2];
u8 reserved_at_20[0x60];
@@ -643,6 +1124,128 @@ struct mlx5_ifc_roce_cap_bits {
u8 reserved_at_100[0x700];
};
+struct mlx5_ifc_sync_steering_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0xc0];
+};
+
+struct mlx5_ifc_sync_steering_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_sync_crypto_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x10];
+ u8 crypto_type[0x10];
+
+ u8 reserved_at_80[0x80];
+};
+
+struct mlx5_ifc_sync_crypto_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_device_mem_cap_bits {
+ u8 memic[0x1];
+ u8 reserved_at_1[0x1f];
+
+ u8 reserved_at_20[0xb];
+ u8 log_min_memic_alloc_size[0x5];
+ u8 reserved_at_30[0x8];
+ u8 log_max_memic_addr_alignment[0x8];
+
+ u8 memic_bar_start_addr[0x40];
+
+ u8 memic_bar_size[0x20];
+
+ u8 max_memic_size[0x20];
+
+ u8 steering_sw_icm_start_address[0x40];
+
+ u8 reserved_at_100[0x8];
+ u8 log_header_modify_sw_icm_size[0x8];
+ u8 reserved_at_110[0x2];
+ u8 log_sw_icm_alloc_granularity[0x6];
+ u8 log_steering_sw_icm_size[0x8];
+
+ u8 reserved_at_120[0x18];
+ u8 log_header_modify_pattern_sw_icm_size[0x8];
+
+ u8 header_modify_sw_icm_start_address[0x40];
+
+ u8 reserved_at_180[0x40];
+
+ u8 header_modify_pattern_sw_icm_start_address[0x40];
+
+ u8 memic_operations[0x20];
+
+ u8 reserved_at_220[0x5e0];
+};
+
+struct mlx5_ifc_device_event_cap_bits {
+ u8 user_affiliated_events[4][0x40];
+
+ u8 user_unaffiliated_events[4][0x40];
+};
+
+struct mlx5_ifc_virtio_emulation_cap_bits {
+ u8 desc_tunnel_offload_type[0x1];
+ u8 eth_frame_offload_type[0x1];
+ u8 virtio_version_1_0[0x1];
+ u8 device_features_bits_mask[0xd];
+ u8 event_mode[0x8];
+ u8 virtio_queue_type[0x8];
+
+ u8 max_tunnel_desc[0x10];
+ u8 reserved_at_30[0x3];
+ u8 log_doorbell_stride[0x5];
+ u8 reserved_at_38[0x3];
+ u8 log_doorbell_bar_size[0x5];
+
+ u8 doorbell_bar_offset[0x40];
+
+ u8 max_emulated_devices[0x8];
+ u8 max_num_virtio_queues[0x18];
+
+ u8 reserved_at_a0[0x60];
+
+ u8 umem_1_buffer_param_a[0x20];
+
+ u8 umem_1_buffer_param_b[0x20];
+
+ u8 umem_2_buffer_param_a[0x20];
+
+ u8 umem_2_buffer_param_b[0x20];
+
+ u8 umem_3_buffer_param_a[0x20];
+
+ u8 umem_3_buffer_param_b[0x20];
+
+ u8 reserved_at_1c0[0x640];
+};
+
enum {
MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0,
MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2,
@@ -704,7 +1307,11 @@ struct mlx5_ifc_odp_cap_bits {
struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps;
- u8 reserved_at_e0[0x720];
+ struct mlx5_ifc_odp_per_transport_service_cap_bits xrc_odp_caps;
+
+ struct mlx5_ifc_odp_per_transport_service_cap_bits dc_odp_caps;
+
+ u8 reserved_at_120[0x6E0];
};
struct mlx5_ifc_calc_op {
@@ -731,13 +1338,59 @@ struct mlx5_ifc_vector_calc_cap_bits {
struct mlx5_ifc_calc_op calc2;
struct mlx5_ifc_calc_op calc3;
- u8 reserved_at_e0[0x720];
+ u8 reserved_at_c0[0x720];
+};
+
+struct mlx5_ifc_tls_cap_bits {
+ u8 tls_1_2_aes_gcm_128[0x1];
+ u8 tls_1_3_aes_gcm_128[0x1];
+ u8 tls_1_2_aes_gcm_256[0x1];
+ u8 tls_1_3_aes_gcm_256[0x1];
+ u8 reserved_at_4[0x1c];
+
+ u8 reserved_at_20[0x7e0];
+};
+
+struct mlx5_ifc_ipsec_cap_bits {
+ u8 ipsec_full_offload[0x1];
+ u8 ipsec_crypto_offload[0x1];
+ u8 ipsec_esn[0x1];
+ u8 ipsec_crypto_esp_aes_gcm_256_encrypt[0x1];
+ u8 ipsec_crypto_esp_aes_gcm_128_encrypt[0x1];
+ u8 ipsec_crypto_esp_aes_gcm_256_decrypt[0x1];
+ u8 ipsec_crypto_esp_aes_gcm_128_decrypt[0x1];
+ u8 reserved_at_7[0x4];
+ u8 log_max_ipsec_offload[0x5];
+ u8 reserved_at_10[0x10];
+
+ u8 min_log_ipsec_full_replay_window[0x8];
+ u8 max_log_ipsec_full_replay_window[0x8];
+ u8 reserved_at_30[0x7d0];
+};
+
+struct mlx5_ifc_macsec_cap_bits {
+ u8 macsec_epn[0x1];
+ u8 reserved_at_1[0x2];
+ u8 macsec_crypto_esp_aes_gcm_256_encrypt[0x1];
+ u8 macsec_crypto_esp_aes_gcm_128_encrypt[0x1];
+ u8 macsec_crypto_esp_aes_gcm_256_decrypt[0x1];
+ u8 macsec_crypto_esp_aes_gcm_128_decrypt[0x1];
+ u8 reserved_at_7[0x4];
+ u8 log_max_macsec_offload[0x5];
+ u8 reserved_at_10[0x10];
+
+ u8 min_log_macsec_full_replay_window[0x8];
+ u8 max_log_macsec_full_replay_window[0x8];
+ u8 reserved_at_30[0x10];
+
+ u8 reserved_at_40[0x7c0];
};
enum {
MLX5_WQ_TYPE_LINKED_LIST = 0x0,
MLX5_WQ_TYPE_CYCLIC = 0x1,
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ = 0x2,
+ MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ = 0x3,
};
enum {
@@ -784,27 +1437,111 @@ enum {
MLX5_CAP_UMR_FENCE_NONE = 0x2,
};
+enum {
+ MLX5_FLEX_PARSER_GENEVE_ENABLED = 1 << 3,
+ MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED = 1 << 4,
+ MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED = 1 << 5,
+ MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7,
+ MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8,
+ MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9,
+ MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED = 1 << 10,
+ MLX5_FLEX_PARSER_GTPU_ENABLED = 1 << 11,
+ MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED = 1 << 16,
+ MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED = 1 << 17,
+ MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED = 1 << 18,
+ MLX5_FLEX_PARSER_GTPU_TEID_ENABLED = 1 << 19,
+};
+
+enum {
+ MLX5_UCTX_CAP_RAW_TX = 1UL << 0,
+ MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1,
+};
+
+#define MLX5_FC_BULK_SIZE_FACTOR 128
+
+enum mlx5_fc_bulk_alloc_bitmask {
+ MLX5_FC_BULK_128 = (1 << 0),
+ MLX5_FC_BULK_256 = (1 << 1),
+ MLX5_FC_BULK_512 = (1 << 2),
+ MLX5_FC_BULK_1024 = (1 << 3),
+ MLX5_FC_BULK_2048 = (1 << 4),
+ MLX5_FC_BULK_4096 = (1 << 5),
+ MLX5_FC_BULK_8192 = (1 << 6),
+ MLX5_FC_BULK_16384 = (1 << 7),
+};
+
+#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
+
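
[Editor's note] The bulk-allocation bitmask doubles as a size encoding: bit N corresponds to 128 << N counters, which is exactly what the macro computes from the raw enum value. A hypothetical compile-time check illustrating the arithmetic:

	/* MLX5_FC_BULK_256 is (1 << 1) == 2, so
	 * MLX5_FC_BULK_NUM_FCS(MLX5_FC_BULK_256) == 128 * 2 == 256. */
	BUILD_BUG_ON(MLX5_FC_BULK_NUM_FCS(MLX5_FC_BULK_256) != 256);
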
+#define MLX5_FT_MAX_MULTIPATH_LEVEL 63
+
+enum {
+ MLX5_STEERING_FORMAT_CONNECTX_5 = 0,
+ MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
+ MLX5_STEERING_FORMAT_CONNECTX_7 = 2,
+};
+
struct mlx5_ifc_cmd_hca_cap_bits {
- u8 reserved_at_0[0x80];
+ u8 reserved_at_0[0x10];
+ u8 shared_object_to_user_object_allowed[0x1];
+ u8 reserved_at_13[0xe];
+ u8 vhca_resource_manager[0x1];
+
+ u8 hca_cap_2[0x1];
+ u8 create_lag_when_not_master_up[0x1];
+ u8 dtor[0x1];
+ u8 event_on_vhca_state_teardown_request[0x1];
+ u8 event_on_vhca_state_in_use[0x1];
+ u8 event_on_vhca_state_active[0x1];
+ u8 event_on_vhca_state_allocated[0x1];
+ u8 event_on_vhca_state_invalid[0x1];
+ u8 reserved_at_28[0x8];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_40[0x40];
u8 log_max_srq_sz[0x8];
u8 log_max_qp_sz[0x8];
- u8 reserved_at_90[0xb];
+ u8 event_cap[0x1];
+ u8 reserved_at_91[0x2];
+ u8 isolate_vl_tc_new[0x1];
+ u8 reserved_at_94[0x4];
+ u8 prio_tag_required[0x1];
+ u8 reserved_at_99[0x2];
u8 log_max_qp[0x5];
- u8 reserved_at_a0[0xb];
+ u8 reserved_at_a0[0x3];
+ u8 ece_support[0x1];
+ u8 reserved_at_a4[0x5];
+ u8 reg_c_preserve[0x1];
+ u8 reserved_at_aa[0x1];
u8 log_max_srq[0x5];
- u8 reserved_at_b0[0x10];
-
- u8 reserved_at_c0[0x8];
+ u8 reserved_at_b0[0x1];
+ u8 uplink_follow[0x1];
+ u8 ts_cqe_to_dest_cqn[0x1];
+ u8 reserved_at_b3[0x6];
+ u8 go_back_n[0x1];
+ u8 shampo[0x1];
+ u8 reserved_at_bb[0x5];
+
+ u8 max_sgl_for_optimized_performance[0x8];
u8 log_max_cq_sz[0x8];
- u8 reserved_at_d0[0xb];
+ u8 relaxed_ordering_write_umr[0x1];
+ u8 relaxed_ordering_read_umr[0x1];
+ u8 reserved_at_d2[0x7];
+ u8 virtio_net_device_emualtion_manager[0x1];
+ u8 virtio_blk_device_emualtion_manager[0x1];
u8 log_max_cq[0x5];
u8 log_max_eq_sz[0x8];
- u8 reserved_at_e8[0x2];
+ u8 relaxed_ordering_write[0x1];
+ u8 relaxed_ordering_read_pci_enabled[0x1];
u8 log_max_mkey[0x6];
- u8 reserved_at_f0[0xc];
+ u8 reserved_at_f0[0x6];
+ u8 terminate_scatter_list_mkey[0x1];
+ u8 repeated_mkey[0x1];
+ u8 dump_fill_mkey[0x1];
+ u8 reserved_at_f9[0x2];
+ u8 fast_teardown[0x1];
u8 log_max_eq[0x4];
u8 max_indirection[0x8];
@@ -817,12 +1554,21 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 null_mkey[0x1];
u8 log_max_klm_list_size[0x6];
- u8 reserved_at_120[0xa];
+ u8 reserved_at_120[0x2];
+ u8 qpc_extension[0x1];
+ u8 reserved_at_123[0x7];
u8 log_max_ra_req_dc[0x6];
- u8 reserved_at_130[0xa];
+ u8 reserved_at_130[0x2];
+ u8 eth_wqe_too_small[0x1];
+ u8 reserved_at_133[0x6];
+ u8 vnic_env_cq_overrun[0x1];
u8 log_max_ra_res_dc[0x6];
- u8 reserved_at_140[0xa];
+ u8 reserved_at_140[0x5];
+ u8 release_all_pages[0x1];
+ u8 must_not_use[0x1];
+ u8 reserved_at_147[0x2];
+ u8 roce_accl[0x1];
u8 log_max_ra_req_qp[0x6];
u8 reserved_at_150[0xa];
u8 log_max_ra_res_qp[0x6];
@@ -832,15 +1578,21 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 cc_modify_allowed[0x1];
u8 start_pad[0x1];
u8 cache_line_128byte[0x1];
- u8 reserved_at_165[0xb];
+ u8 reserved_at_165[0x4];
+ u8 rts2rts_qp_counters_set_id[0x1];
+ u8 reserved_at_16a[0x2];
+ u8 vnic_env_int_rq_oob[0x1];
+ u8 sbcam_reg[0x1];
+ u8 reserved_at_16e[0x1];
+ u8 qcam_reg[0x1];
u8 gid_table_size[0x10];
u8 out_of_seq_cnt[0x1];
u8 vport_counters[0x1];
u8 retransmission_q_counters[0x1];
- u8 reserved_at_183[0x1];
+ u8 debug[0x1];
u8 modify_rq_counter_set_id[0x1];
- u8 reserved_at_185[0x1];
+ u8 rq_delay_drop[0x1];
u8 max_qp_cnt[0xa];
u8 pkey_table_size[0x10];
@@ -848,16 +1600,16 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 vhca_group_manager[0x1];
u8 ib_virt[0x1];
u8 eth_virt[0x1];
- u8 reserved_at_1a4[0x1];
+ u8 vnic_env_queue_counters[0x1];
u8 ets[0x1];
u8 nic_flow_table[0x1];
- u8 eswitch_flow_table[0x1];
- u8 early_vf_enable[0x1];
+ u8 eswitch_manager[0x1];
+ u8 device_memory[0x1];
u8 mcam_reg[0x1];
u8 pcam_reg[0x1];
u8 local_ca_ack_delay[0x5];
u8 port_module_event[0x1];
- u8 reserved_at_1b1[0x1];
+ u8 enhanced_error_q_counters[0x1];
u8 ports_check[0x1];
u8 reserved_at_1b3[0x1];
u8 disable_link_up[0x1];
@@ -871,9 +1623,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_msg[0x5];
u8 reserved_at_1c8[0x4];
u8 max_tc[0x4];
- u8 reserved_at_1d0[0x1];
+ u8 temp_warn_event[0x1];
u8 dcbx[0x1];
- u8 reserved_at_1d2[0x3];
+ u8 general_notification_event[0x1];
+ u8 reserved_at_1d3[0x2];
u8 fpga[0x1];
u8 rol_s[0x1];
u8 rol_g[0x1];
@@ -887,7 +1640,11 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 wol_p[0x1];
u8 stat_rate_support[0x10];
- u8 reserved_at_1f0[0xc];
+ u8 reserved_at_1f0[0x1];
+ u8 pci_sync_for_fw_update_event[0x1];
+ u8 reserved_at_1f2[0x6];
+ u8 init2_lag_tx_port_affinity[0x1];
+ u8 reserved_at_1fa[0x3];
u8 cqe_version[0x4];
u8 compact_address_vector[0x1];
@@ -895,9 +1652,14 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_202[0x1];
u8 ipoib_enhanced_offloads[0x1];
u8 ipoib_basic_offloads[0x1];
- u8 reserved_at_205[0x5];
+ u8 reserved_at_205[0x1];
+ u8 repeated_block_disabled[0x1];
+ u8 umr_modify_entity_size_disabled[0x1];
+ u8 umr_modify_atomic_disabled[0x1];
+ u8 umr_indirect_mkey_disabled[0x1];
u8 umr_fence[0x2];
- u8 reserved_at_20c[0x3];
+ u8 dc_req_scat_data_cqe[0x1];
+ u8 reserved_at_20d[0x2];
u8 drain_sigerr[0x1];
u8 cmdif_checksum[0x2];
u8 sigerr_cqe[0x1];
@@ -931,7 +1693,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 vector_calc[0x1];
u8 umr_ptr_rlky[0x1];
u8 imaicl[0x1];
- u8 reserved_at_232[0x4];
+ u8 qp_packet_based[0x1];
+ u8 reserved_at_233[0x3];
u8 qkv[0x1];
u8 pkv[0x1];
u8 set_deth_sqpn[0x1];
@@ -944,16 +1707,28 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 uar_4k[0x1];
u8 reserved_at_241[0x9];
u8 uar_sz[0x6];
- u8 reserved_at_250[0x8];
+ u8 port_selection_cap[0x1];
+ u8 reserved_at_248[0x1];
+ u8 umem_uid_0[0x1];
+ u8 reserved_at_250[0x5];
u8 log_pg_sz[0x8];
u8 bf[0x1];
u8 driver_version[0x1];
u8 pad_tx_eth_packet[0x1];
- u8 reserved_at_263[0x8];
+ u8 reserved_at_263[0x3];
+ u8 mkey_by_name[0x1];
+ u8 reserved_at_267[0x4];
+
u8 log_bf_reg_size[0x5];
- u8 reserved_at_270[0xb];
+ u8 reserved_at_270[0x3];
+ u8 qp_error_syndrome[0x1];
+ u8 reserved_at_274[0x2];
+ u8 lag_dct[0x2];
+ u8 lag_tx_port_affinity[0x1];
+ u8 lag_native_fdb_selection[0x1];
+ u8 reserved_at_27a[0x1];
u8 lag_master[0x1];
u8 num_lag_ports[0x4];
@@ -963,25 +1738,36 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_2a0[0x10];
u8 max_wqe_sz_rq[0x10];
- u8 reserved_at_2c0[0x10];
+ u8 max_flow_counter_31_16[0x10];
u8 max_wqe_sz_sq_dc[0x10];
u8 reserved_at_2e0[0x7];
u8 max_qp_mcg[0x19];
- u8 reserved_at_300[0x18];
+ u8 reserved_at_300[0x10];
+ u8 flow_counter_bulk_alloc[0x8];
u8 log_max_mcg[0x8];
u8 reserved_at_320[0x3];
u8 log_max_transport_domain[0x5];
- u8 reserved_at_328[0x3];
+ u8 reserved_at_328[0x2];
+ u8 relaxed_ordering_read[0x1];
u8 log_max_pd[0x5];
- u8 reserved_at_330[0xb];
+ u8 reserved_at_330[0x9];
+ u8 q_counter_aggregation[0x1];
+ u8 q_counter_other_vport[0x1];
u8 log_max_xrcd[0x5];
- u8 reserved_at_340[0x8];
+ u8 nic_receive_steering_discard[0x1];
+ u8 receive_discard_vport_down[0x1];
+ u8 transmit_discard_vport_down[0x1];
+ u8 eq_overrun_count[0x1];
+ u8 reserved_at_344[0x1];
+ u8 invalid_command_count[0x1];
+ u8 quota_exceeded_count[0x1];
+ u8 reserved_at_347[0x1];
u8 log_max_flow_counter_bulk[0x8];
- u8 max_flow_counter[0x10];
+ u8 max_flow_counter_15_0[0x10];
u8 reserved_at_360[0x3];
@@ -1003,7 +1789,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_398[0x3];
u8 log_max_tis_per_sq[0x5];
- u8 reserved_at_3a0[0x3];
+ u8 ext_stride_num_range[0x1];
+ u8 roce_rw_supported[0x1];
+ u8 log_max_current_uc_list_wr_supported[0x1];
u8 log_max_stride_sz_rq[0x5];
u8 reserved_at_3a8[0x3];
u8 log_min_stride_sz_rq[0x5];
@@ -1012,20 +1800,48 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_3b8[0x3];
u8 log_min_stride_sz_sq[0x5];
- u8 reserved_at_3c0[0x1b];
+ u8 hairpin[0x1];
+ u8 reserved_at_3c1[0x2];
+ u8 log_max_hairpin_queues[0x5];
+ u8 reserved_at_3c8[0x3];
+ u8 log_max_hairpin_wq_data_sz[0x5];
+ u8 reserved_at_3d0[0x3];
+ u8 log_max_hairpin_num_packets[0x5];
+ u8 reserved_at_3d8[0x3];
u8 log_max_wq_sz[0x5];
u8 nic_vport_change_event[0x1];
- u8 reserved_at_3e1[0xa];
+ u8 disable_local_lb_uc[0x1];
+ u8 disable_local_lb_mc[0x1];
+ u8 log_min_hairpin_wq_data_sz[0x5];
+ u8 reserved_at_3e8[0x2];
+ u8 vhca_state[0x1];
u8 log_max_vlan_list[0x5];
u8 reserved_at_3f0[0x3];
u8 log_max_current_mc_list[0x5];
u8 reserved_at_3f8[0x3];
u8 log_max_current_uc_list[0x5];
- u8 reserved_at_400[0x80];
-
- u8 reserved_at_480[0x3];
+ u8 general_obj_types[0x40];
+
+ u8 sq_ts_format[0x2];
+ u8 rq_ts_format[0x2];
+ u8 steering_format_version[0x4];
+ u8 create_qp_start_hint[0x18];
+
+ u8 reserved_at_460[0x1];
+ u8 ats[0x1];
+ u8 reserved_at_462[0x1];
+ u8 log_max_uctx[0x5];
+ u8 reserved_at_468[0x1];
+ u8 crypto[0x1];
+ u8 ipsec_offload[0x1];
+ u8 log_max_umem[0x5];
+ u8 max_num_eqs[0x10];
+
+ u8 reserved_at_480[0x1];
+ u8 tls_tx[0x1];
+ u8 tls_rx[0x1];
u8 log_max_l2_table[0x5];
u8 reserved_at_488[0x8];
u8 log_uar_page_sz[0x10];
@@ -1036,15 +1852,38 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_500[0x20];
u8 num_of_uars_per_page[0x20];
- u8 reserved_at_540[0x40];
- u8 reserved_at_580[0x3f];
+ u8 flex_parser_protocols[0x20];
+
+ u8 max_geneve_tlv_options[0x8];
+ u8 reserved_at_568[0x3];
+ u8 max_geneve_tlv_option_data_len[0x5];
+ u8 reserved_at_570[0x9];
+ u8 adv_virtualization[0x1];
+ u8 reserved_at_57a[0x6];
+
+ u8 reserved_at_580[0xb];
+ u8 log_max_dci_stream_channels[0x5];
+ u8 reserved_at_590[0x3];
+ u8 log_max_dci_errored_streams[0x5];
+ u8 reserved_at_598[0x8];
+
+ u8 reserved_at_5a0[0x10];
+ u8 enhanced_cqe_compression[0x1];
+ u8 reserved_at_5b1[0x2];
+ u8 log_max_dek[0x5];
+ u8 reserved_at_5b8[0x4];
+ u8 mini_cqe_resp_stride_index[0x1];
+ u8 cqe_128_always[0x1];
+ u8 cqe_compression_128[0x1];
u8 cqe_compression[0x1];
u8 cqe_compression_timeout[0x10];
u8 cqe_compression_max_num[0x10];
- u8 reserved_at_5e0[0x10];
+ u8 reserved_at_5e0[0x8];
+ u8 flex_parser_id_gtpu_dw_0[0x4];
+ u8 reserved_at_5ec[0x4];
u8 tag_matching[0x1];
u8 rndv_offload_rc[0x1];
u8 rndv_offload_dc[0x1];
@@ -1052,36 +1891,149 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_5f8[0x3];
u8 log_max_xrq[0x5];
- u8 reserved_at_600[0x200];
+ u8 affiliate_nic_vport_criteria[0x8];
+ u8 native_port_num[0x8];
+ u8 num_vhca_ports[0x8];
+ u8 flex_parser_id_gtpu_teid[0x4];
+ u8 reserved_at_61c[0x2];
+ u8 sw_owner_id[0x1];
+ u8 reserved_at_61f[0x1];
+
+ u8 max_num_of_monitor_counters[0x10];
+ u8 num_ppcnt_monitor_counters[0x10];
+
+ u8 max_num_sf[0x10];
+ u8 num_q_monitor_counters[0x10];
+
+ u8 reserved_at_660[0x20];
+
+ u8 sf[0x1];
+ u8 sf_set_partition[0x1];
+ u8 reserved_at_682[0x1];
+ u8 log_max_sf[0x5];
+ u8 apu[0x1];
+ u8 reserved_at_689[0x4];
+ u8 migration[0x1];
+ u8 reserved_at_68e[0x2];
+ u8 log_min_sf_size[0x8];
+ u8 max_num_sf_partitions[0x8];
+
+ u8 uctx_cap[0x20];
+
+ u8 reserved_at_6c0[0x4];
+ u8 flex_parser_id_geneve_tlv_option_0[0x4];
+ u8 flex_parser_id_icmp_dw1[0x4];
+ u8 flex_parser_id_icmp_dw0[0x4];
+ u8 flex_parser_id_icmpv6_dw1[0x4];
+ u8 flex_parser_id_icmpv6_dw0[0x4];
+ u8 flex_parser_id_outer_first_mpls_over_gre[0x4];
+ u8 flex_parser_id_outer_first_mpls_over_udp_label[0x4];
+
+ u8 max_num_match_definer[0x10];
+ u8 sf_base_id[0x10];
+
+ u8 flex_parser_id_gtpu_dw_2[0x4];
+ u8 flex_parser_id_gtpu_first_ext_dw_0[0x4];
+ u8 num_total_dynamic_vf_msix[0x18];
+ u8 reserved_at_720[0x14];
+ u8 dynamic_msix_table_size[0xc];
+ u8 reserved_at_740[0xc];
+ u8 min_dynamic_vf_msix_table_size[0x4];
+ u8 reserved_at_750[0x4];
+ u8 max_dynamic_vf_msix_table_size[0xc];
+
+ u8 reserved_at_760[0x3];
+ u8 log_max_num_header_modify_argument[0x5];
+ u8 reserved_at_768[0x4];
+ u8 log_header_modify_argument_granularity[0x4];
+ u8 reserved_at_770[0x3];
+ u8 log_header_modify_argument_max_alloc[0x5];
+ u8 reserved_at_778[0x8];
+
+ u8 vhca_tunnel_commands[0x40];
+ u8 match_definer_format_supported[0x40];
+};
+
+struct mlx5_ifc_cmd_hca_cap_2_bits {
+ u8 reserved_at_0[0x80];
+
+ u8 migratable[0x1];
+ u8 reserved_at_81[0x1f];
+
+ u8 max_reformat_insert_size[0x8];
+ u8 max_reformat_insert_offset[0x8];
+ u8 max_reformat_remove_size[0x8];
+ u8 max_reformat_remove_offset[0x8];
+
+ u8 reserved_at_c0[0x8];
+ u8 migration_multi_load[0x1];
+ u8 migration_tracking_state[0x1];
+ u8 reserved_at_ca[0x16];
+
+ u8 reserved_at_e0[0xc0];
+
+ u8 flow_table_type_2_type[0x8];
+ u8 reserved_at_1a8[0x3];
+ u8 log_min_mkey_entity_size[0x5];
+ u8 reserved_at_1b0[0x10];
+
+ u8 reserved_at_1c0[0x60];
+
+ u8 reserved_at_220[0x1];
+ u8 sw_vhca_id_valid[0x1];
+ u8 sw_vhca_id[0xe];
+ u8 reserved_at_230[0x10];
+
+ u8 reserved_at_240[0xb];
+ u8 ts_cqe_metadata_size2wqe_counter[0x5];
+ u8 reserved_at_250[0x10];
+
+ u8 reserved_at_260[0x5a0];
+};
+
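
[Editor's note] cmd_hca_cap_2 is selected with MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2 (0x20 above) and gated by the hca_cap_2 bit in the base capability page; once cached, individual bits are usually read through a capability accessor. A hedged sketch, assuming the MLX5_CAP_GEN_2() macro from include/linux/mlx5/driver.h:

	/* Hypothetical sketch: gate live migration support on a
	 * cmd_hca_cap_2 bit. */
	if (!MLX5_CAP_GEN_2(mdev, migratable))
		return -EOPNOTSUPP;
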
+enum mlx5_ifc_flow_destination_type {
+ MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT = 0x0,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_TIR = 0x2,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK = 0x8,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE = 0xA,
};
-enum mlx5_flow_destination_type {
- MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
- MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
- MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
-
- MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100,
+enum mlx5_flow_table_miss_action {
+ MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ MLX5_FLOW_TABLE_MISS_ACTION_FWD,
+ MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
};
struct mlx5_ifc_dest_format_struct_bits {
u8 destination_type[0x8];
u8 destination_id[0x18];
- u8 reserved_at_20[0x20];
+ u8 destination_eswitch_owner_vhca_id_valid[0x1];
+ u8 packet_reformat[0x1];
+ u8 reserved_at_22[0x6];
+ u8 destination_table_type[0x8];
+ u8 destination_eswitch_owner_vhca_id[0x10];
};
struct mlx5_ifc_flow_counter_list_bits {
- u8 clear[0x1];
- u8 num_of_counters[0xf];
- u8 flow_counter_id[0x10];
+ u8 flow_counter_id[0x20];
u8 reserved_at_20[0x20];
};
+struct mlx5_ifc_extended_dest_format_bits {
+ struct mlx5_ifc_dest_format_struct_bits destination_entry;
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
- struct mlx5_ifc_dest_format_struct_bits dest_format_struct;
+ struct mlx5_ifc_extended_dest_format_bits extended_dest_format;
struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
- u8 reserved_at_0[0x40];
};
struct mlx5_ifc_fte_match_param_bits {
@@ -1091,7 +2043,15 @@ struct mlx5_ifc_fte_match_param_bits {
struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
- u8 reserved_at_600[0xa00];
+ struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2;
+
+ struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3;
+
+ struct mlx5_ifc_fte_match_set_misc4_bits misc_parameters_4;
+
+ struct mlx5_ifc_fte_match_set_misc5_bits misc_parameters_5;
+
+ u8 reserved_at_e00[0x200];
};
enum {
@@ -1150,15 +2110,36 @@ struct mlx5_ifc_wq_bits {
u8 reserved_at_118[0x3];
u8 log_wq_sz[0x5];
- u8 reserved_at_120[0x15];
- u8 log_wqe_num_of_strides[0x3];
+ u8 dbr_umem_valid[0x1];
+ u8 wq_umem_valid[0x1];
+ u8 reserved_at_122[0x1];
+ u8 log_hairpin_num_packets[0x5];
+ u8 reserved_at_128[0x3];
+ u8 log_hairpin_data_sz[0x5];
+
+ u8 reserved_at_130[0x4];
+ u8 log_wqe_num_of_strides[0x4];
u8 two_byte_shift_en[0x1];
u8 reserved_at_139[0x4];
u8 log_wqe_stride_size[0x3];
- u8 reserved_at_140[0x4c0];
+ u8 reserved_at_140[0x80];
- struct mlx5_ifc_cmd_pas_bits pas[0];
+ u8 headers_mkey[0x20];
+
+ u8 shampo_enable[0x1];
+ u8 reserved_at_1e1[0x4];
+ u8 log_reservation_size[0x3];
+ u8 reserved_at_1e8[0x5];
+ u8 log_max_num_of_packets_per_reservation[0x3];
+ u8 reserved_at_1f0[0x6];
+ u8 log_headers_entry_size[0x2];
+ u8 reserved_at_1f8[0x4];
+ u8 log_headers_buffer_entry_num[0x4];
+
+ u8 reserved_at_200[0x400];
+
+ struct mlx5_ifc_cmd_pas_bits pas[];
};
struct mlx5_ifc_rq_num_bits {
@@ -1187,7 +2168,8 @@ struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
u8 reserved_at_c0[0x12];
u8 cnp_dscp[0x6];
- u8 reserved_at_d8[0x5];
+ u8 reserved_at_d8[0x4];
+ u8 cnp_prio_mode[0x1];
u8 cnp_802p_prio[0x3];
u8 reserved_at_e0[0x720];
@@ -1239,6 +2221,17 @@ struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
u8 reserved_at_360[0x4a0];
};
+struct mlx5_ifc_cong_control_r_roce_general_bits {
+ u8 reserved_at_0[0x80];
+
+ u8 reserved_at_80[0x10];
+ u8 rtt_resp_dscp_valid[0x1];
+ u8 reserved_at_91[0x9];
+ u8 rtt_resp_dscp[0x6];
+
+ u8 reserved_at_a0[0x760];
+};
+
struct mlx5_ifc_cong_control_802_1qau_rp_bits {
u8 reserved_at_0[0x80];
@@ -1275,6 +2268,132 @@ struct mlx5_ifc_resize_field_select_bits {
u8 resize_field_select[0x20];
};
+struct mlx5_ifc_resource_dump_bits {
+ u8 more_dump[0x1];
+ u8 inline_dump[0x1];
+ u8 reserved_at_2[0xa];
+ u8 seq_num[0x4];
+ u8 segment_type[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 vhca_id[0x10];
+
+ u8 index1[0x20];
+
+ u8 index2[0x20];
+
+ u8 num_of_obj1[0x10];
+ u8 num_of_obj2[0x10];
+
+ u8 reserved_at_a0[0x20];
+
+ u8 device_opaque[0x40];
+
+ u8 mkey[0x20];
+
+ u8 size[0x20];
+
+ u8 address[0x40];
+
+ u8 inline_data[52][0x20];
+};
+
+struct mlx5_ifc_resource_dump_menu_record_bits {
+ u8 reserved_at_0[0x4];
+ u8 num_of_obj2_supports_active[0x1];
+ u8 num_of_obj2_supports_all[0x1];
+ u8 must_have_num_of_obj2[0x1];
+ u8 support_num_of_obj2[0x1];
+ u8 num_of_obj1_supports_active[0x1];
+ u8 num_of_obj1_supports_all[0x1];
+ u8 must_have_num_of_obj1[0x1];
+ u8 support_num_of_obj1[0x1];
+ u8 must_have_index2[0x1];
+ u8 support_index2[0x1];
+ u8 must_have_index1[0x1];
+ u8 support_index1[0x1];
+ u8 segment_type[0x10];
+
+ u8 segment_name[4][0x20];
+
+ u8 index1_name[4][0x20];
+
+ u8 index2_name[4][0x20];
+};
+
+struct mlx5_ifc_resource_dump_segment_header_bits {
+ u8 length_dw[0x10];
+ u8 segment_type[0x10];
+};
+
+struct mlx5_ifc_resource_dump_command_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+
+ u8 segment_called[0x10];
+ u8 vhca_id[0x10];
+
+ u8 index1[0x20];
+
+ u8 index2[0x20];
+
+ u8 num_of_obj1[0x10];
+ u8 num_of_obj2[0x10];
+};
+
+struct mlx5_ifc_resource_dump_error_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+
+ u8 reserved_at_20[0x10];
+ u8 syndrome_id[0x10];
+
+ u8 reserved_at_40[0x40];
+
+ u8 error[8][0x20];
+};
+
+struct mlx5_ifc_resource_dump_info_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+
+ u8 reserved_at_20[0x18];
+ u8 dump_version[0x8];
+
+ u8 hw_version[0x20];
+
+ u8 fw_version[0x20];
+};
+
+struct mlx5_ifc_resource_dump_menu_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+
+ u8 reserved_at_20[0x10];
+ u8 num_of_records[0x10];
+
+ struct mlx5_ifc_resource_dump_menu_record_bits record[];
+};
+
+struct mlx5_ifc_resource_dump_resource_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+
+ u8 reserved_at_20[0x20];
+
+ u8 index1[0x20];
+
+ u8 index2[0x20];
+
+ u8 payload[][0x20];
+};
+
+struct mlx5_ifc_resource_dump_terminate_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+};
+
+struct mlx5_ifc_menu_resource_dump_response_bits {
+ struct mlx5_ifc_resource_dump_info_segment_bits info;
+ struct mlx5_ifc_resource_dump_command_segment_bits cmd;
+ struct mlx5_ifc_resource_dump_menu_segment_bits menu;
+ struct mlx5_ifc_resource_dump_terminate_segment_bits terminate;
+};
+
enum {
MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD = 0x1,
MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT = 0x2,
@@ -1483,12 +2602,28 @@ struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits {
u8 port_xmit_wait[0x20];
};
-struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
+struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits {
u8 transmit_queue_high[0x20];
u8 transmit_queue_low[0x20];
- u8 reserved_at_40[0x780];
+ u8 no_buffer_discard_uc_high[0x20];
+
+ u8 no_buffer_discard_uc_low[0x20];
+
+ u8 reserved_at_80[0x740];
+};
+
+struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits {
+ u8 wred_discard_high[0x20];
+
+ u8 wred_discard_low[0x20];
+
+ u8 ecn_marked_tc_high[0x20];
+
+ u8 ecn_marked_tc_low[0x20];
+
+ u8 reserved_at_80[0x740];
};
struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
@@ -1532,7 +2667,19 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
u8 rx_pause_transition_low[0x20];
- u8 reserved_at_3c0[0x400];
+ u8 rx_discards_high[0x20];
+
+ u8 rx_discards_low[0x20];
+
+ u8 device_stall_minor_watermark_cnt_high[0x20];
+
+ u8 device_stall_minor_watermark_cnt_low[0x20];
+
+ u8 device_stall_critical_watermark_cnt_high[0x20];
+
+ u8 device_stall_critical_watermark_cnt_low[0x20];
+
+ u8 reserved_at_480[0x340];
};
struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
@@ -1540,7 +2687,21 @@ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
u8 port_transmit_wait_low[0x20];
- u8 reserved_at_40[0x780];
+ u8 reserved_at_40[0x100];
+
+ u8 rx_buffer_almost_full_high[0x20];
+
+ u8 rx_buffer_almost_full_low[0x20];
+
+ u8 rx_buffer_full_high[0x20];
+
+ u8 rx_buffer_full_low[0x20];
+
+ u8 rx_icrc_encapsulated_high[0x20];
+
+ u8 rx_icrc_encapsulated_low[0x20];
+
+ u8 reserved_at_200[0x5c0];
};
struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
@@ -1856,7 +3017,19 @@ struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits {
u8 crc_error_tlp[0x20];
- u8 reserved_at_140[0x680];
+ u8 tx_overflow_buffer_pkt_high[0x20];
+
+ u8 tx_overflow_buffer_pkt_low[0x20];
+
+ u8 outbound_stalled_reads[0x20];
+
+ u8 outbound_stalled_writes[0x20];
+
+ u8 outbound_stalled_reads_events[0x20];
+
+ u8 outbound_stalled_writes_events[0x20];
+
+ u8 reserved_at_200[0x5c0];
};
struct mlx5_ifc_cmd_inter_comp_event_bits {
@@ -1906,6 +3079,40 @@ struct mlx5_ifc_dropped_packet_logged_bits {
u8 reserved_at_0[0xe0];
};
+struct mlx5_ifc_default_timeout_bits {
+ u8 to_multiplier[0x3];
+ u8 reserved_at_3[0x9];
+ u8 to_value[0x14];
+};
+
+struct mlx5_ifc_dtor_reg_bits {
+ u8 reserved_at_0[0x20];
+
+ struct mlx5_ifc_default_timeout_bits pcie_toggle_to;
+
+ u8 reserved_at_40[0x60];
+
+ struct mlx5_ifc_default_timeout_bits health_poll_to;
+
+ struct mlx5_ifc_default_timeout_bits full_crdump_to;
+
+ struct mlx5_ifc_default_timeout_bits fw_reset_to;
+
+ struct mlx5_ifc_default_timeout_bits flush_on_err_to;
+
+ struct mlx5_ifc_default_timeout_bits pci_sync_update_to;
+
+ struct mlx5_ifc_default_timeout_bits tear_down_to;
+
+ struct mlx5_ifc_default_timeout_bits fsm_reactivate_to;
+
+ struct mlx5_ifc_default_timeout_bits reclaim_pages_to;
+
+ struct mlx5_ifc_default_timeout_bits reclaim_vfs_pages_to;
+
+ u8 reserved_at_1c0[0x40];
+};
+
enum {
MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1,
MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2,
@@ -2015,6 +3222,10 @@ enum {
};
enum {
+ MLX5_QPC_OFFLOAD_TYPE_RNDV = 0x1,
+};
+
+enum {
MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS = 0x0,
MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT = 0x1,
};
@@ -2051,13 +3262,22 @@ enum {
MLX5_QPC_CS_RES_UP_TO_64B = 0x2,
};
+enum {
+ MLX5_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0,
+ MLX5_TIMESTAMP_FORMAT_DEFAULT = 0x1,
+ MLX5_TIMESTAMP_FORMAT_REAL_TIME = 0x2,
+};
+
struct mlx5_ifc_qpc_bits {
u8 state[0x4];
u8 lag_tx_port_affinity[0x4];
u8 st[0x8];
- u8 reserved_at_10[0x3];
+ u8 reserved_at_10[0x2];
+ u8 isolate_vl_tc[0x1];
u8 pm_state[0x2];
- u8 reserved_at_15[0x7];
+ u8 reserved_at_15[0x1];
+ u8 req_e2e_credit_mode[0x2];
+ u8 offload_type[0x4];
u8 end_padding_mode[0x2];
u8 reserved_at_1e[0x2];
@@ -2077,7 +3297,10 @@ struct mlx5_ifc_qpc_bits {
u8 log_rq_stride[0x3];
u8 no_sq[0x1];
u8 log_sq_size[0x4];
- u8 reserved_at_55[0x6];
+ u8 reserved_at_55[0x1];
+ u8 retry_mode[0x2];
+ u8 ts_format[0x2];
+ u8 reserved_at_5a[0x1];
u8 rlky[0x1];
u8 ulp_stateless_offload_mode[0x4];
@@ -2112,10 +3335,12 @@ struct mlx5_ifc_qpc_bits {
u8 reserved_at_3c0[0x8];
u8 next_send_psn[0x18];
- u8 reserved_at_3e0[0x8];
+ u8 reserved_at_3e0[0x3];
+ u8 log_num_dci_stream_channels[0x5];
u8 cqn_snd[0x18];
- u8 reserved_at_400[0x8];
+ u8 reserved_at_400[0x3];
+ u8 log_num_dci_errored_streams[0x5];
u8 deth_sqpn[0x18];
u8 reserved_at_420[0x20];
@@ -2177,7 +3402,10 @@ struct mlx5_ifc_qpc_bits {
u8 dc_access_key[0x40];
- u8 reserved_at_680[0xc0];
+ u8 reserved_at_680[0x3];
+ u8 dbr_umem_valid[0x1];
+
+ u8 reserved_at_684[0xbc];
};
struct mlx5_ifc_roce_addr_layout_bits {
@@ -2197,8 +3425,47 @@ struct mlx5_ifc_roce_addr_layout_bits {
u8 reserved_at_e0[0x20];
};
+struct mlx5_ifc_shampo_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 shampo_log_max_reservation_size[0x5];
+ u8 reserved_at_8[0x3];
+ u8 shampo_log_min_reservation_size[0x5];
+ u8 shampo_min_mss_size[0x10];
+
+ u8 reserved_at_20[0x3];
+ u8 shampo_max_log_headers_entry_size[0x5];
+ u8 reserved_at_28[0x18];
+
+ u8 reserved_at_40[0x7c0];
+};
+
+struct mlx5_ifc_crypto_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 synchronize_dek[0x1];
+ u8 int_kek_manual[0x1];
+ u8 int_kek_auto[0x1];
+ u8 reserved_at_6[0x1a];
+
+ u8 reserved_at_20[0x3];
+ u8 log_dek_max_alloc[0x5];
+ u8 reserved_at_28[0x3];
+ u8 log_max_num_deks[0x5];
+ u8 reserved_at_30[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x3];
+ u8 log_dek_granularity[0x5];
+ u8 reserved_at_68[0x3];
+ u8 log_max_num_int_kek[0x5];
+ u8 sw_wrapped_dek[0x10];
+
+ u8 reserved_at_80[0x780];
+};
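
Each new capability page is read back through QUERY_HCA_CAP, with the page selected in op_mod and overlaid by the union below. A hedged sketch; MLX5_CAP_CRYPTO as the crypto page selector comes from the driver's mlx5_cap_type enum and is an assumption here:

    static int query_crypto_caps(struct mlx5_core_dev *dev, u8 *log_max_num_deks)
    {
            u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
            int outlen = MLX5_ST_SZ_BYTES(query_hca_cap_out);
            void *out = kvzalloc(outlen, GFP_KERNEL);
            int err;

            if (!out)
                    return -ENOMEM;

            MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
            /* op_mod: capability type in bits [15:1], bit 0 = current values */
            MLX5_SET(query_hca_cap_in, in, op_mod,
                     (MLX5_CAP_CRYPTO << 1) | HCA_CAP_OPMOD_GET_CUR);

            err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
            if (!err)
                    *log_max_num_deks = MLX5_GET(query_hca_cap_out, out,
                                                 capability.crypto_cap.log_max_num_deks);
            kvfree(out);
            return err;
    }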
+
union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
+ struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2;
struct mlx5_ifc_odp_cap_bits odp_cap;
struct mlx5_ifc_atomic_caps_bits atomic_caps;
struct mlx5_ifc_roce_cap_bits roce_cap;
@@ -2206,9 +3473,17 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
+ struct mlx5_ifc_port_selection_cap_bits port_selection_cap;
struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
struct mlx5_ifc_qos_cap_bits qos_cap;
+ struct mlx5_ifc_debug_cap_bits debug_cap;
struct mlx5_ifc_fpga_cap_bits fpga_cap;
+ struct mlx5_ifc_tls_cap_bits tls_cap;
+ struct mlx5_ifc_device_mem_cap_bits device_mem_cap;
+ struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap;
+ struct mlx5_ifc_shampo_cap_bits shampo_cap;
+ struct mlx5_ifc_macsec_cap_bits macsec_cap;
+ struct mlx5_ifc_crypto_cap_bits crypto_cap;
u8 reserved_at_0[0x8000];
};
@@ -2217,13 +3492,70 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8,
- MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10,
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT = 0x10,
MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20,
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800,
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT = 0x1000,
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT = 0x2000,
+ MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO = 0x4000,
+};
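
Since `action` in the flow context below is a bitmask (action[0x10]), these verbs compose in a single flow table entry. A sketch that drops matched packets while still counting them, assuming `flow_ctx` points at the flow_context inside a SET_FLOW_TABLE_ENTRY buffer:

    MLX5_SET(flow_context, flow_ctx, action,
             MLX5_FLOW_CONTEXT_ACTION_DROP |
             MLX5_FLOW_CONTEXT_ACTION_COUNT);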
+
+enum {
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT = 0x0,
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK = 0x1,
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT = 0x2,
+};
+
+enum {
+ MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC = 0x0,
+ MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC = 0x1,
+};
+
+struct mlx5_ifc_vlan_bits {
+ u8 ethtype[0x10];
+ u8 prio[0x3];
+ u8 cfi[0x1];
+ u8 vid[0xc];
+};
+
+enum {
+ MLX5_FLOW_METER_COLOR_RED = 0x0,
+ MLX5_FLOW_METER_COLOR_YELLOW = 0x1,
+ MLX5_FLOW_METER_COLOR_GREEN = 0x2,
+ MLX5_FLOW_METER_COLOR_UNDEFINED = 0x3,
+};
+
+enum {
+ MLX5_EXE_ASO_FLOW_METER = 0x2,
+};
+
+struct mlx5_ifc_exe_aso_ctrl_flow_meter_bits {
+ u8 return_reg_id[0x4];
+ u8 aso_type[0x4];
+ u8 reserved_at_8[0x14];
+ u8 action[0x1];
+ u8 init_color[0x2];
+ u8 meter_id[0x1];
+};
+
+union mlx5_ifc_exe_aso_ctrl {
+ struct mlx5_ifc_exe_aso_ctrl_flow_meter_bits exe_aso_ctrl_flow_meter;
+};
+
+struct mlx5_ifc_execute_aso_bits {
+ u8 valid[0x1];
+ u8 reserved_at_1[0x7];
+ u8 aso_object_id[0x18];
+
+ union mlx5_ifc_exe_aso_ctrl exe_aso_ctrl;
};
struct mlx5_ifc_flow_context_bits {
- u8 reserved_at_0[0x20];
+ struct mlx5_ifc_vlan_bits push_vlan;
u8 group_id[0x20];
@@ -2233,23 +3565,31 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_60[0x10];
u8 action[0x10];
- u8 reserved_at_80[0x8];
+ u8 extended_destination[0x1];
+ u8 reserved_at_81[0x1];
+ u8 flow_source[0x2];
+ u8 encrypt_decrypt_type[0x4];
u8 destination_list_size[0x18];
u8 reserved_at_a0[0x8];
u8 flow_counter_list_size[0x18];
- u8 encap_id[0x20];
+ u8 packet_reformat_id[0x20];
u8 modify_header_id[0x20];
- u8 reserved_at_100[0x100];
+ struct mlx5_ifc_vlan_bits push_vlan_2;
+
+ u8 encrypt_decrypt_obj_id[0x20];
+ u8 reserved_at_140[0xc0];
struct mlx5_ifc_fte_match_param_bits match_value;
- u8 reserved_at_1200[0x600];
+ struct mlx5_ifc_execute_aso_bits execute_aso[4];
+
+ u8 reserved_at_1300[0x500];
- union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[0];
+ union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[];
};
enum {
@@ -2271,7 +3611,8 @@ struct mlx5_ifc_xrc_srqc_bits {
u8 xrcd[0x18];
u8 page_offset[0x6];
- u8 reserved_at_46[0x2];
+ u8 reserved_at_46[0x1];
+ u8 dbr_umem_valid[0x1];
u8 cqn[0x18];
u8 reserved_at_60[0x20];
@@ -2299,6 +3640,40 @@ struct mlx5_ifc_xrc_srqc_bits {
u8 reserved_at_180[0x80];
};
+struct mlx5_ifc_vnic_diagnostic_statistics_bits {
+ u8 counter_error_queues[0x20];
+
+ u8 total_error_queues[0x20];
+
+ u8 send_queue_priority_update_flow[0x20];
+
+ u8 reserved_at_60[0x20];
+
+ u8 nic_receive_steering_discard[0x40];
+
+ u8 receive_discard_vport_down[0x40];
+
+ u8 transmit_discard_vport_down[0x40];
+
+ u8 async_eq_overrun[0x20];
+
+ u8 comp_eq_overrun[0x20];
+
+ u8 reserved_at_180[0x20];
+
+ u8 invalid_command[0x20];
+
+ u8 quota_exceeded_command[0x20];
+
+ u8 internal_rq_out_of_buffer[0x20];
+
+ u8 cq_overrun[0x20];
+
+ u8 eth_wqe_too_small[0x20];
+
+ u8 reserved_at_240[0xdc0];
+};
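
These diagnostics come back through QUERY_VNIC_ENV (the command layouts appear further down); note the 0x40-wide discard counters need the 64-bit accessor. Sketch, with `dev` an mlx5_core_dev handle:

    u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
    u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
    u64 steer_discard;
    int err;

    MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
    err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
    if (!err)
            steer_discard = MLX5_GET64(query_vnic_env_out, out,
                                       vport_env.nic_receive_steering_discard);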
+
struct mlx5_ifc_traffic_counter_bits {
u8 packets[0x40];
@@ -2307,7 +3682,8 @@ struct mlx5_ifc_traffic_counter_bits {
struct mlx5_ifc_tisc_bits {
u8 strict_lag_tx_port_affinity[0x1];
- u8 reserved_at_1[0x3];
+ u8 tls_en[0x1];
+ u8 reserved_at_2[0x2];
u8 lag_tx_port_affinity[0x04];
u8 reserved_at_8[0x4];
@@ -2321,7 +3697,11 @@ struct mlx5_ifc_tisc_bits {
u8 reserved_at_140[0x8];
u8 underlay_qpn[0x18];
- u8 reserved_at_160[0x3a0];
+
+ u8 reserved_at_160[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_at_180[0x380];
};
enum {
@@ -2330,8 +3710,8 @@ enum {
};
enum {
- MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1,
- MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2,
+ MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO = BIT(0),
+ MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO = BIT(1),
};
enum {
@@ -2341,21 +3721,22 @@ enum {
};
enum {
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_ = 0x1,
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST_ = 0x2,
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST = 0x1,
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST = 0x2,
};
struct mlx5_ifc_tirc_bits {
u8 reserved_at_0[0x20];
u8 disp_type[0x4];
- u8 reserved_at_24[0x1c];
+ u8 tls_en[0x1];
+ u8 reserved_at_25[0x1b];
u8 reserved_at_40[0x40];
u8 reserved_at_80[0x4];
u8 lro_timeout_period_usecs[0x10];
- u8 lro_enable_mask[0x4];
+ u8 packet_merge_mask[0x4];
u8 lro_max_ip_payload_size[0x8];
u8 reserved_at_a0[0x40];
@@ -2437,12 +3818,15 @@ struct mlx5_ifc_sqc_bits {
u8 cd_master[0x1];
u8 fre[0x1];
u8 flush_in_error_en[0x1];
- u8 reserved_at_4[0x1];
+ u8 allow_multi_pkt_send_wqe[0x1];
u8 min_wqe_inline_mode[0x3];
u8 state[0x4];
u8 reg_umr[0x1];
u8 allow_swp[0x1];
- u8 reserved_at_e[0x12];
+ u8 hairpin[0x1];
+ u8 reserved_at_f[0xb];
+ u8 ts_format[0x2];
+ u8 reserved_at_1c[0x4];
u8 reserved_at_20[0x8];
u8 user_index[0x18];
@@ -2450,11 +3834,21 @@ struct mlx5_ifc_sqc_bits {
u8 reserved_at_40[0x8];
u8 cqn[0x18];
- u8 reserved_at_60[0x90];
+ u8 reserved_at_60[0x8];
+ u8 hairpin_peer_rq[0x18];
+
+ u8 reserved_at_80[0x10];
+ u8 hairpin_peer_vhca[0x10];
+
+ u8 reserved_at_a0[0x20];
+ u8 reserved_at_c0[0x8];
+ u8 ts_cqe_to_dest_cqn[0x18];
+
+ u8 reserved_at_e0[0x10];
u8 packet_pacing_rate_limit_index[0x10];
u8 tis_lst_sz[0x10];
- u8 reserved_at_110[0x10];
+ u8 qos_queue_group_id[0x10];
u8 reserved_at_120[0x40];
@@ -2469,6 +3863,14 @@ enum {
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT = 0x1,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2,
SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP = 0x4,
+};
+
+enum {
+ ELEMENT_TYPE_CAP_MASK_TASR = 1 << 0,
+ ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1,
+ ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2,
+ ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3,
};
struct mlx5_ifc_scheduling_context_bits {
@@ -2489,17 +3891,20 @@ struct mlx5_ifc_scheduling_context_bits {
};
struct mlx5_ifc_rqtc_bits {
- u8 reserved_at_0[0xa0];
+ u8 reserved_at_0[0xa0];
- u8 reserved_at_a0[0x10];
- u8 rqt_max_size[0x10];
+ u8 reserved_at_a0[0x5];
+ u8 list_q_type[0x3];
+ u8 reserved_at_a8[0x8];
+ u8 rqt_max_size[0x10];
- u8 reserved_at_c0[0x10];
- u8 rqt_actual_size[0x10];
+ u8 rq_vhca_id_format[0x1];
+ u8 reserved_at_c1[0xf];
+ u8 rqt_actual_size[0x10];
- u8 reserved_at_e0[0x6a0];
+ u8 reserved_at_e0[0x6a0];
- struct mlx5_ifc_rq_num_bits rq_num[0];
+ struct mlx5_ifc_rq_num_bits rq_num[];
};
enum {
@@ -2513,16 +3918,31 @@ enum {
MLX5_RQC_STATE_ERR = 0x3,
};
+enum {
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_BYTE = 0x0,
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE = 0x1,
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_PAGE = 0x2,
+};
+
+enum {
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_NO_MATCH = 0x0,
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED = 0x1,
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_FIVE_TUPLE = 0x2,
+};
+
struct mlx5_ifc_rqc_bits {
u8 rlky[0x1];
- u8 reserved_at_1[0x1];
+ u8 delay_drop_en[0x1];
u8 scatter_fcs[0x1];
u8 vsd[0x1];
u8 mem_rq_type[0x4];
u8 state[0x4];
u8 reserved_at_c[0x1];
u8 flush_in_error_en[0x1];
- u8 reserved_at_e[0x12];
+ u8 hairpin[0x1];
+ u8 reserved_at_f[0xb];
+ u8 ts_format[0x2];
+ u8 reserved_at_1c[0x4];
u8 reserved_at_20[0x8];
u8 user_index[0x18];
@@ -2536,7 +3956,19 @@ struct mlx5_ifc_rqc_bits {
u8 reserved_at_80[0x8];
u8 rmpn[0x18];
- u8 reserved_at_a0[0xe0];
+ u8 reserved_at_a0[0x8];
+ u8 hairpin_peer_sq[0x18];
+
+ u8 reserved_at_c0[0x10];
+ u8 hairpin_peer_vhca[0x10];
+
+ u8 reserved_at_e0[0x46];
+ u8 shampo_no_match_alignment_granularity[0x2];
+ u8 reserved_at_128[0x6];
+ u8 shampo_match_criteria_type[0x2];
+ u8 reservation_timeout[0x10];
+
+ u8 reserved_at_140[0x40];
struct mlx5_ifc_wq_bits wq;
};
@@ -2559,10 +3991,17 @@ struct mlx5_ifc_rmpc_bits {
struct mlx5_ifc_wq_bits wq;
};
+enum {
+ VHCA_ID_TYPE_HW = 0,
+ VHCA_ID_TYPE_SW = 1,
+};
+
struct mlx5_ifc_nic_vport_context_bits {
u8 reserved_at_0[0x5];
u8 min_wqe_inline_mode[0x3];
- u8 reserved_at_8[0x17];
+ u8 reserved_at_8[0x15];
+ u8 disable_mc_local_lb[0x1];
+ u8 disable_uc_local_lb[0x1];
u8 roce_en[0x1];
u8 arm_change_event[0x1];
@@ -2573,7 +4012,12 @@ struct mlx5_ifc_nic_vport_context_bits {
u8 event_on_mc_address_change[0x1];
u8 event_on_uc_address_change[0x1];
- u8 reserved_at_40[0xf0];
+ u8 vhca_id_type[0x1];
+ u8 reserved_at_41[0xb];
+ u8 affiliation_criteria[0x4];
+ u8 affiliated_vhca_id[0x10];
+
+ u8 reserved_at_60[0xd0];
u8 mtu[0x10];
@@ -2597,7 +4041,7 @@ struct mlx5_ifc_nic_vport_context_bits {
u8 reserved_at_7e0[0x20];
- u8 current_uc_mac_address[0][0x40];
+ u8 current_uc_mac_address[][0x40];
};
enum {
@@ -2605,12 +4049,18 @@ enum {
MLX5_MKC_ACCESS_MODE_MTT = 0x1,
MLX5_MKC_ACCESS_MODE_KLMS = 0x2,
MLX5_MKC_ACCESS_MODE_KSM = 0x3,
+ MLX5_MKC_ACCESS_MODE_SW_ICM = 0x4,
+ MLX5_MKC_ACCESS_MODE_MEMIC = 0x5,
};
struct mlx5_ifc_mkc_bits {
u8 reserved_at_0[0x1];
u8 free[0x1];
- u8 reserved_at_2[0xd];
+ u8 reserved_at_2[0x1];
+ u8 access_mode_4_2[0x3];
+ u8 reserved_at_6[0x7];
+ u8 relaxed_ordering_write[0x1];
+ u8 reserved_at_e[0x1];
u8 small_fence_on_rdma_read_response[0x1];
u8 umr_en[0x1];
u8 a[0x1];
@@ -2618,8 +4068,10 @@ struct mlx5_ifc_mkc_bits {
u8 rr[0x1];
u8 lw[0x1];
u8 lr[0x1];
- u8 access_mode[0x2];
- u8 reserved_at_18[0x8];
+ u8 access_mode_1_0[0x2];
+ u8 reserved_at_18[0x2];
+ u8 ma_translation_mode[0x2];
+ u8 reserved_at_1c[0x4];
u8 qpn[0x18];
u8 mkey_7_0[0x8];
@@ -2645,7 +4097,9 @@ struct mlx5_ifc_mkc_bits {
u8 translations_octword_size[0x20];
- u8 reserved_at_1c0[0x1b];
+ u8 reserved_at_1c0[0x19];
+ u8 relaxed_ordering_read[0x1];
+ u8 reserved_at_1da[0x1];
u8 log_page_size[0x5];
u8 reserved_at_1e0[0x20];
@@ -2710,12 +4164,14 @@ struct mlx5_ifc_hca_vport_context_bits {
};
struct mlx5_ifc_esw_vport_context_bits {
- u8 reserved_at_0[0x3];
+ u8 fdb_to_vport_reg_c[0x1];
+ u8 reserved_at_1[0x2];
u8 vport_svlan_strip[0x1];
u8 vport_cvlan_strip[0x1];
u8 vport_svlan_insert[0x1];
u8 vport_cvlan_insert[0x2];
- u8 reserved_at_8[0x18];
+ u8 fdb_to_vport_reg_c_id[0x8];
+ u8 reserved_at_10[0x10];
u8 reserved_at_20[0x20];
@@ -2726,7 +4182,11 @@ struct mlx5_ifc_esw_vport_context_bits {
u8 cvlan_pcp[0x3];
u8 cvlan_id[0xc];
- u8 reserved_at_60[0x7a0];
+ u8 reserved_at_60[0x720];
+
+ u8 sw_steering_vport_icm_address_rx[0x40];
+
+ u8 sw_steering_vport_icm_address_tx[0x40];
};
enum {
@@ -2760,8 +4220,8 @@ struct mlx5_ifc_eqc_bits {
u8 reserved_at_80[0x20];
- u8 reserved_at_a0[0x18];
- u8 intr[0x8];
+ u8 reserved_at_a0[0x14];
+ u8 intr[0xc];
u8 reserved_at_c0[0x3];
u8 log_page_size[0x5];
@@ -2856,7 +4316,8 @@ struct mlx5_ifc_dctc_bits {
u8 ecn[0x2];
u8 dscp[0x6];
- u8 reserved_at_1c0[0x40];
+ u8 reserved_at_1c0[0x20];
+ u8 ece[0x20];
};
enum {
@@ -2884,7 +4345,9 @@ enum {
struct mlx5_ifc_cqc_bits {
u8 status[0x4];
- u8 reserved_at_4[0x4];
+ u8 reserved_at_4[0x2];
+ u8 dbr_umem_valid[0x1];
+ u8 apu_cq[0x1];
u8 cqe_sz[0x3];
u8 cc[0x1];
u8 reserved_at_c[0x1];
@@ -2894,7 +4357,8 @@ struct mlx5_ifc_cqc_bits {
u8 cqe_comp_en[0x1];
u8 mini_cqe_res_format[0x2];
u8 st[0x4];
- u8 reserved_at_18[0x8];
+ u8 reserved_at_18[0x6];
+ u8 cqe_compression_layout[0x2];
u8 reserved_at_20[0x20];
@@ -2910,8 +4374,7 @@ struct mlx5_ifc_cqc_bits {
u8 cq_period[0xc];
u8 cq_max_count[0x10];
- u8 reserved_at_a0[0x18];
- u8 c_eqn[0x8];
+ u8 c_eqn_or_apu_element[0x20];
u8 reserved_at_c0[0x3];
u8 log_page_size[0x5];
@@ -2940,6 +4403,7 @@ union mlx5_ifc_cong_control_roce_ecn_auto_bits {
struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp;
struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp;
struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np;
+ struct mlx5_ifc_cong_control_r_roce_general_bits cong_control_r_roce_general;
u8 reserved_at_0[0x800];
};
@@ -3000,7 +4464,7 @@ struct mlx5_ifc_xrqc_bits {
struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context;
- u8 reserved_at_180[0x880];
+ u8 reserved_at_180[0x280];
struct mlx5_ifc_wq_bits wq;
};
@@ -3025,7 +4489,8 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
- struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+ struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits eth_per_tc_prio_grp_data_layout;
+ struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits eth_per_tc_congest_prio_grp_data_layout;
struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout;
struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs;
@@ -3060,13 +4525,19 @@ struct mlx5_ifc_health_buffer_bits {
u8 assert_callra[0x20];
- u8 reserved_at_140[0x40];
+ u8 reserved_at_140[0x20];
+
+ u8 time[0x20];
u8 fw_version[0x20];
u8 hw_id[0x20];
- u8 reserved_at_1c0[0x20];
+ u8 rfr[0x1];
+ u8 reserved_at_1c1[0x3];
+ u8 valid[0x1];
+ u8 severity[0x3];
+ u8 reserved_at_1c8[0x18];
u8 irisc_index[0x8];
u8 synd[0x8];
@@ -3118,12 +4589,13 @@ struct mlx5_ifc_teardown_hca_out_bits {
u8 reserved_at_40[0x3f];
- u8 force_state[0x1];
+ u8 state[0x1];
};
enum {
MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0,
MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE = 0x1,
+ MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN = 0x2,
};
struct mlx5_ifc_teardown_hca_in_bits {
@@ -3150,7 +4622,7 @@ struct mlx5_ifc_sqerr2rts_qp_out_bits {
struct mlx5_ifc_sqerr2rts_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -3180,7 +4652,7 @@ struct mlx5_ifc_sqd2rts_qp_out_bits {
struct mlx5_ifc_sqd2rts_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -3216,7 +4688,8 @@ struct mlx5_ifc_set_roce_address_in_bits {
u8 op_mod[0x10];
u8 roce_address_index[0x10];
- u8 reserved_at_50[0x10];
+ u8 reserved_at_50[0xc];
+ u8 vhca_port_num[0x4];
u8 reserved_at_60[0x20];
@@ -3321,7 +4794,11 @@ struct mlx5_ifc_set_hca_cap_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 other_function[0x1];
+ u8 reserved_at_41[0xf];
+ u8 function_id[0x10];
+
+ u8 reserved_at_60[0x20];
union mlx5_ifc_hca_cap_union_bits capability;
};
@@ -3330,7 +4807,8 @@ enum {
MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION = 0x0,
MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG = 0x1,
MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST = 0x2,
- MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3,
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_IPSEC_OBJ_ID = 0x4
};
struct mlx5_ifc_set_fte_out_bits {
@@ -3361,7 +4839,8 @@ struct mlx5_ifc_set_fte_in_bits {
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
- u8 reserved_at_c0[0x18];
+ u8 ignore_flow_level[0x1];
+ u8 reserved_at_c1[0x17];
u8 modify_enable_mask[0x8];
u8 reserved_at_e0[0x20];
@@ -3379,12 +4858,13 @@ struct mlx5_ifc_rts2rts_qp_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_rts2rts_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -3396,7 +4876,7 @@ struct mlx5_ifc_rts2rts_qp_in_bits {
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
@@ -3409,12 +4889,13 @@ struct mlx5_ifc_rtr2rts_qp_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_rtr2rts_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -3426,7 +4907,7 @@ struct mlx5_ifc_rtr2rts_qp_in_bits {
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
@@ -3439,12 +4920,13 @@ struct mlx5_ifc_rst2init_qp_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_rst2init_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -3456,7 +4938,7 @@ struct mlx5_ifc_rst2init_qp_in_bits {
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
@@ -3499,7 +4981,7 @@ struct mlx5_ifc_query_xrc_srq_out_bits {
u8 reserved_at_280[0x600];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_query_xrc_srq_in_bits {
@@ -3534,8 +5016,86 @@ struct mlx5_ifc_query_vport_state_out_bits {
};
enum {
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT = 0x1,
+ MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0,
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1,
+ MLX5_VPORT_STATE_OP_MOD_UPLINK = 0x2,
+};
+
+struct mlx5_ifc_arm_monitor_counter_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_arm_monitor_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+enum {
+ MLX5_QUERY_MONITOR_CNT_TYPE_PPCNT = 0x0,
+ MLX5_QUERY_MONITOR_CNT_TYPE_Q_COUNTER = 0x1,
+};
+
+enum mlx5_monitor_counter_ppcnt {
+ MLX5_QUERY_MONITOR_PPCNT_IN_RANGE_LENGTH_ERRORS = 0x0,
+ MLX5_QUERY_MONITOR_PPCNT_OUT_OF_RANGE_LENGTH_FIELD = 0x1,
+ MLX5_QUERY_MONITOR_PPCNT_FRAME_TOO_LONG_ERRORS = 0x2,
+ MLX5_QUERY_MONITOR_PPCNT_FRAME_CHECK_SEQUENCE_ERRORS = 0x3,
+ MLX5_QUERY_MONITOR_PPCNT_ALIGNMENT_ERRORS = 0x4,
+ MLX5_QUERY_MONITOR_PPCNT_IF_OUT_DISCARDS = 0x5,
+};
+
+enum {
+ MLX5_QUERY_MONITOR_Q_COUNTER_RX_OUT_OF_BUFFER = 0x4,
+};
+
+struct mlx5_ifc_monitor_counter_output_bits {
+ u8 reserved_at_0[0x4];
+ u8 type[0x4];
+ u8 reserved_at_8[0x8];
+ u8 counter[0x10];
+
+ u8 counter_group_id[0x20];
+};
+
+#define MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 (6)
+#define MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1 (1)
+#define MLX5_CMD_SET_MONITOR_NUM_COUNTER (MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 +\
+ MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1)
+
+struct mlx5_ifc_set_monitor_counter_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 num_of_counters[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ struct mlx5_ifc_monitor_counter_output_bits monitor_counter[MLX5_CMD_SET_MONITOR_NUM_COUNTER];
+};
+
+struct mlx5_ifc_set_monitor_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
};
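
A sketch of programming the monitor with one PPCNT counter and arming it, after which the device raises a monitor-counter event whenever the watched value changes; the IF_OUT_DISCARDS pick is illustrative:

    u32 set_in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
    u32 set_out[MLX5_ST_SZ_DW(set_monitor_counter_out)] = {};
    u32 arm_in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {};
    u32 arm_out[MLX5_ST_SZ_DW(arm_monitor_counter_out)] = {};
    int err;

    MLX5_SET(set_monitor_counter_in, set_in, opcode,
             MLX5_CMD_OP_SET_MONITOR_COUNTER);
    MLX5_SET(set_monitor_counter_in, set_in, num_of_counters, 1);
    MLX5_SET(set_monitor_counter_in, set_in, monitor_counter[0].type,
             MLX5_QUERY_MONITOR_CNT_TYPE_PPCNT);
    MLX5_SET(set_monitor_counter_in, set_in, monitor_counter[0].counter,
             MLX5_QUERY_MONITOR_PPCNT_IF_OUT_DISCARDS);
    err = mlx5_cmd_exec(dev, set_in, sizeof(set_in), set_out, sizeof(set_out));

    /* Arming is one-shot: re-arm after each event to keep them coming. */
    MLX5_SET(arm_monitor_counter_in, arm_in, opcode,
             MLX5_CMD_OP_ARM_MONITOR_COUNTER);
    err = mlx5_cmd_exec(dev, arm_in, sizeof(arm_in), arm_out, sizeof(arm_out));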
struct mlx5_ifc_query_vport_state_in_bits {
@@ -3552,6 +5112,35 @@ struct mlx5_ifc_query_vport_state_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_query_vnic_env_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_vnic_diagnostic_statistics_bits vport_env;
+};
+
+enum {
+ MLX5_QUERY_VNIC_ENV_IN_OP_MOD_VPORT_DIAG_STATISTICS = 0x0,
+};
+
+struct mlx5_ifc_query_vnic_env_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
struct mlx5_ifc_query_vport_counter_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -3671,7 +5260,7 @@ struct mlx5_ifc_query_srq_out_bits {
u8 reserved_at_280[0x600];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_query_srq_in_bits {
@@ -3723,7 +5312,11 @@ struct mlx5_ifc_query_special_contexts_out_bits {
u8 null_mkey[0x20];
- u8 reserved_at_a0[0x60];
+ u8 terminate_scatter_list_mkey[0x20];
+
+ u8 repeated_mkey[0x20];
+
+ u8 reserved_at_e0[0x20];
};
struct mlx5_ifc_query_special_contexts_in_bits {
@@ -3752,6 +5345,7 @@ struct mlx5_ifc_query_scheduling_element_out_bits {
enum {
SCHEDULING_HIERARCHY_E_SWITCH = 0x2,
+ SCHEDULING_HIERARCHY_NIC = 0x3,
};
struct mlx5_ifc_query_scheduling_element_in_bits {
@@ -3836,7 +5430,8 @@ struct mlx5_ifc_query_roce_address_in_bits {
u8 op_mod[0x10];
u8 roce_address_index[0x10];
- u8 reserved_at_50[0x10];
+ u8 reserved_at_50[0xc];
+ u8 vhca_port_num[0x4];
u8 reserved_at_60[0x20];
};
@@ -3865,6 +5460,37 @@ struct mlx5_ifc_query_rmp_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_cqe_error_syndrome_bits {
+ u8 hw_error_syndrome[0x8];
+ u8 hw_syndrome_type[0x4];
+ u8 reserved_at_c[0x4];
+ u8 vendor_error_syndrome[0x8];
+ u8 syndrome[0x8];
+};
+
+struct mlx5_ifc_qp_context_extension_bits {
+ u8 reserved_at_0[0x60];
+
+ struct mlx5_ifc_cqe_error_syndrome_bits error_syndrome;
+
+ u8 reserved_at_80[0x580];
+};
+
+struct mlx5_ifc_qpc_extension_and_pas_list_in_bits {
+ struct mlx5_ifc_qp_context_extension_bits qpc_data_extension;
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_qp_pas_list_in_bits {
+ struct mlx5_ifc_cmd_pas_bits pas[0];
+};
+
+union mlx5_ifc_qp_pas_or_qpc_ext_and_pas_bits {
+ struct mlx5_ifc_qp_pas_list_in_bits qp_pas_list;
+ struct mlx5_ifc_qpc_extension_and_pas_list_in_bits qpc_ext_and_pas_list;
+};
+
struct mlx5_ifc_query_qp_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -3875,13 +5501,13 @@ struct mlx5_ifc_query_qp_out_bits {
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
u8 reserved_at_800[0x80];
- u8 pas[0][0x40];
+ union mlx5_ifc_qp_pas_or_qpc_ext_and_pas_bits qp_pas_or_qpc_ext_and_pas;
};
struct mlx5_ifc_query_qp_in_bits {
@@ -3891,7 +5517,8 @@ struct mlx5_ifc_query_qp_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x8];
+ u8 qpc_ext[0x1];
+ u8 reserved_at_41[0x7];
u8 qpn[0x18];
u8 reserved_at_60[0x20];
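
With qpc_ext set, the tail of the QUERY_QP output is interpreted as the union above instead of a bare PAS list, which is how the CQE error syndrome becomes reachable. A hedged sketch (nested field paths resolve through offsetof(); `out`/`outlen` sized for query_qp_out are assumed):

    MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
    MLX5_SET(query_qp_in, in, qpc_ext, 1);
    MLX5_SET(query_qp_in, in, qpn, qpn);
    err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
    if (!err)
            syndrome = MLX5_GET(query_qp_out, out,
                                qp_pas_or_qpc_ext_and_pas.qpc_ext_and_pas_list.
                                qpc_data_extension.error_syndrome.syndrome);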
@@ -3947,7 +5574,59 @@ struct mlx5_ifc_query_q_counter_out_bits {
u8 local_ack_timeout_err[0x20];
- u8 reserved_at_320[0x4e0];
+ u8 reserved_at_320[0xa0];
+
+ u8 resp_local_length_error[0x20];
+
+ u8 req_local_length_error[0x20];
+
+ u8 resp_local_qp_error[0x20];
+
+ u8 local_operation_error[0x20];
+
+ u8 resp_local_protection[0x20];
+
+ u8 req_local_protection[0x20];
+
+ u8 resp_cqe_error[0x20];
+
+ u8 req_cqe_error[0x20];
+
+ u8 req_mw_binding[0x20];
+
+ u8 req_bad_response[0x20];
+
+ u8 req_remote_invalid_request[0x20];
+
+ u8 resp_remote_invalid_request[0x20];
+
+ u8 req_remote_access_errors[0x20];
+
+ u8 resp_remote_access_errors[0x20];
+
+ u8 req_remote_operation_errors[0x20];
+
+ u8 req_transport_retries_exceeded[0x20];
+
+ u8 cq_overflow[0x20];
+
+ u8 resp_cqe_flush_error[0x20];
+
+ u8 req_cqe_flush_error[0x20];
+
+ u8 reserved_at_620[0x20];
+
+ u8 roce_adp_retrans[0x20];
+
+ u8 roce_adp_retrans_to[0x20];
+
+ u8 roce_slow_restart[0x20];
+
+ u8 roce_slow_restart_cnps[0x20];
+
+ u8 roce_slow_restart_trans[0x20];
+
+ u8 reserved_at_6e0[0x120];
};
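
The RoCE adaptive-retransmission and slow-restart counters appended here are read per counter set with QUERY_Q_COUNTER. Sketch, with `set_id` a previously allocated counter set:

    u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
    u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
    u32 adp_retrans;
    int err;

    MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
    MLX5_SET(query_q_counter_in, in, counter_set_id, set_id);
    err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
    if (!err)
            adp_retrans = MLX5_GET(query_q_counter_out, out, roce_adp_retrans);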
struct mlx5_ifc_query_q_counter_in_bits {
@@ -3957,10 +5636,15 @@ struct mlx5_ifc_query_q_counter_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x80];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x60];
u8 clear[0x1];
- u8 reserved_at_c1[0x1f];
+ u8 aggregate[0x1];
+ u8 reserved_at_c2[0x1e];
u8 reserved_at_e0[0x18];
u8 counter_set_id[0x8];
@@ -3972,7 +5656,8 @@ struct mlx5_ifc_query_pages_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x10];
+ u8 embedded_cpu_function[0x1];
+ u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 num_pages[0x20];
@@ -3991,7 +5676,8 @@ struct mlx5_ifc_query_pages_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x10];
+ u8 embedded_cpu_function[0x1];
+ u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
@@ -4160,7 +5846,7 @@ struct mlx5_ifc_query_hca_vport_pkey_out_bits {
u8 reserved_at_40[0x40];
- struct mlx5_ifc_pkey_bits pkey[0];
+ struct mlx5_ifc_pkey_bits pkey[];
};
struct mlx5_ifc_query_hca_vport_pkey_in_bits {
@@ -4196,7 +5882,7 @@ struct mlx5_ifc_query_hca_vport_gid_out_bits {
u8 gids_num[0x10];
u8 reserved_at_70[0x10];
- struct mlx5_ifc_array128_auto_bits gid[0];
+ struct mlx5_ifc_array128_auto_bits gid[];
};
struct mlx5_ifc_query_hca_vport_gid_in_bits {
@@ -4259,23 +5945,98 @@ struct mlx5_ifc_query_hca_cap_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
+ u8 other_function[0x1];
+ u8 reserved_at_41[0xf];
+ u8 function_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_other_hca_cap_bits {
+ u8 roce[0x1];
+ u8 reserved_at_1[0x27f];
+};
+
+struct mlx5_ifc_query_other_hca_cap_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_other_hca_cap_bits other_capability;
};
-struct mlx5_ifc_query_flow_table_out_bits {
+struct mlx5_ifc_query_other_hca_cap_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_modify_other_hca_cap_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_at_40[0x80];
+ u8 reserved_at_40[0x40];
+};
- u8 reserved_at_c0[0x8];
+struct mlx5_ifc_modify_other_hca_cap_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 function_id[0x10];
+ u8 field_select[0x20];
+
+ struct mlx5_ifc_other_hca_cap_bits other_capability;
+};
+
+struct mlx5_ifc_flow_table_context_bits {
+ u8 reformat_en[0x1];
+ u8 decap_en[0x1];
+ u8 sw_owner[0x1];
+ u8 termination_table[0x1];
+ u8 table_miss_action[0x4];
u8 level[0x8];
- u8 reserved_at_d0[0x8];
+ u8 reserved_at_10[0x8];
u8 log_size[0x8];
- u8 reserved_at_e0[0x120];
+ u8 reserved_at_20[0x8];
+ u8 table_miss_id[0x18];
+
+ u8 reserved_at_40[0x8];
+ u8 lag_master_next_table_id[0x18];
+
+ u8 reserved_at_60[0x60];
+
+ u8 sw_owner_icm_root_1[0x40];
+
+ u8 sw_owner_icm_root_0[0x40];
+
+};
+
+struct mlx5_ifc_query_flow_table_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x80];
+
+ struct mlx5_ifc_flow_table_context_bits flow_table_context;
};
struct mlx5_ifc_query_flow_table_in_bits {
@@ -4329,10 +6090,335 @@ struct mlx5_ifc_query_fte_in_bits {
u8 reserved_at_120[0xe0];
};
+struct mlx5_ifc_match_definer_format_0_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 metadata_reg_c_0[0x20];
+
+ u8 metadata_reg_c_1[0x20];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_dmac_15_0[0x10];
+ u8 outer_ethertype[0x10];
+
+ u8 reserved_at_180[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 outer_ip_frag[0x1];
+ u8 outer_qp_type[0x2];
+ u8 outer_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 outer_l3_type[0x2];
+ u8 outer_l4_type[0x2];
+ u8 outer_first_vlan_type[0x2];
+ u8 outer_first_vlan_prio[0x3];
+ u8 outer_first_vlan_cfi[0x1];
+ u8 outer_first_vlan_vid[0xc];
+
+ u8 outer_l4_type_ext[0x4];
+ u8 reserved_at_1a4[0x2];
+ u8 outer_ipsec_layer[0x2];
+ u8 outer_l2_type[0x2];
+ u8 force_lb[0x1];
+ u8 outer_l2_ok[0x1];
+ u8 outer_l3_ok[0x1];
+ u8 outer_l4_ok[0x1];
+ u8 outer_second_vlan_type[0x2];
+ u8 outer_second_vlan_prio[0x3];
+ u8 outer_second_vlan_cfi[0x1];
+ u8 outer_second_vlan_vid[0xc];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 inner_ipv4_checksum_ok[0x1];
+ u8 inner_l4_checksum_ok[0x1];
+ u8 outer_ipv4_checksum_ok[0x1];
+ u8 outer_l4_checksum_ok[0x1];
+ u8 inner_l3_ok[0x1];
+ u8 inner_l4_ok[0x1];
+ u8 outer_l3_ok_duplicate[0x1];
+ u8 outer_l4_ok_duplicate[0x1];
+ u8 outer_tcp_cwr[0x1];
+ u8 outer_tcp_ece[0x1];
+ u8 outer_tcp_urg[0x1];
+ u8 outer_tcp_ack[0x1];
+ u8 outer_tcp_psh[0x1];
+ u8 outer_tcp_rst[0x1];
+ u8 outer_tcp_syn[0x1];
+ u8 outer_tcp_fin[0x1];
+};
+
+struct mlx5_ifc_match_definer_format_22_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 outer_ip_src_addr[0x20];
+
+ u8 outer_ip_dest_addr[0x20];
+
+ u8 outer_l4_sport[0x10];
+ u8 outer_l4_dport[0x10];
+
+ u8 reserved_at_160[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 outer_ip_frag[0x1];
+ u8 outer_qp_type[0x2];
+ u8 outer_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 outer_l3_type[0x2];
+ u8 outer_l4_type[0x2];
+ u8 outer_first_vlan_type[0x2];
+ u8 outer_first_vlan_prio[0x3];
+ u8 outer_first_vlan_cfi[0x1];
+ u8 outer_first_vlan_vid[0xc];
+
+ u8 metadata_reg_c_0[0x20];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 outer_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_format_23_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 inner_ip_src_addr[0x20];
+
+ u8 inner_ip_dest_addr[0x20];
+
+ u8 inner_l4_sport[0x10];
+ u8 inner_l4_dport[0x10];
+
+ u8 reserved_at_160[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 inner_ip_frag[0x1];
+ u8 inner_qp_type[0x2];
+ u8 inner_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 inner_l3_type[0x2];
+ u8 inner_l4_type[0x2];
+ u8 inner_first_vlan_type[0x2];
+ u8 inner_first_vlan_prio[0x3];
+ u8 inner_first_vlan_cfi[0x1];
+ u8 inner_first_vlan_vid[0xc];
+
+ u8 tunnel_header_0[0x20];
+
+ u8 inner_dmac_47_16[0x20];
+
+ u8 inner_smac_47_16[0x20];
+
+ u8 inner_smac_15_0[0x10];
+ u8 inner_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_format_29_bits {
+ u8 reserved_at_0[0xc0];
+
+ u8 outer_ip_dest_addr[0x80];
+
+ u8 outer_ip_src_addr[0x80];
+
+ u8 outer_l4_sport[0x10];
+ u8 outer_l4_dport[0x10];
+
+ u8 reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_match_definer_format_30_bits {
+ u8 reserved_at_0[0xa0];
+
+ u8 outer_ip_dest_addr[0x80];
+
+ u8 outer_ip_src_addr[0x80];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 outer_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_format_31_bits {
+ u8 reserved_at_0[0xc0];
+
+ u8 inner_ip_dest_addr[0x80];
+
+ u8 inner_ip_src_addr[0x80];
+
+ u8 inner_l4_sport[0x10];
+ u8 inner_l4_dport[0x10];
+
+ u8 reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_match_definer_format_32_bits {
+ u8 reserved_at_0[0xa0];
+
+ u8 inner_ip_dest_addr[0x80];
+
+ u8 inner_ip_src_addr[0x80];
+
+ u8 inner_dmac_47_16[0x20];
+
+ u8 inner_smac_47_16[0x20];
+
+ u8 inner_smac_15_0[0x10];
+ u8 inner_dmac_15_0[0x10];
+};
+
+enum {
+ MLX5_IFC_DEFINER_FORMAT_ID_SELECT = 61,
+};
+
+#define MLX5_IFC_DEFINER_FORMAT_OFFSET_UNUSED 0x0
+#define MLX5_IFC_DEFINER_FORMAT_OFFSET_OUTER_ETH_PKT_LEN 0x48
+#define MLX5_IFC_DEFINER_DW_SELECTORS_NUM 9
+#define MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM 8
+
+struct mlx5_ifc_match_definer_match_mask_bits {
+ u8 reserved_at_1c0[5][0x20];
+ u8 match_dw_8[0x20];
+ u8 match_dw_7[0x20];
+ u8 match_dw_6[0x20];
+ u8 match_dw_5[0x20];
+ u8 match_dw_4[0x20];
+ u8 match_dw_3[0x20];
+ u8 match_dw_2[0x20];
+ u8 match_dw_1[0x20];
+ u8 match_dw_0[0x20];
+
+ u8 match_byte_7[0x8];
+ u8 match_byte_6[0x8];
+ u8 match_byte_5[0x8];
+ u8 match_byte_4[0x8];
+
+ u8 match_byte_3[0x8];
+ u8 match_byte_2[0x8];
+ u8 match_byte_1[0x8];
+ u8 match_byte_0[0x8];
+};
+
+struct mlx5_ifc_match_definer_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x40];
+
+ u8 reserved_at_80[0x10];
+ u8 format_id[0x10];
+
+ u8 reserved_at_a0[0x60];
+
+ u8 format_select_dw3[0x8];
+ u8 format_select_dw2[0x8];
+ u8 format_select_dw1[0x8];
+ u8 format_select_dw0[0x8];
+
+ u8 format_select_dw7[0x8];
+ u8 format_select_dw6[0x8];
+ u8 format_select_dw5[0x8];
+ u8 format_select_dw4[0x8];
+
+ u8 reserved_at_100[0x18];
+ u8 format_select_dw8[0x8];
+
+ u8 reserved_at_120[0x20];
+
+ u8 format_select_byte3[0x8];
+ u8 format_select_byte2[0x8];
+ u8 format_select_byte1[0x8];
+ u8 format_select_byte0[0x8];
+
+ u8 format_select_byte7[0x8];
+ u8 format_select_byte6[0x8];
+ u8 format_select_byte5[0x8];
+ u8 format_select_byte4[0x8];
+
+ u8 reserved_at_180[0x40];
+
+ union {
+ struct {
+ u8 match_mask[16][0x20];
+ };
+ struct mlx5_ifc_match_definer_match_mask_bits match_mask_format;
+ };
+};
+
+struct mlx5_ifc_general_obj_create_param_bits {
+ u8 alias_object[0x1];
+ u8 reserved_at_1[0x2];
+ u8 log_obj_range[0x5];
+ u8 reserved_at_8[0x18];
+};
+
+struct mlx5_ifc_general_obj_query_param_bits {
+ u8 alias_object[0x1];
+ u8 obj_offset[0x1f];
+};
+
+struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 vhca_tunnel_id[0x10];
+ u8 obj_type[0x10];
+
+ u8 obj_id[0x20];
+
+ union {
+ struct mlx5_ifc_general_obj_create_param_bits create;
+ struct mlx5_ifc_general_obj_query_param_bits query;
+ } op_param;
+};
+
+struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 obj_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_modify_header_arg_bits {
+ u8 reserved_at_0[0x80];
+
+ u8 reserved_at_80[0x8];
+ u8 access_pd[0x18];
+};
+
+struct mlx5_ifc_create_modify_header_arg_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_modify_header_arg_bits arg;
+};
+
+struct mlx5_ifc_create_match_definer_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_match_definer_bits obj_context;
+};
+
+struct mlx5_ifc_create_match_definer_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
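
Definers are created through the general-object path above. A sketch; MLX5_OBJ_TYPE_MATCH_DEFINER as the obj_type value is an assumption, and format 22 with a single enabled match dword is purely illustrative:

    u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
    u32 out[MLX5_ST_SZ_DW(create_match_definer_out)] = {};
    void *obj, *mask;
    u32 definer_id;
    int err;

    MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
             MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
    MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_MATCH_DEFINER);

    obj = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
    MLX5_SET(match_definer, obj, format_id, 22);    /* outer 5-tuple layout */
    mask = MLX5_ADDR_OF(match_definer, obj, match_mask_format);
    MLX5_SET(match_definer_match_mask, mask, match_dw_0, 0xffffffff);

    err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
    if (!err)
            definer_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);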
+
enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+ MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
+ MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_3 = 0x4,
+ MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_4 = 0x5,
+ MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_5 = 0x6,
};
struct mlx5_ifc_query_flow_group_out_bits {
@@ -4387,7 +6473,7 @@ struct mlx5_ifc_query_flow_counter_out_bits {
u8 reserved_at_40[0x40];
- struct mlx5_ifc_traffic_counter_bits flow_statistics[0];
+ struct mlx5_ifc_traffic_counter_bits flow_statistics[];
};
struct mlx5_ifc_query_flow_counter_in_bits {
@@ -4403,8 +6489,7 @@ struct mlx5_ifc_query_flow_counter_in_bits {
u8 reserved_at_c1[0xf];
u8 num_of_counters[0x10];
- u8 reserved_at_e0[0x10];
- u8 flow_counter_id[0x10];
+ u8 flow_counter_id[0x20];
};
struct mlx5_ifc_query_esw_vport_context_out_bits {
@@ -4442,7 +6527,8 @@ struct mlx5_ifc_modify_esw_vport_context_out_bits {
};
struct mlx5_ifc_esw_vport_context_fields_select_bits {
- u8 reserved_at_0[0x1c];
+ u8 reserved_at_0[0x1b];
+ u8 fdb_to_vport_reg_c_id[0x1];
u8 vport_cvlan_insert[0x1];
u8 vport_svlan_insert[0x1];
u8 vport_cvlan_strip[0x1];
@@ -4481,7 +6567,7 @@ struct mlx5_ifc_query_eq_out_bits {
u8 reserved_at_300[0x580];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_query_eq_in_bits {
@@ -4497,19 +6583,21 @@ struct mlx5_ifc_query_eq_in_bits {
u8 reserved_at_60[0x20];
};
-struct mlx5_ifc_encap_header_in_bits {
- u8 reserved_at_0[0x5];
- u8 header_type[0x3];
- u8 reserved_at_8[0xe];
- u8 encap_header_size[0xa];
+struct mlx5_ifc_packet_reformat_context_in_bits {
+ u8 reformat_type[0x8];
+ u8 reserved_at_8[0x4];
+ u8 reformat_param_0[0x4];
+ u8 reserved_at_10[0x6];
+ u8 reformat_data_size[0xa];
- u8 reserved_at_20[0x10];
- u8 encap_header[2][0x8];
+ u8 reformat_param_1[0x8];
+ u8 reserved_at_28[0x8];
+ u8 reformat_data[2][0x8];
- u8 more_encap_header[0][0x8];
+ u8 more_reformat_data[][0x8];
};
-struct mlx5_ifc_query_encap_header_out_bits {
+struct mlx5_ifc_query_packet_reformat_context_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4517,33 +6605,56 @@ struct mlx5_ifc_query_encap_header_out_bits {
u8 reserved_at_40[0xa0];
- struct mlx5_ifc_encap_header_in_bits encap_header[0];
+ struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context[];
};
-struct mlx5_ifc_query_encap_header_in_bits {
+struct mlx5_ifc_query_packet_reformat_context_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 encap_id[0x20];
+ u8 packet_reformat_id[0x20];
u8 reserved_at_60[0xa0];
};
-struct mlx5_ifc_alloc_encap_header_out_bits {
+struct mlx5_ifc_alloc_packet_reformat_context_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 encap_id[0x20];
+ u8 packet_reformat_id[0x20];
u8 reserved_at_60[0x20];
};
-struct mlx5_ifc_alloc_encap_header_in_bits {
+enum {
+ MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START = 0x1,
+ MLX5_REFORMAT_CONTEXT_ANCHOR_IP_START = 0x7,
+ MLX5_REFORMAT_CONTEXT_ANCHOR_TCP_UDP_START = 0x9,
+};
+
+enum mlx5_reformat_ctx_type {
+ MLX5_REFORMAT_TYPE_L2_TO_VXLAN = 0x0,
+ MLX5_REFORMAT_TYPE_L2_TO_NVGRE = 0x1,
+ MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
+ MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3,
+ MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4 = 0x5,
+ MLX5_REFORMAT_TYPE_L2_TO_L3_ESP_TUNNEL = 0x6,
+ MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT = 0x8,
+ MLX5_REFORMAT_TYPE_L3_ESP_TUNNEL_TO_L2 = 0x9,
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6 = 0xb,
+ MLX5_REFORMAT_TYPE_INSERT_HDR = 0xf,
+ MLX5_REFORMAT_TYPE_REMOVE_HDR = 0x10,
+ MLX5_REFORMAT_TYPE_ADD_MACSEC = 0x11,
+ MLX5_REFORMAT_TYPE_DEL_MACSEC = 0x12,
+};
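
Reformat contexts are allocated once and then referenced from flow entries through packet_reformat_id. A condensed sketch of the alloc command just below, modeled on the driver's flow-steering command code; `data`/`size` carrying the encapsulation header bytes are assumptions:

    static int alloc_l2_to_vxlan_reformat(struct mlx5_core_dev *dev,
                                          const void *data, int size,
                                          u32 *reformat_id)
    {
            u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
            int inlen = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) + size;
            void *prctx, *reformat;
            u32 *in;
            int err;

            in = kvzalloc(inlen, GFP_KERNEL);
            if (!in)
                    return -ENOMEM;

            MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
                     MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
            prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in,
                                 packet_reformat_context);
            MLX5_SET(packet_reformat_context_in, prctx, reformat_type,
                     MLX5_REFORMAT_TYPE_L2_TO_VXLAN);
            MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, size);
            reformat = MLX5_ADDR_OF(packet_reformat_context_in, prctx,
                                    reformat_data);
            memcpy(reformat, data, size);

            err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
            if (!err)
                    *reformat_id = MLX5_GET(alloc_packet_reformat_context_out,
                                            out, packet_reformat_id);
            kvfree(in);
            return err;
    }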
+
+struct mlx5_ifc_alloc_packet_reformat_context_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
@@ -4552,10 +6663,10 @@ struct mlx5_ifc_alloc_encap_header_in_bits {
u8 reserved_at_40[0xa0];
- struct mlx5_ifc_encap_header_in_bits encap_header;
+ struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context;
};
-struct mlx5_ifc_dealloc_encap_header_out_bits {
+struct mlx5_ifc_dealloc_packet_reformat_context_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4564,14 +6675,14 @@ struct mlx5_ifc_dealloc_encap_header_out_bits {
u8 reserved_at_40[0x40];
};
-struct mlx5_ifc_dealloc_encap_header_in_bits {
+struct mlx5_ifc_dealloc_packet_reformat_context_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 reserved_20[0x10];
u8 op_mod[0x10];
- u8 encap_id[0x20];
+ u8 packet_reformat_id[0x20];
u8 reserved_60[0x20];
};
@@ -4595,15 +6706,32 @@ struct mlx5_ifc_add_action_in_bits {
u8 data[0x20];
};
-union mlx5_ifc_set_action_in_add_action_in_auto_bits {
- struct mlx5_ifc_set_action_in_bits set_action_in;
- struct mlx5_ifc_add_action_in_bits add_action_in;
+struct mlx5_ifc_copy_action_in_bits {
+ u8 action_type[0x4];
+ u8 src_field[0xc];
+ u8 reserved_at_10[0x3];
+ u8 src_offset[0x5];
+ u8 reserved_at_18[0x3];
+ u8 length[0x5];
+
+ u8 reserved_at_20[0x4];
+ u8 dst_field[0xc];
+ u8 reserved_at_30[0x3];
+ u8 dst_offset[0x5];
+ u8 reserved_at_38[0x8];
+};
+
+union mlx5_ifc_set_add_copy_action_in_auto_bits {
+ struct mlx5_ifc_set_action_in_bits set_action_in;
+ struct mlx5_ifc_add_action_in_bits add_action_in;
+ struct mlx5_ifc_copy_action_in_bits copy_action_in;
u8 reserved_at_0[0x40];
};
enum {
MLX5_ACTION_TYPE_SET = 0x1,
MLX5_ACTION_TYPE_ADD = 0x2,
+ MLX5_ACTION_TYPE_COPY = 0x3,
};
enum {
@@ -4629,7 +6757,23 @@ enum {
MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0 = 0x14,
MLX5_ACTION_IN_FIELD_OUT_SIPV4 = 0x15,
MLX5_ACTION_IN_FIELD_OUT_DIPV4 = 0x16,
+ MLX5_ACTION_IN_FIELD_OUT_FIRST_VID = 0x17,
MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT = 0x47,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_A = 0x49,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_B = 0x50,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_0 = 0x51,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_1 = 0x52,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_2 = 0x53,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_3 = 0x54,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_4 = 0x55,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_5 = 0x56,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_6 = 0x57,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_7 = 0x58,
+ MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59,
+ MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B,
+ MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME = 0x5D,
+ MLX5_ACTION_IN_FIELD_OUT_EMD_47_32 = 0x6F,
+ MLX5_ACTION_IN_FIELD_OUT_EMD_31_0 = 0x70,
};
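
The new COPY action moves one header or metadata field onto another within a single modify-header context; the eswitch uses this to shuttle metadata registers. A sketch of one action entry; length == 0 selecting the whole 32-bit field is a PRM convention assumed here:

    u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};

    MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
    MLX5_SET(copy_action_in, action, src_field,
             MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
    MLX5_SET(copy_action_in, action, src_offset, 0);
    MLX5_SET(copy_action_in, action, dst_field,
             MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
    MLX5_SET(copy_action_in, action, dst_offset, 0);
    MLX5_SET(copy_action_in, action, length, 0);    /* 0 == all 32 bits */

The populated entry then goes into the actions[] array of alloc_modify_header_context_in further down.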
struct mlx5_ifc_alloc_modify_header_context_out_bits {
@@ -4656,7 +6800,7 @@ struct mlx5_ifc_alloc_modify_header_context_in_bits {
u8 reserved_at_68[0x10];
u8 num_of_actions[0x8];
- union mlx5_ifc_set_action_in_add_action_in_auto_bits actions[0];
+ union mlx5_ifc_set_add_copy_action_in_auto_bits actions[];
};
struct mlx5_ifc_dealloc_modify_header_context_out_bits {
@@ -4680,6 +6824,18 @@ struct mlx5_ifc_dealloc_modify_header_context_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_query_modify_header_context_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 modify_header_id[0x20];
+
+ u8 reserved_at_60[0xa0];
+};
+
struct mlx5_ifc_query_dct_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4718,7 +6874,7 @@ struct mlx5_ifc_query_cq_out_bits {
u8 reserved_at_280[0x600];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_query_cq_in_bits {
@@ -4869,7 +7025,7 @@ struct mlx5_ifc_qp_2rst_out_bits {
struct mlx5_ifc_qp_2rst_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -4891,7 +7047,7 @@ struct mlx5_ifc_qp_2err_out_bits {
struct mlx5_ifc_qp_2err_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -4991,7 +7147,7 @@ struct mlx5_ifc_modify_tis_bitmask_bits {
struct mlx5_ifc_modify_tis_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5016,7 +7172,7 @@ struct mlx5_ifc_modify_tir_bitmask_bits {
u8 reserved_at_3c[0x1];
u8 hash[0x1];
u8 reserved_at_3e[0x1];
- u8 lro[0x1];
+ u8 packet_merge[0x1];
};
struct mlx5_ifc_modify_tir_out_bits {
@@ -5030,7 +7186,7 @@ struct mlx5_ifc_modify_tir_out_bits {
struct mlx5_ifc_modify_tir_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5058,7 +7214,7 @@ struct mlx5_ifc_modify_sq_out_bits {
struct mlx5_ifc_modify_sq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5131,7 +7287,7 @@ struct mlx5_ifc_rqt_bitmask_bits {
struct mlx5_ifc_modify_rqt_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5165,7 +7321,7 @@ enum {
struct mlx5_ifc_modify_rq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5201,7 +7357,7 @@ struct mlx5_ifc_rmp_bitmask_bits {
struct mlx5_ifc_modify_rmp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5229,7 +7385,11 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
};
struct mlx5_ifc_modify_nic_vport_field_select_bits {
- u8 reserved_at_0[0x16];
+ u8 reserved_at_0[0x12];
+ u8 affiliation[0x1];
+ u8 reserved_at_13[0x1];
+ u8 disable_uc_local_lb[0x1];
+ u8 disable_mc_local_lb[0x1];
u8 node_guid[0x1];
u8 port_guid[0x1];
u8 min_inline[0x1];
@@ -5302,7 +7462,7 @@ enum {
struct mlx5_ifc_modify_cq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5314,9 +7474,14 @@ struct mlx5_ifc_modify_cq_in_bits {
struct mlx5_ifc_cqc_bits cq_context;
- u8 reserved_at_280[0x600];
+ u8 reserved_at_280[0x60];
- u8 pas[0][0x40];
+ u8 cq_umem_valid[0x1];
+ u8 reserved_at_2e1[0x1f];
+
+ u8 reserved_at_300[0x580];
+
+ u8 pas[][0x40];
};
struct mlx5_ifc_modify_cong_status_out_bits {
@@ -5380,7 +7545,7 @@ struct mlx5_ifc_manage_pages_out_bits {
u8 reserved_at_60[0x20];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
enum {
@@ -5396,12 +7561,13 @@ struct mlx5_ifc_manage_pages_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x10];
+ u8 embedded_cpu_function[0x1];
+ u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 input_num_entries[0x20];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_mad_ifc_out_bits {
@@ -5447,7 +7613,13 @@ struct mlx5_ifc_init_hca_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x2];
+ u8 sw_vhca_id[0xe];
+ u8 reserved_at_70[0x10];
+
+ u8 sw_owner_id[4][0x20];
};
struct mlx5_ifc_init2rtr_qp_out_bits {
@@ -5456,12 +7628,13 @@ struct mlx5_ifc_init2rtr_qp_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_init2rtr_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5473,7 +7646,7 @@ struct mlx5_ifc_init2rtr_qp_in_bits {
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
@@ -5486,12 +7659,13 @@ struct mlx5_ifc_init2init_qp_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_init2init_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5503,7 +7677,7 @@ struct mlx5_ifc_init2init_qp_in_bits {
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
@@ -5573,7 +7747,8 @@ struct mlx5_ifc_enable_hca_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x10];
+ u8 embedded_cpu_function[0x1];
+ u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
@@ -5590,7 +7765,7 @@ struct mlx5_ifc_drain_dct_out_bits {
struct mlx5_ifc_drain_dct_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5617,7 +7792,8 @@ struct mlx5_ifc_disable_hca_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x10];
+ u8 embedded_cpu_function[0x1];
+ u8 reserved_at_41[0xf];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
@@ -5634,7 +7810,7 @@ struct mlx5_ifc_detach_from_mcg_out_bits {
struct mlx5_ifc_detach_from_mcg_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5658,7 +7834,7 @@ struct mlx5_ifc_destroy_xrq_out_bits {
struct mlx5_ifc_destroy_xrq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5680,7 +7856,7 @@ struct mlx5_ifc_destroy_xrc_srq_out_bits {
struct mlx5_ifc_destroy_xrc_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5702,7 +7878,7 @@ struct mlx5_ifc_destroy_tis_out_bits {
struct mlx5_ifc_destroy_tis_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5724,7 +7900,7 @@ struct mlx5_ifc_destroy_tir_out_bits {
struct mlx5_ifc_destroy_tir_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5746,7 +7922,7 @@ struct mlx5_ifc_destroy_srq_out_bits {
struct mlx5_ifc_destroy_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5768,7 +7944,7 @@ struct mlx5_ifc_destroy_sq_out_bits {
struct mlx5_ifc_destroy_sq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5814,7 +7990,7 @@ struct mlx5_ifc_destroy_rqt_out_bits {
struct mlx5_ifc_destroy_rqt_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5836,7 +8012,7 @@ struct mlx5_ifc_destroy_rq_out_bits {
struct mlx5_ifc_destroy_rq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5847,6 +8023,28 @@ struct mlx5_ifc_destroy_rq_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_set_delay_drop_params_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x10];
+ u8 delay_drop_timeout[0x10];
+};
+
+struct mlx5_ifc_set_delay_drop_params_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
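
SET_DELAY_DROP_PARAMS is device-global: a single timeout shared by every RQ that sets the delay_drop_en bit added to the rqc above. A sketch; the 100-usec device units follow the driver's usage and are an assumption:

    u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {};
    u32 out[MLX5_ST_SZ_DW(set_delay_drop_params_out)] = {};
    int err;

    MLX5_SET(set_delay_drop_params_in, in, opcode,
             MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
    MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
             timeout_usec / 100);
    err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));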
+
struct mlx5_ifc_destroy_rmp_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -5858,7 +8056,7 @@ struct mlx5_ifc_destroy_rmp_out_bits {
struct mlx5_ifc_destroy_rmp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5880,7 +8078,7 @@ struct mlx5_ifc_destroy_qp_out_bits {
struct mlx5_ifc_destroy_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5924,7 +8122,7 @@ struct mlx5_ifc_destroy_mkey_out_bits {
struct mlx5_ifc_destroy_mkey_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6032,7 +8230,7 @@ struct mlx5_ifc_destroy_dct_out_bits {
struct mlx5_ifc_destroy_dct_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6054,7 +8252,7 @@ struct mlx5_ifc_destroy_cq_out_bits {
struct mlx5_ifc_destroy_cq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6157,7 +8355,7 @@ struct mlx5_ifc_dealloc_xrcd_out_bits {
struct mlx5_ifc_dealloc_xrcd_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6179,7 +8377,7 @@ struct mlx5_ifc_dealloc_uar_out_bits {
struct mlx5_ifc_dealloc_uar_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6201,7 +8399,7 @@ struct mlx5_ifc_dealloc_transport_domain_out_bits {
struct mlx5_ifc_dealloc_transport_domain_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6245,7 +8443,7 @@ struct mlx5_ifc_dealloc_pd_out_bits {
struct mlx5_ifc_dealloc_pd_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6272,8 +8470,7 @@ struct mlx5_ifc_dealloc_flow_counter_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x10];
- u8 flow_counter_id[0x10];
+ u8 flow_counter_id[0x20];
u8 reserved_at_60[0x20];
};
@@ -6292,7 +8489,7 @@ struct mlx5_ifc_create_xrq_out_bits {
struct mlx5_ifc_create_xrq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6316,7 +8513,7 @@ struct mlx5_ifc_create_xrc_srq_out_bits {
struct mlx5_ifc_create_xrc_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6325,9 +8522,14 @@ struct mlx5_ifc_create_xrc_srq_in_bits {
struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
- u8 reserved_at_280[0x600];
+ u8 reserved_at_280[0x60];
- u8 pas[0][0x40];
+ u8 xrc_srq_umem_valid[0x1];
+ u8 reserved_at_2e1[0x1f];
+
+ u8 reserved_at_300[0x580];
+
+ u8 pas[][0x40];
};
struct mlx5_ifc_create_tis_out_bits {
@@ -6344,7 +8546,7 @@ struct mlx5_ifc_create_tis_out_bits {
struct mlx5_ifc_create_tis_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6356,19 +8558,19 @@ struct mlx5_ifc_create_tis_in_bits {
struct mlx5_ifc_create_tir_out_bits {
u8 status[0x8];
- u8 reserved_at_8[0x18];
+ u8 icm_address_63_40[0x18];
u8 syndrome[0x20];
- u8 reserved_at_40[0x8];
+ u8 icm_address_39_32[0x8];
u8 tirn[0x18];
- u8 reserved_at_60[0x20];
+ u8 icm_address_31_0[0x20];
};
struct mlx5_ifc_create_tir_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6392,7 +8594,7 @@ struct mlx5_ifc_create_srq_out_bits {
struct mlx5_ifc_create_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6403,7 +8605,7 @@ struct mlx5_ifc_create_srq_in_bits {
u8 reserved_at_280[0x600];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_create_sq_out_bits {
@@ -6420,7 +8622,7 @@ struct mlx5_ifc_create_sq_out_bits {
struct mlx5_ifc_create_sq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6474,7 +8676,7 @@ struct mlx5_ifc_create_rqt_out_bits {
struct mlx5_ifc_create_rqt_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6498,7 +8700,7 @@ struct mlx5_ifc_create_rq_out_bits {
struct mlx5_ifc_create_rq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6522,7 +8724,7 @@ struct mlx5_ifc_create_rmp_out_bits {
struct mlx5_ifc_create_rmp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6541,27 +8743,33 @@ struct mlx5_ifc_create_qp_out_bits {
u8 reserved_at_40[0x8];
u8 qpn[0x18];
- u8 reserved_at_60[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_create_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 qpc_ext[0x1];
+ u8 reserved_at_41[0x7];
+ u8 input_qpn[0x18];
+ u8 reserved_at_60[0x20];
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
- u8 reserved_at_800[0x80];
+ u8 reserved_at_800[0x60];
- u8 pas[0][0x40];
+ u8 wq_umem_valid[0x1];
+ u8 reserved_at_861[0x1f];
+
+ u8 pas[][0x40];
};
struct mlx5_ifc_create_psv_out_bits {
@@ -6613,7 +8821,7 @@ struct mlx5_ifc_create_mkey_out_bits {
struct mlx5_ifc_create_mkey_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6621,7 +8829,8 @@ struct mlx5_ifc_create_mkey_in_bits {
u8 reserved_at_40[0x20];
u8 pg_access[0x1];
- u8 reserved_at_61[0x1f];
+ u8 mkey_umem_valid[0x1];
+ u8 reserved_at_62[0x1e];
struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
@@ -6631,42 +8840,34 @@ struct mlx5_ifc_create_mkey_in_bits {
u8 reserved_at_320[0x560];
- u8 klm_pas_mtt[0][0x20];
+ u8 klm_pas_mtt[][0x20];
+};
+
+enum {
+ MLX5_FLOW_TABLE_TYPE_NIC_RX = 0x0,
+ MLX5_FLOW_TABLE_TYPE_NIC_TX = 0x1,
+ MLX5_FLOW_TABLE_TYPE_ESW_EGRESS_ACL = 0x2,
+ MLX5_FLOW_TABLE_TYPE_ESW_INGRESS_ACL = 0x3,
+ MLX5_FLOW_TABLE_TYPE_FDB = 0X4,
+ MLX5_FLOW_TABLE_TYPE_SNIFFER_RX = 0X5,
+ MLX5_FLOW_TABLE_TYPE_SNIFFER_TX = 0X6,
};
struct mlx5_ifc_create_flow_table_out_bits {
u8 status[0x8];
- u8 reserved_at_8[0x18];
+ u8 icm_address_63_40[0x18];
u8 syndrome[0x20];
- u8 reserved_at_40[0x8];
+ u8 icm_address_39_32[0x8];
u8 table_id[0x18];
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_flow_table_context_bits {
- u8 encap_en[0x1];
- u8 decap_en[0x1];
- u8 reserved_at_2[0x2];
- u8 table_miss_action[0x4];
- u8 level[0x8];
- u8 reserved_at_10[0x8];
- u8 log_size[0x8];
-
- u8 reserved_at_20[0x8];
- u8 table_miss_id[0x18];
-
- u8 reserved_at_40[0x8];
- u8 lag_master_next_table_id[0x18];
-
- u8 reserved_at_60[0xe0];
+ u8 icm_address_31_0[0x20];
};
struct mlx5_ifc_create_flow_table_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6698,9 +8899,15 @@ struct mlx5_ifc_create_flow_group_out_bits {
};
enum {
- MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
- MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
- MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_TCAM_SUBTABLE = 0x0,
+ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT = 0x1,
+};
+
+enum {
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
};
struct mlx5_ifc_create_flow_group_in_bits {
@@ -6717,12 +8924,16 @@ struct mlx5_ifc_create_flow_group_in_bits {
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x4];
+ u8 group_type[0x4];
+ u8 reserved_at_90[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
- u8 reserved_at_c0[0x20];
+ u8 source_eswitch_owner_vhca_id_valid[0x1];
+
+ u8 reserved_at_c1[0x1f];
u8 start_flow_index[0x20];
@@ -6730,7 +8941,10 @@ struct mlx5_ifc_create_flow_group_in_bits {
u8 end_flow_index[0x20];
- u8 reserved_at_140[0xa0];
+ u8 reserved_at_140[0x10];
+ u8 match_definer_id[0x10];
+
+ u8 reserved_at_160[0x80];
u8 reserved_at_1e0[0x18];
u8 match_criteria_enable[0x8];
@@ -6754,7 +8968,7 @@ struct mlx5_ifc_create_eq_out_bits {
struct mlx5_ifc_create_eq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6765,11 +8979,11 @@ struct mlx5_ifc_create_eq_in_bits {
u8 reserved_at_280[0x40];
- u8 event_bitmask[0x40];
+ u8 event_bitmask[4][0x40];
- u8 reserved_at_300[0x580];
+ u8 reserved_at_3c0[0x4c0];
- u8 pas[0][0x40];
+ u8 pas[][0x40];
};
struct mlx5_ifc_create_dct_out_bits {
@@ -6781,12 +8995,12 @@ struct mlx5_ifc_create_dct_out_bits {
u8 reserved_at_40[0x8];
u8 dctn[0x18];
- u8 reserved_at_60[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_create_dct_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6812,7 +9026,7 @@ struct mlx5_ifc_create_cq_out_bits {
struct mlx5_ifc_create_cq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6821,9 +9035,12 @@ struct mlx5_ifc_create_cq_in_bits {
struct mlx5_ifc_cqc_bits cq_context;
- u8 reserved_at_280[0x600];
+ u8 reserved_at_280[0x60];
- u8 pas[0][0x40];
+ u8 cq_umem_valid[0x1];
+ u8 reserved_at_2e1[0x59f];
+
+ u8 pas[][0x40];
};
struct mlx5_ifc_config_int_moderation_out_bits {
@@ -6869,7 +9086,7 @@ struct mlx5_ifc_attach_to_mcg_out_bits {
struct mlx5_ifc_attach_to_mcg_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6920,7 +9137,7 @@ enum {
struct mlx5_ifc_arm_xrc_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6948,7 +9165,7 @@ enum {
struct mlx5_ifc_arm_rq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6996,7 +9213,7 @@ struct mlx5_ifc_alloc_xrcd_out_bits {
struct mlx5_ifc_alloc_xrcd_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7018,7 +9235,7 @@ struct mlx5_ifc_alloc_uar_out_bits {
struct mlx5_ifc_alloc_uar_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7040,7 +9257,7 @@ struct mlx5_ifc_alloc_transport_domain_out_bits {
struct mlx5_ifc_alloc_transport_domain_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7062,7 +9279,7 @@ struct mlx5_ifc_alloc_q_counter_out_bits {
struct mlx5_ifc_alloc_q_counter_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7084,7 +9301,7 @@ struct mlx5_ifc_alloc_pd_out_bits {
struct mlx5_ifc_alloc_pd_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7098,8 +9315,7 @@ struct mlx5_ifc_alloc_flow_counter_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x10];
- u8 flow_counter_id[0x10];
+ u8 flow_counter_id[0x20];
u8 reserved_at_60[0x20];
};
@@ -7111,7 +9327,9 @@ struct mlx5_ifc_alloc_flow_counter_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x33];
+ u8 flow_counter_bulk_log_size[0x5];
+ u8 flow_counter_bulk[0x8];
};
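
Widening flow_counter_id to 32 bits and adding the bulk fields lets a single command allocate a power-of-two block of counters, with the returned id as the block base. A sketch, assuming the caller picked log_bulk_sz from the device's advertised bulk-allocation caps:

	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {};
	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {};
	u32 base_id;
	int err;

	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk_log_size,
		 log_bulk_sz);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		base_id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	/* counters base_id .. base_id + (1 << log_bulk_sz) - 1 are now valid */
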
struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
@@ -7136,7 +9354,7 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
u8 vxlan_udp_port[0x10];
};
-struct mlx5_ifc_set_rate_limit_out_bits {
+struct mlx5_ifc_set_pp_rate_limit_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -7145,9 +9363,20 @@ struct mlx5_ifc_set_rate_limit_out_bits {
u8 reserved_at_40[0x40];
};
-struct mlx5_ifc_set_rate_limit_in_bits {
+struct mlx5_ifc_set_pp_rate_limit_context_bits {
+ u8 rate_limit[0x20];
+
+ u8 burst_upper_bound[0x20];
+
+ u8 reserved_at_40[0x10];
+ u8 typical_packet_size[0x10];
+
+ u8 reserved_at_60[0x120];
+};
+
+struct mlx5_ifc_set_pp_rate_limit_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7157,7 +9386,7 @@ struct mlx5_ifc_set_rate_limit_in_bits {
u8 reserved_at_60[0x20];
- u8 rate_limit[0x20];
+ struct mlx5_ifc_set_pp_rate_limit_context_bits ctx;
};
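
The set_rate_limit to set_pp_rate_limit rename moves the parameters into a context sub-struct, so burst size and typical packet size travel with the rate. A minimal sketch of programming one entry; nested fields are reachable through the ctx member path, and the rate_limit_index field that selects the entry sits earlier in the struct, outside this hunk:

	u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {};
	u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {};
	int err;

	MLX5_SET(set_pp_rate_limit_in, in, opcode,
		 MLX5_CMD_OP_SET_PP_RATE_LIMIT);
	MLX5_SET(set_pp_rate_limit_in, in, ctx.rate_limit, rate_mbps);
	MLX5_SET(set_pp_rate_limit_in, in, ctx.burst_upper_bound, burst_bytes);
	MLX5_SET(set_pp_rate_limit_in, in, ctx.typical_packet_size, pkt_sz);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
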
struct mlx5_ifc_access_register_out_bits {
@@ -7168,7 +9397,7 @@ struct mlx5_ifc_access_register_out_bits {
u8 reserved_at_40[0x40];
- u8 register_data[0][0x20];
+ u8 register_data[][0x20];
};
enum {
@@ -7188,7 +9417,7 @@ struct mlx5_ifc_access_register_in_bits {
u8 argument[0x20];
- u8 register_data[0][0x20];
+ u8 register_data[][0x20];
};
struct mlx5_ifc_sltp_reg_bits {
@@ -7293,21 +9522,24 @@ struct mlx5_ifc_ptys_reg_bits {
u8 proto_mask[0x3];
u8 an_status[0x4];
- u8 reserved_at_24[0x3c];
+ u8 reserved_at_24[0xc];
+ u8 data_rate_oper[0x10];
+
+ u8 ext_eth_proto_capability[0x20];
u8 eth_proto_capability[0x20];
u8 ib_link_width_capability[0x10];
u8 ib_proto_capability[0x10];
- u8 reserved_at_a0[0x20];
+ u8 ext_eth_proto_admin[0x20];
u8 eth_proto_admin[0x20];
u8 ib_link_width_admin[0x10];
u8 ib_proto_admin[0x10];
- u8 reserved_at_100[0x20];
+ u8 ext_eth_proto_oper[0x20];
u8 eth_proto_oper[0x20];
@@ -7446,20 +9678,48 @@ struct mlx5_ifc_pplr_reg_bits {
struct mlx5_ifc_pplm_reg_bits {
u8 reserved_at_0[0x8];
- u8 local_port[0x8];
- u8 reserved_at_10[0x10];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x10];
- u8 reserved_at_20[0x20];
+ u8 reserved_at_20[0x20];
- u8 port_profile_mode[0x8];
- u8 static_port_profile[0x8];
- u8 active_port_profile[0x8];
- u8 reserved_at_58[0x8];
+ u8 port_profile_mode[0x8];
+ u8 static_port_profile[0x8];
+ u8 active_port_profile[0x8];
+ u8 reserved_at_58[0x8];
- u8 retransmission_active[0x8];
- u8 fec_mode_active[0x18];
+ u8 retransmission_active[0x8];
+ u8 fec_mode_active[0x18];
- u8 reserved_at_80[0x20];
+ u8 rs_fec_correction_bypass_cap[0x4];
+ u8 reserved_at_84[0x8];
+ u8 fec_override_cap_56g[0x4];
+ u8 fec_override_cap_100g[0x4];
+ u8 fec_override_cap_50g[0x4];
+ u8 fec_override_cap_25g[0x4];
+ u8 fec_override_cap_10g_40g[0x4];
+
+ u8 rs_fec_correction_bypass_admin[0x4];
+ u8 reserved_at_a4[0x8];
+ u8 fec_override_admin_56g[0x4];
+ u8 fec_override_admin_100g[0x4];
+ u8 fec_override_admin_50g[0x4];
+ u8 fec_override_admin_25g[0x4];
+ u8 fec_override_admin_10g_40g[0x4];
+
+ u8 fec_override_cap_400g_8x[0x10];
+ u8 fec_override_cap_200g_4x[0x10];
+
+ u8 fec_override_cap_100g_2x[0x10];
+ u8 fec_override_cap_50g_1x[0x10];
+
+ u8 fec_override_admin_400g_8x[0x10];
+ u8 fec_override_admin_200g_4x[0x10];
+
+ u8 fec_override_admin_100g_2x[0x10];
+ u8 fec_override_admin_50g_1x[0x10];
+
+ u8 reserved_at_140[0x140];
};
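
The new fec_override_admin_* fields make PPLM the per-speed FEC policy knob: read the register, update the admin field for the speed of interest, write it back. A sketch over the generic access-register path; the exact encoding of fec_policy (one bit per FEC mode) is an assumption:

	u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
	u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(pplm_reg);
	int err;

	MLX5_SET(pplm_reg, in, local_port, 1);
	err = mlx5_core_access_reg(dev, in, sz, out, sz,
				   MLX5_REG_PPLM, 0, 0);	/* read */
	if (err)
		return err;

	MLX5_SET(pplm_reg, out, fec_override_admin_50g, fec_policy);
	err = mlx5_core_access_reg(dev, out, sz, out, sz,
				   MLX5_REG_PPLM, 0, 1);	/* write */
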
struct mlx5_ifc_ppcnt_reg_bits {
@@ -7476,6 +9736,52 @@ struct mlx5_ifc_ppcnt_reg_bits {
union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
};
+struct mlx5_ifc_mpein_reg_bits {
+ u8 reserved_at_0[0x2];
+ u8 depth[0x6];
+ u8 pcie_index[0x8];
+ u8 node[0x8];
+ u8 reserved_at_18[0x8];
+
+ u8 capability_mask[0x20];
+
+ u8 reserved_at_40[0x8];
+ u8 link_width_enabled[0x8];
+ u8 link_speed_enabled[0x10];
+
+ u8 lane0_physical_position[0x8];
+ u8 link_width_active[0x8];
+ u8 link_speed_active[0x10];
+
+ u8 num_of_pfs[0x10];
+ u8 num_of_vfs[0x10];
+
+ u8 bdf0[0x10];
+ u8 reserved_at_b0[0x10];
+
+ u8 max_read_request_size[0x4];
+ u8 max_payload_size[0x4];
+ u8 reserved_at_c8[0x5];
+ u8 pwr_status[0x3];
+ u8 port_type[0x4];
+ u8 reserved_at_d4[0xb];
+ u8 lane_reversal[0x1];
+
+ u8 reserved_at_e0[0x14];
+ u8 pci_power[0xc];
+
+ u8 reserved_at_100[0x20];
+
+ u8 device_status[0x10];
+ u8 port_state[0x8];
+ u8 reserved_at_138[0x8];
+
+ u8 reserved_at_140[0x10];
+ u8 receiver_detect_result[0x10];
+
+ u8 reserved_at_160[0x20];
+};
+
struct mlx5_ifc_mpcnt_reg_bits {
u8 reserved_at_0[0x8];
u8 pcie_index[0x8];
@@ -7657,7 +9963,11 @@ struct mlx5_ifc_pifr_reg_bits {
struct mlx5_ifc_pfcc_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
- u8 reserved_at_10[0x10];
+ u8 reserved_at_10[0xb];
+ u8 ppan_mask_n[0x1];
+ u8 minor_stall_mask[0x1];
+ u8 critical_stall_mask[0x1];
+ u8 reserved_at_1e[0x2];
u8 ppan[0x4];
u8 reserved_at_24[0x4];
@@ -7667,17 +9977,22 @@ struct mlx5_ifc_pfcc_reg_bits {
u8 pptx[0x1];
u8 aptx[0x1];
- u8 reserved_at_42[0x6];
+ u8 pptx_mask_n[0x1];
+ u8 reserved_at_43[0x5];
u8 pfctx[0x8];
u8 reserved_at_50[0x10];
u8 pprx[0x1];
u8 aprx[0x1];
- u8 reserved_at_62[0x6];
+ u8 pprx_mask_n[0x1];
+ u8 reserved_at_63[0x5];
u8 pfcrx[0x8];
u8 reserved_at_70[0x10];
- u8 reserved_at_80[0x80];
+ u8 device_stall_minor_watermark[0x10];
+ u8 device_stall_critical_watermark[0x10];
+
+ u8 reserved_at_a0[0x60];
};
struct mlx5_ifc_pelc_reg_bits {
@@ -7717,15 +10032,82 @@ struct mlx5_ifc_peir_reg_bits {
u8 error_type[0x8];
};
-struct mlx5_ifc_pcam_enhanced_features_bits {
- u8 reserved_at_0[0x7c];
+struct mlx5_ifc_mpegc_reg_bits {
+ u8 reserved_at_0[0x30];
+ u8 field_select[0x10];
+
+ u8 tx_overflow_sense[0x1];
+ u8 mark_cqe[0x1];
+ u8 mark_cnp[0x1];
+ u8 reserved_at_43[0x1b];
+ u8 tx_lossy_overflow_oper[0x2];
+
+ u8 reserved_at_60[0x100];
+};
+
+enum {
+ MLX5_MTUTC_FREQ_ADJ_UNITS_PPB = 0x0,
+ MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM = 0x1,
+};
+
+enum {
+ MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE = 0x1,
+ MLX5_MTUTC_OPERATION_ADJUST_TIME = 0x2,
+ MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC = 0x3,
+};
+
+struct mlx5_ifc_mtutc_reg_bits {
+ u8 reserved_at_0[0x5];
+ u8 freq_adj_units[0x3];
+ u8 reserved_at_8[0x14];
+ u8 operation[0x4];
+
+ u8 freq_adjustment[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ u8 utc_sec[0x20];
+
+ u8 reserved_at_a0[0x2];
+ u8 utc_nsec[0x1e];
+ u8 time_adjustment[0x20];
+};
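+
MTUTC is the register behind the PTP adjtime/adjfine style callbacks: operation selects set/adjust time versus adjust frequency, and freq_adj_units selects how freq_adjustment is encoded. A sketch of a frequency nudge in PPB, with delta_ppb a signed 32-bit value from the caller:

	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
	u32 out[MLX5_ST_SZ_DW(mtutc_reg)];
	int err;

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);
	MLX5_SET(mtutc_reg, in, freq_adj_units, MLX5_MTUTC_FREQ_ADJ_UNITS_PPB);
	MLX5_SET(mtutc_reg, in, freq_adjustment, delta_ppb);

	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_MTUTC, 0, 1);	/* write */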
+
+struct mlx5_ifc_pcam_enhanced_features_bits {
+ u8 reserved_at_0[0x68];
+ u8 fec_50G_per_lane_in_pplm[0x1];
+ u8 reserved_at_69[0x4];
+ u8 rx_icrc_encapsulated_counter[0x1];
+ u8 reserved_at_6e[0x4];
+ u8 ptys_extended_ethernet[0x1];
+ u8 reserved_at_73[0x3];
+ u8 pfcc_mask[0x1];
+ u8 reserved_at_77[0x3];
+ u8 per_lane_error_counters[0x1];
+ u8 rx_buffer_fullness_counters[0x1];
u8 ptys_connector_type[0x1];
u8 reserved_at_7d[0x1];
u8 ppcnt_discard_group[0x1];
u8 ppcnt_statistical_group[0x1];
};
+struct mlx5_ifc_pcam_regs_5000_to_507f_bits {
+ u8 port_access_reg_cap_mask_127_to_96[0x20];
+ u8 port_access_reg_cap_mask_95_to_64[0x20];
+
+ u8 port_access_reg_cap_mask_63_to_36[0x1c];
+ u8 pplm[0x1];
+ u8 port_access_reg_cap_mask_34_to_32[0x3];
+
+ u8 port_access_reg_cap_mask_31_to_13[0x13];
+ u8 pbmc[0x1];
+ u8 pptb[0x1];
+ u8 port_access_reg_cap_mask_10_to_09[0x2];
+ u8 ppcnt[0x1];
+ u8 port_access_reg_cap_mask_07_to_00[0x8];
+};
+
struct mlx5_ifc_pcam_reg_bits {
u8 reserved_at_0[0x8];
u8 feature_group[0x8];
@@ -7735,6 +10117,7 @@ struct mlx5_ifc_pcam_reg_bits {
u8 reserved_at_20[0x20];
union {
+ struct mlx5_ifc_pcam_regs_5000_to_507f_bits regs_5000_to_507f;
u8 reserved_at_0[0x80];
} port_access_reg_cap_mask;
@@ -7749,8 +10132,27 @@ struct mlx5_ifc_pcam_reg_bits {
};
struct mlx5_ifc_mcam_enhanced_features_bits {
- u8 reserved_at_0[0x7f];
-
+ u8 reserved_at_0[0x50];
+ u8 mtutc_freq_adj_units[0x1];
+ u8 mtutc_time_adjustment_extended_range[0x1];
+ u8 reserved_at_52[0xb];
+ u8 mcia_32dwords[0x1];
+ u8 out_pulse_duration_ns[0x1];
+ u8 npps_period[0x1];
+ u8 reserved_at_60[0xa];
+ u8 reset_state[0x1];
+ u8 ptpcyc2realtime_modify[0x1];
+ u8 reserved_at_6c[0x2];
+ u8 pci_status_and_power[0x1];
+ u8 reserved_at_6f[0x5];
+ u8 mark_tx_action_cnp[0x1];
+ u8 mark_tx_action_cqe[0x1];
+ u8 dynamic_tx_overflow[0x1];
+ u8 reserved_at_77[0x4];
+ u8 pcie_outbound_stalled[0x1];
+ u8 tx_overflow_buffer_pkt[0x1];
+ u8 mtpps_enh_out_per_adj[0x1];
+ u8 mtpps_fs[0x1];
u8 pcie_performance_group[0x1];
};
@@ -7759,10 +10161,40 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 mcda[0x1];
u8 mcc[0x1];
u8 mcqi[0x1];
- u8 reserved_at_1f[0x1];
+ u8 mcqs[0x1];
+
+ u8 regs_95_to_87[0x9];
+ u8 mpegc[0x1];
+ u8 mtutc[0x1];
+ u8 regs_84_to_68[0x11];
+ u8 tracer_registers[0x4];
+
+ u8 regs_63_to_46[0x12];
+ u8 mrtc[0x1];
+ u8 regs_44_to_32[0xd];
+
+ u8 regs_31_to_0[0x20];
+};
+
+struct mlx5_ifc_mcam_access_reg_bits1 {
+ u8 regs_127_to_96[0x20];
u8 regs_95_to_64[0x20];
+
u8 regs_63_to_32[0x20];
+
+ u8 regs_31_to_0[0x20];
+};
+
+struct mlx5_ifc_mcam_access_reg_bits2 {
+ u8 regs_127_to_99[0x1d];
+ u8 mirc[0x1];
+ u8 regs_97_to_96[0x2];
+
+ u8 regs_95_to_64[0x20];
+
+ u8 regs_63_to_32[0x20];
+
u8 regs_31_to_0[0x20];
};
@@ -7776,6 +10208,8 @@ struct mlx5_ifc_mcam_reg_bits {
union {
struct mlx5_ifc_mcam_access_reg_bits access_regs;
+ struct mlx5_ifc_mcam_access_reg_bits1 access_regs1;
+ struct mlx5_ifc_mcam_access_reg_bits2 access_regs2;
u8 reserved_at_0[0x80];
} mng_access_reg_cap_mask;
@@ -7789,6 +10223,55 @@ struct mlx5_ifc_mcam_reg_bits {
u8 reserved_at_1c0[0x80];
};
+struct mlx5_ifc_qcam_access_reg_cap_mask {
+ u8 qcam_access_reg_cap_mask_127_to_20[0x6c];
+ u8 qpdpm[0x1];
+ u8 qcam_access_reg_cap_mask_18_to_4[0xf];
+ u8 qdpm[0x1];
+ u8 qpts[0x1];
+ u8 qcap[0x1];
+ u8 qcam_access_reg_cap_mask_0[0x1];
+};
+
+struct mlx5_ifc_qcam_qos_feature_cap_mask {
+ u8 qcam_qos_feature_cap_mask_127_to_1[0x7f];
+ u8 qpts_trust_both[0x1];
+};
+
+struct mlx5_ifc_qcam_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 feature_group[0x8];
+ u8 reserved_at_10[0x8];
+ u8 access_reg_group[0x8];
+ u8 reserved_at_20[0x20];
+
+ union {
+ struct mlx5_ifc_qcam_access_reg_cap_mask reg_cap;
+ u8 reserved_at_0[0x80];
+ } qos_access_reg_cap_mask;
+
+ u8 reserved_at_c0[0x80];
+
+ union {
+ struct mlx5_ifc_qcam_qos_feature_cap_mask feature_cap;
+ u8 reserved_at_0[0x80];
+ } qos_feature_cap_mask;
+
+ u8 reserved_at_1c0[0x80];
+};
+
+struct mlx5_ifc_core_dump_reg_bits {
+ u8 reserved_at_0[0x18];
+ u8 core_dump_type[0x8];
+
+ u8 reserved_at_20[0x30];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x8];
+ u8 qpn[0x18];
+ u8 reserved_at_80[0x180];
+};
+
struct mlx5_ifc_pcap_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
@@ -7829,18 +10312,32 @@ struct mlx5_ifc_pamp_reg_bits {
struct mlx5_ifc_pcmr_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
- u8 reserved_at_10[0x2e];
+ u8 reserved_at_10[0x10];
+
+ u8 entropy_force_cap[0x1];
+ u8 entropy_calc_cap[0x1];
+ u8 entropy_gre_calc_cap[0x1];
+ u8 reserved_at_23[0xf];
+ u8 rx_ts_over_crc_cap[0x1];
+ u8 reserved_at_33[0xb];
u8 fcs_cap[0x1];
- u8 reserved_at_3f[0x1f];
+ u8 reserved_at_3f[0x1];
+
+ u8 entropy_force[0x1];
+ u8 entropy_calc[0x1];
+ u8 entropy_gre_calc[0x1];
+ u8 reserved_at_43[0xf];
+ u8 rx_ts_over_crc[0x1];
+ u8 reserved_at_53[0xb];
u8 fcs_chk[0x1];
u8 reserved_at_5f[0x1];
};
struct mlx5_ifc_lane_2_module_mapping_bits {
- u8 reserved_at_0[0x6];
- u8 rx_lane[0x2];
- u8 reserved_at_8[0x6];
- u8 tx_lane[0x2];
+ u8 reserved_at_0[0x4];
+ u8 rx_lane[0x4];
+ u8 reserved_at_8[0x4];
+ u8 tx_lane[0x4];
u8 reserved_at_10[0x8];
u8 module[0x8];
};
@@ -7849,8 +10346,8 @@ struct mlx5_ifc_bufferx_reg_bits {
u8 reserved_at_0[0x6];
u8 lossy[0x1];
u8 epsb[0x1];
- u8 reserved_at_8[0xc];
- u8 size[0xc];
+ u8 reserved_at_8[0x8];
+ u8 size[0x10];
u8 xoff_threshold[0x10];
u8 xon_threshold[0x10];
@@ -7988,7 +10485,7 @@ struct mlx5_ifc_cmd_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 command[0][0x20];
+ u8 command[][0x20];
};
struct mlx5_ifc_cmd_if_box_bits {
@@ -8116,7 +10613,8 @@ struct mlx5_ifc_initial_seg_bits {
u8 initializing[0x1];
u8 reserved_at_fe1[0x4];
u8 nic_interface_supported[0x3];
- u8 reserved_at_fe8[0x18];
+ u8 embedded_cpu[0x1];
+ u8 reserved_at_fe9[0x17];
struct mlx5_ifc_health_buffer_bits health_buffer;
@@ -8141,7 +10639,12 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 reserved_at_18[0x4];
u8 cap_max_num_of_pps_out_pins[0x4];
- u8 reserved_at_20[0x24];
+ u8 reserved_at_20[0x13];
+ u8 cap_log_min_npps_period[0x5];
+ u8 reserved_at_38[0x3];
+ u8 cap_log_min_out_pulse_duration_ns[0x5];
+
+ u8 reserved_at_40[0x4];
u8 cap_pin_3_mode[0x4];
u8 reserved_at_48[0x4];
u8 cap_pin_2_mode[0x4];
@@ -8159,7 +10662,10 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 reserved_at_78[0x4];
u8 cap_pin_4_mode[0x4];
- u8 reserved_at_80[0x80];
+ u8 field_select[0x20];
+ u8 reserved_at_a0[0x20];
+
+ u8 npps_period[0x40];
u8 enable[0x1];
u8 reserved_at_101[0xb];
@@ -8168,14 +10674,16 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 pin_mode[0x4];
u8 pin[0x8];
- u8 reserved_at_120[0x20];
+ u8 reserved_at_120[0x2];
+ u8 out_pulse_duration_ns[0x1e];
u8 time_stamp[0x40];
u8 out_pulse_duration[0x10];
u8 out_periodic_adjustment[0x10];
+ u8 enhanced_out_periodic_adjustment[0x20];
- u8 reserved_at_1a0[0x60];
+ u8 reserved_at_1c0[0x20];
};
struct mlx5_ifc_mtppse_reg_bits {
@@ -8187,6 +10695,24 @@ struct mlx5_ifc_mtppse_reg_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_mcqs_reg_bits {
+ u8 last_index_flag[0x1];
+ u8 reserved_at_1[0x7];
+ u8 fw_device[0x8];
+ u8 component_index[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 identifier[0x10];
+
+ u8 reserved_at_40[0x17];
+ u8 component_status[0x5];
+ u8 component_update_state[0x4];
+
+ u8 last_update_state_changer_type[0x4];
+ u8 last_update_state_changer_host_id[0x4];
+ u8 reserved_at_68[0x18];
+};
+
struct mlx5_ifc_mcqi_cap_bits {
u8 supported_info_bitmask[0x20];
@@ -8207,6 +10733,43 @@ struct mlx5_ifc_mcqi_cap_bits {
u8 reserved_at_86[0x1a];
};
+struct mlx5_ifc_mcqi_version_bits {
+ u8 reserved_at_0[0x2];
+ u8 build_time_valid[0x1];
+ u8 user_defined_time_valid[0x1];
+ u8 reserved_at_4[0x14];
+ u8 version_string_length[0x8];
+
+ u8 version[0x20];
+
+ u8 build_time[0x40];
+
+ u8 user_defined_time[0x40];
+
+ u8 build_tool_version[0x20];
+
+ u8 reserved_at_e0[0x20];
+
+ u8 version_string[92][0x8];
+};
+
+struct mlx5_ifc_mcqi_activation_method_bits {
+ u8 pending_server_ac_power_cycle[0x1];
+ u8 pending_server_dc_power_cycle[0x1];
+ u8 pending_server_reboot[0x1];
+ u8 pending_fw_reset[0x1];
+ u8 auto_activate[0x1];
+ u8 all_hosts_sync[0x1];
+ u8 device_hw_reset[0x1];
+ u8 reserved_at_7[0x19];
+};
+
+union mlx5_ifc_mcqi_reg_data_bits {
+ struct mlx5_ifc_mcqi_cap_bits mcqi_caps;
+ struct mlx5_ifc_mcqi_version_bits mcqi_version;
+ struct mlx5_ifc_mcqi_activation_method_bits mcqi_activation_mathod;
+};
+
struct mlx5_ifc_mcqi_reg_bits {
u8 read_pending_component[0x1];
u8 reserved_at_1[0xf];
@@ -8224,7 +10787,7 @@ struct mlx5_ifc_mcqi_reg_bits {
u8 reserved_at_a0[0x10];
u8 data_size[0x10];
- u8 data[0][0x20];
+ union mlx5_ifc_mcqi_reg_data_bits data[];
};
struct mlx5_ifc_mcc_reg_bits {
@@ -8263,7 +10826,129 @@ struct mlx5_ifc_mcda_reg_bits {
u8 reserved_at_60[0x20];
- u8 data[0][0x20];
+ u8 data[][0x20];
+};
+
+enum {
+ MLX5_MFRL_REG_RESET_STATE_IDLE = 0,
+ MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION = 1,
+ MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS = 2,
+ MLX5_MFRL_REG_RESET_STATE_TIMEOUT = 3,
+ MLX5_MFRL_REG_RESET_STATE_NACK = 4,
+};
+
+enum {
+ MLX5_MFRL_REG_RESET_TYPE_FULL_CHIP = BIT(0),
+ MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE = BIT(1),
+};
+
+enum {
+ MLX5_MFRL_REG_RESET_LEVEL0 = BIT(0),
+ MLX5_MFRL_REG_RESET_LEVEL3 = BIT(3),
+ MLX5_MFRL_REG_RESET_LEVEL6 = BIT(6),
+};
+
+struct mlx5_ifc_mfrl_reg_bits {
+ u8 reserved_at_0[0x20];
+
+ u8 reserved_at_20[0x2];
+ u8 pci_sync_for_fw_update_start[0x1];
+ u8 pci_sync_for_fw_update_resp[0x2];
+ u8 rst_type_sel[0x3];
+ u8 reserved_at_28[0x4];
+ u8 reset_state[0x4];
+ u8 reset_type[0x8];
+ u8 reset_level[0x8];
+};
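+
MFRL drives the firmware live-reset flow: reset_level and reset_type advertise and select how disruptive the reset is, and pci_sync_for_fw_update_start kicks off the negotiation whose progress shows up in reset_state. A sketch of requesting a level-3 reset; treating rst_type_sel as a plain index (rather than the BIT() mask used by reset_type) is an assumption:

	u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
	u32 out[MLX5_ST_SZ_DW(mfrl_reg)];
	int err;

	MLX5_SET(mfrl_reg, in, reset_level, MLX5_MFRL_REG_RESET_LEVEL3);
	MLX5_SET(mfrl_reg, in, rst_type_sel, 0);	/* index, assumed */
	MLX5_SET(mfrl_reg, in, pci_sync_for_fw_update_start, 1);

	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_MFRL, 0, 1);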
+
+struct mlx5_ifc_mirc_reg_bits {
+ u8 reserved_at_0[0x18];
+ u8 status_code[0x8];
+
+ u8 reserved_at_20[0x20];
+};
+
+struct mlx5_ifc_pddr_monitor_opcode_bits {
+ u8 reserved_at_0[0x10];
+ u8 monitor_opcode[0x10];
+};
+
+union mlx5_ifc_pddr_troubleshooting_page_status_opcode_auto_bits {
+ struct mlx5_ifc_pddr_monitor_opcode_bits pddr_monitor_opcode;
+ u8 reserved_at_0[0x20];
+};
+
+enum {
+ /* Monitor opcodes */
+ MLX5_PDDR_REG_TRBLSH_GROUP_OPCODE_MONITOR = 0x0,
+};
+
+struct mlx5_ifc_pddr_troubleshooting_page_bits {
+ u8 reserved_at_0[0x10];
+ u8 group_opcode[0x10];
+
+ union mlx5_ifc_pddr_troubleshooting_page_status_opcode_auto_bits status_opcode;
+
+ u8 reserved_at_40[0x20];
+
+ u8 status_message[59][0x20];
+};
+
+union mlx5_ifc_pddr_reg_page_data_auto_bits {
+ struct mlx5_ifc_pddr_troubleshooting_page_bits pddr_troubleshooting_page;
+ u8 reserved_at_0[0x7c0];
+};
+
+enum {
+ MLX5_PDDR_REG_PAGE_SELECT_TROUBLESHOOTING_INFO_PAGE = 0x1,
+};
+
+struct mlx5_ifc_pddr_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 reserved_at_12[0xe];
+
+ u8 reserved_at_20[0x18];
+ u8 page_select[0x8];
+
+ union mlx5_ifc_pddr_reg_page_data_auto_bits page_data;
+};
+
+struct mlx5_ifc_mrtc_reg_bits {
+ u8 time_synced[0x1];
+ u8 reserved_at_1[0x1f];
+
+ u8 reserved_at_20[0x20];
+
+ u8 time_h[0x20];
+
+ u8 time_l[0x20];
+};
+
+struct mlx5_ifc_mtmp_reg_bits {
+ u8 reserved_at_0[0x14];
+ u8 sensor_index[0xc];
+
+ u8 reserved_at_20[0x10];
+ u8 temperature[0x10];
+
+ u8 mte[0x1];
+ u8 mtr[0x1];
+ u8 reserved_at_42[0xe];
+ u8 max_temperature[0x10];
+
+ u8 tee[0x2];
+ u8 reserved_at_62[0xe];
+ u8 temp_threshold_hi[0x10];
+
+ u8 reserved_at_80[0x10];
+ u8 temp_threshold_lo[0x10];
+
+ u8 reserved_at_a0[0x20];
+
+ u8 sensor_name_hi[0x20];
+ u8 sensor_name_lo[0x20];
};
union mlx5_ifc_ports_control_registers_document_bits {
@@ -8274,11 +10959,15 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
- struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+ struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits eth_per_tc_prio_grp_data_layout;
+ struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits eth_per_tc_congest_prio_grp_data_layout;
struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping;
struct mlx5_ifc_pamp_reg_bits pamp_reg;
struct mlx5_ifc_paos_reg_bits paos_reg;
struct mlx5_ifc_pcap_reg_bits pcap_reg;
+ struct mlx5_ifc_pddr_monitor_opcode_bits pddr_monitor_opcode;
+ struct mlx5_ifc_pddr_reg_bits pddr_reg;
+ struct mlx5_ifc_pddr_troubleshooting_page_bits pddr_troubleshooting_page;
struct mlx5_ifc_peir_reg_bits peir_reg;
struct mlx5_ifc_pelc_reg_bits pelc_reg;
struct mlx5_ifc_pfcc_reg_bits pfcc_reg;
@@ -8298,6 +10987,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
struct mlx5_ifc_ppad_reg_bits ppad_reg;
struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
+ struct mlx5_ifc_mpein_reg_bits mpein_reg;
struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg;
struct mlx5_ifc_pplm_reg_bits pplm_reg;
struct mlx5_ifc_pplr_reg_bits pplr_reg;
@@ -8319,6 +11009,11 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_mcqi_reg_bits mcqi_reg;
struct mlx5_ifc_mcc_reg_bits mcc_reg;
struct mlx5_ifc_mcda_reg_bits mcda_reg;
+ struct mlx5_ifc_mirc_reg_bits mirc_reg;
+ struct mlx5_ifc_mfrl_reg_bits mfrl_reg;
+ struct mlx5_ifc_mtutc_reg_bits mtutc_reg;
+ struct mlx5_ifc_mrtc_reg_bits mrtc_reg;
+ struct mlx5_ifc_mtmp_reg_bits mtmp_reg;
u8 reserved_at_0[0x60e0];
};
@@ -8355,14 +11050,19 @@ struct mlx5_ifc_set_flow_table_root_in_bits {
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x7];
+ u8 table_of_other_vport[0x1];
+ u8 table_vport_number[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
u8 reserved_at_c0[0x8];
u8 underlay_qpn[0x18];
- u8 reserved_at_e0[0x120];
+ u8 table_eswitch_owner_vhca_id_valid[0x1];
+ u8 reserved_at_e1[0xf];
+ u8 table_eswitch_owner_vhca_id[0x10];
+ u8 reserved_at_100[0x100];
};
enum {
@@ -8437,6 +11137,150 @@ struct mlx5_ifc_qetc_reg_bits {
struct mlx5_ifc_ets_global_config_reg_bits global_configuration;
};
+struct mlx5_ifc_qpdpm_dscp_reg_bits {
+ u8 e[0x1];
+ u8 reserved_at_01[0x0b];
+ u8 prio[0x04];
+};
+
+struct mlx5_ifc_qpdpm_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x10];
+ struct mlx5_ifc_qpdpm_dscp_reg_bits dscp[64];
+};
+
+struct mlx5_ifc_qpts_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x2d];
+ u8 trust_state[0x3];
+};
+
+struct mlx5_ifc_pptb_reg_bits {
+ u8 reserved_at_0[0x2];
+ u8 mm[0x2];
+ u8 reserved_at_4[0x4];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x6];
+ u8 cm[0x1];
+ u8 um[0x1];
+ u8 pm[0x8];
+
+ u8 prio_x_buff[0x20];
+
+ u8 pm_msb[0x8];
+ u8 reserved_at_48[0x10];
+ u8 ctrl_buff[0x4];
+ u8 untagged_buff[0x4];
+};
+
+struct mlx5_ifc_sbcam_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 feature_group[0x8];
+ u8 reserved_at_10[0x8];
+ u8 access_reg_group[0x8];
+
+ u8 reserved_at_20[0x20];
+
+ u8 sb_access_reg_cap_mask[4][0x20];
+
+ u8 reserved_at_c0[0x80];
+
+ u8 sb_feature_cap_mask[4][0x20];
+
+ u8 reserved_at_1c0[0x40];
+
+ u8 cap_total_buffer_size[0x20];
+
+ u8 cap_cell_size[0x10];
+ u8 cap_max_pg_buffers[0x8];
+ u8 cap_num_pool_supported[0x8];
+
+ u8 reserved_at_240[0x8];
+ u8 cap_sbsr_stat_size[0x8];
+ u8 cap_max_tclass_data[0x8];
+ u8 cap_max_cpu_ingress_tclass_sb[0x8];
+};
+
+struct mlx5_ifc_pbmc_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x10];
+
+ u8 xoff_timer_value[0x10];
+ u8 xoff_refresh[0x10];
+
+ u8 reserved_at_40[0x9];
+ u8 fullness_threshold[0x7];
+ u8 port_buffer_size[0x10];
+
+ struct mlx5_ifc_bufferx_reg_bits buffer[10];
+
+ u8 reserved_at_2e0[0x80];
+};
+
+struct mlx5_ifc_sbpr_reg_bits {
+ u8 desc[0x1];
+ u8 snap[0x1];
+ u8 reserved_at_2[0x4];
+ u8 dir[0x2];
+ u8 reserved_at_8[0x14];
+ u8 pool[0x4];
+
+ u8 infi_size[0x1];
+ u8 reserved_at_21[0x7];
+ u8 size[0x18];
+
+ u8 reserved_at_40[0x1c];
+ u8 mode[0x4];
+
+ u8 reserved_at_60[0x8];
+ u8 buff_occupancy[0x18];
+
+ u8 clr[0x1];
+ u8 reserved_at_81[0x7];
+ u8 max_buff_occupancy[0x18];
+
+ u8 reserved_at_a0[0x8];
+ u8 ext_buff_occupancy[0x18];
+};
+
+struct mlx5_ifc_sbcm_reg_bits {
+ u8 desc[0x1];
+ u8 snap[0x1];
+ u8 reserved_at_2[0x6];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 pg_buff[0x6];
+ u8 reserved_at_18[0x6];
+ u8 dir[0x2];
+
+ u8 reserved_at_20[0x1f];
+ u8 exc[0x1];
+
+ u8 reserved_at_40[0x40];
+
+ u8 reserved_at_80[0x8];
+ u8 buff_occupancy[0x18];
+
+ u8 clr[0x1];
+ u8 reserved_at_a1[0x7];
+ u8 max_buff_occupancy[0x18];
+
+ u8 reserved_at_c0[0x8];
+ u8 min_buff[0x18];
+
+ u8 infi_max[0x1];
+ u8 reserved_at_e1[0x7];
+ u8 max_buff[0x18];
+
+ u8 reserved_at_100[0x20];
+
+ u8 reserved_at_120[0x1c];
+ u8 pool[0x4];
+};
+
struct mlx5_ifc_qtct_reg_bits {
u8 reserved_at_0[0x8];
u8 port_number[0x8];
@@ -8481,7 +11325,7 @@ struct mlx5_ifc_dcbx_param_bits {
u8 dcbx_cee_cap[0x1];
u8 dcbx_ieee_cap[0x1];
u8 dcbx_standby_cap[0x1];
- u8 reserved_at_0[0x5];
+ u8 reserved_at_3[0x5];
u8 port_number[0x8];
u8 reserved_at_10[0xa];
u8 max_application_table_size[6];
@@ -8508,11 +11352,22 @@ struct mlx5_ifc_dcbx_param_bits {
u8 reserved_at_a0[0x160];
};
+enum {
+ MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY = 0,
+ MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT = 1,
+ MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_MPESW = 2,
+};
+
struct mlx5_ifc_lagc_bits {
- u8 reserved_at_0[0x1d];
+ u8 fdb_selection_mode[0x1];
+ u8 reserved_at_1[0x14];
+ u8 port_select_mode[0x3];
+ u8 reserved_at_18[0x5];
u8 lag_state[0x3];
- u8 reserved_at_20[0x14];
+ u8 reserved_at_20[0xc];
+ u8 active_port[0x4];
+ u8 reserved_at_30[0x4];
u8 tx_remap_affinity_2[0x4];
u8 reserved_at_38[0x4];
u8 tx_remap_affinity_1[0x4];
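
port_select_mode is the notable addition in this context: it moves LAG transmit selection from the queue-affinity scheme to a steering-based port selection flow table (or multi-port eswitch). A sketch of how a CREATE_LAG command might fill this context, assuming the usual create_lag_in wrapper with a ctx member (not shown in this hunk):

	u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
	void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
	int err;

	MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
	MLX5_SET(lagc, lag_ctx, port_select_mode,
		 MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT);
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, 1);
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, 2);

	err = mlx5_cmd_exec_in(dev, create_lag, in);
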
@@ -8565,8 +11420,6 @@ struct mlx5_ifc_query_lag_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
-
struct mlx5_ifc_lagc_bits ctx;
};
@@ -8637,4 +11490,1067 @@ struct mlx5_ifc_destroy_vport_lag_in_bits {
u8 reserved_at_40[0x40];
};
+enum {
+ MLX5_MODIFY_MEMIC_OP_MOD_ALLOC,
+ MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC,
+};
+
+struct mlx5_ifc_modify_memic_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x18];
+ u8 memic_operation_type[0x8];
+
+ u8 memic_start_addr[0x40];
+
+ u8 reserved_at_c0[0x140];
+};
+
+struct mlx5_ifc_modify_memic_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ u8 memic_operation_addr[0x40];
+
+ u8 reserved_at_c0[0x140];
+};
+
+struct mlx5_ifc_alloc_memic_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_30[0x20];
+
+ u8 reserved_at_40[0x18];
+ u8 log_memic_addr_alignment[0x8];
+
+ u8 range_start_addr[0x40];
+
+ u8 range_size[0x20];
+
+ u8 memic_size[0x20];
+};
+
+struct mlx5_ifc_alloc_memic_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 memic_start_addr[0x40];
+};
+
+struct mlx5_ifc_dealloc_memic_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+
+ u8 memic_start_addr[0x40];
+
+ u8 memic_size[0x20];
+
+ u8 reserved_at_e0[0x20];
+};
+
+struct mlx5_ifc_dealloc_memic_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_umem_bits {
+ u8 reserved_at_0[0x80];
+
+ u8 ats[0x1];
+ u8 reserved_at_81[0x1a];
+ u8 log_page_size[0x5];
+
+ u8 page_offset[0x20];
+
+ u8 num_of_mtt[0x40];
+
+ struct mlx5_ifc_mtt_bits mtt[];
+};
+
+struct mlx5_ifc_uctx_bits {
+ u8 cap[0x20];
+
+ u8 reserved_at_20[0x160];
+};
+
+struct mlx5_ifc_sw_icm_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x18];
+ u8 log_sw_icm_size[0x8];
+
+ u8 reserved_at_60[0x20];
+
+ u8 sw_icm_start_addr[0x40];
+
+ u8 reserved_at_c0[0x140];
+};
+
+struct mlx5_ifc_geneve_tlv_option_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x18];
+ u8 geneve_option_fte_index[0x8];
+
+ u8 option_class[0x10];
+ u8 option_type[0x8];
+ u8 reserved_at_78[0x3];
+ u8 option_data_length[0x5];
+
+ u8 reserved_at_80[0x180];
+};
+
+struct mlx5_ifc_create_umem_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_umem_bits umem;
+};
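+
umem is the DEVX-registered user memory that the *_umem_valid bits added elsewhere in this patch refer to: commands such as CREATE_CQ and CREATE_QP can then reference a umem_id instead of carrying inline PAS arrays. A sketch of building the variable-length CREATE_UMEM command; num_mtt, page_shift, and the MTT fill loop are the caller's responsibility:

	int inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
		    num_mtt * MLX5_ST_SZ_BYTES(mtt);
	u32 out[MLX5_ST_SZ_DW(create_umem_out)] = {};
	void *in, *umem_ctx;
	u32 umem_id;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_umem_in, in, opcode, MLX5_CMD_OP_CREATE_UMEM);
	umem_ctx = MLX5_ADDR_OF(create_umem_in, in, umem);
	MLX5_SET(umem, umem_ctx, log_page_size,
		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(umem, umem_ctx, num_of_mtt, num_mtt);
	/* ... fill MLX5_ADDR_OF(umem, umem_ctx, mtt) with page addresses ... */

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (!err)
		umem_id = MLX5_GET(create_umem_out, out, umem_id);
	kvfree(in);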
+
+struct mlx5_ifc_create_umem_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x8];
+ u8 umem_id[0x18];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_umem_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x8];
+ u8 umem_id[0x18];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_umem_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_uctx_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_uctx_bits uctx;
+};
+
+struct mlx5_ifc_create_uctx_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_uctx_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_uctx_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_sw_icm_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_sw_icm_bits sw_icm;
+};
+
+struct mlx5_ifc_create_geneve_tlv_option_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_geneve_tlv_option_bits geneve_tlv_opt;
+};
+
+struct mlx5_ifc_mtrc_string_db_param_bits {
+ u8 string_db_base_address[0x20];
+
+ u8 reserved_at_20[0x8];
+ u8 string_db_size[0x18];
+};
+
+struct mlx5_ifc_mtrc_cap_bits {
+ u8 trace_owner[0x1];
+ u8 trace_to_memory[0x1];
+ u8 reserved_at_2[0x4];
+ u8 trc_ver[0x2];
+ u8 reserved_at_8[0x14];
+ u8 num_string_db[0x4];
+
+ u8 first_string_trace[0x8];
+ u8 num_string_trace[0x8];
+ u8 reserved_at_30[0x28];
+
+ u8 log_max_trace_buffer_size[0x8];
+
+ u8 reserved_at_60[0x20];
+
+ struct mlx5_ifc_mtrc_string_db_param_bits string_db_param[8];
+
+ u8 reserved_at_280[0x180];
+};
+
+struct mlx5_ifc_mtrc_conf_bits {
+ u8 reserved_at_0[0x1c];
+ u8 trace_mode[0x4];
+ u8 reserved_at_20[0x18];
+ u8 log_trace_buffer_size[0x8];
+ u8 trace_mkey[0x20];
+ u8 reserved_at_60[0x3a0];
+};
+
+struct mlx5_ifc_mtrc_stdb_bits {
+ u8 string_db_index[0x4];
+ u8 reserved_at_4[0x4];
+ u8 read_size[0x18];
+ u8 start_offset[0x20];
+ u8 string_db_data[];
+};
+
+struct mlx5_ifc_mtrc_ctrl_bits {
+ u8 trace_status[0x2];
+ u8 reserved_at_2[0x2];
+ u8 arm_event[0x1];
+ u8 reserved_at_5[0xb];
+ u8 modify_field_select[0x10];
+ u8 reserved_at_20[0x2b];
+ u8 current_timestamp52_32[0x15];
+ u8 current_timestamp31_0[0x20];
+ u8 reserved_at_80[0x180];
+};
+
+struct mlx5_ifc_host_params_context_bits {
+ u8 host_number[0x8];
+ u8 reserved_at_8[0x7];
+ u8 host_pf_disabled[0x1];
+ u8 host_num_of_vfs[0x10];
+
+ u8 host_total_vfs[0x10];
+ u8 host_pci_bus[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 host_pci_device[0x10];
+
+ u8 reserved_at_60[0x10];
+ u8 host_pci_function[0x10];
+
+ u8 reserved_at_80[0x180];
+};
+
+struct mlx5_ifc_query_esw_functions_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_query_esw_functions_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_host_params_context_bits host_params_context;
+
+ u8 reserved_at_280[0x180];
+ u8 host_sf_enable[][0x40];
+};
+
+struct mlx5_ifc_sf_partition_bits {
+ u8 reserved_at_0[0x10];
+ u8 log_num_sf[0x8];
+ u8 log_sf_bar_size[0x8];
+};
+
+struct mlx5_ifc_query_sf_partitions_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x18];
+ u8 num_sf_partitions[0x8];
+
+ u8 reserved_at_60[0x20];
+
+ struct mlx5_ifc_sf_partition_bits sf_partition[];
+};
+
+struct mlx5_ifc_query_sf_partitions_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_sf_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_sf_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_sf_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_alloc_sf_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_affiliated_event_header_bits {
+ u8 reserved_at_0[0x10];
+ u8 obj_type[0x10];
+
+ u8 obj_id[0x20];
+};
+
+enum {
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT_ULL(0xc),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = BIT_ULL(0x13),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER = BIT_ULL(0x20),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = BIT_ULL(0x24),
+};
+
+enum {
+ MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc,
+ MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13,
+ MLX5_GENERAL_OBJECT_TYPES_SAMPLER = 0x20,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = 0x24,
+ MLX5_GENERAL_OBJECT_TYPES_MACSEC = 0x27,
+ MLX5_GENERAL_OBJECT_TYPES_INT_KEK = 0x47,
+};
+
+enum {
+ MLX5_IPSEC_OBJECT_ICV_LEN_16B,
+};
+
+enum {
+ MLX5_IPSEC_ASO_REG_C_0_1 = 0x0,
+ MLX5_IPSEC_ASO_REG_C_2_3 = 0x1,
+ MLX5_IPSEC_ASO_REG_C_4_5 = 0x2,
+ MLX5_IPSEC_ASO_REG_C_6_7 = 0x3,
+};
+
+enum {
+ MLX5_IPSEC_ASO_MODE = 0x0,
+ MLX5_IPSEC_ASO_REPLAY_PROTECTION = 0x1,
+ MLX5_IPSEC_ASO_INC_SN = 0x2,
+};
+
+struct mlx5_ifc_ipsec_aso_bits {
+ u8 valid[0x1];
+ u8 reserved_at_201[0x1];
+ u8 mode[0x2];
+ u8 window_sz[0x2];
+ u8 soft_lft_arm[0x1];
+ u8 hard_lft_arm[0x1];
+ u8 remove_flow_enable[0x1];
+ u8 esn_event_arm[0x1];
+ u8 reserved_at_20a[0x16];
+
+ u8 remove_flow_pkt_cnt[0x20];
+
+ u8 remove_flow_soft_lft[0x20];
+
+ u8 reserved_at_260[0x80];
+
+ u8 mode_parameter[0x20];
+
+ u8 replay_protection_window[0x100];
+};
+
+struct mlx5_ifc_ipsec_obj_bits {
+ u8 modify_field_select[0x40];
+ u8 full_offload[0x1];
+ u8 reserved_at_41[0x1];
+ u8 esn_en[0x1];
+ u8 esn_overlap[0x1];
+ u8 reserved_at_44[0x2];
+ u8 icv_length[0x2];
+ u8 reserved_at_48[0x4];
+ u8 aso_return_reg[0x4];
+ u8 reserved_at_50[0x10];
+
+ u8 esn_msb[0x20];
+
+ u8 reserved_at_80[0x8];
+ u8 dekn[0x18];
+
+ u8 salt[0x20];
+
+ u8 implicit_iv[0x40];
+
+ u8 reserved_at_100[0x8];
+ u8 ipsec_aso_access_pd[0x18];
+ u8 reserved_at_120[0xe0];
+
+ struct mlx5_ifc_ipsec_aso_bits ipsec_aso;
+};
+
+struct mlx5_ifc_create_ipsec_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_ipsec_obj_bits ipsec_object;
+};
+
+enum {
+ MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP = BIT(0),
+ MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB = BIT(1),
+};
+
+struct mlx5_ifc_query_ipsec_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_ipsec_obj_bits ipsec_object;
+};
+
+struct mlx5_ifc_modify_ipsec_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_ipsec_obj_bits ipsec_object;
+};
+
+enum {
+ MLX5_MACSEC_ASO_REPLAY_PROTECTION = 0x1,
+};
+
+enum {
+ MLX5_MACSEC_ASO_REPLAY_WIN_32BIT = 0x0,
+ MLX5_MACSEC_ASO_REPLAY_WIN_64BIT = 0x1,
+ MLX5_MACSEC_ASO_REPLAY_WIN_128BIT = 0x2,
+ MLX5_MACSEC_ASO_REPLAY_WIN_256BIT = 0x3,
+};
+
+#define MLX5_MACSEC_ASO_INC_SN 0x2
+#define MLX5_MACSEC_ASO_REG_C_4_5 0x2
+
+struct mlx5_ifc_macsec_aso_bits {
+ u8 valid[0x1];
+ u8 reserved_at_1[0x1];
+ u8 mode[0x2];
+ u8 window_size[0x2];
+ u8 soft_lifetime_arm[0x1];
+ u8 hard_lifetime_arm[0x1];
+ u8 remove_flow_enable[0x1];
+ u8 epn_event_arm[0x1];
+ u8 reserved_at_a[0x16];
+
+ u8 remove_flow_packet_count[0x20];
+
+ u8 remove_flow_soft_lifetime[0x20];
+
+ u8 reserved_at_60[0x80];
+
+ u8 mode_parameter[0x20];
+
+ u8 replay_protection_window[8][0x20];
+};
+
+struct mlx5_ifc_macsec_offload_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 confidentiality_en[0x1];
+ u8 reserved_at_41[0x1];
+ u8 epn_en[0x1];
+ u8 epn_overlap[0x1];
+ u8 reserved_at_44[0x2];
+ u8 confidentiality_offset[0x2];
+ u8 reserved_at_48[0x4];
+ u8 aso_return_reg[0x4];
+ u8 reserved_at_50[0x10];
+
+ u8 epn_msb[0x20];
+
+ u8 reserved_at_80[0x8];
+ u8 dekn[0x18];
+
+ u8 reserved_at_a0[0x20];
+
+ u8 sci[0x40];
+
+ u8 reserved_at_100[0x8];
+ u8 macsec_aso_access_pd[0x18];
+
+ u8 reserved_at_120[0x60];
+
+ u8 salt[3][0x20];
+
+ u8 reserved_at_1e0[0x20];
+
+ struct mlx5_ifc_macsec_aso_bits macsec_aso;
+};
+
+struct mlx5_ifc_create_macsec_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
+};
+
+struct mlx5_ifc_modify_macsec_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
+};
+
+enum {
+ MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP = BIT(0),
+ MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB = BIT(1),
+};
+
+struct mlx5_ifc_query_macsec_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
+};
+
+struct mlx5_ifc_wrapped_dek_bits {
+ u8 gcm_iv[0x60];
+
+ u8 reserved_at_60[0x20];
+
+ u8 const0[0x1];
+ u8 key_size[0x1];
+ u8 reserved_at_82[0x2];
+ u8 key2_invalid[0x1];
+ u8 reserved_at_85[0x3];
+ u8 pd[0x18];
+
+ u8 key_purpose[0x5];
+ u8 reserved_at_a5[0x13];
+ u8 kek_id[0x8];
+
+ u8 reserved_at_c0[0x40];
+
+ u8 key1[0x8][0x20];
+
+ u8 key2[0x8][0x20];
+
+ u8 reserved_at_300[0x40];
+
+ u8 const1[0x1];
+ u8 reserved_at_341[0x1f];
+
+ u8 reserved_at_360[0x20];
+
+ u8 auth_tag[0x80];
+};
+
+struct mlx5_ifc_encryption_key_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 state[0x8];
+ u8 sw_wrapped[0x1];
+ u8 reserved_at_49[0xb];
+ u8 key_size[0x4];
+ u8 reserved_at_58[0x4];
+ u8 key_purpose[0x4];
+
+ u8 reserved_at_60[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_at_80[0x100];
+
+ u8 opaque[0x40];
+
+ u8 reserved_at_1c0[0x40];
+
+ u8 key[8][0x80];
+
+ u8 sw_wrapped_dek[8][0x80];
+
+ u8 reserved_at_a00[0x600];
+};
+
+struct mlx5_ifc_create_encryption_key_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_encryption_key_obj_bits encryption_key_object;
+};
+
+struct mlx5_ifc_modify_encryption_key_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_encryption_key_obj_bits encryption_key_object;
+};
+
+enum {
+ MLX5_FLOW_METER_MODE_BYTES_IP_LENGTH = 0x0,
+ MLX5_FLOW_METER_MODE_BYTES_CALC_WITH_L2 = 0x1,
+ MLX5_FLOW_METER_MODE_BYTES_CALC_WITH_L2_IPG = 0x2,
+ MLX5_FLOW_METER_MODE_NUM_PACKETS = 0x3,
+};
+
+struct mlx5_ifc_flow_meter_parameters_bits {
+ u8 valid[0x1];
+ u8 bucket_overflow[0x1];
+ u8 start_color[0x2];
+ u8 both_buckets_on_green[0x1];
+ u8 reserved_at_5[0x1];
+ u8 meter_mode[0x2];
+ u8 reserved_at_8[0x18];
+
+ u8 reserved_at_20[0x20];
+
+ u8 reserved_at_40[0x3];
+ u8 cbs_exponent[0x5];
+ u8 cbs_mantissa[0x8];
+ u8 reserved_at_50[0x3];
+ u8 cir_exponent[0x5];
+ u8 cir_mantissa[0x8];
+
+ u8 reserved_at_60[0x20];
+
+ u8 reserved_at_80[0x3];
+ u8 ebs_exponent[0x5];
+ u8 ebs_mantissa[0x8];
+ u8 reserved_at_90[0x3];
+ u8 eir_exponent[0x5];
+ u8 eir_mantissa[0x8];
+
+ u8 reserved_at_a0[0x60];
+};
+
+struct mlx5_ifc_flow_meter_aso_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x40];
+
+ u8 reserved_at_80[0x8];
+ u8 meter_aso_access_pd[0x18];
+
+ u8 reserved_at_a0[0x160];
+
+ struct mlx5_ifc_flow_meter_parameters_bits flow_meter_parameters[2];
+};
+
+struct mlx5_ifc_create_flow_meter_aso_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_flow_meter_aso_obj_bits flow_meter_aso_obj;
+};
+
+struct mlx5_ifc_int_kek_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 state[0x8];
+ u8 auto_gen[0x1];
+ u8 reserved_at_49[0xb];
+ u8 key_size[0x4];
+ u8 reserved_at_58[0x8];
+
+ u8 reserved_at_60[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_at_80[0x180];
+ u8 key[8][0x80];
+
+ u8 reserved_at_600[0x200];
+};
+
+struct mlx5_ifc_create_int_kek_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_int_kek_obj_bits int_kek_object;
+};
+
+struct mlx5_ifc_create_int_kek_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_int_kek_obj_bits int_kek_object;
+};
+
+struct mlx5_ifc_sampler_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 table_type[0x8];
+ u8 level[0x8];
+ u8 reserved_at_50[0xf];
+ u8 ignore_flow_level[0x1];
+
+ u8 sample_ratio[0x20];
+
+ u8 reserved_at_80[0x8];
+ u8 sample_table_id[0x18];
+
+ u8 reserved_at_a0[0x8];
+ u8 default_table_id[0x18];
+
+ u8 sw_steering_icm_address_rx[0x40];
+ u8 sw_steering_icm_address_tx[0x40];
+
+ u8 reserved_at_140[0xa0];
+};
+
+struct mlx5_ifc_create_sampler_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_sampler_obj_bits sampler_object;
+};
+
+struct mlx5_ifc_query_sampler_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_sampler_obj_bits sampler_object;
+};
+
+enum {
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128 = 0x0,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256 = 0x1,
+};
+
+enum {
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_TLS = 0x1,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_IPSEC = 0x2,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_MACSEC = 0x4,
+};
+
+struct mlx5_ifc_tls_static_params_bits {
+ u8 const_2[0x2];
+ u8 tls_version[0x4];
+ u8 const_1[0x2];
+ u8 reserved_at_8[0x14];
+ u8 encryption_standard[0x4];
+
+ u8 reserved_at_20[0x20];
+
+ u8 initial_record_number[0x40];
+
+ u8 resync_tcp_sn[0x20];
+
+ u8 gcm_iv[0x20];
+
+ u8 implicit_iv[0x40];
+
+ u8 reserved_at_100[0x8];
+ u8 dek_index[0x18];
+
+ u8 reserved_at_120[0xe0];
+};
+
+struct mlx5_ifc_tls_progress_params_bits {
+ u8 next_record_tcp_sn[0x20];
+
+ u8 hw_resync_tcp_sn[0x20];
+
+ u8 record_tracker_state[0x2];
+ u8 auth_state[0x2];
+ u8 reserved_at_44[0x4];
+ u8 hw_offset_record_number[0x18];
+};
+
+enum {
+ MLX5_MTT_PERM_READ = 1 << 0,
+ MLX5_MTT_PERM_WRITE = 1 << 1,
+ MLX5_MTT_PERM_RW = MLX5_MTT_PERM_READ | MLX5_MTT_PERM_WRITE,
+};
+
+enum {
+ MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_INITIATOR = 0x0,
+ MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_RESPONDER = 0x1,
+};
+
+struct mlx5_ifc_suspend_vhca_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_suspend_vhca_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+enum {
+ MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_RESPONDER = 0x0,
+ MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_INITIATOR = 0x1,
+};
+
+struct mlx5_ifc_resume_vhca_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_resume_vhca_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_query_vhca_migration_state_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 incremental[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_vhca_migration_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ u8 required_umem_size[0x20];
+
+ u8 reserved_at_a0[0x160];
+};
+
+struct mlx5_ifc_save_vhca_state_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 incremental[0x1];
+ u8 set_track[0x1];
+ u8 reserved_at_42[0xe];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 va[0x40];
+
+ u8 mkey[0x20];
+
+ u8 size[0x20];
+};
+
+struct mlx5_ifc_save_vhca_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 actual_image_size[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_load_vhca_state_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 va[0x40];
+
+ u8 mkey[0x20];
+
+ u8 size[0x20];
+};
+
+struct mlx5_ifc_load_vhca_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_adv_virtualization_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 pg_track_log_max_num[0x5];
+ u8 pg_track_max_num_range[0x8];
+ u8 pg_track_log_min_addr_space[0x8];
+ u8 pg_track_log_max_addr_space[0x8];
+
+ u8 reserved_at_20[0x3];
+ u8 pg_track_log_min_msg_size[0x5];
+ u8 reserved_at_28[0x3];
+ u8 pg_track_log_max_msg_size[0x5];
+ u8 reserved_at_30[0x3];
+ u8 pg_track_log_min_page_size[0x5];
+ u8 reserved_at_38[0x3];
+ u8 pg_track_log_max_page_size[0x5];
+
+ u8 reserved_at_40[0x7c0];
+};
+
+struct mlx5_ifc_page_track_report_entry_bits {
+ u8 dirty_address_high[0x20];
+
+ u8 dirty_address_low[0x20];
+};
+
+enum {
+ MLX5_PAGE_TRACK_STATE_TRACKING,
+ MLX5_PAGE_TRACK_STATE_REPORTING,
+ MLX5_PAGE_TRACK_STATE_ERROR,
+};
+
+struct mlx5_ifc_page_track_range_bits {
+ u8 start_address[0x40];
+
+ u8 length[0x40];
+};
+
+struct mlx5_ifc_page_track_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 state[0x4];
+ u8 track_type[0x4];
+ u8 log_addr_space_size[0x8];
+ u8 reserved_at_90[0x3];
+ u8 log_page_size[0x5];
+ u8 reserved_at_98[0x3];
+ u8 log_msg_size[0x5];
+
+ u8 reserved_at_a0[0x8];
+ u8 reporting_qpn[0x18];
+
+ u8 reserved_at_c0[0x18];
+ u8 num_ranges[0x8];
+
+ u8 reserved_at_e0[0x20];
+
+ u8 range_start_address[0x40];
+
+ u8 length[0x40];
+
+ struct mlx5_ifc_page_track_range_bits track_range[];
+};
+
+struct mlx5_ifc_create_page_track_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_page_track_bits obj_context;
+};
+
+struct mlx5_ifc_modify_page_track_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_page_track_bits obj_context;
+};
+
#endif /* MLX5_IFC_H */
diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h
index 255a88d08078..0596472923ad 100644
--- a/include/linux/mlx5/mlx5_ifc_fpga.h
+++ b/include/linux/mlx5/mlx5_ifc_fpga.h
@@ -32,14 +32,6 @@
#ifndef MLX5_IFC_FPGA_H
#define MLX5_IFC_FPGA_H
-enum {
- MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX = 0x2c9,
-};
-
-enum {
- MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC = 0x2,
-};
-
struct mlx5_ifc_fpga_shell_caps_bits {
u8 max_num_qps[0x10];
u8 reserved_at_10[0x8];
@@ -370,63 +362,20 @@ struct mlx5_ifc_fpga_destroy_qp_out_bits {
u8 reserved_at_40[0x40];
};
-struct mlx5_ifc_ipsec_extended_cap_bits {
- u8 encapsulation[0x20];
-
- u8 reserved_0[0x15];
- u8 ipv4_fragment[0x1];
- u8 ipv6[0x1];
- u8 esn[0x1];
- u8 lso[0x1];
- u8 transport_and_tunnel_mode[0x1];
- u8 tunnel_mode[0x1];
- u8 transport_mode[0x1];
- u8 ah_esp[0x1];
- u8 esp[0x1];
- u8 ah[0x1];
- u8 ipv4_options[0x1];
-
- u8 auth_alg[0x20];
-
- u8 enc_alg[0x20];
-
- u8 sa_cap[0x20];
-
- u8 reserved_1[0x10];
- u8 number_of_ipsec_counters[0x10];
-
- u8 ipsec_counters_addr_low[0x20];
- u8 ipsec_counters_addr_high[0x20];
+enum {
+ MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RETRY_COUNTER_EXPIRED = 0x1,
+ MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RNR_EXPIRED = 0x2,
};
-struct mlx5_ifc_ipsec_counters_bits {
- u8 dec_in_packets[0x40];
-
- u8 dec_out_packets[0x40];
-
- u8 dec_bypass_packets[0x40];
-
- u8 enc_in_packets[0x40];
-
- u8 enc_out_packets[0x40];
-
- u8 enc_bypass_packets[0x40];
-
- u8 drop_dec_packets[0x40];
-
- u8 failed_auth_dec_packets[0x40];
-
- u8 drop_enc_packets[0x40];
-
- u8 success_add_sa[0x40];
-
- u8 fail_add_sa[0x40];
+struct mlx5_ifc_fpga_qp_error_event_bits {
+ u8 reserved_at_0[0x40];
- u8 success_delete_sa[0x40];
+ u8 reserved_at_40[0x18];
+ u8 syndrome[0x8];
- u8 fail_delete_sa[0x40];
+ u8 reserved_at_60[0x60];
- u8 dropped_cmd[0x40];
+ u8 reserved_at_c0[0x8];
+ u8 fpga_qpn[0x18];
};
-
#endif /* MLX5_IFC_FPGA_H */
diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h
new file mode 100644
index 000000000000..9becdc3fa503
--- /dev/null
+++ b/include/linux/mlx5/mlx5_ifc_vdpa.h
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#ifndef __MLX5_IFC_VDPA_H_
+#define __MLX5_IFC_VDPA_H_
+
+enum {
+ MLX5_VIRTIO_Q_EVENT_MODE_NO_MSIX_MODE = 0x0,
+ MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE = 0x1,
+ MLX5_VIRTIO_Q_EVENT_MODE_MSIX_MODE = 0x2,
+};
+
+enum {
+ MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0,
+ MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1,
+};
+
+enum {
+ MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT =
+ BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT),
+ MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED =
+ BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED),
+};
+
+struct mlx5_ifc_virtio_q_bits {
+ u8 virtio_q_type[0x8];
+ u8 reserved_at_8[0x5];
+ u8 event_mode[0x3];
+ u8 queue_index[0x10];
+
+ u8 full_emulation[0x1];
+ u8 virtio_version_1_0[0x1];
+ u8 reserved_at_22[0x2];
+ u8 offload_type[0x4];
+ u8 event_qpn_or_msix[0x18];
+
+ u8 doorbell_stride_index[0x10];
+ u8 queue_size[0x10];
+
+ u8 device_emulation_id[0x20];
+
+ u8 desc_addr[0x40];
+
+ u8 used_addr[0x40];
+
+ u8 available_addr[0x40];
+
+ u8 virtio_q_mkey[0x20];
+
+ u8 max_tunnel_desc[0x10];
+ u8 reserved_at_170[0x8];
+ u8 error_type[0x8];
+
+ u8 umem_1_id[0x20];
+
+ u8 umem_1_size[0x20];
+
+ u8 umem_1_offset[0x40];
+
+ u8 umem_2_id[0x20];
+
+ u8 umem_2_size[0x20];
+
+ u8 umem_2_offset[0x40];
+
+ u8 umem_3_id[0x20];
+
+ u8 umem_3_size[0x20];
+
+ u8 umem_3_offset[0x40];
+
+ u8 counter_set_id[0x20];
+
+ u8 reserved_at_320[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_at_340[0xc0];
+};
+
+struct mlx5_ifc_virtio_net_q_object_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x20];
+
+ u8 vhca_id[0x10];
+ u8 reserved_at_70[0x10];
+
+ u8 queue_feature_bit_mask_12_3[0xa];
+ u8 dirty_bitmap_dump_enable[0x1];
+ u8 vhost_log_page[0x5];
+ u8 reserved_at_90[0xc];
+ u8 state[0x4];
+
+ u8 reserved_at_a0[0x5];
+ u8 queue_feature_bit_mask_2_0[0x3];
+ u8 tisn_or_qpn[0x18];
+
+ u8 dirty_bitmap_mkey[0x20];
+
+ u8 dirty_bitmap_size[0x20];
+
+ u8 dirty_bitmap_addr[0x40];
+
+ u8 hw_available_index[0x10];
+ u8 hw_used_index[0x10];
+
+ u8 reserved_at_160[0xa0];
+
+ struct mlx5_ifc_virtio_q_bits virtio_q_context;
+};
+
+struct mlx5_ifc_create_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+struct mlx5_ifc_create_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_query_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+};
+
+struct mlx5_ifc_query_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+
+ struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+enum {
+ MLX5_VIRTQ_MODIFY_MASK_STATE = (u64)1 << 0,
+ MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_PARAMS = (u64)1 << 3,
+ MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_DUMP_ENABLE = (u64)1 << 4,
+};
+
+enum {
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT = 0x0,
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY = 0x1,
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND = 0x2,
+ MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR = 0x3,
+};
+
+/* This indicates that the object was not created or has already
+ * been destroyed. It is safe to assume that this object will never
+ * have this many states.
+ */
+enum {
+ MLX5_VIRTIO_NET_Q_OBJECT_NONE = 0xffffffff
+};
+
+enum {
+ MLX5_RQTC_LIST_Q_TYPE_RQ = 0x0,
+ MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q = 0x1,
+};
+
+struct mlx5_ifc_modify_virtio_net_q_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_virtio_net_q_object_bits obj_context;
+};
+
+struct mlx5_ifc_modify_virtio_net_q_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
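+
Virtqueue objects ride the general-object command set, and modify_field_select, masked by the MLX5_VIRTQ_MODIFY_MASK_* values above, names which fields a MODIFY touches. A sketch of moving a queue to RDY; the 0xd object type id for a virtio net queue is an assumption based on the vDPA driver's usage:

	u32 in[MLX5_ST_SZ_DW(modify_virtio_net_q_in)] = {};
	u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {};
	void *obj = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context);
	void *hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, in,
				 general_obj_in_cmd_hdr);
	int err;

	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, 0xd); /* virtio net q */
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj_id);

	MLX5_SET64(virtio_net_q_object, obj, modify_field_select,
		   MLX5_VIRTQ_MODIFY_MASK_STATE);
	MLX5_SET(virtio_net_q_object, obj, state,
		 MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));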
+
+struct mlx5_ifc_virtio_q_counters_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x40];
+ u8 received_desc[0x40];
+ u8 completed_desc[0x40];
+ u8 error_cqes[0x20];
+ u8 bad_desc_errors[0x20];
+ u8 exceed_max_chain[0x20];
+ u8 invalid_buffer[0x20];
+ u8 reserved_at_180[0x280];
+};
+
+struct mlx5_ifc_create_virtio_q_counters_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_q_counters_bits virtio_q_counters;
+};
+
+struct mlx5_ifc_create_virtio_q_counters_out_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_q_counters_bits virtio_q_counters;
+};
+
+struct mlx5_ifc_destroy_virtio_q_counters_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_q_counters_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_query_virtio_q_counters_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_query_virtio_q_counters_out_bits {
+	struct mlx5_ifc_general_obj_out_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_q_counters_bits counters;
+};
+
+#endif /* __MLX5_IFC_VDPA_H_ */
diff --git a/include/linux/mlx5/mpfs.h b/include/linux/mlx5/mpfs.h
new file mode 100644
index 000000000000..bf700c8d5516
--- /dev/null
+++ b/include/linux/mlx5/mpfs.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+ * Copyright (c) 2021 Mellanox Technologies Ltd.
+ */
+
+#ifndef _MLX5_MPFS_
+#define _MLX5_MPFS_
+
+struct mlx5_core_dev;
+
+#ifdef CONFIG_MLX5_MPFS
+int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac);
+int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac);
+#else /* !CONFIG_MLX5_MPFS */
+static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
+static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
+#endif
+
+#endif
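
The MPFS (Multi-Physical-Function Switch) helpers expose the device's L2 table to other mlx5 consumers, such as a vDPA net driver steering unicast traffic to the right function. A minimal usage sketch; the helper name and error-handling policy are illustrative only, and when CONFIG_MLX5_MPFS is off both calls degrade to no-ops returning 0, so callers need no ifdefs of their own.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mpfs.h>

/* Illustrative: move a unicast MAC from one L2-table entry to another. */
static int example_replace_uc_mac(struct mlx5_core_dev *mdev,
				  u8 *old_mac, u8 *new_mac)
{
	int err;

	err = mlx5_mpfs_del_mac(mdev, old_mac);
	if (err)
		return err;

	return mlx5_mpfs_add_mac(mdev, new_mac);
}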
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index c57d4b7de3a8..98b2e1e149f9 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -45,6 +45,7 @@ enum mlx5_module_id {
MLX5_MODULE_ID_QSFP = 0xC,
MLX5_MODULE_ID_QSFP_PLUS = 0xD,
MLX5_MODULE_ID_QSFP28 = 0x11,
+ MLX5_MODULE_ID_DSFP = 0x1B,
};
enum mlx5_an_status {
@@ -55,11 +56,19 @@ enum mlx5_an_status {
MLX5_AN_LINK_DOWN = 4,
};
-#define MLX5_EEPROM_MAX_BYTES 32
-#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff
#define MLX5_I2C_ADDR_LOW 0x50
#define MLX5_I2C_ADDR_HIGH 0x51
#define MLX5_EEPROM_PAGE_LENGTH 256
+#define MLX5_EEPROM_HIGH_PAGE_LENGTH 128
+
+struct mlx5_module_eeprom_query_params {
+ u16 size;
+ u16 offset;
+ u16 i2c_address;
+ u32 page;
+ u32 bank;
+ u32 module_number;
+};
enum mlx5e_link_mode {
MLX5E_1000BASE_CX_SGMII = 0,
@@ -92,6 +101,25 @@ enum mlx5e_link_mode {
MLX5E_LINK_MODES_NUMBER,
};
+enum mlx5e_ext_link_mode {
+ MLX5E_SGMII_100M = 0,
+ MLX5E_1000BASE_X_SGMII = 1,
+ MLX5E_5GBASE_R = 3,
+ MLX5E_10GBASE_XFI_XAUI_1 = 4,
+ MLX5E_40GBASE_XLAUI_4_XLPPI_4 = 5,
+ MLX5E_25GAUI_1_25GBASE_CR_KR = 6,
+ MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2 = 7,
+ MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR = 8,
+ MLX5E_CAUI_4_100GBASE_CR4_KR4 = 9,
+ MLX5E_100GAUI_2_100GBASE_CR2_KR2 = 10,
+ MLX5E_100GAUI_1_100GBASE_CR_KR = 11,
+ MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12,
+ MLX5E_200GAUI_2_200GBASE_CR2_KR2 = 13,
+ MLX5E_400GAUI_8 = 15,
+ MLX5E_400GAUI_4_400GBASE_CR4_KR4 = 16,
+ MLX5E_EXT_LINK_MODES_NUMBER,
+};
+
enum mlx5e_connector_type {
MLX5E_PORT_UNKNOWN = 0,
MLX5E_PORT_NONE = 1,
@@ -105,35 +133,37 @@ enum mlx5e_connector_type {
MLX5E_CONNECTOR_TYPE_NUMBER,
};
-#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+enum mlx5_ptys_width {
+ MLX5_PTYS_WIDTH_1X = 1 << 0,
+ MLX5_PTYS_WIDTH_2X = 1 << 1,
+ MLX5_PTYS_WIDTH_4X = 1 << 2,
+ MLX5_PTYS_WIDTH_8X = 1 << 3,
+ MLX5_PTYS_WIDTH_12X = 1 << 4,
+};
+
+struct mlx5_port_eth_proto {
+ u32 cap;
+ u32 admin;
+ u32 oper;
+};
-#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
-#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF
+#define MLX5E_PROT_MASK(link_mode) (1U << link_mode)
+#define MLX5_GET_ETH_PROTO(reg, out, ext, field) \
+ (ext ? MLX5_GET(reg, out, ext_##field) : \
+ MLX5_GET(reg, out, field))
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port);
-int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
- u32 *proto_cap, int proto_mask);
-int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
- u32 *proto_admin, int proto_mask);
-int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
- u8 *link_width_oper, u8 local_port);
-int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
- u8 *proto_oper, u8 local_port);
-int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
- u32 *proto_oper, u8 local_port);
-int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
- u32 proto_admin, int proto_mask);
+
+int mlx5_query_ib_port_oper(struct mlx5_core_dev *dev, u16 *link_width_oper,
+ u16 *proto_oper, u8 local_port);
void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status);
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status *status);
int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
-void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
- u8 *an_status,
- u8 *an_disable_cap, u8 *an_disable_admin);
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
@@ -151,12 +181,20 @@ int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
u8 *pfc_en_rx);
+int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 stall_critical_watermark,
+ u16 stall_minor_watermark);
+int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 *stall_critical_watermark, u16 *stall_minor_watermark);
+
int mlx5_max_tc(struct mlx5_core_dev *mdev);
int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
u8 prio, u8 *tc);
int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
+int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+ u8 tc, u8 *tc_group);
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
u8 tc, u8 *bw_pct);
@@ -169,12 +207,31 @@ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
+int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen);
+int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen);
int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
bool *enabled);
int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
u16 offset, u16 size, u8 *data);
+int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
+ struct mlx5_module_eeprom_query_params *params, u8 *data);
int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
+
+int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
+int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
+int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
+int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
+
+int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
+ struct mlx5_port_eth_proto *eproto);
+bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev);
+u32 mlx5_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
+ bool force_legacy);
+u32 mlx5_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
+ bool force_legacy);
+int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+
#endif /* __MLX5_PORT_H__ */
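
The new mlx5_module_eeprom_query_params structure replaces the flat offset-based EEPROM interface with explicit page/bank/I2C addressing, which paged modules such as the newly added MLX5_MODULE_ID_DSFP require. A hedged sketch of a by-page read; the sizes and page number are placeholders, and the return convention (bytes read on success, negative errno on failure) is an assumption.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/port.h>

/* Illustrative only: read 32 bytes from page 1, bank 0 of module 0. */
static int example_read_module_page(struct mlx5_core_dev *mdev, u8 *buf)
{
	struct mlx5_module_eeprom_query_params params = {
		.size		= 32,
		.offset		= 0,
		.i2c_address	= MLX5_I2C_ADDR_LOW,
		.page		= 1,
		.bank		= 0,
		.module_number	= 0,
	};

	return mlx5_query_module_eeprom_by_page(mdev, &params, buf);
}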
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 6f41270d80c0..bd53cf4be7bd 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -36,8 +36,9 @@
#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>
-#define MLX5_INVALID_LKEY 0x100
-#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5)
+#define MLX5_TERMINATE_SCATTER_LIST_LKEY cpu_to_be32(0x100)
+/* UMR (3 WQE_BB's) + SIG (3 WQE_BB's) + PSV (mem) + PSV (wire) */
+#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 8)
#define MLX5_DIF_SIZE 8
#define MLX5_STRIDE_BLOCK_OP 0x400
#define MLX5_CPY_GRD_MASK 0xc0
@@ -65,11 +66,13 @@ enum mlx5_qp_optpar {
MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12,
MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13,
MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
+ MLX5_QP_OPTPAR_LAG_TX_AFF = 1 << 15,
MLX5_QP_OPTPAR_PRI_PORT = 1 << 16,
MLX5_QP_OPTPAR_SRQN = 1 << 18,
MLX5_QP_OPTPAR_CQN_RCV = 1 << 19,
MLX5_QP_OPTPAR_DC_HS = 1 << 20,
MLX5_QP_OPTPAR_DC_KEY = 1 << 21,
+ MLX5_QP_OPTPAR_COUNTER_SET_ID = 1 << 25,
};
enum mlx5_qp_state {
@@ -159,6 +162,8 @@ enum {
MLX5_SEND_WQE_MAX_WQEBBS = 16,
};
+#define MLX5_SEND_WQE_MAX_SIZE (MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQE_BB)
+
enum {
MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
@@ -199,10 +204,20 @@ struct mlx5_wqe_fmr_seg {
struct mlx5_wqe_ctrl_seg {
__be32 opmod_idx_opcode;
__be32 qpn_ds;
+
+ struct_group(trailer,
+
u8 signature;
u8 rsvd[2];
u8 fm_ce_se;
- __be32 imm;
+ union {
+ __be32 general_id;
+ __be32 imm;
+ __be32 umr_mkey;
+ __be32 tis_tir_num;
+ };
+
+ ); /* end of trailer group */
};
#define MLX5_WQE_CTRL_DS_MASK 0x3f
@@ -212,7 +227,6 @@ struct mlx5_wqe_ctrl_seg {
#define MLX5_WQE_CTRL_OPCODE_MASK 0xff
#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
-#define MLX5_WQE_AV_EXT 0x80000000
enum {
MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
@@ -222,6 +236,12 @@ enum {
};
enum {
+ MLX5_ETH_WQE_SVLAN = 1 << 0,
+ MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC = 1 << 26,
+ MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC = 1 << 27,
+ MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC = 3 << 26,
+ MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC = 1 << 28,
+ MLX5_ETH_WQE_INSERT_TRAILER = 1 << 30,
MLX5_ETH_WQE_INSERT_VLAN = 1 << 15,
};
@@ -232,6 +252,11 @@ enum {
MLX5_ETH_WQE_SWP_OUTER_L4_UDP = 1 << 5,
};
+enum {
+ MLX5_ETH_WQE_FT_META_IPSEC = BIT(0),
+ MLX5_ETH_WQE_FT_META_MACSEC = BIT(1),
+};
+
struct mlx5_wqe_eth_seg {
u8 swp_outer_l4_offset;
u8 swp_outer_l3_offset;
@@ -240,7 +265,7 @@ struct mlx5_wqe_eth_seg {
u8 cs_flags;
u8 swp_flags;
__be16 mss;
- __be32 rsvd2;
+ __be32 flow_table_metadata;
union {
struct {
__be16 sz;
@@ -250,6 +275,7 @@ struct mlx5_wqe_eth_seg {
__be16 type;
__be16 vlan_tci;
} insert;
+ __be32 trailer;
};
};
@@ -308,6 +334,7 @@ struct mlx5_av {
struct mlx5_ib_ah {
struct ib_ah ibah;
struct mlx5_av av;
+ u8 xmit_port;
};
static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
@@ -395,6 +422,7 @@ struct mlx5_wqe_signature_seg {
struct mlx5_wqe_inline_seg {
__be32 byte_count;
+ __be32 data[];
};
enum mlx5_sig_type {
@@ -450,6 +478,12 @@ struct mlx5_klm {
__be64 va;
};
+struct mlx5_ksm {
+ __be32 reserved;
+ __be32 key;
+ __be64 va;
+};
+
struct mlx5_stride_block_entry {
__be16 stride;
__be16 bcount;
@@ -465,120 +499,32 @@ struct mlx5_stride_block_ctrl_seg {
__be16 num_entries;
};
+struct mlx5_wqe_flow_update_ctrl_seg {
+ __be32 flow_idx_update;
+ __be32 dest_handle;
+ u8 reserved0[40];
+};
+
+struct mlx5_wqe_header_modify_argument_update_seg {
+ u8 argument_list[64];
+};
+
struct mlx5_core_qp {
struct mlx5_core_rsc_common common; /* must be first */
void (*event) (struct mlx5_core_qp *, int);
int qpn;
struct mlx5_rsc_debug *dbg;
int pid;
+ u16 uid;
};
-struct mlx5_qp_path {
- u8 fl_free_ar;
- u8 rsvd3;
- __be16 pkey_index;
- u8 rsvd0;
- u8 grh_mlid;
- __be16 rlid;
- u8 ackto_lt;
- u8 mgid_index;
- u8 static_rate;
- u8 hop_limit;
- __be32 tclass_flowlabel;
- union {
- u8 rgid[16];
- u8 rip[16];
- };
- u8 f_dscp_ecn_prio;
- u8 ecn_dscp;
- __be16 udp_sport;
- u8 dci_cfi_prio_sl;
- u8 port;
- u8 rmac[6];
+struct mlx5_core_dct {
+ struct mlx5_core_qp mqp;
+ struct completion drained;
};
-/* FIXME: use mlx5_ifc.h qpc */
-struct mlx5_qp_context {
- __be32 flags;
- __be32 flags_pd;
- u8 mtu_msgmax;
- u8 rq_size_stride;
- __be16 sq_crq_size;
- __be32 qp_counter_set_usr_page;
- __be32 wire_qpn;
- __be32 log_pg_sz_remote_qpn;
- struct mlx5_qp_path pri_path;
- struct mlx5_qp_path alt_path;
- __be32 params1;
- u8 reserved2[4];
- __be32 next_send_psn;
- __be32 cqn_send;
- __be32 deth_sqpn;
- u8 reserved3[4];
- __be32 last_acked_psn;
- __be32 ssn;
- __be32 params2;
- __be32 rnr_nextrecvpsn;
- __be32 xrcd;
- __be32 cqn_recv;
- __be64 db_rec_addr;
- __be32 qkey;
- __be32 rq_type_srqn;
- __be32 rmsn;
- __be16 hw_sq_wqe_counter;
- __be16 sw_sq_wqe_counter;
- __be16 hw_rcyclic_byte_counter;
- __be16 hw_rq_counter;
- __be16 sw_rcyclic_byte_counter;
- __be16 sw_rq_counter;
- u8 rsvd0[5];
- u8 cgs;
- u8 cs_req;
- u8 cs_res;
- __be64 dc_access_key;
- u8 rsvd1[24];
-};
-
-static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
-{
- return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
-}
-
-static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
-{
- return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
-}
-
-int mlx5_core_create_qp(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp,
- u32 *in,
- int inlen);
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
- u32 opt_param_mask, void *qpc,
- struct mlx5_core_qp *qp);
-int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp);
-int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
- u32 *out, int outlen);
-
-int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
-int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
-void mlx5_init_qp_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
-int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
- struct mlx5_core_qp *rq);
-void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *rq);
-int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
- struct mlx5_core_qp *sq);
-void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *sq);
-int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id);
-int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id);
-int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
- int reset, void *out, int out_size);
static inline const char *mlx5_qp_type_str(int type)
{
@@ -625,4 +571,11 @@ static inline const char *mlx5_qp_state_str(int state)
}
}
+static inline int mlx5_get_qp_default_ts(struct mlx5_core_dev *dev)
+{
+ return !MLX5_CAP_ROCE(dev, qp_ts_format) ?
+ MLX5_TIMESTAMP_FORMAT_FREE_RUNNING :
+ MLX5_TIMESTAMP_FORMAT_DEFAULT;
+}
+
#endif /* MLX5_QP_H */
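
mlx5_get_qp_default_ts() selects the free-running timestamp format on devices that do not advertise qp_ts_format in their RoCE caps. A sketch of the intended call site when a driver fills a QPC; MLX5_SET and the qpc field layout come from the mlx5 device headers, and the surrounding create flow is an assumption.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>

/* Hypothetical fragment of a QP create path: pick a timestamp format
 * the device is known to accept.
 */
static void example_set_qpc_ts_format(struct mlx5_core_dev *mdev, void *qpc)
{
	MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev));
}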
diff --git a/include/linux/mlx5/rsc_dump.h b/include/linux/mlx5/rsc_dump.h
new file mode 100644
index 000000000000..d11c0b228620
--- /dev/null
+++ b/include/linux/mlx5/rsc_dump.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies inc. */
+
+#include <linux/mlx5/driver.h>
+
+#ifndef __MLX5_RSC_DUMP
+#define __MLX5_RSC_DUMP
+
+enum mlx5_sgmt_type {
+ MLX5_SGMT_TYPE_HW_CQPC,
+ MLX5_SGMT_TYPE_HW_SQPC,
+ MLX5_SGMT_TYPE_HW_RQPC,
+ MLX5_SGMT_TYPE_FULL_SRQC,
+ MLX5_SGMT_TYPE_FULL_CQC,
+ MLX5_SGMT_TYPE_FULL_EQC,
+ MLX5_SGMT_TYPE_FULL_QPC,
+ MLX5_SGMT_TYPE_SND_BUFF,
+ MLX5_SGMT_TYPE_RCV_BUFF,
+ MLX5_SGMT_TYPE_SRQ_BUFF,
+ MLX5_SGMT_TYPE_CQ_BUFF,
+ MLX5_SGMT_TYPE_EQ_BUFF,
+ MLX5_SGMT_TYPE_SX_SLICE,
+ MLX5_SGMT_TYPE_SX_SLICE_ALL,
+ MLX5_SGMT_TYPE_RDB,
+ MLX5_SGMT_TYPE_RX_SLICE_ALL,
+ MLX5_SGMT_TYPE_PRM_QUERY_QP,
+ MLX5_SGMT_TYPE_PRM_QUERY_CQ,
+ MLX5_SGMT_TYPE_PRM_QUERY_MKEY,
+ MLX5_SGMT_TYPE_MENU,
+ MLX5_SGMT_TYPE_TERMINATE,
+
+ MLX5_SGMT_TYPE_NUM, /* Keep last */
+};
+
+struct mlx5_rsc_key {
+ enum mlx5_sgmt_type rsc;
+ int index1;
+ int index2;
+ int num_of_obj1;
+ int num_of_obj2;
+ int size;
+};
+
+struct mlx5_rsc_dump_cmd;
+
+struct mlx5_rsc_dump_cmd *mlx5_rsc_dump_cmd_create(struct mlx5_core_dev *dev,
+ struct mlx5_rsc_key *key);
+void mlx5_rsc_dump_cmd_destroy(struct mlx5_rsc_dump_cmd *cmd);
+int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
+ struct page *page, int *size);
+#endif /* __MLX5_RSC_DUMP */
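
The resource-dump interface is iterative: a command is created for a keyed resource, mlx5_rsc_dump_next() is called repeatedly with a page to fill, and the command is destroyed when done. A minimal sketch, assuming the convention used by the mlx5e health reporters (negative return means error, positive means more data follows, zero means done) and that cmd_create returns an ERR_PTR on failure.

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/mlx5/rsc_dump.h>

static int example_dump_resource(struct mlx5_core_dev *mdev,
				 struct mlx5_rsc_key *key)
{
	struct mlx5_rsc_dump_cmd *cmd;
	struct page *page;
	int size, err;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	cmd = mlx5_rsc_dump_cmd_create(mdev, key);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto free_page;
	}

	do {
		err = mlx5_rsc_dump_next(mdev, cmd, page, &size);
		/* ... copy 'size' bytes out of 'page' here ... */
	} while (err > 0);

	mlx5_rsc_dump_cmd_destroy(cmd);
free_page:
	__free_page(page);
	return err < 0 ? err : 0;
}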
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
deleted file mode 100644
index 1cde0fd53f90..000000000000
--- a/include/linux/mlx5/srq.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX5_SRQ_H
-#define MLX5_SRQ_H
-
-#include <linux/mlx5/driver.h>
-
-enum {
- MLX5_SRQ_FLAG_ERR = (1 << 0),
- MLX5_SRQ_FLAG_WQ_SIG = (1 << 1),
-};
-
-struct mlx5_srq_attr {
- u32 type;
- u32 flags;
- u32 log_size;
- u32 wqe_shift;
- u32 log_page_size;
- u32 wqe_cnt;
- u32 srqn;
- u32 xrcd;
- u32 page_offset;
- u32 cqn;
- u32 pd;
- u32 lwm;
- u32 user_index;
- u64 db_record;
- __be64 *pas;
-};
-
-struct mlx5_core_dev;
-
-void mlx5_init_srq_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev);
-
-#endif /* MLX5_SRQ_H */
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 88441f5ece25..60ffeb6b67ae 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -39,40 +39,51 @@ int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn);
void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn);
int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rqn);
-int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in);
void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out);
int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *sqn);
-int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in);
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out);
-int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *tirn);
-int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
- int inlen);
+int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state);
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, u32 *tirn);
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in);
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
-int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *tisn);
-int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in,
- int inlen);
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, u32 *tisn);
+int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in);
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
-int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *rmpn);
-int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen);
-int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn);
-int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
-int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
-int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *rmpn);
-int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
-int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
-int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
-
int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rqtn);
int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
int inlen);
void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
+struct mlx5_hairpin_params {
+ u8 log_data_size;
+ u8 log_num_packets;
+ u16 q_counter;
+ int num_channels;
+};
+
+struct mlx5_hairpin {
+ struct mlx5_core_dev *func_mdev;
+ struct mlx5_core_dev *peer_mdev;
+
+ int num_channels;
+
+ u32 *rqn;
+ u32 *sqn;
+
+ bool peer_gone;
+};
+
+struct mlx5_hairpin *
+mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
+ struct mlx5_core_dev *peer_mdev,
+ struct mlx5_hairpin_params *params);
+
+void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair);
+void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp);
#endif /* __TRANSOBJ_H__ */
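
A hairpin ties receive queues on one function to send queues on another so that matched traffic is looped back in hardware without software involvement. A minimal creation sketch; the queue sizing below is a placeholder, and mlx5_core_hairpin_create() is assumed to return an ERR_PTR on failure.

#include <linux/err.h>
#include <linux/mlx5/transobj.h>

static struct mlx5_hairpin *
example_create_hairpin(struct mlx5_core_dev *func_mdev,
		       struct mlx5_core_dev *peer_mdev)
{
	struct mlx5_hairpin_params params = {
		.log_data_size	 = 16,	/* placeholder buffer size */
		.log_num_packets = 10,	/* placeholder packet budget */
		.q_counter	 = 0,
		.num_channels	 = 1,
	};

	return mlx5_core_hairpin_create(func_mdev, peer_mdev, &params);
}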
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 656c70b65dd2..7f31432f44c2 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -36,33 +36,45 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
+#define MLX5_VPORT_MANAGER(mdev) \
+ (MLX5_CAP_GEN(mdev, vport_group_manager) && \
+ (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
+ mlx5_core_is_pf(mdev))
+
enum {
MLX5_CAP_INLINE_MODE_L2,
MLX5_CAP_INLINE_MODE_VPORT_CONTEXT,
MLX5_CAP_INLINE_MODE_NOT_REQUIRED,
};
+/* The vport number of each function must remain unchanged */
+enum {
+ MLX5_VPORT_PF = 0x0,
+ MLX5_VPORT_FIRST_VF = 0x1,
+ MLX5_VPORT_ECPF = 0xfffe,
+ MLX5_VPORT_UPLINK = 0xffff
+};
+
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
-u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
- u16 vport);
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
- u16 vport, u8 state);
+ u16 vport, u8 other_vport, u8 state);
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
- u16 vport, u8 *addr);
+ u16 vport, bool other, u8 *addr);
+int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 *min_inline);
void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline);
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 min_inline);
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
- u16 vport, u8 *addr);
+ u16 vport, const u8 *addr);
int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u64 *system_image_guid);
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
- u32 vport, u64 node_guid);
+ u16 vport, u64 node_guid);
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
u16 *qkey_viol_cntr);
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
@@ -80,7 +92,7 @@ int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
u64 *node_guid);
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
- u32 vport,
+ u16 vport,
enum mlx5_list_type list_type,
u8 addr_list[][ETH_ALEN],
int *list_size);
@@ -89,7 +101,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
u8 addr_list[][ETH_ALEN],
int list_size);
int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
- u32 vport,
+ u16 vport,
int *promisc_uc,
int *promisc_mc,
int *promisc_all);
@@ -97,22 +109,29 @@ int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
int promisc_uc,
int promisc_mc,
int promisc_all);
-int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
- u32 vport,
- u16 vlans[],
- int *size);
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
u16 vlans[],
int list_size);
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev);
+int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
+ u8 other_vport, u64 *rx_discard_vport_down,
+ u64 *tx_discard_vport_down);
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
- int vf, u8 port_num, void *out,
- size_t out_sz);
+ int vf, u8 port_num, void *out);
int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
u8 other_vport, u8 port_num,
int vf,
struct mlx5_hca_vport_context *req);
+int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable);
+int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status);
+
+int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
+ struct mlx5_core_dev *port_mdev);
+int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev);
+u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev);
+int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out,
+ u16 opmod);
#endif /* __MLX5_VPORT_H__ */
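
MLX5_VPORT_MANAGER() gates eswitch-management code to an Ethernet PF carrying the vport_group_manager capability, and the new fixed vport numbers (PF, first VF, ECPF, uplink) give such code stable addressing. A short sketch of the common pattern; the -EOPNOTSUPP policy is illustrative.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>

/* Illustrative: query the MAC of the first VF from the managing PF. */
static int example_query_first_vf_mac(struct mlx5_core_dev *mdev, u8 *mac)
{
	if (!MLX5_VPORT_MANAGER(mdev))
		return -EOPNOTSUPP;

	/* other=true: address another function's vport, not our own */
	return mlx5_query_nic_vport_mac_address(mdev, MLX5_VPORT_FIRST_VF,
						true, mac);
}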
diff --git a/include/linux/mm-arch-hooks.h b/include/linux/mm-arch-hooks.h
deleted file mode 100644
index 4efc3f56e6df..000000000000
--- a/include/linux/mm-arch-hooks.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Generic mm no-op hooks.
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef _LINUX_MM_ARCH_HOOKS_H
-#define _LINUX_MM_ARCH_HOOKS_H
-
-#include <asm/mm-arch-hooks.h>
-
-#ifndef arch_remap
-static inline void arch_remap(struct mm_struct *mm,
- unsigned long old_start, unsigned long old_end,
- unsigned long new_start, unsigned long new_end)
-{
-}
-#define arch_remap arch_remap
-#endif
-
-#endif /* _LINUX_MM_ARCH_HOOKS_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 46b9ac5e8569..27ce77080c79 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1,10 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_H
#define _LINUX_MM_H
#include <linux/errno.h>
-
-#ifdef __KERNEL__
-
#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
@@ -14,6 +12,7 @@
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
+#include <linux/mmap_lock.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
@@ -22,19 +21,28 @@
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
+#include <linux/page-flags.h>
#include <linux/page_ref.h>
+#include <linux/overflow.h>
+#include <linux/sizes.h>
+#include <linux/sched.h>
+#include <linux/pgtable.h>
+#include <linux/kasan.h>
+#include <linux/memremap.h>
+#include <linux/slab.h>
struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
-struct file_ra_state;
struct user_struct;
-struct writeback_control;
-struct bdi_writeback;
+struct pt_regs;
+
+extern int sysctl_page_lock_unfairness;
+void mm_core_init(void);
void init_mm_internals(void);
-#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
+#ifndef CONFIG_NUMA /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
static inline void set_max_mapnr(unsigned long limit)
@@ -45,9 +53,30 @@ static inline void set_max_mapnr(unsigned long limit)
static inline void set_max_mapnr(unsigned long limit) { }
#endif
-extern unsigned long totalram_pages;
+extern atomic_long_t _totalram_pages;
+static inline unsigned long totalram_pages(void)
+{
+ return (unsigned long)atomic_long_read(&_totalram_pages);
+}
+
+static inline void totalram_pages_inc(void)
+{
+ atomic_long_inc(&_totalram_pages);
+}
+
+static inline void totalram_pages_dec(void)
+{
+ atomic_long_dec(&_totalram_pages);
+}
+
+static inline void totalram_pages_add(long count)
+{
+ atomic_long_add(count, &_totalram_pages);
+}
+
extern void * high_memory;
extern int page_cluster;
+extern const int page_cluster_max;
#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
@@ -67,7 +96,6 @@ extern int mmap_rnd_compat_bits __read_mostly;
#endif
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#ifndef __pa_symbol
@@ -94,6 +122,59 @@ extern int mmap_rnd_compat_bits __read_mostly;
#endif
/*
+ * On some architectures it is expensive to call memset() for small sizes.
+ * If an architecture decides to implement its own version of
+ * mm_zero_struct_page, it should wrap the defines below in an #ifndef and
+ * define its own version of this macro in <asm/pgtable.h>.
+ */
+#if BITS_PER_LONG == 64
+/* This function must be updated when the size of struct page grows above 96
+ * or shrinks below 56. The idea is that the compiler optimizes out the
+ * switch() statement and leaves only move/store instructions. The compiler
+ * can also combine write statements if they are both assignments and can be
+ * reordered; this can result in several of the writes here being dropped.
+ */
+#define mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
+static inline void __mm_zero_struct_page(struct page *page)
+{
+ unsigned long *_pp = (void *)page;
+
+ /* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */
+ BUILD_BUG_ON(sizeof(struct page) & 7);
+ BUILD_BUG_ON(sizeof(struct page) < 56);
+ BUILD_BUG_ON(sizeof(struct page) > 96);
+
+ switch (sizeof(struct page)) {
+ case 96:
+ _pp[11] = 0;
+ fallthrough;
+ case 88:
+ _pp[10] = 0;
+ fallthrough;
+ case 80:
+ _pp[9] = 0;
+ fallthrough;
+ case 72:
+ _pp[8] = 0;
+ fallthrough;
+ case 64:
+ _pp[7] = 0;
+ fallthrough;
+ case 56:
+ _pp[6] = 0;
+ _pp[5] = 0;
+ _pp[4] = 0;
+ _pp[3] = 0;
+ _pp[2] = 0;
+ _pp[1] = 0;
+ _pp[0] = 0;
+ }
+}
+#else
+#define mm_zero_struct_page(pp) ((void)memset((pp), 0, sizeof(struct page)))
+#endif
+
+/*
* Default maximum number of active map areas, this limits the number of vmas
* per mm struct. Users can overwrite this number by sysctl but there is a
* problem.
@@ -121,19 +202,39 @@ extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;
-extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
- size_t *, loff_t *);
-extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
- size_t *, loff_t *);
+int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *,
+ loff_t *);
+int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
+ loff_t *);
+int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
+ loff_t *);
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
+#define folio_page_idx(folio, p) (page_to_pfn(p) - folio_pfn(folio))
+#else
+#define nth_page(page,n) ((page) + (n))
+#define folio_page_idx(folio, p) ((p) - &(folio)->page)
+#endif
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+/* to align the pointer to the (prev) page boundary */
+#define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)
+
/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
+#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
+static inline struct folio *lru_to_folio(struct list_head *head)
+{
+ return list_entry((head)->prev, struct folio, lru);
+}
+
+void setup_initial_init_mm(void *start_code, void *end_code,
+ void *end_data, void *brk);
+
/*
* Linux kernel virtual memory manager primitives.
* The idea being to have a "virtual" mm in the same way
@@ -143,7 +244,11 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
* mmap() functions).
*/
-extern struct kmem_cache *vm_area_cachep;
+struct vm_area_struct *vm_area_alloc(struct mm_struct *);
+struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
+void vm_area_free(struct vm_area_struct *);
+/* Use only if VMA has no other users */
+void __vm_area_free(struct vm_area_struct *vma);
#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
@@ -170,9 +275,13 @@ extern unsigned int kobjsize(const void *objp);
#define VM_MAYSHARE 0x00000080
#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
+#ifdef CONFIG_MMU
#define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */
+#else /* CONFIG_MMU */
+#define VM_MAYOVERLAY 0x00000200 /* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */
+#define VM_UFFD_MISSING 0
+#endif /* CONFIG_MMU */
#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
-#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
#define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */
#define VM_LOCKED 0x00002000
@@ -188,8 +297,9 @@ extern unsigned int kobjsize(const void *objp);
#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
+#define VM_SYNC 0x00800000 /* Synchronous page faults */
#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
-#define VM_ARCH_2 0x02000000
+#define VM_WIPEONFORK 0x02000000 /* Wipe VMA contents in child. */
#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
#ifdef CONFIG_MEM_SOFT_DIRTY
@@ -208,45 +318,81 @@ extern unsigned int kobjsize(const void *objp);
#define VM_HIGH_ARCH_BIT_1 33 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
+#define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
-#if defined(CONFIG_X86)
-# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
-#if defined (CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)
+#ifdef CONFIG_ARCH_HAS_PKEYS
# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0 VM_HIGH_ARCH_0 /* A protection key is a 4-bit value */
-# define VM_PKEY_BIT1 VM_HIGH_ARCH_1
+# define VM_PKEY_BIT1 VM_HIGH_ARCH_1 /* on x86 and 5-bit value on ppc64 */
# define VM_PKEY_BIT2 VM_HIGH_ARCH_2
# define VM_PKEY_BIT3 VM_HIGH_ARCH_3
+#ifdef CONFIG_PPC
+# define VM_PKEY_BIT4 VM_HIGH_ARCH_4
+#else
+# define VM_PKEY_BIT4 0
#endif
+#endif /* CONFIG_ARCH_HAS_PKEYS */
+
+#if defined(CONFIG_X86)
+# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP VM_ARCH_1
-#elif defined(CONFIG_METAG)
-# define VM_GROWSUP VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP VM_ARCH_1
+#elif defined(CONFIG_SPARC64)
+# define VM_SPARC_ADI VM_ARCH_1 /* Uses ADI tag for access control */
+# define VM_ARCH_CLEAR VM_SPARC_ADI
+#elif defined(CONFIG_ARM64)
+# define VM_ARM64_BTI VM_ARCH_1 /* BTI guarded page, a.k.a. GP bit */
+# define VM_ARCH_CLEAR VM_ARM64_BTI
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */
#endif
-#if defined(CONFIG_X86)
-/* MPX specific bounds table or bounds directory */
-# define VM_MPX VM_ARCH_2
+#if defined(CONFIG_ARM64_MTE)
+# define VM_MTE VM_HIGH_ARCH_0 /* Use Tagged memory for access control */
+# define VM_MTE_ALLOWED VM_HIGH_ARCH_1 /* Tagged memory permitted */
+#else
+# define VM_MTE VM_NONE
+# define VM_MTE_ALLOWED VM_NONE
#endif
#ifndef VM_GROWSUP
# define VM_GROWSUP VM_NONE
#endif
+#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
+# define VM_UFFD_MINOR_BIT 37
+# define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT) /* UFFD minor faults */
+#else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
+# define VM_UFFD_MINOR VM_NONE
+#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
+
/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ)
+#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
+
+/* Common data flag combinations */
+#define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_DATA_FLAGS_NON_EXEC (VM_READ | VM_WRITE | VM_MAYREAD | \
+ VM_MAYWRITE | VM_MAYEXEC)
+#define VM_DATA_FLAGS_EXEC (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#ifndef VM_DATA_DEFAULT_FLAGS /* arch can override this */
+#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_EXEC
+#endif
+
#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif
@@ -259,33 +405,61 @@ extern unsigned int kobjsize(const void *objp);
#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+/* VMA basic access permission flags */
+#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
+
+
/*
* Special vmas that are non-mergable, non-mlock()able.
- * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
*/
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
+/* This mask prevents VMA from being scanned with khugepaged */
+#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
+
/* This mask defines which mm->def_flags a process can inherit its parent */
#define VM_INIT_DEF_MASK VM_NOHUGEPAGE
-/* This mask is used to clear all the VMA flags used by mlock */
-#define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT))
+/* This mask represents all the VMA flag bits used by mlock */
+#define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT)
+
+/* Arch-specific flags to clear when updating VM flags on protection change */
+#ifndef VM_ARCH_CLEAR
+# define VM_ARCH_CLEAR VM_NONE
+#endif
+#define VM_FLAGS_CLEAR (ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)
/*
* mapping from the currently active vm_flags protection bits (the
* low four bits) to a page protection mask..
*/
-extern pgprot_t protection_map[16];
-#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */
-#define FAULT_FLAG_MKWRITE 0x02 /* Fault was mkwrite of existing pte */
-#define FAULT_FLAG_ALLOW_RETRY 0x04 /* Retry fault if blocking */
-#define FAULT_FLAG_RETRY_NOWAIT 0x08 /* Don't drop mmap_sem and wait when retrying */
-#define FAULT_FLAG_KILLABLE 0x10 /* The fault task is in SIGKILL killable region */
-#define FAULT_FLAG_TRIED 0x20 /* Second try */
-#define FAULT_FLAG_USER 0x40 /* The fault originated in userspace */
-#define FAULT_FLAG_REMOTE 0x80 /* faulting for non current tsk/mm */
-#define FAULT_FLAG_INSTRUCTION 0x100 /* The fault was during an instruction fetch */
+/*
+ * The default fault flags that should be used by most of the
+ * arch-specific page fault handlers.
+ */
+#define FAULT_FLAG_DEFAULT (FAULT_FLAG_ALLOW_RETRY | \
+ FAULT_FLAG_KILLABLE | \
+ FAULT_FLAG_INTERRUPTIBLE)
+
+/**
+ * fault_flag_allow_retry_first - check ALLOW_RETRY the first time
+ * @flags: Fault flags.
+ *
+ * This is mostly used in places where we want to avoid holding the
+ * mmap_lock for too long while waiting for another condition to change,
+ * in which case we can be polite and release the mmap_lock in the first
+ * round to avoid potential starvation of other processes that also want
+ * the mmap_lock.
+ *
+ * Return: true if the page fault allows retry and this is the first
+ * attempt of the fault handling; false otherwise.
+ */
+static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
+{
+ return (flags & FAULT_FLAG_ALLOW_RETRY) &&
+ (!(flags & FAULT_FLAG_TRIED));
+}
#define FAULT_FLAG_TRACE \
{ FAULT_FLAG_WRITE, "WRITE" }, \
@@ -296,10 +470,12 @@ extern pgprot_t protection_map[16];
{ FAULT_FLAG_TRIED, "TRIED" }, \
{ FAULT_FLAG_USER, "USER" }, \
{ FAULT_FLAG_REMOTE, "REMOTE" }, \
- { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }
+ { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }, \
+ { FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" }, \
+ { FAULT_FLAG_VMA_LOCK, "VMA_LOCK" }
/*
- * vm_fault is filled by the the pagefault handler and passed to the vma's
+ * vm_fault is filled by the pagefault handler and passed to the vma's
* ->fault function. The vma's ->fault is responsible for returning a bitmask
* of VM_FAULT_xxx flags that give details about how the fault was handled.
*
@@ -309,20 +485,28 @@ extern pgprot_t protection_map[16];
* pgoff should be used in favour of virtual_address, if possible.
*/
struct vm_fault {
- struct vm_area_struct *vma; /* Target VMA */
- unsigned int flags; /* FAULT_FLAG_xxx flags */
- gfp_t gfp_mask; /* gfp mask to be used for allocations */
- pgoff_t pgoff; /* Logical page offset based on vma */
- unsigned long address; /* Faulting virtual address */
+ const struct {
+ struct vm_area_struct *vma; /* Target VMA */
+ gfp_t gfp_mask; /* gfp mask to be used for allocations */
+ pgoff_t pgoff; /* Logical page offset based on vma */
+ unsigned long address; /* Faulting virtual address - masked */
+ unsigned long real_address; /* Faulting virtual address - unmasked */
+ };
+ enum fault_flag flags; /* FAULT_FLAG_xxx flags
+ * XXX: should really be 'const' */
pmd_t *pmd; /* Pointer to pmd entry matching
* the 'address' */
pud_t *pud; /* Pointer to pud entry matching
* the 'address'
*/
- pte_t orig_pte; /* Value of PTE at the time of fault */
+ union {
+ pte_t orig_pte; /* Value of PTE at the time of fault */
+ pmd_t orig_pmd; /* Value of PMD at the time of fault,
+ * used by PMD fault only.
+ */
+ };
struct page *cow_page; /* Page handler may use for COW fault */
- struct mem_cgroup *memcg; /* Cgroup cow_page belongs to */
struct page *page; /* ->fault handlers should return a
* page here, unless VM_FAULT_NOPAGE
* is set (which is also implied by
@@ -338,8 +522,8 @@ struct vm_fault {
* is not NULL, otherwise pmd.
*/
pgtable_t prealloc_pte; /* Pre-allocated pte page table.
- * vm_ops->map_pages() calls
- * alloc_set_pte() from atomic context.
+ * vm_ops->map_pages() sets up a page
+ * table from atomic context.
* do_fault_around() pre-allocates
* page table to avoid allocation from
* atomic context.
@@ -356,26 +540,42 @@ enum page_entry_size {
/*
* These are the virtual MM functions - opening of an area, closing and
* unmapping it (needed to keep files on disk up-to-date etc), pointer
- * to the functions called when a no-page or a wp-page exception occurs.
+ * to the functions called when a no-page or a wp-page exception occurs.
*/
struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
+ /**
+ * @close: Called when the VMA is being removed from the MM.
+ * Context: User context. May sleep. Caller holds mmap_lock.
+ */
void (*close)(struct vm_area_struct * area);
- int (*mremap)(struct vm_area_struct * area);
- int (*fault)(struct vm_fault *vmf);
- int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
- void (*map_pages)(struct vm_fault *vmf,
+ /* Called any time before splitting to check if it's allowed */
+ int (*may_split)(struct vm_area_struct *area, unsigned long addr);
+ int (*mremap)(struct vm_area_struct *area);
+ /*
+ * Called by mprotect() to make driver-specific permission
+ * checks before mprotect() is finalised. The VMA must not
+ * be modified. Returns 0 if mprotect() can proceed.
+ */
+ int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, unsigned long newflags);
+ vm_fault_t (*fault)(struct vm_fault *vmf);
+ vm_fault_t (*huge_fault)(struct vm_fault *vmf,
+ enum page_entry_size pe_size);
+ vm_fault_t (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
+ unsigned long (*pagesize)(struct vm_area_struct * area);
/* notification that a previously read-only page is about to become
* writable, if an error is returned it will cause a SIGBUS */
- int (*page_mkwrite)(struct vm_fault *vmf);
+ vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
- int (*pfn_mkwrite)(struct vm_fault *vmf);
+ vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
/* called by access_process_vm when get_user_pages() fails, typically
- * for use by special VMAs that can switch between memory and hardware
+ * for use by special VMAs. See also generic_access_phys() for a generic
+ * implementation useful for any iomem mapping.
*/
int (*access)(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
@@ -400,7 +600,7 @@ struct vm_operations_struct {
* (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
* in mm/mempolicy.c will do this automatically.
* get_policy() must NOT add a ref if the policy at (vma,addr) is not
- * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
+ * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
* If no [shared/vma] mempolicy exists at the addr, get_policy() op
* must return NULL--i.e., do not "fallback" to task or system default
* policy.
@@ -417,32 +617,362 @@ struct vm_operations_struct {
unsigned long addr);
};
-struct mmu_gather;
-struct inode;
+#ifdef CONFIG_NUMA_BALANCING
+static inline void vma_numab_state_init(struct vm_area_struct *vma)
+{
+ vma->numab_state = NULL;
+}
+static inline void vma_numab_state_free(struct vm_area_struct *vma)
+{
+ kfree(vma->numab_state);
+}
+#else
+static inline void vma_numab_state_init(struct vm_area_struct *vma) {}
+static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#ifdef CONFIG_PER_VMA_LOCK
+/*
+ * Try to read-lock a vma. The function is allowed to occasionally yield a
+ * false locked result to avoid performance overhead, in which case we fall
+ * back to using mmap_lock. The function should never yield a false unlocked
+ * result.
+ */
+static inline bool vma_start_read(struct vm_area_struct *vma)
+{
+	/* Check before locking. A race might cause a false locked result. */
+ if (vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))
+ return false;
-#define page_private(page) ((page)->private)
-#define set_page_private(page, v) ((page)->private = (v))
+ if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0))
+ return false;
-#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline int pmd_devmap(pmd_t pmd)
+ /*
+	 * Overflow might produce a false locked result.
+	 * A false unlocked result is impossible because we modify and check
+ * vma->vm_lock_seq under vma->vm_lock protection and mm->mm_lock_seq
+ * modification invalidates all existing locks.
+ */
+ if (unlikely(vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))) {
+ up_read(&vma->vm_lock->lock);
+ return false;
+ }
+ return true;
+}
+
+static inline void vma_end_read(struct vm_area_struct *vma)
{
- return 0;
+ rcu_read_lock(); /* keeps vma alive till the end of up_read */
+ up_read(&vma->vm_lock->lock);
+ rcu_read_unlock();
}
-static inline int pud_devmap(pud_t pud)
+
+static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
{
- return 0;
+ mmap_assert_write_locked(vma->vm_mm);
+
+ /*
+ * current task is holding mmap_write_lock, both vma->vm_lock_seq and
+ * mm->mm_lock_seq can't be concurrently modified.
+ */
+ *mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq);
+ return (vma->vm_lock_seq == *mm_lock_seq);
+}
+
+static inline void vma_start_write(struct vm_area_struct *vma)
+{
+ int mm_lock_seq;
+
+ if (__is_vma_write_locked(vma, &mm_lock_seq))
+ return;
+
+ down_write(&vma->vm_lock->lock);
+ vma->vm_lock_seq = mm_lock_seq;
+ up_write(&vma->vm_lock->lock);
+}
+
+static inline bool vma_try_start_write(struct vm_area_struct *vma)
+{
+ int mm_lock_seq;
+
+ if (__is_vma_write_locked(vma, &mm_lock_seq))
+ return true;
+
+ if (!down_write_trylock(&vma->vm_lock->lock))
+ return false;
+
+ vma->vm_lock_seq = mm_lock_seq;
+ up_write(&vma->vm_lock->lock);
+ return true;
+}
+
+static inline void vma_assert_write_locked(struct vm_area_struct *vma)
+{
+ int mm_lock_seq;
+
+ VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
+}
+
+static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
+{
+	/* When detaching, the vma should be write-locked */
+ if (detached)
+ vma_assert_write_locked(vma);
+ vma->detached = detached;
+}
+
+struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
+ unsigned long address);
+
+#else /* CONFIG_PER_VMA_LOCK */
+
+static inline void vma_init_lock(struct vm_area_struct *vma) {}
+static inline bool vma_start_read(struct vm_area_struct *vma)
+ { return false; }
+static inline void vma_end_read(struct vm_area_struct *vma) {}
+static inline void vma_start_write(struct vm_area_struct *vma) {}
+static inline bool vma_try_start_write(struct vm_area_struct *vma)
+ { return true; }
+static inline void vma_assert_write_locked(struct vm_area_struct *vma) {}
+static inline void vma_mark_detached(struct vm_area_struct *vma,
+ bool detached) {}
+
+#endif /* CONFIG_PER_VMA_LOCK */
+
+/*
+ * WARNING: vma_init does not initialize vma->vm_lock.
+ * Use vm_area_alloc()/vm_area_free() if vma needs locking.
+ */
+static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
+{
+ static const struct vm_operations_struct dummy_vm_ops = {};
+
+ memset(vma, 0, sizeof(*vma));
+ vma->vm_mm = mm;
+ vma->vm_ops = &dummy_vm_ops;
+ INIT_LIST_HEAD(&vma->anon_vma_chain);
+ vma_mark_detached(vma, false);
+ vma_numab_state_init(vma);
+}
+
+/* Use when VMA is not part of the VMA tree and needs no locking */
+static inline void vm_flags_init(struct vm_area_struct *vma,
+ vm_flags_t flags)
+{
+ ACCESS_PRIVATE(vma, __vm_flags) = flags;
+}
+
+/* Use when VMA is part of the VMA tree and modifications need coordination */
+static inline void vm_flags_reset(struct vm_area_struct *vma,
+ vm_flags_t flags)
+{
+ vma_start_write(vma);
+ vm_flags_init(vma, flags);
+}
+
+static inline void vm_flags_reset_once(struct vm_area_struct *vma,
+ vm_flags_t flags)
+{
+ vma_start_write(vma);
+ WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags);
+}
+
+static inline void vm_flags_set(struct vm_area_struct *vma,
+ vm_flags_t flags)
+{
+ vma_start_write(vma);
+ ACCESS_PRIVATE(vma, __vm_flags) |= flags;
+}
+
+static inline void vm_flags_clear(struct vm_area_struct *vma,
+ vm_flags_t flags)
+{
+ vma_start_write(vma);
+ ACCESS_PRIVATE(vma, __vm_flags) &= ~flags;
+}
+
+/*
+ * Use only if VMA is not part of the VMA tree or has no other users and
+ * therefore needs no locking.
+ */
+static inline void __vm_flags_mod(struct vm_area_struct *vma,
+ vm_flags_t set, vm_flags_t clear)
+{
+ vm_flags_init(vma, (vma->vm_flags | set) & ~clear);
+}
+
+/*
+ * Use only when the order of set/clear operations is unimportant, otherwise
+ * use vm_flags_{set|clear} explicitly.
+ */
+static inline void vm_flags_mod(struct vm_area_struct *vma,
+ vm_flags_t set, vm_flags_t clear)
+{
+ vma_start_write(vma);
+ __vm_flags_mod(vma, set, clear);
}
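
/*
 * A hedged usage sketch of the helpers above: a driver mmap() handler
 * marking its freshly created mapping. vm_flags_set() takes the VMA
 * write lock internally, so this is safe while the VMA is in the tree.
 * The flag combination is illustrative only.
 */
static inline void example_mark_io_mapping(struct vm_area_struct *vma)
{
	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
}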
-static inline int pgd_devmap(pgd_t pgd)
+
+static inline void vma_set_anonymous(struct vm_area_struct *vma)
+{
+ vma->vm_ops = NULL;
+}
+
+static inline bool vma_is_anonymous(struct vm_area_struct *vma)
+{
+ return !vma->vm_ops;
+}
+
+static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
+{
+ int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
+
+ if (!maybe_stack)
+ return false;
+
+ if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
+ VM_STACK_INCOMPLETE_SETUP)
+ return true;
+
+ return false;
+}
+
+static inline bool vma_is_foreign(struct vm_area_struct *vma)
+{
+ if (!current->mm)
+ return true;
+
+ if (current->mm != vma->vm_mm)
+ return true;
+
+ return false;
+}
+
+static inline bool vma_is_accessible(struct vm_area_struct *vma)
+{
+ return vma->vm_flags & VM_ACCESS_FLAGS;
+}
+
+static inline
+struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
+{
+ return mas_find(&vmi->mas, max - 1);
+}
+
+static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
+ /*
+ * Uses mas_find() to get the first VMA when the iterator starts.
+ * Calling mas_next() could skip the first entry.
+ */
+ return mas_find(&vmi->mas, ULONG_MAX);
+}
+
+static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
+{
+ return mas_prev(&vmi->mas, 0);
+}
+
+static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
+{
+ return vmi->mas.index;
+}
+
+static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
+{
+ return vmi->mas.last + 1;
+}
+static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
+ unsigned long count)
+{
+ return mas_expected_entries(&vmi->mas, count);
+}
+
+/* Free any unused preallocations */
+static inline void vma_iter_free(struct vma_iterator *vmi)
+{
+ mas_destroy(&vmi->mas);
+}
+
+static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
+ struct vm_area_struct *vma)
+{
+ vmi->mas.index = vma->vm_start;
+ vmi->mas.last = vma->vm_end - 1;
+ mas_store(&vmi->mas, vma);
+ if (unlikely(mas_is_err(&vmi->mas)))
+ return -ENOMEM;
+
return 0;
}
+
+static inline void vma_iter_invalidate(struct vma_iterator *vmi)
+{
+ mas_pause(&vmi->mas);
+}
+
+static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
+{
+ mas_set(&vmi->mas, addr);
+}
+
+#define for_each_vma(__vmi, __vma) \
+ while (((__vma) = vma_next(&(__vmi))) != NULL)
+
+/* The MM code likes to work with exclusive end addresses */
+#define for_each_vma_range(__vmi, __vma, __end) \
+ while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
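
/*
 * A minimal sketch of the iterator API above, assuming the
 * VMA_ITERATOR() initializer from mm_types.h and that the caller
 * already holds mmap_lock (read mode is sufficient for traversal).
 */
static inline unsigned long example_count_vmas(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	unsigned long nr = 0;

	for_each_vma(vmi, vma)
		nr++;
	return nr;
}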
+
+#ifdef CONFIG_SHMEM
+/*
+ * vma_is_shmem() is not inline because it is used only by slow
+ * paths in userfault.
+ */
+bool vma_is_shmem(struct vm_area_struct *vma);
+bool vma_is_anon_shmem(struct vm_area_struct *vma);
+#else
+static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
+static inline bool vma_is_anon_shmem(struct vm_area_struct *vma) { return false; }
#endif
+int vma_is_stack_for_current(struct vm_area_struct *vma);
+
+/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
+#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
+
+struct mmu_gather;
+struct inode;
+
/*
- * FIXME: take this include out, include page-flags.h in
- * files which need it (119 of them)
+ * compound_order() can be called without holding a reference, which means
+ * that niceties like page_folio() don't work. These callers should be
+ * prepared to handle wild return values. For example, PG_head may be
+ * set before _folio_order is initialised, or this may be a tail page.
+ * See compaction.c for some good examples.
*/
-#include <linux/page-flags.h>
+static inline unsigned int compound_order(struct page *page)
+{
+ struct folio *folio = (struct folio *)page;
+
+ if (!test_bit(PG_head, &folio->flags))
+ return 0;
+ return folio->_folio_order;
+}
+
+/**
+ * folio_order - The allocation order of a folio.
+ * @folio: The folio.
+ *
+ * A folio is composed of 2^order pages. See get_order() for the definition
+ * of order.
+ *
+ * Return: The order of the folio.
+ */
+static inline unsigned int folio_order(struct folio *folio)
+{
+ if (!folio_test_large(folio))
+ return 0;
+ return folio->_folio_order;
+}
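
/*
 * Illustrative only: a folio's size in bytes follows directly from its
 * order. (mm.h provides folio_size() for this; shown here purely to
 * demonstrate folio_order().)
 */
static inline size_t example_folio_bytes(struct folio *folio)
{
	return PAGE_SIZE << folio_order(folio);
}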
+
#include <linux/huge_mm.h>
/*
@@ -467,17 +997,29 @@ static inline int put_page_testzero(struct page *page)
return page_ref_dec_and_test(page);
}
+static inline int folio_put_testzero(struct folio *folio)
+{
+ return put_page_testzero(&folio->page);
+}
+
/*
* Try to grab a ref unless the page has a refcount of zero, return false if
* that is the case.
* This can be called when MMU is off so it must not access
* any of the virtual mappings.
*/
-static inline int get_page_unless_zero(struct page *page)
+static inline bool get_page_unless_zero(struct page *page)
{
return page_ref_add_unless(page, 1, 0);
}
+static inline struct folio *folio_get_nontail_page(struct page *page)
+{
+ if (unlikely(!get_page_unless_zero(page)))
+ return NULL;
+ return (struct folio *)page;
+}
+
extern int page_is_ram(unsigned long pfn);
enum {
@@ -499,59 +1041,35 @@ unsigned long vmalloc_to_pfn(const void *addr);
* On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
* is no special casing required.
*/
-static inline bool is_vmalloc_addr(const void *x)
-{
-#ifdef CONFIG_MMU
- unsigned long addr = (unsigned long)x;
- return addr >= VMALLOC_START && addr < VMALLOC_END;
-#else
- return false;
+#ifndef is_ioremap_addr
+#define is_ioremap_addr(x) is_vmalloc_addr(x)
#endif
-}
+
#ifdef CONFIG_MMU
+extern bool is_vmalloc_addr(const void *x);
extern int is_vmalloc_or_module_addr(const void *x);
#else
+static inline bool is_vmalloc_addr(const void *x)
+{
+ return false;
+}
static inline int is_vmalloc_or_module_addr(const void *x)
{
return 0;
}
#endif
-extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
-static inline void *kvmalloc(size_t size, gfp_t flags)
-{
- return kvmalloc_node(size, flags, NUMA_NO_NODE);
-}
-static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
-{
- return kvmalloc_node(size, flags | __GFP_ZERO, node);
-}
-static inline void *kvzalloc(size_t size, gfp_t flags)
-{
- return kvmalloc(size, flags | __GFP_ZERO);
-}
-
-static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
-{
- if (size != 0 && n > SIZE_MAX / size)
- return NULL;
-
- return kvmalloc(n * size, flags);
-}
-
-extern void kvfree(const void *addr);
-
-static inline atomic_t *compound_mapcount_ptr(struct page *page)
-{
- return &page[1].compound_mapcount;
-}
-
-static inline int compound_mapcount(struct page *page)
+/*
+ * How many times the entire folio is mapped as a single unit (e.g. by a
+ * PMD or PUD entry). This is probably not what you want, except for
+ * debugging purposes - it does not include PTE-mapped sub-pages; look
+ * at folio_mapcount() or page_mapcount() or total_mapcount() instead.
+ */
+static inline int folio_entire_mapcount(struct folio *folio)
{
- VM_BUG_ON_PAGE(!PageCompound(page), page);
- page = compound_head(page);
- return atomic_read(compound_mapcount_ptr(page)) + 1;
+ VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+ return atomic_read(&folio->_entire_mapcount) + 1;
}
/*
@@ -564,34 +1082,89 @@ static inline void page_mapcount_reset(struct page *page)
atomic_set(&(page)->_mapcount, -1);
}
-int __page_mapcount(struct page *page);
-
+/**
+ * page_mapcount() - Number of times this precise page is mapped.
+ * @page: The page.
+ *
+ * The number of times this page is mapped. If this page is part of
+ * a large folio, it includes the number of times this page is mapped
+ * as part of that folio.
+ *
+ * The result is undefined for pages which cannot be mapped into userspace,
+ * for example slab pages or special page types; see page_has_type().
+ * Such pages use this field in struct page differently.
+ */
static inline int page_mapcount(struct page *page)
{
- VM_BUG_ON_PAGE(PageSlab(page), page);
+ int mapcount = atomic_read(&page->_mapcount) + 1;
if (unlikely(PageCompound(page)))
- return __page_mapcount(page);
- return atomic_read(&page->_mapcount) + 1;
+ mapcount += folio_entire_mapcount(page_folio(page));
+
+ return mapcount;
+}
+
+int folio_total_mapcount(struct folio *folio);
+
+/**
+ * folio_mapcount() - Calculate the number of mappings of this folio.
+ * @folio: The folio.
+ *
+ * A large folio tracks both how many times the entire folio is mapped,
+ * and how many times each individual page in the folio is mapped.
+ * This function calculates the total number of times the folio is
+ * mapped.
+ *
+ * Return: The number of times this folio is mapped.
+ */
+static inline int folio_mapcount(struct folio *folio)
+{
+ if (likely(!folio_test_large(folio)))
+ return atomic_read(&folio->_mapcount) + 1;
+ return folio_total_mapcount(folio);
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-int total_mapcount(struct page *page);
-int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
-#else
static inline int total_mapcount(struct page *page)
{
- return page_mapcount(page);
+ if (likely(!PageCompound(page)))
+ return atomic_read(&page->_mapcount) + 1;
+ return folio_total_mapcount(page_folio(page));
}
-static inline int page_trans_huge_mapcount(struct page *page,
- int *total_mapcount)
+
+static inline bool folio_large_is_mapped(struct folio *folio)
{
- int mapcount = page_mapcount(page);
- if (total_mapcount)
- *total_mapcount = mapcount;
- return mapcount;
+ /*
+ * Reading _entire_mapcount below could be omitted if hugetlb
+ * participated in incrementing nr_pages_mapped when a folio is compound-mapped.
+ */
+ return atomic_read(&folio->_nr_pages_mapped) > 0 ||
+ atomic_read(&folio->_entire_mapcount) >= 0;
+}
+
+/**
+ * folio_mapped - Is this folio mapped into userspace?
+ * @folio: The folio.
+ *
+ * Return: True if any page in this folio is referenced by user page tables.
+ */
+static inline bool folio_mapped(struct folio *folio)
+{
+ if (likely(!folio_test_large(folio)))
+ return atomic_read(&folio->_mapcount) >= 0;
+ return folio_large_is_mapped(folio);
+}
+
+/*
+ * Return true if this page is mapped into pagetables.
+ * For a compound page, it returns true if any sub-page of the compound page is mapped,
+ * even if this particular sub-page is not itself mapped by any PTE or PMD.
+ */
+static inline bool page_mapped(struct page *page)
+{
+ if (likely(!PageCompound(page)))
+ return atomic_read(&page->_mapcount) >= 0;
+ return folio_large_is_mapped(page_folio(page));
}
-#endif
static inline struct page *virt_to_head_page(const void *x)
{
@@ -600,11 +1173,21 @@ static inline struct page *virt_to_head_page(const void *x)
return compound_head(page);
}
-void __put_page(struct page *page);
+static inline struct folio *virt_to_folio(const void *x)
+{
+ struct page *page = virt_to_page(x);
+
+ return page_folio(page);
+}
+
+void __folio_put(struct folio *folio);
void put_pages_list(struct list_head *pages);
void split_page(struct page *page, unsigned int order);
+void folio_copy(struct folio *dst, struct folio *src);
+
+unsigned long nr_free_buffer_pages(void);
/*
* Compound pages have a destructor function. Provide a
@@ -625,31 +1208,68 @@ enum compound_dtor_id {
#endif
NR_COMPOUND_DTORS,
};
-extern compound_page_dtor * const compound_page_dtors[];
+extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS];
static inline void set_compound_page_dtor(struct page *page,
enum compound_dtor_id compound_dtor)
{
+ struct folio *folio = (struct folio *)page;
+
VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
- page[1].compound_dtor = compound_dtor;
+ VM_BUG_ON_PAGE(!PageHead(page), page);
+ folio->_folio_dtor = compound_dtor;
}
-static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
+static inline void folio_set_compound_dtor(struct folio *folio,
+ enum compound_dtor_id compound_dtor)
{
- VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
- return compound_page_dtors[page[1].compound_dtor];
+ VM_BUG_ON_FOLIO(compound_dtor >= NR_COMPOUND_DTORS, folio);
+ folio->_folio_dtor = compound_dtor;
}
-static inline unsigned int compound_order(struct page *page)
+void destroy_large_folio(struct folio *folio);
+
+static inline void set_compound_order(struct page *page, unsigned int order)
{
- if (!PageHead(page))
- return 0;
- return page[1].compound_order;
+ struct folio *folio = (struct folio *)page;
+
+ folio->_folio_order = order;
+#ifdef CONFIG_64BIT
+ folio->_folio_nr_pages = 1U << order;
+#endif
}
-static inline void set_compound_order(struct page *page, unsigned int order)
+/* Returns the number of bytes in this potentially compound page. */
+static inline unsigned long page_size(struct page *page)
{
- page[1].compound_order = order;
+ return PAGE_SIZE << compound_order(page);
+}
+
+/* Returns the number of bits needed for the number of bytes in a page */
+static inline unsigned int page_shift(struct page *page)
+{
+ return PAGE_SHIFT + compound_order(page);
+}
+
+/**
+ * thp_order - Order of a transparent huge page.
+ * @page: Head page of a transparent huge page.
+ */
+static inline unsigned int thp_order(struct page *page)
+{
+ VM_BUG_ON_PGFLAGS(PageTail(page), page);
+ return compound_order(page);
+}
+
+/**
+ * thp_size - Size of a transparent huge page.
+ * @page: Head page of a transparent huge page.
+ *
+ * Return: Number of bytes in this page.
+ */
+static inline unsigned long thp_size(struct page *page)
+{
+ return PAGE_SIZE << thp_order(page);
}
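/*
 * Worked example for the helpers above: with 4KiB base pages (e.g. on
 * x86-64), a PMD-sized THP has compound_order() == 9, so page_size()
 * returns 4096 << 9 = 2MiB and page_shift() returns 12 + 9 = 21.
 */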
void free_compound_page(struct page *page);
@@ -668,10 +1288,11 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
return pte;
}
-int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
- struct page *page);
-int finish_fault(struct vm_fault *vmf);
-int finish_mkwrite_fault(struct vm_fault *vmf);
+vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
+void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);
+
+vm_fault_t finish_fault(struct vm_fault *vmf);
+vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#endif
/*
@@ -721,7 +1342,7 @@ int finish_mkwrite_fault(struct vm_fault *vmf);
* refcount. Each user mapping also has a reference to the page.
*
* The pagecache pages are stored in a per-mapping radix tree, which is
- * rooted at mapping->page_tree, and indexed by offset.
+ * rooted at mapping->i_pages, and indexed by offset.
* Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
* lists, we instead now tag pages as dirty/writeback in the radix tree.
*
@@ -734,85 +1355,208 @@ int finish_mkwrite_fault(struct vm_fault *vmf);
* back into memory.
*/
-/*
- * The zone field is never updated after free_area_init_core()
- * sets it, so none of the operations on it need to be atomic.
- */
+#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
+DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
-/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
-#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
-#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
-#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
-#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
+bool __put_devmap_managed_page_refs(struct page *page, int refs);
+static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
+{
+ if (!static_branch_unlikely(&devmap_managed_key))
+ return false;
+ if (!is_zone_device_page(page))
+ return false;
+ return __put_devmap_managed_page_refs(page, refs);
+}
+#else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
+static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
+{
+ return false;
+}
+#endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
-/*
- * Define the bit shifts to access each section. For non-existent
- * sections we define the shift as 0; that plus a 0 mask ensures
- * the compiler will optimise away reference to them.
- */
-#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
-#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
-#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
-#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
+static inline bool put_devmap_managed_page(struct page *page)
+{
+ return put_devmap_managed_page_refs(page, 1);
+}
-/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
-#ifdef NODE_NOT_IN_PAGE_FLAGS
-#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
-#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \
- SECTIONS_PGOFF : ZONES_PGOFF)
-#else
-#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
-#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? \
- NODES_PGOFF : ZONES_PGOFF)
-#endif
+/* 127: arbitrary random number, small enough to assemble well */
+#define folio_ref_zero_or_close_to_overflow(folio) \
+ ((unsigned int) folio_ref_count(folio) + 127u <= 127u)
-#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
+/**
+ * folio_get - Increment the reference count on a folio.
+ * @folio: The folio.
+ *
+ * Context: May be called in any context, as long as you know that
+ * you have a refcount on the folio. If you do not already have one,
+ * folio_try_get() may be the right interface for you to use.
+ */
+static inline void folio_get(struct folio *folio)
+{
+ VM_BUG_ON_FOLIO(folio_ref_zero_or_close_to_overflow(folio), folio);
+ folio_ref_inc(folio);
+}
-#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
-#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
-#endif
+static inline void get_page(struct page *page)
+{
+ folio_get(page_folio(page));
+}
-#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
-#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
-#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
-#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
-#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
+static inline __must_check bool try_get_page(struct page *page)
+{
+ page = compound_head(page);
+ if (WARN_ON_ONCE(page_ref_count(page) <= 0))
+ return false;
+ page_ref_inc(page);
+ return true;
+}
-static inline enum zone_type page_zonenum(const struct page *page)
+/**
+ * folio_put - Decrement the reference count on a folio.
+ * @folio: The folio.
+ *
+ * If the folio's reference count reaches zero, the memory will be
+ * released back to the page allocator and may be used by another
+ * allocation immediately. Do not access the memory or the struct folio
+ * after calling folio_put() unless you can be sure that it wasn't the
+ * last reference.
+ *
+ * Context: May be called in process or interrupt context, but not in NMI
+ * context. May be called while holding a spinlock.
+ */
+static inline void folio_put(struct folio *folio)
{
- return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
+ if (folio_put_testzero(folio))
+ __folio_put(folio);
}
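/*
 * A minimal pairing sketch:
 *
 *	folio_get(folio);	take a temporary reference
 *	... use the folio ...
 *	folio_put(folio);	may free it if this was the last reference
 */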
-#ifdef CONFIG_ZONE_DEVICE
-static inline bool is_zone_device_page(const struct page *page)
+/**
+ * folio_put_refs - Reduce the reference count on a folio.
+ * @folio: The folio.
+ * @refs: The amount to subtract from the folio's reference count.
+ *
+ * If the folio's reference count reaches zero, the memory will be
+ * released back to the page allocator and may be used by another
+ * allocation immediately. Do not access the memory or the struct folio
+ * after calling folio_put_refs() unless you can be sure that these weren't
+ * the last references.
+ *
+ * Context: May be called in process or interrupt context, but not in NMI
+ * context. May be called while holding a spinlock.
+ */
+static inline void folio_put_refs(struct folio *folio, int refs)
{
- return page_zonenum(page) == ZONE_DEVICE;
+ if (folio_ref_sub_and_test(folio, refs))
+ __folio_put(folio);
}
-#else
-static inline bool is_zone_device_page(const struct page *page)
+
+/*
+ * union release_pages_arg - an array of pages or folios
+ *
+ * release_pages() releases a simple array of multiple pages, and
+ * accepts various different forms of said page array: either
+ * a regular old boring array of pages, an array of folios, or
+ * an array of encoded page pointers.
+ *
+ * The transparent union syntax for this kind of "any of these
+ * argument types" is all kinds of ugly, so look away.
+ */
+typedef union {
+ struct page **pages;
+ struct folio **folios;
+ struct encoded_page **encoded_pages;
+} release_pages_arg __attribute__ ((__transparent_union__));
+
+void release_pages(release_pages_arg, int nr);
+
+/**
+ * folios_put - Decrement the reference count on an array of folios.
+ * @folios: The folios.
+ * @nr: How many folios there are.
+ *
+ * Like folio_put(), but for an array of folios. This is more efficient
+ * than writing the loop yourself as it will optimise the locks which
+ * need to be taken if the folios are freed.
+ *
+ * Context: May be called in process or interrupt context, but not in NMI
+ * context. May be called while holding a spinlock.
+ */
+static inline void folios_put(struct folio **folios, unsigned int nr)
{
- return false;
+ release_pages(folios, nr);
}
-#endif
-static inline void get_page(struct page *page)
+static inline void put_page(struct page *page)
{
- page = compound_head(page);
+ struct folio *folio = page_folio(page);
+
/*
- * Getting a normal page or the head of a compound page
- * requires to already have an elevated page->_refcount.
+ * For some devmap managed pages we need to catch refcount transition
+ * from 2 to 1:
*/
- VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
- page_ref_inc(page);
+ if (put_devmap_managed_page(&folio->page))
+ return;
+ folio_put(folio);
}
-static inline void put_page(struct page *page)
+/*
+ * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload
+ * the page's refcount so that two separate items are tracked: the original page
+ * reference count, and also a new count of how many pin_user_pages() calls were
+ * made against the page. ("gup-pinned" is another term for the latter).
+ *
+ * With this scheme, pin_user_pages() becomes special: such pages are marked as
+ * distinct from normal pages. As such, the unpin_user_page() call (and its
+ * variants) must be used in order to release gup-pinned pages.
+ *
+ * Choice of value:
+ *
+ * By making GUP_PIN_COUNTING_BIAS a power of two, debugging of page reference
+ * counts with respect to pin_user_pages() and unpin_user_page() becomes
+ * simpler, due to the fact that adding an even power of two to the page
+ * refcount has the effect of using only the upper N bits, for the code that
+ * counts up using the bias value. This means that the lower bits are left for
+ * the exclusive use of the original code that increments and decrements by one
+ * (or at least, by much smaller values than the bias value).
+ *
+ * Of course, once the lower bits overflow into the upper bits (and this is
+ * OK, because subtraction recovers the original values), then visual inspection
+ * no longer suffices to directly view the separate counts. However, for normal
+ * applications that don't have huge page reference counts, this won't be an
+ * issue.
+ *
+ * Locking: the lockless algorithm described in folio_try_get_rcu()
+ * provides safe operation for get_user_pages(), page_mkclean() and
+ * other calls that race to set up page table entries.
+ */
+#define GUP_PIN_COUNTING_BIAS (1U << 10)
+
+void unpin_user_page(struct page *page);
+void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
+ bool make_dirty);
+void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
+ bool make_dirty);
+void unpin_user_pages(struct page **pages, unsigned long npages);
+
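/*
 * A minimal pin/unpin sketch using the fast variant declared further down
 * in this header (uaddr is an assumed user-space address):
 *
 *	struct page *pages[16];
 *	int n = pin_user_pages_fast(uaddr, 16, FOLL_WRITE, pages);
 *
 *	if (n > 0) {
 *		... DMA to/from the pages ...
 *		unpin_user_pages_dirty_lock(pages, n, true);
 *	}
 */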
+static inline bool is_cow_mapping(vm_flags_t flags)
{
- page = compound_head(page);
+ return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
+}
- if (put_page_testzero(page))
- __put_page(page);
+#ifndef CONFIG_MMU
+static inline bool is_nommu_shared_mapping(vm_flags_t flags)
+{
+ /*
+ * NOMMU shared mappings are ordinary MAP_SHARED mappings and selected
+ * R/O MAP_PRIVATE file mappings that are an effective R/O overlay of
+ * a file mapping. R/O MAP_PRIVATE mappings might still modify
+ * underlying memory if ptrace is active, so this is only possible if
+ * ptrace does not apply. Note that there is no mprotect() to upgrade
+ * write permissions later.
+ */
+ return flags & (VM_MAYSHARE | VM_MAYOVERLAY);
}
+#endif
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
@@ -831,25 +1575,35 @@ static inline int page_zone_id(struct page *page)
return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}
-static inline int zone_to_nid(struct zone *zone)
-{
-#ifdef CONFIG_NUMA
- return zone->node;
-#else
- return 0;
-#endif
-}
-
#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
- return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
+ struct page *p = (struct page *)page;
+
+ return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif
+static inline int folio_nid(const struct folio *folio)
+{
+ return page_to_nid(&folio->page);
+}
+
#ifdef CONFIG_NUMA_BALANCING
+/* page access time bits need to hold at least 4 seconds */
+#define PAGE_ACCESS_TIME_MIN_BITS 12
+#if LAST_CPUPID_SHIFT < PAGE_ACCESS_TIME_MIN_BITS
+#define PAGE_ACCESS_TIME_BUCKETS \
+ (PAGE_ACCESS_TIME_MIN_BITS - LAST_CPUPID_SHIFT)
+#else
+#define PAGE_ACCESS_TIME_BUCKETS 0
+#endif
+
+#define PAGE_ACCESS_TIME_MASK \
+ (LAST_CPUPID_MASK << PAGE_ACCESS_TIME_BUCKETS)
+
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
@@ -913,12 +1667,35 @@ static inline void page_cpupid_reset_last(struct page *page)
page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
+
+static inline int xchg_page_access_time(struct page *page, int time)
+{
+ int last_time;
+
+ last_time = page_cpupid_xchg_last(page, time >> PAGE_ACCESS_TIME_BUCKETS);
+ return last_time << PAGE_ACCESS_TIME_BUCKETS;
+}
+
+static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
+{
+ unsigned int pid_bit;
+
+ pid_bit = hash_32(current->pid, ilog2(BITS_PER_LONG));
+ if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->access_pids[1])) {
+ __set_bit(pid_bit, &vma->numab_state->access_pids[1]);
+ }
+}
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
return page_to_nid(page); /* XXX */
}
+static inline int xchg_page_access_time(struct page *page, int time)
+{
+ return 0;
+}
+
static inline int page_cpupid_last(struct page *page)
{
return page_to_nid(page); /* XXX */
@@ -946,7 +1723,7 @@ static inline int cpu_pid_to_cpupid(int nid, int pid)
static inline bool cpupid_pid_unset(int cpupid)
{
- return 1;
+ return true;
}
static inline void page_cpupid_reset_last(struct page *page)
@@ -957,8 +1734,66 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
return false;
}
+
+static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
+{
+}
#endif /* CONFIG_NUMA_BALANCING */
+#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
+
+/*
+ * KASAN per-page tags are stored xor'ed with 0xff. This avoids having to
+ * set the tags of all pages to the native kernel tag value 0xff, as the
+ * default value 0x00 then maps to 0xff.
+ */
+
+static inline u8 page_kasan_tag(const struct page *page)
+{
+ u8 tag = 0xff;
+
+ if (kasan_enabled()) {
+ tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+ tag ^= 0xff;
+ }
+
+ return tag;
+}
+
+static inline void page_kasan_tag_set(struct page *page, u8 tag)
+{
+ unsigned long old_flags, flags;
+
+ if (!kasan_enabled())
+ return;
+
+ tag ^= 0xff;
+ old_flags = READ_ONCE(page->flags);
+ do {
+ flags = old_flags;
+ flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
+ flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
+ } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
+}
+
+static inline void page_kasan_tag_reset(struct page *page)
+{
+ if (kasan_enabled())
+ page_kasan_tag_set(page, 0xff);
+}
+
+#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
+
+static inline u8 page_kasan_tag(const struct page *page)
+{
+ return 0xff;
+}
+
+static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
+static inline void page_kasan_tag_reset(struct page *page) { }
+
+#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
+
static inline struct zone *page_zone(const struct page *page)
{
return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
@@ -969,6 +1804,16 @@ static inline pg_data_t *page_pgdat(const struct page *page)
return NODE_DATA(page_to_nid(page));
}
+static inline struct zone *folio_zone(const struct folio *folio)
+{
+ return page_zone(&folio->page);
+}
+
+static inline pg_data_t *folio_pgdat(const struct folio *folio)
+{
+ return page_pgdat(&folio->page);
+}
+
#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
@@ -982,6 +1827,122 @@ static inline unsigned long page_to_section(const struct page *page)
}
#endif
+/**
+ * folio_pfn - Return the Page Frame Number of a folio.
+ * @folio: The folio.
+ *
+ * A folio may contain multiple pages. The pages have consecutive
+ * Page Frame Numbers.
+ *
+ * Return: The Page Frame Number of the first page in the folio.
+ */
+static inline unsigned long folio_pfn(struct folio *folio)
+{
+ return page_to_pfn(&folio->page);
+}
+
+static inline struct folio *pfn_folio(unsigned long pfn)
+{
+ return page_folio(pfn_to_page(pfn));
+}
+
+/**
+ * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA.
+ * @folio: The folio.
+ *
+ * This function checks if a folio has been pinned via a call to
+ * a function in the pin_user_pages() family.
+ *
+ * For small folios, the return value is partially fuzzy: false is not fuzzy,
+ * because it means "definitely not pinned for DMA", but true means "probably
+ * pinned for DMA, but possibly a false positive due to having at least
+ * GUP_PIN_COUNTING_BIAS worth of normal folio references".
+ *
+ * False positives are OK, because: a) it's unlikely for a folio to
+ * get that many refcounts, and b) all the callers of this routine are
+ * expected to be able to deal gracefully with a false positive.
+ *
+ * For large folios, the result will be exactly correct. That's because
+ * we have more tracking data available: the _pincount field is used
+ * instead of the GUP_PIN_COUNTING_BIAS scheme.
+ *
+ * For more information, please see Documentation/core-api/pin_user_pages.rst.
+ *
+ * Return: True, if it is likely that the folio has been "dma-pinned".
+ * False, if the folio is definitely not dma-pinned.
+ */
+static inline bool folio_maybe_dma_pinned(struct folio *folio)
+{
+ if (folio_test_large(folio))
+ return atomic_read(&folio->_pincount) > 0;
+
+ /*
+ * folio_ref_count() is signed. If that refcount overflows, then
+ * folio_ref_count() returns a negative value, and callers will avoid
+ * further incrementing the refcount.
+ *
+ * Here, for that overflow case, use the sign bit to count a little
+ * bit higher via unsigned math, and thus still get an accurate result.
+ */
+ return ((unsigned int)folio_ref_count(folio)) >=
+ GUP_PIN_COUNTING_BIAS;
+}
+
+static inline bool page_maybe_dma_pinned(struct page *page)
+{
+ return folio_maybe_dma_pinned(page_folio(page));
+}
+
+/*
+ * This should most likely only be called during fork() to see whether we
+ * should break the cow immediately for an anon page on the src mm.
+ *
+ * The caller has to hold the PT lock and the vma->vm_mm->write_protect_seq.
+ */
+static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
+ struct page *page)
+{
+ VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
+
+ if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
+ return false;
+
+ return page_maybe_dma_pinned(page);
+}
+
+/* MIGRATE_CMA and ZONE_MOVABLE do not allow pinning pages */
+#ifdef CONFIG_MIGRATION
+static inline bool is_longterm_pinnable_page(struct page *page)
+{
+#ifdef CONFIG_CMA
+ int mt = get_pageblock_migratetype(page);
+
+ if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
+ return false;
+#endif
+ /* The zero page may always be pinned */
+ if (is_zero_pfn(page_to_pfn(page)))
+ return true;
+
+ /* Coherent device memory must always allow eviction. */
+ if (is_device_coherent_page(page))
+ return false;
+
+ /* Otherwise, non-movable zone pages can be pinned. */
+ return !is_zone_movable_page(page);
+}
+#else
+static inline bool is_longterm_pinnable_page(struct page *page)
+{
+ return true;
+}
+#endif
+
+static inline bool folio_is_longterm_pinnable(struct folio *folio)
+{
+ return is_longterm_pinnable_page(&folio->page);
+}
+
static inline void set_page_zone(struct page *page, enum zone_type zone)
{
page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
@@ -1004,25 +1965,137 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
#endif
}
-#ifdef CONFIG_MEMCG
-static inline struct mem_cgroup *page_memcg(struct page *page)
+/**
+ * folio_nr_pages - The number of pages in the folio.
+ * @folio: The folio.
+ *
+ * Return: A positive power of two.
+ */
+static inline long folio_nr_pages(struct folio *folio)
{
- return page->mem_cgroup;
+ if (!folio_test_large(folio))
+ return 1;
+#ifdef CONFIG_64BIT
+ return folio->_folio_nr_pages;
+#else
+ return 1L << folio->_folio_order;
+#endif
}
-static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
+
+/*
+ * compound_nr() returns the number of pages in this potentially compound
+ * page. compound_nr() can be called on a tail page, and is defined to
+ * return 1 in that case.
+ */
+static inline unsigned long compound_nr(struct page *page)
{
- WARN_ON_ONCE(!rcu_read_lock_held());
- return READ_ONCE(page->mem_cgroup);
-}
+ struct folio *folio = (struct folio *)page;
+
+ if (!test_bit(PG_head, &folio->flags))
+ return 1;
+#ifdef CONFIG_64BIT
+ return folio->_folio_nr_pages;
#else
-static inline struct mem_cgroup *page_memcg(struct page *page)
+ return 1L << folio->_folio_order;
+#endif
+}
+
+/**
+ * thp_nr_pages - The number of regular pages in this huge page.
+ * @page: The head page of a huge page.
+ */
+static inline int thp_nr_pages(struct page *page)
{
- return NULL;
+ return folio_nr_pages((struct folio *)page);
+}
+
+/**
+ * folio_next - Move to the next physical folio.
+ * @folio: The folio we're currently operating on.
+ *
+ * If you have physically contiguous memory which may span more than
+ * one folio (eg a &struct bio_vec), use this function to move from one
+ * folio to the next. Do not use it if the memory is only virtually
+ * contiguous as the folios are almost certainly not adjacent to each
+ * other. This is the folio equivalent to writing ``page++``.
+ *
+ * Context: We assume that the folios are refcounted and/or locked at a
+ * higher level and do not adjust the reference counts.
+ * Return: The next struct folio.
+ */
+static inline struct folio *folio_next(struct folio *folio)
+{
+ return (struct folio *)folio_page(folio, folio_nr_pages(folio));
}
-static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
+
+/**
+ * folio_shift - The size of the memory described by this folio.
+ * @folio: The folio.
+ *
+ * A folio represents a number of bytes which is a power-of-two in size.
+ * This function tells you which power-of-two the folio is. See also
+ * folio_size() and folio_order().
+ *
+ * Context: The caller should have a reference on the folio to prevent
+ * it from being split. It is not necessary for the folio to be locked.
+ * Return: The base-2 logarithm of the size of this folio.
+ */
+static inline unsigned int folio_shift(struct folio *folio)
{
- WARN_ON_ONCE(!rcu_read_lock_held());
- return NULL;
+ return PAGE_SHIFT + folio_order(folio);
+}
+
+/**
+ * folio_size - The number of bytes in a folio.
+ * @folio: The folio.
+ *
+ * Context: The caller should have a reference on the folio to prevent
+ * it from being split. It is not necessary for the folio to be locked.
+ * Return: The number of bytes in this folio.
+ */
+static inline size_t folio_size(struct folio *folio)
+{
+ return PAGE_SIZE << folio_order(folio);
+}
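/*
 * A minimal sketch combining folio_size() and folio_next(): walking a
 * physically contiguous, folio-aligned buffer (buf and len are assumed):
 *
 *	struct folio *folio = virt_to_folio(buf);
 *	size_t left = len;
 *
 *	while (left) {
 *		size_t chunk = min(folio_size(folio), left);
 *
 *		... process chunk bytes ...
 *		left -= chunk;
 *		folio = folio_next(folio);
 *	}
 */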
+
+/**
+ * folio_estimated_sharers - Estimate the number of sharers of a folio.
+ * @folio: The folio.
+ *
+ * folio_estimated_sharers() efficiently estimates the number of
+ * processes sharing a folio. This is done by
+ * looking at the precise mapcount of the first subpage in the folio, and
+ * assuming the other subpages are the same. This may not be true for large
+ * folios. If you want exact mapcounts for exact calculations, look at
+ * page_mapcount() or folio_total_mapcount().
+ *
+ * Return: The estimated number of processes sharing a folio.
+ */
+static inline int folio_estimated_sharers(struct folio *folio)
+{
+ return page_mapcount(folio_page(folio, 0));
+}
+
+#ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
+static inline int arch_make_page_accessible(struct page *page)
+{
+ return 0;
+}
+#endif
+
+#ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
+static inline int arch_make_folio_accessible(struct folio *folio)
+{
+ int ret;
+ long i, nr = folio_nr_pages(folio);
+
+ for (i = 0; i < nr; i++) {
+ ret = arch_make_page_accessible(folio_page(folio, i));
+ if (ret)
+ break;
+ }
+
+ return ret;
}
#endif
@@ -1064,21 +2137,12 @@ void page_address_init(void);
#define page_address_init() do { } while(0)
#endif
-extern void *page_rmapping(struct page *page);
-extern struct anon_vma *page_anon_vma(struct page *page);
-extern struct address_space *page_mapping(struct page *page);
-
-extern struct address_space *__page_file_mapping(struct page *);
-
-static inline
-struct address_space *page_file_mapping(struct page *page)
+static inline void *folio_address(const struct folio *folio)
{
- if (unlikely(PageSwapCache(page)))
- return __page_file_mapping(page);
-
- return page->mapping;
+ return page_address(&folio->page);
}
+extern void *page_rmapping(struct page *page);
extern pgoff_t __page_file_index(struct page *page);
/*
@@ -1092,21 +2156,34 @@ static inline pgoff_t page_index(struct page *page)
return page->index;
}
-bool page_mapped(struct page *page);
-struct address_space *page_mapping(struct page *page);
-
/*
* Return true only if the page has been allocated with
* ALLOC_NO_WATERMARKS and the low watermark was not
* met, implying that the system is under some pressure.
*/
-static inline bool page_is_pfmemalloc(struct page *page)
+static inline bool page_is_pfmemalloc(const struct page *page)
+{
+ /*
+ * lru.next has bit 1 set if the page is allocated from the
+ * pfmemalloc reserves. Callers may simply overwrite it if
+ * they do not need to preserve that information.
+ */
+ return (uintptr_t)page->lru.next & BIT(1);
+}
+
+/*
+ * Return true only if the folio has been allocated with
+ * ALLOC_NO_WATERMARKS and the low watermark was not
+ * met, implying that the system is under some pressure.
+ */
+static inline bool folio_is_pfmemalloc(const struct folio *folio)
{
/*
- * Page index cannot be this large so this must be
- * a pfmemalloc page.
+ * lru.next has bit 1 set if the page is allocated from the
+ * pfmemalloc reserves. Callers may simply overwrite it if
+ * they do not need to preserve that information.
*/
- return page->index == -1UL;
+ return (uintptr_t)folio->lru.next & BIT(1);
}
/*
@@ -1115,64 +2192,22 @@ static inline bool page_is_pfmemalloc(struct page *page)
*/
static inline void set_page_pfmemalloc(struct page *page)
{
- page->index = -1UL;
+ page->lru.next = (void *)BIT(1);
}
static inline void clear_page_pfmemalloc(struct page *page)
{
- page->index = 0;
+ page->lru.next = NULL;
}
/*
- * Different kinds of faults, as returned by handle_mm_fault().
- * Used to decide whether a process gets delivered SIGBUS or
- * just gets major/minor fault counters bumped up.
- */
-
-#define VM_FAULT_OOM 0x0001
-#define VM_FAULT_SIGBUS 0x0002
-#define VM_FAULT_MAJOR 0x0004
-#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
-#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */
-#define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */
-#define VM_FAULT_SIGSEGV 0x0040
-
-#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
-#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
-#define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */
-#define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */
-#define VM_FAULT_DONE_COW 0x1000 /* ->fault has fully handled COW */
-
-#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
-
-#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
- VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
- VM_FAULT_FALLBACK)
-
-#define VM_FAULT_RESULT_TRACE \
- { VM_FAULT_OOM, "OOM" }, \
- { VM_FAULT_SIGBUS, "SIGBUS" }, \
- { VM_FAULT_MAJOR, "MAJOR" }, \
- { VM_FAULT_WRITE, "WRITE" }, \
- { VM_FAULT_HWPOISON, "HWPOISON" }, \
- { VM_FAULT_HWPOISON_LARGE, "HWPOISON_LARGE" }, \
- { VM_FAULT_SIGSEGV, "SIGSEGV" }, \
- { VM_FAULT_NOPAGE, "NOPAGE" }, \
- { VM_FAULT_LOCKED, "LOCKED" }, \
- { VM_FAULT_RETRY, "RETRY" }, \
- { VM_FAULT_FALLBACK, "FALLBACK" }, \
- { VM_FAULT_DONE_COW, "DONE_COW" }
-
-/* Encode hstate index for a hwpoisoned large page */
-#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
-#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
-
-/*
* Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
*/
extern void pagefault_out_of_memory(void);
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
+#define offset_in_thp(page, p) ((unsigned long)(p) & (thp_size(page) - 1))
+#define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1))
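/*
 * A minimal sketch: clamping an I/O segment to the current folio, as
 * pagecache loops commonly do (pos and count are assumed):
 *
 *	size_t off = offset_in_folio(folio, pos);
 *	size_t len = min(folio_size(folio) - off, count);
 */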
/*
* Flags passed to show_mem() and show_free_areas() to suppress output in
@@ -1180,87 +2215,92 @@ extern void pagefault_out_of_memory(void);
*/
#define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */
-extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
-
-extern bool can_do_mlock(void);
-extern int user_shm_lock(size_t, struct user_struct *);
-extern void user_shm_unlock(size_t, struct user_struct *);
+extern void __show_free_areas(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
+static void __maybe_unused show_free_areas(unsigned int flags, nodemask_t *nodemask)
+{
+ __show_free_areas(flags, nodemask, MAX_NR_ZONES - 1);
+}
/*
* Parameter block passed down to zap_pte_range in exceptional cases.
*/
struct zap_details {
- struct address_space *check_mapping; /* Check page->mapping if set */
- pgoff_t first_index; /* Lowest page->index to unmap */
- pgoff_t last_index; /* Highest page->index to unmap */
+ struct folio *single_folio; /* Locked folio to be unmapped */
+ bool even_cows; /* Zap COWed private pages too? */
+ zap_flags_t zap_flags; /* Extra flags for zapping */
};
+/*
+ * Whether to drop the pte markers, for example, the uffd-wp information for
+ * file-backed memory. This should only be specified when we will completely
+ * drop the page in the mm, either by truncation or unmapping of the vma. By
+ * default, the flag is not set.
+ */
+#define ZAP_FLAG_DROP_MARKER ((__force zap_flags_t) BIT(0))
+/* Set in unmap_vmas() to indicate a final unmap call. Only used by hugetlb */
+#define ZAP_FLAG_UNMAP ((__force zap_flags_t) BIT(1))
+
+#ifdef CONFIG_SCHED_MM_CID
+void sched_mm_cid_before_execve(struct task_struct *t);
+void sched_mm_cid_after_execve(struct task_struct *t);
+void sched_mm_cid_fork(struct task_struct *t);
+void sched_mm_cid_exit_signals(struct task_struct *t);
+static inline int task_mm_cid(struct task_struct *t)
+{
+ return t->mm_cid;
+}
+#else
+static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
+static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
+static inline void sched_mm_cid_fork(struct task_struct *t) { }
+static inline void sched_mm_cid_exit_signals(struct task_struct *t) { }
+static inline int task_mm_cid(struct task_struct *t)
+{
+ /*
+ * Use the processor id as a fall-back when the mm cid feature is
+ * disabled. This provides functional per-cpu data structure accesses
+ * in user-space, although it won't provide the memory usage benefits.
+ */
+ return raw_smp_processor_id();
+}
+#endif
+
+#ifdef CONFIG_MMU
+extern bool can_do_mlock(void);
+#else
+static inline bool can_do_mlock(void) { return false; }
+#endif
+extern int user_shm_lock(size_t, struct ucounts *);
+extern void user_shm_unlock(size_t, struct ucounts *);
+
+struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte);
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
- pte_t pte);
+ pte_t pte);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd);
-int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
- unsigned long size);
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
- unsigned long size);
-void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
- unsigned long start, unsigned long end);
+void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size);
+void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size, struct zap_details *details);
+static inline void zap_vma_pages(struct vm_area_struct *vma)
+{
+ zap_page_range_single(vma, vma->vm_start,
+ vma->vm_end - vma->vm_start, NULL);
+}
+void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
+ struct vm_area_struct *start_vma, unsigned long start,
+ unsigned long end, bool mm_wr_locked);
-/**
- * mm_walk - callbacks for walk_page_range
- * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
- * this handler should only handle pud_trans_huge() puds.
- * the pmd_entry or pte_entry callbacks will be used for
- * regular PUDs.
- * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
- * this handler is required to be able to handle
- * pmd_trans_huge() pmds. They may simply choose to
- * split_huge_page() instead of handling it explicitly.
- * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
- * @pte_hole: if set, called for each hole at all levels
- * @hugetlb_entry: if set, called for each hugetlb entry
- * @test_walk: caller specific callback function to determine whether
- * we walk over the current vma or not. Returning 0
- * value means "do page table walk over the current vma,"
- * and a negative one means "abort current page table walk
- * right now." 1 means "skip the current vma."
- * @mm: mm_struct representing the target process of page table walk
- * @vma: vma currently walked (NULL if walking outside vmas)
- * @private: private data for callbacks' usage
- *
- * (see the comment on walk_page_range() for more details)
- */
-struct mm_walk {
- int (*pud_entry)(pud_t *pud, unsigned long addr,
- unsigned long next, struct mm_walk *walk);
- int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
- unsigned long next, struct mm_walk *walk);
- int (*pte_entry)(pte_t *pte, unsigned long addr,
- unsigned long next, struct mm_walk *walk);
- int (*pte_hole)(unsigned long addr, unsigned long next,
- struct mm_walk *walk);
- int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
- unsigned long addr, unsigned long next,
- struct mm_walk *walk);
- int (*test_walk)(unsigned long addr, unsigned long next,
- struct mm_walk *walk);
- struct mm_struct *mm;
- struct vm_area_struct *vma;
- void *private;
-};
+struct mmu_notifier_range;
-int walk_page_range(unsigned long addr, unsigned long end,
- struct mm_walk *walk);
-int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
-int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
- struct vm_area_struct *vma);
-void unmap_mapping_range(struct address_space *mapping,
- loff_t const holebegin, loff_t const holelen, int even_cows);
-int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
- pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
+int
+copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
+int follow_pte(struct mm_struct *mm, unsigned long address,
+ pte_t **ptepp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
@@ -1268,195 +2308,183 @@ int follow_phys(struct vm_area_struct *vma, unsigned long address,
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
-static inline void unmap_shared_mapping_range(struct address_space *mapping,
- loff_t const holebegin, loff_t const holelen)
-{
- unmap_mapping_range(mapping, holebegin, holelen, 0);
-}
-
extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
-int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
-int invalidate_inode_page(struct page *page);
#ifdef CONFIG_MMU
-extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
- unsigned int flags);
-extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
+extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
+ unsigned long address, unsigned int flags,
+ struct pt_regs *regs);
+extern int fixup_user_fault(struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
bool *unlocked);
+void unmap_mapping_pages(struct address_space *mapping,
+ pgoff_t start, pgoff_t nr, bool even_cows);
+void unmap_mapping_range(struct address_space *mapping,
+ loff_t const holebegin, loff_t const holelen, int even_cows);
#else
-static inline int handle_mm_fault(struct vm_area_struct *vma,
- unsigned long address, unsigned int flags)
+static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
+ unsigned long address, unsigned int flags,
+ struct pt_regs *regs)
{
/* should never happen if there's no MMU */
BUG();
return VM_FAULT_SIGBUS;
}
-static inline int fixup_user_fault(struct task_struct *tsk,
- struct mm_struct *mm, unsigned long address,
+static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
unsigned int fault_flags, bool *unlocked)
{
/* should never happen if there's no MMU */
BUG();
return -EFAULT;
}
+static inline void unmap_mapping_pages(struct address_space *mapping,
+ pgoff_t start, pgoff_t nr, bool even_cows) { }
+static inline void unmap_mapping_range(struct address_space *mapping,
+ loff_t const holebegin, loff_t const holelen, int even_cows) { }
#endif
-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
- unsigned int gup_flags);
+static inline void unmap_shared_mapping_range(struct address_space *mapping,
+ loff_t const holebegin, loff_t const holelen)
+{
+ unmap_mapping_range(mapping, holebegin, holelen, 0);
+}
+
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
+ void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
-extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long addr, void *buf, int len, unsigned int gup_flags);
+extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
+ void *buf, int len, unsigned int gup_flags);
-long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+long get_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *locked);
+long pin_user_pages_remote(struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas, int *locked);
long get_user_pages(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas);
-long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages, int *locked);
+long pin_user_pages(unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages);
-
-/* Container for pinned pfns / pages */
-struct frame_vector {
- unsigned int nr_allocated; /* Number of frames we have space for */
- unsigned int nr_frames; /* Number of frames stored in ptrs array */
- bool got_ref; /* Did we pin pages by getting page ref? */
- bool is_pfns; /* Does array contain pages or pfns? */
- void *ptrs[0]; /* Array of pinned pfns / pages. Use
- * pfns_vector_pages() or pfns_vector_pfns()
- * for access */
-};
-
-struct frame_vector *frame_vector_create(unsigned int nr_frames);
-void frame_vector_destroy(struct frame_vector *vec);
-int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
- unsigned int gup_flags, struct frame_vector *vec);
-void put_vaddr_frames(struct frame_vector *vec);
-int frame_vector_to_pages(struct frame_vector *vec);
-void frame_vector_to_pfns(struct frame_vector *vec);
-
-static inline unsigned int frame_vector_count(struct frame_vector *vec)
-{
- return vec->nr_frames;
-}
+long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
+ struct page **pages, unsigned int gup_flags);
-static inline struct page **frame_vector_pages(struct frame_vector *vec)
-{
- if (vec->is_pfns) {
- int err = frame_vector_to_pages(vec);
+int get_user_pages_fast(unsigned long start, int nr_pages,
+ unsigned int gup_flags, struct page **pages);
+int pin_user_pages_fast(unsigned long start, int nr_pages,
+ unsigned int gup_flags, struct page **pages);
- if (err)
- return ERR_PTR(err);
- }
- return (struct page **)(vec->ptrs);
-}
-
-static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
-{
- if (!vec->is_pfns)
- frame_vector_to_pfns(vec);
- return (unsigned long *)(vec->ptrs);
-}
+int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
+int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
+ struct task_struct *task, bool bypass_rlim);
struct kvec;
-int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
- struct page **pages);
-int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);
-extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
-extern void do_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length);
-
-int __set_page_dirty_nobuffers(struct page *page);
-int __set_page_dirty_no_writeback(struct page *page);
-int redirty_page_for_writepage(struct writeback_control *wbc,
- struct page *page);
-void account_page_dirtied(struct page *page, struct address_space *mapping);
-void account_page_cleaned(struct page *page, struct address_space *mapping,
- struct bdi_writeback *wb);
-int set_page_dirty(struct page *page);
+bool folio_mark_dirty(struct folio *folio);
+bool set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
-void cancel_dirty_page(struct page *page);
-int clear_page_dirty_for_io(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
-static inline bool vma_is_anonymous(struct vm_area_struct *vma)
-{
- return !vma->vm_ops;
-}
+extern unsigned long move_page_tables(struct vm_area_struct *vma,
+ unsigned long old_addr, struct vm_area_struct *new_vma,
+ unsigned long new_addr, unsigned long len,
+ bool need_rmap_locks);
-#ifdef CONFIG_SHMEM
/*
- * The vma_is_shmem is not inline because it is used only by slow
- * paths in userfault.
+ * Flags used by change_protection(). For now we make it a bitmap so
+ * that we can pass in multiple flags just like parameters. However,
+ * for now all the callers only use one of the flags at a time.
*/
-bool vma_is_shmem(struct vm_area_struct *vma);
-#else
-static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
-#endif
+/*
+ * Whether we should manually check if we can map individual PTEs writable,
+ * because something (e.g., COW, uffd-wp) blocks that from happening for all
+ * PTEs automatically in a writable mapping.
+ */
+#define MM_CP_TRY_CHANGE_WRITABLE (1UL << 0)
+/* Whether this protection change is for NUMA hints */
+#define MM_CP_PROT_NUMA (1UL << 1)
+/* Whether this change is for write protecting */
+#define MM_CP_UFFD_WP (1UL << 2) /* do wp */
+#define MM_CP_UFFD_WP_RESOLVE (1UL << 3) /* Resolve wp */
+#define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \
+ MM_CP_UFFD_WP_RESOLVE)
-int vma_is_stack_for_current(struct vm_area_struct *vma);
+int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
+static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
+{
+ /*
+ * We want to check manually if we can change individual PTEs writable
+ * if we can't do that automatically for all PTEs in a mapping. For
+ * private mappings, that's always the case when we have write
+ * permissions as we properly have to handle COW.
+ */
+ if (vma->vm_flags & VM_SHARED)
+ return vma_wants_writenotify(vma, vma->vm_page_prot);
+ return !!(vma->vm_flags & VM_WRITE);
-extern unsigned long move_page_tables(struct vm_area_struct *vma,
- unsigned long old_addr, struct vm_area_struct *new_vma,
- unsigned long new_addr, unsigned long len,
- bool need_rmap_locks);
-extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgprot_t newprot,
- int dirty_accountable, int prot_numa);
-extern int mprotect_fixup(struct vm_area_struct *vma,
- struct vm_area_struct **pprev, unsigned long start,
- unsigned long end, unsigned long newflags);
+}
+bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte);
+extern long change_protection(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, unsigned long cp_flags);
+extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
+ struct vm_area_struct *vma, struct vm_area_struct **pprev,
+ unsigned long start, unsigned long end, unsigned long newflags);
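/*
 * A minimal sketch of passing exactly one MM_CP_* flag (tlb is an assumed,
 * already initialised mmu_gather); the NUMA hinting path does the
 * equivalent of:
 *
 *	change_protection(&tlb, vma, start, end, MM_CP_PROT_NUMA);
 */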
/*
* Doesn't attempt to fault and may return fewer pages than requested ("short").
*/
-int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages);
+int get_user_pages_fast_only(unsigned long start, int nr_pages,
+ unsigned int gup_flags, struct page **pages);
+
+static inline bool get_user_page_fast_only(unsigned long addr,
+ unsigned int gup_flags, struct page **pagep)
+{
+ return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
+}
/*
* per-process (per-mm_struct) statistics.
*/
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
- long val = atomic_long_read(&mm->rss_stat.count[member]);
-
-#ifdef SPLIT_RSS_COUNTING
- /*
- * counter is updated in asynchronous manner and may go to minus.
- * But it's never be expected number for users.
- */
- if (val < 0)
- val = 0;
-#endif
- return (unsigned long)val;
+ return percpu_counter_read_positive(&mm->rss_stat[member]);
}
+void mm_trace_rss_stat(struct mm_struct *mm, int member);
+
static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
- atomic_long_add(value, &mm->rss_stat.count[member]);
+ percpu_counter_add(&mm->rss_stat[member], value);
+
+ mm_trace_rss_stat(mm, member);
}
static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
- atomic_long_inc(&mm->rss_stat.count[member]);
+ percpu_counter_inc(&mm->rss_stat[member]);
+
+ mm_trace_rss_stat(mm, member);
}
static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
- atomic_long_dec(&mm->rss_stat.count[member]);
+ percpu_counter_dec(&mm->rss_stat[member]);
+
+ mm_trace_rss_stat(mm, member);
}
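/*
 * A minimal sketch: rmap-style code adjusts these when (un)mapping, e.g.
 *
 *	inc_mm_counter(mm, MM_ANONPAGES);	new anonymous page mapped
 *	dec_mm_counter(mm, MM_ANONPAGES);	dropped again on unmap
 *
 * while get_mm_counter(mm, MM_ANONPAGES) reads back the current value.
 */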
/* Optimized variant when page is already known not to be PageAnon */
@@ -1527,14 +2555,24 @@ static inline void sync_mm_rss(struct mm_struct *mm)
}
#endif
-#ifndef __HAVE_ARCH_PTE_DEVMAP
-static inline int pte_devmap(pte_t pte)
+#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
+static inline int pte_special(pte_t pte)
{
return 0;
}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ return pte;
+}
#endif
-int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
+#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
+static inline int pte_devmap(pte_t pte)
+{
+ return 0;
+}
+#endif
extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl);
@@ -1556,26 +2594,36 @@ static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif
-#ifdef __PAGETABLE_PUD_FOLDED
+#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
unsigned long address)
{
return 0;
}
+static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
+static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
+
#else
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
-#endif
-#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
-static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
- unsigned long address)
+static inline void mm_inc_nr_puds(struct mm_struct *mm)
{
- return 0;
+ if (mm_pud_folded(mm))
+ return;
+ atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
-static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
+static inline void mm_dec_nr_puds(struct mm_struct *mm)
+{
+ if (mm_pud_folded(mm))
+ return;
+ atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
+}
+#endif
-static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
+#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
+static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
+ unsigned long address)
{
return 0;
}
@@ -1586,37 +2634,58 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
-static inline void mm_nr_pmds_init(struct mm_struct *mm)
+static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
- atomic_long_set(&mm->nr_pmds, 0);
+ if (mm_pmd_folded(mm))
+ return;
+ atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
-static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
+static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
- return atomic_long_read(&mm->nr_pmds);
+ if (mm_pmd_folded(mm))
+ return;
+ atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
+#endif
-static inline void mm_inc_nr_pmds(struct mm_struct *mm)
+#ifdef CONFIG_MMU
+static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
{
- atomic_long_inc(&mm->nr_pmds);
+ atomic_long_set(&mm->pgtables_bytes, 0);
}
-static inline void mm_dec_nr_pmds(struct mm_struct *mm)
+static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
+{
+ return atomic_long_read(&mm->pgtables_bytes);
+}
+
+static inline void mm_inc_nr_ptes(struct mm_struct *mm)
+{
+ atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
+}
+
+static inline void mm_dec_nr_ptes(struct mm_struct *mm)
{
- atomic_long_dec(&mm->nr_pmds);
+ atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}
+#else
+
+static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
+static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
+{
+ return 0;
+}
+
+static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
+static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif
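/*
 * A sketch (hypothetical helper): mm_pgtables_bytes() is the value
 * behind the VmPTE line of /proc/<pid>/status, reported in kilobytes.
 */
static unsigned long example_vmpte_kb(struct mm_struct *mm)
{
	return mm_pgtables_bytes(mm) >> 10;
}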
-int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
-int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
+int __pte_alloc_kernel(pmd_t *pmd);
-/*
- * The following ifdef needed to get the 4level-fixup.h header to work.
- * Remove it when 4level-fixup.h has been removed.
- */
-#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
+#if defined(CONFIG_MMU)
-#ifndef __ARCH_HAS_5LEVEL_HACK
static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
unsigned long address)
{
@@ -1630,14 +2699,13 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
NULL : pud_offset(p4d, address);
}
-#endif /* !__ARCH_HAS_5LEVEL_HACK */
static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
NULL: pmd_offset(pud, address);
}
-#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
+#endif /* CONFIG_MMU */
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
@@ -1690,13 +2758,6 @@ static inline bool ptlock_init(struct page *page)
return true;
}
-/* Reset page->mapping so free_pages_check won't complain. */
-static inline void pte_lock_deinit(struct page *page)
-{
- page->mapping = NULL;
- ptlock_free(page);
-}
-
#else /* !USE_SPLIT_PTE_PTLOCKS */
/*
* We use mm->page_table_lock to guard all pagetable pages of the mm.
@@ -1707,27 +2768,23 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct page *page) { return true; }
-static inline void pte_lock_deinit(struct page *page) {}
+static inline void ptlock_free(struct page *page) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */
-static inline void pgtable_init(void)
-{
- ptlock_cache_init();
- pgtable_cache_init();
-}
-
-static inline bool pgtable_page_ctor(struct page *page)
+static inline bool pgtable_pte_page_ctor(struct page *page)
{
if (!ptlock_init(page))
return false;
- inc_zone_page_state(page, NR_PAGETABLE);
+ __SetPageTable(page);
+ inc_lruvec_page_state(page, NR_PAGETABLE);
return true;
}
-static inline void pgtable_page_dtor(struct page *page)
+static inline void pgtable_pte_page_dtor(struct page *page)
{
- pte_lock_deinit(page);
- dec_zone_page_state(page, NR_PAGETABLE);
+ ptlock_free(page);
+ __ClearPageTable(page);
+ dec_lruvec_page_state(page, NR_PAGETABLE);
}
#define pte_offset_map_lock(mm, pmd, address, ptlp) \
@@ -1744,23 +2801,22 @@ static inline void pgtable_page_dtor(struct page *page)
pte_unmap(pte); \
} while (0)
-#define pte_alloc(mm, pmd, address) \
- (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))
+#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
#define pte_alloc_map(mm, pmd, address) \
- (pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address))
+ (pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
#define pte_alloc_map_lock(mm, pmd, address, ptlp) \
- (pte_alloc(mm, pmd, address) ? \
+ (pte_alloc(mm, pmd) ? \
NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
#define pte_alloc_kernel(pmd, address) \
- ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
+ ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
NULL: pte_offset_kernel(pmd, address))
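/*
 * A sketch chaining the allocation helpers above to obtain a mapped and
 * locked PTE for an address, roughly what __get_locked_pte() does
 * internally (hypothetical helper; returns NULL on allocation failure).
 */
static pte_t *example_map_locked_pte(struct mm_struct *mm, unsigned long addr,
				     spinlock_t **ptlp)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d = p4d_alloc(mm, pgd, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;
	return pte_alloc_map_lock(mm, pmd, addr, ptlp);
}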
#if USE_SPLIT_PMD_PTLOCKS
-static struct page *pmd_to_page(pmd_t *pmd)
+static inline struct page *pmd_pgtable_page(pmd_t *pmd)
{
unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
return virt_to_page((void *)((unsigned long) pmd & mask));
@@ -1768,10 +2824,10 @@ static struct page *pmd_to_page(pmd_t *pmd)
static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
- return ptlock_ptr(pmd_to_page(pmd));
+ return ptlock_ptr(pmd_pgtable_page(pmd));
}
-static inline bool pgtable_pmd_page_ctor(struct page *page)
+static inline bool pmd_ptlock_init(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
page->pmd_huge_pte = NULL;
@@ -1779,7 +2835,7 @@ static inline bool pgtable_pmd_page_ctor(struct page *page)
return ptlock_init(page);
}
-static inline void pgtable_pmd_page_dtor(struct page *page)
+static inline void pmd_ptlock_free(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
@@ -1787,7 +2843,7 @@ static inline void pgtable_pmd_page_dtor(struct page *page)
ptlock_free(page);
}
-#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
+#define pmd_huge_pte(mm, pmd) (pmd_pgtable_page(pmd)->pmd_huge_pte)
#else
@@ -1796,8 +2852,8 @@ static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
return &mm->page_table_lock;
}
-static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
-static inline void pgtable_pmd_page_dtor(struct page *page) {}
+static inline bool pmd_ptlock_init(struct page *page) { return true; }
+static inline void pmd_ptlock_free(struct page *page) {}
#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
@@ -1810,6 +2866,22 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
return ptl;
}
+static inline bool pgtable_pmd_page_ctor(struct page *page)
+{
+ if (!pmd_ptlock_init(page))
+ return false;
+ __SetPageTable(page);
+ inc_lruvec_page_state(page, NR_PAGETABLE);
+ return true;
+}
+
+static inline void pgtable_pmd_page_dtor(struct page *page)
+{
+ pmd_ptlock_free(page);
+ __ClearPageTable(page);
+ dec_lruvec_page_state(page, NR_PAGETABLE);
+}
+
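/*
 * A sketch of pairing the ctor/dtor above, loosely following what an
 * architecture's pmd_alloc_one() does (names hypothetical). The ctor
 * can fail when the split ptlock allocation fails.
 */
static struct page *example_alloc_pmd_page(gfp_t gfp)
{
	struct page *page = alloc_pages(gfp | __GFP_ZERO, 0);

	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}
	return page;
}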
/*
* No scalability reason to split PUD locks yet, but follow the same pattern
* as the PMD locks to make it easier if we decide to. The VM should not be
@@ -1830,9 +2902,6 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
}
extern void __init pagecache_init(void);
-extern void free_area_init(unsigned long * zones_size);
-extern void free_area_init_node(int nid, unsigned long * zones_size,
- unsigned long zone_start_pfn, unsigned long *zholes_size);
extern void free_initmem(void);
/*
@@ -1842,34 +2911,21 @@ extern void free_initmem(void);
* Return pages freed into the buddy system.
*/
extern unsigned long free_reserved_area(void *start, void *end,
- int poison, char *s);
-
-#ifdef CONFIG_HIGHMEM
-/*
- * Free a highmem page into the buddy system, adjusting totalhigh_pages
- * and totalram_pages.
- */
-extern void free_highmem_page(struct page *page);
-#endif
+ int poison, const char *s);
extern void adjust_managed_page_count(struct page *page, long count);
-extern void mem_init_print_info(const char *str);
extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
/* Free the reserved page into the buddy system, so it gets managed. */
-static inline void __free_reserved_page(struct page *page)
+static inline void free_reserved_page(struct page *page)
{
ClearPageReserved(page);
init_page_count(page);
__free_page(page);
-}
-
-static inline void free_reserved_page(struct page *page)
-{
- __free_reserved_page(page);
adjust_managed_page_count(page, 1);
}
+#define free_highmem_page(page) free_reserved_page(page)
static inline void mark_page_reserved(struct page *page)
{
@@ -1888,7 +2944,7 @@ static inline unsigned long free_initmem_default(int poison)
extern char __init_begin[], __init_end[];
return free_reserved_area(&__init_begin, &__init_end,
- poison, "unused kernel");
+ poison, "unused kernel image (initmem)");
}
static inline unsigned long get_num_physpages(void)
@@ -1902,34 +2958,23 @@ static inline unsigned long get_num_physpages(void)
return phys_pages;
}
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
- * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
- * zones, allocate the backing mem_map and account for memory holes in a more
- * architecture independent manner. This is a substitute for creating the
- * zone_sizes[] and zholes_size[] arrays and passing them to
- * free_area_init_node()
+ * Using memblock node mappings, an architecture may initialise its
+ * zones, allocate the backing mem_map and account for memory holes in an
+ * architecture independent manner.
*
* An architecture is expected to register range of page frames backed by
* physical memory with memblock_add[_node]() before calling
- * free_area_init_nodes() passing in the PFN each zone ends at. At a basic
+ * free_area_init() passing in the PFN each zone ends at. At a basic
* usage, an architecture is expected to do something like
*
* unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
* max_highmem_pfn};
* for_each_valid_physical_page_range()
- * memblock_add_node(base, size, nid)
- * free_area_init_nodes(max_zone_pfns);
- *
- * free_bootmem_with_active_regions() calls free_bootmem_node() for each
- * registered physical page range. Similarly
- * sparse_memory_present_with_active_regions() calls memory_present() for
- * each range when SPARSEMEM is enabled.
- *
- * See mm/page_alloc.c for more information on each function exposed by
- * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
+ * memblock_add_node(base, size, nid, MEMBLOCK_NONE)
+ * free_area_init(max_zone_pfns);
*/
-extern void free_area_init_nodes(unsigned long *max_zone_pfn);
+void free_area_init(unsigned long *max_zone_pfn);
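/*
 * A sketch of the sequence described in the comment above, as an
 * architecture's early init might do it (the function name is
 * hypothetical; max_pfn stands in for the arch-specific top of RAM).
 */
static void __init example_zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };

	/* memblock_add_node() calls happened earlier, during memory scan */
	max_zone_pfns[ZONE_NORMAL] = max_pfn;
	free_area_init(max_zone_pfns);
}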
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
unsigned long end_pfn);
@@ -1937,36 +2982,32 @@ extern unsigned long absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn);
-extern unsigned long find_min_pfn_with_active_regions(void);
-extern void free_bootmem_with_active_regions(int nid,
- unsigned long max_low_pfn);
-extern void sparse_memory_present_with_active_regions(int nid);
-
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
- !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
-static inline int __early_pfn_to_nid(unsigned long pfn,
- struct mminit_pfnnid_cache *state)
+#ifndef CONFIG_NUMA
+static inline int early_pfn_to_nid(unsigned long pfn)
{
return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
-/* there is a per-arch backend function. */
-extern int __meminit __early_pfn_to_nid(unsigned long pfn,
- struct mminit_pfnnid_cache *state);
#endif
extern void set_dma_reserve(unsigned long new_dma_reserve);
-extern void memmap_init_zone(unsigned long, int, unsigned long,
- unsigned long, enum memmap_context);
+extern void memmap_init_range(unsigned long, int, unsigned long,
+ unsigned long, unsigned long, enum meminit_context,
+ struct vmem_altmap *, int migratetype);
extern void setup_per_zone_wmarks(void);
+extern void calculate_min_free_kbytes(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
-extern void show_mem(unsigned int flags, nodemask_t *nodemask);
+
+extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
+static inline void show_mem(unsigned int flags, nodemask_t *nodemask)
+{
+ __show_mem(flags, nodemask, MAX_NR_ZONES - 1);
+}
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
@@ -1979,11 +3020,9 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
extern void setup_per_cpu_pageset(void);
-extern void zone_pcp_update(struct zone *zone);
-extern void zone_pcp_reset(struct zone *zone);
-
/* page_alloc.c */
extern int min_free_kbytes;
+extern int watermark_boost_factor;
extern int watermark_scale_factor;
/* nommu.c */
@@ -1992,13 +3031,13 @@ extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
/* interval_tree.c */
void vma_interval_tree_insert(struct vm_area_struct *node,
- struct rb_root *root);
+ struct rb_root_cached *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
struct vm_area_struct *prev,
- struct rb_root *root);
+ struct rb_root_cached *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
- struct rb_root *root);
-struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
+ struct rb_root_cached *root);
+struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
unsigned long start, unsigned long last);
@@ -2008,11 +3047,12 @@ struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
vma; vma = vma_interval_tree_iter_next(vma, start, last))
void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
- struct rb_root *root);
+ struct rb_root_cached *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
- struct rb_root *root);
-struct anon_vma_chain *anon_vma_interval_tree_iter_first(
- struct rb_root *root, unsigned long start, unsigned long last);
+ struct rb_root_cached *root);
+struct anon_vma_chain *
+anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
+ unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
@@ -2025,26 +3065,22 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
- struct vm_area_struct *expand);
-static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
-{
- return __vma_adjust(vma, start, end, pgoff, insert, NULL);
-}
-extern struct vm_area_struct *vma_merge(struct mm_struct *,
- struct vm_area_struct *prev, unsigned long addr, unsigned long end,
- unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
- struct mempolicy *, struct vm_userfaultfd_ctx);
+extern int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, pgoff_t pgoff,
+ struct vm_area_struct *next);
+extern int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, pgoff_t pgoff);
+extern struct vm_area_struct *vma_merge(struct vma_iterator *vmi,
+ struct mm_struct *, struct vm_area_struct *prev, unsigned long addr,
+ unsigned long end, unsigned long vm_flags, struct anon_vma *,
+ struct file *, pgoff_t, struct mempolicy *, struct vm_userfaultfd_ctx,
+ struct anon_vma_name *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
-extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
- unsigned long addr, int new_below);
-extern int split_vma(struct mm_struct *, struct vm_area_struct *,
- unsigned long addr, int new_below);
+extern int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *,
+ unsigned long addr, int new_below);
+extern int split_vma(struct vma_iterator *vmi, struct vm_area_struct *,
+ unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
-extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
- struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
unsigned long addr, unsigned long len, pgoff_t pgoff,
@@ -2068,7 +3104,8 @@ static inline int check_data_rlimit(unsigned long rlim,
extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);
-extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
+extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
+extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
extern struct file *get_task_exe_file(struct task_struct *task);
@@ -2086,6 +3123,9 @@ extern int install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long flags, struct page **pages);
+unsigned long randomize_stack_top(unsigned long stack_top);
+unsigned long randomize_page(unsigned long start, unsigned long range);
+
extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
extern unsigned long mmap_region(struct file *file, unsigned long addr,
@@ -2093,21 +3133,18 @@ extern unsigned long mmap_region(struct file *file, unsigned long addr,
struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
- vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
- struct list_head *uf);
+ unsigned long pgoff, unsigned long *populate, struct list_head *uf);
+extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
+ unsigned long start, size_t len, struct list_head *uf,
+ bool downgrade);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
-
-static inline unsigned long
-do_mmap_pgoff(struct file *file, unsigned long addr,
- unsigned long len, unsigned long prot, unsigned long flags,
- unsigned long pgoff, unsigned long *populate,
- struct list_head *uf)
-{
- return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
-}
+extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
#ifdef CONFIG_MMU
+extern int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct list_head *uf, bool downgrade);
extern int __mm_populate(unsigned long addr, unsigned long len,
int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
@@ -2137,26 +3174,7 @@ struct vm_unmapped_area_info {
unsigned long align_offset;
};
-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
-
-/*
- * Search for an unmapped address range.
- *
- * We are looking for a range that:
- * - does not intersect with any VMA;
- * - is contained within the [low_limit, high_limit) interval;
- * - is at least the desired size.
- * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
- */
-static inline unsigned long
-vm_unmapped_area(struct vm_unmapped_area_info *info)
-{
- if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
- return unmapped_area_topdown(info);
- else
- return unmapped_area(info);
-}
+extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
@@ -2165,40 +3183,16 @@ extern void truncate_inode_pages_range(struct address_space *,
extern void truncate_inode_pages_final(struct address_space *);
/* generic vm_area_ops exported for stackable file systems */
-extern int filemap_fault(struct vm_fault *vmf);
-extern void filemap_map_pages(struct vm_fault *vmf,
+extern vm_fault_t filemap_fault(struct vm_fault *vmf);
+extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
-extern int filemap_page_mkwrite(struct vm_fault *vmf);
-
-/* mm/page-writeback.c */
-int __must_check write_one_page(struct page *page);
-void task_dirty_inc(struct task_struct *tsk);
-
-/* readahead.c */
-#define VM_MAX_READAHEAD 128 /* kbytes */
-#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */
-
-int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
- pgoff_t offset, unsigned long nr_to_read);
-
-void page_cache_sync_readahead(struct address_space *mapping,
- struct file_ra_state *ra,
- struct file *filp,
- pgoff_t offset,
- unsigned long size);
-
-void page_cache_async_readahead(struct address_space *mapping,
- struct file_ra_state *ra,
- struct file *filp,
- struct page *pg,
- pgoff_t offset,
- unsigned long size);
+extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
-/* CONFIG_STACK_GROWSUP still needs to to grow downwards at some places */
+/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
unsigned long address);
#if VM_GROWSUP
@@ -2212,15 +3206,24 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
struct vm_area_struct **pprev);
-/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
- NULL if none. Assume start_addr < end_addr. */
-static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
-{
- struct vm_area_struct * vma = find_vma(mm,start_addr);
+/*
+ * Look up the first VMA which intersects the interval [start_addr, end_addr),
+ * or NULL if none. Assume start_addr < end_addr.
+ */
+struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
+ unsigned long start_addr, unsigned long end_addr);
- if (vma && end_addr <= vma->vm_start)
- vma = NULL;
- return vma;
+/**
+ * vma_lookup() - Find a VMA at a specific address
+ * @mm: The process address space.
+ * @addr: The user address.
+ *
+ * Return: The vm_area_struct at the given address, %NULL otherwise.
+ */
+static inline
+struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
+{
+ return mtree_load(&mm->mm_mt, addr);
}
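/*
 * A sketch contrasting the two lookups (hypothetical helper): find_vma()
 * may return a VMA that begins above @addr, while vma_lookup() returns
 * only a VMA that actually contains it.
 */
static bool example_addr_mapped(struct mm_struct *mm, unsigned long addr)
{
	return vma_lookup(mm, addr) != NULL;	/* mmap_lock must be held */
}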
static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
@@ -2256,7 +3259,7 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
unsigned long vm_start, unsigned long vm_end)
{
- struct vm_area_struct *vma = find_vma(mm, vm_start);
+ struct vm_area_struct *vma = vma_lookup(mm, vm_start);
if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
vma = NULL;
@@ -2264,6 +3267,12 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
return vma;
}
+static inline bool range_in_vma(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ return (vma && vma->vm_start <= start && end <= vma->vm_end);
+}
+
#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
@@ -2278,6 +3287,8 @@ static inline void vma_set_page_prot(struct vm_area_struct *vma)
}
#endif
+void vma_set_file(struct vm_area_struct *vma, struct file *file);
+
#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
@@ -2286,45 +3297,58 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
+int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t prot);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
-int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
+ struct page **pages, unsigned long *num);
+int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
+ unsigned long num);
+int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
+ unsigned long num);
+vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
-int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
-int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn);
+vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
+ unsigned long addr, pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
+static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
+ unsigned long addr, struct page *page)
+{
+ int err = vm_insert_page(vma, addr, page);
+
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+ if (err < 0 && err != -EBUSY)
+ return VM_FAULT_SIGBUS;
-struct page *follow_page_mask(struct vm_area_struct *vma,
- unsigned long address, unsigned int foll_flags,
- unsigned int *page_mask);
+ return VM_FAULT_NOPAGE;
+}
-static inline struct page *follow_page(struct vm_area_struct *vma,
- unsigned long address, unsigned int foll_flags)
+#ifndef io_remap_pfn_range
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn,
+ unsigned long size, pgprot_t prot)
{
- unsigned int unused_page_mask;
- return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
+ return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
}
+#endif
-#define FOLL_WRITE 0x01 /* check pte is writable */
-#define FOLL_TOUCH 0x02 /* mark page accessed */
-#define FOLL_GET 0x04 /* do get_page on page */
-#define FOLL_DUMP 0x08 /* give error on hole if it would be zero */
-#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */
-#define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO
- * and return without waiting upon it */
-#define FOLL_POPULATE 0x40 /* fault in page */
-#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */
-#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
-#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
-#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
-#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
-#define FOLL_MLOCK 0x1000 /* lock present pages */
-#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
-#define FOLL_COW 0x4000 /* internal GUP flag */
+static inline vm_fault_t vmf_error(int err)
+{
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+ return VM_FAULT_SIGBUS;
+}
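/*
 * A sketch of vmf_error() in a ->fault() handler: an internal lookup
 * (lookup_backing_page() is hypothetical) returns an ERR_PTR, which is
 * translated into a VM_FAULT_* code for the fault path.
 */
static vm_fault_t example_fault(struct vm_fault *vmf)
{
	struct page *page = lookup_backing_page(vmf->pgoff);	/* hypothetical */

	if (IS_ERR(page))
		return vmf_error(PTR_ERR(page));
	vmf->page = page;
	return 0;
}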
-static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
+struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
+ unsigned int foll_flags);
+
+static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
if (vm_fault & VM_FAULT_OOM)
return -ENOMEM;
@@ -2335,53 +3359,121 @@ static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
return 0;
}
-typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
- void *data);
+/*
+ * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
+ * a (NUMA hinting) fault is required.
+ */
+static inline bool gup_can_follow_protnone(unsigned int flags)
+{
+ /*
+ * FOLL_FORCE has to be able to make progress even if the VMA is
+ * inaccessible. Further, FOLL_FORCE access usually does not represent
+ * application behaviour and we should avoid triggering NUMA hinting
+ * faults.
+ */
+ return flags & FOLL_FORCE;
+}
+
+typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
unsigned long size, pte_fn_t fn, void *data);
-
+extern int apply_to_existing_page_range(struct mm_struct *mm,
+ unsigned long address, unsigned long size,
+ pte_fn_t fn, void *data);
#ifdef CONFIG_PAGE_POISONING
-extern bool page_poisoning_enabled(void);
-extern void kernel_poison_pages(struct page *page, int numpages, int enable);
-extern bool page_is_poisoned(struct page *page);
+extern void __kernel_poison_pages(struct page *page, int numpages);
+extern void __kernel_unpoison_pages(struct page *page, int numpages);
+extern bool _page_poisoning_enabled_early;
+DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
+static inline bool page_poisoning_enabled(void)
+{
+ return _page_poisoning_enabled_early;
+}
+/*
+ * For use in fast paths after init_mem_debugging() has run, or when a
+ * false negative result is not harmful when called too early.
+ */
+static inline bool page_poisoning_enabled_static(void)
+{
+ return static_branch_unlikely(&_page_poisoning_enabled);
+}
+static inline void kernel_poison_pages(struct page *page, int numpages)
+{
+ if (page_poisoning_enabled_static())
+ __kernel_poison_pages(page, numpages);
+}
+static inline void kernel_unpoison_pages(struct page *page, int numpages)
+{
+ if (page_poisoning_enabled_static())
+ __kernel_unpoison_pages(page, numpages);
+}
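/*
 * A sketch of how an allocator pairs the helpers above: fill freed pages
 * with the poison pattern, then verify and clear it on the next
 * allocation so corruption of free memory is caught (names hypothetical).
 */
static void example_on_free(struct page *page, unsigned int order)
{
	kernel_poison_pages(page, 1 << order);
}

static void example_on_alloc(struct page *page, unsigned int order)
{
	kernel_unpoison_pages(page, 1 << order);
}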
#else
static inline bool page_poisoning_enabled(void) { return false; }
-static inline void kernel_poison_pages(struct page *page, int numpages,
- int enable) { }
-static inline bool page_is_poisoned(struct page *page) { return false; }
+static inline bool page_poisoning_enabled_static(void) { return false; }
+static inline void __kernel_poison_pages(struct page *page, int numpages) { }
+static inline void kernel_poison_pages(struct page *page, int numpages) { }
+static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
-extern bool _debug_pagealloc_enabled;
-extern void __kernel_map_pages(struct page *page, int numpages, int enable);
+DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
+static inline bool want_init_on_alloc(gfp_t flags)
+{
+ if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
+ &init_on_alloc))
+ return true;
+ return flags & __GFP_ZERO;
+}
+
+DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
+static inline bool want_init_on_free(void)
+{
+ return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
+ &init_on_free);
+}
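/*
 * A sketch of an allocation path consulting want_init_on_alloc(): zero
 * the fresh pages when init_on_alloc is enabled or the caller passed
 * __GFP_ZERO (hypothetical helper; clear_highpage() is from highmem.h).
 */
static void example_post_alloc(struct page *page, unsigned int order,
			       gfp_t gfp_flags)
{
	if (want_init_on_alloc(gfp_flags)) {
		int i;

		for (i = 0; i < (1 << order); i++)
			clear_highpage(page + i);
	}
}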
+
+extern bool _debug_pagealloc_enabled_early;
+DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
static inline bool debug_pagealloc_enabled(void)
{
- return _debug_pagealloc_enabled;
+ return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
+ _debug_pagealloc_enabled_early;
}
-static inline void
-kernel_map_pages(struct page *page, int numpages, int enable)
+/*
+ * For use in fast paths after init_debug_pagealloc() has run, or when a
+ * false negative result is not harmful when called too early.
+ */
+static inline bool debug_pagealloc_enabled_static(void)
{
- if (!debug_pagealloc_enabled())
- return;
+ if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
+ return false;
- __kernel_map_pages(page, numpages, enable);
+ return static_branch_unlikely(&_debug_pagealloc_enabled);
}
-#ifdef CONFIG_HIBERNATION
-extern bool kernel_page_present(struct page *page);
-#endif /* CONFIG_HIBERNATION */
-#else /* CONFIG_DEBUG_PAGEALLOC */
-static inline void
-kernel_map_pages(struct page *page, int numpages, int enable) {}
-#ifdef CONFIG_HIBERNATION
-static inline bool kernel_page_present(struct page *page) { return true; }
-#endif /* CONFIG_HIBERNATION */
-static inline bool debug_pagealloc_enabled(void)
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+/*
+ * To support DEBUG_PAGEALLOC, the architecture must ensure that
+ * __kernel_map_pages() never fails.
+ */
+extern void __kernel_map_pages(struct page *page, int numpages, int enable);
+
+static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
{
- return false;
+ if (debug_pagealloc_enabled_static())
+ __kernel_map_pages(page, numpages, 1);
}
+
+static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
+{
+ if (debug_pagealloc_enabled_static())
+ __kernel_map_pages(page, numpages, 0);
+}
+#else /* CONFIG_DEBUG_PAGEALLOC */
+static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
+static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
#endif /* CONFIG_DEBUG_PAGEALLOC */
#ifdef __HAVE_ARCH_GATE_AREA
@@ -2404,12 +3496,11 @@ extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
-int drop_caches_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
+int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *,
+ loff_t *);
#endif
void drop_slab(void);
-void drop_slab_node(int nid);
#ifndef CONFIG_MMU
#define randomize_va_space 0
@@ -2418,57 +3509,141 @@ extern int randomize_va_space;
#endif
const char * arch_vma_name(struct vm_area_struct *vma);
+#ifdef CONFIG_MMU
void print_vma_addr(char *prefix, unsigned long rip);
+#else
+static inline void print_vma_addr(char *prefix, unsigned long rip)
+{
+}
+#endif
-void sparse_mem_maps_populate_node(struct page **map_map,
- unsigned long pnum_begin,
- unsigned long pnum_end,
- unsigned long map_count,
- int nodeid);
-
-struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
+void *sparse_buffer_alloc(unsigned long size);
+struct page * __populate_section_memmap(unsigned long pfn,
+ unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap);
+void pmd_init(void *addr);
+void pud_init(void *addr);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
-pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
+pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
+ struct vmem_altmap *altmap, struct page *reuse);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
-void *__vmemmap_alloc_block_buf(unsigned long size, int node,
- struct vmem_altmap *altmap);
-static inline void *vmemmap_alloc_block_buf(unsigned long size, int node)
-{
- return __vmemmap_alloc_block_buf(size, node, NULL);
-}
-
+void *vmemmap_alloc_block_buf(unsigned long size, int node,
+ struct vmem_altmap *altmap);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
+void vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
+ unsigned long addr, unsigned long next);
+int vmemmap_check_pmd(pmd_t *pmd, int node,
+ unsigned long addr, unsigned long next);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
- int node);
-int vmemmap_populate(unsigned long start, unsigned long end, int node);
+ int node, struct vmem_altmap *altmap);
+int vmemmap_populate_hugepages(unsigned long start, unsigned long end,
+ int node, struct vmem_altmap *altmap);
+int vmemmap_populate(unsigned long start, unsigned long end, int node,
+ struct vmem_altmap *altmap);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
-void vmemmap_free(unsigned long start, unsigned long end);
+void vmemmap_free(unsigned long start, unsigned long end,
+ struct vmem_altmap *altmap);
+#endif
+
+#ifdef CONFIG_ARCH_WANT_OPTIMIZE_VMEMMAP
+static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap)
+{
+ return is_power_of_2(sizeof(struct page)) &&
+ pgmap && (pgmap_vmemmap_nr(pgmap) > 1) && !altmap;
+}
+#else
+static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap)
+{
+ return false;
+}
#endif
+
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
- unsigned long size);
+ unsigned long nr_pages);
enum mf_flags {
MF_COUNT_INCREASED = 1 << 0,
MF_ACTION_REQUIRED = 1 << 1,
MF_MUST_KILL = 1 << 2,
MF_SOFT_OFFLINE = 1 << 3,
+ MF_UNPOISON = 1 << 4,
+ MF_SW_SIMULATED = 1 << 5,
+ MF_NO_RETRY = 1 << 6,
};
-extern int memory_failure(unsigned long pfn, int trapno, int flags);
-extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
+int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
+ unsigned long count, int mf_flags);
+extern int memory_failure(unsigned long pfn, int flags);
+extern void memory_failure_queue_kick(int cpu);
extern int unpoison_memory(unsigned long pfn);
-extern int get_hwpoison_page(struct page *page);
-#define put_hwpoison_page(page) put_page(page)
-extern int sysctl_memory_failure_early_kill;
-extern int sysctl_memory_failure_recovery;
-extern void shake_page(struct page *p, int access);
-extern atomic_long_t num_poisoned_pages;
-extern int soft_offline_page(struct page *page, int flags);
+extern void shake_page(struct page *p);
+extern atomic_long_t num_poisoned_pages __read_mostly;
+extern int soft_offline_page(unsigned long pfn, int flags);
+#ifdef CONFIG_MEMORY_FAILURE
+extern void memory_failure_queue(unsigned long pfn, int flags);
+extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
+ bool *migratable_cleared);
+void num_poisoned_pages_inc(unsigned long pfn);
+void num_poisoned_pages_sub(unsigned long pfn, long i);
+struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
+#else
+static inline void memory_failure_queue(unsigned long pfn, int flags)
+{
+}
+static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
+ bool *migratable_cleared)
+{
+ return 0;
+}
+
+static inline void num_poisoned_pages_inc(unsigned long pfn)
+{
+}
+
+static inline void num_poisoned_pages_sub(unsigned long pfn, long i)
+{
+}
+#endif
+
+#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_KSM)
+void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
+ struct vm_area_struct *vma, struct list_head *to_kill,
+ unsigned long ksm_addr);
+#endif
+
+#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
+extern void memblk_nr_poison_inc(unsigned long pfn);
+extern void memblk_nr_poison_sub(unsigned long pfn, long i);
+#else
+static inline void memblk_nr_poison_inc(unsigned long pfn)
+{
+}
+
+static inline void memblk_nr_poison_sub(unsigned long pfn, long i)
+{
+}
+#endif
+
+#ifndef arch_memory_failure
+static inline int arch_memory_failure(unsigned long pfn, int flags)
+{
+ return -ENXIO;
+}
+#endif
+
+#ifndef arch_is_platform_page
+static inline bool arch_is_platform_page(u64 paddr)
+{
+ return false;
+}
+#endif
/*
* Error handlers for various types of pages.
@@ -2485,7 +3660,6 @@ enum mf_action_page_type {
MF_MSG_KERNEL_HIGH_ORDER,
MF_MSG_SLAB,
MF_MSG_DIFFERENT_COMPOUND,
- MF_MSG_POISONED_HUGE,
MF_MSG_HUGE,
MF_MSG_FREE_HUGE,
MF_MSG_UNMAP_FAILED,
@@ -2499,28 +3673,48 @@ enum mf_action_page_type {
MF_MSG_CLEAN_LRU,
MF_MSG_TRUNCATED_LRU,
MF_MSG_BUDDY,
- MF_MSG_BUDDY_2ND,
+ MF_MSG_DAX,
+ MF_MSG_UNSPLIT_THP,
MF_MSG_UNKNOWN,
};
+/*
+ * Sysfs entries for memory failure handling statistics.
+ */
+extern const struct attribute_group memory_failure_attr_group;
+
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
- unsigned long addr,
+ unsigned long addr_hint,
unsigned int pages_per_huge_page);
-extern void copy_user_huge_page(struct page *dst, struct page *src,
- unsigned long addr, struct vm_area_struct *vma,
- unsigned int pages_per_huge_page);
-extern long copy_huge_page_from_user(struct page *dst_page,
- const void __user *usr_src,
- unsigned int pages_per_huge_page,
- bool allow_pagefault);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
+int copy_user_large_folio(struct folio *dst, struct folio *src,
+ unsigned long addr_hint,
+ struct vm_area_struct *vma);
+long copy_folio_from_user(struct folio *dst_folio,
+ const void __user *usr_src,
+ bool allow_pagefault);
-extern struct page_ext_operations debug_guardpage_ops;
+/**
+ * vma_is_special_huge - Are transhuge page-table entries considered special?
+ * @vma: Pointer to the struct vm_area_struct to consider
+ *
+ * Whether transhuge page-table entries are considered "special" following
+ * the definition in vm_normal_page().
+ *
+ * Return: true if transhuge page-table entries should be considered special,
+ * false otherwise.
+ */
+static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
+{
+ return vma_is_dax(vma) || (vma->vm_file &&
+ (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
+}
+
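/*
 * A sketch of a teardown path consulting the predicate above: "special"
 * huge entries carry no struct page, so per-page bookkeeping is skipped
 * (hypothetical helper).
 */
static void example_zap_huge(struct vm_area_struct *vma)
{
	if (vma_is_special_huge(vma))
		return;			/* no struct page to dirty or unpin */
	/* ... page-backed huge-entry teardown ... */
}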
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
-extern bool _debug_guardpage_enabled;
+DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
static inline unsigned int debug_guardpage_minorder(void)
{
@@ -2529,21 +3723,15 @@ static inline unsigned int debug_guardpage_minorder(void)
static inline bool debug_guardpage_enabled(void)
{
- return _debug_guardpage_enabled;
+ return static_branch_unlikely(&_debug_guardpage_enabled);
}
static inline bool page_is_guard(struct page *page)
{
- struct page_ext *page_ext;
-
if (!debug_guardpage_enabled())
return false;
- page_ext = lookup_page_ext(page);
- if (unlikely(!page_ext))
- return false;
-
- return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
+ return PageGuard(page);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
@@ -2557,5 +3745,75 @@ void __init setup_nr_node_ids(void);
static inline void setup_nr_node_ids(void) {}
#endif
-#endif /* __KERNEL__ */
+extern int memcmp_pages(struct page *page1, struct page *page2);
+
+static inline int pages_identical(struct page *page1, struct page *page2)
+{
+ return !memcmp_pages(page1, page2);
+}
+
+#ifdef CONFIG_MAPPING_DIRTY_HELPERS
+unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
+ pgoff_t first_index, pgoff_t nr,
+ pgoff_t bitmap_pgoff,
+ unsigned long *bitmap,
+ pgoff_t *start,
+ pgoff_t *end);
+
+unsigned long wp_shared_mapping_range(struct address_space *mapping,
+ pgoff_t first_index, pgoff_t nr);
+#endif
+
+extern int sysctl_nr_trim_pages;
+
+#ifdef CONFIG_PRINTK
+void mem_dump_obj(void *object);
+#else
+static inline void mem_dump_obj(void *object) {}
+#endif
+
+/**
+ * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
+ * @seals: the seals to check
+ * @vma: the vma to operate on
+ *
+ * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on
+ * the vma flags. Return 0 if the check passes, or <0 on error.
+ */
+static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
+{
+ if (seals & F_SEAL_FUTURE_WRITE) {
+ /*
+ * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
+ * "future write" seal active.
+ */
+ if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
+ return -EPERM;
+
+ /*
+ * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
+ * MAP_SHARED and read-only, take care to not allow mprotect to
+ * revert protections on such mappings. Do this only for shared
+ * mappings. For private mappings, we don't need to mask
+ * VM_MAYWRITE as we still want them to be COW-writable.
+ */
+ if (vma->vm_flags & VM_SHARED)
+ vm_flags_clear(vma, VM_MAYWRITE);
+ }
+
+ return 0;
+}
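/*
 * A sketch of the helper above in a filesystem's ->mmap(), the way
 * memfd-style code consults its seal set (example_file_seals() is
 * hypothetical).
 */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	int ret = seal_check_future_write(example_file_seals(file), vma);

	if (ret)
		return ret;
	/* ... regular mmap setup continues ... */
	return 0;
}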
+
+#ifdef CONFIG_ANON_VMA_NAME
+int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
+ unsigned long len_in,
+ struct anon_vma_name *anon_name);
+#else
+static inline int
+madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
+ unsigned long len_in, struct anon_vma_name *anon_name) {
+ return 0;
+}
+#endif
+
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_api.h b/include/linux/mm_api.h
new file mode 100644
index 000000000000..a5ace2b198b8
--- /dev/null
+++ b/include/linux/mm_api.h
@@ -0,0 +1 @@
+#include <linux/mm.h>
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index e030a68ead7e..0e1d239a882c 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -1,41 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H
+#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/swap.h>
+#include <linux/string.h>
+#include <linux/userfaultfd_k.h>
+#include <linux/swapops.h>
/**
- * page_is_file_cache - should the page be on a file LRU or anon LRU?
- * @page: the page to test
- *
- * Returns 1 if @page is page cache page backed by a regular filesystem,
- * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
- * Used by functions that manipulate the LRU lists, to sort a page
- * onto the right LRU list.
+ * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
+ * @folio: The folio to test.
*
* We would like to get this info without a page flag, but the state
- * needs to survive until the page is last deleted from the LRU, which
+ * needs to survive until the folio is last deleted from the LRU, which
* could be as far down as __page_cache_release.
+ *
+ * Return: An integer (not a boolean!) used to sort a folio onto the
+ * right LRU list and to account folios correctly.
+ * 1 if @folio is a regular filesystem backed page cache folio
+ * or a lazily freed anonymous folio (e.g. via MADV_FREE).
+ * 0 if @folio is a normal anonymous folio, a tmpfs folio, or an otherwise
+ * ram- or swap-backed folio.
*/
-static inline int page_is_file_cache(struct page *page)
+static inline int folio_is_file_lru(struct folio *folio)
+{
+ return !folio_test_swapbacked(folio);
+}
+
+static inline int page_is_file_lru(struct page *page)
{
- return !PageSwapBacked(page);
+ return folio_is_file_lru(page_folio(page));
}
static __always_inline void __update_lru_size(struct lruvec *lruvec,
enum lru_list lru, enum zone_type zid,
- int nr_pages)
+ long nr_pages)
{
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
- __mod_node_page_state(pgdat, NR_LRU_BASE + lru, nr_pages);
+ lockdep_assert_held(&lruvec->lru_lock);
+ WARN_ON_ONCE(nr_pages != (int)nr_pages);
+
+ __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
__mod_zone_page_state(&pgdat->node_zones[zid],
NR_ZONE_LRU_BASE + lru, nr_pages);
}
static __always_inline void update_lru_size(struct lruvec *lruvec,
enum lru_list lru, enum zone_type zid,
- int nr_pages)
+ long nr_pages)
{
__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
@@ -43,87 +58,540 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
#endif
}
+/**
+ * __folio_clear_lru_flags - Clear page lru flags before releasing a page.
+ * @folio: The folio that was on lru and now has a zero reference.
+ */
+static __always_inline void __folio_clear_lru_flags(struct folio *folio)
+{
+ VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);
+
+ __folio_clear_lru(folio);
+
+ /* this shouldn't happen, so leave the flags to bad_page() */
+ if (folio_test_active(folio) && folio_test_unevictable(folio))
+ return;
+
+ __folio_clear_active(folio);
+ __folio_clear_unevictable(folio);
+}
+
+/**
+ * folio_lru_list - Which LRU list should a folio be on?
+ * @folio: The folio to test.
+ *
+ * Return: The LRU list a folio should be on, as an index
+ * into the array of LRU lists.
+ */
+static __always_inline enum lru_list folio_lru_list(struct folio *folio)
+{
+ enum lru_list lru;
+
+ VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);
+
+ if (folio_test_unevictable(folio))
+ return LRU_UNEVICTABLE;
+
+ lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
+ if (folio_test_active(folio))
+ lru += LRU_ACTIVE;
+
+ return lru;
+}
+
+#ifdef CONFIG_LRU_GEN
+
+#ifdef CONFIG_LRU_GEN_ENABLED
+static inline bool lru_gen_enabled(void)
+{
+ DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]);
+
+ return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]);
+}
+#else
+static inline bool lru_gen_enabled(void)
+{
+ DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]);
+
+ return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]);
+}
+#endif
+
+static inline bool lru_gen_in_fault(void)
+{
+ return current->in_lru_fault;
+}
+
+static inline int lru_gen_from_seq(unsigned long seq)
+{
+ return seq % MAX_NR_GENS;
+}
+
+static inline int lru_hist_from_seq(unsigned long seq)
+{
+ return seq % NR_HIST_GENS;
+}
+
+static inline int lru_tier_from_refs(int refs)
+{
+ VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));
+
+ /* see the comment in folio_lru_refs() */
+ return order_base_2(refs + 1);
+}
+
+static inline int folio_lru_refs(struct folio *folio)
+{
+ unsigned long flags = READ_ONCE(folio->flags);
+ bool workingset = flags & BIT(PG_workingset);
+
+ /*
+ * Return the number of accesses beyond PG_referenced, i.e., N-1 if the
+ * total number of accesses is N>1, since N=0,1 both map to the first
+ * tier. lru_tier_from_refs() will account for this off-by-one. Also see
+ * the comment on MAX_NR_TIERS.
+ */
+ return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset;
+}
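/*
 * A worked example of the encoding above: after N = 4 total accesses,
 * LRU_REFS stores refs = N - 1 = 3, folio_lru_refs() returns 3 (plus 1
 * if PG_workingset is set), and lru_tier_from_refs(3) =
 * order_base_2(3 + 1) = 2, so the folio is sorted into tier 2.
 */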
+
+static inline int folio_lru_gen(struct folio *folio)
+{
+ unsigned long flags = READ_ONCE(folio->flags);
+
+ return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
+}
+
+static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
+{
+ unsigned long max_seq = lruvec->lrugen.max_seq;
+
+ VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
+
+ /* see the comment on MIN_NR_GENS */
+ return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
+}
+
+static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,
+ int old_gen, int new_gen)
+{
+ int type = folio_is_file_lru(folio);
+ int zone = folio_zonenum(folio);
+ int delta = folio_nr_pages(folio);
+ enum lru_list lru = type * LRU_INACTIVE_FILE;
+ struct lru_gen_folio *lrugen = &lruvec->lrugen;
+
+ VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
+ VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
+ VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);
+
+ if (old_gen >= 0)
+ WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
+ lrugen->nr_pages[old_gen][type][zone] - delta);
+ if (new_gen >= 0)
+ WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
+ lrugen->nr_pages[new_gen][type][zone] + delta);
+
+ /* addition */
+ if (old_gen < 0) {
+ if (lru_gen_is_active(lruvec, new_gen))
+ lru += LRU_ACTIVE;
+ __update_lru_size(lruvec, lru, zone, delta);
+ return;
+ }
+
+ /* deletion */
+ if (new_gen < 0) {
+ if (lru_gen_is_active(lruvec, old_gen))
+ lru += LRU_ACTIVE;
+ __update_lru_size(lruvec, lru, zone, -delta);
+ return;
+ }
+
+ /* promotion */
+ if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
+ __update_lru_size(lruvec, lru, zone, -delta);
+ __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
+ }
+
+ /* demotion requires isolation, e.g., lru_deactivate_fn() */
+ VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
+}
+
+static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ unsigned long seq;
+ unsigned long flags;
+ int gen = folio_lru_gen(folio);
+ int type = folio_is_file_lru(folio);
+ int zone = folio_zonenum(folio);
+ struct lru_gen_folio *lrugen = &lruvec->lrugen;
+
+ VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);
+
+ if (folio_test_unevictable(folio) || !lrugen->enabled)
+ return false;
+ /*
+ * There are three common cases for this page:
+ * 1. If it's hot, e.g., freshly faulted in or previously hot and
+ * migrated, add it to the youngest generation.
+ * 2. If it's cold but can't be evicted immediately, i.e., an anon page
+ * not in swapcache or a dirty page pending writeback, add it to the
+ * second oldest generation.
+ * 3. Everything else (clean, cold) is added to the oldest generation.
+ */
+ if (folio_test_active(folio))
+ seq = lrugen->max_seq;
+ else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
+ (folio_test_reclaim(folio) &&
+ (folio_test_dirty(folio) || folio_test_writeback(folio))))
+ seq = lrugen->min_seq[type] + 1;
+ else
+ seq = lrugen->min_seq[type];
+
+ gen = lru_gen_from_seq(seq);
+ flags = (gen + 1UL) << LRU_GEN_PGOFF;
+ /* see the comment on MIN_NR_GENS about PG_active */
+ set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);
+
+ lru_gen_update_size(lruvec, folio, -1, gen);
+ /* for folio_rotate_reclaimable() */
+ if (reclaiming)
+ list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
+ else
+ list_add(&folio->lru, &lrugen->folios[gen][type][zone]);
+
+ return true;
+}
+
+static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ unsigned long flags;
+ int gen = folio_lru_gen(folio);
+
+ if (gen < 0)
+ return false;
+
+ VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
+
+ /* for folio_migrate_flags() */
+ flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
+ flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags);
+ gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
+
+ lru_gen_update_size(lruvec, folio, gen, -1);
+ list_del(&folio->lru);
+
+ return true;
+}
+
+#else /* !CONFIG_LRU_GEN */
+
+static inline bool lru_gen_enabled(void)
+{
+ return false;
+}
+
+static inline bool lru_gen_in_fault(void)
+{
+ return false;
+}
+
+static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ return false;
+}
+
+static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ return false;
+}
+
+#endif /* CONFIG_LRU_GEN */
+
+static __always_inline
+void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
+{
+ enum lru_list lru = folio_lru_list(folio);
+
+ if (lru_gen_add_folio(lruvec, folio, false))
+ return;
+
+ update_lru_size(lruvec, lru, folio_zonenum(folio),
+ folio_nr_pages(folio));
+ if (lru != LRU_UNEVICTABLE)
+ list_add(&folio->lru, &lruvec->lists[lru]);
+}
+
static __always_inline void add_page_to_lru_list(struct page *page,
- struct lruvec *lruvec, enum lru_list lru)
+ struct lruvec *lruvec)
{
- update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
- list_add(&page->lru, &lruvec->lists[lru]);
+ lruvec_add_folio(lruvec, page_folio(page));
+}
+
+static __always_inline
+void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
+{
+ enum lru_list lru = folio_lru_list(folio);
+
+ if (lru_gen_add_folio(lruvec, folio, true))
+ return;
+
+ update_lru_size(lruvec, lru, folio_zonenum(folio),
+ folio_nr_pages(folio));
+ /* This is not expected to be used on LRU_UNEVICTABLE */
+ list_add_tail(&folio->lru, &lruvec->lists[lru]);
}
-static __always_inline void add_page_to_lru_list_tail(struct page *page,
- struct lruvec *lruvec, enum lru_list lru)
+static __always_inline
+void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
- update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
- list_add_tail(&page->lru, &lruvec->lists[lru]);
+ enum lru_list lru = folio_lru_list(folio);
+
+ if (lru_gen_del_folio(lruvec, folio, false))
+ return;
+
+ if (lru != LRU_UNEVICTABLE)
+ list_del(&folio->lru);
+ update_lru_size(lruvec, lru, folio_zonenum(folio),
+ -folio_nr_pages(folio));
}
static __always_inline void del_page_from_lru_list(struct page *page,
- struct lruvec *lruvec, enum lru_list lru)
+ struct lruvec *lruvec)
{
- list_del(&page->lru);
- update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
+ lruvec_del_folio(lruvec, page_folio(page));
}
-/**
- * page_lru_base_type - which LRU list type should a page be on?
- * @page: the page to test
- *
- * Used for LRU list index arithmetic.
- *
- * Returns the base LRU type - file or anon - @page should be on.
+#ifdef CONFIG_ANON_VMA_NAME
+/*
+ * mmap_lock should be read-locked when calling anon_vma_name(). Caller should
+ * either keep holding the lock while using the returned pointer or it should
+ * raise anon_vma_name refcount before releasing the lock.
*/
-static inline enum lru_list page_lru_base_type(struct page *page)
+extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
+extern struct anon_vma_name *anon_vma_name_alloc(const char *name);
+extern void anon_vma_name_free(struct kref *kref);
+
+/* mmap_lock should be read-locked */
+static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
- if (page_is_file_cache(page))
- return LRU_INACTIVE_FILE;
- return LRU_INACTIVE_ANON;
+ if (anon_name)
+ kref_get(&anon_name->kref);
}
-/**
- * page_off_lru - which LRU list was page on? clearing its lru flags.
- * @page: the page to test
- *
- * Returns the LRU list a page was on, as an index into the array of LRU
- * lists; and clears its Unevictable or Active flags, ready for freeing.
- */
-static __always_inline enum lru_list page_off_lru(struct page *page)
+static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
- enum lru_list lru;
+ if (anon_name)
+ kref_put(&anon_name->kref, anon_vma_name_free);
+}
+
+static inline
+struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
+{
+ /* Prevent anon_name refcount saturation early on */
+ if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
+ anon_vma_name_get(anon_name);
+ return anon_name;
- if (PageUnevictable(page)) {
- __ClearPageUnevictable(page);
- lru = LRU_UNEVICTABLE;
- } else {
- lru = page_lru_base_type(page);
- if (PageActive(page)) {
- __ClearPageActive(page);
- lru += LRU_ACTIVE;
- }
}
- return lru;
+ return anon_vma_name_alloc(anon_name->name);
}
-/**
- * page_lru - which LRU list should a page be on?
- * @page: the page to test
+static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
+ struct vm_area_struct *new_vma)
+{
+ struct anon_vma_name *anon_name = anon_vma_name(orig_vma);
+
+ if (anon_name)
+ new_vma->anon_name = anon_vma_name_reuse(anon_name);
+}
+
+static inline void free_anon_vma_name(struct vm_area_struct *vma)
+{
+ /*
+ * Not using anon_vma_name because it generates a warning if mmap_lock
+ * is not held, which might be the case here.
+ */
+ anon_vma_name_put(vma->anon_name);
+}
+
+static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
+ struct anon_vma_name *anon_name2)
+{
+ if (anon_name1 == anon_name2)
+ return true;
+
+ return anon_name1 && anon_name2 &&
+ !strcmp(anon_name1->name, anon_name2->name);
+}
+
+#else /* CONFIG_ANON_VMA_NAME */
+static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
+{
+ return NULL;
+}
+
+static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
+{
+ return NULL;
+}
+
+static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
+static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
+static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
+ struct vm_area_struct *new_vma) {}
+static inline void free_anon_vma_name(struct vm_area_struct *vma) {}
+
+static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
+ struct anon_vma_name *anon_name2)
+{
+ return true;
+}
+
+#endif /* CONFIG_ANON_VMA_NAME */
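Taken together, these helpers implement a refcounted, copy-on-reuse name object. A sketch of a setter built on them, assuming the caller holds mmap_lock for writing; set_vma_anon_name() is a hypothetical wrapper, not part of the patch:

```c
static int set_vma_anon_name(struct vm_area_struct *vma, const char *name)
{
	struct anon_vma_name *anon_name;

	mmap_assert_write_locked(vma->vm_mm);	/* names are serialized by mmap_lock */

	anon_name = anon_vma_name_alloc(name);
	if (!anon_name)
		return -ENOMEM;

	free_anon_vma_name(vma);	/* drop the previous name, if any */
	vma->anon_name = anon_name;
	return 0;
}
```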
+
+static inline void init_tlb_flush_pending(struct mm_struct *mm)
+{
+ atomic_set(&mm->tlb_flush_pending, 0);
+}
+
+static inline void inc_tlb_flush_pending(struct mm_struct *mm)
+{
+ atomic_inc(&mm->tlb_flush_pending);
+ /*
+ * The only time this value is relevant is when there are indeed pages
+ * to flush. And we'll only flush pages after changing them, which
+ * requires the PTL.
+ *
+ * So the ordering here is:
+ *
+ * atomic_inc(&mm->tlb_flush_pending);
+ * spin_lock(&ptl);
+ * ...
+ * set_pte_at();
+ * spin_unlock(&ptl);
+ *
+ * spin_lock(&ptl)
+ * mm_tlb_flush_pending();
+ * ....
+ * spin_unlock(&ptl);
+ *
+ * flush_tlb_range();
+ * atomic_dec(&mm->tlb_flush_pending);
+ *
+ * Where the increment is constrained by the PTL unlock, it thus
+ * ensures that the increment is visible if the PTE modification is
+ * visible. After all, if there is no PTE modification, nobody cares
+ * about TLB flushes either.
+ *
+ * This very much relies on users (mm_tlb_flush_pending() and
+ * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
+ * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
+ * locks (PPC) the unlock of one doesn't order against the lock of
+ * another PTL.
+ *
+ * The decrement is ordered by the flush_tlb_range(), such that
+ * mm_tlb_flush_pending() will not return false unless all flushes have
+ * completed.
+ */
+}
+
+static inline void dec_tlb_flush_pending(struct mm_struct *mm)
+{
+ /*
+ * See inc_tlb_flush_pending().
+ *
+ * This cannot be smp_mb__before_atomic() because smp_mb() simply does
+ * not order against TLB invalidate completion, which is what we need.
+ *
+ * Therefore we must rely on tlb_flush_*() to guarantee order.
+ */
+ atomic_dec(&mm->tlb_flush_pending);
+}
+
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+ /*
+ * Must be called after having acquired the PTL; orders against that
+ * PTLs release and therefore ensures that if we observe the modified
+ * PTE we must also observe the increment from inc_tlb_flush_pending().
+ *
+ * That is, it only guarantees to return true if there is a flush
+ * pending for _this_ PTL.
+ */
+ return atomic_read(&mm->tlb_flush_pending);
+}
+
+static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
+{
+ /*
+ * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
+ * for which there is a TLB flush pending in order to guarantee
+ * we've seen both that PTE modification and the increment.
+ *
+ * (no requirement on actually still holding the PTL, that is irrelevant)
+ */
+ return atomic_read(&mm->tlb_flush_pending) > 1;
+}
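The contract spelled out in these comments is easiest to see as a pairing around the actual flush. A sketch of a caller, where unmap_and_flush() and its arguments are hypothetical and the PTL critical section is elided:

```c
static void unmap_and_flush(struct vm_area_struct *vma,
			    unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	inc_tlb_flush_pending(mm);	/* publication piggybacks on the PTL unlock */

	/* ... clear PTEs in [start, end) under the page table lock ... */

	flush_tlb_range(vma, start, end);
	dec_tlb_flush_pending(mm);	/* ordered by the flush itself */
}
```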
+
+/*
+ * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
+ * replace a none pte. NOTE! This should only be called when *pte is already
+ * cleared so we will never accidentally replace something valuable. Meanwhile
+ * none pte also means we are not demoting the pte, so no TLB flush is
+ * needed; e.g., when the pte was cleared, the caller should have taken
+ * care of the TLB flush.
*
- * Returns the LRU list a page should be on, as an index
- * into the array of LRU lists.
+ * Must be called with pgtable lock held so that no thread will see the none
+ * pte, and if they see it, they'll fault and serialize at the pgtable lock.
+ *
+ * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled.
*/
-static __always_inline enum lru_list page_lru(struct page *page)
+static inline void
+pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *pte, pte_t pteval)
{
- enum lru_list lru;
+#ifdef CONFIG_PTE_MARKER_UFFD_WP
+ bool arm_uffd_pte = false;
- if (PageUnevictable(page))
- lru = LRU_UNEVICTABLE;
- else {
- lru = page_lru_base_type(page);
- if (PageActive(page))
- lru += LRU_ACTIVE;
- }
- return lru;
+ /* The current status of the pte should be "cleared" before calling */
+ WARN_ON_ONCE(!pte_none(*pte));
+
+ /*
+ * NOTE: userfaultfd_wp_unpopulated() doesn't need this whole
+ * thing, because when zapping either it means it's dropping the
+ * page, or in TTU where the present pte will be quickly replaced
+ * with a swap pte. There's no way of leaking the bit.
+ */
+ if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
+ return;
+
+ /* A uffd-wp wr-protected normal pte */
+ if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
+ arm_uffd_pte = true;
+
+ /*
+ * A uffd-wp wr-protected swap pte. Note: this should even cover an
+ * existing pte marker with uffd-wp bit set.
+ */
+ if (unlikely(pte_swp_uffd_wp_any(pteval)))
+ arm_uffd_pte = true;
+
+ if (unlikely(arm_uffd_pte))
+ set_pte_at(vma->vm_mm, addr, pte,
+ make_pte_marker(PTE_MARKER_UFFD_WP));
+#endif
}
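A sketch of the intended call pattern on a zap path, with the page table lock held; zap_one_pte() is a hypothetical wrapper and error handling is elided:

```c
static void zap_one_pte(struct vm_area_struct *vma, unsigned long addr,
			pte_t *pte)
{
	/* Clear first, so *pte is none by the time the helper runs */
	pte_t pteval = ptep_get_and_clear(vma->vm_mm, addr, pte);

	/* Re-arm the uffd-wp marker if the old pte carried the bit */
	pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
}
```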
-#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
+static inline bool vma_has_recency(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
+ return false;
+
+ if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE))
+ return false;
+
+ return true;
+}
#endif
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 45cdb27791a3..306a3d1a0fa6 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1,18 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H
#include <linux/mm_types_task.h>
#include <linux/auxvec.h>
+#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
+#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
+#include <linux/rcupdate.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
+#include <linux/seqlock.h>
+#include <linux/percpu_counter.h>
#include <asm/mmu.h>
@@ -21,6 +27,8 @@
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
+#define INIT_PASID 0
+
struct address_space;
struct mem_cgroup;
@@ -31,163 +39,163 @@ struct mem_cgroup;
* a page, though if it is a pagecache page, rmap structures can tell us
* who is mapping it.
*
- * The objects in struct page are organized in double word blocks in
- * order to allows us to use atomic double word operations on portions
- * of struct page. That is currently only used by slub but the arrangement
- * allows the use of atomic double word operations on the flags/mapping
- * and lru list pointers also.
+ * If you allocate the page using alloc_pages(), you can use some of the
+ * space in struct page for your own purposes. The five words in the main
+ * union are available, except for bit 0 of the first word which must be
+ * kept clear. Many users use this word to store a pointer to an object
+ * which is guaranteed to be aligned. If you use the same storage as
+ * page->mapping, you must restore it to NULL before freeing the page.
+ *
+ * If your page will not be mapped to userspace, you can also use the four
+ * bytes in the mapcount union, but you must call page_mapcount_reset()
+ * before freeing it.
+ *
+ * If you want to use the refcount field, it must be used in such a way
+ * that other CPUs temporarily incrementing and then decrementing the
+ * refcount does not cause problems. On receiving the page from
+ * alloc_pages(), the refcount will be positive.
+ *
+ * If you allocate pages of order > 0, you can use some of the fields
+ * in each subpage, but you may need to restore some of their values
+ * afterwards.
+ *
+ * SLUB uses cmpxchg_double() to atomically update its freelist and counters.
+ * That requires that freelist & counters in struct slab be adjacent and
+ * double-word aligned. Because struct slab currently just reinterprets the
+ * bits of struct page, we align all struct pages to double-word boundaries,
+ * and ensure that 'freelist' is aligned within struct slab.
*/
+#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
+#define _struct_page_alignment __aligned(2 * sizeof(unsigned long))
+#else
+#define _struct_page_alignment __aligned(sizeof(unsigned long))
+#endif
+
struct page {
- /* First double word block */
unsigned long flags; /* Atomic flags, some possibly
* updated asynchronously */
+ /*
+ * Five words (20/40 bytes) are available in this union.
+ * WARNING: bit 0 of the first word is used for PageTail(). That
+ * means the other users of this union MUST NOT use the bit to
+ * avoid collision and false-positive PageTail().
+ */
union {
- struct address_space *mapping; /* If low bit clear, points to
- * inode address_space, or NULL.
- * If page mapped as anonymous
- * memory, low bit is set, and
- * it points to anon_vma object:
- * see PAGE_MAPPING_ANON below.
- */
- void *s_mem; /* slab first object */
- atomic_t compound_mapcount; /* first tail page */
- /* page_deferred_list().next -- second tail page */
- };
-
- /* Second double word */
- union {
- pgoff_t index; /* Our offset within mapping. */
- void *freelist; /* sl[aou]b first free object */
- /* page_deferred_list().prev -- second tail page */
- };
-
- union {
-#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
- defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
- /* Used for cmpxchg_double in slub */
- unsigned long counters;
-#else
- /*
- * Keep _refcount separate from slub cmpxchg_double data.
- * As the rest of the double word is protected by slab_lock
- * but _refcount is not.
- */
- unsigned counters;
-#endif
- struct {
-
+ struct { /* Page cache and anonymous pages */
+ /**
+ * @lru: Pageout list, eg. active_list protected by
+ * lruvec->lru_lock. Sometimes used as a generic list
+ * by the page owner.
+ */
union {
- /*
- * Count of ptes mapped in mms, to show when
- * page is mapped & limit reverse map searches.
- *
- * Extra information about page type may be
- * stored here for pages that are never mapped,
- * in which case the value MUST BE <= -2.
- * See page-flags.h for more details.
- */
- atomic_t _mapcount;
-
- unsigned int active; /* SLAB */
- struct { /* SLUB */
- unsigned inuse:16;
- unsigned objects:15;
- unsigned frozen:1;
+ struct list_head lru;
+
+ /* Or, for the Unevictable "LRU list" slot */
+ struct {
+ /* Always even, to negate PageTail */
+ void *__filler;
+ /* Count page's or folio's mlocks */
+ unsigned int mlock_count;
};
- int units; /* SLOB */
+
+ /* Or, free page */
+ struct list_head buddy_list;
+ struct list_head pcp_list;
};
- /*
- * Usage count, *USE WRAPPER FUNCTION* when manual
- * accounting. See page_ref.h
+ /* See page-flags.h for PAGE_MAPPING_FLAGS */
+ struct address_space *mapping;
+ union {
+ pgoff_t index; /* Our offset within mapping. */
+ unsigned long share; /* share count for fsdax */
+ };
+ /**
+ * @private: Mapping-private opaque data.
+ * Usually used for buffer_heads if PagePrivate.
+ * Used for swp_entry_t if PageSwapCache.
+ * Indicates order in the buddy system if PageBuddy.
*/
- atomic_t _refcount;
+ unsigned long private;
};
- };
-
- /*
- * Third double word block
- *
- * WARNING: bit 0 of the first word encode PageTail(). That means
- * the rest users of the storage space MUST NOT use the bit to
- * avoid collision and false-positive PageTail().
- */
- union {
- struct list_head lru; /* Pageout list, eg. active_list
- * protected by zone_lru_lock !
- * Can be used as a generic list
- * by the page owner.
- */
- struct dev_pagemap *pgmap; /* ZONE_DEVICE pages are never on an
- * lru or handled by a slab
- * allocator, this points to the
- * hosting device page map.
- */
- struct { /* slub per cpu partial pages */
- struct page *next; /* Next partial slab */
-#ifdef CONFIG_64BIT
- int pages; /* Nr of partial slabs left */
- int pobjects; /* Approximate # of objects */
+ struct { /* page_pool used by netstack */
+ /**
+ * @pp_magic: magic value to avoid recycling non
+ * page_pool allocated pages.
+ */
+ unsigned long pp_magic;
+ struct page_pool *pp;
+ unsigned long _pp_mapping_pad;
+ unsigned long dma_addr;
+ union {
+ /**
+ * dma_addr_upper: might require a 64-bit
+ * value on 32-bit architectures.
+ */
+ unsigned long dma_addr_upper;
+ /**
+ * For frag page support, not supported in
+ * 32-bit architectures with 64-bit DMA.
+ */
+ atomic_long_t pp_frag_count;
+ };
+ };
+ struct { /* Tail pages of compound page */
+ unsigned long compound_head; /* Bit zero is set */
+ };
+ struct { /* Page table pages */
+ unsigned long _pt_pad_1; /* compound_head */
+ pgtable_t pmd_huge_pte; /* protected by page->ptl */
+ unsigned long _pt_pad_2; /* mapping */
+ union {
+ struct mm_struct *pt_mm; /* x86 pgds only */
+ atomic_t pt_frag_refcount; /* powerpc */
+ };
+#if ALLOC_SPLIT_PTLOCKS
+ spinlock_t *ptl;
#else
- short int pages;
- short int pobjects;
+ spinlock_t ptl;
#endif
};
-
- struct rcu_head rcu_head; /* Used by SLAB
- * when destroying via RCU
- */
- /* Tail pages of compound page */
- struct {
- unsigned long compound_head; /* If bit zero is set */
-
- /* First tail page only */
-#ifdef CONFIG_64BIT
+ struct { /* ZONE_DEVICE pages */
+ /** @pgmap: Points to the hosting device page map. */
+ struct dev_pagemap *pgmap;
+ void *zone_device_data;
/*
- * On 64 bit system we have enough space in struct page
- * to encode compound_dtor and compound_order with
- * unsigned int. It can help compiler generate better or
- * smaller code on some archtectures.
+ * ZONE_DEVICE private pages are counted as being
+ * mapped so the next 3 words hold the mapping, index,
+ * and private fields from the source anonymous or
+ * page cache page while the page is migrated to device
+ * private memory.
+ * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
+ * use the mapping, index, and private fields when
+ * pmem backed DAX files are mapped.
*/
- unsigned int compound_dtor;
- unsigned int compound_order;
-#else
- unsigned short int compound_dtor;
- unsigned short int compound_order;
-#endif
};
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
- struct {
- unsigned long __pad; /* do not overlay pmd_huge_pte
- * with compound_head to avoid
- * possible bit 0 collision.
- */
- pgtable_t pmd_huge_pte; /* protected by page->ptl */
- };
-#endif
+ /** @rcu_head: You can use this to free a page by RCU. */
+ struct rcu_head rcu_head;
};
- /* Remainder is not double word aligned */
- union {
- unsigned long private; /* Mapping-private opaque data:
- * usually used for buffer_heads
- * if PagePrivate set; used for
- * swp_entry_t if PageSwapCache;
- * indicates order in the buddy
- * system if PG_buddy is set.
- */
-#if USE_SPLIT_PTE_PTLOCKS
-#if ALLOC_SPLIT_PTLOCKS
- spinlock_t *ptl;
-#else
- spinlock_t ptl;
-#endif
-#endif
- struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */
+ union { /* This union is 4 bytes in size. */
+ /*
+ * If the page can be mapped to userspace, encodes the number
+ * of times this page is referenced by a page table.
+ */
+ atomic_t _mapcount;
+
+ /*
+ * If the page is neither PageSlab nor mappable to userspace,
+ * the value stored here may help determine what this page
+ * is used for. See page-flags.h for a list of page types
+ * which are currently stored here.
+ */
+ unsigned int page_type;
};
+ /* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
+ atomic_t _refcount;
+
#ifdef CONFIG_MEMCG
- struct mem_cgroup *mem_cgroup;
+ unsigned long memcg_data;
#endif
/*
@@ -205,30 +213,212 @@ struct page {
not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
-#ifdef CONFIG_KMEMCHECK
+#ifdef CONFIG_KMSAN
/*
- * kmemcheck wants to track the status of each byte in a page; this
- * is a pointer to such a status block. NULL if not tracked.
+ * KMSAN metadata for this page:
+ * - shadow page: every bit indicates whether the corresponding
+ * bit of the original page is initialized (0) or not (1);
+ * - origin page: every 4 bytes contain an id of the stack trace
+ * where the uninitialized value was created.
*/
- void *shadow;
+ struct page *kmsan_shadow;
+ struct page *kmsan_origin;
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
int _last_cpupid;
#endif
-}
+} _struct_page_alignment;
+
/*
- * The struct page can be forced to be double word aligned so that atomic ops
- * on double words work. The SLUB allocator can make use of such a feature.
+ * struct encoded_page - a nonexistent type marking this pointer
+ *
+ * An 'encoded_page' pointer is a pointer to a regular 'struct page', but
+ * with the low bits of the pointer indicating extra context-dependent
+ * information. Not super-common, but happens in mmu_gather and mlock
+ * handling, and this acts as a type system check on that use.
+ *
+ * We only really have two guaranteed bits in general, although you could
+ * play with 'struct page' alignment (see CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
+ * for more.
+ *
+ * Use the supplied helper functions to encode/decode the pointer and bits.
*/
-#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
- __aligned(2 * sizeof(unsigned long))
+struct encoded_page;
+#define ENCODE_PAGE_BITS 3ul
+static __always_inline struct encoded_page *encode_page(struct page *page, unsigned long flags)
+{
+ BUILD_BUG_ON(flags > ENCODE_PAGE_BITS);
+ return (struct encoded_page *)(flags | (unsigned long)page);
+}
+
+static inline unsigned long encoded_page_flags(struct encoded_page *page)
+{
+ return ENCODE_PAGE_BITS & (unsigned long)page;
+}
+
+static inline struct page *encoded_page_ptr(struct encoded_page *page)
+{
+ return (struct page *)(~ENCODE_PAGE_BITS & (unsigned long)page);
+}
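A round-trip sketch of the three helpers; the flag value is arbitrary (it must fit within ENCODE_PAGE_BITS) and encoded_page_example() is hypothetical:

```c
static void encoded_page_example(struct page *page)
{
	struct encoded_page *ep = encode_page(page, 1);	/* stash one flag bit */

	WARN_ON(encoded_page_flags(ep) != 1);
	WARN_ON(encoded_page_ptr(ep) != page);
}
```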
+
+/**
+ * struct folio - Represents a contiguous set of bytes.
+ * @flags: Identical to the page flags.
+ * @lru: Least Recently Used list; tracks how recently this folio was used.
+ * @mlock_count: Number of times this folio has been pinned by mlock().
+ * @mapping: The file this page belongs to, or refers to the anon_vma for
+ * anonymous memory.
+ * @index: Offset within the file, in units of pages. For anonymous memory,
+ * this is the index from the beginning of the mmap.
+ * @private: Filesystem per-folio data (see folio_attach_private()).
+ * Used for swp_entry_t if folio_test_swapcache().
+ * @_mapcount: Do not access this member directly. Use folio_mapcount() to
+ * find out how many times this folio is mapped by userspace.
+ * @_refcount: Do not access this member directly. Use folio_ref_count()
+ * to find how many references there are to this folio.
+ * @memcg_data: Memory Control Group data.
+ * @_folio_dtor: Which destructor to use for this folio.
+ * @_folio_order: Do not use directly, call folio_order().
+ * @_entire_mapcount: Do not use directly, call folio_entire_mapcount().
+ * @_nr_pages_mapped: Do not use directly, call folio_mapcount().
+ * @_pincount: Do not use directly, call folio_maybe_dma_pinned().
+ * @_folio_nr_pages: Do not use directly, call folio_nr_pages().
+ * @_hugetlb_subpool: Do not use directly, use accessor in hugetlb.h.
+ * @_hugetlb_cgroup: Do not use directly, use accessor in hugetlb_cgroup.h.
+ * @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h.
+ * @_hugetlb_hwpoison: Do not use directly, call raw_hwp_list_head().
+ * @_deferred_list: Folios to be split under memory pressure.
+ *
+ * A folio is a physically, virtually and logically contiguous set
+ * of bytes. It is a power-of-two in size, and it is aligned to that
+ * same power-of-two. It is at least as large as %PAGE_SIZE. If it is
+ * in the page cache, it is at a file offset which is a multiple of that
+ * power-of-two. It may be mapped into userspace at an address which is
+ * at an arbitrary page offset, but its kernel virtual address is aligned
+ * to its size.
+ */
+struct folio {
+ /* private: don't document the anon union */
+ union {
+ struct {
+ /* public: */
+ unsigned long flags;
+ union {
+ struct list_head lru;
+ /* private: avoid cluttering the output */
+ struct {
+ void *__filler;
+ /* public: */
+ unsigned int mlock_count;
+ /* private: */
+ };
+ /* public: */
+ };
+ struct address_space *mapping;
+ pgoff_t index;
+ void *private;
+ atomic_t _mapcount;
+ atomic_t _refcount;
+#ifdef CONFIG_MEMCG
+ unsigned long memcg_data;
#endif
-;
+ /* private: the union with struct page is transitional */
+ };
+ struct page page;
+ };
+ union {
+ struct {
+ unsigned long _flags_1;
+ unsigned long _head_1;
+ /* public: */
+ unsigned char _folio_dtor;
+ unsigned char _folio_order;
+ atomic_t _entire_mapcount;
+ atomic_t _nr_pages_mapped;
+ atomic_t _pincount;
+#ifdef CONFIG_64BIT
+ unsigned int _folio_nr_pages;
+#endif
+ /* private: the union with struct page is transitional */
+ };
+ struct page __page_1;
+ };
+ union {
+ struct {
+ unsigned long _flags_2;
+ unsigned long _head_2;
+ /* public: */
+ void *_hugetlb_subpool;
+ void *_hugetlb_cgroup;
+ void *_hugetlb_cgroup_rsvd;
+ void *_hugetlb_hwpoison;
+ /* private: the union with struct page is transitional */
+ };
+ struct {
+ unsigned long _flags_2a;
+ unsigned long _head_2a;
+ /* public: */
+ struct list_head _deferred_list;
+ /* private: the union with struct page is transitional */
+ };
+ struct page __page_2;
+ };
+};
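Because the first union overlays a whole struct page, a folio pointer and its head page alias the same memory; the FOLIO_MATCH assertions below pin that layout down at compile time. A sketch, assuming the usual page_folio() accessor from the mm headers:

```c
static void folio_alias_example(struct folio *folio)
{
	struct page *head = &folio->page;

	WARN_ON(head->flags != folio->flags);	/* same storage, two types */
	WARN_ON(page_folio(head) != folio);
}
```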
+
+#define FOLIO_MATCH(pg, fl) \
+ static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))
+FOLIO_MATCH(flags, flags);
+FOLIO_MATCH(lru, lru);
+FOLIO_MATCH(mapping, mapping);
+FOLIO_MATCH(compound_head, lru);
+FOLIO_MATCH(index, index);
+FOLIO_MATCH(private, private);
+FOLIO_MATCH(_mapcount, _mapcount);
+FOLIO_MATCH(_refcount, _refcount);
+#ifdef CONFIG_MEMCG
+FOLIO_MATCH(memcg_data, memcg_data);
+#endif
+#undef FOLIO_MATCH
+#define FOLIO_MATCH(pg, fl) \
+ static_assert(offsetof(struct folio, fl) == \
+ offsetof(struct page, pg) + sizeof(struct page))
+FOLIO_MATCH(flags, _flags_1);
+FOLIO_MATCH(compound_head, _head_1);
+#undef FOLIO_MATCH
+#define FOLIO_MATCH(pg, fl) \
+ static_assert(offsetof(struct folio, fl) == \
+ offsetof(struct page, pg) + 2 * sizeof(struct page))
+FOLIO_MATCH(flags, _flags_2);
+FOLIO_MATCH(compound_head, _head_2);
+#undef FOLIO_MATCH
+
+/*
+ * Used for sizing the vmemmap region on some architectures
+ */
+#define STRUCT_PAGE_MAX_SHIFT (order_base_2(sizeof(struct page)))
#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
+/*
+ * page_private can be used on tail pages. However, PagePrivate is only
+ * checked by the VM on the head page. So page_private on the tail pages
+ * should be used for data that's ancillary to the head page (eg attaching
+ * buffer heads to tail pages after attaching buffer heads to the head page)
+ */
+#define page_private(page) ((page)->private)
+
+static inline void set_page_private(struct page *page, unsigned long private)
+{
+ page->private = private;
+}
+
+static inline void *folio_get_private(struct folio *folio)
+{
+ return folio->private;
+}
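A sketch of stashing per-page private data with these accessors; the value is illustrative and the page is assumed to be owned by the caller:

```c
static void private_example(struct page *page)
{
	set_page_private(page, 0xc0ffee);
	WARN_ON(page_private(page) != 0xc0ffee);
	set_page_private(page, 0);	/* restore before freeing the page */
}
```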
+
struct page_frag_cache {
void * va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
@@ -275,41 +465,66 @@ struct vm_userfaultfd_ctx {
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */
+struct anon_vma_name {
+ struct kref kref;
+ /* The name needs to be at the end because it is dynamically sized. */
+ char name[];
+};
+
+struct vma_lock {
+ struct rw_semaphore lock;
+};
+
+struct vma_numab_state {
+ unsigned long next_scan;
+ unsigned long next_pid_reset;
+ unsigned long access_pids[2];
+};
+
/*
- * This struct defines a memory VMM memory area. There is one of these
- * per VM-area/task. A VM area is any part of the process virtual memory
+ * This struct describes a virtual memory area. There is one of these
+ * per VM-area/task. A VM area is any part of the process virtual memory
* space that has a special rule for the page-fault handlers (ie a shared
* library, the executable area etc).
*/
struct vm_area_struct {
/* The first cache line has the info for VMA tree walking. */
- unsigned long vm_start; /* Our start address within vm_mm. */
- unsigned long vm_end; /* The first byte after our end address
- within vm_mm. */
-
- /* linked list of VM areas per task, sorted by address */
- struct vm_area_struct *vm_next, *vm_prev;
+ union {
+ struct {
+ /* VMA covers [vm_start; vm_end) addresses within mm */
+ unsigned long vm_start;
+ unsigned long vm_end;
+ };
+#ifdef CONFIG_PER_VMA_LOCK
+ struct rcu_head vm_rcu; /* Used for deferred freeing. */
+#endif
+ };
- struct rb_node vm_rb;
+ struct mm_struct *vm_mm; /* The address space we belong to. */
+ pgprot_t vm_page_prot; /* Access permissions of this VMA. */
/*
- * Largest free memory gap in bytes to the left of this VMA.
- * Either between this VMA and vma->vm_prev, or between one of the
- * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
- * get_unmapped_area find a free area of the right size.
+ * Flags, see mm.h.
+ * To modify use vm_flags_{init|reset|set|clear|mod} functions.
*/
- unsigned long rb_subtree_gap;
+ union {
+ const vm_flags_t vm_flags;
+ vm_flags_t __private __vm_flags;
+ };
- /* Second cache line starts here. */
+#ifdef CONFIG_PER_VMA_LOCK
+ int vm_lock_seq;
+ struct vma_lock *vm_lock;
- struct mm_struct *vm_mm; /* The address space we belong to. */
- pgprot_t vm_page_prot; /* Access permissions of this VMA. */
- unsigned long vm_flags; /* Flags, see mm.h. */
+ /* Flag to indicate areas detached from the mm->mm_mt tree */
+ bool detached;
+#endif
/*
* For areas with an address space and backing store,
* linkage into the address_space->i_mmap interval tree.
+ *
*/
struct {
struct rb_node rb;
@@ -322,7 +537,7 @@ struct vm_area_struct {
* can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
* or brk vma (with NULL file) can only be in an anon_vma list.
*/
- struct list_head anon_vma_chain; /* Serialized by mmap_sem &
+ struct list_head anon_vma_chain; /* Serialized by mmap_lock &
* page_table_lock */
struct anon_vma *anon_vma; /* Serialized by page_table_lock */
@@ -335,232 +550,528 @@ struct vm_area_struct {
struct file * vm_file; /* File we map to (can be NULL). */
void * vm_private_data; /* was vm_pte (shared mem) */
+#ifdef CONFIG_ANON_VMA_NAME
+ /*
+ * For private and shared anonymous mappings, a pointer to a null
+ * terminated string containing the name given to the vma, or NULL if
+ * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
+ */
+ struct anon_vma_name *anon_name;
+#endif
+#ifdef CONFIG_SWAP
+ atomic_long_t swap_readahead_info;
+#endif
#ifndef CONFIG_MMU
struct vm_region *vm_region; /* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif
+#ifdef CONFIG_NUMA_BALANCING
+ struct vma_numab_state *numab_state; /* NUMA Balancing state */
+#endif
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
-};
-
-struct core_thread {
- struct task_struct *task;
- struct core_thread *next;
-};
+} __randomize_layout;
-struct core_state {
- atomic_t nr_threads;
- struct core_thread dumper;
- struct completion startup;
+#ifdef CONFIG_SCHED_MM_CID
+struct mm_cid {
+ u64 time;
+ int cid;
};
+#endif
struct kioctx_table;
struct mm_struct {
- struct vm_area_struct *mmap; /* list of VMAs */
- struct rb_root mm_rb;
- u32 vmacache_seqnum; /* per-thread vmacache */
+ struct {
+ struct maple_tree mm_mt;
#ifdef CONFIG_MMU
- unsigned long (*get_unmapped_area) (struct file *filp,
+ unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags);
#endif
- unsigned long mmap_base; /* base of mmap area */
- unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
+ unsigned long mmap_base; /* base of mmap area */
+ unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
- /* Base adresses for compatible mmap() */
- unsigned long mmap_compat_base;
- unsigned long mmap_compat_legacy_base;
+ /* Base addresses for compatible mmap() */
+ unsigned long mmap_compat_base;
+ unsigned long mmap_compat_legacy_base;
+#endif
+ unsigned long task_size; /* size of task vm space */
+ pgd_t * pgd;
+
+#ifdef CONFIG_MEMBARRIER
+ /**
+ * @membarrier_state: Flags controlling membarrier behavior.
+ *
+ * This field is close to @pgd to hopefully fit in the same
+ * cache-line, which needs to be touched by switch_mm().
+ */
+ atomic_t membarrier_state;
#endif
- unsigned long task_size; /* size of task vm space */
- unsigned long highest_vm_end; /* highest vma end address */
- pgd_t * pgd;
-
- /**
- * @mm_users: The number of users including userspace.
- *
- * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops
- * to 0 (i.e. when the task exits and there are no other temporary
- * reference holders), we also release a reference on @mm_count
- * (which may then free the &struct mm_struct if @mm_count also
- * drops to 0).
- */
- atomic_t mm_users;
- /**
- * @mm_count: The number of references to &struct mm_struct
- * (@mm_users count as 1).
- *
- * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
- * &struct mm_struct is freed.
- */
- atomic_t mm_count;
+ /**
+ * @mm_users: The number of users including userspace.
+ *
+ * Use mmget()/mmget_not_zero()/mmput() to modify. When this
+ * drops to 0 (i.e. when the task exits and there are no other
+ * temporary reference holders), we also release a reference on
+ * @mm_count (which may then free the &struct mm_struct if
+ * @mm_count also drops to 0).
+ */
+ atomic_t mm_users;
+
+ /**
+ * @mm_count: The number of references to &struct mm_struct
+ * (@mm_users count as 1).
+ *
+ * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
+ * &struct mm_struct is freed.
+ */
+ atomic_t mm_count;
+#ifdef CONFIG_SCHED_MM_CID
+ /**
+ * @pcpu_cid: Per-cpu current cid.
+ *
+ * Keep track of the currently allocated mm_cid for each cpu.
+ * The per-cpu mm_cid values are serialized by their respective
+ * runqueue locks.
+ */
+ struct mm_cid __percpu *pcpu_cid;
+ /*
+ * @mm_cid_next_scan: Next mm_cid scan (in jiffies).
+ *
+ * When the next mm_cid scan is due (in jiffies).
+ */
+ unsigned long mm_cid_next_scan;
+#endif
+#ifdef CONFIG_MMU
+ atomic_long_t pgtables_bytes; /* size of all page tables */
+#endif
+ int map_count; /* number of VMAs */
- atomic_long_t nr_ptes; /* PTE page table pages */
-#if CONFIG_PGTABLE_LEVELS > 2
- atomic_long_t nr_pmds; /* PMD page table pages */
+ spinlock_t page_table_lock; /* Protects page tables and some
+ * counters
+ */
+ /*
+ * With some kernel configs, the current mmap_lock's offset
+ * inside 'mm_struct' is 0x120, which is optimal, as
+ * its two hot fields 'count' and 'owner' sit in 2 different
+ * cachelines, and when mmap_lock is highly contended, both
+ * of the 2 fields will be accessed frequently, current layout
+ * will help to reduce cache bouncing.
+ *
+ * So please be careful with adding new fields before
+ * mmap_lock, which can easily push the 2 fields into one
+ * cacheline.
+ */
+ struct rw_semaphore mmap_lock;
+
+ struct list_head mmlist; /* List of maybe swapped mm's. These
+ * are globally strung together off
+ * init_mm.mmlist, and are protected
+ * by mmlist_lock
+ */
+#ifdef CONFIG_PER_VMA_LOCK
+ int mm_lock_seq;
#endif
- int map_count; /* number of VMAs */
- spinlock_t page_table_lock; /* Protects page tables and some counters */
- struct rw_semaphore mmap_sem;
- struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung
- * together off init_mm.mmlist, and are protected
- * by mmlist_lock
- */
+ unsigned long hiwater_rss; /* High-watermark of RSS usage */
+ unsigned long hiwater_vm; /* High-water virtual memory usage */
+ unsigned long total_vm; /* Total pages mapped */
+ unsigned long locked_vm; /* Pages that have PG_mlocked set */
+ atomic64_t pinned_vm; /* Refcount permanently increased */
+ unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
+ unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
+ unsigned long stack_vm; /* VM_STACK */
+ unsigned long def_flags;
- unsigned long hiwater_rss; /* High-watermark of RSS usage */
- unsigned long hiwater_vm; /* High-water virtual memory usage */
+ /**
+ * @write_protect_seq: Locked when any thread is write
+ * protecting pages mapped by this mm to enforce a later COW,
+ * for instance during page table copying for fork().
+ */
+ seqcount_t write_protect_seq;
- unsigned long total_vm; /* Total pages mapped */
- unsigned long locked_vm; /* Pages that have PG_mlocked set */
- unsigned long pinned_vm; /* Refcount permanently increased */
- unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
- unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
- unsigned long stack_vm; /* VM_STACK */
- unsigned long def_flags;
- unsigned long start_code, end_code, start_data, end_data;
- unsigned long start_brk, brk, start_stack;
- unsigned long arg_start, arg_end, env_start, env_end;
+ spinlock_t arg_lock; /* protect the below fields */
- unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
+ unsigned long start_code, end_code, start_data, end_data;
+ unsigned long start_brk, brk, start_stack;
+ unsigned long arg_start, arg_end, env_start, env_end;
- /*
- * Special counters, in some configurations protected by the
- * page_table_lock, in other configurations by being atomic.
- */
- struct mm_rss_stat rss_stat;
+ unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
- struct linux_binfmt *binfmt;
+ struct percpu_counter rss_stat[NR_MM_COUNTERS];
- cpumask_var_t cpu_vm_mask_var;
+ struct linux_binfmt *binfmt;
- /* Architecture-specific MM context */
- mm_context_t context;
+ /* Architecture-specific MM context */
+ mm_context_t context;
- unsigned long flags; /* Must use atomic bitops to access the bits */
+ unsigned long flags; /* Must use atomic bitops to access */
- struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_AIO
- spinlock_t ioctx_lock;
- struct kioctx_table __rcu *ioctx_table;
+ spinlock_t ioctx_lock;
+ struct kioctx_table __rcu *ioctx_table;
#endif
#ifdef CONFIG_MEMCG
- /*
- * "owner" points to a task that is regarded as the canonical
- * user/owner of this mm. All of the following must be true in
- * order for it to be changed:
- *
- * current == mm->owner
- * current->mm != mm
- * new_owner->mm == mm
- * new_owner->alloc_lock is held
- */
- struct task_struct __rcu *owner;
+ /*
+ * "owner" points to a task that is regarded as the canonical
+ * user/owner of this mm. All of the following must be true in
+ * order for it to be changed:
+ *
+ * current == mm->owner
+ * current->mm != mm
+ * new_owner->mm == mm
+ * new_owner->alloc_lock is held
+ */
+ struct task_struct __rcu *owner;
#endif
- struct user_namespace *user_ns;
+ struct user_namespace *user_ns;
- /* store ref to file /proc/<pid>/exe symlink points to */
- struct file __rcu *exe_file;
+ /* store ref to file /proc/<pid>/exe symlink points to */
+ struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
- struct mmu_notifier_mm *mmu_notifier_mm;
+ struct mmu_notifier_subscriptions *notifier_subscriptions;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
- pgtable_t pmd_huge_pte; /* protected by page_table_lock */
-#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
- struct cpumask cpumask_allocation;
+ pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
- /*
- * numa_next_scan is the next time that the PTEs will be marked
- * pte_numa. NUMA hinting faults will gather statistics and migrate
- * pages to new nodes if necessary.
- */
- unsigned long numa_next_scan;
+ /*
+ * numa_next_scan is the next time that PTEs will be remapped
+ * PROT_NONE to trigger NUMA hinting faults; such faults gather
+ * statistics and migrate pages to new nodes if necessary.
+ */
+ unsigned long numa_next_scan;
- /* Restart point for scanning and setting pte_numa */
- unsigned long numa_scan_offset;
+ /* Restart point for scanning and remapping PTEs. */
+ unsigned long numa_scan_offset;
- /* numa_scan_seq prevents two threads setting pte_numa */
- int numa_scan_seq;
+ /* numa_scan_seq prevents two threads remapping PTEs. */
+ int numa_scan_seq;
#endif
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
- /*
- * An operation with batched TLB flushing is going on. Anything that
- * can move process memory needs to flush the TLB when moving a
- * PROT_NONE or PROT_NUMA mapped page.
- */
- bool tlb_flush_pending;
+ /*
+ * An operation with batched TLB flushing is going on. Anything
+ * that can move process memory needs to flush the TLB when
+ * moving a PROT_NONE mapped page.
+ */
+ atomic_t tlb_flush_pending;
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+ /* See flush_tlb_batched_pending() */
+ atomic_t tlb_flush_batched;
+#endif
+ struct uprobes_state uprobes_state;
+#ifdef CONFIG_PREEMPT_RT
+ struct rcu_head delayed_drop;
#endif
- struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
- atomic_long_t hugetlb_usage;
+ atomic_long_t hugetlb_usage;
+#endif
+ struct work_struct async_put_work;
+
+#ifdef CONFIG_IOMMU_SVA
+ u32 pasid;
+#endif
+#ifdef CONFIG_KSM
+ /*
+ * Represent how many pages of this process are involved in KSM
+ * merging.
+ */
+ unsigned long ksm_merging_pages;
+ /*
+ * Represent how many pages are checked for ksm merging
+ * including merged and not merged.
+ */
+ unsigned long ksm_rmap_items;
#endif
- struct work_struct async_put_work;
+#ifdef CONFIG_LRU_GEN
+ struct {
+ /* this mm_struct is on lru_gen_mm_list */
+ struct list_head list;
+ /*
+ * Set when switching to this mm_struct, as a hint of
+ * whether it has been used since the last time per-node
+ * page table walkers cleared the corresponding bits.
+ */
+ unsigned long bitmap;
+#ifdef CONFIG_MEMCG
+ /* points to the memcg of "owner" above */
+ struct mem_cgroup *memcg;
+#endif
+ } lru_gen;
+#endif /* CONFIG_LRU_GEN */
+ } __randomize_layout;
+
+ /*
+ * The mm_cpumask needs to be at the end of mm_struct, because it
+ * is dynamically sized based on nr_cpu_ids.
+ */
+ unsigned long cpu_bitmap[];
};
+#define MM_MT_FLAGS (MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN | \
+ MT_FLAGS_USE_RCU)
extern struct mm_struct init_mm;
+/* Pointer magic because the dynamic array size confuses some compilers. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
-#ifdef CONFIG_CPUMASK_OFFSTACK
- mm->cpu_vm_mask_var = &mm->cpumask_allocation;
-#endif
- cpumask_clear(mm->cpu_vm_mask_var);
+ unsigned long cpu_bitmap = (unsigned long)mm;
+
+ cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
+ cpumask_clear((struct cpumask *)cpu_bitmap);
}
/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
- return mm->cpu_vm_mask_var;
+ return (struct cpumask *)&mm->cpu_bitmap;
}
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
-/*
- * Memory barriers to keep this state in sync are graciously provided by
- * the page table locks, outside of which no page table modifications happen.
- * The barriers below prevent the compiler from re-ordering the instructions
- * around the memory barriers that are already present in the code.
- */
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+#ifdef CONFIG_LRU_GEN
+
+struct lru_gen_mm_list {
+ /* mm_struct list for page table walkers */
+ struct list_head fifo;
+ /* protects the list above */
+ spinlock_t lock;
+};
+
+void lru_gen_add_mm(struct mm_struct *mm);
+void lru_gen_del_mm(struct mm_struct *mm);
+#ifdef CONFIG_MEMCG
+void lru_gen_migrate_mm(struct mm_struct *mm);
+#endif
+
+static inline void lru_gen_init_mm(struct mm_struct *mm)
{
- barrier();
- return mm->tlb_flush_pending;
+ INIT_LIST_HEAD(&mm->lru_gen.list);
+ mm->lru_gen.bitmap = 0;
+#ifdef CONFIG_MEMCG
+ mm->lru_gen.memcg = NULL;
+#endif
}
-static inline void set_tlb_flush_pending(struct mm_struct *mm)
-{
- mm->tlb_flush_pending = true;
+static inline void lru_gen_use_mm(struct mm_struct *mm)
+{
/*
- * Guarantee that the tlb_flush_pending store does not leak into the
- * critical section updating the page tables
+ * When the bitmap is set, page reclaim knows this mm_struct has been
+ * used since the last time it cleared the bitmap. So it might be worth
+ * walking the page tables of this mm_struct to clear the accessed bit.
*/
- smp_mb__before_spinlock();
+ WRITE_ONCE(mm->lru_gen.bitmap, -1);
}
-/* Clearing is done after a TLB flush, which also provides a barrier. */
-static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+
+#else /* !CONFIG_LRU_GEN */
+
+static inline void lru_gen_add_mm(struct mm_struct *mm)
{
- barrier();
- mm->tlb_flush_pending = false;
}
-#else
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+
+static inline void lru_gen_del_mm(struct mm_struct *mm)
{
- return false;
}
-static inline void set_tlb_flush_pending(struct mm_struct *mm)
+
+#ifdef CONFIG_MEMCG
+static inline void lru_gen_migrate_mm(struct mm_struct *mm)
{
}
-static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+#endif
+
+static inline void lru_gen_init_mm(struct mm_struct *mm)
{
}
-#endif
+
+static inline void lru_gen_use_mm(struct mm_struct *mm)
+{
+}
+
+#endif /* CONFIG_LRU_GEN */
+
+struct vma_iterator {
+ struct ma_state mas;
+};
+
+#define VMA_ITERATOR(name, __mm, __addr) \
+ struct vma_iterator name = { \
+ .mas = { \
+ .tree = &(__mm)->mm_mt, \
+ .index = __addr, \
+ .node = MAS_START, \
+ }, \
+ }
+
+static inline void vma_iter_init(struct vma_iterator *vmi,
+ struct mm_struct *mm, unsigned long addr)
+{
+ mas_init(&vmi->mas, &mm->mm_mt, addr);
+}
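A usage sketch of the iterator; for_each_vma() is the walker defined on top of it in linux/mm.h (an assumption of this example, not part of this hunk):

```c
static unsigned long count_vmas(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	unsigned long n = 0;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma)
		n++;
	mmap_read_unlock(mm);
	return n;
}
```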
+
+#ifdef CONFIG_SCHED_MM_CID
+
+enum mm_cid_state {
+ MM_CID_UNSET = -1U, /* Unset state has lazy_put flag set. */
+ MM_CID_LAZY_PUT = (1U << 31),
+};
+
+static inline bool mm_cid_is_unset(int cid)
+{
+ return cid == MM_CID_UNSET;
+}
+
+static inline bool mm_cid_is_lazy_put(int cid)
+{
+ return !mm_cid_is_unset(cid) && (cid & MM_CID_LAZY_PUT);
+}
+
+static inline bool mm_cid_is_valid(int cid)
+{
+ return !(cid & MM_CID_LAZY_PUT);
+}
+
+static inline int mm_cid_set_lazy_put(int cid)
+{
+ return cid | MM_CID_LAZY_PUT;
+}
+
+static inline int mm_cid_clear_lazy_put(int cid)
+{
+ return cid & ~MM_CID_LAZY_PUT;
+}
+
+/* Accessor for struct mm_struct's cidmask. */
+static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
+{
+ unsigned long cid_bitmap = (unsigned long)mm;
+
+ cid_bitmap += offsetof(struct mm_struct, cpu_bitmap);
+ /* Skip cpu_bitmap */
+ cid_bitmap += cpumask_size();
+ return (struct cpumask *)cid_bitmap;
+}
+
+static inline void mm_init_cid(struct mm_struct *mm)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, i);
+
+ pcpu_cid->cid = MM_CID_UNSET;
+ pcpu_cid->time = 0;
+ }
+ cpumask_clear(mm_cidmask(mm));
+}
+
+static inline int mm_alloc_cid(struct mm_struct *mm)
+{
+ mm->pcpu_cid = alloc_percpu(struct mm_cid);
+ if (!mm->pcpu_cid)
+ return -ENOMEM;
+ mm_init_cid(mm);
+ return 0;
+}
+
+static inline void mm_destroy_cid(struct mm_struct *mm)
+{
+ free_percpu(mm->pcpu_cid);
+ mm->pcpu_cid = NULL;
+}
+
+static inline unsigned int mm_cid_size(void)
+{
+ return cpumask_size();
+}
+#else /* CONFIG_SCHED_MM_CID */
+static inline void mm_init_cid(struct mm_struct *mm) { }
+static inline int mm_alloc_cid(struct mm_struct *mm) { return 0; }
+static inline void mm_destroy_cid(struct mm_struct *mm) { }
+static inline unsigned int mm_cid_size(void)
+{
+ return 0;
+}
+#endif /* CONFIG_SCHED_MM_CID */
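The cid state machine packs a lazy-put flag into the high bit of the integer. A bit-twiddling sketch of the helpers above, using an arbitrary example cid:

```c
static void mm_cid_example(void)
{
	int cid = 3;

	WARN_ON(!mm_cid_is_valid(cid));
	cid = mm_cid_set_lazy_put(cid);
	WARN_ON(!mm_cid_is_lazy_put(cid));
	WARN_ON(mm_cid_is_valid(cid));	/* the lazy-put bit marks it invalid */
	cid = mm_cid_clear_lazy_put(cid);
	WARN_ON(cid != 3);
}
```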
+
+struct mmu_gather;
+extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
+extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
+extern void tlb_finish_mmu(struct mmu_gather *tlb);
struct vm_fault;
+/**
+ * typedef vm_fault_t - Return type for page fault handlers.
+ *
+ * Page fault handlers return a bitmask of %VM_FAULT values.
+ */
+typedef __bitwise unsigned int vm_fault_t;
+
+/**
+ * enum vm_fault_reason - Page fault handlers return a bitmask of
+ * these values to tell the core VM what happened when handling the
+ * fault. Used to decide whether a process gets delivered SIGBUS or
+ * just gets major/minor fault counters bumped up.
+ *
+ * @VM_FAULT_OOM: Out Of Memory
+ * @VM_FAULT_SIGBUS: Bad access
+ * @VM_FAULT_MAJOR: Page read from storage
+ * @VM_FAULT_HWPOISON: Hit poisoned small page
+ * @VM_FAULT_HWPOISON_LARGE: Hit poisoned large page. Index encoded
+ * in upper bits
+ * @VM_FAULT_SIGSEGV: segmentation fault
+ * @VM_FAULT_NOPAGE: ->fault installed the pte; does not return a page
+ * @VM_FAULT_LOCKED: ->fault locked the returned page
+ * @VM_FAULT_RETRY: ->fault blocked, must retry
+ * @VM_FAULT_FALLBACK: huge page fault failed, fall back to small
+ * @VM_FAULT_DONE_COW: ->fault has fully handled COW
+ * @VM_FAULT_NEEDDSYNC: ->fault did not modify page tables and needs
+ * fsync() to complete (for synchronous page faults
+ * in DAX)
+ * @VM_FAULT_COMPLETED: ->fault completed, meanwhile mmap lock released
+ * @VM_FAULT_HINDEX_MASK: mask HINDEX value
+ *
+ */
+enum vm_fault_reason {
+ VM_FAULT_OOM = (__force vm_fault_t)0x000001,
+ VM_FAULT_SIGBUS = (__force vm_fault_t)0x000002,
+ VM_FAULT_MAJOR = (__force vm_fault_t)0x000004,
+ VM_FAULT_HWPOISON = (__force vm_fault_t)0x000010,
+ VM_FAULT_HWPOISON_LARGE = (__force vm_fault_t)0x000020,
+ VM_FAULT_SIGSEGV = (__force vm_fault_t)0x000040,
+ VM_FAULT_NOPAGE = (__force vm_fault_t)0x000100,
+ VM_FAULT_LOCKED = (__force vm_fault_t)0x000200,
+ VM_FAULT_RETRY = (__force vm_fault_t)0x000400,
+ VM_FAULT_FALLBACK = (__force vm_fault_t)0x000800,
+ VM_FAULT_DONE_COW = (__force vm_fault_t)0x001000,
+ VM_FAULT_NEEDDSYNC = (__force vm_fault_t)0x002000,
+ VM_FAULT_COMPLETED = (__force vm_fault_t)0x004000,
+ VM_FAULT_HINDEX_MASK = (__force vm_fault_t)0x0f0000,
+};
+
+/* Encode hstate index for a hwpoisoned large page */
+#define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
+#define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)
+
+#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | \
+ VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON | \
+ VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK)
+
+#define VM_FAULT_RESULT_TRACE \
+ { VM_FAULT_OOM, "OOM" }, \
+ { VM_FAULT_SIGBUS, "SIGBUS" }, \
+ { VM_FAULT_MAJOR, "MAJOR" }, \
+ { VM_FAULT_HWPOISON, "HWPOISON" }, \
+ { VM_FAULT_HWPOISON_LARGE, "HWPOISON_LARGE" }, \
+ { VM_FAULT_SIGSEGV, "SIGSEGV" }, \
+ { VM_FAULT_NOPAGE, "NOPAGE" }, \
+ { VM_FAULT_LOCKED, "LOCKED" }, \
+ { VM_FAULT_RETRY, "RETRY" }, \
+ { VM_FAULT_FALLBACK, "FALLBACK" }, \
+ { VM_FAULT_DONE_COW, "DONE_COW" }, \
+ { VM_FAULT_NEEDDSYNC, "NEEDDSYNC" }
+
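A minimal ->fault handler sketch using the bitwise return type; the device and its my_dev_lookup_page() helper are hypothetical:

```c
static vm_fault_t my_dev_fault(struct vm_fault *vmf)
{
	struct page *page = my_dev_lookup_page(vmf->pgoff);	/* hypothetical */

	if (!page)
		return VM_FAULT_SIGBUS;	/* bad access */

	get_page(page);
	vmf->page = page;
	return 0;	/* handled; the core VM maps vmf->page */
}
```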
struct vm_special_mapping {
const char *name; /* The name, e.g. "[vdso]". */
@@ -576,9 +1087,9 @@ struct vm_special_mapping {
* If non-NULL, then this is called to resolve page faults
* on the special mapping. If used, .pages is not checked.
*/
- int (*fault)(const struct vm_special_mapping *sm,
- struct vm_area_struct *vma,
- struct vm_fault *vmf);
+ vm_fault_t (*fault)(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma,
+ struct vm_fault *vmf);
int (*mremap)(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma);
@@ -601,4 +1112,147 @@ typedef struct {
unsigned long val;
} swp_entry_t;
+/**
+ * enum fault_flag - Fault flag definitions.
+ * @FAULT_FLAG_WRITE: Fault was a write fault.
+ * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
+ * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
+ * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
+ * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
+ * @FAULT_FLAG_TRIED: The fault has been tried once.
+ * @FAULT_FLAG_USER: The fault originated in userspace.
+ * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
+ * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
+ * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
+ * @FAULT_FLAG_UNSHARE: The fault is an unsharing request to break COW in a
+ * COW mapping, making sure that an exclusive anon page is
+ * mapped after the fault.
+ * @FAULT_FLAG_ORIG_PTE_VALID: whether the fault has vmf->orig_pte cached.
+ * We should only access orig_pte if this flag set.
+ * @FAULT_FLAG_VMA_LOCK: The fault is handled under VMA lock.
+ *
+ * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify
+ * whether we would allow page faults to retry by specifying these two
+ * fault flags correctly. Currently there can be three legal combinations:
+ *
+ * (a) ALLOW_RETRY and !TRIED: this means the page fault allows retry, and
+ * this is the first try
+ *
+ * (b) ALLOW_RETRY and TRIED: this means the page fault allows retry, and
+ * we've already tried at least once
+ *
+ * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
+ *
+ * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
+ * be used. Note that page faults can be allowed to retry for multiple times,
+ * in which case we'll have an initial fault with flags (a) then later on
+ * continuous faults with flags (b). We should always try to detect pending
+ * signals before a retry to make sure the continuous page faults can still be
+ * interrupted if necessary.
+ *
+ * The combination FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE is illegal.
+ * FAULT_FLAG_UNSHARE is ignored and treated like an ordinary read fault when
+ * applied to mappings that are not COW mappings.
+ */
+enum fault_flag {
+ FAULT_FLAG_WRITE = 1 << 0,
+ FAULT_FLAG_MKWRITE = 1 << 1,
+ FAULT_FLAG_ALLOW_RETRY = 1 << 2,
+ FAULT_FLAG_RETRY_NOWAIT = 1 << 3,
+ FAULT_FLAG_KILLABLE = 1 << 4,
+ FAULT_FLAG_TRIED = 1 << 5,
+ FAULT_FLAG_USER = 1 << 6,
+ FAULT_FLAG_REMOTE = 1 << 7,
+ FAULT_FLAG_INSTRUCTION = 1 << 8,
+ FAULT_FLAG_INTERRUPTIBLE = 1 << 9,
+ FAULT_FLAG_UNSHARE = 1 << 10,
+ FAULT_FLAG_ORIG_PTE_VALID = 1 << 11,
+ FAULT_FLAG_VMA_LOCK = 1 << 12,
+};
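A sketch of checking the three legal ALLOW_RETRY/TRIED combinations described above; fault_allows_retry() is a hypothetical helper:

```c
static bool fault_allows_retry(enum fault_flag flags)
{
	/* (!ALLOW_RETRY && TRIED) is the one illegal combination */
	VM_WARN_ON_ONCE(!(flags & FAULT_FLAG_ALLOW_RETRY) &&
			(flags & FAULT_FLAG_TRIED));

	return flags & FAULT_FLAG_ALLOW_RETRY;
}
```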
+
+typedef unsigned int __bitwise zap_flags_t;
+
+/*
+ * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
+ * other. Here is what they mean, and how to use them:
+ *
+ *
+ * FIXME: For pages which are part of a filesystem, mappings are subject to the
+ * lifetime enforced by the filesystem and we need guarantees that longterm
+ * users like RDMA and V4L2 only establish mappings which coordinate usage with
+ * the filesystem. Ideas for this coordination include revoking the longterm
+ * pin, delaying writeback, bounce buffer page writeback, etc. As FS DAX was
+ * added after the problem with filesystems was found FS DAX VMAs are
+ * specifically failed. Filesystem pages are still subject to bugs and use of
+ * FOLL_LONGTERM should be avoided on those pages.
+ *
+ * In the CMA case: long term pins in a CMA region would unnecessarily fragment
+ * that region. And so, CMA attempts to migrate the page before pinning, when
+ * FOLL_LONGTERM is specified.
+ *
+ * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount,
+ * but an additional pin counting system) will be invoked. This is intended for
+ * anything that gets a page reference and then touches page data (for example,
+ * Direct IO). This lets the filesystem know that some non-file-system entity is
+ * potentially changing the pages' data. In contrast to FOLL_GET (whose pages
+ * are released via put_page()), FOLL_PIN pages must be released, ultimately, by
+ * a call to unpin_user_page().
+ *
+ * FOLL_PIN is similar to FOLL_GET: both of these pin pages. They use different
+ * and separate refcounting mechanisms, however, and that means that each has
+ * its own acquire and release mechanisms:
+ *
+ * FOLL_GET: get_user_pages*() to acquire, and put_page() to release.
+ *
+ * FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_pages to release.
+ *
+ * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call.
+ * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based
+ * calls applied to them, and that's perfectly OK. This is a constraint on the
+ * callers, not on the pages.)
+ *
+ * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never
+ * directly by the caller. That's in order to help avoid mismatches when
+ * releasing pages: get_user_pages*() pages must be released via put_page(),
+ * while pin_user_pages*() pages must be released via unpin_user_page().
+ *
+ * Please see Documentation/core-api/pin_user_pages.rst for more information.
+ */
+
+enum {
+ /* check pte is writable */
+ FOLL_WRITE = 1 << 0,
+ /* do get_page on page */
+ FOLL_GET = 1 << 1,
+ /* give error on hole if it would be zero */
+ FOLL_DUMP = 1 << 2,
+ /* get_user_pages read/write w/o permission */
+ FOLL_FORCE = 1 << 3,
+ /*
+ * if a disk transfer is needed, start the IO and return without waiting
+ * upon it
+ */
+ FOLL_NOWAIT = 1 << 4,
+ /* do not fault in pages */
+ FOLL_NOFAULT = 1 << 5,
+ /* check page is hwpoisoned */
+ FOLL_HWPOISON = 1 << 6,
+ /* don't do file mappings */
+ FOLL_ANON = 1 << 7,
+ /*
+ * FOLL_LONGTERM indicates that the page will be held for an indefinite
+ * time period _often_ under userspace control. This is in contrast to
+ * iov_iter_get_pages(), whose usages are transient.
+ */
+ FOLL_LONGTERM = 1 << 8,
+ /* split huge pmd before returning */
+ FOLL_SPLIT_PMD = 1 << 9,
+ /* allow returning PCI P2PDMA pages */
+ FOLL_PCI_P2PDMA = 1 << 10,
+ /* allow interrupts from generic signals */
+ FOLL_INTERRUPTIBLE = 1 << 11,
+
+ /* See also internal only FOLL flags in mm/internal.h */
+};
+
#endif /* _LINUX_MM_TYPES_H */
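Editor's aside: the acquire/release pairing documented above maps onto caller code like the following minimal sketch. This is an illustration, not part of the patch; the helper name is made up, and pin_user_pages_fast() is used because its prototype (start, nr_pages, gup_flags, pages) has been stable, while FOLL_PIN itself is supplied implicitly by the pin_user_pages*() family.

#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical helper: pin user pages, touch their data, release them. */
static int example_pin_and_touch(unsigned long uaddr, int nr_pages)
{
	struct page **pages;
	int npinned;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* FOLL_PIN is implied; callers must never pass it directly. */
	npinned = pin_user_pages_fast(uaddr, nr_pages,
				      FOLL_WRITE | FOLL_LONGTERM, pages);
	if (npinned < 0) {
		kfree(pages);
		return npinned;
	}

	/* ... access the page data here, e.g. via kmap_local_page() ... */

	unpin_user_pages(pages, npinned);	/* not put_page() */
	kfree(pages);
	return 0;
}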
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index fc412fbd80bd..5414b5c6a103 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_TASK_H
#define _LINUX_MM_TYPES_TASK_H
@@ -24,17 +25,9 @@
#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
/*
- * The per task VMA cache array:
+ * When updating this, please also update struct resident_page_types[] in
+ * kernel/fork.c
*/
-#define VMACACHE_BITS 2
-#define VMACACHE_SIZE (1U << VMACACHE_BITS)
-#define VMACACHE_MASK (VMACACHE_SIZE - 1)
-
-struct vmacache {
- u32 seqnum;
- struct vm_area_struct *vmas[VMACACHE_SIZE];
-};
-
enum {
MM_FILEPAGES, /* Resident file mapping pages */
MM_ANONPAGES, /* Resident anonymous pages */
@@ -43,19 +36,6 @@ enum {
NR_MM_COUNTERS
};
-#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
-#define SPLIT_RSS_COUNTING
-/* per-thread cached information, */
-struct task_rss_stat {
- int events; /* for synchronization threshold */
- int count[NR_MM_COUNTERS];
-};
-#endif /* USE_SPLIT_PTE_PTLOCKS */
-
-struct mm_rss_stat {
- atomic_long_t count[NR_MM_COUNTERS];
-};
-
struct page_frag {
struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
diff --git a/include/linux/mman.h b/include/linux/mman.h
index c8367041fafd..cee1e4b566d8 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMAN_H
#define _LINUX_MMAN_H
@@ -7,6 +8,51 @@
#include <linux/atomic.h>
#include <uapi/linux/mman.h>
+/*
+ * Arrange for legacy / undefined architecture specific flags to be
+ * ignored by mmap handling code.
+ */
+#ifndef MAP_32BIT
+#define MAP_32BIT 0
+#endif
+#ifndef MAP_HUGE_2MB
+#define MAP_HUGE_2MB 0
+#endif
+#ifndef MAP_HUGE_1GB
+#define MAP_HUGE_1GB 0
+#endif
+#ifndef MAP_UNINITIALIZED
+#define MAP_UNINITIALIZED 0
+#endif
+#ifndef MAP_SYNC
+#define MAP_SYNC 0
+#endif
+
+/*
+ * The historical set of flags that all mmap implementations implicitly
+ * support when a ->mmap_validate() op is not provided in file_operations.
+ *
+ * MAP_EXECUTABLE and MAP_DENYWRITE are completely ignored throughout the
+ * kernel.
+ */
+#define LEGACY_MAP_MASK (MAP_SHARED \
+ | MAP_PRIVATE \
+ | MAP_FIXED \
+ | MAP_ANONYMOUS \
+ | MAP_DENYWRITE \
+ | MAP_EXECUTABLE \
+ | MAP_UNINITIALIZED \
+ | MAP_GROWSDOWN \
+ | MAP_LOCKED \
+ | MAP_NORESERVE \
+ | MAP_POPULATE \
+ | MAP_NONBLOCK \
+ | MAP_STACK \
+ | MAP_HUGETLB \
+ | MAP_32BIT \
+ | MAP_HUGE_2MB \
+ | MAP_HUGE_1GB)
+
extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;
@@ -14,8 +60,12 @@ extern struct percpu_counter vm_committed_as;
#ifdef CONFIG_SMP
extern s32 vm_committed_as_batch;
+extern void mm_compute_batch(int overcommit_policy);
#else
#define vm_committed_as_batch 0
+static inline void mm_compute_batch(int overcommit_policy)
+{
+}
#endif
unsigned long vm_memory_committed(void);
@@ -31,15 +81,16 @@ static inline void vm_unacct_memory(long pages)
}
/*
- * Allow architectures to handle additional protection bits
+ * Allow architectures to handle additional protection and flag bits. The
+ * overriding macros must be defined in the arch-specific asm/mman.h file.
*/
#ifndef arch_calc_vm_prot_bits
#define arch_calc_vm_prot_bits(prot, pkey) 0
#endif
-#ifndef arch_vm_get_page_prot
-#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
+#ifndef arch_calc_vm_flag_bits
+#define arch_calc_vm_flag_bits(flags) 0
#endif
#ifndef arch_validate_prot
@@ -49,13 +100,26 @@ static inline void vm_unacct_memory(long pages)
*
* Returns true if the prot flags are valid
*/
-static inline bool arch_validate_prot(unsigned long prot)
+static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
{
return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
}
#define arch_validate_prot arch_validate_prot
#endif
+#ifndef arch_validate_flags
+/*
+ * This is called from mmap() and mprotect() with the updated vma->vm_flags.
+ *
+ * Returns true if the VM_* flags are valid.
+ */
+static inline bool arch_validate_flags(unsigned long flags)
+{
+ return true;
+}
+#define arch_validate_flags arch_validate_flags
+#endif
+
/*
* Optimisation macro. It is equivalent to:
* (x & bit1) ? bit2 : 0
@@ -63,8 +127,9 @@ static inline bool arch_validate_prot(unsigned long prot)
* ("bit1" and "bit2" must be single bits)
*/
#define _calc_vm_trans(x, bit1, bit2) \
+ ((!(bit1) || !(bit2)) ? 0 : \
((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
- : ((x) & (bit1)) / ((bit1) / (bit2)))
+ : ((x) & (bit1)) / ((bit1) / (bit2))))
/*
* Combine the mmap "prot" argument into "vm_flags" used internally.
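Editor's aside on the optimisation macro above: because bit1 and bit2 are single bits, the translation reduces to one multiply or divide by their constant ratio, and the newly added guard folds the whole expression to 0 whenever one of the flags is compiled out as 0 (as MAP_SYNC or MAP_32BIT can be, per the defaults earlier in this header). A compile-time illustration, not part of the patch:

#include <linux/build_bug.h>

static inline void example_calc_vm_trans_check(void)
{
	BUILD_BUG_ON(_calc_vm_trans(0x1, 0x1, 0x8) != 0x8);	/* scaled up by 8 */
	BUILD_BUG_ON(_calc_vm_trans(0x8, 0x8, 0x1) != 0x1);	/* scaled down by 8 */
	BUILD_BUG_ON(_calc_vm_trans(0x2, 0x1, 0x8) != 0);	/* source bit clear */
}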
@@ -85,9 +150,44 @@ static inline unsigned long
calc_vm_flag_bits(unsigned long flags)
{
return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
- _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) |
- _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED );
+ _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) |
+ _calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) |
+ arch_calc_vm_flag_bits(flags);
}
unsigned long vm_commit_limit(void);
+
+/*
+ * Denies creating a writable executable mapping or gaining executable permissions.
+ *
+ * This denies the following:
+ *
+ * a) mmap(PROT_WRITE | PROT_EXEC)
+ *
+ * b) mmap(PROT_WRITE)
+ * mprotect(PROT_EXEC)
+ *
+ * c) mmap(PROT_WRITE)
+ * mprotect(PROT_READ)
+ * mprotect(PROT_EXEC)
+ *
+ * But allows the following:
+ *
+ * d) mmap(PROT_READ | PROT_EXEC)
+ * mmap(PROT_READ | PROT_EXEC | PROT_BTI)
+ */
+static inline bool map_deny_write_exec(struct vm_area_struct *vma, unsigned long vm_flags)
+{
+ if (!test_bit(MMF_HAS_MDWE, &current->mm->flags))
+ return false;
+
+ if ((vm_flags & VM_EXEC) && (vm_flags & VM_WRITE))
+ return true;
+
+ if (!(vma->vm_flags & VM_EXEC) && (vm_flags & VM_EXEC))
+ return true;
+
+ return false;
+}
+
#endif /* _LINUX_MMAN_H */
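Editor's aside: the deny/allow cases (a)-(d) documented above become observable from userspace once the corresponding prctl is enabled. A standalone sketch follows; it is an illustration, not part of the patch, and it assumes PR_SET_MDWE/PR_MDWE_REFUSE_EXEC_GAIN are the prctl constants that accompany this helper (the fallback values below are assumptions for older headers). Case (b)'s mprotect(PROT_EXEC) is expected to fail with EACCES.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_MDWE
#define PR_SET_MDWE			65	/* assumed value */
#define PR_MDWE_REFUSE_EXEC_GAIN	1	/* assumed value */
#endif

int main(void)
{
	void *wx, *w;

	if (prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN, 0, 0, 0))
		perror("PR_SET_MDWE");

	/* Case (a): a W|X mapping is denied outright. */
	wx = mmap(NULL, 4096, PROT_WRITE | PROT_EXEC,
		  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	printf("mmap(W|X): %s\n",
	       wx == MAP_FAILED ? strerror(errno) : "allowed");

	/* Case (b): a writable mapping may not gain PROT_EXEC later. */
	w = mmap(NULL, 4096, PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (w != MAP_FAILED && mprotect(w, 4096, PROT_EXEC))
		printf("mprotect(X): %s\n", strerror(errno)); /* EACCES */

	return 0;
}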
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
new file mode 100644
index 000000000000..aab8f1b28d26
--- /dev/null
+++ b/include/linux/mmap_lock.h
@@ -0,0 +1,183 @@
+#ifndef _LINUX_MMAP_LOCK_H
+#define _LINUX_MMAP_LOCK_H
+
+#include <linux/lockdep.h>
+#include <linux/mm_types.h>
+#include <linux/mmdebug.h>
+#include <linux/rwsem.h>
+#include <linux/tracepoint-defs.h>
+#include <linux/types.h>
+
+#define MMAP_LOCK_INITIALIZER(name) \
+ .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
+
+DECLARE_TRACEPOINT(mmap_lock_start_locking);
+DECLARE_TRACEPOINT(mmap_lock_acquire_returned);
+DECLARE_TRACEPOINT(mmap_lock_released);
+
+#ifdef CONFIG_TRACING
+
+void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
+void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
+ bool success);
+void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);
+
+static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
+ bool write)
+{
+ if (tracepoint_enabled(mmap_lock_start_locking))
+ __mmap_lock_do_trace_start_locking(mm, write);
+}
+
+static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
+ bool write, bool success)
+{
+ if (tracepoint_enabled(mmap_lock_acquire_returned))
+ __mmap_lock_do_trace_acquire_returned(mm, write, success);
+}
+
+static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
+{
+ if (tracepoint_enabled(mmap_lock_released))
+ __mmap_lock_do_trace_released(mm, write);
+}
+
+#else /* !CONFIG_TRACING */
+
+static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
+ bool write)
+{
+}
+
+static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
+ bool write, bool success)
+{
+}
+
+static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
+{
+}
+
+#endif /* CONFIG_TRACING */
+
+static inline void mmap_assert_locked(struct mm_struct *mm)
+{
+ lockdep_assert_held(&mm->mmap_lock);
+ VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
+}
+
+static inline void mmap_assert_write_locked(struct mm_struct *mm)
+{
+ lockdep_assert_held_write(&mm->mmap_lock);
+ VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
+}
+
+#ifdef CONFIG_PER_VMA_LOCK
+static inline void vma_end_write_all(struct mm_struct *mm)
+{
+ mmap_assert_write_locked(mm);
+ /* No races during update due to exclusive mmap_lock being held */
+ WRITE_ONCE(mm->mm_lock_seq, mm->mm_lock_seq + 1);
+}
+#else
+static inline void vma_end_write_all(struct mm_struct *mm) {}
+#endif
+
+static inline void mmap_init_lock(struct mm_struct *mm)
+{
+ init_rwsem(&mm->mmap_lock);
+}
+
+static inline void mmap_write_lock(struct mm_struct *mm)
+{
+ __mmap_lock_trace_start_locking(mm, true);
+ down_write(&mm->mmap_lock);
+ __mmap_lock_trace_acquire_returned(mm, true, true);
+}
+
+static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
+{
+ __mmap_lock_trace_start_locking(mm, true);
+ down_write_nested(&mm->mmap_lock, subclass);
+ __mmap_lock_trace_acquire_returned(mm, true, true);
+}
+
+static inline int mmap_write_lock_killable(struct mm_struct *mm)
+{
+ int ret;
+
+ __mmap_lock_trace_start_locking(mm, true);
+ ret = down_write_killable(&mm->mmap_lock);
+ __mmap_lock_trace_acquire_returned(mm, true, ret == 0);
+ return ret;
+}
+
+static inline bool mmap_write_trylock(struct mm_struct *mm)
+{
+ bool ret;
+
+ __mmap_lock_trace_start_locking(mm, true);
+ ret = down_write_trylock(&mm->mmap_lock) != 0;
+ __mmap_lock_trace_acquire_returned(mm, true, ret);
+ return ret;
+}
+
+static inline void mmap_write_unlock(struct mm_struct *mm)
+{
+ __mmap_lock_trace_released(mm, true);
+ vma_end_write_all(mm);
+ up_write(&mm->mmap_lock);
+}
+
+static inline void mmap_write_downgrade(struct mm_struct *mm)
+{
+ __mmap_lock_trace_acquire_returned(mm, false, true);
+ vma_end_write_all(mm);
+ downgrade_write(&mm->mmap_lock);
+}
+
+static inline void mmap_read_lock(struct mm_struct *mm)
+{
+ __mmap_lock_trace_start_locking(mm, false);
+ down_read(&mm->mmap_lock);
+ __mmap_lock_trace_acquire_returned(mm, false, true);
+}
+
+static inline int mmap_read_lock_killable(struct mm_struct *mm)
+{
+ int ret;
+
+ __mmap_lock_trace_start_locking(mm, false);
+ ret = down_read_killable(&mm->mmap_lock);
+ __mmap_lock_trace_acquire_returned(mm, false, ret == 0);
+ return ret;
+}
+
+static inline bool mmap_read_trylock(struct mm_struct *mm)
+{
+ bool ret;
+
+ __mmap_lock_trace_start_locking(mm, false);
+ ret = down_read_trylock(&mm->mmap_lock) != 0;
+ __mmap_lock_trace_acquire_returned(mm, false, ret);
+ return ret;
+}
+
+static inline void mmap_read_unlock(struct mm_struct *mm)
+{
+ __mmap_lock_trace_released(mm, false);
+ up_read(&mm->mmap_lock);
+}
+
+static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
+{
+ __mmap_lock_trace_released(mm, false);
+ up_read_non_owner(&mm->mmap_lock);
+}
+
+static inline int mmap_lock_is_contended(struct mm_struct *mm)
+{
+ return rwsem_is_contended(&mm->mmap_lock);
+}
+
+#endif /* _LINUX_MMAP_LOCK_H */
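Editor's aside: the read-side pattern these wrappers (and their optional tracepoints) standardise looks roughly like the sketch below; find_vma() is the canonical consumer, and the helper itself is illustrative rather than taken from the tree.

#include <linux/mm.h>
#include <linux/mmap_lock.h>

/* Hypothetical lookup: return the start of the VMA covering @addr, or 0. */
static unsigned long example_vma_start(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	unsigned long start = 0;

	if (mmap_read_lock_killable(mm))
		return 0;	/* interrupted by a fatal signal */

	vma = find_vma(mm, addr);	/* requires mmap_lock to be held */
	if (vma && vma->vm_start <= addr)
		start = vma->vm_start;

	mmap_read_unlock(mm);
	return start;
}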
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 46c73e97e61f..c726ea781255 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/linux/mmc/card.h
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Card driver specific definitions.
*/
#ifndef LINUX_MMC_CARD_H
@@ -29,8 +26,8 @@ struct mmc_csd {
unsigned char structure;
unsigned char mmca_vsn;
unsigned short cmdclass;
- unsigned short tacc_clks;
- unsigned int tacc_ns;
+ unsigned short taac_clks;
+ unsigned int taac_ns;
unsigned int c_size;
unsigned int r2w_factor;
unsigned int max_dtr;
@@ -51,6 +48,7 @@ struct mmc_ext_csd {
u8 sec_feature_support;
u8 rel_sectors;
u8 rel_param;
+ bool enhanced_rpmb_supported;
u8 part_config;
u8 cache_ctrl;
u8 rst_n_function;
@@ -111,6 +109,7 @@ struct mmc_ext_csd {
u8 raw_hc_erase_gap_size; /* 221 */
u8 raw_erase_timeout_mult; /* 223 */
u8 raw_hc_erase_grp_size; /* 224 */
+ u8 raw_boot_mult; /* 226 */
u8 raw_sec_trim_mult; /* 229 */
u8 raw_sec_erase_mult; /* 230 */
u8 raw_sec_feature_support;/* 231 */
@@ -133,12 +132,16 @@ struct mmc_ext_csd {
struct sd_scr {
unsigned char sda_vsn;
unsigned char sda_spec3;
+ unsigned char sda_spec4;
+ unsigned char sda_specx;
unsigned char bus_widths;
#define SD_SCR_BUS_WIDTH_1 (1<<0)
#define SD_SCR_BUS_WIDTH_4 (1<<2)
unsigned char cmds;
#define SD_SCR_CMD20_SUPPORT (1<<0)
#define SD_SCR_CMD23_SUPPORT (1<<1)
+#define SD_SCR_CMD48_SUPPORT (1<<2)
+#define SD_SCR_CMD58_SUPPORT (1<<3)
};
struct sd_ssr {
@@ -156,6 +159,7 @@ struct sd_switch_caps {
#define UHS_DDR50_MAX_DTR 50000000
#define UHS_SDR25_MAX_DTR UHS_DDR50_MAX_DTR
#define UHS_SDR12_MAX_DTR 25000000
+#define DEFAULT_SPEED_MAX_DTR UHS_SDR12_MAX_DTR
unsigned int sd3_bus_mode;
#define UHS_SDR12_BUS_SPEED 0
#define HIGH_SPEED_BUS_SPEED 1
@@ -188,6 +192,25 @@ struct sd_switch_caps {
#define SD_MAX_CURRENT_800 (1 << SD_SET_CURRENT_LIMIT_800)
};
+struct sd_ext_reg {
+ u8 fno;
+ u8 page;
+ u16 offset;
+ u8 rev;
+ u8 feature_enabled;
+ u8 feature_support;
+/* Power Management Function. */
+#define SD_EXT_POWER_OFF_NOTIFY (1<<0)
+#define SD_EXT_POWER_SUSTENANCE (1<<1)
+#define SD_EXT_POWER_DOWN_MODE (1<<2)
+/* Performance Enhancement Function. */
+#define SD_EXT_PERF_FX_EVENT (1<<0)
+#define SD_EXT_PERF_CARD_MAINT (1<<1)
+#define SD_EXT_PERF_HOST_MAINT (1<<2)
+#define SD_EXT_PERF_CACHE (1<<3)
+#define SD_EXT_PERF_CMD_QUEUE (1<<4)
+};
+
struct sdio_cccr {
unsigned int sdio_vsn;
unsigned int sd_vsn;
@@ -196,7 +219,8 @@ struct sdio_cccr {
wide_bus:1,
high_power:1,
high_speed:1,
- disable_cd:1;
+ disable_cd:1,
+ enable_async_irq:1;
};
struct sdio_cis {
@@ -226,7 +250,7 @@ struct mmc_queue_req;
* MMC Physical partitions
*/
struct mmc_part {
- unsigned int size; /* partition size (in bytes) */
+ u64 size; /* partition size (in bytes) */
unsigned int part_cfg; /* partition type */
char name[MAX_MMC_PART_NAME_LEN];
bool force_ro; /* to make boot parts RO by default */
@@ -252,6 +276,7 @@ struct mmc_card {
#define MMC_TYPE_SD_COMBO 3 /* SD combo (IO+mem) card */
unsigned int state; /* (our) card state */
unsigned int quirks; /* card quirks */
+ unsigned int quirk_max_rate; /* max rate set by quirks */
#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */
#define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */
/* for byte mode */
@@ -268,6 +293,7 @@ struct mmc_card {
#define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */
#define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */
#define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */
+#define MMC_QUIRK_BROKEN_SD_DISCARD (1<<14) /* Disable broken SD discard support */
bool reenable_cmdq; /* Re-enable Command Queue */
@@ -275,6 +301,7 @@ struct mmc_card {
unsigned int erase_shift; /* if erase unit is power 2 */
unsigned int pref_erase; /* in sectors */
unsigned int eg_boundary; /* don't cross erase-group boundaries */
+ unsigned int erase_arg; /* erase / trim / discard */
u8 erased_byte; /* value of erased bytes */
u32 raw_cid[4]; /* raw card CID */
@@ -287,12 +314,17 @@ struct mmc_card {
struct sd_scr scr; /* extra SD information */
struct sd_ssr ssr; /* yet more SD information */
struct sd_switch_caps sw_caps; /* switch (CMD6) caps */
+ struct sd_ext_reg ext_power; /* SD extension reg for PM */
+ struct sd_ext_reg ext_perf; /* SD extension reg for PERF */
unsigned int sdio_funcs; /* number of SDIO functions */
+ atomic_t sdio_funcs_probed; /* number of probed SDIO funcs */
struct sdio_cccr cccr; /* common card info */
struct sdio_cis cis; /* common tuple info */
struct sdio_func *sdio_func[SDIO_MAX_FUNCS]; /* SDIO functions (devices) */
struct sdio_func *sdio_single_irq; /* SDIO function when only one IRQ active */
+ u8 major_rev; /* major revision number */
+ u8 minor_rev; /* minor revision number */
unsigned num_info; /* number of info strings */
const char **info; /* info strings */
struct sdio_func_tuple *tuples; /* unknown common tuples */
@@ -305,7 +337,7 @@ struct mmc_card {
struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
unsigned int nr_parts;
- unsigned int bouncesz; /* Bounce buffer size */
+ struct workqueue_struct *complete_wq; /* Private workqueue */
};
static inline bool mmc_large_sector(struct mmc_card *card)
@@ -313,10 +345,16 @@ static inline bool mmc_large_sector(struct mmc_card *card)
return card->ext_csd.data_sector_size == 4096;
}
+static inline int mmc_card_enable_async_irq(struct mmc_card *card)
+{
+ return card->cccr.enable_async_irq;
+}
+
bool mmc_card_is_blockaddr(struct mmc_card *card);
#define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC)
#define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD)
#define mmc_card_sdio(c) ((c)->type == MMC_TYPE_SDIO)
+#define mmc_card_sd_combo(c) ((c)->type == MMC_TYPE_SD_COMBO)
#endif /* LINUX_MMC_CARD_H */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index a0c63ea28796..6efec0b9820c 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/linux/mmc/core.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef LINUX_MMC_CORE_H
#define LINUX_MMC_CORE_H
@@ -110,9 +107,6 @@ struct mmc_command {
*/
unsigned int busy_timeout; /* busy detect timeout in ms */
- /* Set this flag only for blocking sanitize request */
- bool sanitize_busy;
-
struct mmc_data *data; /* data segment associated with cmd */
struct mmc_request *mrq; /* associated request */
};
@@ -122,11 +116,18 @@ struct mmc_data {
unsigned int timeout_clks; /* data timeout (in clocks) */
unsigned int blksz; /* data block size */
unsigned int blocks; /* number of blocks */
+ unsigned int blk_addr; /* block address */
int error; /* data error */
unsigned int flags;
-#define MMC_DATA_WRITE (1 << 8)
-#define MMC_DATA_READ (1 << 9)
+#define MMC_DATA_WRITE BIT(8)
+#define MMC_DATA_READ BIT(9)
+/* Extra flags used by CQE */
+#define MMC_DATA_QBR BIT(10) /* CQE queue barrier */
+#define MMC_DATA_PRIO BIT(11) /* CQE high priority */
+#define MMC_DATA_REL_WR BIT(12) /* Reliable write */
+#define MMC_DATA_DAT_TAG BIT(13) /* Tag request */
+#define MMC_DATA_FORCED_PRG BIT(14) /* Forced programming */
unsigned int bytes_xfered;
@@ -149,23 +150,33 @@ struct mmc_request {
struct completion completion;
struct completion cmd_completion;
void (*done)(struct mmc_request *);/* completion function */
+ /*
+ * Notify upper layers (e.g. mmc block driver) that recovery is needed
+ * due to an error associated with the mmc_request. Currently used only
+ * by CQE.
+ */
+ void (*recovery_notifier)(struct mmc_request *);
struct mmc_host *host;
/* Allow other commands during this ongoing data transfer or busy wait */
bool cap_cmd_during_tfr;
+
+ int tag;
+
+#ifdef CONFIG_MMC_CRYPTO
+ const struct bio_crypt_ctx *crypto_ctx;
+ int crypto_key_slot;
+#endif
};
struct mmc_card;
-struct mmc_async_req;
-struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
- struct mmc_async_req *areq,
- enum mmc_blk_status *ret_stat);
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq);
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd,
int retries);
-int mmc_hw_reset(struct mmc_host *host);
+int mmc_hw_reset(struct mmc_card *card);
+int mmc_sw_reset(struct mmc_card *card);
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card);
#endif /* LINUX_MMC_CORE_H */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index ebd1cebbef0c..461d1543893b 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/linux/mmc/host.h
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Host driver specific definitions.
*/
#ifndef LINUX_MMC_HOST_H
@@ -18,10 +15,12 @@
#include <linux/mmc/card.h>
#include <linux/mmc/pm.h>
#include <linux/dma-direction.h>
+#include <linux/blk-crypto-profile.h>
struct mmc_ios {
unsigned int clock; /* clock rate */
unsigned short vdd;
+ unsigned int power_delay_ms; /* waiting for stable power */
/* vdd stores the bit number of the selected voltage range from below. */
@@ -62,6 +61,8 @@ struct mmc_ios {
#define MMC_TIMING_MMC_DDR52 8
#define MMC_TIMING_MMC_HS200 9
#define MMC_TIMING_MMC_HS400 10
+#define MMC_TIMING_SD_EXP 11
+#define MMC_TIMING_SD_EXP_1_2V 12
unsigned char signal_voltage; /* signalling voltage (1.8V or 3.3V) */
@@ -79,8 +80,38 @@ struct mmc_ios {
bool enhanced_strobe; /* hs400es selection */
};
+struct mmc_clk_phase {
+ bool valid;
+ u16 in_deg;
+ u16 out_deg;
+};
+
+#define MMC_NUM_CLK_PHASES (MMC_TIMING_MMC_HS400 + 1)
+struct mmc_clk_phase_map {
+ struct mmc_clk_phase phase[MMC_NUM_CLK_PHASES];
+};
+
struct mmc_host;
+enum mmc_err_stat {
+ MMC_ERR_CMD_TIMEOUT,
+ MMC_ERR_CMD_CRC,
+ MMC_ERR_DAT_TIMEOUT,
+ MMC_ERR_DAT_CRC,
+ MMC_ERR_AUTO_CMD,
+ MMC_ERR_ADMA,
+ MMC_ERR_TUNING,
+ MMC_ERR_CMDQ_RED,
+ MMC_ERR_CMDQ_GCE,
+ MMC_ERR_CMDQ_ICCE,
+ MMC_ERR_REQ_TIMEOUT,
+ MMC_ERR_CMDQ_REQ_TIMEOUT,
+ MMC_ERR_ICE_CFG,
+ MMC_ERR_CTRL_TIMEOUT,
+ MMC_ERR_UNEXPECTED_IRQ,
+ MMC_ERR_MAX,
+};
+
struct mmc_host_ops {
/*
* It is optional for the host to implement pre_req and post_req in
@@ -94,6 +125,9 @@ struct mmc_host_ops {
int err);
void (*pre_req)(struct mmc_host *host, struct mmc_request *req);
void (*request)(struct mmc_host *host, struct mmc_request *req);
+ /* Submit one request to host in atomic context. */
+ int (*request_atomic)(struct mmc_host *host,
+ struct mmc_request *req);
/*
* Avoid calling the next three functions too often or in a "fast
@@ -130,6 +164,7 @@ struct mmc_host_ops {
int (*get_cd)(struct mmc_host *host);
void (*enable_sdio_irq)(struct mmc_host *host, int enable);
+ /* Mandatory callback when using MMC_CAP2_SDIO_IRQ_NOTHREAD. */
void (*ack_sdio_irq)(struct mmc_host *host);
/* optional callback for HC quirks */
@@ -137,7 +172,7 @@ struct mmc_host_ops {
int (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios);
- /* Check if the card is pulling dat[0:3] low */
+ /* Check if the card is pulling dat[0] low */
int (*card_busy)(struct mmc_host *host);
/* The tuning command opcode value is different for SD and eMMC cards */
@@ -145,13 +180,27 @@ struct mmc_host_ops {
/* Prepare HS400 target operating frequency depending host driver */
int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
+
+ /* Execute HS400 tuning depending host driver */
+ int (*execute_hs400_tuning)(struct mmc_host *host, struct mmc_card *card);
+
+ /* Prepare switch to DDR during the HS400 init sequence */
+ int (*hs400_prepare_ddr)(struct mmc_host *host);
+
+ /* Prepare for switching from HS400 to HS200 */
+ void (*hs400_downgrade)(struct mmc_host *host);
+
+ /* Complete selection of HS400 */
+ void (*hs400_complete)(struct mmc_host *host);
+
/* Prepare enhanced strobe depending host driver */
void (*hs400_enhanced_strobe)(struct mmc_host *host,
struct mmc_ios *ios);
int (*select_drive_strength)(struct mmc_card *card,
unsigned int max_dtr, int host_drv,
int card_drv, int *drv_type);
- void (*hw_reset)(struct mmc_host *host);
+ /* Reset the eMMC card via RST_n */
+ void (*card_hw_reset)(struct mmc_host *host);
void (*card_event)(struct mmc_host *host);
/*
@@ -160,6 +209,53 @@ struct mmc_host_ops {
*/
int (*multi_io_quirk)(struct mmc_card *card,
unsigned int direction, int blk_size);
+
+ /* Initialize an SD express card, mandatory for MMC_CAP2_SD_EXP. */
+ int (*init_sd_express)(struct mmc_host *host, struct mmc_ios *ios);
+};
+
+struct mmc_cqe_ops {
+ /* Allocate resources, and make the CQE operational */
+ int (*cqe_enable)(struct mmc_host *host, struct mmc_card *card);
+ /* Free resources, and make the CQE non-operational */
+ void (*cqe_disable)(struct mmc_host *host);
+ /*
+ * Issue a read, write or DCMD request to the CQE. Also deal with the
+ * effect of ->cqe_off().
+ */
+ int (*cqe_request)(struct mmc_host *host, struct mmc_request *mrq);
+ /* Free resources (e.g. DMA mapping) associated with the request */
+ void (*cqe_post_req)(struct mmc_host *host, struct mmc_request *mrq);
+ /*
+ * Prepare the CQE and host controller to accept non-CQ commands. There
+ * is no corresponding ->cqe_on(); instead, ->cqe_request() is required
+ * to deal with that.
+ */
+ void (*cqe_off)(struct mmc_host *host);
+ /*
+ * Wait for all CQE tasks to complete. Return an error if recovery
+ * becomes necessary.
+ */
+ int (*cqe_wait_for_idle)(struct mmc_host *host);
+ /*
+ * Notify CQE that a request has timed out. Return false if the request
+ * completed, or true if a timeout happened, in which case indicate
+ * whether recovery is needed.
+ */
+ bool (*cqe_timeout)(struct mmc_host *host, struct mmc_request *mrq,
+ bool *recovery_needed);
+ /*
+ * Stop all CQE activity and prepare the CQE and host controller to
+ * accept recovery commands.
+ */
+ void (*cqe_recovery_start)(struct mmc_host *host);
+ /*
+ * Clear the queue and call mmc_cqe_request_done() on all requests.
+ * Requests that errored will have the error set on the mmc_request
+ * (data->error or cmd->error for DCMD). Requests that did not error
+ * will have zero data bytes transferred.
+ */
+ void (*cqe_recovery_finish)(struct mmc_host *host);
};
struct mmc_async_req {
@@ -211,6 +307,10 @@ struct mmc_supply {
struct regulator *vqmmc; /* Optional Vccq supply */
};
+struct mmc_ctx {
+ struct task_struct *task;
+};
+
struct mmc_host {
struct device *parent;
struct device class_dev;
@@ -224,9 +324,7 @@ struct mmc_host {
u32 ocr_avail_sdio; /* SDIO-specific OCR */
u32 ocr_avail_sd; /* SD-specific OCR */
u32 ocr_avail_mmc; /* MMC-specific OCR */
-#ifdef CONFIG_PM_SLEEP
- struct notifier_block pm_notify;
-#endif
+ struct wakeup_source *ws; /* Enable consumption of uevents */
u32 max_current_330;
u32 max_current_300;
u32 max_current_180;
@@ -261,10 +359,11 @@ struct mmc_host {
#define MMC_CAP_AGGRESSIVE_PM (1 << 7) /* Suspend (e)MMC/SD at idle */
#define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. eMMC */
#define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */
-#define MMC_CAP_ERASE (1 << 10) /* Allow erase/trim commands */
#define MMC_CAP_3_3V_DDR (1 << 11) /* Host supports eMMC DDR 3.3V */
#define MMC_CAP_1_8V_DDR (1 << 12) /* Host supports eMMC DDR 1.8V */
#define MMC_CAP_1_2V_DDR (1 << 13) /* Host supports eMMC DDR 1.2V */
+#define MMC_CAP_DDR (MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR | \
+ MMC_CAP_1_2V_DDR)
#define MMC_CAP_POWER_OFF_CARD (1 << 14) /* Can power off after boot */
#define MMC_CAP_BUS_WIDTH_TEST (1 << 15) /* CMD14/CMD19 bus width ok */
#define MMC_CAP_UHS_SDR12 (1 << 16) /* Host supports UHS SDR12 mode */
@@ -272,34 +371,39 @@ struct mmc_host {
#define MMC_CAP_UHS_SDR50 (1 << 18) /* Host supports UHS SDR50 mode */
#define MMC_CAP_UHS_SDR104 (1 << 19) /* Host supports UHS SDR104 mode */
#define MMC_CAP_UHS_DDR50 (1 << 20) /* Host supports UHS DDR50 mode */
-#define MMC_CAP_NO_BOUNCE_BUFF (1 << 21) /* Disable bounce buffers on host */
+#define MMC_CAP_UHS (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | \
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | \
+ MMC_CAP_UHS_DDR50)
+#define MMC_CAP_SYNC_RUNTIME_PM (1 << 21) /* Synced runtime PM suspends. */
+#define MMC_CAP_NEED_RSP_BUSY (1 << 22) /* Commands with R1B can't use R1. */
#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
+#define MMC_CAP_DONE_COMPLETE (1 << 27) /* RW reqs can be completed within mmc_request_done() */
#define MMC_CAP_CD_WAKE (1 << 28) /* Enable card detect wake */
#define MMC_CAP_CMD_DURING_TFR (1 << 29) /* Commands during data transfer */
#define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */
-#define MMC_CAP_HW_RESET (1 << 31) /* Hardware reset */
+#define MMC_CAP_HW_RESET (1 << 31) /* Reset the eMMC card via RST_n */
u32 caps2; /* More host capabilities */
#define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */
#define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */
+#define MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND (1 << 3) /* Can do full power cycle in suspend */
#define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */
#define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */
#define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \
MMC_CAP2_HS200_1_2V_SDR)
+#define MMC_CAP2_SD_EXP (1 << 7) /* SD express via PCIe */
+#define MMC_CAP2_SD_EXP_1_2V (1 << 8) /* SD express 1.2V */
#define MMC_CAP2_CD_ACTIVE_HIGH (1 << 10) /* Card-detect signal active high */
#define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */
-#define MMC_CAP2_PACKED_RD (1 << 12) /* Allow packed read */
-#define MMC_CAP2_PACKED_WR (1 << 13) /* Allow packed write */
-#define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \
- MMC_CAP2_PACKED_WR)
#define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */
#define MMC_CAP2_HS400_1_8V (1 << 15) /* Can support HS400 1.8V */
#define MMC_CAP2_HS400_1_2V (1 << 16) /* Can support HS400 1.2V */
#define MMC_CAP2_HS400 (MMC_CAP2_HS400_1_8V | \
MMC_CAP2_HS400_1_2V)
+#define MMC_CAP2_HSX00_1_8V (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)
#define MMC_CAP2_HSX00_1_2V (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V)
#define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17)
#define MMC_CAP2_NO_WRITE_PROTECT (1 << 18) /* No physical write protect pin, assume that card is always read-write */
@@ -307,6 +411,18 @@ struct mmc_host {
#define MMC_CAP2_HS400_ES (1 << 20) /* Host supports enhanced strobe */
#define MMC_CAP2_NO_SD (1 << 21) /* Do not send SD commands during initialization */
#define MMC_CAP2_NO_MMC (1 << 22) /* Do not send (e)MMC commands during initialization */
+#define MMC_CAP2_CQE (1 << 23) /* Has eMMC command queue engine */
+#define MMC_CAP2_CQE_DCMD (1 << 24) /* CQE can issue a direct command */
+#define MMC_CAP2_AVOID_3_3V (1 << 25) /* Host must negotiate down from 3.3V */
+#define MMC_CAP2_MERGE_CAPABLE (1 << 26) /* Host can merge a segment over the segment size */
+#ifdef CONFIG_MMC_CRYPTO
+#define MMC_CAP2_CRYPTO (1 << 27) /* Host supports inline encryption */
+#else
+#define MMC_CAP2_CRYPTO 0
+#endif
+#define MMC_CAP2_ALT_GPT_TEGRA (1 << 28) /* Host with eMMC that has GPT entry at a non-standard location */
+
+ int fixed_drv_type; /* fixed driver type for non-removable media */
mmc_pm_flag_t pm_caps; /* supported pm features */
@@ -327,14 +443,14 @@ struct mmc_host {
/* group bitfields together to minimize padding */
unsigned int use_spi_crc:1;
unsigned int claimed:1; /* host exclusively claimed */
- unsigned int bus_dead:1; /* bus has been released */
-#ifdef CONFIG_MMC_DEBUG
- unsigned int removed:1; /* host is being removed */
-#endif
+ unsigned int doing_init_tune:1; /* initial tuning in progress */
unsigned int can_retune:1; /* re-tuning can be used */
unsigned int doing_retune:1; /* re-tuning in progress */
unsigned int retune_now:1; /* do re-tuning at next req */
unsigned int retune_paused:1; /* re-tuning is temporarily disabled */
+ unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */
+ unsigned int can_dma_map_merge:1; /* merging can be used */
+ unsigned int vqmmc_enabled:1; /* vqmmc regulator is enabled */
int rescan_disable; /* disable card detection */
int rescan_entered; /* used with nonremovable devices */
@@ -349,19 +465,19 @@ struct mmc_host {
struct mmc_card *card; /* device attached to this host */
wait_queue_head_t wq;
- struct task_struct *claimer; /* task that has host claimed */
+ struct mmc_ctx *claimer; /* context that has host claimed */
int claim_cnt; /* "claim" nesting count */
+ struct mmc_ctx default_ctx; /* default context */
struct delayed_work detect;
int detect_change; /* card detect flag */
struct mmc_slot slot;
const struct mmc_bus_ops *bus_ops; /* current bus driver */
- unsigned int bus_refs; /* reference counter */
unsigned int sdio_irqs;
struct task_struct *sdio_irq_thread;
- struct delayed_work sdio_irq_work;
+ struct work_struct sdio_irq_work;
bool sdio_irq_pending;
atomic_t sdio_irq_thread_abort;
@@ -376,9 +492,6 @@ struct mmc_host {
struct dentry *debugfs_root;
- struct mmc_async_req *areq; /* active async req */
- struct mmc_context_info context_info; /* async synchronization info */
-
/* Ongoing data transfer that allows commands during transfer */
struct mmc_request *ongoing_mrq;
@@ -393,36 +506,68 @@ struct mmc_host {
int dsr_req; /* DSR value is valid */
u32 dsr; /* optional driver stage (DSR) value */
- unsigned long private[0] ____cacheline_aligned;
+ /* Command Queue Engine (CQE) support */
+ const struct mmc_cqe_ops *cqe_ops;
+ void *cqe_private;
+ int cqe_qdepth;
+ bool cqe_enabled;
+ bool cqe_on;
+
+ /* Inline encryption support */
+#ifdef CONFIG_MMC_CRYPTO
+ struct blk_crypto_profile crypto_profile;
+#endif
+
+ /* Host Software Queue support */
+ bool hsq_enabled;
+
+ u32 err_stats[MMC_ERR_MAX];
+ unsigned long private[] ____cacheline_aligned;
};
struct device_node;
struct mmc_host *mmc_alloc_host(int extra, struct device *);
+struct mmc_host *devm_mmc_alloc_host(struct device *dev, int extra);
int mmc_add_host(struct mmc_host *);
void mmc_remove_host(struct mmc_host *);
void mmc_free_host(struct mmc_host *);
+void mmc_of_parse_clk_phase(struct mmc_host *host,
+ struct mmc_clk_phase_map *map);
int mmc_of_parse(struct mmc_host *host);
-int mmc_of_parse_voltage(struct device_node *np, u32 *mask);
+int mmc_of_parse_voltage(struct mmc_host *host, u32 *mask);
static inline void *mmc_priv(struct mmc_host *host)
{
return (void *)host->private;
}
+static inline struct mmc_host *mmc_from_priv(void *priv)
+{
+ return container_of(priv, struct mmc_host, private);
+}
+
#define mmc_host_is_spi(host) ((host)->caps & MMC_CAP_SPI)
#define mmc_dev(x) ((x)->parent)
#define mmc_classdev(x) (&(x)->class_dev)
#define mmc_hostname(x) (dev_name(&(x)->class_dev))
-int mmc_power_save_host(struct mmc_host *host);
-int mmc_power_restore_host(struct mmc_host *host);
-
void mmc_detect_change(struct mmc_host *, unsigned long delay);
void mmc_request_done(struct mmc_host *, struct mmc_request *);
void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq);
+void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq);
+
+/*
+ * May be called from a host driver's system/runtime suspend/resume callbacks,
+ * to know if SDIO IRQs have been claimed.
+ */
+static inline bool sdio_irq_claimed(struct mmc_host *host)
+{
+ return host->sdio_irqs > 0;
+}
+
static inline void mmc_signal_sdio_irq(struct mmc_host *host)
{
host->ops->enable_sdio_irq(host, 0);
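Editor's aside: as the new sdio_irq_claimed() comment says, it is intended for host-driver PM callbacks. A hypothetical system-suspend handler (the function and drvdata arrangement are assumptions) might use it to keep the card powered so a claimed SDIO IRQ can still be delivered:

static int example_host_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);

	/*
	 * A function driver has claimed the SDIO IRQ: keep the card
	 * powered so it can still signal interrupts while suspended.
	 */
	if (sdio_irq_claimed(mmc))
		mmc->pm_flags |= MMC_PM_KEEP_POWER;

	return 0;
}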
@@ -431,21 +576,14 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host)
wake_up_process(host->sdio_irq_thread);
}
-void sdio_run_irqs(struct mmc_host *host);
void sdio_signal_irq(struct mmc_host *host);
#ifdef CONFIG_REGULATOR
-int mmc_regulator_get_ocrmask(struct regulator *supply);
int mmc_regulator_set_ocr(struct mmc_host *mmc,
struct regulator *supply,
unsigned short vdd_bit);
int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios);
#else
-static inline int mmc_regulator_get_ocrmask(struct regulator *supply)
-{
- return 0;
-}
-
static inline int mmc_regulator_set_ocr(struct mmc_host *mmc,
struct regulator *supply,
unsigned short vdd_bit)
@@ -460,8 +598,9 @@ static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc,
}
#endif
-u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max);
int mmc_regulator_get_supply(struct mmc_host *mmc);
+int mmc_regulator_enable_vqmmc(struct mmc_host *mmc);
+void mmc_regulator_disable_vqmmc(struct mmc_host *mmc);
static inline int mmc_card_is_removable(struct mmc_host *host)
{
@@ -505,12 +644,29 @@ static inline bool mmc_can_retune(struct mmc_host *host)
return host->can_retune == 1;
}
+static inline bool mmc_doing_retune(struct mmc_host *host)
+{
+ return host->doing_retune == 1;
+}
+
+static inline bool mmc_doing_tune(struct mmc_host *host)
+{
+ return host->doing_retune == 1 || host->doing_init_tune == 1;
+}
+
static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data)
{
return data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}
+static inline void mmc_debugfs_err_stats_inc(struct mmc_host *host,
+ enum mmc_err_stat stat)
+{
+ host->err_stats[stat] += 1;
+}
+
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
-int mmc_abort_tuning(struct mmc_host *host, u32 opcode);
+int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode);
+int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
#endif /* LINUX_MMC_HOST_H */
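Editor's aside: among the additions in this header, mmc_from_priv() completes the round trip for the long-standing mmc_priv() embedding pattern, and devm_mmc_alloc_host() is its managed counterpart. A sketch of how a host driver might use the pair (struct and field names are illustrative):

struct example_host {
	void __iomem *ioaddr;
	int irq;
};

static int example_probe(struct device *dev)
{
	struct mmc_host *mmc;
	struct example_host *host;

	/* Tail-allocates sizeof(*host) bytes in mmc->private[]. */
	mmc = devm_mmc_alloc_host(dev, sizeof(*host));
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);		/* down: mmc_host -> driver data */
	host->irq = -1;

	/* ... and later, from code that only has @host: */
	mmc = mmc_from_priv(host);	/* up: driver data -> mmc_host */

	return mmc_add_host(mmc);
}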
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 3ffc27aaeeaf..6f7993803ee7 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -99,6 +99,12 @@ static inline bool mmc_op_multi(u32 opcode)
opcode == MMC_READ_MULTIPLE_BLOCK;
}
+static inline bool mmc_op_tuning(u32 opcode)
+{
+ return opcode == MMC_SEND_TUNING_BLOCK ||
+ opcode == MMC_SEND_TUNING_BLOCK_HS200;
+}
+
/*
* MMC_SWITCH argument format:
*
@@ -144,7 +150,7 @@ static inline bool mmc_op_multi(u32 opcode)
#define R1_WP_ERASE_SKIP (1 << 15) /* sx, c */
#define R1_CARD_ECC_DISABLED (1 << 14) /* sx, a */
#define R1_ERASE_RESET (1 << 13) /* sr, c */
-#define R1_STATUS(x) (x & 0xFFFFE000)
+#define R1_STATUS(x) (x & 0xFFF9A000)
#define R1_CURRENT_STATE(x) ((x & 0x00001E00) >> 9) /* sx, b (4 bits) */
#define R1_READY_FOR_DATA (1 << 8) /* sx, a */
#define R1_SWITCH_ERROR (1 << 7) /* sx, c */
@@ -161,6 +167,16 @@ static inline bool mmc_op_multi(u32 opcode)
#define R1_STATE_PRG 7
#define R1_STATE_DIS 8
+static inline bool mmc_ready_for_data(u32 status)
+{
+ /*
+ * Some cards mishandle the status bits, so make sure to check both the
+ * busy indication and the card state.
+ */
+ return status & R1_READY_FOR_DATA &&
+ R1_CURRENT_STATE(status) == R1_STATE_TRAN;
+}
+
/*
* MMC/SD in SPI mode reports R1 status always, and R2 for SEND_STATUS
* R1 is the low order byte; R2 is the next highest byte, when present.
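Editor's aside: mmc_ready_for_data() is typically applied to the R1 response of CMD13 (SEND_STATUS). A hedged polling sketch for a non-SPI host, built only from declarations visible in these headers (the loop bound and delay are arbitrary illustrations):

#include <linux/delay.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/mmc.h>

static int example_poll_ready(struct mmc_card *card)
{
	struct mmc_command cmd = {};
	int i, err;

	cmd.opcode = MMC_SEND_STATUS;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	for (i = 0; i < 100; i++) {
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err)
			return err;
		if (mmc_ready_for_data(cmd.resp[0]))
			return 0;
		usleep_range(1000, 2000);
	}
	return -ETIMEDOUT;
}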
@@ -315,6 +331,7 @@ static inline bool mmc_op_multi(u32 opcode)
*/
#define EXT_CSD_WR_REL_PARAM_EN (1<<2)
+#define EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR (1<<4)
#define EXT_CSD_BOOT_WP_B_PWR_WP_DIS (0x40)
#define EXT_CSD_BOOT_WP_B_PERM_WP_DIS (0x10)
@@ -434,7 +451,7 @@ static inline bool mmc_op_multi(u32 opcode)
#define MMC_SECURE_TRIM1_ARG 0x80000001
#define MMC_SECURE_TRIM2_ARG 0x80008000
#define MMC_SECURE_ARGS 0x80000000
-#define MMC_TRIM_ARGS 0x00008001
+#define MMC_TRIM_OR_DISCARD_ARGS 0x00008003
#define mmc_driver_type_mask(n) (1 << (n))
diff --git a/include/linux/mmc/pm.h b/include/linux/mmc/pm.h
index 4a139204c20c..3549f8045784 100644
--- a/include/linux/mmc/pm.h
+++ b/include/linux/mmc/pm.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/linux/mmc/pm.h
*
* Author: Nicolas Pitre
* Copyright: (C) 2009 Marvell Technology Group Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef LINUX_MMC_PM_H
diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h
index 1ebcf9ba1256..6727576a8755 100644
--- a/include/linux/mmc/sd.h
+++ b/include/linux/mmc/sd.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mmc/sd.h
*
* Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
*/
#ifndef LINUX_MMC_SD_H
@@ -33,6 +29,10 @@
#define SD_APP_OP_COND 41 /* bcr [31:0] OCR R3 */
#define SD_APP_SEND_SCR 51 /* adtc R1 */
+ /* class 11 */
+#define SD_READ_EXTR_SINGLE 48 /* adtc [31:0] R1 */
+#define SD_WRITE_EXTR_SINGLE 49 /* adtc [31:0] R1 */
+
/* OCR bit definitions */
#define SD_OCR_S18R (1 << 24) /* 1.8V switching request */
#define SD_ROCR_S18A SD_OCR_S18R /* 1.8V switching accepted by card */
@@ -91,4 +91,10 @@
#define SD_SWITCH_ACCESS_DEF 0
#define SD_SWITCH_ACCESS_HS 1
+/*
+ * Erase/discard
+ */
+#define SD_ERASE_ARG 0x00000000
+#define SD_DISCARD_ARG 0x00000001
+
#endif /* LINUX_MMC_SD_H */
diff --git a/include/linux/mmc/sdhci-pci-data.h b/include/linux/mmc/sdhci-pci-data.h
deleted file mode 100644
index fda15b6d4135..000000000000
--- a/include/linux/mmc/sdhci-pci-data.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef LINUX_MMC_SDHCI_PCI_DATA_H
-#define LINUX_MMC_SDHCI_PCI_DATA_H
-
-struct pci_dev;
-
-struct sdhci_pci_data {
- struct pci_dev *pdev;
- int slotno;
- int rst_n_gpio; /* Set to -EINVAL if unused */
- int cd_gpio; /* Set to -EINVAL if unused */
- int (*setup)(struct sdhci_pci_data *data);
- void (*cleanup)(struct sdhci_pci_data *data);
-};
-
-extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev,
- int slotno);
-
-extern int sdhci_pci_spt_drive_strength;
-
-#endif
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h
index 17446d3c3602..1ef400f28642 100644
--- a/include/linux/mmc/sdio.h
+++ b/include/linux/mmc/sdio.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mmc/sdio.h
*
* Copyright 2006-2007 Pierre Ossman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
*/
#ifndef LINUX_MMC_SDIO_H
@@ -86,7 +82,7 @@
#define SDIO_SD_REV_1_01 0 /* SD Physical Spec Version 1.01 */
#define SDIO_SD_REV_1_10 1 /* SD Physical Spec Version 1.10 */
#define SDIO_SD_REV_2_00 2 /* SD Physical Spec Version 2.00 */
-#define SDIO_SD_REV_3_00 3 /* SD Physical Spev Version 3.00 */
+#define SDIO_SD_REV_3_00 3 /* SD Physical Spec Version 3.00 */
#define SDIO_CCCR_IOEx 0x02
#define SDIO_CCCR_IORx 0x03
@@ -163,6 +159,11 @@
#define SDIO_DTSx_SET_TYPE_A (1 << SDIO_DRIVE_DTSx_SHIFT)
#define SDIO_DTSx_SET_TYPE_C (2 << SDIO_DRIVE_DTSx_SHIFT)
#define SDIO_DTSx_SET_TYPE_D (3 << SDIO_DRIVE_DTSx_SHIFT)
+
+#define SDIO_CCCR_INTERRUPT_EXT 0x16
+#define SDIO_INTERRUPT_EXT_SAI (1 << 0)
+#define SDIO_INTERRUPT_EXT_EAI (1 << 1)
+
/*
* Function Basic Registers (FBR)
*/
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index 97ca105347a6..478855b8e406 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/linux/mmc/sdio_func.h
*
* Copyright 2007-2008 Pierre Ossman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
*/
#ifndef LINUX_MMC_SDIO_FUNC_H
@@ -29,7 +25,7 @@ struct sdio_func_tuple {
struct sdio_func_tuple *next;
unsigned char code;
unsigned char size;
- unsigned char data[0];
+ unsigned char data[];
};
/*
@@ -55,6 +51,8 @@ struct sdio_func {
u8 *tmpbuf; /* DMA:able scratch buffer */
+ u8 major_rev; /* major revision number */
+ u8 minor_rev; /* minor revision number */
unsigned num_info; /* number of info strings */
const char **info; /* info strings */
@@ -111,6 +109,18 @@ struct sdio_driver {
extern int sdio_register_driver(struct sdio_driver *);
extern void sdio_unregister_driver(struct sdio_driver *);
+/**
+ * module_sdio_driver() - Helper macro for registering an SDIO driver
+ * @__sdio_driver: sdio_driver struct
+ *
+ * Helper macro for SDIO drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit().
+ */
+#define module_sdio_driver(__sdio_driver) \
+ module_driver(__sdio_driver, sdio_register_driver, \
+ sdio_unregister_driver)
+
/*
* SDIO I/O operations
*/
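Editor's aside: a skeleton of the boilerplate that module_sdio_driver() replaces, using an ID already defined in sdio_ids.h; the driver name and empty callbacks are placeholders:

static const struct sdio_device_id example_ids[] = {
	{ SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(sdio, example_ids);

static int example_probe(struct sdio_func *func,
			 const struct sdio_device_id *id)
{
	return 0;	/* claim and initialise the function here */
}

static void example_remove(struct sdio_func *func)
{
}

static struct sdio_driver example_driver = {
	.name		= "example_sdio",
	.id_table	= example_ids,
	.probe		= example_probe,
	.remove		= example_remove,
};
module_sdio_driver(example_driver);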
@@ -159,4 +169,10 @@ extern void sdio_f0_writeb(struct sdio_func *func, unsigned char b,
extern mmc_pm_flag_t sdio_get_host_pm_caps(struct sdio_func *func);
extern int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags);
+extern void sdio_retune_crc_disable(struct sdio_func *func);
+extern void sdio_retune_crc_enable(struct sdio_func *func);
+
+extern void sdio_retune_hold_now(struct sdio_func *func);
+extern void sdio_retune_release(struct sdio_func *func);
+
#endif /* LINUX_MMC_SDIO_FUNC_H */
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index b733eb404ffc..c653accdc7fd 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* SDIO Classes, Interface Types, Manufacturer IDs, etc.
*/
@@ -23,49 +24,124 @@
/*
* Vendors and devices. Sort key: vendor first, device next.
*/
+
+#define SDIO_VENDOR_ID_STE 0x0020
+#define SDIO_DEVICE_ID_STE_CW1200 0x2280
+
+#define SDIO_VENDOR_ID_INTEL 0x0089
+#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402
+#define SDIO_DEVICE_ID_INTEL_IWMC3200WIFI 0x1403
+#define SDIO_DEVICE_ID_INTEL_IWMC3200TOP 0x1404
+#define SDIO_DEVICE_ID_INTEL_IWMC3200GPS 0x1405
+#define SDIO_DEVICE_ID_INTEL_IWMC3200BT 0x1406
+#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5 0x1407
+
+#define SDIO_VENDOR_ID_CGUYS 0x0092
+#define SDIO_DEVICE_ID_CGUYS_EW_CG1102GC 0x0004
+
+#define SDIO_VENDOR_ID_TI 0x0097
+#define SDIO_DEVICE_ID_TI_WL1271 0x4076
+
+#define SDIO_VENDOR_ID_ATHEROS 0x0271
+#define SDIO_DEVICE_ID_ATHEROS_AR6003_00 0x0300
+#define SDIO_DEVICE_ID_ATHEROS_AR6003_01 0x0301
+#define SDIO_DEVICE_ID_ATHEROS_AR6004_00 0x0400
+#define SDIO_DEVICE_ID_ATHEROS_AR6004_01 0x0401
+#define SDIO_DEVICE_ID_ATHEROS_AR6004_02 0x0402
+#define SDIO_DEVICE_ID_ATHEROS_AR6004_18 0x0418
+#define SDIO_DEVICE_ID_ATHEROS_AR6004_19 0x0419
+#define SDIO_DEVICE_ID_ATHEROS_AR6005 0x050A
+#define SDIO_DEVICE_ID_ATHEROS_QCA9377 0x0701
+
#define SDIO_VENDOR_ID_BROADCOM 0x02d0
-#define SDIO_DEVICE_ID_BROADCOM_43143 0xa887
+#define SDIO_DEVICE_ID_BROADCOM_NINTENDO_WII 0x044b
#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
-#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c
-#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d
#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
#define SDIO_DEVICE_ID_BROADCOM_4339 0x4339
-#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
-#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
-#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
+#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_89359 0x4355
#define SDIO_DEVICE_ID_BROADCOM_4356 0x4356
+#define SDIO_DEVICE_ID_BROADCOM_4359 0x4359
+#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373 0x4373
+#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43012 0xa804
+#define SDIO_DEVICE_ID_BROADCOM_43143 0xa887
+#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c
+#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d
+#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
+#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4
+#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
+#define SDIO_DEVICE_ID_BROADCOM_43439 0xa9af
+#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
+#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752 0xaae8
-#define SDIO_VENDOR_ID_INTEL 0x0089
-#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402
-#define SDIO_DEVICE_ID_INTEL_IWMC3200WIFI 0x1403
-#define SDIO_DEVICE_ID_INTEL_IWMC3200TOP 0x1404
-#define SDIO_DEVICE_ID_INTEL_IWMC3200GPS 0x1405
-#define SDIO_DEVICE_ID_INTEL_IWMC3200BT 0x1406
-#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5 0x1407
+#define SDIO_VENDOR_ID_CYPRESS 0x04b4
+#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43439 0xbd3d
#define SDIO_VENDOR_ID_MARVELL 0x02df
#define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103
-#define SDIO_DEVICE_ID_MARVELL_8688WLAN 0x9104
-#define SDIO_DEVICE_ID_MARVELL_8688BT 0x9105
+#define SDIO_DEVICE_ID_MARVELL_8688_WLAN 0x9104
+#define SDIO_DEVICE_ID_MARVELL_8688_BT 0x9105
+#define SDIO_DEVICE_ID_MARVELL_8786_WLAN 0x9116
+#define SDIO_DEVICE_ID_MARVELL_8787_WLAN 0x9119
+#define SDIO_DEVICE_ID_MARVELL_8787_BT 0x911a
+#define SDIO_DEVICE_ID_MARVELL_8787_BT_AMP 0x911b
#define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128
+#define SDIO_DEVICE_ID_MARVELL_8797_WLAN 0x9129
+#define SDIO_DEVICE_ID_MARVELL_8797_BT 0x912a
+#define SDIO_DEVICE_ID_MARVELL_8897_WLAN 0x912d
+#define SDIO_DEVICE_ID_MARVELL_8897_BT 0x912e
+#define SDIO_DEVICE_ID_MARVELL_8887_F0 0x9134
+#define SDIO_DEVICE_ID_MARVELL_8887_WLAN 0x9135
+#define SDIO_DEVICE_ID_MARVELL_8887_BT 0x9136
+#define SDIO_DEVICE_ID_MARVELL_8801_WLAN 0x9139
+#define SDIO_DEVICE_ID_MARVELL_8997_F0 0x9140
+#define SDIO_DEVICE_ID_MARVELL_8997_WLAN 0x9141
+#define SDIO_DEVICE_ID_MARVELL_8997_BT 0x9142
+#define SDIO_DEVICE_ID_MARVELL_8977_WLAN 0x9145
+#define SDIO_DEVICE_ID_MARVELL_8977_BT 0x9146
+#define SDIO_DEVICE_ID_MARVELL_8987_WLAN 0x9149
+#define SDIO_DEVICE_ID_MARVELL_8987_BT 0x914a
+#define SDIO_DEVICE_ID_MARVELL_8978_WLAN 0x9159
+
+#define SDIO_VENDOR_ID_MEDIATEK 0x037a
+#define SDIO_DEVICE_ID_MEDIATEK_MT7663 0x7663
+#define SDIO_DEVICE_ID_MEDIATEK_MT7668 0x7668
+#define SDIO_DEVICE_ID_MEDIATEK_MT7961 0x7961
+
+#define SDIO_VENDOR_ID_MICROCHIP_WILC 0x0296
+#define SDIO_DEVICE_ID_MICROCHIP_WILC1000 0x5347
+
+#define SDIO_VENDOR_ID_REALTEK 0x024c
+#define SDIO_DEVICE_ID_REALTEK_RTW8723BS 0xb723
+#define SDIO_DEVICE_ID_REALTEK_RTW8821BS 0xb821
+#define SDIO_DEVICE_ID_REALTEK_RTW8822BS 0xb822
+#define SDIO_DEVICE_ID_REALTEK_RTW8821CS 0xc821
+#define SDIO_DEVICE_ID_REALTEK_RTW8822CS 0xc822
+#define SDIO_DEVICE_ID_REALTEK_RTW8723DS 0xd723
+#define SDIO_DEVICE_ID_REALTEK_RTW8821DS 0xd821
#define SDIO_VENDOR_ID_SIANO 0x039a
#define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201
#define SDIO_DEVICE_ID_SIANO_NICE 0x0202
#define SDIO_DEVICE_ID_SIANO_VEGA_A0 0x0300
#define SDIO_DEVICE_ID_SIANO_VENICE 0x0301
+#define SDIO_DEVICE_ID_SIANO_MING 0x0302
+#define SDIO_DEVICE_ID_SIANO_PELE 0x0500
+#define SDIO_DEVICE_ID_SIANO_RIO 0x0600
+#define SDIO_DEVICE_ID_SIANO_DENVER_2160 0x0700
+#define SDIO_DEVICE_ID_SIANO_DENVER_1530 0x0800
#define SDIO_DEVICE_ID_SIANO_NOVA_A0 0x1100
#define SDIO_DEVICE_ID_SIANO_STELLAR 0x5347
-#define SDIO_VENDOR_ID_TI 0x0097
-#define SDIO_DEVICE_ID_TI_WL1271 0x4076
+#define SDIO_VENDOR_ID_RSI 0x041b
+#define SDIO_DEVICE_ID_RSI_9113 0x9330
+#define SDIO_DEVICE_ID_RSI_9116 0x9116
-#define SDIO_VENDOR_ID_STE 0x0020
-#define SDIO_DEVICE_ID_STE_CW1200 0x2280
+#define SDIO_VENDOR_ID_TI_WL1251 0x104c
+#define SDIO_DEVICE_ID_TI_WL1251 0x9066
#endif /* LINUX_MMC_SDIO_IDS_H */
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 82f0d289f110..5d3d15e97868 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Generic GPIO card-detect helper header
*
* Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef MMC_SLOT_GPIO_H
@@ -17,21 +14,18 @@
struct mmc_host;
int mmc_gpio_get_ro(struct mmc_host *host);
-int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio);
-
int mmc_gpio_get_cd(struct mmc_host *host);
-int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio,
- unsigned int debounce);
-
+void mmc_gpio_set_cd_irq(struct mmc_host *host, int irq);
int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
- unsigned int debounce, bool *gpio_invert);
+ unsigned int debounce);
int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
- unsigned int idx, bool override_active_level,
- unsigned int debounce, bool *gpio_invert);
+ unsigned int idx, unsigned int debounce);
void mmc_gpio_set_cd_isr(struct mmc_host *host,
irqreturn_t (*isr)(int irq, void *dev_id));
+int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on);
void mmc_gpiod_request_cd_irq(struct mmc_host *host);
bool mmc_can_gpio_cd(struct mmc_host *host);
+bool mmc_can_gpio_ro(struct mmc_host *host);
#endif
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 451a811f48f2..b8728d11c949 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_DEBUG_H
#define LINUX_MM_DEBUG_H 1
@@ -8,8 +9,7 @@ struct page;
struct vm_area_struct;
struct mm_struct;
-extern void dump_page(struct page *page, const char *reason);
-extern void __dump_page(struct page *page, const char *reason);
+void dump_page(struct page *page, const char *reason);
void dump_vma(const struct vm_area_struct *vma);
void dump_mm(const struct mm_struct *mm);
@@ -22,6 +22,13 @@ void dump_mm(const struct mm_struct *mm);
BUG(); \
} \
} while (0)
+#define VM_BUG_ON_FOLIO(cond, folio) \
+ do { \
+ if (unlikely(cond)) { \
+ dump_page(&folio->page, "VM_BUG_ON_FOLIO(" __stringify(cond)")");\
+ BUG(); \
+ } \
+ } while (0)
#define VM_BUG_ON_VMA(cond, vma) \
do { \
if (unlikely(cond)) { \
@@ -36,21 +43,63 @@ void dump_mm(const struct mm_struct *mm);
BUG(); \
} \
} while (0)
-#define VM_WARN_ON(cond) WARN_ON(cond)
-#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond)
-#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format)
-#define VM_WARN(cond, format...) WARN(cond, format)
+#define VM_WARN_ON_ONCE_PAGE(cond, page) ({ \
+ static bool __section(".data.once") __warned; \
+ int __ret_warn_once = !!(cond); \
+ \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ dump_page(page, "VM_WARN_ON_ONCE_PAGE(" __stringify(cond)")");\
+ __warned = true; \
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn_once); \
+})
+#define VM_WARN_ON_FOLIO(cond, folio) ({ \
+ int __ret_warn = !!(cond); \
+ \
+ if (unlikely(__ret_warn)) { \
+ dump_page(&folio->page, "VM_WARN_ON_FOLIO(" __stringify(cond)")");\
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn); \
+})
+#define VM_WARN_ON_ONCE_FOLIO(cond, folio) ({ \
+ static bool __section(".data.once") __warned; \
+ int __ret_warn_once = !!(cond); \
+ \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ dump_page(&folio->page, "VM_WARN_ON_ONCE_FOLIO(" __stringify(cond)")");\
+ __warned = true; \
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn_once); \
+})
+
+#define VM_WARN_ON(cond) (void)WARN_ON(cond)
+#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
+#define VM_WARN_ONCE(cond, format...) (void)WARN_ONCE(cond, format)
+#define VM_WARN(cond, format...) (void)WARN(cond, format)
#else
#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
+#define VM_BUG_ON_FOLIO(cond, folio) VM_BUG_ON(cond)
#define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond)
#define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond)
#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
#endif
+#ifdef CONFIG_DEBUG_VM_IRQSOFF
+#define VM_WARN_ON_IRQS_ENABLED() WARN_ON_ONCE(!irqs_disabled())
+#else
+#define VM_WARN_ON_IRQS_ENABLED() do { } while (0)
+#endif
+
#ifdef CONFIG_DEBUG_VIRTUAL
#define VIRTUAL_BUG_ON(cond) BUG_ON(cond)
#else
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
index 3ba327af055c..88236849894d 100644
--- a/include/linux/mmiotrace.h
+++ b/include/linux/mmiotrace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMIOTRACE_H
#define _LINUX_MMIOTRACE_H
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
index a4441784503b..f2b7a3f04099 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -1,16 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_CONTEXT_H
#define _LINUX_MMU_CONTEXT_H
#include <asm/mmu_context.h>
-
-struct mm_struct;
-
-void use_mm(struct mm_struct *mm);
-void unuse_mm(struct mm_struct *mm);
+#include <asm/mmu.h>
/* Architectures that care about IRQ state in switch_mm can override this. */
#ifndef switch_mm_irqs_off
# define switch_mm_irqs_off switch_mm
#endif
+#ifndef leave_mm
+static inline void leave_mm(int cpu) { }
+#endif
+
+/*
+ * CPUs that are capable of running user task @p. Must contain at least one
+ * active CPU. It is assumed that the kernel can run on all CPUs, so calling
+ * this for a kernel thread is pointless.
+ *
+ * By default, we assume a sane, homogeneous system.
+ */
+#ifndef task_cpu_possible_mask
+# define task_cpu_possible_mask(p) cpu_possible_mask
+# define task_cpu_possible(cpu, p) true
+#else
+# define task_cpu_possible(cpu, p) cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
+#endif
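A minimal sketch of how these helpers compose (illustrative only; pick_runnable_cpu() is a hypothetical function, not part of this patch):

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Return any online CPU that user task @p may execute on, or -1 if the
 * intersection of cpu_online_mask and task_cpu_possible_mask(p) is empty. */
static int pick_runnable_cpu(struct task_struct *p)
{
        int cpu;

        for_each_online_cpu(cpu) {
                if (task_cpu_possible(cpu, p))
                        return cpu;
        }
        return -1;
}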
+
+#ifndef mm_untag_mask
+static inline unsigned long mm_untag_mask(struct mm_struct *mm)
+{
+ return -1UL;
+}
+#endif
+
+#ifndef arch_pgtable_dma_compat
+static inline bool arch_pgtable_dma_compat(struct mm_struct *mm)
+{
+ return true;
+}
+#endif
+
#endif
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index c91b3bcd158f..64a3e051c3c4 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -1,29 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
+#include <linux/mmap_lock.h>
#include <linux/srcu.h>
+#include <linux/interval_tree.h>
+struct mmu_notifier_subscriptions;
struct mmu_notifier;
-struct mmu_notifier_ops;
+struct mmu_notifier_range;
+struct mmu_interval_notifier;
-#ifdef CONFIG_MMU_NOTIFIER
-
-/*
- * The mmu notifier_mm structure is allocated and installed in
- * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
- * critical section and it's released only when mm_count reaches zero
- * in mmdrop().
+/**
+ * enum mmu_notifier_event - reason for the mmu notifier callback
+ * @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap()
+ * that moves the range
+ *
+ * @MMU_NOTIFY_CLEAR: clear page table entry (many reasons for this like
+ * madvise() or replacing a page by another one, ...).
+ *
+ * @MMU_NOTIFY_PROTECTION_VMA: update is due to a protection change for the
+ * range, i.e. using the vma access permission (vm_page_prot) to update the
+ * whole range is enough; there is no need to inspect changes to the CPU page
+ * table (mprotect() syscall)
+ *
+ * @MMU_NOTIFY_PROTECTION_PAGE: update is due to a change in the read/write
+ * flags for pages in the range, so to mirror those changes the user must
+ * inspect the CPU page table (from the end callback).
+ *
+ * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still the same page and the
+ * same access flags). The user should soft-dirty the page in the end callback
+ * to make sure that anyone relying on soft dirtiness catches pages that might
+ * be written through non-CPU mappings.
+ *
+ * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
+ * that the mm refcount is zero and the range is no longer accessible.
+ *
+ * @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal
+ * a device driver to possibly ignore the invalidation if the
+ * owner field matches the driver's device private pgmap owner.
+ *
+ * @MMU_NOTIFY_EXCLUSIVE: to signal a device driver that the device will no
+ * longer have exclusive access to the page. When sent during creation of an
+ * exclusive range the owner will be initialised to the value provided by the
+ * caller of make_device_exclusive_range(), otherwise the owner will be NULL.
*/
-struct mmu_notifier_mm {
- /* all mmu notifiers registerd in this mm are queued in this list */
- struct hlist_head list;
- /* to serialize the list modifications and hlist_unhashed */
- spinlock_t lock;
+enum mmu_notifier_event {
+ MMU_NOTIFY_UNMAP = 0,
+ MMU_NOTIFY_CLEAR,
+ MMU_NOTIFY_PROTECTION_VMA,
+ MMU_NOTIFY_PROTECTION_PAGE,
+ MMU_NOTIFY_SOFT_DIRTY,
+ MMU_NOTIFY_RELEASE,
+ MMU_NOTIFY_MIGRATE,
+ MMU_NOTIFY_EXCLUSIVE,
};
+#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
+
struct mmu_notifier_ops {
/*
* Called either by mmu_notifier_unregister or when the mm is
@@ -48,7 +85,7 @@ struct mmu_notifier_ops {
* through the gart alias address, so leading to memory
* corruption.
*/
- void (*release)(struct mmu_notifier *mn,
+ void (*release)(struct mmu_notifier *subscription,
struct mm_struct *mm);
/*
@@ -60,7 +97,7 @@ struct mmu_notifier_ops {
* Start-end is necessary in case the secondary MMU is mapping the page
* at a smaller granularity than the primary MMU.
*/
- int (*clear_flush_young)(struct mmu_notifier *mn,
+ int (*clear_flush_young)(struct mmu_notifier *subscription,
struct mm_struct *mm,
unsigned long start,
unsigned long end);
@@ -70,7 +107,7 @@ struct mmu_notifier_ops {
* latter, it is supposed to test-and-clear the young/accessed bitflag
* in the secondary pte, but it may omit flushing the secondary tlb.
*/
- int (*clear_young)(struct mmu_notifier *mn,
+ int (*clear_young)(struct mmu_notifier *subscription,
struct mm_struct *mm,
unsigned long start,
unsigned long end);
@@ -81,7 +118,7 @@ struct mmu_notifier_ops {
* frequently used without actually clearing the flag or tearing
* down the secondary mapping on the page.
*/
- int (*test_young)(struct mmu_notifier *mn,
+ int (*test_young)(struct mmu_notifier *subscription,
struct mm_struct *mm,
unsigned long address);
@@ -89,25 +126,14 @@ struct mmu_notifier_ops {
* change_pte is called in cases that pte mapping to page is changed:
* for example, when ksm remaps pte to point to a new shared page.
*/
- void (*change_pte)(struct mmu_notifier *mn,
+ void (*change_pte)(struct mmu_notifier *subscription,
struct mm_struct *mm,
unsigned long address,
pte_t pte);
/*
- * Before this is invoked any secondary MMU is still ok to
- * read/write to the page previously pointed to by the Linux
- * pte because the page hasn't been freed yet and it won't be
- * freed until this returns. If required set_page_dirty has to
- * be called internally to this method.
- */
- void (*invalidate_page)(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address);
-
- /*
* invalidate_range_start() and invalidate_range_end() must be
- * paired and are called only when the mmap_sem and/or the
+ * paired and are called only when the mmap_lock and/or the
* locks protecting the reverse maps are held. If the subsystem
* can't guarantee that no additional references are taken to
* the pages in the range, it has to implement the
@@ -141,19 +167,24 @@ struct mmu_notifier_ops {
* decrease the refcount. If the refcount is decreased on
* invalidate_range_start() then the VM can free pages as page
* table entries are removed. If the refcount is only
- * droppped on invalidate_range_end() then the driver itself
+ * dropped on invalidate_range_end() then the driver itself
* will drop the last refcount but it must take care to flush
* any secondary tlb before doing the final free on the
* page. Pages will no longer be referenced by the linux
* address space but may still be referenced by sptes until
* the last refcount is dropped.
+ *
+ * If blockable argument is set to false then the callback cannot
+ * sleep and has to return with -EAGAIN if sleeping would be required.
+ * 0 should be returned otherwise. Please note that notifiers that can
+ * fail invalidate_range_start are not allowed to implement
+ * invalidate_range_end, as there is no mechanism for informing the
+ * notifier that its start failed.
*/
- void (*invalidate_range_start)(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start, unsigned long end);
- void (*invalidate_range_end)(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start, unsigned long end);
+ int (*invalidate_range_start)(struct mmu_notifier *subscription,
+ const struct mmu_notifier_range *range);
+ void (*invalidate_range_end)(struct mmu_notifier *subscription,
+ const struct mmu_notifier_range *range);
/*
* invalidate_range() is either called between
@@ -165,50 +196,193 @@ struct mmu_notifier_ops {
* If invalidate_range() is used to manage a non-CPU TLB with
 * shared page-tables, it is not necessary to implement the
* invalidate_range_start()/end() notifiers, as
- * invalidate_range() alread catches the points in time when an
- * external TLB range needs to be flushed.
- *
- * The invalidate_range() function is called under the ptl
- * spin-lock and not allowed to sleep.
+ * invalidate_range() already catches the points in time when an
+ * external TLB range needs to be flushed. For more in depth
+ * discussion on this see Documentation/mm/mmu_notifier.rst
*
* Note that this function might be called with just a sub-range
* of what was passed to invalidate_range_start()/end(), if
* called between those functions.
*/
- void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
- unsigned long start, unsigned long end);
+ void (*invalidate_range)(struct mmu_notifier *subscription,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
+
+ /*
+ * These callbacks are used with the get/put interface to manage the
+ * lifetime of the mmu_notifier memory. alloc_notifier() returns a new
+ * notifier for use with the mm.
+ *
+ * free_notifier() is only called after the mmu_notifier has been
+ * fully put, calls to any ops callback are prevented and no ops
+ * callbacks are currently running. It is called from a SRCU callback
+ * and cannot sleep.
+ */
+ struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
+ void (*free_notifier)(struct mmu_notifier *subscription);
};
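To make the blockable contract above concrete, here is a sketch of a driver-side invalidate_range_start() that honours mmu_notifier_range_blockable(); struct my_dev and its helpers are assumptions for the example, not part of this patch:

static int my_invalidate_range_start(struct mmu_notifier *subscription,
                                     const struct mmu_notifier_range *range)
{
        struct my_dev *dev = container_of(subscription, struct my_dev, mn);

        if (!mmu_notifier_range_blockable(range)) {
                /* Non-blocking context: try-lock and ask for a retry. */
                if (!mutex_trylock(&dev->lock))
                        return -EAGAIN;
        } else {
                mutex_lock(&dev->lock);
        }
        my_dev_flush_secondary_tlb(dev, range->start, range->end);
        mutex_unlock(&dev->lock);
        /* Because this callback can fail, the driver must not implement
         * invalidate_range_end(), per the comment above. */
        return 0;
}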
/*
- * The notifier chains are protected by mmap_sem and/or the reverse map
+ * The notifier chains are protected by mmap_lock and/or the reverse map
* semaphores. Notifier chains are only changed when all reverse maps and
- * the mmap_sem locks are taken.
+ * the mmap_lock locks are taken.
*
* Therefore notifier chains can only be traversed when either
*
- * 1. mmap_sem is held.
+ * 1. mmap_lock is held.
* 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
* 3. No other concurrent thread can access the list (release)
*/
struct mmu_notifier {
struct hlist_node hlist;
const struct mmu_notifier_ops *ops;
+ struct mm_struct *mm;
+ struct rcu_head rcu;
+ unsigned int users;
+};
+
+/**
+ * struct mmu_interval_notifier_ops
+ * @invalidate: Upon return the caller must stop using any SPTEs within this
+ * range. This function can sleep. Return false only if sleeping
+ * was required but mmu_notifier_range_blockable(range) is false.
+ */
+struct mmu_interval_notifier_ops {
+ bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq);
+};
+
+struct mmu_interval_notifier {
+ struct interval_tree_node interval_tree;
+ const struct mmu_interval_notifier_ops *ops;
+ struct mm_struct *mm;
+ struct hlist_node deferred_item;
+ unsigned long invalidate_seq;
+};
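A sketch of an invalidate() implementation for these ops, pairing a driver lock with mmu_interval_set_seq() as documented further below (struct my_mirror and its helpers are hypothetical):

static bool my_invalidate(struct mmu_interval_notifier *interval_sub,
                          const struct mmu_notifier_range *range,
                          unsigned long cur_seq)
{
        struct my_mirror *mirror =
                container_of(interval_sub, struct my_mirror, notifier);

        if (!mmu_notifier_range_blockable(range)) {
                if (!mutex_trylock(&mirror->lock))
                        return false;   /* sleeping required but forbidden */
        } else {
                mutex_lock(&mirror->lock);
        }
        mmu_interval_set_seq(interval_sub, cur_seq);
        my_mirror_drop_sptes(mirror, range->start, range->end);
        mutex_unlock(&mirror->lock);
        return true;
}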
+
+#ifdef CONFIG_MMU_NOTIFIER
+
+#ifdef CONFIG_LOCKDEP
+extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
+#endif
+
+struct mmu_notifier_range {
+ struct mm_struct *mm;
+ unsigned long start;
+ unsigned long end;
+ unsigned flags;
+ enum mmu_notifier_event event;
+ void *owner;
};
static inline int mm_has_notifiers(struct mm_struct *mm)
{
- return unlikely(mm->mmu_notifier_mm);
+ return unlikely(mm->notifier_subscriptions);
+}
+
+struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
+ struct mm_struct *mm);
+static inline struct mmu_notifier *
+mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
+{
+ struct mmu_notifier *ret;
+
+ mmap_write_lock(mm);
+ ret = mmu_notifier_get_locked(ops, mm);
+ mmap_write_unlock(mm);
+ return ret;
}
+void mmu_notifier_put(struct mmu_notifier *subscription);
+void mmu_notifier_synchronize(void);
-extern int mmu_notifier_register(struct mmu_notifier *mn,
+extern int mmu_notifier_register(struct mmu_notifier *subscription,
struct mm_struct *mm);
-extern int __mmu_notifier_register(struct mmu_notifier *mn,
+extern int __mmu_notifier_register(struct mmu_notifier *subscription,
struct mm_struct *mm);
-extern void mmu_notifier_unregister(struct mmu_notifier *mn,
+extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
struct mm_struct *mm);
-extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
- struct mm_struct *mm);
-extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
+
+unsigned long
+mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
+int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
+ struct mm_struct *mm, unsigned long start,
+ unsigned long length,
+ const struct mmu_interval_notifier_ops *ops);
+int mmu_interval_notifier_insert_locked(
+ struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
+ unsigned long start, unsigned long length,
+ const struct mmu_interval_notifier_ops *ops);
+void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);
+
+/**
+ * mmu_interval_set_seq - Save the invalidation sequence
+ * @interval_sub: The subscription passed to invalidate
+ * @cur_seq: The cur_seq passed to the invalidate() callback
+ *
+ * This must be called unconditionally from the invalidate callback of a
+ * struct mmu_interval_notifier_ops under the same lock that is used to call
+ * mmu_interval_read_retry(). It updates the sequence number for later use by
+ * mmu_interval_read_retry(). The provided cur_seq will always be odd.
+ *
+ * If the caller does not call mmu_interval_read_begin() or
+ * mmu_interval_read_retry() then this call is not required.
+ */
+static inline void
+mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
+ unsigned long cur_seq)
+{
+ WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
+}
+
+/**
+ * mmu_interval_read_retry - End a read side critical section against a VA range
+ * @interval_sub: The subscription
+ * @seq: The return of the paired mmu_interval_read_begin()
+ *
+ * This MUST be called under a user provided lock that is also held
+ * unconditionally by op->invalidate() when it calls mmu_interval_set_seq().
+ *
+ * Each call should be paired with a single mmu_interval_read_begin() and
+ * should be used to conclude the read side.
+ *
+ * Returns true if an invalidation collided with this critical section, and
+ * the caller should retry.
+ */
+static inline bool
+mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
+ unsigned long seq)
+{
+ return interval_sub->invalidate_seq != seq;
+}
+
+/**
+ * mmu_interval_check_retry - Test if a collision has occurred
+ * @interval_sub: The subscription
+ * @seq: The return of the matching mmu_interval_read_begin()
+ *
+ * This can be used in the critical section between mmu_interval_read_begin()
+ * and mmu_interval_read_retry(). A return of true indicates an invalidation
+ * has collided with this critical region and a future
+ * mmu_interval_read_retry() will return true.
+ *
+ * False is not reliable and only suggests a collision may not have
+ * occurred. It can be called many times and does not have to hold the user
+ * provided lock.
+ *
+ * This call can be used as part of loops and other expensive operations to
+ * expedite a retry.
+ */
+static inline bool
+mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
+ unsigned long seq)
+{
+ /* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
+ return READ_ONCE(interval_sub->invalidate_seq) != seq;
+}
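Putting begin/retry together, a sketch of the read-side loop these helpers are designed for (hypothetical my_mirror driver, continued from the invalidate() sketch above):

static void my_mirror_refresh(struct my_mirror *mirror,
                              unsigned long start, unsigned long end)
{
        unsigned long seq;

        for (;;) {
                seq = mmu_interval_read_begin(&mirror->notifier);

                /* Translate [start, end) outside the lock; this may fault. */
                my_mirror_translate(mirror, start, end);

                mutex_lock(&mirror->lock);
                if (!mmu_interval_read_retry(&mirror->notifier, seq)) {
                        /* No invalidation collided: commit the SPTEs. */
                        my_mirror_commit_sptes(mirror, start, end);
                        mutex_unlock(&mirror->lock);
                        break;
                }
                mutex_unlock(&mirror->lock);
                /* Collision: discard the work and retry. */
        }
}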
+
+extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
unsigned long start,
@@ -220,14 +394,19 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
unsigned long address, pte_t pte);
-extern void __mmu_notifier_invalidate_page(struct mm_struct *mm,
- unsigned long address);
-extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
- unsigned long start, unsigned long end);
-extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
- unsigned long start, unsigned long end);
+extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
+extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
+ bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
unsigned long start, unsigned long end);
+extern bool
+mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
+
+static inline bool
+mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
+{
+ return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
+}
static inline void mmu_notifier_release(struct mm_struct *mm)
{
@@ -268,25 +447,48 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
__mmu_notifier_change_pte(mm, address, pte);
}
-static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
- unsigned long address)
+static inline void
+mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
- if (mm_has_notifiers(mm))
- __mmu_notifier_invalidate_page(mm, address);
+ might_sleep();
+
+ lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
+ if (mm_has_notifiers(range->mm)) {
+ range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
+ __mmu_notifier_invalidate_range_start(range);
+ }
+ lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}
-static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline int
+mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
- if (mm_has_notifiers(mm))
- __mmu_notifier_invalidate_range_start(mm, start, end);
+ int ret = 0;
+
+ lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
+ if (mm_has_notifiers(range->mm)) {
+ range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
+ ret = __mmu_notifier_invalidate_range_start(range);
+ }
+ lock_map_release(&__mmu_notifier_invalidate_range_start_map);
+ return ret;
}
-static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline void
+mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
- if (mm_has_notifiers(mm))
- __mmu_notifier_invalidate_range_end(mm, start, end);
+ if (mmu_notifier_range_blockable(range))
+ might_sleep();
+
+ if (mm_has_notifiers(range->mm))
+ __mmu_notifier_invalidate_range_end(range, false);
+}
+
+static inline void
+mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
+{
+ if (mm_has_notifiers(range->mm))
+ __mmu_notifier_invalidate_range_end(range, true);
}
static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
@@ -296,15 +498,40 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
__mmu_notifier_invalidate_range(mm, start, end);
}
-static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
- mm->mmu_notifier_mm = NULL;
+ mm->notifier_subscriptions = NULL;
}
-static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
if (mm_has_notifiers(mm))
- __mmu_notifier_mm_destroy(mm);
+ __mmu_notifier_subscriptions_destroy(mm);
+}
+
+
+static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
+ enum mmu_notifier_event event,
+ unsigned flags,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ range->event = event;
+ range->mm = mm;
+ range->start = start;
+ range->end = end;
+ range->flags = flags;
+}
+
+static inline void mmu_notifier_range_init_owner(
+ struct mmu_notifier_range *range,
+ enum mmu_notifier_event event, unsigned int flags,
+ struct mm_struct *mm, unsigned long start,
+ unsigned long end, void *owner)
+{
+ mmu_notifier_range_init(range, event, flags, mm, start, end);
+ range->owner = owner;
}
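For reference, a sketch of the caller-side pattern these initializers support (my_zap_range() is hypothetical; the real users live in mm/):

static void my_zap_range(struct vm_area_struct *vma,
                         unsigned long start, unsigned long end)
{
        struct mmu_notifier_range range;

        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0,
                                vma->vm_mm, start, end);
        mmu_notifier_invalidate_range_start(&range);
        /* ... tear down the CPU page table entries for [start, end) ... */
        mmu_notifier_invalidate_range_end(&range);
}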
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
@@ -414,12 +641,38 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
set_pte_at(___mm, ___address, __ptep, ___pte); \
})
-extern void mmu_notifier_call_srcu(struct rcu_head *rcu,
- void (*func)(struct rcu_head *rcu));
-extern void mmu_notifier_synchronize(void);
-
#else /* CONFIG_MMU_NOTIFIER */
+struct mmu_notifier_range {
+ unsigned long start;
+ unsigned long end;
+};
+
+static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
+ unsigned long start,
+ unsigned long end)
+{
+ range->start = start;
+ range->end = end;
+}
+
+#define mmu_notifier_range_init(range,event,flags,mm,start,end) \
+ _mmu_notifier_range_init(range, start, end)
+#define mmu_notifier_range_init_owner(range, event, flags, mm, start, \
+ end, owner) \
+ _mmu_notifier_range_init(range, start, end)
+
+static inline bool
+mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
+{
+ return true;
+}
+
+static inline int mm_has_notifiers(struct mm_struct *mm)
+{
+ return 0;
+}
+
static inline void mmu_notifier_release(struct mm_struct *mm)
{
}
@@ -442,18 +695,24 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
{
}
-static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
- unsigned long address)
+static inline void
+mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}
-static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline int
+mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
+ return 0;
}
-static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline
+void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
+{
+}
+
+static inline void
+mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
}
@@ -462,14 +721,16 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
{
}
-static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
}
-static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
}
+#define mmu_notifier_range_update_to_read_only(r) false
+
#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
@@ -479,6 +740,10 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at
+static inline void mmu_notifier_synchronize(void)
+{
+}
+
#endif /* CONFIG_MMU_NOTIFIER */
#endif /* _LINUX_MMU_NOTIFIER_H */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index fc14b8b3f6ce..a4889c9d4055 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H
@@ -6,6 +7,7 @@
#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/list_nulls.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
@@ -17,15 +19,20 @@
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
+#include <linux/mm_types.h>
+#include <linux/page-flags.h>
+#include <linux/local_lock.h>
#include <asm/page.h>
/* Free memory management - zoned buddy allocator. */
-#ifndef CONFIG_FORCE_MAX_ZONEORDER
-#define MAX_ORDER 11
+#ifndef CONFIG_ARCH_FORCE_MAX_ORDER
+#define MAX_ORDER 10
#else
-#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
+#define MAX_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
#endif
-#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
+#define MAX_ORDER_NR_PAGES (1 << MAX_ORDER)
+
+#define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES)
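A worked example of the new, inclusive convention (assuming the default MAX_ORDER of 10 and 4 KiB pages):

/* Orders now run 0..MAX_ORDER inclusive, i.e. MAX_ORDER + 1 = 11 free
 * lists per migratetype. The largest buddy block is
 * MAX_ORDER_NR_PAGES = 1 << 10 = 1024 pages = 4 MiB with 4 KiB pages. */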
/*
* PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
@@ -50,10 +57,7 @@ enum migratetype {
*
* The way to use it is to change migratetype of a range of
* pageblocks to MIGRATE_CMA which can be done by
- * __free_pageblock_cma() function. What is important though
- * is that a range of pageblocks must be aligned to
- * MAX_ORDER_NR_PAGES should biggest page be bigger then
- * a single pageblock.
+ * __free_pageblock_cma() function.
*/
MIGRATE_CMA,
#endif
@@ -64,7 +68,7 @@ enum migratetype {
};
/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
-extern char * const migratetype_names[MIGRATE_TYPES];
+extern const char * const migratetype_names[MIGRATE_TYPES];
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
@@ -79,18 +83,27 @@ static inline bool is_migrate_movable(int mt)
return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}
+/*
+ * Check whether a migratetype can be merged with another migratetype.
+ *
+ * It is only mergeable when it can fall back to other migratetypes for
+ * allocation. See fallbacks[MIGRATE_TYPES][3] in page_alloc.c.
+ */
+static inline bool migratetype_is_mergeable(int mt)
+{
+ return mt < MIGRATE_PCPTYPES;
+}
+
#define for_each_migratetype_order(order, type) \
- for (order = 0; order < MAX_ORDER; order++) \
+ for (order = 0; order <= MAX_ORDER; order++) \
for (type = 0; type < MIGRATE_TYPES; type++)
extern int page_group_by_mobility_disabled;
-#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
-#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)
+#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
#define get_pageblock_migratetype(page) \
- get_pfnblock_flags_mask(page, page_to_pfn(page), \
- PB_migrate_end, MIGRATETYPE_MASK)
+ get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
struct free_area {
struct list_head free_list[MIGRATE_TYPES];
@@ -99,19 +112,18 @@ struct free_area {
struct pglist_data;
-/*
- * zone->lock and the zone lru_lock are two of the hottest locks in the kernel.
- * So add a wild amount of padding here to ensure that they fall into separate
- * cachelines. There are very few zone structures in the machine, so space
- * consumption is not a concern here.
- */
-#if defined(CONFIG_SMP)
-struct zone_padding {
- char x[0];
-} ____cacheline_internodealigned_in_smp;
-#define ZONE_PADDING(name) struct zone_padding name;
+#ifdef CONFIG_NUMA
+enum numa_stat_item {
+ NUMA_HIT, /* allocated in intended node */
+ NUMA_MISS, /* allocated in non intended node */
+ NUMA_FOREIGN, /* was intended here, hit elsewhere */
+ NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */
+ NUMA_LOCAL, /* allocation from local node */
+ NUMA_OTHER, /* allocation from other node */
+ NR_VM_NUMA_EVENT_ITEMS
+};
#else
-#define ZONE_PADDING(name)
+#define NR_VM_NUMA_EVENT_ITEMS 0
#endif
enum zone_stat_item {
@@ -125,21 +137,11 @@ enum zone_stat_item {
NR_ZONE_UNEVICTABLE,
NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */
NR_MLOCK, /* mlock()ed pages found and moved off LRU */
- NR_PAGETABLE, /* used for pagetables */
- NR_KERNEL_STACK_KB, /* measured in KiB */
/* Second 128 byte cacheline */
NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
NR_ZSPAGES, /* allocated in zsmalloc */
#endif
-#ifdef CONFIG_NUMA
- NUMA_HIT, /* allocated in intended node */
- NUMA_MISS, /* allocated in non intended node */
- NUMA_FOREIGN, /* was intended here, hit elsewhere */
- NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */
- NUMA_LOCAL, /* allocation from local node */
- NUMA_OTHER, /* allocation from other node */
-#endif
NR_FREE_CMA_PAGES,
NR_VM_ZONE_STAT_ITEMS };
@@ -150,12 +152,20 @@ enum node_stat_item {
NR_INACTIVE_FILE, /* " " " " " */
NR_ACTIVE_FILE, /* " " " " " */
NR_UNEVICTABLE, /* " " " " " */
- NR_SLAB_RECLAIMABLE,
- NR_SLAB_UNRECLAIMABLE,
+ NR_SLAB_RECLAIMABLE_B,
+ NR_SLAB_UNRECLAIMABLE_B,
NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
- WORKINGSET_REFAULT,
- WORKINGSET_ACTIVATE,
+ WORKINGSET_NODES,
+ WORKINGSET_REFAULT_BASE,
+ WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
+ WORKINGSET_REFAULT_FILE,
+ WORKINGSET_ACTIVATE_BASE,
+ WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE,
+ WORKINGSET_ACTIVATE_FILE,
+ WORKINGSET_RESTORE_BASE,
+ WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE,
+ WORKINGSET_RESTORE_FILE,
WORKINGSET_NODERECLAIM,
NR_ANON_MAPPED, /* Mapped anonymous pages */
NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
@@ -167,16 +177,71 @@ enum node_stat_item {
NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */
NR_SHMEM_THPS,
NR_SHMEM_PMDMAPPED,
+ NR_FILE_THPS,
+ NR_FILE_PMDMAPPED,
NR_ANON_THPS,
- NR_UNSTABLE_NFS, /* NFS unstable pages */
NR_VMSCAN_WRITE,
NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
NR_DIRTIED, /* page dirtyings since bootup */
NR_WRITTEN, /* page writings since bootup */
+ NR_THROTTLED_WRITTEN, /* NR_WRITTEN while reclaim throttled */
+ NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */
+ NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */
+ NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */
+ NR_KERNEL_STACK_KB, /* measured in KiB */
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+ NR_KERNEL_SCS_KB, /* measured in KiB */
+#endif
+ NR_PAGETABLE, /* used for pagetables */
+ NR_SECONDARY_PAGETABLE, /* secondary pagetables, e.g. KVM pagetables */
+#ifdef CONFIG_SWAP
+ NR_SWAPCACHE,
+#endif
+#ifdef CONFIG_NUMA_BALANCING
+ PGPROMOTE_SUCCESS, /* promote successfully */
+ PGPROMOTE_CANDIDATE, /* candidate pages to promote */
+#endif
NR_VM_NODE_STAT_ITEMS
};
/*
+ * Returns true if the item should be printed in THPs (/proc/vmstat
+ * currently prints the number of anon, file and shmem THPs. But the item
+ * is charged in pages).
+ */
+static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item)
+{
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return false;
+
+ return item == NR_ANON_THPS ||
+ item == NR_FILE_THPS ||
+ item == NR_SHMEM_THPS ||
+ item == NR_SHMEM_PMDMAPPED ||
+ item == NR_FILE_PMDMAPPED;
+}
+
+/*
+ * Returns true if the value is measured in bytes (most vmstat values are
+ * measured in pages). This defines the API part; the internal representation
+ * might be different.
+ */
+static __always_inline bool vmstat_item_in_bytes(int idx)
+{
+ /*
+ * Global and per-node slab counters track slab pages.
+ * It's expected that changes are multiples of PAGE_SIZE.
+ * Internally values are stored in pages.
+ *
+ * Per-memcg and per-lruvec counters track memory, consumed
+ * by individual slab objects. These counters are actually
+ * byte-precise.
+ */
+ return (idx == NR_SLAB_RECLAIMABLE_B ||
+ idx == NR_SLAB_UNRECLAIMABLE_B);
+}
+
+/*
* We do arithmetic on the LRU lists in various places in the code,
* so it is important to keep the active lists LRU_ACTIVE higher in
* the array than the corresponding inactive lists, and to keep
@@ -198,51 +263,365 @@ enum lru_list {
NR_LRU_LISTS
};
+enum vmscan_throttle_state {
+ VMSCAN_THROTTLE_WRITEBACK,
+ VMSCAN_THROTTLE_ISOLATED,
+ VMSCAN_THROTTLE_NOPROGRESS,
+ VMSCAN_THROTTLE_CONGESTED,
+ NR_VMSCAN_THROTTLE,
+};
+
#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
-static inline int is_file_lru(enum lru_list lru)
+static inline bool is_file_lru(enum lru_list lru)
{
return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}
-static inline int is_active_lru(enum lru_list lru)
+static inline bool is_active_lru(enum lru_list lru)
{
return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
-struct zone_reclaim_stat {
- /*
- * The pageout code in vmscan.c keeps track of how many of the
- * mem/swap backed and file backed pages are referenced.
- * The higher the rotated/scanned ratio, the more valuable
- * that cache is.
- *
- * The anon LRU stats live in [0], file LRU stats in [1]
- */
- unsigned long recent_rotated[2];
- unsigned long recent_scanned[2];
+#define WORKINGSET_ANON 0
+#define WORKINGSET_FILE 1
+#define ANON_AND_FILE 2
+
+enum lruvec_flags {
+ LRUVEC_CONGESTED, /* lruvec has many dirty pages
+ * backed by a congested BDI
+ */
};
+#endif /* !__GENERATING_BOUNDS_H */
+
+/*
+ * Evictable pages are divided into multiple generations. The youngest and the
+ * oldest generation numbers, max_seq and min_seq, are monotonically increasing.
+ * They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An
+ * offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the
+ * corresponding generation. The gen counter in folio->flags stores gen+1 while
+ * a page is on one of lrugen->folios[]. Otherwise it stores 0.
+ *
+ * A page is added to the youngest generation on faulting. The aging needs to
+ * check the accessed bit at least twice before handing this page over to the
+ * eviction. The first check takes care of the accessed bit set on the initial
+ * fault; the second check makes sure this page hasn't been used since then.
+ * This process, AKA second chance, requires a minimum of two generations,
+ * hence MIN_NR_GENS. And to maintain ABI compatibility with the active/inactive
+ * LRU, e.g., /proc/vmstat, these two generations are considered active; the
+ * rest of generations, if they exist, are considered inactive. See
+ * lru_gen_is_active().
+ *
+ * PG_active is always cleared while a page is on one of lrugen->folios[] so
+ * that the aging needs not to worry about it. And it's set again when a page
+ * considered active is isolated for non-reclaiming purposes, e.g., migration.
+ * See lru_gen_add_folio() and lru_gen_del_folio().
+ *
+ * MAX_NR_GENS is set to 4 so that the multi-gen LRU can support twice the
+ * number of categories of the active/inactive LRU when keeping track of
+ * accesses through page tables. This requires order_base_2(MAX_NR_GENS+1) bits
+ * in folio->flags.
+ */
+#define MIN_NR_GENS 2U
+#define MAX_NR_GENS 4U
+
+/*
+ * Each generation is divided into multiple tiers. A page accessed N times
+ * through file descriptors is in tier order_base_2(N). A page in the first tier
+ * (N=0,1) is marked by PG_referenced unless it was faulted in through page
+ * tables or read ahead. A page in any other tier (N>1) is marked by
+ * PG_referenced and PG_workingset. This implies a minimum of two tiers is
+ * supported without using additional bits in folio->flags.
+ *
+ * In contrast to moving across generations which requires the LRU lock, moving
+ * across tiers only involves atomic operations on folio->flags and therefore
+ * has a negligible cost in the buffered access path. In the eviction path,
+ * comparisons of refaulted/(evicted+protected) from the first tier and the
+ * rest infer whether pages accessed multiple times through file descriptors
+ * are statistically hot and thus worth protecting.
+ *
+ * MAX_NR_TIERS is set to 4 so that the multi-gen LRU can support twice the
+ * number of categories of the active/inactive LRU when keeping track of
+ * accesses through file descriptors. This uses MAX_NR_TIERS-2 spare bits in
+ * folio->flags.
+ */
+#define MAX_NR_TIERS 4U
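A worked example of the tier mapping, assuming order_base_2(N) rounds log2(N) up:

/* N file-descriptor accesses map to tier order_base_2(N):
 * N = 0 or 1 -> tier 0 (PG_referenced only)
 * N = 2      -> tier 1
 * N = 3 or 4 -> tier 2
 * N = 5..8   -> tier 3 (the last tier with MAX_NR_TIERS == 4;
 *               higher counts saturate here)
 */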
+
+#ifndef __GENERATING_BOUNDS_H
+
+struct lruvec;
+struct page_vma_mapped_walk;
+
+#define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
+#define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF)
+
+#ifdef CONFIG_LRU_GEN
+
+enum {
+ LRU_GEN_ANON,
+ LRU_GEN_FILE,
+};
+
+enum {
+ LRU_GEN_CORE,
+ LRU_GEN_MM_WALK,
+ LRU_GEN_NONLEAF_YOUNG,
+ NR_LRU_GEN_CAPS
+};
+
+#define MIN_LRU_BATCH BITS_PER_LONG
+#define MAX_LRU_BATCH (MIN_LRU_BATCH * 64)
+
+/* whether to keep historical stats from evicted generations */
+#ifdef CONFIG_LRU_GEN_STATS
+#define NR_HIST_GENS MAX_NR_GENS
+#else
+#define NR_HIST_GENS 1U
+#endif
+
+/*
+ * The youngest generation number is stored in max_seq for both anon and file
+ * types as they are aged on an equal footing. The oldest generation numbers are
+ * stored in min_seq[] separately for anon and file types as clean file pages
+ * can be evicted regardless of swap constraints.
+ *
+ * Normally anon and file min_seq are in sync. But if swapping is constrained,
+ * e.g., out of swap space, file min_seq is allowed to advance and leave anon
+ * min_seq behind.
+ *
+ * The number of pages in each generation is eventually consistent and therefore
+ * can be transiently negative when reset_batch_size() is pending.
+ */
+struct lru_gen_folio {
+ /* the aging increments the youngest generation number */
+ unsigned long max_seq;
+ /* the eviction increments the oldest generation numbers */
+ unsigned long min_seq[ANON_AND_FILE];
+ /* the birth time of each generation in jiffies */
+ unsigned long timestamps[MAX_NR_GENS];
+ /* the multi-gen LRU lists, lazily sorted on eviction */
+ struct list_head folios[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ /* the multi-gen LRU sizes, eventually consistent */
+ long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ /* the exponential moving average of refaulted */
+ unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS];
+ /* the exponential moving average of evicted+protected */
+ unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS];
+ /* the first tier doesn't need protection, hence the minus one */
+ unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS - 1];
+ /* can be modified without holding the LRU lock */
+ atomic_long_t evicted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
+ atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
+ /* whether the multi-gen LRU is enabled */
+ bool enabled;
+#ifdef CONFIG_MEMCG
+ /* the memcg generation this lru_gen_folio belongs to */
+ u8 gen;
+ /* the list segment this lru_gen_folio belongs to */
+ u8 seg;
+ /* per-node lru_gen_folio list for global reclaim */
+ struct hlist_nulls_node list;
+#endif
+};
+
+enum {
+ MM_LEAF_TOTAL, /* total leaf entries */
+ MM_LEAF_OLD, /* old leaf entries */
+ MM_LEAF_YOUNG, /* young leaf entries */
+ MM_NONLEAF_TOTAL, /* total non-leaf entries */
+ MM_NONLEAF_FOUND, /* non-leaf entries found in Bloom filters */
+ MM_NONLEAF_ADDED, /* non-leaf entries added to Bloom filters */
+ NR_MM_STATS
+};
+
+/* double-buffering Bloom filters */
+#define NR_BLOOM_FILTERS 2
+
+struct lru_gen_mm_state {
+ /* set to max_seq after each iteration */
+ unsigned long seq;
+ /* where the current iteration continues after */
+ struct list_head *head;
+ /* where the last iteration ended before */
+ struct list_head *tail;
+ /* Bloom filters flip after each iteration */
+ unsigned long *filters[NR_BLOOM_FILTERS];
+ /* the mm stats for debugging */
+ unsigned long stats[NR_HIST_GENS][NR_MM_STATS];
+};
+
+struct lru_gen_mm_walk {
+ /* the lruvec under reclaim */
+ struct lruvec *lruvec;
+ /* unstable max_seq from lru_gen_folio */
+ unsigned long max_seq;
+ /* the next address within an mm to scan */
+ unsigned long next_addr;
+ /* to batch promoted pages */
+ int nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ /* to batch the mm stats */
+ int mm_stats[NR_MM_STATS];
+ /* total batched items */
+ int batched;
+ bool can_swap;
+ bool force_scan;
+};
+
+void lru_gen_init_lruvec(struct lruvec *lruvec);
+void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
+
+#ifdef CONFIG_MEMCG
+
+/*
+ * For each node, memcgs are divided into two generations: the old and the
+ * young. For each generation, memcgs are randomly sharded into multiple bins
+ * to improve scalability. For each bin, the hlist_nulls is virtually divided
+ * into three segments: the head, the tail and the default.
+ *
+ * An onlining memcg is added to the tail of a random bin in the old generation.
+ * The eviction starts at the head of a random bin in the old generation. The
+ * per-node memcg generation counter, whose remainder (mod MEMCG_NR_GENS) indexes
+ * the old generation, is incremented when all its bins become empty.
+ *
+ * There are four operations:
+ * 1. MEMCG_LRU_HEAD, which moves a memcg to the head of a random bin in its
+ *    current generation (old or young) and updates its "seg" to "head";
+ * 2. MEMCG_LRU_TAIL, which moves a memcg to the tail of a random bin in its
+ *    current generation (old or young) and updates its "seg" to "tail";
+ * 3. MEMCG_LRU_OLD, which moves a memcg to the head of a random bin in the old
+ *    generation, updates its "gen" to "old" and resets its "seg" to "default";
+ * 4. MEMCG_LRU_YOUNG, which moves a memcg to the tail of a random bin in the
+ *    young generation, updates its "gen" to "young" and resets its "seg" to
+ *    "default".
+ *
+ * The events that trigger the above operations are:
+ * 1. Exceeding the soft limit, which triggers MEMCG_LRU_HEAD;
+ * 2. The first attempt to reclaim a memcg below low, which triggers
+ *    MEMCG_LRU_TAIL;
+ * 3. The first attempt to reclaim a memcg below the reclaimable size
+ *    threshold, which triggers MEMCG_LRU_TAIL;
+ * 4. The second attempt to reclaim a memcg below the reclaimable size
+ *    threshold, which triggers MEMCG_LRU_YOUNG;
+ * 5. Attempting to reclaim a memcg below min, which triggers MEMCG_LRU_YOUNG;
+ * 6. Finishing the aging on the eviction path, which triggers MEMCG_LRU_YOUNG;
+ * 7. Offlining a memcg, which triggers MEMCG_LRU_OLD.
+ *
+ * Note that memcg LRU only applies to global reclaim, and the round-robin
+ * incrementing of their max_seq counters ensures the eventual fairness to all
+ * eligible memcgs. For memcg reclaim, it still relies on mem_cgroup_iter().
+ */
+#define MEMCG_NR_GENS 2
+#define MEMCG_NR_BINS 8
+
+struct lru_gen_memcg {
+ /* the per-node memcg generation counter */
+ unsigned long seq;
+ /* each memcg has one lru_gen_folio per node */
+ unsigned long nr_memcgs[MEMCG_NR_GENS];
+ /* per-node lru_gen_folio list for global reclaim */
+ struct hlist_nulls_head fifo[MEMCG_NR_GENS][MEMCG_NR_BINS];
+ /* protects the above */
+ spinlock_t lock;
+};
+
+void lru_gen_init_pgdat(struct pglist_data *pgdat);
+
+void lru_gen_init_memcg(struct mem_cgroup *memcg);
+void lru_gen_exit_memcg(struct mem_cgroup *memcg);
+void lru_gen_online_memcg(struct mem_cgroup *memcg);
+void lru_gen_offline_memcg(struct mem_cgroup *memcg);
+void lru_gen_release_memcg(struct mem_cgroup *memcg);
+void lru_gen_soft_reclaim(struct lruvec *lruvec);
+
+#else /* !CONFIG_MEMCG */
+
+#define MEMCG_NR_GENS 1
+
+struct lru_gen_memcg {
+};
+
+static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
+{
+}
+
+#endif /* CONFIG_MEMCG */
+
+#else /* !CONFIG_LRU_GEN */
+
+static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
+{
+}
+
+static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
+{
+}
+
+static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
+{
+}
+
+#ifdef CONFIG_MEMCG
+
+static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_online_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_offline_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_release_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_soft_reclaim(struct lruvec *lruvec)
+{
+}
+
+#endif /* CONFIG_MEMCG */
+
+#endif /* CONFIG_LRU_GEN */
+
struct lruvec {
struct list_head lists[NR_LRU_LISTS];
- struct zone_reclaim_stat reclaim_stat;
- /* Evictions & activations on the inactive file list */
- atomic_long_t inactive_age;
+ /* per lruvec lru_lock for memcg */
+ spinlock_t lru_lock;
+ /*
+ * These track the cost of reclaiming one LRU - file or anon -
+ * over the other. As the observed cost of reclaiming one LRU
+ * increases, the reclaim scan balance tips toward the other.
+ */
+ unsigned long anon_cost;
+ unsigned long file_cost;
+ /* Non-resident age, driven by LRU movement */
+ atomic_long_t nonresident_age;
/* Refaults at the time of last reclaim cycle */
- unsigned long refaults;
+ unsigned long refaults[ANON_AND_FILE];
+ /* Various lruvec state flags (enum lruvec_flags) */
+ unsigned long flags;
+#ifdef CONFIG_LRU_GEN
+ /* evictable pages divided into generations */
+ struct lru_gen_folio lrugen;
+ /* to concurrently iterate lru_gen_mm_list */
+ struct lru_gen_mm_state mm_state;
+#endif
#ifdef CONFIG_MEMCG
struct pglist_data *pgdat;
#endif
};
-/* Mask used at gathering information at once (see memcontrol.c) */
-#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
-#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
-#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
-
-/* Isolate unmapped file */
+/* Isolate unmapped pages */
#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4)
@@ -256,30 +635,56 @@ enum zone_watermarks {
WMARK_MIN,
WMARK_LOW,
WMARK_HIGH,
+ WMARK_PROMO,
NR_WMARK
};
-#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
-#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
-#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
+/*
+ * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. One additional list
+ * for THP which will usually be GFP_MOVABLE. Even if it is another type,
+ * it should not contribute to serious fragmentation causing THP allocation
+ * failures.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define NR_PCP_THP 1
+#else
+#define NR_PCP_THP 0
+#endif
+#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))
+#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP)
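A worked example of the list count, assuming the usual MIGRATE_PCPTYPES = 3 and PAGE_ALLOC_COSTLY_ORDER = 3:

/* NR_LOWORDER_PCP_LISTS = 3 * (3 + 1) = 12 lists covering orders 0..3,
 * plus NR_PCP_THP (1 with CONFIG_TRANSPARENT_HUGEPAGE) for PMD-sized
 * pages, giving NR_PCP_LISTS = 13. */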
+
+#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
+#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
+#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
+#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
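A minimal sketch of how the accessors are meant to be used (illustrative only; the real logic lives in the page allocator's watermark checks):

#include <linux/vmstat.h>

/* True if @zone's free pages have dropped below its boosted min mark. */
static bool my_zone_below_min(struct zone *zone)
{
        return zone_page_state(zone, NR_FREE_PAGES) < min_wmark_pages(zone);
}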
+/* Fields and list protected by pagesets local_lock in page_alloc.c */
struct per_cpu_pages {
+ spinlock_t lock; /* Protects lists field */
int count; /* number of pages in the list */
int high; /* high watermark, emptying needed */
int batch; /* chunk size for buddy add/remove */
+ short free_factor; /* batch scaling factor during free */
+#ifdef CONFIG_NUMA
+ short expire; /* When 0, remote pagesets are drained */
+#endif
/* Lists of pages, one per migrate type stored on the pcp-lists */
- struct list_head lists[MIGRATE_PCPTYPES];
-};
+ struct list_head lists[NR_PCP_LISTS];
+} ____cacheline_aligned_in_smp;
-struct per_cpu_pageset {
- struct per_cpu_pages pcp;
-#ifdef CONFIG_NUMA
- s8 expire;
-#endif
+struct per_cpu_zonestat {
#ifdef CONFIG_SMP
- s8 stat_threshold;
s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
+ s8 stat_threshold;
+#endif
+#ifdef CONFIG_NUMA
+ /*
+ * Low priority inaccurate counters that are only folded
+ * on demand. Use a large type to avoid the overhead of
+ * folding during refresh_cpu_vm_stats.
+ */
+ unsigned long vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
#endif
};
@@ -291,33 +696,20 @@ struct per_cpu_nodestat {
#endif /* !__GENERATING_BOUNDS.H */
enum zone_type {
-#ifdef CONFIG_ZONE_DMA
/*
- * ZONE_DMA is used when there are devices that are not able
- * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
- * carve out the portion of memory that is needed for these devices.
- * The range is arch specific.
- *
- * Some examples
- *
- * Architecture Limit
- * ---------------------------
- * parisc, ia64, sparc <4G
- * s390 <2G
- * arm Various
- * alpha Unlimited or 0-16MB.
- *
- * i386, x86_64 and multiple other arches
- * <16M.
+ * ZONE_DMA and ZONE_DMA32 are used when there are peripherals not able
+ * to DMA to all of the addressable memory (ZONE_NORMAL).
+ * On architectures where this area covers the whole 32 bit address
+ * space ZONE_DMA32 is used. ZONE_DMA is left for the ones with smaller
+ * DMA addressing constraints. This distinction is important as a 32bit
+ * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
+ * platforms may need both zones as they support peripherals with
+ * different DMA addressing limitations.
*/
+#ifdef CONFIG_ZONE_DMA
ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
- /*
- * x86_64 needs two ZONE_DMAs because it supports devices that are
- * only able to do DMA to the lower 16M but also 32 bit devices that
- * can only do DMA areas below 4G.
- */
ZONE_DMA32,
#endif
/*
@@ -337,6 +729,55 @@ enum zone_type {
*/
ZONE_HIGHMEM,
#endif
+ /*
+ * ZONE_MOVABLE is similar to ZONE_NORMAL, except that it contains
+ * movable pages with few exceptional cases described below. Main use
+ * cases for ZONE_MOVABLE are to make memory offlining/unplug more
+ * likely to succeed, and to locally limit unmovable allocations - e.g.,
+ * to increase the number of THP/huge pages. Notable special cases are:
+ *
+ * 1. Pinned pages: (long-term) pinning of movable pages might
+ * essentially turn such pages unmovable. Therefore, we do not allow
+ * pinning long-term pages in ZONE_MOVABLE. When pages are pinned and
+ * faulted, they come from the right zone right away. However, it is
+ *    still possible that the address space already has pages in
+ *    ZONE_MOVABLE at the time when pages are pinned (i.e. the user has
+ *    touched that memory before pinning). In such a case we migrate them
+ *    to a different zone. When migration fails, pinning fails.
+ * 2. memblock allocations: kernelcore/movablecore setups might create
+ * situations where ZONE_MOVABLE contains unmovable allocations
+ * after boot. Memory offlining and allocations fail early.
+ * 3. Memory holes: kernelcore/movablecore setups might create very rare
+ * situations where ZONE_MOVABLE contains memory holes after boot,
+ * for example, if we have sections that are only partially
+ * populated. Memory offlining and allocations fail early.
+ * 4. PG_hwpoison pages: while poisoned pages can be skipped during
+ * memory offlining, such pages cannot be allocated.
+ * 5. Unmovable PG_offline pages: in paravirtualized environments,
+ * hotplugged memory blocks might only partially be managed by the
+ * buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The
+ *    parts not managed by the buddy are unmovable PG_offline pages. In
+ * some cases (virtio-mem), such pages can be skipped during
+ * memory offlining, however, cannot be moved/allocated. These
+ * techniques might use alloc_contig_range() to hide previously
+ * exposed pages from the buddy again (e.g., to implement some sort
+ * of memory unplug in virtio-mem).
+ * 6. ZERO_PAGE(0): kernelcore/movablecore setups might create
+ *    situations where ZERO_PAGE(0), which is allocated differently
+ *    on different platforms, may end up in a movable zone. ZERO_PAGE(0)
+ *    cannot be migrated.
+ * 7. Memory-hotplug: when using memmap_on_memory and onlining the
+ * memory to the MOVABLE zone, the vmemmap pages are also placed in
+ * such zone. Such pages cannot be really moved around as they are
+ * self-stored in the range, but they are treated as movable when
+ * the range they describe is about to be offlined.
+ *
+ * In general, no unmovable allocations that degrade memory offlining
+ * should end up in ZONE_MOVABLE. Allocators (like alloc_contig_range())
+ * have to expect that migrating pages in ZONE_MOVABLE can fail (even
+ * if has_unmovable_pages() states that there are no unmovable pages,
+ * there can be false negatives).
+ */
ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
ZONE_DEVICE,
@@ -347,11 +788,14 @@ enum zone_type {
#ifndef __GENERATING_BOUNDS_H
+#define ASYNC_AND_SYNC 2
+
struct zone {
/* Read-mostly fields */
/* zone watermarks, access with *_wmark_pages(zone) macros */
- unsigned long watermark[NR_WMARK];
+ unsigned long _watermark[NR_WMARK];
+ unsigned long watermark_boost;
unsigned long nr_reserved_highatomic;
@@ -370,7 +814,14 @@ struct zone {
int node;
#endif
struct pglist_data *zone_pgdat;
- struct per_cpu_pageset __percpu *pageset;
+ struct per_cpu_pages __percpu *per_cpu_pageset;
+ struct per_cpu_zonestat __percpu *per_cpu_zonestats;
+ /*
+ * the high and batch values are copied to individual pagesets for
+ * faster access
+ */
+ int pageset_high;
+ int pageset_batch;
#ifndef CONFIG_SPARSEMEM
/*
@@ -392,11 +843,18 @@ struct zone {
* is calculated as:
* present_pages = spanned_pages - absent_pages(pages in holes);
*
+ * present_early_pages is present pages existing within the zone
+ * located on memory available since early boot, excluding hotplugged
+ * memory.
+ *
* managed_pages is present pages managed by the buddy system, which
* is calculated as (reserved_pages includes pages allocated by the
* bootmem allocator):
* managed_pages = present_pages - reserved_pages;
*
+ * cma_pages is the number of present pages assigned for CMA use
+ * (MIGRATE_CMA).
+ *
* So present_pages may be used by memory hotplug or memory power
* management logic to figure out unmanaged pages by checking
* (present_pages - managed_pages). And managed_pages should be used
@@ -415,18 +873,18 @@ struct zone {
* give them a chance of being in the same cacheline.
*
* Write access to present_pages at runtime should be protected by
- * mem_hotplug_begin/end(). Any reader who can't tolerant drift of
- * present_pages should get_online_mems() to get a stable value.
- *
- * Read access to managed_pages should be safe because it's unsigned
- * long. Write access to zone->managed_pages and totalram_pages are
- * protected by managed_page_count_lock at runtime. Idealy only
- * adjust_managed_page_count() should be used instead of directly
- * touching zone->managed_pages and totalram_pages.
+ * mem_hotplug_begin/done(). Any reader who can't tolerate drift of
+ * present_pages should use get_online_mems() to get a stable value.
*/
- unsigned long managed_pages;
+ atomic_long_t managed_pages;
unsigned long spanned_pages;
unsigned long present_pages;
+#if defined(CONFIG_MEMORY_HOTPLUG)
+ unsigned long present_early_pages;
+#endif
+#ifdef CONFIG_CMA
+ unsigned long cma_pages;
+#endif
const char *name;
@@ -447,10 +905,10 @@ struct zone {
int initialized;
/* Write-intensive fields used from the page allocator */
- ZONE_PADDING(_pad1_)
+ CACHELINE_PADDING(_pad1_);
/* free areas of different sizes */
- struct free_area free_area[MAX_ORDER];
+ struct free_area free_area[MAX_ORDER + 1];
/* zone flags, see below */
unsigned long flags;
@@ -459,7 +917,7 @@ struct zone {
spinlock_t lock;
/* Write-intensive fields used by compaction and vmstats. */
- ZONE_PADDING(_pad2_)
+ CACHELINE_PADDING(_pad2_);
/*
* When free pages are below this point, additional steps are taken
@@ -471,8 +929,10 @@ struct zone {
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
/* pfn where compaction free scanner should start */
unsigned long compact_cached_free_pfn;
- /* pfn where async and sync compaction migration scanner should start */
- unsigned long compact_cached_migrate_pfn[2];
+ /* pfn where compaction migration scanner should start */
+ unsigned long compact_cached_migrate_pfn[ASYNC_AND_SYNC];
+ unsigned long compact_init_migrate_pfn;
+ unsigned long compact_init_free_pfn;
#endif
#ifdef CONFIG_COMPACTION
@@ -480,6 +940,7 @@ struct zone {
* On compaction failure, 1<<compact_defer_shift compactions
* are skipped before trying again. The number attempted since
* last failure is tracked with compact_considered.
+ * compact_order_failed is the lowest order at which compaction failed.
*/
unsigned int compact_considered;
unsigned int compact_defer_shift;
@@ -493,15 +954,13 @@ struct zone {
bool contiguous;
- ZONE_PADDING(_pad3_)
+ CACHELINE_PADDING(_pad3_);
/* Zone statistics */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+ atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
} ____cacheline_internodealigned_in_smp;
enum pgdat_flags {
- PGDAT_CONGESTED, /* pgdat has many dirty pages backed by
- * a congested BDI
- */
PGDAT_DIRTY, /* reclaim scanning has recently found
* many dirty file pages at the tail
* of the LRU.
@@ -512,6 +971,27 @@ enum pgdat_flags {
PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */
};
+enum zone_flags {
+ ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks.
+ * Cleared when kswapd is woken.
+ */
+ ZONE_RECLAIM_ACTIVE, /* kswapd may be scanning the zone. */
+};
+
+static inline unsigned long zone_managed_pages(struct zone *zone)
+{
+ return (unsigned long)atomic_long_read(&zone->managed_pages);
+}
+
+static inline unsigned long zone_cma_pages(struct zone *zone)
+{
+#ifdef CONFIG_CMA
+ return zone->cma_pages;
+#else
+ return 0;
+#endif
+}
+
static inline unsigned long zone_end_pfn(const struct zone *zone)
{
return zone->zone_start_pfn + zone->spanned_pages;
@@ -532,6 +1012,112 @@ static inline bool zone_is_empty(struct zone *zone)
return zone->spanned_pages == 0;
}
+#ifndef BUILD_VDSO32_64
+/*
+ * The zone field is never updated after free_area_init_core()
+ * sets it, so none of the operations on it need to be atomic.
+ */
+
+/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
+#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
+#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
+#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
+#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
+#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
+#define LRU_GEN_PGOFF (KASAN_TAG_PGOFF - LRU_GEN_WIDTH)
+#define LRU_REFS_PGOFF (LRU_GEN_PGOFF - LRU_REFS_WIDTH)
+
+/*
+ * Define the bit shifts to access each section. For non-existent
+ * sections we define the shift as 0; that plus a 0 mask ensures
+ * the compiler will optimise away reference to them.
+ */
+#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
+#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
+#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
+#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
+#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
+
+/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
+#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF) ? \
+ SECTIONS_PGOFF : ZONES_PGOFF)
+#else
+#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
+#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF) ? \
+ NODES_PGOFF : ZONES_PGOFF)
+#endif
+
+#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
+
+#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
+#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
+#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
+#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
+#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1)
+#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
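
For illustration, a userspace model of the packing scheme defined
above; ZONES_WIDTH = 3 and NODES_WIDTH = 6 are assumed values, and the
SECTION/LAST_CPUPID/KASAN/LRU fields are omitted:

#include <stdio.h>

#define BITS_PER_LONG   (sizeof(unsigned long) * 8)

#define ZONES_WIDTH     3
#define NODES_WIDTH     6

#define NODES_PGOFF     (BITS_PER_LONG - NODES_WIDTH)
#define ZONES_PGOFF     (NODES_PGOFF - ZONES_WIDTH)

#define ZONES_PGSHIFT   (ZONES_PGOFF * (ZONES_WIDTH != 0))
#define NODES_PGSHIFT   (NODES_PGOFF * (NODES_WIDTH != 0))

#define ZONES_MASK      ((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK      ((1UL << NODES_WIDTH) - 1)

int main(void)
{
        unsigned long flags = 0;
        unsigned long zone = 2, nid = 5;  /* e.g. ZONE_NORMAL on node 5 */

        /* set once at init time, as free_area_init_core() does */
        flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
        flags |= (nid & NODES_MASK) << NODES_PGSHIFT;

        /* the page_zonenum()/page_to_nid() style decode */
        printf("zone=%lu nid=%lu\n",
               (flags >> ZONES_PGSHIFT) & ZONES_MASK,
               (flags >> NODES_PGSHIFT) & NODES_MASK);
        return 0;
}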
+
+static inline enum zone_type page_zonenum(const struct page *page)
+{
+ ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
+ return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
+}
+
+static inline enum zone_type folio_zonenum(const struct folio *folio)
+{
+ return page_zonenum(&folio->page);
+}
+
+#ifdef CONFIG_ZONE_DEVICE
+static inline bool is_zone_device_page(const struct page *page)
+{
+ return page_zonenum(page) == ZONE_DEVICE;
+}
+
+/*
+ * Consecutive zone device pages should not be merged into the same sgl
+ * or bvec segment with other types of pages or if they belong to different
+ * pgmaps. Otherwise getting the pgmap of a given segment is not possible
+ * without scanning the entire segment. This helper returns true either if
+ * both pages are not zone device pages or both pages are zone device pages
+ * with the same pgmap.
+ */
+static inline bool zone_device_pages_have_same_pgmap(const struct page *a,
+ const struct page *b)
+{
+ if (is_zone_device_page(a) != is_zone_device_page(b))
+ return false;
+ if (!is_zone_device_page(a))
+ return true;
+ return a->pgmap == b->pgmap;
+}
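
A userspace sketch of the merge rule this helper encodes (toy_* types
are stand-ins for struct page and struct dev_pagemap):

#include <stdbool.h>
#include <stdio.h>

struct toy_pgmap { int id; };

struct toy_page {
        bool is_zone_device;
        struct toy_pgmap *pgmap;        /* valid only for device pages */
};

static bool toy_same_pgmap(const struct toy_page *a, const struct toy_page *b)
{
        if (a->is_zone_device != b->is_zone_device)
                return false;
        if (!a->is_zone_device)
                return true;
        return a->pgmap == b->pgmap;
}

int main(void)
{
        struct toy_pgmap m0 = { 0 }, m1 = { 1 };
        struct toy_page ram = { false, NULL };
        struct toy_page dev0 = { true, &m0 }, dev1 = { true, &m1 };

        printf("ram+dev0 merge:  %d\n", toy_same_pgmap(&ram, &dev0));   /* 0 */
        printf("dev0+dev1 merge: %d\n", toy_same_pgmap(&dev0, &dev1));  /* 0 */
        printf("dev0+dev0 merge: %d\n", toy_same_pgmap(&dev0, &dev0));  /* 1 */
        return 0;
}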
+
+extern void memmap_init_zone_device(struct zone *, unsigned long,
+ unsigned long, struct dev_pagemap *);
+#else
+static inline bool is_zone_device_page(const struct page *page)
+{
+ return false;
+}
+static inline bool zone_device_pages_have_same_pgmap(const struct page *a,
+ const struct page *b)
+{
+ return true;
+}
+#endif
+
+static inline bool folio_is_zone_device(const struct folio *folio)
+{
+ return is_zone_device_page(&folio->page);
+}
+
+static inline bool is_zone_movable_page(const struct page *page)
+{
+ return page_zonenum(page) == ZONE_MOVABLE;
+}
+#endif
+
/*
* Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
* intersection with the given zone
@@ -597,9 +1183,44 @@ struct zonelist {
struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};
-#ifndef CONFIG_DISCONTIGMEM
-/* The array of struct pages - for discontigmem use pgdat->lmem_map */
+/*
+ * The array of struct pages for flatmem.
+ * It must be declared for SPARSEMEM as well because there are configurations
+ * that rely on it.
+ */
extern struct page *mem_map;
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+struct deferred_split {
+ spinlock_t split_queue_lock;
+ struct list_head split_queue;
+ unsigned long split_queue_len;
+};
+#endif
+
+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Per NUMA node memory failure handling statistics.
+ */
+struct memory_failure_stats {
+ /*
+ * Number of raw pages poisoned.
+ * Cases not accounted: memory outside kernel control, offline page,
+ * arch-specific memory_failure (SGX), hwpoison_filter() filtered
+ * error events, and unpoison actions from hwpoison_unpoison.
+ */
+ unsigned long total;
+ /*
+ * Recovery results of poisoned raw pages handled by memory_failure,
+ * in sync with mf_result.
+ * total = ignored + failed + delayed + recovered.
+ * total * PAGE_SIZE * #nodes = /proc/meminfo/HardwareCorrupted.
+ */
+ unsigned long ignored;
+ unsigned long failed;
+ unsigned long delayed;
+ unsigned long recovered;
+};
#endif
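
A toy check of the invariant documented above, total = ignored +
failed + delayed + recovered (names are illustrative):

#include <assert.h>

struct toy_mf_stats {
        unsigned long total, ignored, failed, delayed, recovered;
};

static void toy_account(struct toy_mf_stats *s, unsigned long *bucket)
{
        s->total++;
        (*bucket)++;
        assert(s->total == s->ignored + s->failed + s->delayed + s->recovered);
}

int main(void)
{
        struct toy_mf_stats s = { 0 };

        toy_account(&s, &s.recovered);
        toy_account(&s, &s.delayed);
        return 0;
}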
/*
@@ -610,28 +1231,38 @@ extern struct page *mem_map;
* Memory statistics and page replacement data structures are maintained on a
* per-zone basis.
*/
-struct bootmem_data;
typedef struct pglist_data {
+ /*
+ * node_zones contains just the zones for THIS node. Not all of the
+ * zones may be populated, but it is the full list. It is referenced by
+ * this node's node_zonelists as well as other nodes' node_zonelists.
+ */
struct zone node_zones[MAX_NR_ZONES];
+
+ /*
+ * node_zonelists contains references to all zones in all nodes.
+ * Generally the first zones will be references to this node's
+ * node_zones.
+ */
struct zonelist node_zonelists[MAX_ZONELISTS];
- int nr_zones;
-#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
+
+ int nr_zones; /* number of populated zones in this node */
+#ifdef CONFIG_FLATMEM /* means !SPARSEMEM */
struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
struct page_ext *node_page_ext;
#endif
#endif
-#ifndef CONFIG_NO_BOOTMEM
- struct bootmem_data *bdata;
-#endif
-#ifdef CONFIG_MEMORY_HOTPLUG
+#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
- * Must be held any time you expect node_start_pfn, node_present_pages
- * or node_spanned_pages stay constant. Holding this will also
- * guarantee that any pfn_valid() stays that way.
+ * Must be held any time you expect node_start_pfn,
+ * node_present_pages, node_spanned_pages or nr_zones to stay constant.
+ * Also synchronizes pgdat->first_deferred_pfn during deferred page
+ * init.
*
* pgdat_resize_lock() and pgdat_resize_unlock() are provided to
- * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG.
+ * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
+ * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
*
* Nests above zone->lock and zone->span_seqlock
*/
@@ -644,28 +1275,28 @@ typedef struct pglist_data {
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
- struct task_struct *kswapd; /* Protected by
- mem_hotplug_begin/end() */
+
+ /* workqueues for throttling reclaim for different reasons. */
+ wait_queue_head_t reclaim_wait[NR_VMSCAN_THROTTLE];
+
+ atomic_t nr_writeback_throttled; /* nr of writeback-throttled tasks */
+ unsigned long nr_reclaim_start; /* nr of pages written while throttled,
+ * sampled when throttling started. */
+#ifdef CONFIG_MEMORY_HOTPLUG
+ struct mutex kswapd_lock;
+#endif
+ struct task_struct *kswapd; /* Protected by kswapd_lock */
int kswapd_order;
- enum zone_type kswapd_classzone_idx;
+ enum zone_type kswapd_highest_zoneidx;
int kswapd_failures; /* Number of 'reclaimed == 0' runs */
#ifdef CONFIG_COMPACTION
int kcompactd_max_order;
- enum zone_type kcompactd_classzone_idx;
+ enum zone_type kcompactd_highest_zoneidx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
-#endif
-#ifdef CONFIG_NUMA_BALANCING
- /* Lock serializing the migrate rate limiting window */
- spinlock_t numabalancing_migrate_lock;
-
- /* Rate limiting time interval */
- unsigned long numabalancing_migrate_next_window;
-
- /* Number of pages migrated during the rate limiting time interval */
- unsigned long numabalancing_migrate_nr_pages;
+ bool proactive_compact_trigger;
#endif
/*
* This is a per-node reserve of pages that are not available
@@ -675,15 +1306,14 @@ typedef struct pglist_data {
#ifdef CONFIG_NUMA
/*
- * zone reclaim becomes active if more unmapped pages exist.
+ * node reclaim becomes active if more unmapped pages exist.
*/
unsigned long min_unmapped_pages;
unsigned long min_slab_pages;
#endif /* CONFIG_NUMA */
/* Write-intensive fields used by page reclaim */
- ZONE_PADDING(_pad1_)
- spinlock_t lru_lock;
+ CACHELINE_PADDING(_pad1_);
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
@@ -691,100 +1321,91 @@ typedef struct pglist_data {
* is the first PFN that needs to be initialised.
*/
unsigned long first_deferred_pfn;
- unsigned long static_init_size;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- spinlock_t split_queue_lock;
- struct list_head split_queue;
- unsigned long split_queue_len;
+ struct deferred_split deferred_split_queue;
#endif
+#ifdef CONFIG_NUMA_BALANCING
+ /* start time in ms of current promote rate limit period */
+ unsigned int nbp_rl_start;
+ /* number of promote candidate pages at start time of current rate limit period */
+ unsigned long nbp_rl_nr_cand;
+ /* promote threshold in ms */
+ unsigned int nbp_threshold;
+ /* start time in ms of current promote threshold adjustment period */
+ unsigned int nbp_th_start;
+ /*
+ * number of promote candidate pages at start time of current promote
+ * threshold adjustment period
+ */
+ unsigned long nbp_th_nr_cand;
+#endif
/* Fields commonly accessed by the page reclaim scanner */
- struct lruvec lruvec;
/*
- * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
- * this node's LRU. Maintained by the pageout code.
+ * NOTE: THIS IS UNUSED IF MEMCG IS ENABLED.
+ *
+ * Use mem_cgroup_lruvec() to look up lruvecs.
*/
- unsigned int inactive_ratio;
+ struct lruvec __lruvec;
unsigned long flags;
- ZONE_PADDING(_pad2_)
+#ifdef CONFIG_LRU_GEN
+ /* kswapd mm walk data */
+ struct lru_gen_mm_walk mm_walk;
+ /* lru_gen_folio list */
+ struct lru_gen_memcg memcg_lru;
+#endif
+
+ CACHELINE_PADDING(_pad2_);
/* Per-node vmstats */
struct per_cpu_nodestat __percpu *per_cpu_nodestats;
atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS];
+#ifdef CONFIG_NUMA
+ struct memory_tier __rcu *memtier;
+#endif
+#ifdef CONFIG_MEMORY_FAILURE
+ struct memory_failure_stats mf_stats;
+#endif
} pg_data_t;
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
-#ifdef CONFIG_FLAT_NODE_MEM_MAP
-#define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr))
-#else
-#define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr))
-#endif
-#define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr))
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
-static inline spinlock_t *zone_lru_lock(struct zone *zone)
-{
- return &zone->zone_pgdat->lru_lock;
-}
-
-static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
-{
- return &pgdat->lruvec;
-}
static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}
-static inline bool pgdat_is_empty(pg_data_t *pgdat)
-{
- return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
-}
-
-static inline int zone_id(const struct zone *zone)
-{
- struct pglist_data *pgdat = zone->zone_pgdat;
-
- return zone - pgdat->node_zones;
-}
-
-#ifdef CONFIG_ZONE_DEVICE
-static inline bool is_dev_zone(const struct zone *zone)
-{
- return zone_id(zone) == ZONE_DEVICE;
-}
-#else
-static inline bool is_dev_zone(const struct zone *zone)
-{
- return false;
-}
-#endif
-
#include <linux/memory_hotplug.h>
-extern struct mutex zonelists_mutex;
-void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
-void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
+void build_all_zonelists(pg_data_t *pgdat);
+void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
+ enum zone_type highest_zoneidx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
- int classzone_idx, unsigned int alloc_flags,
+ int highest_zoneidx, unsigned int alloc_flags,
long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
- unsigned long mark, int classzone_idx,
+ unsigned long mark, int highest_zoneidx,
unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
- unsigned long mark, int classzone_idx);
-enum memmap_context {
- MEMMAP_EARLY,
- MEMMAP_HOTPLUG,
+ unsigned long mark, int highest_zoneidx);
+/*
+ * Memory initialization context, used to differentiate memory added by
+ * the platform statically or via memory hotplug interface.
+ */
+enum meminit_context {
+ MEMINIT_EARLY,
+ MEMINIT_HOTPLUG,
};
+
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
unsigned long size);
@@ -795,33 +1416,33 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
#ifdef CONFIG_MEMCG
return lruvec->pgdat;
#else
- return container_of(lruvec, struct pglist_data, lruvec);
+ return container_of(lruvec, struct pglist_data, __lruvec);
#endif
}
-extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);
-
-#ifdef CONFIG_HAVE_MEMORY_PRESENT
-void memory_present(int nid, unsigned long start, unsigned long end);
-#else
-static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
-#endif
-
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; }
#endif
-#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
-unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
-#endif
-
/*
* zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
*/
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
+#ifdef CONFIG_ZONE_DEVICE
+static inline bool zone_is_zone_device(struct zone *zone)
+{
+ return zone_idx(zone) == ZONE_DEVICE;
+}
+#else
+static inline bool zone_is_zone_device(struct zone *zone)
+{
+ return false;
+}
+#endif
+
/*
* Returns true if a zone has pages managed by the buddy allocator.
* All the reclaim decisions have to use this function rather than
@@ -830,7 +1451,7 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
*/
static inline bool managed_zone(struct zone *zone)
{
- return zone->managed_pages;
+ return zone_managed_pages(zone);
}
/* Returns true if a zone has memory */
@@ -839,76 +1460,93 @@ static inline bool populated_zone(struct zone *zone)
return zone->present_pages;
}
-extern int movable_zone;
+#ifdef CONFIG_NUMA
+static inline int zone_to_nid(struct zone *zone)
+{
+ return zone->node;
+}
-#ifdef CONFIG_HIGHMEM
-static inline int zone_movable_is_highmem(void)
+static inline void zone_set_nid(struct zone *zone, int nid)
{
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
- return movable_zone == ZONE_HIGHMEM;
+ zone->node = nid;
+}
#else
- return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
-#endif
+static inline int zone_to_nid(struct zone *zone)
+{
+ return 0;
}
+
+static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif
+extern int movable_zone;
+
static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
return (idx == ZONE_HIGHMEM ||
- (idx == ZONE_MOVABLE && zone_movable_is_highmem()));
+ (idx == ZONE_MOVABLE && movable_zone == ZONE_HIGHMEM));
#else
return 0;
#endif
}
/**
- * is_highmem - helper function to quickly check if a struct zone is a
+ * is_highmem - helper function to quickly check if a struct zone is a
* highmem zone or not. This is an attempt to keep references
* to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
- * @zone - pointer to struct zone variable
+ * @zone: pointer to struct zone variable
+ * Return: 1 for a highmem zone, 0 otherwise
*/
static inline int is_highmem(struct zone *zone)
{
-#ifdef CONFIG_HIGHMEM
return is_highmem_idx(zone_idx(zone));
+}
+
+#ifdef CONFIG_ZONE_DMA
+bool has_managed_dma(void);
#else
- return 0;
-#endif
+static inline bool has_managed_dma(void)
+{
+ return false;
}
+#endif
/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
-int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
-int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
+
+int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *,
+ loff_t *);
+int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *,
+ size_t *, loff_t *);
+extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
+int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *,
+ size_t *, loff_t *);
+int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *, int,
+ void *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
+ void *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-
-extern int numa_zonelist_order_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
+ void *, size_t *, loff_t *);
+int numa_zonelist_order_handler(struct ctl_table *, int,
+ void *, size_t *, loff_t *);
+extern int percpu_pagelist_high_fraction;
extern char numa_zonelist_order[];
-#define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */
+#define NUMA_ZONELIST_ORDER_LEN 16
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
extern struct pglist_data contig_page_data;
-#define NODE_DATA(nid) (&contig_page_data)
-#define NODE_MEM_MAP(nid) mem_map
+static inline struct pglist_data *NODE_DATA(int nid)
+{
+ return &contig_page_data;
+}
-#else /* CONFIG_NEED_MULTIPLE_NODES */
+#else /* CONFIG_NUMA */
#include <asm/mmzone.h>
-#endif /* !CONFIG_NEED_MULTIPLE_NODES */
+#endif /* !CONFIG_NUMA */
extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
@@ -916,7 +1554,7 @@ extern struct zone *next_zone(struct zone *zone);
/**
* for_each_online_pgdat - helper macro to iterate over all online nodes
- * @pgdat - pointer to a pg_data_t variable
+ * @pgdat: pointer to a pg_data_t variable
*/
#define for_each_online_pgdat(pgdat) \
for (pgdat = first_online_pgdat(); \
@@ -924,7 +1562,7 @@ extern struct zone *next_zone(struct zone *zone);
pgdat = next_online_pgdat(pgdat))
/**
* for_each_zone - helper macro to iterate over all memory zones
- * @zone - pointer to struct zone variable
+ * @zone: pointer to struct zone variable
*
* The user only needs to declare the zone variable, for_each_zone
* fills it in.
@@ -954,12 +1592,7 @@ static inline int zonelist_zone_idx(struct zoneref *zoneref)
static inline int zonelist_node_idx(struct zoneref *zoneref)
{
-#ifdef CONFIG_NUMA
- /* zone_to_nid not available in this context */
- return zoneref->zone->node;
-#else
- return 0;
-#endif /* CONFIG_NUMA */
+ return zone_to_nid(zoneref->zone);
}
struct zoneref *__next_zones_zonelist(struct zoneref *z,
@@ -968,15 +1601,18 @@ struct zoneref *__next_zones_zonelist(struct zoneref *z,
/**
* next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
- * @z - The cursor used as a starting point for the search
- * @highest_zoneidx - The zone index of the highest zone to return
- * @nodes - An optional nodemask to filter the zonelist with
+ * @z: The cursor used as a starting point for the search
+ * @highest_zoneidx: The zone index of the highest zone to return
+ * @nodes: An optional nodemask to filter the zonelist with
*
* This function returns the next zone at or below a given zone index that is
* within the allowed nodemask using a cursor as the starting point for the
* search. The zoneref returned is a cursor that represents the current zone
* being examined. It should be advanced by one before calling
* next_zones_zonelist again.
+ *
+ * Return: the next zone at or below highest_zoneidx within the allowed
+ * nodemask using a cursor within a zonelist as a starting point
*/
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
enum zone_type highest_zoneidx,
@@ -989,10 +1625,9 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
/**
* first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
- * @zonelist - The zonelist to search for a suitable zone
- * @highest_zoneidx - The zone index of the highest zone to return
- * @nodes - An optional nodemask to filter the zonelist with
- * @return - Zoneref pointer for the first suitable zone found (see below)
+ * @zonelist: The zonelist to search for a suitable zone
+ * @highest_zoneidx: The zone index of the highest zone to return
+ * @nodes: An optional nodemask to filter the zonelist with
*
* This function returns the first zone at or below a given zone index that is
* within the allowed nodemask. The zoneref returned is a cursor that can be
@@ -1002,6 +1637,8 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
* When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
* never NULL). This may happen either genuinely, or due to concurrent nodemask
* update due to cpuset modification.
+ *
+ * Return: Zoneref pointer for the first suitable zone found
*/
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
enum zone_type highest_zoneidx,
@@ -1013,11 +1650,11 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
/**
* for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
- * @zone - The current zone in the iterator
- * @z - The current pointer within zonelist->zones being iterated
- * @zlist - The zonelist being iterated
- * @highidx - The zone index of the highest zone to return
- * @nodemask - Nodemask allowed by the allocator
+ * @zone: The current zone in the iterator
+ * @z: The current pointer within zonelist->_zonerefs being iterated
+ * @zlist: The zonelist being iterated
+ * @highidx: The zone index of the highest zone to return
+ * @nodemask: Nodemask allowed by the allocator
*
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask.
@@ -1028,7 +1665,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
z = next_zones_zonelist(++z, highidx, nodemask), \
zone = zonelist_zone(z))
-#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
+#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
for (zone = z->zone; \
zone; \
z = next_zones_zonelist(++z, highidx, nodemask), \
@@ -1037,27 +1674,40 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
/**
* for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
- * @zone - The current zone in the iterator
- * @z - The current pointer within zonelist->zones being iterated
- * @zlist - The zonelist being iterated
- * @highidx - The zone index of the highest zone to return
+ * @zone: The current zone in the iterator
+ * @z: The current pointer within zonelist->zones being iterated
+ * @zlist: The zonelist being iterated
+ * @highidx: The zone index of the highest zone to return
*
 * This iterator iterates through all zones at or below a given zone index.
*/
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
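
A userspace model of the cursor-based walk these iterators perform;
nodemask filtering is dropped and toy_* names are stand-ins:

#include <stdio.h>

struct toy_zone { const char *name; };
struct toy_zoneref { struct toy_zone *zone; int zone_idx; };

static struct toy_zoneref *toy_next_zones_zonelist(struct toy_zoneref *z,
                                                   int highest_zoneidx)
{
        /* skip zones above the requested index; NULL zone terminates */
        while (z->zone && z->zone_idx > highest_zoneidx)
                z++;
        return z;
}

int main(void)
{
        struct toy_zone movable = { "Movable" }, normal = { "Normal" },
                        dma = { "DMA" };
        struct toy_zoneref zonelist[] = {       /* highest zone first */
                { &movable, 3 }, { &normal, 1 }, { &dma, 0 }, { NULL, 0 },
        };
        struct toy_zoneref *z;
        struct toy_zone *zone;

        /* the for_each_zone_zonelist() pattern, limited to idx <= 1 */
        for (z = toy_next_zones_zonelist(zonelist, 1), zone = z->zone;
             zone;
             z = toy_next_zones_zonelist(++z, 1), zone = z->zone)
                printf("%s\n", zone->name);     /* Normal, then DMA */
        return 0;
}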
-#ifdef CONFIG_SPARSEMEM
-#include <asm/sparsemem.h>
-#endif
-
-#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
- !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
-static inline unsigned long early_pfn_to_nid(unsigned long pfn)
+/* Whether the 'nodes' are all movable nodes */
+static inline bool movable_only_nodes(nodemask_t *nodes)
{
- BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
- return 0;
+ struct zonelist *zonelist;
+ struct zoneref *z;
+ int nid;
+
+ if (nodes_empty(*nodes))
+ return false;
+
+ /*
+ * We can choose an arbitrary node from the nodemask to get a
+ * zonelist as they are interlinked. We just need to find
+ * at least one zone that can satisfy kernel allocations.
+ */
+ nid = first_node(*nodes);
+ zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
+ z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
+ return !z->zone;
}
+
+#ifdef CONFIG_SPARSEMEM
+#include <asm/sparsemem.h>
#endif
#ifdef CONFIG_FLATMEM
@@ -1067,8 +1717,6 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
#ifdef CONFIG_SPARSEMEM
/*
- * SECTION_SHIFT #bits space required to store a section #
- *
* PA_SECTION_SHIFT physical address to/from section number
* PFN_SECTION_SHIFT pfn to/from section number
*/
@@ -1083,16 +1731,48 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
#define SECTION_BLOCKFLAGS_BITS \
((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
-#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
+#if (MAX_ORDER + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif
-#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
-#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
+static inline unsigned long pfn_to_section_nr(unsigned long pfn)
+{
+ return pfn >> PFN_SECTION_SHIFT;
+}
+static inline unsigned long section_nr_to_pfn(unsigned long sec)
+{
+ return sec << PFN_SECTION_SHIFT;
+}
#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
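
A worked example of the section arithmetic, assuming the x86-64 values
SECTION_SIZE_BITS = 27 and PAGE_SHIFT = 12 (both are arch-specific):

#include <stdio.h>

#define PAGE_SHIFT              12
#define SECTION_SIZE_BITS       27
#define PFN_SECTION_SHIFT       (SECTION_SIZE_BITS - PAGE_SHIFT)  /* 15 */
#define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT)        /* 32768 */
#define PAGE_SECTION_MASK       (~(PAGES_PER_SECTION - 1))

int main(void)
{
        unsigned long pfn = 0x123456;

        printf("section %lu, first pfn of section %#lx\n",
               pfn >> PFN_SECTION_SHIFT,        /* pfn_to_section_nr(): 36 */
               pfn & PAGE_SECTION_MASK);        /* SECTION_ALIGN_DOWN(): 0x120000 */
        return 0;
}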
+#define SUBSECTION_SHIFT 21
+#define SUBSECTION_SIZE (1UL << SUBSECTION_SHIFT)
+
+#define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
+#define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT)
+#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))
+
+#if SUBSECTION_SHIFT > SECTION_SIZE_BITS
+#error Subsection size exceeds section size
+#else
+#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
+#endif
+
+#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
+#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
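
The resulting geometry under the same assumed 4 KiB pages and 128 MiB
sections, together with the subsection_map_index() calculation defined
further down in this header:

#include <stdio.h>

#define PAGE_SHIFT              12
#define SECTION_SIZE_BITS       27
#define SUBSECTION_SHIFT        21

#define PFN_SUBSECTION_SHIFT    (SUBSECTION_SHIFT - PAGE_SHIFT)
#define PAGES_PER_SUBSECTION    (1UL << PFN_SUBSECTION_SHIFT)
#define PAGES_PER_SECTION       (1UL << (SECTION_SIZE_BITS - PAGE_SHIFT))
#define PAGE_SECTION_MASK       (~(PAGES_PER_SECTION - 1))
#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))

/* the subsection_map_index() calculation */
static int toy_subsection_map_index(unsigned long pfn)
{
        return (pfn & ~PAGE_SECTION_MASK) / PAGES_PER_SUBSECTION;
}

int main(void)
{
        printf("%lu pages/subsection, %lu subsections/section\n",
               PAGES_PER_SUBSECTION, SUBSECTIONS_PER_SECTION);  /* 512, 64 */
        printf("pfn 0x120a00 -> subsection bit %d\n",
               toy_subsection_map_index(0x120a00));             /* 5 */
        return 0;
}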
+
+struct mem_section_usage {
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
+#endif
+ /* See declaration of similar field in struct zone */
+ unsigned long pageblock_flags[0];
+};
+
+void subsection_map_init(unsigned long pfn, unsigned long nr_pages);
+
struct page;
struct page_ext;
struct mem_section {
@@ -1110,8 +1790,7 @@ struct mem_section {
*/
unsigned long section_mem_map;
- /* See declaration of similar field in struct zone */
- unsigned long *pageblock_flags;
+ struct mem_section_usage *usage;
#ifdef CONFIG_PAGE_EXTENSION
/*
* If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
@@ -1137,31 +1816,68 @@ struct mem_section {
#define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1)
#ifdef CONFIG_SPARSEMEM_EXTREME
-extern struct mem_section *mem_section[NR_SECTION_ROOTS];
+extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif
+static inline unsigned long *section_to_usemap(struct mem_section *ms)
+{
+ return ms->usage->pageblock_flags;
+}
+
static inline struct mem_section *__nr_to_section(unsigned long nr)
{
- if (!mem_section[SECTION_NR_TO_ROOT(nr)])
+ unsigned long root = SECTION_NR_TO_ROOT(nr);
+
+ if (unlikely(root >= NR_SECTION_ROOTS))
return NULL;
- return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
+
+#ifdef CONFIG_SPARSEMEM_EXTREME
+ if (!mem_section || !mem_section[root])
+ return NULL;
+#endif
+ return &mem_section[root][nr & SECTION_ROOT_MASK];
}
-extern int __section_nr(struct mem_section* ms);
-extern unsigned long usemap_size(void);
+extern size_t mem_section_usage_size(void);
/*
* We use the lower bits of the mem_map pointer to store
- * a little bit of information. There should be at least
- * 3 bits here due to 32-bit alignment.
+ * a little bit of information. The pointer is calculated
+ * as mem_map - section_nr_to_pfn(pnum). The result is
+ * aligned to the minimum alignment of the two values:
+ * 1. All mem_map arrays are page-aligned.
+ * 2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT
+ * lowest bits. PFN_SECTION_SHIFT is arch-specific
+ * (equal to SECTION_SIZE_BITS - PAGE_SHIFT), and the
+ * worst combination is powerpc with 256k pages,
+ * which results in PFN_SECTION_SHIFT equal to 6.
+ * To sum it up, at least 6 bits are available on all architectures.
+ * However, architectures other than powerpc can exceed 6 bits (e.g. 15
+ * bits are available on x86_64, and 13 bits with the worst case of 64K
+ * pages on arm64), as long as the extra bits are never used on powerpc.
*/
-#define SECTION_MARKED_PRESENT (1UL<<0)
-#define SECTION_HAS_MEM_MAP (1UL<<1)
-#define SECTION_IS_ONLINE (1UL<<2)
-#define SECTION_MAP_LAST_BIT (1UL<<3)
-#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
-#define SECTION_NID_SHIFT 3
+enum {
+ SECTION_MARKED_PRESENT_BIT,
+ SECTION_HAS_MEM_MAP_BIT,
+ SECTION_IS_ONLINE_BIT,
+ SECTION_IS_EARLY_BIT,
+#ifdef CONFIG_ZONE_DEVICE
+ SECTION_TAINT_ZONE_DEVICE_BIT,
+#endif
+ SECTION_MAP_LAST_BIT,
+};
+
+#define SECTION_MARKED_PRESENT BIT(SECTION_MARKED_PRESENT_BIT)
+#define SECTION_HAS_MEM_MAP BIT(SECTION_HAS_MEM_MAP_BIT)
+#define SECTION_IS_ONLINE BIT(SECTION_IS_ONLINE_BIT)
+#define SECTION_IS_EARLY BIT(SECTION_IS_EARLY_BIT)
+#ifdef CONFIG_ZONE_DEVICE
+#define SECTION_TAINT_ZONE_DEVICE BIT(SECTION_TAINT_ZONE_DEVICE_BIT)
+#endif
+#define SECTION_MAP_MASK (~(BIT(SECTION_MAP_LAST_BIT) - 1))
+#define SECTION_NID_SHIFT SECTION_MAP_LAST_BIT
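
A userspace model of this encoding; the section_nr_to_pfn() bias is
omitted for brevity, and toy_* names are illustrative:

#include <stdint.h>
#include <stdio.h>

enum {
        TOY_PRESENT_BIT,
        TOY_HAS_MEM_MAP_BIT,
        TOY_MAP_LAST_BIT,
};

#define TOY_PRESENT     (1UL << TOY_PRESENT_BIT)
#define TOY_HAS_MEM_MAP (1UL << TOY_HAS_MEM_MAP_BIT)
#define TOY_MAP_MASK    (~((1UL << TOY_MAP_LAST_BIT) - 1))
#define TOY_NID_SHIFT   TOY_MAP_LAST_BIT

int main(void)
{
        /* page-aligned, so the low flag bits are guaranteed free */
        static long toy_mem_map[512] __attribute__((aligned(4096)));
        unsigned long coded;

        /* sparse_init()-style: stash the nid while only "present" */
        coded = (3UL << TOY_NID_SHIFT) | TOY_PRESENT;
        printf("nid %lu\n", coded >> TOY_NID_SHIFT);

        /* later: the encoded mem_map pointer replaces the nid */
        coded = (uintptr_t)toy_mem_map | TOY_PRESENT | TOY_HAS_MEM_MAP;
        printf("decodes back: %d\n",
               (long *)(coded & TOY_MAP_MASK) == toy_mem_map);
        return 0;
}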
static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
@@ -1185,6 +1901,11 @@ static inline int valid_section(struct mem_section *section)
return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}
+static inline int early_section(struct mem_section *section)
+{
+ return (section && (section->section_mem_map & SECTION_IS_EARLY));
+}
+
static inline int valid_section_nr(unsigned long nr)
{
return valid_section(__nr_to_section(nr));
@@ -1195,6 +1916,20 @@ static inline int online_section(struct mem_section *section)
return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}
+#ifdef CONFIG_ZONE_DEVICE
+static inline int online_device_section(struct mem_section *section)
+{
+ unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;
+
+ return section && ((section->section_mem_map & flags) == flags);
+}
+#else
+static inline int online_device_section(struct mem_section *section)
+{
+ return 0;
+}
+#endif
+
static inline int online_section_nr(unsigned long nr)
{
return online_section(__nr_to_section(nr));
@@ -1202,32 +1937,88 @@ static inline int online_section_nr(unsigned long nr)
#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
-#ifdef CONFIG_MEMORY_HOTREMOVE
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif
-#endif
static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
return __nr_to_section(pfn_to_section_nr(pfn));
}
-extern int __highest_present_section_nr;
+extern unsigned long __highest_present_section_nr;
+
+static inline int subsection_map_index(unsigned long pfn)
+{
+ return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
+}
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+{
+ int idx = subsection_map_index(pfn);
+
+ return test_bit(idx, ms->usage->subsection_map);
+}
+#else
+static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+{
+ return 1;
+}
+#endif
#ifndef CONFIG_HAVE_ARCH_PFN_VALID
+/**
+ * pfn_valid - check if there is a valid memory map entry for a PFN
+ * @pfn: the page frame number to check
+ *
+ * Check if there is a valid memory map entry aka struct page for the @pfn.
+ * Note that availability of the memory map entry does not imply that
+ * there is actual usable memory at that @pfn. The struct page may
+ * represent a hole or an unusable page frame.
+ *
+ * Return: 1 for PFNs that have memory map entries and 0 otherwise
+ */
static inline int pfn_valid(unsigned long pfn)
{
+ struct mem_section *ms;
+
+ /*
+ * Ensure the upper PAGE_SHIFT bits are clear in the
+ * pfn. Else it might lead to false positives when
+ * some of the upper bits are set, but the lower bits
+ * match a valid pfn.
+ */
+ if (PHYS_PFN(PFN_PHYS(pfn)) != pfn)
+ return 0;
+
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
- return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
+ ms = __pfn_to_section(pfn);
+ if (!valid_section(ms))
+ return 0;
+ /*
+ * Traditionally early sections always returned pfn_valid() for
+ * the entire section-sized span.
+ */
+ return early_section(ms) || pfn_section_valid(ms, pfn);
}
#endif
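
The round-trip check above in isolation; the truncation that
phys_addr_t provides in the kernel is modeled here with an explicit,
assumed 52-bit mask:

#include <stdio.h>

#define PAGE_SHIFT       12
#define MAX_PHYSMEM_BITS 52     /* assumed; arch-specific */
#define PHYS_MASK        ((1UL << MAX_PHYSMEM_BITS) - 1)

#define PFN_PHYS(pfn)   (((pfn) << PAGE_SHIFT) & PHYS_MASK)
#define PHYS_PFN(addr)  ((addr) >> PAGE_SHIFT)

int main(void)
{
        unsigned long good = 0x12345;
        unsigned long bad = good | (1UL << 50); /* stray high bit */

        /* the stray bit shifts above the physical address width and
         * is truncated, so the corrupted pfn fails the round trip */
        printf("good round-trips: %d\n", PHYS_PFN(PFN_PHYS(good)) == good);
        printf("bad round-trips:  %d\n", PHYS_PFN(PFN_PHYS(bad)) == bad);
        return 0;
}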
-static inline int pfn_present(unsigned long pfn)
+static inline int pfn_in_present_section(unsigned long pfn)
{
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
- return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
+ return present_section(__pfn_to_section(pfn));
+}
+
+static inline unsigned long next_present_section_nr(unsigned long section_nr)
+{
+ while (++section_nr <= __highest_present_section_nr) {
+ if (present_section_nr(section_nr))
+ return section_nr;
+ }
+
+ return -1;
}
/*
@@ -1245,74 +2036,14 @@ static inline int pfn_present(unsigned long pfn)
#define pfn_to_nid(pfn) (0)
#endif
-#define early_pfn_valid(pfn) pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init() do {} while (0)
#define sparse_index_init(_sec, _nid) do {} while (0)
+#define pfn_in_present_section pfn_valid
+#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */
-/*
- * During memory init memblocks map pfns to nids. The search is expensive and
- * this caches recent lookups. The implementation of __early_pfn_to_nid
- * may treat start/end as pfns or sections.
- */
-struct mminit_pfnnid_cache {
- unsigned long last_start;
- unsigned long last_end;
- int last_nid;
-};
-
-#ifndef early_pfn_valid
-#define early_pfn_valid(pfn) (1)
-#endif
-
-void memory_present(int nid, unsigned long start, unsigned long end);
-unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
-
-/*
- * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
- * need to check pfn validility within that MAX_ORDER_NR_PAGES block.
- * pfn_valid_within() should be used in this case; we optimise this away
- * when we have no holes within a MAX_ORDER_NR_PAGES block.
- */
-#ifdef CONFIG_HOLES_IN_ZONE
-#define pfn_valid_within(pfn) pfn_valid(pfn)
-#else
-#define pfn_valid_within(pfn) (1)
-#endif
-
-#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
-/*
- * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
- * associated with it or not. This means that a struct page exists for this
- * pfn. The caller cannot assume the page is fully initialized in general.
- * Hotplugable pages might not have been onlined yet. pfn_to_online_page()
- * will ensure the struct page is fully online and initialized. Special pages
- * (e.g. ZONE_DEVICE) are never onlined and should be treated accordingly.
- *
- * In FLATMEM, it is expected that holes always have valid memmap as long as
- * there is valid PFNs either side of the hole. In SPARSEMEM, it is assumed
- * that a valid section has a memmap for the entire section.
- *
- * However, an ARM, and maybe other embedded architectures in the future
- * free memmap backing holes to save memory on the assumption the memmap is
- * never used. The page_zone linkages are then broken even though pfn_valid()
- * returns true. A walker of the full memmap must then do this additional
- * check to ensure the memmap they are looking at is sane by making sure
- * the zone and PFN linkages are still valid. This is expensive, but walkers
- * of the full memmap are extremely rare.
- */
-bool memmap_valid_within(unsigned long pfn,
- struct page *page, struct zone *zone);
-#else
-static inline bool memmap_valid_within(unsigned long pfn,
- struct page *page, struct zone *zone)
-{
- return true;
-}
-#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
-
#endif /* !__GENERATING_BOUNDS.H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */
diff --git a/include/linux/mnt_idmapping.h b/include/linux/mnt_idmapping.h
new file mode 100644
index 000000000000..057c89867aa2
--- /dev/null
+++ b/include/linux/mnt_idmapping.h
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MNT_IDMAPPING_H
+#define _LINUX_MNT_IDMAPPING_H
+
+#include <linux/types.h>
+#include <linux/uidgid.h>
+
+struct mnt_idmap;
+struct user_namespace;
+
+extern struct mnt_idmap nop_mnt_idmap;
+extern struct user_namespace init_user_ns;
+
+typedef struct {
+ uid_t val;
+} vfsuid_t;
+
+typedef struct {
+ gid_t val;
+} vfsgid_t;
+
+static_assert(sizeof(vfsuid_t) == sizeof(kuid_t));
+static_assert(sizeof(vfsgid_t) == sizeof(kgid_t));
+static_assert(offsetof(vfsuid_t, val) == offsetof(kuid_t, val));
+static_assert(offsetof(vfsgid_t, val) == offsetof(kgid_t, val));
+
+#ifdef CONFIG_MULTIUSER
+static inline uid_t __vfsuid_val(vfsuid_t uid)
+{
+ return uid.val;
+}
+
+static inline gid_t __vfsgid_val(vfsgid_t gid)
+{
+ return gid.val;
+}
+#else
+static inline uid_t __vfsuid_val(vfsuid_t uid)
+{
+ return 0;
+}
+
+static inline gid_t __vfsgid_val(vfsgid_t gid)
+{
+ return 0;
+}
+#endif
+
+static inline bool vfsuid_valid(vfsuid_t uid)
+{
+ return __vfsuid_val(uid) != (uid_t)-1;
+}
+
+static inline bool vfsgid_valid(vfsgid_t gid)
+{
+ return __vfsgid_val(gid) != (gid_t)-1;
+}
+
+static inline bool vfsuid_eq(vfsuid_t left, vfsuid_t right)
+{
+ return vfsuid_valid(left) && __vfsuid_val(left) == __vfsuid_val(right);
+}
+
+static inline bool vfsgid_eq(vfsgid_t left, vfsgid_t right)
+{
+ return vfsgid_valid(left) && __vfsgid_val(left) == __vfsgid_val(right);
+}
+
+/**
+ * vfsuid_eq_kuid - check whether kuid and vfsuid have the same value
+ * @vfsuid: the vfsuid to compare
+ * @kuid: the kuid to compare
+ *
+ * Check whether @vfsuid and @kuid have the same values.
+ *
+ * Return: true if @vfsuid and @kuid have the same value, false if not.
+ * Comparison between two invalid uids returns false.
+ */
+static inline bool vfsuid_eq_kuid(vfsuid_t vfsuid, kuid_t kuid)
+{
+ return vfsuid_valid(vfsuid) && __vfsuid_val(vfsuid) == __kuid_val(kuid);
+}
+
+/**
+ * vfsgid_eq_kgid - check whether kgid and vfsgid have the same value
+ * @vfsgid: the vfsgid to compare
+ * @kgid: the kgid to compare
+ *
+ * Check whether @vfsgid and @kgid have the same values.
+ *
+ * Return: true if @vfsgid and @kgid have the same value, false if not.
+ * Comparison between two invalid gids returns false.
+ */
+static inline bool vfsgid_eq_kgid(vfsgid_t vfsgid, kgid_t kgid)
+{
+ return vfsgid_valid(vfsgid) && __vfsgid_val(vfsgid) == __kgid_val(kgid);
+}
+
+/*
+ * vfs{g,u}ids are created from k{g,u}ids.
+ * We don't allow them to be created from regular {u,g}id.
+ */
+#define VFSUIDT_INIT(val) (vfsuid_t){ __kuid_val(val) }
+#define VFSGIDT_INIT(val) (vfsgid_t){ __kgid_val(val) }
+
+#define INVALID_VFSUID VFSUIDT_INIT(INVALID_UID)
+#define INVALID_VFSGID VFSGIDT_INIT(INVALID_GID)
+
+/*
+ * Allow a vfs{g,u}id to be used as a k{g,u}id where we want to compare
+ * whether the mapped value is identical to the value of a k{g,u}id.
+ */
+#define AS_KUIDT(val) (kuid_t){ __vfsuid_val(val) }
+#define AS_KGIDT(val) (kgid_t){ __vfsgid_val(val) }
+
+int vfsgid_in_group_p(vfsgid_t vfsgid);
+
+vfsuid_t make_vfsuid(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns, kuid_t kuid);
+
+vfsgid_t make_vfsgid(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns, kgid_t kgid);
+
+kuid_t from_vfsuid(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns, vfsuid_t vfsuid);
+
+kgid_t from_vfsgid(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns, vfsgid_t vfsgid);
+
+/**
+ * vfsuid_has_fsmapping - check whether a vfsuid maps into the filesystem
+ * @idmap: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ * @vfsuid: vfsuid to be mapped
+ *
+ * Check whether @vfsuid has a mapping in the filesystem idmapping. Use this
+ * function to check whether the filesystem idmapping has a mapping for
+ * @vfsuid.
+ *
+ * Return: true if @vfsuid has a mapping in the filesystem, false if not.
+ */
+static inline bool vfsuid_has_fsmapping(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns,
+ vfsuid_t vfsuid)
+{
+ return uid_valid(from_vfsuid(idmap, fs_userns, vfsuid));
+}
+
+static inline bool vfsuid_has_mapping(struct user_namespace *userns,
+ vfsuid_t vfsuid)
+{
+ return from_kuid(userns, AS_KUIDT(vfsuid)) != (uid_t)-1;
+}
+
+/**
+ * vfsuid_into_kuid - convert vfsuid into kuid
+ * @vfsuid: the vfsuid to convert
+ *
+ * This can be used when a vfsuid is committed as a kuid.
+ *
+ * Return: a kuid with the value of @vfsuid
+ */
+static inline kuid_t vfsuid_into_kuid(vfsuid_t vfsuid)
+{
+ return AS_KUIDT(vfsuid);
+}
+
+/**
+ * vfsgid_has_fsmapping - check whether a vfsgid maps into the filesystem
+ * @idmap: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ * @vfsgid: vfsgid to be mapped
+ *
+ * Check whether @vfsgid has a mapping in the filesystem idmapping. Use this
+ * function to check whether the filesystem idmapping has a mapping for
+ * @vfsgid.
+ *
+ * Return: true if @vfsgid has a mapping in the filesystem, false if not.
+ */
+static inline bool vfsgid_has_fsmapping(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns,
+ vfsgid_t vfsgid)
+{
+ return gid_valid(from_vfsgid(idmap, fs_userns, vfsgid));
+}
+
+static inline bool vfsgid_has_mapping(struct user_namespace *userns,
+ vfsgid_t vfsgid)
+{
+ return from_kgid(userns, AS_KGIDT(vfsgid)) != (gid_t)-1;
+}
+
+/**
+ * vfsgid_into_kgid - convert vfsgid into kgid
+ * @vfsgid: the vfsgid to convert
+ *
+ * This can be used when a vfsgid is committed as a kgid.
+ *
+ * Return: a kgid with the value of @vfsgid
+ */
+static inline kgid_t vfsgid_into_kgid(vfsgid_t vfsgid)
+{
+ return AS_KGIDT(vfsgid);
+}
+
+/**
+ * mapped_fsuid - return caller's fsuid mapped according to an idmapping
+ * @idmap: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ *
+ * Use this helper to initialize a new vfs or filesystem object based on
+ * the caller's fsuid. A common example is initializing the i_uid field of
+ * a newly allocated inode triggered by a creation event such as mkdir or
+ * O_CREAT. Other examples include the allocation of quotas for a specific
+ * user.
+ *
+ * Return: the caller's current fsuid mapped up according to @idmap.
+ */
+static inline kuid_t mapped_fsuid(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns)
+{
+ return from_vfsuid(idmap, fs_userns, VFSUIDT_INIT(current_fsuid()));
+}
+
+/**
+ * mapped_fsgid - return caller's fsgid mapped according to an idmapping
+ * @idmap: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ *
+ * Use this helper to initialize a new vfs or filesystem object based on
+ * the caller's fsgid. A common example is initializing the i_gid field of
+ * a newly allocated inode triggered by a creation event such as mkdir or
+ * O_CREAT. Other examples include the allocation of quotas for a specific
+ * user.
+ *
+ * Return: the caller's current fsgid mapped up according to @idmap.
+ */
+static inline kgid_t mapped_fsgid(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns)
+{
+ return from_vfsgid(idmap, fs_userns, VFSGIDT_INIT(current_fsgid()));
+}
+
+bool check_fsmapping(const struct mnt_idmap *idmap,
+ const struct super_block *sb);
+
+#endif /* _LINUX_MNT_IDMAPPING_H */
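
A loose userspace model of the range-based remapping these helpers
compose; single-extent toy maps stand in for the real idmappings, and
the exact up/down composition the kernel performs is simplified:

#include <stdio.h>

typedef unsigned int toy_uid_t;

struct toy_idmap { toy_uid_t first, lower_first; unsigned int count; };

static toy_uid_t toy_map_id_down(const struct toy_idmap *m, toy_uid_t id)
{
        if (id >= m->first && id - m->first < m->count)
                return m->lower_first + (id - m->first);
        return (toy_uid_t)-1;   /* unmapped, like INVALID_VFSUID */
}

int main(void)
{
        /* mount idmapping: ids 1000..1999 <-> 0..999 */
        struct toy_idmap mnt = { 1000, 0, 1000 };
        /* filesystem idmapping: identity over 0..999 */
        struct toy_idmap fs = { 0, 0, 1000 };

        toy_uid_t kuid = 1000;
        toy_uid_t vfsuid = toy_map_id_down(&mnt, kuid);  /* make_vfsuid()-ish */
        toy_uid_t ondisk = toy_map_id_down(&fs, vfsuid); /* from_vfsuid()-ish */

        printf("kuid %u -> vfsuid %u -> on-disk %u\n", kuid, vfsuid, ondisk);
        printf("unmapped kuid 5000 -> %d\n", (int)toy_map_id_down(&mnt, 5000));
        return 0;
}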
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index 12b2ab510323..8f882f5881e8 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NAMESPACE_H_
#define _NAMESPACE_H_
#ifdef __KERNEL__
@@ -5,10 +6,12 @@
struct mnt_namespace;
struct fs_struct;
struct user_namespace;
+struct ns_common;
extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
struct user_namespace *, struct fs_struct *);
extern void put_mnt_ns(struct mnt_namespace *ns);
+extern struct ns_common *from_mnt_ns(struct mnt_namespace *);
extern const struct file_operations proc_mounts_operations;
extern const struct file_operations proc_mountinfo_operations;
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 3f74ef2281e8..ccaaeda792c0 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Device tables which are exported to userspace via
* scripts/mod/file2alias.c. You must keep that file in sync with this
@@ -8,6 +9,7 @@
#define LINUX_MOD_DEVICETABLE_H
#ifdef __KERNEL__
+#include <linux/mei.h>
#include <linux/types.h>
#include <linux/uuid.h>
typedef unsigned long kernel_ulong_t;
@@ -15,11 +17,36 @@ typedef unsigned long kernel_ulong_t;
#define PCI_ANY_ID (~0)
+enum {
+ PCI_ID_F_VFIO_DRIVER_OVERRIDE = 1,
+};
+
+/**
+ * struct pci_device_id - PCI device ID structure
+ * @vendor: Vendor ID to match (or PCI_ANY_ID)
+ * @device: Device ID to match (or PCI_ANY_ID)
+ * @subvendor: Subsystem vendor ID to match (or PCI_ANY_ID)
+ * @subdevice: Subsystem device ID to match (or PCI_ANY_ID)
+ * @class: Device class, subclass, and "interface" to match.
+ * See Appendix D of the PCI Local Bus Spec or
+ * include/linux/pci_ids.h for a full list of classes.
+ * Most drivers do not need to specify class/class_mask
+ * as vendor/device is normally sufficient.
+ * @class_mask: Limit which sub-fields of the class field are compared.
+ * See drivers/scsi/sym53c8xx_2/ for an example of usage.
+ * @driver_data: Data private to the driver.
+ * Most drivers don't need to use driver_data field.
+ * Best practice is to use driver_data as an index
+ * into a static list of equivalent device types,
+ * instead of using it as a pointer.
+ * @override_only: Match only when dev->driver_override is this driver.
+ */
struct pci_device_id {
__u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID */
__u32 subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */
__u32 class, class_mask; /* (class,subclass,prog-if) triplet */
kernel_ulong_t driver_data; /* Data private to the driver */
+ __u32 override_only;
};
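
A userspace sketch of how such a table is matched, including the
PCI_ANY_ID wildcards and the override_only gate added above (toy_*
names and the two-pass demo are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define PCI_ANY_ID        (~0U)
#define TOY_VFIO_OVERRIDE 1

struct toy_id { unsigned int vendor, device, override_only; };
struct toy_dev { unsigned int vendor, device; bool driver_overridden; };

static bool toy_match_one(const struct toy_id *id, const struct toy_dev *dev)
{
        if (id->vendor != PCI_ANY_ID && id->vendor != dev->vendor)
                return false;
        if (id->device != PCI_ANY_ID && id->device != dev->device)
                return false;
        /* override_only entries match only explicitly bound devices */
        if (id->override_only && !dev->driver_overridden)
                return false;
        return true;
}

int main(void)
{
        static const struct toy_id ids[] = {
                { 0x8086, 0x10d3, 0 },
                { PCI_ANY_ID, PCI_ANY_ID, TOY_VFIO_OVERRIDE },
                { 0 },  /* terminator, as in real ID tables */
        };
        struct toy_dev dev = { 0x15b3, 0x101d, false };
        const struct toy_id *id;
        int pass;

        for (pass = 0; pass < 2; pass++) {
                for (id = ids; id->vendor || id->device; id++)
                        if (toy_match_one(id, &dev))
                                printf("pass %d: matched %x:%x\n",
                                       pass, id->vendor, id->device);
                dev.driver_overridden = true;   /* driver_override set */
        }
        return 0;       /* only pass 1 matches, via the wildcard entry */
}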
@@ -185,7 +212,7 @@ struct css_device_id {
kernel_ulong_t driver_data;
};
-#define ACPI_ID_LEN 9
+#define ACPI_ID_LEN 16
struct acpi_device_id {
__u8 id[ACPI_ID_LEN];
@@ -228,6 +255,14 @@ struct hda_device_id {
unsigned long driver_data;
};
+struct sdw_device_id {
+ __u16 mfg_id;
+ __u16 part_id;
+ __u8 sdw_version;
+ __u8 class_id;
+ kernel_ulong_t driver_data;
+};
+
/*
* Struct used for matching a device
*/
@@ -250,17 +285,17 @@ struct pcmcia_device_id {
__u16 match_flags;
__u16 manf_id;
- __u16 card_id;
+ __u16 card_id;
- __u8 func_id;
+ __u8 func_id;
/* for real multi-function devices */
- __u8 function;
+ __u8 function;
/* for pseudo multi-function devices */
- __u8 device_no;
+ __u8 device_no;
- __u32 prod_id_hash[4];
+ __u32 prod_id_hash[4];
/* not matched against in kernelspace */
const char * prod_id[4];
@@ -292,7 +327,8 @@ struct pcmcia_device_id {
#define INPUT_DEVICE_ID_LED_MAX 0x0f
#define INPUT_DEVICE_ID_SND_MAX 0x07
#define INPUT_DEVICE_ID_FF_MAX 0x7f
-#define INPUT_DEVICE_ID_SW_MAX 0x0f
+#define INPUT_DEVICE_ID_SW_MAX 0x10
+#define INPUT_DEVICE_ID_PROP_MAX 0x1f
#define INPUT_DEVICE_ID_MATCH_BUS 1
#define INPUT_DEVICE_ID_MATCH_VENDOR 2
@@ -308,6 +344,7 @@ struct pcmcia_device_id {
#define INPUT_DEVICE_ID_MATCH_SNDBIT 0x0400
#define INPUT_DEVICE_ID_MATCH_FFBIT 0x0800
#define INPUT_DEVICE_ID_MATCH_SWBIT 0x1000
+#define INPUT_DEVICE_ID_MATCH_PROPBIT 0x2000
struct input_device_id {
@@ -327,6 +364,7 @@ struct input_device_id {
kernel_ulong_t sndbit[INPUT_DEVICE_ID_SND_MAX / BITS_PER_LONG + 1];
kernel_ulong_t ffbit[INPUT_DEVICE_ID_FF_MAX / BITS_PER_LONG + 1];
kernel_ulong_t swbit[INPUT_DEVICE_ID_SW_MAX / BITS_PER_LONG + 1];
+ kernel_ulong_t propbit[INPUT_DEVICE_ID_PROP_MAX / BITS_PER_LONG + 1];
kernel_ulong_t driver_info;
};
@@ -405,7 +443,7 @@ struct virtio_device_id {
* For Hyper-V devices we use the device guid as the id.
*/
struct hv_vmbus_device_id {
- uuid_le guid;
+ guid_t guid;
kernel_ulong_t driver_data; /* Data private to the driver */
};
@@ -416,6 +454,7 @@ struct hv_vmbus_device_id {
struct rpmsg_device_id {
char name[RPMSG_NAME_SIZE];
+ kernel_ulong_t driver_data;
};
/* i2c */
@@ -438,6 +477,23 @@ struct pci_epf_device_id {
kernel_ulong_t driver_data;
};
+/* i3c */
+
+#define I3C_MATCH_DCR 0x1
+#define I3C_MATCH_MANUF 0x2
+#define I3C_MATCH_PART 0x4
+#define I3C_MATCH_EXTRA_INFO 0x8
+
+struct i3c_device_id {
+ __u8 match_flags;
+ __u8 dcr;
+ __u16 manuf_id;
+ __u16 part_id;
+ __u16 extra_info;
+
+ const void *data;
+};
+
/* spi */
#define SPI_NAME_SIZE 32
@@ -448,6 +504,30 @@ struct spi_device_id {
kernel_ulong_t driver_data; /* Data private to the driver */
};
+/* SLIMbus */
+
+#define SLIMBUS_NAME_SIZE 32
+#define SLIMBUS_MODULE_PREFIX "slim:"
+
+struct slim_device_id {
+ __u16 manf_id, prod_code;
+ __u16 dev_index, instance;
+
+ /* Data private to the driver */
+ kernel_ulong_t driver_data;
+};
+
+#define APR_NAME_SIZE 32
+#define APR_MODULE_PREFIX "apr:"
+
+struct apr_device_id {
+ char name[APR_NAME_SIZE];
+ __u32 domain_id;
+ __u32 svc_id;
+ __u32 svc_version;
+ kernel_ulong_t driver_data; /* Data private to the driver */
+};
+
#define SPMI_NAME_SIZE 32
#define SPMI_MODULE_PREFIX "spmi:"
@@ -462,11 +542,14 @@ enum dmi_field {
DMI_BIOS_VENDOR,
DMI_BIOS_VERSION,
DMI_BIOS_DATE,
+ DMI_BIOS_RELEASE,
+ DMI_EC_FIRMWARE_RELEASE,
DMI_SYS_VENDOR,
DMI_PRODUCT_NAME,
DMI_PRODUCT_VERSION,
DMI_PRODUCT_SERIAL,
DMI_PRODUCT_UUID,
+ DMI_PRODUCT_SKU,
DMI_PRODUCT_FAMILY,
DMI_BOARD_VENDOR,
DMI_BOARD_NAME,
@@ -479,6 +562,7 @@ enum dmi_field {
DMI_CHASSIS_SERIAL,
DMI_CHASSIS_ASSET_TAG,
DMI_STRING_MAX,
+ DMI_OEM_STRING, /* special case - will not be in dmi_ident */
};
struct dmi_strmatch {
@@ -515,9 +599,9 @@ struct platform_device_id {
#define MDIO_NAME_SIZE 32
#define MDIO_MODULE_PREFIX "mdio:"
-#define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d"
+#define MDIO_ID_FMT "%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u"
#define MDIO_ID_ARGS(_id) \
- (_id)>>31, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \
+ ((_id)>>31) & 1, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \
((_id)>>27) & 1, ((_id)>>26) & 1, ((_id)>>25) & 1, ((_id)>>24) & 1, \
((_id)>>23) & 1, ((_id)>>22) & 1, ((_id)>>21) & 1, ((_id)>>20) & 1, \
((_id)>>19) & 1, ((_id)>>18) & 1, ((_id)>>17) & 1, ((_id)>>16) & 1, \
@@ -529,7 +613,7 @@ struct platform_device_id {
/**
* struct mdio_device_id - identifies PHY devices on an MDIO/MII bus
* @phy_id: The result of
- * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&PHYSID2)) & @phy_id_mask
+ * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&MII_PHYSID2)) & @phy_id_mask
* for this PHY type
* @phy_id_mask: Defines the significant bits of @phy_id. A value of 0
* is used to terminate an array of struct mdio_device_id.
@@ -591,16 +675,16 @@ struct x86_cpu_id {
__u16 vendor;
__u16 family;
__u16 model;
+ __u16 steppings;
__u16 feature; /* bit index */
kernel_ulong_t driver_data;
};
-#define X86_FEATURE_MATCH(x) \
- { X86_VENDOR_ANY, X86_FAMILY_ANY, X86_MODEL_ANY, x }
-
+/* Wild cards for x86_cpu_id::vendor, family, model and feature */
#define X86_VENDOR_ANY 0xffff
#define X86_FAMILY_ANY 0
#define X86_MODEL_ANY 0
+#define X86_STEPPING_ANY 0
#define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */
/*
@@ -674,8 +758,6 @@ struct ulpi_device_id {
* struct fsl_mc_device_id - MC object device identifier
* @vendor: vendor ID
* @obj_type: MC object type
- * @ver_major: MC object version major number
- * @ver_minor: MC object version minor number
*
* Type of entries in the "device Id" table for MC object devices supported by
* a MC object device driver. The last entry of the table has vendor set to 0x0
@@ -685,5 +767,164 @@ struct fsl_mc_device_id {
const char obj_type[16];
};
+/**
+ * struct tb_service_id - Thunderbolt service identifiers
+ * @match_flags: Flags used to match the structure
+ * @protocol_key: Protocol key the service supports
+ * @protocol_id: Protocol id the service supports
+ * @protocol_version: Version of the protocol
+ * @protocol_revision: Revision of the protocol software
+ * @driver_data: Driver specific data
+ *
+ * Thunderbolt XDomain services are exposed as devices where each device
+ * carries the protocol information the service supports. Thunderbolt
+ * XDomain service drivers match against that information.
+ */
+struct tb_service_id {
+ __u32 match_flags;
+ char protocol_key[8 + 1];
+ __u32 protocol_id;
+ __u32 protocol_version;
+ __u32 protocol_revision;
+ kernel_ulong_t driver_data;
+};
+
+#define TBSVC_MATCH_PROTOCOL_KEY 0x0001
+#define TBSVC_MATCH_PROTOCOL_ID 0x0002
+#define TBSVC_MATCH_PROTOCOL_VERSION 0x0004
+#define TBSVC_MATCH_PROTOCOL_REVISION 0x0008
+
+/* USB Type-C Alternate Modes */
+
+#define TYPEC_ANY_MODE 0x7
+
+/**
+ * struct typec_device_id - USB Type-C alternate mode identifiers
+ * @svid: Standard or Vendor ID
+ * @mode: Mode index
+ * @driver_data: Driver specific data
+ */
+struct typec_device_id {
+ __u16 svid;
+ __u8 mode;
+ kernel_ulong_t driver_data;
+};
+
+/**
+ * struct tee_client_device_id - tee based device identifier
+ * @uuid: For TEE based client devices we use the device uuid as
+ * the identifier.
+ */
+struct tee_client_device_id {
+ uuid_t uuid;
+};
+
+/* WMI */
+
+#define WMI_MODULE_PREFIX "wmi:"
+
+/**
+ * struct wmi_device_id - WMI device identifier
+ * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @context: pointer to driver specific data
+ */
+struct wmi_device_id {
+ const char guid_string[UUID_STRING_LEN+1];
+ const void *context;
+};
+
+#define MHI_DEVICE_MODALIAS_FMT "mhi:%s"
+#define MHI_NAME_SIZE 32
+
+#define MHI_EP_DEVICE_MODALIAS_FMT "mhi_ep:%s"
+
+/**
+ * struct mhi_device_id - MHI device identification
+ * @chan: MHI channel name
+ * @driver_data: driver data
+ */
+struct mhi_device_id {
+ const char chan[MHI_NAME_SIZE];
+ kernel_ulong_t driver_data;
+};
+
+#define AUXILIARY_NAME_SIZE 32
+#define AUXILIARY_MODULE_PREFIX "auxiliary:"
+
+struct auxiliary_device_id {
+ char name[AUXILIARY_NAME_SIZE];
+ kernel_ulong_t driver_data;
+};
+
+/* Surface System Aggregator Module */
+
+#define SSAM_MATCH_TARGET 0x1
+#define SSAM_MATCH_INSTANCE 0x2
+#define SSAM_MATCH_FUNCTION 0x4
+
+struct ssam_device_id {
+ __u8 match_flags;
+
+ __u8 domain;
+ __u8 category;
+ __u8 target;
+ __u8 instance;
+ __u8 function;
+
+ kernel_ulong_t driver_data;
+};
+
+/*
+ * DFL (Device Feature List)
+ *
+ * DFL defines a linked list of feature headers within the device MMIO space to
+ * provide an extensible way of adding features. Software can walk through these
+ * predefined data structures to enumerate features. It is now used in FPGAs.
+ * See Documentation/fpga/dfl.rst for more information.
+ *
+ * The dfl bus type is introduced to match the individual feature devices (dfl
+ * devices) for specific dfl drivers.
+ */
+
+/**
+ * struct dfl_device_id - dfl device identifier
+ * @type: DFL FIU type of the device. See enum dfl_id_type.
+ * @feature_id: feature identifier local to its DFL FIU type.
+ * @driver_data: driver specific data.
+ */
+struct dfl_device_id {
+ __u16 type;
+ __u16 feature_id;
+ kernel_ulong_t driver_data;
+};
+
+/* ISHTP (Integrated Sensor Hub Transport Protocol) */
+
+#define ISHTP_MODULE_PREFIX "ishtp:"
+
+/**
+ * struct ishtp_device_id - ISHTP device identifier
+ * @guid: GUID of the device.
+ * @driver_data: pointer to driver specific data
+ */
+struct ishtp_device_id {
+ guid_t guid;
+ kernel_ulong_t driver_data;
+};
+
+/**
+ * struct cdx_device_id - CDX device identifier
+ * @vendor: Vendor ID
+ * @device: Device ID
+ * @override_only: Match only when dev->driver_override is this driver.
+ *
+ * Type of entries in the "device Id" table for CDX devices supported by
+ * a CDX device driver.
+ */
+struct cdx_device_id {
+ __u16 vendor;
+ __u16 device;
+ __u32 override_only;
+};
#endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index 8eb9a1e693e5..9e56763dff81 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -1,13 +1,17 @@
-#ifndef _LINUX_MODULE_H
-#define _LINUX_MODULE_H
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Dynamic loading of modules into the kernel.
*
* Rewritten by Richard Henderson <rth@tamu.edu> Dec 1996
* Rewritten again by Rusty Russell, 2002
*/
+
+#ifndef _LINUX_MODULE_H
+#define _LINUX_MODULE_H
+
#include <linux/list.h>
#include <linux/stat.h>
+#include <linux/buildid.h>
#include <linux/compiler.h>
#include <linux/cache.h>
#include <linux/kmod.h>
@@ -19,16 +23,15 @@
#include <linux/jump_label.h>
#include <linux/export.h>
#include <linux/rbtree_latch.h>
+#include <linux/error-injection.h>
+#include <linux/tracepoint-defs.h>
+#include <linux/srcu.h>
+#include <linux/static_call_types.h>
+#include <linux/dynamic_debug.h>
#include <linux/percpu.h>
#include <asm/module.h>
-/* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
-#define MODULE_SIG_STRING "~Module signature appended~\n"
-
-/* Not Yet Implemented */
-#define MODULE_SUPPORTED_DEVICE(name)
-
#define MODULE_NAME_LEN MAX_PARAM_PREFIX_LEN
struct modversion_info {
@@ -45,7 +48,7 @@ struct module_kobject {
struct kobject *drivers_dir;
struct module_param_attrs *mp;
struct completion *kobj_completion;
-};
+} __randomize_layout;
struct module_attribute {
struct attribute attr;
@@ -62,7 +65,7 @@ struct module_version_attribute {
struct module_attribute mattr;
const char *module_name;
const char *version;
-} __attribute__ ((__aligned__(sizeof(void *))));
+};
extern ssize_t __modver_version_show(struct module_attribute *,
struct module_kobject *, char *);
@@ -122,19 +125,22 @@ extern void cleanup_module(void);
#define late_initcall_sync(fn) module_init(fn)
#define console_initcall(fn) module_init(fn)
-#define security_initcall(fn) module_init(fn)
/* Each module must use one module_init(). */
#define module_init(initfn) \
static inline initcall_t __maybe_unused __inittest(void) \
{ return initfn; } \
- int init_module(void) __attribute__((alias(#initfn)));
+ int init_module(void) __copy(initfn) \
+ __attribute__((alias(#initfn))); \
+ ___ADDRESSABLE(init_module, __initdata);
/* This is only required if you want to be unloadable. */
#define module_exit(exitfn) \
static inline exitcall_t __maybe_unused __exittest(void) \
{ return exitfn; } \
- void cleanup_module(void) __attribute__((alias(#exitfn)));
+ void cleanup_module(void) __copy(exitfn) \
+ __attribute__((alias(#exitfn))); \
+ ___ADDRESSABLE(cleanup_module, __exitdata);
#endif
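
Taken together, the two macros above wire an ordinary function pair into the loader; a minimal module skeleton using them (names invented) looks like:

    #include <linux/init.h>
    #include <linux/module.h>

    static int __init hello_init(void)
    {
            pr_info("hello: loaded\n");
            return 0;               /* nonzero aborts the load */
    }

    static void __exit hello_exit(void)
    {
            pr_info("hello: unloaded\n");
    }

    module_init(hello_init);
    module_exit(hello_exit);
    MODULE_LICENSE("GPL");
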
@@ -168,10 +174,20 @@ extern void cleanup_module(void);
#define MODULE_SOFTDEP(_softdep) MODULE_INFO(softdep, _softdep)
/*
+ * MODULE_FILE is used for generating modules.builtin
+ * So, make it no-op when this is being built as a module
+ */
+#ifdef MODULE
+#define MODULE_FILE
+#else
+#define MODULE_FILE MODULE_INFO(file, KBUILD_MODFILE);
+#endif
+
+/*
* The following license idents are currently accepted as indicating free
* software modules
*
- * "GPL" [GNU Public License v2 or later]
+ * "GPL" [GNU Public License v2]
* "GPL v2" [GNU Public License v2]
* "GPL and additional rights" [GNU Public License v2 rights and more]
* "Dual BSD/GPL" [GNU Public License v2
@@ -185,6 +201,22 @@ extern void cleanup_module(void);
*
* "Proprietary" [Non free products]
*
+ * Both "GPL v2" and "GPL" (the latter also in dual licensed strings) are
+ * merely stating that the module is licensed under the GPL v2, but are not
+ * telling whether "GPL v2 only" or "GPL v2 or later". The reason why there
+ * are two variants is a historic and failed attempt to convey more
+ * information in the MODULE_LICENSE string. For module loading the
+ * "only/or later" distinction is completely irrelevant and does neither
+ * replace the proper license identifiers in the corresponding source file
+ * nor amends them in any way. The sole purpose is to make the
+ * 'Proprietary' flagging work and to refuse to bind symbols which are
+ * exported with EXPORT_SYMBOL_GPL when a non free module is loaded.
+ *
+ * In the same way "BSD" is not a clear license information. It merely
+ * states, that the module is licensed under one of the compatible BSD
+ * license variants. The detailed and correct license information is again
+ * to be found in the corresponding source files.
+ *
* There are dual licensed components, but when running with Linux it is the
* GPL that is relevant so this is a non issue. Similarly LGPL linked with GPL
* is a GPL combined work.
@@ -195,7 +227,7 @@ extern void cleanup_module(void);
* 2. So the community can ignore bug reports including proprietary modules
* 3. So vendors can do likewise based on their own policies
*/
-#define MODULE_LICENSE(_license) MODULE_INFO(license, _license)
+#define MODULE_LICENSE(_license) MODULE_FILE MODULE_INFO(license, _license)
/*
* Author(s), use "Name <email>" or just "Name", for multiple
@@ -209,7 +241,7 @@ extern void cleanup_module(void);
#ifdef MODULE
/* Creates an alias so file2alias.c can find device table. */
#define MODULE_DEVICE_TABLE(type, name) \
-extern const typeof(name) __mod_##type##__##name##_device_table \
+extern typeof(name) __mod_##type##__##name##_device_table \
__attribute__ ((unused, alias(__stringify(name))))
#else /* !MODULE */
#define MODULE_DEVICE_TABLE(type, name)
@@ -236,20 +268,21 @@ extern const typeof(name) __mod_##type##__##name##_device_table \
#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
#else
#define MODULE_VERSION(_version) \
- static struct module_version_attribute ___modver_attr = { \
- .mattr = { \
- .attr = { \
- .name = "version", \
- .mode = S_IRUGO, \
+ MODULE_INFO(version, _version); \
+ static struct module_version_attribute __modver_attr \
+ __used __section("__modver") \
+ __aligned(__alignof__(struct module_version_attribute)) \
+ = { \
+ .mattr = { \
+ .attr = { \
+ .name = "version", \
+ .mode = S_IRUGO, \
+ }, \
+ .show = __modver_version_show, \
}, \
- .show = __modver_version_show, \
- }, \
- .module_name = KBUILD_MODNAME, \
- .version = _version, \
- }; \
- static const struct module_version_attribute \
- __used __attribute__ ((__section__ ("__modver"))) \
- * __moduleparam_const __modver_attr = &___modver_attr
+ .module_name = KBUILD_MODNAME, \
+ .version = _version, \
+ }
#endif
/* Optional firmware file (or files) needed by the module
@@ -257,6 +290,8 @@ extern const typeof(name) __mod_##type##__##name##_device_table \
* files require multiple MODULE_FIRMWARE() specifiers */
#define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware)
+#define MODULE_IMPORT_NS(ns) MODULE_INFO(import_ns, __stringify(ns))
+
struct notifier_block;
#ifdef CONFIG_MODULES
@@ -265,7 +300,7 @@ extern int modules_disabled; /* for sysctl */
/* Get/put a kernel symbol (calls must be symmetric) */
void *__symbol_get(const char *symbol);
void *__symbol_get_gpl(const char *symbol);
-#define symbol_get(x) ((typeof(&x))(__symbol_get(VMLINUX_SYMBOL_STR(x))))
+#define symbol_get(x) ((typeof(&x))(__symbol_get(__stringify(x))))
/* modules using other modules: kdb wants to see this. */
struct module_use {
@@ -286,17 +321,47 @@ struct mod_tree_node {
struct latch_tree_node node;
};
-struct module_layout {
- /* The actual code + data. */
+enum mod_mem_type {
+ MOD_TEXT = 0,
+ MOD_DATA,
+ MOD_RODATA,
+ MOD_RO_AFTER_INIT,
+ MOD_INIT_TEXT,
+ MOD_INIT_DATA,
+ MOD_INIT_RODATA,
+
+ MOD_MEM_NUM_TYPES,
+ MOD_INVALID = -1,
+};
+
+#define mod_mem_type_is_init(type) \
+ ((type) == MOD_INIT_TEXT || \
+ (type) == MOD_INIT_DATA || \
+ (type) == MOD_INIT_RODATA)
+
+#define mod_mem_type_is_core(type) (!mod_mem_type_is_init(type))
+
+#define mod_mem_type_is_text(type) \
+ ((type) == MOD_TEXT || \
+ (type) == MOD_INIT_TEXT)
+
+#define mod_mem_type_is_data(type) (!mod_mem_type_is_text(type))
+
+#define mod_mem_type_is_core_data(type) \
+ (mod_mem_type_is_core(type) && \
+ mod_mem_type_is_data(type))
+
+#define for_each_mod_mem_type(type) \
+ for (enum mod_mem_type (type) = 0; \
+ (type) < MOD_MEM_NUM_TYPES; (type)++)
+
+#define for_class_mod_mem_type(type, class) \
+ for_each_mod_mem_type(type) \
+ if (mod_mem_type_is_##class(type))
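
A sketch of how the iteration helpers above are meant to be used, assuming a valid struct module pointer; this mirrors what the removed core_layout/init_layout size fields used to provide:

    /* Sum the sizes of all core (non-init) memory regions of a module. */
    static unsigned int example_mod_core_size(const struct module *mod)
    {
            unsigned int size = 0;

            for_class_mod_mem_type(type, core)
                    size += mod->mem[type].size;
            return size;
    }
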
+
+struct module_memory {
void *base;
- /* Total size. */
unsigned int size;
- /* The size of the executable code. */
- unsigned int text_size;
- /* Size of RO section of the module (text+rodata) */
- unsigned int ro_size;
- /* Size of RO after init section */
- unsigned int ro_after_init_size;
#ifdef CONFIG_MODULES_TREE_LOOKUP
struct mod_tree_node mtn;
@@ -305,18 +370,27 @@ struct module_layout {
#ifdef CONFIG_MODULES_TREE_LOOKUP
/* Only touch one cacheline for common rbtree-for-core-layout case. */
-#define __module_layout_align ____cacheline_aligned
+#define __module_memory_align ____cacheline_aligned
#else
-#define __module_layout_align
+#define __module_memory_align
#endif
struct mod_kallsyms {
Elf_Sym *symtab;
unsigned int num_symtab;
char *strtab;
+ char *typetab;
};
#ifdef CONFIG_LIVEPATCH
+/**
+ * struct klp_modinfo - ELF information preserved from the livepatch module
+ *
+ * @hdr: ELF header
+ * @sechdrs: Section header table
+ * @secstrings: String table for the section headers
+ * @symndx: The symbol table section index
+ */
struct klp_modinfo {
Elf_Ehdr hdr;
Elf_Shdr *sechdrs;
@@ -334,6 +408,11 @@ struct module {
/* Unique handle for this module */
char name[MODULE_NAME_LEN];
+#ifdef CONFIG_STACKTRACE_BUILD_ID
+ /* Module build ID */
+ unsigned char build_id[BUILD_ID_SIZE_MAX];
+#endif
+
/* Sysfs stuff. */
struct module_kobject mkobj;
struct module_attribute *modinfo_attrs;
@@ -346,6 +425,11 @@ struct module {
const s32 *crcs;
unsigned int num_syms;
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+ s32 *kcfi_traps;
+ s32 *kcfi_traps_end;
+#endif
+
/* Kernel parameters. */
#ifdef CONFIG_SYSFS
struct mutex param_lock;
@@ -357,18 +441,7 @@ struct module {
unsigned int num_gpl_syms;
const struct kernel_symbol *gpl_syms;
const s32 *gpl_crcs;
-
-#ifdef CONFIG_UNUSED_SYMBOLS
- /* unused exported symbols. */
- const struct kernel_symbol *unused_syms;
- const s32 *unused_crcs;
- unsigned int num_unused_syms;
-
- /* GPL-only, unused exported symbols. */
- unsigned int num_unused_gpl_syms;
- const struct kernel_symbol *unused_gpl_syms;
- const s32 *unused_gpl_crcs;
-#endif
+ bool using_gplonly_symbols;
#ifdef CONFIG_MODULE_SIG
/* Signature was verified. */
@@ -377,11 +450,6 @@ struct module {
bool async_probe_requested;
- /* symbols that will be GPL-only in the near future. */
- const struct kernel_symbol *gpl_future_syms;
- const s32 *gpl_future_crcs;
- unsigned int num_gpl_future_syms;
-
/* Exception table */
unsigned int num_exentries;
struct exception_table_entry *extable;
@@ -389,9 +457,7 @@ struct module {
/* Startup function. */
int (*init)(void);
- /* Core layout: rbtree is accessed frequently, so keep together. */
- struct module_layout core_layout __module_layout_align;
- struct module_layout init_layout;
+ struct module_memory mem[MOD_MEM_NUM_TYPES] __module_memory_align;
/* Arch-specific module values */
struct mod_arch_specific arch;
@@ -407,7 +473,7 @@ struct module {
#ifdef CONFIG_KALLSYMS
/* Protected by RCU and/or module_mutex: use rcu_dereference() */
- struct mod_kallsyms *kallsyms;
+ struct mod_kallsyms __rcu *kallsyms;
struct mod_kallsyms core_kallsyms;
/* Section attributes */
@@ -426,12 +492,26 @@ struct module {
void __percpu *percpu;
unsigned int percpu_size;
#endif
+ void *noinstr_text_start;
+ unsigned int noinstr_text_size;
#ifdef CONFIG_TRACEPOINTS
unsigned int num_tracepoints;
- struct tracepoint * const *tracepoints_ptrs;
+ tracepoint_ptr_t *tracepoints_ptrs;
+#endif
+#ifdef CONFIG_TREE_SRCU
+ unsigned int num_srcu_structs;
+ struct srcu_struct **srcu_struct_ptrs;
+#endif
+#ifdef CONFIG_BPF_EVENTS
+ unsigned int num_bpf_raw_events;
+ struct bpf_raw_event_map *bpf_raw_events;
+#endif
+#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
+ unsigned int btf_data_size;
+ void *btf_data;
#endif
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
struct jump_entry *jump_entries;
unsigned int num_jump_entries;
#endif
@@ -449,15 +529,35 @@ struct module {
unsigned int num_ftrace_callsites;
unsigned long *ftrace_callsites;
#endif
+#ifdef CONFIG_KPROBES
+ void *kprobes_text_start;
+ unsigned int kprobes_text_size;
+ unsigned long *kprobe_blacklist;
+ unsigned int num_kprobe_blacklist;
+#endif
+#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+ int num_static_call_sites;
+ struct static_call_site *static_call_sites;
+#endif
+#if IS_ENABLED(CONFIG_KUNIT)
+ int num_kunit_suites;
+ struct kunit_suite **kunit_suites;
+#endif
+
#ifdef CONFIG_LIVEPATCH
bool klp; /* Is this a livepatch module? */
bool klp_alive;
- /* Elf information */
+ /* ELF information */
struct klp_modinfo *klp_info;
#endif
+#ifdef CONFIG_PRINTK_INDEX
+ unsigned int printk_index_size;
+ struct pi_entry **printk_index_start;
+#endif
+
#ifdef CONFIG_MODULE_UNLOAD
/* What modules depend on me? */
struct list_head source_list;
@@ -475,17 +575,30 @@ struct module {
ctor_fn_t *ctors;
unsigned int num_ctors;
#endif
-} ____cacheline_aligned;
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+ struct error_injection_entry *ei_funcs;
+ unsigned int num_ei_funcs;
+#endif
+#ifdef CONFIG_DYNAMIC_DEBUG_CORE
+ struct _ddebug_info dyndbg_info;
+#endif
+} ____cacheline_aligned __randomize_layout;
#ifndef MODULE_ARCH_INIT
#define MODULE_ARCH_INIT {}
#endif
-extern struct mutex module_mutex;
+#ifndef HAVE_ARCH_KALLSYMS_SYMBOL_VALUE
+static inline unsigned long kallsyms_symbol_value(const Elf_Sym *sym)
+{
+ return sym->st_value;
+}
+#endif
/* FIXME: It'd be nice to isolate modules during init, too, so they
aren't used before they (may) fail. But presently too much code
   (IDE & SCSI) requires entry into the module during init. */
-static inline int module_is_live(struct module *mod)
+static inline bool module_is_live(struct module *mod)
{
return mod->state != MODULE_STATE_GOING;
}
@@ -497,18 +610,35 @@ bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr);
bool is_module_percpu_address(unsigned long addr);
bool is_module_text_address(unsigned long addr);
+static inline bool within_module_mem_type(unsigned long addr,
+ const struct module *mod,
+ enum mod_mem_type type)
+{
+ unsigned long base, size;
+
+ base = (unsigned long)mod->mem[type].base;
+ size = mod->mem[type].size;
+ return addr - base < size;
+}
+
static inline bool within_module_core(unsigned long addr,
const struct module *mod)
{
- return (unsigned long)mod->core_layout.base <= addr &&
- addr < (unsigned long)mod->core_layout.base + mod->core_layout.size;
+ for_class_mod_mem_type(type, core) {
+ if (within_module_mem_type(addr, mod, type))
+ return true;
+ }
+ return false;
}
static inline bool within_module_init(unsigned long addr,
const struct module *mod)
{
- return (unsigned long)mod->init_layout.base <= addr &&
- addr < (unsigned long)mod->init_layout.base + mod->init_layout.size;
+ for_class_mod_mem_type(type, init) {
+ if (within_module_mem_type(addr, mod, type))
+ return true;
+ }
+ return false;
}
static inline bool within_module(unsigned long addr, const struct module *mod)
@@ -516,70 +646,63 @@ static inline bool within_module(unsigned long addr, const struct module *mod)
return within_module_init(addr, mod) || within_module_core(addr, mod);
}
-/* Search for module by name: must hold module_mutex. */
+/* Search for module by name: must be in an RCU-sched critical section. */
struct module *find_module(const char *name);
-struct symsearch {
- const struct kernel_symbol *start, *stop;
- const s32 *crcs;
- enum {
- NOT_GPL_ONLY,
- GPL_ONLY,
- WILL_BE_GPL_ONLY,
- } licence;
- bool unused;
-};
-
-/*
- * Search for an exported symbol by name.
- *
- * Must be called with module_mutex held or preemption disabled.
- */
-const struct kernel_symbol *find_symbol(const char *name,
- struct module **owner,
- const s32 **crc,
- bool gplok,
- bool warn);
-
-/*
- * Walk the exported symbol table
- *
- * Must be called with module_mutex held or preemption disabled.
- */
-bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
- struct module *owner,
- void *data), void *data);
-
-/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if
- symnum out of range. */
-int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
- char *name, char *module_name, int *exported);
-
-/* Look for this name: can be of form module:name. */
-unsigned long module_kallsyms_lookup_name(const char *name);
-
-int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
- struct module *, unsigned long),
- void *data);
-
-extern void __noreturn __module_put_and_exit(struct module *mod,
+extern void __noreturn __module_put_and_kthread_exit(struct module *mod,
long code);
-#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code)
+#define module_put_and_kthread_exit(code) __module_put_and_kthread_exit(THIS_MODULE, code)
#ifdef CONFIG_MODULE_UNLOAD
int module_refcount(struct module *mod);
void __symbol_put(const char *symbol);
-#define symbol_put(x) __symbol_put(VMLINUX_SYMBOL_STR(x))
+#define symbol_put(x) __symbol_put(__stringify(x))
void symbol_put_addr(void *addr);
/* Sometimes we know we already have a refcount, and it's easier not
to handle the error case (which only happens with rmmod --wait). */
extern void __module_get(struct module *module);
-/* This is the Right Way to get a module: if it fails, it's being removed,
- * so pretend it's not there. */
+/**
+ * try_module_get() - take module refcount unless module is being removed
+ * @module: the module we should check for
+ *
+ * Only try to get a module reference count if the module is not being removed.
+ * This call will fail if the module is already being removed.
+ *
+ * Care must also be taken to ensure the module exists and is alive prior to
+ * usage of this call. This can be guaranteed through two means:
+ *
+ * 1) Direct protection: you know an earlier caller must have increased the
+ * module reference through __module_get(). This can typically be achieved
+ * by having another entity other than the module itself increment the
+ * module reference count.
+ *
+ * 2) Implied protection: there is an implied protection against module
+ * removal. An example of this is the implied protection used by kernfs /
+ * sysfs. The sysfs store / read file operations are guaranteed to exist
+ * through the use of kernfs's active reference (see kernfs_active()) and a
+ * sysfs / kernfs file removal cannot happen while the same file is
+ * active. Therefore, if a sysfs file is being read or written to, the module
+ * which created it must still exist. It is therefore safe to use
+ * try_module_get() on module sysfs store / read ops.
+ *
+ * One of the real values of try_module_get() is the module_is_live() check
+ * which ensures that the caller of try_module_get() can yield to userspace
+ * module removal requests and gracefully fail if the module is on its way out.
+ *
+ * Returns true if the reference count was successfully incremented.
+ */
extern bool try_module_get(struct module *module);
+/**
+ * module_put() - release a reference count to a module
+ * @module: the module we should release a reference count for
+ *
+ * If you successfully bump a reference count to a module with try_module_get(),
+ * when you are finished you must call module_put() to release that reference
+ * count.
+ */
extern void module_put(struct module *module);
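
The documented pattern, as a sketch: 'owner' would typically come from an ops structure's .owner field, per the direct-protection rule described above:

    static int example_use_provider(struct module *owner)
    {
            if (!try_module_get(owner))
                    return -ENODEV;         /* module on its way out */

            /* ... safely call into the provider here ... */

            module_put(owner);              /* release the reference */
            return 0;
    }
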
#else /*!CONFIG_MODULE_UNLOAD*/
@@ -597,7 +720,6 @@ static inline void __module_get(struct module *module)
#define symbol_put_addr(p) do { } while (0)
#endif /* CONFIG_MODULE_UNLOAD */
-int ref_module(struct module *a, struct module *b);
/* This is a #define so the string doesn't get put in every .o file */
#define module_name(mod) \
@@ -606,16 +728,8 @@ int ref_module(struct module *a, struct module *b);
__mod ? __mod->name : "kernel"; \
})
-/* For kallsyms to ask for address resolution. namebuf should be at
- * least KSYM_NAME_LEN long: a pointer to namebuf is returned if
- * found, otherwise NULL. */
-const char *module_address_lookup(unsigned long addr,
- unsigned long *symbolsize,
- unsigned long *offset,
- char **modname,
- char *namebuf);
-int lookup_module_symbol_name(unsigned long addr, char *symname);
-int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
+/* Dereference module function descriptor */
+void *dereference_module_function_descriptor(struct module *mod, void *ptr);
int register_module_notifier(struct notifier_block *nb);
int unregister_module_notifier(struct notifier_block *nb);
@@ -627,17 +741,16 @@ static inline bool module_requested_async_probing(struct module *module)
return module && module->async_probe_requested;
}
-#ifdef CONFIG_LIVEPATCH
static inline bool is_livepatch_module(struct module *mod)
{
+#ifdef CONFIG_LIVEPATCH
return mod->klp;
-}
-#else /* !CONFIG_LIVEPATCH */
-static inline bool is_livepatch_module(struct module *mod)
-{
+#else
return false;
+#endif
}
-#endif /* CONFIG_LIVEPATCH */
+
+void set_module_sig_enforced(void);
#else /* !CONFIG_MODULES... */
@@ -671,65 +784,42 @@ static inline bool is_module_text_address(unsigned long addr)
return false;
}
-/* Get/put a kernel symbol (calls should be symmetric) */
-#define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); })
-#define symbol_put(x) do { } while (0)
-#define symbol_put_addr(x) do { } while (0)
-
-static inline void __module_get(struct module *module)
-{
-}
-
-static inline bool try_module_get(struct module *module)
+static inline bool within_module_core(unsigned long addr,
+ const struct module *mod)
{
- return true;
+ return false;
}
-static inline void module_put(struct module *module)
+static inline bool within_module_init(unsigned long addr,
+ const struct module *mod)
{
+ return false;
}
-#define module_name(mod) "kernel"
-
-/* For kallsyms to ask for address resolution. NULL means not found. */
-static inline const char *module_address_lookup(unsigned long addr,
- unsigned long *symbolsize,
- unsigned long *offset,
- char **modname,
- char *namebuf)
+static inline bool within_module(unsigned long addr, const struct module *mod)
{
- return NULL;
+ return false;
}
-static inline int lookup_module_symbol_name(unsigned long addr, char *symname)
-{
- return -ERANGE;
-}
+/* Get/put a kernel symbol (calls should be symmetric) */
+#define symbol_get(x) ({ extern typeof(x) x __attribute__((weak,visibility("hidden"))); &(x); })
+#define symbol_put(x) do { } while (0)
+#define symbol_put_addr(x) do { } while (0)
-static inline int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name)
+static inline void __module_get(struct module *module)
{
- return -ERANGE;
}
-static inline int module_get_kallsym(unsigned int symnum, unsigned long *value,
- char *type, char *name,
- char *module_name, int *exported)
+static inline bool try_module_get(struct module *module)
{
- return -ERANGE;
+ return true;
}
-static inline unsigned long module_kallsyms_lookup_name(const char *name)
+static inline void module_put(struct module *module)
{
- return 0;
}
-static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
- struct module *,
- unsigned long),
- void *data)
-{
- return 0;
-}
+#define module_name(mod) "kernel"
static inline int register_module_notifier(struct notifier_block *nb)
{
@@ -742,7 +832,7 @@ static inline int unregister_module_notifier(struct notifier_block *nb)
return 0;
}
-#define module_put_and_exit(code) do_exit(code)
+#define module_put_and_kthread_exit(code) kthread_exit(code)
static inline void print_modules(void)
{
@@ -753,12 +843,23 @@ static inline bool module_requested_async_probing(struct module *module)
return false;
}
+
+static inline void set_module_sig_enforced(void)
+{
+}
+
+/* Dereference module function descriptor */
+static inline
+void *dereference_module_function_descriptor(struct module *mod, void *ptr)
+{
+ return ptr;
+}
+
#endif /* CONFIG_MODULES */
#ifdef CONFIG_SYSFS
extern struct kset *module_kset;
-extern struct kobj_type module_ktype;
-extern int module_sysfs_initialized;
+extern const struct kobj_type module_ktype;
#endif /* CONFIG_SYSFS */
#define symbol_request(x) try_then_request_module(symbol_get(x), "symbol:" #x)
@@ -767,18 +868,6 @@ extern int module_sysfs_initialized;
#define __MODULE_STRING(x) __stringify(x)
-#ifdef CONFIG_STRICT_MODULE_RWX
-extern void set_all_modules_text_rw(void);
-extern void set_all_modules_text_ro(void);
-extern void module_enable_ro(const struct module *mod, bool after_init);
-extern void module_disable_ro(const struct module *mod);
-#else
-static inline void set_all_modules_text_rw(void) { }
-static inline void set_all_modules_text_ro(void) { }
-static inline void module_enable_ro(const struct module *mod, bool after_init) { }
-static inline void module_disable_ro(const struct module *mod) { }
-#endif
-
#ifdef CONFIG_GENERIC_BUG
void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
struct module *);
@@ -794,16 +883,118 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
static inline void module_bug_cleanup(struct module *mod) {}
#endif /* CONFIG_GENERIC_BUG */
+#ifdef CONFIG_RETPOLINE
+extern bool retpoline_module_ok(bool has_retpoline);
+#else
+static inline bool retpoline_module_ok(bool has_retpoline)
+{
+ return true;
+}
+#endif
+
#ifdef CONFIG_MODULE_SIG
+bool is_module_sig_enforced(void);
+
static inline bool module_sig_ok(struct module *module)
{
return module->sig_ok;
}
#else /* !CONFIG_MODULE_SIG */
+static inline bool is_module_sig_enforced(void)
+{
+ return false;
+}
+
static inline bool module_sig_ok(struct module *module)
{
return true;
}
#endif /* CONFIG_MODULE_SIG */
+#if defined(CONFIG_MODULES) && defined(CONFIG_KALLSYMS)
+int module_kallsyms_on_each_symbol(const char *modname,
+ int (*fn)(void *, const char *, unsigned long),
+ void *data);
+
+/* For kallsyms to ask for address resolution. namebuf should be at
+ * least KSYM_NAME_LEN long: a pointer to namebuf is returned if
+ * found, otherwise NULL.
+ */
+const char *module_address_lookup(unsigned long addr,
+ unsigned long *symbolsize,
+ unsigned long *offset,
+ char **modname, const unsigned char **modbuildid,
+ char *namebuf);
+int lookup_module_symbol_name(unsigned long addr, char *symname);
+int lookup_module_symbol_attrs(unsigned long addr,
+ unsigned long *size,
+ unsigned long *offset,
+ char *modname,
+ char *name);
+
+/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if
+ * symnum out of range.
+ */
+int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+ char *name, char *module_name, int *exported);
+
+/* Look for this name: can be of form module:name. */
+unsigned long module_kallsyms_lookup_name(const char *name);
+
+unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name);
+
+#else /* CONFIG_MODULES && CONFIG_KALLSYMS */
+
+static inline int module_kallsyms_on_each_symbol(const char *modname,
+ int (*fn)(void *, const char *, unsigned long),
+ void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+/* For kallsyms to ask for address resolution. NULL means not found. */
+static inline const char *module_address_lookup(unsigned long addr,
+ unsigned long *symbolsize,
+ unsigned long *offset,
+ char **modname,
+ const unsigned char **modbuildid,
+ char *namebuf)
+{
+ return NULL;
+}
+
+static inline int lookup_module_symbol_name(unsigned long addr, char *symname)
+{
+ return -ERANGE;
+}
+
+static inline int lookup_module_symbol_attrs(unsigned long addr,
+ unsigned long *size,
+ unsigned long *offset,
+ char *modname,
+ char *name)
+{
+ return -ERANGE;
+}
+
+static inline int module_get_kallsym(unsigned int symnum, unsigned long *value,
+ char *type, char *name,
+ char *module_name, int *exported)
+{
+ return -ERANGE;
+}
+
+static inline unsigned long module_kallsyms_lookup_name(const char *name)
+{
+ return 0;
+}
+
+static inline unsigned long find_kallsyms_symbol_value(struct module *mod,
+ const char *name)
+{
+ return 0;
+}
+
+#endif /* CONFIG_MODULES && CONFIG_KALLSYMS */
+
#endif /* _LINUX_MODULE_H */
diff --git a/include/linux/module_signature.h b/include/linux/module_signature.h
new file mode 100644
index 000000000000..7eb4b00381ac
--- /dev/null
+++ b/include/linux/module_signature.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Module signature handling.
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _LINUX_MODULE_SIGNATURE_H
+#define _LINUX_MODULE_SIGNATURE_H
+
+#include <linux/types.h>
+
+/* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
+#define MODULE_SIG_STRING "~Module signature appended~\n"
+
+enum pkey_id_type {
+ PKEY_ID_PGP, /* OpenPGP generated key ID */
+ PKEY_ID_X509, /* X.509 arbitrary subjectKeyIdentifier */
+ PKEY_ID_PKCS7, /* Signature in PKCS#7 message */
+};
+
+/*
+ * Module signature information block.
+ *
+ * The constituents of the signature section are, in order:
+ *
+ * - Signer's name
+ * - Key identifier
+ * - Signature data
+ * - Information block
+ */
+struct module_signature {
+ u8 algo; /* Public-key crypto algorithm [0] */
+ u8 hash; /* Digest algorithm [0] */
+ u8 id_type; /* Key identifier type [PKEY_ID_PKCS7] */
+ u8 signer_len; /* Length of signer's name [0] */
+ u8 key_id_len; /* Length of key identifier [0] */
+ u8 __pad[3];
+ __be32 sig_len; /* Length of signature data */
+};
+
+int mod_check_sig(const struct module_signature *ms, size_t file_len,
+ const char *name);
+
+#endif /* _LINUX_MODULE_SIGNATURE_H */
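
A sketch of how a consumer locates this block, following the layout comment above (the marker string sits at the very end of the image, with the info block immediately before it; error handling trimmed):

    static int example_find_sig(const void *buf, size_t len)
    {
            const size_t markerlen = sizeof(MODULE_SIG_STRING) - 1;
            const struct module_signature *ms;

            if (len < markerlen + sizeof(*ms))
                    return -ENODATA;
            if (memcmp(buf + len - markerlen, MODULE_SIG_STRING, markerlen))
                    return -ENODATA;        /* no signature marker */

            len -= markerlen;               /* strip the marker string */
            ms = buf + len - sizeof(*ms);   /* info block precedes it */
            return mod_check_sig(ms, len, "example");
    }
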
diff --git a/include/linux/module_symbol.h b/include/linux/module_symbol.h
new file mode 100644
index 000000000000..7ace7ba30203
--- /dev/null
+++ b/include/linux/module_symbol.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LINUX_MODULE_SYMBOL_H
+#define _LINUX_MODULE_SYMBOL_H
+
+/* This ignores the intensely annoying "mapping symbols" found in ELF files. */
+static inline bool is_mapping_symbol(const char *str)
+{
+ if (str[0] == '.' && str[1] == 'L')
+ return true;
+ if (str[0] == 'L' && str[1] == '0')
+ return true;
+ return str[0] == '$' &&
+ (str[1] == 'a' || str[1] == 'd' || str[1] == 't' || str[1] == 'x')
+ && (str[2] == '\0' || str[2] == '.');
+}
+
+#endif /* _LINUX_MODULE_SYMBOL_H */
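
Usage sketch: callers walk an ELF symbol table and skip these entries, e.g.:

    /* Return the first non-mapping, non-empty symbol name, or NULL. */
    static const char *example_first_real_symbol(const char *strtab,
                                                 const Elf_Sym *syms,
                                                 unsigned int num)
    {
            unsigned int i;

            for (i = 0; i < num; i++) {
                    const char *name = strtab + syms[i].st_name;

                    if (*name && !is_mapping_symbol(name))
                            return name;
            }
            return NULL;
    }
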
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
index 4d0cb9bba93e..03be088fb439 100644
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MODULELOADER_H
#define _LINUX_MODULELOADER_H
/* The stuff needed for archs to support modules. */
@@ -12,6 +13,9 @@
* must be implemented by each architecture.
*/
+/* arch may override to do additional checking of ELF header architecture */
+bool module_elf_check_arch(Elf_Ehdr *hdr);
+
/* Adjust arch-specific sections. Return 0 on success. */
int module_frob_arch_sections(Elf_Ehdr *hdr,
Elf_Shdr *sechdrs,
@@ -28,6 +32,16 @@ void *module_alloc(unsigned long size);
/* Free memory returned from module_alloc. */
void module_memfree(void *module_region);
+/* Determines if the section name is an init section (that is only used during
+ * module loading).
+ */
+bool module_init_section(const char *name);
+
+/* Determines if the section name is an exit section (that is only used during
+ * module unloading)
+ */
+bool module_exit_section(const char *name);
+
/*
* Apply the given relocation to the (simplified) ELF. Return -error
* or 0.
@@ -61,6 +75,23 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
unsigned int symindex,
unsigned int relsec,
struct module *mod);
+#ifdef CONFIG_LIVEPATCH
+/*
+ * Some architectures (namely x86_64 and ppc64) perform sanity checks when
+ * applying relocations. If a patched module gets unloaded and then later
+ * reloaded (and re-patched), klp re-applies relocations to the replacement
+ * function(s). Any leftover relocations from the previous loading of the
+ * patched module might trigger the sanity checks.
+ *
+ * To prevent that, when unloading a patched module, clear out any relocations
+ * that might trigger arch-specific sanity checks on a future module reload.
+ */
+void clear_relocate_add(Elf_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me);
+#endif
#else
static inline int apply_relocate_add(Elf_Shdr *sechdrs,
const char *strtab,
@@ -85,7 +116,8 @@ void module_arch_cleanup(struct module *mod);
/* Any cleanup before freeing mod->module_init */
void module_arch_freeing_init(struct module *mod);
-#ifdef CONFIG_KASAN
+#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+ !defined(CONFIG_KASAN_VMALLOC)
#include <linux/kasan.h>
#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 1ee7b30dafec..962cd41a2cb5 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MODULE_PARAMS_H
#define _LINUX_MODULE_PARAMS_H
/* (C) Copyright 2001, 2002 Rusty Russell IBM Corporation */
@@ -9,25 +10,23 @@
module name. */
#ifdef MODULE
#define MODULE_PARAM_PREFIX /* empty */
+#define __MODULE_INFO_PREFIX /* empty */
#else
#define MODULE_PARAM_PREFIX KBUILD_MODNAME "."
+/* We cannot use MODULE_PARAM_PREFIX because some modules override it. */
+#define __MODULE_INFO_PREFIX KBUILD_MODNAME "."
#endif
/* Chosen so that structs with an unsigned long line up. */
#define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))
-#ifdef MODULE
-#define __MODULE_INFO(tag, name, info) \
-static const char __UNIQUE_ID(name)[] \
- __used __attribute__((section(".modinfo"), unused, aligned(1))) \
- = __stringify(tag) "=" info
-#else /* !MODULE */
-/* This struct is here for syntactic coherency, it is not used */
#define __MODULE_INFO(tag, name, info) \
- struct __UNIQUE_ID(name) {}
-#endif
+ static const char __UNIQUE_ID(name)[] \
+ __used __section(".modinfo") __aligned(1) \
+ = __MODULE_INFO_PREFIX __stringify(tag) "=" info
+
#define __MODULE_PARM_TYPE(name, _type) \
- __MODULE_INFO(parmtype, name##type, #name ":" _type)
+ __MODULE_INFO(parmtype, name##type, #name ":" _type)
/* One for each parameter, describing how to use it. Some files do
multiple of these per line, so can't just use MODULE_INFO. */
@@ -101,15 +100,15 @@ struct kparam_array
/**
* module_param - typesafe helper for a module/cmdline parameter
- * @value: the variable to alter, and exposed parameter name.
+ * @name: the variable to alter, and exposed parameter name.
* @type: the type of the parameter
* @perm: visibility in sysfs.
*
- * @value becomes the module parameter, or (prefixed by KBUILD_MODNAME and a
+ * @name becomes the module parameter, or (prefixed by KBUILD_MODNAME and a
* ".") the kernel commandline parameter. Note that - is changed to _, so
* the user can use "foo-bar=1" even for variable "foo_bar".
*
- * @perm is 0 if the the variable is not to appear in sysfs, or 0444
+ * @perm is 0 if the variable is not to appear in sysfs, or 0444
* for world-readable, 0644 for root-writable, etc. Note that if it
* is writable, you may need to use kernel_param_lock() around
* accesses (esp. charp, which can be kfreed when it changes).
@@ -119,7 +118,7 @@ struct kparam_array
* you can create your own by defining those variables.
*
* Standard types are:
- * byte, short, ushort, int, uint, long, ulong
+ * byte, hexint, short, ushort, int, uint, long, ulong
* charp: a character pointer
* bool: a bool, values 0/1, y/n, Y/N.
* invbool: the above, only sense-reversed (N = true).
@@ -129,6 +128,9 @@ struct kparam_array
/**
* module_param_unsafe - same as module_param but taints kernel
+ * @name: the variable to alter, and exposed parameter name.
+ * @type: the type of the parameter
+ * @perm: visibility in sysfs.
*/
#define module_param_unsafe(name, type, perm) \
module_param_named_unsafe(name, name, type, perm)
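
For reference, a typical declaration per the documentation above (variable and description invented):

    static unsigned int debug_level = 1;
    module_param(debug_level, uint, 0644);  /* root-writable in sysfs */
    MODULE_PARM_DESC(debug_level, "Verbosity; 0 disables debug output");
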
@@ -151,6 +153,10 @@ struct kparam_array
/**
* module_param_named_unsafe - same as module_param_named but taints kernel
+ * @name: a valid C identifier which is the parameter name.
+ * @value: the actual lvalue to alter.
+ * @type: the type of the parameter
+ * @perm: visibility in sysfs.
*/
#define module_param_named_unsafe(name, value, type, perm) \
param_check_##type(name, &(value)); \
@@ -161,6 +167,7 @@ struct kparam_array
* module_param_cb - general callback for a module/cmdline parameter
* @name: a valid C identifier which is the parameter name.
* @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
* @perm: visibility in sysfs.
*
* The ops can have NULL set or get functions.
@@ -172,36 +179,96 @@ struct kparam_array
__module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1, \
KERNEL_PARAM_FL_UNSAFE)
+#define __level_param_cb(name, ops, arg, perm, level) \
+ __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, level, 0)
/**
- * <level>_param_cb - general callback for a module/cmdline parameter
- * to be evaluated before certain initcall level
+ * core_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before core initcall level
* @name: a valid C identifier which is the parameter name.
* @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
* @perm: visibility in sysfs.
*
* The ops can have NULL set or get functions.
*/
-#define __level_param_cb(name, ops, arg, perm, level) \
- __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, level, 0)
-
#define core_param_cb(name, ops, arg, perm) \
__level_param_cb(name, ops, arg, perm, 1)
+/**
+ * postcore_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before postcore initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
#define postcore_param_cb(name, ops, arg, perm) \
__level_param_cb(name, ops, arg, perm, 2)
+/**
+ * arch_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before arch initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
#define arch_param_cb(name, ops, arg, perm) \
__level_param_cb(name, ops, arg, perm, 3)
+/**
+ * subsys_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before subsys initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
#define subsys_param_cb(name, ops, arg, perm) \
__level_param_cb(name, ops, arg, perm, 4)
+/**
+ * fs_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before fs initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
#define fs_param_cb(name, ops, arg, perm) \
__level_param_cb(name, ops, arg, perm, 5)
+/**
+ * device_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before device initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
#define device_param_cb(name, ops, arg, perm) \
__level_param_cb(name, ops, arg, perm, 6)
+/**
+ * late_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before late initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
#define late_param_cb(name, ops, arg, perm) \
__level_param_cb(name, ops, arg, perm, 7)
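
A sketch of a custom-ops parameter tying these pieces together; it reuses param_get_uint and the param_set_uint_minmax() helper declared later in this header (the parameter itself is invented):

    static unsigned int ratio = 50;

    static int ratio_set(const char *val, const struct kernel_param *kp)
    {
            return param_set_uint_minmax(val, kp, 0, 100);  /* clamp 0..100 */
    }

    static const struct kernel_param_ops ratio_ops = {
            .set = ratio_set,
            .get = param_get_uint,
    };
    module_param_cb(ratio, &ratio_ops, &ratio, 0644);
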
@@ -221,25 +288,17 @@ struct kparam_array
/* Default value instead of permissions? */ \
static const char __param_str_##name[] = prefix #name; \
static struct kernel_param __moduleparam_const __param_##name \
- __used \
- __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \
+ __used __section("__param") \
+ __aligned(__alignof__(struct kernel_param)) \
= { __param_str_##name, THIS_MODULE, ops, \
VERIFY_OCTAL_PERMISSIONS(perm), level, flags, { arg } }
/* Obsolete - use module_param_cb() */
-#define module_param_call(name, set, get, arg, perm) \
- static const struct kernel_param_ops __param_ops_##name = \
- { .flags = 0, (void *)set, (void *)get }; \
+#define module_param_call(name, _set, _get, arg, perm) \
+ static const struct kernel_param_ops __param_ops_##name = \
+ { .flags = 0, .set = _set, .get = _get }; \
__module_param_call(MODULE_PARAM_PREFIX, \
- name, &__param_ops_##name, arg, \
- (perm) + sizeof(__check_old_set_param(set))*0, -1, 0)
-
-/* We don't get oldget: it's often a new-style param_get_uint, etc. */
-static inline int
-__check_old_set_param(int (*oldset)(const char *, struct kernel_param *))
-{
- return 0;
-}
+ name, &__param_ops_##name, arg, perm, -1, 0)
#ifdef CONFIG_SYSFS
extern void kernel_param_lock(struct module *mod);
@@ -272,6 +331,10 @@ static inline void kernel_param_unlock(struct module *mod)
/**
* core_param_unsafe - same as core_param but taints kernel
+ * @name: the name of the cmdline and sysfs parameter (often the same as var)
+ * @var: the variable
+ * @type: the type of the parameter
+ * @perm: visibility in sysfs
*/
#define core_param_unsafe(name, var, type, perm) \
param_check_##type(name, &(var)); \
@@ -368,6 +431,8 @@ extern int param_get_int(char *buffer, const struct kernel_param *kp);
extern const struct kernel_param_ops param_ops_uint;
extern int param_set_uint(const char *val, const struct kernel_param *kp);
extern int param_get_uint(char *buffer, const struct kernel_param *kp);
+int param_set_uint_minmax(const char *val, const struct kernel_param *kp,
+ unsigned int min, unsigned int max);
#define param_check_uint(name, p) __param_check(name, p, unsigned int)
extern const struct kernel_param_ops param_ops_long;
@@ -385,6 +450,11 @@ extern int param_set_ullong(const char *val, const struct kernel_param *kp);
extern int param_get_ullong(char *buffer, const struct kernel_param *kp);
#define param_check_ullong(name, p) __param_check(name, p, unsigned long long)
+extern const struct kernel_param_ops param_ops_hexint;
+extern int param_set_hexint(const char *val, const struct kernel_param *kp);
+extern int param_get_hexint(char *buffer, const struct kernel_param *kp);
+#define param_check_hexint(name, p) param_check_uint(name, p)
+
extern const struct kernel_param_ops param_ops_charp;
extern int param_set_charp(const char *val, const struct kernel_param *kp);
extern int param_get_charp(char *buffer, const struct kernel_param *kp);
diff --git a/include/linux/most.h b/include/linux/most.h
new file mode 100644
index 000000000000..232e01b7f5d2
--- /dev/null
+++ b/include/linux/most.h
@@ -0,0 +1,337 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * most.h - API for component and adapter drivers
+ *
+ * Copyright (C) 2013-2015, Microchip Technology Germany II GmbH & Co. KG
+ */
+
+#ifndef __MOST_CORE_H__
+#define __MOST_CORE_H__
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+struct module;
+struct interface_private;
+
+/**
+ * Interface type
+ */
+enum most_interface_type {
+ ITYPE_LOOPBACK = 1,
+ ITYPE_I2C,
+ ITYPE_I2S,
+ ITYPE_TSI,
+ ITYPE_HBI,
+ ITYPE_MEDIALB_DIM,
+ ITYPE_MEDIALB_DIM2,
+ ITYPE_USB,
+ ITYPE_PCIE
+};
+
+/**
+ * Channel direction.
+ */
+enum most_channel_direction {
+ MOST_CH_RX = 1 << 0,
+ MOST_CH_TX = 1 << 1,
+};
+
+/**
+ * Channel data type.
+ */
+enum most_channel_data_type {
+ MOST_CH_CONTROL = 1 << 0,
+ MOST_CH_ASYNC = 1 << 1,
+ MOST_CH_ISOC = 1 << 2,
+ MOST_CH_SYNC = 1 << 5,
+};
+
+enum most_status_flags {
+	/* MBO was processed successfully (data was sent or received) */
+ MBO_SUCCESS = 0,
+ /* The MBO contains wrong or missing information. */
+ MBO_E_INVAL,
+	/* MBO was completed because the HDM channel will be closed */
+ MBO_E_CLOSE,
+};
+
+/**
+ * struct most_channel_capability - Channel capability
+ * @direction: Supported channel directions.
+ * The value is bitwise OR-combination of the values from the
+ * enumeration most_channel_direction. Zero is allowed value and means
+ * "channel may not be used".
+ * @data_type: Supported channel data types.
+ * The value is bitwise OR-combination of the values from the
+ * enumeration most_channel_data_type. Zero is allowed value and means
+ * "channel may not be used".
+ * @num_buffers_packet: Maximum number of buffers supported by this channel
+ * for packet data types (Async,Control,QoS)
+ * @buffer_size_packet: Maximum buffer size supported by this channel
+ * for packet data types (Async,Control,QoS)
+ * @num_buffers_streaming: Maximum number of buffers supported by this channel
+ * for streaming data types (Sync,AV Packetized)
+ * @buffer_size_streaming: Maximum buffer size supported by this channel
+ * for streaming data types (Sync,AV Packetized)
+ * @name_suffix: Optional suffix provided by an HDM that is attached to the
+ * regular channel name.
+ *
+ * Describes the capabilities of a MOST channel like supported Data Types
+ * and directions. This information is provided by an HDM for the MostCore.
+ *
+ * The Core creates read only sysfs attribute files in
+ * /sys/devices/most/mdev#/<channel>/ with the
+ * following attributes:
+ * -available_directions
+ * -available_datatypes
+ * -number_of_packet_buffers
+ * -number_of_stream_buffers
+ * -size_of_packet_buffer
+ * -size_of_stream_buffer
+ * where content of each file is a string with all supported properties of this
+ * very channel attribute.
+ */
+struct most_channel_capability {
+ u16 direction;
+ u16 data_type;
+ u16 num_buffers_packet;
+ u16 buffer_size_packet;
+ u16 num_buffers_streaming;
+ u16 buffer_size_streaming;
+ const char *name_suffix;
+};
+
+/**
+ * struct most_channel_config - stores channel configuration
+ * @direction: direction of the channel
+ * @data_type: data type travelling over this channel
+ * @num_buffers: number of buffers
+ * @buffer_size: size of a buffer for AIM.
+ * Buffer size may be cut down by the HDM in a configure callback
+ * to match to a given interface and channel type.
+ * @extra_len: additional buffer space for internal HDM purposes like padding.
+ * May be set by HDM in a configure callback if needed.
+ * @subbuffer_size: size of a subbuffer
+ * @packets_per_xact: number of MOST frames that are packed inside one USB
+ * packet. This is USB-specific.
+ *
+ * Describes the configuration for a MOST channel. This information is
+ * provided from the MostCore to a HDM (like the Medusa PCIe Interface) as a
+ * parameter of the "configure" function call.
+ */
+struct most_channel_config {
+ enum most_channel_direction direction;
+ enum most_channel_data_type data_type;
+ u16 num_buffers;
+ u16 buffer_size;
+ u16 extra_len;
+ u16 subbuffer_size;
+ u16 packets_per_xact;
+ u16 dbr_size;
+};
+
+/*
+ * struct mbo - MOST Buffer Object.
+ * @context: context for core completion handler
+ * @priv: private data for HDM
+ *
+ * public: documented fields that are used for the communications
+ * between MostCore and HDMs
+ *
+ * @list: list head for use by the mbo's current owner
+ * @ifp: (in) associated interface instance
+ * @num_buffers_ptr: amount of pool buffers
+ * @hdm_channel_id: (in) HDM channel instance
+ * @virt_address: (in) kernel virtual address of the buffer
+ * @bus_address: (in) bus address of the buffer
+ * @buffer_length: (in) buffer payload length
+ * @processed_length: (out) processed length
+ * @status: (out) transfer status
+ * @complete: (in) completion routine
+ *
+ * The core allocates and initializes the MBO.
+ *
+ * The HDM receives MBO for transfer from the core with the call to enqueue().
+ * The HDM copies the data to- or from the buffer depending on configured
+ * channel direction, set "processed_length" and "status" and completes
+ * the transfer procedure by calling the completion routine.
+ *
+ * Finally, the MBO is being deallocated or recycled for further
+ * transfers of the same or a different HDM.
+ *
+ * Directions of usage:
+ * I.
+ * The core driver should never access any MBO fields (even if marked
+ * as "public") while the MBO is owned by an HDM. The ownership starts with
+ * the call of enqueue() and ends with the call of its complete() routine.
+ *
+ * II.
+ * Every HDM attached to the core driver _must_ ensure that it returns any MBO
+ * it owns (due to a previous call to enqueue() by the core driver) before it
+ * de-registers an interface or gets unloaded from the kernel. If this direction
+ * is violated, memory leaks will occur, since the core driver does _not_ track
+ * MBOs it is currently not in control of.
+ *
+ */
+struct mbo {
+ void *context;
+ void *priv;
+ struct list_head list;
+ struct most_interface *ifp;
+ int *num_buffers_ptr;
+ u16 hdm_channel_id;
+ void *virt_address;
+ dma_addr_t bus_address;
+ u16 buffer_length;
+ u16 processed_length;
+ enum most_status_flags status;
+ void (*complete)(struct mbo *mbo);
+};
+
+/**
+ * Interface instance description.
+ *
+ * Describes an interface of a MOST device the core driver is bound to.
+ * This structure is allocated and initialized in the HDM. MostCore may not
+ * modify this structure.
+ *
+ * @dev: the actual device
+ * @mod: module
+ * @interface Interface type. \sa most_interface_type.
+ * @description PRELIMINARY.
+ * Unique description of the device instance from point of view of the
+ * interface in free text form (ASCII).
+ * It may be a hexadecimal representation of the memory address for the MediaLB
+ * IP or USB device ID with USB properties for USB interface, etc.
+ * @num_channels Number of channels and size of the channel_vector.
+ * @channel_vector Properties of the channels.
+ * Array index represents the channel ID used by the driver.
+ * @configure Callback to change data type for the channel of the
+ * interface instance. May be zero if the instance of the interface is not
+ * configurable. Parameter channel_config describes direction and data
+ * type for the channel, configured by the higher level.
+ * @enqueue Delivers MBO to the HDM for processing.
+ * After HDM completes Rx- or Tx- operation the processed MBO shall
+ * be returned back to the MostCore using completion routine.
+ * The reason to get the MBO delivered from the MostCore after the channel
+ * is poisoned is the re-opening of the channel by the application.
+ * In this case the HDM shall hold MBOs and service the channel as usual.
+ * The HDM must be able to hold at least one MBO for each channel.
+ * The callback returns a negative value on error, otherwise 0.
+ * @poison_channel Informs HDM about closing the channel. The HDM shall
+ * cancel all transfers and synchronously or asynchronously return
+ * all enqueued for this channel MBOs using the completion routine.
+ * The callback returns a negative value on error, otherwise 0.
+ * @request_netinfo: triggers retrieving of network info from the HDM by
+ * means of "Message exchange over MDP/MEP"
+ * The call of the function request_netinfo with the parameter on_netinfo as
+ * NULL prohibits use of the previously obtained function pointer.
+ * @priv Private field used by mostcore to store context information.
+ */
+struct most_interface {
+ struct device *dev;
+ struct device *driver_dev;
+ struct module *mod;
+ enum most_interface_type interface;
+ const char *description;
+ unsigned int num_channels;
+ struct most_channel_capability *channel_vector;
+ void *(*dma_alloc)(struct mbo *mbo, u32 size);
+ void (*dma_free)(struct mbo *mbo, u32 size);
+ int (*configure)(struct most_interface *iface, int channel_idx,
+ struct most_channel_config *channel_config);
+ int (*enqueue)(struct most_interface *iface, int channel_idx,
+ struct mbo *mbo);
+ int (*poison_channel)(struct most_interface *iface, int channel_idx);
+ void (*request_netinfo)(struct most_interface *iface, int channel_idx,
+ void (*on_netinfo)(struct most_interface *iface,
+ unsigned char link_stat,
+ unsigned char *mac_addr));
+ void *priv;
+ struct interface_private *p;
+};
+
+/**
+ * struct most_component - identifies a loadable component for the mostcore
+ * @list: list_head
+ * @name: component name
+ * @probe_channel: function for core to notify driver about channel connection
+ * @disconnect_channel: callback function to disconnect a certain channel
+ * @rx_completion: completion handler for received packets
+ * @tx_completion: completion handler for transmitted packets
+ */
+struct most_component {
+ struct list_head list;
+ const char *name;
+ struct module *mod;
+ int (*probe_channel)(struct most_interface *iface, int channel_idx,
+ struct most_channel_config *cfg, char *name,
+ char *param);
+ int (*disconnect_channel)(struct most_interface *iface,
+ int channel_idx);
+ int (*rx_completion)(struct mbo *mbo);
+ int (*tx_completion)(struct most_interface *iface, int channel_idx);
+ int (*cfg_complete)(void);
+};
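
A hypothetical component skeleton showing the minimum a consumer of this structure provides; all names are invented:

    static int example_probe(struct most_interface *iface, int channel_idx,
                             struct most_channel_config *cfg, char *name,
                             char *param)
    {
            return 0;       /* accept the channel */
    }

    static int example_disconnect(struct most_interface *iface,
                                  int channel_idx)
    {
            return 0;
    }

    static struct most_component example_comp = {
            .name               = "example",
            .probe_channel      = example_probe,
            .disconnect_channel = example_disconnect,
    };
    /* registered from the component's init path via:
     *      most_register_component(&example_comp);
     */
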
+
+/**
+ * most_register_interface - Registers instance of the interface.
+ * @iface: Pointer to the interface instance description.
+ *
+ * Returns 0 on success or a negative error code otherwise.
+ *
+ * Note: HDM has to ensure that any reference held on the kobj is
+ * released before deregistering the interface.
+ */
+int most_register_interface(struct most_interface *iface);
+
+/**
+ * most_deregister_interface - deregisters an instance of the interface
+ * @iface: Pointer to the interface instance description.
+ */
+void most_deregister_interface(struct most_interface *iface);
+void most_submit_mbo(struct mbo *mbo);
+
+/**
+ * most_stop_enqueue - prevents core from enqueuing MBOs
+ * @iface: pointer to interface
+ * @channel_idx: channel index
+ */
+void most_stop_enqueue(struct most_interface *iface, int channel_idx);
+
+/**
+ * most_resume_enqueue - allow core to enqueue MBOs again
+ * @iface: pointer to interface
+ * @channel_idx: channel index
+ *
+ * This clears the enqueue halt flag and enqueues all MBOs currently
+ * in the wait fifo.
+ */
+void most_resume_enqueue(struct most_interface *iface, int channel_idx);
+int most_register_component(struct most_component *comp);
+int most_deregister_component(struct most_component *comp);
+struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx,
+ struct most_component *comp);
+void most_put_mbo(struct mbo *mbo);
+int channel_has_mbo(struct most_interface *iface, int channel_idx,
+ struct most_component *comp);
+int most_start_channel(struct most_interface *iface, int channel_idx,
+ struct most_component *comp);
+int most_stop_channel(struct most_interface *iface, int channel_idx,
+ struct most_component *comp);
+int __init configfs_init(void);
+int most_register_configfs_subsys(struct most_component *comp);
+void most_deregister_configfs_subsys(struct most_component *comp);
+int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
+ char *comp_param);
+int most_remove_link(char *mdev, char *mdev_ch, char *comp_name);
+int most_set_cfg_buffer_size(char *mdev, char *mdev_ch, u16 val);
+int most_set_cfg_subbuffer_size(char *mdev, char *mdev_ch, u16 val);
+int most_set_cfg_dbr_size(char *mdev, char *mdev_ch, u16 val);
+int most_set_cfg_num_buffers(char *mdev, char *mdev_ch, u16 val);
+int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf);
+int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf);
+int most_set_cfg_packets_xact(char *mdev, char *mdev_ch, u16 val);
+int most_cfg_complete(char *comp_name);
+void most_interface_register_notify(const char *mdev_name);
+#endif /* __MOST_CORE_H__ */
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 8e0352af06b7..1ea326c368f7 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
*
* Definitions for mount interface. This describes the in the kernel build
@@ -10,16 +11,16 @@
#define _LINUX_MOUNT_H
#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/nodemask.h>
-#include <linux/spinlock.h>
-#include <linux/seqlock.h>
-#include <linux/atomic.h>
+#include <asm/barrier.h>
struct super_block;
-struct vfsmount;
struct dentry;
-struct mnt_namespace;
+struct user_namespace;
+struct mnt_idmap;
+struct file_system_type;
+struct fs_context;
+struct file;
+struct path;
#define MNT_NOSUID 0x01
#define MNT_NODEV 0x02
@@ -28,6 +29,7 @@ struct mnt_namespace;
#define MNT_NODIRATIME 0x10
#define MNT_RELATIME 0x20
#define MNT_READONLY 0x40 /* does the user want this to be r/o? */
+#define MNT_NOSYMFOLLOW 0x80
#define MNT_SHRINKABLE 0x100
#define MNT_WRITE_HOLD 0x200
@@ -44,11 +46,12 @@ struct mnt_namespace;
#define MNT_SHARED_MASK (MNT_UNBINDABLE)
#define MNT_USER_SETTABLE_MASK (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \
| MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \
- | MNT_READONLY)
+ | MNT_READONLY | MNT_NOSYMFOLLOW)
#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
- MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED)
+ MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | \
+ MNT_CURSOR)
#define MNT_INTERNAL 0x4000
@@ -62,31 +65,38 @@ struct mnt_namespace;
#define MNT_SYNC_UMOUNT 0x2000000
#define MNT_MARKED 0x4000000
#define MNT_UMOUNT 0x8000000
+#define MNT_CURSOR 0x10000000
struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
struct super_block *mnt_sb; /* pointer to superblock */
int mnt_flags;
-};
+ struct mnt_idmap *mnt_idmap;
+} __randomize_layout;
-struct file; /* forward dec */
-struct path;
+static inline struct mnt_idmap *mnt_idmap(const struct vfsmount *mnt)
+{
+ /* Pairs with smp_store_release() in do_idmap_mount(). */
+ return smp_load_acquire(&mnt->mnt_idmap);
+}
extern int mnt_want_write(struct vfsmount *mnt);
extern int mnt_want_write_file(struct file *file);
-extern int mnt_clone_write(struct vfsmount *mnt);
extern void mnt_drop_write(struct vfsmount *mnt);
extern void mnt_drop_write_file(struct file *file);
extern void mntput(struct vfsmount *mnt);
extern struct vfsmount *mntget(struct vfsmount *mnt);
+extern void mnt_make_shortterm(struct vfsmount *mnt);
extern struct vfsmount *mnt_clone_internal(const struct path *path);
-extern int __mnt_is_readonly(struct vfsmount *mnt);
+extern bool __mnt_is_readonly(struct vfsmount *mnt);
extern bool mnt_may_suid(struct vfsmount *mnt);
-struct path;
extern struct vfsmount *clone_private_mount(const struct path *path);
+extern int __mnt_want_write(struct vfsmount *);
+extern void __mnt_drop_write(struct vfsmount *);
-struct file_system_type;
+extern struct vfsmount *fc_mount(struct fs_context *fc);
+extern struct vfsmount *vfs_create_mount(struct fs_context *fc);
extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
int flags, const char *name,
void *data);
@@ -98,9 +108,20 @@ extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list);
extern void mark_mounts_for_expiry(struct list_head *mounts);
extern dev_t name_to_dev_t(const char *name);
-
-extern unsigned int sysctl_mount_max;
-
extern bool path_is_mountpoint(const struct path *path);
+extern bool our_mnt(struct vfsmount *mnt);
+
+extern struct vfsmount *kern_mount(struct file_system_type *);
+extern void kern_unmount(struct vfsmount *mnt);
+extern int may_umount_tree(struct vfsmount *);
+extern int may_umount(struct vfsmount *);
+extern long do_mount(const char *, const char __user *,
+ const char *, unsigned long, void *);
+extern struct vfsmount *collect_mounts(const struct path *);
+extern void drop_collected_mounts(struct vfsmount *);
+extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
+ struct vfsmount *);
+extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num);
+
#endif /* _LINUX_MOUNT_H */
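
As a usage note on the write-access helpers above, a minimal sketch of the
intended mnt_want_write()/mnt_drop_write() pairing; the function and the
modification step are hypothetical:

	/* Take write access on a mount for the duration of a modification. */
	static int touch_something(struct vfsmount *mnt)
	{
		int err;

		err = mnt_want_write(mnt);	/* fails on read-only mounts */
		if (err)
			return err;

		/* ... perform the filesystem modification here ... */

		mnt_drop_write(mnt);	/* always pairs with mnt_want_write() */
		return 0;
	}
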
diff --git a/include/linux/moxtet.h b/include/linux/moxtet.h
new file mode 100644
index 000000000000..79184948fab4
--- /dev/null
+++ b/include/linux/moxtet.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Turris Mox module configuration bus driver
+ *
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
+ */
+
+#ifndef __LINUX_MOXTET_H
+#define __LINUX_MOXTET_H
+
+#include <linux/device.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mutex.h>
+
+#define TURRIS_MOX_MAX_MODULES 10
+
+enum turris_mox_cpu_module_id {
+ TURRIS_MOX_CPU_ID_EMMC = 0x00,
+ TURRIS_MOX_CPU_ID_SD = 0x10,
+};
+
+enum turris_mox_module_id {
+ TURRIS_MOX_MODULE_FIRST = 0x01,
+
+ TURRIS_MOX_MODULE_SFP = 0x01,
+ TURRIS_MOX_MODULE_PCI = 0x02,
+ TURRIS_MOX_MODULE_TOPAZ = 0x03,
+ TURRIS_MOX_MODULE_PERIDOT = 0x04,
+ TURRIS_MOX_MODULE_USB3 = 0x05,
+ TURRIS_MOX_MODULE_PCI_BRIDGE = 0x06,
+
+ TURRIS_MOX_MODULE_LAST = 0x06,
+};
+
+#define MOXTET_NIRQS 16
+
+extern struct bus_type moxtet_type;
+
+struct moxtet {
+ struct device *dev;
+ struct mutex lock;
+ u8 modules[TURRIS_MOX_MAX_MODULES];
+ int count;
+ u8 tx[TURRIS_MOX_MAX_MODULES];
+ int dev_irq;
+ struct {
+ struct irq_domain *domain;
+ struct irq_chip chip;
+ unsigned long masked, exists;
+ struct moxtet_irqpos {
+ u8 idx;
+ u8 bit;
+ } position[MOXTET_NIRQS];
+ } irq;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root;
+#endif
+};
+
+struct moxtet_driver {
+ const enum turris_mox_module_id *id_table;
+ struct device_driver driver;
+};
+
+static inline struct moxtet_driver *
+to_moxtet_driver(struct device_driver *drv)
+{
+ if (!drv)
+ return NULL;
+ return container_of(drv, struct moxtet_driver, driver);
+}
+
+extern int __moxtet_register_driver(struct module *owner,
+ struct moxtet_driver *mdrv);
+
+static inline void moxtet_unregister_driver(struct moxtet_driver *mdrv)
+{
+ if (mdrv)
+ driver_unregister(&mdrv->driver);
+}
+
+#define moxtet_register_driver(driver) \
+ __moxtet_register_driver(THIS_MODULE, driver)
+
+#define module_moxtet_driver(__moxtet_driver) \
+ module_driver(__moxtet_driver, moxtet_register_driver, \
+ moxtet_unregister_driver)
+
+struct moxtet_device {
+ struct device dev;
+ struct moxtet *moxtet;
+ enum turris_mox_module_id id;
+ unsigned int idx;
+};
+
+extern int moxtet_device_read(struct device *dev);
+extern int moxtet_device_write(struct device *dev, u8 val);
+extern int moxtet_device_written(struct device *dev);
+
+static inline struct moxtet_device *
+to_moxtet_device(struct device *dev)
+{
+ if (!dev)
+ return NULL;
+ return container_of(dev, struct moxtet_device, dev);
+}
+
+#endif /* __LINUX_MOXTET_H */
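
A hedged sketch of how a module driver might use this bus API; the driver
name, the probe body and the assumption that @id_table is 0-terminated are
illustrative only:

	static const enum turris_mox_module_id my_ids[] = {
		TURRIS_MOX_MODULE_SFP,
		0,				/* assumed sentinel */
	};

	static int my_probe(struct device *dev)
	{
		struct moxtet_device *mdev = to_moxtet_device(dev);

		dev_info(dev, "module 0x%x at position %u\n", mdev->id, mdev->idx);
		return moxtet_device_write(dev, 0);	/* initial TX value */
	}

	static struct moxtet_driver my_driver = {
		.id_table = my_ids,
		.driver = {
			.name  = "my-moxtet-driver",
			.probe = my_probe,
		},
	};
	module_moxtet_driver(my_driver);
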
diff --git a/include/linux/mpage.h b/include/linux/mpage.h
index 068a0c9946af..1bdc39daac0a 100644
--- a/include/linux/mpage.h
+++ b/include/linux/mpage.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/mpage.h
*
@@ -12,13 +13,11 @@
#ifdef CONFIG_BLOCK
struct writeback_control;
+struct readahead_control;
-int mpage_readpages(struct address_space *mapping, struct list_head *pages,
- unsigned nr_pages, get_block_t get_block);
-int mpage_readpage(struct page *page, get_block_t get_block);
+void mpage_readahead(struct readahead_control *, get_block_t get_block);
+int mpage_read_folio(struct folio *folio, get_block_t get_block);
int mpage_writepages(struct address_space *mapping,
struct writeback_control *wbc, get_block_t get_block);
-int mpage_writepage(struct page *page, get_block_t *get_block,
- struct writeback_control *wbc);
#endif
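
To illustrate the new folio-based entry points, a sketch of how a filesystem
would typically wire them into its address_space_operations; myfs_get_block
stands in for the filesystem's own get_block_t implementation:

	static int myfs_read_folio(struct file *file, struct folio *folio)
	{
		return mpage_read_folio(folio, myfs_get_block);
	}

	static void myfs_readahead(struct readahead_control *rac)
	{
		mpage_readahead(rac, myfs_get_block);
	}

	static const struct address_space_operations myfs_aops = {
		.read_folio	= myfs_read_folio,
		.readahead	= myfs_readahead,
	};
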
diff --git a/include/linux/mpi.h b/include/linux/mpi.h
index 1cc5ffb769af..eb0d1c1db208 100644
--- a/include/linux/mpi.h
+++ b/include/linux/mpi.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* mpi.h - Multi Precision Integers
* Copyright (C) 1994, 1996, 1998, 1999,
* 2000, 2001 Free Software Foundation, Inc.
*
* This file is part of GNUPG.
*
- * GNUPG is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * GNUPG is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
- *
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
@@ -53,100 +40,234 @@ struct gcry_mpi {
typedef struct gcry_mpi *MPI;
#define mpi_get_nlimbs(a) ((a)->nlimbs)
-#define mpi_is_neg(a) ((a)->sign)
+#define mpi_has_sign(a) ((a)->sign)
/*-- mpiutil.c --*/
MPI mpi_alloc(unsigned nlimbs);
-MPI mpi_alloc_secure(unsigned nlimbs);
-MPI mpi_alloc_like(MPI a);
+void mpi_clear(MPI a);
void mpi_free(MPI a);
int mpi_resize(MPI a, unsigned nlimbs);
-int mpi_copy(MPI *copy, const MPI a);
-void mpi_clear(MPI a);
-int mpi_set(MPI w, MPI u);
-int mpi_set_ui(MPI w, ulong u);
+
+static inline MPI mpi_new(unsigned int nbits)
+{
+ return mpi_alloc((nbits + BITS_PER_MPI_LIMB - 1) / BITS_PER_MPI_LIMB);
+}
+
+MPI mpi_copy(MPI a);
+MPI mpi_alloc_like(MPI a);
+void mpi_snatch(MPI w, MPI u);
+MPI mpi_set(MPI w, MPI u);
+MPI mpi_set_ui(MPI w, unsigned long u);
MPI mpi_alloc_set_ui(unsigned long u);
-void mpi_m_check(MPI a);
-void mpi_swap(MPI a, MPI b);
+void mpi_swap_cond(MPI a, MPI b, unsigned long swap);
+
+/* Constants used to return constant MPIs. See mpi_init if you
+ * want to add more constants.
+ */
+#define MPI_NUMBER_OF_CONSTANTS 6
+enum gcry_mpi_constants {
+ MPI_C_ZERO,
+ MPI_C_ONE,
+ MPI_C_TWO,
+ MPI_C_THREE,
+ MPI_C_FOUR,
+ MPI_C_EIGHT
+};
+
+MPI mpi_const(enum gcry_mpi_constants no);
/*-- mpicoder.c --*/
-MPI do_encode_md(const void *sha_buffer, unsigned nbits);
+
+/* Different formats of external big integer representation. */
+enum gcry_mpi_format {
+ GCRYMPI_FMT_NONE = 0,
+ GCRYMPI_FMT_STD = 1, /* Two's complement stored without length. */
+ GCRYMPI_FMT_PGP = 2, /* As used by OpenPGP (unsigned only). */
+ GCRYMPI_FMT_SSH = 3, /* As used by SSH (like STD but with length). */
+ GCRYMPI_FMT_HEX = 4, /* Hex format. */
+ GCRYMPI_FMT_USG = 5, /* Like STD but unsigned. */
+ GCRYMPI_FMT_OPAQUE = 8 /* Opaque format (some functions only). */
+};
+
MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes);
MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread);
-MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len);
int mpi_fromstr(MPI val, const char *str);
-u32 mpi_get_keyid(MPI a, u32 *keyid);
+MPI mpi_scanval(const char *string);
+MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len);
void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign);
int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
int *sign);
-void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign);
int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes,
int *sign);
+int mpi_print(enum gcry_mpi_format format, unsigned char *buffer,
+ size_t buflen, size_t *nwritten, MPI a);
-#define log_mpidump g10_log_mpidump
+/*-- mpi-mod.c --*/
+void mpi_mod(MPI rem, MPI dividend, MPI divisor);
-/*-- mpi-add.c --*/
-int mpi_add_ui(MPI w, MPI u, ulong v);
-int mpi_add(MPI w, MPI u, MPI v);
-int mpi_addm(MPI w, MPI u, MPI v, MPI m);
-int mpi_sub_ui(MPI w, MPI u, ulong v);
-int mpi_sub(MPI w, MPI u, MPI v);
-int mpi_subm(MPI w, MPI u, MPI v, MPI m);
-
-/*-- mpi-mul.c --*/
-int mpi_mul_ui(MPI w, MPI u, ulong v);
-int mpi_mul_2exp(MPI w, MPI u, ulong cnt);
-int mpi_mul(MPI w, MPI u, MPI v);
-int mpi_mulm(MPI w, MPI u, MPI v, MPI m);
+/* Context used with Barrett reduction. */
+struct barrett_ctx_s;
+typedef struct barrett_ctx_s *mpi_barrett_t;
-/*-- mpi-div.c --*/
-ulong mpi_fdiv_r_ui(MPI rem, MPI dividend, ulong divisor);
-int mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor);
-int mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor);
-int mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor);
-int mpi_tdiv_r(MPI rem, MPI num, MPI den);
-int mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den);
-int mpi_tdiv_q_2exp(MPI w, MPI u, unsigned count);
-int mpi_divisible_ui(const MPI dividend, ulong divisor);
-
-/*-- mpi-gcd.c --*/
-int mpi_gcd(MPI g, const MPI a, const MPI b);
+mpi_barrett_t mpi_barrett_init(MPI m, int copy);
+void mpi_barrett_free(mpi_barrett_t ctx);
+void mpi_mod_barrett(MPI r, MPI x, mpi_barrett_t ctx);
+void mpi_mul_barrett(MPI w, MPI u, MPI v, mpi_barrett_t ctx);
/*-- mpi-pow.c --*/
-int mpi_pow(MPI w, MPI u, MPI v);
int mpi_powm(MPI res, MPI base, MPI exp, MPI mod);
-/*-- mpi-mpow.c --*/
-int mpi_mulpowm(MPI res, MPI *basearray, MPI *exparray, MPI mod);
-
/*-- mpi-cmp.c --*/
int mpi_cmp_ui(MPI u, ulong v);
int mpi_cmp(MPI u, MPI v);
+int mpi_cmpabs(MPI u, MPI v);
-/*-- mpi-scan.c --*/
-int mpi_getbyte(MPI a, unsigned idx);
-void mpi_putbyte(MPI a, unsigned idx, int value);
-unsigned mpi_trailing_zeros(MPI a);
+/*-- mpi-sub-ui.c --*/
+int mpi_sub_ui(MPI w, MPI u, unsigned long vval);
/*-- mpi-bit.c --*/
void mpi_normalize(MPI a);
unsigned mpi_get_nbits(MPI a);
-int mpi_test_bit(MPI a, unsigned n);
-int mpi_set_bit(MPI a, unsigned n);
-int mpi_set_highbit(MPI a, unsigned n);
-void mpi_clear_highbit(MPI a, unsigned n);
-void mpi_clear_bit(MPI a, unsigned n);
-int mpi_rshift(MPI x, MPI a, unsigned n);
+int mpi_test_bit(MPI a, unsigned int n);
+void mpi_set_bit(MPI a, unsigned int n);
+void mpi_set_highbit(MPI a, unsigned int n);
+void mpi_clear_highbit(MPI a, unsigned int n);
+void mpi_clear_bit(MPI a, unsigned int n);
+void mpi_rshift_limbs(MPI a, unsigned int count);
+void mpi_rshift(MPI x, MPI a, unsigned int n);
+void mpi_lshift_limbs(MPI a, unsigned int count);
+void mpi_lshift(MPI x, MPI a, unsigned int n);
+
+/*-- mpi-add.c --*/
+void mpi_add_ui(MPI w, MPI u, unsigned long v);
+void mpi_add(MPI w, MPI u, MPI v);
+void mpi_sub(MPI w, MPI u, MPI v);
+void mpi_addm(MPI w, MPI u, MPI v, MPI m);
+void mpi_subm(MPI w, MPI u, MPI v, MPI m);
+
+/*-- mpi-mul.c --*/
+void mpi_mul(MPI w, MPI u, MPI v);
+void mpi_mulm(MPI w, MPI u, MPI v, MPI m);
+
+/*-- mpi-div.c --*/
+void mpi_tdiv_r(MPI rem, MPI num, MPI den);
+void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor);
+void mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor);
/*-- mpi-inv.c --*/
-int mpi_invm(MPI x, MPI u, MPI v);
+int mpi_invm(MPI x, MPI a, MPI n);
+
+/*-- ec.c --*/
+
+/* Object to represent a point in projective coordinates */
+struct gcry_mpi_point {
+ MPI x;
+ MPI y;
+ MPI z;
+};
+
+typedef struct gcry_mpi_point *MPI_POINT;
+
+/* Models describing an elliptic curve */
+enum gcry_mpi_ec_models {
+ /* The Short Weierstrass equation is
+ * y^2 = x^3 + ax + b
+ */
+ MPI_EC_WEIERSTRASS = 0,
+ /* The Montgomery equation is
+ * by^2 = x^3 + ax^2 + x
+ */
+ MPI_EC_MONTGOMERY,
+ /* The Twisted Edwards equation is
+ * ax^2 + y^2 = 1 + bx^2y^2
+ * Note that we use 'b' instead of the commonly used 'd'.
+ */
+ MPI_EC_EDWARDS
+};
+
+/* Dialects used with elliptic curves */
+enum ecc_dialects {
+ ECC_DIALECT_STANDARD = 0,
+ ECC_DIALECT_ED25519,
+ ECC_DIALECT_SAFECURVE
+};
+
+/* This context is used with all our EC functions. */
+struct mpi_ec_ctx {
+ enum gcry_mpi_ec_models model; /* The model describing this curve. */
+ enum ecc_dialects dialect; /* The ECC dialect used with the curve. */
+ int flags; /* Public key flags (not always used). */
+ unsigned int nbits; /* Number of bits. */
+
+ /* Domain parameters. Note that they may not all be set and if set
+ * the MPIs may be flagged as constant.
+ */
+ MPI p; /* Prime specifying the field GF(p). */
+ MPI a; /* First coefficient of the Weierstrass equation. */
+ MPI b; /* Second coefficient of the Weierstrass equation. */
+ MPI_POINT G; /* Base point (generator). */
+ MPI n; /* Order of G. */
+ unsigned int h; /* Cofactor. */
+
+ /* The actual key. May not be set. */
+ MPI_POINT Q; /* Public key. */
+ MPI d; /* Private key. */
+
+ const char *name; /* Name of the curve. */
+
+ /* This structure is private to mpi/ec.c! */
+ struct {
+ struct {
+ unsigned int a_is_pminus3:1;
+ unsigned int two_inv_p:1;
+ } valid; /* Flags to help setting the helper vars below. */
+
+ int a_is_pminus3; /* True if A = P - 3. */
+
+ MPI two_inv_p;
+
+ mpi_barrett_t p_barrett;
+
+ /* Scratch variables. */
+ MPI scratch[11];
+
+ /* Helper for fast reduction. */
+ /* int nist_nbits; /\* If this is a NIST curve, the # of bits. *\/ */
+ /* MPI s[10]; */
+ /* MPI c; */
+ } t;
+
+ /* Curve specific computation routines for the field. */
+ void (*addm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
+ void (*subm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ec);
+ void (*mulm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
+ void (*pow2)(MPI w, const MPI b, struct mpi_ec_ctx *ctx);
+ void (*mul2)(MPI w, MPI u, struct mpi_ec_ctx *ctx);
+};
+
+void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model,
+ enum ecc_dialects dialect,
+ int flags, MPI p, MPI a, MPI b);
+void mpi_ec_deinit(struct mpi_ec_ctx *ctx);
+MPI_POINT mpi_point_new(unsigned int nbits);
+void mpi_point_release(MPI_POINT p);
+void mpi_point_init(MPI_POINT p);
+void mpi_point_free_parts(MPI_POINT p);
+int mpi_ec_get_affine(MPI x, MPI y, MPI_POINT point, struct mpi_ec_ctx *ctx);
+void mpi_ec_add_points(MPI_POINT result,
+ MPI_POINT p1, MPI_POINT p2,
+ struct mpi_ec_ctx *ctx);
+void mpi_ec_mul_point(MPI_POINT result,
+ MPI scalar, MPI_POINT point,
+ struct mpi_ec_ctx *ctx);
+int mpi_ec_curve_point(MPI_POINT point, struct mpi_ec_ctx *ctx);
/* inline functions */
/**
* mpi_get_size() - returns max size required to store the number
*
- * @a: A multi precision integer for which we want to allocate a bufer
+ * @a: A multi precision integer for which we want to allocate a buffer
*
* Return: size required to store the number
*/
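
A sketch of a typical round trip through the MPI API declared above (raw
big-endian input, modular exponentiation, raw output); the function is
hypothetical and error handling is trimmed to the essentials:

	static int mpi_powm_demo(const u8 *in, size_t len, MPI e, MPI n,
				 u8 *out, unsigned int *outlen)
	{
		MPI base, res;
		int sign, ret;

		base = mpi_read_raw_data(in, len);	/* unsigned big-endian */
		if (!base)
			return -ENOMEM;

		res = mpi_new(mpi_get_nbits(n));	/* sized to the modulus */
		if (!res) {
			mpi_free(base);
			return -ENOMEM;
		}

		ret = mpi_powm(res, base, e, n);	/* res = base^e mod n */
		if (!ret)
			ret = mpi_read_buffer(res, out, *outlen, outlen, &sign);

		mpi_free(res);
		mpi_free(base);
		return ret;
	}
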
diff --git a/include/linux/mpls.h b/include/linux/mpls.h
index 384fb22b6c43..ae1a188c011b 100644
--- a/include/linux/mpls.h
+++ b/include/linux/mpls.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MPLS_H
#define _LINUX_MPLS_H
diff --git a/include/linux/mpls_iptunnel.h b/include/linux/mpls_iptunnel.h
index ef29eb2d6dfd..140c56954fbe 100644
--- a/include/linux/mpls_iptunnel.h
+++ b/include/linux/mpls_iptunnel.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MPLS_IPTUNNEL_H
#define _LINUX_MPLS_IPTUNNEL_H
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index d7f63339ef0b..80b8400ab8b2 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -1,11 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MROUTE_H
#define __LINUX_MROUTE_H
#include <linux/in.h>
#include <linux/pim.h>
-#include <linux/rhashtable.h>
-#include <net/sock.h>
+#include <net/fib_rules.h>
+#include <net/fib_notifier.h>
#include <uapi/linux/mroute.h>
+#include <linux/mroute_base.h>
+#include <linux/sockptr.h>
#ifdef CONFIG_IP_MROUTE
static inline int ip_mroute_opt(int opt)
@@ -13,20 +16,21 @@ static inline int ip_mroute_opt(int opt)
return opt >= MRT_BASE && opt <= MRT_MAX;
}
-int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
-int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
+int ip_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int);
+int ip_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t);
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
int ip_mr_init(void);
+bool ipmr_rule_default(const struct fib_rule *rule);
#else
static inline int ip_mroute_setsockopt(struct sock *sock, int optname,
- char __user *optval, unsigned int optlen)
+ sockptr_t optval, unsigned int optlen)
{
return -ENOPROTOOPT;
}
-static inline int ip_mroute_getsockopt(struct sock *sock, int optname,
- char __user *optval, int __user *optlen)
+static inline int ip_mroute_getsockopt(struct sock *sk, int optname,
+ sockptr_t optval, sockptr_t optlen)
{
return -ENOPROTOOPT;
}
@@ -45,47 +49,15 @@ static inline int ip_mroute_opt(int opt)
{
return 0;
}
-#endif
-struct vif_device {
- struct net_device *dev; /* Device we are using */
- unsigned long bytes_in,bytes_out;
- unsigned long pkt_in,pkt_out; /* Statistics */
- unsigned long rate_limit; /* Traffic shaping (NI) */
- unsigned char threshold; /* TTL threshold */
- unsigned short flags; /* Control flags */
- __be32 local,remote; /* Addresses(remote for tunnels)*/
- int link; /* Physical interface index */
-};
+static inline bool ipmr_rule_default(const struct fib_rule *rule)
+{
+ return true;
+}
+#endif
#define VIFF_STATIC 0x8000
-#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
-
-struct mr_table {
- struct list_head list;
- possible_net_t net;
- u32 id;
- struct sock __rcu *mroute_sk;
- struct timer_list ipmr_expire_timer;
- struct list_head mfc_unres_queue;
- struct vif_device vif_table[MAXVIFS];
- struct rhltable mfc_hash;
- struct list_head mfc_cache_list;
- int maxvif;
- atomic_t cache_resolve_queue_len;
- bool mroute_do_assert;
- bool mroute_do_pim;
- int mroute_reg_vif_num;
-};
-
-/* mfc_flags:
- * MFC_STATIC - the entry was added statically (not by a routing daemon)
- */
-enum {
- MFC_STATIC = BIT(0),
-};
-
struct mfc_cache_cmp_arg {
__be32 mfc_mcastgrp;
__be32 mfc_origin;
@@ -93,27 +65,13 @@ struct mfc_cache_cmp_arg {
/**
* struct mfc_cache - multicast routing entries
- * @mnode: rhashtable list
+ * @_c: Common multicast routing information; has to be first [for casting]
* @mfc_mcastgrp: destination multicast group address
* @mfc_origin: source address
* @cmparg: used for rhashtable comparisons
- * @mfc_parent: source interface (iif)
- * @mfc_flags: entry flags
- * @expires: unresolved entry expire time
- * @unresolved: unresolved cached skbs
- * @last_assert: time of last assert
- * @minvif: minimum VIF id
- * @maxvif: maximum VIF id
- * @bytes: bytes that have passed for this entry
- * @pkt: packets that have passed for this entry
- * @wrong_if: number of wrong source interface hits
- * @lastuse: time of last use of the group (traffic or update)
- * @ttls: OIF TTL threshold array
- * @list: global entry list
- * @rcu: used for entry destruction
*/
struct mfc_cache {
- struct rhlist_head mnode;
+ struct mr_mfc _c;
union {
struct {
__be32 mfc_mcastgrp;
@@ -121,27 +79,6 @@ struct mfc_cache {
};
struct mfc_cache_cmp_arg cmparg;
};
- vifi_t mfc_parent;
- int mfc_flags;
-
- union {
- struct {
- unsigned long expires;
- struct sk_buff_head unresolved;
- } unres;
- struct {
- unsigned long last_assert;
- int minvif;
- int maxvif;
- unsigned long bytes;
- unsigned long pkt;
- unsigned long wrong_if;
- unsigned long lastuse;
- unsigned char ttls[MAXVIFS];
- } res;
- } mfc_un;
- struct list_head list;
- struct rcu_head rcu;
};
struct rtmsg;
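
The "@_c ... has to be first" rule above exists so that the generic lookup
helpers introduced in the new mroute_base.h below, which traffic in struct
mr_mfc, can hand back pointers that are also valid struct mfc_cache pointers.
A sketch of the pattern (the function name is illustrative):

	static struct mfc_cache *my_cache_find(struct mr_table *mrt,
					       __be32 origin, __be32 mcastgrp)
	{
		struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = mcastgrp,
			.mfc_origin = origin,
		};

		/* mr_mfc_find() returns the embedded mr_mfc; because _c is
		 * the first member, the pointer doubles as a struct
		 * mfc_cache pointer.
		 */
		return mr_mfc_find(mrt, &arg);
	}
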
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index ce44e3e96d27..8f2b307fb124 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MROUTE6_H
#define __LINUX_MROUTE6_H
@@ -6,6 +7,9 @@
#include <linux/skbuff.h> /* for struct sk_buff_head */
#include <net/net_namespace.h>
#include <uapi/linux/mroute6.h>
+#include <linux/mroute_base.h>
+#include <linux/sockptr.h>
+#include <net/fib_rules.h>
#ifdef CONFIG_IPV6_MROUTE
static inline int ip6_mroute_opt(int opt)
@@ -22,24 +26,23 @@ static inline int ip6_mroute_opt(int opt)
struct sock;
#ifdef CONFIG_IPV6_MROUTE
-extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
-extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
+extern int ip6_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int);
+extern int ip6_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t);
extern int ip6_mr_input(struct sk_buff *skb);
extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg);
extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
extern int ip6_mr_init(void);
extern void ip6_mr_cleanup(void);
#else
-static inline
-int ip6_mroute_setsockopt(struct sock *sock,
- int optname, char __user *optval, unsigned int optlen)
+static inline int ip6_mroute_setsockopt(struct sock *sock, int optname,
+ sockptr_t optval, unsigned int optlen)
{
return -ENOPROTOOPT;
}
static inline
int ip6_mroute_getsockopt(struct sock *sock,
- int optname, char __user *optval, int __user *optlen)
+ int optname, sockptr_t optval, sockptr_t optlen)
{
return -ENOPROTOOPT;
}
@@ -61,57 +64,33 @@ static inline void ip6_mr_cleanup(void)
}
#endif
-struct mif_device {
- struct net_device *dev; /* Device we are using */
- unsigned long bytes_in,bytes_out;
- unsigned long pkt_in,pkt_out; /* Statistics */
- unsigned long rate_limit; /* Traffic shaping (NI) */
- unsigned char threshold; /* TTL threshold */
- unsigned short flags; /* Control flags */
- int link; /* Physical interface index */
-};
+#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
+bool ip6mr_rule_default(const struct fib_rule *rule);
+#else
+static inline bool ip6mr_rule_default(const struct fib_rule *rule)
+{
+ return true;
+}
+#endif
#define VIFF_STATIC 0x8000
-struct mfc6_cache {
- struct list_head list;
- struct in6_addr mf6c_mcastgrp; /* Group the entry belongs to */
- struct in6_addr mf6c_origin; /* Source of packet */
- mifi_t mf6c_parent; /* Source interface */
- int mfc_flags; /* Flags on line */
+struct mfc6_cache_cmp_arg {
+ struct in6_addr mf6c_mcastgrp;
+ struct in6_addr mf6c_origin;
+};
+struct mfc6_cache {
+ struct mr_mfc _c;
union {
struct {
- unsigned long expires;
- struct sk_buff_head unresolved; /* Unresolved buffers */
- } unres;
- struct {
- unsigned long last_assert;
- int minvif;
- int maxvif;
- unsigned long bytes;
- unsigned long pkt;
- unsigned long wrong_if;
- unsigned long lastuse;
- unsigned char ttls[MAXMIFS]; /* TTL thresholds */
- } res;
- } mfc_un;
+ struct in6_addr mf6c_mcastgrp;
+ struct in6_addr mf6c_origin;
+ };
+ struct mfc6_cache_cmp_arg cmparg;
+ };
};
-#define MFC_STATIC 1
-#define MFC_NOTIFY 2
-
-#define MFC6_LINES 64
-
-#define MFC6_HASH(a, g) (((__force u32)(a)->s6_addr32[0] ^ \
- (__force u32)(a)->s6_addr32[1] ^ \
- (__force u32)(a)->s6_addr32[2] ^ \
- (__force u32)(a)->s6_addr32[3] ^ \
- (__force u32)(g)->s6_addr32[0] ^ \
- (__force u32)(g)->s6_addr32[1] ^ \
- (__force u32)(g)->s6_addr32[2] ^ \
- (__force u32)(g)->s6_addr32[3]) % MFC6_LINES)
-
#define MFC_ASSERT_THRESH (3*HZ) /* Maximal freq. of asserts */
struct rtmsg;
@@ -119,12 +98,12 @@ extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
struct rtmsg *rtm, u32 portid);
#ifdef CONFIG_IPV6_MROUTE
-extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
+bool mroute6_is_socket(struct net *net, struct sk_buff *skb);
extern int ip6mr_sk_done(struct sock *sk);
#else
-static inline struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
+static inline bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
{
- return NULL;
+ return false;
}
static inline int ip6mr_sk_done(struct sock *sk)
{
diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
new file mode 100644
index 000000000000..9dd4bf157255
--- /dev/null
+++ b/include/linux/mroute_base.h
@@ -0,0 +1,477 @@
+#ifndef __LINUX_MROUTE_BASE_H
+#define __LINUX_MROUTE_BASE_H
+
+#include <linux/netdevice.h>
+#include <linux/rhashtable-types.h>
+#include <linux/spinlock.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <net/fib_notifier.h>
+#include <net/ip_fib.h>
+
+/**
+ * struct vif_device - interface representor for multicast routing
+ * @dev: network device being used
+ * @dev_tracker: refcount tracker for @dev reference
+ * @bytes_in: statistic; bytes ingressing
+ * @bytes_out: statistic; bytes egressing
+ * @pkt_in: statistic; packets ingressing
+ * @pkt_out: statistic; packets egressing
+ * @rate_limit: Traffic shaping (NI)
+ * @threshold: TTL threshold
+ * @flags: Control flags
+ * @link: Physical interface index
+ * @dev_parent_id: device parent id
+ * @local: Local address
+ * @remote: Remote address for tunnels
+ */
+struct vif_device {
+ struct net_device __rcu *dev;
+ netdevice_tracker dev_tracker;
+ unsigned long bytes_in, bytes_out;
+ unsigned long pkt_in, pkt_out;
+ unsigned long rate_limit;
+ unsigned char threshold;
+ unsigned short flags;
+ int link;
+
+ /* Currently only used by ipmr */
+ struct netdev_phys_item_id dev_parent_id;
+ __be32 local, remote;
+};
+
+struct vif_entry_notifier_info {
+ struct fib_notifier_info info;
+ struct net_device *dev;
+ unsigned short vif_index;
+ unsigned short vif_flags;
+ u32 tb_id;
+};
+
+static inline int mr_call_vif_notifier(struct notifier_block *nb,
+ unsigned short family,
+ enum fib_event_type event_type,
+ struct vif_device *vif,
+ struct net_device *vif_dev,
+ unsigned short vif_index, u32 tb_id,
+ struct netlink_ext_ack *extack)
+{
+ struct vif_entry_notifier_info info = {
+ .info = {
+ .family = family,
+ .extack = extack,
+ },
+ .dev = vif_dev,
+ .vif_index = vif_index,
+ .vif_flags = vif->flags,
+ .tb_id = tb_id,
+ };
+
+ return call_fib_notifier(nb, event_type, &info.info);
+}
+
+static inline int mr_call_vif_notifiers(struct net *net,
+ unsigned short family,
+ enum fib_event_type event_type,
+ struct vif_device *vif,
+ struct net_device *vif_dev,
+ unsigned short vif_index, u32 tb_id,
+ unsigned int *ipmr_seq)
+{
+ struct vif_entry_notifier_info info = {
+ .info = {
+ .family = family,
+ },
+ .dev = vif_dev,
+ .vif_index = vif_index,
+ .vif_flags = vif->flags,
+ .tb_id = tb_id,
+ };
+
+ ASSERT_RTNL();
+ (*ipmr_seq)++;
+ return call_fib_notifiers(net, event_type, &info.info);
+}
+
+#ifndef MAXVIFS
+/* This one is nasty; value is defined in uapi using different symbols for
+ * mroute and mroute6, but both map onto the same value of 32.
+ */
+#define MAXVIFS 32
+#endif
+
+/* Note: This helper is deprecated. */
+#define VIF_EXISTS(_mrt, _idx) (!!rcu_access_pointer((_mrt)->vif_table[_idx].dev))
+
+/* mfc_flags:
+ * MFC_STATIC - the entry was added statically (not by a routing daemon)
+ * MFC_OFFLOAD - the entry was offloaded to the hardware
+ */
+enum {
+ MFC_STATIC = BIT(0),
+ MFC_OFFLOAD = BIT(1),
+};
+
+/**
+ * struct mr_mfc - common multicast routing entries
+ * @mnode: rhashtable list
+ * @mfc_parent: source interface (iif)
+ * @mfc_flags: entry flags
+ * @expires: unresolved entry expire time
+ * @unresolved: unresolved cached skbs
+ * @last_assert: time of last assert
+ * @minvif: minimum VIF id
+ * @maxvif: maximum VIF id
+ * @bytes: bytes that have passed for this entry
+ * @pkt: packets that have passed for this entry
+ * @wrong_if: number of wrong source interface hits
+ * @lastuse: time of last use of the group (traffic or update)
+ * @ttls: OIF TTL threshold array
+ * @refcount: reference count for this entry
+ * @list: global entry list
+ * @rcu: used for entry destruction
+ * @free: Operation used for freeing an entry under RCU
+ */
+struct mr_mfc {
+ struct rhlist_head mnode;
+ unsigned short mfc_parent;
+ int mfc_flags;
+
+ union {
+ struct {
+ unsigned long expires;
+ struct sk_buff_head unresolved;
+ } unres;
+ struct {
+ unsigned long last_assert;
+ int minvif;
+ int maxvif;
+ unsigned long bytes;
+ unsigned long pkt;
+ unsigned long wrong_if;
+ unsigned long lastuse;
+ unsigned char ttls[MAXVIFS];
+ refcount_t refcount;
+ } res;
+ } mfc_un;
+ struct list_head list;
+ struct rcu_head rcu;
+ void (*free)(struct rcu_head *head);
+};
+
+static inline void mr_cache_put(struct mr_mfc *c)
+{
+ if (refcount_dec_and_test(&c->mfc_un.res.refcount))
+ call_rcu(&c->rcu, c->free);
+}
+
+static inline void mr_cache_hold(struct mr_mfc *c)
+{
+ refcount_inc(&c->mfc_un.res.refcount);
+}
+
+struct mfc_entry_notifier_info {
+ struct fib_notifier_info info;
+ struct mr_mfc *mfc;
+ u32 tb_id;
+};
+
+static inline int mr_call_mfc_notifier(struct notifier_block *nb,
+ unsigned short family,
+ enum fib_event_type event_type,
+ struct mr_mfc *mfc, u32 tb_id,
+ struct netlink_ext_ack *extack)
+{
+ struct mfc_entry_notifier_info info = {
+ .info = {
+ .family = family,
+ .extack = extack,
+ },
+ .mfc = mfc,
+ .tb_id = tb_id
+ };
+
+ return call_fib_notifier(nb, event_type, &info.info);
+}
+
+static inline int mr_call_mfc_notifiers(struct net *net,
+ unsigned short family,
+ enum fib_event_type event_type,
+ struct mr_mfc *mfc, u32 tb_id,
+ unsigned int *ipmr_seq)
+{
+ struct mfc_entry_notifier_info info = {
+ .info = {
+ .family = family,
+ },
+ .mfc = mfc,
+ .tb_id = tb_id
+ };
+
+ ASSERT_RTNL();
+ (*ipmr_seq)++;
+ return call_fib_notifiers(net, event_type, &info.info);
+}
+
+struct mr_table;
+
+/**
+ * struct mr_table_ops - callbacks and info for protocol-specific ops
+ * @rht_params: parameters for accessing the MFC hash
+ * @cmparg_any: a hash key to be used for matching on (*,*) routes
+ */
+struct mr_table_ops {
+ const struct rhashtable_params *rht_params;
+ void *cmparg_any;
+};
+
+/**
+ * struct mr_table - a multicast routing table
+ * @list: entry within a list of multicast routing tables
+ * @net: net where this table belongs
+ * @ops: protocol specific operations
+ * @id: identifier of the table
+ * @mroute_sk: socket associated with the table
+ * @ipmr_expire_timer: timer for handling unresolved routes
+ * @mfc_unres_queue: list of unresolved MFC entries
+ * @vif_table: array containing all possible vifs
+ * @mfc_hash: Hash table of all resolved routes for easy lookup
+ * @mfc_cache_list: list of resolved routes for possible traversal
+ * @maxvif: Identifier of highest value vif currently in use
+ * @cache_resolve_queue_len: current size of unresolved queue
+ * @mroute_do_assert: Whether to inform userspace on wrong ingress
+ * @mroute_do_pim: Whether to receive IGMP PIMv1
+ * @mroute_reg_vif_num: PIM-device vif index
+ */
+struct mr_table {
+ struct list_head list;
+ possible_net_t net;
+ struct mr_table_ops ops;
+ u32 id;
+ struct sock __rcu *mroute_sk;
+ struct timer_list ipmr_expire_timer;
+ struct list_head mfc_unres_queue;
+ struct vif_device vif_table[MAXVIFS];
+ struct rhltable mfc_hash;
+ struct list_head mfc_cache_list;
+ int maxvif;
+ atomic_t cache_resolve_queue_len;
+ bool mroute_do_assert;
+ bool mroute_do_pim;
+ bool mroute_do_wrvifwhole;
+ int mroute_reg_vif_num;
+};
+
+#ifdef CONFIG_IP_MROUTE_COMMON
+void vif_device_init(struct vif_device *v,
+ struct net_device *dev,
+ unsigned long rate_limit,
+ unsigned char threshold,
+ unsigned short flags,
+ unsigned short get_iflink_mask);
+
+struct mr_table *
+mr_table_alloc(struct net *net, u32 id,
+ struct mr_table_ops *ops,
+ void (*expire_func)(struct timer_list *t),
+ void (*table_set)(struct mr_table *mrt,
+ struct net *net));
+
+/* These actually return 'struct mr_mfc *', but to avoid the need for
+ * explicit casts they simply return void pointers.
+ */
+void *mr_mfc_find_parent(struct mr_table *mrt,
+ void *hasharg, int parent);
+void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi);
+void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg);
+
+int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+ struct mr_mfc *c, struct rtmsg *rtm);
+int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb,
+ struct netlink_callback *cb,
+ int (*fill)(struct mr_table *mrt, struct sk_buff *skb,
+ u32 portid, u32 seq, struct mr_mfc *c,
+ int cmd, int flags),
+ spinlock_t *lock, struct fib_dump_filter *filter);
+int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
+ struct mr_table *(*iter)(struct net *net,
+ struct mr_table *mrt),
+ int (*fill)(struct mr_table *mrt,
+ struct sk_buff *skb,
+ u32 portid, u32 seq, struct mr_mfc *c,
+ int cmd, int flags),
+ spinlock_t *lock, struct fib_dump_filter *filter);
+
+int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
+ int (*rules_dump)(struct net *net,
+ struct notifier_block *nb,
+ struct netlink_ext_ack *extack),
+ struct mr_table *(*mr_iter)(struct net *net,
+ struct mr_table *mrt),
+ struct netlink_ext_ack *extack);
+#else
+static inline void vif_device_init(struct vif_device *v,
+ struct net_device *dev,
+ unsigned long rate_limit,
+ unsigned char threshold,
+ unsigned short flags,
+ unsigned short get_iflink_mask)
+{
+}
+
+static inline void *mr_mfc_find_parent(struct mr_table *mrt,
+ void *hasharg, int parent)
+{
+ return NULL;
+}
+
+static inline void *mr_mfc_find_any_parent(struct mr_table *mrt,
+ int vifi)
+{
+ return NULL;
+}
+
+static inline struct mr_mfc *mr_mfc_find_any(struct mr_table *mrt,
+ int vifi, void *hasharg)
+{
+ return NULL;
+}
+
+static inline int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+ struct mr_mfc *c, struct rtmsg *rtm)
+{
+ return -EINVAL;
+}
+
+static inline int
+mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
+ struct mr_table *(*iter)(struct net *net,
+ struct mr_table *mrt),
+ int (*fill)(struct mr_table *mrt,
+ struct sk_buff *skb,
+ u32 portid, u32 seq, struct mr_mfc *c,
+ int cmd, int flags),
+ spinlock_t *lock, struct fib_dump_filter *filter)
+{
+ return -EINVAL;
+}
+
+static inline int mr_dump(struct net *net, struct notifier_block *nb,
+ unsigned short family,
+ int (*rules_dump)(struct net *net,
+ struct notifier_block *nb,
+ struct netlink_ext_ack *extack),
+ struct mr_table *(*mr_iter)(struct net *net,
+ struct mr_table *mrt),
+ struct netlink_ext_ack *extack)
+{
+ return -EINVAL;
+}
+#endif
+
+static inline void *mr_mfc_find(struct mr_table *mrt, void *hasharg)
+{
+ return mr_mfc_find_parent(mrt, hasharg, -1);
+}
+
+#ifdef CONFIG_PROC_FS
+struct mr_vif_iter {
+ struct seq_net_private p;
+ struct mr_table *mrt;
+ int ct;
+};
+
+struct mr_mfc_iter {
+ struct seq_net_private p;
+ struct mr_table *mrt;
+ struct list_head *cache;
+
+ /* Lock protecting the mr_table's unresolved queue */
+ spinlock_t *lock;
+};
+
+#ifdef CONFIG_IP_MROUTE_COMMON
+void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos);
+void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos);
+
+static inline void *mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return *pos ? mr_vif_seq_idx(seq_file_net(seq),
+ seq->private, *pos - 1)
+ : SEQ_START_TOKEN;
+}
+
+/* These actually return 'struct mr_mfc *', but to avoid the need for
+ * explicit casts they simply return void pointers.
+ */
+void *mr_mfc_seq_idx(struct net *net,
+ struct mr_mfc_iter *it, loff_t pos);
+void *mr_mfc_seq_next(struct seq_file *seq, void *v,
+ loff_t *pos);
+
+static inline void *mr_mfc_seq_start(struct seq_file *seq, loff_t *pos,
+ struct mr_table *mrt, spinlock_t *lock)
+{
+ struct mr_mfc_iter *it = seq->private;
+
+ it->mrt = mrt;
+ it->cache = NULL;
+ it->lock = lock;
+
+ return *pos ? mr_mfc_seq_idx(seq_file_net(seq),
+ seq->private, *pos - 1)
+ : SEQ_START_TOKEN;
+}
+
+static inline void mr_mfc_seq_stop(struct seq_file *seq, void *v)
+{
+ struct mr_mfc_iter *it = seq->private;
+ struct mr_table *mrt = it->mrt;
+
+ if (it->cache == &mrt->mfc_unres_queue)
+ spin_unlock_bh(it->lock);
+ else if (it->cache == &mrt->mfc_cache_list)
+ rcu_read_unlock();
+}
+#else
+static inline void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter,
+ loff_t pos)
+{
+ return NULL;
+}
+
+static inline void *mr_vif_seq_next(struct seq_file *seq,
+ void *v, loff_t *pos)
+{
+ return NULL;
+}
+
+static inline void *mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return NULL;
+}
+
+static inline void *mr_mfc_seq_idx(struct net *net,
+ struct mr_mfc_iter *it, loff_t pos)
+{
+ return NULL;
+}
+
+static inline void *mr_mfc_seq_next(struct seq_file *seq, void *v,
+ loff_t *pos)
+{
+ return NULL;
+}
+
+static inline void *mr_mfc_seq_start(struct seq_file *seq, loff_t *pos,
+ struct mr_table *mrt, spinlock_t *lock)
+{
+ return NULL;
+}
+
+static inline void mr_mfc_seq_stop(struct seq_file *seq, void *v)
+{
+}
+#endif
+#endif
+#endif
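
The @free callback plus @refcount documented above give each resolved entry
an RCU-deferred lifetime managed by mr_cache_hold()/mr_cache_put(). A sketch
of how protocol code might set this up (the names and the plain kfree() are
assumptions; real code would free via its own slab cache):

	static void my_cache_free_rcu(struct rcu_head *head)
	{
		struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);

		kfree(container_of(c, struct mfc_cache, _c));
	}

	static void my_cache_init(struct mfc_cache *cache)
	{
		cache->_c.free = my_cache_free_rcu;
		/* refcount lives in the res union: resolved entries only */
		refcount_set(&cache->_c.mfc_un.res.refcount, 1);
	}
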
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
index e1b163f912fb..b7a5d4c72f56 100644
--- a/include/linux/msdos_fs.h
+++ b/include/linux/msdos_fs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MSDOS_FS_H
#define _LINUX_MSDOS_FS_H
diff --git a/include/linux/msdos_partition.h b/include/linux/msdos_partition.h
new file mode 100644
index 000000000000..2cb82db2a43c
--- /dev/null
+++ b/include/linux/msdos_partition.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MSDOS_PARTITION_H
+#define _LINUX_MSDOS_PARTITION_H
+
+#define MSDOS_LABEL_MAGIC 0xAA55
+
+struct msdos_partition {
+ u8 boot_ind; /* 0x80 - active */
+ u8 head; /* starting head */
+ u8 sector; /* starting sector */
+ u8 cyl; /* starting cylinder */
+ u8 sys_ind; /* What partition type */
+ u8 end_head; /* end head */
+ u8 end_sector; /* end sector */
+ u8 end_cyl; /* end cylinder */
+ __le32 start_sect; /* starting sector counting from 0 */
+ __le32 nr_sects; /* nr of sectors in partition */
+} __packed;
+
+enum msdos_sys_ind {
+ /*
+ * These three have identical behaviour; use the second one if DOS FDISK
+ * gets confused about extended/logical partitions starting past
+ * cylinder 1023.
+ */
+ DOS_EXTENDED_PARTITION = 5,
+ LINUX_EXTENDED_PARTITION = 0x85,
+ WIN98_EXTENDED_PARTITION = 0x0f,
+
+ LINUX_DATA_PARTITION = 0x83,
+ LINUX_LVM_PARTITION = 0x8e,
+ LINUX_RAID_PARTITION = 0xfd, /* autodetect RAID partition */
+
+ SOLARIS_X86_PARTITION = 0x82, /* also Linux swap partitions */
+ NEW_SOLARIS_X86_PARTITION = 0xbf,
+
+ DM6_AUX1PARTITION = 0x51, /* no DDO: use xlated geom */
+ DM6_AUX3PARTITION = 0x53, /* no DDO: use xlated geom */
+ DM6_PARTITION = 0x54, /* has DDO: use xlated geom & offset */
+ EZD_PARTITION = 0x55, /* EZ-DRIVE */
+
+ FREEBSD_PARTITION = 0xa5, /* FreeBSD Partition ID */
+ OPENBSD_PARTITION = 0xa6, /* OpenBSD Partition ID */
+ NETBSD_PARTITION = 0xa9, /* NetBSD Partition ID */
+ BSDI_PARTITION = 0xb7, /* BSDI Partition ID */
+ MINIX_PARTITION = 0x81, /* Minix Partition ID */
+ UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */
+};
+
+#endif /* _LINUX_MSDOS_PARTITION_H */
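
For context, struct msdos_partition maps directly onto the classic MBR
layout: four 16-byte entries at byte offset 446 of sector 0, with
MSDOS_LABEL_MAGIC at offset 510. A sketch of walking them (the function is
illustrative only):

	static void dump_mbr(const u8 *sect)	/* first 512 bytes of the disk */
	{
		const struct msdos_partition *p =
			(const struct msdos_partition *)(sect + 446);
		int i;

		if (get_unaligned_le16(sect + 510) != MSDOS_LABEL_MAGIC)
			return;		/* no DOS partition table */

		for (i = 0; i < 4; i++, p++)
			pr_info("part %d: type 0x%02x start %u size %u\n", i,
				p->sys_ind, le32_to_cpu(p->start_sect),
				le32_to_cpu(p->nr_sects));
	}
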
diff --git a/include/linux/msg.h b/include/linux/msg.h
index f3f302f9c197..9a972a296b95 100644
--- a/include/linux/msg.h
+++ b/include/linux/msg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MSG_H
#define _LINUX_MSG_H
@@ -14,29 +15,4 @@ struct msg_msg {
/* the actual message follows immediately */
};
-/* one msq_queue structure for each present queue on the system */
-struct msg_queue {
- struct kern_ipc_perm q_perm;
- time_t q_stime; /* last msgsnd time */
- time_t q_rtime; /* last msgrcv time */
- time_t q_ctime; /* last change time */
- unsigned long q_cbytes; /* current number of bytes on queue */
- unsigned long q_qnum; /* number of messages in queue */
- unsigned long q_qbytes; /* max number of bytes on queue */
- pid_t q_lspid; /* pid of last msgsnd */
- pid_t q_lrpid; /* last receive pid */
-
- struct list_head q_messages;
- struct list_head q_receivers;
- struct list_head q_senders;
-};
-
-/* Helper routines for sys_msgsnd and sys_msgrcv */
-extern long do_msgsnd(int msqid, long mtype, void __user *mtext,
- size_t msgsz, int msgflg);
-extern long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp,
- int msgflg,
- long (*msg_fill)(void __user *, struct msg_msg *,
- size_t));
-
#endif /* _LINUX_MSG_H */
diff --git a/include/linux/msi.h b/include/linux/msi.h
index df6d59201d31..cdb14a1ef268 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -1,198 +1,406 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MSI_H
#define LINUX_MSI_H
-#include <linux/kobject.h>
+/*
+ * This header file contains MSI data structures and functions which are
+ * only relevant for:
+ * - Interrupt core code
+ * - PCI/MSI core code
+ * - MSI interrupt domain implementations
+ * - IOMMU, low level VFIO, NTB and other justified exceptions
+ * dealing with low level MSI details.
+ *
+ * Regular device drivers have no business with any of these functions and
+ * especially storing MSI descriptor pointers in random code is considered
+ * abuse.
+ *
+ * Device driver relevant functions are available in <linux/msi_api.h>
+ */
+
+#include <linux/irqdomain_defs.h>
+#include <linux/cpumask.h>
+#include <linux/msi_api.h>
+#include <linux/xarray.h>
+#include <linux/mutex.h>
#include <linux/list.h>
+#include <linux/irq.h>
+#include <linux/bits.h>
+
+#include <asm/msi.h>
+
+/* Dummy shadow structures if an architecture does not define them */
+#ifndef arch_msi_msg_addr_lo
+typedef struct arch_msi_msg_addr_lo {
+ u32 address_lo;
+} __attribute__ ((packed)) arch_msi_msg_addr_lo_t;
+#endif
+
+#ifndef arch_msi_msg_addr_hi
+typedef struct arch_msi_msg_addr_hi {
+ u32 address_hi;
+} __attribute__ ((packed)) arch_msi_msg_addr_hi_t;
+#endif
+#ifndef arch_msi_msg_data
+typedef struct arch_msi_msg_data {
+ u32 data;
+} __attribute__ ((packed)) arch_msi_msg_data_t;
+#endif
+
+#ifndef arch_is_isolated_msi
+#define arch_is_isolated_msi() false
+#endif
+
+/**
+ * msi_msg - Representation of an MSI message
+ * @address_lo: Low 32 bits of msi message address
+ * @arch_addrlo: Architecture specific shadow of @address_lo
+ * @address_hi: High 32 bits of msi message address
+ * (only used when device supports it)
+ * @arch_addrhi: Architecture specific shadow of @address_hi
+ * @data: MSI message data (usually 16 bits)
+ * @arch_data: Architecture specific shadow of @data
+ */
struct msi_msg {
- u32 address_lo; /* low 32 bits of msi message address */
- u32 address_hi; /* high 32 bits of msi message address */
- u32 data; /* 16 bits of msi message data */
+ union {
+ u32 address_lo;
+ arch_msi_msg_addr_lo_t arch_addr_lo;
+ };
+ union {
+ u32 address_hi;
+ arch_msi_msg_addr_hi_t arch_addr_hi;
+ };
+ union {
+ u32 data;
+ arch_msi_msg_data_t arch_data;
+ };
};
extern int pci_msi_ignore_mask;
/* Helper functions */
-struct irq_data;
struct msi_desc;
struct pci_dev;
struct platform_msi_priv_data;
+struct device_attribute;
+struct irq_domain;
+struct irq_affinity_desc;
+
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
#ifdef CONFIG_GENERIC_MSI_IRQ
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
#else
-static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
-{
-}
+static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) { }
#endif
typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
struct msi_msg *msg);
/**
- * platform_msi_desc - Platform device specific msi descriptor data
- * @msi_priv_data: Pointer to platform private data
- * @msi_index: The index of the MSI descriptor for multi MSI
+ * pci_msi_desc - PCI/MSI specific MSI descriptor data
+ *
+ * @msi_mask: [PCI MSI] MSI cached mask bits
+ * @msix_ctrl: [PCI MSI-X] MSI-X cached per vector control bits
+ * @is_msix: [PCI MSI/X] True if MSI-X
+ * @multiple: [PCI MSI/X] log2 num of messages allocated
+ * @multi_cap: [PCI MSI/X] log2 num of messages supported
+ * @can_mask: [PCI MSI/X] Masking supported?
+ * @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit
+ * @is_virtual: [PCI MSI-X] True if the entry is a virtual vector beyond
+ * the hardware MSI-X table size
+ * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
+ * @mask_pos: [PCI MSI] Mask register position
+ * @mask_base: [PCI MSI-X] Mask register base address
*/
-struct platform_msi_desc {
- struct platform_msi_priv_data *msi_priv_data;
- u16 msi_index;
+struct pci_msi_desc {
+ union {
+ u32 msi_mask;
+ u32 msix_ctrl;
+ };
+ struct {
+ u8 is_msix : 1;
+ u8 multiple : 3;
+ u8 multi_cap : 3;
+ u8 can_mask : 1;
+ u8 is_64 : 1;
+ u8 is_virtual : 1;
+ unsigned default_irq;
+ } msi_attrib;
+ union {
+ u8 mask_pos;
+ void __iomem *mask_base;
+ };
};
/**
- * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data
- * @msi_index: The index of the MSI descriptor
+ * union msi_domain_cookie - Opaque MSI domain specific data
+ * @value: u64 value store
+ * @ptr: Pointer to domain specific data
+ * @iobase: Domain specific IOmem pointer
+ *
+ * The content of this data is implementation defined and used by the MSI
+ * domain to store domain-specific information which is required for
+ * interrupt chip callbacks.
*/
-struct fsl_mc_msi_desc {
- u16 msi_index;
+union msi_domain_cookie {
+ u64 value;
+ void *ptr;
+ void __iomem *iobase;
+};
+
+/**
+ * struct msi_desc_data - Generic MSI descriptor data
+ * @dcookie: Cookie for MSI domain specific data which is required
+ * for irq_chip callbacks
+ * @icookie: Cookie for the MSI interrupt instance provided by
+ * the usage site to the allocation function
+ *
+ * The content of this data is implementation defined, e.g. PCI/IMS
+ * implementations define the meaning of the data. The MSI core ignores
+ * this data completely.
+ */
+struct msi_desc_data {
+ union msi_domain_cookie dcookie;
+ union msi_instance_cookie icookie;
};
+#define MSI_MAX_INDEX ((unsigned int)USHRT_MAX)
+
/**
* struct msi_desc - Descriptor structure for MSI based interrupts
- * @list: List head for management
* @irq: The base interrupt number
* @nvec_used: The number of vectors used
* @dev: Pointer to the device which uses this descriptor
* @msg: The last set MSI message cached for reuse
* @affinity: Optional pointer to a cpu affinity mask for this descriptor
+ * @sysfs_attr: Pointer to sysfs device attribute
*
- * @masked: [PCI MSI/X] Mask bits
- * @is_msix: [PCI MSI/X] True if MSI-X
- * @multiple: [PCI MSI/X] log2 num of messages allocated
- * @multi_cap: [PCI MSI/X] log2 num of messages supported
- * @maskbit: [PCI MSI/X] Mask-Pending bit supported?
- * @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit
- * @entry_nr: [PCI MSI/X] Entry which is described by this descriptor
- * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
- * @mask_pos: [PCI MSI] Mask register position
- * @mask_base: [PCI MSI-X] Mask register base address
- * @platform: [platform] Platform device specific msi descriptor data
+ * @write_msi_msg: Callback that may be called when the MSI message
+ * address or data changes
+ * @write_msi_msg_data: Data parameter for the callback.
+ *
+ * @msi_index: Index of the msi descriptor
+ * @pci: PCI specific msi descriptor data
+ * @data: Generic MSI descriptor data
*/
struct msi_desc {
/* Shared device/bus type independent data */
- struct list_head list;
unsigned int irq;
unsigned int nvec_used;
struct device *dev;
struct msi_msg msg;
- struct cpumask *affinity;
+ struct irq_affinity_desc *affinity;
+#ifdef CONFIG_IRQ_MSI_IOMMU
+ const void *iommu_cookie;
+#endif
+#ifdef CONFIG_SYSFS
+ struct device_attribute *sysfs_attrs;
+#endif
+ void (*write_msi_msg)(struct msi_desc *entry, void *data);
+ void *write_msi_msg_data;
+
+ u16 msi_index;
union {
- /* PCI MSI/X specific data */
- struct {
- u32 masked;
- struct {
- __u8 is_msix : 1;
- __u8 multiple : 3;
- __u8 multi_cap : 3;
- __u8 maskbit : 1;
- __u8 is_64 : 1;
- __u16 entry_nr;
- unsigned default_irq;
- } msi_attrib;
- union {
- u8 mask_pos;
- void __iomem *mask_base;
- };
- };
-
- /*
- * Non PCI variants add their data structure here. New
- * entries need to use a named structure. We want
- * proper name spaces for this. The PCI part is
- * anonymous for now as it would require an immediate
- * tree wide cleanup.
- */
- struct platform_msi_desc platform;
- struct fsl_mc_msi_desc fsl_mc;
+ struct pci_msi_desc pci;
+ struct msi_desc_data data;
};
};
-/* Helpers to hide struct msi_desc implementation details */
-#define msi_desc_to_dev(desc) ((desc)->dev)
-#define dev_to_msi_list(dev) (&(dev)->msi_list)
-#define first_msi_entry(dev) \
- list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
-#define for_each_msi_entry(desc, dev) \
- list_for_each_entry((desc), dev_to_msi_list((dev)), list)
+/*
+ * Filter values for the MSI descriptor iterators and accessor functions.
+ */
+enum msi_desc_filter {
+ /* All descriptors */
+ MSI_DESC_ALL,
+ /* Descriptors which have no interrupt associated */
+ MSI_DESC_NOTASSOCIATED,
+ /* Descriptors which have an interrupt associated */
+ MSI_DESC_ASSOCIATED,
+};
-#ifdef CONFIG_PCI_MSI
-#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
-#define for_each_pci_msi_entry(desc, pdev) \
- for_each_msi_entry((desc), &(pdev)->dev)
-struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
-void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
-void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
-#else /* CONFIG_PCI_MSI */
-static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
+/**
+ * struct msi_dev_domain - The internals of MSI domain info per device
+ * @store: Xarray for storing MSI descriptor pointers
+ * @domain: Pointer to a per device interrupt domain
+ */
+struct msi_dev_domain {
+ struct xarray store;
+ struct irq_domain *domain;
+};
+
+/**
+ * msi_device_data - MSI per device data
+ * @properties: MSI properties which are interesting to drivers
+ * @platform_data: Platform-MSI specific data
+ * @mutex: Mutex protecting the MSI descriptor store
+ * @__domains: Internal data for per device MSI domains
+ * @__iter_idx: Index to search the next entry for iterators
+ */
+struct msi_device_data {
+ unsigned long properties;
+ struct platform_msi_priv_data *platform_data;
+ struct mutex mutex;
+ struct msi_dev_domain __domains[MSI_MAX_DEVICE_IRQDOMAINS];
+ unsigned long __iter_idx;
+};
+
+int msi_setup_device_data(struct device *dev);
+
+void msi_lock_descs(struct device *dev);
+void msi_unlock_descs(struct device *dev);
+
+struct msi_desc *msi_domain_first_desc(struct device *dev, unsigned int domid,
+ enum msi_desc_filter filter);
+
+/**
+ * msi_first_desc - Get the first MSI descriptor of the default irqdomain
+ * @dev: Device to operate on
+ * @filter: Descriptor state filter
+ *
+ * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
+ * must be invoked before the call.
+ *
+ * Return: Pointer to the first MSI descriptor matching the search
+ * criteria, NULL if none found.
+ */
+static inline struct msi_desc *msi_first_desc(struct device *dev,
+ enum msi_desc_filter filter)
{
- return NULL;
+ return msi_domain_first_desc(dev, MSI_DEFAULT_DOMAIN, filter);
}
-static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
+
+struct msi_desc *msi_next_desc(struct device *dev, unsigned int domid,
+ enum msi_desc_filter filter);
+
+/**
+ * msi_domain_for_each_desc - Iterate the MSI descriptors in a specific domain
+ *
+ * @desc: struct msi_desc pointer used as iterator
+ * @dev: struct device pointer - device to iterate
+ * @domid: The id of the interrupt domain which should be walked.
+ * @filter: Filter for descriptor selection
+ *
+ * Notes:
+ * - The loop must be protected with a msi_lock_descs()/msi_unlock_descs()
+ * pair.
+ * - It is safe to remove a retrieved MSI descriptor in the loop.
+ */
+#define msi_domain_for_each_desc(desc, dev, domid, filter) \
+ for ((desc) = msi_domain_first_desc((dev), (domid), (filter)); (desc); \
+ (desc) = msi_next_desc((dev), (domid), (filter)))
+
+/**
+ * msi_for_each_desc - Iterate the MSI descriptors in the default irqdomain
+ *
+ * @desc: struct msi_desc pointer used as iterator
+ * @dev: struct device pointer - device to iterate
+ * @filter: Filter for descriptor selection
+ *
+ * Notes:
+ * - The loop must be protected with a msi_lock_descs()/msi_unlock_descs()
+ * pair.
+ * - It is safe to remove a retrieved MSI descriptor in the loop.
+ */
+#define msi_for_each_desc(desc, dev, filter) \
+ msi_domain_for_each_desc((desc), (dev), MSI_DEFAULT_DOMAIN, (filter))
+
+#define msi_desc_to_dev(desc) ((desc)->dev)
+
+#ifdef CONFIG_IRQ_MSI_IOMMU
+static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
+ return desc->iommu_cookie;
}
-#endif /* CONFIG_PCI_MSI */
-
-struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
- const struct cpumask *affinity);
-void free_msi_entry(struct msi_desc *entry);
-void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
-void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
-u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
-u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
-void pci_msi_mask_irq(struct irq_data *data);
-void pci_msi_unmask_irq(struct irq_data *data);
+static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
+ const void *iommu_cookie)
+{
+ desc->iommu_cookie = iommu_cookie;
+}
+#else
+static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
+{
+ return NULL;
+}
-/* Conversion helpers. Should be removed after merging */
-static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
+static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
+ const void *iommu_cookie)
{
- __pci_write_msi_msg(entry, msg);
}
-static inline void write_msi_msg(int irq, struct msi_msg *msg)
+#endif
+
+int msi_domain_insert_msi_desc(struct device *dev, unsigned int domid,
+ struct msi_desc *init_desc);
+/**
+ * msi_insert_msi_desc - Allocate and initialize an MSI descriptor in the
+ * default irqdomain and insert it at @init_desc->msi_index
+ * @dev: Pointer to the device for which the descriptor is allocated
+ * @init_desc: Pointer to an MSI descriptor to initialize the new descriptor
+ *
+ * Return: 0 on success or an appropriate failure code.
+ */
+static inline int msi_insert_msi_desc(struct device *dev, struct msi_desc *init_desc)
{
- pci_write_msi_msg(irq, msg);
+ return msi_domain_insert_msi_desc(dev, MSI_DEFAULT_DOMAIN, init_desc);
}
-static inline void mask_msi_irq(struct irq_data *data)
+
+void msi_domain_free_msi_descs_range(struct device *dev, unsigned int domid,
+ unsigned int first, unsigned int last);
+
+/**
+ * msi_free_msi_descs_range - Free a range of MSI descriptors of a device
+ * in the default irqdomain
+ *
+ * @dev: Device for which to free the descriptors
+ * @first: Index to start freeing from (inclusive)
+ * @last: Last index to be freed (inclusive)
+ */
+static inline void msi_free_msi_descs_range(struct device *dev, unsigned int first,
+ unsigned int last)
{
- pci_msi_mask_irq(data);
+ msi_domain_free_msi_descs_range(dev, MSI_DEFAULT_DOMAIN, first, last);
}
-static inline void unmask_msi_irq(struct irq_data *data)
+
+/**
+ * msi_free_msi_descs - Free all MSI descriptors of a device in the default irqdomain
+ * @dev: Device to free the descriptors
+ */
+static inline void msi_free_msi_descs(struct device *dev)
{
- pci_msi_unmask_irq(data);
+ msi_free_msi_descs_range(dev, 0, MSI_MAX_INDEX);
}
/*
- * The arch hooks to setup up msi irqs. Those functions are
- * implemented as weak symbols so that they /can/ be overriden by
- * architecture specific code if needed.
+ * The arch hooks to set up MSI IRQs. Default functions are implemented
+ * as weak symbols so that they /can/ be overridden by architecture-specific
+ * code if needed. These hooks can only be enabled by the architecture.
+ *
+ * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by
+ * stubs with warnings.
*/
+#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
-void arch_restore_msi_irqs(struct pci_dev *dev);
-
-void default_teardown_msi_irqs(struct pci_dev *dev);
-void default_restore_msi_irqs(struct pci_dev *dev);
-
-struct msi_controller {
- struct module *owner;
- struct device *dev;
- struct device_node *of_node;
- struct list_head list;
-
- int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
- struct msi_desc *desc);
- int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev,
- int nvec, int type);
- void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
-};
+#ifdef CONFIG_SYSFS
+int msi_device_populate_sysfs(struct device *dev);
+void msi_device_destroy_sysfs(struct device *dev);
+#else /* CONFIG_SYSFS */
+static inline int msi_device_populate_sysfs(struct device *dev) { return 0; }
+static inline void msi_device_destroy_sysfs(struct device *dev) { }
+#endif /* !CONFIG_SYSFS */
+#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+/*
+ * The restore hook is still available even for fully irq domain based
+ * setups. Courtesy to XEN/X86.
+ */
+bool arch_restore_msi_irqs(struct pci_dev *dev);
+
+#ifdef CONFIG_GENERIC_MSI_IRQ
#include <linux/irqhandler.h>
-#include <asm/msi.h>
struct irq_domain;
struct irq_domain_ops;
@@ -206,18 +414,29 @@ struct msi_domain_info;
* @get_hwirq: Retrieve the resulting hw irq number
* @msi_init: Domain specific init function for MSI interrupts
 * @msi_free:		Domain specific function to free MSI interrupts
- * @msi_check: Callback for verification of the domain/info/dev data
* @msi_prepare: Prepare the allocation of the interrupts in the domain
- * @msi_finish: Optional callback to finalize the allocation
+ * @prepare_desc: Optional function to prepare the allocated MSI descriptor
+ * in the domain
* @set_desc: Set the msi descriptor for an interrupt
- * @handle_error: Optional error handler if the allocation fails
+ * @domain_alloc_irqs: Optional function to override the default allocation
+ * function.
+ * @domain_free_irqs: Optional function to override the default free
+ * function.
+ * @msi_post_free: Optional function which is invoked after freeing
+ * all interrupts.
+ *
+ * @get_hwirq, @msi_init and @msi_free are callbacks used by the underlying
+ * irqdomain.
*
- * @get_hwirq, @msi_init and @msi_free are callbacks used by
- * msi_create_irq_domain() and related interfaces
+ * @msi_prepare, @prepare_desc and @set_desc are callbacks used by the
+ * msi_domain_alloc/free_irqs*() variants.
*
- * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
- * are callbacks used by msi_domain_alloc_irqs() and related
- * interfaces which are based on msi_desc.
+ * @domain_alloc_irqs, @domain_free_irqs can be used to override the
+ * default allocation/free functions (__msi_domain_alloc/free_irqs). This
+ * is initially for a wrapper around XEN's separate MSI universe, which can't
+ * be wrapped into the regular irq domain concepts by mere mortals. This
+ * allows msi_domain_alloc/free_irqs() to be used universally without having
+ * to special-case XEN all over the place.
*/
struct msi_domain_ops {
irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info,
@@ -229,22 +448,29 @@ struct msi_domain_ops {
void (*msi_free)(struct irq_domain *domain,
struct msi_domain_info *info,
unsigned int virq);
- int (*msi_check)(struct irq_domain *domain,
- struct msi_domain_info *info,
- struct device *dev);
int (*msi_prepare)(struct irq_domain *domain,
struct device *dev, int nvec,
msi_alloc_info_t *arg);
- void (*msi_finish)(msi_alloc_info_t *arg, int retval);
+ void (*prepare_desc)(struct irq_domain *domain, msi_alloc_info_t *arg,
+ struct msi_desc *desc);
void (*set_desc)(msi_alloc_info_t *arg,
struct msi_desc *desc);
- int (*handle_error)(struct irq_domain *domain,
- struct msi_desc *desc, int error);
+ int (*domain_alloc_irqs)(struct irq_domain *domain,
+ struct device *dev, int nvec);
+ void (*domain_free_irqs)(struct irq_domain *domain,
+ struct device *dev);
+ void (*msi_post_free)(struct irq_domain *domain,
+ struct device *dev);
};
/**
* struct msi_domain_info - MSI interrupt domain data
 * @flags:		Flags to describe features and capabilities
+ * @bus_token: The domain bus token
+ * @hwsize: The hardware table size or the software index limit.
+ * If 0 then the size is considered unlimited and
+ * gets initialized to the maximum software index limit
+ * by the domain creation code.
* @ops: The callback data structure
* @chip: Optional: associated interrupt chip
* @chip_data: Optional: associated interrupt chip data
@@ -254,17 +480,42 @@ struct msi_domain_ops {
* @data: Optional: domain specific data
*/
struct msi_domain_info {
- u32 flags;
- struct msi_domain_ops *ops;
- struct irq_chip *chip;
- void *chip_data;
- irq_flow_handler_t handler;
- void *handler_data;
- const char *handler_name;
- void *data;
+ u32 flags;
+ enum irq_domain_bus_token bus_token;
+ unsigned int hwsize;
+ struct msi_domain_ops *ops;
+ struct irq_chip *chip;
+ void *chip_data;
+ irq_flow_handler_t handler;
+ void *handler_data;
+ const char *handler_name;
+ void *data;
+};
+
+/**
+ * struct msi_domain_template - Template for MSI device domains
+ * @name: Storage for the resulting name. Filled in by the core.
+ * @chip: Interrupt chip for this domain
+ * @ops: MSI domain ops
+ * @info: MSI domain info data
+ */
+struct msi_domain_template {
+ char name[48];
+ struct irq_chip chip;
+ struct msi_domain_ops ops;
+ struct msi_domain_info info;
};
-/* Flags for msi_domain_info */
+/*
+ * Flags for msi_domain_info
+ *
+ * Bit 0-15: Generic MSI functionality which is not subject to restriction
+ * by parent domains
+ *
+ * Bit 16-31: Functionality which depends on the underlying parent domain and
+ * can be masked out by msi_parent_ops::init_dev_msi_info() when
+ * a device MSI domain is initialized.
+ */
enum {
/*
* Init non implemented ops callbacks with default MSI domain
@@ -276,23 +527,100 @@ enum {
* callbacks.
*/
MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1),
+ /* Needs early activate, required for PCI */
+ MSI_FLAG_ACTIVATE_EARLY = (1 << 2),
+ /*
+ * Must reactivate when irq is started even when
+ * MSI_FLAG_ACTIVATE_EARLY has been set.
+ */
+ MSI_FLAG_MUST_REACTIVATE = (1 << 3),
+ /* Populate sysfs on alloc() and destroy it on free() */
+ MSI_FLAG_DEV_SYSFS = (1 << 4),
+ /* Allocate simple MSI descriptors */
+ MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS = (1 << 5),
+ /* Free MSI descriptors */
+ MSI_FLAG_FREE_MSI_DESCS = (1 << 6),
+ /*
+ * Quirk to handle MSI implementations which do not provide
+ * masking. Currently known to affect x86, but has to be partially
+ * handled in the core MSI code.
+ */
+ MSI_FLAG_NOMASK_QUIRK = (1 << 7),
+
+ /* Mask for the generic functionality */
+ MSI_GENERIC_FLAGS_MASK = GENMASK(15, 0),
+
+ /* Mask for the domain specific functionality */
+ MSI_DOMAIN_FLAGS_MASK = GENMASK(31, 16),
+
/* Support multiple PCI MSI interrupts */
- MSI_FLAG_MULTI_PCI_MSI = (1 << 2),
+ MSI_FLAG_MULTI_PCI_MSI = (1 << 16),
/* Support PCI MSIX interrupts */
- MSI_FLAG_PCI_MSIX = (1 << 3),
- /* Needs early activate, required for PCI */
- MSI_FLAG_ACTIVATE_EARLY = (1 << 4),
+ MSI_FLAG_PCI_MSIX = (1 << 17),
+ /* Is level-triggered capable, using two messages */
+ MSI_FLAG_LEVEL_CAPABLE = (1 << 18),
+ /* MSI-X entries must be contiguous */
+ MSI_FLAG_MSIX_CONTIGUOUS = (1 << 19),
+ /* PCI/MSI-X vectors can be dynamically allocated/freed post MSI-X enable */
+ MSI_FLAG_PCI_MSIX_ALLOC_DYN = (1 << 20),
+ /* Support for PCI/IMS */
+ MSI_FLAG_PCI_IMS = (1 << 21),
};
+/**
+ * struct msi_parent_ops - MSI parent domain callbacks and configuration info
+ *
+ * @supported_flags: Required: The supported MSI flags of the parent domain
+ * @prefix: Optional: Prefix for the domain and chip name
+ * @init_dev_msi_info:	Required: Callback for MSI parent domains to set up
+ *			parent-domain-specific domain flags, domain ops and
+ *			interrupt chip callbacks when a per-device domain is created.
+ */
+struct msi_parent_ops {
+ u32 supported_flags;
+ const char *prefix;
+ bool (*init_dev_msi_info)(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *msi_parent_domain,
+ struct msi_domain_info *msi_child_info);
+};
+
+bool msi_parent_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *msi_parent_domain,
+ struct msi_domain_info *msi_child_info);
+
int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force);
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent);
-int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
- int nvec);
-void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
+
+bool msi_create_device_irq_domain(struct device *dev, unsigned int domid,
+ const struct msi_domain_template *template,
+ unsigned int hwsize, void *domain_data,
+ void *chip_data);
+void msi_remove_device_irq_domain(struct device *dev, unsigned int domid);
+
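To show how struct msi_domain_template and msi_create_device_irq_domain() fit together, a hedged sketch follows; all my_* callbacks and the hardware size of 64 are assumptions of this example:

/* Hypothetical sketch: instantiate a per-device MSI domain from a
 * template. The core duplicates the template, so it can live in rodata.
 */
static const struct msi_domain_template my_template = {
        .chip = {
                .name                   = "my-dev-msi",
                .irq_mask               = my_irq_mask,  /* assumed callbacks */
                .irq_unmask             = my_irq_unmask,
                .irq_write_msi_msg      = my_write_msi_msg,
        },
        .ops = {
                .set_desc               = my_set_desc,
        },
        .info = {
                .flags  = MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
                          MSI_FLAG_FREE_MSI_DESCS,
        },
};

static int my_setup_msi_domain(struct device *dev)
{
        /* 64 is an assumed hardware table size */
        if (!msi_create_device_irq_domain(dev, MSI_SECONDARY_DOMAIN,
                                          &my_template, 64, NULL, NULL))
                return -ENODEV;
        return 0;
}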
+bool msi_match_device_irq_domain(struct device *dev, unsigned int domid,
+ enum irq_domain_bus_token bus_token);
+
+int msi_domain_alloc_irqs_range_locked(struct device *dev, unsigned int domid,
+ unsigned int first, unsigned int last);
+int msi_domain_alloc_irqs_range(struct device *dev, unsigned int domid,
+ unsigned int first, unsigned int last);
+int msi_domain_alloc_irqs_all_locked(struct device *dev, unsigned int domid, int nirqs);
+
+struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, unsigned int index,
+ const struct irq_affinity_desc *affdesc,
+ union msi_instance_cookie *cookie);
+
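A hedged sketch of dynamic post-enable allocation with msi_domain_alloc_irq_at(); MSI_ANY_INDEX, union msi_instance_cookie and struct msi_map come from the new msi_api.h shown further below, and the queue address is a made-up cookie payload:

/* Hypothetical sketch: allocate one interrupt at any free index and
 * hand an implementation-defined cookie to the irq chip callbacks.
 */
static int my_alloc_dyn_irq(struct device *dev, u64 my_queue_addr)
{
        union msi_instance_cookie icookie = { .value = my_queue_addr };
        struct msi_map map;

        map = msi_domain_alloc_irq_at(dev, MSI_SECONDARY_DOMAIN, MSI_ANY_INDEX,
                                      NULL, &icookie);
        if (map.index < 0)
                return map.index;  /* negative index carries the error code */

        return map.virq;           /* valid Linux interrupt number */
}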
+void msi_domain_free_irqs_range_locked(struct device *dev, unsigned int domid,
+ unsigned int first, unsigned int last);
+void msi_domain_free_irqs_range(struct device *dev, unsigned int domid,
+ unsigned int first, unsigned int last);
+void msi_domain_free_irqs_all_locked(struct device *dev, unsigned int domid);
+void msi_domain_free_irqs_all(struct device *dev, unsigned int domid);
+
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
@@ -307,35 +635,60 @@ int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
int nvec, msi_alloc_info_t *args);
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
int virq, int nvec, msi_alloc_info_t *args);
+void msi_domain_depopulate_descs(struct device *dev, int virq, int nvec);
+
struct irq_domain *
-platform_msi_create_device_domain(struct device *dev,
- unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg,
- const struct irq_domain_ops *ops,
- void *host_data);
-int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs);
-void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nvec);
+__platform_msi_create_device_domain(struct device *dev,
+ unsigned int nvec,
+ bool is_tree,
+ irq_write_msi_msg_t write_msi_msg,
+ const struct irq_domain_ops *ops,
+ void *host_data);
+
+#define platform_msi_create_device_domain(dev, nvec, write, ops, data) \
+ __platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
+#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
+ __platform_msi_create_device_domain(dev, nvec, true, write, ops, data)
+
+int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs);
+void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nvec);
void *platform_msi_get_host_data(struct irq_domain *domain);
-#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
-#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
-void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
+bool msi_device_has_isolated_msi(struct device *dev);
+#else /* CONFIG_GENERIC_MSI_IRQ */
+static inline bool msi_device_has_isolated_msi(struct device *dev)
+{
+ /*
+ * Arguably if the platform does not enable MSI support then it has
+ * "isolated MSI", as an interrupt controller that cannot receive MSIs
+ * is inherently isolated by our definition. The default definition for
+ * arch_is_isolated_msi() is conservative and returns false anyhow.
+ */
+ return arch_is_isolated_msi();
+}
+#endif /* CONFIG_GENERIC_MSI_IRQ */
+
+/* PCI specific interfaces */
+#ifdef CONFIG_PCI_MSI
+struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
+void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
+void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+void pci_msi_mask_irq(struct irq_data *data);
+void pci_msi_unmask_irq(struct irq_data *data);
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent);
-irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
- struct msi_desc *desc);
-int pci_msi_domain_check_cap(struct irq_domain *domain,
- struct msi_domain_info *info, struct device *dev);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
-#else
+#else /* CONFIG_PCI_MSI */
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
return NULL;
}
-#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
+static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg) { }
+#endif /* !CONFIG_PCI_MSI */
#endif /* LINUX_MSI_H */
diff --git a/include/linux/msi_api.h b/include/linux/msi_api.h
new file mode 100644
index 000000000000..391087ad99b1
--- /dev/null
+++ b/include/linux/msi_api.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_MSI_API_H
+#define LINUX_MSI_API_H
+
+/*
+ * APIs which are relevant for device driver code for allocating and
+ * freeing MSI interrupts and querying the associations between
+ * hardware/software MSI indices and the Linux interrupt number.
+ */
+
+struct device;
+
+/*
+ * Per device interrupt domain related constants.
+ */
+enum msi_domain_ids {
+ MSI_DEFAULT_DOMAIN,
+ MSI_SECONDARY_DOMAIN,
+ MSI_MAX_DEVICE_IRQDOMAINS,
+};
+
+/**
+ * union msi_instance_cookie - MSI instance cookie
+ * @value: u64 value store
+ * @ptr: Pointer to usage site specific data
+ *
+ * This cookie is handed to the IMS allocation function and stored in the
+ * MSI descriptor for the interrupt chip callbacks.
+ *
+ * The content of this cookie is MSI domain implementation defined. For
+ * PCI/IMS implementations this could be a PASID or a pointer to queue
+ * memory.
+ */
+union msi_instance_cookie {
+ u64 value;
+ void *ptr;
+};
+
+/**
+ * msi_map - Mapping between MSI index and Linux interrupt number
+ * @index:	The MSI index, e.g. a slot in the MSI-X table or
+ *		a software-managed index, if >= 0. If negative,
+ *		the allocation function failed and @index contains
+ *		the error code.
+ * @virq: The associated Linux interrupt number
+ */
+struct msi_map {
+ int index;
+ int virq;
+};
+
+/*
+ * Constant to be used for dynamic allocations when the allocation is any
+ * free MSI index, which is either an entry in a hardware table or a
+ * software managed index.
+ */
+#define MSI_ANY_INDEX UINT_MAX
+
+unsigned int msi_domain_get_virq(struct device *dev, unsigned int domid, unsigned int index);
+
+/**
+ * msi_get_virq - Look up the Linux interrupt number for an MSI index on the default interrupt domain
+ * @dev: Device for which the lookup happens
+ * @index: The MSI index to lookup
+ *
+ * Return: The Linux interrupt number on success (> 0), 0 if not found
+ */
+static inline unsigned int msi_get_virq(struct device *dev, unsigned int index)
+{
+ return msi_domain_get_virq(dev, MSI_DEFAULT_DOMAIN, index);
+}
+
+#endif
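A short usage sketch of the lookup helper; my_handler and my_data are assumptions, while request_irq() is the stock kernel API:

/* Hypothetical sketch: resolve MSI index 0 and install a handler. */
static int my_request_first_irq(struct device *dev, void *my_data)
{
        unsigned int virq = msi_get_virq(dev, 0);

        if (!virq)
                return -ENOENT;  /* 0 means: no interrupt at this index */

        return request_irq(virq, my_handler, 0, "my-device", my_data);
}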
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 3bf8f954b642..d890805f5494 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * linux/include/linux/mtd/bbm.h
- *
* NAND family Bad Block Management (BBM) header file
* - Bad Block Table (BBT) implementation
*
@@ -9,21 +8,6 @@
*
* Copyright © 2000-2005
 *			Thomas Gleixner <tglx@linutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __LINUX_MTD_BBM_H
#define __LINUX_MTD_BBM_H
@@ -95,10 +79,7 @@ struct nand_bbt_descr {
#define NAND_BBT_WRITE 0x00002000
/* Read and write back block contents when writing bbt */
#define NAND_BBT_SAVECONTENT 0x00004000
-/* Search good / bad pattern on the first and the second page */
-#define NAND_BBT_SCAN2NDPAGE 0x00008000
-/* Search good / bad pattern on the last page of the eraseblock */
-#define NAND_BBT_SCANLASTPAGE 0x00010000
+
/*
* Use a flash based bad block table. By default, OOB identifier is saved in
* OOB area. This option is passed to the default bad block table function.
@@ -117,7 +98,7 @@ struct nand_bbt_descr {
/*
* Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr
- * was allocated dynamicaly and must be freed in nand_release(). Has no meaning
+ * was allocated dynamically and must be freed in nand_cleanup(). Has no meaning
* in nand_chip.bbt_options.
*/
#define NAND_BBT_DYNAMICSTRUCT 0x80000000
@@ -126,13 +107,6 @@ struct nand_bbt_descr {
#define NAND_BBT_SCAN_MAXBLOCKS 4
/*
- * Constants for oob configuration
- */
-#define NAND_SMALL_BADBLOCK_POS 5
-#define NAND_LARGE_BADBLOCK_POS 0
-#define ONENAND_BADBLOCK_POS 0
-
-/*
* Bad block scanning errors
*/
#define ONENAND_BBT_READ_ERROR 1
@@ -142,7 +116,6 @@ struct nand_bbt_descr {
/**
* struct bbm_info - [GENERIC] Bad Block Table data structure
* @bbt_erase_shift: [INTERN] number of address bits in a bbt entry
- * @badblockpos: [INTERN] position of the bad block marker in the oob area
* @options: options for this descriptor
* @bbt: [INTERN] bad block table pointer
* @isbad_bbt: function to determine if a block is bad
@@ -152,7 +125,6 @@ struct nand_bbt_descr {
*/
struct bbm_info {
int bbt_erase_shift;
- int badblockpos;
int options;
uint8_t *bbt;
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index e93837f647de..15cc9b95e32b 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -1,20 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __MTD_TRANS_H__
@@ -23,7 +9,6 @@
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
-#include <linux/workqueue.h>
struct hd_geometry;
struct mtd_info;
@@ -44,9 +29,9 @@ struct mtd_blktrans_dev {
struct kref ref;
struct gendisk *disk;
struct attribute_group *disk_attributes;
- struct workqueue_struct *wq;
- struct work_struct work;
struct request_queue *rq;
+ struct list_head rq_list;
+ struct blk_mq_tag_set *tag_set;
spinlock_t queue_lock;
void *priv;
fmode_t file_mode;
@@ -92,5 +77,16 @@ extern int add_mtd_blktrans_dev(struct mtd_blktrans_dev *dev);
extern int del_mtd_blktrans_dev(struct mtd_blktrans_dev *dev);
extern int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev);
+/**
+ * module_mtd_blktrans() - Helper macro for registering a mtd blktrans driver
+ * @__mtd_blktrans: mtd_blktrans_ops struct
+ *
+ * Helper macro for mtd blktrans drivers which do not do anything special in
+ * module init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_mtd_blktrans(__mtd_blktrans) \
+ module_driver(__mtd_blktrans, register_mtd_blktrans, \
+ deregister_mtd_blktrans)
#endif /* __MTD_TRANS_H__ */
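As an illustration of the new helper macro (all my_* names hypothetical), a blktrans driver without special module init/exit reduces to:

/* Hypothetical sketch: minimal mtd blktrans driver registration. */
static struct mtd_blktrans_ops my_tr = {
        .name           = "myblk",
        .major          = 0,            /* 0 = dynamically allocated major */
        .part_bits      = 0,
        .blksize        = 512,
        .readsect       = my_readsect,  /* assumed sector I/O callbacks */
        .writesect      = my_writesect,
        .add_mtd        = my_add_mtd,
        .remove_dev     = my_remove_dev,
        .owner          = THIS_MODULE,
};

module_mtd_blktrans(my_tr);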
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 9b57a9b1b081..d88bb56c18e2 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -1,20 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> et al.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __MTD_CFI_H__
@@ -152,7 +138,7 @@ struct cfi_ident {
uint16_t InterfaceDesc;
uint16_t MaxBufWriteSize;
uint8_t NumEraseRegions;
- uint32_t EraseRegionInfo[0]; /* Not host ordered */
+ uint32_t EraseRegionInfo[]; /* Not host ordered */
} __packed;
/* Extended Query Structure for both PRI and ALT */
@@ -179,7 +165,7 @@ struct cfi_pri_intelext {
uint16_t ProtRegAddr;
uint8_t FactProtRegSize;
uint8_t UserProtRegSize;
- uint8_t extra[0];
+ uint8_t extra[];
} __packed;
struct cfi_intelext_otpinfo {
@@ -233,6 +219,13 @@ struct cfi_pri_amdstd {
uint8_t VppMin;
uint8_t VppMax;
uint8_t TopBottom;
+	/* The fields below are added in version 1.5 */
+ uint8_t ProgramSuspend;
+ uint8_t UnlockBypass;
+ uint8_t SecureSiliconSector;
+ uint8_t SoftwareFeatures;
+#define CFI_POLL_STATUS_REG BIT(0)
+#define CFI_POLL_DQ BIT(1)
} __packed;
/* Vendor-Specific PRI for Atmel chips (command set 0x0002) */
@@ -293,7 +286,8 @@ struct cfi_private {
map_word sector_erase_cmd;
unsigned long chipshift; /* Because they're of the same type */
const char *im_name; /* inter_module name for cmdset_setup */
- struct flchip chips[0]; /* per-chip data structure for each chip */
+ unsigned long quirks;
+ struct flchip chips[]; /* per-chip data structure for each chip */
};
uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
@@ -377,6 +371,7 @@ struct cfi_fixup {
#define CFI_MFR_SHARP 0x00B0
#define CFI_MFR_SST 0x00BF
#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
+#define CFI_MFR_MICRON 0x002C /* Micron */
#define CFI_MFR_TOSHIBA 0x0098
#define CFI_MFR_WINBOND 0x00DA
diff --git a/include/linux/mtd/cfi_endian.h b/include/linux/mtd/cfi_endian.h
index b97a625071f8..5275118aa449 100644
--- a/include/linux/mtd/cfi_endian.h
+++ b/include/linux/mtd/cfi_endian.h
@@ -1,20 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <asm/byteorder.h>
diff --git a/include/linux/mtd/concat.h b/include/linux/mtd/concat.h
index ccdbe93a909c..d6f653e07426 100644
--- a/include/linux/mtd/concat.h
+++ b/include/linux/mtd/concat.h
@@ -1,22 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* MTD device concatenation layer definitions
*
* Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef MTD_CONCAT_H
diff --git a/include/linux/mtd/doc2000.h b/include/linux/mtd/doc2000.h
index 407d1e556c39..1b7b0ee070ca 100644
--- a/include/linux/mtd/doc2000.h
+++ b/include/linux/mtd/doc2000.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Linux driver for Disk-On-Chip devices
*
@@ -5,21 +6,6 @@
* Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
* Copyright © 2002-2003 Greg Ungerer <gerg@snapgear.com>
* Copyright © 2002-2003 SnapGear Inc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __MTD_DOC2000_H__
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index b63fa457febd..c04f690871ca 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -1,21 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright © 2000 Red Hat UK Limited
* Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __MTD_FLASHCHIP_H__
@@ -54,7 +40,7 @@ typedef enum {
FL_READING,
FL_CACHEDPRG,
/* These 4 come from onenand_state_t, which has been unified here */
- FL_RESETING,
+ FL_RESETTING,
FL_OTPING,
FL_PREPARING_ERASE,
FL_VERIFYING_ERASE,
@@ -85,6 +71,7 @@ struct flchip {
unsigned int write_suspended:1;
unsigned int erase_suspended:1;
unsigned long in_progress_block_addr;
+ unsigned long in_progress_block_mask;
struct mutex mutex;
wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
diff --git a/include/linux/mtd/gen_probe.h b/include/linux/mtd/gen_probe.h
index 2c456054fded..6bd0b30d5935 100644
--- a/include/linux/mtd/gen_probe.h
+++ b/include/linux/mtd/gen_probe.h
@@ -1,21 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright © 2001 Red Hat UK Limited
* Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __LINUX_MTD_GEN_PROBE_H__
diff --git a/include/linux/mtd/hyperbus.h b/include/linux/mtd/hyperbus.h
new file mode 100644
index 000000000000..bb6b7121a542
--- /dev/null
+++ b/include/linux/mtd/hyperbus.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+#ifndef __LINUX_MTD_HYPERBUS_H__
+#define __LINUX_MTD_HYPERBUS_H__
+
+#include <linux/mtd/map.h>
+
+/* HyperBus command bits */
+#define HYPERBUS_RW 0x80 /* R/W# */
+#define HYPERBUS_RW_WRITE 0
+#define HYPERBUS_RW_READ 0x80
+#define HYPERBUS_AS 0x40 /* Address Space */
+#define HYPERBUS_AS_MEM 0
+#define HYPERBUS_AS_REG 0x40
+#define HYPERBUS_BT 0x20 /* Burst Type */
+#define HYPERBUS_BT_WRAPPED 0
+#define HYPERBUS_BT_LINEAR 0x20
+
+enum hyperbus_memtype {
+ HYPERFLASH,
+ HYPERRAM,
+};
+
+/**
+ * struct hyperbus_device - struct representing HyperBus slave device
+ * @map: map_info struct for accessing MMIO HyperBus flash memory
+ * @np: pointer to HyperBus slave device node
+ * @mtd: pointer to MTD struct
+ * @ctlr: pointer to HyperBus controller struct
+ * @memtype: type of memory device: HyperFlash or HyperRAM
+ * @priv: pointer to controller-specific per-device private data
+ */
+
+struct hyperbus_device {
+ struct map_info map;
+ struct device_node *np;
+ struct mtd_info *mtd;
+ struct hyperbus_ctlr *ctlr;
+ enum hyperbus_memtype memtype;
+ void *priv;
+};
+
+/**
+ * struct hyperbus_ops - struct representing custom HyperBus operations
+ * @read16: read 16 bits of data from flash in a single burst. Used to read
+ *	    from a non-default address space, such as ID/CFI space
+ * @write16: write 16 bits of data to flash in a single burst. Used to
+ *	     send a command to the flash or write a single 16-bit word at a time.
+ * @copy_from: copy data from flash memory
+ * @copy_to: copy data to flash memory
+ * @calibrate: calibrate HyperBus controller
+ */
+
+struct hyperbus_ops {
+ u16 (*read16)(struct hyperbus_device *hbdev, unsigned long addr);
+ void (*write16)(struct hyperbus_device *hbdev,
+ unsigned long addr, u16 val);
+ void (*copy_from)(struct hyperbus_device *hbdev, void *to,
+ unsigned long from, ssize_t len);
+ void (*copy_to)(struct hyperbus_device *dev, unsigned long to,
+ const void *from, ssize_t len);
+ int (*calibrate)(struct hyperbus_device *dev);
+};
+
+/**
+ * struct hyperbus_ctlr - struct representing HyperBus controller
+ * @dev: pointer to HyperBus controller device
+ * @calibrated: flag indicating the ctlr calibration sequence is complete
+ * @ops: HyperBus controller ops
+ */
+struct hyperbus_ctlr {
+ struct device *dev;
+ bool calibrated;
+
+ const struct hyperbus_ops *ops;
+};
+
+/**
+ * hyperbus_register_device - probe and register a HyperBus slave memory device
+ * @hbdev: hyperbus_device struct with np and ctlr fields populated
+ *
+ * Return: 0 for success, others for failure.
+ */
+int hyperbus_register_device(struct hyperbus_device *hbdev);
+
+/**
+ * hyperbus_unregister_device - deregister HyperBus slave memory device
+ * @hbdev: hyperbus_device to be unregistered
+ */
+void hyperbus_unregister_device(struct hyperbus_device *hbdev);
+
+#endif /* __LINUX_MTD_HYPERBUS_H__ */
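A hedged sketch of how a controller driver might populate and register a HyperFlash device; my_ctlr_priv and my_hyperbus_ops are assumptions of this example, not part of the header:

/* Hypothetical sketch: register a HyperFlash device from probe(). */
struct my_ctlr_priv {
        struct hyperbus_ctlr    ctlr;
        struct hyperbus_device  hbdev;
};

static int my_probe(struct platform_device *pdev)
{
        struct my_ctlr_priv *priv;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->ctlr.dev = &pdev->dev;
        priv->ctlr.ops = &my_hyperbus_ops;      /* assumed ops table */

        priv->hbdev.ctlr = &priv->ctlr;
        priv->hbdev.np = of_get_next_child(pdev->dev.of_node, NULL);
        priv->hbdev.memtype = HYPERFLASH;

        return hyperbus_register_device(&priv->hbdev);
}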
diff --git a/include/linux/mtd/inftl.h b/include/linux/mtd/inftl.h
index 8255118be0f0..fdfff87066a9 100644
--- a/include/linux/mtd/inftl.h
+++ b/include/linux/mtd/inftl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* inftl.h -- defines to support the Inverse NAND Flash Translation Layer
*
diff --git a/include/linux/mtd/jedec.h b/include/linux/mtd/jedec.h
new file mode 100644
index 000000000000..0b6b59f7cfbd
--- /dev/null
+++ b/include/linux/mtd/jedec.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
+ * Steven J. Hill <sjhill@realitydiluted.com>
+ * Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Contains all JEDEC related definitions
+ */
+
+#ifndef __LINUX_MTD_JEDEC_H
+#define __LINUX_MTD_JEDEC_H
+
+struct jedec_ecc_info {
+ u8 ecc_bits;
+ u8 codeword_size;
+ __le16 bb_per_lun;
+ __le16 block_endurance;
+ u8 reserved[2];
+} __packed;
+
+/* JEDEC features */
+#define JEDEC_FEATURE_16_BIT_BUS (1 << 0)
+
+struct nand_jedec_params {
+ /* rev info and features block */
+ /* 'J' 'E' 'S' 'D' */
+ u8 sig[4];
+ __le16 revision;
+ __le16 features;
+ u8 opt_cmd[3];
+ __le16 sec_cmd;
+ u8 num_of_param_pages;
+ u8 reserved0[18];
+
+ /* manufacturer information block */
+ char manufacturer[12];
+ char model[20];
+ u8 jedec_id[6];
+ u8 reserved1[10];
+
+ /* memory organization block */
+ __le32 byte_per_page;
+ __le16 spare_bytes_per_page;
+ u8 reserved2[6];
+ __le32 pages_per_block;
+ __le32 blocks_per_lun;
+ u8 lun_count;
+ u8 addr_cycles;
+ u8 bits_per_cell;
+ u8 programs_per_page;
+ u8 multi_plane_addr;
+ u8 multi_plane_op_attr;
+ u8 reserved3[38];
+
+ /* electrical parameter block */
+ __le16 async_sdr_speed_grade;
+ __le16 toggle_ddr_speed_grade;
+ __le16 sync_ddr_speed_grade;
+ u8 async_sdr_features;
+ u8 toggle_ddr_features;
+ u8 sync_ddr_features;
+ __le16 t_prog;
+ __le16 t_bers;
+ __le16 t_r;
+ __le16 t_r_multi_plane;
+ __le16 t_ccs;
+ __le16 io_pin_capacitance_typ;
+ __le16 input_pin_capacitance_typ;
+ __le16 clk_pin_capacitance_typ;
+ u8 driver_strength_support;
+ __le16 t_adl;
+ u8 reserved4[36];
+
+ /* ECC and endurance block */
+ u8 guaranteed_good_blocks;
+ __le16 guaranteed_block_endurance;
+ struct jedec_ecc_info ecc_info[4];
+ u8 reserved5[29];
+
+ /* reserved */
+ u8 reserved6[148];
+
+ /* vendor */
+ __le16 vendor_rev_num;
+ u8 reserved7[88];
+
+ /* CRC for Parameter Page */
+ __le16 crc;
+} __packed;
+
+#endif /* __LINUX_MTD_JEDEC_H */
diff --git a/include/linux/mtd/latch-addr-flash.h b/include/linux/mtd/latch-addr-flash.h
deleted file mode 100644
index e94b8e128074..000000000000
--- a/include/linux/mtd/latch-addr-flash.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Interface for NOR flash driver whose high address lines are latched
- *
- * Copyright © 2008 MontaVista Software, Inc. <source@mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-#ifndef __LATCH_ADDR_FLASH__
-#define __LATCH_ADDR_FLASH__
-
-struct map_info;
-struct mtd_partition;
-
-struct latch_addr_flash_data {
- unsigned int width;
- unsigned int size;
-
- int (*init)(void *data, int cs);
- void (*done)(void *data);
- void (*set_window)(unsigned long offset, void *data);
- void *data;
-
- unsigned int nr_parts;
- struct mtd_partition *parts;
-};
-
-#endif
diff --git a/include/linux/mtd/lpc32xx_mlc.h b/include/linux/mtd/lpc32xx_mlc.h
index d91b1e35631e..d168c628c0d5 100644
--- a/include/linux/mtd/lpc32xx_mlc.h
+++ b/include/linux/mtd/lpc32xx_mlc.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Platform data for LPC32xx SoC MLC NAND controller
*
* Copyright © 2012 Roland Stigge
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MTD_LPC32XX_MLC_H
diff --git a/include/linux/mtd/lpc32xx_slc.h b/include/linux/mtd/lpc32xx_slc.h
index 1169548a1535..cf54a9f80460 100644
--- a/include/linux/mtd/lpc32xx_slc.h
+++ b/include/linux/mtd/lpc32xx_slc.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Platform data for LPC32xx SoC SLC NAND controller
*
* Copyright © 2012 Roland Stigge
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MTD_LPC32XX_SLC_H
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 3aa56e3104bb..b4fa92a6e44b 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -1,20 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> et al.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
/* Overhauled routines for dealing with different mmap regions of flash */
@@ -270,75 +256,67 @@ void map_destroy(struct mtd_info *mtd);
#define INVALIDATE_CACHED_RANGE(map, from, size) \
do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)
-
-static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2)
-{
- int i;
-
- for (i = 0; i < map_words(map); i++) {
- if (val1.x[i] != val2.x[i])
- return 0;
- }
-
- return 1;
-}
-
-static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2)
-{
- map_word r;
- int i;
-
- for (i = 0; i < map_words(map); i++)
- r.x[i] = val1.x[i] & val2.x[i];
-
- return r;
-}
-
-static inline map_word map_word_clr(struct map_info *map, map_word val1, map_word val2)
-{
- map_word r;
- int i;
-
- for (i = 0; i < map_words(map); i++)
- r.x[i] = val1.x[i] & ~val2.x[i];
-
- return r;
-}
-
-static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2)
-{
- map_word r;
- int i;
-
- for (i = 0; i < map_words(map); i++)
- r.x[i] = val1.x[i] | val2.x[i];
-
- return r;
-}
-
-static inline int map_word_andequal(struct map_info *map, map_word val1, map_word val2, map_word val3)
-{
- int i;
-
- for (i = 0; i < map_words(map); i++) {
- if ((val1.x[i] & val2.x[i]) != val3.x[i])
- return 0;
- }
-
- return 1;
-}
-
-static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2)
-{
- int i;
-
- for (i = 0; i < map_words(map); i++) {
- if (val1.x[i] & val2.x[i])
- return 1;
- }
-
- return 0;
-}
+#define map_word_equal(map, val1, val2) \
+({ \
+ int i, ret = 1; \
+ for (i = 0; i < map_words(map); i++) \
+ if ((val1).x[i] != (val2).x[i]) { \
+ ret = 0; \
+ break; \
+ } \
+ ret; \
+})
+
+#define map_word_and(map, val1, val2) \
+({ \
+ map_word r; \
+ int i; \
+ for (i = 0; i < map_words(map); i++) \
+ r.x[i] = (val1).x[i] & (val2).x[i]; \
+ r; \
+})
+
+#define map_word_clr(map, val1, val2) \
+({ \
+ map_word r; \
+ int i; \
+ for (i = 0; i < map_words(map); i++) \
+ r.x[i] = (val1).x[i] & ~(val2).x[i]; \
+ r; \
+})
+
+#define map_word_or(map, val1, val2) \
+({ \
+ map_word r; \
+ int i; \
+ for (i = 0; i < map_words(map); i++) \
+ r.x[i] = (val1).x[i] | (val2).x[i]; \
+ r; \
+})
+
+#define map_word_andequal(map, val1, val2, val3) \
+({ \
+ int i, ret = 1; \
+ for (i = 0; i < map_words(map); i++) { \
+ if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \
+ ret = 0; \
+ break; \
+ } \
+ } \
+ ret; \
+})
+
+#define map_word_bitsset(map, val1, val2) \
+({ \
+ int i, ret = 0; \
+ for (i = 0; i < map_words(map); i++) { \
+ if ((val1).x[i] & (val2).x[i]) { \
+ ret = 1; \
+ break; \
+ } \
+ } \
+ ret; \
+})
static inline map_word map_word_load(struct map_info *map, const void *ptr)
{
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index f8a2ef239c60..7c58c44662b8 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -1,20 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> et al.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __MTD_MTD_H__
@@ -22,40 +8,29 @@
#include <linux/types.h>
#include <linux/uio.h>
+#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/of.h>
+#include <linux/nvmem-provider.h>
#include <mtd/mtd-abi.h>
#include <asm/div64.h>
-#define MTD_ERASE_PENDING 0x01
-#define MTD_ERASING 0x02
-#define MTD_ERASE_SUSPEND 0x04
-#define MTD_ERASE_DONE 0x08
-#define MTD_ERASE_FAILED 0x10
-
#define MTD_FAIL_ADDR_UNKNOWN -1LL
+struct mtd_info;
+
/*
* If the erase fails, fail_addr might indicate exactly which block failed. If
* fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level
* or was not specific to any particular block.
*/
struct erase_info {
- struct mtd_info *mtd;
uint64_t addr;
uint64_t len;
uint64_t fail_addr;
- u_long time;
- u_long retries;
- unsigned dev;
- unsigned cell;
- void (*callback) (struct erase_info *self);
- u_long priv;
- u_char state;
- struct erase_info *next;
};
struct mtd_erase_region_info {
@@ -65,6 +40,12 @@ struct mtd_erase_region_info {
unsigned long *lockmap; /* If keeping bitmap of locks */
};
+struct mtd_req_stats {
+ unsigned int uncorrectable_errors;
+ unsigned int corrected_bitflips;
+ unsigned int max_bitflips;
+};
+
/**
* struct mtd_oob_ops - oob operation operands
* @mode: operation mode
@@ -80,9 +61,11 @@ struct mtd_erase_region_info {
* @datbuf: data buffer - if NULL only oob data are read/written
* @oobbuf: oob data buffer
*
- * Note, it is allowed to read more than one OOB area at one go, but not write.
- * The interface assumes that the OOB write requests program only one page's
- * OOB area.
+ * Note, some MTD drivers do not allow you to write more than one OOB area in
+ * one go. If you try to do that on such an MTD device, -EINVAL will be
+ * returned. If you want to make your implementation portable across all kinds
+ * of MTD devices, you should split the write request into several sub-requests
+ * when the request crosses a page boundary.
*/
struct mtd_oob_ops {
unsigned int mode;
@@ -93,10 +76,9 @@ struct mtd_oob_ops {
uint32_t ooboffs;
uint8_t *datbuf;
uint8_t *oobbuf;
+ struct mtd_req_stats *stats;
};
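The new @stats field lets a caller collect per-request ECC statistics; here is a hedged sketch (function name and scrubbing policy assumed) of wiring it into a page read:

/* Hypothetical sketch: read one page and inspect bitflip statistics. */
static int my_read_page(struct mtd_info *mtd, loff_t offs, u8 *buf)
{
        struct mtd_req_stats stats = { };
        struct mtd_oob_ops ops = {
                .mode   = MTD_OPS_AUTO_OOB,
                .len    = mtd->writesize,
                .datbuf = buf,
                .stats  = &stats,
        };
        int ret = mtd_read_oob(mtd, offs, &ops);

        if (!ret && stats.max_bitflips >= mtd->bitflip_threshold)
                pr_info("block at %lld needs scrubbing soon\n", (long long)offs);

        return ret;
}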
-#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32
-#define MTD_MAX_ECCPOS_ENTRIES_LARGE 640
/**
* struct mtd_oob_region - oob region definition
* @offset: region offset
@@ -206,6 +188,53 @@ struct mtd_pairing_scheme {
struct module; /* only needed for owner field in mtd_info */
+/**
+ * struct mtd_debug_info - debugging information for an MTD device.
+ *
+ * @dfs_dir: direntry object of the MTD device debugfs directory
+ */
+struct mtd_debug_info {
+ struct dentry *dfs_dir;
+};
+
+/**
+ * struct mtd_part - MTD partition specific fields
+ *
+ * @node: list node used to add an MTD partition to the parent partition list
+ * @offset: offset of the partition relatively to the parent offset
+ * @size: partition size. Should be equal to mtd->size unless
+ * MTD_SLC_ON_MLC_EMULATION is set
+ * @flags: original flags (before the mtdpart logic decided to tweak them based
+ * on flash constraints, like eraseblock/pagesize alignment)
+ *
+ * This struct is embedded in mtd_info and contains partition-specific
+ * properties/fields.
+ */
+struct mtd_part {
+ struct list_head node;
+ u64 offset;
+ u64 size;
+ u32 flags;
+};
+
+/**
+ * struct mtd_master - MTD master specific fields
+ *
+ * @partitions_lock: lock protecting accesses to the partition list. Protects
+ * not only the master partition list, but also all
+ * sub-partitions.
+ * @chrdev_lock: lock protecting concurrent access to the MTD character device
+ * @suspended: set to 1 when the device is suspended, 0 otherwise
+ *
+ * This struct is embedded in mtd_info and contains master-specific
+ * properties/fields. The master is the root MTD device from the MTD partition
+ * point of view.
+ */
+struct mtd_master {
+ struct mutex partitions_lock;
+ struct mutex chrdev_lock;
+ unsigned int suspended : 1;
+};
+
struct mtd_info {
u_char type;
uint32_t flags;
@@ -258,7 +287,7 @@ struct mtd_info {
*/
unsigned int bitflip_threshold;
- // Kernel-only stuff starts here.
+ /* Kernel-only stuff starts here. */
const char *name;
int index;
@@ -288,10 +317,6 @@ struct mtd_info {
int (*_point) (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys);
int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
- unsigned long (*_get_unmapped_area) (struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags);
int (*_read) (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf);
int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
@@ -311,9 +336,12 @@ struct mtd_info {
int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf);
int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to,
- size_t len, size_t *retlen, u_char *buf);
+ size_t len, size_t *retlen,
+ const u_char *buf);
int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
size_t len);
+ int (*_erase_user_prot_reg) (struct mtd_info *mtd, loff_t from,
+ size_t len);
int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen);
void (*_sync) (struct mtd_info *mtd);
@@ -334,6 +362,12 @@ struct mtd_info {
int (*_get_device) (struct mtd_info *mtd);
void (*_put_device) (struct mtd_info *mtd);
+ /*
+	 * Flag indicating a panic write; low-level drivers can take appropriate
+	 * action if required to ensure writes go through.
+ */
+ bool oops_panic_write;
+
struct notifier_block reboot_notifier; /* default mode before reboot */
/* ECC status information */
@@ -346,8 +380,54 @@ struct mtd_info {
struct module *owner;
struct device dev;
int usecount;
+ struct mtd_debug_info dbg;
+ struct nvmem_device *nvmem;
+ struct nvmem_device *otp_user_nvmem;
+ struct nvmem_device *otp_factory_nvmem;
+
+ /*
+ * Parent device from the MTD partition point of view.
+ *
+ * MTD masters do not have any parent, MTD partitions do. The parent
+ * MTD device can itself be a partition.
+ */
+ struct mtd_info *parent;
+
+ /* List of partitions attached to this MTD device */
+ struct list_head partitions;
+
+ struct mtd_part part;
+ struct mtd_master master;
};
+static inline struct mtd_info *mtd_get_master(struct mtd_info *mtd)
+{
+ while (mtd->parent)
+ mtd = mtd->parent;
+
+ return mtd;
+}
+
+static inline u64 mtd_get_master_ofs(struct mtd_info *mtd, u64 ofs)
+{
+ while (mtd->parent) {
+ ofs += mtd->part.offset;
+ mtd = mtd->parent;
+ }
+
+ return ofs;
+}
+
+static inline bool mtd_is_partition(const struct mtd_info *mtd)
+{
+ return mtd->parent;
+}
+
+static inline bool mtd_has_partitions(const struct mtd_info *mtd)
+{
+ return !list_empty(&mtd->partitions);
+}
+
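A small sketch of these partition helpers in action (function name assumed):

/* Hypothetical sketch: log where a (possibly nested) partition sits
 * on the underlying master device.
 */
static void my_log_layout(struct mtd_info *mtd)
{
        struct mtd_info *master = mtd_get_master(mtd);

        if (mtd_is_partition(mtd))
                pr_info("%s: offset %llu on master %s\n", mtd->name,
                        mtd_get_master_ofs(mtd, 0), master->name);
}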
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobecc);
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
@@ -391,7 +471,7 @@ static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
return dev_of_node(&mtd->dev);
}
-static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
+static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
{
return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
}
@@ -399,13 +479,16 @@ static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
static inline int mtd_max_bad_blocks(struct mtd_info *mtd,
loff_t ofs, size_t len)
{
- if (!mtd->_max_bad_blocks)
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->_max_bad_blocks)
return -ENOTSUPP;
if (mtd->size < (len + ofs) || ofs < 0)
return -EINVAL;
- return mtd->_max_bad_blocks(mtd, ofs, len);
+ return master->_max_bad_blocks(master, mtd_get_master_ofs(mtd, ofs),
+ len);
}
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
@@ -438,16 +521,19 @@ int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf);
int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, u_char *buf);
+ size_t *retlen, const u_char *buf);
int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len);
+int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len);
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen);
static inline void mtd_sync(struct mtd_info *mtd)
{
- if (mtd->_sync)
- mtd->_sync(mtd);
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (master->_sync)
+ master->_sync(master);
}
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
@@ -459,13 +545,31 @@ int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs);
static inline int mtd_suspend(struct mtd_info *mtd)
{
- return mtd->_suspend ? mtd->_suspend(mtd) : 0;
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ret;
+
+ if (master->master.suspended)
+ return 0;
+
+ ret = master->_suspend ? master->_suspend(master) : 0;
+ if (ret)
+ return ret;
+
+ master->master.suspended = 1;
+ return 0;
}
static inline void mtd_resume(struct mtd_info *mtd)
{
- if (mtd->_resume)
- mtd->_resume(mtd);
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ if (!master->master.suspended)
+ return;
+
+ if (master->_resume)
+ master->_resume(master);
+
+ master->master.suspended = 0;
}
static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
@@ -483,6 +587,34 @@ static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
return do_div(sz, mtd->erasesize);
}
+/**
+ * mtd_align_erase_req - Adjust an erase request to align things on eraseblock
+ * boundaries.
+ * @mtd: the MTD device this erase request applies on
+ * @req: the erase request to adjust
+ *
+ * This function will adjust @req->addr and @req->len to align them on
+ * @mtd->erasesize. Of course we expect @mtd->erasesize to be != 0.
+ */
+static inline void mtd_align_erase_req(struct mtd_info *mtd,
+ struct erase_info *req)
+{
+ u32 mod;
+
+ if (WARN_ON(!mtd->erasesize))
+ return;
+
+ mod = mtd_mod_by_eb(req->addr, mtd);
+ if (mod) {
+ req->addr -= mod;
+ req->len += mod;
+ }
+
+ mod = mtd_mod_by_eb(req->addr + req->len, mtd);
+ if (mod)
+ req->len += mtd->erasesize - mod;
+}
+
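A brief usage sketch (function name assumed): align a caller-supplied byte range before submitting the erase:

/* Hypothetical sketch: erase a byte range, rounded to eraseblocks. */
static int my_erase_range(struct mtd_info *mtd, u64 offset, u64 length)
{
        struct erase_info ei = {
                .addr   = offset,
                .len    = length,
        };

        mtd_align_erase_req(mtd, &ei);  /* widens to eraseblock boundaries */
        return mtd_erase(mtd, &ei);
}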
static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
{
if (mtd->writesize_shift)
@@ -500,7 +632,9 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
static inline int mtd_wunit_per_eb(struct mtd_info *mtd)
{
- return mtd->erasesize / mtd->writesize;
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ return master->erasesize / mtd->writesize;
}
static inline int mtd_offset_to_wunit(struct mtd_info *mtd, loff_t offs)
@@ -517,7 +651,9 @@ static inline loff_t mtd_wunit_to_offset(struct mtd_info *mtd, loff_t base,
static inline int mtd_has_oob(const struct mtd_info *mtd)
{
- return mtd->_read_oob && mtd->_write_oob;
+ struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd);
+
+ return master->_read_oob && master->_write_oob;
}
static inline int mtd_type_is_nand(const struct mtd_info *mtd)
@@ -527,7 +663,9 @@ static inline int mtd_type_is_nand(const struct mtd_info *mtd)
static inline int mtd_can_have_bb(const struct mtd_info *mtd)
{
- return !!mtd->_block_isbad;
+ struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd);
+
+ return !!master->_block_isbad;
}
/* Kernel-side ioctl definitions */
@@ -546,6 +684,7 @@ extern int mtd_device_unregister(struct mtd_info *master);
extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
extern int __get_mtd_device(struct mtd_info *mtd);
extern void __put_mtd_device(struct mtd_info *mtd);
+extern struct mtd_info *of_get_mtd_device_by_node(struct device_node *np);
extern struct mtd_info *get_mtd_device_nm(const char *name);
extern void put_mtd_device(struct mtd_info *mtd);
@@ -561,8 +700,6 @@ extern void register_mtd_user (struct mtd_notifier *new);
extern int unregister_mtd_user (struct mtd_notifier *old);
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);
-void mtd_erase_callback(struct erase_info *instr);
-
static inline int mtd_is_bitflip(int err) {
return err == -EUCLEAN;
}
@@ -577,4 +714,11 @@ static inline int mtd_is_bitflip_or_eccerr(int err) {
unsigned mtd_mmap_capabilities(struct mtd_info *mtd);
+#ifdef CONFIG_DEBUG_FS
+bool mtd_check_expert_analysis_mode(void);
+#else
+static inline bool mtd_check_expert_analysis_mode(void) { return false; }
+#endif
+
#endif /* __MTD_MTD_H__ */
diff --git a/include/linux/mtd/mtdram.h b/include/linux/mtd/mtdram.h
index 628a6a21ddf0..ee8f95643f9b 100644
--- a/include/linux/mtd/mtdram.h
+++ b/include/linux/mtd/mtdram.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MTD_MTDRAM_H__
#define __MTD_MTDRAM_H__
diff --git a/include/linux/mtd/nand-ecc-mtk.h b/include/linux/mtd/nand-ecc-mtk.h
new file mode 100644
index 000000000000..0e48c36e6ca0
--- /dev/null
+++ b/include/linux/mtd/nand-ecc-mtk.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * MTK SDG1 ECC controller
+ *
+ * Copyright (c) 2016 Mediatek
+ * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
+ * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
+ */
+
+#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__
+#define __DRIVERS_MTD_NAND_MTK_ECC_H__
+
+#include <linux/types.h>
+
+enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
+enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE};
+
+struct device_node;
+struct mtk_ecc;
+
+struct mtk_ecc_stats {
+ u32 corrected;
+ u32 bitflips;
+ u32 failed;
+};
+
+struct mtk_ecc_config {
+ enum mtk_ecc_operation op;
+ enum mtk_ecc_mode mode;
+ dma_addr_t addr;
+ u32 strength;
+ u32 sectors;
+ u32 len;
+};
+
+int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32);
+void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int);
+int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation);
+int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
+void mtk_ecc_disable(struct mtk_ecc *);
+void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p);
+unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc);
+
+struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
+void mtk_ecc_release(struct mtk_ecc *);
+
+#endif
diff --git a/include/linux/mtd/nand-ecc-mxic.h b/include/linux/mtd/nand-ecc-mxic.h
new file mode 100644
index 000000000000..b125926e458c
--- /dev/null
+++ b/include/linux/mtd/nand-ecc-mxic.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2019 Macronix
+ * Author: Miquèl Raynal <miquel.raynal@bootlin.com>
+ *
+ * Header for the Macronix external ECC engine.
+ */
+
+#ifndef __MTD_NAND_ECC_MXIC_H__
+#define __MTD_NAND_ECC_MXIC_H__
+
+#include <linux/platform_device.h>
+#include <linux/device.h>
+
+struct mxic_ecc_engine;
+
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_MXIC) && IS_REACHABLE(CONFIG_MTD_NAND_CORE)
+
+struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void);
+struct nand_ecc_engine *mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev);
+void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng);
+int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng,
+ unsigned int direction, dma_addr_t dirmap);
+
+#else /* !CONFIG_MTD_NAND_ECC_MXIC */
+
+static inline struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void)
+{
+ return NULL;
+}
+
+static inline struct nand_ecc_engine *
+mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng) {}
+
+static inline int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng,
+ unsigned int direction,
+ dma_addr_t dirmap)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_MTD_NAND_ECC_MXIC */
+
+#endif /* __MTD_NAND_ECC_MXIC_H__ */
diff --git a/include/linux/mtd/nand-ecc-sw-bch.h b/include/linux/mtd/nand-ecc-sw-bch.h
new file mode 100644
index 000000000000..9da9969505a8
--- /dev/null
+++ b/include/linux/mtd/nand-ecc-sw-bch.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com>
+ *
+ * This file is the header for the NAND BCH ECC implementation.
+ */
+
+#ifndef __MTD_NAND_ECC_SW_BCH_H__
+#define __MTD_NAND_ECC_SW_BCH_H__
+
+#include <linux/mtd/nand.h>
+#include <linux/bch.h>
+
+/**
+ * struct nand_ecc_sw_bch_conf - private software BCH ECC engine structure
+ * @req_ctx: Save request context and tweak the original request to fit the
+ * engine's needs
+ * @code_size: Number of bytes needed to store a code (one code per step)
+ * @calc_buf: Buffer to use when calculating ECC bytes
+ * @code_buf: Buffer to use when reading (raw) ECC bytes from the chip
+ * @bch: BCH control structure
+ * @errloc: error location array
+ * @eccmask: XOR ecc mask, allows erased pages to be decoded as valid
+ */
+struct nand_ecc_sw_bch_conf {
+ struct nand_ecc_req_tweak_ctx req_ctx;
+ unsigned int code_size;
+ u8 *calc_buf;
+ u8 *code_buf;
+ struct bch_control *bch;
+ unsigned int *errloc;
+ unsigned char *eccmask;
+};
+
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)
+
+int nand_ecc_sw_bch_calculate(struct nand_device *nand,
+ const unsigned char *buf, unsigned char *code);
+int nand_ecc_sw_bch_correct(struct nand_device *nand, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc);
+int nand_ecc_sw_bch_init_ctx(struct nand_device *nand);
+void nand_ecc_sw_bch_cleanup_ctx(struct nand_device *nand);
+struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void);
+
+#else /* !CONFIG_MTD_NAND_ECC_SW_BCH */
+
+static inline int nand_ecc_sw_bch_calculate(struct nand_device *nand,
+ const unsigned char *buf,
+ unsigned char *code)
+{
+ return -ENOTSUPP;
+}
+
+static inline int nand_ecc_sw_bch_correct(struct nand_device *nand,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return -ENOTSUPP;
+}
+
+static inline int nand_ecc_sw_bch_init_ctx(struct nand_device *nand)
+{
+ return -ENOTSUPP;
+}
+
+static inline void nand_ecc_sw_bch_cleanup_ctx(struct nand_device *nand) {}
+
+#endif /* CONFIG_MTD_NAND_ECC_SW_BCH */
+
+#endif /* __MTD_NAND_ECC_SW_BCH_H__ */
diff --git a/include/linux/mtd/nand-ecc-sw-hamming.h b/include/linux/mtd/nand-ecc-sw-hamming.h
new file mode 100644
index 000000000000..c6c71894c575
--- /dev/null
+++ b/include/linux/mtd/nand-ecc-sw-hamming.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2000-2010 Steven J. Hill <sjhill@realitydiluted.com>
+ * David Woodhouse <dwmw2@infradead.org>
+ * Thomas Gleixner <tglx@linutronix.de>
+ *
+ * This file is the header for the NAND Hamming ECC implementation.
+ */
+
+#ifndef __MTD_NAND_ECC_SW_HAMMING_H__
+#define __MTD_NAND_ECC_SW_HAMMING_H__
+
+#include <linux/mtd/nand.h>
+
+/**
+ * struct nand_ecc_sw_hamming_conf - private software Hamming ECC engine structure
+ * @req_ctx: Save request context and tweak the original request to fit the
+ * engine's needs
+ * @code_size: Number of bytes needed to store a code (one code per step)
+ * @calc_buf: Buffer to use when calculating ECC bytes
+ * @code_buf: Buffer to use when reading (raw) ECC bytes from the chip
+ * @sm_order: Smart Media special ordering
+ */
+struct nand_ecc_sw_hamming_conf {
+ struct nand_ecc_req_tweak_ctx req_ctx;
+ unsigned int code_size;
+ u8 *calc_buf;
+ u8 *code_buf;
+ unsigned int sm_order;
+};
+
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING)
+
+int nand_ecc_sw_hamming_init_ctx(struct nand_device *nand);
+void nand_ecc_sw_hamming_cleanup_ctx(struct nand_device *nand);
+int ecc_sw_hamming_calculate(const unsigned char *buf, unsigned int step_size,
+ unsigned char *code, bool sm_order);
+int nand_ecc_sw_hamming_calculate(struct nand_device *nand,
+ const unsigned char *buf,
+ unsigned char *code);
+int ecc_sw_hamming_correct(unsigned char *buf, unsigned char *read_ecc,
+ unsigned char *calc_ecc, unsigned int step_size,
+ bool sm_order);
+int nand_ecc_sw_hamming_correct(struct nand_device *nand, unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc);
+
+#else /* !CONFIG_MTD_NAND_ECC_SW_HAMMING */
+
+static inline int nand_ecc_sw_hamming_init_ctx(struct nand_device *nand)
+{
+ return -ENOTSUPP;
+}
+
+static inline void nand_ecc_sw_hamming_cleanup_ctx(struct nand_device *nand) {}
+
+static inline int ecc_sw_hamming_calculate(const unsigned char *buf,
+ unsigned int step_size,
+ unsigned char *code, bool sm_order)
+{
+ return -ENOTSUPP;
+}
+
+static inline int nand_ecc_sw_hamming_calculate(struct nand_device *nand,
+ const unsigned char *buf,
+ unsigned char *code)
+{
+ return -ENOTSUPP;
+}
+
+static inline int ecc_sw_hamming_correct(unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc,
+ unsigned int step_size, bool sm_order)
+{
+ return -ENOTSUPP;
+}
+
+static inline int nand_ecc_sw_hamming_correct(struct nand_device *nand,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return -ENOTSUPP;
+}
+
+#endif /* CONFIG_MTD_NAND_ECC_SW_HAMMING */
+
+#endif /* __MTD_NAND_ECC_SW_HAMMING_H__ */
diff --git a/include/linux/mtd/nand-gpio.h b/include/linux/mtd/nand-gpio.h
index be4f45d89be2..7ab51bc4a380 100644
--- a/include/linux/mtd/nand-gpio.h
+++ b/include/linux/mtd/nand-gpio.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MTD_NAND_GPIO_H
#define __LINUX_MTD_NAND_GPIO_H
#include <linux/mtd/rawnand.h>
struct gpio_nand_platdata {
- int gpio_nce;
- int gpio_nwp;
- int gpio_cle;
- int gpio_ale;
- int gpio_rdy;
void (*adjust_parts)(struct gpio_nand_platdata *, size_t);
struct mtd_partition *parts;
unsigned int num_parts;
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
new file mode 100644
index 000000000000..b2996dc987ff
--- /dev/null
+++ b/include/linux/mtd/nand.h
@@ -0,0 +1,1065 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017 - Free Electrons
+ *
+ * Authors:
+ * Boris Brezillon <boris.brezillon@free-electrons.com>
+ * Peter Pan <peterpandong@micron.com>
+ */
+
+#ifndef __LINUX_MTD_NAND_H
+#define __LINUX_MTD_NAND_H
+
+#include <linux/mtd/mtd.h>
+
+struct nand_device;
+
+/**
+ * struct nand_memory_organization - Memory organization structure
+ * @bits_per_cell: number of bits per NAND cell
+ * @pagesize: page size
+ * @oobsize: OOB area size
+ * @pages_per_eraseblock: number of pages per eraseblock
+ * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
+ * @max_bad_eraseblocks_per_lun: maximum number of bad eraseblocks per LUN
+ * @planes_per_lun: number of planes per LUN
+ * @luns_per_target: number of LUNs per target (target is a synonym for die)
+ * @ntargets: total number of targets exposed by the NAND device
+ */
+struct nand_memory_organization {
+ unsigned int bits_per_cell;
+ unsigned int pagesize;
+ unsigned int oobsize;
+ unsigned int pages_per_eraseblock;
+ unsigned int eraseblocks_per_lun;
+ unsigned int max_bad_eraseblocks_per_lun;
+ unsigned int planes_per_lun;
+ unsigned int luns_per_target;
+ unsigned int ntargets;
+};
+
+#define NAND_MEMORG(bpc, ps, os, ppe, epl, mbb, ppl, lpt, nt) \
+ { \
+ .bits_per_cell = (bpc), \
+ .pagesize = (ps), \
+ .oobsize = (os), \
+ .pages_per_eraseblock = (ppe), \
+ .eraseblocks_per_lun = (epl), \
+ .max_bad_eraseblocks_per_lun = (mbb), \
+ .planes_per_lun = (ppl), \
+ .luns_per_target = (lpt), \
+ .ntargets = (nt), \
+ }
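
As a sketch of how a chip driver might describe its geometry with this macro (the values below are illustrative, not taken from any real datasheet):

	/* Hypothetical single-die SLC chip: 4 KiB pages, 256 B OOB,
	 * 64 pages per eraseblock, 2048 eraseblocks per LUN, at most
	 * 40 bad eraseblocks per LUN, 1 plane, 1 LUN, 1 target.
	 */
	static const struct nand_memory_organization example_memorg =
		NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1);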
+
+/**
+ * struct nand_row_converter - Information needed to convert an absolute offset
+ * into a row address
+ * @lun_addr_shift: position of the LUN identifier in the row address
+ * @eraseblock_addr_shift: position of the eraseblock identifier in the row
+ * address
+ */
+struct nand_row_converter {
+ unsigned int lun_addr_shift;
+ unsigned int eraseblock_addr_shift;
+};
+
+/**
+ * struct nand_pos - NAND position object
+ * @target: the NAND target/die
+ * @lun: the LUN identifier
+ * @plane: the plane within the LUN
+ * @eraseblock: the eraseblock within the LUN
+ * @page: the page within the LUN
+ *
+ * This information is usually used by specific sub-layers to select the
+ * appropriate target/die and to generate a row address to pass to the device.
+ */
+struct nand_pos {
+ unsigned int target;
+ unsigned int lun;
+ unsigned int plane;
+ unsigned int eraseblock;
+ unsigned int page;
+};
+
+/**
+ * enum nand_page_io_req_type - Direction of an I/O request
+ * @NAND_PAGE_READ: from the chip, to the controller
+ * @NAND_PAGE_WRITE: from the controller, to the chip
+ */
+enum nand_page_io_req_type {
+ NAND_PAGE_READ = 0,
+ NAND_PAGE_WRITE,
+};
+
+/**
+ * struct nand_page_io_req - NAND I/O request object
+ * @type: the type of page I/O: read or write
+ * @pos: the position this I/O request is targeting
+ * @dataoffs: the offset within the page
+ * @datalen: number of data bytes to read from/write to this page
+ * @databuf: buffer to store data in or get data from
+ * @ooboffs: the OOB offset within the page
+ * @ooblen: the number of OOB bytes to read from/write to this page
+ * @oobbuf: buffer to store OOB data in or get OOB data from
+ * @mode: one of the %MTD_OPS_XXX modes
+ *
+ * This object is used to pass per-page I/O requests to NAND sub-layers. This
+ * way, all the relevant information is already formatted in a convenient way
+ * and specific NAND layers can focus on translating it into device
+ * commands/operations.
+ */
+struct nand_page_io_req {
+ enum nand_page_io_req_type type;
+ struct nand_pos pos;
+ unsigned int dataoffs;
+ unsigned int datalen;
+ union {
+ const void *out;
+ void *in;
+ } databuf;
+ unsigned int ooboffs;
+ unsigned int ooblen;
+ union {
+ const void *out;
+ void *in;
+ } oobbuf;
+ int mode;
+};
+
+const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void);
+const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void);
+const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void);
+
+/**
+ * enum nand_ecc_engine_type - NAND ECC engine type
+ * @NAND_ECC_ENGINE_TYPE_INVALID: Invalid value
+ * @NAND_ECC_ENGINE_TYPE_NONE: No ECC correction
+ * @NAND_ECC_ENGINE_TYPE_SOFT: Software ECC correction
+ * @NAND_ECC_ENGINE_TYPE_ON_HOST: On host hardware ECC correction
+ * @NAND_ECC_ENGINE_TYPE_ON_DIE: On chip hardware ECC correction
+ */
+enum nand_ecc_engine_type {
+ NAND_ECC_ENGINE_TYPE_INVALID,
+ NAND_ECC_ENGINE_TYPE_NONE,
+ NAND_ECC_ENGINE_TYPE_SOFT,
+ NAND_ECC_ENGINE_TYPE_ON_HOST,
+ NAND_ECC_ENGINE_TYPE_ON_DIE,
+};
+
+/**
+ * enum nand_ecc_placement - NAND ECC bytes placement
+ * @NAND_ECC_PLACEMENT_UNKNOWN: The actual position of the ECC bytes is unknown
+ * @NAND_ECC_PLACEMENT_OOB: The ECC bytes are located in the OOB area
+ * @NAND_ECC_PLACEMENT_INTERLEAVED: Syndrome layout, there are ECC bytes
+ * interleaved with regular data in the main
+ * area
+ */
+enum nand_ecc_placement {
+ NAND_ECC_PLACEMENT_UNKNOWN,
+ NAND_ECC_PLACEMENT_OOB,
+ NAND_ECC_PLACEMENT_INTERLEAVED,
+};
+
+/**
+ * enum nand_ecc_algo - NAND ECC algorithm
+ * @NAND_ECC_ALGO_UNKNOWN: Unknown algorithm
+ * @NAND_ECC_ALGO_HAMMING: Hamming algorithm
+ * @NAND_ECC_ALGO_BCH: Bose-Chaudhuri-Hocquenghem algorithm
+ * @NAND_ECC_ALGO_RS: Reed-Solomon algorithm
+ */
+enum nand_ecc_algo {
+ NAND_ECC_ALGO_UNKNOWN,
+ NAND_ECC_ALGO_HAMMING,
+ NAND_ECC_ALGO_BCH,
+ NAND_ECC_ALGO_RS,
+};
+
+/**
+ * struct nand_ecc_props - NAND ECC properties
+ * @engine_type: ECC engine type
+ * @placement: OOB placement (if relevant)
+ * @algo: ECC algorithm (if relevant)
+ * @strength: ECC strength
+ * @step_size: Number of bytes per step
+ * @flags: Misc properties
+ */
+struct nand_ecc_props {
+ enum nand_ecc_engine_type engine_type;
+ enum nand_ecc_placement placement;
+ enum nand_ecc_algo algo;
+ unsigned int strength;
+ unsigned int step_size;
+ unsigned int flags;
+};
+
+#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }
+
+/* NAND ECC misc flags */
+#define NAND_ECC_MAXIMIZE_STRENGTH BIT(0)
+
+/**
+ * struct nand_bbt - bad block table object
+ * @cache: in memory BBT cache
+ */
+struct nand_bbt {
+ unsigned long *cache;
+};
+
+/**
+ * struct nand_ops - NAND operations
+ * @erase: erase a specific block. No need to check if the block is bad before
+ * erasing; this has been taken care of by the generic NAND layer
+ * @markbad: mark a specific block bad. No need to check if the block is
+ * already marked bad; this has been taken care of by the generic
+ * NAND layer. This method should just write the BBM (Bad Block
+ * Marker) so that future calls to struct_nand_ops->isbad() return
+ * true
+ * @isbad: check whether a block is bad or not. This method should just read
+ * the BBM and return whether the block is bad or not based on what it
+ * reads
+ *
+ * These are all low level operations that should be implemented by specialized
+ * NAND layers (SPI NAND, raw NAND, ...).
+ */
+struct nand_ops {
+ int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
+ int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
+ bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
+};
+
+/**
+ * struct nand_ecc_context - Context for the ECC engine
+ * @conf: basic ECC engine parameters
+ * @nsteps: number of ECC steps
+ * @total: total number of bytes used for storing ECC codes; this is used by
+ * generic OOB layouts
+ * @priv: ECC engine driver private data
+ */
+struct nand_ecc_context {
+ struct nand_ecc_props conf;
+ unsigned int nsteps;
+ unsigned int total;
+ void *priv;
+};
+
+/**
+ * struct nand_ecc_engine_ops - ECC engine operations
+ * @init_ctx: given a desired user configuration for the given NAND device,
+ * requests the ECC engine driver to set up a configuration with
+ * values it supports.
+ * @cleanup_ctx: clean the context initialized by @init_ctx.
+ * @prepare_io_req: is called before reading/writing a page to prepare the I/O
+ * request to be performed with ECC correction.
+ * @finish_io_req: is called after reading/writing a page to terminate the I/O
+ * request and ensure proper ECC correction.
+ */
+struct nand_ecc_engine_ops {
+ int (*init_ctx)(struct nand_device *nand);
+ void (*cleanup_ctx)(struct nand_device *nand);
+ int (*prepare_io_req)(struct nand_device *nand,
+ struct nand_page_io_req *req);
+ int (*finish_io_req)(struct nand_device *nand,
+ struct nand_page_io_req *req);
+};
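
A minimal sketch of the hooks an ECC engine driver might provide (all foo_* names are hypothetical):

	static int foo_ecc_init_ctx(struct nand_device *nand)
	{
		/* Reconcile nand->ecc.user_conf with what the engine
		 * supports, then stash private state in
		 * nand->ecc.ctx.priv.
		 */
		return 0;
	}

	static void foo_ecc_cleanup_ctx(struct nand_device *nand)
	{
		/* Free whatever foo_ecc_init_ctx() allocated. */
	}

	static struct nand_ecc_engine_ops foo_ecc_engine_ops = {
		.init_ctx = foo_ecc_init_ctx,
		.cleanup_ctx = foo_ecc_cleanup_ctx,
		/* .prepare_io_req/.finish_io_req would bracket each
		 * page access with the actual correction work.
		 */
	};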
+
+/**
+ * enum nand_ecc_engine_integration - How the NAND ECC engine is integrated
+ * @NAND_ECC_ENGINE_INTEGRATION_INVALID: Invalid value
+ * @NAND_ECC_ENGINE_INTEGRATION_PIPELINED: Pipelined engine, performs on-the-fly
+ * correction, does not need to copy
+ * data around
+ * @NAND_ECC_ENGINE_INTEGRATION_EXTERNAL: External engine, needs to bring the
+ * data into its own area before use
+ */
+enum nand_ecc_engine_integration {
+ NAND_ECC_ENGINE_INTEGRATION_INVALID,
+ NAND_ECC_ENGINE_INTEGRATION_PIPELINED,
+ NAND_ECC_ENGINE_INTEGRATION_EXTERNAL,
+};
+
+/**
+ * struct nand_ecc_engine - ECC engine abstraction for NAND devices
+ * @dev: Host device
+ * @node: Private field for registration time
+ * @ops: ECC engine operations
+ * @integration: How the engine is integrated with the host
+ * (only relevant on %NAND_ECC_ENGINE_TYPE_ON_HOST engines)
+ * @priv: Private data
+ */
+struct nand_ecc_engine {
+ struct device *dev;
+ struct list_head node;
+ struct nand_ecc_engine_ops *ops;
+ enum nand_ecc_engine_integration integration;
+ void *priv;
+};
+
+void of_get_nand_ecc_user_config(struct nand_device *nand);
+int nand_ecc_init_ctx(struct nand_device *nand);
+void nand_ecc_cleanup_ctx(struct nand_device *nand);
+int nand_ecc_prepare_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req);
+int nand_ecc_finish_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req);
+bool nand_ecc_is_strong_enough(struct nand_device *nand);
+
+#if IS_REACHABLE(CONFIG_MTD_NAND_CORE)
+int nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine);
+int nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine);
+#else
+static inline int
+nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine)
+{
+ return -ENOTSUPP;
+}
+static inline int
+nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand);
+struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand);
+struct nand_ecc_engine *nand_ecc_get_on_host_hw_engine(struct nand_device *nand);
+void nand_ecc_put_on_host_hw_engine(struct nand_device *nand);
+struct device *nand_ecc_get_engine_dev(struct device *host);
+
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING)
+struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void);
+#else
+static inline struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void)
+{
+ return NULL;
+}
+#endif /* CONFIG_MTD_NAND_ECC_SW_HAMMING */
+
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)
+struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void);
+#else
+static inline struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void)
+{
+ return NULL;
+}
+#endif /* CONFIG_MTD_NAND_ECC_SW_BCH */
+
+/**
+ * struct nand_ecc_req_tweak_ctx - Help for automatically tweaking requests
+ * @orig_req: Pointer to the original IO request
+ * @nand: Related NAND device, to have access to its memory organization
+ * @page_buffer_size: Real size of the page buffer to use (can be set by the
+ * user before the tweaking mechanism initialization)
+ * @oob_buffer_size: Real size of the OOB buffer to use (can be set by the
+ * user before the tweaking mechanism initialization)
+ * @spare_databuf: Data bounce buffer
+ * @spare_oobbuf: OOB bounce buffer
+ * @bounce_data: Flag indicating a data bounce buffer is used
+ * @bounce_oob: Flag indicating an OOB bounce buffer is used
+ */
+struct nand_ecc_req_tweak_ctx {
+ struct nand_page_io_req orig_req;
+ struct nand_device *nand;
+ unsigned int page_buffer_size;
+ unsigned int oob_buffer_size;
+ void *spare_databuf;
+ void *spare_oobbuf;
+ bool bounce_data;
+ bool bounce_oob;
+};
+
+int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx,
+ struct nand_device *nand);
+void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx);
+void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx,
+ struct nand_page_io_req *req);
+void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx,
+ struct nand_page_io_req *req);
+
+/**
+ * struct nand_ecc - Information relative to the ECC
+ * @defaults: Default values, which depend on the underlying subsystem
+ * @requirements: ECC requirements from the NAND chip perspective
+ * @user_conf: User desires in terms of ECC parameters
+ * @ctx: ECC context for the ECC engine, derived from the device @requirements,
+ * the @user_conf and the @defaults
+ * @ondie_engine: On-die ECC engine reference, if any
+ * @engine: ECC engine actually bound
+ */
+struct nand_ecc {
+ struct nand_ecc_props defaults;
+ struct nand_ecc_props requirements;
+ struct nand_ecc_props user_conf;
+ struct nand_ecc_context ctx;
+ struct nand_ecc_engine *ondie_engine;
+ struct nand_ecc_engine *engine;
+};
+
+/**
+ * struct nand_device - NAND device
+ * @mtd: MTD instance attached to the NAND device
+ * @memorg: memory layout
+ * @ecc: NAND ECC object attached to the NAND device
+ * @rowconv: position to row address converter
+ * @bbt: bad block table info
+ * @ops: NAND operations attached to the NAND device
+ *
+ * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
+ * should declare their own NAND object embedding a nand_device struct (that's
+ * how inheritance is done).
+ * struct_nand_device->memorg and struct_nand_device->ecc.requirements should
+ * be filled at device detection time to reflect the NAND device
+ * capabilities/requirements. Once this is done nanddev_init() can be called.
+ * It will take care of converting NAND information into MTD ones, which means
+ * the specialized NAND layers should never manually tweak
+ * struct_nand_device->mtd except for the ->_read/write() hooks.
+ */
+struct nand_device {
+ struct mtd_info mtd;
+ struct nand_memory_organization memorg;
+ struct nand_ecc ecc;
+ struct nand_row_converter rowconv;
+ struct nand_bbt bbt;
+ const struct nand_ops *ops;
+};
+
+/**
+ * struct nand_io_iter - NAND I/O iterator
+ * @req: current I/O request
+ * @oobbytes_per_page: maximum number of OOB bytes per page
+ * @dataleft: remaining number of data bytes to read/write
+ * @oobleft: remaining number of OOB bytes to read/write
+ *
+ * Can be used by specialized NAND layers to iterate over all pages covered
+ * by an MTD I/O request, which should greatly simplify the boilerplate
+ * code needed to read/write data from/to a NAND device.
+ */
+struct nand_io_iter {
+ struct nand_page_io_req req;
+ unsigned int oobbytes_per_page;
+ unsigned int dataleft;
+ unsigned int oobleft;
+};
+
+/**
+ * mtd_to_nanddev() - Get the NAND device attached to the MTD instance
+ * @mtd: MTD instance
+ *
+ * Return: the NAND device embedding @mtd.
+ */
+static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd)
+{
+ return container_of(mtd, struct nand_device, mtd);
+}
+
+/**
+ * nanddev_to_mtd() - Get the MTD device attached to a NAND device
+ * @nand: NAND device
+ *
+ * Return: the MTD device embedded in @nand.
+ */
+static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
+{
+ return &nand->mtd;
+}
+
+/**
+ * nanddev_bits_per_cell() - Get the number of bits per cell
+ * @nand: NAND device
+ *
+ * Return: the number of bits per cell.
+ */
+static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
+{
+ return nand->memorg.bits_per_cell;
+}
+
+/**
+ * nanddev_page_size() - Get NAND page size
+ * @nand: NAND device
+ *
+ * Return: the page size.
+ */
+static inline size_t nanddev_page_size(const struct nand_device *nand)
+{
+ return nand->memorg.pagesize;
+}
+
+/**
+ * nanddev_per_page_oobsize() - Get NAND OOB size
+ * @nand: NAND device
+ *
+ * Return: the OOB size.
+ */
+static inline unsigned int
+nanddev_per_page_oobsize(const struct nand_device *nand)
+{
+ return nand->memorg.oobsize;
+}
+
+/**
+ * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
+ * @nand: NAND device
+ *
+ * Return: the number of pages per eraseblock.
+ */
+static inline unsigned int
+nanddev_pages_per_eraseblock(const struct nand_device *nand)
+{
+ return nand->memorg.pages_per_eraseblock;
+}
+
+/**
+ * nanddev_pages_per_target() - Get the number of pages per target
+ * @nand: NAND device
+ *
+ * Return: the number of pages per target.
+ */
+static inline unsigned int
+nanddev_pages_per_target(const struct nand_device *nand)
+{
+ return nand->memorg.pages_per_eraseblock *
+ nand->memorg.eraseblocks_per_lun *
+ nand->memorg.luns_per_target;
+}
+
+/**
+ * nanddev_eraseblock_size() - Get NAND eraseblock size
+ * @nand: NAND device
+ *
+ * Return: the eraseblock size.
+ */
+static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
+{
+ return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
+}
+
+/**
+ * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
+ * @nand: NAND device
+ *
+ * Return: the number of eraseblocks per LUN.
+ */
+static inline unsigned int
+nanddev_eraseblocks_per_lun(const struct nand_device *nand)
+{
+ return nand->memorg.eraseblocks_per_lun;
+}
+
+/**
+ * nanddev_eraseblocks_per_target() - Get the number of eraseblocks per target
+ * @nand: NAND device
+ *
+ * Return: the number of eraseblocks per target.
+ */
+static inline unsigned int
+nanddev_eraseblocks_per_target(const struct nand_device *nand)
+{
+ return nand->memorg.eraseblocks_per_lun * nand->memorg.luns_per_target;
+}
+
+/**
+ * nanddev_target_size() - Get the total size provided by a single target/die
+ * @nand: NAND device
+ *
+ * Return: the total size exposed by a single target/die in bytes.
+ */
+static inline u64 nanddev_target_size(const struct nand_device *nand)
+{
+ return (u64)nand->memorg.luns_per_target *
+ nand->memorg.eraseblocks_per_lun *
+ nand->memorg.pages_per_eraseblock *
+ nand->memorg.pagesize;
+}
+
+/**
+ * nanddev_ntargets() - Get the total number of targets
+ * @nand: NAND device
+ *
+ * Return: the number of targets/dies exposed by @nand.
+ */
+static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
+{
+ return nand->memorg.ntargets;
+}
+
+/**
+ * nanddev_neraseblocks() - Get the total number of eraseblocks
+ * @nand: NAND device
+ *
+ * Return: the total number of eraseblocks exposed by @nand.
+ */
+static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
+{
+ return nand->memorg.ntargets * nand->memorg.luns_per_target *
+ nand->memorg.eraseblocks_per_lun;
+}
+
+/**
+ * nanddev_size() - Get NAND size
+ * @nand: NAND device
+ *
+ * Return: the total size (in bytes) exposed by @nand.
+ */
+static inline u64 nanddev_size(const struct nand_device *nand)
+{
+ return nanddev_target_size(nand) * nanddev_ntargets(nand);
+}
+
+/**
+ * nanddev_get_memorg() - Extract memory organization info from a NAND device
+ * @nand: NAND device
+ *
+ * This can be used by the upper layer to fill the memorg info before calling
+ * nanddev_init().
+ *
+ * Return: the memorg object embedded in the NAND device.
+ */
+static inline struct nand_memory_organization *
+nanddev_get_memorg(struct nand_device *nand)
+{
+ return &nand->memorg;
+}
+
+/**
+ * nanddev_get_ecc_conf() - Extract the ECC configuration from a NAND device
+ * @nand: NAND device
+ */
+static inline const struct nand_ecc_props *
+nanddev_get_ecc_conf(struct nand_device *nand)
+{
+ return &nand->ecc.ctx.conf;
+}
+
+/**
+ * nanddev_get_ecc_nsteps() - Extract the number of ECC steps
+ * @nand: NAND device
+ */
+static inline unsigned int
+nanddev_get_ecc_nsteps(struct nand_device *nand)
+{
+ return nand->ecc.ctx.nsteps;
+}
+
+/**
+ * nanddev_get_ecc_bytes_per_step() - Extract the number of ECC bytes per step
+ * @nand: NAND device
+ */
+static inline unsigned int
+nanddev_get_ecc_bytes_per_step(struct nand_device *nand)
+{
+ return nand->ecc.ctx.total / nand->ecc.ctx.nsteps;
+}
+
+/**
+ * nanddev_get_ecc_requirements() - Extract the ECC requirements from a NAND
+ * device
+ * @nand: NAND device
+ */
+static inline const struct nand_ecc_props *
+nanddev_get_ecc_requirements(struct nand_device *nand)
+{
+ return &nand->ecc.requirements;
+}
+
+/**
+ * nanddev_set_ecc_requirements() - Assign the ECC requirements of a NAND
+ * device
+ * @nand: NAND device
+ * @reqs: Requirements
+ */
+static inline void
+nanddev_set_ecc_requirements(struct nand_device *nand,
+ const struct nand_ecc_props *reqs)
+{
+ nand->ecc.requirements = *reqs;
+}
+
+int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
+ struct module *owner);
+void nanddev_cleanup(struct nand_device *nand);
+
+/**
+ * nanddev_register() - Register a NAND device
+ * @nand: NAND device
+ *
+ * Register a NAND device.
+ * This function is just a wrapper around mtd_device_register()
+ * registering the MTD device embedded in @nand.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+static inline int nanddev_register(struct nand_device *nand)
+{
+ return mtd_device_register(&nand->mtd, NULL, 0);
+}
+
+/**
+ * nanddev_unregister() - Unregister a NAND device
+ * @nand: NAND device
+ *
+ * Unregister a NAND device.
+ * This function is just a wrapper around mtd_device_unregister()
+ * unregistering the MTD device embedded in @nand.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+static inline int nanddev_unregister(struct nand_device *nand)
+{
+ return mtd_device_unregister(&nand->mtd);
+}
+
+/**
+ * nanddev_set_of_node() - Attach a DT node to a NAND device
+ * @nand: NAND device
+ * @np: DT node
+ *
+ * Attach a DT node to a NAND device.
+ */
+static inline void nanddev_set_of_node(struct nand_device *nand,
+ struct device_node *np)
+{
+ mtd_set_of_node(&nand->mtd, np);
+}
+
+/**
+ * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device
+ * @nand: NAND device
+ *
+ * Return: the DT node attached to @nand.
+ */
+static inline struct device_node *nanddev_get_of_node(struct nand_device *nand)
+{
+ return mtd_get_of_node(&nand->mtd);
+}
+
+/**
+ * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
+ * @nand: NAND device
+ * @offs: absolute NAND offset (usually passed by the MTD layer)
+ * @pos: a NAND position object to fill in
+ *
+ * Converts @offs into a nand_pos representation.
+ *
+ * Return: the offset within the NAND page pointed to by @pos.
+ */
+static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
+ loff_t offs,
+ struct nand_pos *pos)
+{
+ unsigned int pageoffs;
+ u64 tmp = offs;
+
+ pageoffs = do_div(tmp, nand->memorg.pagesize);
+ pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
+ pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
+ pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
+ pos->lun = do_div(tmp, nand->memorg.luns_per_target);
+ pos->target = tmp;
+
+ return pageoffs;
+}
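
A worked conversion, assuming a hypothetical geometry of pagesize = 4096, pages_per_eraseblock = 64, eraseblocks_per_lun = 2048, planes_per_lun = 1 and luns_per_target = 1:

	/*
	 * For offs = 0x42100:
	 *   pageoffs       = 0x42100 % 4096        = 0x100
	 *   pos.page       = (0x42100 / 4096) % 64 = 66 % 64 = 2
	 *   pos.eraseblock = (66 / 64) % 2048      = 1
	 *   pos.plane      = 1 % 1 = 0, pos.lun = 0, pos.target = 0
	 *
	 * i.e. @pos lands on page 2 of eraseblock 1 and the function
	 * returns 0x100, the byte offset within that page.
	 */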
+
+/**
+ * nanddev_pos_cmp() - Compare two NAND positions
+ * @a: First NAND position
+ * @b: Second NAND position
+ *
+ * Compares two NAND positions.
+ *
+ * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b.
+ */
+static inline int nanddev_pos_cmp(const struct nand_pos *a,
+ const struct nand_pos *b)
+{
+ if (a->target != b->target)
+ return a->target < b->target ? -1 : 1;
+
+ if (a->lun != b->lun)
+ return a->lun < b->lun ? -1 : 1;
+
+ if (a->eraseblock != b->eraseblock)
+ return a->eraseblock < b->eraseblock ? -1 : 1;
+
+ if (a->page != b->page)
+ return a->page < b->page ? -1 : 1;
+
+ return 0;
+}
+
+/**
+ * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset
+ * @nand: NAND device
+ * @pos: the NAND position to convert
+ *
+ * Converts @pos NAND position into an absolute offset.
+ *
+ * Return: the absolute offset. Note that @pos points to the beginning of a
+ * page; if one wants to point to a specific offset within this page,
+ * the returned offset has to be adjusted manually.
+ */
+static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
+ const struct nand_pos *pos)
+{
+ unsigned int npages;
+
+ npages = pos->page +
+ ((pos->eraseblock +
+ (pos->lun +
+ (pos->target * nand->memorg.luns_per_target)) *
+ nand->memorg.eraseblocks_per_lun) *
+ nand->memorg.pages_per_eraseblock);
+
+ return (loff_t)npages * nand->memorg.pagesize;
+}
+
+/**
+ * nanddev_pos_to_row() - Extract a row address from a NAND position
+ * @nand: NAND device
+ * @pos: the position to convert
+ *
+ * Converts a NAND position into a row address that can then be passed to the
+ * device.
+ *
+ * Return: the row address extracted from @pos.
+ */
+static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
+ const struct nand_pos *pos)
+{
+ return (pos->lun << nand->rowconv.lun_addr_shift) |
+ (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
+ pos->page;
+}
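
For example, with hypothetical shift values of eraseblock_addr_shift = 6 and lun_addr_shift = 17, the position { .lun = 0, .eraseblock = 3, .page = 10 } would yield:

	row = (0 << 17) | (3 << 6) | 10;	/* = 0xca */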
+
+/**
+ * nanddev_pos_next_target() - Move a position to the next target/die
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next target/die. Useful when you
+ * want to iterate over all targets/dies of a NAND device.
+ */
+static inline void nanddev_pos_next_target(struct nand_device *nand,
+ struct nand_pos *pos)
+{
+ pos->page = 0;
+ pos->plane = 0;
+ pos->eraseblock = 0;
+ pos->lun = 0;
+ pos->target++;
+}
+
+/**
+ * nanddev_pos_next_lun() - Move a position to the next LUN
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next LUN. Useful when you want to
+ * iterate over all LUNs of a NAND device.
+ */
+static inline void nanddev_pos_next_lun(struct nand_device *nand,
+ struct nand_pos *pos)
+{
+ if (pos->lun >= nand->memorg.luns_per_target - 1)
+ return nanddev_pos_next_target(nand, pos);
+
+ pos->lun++;
+ pos->page = 0;
+ pos->plane = 0;
+ pos->eraseblock = 0;
+}
+
+/**
+ * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next eraseblock. Useful when you
+ * want to iterate over all eraseblocks of a NAND device.
+ */
+static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
+ struct nand_pos *pos)
+{
+ if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
+ return nanddev_pos_next_lun(nand, pos);
+
+ pos->eraseblock++;
+ pos->page = 0;
+ pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
+}
+
+/**
+ * nanddev_pos_next_page() - Move a position to the next page
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next page. Useful when you want to
+ * iterate over all pages of a NAND device.
+ */
+static inline void nanddev_pos_next_page(struct nand_device *nand,
+ struct nand_pos *pos)
+{
+ if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
+ return nanddev_pos_next_eraseblock(nand, pos);
+
+ pos->page++;
+}
+
+/**
+ * nanddev_io_iter_init - Initialize a NAND I/O iterator
+ * @nand: NAND device
+ * @reqtype: direction of the I/O request (read or write)
+ * @offs: absolute offset
+ * @req: MTD request
+ * @iter: NAND I/O iterator
+ *
+ * Initializes a NAND iterator based on the information passed by the MTD
+ * layer.
+ */
+static inline void nanddev_io_iter_init(struct nand_device *nand,
+ enum nand_page_io_req_type reqtype,
+ loff_t offs, struct mtd_oob_ops *req,
+ struct nand_io_iter *iter)
+{
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+
+ iter->req.type = reqtype;
+ iter->req.mode = req->mode;
+ iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
+ iter->req.ooboffs = req->ooboffs;
+ iter->oobbytes_per_page = mtd_oobavail(mtd, req);
+ iter->dataleft = req->len;
+ iter->oobleft = req->ooblen;
+ iter->req.databuf.in = req->datbuf;
+ iter->req.datalen = min_t(unsigned int,
+ nand->memorg.pagesize - iter->req.dataoffs,
+ iter->dataleft);
+ iter->req.oobbuf.in = req->oobbuf;
+ iter->req.ooblen = min_t(unsigned int,
+ iter->oobbytes_per_page - iter->req.ooboffs,
+ iter->oobleft);
+}
+
+/**
+ * nanddev_io_iter_next_page - Move to the next page
+ * @nand: NAND device
+ * @iter: NAND I/O iterator
+ *
+ * Updates the @iter to point to the next page.
+ */
+static inline void nanddev_io_iter_next_page(struct nand_device *nand,
+ struct nand_io_iter *iter)
+{
+ nanddev_pos_next_page(nand, &iter->req.pos);
+ iter->dataleft -= iter->req.datalen;
+ iter->req.databuf.in += iter->req.datalen;
+ iter->oobleft -= iter->req.ooblen;
+ iter->req.oobbuf.in += iter->req.ooblen;
+ iter->req.dataoffs = 0;
+ iter->req.ooboffs = 0;
+ iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
+ iter->dataleft);
+ iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
+ iter->oobleft);
+}
+
+/**
+ * nanddev_io_iter_end - Check whether the iteration is over
+ * @nand: NAND device
+ * @iter: NAND I/O iterator
+ *
+ * Check whether @iter has reached the end of the NAND portion it was asked to
+ * iterate over.
+ *
+ * Return: true if @iter has reached the end of the iteration request, false
+ * otherwise.
+ */
+static inline bool nanddev_io_iter_end(struct nand_device *nand,
+ const struct nand_io_iter *iter)
+{
+ if (iter->dataleft || iter->oobleft)
+ return false;
+
+ return true;
+}
+
+/**
+ * nanddev_io_for_each_page - Iterate over all NAND pages contained in an MTD
+ * I/O request
+ * @nand: NAND device
+ * @type: request type (read or write)
+ * @start: start address to read/write from
+ * @req: MTD I/O request
+ * @iter: NAND I/O iterator
+ *
+ * Should be used to iterate over all pages that are contained in an MTD
+ * request.
+ */
+#define nanddev_io_for_each_page(nand, type, start, req, iter) \
+ for (nanddev_io_iter_init(nand, type, start, req, iter); \
+ !nanddev_io_iter_end(nand, iter); \
+ nanddev_io_iter_next_page(nand, iter))
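
As a sketch, a specialized layer could build its mtd->_read_oob() implementation on top of this iterator; foo_read_page() and the surrounding driver are hypothetical:

	static int foo_mtd_read_oob(struct mtd_info *mtd, loff_t from,
				    struct mtd_oob_ops *ops)
	{
		struct nand_device *nand = mtd_to_nanddev(mtd);
		struct nand_io_iter iter;
		int ret;

		nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
			ret = foo_read_page(nand, &iter.req); /* device specific */
			if (ret)
				return ret;
		}

		/* A real implementation would also update ops->retlen and
		 * ops->oobretlen as pages complete.
		 */
		return 0;
	}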
+
+bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
+bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
+int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
+
+/* ECC related functions */
+int nanddev_ecc_engine_init(struct nand_device *nand);
+void nanddev_ecc_engine_cleanup(struct nand_device *nand);
+
+static inline void *nand_to_ecc_ctx(struct nand_device *nand)
+{
+ return nand->ecc.ctx.priv;
+}
+
+/* BBT related functions */
+enum nand_bbt_block_status {
+ NAND_BBT_BLOCK_STATUS_UNKNOWN,
+ NAND_BBT_BLOCK_GOOD,
+ NAND_BBT_BLOCK_WORN,
+ NAND_BBT_BLOCK_RESERVED,
+ NAND_BBT_BLOCK_FACTORY_BAD,
+ NAND_BBT_BLOCK_NUM_STATUS,
+};
+
+int nanddev_bbt_init(struct nand_device *nand);
+void nanddev_bbt_cleanup(struct nand_device *nand);
+int nanddev_bbt_update(struct nand_device *nand);
+int nanddev_bbt_get_block_status(const struct nand_device *nand,
+ unsigned int entry);
+int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
+ enum nand_bbt_block_status status);
+int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);
+
+/**
+ * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry
+ * @nand: NAND device
+ * @pos: the NAND position we want to get BBT entry for
+ *
+ * Return the BBT entry used to store information about the eraseblock pointed
+ * to by @pos.
+ *
+ * Return: the BBT entry storing information about the eraseblock pointed to
+ * by @pos.
+ */
+static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand,
+ const struct nand_pos *pos)
+{
+ return pos->eraseblock +
+ ((pos->lun + (pos->target * nand->memorg.luns_per_target)) *
+ nand->memorg.eraseblocks_per_lun);
+}
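
A quick worked example with a hypothetical geometry of eraseblocks_per_lun = 2048 and luns_per_target = 2: the position { .target = 1, .lun = 0, .eraseblock = 5 } maps to entry 5 + ((0 + 1 * 2) * 2048) = 4101 in the BBT cache.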
+
+/**
+ * nanddev_bbt_is_initialized() - Check if the BBT has been initialized
+ * @nand: NAND device
+ *
+ * Return: true if the BBT has been initialized, false otherwise.
+ */
+static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
+{
+ return !!nand->bbt.cache;
+}
+
+/* MTD -> NAND helper functions. */
+int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
+int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len);
+
+#endif /* __LINUX_MTD_NAND_H */
diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h
deleted file mode 100644
index 98f20ef05d60..000000000000
--- a/include/linux/mtd/nand_bch.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This file is the header for the NAND BCH ECC implementation.
- */
-
-#ifndef __MTD_NAND_BCH_H__
-#define __MTD_NAND_BCH_H__
-
-struct mtd_info;
-struct nand_bch_control;
-
-#if defined(CONFIG_MTD_NAND_ECC_BCH)
-
-static inline int mtd_nand_has_bch(void) { return 1; }
-
-/*
- * Calculate BCH ecc code
- */
-int nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
- u_char *ecc_code);
-
-/*
- * Detect and correct bit errors
- */
-int nand_bch_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc,
- u_char *calc_ecc);
-/*
- * Initialize BCH encoder/decoder
- */
-struct nand_bch_control *nand_bch_init(struct mtd_info *mtd);
-/*
- * Release BCH encoder/decoder resources
- */
-void nand_bch_free(struct nand_bch_control *nbc);
-
-#else /* !CONFIG_MTD_NAND_ECC_BCH */
-
-static inline int mtd_nand_has_bch(void) { return 0; }
-
-static inline int
-nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
- u_char *ecc_code)
-{
- return -1;
-}
-
-static inline int
-nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
- unsigned char *read_ecc, unsigned char *calc_ecc)
-{
- return -ENOTSUPP;
-}
-
-static inline struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
-{
- return NULL;
-}
-
-static inline void nand_bch_free(struct nand_bch_control *nbc) {}
-
-#endif /* CONFIG_MTD_NAND_ECC_BCH */
-
-#endif /* __MTD_NAND_BCH_H__ */
diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h
deleted file mode 100644
index 4d8406c81652..000000000000
--- a/include/linux/mtd/nand_ecc.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * drivers/mtd/nand_ecc.h
- *
- * Copyright (C) 2000-2010 Steven J. Hill <sjhill@realitydiluted.com>
- * David Woodhouse <dwmw2@infradead.org>
- * Thomas Gleixner <tglx@linutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This file is the header for the ECC algorithm.
- */
-
-#ifndef __MTD_NAND_ECC_H__
-#define __MTD_NAND_ECC_H__
-
-struct mtd_info;
-
-/*
- * Calculate 3 byte ECC code for eccsize byte block
- */
-void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize,
- u_char *ecc_code);
-
-/*
- * Calculate 3 byte ECC code for 256/512 byte block
- */
-int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code);
-
-/*
- * Detect and correct a 1 bit error for eccsize byte block
- */
-int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc,
- unsigned int eccsize);
-
-/*
- * Detect and correct a 1 bit error for 256/512 byte block
- */
-int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
-
-#endif /* __MTD_NAND_ECC_H__ */
diff --git a/include/linux/mtd/ndfc.h b/include/linux/mtd/ndfc.h
index d0558a982628..98f075b86931 100644
--- a/include/linux/mtd/ndfc.h
+++ b/include/linux/mtd/ndfc.h
@@ -1,15 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * linux/include/linux/mtd/ndfc.h
- *
* Copyright (c) 2006 Thomas Gleixner <tglx@linutronix.de>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Info:
* Contains defines, datastructures for ndfc nand controller
- *
*/
#ifndef __LINUX_MTD_NDFC_H
#define __LINUX_MTD_NDFC_H
diff --git a/include/linux/mtd/nftl.h b/include/linux/mtd/nftl.h
index 044daa02b8ff..4423d3b385b1 100644
--- a/include/linux/mtd/nftl.h
+++ b/include/linux/mtd/nftl.h
@@ -1,20 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __MTD_NFTL_H__
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 0aaa98b219a4..1e517961d0ba 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/linux/mtd/onenand.h
*
* Copyright © 2005-2009 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MTD_ONENAND_H
@@ -94,6 +91,7 @@ struct onenand_chip {
unsigned int technology;
unsigned int density_mask;
unsigned int options;
+ unsigned int badblockpos;
unsigned int erase_shift;
unsigned int page_shift;
@@ -188,6 +186,8 @@ struct onenand_chip {
/* Check byte access in OneNAND */
#define ONENAND_CHECK_BYTE_ACCESS(addr) (addr & 0x1)
+#define ONENAND_BADBLOCK_POS 0
+
/*
* Options bits
*/
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index d60130f88eed..5f728407a579 100644
--- a/include/linux/mtd/onenand_regs.h
+++ b/include/linux/mtd/onenand_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/include/linux/mtd/onenand_regs.h
*
@@ -5,10 +6,6 @@
*
* Copyright (C) 2005-2007 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ONENAND_REG_H
@@ -80,6 +77,7 @@
#define ONENAND_DEVICE_DENSITY_1Gb (0x003)
#define ONENAND_DEVICE_DENSITY_2Gb (0x004)
#define ONENAND_DEVICE_DENSITY_4Gb (0x005)
+#define ONENAND_DEVICE_DENSITY_8Gb (0x006)
/*
* Version ID Register F002h (R)
diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h
new file mode 100644
index 000000000000..a7376f9beddf
--- /dev/null
+++ b/include/linux/mtd/onfi.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
+ * Steven J. Hill <sjhill@realitydiluted.com>
+ * Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Contains all ONFI related definitions
+ */
+
+#ifndef __LINUX_MTD_ONFI_H
+#define __LINUX_MTD_ONFI_H
+
+#include <linux/types.h>
+#include <linux/bitfield.h>
+
+/* ONFI version bits */
+#define ONFI_VERSION_1_0 BIT(1)
+#define ONFI_VERSION_2_0 BIT(2)
+#define ONFI_VERSION_2_1 BIT(3)
+#define ONFI_VERSION_2_2 BIT(4)
+#define ONFI_VERSION_2_3 BIT(5)
+#define ONFI_VERSION_3_0 BIT(6)
+#define ONFI_VERSION_3_1 BIT(7)
+#define ONFI_VERSION_3_2 BIT(8)
+#define ONFI_VERSION_4_0 BIT(9)
+
+/* ONFI features */
+#define ONFI_FEATURE_16_BIT_BUS BIT(0)
+#define ONFI_FEATURE_NV_DDR BIT(5)
+#define ONFI_FEATURE_EXT_PARAM_PAGE BIT(7)
+
+/* ONFI timing mode, used in both asynchronous and synchronous mode */
+#define ONFI_DATA_INTERFACE_SDR 0
+#define ONFI_DATA_INTERFACE_NVDDR BIT(4)
+#define ONFI_DATA_INTERFACE_NVDDR2 BIT(5)
+#define ONFI_TIMING_MODE_0 BIT(0)
+#define ONFI_TIMING_MODE_1 BIT(1)
+#define ONFI_TIMING_MODE_2 BIT(2)
+#define ONFI_TIMING_MODE_3 BIT(3)
+#define ONFI_TIMING_MODE_4 BIT(4)
+#define ONFI_TIMING_MODE_5 BIT(5)
+#define ONFI_TIMING_MODE_UNKNOWN BIT(6)
+#define ONFI_TIMING_MODE_PARAM(x) FIELD_GET(GENMASK(3, 0), (x))
+
+/* ONFI feature number/address */
+#define ONFI_FEATURE_NUMBER 256
+#define ONFI_FEATURE_ADDR_TIMING_MODE 0x1
+
+/* Vendor-specific feature address (Micron) */
+#define ONFI_FEATURE_ADDR_READ_RETRY 0x89
+#define ONFI_FEATURE_ON_DIE_ECC 0x90
+#define ONFI_FEATURE_ON_DIE_ECC_EN BIT(3)
+
+/* ONFI subfeature parameters length */
+#define ONFI_SUBFEATURE_PARAM_LEN 4
+
+/* ONFI optional commands SET/GET FEATURES supported? */
+#define ONFI_OPT_CMD_SET_GET_FEATURES BIT(2)
+
+struct nand_onfi_params {
+ /* rev info and features block */
+ /* 'O' 'N' 'F' 'I' */
+ u8 sig[4];
+ __le16 revision;
+ __le16 features;
+ __le16 opt_cmd;
+ u8 reserved0[2];
+ __le16 ext_param_page_length; /* since ONFI 2.1 */
+ u8 num_of_param_pages; /* since ONFI 2.1 */
+ u8 reserved1[17];
+
+ /* manufacturer information block */
+ char manufacturer[12];
+ char model[20];
+ u8 jedec_id;
+ __le16 date_code;
+ u8 reserved2[13];
+
+ /* memory organization block */
+ __le32 byte_per_page;
+ __le16 spare_bytes_per_page;
+ __le32 data_bytes_per_ppage;
+ __le16 spare_bytes_per_ppage;
+ __le32 pages_per_block;
+ __le32 blocks_per_lun;
+ u8 lun_count;
+ u8 addr_cycles;
+ u8 bits_per_cell;
+ __le16 bb_per_lun;
+ __le16 block_endurance;
+ u8 guaranteed_good_blocks;
+ __le16 guaranteed_block_endurance;
+ u8 programs_per_page;
+ u8 ppage_attr;
+ u8 ecc_bits;
+ u8 interleaved_bits;
+ u8 interleaved_ops;
+ u8 reserved3[13];
+
+ /* electrical parameter block */
+ u8 io_pin_capacitance_max;
+ __le16 sdr_timing_modes;
+ __le16 program_cache_timing_mode;
+ __le16 t_prog;
+ __le16 t_bers;
+ __le16 t_r;
+ __le16 t_ccs;
+ u8 nvddr_timing_modes;
+ u8 nvddr2_timing_modes;
+ u8 nvddr_nvddr2_features;
+ __le16 clk_pin_capacitance_typ;
+ __le16 io_pin_capacitance_typ;
+ __le16 input_pin_capacitance_typ;
+ u8 input_pin_capacitance_max;
+ u8 driver_strength_support;
+ __le16 t_int_r;
+ __le16 t_adl;
+ u8 reserved4[8];
+
+ /* vendor */
+ __le16 vendor_revision;
+ u8 vendor[88];
+
+ __le16 crc;
+} __packed;
+
+#define ONFI_CRC_BASE 0x4F4E
+
+/* Extended ECC information Block Definition (since ONFI 2.1) */
+struct onfi_ext_ecc_info {
+ u8 ecc_bits;
+ u8 codeword_size;
+ __le16 bb_per_lun;
+ __le16 block_endurance;
+ u8 reserved[2];
+} __packed;
+
+#define ONFI_SECTION_TYPE_0 0 /* Unused section. */
+#define ONFI_SECTION_TYPE_1 1 /* for additional sections. */
+#define ONFI_SECTION_TYPE_2 2 /* for ECC information. */
+struct onfi_ext_section {
+ u8 type;
+ u8 length;
+} __packed;
+
+#define ONFI_EXT_SECTION_MAX 8
+
+/* Extended Parameter Page Definition (since ONFI 2.1) */
+struct onfi_ext_param_page {
+ __le16 crc;
+ u8 sig[4]; /* 'E' 'P' 'P' 'S' */
+ u8 reserved0[10];
+ struct onfi_ext_section sections[ONFI_EXT_SECTION_MAX];
+
+ /*
+ * The actual size of the Extended Parameter Page is in
+ * @ext_param_page_length of nand_onfi_params{}.
+ * The following are the variable length sections.
+ * So we do not add any fields below. Please see the ONFI spec.
+ */
+} __packed;
+
+/**
+ * struct onfi_params - ONFI specific parameters that will be reused
+ * @version: ONFI version (BCD encoded), 0 if ONFI is not supported
+ * @tPROG: Page program time
+ * @tBERS: Block erase time
+ * @tR: Page read time
+ * @tCCS: Change column setup time
+ * @fast_tCAD: Command/Address/Data slow or fast delay (NV-DDR only)
+ * @sdr_timing_modes: Supported asynchronous/SDR timing modes
+ * @nvddr_timing_modes: Supported source synchronous/NV-DDR timing modes
+ * @vendor_revision: Vendor specific revision number
+ * @vendor: Vendor specific data
+ */
+struct onfi_params {
+ int version;
+ u16 tPROG;
+ u16 tBERS;
+ u16 tR;
+ u16 tCCS;
+ bool fast_tCAD;
+ u16 sdr_timing_modes;
+ u16 nvddr_timing_modes;
+ u16 vendor_revision;
+ u8 vendor[88];
+};
+
+#endif /* __LINUX_MTD_ONFI_H */
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index c4beb70dacbd..b74a539ec581 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -37,6 +37,7 @@
* master MTD flag set for the corresponding MTD partition.
* For example, to force a read-only partition, simply adding
* MTD_WRITEABLE to the mask_flags will do the trick.
+ * add_flags: contains flags to add to the parent flags
*
* Note: writeable partitions require their size and offset be
* erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
@@ -48,6 +49,7 @@ struct mtd_partition {
uint64_t size; /* partition size */
uint64_t offset; /* offset within the master MTD space */
uint32_t mask_flags; /* master MTD flags to mask out for this partition */
+ uint32_t add_flags; /* flags to add to the partition */
struct device_node *of_node;
};
@@ -77,6 +79,7 @@ struct mtd_part_parser {
struct list_head list;
struct module *owner;
const char *name;
+ const struct of_device_id *of_match_table;
int (*parse_fn)(struct mtd_info *, const struct mtd_partition **,
struct mtd_part_parser_data *);
void (*cleanup)(const struct mtd_partition *pparts, int nr_parts);
@@ -104,7 +107,6 @@ extern void deregister_mtd_parser(struct mtd_part_parser *parser);
module_driver(__mtd_part_parser, register_mtd_parser, \
deregister_mtd_parser)
-int mtd_is_partition(const struct mtd_info *mtd);
int mtd_add_partition(struct mtd_info *master, const char *name,
long long offset, long long length);
int mtd_del_partition(struct mtd_info *master, int partno);
diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h
index 42ff7ff09bf5..146413d4bdb7 100644
--- a/include/linux/mtd/pfow.h
+++ b/include/linux/mtd/pfow.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Primary function overlay window definitions
* and service functions used by LPDDR chips
*/
@@ -18,7 +19,7 @@
/* Identification info for LPDDR chip */
#define PFOW_MANUFACTURER_ID 0x0020
#define PFOW_DEVICE_ID 0x0022
-/* Address in PFOW where prog buffer can can be found */
+/* Address in PFOW where prog buffer can be found */
#define PFOW_PROGRAM_BUFFER_OFFSET 0x0040
/* Size of program buffer in words */
#define PFOW_PROGRAM_BUFFER_SIZE 0x0042
@@ -120,37 +121,4 @@ static inline void send_pfow_command(struct map_info *map,
map_write(map, CMD(LPDDR_START_EXECUTION),
map->pfow_base + PFOW_COMMAND_EXECUTE);
}
-
-static inline void print_drs_error(unsigned dsr)
-{
- int prog_status = (dsr & DSR_RPS) >> 8;
-
- if (!(dsr & DSR_AVAILABLE))
- printk(KERN_NOTICE"DSR.15: (0) Device not Available\n");
- if (prog_status & 0x03)
- printk(KERN_NOTICE"DSR.9,8: (11) Attempt to program invalid "
- "half with 41h command\n");
- else if (prog_status & 0x02)
- printk(KERN_NOTICE"DSR.9,8: (10) Object Mode Program attempt "
- "in region with Control Mode data\n");
- else if (prog_status & 0x01)
- printk(KERN_NOTICE"DSR.9,8: (01) Program attempt in region "
- "with Object Mode data\n");
- if (!(dsr & DSR_READY_STATUS))
- printk(KERN_NOTICE"DSR.7: (0) Device is Busy\n");
- if (dsr & DSR_ESS)
- printk(KERN_NOTICE"DSR.6: (1) Erase Suspended\n");
- if (dsr & DSR_ERASE_STATUS)
- printk(KERN_NOTICE"DSR.5: (1) Erase/Blank check error\n");
- if (dsr & DSR_PROGRAM_STATUS)
- printk(KERN_NOTICE"DSR.4: (1) Program Error\n");
- if (dsr & DSR_VPPS)
- printk(KERN_NOTICE"DSR.3: (1) Vpp low detect, operation "
- "aborted\n");
- if (dsr & DSR_PSS)
- printk(KERN_NOTICE"DSR.2: (1) Program suspended\n");
- if (dsr & DSR_DPS)
- printk(KERN_NOTICE"DSR.1: (1) Aborted Erase/Program attempt "
- "on locked block\n");
-}
#endif /* __LINUX_MTD_PFOW_H */
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h
index aa6a2633c2da..bfaa9cc1dc84 100644
--- a/include/linux/mtd/physmap.h
+++ b/include/linux/mtd/physmap.h
@@ -1,15 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* For boards with physically mapped flash and using
* drivers/mtd/maps/physmap.c mapping driver.
*
* Copyright (C) 2003 MontaVista Software Inc.
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MTD_PHYSMAP__
diff --git a/include/linux/mtd/pismo.h b/include/linux/mtd/pismo.h
index 8dfb7e1421c5..085b639c9538 100644
--- a/include/linux/mtd/pismo.h
+++ b/include/linux/mtd/pismo.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* PISMO memory driver - http://www.pismoworld.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
*/
#ifndef __LINUX_MTD_PISMO_H
#define __LINUX_MTD_PISMO_H
diff --git a/include/linux/mtd/plat-ram.h b/include/linux/mtd/plat-ram.h
index 44212d65aa97..09441856d244 100644
--- a/include/linux/mtd/plat-ram.h
+++ b/include/linux/mtd/plat-ram.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* linux/include/linux/mtd/plat-ram.h
*
* (c) 2004 Simtec Electronics
@@ -5,11 +6,6 @@
* Ben Dooks <ben@simtec.co.uk>
*
* Generic platform device based RAM map
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __LINUX_MTD_PLATRAM_H
diff --git a/include/linux/mtd/platnand.h b/include/linux/mtd/platnand.h
new file mode 100644
index 000000000000..bc11eb6b593b
--- /dev/null
+++ b/include/linux/mtd/platnand.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
+ * Steven J. Hill <sjhill@realitydiluted.com>
+ * Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Contains all platform NAND related definitions.
+ */
+
+#ifndef __LINUX_MTD_PLATNAND_H
+#define __LINUX_MTD_PLATNAND_H
+
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/platform_device.h>
+
+/**
+ * struct platform_nand_chip - chip level device structure
+ * @nr_chips: max. number of chips to scan for
+ * @chip_offset: chip number offset
+ * @nr_partitions: number of partitions pointed to by partitions (or zero)
+ * @partitions: mtd partition list
+ * @chip_delay: R/B delay value in us
+ * @options: Option flags, e.g. 16bit buswidth
+ * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH
+ * @part_probe_types: NULL-terminated array of probe types
+ */
+struct platform_nand_chip {
+ int nr_chips;
+ int chip_offset;
+ int nr_partitions;
+ struct mtd_partition *partitions;
+ int chip_delay;
+ unsigned int options;
+ unsigned int bbt_options;
+ const char **part_probe_types;
+};
+
+/**
+ * struct platform_nand_ctrl - controller level device structure
+ * @probe: platform specific function to probe/setup hardware
+ * @remove: platform specific function to remove/teardown hardware
+ * @dev_ready: platform specific function to read ready/busy pin
+ * @select_chip: platform specific chip select function
+ * @cmd_ctrl: platform specific function for controlling
+ * ALE/CLE/nCE. Also used to write command and address
+ * @write_buf: platform specific function for write buffer
+ * @read_buf: platform specific function for read buffer
+ * @priv: private data to transport driver specific settings
+ *
+ * All fields are optional and depend on the hardware driver requirements
+ */
+struct platform_nand_ctrl {
+ int (*probe)(struct platform_device *pdev);
+ void (*remove)(struct platform_device *pdev);
+ int (*dev_ready)(struct nand_chip *chip);
+ void (*select_chip)(struct nand_chip *chip, int cs);
+ void (*cmd_ctrl)(struct nand_chip *chip, int dat, unsigned int ctrl);
+ void (*write_buf)(struct nand_chip *chip, const uint8_t *buf, int len);
+ void (*read_buf)(struct nand_chip *chip, uint8_t *buf, int len);
+ void *priv;
+};
+
+/**
+ * struct platform_nand_data - container structure for platform-specific data
+ * @chip: chip level chip structure
+ * @ctrl: controller level device structure
+ */
+struct platform_nand_data {
+ struct platform_nand_chip chip;
+ struct platform_nand_ctrl ctrl;
+};
+
+#endif /* __LINUX_MTD_PLATNAND_H */
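[Editor's sketch, not part of the patch: board code that would feed these structures to the generic plat_nand driver. The board_nand_* hooks are illustrative; their signatures match struct platform_nand_ctrl above.]

static int board_nand_dev_ready(struct nand_chip *chip);		/* illustrative */
static void board_nand_cmd_ctrl(struct nand_chip *chip, int dat,
				unsigned int ctrl);			/* illustrative */

static struct platform_nand_data board_nand_data = {
	.chip = {
		.nr_chips	= 1,
		.chip_delay	= 30,	/* tR, in us */
	},
	.ctrl = {
		.dev_ready	= board_nand_dev_ready,
		.cmd_ctrl	= board_nand_cmd_ctrl,
	},
};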
diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h
index b532ce524dae..2e3f43788d48 100644
--- a/include/linux/mtd/qinfo.h
+++ b/include/linux/mtd/qinfo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MTD_QINFO_H
#define __LINUX_MTD_QINFO_H
@@ -23,7 +24,7 @@ struct lpddr_private {
struct qinfo_chip *qinfo;
int numchips;
unsigned long chipshift;
- struct flchip chips[0];
+ struct flchip chips[];
};
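[Editor's sketch, not part of the patch: with chips[] now a C99 flexible array member, allocation sites size the trailing array explicitly, e.g. with the kernel's struct_size() helper:]

struct lpddr_private *lpddr;

lpddr = kzalloc(struct_size(lpddr, chips, numchips), GFP_KERNEL);
if (!lpddr)
	return NULL;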
/* qinfo_query_info structure contains request information for
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index c433f093766a..5159d692f9ce 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
* Steven J. Hill <sjhill@realitydiluted.com>
* Thomas Gleixner <tglx@linutronix.de>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Info:
* Contains standard defines and IDs for NAND flash devices
*
@@ -16,31 +13,18 @@
#ifndef __LINUX_MTD_RAWNAND_H
#define __LINUX_MTD_RAWNAND_H
-#include <linux/wait.h>
-#include <linux/spinlock.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/bbm.h>
+#include <linux/mtd/jedec.h>
+#include <linux/mtd/onfi.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/types.h>
-struct mtd_info;
-struct nand_flash_dev;
-struct device_node;
-
-/* Scan and identify a NAND device */
-int nand_scan(struct mtd_info *mtd, int max_chips);
-/*
- * Separate phases of nand_scan(), allowing board driver to intervene
- * and override command or ECC setup according to flash type.
- */
-int nand_scan_ident(struct mtd_info *mtd, int max_chips,
- struct nand_flash_dev *table);
-int nand_scan_tail(struct mtd_info *mtd);
-
-/* Unregister the MTD device and free resources held by the NAND device */
-void nand_release(struct mtd_info *mtd);
-
-/* Internal helper for board drivers which need to override command function */
-void nand_wait_ready(struct mtd_info *mtd);
+struct nand_chip;
+struct gpio_desc;
/* The maximum number of NAND chips in an array */
#define NAND_MAX_CHIPS 8
@@ -83,6 +67,8 @@ void nand_wait_ready(struct mtd_info *mtd);
/* Extended commands for large page devices */
#define NAND_CMD_READSTART 0x30
+#define NAND_CMD_READCACHESEQ 0x31
+#define NAND_CMD_READCACHEEND 0x3f
#define NAND_CMD_RNDOUTSTART 0xE0
#define NAND_CMD_CACHEDPROG 0x15
@@ -98,24 +84,6 @@ void nand_wait_ready(struct mtd_info *mtd);
#define NAND_DATA_IFACE_CHECK_ONLY -1
/*
- * Constants for ECC_MODES
- */
-typedef enum {
- NAND_ECC_NONE,
- NAND_ECC_SOFT,
- NAND_ECC_HW,
- NAND_ECC_HW_SYNDROME,
- NAND_ECC_HW_OOB_FIRST,
- NAND_ECC_ON_DIE,
-} nand_ecc_modes_t;
-
-enum nand_ecc_algo {
- NAND_ECC_UNKNOWN,
- NAND_ECC_HAMMING,
- NAND_ECC_BCH,
-};
-
-/*
* Constants for Hardware ECC
*/
/* Reset Hardware ECC for read */
@@ -132,310 +100,145 @@ enum nand_ecc_algo {
* pages and you want to rely on the default implementation.
*/
#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)
-#define NAND_ECC_MAXIMIZE BIT(1)
-/*
- * If your controller already sends the required NAND commands when
- * reading or writing a page, then the framework is not supposed to
- * send READ0 and SEQIN/PAGEPROG respectively.
- */
-#define NAND_ECC_CUSTOM_PAGE_ACCESS BIT(2)
-
-/* Bit mask for flags passed to do_nand_read_ecc */
-#define NAND_GET_DEVICE 0x80
-
/*
* Option constants for bizarre dysfunctionality and real
* features.
*/
+
/* Buswidth is 16 bit */
-#define NAND_BUSWIDTH_16 0x00000002
+#define NAND_BUSWIDTH_16 BIT(1)
+
+/*
+ * When using software implementation of Hamming, we can specify which byte
+ * ordering should be used.
+ */
+#define NAND_ECC_SOFT_HAMMING_SM_ORDER BIT(2)
+
/* Chip has cache program function */
-#define NAND_CACHEPRG 0x00000008
+#define NAND_CACHEPRG BIT(3)
+/* Options valid for Samsung large page devices */
+#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
+
/*
* Chip requires ready check on read (for auto-incremented sequential read).
* True only for small page devices; large page devices do not support
* autoincrement.
*/
-#define NAND_NEED_READRDY 0x00000100
+#define NAND_NEED_READRDY BIT(8)
/* Chip does not allow subpage writes */
-#define NAND_NO_SUBPAGE_WRITE 0x00000200
+#define NAND_NO_SUBPAGE_WRITE BIT(9)
/* Device is one of 'new' xD cards that expose fake nand command set */
-#define NAND_BROKEN_XD 0x00000400
+#define NAND_BROKEN_XD BIT(10)
/* Device behaves just like nand, but is readonly */
-#define NAND_ROM 0x00000800
+#define NAND_ROM BIT(11)
/* Device supports subpage reads */
-#define NAND_SUBPAGE_READ 0x00001000
+#define NAND_SUBPAGE_READ BIT(12)
+/* Macros to identify the above */
+#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
/*
* Some MLC NANDs need data scrambling to limit bitflips caused by repeated
* patterns.
*/
-#define NAND_NEED_SCRAMBLING 0x00002000
-
-/* Options valid for Samsung large page devices */
-#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
+#define NAND_NEED_SCRAMBLING BIT(13)
-/* Macros to identify the above */
-#define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG))
-#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
-#define NAND_HAS_SUBPAGE_WRITE(chip) !((chip)->options & NAND_NO_SUBPAGE_WRITE)
+/* Device needs 3rd row address cycle */
+#define NAND_ROW_ADDR_3 BIT(14)
/* Non chip related options */
/* This option skips the bbt scan during initialization. */
-#define NAND_SKIP_BBTSCAN 0x00010000
-/*
- * This option is defined if the board driver allocates its own buffers
- * (e.g. because it needs them DMA-coherent).
- */
-#define NAND_OWN_BUFFERS 0x00020000
+#define NAND_SKIP_BBTSCAN BIT(16)
/* Chip may not exist, so silence any errors in scan */
-#define NAND_SCAN_SILENT_NODEV 0x00040000
+#define NAND_SCAN_SILENT_NODEV BIT(18)
+
/*
* Autodetect nand buswidth with readid/onfi.
* This supposes the driver will configure the hardware in 8-bit mode
* when calling nand_scan_ident, and update its configuration
* before calling nand_scan_tail.
*/
-#define NAND_BUSWIDTH_AUTO 0x00080000
+#define NAND_BUSWIDTH_AUTO BIT(19)
+
/*
* This option could be defined by controller drivers to protect against
* kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
*/
-#define NAND_USE_BOUNCE_BUFFER 0x00100000
+#define NAND_USES_DMA BIT(20)
/*
- * In case your controller is implementing ->cmd_ctrl() and is relying on the
- * default ->cmdfunc() implementation, you may want to let the core handle the
- * tCCS delay which is required when a column change (RNDIN or RNDOUT) is
- * requested.
+ * In case your controller is implementing ->legacy.cmd_ctrl() and is relying
+ * on the default ->cmdfunc() implementation, you may want to let the core
+ * handle the tCCS delay which is required when a column change (RNDIN or
+ * RNDOUT) is requested.
* If your controller already takes care of this delay, you don't need to set
* this flag.
*/
-#define NAND_WAIT_TCCS 0x00200000
+#define NAND_WAIT_TCCS BIT(21)
-/* Options set by nand scan */
-/* Nand scan has allocated controller struct */
-#define NAND_CONTROLLER_ALLOC 0x80000000
+/*
+ * Whether the NAND chip is a boot medium. Drivers might use this information
+ * to select ECC algorithms supported by the boot ROM or to apply similar restrictions.
+ */
+#define NAND_IS_BOOT_MEDIUM BIT(22)
+
+/*
+ * Do not try to tweak the timings at runtime. This is needed when the
+ * controller initializes the timings on itself or when it relies on
+ * configuration done by the bootloader.
+ */
+#define NAND_KEEP_TIMINGS BIT(23)
+
+/*
+ * There are different places where the manufacturer stores the factory bad
+ * block markers.
+ *
+ * Position within the block: Each of these pages needs to be checked for a
+ * bad block marking pattern.
+ */
+#define NAND_BBM_FIRSTPAGE BIT(24)
+#define NAND_BBM_SECONDPAGE BIT(25)
+#define NAND_BBM_LASTPAGE BIT(26)
+
+/*
+ * Some controllers with pipelined ECC engines override the BBM marker with
+ * data or ECC bytes, thus making bad block detection through bad block marker
+ * impossible. Let's flag those chips so the core knows it shouldn't check the
+ * BBM and should consider all blocks good.
+ */
+#define NAND_NO_BBM_QUIRK BIT(27)
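[Editor's sketch, not part of the patch: drivers consume these bits by testing chip->options, e.g. in a timing setup path:]

if (chip->options & NAND_KEEP_TIMINGS)
	return 0;	/* trust bootloader/controller-programmed timings */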
/* Cell info constants */
#define NAND_CI_CHIPNR_MSK 0x03
#define NAND_CI_CELLTYPE_MSK 0x0C
#define NAND_CI_CELLTYPE_SHIFT 2
-/* Keep gcc happy */
-struct nand_chip;
+/* Position within the OOB data of the page */
+#define NAND_BBM_POS_SMALL 5
+#define NAND_BBM_POS_LARGE 0
-/* ONFI features */
-#define ONFI_FEATURE_16_BIT_BUS (1 << 0)
-#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7)
-
-/* ONFI timing mode, used in both asynchronous and synchronous mode */
-#define ONFI_TIMING_MODE_0 (1 << 0)
-#define ONFI_TIMING_MODE_1 (1 << 1)
-#define ONFI_TIMING_MODE_2 (1 << 2)
-#define ONFI_TIMING_MODE_3 (1 << 3)
-#define ONFI_TIMING_MODE_4 (1 << 4)
-#define ONFI_TIMING_MODE_5 (1 << 5)
-#define ONFI_TIMING_MODE_UNKNOWN (1 << 6)
-
-/* ONFI feature address */
-#define ONFI_FEATURE_ADDR_TIMING_MODE 0x1
-
-/* Vendor-specific feature address (Micron) */
-#define ONFI_FEATURE_ADDR_READ_RETRY 0x89
-#define ONFI_FEATURE_ON_DIE_ECC 0x90
-#define ONFI_FEATURE_ON_DIE_ECC_EN BIT(3)
-
-/* ONFI subfeature parameters length */
-#define ONFI_SUBFEATURE_PARAM_LEN 4
-
-/* ONFI optional commands SET/GET FEATURES supported? */
-#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2)
-
-struct nand_onfi_params {
- /* rev info and features block */
- /* 'O' 'N' 'F' 'I' */
- u8 sig[4];
- __le16 revision;
- __le16 features;
- __le16 opt_cmd;
- u8 reserved0[2];
- __le16 ext_param_page_length; /* since ONFI 2.1 */
- u8 num_of_param_pages; /* since ONFI 2.1 */
- u8 reserved1[17];
-
- /* manufacturer information block */
- char manufacturer[12];
- char model[20];
- u8 jedec_id;
- __le16 date_code;
- u8 reserved2[13];
-
- /* memory organization block */
- __le32 byte_per_page;
- __le16 spare_bytes_per_page;
- __le32 data_bytes_per_ppage;
- __le16 spare_bytes_per_ppage;
- __le32 pages_per_block;
- __le32 blocks_per_lun;
- u8 lun_count;
- u8 addr_cycles;
- u8 bits_per_cell;
- __le16 bb_per_lun;
- __le16 block_endurance;
- u8 guaranteed_good_blocks;
- __le16 guaranteed_block_endurance;
- u8 programs_per_page;
- u8 ppage_attr;
- u8 ecc_bits;
- u8 interleaved_bits;
- u8 interleaved_ops;
- u8 reserved3[13];
-
- /* electrical parameter block */
- u8 io_pin_capacitance_max;
- __le16 async_timing_mode;
- __le16 program_cache_timing_mode;
- __le16 t_prog;
- __le16 t_bers;
- __le16 t_r;
- __le16 t_ccs;
- __le16 src_sync_timing_mode;
- u8 src_ssync_features;
- __le16 clk_pin_capacitance_typ;
- __le16 io_pin_capacitance_typ;
- __le16 input_pin_capacitance_typ;
- u8 input_pin_capacitance_max;
- u8 driver_strength_support;
- __le16 t_int_r;
- __le16 t_adl;
- u8 reserved4[8];
-
- /* vendor */
- __le16 vendor_revision;
- u8 vendor[88];
-
- __le16 crc;
-} __packed;
-
-#define ONFI_CRC_BASE 0x4F4E
-
-/* Extended ECC information Block Definition (since ONFI 2.1) */
-struct onfi_ext_ecc_info {
- u8 ecc_bits;
- u8 codeword_size;
- __le16 bb_per_lun;
- __le16 block_endurance;
- u8 reserved[2];
-} __packed;
-
-#define ONFI_SECTION_TYPE_0 0 /* Unused section. */
-#define ONFI_SECTION_TYPE_1 1 /* for additional sections. */
-#define ONFI_SECTION_TYPE_2 2 /* for ECC information. */
-struct onfi_ext_section {
- u8 type;
- u8 length;
-} __packed;
-
-#define ONFI_EXT_SECTION_MAX 8
-
-/* Extended Parameter Page Definition (since ONFI 2.1) */
-struct onfi_ext_param_page {
- __le16 crc;
- u8 sig[4]; /* 'E' 'P' 'P' 'S' */
- u8 reserved0[10];
- struct onfi_ext_section sections[ONFI_EXT_SECTION_MAX];
-
- /*
- * The actual size of the Extended Parameter Page is in
- * @ext_param_page_length of nand_onfi_params{}.
- * The following are the variable length sections.
- * So we do not add any fields below. Please see the ONFI spec.
- */
-} __packed;
-
-struct jedec_ecc_info {
- u8 ecc_bits;
- u8 codeword_size;
- __le16 bb_per_lun;
- __le16 block_endurance;
- u8 reserved[2];
-} __packed;
-
-/* JEDEC features */
-#define JEDEC_FEATURE_16_BIT_BUS (1 << 0)
-
-struct nand_jedec_params {
- /* rev info and features block */
- /* 'J' 'E' 'S' 'D' */
- u8 sig[4];
- __le16 revision;
- __le16 features;
- u8 opt_cmd[3];
- __le16 sec_cmd;
- u8 num_of_param_pages;
- u8 reserved0[18];
-
- /* manufacturer information block */
- char manufacturer[12];
- char model[20];
- u8 jedec_id[6];
- u8 reserved1[10];
-
- /* memory organization block */
- __le32 byte_per_page;
- __le16 spare_bytes_per_page;
- u8 reserved2[6];
- __le32 pages_per_block;
- __le32 blocks_per_lun;
- u8 lun_count;
- u8 addr_cycles;
- u8 bits_per_cell;
- u8 programs_per_page;
- u8 multi_plane_addr;
- u8 multi_plane_op_attr;
- u8 reserved3[38];
-
- /* electrical parameter block */
- __le16 async_sdr_speed_grade;
- __le16 toggle_ddr_speed_grade;
- __le16 sync_ddr_speed_grade;
- u8 async_sdr_features;
- u8 toggle_ddr_features;
- u8 sync_ddr_features;
- __le16 t_prog;
- __le16 t_bers;
- __le16 t_r;
- __le16 t_r_multi_plane;
- __le16 t_ccs;
- __le16 io_pin_capacitance_typ;
- __le16 input_pin_capacitance_typ;
- __le16 clk_pin_capacitance_typ;
- u8 driver_strength_support;
- __le16 t_adl;
- u8 reserved4[36];
-
- /* ECC and endurance block */
- u8 guaranteed_good_blocks;
- __le16 guaranteed_block_endurance;
- struct jedec_ecc_info ecc_info[4];
- u8 reserved5[29];
-
- /* reserved */
- u8 reserved6[148];
-
- /* vendor */
- __le16 vendor_rev_num;
- u8 reserved7[88];
-
- /* CRC for Parameter Page */
- __le16 crc;
-} __packed;
+/**
+ * struct nand_parameters - NAND generic parameters from the parameter page
+ * @model: Model name
+ * @supports_set_get_features: The NAND chip supports setting/getting features
+ * @set_feature_list: Bitmap of features that can be set
+ * @get_feature_list: Bitmap of features that can be retrieved
+ * @onfi: ONFI specific parameters
+ */
+struct nand_parameters {
+ /* Generic parameters */
+ const char *model;
+ bool supports_set_get_features;
+ DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
+ DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);
+
+ /* ONFI parameters */
+ struct onfi_params *onfi;
+};
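[Editor's sketch, not part of the patch: how this structure is consulted at runtime, assuming a struct nand_chip with the @parameters member documented later in this header; ONFI_FEATURE_ADDR_TIMING_MODE comes from <linux/mtd/onfi.h>.]

bool can_set_timing_mode =
	chip->parameters.supports_set_get_features &&
	test_bit(ONFI_FEATURE_ADDR_TIMING_MODE,
		 chip->parameters.set_feature_list);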
/* The maximum expected count of bytes in the NAND ID sequence */
#define NAND_MAX_ID_LEN 8
@@ -451,27 +254,6 @@ struct nand_id {
};
/**
- * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices
- * @lock: protection lock
- * @active: the mtd device which holds the controller currently
- * @wq: wait queue to sleep on if a NAND operation is in
- * progress used instead of the per chip wait queue
- * when a hw controller is available.
- */
-struct nand_hw_control {
- spinlock_t lock;
- struct nand_chip *active;
- wait_queue_head_t wq;
-};
-
-static inline void nand_hw_control_init(struct nand_hw_control *nfc)
-{
- nfc->active = NULL;
- spin_lock_init(&nfc->lock);
- init_waitqueue_head(&nfc->wq);
-}
-
-/**
* struct nand_ecc_step_info - ECC step information of ECC engine
* @stepsize: data bytes per ECC step
* @strengths: array of supported strengths
@@ -511,7 +293,8 @@ static const struct nand_ecc_caps __name = { \
/**
* struct nand_ecc_ctrl - Control structure for ECC
- * @mode: ECC mode
+ * @engine_type: ECC engine type
+ * @placement: OOB bytes placement
* @algo: ECC algorithm
* @steps: number of ECC steps per page
* @size: data bytes per ECC step
@@ -521,7 +304,8 @@ static const struct nand_ecc_caps __name = { \
* @prepad: padding information for syndrome based ECC generators
* @postpad: padding information for syndrome based ECC generators
* @options: ECC specific options (see NAND_ECC_XXX flags defined above)
- * @priv: pointer to private ECC control data
+ * @calc_buf: buffer for calculated ECC, size is oobsize.
+ * @code_buf: buffer for ECC read from flash, size is oobsize.
* @hwctl: function to control hardware ECC generator. Must only
* be provided if a hardware ECC is available
* @calculate: function for ECC calculation or readback from ECC hardware
@@ -537,7 +321,7 @@ static const struct nand_ecc_caps __name = { \
* controller and always return contiguous in-band and
* out-of-band data even if they're not stored
* contiguously on the NAND chip (e.g.
- * NAND_ECC_HW_SYNDROME interleaves in-band and
+ * NAND_ECC_PLACEMENT_INTERLEAVED interleaves in-band and
* out-of-band data).
* @write_page_raw: function to write a raw page without ECC. This function
* should hide the specific layout used by the ECC
@@ -545,7 +329,7 @@ static const struct nand_ecc_caps __name = { \
* in-band and out-of-band data. ECC controller is
* responsible for doing the appropriate transformations
* to adapt to its specific layout (e.g.
- * NAND_ECC_HW_SYNDROME interleaves in-band and
+ * NAND_ECC_PLACEMENT_INTERLEAVED interleaves in-band and
* out-of-band data).
* @read_page: function to read a page according to the ECC generator
* requirements; returns maximum number of bitflips corrected in
@@ -561,7 +345,8 @@ static const struct nand_ecc_caps __name = { \
* @write_oob: function to write chip OOB data
*/
struct nand_ecc_ctrl {
- nand_ecc_modes_t mode;
+ enum nand_ecc_engine_type engine_type;
+ enum nand_ecc_placement placement;
enum nand_ecc_algo algo;
int steps;
int size;
@@ -571,52 +356,30 @@ struct nand_ecc_ctrl {
int prepad;
int postpad;
unsigned int options;
- void *priv;
- void (*hwctl)(struct mtd_info *mtd, int mode);
- int (*calculate)(struct mtd_info *mtd, const uint8_t *dat,
- uint8_t *ecc_code);
- int (*correct)(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc,
- uint8_t *calc_ecc);
- int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page);
- int (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page);
- int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page);
- int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
- uint32_t offs, uint32_t len, uint8_t *buf, int page);
- int (*write_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
- uint32_t offset, uint32_t data_len,
- const uint8_t *data_buf, int oob_required, int page);
- int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page);
- int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
- int page);
- int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
- int page);
- int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page);
- int (*write_oob)(struct mtd_info *mtd, struct nand_chip *chip,
- int page);
-};
-
-static inline int nand_standard_page_accessors(struct nand_ecc_ctrl *ecc)
-{
- return !(ecc->options & NAND_ECC_CUSTOM_PAGE_ACCESS);
-}
-
-/**
- * struct nand_buffers - buffer structure for read/write
- * @ecccalc: buffer pointer for calculated ECC, size is oobsize.
- * @ecccode: buffer pointer for ECC read from flash, size is oobsize.
- * @databuf: buffer pointer for data, size is (page size + oobsize).
- *
- * Do not change the order of buffers. databuf and oobrbuf must be in
- * consecutive order.
- */
-struct nand_buffers {
- uint8_t *ecccalc;
- uint8_t *ecccode;
- uint8_t *databuf;
+ u8 *calc_buf;
+ u8 *code_buf;
+ void (*hwctl)(struct nand_chip *chip, int mode);
+ int (*calculate)(struct nand_chip *chip, const uint8_t *dat,
+ uint8_t *ecc_code);
+ int (*correct)(struct nand_chip *chip, uint8_t *dat, uint8_t *read_ecc,
+ uint8_t *calc_ecc);
+ int (*read_page_raw)(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page);
+ int (*write_page_raw)(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page);
+ int (*read_page)(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page);
+ int (*read_subpage)(struct nand_chip *chip, uint32_t offs,
+ uint32_t len, uint8_t *buf, int page);
+ int (*write_subpage)(struct nand_chip *chip, uint32_t offset,
+ uint32_t data_len, const uint8_t *data_buf,
+ int oob_required, int page);
+ int (*write_page)(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page);
+ int (*write_oob_raw)(struct nand_chip *chip, int page);
+ int (*read_oob_raw)(struct nand_chip *chip, int page);
+ int (*read_oob)(struct nand_chip *chip, int page);
+ int (*write_oob)(struct nand_chip *chip, int page);
};
/**
@@ -625,8 +388,8 @@ struct nand_buffers {
* This struct defines the timing requirements of an SDR NAND chip.
* This information can be found in every NAND datasheet, and the meaning of
* each timing is described in the ONFI specifications:
- * www.onfi.org/~/media/ONFI/specs/onfi_3_1_spec.pdf (chapter 4.15 Timing
- * Parameters)
+ * https://media-www.micron.com/-/media/client/onfi/specs/onfi_3_1_spec.pdf
+ * (chapter 4.15 Timing Parameters)
*
* All these timings are expressed in picoseconds.
*
@@ -671,10 +434,10 @@ struct nand_buffers {
* @tWW_min: WP# transition to WE# low
*/
struct nand_sdr_timings {
- u32 tBERS_max;
+ u64 tBERS_max;
u32 tCCS_min;
- u32 tPROG_max;
- u32 tR_max;
+ u64 tPROG_max;
+ u64 tR_max;
u32 tALH_min;
u32 tADL_min;
u32 tALS_min;
@@ -712,269 +475,867 @@ struct nand_sdr_timings {
};
/**
- * enum nand_data_interface_type - NAND interface timing type
+ * struct nand_nvddr_timings - NV-DDR NAND chip timings
+ *
+ * This struct defines the timing requirements of an NV-DDR NAND data interface.
+ * This information can be found in every NAND datasheet, and the meaning of
+ * each timing is described in the ONFI specifications:
+ * https://media-www.micron.com/-/media/client/onfi/specs/onfi_4_1_gold.pdf
+ * (chapter 4.18.2 NV-DDR)
+ *
+ * All these timings are expressed in picoseconds.
+ *
+ * @tBERS_max: Block erase time
+ * @tCCS_min: Change column setup time
+ * @tPROG_max: Page program time
+ * @tR_max: Page read time
+ * @tAC_min: Access window of DQ[7:0] from CLK
+ * @tAC_max: Access window of DQ[7:0] from CLK
+ * @tADL_min: ALE to data loading time
+ * @tCAD_min: Command, Address, Data delay
+ * @tCAH_min: Command/Address DQ hold time
+ * @tCALH_min: W/R_n, CLE and ALE hold time
+ * @tCALS_min: W/R_n, CLE and ALE setup time
+ * @tCAS_min: Command/address DQ setup time
+ * @tCEH_min: CE# high hold time
+ * @tCH_min: CE# hold time
+ * @tCK_min: Average clock cycle time
+ * @tCS_min: CE# setup time
+ * @tDH_min: Data hold time
+ * @tDQSCK_min: Start of the access window of DQS from CLK
+ * @tDQSCK_max: End of the access window of DQS from CLK
+ * @tDQSD_min: Min W/R_n low to DQS/DQ driven by device
+ * @tDQSD_max: Max W/R_n low to DQS/DQ driven by device
+ * @tDQSHZ_max: W/R_n high to DQS/DQ tri-state by device
+ * @tDQSQ_max: DQS-DQ skew, DQS to last DQ valid, per access
+ * @tDS_min: Data setup time
+ * @tDSC_min: DQS cycle time
+ * @tFEAT_max: Busy time for Set Features and Get Features
+ * @tITC_max: Interface and Timing Mode Change time
+ * @tQHS_max: Data hold skew factor
+ * @tRHW_min: Data output cycle to command, address, or data input cycle
+ * @tRR_min: Ready to RE# low (data only)
+ * @tRST_max: Device reset time, measured from the falling edge of R/B# to the
+ * rising edge of R/B#.
+ * @tWB_max: WE# high to SR[6] low
+ * @tWHR_min: WE# high to RE# low
+ * @tWRCK_min: W/R_n low to data output cycle
+ * @tWW_min: WP# transition to WE# low
+ */
+struct nand_nvddr_timings {
+ u64 tBERS_max;
+ u32 tCCS_min;
+ u64 tPROG_max;
+ u64 tR_max;
+ u32 tAC_min;
+ u32 tAC_max;
+ u32 tADL_min;
+ u32 tCAD_min;
+ u32 tCAH_min;
+ u32 tCALH_min;
+ u32 tCALS_min;
+ u32 tCAS_min;
+ u32 tCEH_min;
+ u32 tCH_min;
+ u32 tCK_min;
+ u32 tCS_min;
+ u32 tDH_min;
+ u32 tDQSCK_min;
+ u32 tDQSCK_max;
+ u32 tDQSD_min;
+ u32 tDQSD_max;
+ u32 tDQSHZ_max;
+ u32 tDQSQ_max;
+ u32 tDS_min;
+ u32 tDSC_min;
+ u32 tFEAT_max;
+ u32 tITC_max;
+ u32 tQHS_max;
+ u32 tRHW_min;
+ u32 tRR_min;
+ u32 tRST_max;
+ u32 tWB_max;
+ u32 tWHR_min;
+ u32 tWRCK_min;
+ u32 tWW_min;
+};
+
+/*
+ * While timings related to the data interface itself are mostly different
+ * between SDR and NV-DDR, timings related to the internal chip behavior are
+ * common. In other words, the following entries, which describe internal delays, have
+ * the same definition and are shared in both SDR and NV-DDR timing structures:
+ * - tADL_min
+ * - tBERS_max
+ * - tCCS_min
+ * - tFEAT_max
+ * - tPROG_max
+ * - tR_max
+ * - tRR_min
+ * - tRST_max
+ * - tWB_max
+ *
+ * The macros below return the value of a given timing regardless of the interface type.
+ */
+#define NAND_COMMON_TIMING_PS(conf, timing_name) \
+ nand_interface_is_sdr(conf) ? \
+ nand_get_sdr_timings(conf)->timing_name : \
+ nand_get_nvddr_timings(conf)->timing_name
+
+#define NAND_COMMON_TIMING_MS(conf, timing_name) \
+ PSEC_TO_MSEC(NAND_COMMON_TIMING_PS((conf), timing_name))
+
+#define NAND_COMMON_TIMING_NS(conf, timing_name) \
+ PSEC_TO_NSEC(NAND_COMMON_TIMING_PS((conf), timing_name))
+
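[Editor's sketch, not part of the patch: fetching a timing shared by both interface types, e.g. from a setup_interface() implementation. tWB_max and tCCS_min exist in both the SDR and NV-DDR structures, per the shared list above.]

u32 tWB_ms = NAND_COMMON_TIMING_MS(conf, tWB_max);
u32 tCCS_ns = NAND_COMMON_TIMING_NS(conf, tCCS_min);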
+/**
+ * enum nand_interface_type - NAND interface type
* @NAND_SDR_IFACE: Single Data Rate interface
+ * @NAND_NVDDR_IFACE: Double Data Rate interface
*/
-enum nand_data_interface_type {
+enum nand_interface_type {
NAND_SDR_IFACE,
+ NAND_NVDDR_IFACE,
};
/**
- * struct nand_data_interface - NAND interface timing
- * @type: type of the timing
- * @timings: The timing, type according to @type
+ * struct nand_interface_config - NAND interface timing
+ * @type: type of the timing
+ * @timings: The timing information
+ * @timings.mode: Timing mode as defined in the specification
+ * @timings.sdr: Use it when @type is %NAND_SDR_IFACE.
+ * @timings.nvddr: Use it when @type is %NAND_NVDDR_IFACE.
*/
-struct nand_data_interface {
- enum nand_data_interface_type type;
- union {
- struct nand_sdr_timings sdr;
+struct nand_interface_config {
+ enum nand_interface_type type;
+ struct nand_timings {
+ unsigned int mode;
+ union {
+ struct nand_sdr_timings sdr;
+ struct nand_nvddr_timings nvddr;
+ };
} timings;
};
/**
+ * nand_interface_is_sdr - get the interface type
+ * @conf: The data interface
+ */
+static bool nand_interface_is_sdr(const struct nand_interface_config *conf)
+{
+ return conf->type == NAND_SDR_IFACE;
+}
+
+/**
+ * nand_interface_is_nvddr - get the interface type
+ * @conf: The data interface
+ */
+static bool nand_interface_is_nvddr(const struct nand_interface_config *conf)
+{
+ return conf->type == NAND_NVDDR_IFACE;
+}
+
+/**
* nand_get_sdr_timings - get SDR timing from data interface
* @conf: The data interface
*/
static inline const struct nand_sdr_timings *
-nand_get_sdr_timings(const struct nand_data_interface *conf)
+nand_get_sdr_timings(const struct nand_interface_config *conf)
{
- if (conf->type != NAND_SDR_IFACE)
+ if (!nand_interface_is_sdr(conf))
return ERR_PTR(-EINVAL);
return &conf->timings.sdr;
}
/**
- * struct nand_manufacturer_ops - NAND Manufacturer operations
- * @detect: detect the NAND memory organization and capabilities
- * @init: initialize all vendor specific fields (like the ->read_retry()
- * implementation) if any.
- * @cleanup: the ->init() function may have allocated resources, ->cleanup()
- * is here to let vendor specific code release those resources.
+ * nand_get_nvddr_timings - get NV-DDR timing from data interface
+ * @conf: The data interface
+ */
+static inline const struct nand_nvddr_timings *
+nand_get_nvddr_timings(const struct nand_interface_config *conf)
+{
+ if (!nand_interface_is_nvddr(conf))
+ return ERR_PTR(-EINVAL);
+
+ return &conf->timings.nvddr;
+}
+
+/**
+ * struct nand_op_cmd_instr - Definition of a command instruction
+ * @opcode: the command to issue in one cycle
*/
-struct nand_manufacturer_ops {
- void (*detect)(struct nand_chip *chip);
- int (*init)(struct nand_chip *chip);
- void (*cleanup)(struct nand_chip *chip);
+struct nand_op_cmd_instr {
+ u8 opcode;
};
/**
- * struct nand_chip - NAND Private Flash Chip Data
- * @mtd: MTD device registered to the MTD framework
- * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the
- * flash device
- * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the
- * flash device.
- * @read_byte: [REPLACEABLE] read one byte from the chip
- * @read_word: [REPLACEABLE] read one word from the chip
- * @write_byte: [REPLACEABLE] write a single byte to the chip on the
- * low 8 I/O lines
- * @write_buf: [REPLACEABLE] write data from the buffer to the chip
- * @read_buf: [REPLACEABLE] read data from the chip into the buffer
- * @select_chip: [REPLACEABLE] select chip nr
- * @block_bad: [REPLACEABLE] check if a block is bad, using OOB markers
- * @block_markbad: [REPLACEABLE] mark a block bad
- * @cmd_ctrl: [BOARDSPECIFIC] hardwarespecific function for controlling
- * ALE/CLE/nCE. Also used to write command and address
- * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accessing
- * device ready/busy line. If set to NULL no access to
- * ready/busy is available and the ready/busy information
- * is read from the chip status register.
- * @cmdfunc: [REPLACEABLE] hardwarespecific function for writing
- * commands to the chip.
- * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on
- * ready.
- * @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for
- * setting the read-retry mode. Mostly needed for MLC NAND.
- * @ecc: [BOARDSPECIFIC] ECC control structure
- * @buffers: buffer structure for read/write
- * @buf_align: minimum buffer alignment required by a platform
- * @hwcontrol: platform-specific hardware control structure
- * @erase: [REPLACEABLE] erase function
- * @scan_bbt: [REPLACEABLE] function to scan bad block table
- * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring
- * data from array to read regs (tR).
- * @state: [INTERN] the current state of the NAND device
- * @oob_poi: "poison value buffer," used for laying out OOB data
- * before writing
- * @page_shift: [INTERN] number of address bits in a page (column
- * address bits).
- * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock
- * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry
- * @chip_shift: [INTERN] number of address bits in one chip
- * @options: [BOARDSPECIFIC] various chip options. They can partly
- * be set to inform nand_scan about special functionality.
- * See the defines for further explanation.
- * @bbt_options: [INTERN] bad block specific options. All options used
- * here must come from bbm.h. By default, these options
- * will be copied to the appropriate nand_bbt_descr's.
- * @badblockpos: [INTERN] position of the bad block marker in the oob
- * area.
- * @badblockbits: [INTERN] minimum number of set bits in a good block's
- * bad block marker position; i.e., BBM == 11110111b is
- * not bad when badblockbits == 7
- * @bits_per_cell: [INTERN] number of bits per cell. i.e., 1 means SLC.
- * @ecc_strength_ds: [INTERN] ECC correctability from the datasheet.
- * Minimum amount of bit errors per @ecc_step_ds guaranteed
- * to be correctable. If unknown, set to zero.
- * @ecc_step_ds: [INTERN] ECC step required by the @ecc_strength_ds,
- * also from the datasheet. It is the recommended ECC step
- * size, if known; if unknown, set to zero.
- * @onfi_timing_mode_default: [INTERN] default ONFI timing mode. This field is
- * set to the actually used ONFI mode if the chip is
- * ONFI compliant or deduced from the datasheet if
- * the NAND chip is not ONFI compliant.
- * @numchips: [INTERN] number of physical chips
- * @chipsize: [INTERN] the size of one chip for multichip arrays
- * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
- * @pagebuf: [INTERN] holds the pagenumber which is currently in
- * data_buf.
- * @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is
- * currently in data_buf.
- * @subpagesize: [INTERN] holds the subpagesize
- * @id: [INTERN] holds NAND ID
- * @onfi_version: [INTERN] holds the chip ONFI version (BCD encoded),
- * non 0 if ONFI supported.
- * @jedec_version: [INTERN] holds the chip JEDEC version (BCD encoded),
- * non 0 if JEDEC supported.
- * @onfi_params: [INTERN] holds the ONFI page parameter when ONFI is
- * supported, 0 otherwise.
- * @jedec_params: [INTERN] holds the JEDEC parameter page when JEDEC is
- * supported, 0 otherwise.
- * @max_bb_per_die: [INTERN] the max number of bad blocks each die of a
- * this nand device will encounter their life times.
- * @blocks_per_die: [INTERN] The number of PEBs in a die
- * @data_interface: [INTERN] NAND interface timing information
- * @read_retries: [INTERN] the number of read retry modes supported
- * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand
- * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand
- * @setup_data_interface: [OPTIONAL] setup the data interface and timing. If
- * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this
- * means the configuration should not be applied but
- * only checked.
- * @bbt: [INTERN] bad block table pointer
- * @bbt_td: [REPLACEABLE] bad block table descriptor for flash
- * lookup.
- * @bbt_md: [REPLACEABLE] bad block table mirror descriptor
- * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial
- * bad block scan.
- * @controller: [REPLACEABLE] a pointer to a hardware controller
- * structure which is shared among multiple independent
- * devices.
- * @priv: [OPTIONAL] pointer to private chip data
- * @manufacturer: [INTERN] Contains manufacturer information
+ * struct nand_op_addr_instr - Definition of an address instruction
+ * @naddrs: length of the @addrs array
+ * @addrs: array containing the address cycles to issue
*/
+struct nand_op_addr_instr {
+ unsigned int naddrs;
+ const u8 *addrs;
+};
-struct nand_chip {
- struct mtd_info mtd;
- void __iomem *IO_ADDR_R;
- void __iomem *IO_ADDR_W;
+/**
+ * struct nand_op_data_instr - Definition of a data instruction
+ * @len: number of data bytes to move
+ * @buf: buffer to fill
+ * @buf.in: buffer to fill when reading from the NAND chip
+ * @buf.out: buffer to read from when writing to the NAND chip
+ * @force_8bit: force 8-bit access
+ *
+ * Please note that "in" and "out" are inverted from the ONFI specification
+ * and are from the controller's perspective, so an "in" is a read from the NAND
+ * chip while an "out" is a write to the NAND chip.
+ */
+struct nand_op_data_instr {
+ unsigned int len;
+ union {
+ void *in;
+ const void *out;
+ } buf;
+ bool force_8bit;
+};
- uint8_t (*read_byte)(struct mtd_info *mtd);
- u16 (*read_word)(struct mtd_info *mtd);
- void (*write_byte)(struct mtd_info *mtd, uint8_t byte);
- void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
- void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
- void (*select_chip)(struct mtd_info *mtd, int chip);
- int (*block_bad)(struct mtd_info *mtd, loff_t ofs);
- int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
- void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
- int (*dev_ready)(struct mtd_info *mtd);
- void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column,
- int page_addr);
- int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this);
- int (*erase)(struct mtd_info *mtd, int page);
- int (*scan_bbt)(struct mtd_info *mtd);
- int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip,
- int feature_addr, uint8_t *subfeature_para);
- int (*onfi_get_features)(struct mtd_info *mtd, struct nand_chip *chip,
- int feature_addr, uint8_t *subfeature_para);
- int (*setup_read_retry)(struct mtd_info *mtd, int retry_mode);
- int (*setup_data_interface)(struct mtd_info *mtd, int chipnr,
- const struct nand_data_interface *conf);
+/**
+ * struct nand_op_waitrdy_instr - Definition of a wait ready instruction
+ * @timeout_ms: maximum delay while waiting for the ready/busy pin in ms
+ */
+struct nand_op_waitrdy_instr {
+ unsigned int timeout_ms;
+};
+/**
+ * enum nand_op_instr_type - Definition of all instruction types
+ * @NAND_OP_CMD_INSTR: command instruction
+ * @NAND_OP_ADDR_INSTR: address instruction
+ * @NAND_OP_DATA_IN_INSTR: data in instruction
+ * @NAND_OP_DATA_OUT_INSTR: data out instruction
+ * @NAND_OP_WAITRDY_INSTR: wait ready instruction
+ */
+enum nand_op_instr_type {
+ NAND_OP_CMD_INSTR,
+ NAND_OP_ADDR_INSTR,
+ NAND_OP_DATA_IN_INSTR,
+ NAND_OP_DATA_OUT_INSTR,
+ NAND_OP_WAITRDY_INSTR,
+};
- int chip_delay;
- unsigned int options;
- unsigned int bbt_options;
+/**
+ * struct nand_op_instr - Instruction object
+ * @type: the instruction type
+ * @ctx: extra data associated with the instruction. You'll have to use the
+ * appropriate element depending on @type
+ * @ctx.cmd: use it if @type is %NAND_OP_CMD_INSTR
+ * @ctx.addr: use it if @type is %NAND_OP_ADDR_INSTR
+ * @ctx.data: use it if @type is %NAND_OP_DATA_IN_INSTR
+ * or %NAND_OP_DATA_OUT_INSTR
+ * @ctx.waitrdy: use it if @type is %NAND_OP_WAITRDY_INSTR
+ * @delay_ns: delay the controller should apply after the instruction has been
+ * issued on the bus. Most modern controllers have internal timing
+ * control logic, and in this case the controller driver can ignore
+ * this field.
+ */
+struct nand_op_instr {
+ enum nand_op_instr_type type;
+ union {
+ struct nand_op_cmd_instr cmd;
+ struct nand_op_addr_instr addr;
+ struct nand_op_data_instr data;
+ struct nand_op_waitrdy_instr waitrdy;
+ } ctx;
+ unsigned int delay_ns;
+};
- int page_shift;
- int phys_erase_shift;
- int bbt_erase_shift;
- int chip_shift;
- int numchips;
- uint64_t chipsize;
- int pagemask;
- int pagebuf;
- unsigned int pagebuf_bitflips;
- int subpagesize;
- uint8_t bits_per_cell;
- uint16_t ecc_strength_ds;
- uint16_t ecc_step_ds;
- int onfi_timing_mode_default;
- int badblockpos;
- int badblockbits;
+/*
+ * Special handling must be done for the WAITRDY timeout parameter as it usually
+ * is either tPROG (after a prog), tR (before a read), tRST (during a reset) or
+ * tBERS (during an erase) which all of them are u64 values that cannot be
+ * divided by usual kernel macros and must be handled with the special
+ * DIV_ROUND_UP_ULL() macro.
+ *
+ * Cast to type of dividend is needed here to guarantee that the result won't
+ * be an unsigned long long when the dividend is an unsigned long (or smaller),
+ * which is what the compiler does when it sees ternary operator with 2
+ * different return types (picks the largest type to make sure there's no
+ * loss).
+ */
+#define __DIVIDE(dividend, divisor) ({ \
+ (__typeof__(dividend))(sizeof(dividend) <= sizeof(unsigned long) ? \
+ DIV_ROUND_UP(dividend, divisor) : \
+ DIV_ROUND_UP_ULL(dividend, divisor)); \
+ })
+#define PSEC_TO_NSEC(x) __DIVIDE(x, 1000)
+#define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000)
+
+#define NAND_OP_CMD(id, ns) \
+ { \
+ .type = NAND_OP_CMD_INSTR, \
+ .ctx.cmd.opcode = id, \
+ .delay_ns = ns, \
+ }
- struct nand_id id;
- int onfi_version;
- int jedec_version;
+#define NAND_OP_ADDR(ncycles, cycles, ns) \
+ { \
+ .type = NAND_OP_ADDR_INSTR, \
+ .ctx.addr = { \
+ .naddrs = ncycles, \
+ .addrs = cycles, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_DATA_IN(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_IN_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.in = b, \
+ .force_8bit = false, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_DATA_OUT(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_OUT_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.out = b, \
+ .force_8bit = false, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_8BIT_DATA_IN(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_IN_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.in = b, \
+ .force_8bit = true, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_8BIT_DATA_OUT(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_OUT_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.out = b, \
+ .force_8bit = true, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_WAIT_RDY(tout_ms, ns) \
+ { \
+ .type = NAND_OP_WAITRDY_INSTR, \
+ .ctx.waitrdy.timeout_ms = tout_ms, \
+ .delay_ns = ns, \
+ }
+
+/**
+ * struct nand_subop - a sub operation
+ * @cs: the CS line to select for this NAND sub-operation
+ * @instrs: array of instructions
+ * @ninstrs: length of the @instrs array
+ * @first_instr_start_off: offset to start from for the first instruction
+ * of the sub-operation
+ * @last_instr_end_off: offset to end at (excluded) for the last instruction
+ * of the sub-operation
+ *
+ * Both @first_instr_start_off and @last_instr_end_off only apply to data or
+ * address instructions.
+ *
+ * When an operation cannot be handled as is by the NAND controller, it will
+ * be split by the parser into sub-operations which will be passed to the
+ * controller driver.
+ */
+struct nand_subop {
+ unsigned int cs;
+ const struct nand_op_instr *instrs;
+ unsigned int ninstrs;
+ unsigned int first_instr_start_off;
+ unsigned int last_instr_end_off;
+};
+
+unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+ unsigned int op_id);
+unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+ unsigned int op_id);
+unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
+ unsigned int op_id);
+unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
+ unsigned int op_id);
+
+/**
+ * struct nand_op_parser_addr_constraints - Constraints for address instructions
+ * @maxcycles: maximum number of address cycles the controller can issue in a
+ * single step
+ */
+struct nand_op_parser_addr_constraints {
+ unsigned int maxcycles;
+};
+
+/**
+ * struct nand_op_parser_data_constraints - Constraints for data instructions
+ * @maxlen: maximum data length that the controller can handle in a single step
+ */
+struct nand_op_parser_data_constraints {
+ unsigned int maxlen;
+};
+
+/**
+ * struct nand_op_parser_pattern_elem - One element of a pattern
+ * @type: the instruction type
+ * @optional: whether this element of the pattern is optional or mandatory
+ * @ctx: address or data constraint
+ * @ctx.addr: address constraint (number of cycles)
+ * @ctx.data: data constraint (data length)
+ */
+struct nand_op_parser_pattern_elem {
+ enum nand_op_instr_type type;
+ bool optional;
union {
- struct nand_onfi_params onfi_params;
- struct nand_jedec_params jedec_params;
- };
- u16 max_bb_per_die;
- u32 blocks_per_die;
+ struct nand_op_parser_addr_constraints addr;
+ struct nand_op_parser_data_constraints data;
+ } ctx;
+};
- struct nand_data_interface *data_interface;
+#define NAND_OP_PARSER_PAT_CMD_ELEM(_opt) \
+ { \
+ .type = NAND_OP_CMD_INSTR, \
+ .optional = _opt, \
+ }
- int read_retries;
+#define NAND_OP_PARSER_PAT_ADDR_ELEM(_opt, _maxcycles) \
+ { \
+ .type = NAND_OP_ADDR_INSTR, \
+ .optional = _opt, \
+ .ctx.addr.maxcycles = _maxcycles, \
+ }
- flstate_t state;
+#define NAND_OP_PARSER_PAT_DATA_IN_ELEM(_opt, _maxlen) \
+ { \
+ .type = NAND_OP_DATA_IN_INSTR, \
+ .optional = _opt, \
+ .ctx.data.maxlen = _maxlen, \
+ }
- uint8_t *oob_poi;
- struct nand_hw_control *controller;
+#define NAND_OP_PARSER_PAT_DATA_OUT_ELEM(_opt, _maxlen) \
+ { \
+ .type = NAND_OP_DATA_OUT_INSTR, \
+ .optional = _opt, \
+ .ctx.data.maxlen = _maxlen, \
+ }
- struct nand_ecc_ctrl ecc;
- struct nand_buffers *buffers;
- unsigned long buf_align;
- struct nand_hw_control hwcontrol;
+#define NAND_OP_PARSER_PAT_WAITRDY_ELEM(_opt) \
+ { \
+ .type = NAND_OP_WAITRDY_INSTR, \
+ .optional = _opt, \
+ }
- uint8_t *bbt;
- struct nand_bbt_descr *bbt_td;
- struct nand_bbt_descr *bbt_md;
+/**
+ * struct nand_op_parser_pattern - NAND sub-operation pattern descriptor
+ * @elems: array of pattern elements
+ * @nelems: number of pattern elements in @elems array
+ * @exec: the function that will issue a sub-operation
+ *
+ * A pattern is a list of elements, each element representing one instruction
+ * with its constraints. The pattern itself is used by the core to match NAND
+ * chip operations with NAND controller operations.
+ * Once a match between a NAND controller operation pattern and a NAND chip
+ * operation (or a sub-set of a NAND operation) is found, the pattern ->exec()
+ * hook is called so that the controller driver can issue the operation on the
+ * bus.
+ *
+ * Controller drivers should declare as many patterns as they support and pass
+ * this list of patterns (created with the help of the following macro) to
+ * the nand_op_parser_exec_op() helper.
+ */
+struct nand_op_parser_pattern {
+ const struct nand_op_parser_pattern_elem *elems;
+ unsigned int nelems;
+ int (*exec)(struct nand_chip *chip, const struct nand_subop *subop);
+};
- struct nand_bbt_descr *badblock_pattern;
+#define NAND_OP_PARSER_PATTERN(_exec, ...) \
+ { \
+ .exec = _exec, \
+ .elems = (const struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }, \
+ .nelems = sizeof((struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }) / \
+ sizeof(struct nand_op_parser_pattern_elem), \
+ }
- void *priv;
+/**
+ * struct nand_op_parser - NAND controller operation parser descriptor
+ * @patterns: array of supported patterns
+ * @npatterns: length of the @patterns array
+ *
+ * The parser descriptor is just an array of supported patterns which will be
+ * iterated over by nand_op_parser_exec_op() every time it tries to execute a
+ * NAND operation (or tries to determine if a specific operation is supported).
+ *
+ * It is worth mentioning that patterns will be tested in their declaration
+ * order, and the first match will be taken, so it's important to order patterns
+ * appropriately so that simple/inefficient patterns are placed at the end of
+ * the list. Usually, this is where you put single instruction patterns.
+ */
+struct nand_op_parser {
+ const struct nand_op_parser_pattern *patterns;
+ unsigned int npatterns;
+};
- struct {
- const struct nand_manufacturer *desc;
- void *priv;
- } manufacturer;
+#define NAND_OP_PARSER(...) \
+ { \
+ .patterns = (const struct nand_op_parser_pattern[]) { __VA_ARGS__ }, \
+ .npatterns = sizeof((struct nand_op_parser_pattern[]) { __VA_ARGS__ }) / \
+ sizeof(struct nand_op_parser_pattern), \
+ }
+
+/**
+ * struct nand_operation - NAND operation descriptor
+ * @cs: the CS line to select for this NAND operation
+ * @instrs: array of instructions to execute
+ * @ninstrs: length of the @instrs array
+ *
+ * The actual operation structure that will be passed to chip->exec_op().
+ */
+struct nand_operation {
+ unsigned int cs;
+ const struct nand_op_instr *instrs;
+ unsigned int ninstrs;
};
-extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
-extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
+#define NAND_OPERATION(_cs, _instrs) \
+ { \
+ .cs = _cs, \
+ .instrs = _instrs, \
+ .ninstrs = ARRAY_SIZE(_instrs), \
+ }
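[Editor's sketch, not part of the patch: a complete operation built from the instruction macros above and issued through the controller's ->exec_op() hook. The my_* name is illustrative and the 250 ms timeout stands in for a tRST_max-derived value.]

static int my_reset_op(struct nand_chip *chip)
{
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_RESET, 0),
		NAND_OP_WAIT_RDY(250, 0),	/* timeout in ms */
	};
	struct nand_operation op = NAND_OPERATION(0 /* CS 0 */, instrs);

	return chip->controller->ops->exec_op(chip, &op, false);
}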
-static inline void nand_set_flash_node(struct nand_chip *chip,
- struct device_node *np)
+int nand_op_parser_exec_op(struct nand_chip *chip,
+ const struct nand_op_parser *parser,
+ const struct nand_operation *op, bool check_only);
+
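[Editor's sketch, not part of the patch: a controller-side parser, with my_* names illustrative. The driver declares the patterns its hardware can execute and lets nand_op_parser_exec_op() split incoming operations accordingly.]

static int my_exec_pattern(struct nand_chip *chip,
			   const struct nand_subop *subop);	/* illustrative */

static const struct nand_op_parser my_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(my_exec_pattern,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 2048)));

static int my_exec_op(struct nand_chip *chip,
		      const struct nand_operation *op, bool check_only)
{
	return nand_op_parser_exec_op(chip, &my_parser, op, check_only);
}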
+static inline void nand_op_trace(const char *prefix,
+ const struct nand_op_instr *instr)
{
- mtd_set_of_node(&chip->mtd, np);
+#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ pr_debug("%sCMD [0x%02x]\n", prefix,
+ instr->ctx.cmd.opcode);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ pr_debug("%sADDR [%d cyc: %*ph]\n", prefix,
+ instr->ctx.addr.naddrs,
+ instr->ctx.addr.naddrs < 64 ?
+ instr->ctx.addr.naddrs : 64,
+ instr->ctx.addr.addrs);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ pr_debug("%sDATA_IN [%d B%s]\n", prefix,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit ?
+ ", force 8-bit" : "");
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit ?
+ ", force 8-bit" : "");
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ pr_debug("%sWAITRDY [max %d ms]\n", prefix,
+ instr->ctx.waitrdy.timeout_ms);
+ break;
+ }
+#endif
}
-static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
+/**
+ * struct nand_controller_ops - Controller operations
+ *
+ * @attach_chip: this method is called after the NAND detection phase, once
+ * the flash ID and MTD fields such as erase size, page size and OOB
+ * size have been set up. ECC requirements are available if
+ * provided by the NAND chip or device tree. Typically used to
+ * choose the appropriate ECC configuration and allocate
+ * associated resources.
+ * This hook is optional.
+ * @detach_chip: free all resources allocated/claimed in
+ * nand_controller_ops->attach_chip().
+ * This hook is optional.
+ * @exec_op: controller specific method to execute NAND operations.
+ * This method replaces chip->legacy.cmdfunc(),
+ * chip->legacy.{read,write}_{buf,byte,word}(),
+ * chip->legacy.dev_ready() and chip->legacy.waitfunc().
+ * @setup_interface: setup the data interface and timing. If chipnr is set to
+ * %NAND_DATA_IFACE_CHECK_ONLY this means the configuration
+ * should not be applied but only checked.
+ * This hook is optional.
+ */
+struct nand_controller_ops {
+ int (*attach_chip)(struct nand_chip *chip);
+ void (*detach_chip)(struct nand_chip *chip);
+ int (*exec_op)(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only);
+ int (*setup_interface)(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf);
+};
+
+/**
+ * struct nand_controller - Structure used to describe a NAND controller
+ *
+ * @lock: lock used to serialize accesses to the NAND controller
+ * @ops: NAND controller operations.
+ * @supported_op: NAND controller known-to-be-supported operations,
+ * only writable by the core after initial checking.
+ * @supported_op.data_only_read: The controller supports reading more data from
+ * the bus without restarting an entire read operation or
+ * changing the column.
+ * @supported_op.cont_read: The controller supports sequential cache reads.
+ */
+struct nand_controller {
+ struct mutex lock;
+ const struct nand_controller_ops *ops;
+ struct {
+ unsigned int data_only_read: 1;
+ unsigned int cont_read: 1;
+ } supported_op;
+};
+
+static inline void nand_controller_init(struct nand_controller *nfc)
{
- return mtd_get_of_node(&chip->mtd);
+ mutex_init(&nfc->lock);
}
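[Editor's sketch, not part of the patch: how a driver ties these pieces together, assuming a driver-private struct that embeds a struct nand_controller as ->base; the my_nfc_* names are illustrative.]

static const struct nand_controller_ops my_nfc_ops = {
	.attach_chip	 = my_nfc_attach_chip,
	.exec_op	 = my_nfc_exec_op,
	.setup_interface = my_nfc_setup_interface,
};

/* in the probe path: */
nand_controller_init(&nfc->base);
nfc->base.ops = &my_nfc_ops;
chip->controller = &nfc->base;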
+/**
+ * struct nand_legacy - NAND chip legacy fields/hooks
+ * @IO_ADDR_R: address to read the 8 I/O lines of the flash device
+ * @IO_ADDR_W: address to write the 8 I/O lines of the flash device
+ * @select_chip: select/deselect a specific target/die
+ * @read_byte: read one byte from the chip
+ * @write_byte: write a single byte to the chip on the low 8 I/O lines
+ * @write_buf: write data from the buffer to the chip
+ * @read_buf: read data from the chip into the buffer
+ * @cmd_ctrl: hardware specific function for controlling ALE/CLE/nCE. Also used
+ * to write command and address
+ * @cmdfunc: hardware specific function for writing commands to the chip.
+ * @dev_ready: hardware specific function for accessing device ready/busy line.
+ * If set to NULL no access to ready/busy is available and the
+ * ready/busy information is read from the chip status register.
+ * @waitfunc: hardware specific function for wait on ready.
+ * @block_bad: check if a block is bad, using OOB markers
+ * @block_markbad: mark a block bad
+ * @set_features: set the NAND chip features
+ * @get_features: get the NAND chip features
+ * @chip_delay: chip dependent delay for transferring data from array to read
+ * regs (tR).
+ * @dummy_controller: dummy controller implementation for drivers that can
+ * only control a single chip
+ *
+ * If you look at this structure you're already wrong. These fields/hooks are
+ * all deprecated.
+ */
+struct nand_legacy {
+ void __iomem *IO_ADDR_R;
+ void __iomem *IO_ADDR_W;
+ void (*select_chip)(struct nand_chip *chip, int cs);
+ u8 (*read_byte)(struct nand_chip *chip);
+ void (*write_byte)(struct nand_chip *chip, u8 byte);
+ void (*write_buf)(struct nand_chip *chip, const u8 *buf, int len);
+ void (*read_buf)(struct nand_chip *chip, u8 *buf, int len);
+ void (*cmd_ctrl)(struct nand_chip *chip, int dat, unsigned int ctrl);
+ void (*cmdfunc)(struct nand_chip *chip, unsigned command, int column,
+ int page_addr);
+ int (*dev_ready)(struct nand_chip *chip);
+ int (*waitfunc)(struct nand_chip *chip);
+ int (*block_bad)(struct nand_chip *chip, loff_t ofs);
+ int (*block_markbad)(struct nand_chip *chip, loff_t ofs);
+ int (*set_features)(struct nand_chip *chip, int feature_addr,
+ u8 *subfeature_para);
+ int (*get_features)(struct nand_chip *chip, int feature_addr,
+ u8 *subfeature_para);
+ int chip_delay;
+ struct nand_controller dummy_controller;
+};
+
+/**
+ * struct nand_chip_ops - NAND chip operations
+ * @suspend: Suspend operation
+ * @resume: Resume operation
+ * @lock_area: Lock operation
+ * @unlock_area: Unlock operation
+ * @setup_read_retry: Set the read-retry mode (mostly needed for MLC NANDs)
+ * @choose_interface_config: Choose the best interface configuration
+ */
+struct nand_chip_ops {
+ int (*suspend)(struct nand_chip *chip);
+ void (*resume)(struct nand_chip *chip);
+ int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
+ int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
+ int (*setup_read_retry)(struct nand_chip *chip, int retry_mode);
+ int (*choose_interface_config)(struct nand_chip *chip,
+ struct nand_interface_config *iface);
+};
+
+/**
+ * struct nand_manufacturer - NAND manufacturer structure
+ * @desc: The manufacturer description
+ * @priv: Private information for the manufacturer driver
+ */
+struct nand_manufacturer {
+ const struct nand_manufacturer_desc *desc;
+ void *priv;
+};
+
+/**
+ * struct nand_secure_region - NAND secure region structure
+ * @offset: Offset of the start of the secure region
+ * @size: Size of the secure region
+ */
+struct nand_secure_region {
+ u64 offset;
+ u64 size;
+};
+
+/**
+ * struct nand_chip - NAND Private Flash Chip Data
+ * @base: Inherit from the generic NAND device
+ * @id: Holds NAND ID
+ * @parameters: Holds generic parameters under an easily readable form
+ * @manufacturer: Manufacturer information
+ * @ops: NAND chip operations
+ * @legacy: All legacy fields/hooks. If you develop a new driver, don't even try
+ * to use any of these fields/hooks, and if you're modifying an
+ * existing driver that is using those fields/hooks, you should
+ * consider reworking the driver and avoid using them.
+ * @options: Various chip options. They can partly be set to inform nand_scan
+ * about special functionality. See the defines for further
+ * explanation.
+ * @current_interface_config: The currently used NAND interface configuration
+ * @best_interface_config: The best NAND interface configuration which fits both
+ * the NAND chip and NAND controller constraints. If
+ * unset, the default reset interface configuration must
+ * be used.
+ * @bbt_erase_shift: Number of address bits in a bbt entry
+ * @bbt_options: Bad block table specific options. All options used here must
+ * come from bbm.h. By default, these options will be copied to
+ * the appropriate nand_bbt_descr's.
+ * @badblockpos: Bad block marker position in the oob area
+ * @badblockbits: Minimum number of set bits in a good block's bad block marker
+ * position; i.e., BBM = 11110111b is good when badblockbits = 7
+ * @bbt_td: Bad block table descriptor for flash lookup
+ * @bbt_md: Bad block table mirror descriptor
+ * @badblock_pattern: Bad block scan pattern used for initial bad block scan
+ * @bbt: Bad block table pointer
+ * @page_shift: Number of address bits in a page (column address bits)
+ * @phys_erase_shift: Number of address bits in a physical eraseblock
+ * @chip_shift: Number of address bits in one chip
+ * @pagemask: Page number mask = number of (pages / chip) - 1
+ * @subpagesize: Holds the subpagesize
+ * @data_buf: Buffer for data, size is (page size + oobsize)
+ * @oob_poi: pointer to the OOB area covered by data_buf
+ * @pagecache: Structure containing page cache related fields
+ * @pagecache.bitflips: Number of bitflips of the cached page
+ * @pagecache.page: Page number currently in the cache. -1 means no page is
+ * currently cached
+ * @buf_align: Minimum buffer alignment required by a platform
+ * @lock: Lock protecting the suspended field. Also used to serialize accesses
+ * to the NAND device
+ * @suspended: Set to 1 when the device is suspended, 0 when it's not
+ * @resume_wq: wait queue to sleep on while the rawnand device is suspended.
+ * @cur_cs: Currently selected target. -1 means no target selected, otherwise we
+ * should always have cur_cs >= 0 && cur_cs < nanddev_ntargets().
+ * NAND Controller drivers should not modify this value, but they're
+ * allowed to read it.
+ * @read_retries: The number of read retry modes supported
+ * @secure_regions: Structure containing the secure regions info
+ * @nr_secure_regions: Number of secure regions
+ * @cont_read: Sequential page read internals
+ * @cont_read.ongoing: Whether a continuous read is ongoing or not
+ * @cont_read.first_page: Start of the continuous read operation
+ * @cont_read.last_page: End of the continuous read operation
+ * @controller: The hardware controller structure which is shared among multiple
+ * independent devices
+ * @ecc: The ECC controller structure
+ * @priv: Chip private data
+ */
+struct nand_chip {
+ struct nand_device base;
+ struct nand_id id;
+ struct nand_parameters parameters;
+ struct nand_manufacturer manufacturer;
+ struct nand_chip_ops ops;
+ struct nand_legacy legacy;
+ unsigned int options;
+
+ /* Data interface */
+ const struct nand_interface_config *current_interface_config;
+ struct nand_interface_config *best_interface_config;
+
+ /* Bad block information */
+ unsigned int bbt_erase_shift;
+ unsigned int bbt_options;
+ unsigned int badblockpos;
+ unsigned int badblockbits;
+ struct nand_bbt_descr *bbt_td;
+ struct nand_bbt_descr *bbt_md;
+ struct nand_bbt_descr *badblock_pattern;
+ u8 *bbt;
+
+ /* Device internal layout */
+ unsigned int page_shift;
+ unsigned int phys_erase_shift;
+ unsigned int chip_shift;
+ unsigned int pagemask;
+ unsigned int subpagesize;
+
+ /* Buffers */
+ u8 *data_buf;
+ u8 *oob_poi;
+ struct {
+ unsigned int bitflips;
+ int page;
+ } pagecache;
+ unsigned long buf_align;
+
+ /* Internals */
+ struct mutex lock;
+ unsigned int suspended : 1;
+ wait_queue_head_t resume_wq;
+ int cur_cs;
+ int read_retries;
+ struct nand_secure_region *secure_regions;
+ u8 nr_secure_regions;
+ struct {
+ bool ongoing;
+ unsigned int first_page;
+ unsigned int last_page;
+ } cont_read;
+
+ /* Externals */
+ struct nand_controller *controller;
+ struct nand_ecc_ctrl ecc;
+ void *priv;
+};
+
static inline struct nand_chip *mtd_to_nand(struct mtd_info *mtd)
{
- return container_of(mtd, struct nand_chip, mtd);
+ return container_of(mtd, struct nand_chip, base.mtd);
}
static inline struct mtd_info *nand_to_mtd(struct nand_chip *chip)
{
- return &chip->mtd;
+ return &chip->base.mtd;
}
static inline void *nand_get_controller_data(struct nand_chip *chip)
@@ -998,26 +1359,27 @@ static inline void *nand_get_manufacturer_data(struct nand_chip *chip)
return chip->manufacturer.priv;
}
-/*
- * NAND Flash Manufacturer ID Codes
- */
-#define NAND_MFR_TOSHIBA 0x98
-#define NAND_MFR_ESMT 0xc8
-#define NAND_MFR_SAMSUNG 0xec
-#define NAND_MFR_FUJITSU 0x04
-#define NAND_MFR_NATIONAL 0x8f
-#define NAND_MFR_RENESAS 0x07
-#define NAND_MFR_STMICRO 0x20
-#define NAND_MFR_HYNIX 0xad
-#define NAND_MFR_MICRON 0x2c
-#define NAND_MFR_AMD 0x01
-#define NAND_MFR_MACRONIX 0xc2
-#define NAND_MFR_EON 0x92
-#define NAND_MFR_SANDISK 0x45
-#define NAND_MFR_INTEL 0x89
-#define NAND_MFR_ATO 0x9b
-#define NAND_MFR_WINBOND 0xef
+static inline void nand_set_flash_node(struct nand_chip *chip,
+ struct device_node *np)
+{
+ mtd_set_of_node(nand_to_mtd(chip), np);
+}
+
+static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
+{
+ return mtd_get_of_node(nand_to_mtd(chip));
+}
+/**
+ * nand_get_interface_config - Retrieve the current interface configuration
+ * of a NAND chip
+ * @chip: The NAND chip
+ */
+static inline const struct nand_interface_config *
+nand_get_interface_config(struct nand_chip *chip)
+{
+ return chip->current_interface_config;
+}
/*
* A helper for defining older NAND chips where the second ID byte fully
@@ -1051,10 +1413,10 @@ static inline void *nand_get_manufacturer_data(struct nand_chip *chip)
* struct nand_flash_dev - NAND Flash Device ID Structure
* @name: a human-readable name of the NAND chip
* @dev_id: the device ID (the second byte of the full chip ID array)
- * @mfr_id: manufecturer ID part of the full chip ID array (refers the same
- * memory address as @id[0])
+ * @mfr_id: manufacturer ID part of the full chip ID array (refers to the
+ * same memory address as ``id[0]``)
 * @dev_id: device ID part of the full chip ID array (refers to the same memory
- * address as @id[1])
+ * address as ``id[1]``)
* @id: full device ID array
* @pagesize: size of the NAND page in bytes; if 0, then the real page size (as
* well as the eraseblock size) is determined from the extended NAND
@@ -1071,10 +1433,6 @@ static inline void *nand_get_manufacturer_data(struct nand_chip *chip)
* @ecc_step_ds in nand_chip{}, also from the datasheet.
* For example, the "4bit ECC for each 512Byte" can be set with
* NAND_ECC_INFO(4, 512).
- * @onfi_timing_mode_default: the default ONFI timing mode entered after a NAND
- * reset. Should be deduced from timings described
- * in the datasheet.
- *
*/
struct nand_flash_dev {
char *name;
@@ -1095,137 +1453,9 @@ struct nand_flash_dev {
uint16_t strength_ds;
uint16_t step_ds;
} ecc;
- int onfi_timing_mode_default;
};
-/**
- * struct nand_manufacturer - NAND Flash Manufacturer structure
- * @name: Manufacturer name
- * @id: manufacturer ID code of device.
- * @ops: manufacturer operations
-*/
-struct nand_manufacturer {
- int id;
- char *name;
- const struct nand_manufacturer_ops *ops;
-};
-
-const struct nand_manufacturer *nand_get_manufacturer(u8 id);
-
-static inline const char *
-nand_manufacturer_name(const struct nand_manufacturer *manufacturer)
-{
- return manufacturer ? manufacturer->name : "Unknown";
-}
-
-extern struct nand_flash_dev nand_flash_ids[];
-
-extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops;
-extern const struct nand_manufacturer_ops samsung_nand_manuf_ops;
-extern const struct nand_manufacturer_ops hynix_nand_manuf_ops;
-extern const struct nand_manufacturer_ops micron_nand_manuf_ops;
-extern const struct nand_manufacturer_ops amd_nand_manuf_ops;
-extern const struct nand_manufacturer_ops macronix_nand_manuf_ops;
-
-int nand_default_bbt(struct mtd_info *mtd);
-int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
-int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs);
-int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt);
-int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
- int allowbbt);
-int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, uint8_t *buf);
-
-/**
- * struct platform_nand_chip - chip level device structure
- * @nr_chips: max. number of chips to scan for
- * @chip_offset: chip number offset
- * @nr_partitions: number of partitions pointed to by partitions (or zero)
- * @partitions: mtd partition list
- * @chip_delay: R/B delay value in us
- * @options: Option flags, e.g. 16bit buswidth
- * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH
- * @part_probe_types: NULL-terminated array of probe types
- */
-struct platform_nand_chip {
- int nr_chips;
- int chip_offset;
- int nr_partitions;
- struct mtd_partition *partitions;
- int chip_delay;
- unsigned int options;
- unsigned int bbt_options;
- const char **part_probe_types;
-};
-
-/* Keep gcc happy */
-struct platform_device;
-
-/**
- * struct platform_nand_ctrl - controller level device structure
- * @probe: platform specific function to probe/setup hardware
- * @remove: platform specific function to remove/teardown hardware
- * @hwcontrol: platform specific hardware control structure
- * @dev_ready: platform specific function to read ready/busy pin
- * @select_chip: platform specific chip select function
- * @cmd_ctrl: platform specific function for controlling
- * ALE/CLE/nCE. Also used to write command and address
- * @write_buf: platform specific function for write buffer
- * @read_buf: platform specific function for read buffer
- * @read_byte: platform specific function to read one byte from chip
- * @priv: private data to transport driver specific settings
- *
- * All fields are optional and depend on the hardware driver requirements
- */
-struct platform_nand_ctrl {
- int (*probe)(struct platform_device *pdev);
- void (*remove)(struct platform_device *pdev);
- void (*hwcontrol)(struct mtd_info *mtd, int cmd);
- int (*dev_ready)(struct mtd_info *mtd);
- void (*select_chip)(struct mtd_info *mtd, int chip);
- void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
- void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
- void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
- unsigned char (*read_byte)(struct mtd_info *mtd);
- void *priv;
-};
-
-/**
- * struct platform_nand_data - container structure for platform-specific data
- * @chip: chip level chip structure
- * @ctrl: controller level device structure
- */
-struct platform_nand_data {
- struct platform_nand_chip chip;
- struct platform_nand_ctrl ctrl;
-};
-
-/* return the supported features. */
-static inline int onfi_feature(struct nand_chip *chip)
-{
- return chip->onfi_version ? le16_to_cpu(chip->onfi_params.features) : 0;
-}
-
-/* return the supported asynchronous timing mode. */
-static inline int onfi_get_async_timing_mode(struct nand_chip *chip)
-{
- if (!chip->onfi_version)
- return ONFI_TIMING_MODE_UNKNOWN;
- return le16_to_cpu(chip->onfi_params.async_timing_mode);
-}
-
-/* return the supported synchronous timing mode. */
-static inline int onfi_get_sync_timing_mode(struct nand_chip *chip)
-{
- if (!chip->onfi_version)
- return ONFI_TIMING_MODE_UNKNOWN;
- return le16_to_cpu(chip->onfi_params.src_sync_timing_mode);
-}
-
-int onfi_init_data_interface(struct nand_chip *chip,
- struct nand_data_interface *iface,
- enum nand_data_interface_type type,
- int timing_mode);
+int nand_create_bbt(struct nand_chip *chip);
/*
* Check if it is a SLC nand.
@@ -1234,13 +1464,14 @@ int onfi_init_data_interface(struct nand_chip *chip,
*/
static inline bool nand_is_slc(struct nand_chip *chip)
{
- WARN(chip->bits_per_cell == 0,
+ WARN(nanddev_bits_per_cell(&chip->base) == 0,
"chip->bits_per_cell is used uninitialized\n");
- return chip->bits_per_cell == 1;
+ return nanddev_bits_per_cell(&chip->base) == 1;
}
/**
- * Check if the opcode's address should be sent only on the lower 8 bits
+ * nand_opcode_8bits - Check if the opcode's address should be sent only on the
+ * lower 8 bits
* @command: opcode to check
*/
static inline int nand_opcode_8bits(unsigned int command)
@@ -1257,65 +1488,140 @@ static inline int nand_opcode_8bits(unsigned int command)
return 0;
}
-/* return the supported JEDEC features. */
-static inline int jedec_feature(struct nand_chip *chip)
-{
- return chip->jedec_version ? le16_to_cpu(chip->jedec_params.features)
- : 0;
-}
-
-/* get timing characteristics from ONFI timing mode. */
-const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode);
-/* get data interface from ONFI timing mode 0, used after reset. */
-const struct nand_data_interface *nand_get_default_data_interface(void);
+int rawnand_sw_hamming_init(struct nand_chip *chip);
+int rawnand_sw_hamming_calculate(struct nand_chip *chip,
+ const unsigned char *buf,
+ unsigned char *code);
+int rawnand_sw_hamming_correct(struct nand_chip *chip,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc);
+void rawnand_sw_hamming_cleanup(struct nand_chip *chip);
+int rawnand_sw_bch_init(struct nand_chip *chip);
+int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc);
+void rawnand_sw_bch_cleanup(struct nand_chip *chip);
int nand_check_erased_ecc_chunk(void *data, int datalen,
void *ecc, int ecclen,
void *extraoob, int extraooblen,
int threshold);
-int nand_check_ecc_caps(struct nand_chip *chip,
- const struct nand_ecc_caps *caps, int oobavail);
-
-int nand_match_ecc_req(struct nand_chip *chip,
- const struct nand_ecc_caps *caps, int oobavail);
-
-int nand_maximize_ecc(struct nand_chip *chip,
- const struct nand_ecc_caps *caps, int oobavail);
+int nand_ecc_choose_conf(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail);
/* Default write_oob implementation */
-int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
-
-/* Default write_oob syndrome implementation */
-int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
- int page);
+int nand_write_oob_std(struct nand_chip *chip, int page);
/* Default read_oob implementation */
-int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
-
-/* Default read_oob syndrome implementation */
-int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
- int page);
+int nand_read_oob_std(struct nand_chip *chip, int page);
/* Stub used by drivers that do not support GET/SET FEATURES operations */
-int nand_onfi_get_set_features_notsupp(struct mtd_info *mtd,
- struct nand_chip *chip, int addr,
- u8 *subfeature_param);
+int nand_get_set_features_notsupp(struct nand_chip *chip, int addr,
+ u8 *subfeature_param);
-/* Default read_page_raw implementation */
-int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page);
+/* read_page_raw implementations */
+int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
+ int page);
+int nand_monolithic_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page);
-/* Default write_page_raw implementation */
-int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page);
+/* write_page_raw implementations */
+int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page);
+int nand_monolithic_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page);
/* Reset and initialize a NAND device */
int nand_reset(struct nand_chip *chip, int chipnr);
-/* Free resources held by the NAND device */
+/* NAND operation helpers */
+int nand_reset_op(struct nand_chip *chip);
+int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
+ unsigned int len);
+int nand_status_op(struct nand_chip *chip, u8 *status);
+int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
+int nand_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len);
+int nand_change_read_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len, bool force_8bit);
+int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len);
+int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len);
+int nand_prog_page_end_op(struct nand_chip *chip);
+int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len);
+int nand_change_write_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len, bool force_8bit);
+int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
+ bool force_8bit, bool check_only);
+int nand_write_data_op(struct nand_chip *chip, const void *buf,
+ unsigned int len, bool force_8bit);
+int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page);
+
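
As an example of chaining these helpers, a raw page read can be built from nand_read_page_op() followed by nand_read_data_op() for the OOB bytes, which is essentially what the core's nand_read_page_raw() does (the foo_ prefix is hypothetical):

static int foo_read_page_raw(struct nand_chip *chip, uint8_t *buf,
			     int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/* READ0 + address cycles + READSTART, then fetch the page data */
	ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	if (!oob_required)
		return 0;

	/* The read column now sits at the OOB area: drain it in one go */
	return nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
				 false);
}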
+/* Scan and identify a NAND device */
+int nand_scan_with_ids(struct nand_chip *chip, unsigned int max_chips,
+ struct nand_flash_dev *ids);
+
+static inline int nand_scan(struct nand_chip *chip, unsigned int max_chips)
+{
+ return nand_scan_with_ids(chip, max_chips, NULL);
+}
+
+/* Internal helper for board drivers which need to override command function */
+void nand_wait_ready(struct nand_chip *chip);
+
+/*
+ * Free resources held by the NAND device, must be called on error after a
+ * successful nand_scan().
+ */
void nand_cleanup(struct nand_chip *chip);
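
A typical probe-time sequence, sketched to show where nand_cleanup() belongs in the error path (mtd_device_register() comes from <linux/mtd/mtd.h>; foo_ is hypothetical):

static int foo_probe_chip(struct nand_chip *chip)
{
	int ret;

	ret = nand_scan(chip, 1);
	if (ret)
		return ret;

	ret = mtd_device_register(nand_to_mtd(chip), NULL, 0);
	if (ret)
		nand_cleanup(chip); /* only legal after a successful scan */

	return ret;
}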
-/* Default extended ID decoding function */
-void nand_decode_ext_id(struct nand_chip *chip);
+/*
+ * External helper for controller drivers that have to implement the WAITRDY
+ * instruction and have no physical pin to check it.
+ */
+int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
+int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
+ unsigned long timeout_ms);
+
+/* Select/deselect a NAND target. */
+void nand_select_target(struct nand_chip *chip, unsigned int cs);
+void nand_deselect_target(struct nand_chip *chip);
+
+/* Bitops */
+void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
+ unsigned int src_off, unsigned int nbits);
+
+/**
+ * nand_get_data_buf() - Get the internal page buffer
+ * @chip: NAND chip object
+ *
+ * Returns the pre-allocated page buffer after invalidating the cache. This
+ * function should be used by drivers that do not want to allocate their own
+ * bounce buffer and still need such a buffer for specific operations (most
+ * commonly when reading OOB data only).
+ *
+ * Be careful to never call this function in the write/write_oob path, because
+ * the core may have placed the data to be written out in this buffer.
+ *
+ * Return: pointer to the page cache buffer
+ */
+static inline void *nand_get_data_buf(struct nand_chip *chip)
+{
+ chip->pagecache.page = -1;
+
+ return chip->data_buf;
+}
+
+/* Parse the gpio-cs property */
+int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
+ unsigned int *ncs_array);
+
#endif /* __LINUX_MTD_RAWNAND_H */
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
index c759d403cbc0..78fc2d4218c8 100644
--- a/include/linux/mtd/sh_flctl.h
+++ b/include/linux/mtd/sh_flctl.h
@@ -1,20 +1,8 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* SuperH FLCTL nand controller
*
* Copyright © 2008 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __SH_FLCTL_H__
diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h
index e1845fc4afbd..231bd1c3f408 100644
--- a/include/linux/mtd/sharpsl.h
+++ b/include/linux/mtd/sharpsl.h
@@ -1,15 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* SharpSL NAND support
*
* Copyright (C) 2008 Dmitry Baryshkov
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
+#ifndef _MTD_SHARPSL_H
+#define _MTD_SHARPSL_H
+
#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
struct sharpsl_nand_platform_data {
@@ -19,3 +18,5 @@ struct sharpsl_nand_platform_data {
unsigned int nr_partitions;
const char *const *part_parsers;
};
+
+#endif /* _MTD_SHARPSL_H */
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 55faa2f07cca..cdcfe0fd2e7d 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -1,33 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2014 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_MTD_SPI_NOR_H
#define __LINUX_MTD_SPI_NOR_H
#include <linux/bitops.h>
-#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
-
-/*
- * Manufacturer IDs
- *
- * The first byte returned from the flash after sending opcode SPINOR_OP_RDID.
- * Sometimes these are the same as CFI IDs, but sometimes they aren't.
- */
-#define SNOR_MFR_ATMEL CFI_MFR_ATMEL
-#define SNOR_MFR_GIGADEVICE 0xc8
-#define SNOR_MFR_INTEL CFI_MFR_INTEL
-#define SNOR_MFR_MICRON CFI_MFR_ST /* ST Micro <--> Micron */
-#define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX
-#define SNOR_MFR_SPANSION CFI_MFR_AMD
-#define SNOR_MFR_SST CFI_MFR_SST
-#define SNOR_MFR_WINBOND 0xef /* Also used by some Spansion */
+#include <linux/spi/spi-mem.h>
/*
* Note on opcode nomenclature: some opcodes have a format like
@@ -38,26 +19,36 @@
*/
/* Flash opcodes. */
+#define SPINOR_OP_WRDI 0x04 /* Write disable */
#define SPINOR_OP_WREN 0x06 /* Write enable */
#define SPINOR_OP_RDSR 0x05 /* Read status register */
#define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */
+#define SPINOR_OP_RDSR2 0x3f /* Read status register 2 */
+#define SPINOR_OP_WRSR2 0x3e /* Write status register 2 */
#define SPINOR_OP_READ 0x03 /* Read data bytes (low frequency) */
#define SPINOR_OP_READ_FAST 0x0b /* Read data bytes (high frequency) */
#define SPINOR_OP_READ_1_1_2 0x3b /* Read data bytes (Dual Output SPI) */
#define SPINOR_OP_READ_1_2_2 0xbb /* Read data bytes (Dual I/O SPI) */
#define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad Output SPI) */
#define SPINOR_OP_READ_1_4_4 0xeb /* Read data bytes (Quad I/O SPI) */
+#define SPINOR_OP_READ_1_1_8 0x8b /* Read data bytes (Octal Output SPI) */
+#define SPINOR_OP_READ_1_8_8 0xcb /* Read data bytes (Octal I/O SPI) */
#define SPINOR_OP_PP 0x02 /* Page program (up to 256 bytes) */
#define SPINOR_OP_PP_1_1_4 0x32 /* Quad page program */
#define SPINOR_OP_PP_1_4_4 0x38 /* Quad page program */
+#define SPINOR_OP_PP_1_1_8 0x82 /* Octal page program */
+#define SPINOR_OP_PP_1_8_8 0xc2 /* Octal page program */
#define SPINOR_OP_BE_4K 0x20 /* Erase 4KiB block */
#define SPINOR_OP_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */
#define SPINOR_OP_BE_32K 0x52 /* Erase 32KiB block */
#define SPINOR_OP_CHIP_ERASE 0xc7 /* Erase whole flash chip */
#define SPINOR_OP_SE 0xd8 /* Sector erase (usually 64KiB) */
#define SPINOR_OP_RDID 0x9f /* Read JEDEC ID */
+#define SPINOR_OP_RDSFDP 0x5a /* Read SFDP */
#define SPINOR_OP_RDCR 0x35 /* Read configuration register */
-#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
+#define SPINOR_OP_SRSTEN 0x66 /* Software Reset Enable */
+#define SPINOR_OP_SRST 0x99 /* Software Reset */
+#define SPINOR_OP_GBULK 0x98 /* Global Block Unlock */
/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */
@@ -66,9 +57,13 @@
#define SPINOR_OP_READ_1_2_2_4B 0xbc /* Read data bytes (Dual I/O SPI) */
#define SPINOR_OP_READ_1_1_4_4B 0x6c /* Read data bytes (Quad Output SPI) */
#define SPINOR_OP_READ_1_4_4_4B 0xec /* Read data bytes (Quad I/O SPI) */
+#define SPINOR_OP_READ_1_1_8_4B 0x7c /* Read data bytes (Octal Output SPI) */
+#define SPINOR_OP_READ_1_8_8_4B 0xcc /* Read data bytes (Octal I/O SPI) */
#define SPINOR_OP_PP_4B 0x12 /* Page program (up to 256 bytes) */
#define SPINOR_OP_PP_1_1_4_4B 0x34 /* Quad page program */
#define SPINOR_OP_PP_1_4_4_4B 0x3e /* Quad page program */
+#define SPINOR_OP_PP_1_1_8_4B 0x84 /* Octal page program */
+#define SPINOR_OP_PP_1_8_8_4B 0x8e /* Octal page program */
#define SPINOR_OP_BE_4K_4B 0x21 /* Erase 4KiB block */
#define SPINOR_OP_BE_32K_4B 0x5c /* Erase 32KiB block */
#define SPINOR_OP_SE_4B 0xdc /* Sector erase (usually 64KiB) */
@@ -84,18 +79,8 @@
/* Used for SST flashes only. */
#define SPINOR_OP_BP 0x02 /* Byte program */
-#define SPINOR_OP_WRDI 0x04 /* Write disable */
#define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */
-/* Used for S3AN flashes only */
-#define SPINOR_OP_XSE 0x50 /* Sector erase */
-#define SPINOR_OP_XPP 0x82 /* Page program */
-#define SPINOR_OP_XRDSR 0xd7 /* Read status register */
-
-#define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */
-#define XSR_RDY BIT(7) /* Ready */
-
-
/* Used for Macronix and Winbond flashes. */
#define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */
#define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */
@@ -107,6 +92,11 @@
#define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */
#define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */
+/* Used for GigaDevices and Winbond flashes. */
+#define SPINOR_OP_ESECR 0x44 /* Erase Security registers */
+#define SPINOR_OP_PSECR 0x42 /* Program Security registers */
+#define SPINOR_OP_RSECR 0x48 /* Read Security registers */
+
/* Status Register bits. */
#define SR_WIP BIT(0) /* Write in progress */
#define SR_WEL BIT(1) /* Write enable latch */
@@ -114,19 +104,28 @@
#define SR_BP0 BIT(2) /* Block protect 0 */
#define SR_BP1 BIT(3) /* Block protect 1 */
#define SR_BP2 BIT(4) /* Block protect 2 */
-#define SR_TB BIT(5) /* Top/Bottom protect */
+#define SR_BP3 BIT(5) /* Block protect 3 */
+#define SR_TB_BIT5 BIT(5) /* Top/Bottom protect */
+#define SR_BP3_BIT6 BIT(6) /* Block protect 3 */
+#define SR_TB_BIT6 BIT(6) /* Top/Bottom protect */
#define SR_SRWD BIT(7) /* SR write protect */
+/* Spansion/Cypress specific status bits */
+#define SR_E_ERR BIT(5)
+#define SR_P_ERR BIT(6)
+
+#define SR1_QUAD_EN_BIT6 BIT(6)
-#define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */
+#define SR_BP_SHIFT 2
/* Enhanced Volatile Configuration Register bits */
#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */
-/* Flag Status Register bits */
-#define FSR_READY BIT(7)
-
-/* Configuration Register bits. */
-#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */
+/* Status Register 2 bits. */
+#define SR2_QUAD_EN_BIT1 BIT(1)
+#define SR2_LB1 BIT(3) /* Security Register Lock Bit 1 */
+#define SR2_LB2 BIT(4) /* Security Register Lock Bit 2 */
+#define SR2_LB3 BIT(5) /* Security Register Lock Bit 3 */
+#define SR2_QUAD_EN_BIT7 BIT(7)
/* Supported SPI protocols */
#define SNOR_PROTO_INST_MASK GENMASK(23, 16)
@@ -173,6 +172,7 @@ enum spi_nor_protocol {
SNOR_PROTO_1_2_2_DTR = SNOR_PROTO_DTR(1, 2, 2),
SNOR_PROTO_1_4_4_DTR = SNOR_PROTO_DTR(1, 4, 4),
SNOR_PROTO_1_8_8_DTR = SNOR_PROTO_DTR(1, 8, 8),
+ SNOR_PROTO_8_8_8_DTR = SNOR_PROTO_DTR(8, 8, 8),
};
static inline bool spi_nor_protocol_is_dtr(enum spi_nor_protocol proto)
@@ -203,103 +203,6 @@ static inline u8 spi_nor_get_protocol_width(enum spi_nor_protocol proto)
return spi_nor_get_protocol_data_nbits(proto);
}
-#define SPI_NOR_MAX_CMD_SIZE 8
-enum spi_nor_ops {
- SPI_NOR_OPS_READ = 0,
- SPI_NOR_OPS_WRITE,
- SPI_NOR_OPS_ERASE,
- SPI_NOR_OPS_LOCK,
- SPI_NOR_OPS_UNLOCK,
-};
-
-enum spi_nor_option_flags {
- SNOR_F_USE_FSR = BIT(0),
- SNOR_F_HAS_SR_TB = BIT(1),
- SNOR_F_NO_OP_CHIP_ERASE = BIT(2),
- SNOR_F_S3AN_ADDR_DEFAULT = BIT(3),
- SNOR_F_READY_XSR_RDY = BIT(4),
-};
-
-/**
- * struct spi_nor - Structure for defining a the SPI NOR layer
- * @mtd: point to a mtd_info structure
- * @lock: the lock for the read/write/erase/lock/unlock operations
- * @dev: point to a spi device, or a spi nor controller device.
- * @page_size: the page size of the SPI NOR
- * @addr_width: number of address bytes
- * @erase_opcode: the opcode for erasing a sector
- * @read_opcode: the read opcode
- * @read_dummy: the dummy needed by the read operation
- * @program_opcode: the program opcode
- * @sst_write_second: used by the SST write operation
- * @flags: flag options for the current SPI-NOR (SNOR_F_*)
- * @read_proto: the SPI protocol for read operations
- * @write_proto: the SPI protocol for write operations
- * @reg_proto the SPI protocol for read_reg/write_reg/erase operations
- * @cmd_buf: used by the write_reg
- * @prepare: [OPTIONAL] do some preparations for the
- * read/write/erase/lock/unlock operations
- * @unprepare: [OPTIONAL] do some post work after the
- * read/write/erase/lock/unlock operations
- * @read_reg: [DRIVER-SPECIFIC] read out the register
- * @write_reg: [DRIVER-SPECIFIC] write data to the register
- * @read: [DRIVER-SPECIFIC] read data from the SPI NOR
- * @write: [DRIVER-SPECIFIC] write data to the SPI NOR
- * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR
- * at the offset @offs; if not provided by the driver,
- * spi-nor will send the erase opcode via write_reg()
- * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR
- * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR
- * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is
- * completely locked
- * @priv: the private data
- */
-struct spi_nor {
- struct mtd_info mtd;
- struct mutex lock;
- struct device *dev;
- u32 page_size;
- u8 addr_width;
- u8 erase_opcode;
- u8 read_opcode;
- u8 read_dummy;
- u8 program_opcode;
- enum spi_nor_protocol read_proto;
- enum spi_nor_protocol write_proto;
- enum spi_nor_protocol reg_proto;
- bool sst_write_second;
- u32 flags;
- u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
-
- int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
- void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
- int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
- int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
-
- ssize_t (*read)(struct spi_nor *nor, loff_t from,
- size_t len, u_char *read_buf);
- ssize_t (*write)(struct spi_nor *nor, loff_t to,
- size_t len, const u_char *write_buf);
- int (*erase)(struct spi_nor *nor, loff_t offs);
-
- int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
- int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
- int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len);
-
- void *priv;
-};
-
-static inline void spi_nor_set_flash_node(struct spi_nor *nor,
- struct device_node *np)
-{
- mtd_set_of_node(&nor->mtd, np);
-}
-
-static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor)
-{
- return mtd_get_of_node(&nor->mtd);
-}
-
/**
 * struct spi_nor_hwcaps - Structure for describing the hardware capabilities
* supported by the SPI controller (bus master).
@@ -312,11 +215,11 @@ struct spi_nor_hwcaps {
/*
 * (Fast) Read capabilities.
* MUST be ordered by priority: the higher bit position, the higher priority.
- * As a matter of performances, it is relevant to use Octo SPI protocols first,
+ * As a matter of performance, it is relevant to use Octal SPI protocols first,
* then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly
* (Slow) Read.
*/
-#define SNOR_HWCAPS_READ_MASK GENMASK(14, 0)
+#define SNOR_HWCAPS_READ_MASK GENMASK(15, 0)
#define SNOR_HWCAPS_READ BIT(0)
#define SNOR_HWCAPS_READ_FAST BIT(1)
#define SNOR_HWCAPS_READ_1_1_1_DTR BIT(2)
@@ -333,33 +236,202 @@ struct spi_nor_hwcaps {
#define SNOR_HWCAPS_READ_4_4_4 BIT(9)
#define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10)
-#define SNOR_HWCPAS_READ_OCTO GENMASK(14, 11)
+#define SNOR_HWCAPS_READ_OCTAL GENMASK(15, 11)
#define SNOR_HWCAPS_READ_1_1_8 BIT(11)
#define SNOR_HWCAPS_READ_1_8_8 BIT(12)
#define SNOR_HWCAPS_READ_8_8_8 BIT(13)
#define SNOR_HWCAPS_READ_1_8_8_DTR BIT(14)
+#define SNOR_HWCAPS_READ_8_8_8_DTR BIT(15)
/*
* Page Program capabilities.
* MUST be ordered by priority: the higher bit position, the higher priority.
- * Like (Fast) Read capabilities, Octo/Quad SPI protocols are preferred to the
+ * Like (Fast) Read capabilities, Octal/Quad SPI protocols are preferred to the
* legacy SPI 1-1-1 protocol.
* Note that Dual Page Programs are not supported because there is no existing
* JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory
* implements such commands.
*/
-#define SNOR_HWCAPS_PP_MASK GENMASK(22, 16)
-#define SNOR_HWCAPS_PP BIT(16)
+#define SNOR_HWCAPS_PP_MASK GENMASK(23, 16)
+#define SNOR_HWCAPS_PP BIT(16)
-#define SNOR_HWCAPS_PP_QUAD GENMASK(19, 17)
-#define SNOR_HWCAPS_PP_1_1_4 BIT(17)
-#define SNOR_HWCAPS_PP_1_4_4 BIT(18)
-#define SNOR_HWCAPS_PP_4_4_4 BIT(19)
+#define SNOR_HWCAPS_PP_QUAD GENMASK(19, 17)
+#define SNOR_HWCAPS_PP_1_1_4 BIT(17)
+#define SNOR_HWCAPS_PP_1_4_4 BIT(18)
+#define SNOR_HWCAPS_PP_4_4_4 BIT(19)
-#define SNOR_HWCAPS_PP_OCTO GENMASK(22, 20)
-#define SNOR_HWCAPS_PP_1_1_8 BIT(20)
-#define SNOR_HWCAPS_PP_1_8_8 BIT(21)
-#define SNOR_HWCAPS_PP_8_8_8 BIT(22)
+#define SNOR_HWCAPS_PP_OCTAL GENMASK(23, 20)
+#define SNOR_HWCAPS_PP_1_1_8 BIT(20)
+#define SNOR_HWCAPS_PP_1_8_8 BIT(21)
+#define SNOR_HWCAPS_PP_8_8_8 BIT(22)
+#define SNOR_HWCAPS_PP_8_8_8_DTR BIT(23)
+
+#define SNOR_HWCAPS_X_X_X (SNOR_HWCAPS_READ_2_2_2 | \
+ SNOR_HWCAPS_READ_4_4_4 | \
+ SNOR_HWCAPS_READ_8_8_8 | \
+ SNOR_HWCAPS_PP_4_4_4 | \
+ SNOR_HWCAPS_PP_8_8_8)
+
+#define SNOR_HWCAPS_X_X_X_DTR (SNOR_HWCAPS_READ_8_8_8_DTR | \
+ SNOR_HWCAPS_PP_8_8_8_DTR)
+
+#define SNOR_HWCAPS_DTR (SNOR_HWCAPS_READ_1_1_1_DTR | \
+ SNOR_HWCAPS_READ_1_2_2_DTR | \
+ SNOR_HWCAPS_READ_1_4_4_DTR | \
+ SNOR_HWCAPS_READ_1_8_8_DTR | \
+ SNOR_HWCAPS_READ_8_8_8_DTR)
+
+#define SNOR_HWCAPS_ALL (SNOR_HWCAPS_READ_MASK | \
+ SNOR_HWCAPS_PP_MASK)
+
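
For instance, a controller limited to classic SPI plus 1-1-4 fast reads could advertise itself as follows before calling spi_nor_scan(), whose kerneldoc appears further down; the single mask member of struct spi_nor_hwcaps is assumed here, since its definition falls outside this excerpt:

static int foo_nor_setup(struct spi_nor *nor)
{
	static const struct spi_nor_hwcaps foo_hwcaps = {
		.mask = SNOR_HWCAPS_READ |
			SNOR_HWCAPS_READ_FAST |
			SNOR_HWCAPS_READ_1_1_4 |
			SNOR_HWCAPS_PP,
	};

	/* Let the core pick the fastest protocol both sides support */
	return spi_nor_scan(nor, NULL, &foo_hwcaps);
}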
+/* Forward declaration that is used in 'struct spi_nor_controller_ops' */
+struct spi_nor;
+
+/**
+ * struct spi_nor_controller_ops - SPI NOR controller driver specific
+ * operations.
+ * @prepare: [OPTIONAL] do some preparations for the
+ * read/write/erase/lock/unlock operations.
+ * @unprepare: [OPTIONAL] do some post work after the
+ * read/write/erase/lock/unlock operations.
+ * @read_reg: read out the register.
+ * @write_reg: write data to the register.
+ * @read: read data from the SPI NOR.
+ * @write: write data to the SPI NOR.
+ * @erase: erase a sector of the SPI NOR at the offset @offs; if
+ * not provided by the driver, SPI NOR will send the erase
+ * opcode via write_reg().
+ */
+struct spi_nor_controller_ops {
+ int (*prepare)(struct spi_nor *nor);
+ void (*unprepare)(struct spi_nor *nor);
+ int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len);
+ int (*write_reg)(struct spi_nor *nor, u8 opcode, const u8 *buf,
+ size_t len);
+
+ ssize_t (*read)(struct spi_nor *nor, loff_t from, size_t len, u8 *buf);
+ ssize_t (*write)(struct spi_nor *nor, loff_t to, size_t len,
+ const u8 *buf);
+ int (*erase)(struct spi_nor *nor, loff_t offs);
+};
+
+/**
+ * enum spi_nor_cmd_ext - describes the command opcode extension in DTR mode
+ * @SPI_NOR_EXT_NONE: no extension. This is the default, and is used in Legacy
+ * SPI mode
+ * @SPI_NOR_EXT_REPEAT: the extension is same as the opcode
+ * @SPI_NOR_EXT_INVERT: the extension is the bitwise inverse of the opcode
+ * @SPI_NOR_EXT_HEX: the extension is any hex value. The command and opcode
+ * combine to form a 16-bit opcode.
+ */
+enum spi_nor_cmd_ext {
+ SPI_NOR_EXT_NONE = 0,
+ SPI_NOR_EXT_REPEAT,
+ SPI_NOR_EXT_INVERT,
+ SPI_NOR_EXT_HEX,
+};
+
+/*
+ * Forward declarations that are used internally by the core and manufacturer
+ * drivers.
+ */
+struct flash_info;
+struct spi_nor_manufacturer;
+struct spi_nor_flash_parameter;
+
+/**
+ * struct spi_nor - Structure for defining the SPI NOR layer
+ * @mtd: an mtd_info structure
+ * @lock: the lock for the read/write/erase/lock/unlock operations
+ * @rww: Read-While-Write (RWW) sync lock
+ * @rww.wait: wait queue for the RWW sync
+ * @rww.ongoing_io: the bus is busy
+ * @rww.ongoing_rd: a read is ongoing on the chip
+ * @rww.ongoing_pe: a program/erase is ongoing on the chip
+ * @rww.used_banks: bitmap of the banks in use
+ * @dev: pointer to an SPI device or an SPI NOR controller device
+ * @spimem: pointer to the SPI memory device
+ * @bouncebuf: bounce buffer used when the buffer passed by the MTD
+ * layer is not DMA-able
+ * @bouncebuf_size: size of the bounce buffer
+ * @id: The flash's ID bytes. Always contains
+ * SPI_NOR_MAX_ID_LEN bytes.
+ * @info: SPI NOR part JEDEC MFR ID and other info
+ * @manufacturer: SPI NOR manufacturer
+ * @addr_nbytes: number of address bytes
+ * @erase_opcode: the opcode for erasing a sector
+ * @read_opcode: the read opcode
+ * @read_dummy: the dummy needed by the read operation
+ * @program_opcode: the program opcode
+ * @sst_write_second: used by the SST write operation
+ * @flags: flag options for the current SPI NOR (SNOR_F_*)
+ * @cmd_ext_type: the command opcode extension type for DTR mode.
+ * @read_proto: the SPI protocol for read operations
+ * @write_proto: the SPI protocol for write operations
+ * @reg_proto: the SPI protocol for read_reg/write_reg/erase operations
+ * @sfdp: the SFDP data of the flash
+ * @debugfs_root: pointer to the debugfs directory
+ * @controller_ops: SPI NOR controller driver specific operations.
+ * @params: [FLASH-SPECIFIC] SPI NOR flash parameters and settings.
+ * The structure includes legacy flash parameters and
+ * settings that can be overwritten by the spi_nor_fixups
+ * hooks, or dynamically when parsing the SFDP tables.
+ * @dirmap: pointers to struct spi_mem_dirmap_desc for reads/writes.
+ * @priv: pointer to the private data
+ */
+struct spi_nor {
+ struct mtd_info mtd;
+ struct mutex lock;
+ struct spi_nor_rww {
+ wait_queue_head_t wait;
+ bool ongoing_io;
+ bool ongoing_rd;
+ bool ongoing_pe;
+ unsigned int used_banks;
+ } rww;
+ struct device *dev;
+ struct spi_mem *spimem;
+ u8 *bouncebuf;
+ size_t bouncebuf_size;
+ u8 *id;
+ const struct flash_info *info;
+ const struct spi_nor_manufacturer *manufacturer;
+ u8 addr_nbytes;
+ u8 erase_opcode;
+ u8 read_opcode;
+ u8 read_dummy;
+ u8 program_opcode;
+ enum spi_nor_protocol read_proto;
+ enum spi_nor_protocol write_proto;
+ enum spi_nor_protocol reg_proto;
+ bool sst_write_second;
+ u32 flags;
+ enum spi_nor_cmd_ext cmd_ext_type;
+ struct sfdp *sfdp;
+ struct dentry *debugfs_root;
+
+ const struct spi_nor_controller_ops *controller_ops;
+
+ struct spi_nor_flash_parameter *params;
+
+ struct {
+ struct spi_mem_dirmap_desc *rdesc;
+ struct spi_mem_dirmap_desc *wdesc;
+ } dirmap;
+
+ void *priv;
+};
+
+static inline void spi_nor_set_flash_node(struct spi_nor *nor,
+ struct device_node *np)
+{
+ mtd_set_of_node(&nor->mtd, np);
+}
+
+static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor)
+{
+ return mtd_get_of_node(&nor->mtd);
+}
/**
* spi_nor_scan() - scan the SPI NOR
@@ -367,7 +439,7 @@ struct spi_nor_hwcaps {
* @name: the chip type name
* @hwcaps: the hardware capabilities supported by the controller driver
*
- * The drivers can use this fuction to scan the SPI NOR.
+ * The drivers can use this function to scan the SPI NOR.
* In the scanning, it will try to get all the necessary information to
* fill the mtd_info{} and the spi_nor{}.
*
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
new file mode 100644
index 000000000000..3e285c09d16d
--- /dev/null
+++ b/include/linux/mtd/spinand.h
@@ -0,0 +1,521 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2017 Micron Technology, Inc.
+ *
+ * Authors:
+ * Peter Pan <peterpandong@micron.com>
+ */
+#ifndef __LINUX_MTD_SPINAND_H
+#define __LINUX_MTD_SPINAND_H
+
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+/**
+ * Standard SPI NAND flash operations
+ */
+
+#define SPINAND_RESET_OP \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xff, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_WR_EN_DIS_OP(enable) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD((enable) ? 0x06 : 0x04, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_READID_OP(naddr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \
+ SPI_MEM_OP_ADDR(naddr, 0, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 1))
+
+#define SPINAND_SET_FEATURE_OP(reg, valptr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x1f, 1), \
+ SPI_MEM_OP_ADDR(1, reg, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(1, valptr, 1))
+
+#define SPINAND_GET_FEATURE_OP(reg, valptr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1), \
+ SPI_MEM_OP_ADDR(1, reg, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_IN(1, valptr, 1))
+
+#define SPINAND_BLK_ERASE_OP(addr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xd8, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PAGE_READ_OP(addr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x13, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PAGE_READ_FROM_CACHE_OP(fast, addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 1))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_OP_3A(fast, addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 1))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 2))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 2))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 4))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 4))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 2), \
+ SPI_MEM_OP_DUMMY(ndummy, 2), \
+ SPI_MEM_OP_DATA_IN(len, buf, 2))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP_3A(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 2), \
+ SPI_MEM_OP_DUMMY(ndummy, 2), \
+ SPI_MEM_OP_DATA_IN(len, buf, 2))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 4), \
+ SPI_MEM_OP_DUMMY(ndummy, 4), \
+ SPI_MEM_OP_DATA_IN(len, buf, 4))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP_3A(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 4), \
+ SPI_MEM_OP_DUMMY(ndummy, 4), \
+ SPI_MEM_OP_DATA_IN(len, buf, 4))
+
+#define SPINAND_PROG_EXEC_OP(addr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PROG_LOAD(reset, addr, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x02 : 0x84, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(len, buf, 1))
+
+#define SPINAND_PROG_LOAD_X4(reset, addr, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x32 : 0x34, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(len, buf, 4))
+
+/*
+ * Standard SPI NAND flash commands
+ */
+#define SPINAND_CMD_PROG_LOAD_X4 0x32
+#define SPINAND_CMD_PROG_LOAD_RDM_DATA_X4 0x34
+
+/* feature register */
+#define REG_BLOCK_LOCK 0xa0
+#define BL_ALL_UNLOCKED 0x00
+
+/* configuration register */
+#define REG_CFG 0xb0
+#define CFG_OTP_ENABLE BIT(6)
+#define CFG_ECC_ENABLE BIT(4)
+#define CFG_QUAD_ENABLE BIT(0)
+
+/* status register */
+#define REG_STATUS 0xc0
+#define STATUS_BUSY BIT(0)
+#define STATUS_ERASE_FAILED BIT(2)
+#define STATUS_PROG_FAILED BIT(3)
+#define STATUS_ECC_MASK GENMASK(5, 4)
+#define STATUS_ECC_NO_BITFLIPS (0 << 4)
+#define STATUS_ECC_HAS_BITFLIPS (1 << 4)
+#define STATUS_ECC_UNCOR_ERROR (2 << 4)
+
+struct spinand_op;
+struct spinand_device;
+
+#define SPINAND_MAX_ID_LEN 4
+/*
+ * For erase, write and read operations, we have the following timings:
+ * tBERS (erase) 1ms to 4ms
+ * tPROG 300us to 400us
+ * tREAD 25us to 100us
+ * In order to minimize latency, the min value is divided by 4 for the
+ * initial delay, and by 20 for the poll delay.
+ * For reset, 5us/10us/500us if the device is respectively
+ * reading/programming/erasing when the RESET occurs. Since we always
+ * issue a RESET when the device is IDLE, 5us is selected for both initial
+ * and poll delay.
+ */
+#define SPINAND_READ_INITIAL_DELAY_US 6
+#define SPINAND_READ_POLL_DELAY_US 5
+#define SPINAND_RESET_INITIAL_DELAY_US 5
+#define SPINAND_RESET_POLL_DELAY_US 5
+#define SPINAND_WRITE_INITIAL_DELAY_US 75
+#define SPINAND_WRITE_POLL_DELAY_US 15
+#define SPINAND_ERASE_INITIAL_DELAY_US 250
+#define SPINAND_ERASE_POLL_DELAY_US 50
+
+#define SPINAND_WAITRDY_TIMEOUT_MS 400
+
+/**
+ * struct spinand_id - SPI NAND id structure
+ * @data: buffer containing the id bytes. Currently 4 bytes large, but can
+ * be extended if required
+ * @len: ID length
+ */
+struct spinand_id {
+ u8 data[SPINAND_MAX_ID_LEN];
+ int len;
+};
+
+enum spinand_readid_method {
+ SPINAND_READID_METHOD_OPCODE,
+ SPINAND_READID_METHOD_OPCODE_ADDR,
+ SPINAND_READID_METHOD_OPCODE_DUMMY,
+};
+
+/**
+ * struct spinand_devid - SPI NAND device id structure
+ * @id: device id of current chip
+ * @len: number of bytes in device id
+ * @method: method to read chip id
+ * There are 3 possible variants:
+ * SPINAND_READID_METHOD_OPCODE: chip id is returned immediately
+ * after read_id opcode.
+ * SPINAND_READID_METHOD_OPCODE_ADDR: chip id is returned after
+ * read_id opcode + 1-byte address.
+ * SPINAND_READID_METHOD_OPCODE_DUMMY: chip id is returned after
+ * read_id opcode + 1 dummy byte.
+ */
+struct spinand_devid {
+ const u8 *id;
+ const u8 len;
+ const enum spinand_readid_method method;
+};
+
+/**
+ * struct spinand_manufacturer_ops - SPI NAND manufacturer specific operations
+ * @init: initialize a SPI NAND device
+ * @cleanup: cleanup a SPI NAND device
+ *
+ * Each SPI NAND manufacturer driver should implement this interface so that
+ * NAND chips coming from this vendor can be initialized properly.
+ */
+struct spinand_manufacturer_ops {
+ int (*init)(struct spinand_device *spinand);
+ void (*cleanup)(struct spinand_device *spinand);
+};
+
+/**
+ * struct spinand_manufacturer - SPI NAND manufacturer instance
+ * @id: manufacturer ID
+ * @name: manufacturer name
+ * @devid_len: number of bytes in device ID
+ * @chips: supported SPI NANDs under current manufacturer
+ * @nchips: number of SPI NANDs available in chips array
+ * @ops: manufacturer operations
+ */
+struct spinand_manufacturer {
+ u8 id;
+ char *name;
+ const struct spinand_info *chips;
+ const size_t nchips;
+ const struct spinand_manufacturer_ops *ops;
+};
+
+/* SPI NAND manufacturers */
+extern const struct spinand_manufacturer alliancememory_spinand_manufacturer;
+extern const struct spinand_manufacturer ato_spinand_manufacturer;
+extern const struct spinand_manufacturer esmt_c8_spinand_manufacturer;
+extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
+extern const struct spinand_manufacturer macronix_spinand_manufacturer;
+extern const struct spinand_manufacturer micron_spinand_manufacturer;
+extern const struct spinand_manufacturer paragon_spinand_manufacturer;
+extern const struct spinand_manufacturer toshiba_spinand_manufacturer;
+extern const struct spinand_manufacturer winbond_spinand_manufacturer;
+extern const struct spinand_manufacturer xtx_spinand_manufacturer;
+
+/**
+ * struct spinand_op_variants - SPI NAND operation variants
+ * @ops: the list of variants for a given operation
+ * @nops: the number of variants
+ *
+ * Some operations like read-from-cache/write-to-cache have several variants
+ * depending on the number of IO lines you use to transfer data or address
+ * cycles. This structure is a way to describe the different variants supported
+ * by a chip and let the core pick the best one based on the SPI mem controller
+ * capabilities.
+ */
+struct spinand_op_variants {
+ const struct spi_mem_op *ops;
+ unsigned int nops;
+};
+
+#define SPINAND_OP_VARIANTS(name, ...) \
+ const struct spinand_op_variants name = { \
+ .ops = (struct spi_mem_op[]) { __VA_ARGS__ }, \
+ .nops = sizeof((struct spi_mem_op[]){ __VA_ARGS__ }) / \
+ sizeof(struct spi_mem_op), \
+ }
+
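
Manufacturer drivers typically instantiate these variant lists straight from the op templates above, ordered fastest first so the core can down-select to what the controller supports; a representative read-cache list might look like this:

static SPINAND_OP_VARIANTS(read_cache_variants,
		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));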
+/**
+ * struct spinand_ecc_info - description of the on-die ECC implemented by a
+ * SPI NAND chip
+ * @get_status: get the ECC status. Should return a positive number encoding
+ * the number of corrected bitflips if correction was possible or
+ * -EBADMSG if there are uncorrectable errors. It can also return
+ * other negative error codes if the error is not caused by
+ * uncorrectable bitflips
+ * @ooblayout: the OOB layout used by the on-die ECC implementation
+ */
+struct spinand_ecc_info {
+ int (*get_status)(struct spinand_device *spinand, u8 status);
+ const struct mtd_ooblayout_ops *ooblayout;
+};
+
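
A minimal @get_status implementation decodes the generic STATUS_ECC_* field defined earlier; real chips often encode the exact corrected-bitflip count in vendor-specific bits, so the flat return of 1 below is only a placeholder (foo_ is hypothetical):

static int foo_ecc_get_status(struct spinand_device *spinand, u8 status)
{
	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/* The chip only says "corrected": report one bitflip */
		return 1;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		return -EINVAL;
	}
}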
+#define SPINAND_HAS_QE_BIT BIT(0)
+#define SPINAND_HAS_CR_FEAT_BIT BIT(1)
+
+/**
+ * struct spinand_ondie_ecc_conf - private SPI-NAND on-die ECC engine structure
+ * @status: status of the last wait operation that will be used in case
+ * ->get_status() is not populated by the spinand device.
+ */
+struct spinand_ondie_ecc_conf {
+ u8 status;
+};
+
+/**
+ * struct spinand_info - Structure used to describe SPI NAND chips
+ * @model: model name
+ * @devid: device ID
+ * @flags: OR-ing of the SPINAND_XXX flags
+ * @memorg: memory organization
+ * @eccreq: ECC requirements
+ * @eccinfo: on-die ECC info
+ * @op_variants: operations variants
+ * @op_variants.read_cache: variants of the read-cache operation
+ * @op_variants.write_cache: variants of the write-cache operation
+ * @op_variants.update_cache: variants of the update-cache operation
+ * @select_target: function used to select a target/die. Required only for
+ * multi-die chips
+ *
+ * Each SPI NAND manufacturer driver should have a spinand_info table
+ * describing all the chips supported by the driver.
+ */
+struct spinand_info {
+ const char *model;
+ struct spinand_devid devid;
+ u32 flags;
+ struct nand_memory_organization memorg;
+ struct nand_ecc_props eccreq;
+ struct spinand_ecc_info eccinfo;
+ struct {
+ const struct spinand_op_variants *read_cache;
+ const struct spinand_op_variants *write_cache;
+ const struct spinand_op_variants *update_cache;
+ } op_variants;
+ int (*select_target)(struct spinand_device *spinand,
+ unsigned int target);
+};
+
+#define SPINAND_ID(__method, ...) \
+ { \
+ .id = (const u8[]){ __VA_ARGS__ }, \
+ .len = sizeof((u8[]){ __VA_ARGS__ }), \
+ .method = __method, \
+ }
+
+#define SPINAND_INFO_OP_VARIANTS(__read, __write, __update) \
+ { \
+ .read_cache = __read, \
+ .write_cache = __write, \
+ .update_cache = __update, \
+ }
+
+#define SPINAND_ECCINFO(__ooblayout, __get_status) \
+ .eccinfo = { \
+ .ooblayout = __ooblayout, \
+ .get_status = __get_status, \
+ }
+
+#define SPINAND_SELECT_TARGET(__func) \
+ .select_target = __func,
+
+#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \
+ __flags, ...) \
+ { \
+ .model = __model, \
+ .devid = __id, \
+ .memorg = __memorg, \
+ .eccreq = __eccreq, \
+ .op_variants = __op_variants, \
+ .flags = __flags, \
+ __VA_ARGS__ \
+ }
+
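
Putting the pieces together, a vendor table entry combines the device ID, memory organization and op variants. This sketch reuses the read_cache_variants and foo_ecc_get_status examples above; everything prefixed foo_ is hypothetical, foo_ooblayout_ops stands in for a driver-provided mtd_ooblayout_ops, and NAND_MEMORG()/NAND_ECCREQ() are assumed to come from <linux/mtd/nand.h> (included at the top of this header):

static SPINAND_OP_VARIANTS(write_cache_variants,
		SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
		SPINAND_PROG_LOAD(true, 0, NULL, 0));

static SPINAND_OP_VARIANTS(update_cache_variants,
		SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
		SPINAND_PROG_LOAD(false, 0, NULL, 0));

static const struct spinand_info foo_spinand_table[] = {
	SPINAND_INFO("FOO1G01",
		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12),
		     NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
		     NAND_ECCREQ(8, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     SPINAND_HAS_QE_BIT,
		     SPINAND_ECCINFO(&foo_ooblayout_ops,
				     foo_ecc_get_status)),
};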
+struct spinand_dirmap {
+ struct spi_mem_dirmap_desc *wdesc;
+ struct spi_mem_dirmap_desc *rdesc;
+ struct spi_mem_dirmap_desc *wdesc_ecc;
+ struct spi_mem_dirmap_desc *rdesc_ecc;
+};
+
+/**
+ * struct spinand_device - SPI NAND device instance
+ * @base: NAND device instance
+ * @spimem: pointer to the SPI mem object
+ * @lock: lock used to serialize accesses to the NAND
+ * @id: NAND ID as returned by READ_ID
+ * @flags: NAND flags
+ * @op_templates: various SPI mem op templates
+ * @op_templates.read_cache: read cache op template
+ * @op_templates.write_cache: write cache op template
+ * @op_templates.update_cache: update cache op template
+ * @select_target: select a specific target/die. Usually called before sending
+ * a command addressing a page or an eraseblock embedded in
+ * this die. Only required if your chip exposes several dies
+ * @cur_target: currently selected target/die
+ * @eccinfo: on-die ECC information
+ * @cfg_cache: config register cache. One entry per die
+ * @databuf: bounce buffer for data
+ * @oobbuf: bounce buffer for OOB data
+ * @scratchbuf: buffer used for everything but page accesses. This is needed
+ * because the spi-mem interface explicitly requests that buffers
+ * passed in spi_mem_op be DMA-able, so we can't place the bufs on
+ * the stack
+ * @manufacturer: SPI NAND manufacturer information
+ * @priv: manufacturer private data
+ */
+struct spinand_device {
+ struct nand_device base;
+ struct spi_mem *spimem;
+ struct mutex lock;
+ struct spinand_id id;
+ u32 flags;
+
+ struct {
+ const struct spi_mem_op *read_cache;
+ const struct spi_mem_op *write_cache;
+ const struct spi_mem_op *update_cache;
+ } op_templates;
+
+ struct spinand_dirmap *dirmaps;
+
+ int (*select_target)(struct spinand_device *spinand,
+ unsigned int target);
+ unsigned int cur_target;
+
+ struct spinand_ecc_info eccinfo;
+
+ u8 *cfg_cache;
+ u8 *databuf;
+ u8 *oobbuf;
+ u8 *scratchbuf;
+ const struct spinand_manufacturer *manufacturer;
+ void *priv;
+};
+
+/**
+ * mtd_to_spinand() - Get the SPI NAND device attached to an MTD instance
+ * @mtd: MTD instance
+ *
+ * Return: the SPI NAND device attached to @mtd.
+ */
+static inline struct spinand_device *mtd_to_spinand(struct mtd_info *mtd)
+{
+ return container_of(mtd_to_nanddev(mtd), struct spinand_device, base);
+}
+
+/**
+ * spinand_to_mtd() - Get the MTD device embedded in a SPI NAND device
+ * @spinand: SPI NAND device
+ *
+ * Return: the MTD device embedded in @spinand.
+ */
+static inline struct mtd_info *spinand_to_mtd(struct spinand_device *spinand)
+{
+ return nanddev_to_mtd(&spinand->base);
+}
+
+/**
+ * nand_to_spinand() - Get the SPI NAND device embedding a NAND object
+ * @nand: NAND object
+ *
+ * Return: the SPI NAND device embedding @nand.
+ */
+static inline struct spinand_device *nand_to_spinand(struct nand_device *nand)
+{
+ return container_of(nand, struct spinand_device, base);
+}
+
+/**
+ * spinand_to_nand() - Get the NAND device embedded in a SPI NAND object
+ * @spinand: SPI NAND device
+ *
+ * Return: the NAND device embedded in @spinand.
+ */
+static inline struct nand_device *
+spinand_to_nand(struct spinand_device *spinand)
+{
+ return &spinand->base;
+}
+
+/**
+ * spinand_set_of_node - Attach a DT node to a SPI NAND device
+ * @spinand: SPI NAND device
+ * @np: DT node
+ *
+ * Attach a DT node to a SPI NAND device.
+ */
+static inline void spinand_set_of_node(struct spinand_device *spinand,
+ struct device_node *np)
+{
+ nanddev_set_of_node(&spinand->base, np);
+}
+
+int spinand_match_and_init(struct spinand_device *spinand,
+ const struct spinand_info *table,
+ unsigned int table_size,
+ enum spinand_readid_method rdid_method);
+
+int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
+int spinand_select_target(struct spinand_device *spinand, unsigned int target);
+
+#endif /* __LINUX_MTD_SPINAND_H */
diff --git a/include/linux/mtd/super.h b/include/linux/mtd/super.h
index f456230f9330..3608a6c36fac 100644
--- a/include/linux/mtd/super.h
+++ b/include/linux/mtd/super.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* MTD-based superblock handling
*
* Copyright © 2006 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef __MTD_SUPER_H__
@@ -18,9 +14,9 @@
#include <linux/fs.h>
#include <linux/mount.h>
-extern struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data,
- int (*fill_super)(struct super_block *, void *, int));
+extern int get_tree_mtd(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc));
extern void kill_mtd_super(struct super_block *sb);
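
With this change a filesystem on MTD moves to the fs_context API: the ->get_tree hook becomes a thin wrapper around get_tree_mtd(). A rough sketch (the examplefs_* names are placeholders):

	static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
	{
		/* parse fc options, set sb->s_op, create the root inode, ... */
		return 0;
	}

	static int examplefs_get_tree(struct fs_context *fc)
	{
		return get_tree_mtd(fc, examplefs_fill_super);
	}
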
diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h
index 1e271cb559cd..a529347fd75b 100644
--- a/include/linux/mtd/ubi.h
+++ b/include/linux/mtd/ubi.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) International Business Machines Corp., 2006
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
@@ -123,6 +110,7 @@ struct ubi_volume_info {
int name_len;
const char *name;
dev_t cdev;
+ struct device *dev;
};
/**
diff --git a/include/linux/mtd/xip.h b/include/linux/mtd/xip.h
index abed4dec5c2f..3cac9360588f 100644
--- a/include/linux/mtd/xip.h
+++ b/include/linux/mtd/xip.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* MTD primitives for XIP support
*
@@ -7,10 +8,6 @@
*
* This XIP support for MTD has been loosely inspired
* by an earlier patch authored by David Woodhouse.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MTD_XIP_H__
@@ -30,7 +27,9 @@
 * obviously not be running from flash. __xipram therefore marks
 * those functions so they get relocated to RAM.
*/
-#define __xipram noinline __attribute__ ((__section__ (".data")))
+#ifdef CONFIG_XIP_KERNEL
+#define __xipram noinline __section(".xiptext")
+#endif
/*
* Each architecture has to provide the following macros. They must access
@@ -90,10 +89,10 @@
#define xip_cpu_idle() do { } while (0)
#endif
-#else
+#endif /* CONFIG_MTD_XIP */
+#ifndef __xipram
#define __xipram
-
-#endif /* CONFIG_MTD_XIP */
+#endif
#endif /* __LINUX_MTD_XIP_H__ */
diff --git a/include/linux/mtio.h b/include/linux/mtio.h
new file mode 100644
index 000000000000..67d03156f2c2
--- /dev/null
+++ b/include/linux/mtio.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MTIO_COMPAT_H
+#define _LINUX_MTIO_COMPAT_H
+
+#include <linux/compat.h>
+#include <uapi/linux/mtio.h>
+#include <linux/uaccess.h>
+
+/*
+ * helper functions for implementing compat ioctls on the four tape
+ * drivers: we define the 32-bit layout of each incompatible structure,
+ * plus a wrapper function to copy it to user space in either format.
+ */
+
+struct mtget32 {
+ s32 mt_type;
+ s32 mt_resid;
+ s32 mt_dsreg;
+ s32 mt_gstat;
+ s32 mt_erreg;
+ s32 mt_fileno;
+ s32 mt_blkno;
+};
+#define MTIOCGET32 _IOR('m', 2, struct mtget32)
+
+struct mtpos32 {
+ s32 mt_blkno;
+};
+#define MTIOCPOS32 _IOR('m', 3, struct mtpos32)
+
+static inline int put_user_mtget(void __user *u, struct mtget *k)
+{
+ struct mtget32 k32 = {
+ .mt_type = k->mt_type,
+ .mt_resid = k->mt_resid,
+ .mt_dsreg = k->mt_dsreg,
+ .mt_gstat = k->mt_gstat,
+ .mt_erreg = k->mt_erreg,
+ .mt_fileno = k->mt_fileno,
+ .mt_blkno = k->mt_blkno,
+ };
+ int ret;
+
+ if (in_compat_syscall())
+ ret = copy_to_user(u, &k32, sizeof(k32));
+ else
+ ret = copy_to_user(u, k, sizeof(*k));
+
+ return ret ? -EFAULT : 0;
+}
+
+static inline int put_user_mtpos(void __user *u, struct mtpos *k)
+{
+ if (in_compat_syscall())
+ return put_user(k->mt_blkno, (u32 __user *)u);
+ else
+ return put_user(k->mt_blkno, (long __user *)u);
+}
+
+#endif
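
A tape driver's unlocked_ioctl handler can then stay format-agnostic, since put_user_mtget() picks the 32-bit or native layout via in_compat_syscall(). A condensed sketch (example_tape_fill_status() is hypothetical):

	static long example_tape_ioctl(struct file *file, unsigned int cmd,
				       unsigned long arg)
	{
		struct mtget status;

		switch (cmd) {
		case MTIOCGET:
			example_tape_fill_status(file, &status);
			return put_user_mtget((void __user *)arg, &status);
		}
		return -ENOTTY;
	}
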
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index ffcba1f337da..8f226d460f51 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Mutexes: blocking mutual exclusion locks
*
@@ -13,14 +14,23 @@
#include <asm/current.h>
#include <linux/list.h>
#include <linux/spinlock_types.h>
-#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/atomic.h>
#include <asm/processor.h>
#include <linux/osq_lock.h>
#include <linux/debug_locks.h>
-struct ww_acquire_ctx;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+ , .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_SLEEP, \
+ }
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+#endif
+
+#ifndef CONFIG_PREEMPT_RT
/*
* Simple, straightforward mutexes with strict semantics:
@@ -52,7 +62,7 @@ struct ww_acquire_ctx;
*/
struct mutex {
atomic_long_t owner;
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
struct optimistic_spin_queue osq; /* Spinner MCS lock */
#endif
@@ -65,24 +75,6 @@ struct mutex {
#endif
};
-static inline struct task_struct *__mutex_owner(struct mutex *lock)
-{
- return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07);
-}
-
-/*
- * This is the control structure for tasks blocked on mutex,
- * which resides on the blocked task's kernel stack:
- */
-struct mutex_waiter {
- struct list_head list;
- struct task_struct *task;
- struct ww_acquire_ctx *ww_ctx;
-#ifdef CONFIG_DEBUG_MUTEXES
- void *magic;
-#endif
-};
-
#ifdef CONFIG_DEBUG_MUTEXES
#define __DEBUG_MUTEX_INITIALIZER(lockname) \
@@ -113,16 +105,9 @@ do { \
__mutex_init((mutex), #mutex, &__key); \
} while (0)
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
- , .dep_map = { .name = #lockname }
-#else
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
-#endif
-
#define __MUTEX_INITIALIZER(lockname) \
{ .owner = ATOMIC_LONG_INIT(0) \
- , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
+ , .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
__DEBUG_MUTEX_INITIALIZER(lockname) \
__DEP_MAP_MUTEX_INITIALIZER(lockname) }
@@ -137,19 +122,57 @@ extern void __mutex_init(struct mutex *lock, const char *name,
* mutex_is_locked - is the mutex locked
* @lock: the mutex to be queried
*
- * Returns 1 if the mutex is locked, 0 if unlocked.
+ * Returns true if the mutex is locked, false if unlocked.
*/
-static inline int mutex_is_locked(struct mutex *lock)
-{
- /*
- * XXX think about spin_is_locked
- */
- return __mutex_owner(lock) != NULL;
+extern bool mutex_is_locked(struct mutex *lock);
+
+#else /* !CONFIG_PREEMPT_RT */
+/*
+ * Preempt-RT variant based on rtmutexes.
+ */
+#include <linux/rtmutex.h>
+
+struct mutex {
+ struct rt_mutex_base rtmutex;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#define __MUTEX_INITIALIZER(mutexname) \
+{ \
+ .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex) \
+ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \
}
+#define DEFINE_MUTEX(mutexname) \
+ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+extern void __mutex_rt_init(struct mutex *lock, const char *name,
+ struct lock_class_key *key);
+extern int mutex_trylock(struct mutex *lock);
+
+static inline void mutex_destroy(struct mutex *lock) { }
+
+#define mutex_is_locked(l) rt_mutex_base_is_locked(&(l)->rtmutex)
+
+#define __mutex_init(mutex, name, key) \
+do { \
+ rt_mutex_base_init(&(mutex)->rtmutex); \
+ __mutex_rt_init((mutex), name, key); \
+} while (0)
+
+#define mutex_init(mutex) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __mutex_init((mutex), #mutex, &__key); \
+} while (0)
+#endif /* CONFIG_PREEMPT_RT */
+
/*
* See kernel/locking/mutex.c for detailed documentation of these APIs.
- * Also see Documentation/locking/mutex-design.txt.
+ * Also see Documentation/locking/mutex-design.rst.
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
@@ -182,7 +205,7 @@ extern void mutex_lock_io(struct mutex *lock);
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
-# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock)
+# define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock)
#endif
/*
@@ -196,35 +219,4 @@ extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
-/*
- * These values are chosen such that FAIL and SUCCESS match the
- * values of the regular mutex_trylock().
- */
-enum mutex_trylock_recursive_enum {
- MUTEX_TRYLOCK_FAILED = 0,
- MUTEX_TRYLOCK_SUCCESS = 1,
- MUTEX_TRYLOCK_RECURSIVE,
-};
-
-/**
- * mutex_trylock_recursive - trylock variant that allows recursive locking
- * @lock: mutex to be locked
- *
- * This function should not be used, _ever_. It is purely for hysterical GEM
- * raisins, and once those are gone this will be removed.
- *
- * Returns:
- * - MUTEX_TRYLOCK_FAILED - trylock failed,
- * - MUTEX_TRYLOCK_SUCCESS - lock acquired,
- * - MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
- */
-static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
-mutex_trylock_recursive(struct mutex *lock)
-{
- if (unlikely(__mutex_owner(lock) == current))
- return MUTEX_TRYLOCK_RECURSIVE;
-
- return mutex_trylock(lock);
-}
-
#endif /* __LINUX_MUTEX_H */
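
Note that the declaration side is identical on both !PREEMPT_RT and PREEMPT_RT kernels; a trivial usage sketch:

	static DEFINE_MUTEX(example_lock);

	static int example_update(void)
	{
		if (!mutex_trylock(&example_lock))
			return -EBUSY;
		/* ... critical section ... */
		mutex_unlock(&example_lock);
		return 0;
	}
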
diff --git a/include/linux/mutex_api.h b/include/linux/mutex_api.h
new file mode 100644
index 000000000000..85ab9491e13e
--- /dev/null
+++ b/include/linux/mutex_api.h
@@ -0,0 +1 @@
+#include <linux/mutex.h>
diff --git a/include/linux/mux/consumer.h b/include/linux/mux/consumer.h
index 5577e1b773c4..2e25c838f831 100644
--- a/include/linux/mux/consumer.h
+++ b/include/linux/mux/consumer.h
@@ -1,32 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* mux/consumer.h - definitions for the multiplexer consumer interface
*
* Copyright (C) 2017 Axentia Technologies AB
*
* Author: Peter Rosin <peda@axentia.se>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LINUX_MUX_CONSUMER_H
#define _LINUX_MUX_CONSUMER_H
+#include <linux/compiler.h>
+
struct device;
struct mux_control;
+struct mux_state;
unsigned int mux_control_states(struct mux_control *mux);
-int __must_check mux_control_select(struct mux_control *mux,
- unsigned int state);
-int __must_check mux_control_try_select(struct mux_control *mux,
- unsigned int state);
+int __must_check mux_control_select_delay(struct mux_control *mux,
+ unsigned int state,
+ unsigned int delay_us);
+int __must_check mux_state_select_delay(struct mux_state *mstate,
+ unsigned int delay_us);
+int __must_check mux_control_try_select_delay(struct mux_control *mux,
+ unsigned int state,
+ unsigned int delay_us);
+int __must_check mux_state_try_select_delay(struct mux_state *mstate,
+ unsigned int delay_us);
+
+static inline int __must_check mux_control_select(struct mux_control *mux,
+ unsigned int state)
+{
+ return mux_control_select_delay(mux, state, 0);
+}
+
+static inline int __must_check mux_state_select(struct mux_state *mstate)
+{
+ return mux_state_select_delay(mstate, 0);
+}
+
+static inline int __must_check mux_control_try_select(struct mux_control *mux,
+ unsigned int state)
+{
+ return mux_control_try_select_delay(mux, state, 0);
+}
+
+static inline int __must_check mux_state_try_select(struct mux_state *mstate)
+{
+ return mux_state_try_select_delay(mstate, 0);
+}
+
int mux_control_deselect(struct mux_control *mux);
+int mux_state_deselect(struct mux_state *mstate);
struct mux_control *mux_control_get(struct device *dev, const char *mux_name);
void mux_control_put(struct mux_control *mux);
struct mux_control *devm_mux_control_get(struct device *dev,
const char *mux_name);
+struct mux_state *devm_mux_state_get(struct device *dev,
+ const char *mux_name);
#endif /* _LINUX_MUX_CONSUMER_H */
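
A consumer that needs the mux signal to settle can now express the delay directly in the select call; a minimal sketch (the "adc-mux" name and the 10 us settle time are made up):

	struct mux_control *mux;
	int ret;

	mux = devm_mux_control_get(dev, "adc-mux");
	if (IS_ERR(mux))
		return PTR_ERR(mux);

	/* select state 1; guarantee 10 us have elapsed before returning */
	ret = mux_control_select_delay(mux, 1, 10);
	if (ret < 0)
		return ret;
	/* ... sample the muxed signal ... */
	mux_control_deselect(mux);
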
diff --git a/include/linux/mux/driver.h b/include/linux/mux/driver.h
index 35c3579c3304..18824064f8c0 100644
--- a/include/linux/mux/driver.h
+++ b/include/linux/mux/driver.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* mux/driver.h - definitions for the multiplexer driver interface
*
* Copyright (C) 2017 Axentia Technologies AB
*
* Author: Peter Rosin <peda@axentia.se>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LINUX_MUX_DRIVER_H
@@ -15,6 +12,7 @@
#include <dt-bindings/mux/mux.h>
#include <linux/device.h>
+#include <linux/ktime.h>
#include <linux/semaphore.h>
struct mux_chip;
@@ -36,6 +34,7 @@ struct mux_control_ops {
* @states: The number of mux controller states.
* @idle_state: The mux controller state to use when inactive, or one
* of MUX_IDLE_AS_IS and MUX_IDLE_DISCONNECT.
+ * @last_change: Timestamp of last change
*
* Mux drivers may only change @states and @idle_state, and may only do so
* between allocation and registration of the mux controller. Specifically,
@@ -50,6 +49,8 @@ struct mux_control {
unsigned int states;
int idle_state;
+
+ ktime_t last_change;
};
/**
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h
index 69327b7b4ce4..000b126acfb6 100644
--- a/include/linux/mv643xx.h
+++ b/include/linux/mv643xx.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* mv643xx.h - MV-643XX Internal registers definition file.
*
* Copyright 2002 Momentum Computer, Inc.
* Author: Matthew Dharm <mdharm@momenco.com>
* Copyright 2002 GALILEO TECHNOLOGY, LTD.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __ASM_MV643XX_H
#define __ASM_MV643XX_H
@@ -922,58 +918,4 @@
extern void mv64340_irq_init(unsigned int base);
-/* MPSC Platform Device, Driver Data (Shared register regions) */
-#define MPSC_SHARED_NAME "mpsc_shared"
-
-#define MPSC_ROUTING_BASE_ORDER 0
-#define MPSC_SDMA_INTR_BASE_ORDER 1
-
-#define MPSC_ROUTING_REG_BLOCK_SIZE 0x000c
-#define MPSC_SDMA_INTR_REG_BLOCK_SIZE 0x0084
-
-struct mpsc_shared_pdata {
- u32 mrr_val;
- u32 rcrr_val;
- u32 tcrr_val;
- u32 intr_cause_val;
- u32 intr_mask_val;
-};
-
-/* MPSC Platform Device, Driver Data */
-#define MPSC_CTLR_NAME "mpsc"
-
-#define MPSC_BASE_ORDER 0
-#define MPSC_SDMA_BASE_ORDER 1
-#define MPSC_BRG_BASE_ORDER 2
-
-#define MPSC_REG_BLOCK_SIZE 0x0038
-#define MPSC_SDMA_REG_BLOCK_SIZE 0x0c18
-#define MPSC_BRG_REG_BLOCK_SIZE 0x0008
-
-struct mpsc_pdata {
- u8 mirror_regs;
- u8 cache_mgmt;
- u8 max_idle;
- int default_baud;
- int default_bits;
- int default_parity;
- int default_flow;
- u32 chr_1_val;
- u32 chr_2_val;
- u32 chr_10_val;
- u32 mpcr_val;
- u32 bcr_val;
- u8 brg_can_tune;
- u8 brg_clk_src;
- u32 brg_clk_freq;
-};
-
-/* Watchdog Platform Device, Driver Data */
-#define MV64x60_WDT_NAME "mv64x60_wdt"
-
-struct mv64x60_wdt_pdata {
- int timeout; /* watchdog expiry in seconds, default 10 */
- int bus_clk; /* bus clock in MHz, default 133 */
-};
-
#endif /* __ASM_MV643XX_H */
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index 61a0da38d0cb..145169be2ed8 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* MV-643XX ethernet platform device data definition file.
*/
@@ -7,6 +8,7 @@
#include <linux/mbus.h>
#include <linux/if_ether.h>
+#include <linux/phy.h>
#define MV643XX_ETH_SHARED_NAME "mv643xx_eth"
#define MV643XX_ETH_NAME "mv643xx_eth_port"
@@ -58,6 +60,7 @@ struct mv643xx_eth_platform_data {
*/
int speed;
int duplex;
+ phy_interface_t interface;
/*
* How many RX/TX queues to use.
diff --git a/include/linux/mv643xx_i2c.h b/include/linux/mv643xx_i2c.h
index 5db5152e9de5..b2844e1cac80 100644
--- a/include/linux/mv643xx_i2c.h
+++ b/include/linux/mv643xx_i2c.h
@@ -1,8 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef _MV64XXX_I2C_H_
diff --git a/include/linux/mxm-wmi.h b/include/linux/mxm-wmi.h
index 617a2950523c..28b5b4c2a782 100644
--- a/include/linux/mxm-wmi.h
+++ b/include/linux/mxm-wmi.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* MXM WMI driver
*
* Copyright(C) 2010 Red Hat.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef MXM_WMI_H
diff --git a/include/linux/n_r3964.h b/include/linux/n_r3964.h
deleted file mode 100644
index 90a803aa42e8..000000000000
--- a/include/linux/n_r3964.h
+++ /dev/null
@@ -1,175 +0,0 @@
-/* r3964 linediscipline for linux
- *
- * -----------------------------------------------------------
- * Copyright by
- * Philips Automation Projects
- * Kassel (Germany)
- * -----------------------------------------------------------
- * This software may be used and distributed according to the terms of
- * the GNU General Public License, incorporated herein by reference.
- *
- * Author:
- * L. Haag
- *
- * $Log: r3964.h,v $
- * Revision 1.4 2005/12/21 19:54:24 Kurt Huwig <kurt huwig de>
- * Fixed HZ usage on 2.6 kernels
- * Removed unnecessary include
- *
- * Revision 1.3 2001/03/18 13:02:24 dwmw2
- * Fix timer usage, use spinlocks properly.
- *
- * Revision 1.2 2001/03/18 12:53:15 dwmw2
- * Merge changes in 2.4.2
- *
- * Revision 1.1.1.1 1998/10/13 16:43:14 dwmw2
- * This'll screw the version control
- *
- * Revision 1.6 1998/09/30 00:40:38 dwmw2
- * Updated to use kernel's N_R3964 if available
- *
- * Revision 1.4 1998/04/02 20:29:44 lhaag
- * select, blocking, ...
- *
- * Revision 1.3 1998/02/12 18:58:43 root
- * fixed some memory leaks
- * calculation of checksum characters
- *
- * Revision 1.2 1998/02/07 13:03:17 root
- * ioctl read_telegram
- *
- * Revision 1.1 1998/02/06 19:19:43 root
- * Initial revision
- *
- *
- */
-#ifndef __LINUX_N_R3964_H__
-#define __LINUX_N_R3964_H__
-
-
-#include <linux/param.h>
-#include <uapi/linux/n_r3964.h>
-
-/*
- * Common ascii handshake characters:
- */
-
-#define STX 0x02
-#define ETX 0x03
-#define DLE 0x10
-#define NAK 0x15
-
-/*
- * Timeouts (from milliseconds to jiffies)
- */
-
-#define R3964_TO_QVZ ((550)*HZ/1000)
-#define R3964_TO_ZVZ ((220)*HZ/1000)
-#define R3964_TO_NO_BUF ((400)*HZ/1000)
-#define R3964_NO_TX_ROOM ((100)*HZ/1000)
-#define R3964_TO_RX_PANIC ((4000)*HZ/1000)
-#define R3964_MAX_RETRIES 5
-
-
-enum { R3964_IDLE,
- R3964_TX_REQUEST, R3964_TRANSMITTING,
- R3964_WAIT_ZVZ_BEFORE_TX_RETRY, R3964_WAIT_FOR_TX_ACK,
- R3964_WAIT_FOR_RX_BUF,
- R3964_RECEIVING, R3964_WAIT_FOR_BCC, R3964_WAIT_FOR_RX_REPEAT
- };
-
-/*
- * All open file-handles are 'clients' and are stored in a linked list:
- */
-
-struct r3964_message;
-
-struct r3964_client_info {
- spinlock_t lock;
- struct pid *pid;
- unsigned int sig_flags;
-
- struct r3964_client_info *next;
-
- struct r3964_message *first_msg;
- struct r3964_message *last_msg;
- struct r3964_block_header *next_block_to_read;
- int msg_count;
-};
-
-
-
-struct r3964_block_header;
-
-/* internal version of client_message: */
-struct r3964_message {
- int msg_id;
- int arg;
- int error_code;
- struct r3964_block_header *block;
- struct r3964_message *next;
-};
-
-/*
- * Header of received block in rx_buf/tx_buf:
- */
-
-struct r3964_block_header
-{
- unsigned int length; /* length in chars without header */
- unsigned char *data; /* usually data is located
- immediately behind this struct */
- unsigned int locks; /* only used in rx_buffer */
-
- struct r3964_block_header *next;
- struct r3964_client_info *owner; /* =NULL in rx_buffer */
-};
-
-/*
- * If rx_buf hasn't enough space to store R3964_MTU chars,
- * we will reject all incoming STX-requests by sending NAK.
- */
-
-#define RX_BUF_SIZE 4000
-#define TX_BUF_SIZE 4000
-#define R3964_MAX_BLOCKS_IN_RX_QUEUE 100
-
-#define R3964_PARITY 0x0001
-#define R3964_FRAME 0x0002
-#define R3964_OVERRUN 0x0004
-#define R3964_UNKNOWN 0x0008
-#define R3964_BREAK 0x0010
-#define R3964_CHECKSUM 0x0020
-#define R3964_ERROR 0x003f
-#define R3964_BCC 0x4000
-#define R3964_DEBUG 0x8000
-
-
-struct r3964_info {
- spinlock_t lock;
- struct tty_struct *tty;
- unsigned char priority;
- unsigned char *rx_buf; /* ring buffer */
- unsigned char *tx_buf;
-
- struct r3964_block_header *rx_first;
- struct r3964_block_header *rx_last;
- struct r3964_block_header *tx_first;
- struct r3964_block_header *tx_last;
- unsigned int tx_position;
- unsigned int rx_position;
- unsigned char last_rx;
- unsigned char bcc;
- unsigned int blocks_in_rx_queue;
-
- struct mutex read_lock; /* serialize r3964_read */
-
- struct r3964_client_info *firstClient;
- unsigned int state;
- unsigned int flags;
-
- struct timer_list tmr;
- int nRetry;
-};
-
-#endif
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 8b4794e83196..1463cbda4888 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -1,6 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NAMEI_H
#define _LINUX_NAMEI_H
+#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/path.h>
#include <linux/fcntl.h>
@@ -13,38 +15,37 @@ enum { MAX_NESTED_LINKS = 8 };
/*
* Type of the last component on LOOKUP_PARENT
*/
-enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
-
-/*
- * The bitmask for a lookup event:
- * - follow links at the end
- * - require a directory
- * - ending slashes ok even for nonexistent files
- * - internal "there are more path components" flag
- * - dentry cache is untrusted; force a real lookup
- * - suppress terminal automount
- */
-#define LOOKUP_FOLLOW 0x0001
-#define LOOKUP_DIRECTORY 0x0002
-#define LOOKUP_AUTOMOUNT 0x0004
-
+enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT};
+
+/* pathwalk mode */
+#define LOOKUP_FOLLOW 0x0001 /* follow links at the end */
+#define LOOKUP_DIRECTORY 0x0002 /* require a directory */
+#define LOOKUP_AUTOMOUNT 0x0004 /* force terminal automount */
+#define LOOKUP_EMPTY 0x4000 /* accept empty path [user_... only] */
+#define LOOKUP_DOWN 0x8000 /* follow mounts in the starting point */
+#define LOOKUP_MOUNTPOINT 0x0080 /* follow mounts in the end */
+
+#define LOOKUP_REVAL 0x0020 /* tell ->d_revalidate() to trust no cache */
+#define LOOKUP_RCU 0x0040 /* RCU pathwalk mode; semi-internal */
+
+/* These tell filesystem methods that we are dealing with the final component... */
+#define LOOKUP_OPEN 0x0100 /* ... in open */
+#define LOOKUP_CREATE 0x0200 /* ... in object creation */
+#define LOOKUP_EXCL 0x0400 /* ... in exclusive creation */
+#define LOOKUP_RENAME_TARGET 0x0800 /* ... in destination of rename() */
+
+/* internal use only */
#define LOOKUP_PARENT 0x0010
-#define LOOKUP_REVAL 0x0020
-#define LOOKUP_RCU 0x0040
-#define LOOKUP_NO_REVAL 0x0080
-/*
- * Intent data
- */
-#define LOOKUP_OPEN 0x0100
-#define LOOKUP_CREATE 0x0200
-#define LOOKUP_EXCL 0x0400
-#define LOOKUP_RENAME_TARGET 0x0800
-
-#define LOOKUP_JUMPED 0x1000
-#define LOOKUP_ROOT 0x2000
-#define LOOKUP_EMPTY 0x4000
-#define LOOKUP_DOWN 0x8000
+/* Scoping flags for lookup. */
+#define LOOKUP_NO_SYMLINKS 0x010000 /* No symlink crossing. */
+#define LOOKUP_NO_MAGICLINKS 0x020000 /* No nd_jump_link() crossing. */
+#define LOOKUP_NO_XDEV 0x040000 /* No mountpoint crossing. */
+#define LOOKUP_BENEATH 0x080000 /* No escaping from starting point. */
+#define LOOKUP_IN_ROOT 0x100000 /* Treat dirfd as fs root. */
+#define LOOKUP_CACHED 0x200000 /* Only do cached lookup */
+/* LOOKUP_* flags which do scope-related checks based on the dirfd. */
+#define LOOKUP_IS_SCOPED (LOOKUP_BENEATH | LOOKUP_IN_ROOT)
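
In-kernel path resolution combines these flags with helpers such as kern_path(); e.g. (sketch):

	struct path path;
	int err;

	/* follow trailing symlinks and require the result to be a directory */
	err = kern_path("/sys/kernel", LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
	if (err)
		return err;
	/* ... use path.dentry / path.mnt ... */
	path_put(&path);
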
extern int path_pts(struct path *path);
@@ -56,41 +57,42 @@ static inline int user_path_at(int dfd, const char __user *name, unsigned flags,
return user_path_at_empty(dfd, name, flags, path, NULL);
}
-static inline int user_path(const char __user *name, struct path *path)
-{
- return user_path_at_empty(AT_FDCWD, name, LOOKUP_FOLLOW, path, NULL);
-}
-
-static inline int user_lpath(const char __user *name, struct path *path)
-{
- return user_path_at_empty(AT_FDCWD, name, 0, path, NULL);
-}
-
-static inline int user_path_dir(const char __user *name, struct path *path)
-{
- return user_path_at_empty(AT_FDCWD, name,
- LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path, NULL);
-}
-
+struct dentry *lookup_one_qstr_excl(const struct qstr *name,
+ struct dentry *base,
+ unsigned int flags);
extern int kern_path(const char *, unsigned, struct path *);
extern struct dentry *kern_path_create(int, const char *, struct path *, unsigned int);
extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int);
extern void done_path_create(struct path *, struct dentry *);
extern struct dentry *kern_path_locked(const char *, struct path *);
-extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int);
+int vfs_path_parent_lookup(struct filename *filename, unsigned int flags,
+ struct path *parent, struct qstr *last, int *type,
+ const struct path *root);
+int vfs_path_lookup(struct dentry *, struct vfsmount *, const char *,
+ unsigned int, struct path *);
+extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int);
extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int);
+extern struct dentry *lookup_positive_unlocked(const char *, struct dentry *, int);
+struct dentry *lookup_one(struct mnt_idmap *, const char *, struct dentry *, int);
+struct dentry *lookup_one_unlocked(struct mnt_idmap *idmap,
+ const char *name, struct dentry *base,
+ int len);
+struct dentry *lookup_one_positive_unlocked(struct mnt_idmap *idmap,
+ const char *name,
+ struct dentry *base, int len);
extern int follow_down_one(struct path *);
-extern int follow_down(struct path *);
+extern int follow_down(struct path *path, unsigned int flags);
extern int follow_up(struct path *);
extern struct dentry *lock_rename(struct dentry *, struct dentry *);
+extern struct dentry *lock_rename_child(struct dentry *, struct dentry *);
extern void unlock_rename(struct dentry *, struct dentry *);
-extern void nd_jump_link(struct path *path);
+extern int __must_check nd_jump_link(const struct path *path);
static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
{
diff --git a/include/linux/nd.h b/include/linux/nd.h
index 5dc6b695437d..b9771ba1ef87 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -1,14 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef __LINUX_ND_H__
#define __LINUX_ND_H__
@@ -16,9 +8,11 @@
#include <linux/ndctl.h>
#include <linux/device.h>
#include <linux/badblocks.h>
+#include <linux/perf_event.h>
enum nvdimm_event {
NVDIMM_REVALIDATE_POISON,
+ NVDIMM_REVALIDATE_REGION,
};
enum nvdimm_claim_class {
@@ -30,11 +24,62 @@ enum nvdimm_claim_class {
NVDIMM_CCLASS_UNKNOWN,
};
+#define NVDIMM_EVENT_VAR(_id) event_attr_##_id
+#define NVDIMM_EVENT_PTR(_id) (&event_attr_##_id.attr.attr)
+
+#define NVDIMM_EVENT_ATTR(_name, _id) \
+ PMU_EVENT_ATTR(_name, NVDIMM_EVENT_VAR(_id), _id, \
+ nvdimm_events_sysfs_show)
+
+/* Event attribute array index */
+#define NVDIMM_PMU_FORMAT_ATTR 0
+#define NVDIMM_PMU_EVENT_ATTR 1
+#define NVDIMM_PMU_CPUMASK_ATTR 2
+#define NVDIMM_PMU_NULL_ATTR 3
+
+/**
+ * struct nvdimm_pmu - data structure for nvdimm perf driver
+ * @pmu: pmu data structure for nvdimm performance stats.
+ * @dev: nvdimm device pointer.
+ * @cpu: designated cpu for counter access.
+ * @node: node for cpu hotplug notifier link.
+ * @cpuhp_state: state for cpu hotplug notification.
+ * @arch_cpumask: cpumask to get designated cpu for counter access.
+ */
+struct nvdimm_pmu {
+ struct pmu pmu;
+ struct device *dev;
+ int cpu;
+ struct hlist_node node;
+ enum cpuhp_state cpuhp_state;
+ /* cpumask provided by arch/platform specific code */
+ struct cpumask arch_cpumask;
+};
+
+struct platform_device;
+
+#ifdef CONFIG_PERF_EVENTS
+extern ssize_t nvdimm_events_sysfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page);
+
+int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev);
+void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu);
+
+#else
+static inline int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev)
+{
+ return -ENXIO;
+}
+
+static inline void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu) { }
+#endif
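
A platform driver backing an nvdimm PMU allocates and fills a struct nvdimm_pmu, then hands it to the core; a condensed sketch (pmu callbacks and error unwinding omitted, names hypothetical):

	static int example_nvdimm_pmu_probe(struct platform_device *pdev)
	{
		struct nvdimm_pmu *nd_pmu;

		nd_pmu = devm_kzalloc(&pdev->dev, sizeof(*nd_pmu), GFP_KERNEL);
		if (!nd_pmu)
			return -ENOMEM;

		nd_pmu->pmu.name = "example_nvdimm";
		/* ... fill in event_init/add/del/start/stop/read callbacks ... */

		return register_nvdimm_pmu(nd_pmu, pdev);
	}
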
+
struct nd_device_driver {
struct device_driver drv;
unsigned long type;
int (*probe)(struct device *dev);
- int (*remove)(struct device *dev);
+ void (*remove)(struct device *dev);
void (*shutdown)(struct device *dev);
void (*notify)(struct device *dev, enum nvdimm_event event);
};
@@ -95,29 +140,8 @@ struct nd_namespace_pmem {
struct nd_namespace_io nsio;
unsigned long lbasize;
char *alt_name;
- u8 *uuid;
- int id;
-};
-
-/**
- * struct nd_namespace_blk - namespace for dimm-bounded persistent memory
- * @alt_name: namespace name supplied in the dimm label
- * @uuid: namespace name supplied in the dimm label
- * @id: ida allocated id
- * @lbasize: blk namespaces have a native sector size when btt not present
- * @size: sum of all the resource ranges allocated to this namespace
- * @num_resources: number of dpa extents to claim
- * @res: discontiguous dpa extents for given dimm
- */
-struct nd_namespace_blk {
- struct nd_namespace_common common;
- char *alt_name;
- u8 *uuid;
+ uuid_t *uuid;
int id;
- unsigned long lbasize;
- resource_size_t size;
- int num_resources;
- struct resource **res;
};
static inline struct nd_namespace_io *to_nd_namespace_io(const struct device *dev)
@@ -132,11 +156,6 @@ static inline struct nd_namespace_pmem *to_nd_namespace_pmem(const struct device
return container_of(nsio, struct nd_namespace_pmem, nsio);
}
-static inline struct nd_namespace_blk *to_nd_namespace_blk(const struct device *dev)
-{
- return container_of(dev, struct nd_namespace_blk, common.dev);
-}
-
/**
* nvdimm_read_bytes() - synchronously read bytes from an nvdimm namespace
* @ndns: device to read
@@ -155,7 +174,7 @@ static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns,
/**
* nvdimm_write_bytes() - synchronously write bytes to an nvdimm namespace
- * @ndns: device to read
+ * @ndns: device to write
* @offset: namespace-relative starting offset
* @buf: buffer to drain
* @size: transfer length
@@ -180,6 +199,12 @@ struct nd_region;
void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event);
int __must_check __nd_driver_register(struct nd_device_driver *nd_drv,
struct module *module, const char *mod_name);
+static inline void nd_driver_unregister(struct nd_device_driver *drv)
+{
+ driver_unregister(&drv->drv);
+}
#define nd_driver_register(driver) \
__nd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+#define module_nd_driver(driver) \
+ module_driver(driver, nd_driver_register, nd_driver_unregister)
#endif /* __LINUX_ND_H__ */
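
With module_nd_driver(), an ND bus driver reduces to its callbacks plus one line of registration boilerplate; a skeleton (assuming the usual ND_DRIVER_* type flags from ndctl.h):

	static int example_nd_probe(struct device *dev)
	{
		return 0;
	}

	static void example_nd_remove(struct device *dev)
	{
		/* ->remove() now returns void */
	}

	static struct nd_device_driver example_nd_driver = {
		.probe = example_nd_probe,
		.remove = example_nd_remove,
		.drv = {
			.name = "example_nd",
		},
		.type = ND_DRIVER_NAMESPACE_PMEM,
	};
	module_nd_driver(example_nd_driver);
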
diff --git a/include/linux/ndctl.h b/include/linux/ndctl.h
new file mode 100644
index 000000000000..cd5a293ce3ae
--- /dev/null
+++ b/include/linux/ndctl.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2014-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU Lesser General Public License,
+ * version 2.1, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+ * more details.
+ */
+#ifndef _LINUX_NDCTL_H
+#define _LINUX_NDCTL_H
+
+#include <uapi/linux/ndctl.h>
+
+enum {
+ ND_MIN_NAMESPACE_SIZE = PAGE_SIZE,
+};
+
+#endif /* _LINUX_NDCTL_H */
diff --git a/include/linux/net.h b/include/linux/net.h
index dda2cc939a53..b73ad8e3c212 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* NET An implementation of the SOCKET network access protocol.
* This is the master header file for the Linux NET layer,
@@ -9,11 +10,6 @@
* Authors: Orest Zborowski, <obz@Kodak.COM>
* Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_NET_H
#define _LINUX_NET_H
@@ -22,10 +18,11 @@
#include <linux/random.h>
#include <linux/wait.h>
#include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */
-#include <linux/kmemcheck.h>
#include <linux/rcupdate.h>
#include <linux/once.h>
#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/sockptr.h>
#include <uapi/linux/net.h>
@@ -37,13 +34,15 @@ struct net;
/* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located
* in sock->flags, but moved into sk->sk_wq->flags to be RCU protected.
- * Eventually all flags will be in sk->sk_wq_flags.
+ * Eventually all flags will be in sk->sk_wq->flags.
*/
#define SOCKWQ_ASYNC_NOSPACE 0
#define SOCKWQ_ASYNC_WAITDATA 1
#define SOCK_NOSPACE 2
#define SOCK_PASSCRED 3
#define SOCK_PASSSEC 4
+#define SOCK_SUPPORT_ZC 5
+#define SOCK_CUSTOM_SOCKOPT 6
#ifndef ARCH_HAS_SOCKET_TYPES
/**
@@ -84,6 +83,12 @@ enum sock_type {
#endif /* ARCH_HAS_SOCKET_TYPES */
+/**
+ * enum sock_shutdown_cmd - Shutdown types
+ * @SHUT_RD: shutdown receptions
+ * @SHUT_WR: shutdown transmissions
+ * @SHUT_RDWR: shutdown receptions/transmissions
+ */
enum sock_shutdown_cmd {
SHUT_RD,
SHUT_WR,
@@ -111,19 +116,36 @@ struct socket_wq {
struct socket {
socket_state state;
- kmemcheck_bitfield_begin(type);
short type;
- kmemcheck_bitfield_end(type);
unsigned long flags;
- struct socket_wq __rcu *wq;
-
struct file *file;
struct sock *sk;
const struct proto_ops *ops;
+
+ struct socket_wq wq;
};
+/*
+ * "descriptor" for what we're up to with a read.
+ * This allows us to use the same read code yet
+ * have multiple different users of the data that
+ * we read from a file.
+ *
+ * The simplest case just copies the data to user
+ * mode.
+ */
+typedef struct {
+ size_t written;
+ size_t count;
+ union {
+ char __user *buf;
+ void *data;
+ } arg;
+ int error;
+} read_descriptor_t;
+
struct vm_area_struct;
struct page;
struct sockaddr;
@@ -132,6 +154,8 @@ struct module;
struct sk_buff;
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
unsigned int, size_t);
+typedef int (*skb_read_actor_t)(struct sock *, struct sk_buff *);
+
struct proto_ops {
int family;
@@ -149,8 +173,8 @@ struct proto_ops {
struct socket *newsock, int flags, bool kern);
int (*getname) (struct socket *sock,
struct sockaddr *addr,
- int *sockaddr_len, int peer);
- unsigned int (*poll) (struct file *file, struct socket *sock,
+ int peer);
+ __poll_t (*poll) (struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int (*ioctl) (struct socket *sock, unsigned int cmd,
unsigned long arg);
@@ -158,18 +182,16 @@ struct proto_ops {
int (*compat_ioctl) (struct socket *sock, unsigned int cmd,
unsigned long arg);
#endif
+ int (*gettstamp) (struct socket *sock, void __user *userstamp,
+ bool timeval, bool time32);
int (*listen) (struct socket *sock, int len);
int (*shutdown) (struct socket *sock, int flags);
int (*setsockopt)(struct socket *sock, int level,
- int optname, char __user *optval, unsigned int optlen);
+ int optname, sockptr_t optval,
+ unsigned int optlen);
int (*getsockopt)(struct socket *sock, int level,
int optname, char __user *optval, int __user *optlen);
-#ifdef CONFIG_COMPAT
- int (*compat_setsockopt)(struct socket *sock, int level,
- int optname, char __user *optval, unsigned int optlen);
- int (*compat_getsockopt)(struct socket *sock, int level,
- int optname, char __user *optval, int __user *optlen);
-#endif
+ void (*show_fdinfo)(struct seq_file *m, struct socket *sock);
int (*sendmsg) (struct socket *sock, struct msghdr *m,
size_t total_len);
/* Notes for implementing recvmsg:
@@ -190,8 +212,19 @@ struct proto_ops {
struct pipe_inode_info *pipe, size_t len, unsigned int flags);
int (*set_peek_off)(struct sock *sk, int val);
int (*peek_len)(struct socket *sock);
+
+ /* The following functions are called internally by kernel with
+ * sock lock already held.
+ */
int (*read_sock)(struct sock *sk, read_descriptor_t *desc,
sk_read_actor_t recv_actor);
+	/* This is different from read_sock(); it reads an entire skb at a time. */
+ int (*read_skb)(struct sock *sk, skb_read_actor_t recv_actor);
+ int (*sendpage_locked)(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags);
+ int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg,
+ size_t size);
+ int (*set_rcvlowat)(struct sock *sk, int val);
};
#define DECLARE_SOCKADDR(type, dst, src) \
@@ -217,6 +250,7 @@ enum {
int sock_wake_async(struct socket_wq *sk_wq, int how, int band);
int sock_register(const struct net_proto_family *fam);
void sock_unregister(int family);
+bool sock_is_registered(int family);
int __sock_create(struct net *net, int family, int type, int proto,
struct socket **res, int kern);
int sock_create(int family, int type, int proto, struct socket **res);
@@ -228,7 +262,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg);
int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags);
struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
struct socket *sockfd_lookup(int fd, int *err);
-struct socket *sock_from_file(struct file *file, int *err);
+struct socket *sock_from_file(struct file *file);
#define sockfd_put(sock) fput(sock->file)
int net_ratelimit(void);
@@ -252,11 +286,12 @@ do { \
net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
#define net_info_ratelimited(fmt, ...) \
net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define net_dbg_ratelimited(fmt, ...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
+ if (DYNAMIC_DEBUG_BRANCH(descriptor) && \
net_ratelimit()) \
__dynamic_pr_debug(&descriptor, pr_fmt(fmt), \
##__VA_ARGS__); \
@@ -274,11 +309,26 @@ do { \
#define net_get_random_once(buf, nbytes) \
get_random_once((buf), (nbytes))
-#define net_get_random_once_wait(buf, nbytes) \
- get_random_once_wait((buf), (nbytes))
+
+/*
+ * E.g. XFS meta- & log-data is in slab pages, or bcache meta
+ * data pages, or other high order pages allocated by
+ * __get_free_pages() without __GFP_COMP, which have a page_count
+ * of 0 and/or have PageSlab() set. We cannot use send_page for
+ * those, as that does get_page(); put_page(); and would cause
+ * either a VM_BUG directly, or __page_cache_release a page that
+ * would actually still be referenced by someone, leading to some
+ * obscure delayed Oops somewhere else.
+ */
+static inline bool sendpage_ok(struct page *page)
+{
+ return !PageSlab(page) && page_count(page) >= 1;
+}
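
Zero-copy transmit paths are expected to gate on this helper before using sendpage; roughly:

	/* fall back to a copying send when the page is not sendpage-safe */
	if (sendpage_ok(page))
		ret = kernel_sendpage(sock, page, offset, len, flags);
	else
		ret = sock_no_sendpage(sock, page, offset, len, flags);
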
int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len);
+int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
+ struct kvec *vec, size_t num, size_t len);
int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len, int flags);
@@ -287,17 +337,12 @@ int kernel_listen(struct socket *sock, int backlog);
int kernel_accept(struct socket *sock, struct socket **newsock, int flags);
int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
int flags);
-int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
- int *addrlen);
-int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
- int *addrlen);
-int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval,
- int *optlen);
-int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval,
- unsigned int optlen);
+int kernel_getsockname(struct socket *sock, struct sockaddr *addr);
+int kernel_getpeername(struct socket *sock, struct sockaddr *addr);
int kernel_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags);
-int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
+int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
+ size_t size, int flags);
int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
/* Routine returns the IP overhead imposed by a (caller-protected) socket. */
diff --git a/include/linux/net/intel/i40e_client.h b/include/linux/net/intel/i40e_client.h
new file mode 100644
index 000000000000..ed42bd5f639f
--- /dev/null
+++ b/include/linux/net/intel/i40e_client.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_CLIENT_H_
+#define _I40E_CLIENT_H_
+
+#include <linux/auxiliary_bus.h>
+
+#define I40E_CLIENT_STR_LENGTH 10
+
+/* Client interface version should be updated anytime there is a change in the
+ * existing APIs or data structures.
+ */
+#define I40E_CLIENT_VERSION_MAJOR 0
+#define I40E_CLIENT_VERSION_MINOR 01
+#define I40E_CLIENT_VERSION_BUILD 00
+#define I40E_CLIENT_VERSION_STR \
+ __stringify(I40E_CLIENT_VERSION_MAJOR) "." \
+ __stringify(I40E_CLIENT_VERSION_MINOR) "." \
+ __stringify(I40E_CLIENT_VERSION_BUILD)
+
+struct i40e_client_version {
+ u8 major;
+ u8 minor;
+ u8 build;
+ u8 rsvd;
+};
+
+enum i40e_client_instance_state {
+ __I40E_CLIENT_INSTANCE_NONE,
+ __I40E_CLIENT_INSTANCE_OPENED,
+};
+
+struct i40e_ops;
+struct i40e_client;
+
+#define I40E_QUEUE_INVALID_IDX 0xFFFF
+
+struct i40e_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+struct i40e_qvlist_info {
+ u32 num_vectors;
+ struct i40e_qv_info qv_info[];
+};
+
+
+/* set of LAN parameters useful for clients managed by LAN */
+
+/* Struct to hold per priority info */
+struct i40e_prio_qos_params {
+ u16 qs_handle; /* qs handle for prio */
+ u8 tc; /* TC mapped to prio */
+ u8 reserved;
+};
+
+#define I40E_CLIENT_MAX_USER_PRIORITY 8
+/* Struct to hold Client QoS */
+struct i40e_qos_params {
+ struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY];
+};
+
+struct i40e_params {
+ struct i40e_qos_params qos;
+ u16 mtu;
+};
+
+/* Structure to hold Lan device info for a client device */
+struct i40e_info {
+ struct i40e_client_version version;
+ u8 lanmac[6];
+ struct net_device *netdev;
+ struct pci_dev *pcidev;
+ struct auxiliary_device *aux_dev;
+ u8 __iomem *hw_addr;
+ u8 fid; /* function id, PF id or VF id */
+#define I40E_CLIENT_FTYPE_PF 0
+ u8 ftype; /* function type, PF or VF */
+ void *pf;
+
+ /* All L2 params that could change during the life span of the PF
+ * and needs to be communicated to the client when they change
+ */
+ struct i40e_qvlist_info *qvlist_info;
+ struct i40e_params params;
+ struct i40e_ops *ops;
+
+	u16 msix_count; /* number of msix vectors */
+ /* Array down below will be dynamically allocated based on msix_count */
+ struct msix_entry *msix_entries;
+	u16 itr_index; /* Which ITR index the PE driver is supposed to use */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u32 fw_build; /* firmware build number */
+};
+
+struct i40e_auxiliary_device {
+ struct auxiliary_device aux_dev;
+ struct i40e_info *ldev;
+};
+
+#define I40E_CLIENT_RESET_LEVEL_PF 1
+#define I40E_CLIENT_RESET_LEVEL_CORE 2
+#define I40E_CLIENT_VSI_FLAG_TCP_ENABLE BIT(1)
+
+struct i40e_ops {
+ /* setup_q_vector_list enables queues with a particular vector */
+ int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client,
+ struct i40e_qvlist_info *qv_info);
+
+ int (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client,
+ u32 vf_id, u8 *msg, u16 len);
+
+	/* If the PE Engine is unresponsive, the RDMA driver can request a reset.
+	 * The level parameter determines the depth of the reset being requested.
+ */
+ void (*request_reset)(struct i40e_info *ldev,
+ struct i40e_client *client, u32 level);
+
+ /* API for the RDMA driver to set certain VSI flags that control
+ * PE Engine.
+ */
+ int (*update_vsi_ctxt)(struct i40e_info *ldev,
+ struct i40e_client *client,
+ bool is_vf, u32 vf_id,
+ u32 flag, u32 valid_flag);
+};
+
+struct i40e_client_ops {
+ /* Should be called from register_client() or whenever PF is ready
+ * to create a specific client instance.
+ */
+ int (*open)(struct i40e_info *ldev, struct i40e_client *client);
+
+ /* Should be called when netdev is unavailable or when unregister
+	 * call comes in. If the close is happening due to a reset being
+	 * triggered, set the reset bit to true.
+ */
+ void (*close)(struct i40e_info *ldev, struct i40e_client *client,
+ bool reset);
+
+ /* called when some l2 managed parameters changes - mtu */
+ void (*l2_param_change)(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_params *params);
+
+ int (*virtchnl_receive)(struct i40e_info *ldev,
+ struct i40e_client *client, u32 vf_id,
+ u8 *msg, u16 len);
+
+ /* called when a VF is reset by the PF */
+ void (*vf_reset)(struct i40e_info *ldev,
+ struct i40e_client *client, u32 vf_id);
+
+ /* called when the number of VFs changes */
+ void (*vf_enable)(struct i40e_info *ldev,
+ struct i40e_client *client, u32 num_vfs);
+
+ /* returns true if VF is capable of specified offload */
+ int (*vf_capable)(struct i40e_info *ldev,
+ struct i40e_client *client, u32 vf_id);
+};
+
+/* Client device */
+struct i40e_client_instance {
+ struct list_head list;
+ struct i40e_info lan_info;
+ struct i40e_client *client;
+ unsigned long state;
+};
+
+struct i40e_client {
+ struct list_head list; /* list of registered clients */
+ char name[I40E_CLIENT_STR_LENGTH];
+ struct i40e_client_version version;
+ unsigned long state; /* client state */
+ atomic_t ref_cnt; /* Count of all the client devices of this kind */
+ u32 flags;
+ u8 type;
+#define I40E_CLIENT_IWARP 0
+ const struct i40e_client_ops *ops; /* client ops provided by the client */
+};
+
+void i40e_client_device_register(struct i40e_info *ldev, struct i40e_client *client);
+void i40e_client_device_unregister(struct i40e_info *ldev);
+
+#endif /* _I40E_CLIENT_H_ */
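
A client such as an RDMA driver fills in i40e_client_ops and registers against the LAN device from its auxiliary-bus probe; a compressed sketch (the example_* callbacks are placeholders):

	static const struct i40e_client_ops example_ops = {
		.open = example_open,
		.close = example_close,
	};

	static struct i40e_client example_client = {
		.name = "example",
		.type = I40E_CLIENT_IWARP,
		.ops = &example_ops,
	};

	/* in the auxiliary probe: ldev comes from
	 * container_of(aux_dev, struct i40e_auxiliary_device, aux_dev)->ldev
	 */
	i40e_client_device_register(ldev, &example_client);
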
diff --git a/include/linux/net/intel/iidc.h b/include/linux/net/intel/iidc.h
new file mode 100644
index 000000000000..1c1332e4df26
--- /dev/null
+++ b/include/linux/net/intel/iidc.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021, Intel Corporation. */
+
+#ifndef _IIDC_H_
+#define _IIDC_H_
+
+#include <linux/auxiliary_bus.h>
+#include <linux/dcbnl.h>
+#include <linux/device.h>
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+
+enum iidc_event_type {
+ IIDC_EVENT_BEFORE_MTU_CHANGE,
+ IIDC_EVENT_AFTER_MTU_CHANGE,
+ IIDC_EVENT_BEFORE_TC_CHANGE,
+ IIDC_EVENT_AFTER_TC_CHANGE,
+ IIDC_EVENT_CRIT_ERR,
+ IIDC_EVENT_NBITS /* must be last */
+};
+
+enum iidc_reset_type {
+ IIDC_PFR,
+ IIDC_CORER,
+ IIDC_GLOBR,
+};
+
+enum iidc_rdma_protocol {
+ IIDC_RDMA_PROTOCOL_IWARP = BIT(0),
+ IIDC_RDMA_PROTOCOL_ROCEV2 = BIT(1),
+};
+
+#define IIDC_MAX_USER_PRIORITY 8
+#define IIDC_MAX_DSCP_MAPPING 64
+#define IIDC_DSCP_PFC_MODE 0x1
+
+/* Struct to hold per RDMA Qset info */
+struct iidc_rdma_qset_params {
+ /* Qset TEID returned to the RDMA driver in
+ * ice_add_rdma_qset and used by RDMA driver
+ * for calls to ice_del_rdma_qset
+ */
+ u32 teid; /* Qset TEID */
+ u16 qs_handle; /* RDMA driver provides this */
+ u16 vport_id; /* VSI index */
+ u8 tc; /* TC branch the Qset should belong to */
+};
+
+struct iidc_qos_info {
+ u64 tc_ctx;
+ u8 rel_bw;
+ u8 prio_type;
+ u8 egress_virt_up;
+ u8 ingress_virt_up;
+};
+
+/* Struct to pass QoS info */
+struct iidc_qos_params {
+ struct iidc_qos_info tc_info[IEEE_8021QAZ_MAX_TCS];
+ u8 up2tc[IIDC_MAX_USER_PRIORITY];
+ u8 vport_relative_bw;
+ u8 vport_priority_type;
+ u8 num_tc;
+ u8 pfc_mode;
+ u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
+};
+
+struct iidc_event {
+ DECLARE_BITMAP(type, IIDC_EVENT_NBITS);
+ u32 reg;
+};
+
+struct ice_pf;
+
+int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset);
+int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset);
+int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type);
+int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable);
+void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos);
+
+/* Structure representing auxiliary driver tailored information about the core
+ * PCI dev; each auxiliary driver using the IIDC interface will have an
+ * instance of this struct dedicated to it.
+ */
+
+struct iidc_auxiliary_dev {
+ struct auxiliary_device adev;
+ struct ice_pf *pf;
+};
+
+/* Structure representing the auxiliary driver. This struct is to be
+ * allocated and populated by the auxiliary driver's owner. The core PCI
+ * driver will access these ops by performing a container_of on the
+ * auxiliary_device->dev.driver.
+ */
+struct iidc_auxiliary_drv {
+ struct auxiliary_driver adrv;
+ /* This event_handler is meant to be a blocking call. For instance,
+ * when a BEFORE_MTU_CHANGE event comes in, the event_handler will not
+ * return until the auxiliary driver is ready for the MTU change to
+ * happen.
+ */
+ void (*event_handler)(struct ice_pf *pf, struct iidc_event *event);
+};
+
+#endif /* _IIDC_H_*/
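
An auxiliary driver plugs in by embedding its auxiliary_driver in iidc_auxiliary_drv so the core PCI driver can reach the event handler; a skeleton (probe/remove/id_table bodies omitted, names hypothetical):

	static void example_event_handler(struct ice_pf *pf,
					  struct iidc_event *event)
	{
		if (test_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type)) {
			/* quiesce RDMA traffic; return only when ready */
		}
	}

	static struct iidc_auxiliary_drv example_adrv = {
		.adrv = {
			.probe = example_probe,
			.remove = example_remove,
			.id_table = example_id_table,
		},
		.event_handler = example_event_handler,
	};
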
diff --git a/include/linux/net_tstamp.h b/include/linux/net_tstamp.h
new file mode 100644
index 000000000000..fd67f3cc0c4b
--- /dev/null
+++ b/include/linux/net_tstamp.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_NET_TIMESTAMPING_H_
+#define _LINUX_NET_TIMESTAMPING_H_
+
+#include <uapi/linux/net_tstamp.h>
+
+/**
+ * struct kernel_hwtstamp_config - Kernel copy of struct hwtstamp_config
+ *
+ * @flags: see struct hwtstamp_config
+ * @tx_type: see struct hwtstamp_config
+ * @rx_filter: see struct hwtstamp_config
+ *
+ * Prefer using this structure for in-kernel processing of hardware
+ * timestamping configuration, over the inextensible struct hwtstamp_config
+ * exposed to the %SIOCGHWTSTAMP and %SIOCSHWTSTAMP ioctl UAPI.
+ */
+struct kernel_hwtstamp_config {
+ int flags;
+ int tx_type;
+ int rx_filter;
+};
+
+static inline void hwtstamp_config_to_kernel(struct kernel_hwtstamp_config *kernel_cfg,
+ const struct hwtstamp_config *cfg)
+{
+ kernel_cfg->flags = cfg->flags;
+ kernel_cfg->tx_type = cfg->tx_type;
+ kernel_cfg->rx_filter = cfg->rx_filter;
+}
+
+#endif /* _LINUX_NET_TIMESTAMPING_H_ */
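
Converting at the ioctl boundary keeps the rest of a driver on the kernel form; a sketch of a SIOCSHWTSTAMP path (validation and hardware programming elided):

	struct hwtstamp_config cfg;
	struct kernel_hwtstamp_config kernel_cfg;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	hwtstamp_config_to_kernel(&kernel_cfg, &cfg);
	/* ... check kernel_cfg.tx_type / kernel_cfg.rx_filter, program hw ... */
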
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 1d4737cffc71..7c2d77d75a88 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -1,16 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Network device features.
- *
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_NETDEV_FEATURES_H
#define _LINUX_NETDEV_FEATURES_H
#include <linux/types.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
typedef u64 netdev_features_t;
@@ -36,7 +33,6 @@ enum {
/**/NETIF_F_GSO_SHIFT, /* keep the order of SKB_GSO_* bits */
NETIF_F_TSO_BIT /* ... TCPv4 segmentation */
= NETIF_F_GSO_SHIFT,
- NETIF_F_UFO_BIT, /* ... UDPv4 fragmentation */
NETIF_F_GSO_ROBUST_BIT, /* ... ->SKB_GSO_DODGY */
NETIF_F_TSO_ECN_BIT, /* ... TCP ECN support */
NETIF_F_TSO_MANGLEID_BIT, /* ... IPV4 ID mangling allowed */
@@ -55,8 +51,11 @@ enum {
NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
NETIF_F_GSO_SCTP_BIT, /* ... SCTP fragmentation */
NETIF_F_GSO_ESP_BIT, /* ... ESP with TSO */
+ NETIF_F_GSO_UDP_BIT, /* ... UFO, deprecated except tuntap */
+ NETIF_F_GSO_UDP_L4_BIT, /* ... UDP payload GSO (not UFO) */
+ NETIF_F_GSO_FRAGLIST_BIT, /* ... Fraglist GSO */
/**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
- NETIF_F_GSO_ESP_BIT,
+ NETIF_F_GSO_FRAGLIST_BIT,
NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */
@@ -76,12 +75,27 @@ enum {
NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */
NETIF_F_HW_ESP_BIT, /* Hardware ESP transformation offload */
NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */
+ NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */
+ NETIF_F_HW_TLS_TX_BIT, /* Hardware TLS TX offload */
+ NETIF_F_HW_TLS_RX_BIT, /* Hardware TLS RX offload */
+
+ NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */
+ NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */
+ NETIF_F_GRO_FRAGLIST_BIT, /* Fraglist GRO */
+
+ NETIF_F_HW_MACSEC_BIT, /* Offload MACsec operations */
+ NETIF_F_GRO_UDP_FWD_BIT, /* Allow UDP GRO for forwarding */
+
+ NETIF_F_HW_HSR_TAG_INS_BIT, /* Offload HSR tag insertion */
+ NETIF_F_HW_HSR_TAG_RM_BIT, /* Offload HSR tag removal */
+ NETIF_F_HW_HSR_FWD_BIT, /* Offload HSR forwarding */
+ NETIF_F_HW_HSR_DUP_BIT, /* Offload HSR duplication */
/*
* Add your fresh new feature above and remember to update
- * netdev_features_strings[] in net/core/ethtool.c and maybe
+ * netdev_features_strings[] in net/ethtool/common.c and maybe
* some feature mask #defines below. Please also describe it
- * in Documentation/networking/netdev-features.txt.
+ * in Documentation/networking/netdev-features.rst.
*/
/**/NETDEV_FEATURE_COUNT
@@ -96,6 +110,7 @@ enum {
#define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST)
#define NETIF_F_FSO __NETIF_F(FSO)
#define NETIF_F_GRO __NETIF_F(GRO)
+#define NETIF_F_GRO_HW __NETIF_F(GRO_HW)
#define NETIF_F_GSO __NETIF_F(GSO)
#define NETIF_F_GSO_ROBUST __NETIF_F(GSO_ROBUST)
#define NETIF_F_HIGHDMA __NETIF_F(HIGHDMA)
@@ -118,7 +133,6 @@ enum {
#define NETIF_F_TSO6 __NETIF_F(TSO6)
#define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN)
#define NETIF_F_TSO __NETIF_F(TSO)
-#define NETIF_F_UFO __NETIF_F(UFO)
#define NETIF_F_VLAN_CHALLENGED __NETIF_F(VLAN_CHALLENGED)
#define NETIF_F_RXFCS __NETIF_F(RXFCS)
#define NETIF_F_RXALL __NETIF_F(RXALL)
@@ -133,6 +147,7 @@ enum {
#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
#define NETIF_F_GSO_SCTP __NETIF_F(GSO_SCTP)
#define NETIF_F_GSO_ESP __NETIF_F(GSO_ESP)
+#define NETIF_F_GSO_UDP __NETIF_F(GSO_UDP)
#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
@@ -140,9 +155,40 @@ enum {
#define NETIF_F_HW_TC __NETIF_F(HW_TC)
#define NETIF_F_HW_ESP __NETIF_F(HW_ESP)
#define NETIF_F_HW_ESP_TX_CSUM __NETIF_F(HW_ESP_TX_CSUM)
+#define NETIF_F_RX_UDP_TUNNEL_PORT __NETIF_F(RX_UDP_TUNNEL_PORT)
+#define NETIF_F_HW_TLS_RECORD __NETIF_F(HW_TLS_RECORD)
+#define NETIF_F_GSO_UDP_L4 __NETIF_F(GSO_UDP_L4)
+#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX)
+#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX)
+#define NETIF_F_GRO_FRAGLIST __NETIF_F(GRO_FRAGLIST)
+#define NETIF_F_GSO_FRAGLIST __NETIF_F(GSO_FRAGLIST)
+#define NETIF_F_HW_MACSEC __NETIF_F(HW_MACSEC)
+#define NETIF_F_GRO_UDP_FWD __NETIF_F(GRO_UDP_FWD)
+#define NETIF_F_HW_HSR_TAG_INS __NETIF_F(HW_HSR_TAG_INS)
+#define NETIF_F_HW_HSR_TAG_RM __NETIF_F(HW_HSR_TAG_RM)
+#define NETIF_F_HW_HSR_FWD __NETIF_F(HW_HSR_FWD)
+#define NETIF_F_HW_HSR_DUP __NETIF_F(HW_HSR_DUP)
-#define for_each_netdev_feature(mask_addr, bit) \
- for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
+/* Find the highest-numbered feature bit set in the range [0, start - 1].
+ */
+static inline int find_next_netdev_feature(u64 feature, unsigned long start)
+{
+ /* like BITMAP_LAST_WORD_MASK() for u64
+	 * this sets the most significant 64 - start bits to 0.
+ */
+ feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1));
+
+ return fls64(feature) - 1;
+}
+
+/* Walk the set feature bits from the MSB down to the LSB;
+ * mask_addr must be a u64 and bit an int.
+ */
+#define for_each_netdev_feature(mask_addr, bit) \
+ for ((bit) = find_next_netdev_feature((mask_addr), \
+ NETDEV_FEATURE_COUNT); \
+ (bit) >= 0; \
+ (bit) = find_next_netdev_feature((mask_addr), (bit)))
/* Features valid for ethtool to change */
/* = all defined minus driver/device-class-related */
@@ -158,7 +204,7 @@ enum {
#define NETIF_F_GSO_MASK (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \
__NETIF_F_BIT(NETIF_F_GSO_SHIFT))
-/* List of IP checksum features. Note that NETIF_F_ HW_CSUM should not be
+/* List of IP checksum features. Note that NETIF_F_HW_CSUM should not be
* set in features when NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM are set--
* this would be contradictory
*/
@@ -172,8 +218,8 @@ enum {
NETIF_F_FSO)
/* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_UFO | \
- NETIF_F_GSO_SCTP)
+#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_GSO_SCTP | \
+ NETIF_F_GSO_UDP_L4 | NETIF_F_GSO_FRAGLIST)
/*
* If one device supports one of these features, then enable them
@@ -198,6 +244,9 @@ enum {
/* changeable features with no special hardware requirements */
#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
+/* Changeable features with no special hardware requirements that default to off. */
+#define NETIF_F_SOFT_FEATURES_OFF (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD)
+
#define NETIF_F_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_CTAG_TX | \
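
The masking step in find_next_netdev_feature() clears bit start and above (for start == 64 the shift count is 0 and nothing is cleared, which is correct as long as the feature bits fit the 64-bit mask), and fls64() - 1 yields -1 for an empty mask, terminating the loop. A small usage sketch; the mask value is chosen purely for illustration:

/* Walk a feature mask from the highest set bit down to bit 0. */
static void example_walk_features(void)
{
	netdev_features_t mask = NETIF_F_GRO | NETIF_F_GSO | NETIF_F_HIGHDMA;
	int bit;

	for_each_netdev_feature(mask, bit)
		pr_info("feature bit %d is set\n", bit);
}
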
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 779b23595596..08fbd4622ccf 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
@@ -15,11 +16,6 @@
* Bjorn Ekwall. <bj0rn@blox.se>
* Pekka Riikonen <priikone@poseidon.pspt.fi>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* Moved to /usr/include/linux for NET3
*/
#ifndef _LINUX_NETDEVICE_H
@@ -32,32 +28,43 @@
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
+#include <asm/local.h>
#include <linux/percpu.h>
#include <linux/rculist.h>
-#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>
-#include <linux/ethtool.h>
#include <net/net_namespace.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
+#include <net/xdp.h>
#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
+#include <uapi/linux/netdev.h>
#include <linux/hashtable.h>
+#include <linux/rbtree.h>
+#include <net/net_trackers.h>
+#include <net/net_debug.h>
+#include <net/dropreason-core.h>
struct netpoll_info;
struct device;
+struct ethtool_ops;
struct phy_device;
-struct dsa_switch_tree;
-
+struct dsa_port;
+struct ip_tunnel_parm;
+struct macsec_context;
+struct macsec_ops;
+struct netdev_name_node;
+struct sd_flow_limit;
+struct sfp_bus;
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
@@ -65,15 +72,23 @@ struct wpan_dev;
struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
+struct udp_tunnel_nic_info;
+struct udp_tunnel_nic;
struct bpf_prog;
+struct xdp_buff;
+struct xdp_md;
+void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops);
+void netdev_sw_irq_coalesce_default_on(struct net_device *dev);
/* Backlog congestion levels */
#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
#define NET_RX_DROP 1 /* packet dropped */
+#define MAX_NEST_DEV 8
+
/*
* Transmit return codes: transmit return codes originate from three different
* namespaces:
@@ -160,40 +175,56 @@ static inline bool dev_xmit_complete(int rc)
* (unsigned long) so they can be read and written atomically.
*/
+#define NET_DEV_STAT(FIELD) \
+ union { \
+ unsigned long FIELD; \
+ atomic_long_t __##FIELD; \
+ }
+
struct net_device_stats {
- unsigned long rx_packets;
- unsigned long tx_packets;
- unsigned long rx_bytes;
- unsigned long tx_bytes;
- unsigned long rx_errors;
- unsigned long tx_errors;
- unsigned long rx_dropped;
- unsigned long tx_dropped;
- unsigned long multicast;
- unsigned long collisions;
- unsigned long rx_length_errors;
- unsigned long rx_over_errors;
- unsigned long rx_crc_errors;
- unsigned long rx_frame_errors;
- unsigned long rx_fifo_errors;
- unsigned long rx_missed_errors;
- unsigned long tx_aborted_errors;
- unsigned long tx_carrier_errors;
- unsigned long tx_fifo_errors;
- unsigned long tx_heartbeat_errors;
- unsigned long tx_window_errors;
- unsigned long rx_compressed;
- unsigned long tx_compressed;
+ NET_DEV_STAT(rx_packets);
+ NET_DEV_STAT(tx_packets);
+ NET_DEV_STAT(rx_bytes);
+ NET_DEV_STAT(tx_bytes);
+ NET_DEV_STAT(rx_errors);
+ NET_DEV_STAT(tx_errors);
+ NET_DEV_STAT(rx_dropped);
+ NET_DEV_STAT(tx_dropped);
+ NET_DEV_STAT(multicast);
+ NET_DEV_STAT(collisions);
+ NET_DEV_STAT(rx_length_errors);
+ NET_DEV_STAT(rx_over_errors);
+ NET_DEV_STAT(rx_crc_errors);
+ NET_DEV_STAT(rx_frame_errors);
+ NET_DEV_STAT(rx_fifo_errors);
+ NET_DEV_STAT(rx_missed_errors);
+ NET_DEV_STAT(tx_aborted_errors);
+ NET_DEV_STAT(tx_carrier_errors);
+ NET_DEV_STAT(tx_fifo_errors);
+ NET_DEV_STAT(tx_heartbeat_errors);
+ NET_DEV_STAT(tx_window_errors);
+ NET_DEV_STAT(rx_compressed);
+ NET_DEV_STAT(tx_compressed);
};
+#undef NET_DEV_STAT
+/* per-cpu stats, allocated on demand.
+ * Try to fit them in a single cache line, for the sake of dev_get_stats().
+ */
+struct net_device_core_stats {
+ unsigned long rx_dropped;
+ unsigned long tx_dropped;
+ unsigned long rx_nohandler;
+ unsigned long rx_otherhost_dropped;
+} __aligned(4 * sizeof(unsigned long));
#include <linux/cache.h>
#include <linux/skbuff.h>
#ifdef CONFIG_RPS
#include <linux/static_key.h>
-extern struct static_key rps_needed;
-extern struct static_key rfs_needed;
+extern struct static_key_false rps_needed;
+extern struct static_key_false rfs_needed;
#endif
struct neighbour;
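
The NET_DEV_STAT() union above lets legacy readers keep treating each counter as a plain unsigned long while updaters go through an atomic view of the same storage. A self-contained sketch of the pattern; the struct and helper names are ours and do not refer to any specific in-tree helper:

#include <linux/atomic.h>
#include <linux/compiler.h>

struct example_stats {
	union {
		unsigned long rx_errors;	/* legacy read-side view */
		atomic_long_t __rx_errors;	/* atomic write-side view */
	};
};

static void example_count_rx_error(struct example_stats *s)
{
	atomic_long_inc(&s->__rx_errors);	/* safe concurrent update */
}

static unsigned long example_read_rx_errors(const struct example_stats *s)
{
	return READ_ONCE(s->rx_errors);		/* same storage, legacy type */
}
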
@@ -202,13 +233,13 @@ struct sk_buff;
struct netdev_hw_addr {
struct list_head list;
+ struct rb_node node;
unsigned char addr[MAX_ADDR_LEN];
unsigned char type;
#define NETDEV_HW_ADDR_T_LAN 1
#define NETDEV_HW_ADDR_T_SAN 2
-#define NETDEV_HW_ADDR_T_SLAVE 3
-#define NETDEV_HW_ADDR_T_UNICAST 4
-#define NETDEV_HW_ADDR_T_MULTICAST 5
+#define NETDEV_HW_ADDR_T_UNICAST 3
+#define NETDEV_HW_ADDR_T_MULTICAST 4
bool global_use;
int sync_cnt;
int refcount;
@@ -219,6 +250,9 @@ struct netdev_hw_addr {
struct netdev_hw_addr_list {
struct list_head list;
int count;
+
+ /* Auxiliary tree for faster lookup on addition and deletion */
+ struct rb_root tree;
};
#define netdev_hw_addr_list_count(l) ((l)->count)
@@ -230,11 +264,17 @@ struct netdev_hw_addr_list {
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
netdev_hw_addr_list_for_each(ha, &(dev)->uc)
+#define netdev_for_each_synced_uc_addr(_ha, _dev) \
+ netdev_for_each_uc_addr((_ha), (_dev)) \
+ if ((_ha)->sync_cnt)
#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
netdev_hw_addr_list_for_each(ha, &(dev)->mc)
+#define netdev_for_each_synced_mc_addr(_ha, _dev) \
+ netdev_for_each_mc_addr((_ha), (_dev)) \
+ if ((_ha)->sync_cnt)
struct hh_cache {
unsigned int hh_len;
@@ -258,9 +298,11 @@ struct hh_cache {
* relationship HH alignment <= LL alignment.
*/
#define LL_RESERVED_SPACE(dev) \
- ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+ ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
+ & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
- ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+ ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
+ & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
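
A worked example of the reservation arithmetic above, assuming HH_DATA_MOD is 16 as defined in this header:

/* hard_header_len = 14 (Ethernet), needed_headroom = 0:
 *	(14 + 0) & ~15	=  0
 *	0 + 16		= 16 bytes reserved
 *
 * hard_header_len = 14, needed_headroom = 10:
 *	(14 + 10) & ~15	= 16
 *	16 + 16		= 32 bytes reserved
 *
 * The sum is rounded down to a multiple of HH_DATA_MOD and one more
 * HH_DATA_MOD chunk is added, so the full link-layer header always fits
 * and the reservation stays 16-byte aligned. The READ_ONCE() annotations
 * only mark the lockless reads of needed_headroom; they do not change
 * the arithmetic.
 */
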
struct header_ops {
int (*create) (struct sk_buff *skb, struct net_device *dev,
@@ -272,6 +314,7 @@ struct header_ops {
const struct net_device *dev,
const unsigned char *haddr);
bool (*validate)(const char *ll_header, unsigned int len);
+ __be16 (*parse_protocol)(const struct sk_buff *skb);
};
/* These flag bits are private to the generic network queueing
@@ -285,20 +328,19 @@ enum netdev_state_t {
__LINK_STATE_NOCARRIER,
__LINK_STATE_LINKWATCH_PENDING,
__LINK_STATE_DORMANT,
+ __LINK_STATE_TESTING,
};
+struct gro_list {
+ struct list_head list;
+ int count;
+};
/*
- * This structure holds boot-time configured netdevice settings. They
- * are then used in the device probing.
+ * Size of the GRO hash buckets; must be less than the number of bits in
+ * napi_struct::gro_bitmask.
*/
-struct netdev_boot_setup {
- char name[IFNAMSIZ];
- struct ifmap map;
-};
-#define NETDEV_BOOT_SETUP_MAX 8
-
-int __init netdev_boot_setup(char *str);
+#define GRO_HASH_BUCKETS 8
/*
* Structure for NAPI scheduling similar to tasklet but with weighting
@@ -314,38 +356,52 @@ struct napi_struct {
unsigned long state;
int weight;
- unsigned int gro_count;
+ int defer_hard_irqs_count;
+ unsigned long gro_bitmask;
int (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
+ /* CPU actively polling if netpoll is configured */
int poll_owner;
#endif
+ /* CPU on which NAPI has been scheduled for processing */
+ int list_owner;
struct net_device *dev;
- struct sk_buff *gro_list;
+ struct gro_list gro_hash[GRO_HASH_BUCKETS];
struct sk_buff *skb;
+ struct list_head rx_list; /* Pending GRO_NORMAL skbs */
+ int rx_count; /* length of rx_list */
+ unsigned int napi_id;
struct hrtimer timer;
+ struct task_struct *thread;
+ /* control-path-only fields follow */
struct list_head dev_list;
struct hlist_node napi_hash_node;
- unsigned int napi_id;
};
enum {
- NAPI_STATE_SCHED, /* Poll is scheduled */
- NAPI_STATE_MISSED, /* reschedule a napi */
- NAPI_STATE_DISABLE, /* Disable pending */
- NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
- NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */
- NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
- NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
+ NAPI_STATE_SCHED, /* Poll is scheduled */
+ NAPI_STATE_MISSED, /* reschedule a napi */
+ NAPI_STATE_DISABLE, /* Disable pending */
+ NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
+ NAPI_STATE_LISTED, /* NAPI added to system lists */
+ NAPI_STATE_NO_BUSY_POLL, /* Do not add in napi_hash, no busy polling */
+ NAPI_STATE_IN_BUSY_POLL, /* sk_busy_loop() owns this NAPI */
+	NAPI_STATE_PREFER_BUSY_POLL,	/* prefer busy-polling over softirq processing */
+	NAPI_STATE_THREADED,		/* The poll is performed inside its own thread */
+	NAPI_STATE_SCHED_THREADED,	/* NAPI is currently scheduled in threaded mode */
};
enum {
- NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED),
- NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED),
- NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE),
- NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC),
- NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED),
- NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
- NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
+ NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED),
+ NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED),
+ NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE),
+ NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC),
+ NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED),
+ NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
+ NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
+ NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL),
+ NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED),
+ NAPIF_STATE_SCHED_THREADED = BIT(NAPI_STATE_SCHED_THREADED),
};
enum gro_result {
@@ -353,7 +409,6 @@ enum gro_result {
GRO_MERGED_FREE,
GRO_HELD,
GRO_NORMAL,
- GRO_DROP,
GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;
@@ -416,6 +471,11 @@ static inline bool napi_disable_pending(struct napi_struct *n)
return test_bit(NAPI_STATE_DISABLE, &n->state);
}
+static inline bool napi_prefer_busy_poll(struct napi_struct *n)
+{
+ return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
+}
+
bool napi_schedule_prep(struct napi_struct *n);
/**
@@ -453,33 +513,24 @@ static inline bool napi_reschedule(struct napi_struct *napi)
return false;
}
-bool napi_complete_done(struct napi_struct *n, int work_done);
/**
- * napi_complete - NAPI processing complete
- * @n: NAPI context
+ * napi_complete_done - NAPI processing complete
+ * @n: NAPI context
+ * @work_done: number of packets processed
*
- * Mark NAPI processing as complete.
- * Consider using napi_complete_done() instead.
+ * Mark NAPI processing as complete. Should only be called if poll budget
+ * has not been completely consumed.
+ * Prefer over napi_complete().
* Return false if device should avoid rearming interrupts.
*/
+bool napi_complete_done(struct napi_struct *n, int work_done);
+
static inline bool napi_complete(struct napi_struct *n)
{
return napi_complete_done(n, 0);
}
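
A minimal driver poll routine following the napi_complete_done() contract documented above; example_clean_rx() and example_irq_enable() are hypothetical driver helpers:

static int example_poll(struct napi_struct *napi, int budget)
{
	/* Process up to budget packets; returns how many were handled. */
	int work_done = example_clean_rx(napi, budget);

	/* Complete only when the budget was not exhausted, and re-arm
	 * device interrupts only if the core does not need more polling.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done))
		example_irq_enable(napi->dev);

	return work_done;
}
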
-/**
- * napi_hash_del - remove a NAPI from global table
- * @napi: NAPI context
- *
- * Warning: caller must observe RCU grace period
- * before freeing memory containing @napi, if
- * this function returns true.
- * Note: core networking stack automatically calls it
- * from netif_napi_del().
- * Drivers might want to call this helper to combine all
- * the needed RCU grace periods into a single one.
- */
-bool napi_hash_del(struct napi_struct *napi);
+int dev_set_threaded(struct net_device *dev, bool threaded);
/**
* napi_disable - prevent NAPI from scheduling
@@ -490,20 +541,7 @@ bool napi_hash_del(struct napi_struct *napi);
*/
void napi_disable(struct napi_struct *n);
-/**
- * napi_enable - enable NAPI scheduling
- * @n: NAPI context
- *
- * Resume NAPI from being scheduled on this context.
- * Must be paired with napi_disable.
- */
-static inline void napi_enable(struct napi_struct *n)
-{
- BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
- smp_mb__before_atomic();
- clear_bit(NAPI_STATE_SCHED, &n->state);
- clear_bit(NAPI_STATE_NPSVC, &n->state);
-}
+void napi_enable(struct napi_struct *n);
/**
* napi_synchronize - wait until NAPI is not running
@@ -522,6 +560,32 @@ static inline void napi_synchronize(const struct napi_struct *n)
barrier();
}
+/**
+ * napi_if_scheduled_mark_missed - if napi is running, set the
+ * NAPIF_STATE_MISSED
+ * @n: NAPI context
+ *
+ * If NAPI is running, set the NAPIF_STATE_MISSED bit, and return true
+ * if NAPI is scheduled.
+ */
+static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
+{
+ unsigned long val, new;
+
+ val = READ_ONCE(n->state);
+ do {
+ if (val & NAPIF_STATE_DISABLE)
+ return true;
+
+ if (!(val & NAPIF_STATE_SCHED))
+ return false;
+
+ new = val | NAPIF_STATE_MISSED;
+ } while (!try_cmpxchg(&n->state, &val, new));
+
+ return true;
+}
+
enum netdev_queue_state_t {
__QUEUE_STATE_DRV_XOFF,
__QUEUE_STATE_STACK_XOFF,
@@ -553,6 +617,8 @@ struct netdev_queue {
* read-mostly part
*/
struct net_device *dev;
+ netdevice_tracker dev_tracker;
+
struct Qdisc __rcu *qdisc;
struct Qdisc *qdisc_sleeping;
#ifdef CONFIG_SYSFS
@@ -566,7 +632,13 @@ struct netdev_queue {
* Number of TX timeouts for this queue
* (/sys/class/net/DEV/Q/trans_timeout)
*/
- unsigned long trans_timeout;
+ atomic_long_t trans_timeout;
+
+ /* Subordinate device that the queue has been assigned to */
+ struct net_device *sb_dev;
+#ifdef CONFIG_XDP_SOCKETS
+ struct xsk_buff_pool *pool;
+#endif
/*
* write-mostly part
*/
@@ -584,6 +656,35 @@ struct netdev_queue {
#endif
} ____cacheline_aligned_in_smp;
+extern int sysctl_fb_tunnels_only_for_init_net;
+extern int sysctl_devconf_inherit_init_net;
+
+/*
+ * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns
+ * == 1 : For initns only
+ * == 2 : For none.
+ */
+static inline bool net_has_fallback_tunnels(const struct net *net)
+{
+#if IS_ENABLED(CONFIG_SYSCTL)
+ int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);
+
+ return !fb_tunnels_only_for_init_net ||
+ (net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
+#else
+ return true;
+#endif
+}
+
+static inline int net_inherit_devconf(void)
+{
+#if IS_ENABLED(CONFIG_SYSCTL)
+ return READ_ONCE(sysctl_devconf_inherit_init_net);
+#else
+ return 0;
+#endif
+}
+
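
A sketch of how a tunnel module might consult the helper during namespace setup, following the sysctl semantics spelled out above; the function names are illustrative only:

static int __net_init example_tunnel_init_net(struct net *net)
{
	/* Honour fb_tunnels_only_for_init_net: skip creating the
	 * fallback device where the sysctl forbids it for this netns.
	 */
	if (!net_has_fallback_tunnels(net))
		return 0;

	return example_create_fallback_dev(net);
}
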
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
@@ -608,7 +709,7 @@ static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node
struct rps_map {
unsigned int len;
struct rcu_head rcu;
- u16 cpus[0];
+ u16 cpus[];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
@@ -630,7 +731,7 @@ struct rps_dev_flow {
struct rps_dev_flow_table {
unsigned int mask;
struct rcu_head rcu;
- struct rps_dev_flow flows[0];
+ struct rps_dev_flow flows[];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
((_num) * sizeof(struct rps_dev_flow)))
@@ -648,7 +749,7 @@ struct rps_dev_flow_table {
struct rps_sock_flow_table {
u32 mask;
- u32 ents[0] ____cacheline_aligned_in_smp;
+ u32 ents[] ____cacheline_aligned_in_smp;
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
@@ -680,12 +781,18 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
+ struct xdp_rxq_info xdp_rxq;
#ifdef CONFIG_RPS
struct rps_map __rcu *rps_map;
struct rps_dev_flow_table __rcu *rps_flow_table;
#endif
struct kobject kobj;
struct net_device *dev;
+ netdevice_tracker dev_tracker;
+
+#ifdef CONFIG_XDP_SOCKETS
+ struct xsk_buff_pool *pool;
+#endif
} ____cacheline_aligned_in_smp;
/*
@@ -693,10 +800,16 @@ struct netdev_rx_queue {
*/
struct rx_queue_attribute {
struct attribute attr;
- ssize_t (*show)(struct netdev_rx_queue *queue,
- struct rx_queue_attribute *attr, char *buf);
+ ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
ssize_t (*store)(struct netdev_rx_queue *queue,
- struct rx_queue_attribute *attr, const char *buf, size_t len);
+ const char *buf, size_t len);
+};
+
+/* XPS map type and offset of the xps map within net_device->xps_maps[]. */
+enum xps_map_type {
+ XPS_CPUS = 0,
+ XPS_RXQS,
+ XPS_MAPS_MAX,
};
#ifdef CONFIG_XPS
@@ -708,7 +821,7 @@ struct xps_map {
unsigned int len;
unsigned int alloc_len;
struct rcu_head rcu;
- u16 queues[0];
+ u16 queues[];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
@@ -716,13 +829,28 @@ struct xps_map {
/*
* This structure holds all XPS maps for device. Maps are indexed by CPU.
+ *
+ * We record the number of CPUs/RXQs in use when the struct is allocated,
+ * in nr_ids. This helps avoid out-of-bounds memory accesses.
+ *
+ * We also record the number of traffic classes in use when the struct is
+ * allocated, in num_tc. This is used when navigating the maps, to ensure we
+ * do not cross their upper bound, as the original dev->num_tc can be updated
+ * in the meantime.
*/
struct xps_dev_maps {
struct rcu_head rcu;
- struct xps_map __rcu *cpu_map[0];
+ unsigned int nr_ids;
+ s16 num_tc;
+ struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */
};
-#define XPS_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
+
+#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
+
+#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
+ (_rxqs * (_tcs) * sizeof(struct xps_map *)))
+
#endif /* CONFIG_XPS */
#define TC_MAX_QUEUE 16
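
A sketch of a lookup that honours nr_ids and num_tc as the comment above prescribes; the id * num_tc + tc indexing is our illustration of the layout, not a quote of the in-tree lookup code, and the caller is assumed to hold the RCU read lock:

static struct xps_map *example_xps_lookup(struct xps_dev_maps *maps,
					  unsigned int id, int tc)
{
	/* Bounds-check against the sizes captured at allocation time. */
	if (id >= maps->nr_ids || tc >= maps->num_tc)
		return NULL;

	return rcu_dereference(maps->attr_map[id * maps->num_tc + tc]);
}
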
@@ -768,37 +896,97 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
}
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
- struct sk_buff *skb);
-
-/* These structures hold the attributes of qdisc and classifiers
- * that are being passed to the netdevice through the setup_tc op.
- */
-enum {
- TC_SETUP_MQPRIO,
- TC_SETUP_CLSU32,
- TC_SETUP_CLSFLOWER,
- TC_SETUP_MATCHALL,
- TC_SETUP_CLSBPF,
+ struct sk_buff *skb,
+ struct net_device *sb_dev);
+
+enum net_device_path_type {
+ DEV_PATH_ETHERNET = 0,
+ DEV_PATH_VLAN,
+ DEV_PATH_BRIDGE,
+ DEV_PATH_PPPOE,
+ DEV_PATH_DSA,
+ DEV_PATH_MTK_WDMA,
};
-struct tc_cls_u32_offload;
-
-struct tc_to_netdev {
- unsigned int type;
+struct net_device_path {
+ enum net_device_path_type type;
+ const struct net_device *dev;
union {
- struct tc_cls_u32_offload *cls_u32;
- struct tc_cls_flower_offload *cls_flower;
- struct tc_cls_matchall_offload *cls_mall;
- struct tc_cls_bpf_offload *cls_bpf;
- struct tc_mqprio_qopt *mqprio;
+ struct {
+ u16 id;
+ __be16 proto;
+ u8 h_dest[ETH_ALEN];
+ } encap;
+ struct {
+ enum {
+ DEV_PATH_BR_VLAN_KEEP,
+ DEV_PATH_BR_VLAN_TAG,
+ DEV_PATH_BR_VLAN_UNTAG,
+ DEV_PATH_BR_VLAN_UNTAG_HW,
+ } vlan_mode;
+ u16 vlan_id;
+ __be16 vlan_proto;
+ } bridge;
+ struct {
+ int port;
+ u16 proto;
+ } dsa;
+ struct {
+ u8 wdma_idx;
+ u8 queue;
+ u16 wcid;
+ u8 bss;
+ } mtk_wdma;
};
- bool egress_dev;
};
-/* These structures hold the attributes of xdp state that are being passed
- * to the netdevice through the xdp op.
+#define NET_DEVICE_PATH_STACK_MAX 5
+#define NET_DEVICE_PATH_VLAN_MAX 2
+
+struct net_device_path_stack {
+ int num_paths;
+ struct net_device_path path[NET_DEVICE_PATH_STACK_MAX];
+};
+
+struct net_device_path_ctx {
+ const struct net_device *dev;
+ u8 daddr[ETH_ALEN];
+
+ int num_vlans;
+ struct {
+ u16 id;
+ __be16 proto;
+ } vlan[NET_DEVICE_PATH_VLAN_MAX];
+};
+
+enum tc_setup_type {
+ TC_QUERY_CAPS,
+ TC_SETUP_QDISC_MQPRIO,
+ TC_SETUP_CLSU32,
+ TC_SETUP_CLSFLOWER,
+ TC_SETUP_CLSMATCHALL,
+ TC_SETUP_CLSBPF,
+ TC_SETUP_BLOCK,
+ TC_SETUP_QDISC_CBS,
+ TC_SETUP_QDISC_RED,
+ TC_SETUP_QDISC_PRIO,
+ TC_SETUP_QDISC_MQ,
+ TC_SETUP_QDISC_ETF,
+ TC_SETUP_ROOT_QDISC,
+ TC_SETUP_QDISC_GRED,
+ TC_SETUP_QDISC_TAPRIO,
+ TC_SETUP_FT,
+ TC_SETUP_QDISC_ETS,
+ TC_SETUP_QDISC_TBF,
+ TC_SETUP_QDISC_FIFO,
+ TC_SETUP_QDISC_HTB,
+ TC_SETUP_ACT,
+};
+
+/* These structures hold the attributes of bpf state that are being passed
+ * to the netdevice through the bpf op.
*/
-enum xdp_netdev_command {
+enum bpf_netdev_command {
/* Set or clear a bpf program used in the earliest stages of packet
* rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
* is responsible for calling bpf_prog_put on any old progs that are
@@ -808,17 +996,32 @@ enum xdp_netdev_command {
*/
XDP_SETUP_PROG,
XDP_SETUP_PROG_HW,
- /* Check if a bpf program is set on the device. The callee should
- * set @prog_attached to one of XDP_ATTACHED_* values, note that "true"
- * is equivalent to XDP_ATTACHED_DRV.
- */
- XDP_QUERY_PROG,
+ /* BPF program for offload callbacks, invoked at program load time. */
+ BPF_OFFLOAD_MAP_ALLOC,
+ BPF_OFFLOAD_MAP_FREE,
+ XDP_SETUP_XSK_POOL,
};
+struct bpf_prog_offload_ops;
struct netlink_ext_ack;
+struct xdp_umem;
+struct xdp_dev_bulk_queue;
+struct bpf_xdp_link;
+
+enum bpf_xdp_mode {
+ XDP_MODE_SKB = 0,
+ XDP_MODE_DRV = 1,
+ XDP_MODE_HW = 2,
+ __MAX_XDP_MODE
+};
-struct netdev_xdp {
- enum xdp_netdev_command command;
+struct bpf_xdp_entity {
+ struct bpf_prog *prog;
+ struct bpf_xdp_link *link;
+};
+
+struct netdev_bpf {
+ enum bpf_netdev_command command;
union {
/* XDP_SETUP_PROG */
struct {
@@ -826,24 +1029,50 @@ struct netdev_xdp {
struct bpf_prog *prog;
struct netlink_ext_ack *extack;
};
- /* XDP_QUERY_PROG */
+ /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
struct {
- u8 prog_attached;
- u32 prog_id;
+ struct bpf_offloaded_map *offmap;
};
+ /* XDP_SETUP_XSK_POOL */
+ struct {
+ struct xsk_buff_pool *pool;
+ u16 queue_id;
+ } xsk;
};
};
+/* Flags for ndo_xsk_wakeup. */
+#define XDP_WAKEUP_RX (1 << 0)
+#define XDP_WAKEUP_TX (1 << 1)
+
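
A sketch of driving these flags through the ndo_xsk_wakeup hook documented later in this file; the helper name is ours and error handling is minimal:

static int example_kick_xsk_tx(struct net_device *dev, u32 queue_id)
{
	if (!dev->netdev_ops->ndo_xsk_wakeup)
		return -EOPNOTSUPP;

	/* Wake only the TX side of the AF_XDP queue. */
	return dev->netdev_ops->ndo_xsk_wakeup(dev, queue_id, XDP_WAKEUP_TX);
}
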
#ifdef CONFIG_XFRM_OFFLOAD
struct xfrmdev_ops {
- int (*xdo_dev_state_add) (struct xfrm_state *x);
+ int (*xdo_dev_state_add) (struct xfrm_state *x, struct netlink_ext_ack *extack);
void (*xdo_dev_state_delete) (struct xfrm_state *x);
void (*xdo_dev_state_free) (struct xfrm_state *x);
bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
struct xfrm_state *x);
+ void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
+ void (*xdo_dev_state_update_curlft) (struct xfrm_state *x);
+ int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
+ void (*xdo_dev_policy_delete) (struct xfrm_policy *x);
+ void (*xdo_dev_policy_free) (struct xfrm_policy *x);
};
#endif
+struct dev_ifalias {
+ struct rcu_head rcuhead;
+ char ifalias[];
+};
+
+struct devlink;
+struct tlsdev_ops;
+
+struct netdev_net_notifier {
+ struct list_head list;
+ struct notifier_block *nb;
+};
+
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
@@ -887,7 +1116,7 @@ struct xfrmdev_ops {
* those the driver believes to be appropriate.
*
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
- * void *accel_priv, select_queue_fallback_t fallback);
+ * struct net_device *sb_dev);
* Called to decide which queue to use when device supports multiple
* transmit queues.
*
@@ -909,9 +1138,18 @@ struct xfrmdev_ops {
* Test if Media Access Control address is valid for the device.
*
* int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
- * Called when a user requests an ioctl which can't be handled by
- * the generic interface code. If not defined ioctls return
- * not supported error code.
+ * Old-style ioctl entry point. This is used internally by the
+ * appletalk and ieee802154 subsystems but is no longer called by
+ * the device ioctl handler.
+ *
+ * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd);
+ * Used by the bonding driver for its device specific ioctls:
+ * SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE,
+ * SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY
+ *
+ * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
+ * Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG,
+ * SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP.
*
* int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
* Used to set network devices bus interface parameters. This interface
@@ -922,7 +1160,7 @@ struct xfrmdev_ops {
* Called when a user wants to change the Maximum Transfer Unit
* of a device.
*
- * void (*ndo_tx_timeout)(struct net_device *dev);
+ * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
* Callback used when the transmitter has not made any progress
* for dev->watchdog ticks.
*
@@ -977,8 +1215,8 @@ struct xfrmdev_ops {
* with PF and querying it may introduce a theoretical security risk.
* int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
* int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
- * int (*ndo_setup_tc)(struct net_device *dev, u32 handle, u32 chain_index,
- * __be16 protocol, struct tc_to_netdev *tc);
+ * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type,
+ * void *type_data);
* Called to setup any 'tc' scheduler, classifier or action on @dev.
* This is always called from the stack with the rtnl lock held and netif
* tx queues stopped. This allows the netdevice to perform queue
@@ -1041,6 +1279,12 @@ struct xfrmdev_ops {
* int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
* Called to release previously enslaved netdev.
*
+ * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev,
+ * struct sk_buff *skb,
+ * bool all_slaves);
+ * Get the xmit slave of the master device. If all_slaves is true, the
+ * function assumes all the slaves can transmit.
+ *
* Feature/offload setting functions.
* netdev_features_t (*ndo_fix_features)(struct net_device *dev,
* netdev_features_t features);
@@ -1055,20 +1299,36 @@ struct xfrmdev_ops {
*
* int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
* struct net_device *dev,
- * const unsigned char *addr, u16 vid, u16 flags)
+ * const unsigned char *addr, u16 vid, u16 flags,
+ * struct netlink_ext_ack *extack);
* Adds an FDB entry to dev for addr.
* int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
* struct net_device *dev,
* const unsigned char *addr, u16 vid)
* Deletes the FDB entry from dev corresponding to addr.
+ * int (*ndo_fdb_del_bulk)(struct ndmsg *ndm, struct nlattr *tb[],
+ * struct net_device *dev,
+ * u16 vid,
+ * struct netlink_ext_ack *extack);
* int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
* struct net_device *dev, struct net_device *filter_dev,
* int *idx)
* Used to add FDB entries to dump requests. Implementers should add
* entries to skb and update idx with the number of entries.
*
+ * int (*ndo_mdb_add)(struct net_device *dev, struct nlattr *tb[],
+ * u16 nlmsg_flags, struct netlink_ext_ack *extack);
+ * Adds an MDB entry to dev.
+ * int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[],
+ * struct netlink_ext_ack *extack);
+ * Deletes the MDB entry from dev.
+ * int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb,
+ * struct netlink_callback *cb);
+ * Dumps MDB entries from dev. The first argument (marker) in the netlink
+ * callback is used by core rtnetlink code.
+ *
* int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
- * u16 flags)
+ * u16 flags, struct netlink_ext_ack *extack)
* int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
* struct net_device *dev, u32 filter_mask,
* int nlflags)
@@ -1089,18 +1349,9 @@ struct xfrmdev_ops {
* not implement this, it is assumed that the hw is not able to have
* multiple net devices on single physical port.
*
- * void (*ndo_udp_tunnel_add)(struct net_device *dev,
- * struct udp_tunnel_info *ti);
- * Called by UDP tunnel to notify a driver about the UDP port and socket
- * address family that a UDP tunnel is listnening to. It is called only
- * when a new port starts listening. The operation is protected by the
- * RTNL.
- *
- * void (*ndo_udp_tunnel_del)(struct net_device *dev,
- * struct udp_tunnel_info *ti);
- * Called by UDP tunnel to notify the driver about a UDP port and socket
- * address family that the UDP tunnel is not listening to anymore. The
- * operation is protected by the RTNL.
+ * int (*ndo_get_port_parent_id)(struct net_device *dev,
+ * struct netdev_phys_item_id *ppid)
+ * Called to get the parent ID of the physical port of this device.
*
* void* (*ndo_dfwd_add_station)(struct net_device *pdev,
* struct net_device *dev)
@@ -1120,11 +1371,6 @@ struct xfrmdev_ops {
* TX queue.
* int (*ndo_get_iflink)(const struct net_device *dev);
* Called to get the iflink value of this device.
- * void (*ndo_change_proto_down)(struct net_device *dev,
- * bool proto_down);
- * This function is used to pass protocol port error state information
- * to the switch driver. The switch driver can react to the proto_down
- * by doing a phys down on the associated switch port.
* int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
* This function is used to get egress tunnel information for given skb.
* This is useful for retrieving outer tunnel header parameters while
@@ -1135,10 +1381,40 @@ struct xfrmdev_ops {
* appropriate rx headroom value allows avoiding skb head copy on
* forward. Setting a negative value resets the rx headroom to the
* default value.
- * int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp);
+ * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
* This function is used to set or query state related to XDP on the
- * netdevice. See definition of enum xdp_netdev_command for details.
- *
+ * netdevice and manage BPF offload. See definition of
+ * enum bpf_netdev_command for details.
+ * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp,
+ * u32 flags);
+ * This function is used to submit @n XDP packets for transmit on a
+ * netdevice. Returns number of frames successfully transmitted, frames
+ * that got dropped are freed/returned via xdp_return_frame().
+ * A negative return means a general error invoking the ndo; no frames
+ * were transmitted and the core caller will free all frames.
+ * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
+ * struct xdp_buff *xdp);
+ * Get the xmit slave of the master device based on the xdp_buff.
+ * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
+ * This function is used to wake up the softirq, ksoftirqd or kthread
+ * responsible for sending and/or receiving packets on a specific
+ * queue id bound to an AF_XDP socket. The flags field specifies if
+ * only RX, only Tx, or both should be woken up using the flags
+ * XDP_WAKEUP_RX and XDP_WAKEUP_TX.
+ * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p,
+ * int cmd);
+ * Add, change, delete or get information on an IPv4 tunnel.
+ * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
+ * If a device is paired with a peer device, return the peer instance.
+ * The caller must be under RCU read context.
+ * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path);
+ * Get the forwarding path to reach the real device from the HW destination address.
+ * ktime_t (*ndo_get_tstamp)(struct net_device *dev,
+ * const struct skb_shared_hwtstamps *hwtstamps,
+ * bool cycles);
+ * Get the hardware timestamp based on normal/adjustable time or the free
+ * running cycle counter. This function is required if the physical clock
+ * supports a free running cycle counter.
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1152,8 +1428,7 @@ struct net_device_ops {
netdev_features_t features);
u16 (*ndo_select_queue)(struct net_device *dev,
struct sk_buff *skb,
- void *accel_priv,
- select_queue_fallback_t fallback);
+ struct net_device *sb_dev);
void (*ndo_change_rx_flags)(struct net_device *dev,
int flags);
void (*ndo_set_rx_mode)(struct net_device *dev);
@@ -1162,13 +1437,23 @@ struct net_device_ops {
int (*ndo_validate_addr)(struct net_device *dev);
int (*ndo_do_ioctl)(struct net_device *dev,
struct ifreq *ifr, int cmd);
+ int (*ndo_eth_ioctl)(struct net_device *dev,
+ struct ifreq *ifr, int cmd);
+ int (*ndo_siocbond)(struct net_device *dev,
+ struct ifreq *ifr, int cmd);
+ int (*ndo_siocwandev)(struct net_device *dev,
+ struct if_settings *ifs);
+ int (*ndo_siocdevprivate)(struct net_device *dev,
+ struct ifreq *ifr,
+ void __user *data, int cmd);
int (*ndo_set_config)(struct net_device *dev,
struct ifmap *map);
int (*ndo_change_mtu)(struct net_device *dev,
int new_mtu);
int (*ndo_neigh_setup)(struct net_device *dev,
struct neigh_parms *);
- void (*ndo_tx_timeout) (struct net_device *dev);
+ void (*ndo_tx_timeout) (struct net_device *dev,
+ unsigned int txqueue);
void (*ndo_get_stats64)(struct net_device *dev,
struct rtnl_link_stats64 *storage);
@@ -1214,6 +1499,10 @@ struct net_device_ops {
struct nlattr *port[]);
int (*ndo_get_vf_port)(struct net_device *dev,
int vf, struct sk_buff *skb);
+ int (*ndo_get_vf_guid)(struct net_device *dev,
+ int vf,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid);
int (*ndo_set_vf_guid)(struct net_device *dev,
int vf, u64 guid,
int guid_type);
@@ -1221,9 +1510,8 @@ struct net_device_ops {
struct net_device *dev,
int vf, bool setting);
int (*ndo_setup_tc)(struct net_device *dev,
- u32 handle, u32 chain_index,
- __be16 protocol,
- struct tc_to_netdev *tc);
+ enum tc_setup_type type,
+ void *type_data);
#if IS_ENABLED(CONFIG_FCOE)
int (*ndo_fcoe_enable)(struct net_device *dev);
int (*ndo_fcoe_disable)(struct net_device *dev);
@@ -1255,9 +1543,15 @@ struct net_device_ops {
u32 flow_id);
#endif
int (*ndo_add_slave)(struct net_device *dev,
- struct net_device *slave_dev);
+ struct net_device *slave_dev,
+ struct netlink_ext_ack *extack);
int (*ndo_del_slave)(struct net_device *dev,
struct net_device *slave_dev);
+ struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev,
+ struct sk_buff *skb,
+ bool all_slaves);
+ struct net_device* (*ndo_sk_get_lower_dev)(struct net_device *dev,
+ struct sock *sk);
netdev_features_t (*ndo_fix_features)(struct net_device *dev,
netdev_features_t features);
int (*ndo_set_features)(struct net_device *dev,
@@ -1272,21 +1566,43 @@ struct net_device_ops {
struct net_device *dev,
const unsigned char *addr,
u16 vid,
- u16 flags);
+ u16 flags,
+ struct netlink_ext_ack *extack);
int (*ndo_fdb_del)(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
- u16 vid);
+ u16 vid, struct netlink_ext_ack *extack);
+ int (*ndo_fdb_del_bulk)(struct ndmsg *ndm,
+ struct nlattr *tb[],
+ struct net_device *dev,
+ u16 vid,
+ struct netlink_ext_ack *extack);
int (*ndo_fdb_dump)(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev,
int *idx);
-
+ int (*ndo_fdb_get)(struct sk_buff *skb,
+ struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 vid, u32 portid, u32 seq,
+ struct netlink_ext_ack *extack);
+ int (*ndo_mdb_add)(struct net_device *dev,
+ struct nlattr *tb[],
+ u16 nlmsg_flags,
+ struct netlink_ext_ack *extack);
+ int (*ndo_mdb_del)(struct net_device *dev,
+ struct nlattr *tb[],
+ struct netlink_ext_ack *extack);
+ int (*ndo_mdb_dump)(struct net_device *dev,
+ struct sk_buff *skb,
+ struct netlink_callback *cb);
int (*ndo_bridge_setlink)(struct net_device *dev,
struct nlmsghdr *nlh,
- u16 flags);
+ u16 flags,
+ struct netlink_ext_ack *extack);
int (*ndo_bridge_getlink)(struct sk_buff *skb,
u32 pid, u32 seq,
struct net_device *dev,
@@ -1299,34 +1615,50 @@ struct net_device_ops {
bool new_carrier);
int (*ndo_get_phys_port_id)(struct net_device *dev,
struct netdev_phys_item_id *ppid);
+ int (*ndo_get_port_parent_id)(struct net_device *dev,
+ struct netdev_phys_item_id *ppid);
int (*ndo_get_phys_port_name)(struct net_device *dev,
char *name, size_t len);
- void (*ndo_udp_tunnel_add)(struct net_device *dev,
- struct udp_tunnel_info *ti);
- void (*ndo_udp_tunnel_del)(struct net_device *dev,
- struct udp_tunnel_info *ti);
void* (*ndo_dfwd_add_station)(struct net_device *pdev,
struct net_device *dev);
void (*ndo_dfwd_del_station)(struct net_device *pdev,
void *priv);
- int (*ndo_get_lock_subclass)(struct net_device *dev);
int (*ndo_set_tx_maxrate)(struct net_device *dev,
int queue_index,
u32 maxrate);
int (*ndo_get_iflink)(const struct net_device *dev);
- int (*ndo_change_proto_down)(struct net_device *dev,
- bool proto_down);
int (*ndo_fill_metadata_dst)(struct net_device *dev,
struct sk_buff *skb);
void (*ndo_set_rx_headroom)(struct net_device *dev,
int needed_headroom);
- int (*ndo_xdp)(struct net_device *dev,
- struct netdev_xdp *xdp);
+ int (*ndo_bpf)(struct net_device *dev,
+ struct netdev_bpf *bpf);
+ int (*ndo_xdp_xmit)(struct net_device *dev, int n,
+ struct xdp_frame **xdp,
+ u32 flags);
+ struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *dev,
+ struct xdp_buff *xdp);
+ int (*ndo_xsk_wakeup)(struct net_device *dev,
+ u32 queue_id, u32 flags);
+ int (*ndo_tunnel_ctl)(struct net_device *dev,
+ struct ip_tunnel_parm *p, int cmd);
+ struct net_device * (*ndo_get_peer_dev)(struct net_device *dev);
+ int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
+ struct net_device_path *path);
+ ktime_t (*ndo_get_tstamp)(struct net_device *dev,
+ const struct skb_shared_hwtstamps *hwtstamps,
+ bool cycles);
+};
+
+struct xdp_metadata_ops {
+ int (*xmo_rx_timestamp)(const struct xdp_md *ctx, u64 *timestamp);
+ int (*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type);
};
/**
- * enum net_device_priv_flags - &struct net_device priv_flags
+ * enum netdev_priv_flags - &struct net_device priv_flags
*
* These are the &struct net_device priv_flags; they are only set internally
* by drivers and used in the kernel. These flags are invisible to
@@ -1356,8 +1688,6 @@ struct net_device_ops {
* @IFF_MACVLAN: Macvlan device
* @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
* underlying stacked devices
- * @IFF_IPVLAN_MASTER: IPvlan master device
- * @IFF_IPVLAN_SLAVE: IPvlan slave device
* @IFF_L3MDEV_MASTER: device is an L3 master device
* @IFF_NO_QUEUE: device can run without qdisc attached
* @IFF_OPENVSWITCH: device is a Open vSwitch master
@@ -1367,6 +1697,14 @@ struct net_device_ops {
* @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
* entity (i.e. the master device for bridged veth)
* @IFF_MACSEC: device is a MACsec device
+ * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
+ * @IFF_FAILOVER: device is a failover master device
+ * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
+ * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
+ * @IFF_NO_ADDRCONF: prevent ipv6 addrconf
+ * @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with
+ * skb_headlen(skb) == 0 (data starts from frag0)
+ * @IFF_CHANGE_PROTO_DOWN: device supports setting carrier via IFLA_PROTO_DOWN
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
@@ -1387,16 +1725,21 @@ enum netdev_priv_flags {
IFF_LIVE_ADDR_CHANGE = 1<<15,
IFF_MACVLAN = 1<<16,
IFF_XMIT_DST_RELEASE_PERM = 1<<17,
- IFF_IPVLAN_MASTER = 1<<18,
- IFF_IPVLAN_SLAVE = 1<<19,
- IFF_L3MDEV_MASTER = 1<<20,
- IFF_NO_QUEUE = 1<<21,
- IFF_OPENVSWITCH = 1<<22,
- IFF_L3MDEV_SLAVE = 1<<23,
- IFF_TEAM = 1<<24,
- IFF_RXFH_CONFIGURED = 1<<25,
- IFF_PHONY_HEADROOM = 1<<26,
- IFF_MACSEC = 1<<27,
+ IFF_L3MDEV_MASTER = 1<<18,
+ IFF_NO_QUEUE = 1<<19,
+ IFF_OPENVSWITCH = 1<<20,
+ IFF_L3MDEV_SLAVE = 1<<21,
+ IFF_TEAM = 1<<22,
+ IFF_RXFH_CONFIGURED = 1<<23,
+ IFF_PHONY_HEADROOM = 1<<24,
+ IFF_MACSEC = 1<<25,
+ IFF_NO_RX_HANDLER = 1<<26,
+ IFF_FAILOVER = 1<<27,
+ IFF_FAILOVER_SLAVE = 1<<28,
+ IFF_L3MDEV_RX_HANDLER = 1<<29,
+ IFF_NO_ADDRCONF = BIT_ULL(30),
+ IFF_TX_SKB_NO_LINEAR = BIT_ULL(31),
+ IFF_CHANGE_PROTO_DOWN = BIT_ULL(32),
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1417,15 +1760,25 @@ enum netdev_priv_flags {
#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
#define IFF_MACVLAN IFF_MACVLAN
#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
-#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER
-#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE
#define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER
#define IFF_NO_QUEUE IFF_NO_QUEUE
#define IFF_OPENVSWITCH IFF_OPENVSWITCH
#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
#define IFF_TEAM IFF_TEAM
#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
+#define IFF_PHONY_HEADROOM IFF_PHONY_HEADROOM
#define IFF_MACSEC IFF_MACSEC
+#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
+#define IFF_FAILOVER IFF_FAILOVER
+#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
+#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
+#define IFF_TX_SKB_NO_LINEAR IFF_TX_SKB_NO_LINEAR
+
+/* Specifies the type of the struct net_device::ml_priv pointer */
+enum netdev_ml_priv_type {
+ ML_PRIV_NONE,
+ ML_PRIV_CAN,
+};
/**
* struct net_device - The DEVICE structure.
@@ -1438,15 +1791,13 @@ enum netdev_priv_flags {
* (i.e. as seen by users in the "Space.c" file). It is the name
* of the interface.
*
- * @name_hlist: Device name hash chain, please keep it close to name[]
+ * @name_node: Name hashlist node
* @ifalias: SNMP alias
* @mem_end: Shared memory end
* @mem_start: Shared memory start
* @base_addr: Device I/O address
* @irq: Device IRQ number
*
- * @carrier_changes: Stats to monitor carrier on<->off transitions
- *
* @state: Generic network queuing layer state, see netdev_state_t
* @dev_list: The global list of network devices
* @napi_list: List entry used for polling NAPI devices
@@ -1469,6 +1820,7 @@ enum netdev_priv_flags {
* and drivers will need to set them appropriately.
*
* @mpls_features: Mask of features inheritable by MPLS
+ * @gso_partial_features: value(s) from NETIF_F_GSO\*
*
* @ifindex: interface index
* @group: The group the device belongs to
@@ -1476,12 +1828,10 @@ enum netdev_priv_flags {
* @stats: Statistics struct, which was left as a legacy, use
* rtnl_link_stats64 instead
*
- * @rx_dropped: Dropped packets by core network,
- * do not use this in drivers
- * @tx_dropped: Dropped packets by core network,
+ * @core_stats: core networking counters,
* do not use this in drivers
- * @rx_nohandler: nohandler dropped packets by core network on
- * inactive devices, do not use this in drivers
+ * @carrier_up_count: Number of times the carrier has been up
+ * @carrier_down_count: Number of times the carrier has been down
*
* @wireless_handlers: List of functions to handle Wireless Extensions,
* instead of ioctl,
@@ -1490,13 +1840,18 @@ enum netdev_priv_flags {
*
* @netdev_ops: Includes several pointers to callbacks,
* if one wants to override the ndo_*() functions
+ * @xdp_metadata_ops: Includes pointers to XDP metadata callbacks.
* @ethtool_ops: Management operations
+ * @l3mdev_ops: Layer 3 master device operations
* @ndisc_ops: Includes callbacks for different IPv6 neighbour
* discovery handling. Necessary for e.g. 6LoWPAN.
+ * @xfrmdev_ops: Transformation offload operations
+ * @tlsdev_ops: Transport Layer Security offload operations
* @header_ops: Includes callbacks for creating,parsing,caching,etc
* of Layer 2 headers.
*
* @flags: Interface flags (a la BSD)
+ * @xdp_features: XDP capability supported by the device
* @priv_flags: Like 'flags' but invisible to userspace,
* see if.h for the definitions
* @gflags: Global flags ( kept as legacy )
@@ -1523,12 +1878,15 @@ enum netdev_priv_flags {
* @perm_addr: Permanent hw address
* @addr_assign_type: Hw address assignment type
* @addr_len: Hardware address length
+ * @upper_level: Maximum depth level of upper devices.
+ * @lower_level: Maximum depth level of lower devices.
* @neigh_priv_len: Used in neigh_alloc()
* @dev_id: Used to differentiate devices that share
* the same link layer address
* @dev_port: Used to differentiate devices that share
* the same function
* @addr_list_lock: XXX: need comments on this one
+ * @name_assign_type: network interface name assignment type
* @uc_promisc: Counter that indicates promiscuous mode
* has been enabled due to the need to listen to
* additional unicast addresses in a device that
@@ -1547,10 +1905,13 @@ enum netdev_priv_flags {
* @tipc_ptr: TIPC specific data
* @atalk_ptr: AppleTalk link
* @ip_ptr: IPv4 specific data
- * @dn_ptr: DECnet specific data
* @ip6_ptr: IPv6 specific data
* @ax25_ptr: AX.25 specific data
* @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
+ * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
+ * device struct
+ * @mpls_ptr: mpls_dev struct pointer
+ * @mctp_ptr: MCTP specific data
*
* @dev_addr: Hw address (before bcast,
* because most packets are unicast)
@@ -1559,10 +1920,17 @@ enum netdev_priv_flags {
* @num_rx_queues: Number of RX queues
* allocated at register_netdev() time
* @real_num_rx_queues: Number of RX queues currently active in device
+ * @xdp_prog: XDP sockets filter program pointer
+ * @gro_flush_timeout: timeout for GRO layer in NAPI
+ * @napi_defer_hard_irqs: If not zero, provides a counter that allows
+ * avoiding NIC hard IRQs on busy queues.
*
* @rx_handler: handler for received packets
* @rx_handler_data: XXX: need comments on this one
+ * @miniq_ingress: ingress/clsact qdisc specific data for
+ * ingress processing
* @ingress_queue: XXX: need comments on this one
+ * @nf_hooks_ingress: netfilter hooks executed for ingress packets
* @broadcast: hw bcast address
*
* @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
@@ -1577,14 +1945,22 @@ enum netdev_priv_flags {
* @qdisc: Root qdisc from userspace point of view
* @tx_queue_len: Max frames per queue allowed
* @tx_global_lock: XXX: need comments on this one
+ * @xdp_bulkq: XDP device bulk queue
+ * @xps_maps: all CPUs/RXQs maps for XPS device
*
- * @xps_maps: XXX: need comments on this one
- *
+ * @miniq_egress: clsact qdisc specific data for
+ * egress processing
+ * @nf_hooks_egress: netfilter hooks executed for egress packets
+ * @qdisc_hash: qdisc hash table
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog (see dev_watchdog())
* @watchdog_timer: List of timers
*
+ * @proto_down_reason: reason a netdev interface is held down
* @pcpu_refcnt: Number of references to this device
+ * @dev_refcnt: Number of references to this device
+ * @refcnt_tracker: Tracker directory for tracked references to this device
* @todo_list: Delayed register/unregister
* @link_watch_list: XXX: need comments on this one
*
@@ -1599,6 +1975,7 @@ enum netdev_priv_flags {
* @nd_net: Network namespace this network device is inside
*
* @ml_priv: Mid-layer private
+ * @ml_priv_type: Mid-layer private type
* @lstats: Loopback statistics
* @tstats: Tunnel statistics
* @dstats: Dummy statistics
@@ -1607,6 +1984,8 @@ enum netdev_priv_flags {
* @garp_port: GARP
* @mrp_port: MRP
*
+ * @dm_private: Drop monitor private
+ *
* @dev: Class/net/name entry
* @sysfs_groups: Space for optional device, statistics and wireless
* sysfs groups
@@ -1615,8 +1994,12 @@ enum netdev_priv_flags {
* @rtnl_link_ops: Rtnl_link_ops
*
* @gso_max_size: Maximum size of generic segmentation offload
+ * @tso_max_size: Device (as in HW) limit on the max TSO request size
* @gso_max_segs: Maximum number of segments that can be passed to the
* NIC for GSO
+ * @tso_max_segs: Device (as in HW) limit on the max TSO segment count
+ * @gso_ipv4_max_size: Maximum size of generic segmentation offload,
+ * for IPv4.
*
* @dcbnl_ops: Data Center Bridging netlink ops
* @num_tc: Number of traffic classes in the net device
@@ -1628,22 +2011,58 @@ enum netdev_priv_flags {
* @priomap: XXX: need comments on this one
* @phydev: Physical device may attach itself
* for hardware timestamping
+ * @sfp_bus: attached &struct sfp_bus structure.
*
* @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
- * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
*
* @proto_down: protocol port state information can be sent to the
* switch driver and used to set the phys state of the
* switch port.
*
+ * @wol_enabled: Wake-on-LAN is enabled
+ *
+ * @threaded: napi threaded mode is enabled
+ *
+ * @net_notifier_list: List of per-net netdev notifier block
+ * that follow this device when it is moved
+ * to another network namespace.
+ *
+ * @macsec_ops: MACsec offloading ops
+ *
+ * @udp_tunnel_nic_info: static structure describing the UDP tunnel
+ * offload capabilities of the device
+ * @udp_tunnel_nic: UDP tunnel offload state
+ * @xdp_state: stores info on attached XDP BPF programs
+ *
+ * @nested_level: Used as a parameter of spin_lock_nested() of
+ * dev->addr_list_lock.
+ * @unlink_list: As netif_addr_lock() can be called recursively,
+ * keep a list of interfaces to be deleted.
+ * @gro_max_size: Maximum size of aggregated packet in generic
+ * receive offload (GRO)
+ * @gro_ipv4_max_size: Maximum size of aggregated packet in generic
+ * receive offload (GRO), for IPv4.
+ *
+ * @dev_addr_shadow: Copy of @dev_addr to catch direct writes.
+ * @linkwatch_dev_tracker: refcount tracker used by linkwatch.
+ * @watchdog_dev_tracker: refcount tracker used by watchdog.
+ * @dev_registered_tracker: tracker for reference held while
+ * registered
+ * @offload_xstats_l3: L3 HW stats for this netdevice.
+ *
+ * @devlink_port: Pointer to related devlink port structure.
+ * Assigned by a driver before netdev registration using
+ * SET_NETDEV_DEVLINK_PORT macro. This pointer is static
+ * during the time netdevice is registered.
+ *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
struct net_device {
char name[IFNAMSIZ];
- struct hlist_node name_hlist;
- char *ifalias;
+ struct netdev_name_node *name_node;
+ struct dev_ifalias __rcu *ifalias;
/*
* I/O specific fields
* FIXME: Merge these and struct ifmap into one
@@ -1651,9 +2070,6 @@ struct net_device {
unsigned long mem_end;
unsigned long mem_start;
unsigned long base_addr;
- int irq;
-
- atomic_t carrier_changes;
/*
* Some hardware also needs these fields (state,dev_list,
@@ -1675,6 +2091,25 @@ struct net_device {
struct list_head lower;
} adj_list;
+ /* Read-mostly cache-line for fast-path access */
+ unsigned int flags;
+ xdp_features_t xdp_features;
+ unsigned long long priv_flags;
+ const struct net_device_ops *netdev_ops;
+ const struct xdp_metadata_ops *xdp_metadata_ops;
+ int ifindex;
+ unsigned short gflags;
+ unsigned short hard_header_len;
+
+	/* Note: dev->mtu is often read without holding a lock.
+ * Writers usually hold RTNL.
+ * It is recommended to use READ_ONCE() to annotate the reads,
+ * and to use WRITE_ONCE() to annotate the writes.
+ */
+ unsigned int mtu;
+ unsigned short needed_headroom;
+ unsigned short needed_tailroom;
+
netdev_features_t features;
netdev_features_t hw_features;
netdev_features_t wanted_features;
@@ -1683,24 +2118,27 @@ struct net_device {
netdev_features_t mpls_features;
netdev_features_t gso_partial_features;
- int ifindex;
+ unsigned int min_mtu;
+ unsigned int max_mtu;
+ unsigned short type;
+ unsigned char min_header_len;
+ unsigned char name_assign_type;
+
int group;
- struct net_device_stats stats;
+ struct net_device_stats stats; /* not used by modern drivers */
+
+ struct net_device_core_stats __percpu *core_stats;
- atomic_long_t rx_dropped;
- atomic_long_t tx_dropped;
- atomic_long_t rx_nohandler;
+ /* Stats to monitor link on/off, flapping */
+ atomic_t carrier_up_count;
+ atomic_t carrier_down_count;
#ifdef CONFIG_WIRELESS_EXT
const struct iw_handler_def *wireless_handlers;
struct iw_public_data *wireless_data;
#endif
- const struct net_device_ops *netdev_ops;
const struct ethtool_ops *ethtool_ops;
-#ifdef CONFIG_NET_SWITCHDEV
- const struct switchdev_ops *switchdev_ops;
-#endif
#ifdef CONFIG_NET_L3_MASTER_DEV
const struct l3mdev_ops *l3mdev_ops;
#endif
@@ -1708,17 +2146,15 @@ struct net_device {
const struct ndisc_ops *ndisc_ops;
#endif
-#ifdef CONFIG_XFRM
+#ifdef CONFIG_XFRM_OFFLOAD
const struct xfrmdev_ops *xfrmdev_ops;
#endif
- const struct header_ops *header_ops;
-
- unsigned int flags;
- unsigned int priv_flags;
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+ const struct tlsdev_ops *tlsdev_ops;
+#endif
- unsigned short gflags;
- unsigned short padded;
+ const struct header_ops *header_ops;
unsigned char operstate;
unsigned char link_mode;
@@ -1726,26 +2162,21 @@ struct net_device {
unsigned char if_port;
unsigned char dma;
- unsigned int mtu;
- unsigned int min_mtu;
- unsigned int max_mtu;
- unsigned short type;
- unsigned short hard_header_len;
- unsigned char min_header_len;
-
- unsigned short needed_headroom;
- unsigned short needed_tailroom;
-
/* Interface address info. */
unsigned char perm_addr[MAX_ADDR_LEN];
unsigned char addr_assign_type;
unsigned char addr_len;
+ unsigned char upper_level;
+ unsigned char lower_level;
+
unsigned short neigh_priv_len;
unsigned short dev_id;
unsigned short dev_port;
+ unsigned short padded;
+
spinlock_t addr_list_lock;
- unsigned char name_assign_type;
- bool uc_promisc;
+ int irq;
+
struct netdev_hw_addr_list uc;
struct netdev_hw_addr_list mc;
struct netdev_hw_addr_list dev_addrs;
@@ -1753,56 +2184,78 @@ struct net_device {
#ifdef CONFIG_SYSFS
struct kset *queues_kset;
#endif
+#ifdef CONFIG_LOCKDEP
+ struct list_head unlink_list;
+#endif
unsigned int promiscuity;
unsigned int allmulti;
+ bool uc_promisc;
+#ifdef CONFIG_LOCKDEP
+ unsigned char nested_level;
+#endif
/* Protocol-specific pointers */
+ struct in_device __rcu *ip_ptr;
+ struct inet6_dev __rcu *ip6_ptr;
#if IS_ENABLED(CONFIG_VLAN_8021Q)
struct vlan_info __rcu *vlan_info;
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
- struct dsa_switch_tree *dsa_ptr;
+ struct dsa_port *dsa_ptr;
#endif
#if IS_ENABLED(CONFIG_TIPC)
struct tipc_bearer __rcu *tipc_ptr;
#endif
+#if IS_ENABLED(CONFIG_ATALK)
void *atalk_ptr;
- struct in_device __rcu *ip_ptr;
- struct dn_dev __rcu *dn_ptr;
- struct inet6_dev __rcu *ip6_ptr;
+#endif
+#if IS_ENABLED(CONFIG_AX25)
void *ax25_ptr;
+#endif
+#if IS_ENABLED(CONFIG_CFG80211)
struct wireless_dev *ieee80211_ptr;
+#endif
+#if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN)
struct wpan_dev *ieee802154_ptr;
+#endif
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
struct mpls_dev __rcu *mpls_ptr;
#endif
+#if IS_ENABLED(CONFIG_MCTP)
+ struct mctp_dev __rcu *mctp_ptr;
+#endif
/*
* Cache lines mostly used on receive path (including eth_type_trans())
*/
/* Interface address info used in eth_type_trans() */
- unsigned char *dev_addr;
+ const unsigned char *dev_addr;
-#ifdef CONFIG_SYSFS
struct netdev_rx_queue *_rx;
-
unsigned int num_rx_queues;
unsigned int real_num_rx_queues;
-#endif
struct bpf_prog __rcu *xdp_prog;
unsigned long gro_flush_timeout;
+ int napi_defer_hard_irqs;
+#define GRO_LEGACY_MAX_SIZE 65536u
+/* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
+ * and shinfo->gso_segs is a 16bit field.
+ */
+#define GRO_MAX_SIZE (8 * 65535u)
+ unsigned int gro_max_size;
+ unsigned int gro_ipv4_max_size;
rx_handler_func_t __rcu *rx_handler;
void __rcu *rx_handler_data;
#ifdef CONFIG_NET_CLS_ACT
- struct tcf_proto __rcu *ingress_cl_list;
+ struct mini_Qdisc __rcu *miniq_ingress;
#endif
struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
- struct nf_hook_entry __rcu *nf_hooks_ingress;
+ struct nf_hook_entries __rcu *nf_hooks_ingress;
#endif
unsigned char broadcast[MAX_ADDR_LEN];
@@ -1817,27 +2270,40 @@ struct net_device {
struct netdev_queue *_tx ____cacheline_aligned_in_smp;
unsigned int num_tx_queues;
unsigned int real_num_tx_queues;
- struct Qdisc *qdisc;
-#ifdef CONFIG_NET_SCHED
- DECLARE_HASHTABLE (qdisc_hash, 4);
-#endif
+ struct Qdisc __rcu *qdisc;
unsigned int tx_queue_len;
spinlock_t tx_global_lock;
- int watchdog_timeo;
+
+ struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
#ifdef CONFIG_XPS
- struct xps_dev_maps __rcu *xps_maps;
+ struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX];
#endif
#ifdef CONFIG_NET_CLS_ACT
- struct tcf_proto __rcu *egress_cl_list;
+ struct mini_Qdisc __rcu *miniq_egress;
+#endif
+#ifdef CONFIG_NETFILTER_EGRESS
+ struct nf_hook_entries __rcu *nf_hooks_egress;
#endif
+#ifdef CONFIG_NET_SCHED
+ DECLARE_HASHTABLE (qdisc_hash, 4);
+#endif
/* These may be needed for future network-power-down code. */
struct timer_list watchdog_timer;
+ int watchdog_timeo;
+
+ u32 proto_down_reason;
- int __percpu *pcpu_refcnt;
struct list_head todo_list;
+#ifdef CONFIG_PCPU_DEV_REFCNT
+ int __percpu *pcpu_refcnt;
+#else
+ refcount_t dev_refcnt;
+#endif
+ struct ref_tracker_dir refcnt_tracker;
+
struct list_head link_watch_list;
enum { NETREG_UNINITIALIZED=0,
@@ -1865,12 +2331,13 @@ struct net_device {
possible_net_t nd_net;
/* mid-layer private */
+ void *ml_priv;
+ enum netdev_ml_priv_type ml_priv_type;
+
union {
- void *ml_priv;
struct pcpu_lstats __percpu *lstats;
struct pcpu_sw_netstats __percpu *tstats;
struct pcpu_dstats __percpu *dstats;
- struct pcpu_vstats __percpu *vstats;
};
#if IS_ENABLED(CONFIG_GARP)
@@ -1879,7 +2346,9 @@ struct net_device {
#if IS_ENABLED(CONFIG_MRP)
struct mrp_port __rcu *mrp_port;
#endif
-
+#if IS_ENABLED(CONFIG_NET_DROP_MONITOR)
+ struct dm_hw_stat_delta __rcu *dm_private;
+#endif
struct device dev;
const struct attribute_group *sysfs_groups[4];
const struct attribute_group *sysfs_rx_queue_group;
@@ -1887,15 +2356,26 @@ struct net_device {
const struct rtnl_link_ops *rtnl_link_ops;
/* for setting kernel sock attribute on TCP connection setup */
-#define GSO_MAX_SIZE 65536
+#define GSO_MAX_SEGS 65535u
+#define GSO_LEGACY_MAX_SIZE 65536u
+/* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
+ * and shinfo->gso_segs is a 16bit field.
+ */
+#define GSO_MAX_SIZE (8 * GSO_MAX_SEGS)
+
unsigned int gso_max_size;
-#define GSO_MAX_SEGS 65535
+#define TSO_LEGACY_MAX_SIZE 65536
+#define TSO_MAX_SIZE UINT_MAX
+ unsigned int tso_max_size;
u16 gso_max_segs;
+#define TSO_MAX_SEGS U16_MAX
+ u16 tso_max_segs;
+ unsigned int gso_ipv4_max_size;
#ifdef CONFIG_DCB
const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
- u8 num_tc;
+ s16 num_tc;
struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
u8 prio_tc_map[TC_BITMASK + 1];
@@ -1906,12 +2386,45 @@ struct net_device {
struct netprio_map __rcu *priomap;
#endif
struct phy_device *phydev;
+ struct sfp_bus *sfp_bus;
struct lock_class_key *qdisc_tx_busylock;
- struct lock_class_key *qdisc_running_key;
bool proto_down;
+ unsigned wol_enabled:1;
+ unsigned threaded:1;
+
+ struct list_head net_notifier_list;
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ /* MACsec management functions */
+ const struct macsec_ops *macsec_ops;
+#endif
+ const struct udp_tunnel_nic_info *udp_tunnel_nic_info;
+ struct udp_tunnel_nic *udp_tunnel_nic;
+
+ /* protected by rtnl_lock */
+ struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE];
+
+ u8 dev_addr_shadow[MAX_ADDR_LEN];
+ netdevice_tracker linkwatch_dev_tracker;
+ netdevice_tracker watchdog_dev_tracker;
+ netdevice_tracker dev_registered_tracker;
+ struct rtnl_hw_stats64 *offload_xstats_l3;
+
+ struct devlink_port *devlink_port;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
+/*
+ * Driver should use this to assign devlink port instance to a netdevice
+ * before it registers the netdevice. Therefore devlink_port is static
+ * while the netdevice remains registered.
+ */
+#define SET_NETDEV_DEVLINK_PORT(dev, port) \
+({ \
+ WARN_ON((dev)->reg_state != NETREG_UNINITIALIZED); \
+ ((dev)->devlink_port = (port)); \
+})
+
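
A minimal usage sketch of the call order the comment above prescribes; foo_probe() and struct foo_priv are hypothetical driver names, and error unwinding is elided:

    /* Assign the devlink port before registration, while reg_state
     * is still NETREG_UNINITIALIZED, then register as usual.
     */
    static int foo_probe(struct device *parent, struct devlink_port *port)
    {
        struct net_device *netdev;

        netdev = alloc_etherdev(sizeof(struct foo_priv));
        if (!netdev)
            return -ENOMEM;

        SET_NETDEV_DEVLINK_PORT(netdev, port);
        SET_NETDEV_DEV(netdev, parent);

        return register_netdev(netdev);
    }
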
static inline bool netif_elide_gro(const struct net_device *dev)
{
if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
@@ -1948,10 +2461,38 @@ int netdev_get_num_tc(struct net_device *dev)
return dev->num_tc;
}
+static inline void net_prefetch(void *p)
+{
+ prefetch(p);
+#if L1_CACHE_BYTES < 128
+ prefetch((u8 *)p + L1_CACHE_BYTES);
+#endif
+}
+
+static inline void net_prefetchw(void *p)
+{
+ prefetchw(p);
+#if L1_CACHE_BYTES < 128
+ prefetchw((u8 *)p + L1_CACHE_BYTES);
+#endif
+}
+
+void netdev_unbind_sb_channel(struct net_device *dev,
+ struct net_device *sb_dev);
+int netdev_bind_sb_channel_queue(struct net_device *dev,
+ struct net_device *sb_dev,
+ u8 tc, u16 count, u16 offset);
+int netdev_set_sb_channel(struct net_device *dev, u16 channel);
+static inline int netdev_get_sb_channel(struct net_device *dev)
+{
+ return max_t(int, -dev->num_tc, 0);
+}
+
static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
unsigned int index)
{
+ DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues);
return &dev->_tx[index];
}
@@ -1976,23 +2517,23 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
#define netdev_lockdep_set_classes(dev) \
{ \
static struct lock_class_key qdisc_tx_busylock_key; \
- static struct lock_class_key qdisc_running_key; \
static struct lock_class_key qdisc_xmit_lock_key; \
static struct lock_class_key dev_addr_list_lock_key; \
unsigned int i; \
\
(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
- (dev)->qdisc_running_key = &qdisc_running_key; \
lockdep_set_class(&(dev)->addr_list_lock, \
- &dev_addr_list_lock_key); \
+ &dev_addr_list_lock_key); \
for (i = 0; i < (dev)->num_tx_queues; i++) \
lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
&qdisc_xmit_lock_key); \
}
-struct netdev_queue *netdev_pick_tx(struct net_device *dev,
- struct sk_buff *skb,
- void *accel_priv);
+u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev);
+struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
+ struct sk_buff *skb,
+ struct net_device *sb_dev);
/* returns the headroom that the master device needs to take in account
* when forwarding to this dev
@@ -2014,6 +2555,29 @@ static inline void netdev_reset_rx_headroom(struct net_device *dev)
netdev_set_rx_headroom(dev, -1);
}
+static inline void *netdev_get_ml_priv(struct net_device *dev,
+ enum netdev_ml_priv_type type)
+{
+ if (dev->ml_priv_type != type)
+ return NULL;
+
+ return dev->ml_priv;
+}
+
+static inline void netdev_set_ml_priv(struct net_device *dev,
+ void *ml_priv,
+ enum netdev_ml_priv_type type)
+{
+ WARN(dev->ml_priv_type && dev->ml_priv_type != type,
+ "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
+ dev->ml_priv_type, type);
+ WARN(!dev->ml_priv_type && dev->ml_priv,
+ "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
+
+ dev->ml_priv = ml_priv;
+ dev->ml_priv_type = type;
+}
+
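
As a usage sketch, a mid-layer stashing and retrieving its pointer; the foo_* names are hypothetical, and ML_PRIV_CAN is assumed to be a value of enum netdev_ml_priv_type:

    static void foo_attach(struct net_device *dev, struct foo_ml *ml)
    {
        /* Records both the pointer and its owning type. */
        netdev_set_ml_priv(dev, ml, ML_PRIV_CAN);
    }

    static struct foo_ml *foo_get(struct net_device *dev)
    {
        /* NULL when ml_priv is unset or owned by another type. */
        return netdev_get_ml_priv(dev, ML_PRIV_CAN);
    }
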
/*
* Net namespace inlines
*/
@@ -2056,158 +2620,89 @@ static inline void *netdev_priv(const struct net_device *dev)
*/
#define NAPI_POLL_WEIGHT 64
+void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int weight);
+
/**
- * netif_napi_add - initialize a NAPI context
- * @dev: network device
- * @napi: NAPI context
- * @poll: polling function
- * @weight: default weight
+ * netif_napi_add() - initialize a NAPI context
+ * @dev: network device
+ * @napi: NAPI context
+ * @poll: polling function
*
* netif_napi_add() must be used to initialize a NAPI context prior to calling
* *any* of the other NAPI-related functions.
*/
-void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
- int (*poll)(struct napi_struct *, int), int weight);
+static inline void
+netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int))
+{
+ netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
+}
+
+static inline void
+netif_napi_add_tx_weight(struct net_device *dev,
+ struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int),
+ int weight)
+{
+ set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
+ netif_napi_add_weight(dev, napi, poll, weight);
+}
/**
- * netif_tx_napi_add - initialize a NAPI context
- * @dev: network device
- * @napi: NAPI context
- * @poll: polling function
- * @weight: default weight
+ * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only
+ * @dev: network device
+ * @napi: NAPI context
+ * @poll: polling function
*
* This variant of netif_napi_add() should be used from drivers using NAPI
* to exclusively poll a TX queue.
 * This avoids adding it to napi_hash[] and polluting that hash table.
*/
-static inline void netif_tx_napi_add(struct net_device *dev,
+static inline void netif_napi_add_tx(struct net_device *dev,
struct napi_struct *napi,
- int (*poll)(struct napi_struct *, int),
- int weight)
+ int (*poll)(struct napi_struct *, int))
{
- set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
- netif_napi_add(dev, napi, poll, weight);
+ netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
}
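
A sketch of how a driver would use the new three-argument signatures; foo_priv, foo_rx_poll() and foo_tx_poll() are hypothetical:

    static int foo_open(struct net_device *dev)
    {
        struct foo_priv *priv = netdev_priv(dev);

        /* Rx context with the default NAPI_POLL_WEIGHT. */
        netif_napi_add(dev, &priv->rx_napi, foo_rx_poll);
        /* Tx-only context, kept out of napi_hash[]. */
        netif_napi_add_tx(dev, &priv->tx_napi, foo_tx_poll);

        napi_enable(&priv->rx_napi);
        napi_enable(&priv->tx_napi);
        return 0;
    }
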
/**
+ * __netif_napi_del - remove a NAPI context
+ * @napi: NAPI context
+ *
+ * Warning: the caller must observe an RCU grace period before freeing memory
+ * containing @napi. Drivers might want to call this helper to combine
+ * all the needed RCU grace periods into a single one.
+ */
+void __netif_napi_del(struct napi_struct *napi);
+
+/**
* netif_napi_del - remove a NAPI context
* @napi: NAPI context
*
* netif_napi_del() removes a NAPI context from the network device NAPI list
*/
-void netif_napi_del(struct napi_struct *napi);
-
-struct napi_gro_cb {
- /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
- void *frag0;
-
- /* Length of frag0. */
- unsigned int frag0_len;
-
- /* This indicates where we are processing relative to skb->data. */
- int data_offset;
-
- /* This is non-zero if the packet cannot be merged with the new skb. */
- u16 flush;
-
- /* Save the IP ID here and check when we get to the transport layer */
- u16 flush_id;
-
- /* Number of segments aggregated. */
- u16 count;
-
- /* Start offset for remote checksum offload */
- u16 gro_remcsum_start;
-
- /* jiffies when first packet was created/queued */
- unsigned long age;
-
- /* Used in ipv6_gro_receive() and foo-over-udp */
- u16 proto;
-
- /* This is non-zero if the packet may be of the same flow. */
- u8 same_flow:1;
-
- /* Used in tunnel GRO receive */
- u8 encap_mark:1;
-
- /* GRO checksum is valid */
- u8 csum_valid:1;
-
- /* Number of checksums via CHECKSUM_UNNECESSARY */
- u8 csum_cnt:3;
-
- /* Free the skb? */
- u8 free:2;
-#define NAPI_GRO_FREE 1
-#define NAPI_GRO_FREE_STOLEN_HEAD 2
-
- /* Used in foo-over-udp, set in udp[46]_gro_receive */
- u8 is_ipv6:1;
-
- /* Used in GRE, set in fou/gue_gro_receive */
- u8 is_fou:1;
-
- /* Used to determine if flush_id can be ignored */
- u8 is_atomic:1;
-
- /* Number of gro_receive callbacks this packet already went through */
- u8 recursion_counter:4;
-
- /* 1 bit hole */
-
- /* used to support CHECKSUM_COMPLETE for tunneling protocols */
- __wsum csum;
-
- /* used in skb_gro_receive() slow path */
- struct sk_buff *last;
-};
-
-#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
-
-#define GRO_RECURSION_LIMIT 15
-static inline int gro_recursion_inc_test(struct sk_buff *skb)
+static inline void netif_napi_del(struct napi_struct *napi)
{
- return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
-}
-
-typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
-static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
- struct sk_buff **head,
- struct sk_buff *skb)
-{
- if (unlikely(gro_recursion_inc_test(skb))) {
- NAPI_GRO_CB(skb)->flush |= 1;
- return NULL;
- }
-
- return cb(head, skb);
-}
-
-typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
- struct sk_buff *);
-static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
- struct sock *sk,
- struct sk_buff **head,
- struct sk_buff *skb)
-{
- if (unlikely(gro_recursion_inc_test(skb))) {
- NAPI_GRO_CB(skb)->flush |= 1;
- return NULL;
- }
-
- return cb(sk, head, skb);
+ __netif_napi_del(napi);
+ synchronize_net();
}
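
A sketch of the grace-period batching the warning above describes, for a hypothetical multi-queue driver:

    static void foo_remove_queues(struct foo_priv *priv)
    {
        int i;

        for (i = 0; i < priv->num_queues; i++)
            __netif_napi_del(&priv->queues[i].napi);

        /* One grace period covers every deletion above; the memory
         * holding the napi structs must not be freed before this.
         */
        synchronize_net();
    }
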
struct packet_type {
__be16 type; /* This is really htons(ether_type). */
+ bool ignore_outgoing;
struct net_device *dev; /* NULL is wildcarded here */
+ netdevice_tracker dev_tracker;
int (*func) (struct sk_buff *,
struct net_device *,
struct packet_type *,
struct net_device *);
+ void (*list_func) (struct list_head *,
+ struct packet_type *,
+ struct net_device *);
bool (*id_match)(struct packet_type *ptype,
struct sock *sk);
+ struct net *af_packet_net;
void *af_packet_priv;
struct list_head list;
};
@@ -2215,8 +2710,8 @@ struct packet_type {
struct offload_callbacks {
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
netdev_features_t features);
- struct sk_buff **(*gro_receive)(struct sk_buff **head,
- struct sk_buff *skb);
+ struct sk_buff *(*gro_receive)(struct list_head *head,
+ struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb, int nhoff);
};
@@ -2229,12 +2724,52 @@ struct packet_offload {
/* often modified stats are per-CPU, other are shared (netdev->stats) */
struct pcpu_sw_netstats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
-};
+} __aligned(4 * sizeof(u64));
+
+struct pcpu_lstats {
+ u64_stats_t packets;
+ u64_stats_t bytes;
+ struct u64_stats_sync syncp;
+} __aligned(2 * sizeof(u64));
+
+void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
+
+static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
+{
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ u64_stats_add(&tstats->rx_bytes, len);
+ u64_stats_inc(&tstats->rx_packets);
+ u64_stats_update_end(&tstats->syncp);
+}
+
+static inline void dev_sw_netstats_tx_add(struct net_device *dev,
+ unsigned int packets,
+ unsigned int len)
+{
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ u64_stats_add(&tstats->tx_bytes, len);
+ u64_stats_add(&tstats->tx_packets, packets);
+ u64_stats_update_end(&tstats->syncp);
+}
+
+static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
+{
+ struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
+
+ u64_stats_update_begin(&lstats->syncp);
+ u64_stats_add(&lstats->bytes, len);
+ u64_stats_inc(&lstats->packets);
+ u64_stats_update_end(&lstats->syncp);
+}
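
A sketch of the fast-path update these helpers enable, assuming the driver has already allocated dev->tstats as per-CPU (e.g. with netdev_alloc_pcpu_stats()):

    static void foo_rx(struct net_device *dev, struct sk_buff *skb)
    {
        /* Lockless per-CPU byte/packet accounting. */
        dev_sw_netstats_rx_add(dev, skb->len);
        netif_rx(skb);
    }
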
#define __netdev_alloc_pcpu_stats(type, gfp) \
({ \
@@ -2253,6 +2788,20 @@ struct pcpu_sw_netstats {
#define netdev_alloc_pcpu_stats(type) \
__netdev_alloc_pcpu_stats(type, GFP_KERNEL)
+#define devm_netdev_alloc_pcpu_stats(dev, type) \
+({ \
+ typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\
+ if (pcpu_stats) { \
+ int __cpu; \
+ for_each_possible_cpu(__cpu) { \
+ typeof(type) *stat; \
+ stat = per_cpu_ptr(pcpu_stats, __cpu); \
+ u64_stats_init(&stat->syncp); \
+ } \
+ } \
+ pcpu_stats; \
+})
+
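
A probe-time fragment as a sketch; because the allocation is device-managed, no matching free_percpu() is needed on the error or removal paths (dev and pdev are assumed from the surrounding hypothetical probe()):

    dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
                                               struct pcpu_sw_netstats);
    if (!dev->tstats)
        return -ENOMEM;
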
enum netdev_lag_tx_type {
NETDEV_LAG_TX_TYPE_UNKNOWN,
NETDEV_LAG_TX_TYPE_RANDOM,
@@ -2262,8 +2811,20 @@ enum netdev_lag_tx_type {
NETDEV_LAG_TX_TYPE_HASH,
};
+enum netdev_lag_hash {
+ NETDEV_LAG_HASH_NONE,
+ NETDEV_LAG_HASH_L2,
+ NETDEV_LAG_HASH_L34,
+ NETDEV_LAG_HASH_L23,
+ NETDEV_LAG_HASH_E23,
+ NETDEV_LAG_HASH_E34,
+ NETDEV_LAG_HASH_VLAN_SRCMAC,
+ NETDEV_LAG_HASH_UNKNOWN,
+};
+
struct netdev_lag_upper_info {
enum netdev_lag_tx_type tx_type;
+ enum netdev_lag_hash hash_type;
};
struct netdev_lag_lower_state_info {
@@ -2273,48 +2834,79 @@ struct netdev_lag_lower_state_info {
#include <linux/notifier.h>
-/* netdevice notifier chain. Please remember to update the rtnetlink
- * notification exclusion list in rtnetlink_event() when adding new
- * types.
+/* netdevice notifier chain. Please remember to update netdev_cmd_to_name()
+ * and the rtnetlink notification exclusion list in rtnetlink_event() when
+ * adding new types.
*/
-#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
-#define NETDEV_DOWN 0x0002
-#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
+enum netdev_cmd {
+ NETDEV_UP = 1, /* For now you can't veto a device up/down */
+ NETDEV_DOWN,
+ NETDEV_REBOOT, /* Tell a protocol stack a network interface
detected a hardware crash and restarted
- we can use this eg to kick tcp sessions
once done */
-#define NETDEV_CHANGE 0x0004 /* Notify device state change */
-#define NETDEV_REGISTER 0x0005
-#define NETDEV_UNREGISTER 0x0006
-#define NETDEV_CHANGEMTU 0x0007 /* notify after mtu change happened */
-#define NETDEV_CHANGEADDR 0x0008
-#define NETDEV_GOING_DOWN 0x0009
-#define NETDEV_CHANGENAME 0x000A
-#define NETDEV_FEAT_CHANGE 0x000B
-#define NETDEV_BONDING_FAILOVER 0x000C
-#define NETDEV_PRE_UP 0x000D
-#define NETDEV_PRE_TYPE_CHANGE 0x000E
-#define NETDEV_POST_TYPE_CHANGE 0x000F
-#define NETDEV_POST_INIT 0x0010
-#define NETDEV_UNREGISTER_FINAL 0x0011
-#define NETDEV_RELEASE 0x0012
-#define NETDEV_NOTIFY_PEERS 0x0013
-#define NETDEV_JOIN 0x0014
-#define NETDEV_CHANGEUPPER 0x0015
-#define NETDEV_RESEND_IGMP 0x0016
-#define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */
-#define NETDEV_CHANGEINFODATA 0x0018
-#define NETDEV_BONDING_INFO 0x0019
-#define NETDEV_PRECHANGEUPPER 0x001A
-#define NETDEV_CHANGELOWERSTATE 0x001B
-#define NETDEV_UDP_TUNNEL_PUSH_INFO 0x001C
-#define NETDEV_CHANGE_TX_QUEUE_LEN 0x001E
+ NETDEV_CHANGE, /* Notify device state change */
+ NETDEV_REGISTER,
+ NETDEV_UNREGISTER,
+ NETDEV_CHANGEMTU, /* notify after mtu change happened */
+ NETDEV_CHANGEADDR, /* notify after the address change */
+ NETDEV_PRE_CHANGEADDR, /* notify before the address change */
+ NETDEV_GOING_DOWN,
+ NETDEV_CHANGENAME,
+ NETDEV_FEAT_CHANGE,
+ NETDEV_BONDING_FAILOVER,
+ NETDEV_PRE_UP,
+ NETDEV_PRE_TYPE_CHANGE,
+ NETDEV_POST_TYPE_CHANGE,
+ NETDEV_POST_INIT,
+ NETDEV_PRE_UNINIT,
+ NETDEV_RELEASE,
+ NETDEV_NOTIFY_PEERS,
+ NETDEV_JOIN,
+ NETDEV_CHANGEUPPER,
+ NETDEV_RESEND_IGMP,
+ NETDEV_PRECHANGEMTU, /* notify before mtu change happened */
+ NETDEV_CHANGEINFODATA,
+ NETDEV_BONDING_INFO,
+ NETDEV_PRECHANGEUPPER,
+ NETDEV_CHANGELOWERSTATE,
+ NETDEV_UDP_TUNNEL_PUSH_INFO,
+ NETDEV_UDP_TUNNEL_DROP_INFO,
+ NETDEV_CHANGE_TX_QUEUE_LEN,
+ NETDEV_CVLAN_FILTER_PUSH_INFO,
+ NETDEV_CVLAN_FILTER_DROP_INFO,
+ NETDEV_SVLAN_FILTER_PUSH_INFO,
+ NETDEV_SVLAN_FILTER_DROP_INFO,
+ NETDEV_OFFLOAD_XSTATS_ENABLE,
+ NETDEV_OFFLOAD_XSTATS_DISABLE,
+ NETDEV_OFFLOAD_XSTATS_REPORT_USED,
+ NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
+ NETDEV_XDP_FEAT_CHANGE,
+};
+const char *netdev_cmd_to_name(enum netdev_cmd cmd);
int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
+int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb);
+int unregister_netdevice_notifier_net(struct net *net,
+ struct notifier_block *nb);
+int register_netdevice_notifier_dev_net(struct net_device *dev,
+ struct notifier_block *nb,
+ struct netdev_net_notifier *nn);
+int unregister_netdevice_notifier_dev_net(struct net_device *dev,
+ struct notifier_block *nb,
+ struct netdev_net_notifier *nn);
struct netdev_notifier_info {
- struct net_device *dev;
+ struct net_device *dev;
+ struct netlink_ext_ack *extack;
+};
+
+struct netdev_notifier_info_ext {
+ struct netdev_notifier_info info; /* must be first */
+ union {
+ u32 mtu;
+ } ext;
};
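
A sketch of a minimal notifier built on these declarations; foo_netdev_event() and foo_notifier are hypothetical:

    static int foo_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
    {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        if (event == NETDEV_CHANGEMTU)
            netdev_info(dev, "MTU is now %u\n", READ_ONCE(dev->mtu));
        return NOTIFY_DONE;
    }

    static struct notifier_block foo_notifier = {
        .notifier_call = foo_netdev_event,
    };

    /* e.g. register_netdevice_notifier_net(&init_net, &foo_notifier); */
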
struct netdev_notifier_change_info {
@@ -2335,10 +2927,52 @@ struct netdev_notifier_changelowerstate_info {
void *lower_state_info; /* is lower dev state */
};
+struct netdev_notifier_pre_changeaddr_info {
+ struct netdev_notifier_info info; /* must be first */
+ const unsigned char *dev_addr;
+};
+
+enum netdev_offload_xstats_type {
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1,
+};
+
+struct netdev_notifier_offload_xstats_info {
+ struct netdev_notifier_info info; /* must be first */
+ enum netdev_offload_xstats_type type;
+
+ union {
+ /* NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */
+ struct netdev_notifier_offload_xstats_rd *report_delta;
+ /* NETDEV_OFFLOAD_XSTATS_REPORT_USED */
+ struct netdev_notifier_offload_xstats_ru *report_used;
+ };
+};
+
+int netdev_offload_xstats_enable(struct net_device *dev,
+ enum netdev_offload_xstats_type type,
+ struct netlink_ext_ack *extack);
+int netdev_offload_xstats_disable(struct net_device *dev,
+ enum netdev_offload_xstats_type type);
+bool netdev_offload_xstats_enabled(const struct net_device *dev,
+ enum netdev_offload_xstats_type type);
+int netdev_offload_xstats_get(struct net_device *dev,
+ enum netdev_offload_xstats_type type,
+ struct rtnl_hw_stats64 *stats, bool *used,
+ struct netlink_ext_ack *extack);
+void
+netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd,
+ const struct rtnl_hw_stats64 *stats);
+void
+netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru);
+void netdev_offload_xstats_push_delta(struct net_device *dev,
+ enum netdev_offload_xstats_type type,
+ const struct rtnl_hw_stats64 *stats);
+
static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
struct net_device *dev)
{
info->dev = dev;
+ info->extack = NULL;
}
static inline struct net_device *
@@ -2347,8 +2981,15 @@ netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
return info->dev;
}
-int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
+static inline struct netlink_ext_ack *
+netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
+{
+ return info->extack;
+}
+int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
+int call_netdevice_notifiers_info(unsigned long val,
+ struct netdev_notifier_info *info);
extern rwlock_t dev_base_lock; /* Device list lock */
@@ -2362,6 +3003,9 @@ extern rwlock_t dev_base_lock; /* Device list lock */
list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d) \
list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
+#define for_each_netdev_continue_reverse(net, d) \
+ list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \
+ dev_list)
#define for_each_netdev_continue_rcu(net, d) \
list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave) \
@@ -2403,11 +3047,9 @@ static inline struct net_device *first_net_device_rcu(struct net *net)
}
int netdev_boot_setup_check(struct net_device *dev);
-unsigned long netdev_boot_base(const char *prefix, int unit);
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
const char *hwaddr);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
-struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
void dev_add_pack(struct packet_type *pt);
void dev_remove_pack(struct packet_type *pt);
void __dev_remove_pack(struct packet_type *pt);
@@ -2416,19 +3058,49 @@ void dev_remove_offload(struct packet_offload *po);
int dev_get_iflink(const struct net_device *dev);
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
+int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
+ struct net_device_path_stack *stack);
struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
+bool netdev_name_in_use(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
-int dev_open(struct net_device *dev);
-int dev_close(struct net_device *dev);
-int dev_close_many(struct list_head *head, bool unlink);
+int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
+void dev_close(struct net_device *dev);
+void dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
-int dev_queue_xmit(struct sk_buff *skb);
-int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
+u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev);
+u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev);
+
+int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev);
+int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
+
+static inline int dev_queue_xmit(struct sk_buff *skb)
+{
+ return __dev_queue_xmit(skb, NULL);
+}
+
+static inline int dev_queue_xmit_accel(struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ return __dev_queue_xmit(skb, sb_dev);
+}
+
+static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
+{
+ int ret;
+
+ ret = __dev_direct_xmit(skb, queue_id);
+ if (!dev_xmit_complete(ret))
+ kfree_skb(skb);
+ return ret;
+}
+
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
@@ -2440,250 +3112,19 @@ static inline void unregister_netdevice(struct net_device *dev)
int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
void netdev_freemem(struct net_device *dev);
-void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
-DECLARE_PER_CPU(int, xmit_recursion);
-#define XMIT_RECURSION_LIMIT 10
-
-static inline int dev_recursion_level(void)
-{
- return this_cpu_read(xmit_recursion);
-}
-
+struct net_device *netdev_get_xmit_slave(struct net_device *dev,
+ struct sk_buff *skb,
+ bool all_slaves);
+struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
+ struct sock *sk);
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
struct net_device *dev_get_by_napi_id(unsigned int napi_id);
-int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);
-int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
-
-static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
-{
- return NAPI_GRO_CB(skb)->data_offset;
-}
-
-static inline unsigned int skb_gro_len(const struct sk_buff *skb)
-{
- return skb->len - NAPI_GRO_CB(skb)->data_offset;
-}
-
-static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
-{
- NAPI_GRO_CB(skb)->data_offset += len;
-}
-
-static inline void *skb_gro_header_fast(struct sk_buff *skb,
- unsigned int offset)
-{
- return NAPI_GRO_CB(skb)->frag0 + offset;
-}
-
-static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
-{
- return NAPI_GRO_CB(skb)->frag0_len < hlen;
-}
-
-static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
-{
- NAPI_GRO_CB(skb)->frag0 = NULL;
- NAPI_GRO_CB(skb)->frag0_len = 0;
-}
-
-static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
- unsigned int offset)
-{
- if (!pskb_may_pull(skb, hlen))
- return NULL;
-
- skb_gro_frag0_invalidate(skb);
- return skb->data + offset;
-}
-
-static inline void *skb_gro_network_header(struct sk_buff *skb)
-{
- return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
- skb_network_offset(skb);
-}
-
-static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
- const void *start, unsigned int len)
-{
- if (NAPI_GRO_CB(skb)->csum_valid)
- NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
- csum_partial(start, len, 0));
-}
-
-/* GRO checksum functions. These are logical equivalents of the normal
- * checksum functions (in skbuff.h) except that they operate on the GRO
- * offsets and fields in sk_buff.
- */
-__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
-
-static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
-{
- return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
-}
-
-static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
- bool zero_okay,
- __sum16 check)
-{
- return ((skb->ip_summed != CHECKSUM_PARTIAL ||
- skb_checksum_start_offset(skb) <
- skb_gro_offset(skb)) &&
- !skb_at_gro_remcsum_start(skb) &&
- NAPI_GRO_CB(skb)->csum_cnt == 0 &&
- (!zero_okay || check));
-}
-
-static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
- __wsum psum)
-{
- if (NAPI_GRO_CB(skb)->csum_valid &&
- !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
- return 0;
-
- NAPI_GRO_CB(skb)->csum = psum;
-
- return __skb_gro_checksum_complete(skb);
-}
-
-static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
-{
- if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
- /* Consume a checksum from CHECKSUM_UNNECESSARY */
- NAPI_GRO_CB(skb)->csum_cnt--;
- } else {
- /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
- * verified a new top level checksum or an encapsulated one
- * during GRO. This saves work if we fallback to normal path.
- */
- __skb_incr_checksum_unnecessary(skb);
- }
-}
-
-#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
- compute_pseudo) \
-({ \
- __sum16 __ret = 0; \
- if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
- __ret = __skb_gro_checksum_validate_complete(skb, \
- compute_pseudo(skb, proto)); \
- if (!__ret) \
- skb_gro_incr_csum_unnecessary(skb); \
- __ret; \
-})
-
-#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
- __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
-
-#define skb_gro_checksum_validate_zero_check(skb, proto, check, \
- compute_pseudo) \
- __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
-
-#define skb_gro_checksum_simple_validate(skb) \
- __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
-
-static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
-{
- return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
- !NAPI_GRO_CB(skb)->csum_valid);
-}
-
-static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
- __sum16 check, __wsum pseudo)
-{
- NAPI_GRO_CB(skb)->csum = ~pseudo;
- NAPI_GRO_CB(skb)->csum_valid = 1;
-}
-
-#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \
-do { \
- if (__skb_gro_checksum_convert_check(skb)) \
- __skb_gro_checksum_convert(skb, check, \
- compute_pseudo(skb, proto)); \
-} while (0)
-
-struct gro_remcsum {
- int offset;
- __wsum delta;
-};
-
-static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
-{
- grc->offset = 0;
- grc->delta = 0;
-}
-
-static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
- unsigned int off, size_t hdrlen,
- int start, int offset,
- struct gro_remcsum *grc,
- bool nopartial)
-{
- __wsum delta;
- size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
-
- BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
-
- if (!nopartial) {
- NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
- return ptr;
- }
-
- ptr = skb_gro_header_fast(skb, off);
- if (skb_gro_header_hard(skb, off + plen)) {
- ptr = skb_gro_header_slow(skb, off + plen, off);
- if (!ptr)
- return NULL;
- }
-
- delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
- start, offset);
-
- /* Adjust skb->csum since we changed the packet */
- NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
-
- grc->offset = off + hdrlen + offset;
- grc->delta = delta;
-
- return ptr;
-}
-
-static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
- struct gro_remcsum *grc)
-{
- void *ptr;
- size_t plen = grc->offset + sizeof(u16);
-
- if (!grc->delta)
- return;
-
- ptr = skb_gro_header_fast(skb, grc->offset);
- if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
- ptr = skb_gro_header_slow(skb, plen, grc->offset);
- if (!ptr)
- return;
- }
-
- remcsum_unadjust((__sum16 *)ptr, grc->delta);
-}
-
-#ifdef CONFIG_XFRM_OFFLOAD
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
-{
- if (PTR_ERR(pp) != -EINPROGRESS)
- NAPI_GRO_CB(skb)->flush |= flush;
-}
-#else
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
-{
- NAPI_GRO_CB(skb)->flush |= flush;
-}
-#endif
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
@@ -2706,6 +3147,15 @@ static inline int dev_parse_header(const struct sk_buff *skb,
return dev->header_ops->parse(skb, haddr);
}
+static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
+{
+ const struct net_device *dev = skb->dev;
+
+ if (!dev->header_ops || !dev->header_ops->parse_protocol)
+ return 0;
+ return dev->header_ops->parse_protocol(skb);
+}
+
/* ll_header must have at least hard_header_len allocated */
static inline bool dev_validate_header(const struct net_device *dev,
char *ll_header, int len)
@@ -2726,26 +3176,11 @@ static inline bool dev_validate_header(const struct net_device *dev,
return false;
}
-typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
-int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
-static inline int unregister_gifconf(unsigned int family)
+static inline bool dev_has_header(const struct net_device *dev)
{
- return register_gifconf(family, NULL);
+ return dev->header_ops && dev->header_ops->create;
}
-#ifdef CONFIG_NET_FLOW_LIMIT
-#define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */
-struct sd_flow_limit {
- u64 count;
- unsigned int num_buckets;
- unsigned int history_head;
- u16 history[FLOW_LIMIT_HISTORY];
- u8 buckets[];
-};
-
-extern int netdev_flow_limit_table_len;
-#endif /* CONFIG_NET_FLOW_LIMIT */
-
/*
* Incoming packets are placed on per-CPU queues
*/
@@ -2756,17 +3191,30 @@ struct softnet_data {
/* stats */
unsigned int processed;
unsigned int time_squeeze;
- unsigned int received_rps;
#ifdef CONFIG_RPS
struct softnet_data *rps_ipi_list;
#endif
+
+ bool in_net_rx_action;
+ bool in_napi_threaded_poll;
+
#ifdef CONFIG_NET_FLOW_LIMIT
struct sd_flow_limit __rcu *flow_limit;
#endif
struct Qdisc *output_queue;
struct Qdisc **output_queue_tailp;
struct sk_buff *completion_queue;
-
+#ifdef CONFIG_XFRM_OFFLOAD
+ struct sk_buff_head xfrm_backlog;
+#endif
+ /* written and read only by owning cpu: */
+ struct {
+ u16 recursion;
+ u8 more;
+#ifdef CONFIG_NET_EGRESS
+ u8 skip_txqueue;
+#endif
+ } xmit;
#ifdef CONFIG_RPS
/* input_queue_head should be written by cpu owning this struct,
* and only read by other cpus. Worth using a cache line.
@@ -2774,15 +3222,22 @@ struct softnet_data {
unsigned int input_queue_head ____cacheline_aligned_in_smp;
/* Elements below can be accessed between CPUs for RPS/RFS */
- struct call_single_data csd ____cacheline_aligned_in_smp;
+ call_single_data_t csd ____cacheline_aligned_in_smp;
struct softnet_data *rps_ipi_next;
unsigned int cpu;
unsigned int input_queue_tail;
#endif
+ unsigned int received_rps;
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
+ /* Another possibly contended cache line */
+ spinlock_t defer_lock ____cacheline_aligned_in_smp;
+ int defer_count;
+ int defer_ipi_scheduled;
+ struct sk_buff *defer_list;
+ call_single_data_t defer_csd;
};
static inline void input_queue_head_incr(struct softnet_data *sd)
@@ -2802,6 +3257,28 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd,
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
+static inline int dev_recursion_level(void)
+{
+ return this_cpu_read(softnet_data.xmit.recursion);
+}
+
+#define XMIT_RECURSION_LIMIT 8
+static inline bool dev_xmit_recursion(void)
+{
+ return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
+ XMIT_RECURSION_LIMIT);
+}
+
+static inline void dev_xmit_recursion_inc(void)
+{
+ __this_cpu_inc(softnet_data.xmit.recursion);
+}
+
+static inline void dev_xmit_recursion_dec(void)
+{
+ __this_cpu_dec(softnet_data.xmit.recursion);
+}
+
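
A sketch of the guard pattern these helpers implement around nested transmits (stacked devices re-entering the xmit path); foo_forward() is hypothetical:

    static int foo_forward(struct sk_buff *skb)
    {
        int ret;

        if (dev_xmit_recursion()) {
            /* More than XMIT_RECURSION_LIMIT nested xmits: bail out. */
            kfree_skb(skb);
            return NET_XMIT_DROP;
        }

        dev_xmit_recursion_inc();
        ret = dev_queue_xmit(skb);
        dev_xmit_recursion_dec();
        return ret;
    }
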
void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq);
@@ -2865,6 +3342,7 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
+	/* Must be an atomic op, see netif_txq_try_stop() */
set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
@@ -2916,6 +3394,24 @@ netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
}
/**
+ * netdev_queue_set_dql_min_limit - set dql minimum limit
+ * @dev_queue: pointer to transmit queue
+ * @min_limit: dql minimum limit
+ *
+ * Forces xmit_more() to return true until the minimum threshold
+ * defined by @min_limit is reached (or until the tx queue is
+ * empty). Warning: to be used with care; misuse will impact
+ * latency.
+ */
+static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue,
+ unsigned int min_limit)
+{
+#ifdef CONFIG_BQL
+ dev_queue->dql.min_limit = min_limit;
+#endif
+}
+
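
A one-line usage sketch: keep at least 16 KiB in flight on queue 0 so xmit_more() batches doorbells, accepting the latency cost the warning mentions (dev is assumed from the caller):

    netdev_queue_set_dql_min_limit(netdev_get_tx_queue(dev, 0),
                                   16 * 1024);
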
+/**
* netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
* @dev_queue: pointer to transmit queue
*
@@ -2943,6 +3439,16 @@ static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_qu
#endif
}
+/**
+ * netdev_tx_sent_queue - report the number of bytes queued to a given tx queue
+ * @dev_queue: network device queue
+ * @bytes: number of bytes queued to the device queue
+ *
+ * Report the number of bytes queued for sending/completion to the network
+ * device hardware queue. @bytes should be a good approximation and should
+ * exactly match netdev_completed_queue() @bytes.
+ * This is typically called once per packet, from ndo_start_xmit().
+ */
static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
unsigned int bytes)
{
@@ -2967,20 +3473,58 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
#endif
}
+/* Variant of netdev_tx_sent_queue() for drivers that are aware
+ * that they should not test BQL status themselves.
+ * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
+ * skb of a batch.
+ * Returns true if the doorbell must be used to kick the NIC.
+ */
+static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
+ unsigned int bytes,
+ bool xmit_more)
+{
+ if (xmit_more) {
+#ifdef CONFIG_BQL
+ dql_queued(&dev_queue->dql, bytes);
+#endif
+ return netif_tx_queue_stopped(dev_queue);
+ }
+ netdev_tx_sent_queue(dev_queue, bytes);
+ return true;
+}
+
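
A sketch of an ndo_start_xmit() tail built on this variant; foo_ring_doorbell(), txq and priv are assumed from the surrounding hypothetical driver, and netdev_xmit_more() is assumed available to report batch state:

    /* Only the last skb of a batch (or a stopped queue) rings. */
    if (__netdev_tx_sent_queue(txq, skb->len, netdev_xmit_more()))
        foo_ring_doorbell(priv);
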
/**
- * netdev_sent_queue - report the number of bytes queued to hardware
- * @dev: network device
- * @bytes: number of bytes queued to the hardware device queue
+ * netdev_sent_queue - report the number of bytes queued to hardware
+ * @dev: network device
+ * @bytes: number of bytes queued to the hardware device queue
*
- * Report the number of bytes queued for sending/completion to the network
- * device hardware queue. @bytes should be a good approximation and should
- * exactly match netdev_completed_queue() @bytes
+ * Report the number of bytes queued for sending/completion to the network
+ * device hardware queue #0. @bytes should be a good approximation and should
+ * exactly match netdev_completed_queue() @bytes.
+ * This is typically called once per packet, from ndo_start_xmit().
*/
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}
+static inline bool __netdev_sent_queue(struct net_device *dev,
+ unsigned int bytes,
+ bool xmit_more)
+{
+ return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
+ xmit_more);
+}
+
+/**
+ * netdev_tx_completed_queue - report number of packets/bytes at TX completion.
+ * @dev_queue: network device queue
+ * @pkts: number of packets (currently ignored)
+ * @bytes: number of bytes dequeued from the device queue
+ *
+ * Must be called at most once per TX completion round (and not per
+ * individual packet), so that BQL can adjust its limits appropriately.
+ */
static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
unsigned int pkts, unsigned int bytes)
{
@@ -2995,9 +3539,9 @@ static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
* netdev_tx_sent_queue will miss the update and cause the queue to
* be stopped forever
*/
- smp_mb();
+ smp_mb(); /* NOTE: netdev_txq_completed_mb() assumes this exists */
- if (dql_avail(&dev_queue->dql) < 0)
+ if (unlikely(dql_avail(&dev_queue->dql) < 0))
return;
if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
@@ -3107,7 +3651,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
}
/**
- * netif_subqueue_stopped - test status of subqueue
+ * __netif_subqueue_stopped - test status of subqueue
* @dev: network device
* @queue_index: sub queue index
*
@@ -3121,6 +3665,13 @@ static inline bool __netif_subqueue_stopped(const struct net_device *dev,
return netif_tx_queue_stopped(txq);
}
+/**
+ * netif_subqueue_stopped - test status of subqueue
+ * @dev: network device
+ * @skb: sub queue buffer pointer
+ *
+ * Check individual transmit queue of a device with multiple transmit queues.
+ */
static inline bool netif_subqueue_stopped(const struct net_device *dev,
struct sk_buff *skb)
{
@@ -3144,6 +3695,92 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
u16 index);
+int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
+ u16 index, enum xps_map_type type);
+
+/**
+ * netif_attr_test_mask - Test a CPU or Rx queue set in a mask
+ * @j: CPU/Rx queue index
+ * @mask: bitmask of all cpus/rx queues
+ * @nr_bits: number of bits in the bitmask
+ *
+ * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
+ */
+static inline bool netif_attr_test_mask(unsigned long j,
+ const unsigned long *mask,
+ unsigned int nr_bits)
+{
+ cpu_max_bits_warn(j, nr_bits);
+ return test_bit(j, mask);
+}
+
+/**
+ * netif_attr_test_online - Test for online CPU/Rx queue
+ * @j: CPU/Rx queue index
+ * @online_mask: bitmask for CPUs/Rx queues that are online
+ * @nr_bits: number of bits in the bitmask
+ *
+ * Returns true if a CPU/Rx queue is online.
+ */
+static inline bool netif_attr_test_online(unsigned long j,
+ const unsigned long *online_mask,
+ unsigned int nr_bits)
+{
+ cpu_max_bits_warn(j, nr_bits);
+
+ if (online_mask)
+ return test_bit(j, online_mask);
+
+ return (j < nr_bits);
+}
+
+/**
+ * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
+ * @n: CPU/Rx queue index
+ * @srcp: the cpumask/Rx queue mask pointer
+ * @nr_bits: number of bits in the bitmask
+ *
+ * Returns >= nr_bits if no further CPUs/Rx queues are set.
+ */
+static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
+ unsigned int nr_bits)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpu_max_bits_warn(n, nr_bits);
+
+ if (srcp)
+ return find_next_bit(srcp, nr_bits, n + 1);
+
+ return n + 1;
+}
+
+/**
+ * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
+ * @n: CPU/Rx queue index
+ * @src1p: the first CPUs/Rx queues mask pointer
+ * @src2p: the second CPUs/Rx queues mask pointer
+ * @nr_bits: number of bits in the bitmask
+ *
+ * Returns >= nr_bits if no further CPUs/Rx queues are set in both.
+ */
+static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
+ const unsigned long *src2p,
+ unsigned int nr_bits)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpu_max_bits_warn(n, nr_bits);
+
+ if (src1p && src2p)
+ return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
+ else if (src1p)
+ return find_next_bit(src1p, nr_bits, n + 1);
+ else if (src2p)
+ return find_next_bit(src2p, nr_bits, n + 1);
+
+ return n + 1;
+}
#else
static inline int netif_set_xps_queue(struct net_device *dev,
const struct cpumask *mask,
@@ -3151,20 +3788,14 @@ static inline int netif_set_xps_queue(struct net_device *dev,
{
return 0;
}
-#endif
-
-u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
- unsigned int num_tx_queues);
-/*
- * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
- * as a distribution range limit for the returned value.
- */
-static inline u16 skb_tx_hash(const struct net_device *dev,
- struct sk_buff *skb)
+static inline int __netif_set_xps_queue(struct net_device *dev,
+ const unsigned long *mask,
+ u16 index, enum xps_map_type type)
{
- return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
+ return 0;
}
+#endif
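
A sketch iterating a CPU/queue mask with the netif_attrmask helpers above; mask and nbits (e.g. nr_cpu_ids) are assumed provided by the caller:

    int j;

    for (j = netif_attrmask_next(-1, mask, nbits);
         j < (int)nbits;
         j = netif_attrmask_next(j, mask, nbits))
        pr_info("index %d is set\n", j);
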
/**
* netif_is_multiqueue - test if device has multiple transmit queues
@@ -3183,11 +3814,20 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
- unsigned int rxq)
+ unsigned int rxqs)
{
+ dev->real_num_rx_queues = rxqs;
return 0;
}
#endif
+int netif_set_real_num_queues(struct net_device *dev,
+ unsigned int txq, unsigned int rxq);
+
+static inline struct netdev_rx_queue *
+__netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
+{
+ return dev->_rx + rxq;
+}
#ifdef CONFIG_SYSFS
static inline unsigned int get_netdev_rx_queue_index(
@@ -3201,21 +3841,15 @@ static inline unsigned int get_netdev_rx_queue_index(
}
#endif
-#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
int netif_get_num_default_rss_queues(void);
-enum skb_free_reason {
- SKB_REASON_CONSUMED,
- SKB_REASON_DROPPED,
-};
-
-void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
-void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
+void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason);
+void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason);
/*
* It is not allowed to call kfree_skb() or consume_skb() from hardware
* interrupt context or with hardware interrupts being disabled.
- * (in_irq() || irqs_disabled())
+ * (in_hardirq() || irqs_disabled())
*
* We provide four helpers that can be used in following contexts :
*
@@ -3233,30 +3867,39 @@ void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
*/
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
- __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
+ dev_kfree_skb_irq_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
}
static inline void dev_consume_skb_irq(struct sk_buff *skb)
{
- __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
+ dev_kfree_skb_irq_reason(skb, SKB_CONSUMED);
}
static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
- __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
+ dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
}
static inline void dev_consume_skb_any(struct sk_buff *skb)
{
- __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
+ dev_kfree_skb_any_reason(skb, SKB_CONSUMED);
}
+u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog);
+void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
+int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
int netif_rx(struct sk_buff *skb);
-int netif_rx_ni(struct sk_buff *skb);
+int __netif_rx(struct sk_buff *skb);
+
int netif_receive_skb(struct sk_buff *skb);
+int netif_receive_skb_core(struct sk_buff *skb);
+void netif_receive_skb_list_internal(struct list_head *head);
+void netif_receive_skb_list(struct list_head *head);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
+void napi_get_frags_check(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
struct packet_offload *gro_find_receive_by_type(__be16 type);
struct packet_offload *gro_find_complete_by_type(__be16 type);
@@ -3274,72 +3917,196 @@ int netdev_rx_handler_register(struct net_device *dev,
void netdev_rx_handler_unregister(struct net_device *dev);
bool dev_valid_name(const char *name);
-int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
-int dev_ethtool(struct net *net, struct ifreq *);
+static inline bool is_socket_ioctl_cmd(unsigned int cmd)
+{
+ return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
+}
+int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg);
+int put_user_ifreq(struct ifreq *ifr, void __user *arg);
+int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
+ void __user *data, bool *need_copyout);
+int dev_ifconf(struct net *net, struct ifconf __user *ifc);
+int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata);
unsigned int dev_get_flags(const struct net_device *);
-int __dev_change_flags(struct net_device *, unsigned int flags);
-int dev_change_flags(struct net_device *, unsigned int);
-void __dev_notify_flags(struct net_device *, unsigned int old_flags,
- unsigned int gchanges);
-int dev_change_name(struct net_device *, const char *);
+int __dev_change_flags(struct net_device *dev, unsigned int flags,
+ struct netlink_ext_ack *extack);
+int dev_change_flags(struct net_device *dev, unsigned int flags,
+ struct netlink_ext_ack *extack);
int dev_set_alias(struct net_device *, const char *, size_t);
-int dev_change_net_namespace(struct net_device *, struct net *, const char *);
+int dev_get_alias(const struct net_device *, char *, size_t);
+int __dev_change_net_namespace(struct net_device *dev, struct net *net,
+ const char *pat, int new_ifindex);
+static inline
+int dev_change_net_namespace(struct net_device *dev, struct net *net,
+ const char *pat)
+{
+ return __dev_change_net_namespace(dev, net, pat, 0);
+}
int __dev_set_mtu(struct net_device *, int);
int dev_set_mtu(struct net_device *, int);
-void dev_set_group(struct net_device *, int);
-int dev_set_mac_address(struct net_device *, struct sockaddr *);
-int dev_change_carrier(struct net_device *, bool new_carrier);
-int dev_get_phys_port_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid);
-int dev_get_phys_port_name(struct net_device *dev,
- char *name, size_t len);
-int dev_change_proto_down(struct net_device *dev, bool proto_down);
-struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
+int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
+ struct netlink_ext_ack *extack);
+int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
+ struct netlink_ext_ack *extack);
+int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
+ struct netlink_ext_ack *extack);
+int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
+int dev_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid, bool recurse);
+bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
-typedef int (*xdp_op_t)(struct net_device *dev, struct netdev_xdp *xdp);
-int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
- int fd, u32 flags);
-u8 __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op, u32 *prog_id);
+int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+u8 dev_xdp_prog_count(struct net_device *dev);
+u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
+int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(const struct net_device *dev,
const struct sk_buff *skb);
+static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
+ const struct sk_buff *skb,
+ const bool check_mtu)
+{
+ const u32 vlan_hdr_len = 4; /* VLAN_HLEN */
+ unsigned int len;
+
+ if (!(dev->flags & IFF_UP))
+ return false;
+
+ if (!check_mtu)
+ return true;
+
+ len = dev->mtu + dev->hard_header_len + vlan_hdr_len;
+ if (skb->len <= len)
+ return true;
+
+ /* if TSO is enabled, the length does not matter: the packet
+ * can be forwarded as-is and segmented later
+ */
+ if (skb_is_gso(skb))
+ return true;
+
+ return false;
+}
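
Worked through for a stock Ethernet device, the length check above is plain arithmetic (the numbers below are the usual Ethernet defaults used for illustration, not values taken from this patch):

	/* Example (standard Ethernet netdev):
	 *   dev->mtu             = 1500
	 *   dev->hard_header_len =   14   (ETH_HLEN)
	 *   vlan_hdr_len         =    4   (VLAN_HLEN)
	 *   => len = 1518: a maximum-size 802.1Q-tagged frame is still
	 *      forwardable, while any longer non-GSO skb is dropped.
	 */
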
+
+struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev);
+
+static inline struct net_device_core_stats __percpu *dev_core_stats(struct net_device *dev)
+{
+ /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
+ struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats);
+
+ if (likely(p))
+ return p;
+
+ return netdev_core_stats_alloc(dev);
+}
+
+#define DEV_CORE_STATS_INC(FIELD) \
+static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \
+{ \
+ struct net_device_core_stats __percpu *p; \
+ \
+ p = dev_core_stats(dev); \
+ if (p) \
+ this_cpu_inc(p->FIELD); \
+}
+DEV_CORE_STATS_INC(rx_dropped)
+DEV_CORE_STATS_INC(tx_dropped)
+DEV_CORE_STATS_INC(rx_nohandler)
+DEV_CORE_STATS_INC(rx_otherhost_dropped)
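
A minimal sketch of how a driver receive path would use the generated helpers; the "mynic" name and the rx_bad_frame() predicate are invented for illustration and are not part of this patch:

	/* Hedged illustration only: mynic_rx() and rx_bad_frame() are
	 * hypothetical; the helpers above are what the core provides.
	 */
	static void mynic_rx(struct net_device *dev, struct sk_buff *skb)
	{
		if (rx_bad_frame(skb)) {
			/* dev_core_stats() may fail to allocate; the
			 * generated helper silently no-ops in that case.
			 */
			dev_core_stats_rx_dropped_inc(dev);
			kfree_skb(skb);
			return;
		}
		netif_rx(skb);
	}
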
+
static __always_inline int ____dev_forward_skb(struct net_device *dev,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ const bool check_mtu)
{
if (skb_orphan_frags(skb, GFP_ATOMIC) ||
- unlikely(!is_skb_forwardable(dev, skb))) {
- atomic_long_inc(&dev->rx_dropped);
+ unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) {
+ dev_core_stats_rx_dropped_inc(dev);
kfree_skb(skb);
return NET_RX_DROP;
}
- skb_scrub_packet(skb, true);
+ skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
skb->priority = 0;
return 0;
}
+bool dev_nit_active(struct net_device *dev);
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
-extern int netdev_budget;
-extern unsigned int netdev_budget_usecs;
+static inline void __dev_put(struct net_device *dev)
+{
+ if (dev) {
+#ifdef CONFIG_PCPU_DEV_REFCNT
+ this_cpu_dec(*dev->pcpu_refcnt);
+#else
+ refcount_dec(&dev->dev_refcnt);
+#endif
+ }
+}
+
+static inline void __dev_hold(struct net_device *dev)
+{
+ if (dev) {
+#ifdef CONFIG_PCPU_DEV_REFCNT
+ this_cpu_inc(*dev->pcpu_refcnt);
+#else
+ refcount_inc(&dev->dev_refcnt);
+#endif
+ }
+}
-/* Called by rtnetlink.c:rtnl_unlock() */
-void netdev_run_todo(void);
+static inline void __netdev_tracker_alloc(struct net_device *dev,
+ netdevice_tracker *tracker,
+ gfp_t gfp)
+{
+#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
+ ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp);
+#endif
+}
-/**
- * dev_put - release reference to device
- * @dev: network device
- *
- * Release reference to device to allow it to be freed.
+/* netdev_tracker_alloc() can upgrade a prior untracked reference
+ * taken by dev_get_by_name()/dev_get_by_index() to a tracked one.
*/
-static inline void dev_put(struct net_device *dev)
+static inline void netdev_tracker_alloc(struct net_device *dev,
+ netdevice_tracker *tracker, gfp_t gfp)
{
- this_cpu_dec(*dev->pcpu_refcnt);
+#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
+ refcount_dec(&dev->refcnt_tracker.no_tracker);
+ __netdev_tracker_alloc(dev, tracker, gfp);
+#endif
+}
+
+static inline void netdev_tracker_free(struct net_device *dev,
+ netdevice_tracker *tracker)
+{
+#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
+ ref_tracker_free(&dev->refcnt_tracker, tracker);
+#endif
+}
+
+static inline void netdev_hold(struct net_device *dev,
+ netdevice_tracker *tracker, gfp_t gfp)
+{
+ if (dev) {
+ __dev_hold(dev);
+ __netdev_tracker_alloc(dev, tracker, gfp);
+ }
+}
+
+static inline void netdev_put(struct net_device *dev,
+ netdevice_tracker *tracker)
+{
+ if (dev) {
+ netdev_tracker_free(dev, tracker);
+ __dev_put(dev);
+ }
}
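
A sketch of the tracked-reference pairing these helpers enable (the struct and field names other than netdevice_tracker are hypothetical): each hold is tied to a tracker cookie, so with CONFIG_NET_DEV_REFCNT_TRACKER a leaked reference can be reported with its acquiring call site.

	struct my_binding {			/* hypothetical user */
		struct net_device *dev;
		netdevice_tracker dev_tracker;
	};

	static void my_binding_init(struct my_binding *b, struct net_device *dev)
	{
		b->dev = dev;
		netdev_hold(dev, &b->dev_tracker, GFP_KERNEL);
	}

	static void my_binding_release(struct my_binding *b)
	{
		netdev_put(b->dev, &b->dev_tracker); /* releases the matching tracker */
	}
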
/**
@@ -3347,10 +4114,38 @@ static inline void dev_put(struct net_device *dev)
* @dev: network device
*
* Hold reference to device to keep it from being freed.
+ * Try using netdev_hold() instead.
*/
static inline void dev_hold(struct net_device *dev)
{
- this_cpu_inc(*dev->pcpu_refcnt);
+ netdev_hold(dev, NULL, GFP_ATOMIC);
+}
+
+/**
+ * dev_put - release reference to device
+ * @dev: network device
+ *
+ * Release reference to device to allow it to be freed.
+ * Try using netdev_put() instead.
+ */
+static inline void dev_put(struct net_device *dev)
+{
+ netdev_put(dev, NULL);
+}
+
+static inline void netdev_ref_replace(struct net_device *odev,
+ struct net_device *ndev,
+ netdevice_tracker *tracker,
+ gfp_t gfp)
+{
+ if (odev)
+ netdev_tracker_free(odev, tracker);
+
+ __dev_hold(ndev);
+ __dev_put(odev);
+
+ if (ndev)
+ __netdev_tracker_alloc(ndev, tracker, gfp);
}
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
@@ -3361,10 +4156,7 @@ static inline void dev_hold(struct net_device *dev)
* called netif_lowerlayer_*() because they represent the state of any
* kind of lower layer not just hardware media.
*/
-
-void linkwatch_init_dev(struct net_device *dev);
void linkwatch_fire_event(struct net_device *dev);
-void linkwatch_forget_dev(struct net_device *dev);
/**
* netif_carrier_ok - test if carrier present
@@ -3382,8 +4174,8 @@ unsigned long dev_trans_start(struct net_device *dev);
void __netdev_watchdog_up(struct net_device *dev);
void netif_carrier_on(struct net_device *dev);
-
void netif_carrier_off(struct net_device *dev);
+void netif_carrier_event(struct net_device *dev);
/**
* netif_dormant_on - mark device as dormant.
@@ -3428,6 +4220,46 @@ static inline bool netif_dormant(const struct net_device *dev)
/**
+ * netif_testing_on - mark device as under test.
+ * @dev: network device
+ *
+ * Mark device as under test (as per RFC2863).
+ *
+ * The testing state indicates that some test(s) must be performed on
+ * the interface. After completion of the test, the interface state
+ * will change to up, dormant, or down, as appropriate.
+ */
+static inline void netif_testing_on(struct net_device *dev)
+{
+ if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
+ linkwatch_fire_event(dev);
+}
+
+/**
+ * netif_testing_off - set device as not under test.
+ * @dev: network device
+ *
+ * Device is not in testing state.
+ */
+static inline void netif_testing_off(struct net_device *dev)
+{
+ if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state))
+ linkwatch_fire_event(dev);
+}
+
+/**
+ * netif_testing - test if device is under test
+ * @dev: network device
+ *
+ * Check if device is under test
+ */
+static inline bool netif_testing(const struct net_device *dev)
+{
+ return test_bit(__LINK_STATE_TESTING, &dev->state);
+}
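
As a usage sketch (hypothetical driver, not from this patch), an ethtool self-test handler would bracket the test with these helpers so the RFC2863 operational state is reported correctly while the test runs:

	static void mynic_self_test(struct net_device *dev,
				    struct ethtool_test *test, u64 *data)
	{
		netif_testing_on(dev);	/* operstate -> testing */
		data[0] = mynic_run_loopback_test(dev);	/* hypothetical helper */
		netif_testing_off(dev);	/* back to up/dormant/down */
	}
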
+
+/**
* netif_oper_up - test if device is operational
* @dev: network device
*
@@ -3445,7 +4277,7 @@ static inline bool netif_oper_up(const struct net_device *dev)
*
* Check if device has not been removed from system.
*/
-static inline bool netif_device_present(struct net_device *dev)
+static inline bool netif_device_present(const struct net_device *dev)
{
return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
@@ -3459,22 +4291,48 @@ void netif_device_attach(struct net_device *dev);
*/
enum {
- NETIF_MSG_DRV = 0x0001,
- NETIF_MSG_PROBE = 0x0002,
- NETIF_MSG_LINK = 0x0004,
- NETIF_MSG_TIMER = 0x0008,
- NETIF_MSG_IFDOWN = 0x0010,
- NETIF_MSG_IFUP = 0x0020,
- NETIF_MSG_RX_ERR = 0x0040,
- NETIF_MSG_TX_ERR = 0x0080,
- NETIF_MSG_TX_QUEUED = 0x0100,
- NETIF_MSG_INTR = 0x0200,
- NETIF_MSG_TX_DONE = 0x0400,
- NETIF_MSG_RX_STATUS = 0x0800,
- NETIF_MSG_PKTDATA = 0x1000,
- NETIF_MSG_HW = 0x2000,
- NETIF_MSG_WOL = 0x4000,
+ NETIF_MSG_DRV_BIT,
+ NETIF_MSG_PROBE_BIT,
+ NETIF_MSG_LINK_BIT,
+ NETIF_MSG_TIMER_BIT,
+ NETIF_MSG_IFDOWN_BIT,
+ NETIF_MSG_IFUP_BIT,
+ NETIF_MSG_RX_ERR_BIT,
+ NETIF_MSG_TX_ERR_BIT,
+ NETIF_MSG_TX_QUEUED_BIT,
+ NETIF_MSG_INTR_BIT,
+ NETIF_MSG_TX_DONE_BIT,
+ NETIF_MSG_RX_STATUS_BIT,
+ NETIF_MSG_PKTDATA_BIT,
+ NETIF_MSG_HW_BIT,
+ NETIF_MSG_WOL_BIT,
+
+ /* When you add a new bit above, update netif_msg_class_names array
+ * in net/ethtool/common.c
+ */
+ NETIF_MSG_CLASS_COUNT,
};
+/* Both ethtool_ops interface and internal driver implementation use u32 */
+static_assert(NETIF_MSG_CLASS_COUNT <= 32);
+
+#define __NETIF_MSG_BIT(bit) ((u32)1 << (bit))
+#define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT)
+
+#define NETIF_MSG_DRV __NETIF_MSG(DRV)
+#define NETIF_MSG_PROBE __NETIF_MSG(PROBE)
+#define NETIF_MSG_LINK __NETIF_MSG(LINK)
+#define NETIF_MSG_TIMER __NETIF_MSG(TIMER)
+#define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN)
+#define NETIF_MSG_IFUP __NETIF_MSG(IFUP)
+#define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR)
+#define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR)
+#define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED)
+#define NETIF_MSG_INTR __NETIF_MSG(INTR)
+#define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE)
+#define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS)
+#define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA)
+#define NETIF_MSG_HW __NETIF_MSG(HW)
+#define NETIF_MSG_WOL __NETIF_MSG(WOL)
#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
@@ -3500,13 +4358,14 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
if (debug_value == 0) /* no output */
return 0;
/* set low N bits */
- return (1 << debug_value) - 1;
+ return (1U << debug_value) - 1;
}
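
Concretely, for a typical debug module parameter (a worked sketch; the early "use default" return for negative or out-of-range debug_value sits just above the quoted hunk):

	/* netif_msg_init(3, NETIF_MSG_DRV | NETIF_MSG_LINK)
	 *   -> (1U << 3) - 1 = 0x7
	 *   =  NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
	 * netif_msg_init(0, ...)  -> 0 (silence)
	 * netif_msg_init(-1, d)   -> d (out of range: keep the defaults)
	 */
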
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
spin_lock(&txq->_xmit_lock);
- txq->xmit_lock_owner = cpu;
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, cpu);
}
static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -3523,33 +4382,50 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
spin_lock_bh(&txq->_xmit_lock);
- txq->xmit_lock_owner = smp_processor_id();
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
}
static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
bool ok = spin_trylock(&txq->_xmit_lock);
- if (likely(ok))
- txq->xmit_lock_owner = smp_processor_id();
+
+ if (likely(ok)) {
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+ }
return ok;
}
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
- txq->xmit_lock_owner = -1;
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, -1);
spin_unlock(&txq->_xmit_lock);
}
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
- txq->xmit_lock_owner = -1;
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, -1);
spin_unlock_bh(&txq->_xmit_lock);
}
+/*
+ * txq->trans_start can be read locklessly from dev_watchdog()
+ */
static inline void txq_trans_update(struct netdev_queue *txq)
{
if (txq->xmit_lock_owner != -1)
- txq->trans_start = jiffies;
+ WRITE_ONCE(txq->trans_start, jiffies);
+}
+
+static inline void txq_trans_cond_update(struct netdev_queue *txq)
+{
+ unsigned long now = jiffies;
+
+ if (READ_ONCE(txq->trans_start) != now)
+ WRITE_ONCE(txq->trans_start, now);
}
/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
@@ -3557,8 +4433,7 @@ static inline void netif_trans_update(struct net_device *dev)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
- if (txq->trans_start != jiffies)
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
}
/**
@@ -3567,27 +4442,7 @@ static inline void netif_trans_update(struct net_device *dev)
*
* Get network device transmit lock
*/
-static inline void netif_tx_lock(struct net_device *dev)
-{
- unsigned int i;
- int cpu;
-
- spin_lock(&dev->tx_global_lock);
- cpu = smp_processor_id();
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-
- /* We are the only thread of execution doing a
- * freeze, but we have to grab the _xmit_lock in
- * order to synchronize with threads which are in
- * the ->hard_start_xmit() handler and already
- * checked the frozen bit.
- */
- __netif_tx_lock(txq, cpu);
- set_bit(__QUEUE_STATE_FROZEN, &txq->state);
- __netif_tx_unlock(txq);
- }
-}
+void netif_tx_lock(struct net_device *dev);
static inline void netif_tx_lock_bh(struct net_device *dev)
{
@@ -3595,22 +4450,7 @@ static inline void netif_tx_lock_bh(struct net_device *dev)
netif_tx_lock(dev);
}
-static inline void netif_tx_unlock(struct net_device *dev)
-{
- unsigned int i;
-
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-
- /* No need to grab the _xmit_lock here. If the
- * queue is not stopped for another reason, we
- * force a schedule.
- */
- clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
- netif_schedule_queue(txq);
- }
- spin_unlock(&dev->tx_global_lock);
-}
+void netif_tx_unlock(struct net_device *dev);
static inline void netif_tx_unlock_bh(struct net_device *dev)
{
@@ -3646,6 +4486,7 @@ static inline void netif_tx_disable(struct net_device *dev)
local_bh_disable();
cpu = smp_processor_id();
+ spin_lock(&dev->tx_global_lock);
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
@@ -3653,27 +4494,29 @@ static inline void netif_tx_disable(struct net_device *dev)
netif_tx_stop_queue(txq);
__netif_tx_unlock(txq);
}
+ spin_unlock(&dev->tx_global_lock);
local_bh_enable();
}
static inline void netif_addr_lock(struct net_device *dev)
{
- spin_lock(&dev->addr_list_lock);
-}
-
-static inline void netif_addr_lock_nested(struct net_device *dev)
-{
- int subclass = SINGLE_DEPTH_NESTING;
+ unsigned char nest_level = 0;
- if (dev->netdev_ops->ndo_get_lock_subclass)
- subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
-
- spin_lock_nested(&dev->addr_list_lock, subclass);
+#ifdef CONFIG_LOCKDEP
+ nest_level = dev->nested_level;
+#endif
+ spin_lock_nested(&dev->addr_list_lock, nest_level);
}
static inline void netif_addr_lock_bh(struct net_device *dev)
{
- spin_lock_bh(&dev->addr_list_lock);
+ unsigned char nest_level = 0;
+
+#ifdef CONFIG_LOCKDEP
+ nest_level = dev->nested_level;
+#endif
+ local_bh_disable();
+ spin_lock_nested(&dev->addr_list_lock, nest_level);
}
static inline void netif_addr_unlock(struct net_device *dev)
@@ -3712,6 +4555,8 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
int register_netdev(struct net_device *dev);
void unregister_netdev(struct net_device *dev);
+int devm_register_netdev(struct device *dev, struct net_device *ndev);
+
/* General hardware address lists handling functions */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list, int addr_len);
@@ -3722,6 +4567,16 @@ int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
int (*sync)(struct net_device *, const unsigned char *),
int (*unsync)(struct net_device *,
const unsigned char *));
+int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
+ struct net_device *dev,
+ int (*sync)(struct net_device *,
+ const unsigned char *, int),
+ int (*unsync)(struct net_device *,
+ const unsigned char *, int));
+void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
+ struct net_device *dev,
+ int (*unsync)(struct net_device *,
+ const unsigned char *, int));
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
struct net_device *dev,
int (*unsync)(struct net_device *,
@@ -3729,12 +4584,24 @@ void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
void __hw_addr_init(struct netdev_hw_addr_list *list);
/* Functions used for device addresses handling */
+void dev_addr_mod(struct net_device *dev, unsigned int offset,
+ const void *addr, size_t len);
+
+static inline void
+__dev_addr_set(struct net_device *dev, const void *addr, size_t len)
+{
+ dev_addr_mod(dev, 0, addr, len);
+}
+
+static inline void dev_addr_set(struct net_device *dev, const u8 *addr)
+{
+ __dev_addr_set(dev, addr, dev->addr_len);
+}
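
A typical call site would look like the following sketch (the probe function and EEPROM read are invented for illustration); these helpers are the supported way to set the address, rather than writing dev->dev_addr directly:

	static int mynic_probe(struct net_device *dev)
	{
		u8 mac[ETH_ALEN];

		mynic_read_mac_from_eeprom(dev, mac);	/* hypothetical */
		dev_addr_set(dev, mac);	/* not memcpy(dev->dev_addr, ...) */
		return 0;
	}
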
+
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type);
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type);
-void dev_addr_flush(struct net_device *dev);
-int dev_addr_init(struct net_device *dev);
/* Functions used for unicast addresses handling */
int dev_uc_add(struct net_device *dev, const unsigned char *addr);
@@ -3824,10 +4691,10 @@ static inline void __dev_mc_unsync(struct net_device *dev,
/* Functions used for secondary unicast and multicast support */
void dev_set_rx_mode(struct net_device *dev);
-void __dev_set_rx_mode(struct net_device *dev);
int dev_set_promiscuity(struct net_device *dev, int inc);
int dev_set_allmulti(struct net_device *dev, int inc);
void netdev_state_change(struct net_device *dev);
+void __netdev_notify_peers(struct net_device *dev);
void netdev_notify_peers(struct net_device *dev);
void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
@@ -3836,20 +4703,34 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *storage);
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
const struct net_device_stats *netdev_stats);
+void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
+ const struct pcpu_sw_netstats __percpu *netstats);
+void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
extern int netdev_max_backlog;
-extern int netdev_tstamp_prequeue;
-extern int weight_p;
-extern int dev_weight_rx_bias;
-extern int dev_weight_tx_bias;
extern int dev_rx_weight;
extern int dev_tx_weight;
+extern int gro_normal_batch;
+
+enum {
+ NESTED_SYNC_IMM_BIT,
+ NESTED_SYNC_TODO_BIT,
+};
+
+#define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit))
+#define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT)
+
+#define NESTED_SYNC_IMM __NESTED_SYNC(IMM)
+#define NESTED_SYNC_TODO __NESTED_SYNC(TODO)
+
+struct netdev_nested_priv {
+ unsigned char flags;
+ void *data;
+};
bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
struct list_head **iter);
-struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
- struct list_head **iter);
/* iterate through upper list, must be called under RCU read lock */
#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
@@ -3860,12 +4741,14 @@ struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
int (*fn)(struct net_device *upper_dev,
- void *data),
- void *data);
+ struct netdev_nested_priv *priv),
+ struct netdev_nested_priv *priv);
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
struct net_device *upper_dev);
+bool netdev_has_any_upper_dev(struct net_device *dev);
+
void *netdev_lower_get_next_private(struct net_device *dev,
struct list_head **iter);
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
@@ -3892,30 +4775,39 @@ void *netdev_lower_get_next(struct net_device *dev,
ldev; \
ldev = netdev_lower_get_next(dev, &(iter)))
-struct net_device *netdev_all_lower_get_next(struct net_device *dev,
+struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
struct list_head **iter);
-struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
- struct list_head **iter);
-
int netdev_walk_all_lower_dev(struct net_device *dev,
int (*fn)(struct net_device *lower_dev,
- void *data),
- void *data);
+ struct netdev_nested_priv *priv),
+ struct netdev_nested_priv *priv);
int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
int (*fn)(struct net_device *lower_dev,
- void *data),
- void *data);
+ struct netdev_nested_priv *priv),
+ struct netdev_nested_priv *priv);
void *netdev_adjacent_get_private(struct list_head *adj_list);
void *netdev_lower_get_first_private_rcu(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
-int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
+int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev,
+ struct netlink_ext_ack *extack);
int netdev_master_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev,
- void *upper_priv, void *upper_info);
+ void *upper_priv, void *upper_info,
+ struct netlink_ext_ack *extack);
void netdev_upper_dev_unlink(struct net_device *dev,
struct net_device *upper_dev);
+int netdev_adjacent_change_prepare(struct net_device *old_dev,
+ struct net_device *new_dev,
+ struct net_device *dev,
+ struct netlink_ext_ack *extack);
+void netdev_adjacent_change_commit(struct net_device *old_dev,
+ struct net_device *new_dev,
+ struct net_device *dev);
+void netdev_adjacent_change_abort(struct net_device *old_dev,
+ struct net_device *new_dev,
+ struct net_device *dev);
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
void *netdev_lower_dev_get_private(struct net_device *dev,
struct net_device *lower_dev);
@@ -3927,7 +4819,6 @@ void netdev_lower_state_changed(struct net_device *lower_dev,
extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
void netdev_rss_key_fill(void *buffer, size_t len);
-int dev_get_nest_level(struct net_device *dev);
int skb_checksum_help(struct sk_buff *skb);
int skb_crc32c_csum_help(struct sk_buff *skb);
int skb_csum_hwoffload_help(struct sk_buff *skb,
@@ -3935,6 +4826,8 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
netdev_features_t features, bool tx_path);
+struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
+ netdev_features_t features, __be16 type);
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
netdev_features_t features);
@@ -3951,6 +4844,15 @@ struct netdev_notifier_bonding_info {
void netdev_bonding_info_change(struct net_device *dev,
struct netdev_bonding_info *bonding_info);
+#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
+void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data);
+#else
+static inline void ethtool_notify(struct net_device *dev, unsigned int cmd,
+ const void *data)
+{
+}
+#endif
+
static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
@@ -3982,9 +4884,10 @@ static inline bool can_checksum_protocol(netdev_features_t features,
}
#ifdef CONFIG_BUG
-void netdev_rx_csum_fault(struct net_device *dev);
+void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
#else
-static inline void netdev_rx_csum_fault(struct net_device *dev)
+static inline void netdev_rx_csum_fault(struct net_device *dev,
+ struct sk_buff *skb)
{
}
#endif
@@ -3992,25 +4895,36 @@ static inline void netdev_rx_csum_fault(struct net_device *dev)
void net_enable_timestamp(void);
void net_disable_timestamp(void);
-#ifdef CONFIG_PROC_FS
-int __init dev_proc_init(void);
-#else
-#define dev_proc_init() 0
-#endif
+static inline ktime_t netdev_get_tstamp(struct net_device *dev,
+ const struct skb_shared_hwtstamps *hwtstamps,
+ bool cycles)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ if (ops->ndo_get_tstamp)
+ return ops->ndo_get_tstamp(dev, hwtstamps, cycles);
+
+ return hwtstamps->hwtstamp;
+}
static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
struct sk_buff *skb, struct net_device *dev,
bool more)
{
- skb->xmit_more = more ? 1 : 0;
+ __this_cpu_write(softnet_data.xmit.more, more);
return ops->ndo_start_xmit(skb, dev);
}
+static inline bool netdev_xmit_more(void)
+{
+ return __this_cpu_read(softnet_data.xmit.more);
+}
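
With the xmit_more hint moved from the skb into per-cpu softnet state, a driver xmit path queries it as in this sketch (the ring and doorbell helpers are hypothetical) to batch expensive doorbell writes across a burst of packets:

	static netdev_tx_t mynic_start_xmit(struct sk_buff *skb,
					    struct net_device *dev)
	{
		struct mynic_ring *ring = mynic_tx_ring(dev);	/* hypothetical */

		mynic_post_descriptor(ring, skb);
		/* Ring the (uncached, expensive) doorbell only on the last
		 * skb of a batch, or when the queue is about to stop.
		 */
		if (!netdev_xmit_more() ||
		    netif_xmit_stopped(netdev_get_tx_queue(dev, 0)))
			mynic_ring_doorbell(ring);
		return NETDEV_TX_OK;
	}
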
+
static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, bool more)
{
const struct net_device_ops *ops = dev->netdev_ops;
- int rc;
+ netdev_tx_t rc;
rc = __netdev_start_xmit(ops, skb, dev, more);
if (rc == NETDEV_TX_OK)
@@ -4019,27 +4933,15 @@ static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_devi
return rc;
}
-int netdev_class_create_file_ns(struct class_attribute *class_attr,
+int netdev_class_create_file_ns(const struct class_attribute *class_attr,
const void *ns);
-void netdev_class_remove_file_ns(struct class_attribute *class_attr,
+void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
const void *ns);
-static inline int netdev_class_create_file(struct class_attribute *class_attr)
-{
- return netdev_class_create_file_ns(class_attr, NULL);
-}
-
-static inline void netdev_class_remove_file(struct class_attribute *class_attr)
-{
- netdev_class_remove_file_ns(class_attr, NULL);
-}
-
-extern struct kobj_ns_type_operations net_ns_type_operations;
+extern const struct kobj_ns_type_operations net_ns_type_operations;
const char *netdev_drivername(const struct net_device *dev);
-void linkwatch_run_queue(void);
-
static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
netdev_features_t f2)
{
@@ -4089,7 +4991,6 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
/* check flags correspondence */
BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
- BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
@@ -4105,6 +5006,9 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));
return (features & feature) == feature;
}
@@ -4123,11 +5027,10 @@ static inline bool netif_needs_gso(struct sk_buff *skb,
(skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
-static inline void netif_set_gso_max_size(struct net_device *dev,
- unsigned int size)
-{
- dev->gso_max_size = size;
-}
+void netif_set_tso_max_size(struct net_device *dev, unsigned int size);
+void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs);
+void netif_inherit_tso_max(struct net_device *to,
+ const struct net_device *from);
static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
int pulled_hlen, u16 mac_offset,
@@ -4157,16 +5060,6 @@ static inline bool netif_is_macvlan_port(const struct net_device *dev)
return dev->priv_flags & IFF_MACVLAN_PORT;
}
-static inline bool netif_is_ipvlan(const struct net_device *dev)
-{
- return dev->priv_flags & IFF_IPVLAN_SLAVE;
-}
-
-static inline bool netif_is_ipvlan_port(const struct net_device *dev)
-{
- return dev->priv_flags & IFF_IPVLAN_MASTER;
-}
-
static inline bool netif_is_bond_master(const struct net_device *dev)
{
return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
@@ -4182,6 +5075,11 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
return dev->priv_flags & IFF_SUPP_NOFCS;
}
+static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
+}
+
static inline bool netif_is_l3_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_L3MDEV_MASTER;
@@ -4212,6 +5110,11 @@ static inline bool netif_is_ovs_port(const struct net_device *dev)
return dev->priv_flags & IFF_OVS_DATAPATH;
}
+static inline bool netif_is_any_bridge_port(const struct net_device *dev)
+{
+ return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
+}
+
static inline bool netif_is_team_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_TEAM;
@@ -4237,6 +5140,16 @@ static inline bool netif_is_rxfh_configured(const struct net_device *dev)
return dev->priv_flags & IFF_RXFH_CONFIGURED;
}
+static inline bool netif_is_failover(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_FAILOVER;
+}
+
+static inline bool netif_is_failover_slave(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_FAILOVER_SLAVE;
+}
+
/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
@@ -4247,7 +5160,7 @@ static inline void netif_keep_dst(struct net_device *dev)
static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
{
/* TODO: reserve and use an additional IFF bit, if we get more users */
- return dev->priv_flags & IFF_MACSEC;
+ return netif_is_macsec(dev);
}
extern struct pernet_operations __net_initdata loopback_net_ops;
@@ -4263,11 +5176,6 @@ static inline const char *netdev_name(const struct net_device *dev)
return dev->name;
}
-static inline bool netdev_unregistering(const struct net_device *dev)
-{
- return dev->reg_state == NETREG_UNREGISTERING;
-}
-
static inline const char *netdev_reg_state(const struct net_device *dev)
{
switch (dev->reg_state) {
@@ -4283,130 +5191,21 @@ static inline const char *netdev_reg_state(const struct net_device *dev)
return " (unknown)";
}
-__printf(3, 4)
-void netdev_printk(const char *level, const struct net_device *dev,
- const char *format, ...);
-__printf(2, 3)
-void netdev_emerg(const struct net_device *dev, const char *format, ...);
-__printf(2, 3)
-void netdev_alert(const struct net_device *dev, const char *format, ...);
-__printf(2, 3)
-void netdev_crit(const struct net_device *dev, const char *format, ...);
-__printf(2, 3)
-void netdev_err(const struct net_device *dev, const char *format, ...);
-__printf(2, 3)
-void netdev_warn(const struct net_device *dev, const char *format, ...);
-__printf(2, 3)
-void netdev_notice(const struct net_device *dev, const char *format, ...);
-__printf(2, 3)
-void netdev_info(const struct net_device *dev, const char *format, ...);
-
#define MODULE_ALIAS_NETDEV(device) \
MODULE_ALIAS("netdev-" device)
-#if defined(CONFIG_DYNAMIC_DEBUG)
-#define netdev_dbg(__dev, format, args...) \
-do { \
- dynamic_netdev_dbg(__dev, format, ##args); \
-} while (0)
-#elif defined(DEBUG)
-#define netdev_dbg(__dev, format, args...) \
- netdev_printk(KERN_DEBUG, __dev, format, ##args)
-#else
-#define netdev_dbg(__dev, format, args...) \
-({ \
- if (0) \
- netdev_printk(KERN_DEBUG, __dev, format, ##args); \
-})
-#endif
-
-#if defined(VERBOSE_DEBUG)
-#define netdev_vdbg netdev_dbg
-#else
-
-#define netdev_vdbg(dev, format, args...) \
-({ \
- if (0) \
- netdev_printk(KERN_DEBUG, dev, format, ##args); \
- 0; \
-})
-#endif
-
/*
* netdev_WARN() acts like dev_printk(), but with the key difference
* of using a WARN/WARN_ON to get the message out, including the
* file/line information and a backtrace.
*/
#define netdev_WARN(dev, format, args...) \
- WARN(1, "netdevice: %s%s\n" format, netdev_name(dev), \
+ WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \
netdev_reg_state(dev), ##args)
-/* netif printk helpers, similar to netdev_printk */
-
-#define netif_printk(priv, type, level, dev, fmt, args...) \
-do { \
- if (netif_msg_##type(priv)) \
- netdev_printk(level, (dev), fmt, ##args); \
-} while (0)
-
-#define netif_level(level, priv, type, dev, fmt, args...) \
-do { \
- if (netif_msg_##type(priv)) \
- netdev_##level(dev, fmt, ##args); \
-} while (0)
-
-#define netif_emerg(priv, type, dev, fmt, args...) \
- netif_level(emerg, priv, type, dev, fmt, ##args)
-#define netif_alert(priv, type, dev, fmt, args...) \
- netif_level(alert, priv, type, dev, fmt, ##args)
-#define netif_crit(priv, type, dev, fmt, args...) \
- netif_level(crit, priv, type, dev, fmt, ##args)
-#define netif_err(priv, type, dev, fmt, args...) \
- netif_level(err, priv, type, dev, fmt, ##args)
-#define netif_warn(priv, type, dev, fmt, args...) \
- netif_level(warn, priv, type, dev, fmt, ##args)
-#define netif_notice(priv, type, dev, fmt, args...) \
- netif_level(notice, priv, type, dev, fmt, ##args)
-#define netif_info(priv, type, dev, fmt, args...) \
- netif_level(info, priv, type, dev, fmt, ##args)
-
-#if defined(CONFIG_DYNAMIC_DEBUG)
-#define netif_dbg(priv, type, netdev, format, args...) \
-do { \
- if (netif_msg_##type(priv)) \
- dynamic_netdev_dbg(netdev, format, ##args); \
-} while (0)
-#elif defined(DEBUG)
-#define netif_dbg(priv, type, dev, format, args...) \
- netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
-#else
-#define netif_dbg(priv, type, dev, format, args...) \
-({ \
- if (0) \
- netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
- 0; \
-})
-#endif
-
-/* if @cond then downgrade to debug, else print at @level */
-#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \
- do { \
- if (cond) \
- netif_dbg(priv, type, netdev, fmt, ##args); \
- else \
- netif_ ## level(priv, type, netdev, fmt, ##args); \
- } while (0)
-
-#if defined(VERBOSE_DEBUG)
-#define netif_vdbg netif_dbg
-#else
-#define netif_vdbg(priv, type, dev, format, args...) \
-({ \
- if (0) \
- netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
- 0; \
-})
-#endif
+#define netdev_WARN_ONCE(dev, format, args...) \
+ WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \
+ netdev_reg_state(dev), ##args)
/*
* The list of packet types we will receive (as opposed to discard)
@@ -4415,15 +5214,7 @@ do { \
* Why 16? Because with 16 the only overlap we get on a hash of the
* low nibble of the protocol value is RARP/SNAP/X.25.
*
- * NOTE: That is no longer true with the addition of VLAN tags. Not
- * sure which should go first, but I bet it won't make much
- * difference if we are running VLANs. The good news is that
- * this protocol won't be in the list unless compiled in, so
- * the average user (w/out VLANs) will not be adversely affected.
- * --BLG
- *
* 0800 IP
- * 8100 802.1Q VLAN
* 0001 802.3
* 0002 AX.25
* 0004 802.2
@@ -4438,4 +5229,14 @@ do { \
#define PTYPE_HASH_SIZE (16)
#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
+extern struct list_head ptype_all __read_mostly;
+extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
+
+extern struct net_device *blackhole_netdev;
+
+/* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */
+#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
+#define DEV_STATS_ADD(DEV, FIELD, VAL) \
+ atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
+
#endif /* _LINUX_NETDEVICE_H */
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index a4b97be30b28..0762444e3767 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NETFILTER_H
#define __LINUX_NETFILTER_H
@@ -12,9 +13,9 @@
#include <linux/static_key.h>
#include <linux/netfilter_defs.h>
#include <linux/netdevice.h>
+#include <linux/sockptr.h>
#include <net/net_namespace.h>
-#ifdef CONFIG_NETFILTER
static inline int NF_DROP_GETERR(int verdict)
{
return -(verdict >> NF_VERDICT_QBITS);
@@ -23,20 +24,36 @@ static inline int NF_DROP_GETERR(int verdict)
static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
const union nf_inet_addr *a2)
{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ const unsigned long *ul1 = (const unsigned long *)a1;
+ const unsigned long *ul2 = (const unsigned long *)a2;
+
+ return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL;
+#else
return a1->all[0] == a2->all[0] &&
a1->all[1] == a2->all[1] &&
a1->all[2] == a2->all[2] &&
a1->all[3] == a2->all[3];
+#endif
}
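
The fast path relies on union nf_inet_addr being exactly 128 bits, so the four 32-bit compares collapse into two 64-bit XORs; a sketch of the equivalence:

	/* Both forms test the same 128 bits:
	 *   32-bit: a1->all[0..3] == a2->all[0..3]  (4 loads, 4 compares)
	 *   64-bit: ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0
	 * The XOR form is branch-free, but only safe where unaligned 64-bit
	 * loads are cheap, hence the CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	 * && BITS_PER_LONG == 64 guard.
	 */
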
static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
union nf_inet_addr *result,
const union nf_inet_addr *mask)
{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ const unsigned long *ua = (const unsigned long *)a1;
+ unsigned long *ur = (unsigned long *)result;
+ const unsigned long *um = (const unsigned long *)mask;
+
+ ur[0] = ua[0] & um[0];
+ ur[1] = ua[1] & um[1];
+#else
result->all[0] = a1->all[0] & mask->all[0];
result->all[1] = a1->all[1] & mask->all[1];
result->all[2] = a1->all[2] & mask->all[2];
result->all[3] = a1->all[3] & mask->all[3];
+#endif
}
int netfilter_init(void);
@@ -48,8 +65,8 @@ struct nf_hook_ops;
struct sock;
struct nf_hook_state {
- unsigned int hook;
- u_int8_t pf;
+ u8 hook;
+ u8 pf;
struct net_device *in;
struct net_device *out;
struct sock *sk;
@@ -60,39 +77,63 @@ struct nf_hook_state {
typedef unsigned int nf_hookfn(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state);
-struct nf_hook_ops {
- struct list_head list;
+enum nf_hook_ops_type {
+ NF_HOOK_OP_UNDEFINED,
+ NF_HOOK_OP_NF_TABLES,
+ NF_HOOK_OP_BPF,
+};
+struct nf_hook_ops {
/* User fills in from here down. */
nf_hookfn *hook;
struct net_device *dev;
void *priv;
- u_int8_t pf;
+ u8 pf;
+ enum nf_hook_ops_type hook_ops_type:8;
unsigned int hooknum;
/* Hooks are ordered in ascending priority. */
int priority;
};
struct nf_hook_entry {
- struct nf_hook_entry __rcu *next;
nf_hookfn *hook;
void *priv;
- const struct nf_hook_ops *orig_ops;
};
-static inline void
-nf_hook_entry_init(struct nf_hook_entry *entry, const struct nf_hook_ops *ops)
-{
- entry->next = NULL;
- entry->hook = ops->hook;
- entry->priv = ops->priv;
- entry->orig_ops = ops;
-}
+struct nf_hook_entries_rcu_head {
+ struct rcu_head head;
+ void *allocation;
+};
-static inline int
-nf_hook_entry_priority(const struct nf_hook_entry *entry)
+struct nf_hook_entries {
+ u16 num_hook_entries;
+ /* padding */
+ struct nf_hook_entry hooks[];
+
+ /* trailer: pointers to original orig_ops of each hook,
+ * followed by rcu_head and scratch space used for freeing
+ * the structure via call_rcu.
+ *
+ * This is not part of struct nf_hook_entry since it's only
+ * needed in slow path (hook register/unregister):
+ * const struct nf_hook_ops *orig_ops[]
+ *
+ * For the same reason, we store this at the end -- it's
+ * only needed when a hook is deleted, not during
+ * packet path processing:
+ * struct nf_hook_entries_rcu_head head
+ */
+};
+
+#ifdef CONFIG_NETFILTER
+static inline struct nf_hook_ops **nf_hook_entries_get_hook_ops(const struct nf_hook_entries *e)
{
- return entry->orig_ops->priority;
+ unsigned int n = e->num_hook_entries;
+ const void *hook_end;
+
+ hook_end = &e->hooks[n]; /* this is *past* ->hooks[]! */
+
+ return (struct nf_hook_ops **)hook_end;
}
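
Given the trailer layout described above, the size of one allocation holding n hooks works out as in this sketch (illustrative only; the core's actual allocator in net/netfilter may differ in detail):

	/* size = sizeof(struct nf_hook_entries)             header
	 *      + n * sizeof(struct nf_hook_entry)           hooks[]
	 *      + n * sizeof(struct nf_hook_ops *)            orig_ops trailer
	 *      + sizeof(struct nf_hook_entries_rcu_head);   rcu free scratch
	 *
	 * nf_hook_entries_get_hook_ops() then points exactly at the trailer,
	 * one element past hooks[n - 1].
	 */
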
static inline int
@@ -102,12 +143,6 @@ nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb,
return entry->hook(entry->priv, skb, state);
}
-static inline const struct nf_hook_ops *
-nf_hook_entry_ops(const struct nf_hook_entry *entry)
-{
- return entry->orig_ops;
-}
-
static inline void nf_hook_state_init(struct nf_hook_state *p,
unsigned int hook,
u_int8_t pf,
@@ -136,18 +171,11 @@ struct nf_sockopt_ops {
/* Non-inclusive ranges: use 0/0/NULL to never get called. */
int set_optmin;
int set_optmax;
- int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len);
-#ifdef CONFIG_COMPAT
- int (*compat_set)(struct sock *sk, int optval,
- void __user *user, unsigned int len);
-#endif
+ int (*set)(struct sock *sk, int optval, sockptr_t arg,
+ unsigned int len);
int get_optmin;
int get_optmax;
int (*get)(struct sock *sk, int optval, void __user *user, int *len);
-#ifdef CONFIG_COMPAT
- int (*compat_get)(struct sock *sk, int optval,
- void __user *user, int *len);
-#endif
/* Use the module struct to lock set/get code in place */
struct module *owner;
};
@@ -160,25 +188,20 @@ int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
unsigned int n);
-int nf_register_hook(struct nf_hook_ops *reg);
-void nf_unregister_hook(struct nf_hook_ops *reg);
-int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
-void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
-int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
-void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
-
/* Functions to register get/setsockopt ranges (non-inclusive). You
need to check permissions yourself! */
int nf_register_sockopt(struct nf_sockopt_ops *reg);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#endif
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
- struct nf_hook_entry *entry);
+ const struct nf_hook_entries *e, unsigned int i);
+void nf_hook_slow_list(struct list_head *head, struct nf_hook_state *state,
+ const struct nf_hook_entries *e);
/**
* nf_hook - call a netfilter hook
*
@@ -191,10 +214,10 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
struct net_device *indev, struct net_device *outdev,
int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
- struct nf_hook_entry *hook_head;
+ struct nf_hook_entries *hook_head = NULL;
int ret = 1;
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
if (__builtin_constant_p(pf) &&
__builtin_constant_p(hook) &&
!static_key_false(&nf_hooks_needed[pf][hook]))
@@ -202,14 +225,37 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
#endif
rcu_read_lock();
- hook_head = rcu_dereference(net->nf.hooks[pf][hook]);
+ switch (pf) {
+ case NFPROTO_IPV4:
+ hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
+ break;
+ case NFPROTO_IPV6:
+ hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
+ break;
+ case NFPROTO_ARP:
+#ifdef CONFIG_NETFILTER_FAMILY_ARP
+ if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp)))
+ break;
+ hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
+#endif
+ break;
+ case NFPROTO_BRIDGE:
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+ hook_head = rcu_dereference(net->nf.hooks_bridge[hook]);
+#endif
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
if (hook_head) {
struct nf_hook_state state;
nf_hook_state_init(&state, hook, pf, indev, outdev,
sk, net, okfn);
- ret = nf_hook_slow(skb, &state, hook_head);
+ ret = nf_hook_slow(skb, &state, hook_head, 0);
}
rcu_read_unlock();
@@ -258,98 +304,92 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct
return ret;
}
-/* Call setsockopt() */
-int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
- unsigned int len);
-int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
- int *len);
-#ifdef CONFIG_COMPAT
-int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
- char __user *opt, unsigned int len);
-int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
- char __user *opt, int *len);
-#endif
-
-/* Call this before modifying an existing packet: ensures it is
- modifiable and linear to the point you care about (writable_len).
- Returns true or false. */
-int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
+static inline void
+NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
+ struct list_head *head, struct net_device *in, struct net_device *out,
+ int (*okfn)(struct net *, struct sock *, struct sk_buff *))
+{
+ struct nf_hook_entries *hook_head = NULL;
-struct flowi;
-struct nf_queue_entry;
+#ifdef CONFIG_JUMP_LABEL
+ if (__builtin_constant_p(pf) &&
+ __builtin_constant_p(hook) &&
+ !static_key_false(&nf_hooks_needed[pf][hook]))
+ return;
+#endif
-struct nf_afinfo {
- unsigned short family;
- __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, u_int8_t protocol);
- __sum16 (*checksum_partial)(struct sk_buff *skb,
- unsigned int hook,
- unsigned int dataoff,
- unsigned int len,
- u_int8_t protocol);
- int (*route)(struct net *net, struct dst_entry **dst,
- struct flowi *fl, bool strict);
- void (*saveroute)(const struct sk_buff *skb,
- struct nf_queue_entry *entry);
- int (*reroute)(struct net *net, struct sk_buff *skb,
- const struct nf_queue_entry *entry);
- int route_key_size;
-};
+ rcu_read_lock();
+ switch (pf) {
+ case NFPROTO_IPV4:
+ hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
+ break;
+ case NFPROTO_IPV6:
+ hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
-extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO];
-static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family)
-{
- return rcu_dereference(nf_afinfo[family]);
-}
+ if (hook_head) {
+ struct nf_hook_state state;
-static inline __sum16
-nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff,
- u_int8_t protocol, unsigned short family)
-{
- const struct nf_afinfo *afinfo;
- __sum16 csum = 0;
+ nf_hook_state_init(&state, hook, pf, in, out, sk, net, okfn);
- rcu_read_lock();
- afinfo = nf_get_afinfo(family);
- if (afinfo)
- csum = afinfo->checksum(skb, hook, dataoff, protocol);
+ nf_hook_slow_list(head, &state, hook_head);
+ }
rcu_read_unlock();
- return csum;
}
-static inline __sum16
-nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, unsigned int len,
- u_int8_t protocol, unsigned short family)
-{
- const struct nf_afinfo *afinfo;
- __sum16 csum = 0;
+/* Call setsockopt() */
+int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, sockptr_t opt,
+ unsigned int len);
+int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
+ int *len);
- rcu_read_lock();
- afinfo = nf_get_afinfo(family);
- if (afinfo)
- csum = afinfo->checksum_partial(skb, hook, dataoff, len,
- protocol);
- rcu_read_unlock();
- return csum;
-}
+struct flowi;
+struct nf_queue_entry;
-int nf_register_afinfo(const struct nf_afinfo *afinfo);
-void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
+__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u_int8_t protocol,
+ unsigned short family);
+
+__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, unsigned int len,
+ u_int8_t protocol, unsigned short family);
+int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
+ bool strict, unsigned short family);
+int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry);
#include <net/flow.h>
-extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
+
+struct nf_conn;
+enum nf_nat_manip_type;
+struct nlattr;
+enum ip_conntrack_dir;
+
+struct nf_nat_hook {
+ int (*parse_nat_setup)(struct nf_conn *ct, enum nf_nat_manip_type manip,
+ const struct nlattr *attr);
+ void (*decode_session)(struct sk_buff *skb, struct flowi *fl);
+ unsigned int (*manip_pkt)(struct sk_buff *skb, struct nf_conn *ct,
+ enum nf_nat_manip_type mtype,
+ enum ip_conntrack_dir dir);
+ void (*remove_nat_bysrc)(struct nf_conn *ct);
+};
+
+extern const struct nf_nat_hook __rcu *nf_nat_hook;
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
-#ifdef CONFIG_NF_NAT_NEEDED
- void (*decodefn)(struct sk_buff *, struct flowi *);
+#if IS_ENABLED(CONFIG_NF_NAT)
+ const struct nf_nat_hook *nat_hook;
rcu_read_lock();
- decodefn = rcu_dereference(nf_nat_decode_session_hook);
- if (decodefn)
- decodefn(skb, fl);
+ nat_hook = rcu_dereference(nf_nat_hook);
+ if (nat_hook && nat_hook->decode_session)
+ nat_hook->decode_session(skb, fl);
rcu_read_unlock();
#endif
}
@@ -372,6 +412,14 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
return okfn(net, sk, skb);
}
+static inline void
+NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
+ struct list_head *head, struct net_device *in, struct net_device *out,
+ int (*okfn)(struct net *, struct sock *, struct sk_buff *))
+{
+ /* nothing to do */
+}
+
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
struct sock *sk, struct sk_buff *skb,
struct net_device *indev, struct net_device *outdev,
@@ -386,23 +434,41 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
}
#endif /*CONFIG_NETFILTER*/
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_zones_common.h>
-extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
-extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
+void nf_ct_set_closing(struct nf_conntrack *nfct);
+struct nf_conntrack_tuple;
+bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+ const struct sk_buff *skb);
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
+static inline void nf_ct_set_closing(struct nf_conntrack *nfct) {}
+struct nf_conntrack_tuple;
+static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+ const struct sk_buff *skb)
+{
+ return false;
+}
#endif
struct nf_conn;
enum ip_conntrack_info;
+
+struct nf_ct_hook {
+ int (*update)(struct net *net, struct sk_buff *skb);
+ void (*destroy)(struct nf_conntrack *);
+ bool (*get_tuple_skb)(struct nf_conntrack_tuple *,
+ const struct sk_buff *);
+ void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb);
+ void (*set_closing)(struct nf_conntrack *nfct);
+};
+extern const struct nf_ct_hook __rcu *nf_ct_hook;
+
struct nlattr;
struct nfnl_ct_hook {
- struct nf_conn *(*get_ct)(const struct sk_buff *skb,
- enum ip_conntrack_info *ctinfo);
size_t (*build_size)(const struct nf_conn *ct);
int (*build)(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
@@ -413,7 +479,7 @@ struct nfnl_ct_hook {
void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo, s32 off);
};
-extern struct nfnl_ct_hook __rcu *nfnl_ct_hook;
+extern const struct nfnl_ct_hook __rcu *nfnl_ct_hook;
/**
* nf_skb_duplicated - TEE target has sent a packet
@@ -426,4 +492,9 @@ extern struct nfnl_ct_hook __rcu *nfnl_ct_hook;
*/
DECLARE_PER_CPU(bool, nf_skb_duplicated);
+/*
+ * Contains bitmask of ctnetlink event subscribers, if any.
+ * Can't be pernet due to NETLINK_LISTEN_ALL_NSID setsockopt flag.
+ */
+extern u8 nf_ctnetlink_has_listener;
#endif /*__LINUX_NETFILTER_H*/
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 8e42253e5d4d..e8c350a3ade1 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
* Patrick Schaaf <bof@bof.de>
* Martin Josefsson <gandalf@wlug.westbo.se>
- * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org>
*/
#ifndef _IP_SET_H
#define _IP_SET_H
@@ -101,7 +98,7 @@ struct ip_set_counter {
struct ip_set_comment_rcu {
struct rcu_head rcu;
- char str[0];
+ char str[];
};
struct ip_set_comment {
@@ -122,10 +119,11 @@ struct ip_set_ext {
u64 bytes;
char *comment;
u32 timeout;
+ u8 packets_op;
+ u8 bytes_op;
+ bool target;
};
-struct ip_set;
-
#define ext_timeout(e, s) \
((unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT]))
#define ext_counter(e, s) \
@@ -188,8 +186,22 @@ struct ip_set_type_variant {
/* Return true if "b" set is the same as "a"
* according to the create set parameters */
bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
+ /* Region-locking is used */
+ bool region_lock;
};
+struct ip_set_region {
+ spinlock_t lock; /* Region lock */
+ size_t ext_size; /* Size of the dynamic extensions */
+ u32 elements; /* Number of elements vs timeout */
+};
+
+/* Max range where every element is added/deleted in one step */
+#define IPSET_MAX_RANGE (1<<14)
+
+/* The max revision number supported by any set type + 1 */
+#define IPSET_REVISION_MAX 9
+
/* The core set type structure */
struct ip_set_type {
struct list_head list;
@@ -207,6 +219,8 @@ struct ip_set_type {
u8 family;
/* Type revisions */
u8 revision_min, revision_max;
+ /* Revision-specific supported (create) flags */
+ u8 create_flags[IPSET_REVISION_MAX+1];
/* Set features to control swapping */
u16 features;
@@ -270,49 +284,30 @@ ip_set_ext_destroy(struct ip_set *set, void *data)
/* Check that the extension is enabled for the set and
* call its destroy function for its extension part in data.
*/
- if (SET_WITH_COMMENT(set))
- ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy(
- set, ext_comment(data, set));
-}
+ if (SET_WITH_COMMENT(set)) {
+ struct ip_set_comment *c = ext_comment(data, set);
-static inline int
-ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
-{
- u32 cadt_flags = 0;
-
- if (SET_WITH_TIMEOUT(set))
- if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
- htonl(set->timeout))))
- return -EMSGSIZE;
- if (SET_WITH_COUNTER(set))
- cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
- if (SET_WITH_COMMENT(set))
- cadt_flags |= IPSET_FLAG_WITH_COMMENT;
- if (SET_WITH_SKBINFO(set))
- cadt_flags |= IPSET_FLAG_WITH_SKBINFO;
- if (SET_WITH_FORCEADD(set))
- cadt_flags |= IPSET_FLAG_WITH_FORCEADD;
-
- if (!cadt_flags)
- return 0;
- return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags));
+ ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy(set, c);
+ }
}
+int ip_set_put_flags(struct sk_buff *skb, struct ip_set *set);
+
/* Netlink CB args */
enum {
IPSET_CB_NET = 0, /* net namespace */
+ IPSET_CB_PROTO, /* ipset protocol */
IPSET_CB_DUMP, /* dump single set/all sets */
IPSET_CB_INDEX, /* set index */
IPSET_CB_PRIVATE, /* set private data */
IPSET_CB_ARG0, /* type specific */
- IPSET_CB_ARG1,
};
/* register and unregister set references */
extern ip_set_id_t ip_set_get_byname(struct net *net,
const char *name, struct ip_set **set);
extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
-extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index);
+extern void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name);
extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);
@@ -339,6 +334,10 @@ extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
struct ip_set_ext *ext);
extern int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
const void *e, bool active);
+extern bool ip_set_match_extensions(struct ip_set *set,
+ const struct ip_set_ext *ext,
+ struct ip_set_ext *mext,
+ u32 flags, void *data);
static inline int
ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr)
@@ -395,33 +394,30 @@ ip_set_get_h16(const struct nlattr *attr)
return ntohs(nla_get_be16(attr));
}
-#define ipset_nest_start(skb, attr) nla_nest_start(skb, attr | NLA_F_NESTED)
-#define ipset_nest_end(skb, start) nla_nest_end(skb, start)
-
static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr)
{
- struct nlattr *__nested = ipset_nest_start(skb, type);
+ struct nlattr *__nested = nla_nest_start(skb, type);
int ret;
if (!__nested)
return -EMSGSIZE;
ret = nla_put_in_addr(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);
if (!ret)
- ipset_nest_end(skb, __nested);
+ nla_nest_end(skb, __nested);
return ret;
}
static inline int nla_put_ipaddr6(struct sk_buff *skb, int type,
const struct in6_addr *ipaddrptr)
{
- struct nlattr *__nested = ipset_nest_start(skb, type);
+ struct nlattr *__nested = nla_nest_start(skb, type);
int ret;
if (!__nested)
return -EMSGSIZE;
ret = nla_put_in6_addr(skb, IPSET_ATTR_IPADDR_IPV6, ipaddrptr);
if (!ret)
- ipset_nest_end(skb, __nested);
+ nla_nest_end(skb, __nested);
return ret;
}
@@ -445,20 +441,92 @@ ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
sizeof(*addr));
}
-/* Calculate the bytes required to store the inclusive range of a-b */
-static inline int
-bitmap_bytes(u32 a, u32 b)
+/* How often should the gc be run by default */
+#define IPSET_GC_TIME (3 * 60)
+
+/* Timeout period depending on the timeout value of the given set */
+#define IPSET_GC_PERIOD(timeout) \
+ ((timeout/3) ? min_t(u32, (timeout)/3, IPSET_GC_TIME) : 1)
+
+/* Entry is set with no timeout value */
+#define IPSET_ELEM_PERMANENT 0
+
+/* Set is defined with timeout support: timeout value may be 0 */
+#define IPSET_NO_TIMEOUT UINT_MAX
+
+/* Max timeout value, see msecs_to_jiffies() in jiffies.h */
+#define IPSET_MAX_TIMEOUT ((UINT_MAX >> 1)/MSEC_PER_SEC)
+
+#define ip_set_adt_opt_timeout(opt, set) \
+((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout)
+
+static inline unsigned int
+ip_set_timeout_uget(struct nlattr *tb)
+{
+ unsigned int timeout = ip_set_get_h32(tb);
+
+ /* Normalize to fit into jiffies */
+ if (timeout > IPSET_MAX_TIMEOUT)
+ timeout = IPSET_MAX_TIMEOUT;
+
+ return timeout;
+}
+
+static inline bool
+ip_set_timeout_expired(const unsigned long *t)
+{
+ return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t);
+}
+
+static inline void
+ip_set_timeout_set(unsigned long *timeout, u32 value)
+{
+ unsigned long t;
+
+ if (!value) {
+ *timeout = IPSET_ELEM_PERMANENT;
+ return;
+ }
+
+ t = msecs_to_jiffies(value * MSEC_PER_SEC) + jiffies;
+ if (t == IPSET_ELEM_PERMANENT)
+ /* Bingo! :-) */
+ t--;
+ *timeout = t;
+}
+
+void ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
+ const struct ip_set_ext *ext);
+
+static inline void
+ip_set_init_counter(struct ip_set_counter *counter,
+ const struct ip_set_ext *ext)
+{
+ if (ext->bytes != ULLONG_MAX)
+ atomic64_set(&(counter)->bytes, (long long)(ext->bytes));
+ if (ext->packets != ULLONG_MAX)
+ atomic64_set(&(counter)->packets, (long long)(ext->packets));
+}
+
+static inline void
+ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
+ const struct ip_set_ext *ext)
{
- return 4 * ((((b - a + 8) / 8) + 3) / 4);
+ *skbinfo = ext->skbinfo;
}
-#include <linux/netfilter/ipset/ip_set_timeout.h>
-#include <linux/netfilter/ipset/ip_set_comment.h>
-#include <linux/netfilter/ipset/ip_set_counter.h>
-#include <linux/netfilter/ipset/ip_set_skbinfo.h>
+static inline void
+nf_inet_addr_mask_inplace(union nf_inet_addr *a1,
+ const union nf_inet_addr *mask)
+{
+ a1->all[0] &= mask->all[0];
+ a1->all[1] &= mask->all[1];
+ a1->all[2] &= mask->all[2];
+ a1->all[3] &= mask->all[3];
+}
#define IP_SET_INIT_KEXT(skb, opt, set) \
- { .bytes = (skb)->len, .packets = 1, \
+ { .bytes = (skb)->len, .packets = 1, .target = true,\
.timeout = ip_set_adt_opt_timeout(opt, set) }
#define IP_SET_INIT_UEXT(set) \
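
The timeout helpers moved above store an absolute jiffies deadline in each element, with 0 (IPSET_ELEM_PERMANENT) reserved for entries that never expire. A minimal sketch of how a set type could drive them, assuming a hypothetical element with an embedded unsigned long timeout field:

struct my_elem {				/* hypothetical element type */
	u32 ip;
	unsigned long timeout;			/* absolute expiry, in jiffies */
};

static void my_elem_set_timeout(struct my_elem *e, u32 secs)
{
	/* 0 maps to IPSET_ELEM_PERMANENT; a computed deadline that
	 * collides with that sentinel is nudged back one jiffy. */
	ip_set_timeout_set(&e->timeout, secs);
}

static bool my_elem_alive(const struct my_elem *e)
{
	/* Permanent entries never expire; everything else is
	 * compared against the current jiffies value. */
	return !ip_set_timeout_expired(&e->timeout);
}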
diff --git a/include/linux/netfilter/ipset/ip_set_bitmap.h b/include/linux/netfilter/ipset/ip_set_bitmap.h
index 366d6c0ea04f..fcc4d214a788 100644
--- a/include/linux/netfilter/ipset/ip_set_bitmap.h
+++ b/include/linux/netfilter/ipset/ip_set_bitmap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IP_SET_BITMAP_H
#define __IP_SET_BITMAP_H
@@ -11,18 +12,4 @@ enum {
IPSET_ADD_START_STORED_TIMEOUT,
};
-/* Common functions */
-
-static inline u32
-range_to_mask(u32 from, u32 to, u8 *bits)
-{
- u32 mask = 0xFFFFFFFE;
-
- *bits = 32;
- while (--(*bits) > 0 && mask && (to & mask) != from)
- mask <<= 1;
-
- return mask;
-}
-
#endif /* __IP_SET_BITMAP_H */
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
deleted file mode 100644
index 8e2bab1e8e90..000000000000
--- a/include/linux/netfilter/ipset/ip_set_comment.h
+++ /dev/null
@@ -1,76 +0,0 @@
-#ifndef _IP_SET_COMMENT_H
-#define _IP_SET_COMMENT_H
-
-/* Copyright (C) 2013 Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifdef __KERNEL__
-
-static inline char*
-ip_set_comment_uget(struct nlattr *tb)
-{
- return nla_data(tb);
-}
-
-/* Called from uadd only, protected by the set spinlock.
- * The kadt functions don't use the comment extensions in any way.
- */
-static inline void
-ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
- const struct ip_set_ext *ext)
-{
- struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1);
- size_t len = ext->comment ? strlen(ext->comment) : 0;
-
- if (unlikely(c)) {
- set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
- kfree_rcu(c, rcu);
- rcu_assign_pointer(comment->c, NULL);
- }
- if (!len)
- return;
- if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
- len = IPSET_MAX_COMMENT_SIZE;
- c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
- if (unlikely(!c))
- return;
- strlcpy(c->str, ext->comment, len + 1);
- set->ext_size += sizeof(*c) + strlen(c->str) + 1;
- rcu_assign_pointer(comment->c, c);
-}
-
-/* Used only when dumping a set, protected by rcu_read_lock_bh() */
-static inline int
-ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
-{
- struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
-
- if (!c)
- return 0;
- return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str);
-}
-
-/* Called from uadd/udel, flush or the garbage collectors protected
- * by the set spinlock.
- * Called when the set is destroyed and when there can't be any user
- * of the set data anymore.
- */
-static inline void
-ip_set_comment_free(struct ip_set *set, struct ip_set_comment *comment)
-{
- struct ip_set_comment_rcu *c;
-
- c = rcu_dereference_protected(comment->c, 1);
- if (unlikely(!c))
- return;
- set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
- kfree_rcu(c, rcu);
- rcu_assign_pointer(comment->c, NULL);
-}
-
-#endif
-#endif
diff --git a/include/linux/netfilter/ipset/ip_set_counter.h b/include/linux/netfilter/ipset/ip_set_counter.h
deleted file mode 100644
index bb6fba480118..000000000000
--- a/include/linux/netfilter/ipset/ip_set_counter.h
+++ /dev/null
@@ -1,75 +0,0 @@
-#ifndef _IP_SET_COUNTER_H
-#define _IP_SET_COUNTER_H
-
-/* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifdef __KERNEL__
-
-static inline void
-ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
-{
- atomic64_add((long long)bytes, &(counter)->bytes);
-}
-
-static inline void
-ip_set_add_packets(u64 packets, struct ip_set_counter *counter)
-{
- atomic64_add((long long)packets, &(counter)->packets);
-}
-
-static inline u64
-ip_set_get_bytes(const struct ip_set_counter *counter)
-{
- return (u64)atomic64_read(&(counter)->bytes);
-}
-
-static inline u64
-ip_set_get_packets(const struct ip_set_counter *counter)
-{
- return (u64)atomic64_read(&(counter)->packets);
-}
-
-static inline void
-ip_set_update_counter(struct ip_set_counter *counter,
- const struct ip_set_ext *ext,
- struct ip_set_ext *mext, u32 flags)
-{
- if (ext->packets != ULLONG_MAX &&
- !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
- ip_set_add_bytes(ext->bytes, counter);
- ip_set_add_packets(ext->packets, counter);
- }
- if (flags & IPSET_FLAG_MATCH_COUNTERS) {
- mext->packets = ip_set_get_packets(counter);
- mext->bytes = ip_set_get_bytes(counter);
- }
-}
-
-static inline bool
-ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter)
-{
- return nla_put_net64(skb, IPSET_ATTR_BYTES,
- cpu_to_be64(ip_set_get_bytes(counter)),
- IPSET_ATTR_PAD) ||
- nla_put_net64(skb, IPSET_ATTR_PACKETS,
- cpu_to_be64(ip_set_get_packets(counter)),
- IPSET_ATTR_PAD);
-}
-
-static inline void
-ip_set_init_counter(struct ip_set_counter *counter,
- const struct ip_set_ext *ext)
-{
- if (ext->bytes != ULLONG_MAX)
- atomic64_set(&(counter)->bytes, (long long)(ext->bytes));
- if (ext->packets != ULLONG_MAX)
- atomic64_set(&(counter)->packets, (long long)(ext->packets));
-}
-
-#endif /* __KERNEL__ */
-#endif /* _IP_SET_COUNTER_H */
diff --git a/include/linux/netfilter/ipset/ip_set_getport.h b/include/linux/netfilter/ipset/ip_set_getport.h
index 90d09300e954..1ecaabd9a048 100644
--- a/include/linux/netfilter/ipset/ip_set_getport.h
+++ b/include/linux/netfilter/ipset/ip_set_getport.h
@@ -1,10 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IP_SET_GETPORT_H
#define _IP_SET_GETPORT_H
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <uapi/linux/in.h>
+
extern bool ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
__be16 *port, u8 *proto);
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
extern bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
__be16 *port, u8 *proto);
#else
@@ -15,9 +20,6 @@ static inline bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
}
#endif
-extern bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src,
- __be16 *port);
-
static inline bool ip_set_proto_with_ports(u8 proto)
{
switch (proto) {
diff --git a/include/linux/netfilter/ipset/ip_set_hash.h b/include/linux/netfilter/ipset/ip_set_hash.h
index f98ddfb094cb..838abab672af 100644
--- a/include/linux/netfilter/ipset/ip_set_hash.h
+++ b/include/linux/netfilter/ipset/ip_set_hash.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IP_SET_HASH_H
#define __IP_SET_HASH_H
diff --git a/include/linux/netfilter/ipset/ip_set_list.h b/include/linux/netfilter/ipset/ip_set_list.h
index fe2622a00151..a61fe2a7e655 100644
--- a/include/linux/netfilter/ipset/ip_set_list.h
+++ b/include/linux/netfilter/ipset/ip_set_list.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IP_SET_LIST_H
#define __IP_SET_LIST_H
diff --git a/include/linux/netfilter/ipset/ip_set_skbinfo.h b/include/linux/netfilter/ipset/ip_set_skbinfo.h
deleted file mode 100644
index 29d7ef2bc3fa..000000000000
--- a/include/linux/netfilter/ipset/ip_set_skbinfo.h
+++ /dev/null
@@ -1,46 +0,0 @@
-#ifndef _IP_SET_SKBINFO_H
-#define _IP_SET_SKBINFO_H
-
-/* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifdef __KERNEL__
-
-static inline void
-ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo,
- const struct ip_set_ext *ext,
- struct ip_set_ext *mext, u32 flags)
-{
- mext->skbinfo = *skbinfo;
-}
-
-static inline bool
-ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo)
-{
- /* Send nonzero parameters only */
- return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
- nla_put_net64(skb, IPSET_ATTR_SKBMARK,
- cpu_to_be64((u64)skbinfo->skbmark << 32 |
- skbinfo->skbmarkmask),
- IPSET_ATTR_PAD)) ||
- (skbinfo->skbprio &&
- nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
- cpu_to_be32(skbinfo->skbprio))) ||
- (skbinfo->skbqueue &&
- nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
- cpu_to_be16(skbinfo->skbqueue)));
-}
-
-static inline void
-ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
- const struct ip_set_ext *ext)
-{
- *skbinfo = ext->skbinfo;
-}
-
-#endif /* __KERNEL__ */
-#endif /* _IP_SET_SKBINFO_H */
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
deleted file mode 100644
index bfb3531fd88a..000000000000
--- a/include/linux/netfilter/ipset/ip_set_timeout.h
+++ /dev/null
@@ -1,73 +0,0 @@
-#ifndef _IP_SET_TIMEOUT_H
-#define _IP_SET_TIMEOUT_H
-
-/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifdef __KERNEL__
-
-/* How often should the gc be run by default */
-#define IPSET_GC_TIME (3 * 60)
-
-/* Timeout period depending on the timeout value of the given set */
-#define IPSET_GC_PERIOD(timeout) \
- ((timeout/3) ? min_t(u32, (timeout)/3, IPSET_GC_TIME) : 1)
-
-/* Entry is set with no timeout value */
-#define IPSET_ELEM_PERMANENT 0
-
-/* Set is defined with timeout support: timeout value may be 0 */
-#define IPSET_NO_TIMEOUT UINT_MAX
-
-#define ip_set_adt_opt_timeout(opt, set) \
-((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout)
-
-static inline unsigned int
-ip_set_timeout_uget(struct nlattr *tb)
-{
- unsigned int timeout = ip_set_get_h32(tb);
-
- /* Normalize to fit into jiffies */
- if (timeout > UINT_MAX/MSEC_PER_SEC)
- timeout = UINT_MAX/MSEC_PER_SEC;
-
- /* Userspace supplied TIMEOUT parameter: adjust crazy size */
- return timeout == IPSET_NO_TIMEOUT ? IPSET_NO_TIMEOUT - 1 : timeout;
-}
-
-static inline bool
-ip_set_timeout_expired(const unsigned long *t)
-{
- return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t);
-}
-
-static inline void
-ip_set_timeout_set(unsigned long *timeout, u32 value)
-{
- unsigned long t;
-
- if (!value) {
- *timeout = IPSET_ELEM_PERMANENT;
- return;
- }
-
- t = msecs_to_jiffies(value * MSEC_PER_SEC) + jiffies;
- if (t == IPSET_ELEM_PERMANENT)
- /* Bingo! :-) */
- t--;
- *timeout = t;
-}
-
-static inline u32
-ip_set_timeout_get(const unsigned long *timeout)
-{
- return *timeout == IPSET_ELEM_PERMANENT ? 0 :
- jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
-}
-
-#endif /* __KERNEL__ */
-#endif /* _IP_SET_TIMEOUT_H */
diff --git a/include/linux/netfilter/ipset/pfxlen.h b/include/linux/netfilter/ipset/pfxlen.h
index 1afbb94b4b65..f59094e6158b 100644
--- a/include/linux/netfilter/ipset/pfxlen.h
+++ b/include/linux/netfilter/ipset/pfxlen.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PFXLEN_H
#define _PFXLEN_H
diff --git a/include/linux/netfilter/nf_conntrack_amanda.h b/include/linux/netfilter/nf_conntrack_amanda.h
index 4b59a1584959..6f0ac896fcc9 100644
--- a/include/linux/netfilter/nf_conntrack_amanda.h
+++ b/include/linux/netfilter/nf_conntrack_amanda.h
@@ -1,7 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_CONNTRACK_AMANDA_H
#define _NF_CONNTRACK_AMANDA_H
/* AMANDA tracking. */
+#include <linux/netfilter.h>
+#include <linux/skbuff.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+
extern unsigned int (*nf_nat_amanda_hook)(struct sk_buff *skb,
enum ip_conntrack_info ctinfo,
unsigned int protoff,
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 1d1ef4e20512..2770db2fa080 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -1,14 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_CONNTRACK_COMMON_H
#define _NF_CONNTRACK_COMMON_H
+#include <linux/refcount.h>
#include <uapi/linux/netfilter/nf_conntrack_common.h>
struct ip_conntrack_stat {
unsigned int found;
unsigned int invalid;
- unsigned int ignore;
unsigned int insert;
unsigned int insert_failed;
+ unsigned int clash_resolve;
unsigned int drop;
unsigned int early_drop;
unsigned int error;
@@ -16,9 +18,28 @@ struct ip_conntrack_stat {
unsigned int expect_create;
unsigned int expect_delete;
unsigned int search_restart;
+ unsigned int chaintoolong;
};
-/* call to create an explicit dependency on nf_conntrack. */
-void need_conntrack(void);
+#define NFCT_INFOMASK 7UL
+#define NFCT_PTRMASK ~(NFCT_INFOMASK)
+
+struct nf_conntrack {
+ refcount_t use;
+};
+
+void nf_conntrack_destroy(struct nf_conntrack *nfct);
+
+/* like nf_ct_put, but without module dependency on nf_conntrack */
+static inline void nf_conntrack_put(struct nf_conntrack *nfct)
+{
+ if (nfct && refcount_dec_and_test(&nfct->use))
+ nf_conntrack_destroy(nfct);
+}
+static inline void nf_conntrack_get(struct nf_conntrack *nfct)
+{
+ if (nfct)
+ refcount_inc(&nfct->use);
+}
#endif /* _NF_CONNTRACK_COMMON_H */
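
The refcounting pair above is deliberately self-contained: nf_conntrack_get() is a NULL-safe refcount_inc(), and the final nf_conntrack_put() reaches nf_conntrack_destroy() through a call resolved at runtime, so callers need no module dependency on nf_conntrack. A hedged sketch of caching a reference; the surrounding struct is purely illustrative:

struct my_flow_cache {				/* hypothetical caller state */
	struct nf_conntrack *nfct;
};

static void cache_ct(struct my_flow_cache *c, struct nf_conntrack *nfct)
{
	nf_conntrack_get(nfct);		/* NULL-safe refcount_inc() */
	c->nfct = nfct;
}

static void drop_ct(struct my_flow_cache *c)
{
	/* The last put triggers nf_conntrack_destroy(), resolved at
	 * runtime rather than at link time. */
	nf_conntrack_put(c->nfct);
	c->nfct = NULL;
}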
diff --git a/include/linux/netfilter/nf_conntrack_dccp.h b/include/linux/netfilter/nf_conntrack_dccp.h
index ff721d7325cf..c509ed76e714 100644
--- a/include/linux/netfilter/nf_conntrack_dccp.h
+++ b/include/linux/netfilter/nf_conntrack_dccp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_CONNTRACK_DCCP_H
#define _NF_CONNTRACK_DCCP_H
@@ -24,7 +25,6 @@ enum ct_dccp_roles {
};
#define CT_DCCP_ROLE_MAX (__CT_DCCP_ROLE_MAX - 1)
-#ifdef __KERNEL__
#include <linux/netfilter/nf_conntrack_tuple_common.h>
struct nf_ct_dccp {
@@ -35,6 +35,4 @@ struct nf_ct_dccp {
u_int64_t handshake_seq;
};
-#endif /* __KERNEL__ */
-
#endif /* _NF_CONNTRACK_DCCP_H */
diff --git a/include/linux/netfilter/nf_conntrack_ftp.h b/include/linux/netfilter/nf_conntrack_ftp.h
index 5f818b01e035..0e38302820b9 100644
--- a/include/linux/netfilter/nf_conntrack_ftp.h
+++ b/include/linux/netfilter/nf_conntrack_ftp.h
@@ -1,8 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_CONNTRACK_FTP_H
#define _NF_CONNTRACK_FTP_H
+#include <linux/netfilter.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <net/netfilter/nf_conntrack_expect.h>
#include <uapi/linux/netfilter/nf_conntrack_ftp.h>
-
+#include <uapi/linux/netfilter/nf_conntrack_tuple_common.h>
#define FTP_PORT 21
@@ -19,8 +24,6 @@ struct nf_ct_ftp_master {
u_int16_t flags[IP_CT_DIR_MAX];
};
-struct nf_conntrack_expect;
-
/* For NAT to hook in when we find a packet which describes what other
* connection we should expect. */
extern unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb,
diff --git a/include/linux/netfilter/nf_conntrack_h323.h b/include/linux/netfilter/nf_conntrack_h323.h
index 858d9b214053..9e937f64a1ad 100644
--- a/include/linux/netfilter/nf_conntrack_h323.h
+++ b/include/linux/netfilter/nf_conntrack_h323.h
@@ -1,9 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_CONNTRACK_H323_H
#define _NF_CONNTRACK_H323_H
-#ifdef __KERNEL__
-
+#include <linux/netfilter.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
#include <linux/netfilter/nf_conntrack_h323_asn1.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <uapi/linux/netfilter/nf_conntrack_tuple_common.h>
#define RAS_PORT 1719
#define Q931_PORT 1720
@@ -27,8 +31,6 @@ struct nf_ct_h323_master {
};
};
-struct nf_conn;
-
int get_h225_addr(struct nf_conn *ct, unsigned char *data,
TransportAddress *taddr, union nf_inet_addr *addr,
__be16 *port);
@@ -36,62 +38,63 @@ void nf_conntrack_h245_expect(struct nf_conn *new,
struct nf_conntrack_expect *this);
void nf_conntrack_q931_expect(struct nf_conn *new,
struct nf_conntrack_expect *this);
-extern int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff,
- unsigned char **data, int dataoff,
- H245_TransportAddress *taddr,
- union nf_inet_addr *addr,
- __be16 port);
-extern int (*set_h225_addr_hook) (struct sk_buff *skb, unsigned int protoff,
- unsigned char **data, int dataoff,
- TransportAddress *taddr,
- union nf_inet_addr *addr,
- __be16 port);
-extern int (*set_sig_addr_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff, unsigned char **data,
- TransportAddress *taddr, int count);
-extern int (*set_ras_addr_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff, unsigned char **data,
- TransportAddress *taddr, int count);
-extern int (*nat_rtp_rtcp_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff, unsigned char **data,
- int dataoff,
- H245_TransportAddress *taddr,
- __be16 port, __be16 rtp_port,
- struct nf_conntrack_expect *rtp_exp,
- struct nf_conntrack_expect *rtcp_exp);
-extern int (*nat_t120_hook) (struct sk_buff *skb, struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
+
+struct nfct_h323_nat_hooks {
+ int (*set_h245_addr)(struct sk_buff *skb, unsigned int protoff,
unsigned char **data, int dataoff,
- H245_TransportAddress *taddr, __be16 port,
- struct nf_conntrack_expect *exp);
-extern int (*nat_h245_hook) (struct sk_buff *skb, struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
+ H245_TransportAddress *taddr,
+ union nf_inet_addr *addr, __be16 port);
+ int (*set_h225_addr)(struct sk_buff *skb, unsigned int protoff,
unsigned char **data, int dataoff,
- TransportAddress *taddr, __be16 port,
- struct nf_conntrack_expect *exp);
-extern int (*nat_callforwarding_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- unsigned char **data, int dataoff,
- TransportAddress *taddr,
- __be16 port,
- struct nf_conntrack_expect *exp);
-extern int (*nat_q931_hook) (struct sk_buff *skb, struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- unsigned char **data, TransportAddress *taddr,
- int idx, __be16 port,
- struct nf_conntrack_expect *exp);
-
-#endif
+ TransportAddress *taddr,
+ union nf_inet_addr *addr, __be16 port);
+ int (*set_sig_addr)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff, unsigned char **data,
+ TransportAddress *taddr, int count);
+ int (*set_ras_addr)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff, unsigned char **data,
+ TransportAddress *taddr, int count);
+ int (*nat_rtp_rtcp)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, int dataoff,
+ H245_TransportAddress *taddr,
+ __be16 port, __be16 rtp_port,
+ struct nf_conntrack_expect *rtp_exp,
+ struct nf_conntrack_expect *rtcp_exp);
+ int (*nat_t120)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, int dataoff,
+ H245_TransportAddress *taddr, __be16 port,
+ struct nf_conntrack_expect *exp);
+ int (*nat_h245)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, int dataoff,
+ TransportAddress *taddr, __be16 port,
+ struct nf_conntrack_expect *exp);
+ int (*nat_callforwarding)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, int dataoff,
+ TransportAddress *taddr, __be16 port,
+ struct nf_conntrack_expect *exp);
+ int (*nat_q931)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, TransportAddress *taddr, int idx,
+ __be16 port, struct nf_conntrack_expect *exp);
+};
+extern const struct nfct_h323_nat_hooks __rcu *nfct_h323_nat_hook;
#endif
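
Replacing nine exported function pointers with one RCU-protected struct means the NAT module publishes everything in a single store and the conntrack side pays one rcu_dereference() per packet. A sketch of both halves, where my_set_h245_addr and friends are hypothetical callback implementations:

/* NAT module side: publish the whole callback table atomically. */
static const struct nfct_h323_nat_hooks my_h323_hooks = {
	.set_h245_addr = my_set_h245_addr,
	.set_h225_addr = my_set_h225_addr,
	/* ... remaining callbacks ... */
};

static int __init my_h323_nat_init(void)
{
	rcu_assign_pointer(nfct_h323_nat_hook, &my_h323_hooks);
	return 0;
}

/* Conntrack side: a single RCU dereference on the packet path. */
static int call_set_h245_addr(struct sk_buff *skb, unsigned int protoff,
			      unsigned char **data, int dataoff,
			      H245_TransportAddress *taddr,
			      union nf_inet_addr *addr, __be16 port)
{
	const struct nfct_h323_nat_hooks *hooks;

	hooks = rcu_dereference(nfct_h323_nat_hook);
	if (!hooks)
		return 0;
	return hooks->set_h245_addr(skb, protoff, data, dataoff,
				    taddr, addr, port);
}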
diff --git a/include/linux/netfilter/nf_conntrack_h323_asn1.h b/include/linux/netfilter/nf_conntrack_h323_asn1.h
index 3176a277eed1..bd6797f823b2 100644
--- a/include/linux/netfilter/nf_conntrack_h323_asn1.h
+++ b/include/linux/netfilter/nf_conntrack_h323_asn1.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
- * ip_conntrack_h323_asn1.h - BER and PER decoding library for H.323
- * conntrack/NAT module.
+ * BER and PER decoding library for H.323 conntrack/NAT module.
*
* Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@users.sourceforge.net>
*
- * This source code is licensed under General Public License version 2.
- *
- *
* This library is based on H.225 version 4, H.235 version 2 and H.245
* version 7. It is extremely optimized to decode only the absolutely
* necessary objects in a signal for Linux kernel NAT module use, so don't
@@ -40,6 +37,8 @@
/*****************************************************************************
* H.323 Types
****************************************************************************/
+
+#include <linux/types.h>
#include <linux/netfilter/nf_conntrack_h323_types.h>
typedef struct {
diff --git a/include/linux/netfilter/nf_conntrack_h323_types.h b/include/linux/netfilter/nf_conntrack_h323_types.h
index b0821f45fbe4..74c6f9241944 100644
--- a/include/linux/netfilter/nf_conntrack_h323_types.h
+++ b/include/linux/netfilter/nf_conntrack_h323_types.h
@@ -1,10 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Generated by Jing Min Zhao's ASN.1 parser, May 16 2007
*
* Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net>
- *
- * This source code is licensed under General Public License version 2.
*/
+#ifndef _NF_CONNTRACK_H323_TYPES_H
+#define _NF_CONNTRACK_H323_TYPES_H
+
typedef struct TransportAddress_ipAddress { /* SEQUENCE */
int options; /* No use */
unsigned int ip;
@@ -932,3 +934,5 @@ typedef struct RasMessage { /* CHOICE */
InfoRequestResponse infoRequestResponse;
};
} RasMessage;
+
+#endif /* _NF_CONNTRACK_H323_TYPES_H */
diff --git a/include/linux/netfilter/nf_conntrack_irc.h b/include/linux/netfilter/nf_conntrack_irc.h
index 4bb9bae67176..d02255f721e1 100644
--- a/include/linux/netfilter/nf_conntrack_irc.h
+++ b/include/linux/netfilter/nf_conntrack_irc.h
@@ -1,7 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_CONNTRACK_IRC_H
#define _NF_CONNTRACK_IRC_H
-#ifdef __KERNEL__
+#include <linux/netfilter.h>
+#include <linux/skbuff.h>
+#include <net/netfilter/nf_conntrack_expect.h>
#define IRC_PORT 6667
@@ -12,5 +15,4 @@ extern unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb,
unsigned int matchlen,
struct nf_conntrack_expect *exp);
-#endif /* __KERNEL__ */
#endif /* _NF_CONNTRACK_IRC_H */
diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h
index 2ab2830316b7..c3bdb4370938 100644
--- a/include/linux/netfilter/nf_conntrack_pptp.h
+++ b/include/linux/netfilter/nf_conntrack_pptp.h
@@ -1,10 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* PPTP constants and structs */
#ifndef _NF_CONNTRACK_PPTP_H
#define _NF_CONNTRACK_PPTP_H
+#include <linux/netfilter.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
#include <linux/netfilter/nf_conntrack_common.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <uapi/linux/netfilter/nf_conntrack_tuple_common.h>
-extern const char *const pptp_msg_name[];
+const char *pptp_msg_name(u_int16_t msg);
/* state of the control session */
enum pptp_ctrlsess_state {
@@ -44,8 +50,6 @@ struct nf_nat_pptp {
__be16 pac_call_id; /* NAT'ed PAC call id */
};
-#ifdef __KERNEL__
-
#define PPTP_CONTROL_PORT 1723
#define PPTP_PACKET_CONTROL 1
@@ -296,31 +300,22 @@ union pptp_ctrl_union {
struct PptpSetLinkInfo setlink;
};
-/* crap needed for nf_conntrack_compat.h */
-struct nf_conn;
-struct nf_conntrack_expect;
-
-extern int
-(*nf_nat_pptp_hook_outbound)(struct sk_buff *skb,
- struct nf_conn *ct, enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- struct PptpControlHeader *ctlh,
- union pptp_ctrl_union *pptpReq);
-
-extern int
-(*nf_nat_pptp_hook_inbound)(struct sk_buff *skb,
- struct nf_conn *ct, enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- struct PptpControlHeader *ctlh,
- union pptp_ctrl_union *pptpReq);
-
-extern void
-(*nf_nat_pptp_hook_exp_gre)(struct nf_conntrack_expect *exp_orig,
- struct nf_conntrack_expect *exp_reply);
-
-extern void
-(*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct,
- struct nf_conntrack_expect *exp);
-
-#endif /* __KERNEL__ */
+struct nf_nat_pptp_hook {
+ int (*outbound)(struct sk_buff *skb,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ struct PptpControlHeader *ctlh,
+ union pptp_ctrl_union *pptpReq);
+ int (*inbound)(struct sk_buff *skb,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ struct PptpControlHeader *ctlh,
+ union pptp_ctrl_union *pptpReq);
+ void (*exp_gre)(struct nf_conntrack_expect *exp_orig,
+ struct nf_conntrack_expect *exp_reply);
+ void (*expectfn)(struct nf_conn *ct,
+ struct nf_conntrack_expect *exp);
+};
+
+extern const struct nf_nat_pptp_hook __rcu *nf_nat_pptp_hook;
#endif /* _NF_CONNTRACK_PPTP_H */
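
The PPTP hooks get the identical struct-plus-RCU conversion; the teardown side of the pattern is the part worth spelling out. A sketch, with the callback names (my_outbound etc.) assumed:

static const struct nf_nat_pptp_hook my_pptp_hooks = {
	.outbound = my_outbound,	/* hypothetical callbacks */
	.inbound  = my_inbound,
	.exp_gre  = my_exp_gre,
	.expectfn = my_expectfn,
};

static int __init my_pptp_nat_init(void)
{
	rcu_assign_pointer(nf_nat_pptp_hook, &my_pptp_hooks);
	return 0;
}

static void __exit my_pptp_nat_exit(void)
{
	RCU_INIT_POINTER(nf_nat_pptp_hook, NULL);
	/* Wait for in-flight rcu_dereference() users to drain before
	 * the module text holding the callbacks can be unloaded. */
	synchronize_rcu();
}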
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index dee0acd0dd31..f33aa6021364 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _CONNTRACK_PROTO_GRE_H
#define _CONNTRACK_PROTO_GRE_H
#include <asm/byteorder.h>
@@ -9,7 +10,6 @@ struct nf_ct_gre {
unsigned int timeout;
};
-#ifdef __KERNEL__
#include <net/netfilter/nf_conntrack_tuple.h>
struct nf_conn;
@@ -18,16 +18,17 @@ struct nf_conn;
struct nf_ct_gre_keymap {
struct list_head list;
struct nf_conntrack_tuple tuple;
+ struct rcu_head rcu;
};
/* add new tuple->key_reply pair to keymap */
int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
struct nf_conntrack_tuple *t);
+void nf_ct_gre_keymap_flush(struct net *net);
/* delete keymap entries */
void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
-void nf_nat_need_gre(void);
-
-#endif /* __KERNEL__ */
+bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
+ struct net *net, struct nf_conntrack_tuple *tuple);
#endif /* _CONNTRACK_PROTO_GRE_H */
diff --git a/include/linux/netfilter/nf_conntrack_sane.h b/include/linux/netfilter/nf_conntrack_sane.h
index 4767d6e23e97..46c7acd1b4a7 100644
--- a/include/linux/netfilter/nf_conntrack_sane.h
+++ b/include/linux/netfilter/nf_conntrack_sane.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_CONNTRACK_SANE_H
#define _NF_CONNTRACK_SANE_H
/* SANE tracking. */
-#ifdef __KERNEL__
-
#define SANE_PORT 6566
enum sane_state {
@@ -16,6 +15,4 @@ struct nf_ct_sane_master {
enum sane_state state;
};
-#endif /* __KERNEL__ */
-
#endif /* _NF_CONNTRACK_SANE_H */
diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h
index 22a16a23cd8a..625f491b95de 100644
--- a/include/linux/netfilter/nf_conntrack_sctp.h
+++ b/include/linux/netfilter/nf_conntrack_sctp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_CONNTRACK_SCTP_H
#define _NF_CONNTRACK_SCTP_H
/* SCTP tracking. */
@@ -8,6 +9,8 @@ struct ip_ct_sctp {
enum sctp_conntrack state;
__be32 vtag[IP_CT_DIR_MAX];
+ u8 last_dir;
+ u8 flags;
};
#endif /* _NF_CONNTRACK_SCTP_H */
diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h
index d5af3c27fb7d..dbc614dfe0d5 100644
--- a/include/linux/netfilter/nf_conntrack_sip.h
+++ b/include/linux/netfilter/nf_conntrack_sip.h
@@ -1,10 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NF_CONNTRACK_SIP_H__
#define __NF_CONNTRACK_SIP_H__
-#ifdef __KERNEL__
-
-#include <net/netfilter/nf_conntrack_expect.h>
+#include <linux/skbuff.h>
#include <linux/types.h>
+#include <net/netfilter/nf_conntrack_expect.h>
#define SIP_PORT 5060
#define SIP_TIMEOUT 3600
@@ -164,7 +164,7 @@ struct nf_nat_sip_hooks {
unsigned int medialen,
union nf_inet_addr *rtp_addr);
};
-extern const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
+extern const struct nf_nat_sip_hooks __rcu *nf_nat_sip_hooks;
int ct_sip_parse_request(const struct nf_conn *ct, const char *dptr,
unsigned int datalen, unsigned int *matchoff,
@@ -195,5 +195,4 @@ int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
enum sdp_header_types term,
unsigned int *matchoff, unsigned int *matchlen);
-#endif /* __KERNEL__ */
#endif /* __NF_CONNTRACK_SIP_H__ */
diff --git a/include/linux/netfilter/nf_conntrack_snmp.h b/include/linux/netfilter/nf_conntrack_snmp.h
index 064bc63a5346..87e4f33eb55f 100644
--- a/include/linux/netfilter/nf_conntrack_snmp.h
+++ b/include/linux/netfilter/nf_conntrack_snmp.h
@@ -1,6 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_CONNTRACK_SNMP_H
#define _NF_CONNTRACK_SNMP_H
+#include <linux/netfilter.h>
+#include <linux/skbuff.h>
+
extern int (*nf_nat_snmp_hook)(struct sk_buff *skb,
unsigned int protoff,
struct nf_conn *ct,
diff --git a/include/linux/netfilter/nf_conntrack_tcp.h b/include/linux/netfilter/nf_conntrack_tcp.h
index 22db9614b584..f9e3a663037b 100644
--- a/include/linux/netfilter/nf_conntrack_tcp.h
+++ b/include/linux/netfilter/nf_conntrack_tcp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_CONNTRACK_TCP_H
#define _NF_CONNTRACK_TCP_H
diff --git a/include/linux/netfilter/nf_conntrack_tftp.h b/include/linux/netfilter/nf_conntrack_tftp.h
index c78d38fdb050..dc4c1b9beac0 100644
--- a/include/linux/netfilter/nf_conntrack_tftp.h
+++ b/include/linux/netfilter/nf_conntrack_tftp.h
@@ -1,8 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_CONNTRACK_TFTP_H
#define _NF_CONNTRACK_TFTP_H
#define TFTP_PORT 69
+#include <linux/netfilter.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+
struct tftphdr {
__be16 opcode;
};
diff --git a/include/linux/netfilter/nf_conntrack_zones_common.h b/include/linux/netfilter/nf_conntrack_zones_common.h
index 5d7cf36d4766..8f3905e12a64 100644
--- a/include/linux/netfilter/nf_conntrack_zones_common.h
+++ b/include/linux/netfilter/nf_conntrack_zones_common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_CONNTRACK_ZONES_COMMON_H
#define _NF_CONNTRACK_ZONES_COMMON_H
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 41d04e9d088a..e9a9ab34a7cc 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NFNETLINK_H
#define _NFNETLINK_H
@@ -6,21 +7,33 @@
#include <net/netlink.h>
#include <uapi/linux/netfilter/nfnetlink.h>
+struct nfnl_info {
+ struct net *net;
+ struct sock *sk;
+ const struct nlmsghdr *nlh;
+ const struct nfgenmsg *nfmsg;
+ struct netlink_ext_ack *extack;
+};
+
+enum nfnl_callback_type {
+ NFNL_CB_UNSPEC = 0,
+ NFNL_CB_MUTEX,
+ NFNL_CB_RCU,
+ NFNL_CB_BATCH,
+};
+
struct nfnl_callback {
- int (*call)(struct net *net, struct sock *nl, struct sk_buff *skb,
- const struct nlmsghdr *nlh,
- const struct nlattr * const cda[],
- struct netlink_ext_ack *extack);
- int (*call_rcu)(struct net *net, struct sock *nl, struct sk_buff *skb,
- const struct nlmsghdr *nlh,
- const struct nlattr * const cda[],
- struct netlink_ext_ack *extack);
- int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb,
- const struct nlmsghdr *nlh,
- const struct nlattr * const cda[],
- struct netlink_ext_ack *extack);
- const struct nla_policy *policy; /* netlink attribute policy */
- const u_int16_t attr_count; /* number of nlattr's */
+ int (*call)(struct sk_buff *skb, const struct nfnl_info *info,
+ const struct nlattr * const cda[]);
+ const struct nla_policy *policy;
+ enum nfnl_callback_type type;
+ __u16 attr_count;
+};
+
+enum nfnl_abort_action {
+ NFNL_ABORT_NONE = 0,
+ NFNL_ABORT_AUTOLOAD,
+ NFNL_ABORT_VALIDATE,
};
struct nfnetlink_subsystem {
@@ -28,8 +41,10 @@ struct nfnetlink_subsystem {
__u8 subsys_id; /* nfnetlink subsystem ID */
__u8 cb_count; /* number of callbacks */
const struct nfnl_callback *cb; /* callback for individual types */
+ struct module *owner;
int (*commit)(struct net *net, struct sk_buff *skb);
- int (*abort)(struct net *net, struct sk_buff *skb);
+ int (*abort)(struct net *net, struct sk_buff *skb,
+ enum nfnl_abort_action action);
bool (*valid_genid)(struct net *net, u32 genid);
};
@@ -40,14 +55,42 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group);
int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
unsigned int group, int echo, gfp_t flags);
int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
-int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
- int flags);
+int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid);
+void nfnetlink_broadcast(struct net *net, struct sk_buff *skb, __u32 portid,
+ __u32 group, gfp_t allocation);
static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type)
{
return subsys << 8 | msg_type;
}
+static inline void nfnl_fill_hdr(struct nlmsghdr *nlh, u8 family, u8 version,
+ __be16 res_id)
+{
+ struct nfgenmsg *nfmsg;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = family;
+ nfmsg->version = version;
+ nfmsg->res_id = res_id;
+}
+
+static inline struct nlmsghdr *nfnl_msg_put(struct sk_buff *skb, u32 portid,
+ u32 seq, int type, int flags,
+ u8 family, u8 version,
+ __be16 res_id)
+{
+ struct nlmsghdr *nlh;
+
+ nlh = nlmsg_put(skb, portid, seq, type, sizeof(struct nfgenmsg), flags);
+ if (!nlh)
+ return NULL;
+
+ nfnl_fill_hdr(nlh, family, version, res_id);
+
+ return nlh;
+}
+
void nfnl_lock(__u8 subsys_id);
void nfnl_unlock(__u8 subsys_id);
#ifdef CONFIG_PROVE_LOCKING
@@ -59,19 +102,6 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id)
}
#endif /* CONFIG_PROVE_LOCKING */
-/*
- * nfnl_dereference - fetch RCU pointer when updates are prevented by subsys mutex
- *
- * @p: The pointer to read, prior to dereferencing
- * @ss: The nfnetlink subsystem ID
- *
- * Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
- * caller holds the NFNL subsystem mutex.
- */
-#define nfnl_dereference(p, ss) \
- rcu_dereference_protected(p, lockdep_nfnl_is_held(ss))
-
#define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
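
nfnl_msg_put() folds the nlmsg_put() call together with filling the struct nfgenmsg header that prefixes every nfnetlink payload. A sketch of building a message with it; the subsystem and message-type values are placeholders:

static int build_notify(struct sk_buff *skb, u32 portid, u32 seq,
			u8 subsys, u8 msg_type)
{
	struct nlmsghdr *nlh;

	nlh = nfnl_msg_put(skb, portid, seq,
			   nfnl_msg_type(subsys, msg_type),
			   0, AF_UNSPEC, NFNETLINK_V0, 0);
	if (!nlh)
		return -EMSGSIZE;

	/* ... append subsystem-specific nla_put() attributes ... */

	nlmsg_end(skb, nlh);
	return 0;
}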
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
index 664da0048625..beee8bffe49e 100644
--- a/include/linux/netfilter/nfnetlink_acct.h
+++ b/include/linux/netfilter/nfnetlink_acct.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NFNL_ACCT_H_
#define _NFNL_ACCT_H_
@@ -15,6 +16,5 @@ struct nf_acct;
struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name);
void nfnl_acct_put(struct nf_acct *acct);
void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
-int nfnl_acct_overquota(struct net *net, const struct sk_buff *skb,
- struct nf_acct *nfacct);
+int nfnl_acct_overquota(struct net *net, struct nf_acct *nfacct);
#endif /* _NFNL_ACCT_H */
diff --git a/include/linux/netfilter/nfnetlink_osf.h b/include/linux/netfilter/nfnetlink_osf.h
new file mode 100644
index 000000000000..788613f36935
--- /dev/null
+++ b/include/linux/netfilter/nfnetlink_osf.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NFOSF_H
+#define _NFOSF_H
+
+#include <uapi/linux/netfilter/nfnetlink_osf.h>
+
+enum osf_fmatch_states {
+ /* Packet does not match the fingerprint */
+ FMATCH_WRONG = 0,
+ /* Packet matches the fingerprint */
+ FMATCH_OK,
+ /* Options do not match the fingerprint, but header does */
+ FMATCH_OPT_WRONG,
+};
+
+extern struct list_head nf_osf_fingers[2];
+
+struct nf_osf_finger {
+ struct rcu_head rcu_head;
+ struct list_head finger_entry;
+ struct nf_osf_user_finger finger;
+};
+
+struct nf_osf_data {
+ const char *genre;
+ const char *version;
+};
+
+bool nf_osf_match(const struct sk_buff *skb, u_int8_t family,
+ int hooknum, struct net_device *in, struct net_device *out,
+ const struct nf_osf_info *info, struct net *net,
+ const struct list_head *nf_osf_fingers);
+
+bool nf_osf_find(const struct sk_buff *skb,
+ const struct list_head *nf_osf_fingers,
+ const int ttl_check, struct nf_osf_data *data);
+
+#endif /* _NFOSF_H */
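
A sketch of consuming this new header from a match: nf_osf_find() scans the shared fingerprint list and, on success, points nf_osf_data at the matching genre/version strings. Passing -1 to skip the TTL comparison is an assumption here; real callers derive the mode from their userspace options:

static bool classify_genre(const struct sk_buff *skb, const char **genre)
{
	struct nf_osf_data data;

	/* -1: skip the TTL check entirely (assumed convention). */
	if (!nf_osf_find(skb, nf_osf_fingers, -1, &data))
		return false;

	*genre = data.genre;	/* points into the matching fingerprint */
	return true;
}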
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index b3044c2c62cb..5897f3dbaf7c 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X_TABLES_H
#define _X_TABLES_H
@@ -35,8 +36,8 @@ struct xt_action_param {
const void *matchinfo, *targinfo;
};
const struct nf_hook_state *state;
- int fragoff;
unsigned int thoff;
+ u16 fragoff;
bool hotdrop;
};
@@ -157,7 +158,7 @@ struct xt_match {
/* Called when entry of this type deleted. */
void (*destroy)(const struct xt_mtdtor_param *);
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
/* Called when userspace align differs from kernel space one */
void (*compat_from_user)(void *dst, const void *src);
int (*compat_to_user)(void __user *dst, const void *src);
@@ -168,7 +169,7 @@ struct xt_match {
const char *table;
unsigned int matchsize;
unsigned int usersize;
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
unsigned int compatsize;
#endif
unsigned int hooks;
@@ -198,7 +199,7 @@ struct xt_target {
/* Called when entry of this type deleted. */
void (*destroy)(const struct xt_tgdtor_param *);
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
/* Called when userspace align differs from kernel space one */
void (*compat_from_user)(void *dst, const void *src);
int (*compat_to_user)(void __user *dst, const void *src);
@@ -209,7 +210,7 @@ struct xt_target {
const char *table;
unsigned int targetsize;
unsigned int usersize;
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
unsigned int compatsize;
#endif
unsigned int hooks;
@@ -228,15 +229,15 @@ struct xt_table {
/* Man behind the curtain... */
struct xt_table_info *private;
+ /* hook ops that register the table with the netfilter core */
+ struct nf_hook_ops *ops;
+
/* Set this to THIS_MODULE if you are a module, otherwise NULL */
struct module *me;
u_int8_t af; /* address/protocol family */
int priority; /* hook order */
- /* called when table is needed in the given netns */
- int (*table_init)(struct net *net);
-
/* A unique name... */
const char name[XT_TABLE_MAXNAMELEN];
};
@@ -263,7 +264,7 @@ struct xt_table_info {
unsigned int stacksize;
void ***jumpstack;
- unsigned char entries[0] __aligned(8);
+ unsigned char entries[] __aligned(8);
};
int xt_register_target(struct xt_target *target);
@@ -280,13 +281,17 @@ int xt_check_entry_offsets(const void *base, const char *elems,
unsigned int target_offset,
unsigned int next_offset);
+int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks);
+
unsigned int *xt_alloc_entry_offsets(unsigned int size);
bool xt_find_jump_offset(const unsigned int *offsets,
unsigned int target, unsigned int size);
-int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
+int xt_check_proc_name(const char *name, unsigned int size);
+
+int xt_check_match(struct xt_mtchk_param *, unsigned int size, u16 proto,
bool inv_proto);
-int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
+int xt_check_target(struct xt_tgchk_param *, unsigned int size, u16 proto,
bool inv_proto);
int xt_match_to_user(const struct xt_entry_match *m,
@@ -296,8 +301,9 @@ int xt_target_to_user(const struct xt_entry_target *t,
int xt_data_to_user(void __user *dst, const void *src,
int usersize, int size, int aligned_size);
-void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
- struct xt_counters_info *info, bool compat);
+void *xt_copy_counters(sockptr_t arg, unsigned int len,
+ struct xt_counters_info *info);
+struct xt_counters *xt_counters_alloc(unsigned int counters);
struct xt_table *xt_register_table(struct net *net,
const struct xt_table *table,
@@ -311,14 +317,16 @@ struct xt_table_info *xt_replace_table(struct xt_table *table,
int *error);
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
-struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
int *err);
+struct xt_table *xt_find_table(struct net *net, u8 af, const char *name);
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
const char *name);
+struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
+ const char *name);
void xt_table_unlock(struct xt_table *t);
int xt_proto_init(struct net *net, u_int8_t af);
@@ -329,7 +337,7 @@ void xt_free_table_info(struct xt_table_info *info);
/**
* xt_recseq - recursive seqcount for netfilter use
- *
+ *
* Packet processing changes the seqcount only if no recursion happened
* get_counters() can use read_seqcount_begin()/read_seqcount_retry(),
* because we use the normal seqcount convention :
@@ -369,7 +377,7 @@ static inline unsigned int xt_write_recseq_begin(void)
* since addend is most likely 1
*/
__this_cpu_add(xt_recseq.sequence, addend);
- smp_wmb();
+ smp_mb();
return addend;
}
@@ -441,7 +449,10 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);
-#ifdef CONFIG_COMPAT
+int xt_register_template(const struct xt_table *t, int(*table_init)(struct net *net));
+void xt_unregister_template(const struct xt_table *t);
+
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
struct compat_xt_entry_match {
@@ -457,7 +468,7 @@ struct compat_xt_entry_match {
} kernel;
u_int16_t match_size;
} u;
- unsigned char data[0];
+ unsigned char data[];
};
struct compat_xt_entry_target {
@@ -473,7 +484,7 @@ struct compat_xt_entry_target {
} kernel;
u_int16_t target_size;
} u;
- unsigned char data[0];
+ unsigned char data[];
};
/* FIXME: this works only on 32 bit tasks
@@ -487,7 +498,7 @@ struct compat_xt_counters {
struct compat_xt_counters_info {
char name[XT_TABLE_MAXNAMELEN];
compat_uint_t num_counters;
- struct compat_xt_counters counters[0];
+ struct compat_xt_counters counters[];
};
struct _compat_xt_align {
@@ -504,7 +515,7 @@ void xt_compat_unlock(u_int8_t af);
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
void xt_compat_flush_offsets(u_int8_t af);
-void xt_compat_init_offsets(u_int8_t af, unsigned int number);
+int xt_compat_init_offsets(u8 af, unsigned int number);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
int xt_compat_match_offset(const struct xt_match *match);
@@ -522,5 +533,5 @@ int xt_compat_check_entry_offsets(const void *base, const char *elems,
unsigned int target_offset,
unsigned int next_offset);
-#endif /* CONFIG_COMPAT */
+#endif /* CONFIG_NETFILTER_XTABLES_COMPAT */
#endif /* _X_TABLES_H */
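
The smp_wmb() → smp_mb() change hardens the "recursive seqcount" described in the comment above: only the outermost writer advances the sequence, and readers such as get_counters() retry while it is odd. A writer-side sketch, assuming xt_write_recseq_end() is the matching helper from this header:

static void update_counters(struct xt_counters *ctr, unsigned int len)
{
	unsigned int addend;

	local_bh_disable();
	/* addend is 0 when table traversal recursed into itself,
	 * 1 otherwise; only the outermost level moves the seqcount. */
	addend = xt_write_recseq_begin();

	ctr->bcnt += len;
	ctr->pcnt++;

	xt_write_recseq_end(addend);
	local_bh_enable();
}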
diff --git a/include/linux/netfilter/xt_hashlimit.h b/include/linux/netfilter/xt_hashlimit.h
deleted file mode 100644
index 074790c0cf74..000000000000
--- a/include/linux/netfilter/xt_hashlimit.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _XT_HASHLIMIT_H
-#define _XT_HASHLIMIT_H
-
-#include <uapi/linux/netfilter/xt_hashlimit.h>
-
-#define XT_HASHLIMIT_ALL (XT_HASHLIMIT_HASH_DIP | XT_HASHLIMIT_HASH_DPT | \
- XT_HASHLIMIT_HASH_SIP | XT_HASHLIMIT_HASH_SPT | \
- XT_HASHLIMIT_INVERT | XT_HASHLIMIT_BYTES)
-#endif /*_XT_HASHLIMIT_H*/
diff --git a/include/linux/netfilter/xt_physdev.h b/include/linux/netfilter/xt_physdev.h
deleted file mode 100644
index 5b5e41716d69..000000000000
--- a/include/linux/netfilter/xt_physdev.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _XT_PHYSDEV_H
-#define _XT_PHYSDEV_H
-
-#include <linux/if.h>
-#include <uapi/linux/netfilter/xt_physdev.h>
-
-#endif /*_XT_PHYSDEV_H*/
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
index 029b95e8924e..a40aaf645fa4 100644
--- a/include/linux/netfilter_arp/arp_tables.h
+++ b/include/linux/netfilter_arp/arp_tables.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Format of an ARP firewall descriptor
*
@@ -50,14 +51,13 @@ struct arpt_error {
extern void *arpt_alloc_initial_table(const struct xt_table *);
int arpt_register_table(struct net *net, const struct xt_table *table,
const struct arpt_replace *repl,
- const struct nf_hook_ops *ops, struct xt_table **res);
-void arpt_unregister_table(struct net *net, struct xt_table *table,
- const struct nf_hook_ops *ops);
-extern unsigned int arpt_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct xt_table *table);
+ const struct nf_hook_ops *ops);
+void arpt_unregister_table(struct net *net, const char *name);
+void arpt_unregister_table_pre_exit(struct net *net, const char *name);
+extern unsigned int arpt_do_table(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
struct compat_arpt_entry {
@@ -66,7 +66,7 @@ struct compat_arpt_entry {
__u16 next_offset;
compat_uint_t comefrom;
struct compat_xt_counters counters;
- unsigned char elems[0];
+ unsigned char elems[];
};
static inline struct xt_entry_target *
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 2ed40c402b5e..f980edfdd278 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -1,18 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BRIDGE_NETFILTER_H
#define __LINUX_BRIDGE_NETFILTER_H
#include <uapi/linux/netfilter_bridge.h>
#include <linux/skbuff.h>
-enum nf_br_hook_priorities {
- NF_BR_PRI_FIRST = INT_MIN,
- NF_BR_PRI_NAT_DST_BRIDGED = -300,
- NF_BR_PRI_FILTER_BRIDGED = -200,
- NF_BR_PRI_BRNF = 0,
- NF_BR_PRI_NAT_DST_OTHER = 100,
- NF_BR_PRI_FILTER_OTHER = 200,
- NF_BR_PRI_NAT_SRC = 300,
- NF_BR_PRI_LAST = INT_MAX,
+struct nf_bridge_frag_data {
+ char mac[ETH_HLEN];
+ bool vlan_present;
+ u16 vlan_tci;
+ __be16 vlan_proto;
};
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
@@ -27,43 +24,58 @@ static inline void br_drop_fake_rtable(struct sk_buff *skb)
skb_dst_drop(skb);
}
+static inline struct nf_bridge_info *
+nf_bridge_info_get(const struct sk_buff *skb)
+{
+ return skb_ext_find(skb, SKB_EXT_BRIDGE_NF);
+}
+
+static inline bool nf_bridge_info_exists(const struct sk_buff *skb)
+{
+ return skb_ext_exist(skb, SKB_EXT_BRIDGE_NF);
+}
+
static inline int nf_bridge_get_physinif(const struct sk_buff *skb)
{
- struct nf_bridge_info *nf_bridge;
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
- if (skb->nf_bridge == NULL)
+ if (!nf_bridge)
return 0;
- nf_bridge = skb->nf_bridge;
return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0;
}
static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
{
- struct nf_bridge_info *nf_bridge;
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
- if (skb->nf_bridge == NULL)
+ if (!nf_bridge)
return 0;
- nf_bridge = skb->nf_bridge;
return nf_bridge->physoutdev ? nf_bridge->physoutdev->ifindex : 0;
}
static inline struct net_device *
nf_bridge_get_physindev(const struct sk_buff *skb)
{
- return skb->nf_bridge ? skb->nf_bridge->physindev : NULL;
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+
+ return nf_bridge ? nf_bridge->physindev : NULL;
}
static inline struct net_device *
nf_bridge_get_physoutdev(const struct sk_buff *skb)
{
- return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL;
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+
+ return nf_bridge ? nf_bridge->physoutdev : NULL;
}
static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
{
- return skb->nf_bridge && skb->nf_bridge->in_prerouting;
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+
+ return nf_bridge && nf_bridge->in_prerouting;
}
#else
#define br_drop_fake_rtable(skb) do { } while (0)
diff --git a/include/linux/netfilter_bridge/ebt_802_3.h b/include/linux/netfilter_bridge/ebt_802_3.h
deleted file mode 100644
index e17e8bfb4e8b..000000000000
--- a/include/linux/netfilter_bridge/ebt_802_3.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __LINUX_BRIDGE_EBT_802_3_H
-#define __LINUX_BRIDGE_EBT_802_3_H
-
-#include <linux/skbuff.h>
-#include <uapi/linux/netfilter_bridge/ebt_802_3.h>
-
-static inline struct ebt_802_3_hdr *ebt_802_3_hdr(const struct sk_buff *skb)
-{
- return (struct ebt_802_3_hdr *)skb_mac_header(skb);
-}
-#endif
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 2c2a5514b0df..fd533552a062 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ebtables
*
@@ -16,10 +17,6 @@
#include <linux/if_ether.h>
#include <uapi/linux/netfilter_bridge/ebtables.h>
-/* return values for match() functions */
-#define EBT_MATCH 0
-#define EBT_NOMATCH 1
-
struct ebt_match {
struct list_head list;
const char name[EBT_FUNCTION_MAXNAMELEN];
@@ -88,7 +85,7 @@ struct ebt_table_info {
/* room to maintain the stack used for jumping from and into udc */
struct ebt_chainstack **chainstack;
char *entries;
- struct ebt_counter counters[0] ____cacheline_aligned;
+ struct ebt_counter counters[] ____cacheline_aligned;
};
struct ebt_table {
@@ -97,25 +94,22 @@ struct ebt_table {
struct ebt_replace_kernel *table;
unsigned int valid_hooks;
rwlock_t lock;
- /* e.g. could be the table explicitly only allows certain
- * matches, targets, ... 0 == let it in */
- int (*check)(const struct ebt_table_info *info,
- unsigned int valid_hooks);
/* the data used by the kernel */
struct ebt_table_info *private;
+ struct nf_hook_ops *ops;
struct module *me;
};
#define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \
~(__alignof__(struct _xt_align)-1))
-extern struct ebt_table *ebt_register_table(struct net *net,
- const struct ebt_table *table,
- const struct nf_hook_ops *);
-extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
- const struct nf_hook_ops *);
-extern unsigned int ebt_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct ebt_table *table);
+
+extern int ebt_register_table(struct net *net,
+ const struct ebt_table *table,
+ const struct nf_hook_ops *ops);
+extern void ebt_unregister_table(struct net *net, const char *tablename);
+void ebt_unregister_table_pre_exit(struct net *net, const char *tablename);
+extern unsigned int ebt_do_table(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
/* True if the hook mask denotes that the rule is in a base chain,
* used in the check() functions */
@@ -128,4 +122,6 @@ static inline bool ebt_invalid_target(int target)
return (target < -NUM_STANDARD_TARGETS || target >= 0);
}
+int ebt_register_template(const struct ebt_table *t, int(*table_init)(struct net *net));
+void ebt_unregister_template(const struct ebt_table *t);
#endif
diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h
index d3a7f8597e82..a5f7bef1b3a4 100644
--- a/include/linux/netfilter_defs.h
+++ b/include/linux/netfilter_defs.h
@@ -1,9 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NETFILTER_CORE_H_
#define __LINUX_NETFILTER_CORE_H_
#include <uapi/linux/netfilter.h>
-/* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */
-#define NF_MAX_HOOKS 8
+/* in/out/forward only */
+#define NF_ARP_NUMHOOKS 3
+
+#define NF_MAX_HOOKS NF_INET_NUMHOOKS
#endif
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
deleted file mode 100644
index 59476061de86..000000000000
--- a/include/linux/netfilter_ingress.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef _NETFILTER_INGRESS_H_
-#define _NETFILTER_INGRESS_H_
-
-#include <linux/netfilter.h>
-#include <linux/netdevice.h>
-
-#ifdef CONFIG_NETFILTER_INGRESS
-static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
-{
-#ifdef HAVE_JUMP_LABEL
- if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
- return false;
-#endif
- return rcu_access_pointer(skb->dev->nf_hooks_ingress);
-}
-
-/* caller must hold rcu_read_lock */
-static inline int nf_hook_ingress(struct sk_buff *skb)
-{
- struct nf_hook_entry *e = rcu_dereference(skb->dev->nf_hooks_ingress);
- struct nf_hook_state state;
- int ret;
-
- /* Must recheck the ingress hook head, in the event it became NULL
- * after the check in nf_hook_ingress_active evaluated to true.
- */
- if (unlikely(!e))
- return 0;
-
- nf_hook_state_init(&state, NF_NETDEV_INGRESS,
- NFPROTO_NETDEV, skb->dev, NULL, NULL,
- dev_net(skb->dev), NULL);
- ret = nf_hook_slow(skb, &state, e);
- if (ret == 0)
- return -1;
-
- return ret;
-}
-
-static inline void nf_hook_ingress_init(struct net_device *dev)
-{
- RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL);
-}
-#else /* CONFIG_NETFILTER_INGRESS */
-static inline int nf_hook_ingress_active(struct sk_buff *skb)
-{
- return 0;
-}
-
-static inline int nf_hook_ingress(struct sk_buff *skb)
-{
- return 0;
-}
-
-static inline void nf_hook_ingress_init(struct net_device *dev) {}
-#endif /* CONFIG_NETFILTER_INGRESS */
-#endif /* _NETFILTER_INGRESS_H_ */
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index 98c03b2462b5..5b70ca868bb1 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -6,7 +6,36 @@
#include <uapi/linux/netfilter_ipv4.h>
-int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type);
+/* Extra routing may be needed on local out, as the QUEUE target never returns
+ * control to the table.
+ */
+struct ip_rt_info {
+ __be32 daddr;
+ __be32 saddr;
+ u_int8_t tos;
+ u_int32_t mark;
+};
+
+int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned addr_type);
+
+struct nf_queue_entry;
+
+#ifdef CONFIG_INET
__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
+int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
+ bool strict);
+#else
+static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u_int8_t protocol)
+{
+ return 0;
+}
+static inline int nf_ip_route(struct net *net, struct dst_entry **dst,
+ struct flowi *fl, bool strict)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_INET */
+
#endif /*__LINUX_IP_NETFILTER_H*/
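
The new !CONFIG_INET stubs keep #ifdefs out of call sites: nf_ip_checksum() degrades to 0 and nf_ip_route() to -EOPNOTSUPP. A caller sketch under that assumption; the skip-on-EOPNOTSUPP policy is illustrative, not prescribed:

static int reroute_output(struct net *net, struct flowi *fl,
			  struct dst_entry **dst)
{
	int err;

	/* Compiles to the real lookup with CONFIG_INET, and to the
	 * -EOPNOTSUPP stub without it; no #ifdef at the call site. */
	err = nf_ip_route(net, dst, fl, false);
	if (err == -EOPNOTSUPP)
		return 0;	/* IPv4 routing not built in; skip */
	return err;
}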
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index 7bfc5893ec31..132b0e4a6d4d 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* 25-Jul-1998 Major changes to allow for ip chain table
*
@@ -16,19 +17,17 @@
#include <linux/if.h>
#include <linux/in.h>
+#include <linux/init.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
-
-#include <linux/init.h>
#include <uapi/linux/netfilter_ipv4/ip_tables.h>
-extern void ipt_init(void) __init;
-
int ipt_register_table(struct net *net, const struct xt_table *table,
const struct ipt_replace *repl,
- const struct nf_hook_ops *ops, struct xt_table **res);
-void ipt_unregister_table(struct net *net, struct xt_table *table,
- const struct nf_hook_ops *ops);
+ const struct nf_hook_ops *ops);
+
+void ipt_unregister_table_pre_exit(struct net *net, const char *name);
+void ipt_unregister_table_exit(struct net *net, const char *name);
/* Standard entry. */
struct ipt_standard {
@@ -64,11 +63,11 @@ struct ipt_error {
}
extern void *ipt_alloc_initial_table(const struct xt_table *);
-extern unsigned int ipt_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct xt_table *table);
+extern unsigned int ipt_do_table(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state);
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
struct compat_ipt_entry {
@@ -78,7 +77,7 @@ struct compat_ipt_entry {
__u16 next_offset;
compat_uint_t comefrom;
struct compat_xt_counters counters;
- unsigned char elems[0];
+ unsigned char elems[];
};
/* Helper functions */
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 47c6b04c28c0..7834c0be2831 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -1,33 +1,74 @@
/* IPv6-specific defines for netfilter.
* (C)1998 Rusty Russell -- This code is GPL.
* (C)1999 David Jeffery
- * this header was blatantly ripped from netfilter_ipv4.h
+ * this header was blatantly ripped from netfilter_ipv4.h
* it's amazing what adding a bunch of 6s can do =8^)
*/
#ifndef __LINUX_IP6_NETFILTER_H
#define __LINUX_IP6_NETFILTER_H
#include <uapi/linux/netfilter_ipv6.h>
+#include <net/tcp.h>
+
+/* Check for an extension */
+static inline int
+nf_ip6_ext_hdr(u8 nexthdr)
+{ return (nexthdr == IPPROTO_HOPOPTS) ||
+ (nexthdr == IPPROTO_ROUTING) ||
+ (nexthdr == IPPROTO_FRAGMENT) ||
+ (nexthdr == IPPROTO_ESP) ||
+ (nexthdr == IPPROTO_AH) ||
+ (nexthdr == IPPROTO_NONE) ||
+ (nexthdr == IPPROTO_DSTOPTS);
+}
+
+/* Extra routing may be needed on local out, as the QUEUE target never returns
+ * control to the table.
+ */
+struct ip6_rt_info {
+ struct in6_addr daddr;
+ struct in6_addr saddr;
+ u_int32_t mark;
+};
+
+struct nf_queue_entry;
+struct nf_bridge_frag_data;
/*
* Hook functions for ipv6 to allow xt_* modules to be built-in even
* if IPv6 is a module.
*/
struct nf_ipv6_ops {
+#if IS_MODULE(CONFIG_IPV6)
int (*chk_addr)(struct net *net, const struct in6_addr *addr,
const struct net_device *dev, int strict);
+ int (*route_me_harder)(struct net *net, struct sock *sk, struct sk_buff *skb);
+ int (*dev_get_saddr)(struct net *net, const struct net_device *dev,
+ const struct in6_addr *daddr, unsigned int srcprefs,
+ struct in6_addr *saddr);
+ int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl,
+ bool strict);
+ u32 (*cookie_init_sequence)(const struct ipv6hdr *iph,
+ const struct tcphdr *th, u16 *mssp);
+ int (*cookie_v6_check)(const struct ipv6hdr *iph,
+ const struct tcphdr *th, __u32 cookie);
+#endif
void (*route_input)(struct sk_buff *skb);
int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *));
+ int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
+#if IS_MODULE(CONFIG_IPV6)
+ int (*br_fragment)(struct net *net, struct sock *sk,
+ struct sk_buff *skb,
+ struct nf_bridge_frag_data *data,
+ int (*output)(struct net *, struct sock *sk,
+ const struct nf_bridge_frag_data *data,
+ struct sk_buff *));
+#endif
};
#ifdef CONFIG_NETFILTER
-int ip6_route_me_harder(struct net *net, struct sk_buff *skb);
-__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, u_int8_t protocol);
-
-int ipv6_netfilter_init(void);
-void ipv6_netfilter_fini(void);
+#include <net/addrconf.h>
extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
@@ -35,6 +76,132 @@ static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
return rcu_dereference(nf_ipv6_ops);
}
+static inline int nf_ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
+ const struct net_device *dev, int strict)
+{
+#if IS_MODULE(CONFIG_IPV6)
+ const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
+
+ if (!v6_ops)
+ return 1;
+
+ return v6_ops->chk_addr(net, addr, dev, strict);
+#elif IS_BUILTIN(CONFIG_IPV6)
+ return ipv6_chk_addr(net, addr, dev, strict);
+#else
+ return 1;
+#endif
+}
+
+int __nf_ip6_route(struct net *net, struct dst_entry **dst,
+ struct flowi *fl, bool strict);
+
+static inline int nf_ip6_route(struct net *net, struct dst_entry **dst,
+ struct flowi *fl, bool strict)
+{
+#if IS_MODULE(CONFIG_IPV6)
+ const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
+
+ if (v6ops)
+ return v6ops->route(net, dst, fl, strict);
+
+ return -EHOSTUNREACH;
+#endif
+#if IS_BUILTIN(CONFIG_IPV6)
+ return __nf_ip6_route(net, dst, fl, strict);
+#else
+ return -EHOSTUNREACH;
+#endif
+}
+
+#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
+
+int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+ struct nf_bridge_frag_data *data,
+ int (*output)(struct net *, struct sock *sk,
+ const struct nf_bridge_frag_data *data,
+ struct sk_buff *));
+
+static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk,
+ struct sk_buff *skb,
+ struct nf_bridge_frag_data *data,
+ int (*output)(struct net *, struct sock *sk,
+ const struct nf_bridge_frag_data *data,
+ struct sk_buff *))
+{
+#if IS_MODULE(CONFIG_IPV6)
+ const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
+
+ if (!v6_ops)
+ return 1;
+
+ return v6_ops->br_fragment(net, sk, skb, data, output);
+#elif IS_BUILTIN(CONFIG_IPV6)
+ return br_ip6_fragment(net, sk, skb, data, output);
+#else
+ return 1;
+#endif
+}
+
+int ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb);
+
+static inline int nf_ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+#if IS_MODULE(CONFIG_IPV6)
+ const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
+
+ if (!v6_ops)
+ return -EHOSTUNREACH;
+
+ return v6_ops->route_me_harder(net, sk, skb);
+#elif IS_BUILTIN(CONFIG_IPV6)
+ return ip6_route_me_harder(net, sk, skb);
+#else
+ return -EHOSTUNREACH;
+#endif
+}
+
+static inline u32 nf_ipv6_cookie_init_sequence(const struct ipv6hdr *iph,
+ const struct tcphdr *th,
+ u16 *mssp)
+{
+#if IS_ENABLED(CONFIG_SYN_COOKIES)
+#if IS_MODULE(CONFIG_IPV6)
+ const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
+
+ if (v6_ops)
+ return v6_ops->cookie_init_sequence(iph, th, mssp);
+#elif IS_BUILTIN(CONFIG_IPV6)
+ return __cookie_v6_init_sequence(iph, th, mssp);
+#endif
+#endif
+ return 0;
+}
+
+static inline int nf_cookie_v6_check(const struct ipv6hdr *iph,
+ const struct tcphdr *th, __u32 cookie)
+{
+#if IS_ENABLED(CONFIG_SYN_COOKIES)
+#if IS_MODULE(CONFIG_IPV6)
+ const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
+
+ if (v6_ops)
+ return v6_ops->cookie_v6_check(iph, th, cookie);
+#elif IS_BUILTIN(CONFIG_IPV6)
+ return __cookie_v6_check(iph, th, cookie);
+#endif
+#endif
+ return 0;
+}
+
+__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u_int8_t protocol);
+
+int nf_ip6_check_hbh_len(struct sk_buff *skb, u32 *plen);
+
+int ipv6_netfilter_init(void);
+void ipv6_netfilter_fini(void);
+
#else /* CONFIG_NETFILTER */
static inline int ipv6_netfilter_init(void) { return 0; }
static inline void ipv6_netfilter_fini(void) { return; }
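
The nf_ipv6_ops indirection is what lets the wrappers above degrade gracefully across build configurations. As a sketch, a hypothetical caller demo_addr_is_local() resolves to a direct ipv6_chk_addr() call when IPv6 is built in, an indirect call through nf_ipv6_ops when IPv6 is a module, and a safe fallback when IPv6 is absent:

static bool demo_addr_is_local(struct net *net, const struct in6_addr *addr,
			       const struct net_device *dev)
{
	/* nf_ipv6_chk_addr() hides the three cases: built-in IPv6,
	 * modular IPv6 (via nf_ipv6_ops), or no IPv6 at all (in which
	 * case it conservatively returns 1).
	 */
	return nf_ipv6_chk_addr(net, addr, dev, 1) != 0;
}
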
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index b21c392d6012..8b8885a73c76 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* 25-Jul-1998 Major changes to allow for ip chain table
*
@@ -16,37 +17,22 @@
#include <linux/if.h>
#include <linux/in6.h>
+#include <linux/init.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
-
-#include <linux/init.h>
#include <uapi/linux/netfilter_ipv6/ip6_tables.h>
-extern void ip6t_init(void) __init;
-
extern void *ip6t_alloc_initial_table(const struct xt_table *);
+
int ip6t_register_table(struct net *net, const struct xt_table *table,
const struct ip6t_replace *repl,
- const struct nf_hook_ops *ops, struct xt_table **res);
-void ip6t_unregister_table(struct net *net, struct xt_table *table,
- const struct nf_hook_ops *ops);
-extern unsigned int ip6t_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct xt_table *table);
-
-/* Check for an extension */
-static inline int
-ip6t_ext_hdr(u8 nexthdr)
-{ return (nexthdr == IPPROTO_HOPOPTS) ||
- (nexthdr == IPPROTO_ROUTING) ||
- (nexthdr == IPPROTO_FRAGMENT) ||
- (nexthdr == IPPROTO_ESP) ||
- (nexthdr == IPPROTO_AH) ||
- (nexthdr == IPPROTO_NONE) ||
- (nexthdr == IPPROTO_DSTOPTS);
-}
+ const struct nf_hook_ops *ops);
+void ip6t_unregister_table_pre_exit(struct net *net, const char *name);
+void ip6t_unregister_table_exit(struct net *net, const char *name);
+extern unsigned int ip6t_do_table(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
struct compat_ip6t_entry {
@@ -56,7 +42,7 @@ struct compat_ip6t_entry {
__u16 next_offset;
compat_uint_t comefrom;
struct compat_xt_counters counters;
- unsigned char elems[0];
+ unsigned char elems[];
};
static inline struct xt_entry_target *
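
The pre_exit/exit split mirrors two-phase pernet teardown: hooks are detached first (pre_exit), and the table itself is freed only once in-flight packets have drained (exit). A sketch of how a table module might wire this up, with "demo_filter" as a hypothetical table name:

static void __net_exit demo_net_pre_exit(struct net *net)
{
	/* Phase 1: detach the hooks so no new packets enter the table */
	ip6t_unregister_table_pre_exit(net, "demo_filter");
}

static void __net_exit demo_net_exit(struct net *net)
{
	/* Phase 2: free the table once in-flight packets are done */
	ip6t_unregister_table_exit(net, "demo_filter");
}

static struct pernet_operations demo_net_ops = {
	.pre_exit = demo_net_pre_exit,
	.exit	  = demo_net_exit,
};
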
diff --git a/include/linux/netfilter_netdev.h b/include/linux/netfilter_netdev.h
new file mode 100644
index 000000000000..8676316547cc
--- /dev/null
+++ b/include/linux/netfilter_netdev.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NETFILTER_NETDEV_H_
+#define _NETFILTER_NETDEV_H_
+
+#include <linux/netfilter.h>
+#include <linux/netdevice.h>
+
+#ifdef CONFIG_NETFILTER_INGRESS
+static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
+{
+#ifdef CONFIG_JUMP_LABEL
+ if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
+ return false;
+#endif
+ return rcu_access_pointer(skb->dev->nf_hooks_ingress);
+}
+
+/* caller must hold rcu_read_lock */
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+ struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress);
+ struct nf_hook_state state;
+ int ret;
+
+ /* Must recheck the ingress hook head, in the event it became NULL
+ * after the check in nf_hook_ingress_active evaluated to true.
+ */
+ if (unlikely(!e))
+ return 0;
+
+ nf_hook_state_init(&state, NF_NETDEV_INGRESS,
+ NFPROTO_NETDEV, skb->dev, NULL, NULL,
+ dev_net(skb->dev), NULL);
+ ret = nf_hook_slow(skb, &state, e, 0);
+ if (ret == 0)
+ return -1;
+
+ return ret;
+}
+
+#else /* CONFIG_NETFILTER_INGRESS */
+static inline int nf_hook_ingress_active(struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+ return 0;
+}
+#endif /* CONFIG_NETFILTER_INGRESS */
+
+#ifdef CONFIG_NETFILTER_EGRESS
+static inline bool nf_hook_egress_active(void)
+{
+#ifdef CONFIG_JUMP_LABEL
+ if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_EGRESS]))
+ return false;
+#endif
+ return true;
+}
+
+/**
+ * nf_hook_egress - classify packets before transmission
+ * @skb: packet to be classified
+ * @rc: result code which shall be returned by __dev_queue_xmit() on failure
+ * @dev: netdev whose egress hooks shall be applied to @skb
+ *
+ * Returns @skb on success or %NULL if the packet was consumed or filtered.
+ * Caller must hold rcu_read_lock.
+ *
+ * On ingress, packets are classified first by tc, then by netfilter.
+ * On egress, the order is reversed for symmetry. Conceptually, tc and
+ * netfilter can be thought of as layers, with netfilter layered above tc:
+ * When tc redirects a packet to another interface, netfilter is not applied
+ * because the packet is on the tc layer.
+ *
+ * The nf_skip_egress flag controls whether netfilter is applied on egress.
+ * It is updated by __netif_receive_skb_core() and __dev_queue_xmit() when the
+ * packet passes through tc and netfilter. Because __dev_queue_xmit() may be
+ * called recursively by tunnel drivers such as vxlan, the flag is reverted to
+ * false after sch_handle_egress(). This ensures that netfilter is applied
+ * both on the overlay and underlying network.
+ */
+static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
+ struct net_device *dev)
+{
+ struct nf_hook_entries *e;
+ struct nf_hook_state state;
+ int ret;
+
+#ifdef CONFIG_NETFILTER_SKIP_EGRESS
+ if (skb->nf_skip_egress)
+ return skb;
+#endif
+
+ e = rcu_dereference_check(dev->nf_hooks_egress, rcu_read_lock_bh_held());
+ if (!e)
+ return skb;
+
+ nf_hook_state_init(&state, NF_NETDEV_EGRESS,
+ NFPROTO_NETDEV, NULL, dev, NULL,
+ dev_net(dev), NULL);
+
+ /* nf assumes rcu_read_lock, not just read_lock_bh */
+ rcu_read_lock();
+ ret = nf_hook_slow(skb, &state, e, 0);
+ rcu_read_unlock();
+
+ if (ret == 1) {
+ return skb;
+ } else if (ret < 0) {
+ *rc = NET_XMIT_DROP;
+ return NULL;
+ } else { /* ret == 0 */
+ *rc = NET_XMIT_SUCCESS;
+ return NULL;
+ }
+}
+#else /* CONFIG_NETFILTER_EGRESS */
+static inline bool nf_hook_egress_active(void)
+{
+ return false;
+}
+
+static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
+ struct net_device *dev)
+{
+ return skb;
+}
+#endif /* CONFIG_NETFILTER_EGRESS */
+
+static inline void nf_skip_egress(struct sk_buff *skb, bool skip)
+{
+#ifdef CONFIG_NETFILTER_SKIP_EGRESS
+ skb->nf_skip_egress = skip;
+#endif
+}
+
+static inline void nf_hook_netdev_init(struct net_device *dev)
+{
+#ifdef CONFIG_NETFILTER_INGRESS
+ RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL);
+#endif
+#ifdef CONFIG_NETFILTER_EGRESS
+ RCU_INIT_POINTER(dev->nf_hooks_egress, NULL);
+#endif
+}
+
+#endif /* _NETFILTER_NETDEV_H_ */
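
Putting the pieces together, a transmit path gates the egress hook on nf_hook_egress_active() and takes the verdict back through *rc. The real call site lives in __dev_queue_xmit(); demo_xmit() below is only an illustrative sketch of the calling convention:

static int demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int rc = NET_XMIT_SUCCESS;

	rcu_read_lock_bh();
	if (nf_hook_egress_active()) {
		skb = nf_hook_egress(skb, &rc, dev);
		if (!skb)		/* consumed or dropped; rc is set */
			goto out;
	}
	rc = dev_queue_xmit(skb);	/* hand off to the qdisc/driver */
out:
	rcu_read_unlock_bh();
	return rc;
}
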
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
new file mode 100644
index 000000000000..a1f3522daa69
--- /dev/null
+++ b/include/linux/netfs.h
@@ -0,0 +1,365 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Network filesystem support services.
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * See:
+ *
+ * Documentation/filesystems/netfs_library.rst
+ *
+ * for a description of the network filesystem interface declared here.
+ */
+
+#ifndef _LINUX_NETFS_H
+#define _LINUX_NETFS_H
+
+#include <linux/workqueue.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/uio.h>
+
+enum netfs_sreq_ref_trace;
+
+/*
+ * Overload PG_private_2 to give us PG_fscache - this is used to indicate that
+ * a page is currently backed by a local disk cache
+ */
+#define folio_test_fscache(folio) folio_test_private_2(folio)
+#define PageFsCache(page) PagePrivate2((page))
+#define SetPageFsCache(page) SetPagePrivate2((page))
+#define ClearPageFsCache(page) ClearPagePrivate2((page))
+#define TestSetPageFsCache(page) TestSetPagePrivate2((page))
+#define TestClearPageFsCache(page) TestClearPagePrivate2((page))
+
+/**
+ * folio_start_fscache - Start an fscache write on a folio.
+ * @folio: The folio.
+ *
+ * Call this function before writing a folio to a local cache. Starting a
+ * second write before the first one finishes is not allowed.
+ */
+static inline void folio_start_fscache(struct folio *folio)
+{
+ VM_BUG_ON_FOLIO(folio_test_private_2(folio), folio);
+ folio_get(folio);
+ folio_set_private_2(folio);
+}
+
+/**
+ * folio_end_fscache - End an fscache write on a folio.
+ * @folio: The folio.
+ *
+ * Call this function after the folio has been written to the local cache.
+ * This will wake any sleepers waiting on this folio.
+ */
+static inline void folio_end_fscache(struct folio *folio)
+{
+ folio_end_private_2(folio);
+}
+
+/**
+ * folio_wait_fscache - Wait for an fscache write on this folio to end.
+ * @folio: The folio.
+ *
+ * If this folio is currently being written to a local cache, wait for
+ * the write to finish. Another write may start after this one finishes,
+ * unless the caller holds the folio lock.
+ */
+static inline void folio_wait_fscache(struct folio *folio)
+{
+ folio_wait_private_2(folio);
+}
+
+/**
+ * folio_wait_fscache_killable - Wait for an fscache write on this folio to end.
+ * @folio: The folio.
+ *
+ * If this folio is currently being written to a local cache, wait
+ * for the write to finish or for a fatal signal to be received.
+ * Another write may start after this one finishes, unless the caller
+ * holds the folio lock.
+ *
+ * Return:
+ * - 0 if successful.
+ * - -EINTR if a fatal signal was encountered.
+ */
+static inline int folio_wait_fscache_killable(struct folio *folio)
+{
+ return folio_wait_private_2_killable(folio);
+}
+
+static inline void set_page_fscache(struct page *page)
+{
+ folio_start_fscache(page_folio(page));
+}
+
+static inline void end_page_fscache(struct page *page)
+{
+ folio_end_private_2(page_folio(page));
+}
+
+static inline void wait_on_page_fscache(struct page *page)
+{
+ folio_wait_private_2(page_folio(page));
+}
+
+static inline int wait_on_page_fscache_killable(struct page *page)
+{
+ return folio_wait_private_2_killable(page_folio(page));
+}
+
+enum netfs_io_source {
+ NETFS_FILL_WITH_ZEROES,
+ NETFS_DOWNLOAD_FROM_SERVER,
+ NETFS_READ_FROM_CACHE,
+ NETFS_INVALID_READ,
+} __mode(byte);
+
+typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error,
+ bool was_async);
+
+/*
+ * Per-inode context. This wraps the VFS inode.
+ */
+struct netfs_inode {
+ struct inode inode; /* The VFS inode */
+ const struct netfs_request_ops *ops;
+#if IS_ENABLED(CONFIG_FSCACHE)
+ struct fscache_cookie *cache;
+#endif
+ loff_t remote_i_size; /* Size of the remote file */
+};
+
+/*
+ * Resources required to do operations on a cache.
+ */
+struct netfs_cache_resources {
+ const struct netfs_cache_ops *ops;
+ void *cache_priv;
+ void *cache_priv2;
+ unsigned int debug_id; /* Cookie debug ID */
+ unsigned int inval_counter; /* object->inval_counter at begin_op */
+};
+
+/*
+ * Descriptor for a single component subrequest.
+ */
+struct netfs_io_subrequest {
+ struct netfs_io_request *rreq; /* Supervising I/O request */
+ struct list_head rreq_link; /* Link in rreq->subrequests */
+ loff_t start; /* Where to start the I/O */
+ size_t len; /* Size of the I/O */
+ size_t transferred; /* Amount of data transferred */
+ refcount_t ref;
+ short error; /* 0 or error that occurred */
+ unsigned short debug_index; /* Index in list (for debugging output) */
+ enum netfs_io_source source; /* Where to read from/write to */
+ unsigned long flags;
+#define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */
+#define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */
+#define NETFS_SREQ_SHORT_IO 2 /* Set if the I/O was short */
+#define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */
+#define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */
+#define NETFS_SREQ_ONDEMAND 5 /* Set if it's from on-demand read mode */
+};
+
+enum netfs_io_origin {
+ NETFS_READAHEAD, /* This read was triggered by readahead */
+ NETFS_READPAGE, /* This read is a synchronous read */
+ NETFS_READ_FOR_WRITE, /* This read is to prepare a write */
+} __mode(byte);
+
+/*
+ * Descriptor for an I/O helper request. This is used to make multiple I/O
+ * operations to a variety of data stores and then stitch the result together.
+ */
+struct netfs_io_request {
+ struct work_struct work;
+ struct inode *inode; /* The file being accessed */
+ struct address_space *mapping; /* The mapping being accessed */
+ struct netfs_cache_resources cache_resources;
+ struct list_head subrequests; /* Contributory I/O operations */
+ void *netfs_priv; /* Private data for the netfs */
+ unsigned int debug_id;
+ atomic_t nr_outstanding; /* Number of ops in progress */
+ atomic_t nr_copy_ops; /* Number of copy-to-cache ops in progress */
+ size_t submitted; /* Amount submitted for I/O so far */
+ size_t len; /* Length of the request */
+ short error; /* 0 or error that occurred */
+ enum netfs_io_origin origin; /* Origin of the request */
+ loff_t i_size; /* Size of the file */
+ loff_t start; /* Start position */
+ pgoff_t no_unlock_folio; /* Don't unlock this folio after read */
+ refcount_t ref;
+ unsigned long flags;
+#define NETFS_RREQ_INCOMPLETE_IO 0 /* Some ioreqs terminated short or with error */
+#define NETFS_RREQ_COPY_TO_CACHE 1 /* Need to write to the cache */
+#define NETFS_RREQ_NO_UNLOCK_FOLIO 2 /* Don't unlock no_unlock_folio on completion */
+#define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */
+#define NETFS_RREQ_FAILED 4 /* The request failed */
+#define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes */
+ const struct netfs_request_ops *netfs_ops;
+};
+
+/*
+ * Operations the network filesystem can/must provide to the helpers.
+ */
+struct netfs_request_ops {
+ int (*init_request)(struct netfs_io_request *rreq, struct file *file);
+ void (*free_request)(struct netfs_io_request *rreq);
+ int (*begin_cache_operation)(struct netfs_io_request *rreq);
+
+ void (*expand_readahead)(struct netfs_io_request *rreq);
+ bool (*clamp_length)(struct netfs_io_subrequest *subreq);
+ void (*issue_read)(struct netfs_io_subrequest *subreq);
+ bool (*is_still_valid)(struct netfs_io_request *rreq);
+ int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
+ struct folio **foliop, void **_fsdata);
+ void (*done)(struct netfs_io_request *rreq);
+};
+
+/*
+ * How to handle reading from a hole.
+ */
+enum netfs_read_from_hole {
+ NETFS_READ_HOLE_IGNORE,
+ NETFS_READ_HOLE_CLEAR,
+ NETFS_READ_HOLE_FAIL,
+};
+
+/*
+ * Table of operations for access to a cache. This is obtained by
+ * rreq->ops->begin_cache_operation().
+ */
+struct netfs_cache_ops {
+ /* End an operation */
+ void (*end_operation)(struct netfs_cache_resources *cres);
+
+ /* Read data from the cache */
+ int (*read)(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ enum netfs_read_from_hole read_hole,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv);
+
+ /* Write data to the cache */
+ int (*write)(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv);
+
+ /* Expand readahead request */
+ void (*expand_readahead)(struct netfs_cache_resources *cres,
+ loff_t *_start, size_t *_len, loff_t i_size);
+
+ /* Prepare a read operation, shortening it to a cached/uncached
+ * boundary as appropriate.
+ */
+ enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq,
+ loff_t i_size);
+
+ /* Prepare a write operation, working out what part of the write we can
+ * actually do.
+ */
+ int (*prepare_write)(struct netfs_cache_resources *cres,
+ loff_t *_start, size_t *_len, loff_t i_size,
+ bool no_space_allocated_yet);
+
+ /* Prepare an on-demand read operation, shortening it to a cached/uncached
+ * boundary as appropriate.
+ */
+ enum netfs_io_source (*prepare_ondemand_read)(struct netfs_cache_resources *cres,
+ loff_t start, size_t *_len,
+ loff_t i_size,
+ unsigned long *_flags, ino_t ino);
+
+ /* Query the occupancy of the cache in a region, returning where the
+ * next chunk of data starts and how long it is.
+ */
+ int (*query_occupancy)(struct netfs_cache_resources *cres,
+ loff_t start, size_t len, size_t granularity,
+ loff_t *_data_start, size_t *_data_len);
+};
+
+struct readahead_control;
+void netfs_readahead(struct readahead_control *);
+int netfs_read_folio(struct file *, struct folio *);
+int netfs_write_begin(struct netfs_inode *, struct file *,
+ struct address_space *, loff_t pos, unsigned int len,
+ struct folio **, void **fsdata);
+
+void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
+void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
+ enum netfs_sreq_ref_trace what);
+void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
+ bool was_async, enum netfs_sreq_ref_trace what);
+void netfs_stats_show(struct seq_file *);
+ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
+ struct iov_iter *new,
+ iov_iter_extraction_t extraction_flags);
+struct sg_table;
+ssize_t netfs_extract_iter_to_sg(struct iov_iter *iter, size_t len,
+ struct sg_table *sgtable, unsigned int sg_max,
+ iov_iter_extraction_t extraction_flags);
+
+/**
+ * netfs_inode - Get the netfs inode context from the inode
+ * @inode: The inode to query
+ *
+ * Get the netfs lib inode context from the network filesystem's inode. The
+ * context struct is expected to wrap the VFS inode struct directly.
+ */
+static inline struct netfs_inode *netfs_inode(struct inode *inode)
+{
+ return container_of(inode, struct netfs_inode, inode);
+}
+
+/**
+ * netfs_inode_init - Initialise a netfslib inode context
+ * @ctx: The netfs inode to initialise
+ * @ops: The netfs's operations list
+ *
+ * Initialise the netfs library context struct, which is expected to wrap
+ * the VFS inode struct directly.
+ */
+static inline void netfs_inode_init(struct netfs_inode *ctx,
+ const struct netfs_request_ops *ops)
+{
+ ctx->ops = ops;
+ ctx->remote_i_size = i_size_read(&ctx->inode);
+#if IS_ENABLED(CONFIG_FSCACHE)
+ ctx->cache = NULL;
+#endif
+}
+
+/**
+ * netfs_resize_file - Note that a file got resized
+ * @ctx: The netfs inode being resized
+ * @new_i_size: The new file size
+ *
+ * Inform the netfs lib that a file got resized so that it can adjust its state.
+ */
+static inline void netfs_resize_file(struct netfs_inode *ctx, loff_t new_i_size)
+{
+ ctx->remote_i_size = new_i_size;
+}
+
+/**
+ * netfs_i_cookie - Get the cache cookie from the inode
+ * @ctx: The netfs inode to query
+ *
+ * Get the caching cookie (if enabled) from the network filesystem's inode.
+ */
+static inline struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx)
+{
+#if IS_ENABLED(CONFIG_FSCACHE)
+ return ctx->cache;
+#else
+ return NULL;
+#endif
+}
+
+#endif /* _LINUX_NETFS_H */
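
To adopt the library, a network filesystem embeds struct netfs_inode at the head of its own inode and initialises it once at inode setup time. A minimal sketch, where demo_fs_inode and demo_req_ops are hypothetical names:

struct demo_fs_inode {
	struct netfs_inode netfs;	/* must be first: wraps the VFS inode */
	unsigned long private_flags;	/* filesystem-private state follows */
};

static const struct netfs_request_ops demo_req_ops = {
	/* .init_request, .issue_read, ... filled in by the filesystem */
};

static void demo_fs_init_inode(struct demo_fs_inode *di)
{
	/* Hook the inode up to netfslib; the cache cookie (if any) is
	 * attached separately when fscache is enabled.
	 */
	netfs_inode_init(&di->netfs, &demo_req_ops);
}
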
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 8664fd26eb5d..19c0791ed9d5 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NETLINK_H
#define __LINUX_NETLINK_H
@@ -10,15 +11,14 @@
struct net;
+void do_trace_netlink_extack(const char *msg);
+
static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
{
return (struct nlmsghdr *)skb->data;
}
enum netlink_skb_flags {
- NETLINK_SKB_MMAPED = 0x1, /* Packet data is mmaped */
- NETLINK_SKB_TX = 0x2, /* Packet was sent by userspace */
- NETLINK_SKB_DELIVERED = 0x4, /* Packet was delivered */
NETLINK_SKB_DST = 0x8, /* Dst set in sendto or sendmsg */
};
@@ -36,8 +36,8 @@ struct netlink_skb_parms {
#define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds)
-extern void netlink_table_grab(void);
-extern void netlink_table_ungrab(void);
+void netlink_table_grab(void);
+void netlink_table_ungrab(void);
#define NL_CFG_F_NONROOT_RECV (1 << 0)
#define NL_CFG_F_NONROOT_SEND (1 << 1)
@@ -50,10 +50,9 @@ struct netlink_kernel_cfg {
struct mutex *cb_mutex;
int (*bind)(struct net *net, int group);
void (*unbind)(struct net *net, int group);
- bool (*compare)(struct net *net, struct sock *sk);
};
-extern struct sock *__netlink_kernel_create(struct net *net, int unit,
+struct sock *__netlink_kernel_create(struct net *net, int unit,
struct module *module,
struct netlink_kernel_cfg *cfg);
static inline struct sock *
@@ -64,72 +63,173 @@ netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg)
/* this can be increased when necessary - don't expose to userland */
#define NETLINK_MAX_COOKIE_LEN 20
+#define NETLINK_MAX_FMTMSG_LEN 80
/**
* struct netlink_ext_ack - netlink extended ACK report struct
* @_msg: message string to report - don't access directly, use
* %NL_SET_ERR_MSG
* @bad_attr: attribute with error
+ * @policy: policy for a bad attribute
+ * @miss_type: attribute type which was missing
+ * @miss_nest: nest missing an attribute (%NULL if missing top level attr)
* @cookie: cookie data to return to userspace (for success)
* @cookie_len: actual cookie data length
+ * @_msg_buf: output buffer for formatted message strings - don't access
+ * directly, use %NL_SET_ERR_MSG_FMT
*/
struct netlink_ext_ack {
const char *_msg;
const struct nlattr *bad_attr;
+ const struct nla_policy *policy;
+ const struct nlattr *miss_nest;
+ u16 miss_type;
u8 cookie[NETLINK_MAX_COOKIE_LEN];
u8 cookie_len;
+ char _msg_buf[NETLINK_MAX_FMTMSG_LEN];
};
/* Always use this macro, this allows later putting the
* message into a separate section or such for things
* like translation or listing all possible messages.
- * Currently string formatting is not supported (due
- * to the lack of an output buffer.)
+ * If string formatting is needed, use NL_SET_ERR_MSG_FMT.
*/
#define NL_SET_ERR_MSG(extack, msg) do { \
- static const char __msg[] = (msg); \
+ static const char __msg[] = msg; \
struct netlink_ext_ack *__extack = (extack); \
\
+ do_trace_netlink_extack(__msg); \
+ \
if (__extack) \
__extack->_msg = __msg; \
} while (0)
+/* We splice fmt with %s at each end even in the snprintf so that both calls
+ * can use the same string constant, avoiding its duplication in .rodata.
+ */
+#define NL_SET_ERR_MSG_FMT(extack, fmt, args...) do { \
+ struct netlink_ext_ack *__extack = (extack); \
+ \
+ if (!__extack) \
+ break; \
+ if (snprintf(__extack->_msg_buf, NETLINK_MAX_FMTMSG_LEN, \
+ "%s" fmt "%s", "", ##args, "") >= \
+ NETLINK_MAX_FMTMSG_LEN) \
+ net_warn_ratelimited("%s" fmt "%s", "truncated extack: ", \
+ ##args, "\n"); \
+ \
+ do_trace_netlink_extack(__extack->_msg_buf); \
+ \
+ __extack->_msg = __extack->_msg_buf; \
+} while (0)
+
#define NL_SET_ERR_MSG_MOD(extack, msg) \
NL_SET_ERR_MSG((extack), KBUILD_MODNAME ": " msg)
-#define NL_SET_BAD_ATTR(extack, attr) do { \
- if ((extack)) \
+#define NL_SET_ERR_MSG_FMT_MOD(extack, fmt, args...) \
+ NL_SET_ERR_MSG_FMT((extack), KBUILD_MODNAME ": " fmt, ##args)
+
+#define NL_SET_ERR_MSG_WEAK(extack, msg) do { \
+ if ((extack) && !(extack)->_msg) \
+ NL_SET_ERR_MSG((extack), msg); \
+} while (0)
+
+#define NL_SET_ERR_MSG_WEAK_MOD(extack, msg) do { \
+ if ((extack) && !(extack)->_msg) \
+ NL_SET_ERR_MSG_MOD((extack), msg); \
+} while (0)
+
+#define NL_SET_BAD_ATTR_POLICY(extack, attr, pol) do { \
+ if ((extack)) { \
(extack)->bad_attr = (attr); \
+ (extack)->policy = (pol); \
+ } \
} while (0)
-#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) do { \
- static const char __msg[] = (msg); \
+#define NL_SET_BAD_ATTR(extack, attr) NL_SET_BAD_ATTR_POLICY(extack, attr, NULL)
+
+#define NL_SET_ERR_MSG_ATTR_POL(extack, attr, pol, msg) do { \
+ static const char __msg[] = msg; \
+ struct netlink_ext_ack *__extack = (extack); \
+ \
+ do_trace_netlink_extack(__msg); \
+ \
+ if (__extack) { \
+ __extack->_msg = __msg; \
+ __extack->bad_attr = (attr); \
+ __extack->policy = (pol); \
+ } \
+} while (0)
+
+#define NL_SET_ERR_MSG_ATTR_POL_FMT(extack, attr, pol, fmt, args...) do { \
+ struct netlink_ext_ack *__extack = (extack); \
+ \
+ if (!__extack) \
+ break; \
+ \
+ if (snprintf(__extack->_msg_buf, NETLINK_MAX_FMTMSG_LEN, \
+ "%s" fmt "%s", "", ##args, "") >= \
+ NETLINK_MAX_FMTMSG_LEN) \
+ net_warn_ratelimited("%s" fmt "%s", "truncated extack: ", \
+ ##args, "\n"); \
+ \
+ do_trace_netlink_extack(__extack->_msg_buf); \
+ \
+ __extack->_msg = __extack->_msg_buf; \
+ __extack->bad_attr = (attr); \
+ __extack->policy = (pol); \
+} while (0)
+
+#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) \
+ NL_SET_ERR_MSG_ATTR_POL(extack, attr, NULL, msg)
+
+#define NL_SET_ERR_MSG_ATTR_FMT(extack, attr, msg, args...) \
+ NL_SET_ERR_MSG_ATTR_POL_FMT(extack, attr, NULL, msg, ##args)
+
+#define NL_SET_ERR_ATTR_MISS(extack, nest, type) do { \
struct netlink_ext_ack *__extack = (extack); \
\
if (__extack) { \
- __extack->_msg = __msg; \
- __extack->bad_attr = (attr); \
+ __extack->miss_nest = (nest); \
+ __extack->miss_type = (type); \
} \
} while (0)
-extern void netlink_kernel_release(struct sock *sk);
-extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
-extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
-extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
-extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
- const struct netlink_ext_ack *extack);
-extern int netlink_has_listeners(struct sock *sk, unsigned int group);
-
-extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
-extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
- __u32 group, gfp_t allocation);
-extern int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
- __u32 portid, __u32 group, gfp_t allocation,
- int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
- void *filter_data);
-extern int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
-extern int netlink_register_notifier(struct notifier_block *nb);
-extern int netlink_unregister_notifier(struct notifier_block *nb);
+#define NL_REQ_ATTR_CHECK(extack, nest, tb, type) ({ \
+ struct nlattr **__tb = (tb); \
+ u32 __attr = (type); \
+ int __retval; \
+ \
+ __retval = !__tb[__attr]; \
+ if (__retval) \
+ NL_SET_ERR_ATTR_MISS((extack), (nest), __attr); \
+ __retval; \
+})
+
+static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack,
+ u64 cookie)
+{
+ if (!extack)
+ return;
+ memcpy(extack->cookie, &cookie, sizeof(cookie));
+ extack->cookie_len = sizeof(cookie);
+}
+
+void netlink_kernel_release(struct sock *sk);
+int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
+int netlink_change_ngroups(struct sock *sk, unsigned int groups);
+void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
+void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
+ const struct netlink_ext_ack *extack);
+int netlink_has_listeners(struct sock *sk, unsigned int group);
+bool netlink_strict_get_check(struct sk_buff *skb);
+
+int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
+int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
+ __u32 group, gfp_t allocation);
+int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
+int netlink_register_notifier(struct notifier_block *nb);
+int netlink_unregister_notifier(struct notifier_block *nb);
/* finegrained unicast helpers: */
struct sock *netlink_getsockbyfilp(struct file *filp);
@@ -172,19 +272,32 @@ netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
struct netlink_callback {
struct sk_buff *skb;
const struct nlmsghdr *nlh;
- int (*start)(struct netlink_callback *);
int (*dump)(struct sk_buff * skb,
struct netlink_callback *cb);
int (*done)(struct netlink_callback *cb);
void *data;
/* the module that dump function belong to */
struct module *module;
+ struct netlink_ext_ack *extack;
u16 family;
- u16 min_dump_alloc;
+ u16 answer_flags;
+ u32 min_dump_alloc;
unsigned int prev_seq, seq;
- long args[6];
+ bool strict_check;
+ union {
+ u8 ctx[48];
+
+ /* args is deprecated. Cast a struct over ctx instead
+ * for proper type safety.
+ */
+ long args[6];
+ };
};
+#define NL_ASSERT_DUMP_CTX_FITS(type_name) \
+ BUILD_BUG_ON(sizeof(type_name) > \
+ sizeof_field(struct netlink_callback, ctx))
+
struct netlink_notify {
struct net *net;
u32 portid;
@@ -200,10 +313,10 @@ struct netlink_dump_control {
int (*done)(struct netlink_callback *);
void *data;
struct module *module;
- u16 min_dump_alloc;
+ u32 min_dump_alloc;
};
-extern int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
struct netlink_dump_control *control);
static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
@@ -222,8 +335,8 @@ struct netlink_tap {
struct list_head list;
};
-extern int netlink_add_tap(struct netlink_tap *nt);
-extern int netlink_remove_tap(struct netlink_tap *nt);
+int netlink_add_tap(struct netlink_tap *nt);
+int netlink_remove_tap(struct netlink_tap *nt);
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
struct user_namespace *ns, int cap);
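
The new helpers compose naturally in a request handler: NL_REQ_ATTR_CHECK() records a missing attribute in the extack, and NL_SET_ERR_MSG_FMT() finally allows formatted messages, bounded by NETLINK_MAX_FMTMSG_LEN. A sketch, with DEMO_ATTR_NAME as a hypothetical attribute type:

static int demo_validate(struct nlattr **tb, struct netlink_ext_ack *extack)
{
	/* Missing top-level attribute: miss_type is recorded for us,
	 * with a NULL miss_nest
	 */
	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, DEMO_ATTR_NAME))
		return -EINVAL;

	/* Formatted message: lands in extack->_msg_buf, truncated (and
	 * warned about) if it exceeds NETLINK_MAX_FMTMSG_LEN
	 */
	if (nla_len(tb[DEMO_ATTR_NAME]) > 32) {
		NL_SET_ERR_MSG_FMT(extack, "name too long: %d bytes",
				   nla_len(tb[DEMO_ATTR_NAME]));
		return -EINVAL;
	}
	return 0;
}
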
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 27c0aaa22cb0..bd19c4b91e31 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Common code for low-level network console, dump, and debugger code
*
@@ -23,6 +24,7 @@ union inet_addr {
struct netpoll {
struct net_device *dev;
+ netdevice_tracker dev_tracker;
char dev_name[IFNAMSIZ];
const char *name;
@@ -30,8 +32,6 @@ struct netpoll {
bool ipv6;
u16 local_port, remote_port;
u8 remote_mac[ETH_ALEN];
-
- struct work_struct cleanup_work;
};
struct netpoll_info {
@@ -48,8 +48,9 @@ struct netpoll_info {
};
#ifdef CONFIG_NETPOLL
-extern void netpoll_poll_disable(struct net_device *dev);
-extern void netpoll_poll_enable(struct net_device *dev);
+void netpoll_poll_dev(struct net_device *dev);
+void netpoll_poll_disable(struct net_device *dev);
+void netpoll_poll_enable(struct net_device *dev);
#else
static inline void netpoll_poll_disable(struct net_device *dev) { return; }
static inline void netpoll_poll_enable(struct net_device *dev) { return; }
@@ -61,17 +62,9 @@ int netpoll_parse_options(struct netpoll *np, char *opt);
int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
int netpoll_setup(struct netpoll *np);
void __netpoll_cleanup(struct netpoll *np);
-void __netpoll_free_async(struct netpoll *np);
+void __netpoll_free(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
-void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
- struct net_device *dev);
-static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
-{
- unsigned long flags;
- local_irq_save(flags);
- netpoll_send_skb_on_dev(np, skb, np->dev);
- local_irq_restore(flags);
-}
+netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
#ifdef CONFIG_NETPOLL
static inline void *netpoll_poll_lock(struct napi_struct *napi)
@@ -110,9 +103,6 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi)
static inline void netpoll_poll_unlock(void *have)
{
}
-static inline void netpoll_netdev_init(struct net_device *dev)
-{
-}
static inline bool netpoll_tx_running(struct net_device *dev)
{
return false;
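
With netpoll_send_skb() now returning netdev_tx_t (and handling IRQ state internally), callers can observe the transmit status instead of sending blind. A hedged sketch, where np is an already-configured struct netpoll:

static void demo_netpoll_emit(struct netpoll *np, struct sk_buff *skb)
{
	netdev_tx_t status = netpoll_send_skb(np, skb);

	if (status != NETDEV_TX_OK)
		pr_debug("netpoll tx failed: %d\n", status);
}
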
diff --git a/include/linux/nfs.h b/include/linux/nfs.h
index 610af5155ef2..ceb70a926b95 100644
--- a/include/linux/nfs.h
+++ b/include/linux/nfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* NFS protocol definitions
*
@@ -9,6 +10,7 @@
#include <linux/sunrpc/msg_prot.h>
#include <linux/string.h>
+#include <linux/crc32.h>
#include <uapi/linux/nfs.h>
/*
@@ -35,14 +37,6 @@ static inline void nfs_copy_fh(struct nfs_fh *target, const struct nfs_fh *sourc
memcpy(target->data, source->data, source->size);
}
-
-/*
- * This is really a general kernel constant, but since nothing like
- * this is defined in the kernel headers, I have to do it here.
- */
-#define NFS_OFFSET_MAX ((__s64)((~(__u64)0) >> 1))
-
-
enum nfs3_stable_how {
NFS_UNSTABLE = 0,
NFS_DATA_SYNC = 1,
@@ -51,4 +45,23 @@ enum nfs3_stable_how {
/* used by direct.c to mark verf as invalid */
NFS_INVALID_STABLE_HOW = -1
};
+
+#ifdef CONFIG_CRC32
+/**
+ * nfs_fhandle_hash - calculate the crc32 hash for the filehandle
+ * @fh - pointer to filehandle
+ * @fh: pointer to filehandle
+ *
+ * Returns a CRC32 hash for the filehandle that is compatible with
+ * the one displayed by Wireshark.
+static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh)
+{
+ return ~crc32_le(0xFFFFFFFF, &fh->data[0], fh->size);
+}
+#else /* CONFIG_CRC32 */
+static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh)
+{
+ return 0;
+}
+#endif /* CONFIG_CRC32 */
#endif /* _LINUX_NFS_H */
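
Because the hash matches the CRC32 that Wireshark displays for filehandles, it is handy in debug output: a hash printed by the client can be grepped directly in a capture. A small illustrative helper (demo_log_fh() is hypothetical):

static void demo_log_fh(const struct nfs_fh *fh)
{
	/* Falls back to 0 when CONFIG_CRC32 is not set, per above */
	pr_debug("NFS: fh hash=0x%08x size=%u\n",
		 nfs_fhandle_hash(fh), fh->size);
}
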
diff --git a/include/linux/nfs3.h b/include/linux/nfs3.h
index a778ad8e3afd..404b8f724fc9 100644
--- a/include/linux/nfs3.h
+++ b/include/linux/nfs3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* NFSv3 protocol definitions
*/
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 47239c336688..730003c4f4af 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/nfs4.h
*
@@ -15,6 +16,7 @@
#include <linux/list.h>
#include <linux/uidgid.h>
#include <uapi/linux/nfs4.h>
+#include <linux/sunrpc/msg_prot.h>
enum nfs4_acl_whotype {
NFS4_ACL_WHO_NAMED = 0,
@@ -36,7 +38,7 @@ struct nfs4_ace {
struct nfs4_acl {
uint32_t naces;
- struct nfs4_ace aces[0];
+ struct nfs4_ace aces[];
};
#define NFS4_MAXLABELLEN 2048
@@ -148,6 +150,12 @@ enum nfs_opnum4 {
OP_WRITE_SAME = 70,
OP_CLONE = 71,
+ /* xattr support (RFC8726) */
+ OP_GETXATTR = 72,
+ OP_SETXATTR = 73,
+ OP_LISTXATTRS = 74,
+ OP_REMOVEXATTR = 75,
+
OP_ILLEGAL = 10044,
};
@@ -157,7 +165,7 @@ Needs to be updated if more operations are defined in future.*/
#define FIRST_NFS4_OP OP_ACCESS
#define LAST_NFS40_OP OP_RELEASE_LOCKOWNER
#define LAST_NFS41_OP OP_RECLAIM_COMPLETE
-#define LAST_NFS42_OP OP_CLONE
+#define LAST_NFS42_OP OP_REMOVEXATTR
#define LAST_NFS4_OP LAST_NFS42_OP
enum nfsstat4 {
@@ -278,8 +286,16 @@ enum nfsstat4 {
NFS4ERR_WRONG_LFS = 10092,
NFS4ERR_BADLABEL = 10093,
NFS4ERR_OFFLOAD_NO_REQS = 10094,
+
+ /* xattr (RFC8276) */
+ NFS4ERR_NOXATTR = 10095,
+ NFS4ERR_XATTR2BIG = 10096,
};
+/* error codes for internal client use */
+#define NFS4ERR_RESET_TO_MDS 12001
+#define NFS4ERR_RESET_TO_PNFS 12002
+
static inline bool seqid_mutating_err(u32 err)
{
/* See RFC 7530, section 9.1.7 */
@@ -293,7 +309,7 @@ static inline bool seqid_mutating_err(u32 err)
case NFS4ERR_NOFILEHANDLE:
case NFS4ERR_MOVED:
return false;
- };
+ }
return true;
}
@@ -435,13 +451,17 @@ enum lock_type4 {
#define FATTR4_WORD1_TIME_MODIFY (1UL << 21)
#define FATTR4_WORD1_TIME_MODIFY_SET (1UL << 22)
#define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23)
+#define FATTR4_WORD1_DACL (1UL << 26)
+#define FATTR4_WORD1_SACL (1UL << 27)
#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30)
#define FATTR4_WORD2_LAYOUT_TYPES (1UL << 0)
#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4)
#define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13)
+#define FATTR4_WORD2_CHANGE_ATTR_TYPE (1UL << 15)
#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16)
#define FATTR4_WORD2_MODE_UMASK (1UL << 17)
+#define FATTR4_WORD2_XATTR_SUPPORT (1UL << 18)
/* MDS threshold bitmap bits */
#define THRESHOLD_RD (1UL << 0)
@@ -456,7 +476,12 @@ enum lock_type4 {
#define NFS4_DEBUG 1
-/* Index of predefined Linux client operations */
+/*
+ * Index of predefined Linux client operations
+ *
+ * To ensure that /proc/net/rpc/nfs remains correctly ordered, please
+ * append only to this enum when adding new client operations.
+ */
enum {
NFSPROC4_CLNT_NULL = 0, /* Unused */
@@ -479,7 +504,6 @@ enum {
NFSPROC4_CLNT_ACCESS,
NFSPROC4_CLNT_GETATTR,
NFSPROC4_CLNT_LOOKUP,
- NFSPROC4_CLNT_LOOKUPP,
NFSPROC4_CLNT_LOOKUP_ROOT,
NFSPROC4_CLNT_REMOVE,
NFSPROC4_CLNT_RENAME,
@@ -499,7 +523,6 @@ enum {
NFSPROC4_CLNT_SECINFO,
NFSPROC4_CLNT_FSID_PRESENT,
- /* nfs41 */
NFSPROC4_CLNT_EXCHANGE_ID,
NFSPROC4_CLNT_CREATE_SESSION,
NFSPROC4_CLNT_DESTROY_SESSION,
@@ -517,13 +540,23 @@ enum {
NFSPROC4_CLNT_BIND_CONN_TO_SESSION,
NFSPROC4_CLNT_DESTROY_CLIENTID,
- /* nfs42 */
NFSPROC4_CLNT_SEEK,
NFSPROC4_CLNT_ALLOCATE,
NFSPROC4_CLNT_DEALLOCATE,
NFSPROC4_CLNT_LAYOUTSTATS,
NFSPROC4_CLNT_CLONE,
NFSPROC4_CLNT_COPY,
+ NFSPROC4_CLNT_OFFLOAD_CANCEL,
+
+ NFSPROC4_CLNT_LOOKUPP,
+ NFSPROC4_CLNT_LAYOUTERROR,
+ NFSPROC4_CLNT_COPY_NOTIFY,
+
+ NFSPROC4_CLNT_GETXATTR,
+ NFSPROC4_CLNT_SETXATTR,
+ NFSPROC4_CLNT_LISTXATTRS,
+ NFSPROC4_CLNT_REMOVEXATTR,
+ NFSPROC4_CLNT_READ_PLUS,
};
/* nfs41 types */
@@ -645,6 +678,7 @@ enum pnfs_update_layout_reason {
PNFS_UPDATE_LAYOUT_BLOCKED,
PNFS_UPDATE_LAYOUT_INVALID_OPEN,
PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET,
+ PNFS_UPDATE_LAYOUT_EXIT,
};
#define NFS4_OP_MAP_NUM_LONGS \
@@ -658,4 +692,57 @@ struct nfs4_op_map {
} u;
};
+struct nfs42_netaddr {
+ char netid[RPCBIND_MAXNETIDLEN];
+ char addr[RPCBIND_MAXUADDRLEN + 1];
+ u32 netid_len;
+ u32 addr_len;
+};
+
+enum netloc_type4 {
+ NL4_NAME = 1,
+ NL4_URL = 2,
+ NL4_NETADDR = 3,
+};
+
+struct nl4_server {
+ enum netloc_type4 nl4_type;
+ union {
+ struct { /* NL4_NAME, NL4_URL */
+ int nl4_str_sz;
+ char nl4_str[NFS4_OPAQUE_LIMIT + 1];
+ };
+ struct nfs42_netaddr nl4_addr; /* NL4_NETADDR */
+ } u;
+};
+
+enum nfs4_change_attr_type {
+ NFS4_CHANGE_TYPE_IS_MONOTONIC_INCR = 0,
+ NFS4_CHANGE_TYPE_IS_VERSION_COUNTER = 1,
+ NFS4_CHANGE_TYPE_IS_VERSION_COUNTER_NOPNFS = 2,
+ NFS4_CHANGE_TYPE_IS_TIME_METADATA = 3,
+ NFS4_CHANGE_TYPE_IS_UNDEFINED = 4,
+};
+
+/*
+ * Options for setxattr. These match the flags for setxattr(2).
+ */
+enum nfs4_setxattr_options {
+ SETXATTR4_EITHER = 0,
+ SETXATTR4_CREATE = 1,
+ SETXATTR4_REPLACE = 2,
+};
+
+enum {
+ RCA4_TYPE_MASK_RDATA_DLG = 0,
+ RCA4_TYPE_MASK_WDATA_DLG = 1,
+ RCA4_TYPE_MASK_DIR_DLG = 2,
+ RCA4_TYPE_MASK_FILE_LAYOUT = 3,
+ RCA4_TYPE_MASK_BLK_LAYOUT = 4,
+ RCA4_TYPE_MASK_OBJ_LAYOUT_MIN = 8,
+ RCA4_TYPE_MASK_OBJ_LAYOUT_MAX = 9,
+ RCA4_TYPE_MASK_OTHER_LAYOUT_MIN = 12,
+ RCA4_TYPE_MASK_OTHER_LAYOUT_MAX = 15,
+};
+
#endif
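
Since enum nfs4_setxattr_options mirrors the setxattr(2) flags, the client-side translation is a direct mapping. A sketch of that conversion; demo_xattr_flags_to_nfs4() is illustrative, with XATTR_CREATE/XATTR_REPLACE coming from <uapi/linux/xattr.h>:

static u32 demo_xattr_flags_to_nfs4(int flags)
{
	if (flags & XATTR_CREATE)
		return SETXATTR4_CREATE;	/* fail if it already exists */
	if (flags & XATTR_REPLACE)
		return SETXATTR4_REPLACE;	/* fail if it doesn't exist */
	return SETXATTR4_EITHER;		/* create or replace */
}
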
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index e52cc55ac300..279262057a92 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/include/linux/nfs_fs.h
*
@@ -22,6 +23,7 @@
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rbtree.h>
+#include <linux/refcount.h>
#include <linux/rwsem.h>
#include <linux/wait.h>
@@ -29,6 +31,10 @@
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/clnt.h>
+#ifdef CONFIG_NFS_FSCACHE
+#include <linux/netfs.h>
+#endif
+
#include <linux/nfs.h>
#include <linux/nfs2.h>
#include <linux/nfs3.h>
@@ -39,9 +45,14 @@
#include <linux/mempool.h>
/*
- * These are the default flags for swap requests
+ * This is the default number of transports to different server IPs
*/
-#define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS)
+#define NFS_MAX_TRANSPORTS 16
+
+/*
+ * Size of the NFS directory verifier
+ */
+#define NFS_DIR_VERIFIER_SIZE 2
/*
* NFSv3/v4 Access mode cache entry
@@ -49,18 +60,21 @@
struct nfs_access_entry {
struct rb_node rb_node;
struct list_head lru;
- unsigned long jiffies;
- struct rpc_cred * cred;
- int mask;
+ kuid_t fsuid;
+ kgid_t fsgid;
+ struct group_info *group_info;
+ u64 timestamp;
+ __u32 mask;
struct rcu_head rcu_head;
};
struct nfs_lock_context {
- atomic_t count;
+ refcount_t count;
struct list_head list;
struct nfs_open_context *open_context;
fl_owner_t lockowner;
atomic_t io_count;
+ struct rcu_head rcu_head;
};
struct nfs4_state;
@@ -68,28 +82,35 @@ struct nfs_open_context {
struct nfs_lock_context lock_context;
fl_owner_t flock_owner;
struct dentry *dentry;
- struct rpc_cred *cred;
+ const struct cred *cred;
+ struct rpc_cred __rcu *ll_cred; /* low-level cred - use to check for expiry */
struct nfs4_state *state;
fmode_t mode;
unsigned long flags;
-#define NFS_CONTEXT_ERROR_WRITE (0)
-#define NFS_CONTEXT_RESEND_WRITES (1)
#define NFS_CONTEXT_BAD (2)
#define NFS_CONTEXT_UNLOCK (3)
+#define NFS_CONTEXT_FILE_OPEN (4)
int error;
struct list_head list;
struct nfs4_threshold *mdsthreshold;
+ struct rcu_head rcu_head;
};
struct nfs_open_dir_context {
struct list_head list;
- struct rpc_cred *cred;
+ atomic_t cache_hits;
+ atomic_t cache_misses;
unsigned long attr_gencount;
+ __be32 verf[NFS_DIR_VERIFIER_SIZE];
__u64 dir_cookie;
- __u64 dup_cookie;
- signed char duped;
+ __u64 last_cookie;
+ pgoff_t page_index;
+ unsigned int dtsize;
+ bool force_clear;
+ bool eof;
+ struct rcu_head rcu_head;
};
/*
@@ -99,6 +120,8 @@ struct nfs_delegation;
struct posix_acl;
+struct nfs4_xattr_cache;
+
/*
* nfs fs inode data in memory
*/
@@ -138,31 +161,72 @@ struct nfs_inode {
unsigned long attrtimeo_timestamp;
unsigned long attr_gencount;
- /* "Generation counter" for the attribute cache. This is
- * bumped whenever we update the metadata on the
- * server.
- */
- unsigned long cache_change_attribute;
struct rb_root access_cache;
struct list_head access_cache_entry_lru;
struct list_head access_cache_inode_lru;
- /*
- * This is the cookie verifier used for NFSv3 readdir
- * operations
- */
- __be32 cookieverf[2];
-
- unsigned long nrequests;
- struct nfs_mds_commit_info commit_info;
+ union {
+ /* Directory */
+ struct {
+ /* "Generation counter" for the attribute cache.
+ * This is bumped whenever we update the metadata
+ * on the server.
+ */
+ unsigned long cache_change_attribute;
+ /*
+ * This is the cookie verifier used for NFSv3 readdir
+ * operations
+ */
+ __be32 cookieverf[NFS_DIR_VERIFIER_SIZE];
+ /* Readers: in-flight sillydelete RPC calls */
+ /* Writers: rmdir */
+ struct rw_semaphore rmdir_sem;
+ };
+ /* Regular file */
+ struct {
+ atomic_long_t nrequests;
+ atomic_long_t redirtied_pages;
+ struct nfs_mds_commit_info commit_info;
+ struct mutex commit_mutex;
+ };
+ };
/* Open contexts for shared mmap writes */
struct list_head open_files;
- /* Readers: in-flight sillydelete RPC calls */
- /* Writers: rmdir */
- struct rw_semaphore rmdir_sem;
+ /* Keep track of out-of-order replies.
+ * The ooo array contains start/end pairs of
+ * numbers from the changeid sequence when
+ * the inode's iversion has been updated.
+	 * It also contains end/start pairs (i.e. in reverse order)
+	 * for sections of the changeid sequence that have
+	 * been seen in replies from the server.
+	 * Normally these should match, and when both
+	 * A:B and B:A are found in ooo, they are both removed.
+	 * And if a reply with A:B causes an iversion update
+	 * of A:B, then neither is added.
+	 * When a reply has a pre_change that doesn't match
+ * iversion, then the changeid pair and any consequent
+ * change in iversion ARE added. Later replies
+ * might fill in the gaps, or possibly a gap is caused
+ * by a change from another client.
+ * When a file or directory is opened, if the ooo table
+ * is not empty, then we assume the gaps were due to
+ * another client and we invalidate the cached data.
+ *
+ * We can only track a limited number of concurrent gaps.
+ * Currently that limit is 16.
+ * We allocate the table on demand. If there is insufficient
+ * memory, then we probably cannot cache the file anyway
+ * so there is no loss.
+ */
+ struct {
+ int cnt;
+ struct {
+ u64 start, end;
+ } gap[16];
+ } *ooo;
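/* A minimal sketch of the gap-cancellation rule described in the comment
 * above; demo_ooo_record() is illustrative, not the kernel's implementation,
 * and assumes nfsi->ooo has already been allocated. An iversion update
 * inserts A:B, a server reply inserts B:A, and a pair cancels against its
 * mirror; a full table means the caller gives up and invalidates the cache.
 */
static bool demo_ooo_record(struct nfs_inode *nfsi, u64 start, u64 end)
{
	int i;

	for (i = 0; i < nfsi->ooo->cnt; i++) {
		if (nfsi->ooo->gap[i].start == end &&
		    nfsi->ooo->gap[i].end == start) {
			/* Mirror pair found: both entries cancel out */
			nfsi->ooo->gap[i] = nfsi->ooo->gap[--nfsi->ooo->cnt];
			return true;
		}
	}
	if (nfsi->ooo->cnt >= ARRAY_SIZE(nfsi->ooo->gap))
		return false;	/* table full: invalidate cached data */
	nfsi->ooo->gap[nfsi->ooo->cnt].start = start;
	nfsi->ooo->gap[nfsi->ooo->cnt].end = end;
	nfsi->ooo->cnt++;
	return true;
}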
#if IS_ENABLED(CONFIG_NFS_V4)
struct nfs4_cached_acl *nfs4_acl;
@@ -177,33 +241,79 @@ struct nfs_inode {
/* how many bytes have been written/read and how many bytes queued up */
__u64 write_io;
__u64 read_io;
+#ifdef CONFIG_NFS_V4_2
+ struct nfs4_xattr_cache *xattr_cache;
+#endif
+ union {
+ struct inode vfs_inode;
#ifdef CONFIG_NFS_FSCACHE
- struct fscache_cookie *fscache;
+ struct netfs_inode netfs; /* netfs context and VFS inode */
#endif
- struct inode vfs_inode;
+ };
+};
+
+struct nfs4_copy_state {
+ struct list_head copies;
+ struct list_head src_copies;
+ nfs4_stateid stateid;
+ struct completion completion;
+ uint64_t count;
+ struct nfs_writeverf verf;
+ int error;
+ int flags;
+ struct nfs4_state *parent_src_state;
+ struct nfs4_state *parent_dst_state;
};
/*
+ * Access bit flags
+ */
+#define NFS_ACCESS_READ 0x0001
+#define NFS_ACCESS_LOOKUP 0x0002
+#define NFS_ACCESS_MODIFY 0x0004
+#define NFS_ACCESS_EXTEND 0x0008
+#define NFS_ACCESS_DELETE 0x0010
+#define NFS_ACCESS_EXECUTE 0x0020
+#define NFS_ACCESS_XAREAD 0x0040
+#define NFS_ACCESS_XAWRITE 0x0080
+#define NFS_ACCESS_XALIST 0x0100
+
+/*
* Cache validity bit flags
*/
-#define NFS_INO_INVALID_ATTR 0x0001 /* cached attrs are invalid */
-#define NFS_INO_INVALID_DATA 0x0002 /* cached data is invalid */
-#define NFS_INO_INVALID_ATIME 0x0004 /* cached atime is invalid */
-#define NFS_INO_INVALID_ACCESS 0x0008 /* cached access cred invalid */
-#define NFS_INO_INVALID_ACL 0x0010 /* cached acls are invalid */
-#define NFS_INO_REVAL_PAGECACHE 0x0020 /* must revalidate pagecache */
-#define NFS_INO_REVAL_FORCED 0x0040 /* force revalidation ignoring a delegation */
-#define NFS_INO_INVALID_LABEL 0x0080 /* cached label is invalid */
+#define NFS_INO_INVALID_DATA BIT(1) /* cached data is invalid */
+#define NFS_INO_INVALID_ATIME BIT(2) /* cached atime is invalid */
+#define NFS_INO_INVALID_ACCESS BIT(3) /* cached access cred invalid */
+#define NFS_INO_INVALID_ACL BIT(4) /* cached acls are invalid */
+#define NFS_INO_REVAL_FORCED BIT(6) /* force revalidation ignoring a delegation */
+#define NFS_INO_INVALID_LABEL BIT(7) /* cached label is invalid */
+#define NFS_INO_INVALID_CHANGE BIT(8) /* cached change is invalid */
+#define NFS_INO_INVALID_CTIME BIT(9) /* cached ctime is invalid */
+#define NFS_INO_INVALID_MTIME BIT(10) /* cached mtime is invalid */
+#define NFS_INO_INVALID_SIZE BIT(11) /* cached size is invalid */
+#define NFS_INO_INVALID_OTHER BIT(12) /* other attrs are invalid */
+#define NFS_INO_DATA_INVAL_DEFER \
+ BIT(13) /* Deferred cache invalidation */
+#define NFS_INO_INVALID_BLOCKS BIT(14) /* cached blocks are invalid */
+#define NFS_INO_INVALID_XATTR BIT(15) /* xattrs are invalid */
+#define NFS_INO_INVALID_NLINK BIT(16) /* cached nlinks is invalid */
+#define NFS_INO_INVALID_MODE BIT(17) /* cached mode is invalid */
+
+#define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \
+ | NFS_INO_INVALID_CTIME \
+ | NFS_INO_INVALID_MTIME \
+ | NFS_INO_INVALID_SIZE \
+ | NFS_INO_INVALID_NLINK \
+ | NFS_INO_INVALID_MODE \
+ | NFS_INO_INVALID_OTHER) /* inode metadata is invalid */
/*
* Bit offsets in flags field
*/
-#define NFS_INO_ADVISE_RDPLUS (0) /* advise readdirplus */
#define NFS_INO_STALE (1) /* possible stale inode */
#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */
#define NFS_INO_INVALIDATING (3) /* inode is being invalidated */
-#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */
-#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */
+#define NFS_INO_PRESERVE_UNLINKED (4) /* preserve file if removed while open */
#define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */
#define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */
#define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */
@@ -256,15 +366,6 @@ static inline int NFS_STALE(const struct inode *inode)
return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
}
-static inline struct fscache_cookie *nfs_i_fscache(struct inode *inode)
-{
-#ifdef CONFIG_NFS_FSCACHE
- return NFS_I(inode)->fscache;
-#else
- return NULL;
-#endif
-}
-
static inline __u64 NFS_FILEID(const struct inode *inode)
{
return NFS_I(inode)->fileid;
@@ -280,81 +381,64 @@ static inline void nfs_mark_for_revalidate(struct inode *inode)
struct nfs_inode *nfsi = NFS_I(inode);
spin_lock(&inode->i_lock);
- nfsi->cache_validity |= NFS_INO_INVALID_ATTR |
- NFS_INO_REVAL_PAGECACHE |
- NFS_INO_INVALID_ACCESS |
- NFS_INO_INVALID_ACL;
+ nfsi->cache_validity |= NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
+ NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME |
+ NFS_INO_INVALID_SIZE;
if (S_ISDIR(inode->i_mode))
nfsi->cache_validity |= NFS_INO_INVALID_DATA;
spin_unlock(&inode->i_lock);
}
-static inline int nfs_server_capable(struct inode *inode, int cap)
+static inline int nfs_server_capable(const struct inode *inode, int cap)
{
return NFS_SERVER(inode)->caps & cap;
}
-static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf)
-{
- dentry->d_time = verf;
-}
-
/**
* nfs_save_change_attribute - Returns the inode attribute change cookie
* @dir - pointer to parent directory inode
- * The "change attribute" is updated every time we finish an operation
- * that will result in a metadata change on the server.
+ * The "cache change attribute" is updated when we need to revalidate
+ * our dentry cache after a directory was seen to change on the server.
*/
static inline unsigned long nfs_save_change_attribute(struct inode *dir)
{
return NFS_I(dir)->cache_change_attribute;
}
-/**
- * nfs_verify_change_attribute - Detects NFS remote directory changes
- * @dir - pointer to parent directory inode
- * @chattr - previously saved change attribute
- * Return "false" if the verifiers doesn't match the change attribute.
- * This would usually indicate that the directory contents have changed on
- * the server, and that any dentries need revalidating.
- */
-static inline int nfs_verify_change_attribute(struct inode *dir, unsigned long chattr)
-{
- return chattr == NFS_I(dir)->cache_change_attribute;
-}
-
/*
* linux/fs/nfs/inode.c
*/
extern int nfs_sync_mapping(struct address_space *mapping);
extern void nfs_zap_mapping(struct inode *inode, struct address_space *mapping);
extern void nfs_zap_caches(struct inode *);
+extern void nfs_set_inode_stale(struct inode *inode);
extern void nfs_invalidate_atime(struct inode *);
extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *,
- struct nfs_fattr *, struct nfs4_label *);
+ struct nfs_fattr *);
struct inode *nfs_ilookup(struct super_block *sb, struct nfs_fattr *, struct nfs_fh *);
extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr);
-extern int nfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
-extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *);
+extern int nfs_getattr(struct mnt_idmap *, const struct path *,
+ struct kstat *, u32, unsigned int);
+extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *, const struct cred *);
extern void nfs_access_set_mask(struct nfs_access_entry *, u32);
-extern int nfs_permission(struct inode *, int);
+extern int nfs_permission(struct mnt_idmap *, struct inode *, int);
extern int nfs_open(struct inode *, struct file *);
extern int nfs_attribute_cache_expired(struct inode *inode);
-extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
+extern int nfs_revalidate_inode(struct inode *inode, unsigned long flags);
extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
+extern int nfs_clear_invalid_mapping(struct address_space *mapping);
extern bool nfs_mapping_need_revalidate_inode(struct inode *inode);
extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
extern int nfs_revalidate_mapping_rcu(struct inode *inode);
-extern int nfs_setattr(struct dentry *, struct iattr *);
+extern int nfs_setattr(struct mnt_idmap *, struct dentry *, struct iattr *);
extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *);
-extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
- struct nfs4_label *label);
+extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr);
extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
extern void put_nfs_open_context(struct nfs_open_context *ctx);
-extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode);
+extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, const struct cred *cred, fmode_t mode);
extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode, struct file *filp);
extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx);
extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx);
@@ -367,9 +451,22 @@ extern void nfs_fattr_set_barrier(struct nfs_fattr *fattr);
extern unsigned long nfs_inc_attr_generation_counter(void);
extern struct nfs_fattr *nfs_alloc_fattr(void);
+extern struct nfs_fattr *nfs_alloc_fattr_with_label(struct nfs_server *server);
+
+static inline void nfs4_label_free(struct nfs4_label *label)
+{
+#ifdef CONFIG_NFS_V4_SECURITY_LABEL
+ if (label) {
+ kfree(label->label);
+ kfree(label);
+ }
+#endif
+}
static inline void nfs_free_fattr(const struct nfs_fattr *fattr)
{
+ if (fattr)
+ nfs4_label_free(fattr->label);
kfree(fattr);
}
@@ -425,7 +522,7 @@ static inline struct nfs_open_context *nfs_file_open_context(struct file *filp)
return filp->private_data;
}
-static inline struct rpc_cred *nfs_file_cred(struct file *file)
+static inline const struct cred *nfs_file_cred(struct file *file)
{
if (file != NULL) {
struct nfs_open_context *ctx =
@@ -439,11 +536,11 @@ static inline struct rpc_cred *nfs_file_cred(struct file *file)
/*
* linux/fs/nfs/direct.c
*/
-extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *);
-extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
- struct iov_iter *iter);
-extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
- struct iov_iter *iter);
+int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter);
+ssize_t nfs_file_direct_read(struct kiocb *iocb,
+ struct iov_iter *iter, bool swap);
+ssize_t nfs_file_direct_write(struct kiocb *iocb,
+ struct iov_iter *iter, bool swap);
/*
* linux/fs/nfs/dir.c
@@ -452,10 +549,18 @@ extern const struct file_operations nfs_dir_operations;
extern const struct dentry_operations nfs_dentry_operations;
extern void nfs_force_lookup_revalidate(struct inode *dir);
+extern void nfs_set_verifier(struct dentry * dentry, unsigned long verf);
+#if IS_ENABLED(CONFIG_NFS_V4)
+extern void nfs_clear_verifier_delegated(struct inode *inode);
+#endif /* IS_ENABLED(CONFIG_NFS_V4) */
+extern struct dentry *nfs_add_or_obtain(struct dentry *dentry,
+ struct nfs_fh *fh, struct nfs_fattr *fattr);
extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh,
- struct nfs_fattr *fattr, struct nfs4_label *label);
-extern int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags);
+ struct nfs_fattr *fattr);
+extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags);
extern void nfs_access_zap_cache(struct inode *inode);
+extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred,
+ u32 *mask, bool may_block);
/*
* linux/fs/nfs/symlink.c
@@ -492,8 +597,9 @@ extern void nfs_complete_unlink(struct dentry *dentry, struct inode *);
extern int nfs_congestion_kb;
extern int nfs_writepage(struct page *page, struct writeback_control *wbc);
extern int nfs_writepages(struct address_space *, struct writeback_control *);
-extern int nfs_flush_incompatible(struct file *file, struct page *page);
-extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
+extern int nfs_flush_incompatible(struct file *file, struct folio *folio);
+extern int nfs_update_folio(struct file *file, struct folio *folio,
+ unsigned int offset, unsigned int count);
/*
* Try to write back everything synchronously (but check the
@@ -501,26 +607,25 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned
*/
extern int nfs_sync_inode(struct inode *inode);
extern int nfs_wb_all(struct inode *inode);
-extern int nfs_wb_page(struct inode *inode, struct page *page);
-extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
+extern int nfs_wb_folio(struct inode *inode, struct folio *folio);
+int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio);
extern int nfs_commit_inode(struct inode *, int);
-extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail);
+extern struct nfs_commit_data *nfs_commitdata_alloc(void);
extern void nfs_commit_free(struct nfs_commit_data *data);
+bool nfs_commit_end(struct nfs_mds_commit_info *cinfo);
-static inline int
-nfs_have_writebacks(struct inode *inode)
+static inline bool nfs_have_writebacks(const struct inode *inode)
{
- return NFS_I(inode)->nrequests != 0;
+ if (S_ISREG(inode->i_mode))
+ return atomic_long_read(&NFS_I(inode)->nrequests) != 0;
+ return false;
}
/*
* linux/fs/nfs/read.c
*/
-extern int nfs_readpage(struct file *, struct page *);
-extern int nfs_readpages(struct file *, struct address_space *,
- struct list_head *, unsigned);
-extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
- struct page *);
+int nfs_read_folio(struct file *, struct folio *);
+void nfs_readahead(struct readahead_control *);
/*
* inline functions
@@ -540,8 +645,31 @@ nfs_fileid_to_ino_t(u64 fileid)
return ino;
}
+static inline void nfs_ooo_clear(struct nfs_inode *nfsi)
+{
+ nfsi->cache_validity &= ~NFS_INO_DATA_INVAL_DEFER;
+ kfree(nfsi->ooo);
+ nfsi->ooo = NULL;
+}
+
+static inline bool nfs_ooo_test(struct nfs_inode *nfsi)
+{
+ return (nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER) ||
+ (nfsi->ooo && nfsi->ooo->cnt > 0);
+}
+
#define NFS_JUKEBOX_RETRY_TIME (5 * HZ)
+/* We need to block new opens while a file is being unlinked.
+ * If it is opened *before* we decide to unlink, we will silly-rename
+ * instead. If it is opened *after*, the open either needs to create
+ * the file, or it will fail.
+ * If we allow the two to race, we could end up with a file that is open
+ * but deleted on the server resulting in ESTALE.
+ * So use ->d_fsdata to record when the unlink is happening
+ * and block dentry revalidation while it is set.
+ */
+#define NFS_FSDATA_BLOCKED ((void*)1)
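A revalidation path might honour the marker roughly as follows; this is a sketch of the convention described above, not the actual fs/nfs/dir.c code:

    /* In a d_revalidate-style path: back off while an unlink is in flight. */
    if (dentry->d_fsdata == NFS_FSDATA_BLOCKED) {
            if (flags & LOOKUP_RCU)
                    return -ECHILD;        /* cannot sleep during RCU walk */
            return 0;                      /* treat as invalid; retry the lookup */
    }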
# undef ifdebug
# ifdef NFS_DEBUG
diff --git a/include/linux/nfs_fs_i.h b/include/linux/nfs_fs_i.h
index a5c50d97341e..98f9268fcfc2 100644
--- a/include/linux/nfs_fs_i.h
+++ b/include/linux/nfs_fs_i.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NFS_FS_I
#define _NFS_FS_I
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 74c44665e6d3..ea2f7e6b1b0b 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NFS_FS_SB
#define _NFS_FS_SB
@@ -9,6 +10,7 @@
#include <linux/sunrpc/xprt.h>
#include <linux/atomic.h>
+#include <linux/refcount.h>
struct nfs4_session;
struct nfs_iostats;
@@ -24,7 +26,7 @@ struct nfs41_impl_id;
* The nfs_client identifies our client state to the server.
*/
struct nfs_client {
- atomic_t cl_count;
+ refcount_t cl_count;
atomic_t cl_mds_count;
int cl_cons_state; /* current construction state (-ve: init error) */
#define NFS_CS_READY 0 /* ready to be used */
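The atomic_t to refcount_t switch for cl_count buys saturation semantics: refcount_t WARNs and saturates on overflow/underflow instead of silently wrapping. The usual conversion pattern looks like this (sketch; the destructor name is illustrative):

    refcount_set(&clp->cl_count, 1);               /* was atomic_set()          */
    refcount_inc(&clp->cl_count);                  /* was atomic_inc()          */
    if (refcount_dec_and_test(&clp->cl_count))     /* was atomic_dec_and_test() */
            free_client(clp);                      /* illustrative destructor   */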
@@ -43,6 +45,9 @@ struct nfs_client {
#define NFS_CS_INFINITE_SLOTS 3 /* - don't limit TCP slots */
#define NFS_CS_NO_RETRANS_TIMEOUT 4 /* - Disable retransmit timeouts */
#define NFS_CS_TSM_POSSIBLE 5 /* - Maybe state migration */
+#define NFS_CS_NOPING 6 /* - don't ping on connect */
+#define NFS_CS_DS 7 /* - Server is a DS */
+#define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
@@ -56,7 +61,9 @@ struct nfs_client {
struct nfs_subversion * cl_nfs_mod; /* pointer to nfs version module */
u32 cl_minorversion;/* NFSv4 minorversion */
- struct rpc_cred *cl_machine_cred;
+ unsigned int cl_nconnect; /* Number of connections */
+ unsigned int cl_max_connect; /* max number of xprts allowed */
+ const char * cl_principal; /* used for machine cred */
#if IS_ENABLED(CONFIG_NFS_V4)
struct list_head cl_ds_clients; /* auth flavor data servers */
@@ -113,12 +120,8 @@ struct nfs_client {
* This is used to generate the mv0 callback address.
*/
char cl_ipaddr[48];
-
-#ifdef CONFIG_NFS_FSCACHE
- struct fscache_cookie *fscache; /* client index cache cookie */
-#endif
-
struct net *cl_net;
+ struct list_head pending_cb_stateids;
};
/*
@@ -135,7 +138,23 @@ struct nfs_server {
struct nlm_host *nlm_host; /* NLM client handle */
struct nfs_iostats __percpu *io_stats; /* I/O statistics */
atomic_long_t writeback; /* number of writeback pages */
- int flags; /* various flags */
+ unsigned int write_congested;/* flag set when writeback gets too high */
+ unsigned int flags; /* various flags */
+
+/* The following are for internal use only. Also see uapi/linux/nfs_mount.h */
+#define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000
+#define NFS_MOUNT_LOOKUP_CACHE_NONE 0x20000
+#define NFS_MOUNT_NORESVPORT 0x40000
+#define NFS_MOUNT_LEGACY_INTERFACE 0x80000
+#define NFS_MOUNT_LOCAL_FLOCK 0x100000
+#define NFS_MOUNT_LOCAL_FCNTL 0x200000
+#define NFS_MOUNT_SOFTERR 0x400000
+#define NFS_MOUNT_SOFTREVAL 0x800000
+#define NFS_MOUNT_WRITE_EAGER 0x01000000
+#define NFS_MOUNT_WRITE_WAIT 0x02000000
+#define NFS_MOUNT_TRUNK_DISCOVERY 0x04000000
+
+ unsigned int fattr_valid; /* Valid attributes */
unsigned int caps; /* server capabilities */
unsigned int rsize; /* read size */
unsigned int rpages; /* read size (in pages) */
@@ -145,6 +164,11 @@ struct nfs_server {
unsigned int dtsize; /* readdir size */
unsigned short port; /* "port=" setting */
unsigned int bsize; /* server block size */
+#ifdef CONFIG_NFS_V4_2
+ unsigned int gxasize; /* getxattr size */
+ unsigned int sxasize; /* setxattr size */
+ unsigned int lxasize; /* listxattr size */
+#endif
unsigned int acregmin; /* attr cache timeouts */
unsigned int acregmax;
unsigned int acdirmin;
@@ -155,17 +179,20 @@ struct nfs_server {
#define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */
#define NFS_OPTION_MIGRATION 0x00000002 /* - NFSv4 migration enabled */
+ enum nfs4_change_attr_type
+ change_attr_type;/* Description of change attribute */
+
struct nfs_fsid fsid;
__u64 maxfilesize; /* maximum file size */
- struct timespec time_delta; /* smallest time granularity */
+ struct timespec64 time_delta; /* smallest time granularity */
unsigned long mount_time; /* when this fs was mounted */
struct super_block *super; /* VFS super block */
dev_t s_dev; /* superblock dev numbers */
struct nfs_auth_info auth_info; /* parsed auth flavors */
#ifdef CONFIG_NFS_FSCACHE
- struct nfs_fscache_key *fscache_key; /* unique key for superblock */
- struct fscache_cookie *fscache; /* superblock cookie */
+ struct fscache_volume *fscache; /* superblock cookie */
+ char *fscache_uniq; /* Uniquifier (or NULL) */
#endif
u32 pnfs_blksize; /* layout_blksize attr */
@@ -206,6 +233,7 @@ struct nfs_server {
struct list_head state_owners_lru;
struct list_head layouts;
struct list_head delegations;
+ struct list_head ss_copies;
unsigned long mig_gen;
unsigned long mig_status;
@@ -224,6 +252,13 @@ struct nfs_server {
unsigned short mountd_port;
unsigned short mountd_protocol;
struct rpc_wait_queue uoc_rpcwaitq;
+
+ /* XDR related information */
+ unsigned int read_hdrsize;
+
+ /* User namespace info */
+ const struct cred *cred;
+ bool has_sec_mnt_opts;
};
/* Server capabilities */
@@ -232,15 +267,9 @@ struct nfs_server {
#define NFS_CAP_SYMLINKS (1U << 2)
#define NFS_CAP_ACLS (1U << 3)
#define NFS_CAP_ATOMIC_OPEN (1U << 4)
-/* #define NFS_CAP_CHANGE_ATTR (1U << 5) */
-#define NFS_CAP_FILEID (1U << 6)
-#define NFS_CAP_MODE (1U << 7)
-#define NFS_CAP_NLINK (1U << 8)
-#define NFS_CAP_OWNER (1U << 9)
-#define NFS_CAP_OWNER_GROUP (1U << 10)
-#define NFS_CAP_ATIME (1U << 11)
-#define NFS_CAP_CTIME (1U << 12)
-#define NFS_CAP_MTIME (1U << 13)
+#define NFS_CAP_LGOPEN (1U << 5)
+#define NFS_CAP_CASE_INSENSITIVE (1U << 6)
+#define NFS_CAP_CASE_PRESERVING (1U << 7)
#define NFS_CAP_POSIX_LOCK (1U << 14)
#define NFS_CAP_UIDGID_NOMAP (1U << 15)
#define NFS_CAP_STATEID_NFSV41 (1U << 16)
@@ -252,5 +281,11 @@ struct nfs_server {
#define NFS_CAP_LAYOUTSTATS (1U << 22)
#define NFS_CAP_CLONE (1U << 23)
#define NFS_CAP_COPY (1U << 24)
-
+#define NFS_CAP_OFFLOAD_CANCEL (1U << 25)
+#define NFS_CAP_LAYOUTERROR (1U << 26)
+#define NFS_CAP_COPY_NOTIFY (1U << 27)
+#define NFS_CAP_XATTR (1U << 28)
+#define NFS_CAP_READ_PLUS (1U << 29)
+#define NFS_CAP_FS_LOCATIONS (1U << 30)
+#define NFS_CAP_MOVEABLE (1U << 31)
#endif
diff --git a/include/linux/nfs_iostat.h b/include/linux/nfs_iostat.h
index 9dcbbe9a51fb..8d946089d151 100644
--- a/include/linux/nfs_iostat.h
+++ b/include/linux/nfs_iostat.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* User-space visible declarations for NFS client per-mount
* point statistics
@@ -118,16 +119,4 @@ enum nfs_stat_eventcounters {
__NFSIOS_COUNTSMAX,
};
-/*
- * NFS local caching servicing counters
- */
-enum nfs_stat_fscachecounters {
- NFSIOS_FSCACHE_PAGES_READ_OK,
- NFSIOS_FSCACHE_PAGES_READ_FAIL,
- NFSIOS_FSCACHE_PAGES_WRITTEN_OK,
- NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL,
- NFSIOS_FSCACHE_PAGES_UNCACHED,
- __NFSIOS_FSCACHEMAX,
-};
-
#endif /* _LINUX_NFS_IOSTAT */
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index d67b67ae6c8b..aa9f4c6ebe26 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/include/linux/nfs_page.h
*
@@ -24,6 +25,7 @@
enum {
PG_BUSY = 0, /* nfs_{un}lock_request */
PG_MAPPED, /* page private set for buffered io */
+ PG_FOLIO, /* Tracking a folio (unset for O_DIRECT) */
PG_CLEAN, /* write succeeded */
PG_COMMIT_TO_DS, /* used by pnfs layouts */
PG_INODE_REF, /* extra ref held by inode when in writeback */
@@ -40,8 +42,10 @@ enum {
struct nfs_inode;
struct nfs_page {
struct list_head wb_list; /* Defines state of page: */
- struct page *wb_page; /* page to read in/write out */
- struct nfs_open_context *wb_context; /* File state context info */
+ union {
+ struct page *wb_page; /* page to read in/write out */
+ struct folio *wb_folio;
+ };
struct nfs_lock_context *wb_lock_context; /* lock context info */
pgoff_t wb_index; /* Offset >> PAGE_SHIFT */
unsigned int wb_offset, /* Offset & ~PAGE_MASK */
@@ -52,8 +56,10 @@ struct nfs_page {
struct nfs_write_verifier wb_verf; /* Commit cookie */
struct nfs_page *wb_this_page; /* list of reqs for this page */
struct nfs_page *wb_head; /* head pointer for req list */
+ unsigned short wb_nio; /* Number of I/O attempts */
};
+struct nfs_pgio_mirror;
struct nfs_pageio_descriptor;
struct nfs_pageio_ops {
void (*pg_init)(struct nfs_pageio_descriptor *, struct nfs_page *);
@@ -63,6 +69,9 @@ struct nfs_pageio_ops {
unsigned int (*pg_get_mirror_count)(struct nfs_pageio_descriptor *,
struct nfs_page *);
void (*pg_cleanup)(struct nfs_pageio_descriptor *);
+ struct nfs_pgio_mirror *
+ (*pg_get_mirror)(struct nfs_pageio_descriptor *, u32);
+ u32 (*pg_set_mirror)(struct nfs_pageio_descriptor *, u32);
};
struct nfs_rw_ops {
@@ -86,7 +95,6 @@ struct nfs_pgio_mirror {
};
struct nfs_pageio_descriptor {
- unsigned char pg_moreio : 1;
struct inode *pg_inode;
const struct nfs_pageio_ops *pg_ops;
const struct nfs_rw_ops *pg_rw_ops;
@@ -97,6 +105,9 @@ struct nfs_pageio_descriptor {
struct pnfs_layout_segment *pg_lseg;
struct nfs_io_completion *pg_io_completion;
struct nfs_direct_req *pg_dreq;
+#ifdef CONFIG_NFS_FSCACHE
+ void *pg_netfs;
+#endif
unsigned int pg_bsize; /* default bsize for mirrors */
u32 pg_mirror_count;
@@ -104,6 +115,8 @@ struct nfs_pageio_descriptor {
struct nfs_pgio_mirror pg_mirrors_static[1];
struct nfs_pgio_mirror *pg_mirrors_dynamic;
u32 pg_mirror_idx; /* current mirror */
+ unsigned short pg_maxretrans;
+ unsigned char pg_moreio : 1;
};
/* arbitrarily selected limit to number of mirrors */
@@ -111,11 +124,15 @@ struct nfs_pageio_descriptor {
#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
-extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx,
- struct page *page,
- struct nfs_page *last,
- unsigned int offset,
- unsigned int count);
+extern struct nfs_page *nfs_page_create_from_page(struct nfs_open_context *ctx,
+ struct page *page,
+ unsigned int pgbase,
+ loff_t offset,
+ unsigned int count);
+extern struct nfs_page *nfs_page_create_from_folio(struct nfs_open_context *ctx,
+ struct folio *folio,
+ unsigned int offset,
+ unsigned int count);
extern void nfs_release_request(struct nfs_page *);
@@ -125,8 +142,7 @@ extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
const struct nfs_pgio_completion_ops *compl_ops,
const struct nfs_rw_ops *rw_ops,
size_t bsize,
- int how,
- gfp_t gfp_flags);
+ int how);
extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *,
struct nfs_page *);
extern int nfs_pageio_resend(struct nfs_pageio_descriptor *,
@@ -139,12 +155,76 @@ extern size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
extern int nfs_wait_on_request(struct nfs_page *);
extern void nfs_unlock_request(struct nfs_page *req);
extern void nfs_unlock_and_release_request(struct nfs_page *);
-extern int nfs_page_group_lock(struct nfs_page *, bool);
-extern void nfs_page_group_lock_wait(struct nfs_page *);
+extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req);
+extern int nfs_page_group_lock_subrequests(struct nfs_page *head);
+extern void nfs_join_page_group(struct nfs_page *head, struct inode *inode);
+extern int nfs_page_group_lock(struct nfs_page *);
extern void nfs_page_group_unlock(struct nfs_page *);
extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
+extern int nfs_page_set_headlock(struct nfs_page *req);
+extern void nfs_page_clear_headlock(struct nfs_page *req);
extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
+/**
+ * nfs_page_to_folio - Retrieve a struct folio for the request
+ * @req: pointer to a struct nfs_page
+ *
+ * If a folio was assigned to @req, then return it, otherwise return NULL.
+ */
+static inline struct folio *nfs_page_to_folio(const struct nfs_page *req)
+{
+ if (test_bit(PG_FOLIO, &req->wb_flags))
+ return req->wb_folio;
+ return NULL;
+}
+
+/**
+ * nfs_page_to_page - Retrieve a struct page for the request
+ * @req: pointer to a struct nfs_page
+ * @pgbase: folio byte offset
+ *
+ * Return the page containing the byte that is at offset @pgbase relative
+ * to the start of the folio.
+ * Note: The request starts at offset @req->wb_pgbase.
+ */
+static inline struct page *nfs_page_to_page(const struct nfs_page *req,
+ size_t pgbase)
+{
+ struct folio *folio = nfs_page_to_folio(req);
+
+ if (folio == NULL)
+ return req->wb_page;
+ return folio_page(folio, pgbase >> PAGE_SHIFT);
+}
+
+/**
+ * nfs_page_to_inode - Retrieve an inode for the request
+ * @req: pointer to a struct nfs_page
+ */
+static inline struct inode *nfs_page_to_inode(const struct nfs_page *req)
+{
+ struct folio *folio = nfs_page_to_folio(req);
+
+ if (folio == NULL)
+ return page_file_mapping(req->wb_page)->host;
+ return folio_file_mapping(folio)->host;
+}
+
+/**
+ * nfs_page_max_length - Retrieve the maximum possible length for a request
+ * @req: pointer to a struct nfs_page
+ *
+ * Returns the maximum possible length of a request
+ */
+static inline size_t nfs_page_max_length(const struct nfs_page *req)
+{
+ struct folio *folio = nfs_page_to_folio(req);
+
+ if (folio == NULL)
+ return PAGE_SIZE;
+ return folio_size(folio);
+}
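A worked example for the helpers above, assuming PAGE_SIZE == 4096 and a four-page (16KiB) folio backing the request:

    struct page *p = nfs_page_to_page(req, 5000); /* 5000 >> PAGE_SHIFT == 1: second page */
    size_t max = nfs_page_max_length(req);        /* folio_size() == 16384, not PAGE_SIZE */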
+
/*
* Lock the page of an asynchronous request
*/
@@ -165,6 +245,16 @@ nfs_list_add_request(struct nfs_page *req, struct list_head *head)
list_add_tail(&req->wb_list, head);
}
+/**
+ * nfs_list_move_request - Move a request to a new list
+ * @req: request
+ * @head: head of list into which to insert the request.
+ */
+static inline void
+nfs_list_move_request(struct nfs_page *req, struct list_head *head)
+{
+ list_move_tail(&req->wb_list, head);
+}
/**
* nfs_list_remove_request - Remove a request from its wb_list
@@ -184,10 +274,15 @@ nfs_list_entry(struct list_head *head)
return list_entry(head, struct nfs_page, wb_list);
}
-static inline
-loff_t req_offset(struct nfs_page *req)
+static inline loff_t req_offset(const struct nfs_page *req)
{
return (((loff_t)req->wb_index) << PAGE_SHIFT) + req->wb_offset;
}
+static inline struct nfs_open_context *
+nfs_req_openctx(struct nfs_page *req)
+{
+ return req->wb_lock_context->open_context;
+}
+
#endif /* _LINUX_NFS_PAGE_H */
diff --git a/include/linux/nfs_ssc.h b/include/linux/nfs_ssc.h
new file mode 100644
index 000000000000..22265b1ff080
--- /dev/null
+++ b/include/linux/nfs_ssc.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * include/linux/nfs_ssc.h
+ *
+ * Author: Dai Ngo <dai.ngo@oracle.com>
+ *
+ * Copyright (c) 2020, Oracle and/or its affiliates.
+ */
+
+#include <linux/nfs_fs.h>
+#include <linux/sunrpc/svc.h>
+
+extern struct nfs_ssc_client_ops_tbl nfs_ssc_client_tbl;
+
+/*
+ * NFS_V4
+ */
+struct nfs4_ssc_client_ops {
+ struct file *(*sco_open)(struct vfsmount *ss_mnt,
+ struct nfs_fh *src_fh, nfs4_stateid *stateid);
+ void (*sco_close)(struct file *filep);
+};
+
+/*
+ * NFS_FS
+ */
+struct nfs_ssc_client_ops {
+ void (*sco_sb_deactive)(struct super_block *sb);
+};
+
+struct nfs_ssc_client_ops_tbl {
+ const struct nfs4_ssc_client_ops *ssc_nfs4_ops;
+ const struct nfs_ssc_client_ops *ssc_nfs_ops;
+};
+
+extern void nfs42_ssc_register_ops(void);
+extern void nfs42_ssc_unregister_ops(void);
+
+extern void nfs42_ssc_register(const struct nfs4_ssc_client_ops *ops);
+extern void nfs42_ssc_unregister(const struct nfs4_ssc_client_ops *ops);
+
+#ifdef CONFIG_NFSD_V4_2_INTER_SSC
+static inline struct file *nfs42_ssc_open(struct vfsmount *ss_mnt,
+ struct nfs_fh *src_fh, nfs4_stateid *stateid)
+{
+ if (nfs_ssc_client_tbl.ssc_nfs4_ops)
+ return (*nfs_ssc_client_tbl.ssc_nfs4_ops->sco_open)(ss_mnt, src_fh, stateid);
+ return ERR_PTR(-EIO);
+}
+
+static inline void nfs42_ssc_close(struct file *filep)
+{
+ if (nfs_ssc_client_tbl.ssc_nfs4_ops)
+ (*nfs_ssc_client_tbl.ssc_nfs4_ops->sco_close)(filep);
+}
+#endif
+
+struct nfsd4_ssc_umount_item {
+ struct list_head nsui_list;
+ bool nsui_busy;
+ /*
+	 * nsui_refcnt is initialized to 2: one reference held by the list
+	 * and one by the consumer. The entry is removed once the refcount
+	 * drops to 1 and nsui_expire has passed.
+ */
+ refcount_t nsui_refcnt;
+ unsigned long nsui_expire;
+ struct vfsmount *nsui_vfsmount;
+ char nsui_ipaddr[RPC_MAX_ADDRBUFLEN + 1];
+};
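The lifecycle that the nsui_refcnt comment describes would play out roughly like this (sketch; list locking elided):

    refcount_set(&nsui->nsui_refcnt, 2);   /* one ref for the list, one for the consumer */

    /* consumer finishes with the mount: */
    refcount_dec(&nsui->nsui_refcnt);

    /* reaper: drop the list reference only once idle and expired */
    if (refcount_read(&nsui->nsui_refcnt) == 1 &&
        time_after(jiffies, nsui->nsui_expire)) {
            list_del(&nsui->nsui_list);
            if (refcount_dec_and_test(&nsui->nsui_refcnt))
                    kfree(nsui);
    }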
+
+/*
+ * NFS_FS
+ */
+extern void nfs_ssc_register(const struct nfs_ssc_client_ops *ops);
+extern void nfs_ssc_unregister(const struct nfs_ssc_client_ops *ops);
+
+static inline void nfs_do_sb_deactive(struct super_block *sb)
+{
+ if (nfs_ssc_client_tbl.ssc_nfs_ops)
+ (*nfs_ssc_client_tbl.ssc_nfs_ops->sco_sb_deactive)(sb);
+}
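End to end, the table is meant to be filled in by the NFS client at module init and consumed by nfsd's inter-server copy path. A sketch, assuming CONFIG_NFSD_V4_2_INTER_SSC; my_ssc_open/my_ssc_close are hypothetical implementations:

    /* NFS client side, at init: */
    static const struct nfs4_ssc_client_ops my_nfs4_ssc_ops = {
            .sco_open  = my_ssc_open,
            .sco_close = my_ssc_close,
    };
    nfs42_ssc_register(&my_nfs4_ssc_ops);

    /* nfsd side, during an inter-server COPY: */
    struct file *src = nfs42_ssc_open(ss_mnt, src_fh, &stateid);
    if (IS_ERR(src))
            return PTR_ERR(src);    /* -EIO when no client ops are registered */
    /* read the source data, then: */
    nfs42_ssc_close(src);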
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index ca3bcc4ed4e5..29a1b39794bf 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NFS_XDR_H
#define _LINUX_NFS_XDR_H
@@ -14,6 +15,8 @@
#define NFS_DEF_FILE_IO_SIZE (4096U)
#define NFS_MIN_FILE_IO_SIZE (1024U)
+#define NFS_BITMASK_SZ 3
+
struct nfs4_string {
unsigned int len;
char *data;
@@ -61,19 +64,20 @@ struct nfs_fattr {
struct nfs_fsid fsid;
__u64 fileid;
__u64 mounted_on_fileid;
- struct timespec atime;
- struct timespec mtime;
- struct timespec ctime;
+ struct timespec64 atime;
+ struct timespec64 mtime;
+ struct timespec64 ctime;
__u64 change_attr; /* NFSv4 change attribute */
__u64 pre_change_attr;/* pre-op NFSv4 change attribute */
__u64 pre_size; /* pre_op_attr.size */
- struct timespec pre_mtime; /* pre_op_attr.mtime */
- struct timespec pre_ctime; /* pre_op_attr.ctime */
+ struct timespec64 pre_mtime; /* pre_op_attr.mtime */
+ struct timespec64 pre_ctime; /* pre_op_attr.ctime */
unsigned long time_start;
unsigned long gencount;
struct nfs4_string *owner_name;
struct nfs4_string *group_name;
struct nfs4_threshold *mdsthreshold; /* pNFS threshold hints */
+ struct nfs4_label *label;
};
#define NFS_ATTR_FATTR_TYPE (1U << 0)
@@ -142,12 +146,15 @@ struct nfs_fsinfo {
__u32 wtmult; /* writes should be multiple of this */
__u32 dtpref; /* pref. readdir transfer size */
__u64 maxfilesize;
- struct timespec time_delta; /* server time granularity */
+ struct timespec64 time_delta; /* server time granularity */
__u32 lease_time; /* in seconds */
__u32 nlayouttypes; /* number of layouttypes */
__u32 layouttype[NFS_MAX_LAYOUT_TYPES]; /* supported pnfs layout driver */
__u32 blksize; /* preferred pnfs io block size */
__u32 clone_blksize; /* granularity of a CLONE operation */
+ enum nfs4_change_attr_type
+ change_attr_type; /* Info about change attr */
+ __u32 xattr_support; /* User xattrs supported */
};
struct nfs_fsstat {
@@ -258,6 +265,7 @@ struct nfs4_layoutget_args {
struct nfs4_layoutget_res {
struct nfs4_sequence_res seq_res;
+ int status;
__u32 return_on_close;
struct pnfs_layout_range range;
__u32 type;
@@ -268,7 +276,8 @@ struct nfs4_layoutget_res {
struct nfs4_layoutget {
struct nfs4_layoutget_args args;
struct nfs4_layoutget_res res;
- struct rpc_cred *cred;
+ const struct cred *cred;
+ struct pnfs_layout_hdr *lo;
gfp_t gfp_flags;
};
@@ -307,7 +316,7 @@ struct nfs4_layoutcommit_data {
struct rpc_task task;
struct nfs_fattr fattr;
struct list_head lseg_list;
- struct rpc_cred *cred;
+ const struct cred *cred;
struct inode *inode;
struct nfs4_layoutcommit_args args;
struct nfs4_layoutcommit_res res;
@@ -332,7 +341,7 @@ struct nfs4_layoutreturn_res {
struct nfs4_layoutreturn {
struct nfs4_layoutreturn_args args;
struct nfs4_layoutreturn_res res;
- struct rpc_cred *cred;
+ const struct cred *cred;
struct nfs_client *clp;
struct inode *inode;
int rpc_status;
@@ -381,6 +390,41 @@ struct nfs42_layoutstat_data {
struct nfs42_layoutstat_res res;
};
+struct nfs42_device_error {
+ struct nfs4_deviceid dev_id;
+ int status;
+ enum nfs_opnum4 opnum;
+};
+
+struct nfs42_layout_error {
+ __u64 offset;
+ __u64 length;
+ nfs4_stateid stateid;
+ struct nfs42_device_error errors[1];
+};
+
+#define NFS42_LAYOUTERROR_MAX 5
+
+struct nfs42_layouterror_args {
+ struct nfs4_sequence_args seq_args;
+ struct inode *inode;
+ unsigned int num_errors;
+ struct nfs42_layout_error errors[NFS42_LAYOUTERROR_MAX];
+};
+
+struct nfs42_layouterror_res {
+ struct nfs4_sequence_res seq_res;
+ unsigned int num_errors;
+ int rpc_status;
+};
+
+struct nfs42_layouterror_data {
+ struct nfs42_layouterror_args args;
+ struct nfs42_layouterror_res res;
+ struct inode *inode;
+ struct pnfs_layout_segment *lseg;
+};
+
struct nfs42_clone_args {
struct nfs4_sequence_args seq_args;
struct nfs_fh *src_fh;
@@ -434,6 +478,7 @@ struct nfs_openargs {
enum createmode4 createmode;
const struct nfs4_label *label;
umode_t umask;
+ struct nfs4_layoutget_args *lg_args;
};
struct nfs_openres {
@@ -443,7 +488,6 @@ struct nfs_openres {
struct nfs4_change_info cinfo;
__u32 rflags;
struct nfs_fattr * f_attr;
- struct nfs4_label *f_label;
struct nfs_seqid * seqid;
const struct nfs_server *server;
fmode_t delegation_type;
@@ -456,6 +500,7 @@ struct nfs_openres {
__u32 access_request;
__u32 access_supported;
__u32 access_result;
+ struct nfs4_layoutget_res *lg_res;
};
/*
@@ -485,6 +530,7 @@ struct nfs_closeargs {
fmode_t fmode;
u32 share_access;
const u32 * bitmask;
+ u32 bitmask_store[NFS_BITMASK_SZ];
struct nfs4_layoutreturn_args *lr_args;
};
@@ -567,7 +613,8 @@ struct nfs4_delegreturnargs {
struct nfs4_sequence_args seq_args;
const struct nfs_fh *fhandle;
const nfs4_stateid *stateid;
- const u32 * bitmask;
+ const u32 *bitmask;
+ u32 bitmask_store[NFS_BITMASK_SZ];
struct nfs4_layoutreturn_args *lr_args;
};
@@ -604,19 +651,32 @@ struct nfs_pgio_args {
__u32 count;
unsigned int pgbase;
struct page ** pages;
- const u32 * bitmask; /* used by write */
- enum nfs3_stable_how stable; /* used by write */
+ union {
+ unsigned int replen; /* used by read */
+ struct {
+ const u32 * bitmask; /* used by write */
+ u32 bitmask_store[NFS_BITMASK_SZ]; /* used by write */
+ enum nfs3_stable_how stable; /* used by write */
+ };
+ };
};
struct nfs_pgio_res {
struct nfs4_sequence_res seq_res;
struct nfs_fattr * fattr;
- __u32 count;
+ __u64 count;
__u32 op_status;
- int eof; /* used by read */
- struct nfs_writeverf * verf; /* used by write */
- const struct nfs_server *server; /* used by write */
-
+ union {
+ struct {
+ unsigned int replen; /* used by read */
+ int eof; /* used by read */
+ void * scratch; /* used by read */
+ };
+ struct {
+ struct nfs_writeverf * verf; /* used by write */
+ const struct nfs_server *server; /* used by write */
+ };
+ };
};
/*
@@ -686,18 +746,30 @@ struct nfs_auth_info {
*/
struct nfs_entry {
__u64 ino;
- __u64 cookie,
- prev_cookie;
+ __u64 cookie;
const char * name;
unsigned int len;
int eof;
struct nfs_fh * fh;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
unsigned char d_type;
struct nfs_server * server;
};
+struct nfs_readdir_arg {
+ struct dentry *dentry;
+ const struct cred *cred;
+ __be32 *verf;
+ u64 cookie;
+ struct page **pages;
+ unsigned int page_len;
+ bool plus;
+};
+
+struct nfs_readdir_res {
+ __be32 *verf;
+};
+
/*
* The following types are for NFSv2 only.
*/
@@ -729,9 +801,17 @@ struct nfs_setattrargs {
const struct nfs4_label *label;
};
+enum nfs4_acl_type {
+ NFS4ACL_NONE = 0,
+ NFS4ACL_ACL,
+ NFS4ACL_DACL,
+ NFS4ACL_SACL,
+};
+
struct nfs_setaclargs {
struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
+ enum nfs4_acl_type acl_type;
size_t acl_len;
struct page ** acl_pages;
};
@@ -743,6 +823,7 @@ struct nfs_setaclres {
struct nfs_getaclargs {
struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
+ enum nfs4_acl_type acl_type;
size_t acl_len;
struct page ** acl_pages;
};
@@ -751,6 +832,7 @@ struct nfs_getaclargs {
#define NFS4_ACL_TRUNC 0x0001 /* ACL was truncated */
struct nfs_getaclres {
struct nfs4_sequence_res seq_res;
+ enum nfs4_acl_type acl_type;
size_t acl_len;
size_t acl_data_offset;
int acl_flags;
@@ -760,7 +842,6 @@ struct nfs_getaclres {
struct nfs_setattrres {
struct nfs4_sequence_res seq_res;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
const struct nfs_server * server;
};
@@ -819,7 +900,7 @@ struct nfs3_sattrargs {
struct nfs_fh * fh;
struct iattr * sattr;
unsigned int guard;
- struct timespec guardtime;
+ struct timespec64 guardtime;
};
struct nfs3_diropargs {
@@ -967,7 +1048,6 @@ struct nfs4_create_res {
const struct nfs_server * server;
struct nfs_fh * fh;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
struct nfs4_change_info dir_cinfo;
};
@@ -992,7 +1072,6 @@ struct nfs4_getattr_res {
struct nfs4_sequence_res seq_res;
const struct nfs_server * server;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
};
struct nfs4_link_arg {
@@ -1007,7 +1086,6 @@ struct nfs4_link_res {
struct nfs4_sequence_res seq_res;
const struct nfs_server * server;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
struct nfs4_change_info cinfo;
struct nfs_fattr * dir_attr;
};
@@ -1024,7 +1102,6 @@ struct nfs4_lookup_res {
const struct nfs_server * server;
struct nfs_fattr * fattr;
struct nfs_fh * fh;
- struct nfs4_label *label;
};
struct nfs4_lookupp_arg {
@@ -1038,7 +1115,6 @@ struct nfs4_lookupp_res {
const struct nfs_server *server;
struct nfs_fattr *fattr;
struct nfs_fh *fh;
- struct nfs4_label *label;
};
struct nfs4_lookup_root_arg {
@@ -1128,6 +1204,8 @@ struct nfs4_server_caps_res {
u32 has_links;
u32 has_symlinks;
u32 fh_expire_type;
+ u32 case_insensitive;
+ u32 case_preserving;
};
#define NFS4_PATHNAME_MAXCOMPONENTS 512
@@ -1145,7 +1223,7 @@ struct nfs4_fs_location {
#define NFS4_FS_LOCATIONS_MAXENTRIES 10
struct nfs4_fs_locations {
- struct nfs_fattr fattr;
+ struct nfs_fattr *fattr;
const struct nfs_server *server;
struct nfs4_pathname fs_path;
int nlocations;
@@ -1176,7 +1254,7 @@ struct nfs4_secinfo4 {
struct nfs4_secinfo_flavors {
unsigned int num_flavors;
- struct nfs4_secinfo4 flavors[0];
+ struct nfs4_secinfo4 flavors[];
};
struct nfs4_secinfo_arg {
@@ -1215,16 +1293,25 @@ struct nfstime4 {
struct pnfs_commit_bucket {
struct list_head written;
struct list_head committing;
- struct pnfs_layout_segment *wlseg;
- struct pnfs_layout_segment *clseg;
+ struct pnfs_layout_segment *lseg;
struct nfs_writeverf direct_verf;
};
+struct pnfs_commit_array {
+ struct list_head cinfo_list;
+ struct list_head lseg_list;
+ struct pnfs_layout_segment *lseg;
+ struct rcu_head rcu;
+ refcount_t refcount;
+ unsigned int nbuckets;
+ struct pnfs_commit_bucket buckets[];
+};
+
struct pnfs_ds_commit_info {
- int nwritten;
- int ncommitting;
- int nbuckets;
- struct pnfs_commit_bucket *buckets;
+ struct list_head commits;
+ unsigned int nwritten;
+ unsigned int ncommitting;
+ const struct pnfs_commit_ops *ops;
};
struct nfs41_state_protection {
@@ -1235,7 +1322,7 @@ struct nfs41_state_protection {
struct nfs41_exchange_id_args {
struct nfs_client *client;
- nfs4_verifier *verifier;
+ nfs4_verifier verifier;
u32 flags;
struct nfs41_state_protection state_protect;
};
@@ -1257,11 +1344,13 @@ struct nfs41_impl_id {
struct nfstime4 date;
};
+#define MAX_BIND_CONN_TO_SESSION_RETRIES 3
struct nfs41_bind_conn_to_session_args {
struct nfs_client *client;
struct nfs4_sessionid sessionid;
u32 dir;
bool use_conn_in_rdma_mode;
+ int retries;
};
struct nfs41_bind_conn_to_session_res {
@@ -1335,22 +1424,11 @@ struct nfs41_free_stateid_res {
unsigned int status;
};
-static inline void
-nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
-{
- kfree(cinfo->buckets);
-}
-
#else
struct pnfs_ds_commit_info {
};
-static inline void
-nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
-{
-}
-
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
@@ -1384,9 +1462,12 @@ struct nfs42_copy_args {
u64 dst_pos;
u64 count;
+ bool sync;
+ struct nl4_server *cp_src;
};
struct nfs42_write_res {
+ nfs4_stateid stateid;
u64 count;
struct nfs_writeverf verifier;
};
@@ -1399,6 +1480,34 @@ struct nfs42_copy_res {
struct nfs_commitres commit_res;
};
+struct nfs42_offload_status_args {
+ struct nfs4_sequence_args osa_seq_args;
+ struct nfs_fh *osa_src_fh;
+ nfs4_stateid osa_stateid;
+};
+
+struct nfs42_offload_status_res {
+ struct nfs4_sequence_res osr_seq_res;
+ uint64_t osr_count;
+ int osr_status;
+};
+
+struct nfs42_copy_notify_args {
+ struct nfs4_sequence_args cna_seq_args;
+
+ struct nfs_fh *cna_src_fh;
+ nfs4_stateid cna_src_stateid;
+ struct nl4_server cna_dst;
+};
+
+struct nfs42_copy_notify_res {
+ struct nfs4_sequence_res cnr_seq_res;
+
+ struct nfstime4 cnr_lease_time;
+ nfs4_stateid cnr_stateid;
+ struct nl4_server cnr_src;
+};
+
struct nfs42_seek_args {
struct nfs4_sequence_args seq_args;
@@ -1415,7 +1524,64 @@ struct nfs42_seek_res {
u32 sr_eof;
u64 sr_offset;
};
-#endif
+
+struct nfs42_setxattrargs {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh *fh;
+ const char *xattr_name;
+ u32 xattr_flags;
+ size_t xattr_len;
+ struct page **xattr_pages;
+};
+
+struct nfs42_setxattrres {
+ struct nfs4_sequence_res seq_res;
+ struct nfs4_change_info cinfo;
+};
+
+struct nfs42_getxattrargs {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh *fh;
+ const char *xattr_name;
+ size_t xattr_len;
+ struct page **xattr_pages;
+};
+
+struct nfs42_getxattrres {
+ struct nfs4_sequence_res seq_res;
+ size_t xattr_len;
+};
+
+struct nfs42_listxattrsargs {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh *fh;
+ u32 count;
+ u64 cookie;
+ struct page **xattr_pages;
+};
+
+struct nfs42_listxattrsres {
+ struct nfs4_sequence_res seq_res;
+ struct page *scratch;
+ void *xattr_buf;
+ size_t xattr_len;
+ u64 cookie;
+ bool eof;
+ size_t copied;
+};
+
+struct nfs42_removexattrargs {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh *fh;
+ const char *xattr_name;
+};
+
+struct nfs42_removexattrres {
+ struct nfs4_sequence_res seq_res;
+ struct nfs4_change_info cinfo;
+};
+
+#endif /* CONFIG_NFS_V4_2 */
struct nfs_page;
@@ -1433,12 +1599,15 @@ enum {
NFS_IOHDR_EOF,
NFS_IOHDR_REDO,
NFS_IOHDR_STAT,
+ NFS_IOHDR_RESEND_PNFS,
+ NFS_IOHDR_RESEND_MDS,
+ NFS_IOHDR_UNSTABLE_WRITES,
};
struct nfs_io_completion;
struct nfs_pgio_header {
struct inode *inode;
- struct rpc_cred *cred;
+ const struct cred *cred;
struct list_head pages;
struct nfs_page *req;
struct nfs_writeverf verf; /* Used for writes */
@@ -1451,11 +1620,13 @@ struct nfs_pgio_header {
const struct nfs_rw_ops *rw_ops;
struct nfs_io_completion *io_completion;
struct nfs_direct_req *dreq;
- spinlock_t lock;
- /* fields protected by lock */
+#ifdef CONFIG_NFS_FSCACHE
+ void *netfs;
+#endif
+
int pnfs_error;
int error; /* merge with pnfs_error */
- unsigned long good_bytes; /* boundary of good data */
+ unsigned int good_bytes; /* boundary of good data */
unsigned long flags;
/*
@@ -1470,13 +1641,13 @@ struct nfs_pgio_header {
__u64 mds_offset; /* Filelayout dense stripe */
struct nfs_page_array page_array;
struct nfs_client *ds_clp; /* pNFS data server */
- int ds_commit_idx; /* ds index if ds_clp is set */
- int pgio_mirror_idx;/* mirror index in pgio layer */
+ u32 ds_commit_idx; /* ds index if ds_clp is set */
+ u32 pgio_mirror_idx;/* mirror index in pgio layer */
};
struct nfs_mds_commit_info {
atomic_t rpcs_out;
- unsigned long ncommit;
+ atomic_long_t ncommit;
struct list_head list;
};
@@ -1499,7 +1670,7 @@ struct nfs_commit_info {
struct nfs_commit_data {
struct rpc_task task;
struct inode *inode;
- struct rpc_cred *cred;
+ const struct cred *cred;
struct nfs_fattr fattr;
struct nfs_writeverf verf;
struct list_head pages; /* Coalesced requests we wish to flush */
@@ -1519,7 +1690,7 @@ struct nfs_commit_data {
};
struct nfs_pgio_completion_ops {
- void (*error_cleanup)(struct list_head *head);
+ void (*error_cleanup)(struct list_head *head, int);
void (*init_hdr)(struct nfs_pgio_header *hdr);
void (*completion)(struct nfs_pgio_header *hdr);
void (*reschedule_io)(struct nfs_pgio_header *hdr);
@@ -1530,7 +1701,7 @@ struct nfs_unlinkdata {
struct nfs_removeres res;
struct dentry *dentry;
wait_queue_head_t wq;
- struct rpc_cred *cred;
+ const struct cred *cred;
struct nfs_fattr dir_attr;
long timeout;
};
@@ -1538,7 +1709,8 @@ struct nfs_unlinkdata {
struct nfs_renamedata {
struct nfs_renameargs args;
struct nfs_renameres res;
- struct rpc_cred *cred;
+ struct rpc_task task;
+ const struct cred *cred;
struct inode *old_dir;
struct dentry *old_dentry;
struct nfs_fattr old_fattr;
@@ -1557,6 +1729,7 @@ struct nfs_subversion;
struct nfs_mount_info;
struct nfs_client_initdata;
struct nfs_pageio_descriptor;
+struct fs_context;
/*
* RPC procedure vector for NFSv2/NFSv3 demuxing
@@ -1571,29 +1744,28 @@ struct nfs_rpc_ops {
int (*getroot) (struct nfs_server *, struct nfs_fh *,
struct nfs_fsinfo *);
- struct vfsmount *(*submount) (struct nfs_server *, struct dentry *,
- struct nfs_fh *, struct nfs_fattr *);
- struct dentry *(*try_mount) (int, const char *, struct nfs_mount_info *,
- struct nfs_subversion *);
+ int (*submount) (struct fs_context *, struct nfs_server *);
+ int (*try_get_tree) (struct fs_context *);
int (*getattr) (struct nfs_server *, struct nfs_fh *,
- struct nfs_fattr *, struct nfs4_label *);
+ struct nfs_fattr *, struct inode *);
int (*setattr) (struct dentry *, struct nfs_fattr *,
struct iattr *);
- int (*lookup) (struct inode *, const struct qstr *,
- struct nfs_fh *, struct nfs_fattr *,
- struct nfs4_label *);
+ int (*lookup) (struct inode *, struct dentry *,
+ struct nfs_fh *, struct nfs_fattr *);
int (*lookupp) (struct inode *, struct nfs_fh *,
- struct nfs_fattr *, struct nfs4_label *);
- int (*access) (struct inode *, struct nfs_access_entry *);
+ struct nfs_fattr *);
+ int (*access) (struct inode *, struct nfs_access_entry *, const struct cred *);
int (*readlink)(struct inode *, struct page *, unsigned int,
unsigned int);
int (*create) (struct inode *, struct dentry *,
struct iattr *, int);
- int (*remove) (struct inode *, const struct qstr *);
- void (*unlink_setup) (struct rpc_message *, struct inode *dir);
+ int (*remove) (struct inode *, struct dentry *);
+ void (*unlink_setup) (struct rpc_message *, struct dentry *, struct inode *);
void (*unlink_rpc_prepare) (struct rpc_task *, struct nfs_unlinkdata *);
int (*unlink_done) (struct rpc_task *, struct inode *);
- void (*rename_setup) (struct rpc_message *msg, struct inode *dir);
+ void (*rename_setup) (struct rpc_message *msg,
+ struct dentry *old_dentry,
+ struct dentry *new_dentry);
void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *);
int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir);
int (*link) (struct inode *, struct inode *, const struct qstr *);
@@ -1601,8 +1773,7 @@ struct nfs_rpc_ops {
unsigned int, struct iattr *);
int (*mkdir) (struct inode *, struct dentry *, struct iattr *);
int (*rmdir) (struct inode *, const struct qstr *);
- int (*readdir) (struct dentry *, struct rpc_cred *,
- u64, struct page **, unsigned int, bool);
+ int (*readdir) (struct nfs_readdir_arg *, struct nfs_readdir_res *);
int (*mknod) (struct inode *, struct dentry *, struct iattr *,
dev_t);
int (*statfs) (struct nfs_server *, struct nfs_fh *,
@@ -1617,9 +1788,11 @@ struct nfs_rpc_ops {
struct nfs_pgio_header *);
void (*read_setup)(struct nfs_pgio_header *, struct rpc_message *);
int (*read_done)(struct rpc_task *, struct nfs_pgio_header *);
- void (*write_setup)(struct nfs_pgio_header *, struct rpc_message *);
+ void (*write_setup)(struct nfs_pgio_header *, struct rpc_message *,
+ struct rpc_clnt **);
int (*write_done)(struct rpc_task *, struct nfs_pgio_header *);
- void (*commit_setup) (struct nfs_commit_data *, struct rpc_message *);
+ void (*commit_setup) (struct nfs_commit_data *, struct rpc_message *,
+ struct rpc_clnt **);
void (*commit_rpc_prepare)(struct rpc_task *, struct nfs_commit_data *);
int (*commit_done) (struct rpc_task *, struct nfs_commit_data *);
int (*lock)(struct file *, int, struct file_lock *);
@@ -1632,14 +1805,16 @@ struct nfs_rpc_ops {
struct iattr *iattr,
int *);
int (*have_delegation)(struct inode *, fmode_t);
- int (*return_delegation)(struct inode *);
struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *);
struct nfs_client *(*init_client) (struct nfs_client *,
const struct nfs_client_initdata *);
void (*free_client) (struct nfs_client *);
- struct nfs_server *(*create_server)(struct nfs_mount_info *, struct nfs_subversion *);
+ struct nfs_server *(*create_server)(struct fs_context *);
struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *,
struct nfs_fattr *, rpc_authflavor_t);
+ int (*discover_trunking)(struct nfs_server *, struct nfs_fh *);
+ void (*enable_swap)(struct inode *inode);
+ void (*disable_swap)(struct inode *inode);
};
/*
diff --git a/include/linux/nfsacl.h b/include/linux/nfsacl.h
index 5e69e67b31ab..8e76a79cdc6a 100644
--- a/include/linux/nfsacl.h
+++ b/include/linux/nfsacl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* File: linux/nfsacl.h
*
@@ -37,5 +38,11 @@ nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
extern int
nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt,
struct posix_acl **pacl);
+extern bool
+nfs_stream_decode_acl(struct xdr_stream *xdr, unsigned int *aclcnt,
+ struct posix_acl **pacl);
+extern bool
+nfs_stream_encode_acl(struct xdr_stream *xdr, struct inode *inode,
+ struct posix_acl *acl, int encode_entries, int typeflag);
#endif /* __LINUX_NFSACL_H */
diff --git a/include/linux/nitro_enclaves.h b/include/linux/nitro_enclaves.h
new file mode 100644
index 000000000000..d91ef2bfdf47
--- /dev/null
+++ b/include/linux/nitro_enclaves.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ */
+
+#ifndef _LINUX_NITRO_ENCLAVES_H_
+#define _LINUX_NITRO_ENCLAVES_H_
+
+#include <uapi/linux/nitro_enclaves.h>
+
+#endif /* _LINUX_NITRO_ENCLAVES_H_ */
diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h
index 0f6f6607f592..cbe5fd1dd2e7 100644
--- a/include/linux/nl802154.h
+++ b/include/linux/nl802154.h
@@ -1,22 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* nl802154.h
*
* Copyright (C) 2007, 2008, 2009 Siemens AG
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef NL802154_H
#define NL802154_H
+#include <net/netlink.h>
+
#define IEEE802154_NL_NAME "802.15.4 MAC"
#define IEEE802154_MCAST_COORD_NAME "coordinator"
#define IEEE802154_MCAST_BEACON_NAME "beacon"
diff --git a/include/linux/nls.h b/include/linux/nls.h
index 520681b68208..499e486b3722 100644
--- a/include/linux/nls.h
+++ b/include/linux/nls.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NLS_H
#define _LINUX_NLS_H
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 8aa01fd859fb..048c0b9aa623 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/include/linux/nmi.h
*/
@@ -12,11 +13,31 @@
#ifdef CONFIG_LOCKUP_DETECTOR
void lockup_detector_init(void);
+void lockup_detector_soft_poweroff(void);
+void lockup_detector_cleanup(void);
+bool is_hardlockup(void);
+
+extern int watchdog_user_enabled;
+extern int nmi_watchdog_user_enabled;
+extern int soft_watchdog_user_enabled;
+extern int watchdog_thresh;
+extern unsigned long watchdog_enabled;
+
+extern struct cpumask watchdog_cpumask;
+extern unsigned long *watchdog_cpumask_bits;
+#ifdef CONFIG_SMP
+extern int sysctl_softlockup_all_cpu_backtrace;
+extern int sysctl_hardlockup_all_cpu_backtrace;
#else
-static inline void lockup_detector_init(void)
-{
-}
-#endif
+#define sysctl_softlockup_all_cpu_backtrace 0
+#define sysctl_hardlockup_all_cpu_backtrace 0
+#endif /* !CONFIG_SMP */
+
+#else /* CONFIG_LOCKUP_DETECTOR */
+static inline void lockup_detector_init(void) { }
+static inline void lockup_detector_soft_poweroff(void) { }
+static inline void lockup_detector_cleanup(void) { }
+#endif /* !CONFIG_LOCKUP_DETECTOR */
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
@@ -24,29 +45,23 @@ extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern unsigned int softlockup_panic;
-extern int soft_watchdog_enabled;
-extern atomic_t watchdog_park_in_progress;
-#else
-static inline void touch_softlockup_watchdog_sched(void)
-{
-}
-static inline void touch_softlockup_watchdog(void)
-{
-}
-static inline void touch_softlockup_watchdog_sync(void)
-{
-}
-static inline void touch_all_softlockup_watchdogs(void)
-{
-}
-#endif
+
+extern int lockup_detector_online_cpu(unsigned int cpu);
+extern int lockup_detector_offline_cpu(unsigned int cpu);
+#else /* CONFIG_SOFTLOCKUP_DETECTOR */
+static inline void touch_softlockup_watchdog_sched(void) { }
+static inline void touch_softlockup_watchdog(void) { }
+static inline void touch_softlockup_watchdog_sync(void) { }
+static inline void touch_all_softlockup_watchdogs(void) { }
+
+#define lockup_detector_online_cpu NULL
+#define lockup_detector_offline_cpu NULL
+#endif /* CONFIG_SOFTLOCKUP_DETECTOR */
#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
-static inline void reset_hung_task_detector(void)
-{
-}
+static inline void reset_hung_task_detector(void) { }
#endif
/*
@@ -54,12 +69,12 @@ static inline void reset_hung_task_detector(void)
* 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
* bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
*
- * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
- * are variables that are only used as an 'interface' between the parameters
- * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
- * 'watchdog_thresh' variable is handled differently because its value is not
- * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
- * is equal zero.
+ * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and
+ * 'soft_watchdog_user_enabled' are variables that are only used as an
+ * 'interface' between the parameters in /proc/sys/kernel and the internal
+ * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
+ * handled differently because its value is not boolean, and the lockup
+ * detectors are 'suspended' while 'watchdog_thresh' equals zero.
*/
#define NMI_WATCHDOG_ENABLED_BIT 0
#define SOFT_WATCHDOG_ENABLED_BIT 1
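Concretely, the per-detector enable state reduces to bit tests against watchdog_enabled, along these lines (sketch; the real helpers live in kernel/watchdog.c):

    #define NMI_WATCHDOG_ENABLED  (1UL << NMI_WATCHDOG_ENABLED_BIT)
    #define SOFT_WATCHDOG_ENABLED (1UL << SOFT_WATCHDOG_ENABLED_BIT)

    static bool hardlockup_detector_wanted(void)
    {
            /* real code also treats watchdog_thresh == 0 as suspended */
            return watchdog_enabled & NMI_WATCHDOG_ENABLED;
    }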
@@ -73,17 +88,45 @@ extern unsigned int hardlockup_panic;
static inline void hardlockup_detector_disable(void) {}
#endif
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+# define NMI_WATCHDOG_SYSCTL_PERM 0644
+#else
+# define NMI_WATCHDOG_SYSCTL_PERM 0444
+#endif
+
#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
extern void arch_touch_nmi_watchdog(void);
+extern void hardlockup_detector_perf_stop(void);
+extern void hardlockup_detector_perf_restart(void);
+extern void hardlockup_detector_perf_disable(void);
+extern void hardlockup_detector_perf_enable(void);
+extern void hardlockup_detector_perf_cleanup(void);
+extern int hardlockup_detector_perf_init(void);
#else
-#if !defined(CONFIG_HAVE_NMI_WATCHDOG)
+static inline void hardlockup_detector_perf_stop(void) { }
+static inline void hardlockup_detector_perf_restart(void) { }
+static inline void hardlockup_detector_perf_disable(void) { }
+static inline void hardlockup_detector_perf_enable(void) { }
+static inline void hardlockup_detector_perf_cleanup(void) { }
+# if !defined(CONFIG_HAVE_NMI_WATCHDOG)
+static inline int hardlockup_detector_perf_init(void) { return -ENODEV; }
static inline void arch_touch_nmi_watchdog(void) {}
-#endif
+# else
+static inline int hardlockup_detector_perf_init(void) { return 0; }
+# endif
#endif
+void watchdog_nmi_stop(void);
+void watchdog_nmi_start(void);
+int watchdog_nmi_probe(void);
+int watchdog_nmi_enable(unsigned int cpu);
+void watchdog_nmi_disable(unsigned int cpu);
+
+void lockup_detector_reconfigure(void);
+
/**
* touch_nmi_watchdog - restart NMI watchdog timeout.
- *
+ *
* If the architecture supports the NMI watchdog, touch_nmi_watchdog()
* may be used to reset the timeout - for code which intentionally
* disables interrupts for a long time. This call is stateless.
@@ -153,48 +196,30 @@ static inline bool trigger_single_cpu_backtrace(int cpu)
u64 hw_nmi_get_sample_period(int watchdog_thresh);
#endif
-#ifdef CONFIG_LOCKUP_DETECTOR
-extern int nmi_watchdog_enabled;
-extern int watchdog_user_enabled;
-extern int watchdog_thresh;
-extern unsigned long watchdog_enabled;
-extern struct cpumask watchdog_cpumask;
-extern unsigned long *watchdog_cpumask_bits;
-extern int __read_mostly watchdog_suspended;
-#ifdef CONFIG_SMP
-extern int sysctl_softlockup_all_cpu_backtrace;
-extern int sysctl_hardlockup_all_cpu_backtrace;
+#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
+ defined(CONFIG_HARDLOCKUP_DETECTOR)
+void watchdog_update_hrtimer_threshold(u64 period);
#else
-#define sysctl_softlockup_all_cpu_backtrace 0
-#define sysctl_hardlockup_all_cpu_backtrace 0
+static inline void watchdog_update_hrtimer_threshold(u64 period) { }
#endif
-extern bool is_hardlockup(void);
-struct ctl_table;
-extern int proc_watchdog(struct ctl_table *, int ,
- void __user *, size_t *, loff_t *);
-extern int proc_nmi_watchdog(struct ctl_table *, int ,
- void __user *, size_t *, loff_t *);
-extern int proc_soft_watchdog(struct ctl_table *, int ,
- void __user *, size_t *, loff_t *);
-extern int proc_watchdog_thresh(struct ctl_table *, int ,
- void __user *, size_t *, loff_t *);
-extern int proc_watchdog_cpumask(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-extern int lockup_detector_suspend(void);
-extern void lockup_detector_resume(void);
-#else
-static inline int lockup_detector_suspend(void)
-{
- return 0;
-}
-static inline void lockup_detector_resume(void)
-{
-}
-#endif
+struct ctl_table;
+int proc_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_nmi_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *);
+int proc_soft_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *);
+int proc_watchdog_thresh(struct ctl_table *, int , void *, size_t *, loff_t *);
+int proc_watchdog_cpumask(struct ctl_table *, int, void *, size_t *, loff_t *);
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif
+#ifdef CONFIG_NMI_CHECK_CPU
+void nmi_backtrace_stall_snap(const struct cpumask *btp);
+void nmi_backtrace_stall_check(const struct cpumask *btp);
+#else
+static inline void nmi_backtrace_stall_snap(const struct cpumask *btp) {}
+static inline void nmi_backtrace_stall_check(const struct cpumask *btp) {}
+#endif
+
#endif
diff --git a/include/linux/node.h b/include/linux/node.h
index d1751beb462c..427a5975cf40 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -1,46 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/node.h - generic node definition
*
- * This is mainly for topological representation. We define the
- * basic 'struct node' here, which can be embedded in per-arch
+ * This is mainly for topological representation. We define the
+ * basic 'struct node' here, which can be embedded in per-arch
* definitions of processors.
*
* Basic handling of the devices is done in drivers/base/node.c
- * and system devices are handled in drivers/base/sys.c.
+ * and system devices are handled in drivers/base/sys.c.
*
* Nodes are exported via driverfs in the class/node/devices/
- * directory.
+ * directory.
*/
#ifndef _LINUX_NODE_H_
#define _LINUX_NODE_H_
#include <linux/device.h>
#include <linux/cpumask.h>
-#include <linux/workqueue.h>
+#include <linux/list.h>
+
+/**
+ * struct node_hmem_attrs - heterogeneous memory performance attributes
+ *
+ * @read_bandwidth: Read bandwidth in MB/s
+ * @write_bandwidth: Write bandwidth in MB/s
+ * @read_latency: Read latency in nanoseconds
+ * @write_latency: Write latency in nanoseconds
+ */
+struct node_hmem_attrs {
+ unsigned int read_bandwidth;
+ unsigned int write_bandwidth;
+ unsigned int read_latency;
+ unsigned int write_latency;
+};
+
+enum cache_indexing {
+ NODE_CACHE_DIRECT_MAP,
+ NODE_CACHE_INDEXED,
+ NODE_CACHE_OTHER,
+};
+
+enum cache_write_policy {
+ NODE_CACHE_WRITE_BACK,
+ NODE_CACHE_WRITE_THROUGH,
+ NODE_CACHE_WRITE_OTHER,
+};
+
+/**
+ * struct node_cache_attrs - system memory caching attributes
+ *
+ * @indexing: The ways memory blocks may be placed in cache
+ * @write_policy: Write back or write through policy
+ * @size: Total size of cache in bytes
+ * @line_size: Number of bytes fetched on a cache miss
+ * @level: The cache hierarchy level
+ */
+struct node_cache_attrs {
+ enum cache_indexing indexing;
+ enum cache_write_policy write_policy;
+ u64 size;
+ u16 line_size;
+ u8 level;
+};
+
+#ifdef CONFIG_HMEM_REPORTING
+void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs);
+void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
+ unsigned access);
+#else
+static inline void node_add_cache(unsigned int nid,
+ struct node_cache_attrs *cache_attrs)
+{
+}
+
+static inline void node_set_perf_attrs(unsigned int nid,
+ struct node_hmem_attrs *hmem_attrs,
+ unsigned access)
+{
+}
+#endif
struct node {
struct device dev;
-
-#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
- struct work_struct node_work;
+ struct list_head access_list;
+#ifdef CONFIG_HMEM_REPORTING
+ struct list_head cache_attrs;
+ struct device *cache_dev;
#endif
};
struct memory_block;
extern struct node *node_devices[];
-typedef void (*node_registration_func_t)(struct node *);
-#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA)
-extern int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages);
+#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_NUMA)
+void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
+ unsigned long end_pfn,
+ enum meminit_context context);
#else
-static inline int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)
+static inline void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
+ unsigned long end_pfn,
+ enum meminit_context context)
{
- return 0;
}
#endif
extern void unregister_node(struct node *node);
#ifdef CONFIG_NUMA
+extern void node_dev_init(void);
/* Core of the node registration - only memory hotplug should use this */
extern int __register_one_node(int nid);
@@ -51,12 +117,14 @@ static inline int register_one_node(int nid)
if (node_online(nid)) {
struct pglist_data *pgdat = NODE_DATA(nid);
+ unsigned long start_pfn = pgdat->node_start_pfn;
+ unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
error = __register_one_node(nid);
if (error)
return error;
- /* link memory sections under this node */
- error = link_mem_sections(nid, pgdat->node_start_pfn, pgdat->node_spanned_pages);
+ register_memory_blocks_under_node(nid, start_pfn, end_pfn,
+ MEMINIT_EARLY);
}
return error;
@@ -65,16 +133,15 @@ static inline int register_one_node(int nid)
extern void unregister_one_node(int nid);
extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
-extern int register_mem_sect_under_node(struct memory_block *mem_blk,
- int nid);
-extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
- unsigned long phys_index);
-
-#ifdef CONFIG_HUGETLBFS
-extern void register_hugetlbfs_with_node(node_registration_func_t doregister,
- node_registration_func_t unregister);
-#endif
+extern void unregister_memory_block_under_nodes(struct memory_block *mem_blk);
+
+extern int register_memory_node_under_compute_node(unsigned int mem_nid,
+ unsigned int cpu_nid,
+ unsigned access);
#else
+static inline void node_dev_init(void)
+{
+}
static inline int __register_one_node(int nid)
{
return 0;
@@ -95,19 +162,7 @@ static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
return 0;
}
-static inline int register_mem_sect_under_node(struct memory_block *mem_blk,
- int nid)
-{
- return 0;
-}
-static inline int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
- unsigned long phys_index)
-{
- return 0;
-}
-
-static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
- node_registration_func_t unreg)
+static inline void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
}
#endif
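
As a hedged usage sketch of the CONFIG_HMEM_REPORTING hooks added above: a
platform component such as an ACPI HMAT parser might report attributes like
this. The node id and all attribute values are made up for illustration.

#include <linux/node.h>
#include <linux/sizes.h>

static void example_report_node1_perf(void)
{
	struct node_hmem_attrs hmem = {
		.read_bandwidth  = 19200,	/* MB/s */
		.write_bandwidth = 19200,	/* MB/s */
		.read_latency    = 150,		/* ns */
		.write_latency   = 200,		/* ns */
	};
	struct node_cache_attrs cache = {
		.indexing     = NODE_CACHE_DIRECT_MAP,
		.write_policy = NODE_CACHE_WRITE_BACK,
		.size         = SZ_16G,
		.line_size    = 64,
		.level        = 1,
	};

	/* access class 0: performance as seen from the initiator CPUs */
	node_set_perf_attrs(1, &hmem, 0);
	node_add_cache(1, &cache);
}
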
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index cf0b91c3ec12..bb0ee80526b2 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NODEMASK_H
#define __LINUX_NODEMASK_H
@@ -41,11 +42,11 @@
* void nodes_shift_right(dst, src, n) Shift right
* void nodes_shift_left(dst, src, n) Shift left
*
- * int first_node(mask) Number lowest set bit, or MAX_NUMNODES
- * int next_node(node, mask) Next node past 'node', or MAX_NUMNODES
- * int next_node_in(node, mask) Next node past 'node', or wrap to first,
+ * unsigned int first_node(mask) Number lowest set bit, or MAX_NUMNODES
+ * unsigned int next_node(node, mask) Next node past 'node', or MAX_NUMNODES
+ * unsigned int next_node_in(node, mask) Next node past 'node', or wrap to first,
* or MAX_NUMNODES
- * int first_unset_node(mask) First node not set in mask, or
+ * unsigned int first_unset_node(mask) First node not set in mask, or
* MAX_NUMNODES
*
* nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set
@@ -89,10 +90,11 @@
* for such situations. See below and CPUMASK_ALLOC also.
*/
-#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
+#include <linux/minmax.h>
#include <linux/numa.h>
+#include <linux/random.h>
typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
extern nodemask_t _unused_nodemask_arg_;
@@ -103,13 +105,22 @@ extern nodemask_t _unused_nodemask_arg_;
*
* Can be used to provide arguments for '%*pb[l]' when printing a nodemask.
*/
-#define nodemask_pr_args(maskp) MAX_NUMNODES, (maskp)->bits
+#define nodemask_pr_args(maskp) __nodemask_pr_numnodes(maskp), \
+ __nodemask_pr_bits(maskp)
+static inline unsigned int __nodemask_pr_numnodes(const nodemask_t *m)
+{
+ return m ? MAX_NUMNODES : 0;
+}
+static inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
+{
+ return m ? m->bits : NULL;
+}
/*
* The inline keyword gives the compiler room to decide to inline, or
* not inline a function as it sees best. However, as these functions
* are called in both __init and non-__init functions, if they are not
- * inlined we will end up with a section mis-match error (of the type of
+ * inlined we will end up with a section mismatch error (of the type of
* freeable items not being freed). So we must use __always_inline here
* to fix the problem. If other functions in the future also end up in
* this situation they will also need to be annotated as __always_inline
@@ -143,7 +154,7 @@ static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
#define node_test_and_set(node, nodemask) \
__node_test_and_set((node), &(nodemask))
-static inline int __node_test_and_set(int node, nodemask_t *addr)
+static inline bool __node_test_and_set(int node, nodemask_t *addr)
{
return test_and_set_bit(node, addr->bits);
}
@@ -190,7 +201,7 @@ static inline void __nodes_complement(nodemask_t *dstp,
#define nodes_equal(src1, src2) \
__nodes_equal(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_equal(const nodemask_t *src1p,
+static inline bool __nodes_equal(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
@@ -198,7 +209,7 @@ static inline int __nodes_equal(const nodemask_t *src1p,
#define nodes_intersects(src1, src2) \
__nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_intersects(const nodemask_t *src1p,
+static inline bool __nodes_intersects(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
@@ -206,20 +217,20 @@ static inline int __nodes_intersects(const nodemask_t *src1p,
#define nodes_subset(src1, src2) \
__nodes_subset(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_subset(const nodemask_t *src1p,
+static inline bool __nodes_subset(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
-static inline int __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
+static inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
-static inline int __nodes_full(const nodemask_t *srcp, unsigned int nbits)
+static inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_full(srcp->bits, nbits);
}
@@ -250,15 +261,15 @@ static inline void __nodes_shift_left(nodemask_t *dstp,
> MAX_NUMNODES, then the silly min_ts could be dropped. */
#define first_node(src) __first_node(&(src))
-static inline int __first_node(const nodemask_t *srcp)
+static inline unsigned int __first_node(const nodemask_t *srcp)
{
- return min_t(int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
+ return min_t(unsigned int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
}
#define next_node(n, src) __next_node((n), &(src))
-static inline int __next_node(int n, const nodemask_t *srcp)
+static inline unsigned int __next_node(int n, const nodemask_t *srcp)
{
- return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
+ return min_t(unsigned int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}
/*
@@ -266,7 +277,14 @@ static inline int __next_node(int n, const nodemask_t *srcp)
* the first node in src if needed. Returns MAX_NUMNODES if src is empty.
*/
#define next_node_in(n, src) __next_node_in((n), &(src))
-int __next_node_in(int node, const nodemask_t *srcp);
+static inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
+{
+ unsigned int ret = __next_node(node, srcp);
+
+ if (ret == MAX_NUMNODES)
+ ret = __first_node(srcp);
+ return ret;
+}
static inline void init_nodemask_of_node(nodemask_t *mask, int node)
{
@@ -286,9 +304,9 @@ static inline void init_nodemask_of_node(nodemask_t *mask, int node)
})
#define first_unset_node(mask) __first_unset_node(&(mask))
-static inline int __first_unset_node(const nodemask_t *maskp)
+static inline unsigned int __first_unset_node(const nodemask_t *maskp)
{
- return min_t(int,MAX_NUMNODES,
+ return min_t(unsigned int, MAX_NUMNODES,
find_first_zero_bit(maskp->bits, MAX_NUMNODES));
}
@@ -365,14 +383,13 @@ static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
}
#if MAX_NUMNODES > 1
-#define for_each_node_mask(node, mask) \
- for ((node) = first_node(mask); \
- (node) < MAX_NUMNODES; \
- (node) = next_node((node), (mask)))
+#define for_each_node_mask(node, mask) \
+ for ((node) = first_node(mask); \
+ (node >= 0) && (node) < MAX_NUMNODES; \
+ (node) = next_node((node), (mask)))
#else /* MAX_NUMNODES == 1 */
-#define for_each_node_mask(node, mask) \
- if (!nodes_empty(mask)) \
- for ((node) = 0; (node) < 1; (node)++)
+#define for_each_node_mask(node, mask) \
+ for ((node) = 0; (node) < 1 && !nodes_empty(mask); (node)++)
#endif /* MAX_NUMNODES */
/*
@@ -389,6 +406,7 @@ enum node_states {
#endif
N_MEMORY, /* The node has memory(regular, high, movable) */
N_CPU, /* The node has one or more cpus */
+ N_GENERIC_INITIATOR, /* The node has one or more Generic Initiators */
NR_NODE_STATES
};
@@ -425,17 +443,17 @@ static inline int num_node_state(enum node_states state)
#define first_online_node first_node(node_states[N_ONLINE])
#define first_memory_node first_node(node_states[N_MEMORY])
-static inline int next_online_node(int nid)
+static inline unsigned int next_online_node(int nid)
{
return next_node(nid, node_states[N_ONLINE]);
}
-static inline int next_memory_node(int nid)
+static inline unsigned int next_memory_node(int nid)
{
return next_node(nid, node_states[N_MEMORY]);
}
-extern int nr_node_ids;
-extern int nr_online_nodes;
+extern unsigned int nr_node_ids;
+extern unsigned int nr_online_nodes;
static inline void node_set_online(int nid)
{
@@ -475,22 +493,37 @@ static inline int num_node_state(enum node_states state)
#define first_online_node 0
#define first_memory_node 0
#define next_online_node(nid) (MAX_NUMNODES)
-#define nr_node_ids 1
-#define nr_online_nodes 1
+#define next_memory_node(nid) (MAX_NUMNODES)
+#define nr_node_ids 1U
+#define nr_online_nodes 1U
#define node_set_online(node) node_set_state((node), N_ONLINE)
#define node_set_offline(node) node_clear_state((node), N_ONLINE)
#endif
+static inline int node_random(const nodemask_t *maskp)
+{
#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
-extern int node_random(const nodemask_t *maskp);
+ int w, bit;
+
+ w = nodes_weight(*maskp);
+ switch (w) {
+ case 0:
+ bit = NUMA_NO_NODE;
+ break;
+ case 1:
+ bit = first_node(*maskp);
+ break;
+ default:
+ bit = find_nth_bit(maskp->bits, MAX_NUMNODES, get_random_u32_below(w));
+ break;
+ }
+ return bit;
#else
-static inline int node_random(const nodemask_t *mask)
-{
return 0;
-}
#endif
+}
#define node_online_map node_states[N_ONLINE]
#define node_possible_map node_states[N_POSSIBLE]
@@ -504,11 +537,11 @@ static inline int node_random(const nodemask_t *mask)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
/*
- * For nodemask scrach area.
+ * For nodemask scratch area.
* NODEMASK_ALLOC(type, name) allocates an object with a specified type and
* name.
*/
-#if NODES_SHIFT > 8 /* nodemask_t > 256 bytes */
+#if NODES_SHIFT > 8 /* nodemask_t > 32 bytes */
#define NODEMASK_ALLOC(type, name, gfp_flags) \
type *name = kmalloc(sizeof(*name), gfp_flags)
#define NODEMASK_FREE(m) kfree(m)
@@ -517,7 +550,7 @@ static inline int node_random(const nodemask_t *mask)
#define NODEMASK_FREE(m) do {} while (0)
#endif
-/* A example struture for using NODEMASK_ALLOC, used in mempolicy. */
+/* Example structure for using NODEMASK_ALLOC, used in mempolicy. */
struct nodemask_scratch {
nodemask_t mask1;
nodemask_t mask2;
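
A hedged sketch exercising the reworked nodemask helpers above; the mask
contents and messages are arbitrary. It shows the wrap-around next_node_in()
(now inline) and the now-inline node_random().

#include <linux/nodemask.h>
#include <linux/printk.h>

static void example_walk_nodes(void)
{
	nodemask_t mask = NODE_MASK_NONE;
	int nid;

	node_set(0, mask);
	node_set(2, mask);

	for_each_node_mask(nid, mask)
		pr_info("node %d is set\n", nid);

	/* wraps past the last set bit: after node 2 comes node 0 again */
	nid = next_node_in(2, mask);
	pr_info("after node 2 comes node %d\n", nid);

	/* uniformly random node from the mask; NUMA_NO_NODE if empty */
	nid = node_random(&mask);
	pr_info("random pick: node %d\n", nid);
}
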
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
new file mode 100644
index 000000000000..9f0af4f116d9
--- /dev/null
+++ b/include/linux/nospec.h
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2018 Linus Torvalds. All rights reserved.
+// Copyright(c) 2018 Alexei Starovoitov. All rights reserved.
+// Copyright(c) 2018 Intel Corporation. All rights reserved.
+
+#ifndef _LINUX_NOSPEC_H
+#define _LINUX_NOSPEC_H
+
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
+struct task_struct;
+
+#ifndef barrier_nospec
+# define barrier_nospec() do { } while (0)
+#endif
+
+/**
+ * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
+ * @index: array element index
+ * @size: number of elements in array
+ *
+ * When @index is out of bounds (@index >= @size), the sign bit will be
+ * set. Extend the sign bit to all bits and invert, giving a result of
+ * zero for an out of bounds index, or ~0 if within bounds [0, @size).
+ */
+#ifndef array_index_mask_nospec
+static inline unsigned long array_index_mask_nospec(unsigned long index,
+ unsigned long size)
+{
+ /*
+ * Always calculate and emit the mask even if the compiler
+ * thinks the mask is not needed. The compiler does not take
+ * into account the value of @index under speculation.
+ */
+ OPTIMIZER_HIDE_VAR(index);
+ return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
+}
+#endif
+
+/*
+ * array_index_nospec - sanitize an array index after a bounds check
+ *
+ * For a code sequence like:
+ *
+ * if (index < size) {
+ * index = array_index_nospec(index, size);
+ * val = array[index];
+ * }
+ *
+ * ...if the CPU speculates past the bounds check then
+ * array_index_nospec() will clamp the index within the range of [0,
+ * size).
+ */
+#define array_index_nospec(index, size) \
+({ \
+ typeof(index) _i = (index); \
+ typeof(size) _s = (size); \
+ unsigned long _mask = array_index_mask_nospec(_i, _s); \
+ \
+ BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
+ BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
+ \
+ (typeof(_i)) (_i & _mask); \
+})
+
+/* Speculation control prctl */
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which);
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+ unsigned long ctrl);
+/* Speculation control for seccomp enforced mitigation */
+void arch_seccomp_spec_mitigate(struct task_struct *task);
+
+#endif /* _LINUX_NOSPEC_H */
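
A hedged usage sketch of array_index_nospec(); the table and bound are
hypothetical. The point, per the comment above, is that the bounds check
alone does not constrain a speculatively-executed load, so the index is
clamped before it is used.

#include <linux/nospec.h>
#include <linux/errno.h>

static int example_lookup(const int *table, unsigned long idx,
			  unsigned long size)
{
	if (idx >= size)
		return -EINVAL;

	idx = array_index_nospec(idx, size);	/* clamp under speculation */
	return table[idx];
}
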
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 4149868de4e6..2aba75145144 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Routines to manage notifier chains for passing status changes to any
* interested routines. We need this instead of hard coded call lists so
@@ -42,9 +43,7 @@
* in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
* As compensation, srcu_notifier_chain_unregister() is rather expensive.
* SRCU notifier chains should be used when the chain will be called very
- * often but notifier_blocks will seldom be removed. Also, SRCU notifier
- * chains are slightly more difficult to use because they require special
- * runtime initialization.
+ * often but notifier_blocks will seldom be removed.
*/
struct notifier_block;
@@ -74,6 +73,9 @@ struct raw_notifier_head {
struct srcu_notifier_head {
struct mutex mutex;
+#ifdef CONFIG_TREE_SRCU
+ struct srcu_usage srcuu;
+#endif
struct srcu_struct srcu;
struct notifier_block __rcu *head;
};
@@ -90,7 +92,7 @@ struct srcu_notifier_head {
(name)->head = NULL; \
} while (0)
-/* srcu_notifier_heads must be initialized and cleaned up dynamically */
+/* srcu_notifier_heads must be cleaned up dynamically */
extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
#define srcu_cleanup_notifier_head(name) \
cleanup_srcu_struct(&(name)->srcu);
@@ -103,7 +105,13 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
.head = NULL }
#define RAW_NOTIFIER_INIT(name) { \
.head = NULL }
-/* srcu_notifier_heads cannot be initialized statically */
+
+#define SRCU_NOTIFIER_INIT(name, pcpu) \
+ { \
+ .mutex = __MUTEX_INITIALIZER(name.mutex), \
+ .head = NULL, \
+ .srcu = __SRCU_STRUCT_INIT(name.srcu, name.srcuu, pcpu), \
+ }
#define ATOMIC_NOTIFIER_HEAD(name) \
struct atomic_notifier_head name = \
@@ -115,6 +123,25 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
struct raw_notifier_head name = \
RAW_NOTIFIER_INIT(name)
+#ifdef CONFIG_TREE_SRCU
+#define _SRCU_NOTIFIER_HEAD(name, mod) \
+ static DEFINE_PER_CPU(struct srcu_data, name##_head_srcu_data); \
+ mod struct srcu_notifier_head name = \
+ SRCU_NOTIFIER_INIT(name, name##_head_srcu_data)
+
+#else
+#define _SRCU_NOTIFIER_HEAD(name, mod) \
+ mod struct srcu_notifier_head name = \
+ SRCU_NOTIFIER_INIT(name, name)
+
+#endif
+
+#define SRCU_NOTIFIER_HEAD(name) \
+ _SRCU_NOTIFIER_HEAD(name, /* not static */)
+
+#define SRCU_NOTIFIER_HEAD_STATIC(name) \
+ _SRCU_NOTIFIER_HEAD(name, static)
+
#ifdef __KERNEL__
extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
@@ -126,9 +153,10 @@ extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
struct notifier_block *nb);
-extern int blocking_notifier_chain_cond_register(
- struct blocking_notifier_head *nh,
- struct notifier_block *nb);
+extern int atomic_notifier_chain_register_unique_prio(
+ struct atomic_notifier_head *nh, struct notifier_block *nb);
+extern int blocking_notifier_chain_register_unique_prio(
+ struct blocking_notifier_head *nh, struct notifier_block *nb);
extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
struct notifier_block *nb);
@@ -141,20 +169,19 @@ extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
unsigned long val, void *v);
-extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
- unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
unsigned long val, void *v);
-extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
- unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
unsigned long val, void *v);
-extern int __raw_notifier_call_chain(struct raw_notifier_head *nh,
- unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
unsigned long val, void *v);
-extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
- unsigned long val, void *v, int nr_to_call, int *nr_calls);
+
+extern int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh,
+ unsigned long val_up, unsigned long val_down, void *v);
+extern int raw_notifier_call_chain_robust(struct raw_notifier_head *nh,
+ unsigned long val_up, unsigned long val_down, void *v);
+
+extern bool atomic_notifier_call_chain_is_empty(struct atomic_notifier_head *nh);
#define NOTIFY_DONE 0x0000 /* Don't care */
#define NOTIFY_OK 0x0001 /* Suits me */
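
A hedged sketch of the new static SRCU notifier head initializer declared
above; the chain name, callback, and event value are made up.

#include <linux/notifier.h>
#include <linux/init.h>

SRCU_NOTIFIER_HEAD_STATIC(example_chain);

static int example_event_cb(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_event_cb,
};

static int __init example_init(void)
{
	srcu_notifier_chain_register(&example_chain, &example_nb);
	srcu_notifier_call_chain(&example_chain, 1, NULL);
	return 0;
}
device_initcall(example_init);
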
diff --git a/include/linux/ns_common.h b/include/linux/ns_common.h
index 85a5c8c16be9..0f1d024bd958 100644
--- a/include/linux/ns_common.h
+++ b/include/linux/ns_common.h
@@ -1,12 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NS_COMMON_H
#define _LINUX_NS_COMMON_H
+#include <linux/refcount.h>
+
struct proc_ns_operations;
struct ns_common {
atomic_long_t stashed;
const struct proc_ns_operations *ops;
unsigned int inum;
+ refcount_t count;
};
#endif
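
A hedged sketch of the new shared refcount in ns_common: a get/put pair for
a hypothetical namespace type embedding ns_common.

#include <linux/ns_common.h>
#include <linux/slab.h>

struct example_ns {
	struct ns_common ns;
	/* namespace-private state would live here */
};

static struct example_ns *example_get_ns(struct example_ns *e)
{
	refcount_inc(&e->ns.count);
	return e;
}

static void example_put_ns(struct example_ns *e)
{
	if (refcount_dec_and_test(&e->ns.count))
		kfree(e);
}
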
diff --git a/include/linux/nsc_gpio.h b/include/linux/nsc_gpio.h
index 7da0cf3702ee..d7a04a6e3783 100644
--- a/include/linux/nsc_gpio.h
+++ b/include/linux/nsc_gpio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/**
nsc_gpio.c
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index ac0d65bef5d0..fee881cded01 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NSPROXY_H
#define _LINUX_NSPROXY_H
@@ -34,11 +35,37 @@ struct nsproxy {
struct mnt_namespace *mnt_ns;
struct pid_namespace *pid_ns_for_children;
struct net *net_ns;
+ struct time_namespace *time_ns;
+ struct time_namespace *time_ns_for_children;
struct cgroup_namespace *cgroup_ns;
};
extern struct nsproxy init_nsproxy;
/*
+ * A structure to encompass all bits needed to install
+ * a partial or complete new set of namespaces.
+ *
+ * If a new user namespace is requested, cred will
+ * point to a modifiable set of credentials. If a pointer
+ * to a modifiable set is needed, nsset_cred() must be
+ * used and tested.
+ */
+struct nsset {
+ unsigned flags;
+ struct nsproxy *nsproxy;
+ struct fs_struct *fs;
+ const struct cred *cred;
+};
+
+static inline struct cred *nsset_cred(struct nsset *set)
+{
+ if (set->flags & CLONE_NEWUSER)
+ return (struct cred *)set->cred;
+
+ return NULL;
+}
+
+/*
* the namespaces access rules are:
*
* 1. only current task is allowed to change tsk->nsproxy pointer or
@@ -67,6 +94,7 @@ extern struct nsproxy init_nsproxy;
int copy_namespaces(unsigned long flags, struct task_struct *tsk);
void exit_task_namespaces(struct task_struct *tsk);
void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new);
+int exec_task_namespaces(void);
void free_nsproxy(struct nsproxy *ns);
int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **,
struct cred *, struct fs_struct *);
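
A hedged sketch of the nsset_cred() rule documented above: an install
handler may only touch credentials when a new user namespace was requested.
The handler below is illustrative, not a real setns() install hook.

#include <linux/nsproxy.h>
#include <linux/cred.h>
#include <linux/errno.h>

static int example_install(struct nsset *nsset)
{
	struct cred *cred = nsset_cred(nsset);

	if (!cred)	/* no CLONE_NEWUSER in this nsset */
		return -EINVAL;

	/* safe to modify the private cred copy here */
	return 0;
}
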
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index 609e232c00da..191b524e5c0d 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -58,9 +58,11 @@
#include <linux/completion.h>
#include <linux/device.h>
+#include <linux/interrupt.h>
struct ntb_client;
struct ntb_dev;
+struct ntb_msi;
struct pci_dev;
/**
@@ -70,6 +72,8 @@ struct pci_dev;
* @NTB_TOPO_SEC: On secondary side of remote ntb.
* @NTB_TOPO_B2B_USD: On primary side of local ntb upstream of remote ntb.
* @NTB_TOPO_B2B_DSD: On primary side of local ntb downstream of remote ntb.
+ * @NTB_TOPO_SWITCH: Connected via a switch which supports ntb.
+ * @NTB_TOPO_CROSSLINK: Connected via two symmetric switches
*/
enum ntb_topo {
NTB_TOPO_NONE = -1,
@@ -77,6 +81,8 @@ enum ntb_topo {
NTB_TOPO_SEC,
NTB_TOPO_B2B_USD,
NTB_TOPO_B2B_DSD,
+ NTB_TOPO_SWITCH,
+ NTB_TOPO_CROSSLINK,
};
static inline int ntb_topo_is_b2b(enum ntb_topo topo)
@@ -92,11 +98,13 @@ static inline int ntb_topo_is_b2b(enum ntb_topo topo)
static inline char *ntb_topo_string(enum ntb_topo topo)
{
switch (topo) {
- case NTB_TOPO_NONE: return "NTB_TOPO_NONE";
- case NTB_TOPO_PRI: return "NTB_TOPO_PRI";
- case NTB_TOPO_SEC: return "NTB_TOPO_SEC";
- case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD";
- case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD";
+ case NTB_TOPO_NONE: return "NTB_TOPO_NONE";
+ case NTB_TOPO_PRI: return "NTB_TOPO_PRI";
+ case NTB_TOPO_SEC: return "NTB_TOPO_SEC";
+ case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD";
+ case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD";
+ case NTB_TOPO_SWITCH: return "NTB_TOPO_SWITCH";
+ case NTB_TOPO_CROSSLINK: return "NTB_TOPO_CROSSLINK";
}
return "NTB_TOPO_INVALID";
}
@@ -199,7 +207,7 @@ static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops)
}
/**
- * struct ntb_ctx_ops - ntb device operations
+ * struct ntb_dev_ops - ntb device operations
* @port_number: See ntb_port_number().
* @peer_port_count: See ntb_peer_port_count().
* @peer_port_number: See ntb_peer_port_number().
@@ -247,7 +255,7 @@ static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops)
* @msg_set_mask: See ntb_msg_set_mask().
* @msg_clear_mask: See ntb_msg_clear_mask().
* @msg_read: See ntb_msg_read().
- * @msg_write: See ntb_msg_write().
+ * @peer_msg_write: See ntb_peer_msg_write().
*/
struct ntb_dev_ops {
int (*port_number)(struct ntb_dev *ntb);
@@ -290,7 +298,8 @@ struct ntb_dev_ops {
int (*db_clear_mask)(struct ntb_dev *ntb, u64 db_bits);
int (*peer_db_addr)(struct ntb_dev *ntb,
- phys_addr_t *db_addr, resource_size_t *db_size);
+ phys_addr_t *db_addr, resource_size_t *db_size,
+ u64 *db_data, int db_bit);
u64 (*peer_db_read)(struct ntb_dev *ntb);
int (*peer_db_set)(struct ntb_dev *ntb, u64 db_bits);
int (*peer_db_clear)(struct ntb_dev *ntb, u64 db_bits);
@@ -318,8 +327,8 @@ struct ntb_dev_ops {
int (*msg_clear_sts)(struct ntb_dev *ntb, u64 sts_bits);
int (*msg_set_mask)(struct ntb_dev *ntb, u64 mask_bits);
int (*msg_clear_mask)(struct ntb_dev *ntb, u64 mask_bits);
- int (*msg_read)(struct ntb_dev *ntb, int midx, int *pidx, u32 *msg);
- int (*msg_write)(struct ntb_dev *ntb, int midx, int pidx, u32 msg);
+ u32 (*msg_read)(struct ntb_dev *ntb, int *pidx, int midx);
+ int (*peer_msg_write)(struct ntb_dev *ntb, int pidx, int midx, u32 msg);
};
static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
@@ -381,7 +390,7 @@ static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
/* !ops->msg_set_mask == !ops->msg_count && */
/* !ops->msg_clear_mask == !ops->msg_count && */
!ops->msg_read == !ops->msg_count &&
- !ops->msg_write == !ops->msg_count &&
+ !ops->peer_msg_write == !ops->msg_count &&
1;
}
@@ -397,7 +406,7 @@ struct ntb_client {
#define drv_ntb_client(__drv) container_of((__drv), struct ntb_client, drv)
/**
- * struct ntb_device - ntb device
+ * struct ntb_dev - ntb device
* @dev: Linux device object.
* @pdev: PCI device entry of the ntb.
* @topo: Detected topology of the ntb.
@@ -419,6 +428,10 @@ struct ntb_dev {
spinlock_t ctx_lock;
/* block unregister until device is fully released */
struct completion released;
+
+#ifdef CONFIG_NTB_MSI
+ struct ntb_msi *msi;
+#endif
};
#define dev_ntb(__dev) container_of((__dev), struct ntb_dev, dev)
@@ -465,7 +478,7 @@ void ntb_unregister_client(struct ntb_client *client);
int ntb_register_device(struct ntb_dev *ntb);
/**
- * ntb_register_device() - unregister a ntb device
+ * ntb_unregister_device() - unregister a ntb device
* @ntb: NTB device context.
*
* The device will be removed from the list of ntb devices. If the ntb device
@@ -609,7 +622,6 @@ static inline int ntb_port_number(struct ntb_dev *ntb)
return ntb->ops->port_number(ntb);
}
-
/**
* ntb_peer_port_count() - get the number of peer device ports
* @ntb: NTB device context.
@@ -647,6 +659,58 @@ static inline int ntb_peer_port_number(struct ntb_dev *ntb, int pidx)
}
/**
+ * ntb_logical_port_number() - get the logical port number of the local port
+ * @ntb: NTB device context.
+ *
+ * The Logical Port Number is defined to be a unique number for each
+ * port starting from zero through to the number of ports minus one.
+ * This is in contrast to the Port Number where each port can be assigned
+ * any unique physical number by the hardware.
+ *
+ * The logical port number is useful for calculating the resource indexes
+ * used by peers.
+ *
+ * Return: the logical port number or a negative value indicating an error
+ */
+static inline int ntb_logical_port_number(struct ntb_dev *ntb)
+{
+ int lport = ntb_port_number(ntb);
+ int pidx;
+
+ if (lport < 0)
+ return lport;
+
+ for (pidx = 0; pidx < ntb_peer_port_count(ntb); pidx++)
+ if (lport <= ntb_peer_port_number(ntb, pidx))
+ return pidx;
+
+ return pidx;
+}
+
+/**
+ * ntb_peer_logical_port_number() - get the logical peer port by given index
+ * @ntb: NTB device context.
+ * @pidx: Peer port index.
+ *
+ * The Logical Port Number is defined to be a unique number for each
+ * port starting from zero through to the number of ports minus one.
+ * This is in contrast to the Port Number where each port can be assigned
+ * any unique physical number by the hardware.
+ *
+ * The logical port number is useful for calculating the resource indexes
+ * used by peers.
+ *
+ * Return: the peer's logical port number or a negative value indicating an error
+ */
+static inline int ntb_peer_logical_port_number(struct ntb_dev *ntb, int pidx)
+{
+ if (ntb_peer_port_number(ntb, pidx) < ntb_port_number(ntb))
+ return pidx;
+ else
+ return pidx + 1;
+}
+
+/**
* ntb_peer_port_idx() - get the peer device port index by given port number
* @ntb: NTB device context.
* @port: Peer port number.
@@ -730,7 +794,8 @@ static inline int ntb_link_disable(struct ntb_dev *ntb)
* Hardware and topology may support a different number of memory windows.
* Moreover different peer devices can support different number of memory
* windows. Simply speaking this method returns the number of possible inbound
- * memory windows to share with specified peer device.
+ * memory windows to share with the specified peer device. Note: this may return
+ * zero if the link is not up yet.
*
* Return: the number of memory windows.
*/
@@ -751,7 +816,7 @@ static inline int ntb_mw_count(struct ntb_dev *ntb, int pidx)
* Get the alignments of an inbound memory window with specified index.
* NULL may be given for any output parameter if the value is not needed.
* The alignment and size parameters may be used for allocation of proper
- * shared memory.
+ * shared memory. Note: this must only be called when the link is up.
*
* Return: Zero on success, otherwise a negative error number.
*/
@@ -760,6 +825,9 @@ static inline int ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx,
resource_size_t *size_align,
resource_size_t *size_max)
{
+ if (!(ntb_link_is_up(ntb, NULL, NULL) & BIT_ULL(pidx)))
+ return -ENOTCONN;
+
return ntb->ops->mw_get_align(ntb, pidx, widx, addr_align, size_align,
size_max);
}
@@ -1068,6 +1136,8 @@ static inline int ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
* @ntb: NTB device context.
* @db_addr: OUT - The address of the peer doorbell register.
* @db_size: OUT - The number of bytes to write the peer doorbell register.
+ * @db_data: OUT - The data of the peer doorbell register
+ * @db_bit: Doorbell bit number
*
* Return the address of the peer doorbell register. This may be used, for
* example, by drivers that offload memory copy operations to a dma engine.
@@ -1081,12 +1151,13 @@ static inline int ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
*/
static inline int ntb_peer_db_addr(struct ntb_dev *ntb,
phys_addr_t *db_addr,
- resource_size_t *db_size)
+ resource_size_t *db_size,
+ u64 *db_data, int db_bit)
{
if (!ntb->ops->peer_db_addr)
return -EINVAL;
- return ntb->ops->peer_db_addr(ntb, db_addr, db_size);
+ return ntb->ops->peer_db_addr(ntb, db_addr, db_size, db_data, db_bit);
}
/**
@@ -1280,7 +1351,7 @@ static inline int ntb_spad_write(struct ntb_dev *ntb, int sidx, u32 val)
* @sidx: Scratchpad index.
* @spad_addr: OUT - The address of the peer scratchpad register.
*
- * Return the address of the peer doorbell register. This may be used, for
+ * Return the address of the peer scratchpad register. This may be used, for
* example, by drivers that offload memory copy operations to a dma engine.
*
* Return: Zero on success, otherwise an error number.
@@ -1302,7 +1373,7 @@ static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx,
*
* Read the peer scratchpad register, and return the value.
*
- * Return: The value of the local scratchpad register.
+ * Return: The value of the peer scratchpad register.
*/
static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
{
@@ -1452,31 +1523,29 @@ static inline int ntb_msg_clear_mask(struct ntb_dev *ntb, u64 mask_bits)
}
/**
- * ntb_msg_read() - read message register with specified index
+ * ntb_msg_read() - read inbound message register with specified index
* @ntb: NTB device context.
- * @midx: Message register index
* @pidx: OUT - Port index of peer device a message retrieved from
- * @msg: OUT - Data
+ * @midx: Message register index
*
* Read data from the specified message register. Source port index of a
* message is retrieved as well.
*
- * Return: Zero on success, otherwise a negative error number.
+ * Return: The value of the inbound message register.
*/
-static inline int ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx,
- u32 *msg)
+static inline u32 ntb_msg_read(struct ntb_dev *ntb, int *pidx, int midx)
{
if (!ntb->ops->msg_read)
- return -EINVAL;
+ return ~(u32)0;
- return ntb->ops->msg_read(ntb, midx, pidx, msg);
+ return ntb->ops->msg_read(ntb, pidx, midx);
}
/**
- * ntb_msg_write() - write data to the specified message register
+ * ntb_peer_msg_write() - write data to the specified peer message register
* @ntb: NTB device context.
- * @midx: Message register index
* @pidx: Port index of peer device a message being sent to
+ * @midx: Message register index
* @msg: Data to send
*
* Send data to a specified peer device using the defined message register.
@@ -1485,13 +1554,150 @@ static inline int ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx,
*
* Return: Zero on success, otherwise a negative error number.
*/
-static inline int ntb_msg_write(struct ntb_dev *ntb, int midx, int pidx,
- u32 msg)
+static inline int ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx,
+ u32 msg)
{
- if (!ntb->ops->msg_write)
+ if (!ntb->ops->peer_msg_write)
return -EINVAL;
- return ntb->ops->msg_write(ntb, midx, pidx, msg);
+ return ntb->ops->peer_msg_write(ntb, pidx, midx, msg);
+}
+
+/**
+ * ntb_peer_resource_idx() - get a resource index for a given peer idx
+ * @ntb: NTB device context.
+ * @pidx: Peer port index.
+ *
+ * When constructing a graph of peers, each remote peer must use a different
+ * resource index (mw, doorbell, etc) to communicate with each other
+ * peer.
+ *
+ * In a two peer system, this function should always return 0 such that
+ * resource 0 points to the remote peer on both ports.
+ *
+ * In a 5 peer system, this function will return the following matrix
+ *
+ * pidx \ port 0 1 2 3 4
+ * 0 0 0 1 2 3
+ * 1 0 1 1 2 3
+ * 2 0 1 2 2 3
+ * 3 0 1 2 3 3
+ *
+ * For example, if this function is used to program a peer's memory
+ * windows, port 0 will program MW 0 on all its peers to point to itself.
+ * Port 1 will program MW 0 in port 0 to point to itself and MW 1 on all
+ * other ports, and so on.
+ *
+ * For the legacy two host case, ntb_port_number() and ntb_peer_port_number()
+ * both return zero and therefore this function will always return zero.
+ * So MW 0 on each host would be programmed to point to the other host.
+ *
+ * Return: the resource index to use for that peer.
+ */
+static inline int ntb_peer_resource_idx(struct ntb_dev *ntb, int pidx)
+{
+ int local_port, peer_port;
+
+ if (pidx >= ntb_peer_port_count(ntb))
+ return -EINVAL;
+
+ local_port = ntb_logical_port_number(ntb);
+ peer_port = ntb_peer_logical_port_number(ntb, pidx);
+
+ if (peer_port < local_port)
+ return local_port - 1;
+ else
+ return local_port;
+}
+
+/**
+ * ntb_peer_highest_mw_idx() - get a memory window index for a given peer idx
+ * using the highest index memory windows first
+ *
+ * @ntb: NTB device context.
+ * @pidx: Peer port index.
+ *
+ * Like ntb_peer_resource_idx(), except it returns indexes starting with
+ * last memory window index.
+ *
+ * Return: the resource index to use for that peer.
+ */
+static inline int ntb_peer_highest_mw_idx(struct ntb_dev *ntb, int pidx)
+{
+ int ret;
+
+ ret = ntb_peer_resource_idx(ntb, pidx);
+ if (ret < 0)
+ return ret;
+
+ return ntb_mw_count(ntb, pidx) - ret - 1;
+}
+
+struct ntb_msi_desc {
+ u32 addr_offset;
+ u32 data;
+};
+
+#ifdef CONFIG_NTB_MSI
+
+int ntb_msi_init(struct ntb_dev *ntb, void (*desc_changed)(void *ctx));
+int ntb_msi_setup_mws(struct ntb_dev *ntb);
+void ntb_msi_clear_mws(struct ntb_dev *ntb);
+int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
+ irq_handler_t thread_fn,
+ const char *name, void *dev_id,
+ struct ntb_msi_desc *msi_desc);
+void ntbm_msi_free_irq(struct ntb_dev *ntb, unsigned int irq, void *dev_id);
+int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer,
+ struct ntb_msi_desc *desc);
+int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer,
+ struct ntb_msi_desc *desc,
+ phys_addr_t *msi_addr);
+
+#else /* not CONFIG_NTB_MSI */
+
+static inline int ntb_msi_init(struct ntb_dev *ntb,
+ void (*desc_changed)(void *ctx))
+{
+ return -EOPNOTSUPP;
+}
+static inline int ntb_msi_setup_mws(struct ntb_dev *ntb)
+{
+ return -EOPNOTSUPP;
+}
+static inline void ntb_msi_clear_mws(struct ntb_dev *ntb) {}
+static inline int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb,
+ irq_handler_t handler,
+ irq_handler_t thread_fn,
+ const char *name, void *dev_id,
+ struct ntb_msi_desc *msi_desc)
+{
+ return -EOPNOTSUPP;
+}
+static inline void ntbm_msi_free_irq(struct ntb_dev *ntb, unsigned int irq,
+ void *dev_id) {}
+static inline int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer,
+ struct ntb_msi_desc *desc)
+{
+ return -EOPNOTSUPP;
+}
+static inline int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer,
+ struct ntb_msi_desc *desc,
+ phys_addr_t *msi_addr)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_NTB_MSI */
+
+static inline int ntbm_msi_request_irq(struct ntb_dev *ntb,
+ irq_handler_t handler,
+ const char *name, void *dev_id,
+ struct ntb_msi_desc *msi_desc)
+{
+ return ntbm_msi_request_threaded_irq(ntb, handler, NULL, name,
+ dev_id, msi_desc);
}
#endif
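
To make the ntb_peer_resource_idx() matrix above concrete, here is a hedged
standalone model of the same arithmetic, using plain logical port numbers
(0..nports-1) instead of a struct ntb_dev.

static int model_peer_resource_idx(int local_port, int peer_port)
{
	/* both arguments are logical ports; see ntb_logical_port_number() */
	return peer_port < local_port ? local_port - 1 : local_port;
}

/*
 * For the documented 5-peer system, e.g. model_peer_resource_idx(2, 0) == 1
 * (row pidx 0, column port 2) and model_peer_resource_idx(0, 3) == 0
 * (row pidx 2, column port 0), matching the table.
 */
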
diff --git a/include/linux/nubus.h b/include/linux/nubus.h
index 6165b2c62040..392fc6c53e96 100644
--- a/include/linux/nubus.h
+++ b/include/linux/nubus.h
@@ -1,23 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
nubus.h: various definitions and prototypes for NuBus drivers to use.
Originally written by Alan Cox.
Hacked to death by C. Scott Ananian and David Huggins-Daines.
-
- Some of the constants in here are from the corresponding
- NetBSD/OpenBSD header file, by Allen Briggs. We figured out the
- rest of them on our own. */
+*/
+
#ifndef LINUX_NUBUS_H
#define LINUX_NUBUS_H
+#include <linux/device.h>
#include <asm/nubus.h>
#include <uapi/linux/nubus.h>
+struct proc_dir_entry;
+struct seq_file;
+
+struct nubus_dir {
+ unsigned char *base;
+ unsigned char *ptr;
+ int done;
+ int mask;
+ struct proc_dir_entry *procdir;
+};
+
+struct nubus_dirent {
+ unsigned char *base;
+ unsigned char type;
+ __u32 data; /* Actually 24 bits used */
+ int mask;
+};
+
struct nubus_board {
- struct nubus_board* next;
- struct nubus_dev* first_dev;
-
+ struct device dev;
+
/* Only 9-E actually exist, though 0-8 are also theoretically
possible, and 0 is a special case which represents the
motherboard and onboard peripherals (Ethernet, video) */
@@ -26,10 +43,10 @@ struct nubus_board {
char name[64];
/* Format block */
- unsigned char* fblock;
+ unsigned char *fblock;
/* Root directory (does *not* always equal fblock + doffset!) */
- unsigned char* directory;
-
+ unsigned char *directory;
+
unsigned long slot_addr;
/* Offset to root directory (sometimes) */
unsigned long doffset;
@@ -40,15 +57,15 @@ struct nubus_board {
unsigned char rev;
unsigned char format;
unsigned char lanes;
-};
-struct nubus_dev {
- /* Next link in device list */
- struct nubus_dev* next;
/* Directory entry in /proc/bus/nubus */
- struct proc_dir_entry* procdir;
+ struct proc_dir_entry *procdir;
+};
+
+struct nubus_rsrc {
+ struct list_head list;
- /* The functional resource ID of this device */
+ /* The functional resource ID */
unsigned char resid;
/* These are mostly here for convenience; we could always read
them from the ROMs if we wanted to */
@@ -56,79 +73,116 @@ struct nubus_dev {
unsigned short type;
unsigned short dr_sw;
unsigned short dr_hw;
- /* This is the device's name rather than the board's.
- Sometimes they are different. Usually the board name is
- more correct. */
- char name[64];
- /* MacOS driver (I kid you not) */
- unsigned char* driver;
- /* Actually this is an offset */
- unsigned long iobase;
- unsigned long iosize;
- unsigned char flags, hwdevid;
-
+
/* Functional directory */
- unsigned char* directory;
+ unsigned char *directory;
/* Much of our info comes from here */
- struct nubus_board* board;
+ struct nubus_board *board;
+};
+
+/* This is all NuBus functional resources (used to find devices later on) */
+extern struct list_head nubus_func_rsrcs;
+
+struct nubus_driver {
+ struct device_driver driver;
+ int (*probe)(struct nubus_board *board);
+ void (*remove)(struct nubus_board *board);
};
-/* This is all NuBus devices (used to find devices later on) */
-extern struct nubus_dev* nubus_devices;
-/* This is all NuBus cards */
-extern struct nubus_board* nubus_boards;
+extern struct bus_type nubus_bus_type;
/* Generic NuBus interface functions, modelled after the PCI interface */
-void nubus_scan_bus(void);
#ifdef CONFIG_PROC_FS
-extern void nubus_proc_init(void);
+void nubus_proc_init(void);
+struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board);
+struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ struct nubus_board *board);
+void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ unsigned int size);
+void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent);
#else
static inline void nubus_proc_init(void) {}
+static inline
+struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board)
+{ return NULL; }
+static inline
+struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ struct nubus_board *board)
+{ return NULL; }
+static inline void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ unsigned int size) {}
+static inline void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent) {}
#endif
-int get_nubus_list(char *buf);
-int nubus_proc_attach_device(struct nubus_dev *dev);
-/* If we need more precision we can add some more of these */
-struct nubus_dev* nubus_find_device(unsigned short category,
- unsigned short type,
- unsigned short dr_hw,
- unsigned short dr_sw,
- const struct nubus_dev* from);
-struct nubus_dev* nubus_find_type(unsigned short category,
- unsigned short type,
- const struct nubus_dev* from);
-/* Might have more than one device in a slot, you know... */
-struct nubus_dev* nubus_find_slot(unsigned int slot,
- const struct nubus_dev* from);
+
+struct nubus_rsrc *nubus_first_rsrc_or_null(void);
+struct nubus_rsrc *nubus_next_rsrc_or_null(struct nubus_rsrc *from);
+
+#define for_each_func_rsrc(f) \
+ for (f = nubus_first_rsrc_or_null(); f; f = nubus_next_rsrc_or_null(f))
+
+#define for_each_board_func_rsrc(b, f) \
+ for_each_func_rsrc(f) if (f->board != b) {} else
/* These are somewhat more NuBus-specific. They all return 0 for
success and -1 for failure, as you'd expect. */
/* The root directory which contains the board and functional
directories */
-int nubus_get_root_dir(const struct nubus_board* board,
- struct nubus_dir* dir);
+int nubus_get_root_dir(const struct nubus_board *board,
+ struct nubus_dir *dir);
/* The board directory */
-int nubus_get_board_dir(const struct nubus_board* board,
- struct nubus_dir* dir);
+int nubus_get_board_dir(const struct nubus_board *board,
+ struct nubus_dir *dir);
/* The functional directory */
-int nubus_get_func_dir(const struct nubus_dev* dev,
- struct nubus_dir* dir);
+int nubus_get_func_dir(const struct nubus_rsrc *fres, struct nubus_dir *dir);
/* These work on any directory gotten via the above */
-int nubus_readdir(struct nubus_dir* dir,
- struct nubus_dirent* ent);
-int nubus_find_rsrc(struct nubus_dir* dir,
+int nubus_readdir(struct nubus_dir *dir,
+ struct nubus_dirent *ent);
+int nubus_find_rsrc(struct nubus_dir *dir,
unsigned char rsrc_type,
- struct nubus_dirent* ent);
-int nubus_rewinddir(struct nubus_dir* dir);
+ struct nubus_dirent *ent);
+int nubus_rewinddir(struct nubus_dir *dir);
/* Things to do with directory entries */
-int nubus_get_subdir(const struct nubus_dirent* ent,
- struct nubus_dir* dir);
-void nubus_get_rsrc_mem(void* dest,
- const struct nubus_dirent *dirent,
- int len);
-void nubus_get_rsrc_str(void* dest,
- const struct nubus_dirent *dirent,
- int maxlen);
+int nubus_get_subdir(const struct nubus_dirent *ent,
+ struct nubus_dir *dir);
+void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent,
+ unsigned int len);
+unsigned int nubus_get_rsrc_str(char *dest, const struct nubus_dirent *dirent,
+ unsigned int len);
+void nubus_seq_write_rsrc_mem(struct seq_file *m,
+ const struct nubus_dirent *dirent,
+ unsigned int len);
+unsigned char *nubus_dirptr(const struct nubus_dirent *nd);
+
+/* Declarations relating to driver model objects */
+int nubus_parent_device_register(void);
+int nubus_device_register(struct nubus_board *board);
+int nubus_driver_register(struct nubus_driver *ndrv);
+void nubus_driver_unregister(struct nubus_driver *ndrv);
+int nubus_proc_show(struct seq_file *m, void *data);
+
+static inline void nubus_set_drvdata(struct nubus_board *board, void *data)
+{
+ dev_set_drvdata(&board->dev, data);
+}
+
+static inline void *nubus_get_drvdata(struct nubus_board *board)
+{
+ return dev_get_drvdata(&board->dev);
+}
+
+/* Returns a pointer to the "standard" slot space. */
+static inline void *nubus_slot_addr(int slot)
+{
+ return (void *)(0xF0000000 | (slot << 24));
+}
+
#endif /* LINUX_NUBUS_H */
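
A hedged sketch of a driver sitting on the new NuBus driver model declared
above; the driver name and the category it matches are hypothetical.

#include <linux/nubus.h>
#include <linux/module.h>

static int example_probe(struct nubus_board *board)
{
	struct nubus_rsrc *fres;

	/* hypothetical match: claim boards with a network function */
	for_each_board_func_rsrc(board, fres)
		if (fres->category == NUBUS_CAT_NETWORK)
			return 0;

	return -ENODEV;
}

static void example_remove(struct nubus_board *board)
{
}

static struct nubus_driver example_driver = {
	.probe  = example_probe,
	.remove = example_remove,
	.driver = {
		.name = "example_nubus",
	},
};

module_driver(example_driver, nubus_driver_register, nubus_driver_unregister);
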
diff --git a/include/linux/numa.h b/include/linux/numa.h
index 3aaa31603a86..59df211d051f 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -1,6 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NUMA_H
#define _LINUX_NUMA_H
-
+#include <linux/types.h>
#ifdef CONFIG_NODES_SHIFT
#define NODES_SHIFT CONFIG_NODES_SHIFT
@@ -12,4 +13,53 @@
#define NUMA_NO_NODE (-1)
+/* optionally keep NUMA memory info available post init */
+#ifdef CONFIG_NUMA_KEEP_MEMINFO
+#define __initdata_or_meminfo
+#else
+#define __initdata_or_meminfo __initdata
+#endif
+
+#ifdef CONFIG_NUMA
+#include <linux/printk.h>
+#include <asm/sparsemem.h>
+
+/* Generic implementation available */
+int numa_map_to_online_node(int node);
+
+#ifndef memory_add_physaddr_to_nid
+static inline int memory_add_physaddr_to_nid(u64 start)
+{
+ pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n",
+ start);
+ return 0;
+}
+#endif
+#ifndef phys_to_target_node
+static inline int phys_to_target_node(u64 start)
+{
+ pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n",
+ start);
+ return 0;
+}
+#endif
+#else /* !CONFIG_NUMA */
+static inline int numa_map_to_online_node(int node)
+{
+ return NUMA_NO_NODE;
+}
+static inline int memory_add_physaddr_to_nid(u64 start)
+{
+ return 0;
+}
+static inline int phys_to_target_node(u64 start)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
+extern const struct attribute_group arch_node_dev_group;
+#endif
+
#endif /* _LINUX_NUMA_H */
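
A hedged sketch of the helpers declared above: a driver picking an online
node for allocations near its device. The physical address argument is a
placeholder.

#include <linux/numa.h>

static int example_pick_alloc_node(u64 dev_phys_addr)
{
	int nid = phys_to_target_node(dev_phys_addr);

	/* the target node may be memoryless or offline; remap it */
	return numa_map_to_online_node(nid);
}
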
diff --git a/include/linux/nvme-auth.h b/include/linux/nvme-auth.h
new file mode 100644
index 000000000000..dcb8030062dd
--- /dev/null
+++ b/include/linux/nvme-auth.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 Hannes Reinecke, SUSE Software Solutions
+ */
+
+#ifndef _NVME_AUTH_H
+#define _NVME_AUTH_H
+
+#include <crypto/kpp.h>
+
+struct nvme_dhchap_key {
+ u8 *key;
+ size_t len;
+ u8 hash;
+};
+
+u32 nvme_auth_get_seqnum(void);
+const char *nvme_auth_dhgroup_name(u8 dhgroup_id);
+const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id);
+u8 nvme_auth_dhgroup_id(const char *dhgroup_name);
+
+const char *nvme_auth_hmac_name(u8 hmac_id);
+const char *nvme_auth_digest_name(u8 hmac_id);
+size_t nvme_auth_hmac_hash_len(u8 hmac_id);
+u8 nvme_auth_hmac_id(const char *hmac_name);
+
+struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
+ u8 key_hash);
+void nvme_auth_free_key(struct nvme_dhchap_key *key);
+u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn);
+int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key);
+int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
+ u8 *challenge, u8 *aug, size_t hlen);
+int nvme_auth_gen_privkey(struct crypto_kpp *dh_tfm, u8 dh_gid);
+int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm,
+ u8 *host_key, size_t host_key_len);
+int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm,
+ u8 *ctrl_key, size_t ctrl_key_len,
+ u8 *sess_key, size_t sess_key_len);
+
+#endif /* _NVME_AUTH_H */
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 6c8c5d8041b7..fa092b9be2fd 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -1,65 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016, Avago Technologies
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef _NVME_FC_DRIVER_H
#define _NVME_FC_DRIVER_H 1
+#include <linux/scatterlist.h>
+#include <linux/blk-mq.h>
+
/*
- * ********************** LLDD FC-NVME Host API ********************
+ * ********************** FC-NVME LS API ********************
*
- * For FC LLDD's that are the NVME Host role.
+ * Data structures used by both FC-NVME hosts and FC-NVME
+ * targets to perform FC-NVME LS requests or transmit
+ * responses.
*
- * ******************************************************************
+ * ***********************************************************
*/
-
-
-/* FC Port role bitmask - can merge with FC Port Roles in fc transport */
-#define FC_PORT_ROLE_NVME_INITIATOR 0x10
-#define FC_PORT_ROLE_NVME_TARGET 0x20
-#define FC_PORT_ROLE_NVME_DISCOVERY 0x40
-
-
/**
- * struct nvme_fc_port_info - port-specific ids and FC connection-specific
- * data element used during NVME Host role
- * registrations
+ * struct nvmefc_ls_req - Request structure passed from the transport
+ * to the LLDD to perform a NVME-FC LS request and obtain
+ * a response.
+ * Used by nvme-fc transport (host) to send LS's such as
+ * Create Association, Create Connection and Disconnect
+ * Association.
+ * Used by the nvmet-fc transport (controller) to send
+ * LS's such as Disconnect Association.
*
- * Static fields describing the port being registered:
- * @node_name: FC WWNN for the port
- * @port_name: FC WWPN for the port
- * @port_role: What NVME roles are supported (see FC_PORT_ROLE_xxx)
- *
- * Initialization values for dynamic port fields:
- * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must
- * be set to 0.
- */
-struct nvme_fc_port_info {
- u64 node_name;
- u64 port_name;
- u32 port_role;
- u32 port_id;
-};
-
-
-/**
- * struct nvmefc_ls_req - Request structure passed from NVME-FC transport
- * to LLDD in order to perform a NVME FC-4 LS
- * request and obtain a response.
- *
- * Values set by the NVME-FC layer prior to calling the LLDD ls_req
- * entrypoint.
+ * Values set by the requestor prior to calling the LLDD ls_req entrypoint:
* @rqstaddr: pointer to request buffer
* @rqstdma: PCI DMA address of request buffer
* @rqstlen: Length, in bytes, of request buffer
@@ -72,8 +43,8 @@ struct nvme_fc_port_info {
* @private: pointer to memory allocated alongside the ls request structure
* that is specifically for the LLDD to use while processing the
* request. The length of the buffer corresponds to the
- * lsrqst_priv_sz value specified in the nvme_fc_port_template
- * supplied by the LLDD.
+ * lsrqst_priv_sz value specified in the xxx_template supplied
+ * by the LLDD.
* @done: The callback routine the LLDD is to invoke upon completion of
* the LS request. req argument is the pointer to the original LS
* request structure. Status argument must be 0 upon success, a
@@ -95,6 +66,101 @@ struct nvmefc_ls_req {
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
+/**
+ * struct nvmefc_ls_rsp - Structure passed from the transport to the LLDD
+ * to request the transmit of the NVME-FC LS response to a
+ * NVME-FC LS request. The structure originates in the LLDD
+ * and is given to the transport via the xxx_rcv_ls_req()
+ * transport routine. As such, the structure represents the
+ * FC exchange context for the NVME-FC LS request that was
+ * received and which the response is to be sent for.
+ * Used by the LLDD to pass the nvmet-fc transport (controller)
+ * received LS's such as Create Association, Create Connection
+ * and Disconnect Association.
+ * Used by the LLDD to pass the nvme-fc transport (host)
+ * received LS's such as Disconnect Association or Disconnect
+ * Connection.
+ *
+ * The structure is allocated by the LLDD whenever a LS Request is received
+ * from the FC link. The address of the structure is passed to the nvmet-fc
+ * or nvme-fc layer via the xxx_rcv_ls_req() transport routines.
+ *
+ * The address of the structure is to be passed back to the LLDD
+ * when the response is to be transmitted. The LLDD will use the address to
+ * map back to the LLDD exchange structure, which maintains information such
+ * as the remote N_Port that sent the LS as well as any FC exchange context.
+ * Upon completion of the LS response transmit, the LLDD will pass the
+ * address of the structure back to the transport LS rsp done() routine,
+ * allowing the transport to release DMA resources. Upon completion of
+ * the done() routine, no further access to the structure will be made by
+ * the transport and the LLDD can de-allocate the structure.
+ *
+ * Field initialization:
+ * At the time of the xxx_rcv_ls_req() call, there is no content that
+ * is valid in the structure.
+ *
+ * When the structure is used for the LLDD->xmt_ls_rsp() call, the
+ * transport layer will fully set the fields in order to specify the
+ * response payload buffer and its length as well as the done routine
+ * to be called upon completion of the transmit. The transport layer
+ * will also set a private pointer for its own use in the done routine.
+ *
+ * Values set by the transport layer prior to calling the LLDD xmt_ls_rsp
+ * entrypoint:
+ * @rspbuf: pointer to the LS response buffer
+ * @rspdma: PCI DMA address of the LS response buffer
+ * @rsplen: Length, in bytes, of the LS response buffer
+ * @done: The callback routine the LLDD is to invoke upon completion of
+ * transmitting the LS response. req argument is the pointer to
+ * the original ls request.
+ * @nvme_fc_private: pointer to an internal transport-specific structure
+ * used as part of the transport done() processing. The LLDD is
+ * not to access this pointer.
+ */
+struct nvmefc_ls_rsp {
+ void *rspbuf;
+ dma_addr_t rspdma;
+ u16 rsplen;
+
+ void (*done)(struct nvmefc_ls_rsp *rsp);
+ void *nvme_fc_private; /* LLDD is not to access !! */
+};
+
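As a sketch of the lifecycle described above (the my_lldd_* names are invented; only struct nvmefc_ls_rsp and its done() contract come from this header), an LLDD transmit-complete path might look like:

/* Hypothetical LLDD exchange embedding the nvmefc_ls_rsp that was
 * handed to the transport via xxx_rcv_ls_req().
 */
struct my_lldd_ls_xchg {
	struct nvmefc_ls_rsp	ls_rsp;
	struct nvme_fc_remote_port *rport;	/* sender of the LS */
	bool			aborted;	/* ABTS seen before xmt_ls_rsp() */
	/* FC exchange context, DMA handles, ... */
};

/* Called from the LLDD's transmit-complete path once the LS response
 * frames are on the wire (or the transmit was noop'd after an abort).
 */
static void my_lldd_ls_rsp_xmt_done(struct my_lldd_ls_xchg *xchg)
{
	xchg->ls_rsp.done(&xchg->ls_rsp);	/* transport releases dma resources */
	kfree(xchg);				/* legal only after done() returns */
}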
+
+
+/*
+ * ********************** LLDD FC-NVME Host API ********************
+ *
+ * For FC LLDD's that are the NVME Host role.
+ *
+ * ******************************************************************
+ */
+
+
+/**
+ * struct nvme_fc_port_info - port-specific ids and FC connection-specific
+ * data element used during NVME Host role
+ * registrations
+ *
+ * Static fields describing the port being registered:
+ * @node_name: FC WWNN for the port
+ * @port_name: FC WWPN for the port
+ * @port_role: What NVME roles are supported (see FC_PORT_ROLE_xxx)
+ * @dev_loss_tmo: maximum delay for reconnects to an association on
+ * this device. Used only on a remoteport.
+ *
+ * Initialization values for dynamic port fields:
+ * @port_id: FC N_Port_ID currently assigned to the port. Upper 8 bits must
+ * be set to 0.
+ */
+struct nvme_fc_port_info {
+ u64 node_name;
+ u64 port_name;
+ u32 port_role;
+ u32 port_id;
+ u32 dev_loss_tmo;
+};
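For illustration, a host-role LLDD might register a discovered NVMe-capable remote port as below. All values are invented, the three-argument register form is an assumption (the full prototype is not visible in this hunk), and the FC_PORT_ROLE_xxx values come from the SCSI FC transport headers.

struct nvme_fc_port_info pinfo = {
	.node_name	= 0x200000109b346dffULL,	/* example WWNN */
	.port_name	= 0x100000109b346dffULL,	/* example WWPN */
	.port_role	= FC_PORT_ROLE_NVME_TARGET,
	.port_id	= 0x010200,	/* 24-bit N_Port_ID, upper 8 bits zero */
	.dev_loss_tmo	= 60,		/* seconds; meaningful on a remoteport */
};
struct nvme_fc_remote_port *rport;
int ret = nvme_fc_register_remoteport(localport, &pinfo, &rport);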
+
enum nvmefc_fcp_datadir {
NVMEFC_FCP_NODATA, /* payload_length and sg_cnt will be zero */
NVMEFC_FCP_WRITE,
@@ -102,8 +168,6 @@ enum nvmefc_fcp_datadir {
};
-#define NVME_FC_MAX_SEGMENTS 256
-
/**
* struct nvmefc_fcp_req - Request structure passed from NVME-FC transport
* to LLDD in order to perform a NVME FCP IO operation.
@@ -202,6 +266,9 @@ enum nvme_fc_obj_state {
* The length of the buffer corresponds to the local_priv_sz
* value specified in the nvme_fc_port_template supplied by
* the LLDD.
+ * @dev_loss_tmo: maximum delay for reconnects to an association on
+ * this device. To modify, lldd must call
+ * nvme_fc_set_remoteport_devloss().
*
 * Fields with dynamic values. Values may change based on link state. LLDD
* may reference fields directly to change them. Initialized by the
@@ -259,10 +326,9 @@ struct nvme_fc_remote_port {
u32 port_role;
u64 node_name;
u64 port_name;
-
struct nvme_fc_local_port *localport;
-
void *private;
+ u32 dev_loss_tmo;
/* dynamic fields */
u32 port_id;
@@ -346,6 +412,21 @@ struct nvme_fc_remote_port {
* indicating an FC transport Aborted status.
* Entrypoint is Mandatory.
*
+ * @xmt_ls_rsp: Called to transmit the response to a FC-NVME FC-4 LS service.
+ * The nvmefc_ls_rsp structure is the same LLDD-supplied exchange
+ * structure specified in the nvme_fc_rcv_ls_req() call made when
+ * the LS request was received. The structure will fully describe
+ * the buffers for the response payload and the dma address of the
+ * payload. The LLDD is to transmit the response (or return a
+ * non-zero errno status), and upon completion of the transmit, call
+ * the "done" routine specified in the nvmefc_ls_rsp structure
+ * (argument to done is the address of the nvmefc_ls_rsp structure
+ * itself). Upon the completion of the done routine, the LLDD shall
+ * consider the LS handling complete and the nvmefc_ls_rsp structure
+ * may be freed/released.
+ * Entrypoint is mandatory if the LLDD calls the nvme_fc_rcv_ls_req()
+ * entrypoint.
+ *
* @max_hw_queues: indicates the maximum number of hw queues the LLDD
* supports for cpu affinitization.
* Value is Mandatory. Must be at least 1.
@@ -380,7 +461,7 @@ struct nvme_fc_remote_port {
* @lsrqst_priv_sz: The LLDD sets this field to the amount of additional
* memory that it would like fc nvme layer to allocate on the LLDD's
* behalf whenever a ls request structure is allocated. The additional
- * memory area solely for the of the LLDD and its location is
+ * memory area is solely for use by the LLDD and its location is
* specified by the ls_request->private pointer.
* Value is Mandatory. Allowed to be zero.
*
@@ -400,7 +481,6 @@ struct nvme_fc_port_template {
void **handle);
void (*delete_queue)(struct nvme_fc_local_port *,
unsigned int qidx, void *handle);
- void (*poll_queue)(struct nvme_fc_local_port *, void *handle);
int (*ls_req)(struct nvme_fc_local_port *,
struct nvme_fc_remote_port *,
struct nvmefc_ls_req *);
@@ -415,6 +495,11 @@ struct nvme_fc_port_template {
struct nvme_fc_remote_port *,
void *hw_queue_handle,
struct nvmefc_fcp_req *);
+ int (*xmt_ls_rsp)(struct nvme_fc_local_port *localport,
+ struct nvme_fc_remote_port *rport,
+ struct nvmefc_ls_rsp *ls_rsp);
+ void (*map_queues)(struct nvme_fc_local_port *localport,
+ struct blk_mq_queue_map *map);
u32 max_hw_queues;
u16 max_sgl_segments;
@@ -446,9 +531,50 @@ int nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
int nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *remoteport);
+void nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport);
+
+int nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *remoteport,
+ u32 dev_loss_tmo);
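Usage is a single call; for example, an LLDD could lengthen the reconnect window after a transient fabric event (120 seconds is an arbitrary example value):

if (nvme_fc_set_remoteport_devloss(rport, 120))
	pr_warn("dev_loss_tmo update not applied\n");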
+
+/*
+ * Routine called to pass a NVME-FC LS request, received by the lldd,
+ * to the nvme-fc transport.
+ *
+ * If the return value is zero: the LS was successfully accepted by the
+ * transport.
+ * If the return value is non-zero: the transport has not accepted the
+ * LS. The lldd should ABTS-LS the LS.
+ *
+ * Note: if the LLDD receives an ABTS for the LS prior to the transport
+ * calling the ops->xmt_ls_rsp() routine to transmit a response, the LLDD
+ * shall mark the LS as aborted, and when the xmt_ls_rsp() is called: the
+ * response shall not be transmitted and the nvmefc_ls_rsp done()
+ * routine shall be called. The LLDD may transmit the ABTS response as
+ * soon as the LS is marked or can delay until the xmt_ls_rsp() call is
+ * made.
+ * Note: if an RCV LS was successfully posted to the transport and the
+ * remoteport is then unregistered before xmt_ls_rsp() was called for
+ * the lsrsp structure, the transport will still call xmt_ls_rsp()
+ * afterward to clean up the outstanding lsrsp structure. The LLDD should
+ * noop the transmission of the rsp and call the lsrsp->done() routine
+ * to allow the lsrsp structure to be released.
+ */
+int nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *remoteport,
+ struct nvmefc_ls_rsp *lsrsp,
+ void *lsreqbuf, u32 lsreqbuf_len);
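A sketch of the receive path this implies, reusing the hypothetical exchange structure sketched earlier (my_lldd_abts_ls() is likewise invented):

static void my_lldd_recv_ls(struct my_lldd_ls_xchg *xchg, void *buf, u32 len)
{
	if (nvme_fc_rcv_ls_req(xchg->rport, &xchg->ls_rsp, buf, len)) {
		my_lldd_abts_ls(xchg);	/* transport refused: ABTS-LS it */
		kfree(xchg);
		return;
	}
	/* accepted: keep the exchange until ops->xmt_ls_rsp() is called */
}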
/*
+ * Routine called by the lldd to get the appid field associated with
+ * the request.
+ *
+ * If the return value is NULL: the user/libvirt has not set an appid
+ * for the VM.
+ * If the return value is non-NULL: the appid associated with the VM.
+ *
+ * @req: IO request from nvme fc to driver
+ */
+char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req);
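A minimal use in the FCP submission path (my_lldd_set_appid() is hypothetical):

char *appid = nvme_fc_io_getuuid(fcpreq);

if (appid)		/* NULL when user/libvirt set no appid for the VM */
	my_lldd_set_appid(hw_queue_handle, appid);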
+
+/*
* *************** LLDD FC-NVME Target/Subsystem API ***************
*
* For FC LLDD's that are the NVME Subsystem role
@@ -476,55 +602,6 @@ struct nvmet_fc_port_info {
};
-/**
- * struct nvmefc_tgt_ls_req - Structure used between LLDD and NVMET-FC
- * layer to represent the exchange context for
- * a FC-NVME Link Service (LS).
- *
- * The structure is allocated by the LLDD whenever a LS Request is received
- * from the FC link. The address of the structure is passed to the nvmet-fc
- * layer via the nvmet_fc_rcv_ls_req() call. The address of the structure
- * will be passed back to the LLDD when the response is to be transmit.
- * The LLDD is to use the address to map back to the LLDD exchange structure
- * which maintains information such as the targetport the LS was received
- * on, the remote FC NVME initiator that sent the LS, and any FC exchange
- * context. Upon completion of the LS response transmit, the address of the
- * structure will be passed back to the LS rsp done() routine, allowing the
- * nvmet-fc layer to release dma resources. Upon completion of the done()
- * routine, no further access will be made by the nvmet-fc layer and the
- * LLDD can de-allocate the structure.
- *
- * Field initialization:
- * At the time of the nvmet_fc_rcv_ls_req() call, there is no content that
- * is valid in the structure.
- *
- * When the structure is used for the LLDD->xmt_ls_rsp() call, the nvmet-fc
- * layer will fully set the fields in order to specify the response
- * payload buffer and its length as well as the done routine to be called
- * upon compeletion of the transmit. The nvmet-fc layer will also set a
- * private pointer for its own use in the done routine.
- *
- * Values set by the NVMET-FC layer prior to calling the LLDD xmt_ls_rsp
- * entrypoint.
- * @rspbuf: pointer to the LS response buffer
- * @rspdma: PCI DMA address of the LS response buffer
- * @rsplen: Length, in bytes, of the LS response buffer
- * @done: The callback routine the LLDD is to invoke upon completion of
- * transmitting the LS response. req argument is the pointer to
- * the original ls request.
- * @nvmet_fc_private: pointer to an internal NVMET-FC layer structure used
- * as part of the NVMET-FC processing. The LLDD is not to access
- * this pointer.
- */
-struct nvmefc_tgt_ls_req {
- void *rspbuf;
- dma_addr_t rspdma;
- u16 rsplen;
-
- void (*done)(struct nvmefc_tgt_ls_req *req);
- void *nvmet_fc_private; /* LLDD is not to access !! */
-};
-
/* Operations that NVME-FC layer may request the LLDD to perform for FCP */
enum {
NVMET_FCOP_READDATA = 1, /* xmt data to initiator */
@@ -607,7 +684,7 @@ enum {
* Values set by the LLDD indicating completion status of the FCP operation.
* Must be set prior to calling the done() callback.
* @transferred_length: amount of DATA_OUT payload data received by a
- * a WRITEDATA operation. If not a WRITEDATA operation, value must
+ * WRITEDATA operation. If not a WRITEDATA operation, value must
* be set to 0. Should equal transfer_length on success.
* @fcp_error: status of the FCP operation. Must be 0 on success; on failure
* must be a NVME_SC_FC_xxxx value.
@@ -619,7 +696,7 @@ struct nvmefc_tgt_fcp_req {
u32 timeout;
u32 transfer_length;
struct fc_ba_rjt ba_rjt;
- struct scatterlist sg[NVME_FC_MAX_SEGMENTS];
+ struct scatterlist *sg;
int sg_cnt;
void *rspaddr;
dma_addr_t rspdma;
@@ -642,22 +719,6 @@ enum {
* sequence in one LLDD operation. Errors during Data
* sequence transmit must not allow RSP sequence to be sent.
*/
- NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 1),
- /* Bit 2: When 0, the LLDD is calling the cmd rcv handler
- * in a non-isr context, allowing the transport to finish
- * op completion in the calling context. When 1, the LLDD
- * is calling the cmd rcv handler in an ISR context,
- * requiring the transport to transition to a workqueue
- * for op completion.
- */
- NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 2),
- /* Bit 3: When 0, the LLDD is calling the op done handler
- * in a non-isr context, allowing the transport to finish
- * op completion in the calling context. When 1, the LLDD
- * is calling the op done handler in an ISR context,
- * requiring the transport to transition to a workqueue
- * for op completion.
- */
};
@@ -669,7 +730,7 @@ enum {
*
* Fields with static values for the port. Initialized by the
* port_info struct supplied to the registration call.
- * @port_num: NVME-FC transport subsytem port number
+ * @port_num: NVME-FC transport subsystem port number
* @node_name: FC WWNN for the port
* @port_name: FC WWPN for the port
* @private: pointer to memory allocated alongside the local port
@@ -715,19 +776,25 @@ struct nvmet_fc_target_port {
* Entrypoint is Mandatory.
*
* @xmt_ls_rsp: Called to transmit the response to a FC-NVME FC-4 LS service.
- * The nvmefc_tgt_ls_req structure is the same LLDD-supplied exchange
+ * The nvmefc_ls_rsp structure is the same LLDD-supplied exchange
* structure specified in the nvmet_fc_rcv_ls_req() call made when
- * the LS request was received. The structure will fully describe
+ * the LS request was received. The structure will fully describe
* the buffers for the response payload and the dma address of the
- * payload. The LLDD is to transmit the response (or return a non-zero
- * errno status), and upon completion of the transmit, call the
- * "done" routine specified in the nvmefc_tgt_ls_req structure
- * (argument to done is the ls reqwuest structure itself).
- * After calling the done routine, the LLDD shall consider the
- * LS handling complete and the nvmefc_tgt_ls_req structure may
- * be freed/released.
+ * payload. The LLDD is to transmit the response (or return a
+ * non-zero errno status), and upon completion of the transmit, call
+ * the "done" routine specified in the nvmefc_ls_rsp structure
+ * (argument to done is the address of the nvmefc_ls_rsp structure
+ * itself). Upon the completion of the done() routine, the LLDD shall
+ * consider the LS handling complete and the nvmefc_ls_rsp structure
+ * may be freed/released.
+ * The transport will always call the xmt_ls_rsp() routine for any
+ * LS received.
* Entrypoint is Mandatory.
*
+ * @map_queues: This function lets the driver expose the queue mapping
+ * to the block layer.
+ * Entrypoint is Optional.
+ *
* @fcp_op: Called to perform a data transfer or transmit a response.
* The nvmefc_tgt_fcp_req structure is the same LLDD-supplied
* exchange structure specified in the nvmet_fc_rcv_fcp_req() call
@@ -801,11 +868,57 @@ struct nvmet_fc_target_port {
* outstanding operation (if there was one) to complete, then will
* call the fcp_req_release() callback to return the command's
* exchange context back to the LLDD.
+ * Entrypoint is Mandatory.
*
* @fcp_req_release: Called by the transport to return a nvmefc_tgt_fcp_req
* to the LLDD after all operations on the fcp operation are complete.
* This may be due to the command completing or upon completion of
* abort cleanup.
+ * Entrypoint is Mandatory.
+ *
+ * @defer_rcv: Called by the transport to signal the LLDD that it has
+ * begun processing of a previously received NVME CMD IU. The LLDD
+ * is now free to re-use the rcv buffer associated with the
+ * nvmefc_tgt_fcp_req.
+ * Entrypoint is Optional.
+ *
+ * @discovery_event: Called by the transport to generate RSCN
+ * change notifications to NVME initiators. The RSCN notifications
+ * should cause the initiator to rescan the discovery controller
+ * on the targetport.
+ *
+ * @ls_req: Called to issue a FC-NVME FC-4 LS service request.
+ * The nvme_fc_ls_req structure will fully describe the buffers for
+ * the request payload and where to place the response payload.
+ * The targetport that is to issue the LS request is identified by
+ * the targetport argument. The remote port that is to receive the
+ * LS request is identified by the hosthandle argument. The nvmet-fc
+ * transport is only allowed to issue FC-NVME LS's on behalf of an
+ * association previously created by a Create Association LS.
+ * The hosthandle will originate from the LLDD in the struct
+ * nvmefc_ls_rsp structure for the Create Association LS that
+ * was delivered to the transport. The transport will save the
+ * hosthandle as an attribute of the association. If the LLDD
+ * loses connectivity with the remote port, it must call the
+ * nvmet_fc_invalidate_host() routine to remove any references to
+ * the remote port in the transport.
+ * The LLDD is to allocate an exchange, issue the LS request, obtain
+ * the LS response, and call the "done" routine specified in the
+ * request structure (argument to done is the ls request structure
+ * itself).
+ * Entrypoint is Optional - but highly recommended.
+ *
+ * @ls_abort: called to request the LLDD to abort the indicated ls request.
+ * The call may return before the abort has completed. After aborting
+ * the request, the LLDD must still call the ls request done routine
+ * indicating an FC transport Aborted status.
+ * Entrypoint is Mandatory if the ls_req entry point is specified.
+ *
+ * @host_release: called to inform the LLDD that the request to invalidate
+ * the host port indicated by the hosthandle has been fully completed.
+ * No associations exist with the host port and there will be no
+ * further references to hosthandle.
+ * Entrypoint is Mandatory if the lldd calls nvmet_fc_invalidate_host().
*
* @max_hw_queues: indicates the maximum number of hw queues the LLDD
* supports for cpu affinitization.
@@ -835,17 +948,33 @@ struct nvmet_fc_target_port {
 * area is solely for use by the LLDD and its location is specified by
* the targetport->private pointer.
* Value is Mandatory. Allowed to be zero.
+ *
+ * @lsrqst_priv_sz: The LLDD sets this field to the amount of additional
+ * memory that it would like nvmet-fc layer to allocate on the LLDD's
+ * behalf whenever a ls request structure is allocated. The additional
+ * memory area is solely for use by the LLDD and its location is
+ * specified by the ls_request->private pointer.
+ * Value is Mandatory. Allowed to be zero.
+ *
*/
struct nvmet_fc_target_template {
void (*targetport_delete)(struct nvmet_fc_target_port *tgtport);
int (*xmt_ls_rsp)(struct nvmet_fc_target_port *tgtport,
- struct nvmefc_tgt_ls_req *tls_req);
+ struct nvmefc_ls_rsp *ls_rsp);
int (*fcp_op)(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq);
void (*fcp_abort)(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq);
void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq);
+ void (*defer_rcv)(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq);
+ void (*discovery_event)(struct nvmet_fc_target_port *tgtport);
+ int (*ls_req)(struct nvmet_fc_target_port *targetport,
+ void *hosthandle, struct nvmefc_ls_req *lsreq);
+ void (*ls_abort)(struct nvmet_fc_target_port *targetport,
+ void *hosthandle, struct nvmefc_ls_req *lsreq);
+ void (*host_release)(void *hosthandle);
u32 max_hw_queues;
u16 max_sgl_segments;
@@ -854,7 +983,9 @@ struct nvmet_fc_target_template {
u32 target_features;
+ /* sizes of additional private data for data structures */
u32 target_priv_sz;
+ u32 lsrqst_priv_sz;
};
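As a sketch, a minimal target template could be initialized as below; the my_lldd_* entrypoints and the two private-size structures are placeholders, and only fields visible in this header are set:

static struct nvmet_fc_target_template my_lldd_tgt_template = {
	.targetport_delete	= my_lldd_targetport_delete,
	.xmt_ls_rsp		= my_lldd_xmt_ls_rsp,
	.fcp_op			= my_lldd_fcp_op,
	.fcp_abort		= my_lldd_fcp_abort,
	.fcp_req_release	= my_lldd_fcp_req_release,
	.max_hw_queues		= 4,		/* example */
	.max_sgl_segments	= 128,		/* example */
	.target_priv_sz		= sizeof(struct my_lldd_tgtport_priv),
	.lsrqst_priv_sz		= sizeof(struct my_lldd_ls_priv),
};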
@@ -865,15 +996,71 @@ int nvmet_fc_register_targetport(struct nvmet_fc_port_info *portinfo,
int nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *tgtport);
+/*
+ * Routine called to pass a NVME-FC LS request, received by the lldd,
+ * to the nvmet-fc transport.
+ *
+ * If the return value is zero: the LS was successfully accepted by the
+ * transport.
+ * If the return value is non-zero: the transport has not accepted the
+ * LS. The lldd should ABTS-LS the LS.
+ *
+ * Note: if the LLDD receives an ABTS for the LS prior to the transport
+ * calling the ops->xmt_ls_rsp() routine to transmit a response, the LLDD
+ * shall mark the LS as aborted, and when the xmt_ls_rsp() is called: the
+ * response shall not be transmitted and the nvmefc_ls_rsp done()
+ * routine shall be called. The LLDD may transmit the ABTS response as
+ * soon as the LS is marked or can delay until the xmt_ls_rsp() call is
+ * made.
+ * Note: if an RCV LS was successfully posted to the transport and the
+ * targetport is then unregistered before xmt_ls_rsp() was called for
+ * the lsrsp structure, the transport will still call xmt_ls_rsp()
+ * afterward to clean up the outstanding lsrsp structure. The LLDD should
+ * noop the transmission of the rsp and call the lsrsp->done() routine
+ * to allow the lsrsp structure to be released.
+ */
int nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *tgtport,
- struct nvmefc_tgt_ls_req *lsreq,
+ void *hosthandle,
+ struct nvmefc_ls_rsp *rsp,
void *lsreqbuf, u32 lsreqbuf_len);
+/*
+ * Routine called by the LLDD whenever it has a logout or loss of
+ * connectivity to a NVME-FC host port for which there had been
+ * active NVMe controllers. The host port is indicated by the
+ * hosthandle. The hosthandle is given to the nvmet-fc transport
+ * when a NVME LS was received, typically to create a new association.
+ * The nvmet-fc transport will cache the hosthandle value with the
+ * association for use in LS requests for the association.
+ * When the LLDD calls this routine, the nvmet-fc transport will
+ * immediately terminate all associations that were created with
+ * the hosthandle host port.
+ * The LLDD, after calling this routine and having control returned,
+ * must assume the transport may subsequently utilize hosthandle as
+ * part of sending LS's to terminate the association. The LLDD
+ * should reject the LS's if they are attempted.
+ * Once the last association has terminated for the hosthandle host
+ * port, the nvmet-fc transport will call the ops->host_release()
+ * callback. As of the callback, the nvmet-fc transport will no
+ * longer reference hosthandle.
+ */
+void nvmet_fc_invalidate_host(struct nvmet_fc_target_port *tgtport,
+ void *hosthandle);
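Sketched usage on a logout or loss of connectivity (the hosthandle bookkeeping itself is the LLDD's own):

static void my_lldd_host_gone(struct nvmet_fc_target_port *tgtport,
			      void *hosthandle)
{
	nvmet_fc_invalidate_host(tgtport, hosthandle);
	/* The transport may still use hosthandle for teardown LS's;
	 * only after ops->host_release(hosthandle) is called may the
	 * LLDD forget the handle.
	 */
}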
+
+/*
+ * If nvmet_fc_rcv_fcp_req returns non-zero, the transport has not accepted
+ * the FCP cmd. The lldd should ABTS the cmd's exchange.
+ */
int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq,
void *cmdiubuf, u32 cmdiubuf_len);
void nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq);
+/*
+ * Add a define, visible to the compiler, that indicates support
+ * for the feature. Allows for conditional compilation in LLDDs.
+ */
+#define NVME_FC_FEAT_UUID 0x0001
#endif /* _NVME_FC_DRIVER_H */
diff --git a/include/linux/nvme-fc.h b/include/linux/nvme-fc.h
index 21c37e39e41a..51fe44e0328b 100644
--- a/include/linux/nvme-fc.h
+++ b/include/linux/nvme-fc.h
@@ -1,48 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016 Avago Technologies. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful.
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
- * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
- * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
- * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
- * See the GNU General Public License for more details, a copy of which
- * can be found in the file COPYING included with this package
- *
*/
/*
- * This file contains definitions relative to FC-NVME r1.14 (16-020vB).
- * The fcnvme_lsdesc_cr_assoc_cmd struct reflects expected r1.16 content.
+ * This file contains definitions relative to FC-NVME-2 r1.08
+ * (T11-2019-00210-v004).
*/
#ifndef _NVME_FC_H
#define _NVME_FC_H 1
+#include <uapi/scsi/fc/fc_fs.h>
-#define NVME_CMD_SCSI_ID 0xFD
+#define NVME_CMD_FORMAT_ID 0xFD
#define NVME_CMD_FC_ID FC_TYPE_NVME
/* FC-NVME Cmd IU Flags */
-#define FCNVME_CMD_FLAGS_DIRMASK 0x03
-#define FCNVME_CMD_FLAGS_WRITE 0x01
-#define FCNVME_CMD_FLAGS_READ 0x02
+enum {
+ FCNVME_CMD_FLAGS_DIRMASK = 0x03,
+ FCNVME_CMD_FLAGS_WRITE = (1 << 0),
+ FCNVME_CMD_FLAGS_READ = (1 << 1),
+
+ FCNVME_CMD_FLAGS_PICWP = (1 << 2),
+};
+
+enum {
+ FCNVME_CMD_CAT_MASK = 0x0F,
+ FCNVME_CMD_CAT_ADMINQ = 0x01,
+ FCNVME_CMD_CAT_CSSMASK = 0x07,
+ FCNVME_CMD_CAT_CSSFLAG = 0x08,
+};
+
+static inline __u8 fccmnd_set_cat_admin(__u8 rsv_cat)
+{
+ return (rsv_cat & ~FCNVME_CMD_CAT_MASK) | FCNVME_CMD_CAT_ADMINQ;
+}
+
+static inline __u8 fccmnd_set_cat_css(__u8 rsv_cat, __u8 css)
+{
+ return (rsv_cat & ~FCNVME_CMD_CAT_MASK) | FCNVME_CMD_CAT_CSSFLAG |
+ (css & FCNVME_CMD_CAT_CSSMASK);
+}
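For example, the rsv_cat byte of a command IU could be built with these helpers (a sketch based only on the enum above):

__u8 rsv_cat;

rsv_cat = fccmnd_set_cat_admin(0);	/* command for the admin queue */
/* or, for an I/O queue command on command set 0: */
rsv_cat = fccmnd_set_cat_css(0, 0);	/* sets CSSFLAG with CSS = 0 */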
struct nvme_fc_cmd_iu {
- __u8 scsi_id;
+ __u8 format_id;
__u8 fc_id;
__be16 iu_len;
- __u8 rsvd4[3];
+ __u8 rsvd4[2];
+ __u8 rsv_cat;
__u8 flags;
__be64 connection_id;
__be32 csn;
__be32 data_len;
struct nvme_command sqe;
- __be32 rsvd88[2];
+ __u8 dps;
+ __u8 lbads;
+ __be16 ms;
+ __be32 rsvd92;
};
#define NVME_FC_SIZEOF_ZEROS_RSP 12
@@ -50,11 +65,12 @@ struct nvme_fc_cmd_iu {
enum {
FCNVME_SC_SUCCESS = 0,
FCNVME_SC_INVALID_FIELD = 1,
- FCNVME_SC_INVALID_CONNID = 2,
+ /* reserved 2 */
+ FCNVME_SC_ILL_CONN_PARAMS = 3,
};
struct nvme_fc_ersp_iu {
- __u8 status_code;
+ __u8 ersp_result;
__u8 rsvd1;
__be16 iu_len;
__be32 rsn;
@@ -65,14 +81,45 @@ struct nvme_fc_ersp_iu {
};
-/* FC-NVME Link Services */
+#define FCNVME_NVME_SR_OPCODE 0x01
+#define FCNVME_NVME_SR_RSP_OPCODE 0x02
+
+struct nvme_fc_nvme_sr_iu {
+ __u8 fc_id;
+ __u8 opcode;
+ __u8 rsvd2;
+ __u8 retry_rctl;
+ __be32 rsvd4;
+};
+
+
+enum {
+ FCNVME_SRSTAT_ACC = 0x0,
+ /* reserved 0x1 */
+ /* reserved 0x2 */
+ FCNVME_SRSTAT_LOGICAL_ERR = 0x3,
+ FCNVME_SRSTAT_INV_QUALIF = 0x4,
+ FCNVME_SRSTAT_UNABL2PERFORM = 0x9,
+};
+
+struct nvme_fc_nvme_sr_rsp_iu {
+ __u8 fc_id;
+ __u8 opcode;
+ __u8 rsvd2;
+ __u8 status;
+ __be32 rsvd4;
+};
+
+
+/* FC-NVME Link Services - LS cmd values (w0 bits 31:24) */
enum {
FCNVME_LS_RSVD = 0,
FCNVME_LS_RJT = 1,
FCNVME_LS_ACC = 2,
- FCNVME_LS_CREATE_ASSOCIATION = 3,
- FCNVME_LS_CREATE_CONNECTION = 4,
- FCNVME_LS_DISCONNECT = 5,
+ FCNVME_LS_CREATE_ASSOCIATION = 3, /* Create Association */
+ FCNVME_LS_CREATE_CONNECTION = 4, /* Create I/O Connection */
+ FCNVME_LS_DISCONNECT_ASSOC = 5, /* Disconnect Association */
+ FCNVME_LS_DISCONNECT_CONN = 6, /* Disconnect Connection */
};
/* FC-NVME Link Service Descriptors */
@@ -129,14 +176,17 @@ enum fcnvme_ls_rjt_reason {
FCNVME_RJT_RC_UNSUP = 0x0b,
/* command not supported */
- FCNVME_RJT_RC_INPROG = 0x0e,
- /* command already in progress */
-
FCNVME_RJT_RC_INV_ASSOC = 0x40,
- /* Invalid Association ID*/
+ /* Invalid Association ID */
FCNVME_RJT_RC_INV_CONN = 0x41,
- /* Invalid Connection ID*/
+ /* Invalid Connection ID */
+
+ FCNVME_RJT_RC_INV_PARAM = 0x42,
+ /* Invalid Parameters */
+
+ FCNVME_RJT_RC_INSUF_RES = 0x43,
+ /* Insufficient Resources */
FCNVME_RJT_RC_VENDOR = 0xff,
/* vendor specific error */
@@ -150,14 +200,32 @@ enum fcnvme_ls_rjt_explan {
FCNVME_RJT_EXP_OXID_RXID = 0x17,
/* invalid OX_ID-RX_ID combination */
- FCNVME_RJT_EXP_INSUF_RES = 0x29,
- /* insufficient resources */
-
FCNVME_RJT_EXP_UNAB_DATA = 0x2a,
/* unable to supply requested data */
FCNVME_RJT_EXP_INV_LEN = 0x2d,
/* Invalid payload length */
+
+ FCNVME_RJT_EXP_INV_ERSP_RAT = 0x40,
+ /* Invalid NVMe_ERSP Ratio */
+
+ FCNVME_RJT_EXP_INV_CTLR_ID = 0x41,
+ /* Invalid Controller ID */
+
+ FCNVME_RJT_EXP_INV_QUEUE_ID = 0x42,
+ /* Invalid Queue ID */
+
+ FCNVME_RJT_EXP_INV_SQSIZE = 0x43,
+ /* Invalid Submission Queue Size */
+
+ FCNVME_RJT_EXP_INV_HOSTID = 0x44,
+ /* Invalid HOST ID */
+
+ FCNVME_RJT_EXP_INV_HOSTNQN = 0x45,
+ /* Invalid HOSTNQN */
+
+ FCNVME_RJT_EXP_INV_SUBNQN = 0x46,
+ /* Invalid SUBNQN */
};
/* FCNVME_LSDESC_RJT */
@@ -221,21 +289,11 @@ struct fcnvme_lsdesc_cr_conn_cmd {
__be32 rsvd52;
};
-/* Disconnect Scope Values */
-enum {
- FCNVME_DISCONN_ASSOCIATION = 0,
- FCNVME_DISCONN_CONNECTION = 1,
-};
-
/* FCNVME_LSDESC_DISCONN_CMD */
struct fcnvme_lsdesc_disconn_cmd {
__be32 desc_tag; /* FCNVME_LSDESC_xxx */
__be32 desc_len;
- u8 rsvd8[3];
- /* note: scope is really a 1 bit field */
- u8 scope; /* FCNVME_DISCONN_xxx */
- __be32 rsvd12;
- __be64 id;
+ __be32 rsvd8[4];
};
/* FCNVME_LSDESC_CONN_ID */
@@ -254,9 +312,14 @@ struct fcnvme_lsdesc_assoc_id {
/* r_ctl values */
enum {
- FCNVME_RS_RCTL_DATA = 1,
- FCNVME_RS_RCTL_XFER_RDY = 5,
- FCNVME_RS_RCTL_RSP = 8,
+ FCNVME_RS_RCTL_CMND = 0x6,
+ FCNVME_RS_RCTL_DATA = 0x1,
+ FCNVME_RS_RCTL_CONF = 0x3,
+ FCNVME_RS_RCTL_SR = 0x9,
+ FCNVME_RS_RCTL_XFER_RDY = 0x5,
+ FCNVME_RS_RCTL_RSP = 0x7,
+ FCNVME_RS_RCTL_ERSP = 0x8,
+ FCNVME_RS_RCTL_SR_RSP = 0xA,
};
@@ -276,7 +339,10 @@ struct fcnvme_ls_acc_hdr {
struct fcnvme_ls_rqst_w0 w0;
__be32 desc_list_len;
struct fcnvme_lsdesc_rqst rqst;
- /* Followed by cmd-specific ACC descriptors, see next definitions */
+ /*
+ * Followed by cmd-specific ACCEPT descriptors, see xxx_acc
+ * definitions below
+ */
};
/* FCNVME_LS_CREATE_ASSOCIATION */
@@ -314,25 +380,59 @@ struct fcnvme_ls_cr_conn_acc {
struct fcnvme_lsdesc_conn_id connectid;
};
-/* FCNVME_LS_DISCONNECT */
-struct fcnvme_ls_disconnect_rqst {
+/* FCNVME_LS_DISCONNECT_ASSOC */
+struct fcnvme_ls_disconnect_assoc_rqst {
struct fcnvme_ls_rqst_w0 w0;
__be32 desc_list_len;
struct fcnvme_lsdesc_assoc_id associd;
struct fcnvme_lsdesc_disconn_cmd discon_cmd;
};
-struct fcnvme_ls_disconnect_acc {
+struct fcnvme_ls_disconnect_assoc_acc {
struct fcnvme_ls_acc_hdr hdr;
};
+/* FCNVME_LS_DISCONNECT_CONN */
+struct fcnvme_ls_disconnect_conn_rqst {
+ struct fcnvme_ls_rqst_w0 w0;
+ __be32 desc_list_len;
+ struct fcnvme_lsdesc_assoc_id associd;
+ struct fcnvme_lsdesc_conn_id connectid;
+};
+
+struct fcnvme_ls_disconnect_conn_acc {
+ struct fcnvme_ls_acc_hdr hdr;
+};
+
+
+/*
+ * Default R_A_TOV is pulled in from fc_fs.h but needs conversion
+ * from ms to seconds for our use.
+ */
+#define FC_TWO_TIMES_R_A_TOV (2 * (FC_DEF_R_A_TOV / 1000))
+#define NVME_FC_LS_TIMEOUT_SEC FC_TWO_TIMES_R_A_TOV
+#define NVME_FC_TGTOP_TIMEOUT_SEC FC_TWO_TIMES_R_A_TOV
+
/*
- * Yet to be defined in FC-NVME:
+ * TRADDR string must be of form "nn-<16hexdigits>:pn-<16hexdigits>".
+ * The string is allowed to be specified with or without a "0x" prefix
+ * in front of the <16hexdigits>. Without the prefix it is considered
+ * the "min" string; with the prefix it is considered the "max" string.
+ * The hexdigits may be upper or lower case.
+ * Note: FC-NVME-2 standard requires a "0x" prefix.
*/
-#define NVME_FC_CONNECT_TIMEOUT_SEC 2 /* 2 seconds */
-#define NVME_FC_LS_TIMEOUT_SEC 2 /* 2 seconds */
-#define NVME_FC_TGTOP_TIMEOUT_SEC 2 /* 2 seconds */
+#define NVME_FC_TRADDR_NNLEN 3 /* "?n-" */
+#define NVME_FC_TRADDR_OXNNLEN 5 /* "?n-0x" */
+#define NVME_FC_TRADDR_HEXNAMELEN 16
+#define NVME_FC_TRADDR_MINLENGTH \
+ (2 * (NVME_FC_TRADDR_NNLEN + NVME_FC_TRADDR_HEXNAMELEN) + 1)
+#define NVME_FC_TRADDR_MAXLENGTH \
+ (2 * (NVME_FC_TRADDR_OXNNLEN + NVME_FC_TRADDR_HEXNAMELEN) + 1)
+#define NVME_FC_TRADDR_MIN_PN_OFFSET \
+ (NVME_FC_TRADDR_NNLEN + NVME_FC_TRADDR_HEXNAMELEN + 1)
+#define NVME_FC_TRADDR_MAX_PN_OFFSET \
+ (NVME_FC_TRADDR_OXNNLEN + NVME_FC_TRADDR_HEXNAMELEN + 1)
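Concretely (digits invented), the two accepted forms, and a sketch producing the "max" form:

/* "min": nn-200000109b346dff:pn-100000109b346dff        (39 chars)
 * "max": nn-0x200000109b346dff:pn-0x100000109b346dff    (43 chars)
 */
char traddr[NVME_FC_TRADDR_MAXLENGTH + 1];

snprintf(traddr, sizeof(traddr), "nn-0x%16.16llx:pn-0x%16.16llx",
	 (unsigned long long)wwnn, (unsigned long long)wwpn);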
#endif /* _NVME_FC_H */
diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h
index a72fd04aa5e1..4dd7e6fe92fb 100644
--- a/include/linux/nvme-rdma.h
+++ b/include/linux/nvme-rdma.h
@@ -1,19 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef _LINUX_NVME_RDMA_H
#define _LINUX_NVME_RDMA_H
+#define NVME_RDMA_MAX_QUEUE_SIZE 128
+
enum nvme_rdma_cm_fmt {
NVME_RDMA_CM_FMT_1_0 = 0x0,
};
@@ -85,7 +79,7 @@ struct nvme_rdma_cm_rep {
* struct nvme_rdma_cm_rej - rdma connect reject
*
* @recfmt: format of the RDMA Private Data
- * @fsts: error status for the associated connect request
+ * @sts: error status for the associated connect request
*/
struct nvme_rdma_cm_rej {
__le16 recfmt;
diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h
new file mode 100644
index 000000000000..57ebe1267f7f
--- /dev/null
+++ b/include/linux/nvme-tcp.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * NVMe over Fabrics TCP protocol header.
+ * Copyright (c) 2018 Lightbits Labs. All rights reserved.
+ */
+
+#ifndef _LINUX_NVME_TCP_H
+#define _LINUX_NVME_TCP_H
+
+#include <linux/nvme.h>
+
+#define NVME_TCP_DISC_PORT 8009
+#define NVME_TCP_ADMIN_CCSZ SZ_8K
+#define NVME_TCP_DIGEST_LENGTH 4
+#define NVME_TCP_MIN_MAXH2CDATA 4096
+
+enum nvme_tcp_pfv {
+ NVME_TCP_PFV_1_0 = 0x0,
+};
+
+enum nvme_tcp_fatal_error_status {
+ NVME_TCP_FES_INVALID_PDU_HDR = 0x01,
+ NVME_TCP_FES_PDU_SEQ_ERR = 0x02,
+ NVME_TCP_FES_HDR_DIGEST_ERR = 0x03,
+ NVME_TCP_FES_DATA_OUT_OF_RANGE = 0x04,
+ NVME_TCP_FES_R2T_LIMIT_EXCEEDED = 0x05,
+ NVME_TCP_FES_DATA_LIMIT_EXCEEDED = 0x05,
+ NVME_TCP_FES_UNSUPPORTED_PARAM = 0x06,
+};
+
+enum nvme_tcp_digest_option {
+ NVME_TCP_HDR_DIGEST_ENABLE = (1 << 0),
+ NVME_TCP_DATA_DIGEST_ENABLE = (1 << 1),
+};
+
+enum nvme_tcp_pdu_type {
+ nvme_tcp_icreq = 0x0,
+ nvme_tcp_icresp = 0x1,
+ nvme_tcp_h2c_term = 0x2,
+ nvme_tcp_c2h_term = 0x3,
+ nvme_tcp_cmd = 0x4,
+ nvme_tcp_rsp = 0x5,
+ nvme_tcp_h2c_data = 0x6,
+ nvme_tcp_c2h_data = 0x7,
+ nvme_tcp_r2t = 0x9,
+};
+
+enum nvme_tcp_pdu_flags {
+ NVME_TCP_F_HDGST = (1 << 0),
+ NVME_TCP_F_DDGST = (1 << 1),
+ NVME_TCP_F_DATA_LAST = (1 << 2),
+ NVME_TCP_F_DATA_SUCCESS = (1 << 3),
+};
+
+/**
+ * struct nvme_tcp_hdr - nvme tcp pdu common header
+ *
+ * @type: pdu type
+ * @flags: pdu specific flags
+ * @hlen: pdu header length
+ * @pdo: pdu data offset
+ * @plen: pdu wire byte length
+ */
+struct nvme_tcp_hdr {
+ __u8 type;
+ __u8 flags;
+ __u8 hlen;
+ __u8 pdo;
+ __le32 plen;
+};
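A sketch of how the fields relate, assuming (per the descriptions above) that hlen excludes digests while plen is the full wire length:

static void my_fill_hdr(struct nvme_tcp_hdr *hdr, __u8 type, __u8 hlen,
			u32 data_len, bool hdgst, bool ddgst)
{
	u8 hd = hdgst ? NVME_TCP_DIGEST_LENGTH : 0;
	u8 dd = (ddgst && data_len) ? NVME_TCP_DIGEST_LENGTH : 0;

	hdr->type  = type;
	hdr->flags = (hdgst ? NVME_TCP_F_HDGST : 0) |
		     (dd ? NVME_TCP_F_DDGST : 0);
	hdr->hlen  = hlen;
	hdr->pdo   = data_len ? hlen + hd : 0;	/* data follows header digest */
	hdr->plen  = cpu_to_le32(hlen + hd + data_len + dd);
}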
+
+/**
+ * struct nvme_tcp_icreq_pdu - nvme tcp initialize connection request pdu
+ *
+ * @hdr: pdu generic header
+ * @pfv: pdu version format
+ * @hpda: host pdu data alignment (dwords, 0's based)
+ * @digest: digest types enabled
+ * @maxr2t: maximum r2ts per request supported
+ */
+struct nvme_tcp_icreq_pdu {
+ struct nvme_tcp_hdr hdr;
+ __le16 pfv;
+ __u8 hpda;
+ __u8 digest;
+ __le32 maxr2t;
+ __u8 rsvd2[112];
+};
+
+/**
+ * struct nvme_tcp_icresp_pdu - nvme tcp initialize connection response pdu
+ *
+ * @hdr: pdu common header
+ * @pfv: pdu version format
+ * @cpda: controller pdu data alignment (dwords, 0's based)
+ * @digest: digest types enabled
+ * @maxdata: maximum data capsules per r2t supported
+ */
+struct nvme_tcp_icresp_pdu {
+ struct nvme_tcp_hdr hdr;
+ __le16 pfv;
+ __u8 cpda;
+ __u8 digest;
+ __le32 maxdata;
+ __u8 rsvd[112];
+};
+
+/**
+ * struct nvme_tcp_term_pdu - nvme tcp terminate connection pdu
+ *
+ * @hdr: pdu common header
+ * @fes: fatal error status
+ * @feil: fatal error information, lower 16 bits
+ * @feiu: fatal error information, upper 16 bits
+ */
+struct nvme_tcp_term_pdu {
+ struct nvme_tcp_hdr hdr;
+ __le16 fes;
+ __le16 feil;
+ __le16 feiu;
+ __u8 rsvd[10];
+};
+
+/**
+ * struct nvme_tcp_cmd_pdu - nvme tcp command capsule pdu
+ *
+ * @hdr: pdu common header
+ * @cmd: nvme command
+ */
+struct nvme_tcp_cmd_pdu {
+ struct nvme_tcp_hdr hdr;
+ struct nvme_command cmd;
+};
+
+/**
+ * struct nvme_tcp_rsp_pdu - nvme tcp response capsule pdu
+ *
+ * @hdr: pdu common header
+ * @cqe: nvme completion queue entry
+ */
+struct nvme_tcp_rsp_pdu {
+ struct nvme_tcp_hdr hdr;
+ struct nvme_completion cqe;
+};
+
+/**
+ * struct nvme_tcp_r2t_pdu - nvme tcp ready-to-transfer pdu
+ *
+ * @hdr: pdu common header
+ * @command_id: nvme command identifier which this relates to
+ * @ttag: transfer tag (controller generated)
+ * @r2t_offset: offset from the start of the command data
+ * @r2t_length: length the host is allowed to send
+ */
+struct nvme_tcp_r2t_pdu {
+ struct nvme_tcp_hdr hdr;
+ __u16 command_id;
+ __u16 ttag;
+ __le32 r2t_offset;
+ __le32 r2t_length;
+ __u8 rsvd[4];
+};
+
+/**
+ * struct nvme_tcp_data_pdu - nvme tcp data pdu
+ *
+ * @hdr: pdu common header
+ * @command_id: nvme command identifier which this relates to
+ * @ttag: transfer tag (controller generated)
+ * @data_offset: offset from the start of the command data
+ * @data_length: length of the data stream
+ */
+struct nvme_tcp_data_pdu {
+ struct nvme_tcp_hdr hdr;
+ __u16 command_id;
+ __u16 ttag;
+ __le32 data_offset;
+ __le32 data_length;
+ __u8 rsvd[4];
+};
+
+union nvme_tcp_pdu {
+ struct nvme_tcp_icreq_pdu icreq;
+ struct nvme_tcp_icresp_pdu icresp;
+ struct nvme_tcp_cmd_pdu cmd;
+ struct nvme_tcp_rsp_pdu rsp;
+ struct nvme_tcp_r2t_pdu r2t;
+ struct nvme_tcp_data_pdu data;
+};
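The union makes receive-side dispatch on the common header straightforward; a sketch (the my_handle_* routines are hypothetical):

static int my_dispatch_pdu(union nvme_tcp_pdu *pdu)
{
	switch (pdu->cmd.hdr.type) {
	case nvme_tcp_c2h_data:
		return my_handle_c2h_data(&pdu->data);
	case nvme_tcp_rsp:
		return my_handle_rsp(&pdu->rsp);
	case nvme_tcp_r2t:
		return my_handle_r2t(&pdu->r2t);
	default:
		return -EINVAL;	/* would terminate: NVME_TCP_FES_INVALID_PDU_HDR */
	}
}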
+
+#endif /* _LINUX_NVME_TCP_H */
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 6b8ee9e628e1..779507ac750b 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -1,20 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for the NVM Express interface
* Copyright (c) 2011-2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef _LINUX_NVME_H
#define _LINUX_NVME_H
+#include <linux/bits.h>
#include <linux/types.h>
#include <linux/uuid.h>
@@ -27,14 +20,35 @@
#define NVMF_TRSVCID_SIZE 32
#define NVMF_TRADDR_SIZE 256
#define NVMF_TSAS_SIZE 256
+#define NVMF_AUTH_HASH_LEN 64
#define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery"
#define NVME_RDMA_IP_PORT 4420
+#define NVME_NSID_ALL 0xffffffff
+
enum nvme_subsys_type {
- NVME_NQN_DISC = 1, /* Discovery type target subsystem */
- NVME_NQN_NVME = 2, /* NVME type target subsystem */
+ /* Referral to another discovery type target subsystem */
+ NVME_NQN_DISC = 1,
+
+ /* NVME type target subsystem */
+ NVME_NQN_NVME = 2,
+
+ /* Current discovery type target subsystem */
+ NVME_NQN_CURR = 3,
+};
+
+enum nvme_ctrl_type {
+ NVME_CTRL_IO = 1, /* I/O controller */
+ NVME_CTRL_DISC = 2, /* Discovery controller */
+ NVME_CTRL_ADMIN = 3, /* Administrative controller */
+};
+
+enum nvme_dctype {
+ NVME_DCTYPE_NOT_REPORTED = 0,
+ NVME_DCTYPE_DDC = 1, /* Direct Discovery Controller */
+ NVME_DCTYPE_CDC = 2, /* Central Discovery Controller */
};
/* Address Family codes for Discovery Log Page entry ADRFAM field */
@@ -44,21 +58,28 @@ enum {
NVMF_ADDR_FAMILY_IP6 = 2, /* IP6 */
NVMF_ADDR_FAMILY_IB = 3, /* InfiniBand */
NVMF_ADDR_FAMILY_FC = 4, /* Fibre Channel */
+ NVMF_ADDR_FAMILY_LOOP = 254, /* Reserved for host usage */
+ NVMF_ADDR_FAMILY_MAX,
};
/* Transport Type codes for Discovery Log Page entry TRTYPE field */
enum {
NVMF_TRTYPE_RDMA = 1, /* RDMA */
NVMF_TRTYPE_FC = 2, /* Fibre Channel */
+ NVMF_TRTYPE_TCP = 3, /* TCP/IP */
NVMF_TRTYPE_LOOP = 254, /* Reserved for host usage */
NVMF_TRTYPE_MAX,
};
/* Transport Requirements codes for Discovery Log Page entry TREQ field */
enum {
- NVMF_TREQ_NOT_SPECIFIED = 0, /* Not specified */
- NVMF_TREQ_REQUIRED = 1, /* Required */
- NVMF_TREQ_NOT_REQUIRED = 2, /* Not Required */
+ NVMF_TREQ_NOT_SPECIFIED = 0, /* Not specified */
+ NVMF_TREQ_REQUIRED = 1, /* Required */
+ NVMF_TREQ_NOT_REQUIRED = 2, /* Not Required */
+#define NVME_TREQ_SECURE_CHANNEL_MASK \
+ (NVMF_TREQ_REQUIRED | NVMF_TREQ_NOT_REQUIRED)
+
+ NVMF_TREQ_DISABLE_SQFLOW = (1 << 2), /* Supports SQ flow control disable */
};
/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
@@ -88,6 +109,14 @@ enum {
};
#define NVME_AQ_DEPTH 32
+#define NVME_NR_AEN_COMMANDS 1
+#define NVME_AQ_BLK_MQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
+
+/*
+ * Subtract one to leave an empty queue entry for 'Full Queue' condition. See
+ * NVM-Express 1.2 specification, section 4.1.2.
+ */
+#define NVME_AQ_MQ_TAG_DEPTH (NVME_AQ_BLK_MQ_DEPTH - 1)
enum {
NVME_REG_CAP = 0x0000, /* Controller Capabilities */
@@ -100,8 +129,26 @@ enum {
NVME_REG_AQA = 0x0024, /* Admin Queue Attributes */
NVME_REG_ASQ = 0x0028, /* Admin SQ Base Address */
NVME_REG_ACQ = 0x0030, /* Admin CQ Base Address */
- NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
+ NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
NVME_REG_CMBSZ = 0x003c, /* Controller Memory Buffer Size */
+ NVME_REG_BPINFO = 0x0040, /* Boot Partition Information */
+ NVME_REG_BPRSEL = 0x0044, /* Boot Partition Read Select */
+ NVME_REG_BPMBL = 0x0048, /* Boot Partition Memory Buffer
+ * Location
+ */
+ NVME_REG_CMBMSC = 0x0050, /* Controller Memory Buffer Memory
+ * Space Control
+ */
+ NVME_REG_CRTO = 0x0068, /* Controller Ready Timeouts */
+ NVME_REG_PMRCAP = 0x0e00, /* Persistent Memory Capabilities */
+ NVME_REG_PMRCTL = 0x0e04, /* Persistent Memory Region Control */
+ NVME_REG_PMRSTS = 0x0e08, /* Persistent Memory Region Status */
+ NVME_REG_PMREBS = 0x0e0c, /* Persistent Memory Region Elasticity
+ * Buffer Size
+ */
+ NVME_REG_PMRSWTP = 0x0e10, /* Persistent Memory Region Sustained
+ * Write Throughput
+ */
NVME_REG_DBS = 0x1000, /* SQ 0 Tail Doorbell */
};
@@ -109,49 +156,89 @@ enum {
#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
#define NVME_CAP_NSSRC(cap) (((cap) >> 36) & 0x1)
+#define NVME_CAP_CSS(cap) (((cap) >> 37) & 0xff)
#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
+#define NVME_CAP_CMBS(cap) (((cap) >> 57) & 0x1)
#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
-#define NVME_CMB_SZ(cmbsz) (((cmbsz) >> 12) & 0xfffff)
-#define NVME_CMB_SZU(cmbsz) (((cmbsz) >> 8) & 0xf)
-#define NVME_CMB_WDS(cmbsz) ((cmbsz) & 0x10)
-#define NVME_CMB_RDS(cmbsz) ((cmbsz) & 0x8)
-#define NVME_CMB_LISTS(cmbsz) ((cmbsz) & 0x4)
-#define NVME_CMB_CQS(cmbsz) ((cmbsz) & 0x2)
-#define NVME_CMB_SQS(cmbsz) ((cmbsz) & 0x1)
+#define NVME_CRTO_CRIMT(crto) ((crto) >> 16)
+#define NVME_CRTO_CRWMT(crto) ((crto) & 0xffff)
+
+enum {
+ NVME_CMBSZ_SQS = 1 << 0,
+ NVME_CMBSZ_CQS = 1 << 1,
+ NVME_CMBSZ_LISTS = 1 << 2,
+ NVME_CMBSZ_RDS = 1 << 3,
+ NVME_CMBSZ_WDS = 1 << 4,
+
+ NVME_CMBSZ_SZ_SHIFT = 12,
+ NVME_CMBSZ_SZ_MASK = 0xfffff,
+
+ NVME_CMBSZ_SZU_SHIFT = 8,
+ NVME_CMBSZ_SZU_MASK = 0xf,
+};
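A sketch of decoding the CMB size from these fields; SZU selects a granularity of 4KiB << (4 * SZU) per the NVMe specification:

static inline u64 my_cmb_bytes(u32 cmbsz)
{
	u32 sz  = (cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;
	u32 szu = (cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;

	return (u64)sz << (12 + 4 * szu);	/* sz units of the granularity */
}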
/*
* Submission and Completion Queue Entry Sizes for the NVM command set.
* (In bytes and specified as a power of two (2^n)).
*/
+#define NVME_ADM_SQES 6
#define NVME_NVM_IOSQES 6
#define NVME_NVM_IOCQES 4
enum {
NVME_CC_ENABLE = 1 << 0,
- NVME_CC_CSS_NVM = 0 << 4,
+ NVME_CC_EN_SHIFT = 0,
+ NVME_CC_CSS_SHIFT = 4,
NVME_CC_MPS_SHIFT = 7,
- NVME_CC_ARB_RR = 0 << 11,
- NVME_CC_ARB_WRRU = 1 << 11,
- NVME_CC_ARB_VS = 7 << 11,
- NVME_CC_SHN_NONE = 0 << 14,
- NVME_CC_SHN_NORMAL = 1 << 14,
- NVME_CC_SHN_ABRUPT = 2 << 14,
- NVME_CC_SHN_MASK = 3 << 14,
- NVME_CC_IOSQES = NVME_NVM_IOSQES << 16,
- NVME_CC_IOCQES = NVME_NVM_IOCQES << 20,
+ NVME_CC_AMS_SHIFT = 11,
+ NVME_CC_SHN_SHIFT = 14,
+ NVME_CC_IOSQES_SHIFT = 16,
+ NVME_CC_IOCQES_SHIFT = 20,
+ NVME_CC_CSS_NVM = 0 << NVME_CC_CSS_SHIFT,
+ NVME_CC_CSS_CSI = 6 << NVME_CC_CSS_SHIFT,
+ NVME_CC_CSS_MASK = 7 << NVME_CC_CSS_SHIFT,
+ NVME_CC_AMS_RR = 0 << NVME_CC_AMS_SHIFT,
+ NVME_CC_AMS_WRRU = 1 << NVME_CC_AMS_SHIFT,
+ NVME_CC_AMS_VS = 7 << NVME_CC_AMS_SHIFT,
+ NVME_CC_SHN_NONE = 0 << NVME_CC_SHN_SHIFT,
+ NVME_CC_SHN_NORMAL = 1 << NVME_CC_SHN_SHIFT,
+ NVME_CC_SHN_ABRUPT = 2 << NVME_CC_SHN_SHIFT,
+ NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT,
+ NVME_CC_IOSQES = NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT,
+ NVME_CC_IOCQES = NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT,
+ NVME_CC_CRIME = 1 << 24,
+};
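For example, an initial CC value for the NVM command set with 4KiB pages and round-robin arbitration composes from the fields above as:

u32 ctrl_config = NVME_CC_CSS_NVM | NVME_CC_AMS_RR |
		  (0 << NVME_CC_MPS_SHIFT) |	/* MPS = 0 -> 4KiB pages */
		  NVME_CC_IOSQES | NVME_CC_IOCQES |
		  NVME_CC_ENABLE;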
+
+enum {
NVME_CSTS_RDY = 1 << 0,
NVME_CSTS_CFS = 1 << 1,
NVME_CSTS_NSSRO = 1 << 4,
+ NVME_CSTS_PP = 1 << 5,
NVME_CSTS_SHST_NORMAL = 0 << 2,
NVME_CSTS_SHST_OCCUR = 1 << 2,
NVME_CSTS_SHST_CMPLT = 2 << 2,
NVME_CSTS_SHST_MASK = 3 << 2,
};
+enum {
+ NVME_CMBMSC_CRE = 1 << 0,
+ NVME_CMBMSC_CMSE = 1 << 1,
+};
+
+enum {
+ NVME_CAP_CSS_NVM = 1 << 0,
+ NVME_CAP_CSS_CSI = 1 << 6,
+};
+
+enum {
+ NVME_CAP_CRMS_CRWMS = 1ULL << 59,
+ NVME_CAP_CRMS_CRIMS = 1ULL << 60,
+};
+
struct nvme_id_power_state {
__le16 max_power; /* centiwatts */
__u8 rsvd2;
@@ -175,6 +262,12 @@ enum {
NVME_PS_FLAGS_NON_OP_STATE = 1 << 1,
};
+enum nvme_ctrl_attr {
+ NVME_CTRL_ATTR_HID_128_BIT = (1 << 0),
+ NVME_CTRL_ATTR_TBKAS = (1 << 6),
+ NVME_CTRL_ATTR_ELBAS = (1 << 15),
+};
+
struct nvme_id_ctrl {
__le16 vid;
__le16 ssvid;
@@ -191,7 +284,13 @@ struct nvme_id_ctrl {
__le32 rtd3e;
__le32 oaes;
__le32 ctratt;
- __u8 rsvd100[156];
+ __u8 rsvd100[11];
+ __u8 cntrltype;
+ __u8 fguid[16];
+ __le16 crdt1;
+ __le16 crdt2;
+ __le16 crdt3;
+ __u8 rsvd134[122];
__le16 oacs;
__u8 acl;
__u8 aerl;
@@ -217,7 +316,14 @@ struct nvme_id_ctrl {
__le16 mntmt;
__le16 mxtmt;
__le32 sanicap;
- __u8 rsvd332[180];
+ __le32 hmminds;
+ __le16 hmmaxd;
+ __u8 rsvd338[4];
+ __u8 anatt;
+ __u8 anacap;
+ __le32 anagrpmax;
+ __le32 nanagrpid;
+ __u8 rsvd352[160];
__u8 sqes;
__u8 cqes;
__le16 maxcmd;
@@ -229,11 +335,12 @@ struct nvme_id_ctrl {
__le16 awun;
__le16 awupf;
__u8 nvscc;
- __u8 rsvd531;
+ __u8 nwpc;
__le16 acwu;
__u8 rsvd534[2];
__le32 sgls;
- __u8 rsvd540[228];
+ __le32 mnan;
+ __u8 rsvd544[224];
char subnqn[256];
__u8 rsvd1024[768];
__le32 ioccsz;
@@ -241,20 +348,37 @@ struct nvme_id_ctrl {
__le16 icdoff;
__u8 ctrattr;
__u8 msdbd;
- __u8 rsvd1804[244];
+ __u8 rsvd1804[2];
+ __u8 dctype;
+ __u8 rsvd1807[241];
struct nvme_id_power_state psd[32];
__u8 vs[1024];
};
enum {
+ NVME_CTRL_CMIC_MULTI_PORT = 1 << 0,
+ NVME_CTRL_CMIC_MULTI_CTRL = 1 << 1,
+ NVME_CTRL_CMIC_ANA = 1 << 3,
NVME_CTRL_ONCS_COMPARE = 1 << 0,
NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1,
NVME_CTRL_ONCS_DSM = 1 << 2,
NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3,
+ NVME_CTRL_ONCS_RESERVATIONS = 1 << 5,
+ NVME_CTRL_ONCS_TIMESTAMP = 1 << 6,
NVME_CTRL_VWC_PRESENT = 1 << 0,
NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
+ NVME_CTRL_OACS_NS_MNGT_SUPP = 1 << 3,
NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
- NVME_CTRL_OACS_DBBUF_SUPP = 1 << 7,
+ NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
+ NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1,
+ NVME_CTRL_CTRATT_128_ID = 1 << 0,
+ NVME_CTRL_CTRATT_NON_OP_PSP = 1 << 1,
+ NVME_CTRL_CTRATT_NVM_SETS = 1 << 2,
+ NVME_CTRL_CTRATT_READ_RECV_LVLS = 1 << 3,
+ NVME_CTRL_CTRATT_ENDURANCE_GROUPS = 1 << 4,
+ NVME_CTRL_CTRATT_PREDICTABLE_LAT = 1 << 5,
+ NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 1 << 7,
+ NVME_CTRL_CTRATT_UUID_LIST = 1 << 9,
};
struct nvme_lbaf {
@@ -276,7 +400,7 @@ struct nvme_id_ns {
__u8 nmic;
__u8 rescap;
__u8 fpi;
- __u8 rsvd33;
+ __u8 dlfeat;
__le16 nawun;
__le16 nawupf;
__le16 nacwu;
@@ -285,23 +409,115 @@ struct nvme_id_ns {
__le16 nabspf;
__le16 noiob;
__u8 nvmcap[16];
- __u8 rsvd64[40];
+ __le16 npwg;
+ __le16 npwa;
+ __le16 npdg;
+ __le16 npda;
+ __le16 nows;
+ __u8 rsvd74[18];
+ __le32 anagrpid;
+ __u8 rsvd96[3];
+ __u8 nsattr;
+ __le16 nvmsetid;
+ __le16 endgid;
__u8 nguid[16];
__u8 eui64[8];
- struct nvme_lbaf lbaf[16];
- __u8 rsvd192[192];
+ struct nvme_lbaf lbaf[64];
__u8 vs[3712];
};
+/* I/O Command Set Independent Identify Namespace Data Structure */
+struct nvme_id_ns_cs_indep {
+ __u8 nsfeat;
+ __u8 nmic;
+ __u8 rescap;
+ __u8 fpi;
+ __le32 anagrpid;
+ __u8 nsattr;
+ __u8 rsvd9;
+ __le16 nvmsetid;
+ __le16 endgid;
+ __u8 nstat;
+ __u8 rsvd15[4081];
+};
+
+struct nvme_zns_lbafe {
+ __le64 zsze;
+ __u8 zdes;
+ __u8 rsvd9[7];
+};
+
+struct nvme_id_ns_zns {
+ __le16 zoc;
+ __le16 ozcs;
+ __le32 mar;
+ __le32 mor;
+ __le32 rrl;
+ __le32 frl;
+ __u8 rsvd20[2796];
+ struct nvme_zns_lbafe lbafe[64];
+ __u8 vs[256];
+};
+
+struct nvme_id_ctrl_zns {
+ __u8 zasl;
+ __u8 rsvd1[4095];
+};
+
+struct nvme_id_ns_nvm {
+ __le64 lbstm;
+ __u8 pic;
+ __u8 rsvd9[3];
+ __le32 elbaf[64];
+ __u8 rsvd268[3828];
+};
+
+enum {
+ NVME_ID_NS_NVM_STS_MASK = 0x3f,
+ NVME_ID_NS_NVM_GUARD_SHIFT = 7,
+ NVME_ID_NS_NVM_GUARD_MASK = 0x3,
+};
+
+static inline __u8 nvme_elbaf_sts(__u32 elbaf)
+{
+ return elbaf & NVME_ID_NS_NVM_STS_MASK;
+}
+
+static inline __u8 nvme_elbaf_guard_type(__u32 elbaf)
+{
+ return (elbaf >> NVME_ID_NS_NVM_GUARD_SHIFT) & NVME_ID_NS_NVM_GUARD_MASK;
+}
+
+struct nvme_id_ctrl_nvm {
+ __u8 vsl;
+ __u8 wzsl;
+ __u8 wusl;
+ __u8 dmrl;
+ __le32 dmrsl;
+ __le64 dmsl;
+ __u8 rsvd16[4080];
+};
+
enum {
NVME_ID_CNS_NS = 0x00,
NVME_ID_CNS_CTRL = 0x01,
NVME_ID_CNS_NS_ACTIVE_LIST = 0x02,
NVME_ID_CNS_NS_DESC_LIST = 0x03,
+ NVME_ID_CNS_CS_NS = 0x05,
+ NVME_ID_CNS_CS_CTRL = 0x06,
+ NVME_ID_CNS_NS_CS_INDEP = 0x08,
NVME_ID_CNS_NS_PRESENT_LIST = 0x10,
NVME_ID_CNS_NS_PRESENT = 0x11,
NVME_ID_CNS_CTRL_NS_LIST = 0x12,
NVME_ID_CNS_CTRL_LIST = 0x13,
+ NVME_ID_CNS_SCNDRY_CTRL_LIST = 0x15,
+ NVME_ID_CNS_NS_GRANULARITY = 0x16,
+ NVME_ID_CNS_UUID_LIST = 0x17,
+};
+
+enum {
+ NVME_CSI_NVM = 0,
+ NVME_CSI_ZNS = 2,
};
enum {
@@ -319,8 +535,14 @@ enum {
enum {
NVME_NS_FEAT_THIN = 1 << 0,
+ NVME_NS_FEAT_ATOMICS = 1 << 1,
+ NVME_NS_FEAT_IO_OPT = 1 << 4,
+ NVME_NS_ATTR_RO = 1 << 0,
NVME_NS_FLBAS_LBA_MASK = 0xf,
+ NVME_NS_FLBAS_LBA_UMASK = 0x60,
+ NVME_NS_FLBAS_LBA_SHIFT = 1,
NVME_NS_FLBAS_META_EXT = 0x10,
+ NVME_NS_NMIC_SHARED = 1 << 0,
NVME_LBAF_RP_BEST = 0,
NVME_LBAF_RP_BETTER = 1,
NVME_LBAF_RP_GOOD = 2,
@@ -337,6 +559,28 @@ enum {
NVME_NS_DPS_PI_TYPE3 = 3,
};
+enum {
+ NVME_NSTAT_NRDY = 1 << 0,
+};
+
+enum {
+ NVME_NVM_NS_16B_GUARD = 0,
+ NVME_NVM_NS_32B_GUARD = 1,
+ NVME_NVM_NS_64B_GUARD = 2,
+};
+
+static inline __u8 nvme_lbaf_index(__u8 flbas)
+{
+ return (flbas & NVME_NS_FLBAS_LBA_MASK) |
+ ((flbas & NVME_NS_FLBAS_LBA_UMASK) >> NVME_NS_FLBAS_LBA_SHIFT);
+}
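Typical use after an Identify Namespace, where id is a struct nvme_id_ns and flbas its Formatted LBA Size byte (the flbas field itself is not shown in this hunk, so treat that name as an assumption):

struct nvme_lbaf *lbaf = &id->lbaf[nvme_lbaf_index(id->flbas)];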
+
+/* Identify Namespace Metadata Capabilities (MC): */
+enum {
+ NVME_MC_EXTENDED_LBA = (1 << 0),
+ NVME_MC_METADATA_PTR = (1 << 1),
+};
+
struct nvme_ns_id_desc {
__u8 nidt;
__u8 nidl;
@@ -346,11 +590,13 @@ struct nvme_ns_id_desc {
#define NVME_NIDT_EUI64_LEN 8
#define NVME_NIDT_NGUID_LEN 16
#define NVME_NIDT_UUID_LEN 16
+#define NVME_NIDT_CSI_LEN 1
enum {
NVME_NIDT_EUI64 = 0x01,
NVME_NIDT_NGUID = 0x02,
NVME_NIDT_UUID = 0x03,
+ NVME_NIDT_CSI = 0x04,
};
struct nvme_smart_log {
@@ -359,7 +605,8 @@ struct nvme_smart_log {
__u8 avail_spare;
__u8 spare_thresh;
__u8 percent_used;
- __u8 rsvd6[26];
+ __u8 endu_grp_crit_warn_sumry;
+ __u8 rsvd7[25];
__u8 data_units_read[16];
__u8 data_units_written[16];
__u8 host_reads[16];
@@ -373,7 +620,82 @@ struct nvme_smart_log {
__le32 warning_temp_time;
__le32 critical_comp_time;
__le16 temp_sensor[8];
- __u8 rsvd216[296];
+ __le32 thm_temp1_trans_count;
+ __le32 thm_temp2_trans_count;
+ __le32 thm_temp1_total_time;
+ __le32 thm_temp2_total_time;
+ __u8 rsvd232[280];
+};
+
+struct nvme_fw_slot_info_log {
+ __u8 afi;
+ __u8 rsvd1[7];
+ __le64 frs[7];
+ __u8 rsvd64[448];
+};
+
+enum {
+ NVME_CMD_EFFECTS_CSUPP = 1 << 0,
+ NVME_CMD_EFFECTS_LBCC = 1 << 1,
+ NVME_CMD_EFFECTS_NCC = 1 << 2,
+ NVME_CMD_EFFECTS_NIC = 1 << 3,
+ NVME_CMD_EFFECTS_CCC = 1 << 4,
+ NVME_CMD_EFFECTS_CSE_MASK = GENMASK(18, 16),
+ NVME_CMD_EFFECTS_UUID_SEL = 1 << 19,
+ NVME_CMD_EFFECTS_SCOPE_MASK = GENMASK(31, 20),
+};
+
+struct nvme_effects_log {
+ __le32 acs[256];
+ __le32 iocs[256];
+ __u8 resv[2048];
+};
+
+enum nvme_ana_state {
+ NVME_ANA_OPTIMIZED = 0x01,
+ NVME_ANA_NONOPTIMIZED = 0x02,
+ NVME_ANA_INACCESSIBLE = 0x03,
+ NVME_ANA_PERSISTENT_LOSS = 0x04,
+ NVME_ANA_CHANGE = 0x0f,
+};
+
+struct nvme_ana_group_desc {
+ __le32 grpid;
+ __le32 nnsids;
+ __le64 chgcnt;
+ __u8 state;
+ __u8 rsvd17[15];
+ __le32 nsids[];
+};
+
+/* flag for the log specific field of the ANA log */
+#define NVME_ANA_LOG_RGO (1 << 0)
+
+struct nvme_ana_rsp_hdr {
+ __le64 chgcnt;
+ __le16 ngrps;
+ __le16 rsvd10[3];
+};
+
+struct nvme_zone_descriptor {
+ __u8 zt;
+ __u8 zs;
+ __u8 za;
+ __u8 rsvd3[5];
+ __le64 zcap;
+ __le64 zslba;
+ __le64 wp;
+ __u8 rsvd32[32];
+};
+
+enum {
+ NVME_ZONE_TYPE_SEQWRITE_REQ = 0x2,
+};
+
+struct nvme_zone_report {
+ __le64 nr_zones;
+ __u8 resv8[56];
+ struct nvme_zone_descriptor entries[];
};
enum {
@@ -385,15 +707,44 @@ enum {
};
enum {
- NVME_AER_NOTICE_NS_CHANGED = 0x0002,
+ NVME_AER_ERROR = 0,
+ NVME_AER_SMART = 1,
+ NVME_AER_NOTICE = 2,
+ NVME_AER_CSS = 6,
+ NVME_AER_VS = 7,
+};
+
+enum {
+ NVME_AER_ERROR_PERSIST_INT_ERR = 0x03,
+};
+
+enum {
+ NVME_AER_NOTICE_NS_CHANGED = 0x00,
+ NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
+ NVME_AER_NOTICE_ANA = 0x03,
+ NVME_AER_NOTICE_DISC_CHANGED = 0xf0,
+};
+
+enum {
+ NVME_AEN_BIT_NS_ATTR = 8,
+ NVME_AEN_BIT_FW_ACT = 9,
+ NVME_AEN_BIT_ANA_CHANGE = 11,
+ NVME_AEN_BIT_DISC_CHANGE = 31,
+};
+
+enum {
+ NVME_AEN_CFG_NS_ATTR = 1 << NVME_AEN_BIT_NS_ATTR,
+ NVME_AEN_CFG_FW_ACT = 1 << NVME_AEN_BIT_FW_ACT,
+ NVME_AEN_CFG_ANA_CHANGE = 1 << NVME_AEN_BIT_ANA_CHANGE,
+ NVME_AEN_CFG_DISC_CHANGE = 1 << NVME_AEN_BIT_DISC_CHANGE,
};
struct nvme_lba_range_type {
__u8 type;
__u8 attributes;
__u8 rsvd2[14];
- __u64 slba;
- __u64 nlb;
+ __le64 slba;
+ __le64 nlb;
__u8 guid[16];
__u8 rsvd48[16];
};
@@ -440,23 +791,51 @@ enum nvme_opcode {
nvme_cmd_compare = 0x05,
nvme_cmd_write_zeroes = 0x08,
nvme_cmd_dsm = 0x09,
+ nvme_cmd_verify = 0x0c,
nvme_cmd_resv_register = 0x0d,
nvme_cmd_resv_report = 0x0e,
nvme_cmd_resv_acquire = 0x11,
nvme_cmd_resv_release = 0x15,
-};
+ nvme_cmd_zone_mgmt_send = 0x79,
+ nvme_cmd_zone_mgmt_recv = 0x7a,
+ nvme_cmd_zone_append = 0x7d,
+ nvme_cmd_vendor_start = 0x80,
+};
+
+#define nvme_opcode_name(opcode) { opcode, #opcode }
+#define show_nvm_opcode_name(val) \
+ __print_symbolic(val, \
+ nvme_opcode_name(nvme_cmd_flush), \
+ nvme_opcode_name(nvme_cmd_write), \
+ nvme_opcode_name(nvme_cmd_read), \
+ nvme_opcode_name(nvme_cmd_write_uncor), \
+ nvme_opcode_name(nvme_cmd_compare), \
+ nvme_opcode_name(nvme_cmd_write_zeroes), \
+ nvme_opcode_name(nvme_cmd_dsm), \
+ nvme_opcode_name(nvme_cmd_verify), \
+ nvme_opcode_name(nvme_cmd_resv_register), \
+ nvme_opcode_name(nvme_cmd_resv_report), \
+ nvme_opcode_name(nvme_cmd_resv_acquire), \
+ nvme_opcode_name(nvme_cmd_resv_release), \
+ nvme_opcode_name(nvme_cmd_zone_mgmt_send), \
+ nvme_opcode_name(nvme_cmd_zone_mgmt_recv), \
+ nvme_opcode_name(nvme_cmd_zone_append))
+
+
/*
* Descriptor subtype - lower 4 bits of nvme_(keyed_)sgl_desc identifier
*
* @NVME_SGL_FMT_ADDRESS: absolute address of the data block
* @NVME_SGL_FMT_OFFSET: relative offset of the in-capsule data block
+ * @NVME_SGL_FMT_TRANSPORT_A: transport defined format, value 0xA
* @NVME_SGL_FMT_INVALIDATE: RDMA transport specific remote invalidation
* request subtype
*/
enum {
NVME_SGL_FMT_ADDRESS = 0x00,
NVME_SGL_FMT_OFFSET = 0x01,
+ NVME_SGL_FMT_TRANSPORT_A = 0x0A,
NVME_SGL_FMT_INVALIDATE = 0x0f,
};
@@ -470,12 +849,16 @@ enum {
*
* For struct nvme_keyed_sgl_desc:
* @NVME_KEY_SGL_FMT_DATA_DESC: keyed data block descriptor
+ *
+ * Transport-specific SGL types:
+ * @NVME_TRANSPORT_SGL_DATA_DESC: Transport SGL data block descriptor
*/
enum {
NVME_SGL_FMT_DATA_DESC = 0x00,
NVME_SGL_FMT_SEG_DESC = 0x02,
NVME_SGL_FMT_LAST_SEG_DESC = 0x03,
NVME_KEY_SGL_FMT_DATA_DESC = 0x04,
+ NVME_TRANSPORT_SGL_DATA_DESC = 0x05,
};
struct nvme_sgl_desc {
@@ -532,7 +915,14 @@ struct nvme_common_command {
__le32 cdw2[2];
__le64 metadata;
union nvme_data_ptr dptr;
- __le32 cdw10[6];
+ struct_group(cdws,
+ __le32 cdw10;
+ __le32 cdw11;
+ __le32 cdw12;
+ __le32 cdw13;
+ __le32 cdw14;
+ __le32 cdw15;
+ );
};
struct nvme_rw_command {
@@ -540,7 +930,8 @@ struct nvme_rw_command {
__u8 flags;
__u16 command_id;
__le32 nsid;
- __u64 rsvd2;
+ __le32 cdw2;
+ __le32 cdw3;
__le64 metadata;
union nvme_data_ptr dptr;
__le64 slba;
@@ -555,6 +946,7 @@ struct nvme_rw_command {
enum {
NVME_RW_LR = 1 << 15,
NVME_RW_FUA = 1 << 14,
+ NVME_RW_APPEND_PIREMAP = 1 << 9,
NVME_RW_DSM_FREQ_UNSPEC = 0,
NVME_RW_DSM_FREQ_TYPICAL = 1,
NVME_RW_DSM_FREQ_RARE = 2,
@@ -575,6 +967,7 @@ enum {
NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12,
NVME_RW_PRINFO_PRACT = 1 << 13,
NVME_RW_DTYPE_STREAMS = 1 << 4,
+ NVME_WZ_DEAC = 1 << 9,
};
struct nvme_dsm_cmd {
@@ -620,8 +1013,68 @@ struct nvme_write_zeroes_cmd {
__le16 appmask;
};
+enum nvme_zone_mgmt_action {
+ NVME_ZONE_CLOSE = 0x1,
+ NVME_ZONE_FINISH = 0x2,
+ NVME_ZONE_OPEN = 0x3,
+ NVME_ZONE_RESET = 0x4,
+ NVME_ZONE_OFFLINE = 0x5,
+ NVME_ZONE_SET_DESC_EXT = 0x10,
+};
+
+struct nvme_zone_mgmt_send_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 nsid;
+ __le32 cdw2[2];
+ __le64 metadata;
+ union nvme_data_ptr dptr;
+ __le64 slba;
+ __le32 cdw12;
+ __u8 zsa;
+ __u8 select_all;
+ __u8 rsvd13[2];
+ __le32 cdw14[2];
+};
+
+struct nvme_zone_mgmt_recv_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 nsid;
+ __le64 rsvd2[2];
+ union nvme_data_ptr dptr;
+ __le64 slba;
+ __le32 numd;
+ __u8 zra;
+ __u8 zrasf;
+ __u8 pr;
+ __u8 rsvd13;
+ __le32 cdw14[2];
+};
+
+enum {
+ NVME_ZRA_ZONE_REPORT = 0,
+ NVME_ZRASF_ZONE_REPORT_ALL = 0,
+ NVME_ZRASF_ZONE_STATE_EMPTY = 0x01,
+ NVME_ZRASF_ZONE_STATE_IMP_OPEN = 0x02,
+ NVME_ZRASF_ZONE_STATE_EXP_OPEN = 0x03,
+ NVME_ZRASF_ZONE_STATE_CLOSED = 0x04,
+ NVME_ZRASF_ZONE_STATE_READONLY = 0x05,
+ NVME_ZRASF_ZONE_STATE_FULL = 0x06,
+ NVME_ZRASF_ZONE_STATE_OFFLINE = 0x07,
+ NVME_REPORT_ZONE_PARTIAL = 1,
+};
+
/* Features */
+enum {
+ NVME_TEMP_THRESH_MASK = 0xffff,
+ NVME_TEMP_THRESH_SELECT_SHIFT = 16,
+ NVME_TEMP_THRESH_TYPE_UNDER = 0x100000,
+};
+
struct nvme_feat_auto_pst {
__le64 entries[32];
};
@@ -631,6 +1084,18 @@ enum {
NVME_HOST_MEM_RETURN = (1 << 1),
};
+struct nvme_feat_host_behavior {
+ __u8 acre;
+ __u8 etdas;
+ __u8 lbafee;
+ __u8 resv1[509];
+};
+
+enum {
+ NVME_ENABLE_ACRE = 1,
+ NVME_ENABLE_LBAFEE = 1,
+};
+
/* Admin commands */
enum nvme_admin_opcode {
@@ -647,15 +1112,53 @@ enum nvme_admin_opcode {
nvme_admin_ns_mgmt = 0x0d,
nvme_admin_activate_fw = 0x10,
nvme_admin_download_fw = 0x11,
+ nvme_admin_dev_self_test = 0x14,
nvme_admin_ns_attach = 0x15,
nvme_admin_keep_alive = 0x18,
nvme_admin_directive_send = 0x19,
nvme_admin_directive_recv = 0x1a,
+ nvme_admin_virtual_mgmt = 0x1c,
+ nvme_admin_nvme_mi_send = 0x1d,
+ nvme_admin_nvme_mi_recv = 0x1e,
nvme_admin_dbbuf = 0x7C,
nvme_admin_format_nvm = 0x80,
nvme_admin_security_send = 0x81,
nvme_admin_security_recv = 0x82,
-};
+ nvme_admin_sanitize_nvm = 0x84,
+ nvme_admin_get_lba_status = 0x86,
+ nvme_admin_vendor_start = 0xC0,
+};
+
+#define nvme_admin_opcode_name(opcode) { opcode, #opcode }
+#define show_admin_opcode_name(val) \
+ __print_symbolic(val, \
+ nvme_admin_opcode_name(nvme_admin_delete_sq), \
+ nvme_admin_opcode_name(nvme_admin_create_sq), \
+ nvme_admin_opcode_name(nvme_admin_get_log_page), \
+ nvme_admin_opcode_name(nvme_admin_delete_cq), \
+ nvme_admin_opcode_name(nvme_admin_create_cq), \
+ nvme_admin_opcode_name(nvme_admin_identify), \
+ nvme_admin_opcode_name(nvme_admin_abort_cmd), \
+ nvme_admin_opcode_name(nvme_admin_set_features), \
+ nvme_admin_opcode_name(nvme_admin_get_features), \
+ nvme_admin_opcode_name(nvme_admin_async_event), \
+ nvme_admin_opcode_name(nvme_admin_ns_mgmt), \
+ nvme_admin_opcode_name(nvme_admin_activate_fw), \
+ nvme_admin_opcode_name(nvme_admin_download_fw), \
+ nvme_admin_opcode_name(nvme_admin_dev_self_test), \
+ nvme_admin_opcode_name(nvme_admin_ns_attach), \
+ nvme_admin_opcode_name(nvme_admin_keep_alive), \
+ nvme_admin_opcode_name(nvme_admin_directive_send), \
+ nvme_admin_opcode_name(nvme_admin_directive_recv), \
+ nvme_admin_opcode_name(nvme_admin_virtual_mgmt), \
+ nvme_admin_opcode_name(nvme_admin_nvme_mi_send), \
+ nvme_admin_opcode_name(nvme_admin_nvme_mi_recv), \
+ nvme_admin_opcode_name(nvme_admin_dbbuf), \
+ nvme_admin_opcode_name(nvme_admin_format_nvm), \
+ nvme_admin_opcode_name(nvme_admin_security_send), \
+ nvme_admin_opcode_name(nvme_admin_security_recv), \
+ nvme_admin_opcode_name(nvme_admin_sanitize_nvm), \
+ nvme_admin_opcode_name(nvme_admin_get_lba_status))
enum {
NVME_QUEUE_PHYS_CONTIG = (1 << 0),
@@ -677,14 +1180,32 @@ enum {
NVME_FEAT_ASYNC_EVENT = 0x0b,
NVME_FEAT_AUTO_PST = 0x0c,
NVME_FEAT_HOST_MEM_BUF = 0x0d,
+ NVME_FEAT_TIMESTAMP = 0x0e,
NVME_FEAT_KATO = 0x0f,
+ NVME_FEAT_HCTM = 0x10,
+ NVME_FEAT_NOPSC = 0x11,
+ NVME_FEAT_RRL = 0x12,
+ NVME_FEAT_PLM_CONFIG = 0x13,
+ NVME_FEAT_PLM_WINDOW = 0x14,
+ NVME_FEAT_HOST_BEHAVIOR = 0x16,
+ NVME_FEAT_SANITIZE = 0x17,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
NVME_FEAT_RESV_MASK = 0x82,
NVME_FEAT_RESV_PERSIST = 0x83,
+ NVME_FEAT_WRITE_PROTECT = 0x84,
+ NVME_FEAT_VENDOR_START = 0xC0,
+ NVME_FEAT_VENDOR_END = 0xFF,
NVME_LOG_ERROR = 0x01,
NVME_LOG_SMART = 0x02,
NVME_LOG_FW_SLOT = 0x03,
+ NVME_LOG_CHANGED_NS = 0x04,
+ NVME_LOG_CMD_EFFECTS = 0x05,
+ NVME_LOG_DEVICE_SELF_TEST = 0x06,
+ NVME_LOG_TELEMETRY_HOST = 0x07,
+ NVME_LOG_TELEMETRY_CTRL = 0x08,
+ NVME_LOG_ENDURANCE_GROUP = 0x09,
+ NVME_LOG_ANA = 0x0c,
NVME_LOG_DISC = 0x70,
NVME_LOG_RESERVATION = 0x80,
NVME_FWACT_REPL = (0 << 3),
@@ -692,6 +1213,16 @@ enum {
NVME_FWACT_ACTV = (2 << 3),
};
+/* NVMe Namespace Write Protect State */
+enum {
+ NVME_NS_NO_WRITE_PROTECT = 0,
+ NVME_NS_WRITE_PROTECT,
+ NVME_NS_WRITE_PROTECT_POWER_CYCLE,
+ NVME_NS_WRITE_PROTECT_PERMANENT,
+};
+
+#define NVME_MAX_CHANGED_NAMESPACES 1024
+
struct nvme_identify {
__u8 opcode;
__u8 flags;
@@ -702,7 +1233,9 @@ struct nvme_identify {
__u8 cns;
__u8 rsvd3;
__le16 ctrlid;
- __u32 rsvd11[5];
+ __u8 rsvd11[3];
+ __u8 csi;
+ __u32 rsvd12[4];
};
#define NVME_IDENTIFY_DATA_SIZE 4096
@@ -805,13 +1338,20 @@ struct nvme_get_log_page_command {
__u64 rsvd2[2];
union nvme_data_ptr dptr;
__u8 lid;
- __u8 rsvd10;
+ __u8 lsp; /* upper 4 bits reserved */
__le16 numdl;
__le16 numdu;
__u16 rsvd11;
- __le32 lpol;
- __le32 lpou;
- __u32 rsvd14[2];
+ union {
+ struct {
+ __le32 lpol;
+ __le32 lpou;
+ };
+ __le64 lpo;
+ };
+ __u8 rsvd14[3];
+ __u8 csi;
+ __u32 rsvd15;
};
struct nvme_directive_cmd {
@@ -843,8 +1383,29 @@ enum nvmf_capsule_command {
nvme_fabrics_type_property_set = 0x00,
nvme_fabrics_type_connect = 0x01,
nvme_fabrics_type_property_get = 0x04,
+ nvme_fabrics_type_auth_send = 0x05,
+ nvme_fabrics_type_auth_receive = 0x06,
};
+#define nvme_fabrics_type_name(type) { type, #type }
+#define show_fabrics_type_name(type) \
+ __print_symbolic(type, \
+ nvme_fabrics_type_name(nvme_fabrics_type_property_set), \
+ nvme_fabrics_type_name(nvme_fabrics_type_connect), \
+ nvme_fabrics_type_name(nvme_fabrics_type_property_get), \
+ nvme_fabrics_type_name(nvme_fabrics_type_auth_send), \
+ nvme_fabrics_type_name(nvme_fabrics_type_auth_receive))
+
+/*
+ * If the command is not a fabrics command, fctype is ignored.
+ */
+#define show_opcode_name(qid, opcode, fctype) \
+ ((opcode) == nvme_fabrics_command ? \
+ show_fabrics_type_name(fctype) : \
+ ((qid) ? \
+ show_nvm_opcode_name(opcode) : \
+ show_admin_opcode_name(opcode)))
+
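The show_*_name() helpers above expand to __print_symbolic(), which is only
meaningful inside TP_printk() of a TRACE_EVENT() definition. A hypothetical
trace-event fragment (not part of this patch):

/*
 *	TP_printk("qid=%d, cmd=(%s)", __entry->qid,
 *		  show_opcode_name(__entry->qid, __entry->opcode,
 *				   __entry->fctype))
 */
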
struct nvmf_common_command {
__u8 opcode;
__u8 resv1;
@@ -866,6 +1427,12 @@ struct nvmf_common_command {
#define MAX_DISC_LOGS 255
+/* Discovery log page entry flags (EFLAGS): */
+enum {
+ NVME_DISC_EFLAGS_EPCSD = (1 << 1),
+ NVME_DISC_EFLAGS_DUPRETINFO = (1 << 0),
+};
+
/* Discovery log page entry */
struct nvmf_disc_rsp_page_entry {
__u8 trtype;
@@ -875,7 +1442,8 @@ struct nvmf_disc_rsp_page_entry {
__le16 portid;
__le16 cntlid;
__le16 asqsz;
- __u8 resv8[22];
+ __le16 eflags;
+ __u8 resv10[20];
char trsvcid[NVMF_TRSVCID_SIZE];
__u8 resv64[192];
char subnqn[NVMF_NQN_FIELD_LEN];
@@ -899,7 +1467,11 @@ struct nvmf_disc_rsp_page_hdr {
__le64 numrec;
__le16 recfmt;
__u8 resv14[1006];
- struct nvmf_disc_rsp_page_entry entries[0];
+ struct nvmf_disc_rsp_page_entry entries[];
+};
+
+enum {
+ NVME_CONNECT_DISABLE_SQFLOW = (1 << 2),
};
struct nvmf_connect_command {
@@ -918,6 +1490,11 @@ struct nvmf_connect_command {
__u8 resv4[12];
};
+enum {
+ NVME_CONNECT_AUTHREQ_ASCR = (1U << 18),
+ NVME_CONNECT_AUTHREQ_ATR = (1U << 17),
+};
+
struct nvmf_connect_data {
uuid_t hostid;
__le16 cntlid;
@@ -952,6 +1529,200 @@ struct nvmf_property_get_command {
__u8 resv4[16];
};
+struct nvmf_auth_common_command {
+ __u8 opcode;
+ __u8 resv1;
+ __u16 command_id;
+ __u8 fctype;
+ __u8 resv2[19];
+ union nvme_data_ptr dptr;
+ __u8 resv3;
+ __u8 spsp0;
+ __u8 spsp1;
+ __u8 secp;
+ __le32 al_tl;
+ __u8 resv4[16];
+};
+
+struct nvmf_auth_send_command {
+ __u8 opcode;
+ __u8 resv1;
+ __u16 command_id;
+ __u8 fctype;
+ __u8 resv2[19];
+ union nvme_data_ptr dptr;
+ __u8 resv3;
+ __u8 spsp0;
+ __u8 spsp1;
+ __u8 secp;
+ __le32 tl;
+ __u8 resv4[16];
+};
+
+struct nvmf_auth_receive_command {
+ __u8 opcode;
+ __u8 resv1;
+ __u16 command_id;
+ __u8 fctype;
+ __u8 resv2[19];
+ union nvme_data_ptr dptr;
+ __u8 resv3;
+ __u8 spsp0;
+ __u8 spsp1;
+ __u8 secp;
+ __le32 al;
+ __u8 resv4[16];
+};
+
+/* Value for secp */
+enum {
+ NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER = 0xe9,
+};
+
+/* Defined value for auth_type */
+enum {
+ NVME_AUTH_COMMON_MESSAGES = 0x00,
+ NVME_AUTH_DHCHAP_MESSAGES = 0x01,
+};
+
+/* Defined messages for auth_id */
+enum {
+ NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE = 0x00,
+ NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE = 0x01,
+ NVME_AUTH_DHCHAP_MESSAGE_REPLY = 0x02,
+ NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1 = 0x03,
+ NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 = 0x04,
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE2 = 0xf0,
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1 = 0xf1,
+};
+
+struct nvmf_auth_dhchap_protocol_descriptor {
+ __u8 authid;
+ __u8 rsvd;
+ __u8 halen;
+ __u8 dhlen;
+ __u8 idlist[60];
+};
+
+enum {
+ NVME_AUTH_DHCHAP_AUTH_ID = 0x01,
+};
+
+/* Defined hash functions for DH-HMAC-CHAP authentication */
+enum {
+ NVME_AUTH_HASH_SHA256 = 0x01,
+ NVME_AUTH_HASH_SHA384 = 0x02,
+ NVME_AUTH_HASH_SHA512 = 0x03,
+ NVME_AUTH_HASH_INVALID = 0xff,
+};
+
+/* Defined Diffie-Hellman group identifiers for DH-HMAC-CHAP authentication */
+enum {
+ NVME_AUTH_DHGROUP_NULL = 0x00,
+ NVME_AUTH_DHGROUP_2048 = 0x01,
+ NVME_AUTH_DHGROUP_3072 = 0x02,
+ NVME_AUTH_DHGROUP_4096 = 0x03,
+ NVME_AUTH_DHGROUP_6144 = 0x04,
+ NVME_AUTH_DHGROUP_8192 = 0x05,
+ NVME_AUTH_DHGROUP_INVALID = 0xff,
+};
+
+union nvmf_auth_protocol {
+ struct nvmf_auth_dhchap_protocol_descriptor dhchap;
+};
+
+struct nvmf_auth_dhchap_negotiate_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd;
+ __le16 t_id;
+ __u8 sc_c;
+ __u8 napd;
+ union nvmf_auth_protocol auth_protocol[];
+};
+
+struct nvmf_auth_dhchap_challenge_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __u16 rsvd1;
+ __le16 t_id;
+ __u8 hl;
+ __u8 rsvd2;
+ __u8 hashid;
+ __u8 dhgid;
+ __le16 dhvlen;
+ __le32 seqnum;
+ /* 'hl' bytes of challenge value */
+ __u8 cval[];
+ /* followed by 'dhvlen' bytes of DH value */
+};
+
+struct nvmf_auth_dhchap_reply_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd1;
+ __le16 t_id;
+ __u8 hl;
+ __u8 rsvd2;
+ __u8 cvalid;
+ __u8 rsvd3;
+ __le16 dhvlen;
+ __le32 seqnum;
+ /* 'hl' bytes of response data */
+ __u8 rval[];
+ /* followed by 'hl' bytes of challenge value */
+ /* followed by 'dhvlen' bytes of DH value */
+};
+
+enum {
+ NVME_AUTH_DHCHAP_RESPONSE_VALID = (1 << 0),
+};
+
+struct nvmf_auth_dhchap_success1_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd1;
+ __le16 t_id;
+ __u8 hl;
+ __u8 rsvd2;
+ __u8 rvalid;
+ __u8 rsvd3[7];
+ /* 'hl' bytes of response value if 'rvalid' is set */
+ __u8 rval[];
+};
+
+struct nvmf_auth_dhchap_success2_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd1;
+ __le16 t_id;
+ __u8 rsvd2[10];
+};
+
+struct nvmf_auth_dhchap_failure_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd1;
+ __le16 t_id;
+ __u8 rescode;
+ __u8 rescode_exp;
+};
+
+enum {
+ NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED = 0x01,
+};
+
+enum {
+ NVME_AUTH_DHCHAP_FAILURE_FAILED = 0x01,
+ NVME_AUTH_DHCHAP_FAILURE_NOT_USABLE = 0x02,
+ NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH = 0x03,
+ NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE = 0x04,
+ NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE = 0x05,
+ NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD = 0x06,
+ NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE = 0x07,
+};
+
+
struct nvme_dbbuf {
__u8 opcode;
__u8 flags;
@@ -963,14 +1734,14 @@ struct nvme_dbbuf {
};
struct streams_directive_params {
- __u16 msl;
- __u16 nssa;
- __u16 nsso;
+ __le16 msl;
+ __le16 nssa;
+ __le16 nsso;
__u8 rsvd[10];
- __u32 sws;
- __u16 sgs;
- __u16 nsa;
- __u16 nso;
+ __le32 sws;
+ __le16 sgs;
+ __le16 nsa;
+ __le16 nso;
__u8 rsvd2[6];
};
@@ -987,17 +1758,41 @@ struct nvme_command {
struct nvme_format_cmd format;
struct nvme_dsm_cmd dsm;
struct nvme_write_zeroes_cmd write_zeroes;
+ struct nvme_zone_mgmt_send_cmd zms;
+ struct nvme_zone_mgmt_recv_cmd zmr;
struct nvme_abort_cmd abort;
struct nvme_get_log_page_command get_log_page;
struct nvmf_common_command fabrics;
struct nvmf_connect_command connect;
struct nvmf_property_set_command prop_set;
struct nvmf_property_get_command prop_get;
+ struct nvmf_auth_common_command auth_common;
+ struct nvmf_auth_send_command auth_send;
+ struct nvmf_auth_receive_command auth_receive;
struct nvme_dbbuf dbbuf;
struct nvme_directive_cmd directive;
};
};
+static inline bool nvme_is_fabrics(struct nvme_command *cmd)
+{
+ return cmd->common.opcode == nvme_fabrics_command;
+}
+
+struct nvme_error_slot {
+ __le64 error_count;
+ __le16 sqid;
+ __le16 cmdid;
+ __le16 status_field;
+ __le16 param_error_location;
+ __le64 lba;
+ __le32 nsid;
+ __u8 vs;
+ __u8 resv[3];
+ __le64 cs;
+ __u8 resv2[24];
+};
+
static inline bool nvme_is_write(struct nvme_command *cmd)
{
/*
@@ -1005,8 +1800,8 @@ static inline bool nvme_is_write(struct nvme_command *cmd)
*
* Why can't we simply have a Fabrics In and Fabrics out command?
*/
- if (unlikely(cmd->common.opcode == nvme_fabrics_command))
- return cmd->fabrics.opcode & 1;
+ if (unlikely(nvme_is_fabrics(cmd)))
+ return cmd->fabrics.fctype & 1;
return cmd->common.opcode & 1;
}
@@ -1032,14 +1827,31 @@ enum {
NVME_SC_SGL_INVALID_DATA = 0xf,
NVME_SC_SGL_INVALID_METADATA = 0x10,
NVME_SC_SGL_INVALID_TYPE = 0x11,
-
+ NVME_SC_CMB_INVALID_USE = 0x12,
+ NVME_SC_PRP_INVALID_OFFSET = 0x13,
+ NVME_SC_ATOMIC_WU_EXCEEDED = 0x14,
+ NVME_SC_OP_DENIED = 0x15,
NVME_SC_SGL_INVALID_OFFSET = 0x16,
- NVME_SC_SGL_INVALID_SUBTYPE = 0x17,
+ NVME_SC_RESERVED = 0x17,
+ NVME_SC_HOST_ID_INCONSIST = 0x18,
+ NVME_SC_KA_TIMEOUT_EXPIRED = 0x19,
+ NVME_SC_KA_TIMEOUT_INVALID = 0x1A,
+ NVME_SC_ABORTED_PREEMPT_ABORT = 0x1B,
+ NVME_SC_SANITIZE_FAILED = 0x1C,
+ NVME_SC_SANITIZE_IN_PROGRESS = 0x1D,
+ NVME_SC_SGL_INVALID_GRANULARITY = 0x1E,
+ NVME_SC_CMD_NOT_SUP_CMB_QUEUE = 0x1F,
+ NVME_SC_NS_WRITE_PROTECTED = 0x20,
+ NVME_SC_CMD_INTERRUPTED = 0x21,
+ NVME_SC_TRANSIENT_TR_ERR = 0x22,
+ NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY = 0x24,
+ NVME_SC_INVALID_IO_CMD_SET = 0x2C,
NVME_SC_LBA_RANGE = 0x80,
NVME_SC_CAP_EXCEEDED = 0x81,
NVME_SC_NS_NOT_READY = 0x82,
NVME_SC_RESERVATION_CONFLICT = 0x83,
+ NVME_SC_FORMAT_IN_PROGRESS = 0x84,
/*
* Command Specific Status:
@@ -1063,15 +1875,24 @@ enum {
NVME_SC_FW_NEEDS_SUBSYS_RESET = 0x110,
NVME_SC_FW_NEEDS_RESET = 0x111,
NVME_SC_FW_NEEDS_MAX_TIME = 0x112,
- NVME_SC_FW_ACIVATE_PROHIBITED = 0x113,
+ NVME_SC_FW_ACTIVATE_PROHIBITED = 0x113,
NVME_SC_OVERLAPPING_RANGE = 0x114,
- NVME_SC_NS_INSUFFICENT_CAP = 0x115,
+ NVME_SC_NS_INSUFFICIENT_CAP = 0x115,
NVME_SC_NS_ID_UNAVAILABLE = 0x116,
NVME_SC_NS_ALREADY_ATTACHED = 0x118,
NVME_SC_NS_IS_PRIVATE = 0x119,
NVME_SC_NS_NOT_ATTACHED = 0x11a,
NVME_SC_THIN_PROV_NOT_SUPP = 0x11b,
NVME_SC_CTRL_LIST_INVALID = 0x11c,
+ NVME_SC_SELT_TEST_IN_PROGRESS = 0x11d,
+ NVME_SC_BP_WRITE_PROHIBITED = 0x11e,
+ NVME_SC_CTRL_ID_INVALID = 0x11f,
+ NVME_SC_SEC_CTRL_STATE_INVALID = 0x120,
+ NVME_SC_CTRL_RES_NUM_INVALID = 0x121,
+ NVME_SC_RES_ID_INVALID = 0x122,
+ NVME_SC_PMR_SAN_PROHIBITED = 0x123,
+ NVME_SC_ANA_GROUP_ID_INVALID = 0x124,
+ NVME_SC_ANA_ATTACH_FAILED = 0x125,
/*
* I/O Command Set Specific - NVM commands:
@@ -1094,6 +1915,18 @@ enum {
NVME_SC_AUTH_REQUIRED = 0x191,
/*
+ * I/O Command Set Specific - Zoned commands:
+ */
+ NVME_SC_ZONE_BOUNDARY_ERROR = 0x1b8,
+ NVME_SC_ZONE_FULL = 0x1b9,
+ NVME_SC_ZONE_READ_ONLY = 0x1ba,
+ NVME_SC_ZONE_OFFLINE = 0x1bb,
+ NVME_SC_ZONE_INVALID_WRITE = 0x1bc,
+ NVME_SC_ZONE_TOO_MANY_ACTIVE = 0x1bd,
+ NVME_SC_ZONE_TOO_MANY_OPEN = 0x1be,
+ NVME_SC_ZONE_INVALID_TRANSITION = 0x1bf,
+
+ /*
* Media and Data Integrity Errors:
*/
NVME_SC_WRITE_FAULT = 0x280,
@@ -1105,20 +1938,20 @@ enum {
NVME_SC_ACCESS_DENIED = 0x286,
NVME_SC_UNWRITTEN_BLOCK = 0x287,
- NVME_SC_DNR = 0x4000,
-
-
/*
- * FC Transport-specific error status values for NVME commands
- *
- * Transport-specific status code values must be in the range 0xB0..0xBF
+ * Path-related Errors:
*/
-
- /* Generic FC failure - catchall */
- NVME_SC_FC_TRANSPORT_ERROR = 0x00B0,
-
- /* I/O failure due to FC ABTS'd */
- NVME_SC_FC_TRANSPORT_ABORTED = 0x00B1,
+ NVME_SC_INTERNAL_PATH_ERROR = 0x300,
+ NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
+ NVME_SC_ANA_INACCESSIBLE = 0x302,
+ NVME_SC_ANA_TRANSITION = 0x303,
+ NVME_SC_CTRL_PATH_ERROR = 0x360,
+ NVME_SC_HOST_PATH_ERROR = 0x370,
+ NVME_SC_HOST_ABORTED_CMD = 0x371,
+
+ NVME_SC_CRD = 0x1800,
+ NVME_SC_MORE = 0x2000,
+ NVME_SC_DNR = 0x4000,
};
struct nvme_completion {
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index c2256d746543..fa030d93b768 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -1,40 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* nvmem framework consumer.
*
* Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
* Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef _LINUX_NVMEM_CONSUMER_H
#define _LINUX_NVMEM_CONSUMER_H
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/notifier.h>
+
struct device;
struct device_node;
/* consumer cookie */
struct nvmem_cell;
struct nvmem_device;
+struct nvmem_cell_info;
+
+/**
+ * struct nvmem_cell_lookup - cell lookup entry
+ *
+ * @nvmem_name: Name of the provider.
+ * @cell_name: Name of the nvmem cell as defined in the name field of
+ * struct nvmem_cell_info.
+ * @dev_id: Name of the consumer device that will be associated with
+ * this cell.
+ * @con_id: Connector id for this cell lookup.
+ */
+struct nvmem_cell_lookup {
+ const char *nvmem_name;
+ const char *cell_name;
+ const char *dev_id;
+ const char *con_id;
+ struct list_head node;
+};
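
A hedged board-file sketch of the lookup mechanism; every name below
(provider, cell, consumer device, connection id) is illustrative:

static struct nvmem_cell_lookup board_cell_lookups[] = {
	{
		.nvmem_name = "eeprom0",
		.cell_name = "mac-address",
		.dev_id = "ethernet.0",
		.con_id = "mac-address",
	},
};

/* in board init code:
 *	nvmem_add_cell_lookups(board_cell_lookups,
 *			       ARRAY_SIZE(board_cell_lookups));
 */
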
-struct nvmem_cell_info {
- const char *name;
- unsigned int offset;
- unsigned int bytes;
- unsigned int bit_offset;
- unsigned int nbits;
+enum {
+ NVMEM_ADD = 1,
+ NVMEM_REMOVE,
+ NVMEM_CELL_ADD,
+ NVMEM_CELL_REMOVE,
};
#if IS_ENABLED(CONFIG_NVMEM)
/* Cell based interface */
-struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *name);
-struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *name);
+struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id);
+struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id);
void nvmem_cell_put(struct nvmem_cell *cell);
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len);
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len);
+int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val);
+int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val);
+int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val);
+int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val);
+int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
+ u32 *val);
+int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
+ u64 *val);
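
A hedged consumer sketch: read a 32-bit value by connection id. "calib" is
an illustrative cell name; with devicetree it would be resolved through the
nvmem-cells/nvmem-cell-names properties:

static int foo_read_calibration(struct device *dev, u32 *calib)
{
	return nvmem_cell_read_u32(dev, "calib", calib);
}
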
/* direct nvmem device read/write interface */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *name);
@@ -51,18 +78,31 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
int nvmem_device_cell_write(struct nvmem_device *nvmem,
struct nvmem_cell_info *info, void *buf);
+const char *nvmem_dev_name(struct nvmem_device *nvmem);
+
+void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries,
+ size_t nentries);
+void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries,
+ size_t nentries);
+
+int nvmem_register_notifier(struct notifier_block *nb);
+int nvmem_unregister_notifier(struct notifier_block *nb);
+
+struct nvmem_device *nvmem_device_find(void *data,
+ int (*match)(struct device *dev, const void *data));
+
#else
static inline struct nvmem_cell *nvmem_cell_get(struct device *dev,
- const char *name)
+ const char *id)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct nvmem_cell *devm_nvmem_cell_get(struct device *dev,
- const char *name)
+ const char *id)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void devm_nvmem_cell_put(struct device *dev,
@@ -76,25 +116,57 @@ static inline void nvmem_cell_put(struct nvmem_cell *cell)
static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline int nvmem_cell_write(struct nvmem_cell *cell,
- const char *buf, size_t len)
+ void *buf, size_t len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int nvmem_cell_read_u16(struct device *dev,
+ const char *cell_id, u16 *val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int nvmem_cell_read_u32(struct device *dev,
+ const char *cell_id, u32 *val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int nvmem_cell_read_u64(struct device *dev,
+ const char *cell_id, u64 *val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int nvmem_cell_read_variable_le_u32(struct device *dev,
+ const char *cell_id,
+ u32 *val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int nvmem_cell_read_variable_le_u64(struct device *dev,
+ const char *cell_id,
+ u64 *val)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline struct nvmem_device *nvmem_device_get(struct device *dev,
const char *name)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct nvmem_device *devm_nvmem_device_get(struct device *dev,
const char *name)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void nvmem_device_put(struct nvmem_device *nvmem)
@@ -110,47 +182,81 @@ static inline ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
struct nvmem_cell_info *info,
void *buf)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int nvmem_device_cell_write(struct nvmem_device *nvmem,
struct nvmem_cell_info *info,
void *buf)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int nvmem_device_read(struct nvmem_device *nvmem,
unsigned int offset, size_t bytes,
void *buf)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int nvmem_device_write(struct nvmem_device *nvmem,
unsigned int offset, size_t bytes,
void *buf)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
+}
+
+static inline const char *nvmem_dev_name(struct nvmem_device *nvmem)
+{
+ return NULL;
+}
+
+static inline void
+nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) {}
+static inline void
+nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) {}
+
+static inline int nvmem_register_notifier(struct notifier_block *nb)
+{
+ return -EOPNOTSUPP;
}
+
+static inline int nvmem_unregister_notifier(struct notifier_block *nb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct nvmem_device *nvmem_device_find(void *data,
+ int (*match)(struct device *dev, const void *data))
+{
+ return NULL;
+}
+
#endif /* CONFIG_NVMEM */
#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
- const char *name);
+ const char *id);
struct nvmem_device *of_nvmem_device_get(struct device_node *np,
const char *name);
+struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem);
#else
static inline struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
- const char *name)
+ const char *id)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np,
const char *name)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct device_node *
+of_nvmem_layout_get_container(struct nvmem_device *nvmem)
+{
+ return ERR_PTR(-EOPNOTSUPP);
}
#endif /* CONFIG_NVMEM && CONFIG_OF */
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 497706f5adca..dae26295e6be 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -1,27 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* nvmem framework provider.
*
* Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
* Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef _LINUX_NVMEM_PROVIDER_H
#define _LINUX_NVMEM_PROVIDER_H
+#include <linux/device/driver.h>
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
struct nvmem_device;
-struct nvmem_cell_info;
typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset,
void *val, size_t bytes);
typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset,
void *val, size_t bytes);
+/* used for vendor specific post processing of cell data */
+typedef int (*nvmem_cell_post_process_t)(void *priv, const char *id, int index,
+ unsigned int offset, void *buf,
+ size_t bytes);
+
+enum nvmem_type {
+ NVMEM_TYPE_UNKNOWN = 0,
+ NVMEM_TYPE_EEPROM,
+ NVMEM_TYPE_OTP,
+ NVMEM_TYPE_BATTERY_BACKED,
+ NVMEM_TYPE_FRAM,
+};
+
+#define NVMEM_DEVID_NONE (-1)
+#define NVMEM_DEVID_AUTO (-2)
+
+/**
+ * struct nvmem_keepout - NVMEM register keepout range.
+ *
+ * @start: The first byte offset to avoid.
+ * @end: One beyond the last byte offset to avoid.
+ * @value: The byte to fill reads with for this region.
+ */
+struct nvmem_keepout {
+ unsigned int start;
+ unsigned int end;
+ unsigned char value;
+};
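
A hedged provider sketch: mask bytes 0x20-0x2f so reads in that window
return 0xff instead of touching the hardware. The core expects keepout
ranges sorted by start and aligned to the device's access granularity:

static const struct nvmem_keepout foo_keepouts[] = {
	{ .start = 0x20, .end = 0x30, .value = 0xff },
};

/* in the nvmem_config:
 *	.keepout = foo_keepouts,
 *	.nkeepout = ARRAY_SIZE(foo_keepouts),
 */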
+
+/**
+ * struct nvmem_cell_info - NVMEM cell description
+ * @name: Name.
+ * @offset: Offset within the NVMEM device.
+ * @raw_len: Length of raw data (without post processing).
+ * @bytes: Length of the cell.
+ * @bit_offset: Bit offset if cell is smaller than a byte.
+ * @nbits: Number of bits.
+ * @np: Optional device_node pointer.
+ * @read_post_process: Callback for optional post processing of cell data
+ * on reads.
+ * @priv: Opaque data passed to the read_post_process hook.
+ */
+struct nvmem_cell_info {
+ const char *name;
+ unsigned int offset;
+ size_t raw_len;
+ unsigned int bytes;
+ unsigned int bit_offset;
+ unsigned int nbits;
+ struct device_node *np;
+ nvmem_cell_post_process_t read_post_process;
+ void *priv;
+};
+/**
+ * struct nvmem_config - NVMEM device configuration
+ *
+ * @dev: Parent device.
+ * @name: Optional name.
+ * @id: Optional device ID used in full name. Ignored if name is NULL.
+ * @owner: Pointer to exporter module. Used for refcounting.
+ * @cells: Optional array of pre-defined NVMEM cells.
+ * @ncells: Number of elements in cells.
+ * @keepout: Optional array of keepout ranges (sorted ascending by start).
+ * @nkeepout: Number of elements in the keepout array.
+ * @type: Type of the nvmem storage.
+ * @read_only: Device is read-only.
+ * @root_only: Device is accessible to root only.
+ * @of_node: If given, this will be used instead of the parent's of_node.
+ * @no_of_node: Device should not use the parent's of_node even if it's !NULL.
+ * @reg_read: Callback to read data.
+ * @reg_write: Callback to write data.
+ * @size: Device size.
+ * @word_size: Minimum read/write access granularity.
+ * @stride: Minimum read/write access stride.
+ * @priv: User context passed to read/write callbacks.
+ * @ignore_wp: Write Protect pin is managed by the provider.
+ * @layout: Fixed layout associated with this nvmem device.
+ *
+ * Note: A default "nvmem<id>" name will be assigned to the device if
+ * no name is specified in its configuration. In that case "<id>" is
+ * generated with ida_simple_get() and the provided id field is ignored.
+ *
+ * Note: Specifying name and setting id to -1 implies a unique device
+ * whose name is provided as-is (kept unaltered).
+ */
struct nvmem_config {
struct device *dev;
const char *name;
@@ -29,8 +112,15 @@ struct nvmem_config {
struct module *owner;
const struct nvmem_cell_info *cells;
int ncells;
+ const struct nvmem_keepout *keepout;
+ unsigned int nkeepout;
+ enum nvmem_type type;
bool read_only;
bool root_only;
+ bool ignore_wp;
+ struct nvmem_layout *layout;
+ struct device_node *of_node;
+ bool no_of_node;
nvmem_reg_read_t reg_read;
nvmem_reg_write_t reg_write;
int size;
@@ -42,22 +132,120 @@ struct nvmem_config {
struct device *base_dev;
};
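
A hedged registration sketch for the configuration above; foo_reg_read()
stands in for a real nvmem_reg_read_t callback and the function name is
illustrative:

static int foo_probe_nvmem(struct device *dev)
{
	struct nvmem_config config = {
		.dev = dev,
		.name = "foo-eeprom",
		.type = NVMEM_TYPE_EEPROM,
		.read_only = true,
		.reg_read = foo_reg_read,
		.size = 256,
		.word_size = 1,
		.stride = 1,
	};
	struct nvmem_device *nvmem = devm_nvmem_register(dev, &config);

	return PTR_ERR_OR_ZERO(nvmem);
}
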
+/**
+ * struct nvmem_cell_table - NVMEM cell definitions for given provider
+ *
+ * @nvmem_name: Provider name.
+ * @cells: Array of cell definitions.
+ * @ncells: Number of cell definitions in the array.
+ * @node: List node.
+ *
+ * This structure, together with the related helper functions, is provided
+ * for users that can't access the nvmem provider structure but wish to
+ * register cell definitions for it, e.g. board files registering an EEPROM
+ * device.
+ */
+struct nvmem_cell_table {
+ const char *nvmem_name;
+ const struct nvmem_cell_info *cells;
+ size_t ncells;
+ struct list_head node;
+};
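
A hedged board-file sketch pairing the table with a provider it does not
own; all names and offsets are illustrative:

static const struct nvmem_cell_info board_cells[] = {
	{ .name = "mac-address", .offset = 0x40, .bytes = 6 },
};

static struct nvmem_cell_table board_cell_table = {
	.nvmem_name = "eeprom0",
	.cells = board_cells,
	.ncells = ARRAY_SIZE(board_cells),
};

/* nvmem_add_cell_table(&board_cell_table); */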
+
+/**
+ * struct nvmem_layout - NVMEM layout definitions
+ *
+ * @name: Layout name.
+ * @of_match_table: Open firmware match table.
+ * @add_cells: Will be called if an nvmem device is found which
+ * has this layout. The function will add
+ * layout-specific cells with nvmem_add_one_cell().
+ * @fixup_cell_info: Will be called before a cell is added. Can be
+ * used to modify the nvmem_cell_info.
+ * @owner: Pointer to struct module.
+ * @node: List node.
+ *
+ * An nvmem device can hold a well-defined structure which can be
+ * evaluated at runtime. For example a TLV list, or a list of "name=val"
+ * pairs. An nvmem layout can parse the nvmem device and add appropriate
+ * cells.
+ */
+struct nvmem_layout {
+ const char *name;
+ const struct of_device_id *of_match_table;
+ int (*add_cells)(struct device *dev, struct nvmem_device *nvmem,
+ struct nvmem_layout *layout);
+ void (*fixup_cell_info)(struct nvmem_device *nvmem,
+ struct nvmem_layout *layout,
+ struct nvmem_cell_info *cell);
+
+ /* private */
+ struct module *owner;
+ struct list_head node;
+};
+
#if IS_ENABLED(CONFIG_NVMEM)
struct nvmem_device *nvmem_register(const struct nvmem_config *cfg);
-int nvmem_unregister(struct nvmem_device *nvmem);
+void nvmem_unregister(struct nvmem_device *nvmem);
+
+struct nvmem_device *devm_nvmem_register(struct device *dev,
+ const struct nvmem_config *cfg);
+
+void nvmem_add_cell_table(struct nvmem_cell_table *table);
+void nvmem_del_cell_table(struct nvmem_cell_table *table);
+
+int nvmem_add_one_cell(struct nvmem_device *nvmem,
+ const struct nvmem_cell_info *info);
+
+int __nvmem_layout_register(struct nvmem_layout *layout, struct module *owner);
+#define nvmem_layout_register(layout) \
+ __nvmem_layout_register(layout, THIS_MODULE)
+void nvmem_layout_unregister(struct nvmem_layout *layout);
+
+const void *nvmem_layout_get_match_data(struct nvmem_device *nvmem,
+ struct nvmem_layout *layout);
#else
static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
-static inline int nvmem_unregister(struct nvmem_device *nvmem)
+static inline void nvmem_unregister(struct nvmem_device *nvmem) {}
+
+static inline struct nvmem_device *
+devm_nvmem_register(struct device *dev, const struct nvmem_config *c)
{
- return -ENOSYS;
+ return nvmem_register(c);
+}
+
+static inline void nvmem_add_cell_table(struct nvmem_cell_table *table) {}
+static inline void nvmem_del_cell_table(struct nvmem_cell_table *table) {}
+static inline int nvmem_add_one_cell(struct nvmem_device *nvmem,
+ const struct nvmem_cell_info *info)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int nvmem_layout_register(struct nvmem_layout *layout)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void nvmem_layout_unregister(struct nvmem_layout *layout) {}
+
+static inline const void *
+nvmem_layout_get_match_data(struct nvmem_device *nvmem,
+ struct nvmem_layout *layout)
+{
+ return NULL;
}
#endif /* CONFIG_NVMEM */
+
+#define module_nvmem_layout_driver(__layout_driver) \
+ module_driver(__layout_driver, nvmem_layout_register, \
+ nvmem_layout_unregister)
+
#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */
diff --git a/include/linux/nvram.h b/include/linux/nvram.h
index cf0ff555a6ac..d29d9c93a927 100644
--- a/include/linux/nvram.h
+++ b/include/linux/nvram.h
@@ -1,13 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NVRAM_H
#define _LINUX_NVRAM_H
+#include <linux/errno.h>
#include <uapi/linux/nvram.h>
-/* __foo is foo without grabbing the rtc_lock - get it yourself */
-extern unsigned char __nvram_read_byte(int i);
-extern unsigned char nvram_read_byte(int i);
-extern void __nvram_write_byte(unsigned char c, int i);
-extern void nvram_write_byte(unsigned char c, int i);
-extern int __nvram_check_checksum(void);
-extern int nvram_check_checksum(void);
+#ifdef CONFIG_PPC
+#include <asm/machdep.h>
+#endif
+
+/**
+ * struct nvram_ops - NVRAM functionality made available to drivers
+ * @read: validate checksum (if any) then load a range of bytes from NVRAM
+ * @write: store a range of bytes to NVRAM then update checksum (if any)
+ * @read_byte: load a single byte from NVRAM
+ * @write_byte: store a single byte to NVRAM
+ * @get_size: return the fixed number of bytes in the NVRAM
+ *
+ * Architectures which provide an nvram ops struct need not implement all
+ * of these methods. If the NVRAM hardware can be accessed only one byte
+ * at a time then it may be sufficient to provide .read_byte and .write_byte.
+ * If the NVRAM has a checksum (and it is to be checked) the .read and
+ * .write methods can be used to implement that efficiently.
+ *
+ * Portable drivers may use the wrapper functions defined here.
+ * The nvram_read() and nvram_write() functions call the .read and .write
+ * methods when available and fall back on the .read_byte and .write_byte
+ * methods otherwise.
+ */
+
+struct nvram_ops {
+ ssize_t (*get_size)(void);
+ unsigned char (*read_byte)(int);
+ void (*write_byte)(unsigned char, int);
+ ssize_t (*read)(char *, size_t, loff_t *);
+ ssize_t (*write)(char *, size_t, loff_t *);
+#if defined(CONFIG_X86) || defined(CONFIG_M68K)
+ long (*initialize)(void);
+ long (*set_checksum)(void);
+#endif
+};
+
+extern const struct nvram_ops arch_nvram_ops;
+
+static inline ssize_t nvram_get_size(void)
+{
+#ifdef CONFIG_PPC
+ if (ppc_md.nvram_size)
+ return ppc_md.nvram_size();
+#else
+ if (arch_nvram_ops.get_size)
+ return arch_nvram_ops.get_size();
+#endif
+ return -ENODEV;
+}
+
+static inline unsigned char nvram_read_byte(int addr)
+{
+#ifdef CONFIG_PPC
+ if (ppc_md.nvram_read_val)
+ return ppc_md.nvram_read_val(addr);
+#else
+ if (arch_nvram_ops.read_byte)
+ return arch_nvram_ops.read_byte(addr);
+#endif
+ return 0xFF;
+}
+
+static inline void nvram_write_byte(unsigned char val, int addr)
+{
+#ifdef CONFIG_PPC
+ if (ppc_md.nvram_write_val)
+ ppc_md.nvram_write_val(addr, val);
+#else
+ if (arch_nvram_ops.write_byte)
+ arch_nvram_ops.write_byte(val, addr);
+#endif
+}
+
+static inline ssize_t nvram_read_bytes(char *buf, size_t count, loff_t *ppos)
+{
+ ssize_t nvram_size = nvram_get_size();
+ loff_t i;
+ char *p = buf;
+
+ if (nvram_size < 0)
+ return nvram_size;
+ for (i = *ppos; count > 0 && i < nvram_size; ++i, ++p, --count)
+ *p = nvram_read_byte(i);
+ *ppos = i;
+ return p - buf;
+}
+
+static inline ssize_t nvram_write_bytes(char *buf, size_t count, loff_t *ppos)
+{
+ ssize_t nvram_size = nvram_get_size();
+ loff_t i;
+ char *p = buf;
+
+ if (nvram_size < 0)
+ return nvram_size;
+ for (i = *ppos; count > 0 && i < nvram_size; ++i, ++p, --count)
+ nvram_write_byte(*p, i);
+ *ppos = i;
+ return p - buf;
+}
+
+static inline ssize_t nvram_read(char *buf, size_t count, loff_t *ppos)
+{
+#ifdef CONFIG_PPC
+ if (ppc_md.nvram_read)
+ return ppc_md.nvram_read(buf, count, ppos);
+#else
+ if (arch_nvram_ops.read)
+ return arch_nvram_ops.read(buf, count, ppos);
+#endif
+ return nvram_read_bytes(buf, count, ppos);
+}
+
+static inline ssize_t nvram_write(char *buf, size_t count, loff_t *ppos)
+{
+#ifdef CONFIG_PPC
+ if (ppc_md.nvram_write)
+ return ppc_md.nvram_write(buf, count, ppos);
+#else
+ if (arch_nvram_ops.write)
+ return arch_nvram_ops.write(buf, count, ppos);
+#endif
+ return nvram_write_bytes(buf, count, ppos);
+}
+
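A hedged caller sketch: the wrappers above pick the architecture's block
methods when present and otherwise fall back to the byte loops, so portable
code only ever calls nvram_read()/nvram_write():

static ssize_t foo_dump_nvram(char *buf, size_t len)
{
	loff_t pos = 0;

	return nvram_read(buf, len, &pos);	/* bytes read, or -ENODEV */
}
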
#endif /* _LINUX_NVRAM_H */
diff --git a/include/linux/objagg.h b/include/linux/objagg.h
new file mode 100644
index 000000000000..78021777df46
--- /dev/null
+++ b/include/linux/objagg.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _OBJAGG_H
+#define _OBJAGG_H
+
+struct objagg_ops {
+ size_t obj_size;
+ bool (*delta_check)(void *priv, const void *parent_obj,
+ const void *obj);
+ int (*hints_obj_cmp)(const void *obj1, const void *obj2);
+ void * (*delta_create)(void *priv, void *parent_obj, void *obj);
+ void (*delta_destroy)(void *priv, void *delta_priv);
+ void * (*root_create)(void *priv, void *obj, unsigned int root_id);
+#define OBJAGG_OBJ_ROOT_ID_INVALID UINT_MAX
+ void (*root_destroy)(void *priv, void *root_priv);
+};
+
+struct objagg;
+struct objagg_obj;
+struct objagg_hints;
+
+const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj);
+const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj);
+const void *objagg_obj_raw(const struct objagg_obj *objagg_obj);
+
+struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj);
+void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj);
+struct objagg *objagg_create(const struct objagg_ops *ops,
+ struct objagg_hints *hints, void *priv);
+void objagg_destroy(struct objagg *objagg);
+
+struct objagg_obj_stats {
+ unsigned int user_count;
+ unsigned int delta_user_count; /* includes delta object users */
+};
+
+struct objagg_obj_stats_info {
+ struct objagg_obj_stats stats;
+ struct objagg_obj *objagg_obj; /* associated object */
+ bool is_root;
+};
+
+struct objagg_stats {
+ unsigned int root_count;
+ unsigned int stats_info_count;
+ struct objagg_obj_stats_info stats_info[];
+};
+
+const struct objagg_stats *objagg_stats_get(struct objagg *objagg);
+void objagg_stats_put(const struct objagg_stats *objagg_stats);
+
+enum objagg_opt_algo_type {
+ OBJAGG_OPT_ALGO_SIMPLE_GREEDY,
+};
+
+struct objagg_hints *objagg_hints_get(struct objagg *objagg,
+ enum objagg_opt_algo_type opt_algo_type);
+void objagg_hints_put(struct objagg_hints *objagg_hints);
+const struct objagg_stats *
+objagg_hints_stats_get(struct objagg_hints *objagg_hints);
+
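A hedged usage sketch of the reference-counted lookup API; my_ops, my_priv
and my_obj are illustrative, and ops->obj_size must match the key object
being passed in:

static int foo_use_objagg(const struct objagg_ops *my_ops, void *my_priv,
			  void *my_obj)
{
	struct objagg *objagg;
	struct objagg_obj *oo;

	objagg = objagg_create(my_ops, NULL /* no hints */, my_priv);
	if (IS_ERR(objagg))
		return PTR_ERR(objagg);

	oo = objagg_obj_get(objagg, my_obj);	/* lookup-or-create */
	if (IS_ERR(oo)) {
		objagg_destroy(objagg);
		return PTR_ERR(oo);
	}

	/* consult objagg_obj_root_priv() / objagg_obj_delta_priv() here */

	objagg_obj_put(objagg, oo);
	objagg_destroy(objagg);
	return 0;
}
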
+#endif
diff --git a/include/linux/objtool.h b/include/linux/objtool.h
new file mode 100644
index 000000000000..03f82c2c2ebf
--- /dev/null
+++ b/include/linux/objtool.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_OBJTOOL_H
+#define _LINUX_OBJTOOL_H
+
+#include <linux/objtool_types.h>
+
+#ifdef CONFIG_OBJTOOL
+
+#include <asm/asm.h>
+
+#ifndef __ASSEMBLY__
+
+#define UNWIND_HINT(type, sp_reg, sp_offset, signal) \
+ "987: \n\t" \
+ ".pushsection .discard.unwind_hints\n\t" \
+ /* struct unwind_hint */ \
+ ".long 987b - .\n\t" \
+ ".short " __stringify(sp_offset) "\n\t" \
+ ".byte " __stringify(sp_reg) "\n\t" \
+ ".byte " __stringify(type) "\n\t" \
+ ".byte " __stringify(signal) "\n\t" \
+ ".balign 4 \n\t" \
+ ".popsection\n\t"
+
+/*
+ * This macro marks the given function's stack frame as "non-standard", which
+ * tells objtool to ignore the function when doing stack metadata validation.
+ * It should only be used in special cases where you're 100% sure it won't
+ * affect the reliability of frame pointers and kernel stack traces.
+ *
+ * For more information, see tools/objtool/Documentation/objtool.txt.
+ */
+#define STACK_FRAME_NON_STANDARD(func) \
+ static void __used __section(".discard.func_stack_frame_non_standard") \
+ *__func_stack_frame_non_standard_##func = func
+
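A hypothetical usage fragment (not part of this patch; my_trampoline is
illustrative):

/*
 *	asmlinkage void my_trampoline(void);	// defined in assembly
 *	STACK_FRAME_NON_STANDARD(my_trampoline);
 */
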
+/*
+ * STACK_FRAME_NON_STANDARD_FP() is a frame-pointer-specific function ignore
+ * for the case where a function is intentionally missing frame pointer setup,
+ * but otherwise needs objtool/ORC coverage when frame pointers are disabled.
+ */
+#ifdef CONFIG_FRAME_POINTER
+#define STACK_FRAME_NON_STANDARD_FP(func) STACK_FRAME_NON_STANDARD(func)
+#else
+#define STACK_FRAME_NON_STANDARD_FP(func)
+#endif
+
+#define ANNOTATE_NOENDBR \
+ "986: \n\t" \
+ ".pushsection .discard.noendbr\n\t" \
+ ".long 986b - .\n\t" \
+ ".popsection\n\t"
+
+#define ASM_REACHABLE \
+ "998:\n\t" \
+ ".pushsection .discard.reachable\n\t" \
+ ".long 998b - .\n\t" \
+ ".popsection\n\t"
+
+#else /* __ASSEMBLY__ */
+
+/*
+ * This macro indicates that the following intra-function call is valid.
+ * Any non-annotated intra-function call will cause objtool to issue a warning.
+ */
+#define ANNOTATE_INTRA_FUNCTION_CALL \
+ 999: \
+ .pushsection .discard.intra_function_calls; \
+ .long 999b - .; \
+ .popsection;
+
+/*
+ * In asm, there are two kinds of code: normal C-type callable functions and
+ * the rest. The normal callable functions can be called by other code, and
+ * don't do anything unusual with the stack. Such normal callable functions
+ * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this
+ * category. In this case, no special debugging annotations are needed because
+ * objtool can automatically generate the ORC data for the ORC unwinder to read
+ * at runtime.
+ *
+ * Anything which doesn't fall into the above category, such as syscall and
+ * interrupt handlers, tends to not be called directly by other functions, and
+ * often does unusual non-C-function-type things with the stack pointer. Such
+ * code needs to be annotated such that objtool can understand it. The
+ * following CFI hint macros are for this type of code.
+ *
+ * These macros provide hints to objtool about the state of the stack at each
+ * instruction. Objtool starts from the hints and follows the code flow,
+ * making automatic CFI adjustments when it sees pushes and pops, filling out
+ * the debuginfo as necessary. It will also warn if it sees any
+ * inconsistencies.
+ */
+.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0
+.Lhere_\@:
+ .pushsection .discard.unwind_hints
+ /* struct unwind_hint */
+ .long .Lhere_\@ - .
+ .short \sp_offset
+ .byte \sp_reg
+ .byte \type
+ .byte \signal
+ .balign 4
+ .popsection
+.endm
+
+.macro STACK_FRAME_NON_STANDARD func:req
+ .pushsection .discard.func_stack_frame_non_standard, "aw"
+ .long \func - .
+ .popsection
+.endm
+
+.macro STACK_FRAME_NON_STANDARD_FP func:req
+#ifdef CONFIG_FRAME_POINTER
+ STACK_FRAME_NON_STANDARD \func
+#endif
+.endm
+
+.macro ANNOTATE_NOENDBR
+.Lhere_\@:
+ .pushsection .discard.noendbr
+ .long .Lhere_\@ - .
+ .popsection
+.endm
+
+/*
+ * Use objtool to validate the entry requirement that all code paths do
+ * VALIDATE_UNRET_END before RET.
+ *
+ * NOTE: The macro must be used at the beginning of a global symbol, otherwise
+ * it will be ignored.
+ */
+.macro VALIDATE_UNRET_BEGIN
+#if defined(CONFIG_NOINSTR_VALIDATION) && defined(CONFIG_CPU_UNRET_ENTRY)
+.Lhere_\@:
+ .pushsection .discard.validate_unret
+ .long .Lhere_\@ - .
+ .popsection
+#endif
+.endm
+
+.macro REACHABLE
+.Lhere_\@:
+ .pushsection .discard.reachable
+ .long .Lhere_\@ - .
+ .popsection
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#else /* !CONFIG_OBJTOOL */
+
+#ifndef __ASSEMBLY__
+
+#define UNWIND_HINT(type, sp_reg, sp_offset, signal) "\n\t"
+#define STACK_FRAME_NON_STANDARD(func)
+#define STACK_FRAME_NON_STANDARD_FP(func)
+#define ANNOTATE_NOENDBR
+#define ASM_REACHABLE
+#else
+#define ANNOTATE_INTRA_FUNCTION_CALL
+.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0
+.endm
+.macro STACK_FRAME_NON_STANDARD func:req
+.endm
+.macro ANNOTATE_NOENDBR
+.endm
+.macro REACHABLE
+.endm
+#endif
+
+#endif /* CONFIG_OBJTOOL */
+
+#endif /* _LINUX_OBJTOOL_H */
diff --git a/include/linux/objtool_types.h b/include/linux/objtool_types.h
new file mode 100644
index 000000000000..453a4f4ef39d
--- /dev/null
+++ b/include/linux/objtool_types.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_OBJTOOL_TYPES_H
+#define _LINUX_OBJTOOL_TYPES_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+/*
+ * This struct is used by asm and inline asm code to manually annotate the
+ * location of registers on the stack.
+ */
+struct unwind_hint {
+ u32 ip;
+ s16 sp_offset;
+ u8 sp_reg;
+ u8 type;
+ u8 signal;
+};
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * UNWIND_HINT_TYPE_UNDEFINED: A blind spot in ORC coverage which can result in
+ * a truncated and unreliable stack unwind.
+ *
+ * UNWIND_HINT_TYPE_END_OF_STACK: The end of the kernel stack unwind before
+ * hitting user entry, boot code, or fork entry (when there are no pt_regs
+ * available).
+ *
+ * UNWIND_HINT_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP
+ * (the caller's SP right before it made the call). Used for all callable
+ * functions, i.e. all C code and all callable asm functions.
+ *
+ * UNWIND_HINT_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset
+ * points to a fully populated pt_regs from a syscall, interrupt, or exception.
+ *
+ * UNWIND_HINT_TYPE_REGS_PARTIAL: Used in entry code to indicate that
+ * sp_reg+sp_offset points to the iret return frame.
+ *
+ * UNWIND_HINT_TYPE_FUNC: Generate the unwind metadata of a callable function.
+ * Useful for code which doesn't have an ELF function annotation.
+ *
+ * UNWIND_HINT_TYPE_{SAVE,RESTORE}: Save the unwind metadata at a certain
+ * location so that it can be restored later.
+ */
+#define UNWIND_HINT_TYPE_UNDEFINED 0
+#define UNWIND_HINT_TYPE_END_OF_STACK 1
+#define UNWIND_HINT_TYPE_CALL 2
+#define UNWIND_HINT_TYPE_REGS 3
+#define UNWIND_HINT_TYPE_REGS_PARTIAL 4
+/* The below hint types don't have corresponding ORC types */
+#define UNWIND_HINT_TYPE_FUNC 5
+#define UNWIND_HINT_TYPE_SAVE 6
+#define UNWIND_HINT_TYPE_RESTORE 7
+
+#endif /* _LINUX_OBJTOOL_TYPES_H */
diff --git a/include/linux/of.h b/include/linux/of.h
index 4a8a70916237..6ecde0515677 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _LINUX_OF_H
#define _LINUX_OF_H
/*
@@ -9,25 +10,16 @@
* Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
* Updates for SPARC64 by David S. Miller
* Derived from PowerPC and Sparc prom.h files by Stephen Rothwell, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kobject.h>
#include <linux/mod_devicetable.h>
-#include <linux/spinlock.h>
-#include <linux/topology.h>
-#include <linux/notifier.h>
#include <linux/property.h>
#include <linux/list.h>
#include <asm/byteorder.h>
-#include <asm/errno.h>
typedef u32 phandle;
typedef u32 ihandle;
@@ -37,9 +29,15 @@ struct property {
int length;
void *value;
struct property *next;
+#if defined(CONFIG_OF_DYNAMIC) || defined(CONFIG_SPARC)
unsigned long _flags;
+#endif
+#if defined(CONFIG_OF_PROMTREE)
unsigned int unique_id;
+#endif
+#if defined(CONFIG_OF_KOBJ)
struct bin_attribute attr;
+#endif
};
#if defined(CONFIG_SPARC)
@@ -48,7 +46,6 @@ struct of_irq_controller;
struct device_node {
const char *name;
- const char *type;
phandle phandle;
const char *full_name;
struct fwnode_handle fwnode;
@@ -58,11 +55,12 @@ struct device_node {
struct device_node *parent;
struct device_node *child;
struct device_node *sibling;
+#if defined(CONFIG_OF_KOBJ)
struct kobject kobj;
+#endif
unsigned long _flags;
void *data;
#if defined(CONFIG_SPARC)
- const char *path_component_name;
unsigned int unique_id;
struct of_irq_controller *irq_trans;
#endif
@@ -98,27 +96,33 @@ struct of_reconfig_data {
struct property *old_prop;
};
+/**
+ * of_node_init - initialize a devicetree node
+ * @node: Pointer to device node that has been created by kzalloc()
+ *
+ * On return the device_node refcount is set to one. Use of_node_put()
+ * on @node when done to free the memory allocated for it. If the node
+ * is NOT a dynamic node the memory will not be freed. The decision of
+ * whether to free the memory will be done by node->release(), which is
+ * of_node_release().
+ */
/* initialize a node */
-extern struct kobj_type of_node_ktype;
+extern const struct kobj_type of_node_ktype;
extern const struct fwnode_operations of_fwnode_ops;
static inline void of_node_init(struct device_node *node)
{
+#if defined(CONFIG_OF_KOBJ)
kobject_init(&node->kobj, &of_node_ktype);
- node->fwnode.type = FWNODE_OF;
- node->fwnode.ops = &of_fwnode_ops;
-}
-
-/* true when node is initialized */
-static inline int of_node_is_initialized(struct device_node *node)
-{
- return node && node->kobj.state_initialized;
+#endif
+ fwnode_init(&node->fwnode, &of_fwnode_ops);
}
-/* true when node is attached (i.e. present on sysfs) */
-static inline int of_node_is_attached(struct device_node *node)
-{
- return node && node->kobj.state_in_sysfs;
-}
+#if defined(CONFIG_OF_KOBJ)
+#define of_node_kobj(n) (&(n)->kobj)
+#else
+#define of_node_kobj(n) NULL
+#endif
#ifdef CONFIG_OF_DYNAMIC
extern struct device_node *of_node_get(struct device_node *node);
@@ -137,13 +141,17 @@ extern struct device_node *of_root;
extern struct device_node *of_chosen;
extern struct device_node *of_aliases;
extern struct device_node *of_stdout;
-extern raw_spinlock_t devtree_lock;
-/* flag descriptions (need to be visible even when !CONFIG_OF) */
-#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
-#define OF_DETACHED 2 /* node has been detached from the device tree */
-#define OF_POPULATED 3 /* device already created for the node */
-#define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */
+/*
+ * struct device_node flag descriptions
+ * (need to be visible even when !CONFIG_OF)
+ */
+#define OF_DYNAMIC 1 /* (and properties) allocated via kmalloc */
+#define OF_DETACHED 2 /* detached from the device tree */
+#define OF_POPULATED 3 /* device already created */
+#define OF_POPULATED_BUS 4 /* platform bus created for children */
+#define OF_OVERLAY 5 /* allocated for an overlay */
+#define OF_OVERLAY_FREE_CSET 6 /* in overlay cset being freed */
#define OF_BAD_ADDR ((u64)-1)
@@ -152,7 +160,7 @@ void of_core_init(void);
static inline bool is_of_node(const struct fwnode_handle *fwnode)
{
- return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_OF;
+ return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &of_fwnode_ops;
}
#define to_of_node(__fwnode) \
@@ -183,7 +191,7 @@ static inline bool of_node_is_root(const struct device_node *node)
return node && (node->parent == NULL);
}
-static inline int of_node_check_flag(struct device_node *n, unsigned long flag)
+static inline int of_node_check_flag(const struct device_node *n, unsigned long flag)
{
return test_bit(flag, &n->_flags);
}
@@ -204,7 +212,8 @@ static inline void of_node_clear_flag(struct device_node *n, unsigned long flag)
clear_bit(flag, &n->_flags);
}
-static inline int of_property_check_flag(struct property *p, unsigned long flag)
+#if defined(CONFIG_OF_DYNAMIC) || defined(CONFIG_SPARC)
+static inline int of_property_check_flag(const struct property *p, unsigned long flag)
{
return test_bit(flag, &p->_flags);
}
@@ -218,6 +227,7 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
{
clear_bit(flag, &p->_flags);
}
+#endif
extern struct device_node *__of_find_all_nodes(struct device_node *prev);
extern struct device_node *of_find_all_nodes(struct device_node *prev);
@@ -230,8 +240,8 @@ extern struct device_node *of_find_all_nodes(struct device_node *prev);
static inline u64 of_read_number(const __be32 *cell, int size)
{
u64 r = 0;
- while (size--)
- r = (r << 32) | be32_to_cpu(*(cell++));
+ for (; size--; cell++)
+ r = (r << 32) | be32_to_cpu(*cell);
return r;
}
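
of_read_number() folds big-endian cells most-significant first; a hedged
sketch for a two-cell address (#address-cells = <2>), where <0x1 0x80000000>
decodes to 0x180000000:

static u64 foo_decode_addr(const __be32 *reg)
{
	return of_read_number(reg, 2);	/* (cell[0] << 32) | cell[1] */
}
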
@@ -246,15 +256,12 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
#include <asm/prom.h>
#endif
-/* Default #address and #size cells. Allow arch asm/prom.h to override */
-#if !defined(OF_ROOT_NODE_ADDR_CELLS_DEFAULT)
-#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1
-#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1
-#endif
-
#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
+extern bool of_node_name_eq(const struct device_node *np, const char *name);
+extern bool of_node_name_prefix(const struct device_node *np, const char *prefix);
+
static inline const char *of_node_full_name(const struct device_node *np)
{
return np ? np->full_name : "<no-node>";
@@ -289,6 +296,8 @@ extern struct device_node *of_get_next_child(const struct device_node *node,
extern struct device_node *of_get_next_available_child(
const struct device_node *node, struct device_node *prev);
+extern struct device_node *of_get_compatible_child(const struct device_node *parent,
+ const char *compatible);
extern struct device_node *of_get_child_by_name(const struct device_node *node,
const char *name);
@@ -339,7 +348,7 @@ extern int of_property_read_string_helper(const struct device_node *np,
const char **out_strs, size_t sz, int index);
extern int of_device_is_compatible(const struct device_node *device,
const char *);
-extern int of_device_compatible_match(struct device_node *device,
+extern int of_device_compatible_match(const struct device_node *device,
const char *const *compat);
extern bool of_device_is_available(const struct device_node *device);
extern bool of_device_is_big_endian(const struct device_node *device);
@@ -347,6 +356,13 @@ extern const void *of_get_property(const struct device_node *node,
const char *name,
int *lenp);
extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
+extern struct device_node *of_cpu_device_node_get(int cpu);
+extern int of_cpu_node_to_id(struct device_node *np);
+extern struct device_node *of_get_next_cpu_node(struct device_node *prev);
+extern struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
+ int index);
+extern u64 of_get_cpu_hwid(struct device_node *cpun, unsigned int thread);
+
#define for_each_property_of_node(dn, pp) \
for (pp = dn->properties; pp != NULL; pp = pp->next)
@@ -354,20 +370,23 @@ extern int of_n_addr_cells(struct device_node *np);
extern int of_n_size_cells(struct device_node *np);
extern const struct of_device_id *of_match_node(
const struct of_device_id *matches, const struct device_node *node);
-extern int of_modalias_node(struct device_node *node, char *modalias, int len);
+extern const void *of_device_get_match_data(const struct device *dev);
+extern int of_alias_from_compatible(const struct device_node *node, char *alias,
+ int len);
extern void of_print_phandle_args(const char *msg, const struct of_phandle_args *args);
-extern struct device_node *of_parse_phandle(const struct device_node *np,
- const char *phandle_name,
- int index);
-extern int of_parse_phandle_with_args(const struct device_node *np,
- const char *list_name, const char *cells_name, int index,
- struct of_phandle_args *out_args);
-extern int of_parse_phandle_with_fixed_args(const struct device_node *np,
- const char *list_name, int cells_count, int index,
+extern int __of_parse_phandle_with_args(const struct device_node *np,
+ const char *list_name, const char *cells_name, int cell_count,
+ int index, struct of_phandle_args *out_args);
+extern int of_parse_phandle_with_args_map(const struct device_node *np,
+ const char *list_name, const char *stem_name, int index,
struct of_phandle_args *out_args);
extern int of_count_phandle_with_args(const struct device_node *np,
const char *list_name, const char *cells_name);
+/* module functions */
+extern ssize_t of_modalias(const struct device_node *np, char *str, ssize_t len);
+extern int of_request_module(const struct device_node *np);
+
/* phandle iterator functions */
extern int of_phandle_iterator_init(struct of_phandle_iterator *it,
const struct device_node *np,
@@ -402,122 +421,6 @@ extern int of_detach_node(struct device_node *);
#define of_match_ptr(_ptr) (_ptr)
-/**
- * of_property_read_u8_array - Find and read an array of u8 from a property.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
- *
- * Search for a property in a device node and read 8-bit value(s) from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * dts entry of array should be like:
- * property = /bits/ 8 <0x50 0x60 0x70>;
- *
- * The out_values is modified only if a valid u8 value can be decoded.
- */
-static inline int of_property_read_u8_array(const struct device_node *np,
- const char *propname,
- u8 *out_values, size_t sz)
-{
- int ret = of_property_read_variable_u8_array(np, propname, out_values,
- sz, 0);
- if (ret >= 0)
- return 0;
- else
- return ret;
-}
-
-/**
- * of_property_read_u16_array - Find and read an array of u16 from a property.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
- *
- * Search for a property in a device node and read 16-bit value(s) from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * dts entry of array should be like:
- * property = /bits/ 16 <0x5000 0x6000 0x7000>;
- *
- * The out_values is modified only if a valid u16 value can be decoded.
- */
-static inline int of_property_read_u16_array(const struct device_node *np,
- const char *propname,
- u16 *out_values, size_t sz)
-{
- int ret = of_property_read_variable_u16_array(np, propname, out_values,
- sz, 0);
- if (ret >= 0)
- return 0;
- else
- return ret;
-}
-
-/**
- * of_property_read_u32_array - Find and read an array of 32 bit integers
- * from a property.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
- *
- * Search for a property in a device node and read 32-bit value(s) from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * The out_values is modified only if a valid u32 value can be decoded.
- */
-static inline int of_property_read_u32_array(const struct device_node *np,
- const char *propname,
- u32 *out_values, size_t sz)
-{
- int ret = of_property_read_variable_u32_array(np, propname, out_values,
- sz, 0);
- if (ret >= 0)
- return 0;
- else
- return ret;
-}
-
-/**
- * of_property_read_u64_array - Find and read an array of 64 bit integers
- * from a property.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
- *
- * Search for a property in a device node and read 64-bit value(s) from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * The out_values is modified only if a valid u64 value can be decoded.
- */
-static inline int of_property_read_u64_array(const struct device_node *np,
- const char *propname,
- u64 *out_values, size_t sz)
-{
- int ret = of_property_read_variable_u64_array(np, propname, out_values,
- sz, 0);
- if (ret >= 0)
- return 0;
- else
- return ret;
-}
-
/*
* struct property *prop;
* const __be32 *p;
@@ -539,6 +442,17 @@ const char *of_prop_next_string(struct property *prop, const char *cur);
bool of_console_check(struct device_node *dn, char *name, int index);
+int of_map_id(struct device_node *np, u32 id,
+ const char *map_name, const char *map_mask_name,
+ struct device_node **target, u32 *id_out);
+
+phys_addr_t of_dma_get_max_cpu_address(struct device_node *np);
+
+struct kimage;
+void *of_kexec_alloc_and_setup_fdt(const struct kimage *image,
+ unsigned long initrd_load_addr,
+ unsigned long initrd_len,
+ const char *cmdline, size_t extra_fdt_size);
#else /* CONFIG_OF */
static inline void of_core_init(void)
@@ -555,6 +469,16 @@ static inline struct device_node *to_of_node(const struct fwnode_handle *fwnode)
return NULL;
}
+static inline bool of_node_name_eq(const struct device_node *np, const char *name)
+{
+ return false;
+}
+
+static inline bool of_node_name_prefix(const struct device_node *np, const char *prefix)
+{
+ return false;
+}
+
static inline const char* of_node_full_name(const struct device_node *np)
{
return "<no-node>";
@@ -601,6 +525,11 @@ static inline struct device_node *of_get_parent(const struct device_node *node)
return NULL;
}
+static inline struct device_node *of_get_next_parent(struct device_node *node)
+{
+ return NULL;
+}
+
static inline struct device_node *of_get_next_child(
const struct device_node *node, struct device_node *prev)
{
@@ -626,6 +555,12 @@ static inline bool of_have_populated_dt(void)
return false;
}
+static inline struct device_node *of_get_compatible_child(const struct device_node *parent,
+ const char *compatible)
+{
+ return NULL;
+}
+
static inline struct device_node *of_get_child_by_name(
const struct device_node *node,
const char *name)
@@ -639,7 +574,7 @@ static inline int of_device_is_compatible(const struct device_node *device,
return 0;
}
-static inline int of_device_compatible_match(struct device_node *device,
+static inline int of_device_compatible_match(const struct device_node *device,
const char *const *compat)
{
return 0;
@@ -682,57 +617,77 @@ static inline int of_property_read_u32_index(const struct device_node *np,
return -ENOSYS;
}
-static inline int of_property_read_u8_array(const struct device_node *np,
- const char *propname, u8 *out_values, size_t sz)
+static inline int of_property_read_u64_index(const struct device_node *np,
+ const char *propname, u32 index, u64 *out_value)
{
return -ENOSYS;
}
-static inline int of_property_read_u16_array(const struct device_node *np,
- const char *propname, u16 *out_values, size_t sz)
+static inline const void *of_get_property(const struct device_node *node,
+ const char *name,
+ int *lenp)
{
- return -ENOSYS;
+ return NULL;
}
-static inline int of_property_read_u32_array(const struct device_node *np,
- const char *propname,
- u32 *out_values, size_t sz)
+static inline struct device_node *of_get_cpu_node(int cpu,
+ unsigned int *thread)
{
- return -ENOSYS;
+ return NULL;
}
-static inline int of_property_read_u64_array(const struct device_node *np,
- const char *propname,
- u64 *out_values, size_t sz)
+static inline struct device_node *of_cpu_device_node_get(int cpu)
{
- return -ENOSYS;
+ return NULL;
}
-static inline int of_property_read_string(const struct device_node *np,
- const char *propname,
- const char **out_string)
+static inline int of_cpu_node_to_id(struct device_node *np)
{
- return -ENOSYS;
+ return -ENODEV;
}
-static inline int of_property_read_string_helper(const struct device_node *np,
- const char *propname,
- const char **out_strs, size_t sz, int index)
+static inline struct device_node *of_get_next_cpu_node(struct device_node *prev)
{
- return -ENOSYS;
+ return NULL;
}
-static inline const void *of_get_property(const struct device_node *node,
- const char *name,
- int *lenp)
+static inline struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
+ int index)
{
return NULL;
}
-static inline struct device_node *of_get_cpu_node(int cpu,
- unsigned int *thread)
+static inline int of_n_addr_cells(struct device_node *np)
{
- return NULL;
+	return 0;
+}
+static inline int of_n_size_cells(struct device_node *np)
+{
+ return 0;
+}
+
+static inline int of_property_read_variable_u8_array(const struct device_node *np,
+ const char *propname, u8 *out_values,
+ size_t sz_min, size_t sz_max)
+{
+ return -ENOSYS;
+}
+
+static inline int of_property_read_variable_u16_array(const struct device_node *np,
+ const char *propname, u16 *out_values,
+ size_t sz_min, size_t sz_max)
+{
+ return -ENOSYS;
+}
+
+static inline int of_property_read_variable_u32_array(const struct device_node *np,
+ const char *propname,
+ u32 *out_values,
+ size_t sz_min,
+ size_t sz_max)
+{
+ return -ENOSYS;
}
static inline int of_property_read_u64(const struct device_node *np,
@@ -741,6 +696,22 @@ static inline int of_property_read_u64(const struct device_node *np,
return -ENOSYS;
}
+static inline int of_property_read_variable_u64_array(const struct device_node *np,
+ const char *propname,
+ u64 *out_values,
+ size_t sz_min,
+ size_t sz_max)
+{
+ return -ENOSYS;
+}
+
+static inline int of_property_read_string(const struct device_node *np,
+ const char *propname,
+ const char **out_string)
+{
+ return -ENOSYS;
+}
+
static inline int of_property_match_string(const struct device_node *np,
const char *propname,
const char *string)
@@ -748,36 +719,50 @@ static inline int of_property_match_string(const struct device_node *np,
return -ENOSYS;
}
-static inline struct device_node *of_parse_phandle(const struct device_node *np,
- const char *phandle_name,
- int index)
+static inline int of_property_read_string_helper(const struct device_node *np,
+ const char *propname,
+ const char **out_strs, size_t sz, int index)
{
- return NULL;
+ return -ENOSYS;
}
-static inline int of_parse_phandle_with_args(const struct device_node *np,
- const char *list_name,
- const char *cells_name,
- int index,
- struct of_phandle_args *out_args)
+static inline int __of_parse_phandle_with_args(const struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int cell_count,
+ int index,
+ struct of_phandle_args *out_args)
{
return -ENOSYS;
}
-static inline int of_parse_phandle_with_fixed_args(const struct device_node *np,
- const char *list_name, int cells_count, int index,
- struct of_phandle_args *out_args)
+static inline int of_parse_phandle_with_args_map(const struct device_node *np,
+ const char *list_name,
+ const char *stem_name,
+ int index,
+ struct of_phandle_args *out_args)
{
return -ENOSYS;
}
-static inline int of_count_phandle_with_args(struct device_node *np,
+static inline int of_count_phandle_with_args(const struct device_node *np,
const char *list_name,
const char *cells_name)
{
return -ENOSYS;
}
+static inline ssize_t of_modalias(const struct device_node *np, char *str,
+ ssize_t len)
+{
+ return -ENODEV;
+}
+
+static inline int of_request_module(const struct device_node *np)
+{
+ return -ENODEV;
+}
+
static inline int of_phandle_iterator_init(struct of_phandle_iterator *it,
const struct device_node *np,
const char *list_name,
@@ -814,6 +799,16 @@ static inline int of_machine_is_compatible(const char *compat)
return 0;
}
+static inline int of_add_property(struct device_node *np, struct property *prop)
+{
+ return 0;
+}
+
+static inline int of_remove_property(struct device_node *np, struct property *prop)
+{
+ return 0;
+}
+
static inline bool of_console_check(const struct device_node *dn, const char *name, int index)
{
return false;
@@ -850,7 +845,8 @@ static inline void of_node_clear_flag(struct device_node *n, unsigned long flag)
{
}
-static inline int of_property_check_flag(struct property *p, unsigned long flag)
+static inline int of_property_check_flag(const struct property *p,
+ unsigned long flag)
{
return 0;
}
@@ -863,6 +859,23 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
{
}
+static inline int of_map_id(struct device_node *np, u32 id,
+ const char *map_name, const char *map_mask_name,
+ struct device_node **target, u32 *id_out)
+{
+ return -EINVAL;
+}
+
+static inline phys_addr_t of_dma_get_max_cpu_address(struct device_node *np)
+{
+ return PHYS_ADDR_MAX;
+}
+
+static inline const void *of_device_get_match_data(const struct device *dev)
+{
+ return NULL;
+}
+
#define of_match_ptr(_ptr) NULL
#define of_match_node(_matches, _node) NULL
#endif /* CONFIG_OF */
@@ -874,6 +887,12 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
#endif
+static inline int of_prop_val_eq(struct property *p1, struct property *p2)
+{
+ return p1->length == p2->length &&
+ !memcmp(p1->value, p2->value, (size_t)p1->length);
+}
+
#if defined(CONFIG_OF) && defined(CONFIG_NUMA)
extern int of_node_to_nid(struct device_node *np);
#else
@@ -899,6 +918,154 @@ static inline struct device_node *of_find_matching_node(
return of_find_matching_node_and_match(from, matches, NULL);
}
+static inline const char *of_node_get_device_type(const struct device_node *np)
+{
+ return of_get_property(np, "device_type", NULL);
+}
+
+static inline bool of_node_is_type(const struct device_node *np, const char *type)
+{
+ const char *match = of_node_get_device_type(np);
+
+ return np && match && type && !strcmp(match, type);
+}
+
+/**
+ * of_parse_phandle - Resolve a phandle property to a device_node pointer
+ * @np: Pointer to device node holding phandle property
+ * @phandle_name: Name of property holding a phandle value
+ * @index: For properties holding a table of phandles, this is the index into
+ * the table
+ *
+ * Return: The device_node pointer with refcount incremented. Use
+ * of_node_put() on it when done.
+ */
+static inline struct device_node *of_parse_phandle(const struct device_node *np,
+ const char *phandle_name,
+ int index)
+{
+ struct of_phandle_args args;
+
+ if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0,
+ index, &args))
+ return NULL;
+
+ return args.np;
+}
+
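A short usage sketch for the inline above; the "companion" property name is illustrative:

	struct device_node *companion;

	companion = of_parse_phandle(np, "companion", 0);
	if (companion) {
		/* ... use the referenced node ... */
		of_node_put(companion); /* balance the refcount taken above */
	}
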
+/**
+ * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
+ * @np: pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cells_name: property name that specifies phandles' arguments count
+ * @index: index of a phandle to parse out
+ * @out_args: optional pointer to output arguments structure (will be filled)
+ *
+ * This function is useful to parse lists of phandles and their arguments.
+ * Returns 0 on success and fills out_args; on error, returns an appropriate
+ * errno value.
+ *
+ * The caller is responsible for calling of_node_put() on the returned
+ * out_args->np pointer.
+ *
+ * Example::
+ *
+ * phandle1: node1 {
+ * #list-cells = <2>;
+ * };
+ *
+ * phandle2: node2 {
+ * #list-cells = <1>;
+ * };
+ *
+ * node3 {
+ * list = <&phandle1 1 2 &phandle2 3>;
+ * };
+ *
+ * To get a device_node of the ``node2`` node you may call this:
+ * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
+ */
+static inline int of_parse_phandle_with_args(const struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int index,
+ struct of_phandle_args *out_args)
+{
+ int cell_count = -1;
+
+ /* If cells_name is NULL we assume a cell count of 0 */
+ if (!cells_name)
+ cell_count = 0;
+
+ return __of_parse_phandle_with_args(np, list_name, cells_name,
+ cell_count, index, out_args);
+}
+
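Continuing the DTS example in the kernel-doc above, a rough sketch of consuming the parsed arguments (assuming node3 holds the struct device_node for the example's node3; the pr_info() is illustrative):

	struct of_phandle_args args;
	int i, err;

	err = of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
	if (!err) {
		/* args.np is node2; args.args_count is 1, per node2's #list-cells */
		for (i = 0; i < args.args_count; i++)
			pr_info("arg[%d] = %u\n", i, args.args[i]);
		of_node_put(args.np);	/* the caller owns this reference */
	}
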
+/**
+ * of_parse_phandle_with_fixed_args() - Find a node pointed by phandle in a list
+ * @np: pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cell_count: number of argument cells following the phandle
+ * @index: index of a phandle to parse out
+ * @out_args: optional pointer to output arguments structure (will be filled)
+ *
+ * This function is useful to parse lists of phandles and their arguments.
+ * Returns 0 on success and fills out_args; on error, returns an appropriate
+ * errno value.
+ *
+ * The caller is responsible for calling of_node_put() on the returned
+ * out_args->np pointer.
+ *
+ * Example::
+ *
+ * phandle1: node1 {
+ * };
+ *
+ * phandle2: node2 {
+ * };
+ *
+ * node3 {
+ * list = <&phandle1 0 2 &phandle2 2 3>;
+ * };
+ *
+ * To get a device_node of the ``node2`` node you may call this:
+ * of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args);
+ */
+static inline int of_parse_phandle_with_fixed_args(const struct device_node *np,
+ const char *list_name,
+ int cell_count,
+ int index,
+ struct of_phandle_args *out_args)
+{
+ return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
+ index, out_args);
+}
+
+/**
+ * of_parse_phandle_with_optional_args() - Find a node pointed by phandle in a list
+ * @np: pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cells_name: property name that specifies phandles' arguments count
+ * @index: index of a phandle to parse out
+ * @out_args: optional pointer to output arguments structure (will be filled)
+ *
+ * Same as of_parse_phandle_with_args() except that if the cells_name property
+ * is not found, cell_count of 0 is assumed.
+ *
+ * This is useful if you have a phandle which didn't have arguments
+ * before and thus doesn't have a '#*-cells' property but is now migrated to
+ * having arguments while retaining backwards compatibility.
+ */
+static inline int of_parse_phandle_with_optional_args(const struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int index,
+ struct of_phandle_args *out_args)
+{
+ return __of_parse_phandle_with_args(np, list_name, cells_name,
+ 0, index, out_args);
+}
+
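A sketch of the migration case described above, using a hypothetical "widgets"/"#widget-cells" binding:

	struct of_phandle_args args;
	int err;

	err = of_parse_phandle_with_optional_args(np, "widgets", "#widget-cells",
						  0, &args);
	if (!err) {
		/* args.args_count is 0 if the provider predates '#widget-cells' */
		of_node_put(args.np);
	}
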
/**
* of_property_count_u8_elems - Count the number of u8 elements in a property
*
@@ -906,7 +1073,9 @@ static inline struct device_node *of_find_matching_node(
* @propname: name of the property to be searched.
*
* Search for a property in a device node and count the number of u8 elements
- * in it. Returns number of elements on sucess, -EINVAL if the property does
+ * in it.
+ *
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u8 and -ENODATA if the
* property does not have a value.
*/
@@ -923,7 +1092,9 @@ static inline int of_property_count_u8_elems(const struct device_node *np,
* @propname: name of the property to be searched.
*
* Search for a property in a device node and count the number of u16 elements
- * in it. Returns number of elements on sucess, -EINVAL if the property does
+ * in it.
+ *
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u16 and -ENODATA if the
* property does not have a value.
*/
@@ -940,7 +1111,9 @@ static inline int of_property_count_u16_elems(const struct device_node *np,
* @propname: name of the property to be searched.
*
* Search for a property in a device node and count the number of u32 elements
- * in it. Returns number of elements on sucess, -EINVAL if the property does
+ * in it.
+ *
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u32 and -ENODATA if the
* property does not have a value.
*/
@@ -957,7 +1130,9 @@ static inline int of_property_count_u32_elems(const struct device_node *np,
* @propname: name of the property to be searched.
*
* Search for a property in a device node and count the number of u64 elements
- * in it. Returns number of elements on sucess, -EINVAL if the property does
+ * in it.
+ *
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u64 and -ENODATA if the
* property does not have a value.
*/
@@ -978,7 +1153,7 @@ static inline int of_property_count_u64_elems(const struct device_node *np,
* Search for a property in a device tree node and retrieve a list of
* terminated string values (pointer to data, not a copy) in that property.
*
- * If @out_strs is NULL, the number of strings in the property is returned.
+ * Return: If @out_strs is NULL, the number of strings in the property is returned.
*/
static inline int of_property_read_string_array(const struct device_node *np,
const char *propname, const char **out_strs,
@@ -994,10 +1169,11 @@ static inline int of_property_read_string_array(const struct device_node *np,
* @propname: name of the property to be searched.
*
* Search for a property in a device tree node and retrieve the number of null
- * terminated string contain in it. Returns the number of strings on
- * success, -EINVAL if the property does not exist, -ENODATA if property
- * does not have a value, and -EILSEQ if the string is not null-terminated
- * within the length of the property data.
+ * terminated strings contained in it.
+ *
+ * Return: The number of strings on success, -EINVAL if the property does not
+ * exist, -ENODATA if property does not have a value, and -EILSEQ if the string
+ * is not null-terminated within the length of the property data.
*/
static inline int of_property_count_strings(const struct device_node *np,
const char *propname)
@@ -1011,13 +1187,14 @@ static inline int of_property_count_strings(const struct device_node *np,
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
* @index: index of the string in the list of strings
- * @out_string: pointer to null terminated return string, modified only if
+ * @output: pointer to null terminated return string, modified only if
* return value is 0.
*
* Search for a property in a device tree node and retrieve a null
* terminated string value (pointer to data, not a copy) in the list of strings
* contained in that property.
- * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist, -ENODATA if
* property does not have a value, and -EILSEQ if the string is not
* null-terminated within the length of the property data.
*
@@ -1032,12 +1209,14 @@ static inline int of_property_read_string_index(const struct device_node *np,
}
/**
- * of_property_read_bool - Findfrom a property
+ * of_property_read_bool - Find a property
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
*
- * Search for a property in a device node.
- * Returns true if the property exists false otherwise.
+ * Search for a boolean property in a device node. Usage on non-boolean
+ * property types is deprecated.
+ *
+ * Return: true if the property exists, false otherwise.
*/
static inline bool of_property_read_bool(const struct device_node *np,
const char *propname)
@@ -1047,6 +1226,144 @@ static inline bool of_property_read_bool(const struct device_node *np,
return prop ? true : false;
}
+/**
+ * of_property_present - Test if a property is present in a node
+ * @np: device node to search for the property.
+ * @propname: name of the property to be searched.
+ *
+ * Test for a property present in a device node.
+ *
+ * Return: true if the property exists, false otherwise.
+ */
+static inline bool of_property_present(const struct device_node *np, const char *propname)
+{
+ return of_property_read_bool(np, propname);
+}
+
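A one-line sketch of the intended split: of_property_present() for existence tests, of_property_read_bool() only for true boolean properties (the property name and helper are illustrative):

	if (of_property_present(np, "vendor,extra-regs"))	/* hypothetical property */
		configure_extra_regs(np);			/* hypothetical helper */
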
+/**
+ * of_property_read_u8_array - Find and read an array of u8 from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 8-bit value(s) from
+ * it.
+ *
+ * The dts entry for the array should look like:
+ * ``property = /bits/ 8 <0x50 0x60 0x70>;``
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_values is modified only if a valid u8 value can be decoded.
+ */
+static inline int of_property_read_u8_array(const struct device_node *np,
+ const char *propname,
+ u8 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u8_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
+/**
+ * of_property_read_u16_array - Find and read an array of u16 from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 16-bit value(s) from
+ * it.
+ *
+ * The dts entry for the array should look like:
+ * ``property = /bits/ 16 <0x5000 0x6000 0x7000>;``
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_values is modified only if a valid u16 value can be decoded.
+ */
+static inline int of_property_read_u16_array(const struct device_node *np,
+ const char *propname,
+ u16 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u16_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
+/**
+ * of_property_read_u32_array - Find and read an array of 32 bit integers
+ * from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 32-bit value(s) from
+ * it.
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_values is modified only if a valid u32 value can be decoded.
+ */
+static inline int of_property_read_u32_array(const struct device_node *np,
+ const char *propname,
+ u32 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u32_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
+/**
+ * of_property_read_u64_array - Find and read an array of 64 bit integers
+ * from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 64-bit value(s) from
+ * it.
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_values is modified only if a valid u64 value can be decoded.
+ */
+static inline int of_property_read_u64_array(const struct device_node *np,
+ const char *propname,
+ u64 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u64_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
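A sketch of the fixed-size variants built on the variable-length readers, with an illustrative property name:

	u32 map[3];
	int err;

	err = of_property_read_u32_array(np, "vendor,shift-map", map,
					 ARRAY_SIZE(map));
	if (err)
		return err; /* -EINVAL, -ENODATA or -EOVERFLOW, as documented above */
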
static inline int of_property_read_u8(const struct device_node *np,
const char *propname,
u8 *out_value)
@@ -1116,6 +1433,10 @@ static inline int of_property_read_s32(const struct device_node *np,
for (child = of_get_next_available_child(parent, NULL); child != NULL; \
child = of_get_next_available_child(parent, child))
+#define for_each_of_cpu_node(cpu) \
+ for (cpu = of_get_next_cpu_node(NULL); cpu != NULL; \
+ cpu = of_get_next_cpu_node(cpu))
+
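A sketch of the new iterator; of_get_next_cpu_node() drops the reference on the previous node, so no explicit of_node_put() is needed inside the loop:

	struct device_node *cpu;
	int ncpus = 0;

	for_each_of_cpu_node(cpu)
		ncpus++;
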
#define for_each_node_with_property(dn, prop_name) \
for (dn = of_find_node_with_property(NULL, prop_name); dn; \
dn = of_find_node_with_property(dn, prop_name))
@@ -1142,18 +1463,22 @@ static inline int of_get_available_child_count(const struct device_node *np)
return num;
}
+#define _OF_DECLARE_STUB(table, name, compat, fn, fn_type) \
+ static const struct of_device_id __of_table_##name \
+ __attribute__((unused)) \
+ = { .compatible = compat, \
+ .data = (fn == (fn_type)NULL) ? fn : fn }
+
#if defined(CONFIG_OF) && !defined(MODULE)
#define _OF_DECLARE(table, name, compat, fn, fn_type) \
static const struct of_device_id __of_table_##name \
- __used __section(__##table##_of_table) \
+ __used __section("__" #table "_of_table") \
+ __aligned(__alignof__(struct of_device_id)) \
= { .compatible = compat, \
.data = (fn == (fn_type)NULL) ? fn : fn }
#else
#define _OF_DECLARE(table, name, compat, fn, fn_type) \
- static const struct of_device_id __of_table_##name \
- __attribute__((unused)) \
- = { .compatible = compat, \
- .data = (fn == (fn_type)NULL) ? fn : fn }
+ _OF_DECLARE_STUB(table, name, compat, fn, fn_type)
#endif
typedef int (*of_init_fn_2)(struct device_node *, struct device_node *);
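These tables are normally populated through per-subsystem wrappers rather than _OF_DECLARE() directly. A sketch using the irqchip wrapper, assuming IRQCHIP_DECLARE() from <linux/irqchip.h>, which expands to _OF_DECLARE(irqchip, ...) with an of_init_fn_2-style callback; the compatible string is hypothetical:

	static int __init my_intc_init(struct device_node *np,
				       struct device_node *parent)
	{
		/* ... map registers, create an irq_domain ... */
		return 0;
	}
	IRQCHIP_DECLARE(my_intc, "vendor,my-intc", my_intc_init);
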
@@ -1209,6 +1534,8 @@ enum of_reconfig_change {
OF_RECONFIG_CHANGE_REMOVE,
};
+struct notifier_block;
+
#ifdef CONFIG_OF_DYNAMIC
extern int of_reconfig_notifier_register(struct notifier_block *);
extern int of_reconfig_notifier_unregister(struct notifier_block *);
@@ -1274,31 +1601,42 @@ static inline int of_reconfig_get_state_change(unsigned long action,
}
#endif /* CONFIG_OF_DYNAMIC */
-/* CONFIG_OF_RESOLVE api */
-extern int of_resolve_phandles(struct device_node *tree);
-
/**
* of_device_is_system_power_controller - Tells if system-power-controller is found for device_node
* @np: Pointer to the given device_node
*
- * return true if present false otherwise
+ * Return: true if present, false otherwise
*/
static inline bool of_device_is_system_power_controller(const struct device_node *np)
{
return of_property_read_bool(np, "system-power-controller");
}
-/**
+/*
* Overlay support
*/
enum of_overlay_notify_action {
+ OF_OVERLAY_INIT = 0, /* kzalloc() of ovcs sets this value */
OF_OVERLAY_PRE_APPLY,
OF_OVERLAY_POST_APPLY,
OF_OVERLAY_PRE_REMOVE,
OF_OVERLAY_POST_REMOVE,
};
+static inline const char *of_overlay_action_name(enum of_overlay_notify_action action)
+{
+ static const char *const of_overlay_action_name[] = {
+ "init",
+ "pre-apply",
+ "post-apply",
+ "pre-remove",
+ "post-remove",
+ };
+
+ return of_overlay_action_name[action];
+}
+
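A sketch of the helper inside an overlay notifier callback (the callback itself is illustrative; of_overlay_notify_data is the struct defined just below):

	static int my_overlay_notify(struct notifier_block *nb,
				     unsigned long action, void *arg)
	{
		struct of_overlay_notify_data *nd = arg;

		pr_info("overlay %s: target %pOF\n",
			of_overlay_action_name(action), nd->target);
		return NOTIFY_OK;
	}
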
struct of_overlay_notify_data {
struct device_node *overlay;
struct device_node *target;
@@ -1306,27 +1644,28 @@ struct of_overlay_notify_data {
#ifdef CONFIG_OF_OVERLAY
-/* ID based overlays; the API for external users */
-int of_overlay_create(struct device_node *tree);
-int of_overlay_destroy(int id);
-int of_overlay_destroy_all(void);
+int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
+ int *ovcs_id);
+int of_overlay_remove(int *ovcs_id);
+int of_overlay_remove_all(void);
int of_overlay_notifier_register(struct notifier_block *nb);
int of_overlay_notifier_unregister(struct notifier_block *nb);
#else
-static inline int of_overlay_create(struct device_node *tree)
+static inline int of_overlay_fdt_apply(void *overlay_fdt, u32 overlay_fdt_size,
+ int *ovcs_id)
{
return -ENOTSUPP;
}
-static inline int of_overlay_destroy(int id)
+static inline int of_overlay_remove(int *ovcs_id)
{
return -ENOTSUPP;
}
-static inline int of_overlay_destroy_all(void)
+static inline int of_overlay_remove_all(void)
{
return -ENOTSUPP;
}
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 37864734ca50..26a19daf0d09 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __OF_ADDRESS_H
#define __OF_ADDRESS_H
#include <linux/ioport.h>
@@ -5,37 +6,61 @@
#include <linux/of.h>
#include <linux/io.h>
+struct of_bus;
+
struct of_pci_range_parser {
struct device_node *node;
+ struct of_bus *bus;
const __be32 *range;
const __be32 *end;
- int np;
+ int na;
+ int ns;
int pna;
+ bool dma;
};
+#define of_range_parser of_pci_range_parser
struct of_pci_range {
- u32 pci_space;
- u64 pci_addr;
+ union {
+ u64 pci_addr;
+ u64 bus_addr;
+ };
u64 cpu_addr;
u64 size;
u32 flags;
};
+#define of_range of_pci_range
#define for_each_of_pci_range(parser, range) \
for (; of_pci_range_parser_one(parser, range);)
+#define for_each_of_range for_each_of_pci_range
+
+/*
+ * of_range_count - Get the number of "ranges" or "dma-ranges" entries
+ * @parser: Parser state initialized by of_range_parser_init()
+ *
+ * Returns the number of entries or 0 if none.
+ *
+ * Note that calling this within or after the for_each_of_range() iterator
+ * gives an inaccurate result, as it then reports only the number of entries
+ * remaining.
+ */
+static inline int of_range_count(const struct of_range_parser *parser)
+{
+ if (!parser || !parser->node || !parser->range || parser->range == parser->end)
+ return 0;
+ return (parser->end - parser->range) / (parser->na + parser->pna + parser->ns);
+}
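A sketch combining the new parser aliases: count first, then iterate (field names per the unioned struct above; the pr_debug() is illustrative):

	struct of_range_parser parser;
	struct of_range range;
	int n;

	if (of_range_parser_init(&parser, np))
		return -EINVAL;

	n = of_range_count(&parser); /* must precede the iterator, per the note above */
	for_each_of_range(&parser, &range)
		pr_debug("bus 0x%llx -> cpu 0x%llx, size 0x%llx\n",
			 range.bus_addr, range.cpu_addr, range.size);
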
/* Translate a DMA address from device space to CPU space */
extern u64 of_translate_dma_address(struct device_node *dev,
const __be32 *in_addr);
+extern const __be32 *of_translate_dma_region(struct device_node *dev, const __be32 *addr,
+ phys_addr_t *start, size_t *length);
#ifdef CONFIG_OF_ADDRESS
extern u64 of_translate_address(struct device_node *np, const __be32 *addr);
extern int of_address_to_resource(struct device_node *dev, int index,
struct resource *r);
-extern struct device_node *of_find_matching_node_by_address(
- struct device_node *from,
- const struct of_device_id *matches,
- u64 base_address);
extern void __iomem *of_iomap(struct device_node *device, int index);
void __iomem *of_io_request_and_map(struct device_node *device,
int index, const char *name);
@@ -44,16 +69,25 @@ void __iomem *of_io_request_and_map(struct device_node *device,
* the address space flags too. The PCI version uses a BAR number
* instead of an absolute index
*/
-extern const __be32 *of_get_address(struct device_node *dev, int index,
- u64 *size, unsigned int *flags);
+extern const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
+ u64 *size, unsigned int *flags);
+
+int of_property_read_reg(struct device_node *np, int idx, u64 *addr, u64 *size);
extern int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node);
+extern int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node);
extern struct of_pci_range *of_pci_range_parser_one(
struct of_pci_range_parser *parser,
struct of_pci_range *range);
-extern int of_dma_get_range(struct device_node *np, u64 *dma_addr,
- u64 *paddr, u64 *size);
+extern int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r);
+extern int of_pci_range_to_resource(struct of_pci_range *range,
+ struct device_node *np,
+ struct resource *res);
+extern int of_range_to_resource(struct device_node *np, int index,
+ struct resource *res);
extern bool of_dma_is_coherent(struct device_node *np);
#else /* CONFIG_OF_ADDRESS */
static inline void __iomem *of_io_request_and_map(struct device_node *device,
@@ -68,24 +102,27 @@ static inline u64 of_translate_address(struct device_node *np,
return OF_BAD_ADDR;
}
-static inline struct device_node *of_find_matching_node_by_address(
- struct device_node *from,
- const struct of_device_id *matches,
- u64 base_address)
+static inline const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
+ u64 *size, unsigned int *flags)
{
return NULL;
}
-static inline const __be32 *of_get_address(struct device_node *dev, int index,
- u64 *size, unsigned int *flags)
+static inline int of_property_read_reg(struct device_node *np, int idx, u64 *addr, u64 *size)
{
- return NULL;
+ return -ENOSYS;
}
static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node)
{
- return -1;
+ return -ENOSYS;
+}
+
+static inline int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node)
+{
+ return -ENOSYS;
}
static inline struct of_pci_range *of_pci_range_parser_one(
@@ -95,10 +132,23 @@ static inline struct of_pci_range *of_pci_range_parser_one(
return NULL;
}
-static inline int of_dma_get_range(struct device_node *np, u64 *dma_addr,
- u64 *paddr, u64 *size)
+static inline int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r)
+{
+ return -ENOSYS;
+}
+
+static inline int of_pci_range_to_resource(struct of_pci_range *range,
+ struct device_node *np,
+ struct resource *res)
{
- return -ENODEV;
+ return -ENOSYS;
+}
+
+static inline int of_range_to_resource(struct device_node *np, int index,
+ struct resource *res)
+{
+ return -ENOSYS;
}
static inline bool of_dma_is_coherent(struct device_node *np)
@@ -123,34 +173,29 @@ static inline void __iomem *of_iomap(struct device_node *device, int index)
return NULL;
}
#endif
+#define of_range_parser_init of_pci_range_parser_init
-#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI)
-extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
- u64 *size, unsigned int *flags);
-extern int of_pci_address_to_resource(struct device_node *dev, int bar,
- struct resource *r);
-extern int of_pci_range_to_resource(struct of_pci_range *range,
- struct device_node *np,
- struct resource *res);
-#else /* CONFIG_OF_ADDRESS && CONFIG_PCI */
-static inline int of_pci_address_to_resource(struct device_node *dev, int bar,
- struct resource *r)
+static inline const __be32 *of_get_address(struct device_node *dev, int index,
+ u64 *size, unsigned int *flags)
{
- return -ENOSYS;
+ return __of_get_address(dev, index, -1, size, flags);
}
-static inline const __be32 *of_get_pci_address(struct device_node *dev,
- int bar_no, u64 *size, unsigned int *flags)
+static inline const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
+ u64 *size, unsigned int *flags)
{
- return NULL;
+ return __of_get_address(dev, -1, bar_no, size, flags);
}
-static inline int of_pci_range_to_resource(struct of_pci_range *range,
- struct device_node *np,
- struct resource *res)
+
+static inline int of_address_count(struct device_node *np)
{
- return -ENOSYS;
+ struct resource res;
+ int count = 0;
+
+ while (of_address_to_resource(np, count, &res) == 0)
+ count++;
+
+ return count;
}
-#endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */
#endif /* __OF_ADDRESS_H */
-
diff --git a/include/linux/of_clk.h b/include/linux/of_clk.h
new file mode 100644
index 000000000000..31b73a0da9db
--- /dev/null
+++ b/include/linux/of_clk.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * OF clock helpers
+ */
+
+#ifndef __LINUX_OF_CLK_H
+#define __LINUX_OF_CLK_H
+
+struct device_node;
+struct of_device_id;
+
+#if defined(CONFIG_COMMON_CLK) && defined(CONFIG_OF)
+
+unsigned int of_clk_get_parent_count(const struct device_node *np);
+const char *of_clk_get_parent_name(const struct device_node *np, int index);
+void of_clk_init(const struct of_device_id *matches);
+
+#else /* !CONFIG_COMMON_CLK || !CONFIG_OF */
+
+static inline unsigned int of_clk_get_parent_count(const struct device_node *np)
+{
+ return 0;
+}
+static inline const char *of_clk_get_parent_name(const struct device_node *np,
+ int index)
+{
+ return NULL;
+}
+static inline void of_clk_init(const struct of_device_id *matches) {}
+
+#endif /* !CONFIG_COMMON_CLK || !CONFIG_OF */
+
+#endif /* __LINUX_OF_CLK_H */
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index b4ad8b4f8506..2c7a3d4bc775 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -1,14 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_OF_DEVICE_H
#define _LINUX_OF_DEVICE_H
-#include <linux/cpu.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h> /* temporary until merge */
#include <linux/of.h>
-#include <linux/mod_devicetable.h>
struct device;
+struct of_device_id;
+struct kobj_uevent_env;
#ifdef CONFIG_OF
extern const struct of_device_id *of_match_device(
@@ -25,37 +26,20 @@ static inline int of_driver_match_device(struct device *dev,
return of_match_device(drv->of_match_table, dev) != NULL;
}
-extern struct platform_device *of_dev_get(struct platform_device *dev);
-extern void of_dev_put(struct platform_device *dev);
-
-extern int of_device_add(struct platform_device *pdev);
-extern int of_device_register(struct platform_device *ofdev);
-extern void of_device_unregister(struct platform_device *ofdev);
-
-extern const void *of_device_get_match_data(const struct device *dev);
-
extern ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len);
-extern int of_device_request_module(struct device *dev);
-extern void of_device_uevent(struct device *dev, struct kobj_uevent_env *env);
-extern int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env);
-
-static inline void of_device_node_put(struct device *dev)
-{
- of_node_put(dev->of_node);
-}
+extern void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env);
+extern int of_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env);
-static inline struct device_node *of_cpu_device_node_get(int cpu)
+int of_dma_configure_id(struct device *dev,
+ struct device_node *np,
+ bool force_dma, const u32 *id);
+static inline int of_dma_configure(struct device *dev,
+ struct device_node *np,
+ bool force_dma)
{
- struct device *cpu_dev;
- cpu_dev = get_cpu_device(cpu);
- if (!cpu_dev)
- return NULL;
- return of_node_get(cpu_dev->of_node);
+ return of_dma_configure_id(dev, np, force_dma, NULL);
}
-
-int of_dma_configure(struct device *dev, struct device_node *np);
-void of_dma_deconfigure(struct device *dev);
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
@@ -64,52 +48,40 @@ static inline int of_driver_match_device(struct device *dev,
return 0;
}
-static inline void of_device_uevent(struct device *dev,
+static inline void of_device_uevent(const struct device *dev,
struct kobj_uevent_env *env) { }
-static inline const void *of_device_get_match_data(const struct device *dev)
-{
- return NULL;
-}
-
static inline int of_device_modalias(struct device *dev,
char *str, ssize_t len)
{
return -ENODEV;
}
-static inline int of_device_request_module(struct device *dev)
-{
- return -ENODEV;
-}
-
-static inline int of_device_uevent_modalias(struct device *dev,
+static inline int of_device_uevent_modalias(const struct device *dev,
struct kobj_uevent_env *env)
{
return -ENODEV;
}
-static inline void of_device_node_put(struct device *dev) { }
-
-static inline const struct of_device_id *__of_match_device(
+static inline const struct of_device_id *of_match_device(
const struct of_device_id *matches, const struct device *dev)
{
return NULL;
}
-#define of_match_device(matches, dev) \
- __of_match_device(of_match_ptr(matches), (dev))
-static inline struct device_node *of_cpu_device_node_get(int cpu)
+static inline int of_dma_configure_id(struct device *dev,
+ struct device_node *np,
+ bool force_dma,
+ const u32 *id)
{
- return NULL;
+ return 0;
}
-
-static inline int of_dma_configure(struct device *dev, struct device_node *np)
+static inline int of_dma_configure(struct device *dev,
+ struct device_node *np,
+ bool force_dma)
{
return 0;
}
-static inline void of_dma_deconfigure(struct device *dev)
-{}
#endif /* CONFIG_OF */
#endif /* _LINUX_OF_DEVICE_H */
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h
index b90d8ec57c1f..fd706cdf255c 100644
--- a/include/linux/of_dma.h
+++ b/include/linux/of_dma.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* OF helpers for DMA request / controller
*
* Based on of_gpio.h
*
* Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_OF_DMA_H
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 013c5418aeec..d69ad5bb1eb1 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for working with the Flattened Device Tree data format
*
* Copyright 2009 Benjamin Herrenschmidt, IBM Corp
* benh@kernel.crashing.org
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#ifndef _LINUX_OF_FDT_H
@@ -26,15 +23,6 @@
struct device_node;
/* For scanning an arbitrary device-tree at any time */
-extern char *of_fdt_get_string(const void *blob, u32 offset);
-extern void *of_fdt_get_property(const void *blob,
- unsigned long node,
- const char *name,
- int *size);
-extern bool of_fdt_is_big_endian(const void *blob,
- unsigned long node);
-extern int of_fdt_match(const void *blob, unsigned long node,
- const char *const *compat);
extern void *of_fdt_unflatten_tree(const unsigned long *blob,
struct device_node *dad,
struct device_node **mynodes);
@@ -47,6 +35,12 @@ extern void *initial_boot_params;
extern char __dtb_start[];
extern char __dtb_end[];
+/* Other Prototypes */
+extern u64 of_flat_dt_translate_address(unsigned long node);
+extern void of_fdt_limit_memory(int limit);
+#endif /* CONFIG_OF_FLATTREE */
+
+#ifdef CONFIG_OF_EARLY_FLATTREE
/* For scanning the flat device-tree at boot time */
extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname,
int depth, void *data),
@@ -61,28 +55,20 @@ extern int of_get_flat_dt_subnode_by_name(unsigned long node,
extern const void *of_get_flat_dt_prop(unsigned long node, const char *name,
int *size);
extern int of_flat_dt_is_compatible(unsigned long node, const char *name);
-extern int of_flat_dt_match(unsigned long node, const char *const *matches);
extern unsigned long of_get_flat_dt_root(void);
-extern int of_get_flat_dt_size(void);
extern uint32_t of_get_flat_dt_phandle(unsigned long node);
-extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
- int depth, void *data);
-extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
- int depth, void *data);
+extern int early_init_dt_scan_chosen(char *cmdline);
+extern int early_init_dt_scan_memory(void);
+extern void early_init_dt_check_for_usable_mem_range(void);
extern int early_init_dt_scan_chosen_stdout(void);
extern void early_init_fdt_scan_reserved_mem(void);
extern void early_init_fdt_reserve_self(void);
extern void early_init_dt_add_memory_arch(u64 base, u64 size);
-extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size);
-extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
- bool no_map);
-extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align);
extern u64 dt_mem_next_cell(int s, const __be32 **cellp);
/* Early flat tree scan hooks */
-extern int early_init_dt_scan_root(unsigned long node, const char *uname,
- int depth, void *data);
+extern int early_init_dt_scan_root(void);
extern bool early_init_dt_scan(void *params);
extern bool early_init_dt_verify(void *params);
@@ -97,16 +83,15 @@ extern void unflatten_device_tree(void);
extern void unflatten_and_copy_device_tree(void);
extern void early_init_devtree(void *);
extern void early_get_first_memblock_info(void *, phys_addr_t *);
-extern u64 of_flat_dt_translate_address(unsigned long node);
-extern void of_fdt_limit_memory(int limit);
-#else /* CONFIG_OF_FLATTREE */
+#else /* CONFIG_OF_EARLY_FLATTREE */
+static inline void early_init_dt_check_for_usable_mem_range(void) {}
static inline int early_init_dt_scan_chosen_stdout(void) { return -ENODEV; }
static inline void early_init_fdt_scan_reserved_mem(void) {}
static inline void early_init_fdt_reserve_self(void) {}
static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
static inline void unflatten_device_tree(void) {}
static inline void unflatten_and_copy_device_tree(void) {}
-#endif /* CONFIG_OF_FLATTREE */
+#endif /* CONFIG_OF_EARLY_FLATTREE */
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_OF_FDT_H */
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index ca10f43564de..d0f66a5e1b2a 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -1,161 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* OF helpers for the GPIO API
*
* Copyright (c) 2007-2008 MontaVista Software, Inc.
*
* Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_OF_GPIO_H
#define __LINUX_OF_GPIO_H
#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio.h> /* FIXME: Shouldn't be here */
#include <linux/of.h>
struct device_node;
-/*
- * This is Linux-specific flags. By default controllers' and Linux' mapping
- * match, but GPIO controllers are free to translate their own flags to
- * Linux-specific in their .xlate callback. Though, 1:1 mapping is recommended.
- */
-enum of_gpio_flags {
- OF_GPIO_ACTIVE_LOW = 0x1,
- OF_GPIO_SINGLE_ENDED = 0x2,
- OF_GPIO_OPEN_DRAIN = 0x4,
- OF_GPIO_SLEEP_MAY_LOOSE_VALUE = 0x8,
-};
-
#ifdef CONFIG_OF_GPIO
-/*
- * OF GPIO chip for memory mapped banks
- */
-struct of_mm_gpio_chip {
- struct gpio_chip gc;
- void (*save_regs)(struct of_mm_gpio_chip *mm_gc);
- void __iomem *regs;
-};
-
-static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc)
-{
- return container_of(gc, struct of_mm_gpio_chip, gc);
-}
-
-extern int of_get_named_gpio_flags(struct device_node *np,
- const char *list_name, int index, enum of_gpio_flags *flags);
-
-extern int of_mm_gpiochip_add_data(struct device_node *np,
- struct of_mm_gpio_chip *mm_gc,
- void *data);
-static inline int of_mm_gpiochip_add(struct device_node *np,
- struct of_mm_gpio_chip *mm_gc)
-{
- return of_mm_gpiochip_add_data(np, mm_gc, NULL);
-}
-extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
-
-extern int of_gpio_simple_xlate(struct gpio_chip *gc,
- const struct of_phandle_args *gpiospec,
- u32 *flags);
+extern int of_get_named_gpio(const struct device_node *np,
+ const char *list_name, int index);
#else /* CONFIG_OF_GPIO */
-/* Drivers may not strictly depend on the GPIO support, so let them link. */
-static inline int of_get_named_gpio_flags(struct device_node *np,
- const char *list_name, int index, enum of_gpio_flags *flags)
-{
- if (flags)
- *flags = 0;
-
- return -ENOSYS;
-}
+#include <linux/errno.h>
-static inline int of_gpio_simple_xlate(struct gpio_chip *gc,
- const struct of_phandle_args *gpiospec,
- u32 *flags)
+/* Drivers may not strictly depend on the GPIO support, so let them link. */
+static inline int of_get_named_gpio(const struct device_node *np,
+ const char *propname, int index)
{
return -ENOSYS;
}
#endif /* CONFIG_OF_GPIO */
-/**
- * of_gpio_named_count() - Count GPIOs for a device
- * @np: device node to count GPIOs for
- * @propname: property name containing gpio specifier(s)
- *
- * The function returns the count of GPIOs specified for a node.
- * Note that the empty GPIO specifiers count too. Returns either
- * Number of gpios defined in property,
- * -EINVAL for an incorrectly formed gpios property, or
- * -ENOENT for a missing gpios property
- *
- * Example:
- * gpios = <0
- * &gpio1 1 2
- * 0
- * &gpio2 3 4>;
- *
- * The above example defines four GPIOs, two of which are not specified.
- * This function will return '4'
- */
-static inline int of_gpio_named_count(struct device_node *np, const char* propname)
-{
- return of_count_phandle_with_args(np, propname, "#gpio-cells");
-}
-
-/**
- * of_gpio_count() - Count GPIOs for a device
- * @np: device node to count GPIOs for
- *
- * Same as of_gpio_named_count, but hard coded to use the 'gpios' property
- */
-static inline int of_gpio_count(struct device_node *np)
-{
- return of_gpio_named_count(np, "gpios");
-}
-
-static inline int of_get_gpio_flags(struct device_node *np, int index,
- enum of_gpio_flags *flags)
-{
- return of_get_named_gpio_flags(np, "gpios", index, flags);
-}
-
-/**
- * of_get_named_gpio() - Get a GPIO number to use with GPIO API
- * @np: device node to get GPIO from
- * @propname: Name of property containing gpio specifier(s)
- * @index: index of the GPIO
- *
- * Returns GPIO number to use with Linux generic GPIO API, or one of the errno
- * value on the error condition.
- */
-static inline int of_get_named_gpio(struct device_node *np,
- const char *propname, int index)
-{
- return of_get_named_gpio_flags(np, propname, index, NULL);
-}
-
-/**
- * of_get_gpio() - Get a GPIO number to use with GPIO API
- * @np: device node to get GPIO from
- * @index: index of the GPIO
- *
- * Returns GPIO number to use with Linux generic GPIO API, or one of the errno
- * value on the error condition.
- */
-static inline int of_get_gpio(struct device_node *np, int index)
-{
- return of_get_gpio_flags(np, index, NULL);
-}
-
#endif /* __LINUX_OF_GPIO_H */
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
index 3e058f05ab04..4d7756087b6b 100644
--- a/include/linux/of_graph.h
+++ b/include/linux/of_graph.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* OF graph binding parsing helpers
*
@@ -6,10 +7,6 @@
*
* Copyright (C) 2012 Renesas Electronics Corp.
* Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_OF_GRAPH_H
#define __LINUX_OF_GRAPH_H
@@ -41,6 +38,7 @@ struct of_endpoint {
child = of_graph_get_next_endpoint(parent, child))
#ifdef CONFIG_OF
+bool of_graph_is_present(const struct device_node *node);
int of_graph_parse_endpoint(const struct device_node *node,
struct of_endpoint *endpoint);
int of_graph_get_endpoint_count(const struct device_node *np);
@@ -59,6 +57,11 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node,
u32 port, u32 endpoint);
#else
+static inline bool of_graph_is_present(const struct device_node *node)
+{
+ return false;
+}
+
static inline int of_graph_parse_endpoint(const struct device_node *node,
struct of_endpoint *endpoint)
{
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index 13394ac83c66..9a5e6b410dd2 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -1,41 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __OF_IOMMU_H
#define __OF_IOMMU_H
-#include <linux/device.h>
-#include <linux/iommu.h>
-#include <linux/of.h>
+struct device;
+struct device_node;
+struct iommu_ops;
#ifdef CONFIG_OF_IOMMU
-extern int of_get_dma_window(struct device_node *dn, const char *prefix,
- int index, unsigned long *busno, dma_addr_t *addr,
- size_t *size);
-
extern const struct iommu_ops *of_iommu_configure(struct device *dev,
- struct device_node *master_np);
+ struct device_node *master_np,
+ const u32 *id);
+
+extern void of_iommu_get_resv_regions(struct device *dev,
+ struct list_head *list);
#else
-static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
- int index, unsigned long *busno, dma_addr_t *addr,
- size_t *size)
+static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
+ struct device_node *master_np,
+ const u32 *id)
{
- return -EINVAL;
+ return NULL;
}
-static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
- struct device_node *master_np)
+static inline void of_iommu_get_resv_regions(struct device *dev,
+ struct list_head *list)
{
- return NULL;
}
#endif /* CONFIG_OF_IOMMU */
-extern struct of_device_id __iommu_of_table;
-
-typedef int (*of_iommu_init_fn)(struct device_node *);
-
-#define IOMMU_OF_DECLARE(name, compat, fn) \
- _OF_DECLARE(iommu, name, compat, fn, of_iommu_init_fn)
-
#endif /* __OF_IOMMU_H */
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 1e0deb8e8494..d6d3eae2f145 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __OF_IRQ_H
#define __OF_IRQ_H
@@ -19,12 +20,12 @@ typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
extern unsigned int of_irq_workarounds;
extern struct device_node *of_irq_dflt_pic;
-extern int of_irq_parse_oldworld(struct device_node *device, int index,
- struct of_phandle_args *out_irq);
+int of_irq_parse_oldworld(const struct device_node *device, int index,
+ struct of_phandle_args *out_irq);
#else /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
#define of_irq_workarounds (0)
#define of_irq_dflt_pic (NULL)
-static inline int of_irq_parse_oldworld(struct device_node *device, int index,
+static inline int of_irq_parse_oldworld(const struct device_node *device, int index,
struct of_phandle_args *out_irq)
{
return -EINVAL;
@@ -32,15 +33,14 @@ static inline int of_irq_parse_oldworld(struct device_node *device, int index,
#endif /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
extern int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq);
-extern int of_irq_parse_one(struct device_node *device, int index,
- struct of_phandle_args *out_irq);
extern unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data);
extern int of_irq_to_resource(struct device_node *dev, int index,
struct resource *r);
-extern void of_irq_init(const struct of_device_id *matches);
-
#ifdef CONFIG_OF_IRQ
+extern void of_irq_init(const struct of_device_id *matches);
+extern int of_irq_parse_one(struct device_node *device, int index,
+ struct of_phandle_args *out_irq);
extern int of_irq_count(struct device_node *dev);
extern int of_irq_get(struct device_node *dev, int index);
extern int of_irq_get_byname(struct device_node *dev, const char *name);
@@ -51,10 +51,19 @@ extern struct irq_domain *of_msi_get_domain(struct device *dev,
struct device_node *np,
enum irq_domain_bus_token token);
extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
- u32 rid);
+ u32 id,
+ u32 bus_token);
extern void of_msi_configure(struct device *dev, struct device_node *np);
-u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in);
+u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in);
#else
+static inline void of_irq_init(const struct of_device_id *matches)
+{
+}
+static inline int of_irq_parse_one(struct device_node *device, int index,
+ struct of_phandle_args *out_irq)
+{
+ return -EINVAL;
+}
static inline int of_irq_count(struct device_node *dev)
{
return 0;
@@ -84,17 +93,17 @@ static inline struct irq_domain *of_msi_get_domain(struct device *dev,
return NULL;
}
static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
- u32 rid)
+ u32 id, u32 bus_token)
{
return NULL;
}
static inline void of_msi_configure(struct device *dev, struct device_node *np)
{
}
-static inline u32 of_msi_map_rid(struct device *dev,
- struct device_node *msi_np, u32 rid_in)
+static inline u32 of_msi_map_id(struct device *dev,
+ struct device_node *msi_np, u32 id_in)
{
- return rid_in;
+ return id_in;
}
#endif
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index f5db93bcd069..8a52ef2e6fa6 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -1,36 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* OF helpers for the MDIO (Ethernet PHY) API
*
* Copyright (c) 2009 Secret Lab Technologies, Ltd.
- *
- * This file is released under the GPLv2
*/
#ifndef __LINUX_OF_MDIO_H
#define __LINUX_OF_MDIO_H
+#include <linux/device.h>
#include <linux/phy.h>
#include <linux/of.h>
#if IS_ENABLED(CONFIG_OF_MDIO)
-extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
-extern struct phy_device *of_phy_find_device(struct device_node *phy_np);
-extern struct phy_device *of_phy_connect(struct net_device *dev,
- struct device_node *phy_np,
- void (*hndlr)(struct net_device *),
- u32 flags, phy_interface_t iface);
-extern struct phy_device *
+bool of_mdiobus_child_is_phy(struct device_node *child);
+int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
+ struct module *owner);
+
+static inline int of_mdiobus_register(struct mii_bus *mdio,
+ struct device_node *np)
+{
+ return __of_mdiobus_register(mdio, np, THIS_MODULE);
+}
+
+int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
+ struct device_node *np, struct module *owner);
+
+static inline int devm_of_mdiobus_register(struct device *dev,
+ struct mii_bus *mdio,
+ struct device_node *np)
+{
+ return __devm_of_mdiobus_register(dev, mdio, np, THIS_MODULE);
+}
+
+struct mdio_device *of_mdio_find_device(struct device_node *np);
+struct phy_device *of_phy_find_device(struct device_node *phy_np);
+struct phy_device *
+of_phy_connect(struct net_device *dev, struct device_node *phy_np,
+ void (*hndlr)(struct net_device *), u32 flags,
+ phy_interface_t iface);
+struct phy_device *
of_phy_get_and_connect(struct net_device *dev, struct device_node *np,
void (*hndlr)(struct net_device *));
-struct phy_device *of_phy_attach(struct net_device *dev,
- struct device_node *phy_np, u32 flags,
- phy_interface_t iface);
-
-extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
-extern int of_phy_register_fixed_link(struct device_node *np);
-extern void of_phy_deregister_fixed_link(struct device_node *np);
-extern bool of_phy_is_fixed_link(struct device_node *np);
+struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
+int of_phy_register_fixed_link(struct device_node *np);
+void of_phy_deregister_fixed_link(struct device_node *np);
+bool of_phy_is_fixed_link(struct device_node *np);
+int of_mdiobus_phy_device_register(struct mii_bus *mdio, struct phy_device *phy,
+ struct device_node *child, u32 addr);
static inline int of_mdio_parse_addr(struct device *dev,
const struct device_node *np)
@@ -55,6 +73,11 @@ static inline int of_mdio_parse_addr(struct device *dev,
}
#else /* CONFIG_OF_MDIO */
+static inline bool of_mdiobus_child_is_phy(struct device_node *child)
+{
+ return false;
+}
+
static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
{
/*
@@ -65,6 +88,18 @@ static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *
return mdiobus_register(mdio);
}
+static inline int devm_of_mdiobus_register(struct device *dev,
+ struct mii_bus *mdio,
+ struct device_node *np)
+{
+ return devm_mdiobus_register(dev, mdio);
+}
+
+static inline struct mdio_device *of_mdio_find_device(struct device_node *np)
+{
+ return NULL;
+}
+
static inline struct phy_device *of_phy_find_device(struct device_node *phy_np)
{
return NULL;
@@ -85,13 +120,6 @@ of_phy_get_and_connect(struct net_device *dev, struct device_node *np,
return NULL;
}
-static inline struct phy_device *of_phy_attach(struct net_device *dev,
- struct device_node *phy_np,
- u32 flags, phy_interface_t iface)
-{
- return NULL;
-}
-
static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
{
return NULL;
@@ -113,6 +141,13 @@ static inline bool of_phy_is_fixed_link(struct device_node *np)
{
return false;
}
+
+static inline int of_mdiobus_phy_device_register(struct mii_bus *mdio,
+ struct phy_device *phy,
+ struct device_node *child, u32 addr)
+{
+ return -ENOSYS;
+}
#endif
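
With the THIS_MODULE plumbing above, callers keep using the short inline
wrappers; a hedged sketch of a typical MDIO controller probe (the bus name
is made up, and the mandatory read/write accessors are elided):

#include <linux/device.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static int example_mdio_probe(struct device *dev, struct device_node *np)
{
	struct mii_bus *bus = devm_mdiobus_alloc(dev);

	if (!bus)
		return -ENOMEM;

	bus->name = "example-mdio";	/* hypothetical controller name */
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
	bus->parent = dev;
	/* A real bus must also provide bus->read/bus->write accessors. */

	/* Registers the bus, scans @np for PHY children, and records
	 * THIS_MODULE as the owner via the inline wrapper. */
	return devm_of_mdiobus_register(dev, bus, np);
}
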
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index 9cd72aab76fe..d88715a0b3a5 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -1,28 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* OF helpers for network devices.
- *
- * This file is released under the GPLv2
*/
#ifndef __LINUX_OF_NET_H
#define __LINUX_OF_NET_H
-#ifdef CONFIG_OF_NET
+#include <linux/phy.h>
+
+#if defined(CONFIG_OF) && defined(CONFIG_NET)
#include <linux/of.h>
struct net_device;
-extern int of_get_phy_mode(struct device_node *np);
-extern const void *of_get_mac_address(struct device_node *np);
+extern int of_get_phy_mode(struct device_node *np, phy_interface_t *interface);
+extern int of_get_mac_address(struct device_node *np, u8 *mac);
+extern int of_get_mac_address_nvmem(struct device_node *np, u8 *mac);
+int of_get_ethdev_address(struct device_node *np, struct net_device *dev);
extern struct net_device *of_find_net_device_by_node(struct device_node *np);
#else
-static inline int of_get_phy_mode(struct device_node *np)
+static inline int of_get_phy_mode(struct device_node *np,
+ phy_interface_t *interface)
{
return -ENODEV;
}
-static inline const void *of_get_mac_address(struct device_node *np)
+static inline int of_get_mac_address(struct device_node *np, u8 *mac)
{
- return NULL;
+ return -ENODEV;
+}
+
+static inline int of_get_mac_address_nvmem(struct device_node *np, u8 *mac)
+{
+ return -ENODEV;
+}
+
+static inline int of_get_ethdev_address(struct device_node *np, struct net_device *dev)
+{
+ return -ENODEV;
}
static inline struct net_device *of_find_net_device_by_node(struct device_node *np)
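
With the int-returning of_get_mac_address() above, the usual probe-time
pattern becomes an explicit fallback; a sketch assuming a kernel that also
provides the eth_hw_addr_set()/eth_hw_addr_random() helpers:

#include <linux/etherdevice.h>
#include <linux/of_net.h>

static void example_assign_mac(struct net_device *ndev,
			       struct device_node *np)
{
	u8 mac[ETH_ALEN];

	/* Nonzero means no usable address in DT or nvmem. */
	if (of_get_mac_address(np, mac))
		eth_hw_addr_random(ndev);	/* fall back to a random MAC */
	else
		eth_hw_addr_set(ndev, mac);
}
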
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 518c8d20647a..29658c0ee71f 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -1,32 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __OF_PCI_H
#define __OF_PCI_H
-#include <linux/pci.h>
-#include <linux/msi.h>
+#include <linux/types.h>
+#include <linux/errno.h>
struct pci_dev;
-struct of_phandle_args;
struct device_node;
-#ifdef CONFIG_OF_PCI
-int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq);
+#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PCI)
struct device_node *of_pci_find_child_device(struct device_node *parent,
unsigned int devfn);
int of_pci_get_devfn(struct device_node *np);
-int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
-int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
-int of_get_pci_domain_nr(struct device_node *node);
-int of_pci_get_max_link_speed(struct device_node *node);
void of_pci_check_probe_only(void);
-int of_pci_map_rid(struct device_node *np, u32 rid,
- const char *map_name, const char *map_mask_name,
- struct device_node **target, u32 *id_out);
#else
-static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
-{
- return 0;
-}
-
static inline struct device_node *of_pci_find_child_device(struct device_node *parent,
unsigned int devfn)
{
@@ -38,50 +25,16 @@ static inline int of_pci_get_devfn(struct device_node *np)
return -EINVAL;
}
-static inline int
-of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- return 0;
-}
-
-static inline int
-of_pci_parse_bus_range(struct device_node *node, struct resource *res)
-{
- return -EINVAL;
-}
-
-static inline int
-of_get_pci_domain_nr(struct device_node *node)
-{
- return -1;
-}
-
-static inline int of_pci_map_rid(struct device_node *np, u32 rid,
- const char *map_name, const char *map_mask_name,
- struct device_node **target, u32 *id_out)
-{
- return -EINVAL;
-}
-
-static inline int
-of_pci_get_max_link_speed(struct device_node *node)
-{
- return -EINVAL;
-}
-
static inline void of_pci_check_probe_only(void) { }
#endif
-#if defined(CONFIG_OF_ADDRESS)
-int of_pci_get_host_bridge_resources(struct device_node *dev,
- unsigned char busno, unsigned char bus_max,
- struct list_head *resources, resource_size_t *io_base);
+#if IS_ENABLED(CONFIG_OF_IRQ)
+int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
#else
-static inline int of_pci_get_host_bridge_resources(struct device_node *dev,
- unsigned char busno, unsigned char bus_max,
- struct list_head *resources, resource_size_t *io_base)
+static inline int
+of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
{
- return -EINVAL;
+ return 0;
}
#endif
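
of_irq_parse_and_map_pci() is normally wired up as a host bridge's map_irq
callback rather than called directly; a sketch under that assumption:

#include <linux/of_pci.h>
#include <linux/pci.h>

static void example_setup_intx(struct pci_host_bridge *bridge)
{
	/* Legacy INTx lines are resolved through the DT interrupt-map;
	 * the PCI core applies the usual slot/pin swizzling first. */
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;
}
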
diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
index 7e09244bb679..89e4eb076a01 100644
--- a/include/linux/of_pdt.h
+++ b/include/linux/of_pdt.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Definitions for building a device tree by calling into the
* Open Firmware PROM.
*
* Copyright (C) 2010 Andres Salomon <dilinger@queued.net>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_OF_PDT_H
@@ -39,6 +35,4 @@ extern void *prom_early_alloc(unsigned long size);
/* for building the device tree */
extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops);
-extern void (*of_pdt_build_more)(struct device_node *dp);
-
#endif /* _LINUX_OF_PDT_H */
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index e0d1946270f3..d8045bcfc35e 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -1,22 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _LINUX_OF_PLATFORM_H
#define _LINUX_OF_PLATFORM_H
/*
* Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
*/
-#include <linux/device.h>
#include <linux/mod_devicetable.h>
-#include <linux/pm.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+struct device;
+struct of_device_id;
+
/**
* struct of_dev_auxdata - lookup table entry for device names & platform_data
* @compatible: compatible value of node to match against node
@@ -57,18 +53,32 @@ extern const struct of_device_id of_default_bus_match_table[];
extern struct platform_device *of_device_alloc(struct device_node *np,
const char *bus_id,
struct device *parent);
+
+extern int of_device_add(struct platform_device *pdev);
+extern int of_device_register(struct platform_device *ofdev);
+extern void of_device_unregister(struct platform_device *ofdev);
+
+#ifdef CONFIG_OF
extern struct platform_device *of_find_device_by_node(struct device_node *np);
+#else
+static inline struct platform_device *of_find_device_by_node(struct device_node *np)
+{
+ return NULL;
+}
+#endif
+
+extern int of_platform_bus_probe(struct device_node *root,
+ const struct of_device_id *matches,
+ struct device *parent);
+#ifdef CONFIG_OF_ADDRESS
/* Platform devices and busses creation */
extern struct platform_device *of_platform_device_create(struct device_node *np,
const char *bus_id,
struct device *parent);
extern int of_platform_device_destroy(struct device *dev, void *data);
-extern int of_platform_bus_probe(struct device_node *root,
- const struct of_device_id *matches,
- struct device *parent);
-#ifdef CONFIG_OF_ADDRESS
+
extern int of_platform_populate(struct device_node *root,
const struct of_device_id *matches,
const struct of_dev_auxdata *lookup,
@@ -82,6 +92,18 @@ extern int devm_of_platform_populate(struct device *dev);
extern void devm_of_platform_depopulate(struct device *dev);
#else
+/* Platform devices and busses creation */
+static inline struct platform_device *of_platform_device_create(struct device_node *np,
+ const char *bus_id,
+ struct device *parent)
+{
+ return NULL;
+}
+static inline int of_platform_device_destroy(struct device *dev, void *data)
+{
+ return -ENODEV;
+}
+
static inline int of_platform_populate(struct device_node *root,
const struct of_device_id *matches,
const struct of_dev_auxdata *lookup,
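
After this reshuffle a bus driver's probe usually needs only the managed
populate helper; a minimal sketch:

#include <linux/of_platform.h>
#include <linux/platform_device.h>

static int example_bus_probe(struct platform_device *pdev)
{
	/* Creates platform devices for the children of pdev's DT node
	 * and removes them again automatically on driver detach. */
	return devm_of_platform_populate(&pdev->dev);
}
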
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index f8e1992d6423..4de2a24cadc9 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -1,7 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __OF_RESERVED_MEM_H
#define __OF_RESERVED_MEM_H
#include <linux/device.h>
+#include <linux/of.h>
struct of_phandle_args;
struct reserved_mem_ops;
@@ -25,36 +27,43 @@ struct reserved_mem_ops {
typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
+#ifdef CONFIG_OF_RESERVED_MEM
+
#define RESERVEDMEM_OF_DECLARE(name, compat, init) \
_OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn)
-#ifdef CONFIG_OF_RESERVED_MEM
-
int of_reserved_mem_device_init_by_idx(struct device *dev,
struct device_node *np, int idx);
+int of_reserved_mem_device_init_by_name(struct device *dev,
+ struct device_node *np,
+ const char *name);
void of_reserved_mem_device_release(struct device *dev);
-int early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
- phys_addr_t align,
- phys_addr_t start,
- phys_addr_t end,
- bool nomap,
- phys_addr_t *res_base);
-
-void fdt_init_reserved_mem(void);
-void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
- phys_addr_t base, phys_addr_t size);
+struct reserved_mem *of_reserved_mem_lookup(struct device_node *np);
#else
+
+#define RESERVEDMEM_OF_DECLARE(name, compat, init) \
+ _OF_DECLARE_STUB(reservedmem, name, compat, init, reservedmem_of_init_fn)
+
static inline int of_reserved_mem_device_init_by_idx(struct device *dev,
struct device_node *np, int idx)
{
return -ENOSYS;
}
+
+static inline int of_reserved_mem_device_init_by_name(struct device *dev,
+ struct device_node *np,
+ const char *name)
+{
+ return -ENOSYS;
+}
+
static inline void of_reserved_mem_device_release(struct device *pdev) { }
-static inline void fdt_init_reserved_mem(void) { }
-static inline void fdt_reserved_mem_save_node(unsigned long node,
- const char *uname, phys_addr_t base, phys_addr_t size) { }
+static inline struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
+{
+ return NULL;
+}
#endif
/**
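
A sketch of the new by-name lookup at probe time; the region name
"dma-pool" is hypothetical and would match an entry in the node's
memory-region-names property:

#include <linux/of_reserved_mem.h>

static int example_claim_region(struct device *dev)
{
	/* Attach the named reserved-memory region to this device. */
	int ret = of_reserved_mem_device_init_by_name(dev, dev->of_node,
						      "dma-pool");
	if (ret)
		return ret;

	/* ... allocate from the region, e.g. via dma_alloc_coherent() ... */

	of_reserved_mem_device_release(dev);
	return 0;
}
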
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index d2fa9ca42e9a..0f4a8903922a 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* ASN.1 Object identifier (OID) registry
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
*/
#ifndef _LINUX_OID_REGISTRY_H
@@ -23,8 +19,14 @@
enum OID {
OID_id_dsa_with_sha1, /* 1.2.840.10030.4.3 */
OID_id_dsa, /* 1.2.840.10040.4.1 */
- OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */
OID_id_ecPublicKey, /* 1.2.840.10045.2.1 */
+ OID_id_prime192v1, /* 1.2.840.10045.3.1.1 */
+ OID_id_prime256v1, /* 1.2.840.10045.3.1.7 */
+ OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */
+ OID_id_ecdsa_with_sha224, /* 1.2.840.10045.4.3.1 */
+ OID_id_ecdsa_with_sha256, /* 1.2.840.10045.4.3.2 */
+ OID_id_ecdsa_with_sha384, /* 1.2.840.10045.4.3.3 */
+ OID_id_ecdsa_with_sha512, /* 1.2.840.10045.4.3.4 */
/* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */
OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */
@@ -52,6 +54,10 @@ enum OID {
OID_md4, /* 1.2.840.113549.2.4 */
OID_md5, /* 1.2.840.113549.2.5 */
+ OID_mskrb5, /* 1.2.840.48018.1.2.2 */
+ OID_krb5, /* 1.2.840.113554.1.2.2 */
+ OID_krb5u2u, /* 1.2.840.113554.1.2.2.3 */
+
/* Microsoft Authenticode & Software Publishing */
OID_msIndirectData, /* 1.3.6.1.4.1.311.2.1.4 */
OID_msStatementType, /* 1.3.6.1.4.1.311.2.1.11 */
@@ -60,8 +66,16 @@ enum OID {
OID_msIndividualSPKeyPurpose, /* 1.3.6.1.4.1.311.2.1.21 */
OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */
+ OID_ntlmssp, /* 1.3.6.1.4.1.311.2.2.10 */
+
+ OID_spnego, /* 1.3.6.1.5.5.2 */
+
+ OID_IAKerb, /* 1.3.6.1.5.2.5 */
+ OID_PKU2U, /* 1.3.6.1.5.2.7 */
+ OID_Scram, /* 1.3.6.1.5.5.14 */
OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */
OID_sha1, /* 1.3.14.3.2.26 */
+ OID_id_ansip384r1, /* 1.3.132.0.34 */
OID_sha256, /* 2.16.840.1.101.3.4.2.1 */
OID_sha384, /* 2.16.840.1.101.3.4.2.2 */
OID_sha512, /* 2.16.840.1.101.3.4.2.3 */
@@ -93,10 +107,44 @@ enum OID {
OID_authorityKeyIdentifier, /* 2.5.29.35 */
OID_extKeyUsage, /* 2.5.29.37 */
+ /* Heimdal mechanisms */
+ OID_NetlogonMechanism, /* 1.2.752.43.14.2 */
+ OID_appleLocalKdcSupported, /* 1.2.752.43.14.3 */
+
+ /* EC-RDSA */
+ OID_gostCPSignA, /* 1.2.643.2.2.35.1 */
+ OID_gostCPSignB, /* 1.2.643.2.2.35.2 */
+ OID_gostCPSignC, /* 1.2.643.2.2.35.3 */
+ OID_gost2012PKey256, /* 1.2.643.7.1.1.1.1 */
+ OID_gost2012PKey512, /* 1.2.643.7.1.1.1.2 */
+ OID_gost2012Digest256, /* 1.2.643.7.1.1.2.2 */
+ OID_gost2012Digest512, /* 1.2.643.7.1.1.2.3 */
+ OID_gost2012Signature256, /* 1.2.643.7.1.1.3.2 */
+ OID_gost2012Signature512, /* 1.2.643.7.1.1.3.3 */
+ OID_gostTC26Sign256A, /* 1.2.643.7.1.2.1.1.1 */
+ OID_gostTC26Sign256B, /* 1.2.643.7.1.2.1.1.2 */
+ OID_gostTC26Sign256C, /* 1.2.643.7.1.2.1.1.3 */
+ OID_gostTC26Sign256D, /* 1.2.643.7.1.2.1.1.4 */
+ OID_gostTC26Sign512A, /* 1.2.643.7.1.2.1.2.1 */
+ OID_gostTC26Sign512B, /* 1.2.643.7.1.2.1.2.2 */
+ OID_gostTC26Sign512C, /* 1.2.643.7.1.2.1.2.3 */
+
+ /* OSCCA */
+ OID_sm2, /* 1.2.156.10197.1.301 */
+ OID_sm3, /* 1.2.156.10197.1.401 */
+ OID_SM2_with_SM3, /* 1.2.156.10197.1.501 */
+ OID_sm3WithRSAEncryption, /* 1.2.156.10197.1.504 */
+
+ /* TCG defined OIDS for TPM based keys */
+ OID_TPMLoadableKey, /* 2.23.133.10.1.3 */
+ OID_TPMImportableKey, /* 2.23.133.10.1.4 */
+ OID_TPMSealedData, /* 2.23.133.10.1.5 */
+
OID__NR
};
extern enum OID look_up_OID(const void *data, size_t datasize);
+extern int parse_OID(const void *data, size_t datasize, enum OID *oid);
extern int sprint_oid(const void *, size_t, char *, size_t);
extern int sprint_OID(enum OID, char *, size_t);
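
A sketch of the lookup interface, fed the standard DER encoding of
rsaEncryption (1.2.840.113549.1.1.1); look_up_OID() returns OID__NR for
identifiers missing from the registry:

#include <linux/oid_registry.h>
#include <linux/types.h>

static bool example_is_rsa_encryption(void)
{
	/* DER payload of 1.2.840.113549.1.1.1 (rsaEncryption). */
	static const u8 der[] = {
		0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01,
	};

	return look_up_OID(der, sizeof(der)) == OID_rsaEncryption;
}
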
diff --git a/include/linux/olpc-ec.h b/include/linux/olpc-ec.h
index 2925df3ce78a..c4602364e909 100644
--- a/include/linux/olpc-ec.h
+++ b/include/linux/olpc-ec.h
@@ -1,6 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_OLPC_EC_H
#define _LINUX_OLPC_EC_H
+#include <linux/bits.h>
+
/* XO-1 EC commands */
#define EC_FIRMWARE_REV 0x08
#define EC_WRITE_SCI_MASK 0x1b
@@ -15,28 +18,57 @@
#define EC_SCI_QUERY 0x84
#define EC_EXT_SCI_QUERY 0x85
+/* SCI source values */
+#define EC_SCI_SRC_GAME BIT(0)
+#define EC_SCI_SRC_BATTERY BIT(1)
+#define EC_SCI_SRC_BATSOC BIT(2)
+#define EC_SCI_SRC_BATERR BIT(3)
+#define EC_SCI_SRC_EBOOK BIT(4) /* XO-1 only */
+#define EC_SCI_SRC_WLAN BIT(5) /* XO-1 only */
+#define EC_SCI_SRC_ACPWR BIT(6)
+#define EC_SCI_SRC_BATCRIT BIT(7)
+#define EC_SCI_SRC_GPWAKE BIT(8) /* XO-1.5 only */
+#define EC_SCI_SRC_ALL GENMASK(8, 0)
+
struct platform_device;
struct olpc_ec_driver {
- int (*probe)(struct platform_device *);
int (*suspend)(struct platform_device *);
int (*resume)(struct platform_device *);
int (*ec_cmd)(u8, u8 *, size_t, u8 *, size_t, void *);
+
+ bool wakeup_available;
};
-#ifdef CONFIG_OLPC
+#ifdef CONFIG_OLPC_EC
extern void olpc_ec_driver_register(struct olpc_ec_driver *drv, void *arg);
extern int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf,
size_t outlen);
+extern void olpc_ec_wakeup_set(u16 value);
+extern void olpc_ec_wakeup_clear(u16 value);
+
+extern int olpc_ec_mask_write(u16 bits);
+extern int olpc_ec_sci_query(u16 *sci_value);
+
+extern bool olpc_ec_wakeup_available(void);
+
#else
static inline int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf,
size_t outlen) { return -ENODEV; }
-#endif /* CONFIG_OLPC */
+static inline void olpc_ec_wakeup_set(u16 value) { }
+static inline void olpc_ec_wakeup_clear(u16 value) { }
+
+static inline bool olpc_ec_wakeup_available(void)
+{
+ return false;
+}
+
+#endif /* CONFIG_OLPC_EC */
#endif /* _LINUX_OLPC_EC_H */
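
A sketch of the command interface: EC_FIRMWARE_REV takes no input payload
and returns a single byte, so the in-buffer is NULL:

#include <linux/olpc-ec.h>

static int example_ec_firmware_rev(u8 *rev)
{
	/* cmd, inbuf, inlen, outbuf, outlen */
	return olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, rev, 1);
}
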
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
index 290081620b3e..6f6c31e3fb93 100644
--- a/include/linux/omap-dma.h
+++ b/include/linux/omap-dma.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_OMAP_DMA_H
#define __LINUX_OMAP_DMA_H
-#include <linux/omap-dmaengine.h>
-
/*
* Legacy OMAP DMA handling defines and functions
*
@@ -130,7 +129,6 @@
#define IS_WORD_16 BIT(0xd)
#define ENABLE_16XX_MODE BIT(0xe)
#define HS_CHANNELS_RESERVED BIT(0xf)
-#define DMA_ENGINE_HANDLE_IRQ BIT(0x10)
/* Defines for DMA Capabilities */
#define DMA_HAS_TRANSPARENT_CAPS (0x1 << 18)
@@ -240,9 +238,6 @@ struct omap_dma_lch {
void (*callback)(int lch, u16 ch_status, void *data);
void *data;
long flags;
- /* required for Dynamic chaining */
- int prev_linked_ch;
- int next_linked_ch;
int state;
int chain_id;
int status;
@@ -297,23 +292,28 @@ struct omap_system_dma_plat_info {
#define dma_omap15xx() __dma_omap15xx(d)
#define dma_omap16xx() __dma_omap16xx(d)
-#if defined(CONFIG_ARCH_OMAP)
extern struct omap_system_dma_plat_info *omap_get_plat_info(void);
+#if defined(CONFIG_ARCH_OMAP1)
extern void omap_set_dma_priority(int lch, int dst_port, int priority);
+#else
+static inline void omap_set_dma_priority(int lch, int dst_port, int priority)
+{
+}
+#endif
+
extern int omap_request_dma(int dev_id, const char *dev_name,
void (*callback)(int lch, u16 ch_status, void *data),
void *data, int *dma_ch);
-extern void omap_enable_dma_irq(int ch, u16 irq_bits);
-extern void omap_disable_dma_irq(int ch, u16 irq_bits);
extern void omap_free_dma(int ch);
+#if IS_ENABLED(CONFIG_USB_OMAP)
+extern void omap_disable_dma_irq(int ch, u16 irq_bits);
extern void omap_start_dma(int lch);
extern void omap_stop_dma(int lch);
extern void omap_set_dma_transfer_params(int lch, int data_type,
int elem_count, int frame_count,
int sync_mode,
int dma_trigger, int src_or_dst_synch);
-extern void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode);
extern void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode);
extern void omap_set_dma_src_params(int lch, int src_port, int src_amode,
@@ -330,25 +330,15 @@ extern void omap_set_dma_dest_data_pack(int lch, int enable);
extern void omap_set_dma_dest_burst_mode(int lch,
enum omap_dma_burst_mode burst_mode);
-extern void omap_set_dma_params(int lch,
- struct omap_dma_channel_params *params);
-
-extern void omap_dma_link_lch(int lch_head, int lch_queue);
-
-extern int omap_set_dma_callback(int lch,
- void (*callback)(int lch, u16 ch_status, void *data),
- void *data);
extern dma_addr_t omap_get_dma_src_pos(int lch);
extern dma_addr_t omap_get_dma_dst_pos(int lch);
extern int omap_get_dma_active_status(int lch);
+#endif
+
extern int omap_dma_running(void);
-extern void omap_dma_set_global_params(int arb_rate, int max_fifo_depth,
- int tparams);
-void omap_dma_global_context_save(void);
-void omap_dma_global_context_restore(void);
-#if defined(CONFIG_ARCH_OMAP1) && IS_ENABLED(CONFIG_FB_OMAP)
-#include <mach/lcd_dma.h>
+#if IS_ENABLED(CONFIG_FB_OMAP)
+extern int omap_lcd_dma_running(void);
#else
static inline int omap_lcd_dma_running(void)
{
@@ -356,22 +346,4 @@ static inline int omap_lcd_dma_running(void)
}
#endif
-#else /* CONFIG_ARCH_OMAP */
-
-static inline struct omap_system_dma_plat_info *omap_get_plat_info(void)
-{
- return NULL;
-}
-
-static inline int omap_request_dma(int dev_id, const char *dev_name,
- void (*callback)(int lch, u16 ch_status, void *data),
- void *data, int *dma_ch)
-{
- return -ENODEV;
-}
-
-static inline void omap_free_dma(int ch) { }
-
-#endif /* CONFIG_ARCH_OMAP */
-
#endif /* __LINUX_OMAP_DMA_H */
diff --git a/include/linux/omap-dmaengine.h b/include/linux/omap-dmaengine.h
deleted file mode 100644
index 8e6906c72e90..000000000000
--- a/include/linux/omap-dmaengine.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * OMAP DMA Engine support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __LINUX_OMAP_DMAENGINE_H
-#define __LINUX_OMAP_DMAENGINE_H
-
-struct dma_chan;
-
-#if defined(CONFIG_DMA_OMAP) || (defined(CONFIG_DMA_OMAP_MODULE) && defined(MODULE))
-bool omap_dma_filter_fn(struct dma_chan *, void *);
-#else
-static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d)
-{
- return false;
-}
-#endif
-#endif /* __LINUX_OMAP_DMAENGINE_H */
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index fd0de00c0d77..082841908fe7 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -1,10 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* OMAP GPMC (General Purpose Memory Controller) defines
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/platform_data/gpmc-omap.h>
@@ -25,28 +21,45 @@ struct gpmc_nand_ops {
struct gpmc_nand_regs;
+struct gpmc_onenand_info {
+ bool sync_read;
+ bool sync_write;
+ int burst_len;
+};
+
#if IS_ENABLED(CONFIG_OMAP_GPMC)
struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
int cs);
+/**
+ * gpmc_omap_onenand_set_timings - set optimized sync timings.
+ * @dev: GPMC device
+ * @cs: Chip Select Region
+ * @freq: Chip frequency
+ * @latency: Burst latency cycle count
+ * @info: Structure describing parameters used
+ *
+ * Sets optimized timings for the @cs region based on @freq and @latency.
+ * Updates the @info structure based on the GPMC settings.
+ */
+int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
+ int latency,
+ struct gpmc_onenand_info *info);
+
#else
static inline struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
int cs)
{
return NULL;
}
-#endif /* CONFIG_OMAP_GPMC */
-
-/*--------------------------------*/
-/* deprecated APIs */
-#if IS_ENABLED(CONFIG_OMAP_GPMC)
-void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs);
-#else
-static inline void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs)
+static inline
+int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
+ int latency,
+ struct gpmc_onenand_info *info)
{
+ return -EINVAL;
}
#endif /* CONFIG_OMAP_GPMC */
-/*--------------------------------*/
extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
struct gpmc_settings *gpmc_s,
@@ -69,9 +81,6 @@ extern int gpmc_configure(int cmd, int wval);
extern void gpmc_read_settings_dt(struct device_node *np,
struct gpmc_settings *p);
-extern void omap3_gpmc_save_context(void);
-extern void omap3_gpmc_restore_context(void);
-
struct gpmc_timings;
struct omap_nand_platform_data;
struct omap_onenand_platform_data;
diff --git a/include/linux/omap-iommu.h b/include/linux/omap-iommu.h
index c1aede46718b..2c32ca09df02 100644
--- a/include/linux/omap-iommu.h
+++ b/include/linux/omap-iommu.h
@@ -1,19 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* omap iommu: simple virtual address space management
*
* Copyright (C) 2008-2009 Nokia Corporation
*
* Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _OMAP_IOMMU_H_
#define _OMAP_IOMMU_H_
+struct iommu_domain;
+
+#ifdef CONFIG_OMAP_IOMMU
extern void omap_iommu_save_ctx(struct device *dev);
extern void omap_iommu_restore_ctx(struct device *dev);
+int omap_iommu_domain_deactivate(struct iommu_domain *domain);
+int omap_iommu_domain_activate(struct iommu_domain *domain);
+#else
+static inline void omap_iommu_save_ctx(struct device *dev) {}
+static inline void omap_iommu_restore_ctx(struct device *dev) {}
+
+static inline int omap_iommu_domain_deactivate(struct iommu_domain *domain)
+{
+ return -ENODEV;
+}
+
+static inline int omap_iommu_domain_activate(struct iommu_domain *domain)
+{
+ return -ENODEV;
+}
+#endif
+
#endif
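
A hedged sketch of the save/restore pairing these stubs cover, assuming
@dev is the IOMMU user (e.g. a remote processor) being power-cycled:

#include <linux/device.h>
#include <linux/omap-iommu.h>

static void example_iommu_suspend(struct device *dev)
{
	/* Preserve the MMU registers before the domain powers off... */
	omap_iommu_save_ctx(dev);
}

static void example_iommu_resume(struct device *dev)
{
	/* ...and reprogram them once it powers back up. */
	omap_iommu_restore_ctx(dev);
}
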
diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h
index c726bd833761..8aa984ec1f38 100644
--- a/include/linux/omap-mailbox.h
+++ b/include/linux/omap-mailbox.h
@@ -1,15 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* omap-mailbox: interprocessor communication module for OMAP
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef OMAP_MAILBOX_H
#define OMAP_MAILBOX_H
-typedef u32 mbox_msg_t;
+typedef uintptr_t mbox_msg_t;
+
+#define omap_mbox_message(data) (u32)(mbox_msg_t)(data)
typedef int __bitwise omap_mbox_irq_t;
#define IRQ_TX ((__force omap_mbox_irq_t) 1)
diff --git a/include/linux/omapfb.h b/include/linux/omapfb.h
index d1f4dccaeede..63c9d473e7de 100644
--- a/include/linux/omapfb.h
+++ b/include/linux/omapfb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* File: include/linux/omapfb.h
*
@@ -5,20 +6,6 @@
*
* Copyright (C) 2004 Nokia Corporation
* Author: Imre Deak <imre.deak@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef __LINUX_OMAPFB_H__
#define __LINUX_OMAPFB_H__
diff --git a/include/linux/once.h b/include/linux/once.h
index 9c98aaa87cbc..bc714d414448 100644
--- a/include/linux/once.h
+++ b/include/linux/once.h
@@ -1,12 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ONCE_H
#define _LINUX_ONCE_H
#include <linux/types.h>
#include <linux/jump_label.h>
+/* Helpers used from arbitrary contexts.
+ * Hard irqs are blocked, be cautious.
+ */
bool __do_once_start(bool *done, unsigned long *flags);
-void __do_once_done(bool *done, struct static_key *once_key,
- unsigned long *flags);
+void __do_once_done(bool *done, struct static_key_true *once_key,
+ unsigned long *flags, struct module *mod);
+
+/* Variant for process contexts only. */
+bool __do_once_sleepable_start(bool *done);
+void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
+ struct module *mod);
/* Call a function exactly once. The idea of DO_ONCE() is to perform
* a function call such as initialization of random seeds, etc, only
@@ -15,7 +24,7 @@ void __do_once_done(bool *done, struct static_key *once_key,
* out the condition into a nop. DO_ONCE() guarantees type safety of
* arguments!
*
- * Not that the following is not equivalent ...
+ * Note that the following is not equivalent ...
*
* DO_ONCE(func, arg);
* DO_ONCE(func, arg);
@@ -37,23 +46,41 @@ void __do_once_done(bool *done, struct static_key *once_key,
#define DO_ONCE(func, ...) \
({ \
bool ___ret = false; \
- static bool ___done = false; \
- static struct static_key ___once_key = STATIC_KEY_INIT_TRUE; \
- if (static_key_true(&___once_key)) { \
+ static bool __section(".data.once") ___done = false; \
+ static DEFINE_STATIC_KEY_TRUE(___once_key); \
+ if (static_branch_unlikely(&___once_key)) { \
unsigned long ___flags; \
___ret = __do_once_start(&___done, &___flags); \
if (unlikely(___ret)) { \
func(__VA_ARGS__); \
__do_once_done(&___done, &___once_key, \
- &___flags); \
+ &___flags, THIS_MODULE); \
} \
} \
___ret; \
})
+/* Variant of DO_ONCE() for process/sleepable contexts. */
+#define DO_ONCE_SLEEPABLE(func, ...) \
+ ({ \
+ bool ___ret = false; \
+ static bool __section(".data.once") ___done = false; \
+ static DEFINE_STATIC_KEY_TRUE(___once_key); \
+ if (static_branch_unlikely(&___once_key)) { \
+ ___ret = __do_once_sleepable_start(&___done); \
+ if (unlikely(___ret)) { \
+ func(__VA_ARGS__); \
+ __do_once_sleepable_done(&___done, &___once_key,\
+ THIS_MODULE); \
+ } \
+ } \
+ ___ret; \
+ })
+
#define get_random_once(buf, nbytes) \
DO_ONCE(get_random_bytes, (buf), (nbytes))
-#define get_random_once_wait(buf, nbytes) \
- DO_ONCE(get_random_bytes_wait, (buf), (nbytes)) \
+
+#define get_random_sleepable_once(buf, nbytes) \
+ DO_ONCE_SLEEPABLE(get_random_bytes, (buf), (nbytes))
#endif /* _LINUX_ONCE_H */
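
The canonical DO_ONCE() consumer is lazy seeding, which the header wraps
as get_random_once(); a minimal sketch:

#include <linux/once.h>
#include <linux/random.h>
#include <linux/types.h>

static u32 hash_secret;

static u32 example_hash(u32 val)
{
	/* The first caller seeds the secret; afterwards the static key
	 * is flipped and this line becomes a patched-out branch. */
	get_random_once(&hash_secret, sizeof(hash_secret));
	return val ^ hash_secret;
}
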
diff --git a/include/linux/once_lite.h b/include/linux/once_lite.h
new file mode 100644
index 000000000000..b7bce4983638
--- /dev/null
+++ b/include/linux/once_lite.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ONCE_LITE_H
+#define _LINUX_ONCE_LITE_H
+
+#include <linux/types.h>
+
+/* Call a function once. Similar to DO_ONCE(), but does not use jump label
+ * patching via static keys.
+ */
+#define DO_ONCE_LITE(func, ...) \
+ DO_ONCE_LITE_IF(true, func, ##__VA_ARGS__)
+
+#define __ONCE_LITE_IF(condition) \
+ ({ \
+ static bool __section(".data.once") __already_done; \
+ bool __ret_cond = !!(condition); \
+ bool __ret_once = false; \
+ \
+ if (unlikely(__ret_cond && !__already_done)) { \
+ __already_done = true; \
+ __ret_once = true; \
+ } \
+ unlikely(__ret_once); \
+ })
+
+#define DO_ONCE_LITE_IF(condition, func, ...) \
+ ({ \
+ bool __ret_do_once = !!(condition); \
+ \
+ if (__ONCE_LITE_IF(__ret_do_once)) \
+ func(__VA_ARGS__); \
+ \
+ unlikely(__ret_do_once); \
+ })
+
+#endif /* _LINUX_ONCE_LITE_H */
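
DO_ONCE_LITE_IF() is the building block for one-shot diagnostics; a sketch
of a printk-style wrapper (the macro name here is made up):

#include <linux/once_lite.h>
#include <linux/printk.h>

/* Warn at most once, and only if @cond is true at that moment; unlike
 * DO_ONCE() this needs no static-key patching, just a .data.once bool. */
#define example_warn_once(cond, fmt, ...) \
	DO_ONCE_LITE_IF(cond, pr_warn, fmt, ##__VA_ARGS__)
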
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 8a266e2be5a6..7d0c9c48a0c5 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __INCLUDE_LINUX_OOM_H
#define __INCLUDE_LINUX_OOM_H
@@ -6,12 +7,21 @@
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>
+#include <linux/sched/coredump.h> /* MMF_* */
+#include <linux/mm.h> /* VM_FAULT* */
struct zonelist;
struct notifier_block;
struct mem_cgroup;
struct task_struct;
+enum oom_constraint {
+ CONSTRAINT_NONE,
+ CONSTRAINT_CPUSET,
+ CONSTRAINT_MEMORY_POLICY,
+ CONSTRAINT_MEMCG,
+};
+
/*
* Details of the page allocation that triggered the oom killer that are used to
* determine what should be killed.
@@ -38,10 +48,14 @@ struct oom_control {
/* Used by oom implementation, do not set */
unsigned long totalpages;
struct task_struct *chosen;
- unsigned long chosen_points;
+ long chosen_points;
+
+ /* Used to print the constraint info. */
+ enum oom_constraint constraint;
};
extern struct mutex oom_lock;
+extern struct mutex oom_adj_mutex;
static inline void set_current_oom_origin(void)
{
@@ -63,8 +77,27 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
return tsk->signal->oom_mm;
}
-extern unsigned long oom_badness(struct task_struct *p,
- struct mem_cgroup *memcg, const nodemask_t *nodemask,
+/*
+ * Checks whether a page fault on the given mm is still reliable.
+ * This is no longer true if the oom reaper started to reap the
+ * address space, which is reflected by the MMF_UNSTABLE flag set in
+ * the mm. At that moment any !shared mapping would lose its content
+ * and could cause memory corruption (zero pages instead of the
+ * original content).
+ *
+ * The caller should invoke this before establishing a page table entry
+ * for a !shared mapping and under the proper page table lock.
+ *
+ * Returns 0 when the PF is safe, VM_FAULT_SIGBUS otherwise.
+ */
+static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
+{
+ if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
+ return VM_FAULT_SIGBUS;
+ return 0;
+}
+
+long oom_badness(struct task_struct *p,
unsigned long totalpages);
extern bool out_of_memory(struct oom_control *oc);
@@ -79,8 +112,4 @@ extern void oom_killer_enable(void);
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
-/* sysctls */
-extern int sysctl_oom_dump_tasks;
-extern int sysctl_oom_kill_allocating_task;
-extern int sysctl_panic_on_oom;
#endif /* _INCLUDE_LINUX_OOM_H */
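
A sketch of the documented call site for check_stable_address_space():
fault-handling code about to install a PTE for a private mapping, with the
page table lock already held:

#include <linux/mm.h>
#include <linux/oom.h>

static vm_fault_t example_install_pte(struct vm_area_struct *vma)
{
	vm_fault_t ret;

	/* If the OOM reaper already unmapped this mm, installing the
	 * entry could expose zero pages; fail with SIGBUS instead. */
	ret = check_stable_address_space(vma->vm_mm);
	if (ret)
		return ret;

	/* ... safe to set the page table entry here ... */
	return 0;
}
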
diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h
index e6b240b6196c..3b037bab32b0 100644
--- a/include/linux/openvswitch.h
+++ b/include/linux/openvswitch.h
@@ -1,19 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2007-2011 Nicira Networks.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
*/
#ifndef _LINUX_OPENVSWITCH_H
@@ -21,4 +8,9 @@
#include <uapi/linux/openvswitch.h>
+#define OVS_CLONE_ATTR_EXEC 0 /* Specify a u32 value. When nonzero,
+ * actions in clone will not change flow
+ * keys. False otherwise.
+ */
+
#endif /* _LINUX_OPENVSWITCH_H */
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
deleted file mode 100644
index b2a0f15f11fe..000000000000
--- a/include/linux/oprofile.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/**
- * @file oprofile.h
- *
- * API for machine-specific interrupts to interface
- * to oprofile.
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#ifndef OPROFILE_H
-#define OPROFILE_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/printk.h>
-#include <linux/atomic.h>
-
-/* Each escaped entry is prefixed by ESCAPE_CODE
- * then one of the following codes, then the
- * relevant data.
- * These #defines live in this file so that arch-specific
- * buffer sync'ing code can access them.
- */
-#define ESCAPE_CODE ~0UL
-#define CTX_SWITCH_CODE 1
-#define CPU_SWITCH_CODE 2
-#define COOKIE_SWITCH_CODE 3
-#define KERNEL_ENTER_SWITCH_CODE 4
-#define KERNEL_EXIT_SWITCH_CODE 5
-#define MODULE_LOADED_CODE 6
-#define CTX_TGID_CODE 7
-#define TRACE_BEGIN_CODE 8
-#define TRACE_END_CODE 9
-#define XEN_ENTER_SWITCH_CODE 10
-#define SPU_PROFILING_CODE 11
-#define SPU_CTX_SWITCH_CODE 12
-#define IBS_FETCH_CODE 13
-#define IBS_OP_CODE 14
-
-struct dentry;
-struct file_operations;
-struct pt_regs;
-
-/* Operations structure to be filled in */
-struct oprofile_operations {
- /* create any necessary configuration files in the oprofile fs.
- * Optional. */
- int (*create_files)(struct dentry * root);
- /* Do any necessary interrupt setup. Optional. */
- int (*setup)(void);
- /* Do any necessary interrupt shutdown. Optional. */
- void (*shutdown)(void);
- /* Start delivering interrupts. */
- int (*start)(void);
- /* Stop delivering interrupts. */
- void (*stop)(void);
- /* Arch-specific buffer sync functions.
- * Return value = 0: Success
- * Return value = -1: Failure
- * Return value = 1: Run generic sync function
- */
- int (*sync_start)(void);
- int (*sync_stop)(void);
-
- /* Initiate a stack backtrace. Optional. */
- void (*backtrace)(struct pt_regs * const regs, unsigned int depth);
-
- /* Multiplex between different events. Optional. */
- int (*switch_events)(void);
- /* CPU identification string. */
- char * cpu_type;
-};
-
-/**
- * One-time initialisation. *ops must be set to a filled-in
- * operations structure. This is called even in timer interrupt
- * mode so an arch can set a backtrace callback.
- *
- * If an error occurs, the fields should be left untouched.
- */
-int oprofile_arch_init(struct oprofile_operations * ops);
-
-/**
- * One-time exit/cleanup for the arch.
- */
-void oprofile_arch_exit(void);
-
-/**
- * Add a sample. This may be called from any context.
- */
-void oprofile_add_sample(struct pt_regs * const regs, unsigned long event);
-
-/**
- * Add an extended sample. Use this when the PC is not from the regs, and
- * we cannot determine if we're in kernel mode from the regs.
- *
- * This function does perform a backtrace.
- *
- */
-void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
- unsigned long event, int is_kernel);
-
-/**
- * Add an hardware sample.
- */
-void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs,
- unsigned long event, int is_kernel,
- struct task_struct *task);
-
-/* Use this instead when the PC value is not from the regs. Doesn't
- * backtrace. */
-void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event);
-
-/* add a backtrace entry, to be called from the ->backtrace callback */
-void oprofile_add_trace(unsigned long eip);
-
-
-/**
- * Create a file of the given name as a child of the given root, with
- * the specified file operations.
- */
-int oprofilefs_create_file(struct dentry * root,
- char const * name, const struct file_operations * fops);
-
-int oprofilefs_create_file_perm(struct dentry * root,
- char const * name, const struct file_operations * fops, int perm);
-
-/** Create a file for read/write access to an unsigned long. */
-int oprofilefs_create_ulong(struct dentry * root,
- char const * name, ulong * val);
-
-/** Create a file for read-only access to an unsigned long. */
-int oprofilefs_create_ro_ulong(struct dentry * root,
- char const * name, ulong * val);
-
-/** Create a file for read-only access to an atomic_t. */
-int oprofilefs_create_ro_atomic(struct dentry * root,
- char const * name, atomic_t * val);
-
-/** create a directory */
-struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
-
-/**
- * Write the given asciz string to the given user buffer @buf, updating *offset
- * appropriately. Returns bytes written or -EFAULT.
- */
-ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count, loff_t * offset);
-
-/**
- * Convert an unsigned long value into ASCII and copy it to the user buffer @buf,
- * updating *offset appropriately. Returns bytes written or -EFAULT.
- */
-ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t count, loff_t * offset);
-
-/**
- * Read an ASCII string for a number from a userspace buffer and fill *val on success.
- * Returns 0 on success, < 0 on error.
- */
-int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
-
-/** lock for read/write safety */
-extern raw_spinlock_t oprofilefs_lock;
-
-/**
- * Add the contents of a circular buffer to the event buffer.
- */
-void oprofile_put_buff(unsigned long *buf, unsigned int start,
- unsigned int stop, unsigned int max);
-
-unsigned long oprofile_get_cpu_buffer_size(void);
-void oprofile_cpu_buffer_inc_smpl_lost(void);
-
-/* cpu buffer functions */
-
-struct op_sample;
-
-struct op_entry {
- struct ring_buffer_event *event;
- struct op_sample *sample;
- unsigned long size;
- unsigned long *data;
-};
-
-void oprofile_write_reserve(struct op_entry *entry,
- struct pt_regs * const regs,
- unsigned long pc, int code, int size);
-int oprofile_add_data(struct op_entry *entry, unsigned long val);
-int oprofile_add_data64(struct op_entry *entry, u64 val);
-int oprofile_write_commit(struct op_entry *entry);
-
-#ifdef CONFIG_HW_PERF_EVENTS
-int __init oprofile_perf_init(struct oprofile_operations *ops);
-void oprofile_perf_exit(void);
-char *op_name_from_perf_id(void);
-#else
-static inline int __init oprofile_perf_init(struct oprofile_operations *ops)
-{
- pr_info("oprofile: hardware counters not available\n");
- return -ENODEV;
-}
-static inline void oprofile_perf_exit(void) { }
-#endif /* CONFIG_HW_PERF_EVENTS */
-
-#endif /* OPROFILE_H */
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
index 703ea5c30a33..5581dbd3bd34 100644
--- a/include/linux/osq_lock.h
+++ b/include/linux/osq_lock.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_OSQ_LOCK_H
#define __LINUX_OSQ_LOCK_H
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
new file mode 100644
index 000000000000..0e33b5cbdb9f
--- /dev/null
+++ b/include/linux/overflow.h
@@ -0,0 +1,296 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+#ifndef __LINUX_OVERFLOW_H
+#define __LINUX_OVERFLOW_H
+
+#include <linux/compiler.h>
+#include <linux/limits.h>
+#include <linux/const.h>
+
+/*
+ * We need to compute the minimum and maximum values representable in a given
+ * type. These macros may also be useful elsewhere. It would seem more obvious
+ * to do something like:
+ *
+ * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
+ * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
+ *
+ * Unfortunately, the middle expressions, strictly speaking, have
+ * undefined behaviour, and at least some versions of gcc warn about
+ * the type_max expression (but not if -fsanitize=undefined is in
+ * effect; in that case, the warning is deferred to runtime...).
+ *
+ * The slightly excessive casting in type_min is to make sure the
+ * macros also produce sensible values for the exotic type _Bool. [The
+ * overflow checkers only almost work for _Bool, but that's
+ * a-feature-not-a-bug, since people shouldn't be doing arithmetic on
+ * _Bools. Besides, the gcc builtins don't allow _Bool* as third
+ * argument.]
+ *
+ * Idea stolen from
+ * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
+ * credit to Christian Biere.
+ */
+#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
+#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
+#define type_min(T) ((T)((T)-type_max(T)-(T)1))
+
+/*
+ * Avoids triggering -Wtype-limits compilation warning,
+ * while using unsigned data types to check a < 0.
+ */
+#define is_non_negative(a) ((a) > 0 || (a) == 0)
+#define is_negative(a) (!(is_non_negative(a)))
+
+/*
+ * Allows for effectively applying __must_check to a macro so we can have
+ * both the type-agnostic benefits of the macros while also being able to
+ * enforce that the return value is, in fact, checked.
+ */
+static inline bool __must_check __must_check_overflow(bool overflow)
+{
+ return unlikely(overflow);
+}
+
+/**
+ * check_add_overflow() - Calculate addition with overflow checking
+ * @a: first addend
+ * @b: second addend
+ * @d: pointer to store sum
+ *
+ * Returns 0 on success.
+ *
+ * *@d holds the results of the attempted addition, but is not considered
+ * "safe for use" on a non-zero return value, which indicates that the
+ * sum has overflowed or been truncated.
+ */
+#define check_add_overflow(a, b, d) \
+ __must_check_overflow(__builtin_add_overflow(a, b, d))
+
+/**
+ * check_sub_overflow() - Calculate subtraction with overflow checking
+ * @a: minuend; value to subtract from
+ * @b: subtrahend; value to subtract from @a
+ * @d: pointer to store difference
+ *
+ * Returns 0 on success.
+ *
+ * *@d holds the results of the attempted subtraction, but is not considered
+ * "safe for use" on a non-zero return value, which indicates that the
+ * difference has underflowed or been truncated.
+ */
+#define check_sub_overflow(a, b, d) \
+ __must_check_overflow(__builtin_sub_overflow(a, b, d))
+
+/**
+ * check_mul_overflow() - Calculate multiplication with overflow checking
+ * @a: first factor
+ * @b: second factor
+ * @d: pointer to store product
+ *
+ * Returns 0 on success.
+ *
+ * *@d holds the results of the attempted multiplication, but is not
+ * considered "safe for use" on a non-zero return value, which indicates
+ * that the product has overflowed or been truncated.
+ */
+#define check_mul_overflow(a, b, d) \
+ __must_check_overflow(__builtin_mul_overflow(a, b, d))
+
+/**
+ * check_shl_overflow() - Calculate a left-shifted value and check overflow
+ * @a: Value to be shifted
+ * @s: How many bits left to shift
+ * @d: Pointer to where to store the result
+ *
+ * Computes *@d = (@a << @s)
+ *
+ * Returns true if '*@d' cannot hold the result or when '@a << @s' doesn't
+ * make sense. Example conditions:
+ *
+ * - '@a << @s' causes bits to be lost when stored in *@d.
+ * - '@s' is garbage (e.g. negative) or so large that the result of
+ * '@a << @s' is guaranteed to be 0.
+ * - '@a' is negative.
+ * - '@a << @s' sets the sign bit, if any, in '*@d'.
+ *
+ * '*@d' will hold the results of the attempted shift, but is not
+ * considered "safe for use" if true is returned.
+ */
+#define check_shl_overflow(a, s, d) __must_check_overflow(({ \
+ typeof(a) _a = a; \
+ typeof(s) _s = s; \
+ typeof(d) _d = d; \
+ u64 _a_full = _a; \
+ unsigned int _to_shift = \
+ is_non_negative(_s) && _s < 8 * sizeof(*d) ? _s : 0; \
+ *_d = (_a_full << _to_shift); \
+ (_to_shift != _s || is_negative(*_d) || is_negative(_a) || \
+ (*_d >> _to_shift) != _a); \
+}))
+
+#define __overflows_type_constexpr(x, T) ( \
+ is_unsigned_type(typeof(x)) ? \
+ (x) > type_max(typeof(T)) : \
+ is_unsigned_type(typeof(T)) ? \
+ (x) < 0 || (x) > type_max(typeof(T)) : \
+ (x) < type_min(typeof(T)) || (x) > type_max(typeof(T)))
+
+#define __overflows_type(x, T) ({ \
+ typeof(T) v = 0; \
+ check_add_overflow((x), v, &v); \
+})
+
+/**
+ * overflows_type - helper for checking whether a value would overflow a
+ *                  variable or data type
+ *
+ * @n: source constant value or variable to be checked
+ * @T: destination variable or data type proposed to store @n
+ *
+ * Compares the @n expression for whether or not it can safely fit in
+ * the storage of the type in @T. @n and @T can have different types.
+ * If @n is a constant expression, this will also resolve to a constant
+ * expression.
+ *
+ * Returns: true if overflow can occur, false otherwise.
+ */
+#define overflows_type(n, T) \
+ __builtin_choose_expr(__is_constexpr(n), \
+ __overflows_type_constexpr(n, T), \
+ __overflows_type(n, T))
+
+/**
+ * castable_to_type - like __same_type(), but also allows for casted literals
+ *
+ * @n: variable or constant value
+ * @T: variable or data type
+ *
+ * Unlike the __same_type() macro, this allows a constant value as the
+ * first argument. If this value would not overflow into an assignment
+ * of the second argument's type, it returns true. Otherwise, this falls
+ * back to __same_type().
+ */
+#define castable_to_type(n, T) \
+ __builtin_choose_expr(__is_constexpr(n), \
+ !__overflows_type_constexpr(n, T), \
+ __same_type(n, T))
+
+/**
+ * size_mul() - Calculate size_t multiplication with saturation at SIZE_MAX
+ * @factor1: first factor
+ * @factor2: second factor
+ *
+ * Returns: calculate @factor1 * @factor2, both promoted to size_t,
+ * with any overflow causing the return value to be SIZE_MAX. The
+ * lvalue must be size_t to avoid implicit type conversion.
+ */
+static inline size_t __must_check size_mul(size_t factor1, size_t factor2)
+{
+ size_t bytes;
+
+ if (check_mul_overflow(factor1, factor2, &bytes))
+ return SIZE_MAX;
+
+ return bytes;
+}
+
+/**
+ * size_add() - Calculate size_t addition with saturation at SIZE_MAX
+ * @addend1: first addend
+ * @addend2: second addend
+ *
+ * Returns: calculate @addend1 + @addend2, both promoted to size_t,
+ * with any overflow causing the return value to be SIZE_MAX. The
+ * lvalue must be size_t to avoid implicit type conversion.
+ */
+static inline size_t __must_check size_add(size_t addend1, size_t addend2)
+{
+ size_t bytes;
+
+ if (check_add_overflow(addend1, addend2, &bytes))
+ return SIZE_MAX;
+
+ return bytes;
+}
+
+/**
+ * size_sub() - Calculate size_t subtraction with saturation at SIZE_MAX
+ * @minuend: value to subtract from
+ * @subtrahend: value to subtract from @minuend
+ *
+ * Returns: calculate @minuend - @subtrahend, both promoted to size_t,
+ * with any overflow causing the return value to be SIZE_MAX. For
+ * composition with the size_add() and size_mul() helpers, neither
+ * argument may be SIZE_MAX (or the result will be forced to SIZE_MAX).
+ * The lvalue must be size_t to avoid implicit type conversion.
+ */
+static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
+{
+ size_t bytes;
+
+ if (minuend == SIZE_MAX || subtrahend == SIZE_MAX ||
+ check_sub_overflow(minuend, subtrahend, &bytes))
+ return SIZE_MAX;
+
+ return bytes;
+}
+
+/**
+ * array_size() - Calculate size of 2-dimensional array.
+ * @a: dimension one
+ * @b: dimension two
+ *
+ * Calculates size of 2-dimensional array: @a * @b.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+#define array_size(a, b) size_mul(a, b)
+
+/**
+ * array3_size() - Calculate size of 3-dimensional array.
+ * @a: dimension one
+ * @b: dimension two
+ * @c: dimension three
+ *
+ * Calculates size of 3-dimensional array: @a * @b * @c.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+#define array3_size(a, b, c) size_mul(size_mul(a, b), c)
+
+/**
+ * flex_array_size() - Calculate size of a flexible array member
+ * within an enclosing structure.
+ * @p: Pointer to the structure.
+ * @member: Name of the flexible array member.
+ * @count: Number of elements in the array.
+ *
+ * Calculates size of a flexible array of @count number of @member
+ * elements, at the end of structure @p.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define flex_array_size(p, member, count) \
+ __builtin_choose_expr(__is_constexpr(count), \
+ (count) * sizeof(*(p)->member) + __must_be_array((p)->member), \
+ size_mul(count, sizeof(*(p)->member) + __must_be_array((p)->member)))
+
+/**
+ * struct_size() - Calculate size of structure with trailing flexible array.
+ * @p: Pointer to the structure.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array.
+ *
+ * Calculates size of memory needed for structure @p followed by an
+ * array of @count number of @member elements.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define struct_size(p, member, count) \
+ __builtin_choose_expr(__is_constexpr(count), \
+ sizeof(*(p)) + flex_array_size(p, member, count), \
+ size_add(sizeof(*(p)), flex_array_size(p, member, count)))
+
+#endif /* __LINUX_OVERFLOW_H */
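
A sketch tying the helpers together: a flexible-array allocation sized
with struct_size(), which saturates at SIZE_MAX so kmalloc() simply fails
on overflow, plus an explicit check_add_overflow() where wrapped
arithmetic must be rejected outright (struct example is hypothetical):

#include <linux/overflow.h>
#include <linux/slab.h>

struct example {
	size_t count;
	u32 data[];			/* trailing flexible array */
};

static struct example *example_alloc(size_t count, size_t extra)
{
	struct example *e;
	size_t total;

	/* Reject wrapped addition instead of saturating it. */
	if (check_add_overflow(count, extra, &total))
		return NULL;

	/* struct_size() returns SIZE_MAX on overflow, which kmalloc()
	 * refuses, so a huge @total cannot under-allocate. */
	e = kmalloc(struct_size(e, data, total), GFP_KERNEL);
	if (e)
		e->count = total;
	return e;
}
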
diff --git a/include/linux/oxu210hp.h b/include/linux/oxu210hp.h
deleted file mode 100644
index 0bf96eae5389..000000000000
--- a/include/linux/oxu210hp.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* platform data for the OXU210HP HCD */
-
-struct oxu210hp_platform_data {
- unsigned int bus16:1;
- unsigned int use_hcd_otg:1;
- unsigned int use_hcd_sph:1;
-};
diff --git a/include/linux/packing.h b/include/linux/packing.h
new file mode 100644
index 000000000000..8d6571feb95d
--- /dev/null
+++ b/include/linux/packing.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2016-2018 NXP
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _LINUX_PACKING_H
+#define _LINUX_PACKING_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#define QUIRK_MSB_ON_THE_RIGHT BIT(0)
+#define QUIRK_LITTLE_ENDIAN BIT(1)
+#define QUIRK_LSW32_IS_FIRST BIT(2)
+
+enum packing_op {
+ PACK,
+ UNPACK,
+};
+
+/**
+ * packing - Convert numbers (currently u64) between a packed and an unpacked
+ * format. Unpacked means laid out in memory in the CPU's native
+ * understanding of integers, while packed means anything else that
+ * requires translation.
+ *
+ * @pbuf: Pointer to a buffer holding the packed value.
+ * @uval: Pointer to an u64 holding the unpacked value.
+ * @startbit: The index (in logical notation, compensated for quirks) where
+ * the packed value starts within pbuf. Must be larger than, or
+ * equal to, endbit.
+ * @endbit: The index (in logical notation, compensated for quirks) where
+ * the packed value ends within pbuf. Must be smaller than, or equal
+ * to, startbit.
+ * @op: If PACK, then uval will be treated as const pointer and copied (packed)
+ * into pbuf, between startbit and endbit.
+ * If UNPACK, then pbuf will be treated as const pointer and the logical
+ * value between startbit and endbit will be copied (unpacked) to uval.
+ * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and
+ * QUIRK_MSB_ON_THE_RIGHT.
+ *
+ * Return: 0 on success, EINVAL or ERANGE if called incorrectly. Assuming
+ * correct usage, return code may be discarded.
+ * If op is PACK, pbuf is modified.
+ * If op is UNPACK, uval is modified.
+ */
+int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen,
+ enum packing_op op, u8 quirks);
+
+#endif
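
[Editor's note: a hedged illustration of the calling convention -- the buffer
layout and field position are invented for the example. It packs a 16-bit
value into logical bits 47..32 of an 8-byte buffer and reads it back; note
that startbit >= endbit, per the kerneldoc above.]

	u8 buf[8] = {};
	u64 val = 0x1234;
	int err;

	/* pack val into logical bits 47..32 of buf */
	err = packing(buf, &val, 47, 32, sizeof(buf), PACK, 0);

	/* unpack the same field; on success val reads back as 0x1234 */
	val = 0;
	err = packing(buf, &val, 47, 32, sizeof(buf), UNPACK, 0);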
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 2f9c1f93b1ce..495b16b6b4d7 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -1,38 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* padata.h - header for the padata parallelization interface
*
* Copyright (C) 2008, 2009 secunet Security Networks AG
* Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * Copyright (c) 2020 Oracle and/or its affiliates.
+ * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
*/
#ifndef PADATA_H
#define PADATA_H
+#include <linux/refcount.h>
+#include <linux/compiler_types.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>
-#include <linux/timer.h>
-#include <linux/notifier.h>
#include <linux/kobject.h>
#define PADATA_CPU_SERIAL 0x01
#define PADATA_CPU_PARALLEL 0x02
/**
- * struct padata_priv - Embedded to the users data structure.
+ * struct padata_priv - Represents one job
*
* @list: List entry, to attach to the padata lists.
* @pd: Pointer to the internal control structure.
@@ -46,13 +37,14 @@ struct padata_priv {
struct list_head list;
struct parallel_data *pd;
int cb_cpu;
+ unsigned int seq_nr;
int info;
void (*parallel)(struct padata_priv *padata);
void (*serial)(struct padata_priv *padata);
};
/**
- * struct padata_list
+ * struct padata_list - one per work type per CPU
*
* @list: List head.
* @lock: List lock.
@@ -76,28 +68,6 @@ struct padata_serial_queue {
};
/**
- * struct padata_parallel_queue - The percpu padata parallel queue
- *
- * @parallel: List to wait for parallelization.
- * @reorder: List to wait for reordering after parallel processing.
- * @serial: List to wait for serialization after reordering.
- * @pwork: work struct for parallelization.
- * @swork: work struct for serialization.
- * @pd: Backpointer to the internal control structure.
- * @work: work struct for parallelization.
- * @num_obj: Number of objects that are processed by this cpu.
- * @cpu_index: Index of the cpu.
- */
-struct padata_parallel_queue {
- struct padata_list parallel;
- struct padata_list reorder;
- struct parallel_data *pd;
- struct work_struct work;
- atomic_t num_obj;
- int cpu_index;
-};
-
-/**
* struct padata_cpumask - The cpumasks for the parallel/serial workers
*
* @pcpu: cpumask for the parallel workers.
@@ -112,50 +82,92 @@ struct padata_cpumask {
* struct parallel_data - Internal control structure, covers everything
* that depends on the cpumask in use.
*
- * @pinst: padata instance.
- * @pqueue: percpu padata queues used for parallelization.
+ * @ps: padata_shell object.
+ * @reorder_list: percpu reorder lists
 * @squeue: percpu padata queues used for serialization.
- * @reorder_objects: Number of objects waiting in the reorder queues.
* @refcnt: Number of objects holding a reference on this parallel_data.
- * @max_seq_nr: Maximal used sequence number.
+ * @seq_nr: Sequence number of the parallelized data object.
+ * @processed: Number of already processed objects.
+ * @cpu: Next CPU to be processed.
* @cpumask: The cpumasks in use for parallel and serial workers.
+ * @reorder_work: work struct for reordering.
* @lock: Reorder lock.
- * @processed: Number of already processed objects.
- * @timer: Reorder timer.
*/
struct parallel_data {
- struct padata_instance *pinst;
- struct padata_parallel_queue __percpu *pqueue;
+ struct padata_shell *ps;
+ struct padata_list __percpu *reorder_list;
struct padata_serial_queue __percpu *squeue;
- atomic_t reorder_objects;
- atomic_t refcnt;
- atomic_t seq_nr;
- struct padata_cpumask cpumask;
- spinlock_t lock ____cacheline_aligned;
+ refcount_t refcnt;
+ unsigned int seq_nr;
unsigned int processed;
- struct timer_list timer;
+ int cpu;
+ struct padata_cpumask cpumask;
+ struct work_struct reorder_work;
+ spinlock_t ____cacheline_aligned lock;
+};
+
+/**
+ * struct padata_shell - Wrapper around struct parallel_data, its
+ * purpose is to allow the underlying control structure to be replaced
+ * on the fly using RCU.
+ *
+ * @pinst: padata instance.
+ * @pd: Actual parallel_data structure which may be substituted on the fly.
+ * @opd: Pointer to old pd to be freed by padata_replace.
+ * @list: List entry in padata_instance list.
+ */
+struct padata_shell {
+ struct padata_instance *pinst;
+ struct parallel_data __rcu *pd;
+ struct parallel_data *opd;
+ struct list_head list;
+};
+
+/**
+ * struct padata_mt_job - represents one multithreaded job
+ *
+ * @thread_fn: Called for each chunk of work that a padata thread does.
+ * @fn_arg: The thread function argument.
+ * @start: The start of the job (units are job-specific).
+ * @size: size of this node's work (units are job-specific).
+ * @align: Ranges passed to the thread function fall on this boundary, with the
+ * possible exceptions of the beginning and end of the job.
+ * @min_chunk: The minimum chunk size in job-specific units. This allows
+ * the client to communicate the minimum amount of work that's
+ * appropriate for one worker thread to do at once.
+ * @max_threads: Max threads to use for the job; the actual number may be
+ *		 lower depending on the task size and minimum chunk size.
+ */
+struct padata_mt_job {
+ void (*thread_fn)(unsigned long start, unsigned long end, void *arg);
+ void *fn_arg;
+ unsigned long start;
+ unsigned long size;
+ unsigned long align;
+ unsigned long min_chunk;
+ int max_threads;
};
/**
* struct padata_instance - The overall control structure.
*
- * @cpu_notifier: cpu hotplug notifier.
- * @wq: The workqueue in use.
- * @pd: The internal control structure.
+ * @cpu_online_node: Linkage for CPU online callback.
+ * @cpu_dead_node: Linkage for CPU offline callback.
+ * @parallel_wq: The workqueue used for parallel work.
+ * @serial_wq: The workqueue used for serial work.
+ * @pslist: List of padata_shell objects attached to this instance.
* @cpumask: User supplied cpumasks for parallel and serial works.
- * @cpumask_change_notifier: Notifiers chain for user-defined notify
- * callbacks that will be called when either @pcpu or @cbcpu
- * or both cpumasks change.
* @kobj: padata instance kernel object.
* @lock: padata instance lock.
* @flags: padata flags.
*/
struct padata_instance {
- struct hlist_node node;
- struct workqueue_struct *wq;
- struct parallel_data *pd;
+ struct hlist_node cpu_online_node;
+ struct hlist_node cpu_dead_node;
+ struct workqueue_struct *parallel_wq;
+ struct workqueue_struct *serial_wq;
+ struct list_head pslist;
struct padata_cpumask cpumask;
- struct blocking_notifier_head cpumask_change_notifier;
struct kobject kobj;
struct mutex lock;
u8 flags;
@@ -164,18 +176,20 @@ struct padata_instance {
#define PADATA_INVALID 4
};
-extern struct padata_instance *padata_alloc_possible(
- struct workqueue_struct *wq);
+#ifdef CONFIG_PADATA
+extern void __init padata_init(void);
+#else
+static inline void __init padata_init(void) {}
+#endif
+
+extern struct padata_instance *padata_alloc(const char *name);
extern void padata_free(struct padata_instance *pinst);
-extern int padata_do_parallel(struct padata_instance *pinst,
- struct padata_priv *padata, int cb_cpu);
+extern struct padata_shell *padata_alloc_shell(struct padata_instance *pinst);
+extern void padata_free_shell(struct padata_shell *ps);
+extern int padata_do_parallel(struct padata_shell *ps,
+ struct padata_priv *padata, int *cb_cpu);
extern void padata_do_serial(struct padata_priv *padata);
+extern void __init padata_do_multithreaded(struct padata_mt_job *job);
extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
cpumask_var_t cpumask);
-extern int padata_start(struct padata_instance *pinst);
-extern void padata_stop(struct padata_instance *pinst);
-extern int padata_register_cpumask_notifier(struct padata_instance *pinst,
- struct notifier_block *nblock);
-extern int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
- struct notifier_block *nblock);
#endif
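
[Editor's note: the new multithreaded-job entry point is easiest to see with a
filled-in struct padata_mt_job. A minimal sketch follows; the function and
item names are invented, and the chunk values are arbitrary -- only
padata_do_multithreaded() and the struct fields come from this patch.]

	static void __init sketch_thread_fn(unsigned long start,
					    unsigned long end, void *arg)
	{
		/* process items in [start, end); units are job-specific */
	}

	static void __init sketch_run(unsigned long nr_items)
	{
		struct padata_mt_job job = {
			.thread_fn   = sketch_thread_fn,
			.fn_arg      = NULL,
			.start       = 0,
			.size        = nr_items,
			.align       = 1,	/* no boundary requirement */
			.min_chunk   = 1024,	/* smallest worthwhile chunk */
			.max_threads = 4,
		};

		padata_do_multithreaded(&job);
	}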
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
index 77b078c103b2..7d79818dc065 100644
--- a/include/linux/page-flags-layout.h
+++ b/include/linux/page-flags-layout.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef PAGE_FLAGS_LAYOUT_H
#define PAGE_FLAGS_LAYOUT_H
@@ -20,17 +21,19 @@
#elif MAX_NR_ZONES <= 8
#define ZONES_SHIFT 3
#else
-#error ZONES_SHIFT -- too many zones configured adjust calculation
+#error ZONES_SHIFT "Too many zones configured"
#endif
+#define ZONES_WIDTH ZONES_SHIFT
+
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
-
-/* SECTION_SHIFT #bits space required to store a section # */
#define SECTIONS_SHIFT (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)
+#else
+#define SECTIONS_SHIFT 0
+#endif
-#endif /* CONFIG_SPARSEMEM */
-
+#ifndef BUILD_VDSO32_64
/*
* page->flags layout:
*
@@ -52,17 +55,29 @@
#define SECTIONS_WIDTH 0
#endif
-#define ZONES_WIDTH ZONES_SHIFT
-
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_SHIFT \
+ <= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH NODES_SHIFT
-#else
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#elif defined(CONFIG_SPARSEMEM_VMEMMAP)
#error "Vmemmap: No space for nodes field in page flags"
-#endif
+#else
#define NODES_WIDTH 0
#endif
+/*
+ * Note that this #define MUST have a value so that it can be tested with
+ * the IS_ENABLED() macro.
+ */
+#if NODES_SHIFT != 0 && NODES_WIDTH == 0
+#define NODE_NOT_IN_PAGE_FLAGS 1
+#endif
+
+#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
+#define KASAN_TAG_WIDTH 8
+#else
+#define KASAN_TAG_WIDTH 0
+#endif
+
#ifdef CONFIG_NUMA_BALANCING
#define LAST__PID_SHIFT 8
#define LAST__PID_MASK ((1 << LAST__PID_SHIFT)-1)
@@ -75,22 +90,26 @@
#define LAST_CPUPID_SHIFT 0
#endif
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + \
+ KASAN_TAG_WIDTH + LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
#else
#define LAST_CPUPID_WIDTH 0
#endif
-/*
- * We are going to use the flags for the page to node mapping if its in
- * there. This includes the case where there is no node, so it is implicit.
- */
-#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
-#define NODE_NOT_IN_PAGE_FLAGS
+#if LAST_CPUPID_SHIFT != 0 && LAST_CPUPID_WIDTH == 0
+#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
#endif
-#if defined(CONFIG_NUMA_BALANCING) && LAST_CPUPID_WIDTH == 0
-#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
+#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + \
+ KASAN_TAG_WIDTH + LAST_CPUPID_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
+#error "Not enough bits in page flags"
#endif
+/* see the comment on MAX_NR_TIERS */
+#define LRU_REFS_WIDTH min(__LRU_REFS_WIDTH, BITS_PER_LONG - NR_PAGEFLAGS - \
+ ZONES_WIDTH - LRU_GEN_WIDTH - SECTIONS_WIDTH - \
+ NODES_WIDTH - KASAN_TAG_WIDTH - LAST_CPUPID_WIDTH)
+
+#endif
#endif /* _LINUX_PAGE_FLAGS_LAYOUT */
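
[Editor's note: to make the bit budget concrete, here is a worked example for
a hypothetical 64-bit SPARSEMEM (non-VMEMMAP) configuration; all numbers,
including NR_PAGEFLAGS = 24, are illustrative and not taken from any
particular defconfig.]

	SECTIONS_WIDTH = MAX_PHYSMEM_BITS - SECTION_SIZE_BITS = 46 - 27 = 19
	ZONES_WIDTH    = ZONES_SHIFT = 2	(MAX_NR_ZONES <= 4)
	NODES_SHIFT    = 6
	LRU_GEN_WIDTH  = 0			(multi-gen LRU disabled)

	2 + 0 + 19 + 6 = 27 <= BITS_PER_LONG - NR_PAGEFLAGS = 64 - 24 = 40

So NODES_WIDTH becomes NODES_SHIFT and the node id lives in page->flags. If
the sum exceeded the budget, NODES_WIDTH would fall back to 0 and
NODE_NOT_IN_PAGE_FLAGS would be defined (and the build would fail outright
under SPARSEMEM_VMEMMAP).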
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index d33e3280c8ad..1c68d67b832f 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Macros for manipulating and testing page->flags
*/
@@ -16,8 +17,37 @@
/*
* Various page->flags bits:
*
- * PG_reserved is set for special pages, which can never be swapped out. Some
- * of them might not even exist (eg empty_bad_page)...
+ * PG_reserved is set for special pages. The "struct page" of such a page
+ * should in general not be touched (e.g. set dirty) except by its owner.
+ * Pages marked as PG_reserved include:
+ * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
+ * initrd, HW tables)
+ * - Pages reserved or allocated early during boot (before the page allocator
+ * was initialized). This includes (depending on the architecture) the
+ * initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
+ * much more. Once (if ever) freed, PG_reserved is cleared and they will
+ * be given to the page allocator.
+ * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
+ * to read/write these pages might end badly. Don't touch!
+ * - The zero page(s)
+ * - Pages not added to the page allocator when onlining a section because
+ * they were excluded via the online_page_callback() or because they are
+ * PG_hwpoison.
+ * - Pages allocated in the context of kexec/kdump (loaded kernel image,
+ * control pages, vmcoreinfo)
+ * - MMIO/DMA pages. Some architectures don't allow to ioremap pages that are
+ * not marked PG_reserved (as they might be in use by somebody else who does
+ * not respect the caching strategy).
+ * - Pages part of an offline section (struct pages of offline sections should
+ * not be trusted as they will be initialized when first onlined).
+ * - MCA pages on ia64
+ * - Pages holding CPU notes for POWER Firmware Assisted Dump
+ * - Device memory (e.g. PMEM, DAX, HMM)
+ * Some PG_reserved pages will be excluded from the hibernation image.
+ * PG_reserved does in general not hinder anybody from dumping or swapping
+ * and is no longer required for remap_pfn_range(). ioremap might require it.
+ * Consequently, PG_reserved for a page mapped into user space can indicate
+ * the zero page, the vDSO, MMIO pages or device memory.
*
* The PG_private bitflag is set on pagecache pages if they contain filesystem
* specific data (which is normally at page->private). It can be used by
@@ -33,8 +63,10 @@
* page_waitqueue(page) is a wait queue of all tasks waiting for the page
* to become unlocked.
*
- * PG_uptodate tells whether the page's contents is valid. When a read
- * completes, the page becomes uptodate, unless a disk I/O error happened.
+ * PG_swapbacked is set when a page uses swap as backing storage. These are
+ * usually PageAnon or shmem pages but please note that even anonymous pages
+ * might lose their PG_swapbacked flag when they can simply be dropped (e.g.
+ * as a result of MADV_FREE).
*
* PG_referenced, PG_reclaim are used for page reclaim for anonymous and
* file-backed pagecache (see mm/vmscan.c).
@@ -45,19 +77,13 @@
* guarantees that this bit is cleared for a page when it first is entered into
* the page cache.
*
- * PG_highmem pages are not permanently mapped into the kernel virtual address
- * space, they need to be kmapped separately for doing IO on the pages. The
- * struct page (these bits with information) are always mapped into kernel
- * address space...
- *
* PG_hwpoison indicates that a page got corrupted in hardware and contains
* data with incorrect ECC bits that triggered a machine check. Accessing is
* not safe since it may cause another machine check. Don't touch!
*/
/*
- * Don't use the *_dontuse flags. Use the macros. Otherwise you'll break
- * locked- and dirty-page accounting.
+ * Don't use the pageflags directly. Use the PageFoo macros.
*
* The page flags field is split into two parts, the main flags area
* which extends from the low bits upwards, and the fields area which
@@ -73,13 +99,14 @@
*/
enum pageflags {
PG_locked, /* Page is locked. Don't touch. */
- PG_error,
PG_referenced,
PG_uptodate,
PG_dirty,
PG_lru,
PG_active,
+ PG_workingset,
PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
+ PG_error,
PG_slab,
PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/
PG_arch_1,
@@ -101,12 +128,27 @@ enum pageflags {
#ifdef CONFIG_MEMORY_FAILURE
PG_hwpoison, /* hardware poisoned page. Don't touch */
#endif
-#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
+#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
PG_young,
PG_idle,
#endif
+#ifdef CONFIG_ARCH_USES_PG_ARCH_X
+ PG_arch_2,
+ PG_arch_3,
+#endif
__NR_PAGEFLAGS,
+ PG_readahead = PG_reclaim,
+
+ /*
+ * Depending on the way an anonymous folio can be mapped into a page
+ * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
+ * THP), PG_anon_exclusive may be set only for the head page or for
+ * tail pages of an anonymous folio. For now, we only expect it to be
+ * set on tail pages for PTE-mapped THP.
+ */
+ PG_anon_exclusive = PG_mappedtodisk,
+
/* Filesystems */
PG_checked = PG_owner_priv_1,
@@ -126,43 +168,157 @@ enum pageflags {
PG_savepinned = PG_dirty,
/* Has a grant mapping of another (foreign) domain's page. */
PG_foreign = PG_owner_priv_1,
+ /* Remapped by swiotlb-xen. */
+ PG_xen_remapped = PG_owner_priv_1,
- /* SLOB */
- PG_slob_free = PG_private,
-
- /* Compound pages. Stored in first tail page's flags */
- PG_double_map = PG_private_2,
+#ifdef CONFIG_MEMORY_FAILURE
+ /*
+ * Compound pages. Stored in first tail page's flags.
+ * Indicates that at least one subpage is hwpoisoned in the
+ * THP.
+ */
+ PG_has_hwpoisoned = PG_error,
+#endif
/* non-lru isolated movable page */
PG_isolated = PG_reclaim,
+
+ /* Only valid for buddy pages. Used to track pages that are reported */
+ PG_reported = PG_uptodate,
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+ /* For self-hosted memmap pages */
+ PG_vmemmap_self_hosted = PG_owner_priv_1,
+#endif
};
+#define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1)
+
#ifndef __GENERATING_BOUNDS_H
-struct page; /* forward declaration */
+#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
+DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
+
+/*
+ * Return the real head page struct iff the @page is a fake head page, otherwise
+ * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
+ */
+static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
+{
+ if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
+ return page;
+
+ /*
+	 * Only addresses aligned with PAGE_SIZE of struct page may be fake head
+	 * struct page. The alignment check aims to avoid accessing the fields
+	 * (e.g. compound_head) of @page[1], so that a (possibly) cold cacheline
+	 * is not touched unnecessarily.
+ */
+ if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
+ test_bit(PG_head, &page->flags)) {
+		 * We can safely access the fields of @page[1] with PG_head
+		 * because the @page is a compound page composed of at least
+		 * two contiguous pages.
+ * two contiguous pages.
+ */
+ unsigned long head = READ_ONCE(page[1].compound_head);
+
+ if (likely(head & 1))
+ return (const struct page *)(head - 1);
+ }
+ return page;
+}
+#else
+static inline const struct page *page_fixed_fake_head(const struct page *page)
+{
+ return page;
+}
+#endif
+
+static __always_inline int page_is_fake_head(struct page *page)
+{
+ return page_fixed_fake_head(page) != page;
+}
-static inline struct page *compound_head(struct page *page)
+static inline unsigned long _compound_head(const struct page *page)
{
unsigned long head = READ_ONCE(page->compound_head);
if (unlikely(head & 1))
- return (struct page *) (head - 1);
- return page;
+ return head - 1;
+ return (unsigned long)page_fixed_fake_head(page);
}
+#define compound_head(page) ((typeof(page))_compound_head(page))
+
+/**
+ * page_folio - Converts from page to folio.
+ * @p: The page.
+ *
+ * Every page is part of a folio. This function cannot be called on a
+ * NULL pointer.
+ *
+ * Context: Neither a reference nor a lock is required on @p. If the
+ * caller does not hold a reference, this call may race with a folio
+ * split, so it should re-check that the folio still contains this page
+ * after gaining a reference on the folio.
+ * Return: The folio which contains this page.
+ */
+#define page_folio(p) (_Generic((p), \
+ const struct page *: (const struct folio *)_compound_head(p), \
+ struct page *: (struct folio *)_compound_head(p)))
+
+/**
+ * folio_page - Return a page from a folio.
+ * @folio: The folio.
+ * @n: The page number to return.
+ *
+ * @n is relative to the start of the folio. This function does not
+ * check that the page number lies within @folio; the caller is presumed
+ * to have a reference to the page.
+ */
+#define folio_page(folio, n) nth_page(&(folio)->page, n)
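+
+/*
+ * Illustrative note (not additional API): because page_folio() is
+ * implemented with _Generic(), constness propagates from the argument:
+ *
+ *	struct page *page;
+ *	struct folio *folio = page_folio(page);
+ *	const struct page *cp;
+ *	const struct folio *cf = page_folio(cp);
+ *
+ * Both forms resolve to _compound_head(), so a tail page yields its
+ * head page's folio and an order-0 page yields itself as a folio.
+ */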
+
static __always_inline int PageTail(struct page *page)
{
- return READ_ONCE(page->compound_head) & 1;
+ return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}
static __always_inline int PageCompound(struct page *page)
{
- return test_bit(PG_head, &page->flags) || PageTail(page);
+ return test_bit(PG_head, &page->flags) ||
+ READ_ONCE(page->compound_head) & 1;
+}
+
+#define PAGE_POISON_PATTERN -1l
+static inline int PagePoisoned(const struct page *page)
+{
+ return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
+}
+
+#ifdef CONFIG_DEBUG_VM
+void page_init_poison(struct page *page, size_t size);
+#else
+static inline void page_init_poison(struct page *page, size_t size)
+{
+}
+#endif
+
+static unsigned long *folio_flags(struct folio *folio, unsigned n)
+{
+ struct page *page = &folio->page;
+
+ VM_BUG_ON_PGFLAGS(PageTail(page), page);
+ VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
+ return &page[n].flags;
}
/*
* Page flags policies wrt compound pages
*
+ * PF_POISONED_CHECK
+ * check if this struct page is poisoned or uninitialized
+ *
* PF_ANY:
* the page flag is relevant for small, head and tail pages.
*
@@ -179,49 +335,86 @@ static __always_inline int PageCompound(struct page *page)
*
* PF_NO_COMPOUND:
* the page flag is not relevant for compound pages.
+ *
+ * PF_SECOND:
+ * the page flag is stored in the first tail page.
*/
-#define PF_ANY(page, enforce) page
-#define PF_HEAD(page, enforce) compound_head(page)
+#define PF_POISONED_CHECK(page) ({ \
+ VM_BUG_ON_PGFLAGS(PagePoisoned(page), page); \
+ page; })
+#define PF_ANY(page, enforce) PF_POISONED_CHECK(page)
+#define PF_HEAD(page, enforce) PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({ \
VM_BUG_ON_PGFLAGS(PageTail(page), page); \
- page;})
+ PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({ \
VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \
- compound_head(page);})
+ PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({ \
VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \
- page;})
+ PF_POISONED_CHECK(page); })
+#define PF_SECOND(page, enforce) ({ \
+ VM_BUG_ON_PGFLAGS(!PageHead(page), page); \
+ PF_POISONED_CHECK(&page[1]); })
+
+/* Which page is the flag stored in */
+#define FOLIO_PF_ANY 0
+#define FOLIO_PF_HEAD 0
+#define FOLIO_PF_ONLY_HEAD 0
+#define FOLIO_PF_NO_TAIL 0
+#define FOLIO_PF_NO_COMPOUND 0
+#define FOLIO_PF_SECOND 1
/*
* Macros to create function definitions for page flags
*/
#define TESTPAGEFLAG(uname, lname, policy) \
+static __always_inline bool folio_test_##lname(struct folio *folio) \
+{ return test_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int Page##uname(struct page *page) \
- { return test_bit(PG_##lname, &policy(page, 0)->flags); }
+{ return test_bit(PG_##lname, &policy(page, 0)->flags); }
#define SETPAGEFLAG(uname, lname, policy) \
+static __always_inline \
+void folio_set_##lname(struct folio *folio) \
+{ set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline void SetPage##uname(struct page *page) \
- { set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ set_bit(PG_##lname, &policy(page, 1)->flags); }
#define CLEARPAGEFLAG(uname, lname, policy) \
+static __always_inline \
+void folio_clear_##lname(struct folio *folio) \
+{ clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline void ClearPage##uname(struct page *page) \
- { clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ clear_bit(PG_##lname, &policy(page, 1)->flags); }
#define __SETPAGEFLAG(uname, lname, policy) \
+static __always_inline \
+void __folio_set_##lname(struct folio *folio) \
+{ __set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline void __SetPage##uname(struct page *page) \
- { __set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ __set_bit(PG_##lname, &policy(page, 1)->flags); }
#define __CLEARPAGEFLAG(uname, lname, policy) \
+static __always_inline \
+void __folio_clear_##lname(struct folio *folio) \
+{ __clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline void __ClearPage##uname(struct page *page) \
- { __clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }
#define TESTSETFLAG(uname, lname, policy) \
+static __always_inline \
+bool folio_test_set_##lname(struct folio *folio) \
+{ return test_and_set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestSetPage##uname(struct page *page) \
- { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
#define TESTCLEARFLAG(uname, lname, policy) \
+static __always_inline \
+bool folio_test_clear_##lname(struct folio *folio) \
+{ return test_and_clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestClearPage##uname(struct page *page) \
- { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
#define PAGEFLAG(uname, lname, policy) \
TESTPAGEFLAG(uname, lname, policy) \
@@ -237,43 +430,53 @@ static __always_inline int TestClearPage##uname(struct page *page) \
TESTSETFLAG(uname, lname, policy) \
TESTCLEARFLAG(uname, lname, policy)
-#define TESTPAGEFLAG_FALSE(uname) \
+#define TESTPAGEFLAG_FALSE(uname, lname) \
+static inline bool folio_test_##lname(const struct folio *folio) { return false; } \
static inline int Page##uname(const struct page *page) { return 0; }
-#define SETPAGEFLAG_NOOP(uname) \
+#define SETPAGEFLAG_NOOP(uname, lname) \
+static inline void folio_set_##lname(struct folio *folio) { } \
static inline void SetPage##uname(struct page *page) { }
-#define CLEARPAGEFLAG_NOOP(uname) \
+#define CLEARPAGEFLAG_NOOP(uname, lname) \
+static inline void folio_clear_##lname(struct folio *folio) { } \
static inline void ClearPage##uname(struct page *page) { }
-#define __CLEARPAGEFLAG_NOOP(uname) \
+#define __CLEARPAGEFLAG_NOOP(uname, lname) \
+static inline void __folio_clear_##lname(struct folio *folio) { } \
static inline void __ClearPage##uname(struct page *page) { }
-#define TESTSETFLAG_FALSE(uname) \
+#define TESTSETFLAG_FALSE(uname, lname) \
+static inline bool folio_test_set_##lname(struct folio *folio) \
+{ return 0; } \
static inline int TestSetPage##uname(struct page *page) { return 0; }
-#define TESTCLEARFLAG_FALSE(uname) \
+#define TESTCLEARFLAG_FALSE(uname, lname) \
+static inline bool folio_test_clear_##lname(struct folio *folio) \
+{ return 0; } \
static inline int TestClearPage##uname(struct page *page) { return 0; }
-#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname) \
- SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)
+#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname) \
+ SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)
-#define TESTSCFLAG_FALSE(uname) \
- TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
+#define TESTSCFLAG_FALSE(uname, lname) \
+ TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
-PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
-PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
+PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
+PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
+ TESTCLEARFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
TESTCLEARFLAG(Active, active, PF_HEAD)
+PAGEFLAG(Workingset, workingset, PF_HEAD)
+ TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
-__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */
/* Xen */
@@ -281,9 +484,12 @@ PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
+PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
+ TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
+ __SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
@@ -291,10 +497,9 @@ PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
/*
* Private page markings that may be used by the filesystem that owns the page
* for its own purposes.
- * - PG_private and PG_private_2 cause releasepage() and co to be invoked
+ * - PG_private and PG_private_2 cause release_folio() and co to be invoked
*/
-PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
- __CLEARPAGEFLAG(Private, private, PF_ANY)
+PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
@@ -303,15 +508,15 @@ PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
* Only test-and-set exist for PG_writeback. The unconditional operators are
* risky: they bypass page accounting.
*/
-TESTPAGEFLAG(Writeback, writeback, PF_NO_COMPOUND)
- TESTSCFLAG(Writeback, writeback, PF_NO_COMPOUND)
+TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
+ TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
-PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
- TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
+PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
+ TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)
#ifdef CONFIG_HIGHMEM
/*
@@ -319,23 +524,27 @@ PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
* available at this point.
*/
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
+#define folio_test_highmem(__f) is_highmem_idx(folio_zonenum(__f))
#else
-PAGEFLAG_FALSE(HighMem)
+PAGEFLAG_FALSE(HighMem, highmem)
#endif
#ifdef CONFIG_SWAP
-static __always_inline int PageSwapCache(struct page *page)
+static __always_inline bool folio_test_swapcache(struct folio *folio)
{
-#ifdef CONFIG_THP_SWAP
- page = compound_head(page);
-#endif
- return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
+ return folio_test_swapbacked(folio) &&
+ test_bit(PG_swapcache, folio_flags(folio, 0));
+}
+static __always_inline bool PageSwapCache(struct page *page)
+{
+ return folio_test_swapcache(page_folio(page));
}
+
SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
-PAGEFLAG_FALSE(SwapCache)
+PAGEFLAG_FALSE(SwapCache, swapcache)
#endif
PAGEFLAG(Unevictable, unevictable, PF_HEAD)
@@ -347,26 +556,31 @@ PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
-PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
- TESTSCFLAG_FALSE(Mlocked)
+PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
+ TESTSCFLAG_FALSE(Mlocked, mlocked)
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
-PAGEFLAG_FALSE(Uncached)
+PAGEFLAG_FALSE(Uncached, uncached)
#endif
#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
+#define MAGIC_HWPOISON 0x48575053U /* HWPS */
+extern void SetPageHWPoisonTakenOff(struct page *page);
+extern void ClearPageHWPoisonTakenOff(struct page *page);
+extern bool take_page_off_buddy(struct page *page);
+extern bool put_page_back_buddy(struct page *page);
#else
-PAGEFLAG_FALSE(HWPoison)
+PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif
-#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
+#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
@@ -374,6 +588,20 @@ PAGEFLAG(Idle, idle, PF_ANY)
#endif
/*
+ * PageReported() is used to track reported free pages within the Buddy
+ * allocator. We can use the non-atomic version of the test and set
+ * operations as both should be shielded with the zone lock to prevent
+ * any possible races on the setting or clearing of the bit.
+ */
+__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
+#else
+PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
+#endif
+
+/*
* On an anonymous page mapped into a user virtual memory area,
* page->mapping points to its anon_vma, not to a struct address_space;
* with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
@@ -384,7 +612,7 @@ PAGEFLAG(Idle, idle, PF_ANY)
* structure which KSM associates with that merged page. See ksm.h.
*
* PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
- * page and then page->mapping points a struct address_space.
+ * page and then page->mapping points to a struct movable_operations.
*
* Please note that, confusingly, "page_mapping" refers to the inode
* address_space which maps the page from disk; whereas "page_mapped"
@@ -395,15 +623,36 @@ PAGEFLAG(Idle, idle, PF_ANY)
#define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
+/*
+ * Unlike the flags above, this flag is used only for fsdax mode. It
+ * indicates that this page->mapping is now shared due to a reflink.
+ */
+#define PAGE_MAPPING_DAX_SHARED ((void *)0x1)
+
+static __always_inline bool folio_mapping_flags(struct folio *folio)
+{
+ return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
+}
+
static __always_inline int PageMappingFlags(struct page *page)
{
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}
-static __always_inline int PageAnon(struct page *page)
+static __always_inline bool folio_test_anon(struct folio *folio)
{
- page = compound_head(page);
- return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
+ return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
+}
+
+static __always_inline bool PageAnon(struct page *page)
+{
+ return folio_test_anon(page_folio(page));
+}
+
+static __always_inline bool __folio_test_movable(const struct folio *folio)
+{
+ return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
+ PAGE_MAPPING_MOVABLE;
}
static __always_inline int __PageMovable(struct page *page)
@@ -419,30 +668,42 @@ static __always_inline int __PageMovable(struct page *page)
* is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
* anon_vma, but to that page's node of the stable tree.
*/
-static __always_inline int PageKsm(struct page *page)
+static __always_inline bool folio_test_ksm(struct folio *folio)
{
- page = compound_head(page);
- return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+ return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
PAGE_MAPPING_KSM;
}
+
+static __always_inline bool PageKsm(struct page *page)
+{
+ return folio_test_ksm(page_folio(page));
+}
#else
-TESTPAGEFLAG_FALSE(Ksm)
+TESTPAGEFLAG_FALSE(Ksm, ksm)
#endif
u64 stable_page_flags(struct page *page);
-static inline int PageUptodate(struct page *page)
+/**
+ * folio_test_uptodate - Is this folio up to date?
+ * @folio: The folio.
+ *
+ * The uptodate flag is set on a folio when every byte in the folio is
+ * at least as new as the corresponding bytes on storage. Anonymous
+ * and CoW folios are always uptodate. If the folio is not uptodate,
+ * some of the bytes in it may be; see the is_partially_uptodate()
+ * address_space operation.
+ */
+static inline bool folio_test_uptodate(struct folio *folio)
{
- int ret;
- page = compound_head(page);
- ret = test_bit(PG_uptodate, &(page)->flags);
+ bool ret = test_bit(PG_uptodate, folio_flags(folio, 0));
/*
- * Must ensure that the data we read out of the page is loaded
- * _after_ we've loaded page->flags to check for PageUptodate.
- * We can skip the barrier if the page is not uptodate, because
+ * Must ensure that the data we read out of the folio is loaded
+ * _after_ we've loaded folio->flags to check the uptodate bit.
+ * We can skip the barrier if the folio is not uptodate, because
* we wouldn't be reading anything from it.
*
- * See SetPageUptodate() for the other side of the story.
+ * See folio_mark_uptodate() for the other side of the story.
*/
if (ret)
smp_rmb();
@@ -450,46 +711,78 @@ static inline int PageUptodate(struct page *page)
return ret;
}
-static __always_inline void __SetPageUptodate(struct page *page)
+static inline int PageUptodate(struct page *page)
+{
+ return folio_test_uptodate(page_folio(page));
+}
+
+static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
- VM_BUG_ON_PAGE(PageTail(page), page);
smp_wmb();
- __set_bit(PG_uptodate, &page->flags);
+ __set_bit(PG_uptodate, folio_flags(folio, 0));
}
-static __always_inline void SetPageUptodate(struct page *page)
+static __always_inline void folio_mark_uptodate(struct folio *folio)
{
- VM_BUG_ON_PAGE(PageTail(page), page);
/*
* Memory barrier must be issued before setting the PG_uptodate bit,
- * so that all previous stores issued in order to bring the page
- * uptodate are actually visible before PageUptodate becomes true.
+ * so that all previous stores issued in order to bring the folio
+ * uptodate are actually visible before folio_test_uptodate becomes true.
*/
smp_wmb();
- set_bit(PG_uptodate, &page->flags);
+ set_bit(PG_uptodate, folio_flags(folio, 0));
+}
+
+static __always_inline void __SetPageUptodate(struct page *page)
+{
+ __folio_mark_uptodate((struct folio *)page);
+}
+
+static __always_inline void SetPageUptodate(struct page *page)
+{
+ folio_mark_uptodate((struct folio *)page);
}
CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
-int test_clear_page_writeback(struct page *page);
-int __test_set_page_writeback(struct page *page, bool keep_write);
+bool __folio_start_writeback(struct folio *folio, bool keep_write);
+bool set_page_writeback(struct page *page);
-#define test_set_page_writeback(page) \
- __test_set_page_writeback(page, false)
-#define test_set_page_writeback_keepwrite(page) \
- __test_set_page_writeback(page, true)
+#define folio_start_writeback(folio) \
+ __folio_start_writeback(folio, false)
+#define folio_start_writeback_keepwrite(folio) \
+ __folio_start_writeback(folio, true)
-static inline void set_page_writeback(struct page *page)
+static inline bool test_set_page_writeback(struct page *page)
{
- test_set_page_writeback(page);
+ return set_page_writeback(page);
}
-static inline void set_page_writeback_keepwrite(struct page *page)
+static __always_inline bool folio_test_head(struct folio *folio)
{
- test_set_page_writeback_keepwrite(page);
+ return test_bit(PG_head, folio_flags(folio, FOLIO_PF_ANY));
}
-__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)
+static __always_inline int PageHead(struct page *page)
+{
+ PF_POISONED_CHECK(page);
+ return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
+}
+
+__SETPAGEFLAG(Head, head, PF_ANY)
+__CLEARPAGEFLAG(Head, head, PF_ANY)
+CLEARPAGEFLAG(Head, head, PF_ANY)
+
+/**
+ * folio_test_large() - Does this folio contain more than one page?
+ * @folio: The folio to test.
+ *
+ * Return: True if the folio is larger than one page.
+ */
+static inline bool folio_test_large(struct folio *folio)
+{
+ return folio_test_head(folio);
+}
static __always_inline void set_compound_head(struct page *page, struct page *head)
{
@@ -513,19 +806,11 @@ static inline void ClearPageCompound(struct page *page)
#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
-int PageHeadHuge(struct page *page);
-bool page_huge_active(struct page *page);
+bool folio_test_hugetlb(struct folio *folio);
#else
-TESTPAGEFLAG_FALSE(Huge)
-TESTPAGEFLAG_FALSE(HeadHuge)
-
-static inline bool page_huge_active(struct page *page)
-{
- return 0;
-}
+TESTPAGEFLAG_FALSE(Huge, hugetlb)
#endif
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* PageHuge() only returns true for hugetlbfs pages, but not for
@@ -541,6 +826,11 @@ static inline int PageTransHuge(struct page *page)
return PageHead(page);
}
+static inline bool folio_test_transhuge(struct folio *folio)
+{
+ return folio_test_head(folio);
+}
+
/*
* PageTransCompound returns true for both transparent huge pages
* and hugetlbfs pages, so it should only be called when it's known
@@ -552,27 +842,6 @@ static inline int PageTransCompound(struct page *page)
}
/*
- * PageTransCompoundMap is the same as PageTransCompound, but it also
- * guarantees the primary MMU has the entire compound page mapped
- * through pmd_trans_huge, which in turn guarantees the secondary MMUs
- * can also map the entire compound page. This allows the secondary
- * MMUs to call get_user_pages() only once for each compound page and
- * to immediately map the entire compound page with a single secondary
- * MMU fault. If there will be a pmd split later, the secondary MMUs
- * will get an update through the MMU notifier invalidation through
- * split_huge_pmd().
- *
- * Unlike PageTransCompound, this is safe to be called only while
- * split_huge_pmd() cannot run from under us, like if protected by the
- * MMU notifier, otherwise it may result in page->_mapcount < 0 false
- * positives.
- */
-static inline int PageTransCompoundMap(struct page *page)
-{
- return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
-}
-
-/*
* PageTransTail returns true for both transparent huge pages
* and hugetlbfs pages, so it should only be called when it's known
* that hugetlbfs pages aren't involved.
@@ -581,133 +850,160 @@ static inline int PageTransTail(struct page *page)
{
return PageTail(page);
}
+#else
+TESTPAGEFLAG_FALSE(TransHuge, transhuge)
+TESTPAGEFLAG_FALSE(TransCompound, transcompound)
+TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
+TESTPAGEFLAG_FALSE(TransTail, transtail)
+#endif
+#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
- * PageDoubleMap indicates that the compound page is mapped with PTEs as well
- * as PMDs.
- *
- * This is required for optimization of rmap operations for THP: we can postpone
- * per small page mapcount accounting (and its overhead from atomic operations)
- * until the first PMD split.
- *
- * For the page PageDoubleMap means ->_mapcount in all sub-pages is offset up
- * by one. This reference will go away with last compound_mapcount.
+ * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
+ * compound page.
*
- * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
+ * This flag is set by the hwpoison handler and cleared by THP split or
+ * page free.
*/
-static inline int PageDoubleMap(struct page *page)
-{
- return PageHead(page) && test_bit(PG_double_map, &page[1].flags);
-}
+PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
+ TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
+#else
+PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
+ TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
+#endif
-static inline void SetPageDoubleMap(struct page *page)
+/*
+ * Check if a page is currently marked HWPoisoned. Note that this check is
+ * best effort only and inherently racy: there is no way to synchronize with
+ * failing hardware.
+ */
+static inline bool is_page_hwpoison(struct page *page)
{
- VM_BUG_ON_PAGE(!PageHead(page), page);
- set_bit(PG_double_map, &page[1].flags);
+ if (PageHWPoison(page))
+ return true;
+ return PageHuge(page) && PageHWPoison(compound_head(page));
}
-static inline void ClearPageDoubleMap(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageHead(page), page);
- clear_bit(PG_double_map, &page[1].flags);
-}
-static inline int TestSetPageDoubleMap(struct page *page)
+/*
+ * For pages that are never mapped to userspace (and aren't PageSlab),
+ * page_type may be used. Because it is initialised to -1, we invert the
+ * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
+ * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
+ * low bits so that an underflow or overflow of page_mapcount() won't be
+ * mistaken for a page type value.
+ */
+
+#define PAGE_TYPE_BASE 0xf0000000
+/* Reserve 0x0000007f to catch underflows of page_mapcount */
+#define PAGE_MAPCOUNT_RESERVE -128
+#define PG_buddy 0x00000080
+#define PG_offline 0x00000100
+#define PG_table 0x00000200
+#define PG_guard 0x00000400
+
+#define PageType(page, flag) \
+ ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
+
+static inline int page_type_has_type(unsigned int page_type)
{
- VM_BUG_ON_PAGE(!PageHead(page), page);
- return test_and_set_bit(PG_double_map, &page[1].flags);
+ return (int)page_type < PAGE_MAPCOUNT_RESERVE;
}
-static inline int TestClearPageDoubleMap(struct page *page)
+static inline int page_has_type(struct page *page)
{
- VM_BUG_ON_PAGE(!PageHead(page), page);
- return test_and_clear_bit(PG_double_map, &page[1].flags);
+ return page_type_has_type(page->page_type);
}
-#else
-TESTPAGEFLAG_FALSE(TransHuge)
-TESTPAGEFLAG_FALSE(TransCompound)
-TESTPAGEFLAG_FALSE(TransCompoundMap)
-TESTPAGEFLAG_FALSE(TransTail)
-PAGEFLAG_FALSE(DoubleMap)
- TESTSETFLAG_FALSE(DoubleMap)
- TESTCLEARFLAG_FALSE(DoubleMap)
-#endif
-
-/*
- * For pages that are never mapped to userspace, page->mapcount may be
- * used for storing extra information about page type. Any value used
- * for this purpose must be <= -2, but it's better start not too close
- * to -2 so that an underflow of the page_mapcount() won't be mistaken
- * for a special page.
- */
-#define PAGE_MAPCOUNT_OPS(uname, lname) \
+#define PAGE_TYPE_OPS(uname, lname) \
static __always_inline int Page##uname(struct page *page) \
{ \
- return atomic_read(&page->_mapcount) == \
- PAGE_##lname##_MAPCOUNT_VALUE; \
+ return PageType(page, PG_##lname); \
} \
static __always_inline void __SetPage##uname(struct page *page) \
{ \
- VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); \
- atomic_set(&page->_mapcount, PAGE_##lname##_MAPCOUNT_VALUE); \
+ VM_BUG_ON_PAGE(!PageType(page, 0), page); \
+ page->page_type &= ~PG_##lname; \
} \
static __always_inline void __ClearPage##uname(struct page *page) \
{ \
VM_BUG_ON_PAGE(!Page##uname(page), page); \
- atomic_set(&page->_mapcount, -1); \
+ page->page_type |= PG_##lname; \
}
/*
- * PageBuddy() indicate that the page is free and in the buddy system
+ * PageBuddy() indicates that the page is free and in the buddy system
* (see mm/page_alloc.c).
*/
-#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
-PAGE_MAPCOUNT_OPS(Buddy, BUDDY)
+PAGE_TYPE_OPS(Buddy, buddy)
/*
- * PageBalloon() is set on pages that are on the balloon page list
- * (see mm/balloon_compaction.c).
+ * PageOffline() indicates that the page is logically offline although the
+ * containing section is online (e.g. inflated in a balloon driver or
+ * not onlined when onlining the section).
+ * The content of these pages is effectively stale. Such pages should not
+ * be touched (read/write/dump/save) except by their owner.
+ *
+ * If a driver wants to allow offlining unmovable PageOffline() pages without
+ * putting them back to the buddy, it can do so via the memory notifier by
+ * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
+ * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
+ * pages (now with a reference count of zero) are treated like free pages,
+ * allowing the containing memory block to get offlined. A driver that
+ * relies on this feature is aware that re-onlining the memory block will
+ * require it to re-set the pages PageOffline() instead of giving them to
+ * the buddy via online_page_callback_t.
+ *
+ * There are drivers that mark a page PageOffline() and expect there won't be
+ * any further access to page content. PFN walkers that read content of random
+ * pages should check PageOffline() and synchronize with such drivers using
+ * page_offline_freeze()/page_offline_thaw().
*/
-#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
-PAGE_MAPCOUNT_OPS(Balloon, BALLOON)
+PAGE_TYPE_OPS(Offline, offline)
+
+extern void page_offline_freeze(void);
+extern void page_offline_thaw(void);
+extern void page_offline_begin(void);
+extern void page_offline_end(void);
/*
- * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
- * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
+ * Marks pages in use as page tables.
*/
-#define PAGE_KMEMCG_MAPCOUNT_VALUE (-512)
-PAGE_MAPCOUNT_OPS(Kmemcg, KMEMCG)
+PAGE_TYPE_OPS(Table, table)
+
+/*
+ * Marks guardpages used with debug_pagealloc.
+ */
+PAGE_TYPE_OPS(Guard, guard)
extern bool is_free_buddy_page(struct page *page);
-__PAGEFLAG(Isolated, isolated, PF_ANY);
+PAGEFLAG(Isolated, isolated, PF_ANY);
-/*
- * If network-based swap is enabled, sl*b must keep track of whether pages
- * were allocated from pfmemalloc reserves.
- */
-static inline int PageSlabPfmemalloc(struct page *page)
+static __always_inline int PageAnonExclusive(struct page *page)
{
- VM_BUG_ON_PAGE(!PageSlab(page), page);
- return PageActive(page);
+ VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
+ VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
+ return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}
-static inline void SetPageSlabPfmemalloc(struct page *page)
+static __always_inline void SetPageAnonExclusive(struct page *page)
{
- VM_BUG_ON_PAGE(!PageSlab(page), page);
- SetPageActive(page);
+ VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
+ VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
+ set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}
-static inline void __ClearPageSlabPfmemalloc(struct page *page)
+static __always_inline void ClearPageAnonExclusive(struct page *page)
{
- VM_BUG_ON_PAGE(!PageSlab(page), page);
- __ClearPageActive(page);
+ VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
+ VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
+ clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}
-static inline void ClearPageSlabPfmemalloc(struct page *page)
+static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
- VM_BUG_ON_PAGE(!PageSlab(page), page);
- ClearPageActive(page);
+ VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
+ VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
+ __clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}
#ifdef CONFIG_MMU
@@ -718,25 +1014,25 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
/*
* Flags checked when a page is freed. Pages being freed should not have
- * these flags set. It they are, there is a problem.
+ * these flags set. If they are, there is a problem.
*/
#define PAGE_FLAGS_CHECK_AT_FREE \
(1UL << PG_lru | 1UL << PG_locked | \
1UL << PG_private | 1UL << PG_private_2 | \
1UL << PG_writeback | 1UL << PG_reserved | \
1UL << PG_slab | 1UL << PG_active | \
- 1UL << PG_unevictable | __PG_MLOCKED)
+ 1UL << PG_unevictable | __PG_MLOCKED | LRU_GEN_MASK)
/*
* Flags checked when a page is prepped for return by the page allocator.
- * Pages being prepped should not have these flags set. It they are set,
+ * Pages being prepped should not have these flags set. If they are set,
* there has been a kernel bug or struct page corruption.
*
* __PG_HWPOISON is exceptional because it needs to be kept beyond page's
* alloc-free cycle to prevent from reusing the page.
*/
#define PAGE_FLAGS_CHECK_AT_PREP \
- (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
+ ((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)
#define PAGE_FLAGS_PRIVATE \
(1UL << PG_private | 1UL << PG_private_2)
@@ -752,11 +1048,17 @@ static inline int page_has_private(struct page *page)
return !!(page->flags & PAGE_FLAGS_PRIVATE);
}
+static inline bool folio_has_private(struct folio *folio)
+{
+ return page_has_private(&folio->page);
+}
+
#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
+#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */
#endif /* PAGE_FLAGS_H */
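
[Editor's note: since the inverted page_type convention above is easy to trip
over, here is what PAGE_TYPE_OPS(Buddy, buddy) expands to, written out for
illustration -- this is just the macro result, not additional API.]

	/* page_type is initialised to -1 (all bits set), so a page "has"
	 * type PG_buddy while that bit is cleared and the PAGE_TYPE_BASE
	 * bits are still intact. */
	static __always_inline int PageBuddy(struct page *page)
	{
		return PageType(page, PG_buddy);	/* bit clear => buddy */
	}
	static __always_inline void __SetPageBuddy(struct page *page)
	{
		VM_BUG_ON_PAGE(!PageType(page, 0), page);
		page->page_type &= ~PG_buddy;		/* clearing sets the type */
	}
	static __always_inline void __ClearPageBuddy(struct page *page)
	{
		VM_BUG_ON_PAGE(!PageBuddy(page), page);
		page->page_type |= PG_buddy;		/* restoring clears it */
	}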
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index d4cd2014fa6f..5456b7be38ae 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PAGEISOLATION_H
#define __LINUX_PAGEISOLATION_H
@@ -29,40 +30,34 @@ static inline bool is_migrate_isolate(int migratetype)
}
#endif
-bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
- bool skip_hwpoisoned_pages);
+#define MEMORY_OFFLINE 0x1
+#define REPORT_FAILURE 0x2
+
void set_pageblock_migratetype(struct page *page, int migratetype);
int move_freepages_block(struct zone *zone, struct page *page,
int migratetype, int *num_movable);
/*
* Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
- * If specified range includes migrate types other than MOVABLE or CMA,
- * this will fail with -EBUSY.
- *
- * For isolating all pages in the range finally, the caller have to
- * free all pages in the range. test_page_isolated() can be used for
- * test it.
*/
int
start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- unsigned migratetype, bool skip_hwpoisoned_pages);
+ int migratetype, int flags, gfp_t gfp_flags);
/*
* Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
* target range is [start_pfn, end_pfn)
*/
-int
+void
undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- unsigned migratetype);
+ int migratetype);
/*
* Test all pages in [start_pfn, end_pfn) are isolated or not.
*/
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
- bool skip_hwpoisoned_pages);
+ int isol_flags);
-struct page *alloc_migrate_target(struct page *page, unsigned long private,
- int **resultp);
+struct page *alloc_migrate_target(struct page *page, unsigned long private);
#endif
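
[Editor's note: a sketch of the usual calling sequence over a PFN range,
matching the updated signatures above; error handling is elided and the flag
and migratetype choices are illustrative. test_pages_isolated() returning 0
is taken to mean every free page in the range is isolated.]

	int ret;

	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				       MEMORY_OFFLINE | REPORT_FAILURE,
				       GFP_KERNEL);
	if (ret)
		return ret;

	/* ... migrate or free the pages in the range ... */

	ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE);

	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);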
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
index 7e62920a3a94..c141ea9a95ef 100644
--- a/include/linux/page_counter.h
+++ b/include/linux/page_counter.h
@@ -1,19 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_COUNTER_H
#define _LINUX_PAGE_COUNTER_H
#include <linux/atomic.h>
+#include <linux/cache.h>
#include <linux/kernel.h>
#include <asm/page.h>
struct page_counter {
- atomic_long_t count;
- unsigned long limit;
- struct page_counter *parent;
+ /*
+	 * Make sure 'usage' does not share a cacheline with any other field.
+	 * memcg->memory.usage is a hot member of struct mem_cgroup.
+ */
+ atomic_long_t usage;
+ CACHELINE_PADDING(_pad1_);
+
+ /* effective memory.min and memory.min usage tracking */
+ unsigned long emin;
+ atomic_long_t min_usage;
+ atomic_long_t children_min_usage;
+
+ /* effective memory.low and memory.low usage tracking */
+ unsigned long elow;
+ atomic_long_t low_usage;
+ atomic_long_t children_low_usage;
- /* legacy */
unsigned long watermark;
unsigned long failcnt;
-};
+
+	/* Keep all the mostly-read fields in a separate cacheline. */
+ CACHELINE_PADDING(_pad2_);
+
+ unsigned long min;
+ unsigned long low;
+ unsigned long high;
+ unsigned long max;
+ struct page_counter *parent;
+} ____cacheline_internodealigned_in_smp;
#if BITS_PER_LONG == 32
#define PAGE_COUNTER_MAX LONG_MAX
@@ -24,14 +47,14 @@ struct page_counter {
static inline void page_counter_init(struct page_counter *counter,
struct page_counter *parent)
{
- atomic_long_set(&counter->count, 0);
- counter->limit = PAGE_COUNTER_MAX;
+ atomic_long_set(&counter->usage, 0);
+ counter->max = PAGE_COUNTER_MAX;
counter->parent = parent;
}
static inline unsigned long page_counter_read(struct page_counter *counter)
{
- return atomic_long_read(&counter->count);
+ return atomic_long_read(&counter->usage);
}
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
@@ -40,7 +63,16 @@ bool page_counter_try_charge(struct page_counter *counter,
unsigned long nr_pages,
struct page_counter **fail);
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
-int page_counter_limit(struct page_counter *counter, unsigned long limit);
+void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages);
+void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages);
+
+static inline void page_counter_set_high(struct page_counter *counter,
+ unsigned long nr_pages)
+{
+ WRITE_ONCE(counter->high, nr_pages);
+}
+
+int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
int page_counter_memparse(const char *buf, const char *max,
unsigned long *nr_pages);
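
A minimal sketch of the renamed page_counter API, using a hypothetical parent/child pair; the return value of page_counter_set_max() is ignored for brevity:

	#include <linux/page_counter.h>

	/* Hypothetical sketch: hierarchical charging with the usage/max naming. */
	static int counter_sketch(void)
	{
		struct page_counter parent, child, *fail;

		page_counter_init(&parent, NULL);
		page_counter_init(&child, &parent);
		page_counter_set_max(&parent, 512);	/* limit parent to 512 pages */

		/* Charges propagate upward; 'fail' reports the level that hit max. */
		if (!page_counter_try_charge(&child, 64, &fail))
			return -ENOMEM;

		page_counter_uncharge(&child, 64);	/* also uncharges the parent */
		return 0;
	}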
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 9298c393ddaa..67314f648aeb 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PAGE_EXT_H
#define __LINUX_PAGE_EXT_H
@@ -6,30 +7,37 @@
#include <linux/stackdepot.h>
struct pglist_data;
+
+/**
+ * struct page_ext_operations - per page_ext client operations
+ * @offset: Offset to the client's data within page_ext. Offset is returned to
+ * the client by page_ext_init.
+ * @size: The size of the client data within page_ext.
+ * @need: Function that returns true if client requires page_ext.
+ * @init: (optional) Called to initialize client once page_exts are allocated.
+ * @need_shared_flags: True when the client is using the shared
+ * page_ext->flags field.
+ *
+ * Each Page Extension client must define page_ext_operations in
+ * page_ext_ops array.
+ */
struct page_ext_operations {
size_t offset;
size_t size;
bool (*need)(void);
void (*init)(void);
+ bool need_shared_flags;
};
#ifdef CONFIG_PAGE_EXTENSION
/*
- * page_ext->flags bits:
- *
- * PAGE_EXT_DEBUG_POISON is set for poisoned pages. This is used to
- * implement generic debug pagealloc feature. The pages are filled with
- * poison patterns and set this flag after free_pages(). The poisoned
- * pages are verified whether the patterns are not corrupted and clear
- * the flag before alloc_pages().
+ * Users of page_ext_flags must set need_shared_flags to true.
*/
-
enum page_ext_flags {
- PAGE_EXT_DEBUG_POISON, /* Page is poisoned */
- PAGE_EXT_DEBUG_GUARD,
PAGE_EXT_OWNER,
-#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
+ PAGE_EXT_OWNER_ALLOCATED,
+#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
PAGE_EXT_YOUNG,
PAGE_EXT_IDLE,
#endif
@@ -46,40 +54,72 @@ struct page_ext {
unsigned long flags;
};
+extern bool early_page_ext;
+extern unsigned long page_ext_size;
extern void pgdat_page_ext_init(struct pglist_data *pgdat);
+static inline bool early_page_ext_enabled(void)
+{
+ return early_page_ext;
+}
+
#ifdef CONFIG_SPARSEMEM
static inline void page_ext_init_flatmem(void)
{
}
extern void page_ext_init(void);
+static inline void page_ext_init_flatmem_late(void)
+{
+}
#else
extern void page_ext_init_flatmem(void);
+extern void page_ext_init_flatmem_late(void);
static inline void page_ext_init(void)
{
}
#endif
-struct page_ext *lookup_page_ext(struct page *page);
+extern struct page_ext *page_ext_get(struct page *page);
+extern void page_ext_put(struct page_ext *page_ext);
+
+static inline struct page_ext *page_ext_next(struct page_ext *curr)
+{
+ void *next = curr;
+ next += page_ext_size;
+ return next;
+}
#else /* !CONFIG_PAGE_EXTENSION */
struct page_ext;
-static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
+static inline bool early_page_ext_enabled(void)
{
+ return false;
}
-static inline struct page_ext *lookup_page_ext(struct page *page)
+static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
{
- return NULL;
}
static inline void page_ext_init(void)
{
}
+static inline void page_ext_init_flatmem_late(void)
+{
+}
+
static inline void page_ext_init_flatmem(void)
{
}
+
+static inline struct page_ext *page_ext_get(struct page *page)
+{
+ return NULL;
+}
+
+static inline void page_ext_put(struct page_ext *page_ext)
+{
+}
#endif /* CONFIG_PAGE_EXTENSION */
#endif /* __LINUX_PAGE_EXT_H */
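
A minimal sketch of the page_ext_get()/page_ext_put() pairing that replaces bare lookup_page_ext(); flag_nr is a hypothetical client bit:

	#include <linux/bitops.h>
	#include <linux/page_ext.h>

	static bool page_ext_flag_sketch(struct page *page, int flag_nr)
	{
		struct page_ext *page_ext = page_ext_get(page);
		bool set;

		if (unlikely(!page_ext))	/* page_ext may be unused */
			return false;
		set = test_bit(flag_nr, &page_ext->flags);
		page_ext_put(page_ext);		/* drops the protection taken by _get() */

		return set;
	}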
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
index fec40271339f..5cb7bd2078ec 100644
--- a/include/linux/page_idle.h
+++ b/include/linux/page_idle.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_PAGE_IDLE_H
#define _LINUX_MM_PAGE_IDLE_H
@@ -5,135 +6,147 @@
#include <linux/page-flags.h>
#include <linux/page_ext.h>
-#ifdef CONFIG_IDLE_PAGE_TRACKING
+#ifdef CONFIG_PAGE_IDLE_FLAG
-#ifdef CONFIG_64BIT
-static inline bool page_is_young(struct page *page)
-{
- return PageYoung(page);
-}
-
-static inline void set_page_young(struct page *page)
-{
- SetPageYoung(page);
-}
-
-static inline bool test_and_clear_page_young(struct page *page)
-{
- return TestClearPageYoung(page);
-}
-
-static inline bool page_is_idle(struct page *page)
-{
- return PageIdle(page);
-}
-
-static inline void set_page_idle(struct page *page)
-{
- SetPageIdle(page);
-}
-
-static inline void clear_page_idle(struct page *page)
-{
- ClearPageIdle(page);
-}
-#else /* !CONFIG_64BIT */
+#ifndef CONFIG_64BIT
/*
* If there is not enough space to store Idle and Young bits in page flags, use
* page ext flags instead.
*/
-extern struct page_ext_operations page_idle_ops;
-
-static inline bool page_is_young(struct page *page)
+static inline bool folio_test_young(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
+ bool page_young;
if (unlikely(!page_ext))
return false;
- return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_young = test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_ext_put(page_ext);
+
+ return page_young;
}
-static inline void set_page_young(struct page *page)
+static inline void folio_set_young(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
if (unlikely(!page_ext))
return;
set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_ext_put(page_ext);
}
-static inline bool test_and_clear_page_young(struct page *page)
+static inline bool folio_test_clear_young(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
+ bool page_young;
if (unlikely(!page_ext))
return false;
- return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_young = test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_ext_put(page_ext);
+
+ return page_young;
}
-static inline bool page_is_idle(struct page *page)
+static inline bool folio_test_idle(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
+ bool page_idle;
if (unlikely(!page_ext))
return false;
- return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_idle = test_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_ext_put(page_ext);
+
+ return page_idle;
}
-static inline void set_page_idle(struct page *page)
+static inline void folio_set_idle(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
if (unlikely(!page_ext))
return;
set_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_ext_put(page_ext);
}
-static inline void clear_page_idle(struct page *page)
+static inline void folio_clear_idle(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
if (unlikely(!page_ext))
return;
clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_ext_put(page_ext);
}
-#endif /* CONFIG_64BIT */
+#endif /* !CONFIG_64BIT */
-#else /* !CONFIG_IDLE_PAGE_TRACKING */
+#else /* !CONFIG_PAGE_IDLE_FLAG */
-static inline bool page_is_young(struct page *page)
+static inline bool folio_test_young(struct folio *folio)
+{
+ return false;
+}
+
+static inline void folio_set_young(struct folio *folio)
+{
+}
+
+static inline bool folio_test_clear_young(struct folio *folio)
+{
+ return false;
+}
+
+static inline bool folio_test_idle(struct folio *folio)
{
return false;
}
+static inline void folio_set_idle(struct folio *folio)
+{
+}
+
+static inline void folio_clear_idle(struct folio *folio)
+{
+}
+
+#endif /* CONFIG_PAGE_IDLE_FLAG */
+
+static inline bool page_is_young(struct page *page)
+{
+ return folio_test_young(page_folio(page));
+}
+
static inline void set_page_young(struct page *page)
{
+ folio_set_young(page_folio(page));
}
static inline bool test_and_clear_page_young(struct page *page)
{
- return false;
+ return folio_test_clear_young(page_folio(page));
}
static inline bool page_is_idle(struct page *page)
{
- return false;
+ return folio_test_idle(page_folio(page));
}
static inline void set_page_idle(struct page *page)
{
+ folio_set_idle(page_folio(page));
}
static inline void clear_page_idle(struct page *page)
{
+ folio_clear_idle(page_folio(page));
}
-
-#endif /* CONFIG_IDLE_PAGE_TRACKING */
-
#endif /* _LINUX_MM_PAGE_IDLE_H */
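
A minimal sketch of the idle-tracking idiom built from the folio accessors above; the scan helper itself is hypothetical:

	#include <linux/page_idle.h>

	/*
	 * A folio still marked idle on this pass was not referenced since the
	 * previous pass (any access clears the flag); arm it for the next one.
	 */
	static bool folio_was_idle_sketch(struct folio *folio)
	{
		bool was_idle = folio_test_idle(folio);

		folio_set_idle(folio);
		return was_idle;
	}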
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 2be728d156b5..119a0c9d2a8b 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PAGE_OWNER_H
#define __LINUX_PAGE_OWNER_H
@@ -7,51 +8,51 @@
extern struct static_key_false page_owner_inited;
extern struct page_ext_operations page_owner_ops;
-extern void __reset_page_owner(struct page *page, unsigned int order);
+extern void __reset_page_owner(struct page *page, unsigned short order);
extern void __set_page_owner(struct page *page,
- unsigned int order, gfp_t gfp_mask);
-extern void __split_page_owner(struct page *page, unsigned int order);
-extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
+ unsigned short order, gfp_t gfp_mask);
+extern void __split_page_owner(struct page *page, unsigned int nr);
+extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
extern void __set_page_owner_migrate_reason(struct page *page, int reason);
-extern void __dump_page_owner(struct page *page);
+extern void __dump_page_owner(const struct page *page);
extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone);
-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void reset_page_owner(struct page *page, unsigned short order)
{
if (static_branch_unlikely(&page_owner_inited))
__reset_page_owner(page, order);
}
static inline void set_page_owner(struct page *page,
- unsigned int order, gfp_t gfp_mask)
+ unsigned short order, gfp_t gfp_mask)
{
if (static_branch_unlikely(&page_owner_inited))
__set_page_owner(page, order, gfp_mask);
}
-static inline void split_page_owner(struct page *page, unsigned int order)
+static inline void split_page_owner(struct page *page, unsigned int nr)
{
if (static_branch_unlikely(&page_owner_inited))
- __split_page_owner(page, order);
+ __split_page_owner(page, nr);
}
-static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+static inline void folio_copy_owner(struct folio *newfolio, struct folio *old)
{
if (static_branch_unlikely(&page_owner_inited))
- __copy_page_owner(oldpage, newpage);
+ __folio_copy_owner(newfolio, old);
}
static inline void set_page_owner_migrate_reason(struct page *page, int reason)
{
if (static_branch_unlikely(&page_owner_inited))
__set_page_owner_migrate_reason(page, reason);
}
-static inline void dump_page_owner(struct page *page)
+static inline void dump_page_owner(const struct page *page)
{
if (static_branch_unlikely(&page_owner_inited))
__dump_page_owner(page);
}
#else
-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void reset_page_owner(struct page *page, unsigned short order)
{
}
static inline void set_page_owner(struct page *page,
@@ -59,16 +60,16 @@ static inline void set_page_owner(struct page *page,
{
}
static inline void split_page_owner(struct page *page,
- unsigned int order)
+ unsigned short order)
{
}
-static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
{
}
static inline void set_page_owner_migrate_reason(struct page *page, int reason)
{
}
-static inline void dump_page_owner(struct page *page)
+static inline void dump_page_owner(const struct page *page)
{
}
#endif /* CONFIG_PAGE_OWNER */
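
A minimal sketch of the static-key wrapper idiom this header uses throughout: callers pay only a patched-out branch until page_owner is enabled on the command line. The hook name is hypothetical:

	#include <linux/page_owner.h>

	static inline void debug_hook_sketch(struct page *page)
	{
		if (static_branch_unlikely(&page_owner_inited))
			__dump_page_owner(page);	/* slow path, only when enabled */
	}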
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index 1fd71733aa68..d7c2d33baa7f 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_REF_H
#define _LINUX_PAGE_REF_H
@@ -6,13 +7,13 @@
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>
-extern struct tracepoint __tracepoint_page_ref_set;
-extern struct tracepoint __tracepoint_page_ref_mod;
-extern struct tracepoint __tracepoint_page_ref_mod_and_test;
-extern struct tracepoint __tracepoint_page_ref_mod_and_return;
-extern struct tracepoint __tracepoint_page_ref_mod_unless;
-extern struct tracepoint __tracepoint_page_ref_freeze;
-extern struct tracepoint __tracepoint_page_ref_unfreeze;
+DECLARE_TRACEPOINT(page_ref_set);
+DECLARE_TRACEPOINT(page_ref_mod);
+DECLARE_TRACEPOINT(page_ref_mod_and_test);
+DECLARE_TRACEPOINT(page_ref_mod_and_return);
+DECLARE_TRACEPOINT(page_ref_mod_unless);
+DECLARE_TRACEPOINT(page_ref_freeze);
+DECLARE_TRACEPOINT(page_ref_unfreeze);
#ifdef CONFIG_DEBUG_PAGE_REF
@@ -23,7 +24,7 @@ extern struct tracepoint __tracepoint_page_ref_unfreeze;
*
* See trace_##name##_enabled(void) in include/linux/tracepoint.h
*/
-#define page_ref_tracepoint_active(t) static_key_false(&(t).key)
+#define page_ref_tracepoint_active(t) tracepoint_enabled(t)
extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
@@ -61,23 +62,50 @@ static inline void __page_ref_unfreeze(struct page *page, int v)
#endif
-static inline int page_ref_count(struct page *page)
+static inline int page_ref_count(const struct page *page)
{
return atomic_read(&page->_refcount);
}
-static inline int page_count(struct page *page)
+/**
+ * folio_ref_count - The reference count on this folio.
+ * @folio: The folio.
+ *
+ * The refcount is usually incremented by calls to folio_get() and
+ * decremented by calls to folio_put(). Some typical users of the
+ * folio refcount:
+ *
+ * - Each reference from a page table
+ * - The page cache
+ * - Filesystem private data
+ * - The LRU list
+ * - Pipes
+ * - Direct IO which references this page in the process address space
+ *
+ * Return: The number of references to this folio.
+ */
+static inline int folio_ref_count(const struct folio *folio)
{
- return atomic_read(&compound_head(page)->_refcount);
+ return page_ref_count(&folio->page);
+}
+
+static inline int page_count(const struct page *page)
+{
+ return folio_ref_count(page_folio(page));
}
static inline void set_page_count(struct page *page, int v)
{
atomic_set(&page->_refcount, v);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
+ if (page_ref_tracepoint_active(page_ref_set))
__page_ref_set(page, v);
}
+static inline void folio_set_count(struct folio *folio, int v)
+{
+ set_page_count(&folio->page, v);
+}
+
/*
* Setup the page count before being freed into the page allocator for
* the first time (boot or memory hotplug)
@@ -90,94 +118,227 @@ static inline void init_page_count(struct page *page)
static inline void page_ref_add(struct page *page, int nr)
{
atomic_add(nr, &page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+ if (page_ref_tracepoint_active(page_ref_mod))
__page_ref_mod(page, nr);
}
+static inline void folio_ref_add(struct folio *folio, int nr)
+{
+ page_ref_add(&folio->page, nr);
+}
+
static inline void page_ref_sub(struct page *page, int nr)
{
atomic_sub(nr, &page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+ if (page_ref_tracepoint_active(page_ref_mod))
__page_ref_mod(page, -nr);
}
+static inline void folio_ref_sub(struct folio *folio, int nr)
+{
+ page_ref_sub(&folio->page, nr);
+}
+
+static inline int page_ref_sub_return(struct page *page, int nr)
+{
+ int ret = atomic_sub_return(nr, &page->_refcount);
+
+ if (page_ref_tracepoint_active(page_ref_mod_and_return))
+ __page_ref_mod_and_return(page, -nr, ret);
+ return ret;
+}
+
+static inline int folio_ref_sub_return(struct folio *folio, int nr)
+{
+ return page_ref_sub_return(&folio->page, nr);
+}
+
static inline void page_ref_inc(struct page *page)
{
atomic_inc(&page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+ if (page_ref_tracepoint_active(page_ref_mod))
__page_ref_mod(page, 1);
}
+static inline void folio_ref_inc(struct folio *folio)
+{
+ page_ref_inc(&folio->page);
+}
+
static inline void page_ref_dec(struct page *page)
{
atomic_dec(&page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+ if (page_ref_tracepoint_active(page_ref_mod))
__page_ref_mod(page, -1);
}
+static inline void folio_ref_dec(struct folio *folio)
+{
+ page_ref_dec(&folio->page);
+}
+
static inline int page_ref_sub_and_test(struct page *page, int nr)
{
int ret = atomic_sub_and_test(nr, &page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
+ if (page_ref_tracepoint_active(page_ref_mod_and_test))
__page_ref_mod_and_test(page, -nr, ret);
return ret;
}
+static inline int folio_ref_sub_and_test(struct folio *folio, int nr)
+{
+ return page_ref_sub_and_test(&folio->page, nr);
+}
+
static inline int page_ref_inc_return(struct page *page)
{
int ret = atomic_inc_return(&page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
+ if (page_ref_tracepoint_active(page_ref_mod_and_return))
__page_ref_mod_and_return(page, 1, ret);
return ret;
}
+static inline int folio_ref_inc_return(struct folio *folio)
+{
+ return page_ref_inc_return(&folio->page);
+}
+
static inline int page_ref_dec_and_test(struct page *page)
{
int ret = atomic_dec_and_test(&page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
+ if (page_ref_tracepoint_active(page_ref_mod_and_test))
__page_ref_mod_and_test(page, -1, ret);
return ret;
}
+static inline int folio_ref_dec_and_test(struct folio *folio)
+{
+ return page_ref_dec_and_test(&folio->page);
+}
+
static inline int page_ref_dec_return(struct page *page)
{
int ret = atomic_dec_return(&page->_refcount);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
+ if (page_ref_tracepoint_active(page_ref_mod_and_return))
__page_ref_mod_and_return(page, -1, ret);
return ret;
}
-static inline int page_ref_add_unless(struct page *page, int nr, int u)
+static inline int folio_ref_dec_return(struct folio *folio)
{
- int ret = atomic_add_unless(&page->_refcount, nr, u);
+ return page_ref_dec_return(&folio->page);
+}
- if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless))
+static inline bool page_ref_add_unless(struct page *page, int nr, int u)
+{
+ bool ret = atomic_add_unless(&page->_refcount, nr, u);
+
+ if (page_ref_tracepoint_active(page_ref_mod_unless))
__page_ref_mod_unless(page, nr, ret);
return ret;
}
+static inline bool folio_ref_add_unless(struct folio *folio, int nr, int u)
+{
+ return page_ref_add_unless(&folio->page, nr, u);
+}
+
+/**
+ * folio_try_get - Attempt to increase the refcount on a folio.
+ * @folio: The folio.
+ *
+ * If you do not already have a reference to a folio, you can attempt to
+ * get one using this function. It may fail if, for example, the folio
+ * has been freed since you found a pointer to it, or it is frozen for
+ * the purposes of splitting or migration.
+ *
+ * Return: True if the reference count was successfully incremented.
+ */
+static inline bool folio_try_get(struct folio *folio)
+{
+ return folio_ref_add_unless(folio, 1, 0);
+}
+
+static inline bool folio_ref_try_add_rcu(struct folio *folio, int count)
+{
+#ifdef CONFIG_TINY_RCU
+ /*
+ * The caller guarantees the folio will not be freed from interrupt
+ * context, so (on !SMP) we only need preemption to be disabled
+ * and TINY_RCU does that for us.
+ */
+# ifdef CONFIG_PREEMPT_COUNT
+ VM_BUG_ON(!in_atomic() && !irqs_disabled());
+# endif
+ VM_BUG_ON_FOLIO(folio_ref_count(folio) == 0, folio);
+ folio_ref_add(folio, count);
+#else
+ if (unlikely(!folio_ref_add_unless(folio, count, 0))) {
+ /* Either the folio has been freed, or will be freed. */
+ return false;
+ }
+#endif
+ return true;
+}
+
+/**
+ * folio_try_get_rcu - Attempt to increase the refcount on a folio.
+ * @folio: The folio.
+ *
+ * This is a version of folio_try_get() optimised for non-SMP kernels.
+ * If you are still holding the rcu_read_lock() after looking up the
+ * page and know that the page cannot have its refcount decreased to
+ * zero in interrupt context, you can use this instead of folio_try_get().
+ *
+ * Example users include get_user_pages_fast() (as pages are not unmapped
+ * from interrupt context) and the page cache lookups (as pages are not
+ * truncated from interrupt context). We also know that pages are not
+ * frozen in interrupt context for the purposes of splitting or migration.
+ *
+ * You can also use this function if you're holding a lock that prevents
+ * pages being frozen & removed; eg the i_pages lock for the page cache
+ * or the mmap_lock or page table lock for page tables. In this case,
+ * it will always succeed, and you could have used a plain folio_get(),
+ * but it's sometimes more convenient to have a common function called
+ * from both locked and RCU-protected contexts.
+ *
+ * Return: True if the reference count was successfully incremented.
+ */
+static inline bool folio_try_get_rcu(struct folio *folio)
+{
+ return folio_ref_try_add_rcu(folio, 1);
+}
+
static inline int page_ref_freeze(struct page *page, int count)
{
int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze))
+ if (page_ref_tracepoint_active(page_ref_freeze))
__page_ref_freeze(page, count, ret);
return ret;
}
+static inline int folio_ref_freeze(struct folio *folio, int count)
+{
+ return page_ref_freeze(&folio->page, count);
+}
+
static inline void page_ref_unfreeze(struct page *page, int count)
{
VM_BUG_ON_PAGE(page_count(page) != 0, page);
VM_BUG_ON(count == 0);
- smp_mb();
- atomic_set(&page->_refcount, count);
- if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
+ atomic_set_release(&page->_refcount, count);
+ if (page_ref_tracepoint_active(page_ref_unfreeze))
__page_ref_unfreeze(page, count);
}
+static inline void folio_ref_unfreeze(struct folio *folio, int count)
+{
+ page_ref_unfreeze(&folio->page, count);
+}
#endif
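
A minimal sketch of the speculative-reference pattern behind folio_try_get_rcu(); a real page-cache lookup must additionally recheck that the folio still matches the slot it was found in, which is elided here:

	#include <linux/page_ref.h>
	#include <linux/rcupdate.h>

	static struct folio *pin_folio_sketch(struct folio *candidate)
	{
		struct folio *folio = NULL;

		rcu_read_lock();
		if (candidate && folio_try_get_rcu(candidate))
			folio = candidate;	/* pinned; caller must folio_put() */
		rcu_read_unlock();

		return folio;
	}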
diff --git a/include/linux/page_reporting.h b/include/linux/page_reporting.h
new file mode 100644
index 000000000000..fe648dfa3a7c
--- /dev/null
+++ b/include/linux/page_reporting.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PAGE_REPORTING_H
+#define _LINUX_PAGE_REPORTING_H
+
+#include <linux/mmzone.h>
+#include <linux/scatterlist.h>
+
+/* This value should always be a power of 2, see page_reporting_cycle() */
+#define PAGE_REPORTING_CAPACITY 32
+
+struct page_reporting_dev_info {
+ /* function that alters pages to make them "reported" */
+ int (*report)(struct page_reporting_dev_info *prdev,
+ struct scatterlist *sg, unsigned int nents);
+
+ /* work struct for processing reports */
+ struct delayed_work work;
+
+ /* Current state of page reporting */
+ atomic_t state;
+
+ /* Minimal order of page reporting */
+ unsigned int order;
+};
+
+/* Tear-down and bring-up for page reporting devices */
+void page_reporting_unregister(struct page_reporting_dev_info *prdev);
+int page_reporting_register(struct page_reporting_dev_info *prdev);
+#endif /*_LINUX_PAGE_REPORTING_H */
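
A minimal sketch of a page reporting backend; the driver, its report callback, and the probe function are all hypothetical:

	#include <linux/page_reporting.h>

	static int my_report(struct page_reporting_dev_info *prdev,
			     struct scatterlist *sg, unsigned int nents)
	{
		/* hand each of the nents free ranges to the backend here */
		return 0;	/* 0 lets the core mark the pages reported */
	}

	static struct page_reporting_dev_info my_prdev = {
		.report = my_report,
	};

	static int my_probe(void)
	{
		my_prdev.order = pageblock_order;	/* report pageblock-sized chunks */
		return page_reporting_register(&my_prdev);
	}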
diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h
new file mode 100644
index 000000000000..01e16c7696ec
--- /dev/null
+++ b/include/linux/page_table_check.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2021, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+#ifndef __LINUX_PAGE_TABLE_CHECK_H
+#define __LINUX_PAGE_TABLE_CHECK_H
+
+#ifdef CONFIG_PAGE_TABLE_CHECK
+#include <linux/jump_label.h>
+
+extern struct static_key_true page_table_check_disabled;
+extern struct page_ext_operations page_table_check_ops;
+
+void __page_table_check_zero(struct page *page, unsigned int order);
+void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t pte);
+void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
+ pmd_t pmd);
+void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
+ pud_t pud);
+void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte);
+void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd);
+void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
+ pud_t *pudp, pud_t pud);
+void __page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd);
+
+static inline void page_table_check_alloc(struct page *page, unsigned int order)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_zero(page, order);
+}
+
+static inline void page_table_check_free(struct page *page, unsigned int order)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_zero(page, order);
+}
+
+static inline void page_table_check_pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t pte)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pte_clear(mm, addr, pte);
+}
+
+static inline void page_table_check_pmd_clear(struct mm_struct *mm,
+ unsigned long addr, pmd_t pmd)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pmd_clear(mm, addr, pmd);
+}
+
+static inline void page_table_check_pud_clear(struct mm_struct *mm,
+ unsigned long addr, pud_t pud)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pud_clear(mm, addr, pud);
+}
+
+static inline void page_table_check_pte_set(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pte_set(mm, addr, ptep, pte);
+}
+
+static inline void page_table_check_pmd_set(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp,
+ pmd_t pmd)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pmd_set(mm, addr, pmdp, pmd);
+}
+
+static inline void page_table_check_pud_set(struct mm_struct *mm,
+ unsigned long addr, pud_t *pudp,
+ pud_t pud)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pud_set(mm, addr, pudp, pud);
+}
+
+static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pte_clear_range(mm, addr, pmd);
+}
+
+#else
+
+static inline void page_table_check_alloc(struct page *page, unsigned int order)
+{
+}
+
+static inline void page_table_check_free(struct page *page, unsigned int order)
+{
+}
+
+static inline void page_table_check_pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t pte)
+{
+}
+
+static inline void page_table_check_pmd_clear(struct mm_struct *mm,
+ unsigned long addr, pmd_t pmd)
+{
+}
+
+static inline void page_table_check_pud_clear(struct mm_struct *mm,
+ unsigned long addr, pud_t pud)
+{
+}
+
+static inline void page_table_check_pte_set(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte)
+{
+}
+
+static inline void page_table_check_pmd_set(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp,
+ pmd_t pmd)
+{
+}
+
+static inline void page_table_check_pud_set(struct mm_struct *mm,
+ unsigned long addr, pud_t *pudp,
+ pud_t pud)
+{
+}
+
+static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd)
+{
+}
+
+#endif /* CONFIG_PAGE_TABLE_CHECK */
+#endif /* __LINUX_PAGE_TABLE_CHECK_H */
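
A minimal sketch of how an architecture is expected to wire these hooks into its PTE setter; the function is hypothetical and the real call sites are arch-specific:

	#include <linux/page_table_check.h>

	static inline void arch_set_pte_sketch(struct mm_struct *mm,
			unsigned long addr, pte_t *ptep, pte_t pte)
	{
		/* no-op branch while the static key keeps checking disabled */
		page_table_check_pte_set(mm, addr, ptep, pte);
		*ptep = pte;	/* the actual arch-specific store */
	}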
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index e942558b3585..e83c4c095041 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Macros for manipulating and testing flags related to a
* pageblock_nr_pages number of pages.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* Copyright (C) IBM Corporation, 2006
*
* Original author, Mel Gorman
@@ -25,10 +13,11 @@
#include <linux/types.h>
+#define PB_migratetype_bits 3
/* Bit indices that affect a whole block of pages */
enum pageblock_bits {
PB_migrate,
- PB_migrate_end = PB_migrate + 3 - 1,
+ PB_migrate_end = PB_migrate + PB_migratetype_bits - 1,
/* 3 bits required for migrate types */
PB_migrate_skip,/* If set the block is skipped by compaction */
@@ -48,54 +37,62 @@ extern unsigned int pageblock_order;
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
-/* Huge pages are a constant size */
-#define pageblock_order HUGETLB_PAGE_ORDER
+/*
+ * Huge pages are a constant size, but don't exceed the maximum allocation
+ * granularity.
+ */
+#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_ORDER)
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
#else /* CONFIG_HUGETLB_PAGE */
/* If huge pages are not used, group by MAX_ORDER_NR_PAGES */
-#define pageblock_order (MAX_ORDER-1)
+#define pageblock_order MAX_ORDER
#endif /* CONFIG_HUGETLB_PAGE */
#define pageblock_nr_pages (1UL << pageblock_order)
+#define pageblock_align(pfn) ALIGN((pfn), pageblock_nr_pages)
+#define pageblock_aligned(pfn) IS_ALIGNED((pfn), pageblock_nr_pages)
+#define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages)
+#define pageblock_end_pfn(pfn) ALIGN((pfn) + 1, pageblock_nr_pages)
/* Forward declaration */
struct page;
-unsigned long get_pfnblock_flags_mask(struct page *page,
+unsigned long get_pfnblock_flags_mask(const struct page *page,
unsigned long pfn,
- unsigned long end_bitidx,
unsigned long mask);
void set_pfnblock_flags_mask(struct page *page,
unsigned long flags,
unsigned long pfn,
- unsigned long end_bitidx,
unsigned long mask);
/* Declarations for getting and setting flags. See mm/page_alloc.c */
-#define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \
- get_pfnblock_flags_mask(page, page_to_pfn(page), \
- end_bitidx, \
- (1 << (end_bitidx - start_bitidx + 1)) - 1)
-#define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \
- set_pfnblock_flags_mask(page, flags, page_to_pfn(page), \
- end_bitidx, \
- (1 << (end_bitidx - start_bitidx + 1)) - 1)
-
#ifdef CONFIG_COMPACTION
#define get_pageblock_skip(page) \
- get_pageblock_flags_group(page, PB_migrate_skip, \
- PB_migrate_skip)
+ get_pfnblock_flags_mask(page, page_to_pfn(page), \
+ (1 << (PB_migrate_skip)))
#define clear_pageblock_skip(page) \
- set_pageblock_flags_group(page, 0, PB_migrate_skip, \
- PB_migrate_skip)
+ set_pfnblock_flags_mask(page, 0, page_to_pfn(page), \
+ (1 << PB_migrate_skip))
#define set_pageblock_skip(page) \
- set_pageblock_flags_group(page, 1, PB_migrate_skip, \
- PB_migrate_skip)
+ set_pfnblock_flags_mask(page, (1 << PB_migrate_skip), \
+ page_to_pfn(page), \
+ (1 << PB_migrate_skip))
+#else
+static inline bool get_pageblock_skip(struct page *page)
+{
+ return false;
+}
+static inline void clear_pageblock_skip(struct page *page)
+{
+}
+static inline void set_pageblock_skip(struct page *page)
+{
+}
#endif /* CONFIG_COMPACTION */
#endif /* PAGEBLOCK_FLAGS_H */
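
A minimal sketch of the new alignment helpers, which replace open-coded ALIGN() arithmetic; the walk itself is hypothetical:

	#include <linux/pageblock-flags.h>

	static void walk_pageblocks_sketch(unsigned long start_pfn,
					   unsigned long end_pfn)
	{
		unsigned long pfn;

		/* round down to the enclosing block, then step block by block */
		for (pfn = pageblock_start_pfn(start_pfn); pfn < end_pfn;
		     pfn = pageblock_end_pfn(pfn)) {
			/* [pfn, pageblock_end_pfn(pfn)) is one pageblock */
		}
	}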
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index baa9344dcd10..a56308a9d1a4 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H
@@ -15,6 +16,177 @@
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>
+struct folio_batch;
+
+unsigned long invalidate_mapping_pages(struct address_space *mapping,
+ pgoff_t start, pgoff_t end);
+
+static inline void invalidate_remote_inode(struct inode *inode)
+{
+ if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode))
+ invalidate_mapping_pages(inode->i_mapping, 0, -1);
+}
+int invalidate_inode_pages2(struct address_space *mapping);
+int invalidate_inode_pages2_range(struct address_space *mapping,
+ pgoff_t start, pgoff_t end);
+int write_inode_now(struct inode *, int sync);
+int filemap_fdatawrite(struct address_space *);
+int filemap_flush(struct address_space *);
+int filemap_fdatawait_keep_errors(struct address_space *mapping);
+int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
+int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
+ loff_t start_byte, loff_t end_byte);
+
+static inline int filemap_fdatawait(struct address_space *mapping)
+{
+ return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
+}
+
+bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend);
+int filemap_write_and_wait_range(struct address_space *mapping,
+ loff_t lstart, loff_t lend);
+int __filemap_fdatawrite_range(struct address_space *mapping,
+ loff_t start, loff_t end, int sync_mode);
+int filemap_fdatawrite_range(struct address_space *mapping,
+ loff_t start, loff_t end);
+int filemap_check_errors(struct address_space *mapping);
+void __filemap_set_wb_err(struct address_space *mapping, int err);
+int filemap_fdatawrite_wbc(struct address_space *mapping,
+ struct writeback_control *wbc);
+
+static inline int filemap_write_and_wait(struct address_space *mapping)
+{
+ return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
+}
+
+/**
+ * filemap_set_wb_err - set a writeback error on an address_space
+ * @mapping: mapping in which to set writeback error
+ * @err: error to be set in mapping
+ *
+ * When writeback fails in some way, we must record that error so that
+ * userspace can be informed when fsync and the like are called. We endeavor
+ * to report errors on any file that was open at the time of the error. Some
+ * internal callers also need to know when writeback errors have occurred.
+ *
+ * When a writeback error occurs, most filesystems will want to call
+ * filemap_set_wb_err to record the error in the mapping so that it will be
+ * automatically reported whenever fsync is called on the file.
+ */
+static inline void filemap_set_wb_err(struct address_space *mapping, int err)
+{
+ /* Fastpath for common case of no error */
+ if (unlikely(err))
+ __filemap_set_wb_err(mapping, err);
+}
+
+/**
+ * filemap_check_wb_err - has an error occurred since the mark was sampled?
+ * @mapping: mapping to check for writeback errors
+ * @since: previously-sampled errseq_t
+ *
+ * Grab the errseq_t value from the mapping, and see if it has changed "since"
+ * the given value was sampled.
+ *
+ * If it has then report the latest error set, otherwise return 0.
+ */
+static inline int filemap_check_wb_err(struct address_space *mapping,
+ errseq_t since)
+{
+ return errseq_check(&mapping->wb_err, since);
+}
+
+/**
+ * filemap_sample_wb_err - sample the current errseq_t to test for later errors
+ * @mapping: mapping to be sampled
+ *
+ * Writeback errors are always reported relative to a particular sample point
+ * in the past. This function provides those sample points.
+ */
+static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
+{
+ return errseq_sample(&mapping->wb_err);
+}
+
+/**
+ * file_sample_sb_err - sample the current errseq_t to test for later errors
+ * @file: file pointer to be sampled
+ *
+ * Grab the most current superblock-level errseq_t value for the given
+ * struct file.
+ */
+static inline errseq_t file_sample_sb_err(struct file *file)
+{
+ return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
+}
+
+/*
+ * Flush file data before changing attributes. Caller must hold any locks
+ * required to prevent further writes to this file until we're done setting
+ * flags.
+ */
+static inline int inode_drain_writes(struct inode *inode)
+{
+ inode_dio_wait(inode);
+ return filemap_write_and_wait(inode->i_mapping);
+}
+
+static inline bool mapping_empty(struct address_space *mapping)
+{
+ return xa_empty(&mapping->i_pages);
+}
+
+/*
+ * mapping_shrinkable - test if page cache state allows inode reclaim
+ * @mapping: the page cache mapping
+ *
+ * This checks the mapping's cache state for the purpose of inode
+ * reclaim and LRU management.
+ *
+ * The caller is expected to hold the i_lock, but is not required to
+ * hold the i_pages lock, which usually protects cache state. That's
+ * because the i_lock and the list_lru lock that protect the inode and
+ * its LRU state don't nest inside the irq-safe i_pages lock.
+ *
+ * Cache deletions are performed under the i_lock, which ensures that
+ * when an inode goes empty, it will reliably get queued on the LRU.
+ *
+ * Cache additions do not acquire the i_lock and may race with this
+ * check, in which case we'll report the inode as shrinkable when it
+ * has cache pages. This is okay: the shrinker also checks the
+ * refcount and the referenced bit, which will be elevated or set in
+ * the process of adding new cache pages to an inode.
+ */
+static inline bool mapping_shrinkable(struct address_space *mapping)
+{
+ void *head;
+
+ /*
+ * On highmem systems, there could be lowmem pressure from the
+ * inodes before there is highmem pressure from the page
+ * cache. Make inodes shrinkable regardless of cache state.
+ */
+ if (IS_ENABLED(CONFIG_HIGHMEM))
+ return true;
+
+ /* Cache completely empty? Shrink away. */
+ head = rcu_access_pointer(mapping->i_pages.xa_head);
+ if (!head)
+ return true;
+
+ /*
+ * The xarray stores single offset-0 entries directly in the
+ * head pointer, which allows non-resident page cache entries
+ * to escape the shadow shrinker's list of xarray nodes. The
+ * inode shrinker needs to pick them up under memory pressure.
+ */
+ if (!xa_is_node(head) && xa_is_value(head))
+ return true;
+
+ return false;
+}
+
/*
* Bits in mapping->flags.
*/
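
A minimal sketch of the sample/check protocol described in the kernel-doc above, in a hypothetical fsync-style path:

	#include <linux/pagemap.h>

	static int fsync_sketch(struct address_space *mapping)
	{
		errseq_t since = filemap_sample_wb_err(mapping);
		int err = filemap_write_and_wait(mapping);

		if (err)
			return err;
		/* report any writeback error raised since the sample point */
		return filemap_check_wb_err(mapping, since);
	}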
@@ -26,12 +198,13 @@ enum mapping_flags {
AS_EXITING = 4, /* final truncate in progress */
/* writeback related tags are not used */
AS_NO_WRITEBACK_TAGS = 5,
+ AS_LARGE_FOLIO_SUPPORT = 6,
};
/**
* mapping_set_error - record a writeback error in the address_space
- * @mapping - the mapping in which an error should be set
- * @error - the error to set in the mapping
+ * @mapping: the mapping in which an error should be set
+ * @error: the error to set in the mapping
*
* When writeback fails in some way, we must record that error so that
* userspace can be informed when fsync and the like are called. We endeavor
@@ -48,7 +221,11 @@ static inline void mapping_set_error(struct address_space *mapping, int error)
return;
/* Record in wb_err for checkers using errseq_t based tracking */
- filemap_set_wb_err(mapping, error);
+ __filemap_set_wb_err(mapping, error);
+
+ /* Record it in superblock */
+ if (mapping->host)
+ errseq_set(&mapping->host->i_sb->s_wb_err, error);
/* Record it in flags for now, for legacy callers */
if (error == -ENOSPC)
@@ -67,11 +244,9 @@ static inline void mapping_clear_unevictable(struct address_space *mapping)
clear_bit(AS_UNEVICTABLE, &mapping->flags);
}
-static inline int mapping_unevictable(struct address_space *mapping)
+static inline bool mapping_unevictable(struct address_space *mapping)
{
- if (mapping)
- return test_bit(AS_UNEVICTABLE, &mapping->flags);
- return !!mapping;
+ return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}
static inline void mapping_set_exiting(struct address_space *mapping)
@@ -115,140 +290,211 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
m->gfp_mask = mask;
}
-void release_pages(struct page **pages, int nr, bool cold);
+/**
+ * mapping_set_large_folios() - Indicate the file supports large folios.
+ * @mapping: The file.
+ *
+ * The filesystem should call this function in its inode constructor to
+ * indicate that the VFS can use large folios to cache the contents of
+ * the file.
+ *
+ * Context: This should not be called while the inode is active as it
+ * is non-atomic.
+ */
+static inline void mapping_set_large_folios(struct address_space *mapping)
+{
+ __set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+}
/*
- * speculatively take a reference to a page.
- * If the page is free (_refcount == 0), then _refcount is untouched, and 0
- * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
- *
- * This function must be called inside the same rcu_read_lock() section as has
- * been used to lookup the page in the pagecache radix-tree (or page table):
- * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
- *
- * Unless an RCU grace period has passed, the count of all pages coming out
- * of the allocator must be considered unstable. page_count may return higher
- * than expected, and put_page must be able to do the right thing when the
- * page has been finished with, no matter what it is subsequently allocated
- * for (because put_page is what is used here to drop an invalid speculative
- * reference).
- *
- * This is the interesting part of the lockless pagecache (and lockless
- * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
- * has the following pattern:
- * 1. find page in radix tree
- * 2. conditionally increment refcount
- * 3. check the page is still in pagecache (if no, goto 1)
- *
- * Remove-side that cares about stability of _refcount (eg. reclaim) has the
- * following (with tree_lock held for write):
- * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
- * B. remove page from pagecache
- * C. free the page
- *
- * There are 2 critical interleavings that matter:
- * - 2 runs before A: in this case, A sees elevated refcount and bails out
- * - A runs before 2: in this case, 2 sees zero refcount and retries;
- * subsequently, B will complete and 1 will find no page, causing the
- * lookup to return NULL.
- *
- * It is possible that between 1 and 2, the page is removed then the exact same
- * page is inserted into the same position in pagecache. That's OK: the
- * old find_get_page using tree_lock could equally have run before or after
- * such a re-insertion, depending on order that locks are granted.
- *
- * Lookups racing against pagecache insertion isn't a big problem: either 1
- * will find the page or it will not. Likewise, the old find_get_page could run
- * either before the insertion or afterwards, depending on timing.
- */
-static inline int page_cache_get_speculative(struct page *page)
-{
- VM_BUG_ON(in_interrupt());
-
-#ifdef CONFIG_TINY_RCU
-# ifdef CONFIG_PREEMPT_COUNT
- VM_BUG_ON(!in_atomic() && !irqs_disabled());
-# endif
- /*
- * Preempt must be disabled here - we rely on rcu_read_lock doing
- * this for us.
- *
- * Pagecache won't be truncated from interrupt context, so if we have
- * found a page in the radix tree here, we have pinned its refcount by
- * disabling preempt, and hence no need for the "speculative get" that
- * SMP requires.
- */
- VM_BUG_ON_PAGE(page_count(page) == 0, page);
- page_ref_inc(page);
+ * Large folio support currently depends on THP. These dependencies are
+ * being worked on but are not yet fixed.
+ */
+static inline bool mapping_large_folio_support(struct address_space *mapping)
+{
+ return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
+ test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+}
+static inline int filemap_nr_thps(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ return atomic_read(&mapping->nr_thps);
#else
- if (unlikely(!get_page_unless_zero(page))) {
- /*
- * Either the page has been freed, or will be freed.
- * In either case, retry here and the caller should
- * do the right thing (see comments above).
- */
- return 0;
- }
+ return 0;
+#endif
+}
+
+static inline void filemap_nr_thps_inc(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ if (!mapping_large_folio_support(mapping))
+ atomic_inc(&mapping->nr_thps);
+#else
+ WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
- VM_BUG_ON_PAGE(PageTail(page), page);
+}
- return 1;
+static inline void filemap_nr_thps_dec(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ if (!mapping_large_folio_support(mapping))
+ atomic_dec(&mapping->nr_thps);
+#else
+ WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
+#endif
+}
+
+struct address_space *page_mapping(struct page *);
+struct address_space *folio_mapping(struct folio *);
+struct address_space *swapcache_mapping(struct folio *);
+
+/**
+ * folio_file_mapping - Find the mapping this folio belongs to.
+ * @folio: The folio.
+ *
+ * For folios which are in the page cache, return the mapping that this
+ * page belongs to. Folios in the swap cache return the mapping of the
+ * swap file or swap device where the data is stored. This is different
+ * from the mapping returned by folio_mapping(). The only reason to
+ * use it is if, like NFS, you return 0 from ->activate_swapfile.
+ *
+ * Do not call this for folios which aren't in the page cache or swap cache.
+ */
+static inline struct address_space *folio_file_mapping(struct folio *folio)
+{
+ if (unlikely(folio_test_swapcache(folio)))
+ return swapcache_mapping(folio);
+
+ return folio->mapping;
+}
+
+static inline struct address_space *page_file_mapping(struct page *page)
+{
+ return folio_file_mapping(page_folio(page));
}
/*
- * Same as above, but add instead of inc (could just be merged)
+ * For file cache pages, return the address_space, otherwise return NULL
*/
-static inline int page_cache_add_speculative(struct page *page, int count)
+static inline struct address_space *page_mapping_file(struct page *page)
{
- VM_BUG_ON(in_interrupt());
+ struct folio *folio = page_folio(page);
-#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
-# ifdef CONFIG_PREEMPT_COUNT
- VM_BUG_ON(!in_atomic() && !irqs_disabled());
-# endif
- VM_BUG_ON_PAGE(page_count(page) == 0, page);
- page_ref_add(page, count);
+ if (unlikely(folio_test_swapcache(folio)))
+ return NULL;
+ return folio_mapping(folio);
+}
-#else
- if (unlikely(!page_ref_add_unless(page, count, 0)))
- return 0;
-#endif
- VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);
+/**
+ * folio_inode - Get the host inode for this folio.
+ * @folio: The folio.
+ *
+ * For folios which are in the page cache, return the inode that this folio
+ * belongs to.
+ *
+ * Do not call this for folios which aren't in the page cache.
+ */
+static inline struct inode *folio_inode(struct folio *folio)
+{
+ return folio->mapping->host;
+}
- return 1;
+/**
+ * folio_attach_private - Attach private data to a folio.
+ * @folio: Folio to attach data to.
+ * @data: Data to attach to folio.
+ *
+ * Attaching private data to a folio increments the page's reference count.
+ * The data must be detached before the folio will be freed.
+ */
+static inline void folio_attach_private(struct folio *folio, void *data)
+{
+ folio_get(folio);
+ folio->private = data;
+ folio_set_private(folio);
+}
+
+/**
+ * folio_change_private - Change private data on a folio.
+ * @folio: Folio to change the data on.
+ * @data: Data to set on the folio.
+ *
+ * Change the private data attached to a folio and return the old
+ * data. The page must previously have had data attached and the data
+ * must be detached before the folio will be freed.
+ *
+ * Return: Data that was previously attached to the folio.
+ */
+static inline void *folio_change_private(struct folio *folio, void *data)
+{
+ void *old = folio_get_private(folio);
+
+ folio->private = data;
+ return old;
+}
+
+/**
+ * folio_detach_private - Detach private data from a folio.
+ * @folio: Folio to detach data from.
+ *
+ * Removes the data that was previously attached to the folio and decrements
+ * the refcount on the page.
+ *
+ * Return: Data that was attached to the folio.
+ */
+static inline void *folio_detach_private(struct folio *folio)
+{
+ void *data = folio_get_private(folio);
+
+ if (!folio_test_private(folio))
+ return NULL;
+ folio_clear_private(folio);
+ folio->private = NULL;
+ folio_put(folio);
+
+ return data;
+}
+
+static inline void attach_page_private(struct page *page, void *data)
+{
+ folio_attach_private(page_folio(page), data);
+}
+
+static inline void *detach_page_private(struct page *page)
+{
+ return folio_detach_private(page_folio(page));
}
#ifdef CONFIG_NUMA
-extern struct page *__page_cache_alloc(gfp_t gfp);
+struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
#else
-static inline struct page *__page_cache_alloc(gfp_t gfp)
+static inline struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
{
- return alloc_pages(gfp, 0);
+ return folio_alloc(gfp, order);
}
#endif
-static inline struct page *page_cache_alloc(struct address_space *x)
+static inline struct page *__page_cache_alloc(gfp_t gfp)
{
- return __page_cache_alloc(mapping_gfp_mask(x));
+ return &filemap_alloc_folio(gfp, 0)->page;
}
-static inline struct page *page_cache_alloc_cold(struct address_space *x)
+static inline struct page *page_cache_alloc(struct address_space *x)
{
- return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
+ return __page_cache_alloc(mapping_gfp_mask(x));
}
static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
- return mapping_gfp_mask(x) |
- __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
+ return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}
-typedef int filler_t(void *, struct page *);
+typedef int filler_t(struct file *, struct folio *);
-pgoff_t page_cache_next_hole(struct address_space *mapping,
+pgoff_t page_cache_next_miss(struct address_space *mapping,
pgoff_t index, unsigned long max_scan);
-pgoff_t page_cache_prev_hole(struct address_space *mapping,
+pgoff_t page_cache_prev_miss(struct address_space *mapping,
pgoff_t index, unsigned long max_scan);
#define FGP_ACCESSED 0x00000001
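
A minimal sketch of the folio private-data helpers introduced above: attaching takes a folio reference and detaching drops it, tying the data's lifetime to the cached folio. The state pointer is hypothetical:

	#include <linux/pagemap.h>

	static void *fs_private_sketch(struct folio *folio, void *new_state)
	{
		folio_attach_private(folio, new_state);	/* grabs a folio ref */

		/* ... the filesystem reads it back via folio_get_private() ... */

		return folio_detach_private(folio);	/* releases that ref */
	}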
@@ -257,9 +503,71 @@ pgoff_t page_cache_prev_hole(struct address_space *mapping,
#define FGP_WRITE 0x00000008
#define FGP_NOFS 0x00000010
#define FGP_NOWAIT 0x00000020
+#define FGP_FOR_MMAP 0x00000040
+#define FGP_STABLE 0x00000080
+
+#define FGP_WRITEBEGIN (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)
+
+void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
+struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
+ int fgp_flags, gfp_t gfp);
+struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
+ int fgp_flags, gfp_t gfp);
-struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
- int fgp_flags, gfp_t cache_gfp_mask);
+/**
+ * filemap_get_folio - Find and get a folio.
+ * @mapping: The address_space to search.
+ * @index: The page index.
+ *
+ * Looks up the page cache entry at @mapping & @index. If a folio is
+ * present, it is returned with an increased refcount.
+ *
+ * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
+ * this index. Will not return a shadow, swap or DAX entry.
+ */
+static inline struct folio *filemap_get_folio(struct address_space *mapping,
+ pgoff_t index)
+{
+ return __filemap_get_folio(mapping, index, 0, 0);
+}
+
+/**
+ * filemap_lock_folio - Find and lock a folio.
+ * @mapping: The address_space to search.
+ * @index: The page index.
+ *
+ * Looks up the page cache entry at @mapping & @index. If a folio is
+ * present, it is returned locked with an increased refcount.
+ *
+ * Context: May sleep.
+ * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
+ * this index. Will not return a shadow, swap or DAX entry.
+ */
+static inline struct folio *filemap_lock_folio(struct address_space *mapping,
+ pgoff_t index)
+{
+ return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
+}
+
+/**
+ * filemap_grab_folio - grab a folio from the page cache
+ * @mapping: The address space to search
+ * @index: The page index
+ *
+ * Looks up the page cache entry at @mapping & @index. If no folio is found,
+ * a new folio is created. The folio is locked, marked as accessed, and
+ * returned.
+ *
+ * Return: A found or created folio, or ERR_PTR(-ENOMEM) if no folio was
+ * found and a new one could not be created.
+ */
+static inline struct folio *filemap_grab_folio(struct address_space *mapping,
+ pgoff_t index)
+{
+ return __filemap_get_folio(mapping, index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ mapping_gfp_mask(mapping));
+}
/**
* find_get_page - find and get a page reference
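
A minimal sketch of the ERR_PTR convention used by the new folio lookups above (they return ERR_PTR(-ENOENT) rather than NULL); the caller is hypothetical:

	#include <linux/err.h>
	#include <linux/pagemap.h>

	static struct folio *lookup_locked_sketch(struct address_space *mapping,
						  pgoff_t index)
	{
		struct folio *folio = filemap_lock_folio(mapping, index);

		if (IS_ERR(folio))
			return folio;	/* nothing cached at this index */

		/* folio is locked with an elevated refcount here */
		folio_unlock(folio);
		return folio;
	}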
@@ -286,20 +594,20 @@ static inline struct page *find_get_page_flags(struct address_space *mapping,
/**
* find_lock_page - locate, pin and lock a pagecache page
* @mapping: the address_space to search
- * @offset: the page index
+ * @index: the page index
*
- * Looks up the page cache slot at @mapping & @offset. If there is a
+ * Looks up the page cache entry at @mapping & @index. If there is a
* page cache page, it is returned locked and with an increased
* refcount.
*
- * Otherwise, %NULL is returned.
- *
- * find_lock_page() may sleep.
+ * Context: May sleep.
+ * Return: A struct page or %NULL if there is no page in the cache for this
+ * index.
*/
static inline struct page *find_lock_page(struct address_space *mapping,
- pgoff_t offset)
+ pgoff_t index)
{
- return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
+ return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}
/**
@@ -322,9 +630,9 @@ static inline struct page *find_lock_page(struct address_space *mapping,
* atomic allocation!
*/
static inline struct page *find_or_create_page(struct address_space *mapping,
- pgoff_t offset, gfp_t gfp_mask)
+ pgoff_t index, gfp_t gfp_mask)
{
- return pagecache_get_page(mapping, offset,
+ return pagecache_get_page(mapping, index,
FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
gfp_mask);
}
@@ -350,23 +658,95 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
mapping_gfp_mask(mapping));
}
-struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
-struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
-unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
- unsigned int nr_entries, struct page **entries,
- pgoff_t *indices);
-unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
- unsigned int nr_pages, struct page **pages);
-unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
- unsigned int nr_pages, struct page **pages);
-unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
- int tag, unsigned int nr_pages, struct page **pages);
-unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
- int tag, unsigned int nr_entries,
- struct page **entries, pgoff_t *indices);
+#define swapcache_index(folio) __page_file_index(&(folio)->page)
+
+/**
+ * folio_index - File index of a folio.
+ * @folio: The folio.
+ *
+ * For a folio which is either in the page cache or the swap cache,
+ * return its index within the address_space it belongs to. If you know
+ * the page is definitely in the page cache, you can look at the folio's
+ * index directly.
+ *
+ * Return: The index (offset in units of pages) of a folio in its file.
+ */
+static inline pgoff_t folio_index(struct folio *folio)
+{
+ if (unlikely(folio_test_swapcache(folio)))
+ return swapcache_index(folio);
+ return folio->index;
+}
+
+/**
+ * folio_next_index - Get the index of the next folio.
+ * @folio: The current folio.
+ *
+ * Return: The index of the folio which follows this folio in the file.
+ */
+static inline pgoff_t folio_next_index(struct folio *folio)
+{
+ return folio->index + folio_nr_pages(folio);
+}
+
+/**
+ * folio_file_page - The page for a particular index.
+ * @folio: The folio which contains this index.
+ * @index: The index we want to look up.
+ *
+ * Sometimes after looking up a folio in the page cache, we need to
+ * obtain the specific page for an index (eg a page fault).
+ *
+ * Return: The page containing the file data for this index.
+ */
+static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
+{
+ /* HugeTLBfs indexes the page cache in units of hpage_size */
+ if (folio_test_hugetlb(folio))
+ return &folio->page;
+ return folio_page(folio, index & (folio_nr_pages(folio) - 1));
+}
+
+/**
+ * folio_contains - Does this folio contain this index?
+ * @folio: The folio.
+ * @index: The page index within the file.
+ *
+ * Context: The caller should have the page locked in order to prevent
+ * (eg) shmem from moving the page between the page cache and swap cache
+ * and changing its index in the middle of the operation.
+ * Return: true or false.
+ */
+static inline bool folio_contains(struct folio *folio, pgoff_t index)
+{
+ /* HugeTLBfs indexes the page cache in units of hpage_size */
+ if (folio_test_hugetlb(folio))
+ return folio->index == index;
+ return index - folio_index(folio) < folio_nr_pages(folio);
+}
+
+/*
+ * Given the page we found in the page cache, return the page corresponding
+ * to this index in the file
+ */
+static inline struct page *find_subpage(struct page *head, pgoff_t index)
+{
+ /* HugeTLBfs wants the head page regardless */
+ if (PageHuge(head))
+ return head;
+
+ return head + (index & (thp_nr_pages(head) - 1));
+}
+
+unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
+ pgoff_t end, struct folio_batch *fbatch);
+unsigned filemap_get_folios_contig(struct address_space *mapping,
+ pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
+unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
+ pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);
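A common consumer pattern for these batched lookups, sketched with a hypothetical process_folio() helper; filemap_get_folios() advances *start past the last folio it returns, so the loop ends once the range is exhausted:

	struct folio_batch fbatch;
	pgoff_t start = 0;
	unsigned int i, nr;

	folio_batch_init(&fbatch);
	while ((nr = filemap_get_folios(mapping, &start, (pgoff_t)-1, &fbatch))) {
		for (i = 0; i < nr; i++)
			process_folio(fbatch.folios[i]);	/* hypothetical */
		folio_batch_release(&fbatch);	/* drops the references taken */
	}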
struct page *grab_cache_page_write_begin(struct address_space *mapping,
- pgoff_t index, unsigned flags);
+ pgoff_t index);
/*
* Returns locked page at given index in given cache, creating it if needed.
@@ -377,49 +757,56 @@ static inline struct page *grab_cache_page(struct address_space *mapping,
return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
-extern struct page * read_cache_page(struct address_space *mapping,
- pgoff_t index, filler_t *filler, void *data);
+struct folio *read_cache_folio(struct address_space *, pgoff_t index,
+ filler_t *filler, struct file *file);
+struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index,
+ gfp_t flags);
+struct page *read_cache_page(struct address_space *, pgoff_t index,
+ filler_t *filler, struct file *file);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
-extern int read_cache_pages(struct address_space *mapping,
- struct list_head *pages, filler_t *filler, void *data);
static inline struct page *read_mapping_page(struct address_space *mapping,
- pgoff_t index, void *data)
+ pgoff_t index, struct file *file)
{
- filler_t *filler = (filler_t *)mapping->a_ops->readpage;
- return read_cache_page(mapping, index, filler, data);
+ return read_cache_page(mapping, index, NULL, file);
+}
+
+static inline struct folio *read_mapping_folio(struct address_space *mapping,
+ pgoff_t index, struct file *file)
+{
+ return read_cache_folio(mapping, index, NULL, file);
}
/*
- * Get index of the page with in radix-tree
+ * Get index of the page within radix-tree (but not for hugetlb pages).
* (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
*/
static inline pgoff_t page_to_index(struct page *page)
{
- pgoff_t pgoff;
+ struct page *head;
if (likely(!PageTransTail(page)))
return page->index;
+ head = compound_head(page);
/*
* We don't initialize ->index for tail pages: calculate based on
* head page
*/
- pgoff = compound_head(page)->index;
- pgoff += page - compound_head(page);
- return pgoff;
+ return head->index + page - head;
}
+extern pgoff_t hugetlb_basepage_index(struct page *page);
+
/*
- * Get the offset in PAGE_SIZE.
- * (TODO: hugepage should have ->index in PAGE_SIZE)
+ * Get the offset in PAGE_SIZE (even for hugetlb pages).
+ * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
*/
static inline pgoff_t page_to_pgoff(struct page *page)
{
- if (unlikely(PageHeadHuge(page)))
- return page->index << compound_order(page);
-
+ if (unlikely(PageHuge(page)))
+ return hugetlb_basepage_index(page);
return page_to_index(page);
}
@@ -436,6 +823,38 @@ static inline loff_t page_file_offset(struct page *page)
return ((loff_t)page_index(page)) << PAGE_SHIFT;
}
+/**
+ * folio_pos - Returns the byte position of this folio in its file.
+ * @folio: The folio.
+ */
+static inline loff_t folio_pos(struct folio *folio)
+{
+ return page_offset(&folio->page);
+}
+
+/**
+ * folio_file_pos - Returns the byte position of this folio in its file.
+ * @folio: The folio.
+ *
+ * This differs from folio_pos() for folios which belong to a swap file.
+ * NFS is the only filesystem today which needs to use folio_file_pos().
+ */
+static inline loff_t folio_file_pos(struct folio *folio)
+{
+ return page_file_offset(&folio->page);
+}
+
+/*
+ * Get the offset in PAGE_SIZE (even for hugetlb folios).
+ * (TODO: hugetlb folios should have ->index in PAGE_SIZE)
+ */
+static inline pgoff_t folio_pgoff(struct folio *folio)
+{
+ if (unlikely(folio_test_hugetlb(folio)))
+ return hugetlb_basepage_index(&folio->page);
+ return folio->index;
+}
+
extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
unsigned long address);
@@ -450,180 +869,510 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
return pgoff;
}
-extern void __lock_page(struct page *page);
-extern int __lock_page_killable(struct page *page);
-extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
+struct wait_page_key {
+ struct folio *folio;
+ int bit_nr;
+ int page_match;
+};
+
+struct wait_page_queue {
+ struct folio *folio;
+ int bit_nr;
+ wait_queue_entry_t wait;
+};
+
+static inline bool wake_page_match(struct wait_page_queue *wait_page,
+ struct wait_page_key *key)
+{
+ if (wait_page->folio != key->folio)
+ return false;
+ key->page_match = 1;
+
+ if (wait_page->bit_nr != key->bit_nr)
+ return false;
+
+ return true;
+}
+
+void __folio_lock(struct folio *folio);
+int __folio_lock_killable(struct folio *folio);
+bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
unsigned int flags);
-extern void unlock_page(struct page *page);
+void unlock_page(struct page *page);
+void folio_unlock(struct folio *folio);
-static inline int trylock_page(struct page *page)
+/**
+ * folio_trylock() - Attempt to lock a folio.
+ * @folio: The folio to attempt to lock.
+ *
+ * Sometimes it is undesirable to wait for a folio to be unlocked (eg
+ * when the locks are being taken in the wrong order, or if making
+ * progress through a batch of folios is more important than processing
+ * them in order). Usually folio_lock() is the correct function to call.
+ *
+ * Context: Any context.
+ * Return: Whether the lock was successfully acquired.
+ */
+static inline bool folio_trylock(struct folio *folio)
{
- page = compound_head(page);
- return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
+ return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
}
/*
- * lock_page may only be called if we have the page's inode pinned.
+ * Return true if the page was successfully locked
+ */
+static inline int trylock_page(struct page *page)
+{
+ return folio_trylock(page_folio(page));
+}
+
+/**
+ * folio_lock() - Lock this folio.
+ * @folio: The folio to lock.
+ *
+ * The folio lock protects against many things, probably more than it
+ * should. It is primarily held while a folio is being brought uptodate,
+ * either from its backing file or from swap. It is also held while a
+ * folio is being truncated from its address_space, so holding the lock
+ * is sufficient to keep folio->mapping stable.
+ *
+ * The folio lock is also held while write() is modifying the page to
+ * provide POSIX atomicity guarantees (as long as the write does not
+ * cross a page boundary). Other modifications to the data in the folio
+ * do not hold the folio lock and can race with writes, eg DMA and stores
+ * to mapped pages.
+ *
+ * Context: May sleep. If you need to acquire the locks of two or
+ * more folios, they must be in order of ascending index, if they are
+ * in the same address_space. If they are in different address_spaces,
+ * acquire the lock of the folio which belongs to the address_space which
+ * has the lowest address in memory first.
+ */
+static inline void folio_lock(struct folio *folio)
+{
+ might_sleep();
+ if (!folio_trylock(folio))
+ __folio_lock(folio);
+}
+
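The ordering rule in the Context section above can be made concrete with a short hedged sketch (illustrative helper; both folios assumed to be in the same address_space):

	static void example_lock_two(struct folio *a, struct folio *b)
	{
		if (a->index > b->index)	/* lock in ascending index order */
			swap(a, b);
		folio_lock(a);
		folio_lock(b);
	}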
+/**
+ * lock_page() - Lock the folio containing this page.
+ * @page: The page to lock.
+ *
+ * See folio_lock() for a description of what the lock protects.
+ * This is a legacy function and new code should probably use folio_lock()
+ * instead.
+ *
+ * Context: May sleep. Pages in the same folio share a lock, so do not
+ * attempt to lock two pages which share a folio.
*/
static inline void lock_page(struct page *page)
{
+ struct folio *folio;
might_sleep();
- if (!trylock_page(page))
- __lock_page(page);
+
+ folio = page_folio(page);
+ if (!folio_trylock(folio))
+ __folio_lock(folio);
}
-/*
- * lock_page_killable is like lock_page but can be interrupted by fatal
- * signals. It returns 0 if it locked the page and -EINTR if it was
- * killed while waiting.
+/**
+ * folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
+ * @folio: The folio to lock.
+ *
+ * Attempts to lock the folio, like folio_lock(), except that the sleep
+ * to acquire the lock is interruptible by a fatal signal.
+ *
+ * Context: May sleep; see folio_lock().
+ * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received.
*/
-static inline int lock_page_killable(struct page *page)
+static inline int folio_lock_killable(struct folio *folio)
{
might_sleep();
- if (!trylock_page(page))
- return __lock_page_killable(page);
+ if (!folio_trylock(folio))
+ return __folio_lock_killable(folio);
return 0;
}
/*
- * lock_page_or_retry - Lock the page, unless this would block and the
+ * folio_lock_or_retry - Lock the folio, unless this would block and the
* caller indicated that it can handle a retry.
*
- * Return value and mmap_sem implications depend on flags; see
- * __lock_page_or_retry().
+ * Return value and mmap_lock implications depend on flags; see
+ * __folio_lock_or_retry().
*/
-static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
- unsigned int flags)
+static inline bool folio_lock_or_retry(struct folio *folio,
+ struct mm_struct *mm, unsigned int flags)
{
might_sleep();
- return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
+ return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags);
}
/*
- * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
+ * This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
* and should not be used directly.
*/
-extern void wait_on_page_bit(struct page *page, int bit_nr);
-extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
+void folio_wait_bit(struct folio *folio, int bit_nr);
+int folio_wait_bit_killable(struct folio *folio, int bit_nr);
/*
- * Wait for a page to be unlocked.
+ * Wait for a folio to be unlocked.
*
- * This must be called with the caller "holding" the page,
- * ie with increased "page->count" so that the page won't
- * go away during the wait..
+ * This must be called with the caller "holding" the folio,
+ * ie with increased folio reference count so that the folio won't
+ * go away during the wait.
*/
-static inline void wait_on_page_locked(struct page *page)
+static inline void folio_wait_locked(struct folio *folio)
{
- if (PageLocked(page))
- wait_on_page_bit(compound_head(page), PG_locked);
+ if (folio_test_locked(folio))
+ folio_wait_bit(folio, PG_locked);
}
-static inline int wait_on_page_locked_killable(struct page *page)
+static inline int folio_wait_locked_killable(struct folio *folio)
{
- if (!PageLocked(page))
+ if (!folio_test_locked(folio))
return 0;
- return wait_on_page_bit_killable(compound_head(page), PG_locked);
+ return folio_wait_bit_killable(folio, PG_locked);
}
-/*
- * Wait for a page to complete writeback
- */
-static inline void wait_on_page_writeback(struct page *page)
+static inline void wait_on_page_locked(struct page *page)
{
- if (PageWriteback(page))
- wait_on_page_bit(page, PG_writeback);
+ folio_wait_locked(page_folio(page));
}
-extern void end_page_writeback(struct page *page);
-void wait_for_stable_page(struct page *page);
+static inline int wait_on_page_locked_killable(struct page *page)
+{
+ return folio_wait_locked_killable(page_folio(page));
+}
+void wait_on_page_writeback(struct page *page);
+void folio_wait_writeback(struct folio *folio);
+int folio_wait_writeback_killable(struct folio *folio);
+void end_page_writeback(struct page *page);
+void folio_end_writeback(struct folio *folio);
+void wait_for_stable_page(struct page *page);
+void folio_wait_stable(struct folio *folio);
+void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
+static inline void __set_page_dirty(struct page *page,
+ struct address_space *mapping, int warn)
+{
+ __folio_mark_dirty(page_folio(page), mapping, warn);
+}
+void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
+void __folio_cancel_dirty(struct folio *folio);
+static inline void folio_cancel_dirty(struct folio *folio)
+{
+ /* Avoid atomic ops, locking, etc. when not actually needed. */
+ if (folio_test_dirty(folio))
+ __folio_cancel_dirty(folio);
+}
+bool folio_clear_dirty_for_io(struct folio *folio);
+bool clear_page_dirty_for_io(struct page *page);
+void folio_invalidate(struct folio *folio, size_t offset, size_t length);
+int __set_page_dirty_nobuffers(struct page *page);
+bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
+
+#ifdef CONFIG_MIGRATION
+int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
+ struct folio *src, enum migrate_mode mode);
+#else
+#define filemap_migrate_folio NULL
+#endif
void page_endio(struct page *page, bool is_write, int err);
+void folio_end_private_2(struct folio *folio);
+void folio_wait_private_2(struct folio *folio);
+int folio_wait_private_2_killable(struct folio *folio);
+
/*
* Add an arbitrary waiter to a page's wait queue
*/
-extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
+void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);
/*
- * Fault everything in given userspace address range in.
+ * Fault in userspace address range.
+ */
+size_t fault_in_writeable(char __user *uaddr, size_t size);
+size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
+size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
+size_t fault_in_readable(const char __user *uaddr, size_t size);
+
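These helpers return the number of bytes that could not be faulted in, so callers conventionally treat a non-zero result as a failure. A hedged fragment with an illustrative user buffer:

	static int example_prefault(char __user *ubuf, size_t len)
	{
		if (fault_in_writeable(ubuf, len))
			return -EFAULT;
		return 0;
	}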
+int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+ pgoff_t index, gfp_t gfp);
+int filemap_add_folio(struct address_space *mapping, struct folio *folio,
+ pgoff_t index, gfp_t gfp);
+void filemap_remove_folio(struct folio *folio);
+void __filemap_remove_folio(struct folio *folio, void *shadow);
+void replace_page_cache_folio(struct folio *old, struct folio *new);
+void delete_from_page_cache_batch(struct address_space *mapping,
+ struct folio_batch *fbatch);
+bool filemap_release_folio(struct folio *folio, gfp_t gfp);
+loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
+ int whence);
+
+/* Must be non-static for BPF error injection */
+int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
+ pgoff_t index, gfp_t gfp, void **shadowp);
+
+bool filemap_range_has_writeback(struct address_space *mapping,
+ loff_t start_byte, loff_t end_byte);
+
+/**
+ * filemap_range_needs_writeback - check if range potentially needs writeback
+ * @mapping: address space within which to check
+ * @start_byte: offset in bytes where the range starts
+ * @end_byte: offset in bytes where the range ends (inclusive)
+ *
+ * Find at least one page in the range supplied, usually used to check if
+ * direct writing in this range will trigger a writeback. Used by O_DIRECT
+ * read/write with IOCB_NOWAIT, to see if the caller needs to do
+ * filemap_write_and_wait_range() before proceeding.
+ *
+ * Return: %true if the caller should do filemap_write_and_wait_range() before
+ * doing O_DIRECT to a page in this range, %false otherwise.
*/
-static inline int fault_in_pages_writeable(char __user *uaddr, int size)
+static inline bool filemap_range_needs_writeback(struct address_space *mapping,
+ loff_t start_byte,
+ loff_t end_byte)
{
- char __user *end = uaddr + size - 1;
+ if (!mapping->nrpages)
+ return false;
+ if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
+ !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
+ return false;
+ return filemap_range_has_writeback(mapping, start_byte, end_byte);
+}
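In the IOCB_NOWAIT path described above, this check typically gates an early -EAGAIN; a hedged sketch (hypothetical caller, end_byte inclusive):

	static ssize_t example_nowait_check(struct kiocb *iocb, loff_t pos, size_t len)
	{
		struct address_space *mapping = iocb->ki_filp->f_mapping;

		if ((iocb->ki_flags & IOCB_NOWAIT) &&
		    filemap_range_needs_writeback(mapping, pos, pos + len - 1))
			return -EAGAIN;
		return 0;
	}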
- if (unlikely(size == 0))
- return 0;
+/**
+ * struct readahead_control - Describes a readahead request.
+ *
+ * A readahead request is for consecutive pages. Filesystems which
+ * implement the ->readahead method should call readahead_page() or
+ * readahead_page_batch() in a loop and attempt to start I/O against
+ * each page in the request.
+ *
+ * Most of the fields in this struct are private and should be accessed
+ * by the functions below.
+ *
+ * @file: The file, used primarily by network filesystems for authentication.
+ * May be NULL if invoked internally by the filesystem.
+ * @mapping: Readahead this filesystem object.
+ * @ra: File readahead state. May be NULL.
+ */
+struct readahead_control {
+ struct file *file;
+ struct address_space *mapping;
+ struct file_ra_state *ra;
+/* private: use the readahead_* accessors instead */
+ pgoff_t _index;
+ unsigned int _nr_pages;
+ unsigned int _batch_count;
+ bool _workingset;
+ unsigned long _pflags;
+};
- if (unlikely(uaddr > end))
- return -EFAULT;
- /*
- * Writing zeroes into userspace here is OK, because we know that if
- * the zero gets there, we'll be overwriting it.
- */
- do {
- if (unlikely(__put_user(0, uaddr) != 0))
- return -EFAULT;
- uaddr += PAGE_SIZE;
- } while (uaddr <= end);
+#define DEFINE_READAHEAD(ractl, f, r, m, i) \
+ struct readahead_control ractl = { \
+ .file = f, \
+ .mapping = m, \
+ .ra = r, \
+ ._index = i, \
+ }
- /* Check whether the range spilled into the next page. */
- if (((unsigned long)uaddr & PAGE_MASK) ==
- ((unsigned long)end & PAGE_MASK))
- return __put_user(0, end);
+#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE)
- return 0;
+void page_cache_ra_unbounded(struct readahead_control *,
+ unsigned long nr_to_read, unsigned long lookahead_count);
+void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
+void page_cache_async_ra(struct readahead_control *, struct folio *,
+ unsigned long req_count);
+void readahead_expand(struct readahead_control *ractl,
+ loff_t new_start, size_t new_len);
+
+/**
+ * page_cache_sync_readahead - generic file readahead
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @file: Used by the filesystem for authentication.
+ * @index: Index of first page to be read.
+ * @req_count: Total number of pages being read by the caller.
+ *
+ * page_cache_sync_readahead() should be called when a cache miss happened:
+ * it will submit the read. The readahead logic may decide to piggyback more
+ * pages onto the read request if access patterns suggest it will improve
+ * performance.
+ */
+static inline
+void page_cache_sync_readahead(struct address_space *mapping,
+ struct file_ra_state *ra, struct file *file, pgoff_t index,
+ unsigned long req_count)
+{
+ DEFINE_READAHEAD(ractl, file, ra, mapping, index);
+ page_cache_sync_ra(&ractl, req_count);
+}
+
+/**
+ * page_cache_async_readahead - file readahead for marked pages
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @file: Used by the filesystem for authentication.
+ * @folio: The folio at @index which triggered the readahead call.
+ * @index: Index of first page to be read.
+ * @req_count: Total number of pages being read by the caller.
+ *
+ * page_cache_async_readahead() should be called when a page is used which
+ * is marked as PageReadahead; this is a marker to suggest that the application
+ * has used up enough of the readahead window that we should start pulling in
+ * more pages.
+ */
+static inline
+void page_cache_async_readahead(struct address_space *mapping,
+ struct file_ra_state *ra, struct file *file,
+ struct folio *folio, pgoff_t index, unsigned long req_count)
+{
+ DEFINE_READAHEAD(ractl, file, ra, mapping, index);
+ page_cache_async_ra(&ractl, folio, req_count);
}
-static inline int fault_in_pages_readable(const char __user *uaddr, int size)
+static inline struct folio *__readahead_folio(struct readahead_control *ractl)
{
- volatile char c;
- const char __user *end = uaddr + size - 1;
+ struct folio *folio;
- if (unlikely(size == 0))
- return 0;
+ BUG_ON(ractl->_batch_count > ractl->_nr_pages);
+ ractl->_nr_pages -= ractl->_batch_count;
+ ractl->_index += ractl->_batch_count;
- if (unlikely(uaddr > end))
- return -EFAULT;
+ if (!ractl->_nr_pages) {
+ ractl->_batch_count = 0;
+ return NULL;
+ }
- do {
- if (unlikely(__get_user(c, uaddr) != 0))
- return -EFAULT;
- uaddr += PAGE_SIZE;
- } while (uaddr <= end);
+ folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+ ractl->_batch_count = folio_nr_pages(folio);
- /* Check whether the range spilled into the next page. */
- if (((unsigned long)uaddr & PAGE_MASK) ==
- ((unsigned long)end & PAGE_MASK)) {
- return __get_user(c, end);
+ return folio;
+}
+
+/**
+ * readahead_page - Get the next page to read.
+ * @ractl: The current readahead request.
+ *
+ * Context: The page is locked and has an elevated refcount. The caller
+ * should decrease the refcount once the page has been submitted for I/O
+ * and unlock the page once all I/O to that page has completed.
+ * Return: A pointer to the next page, or %NULL if we are done.
+ */
+static inline struct page *readahead_page(struct readahead_control *ractl)
+{
+ struct folio *folio = __readahead_folio(ractl);
+
+ return &folio->page;
+}
+
+/**
+ * readahead_folio - Get the next folio to read.
+ * @ractl: The current readahead request.
+ *
+ * Context: The folio is locked. The caller should unlock the folio once
+ * all I/O to that folio has completed.
+ * Return: A pointer to the next folio, or %NULL if we are done.
+ */
+static inline struct folio *readahead_folio(struct readahead_control *ractl)
+{
+ struct folio *folio = __readahead_folio(ractl);
+
+ if (folio)
+ folio_put(folio);
+ return folio;
+}
+
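A typical ->readahead implementation consumes the request with a loop along these lines (hedged sketch; example_read_folio() stands in for the filesystem's per-folio I/O submission, and since readahead_folio() has already dropped the reference, the callee only needs to unlock on I/O completion):

	static void example_readahead(struct readahead_control *ractl)
	{
		struct folio *folio;

		while ((folio = readahead_folio(ractl)) != NULL)
			example_read_folio(folio);	/* hypothetical: start I/O, unlock when done */
	}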
+static inline unsigned int __readahead_batch(struct readahead_control *rac,
+ struct page **array, unsigned int array_sz)
+{
+ unsigned int i = 0;
+ XA_STATE(xas, &rac->mapping->i_pages, 0);
+ struct page *page;
+
+ BUG_ON(rac->_batch_count > rac->_nr_pages);
+ rac->_nr_pages -= rac->_batch_count;
+ rac->_index += rac->_batch_count;
+ rac->_batch_count = 0;
+
+ xas_set(&xas, rac->_index);
+ rcu_read_lock();
+ xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
+ if (xas_retry(&xas, page))
+ continue;
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_PAGE(PageTail(page), page);
+ array[i++] = page;
+ rac->_batch_count += thp_nr_pages(page);
+ if (i == array_sz)
+ break;
}
+ rcu_read_unlock();
- (void)c;
- return 0;
+ return i;
}
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
- pgoff_t index, gfp_t gfp_mask);
-int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
- pgoff_t index, gfp_t gfp_mask);
-extern void delete_from_page_cache(struct page *page);
-extern void __delete_from_page_cache(struct page *page, void *shadow);
-int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
+/**
+ * readahead_page_batch - Get a batch of pages to read.
+ * @rac: The current readahead request.
+ * @array: An array of pointers to struct page.
+ *
+ * Context: The pages are locked and have an elevated refcount. The caller
+ * should decrease the refcount on each page once it has been submitted for
+ * I/O and unlock each page once all I/O to that page has completed.
+ * Return: The number of pages placed in the array. 0 indicates the request
+ * is complete.
+ */
+#define readahead_page_batch(rac, array) \
+ __readahead_batch(rac, array, ARRAY_SIZE(array))
-/*
- * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run __SetPageLocked() against it.
+/**
+ * readahead_pos - The byte offset into the file of this readahead request.
+ * @rac: The readahead request.
*/
-static inline int add_to_page_cache(struct page *page,
- struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
+static inline loff_t readahead_pos(struct readahead_control *rac)
{
- int error;
+ return (loff_t)rac->_index * PAGE_SIZE;
+}
- __SetPageLocked(page);
- error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
- if (unlikely(error))
- __ClearPageLocked(page);
- return error;
+/**
+ * readahead_length - The number of bytes in this readahead request.
+ * @rac: The readahead request.
+ */
+static inline size_t readahead_length(struct readahead_control *rac)
+{
+ return rac->_nr_pages * PAGE_SIZE;
+}
+
+/**
+ * readahead_index - The index of the first page in this readahead request.
+ * @rac: The readahead request.
+ */
+static inline pgoff_t readahead_index(struct readahead_control *rac)
+{
+ return rac->_index;
+}
+
+/**
+ * readahead_count - The number of pages in this readahead request.
+ * @rac: The readahead request.
+ */
+static inline unsigned int readahead_count(struct readahead_control *rac)
+{
+ return rac->_nr_pages;
+}
+
+/**
+ * readahead_batch_length - The number of bytes in the current batch.
+ * @rac: The readahead request.
+ */
+static inline size_t readahead_batch_length(struct readahead_control *rac)
+{
+ return rac->_batch_count * PAGE_SIZE;
}
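The accessors compose naturally; a filesystem that maps extents before submitting I/O might derive the byte range of the request like this hedged fragment:

	loff_t pos = readahead_pos(ractl);	/* byte offset of the first page */
	size_t len = readahead_length(ractl);	/* request size in bytes */
	/* map and submit I/O for [pos, pos + len) */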
static inline unsigned long dir_pages(struct inode *inode)
@@ -632,4 +1381,82 @@ static inline unsigned long dir_pages(struct inode *inode)
PAGE_SHIFT;
}
+/**
+ * folio_mkwrite_check_truncate - check if folio was truncated
+ * @folio: the folio to check
+ * @inode: the inode to check the folio against
+ *
+ * Return: the number of bytes in the folio up to EOF,
+ * or -EFAULT if the folio was truncated.
+ */
+static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
+ struct inode *inode)
+{
+ loff_t size = i_size_read(inode);
+ pgoff_t index = size >> PAGE_SHIFT;
+ size_t offset = offset_in_folio(folio, size);
+
+ if (!folio->mapping)
+ return -EFAULT;
+
+ /* folio is wholly inside EOF */
+ if (folio_next_index(folio) - 1 < index)
+ return folio_size(folio);
+ /* folio is wholly past EOF */
+ if (folio->index > index || !offset)
+ return -EFAULT;
+ /* folio is partially inside EOF */
+ return offset;
+}
+
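In a ->page_mkwrite style handler, the check is used after locking the folio; a hedged sketch (hypothetical handler, dirtying elided):

	static vm_fault_t example_mkwrite(struct vm_fault *vmf)
	{
		struct folio *folio = page_folio(vmf->page);
		struct inode *inode = file_inode(vmf->vma->vm_file);
		ssize_t len;

		folio_lock(folio);
		len = folio_mkwrite_check_truncate(folio, inode);
		if (len < 0) {
			folio_unlock(folio);
			return VM_FAULT_NOPAGE;
		}
		/* mark the first len bytes of the folio dirty here */
		return VM_FAULT_LOCKED;
	}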
+/**
+ * page_mkwrite_check_truncate - check if page was truncated
+ * @page: the page to check
+ * @inode: the inode to check the page against
+ *
+ * Returns the number of bytes in the page up to EOF,
+ * or -EFAULT if the page was truncated.
+ */
+static inline int page_mkwrite_check_truncate(struct page *page,
+ struct inode *inode)
+{
+ loff_t size = i_size_read(inode);
+ pgoff_t index = size >> PAGE_SHIFT;
+ int offset = offset_in_page(size);
+
+ if (page->mapping != inode->i_mapping)
+ return -EFAULT;
+
+ /* page is wholly inside EOF */
+ if (page->index < index)
+ return PAGE_SIZE;
+ /* page is wholly past EOF */
+ if (page->index > index || !offset)
+ return -EFAULT;
+ /* page is partially inside EOF */
+ return offset;
+}
+
+/**
+ * i_blocks_per_folio - How many blocks fit in this folio.
+ * @inode: The inode which contains the blocks.
+ * @folio: The folio.
+ *
+ * If the block size is larger than the size of this folio, return zero.
+ *
+ * Context: The caller should hold a refcount on the folio to prevent it
+ * from being split.
+ * Return: The number of filesystem blocks covered by this folio.
+ */
+static inline
+unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
+{
+ return folio_size(folio) >> inode->i_blkbits;
+}
+
+static inline
+unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
+{
+ return i_blocks_per_folio(inode, page_folio(page));
+}
#endif /* _LINUX_PAGEMAP_H */
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index b45d391b4540..f582f7213ea5 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/pagevec.h
*
@@ -8,35 +9,28 @@
#ifndef _LINUX_PAGEVEC_H
#define _LINUX_PAGEVEC_H
-/* 14 pointers + two long's align the pagevec structure to a power of two */
-#define PAGEVEC_SIZE 14
+#include <linux/xarray.h>
+
+/* 15 pointers + header align the pagevec structure to a power of two */
+#define PAGEVEC_SIZE 15
struct page;
+struct folio;
struct address_space;
+/* Layout must match folio_batch */
struct pagevec {
- unsigned long nr;
- unsigned long cold;
+ unsigned char nr;
+ bool percpu_pvec_drained;
struct page *pages[PAGEVEC_SIZE];
};
void __pagevec_release(struct pagevec *pvec);
-void __pagevec_lru_add(struct pagevec *pvec);
-unsigned pagevec_lookup_entries(struct pagevec *pvec,
- struct address_space *mapping,
- pgoff_t start, unsigned nr_entries,
- pgoff_t *indices);
-void pagevec_remove_exceptionals(struct pagevec *pvec);
-unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
- pgoff_t start, unsigned nr_pages);
-unsigned pagevec_lookup_tag(struct pagevec *pvec,
- struct address_space *mapping, pgoff_t *index, int tag,
- unsigned nr_pages);
-
-static inline void pagevec_init(struct pagevec *pvec, int cold)
+
+static inline void pagevec_init(struct pagevec *pvec)
{
pvec->nr = 0;
- pvec->cold = cold;
+ pvec->percpu_pvec_drained = false;
}
static inline void pagevec_reinit(struct pagevec *pvec)
@@ -69,4 +63,74 @@ static inline void pagevec_release(struct pagevec *pvec)
__pagevec_release(pvec);
}
+/**
+ * struct folio_batch - A collection of folios.
+ *
+ * The folio_batch is used to amortise the cost of retrieving and
+ * operating on a set of folios. The order of folios in the batch may be
+ * significant (eg delete_from_page_cache_batch()). Some users of the
+ * folio_batch store "exceptional" entries in it which can be removed
+ * by calling folio_batch_remove_exceptionals().
+ */
+struct folio_batch {
+ unsigned char nr;
+ bool percpu_pvec_drained;
+ struct folio *folios[PAGEVEC_SIZE];
+};
+
+/* Layout must match pagevec */
+static_assert(sizeof(struct pagevec) == sizeof(struct folio_batch));
+static_assert(offsetof(struct pagevec, pages) ==
+ offsetof(struct folio_batch, folios));
+
+/**
+ * folio_batch_init() - Initialise a batch of folios
+ * @fbatch: The folio batch.
+ *
+ * A freshly initialised folio_batch contains zero folios.
+ */
+static inline void folio_batch_init(struct folio_batch *fbatch)
+{
+ fbatch->nr = 0;
+ fbatch->percpu_pvec_drained = false;
+}
+
+static inline void folio_batch_reinit(struct folio_batch *fbatch)
+{
+ fbatch->nr = 0;
+}
+
+static inline unsigned int folio_batch_count(struct folio_batch *fbatch)
+{
+ return fbatch->nr;
+}
+
+static inline unsigned int fbatch_space(struct folio_batch *fbatch)
+{
+ return PAGEVEC_SIZE - fbatch->nr;
+}
+
+/**
+ * folio_batch_add() - Add a folio to a batch.
+ * @fbatch: The folio batch.
+ * @folio: The folio to add.
+ *
+ * The folio is added to the end of the batch.
+ * The batch must have previously been initialised using folio_batch_init().
+ *
+ * Return: The number of slots still available.
+ */
+static inline unsigned folio_batch_add(struct folio_batch *fbatch,
+ struct folio *folio)
+{
+ fbatch->folios[fbatch->nr++] = folio;
+ return fbatch_space(fbatch);
+}
+
+static inline void folio_batch_release(struct folio_batch *fbatch)
+{
+ pagevec_release((struct pagevec *)fbatch);
+}
+
+void folio_batch_remove_exceptionals(struct folio_batch *fbatch);
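Putting the helpers together: a producer commonly adds folios until folio_batch_add() reports no space left, then hands the batch off; a hedged sketch with a hypothetical consumer:

	static void example_collect(struct folio_batch *fbatch, struct folio *folio)
	{
		folio_get(folio);
		if (!folio_batch_add(fbatch, folio)) {
			example_process_batch(fbatch);	/* hypothetical consumer */
			folio_batch_release(fbatch);	/* drops refs and reinitialises */
		}
	}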
#endif /* _LINUX_PAGEVEC_H */
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
new file mode 100644
index 000000000000..27a6df448ee5
--- /dev/null
+++ b/include/linux/pagewalk.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PAGEWALK_H
+#define _LINUX_PAGEWALK_H
+
+#include <linux/mm.h>
+
+struct mm_walk;
+
+/**
+ * struct mm_walk_ops - callbacks for walk_page_range
+ * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
+ * @p4d_entry: if set, called for each non-empty P4D entry
+ * @pud_entry: if set, called for each non-empty PUD entry
+ * @pmd_entry: if set, called for each non-empty PMD entry
+ * this handler is required to be able to handle
+ * pmd_trans_huge() pmds. They may simply choose to
+ * split_huge_page() instead of handling it explicitly.
+ * @pte_entry: if set, called for each PTE (lowest-level) entry,
+ * including empty ones
+ * @pte_hole: if set, called for each hole at all levels,
+ * depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD.
+ * Any folded depths (where PTRS_PER_P?D is equal to 1)
+ * are skipped.
+ * @hugetlb_entry: if set, called for each hugetlb entry. This hook
+ * function is called with the vma lock held, in order to
+ * protect against a concurrent freeing of the pte_t* or
+ * the ptl. In some cases, the hook function needs to drop
+ * and retake the vma lock in order to avoid deadlocks
+ * while calling other functions. In such cases the hook
+ * function must either refrain from accessing the pte or
+ * ptl after dropping the vma lock, or else revalidate
+ * those items after re-acquiring the vma lock and before
+ * accessing them.
+ * @test_walk: caller specific callback function to determine whether
+ * we walk over the current vma or not. Returning 0 means
+ * "do page table walk over the current vma", returning
+ * a negative value means "abort current page table walk
+ * right now" and returning 1 means "skip the current vma"
+ * Note that this callback is not called when the caller
+ * passes in a single VMA as for walk_page_vma().
+ * @pre_vma: if set, called before starting walk on a non-null vma.
+ * @post_vma: if set, called after a walk on a non-null vma, provided
+ * that @pre_vma and the vma walk succeeded.
+ *
+ * p?d_entry callbacks are called even if those levels are folded on a
+ * particular architecture/configuration.
+ */
+struct mm_walk_ops {
+ int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk);
+ int (*p4d_entry)(p4d_t *p4d, unsigned long addr,
+ unsigned long next, struct mm_walk *walk);
+ int (*pud_entry)(pud_t *pud, unsigned long addr,
+ unsigned long next, struct mm_walk *walk);
+ int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk);
+ int (*pte_entry)(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk);
+ int (*pte_hole)(unsigned long addr, unsigned long next,
+ int depth, struct mm_walk *walk);
+ int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
+ unsigned long addr, unsigned long next,
+ struct mm_walk *walk);
+ int (*test_walk)(unsigned long addr, unsigned long next,
+ struct mm_walk *walk);
+ int (*pre_vma)(unsigned long start, unsigned long end,
+ struct mm_walk *walk);
+ void (*post_vma)(struct mm_walk *walk);
+};
+
+/*
+ * Action for pud_entry / pmd_entry callbacks.
+ * ACTION_SUBTREE is the default
+ */
+enum page_walk_action {
+ /* Descend to next level, splitting huge pages if needed and possible */
+ ACTION_SUBTREE = 0,
+ /* Continue to next entry at this level (ignoring any subtree) */
+ ACTION_CONTINUE = 1,
+ /* Call again for this entry */
+ ACTION_AGAIN = 2
+};
+
+/**
+ * struct mm_walk - walk_page_range data
+ * @ops: operation to call during the walk
+ * @mm: mm_struct representing the target process of page table walk
+ * @pgd: pointer to PGD; only valid with no_vma (otherwise set to NULL)
+ * @vma: vma currently walked (NULL if walking outside vmas)
+ * @action: next action to perform (see enum page_walk_action)
+ * @no_vma: walk ignoring vmas (vma will always be NULL)
+ * @private: private data for callbacks' usage
+ *
+ * (see the comment on walk_page_range() for more details)
+ */
+struct mm_walk {
+ const struct mm_walk_ops *ops;
+ struct mm_struct *mm;
+ pgd_t *pgd;
+ struct vm_area_struct *vma;
+ enum page_walk_action action;
+ bool no_vma;
+ void *private;
+};
+
+int walk_page_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end, const struct mm_walk_ops *ops,
+ void *private);
+int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
+ unsigned long end, const struct mm_walk_ops *ops,
+ pgd_t *pgd,
+ void *private);
+int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, const struct mm_walk_ops *ops,
+ void *private);
+int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
+ void *private);
+int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
+ pgoff_t nr, const struct mm_walk_ops *ops,
+ void *private);
+
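To make the callback structure concrete, here is a hedged walker sketch that counts present PTEs (illustrative names throughout; callers of walk_page_range() are expected to hold mmap_lock):

	static int example_pte_entry(pte_t *pte, unsigned long addr,
				     unsigned long next, struct mm_walk *walk)
	{
		unsigned long *count = walk->private;

		if (pte_present(*pte))
			(*count)++;
		return 0;	/* a non-zero return aborts the walk */
	}

	static const struct mm_walk_ops example_walk_ops = {
		.pte_entry = example_pte_entry,
	};

	/* usage: walk_page_range(mm, start, end, &example_walk_ops, &count); */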
+#endif /* _LINUX_PAGEWALK_H */
diff --git a/include/linux/panic.h b/include/linux/panic.h
new file mode 100644
index 000000000000..979b776e3bcb
--- /dev/null
+++ b/include/linux/panic.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PANIC_H
+#define _LINUX_PANIC_H
+
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+
+struct pt_regs;
+
+extern long (*panic_blink)(int state);
+__printf(1, 2)
+void panic(const char *fmt, ...) __noreturn __cold;
+void nmi_panic(struct pt_regs *regs, const char *msg);
+void check_panic_on_warn(const char *origin);
+extern void oops_enter(void);
+extern void oops_exit(void);
+extern bool oops_may_print(void);
+
+extern int panic_timeout;
+extern unsigned long panic_print;
+extern int panic_on_oops;
+extern int panic_on_unrecovered_nmi;
+extern int panic_on_io_nmi;
+extern int panic_on_warn;
+
+extern unsigned long panic_on_taint;
+extern bool panic_on_taint_nousertaint;
+
+extern int sysctl_panic_on_rcu_stall;
+extern int sysctl_max_rcu_stall_to_panic;
+extern int sysctl_panic_on_stackoverflow;
+
+extern bool crash_kexec_post_notifiers;
+
+/*
+ * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It
+ * holds a CPU number which is executing panic() currently. A value of
+ * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec().
+ */
+extern atomic_t panic_cpu;
+#define PANIC_CPU_INVALID -1
+
+/*
+ * Only to be used by arch init code. If the user overrode the default
+ * CONFIG_PANIC_TIMEOUT, honor it.
+ */
+static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
+{
+ if (panic_timeout == arch_default_timeout)
+ panic_timeout = timeout;
+}
+
+/* This cannot be an enum because some may be used in assembly source. */
+#define TAINT_PROPRIETARY_MODULE 0
+#define TAINT_FORCED_MODULE 1
+#define TAINT_CPU_OUT_OF_SPEC 2
+#define TAINT_FORCED_RMMOD 3
+#define TAINT_MACHINE_CHECK 4
+#define TAINT_BAD_PAGE 5
+#define TAINT_USER 6
+#define TAINT_DIE 7
+#define TAINT_OVERRIDDEN_ACPI_TABLE 8
+#define TAINT_WARN 9
+#define TAINT_CRAP 10
+#define TAINT_FIRMWARE_WORKAROUND 11
+#define TAINT_OOT_MODULE 12
+#define TAINT_UNSIGNED_MODULE 13
+#define TAINT_SOFTLOCKUP 14
+#define TAINT_LIVEPATCH 15
+#define TAINT_AUX 16
+#define TAINT_RANDSTRUCT 17
+#define TAINT_TEST 18
+#define TAINT_FLAGS_COUNT 19
+#define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1)
+
+struct taint_flag {
+ char c_true; /* character printed when tainted */
+ char c_false; /* character printed when not tainted */
+ bool module; /* also show as a per-module taint flag */
+};
+
+extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT];
+
+enum lockdep_ok {
+ LOCKDEP_STILL_OK,
+ LOCKDEP_NOW_UNRELIABLE,
+};
+
+extern const char *print_tainted(void);
+extern void add_taint(unsigned flag, enum lockdep_ok);
+extern int test_taint(unsigned flag);
+extern unsigned long get_taint(void);
+
+#endif /* _LINUX_PANIC_H */
diff --git a/include/linux/panic_notifier.h b/include/linux/panic_notifier.h
new file mode 100644
index 000000000000..41e32483d7a7
--- /dev/null
+++ b/include/linux/panic_notifier.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PANIC_NOTIFIERS_H
+#define _LINUX_PANIC_NOTIFIERS_H
+
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+extern struct atomic_notifier_head panic_notifier_list;
+
+extern bool crash_kexec_post_notifiers;
+
+#endif /* _LINUX_PANIC_NOTIFIERS_H */
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 58e3c64c6b49..a0bc9e0267b7 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -40,10 +40,6 @@ struct amiga_parport_state {
unsigned char statusdir;/* ciab.ddrb & 7 */
};
-struct ax88796_parport_state {
- unsigned char cpr;
-};
-
struct ip32_parport_state {
unsigned int dcr;
unsigned int ecr;
@@ -55,7 +51,6 @@ struct parport_state {
/* ARC has no state. */
struct ax_parport_state ax;
struct amiga_parport_state amiga;
- struct ax88796_parport_state ax88796;
/* Atari has no state. */
struct ip32_parport_state ip32;
void *misc;
@@ -225,6 +220,7 @@ struct parport {
struct pardevice *waittail;
struct list_head list;
+ struct timer_list timer;
unsigned int flags;
void *sysctl_table;
@@ -296,13 +292,54 @@ int __must_check __parport_register_driver(struct parport_driver *,
* parport_register_driver must be a macro so that KBUILD_MODNAME can
* be expanded
*/
+
+/**
+ * parport_register_driver - register a parallel port device driver
+ * @driver: structure describing the driver
+ *
+ * This can be called by a parallel port device driver in order
+ * to receive notifications about ports being found in the
+ * system, as well as ports no longer available.
+ *
+ * If devmodel is true then the new device model is used
+ * for registration.
+ *
+ * The @driver structure is allocated by the caller and must not be
+ * deallocated until after calling parport_unregister_driver().
+ *
+ * If using the non device model:
+ * The driver's attach() function may block. The port that
+ * attach() is given will be valid for the duration of the
+ * callback, but if the driver wants to take a copy of the
+ * pointer it must call parport_get_port() to do so. Calling
+ * parport_register_device() on that port will do this for you.
+ *
+ * The driver's detach() function may block. The port that
+ * detach() is given will be valid for the duration of the
+ * callback, but if the driver wants to take a copy of the
+ * pointer it must call parport_get_port() to do so.
+ *
+ *
+ * Returns 0 on success. The non device model always succeeds, but the
+ * new device model can fail and will return the error code.
+ **/
#define parport_register_driver(driver) \
__parport_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
/* Unregister a high-level driver. */
-extern void parport_unregister_driver (struct parport_driver *);
void parport_unregister_driver(struct parport_driver *);
+/**
+ * module_parport_driver() - Helper macro for registering a modular parport driver
+ * @__parport_driver: struct parport_driver to be used
+ *
+ * Helper macro for parport drivers which do not do anything special in module
+ * init and exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit().
+ */
+#define module_parport_driver(__parport_driver) \
+ module_driver(__parport_driver, parport_register_driver, parport_unregister_driver)
+
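For a driver with no extra init/exit work, the macro reduces module boilerplate to roughly this hedged sketch (callback names are illustrative; see struct parport_driver for the exact fields):

	static struct parport_driver example_driver = {
		.name = "example",
		.match_port = example_match_port,	/* hypothetical attach callback */
		.detach = example_detach,		/* hypothetical detach callback */
	};
	module_parport_driver(example_driver);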
/* If parport_register_driver doesn't fit your needs, perhaps
* parport_find_xxx does. */
extern struct parport *parport_find_number (int);
@@ -324,18 +361,10 @@ struct pardev_cb {
unsigned int flags;
};
-/* parport_register_device declares that a device is connected to a
- port, and tells the kernel all it needs to know.
- - pf is the preemption function (may be NULL for no callback)
- - kf is the wake-up function (may be NULL for no callback)
- - irq_func is the interrupt handler (may be NULL for no interrupts)
- - handle is a user pointer that gets handed to callback functions. */
-struct pardevice *parport_register_device(struct parport *port,
- const char *name,
- int (*pf)(void *), void (*kf)(void *),
- void (*irq_func)(void *),
- int flags, void *handle);
-
+/*
+ * parport_register_dev_model declares that a device is connected to a
+ * port, and tells the kernel all it needs to know.
+ */
struct pardevice *
parport_register_dev_model(struct parport *port, const char *name,
const struct pardev_cb *par_dev_cb, int cnt);
@@ -459,6 +488,7 @@ extern size_t parport_ieee1284_epp_read_addr (struct parport *,
void *, size_t, int);
/* IEEE1284.3 functions */
+#define daisy_dev_name "Device ID probe"
extern int parport_daisy_init (struct parport *port);
extern void parport_daisy_fini (struct parport *port);
extern struct pardevice *parport_open (int devnum, const char *name);
diff --git a/include/linux/parport_pc.h b/include/linux/parport_pc.h
index cc1767f5cca8..f1ec5c10c3b3 100644
--- a/include/linux/parport_pc.h
+++ b/include/linux/parport_pc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PARPORT_PC_H
#define __LINUX_PARPORT_PC_H
@@ -25,6 +26,9 @@ struct parport_pc_private {
/* Whether or not there's an ECR. */
int ecr;
+ /* Bitmask of writable ECR bits. */
+ unsigned char ecr_writable;
+
/* Number of PWords that FIFO will hold. */
int fifo_depth;
diff --git a/include/linux/parser.h b/include/linux/parser.h
index 884c1e6eb3fe..dd79f45a37b8 100644
--- a/include/linux/parser.h
+++ b/include/linux/parser.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/include/linux/parser.h
*
@@ -6,7 +7,8 @@
* but could potentially be used anywhere else that simple option=arg
* parsing is required.
*/
-
+#ifndef _LINUX_PARSER_H
+#define _LINUX_PARSER_H
/* associates an integer enumerator with a pattern string. */
struct match_token {
@@ -27,9 +29,12 @@ typedef struct {
int match_token(char *, const match_table_t table, substring_t args[]);
int match_int(substring_t *, int *result);
+int match_uint(substring_t *s, unsigned int *result);
int match_u64(substring_t *, u64 *result);
int match_octal(substring_t *, int *result);
int match_hex(substring_t *, int *result);
bool match_wildcard(const char *pattern, const char *str);
size_t match_strlcpy(char *, const substring_t *, size_t);
char *match_strdup(const substring_t *);
+
+#endif /* _LINUX_PARSER_H */
diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h
new file mode 100644
index 000000000000..abeba356bc3f
--- /dev/null
+++ b/include/linux/part_stat.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PART_STAT_H
+#define _LINUX_PART_STAT_H
+
+#include <linux/blkdev.h>
+#include <asm/local.h>
+
+struct disk_stats {
+ u64 nsecs[NR_STAT_GROUPS];
+ unsigned long sectors[NR_STAT_GROUPS];
+ unsigned long ios[NR_STAT_GROUPS];
+ unsigned long merges[NR_STAT_GROUPS];
+ unsigned long io_ticks;
+ local_t in_flight[2];
+};
+
+/*
+ * Macros to operate on percpu disk statistics:
+ *
+ * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters and should
+ * be called between disk_stat_lock() and disk_stat_unlock().
+ *
+ * part_stat_read() can be called at any time.
+ */
+#define part_stat_lock() preempt_disable()
+#define part_stat_unlock() preempt_enable()
+
+#define part_stat_get_cpu(part, field, cpu) \
+ (per_cpu_ptr((part)->bd_stats, (cpu))->field)
+
+#define part_stat_get(part, field) \
+ part_stat_get_cpu(part, field, smp_processor_id())
+
+#define part_stat_read(part, field) \
+({ \
+ typeof((part)->bd_stats->field) res = 0; \
+ unsigned int _cpu; \
+ for_each_possible_cpu(_cpu) \
+ res += per_cpu_ptr((part)->bd_stats, _cpu)->field; \
+ res; \
+})
+
+static inline void part_stat_set_all(struct block_device *part, int value)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ memset(per_cpu_ptr(part->bd_stats, i), value,
+ sizeof(struct disk_stats));
+}
+
+#define part_stat_read_accum(part, field) \
+ (part_stat_read(part, field[STAT_READ]) + \
+ part_stat_read(part, field[STAT_WRITE]) + \
+ part_stat_read(part, field[STAT_DISCARD]))
+
+#define __part_stat_add(part, field, addnd) \
+ __this_cpu_add((part)->bd_stats->field, addnd)
+
+#define part_stat_add(part, field, addnd) do { \
+ __part_stat_add((part), field, addnd); \
+ if ((part)->bd_partno) \
+ __part_stat_add(bdev_whole(part), field, addnd); \
+} while (0)
+
+#define part_stat_dec(part, field) \
+ part_stat_add(part, field, -1)
+#define part_stat_inc(part, field) \
+ part_stat_add(part, field, 1)
+#define part_stat_sub(part, field, subnd) \
+ part_stat_add(part, field, -subnd)
+
+#define part_stat_local_dec(part, field) \
+ local_dec(&(part_stat_get(part, field)))
+#define part_stat_local_inc(part, field) \
+ local_inc(&(part_stat_get(part, field)))
+#define part_stat_local_read(part, field) \
+ local_read(&(part_stat_get(part, field)))
+#define part_stat_local_read_cpu(part, field, cpu) \
+ local_read(&(part_stat_get_cpu(part, field, cpu)))
+
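As a read-side illustration, the accumulated counters can be sampled at any time without part_stat_lock(); a hedged sketch:

	static inline unsigned long example_total_ios(struct block_device *bdev)
	{
		/* sum of read, write and discard I/Os across all CPUs */
		return part_stat_read_accum(bdev, ios);
	}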
+#endif /* _LINUX_PART_STAT_H */
diff --git a/include/linux/patchkey.h b/include/linux/patchkey.h
index 97a919fc9927..f581defb2df0 100644
--- a/include/linux/patchkey.h
+++ b/include/linux/patchkey.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* <linux/patchkey.h> -- definition of _PATCHKEY macro
*
diff --git a/include/linux/path.h b/include/linux/path.h
index d1372186f431..475225a03d0d 100644
--- a/include/linux/path.h
+++ b/include/linux/path.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PATH_H
#define _LINUX_PATH_H
@@ -7,7 +8,7 @@ struct vfsmount;
struct path {
struct vfsmount *mnt;
struct dentry *dentry;
-};
+} __randomize_layout;
extern void path_get(const struct path *);
extern void path_put(const struct path *);
@@ -17,4 +18,10 @@ static inline int path_equal(const struct path *path1, const struct path *path2)
return path1->mnt == path2->mnt && path1->dentry == path2->dentry;
}
+static inline void path_put_init(struct path *path)
+{
+ path_put(path);
+ *path = (struct path) { };
+}
+
#endif /* _LINUX_PATH_H */
diff --git a/include/linux/pch_dma.h b/include/linux/pch_dma.h
index fdafe529ef8a..d5a6a4b6b431 100644
--- a/include/linux/pch_dma.h
+++ b/include/linux/pch_dma.h
@@ -1,18 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2010 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef PCH_DMA_H
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index dd86c97f2454..078225b514d4 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* File pci-acpi.h
*
@@ -26,7 +27,7 @@ extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle);
struct pci_ecam_ops;
extern int pci_mcfg_lookup(struct acpi_pci_root *root, struct resource *cfgres,
- struct pci_ecam_ops **ecam_ops);
+ const struct pci_ecam_ops **ecam_ops);
static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
{
@@ -83,6 +84,14 @@ extern struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
void acpi_pci_add_bus(struct pci_bus *bus);
void acpi_pci_remove_bus(struct pci_bus *bus);
+#ifdef CONFIG_PCI
+void pci_acpi_setup(struct device *dev, struct acpi_device *adev);
+void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev);
+#else
+static inline void pci_acpi_setup(struct device *dev, struct acpi_device *adev) {}
+static inline void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev) {}
+#endif
+
#ifdef CONFIG_ACPI_PCI_SLOT
void acpi_pci_slot_init(void);
void acpi_pci_slot_enumerate(struct pci_bus *bus);
@@ -106,19 +115,27 @@ static inline void acpiphp_check_host_bridge(struct acpi_device *adev) { }
#endif
extern const guid_t pci_acpi_dsm_guid;
-#define DEVICE_LABEL_DSM 0x07
-#define RESET_DELAY_DSM 0x08
-#define FUNCTION_DELAY_DSM 0x09
+
+/* _DSM Definitions for PCI */
+#define DSM_PCI_PRESERVE_BOOT_CONFIG 0x05
+#define DSM_PCI_DEVICE_NAME 0x07
+#define DSM_PCI_POWER_ON_RESET_DELAY 0x08
+#define DSM_PCI_DEVICE_READINESS_DURATIONS 0x09
+
+#ifdef CONFIG_PCIE_EDR
+void pci_acpi_add_edr_notifier(struct pci_dev *pdev);
+void pci_acpi_remove_edr_notifier(struct pci_dev *pdev);
+#else
+static inline void pci_acpi_add_edr_notifier(struct pci_dev *pdev) { }
+static inline void pci_acpi_remove_edr_notifier(struct pci_dev *pdev) { }
+#endif /* CONFIG_PCIE_EDR */
+
+int pci_acpi_set_companion_lookup_hook(struct acpi_device *(*func)(struct pci_dev *));
+void pci_acpi_clear_companion_lookup_hook(void);
#else /* CONFIG_ACPI */
static inline void acpi_pci_add_bus(struct pci_bus *bus) { }
static inline void acpi_pci_remove_bus(struct pci_bus *bus) { }
#endif /* CONFIG_ACPI */
-#ifdef CONFIG_ACPI_APEI
-extern bool aer_acpi_firmware_first(void);
-#else
-static inline bool aer_acpi_firmware_first(void) { return false; }
-#endif
-
#endif /* _PCI_ACPI_H_ */
diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h
deleted file mode 100644
index 207c561fb40e..000000000000
--- a/include/linux/pci-aspm.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * aspm.h
- *
- * PCI Express ASPM defines and function prototypes
- *
- * Copyright (C) 2007 Intel Corp.
- * Zhang Yanmin (yanmin.zhang@intel.com)
- * Shaohua Li (shaohua.li@intel.com)
- *
- * For more information, please consult the following manuals (look at
- * http://www.pcisig.com/ for how to get them):
- *
- * PCI Express Specification
- */
-
-#ifndef LINUX_ASPM_H
-#define LINUX_ASPM_H
-
-#include <linux/pci.h>
-
-#define PCIE_LINK_STATE_L0S 1
-#define PCIE_LINK_STATE_L1 2
-#define PCIE_LINK_STATE_CLKPM 4
-
-#ifdef CONFIG_PCIEASPM
-void pcie_aspm_init_link_state(struct pci_dev *pdev);
-void pcie_aspm_exit_link_state(struct pci_dev *pdev);
-void pcie_aspm_pm_state_change(struct pci_dev *pdev);
-void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
-void pci_disable_link_state(struct pci_dev *pdev, int state);
-void pci_disable_link_state_locked(struct pci_dev *pdev, int state);
-void pcie_no_aspm(void);
-#else
-static inline void pcie_aspm_init_link_state(struct pci_dev *pdev)
-{
-}
-static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev)
-{
-}
-static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev)
-{
-}
-static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
-{
-}
-static inline void pci_disable_link_state(struct pci_dev *pdev, int state)
-{
-}
-static inline void pcie_no_aspm(void)
-{
-}
-#endif
-
-#ifdef CONFIG_PCIEASPM_DEBUG /* this depends on CONFIG_PCIEASPM */
-void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev);
-void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev);
-#else
-static inline void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
-{
-}
-static inline void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
-{
-}
-#endif
-#endif /* LINUX_ASPM_H */
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index 782fb8e0755f..df54cd5b15db 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -1,71 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_PCI_ATS_H
#define LINUX_PCI_ATS_H
#include <linux/pci.h>
-#ifdef CONFIG_PCI_PRI
+#ifdef CONFIG_PCI_ATS
+/* Address Translation Service */
+bool pci_ats_supported(struct pci_dev *dev);
+int pci_enable_ats(struct pci_dev *dev, int ps);
+void pci_disable_ats(struct pci_dev *dev);
+int pci_ats_queue_depth(struct pci_dev *dev);
+int pci_ats_page_aligned(struct pci_dev *dev);
+#else /* CONFIG_PCI_ATS */
+static inline bool pci_ats_supported(struct pci_dev *d)
+{ return false; }
+static inline int pci_enable_ats(struct pci_dev *d, int ps)
+{ return -ENODEV; }
+static inline void pci_disable_ats(struct pci_dev *d) { }
+static inline int pci_ats_queue_depth(struct pci_dev *d)
+{ return -ENODEV; }
+static inline int pci_ats_page_aligned(struct pci_dev *dev)
+{ return 0; }
+#endif /* CONFIG_PCI_ATS */
+#ifdef CONFIG_PCI_PRI
int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
void pci_disable_pri(struct pci_dev *pdev);
-void pci_restore_pri_state(struct pci_dev *pdev);
int pci_reset_pri(struct pci_dev *pdev);
-
-#else /* CONFIG_PCI_PRI */
-
-static inline int pci_enable_pri(struct pci_dev *pdev, u32 reqs)
-{
- return -ENODEV;
-}
-
-static inline void pci_disable_pri(struct pci_dev *pdev)
-{
-}
-
-static inline void pci_restore_pri_state(struct pci_dev *pdev)
-{
-}
-
-static inline int pci_reset_pri(struct pci_dev *pdev)
-{
- return -ENODEV;
-}
-
+int pci_prg_resp_pasid_required(struct pci_dev *pdev);
+bool pci_pri_supported(struct pci_dev *pdev);
+#else
+static inline bool pci_pri_supported(struct pci_dev *pdev)
+{ return false; }
#endif /* CONFIG_PCI_PRI */
#ifdef CONFIG_PCI_PASID
-
int pci_enable_pasid(struct pci_dev *pdev, int features);
void pci_disable_pasid(struct pci_dev *pdev);
-void pci_restore_pasid_state(struct pci_dev *pdev);
int pci_pasid_features(struct pci_dev *pdev);
int pci_max_pasids(struct pci_dev *pdev);
-
-#else /* CONFIG_PCI_PASID */
-
+#else /* CONFIG_PCI_PASID */
static inline int pci_enable_pasid(struct pci_dev *pdev, int features)
-{
- return -EINVAL;
-}
-
-static inline void pci_disable_pasid(struct pci_dev *pdev)
-{
-}
-
-static inline void pci_restore_pasid_state(struct pci_dev *pdev)
-{
-}
-
+{ return -EINVAL; }
+static inline void pci_disable_pasid(struct pci_dev *pdev) { }
static inline int pci_pasid_features(struct pci_dev *pdev)
-{
- return -EINVAL;
-}
-
+{ return -EINVAL; }
static inline int pci_max_pasids(struct pci_dev *pdev)
-{
- return -EINVAL;
-}
-
+{ return -EINVAL; }
#endif /* CONFIG_PCI_PASID */
-
-#endif /* LINUX_PCI_ATS_H*/
+#endif /* LINUX_PCI_ATS_H */
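A minimal sketch of the consolidated CONFIG_PCI_ATS entry points as an IOMMU driver might call them; my_iommu_enable_ats() is hypothetical:

#include <linux/pci-ats.h>

static int my_iommu_enable_ats(struct pci_dev *pdev)
{
	if (!pci_ats_supported(pdev))
		return -ENODEV;

	/* "ps" is the page shift of the smallest translation unit;
	 * PAGE_SHIFT is the usual choice.
	 */
	return pci_enable_ats(pdev, PAGE_SHIFT);
}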
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h
deleted file mode 100644
index 39726caef5b1..000000000000
--- a/include/linux/pci-dma-compat.h
+++ /dev/null
@@ -1,147 +0,0 @@
-/* include this file if the platform implements the dma_ DMA Mapping API
- * and wants to provide the pci_ DMA Mapping API in terms of it */
-
-#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
-#define _ASM_GENERIC_PCI_DMA_COMPAT_H
-
-#include <linux/dma-mapping.h>
-
-/* This defines the direction arg to the DMA mapping routines. */
-#define PCI_DMA_BIDIRECTIONAL 0
-#define PCI_DMA_TODEVICE 1
-#define PCI_DMA_FROMDEVICE 2
-#define PCI_DMA_NONE 3
-
-static inline void *
-pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
- dma_addr_t *dma_handle)
-{
- return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
-}
-
-static inline void *
-pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
- dma_addr_t *dma_handle)
-{
- return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
- size, dma_handle, GFP_ATOMIC);
-}
-
-static inline void
-pci_free_consistent(struct pci_dev *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
-{
- dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle);
-}
-
-static inline dma_addr_t
-pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
-{
- return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
- size_t size, int direction)
-{
- dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
-}
-
-static inline dma_addr_t
-pci_map_page(struct pci_dev *hwdev, struct page *page,
- unsigned long offset, size_t size, int direction)
-{
- return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
- size_t size, int direction)
-{
- dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
-}
-
-static inline int
-pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
- int nents, int direction)
-{
- return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
- int nents, int direction)
-{
- dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
- size_t size, int direction)
-{
- dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
- size_t size, int direction)
-{
- dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
- int nelems, int direction)
-{
- dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
- int nelems, int direction)
-{
- dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
-}
-
-static inline int
-pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
-{
- return dma_mapping_error(&pdev->dev, dma_addr);
-}
-
-#ifdef CONFIG_PCI
-static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
-{
- return dma_set_mask(&dev->dev, mask);
-}
-
-static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
-{
- return dma_set_coherent_mask(&dev->dev, mask);
-}
-
-static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
- unsigned int size)
-{
- return dma_set_max_seg_size(&dev->dev, size);
-}
-
-static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
- unsigned long mask)
-{
- return dma_set_seg_boundary(&dev->dev, mask);
-}
-#else
-static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
-{ return -EIO; }
-static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
-{ return -EIO; }
-static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
- unsigned int size)
-{ return -EIO; }
-static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
- unsigned long mask)
-{ return -EIO; }
-#endif
-
-#endif
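Each wrapper above was a thin shim over the dma_* API, so callers convert mechanically: pci_* becomes dma_*, the pci_dev becomes &pdev->dev, and the int direction becomes enum dma_data_direction. A minimal sketch of the replacement for one streaming mapping; my_map_tx() is hypothetical:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Old: dma = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE); */
static int my_map_tx(struct pci_dev *pdev, void *buf, size_t len,
		     dma_addr_t *dma)
{
	*dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);

	return dma_mapping_error(&pdev->dev, *dma) ? -ENOMEM : 0;
}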
diff --git a/include/linux/pci-dma.h b/include/linux/pci-dma.h
deleted file mode 100644
index 549a041f9c08..000000000000
--- a/include/linux/pci-dma.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _LINUX_PCI_DMA_H
-#define _LINUX_PCI_DMA_H
-
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) DEFINE_DMA_UNMAP_ADDR(ADDR_NAME);
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) DEFINE_DMA_UNMAP_LEN(LEN_NAME);
-#define pci_unmap_addr dma_unmap_addr
-#define pci_unmap_addr_set dma_unmap_addr_set
-#define pci_unmap_len dma_unmap_len
-#define pci_unmap_len_set dma_unmap_len_set
-
-#endif
diff --git a/include/linux/pci-doe.h b/include/linux/pci-doe.h
new file mode 100644
index 000000000000..1f14aed4354b
--- /dev/null
+++ b/include/linux/pci-doe.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Data Object Exchange
+ * PCIe r6.0, sec 6.30 DOE
+ *
+ * Copyright (C) 2021 Huawei
+ * Jonathan Cameron <Jonathan.Cameron@huawei.com>
+ *
+ * Copyright (C) 2022 Intel Corporation
+ * Ira Weiny <ira.weiny@intel.com>
+ */
+
+#ifndef LINUX_PCI_DOE_H
+#define LINUX_PCI_DOE_H
+
+struct pci_doe_mb;
+
+struct pci_doe_mb *pci_find_doe_mailbox(struct pci_dev *pdev, u16 vendor,
+ u8 type);
+
+int pci_doe(struct pci_doe_mb *doe_mb, u16 vendor, u8 type,
+ const void *request, size_t request_sz,
+ void *response, size_t response_sz);
+
+#endif
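A minimal sketch of a DOE exchange using the two calls declared above; the vendor/type pair (PCI-SIG vendor 0x0001, type 0 = DOE discovery) and the buffer sizes are illustrative, and my_doe_discover() is hypothetical:

#include <linux/pci.h>
#include <linux/pci-doe.h>

static int my_doe_discover(struct pci_dev *pdev)
{
	struct pci_doe_mb *mb;
	u32 req = 0;		/* discovery request: index 0 */
	u32 rsp;
	int rc;

	mb = pci_find_doe_mailbox(pdev, 0x0001, 0);
	if (!mb)
		return -ENODEV;	/* no such mailbox on this device */

	rc = pci_doe(mb, 0x0001, 0, &req, sizeof(req), &rsp, sizeof(rsp));
	return rc < 0 ? rc : 0;	/* negative errno on failure */
}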
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index 809c2f1873ac..6b1301e2498e 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2016 Broadcom
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation (the "GPL").
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 (GPLv2) for more details.
- *
- * You should have received a copy of the GNU General Public License
- * version 2 (GPLv2) along with this source code.
*/
#ifndef DRIVERS_PCI_ECAM_H
#define DRIVERS_PCI_ECAM_H
@@ -21,6 +10,33 @@
#include <linux/platform_device.h>
/*
+ * Memory address shift values for the byte-level address that
+ * can be used when accessing the PCI Express Configuration Space.
+ */
+
+/*
+ * Enhanced Configuration Access Mechanism (ECAM)
+ *
+ * See PCI Express Base Specification, Revision 5.0, Version 1.0,
+ * Section 7.2.2, Table 7-1, p. 677.
+ */
+#define PCIE_ECAM_BUS_SHIFT 20 /* Bus number */
+#define PCIE_ECAM_DEVFN_SHIFT 12 /* Device and Function number */
+
+#define PCIE_ECAM_BUS_MASK 0xff
+#define PCIE_ECAM_DEVFN_MASK 0xff
+#define PCIE_ECAM_REG_MASK 0xfff /* Limit offset to a maximum of 4K */
+
+#define PCIE_ECAM_BUS(x) (((x) & PCIE_ECAM_BUS_MASK) << PCIE_ECAM_BUS_SHIFT)
+#define PCIE_ECAM_DEVFN(x) (((x) & PCIE_ECAM_DEVFN_MASK) << PCIE_ECAM_DEVFN_SHIFT)
+#define PCIE_ECAM_REG(x) ((x) & PCIE_ECAM_REG_MASK)
+
+#define PCIE_ECAM_OFFSET(bus, devfn, where) \
+ (PCIE_ECAM_BUS(bus) | \
+ PCIE_ECAM_DEVFN(devfn) | \
+ PCIE_ECAM_REG(where))
+
+/*
* struct to hold pci ops and bus shift of the config window
* for a PCI controller.
*/
@@ -39,8 +55,9 @@ struct pci_ecam_ops {
struct pci_config_window {
struct resource res;
struct resource busr;
+ unsigned int bus_shift;
void *priv;
- struct pci_ecam_ops *ops;
+ const struct pci_ecam_ops *ops;
union {
void __iomem *win; /* 64-bit single mapping */
void __iomem **winp; /* 32-bit per-bus mapping */
@@ -51,27 +68,31 @@ struct pci_config_window {
/* create and free pci_config_window */
struct pci_config_window *pci_ecam_create(struct device *dev,
struct resource *cfgres, struct resource *busr,
- struct pci_ecam_ops *ops);
+ const struct pci_ecam_ops *ops);
void pci_ecam_free(struct pci_config_window *cfg);
/* map_bus when ->sysdata is an instance of pci_config_window */
void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
/* default ECAM ops */
-extern struct pci_ecam_ops pci_generic_ecam_ops;
+extern const struct pci_ecam_ops pci_generic_ecam_ops;
#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
-extern struct pci_ecam_ops pci_32b_ops; /* 32-bit accesses only */
-extern struct pci_ecam_ops hisi_pcie_ops; /* HiSilicon */
-extern struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */
-extern struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */
-extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */
-extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
+extern const struct pci_ecam_ops pci_32b_ops; /* 32-bit accesses only */
+extern const struct pci_ecam_ops pci_32b_read_ops; /* 32-bit read only */
+extern const struct pci_ecam_ops hisi_pcie_ops; /* HiSilicon */
+extern const struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */
+extern const struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */
+extern const struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */
+extern const struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
+extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */
+extern const struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */
+extern const struct pci_ecam_ops loongson_pci_ecam_ops; /* Loongson PCIe */
#endif
-#ifdef CONFIG_PCI_HOST_COMMON
+#if IS_ENABLED(CONFIG_PCI_HOST_COMMON)
/* for DT-based PCI controllers that support ECAM */
-int pci_host_common_probe(struct platform_device *pdev,
- struct pci_ecam_ops *ops);
+int pci_host_common_probe(struct platform_device *pdev);
+int pci_host_common_remove(struct platform_device *pdev);
#endif
#endif
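With the new macros, bus 0x10, devfn 0x08, offset 0x24 maps to (0x10 << 20) | (0x08 << 12) | 0x24 = 0x1008024, and a controller's map_bus() collapses to one addition. A minimal sketch for the single-mapping case; my_ecam_map_bus() is hypothetical:

#include <linux/pci-ecam.h>

/* Assumes ->sysdata is the pci_config_window and the whole window is
 * mapped as one region (the "win" side of the union above).
 */
static void __iomem *my_ecam_map_bus(struct pci_bus *bus,
				     unsigned int devfn, int where)
{
	struct pci_config_window *cfg = bus->sysdata;

	return cfg->win + PCIE_ECAM_OFFSET(bus->number, devfn, where);
}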
diff --git a/include/linux/pci-ep-cfs.h b/include/linux/pci-ep-cfs.h
index 263b89ea5705..3e2140d7e31d 100644
--- a/include/linux/pci-ep-cfs.h
+++ b/include/linux/pci-ep-cfs.h
@@ -1,12 +1,9 @@
-/**
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* PCI Endpoint ConfigFS header file
*
* Copyright (C) 2017 Texas Instruments
* Author: Kishon Vijay Abraham I <kishon@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
*/
#ifndef __LINUX_PCI_EP_CFS_H
@@ -22,7 +19,7 @@ void pci_ep_cfs_remove_epf_group(struct config_group *group);
#else
static inline struct config_group *pci_ep_cfs_add_epc_group(const char *name)
{
- return 0;
+ return NULL;
}
static inline void pci_ep_cfs_remove_epc_group(struct config_group *group)
@@ -31,7 +28,7 @@ static inline void pci_ep_cfs_remove_epc_group(struct config_group *group)
static inline struct config_group *pci_ep_cfs_add_epf_group(const char *name)
{
- return 0;
+ return NULL;
}
static inline void pci_ep_cfs_remove_epf_group(struct config_group *group)
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index af5edbf3eea3..301bb0e53707 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -1,12 +1,9 @@
-/**
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* PCI Endpoint *Controller* (EPC) header file
*
* Copyright (C) 2017 Texas Instruments
* Author: Kishon Vijay Abraham I <kishon@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
*/
#ifndef __LINUX_PCI_EPC_H
@@ -16,12 +13,32 @@
struct pci_epc;
+enum pci_epc_interface_type {
+ UNKNOWN_INTERFACE = -1,
+ PRIMARY_INTERFACE,
+ SECONDARY_INTERFACE,
+};
+
enum pci_epc_irq_type {
PCI_EPC_IRQ_UNKNOWN,
PCI_EPC_IRQ_LEGACY,
PCI_EPC_IRQ_MSI,
+ PCI_EPC_IRQ_MSIX,
};
+static inline const char *
+pci_epc_interface_string(enum pci_epc_interface_type type)
+{
+ switch (type) {
+ case PRIMARY_INTERFACE:
+ return "primary";
+ case SECONDARY_INTERFACE:
+ return "secondary";
+ default:
+ return "UNKNOWN interface";
+ }
+}
+
/**
* struct pci_epc_ops - set of function pointers for performing EPC operations
* @write_header: ops to populate configuration space header
@@ -33,62 +50,129 @@ enum pci_epc_irq_type {
* capability register
* @get_msi: ops to get the number of MSI interrupts allocated by the RC from
* the MSI capability register
- * @raise_irq: ops to raise a legacy or MSI interrupt
+ * @set_msix: ops to set the requested number of MSI-X interrupts in the
+ * MSI-X capability register
+ * @get_msix: ops to get the number of MSI-X interrupts allocated by the RC
+ * from the MSI-X capability register
+ * @raise_irq: ops to raise a legacy, MSI or MSI-X interrupt
+ * @map_msi_irq: ops to map physical address to MSI address and return MSI data
* @start: ops to start the PCI link
* @stop: ops to stop the PCI link
+ * @get_features: ops to get the features supported by the EPC
* @owner: the module owner containing the ops
*/
struct pci_epc_ops {
- int (*write_header)(struct pci_epc *pci_epc,
+ int (*write_header)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr);
- int (*set_bar)(struct pci_epc *epc, enum pci_barno bar,
- dma_addr_t bar_phys, size_t size, int flags);
- void (*clear_bar)(struct pci_epc *epc, enum pci_barno bar);
- int (*map_addr)(struct pci_epc *epc, phys_addr_t addr,
- u64 pci_addr, size_t size);
- void (*unmap_addr)(struct pci_epc *epc, phys_addr_t addr);
- int (*set_msi)(struct pci_epc *epc, u8 interrupts);
- int (*get_msi)(struct pci_epc *epc);
- int (*raise_irq)(struct pci_epc *pci_epc,
- enum pci_epc_irq_type type, u8 interrupt_num);
+ int (*set_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epf_bar *epf_bar);
+ void (*clear_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epf_bar *epf_bar);
+ int (*map_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t addr, u64 pci_addr, size_t size);
+ void (*unmap_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t addr);
+ int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u8 interrupts);
+ int (*get_msi)(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+ int (*set_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno, u32 offset);
+ int (*get_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+ int (*raise_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ enum pci_epc_irq_type type, u16 interrupt_num);
+ int (*map_msi_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t phys_addr, u8 interrupt_num,
+ u32 entry_size, u32 *msi_data,
+ u32 *msi_addr_offset);
int (*start)(struct pci_epc *epc);
void (*stop)(struct pci_epc *epc);
+ const struct pci_epc_features* (*get_features)(struct pci_epc *epc,
+ u8 func_no, u8 vfunc_no);
struct module *owner;
};
/**
+ * struct pci_epc_mem_window - address window of the endpoint controller
+ * @phys_base: physical base address of the PCI address window
+ * @size: the size of the PCI address window
+ * @page_size: size of each page
+ */
+struct pci_epc_mem_window {
+ phys_addr_t phys_base;
+ size_t size;
+ size_t page_size;
+};
+
+/**
* struct pci_epc_mem - address space of the endpoint controller
- * @phys_base: physical base address of the PCI address space
- * @size: the size of the PCI address space
+ * @window: address window of the endpoint controller
* @bitmap: bitmap to manage the PCI address space
* @pages: number of bits representing the address region
+ * @lock: mutex to protect bitmap
*/
struct pci_epc_mem {
- phys_addr_t phys_base;
- size_t size;
+ struct pci_epc_mem_window window;
unsigned long *bitmap;
int pages;
+	/* mutex to protect against concurrent access for memory allocation */
+ struct mutex lock;
};
/**
* struct pci_epc - represents the PCI EPC device
* @dev: PCI EPC device
* @pci_epf: list of endpoint functions present in this EPC device
+ * @list_lock: mutex for protecting pci_epf list
* @ops: function pointers for performing endpoint operations
- * @mem: address space of the endpoint controller
+ * @windows: array of address space of the endpoint controller
+ * @mem: first window of the endpoint controller, which corresponds to
+ * default address space of the endpoint controller supporting
+ * single window.
+ * @num_windows: number of windows supported by device
* @max_functions: max number of functions that can be configured in this EPC
+ * @max_vfs: Array indicating the maximum number of virtual functions that can
+ * be associated with each physical function
* @group: configfs group representing the PCI EPC device
- * @lock: spinlock to protect pci_epc ops
+ * @lock: mutex to protect pci_epc ops
+ * @function_num_map: bitmap to manage physical function number
*/
struct pci_epc {
struct device dev;
struct list_head pci_epf;
+ struct mutex list_lock;
const struct pci_epc_ops *ops;
+ struct pci_epc_mem **windows;
struct pci_epc_mem *mem;
+ unsigned int num_windows;
u8 max_functions;
+ u8 *max_vfs;
struct config_group *group;
- /* spinlock to protect against concurrent access of EP controller */
- spinlock_t lock;
+ /* mutex to protect against concurrent access of EP controller */
+ struct mutex lock;
+ unsigned long function_num_map;
+};
+
+/**
+ * struct pci_epc_features - features supported by an EPC device per function
+ * @linkup_notifier: indicate if the EPC device can notify EPF driver on link up
+ * @core_init_notifier: indicate if the EPC can notify the EPF driver once its
+ * core initialization completes
+ * @msi_capable: indicate if the endpoint function has MSI capability
+ * @msix_capable: indicate if the endpoint function has MSI-X capability
+ * @reserved_bar: bitmap to indicate reserved BAR unavailable to function driver
+ * @bar_fixed_64bit: bitmap to indicate fixed 64bit BARs
+ * @bar_fixed_size: Array specifying the size supported by each BAR
+ * @align: alignment size required for BAR buffer allocation
+ */
+struct pci_epc_features {
+ unsigned int linkup_notifier : 1;
+ unsigned int core_init_notifier : 1;
+ unsigned int msi_capable : 1;
+ unsigned int msix_capable : 1;
+ u8 reserved_bar;
+ u8 bar_fixed_64bit;
+ u64 bar_fixed_size[PCI_STD_NUM_BARS];
+ size_t align;
};
#define to_pci_epc(device) container_of((device), struct pci_epc, dev)
@@ -116,26 +200,50 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
struct module *owner);
void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc);
void pci_epc_destroy(struct pci_epc *epc);
-int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf);
+int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
+ enum pci_epc_interface_type type);
void pci_epc_linkup(struct pci_epc *epc);
-void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf);
-int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *hdr);
-int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar,
- dma_addr_t bar_phys, size_t size, int flags);
-void pci_epc_clear_bar(struct pci_epc *epc, int bar);
-int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr,
+void pci_epc_init_notify(struct pci_epc *epc);
+void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
+ enum pci_epc_interface_type type);
+int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epf_header *hdr);
+int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epf_bar *epf_bar);
+void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epf_bar *epf_bar);
+int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t phys_addr,
u64 pci_addr, size_t size);
-void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr);
-int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts);
-int pci_epc_get_msi(struct pci_epc *epc);
-int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type,
- u8 interrupt_num);
+void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t phys_addr);
+int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u8 interrupts);
+int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 interrupts, enum pci_barno, u32 offset);
+int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t phys_addr, u8 interrupt_num,
+ u32 entry_size, u32 *msi_data, u32 *msi_addr_offset);
+int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ enum pci_epc_irq_type type, u16 interrupt_num);
int pci_epc_start(struct pci_epc *epc);
void pci_epc_stop(struct pci_epc *epc);
+const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
+ u8 func_no, u8 vfunc_no);
+enum pci_barno
+pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features);
+enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
+ *epc_features, enum pci_barno bar);
struct pci_epc *pci_epc_get(const char *epc_name);
void pci_epc_put(struct pci_epc *epc);
-int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_addr, size_t size);
+int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t base,
+ size_t size, size_t page_size);
+int pci_epc_multi_mem_init(struct pci_epc *epc,
+ struct pci_epc_mem_window *window,
+ unsigned int num_windows);
void pci_epc_mem_exit(struct pci_epc *epc);
void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
phys_addr_t *phys_addr, size_t size);
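A minimal sketch of how an endpoint function driver might use the per-function feature query added above before claiming a BAR; my_epf_pick_bar() is hypothetical:

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int my_epf_pick_bar(struct pci_epf *epf)
{
	const struct pci_epc_features *features;
	enum pci_barno bar;

	features = pci_epc_get_features(epf->epc, epf->func_no,
					epf->vfunc_no);
	if (!features)
		return -EOPNOTSUPP;

	bar = pci_epc_get_first_free_bar(features);
	if (bar == NO_BAR)
		return -ENOMEM;

	/* features->align constrains the buffer backing this BAR. */
	return bar;
}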
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 0d529cb90143..a215dc8ce693 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -1,31 +1,24 @@
-/**
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* PCI Endpoint *Function* (EPF) header file
*
* Copyright (C) 2017 Texas Instruments
* Author: Kishon Vijay Abraham I <kishon@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
*/
#ifndef __LINUX_PCI_EPF_H
#define __LINUX_PCI_EPF_H
+#include <linux/configfs.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
+#include <linux/pci.h>
struct pci_epf;
-
-enum pci_interrupt_pin {
- PCI_INTERRUPT_UNKNOWN,
- PCI_INTERRUPT_INTA,
- PCI_INTERRUPT_INTB,
- PCI_INTERRUPT_INTC,
- PCI_INTERRUPT_INTD,
-};
+enum pci_epc_interface_type;
enum pci_barno {
+ NO_BAR = -1,
BAR_0,
BAR_1,
BAR_2,
@@ -65,13 +58,23 @@ struct pci_epf_header {
* @bind: ops to perform when a EPC device has been bound to EPF device
* @unbind: ops to perform when a binding has been lost between a EPC device
* and EPF device
- * @linkup: ops to perform when the EPC device has established a connection with
- * a host system
+ * @add_cfs: ops to initialize function specific configfs attributes
*/
struct pci_epf_ops {
int (*bind)(struct pci_epf *epf);
void (*unbind)(struct pci_epf *epf);
- void (*linkup)(struct pci_epf *epf);
+ struct config_group *(*add_cfs)(struct pci_epf *epf,
+ struct config_group *group);
+};
+
+/**
+ * struct pci_epc_event_ops - Callbacks for capturing the EPC events
+ * @core_init: Callback for the EPC initialization complete event
+ * @link_up: Callback for the EPC link up event
+ */
+struct pci_epc_event_ops {
+ int (*core_init)(struct pci_epf *epf);
+ int (*link_up)(struct pci_epf *epf);
};
/**
@@ -82,17 +85,17 @@ struct pci_epf_ops {
* @driver: PCI EPF driver
* @ops: set of function pointers for performing EPF operations
* @owner: the owner of the module that registers the PCI EPF driver
- * @group: configfs group corresponding to the PCI EPF driver
+ * @epf_group: list of configfs groups corresponding to the PCI EPF driver
* @id_table: identifies EPF devices for probing
*/
struct pci_epf_driver {
int (*probe)(struct pci_epf *epf);
- int (*remove)(struct pci_epf *epf);
+ void (*remove)(struct pci_epf *epf);
struct device_driver driver;
struct pci_epf_ops *ops;
struct module *owner;
- struct config_group *group;
+ struct list_head epf_group;
const struct pci_epf_device_id *id_table;
};
@@ -102,11 +105,17 @@ struct pci_epf_driver {
/**
* struct pci_epf_bar - represents the BAR of EPF device
* @phys_addr: physical address that should be mapped to the BAR
+ * @addr: virtual address corresponding to the @phys_addr
* @size: the size of the address space present in BAR
+ * @barno: BAR number
+ * @flags: flags that are set for the BAR
*/
struct pci_epf_bar {
dma_addr_t phys_addr;
+ void *addr;
size_t size;
+ enum pci_barno barno;
+ int flags;
};
/**
@@ -116,10 +125,25 @@ struct pci_epf_bar {
* @header: represents standard configuration header
* @bar: represents the BAR of EPF device
* @msi_interrupts: number of MSI interrupts required by this function
- * @func_no: unique function number within this endpoint device
+ * @msix_interrupts: number of MSI-X interrupts required by this function
+ * @func_no: unique (physical) function number within this endpoint device
+ * @vfunc_no: unique virtual function number within a physical function
* @epc: the EPC device to which this EPF device is bound
+ * @epf_pf: the physical EPF device to which this virtual EPF device is bound
* @driver: the EPF driver to which this EPF device is bound
* @list: to add pci_epf as a list of PCI endpoint functions to pci_epc
+ * @lock: mutex to protect pci_epf_ops
+ * @sec_epc: the secondary EPC device to which this EPF device is bound
+ * @sec_epc_list: to add pci_epf as list of PCI endpoint functions to secondary
+ * EPC device
+ * @sec_epc_bar: represents the BAR of EPF device associated with secondary EPC
+ * @sec_epc_func_no: unique (physical) function number within the secondary EPC
+ * @group: configfs group associated with the EPF device
+ * @is_bound: indicates if bind notification to function driver has been invoked
+ * @is_vf: true - virtual function, false - physical function
+ * @vfunction_num_map: bitmap to manage virtual function number
+ * @pci_vepf: list of virtual endpoint functions associated with this function
+ * @event_ops: Callbacks for capturing the EPC events
*/
struct pci_epf {
struct device dev;
@@ -127,11 +151,41 @@ struct pci_epf {
struct pci_epf_header *header;
struct pci_epf_bar bar[6];
u8 msi_interrupts;
+ u16 msix_interrupts;
u8 func_no;
+ u8 vfunc_no;
struct pci_epc *epc;
+ struct pci_epf *epf_pf;
struct pci_epf_driver *driver;
struct list_head list;
+ /* mutex to protect against concurrent access of pci_epf_ops */
+ struct mutex lock;
+
+ /* Below members are to attach secondary EPC to an endpoint function */
+ struct pci_epc *sec_epc;
+ struct list_head sec_epc_list;
+ struct pci_epf_bar sec_epc_bar[6];
+ u8 sec_epc_func_no;
+ struct config_group *group;
+ unsigned int is_bound;
+ unsigned int is_vf;
+ unsigned long vfunction_num_map;
+ struct list_head pci_vepf;
+ const struct pci_epc_event_ops *event_ops;
+};
+
+/**
+ * struct pci_epf_msix_tbl - represents the MSI-X table entry structure
+ * @msg_addr: Writes to this address will trigger MSI-X interrupt in host
+ * @msg_data: Data that should be written to @msg_addr to trigger MSI-X interrupt
+ * @vector_ctrl: Identifies if the function is prohibited from sending a message
+ * using this MSI-X table entry
+ */
+struct pci_epf_msix_tbl {
+ u64 msg_addr;
+ u32 msg_data;
+ u32 vector_ctrl;
};
#define to_pci_epf(epf_dev) container_of((epf_dev), struct pci_epf, dev)
@@ -154,9 +208,14 @@ void pci_epf_destroy(struct pci_epf *epf);
int __pci_epf_register_driver(struct pci_epf_driver *driver,
struct module *owner);
void pci_epf_unregister_driver(struct pci_epf_driver *driver);
-void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar);
-void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar);
+void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
+ size_t align, enum pci_epc_interface_type type);
+void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar,
+ enum pci_epc_interface_type type);
int pci_epf_bind(struct pci_epf *epf);
void pci_epf_unbind(struct pci_epf *epf);
-void pci_epf_linkup(struct pci_epf *epf);
+struct config_group *pci_epf_type_add_cfs(struct pci_epf *epf,
+ struct config_group *group);
+int pci_epf_add_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf);
+void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf);
#endif /* __LINUX_PCI_EPF_H */
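A minimal sketch of allocating BAR backing storage with the extended pci_epf_alloc_space() signature, for the primary interface; my_epf_alloc_bar() is hypothetical, and the fill-in of epf->bar[] is an assumption about the helper:

#include <linux/pci-epf.h>

static int my_epf_alloc_bar(struct pci_epf *epf, enum pci_barno bar,
			    size_t size, size_t align)
{
	void *base;

	base = pci_epf_alloc_space(epf, size, bar, align, PRIMARY_INTERFACE);
	if (!base)
		return -ENOMEM;

	/* Assumption: the helper records addr/phys_addr/size in epf->bar[bar]. */
	return 0;
}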
diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h
new file mode 100644
index 000000000000..2c07aa6b7665
--- /dev/null
+++ b/include/linux/pci-p2pdma.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PCI Peer 2 Peer DMA support.
+ *
+ * Copyright (c) 2016-2018, Logan Gunthorpe
+ * Copyright (c) 2016-2017, Microsemi Corporation
+ * Copyright (c) 2017, Christoph Hellwig
+ * Copyright (c) 2018, Eideticom Inc.
+ */
+
+#ifndef _LINUX_PCI_P2PDMA_H
+#define _LINUX_PCI_P2PDMA_H
+
+#include <linux/pci.h>
+
+struct block_device;
+struct scatterlist;
+
+#ifdef CONFIG_PCI_P2PDMA
+int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
+ u64 offset);
+int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
+ int num_clients, bool verbose);
+bool pci_has_p2pmem(struct pci_dev *pdev);
+struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients);
+void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size);
+void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size);
+pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr);
+struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
+ unsigned int *nents, u32 length);
+void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl);
+void pci_p2pmem_publish(struct pci_dev *pdev, bool publish);
+int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
+ bool *use_p2pdma);
+ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
+ bool use_p2pdma);
+#else /* CONFIG_PCI_P2PDMA */
+static inline int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar,
+ size_t size, u64 offset)
+{
+ return -EOPNOTSUPP;
+}
+static inline int pci_p2pdma_distance_many(struct pci_dev *provider,
+ struct device **clients, int num_clients, bool verbose)
+{
+ return -1;
+}
+static inline bool pci_has_p2pmem(struct pci_dev *pdev)
+{
+ return false;
+}
+static inline struct pci_dev *pci_p2pmem_find_many(struct device **clients,
+ int num_clients)
+{
+ return NULL;
+}
+static inline void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
+{
+ return NULL;
+}
+static inline void pci_free_p2pmem(struct pci_dev *pdev, void *addr,
+ size_t size)
+{
+}
+static inline pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev,
+ void *addr)
+{
+ return 0;
+}
+static inline struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
+ unsigned int *nents, u32 length)
+{
+ return NULL;
+}
+static inline void pci_p2pmem_free_sgl(struct pci_dev *pdev,
+ struct scatterlist *sgl)
+{
+}
+static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
+{
+}
+static inline int pci_p2pdma_enable_store(const char *page,
+ struct pci_dev **p2p_dev, bool *use_p2pdma)
+{
+ *use_p2pdma = false;
+ return 0;
+}
+static inline ssize_t pci_p2pdma_enable_show(char *page,
+ struct pci_dev *p2p_dev, bool use_p2pdma)
+{
+ return sprintf(page, "none\n");
+}
+#endif /* CONFIG_PCI_P2PDMA */
+
+
+static inline int pci_p2pdma_distance(struct pci_dev *provider,
+ struct device *client, bool verbose)
+{
+ return pci_p2pdma_distance_many(provider, &client, 1, verbose);
+}
+
+static inline struct pci_dev *pci_p2pmem_find(struct device *client)
+{
+ return pci_p2pmem_find_many(&client, 1);
+}
+
+#endif /* _LINUX_PCI_P2PDMA_H */
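A minimal sketch of the provider/client flow declared above: verify a usable P2P path, carve memory out of the provider's BAR, then publish it; my_p2p_setup() is hypothetical:

#include <linux/pci-p2pdma.h>

static void *my_p2p_setup(struct pci_dev *provider, struct device *client,
			  size_t size)
{
	void *buf;

	if (pci_p2pdma_distance(provider, client, true) < 0)
		return NULL;	/* no usable peer-to-peer path */

	buf = pci_alloc_p2pmem(provider, size);
	if (buf)
		pci_p2pmem_publish(provider, true);

	return buf;
}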
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 4869e66dd659..60b8772b5bd4 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* pci.h
*
@@ -5,12 +6,18 @@
* Copyright 1994, Drew Eckhardt
* Copyright 1997--1999 Martin Mares <mj@ucw.cz>
*
+ * PCI Express ASPM defines and function prototypes
+ * Copyright (c) 2007 Intel Corp.
+ * Zhang Yanmin (yanmin.zhang@intel.com)
+ * Shaohua Li (shaohua.li@intel.com)
+ *
* For more information, please consult the following manuals (look at
* http://www.pcisig.com/ for how to get them):
*
* PCI BIOS Specification
* PCI Local Bus Specification
* PCI to PCI Bridge Specification
+ * PCI Express Specification
* PCI System Design Guide
*/
#ifndef LINUX_PCI_H
@@ -31,10 +38,24 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/resource_ext.h>
+#include <linux/msi_api.h>
#include <uapi/linux/pci.h>
#include <linux/pci_ids.h>
+#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
+ PCI_STATUS_SIG_SYSTEM_ERROR | \
+ PCI_STATUS_REC_MASTER_ABORT | \
+ PCI_STATUS_REC_TARGET_ABORT | \
+ PCI_STATUS_SIG_TARGET_ABORT | \
+ PCI_STATUS_PARITY)
+
+/* Number of reset methods used in pci_reset_fn_methods array in pci.c */
+#define PCI_NUM_RESET_METHODS 7
+
+#define PCI_RESET_PROBE true
+#define PCI_RESET_DO_RESET false
+
/*
* The PCI interface treats multi-function devices as independent
* devices. The slot/function address of each device is encoded
@@ -47,17 +68,17 @@
* In the interest of not exposing interfaces to user-space unnecessarily,
* the following kernel-only defines are being added here.
*/
-#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
+#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
/* pci_slot represents a physical slot */
struct pci_slot {
- struct pci_bus *bus; /* The bus this slot is on */
- struct list_head list; /* node in list of slots on this bus */
- struct hotplug_slot *hotplug; /* Hotplug info (migrate over time) */
- unsigned char number; /* PCI_SLOT(pci_dev->devfn) */
- struct kobject kobj;
+ struct pci_bus *bus; /* Bus this slot is on */
+ struct list_head list; /* Node in list of slots */
+ struct hotplug_slot *hotplug; /* Hotplug info (move here) */
+ unsigned char number; /* PCI_SLOT(pci_dev->devfn) */
+ struct kobject kobj;
};
static inline const char *pci_slot_name(const struct pci_slot *slot)
@@ -71,37 +92,78 @@ enum pci_mmap_state {
pci_mmap_mem
};
-/*
- * For PCI devices, the region numbers are assigned this way:
- */
+/* For PCI devices, the region numbers are assigned this way: */
enum {
/* #0-5: standard PCI resources */
PCI_STD_RESOURCES,
- PCI_STD_RESOURCE_END = 5,
+ PCI_STD_RESOURCE_END = PCI_STD_RESOURCES + PCI_STD_NUM_BARS - 1,
/* #6: expansion ROM resource */
PCI_ROM_RESOURCE,
- /* device specific resources */
+ /* Device-specific resources */
#ifdef CONFIG_PCI_IOV
PCI_IOV_RESOURCES,
PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
#endif
- /* resources assigned to buses behind the bridge */
+/* PCI-to-PCI (P2P) bridge windows */
+#define PCI_BRIDGE_IO_WINDOW (PCI_BRIDGE_RESOURCES + 0)
+#define PCI_BRIDGE_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 1)
+#define PCI_BRIDGE_PREF_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 2)
+
+/* CardBus bridge windows */
+#define PCI_CB_BRIDGE_IO_0_WINDOW (PCI_BRIDGE_RESOURCES + 0)
+#define PCI_CB_BRIDGE_IO_1_WINDOW (PCI_BRIDGE_RESOURCES + 1)
+#define PCI_CB_BRIDGE_MEM_0_WINDOW (PCI_BRIDGE_RESOURCES + 2)
+#define PCI_CB_BRIDGE_MEM_1_WINDOW (PCI_BRIDGE_RESOURCES + 3)
+
+/* Total number of bridge resources for P2P and CardBus */
#define PCI_BRIDGE_RESOURCE_NUM 4
+ /* Resources assigned to buses behind the bridge */
PCI_BRIDGE_RESOURCES,
PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
PCI_BRIDGE_RESOURCE_NUM - 1,
- /* total resources associated with a PCI device */
+ /* Total resources associated with a PCI device */
PCI_NUM_RESOURCES,
- /* preserve this for compatibility */
+ /* Preserve this for compatibility */
DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
};
+/**
+ * enum pci_interrupt_pin - PCI INTx interrupt values
+ * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt
+ * @PCI_INTERRUPT_INTA: PCI INTA pin
+ * @PCI_INTERRUPT_INTB: PCI INTB pin
+ * @PCI_INTERRUPT_INTC: PCI INTC pin
+ * @PCI_INTERRUPT_INTD: PCI INTD pin
+ *
+ * Corresponds to values for legacy PCI INTx interrupts, as can be found in the
+ * PCI_INTERRUPT_PIN register.
+ */
+enum pci_interrupt_pin {
+ PCI_INTERRUPT_UNKNOWN,
+ PCI_INTERRUPT_INTA,
+ PCI_INTERRUPT_INTB,
+ PCI_INTERRUPT_INTC,
+ PCI_INTERRUPT_INTD,
+};
+
+/* The number of legacy PCI INTx interrupts */
+#define PCI_NUM_INTX 4
+
+/*
+ * Reading from a device that doesn't respond typically returns ~0. A
+ * successful read from a device may also return ~0, so you need additional
+ * information to reliably identify errors.
+ */
+#define PCI_ERROR_RESPONSE (~0ULL)
+#define PCI_SET_ERROR_RESPONSE(val) (*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
+#define PCI_POSSIBLE_ERROR(val) ((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
+
/*
* pci_power_t values must match the bits in the Capabilities PME_Support
* and Control/Status PowerState fields in the Power Management capability.
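A minimal sketch of the read-side idiom for the PCI_POSSIBLE_ERROR() helper added in the hunk above; my_dev_responding() is hypothetical:

#include <linux/pci.h>

static bool my_dev_responding(struct pci_dev *pdev)
{
	u32 id;

	pci_read_config_dword(pdev, PCI_VENDOR_ID, &id);

	/* ~0 usually means no response, but a valid read can return it too. */
	return !PCI_POSSIBLE_ERROR(id);
}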
@@ -124,18 +186,16 @@ static inline const char *pci_power_name(pci_power_t state)
return pci_power_names[1 + (__force int) state];
}
-#define PCI_PM_D2_DELAY 200
-#define PCI_PM_D3_WAIT 10
-#define PCI_PM_D3COLD_WAIT 100
-#define PCI_PM_BUS_WAIT 50
-
-/** The pci_channel state describes connectivity between the CPU and
- * the pci device. If some PCI bus between here and the pci device
- * has crashed or locked up, this info is reflected here.
+/**
+ * typedef pci_channel_state_t
+ *
+ * The pci_channel state describes connectivity between the CPU and
+ * the PCI device. If some PCI bus between here and the PCI device
+ * has crashed or locked up, this info is reflected here.
*/
typedef unsigned int __bitwise pci_channel_state_t;
-enum pci_channel_state {
+enum {
/* I/O channel is in normal state */
pci_channel_io_normal = (__force pci_channel_state_t) 1,
@@ -161,9 +221,7 @@ enum pcie_reset_state {
typedef unsigned short __bitwise pci_dev_flags_t;
enum pci_dev_flags {
- /* INTX_DISABLE in PCI_COMMAND register disables MSI
- * generation too.
- */
+ /* INTX_DISABLE in PCI_COMMAND register disables MSI too */
PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
/* Device configuration is irrevocably lost if disabled into D3 */
PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
@@ -179,15 +237,14 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
/* Get VPD from function 0 VPD */
PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
- /* a non-root bridge where translation occurs, stop alias search here */
+ /* A non-root bridge where translation occurs, stop alias search here */
PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
/* Do not use FLR even if device advertises PCI_AF_CAP */
PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
- /*
- * Resume before calling the driver's system suspend hooks, disabling
- * the direct_complete optimization.
- */
- PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11),
+ /* Don't use Relaxed Ordering for TLPs directed at this device */
+ PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
+ /* Device does honor MSI masking despite saying otherwise */
+ PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
};
enum pci_irq_reroute_variant {
@@ -200,22 +257,23 @@ enum pci_bus_flags {
PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4,
+ PCI_BUS_FLAGS_NO_EXTCFG = (__force pci_bus_flags_t) 8,
};
-/* These values come from the PCI Express Spec */
+/* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
enum pcie_link_width {
PCIE_LNK_WIDTH_RESRV = 0x00,
PCIE_LNK_X1 = 0x01,
PCIE_LNK_X2 = 0x02,
PCIE_LNK_X4 = 0x04,
PCIE_LNK_X8 = 0x08,
- PCIE_LNK_X12 = 0x0C,
+ PCIE_LNK_X12 = 0x0c,
PCIE_LNK_X16 = 0x10,
PCIE_LNK_X32 = 0x20,
- PCIE_LNK_WIDTH_UNKNOWN = 0xFF,
+ PCIE_LNK_WIDTH_UNKNOWN = 0xff,
};
-/* Based on the PCI Hotplug Spec, but some values are made up by us */
+/* See matching string table in pci_speed_string() */
enum pci_bus_speed {
PCI_SPEED_33MHz = 0x00,
PCI_SPEED_66MHz = 0x01,
@@ -239,40 +297,38 @@ enum pci_bus_speed {
PCIE_SPEED_2_5GT = 0x14,
PCIE_SPEED_5_0GT = 0x15,
PCIE_SPEED_8_0GT = 0x16,
+ PCIE_SPEED_16_0GT = 0x17,
+ PCIE_SPEED_32_0GT = 0x18,
+ PCIE_SPEED_64_0GT = 0x19,
PCI_SPEED_UNKNOWN = 0xff,
};
-struct pci_cap_saved_data {
- u16 cap_nr;
- bool cap_extended;
- unsigned int size;
- u32 data[0];
-};
+enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
+enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
-struct pci_cap_saved_state {
- struct hlist_node next;
- struct pci_cap_saved_data cap;
+struct pci_vpd {
+ struct mutex lock;
+ unsigned int len;
+ u8 cap;
};
struct irq_affinity;
struct pcie_link_state;
-struct pci_vpd;
struct pci_sriov;
-struct pci_ats;
+struct pci_p2pdma;
+struct rcec_ea;
-/*
- * The pci_dev structure is used to describe PCI devices.
- */
+/* The pci_dev structure describes PCI devices */
struct pci_dev {
- struct list_head bus_list; /* node in per-bus list */
- struct pci_bus *bus; /* bus this device is on */
- struct pci_bus *subordinate; /* bus this device bridges to */
+ struct list_head bus_list; /* Node in per-bus list */
+ struct pci_bus *bus; /* Bus this device is on */
+ struct pci_bus *subordinate; /* Bus this device bridges to */
- void *sysdata; /* hook for sys-specific extension */
- struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */
+ void *sysdata; /* Hook for sys-specific extension */
+ struct proc_dir_entry *procent; /* Device entry in /proc/bus/pci */
struct pci_slot *slot; /* Physical slot this device is in */
- unsigned int devfn; /* encoded device & function index */
+ unsigned int devfn; /* Encoded device & function index */
unsigned short vendor;
unsigned short device;
unsigned short subsystem_vendor;
@@ -282,17 +338,23 @@ struct pci_dev {
u8 hdr_type; /* PCI header type (`multi' flag masked out) */
#ifdef CONFIG_PCIEAER
u16 aer_cap; /* AER capability offset */
+ struct aer_stats *aer_stats; /* AER stats for this device */
+#endif
+#ifdef CONFIG_PCIEPORTBUS
+ struct rcec_ea *rcec_ea; /* RCEC cached endpoint association */
+ struct pci_dev *rcec; /* Associated RCEC device */
#endif
+ u32 devcap; /* PCIe Device Capabilities */
u8 pcie_cap; /* PCIe capability offset */
u8 msi_cap; /* MSI capability offset */
u8 msix_cap; /* MSI-X capability offset */
u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */
- u8 rom_base_reg; /* which config register controls the ROM */
- u8 pin; /* which interrupt pin this device uses */
- u16 pcie_flags_reg; /* cached PCIe Capabilities Register */
- unsigned long *dma_alias_mask;/* mask of enabled devfn aliases */
+ u8 rom_base_reg; /* Config register controlling ROM */
+ u8 pin; /* Interrupt pin this device uses */
+ u16 pcie_flags_reg; /* Cached PCIe Capabilities Register */
+ unsigned long *dma_alias_mask;/* Mask of enabled devfn aliases */
- struct pci_driver *driver; /* which driver has allocated this device */
+ struct pci_driver *driver; /* Driver bound to this device */
u64 dma_mask; /* Mask of the bits of bus address this
device implements. Normally this is
0xffffffff. You only need to change
@@ -301,9 +363,10 @@ struct pci_dev {
struct device_dma_parameters dma_parms;
- pci_power_t current_state; /* Current operating state. In ACPI-speak,
- this is D0-D3, D0 being fully functional,
- and D3 being off. */
+ pci_power_t current_state; /* Current operating state. In ACPI,
+ this is D0-D3, D0 being fully
+ functional, and D3 being off. */
+ unsigned int imm_ready:1; /* Supports Immediate Readiness */
u8 pm_cap; /* PM capability offset */
unsigned int pme_support:5; /* Bitmask of states from which PME#
can be generated */
@@ -314,28 +377,32 @@ struct pci_dev {
unsigned int no_d3cold:1; /* D3cold is forbidden */
unsigned int bridge_d3:1; /* Allow D3 for bridge */
unsigned int d3cold_allowed:1; /* D3cold is allowed by user */
- unsigned int mmio_always_on:1; /* disallow turning off io/mem
- decoding during bar sizing */
+ unsigned int mmio_always_on:1; /* Disallow turning off io/mem
+ decoding during BAR sizing */
unsigned int wakeup_prepared:1;
- unsigned int runtime_d3cold:1; /* whether go through runtime
- D3cold, not set for devices
- powered on/off by the
- corresponding bridge */
+ unsigned int skip_bus_pm:1; /* Internal: Skip bus-level PM */
unsigned int ignore_hotplug:1; /* Ignore hotplug events */
unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
controlled exclusively by
user sysfs */
- unsigned int d3_delay; /* D3->D0 transition time in ms */
+ unsigned int clear_retrain_link:1; /* Need to clear Retrain Link
+ bit manually */
+ unsigned int d3hot_delay; /* D3hot->D0 transition time in ms */
unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state */
+ unsigned int ltr_path:1; /* Latency Tolerance Reporting
+ supported from root to here */
+ u16 l1ss; /* L1SS Capability pointer */
#endif
+ unsigned int pasid_no_tlp:1; /* PASID works without TLP Prefix */
+ unsigned int eetlp_prefix_path:1; /* End-to-End TLP Prefix */
- pci_channel_state_t error_state; /* current connectivity state */
- struct device dev; /* Generic device interface */
+ pci_channel_state_t error_state; /* Current connectivity state */
+ struct device dev; /* Generic device interface */
- int cfg_size; /* Size of configuration space */
+ int cfg_size; /* Size of config space */
/*
* Instead of touching interrupt line and base address registers
@@ -343,80 +410,124 @@ struct pci_dev {
*/
unsigned int irq;
struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
-
- bool match_driver; /* Skip attaching driver */
- /* These fields are used by common fixups */
- unsigned int transparent:1; /* Subtractive decode PCI bridge */
- unsigned int multifunction:1;/* Part of multi-function device */
- /* keep track of device state */
- unsigned int is_added:1;
- unsigned int is_busmaster:1; /* device is busmaster */
- unsigned int no_msi:1; /* device may not use msi */
- unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */
- unsigned int block_cfg_access:1; /* config space access is blocked */
- unsigned int broken_parity_status:1; /* Device generates false positive parity */
- unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
+ struct resource driver_exclusive_resource; /* driver exclusive resource ranges */
+
+ bool match_driver; /* Skip attaching driver */
+
+ unsigned int transparent:1; /* Subtractive decode bridge */
+ unsigned int io_window:1; /* Bridge has I/O window */
+ unsigned int pref_window:1; /* Bridge has pref mem window */
+ unsigned int pref_64_window:1; /* Pref mem window is 64-bit */
+ unsigned int multifunction:1; /* Multi-function device */
+
+ unsigned int is_busmaster:1; /* Is busmaster */
+ unsigned int no_msi:1; /* May not use MSI */
+ unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */
+ unsigned int block_cfg_access:1; /* Config space access blocked */
+ unsigned int broken_parity_status:1; /* Generates false positive parity */
+ unsigned int irq_reroute_variant:2; /* Needs IRQ rerouting variant */
unsigned int msi_enabled:1;
unsigned int msix_enabled:1;
- unsigned int ari_enabled:1; /* ARI forwarding */
- unsigned int ats_enabled:1; /* Address Translation Service */
+ unsigned int ari_enabled:1; /* ARI forwarding */
+ unsigned int ats_enabled:1; /* Address Translation Svc */
unsigned int pasid_enabled:1; /* Process Address Space ID */
unsigned int pri_enabled:1; /* Page Request Interface */
- unsigned int is_managed:1;
- unsigned int needs_freset:1; /* Dev requires fundamental reset */
+ unsigned int is_managed:1; /* Managed via devres */
+ unsigned int is_msi_managed:1; /* MSI release via devres installed */
+ unsigned int needs_freset:1; /* Requires fundamental reset */
unsigned int state_saved:1;
unsigned int is_physfn:1;
unsigned int is_virtfn:1;
- unsigned int reset_fn:1;
- unsigned int is_hotplug_bridge:1;
- unsigned int is_thunderbolt:1; /* Thunderbolt controller */
- unsigned int __aer_firmware_first_valid:1;
- unsigned int __aer_firmware_first:1;
- unsigned int broken_intx_masking:1; /* INTx masking can't be used */
- unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
+ unsigned int is_hotplug_bridge:1;
+ unsigned int shpc_managed:1; /* SHPC owned by shpchp */
+ unsigned int is_thunderbolt:1; /* Thunderbolt controller */
+ /*
+	 * Devices marked as untrusted are those that can potentially
+	 * execute DMA attacks and similar. They are typically connected
+	 * through external ports such as Thunderbolt, but are not limited
+	 * to that. When an IOMMU is enabled, they should get full
+	 * mappings to make sure they cannot access arbitrary memory.
+ */
+ unsigned int untrusted:1;
+ /*
+ * Info from the platform, e.g., ACPI or device tree, may mark a
+ * device as "external-facing". An external-facing device is
+ * itself internal but devices downstream from it are external.
+ */
+ unsigned int external_facing:1;
+ unsigned int broken_intx_masking:1; /* INTx masking can't be used */
+ unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */
unsigned int irq_managed:1;
- unsigned int has_secondary_link:1;
- unsigned int non_compliant_bars:1; /* broken BARs; ignore them */
- unsigned int is_probed:1; /* device probing in progress */
+ unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */
+ unsigned int is_probed:1; /* Device probing in progress */
+ unsigned int link_active_reporting:1;/* Device capable of reporting link active */
+ unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */
+ unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */
+ unsigned int rom_bar_overlap:1; /* ROM BAR disable broken */
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
- u32 saved_config_space[16]; /* config space saved at suspend time */
+ u32 saved_config_space[16]; /* Config space saved at suspend time */
struct hlist_head saved_cap_space;
- struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
- int rom_attr_enabled; /* has display of the rom attribute been enabled? */
+ int rom_attr_enabled; /* Display of ROM attribute enabled? */
struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
+#ifdef CONFIG_HOTPLUG_PCI_PCIE
+ unsigned int broken_cmd_compl:1; /* No compl for some cmds */
+#endif
#ifdef CONFIG_PCIE_PTM
+ u16 ptm_cap; /* PTM Capability */
unsigned int ptm_root:1;
unsigned int ptm_enabled:1;
u8 ptm_granularity;
#endif
#ifdef CONFIG_PCI_MSI
- const struct attribute_group **msi_irq_groups;
+ void __iomem *msix_base;
+ raw_spinlock_t msi_lock;
+#endif
+ struct pci_vpd vpd;
+#ifdef CONFIG_PCIE_DPC
+ u16 dpc_cap;
+ unsigned int dpc_rp_extensions:1;
+ u8 dpc_rp_log_size;
#endif
- struct pci_vpd *vpd;
#ifdef CONFIG_PCI_ATS
union {
- struct pci_sriov *sriov; /* SR-IOV capability related */
- struct pci_dev *physfn; /* the PF this VF is associated with */
+ struct pci_sriov *sriov; /* PF: SR-IOV info */
+ struct pci_dev *physfn; /* VF: related PF */
};
u16 ats_cap; /* ATS Capability offset */
u8 ats_stu; /* ATS Smallest Translation Unit */
- atomic_t ats_ref_cnt; /* number of VFs with ATS enabled */
#endif
#ifdef CONFIG_PCI_PRI
+ u16 pri_cap; /* PRI Capability offset */
u32 pri_reqs_alloc; /* Number of PRI requests allocated */
+ unsigned int pasid_required:1; /* PRG Response PASID Required */
#endif
#ifdef CONFIG_PCI_PASID
+ u16 pasid_cap; /* PASID Capability offset */
u16 pasid_features;
#endif
- phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
- size_t romlen; /* Length of ROM if it's not from the BAR */
- char *driver_override; /* Driver name to force a match */
+#ifdef CONFIG_PCI_P2PDMA
+ struct pci_p2pdma __rcu *p2pdma;
+#endif
+#ifdef CONFIG_PCI_DOE
+ struct xarray doe_mbs; /* Data Object Exchange mailboxes */
+#endif
+ u16 acs_cap; /* ACS Capability offset */
+ phys_addr_t rom; /* Physical address if not from BAR */
+ size_t romlen; /* Length if not from BAR */
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
- unsigned long priv_flags; /* Private flags for the pci driver */
+ unsigned long priv_flags; /* Private flags for the PCI driver */
+
+ /* These methods index pci_reset_fn_methods[] */
+ u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */
};
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
@@ -438,26 +549,51 @@ static inline int pci_channel_offline(struct pci_dev *pdev)
return (pdev->error_state != pci_channel_io_normal);
}
+/*
+ * Currently in ACPI spec, for each PCI host bridge, PCI Segment
+ * Group number is limited to a 16-bit value, therefore (int)-1 is
+ * not a valid PCI domain number, and can be used as a sentinel
+ * value indicating ->domain_nr is not set by the driver (and
+ * CONFIG_PCI_DOMAINS_GENERIC=y archs will set it with
+ * pci_bus_find_domain_nr()).
+ */
+#define PCI_DOMAIN_NR_NOT_SET (-1)
+
struct pci_host_bridge {
- struct device dev;
- struct pci_bus *bus; /* root bus */
- struct pci_ops *ops;
- void *sysdata;
- int busnr;
+ struct device dev;
+ struct pci_bus *bus; /* Root bus */
+ struct pci_ops *ops;
+ struct pci_ops *child_ops;
+ void *sysdata;
+ int busnr;
+ int domain_nr;
struct list_head windows; /* resource_entry */
- u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* platform IRQ swizzler */
+ struct list_head dma_ranges; /* dma ranges resource list */
+ u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
int (*map_irq)(const struct pci_dev *, u8, u8);
void (*release_fn)(struct pci_host_bridge *);
- void *release_data;
- struct msi_controller *msi;
- unsigned int ignore_reset_delay:1; /* for entire hierarchy */
+ void *release_data;
+ unsigned int ignore_reset_delay:1; /* For entire hierarchy */
+ unsigned int no_ext_tags:1; /* No Extended Tags */
+ unsigned int no_inc_mrrs:1; /* No Increase MRRS */
+ unsigned int native_aer:1; /* OS may use PCIe AER */
+ unsigned int native_pcie_hotplug:1; /* OS may use PCIe hotplug */
+ unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */
+ unsigned int native_pme:1; /* OS may use PCIe PME */
+ unsigned int native_ltr:1; /* OS may use PCIe LTR */
+ unsigned int native_dpc:1; /* OS may use PCIe DPC */
+ unsigned int native_cxl_error:1; /* OS may use CXL RAS/Events */
+ unsigned int preserve_config:1; /* Preserve FW resource setup */
+ unsigned int size_windows:1; /* Enable root bus sizing */
+ unsigned int msi_domain:1; /* Bridge wants MSI domain */
+
/* Resource alignment requirements */
resource_size_t (*align_resource)(struct pci_dev *dev,
const struct resource *res,
resource_size_t start,
resource_size_t size,
resource_size_t align);
- unsigned long private[0] ____cacheline_aligned;
+ unsigned long private[] ____cacheline_aligned;
};
#define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
@@ -479,8 +615,8 @@ void pci_free_host_bridge(struct pci_host_bridge *bridge);
struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
- void (*release_fn)(struct pci_host_bridge *),
- void *release_data);
+ void (*release_fn)(struct pci_host_bridge *),
+ void *release_data);
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
@@ -500,32 +636,31 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
#define PCI_SUBTRACTIVE_DECODE 0x1
struct pci_bus_resource {
- struct list_head list;
- struct resource *res;
- unsigned int flags;
+ struct list_head list;
+ struct resource *res;
+ unsigned int flags;
};
#define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
struct pci_bus {
- struct list_head node; /* node in list of buses */
- struct pci_bus *parent; /* parent bus this bridge is on */
- struct list_head children; /* list of child buses */
- struct list_head devices; /* list of devices on this bus */
- struct pci_dev *self; /* bridge device as seen by parent */
- struct list_head slots; /* list of slots on this bus;
+ struct list_head node; /* Node in list of buses */
+ struct pci_bus *parent; /* Parent bus this bridge is on */
+ struct list_head children; /* List of child buses */
+ struct list_head devices; /* List of devices on this bus */
+ struct pci_dev *self; /* Bridge device as seen by parent */
+ struct list_head slots; /* List of slots on this bus;
protected by pci_slot_mutex */
struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
- struct list_head resources; /* address space routed to this bus */
- struct resource busn_res; /* bus numbers routed to this bus */
+ struct list_head resources; /* Address space routed to this bus */
+ struct resource busn_res; /* Bus numbers routed to this bus */
- struct pci_ops *ops; /* configuration access functions */
- struct msi_controller *msi; /* MSI controller */
- void *sysdata; /* hook for sys-specific extension */
- struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */
+ struct pci_ops *ops; /* Configuration access functions */
+ void *sysdata; /* Hook for sys-specific extension */
+ struct proc_dir_entry *procdir; /* Directory entry in /proc/bus/pci */
- unsigned char number; /* bus number */
- unsigned char primary; /* number of primary bridge */
+ unsigned char number; /* Bus number */
+ unsigned char primary; /* Number of primary bridge */
unsigned char max_bus_speed; /* enum pci_bus_speed */
unsigned char cur_bus_speed; /* enum pci_bus_speed */
#ifdef CONFIG_PCI_DOMAINS_GENERIC
@@ -534,17 +669,23 @@ struct pci_bus {
char name[48];
- unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */
- pci_bus_flags_t bus_flags; /* inherited by child buses */
+ unsigned short bridge_ctl; /* Manage NO_ISA/FBB/et al behaviors */
+ pci_bus_flags_t bus_flags; /* Inherited by child buses */
struct device *bridge;
struct device dev;
- struct bin_attribute *legacy_io; /* legacy I/O for this bus */
- struct bin_attribute *legacy_mem; /* legacy mem */
+ struct bin_attribute *legacy_io; /* Legacy I/O for this bus */
+ struct bin_attribute *legacy_mem; /* Legacy mem */
unsigned int is_added:1;
+ unsigned int unsafe_warn:1; /* Warned about RW1C config write */
};
#define to_pci_bus(n) container_of(n, struct pci_bus, dev)
+static inline u16 pci_dev_id(struct pci_dev *dev)
+{
+ return PCI_DEVID(dev->bus->number, dev->devfn);
+}
+
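For illustration, the new helper composes the Requester ID via PCI_DEVID() (bus number in bits 15:8, devfn in bits 7:0); a minimal sketch assuming a valid pdev:

	/* Sketch: RID as used for IOMMU/ACS programming (bus << 8 | devfn) */
	u16 rid = pci_dev_id(pdev);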
/*
* Returns true if the PCI bus is root (behind host-PCI bridge),
* false otherwise
@@ -571,6 +712,10 @@ static inline bool pci_is_bridge(struct pci_dev *dev)
dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
}
+#define for_each_pci_bridge(dev, bus) \
+ list_for_each_entry(dev, &bus->devices, bus_list) \
+ if (!pci_is_bridge(dev)) {} else
+
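for_each_pci_bridge() hides the pci_is_bridge() filter in its trailing else; a short usage sketch, assuming a valid struct pci_bus *bus:

	struct pci_dev *dev;

	/* Visit only the bridge devices on this bus */
	for_each_pci_bridge(dev, bus)
		pci_info(dev, "bridge, secondary bus %02x\n",
			 dev->subordinate ? dev->subordinate->number : 0);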
static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
{
dev = pci_physfn(dev);
@@ -580,9 +725,6 @@ static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
return dev->bus->self;
}
-struct device *pci_get_host_bridge_device(struct pci_dev *dev);
-void pci_put_host_bridge_device(struct device *dev);
-
#ifdef CONFIG_PCI_MSI
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
{
@@ -592,9 +734,7 @@ static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
#endif
-/*
- * Error values that may be returned by PCI functions.
- */
+/* Error values that may be returned by PCI functions */
#define PCIBIOS_SUCCESSFUL 0x00
#define PCIBIOS_FUNC_NOT_SUPPORTED 0x81
#define PCIBIOS_BAD_VENDOR_ID 0x83
@@ -603,9 +743,7 @@ static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false;
#define PCIBIOS_SET_FAILED 0x88
#define PCIBIOS_BUFFER_TOO_SMALL 0x89
-/*
- * Translate above to generic errno for passing back through non-PCI code.
- */
+/* Translate above to generic errno for passing back through non-PCI code */
static inline int pcibios_err_to_errno(int err)
{
if (err <= PCIBIOS_SUCCESSFUL)
@@ -648,20 +786,20 @@ int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 val);
-#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
typedef u64 pci_bus_addr_t;
#else
typedef u32 pci_bus_addr_t;
#endif
struct pci_bus_region {
- pci_bus_addr_t start;
- pci_bus_addr_t end;
+ pci_bus_addr_t start;
+ pci_bus_addr_t end;
};
struct pci_dynids {
- spinlock_t lock; /* protects list, index */
- struct list_head list; /* for IDs added at runtime */
+ spinlock_t lock; /* Protects list, index */
+ struct list_head list; /* For IDs added at runtime */
};
@@ -675,13 +813,13 @@ struct pci_dynids {
typedef unsigned int __bitwise pci_ers_result_t;
enum pci_ers_result {
- /* no result/none/not supported in device driver */
+ /* No result/none/not supported in device driver */
PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
/* Device driver can recover without slot reset */
PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
- /* Device driver wants slot to be reset. */
+ /* Device driver wants slot to be reset */
PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
/* Device has completely failed, is unrecoverable */
@@ -698,7 +836,7 @@ enum pci_ers_result {
struct pci_error_handlers {
/* PCI bus error detected on this device */
pci_ers_result_t (*error_detected)(struct pci_dev *dev,
- enum pci_channel_state error);
+ pci_channel_state_t error);
/* MMIO has been re-enabled, but not DMA */
pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
@@ -712,31 +850,97 @@ struct pci_error_handlers {
/* Device driver may resume normal operations */
void (*resume)(struct pci_dev *dev);
+
+ /* Allow device driver to record more details of a correctable error */
+ void (*cor_error_detected)(struct pci_dev *dev);
};
struct module;
+
+/**
+ * struct pci_driver - PCI driver structure
+ * @node: List of driver structures.
+ * @name: Driver name.
+ * @id_table: Pointer to table of device IDs the driver is
+ * interested in. Most drivers should export this
+ * table using MODULE_DEVICE_TABLE(pci,...).
+ * @probe: This probing function gets called (during execution
+ * of pci_register_driver() for already existing
+ * devices or later if a new device gets inserted) for
+ * all PCI devices which match the ID table and are not
+ * "owned" by the other drivers yet. This function gets
+ * passed a "struct pci_dev \*" for each device whose
+ * entry in the ID table matches the device. The probe
+ * function returns zero when the driver chooses to
+ * take "ownership" of the device or an error code
+ * (negative number) otherwise.
+ * The probe function always gets called from process
+ * context, so it can sleep.
+ * @remove: The remove() function gets called whenever a device
+ * being handled by this driver is removed (either during
+ * deregistration of the driver or when it's manually
+ * pulled out of a hot-pluggable slot).
+ * The remove function always gets called from process
+ * context, so it can sleep.
+ * @suspend: Put device into low power state.
+ * @resume: Wake device from low power state.
+ * (Please see Documentation/power/pci.rst for descriptions
+ * of PCI Power Management and the related functions.)
+ * @shutdown: Hook into reboot_notifier_list (kernel/sys.c).
+ * Intended to stop any idling DMA operations.
+ * Useful for enabling wake-on-lan (NIC) or changing
+ * the power state of a device before reboot.
+ * e.g. drivers/net/e100.c.
+ * @sriov_configure: Optional driver callback to allow configuration of
+ * number of VFs to enable via sysfs "sriov_numvfs" file.
+ * @sriov_set_msix_vec_count: PF Driver callback to change number of MSI-X
+ * vectors on a VF. Triggered via sysfs "sriov_vf_msix_count".
+ * This will change MSI-X Table Size in the VF Message Control
+ * registers.
+ * @sriov_get_vf_total_msix: PF driver callback to get the total number of
+ * MSI-X vectors available for distribution to the VFs.
+ * @err_handler: See Documentation/PCI/pci-error-recovery.rst
+ * @groups: Sysfs attribute groups.
+ * @dev_groups: Attributes attached to the device that will be
+ * created once it is bound to the driver.
+ * @driver: Driver model structure.
+ * @dynids: List of dynamically added device IDs.
+ * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
+ * For most device drivers, no need to care about this flag
+ * as long as all DMAs are handled through the kernel DMA API.
+ * For some special ones, for example VFIO drivers, they know
+ * how to manage the DMA themselves and set this flag so that
+ * the IOMMU layer will allow them to setup and manage their
+ * own I/O address space.
+ */
struct pci_driver {
- struct list_head node;
- const char *name;
- const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */
- int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
- void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
- int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */
- int (*suspend_late) (struct pci_dev *dev, pm_message_t state);
- int (*resume_early) (struct pci_dev *dev);
- int (*resume) (struct pci_dev *dev); /* Device woken up */
- void (*shutdown) (struct pci_dev *dev);
- int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* PF pdev */
+ struct list_head node;
+ const char *name;
+ const struct pci_device_id *id_table; /* Must be non-NULL for probe to be called */
+ int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
+ void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
+ int (*suspend)(struct pci_dev *dev, pm_message_t state); /* Device suspended */
+ int (*resume)(struct pci_dev *dev); /* Device woken up */
+ void (*shutdown)(struct pci_dev *dev);
+ int (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
+ int (*sriov_set_msix_vec_count)(struct pci_dev *vf, int msix_vec_count); /* On PF */
+ u32 (*sriov_get_vf_total_msix)(struct pci_dev *pf);
const struct pci_error_handlers *err_handler;
+ const struct attribute_group **groups;
+ const struct attribute_group **dev_groups;
struct device_driver driver;
- struct pci_dynids dynids;
+ struct pci_dynids dynids;
+ bool driver_managed_dma;
};
-#define to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
+static inline struct pci_driver *to_pci_driver(struct device_driver *drv)
+{
+ return drv ? container_of(drv, struct pci_driver, driver) : NULL;
+}
/**
- * PCI_DEVICE - macro used to describe a specific pci device
+ * PCI_DEVICE - macro used to describe a specific PCI device
* @vend: the 16 bit PCI Vendor ID
* @dev: the 16 bit PCI Device ID
*
@@ -749,7 +953,36 @@ struct pci_driver {
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
/**
- * PCI_DEVICE_SUB - macro used to describe a specific pci device with subsystem
+ * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with
+ * override_only flags.
+ * @vend: the 16 bit PCI Vendor ID
+ * @dev: the 16 bit PCI Device ID
+ * @driver_override: the 32 bit PCI Device override_only
+ *
+ * This macro is used to create a struct pci_device_id that matches only a
+ * driver_override device. The subvendor and subdevice fields will be set to
+ * PCI_ANY_ID.
+ */
+#define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
+ .vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, .override_only = (driver_override)
+
+/**
+ * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO
+ * "driver_override" PCI device.
+ * @vend: the 16 bit PCI Vendor ID
+ * @dev: the 16 bit PCI Device ID
+ *
+ * This macro is used to create a struct pci_device_id that matches a
+ * specific device. The subvendor and subdevice fields will be set to
+ * PCI_ANY_ID and the driver_override will be set to
+ * PCI_ID_F_VFIO_DRIVER_OVERRIDE.
+ */
+#define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \
+ PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE)
+
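A sketch of how a VFIO variant driver's ID table might use this macro (the vendor/device IDs below are hypothetical); such a driver only binds when userspace writes its name into the device's driver_override attribute:

static const struct pci_device_id my_vfio_ids[] = {
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(0x1234, 0x5678) }, /* hypothetical */
	{ }
};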
+/**
+ * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
* @vend: the 16 bit PCI Vendor ID
* @dev: the 16 bit PCI Device ID
* @subvend: the 16 bit PCI Subvendor ID
@@ -763,7 +996,7 @@ struct pci_driver {
.subvendor = (subvend), .subdevice = (subdev)
/**
- * PCI_DEVICE_CLASS - macro used to describe a specific pci device class
+ * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
* @dev_class: the class, subclass, prog-if triple for this device
* @dev_class_mask: the class mask for this device
*
@@ -777,7 +1010,7 @@ struct pci_driver {
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
/**
- * PCI_VDEVICE - macro used to describe a specific pci device in short form
+ * PCI_VDEVICE - macro used to describe a specific PCI device in short form
* @vend: the vendor name
* @dev: the 16 bit PCI Device ID
*
@@ -786,22 +1019,41 @@ struct pci_driver {
* to PCI_ANY_ID. The macro allows the next field to follow as the device
* private data.
*/
-
#define PCI_VDEVICE(vend, dev) \
.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
+/**
+ * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
+ * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
+ * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
+ * @data: the driver data to be filled
+ *
+ * This macro is used to create a struct pci_device_id that matches a
+ * specific PCI device. The subvendor and subdevice fields will be set
+ * to PCI_ANY_ID.
+ */
+#define PCI_DEVICE_DATA(vend, dev, data) \
+ .vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
+ .driver_data = (kernel_ulong_t)(data)
+
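Usage sketch; FOO and BAR are hypothetical names that must expand to existing PCI_VENDOR_ID_FOO and PCI_DEVICE_ID_FOO_BAR constants:

static const struct pci_device_id foo_ids[] = {
	{ PCI_DEVICE_DATA(FOO, BAR, &foo_driver_info) },	/* hypothetical */
	{ }
};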
enum {
- PCI_REASSIGN_ALL_RSRC = 0x00000001, /* ignore firmware setup */
- PCI_REASSIGN_ALL_BUS = 0x00000002, /* reassign all bus numbers */
- PCI_PROBE_ONLY = 0x00000004, /* use existing setup */
- PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* don't do ISA alignment */
- PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* enable domains in /proc */
+ PCI_REASSIGN_ALL_RSRC = 0x00000001, /* Ignore firmware setup */
+ PCI_REASSIGN_ALL_BUS = 0x00000002, /* Reassign all bus numbers */
+ PCI_PROBE_ONLY = 0x00000004, /* Use existing setup */
+ PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* Don't do ISA alignment */
+ PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* Enable domains in /proc */
PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */
- PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* scan all, not just dev 0 */
+ PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */
};
-/* these external functions are only available when PCI support is enabled */
+#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */
+#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
+#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */
+#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */
+
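These flags are consumed by pci_alloc_irq_vectors(), declared later in this header; a common probe-time pattern, sketched with error handling trimmed (PCI_IRQ_ALL_TYPES is defined below as LEGACY|MSI|MSIX):

	int nvec;

	/* Prefer MSI-X, fall back to MSI, then legacy INTx; 1..8 vectors */
	nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
	if (nvec < 0)
		return nvec;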
+/* These external functions are only available when PCI support is enabled */
#ifdef CONFIG_PCI
extern unsigned int pci_flags;
@@ -814,11 +1066,11 @@ static inline int pci_has_flag(int flag) { return pci_flags & flag; }
void pcie_bus_configure_settings(struct pci_bus *bus);
enum pcie_bus_config_types {
- PCIE_BUS_TUNE_OFF, /* don't touch MPS at all */
- PCIE_BUS_DEFAULT, /* ensure MPS matches upstream bridge */
- PCIE_BUS_SAFE, /* use largest MPS boot-time devices support */
- PCIE_BUS_PERFORMANCE, /* use MPS and MRRS for best performance */
- PCIE_BUS_PEER2PEER, /* set MPS = 128 for all devices */
+ PCIE_BUS_TUNE_OFF, /* Don't touch MPS at all */
+ PCIE_BUS_DEFAULT, /* Ensure MPS matches upstream bridge */
+ PCIE_BUS_SAFE, /* Use largest MPS boot-time devices support */
+ PCIE_BUS_PERFORMANCE, /* Use MPS and MRRS for best performance */
+ PCIE_BUS_PEER2PEER, /* Set MPS = 128 for all devices */
};
extern enum pcie_bus_config_types pcie_bus_config;
@@ -827,7 +1079,7 @@ extern struct bus_type pci_bus_type;
/* Do NOT directly access this variable, unless you are arch-specific PCI
* code, or PCI core code. */
-extern struct list_head pci_root_buses; /* list of all known PCI buses */
+extern struct list_head pci_root_buses; /* List of all known PCI buses */
/* Some device drivers need to know if PCI is initialized */
int no_pci_devices(void);
@@ -844,9 +1096,8 @@ char *pcibios_setup(char *str);
resource_size_t pcibios_align_resource(void *, const struct resource *,
resource_size_t,
resource_size_t);
-void pcibios_update_irq(struct pci_dev *, int irq);
-/* Weak but can be overriden by arch */
+/* Weak but can be overridden by arch */
void pci_fixup_cardbus(struct pci_bus *);
/* Generic PCI functions used internally */
@@ -862,16 +1113,16 @@ struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata,
struct list_head *resources);
+int pci_host_probe(struct pci_host_bridge *bridge);
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
void pci_bus_release_busn_res(struct pci_bus *b);
struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
- struct pci_ops *ops, void *sysdata,
- struct list_head *resources);
+ struct pci_ops *ops, void *sysdata,
+ struct list_head *resources);
int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
int busnr);
-void pcie_update_link_speed(struct pci_bus *bus, u16 link_status);
struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
const char *name,
struct hotplug_slot *hotplug);
@@ -889,7 +1140,6 @@ void pci_bus_add_device(struct pci_dev *dev);
void pci_read_bridge_bases(struct pci_bus *child);
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
struct resource *res);
-struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev);
u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
@@ -908,34 +1158,27 @@ void pci_sort_breadthfirst(void);
/* Generic PCI functions exported to card drivers */
-enum pci_lost_interrupt_reason {
- PCI_LOST_IRQ_NO_INFORMATION = 0,
- PCI_LOST_IRQ_DISABLE_MSI,
- PCI_LOST_IRQ_DISABLE_MSIX,
- PCI_LOST_IRQ_DISABLE_ACPI,
-};
-enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev);
-int pci_find_capability(struct pci_dev *dev, int cap);
-int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
-int pci_find_ext_capability(struct pci_dev *dev, int cap);
-int pci_find_next_ext_capability(struct pci_dev *dev, int pos, int cap);
-int pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
-int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap);
+u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
+u8 pci_find_capability(struct pci_dev *dev, int cap);
+u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
+u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
+u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap);
+u16 pci_find_ext_capability(struct pci_dev *dev, int cap);
+u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap);
struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
+u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap);
+u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec);
+
+u64 pci_get_dsn(struct pci_dev *dev);
struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
- struct pci_dev *from);
+ struct pci_dev *from);
struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
- unsigned int ss_vendor, unsigned int ss_device,
- struct pci_dev *from);
+ unsigned int ss_vendor, unsigned int ss_device,
+ struct pci_dev *from);
struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
unsigned int devfn);
-static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
- unsigned int devfn)
-{
- return pci_get_domain_bus_and_slot(0, bus, devfn);
-}
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
int pci_dev_present(const struct pci_device_id *ids);
@@ -1003,7 +1246,7 @@ static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
}
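The pcie_capability_*() accessors take offsets into the PCIe Capability; for example, toggling a Device Control bit with the standard PCI_EXP_* constants (a sketch, return values ignored for brevity):

	/* Set, then clear, Enable Relaxed Ordering in Device Control */
	pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
	pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);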
-/* user-space driven config access */
+/* User-space driven config access */
int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
@@ -1045,10 +1288,11 @@ void pci_clear_master(struct pci_dev *dev);
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
int pci_set_cacheline_size(struct pci_dev *dev);
-#define HAVE_PCI_SET_MWI
int __must_check pci_set_mwi(struct pci_dev *dev);
+int __must_check pcim_set_mwi(struct pci_dev *dev);
int pci_try_set_mwi(struct pci_dev *dev);
void pci_clear_mwi(struct pci_dev *dev);
+void pci_disable_parity(struct pci_dev *dev);
void pci_intx(struct pci_dev *dev, int enable);
bool pci_check_and_mask_intx(struct pci_dev *dev);
bool pci_check_and_unmask_intx(struct pci_dev *dev);
@@ -1061,28 +1305,40 @@ int pcie_get_readrq(struct pci_dev *dev);
int pcie_set_readrq(struct pci_dev *dev, int rq);
int pcie_get_mps(struct pci_dev *dev);
int pcie_set_mps(struct pci_dev *dev, int mps);
-int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
- enum pcie_link_width *width);
-void pcie_flr(struct pci_dev *dev);
-int __pci_reset_function(struct pci_dev *dev);
+u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width);
+void pcie_print_link_status(struct pci_dev *dev);
+int pcie_reset_flr(struct pci_dev *dev, bool probe);
+int pcie_flr(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
+int pci_reset_function_locked(struct pci_dev *dev);
int pci_try_reset_function(struct pci_dev *dev);
int pci_probe_reset_slot(struct pci_slot *slot);
-int pci_reset_slot(struct pci_slot *slot);
-int pci_try_reset_slot(struct pci_slot *slot);
int pci_probe_reset_bus(struct pci_bus *bus);
-int pci_reset_bus(struct pci_bus *bus);
-int pci_try_reset_bus(struct pci_bus *bus);
+int pci_reset_bus(struct pci_dev *dev);
void pci_reset_secondary_bus(struct pci_dev *dev);
void pcibios_reset_secondary_bus(struct pci_dev *dev);
-void pci_reset_bridge_secondary_bus(struct pci_dev *dev);
void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
+void pci_release_resource(struct pci_dev *dev, int resno);
+static inline int pci_rebar_bytes_to_size(u64 bytes)
+{
+ bytes = roundup_pow_of_two(bytes);
+
+ /* Return BAR size as defined in the resizable BAR specification */
+ return max(ilog2(bytes), 20) - 20;
+}
+
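The encoding follows the Resizable BAR capability: size = log2(bytes) - 20, floored at 1 MB, so 1 MB -> 0, 8 MB -> 3, 256 MB -> 8. A hedged resize sketch (SZ_256M from <linux/sizes.h>; BAR 0 assumed resizable):

	int size = pci_rebar_bytes_to_size(SZ_256M);	/* -> 8 */
	int ret;

	if (pci_rebar_get_possible_sizes(pdev, 0) & BIT(size)) {
		ret = pci_resize_resource(pdev, 0, size);
		if (ret)
			return ret;
	}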
+u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
+int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
void pci_ignore_hotplug(struct pci_dev *dev);
+struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
+int pci_status_get_and_clear_errors(struct pci_dev *pdev);
int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
@@ -1094,8 +1350,6 @@ int pci_enable_rom(struct pci_dev *pdev);
void pci_disable_rom(struct pci_dev *pdev);
void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
-size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size);
-void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
/* Power management related routines */
int pci_save_state(struct pci_dev *dev);
@@ -1105,13 +1359,7 @@ int pci_load_saved_state(struct pci_dev *dev,
struct pci_saved_state *state);
int pci_load_and_free_saved_state(struct pci_dev *dev,
struct pci_saved_state **state);
-struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
-struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
- u16 cap);
-int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size);
-int pci_add_ext_cap_save_buffer(struct pci_dev *dev,
- u16 cap, unsigned int size);
-int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state);
+int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
@@ -1121,31 +1369,27 @@ int pci_wake_from_d3(struct pci_dev *dev, bool enable);
int pci_prepare_to_sleep(struct pci_dev *dev);
int pci_back_from_sleep(struct pci_dev *dev);
bool pci_dev_run_wake(struct pci_dev *dev);
-bool pci_check_pme_status(struct pci_dev *dev);
-void pci_pme_wakeup_bus(struct pci_bus *bus);
void pci_d3cold_enable(struct pci_dev *dev);
void pci_d3cold_disable(struct pci_dev *dev);
-
-/* PCI Virtual Channel */
-int pci_save_vc_state(struct pci_dev *dev);
-void pci_restore_vc_state(struct pci_dev *dev);
-void pci_allocate_vc_save_buffers(struct pci_dev *dev);
+bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
+void pci_resume_bus(struct pci_bus *bus);
+void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
/* For use by arch with custom probe code */
void set_pcie_port_type(struct pci_dev *pdev);
void set_pcie_hotplug_bridge(struct pci_dev *pdev);
/* Functions for PCI Hotplug drivers to use */
-int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
unsigned int pci_rescan_bus(struct pci_bus *bus);
void pci_lock_rescan_remove(void);
void pci_unlock_rescan_remove(void);
-/* Vital product data routines */
+/* Vital Product Data routines */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
-int pci_set_vpd_size(struct pci_dev *dev, size_t len);
+ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
+ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
@@ -1158,10 +1402,9 @@ void pci_assign_unassigned_resources(void);
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
+int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type);
void pdev_enable_device(struct pci_dev *);
int pci_enable_resources(struct pci_dev *, int mask);
-void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
- int (*)(const struct pci_dev *, u8, u8));
void pci_assign_irq(struct pci_dev *dev);
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
#define HAVE_PCI_REQ_REGIONS 2
@@ -1169,15 +1412,27 @@ int __must_check pci_request_regions(struct pci_dev *, const char *);
int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
void pci_release_regions(struct pci_dev *);
int __must_check pci_request_region(struct pci_dev *, int, const char *);
-int __must_check pci_request_region_exclusive(struct pci_dev *, int, const char *);
void pci_release_region(struct pci_dev *, int);
int pci_request_selected_regions(struct pci_dev *, int, const char *);
int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
void pci_release_selected_regions(struct pci_dev *, int);
+static inline __must_check struct resource *
+pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset,
+ unsigned int len, const char *name)
+{
+ return __request_region(&pdev->driver_exclusive_resource, offset, len,
+ name, IORESOURCE_EXCLUSIVE);
+}
+
+static inline void pci_release_config_region(struct pci_dev *pdev,
+ unsigned int offset,
+ unsigned int len)
+{
+ __release_region(&pdev->driver_exclusive_resource, offset, len);
+}
+
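These wrappers claim a byte range of config space in the device's driver_exclusive_resource tree so concurrent userspace access can be flagged; a sketch for a hypothetical vendor-specific capability at offset 0x100:

	struct resource *res;

	res = pci_request_config_region_exclusive(pdev, 0x100, 0x20, "my-vsec");
	if (!res)
		return -EBUSY;
	/* ... exclusive config-space use ... */
	pci_release_config_region(pdev, 0x100, 0x20);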
/* drivers/pci/bus.c */
-struct pci_bus *pci_bus_get(struct pci_bus *bus);
-void pci_bus_put(struct pci_bus *bus);
void pci_add_resource(struct list_head *resources, struct resource *res);
void pci_add_resource_offset(struct list_head *resources, struct resource *res,
resource_size_t offset);
@@ -1186,13 +1441,51 @@ void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
unsigned int flags);
struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
void pci_bus_remove_resources(struct pci_bus *bus);
+void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
int devm_request_pci_bus_resources(struct device *dev,
struct list_head *resources);
-#define pci_bus_for_each_resource(bus, res, i) \
- for (i = 0; \
- (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \
- i++)
+/* Temporary until new and working PCI SBR API in place */
+int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
+
+#define __pci_bus_for_each_res0(bus, res, ...) \
+ for (unsigned int __b = 0; \
+ (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
+ __b++)
+
+#define __pci_bus_for_each_res1(bus, res, __b) \
+ for (__b = 0; \
+ (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
+ __b++)
+
+/**
+ * pci_bus_for_each_resource - iterate over PCI bus resources
+ * @bus: the PCI bus
+ * @res: pointer to the current resource
+ * @...: optional index of the current resource
+ *
+ * Iterate over PCI bus resources. The first part is to go over PCI bus
+ * resource array, which has at most the %PCI_BRIDGE_RESOURCE_NUM entries.
+ * After that continue with the separate list of the additional resources,
+ * if not empty. That's why the Logical OR is being used.
+ *
+ * Possible usage:
+ *
+ * struct pci_bus *bus = ...;
+ * struct resource *res;
+ * unsigned int i;
+ *
+ * // With optional index
+ * pci_bus_for_each_resource(bus, res, i)
+ * pr_info("PCI bus resource[%u]: %pR\n", i, res);
+ *
+ * // Without index
+ * pci_bus_for_each_resource(bus, res)
+ * _do_something_(res);
+ */
+#define pci_bus_for_each_resource(bus, res, ...) \
+ CONCATENATE(__pci_bus_for_each_res, COUNT_ARGS(__VA_ARGS__)) \
+ (bus, res, __VA_ARGS__)
int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
struct resource *res, resource_size_t size,
@@ -1205,10 +1498,13 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
void *alignf_data);
-int pci_register_io_range(phys_addr_t addr, resource_size_t size);
+int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
+ resource_size_t size);
unsigned long pci_address_to_pio(phys_addr_t addr);
phys_addr_t pci_pio_to_address(unsigned long pio);
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
+int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
+ phys_addr_t phys_addr);
void pci_unmap_iospace(struct resource *res);
void __iomem *devm_pci_remap_cfgspace(struct device *dev,
resource_size_t offset,
@@ -1228,9 +1524,7 @@ static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
int __must_check __pci_register_driver(struct pci_driver *, struct module *,
const char *mod_name);
-/*
- * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded
- */
+/* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */
#define pci_register_driver(driver) \
__pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
@@ -1245,8 +1539,7 @@ void pci_unregister_driver(struct pci_driver *dev);
* use this macro once, and calling it replaces module_init() and module_exit()
*/
#define module_pci_driver(__pci_driver) \
- module_driver(__pci_driver, pci_register_driver, \
- pci_unregister_driver)
+ module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
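Putting the registration macros together, a minimal skeleton (hypothetical IDs; pcim_enable_device() is the managed variant, so no explicit teardown is needed):

#include <linux/module.h>
#include <linux/pci.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pcim_enable_device(pdev);	/* managed enable */
}

static const struct pci_device_id foo_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* hypothetical IDs */
	{ }
};
MODULE_DEVICE_TABLE(pci, foo_ids);

static struct pci_driver foo_driver = {
	.name		= "foo",
	.id_table	= foo_ids,
	.probe		= foo_probe,
};
module_pci_driver(foo_driver);
MODULE_LICENSE("GPL");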
/**
* builtin_pci_driver() - Helper macro for registering a PCI driver
@@ -1277,7 +1570,6 @@ unsigned char pci_bus_max_busnr(struct pci_bus *bus);
void pci_setup_bridge(struct pci_bus *bus);
resource_size_t pcibios_window_alignment(struct pci_bus *bus,
unsigned long type);
-resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
#define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
#define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
@@ -1285,30 +1577,22 @@ resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
int pci_set_vga_state(struct pci_dev *pdev, bool decode,
unsigned int command_bits, u32 flags);
-#define PCI_IRQ_LEGACY (1 << 0) /* allow legacy interrupts */
-#define PCI_IRQ_MSI (1 << 1) /* allow MSI interrupts */
-#define PCI_IRQ_MSIX (1 << 2) /* allow MSI-X interrupts */
-#define PCI_IRQ_AFFINITY (1 << 3) /* auto-assign affinity */
+/*
+ * Virtual interrupts allow for more interrupts to be allocated
+ * than the device has interrupts for. These are not programmed
+ * into the device's MSI-X table and must be handled by some
+ * other driver means.
+ */
+#define PCI_IRQ_VIRTUAL (1 << 4)
+
#define PCI_IRQ_ALL_TYPES \
(PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
-/* kmem_cache style wrapper around pci_alloc_consistent() */
-
-#include <linux/pci-dma.h>
#include <linux/dmapool.h>
-#define pci_pool dma_pool
-#define pci_pool_create(name, pdev, size, align, allocation) \
- dma_pool_create(name, &pdev->dev, size, align, allocation)
-#define pci_pool_destroy(pool) dma_pool_destroy(pool)
-#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
-#define pci_pool_zalloc(pool, flags, handle) \
- dma_pool_zalloc(pool, flags, handle)
-#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
-
struct msix_entry {
- u32 vector; /* kernel uses to write allocated vector */
- u16 entry; /* driver uses to specify entry, OS writes */
+ u32 vector; /* Kernel uses to write allocated vector */
+ u16 entry; /* Driver uses to specify entry, OS writes */
};
#ifdef CONFIG_PCI_MSI
@@ -1329,14 +1613,20 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev,
return rc;
return 0;
}
+int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags);
int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags,
- const struct irq_affinity *affd);
+ struct irq_affinity *affd);
+
+bool pci_msix_can_alloc_dyn(struct pci_dev *dev);
+struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
+ const struct irq_affinity_desc *affdesc);
+void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
void pci_free_irq_vectors(struct pci_dev *dev);
int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
-int pci_irq_get_node(struct pci_dev *pdev, int vec);
#else
static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
@@ -1348,21 +1638,42 @@ static inline int pci_msi_enabled(void) { return 0; }
static inline int pci_enable_msi(struct pci_dev *dev)
{ return -ENOSYS; }
static inline int pci_enable_msix_range(struct pci_dev *dev,
- struct msix_entry *entries, int minvec, int maxvec)
+ struct msix_entry *entries, int minvec, int maxvec)
{ return -ENOSYS; }
static inline int pci_enable_msix_exact(struct pci_dev *dev,
- struct msix_entry *entries, int nvec)
+ struct msix_entry *entries, int nvec)
{ return -ENOSYS; }
static inline int
pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags,
- const struct irq_affinity *aff_desc)
+ struct irq_affinity *aff_desc)
{
if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq)
return 1;
return -ENOSPC;
}
+static inline int
+pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags)
+{
+ return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs,
+ flags, NULL);
+}
+
+static inline bool pci_msix_can_alloc_dyn(struct pci_dev *dev)
+{ return false; }
+static inline struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
+ const struct irq_affinity_desc *affdesc)
+{
+ struct msi_map map = { .index = -ENOSYS, };
+
+ return map;
+}
+
+static inline void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map)
+{
+}
static inline void pci_free_irq_vectors(struct pci_dev *dev)
{
@@ -1379,83 +1690,107 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
{
return cpu_possible_mask;
}
-
-static inline int pci_irq_get_node(struct pci_dev *pdev, int vec)
-{
- return first_online_node;
-}
#endif
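The dynamic MSI-X entry points above allow post-enable growth of the vector space; a hedged sketch, assuming MSI-X was already enabled with pci_alloc_irq_vectors(..., PCI_IRQ_MSIX) and that MSI_ANY_INDEX comes from <linux/msi_api.h>:

	struct msi_map map;

	if (!pci_msix_can_alloc_dyn(pdev))
		return -EOPNOTSUPP;

	map = pci_msix_alloc_irq_at(pdev, MSI_ANY_INDEX, NULL);
	if (map.index < 0)
		return map.index;
	/* map.virq is a Linux IRQ number usable with request_irq() */
	pci_msix_free_irq(pdev, map);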
-static inline int
-pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
- unsigned int max_vecs, unsigned int flags)
+/**
+ * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq
+ * @d: the INTx IRQ domain
+ * @node: the DT node for the device whose interrupt we're translating
+ * @intspec: the interrupt specifier data from the DT
+ * @intsize: the number of entries in @intspec
+ * @out_hwirq: pointer at which to write the hwirq number
+ * @out_type: pointer at which to write the interrupt type
+ *
+ * Translate a PCI INTx interrupt number from device tree in the range 1-4, as
+ * stored in the standard PCI_INTERRUPT_PIN register, to a value in the range
+ * 0-3 suitable for use in a 4 entry IRQ domain. That is, subtract one from the
+ * INTx value to obtain the hwirq number.
+ *
+ * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range.
+ */
+static inline int pci_irqd_intx_xlate(struct irq_domain *d,
+ struct device_node *node,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
{
- return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags,
- NULL);
+ const u32 intx = intspec[0];
+
+ if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD)
+ return -EINVAL;
+
+ *out_hwirq = intx - PCI_INTERRUPT_INTA;
+ return 0;
}
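Host-bridge drivers typically wire this helper straight into their INTx domain ops (sketch; my_intx_map is a hypothetical .map callback):

static const struct irq_domain_ops intx_domain_ops = {
	.map	= my_intx_map,		/* hypothetical mapper */
	.xlate	= pci_irqd_intx_xlate,
};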
#ifdef CONFIG_PCIEPORTBUS
extern bool pcie_ports_disabled;
-extern bool pcie_ports_auto;
+extern bool pcie_ports_native;
#else
#define pcie_ports_disabled true
-#define pcie_ports_auto false
+#define pcie_ports_native false
#endif
+#define PCIE_LINK_STATE_L0S BIT(0)
+#define PCIE_LINK_STATE_L1 BIT(1)
+#define PCIE_LINK_STATE_CLKPM BIT(2)
+#define PCIE_LINK_STATE_L1_1 BIT(3)
+#define PCIE_LINK_STATE_L1_2 BIT(4)
+#define PCIE_LINK_STATE_L1_1_PCIPM BIT(5)
+#define PCIE_LINK_STATE_L1_2_PCIPM BIT(6)
+#define PCIE_LINK_STATE_ALL (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |\
+ PCIE_LINK_STATE_CLKPM | PCIE_LINK_STATE_L1_1 |\
+ PCIE_LINK_STATE_L1_2 | PCIE_LINK_STATE_L1_1_PCIPM |\
+ PCIE_LINK_STATE_L1_2_PCIPM)
+
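Latency-sensitive drivers can mask individual states with these bits, e.g. keeping the link out of the L1 substates while leaving L0s/L1 policy alone (sketch, return value ignored):

	pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_1 |
				     PCIE_LINK_STATE_L1_2);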
#ifdef CONFIG_PCIEASPM
+int pci_disable_link_state(struct pci_dev *pdev, int state);
+int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
+int pci_enable_link_state(struct pci_dev *pdev, int state);
+void pcie_no_aspm(void);
bool pcie_aspm_support_enabled(void);
+bool pcie_aspm_enabled(struct pci_dev *pdev);
#else
+static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
+{ return 0; }
+static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
+{ return 0; }
+static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
+{ return 0; }
+static inline void pcie_no_aspm(void) { }
static inline bool pcie_aspm_support_enabled(void) { return false; }
+static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
#endif
#ifdef CONFIG_PCIEAER
-void pci_no_aer(void);
bool pci_aer_available(void);
-int pci_aer_init(struct pci_dev *dev);
#else
-static inline void pci_no_aer(void) { }
static inline bool pci_aer_available(void) { return false; }
-static inline int pci_aer_init(struct pci_dev *d) { return -ENODEV; }
-#endif
-
-#ifdef CONFIG_PCIE_ECRC
-void pcie_set_ecrc_checking(struct pci_dev *dev);
-void pcie_ecrc_get_policy(char *str);
-#else
-static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
-static inline void pcie_ecrc_get_policy(char *str) { }
#endif
-#ifdef CONFIG_HT_IRQ
-/* The functions a driver should call */
-int ht_create_irq(struct pci_dev *dev, int idx);
-void ht_destroy_irq(unsigned int irq);
-#endif /* CONFIG_HT_IRQ */
-
-#ifdef CONFIG_PCI_ATS
-/* Address Translation Service */
-void pci_ats_init(struct pci_dev *dev);
-int pci_enable_ats(struct pci_dev *dev, int ps);
-void pci_disable_ats(struct pci_dev *dev);
-int pci_ats_queue_depth(struct pci_dev *dev);
-#else
-static inline void pci_ats_init(struct pci_dev *d) { }
-static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
-static inline void pci_disable_ats(struct pci_dev *d) { }
-static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
-#endif
+bool pci_ats_disabled(void);
#ifdef CONFIG_PCIE_PTM
int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
+void pci_disable_ptm(struct pci_dev *dev);
+bool pcie_ptm_enabled(struct pci_dev *dev);
#else
static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
{ return -EINVAL; }
+static inline void pci_disable_ptm(struct pci_dev *dev) { }
+static inline bool pcie_ptm_enabled(struct pci_dev *dev)
+{ return false; }
#endif
void pci_cfg_access_lock(struct pci_dev *dev);
bool pci_cfg_access_trylock(struct pci_dev *dev);
void pci_cfg_access_unlock(struct pci_dev *dev);
+void pci_dev_lock(struct pci_dev *dev);
+int pci_dev_trylock(struct pci_dev *dev);
+void pci_dev_unlock(struct pci_dev *dev);
+
/*
 * PCI domain support. Sometimes called PCI segment (e.g. by ACPI),
* a PCI domain is defined to be a set of PCI buses which share
@@ -1463,12 +1798,10 @@ void pci_cfg_access_unlock(struct pci_dev *dev);
*/
#ifdef CONFIG_PCI_DOMAINS
extern int pci_domains_supported;
-int pci_get_new_domain_nr(void);
#else
enum { pci_domains_supported = 0 };
static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
-static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#endif /* CONFIG_PCI_DOMAINS */
/*
@@ -1488,11 +1821,12 @@ static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
{ return 0; }
#endif
int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
+void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent);
#endif
-/* some architectures require additional setup to direct VGA traffic */
+/* Some architectures require additional setup to direct VGA traffic */
typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
- unsigned int command_bits, u32 flags);
+ unsigned int command_bits, u32 flags);
void pci_register_set_vga_state(arch_set_vga_state_t func);
static inline int
@@ -1531,10 +1865,9 @@ static inline void pci_clear_flags(int flags) { }
static inline int pci_has_flag(int flag) { return 0; }
/*
- * If the system does not have PCI, clearly these return errors. Define
- * these as simple inline functions to avoid hair in drivers.
+ * If the system does not have PCI, clearly these return errors. Define
+ * these as simple inline functions to avoid hair in drivers.
*/
-
#define _PCI_NOP(o, s, t) \
static inline int pci_##o##_config_##s(struct pci_dev *dev, \
int where, t val) \
@@ -1562,22 +1895,27 @@ static inline struct pci_dev *pci_get_class(unsigned int class,
struct pci_dev *from)
{ return NULL; }
-#define pci_dev_present(ids) (0)
+
+static inline int pci_dev_present(const struct pci_device_id *ids)
+{ return 0; }
+
#define no_pci_devices() (1)
#define pci_dev_put(dev) do { } while (0)
static inline void pci_set_master(struct pci_dev *dev) { }
static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
static inline void pci_disable_device(struct pci_dev *dev) { }
+static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
static inline int pci_assign_resource(struct pci_dev *dev, int i)
{ return -EBUSY; }
-static inline int __pci_register_driver(struct pci_driver *drv,
- struct module *owner)
+static inline int __must_check __pci_register_driver(struct pci_driver *drv,
+ struct module *owner,
+ const char *mod_name)
{ return 0; }
static inline int pci_register_driver(struct pci_driver *drv)
{ return 0; }
static inline void pci_unregister_driver(struct pci_driver *drv) { }
-static inline int pci_find_capability(struct pci_dev *dev, int cap)
+static inline u8 pci_find_capability(struct pci_dev *dev, int cap)
{ return 0; }
static inline int pci_find_next_capability(struct pci_dev *dev, u8 pos,
int cap)
@@ -1585,6 +1923,9 @@ static inline int pci_find_next_capability(struct pci_dev *dev, u8 post,
static inline int pci_find_ext_capability(struct pci_dev *dev, int cap)
{ return 0; }
+static inline u64 pci_get_dsn(struct pci_dev *dev)
+{ return 0; }
+
/* Power management related routines */
static inline int pci_save_state(struct pci_dev *dev) { return 0; }
static inline void pci_restore_state(struct pci_dev *dev) { }
@@ -1606,52 +1947,73 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
{ return -EIO; }
static inline void pci_release_regions(struct pci_dev *dev) { }
-static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
+static inline int pci_register_io_range(struct fwnode_handle *fwnode,
+ phys_addr_t addr, resource_size_t size)
+{ return -EINVAL; }
-static inline void pci_block_cfg_access(struct pci_dev *dev) { }
-static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev)
-{ return 0; }
-static inline void pci_unblock_cfg_access(struct pci_dev *dev) { }
+static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
{ return NULL; }
static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
unsigned int devfn)
{ return NULL; }
-static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
- unsigned int devfn)
+static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
+ unsigned int bus, unsigned int devfn)
{ return NULL; }
static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
-static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#define dev_is_pci(d) (false)
#define dev_is_pf(d) (false)
+static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
+{ return false; }
+static inline int pci_irqd_intx_xlate(struct irq_domain *d,
+ struct device_node *node,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{ return -EINVAL; }
+
+static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
+ struct pci_dev *dev)
+{ return NULL; }
+static inline bool pci_ats_disabled(void) { return true; }
+
+static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
+{
+ return -EINVAL;
+}
+
+static inline int
+pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags,
+ struct irq_affinity *aff_desc)
+{
+ return -ENOSPC;
+}
+static inline int
+pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags)
+{
+ return -ENOSPC;
+}
#endif /* CONFIG_PCI */
/* Include architecture-dependent settings and functions */
#include <asm/pci.h>
-/* These two functions provide almost identical functionality. Depennding
- * on the architecture, one will be implemented as a wrapper around the
- * other (in drivers/pci/mmap.c).
- *
+/*
* pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
* is expected to be an offset within that region.
- *
- * pci_mmap_page_range() is the legacy architecture-specific interface,
- * which accepts a "user visible" resource address converted by
- * pci_resource_to_user(), as used in the legacy mmap() interface in
- * /proc/bus/pci/.
*/
int pci_mmap_resource_range(struct pci_dev *dev, int bar,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
-int pci_mmap_page_range(struct pci_dev *pdev, int bar,
- struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
#ifndef arch_can_pci_mmap_wc
#define arch_can_pci_mmap_wc() 0
@@ -1668,20 +2030,34 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
#define pci_root_bus_fwnode(bus) NULL
#endif
-/* these helpers provide future and backwards compatibility
- * for accessing popular PCI BAR info */
-#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
-#define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end)
-#define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags)
-#define pci_resource_len(dev,bar) \
- ((pci_resource_start((dev), (bar)) == 0 && \
- pci_resource_end((dev), (bar)) == \
- pci_resource_start((dev), (bar))) ? 0 : \
- \
- (pci_resource_end((dev), (bar)) - \
- pci_resource_start((dev), (bar)) + 1))
-
-/* Similar to the helpers above, these manipulate per-pci_dev
+/*
+ * These helpers provide future and backwards compatibility
+ * for accessing popular PCI BAR info
+ */
+#define pci_resource_n(dev, bar) (&(dev)->resource[(bar)])
+#define pci_resource_start(dev, bar) (pci_resource_n(dev, bar)->start)
+#define pci_resource_end(dev, bar) (pci_resource_n(dev, bar)->end)
+#define pci_resource_flags(dev, bar) (pci_resource_n(dev, bar)->flags)
+#define pci_resource_len(dev,bar) \
+ (pci_resource_end((dev), (bar)) ? \
+ resource_size(pci_resource_n((dev), (bar))) : 0)
+
+#define __pci_dev_for_each_res0(dev, res, ...) \
+ for (unsigned int __b = 0; \
+ res = pci_resource_n(dev, __b), __b < PCI_NUM_RESOURCES; \
+ __b++)
+
+#define __pci_dev_for_each_res1(dev, res, __b) \
+ for (__b = 0; \
+ res = pci_resource_n(dev, __b), __b < PCI_NUM_RESOURCES; \
+ __b++)
+
+#define pci_dev_for_each_resource(dev, res, ...) \
+ CONCATENATE(__pci_dev_for_each_res, COUNT_ARGS(__VA_ARGS__)) \
+ (dev, res, __VA_ARGS__)
+
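A sketch combining the new helpers: visit every BAR slot and report the populated memory BARs (unset slots have zero flags):

	struct resource *res;

	pci_dev_for_each_resource(pdev, res) {
		if (res->flags & IORESOURCE_MEM)
			pci_info(pdev, "MEM BAR: %pR\n", res);
	}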
+/*
+ * Similar to the helpers above, these manipulate per-pci_dev
* driver-specific data. They are really just a wrapper around
* the generic device structure functions of these calls.
*/
@@ -1695,46 +2071,32 @@ static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
dev_set_drvdata(&pdev->dev, data);
}
-/* If you want to know what to call your pci_dev, ask this function.
- * Again, it's a wrapper around the generic device.
- */
static inline const char *pci_name(const struct pci_dev *pdev)
{
return dev_name(&pdev->dev);
}
-
-/* Some archs don't want to expose struct resource to userland as-is
- * in sysfs and /proc
- */
-#ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER
void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc,
resource_size_t *start, resource_size_t *end);
-#else
-static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
- const struct resource *rsrc, resource_size_t *start,
- resource_size_t *end)
-{
- *start = rsrc->start;
- *end = rsrc->end;
-}
-#endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */
-
/*
- * The world is not perfect and supplies us with broken PCI devices.
- * For at least a part of these bugs we need a work-around, so both
- * generic (drivers/pci/quirks.c) and per-architecture code can define
- * fixup hooks to be called for particular buggy devices.
+ * The world is not perfect and supplies us with broken PCI devices.
+ * For at least a part of these bugs we need a work-around, so both
+ * generic (drivers/pci/quirks.c) and per-architecture code can define
+ * fixup hooks to be called for particular buggy devices.
*/
struct pci_fixup {
- u16 vendor; /* You can use PCI_ANY_ID here of course */
- u16 device; /* You can use PCI_ANY_ID here of course */
- u32 class; /* You can use PCI_ANY_ID here too */
+ u16 vendor; /* Or PCI_ANY_ID */
+ u16 device; /* Or PCI_ANY_ID */
+ u32 class; /* Or PCI_ANY_ID */
unsigned int class_shift; /* should be 0, 8, 16 */
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+ int hook_offset;
+#else
void (*hook)(struct pci_dev *dev);
+#endif
};
enum pci_fixup_pass {
@@ -1748,12 +2110,51 @@ enum pci_fixup_pass {
pci_fixup_suspend_late, /* pci_device_suspend_late() */
};
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+#define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, hook) \
+ __ADDRESSABLE(hook) \
+ asm(".section " #sec ", \"a\" \n" \
+ ".balign 16 \n" \
+ ".short " #vendor ", " #device " \n" \
+ ".long " #class ", " #class_shift " \n" \
+ ".long " #hook " - . \n" \
+ ".previous \n");
+
+/*
+ * Clang's LTO may rename static functions in C, but has no way to
+ * handle such renamings when referenced from inline asm. To work
+ * around this, create global C stubs for these cases.
+ */
+#ifdef CONFIG_LTO_CLANG
+#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, hook, stub) \
+ void stub(struct pci_dev *dev); \
+ void stub(struct pci_dev *dev) \
+ { \
+ hook(dev); \
+ } \
+ ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, stub)
+#else
+#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, hook, stub) \
+ ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, hook)
+#endif
+
+#define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, hook) \
+ __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, hook, __UNIQUE_ID(hook))
+#else
/* Anonymous variables would be nice... */
#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
class_shift, hook) \
static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \
__attribute__((__section__(#section), aligned((sizeof(void *))))) \
= { vendor, device, class, class_shift, hook };
+#endif
#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \
class_shift, hook) \
@@ -1774,23 +2175,19 @@ enum pci_fixup_pass {
#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
- resume##hook, vendor, device, class, \
- class_shift, hook)
+ resume##hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
- resume_early##hook, vendor, device, \
- class, class_shift, hook)
+ resume_early##hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
- suspend##hook, vendor, device, class, \
- class_shift, hook)
+ suspend##hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
- suspend_late##hook, vendor, device, \
- class, class_shift, hook)
+ suspend_late##hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
@@ -1806,37 +2203,22 @@ enum pci_fixup_pass {
hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
- resume##hook, vendor, device, \
- PCI_ANY_ID, 0, hook)
+ resume##hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
- resume_early##hook, vendor, device, \
- PCI_ANY_ID, 0, hook)
+ resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
- suspend##hook, vendor, device, \
- PCI_ANY_ID, 0, hook)
+ suspend##hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
- suspend_late##hook, vendor, device, \
- PCI_ANY_ID, 0, hook)
+ suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook)
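A hedged example of wiring a quirk with these macros (the vendor/device IDs and the fixup behaviour are made up for illustration; real quirks live in drivers/pci/quirks.c):

	static void quirk_example_fix_class(struct pci_dev *dev)
	{
		/* Pretend the part misreports its class code; repair it early */
		dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
		pci_info(dev, "class code fixed up by example quirk\n");
	}
	DECLARE_PCI_FIXUP_EARLY(0x1234, 0x5678, quirk_example_fix_class);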
#ifdef CONFIG_PCI_QUIRKS
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
-int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
-int pci_dev_specific_enable_acs(struct pci_dev *dev);
#else
static inline void pci_fixup_device(enum pci_fixup_pass pass,
struct pci_dev *dev) { }
-static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
- u16 acs_flags)
-{
- return -ENOTTY;
-}
-static inline int pci_dev_specific_enable_acs(struct pci_dev *dev)
-{
- return -ENOTTY;
-}
#endif
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
@@ -1861,24 +2243,21 @@ extern unsigned long pci_cardbus_mem_size;
extern u8 pci_dfl_cache_line_size;
extern u8 pci_cache_line_size;
-extern unsigned long pci_hotplug_io_size;
-extern unsigned long pci_hotplug_mem_size;
-extern unsigned long pci_hotplug_bus_size;
-
/* Architecture-specific versions may override these (weak) */
void pcibios_disable_device(struct pci_dev *dev);
void pcibios_set_master(struct pci_dev *dev);
int pcibios_set_pcie_reset_state(struct pci_dev *dev,
enum pcie_reset_state state);
-int pcibios_add_device(struct pci_dev *dev);
+int pcibios_device_add(struct pci_dev *dev);
void pcibios_release_device(struct pci_dev *dev);
+#ifdef CONFIG_PCI
void pcibios_penalize_isa_irq(int irq, int active);
+#else
+static inline void pcibios_penalize_isa_irq(int irq, int active) {}
+#endif
int pcibios_alloc_irq(struct pci_dev *dev);
void pcibios_free_irq(struct pci_dev *dev);
-
-#ifdef CONFIG_HIBERNATE_CALLBACKS
-extern struct dev_pm_ops pcibios_pm_ops;
-#endif
+resource_size_t pcibios_default_alignment(void);
#if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
void __init pci_mmcfg_early_init(void);
@@ -1896,16 +2275,26 @@ void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
#ifdef CONFIG_PCI_IOV
int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
-
+int pci_iov_vf_id(struct pci_dev *dev);
+void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver);
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
void pci_disable_sriov(struct pci_dev *dev);
-int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset);
-void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset);
+
+int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id);
+int pci_iov_add_virtfn(struct pci_dev *dev, int id);
+void pci_iov_remove_virtfn(struct pci_dev *dev, int id);
int pci_num_vf(struct pci_dev *dev);
int pci_vfs_assigned(struct pci_dev *dev);
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
int pci_sriov_get_totalvfs(struct pci_dev *dev);
+int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
+void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
+
+/* Arch may override these (weak) */
+int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs);
+int pcibios_sriov_disable(struct pci_dev *pdev);
+resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
#else
static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
{
@@ -1915,14 +2304,32 @@ static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
{
return -ENOSYS;
}
+
+static inline int pci_iov_vf_id(struct pci_dev *dev)
+{
+ return -ENOSYS;
+}
+
+static inline void *pci_iov_get_pf_drvdata(struct pci_dev *dev,
+ struct pci_driver *pf_driver)
+{
+ return ERR_PTR(-EINVAL);
+}
+
static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{ return -ENODEV; }
-static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
+
+static inline int pci_iov_sysfs_link(struct pci_dev *dev,
+ struct pci_dev *virtfn, int id)
+{
+ return -ENODEV;
+}
+static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id)
{
return -ENOSYS;
}
static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
- int id, int reset) { }
+ int id) { }
static inline void pci_disable_sriov(struct pci_dev *dev) { }
static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
static inline int pci_vfs_assigned(struct pci_dev *dev)
@@ -1931,8 +2338,10 @@ static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{ return 0; }
static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
{ return 0; }
+#define pci_sriov_configure_simple NULL
static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
{ return 0; }
+static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
#endif
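A sketch of the ->sriov_configure() pattern these helpers support; a driver with no per-VF setup of its own can point the callback straight at pci_sriov_configure_simple(), which implements essentially this (example_* name hypothetical):

	static int example_sriov_configure(struct pci_dev *dev, int num_vfs)
	{
		int ret;

		if (!num_vfs) {
			if (pci_vfs_assigned(dev))
				return -EBUSY;	/* VFs still in guest hands */
			pci_disable_sriov(dev);
			return 0;
		}

		ret = pci_enable_sriov(dev, num_vfs);
		return ret ? ret : num_vfs;	/* convention: return count enabled */
	}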
#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
@@ -1985,17 +2394,22 @@ static inline int pci_pcie_type(const struct pci_dev *dev)
return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
}
+/**
+ * pcie_find_root_port - Get the PCIe root port device
+ * @dev: PCI device
+ *
+ * Traverse up the parent chain and return the PCIe Root Port PCI Device
+ * for a given PCI/PCIe Device.
+ */
static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
{
- while (1) {
- if (!pci_is_pcie(dev))
- break;
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+ while (dev) {
+ if (pci_is_pcie(dev) &&
+ pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
return dev;
- if (!dev->bus->self)
- break;
- dev = dev->bus->self;
+ dev = pci_upstream_bridge(dev);
}
+
return NULL;
}
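Usage sketch (pdev is assumed to be a valid struct pci_dev *):

	struct pci_dev *rp = pcie_find_root_port(pdev);

	if (rp)
		pci_info(pdev, "behind Root Port %s\n", pci_name(rp));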
@@ -2003,6 +2417,7 @@ void pci_request_acs(void);
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
bool pci_acs_path_enabled(struct pci_dev *start,
struct pci_dev *end, u16 acs_flags);
+int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
#define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT)
@@ -2016,118 +2431,69 @@ bool pci_acs_path_enabled(struct pci_dev *start,
#define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
#define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
-/* Small Resource Data Type Tag Item Names */
-#define PCI_VPD_STIN_END 0x0f /* End */
-
-#define PCI_VPD_SRDT_END (PCI_VPD_STIN_END << 3)
-
-#define PCI_VPD_SRDT_TIN_MASK 0x78
-#define PCI_VPD_SRDT_LEN_MASK 0x07
-#define PCI_VPD_LRDT_TIN_MASK 0x7f
-
-#define PCI_VPD_LRDT_TAG_SIZE 3
-#define PCI_VPD_SRDT_TAG_SIZE 1
-
-#define PCI_VPD_INFO_FLD_HDR_SIZE 3
-
#define PCI_VPD_RO_KEYWORD_PARTNO "PN"
+#define PCI_VPD_RO_KEYWORD_SERIALNO "SN"
#define PCI_VPD_RO_KEYWORD_MFR_ID "MN"
#define PCI_VPD_RO_KEYWORD_VENDOR0 "V0"
#define PCI_VPD_RO_KEYWORD_CHKSUM "RV"
/**
- * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length
- * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
- *
- * Returns the extracted Large Resource Data Type length.
- */
-static inline u16 pci_vpd_lrdt_size(const u8 *lrdt)
-{
- return (u16)lrdt[1] + ((u16)lrdt[2] << 8);
-}
-
-/**
- * pci_vpd_lrdt_tag - Extracts the Large Resource Data Type Tag Item
- * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
- *
- * Returns the extracted Large Resource Data Type Tag item.
- */
-static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt)
-{
- return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
-}
-
-/**
- * pci_vpd_srdt_size - Extracts the Small Resource Data Type length
- * @lrdt: Pointer to the beginning of the Small Resource Data Type tag
- *
- * Returns the extracted Small Resource Data Type length.
- */
-static inline u8 pci_vpd_srdt_size(const u8 *srdt)
-{
- return (*srdt) & PCI_VPD_SRDT_LEN_MASK;
-}
-
-/**
- * pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item
- * @lrdt: Pointer to the beginning of the Small Resource Data Type tag
+ * pci_vpd_alloc - Allocate buffer and read VPD into it
+ * @dev: PCI device
+ * @size: pointer to field where VPD length is returned
*
- * Returns the extracted Small Resource Data Type Tag Item.
+ * Returns pointer to allocated buffer or an ERR_PTR in case of failure
*/
-static inline u8 pci_vpd_srdt_tag(const u8 *srdt)
-{
- return ((*srdt) & PCI_VPD_SRDT_TIN_MASK) >> 3;
-}
+void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size);
/**
- * pci_vpd_info_field_size - Extracts the information field length
- * @lrdt: Pointer to the beginning of an information field header
+ * pci_vpd_find_id_string - Locate id string in VPD
+ * @buf: Pointer to buffered VPD data
+ * @len: The length of the buffer area in which to search
+ * @size: Pointer to field where length of id string is returned
*
- * Returns the extracted information field length.
+ * Returns the index of the id string or -ENOENT if not found.
*/
-static inline u8 pci_vpd_info_field_size(const u8 *info_field)
-{
- return info_field[2];
-}
+int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size);
/**
- * pci_vpd_find_tag - Locates the Resource Data Type tag provided
- * @buf: Pointer to buffered vpd data
- * @off: The offset into the buffer at which to begin the search
- * @len: The length of the vpd buffer
- * @rdt: The Resource Data Type to search for
+ * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section
+ * @buf: Pointer to buffered VPD data
+ * @len: The length of the buffer area in which to search
+ * @kw: The keyword to search for
+ * @size: Pointer to field where length of found keyword data is returned
*
- * Returns the index where the Resource Data Type was found or
- * -ENOENT otherwise.
+ * Returns the index of the information field keyword data or -ENOENT if
+ * not found.
*/
-int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt);
+int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len,
+ const char *kw, unsigned int *size);
/**
- * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD
- * @buf: Pointer to buffered vpd data
- * @off: The offset into the buffer at which to begin the search
- * @len: The length of the buffer area, relative to off, in which to search
- * @kw: The keyword to search for
+ * pci_vpd_check_csum - Check VPD checksum
+ * @buf: Pointer to buffered VPD data
+ * @len: VPD size
*
- * Returns the index where the information field keyword was found or
- * -ENOENT otherwise.
+ * Returns 1 if the VPD has no checksum, otherwise 0 on success or a negative errno
*/
-int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
- unsigned int len, const char *kw);
+int pci_vpd_check_csum(const void *buf, unsigned int len);
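Putting the new VPD helpers together, a sketch that reads a device's serial number (illustrative; error handling trimmed to the essentials):

	static int example_vpd_serial(struct pci_dev *dev)
	{
		unsigned int vpd_len, kw_len;
		void *vpd;
		int off, ret = 0;

		vpd = pci_vpd_alloc(dev, &vpd_len);
		if (IS_ERR(vpd))
			return PTR_ERR(vpd);

		off = pci_vpd_find_ro_info_keyword(vpd, vpd_len,
						   PCI_VPD_RO_KEYWORD_SERIALNO,
						   &kw_len);
		if (off < 0)
			ret = off;
		else
			pci_info(dev, "serial number: %.*s\n", (int)kw_len,
				 (char *)vpd + off);

		kfree(vpd);
		return ret;
	}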
/* PCI <-> OF binding helpers */
#ifdef CONFIG_OF
struct device_node;
struct irq_domain;
-void pci_set_of_node(struct pci_dev *dev);
-void pci_release_of_node(struct pci_dev *dev);
-void pci_set_bus_of_node(struct pci_bus *bus);
-void pci_release_bus_of_node(struct pci_bus *bus);
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
+bool pci_host_of_has_msi_map(struct device *dev);
/* Arch may override this (weak) */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
+#else /* CONFIG_OF */
+static inline struct irq_domain *
+pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
+static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
+#endif /* CONFIG_OF */
+
static inline struct device_node *
pci_device_to_OF_node(const struct pci_dev *pdev)
{
@@ -2139,25 +2505,16 @@ static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
return bus ? bus->dev.of_node : NULL;
}
-#else /* CONFIG_OF */
-static inline void pci_set_of_node(struct pci_dev *dev) { }
-static inline void pci_release_of_node(struct pci_dev *dev) { }
-static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
-static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
-static inline struct device_node *
-pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
-static inline struct irq_domain *
-pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
-#endif /* CONFIG_OF */
-
#ifdef CONFIG_ACPI
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
+bool pci_pr3_present(struct pci_dev *pdev);
#else
static inline struct irq_domain *
pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
+static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
#endif
#ifdef CONFIG_EEH
@@ -2167,13 +2524,13 @@ static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
}
#endif
-void pci_add_dma_alias(struct pci_dev *dev, u8 devfn);
+void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns);
bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
int pci_for_each_dma_alias(struct pci_dev *pdev,
int (*fn)(struct pci_dev *pdev,
u16 alias, void *data), void *data);
-/* helper functions for operation of device flag */
+/* Helper functions for operation of device flag */
static inline void pci_set_dev_assigned(struct pci_dev *pdev)
{
pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
@@ -2220,7 +2577,45 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
return false;
}
-/* provide the legacy pci_dma_* API */
-#include <linux/pci-dma-compat.h>
+#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
+void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
+#endif
+
+struct msi_domain_template;
+
+bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template,
+ unsigned int hwsize, void *data);
+struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, union msi_instance_cookie *icookie,
+ const struct irq_affinity_desc *affdesc);
+void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map);
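A sketch of allocating one IMS vector (the cookie layout is device-specific, and example_handler is a hypothetical interrupt handler):

	union msi_instance_cookie icookie = { .value = 0 };
	struct msi_map map;

	map = pci_ims_alloc_irq(pdev, &icookie, NULL);
	if (map.index >= 0) {
		if (request_irq(map.virq, example_handler, 0, "example-ims", NULL))
			pci_ims_free_irq(pdev, map);
	}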
+
+#include <linux/dma-mapping.h>
+
+#define pci_printk(level, pdev, fmt, arg...) \
+ dev_printk(level, &(pdev)->dev, fmt, ##arg)
+
+#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg)
+#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg)
+#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg)
+#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg)
+#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg)
+#define pci_warn_once(pdev, fmt, arg...) dev_warn_once(&(pdev)->dev, fmt, ##arg)
+#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg)
+#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
+#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
+
+#define pci_notice_ratelimited(pdev, fmt, arg...) \
+ dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)
+
+#define pci_info_ratelimited(pdev, fmt, arg...) \
+ dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)
+
+#define pci_WARN(pdev, condition, fmt, arg...) \
+ WARN(condition, "%s %s: " fmt, \
+ dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
+
+#define pci_WARN_ONCE(pdev, condition, fmt, arg...) \
+ WARN_ONCE(condition, "%s %s: " fmt, \
+ dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
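Usage sketch for the logging wrappers (the values are illustrative):

	pci_warn(pdev, "link downgraded to %d GT/s\n", speed);
	pci_WARN_ONCE(pdev, bar >= PCI_STD_NUM_BARS, "BAR %d out of range\n", bar);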
#endif /* LINUX_PCI_H */
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 2e855afa0212..3a10d6ec3ee7 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* PCI HotPlug Core Functions
*
@@ -7,21 +8,6 @@
*
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* Send feedback to <kristen.c.accardi@intel.com>
*
*/
@@ -30,8 +16,6 @@
/**
 * struct hotplug_slot_ops - the callbacks that the hotplug pci core can use
- * @owner: The module owner of this structure
- * @mod_name: The module name (KBUILD_MODNAME) of this structure
* @enable_slot: Called when the user wants to enable a specific pci slot
* @disable_slot: Called when the user wants to disable a specific pci slot
* @set_attention_status: Called to set the specific slot's attention LED to
@@ -39,17 +23,9 @@
* @hardware_test: Called to run a specified hardware test on the specified
* slot.
* @get_power_status: Called to get the current power status of a slot.
- * If this field is NULL, the value passed in the struct hotplug_slot_info
- * will be used when this value is requested by a user.
* @get_attention_status: Called to get the current attention status of a slot.
- * If this field is NULL, the value passed in the struct hotplug_slot_info
- * will be used when this value is requested by a user.
* @get_latch_status: Called to get the current latch status of a slot.
- * If this field is NULL, the value passed in the struct hotplug_slot_info
- * will be used when this value is requested by a user.
 * @get_adapter_status: Called to see if an adapter is present in the slot or not.
- * If this field is NULL, the value passed in the struct hotplug_slot_info
- * will be used when this value is requested by a user.
* @reset_slot: Optional interface to allow override of a bus reset for the
* slot for cases where a secondary bus reset can result in spurious
 * hotplug events or where a slot can be reset independently of the bus.
@@ -60,8 +36,6 @@
* set an LED, enable / disable power, etc.)
*/
struct hotplug_slot_ops {
- struct module *owner;
- const char *mod_name;
int (*enable_slot) (struct hotplug_slot *slot);
int (*disable_slot) (struct hotplug_slot *slot);
int (*set_attention_status) (struct hotplug_slot *slot, u8 value);
@@ -70,44 +44,25 @@ struct hotplug_slot_ops {
int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
- int (*reset_slot) (struct hotplug_slot *slot, int probe);
-};
-
-/**
- * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
- * @power_status: if power is enabled or not (1/0)
- * @attention_status: if the attention light is enabled or not (1/0)
- * @latch_status: if the latch (if any) is open or closed (1/0)
- * @adapter_status: if there is a pci board present in the slot or not (1/0)
- *
- * Used to notify the hotplug pci core of the status of a specific slot.
- */
-struct hotplug_slot_info {
- u8 power_status;
- u8 attention_status;
- u8 latch_status;
- u8 adapter_status;
+ int (*reset_slot) (struct hotplug_slot *slot, bool probe);
};
/**
* struct hotplug_slot - used to register a physical slot with the hotplug pci core
* @ops: pointer to the &struct hotplug_slot_ops to be used for this slot
- * @info: pointer to the &struct hotplug_slot_info for the initial values for
- * this slot.
- * @release: called during pci_hp_deregister to free memory allocated in a
- * hotplug_slot structure.
- * @private: used by the hotplug pci controller driver to store whatever it
- * needs.
+ * @slot_list: internal list used to track hotplug PCI slots
+ * @pci_slot: represents a physical slot
+ * @owner: The module owner of this structure
+ * @mod_name: The module name (KBUILD_MODNAME) of this structure
*/
struct hotplug_slot {
- struct hotplug_slot_ops *ops;
- struct hotplug_slot_info *info;
- void (*release) (struct hotplug_slot *slot);
- void *private;
+ const struct hotplug_slot_ops *ops;
/* Variables below this are for use only by the hotplug pci core. */
struct list_head slot_list;
struct pci_slot *pci_slot;
+ struct module *owner;
+ const char *mod_name;
};
static inline const char *hotplug_slot_name(const struct hotplug_slot *slot)
@@ -118,74 +73,39 @@ static inline const char *hotplug_slot_name(const struct hotplug_slot *slot)
int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *pbus, int nr,
const char *name, struct module *owner,
const char *mod_name);
-int pci_hp_deregister(struct hotplug_slot *slot);
-int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot,
- struct hotplug_slot_info *info);
+int __pci_hp_initialize(struct hotplug_slot *slot, struct pci_bus *bus, int nr,
+ const char *name, struct module *owner,
+ const char *mod_name);
+int pci_hp_add(struct hotplug_slot *slot);
+
+void pci_hp_del(struct hotplug_slot *slot);
+void pci_hp_destroy(struct hotplug_slot *slot);
+void pci_hp_deregister(struct hotplug_slot *slot);
/* use a define to avoid include chaining to get THIS_MODULE & friends */
#define pci_hp_register(slot, pbus, devnr, name) \
__pci_hp_register(slot, pbus, devnr, name, THIS_MODULE, KBUILD_MODNAME)
-
-/* PCI Setting Record (Type 0) */
-struct hpp_type0 {
- u32 revision;
- u8 cache_line_size;
- u8 latency_timer;
- u8 enable_serr;
- u8 enable_perr;
-};
-
-/* PCI-X Setting Record (Type 1) */
-struct hpp_type1 {
- u32 revision;
- u8 max_mem_read;
- u8 avg_max_split;
- u16 tot_max_split;
-};
-
-/* PCI Express Setting Record (Type 2) */
-struct hpp_type2 {
- u32 revision;
- u32 unc_err_mask_and;
- u32 unc_err_mask_or;
- u32 unc_err_sever_and;
- u32 unc_err_sever_or;
- u32 cor_err_mask_and;
- u32 cor_err_mask_or;
- u32 adv_err_cap_and;
- u32 adv_err_cap_or;
- u16 pci_exp_devctl_and;
- u16 pci_exp_devctl_or;
- u16 pci_exp_lnkctl_and;
- u16 pci_exp_lnkctl_or;
- u32 sec_unc_err_sever_and;
- u32 sec_unc_err_sever_or;
- u32 sec_unc_err_mask_and;
- u32 sec_unc_err_mask_or;
-};
-
-struct hotplug_params {
- struct hpp_type0 *t0; /* Type0: NULL if not available */
- struct hpp_type1 *t1; /* Type1: NULL if not available */
- struct hpp_type2 *t2; /* Type2: NULL if not available */
- struct hpp_type0 type0_data;
- struct hpp_type1 type1_data;
- struct hpp_type2 type2_data;
-};
+#define pci_hp_initialize(slot, bus, nr, name) \
+ __pci_hp_initialize(slot, bus, nr, name, THIS_MODULE, KBUILD_MODNAME)
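With hotplug_slot_info and ->private gone, controller drivers now embed struct hotplug_slot and recover their own state with container_of(); a sketch under that assumption (all example_* names hypothetical):

	struct example_slot {
		struct hotplug_slot hotplug_slot;	/* embedded, replaces ->private */
		u8 power;
	};

	static int example_get_power(struct hotplug_slot *slot, u8 *value)
	{
		struct example_slot *es =
			container_of(slot, struct example_slot, hotplug_slot);

		*value = es->power;
		return 0;
	}

	static const struct hotplug_slot_ops example_hp_ops = {
		.get_power_status = example_get_power,
	};

	static int example_register(struct example_slot *es, struct pci_bus *bus)
	{
		es->hotplug_slot.ops = &example_hp_ops;
		return pci_hp_register(&es->hotplug_slot, bus, 0, "example");
	}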
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
-int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp);
-bool pciehp_is_native(struct pci_dev *pdev);
-int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags);
+bool pciehp_is_native(struct pci_dev *bridge);
+int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge);
+bool shpchp_is_native(struct pci_dev *bridge);
int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle);
int acpi_pci_detect_ejectable(acpi_handle handle);
#else
-static inline int pci_get_hp_params(struct pci_dev *dev,
- struct hotplug_params *hpp)
+static inline int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge)
{
- return -ENODEV;
+ return 0;
}
-static inline bool pciehp_is_native(struct pci_dev *pdev) { return true; }
+static inline bool pciehp_is_native(struct pci_dev *bridge) { return true; }
+static inline bool shpchp_is_native(struct pci_dev *bridge) { return true; }
#endif
+
+static inline bool hotplug_is_native(struct pci_dev *bridge)
+{
+ return pciehp_is_native(bridge) || shpchp_is_native(bridge);
+}
#endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index c71e532da458..95f33dadb2be 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* PCI Class, Vendor and Device IDs
*
@@ -44,11 +45,13 @@
#define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400
#define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401
#define PCI_CLASS_MULTIMEDIA_PHONE 0x0402
+#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
#define PCI_CLASS_MULTIMEDIA_OTHER 0x0480
#define PCI_BASE_CLASS_MEMORY 0x05
#define PCI_CLASS_MEMORY_RAM 0x0500
#define PCI_CLASS_MEMORY_FLASH 0x0501
+#define PCI_CLASS_MEMORY_CXL 0x0502
#define PCI_CLASS_MEMORY_OTHER 0x0580
#define PCI_BASE_CLASS_BRIDGE 0x06
@@ -57,6 +60,8 @@
#define PCI_CLASS_BRIDGE_EISA 0x0602
#define PCI_CLASS_BRIDGE_MC 0x0603
#define PCI_CLASS_BRIDGE_PCI 0x0604
+#define PCI_CLASS_BRIDGE_PCI_NORMAL 0x060400
+#define PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE 0x060401
#define PCI_CLASS_BRIDGE_PCMCIA 0x0605
#define PCI_CLASS_BRIDGE_NUBUS 0x0606
#define PCI_CLASS_BRIDGE_CARDBUS 0x0607
@@ -70,6 +75,9 @@
#define PCI_CLASS_COMMUNICATION_MODEM 0x0703
#define PCI_CLASS_COMMUNICATION_OTHER 0x0780
+/* Interface for SERIAL/MODEM */
+#define PCI_SERIAL_16550_COMPATIBLE 0x02
+
#define PCI_BASE_CLASS_SYSTEM 0x08
#define PCI_CLASS_SYSTEM_PIC 0x0800
#define PCI_CLASS_SYSTEM_PIC_IOAPIC 0x080010
@@ -79,6 +87,7 @@
#define PCI_CLASS_SYSTEM_RTC 0x0803
#define PCI_CLASS_SYSTEM_PCI_HOTPLUG 0x0804
#define PCI_CLASS_SYSTEM_SDHCI 0x0805
+#define PCI_CLASS_SYSTEM_RCEC 0x0807
#define PCI_CLASS_SYSTEM_OTHER 0x0880
#define PCI_BASE_CLASS_INPUT 0x09
@@ -115,6 +124,10 @@
#define PCI_CLASS_SERIAL_USB_DEVICE 0x0c03fe
#define PCI_CLASS_SERIAL_FIBER 0x0c04
#define PCI_CLASS_SERIAL_SMBUS 0x0c05
+#define PCI_CLASS_SERIAL_IPMI 0x0c07
+#define PCI_CLASS_SERIAL_IPMI_SMIC 0x0c0700
+#define PCI_CLASS_SERIAL_IPMI_KCS 0x0c0701
+#define PCI_CLASS_SERIAL_IPMI_BT 0x0c0702
#define PCI_BASE_CLASS_WIRELESS 0x0d
#define PCI_CLASS_WIRELESS_RF_CONTROLLER 0x0d10
@@ -141,6 +154,9 @@
#define PCI_CLASS_OTHERS 0xff
/* Vendors and devices. Sort key: vendor first, device next. */
+#define PCI_VENDOR_ID_PCI_SIG 0x0001
+
+#define PCI_VENDOR_ID_LOONGSON 0x0014
#define PCI_VENDOR_ID_TTTECH 0x0357
#define PCI_DEVICE_ID_TTTECH_MC322 0x000a
@@ -148,6 +164,8 @@
#define PCI_VENDOR_ID_DYNALINK 0x0675
#define PCI_DEVICE_ID_DYNALINK_IS64PH 0x1702
+#define PCI_VENDOR_ID_UBIQUITI 0x0777
+
#define PCI_VENDOR_ID_BERKOM 0x0871
#define PCI_DEVICE_ID_BERKOM_A1T 0xffa1
#define PCI_DEVICE_ID_BERKOM_T_CONCEPT 0xffa2
@@ -537,6 +555,19 @@
#define PCI_DEVICE_ID_AMD_16H_NB_F4 0x1534
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F3 0x1583
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F4 0x1584
+#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
+#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
+#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
+#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b
+#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
+#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3 0x1727
+#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653
+#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F3 0x14b0
+#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F3 0x167c
+#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F3 0x166d
+#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F3 0x14e3
+#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F3 0x14f3
+#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F3 0x12fb
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
@@ -557,6 +588,7 @@
#define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443
#define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443
#define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445
+#define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450
#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460
#define PCI_DEVICE_ID_AMD_8111_LPC 0x7468
#define PCI_DEVICE_ID_AMD_8111_IDE 0x7469
@@ -576,6 +608,7 @@
#define PCI_DEVICE_ID_AMD_CS5536_EHC 0x2095
#define PCI_DEVICE_ID_AMD_CS5536_UDC 0x2096
#define PCI_DEVICE_ID_AMD_CS5536_UOC 0x2097
+#define PCI_DEVICE_ID_AMD_CS5536_DEV_IDE 0x2092
#define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A
#define PCI_DEVICE_ID_AMD_LX_VIDEO 0x2081
#define PCI_DEVICE_ID_AMD_LX_AES 0x2082
@@ -611,6 +644,8 @@
#define PCI_DEVICE_ID_DELL_RAC4 0x0012
#define PCI_DEVICE_ID_DELL_PERC5 0x0015
+#define PCI_SUBVENDOR_ID_DELL 0x1028
+
#define PCI_VENDOR_ID_MATROX 0x102B
#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518
#define PCI_DEVICE_ID_MATROX_MIL 0x0519
@@ -862,6 +897,7 @@
#define PCI_DEVICE_ID_TI_X620 0xac8d
#define PCI_DEVICE_ID_TI_X420 0xac8e
#define PCI_DEVICE_ID_TI_XX20_FM 0xac8f
+#define PCI_DEVICE_ID_TI_J721E 0xb00d
#define PCI_DEVICE_ID_TI_DRA74x 0xb500
#define PCI_DEVICE_ID_TI_DRA72x 0xb501
@@ -1057,7 +1093,6 @@
#define PCI_VENDOR_ID_SGI 0x10a9
#define PCI_DEVICE_ID_SGI_IOC3 0x0003
#define PCI_DEVICE_ID_SGI_LITHIUM 0x1002
-#define PCI_DEVICE_ID_SGI_IOC4 0x100a
#define PCI_VENDOR_ID_WINBOND 0x10ad
#define PCI_DEVICE_ID_WINBOND_82C105 0x0105
@@ -1098,8 +1133,9 @@
#define PCI_DEVICE_ID_3COM_3CR990SVR 0x990a
#define PCI_VENDOR_ID_AL 0x10b9
+#define PCI_DEVICE_ID_AL_M1489 0x1489
#define PCI_DEVICE_ID_AL_M1533 0x1533
-#define PCI_DEVICE_ID_AL_M1535 0x1535
+#define PCI_DEVICE_ID_AL_M1535 0x1535
#define PCI_DEVICE_ID_AL_M1541 0x1541
#define PCI_DEVICE_ID_AL_M1563 0x1563
#define PCI_DEVICE_ID_AL_M1621 0x1621
@@ -1127,6 +1163,8 @@
#define PCI_VENDOR_ID_TCONRAD 0x10da
#define PCI_DEVICE_ID_TCONRAD_TOKENRING 0x0508
+#define PCI_VENDOR_ID_ROHM 0x10db
+
#define PCI_VENDOR_ID_NVIDIA 0x10de
#define PCI_DEVICE_ID_NVIDIA_TNT 0x0020
#define PCI_DEVICE_ID_NVIDIA_TNT2 0x0028
@@ -1321,6 +1359,7 @@
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP78S_SMBUS 0x0752
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS 0x07D8
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_320M 0x08A0
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS 0x0AA2
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA 0x0D85
@@ -1329,6 +1368,7 @@
#define PCI_DEVICE_ID_IMS_TT3D 0x9135
#define PCI_VENDOR_ID_AMCC 0x10e8
+#define PCI_VENDOR_ID_AMPERE 0x1def
#define PCI_VENDOR_ID_INTERG 0x10ea
#define PCI_DEVICE_ID_INTERG_1682 0x1682
@@ -1557,6 +1597,8 @@
#define PCI_DEVICE_ID_SERVERWORKS_CSB6LPC 0x0227
#define PCI_DEVICE_ID_SERVERWORKS_HT1100LD 0x0408
+#define PCI_VENDOR_ID_ALTERA 0x1172
+
#define PCI_VENDOR_ID_SBE 0x1176
#define PCI_DEVICE_ID_SBE_WANXL100 0x0301
#define PCI_DEVICE_ID_SBE_WANXL200 0x0302
@@ -1659,39 +1701,11 @@
#define PCI_DEVICE_ID_COMPEX_ENET100VG4 0x0112
#define PCI_VENDOR_ID_PMC_Sierra 0x11f8
+#define PCI_VENDOR_ID_MICROSEMI 0x11f8
#define PCI_VENDOR_ID_RP 0x11fe
-#define PCI_DEVICE_ID_RP32INTF 0x0001
-#define PCI_DEVICE_ID_RP8INTF 0x0002
-#define PCI_DEVICE_ID_RP16INTF 0x0003
-#define PCI_DEVICE_ID_RP4QUAD 0x0004
-#define PCI_DEVICE_ID_RP8OCTA 0x0005
-#define PCI_DEVICE_ID_RP8J 0x0006
-#define PCI_DEVICE_ID_RP4J 0x0007
-#define PCI_DEVICE_ID_RP8SNI 0x0008
-#define PCI_DEVICE_ID_RP16SNI 0x0009
-#define PCI_DEVICE_ID_RPP4 0x000A
-#define PCI_DEVICE_ID_RPP8 0x000B
-#define PCI_DEVICE_ID_RP4M 0x000D
-#define PCI_DEVICE_ID_RP2_232 0x000E
-#define PCI_DEVICE_ID_RP2_422 0x000F
-#define PCI_DEVICE_ID_URP32INTF 0x0801
-#define PCI_DEVICE_ID_URP8INTF 0x0802
-#define PCI_DEVICE_ID_URP16INTF 0x0803
-#define PCI_DEVICE_ID_URP8OCTA 0x0805
-#define PCI_DEVICE_ID_UPCI_RM3_8PORT 0x080C
-#define PCI_DEVICE_ID_UPCI_RM3_4PORT 0x080D
-#define PCI_DEVICE_ID_CRP16INTF 0x0903
#define PCI_VENDOR_ID_CYCLADES 0x120e
-#define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100
-#define PCI_DEVICE_ID_CYCLOM_Y_Hi 0x0101
-#define PCI_DEVICE_ID_CYCLOM_4Y_Lo 0x0102
-#define PCI_DEVICE_ID_CYCLOM_4Y_Hi 0x0103
-#define PCI_DEVICE_ID_CYCLOM_8Y_Lo 0x0104
-#define PCI_DEVICE_ID_CYCLOM_8Y_Hi 0x0105
-#define PCI_DEVICE_ID_CYCLOM_Z_Lo 0x0200
-#define PCI_DEVICE_ID_CYCLOM_Z_Hi 0x0201
#define PCI_DEVICE_ID_PC300_RX_2 0x0300
#define PCI_DEVICE_ID_PC300_RX_1 0x0301
#define PCI_DEVICE_ID_PC300_TE_2 0x0310
@@ -1733,7 +1747,7 @@
#define PCI_VENDOR_ID_STALLION 0x124d
/* Allied Telesyn */
-#define PCI_VENDOR_ID_AT 0x1259
+#define PCI_VENDOR_ID_AT 0x1259
#define PCI_SUBDEVICE_ID_AT_2700FX 0x2701
#define PCI_SUBDEVICE_ID_AT_2701FX 0x2703
@@ -1809,6 +1823,12 @@
#define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2
#define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018
+#define PCI_VENDOR_ID_PERICOM 0x12D8
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958
+
#define PCI_SUBVENDOR_ID_CHASE_PCIFAST 0x12E0
#define PCI_SUBDEVICE_ID_CHASE_PCIFAST4 0x0031
#define PCI_SUBDEVICE_ID_CHASE_PCIFAST8 0x0021
@@ -1931,6 +1951,8 @@
#define PCI_VENDOR_ID_DIGIGRAM 0x1369
#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_SERIAL_SUBSYSTEM 0xc001
#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_CAE_SERIAL_SUBSYSTEM 0xc002
+#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ESE_SERIAL_SUBSYSTEM 0xc021
+#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ESE_CAE_SERIAL_SUBSYSTEM 0xc022
#define PCI_VENDOR_ID_KAWASAKI 0x136b
#define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01
@@ -1953,24 +1975,6 @@
#define PCI_DEVICE_ID_APPLICOM_PCI2000PFB 0x0003
#define PCI_VENDOR_ID_MOXA 0x1393
-#define PCI_DEVICE_ID_MOXA_RC7000 0x0001
-#define PCI_DEVICE_ID_MOXA_CP102 0x1020
-#define PCI_DEVICE_ID_MOXA_CP102UL 0x1021
-#define PCI_DEVICE_ID_MOXA_CP102U 0x1022
-#define PCI_DEVICE_ID_MOXA_C104 0x1040
-#define PCI_DEVICE_ID_MOXA_CP104U 0x1041
-#define PCI_DEVICE_ID_MOXA_CP104JU 0x1042
-#define PCI_DEVICE_ID_MOXA_CP104EL 0x1043
-#define PCI_DEVICE_ID_MOXA_CT114 0x1140
-#define PCI_DEVICE_ID_MOXA_CP114 0x1141
-#define PCI_DEVICE_ID_MOXA_CP118U 0x1180
-#define PCI_DEVICE_ID_MOXA_CP118EL 0x1181
-#define PCI_DEVICE_ID_MOXA_CP132 0x1320
-#define PCI_DEVICE_ID_MOXA_CP132U 0x1321
-#define PCI_DEVICE_ID_MOXA_CP134U 0x1340
-#define PCI_DEVICE_ID_MOXA_C168 0x1680
-#define PCI_DEVICE_ID_MOXA_CP168U 0x1681
-#define PCI_DEVICE_ID_MOXA_CP168EL 0x1682
#define PCI_DEVICE_ID_MOXA_CP204J 0x2040
#define PCI_DEVICE_ID_MOXA_C218 0x2180
#define PCI_DEVICE_ID_MOXA_C320 0x3200
@@ -2030,8 +2034,6 @@
#define PCI_DEVICE_ID_EXAR_XR17V358 0x0358
#define PCI_VENDOR_ID_MICROGATE 0x13c0
-#define PCI_DEVICE_ID_MICROGATE_USC 0x0010
-#define PCI_DEVICE_ID_MICROGATE_SCA 0x0030
#define PCI_VENDOR_ID_3WARE 0x13C1
#define PCI_DEVICE_ID_3WARE_1000 0x1000
@@ -2081,6 +2083,9 @@
#define PCI_DEVICE_ID_ICE_1712 0x1712
#define PCI_DEVICE_ID_VT1724 0x1724
+#define PCI_VENDOR_ID_MICROSOFT 0x1414
+#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
+
#define PCI_VENDOR_ID_OXSEMI 0x1415
#define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403
#define PCI_DEVICE_ID_OXSEMI_PCIe840 0xC000
@@ -2111,6 +2116,9 @@
#define PCI_VENDOR_ID_MYRICOM 0x14c1
+#define PCI_VENDOR_ID_MEDIATEK 0x14c3
+#define PCI_DEVICE_ID_MEDIATEK_7629 0x7629
+
#define PCI_VENDOR_ID_TITAN 0x14D2
#define PCI_DEVICE_ID_TITAN_010L 0x8001
#define PCI_DEVICE_ID_TITAN_100L 0x8010
@@ -2342,6 +2350,12 @@
#define PCI_DEVICE_ID_CENATEK_IDE 0x0001
#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
+#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
+#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce
+#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf
+#define PCI_DEVICE_ID_SYNOPSYS_EDDA 0xedda
+
+#define PCI_VENDOR_ID_USR 0x16ec
#define PCI_VENDOR_ID_VITESSE 0x1725
#define PCI_DEVICE_ID_VITESSE_VSC7174 0x7174
@@ -2377,8 +2391,14 @@
#define PCI_DEVICE_ID_RDC_R6061 0x6061
#define PCI_DEVICE_ID_RDC_D1010 0x1010
+#define PCI_VENDOR_ID_GLI 0x17a0
+
#define PCI_VENDOR_ID_LENOVO 0x17aa
+#define PCI_VENDOR_ID_QCOM 0x17cb
+
+#define PCI_VENDOR_ID_CDNS 0x17cd
+
#define PCI_VENDOR_ID_ARECA 0x17d3
#define PCI_DEVICE_ID_ARECA_1110 0x1110
#define PCI_DEVICE_ID_ARECA_1120 0x1120
@@ -2429,7 +2449,8 @@
#define PCI_VENDOR_ID_TDI 0x192E
#define PCI_DEVICE_ID_TDI_EHCI 0x0101
-#define PCI_VENDOR_ID_FREESCALE 0x1957
+#define PCI_VENDOR_ID_FREESCALE 0x1957 /* duplicate: NXP */
+#define PCI_VENDOR_ID_NXP 0x1957 /* duplicate: FREESCALE */
#define PCI_DEVICE_ID_MPC8308 0xc006
#define PCI_DEVICE_ID_MPC8315E 0x00b4
#define PCI_DEVICE_ID_MPC8315 0x00b5
@@ -2520,13 +2541,17 @@
#define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700
#define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff
-#define PCI_VENDOR_ID_HUAWEI 0x19e5
+#define PCI_VENDOR_ID_HUAWEI 0x19e5
+#define PCI_DEVICE_ID_HUAWEI_ZIP_VF 0xa251
+#define PCI_DEVICE_ID_HUAWEI_SEC_VF 0xa256
+#define PCI_DEVICE_ID_HUAWEI_HPRE_VF 0xa259
#define PCI_VENDOR_ID_NETRONOME 0x19ee
-#define PCI_DEVICE_ID_NETRONOME_NFP3200 0x3200
-#define PCI_DEVICE_ID_NETRONOME_NFP3240 0x3240
+#define PCI_DEVICE_ID_NETRONOME_NFP3800 0x3800
#define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000
+#define PCI_DEVICE_ID_NETRONOME_NFP5000 0x5000
#define PCI_DEVICE_ID_NETRONOME_NFP6000 0x6000
+#define PCI_DEVICE_ID_NETRONOME_NFP3800_VF 0x3803
#define PCI_DEVICE_ID_NETRONOME_NFP6000_VF 0x6003
#define PCI_VENDOR_ID_QMI 0x1a32
@@ -2539,9 +2564,25 @@
#define PCI_VENDOR_ID_ASMEDIA 0x1b21
+#define PCI_VENDOR_ID_REDHAT 0x1b36
+
+#define PCI_VENDOR_ID_SILICOM_DENMARK 0x1c2c
+
+#define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36
+
#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8
#define PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD 0x0001
+#define PCI_VENDOR_ID_AMAZON 0x1d0f
+
+#define PCI_VENDOR_ID_ZHAOXIN 0x1d17
+
+#define PCI_VENDOR_ID_HYGON 0x1d94
+
+#define PCI_VENDOR_ID_FUNGIBLE 0x1dad
+
+#define PCI_VENDOR_ID_HXT 0x1dbf
+
#define PCI_VENDOR_ID_TEKRAM 0x1de1
#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
@@ -2550,6 +2591,9 @@
#define PCI_DEVICE_ID_TEHUTI_3010 0x3010
#define PCI_DEVICE_ID_TEHUTI_3014 0x3014
+#define PCI_VENDOR_ID_SUNIX 0x1fd4
+#define PCI_DEVICE_ID_SUNIX_1999 0x1999
+
#define PCI_VENDOR_ID_HINT 0x3388
#define PCI_DEVICE_ID_HINT_VXPROII_IDE 0x8013
@@ -2594,15 +2638,18 @@
#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329
-#define PCI_DEVICE_ID_INTEL_PXH_1 0x032A
-#define PCI_DEVICE_ID_INTEL_PXHV 0x032C
+#define PCI_DEVICE_ID_INTEL_PXH_1 0x032a
+#define PCI_DEVICE_ID_INTEL_PXHV 0x032c
#define PCI_DEVICE_ID_INTEL_80332_0 0x0330
#define PCI_DEVICE_ID_INTEL_80332_1 0x0332
#define PCI_DEVICE_ID_INTEL_80333_0 0x0370
#define PCI_DEVICE_ID_INTEL_80333_1 0x0372
+#define PCI_DEVICE_ID_INTEL_QAT_DH895XCC 0x0435
+#define PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF 0x0443
#define PCI_DEVICE_ID_INTEL_82375 0x0482
#define PCI_DEVICE_ID_INTEL_82424 0x0483
#define PCI_DEVICE_ID_INTEL_82378 0x0484
+#define PCI_DEVICE_ID_INTEL_82425 0x0486
#define PCI_DEVICE_ID_INTEL_MRST_SD0 0x0807
#define PCI_DEVICE_ID_INTEL_MRST_SD1 0x0808
#define PCI_DEVICE_ID_INTEL_MFD_SD 0x0820
@@ -2610,14 +2657,14 @@
#define PCI_DEVICE_ID_INTEL_MFD_SDIO2 0x0822
#define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823
#define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824
-#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F
-#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095E
+#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084f
+#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095e
#define PCI_DEVICE_ID_INTEL_I960 0x0960
#define PCI_DEVICE_ID_INTEL_I960RM 0x0962
#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60
#define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062
#define PCI_DEVICE_ID_INTEL_82573E_SOL 0x1085
-#define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108F
+#define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108f
#define PCI_DEVICE_ID_INTEL_82815_MC 0x1130
#define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132
#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
@@ -2649,6 +2696,8 @@
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI 0x1577
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE 0x1578
#define PCI_DEVICE_ID_INTEL_80960_RP 0x1960
+#define PCI_DEVICE_ID_INTEL_QAT_C3XXX 0x19e2
+#define PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF 0x19e3
#define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21
#define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
#define PCI_DEVICE_ID_INTEL_IOAT 0x1a38
@@ -2659,6 +2708,7 @@
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI 0x1e31
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN 0x1e40
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX 0x1e5f
+#define PCI_DEVICE_ID_INTEL_VMD_201D 0x201d
#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310
#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX 0x231f
#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
@@ -2708,12 +2758,6 @@
#define PCI_DEVICE_ID_INTEL_82801EB_11 0x24db
#define PCI_DEVICE_ID_INTEL_82801EB_12 0x24dc
#define PCI_DEVICE_ID_INTEL_82801EB_13 0x24dd
-#define PCI_DEVICE_ID_INTEL_ESB_1 0x25a1
-#define PCI_DEVICE_ID_INTEL_ESB_2 0x25a2
-#define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4
-#define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6
-#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab
-#define PCI_DEVICE_ID_INTEL_ESB_10 0x25ac
#define PCI_DEVICE_ID_INTEL_82820_HB 0x2500
#define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501
#define PCI_DEVICE_ID_INTEL_82850_HB 0x2530
@@ -2728,14 +2772,15 @@
#define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582
#define PCI_DEVICE_ID_INTEL_82915GM_HB 0x2590
#define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592
-#define PCI_DEVICE_ID_INTEL_5000_ERR 0x25F0
-#define PCI_DEVICE_ID_INTEL_5000_FBD0 0x25F5
-#define PCI_DEVICE_ID_INTEL_5000_FBD1 0x25F6
-#define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770
-#define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772
-#define PCI_DEVICE_ID_INTEL_3000_HB 0x2778
-#define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27A0
-#define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2
+#define PCI_DEVICE_ID_INTEL_ESB_1 0x25a1
+#define PCI_DEVICE_ID_INTEL_ESB_2 0x25a2
+#define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4
+#define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6
+#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab
+#define PCI_DEVICE_ID_INTEL_ESB_10 0x25ac
+#define PCI_DEVICE_ID_INTEL_5000_ERR 0x25f0
+#define PCI_DEVICE_ID_INTEL_5000_FBD0 0x25f5
+#define PCI_DEVICE_ID_INTEL_5000_FBD1 0x25f6
#define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640
#define PCI_DEVICE_ID_INTEL_ICH6_1 0x2641
#define PCI_DEVICE_ID_INTEL_ICH6_2 0x2642
@@ -2747,6 +2792,11 @@
#define PCI_DEVICE_ID_INTEL_ESB2_14 0x2698
#define PCI_DEVICE_ID_INTEL_ESB2_17 0x269b
#define PCI_DEVICE_ID_INTEL_ESB2_18 0x269e
+#define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770
+#define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772
+#define PCI_DEVICE_ID_INTEL_3000_HB 0x2778
+#define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27a0
+#define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27a2
#define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8
#define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9
#define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0
@@ -2763,6 +2813,7 @@
#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815
#define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e
#define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850
+#define PCI_DEVICE_ID_INTEL_VMD_28C0 0x28c0
#define PCI_DEVICE_ID_INTEL_ICH9_0 0x2910
#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917
#define PCI_DEVICE_ID_INTEL_ICH9_2 0x2912
@@ -2798,7 +2849,7 @@
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_PHY0 0x2c91
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR 0x2c98
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD 0x2c99
-#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST 0x2c9C
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST 0x2c9c
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL 0x2ca0
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR 0x2ca1
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK 0x2ca2
@@ -2863,6 +2914,8 @@
#define PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717
#define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718
#define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719
+#define PCI_DEVICE_ID_INTEL_QAT_C62X 0x37c8
+#define PCI_DEVICE_ID_INTEL_QAT_C62X_VF 0x37c9
#define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14
#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16
#define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18
@@ -2908,16 +2961,16 @@
#define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */
#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f
-#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
-#define PCI_DEVICE_ID_INTEL_5100_19 0x65f3
-#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
-#define PCI_DEVICE_ID_INTEL_5100_22 0x65f6
#define PCI_DEVICE_ID_INTEL_5400_ERR 0x4030
#define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035
#define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036
-#define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff
#define PCI_DEVICE_ID_INTEL_EP80579_0 0x5031
#define PCI_DEVICE_ID_INTEL_EP80579_1 0x5032
+#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
+#define PCI_DEVICE_ID_INTEL_5100_19 0x65f3
+#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
+#define PCI_DEVICE_ID_INTEL_5100_22 0x65f6
+#define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff
#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010
#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020
@@ -2957,8 +3010,11 @@
#define PCI_DEVICE_ID_INTEL_84460GX 0x84ea
#define PCI_DEVICE_ID_INTEL_IXP4XX 0x8500
#define PCI_DEVICE_ID_INTEL_IXP2800 0x9004
+#define PCI_DEVICE_ID_INTEL_VMD_9A0B 0x9a0b
#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152
+#define PCI_VENDOR_ID_WANGXUN 0x8088
+
#define PCI_VENDOR_ID_SCALEMP 0x8686
#define PCI_DEVICE_ID_SCALEMP_VSMP_CTL 0x1010
@@ -3039,6 +3095,8 @@
#define PCI_VENDOR_ID_3COM_2 0xa727
+#define PCI_VENDOR_ID_SOLIDRUN 0xd063
+
#define PCI_VENDOR_ID_DIGIUM 0xd161
#define PCI_DEVICE_ID_DIGIUM_HFC4S 0xb410
@@ -3060,4 +3118,6 @@
#define PCI_VENDOR_ID_OCZ 0x1b85
+#define PCI_VENDOR_ID_NCUBE 0x10ff
+
#endif /* _LINUX_PCI_IDS_H */
diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h
deleted file mode 100644
index afcd130ab3a9..000000000000
--- a/include/linux/pcieport_if.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * File: pcieport_if.h
- * Purpose: PCI Express Port Bus Driver's IF Data Structure
- *
- * Copyright (C) 2004 Intel
- * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
- */
-
-#ifndef _PCIEPORT_IF_H_
-#define _PCIEPORT_IF_H_
-
-/* Port Type */
-#define PCIE_ANY_PORT (~0)
-
-/* Service Type */
-#define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */
-#define PCIE_PORT_SERVICE_PME (1 << PCIE_PORT_SERVICE_PME_SHIFT)
-#define PCIE_PORT_SERVICE_AER_SHIFT 1 /* Advanced Error Reporting */
-#define PCIE_PORT_SERVICE_AER (1 << PCIE_PORT_SERVICE_AER_SHIFT)
-#define PCIE_PORT_SERVICE_HP_SHIFT 2 /* Native Hotplug */
-#define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT)
-#define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */
-#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT)
-#define PCIE_PORT_SERVICE_DPC_SHIFT 4 /* Downstream Port Containment */
-#define PCIE_PORT_SERVICE_DPC (1 << PCIE_PORT_SERVICE_DPC_SHIFT)
-
-struct pcie_device {
- int irq; /* Service IRQ/MSI/MSI-X Vector */
- struct pci_dev *port; /* Root/Upstream/Downstream Port */
- u32 service; /* Port service this device represents */
- void *priv_data; /* Service Private Data */
- struct device device; /* Generic Device Interface */
-};
-#define to_pcie_device(d) container_of(d, struct pcie_device, device)
-
-static inline void set_service_data(struct pcie_device *dev, void *data)
-{
- dev->priv_data = data;
-}
-
-static inline void* get_service_data(struct pcie_device *dev)
-{
- return dev->priv_data;
-}
-
-struct pcie_port_service_driver {
- const char *name;
- int (*probe) (struct pcie_device *dev);
- void (*remove) (struct pcie_device *dev);
- int (*suspend) (struct pcie_device *dev);
- int (*resume) (struct pcie_device *dev);
-
- /* Service Error Recovery Handler */
- const struct pci_error_handlers *err_handler;
-
- /* Link Reset Capability - AER service driver specific */
- pci_ers_result_t (*reset_link) (struct pci_dev *dev);
-
- int port_type; /* Type of the port this driver can handle */
- u32 service; /* Port service this device represents */
-
- struct device_driver driver;
-};
-#define to_service_driver(d) \
- container_of(d, struct pcie_port_service_driver, driver)
-
-int pcie_port_service_register(struct pcie_port_service_driver *new);
-void pcie_port_service_unregister(struct pcie_port_service_driver *new);
-
-#endif /* _PCIEPORT_IF_H_ */
diff --git a/include/linux/pcs-altera-tse.h b/include/linux/pcs-altera-tse.h
new file mode 100644
index 000000000000..92ab9f08e835
--- /dev/null
+++ b/include/linux/pcs-altera-tse.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 Bootlin
+ *
+ * Maxime Chevallier <maxime.chevallier@bootlin.com>
+ */
+
+#ifndef __LINUX_PCS_ALTERA_TSE_H
+#define __LINUX_PCS_ALTERA_TSE_H
+
+struct phylink_pcs;
+struct net_device;
+
+struct phylink_pcs *alt_tse_pcs_create(struct net_device *ndev,
+ void __iomem *pcs_base, int reg_width);
+
+#endif /* __LINUX_PCS_ALTERA_TSE_H */
diff --git a/include/linux/pcs-lynx.h b/include/linux/pcs-lynx.h
new file mode 100644
index 000000000000..5712cc2ce775
--- /dev/null
+++ b/include/linux/pcs-lynx.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2020 NXP
+ * Lynx PCS helpers
+ */
+
+#ifndef __LINUX_PCS_LYNX_H
+#define __LINUX_PCS_LYNX_H
+
+#include <linux/mdio.h>
+#include <linux/phylink.h>
+
+struct mdio_device *lynx_get_mdio_device(struct phylink_pcs *pcs);
+
+struct phylink_pcs *lynx_pcs_create(struct mdio_device *mdio);
+
+void lynx_pcs_destroy(struct phylink_pcs *pcs);
+
+#endif /* __LINUX_PCS_LYNX_H */
diff --git a/include/linux/pcs-rzn1-miic.h b/include/linux/pcs-rzn1-miic.h
new file mode 100644
index 000000000000..56d12b21365d
--- /dev/null
+++ b/include/linux/pcs-rzn1-miic.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 Schneider Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#ifndef __LINUX_PCS_MIIC_H
+#define __LINUX_PCS_MIIC_H
+
+struct phylink;
+struct device_node;
+
+struct phylink_pcs *miic_create(struct device *dev, struct device_node *np);
+
+void miic_destroy(struct phylink_pcs *pcs);
+
+#endif /* __LINUX_PCS_MIIC_H */
diff --git a/include/linux/pcs/pcs-mtk-lynxi.h b/include/linux/pcs/pcs-mtk-lynxi.h
new file mode 100644
index 000000000000..be3b4ab32f4a
--- /dev/null
+++ b/include/linux/pcs/pcs-mtk-lynxi.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_PCS_MTK_LYNXI_H
+#define __LINUX_PCS_MTK_LYNXI_H
+
+#include <linux/phylink.h>
+#include <linux/regmap.h>
+
+#define MTK_SGMII_FLAG_PN_SWAP BIT(0)
+struct phylink_pcs *mtk_pcs_lynxi_create(struct device *dev,
+ struct regmap *regmap,
+ u32 ana_rgc3, u32 flags);
+void mtk_pcs_lynxi_destroy(struct phylink_pcs *pcs);
+#endif
diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h
new file mode 100644
index 000000000000..d2da1e0b4a92
--- /dev/null
+++ b/include/linux/pcs/pcs-xpcs.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare XPCS helpers
+ */
+
+#ifndef __LINUX_PCS_XPCS_H
+#define __LINUX_PCS_XPCS_H
+
+#include <linux/phy.h>
+#include <linux/phylink.h>
+
+#define NXP_SJA1105_XPCS_ID 0x00000010
+#define NXP_SJA1110_XPCS_ID 0x00000020
+
+/* AN mode */
+#define DW_AN_C73 1
+#define DW_AN_C37_SGMII 2
+#define DW_2500BASEX 3
+#define DW_AN_C37_1000BASEX 4
+
+struct xpcs_id;
+
+struct dw_xpcs {
+ struct mdio_device *mdiodev;
+ const struct xpcs_id *id;
+ struct phylink_pcs pcs;
+};
+
+int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface);
+void xpcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex);
+int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
+ unsigned int mode, const unsigned long *advertising);
+void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces);
+int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns,
+ int enable);
+struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev,
+ phy_interface_t interface);
+void xpcs_destroy(struct dw_xpcs *xpcs);
+
+#endif /* __LINUX_PCS_XPCS_H */
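A sketch of how a MAC driver might use this API (the priv layout is hypothetical; the embedded phylink_pcs is what eventually gets handed to phylink, e.g. returned from ->mac_select_pcs()):

	/* probe time */
	priv->xpcs = xpcs_create(mdiodev, PHY_INTERFACE_MODE_SGMII);
	if (IS_ERR(priv->xpcs))
		return PTR_ERR(priv->xpcs);
	priv->pcs = &priv->xpcs->pcs;

	/* teardown */
	xpcs_destroy(priv->xpcs);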
diff --git a/include/linux/pda_power.h b/include/linux/pda_power.h
deleted file mode 100644
index 2bb62bf296ac..000000000000
--- a/include/linux/pda_power.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Common power driver for PDAs and phones with one or two external
- * power supplies (AC/USB) connected to main and backup batteries,
- * and optional builtin charger.
- *
- * Copyright © 2007 Anton Vorontsov <cbou@mail.ru>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __PDA_POWER_H__
-#define __PDA_POWER_H__
-
-#define PDA_POWER_CHARGE_AC (1 << 0)
-#define PDA_POWER_CHARGE_USB (1 << 1)
-
-struct device;
-
-struct pda_power_pdata {
- int (*init)(struct device *dev);
- int (*is_ac_online)(void);
- int (*is_usb_online)(void);
- void (*set_charge)(int flags);
- void (*exit)(struct device *dev);
- int (*suspend)(pm_message_t state);
- int (*resume)(void);
-
- char **supplied_to;
- size_t num_supplicants;
-
- unsigned int wait_for_status; /* msecs, default is 500 */
- unsigned int wait_for_charger; /* msecs, default is 500 */
- unsigned int polling_interval; /* msecs, default is 2000 */
-
- unsigned long ac_max_uA; /* current to draw when on AC */
-
- bool use_otg_notifier;
-};
-
-#endif /* __PDA_POWER_H__ */
diff --git a/include/linux/pds/pds_adminq.h b/include/linux/pds/pds_adminq.h
new file mode 100644
index 000000000000..98a60ce87b92
--- /dev/null
+++ b/include/linux/pds/pds_adminq.h
@@ -0,0 +1,647 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#ifndef _PDS_CORE_ADMINQ_H_
+#define _PDS_CORE_ADMINQ_H_
+
+#define PDSC_ADMINQ_MAX_POLL_INTERVAL 256
+
+enum pds_core_adminq_flags {
+ PDS_AQ_FLAG_FASTPOLL = BIT(1), /* completion poll at 1ms */
+};
+
+/*
+ * enum pds_core_adminq_opcode - AdminQ command opcodes
+ * These commands are only processed on AdminQ, not available in devcmd
+ */
+enum pds_core_adminq_opcode {
+ PDS_AQ_CMD_NOP = 0,
+
+ /* Client control */
+ PDS_AQ_CMD_CLIENT_REG = 6,
+ PDS_AQ_CMD_CLIENT_UNREG = 7,
+ PDS_AQ_CMD_CLIENT_CMD = 8,
+
+ /* LIF commands */
+ PDS_AQ_CMD_LIF_IDENTIFY = 20,
+ PDS_AQ_CMD_LIF_INIT = 21,
+ PDS_AQ_CMD_LIF_RESET = 22,
+ PDS_AQ_CMD_LIF_GETATTR = 23,
+ PDS_AQ_CMD_LIF_SETATTR = 24,
+ PDS_AQ_CMD_LIF_SETPHC = 25,
+
+ PDS_AQ_CMD_RX_MODE_SET = 30,
+ PDS_AQ_CMD_RX_FILTER_ADD = 31,
+ PDS_AQ_CMD_RX_FILTER_DEL = 32,
+
+ /* Queue commands */
+ PDS_AQ_CMD_Q_IDENTIFY = 39,
+ PDS_AQ_CMD_Q_INIT = 40,
+ PDS_AQ_CMD_Q_CONTROL = 41,
+
+ /* SR/IOV commands */
+ PDS_AQ_CMD_VF_GETATTR = 60,
+ PDS_AQ_CMD_VF_SETATTR = 61,
+};
+
+/*
+ * enum pds_core_notifyq_opcode - NotifyQ event codes
+ */
+enum pds_core_notifyq_opcode {
+ PDS_EVENT_LINK_CHANGE = 1,
+ PDS_EVENT_RESET = 2,
+ PDS_EVENT_XCVR = 5,
+ PDS_EVENT_CLIENT = 6,
+};
+
+#define PDS_COMP_COLOR_MASK 0x80
+
+/**
+ * struct pds_core_notifyq_event - Generic event reporting structure
+ * @eid: event number
+ * @ecode: event code
+ *
+ * This is the generic event report struct from which the other
+ * actual events will be formed.
+ */
+struct pds_core_notifyq_event {
+ __le64 eid;
+ __le16 ecode;
+};
+
+/**
+ * struct pds_core_link_change_event - Link change event notification
+ * @eid: event number
+ * @ecode: event code = PDS_EVENT_LINK_CHANGE
+ * @link_status: link up/down, with error bits
+ * @link_speed: speed of the network link
+ *
+ * Sent when the network link state changes between UP and DOWN
+ */
+struct pds_core_link_change_event {
+ __le64 eid;
+ __le16 ecode;
+ __le16 link_status;
+ __le32 link_speed; /* units of 1Mbps: e.g. 10000 = 10Gbps */
+};
+
+/**
+ * struct pds_core_reset_event - Reset event notification
+ * @eid: event number
+ * @ecode: event code = PDS_EVENT_RESET
+ * @reset_code: reset type
+ * @state: 0=pending, 1=complete, 2=error
+ *
+ * Sent when the NIC or some subsystem is going to be or
+ * has been reset.
+ */
+struct pds_core_reset_event {
+ __le64 eid;
+ __le16 ecode;
+ u8 reset_code;
+ u8 state;
+};
+
+/**
+ * struct pds_core_client_event - Client event notification
+ * @eid: event number
+ * @ecode: event code = PDS_EVENT_CLIENT
+ * @client_id: client to send the event to
+ * @client_event: wrapped event struct for the client
+ *
+ * Sent when an event needs to be passed on to a client
+ */
+struct pds_core_client_event {
+ __le64 eid;
+ __le16 ecode;
+ __le16 client_id;
+ u8 client_event[54];
+};
+
+/**
+ * struct pds_core_notifyq_cmd - Placeholder for building qcq
+ * @data: anonymous field for building the qcq
+ */
+struct pds_core_notifyq_cmd {
+ __le32 data; /* Not used but needed for qcq structure */
+};
+
+/*
+ * union pds_core_notifyq_comp - Overlay of notifyq event structures
+ */
+union pds_core_notifyq_comp {
+ struct {
+ __le64 eid;
+ __le16 ecode;
+ };
+ struct pds_core_notifyq_event event;
+ struct pds_core_link_change_event link_change;
+ struct pds_core_reset_event reset;
+ u8 data[64];
+};
+
+#define PDS_DEVNAME_LEN 32
+/**
+ * struct pds_core_client_reg_cmd - Register a new client with DSC
+ * @opcode: opcode PDS_AQ_CMD_CLIENT_REG
+ * @rsvd: word boundary padding
+ * @devname: text name of client device
+ * @vif_type: what type of device (enum pds_core_vif_types)
+ *
+ * Tell the DSC of the new client, and receive a client_id from DSC.
+ */
+struct pds_core_client_reg_cmd {
+ u8 opcode;
+ u8 rsvd[3];
+ char devname[PDS_DEVNAME_LEN];
+ u8 vif_type;
+};
+
+/**
+ * struct pds_core_client_reg_comp - Client registration completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @client_id: New id assigned by DSC
+ * @rsvd1: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_core_client_reg_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ __le16 client_id;
+ u8 rsvd1[9];
+ u8 color;
+};
+
+/**
+ * struct pds_core_client_unreg_cmd - Unregister a client from DSC
+ * @opcode: opcode PDS_AQ_CMD_CLIENT_UNREG
+ * @rsvd: word boundary padding
+ * @client_id: id of client being removed
+ *
+ * Tell the DSC this client is going away and remove its context.
+ * This uses the generic completion.
+ */
+struct pds_core_client_unreg_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 client_id;
+};
+
+/**
+ * struct pds_core_client_request_cmd - Pass along a wrapped client AdminQ cmd
+ * @opcode: opcode PDS_AQ_CMD_CLIENT_CMD
+ * @rsvd: word boundary padding
+ * @client_id: id of the client the wrapped command is for
+ * @client_cmd: the wrapped client command
+ *
+ * Proxy post an adminq command for the client.
+ * This uses the generic completion.
+ */
+struct pds_core_client_request_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 client_id;
+ u8 client_cmd[60];
+};
+
+#define PDS_CORE_MAX_FRAGS 16
+
+#define PDS_CORE_QCQ_F_INITED BIT(0)
+#define PDS_CORE_QCQ_F_SG BIT(1)
+#define PDS_CORE_QCQ_F_INTR BIT(2)
+#define PDS_CORE_QCQ_F_TX_STATS BIT(3)
+#define PDS_CORE_QCQ_F_RX_STATS BIT(4)
+#define PDS_CORE_QCQ_F_NOTIFYQ BIT(5)
+#define PDS_CORE_QCQ_F_CMB_RINGS BIT(6)
+#define PDS_CORE_QCQ_F_CORE BIT(7)
+
+enum pds_core_lif_type {
+ PDS_CORE_LIF_TYPE_DEFAULT = 0,
+};
+
+/**
+ * union pds_core_lif_config - LIF configuration
+ * @state: LIF state (enum pds_core_lif_state)
+ * @rsvd: Word boundary padding
+ * @name: LIF name
+ * @rsvd2: Word boundary padding
+ * @features: LIF features active (enum pds_core_hw_features)
+ * @queue_count: Queue counts per queue-type
+ * @words: Full union buffer size
+ */
+union pds_core_lif_config {
+ struct {
+ u8 state;
+ u8 rsvd[3];
+ char name[PDS_CORE_IFNAMSIZ];
+ u8 rsvd2[12];
+ __le64 features;
+ __le32 queue_count[PDS_CORE_QTYPE_MAX];
+ } __packed;
+ __le32 words[64];
+};
+
+/**
+ * struct pds_core_lif_status - LIF status register
+ * @eid: most recent NotifyQ event id
+ * @rsvd: Word boundary padding out to the full struct size
+ */
+struct pds_core_lif_status {
+ __le64 eid;
+ u8 rsvd[56];
+};
+
+/**
+ * struct pds_core_lif_info - LIF info structure
+ * @config: LIF configuration structure
+ * @status: LIF status structure
+ */
+struct pds_core_lif_info {
+ union pds_core_lif_config config;
+ struct pds_core_lif_status status;
+};
+
+/**
+ * struct pds_core_lif_identity - LIF identity information (type-specific)
+ * @features: LIF features (see enum pds_core_hw_features)
+ * @version: Identify structure version
+ * @hw_index: LIF hardware index
+ * @rsvd: Word boundary padding
+ * @max_nb_sessions: Maximum number of sessions supported
+ * @rsvd2: buffer padding
+ * @config: LIF config struct with features, q counts
+ */
+struct pds_core_lif_identity {
+ __le64 features;
+ u8 version;
+ u8 hw_index;
+ u8 rsvd[2];
+ __le32 max_nb_sessions;
+ u8 rsvd2[120];
+ union pds_core_lif_config config;
+};
+
+/**
+ * struct pds_core_lif_identify_cmd - Get LIF identity info command
+ * @opcode: Opcode PDS_AQ_CMD_LIF_IDENTIFY
+ * @type: LIF type (enum pds_core_lif_type)
+ * @client_id: Client identifier
+ * @ver: Version of identify returned by device
+ * @rsvd: Word boundary padding
+ * @ident_pa: DMA address to receive identity info
+ *
+ * Firmware will copy LIF identity data (struct pds_core_lif_identity)
+ * into the buffer address given.
+ */
+struct pds_core_lif_identify_cmd {
+ u8 opcode;
+ u8 type;
+ __le16 client_id;
+ u8 ver;
+ u8 rsvd[3];
+ __le64 ident_pa;
+};
+
+/**
+ * struct pds_core_lif_identify_comp - LIF identify command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @ver: Version of identify returned by device
+ * @bytes: Bytes copied into the buffer
+ * @rsvd: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_core_lif_identify_comp {
+ u8 status;
+ u8 ver;
+ __le16 bytes;
+ u8 rsvd[11];
+ u8 color;
+};
+
+/**
+ * struct pds_core_lif_init_cmd - LIF init command
+ * @opcode: Opcode PDS_AQ_CMD_LIF_INIT
+ * @type: LIF type (enum pds_core_lif_type)
+ * @client_id: Client identifier
+ * @rsvd: Word boundary padding
+ * @info_pa: Destination address for LIF info (struct pds_core_lif_info)
+ */
+struct pds_core_lif_init_cmd {
+ u8 opcode;
+ u8 type;
+ __le16 client_id;
+ __le32 rsvd;
+ __le64 info_pa;
+};
+
+/**
+ * struct pds_core_lif_init_comp - LIF init command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @hw_index: Hardware index of the initialized LIF
+ * @rsvd1: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_core_lif_init_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 hw_index;
+ u8 rsvd1[11];
+ u8 color;
+};
+
+/**
+ * struct pds_core_lif_reset_cmd - LIF reset command
+ * @opcode: Opcode PDS_AQ_CMD_LIF_RESET
+ * @rsvd: Word boundary padding
+ * @client_id: Client identifier
+ *
+ * Will reset only the specified LIF.
+ */
+struct pds_core_lif_reset_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 client_id;
+};
+
+/**
+ * enum pds_core_lif_attr - List of LIF attributes
+ * @PDS_CORE_LIF_ATTR_STATE: LIF state attribute
+ * @PDS_CORE_LIF_ATTR_NAME: LIF name attribute
+ * @PDS_CORE_LIF_ATTR_FEATURES: LIF features attribute
+ * @PDS_CORE_LIF_ATTR_STATS_CTRL: LIF statistics control attribute
+ */
+enum pds_core_lif_attr {
+ PDS_CORE_LIF_ATTR_STATE = 0,
+ PDS_CORE_LIF_ATTR_NAME = 1,
+ PDS_CORE_LIF_ATTR_FEATURES = 4,
+ PDS_CORE_LIF_ATTR_STATS_CTRL = 6,
+};
+
+/**
+ * struct pds_core_lif_setattr_cmd - Set LIF attributes on the NIC
+ * @opcode: Opcode PDS_AQ_CMD_LIF_SETATTR
+ * @attr: Attribute type (enum pds_core_lif_attr)
+ * @client_id: Client identifier
+ * @state: LIF state (enum pds_core_lif_state)
+ * @name: The name string, 0 terminated
+ * @features: Features (enum pds_core_hw_features)
+ * @stats_ctl: Stats control commands (enum pds_core_stats_ctl_cmd)
+ * @rsvd: Command Buffer padding
+ */
+struct pds_core_lif_setattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 client_id;
+ union {
+ u8 state;
+ char name[PDS_CORE_IFNAMSIZ];
+ __le64 features;
+ u8 stats_ctl;
+ u8 rsvd[60];
+ } __packed;
+};
+
+/**
+ * struct pds_core_lif_setattr_comp - LIF set attr command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @features: Features (enum pds_core_hw_features)
+ * @rsvd2: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_core_lif_setattr_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ union {
+ __le64 features;
+ u8 rsvd2[11];
+ } __packed;
+ u8 color;
+};
+
+/**
+ * struct pds_core_lif_getattr_cmd - Get LIF attributes from the NIC
+ * @opcode: Opcode PDS_AQ_CMD_LIF_GETATTR
+ * @attr: Attribute type (enum pds_core_lif_attr)
+ * @client_id: Client identifier
+ */
+struct pds_core_lif_getattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 client_id;
+};
+
+/**
+ * struct pds_core_lif_getattr_comp - LIF get attr command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @state: LIF state (enum pds_core_lif_state)
+ * @name: LIF name string, 0 terminated
+ * @features: Features (enum pds_core_hw_features)
+ * @rsvd2: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_core_lif_getattr_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ union {
+ u8 state;
+ __le64 features;
+ u8 rsvd2[11];
+ } __packed;
+ u8 color;
+};
+
+/**
+ * struct pds_core_q_identity - Queue identity information
+ * @version: Queue type version that can be used with FW
+ * @supported: Bitfield of queue versions, first bit = ver 0
+ * @rsvd: Word boundary padding
+ * @features: Queue features
+ * @desc_sz: Descriptor size
+ * @comp_sz: Completion descriptor size
+ * @rsvd2: Word boundary padding
+ */
+struct pds_core_q_identity {
+ u8 version;
+ u8 supported;
+ u8 rsvd[6];
+#define PDS_CORE_QIDENT_F_CQ 0x01 /* queue has completion ring */
+ __le64 features;
+ __le16 desc_sz;
+ __le16 comp_sz;
+ u8 rsvd2[6];
+};
+
+/**
+ * struct pds_core_q_identify_cmd - queue identify command
+ * @opcode: Opcode PDS_AQ_CMD_Q_IDENTIFY
+ * @type: Logical queue type (enum pds_core_logical_qtype)
+ * @client_id: Client identifier
+ * @ver: Highest queue type version that the driver supports
+ * @rsvd: Word boundary padding
+ * @ident_pa: DMA address to receive the data (struct pds_core_q_identity)
+ */
+struct pds_core_q_identify_cmd {
+ u8 opcode;
+ u8 type;
+ __le16 client_id;
+ u8 ver;
+ u8 rsvd[3];
+ __le64 ident_pa;
+};
+
+/**
+ * struct pds_core_q_identify_comp - queue identify command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @ver: Queue type version that can be used with FW
+ * @rsvd1: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_core_q_identify_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 ver;
+ u8 rsvd1[10];
+ u8 color;
+};
+
+/**
+ * struct pds_core_q_init_cmd - Queue init command
+ * @opcode: Opcode PDS_AQ_CMD_Q_INIT
+ * @type: Logical queue type
+ * @client_id: Client identifier
+ * @ver: Queue type version
+ * @rsvd: Word boundary padding
+ * @index: (LIF, qtype) relative admin queue index
+ * @pid: Process ID
+ * @intr_index: Interrupt control register index, or Event queue index
+ * @flags:
+ * IRQ: Interrupt requested on completion
+ * ENA: Enable the queue. If ENA=0 the queue is initialized
+ * but remains disabled, to be later enabled with the
+ * Queue Enable command. If ENA=1, then queue is
+ * initialized and then enabled.
+ * @cos: Class of service for this queue
+ * @ring_size: Queue ring size, encoded as a log2(size), in
+ * number of descriptors. The actual ring size is
+ * (1 << ring_size). For example, to select a ring size
+ * of 64 descriptors write ring_size = 6. The minimum
+ * ring_size value is 2 for a ring of 4 descriptors.
+ * The maximum ring_size value is 12 for a ring of 4k
+ * descriptors. Values of ring_size <2 and >12 are
+ * reserved.
+ * @ring_base: Queue ring base address
+ * @cq_ring_base: Completion queue ring base address
+ */
+struct pds_core_q_init_cmd {
+ u8 opcode;
+ u8 type;
+ __le16 client_id;
+ u8 ver;
+ u8 rsvd[3];
+ __le32 index;
+ __le16 pid;
+ __le16 intr_index;
+ __le16 flags;
+#define PDS_CORE_QINIT_F_IRQ 0x01 /* Request interrupt on completion */
+#define PDS_CORE_QINIT_F_ENA 0x02 /* Enable the queue */
+ u8 cos;
+#define PDS_CORE_QSIZE_MIN_LG2 2
+#define PDS_CORE_QSIZE_MAX_LG2 12
+ u8 ring_size;
+ __le64 ring_base;
+ __le64 cq_ring_base;
+} __packed;
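The log2 ring_size encoding described above is easy to get wrong, so here is a minimal sketch (not part of this patch; the helper name is hypothetical) of deriving it from a descriptor count:

#include <linux/log2.h>
#include <linux/minmax.h>

/* Hypothetical helper: encode a descriptor count into the log2 ring_size
 * field of pds_core_q_init_cmd, clamped to the documented limits.
 */
static u8 example_ring_size(u32 num_descs)
{
	u8 rs = order_base_2(num_descs);	/* log2(num_descs), rounded up */

	return clamp_t(u8, rs, PDS_CORE_QSIZE_MIN_LG2, PDS_CORE_QSIZE_MAX_LG2);
}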
+
+/**
+ * struct pds_core_q_init_comp - Queue init command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @hw_index: Hardware Queue ID
+ * @hw_type: Hardware Queue type
+ * @rsvd2: Word boundary padding
+ * @color: Color
+ */
+struct pds_core_q_init_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ __le32 hw_index;
+ u8 hw_type;
+ u8 rsvd2[6];
+ u8 color;
+};
+
+union pds_core_adminq_cmd {
+ u8 opcode;
+ u8 bytes[64];
+
+ struct pds_core_client_reg_cmd client_reg;
+ struct pds_core_client_unreg_cmd client_unreg;
+ struct pds_core_client_request_cmd client_request;
+
+ struct pds_core_lif_identify_cmd lif_ident;
+ struct pds_core_lif_init_cmd lif_init;
+ struct pds_core_lif_reset_cmd lif_reset;
+ struct pds_core_lif_setattr_cmd lif_setattr;
+ struct pds_core_lif_getattr_cmd lif_getattr;
+
+ struct pds_core_q_identify_cmd q_ident;
+ struct pds_core_q_init_cmd q_init;
+};
+
+union pds_core_adminq_comp {
+ struct {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 rsvd2[11];
+ u8 color;
+ };
+ u32 words[4];
+
+ struct pds_core_client_reg_comp client_reg;
+
+ struct pds_core_lif_identify_comp lif_ident;
+ struct pds_core_lif_init_comp lif_init;
+ struct pds_core_lif_setattr_comp lif_setattr;
+ struct pds_core_lif_getattr_comp lif_getattr;
+
+ struct pds_core_q_identify_comp q_ident;
+ struct pds_core_q_init_comp q_init;
+};
+
+#ifndef __CHECKER__
+static_assert(sizeof(union pds_core_adminq_cmd) == 64);
+static_assert(sizeof(union pds_core_adminq_comp) == 16);
+static_assert(sizeof(union pds_core_notifyq_comp) == 64);
+#endif /* __CHECKER__ */
+
+/* The color bit is a 'done' bit for the completion descriptors
+ * where the meaning alternates between '1' and '0' for alternating
+ * passes through the completion descriptor ring.
+ */
+static inline bool pdsc_color_match(u8 color, bool done_color)
+{
+ return (!!(color & PDS_COMP_COLOR_MASK)) == done_color;
+}
+
+struct pdsc;
+int pdsc_adminq_post(struct pdsc *pdsc,
+ union pds_core_adminq_cmd *cmd,
+ union pds_core_adminq_comp *comp,
+ bool fast_poll);
+
+#endif /* _PDS_CORE_ADMINQ_H_ */
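For orientation, a hedged sketch of how pdsc_color_match() is typically used by a completion-ring consumer; the queue fields (comp_base, tail_idx, num_descs, done_color) are hypothetical and not defined by this header:

/* Illustrative only: service one completion if the color bit says it is new */
static bool example_service_one_comp(struct example_cq *q)
{
	union pds_core_adminq_comp *comp = &q->comp_base[q->tail_idx];

	/* A descriptor left over from the previous pass through the ring
	 * still carries the old color, so it will not match done_color.
	 */
	if (!pdsc_color_match(comp->color, q->done_color))
		return false;

	example_handle_comp(q, comp);

	if (++q->tail_idx == q->num_descs) {
		q->tail_idx = 0;
		q->done_color = !q->done_color;	/* color flips on every wrap */
	}

	return true;
}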
diff --git a/include/linux/pds/pds_auxbus.h b/include/linux/pds/pds_auxbus.h
new file mode 100644
index 000000000000..214ef12302d0
--- /dev/null
+++ b/include/linux/pds/pds_auxbus.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#ifndef _PDSC_AUXBUS_H_
+#define _PDSC_AUXBUS_H_
+
+#include <linux/auxiliary_bus.h>
+
+struct pds_auxiliary_dev {
+ struct auxiliary_device aux_dev;
+ struct pci_dev *vf_pdev;
+ u16 client_id;
+};
+
+int pds_client_adminq_cmd(struct pds_auxiliary_dev *padev,
+ union pds_core_adminq_cmd *req,
+ size_t req_len,
+ union pds_core_adminq_comp *resp,
+ u64 flags);
+#endif /* _PDSC_AUXBUS_H_ */
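As a minimal usage sketch (not from this patch), an auxiliary-bus client could wrap one of the AdminQ commands defined in pds_adminq.h and post it through this helper; error handling is elided:

static int example_client_lif_reset(struct pds_auxiliary_dev *padev)
{
	union pds_core_adminq_cmd cmd = {
		.lif_reset = {
			.opcode = PDS_AQ_CMD_LIF_RESET,
			.client_id = cpu_to_le16(padev->client_id),
		},
	};
	union pds_core_adminq_comp comp = {};

	/* post via the PF's AdminQ, polling at the 1ms fast rate */
	return pds_client_adminq_cmd(padev, &cmd, sizeof(cmd.lif_reset),
				     &comp, PDS_AQ_FLAG_FASTPOLL);
}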
diff --git a/include/linux/pds/pds_common.h b/include/linux/pds/pds_common.h
new file mode 100644
index 000000000000..060331486d50
--- /dev/null
+++ b/include/linux/pds/pds_common.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
+
+#ifndef _PDS_COMMON_H_
+#define _PDS_COMMON_H_
+
+#define PDS_CORE_DRV_NAME "pds_core"
+
+/* the device's internal addressing uses up to 52 bits */
+#define PDS_CORE_ADDR_LEN 52
+#define PDS_CORE_ADDR_MASK (BIT_ULL(PDS_CORE_ADDR_LEN) - 1)
+#define PDS_PAGE_SIZE 4096
+
+enum pds_core_driver_type {
+ PDS_DRIVER_LINUX = 1,
+ PDS_DRIVER_WIN = 2,
+ PDS_DRIVER_DPDK = 3,
+ PDS_DRIVER_FREEBSD = 4,
+ PDS_DRIVER_IPXE = 5,
+ PDS_DRIVER_ESXI = 6,
+};
+
+enum pds_core_vif_types {
+ PDS_DEV_TYPE_CORE = 0,
+ PDS_DEV_TYPE_VDPA = 1,
+ PDS_DEV_TYPE_VFIO = 2,
+ PDS_DEV_TYPE_ETH = 3,
+ PDS_DEV_TYPE_RDMA = 4,
+ PDS_DEV_TYPE_LM = 5,
+
+ /* new ones added before this line */
+ PDS_DEV_TYPE_MAX = 16 /* don't change - used in struct size */
+};
+
+#define PDS_DEV_TYPE_CORE_STR "Core"
+#define PDS_DEV_TYPE_VDPA_STR "vDPA"
+#define PDS_DEV_TYPE_VFIO_STR "VFio"
+#define PDS_DEV_TYPE_ETH_STR "Eth"
+#define PDS_DEV_TYPE_RDMA_STR "RDMA"
+#define PDS_DEV_TYPE_LM_STR "LM"
+
+#define PDS_CORE_IFNAMSIZ 16
+
+/**
+ * enum pds_core_logical_qtype - Logical Queue Types
+ * @PDS_CORE_QTYPE_ADMINQ: Administrative Queue
+ * @PDS_CORE_QTYPE_NOTIFYQ: Notify Queue
+ * @PDS_CORE_QTYPE_RXQ: Receive Queue
+ * @PDS_CORE_QTYPE_TXQ: Transmit Queue
+ * @PDS_CORE_QTYPE_EQ: Event Queue
+ * @PDS_CORE_QTYPE_MAX: Max queue type supported
+ */
+enum pds_core_logical_qtype {
+ PDS_CORE_QTYPE_ADMINQ = 0,
+ PDS_CORE_QTYPE_NOTIFYQ = 1,
+ PDS_CORE_QTYPE_RXQ = 2,
+ PDS_CORE_QTYPE_TXQ = 3,
+ PDS_CORE_QTYPE_EQ = 4,
+
+ PDS_CORE_QTYPE_MAX = 16 /* don't change - used in struct size */
+};
+
+int pdsc_register_notify(struct notifier_block *nb);
+void pdsc_unregister_notify(struct notifier_block *nb);
+void *pdsc_get_pf_struct(struct pci_dev *vf_pdev);
+int pds_client_register(struct pci_dev *pf_pdev, char *devname);
+int pds_client_unregister(struct pci_dev *pf_pdev, u16 client_id);
+#endif /* _PDS_COMMON_H_ */
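A hedged sketch of hooking the notifier interface above; the assumption that the callback payload is the generic pds_core_notifyq_event is the editor's, not something this header states:

static int example_core_notify(struct notifier_block *nb,
			       unsigned long ecode, void *data)
{
	struct pds_core_notifyq_event *event = data;	/* assumed payload */

	if (ecode == PDS_EVENT_RESET)
		pr_info("pds_core reset event, eid %llu\n",
			le64_to_cpu(event->eid));

	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_core_notify,
};

/* registration: pdsc_register_notify(&example_nb);
 * teardown:     pdsc_unregister_notify(&example_nb);
 */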
diff --git a/include/linux/pds/pds_core_if.h b/include/linux/pds/pds_core_if.h
new file mode 100644
index 000000000000..e838a2b90440
--- /dev/null
+++ b/include/linux/pds/pds_core_if.h
@@ -0,0 +1,571 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
+
+#ifndef _PDS_CORE_IF_H_
+#define _PDS_CORE_IF_H_
+
+#define PCI_VENDOR_ID_PENSANDO 0x1dd8
+#define PCI_DEVICE_ID_PENSANDO_CORE_PF 0x100c
+#define PCI_DEVICE_ID_VIRTIO_NET_TRANS 0x1000
+#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
+#define PCI_DEVICE_ID_PENSANDO_VDPA_VF 0x100b
+#define PDS_CORE_BARS_MAX 4
+#define PDS_CORE_PCI_BAR_DBELL 1
+
+/* Bar0 */
+#define PDS_CORE_DEV_INFO_SIGNATURE 0x44455649 /* 'DEVI' */
+#define PDS_CORE_BAR0_SIZE 0x8000
+#define PDS_CORE_BAR0_DEV_INFO_REGS_OFFSET 0x0000
+#define PDS_CORE_BAR0_DEV_CMD_REGS_OFFSET 0x0800
+#define PDS_CORE_BAR0_DEV_CMD_DATA_REGS_OFFSET 0x0c00
+#define PDS_CORE_BAR0_INTR_STATUS_OFFSET 0x1000
+#define PDS_CORE_BAR0_INTR_CTRL_OFFSET 0x2000
+#define PDS_CORE_DEV_CMD_DONE 0x00000001
+
+#define PDS_CORE_DEVCMD_TIMEOUT 5
+
+#define PDS_CORE_CLIENT_ID 0
+#define PDS_CORE_ASIC_TYPE_CAPRI 0
+
+/*
+ * enum pds_core_cmd_opcode - Device commands
+ */
+enum pds_core_cmd_opcode {
+ /* Core init */
+ PDS_CORE_CMD_NOP = 0,
+ PDS_CORE_CMD_IDENTIFY = 1,
+ PDS_CORE_CMD_RESET = 2,
+ PDS_CORE_CMD_INIT = 3,
+
+ PDS_CORE_CMD_FW_DOWNLOAD = 4,
+ PDS_CORE_CMD_FW_CONTROL = 5,
+
+ /* SR/IOV commands */
+ PDS_CORE_CMD_VF_GETATTR = 60,
+ PDS_CORE_CMD_VF_SETATTR = 61,
+ PDS_CORE_CMD_VF_CTRL = 62,
+
+ /* Add commands before this line */
+ PDS_CORE_CMD_MAX,
+ PDS_CORE_CMD_COUNT
+};
+
+/*
+ * enum pds_core_status_code - Device command return codes
+ */
+enum pds_core_status_code {
+ PDS_RC_SUCCESS = 0, /* Success */
+ PDS_RC_EVERSION = 1, /* Incorrect version for request */
+ PDS_RC_EOPCODE = 2, /* Invalid cmd opcode */
+ PDS_RC_EIO = 3, /* I/O error */
+ PDS_RC_EPERM = 4, /* Permission denied */
+ PDS_RC_EQID = 5, /* Bad qid */
+ PDS_RC_EQTYPE = 6, /* Bad qtype */
+ PDS_RC_ENOENT = 7, /* No such element */
+ PDS_RC_EINTR = 8, /* operation interrupted */
+ PDS_RC_EAGAIN = 9, /* Try again */
+ PDS_RC_ENOMEM = 10, /* Out of memory */
+ PDS_RC_EFAULT = 11, /* Bad address */
+ PDS_RC_EBUSY = 12, /* Device or resource busy */
+ PDS_RC_EEXIST = 13, /* object already exists */
+ PDS_RC_EINVAL = 14, /* Invalid argument */
+ PDS_RC_ENOSPC = 15, /* No space left or alloc failure */
+ PDS_RC_ERANGE = 16, /* Parameter out of range */
+ PDS_RC_BAD_ADDR = 17, /* Descriptor contains a bad ptr */
+ PDS_RC_DEV_CMD = 18, /* Device cmd attempted on AdminQ */
+ PDS_RC_ENOSUPP = 19, /* Operation not supported */
+ PDS_RC_ERROR = 29, /* Generic error */
+ PDS_RC_ERDMA = 30, /* Generic RDMA error */
+ PDS_RC_EVFID = 31, /* VF ID does not exist */
+ PDS_RC_BAD_FW = 32, /* FW file is invalid or corrupted */
+ PDS_RC_ECLIENT = 33, /* No such client id */
+};
+
+/**
+ * struct pds_core_drv_identity - Driver identity information
+ * @drv_type: Driver type (enum pds_core_driver_type)
+ * @os_dist: OS distribution, numeric format
+ * @os_dist_str: OS distribution, string format
+ * @kernel_ver: Kernel version, numeric format
+ * @kernel_ver_str: Kernel version, string format
+ * @driver_ver_str: Driver version, string format
+ */
+struct pds_core_drv_identity {
+ __le32 drv_type;
+ __le32 os_dist;
+ char os_dist_str[128];
+ __le32 kernel_ver;
+ char kernel_ver_str[32];
+ char driver_ver_str[32];
+};
+
+#define PDS_DEV_TYPE_MAX 16
+/**
+ * struct pds_core_dev_identity - Device identity information
+ * @version: Version of device identify
+ * @type: Identify type (0 for now)
+ * @state: Device state
+ * @rsvd: Word boundary padding
+ * @nlifs: Number of LIFs provisioned
+ * @nintrs: Number of interrupts provisioned
+ * @ndbpgs_per_lif: Number of doorbell pages per LIF
+ * @intr_coal_mult: Interrupt coalescing multiplication factor
+ * Scale user-supplied interrupt coalescing
+ * value in usecs to device units using:
+ * device units = usecs * mult / div
+ * @intr_coal_div: Interrupt coalescing division factor
+ * Scale user-supplied interrupt coalescing
+ * value in usecs to device units using:
+ * device units = usecs * mult / div
+ * @vif_types: How many of each VIF device type is supported
+ */
+struct pds_core_dev_identity {
+ u8 version;
+ u8 type;
+ u8 state;
+ u8 rsvd;
+ __le32 nlifs;
+ __le32 nintrs;
+ __le32 ndbpgs_per_lif;
+ __le32 intr_coal_mult;
+ __le32 intr_coal_div;
+ __le16 vif_types[PDS_DEV_TYPE_MAX];
+};
+
+#define PDS_CORE_IDENTITY_VERSION_1 1
+
+/**
+ * struct pds_core_dev_identify_cmd - Driver/device identify command
+ * @opcode: Opcode PDS_CORE_CMD_IDENTIFY
+ * @ver: Highest version of identify supported by driver
+ *
+ * Expects to find driver identification info (struct pds_core_drv_identity)
+ * in cmd_regs->data. Driver should keep the devcmd interface locked
+ * while preparing the driver info.
+ */
+struct pds_core_dev_identify_cmd {
+ u8 opcode;
+ u8 ver;
+};
+
+/**
+ * struct pds_core_dev_identify_comp - Device identify command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @ver: Version of identify returned by device
+ *
+ * Device identification info (struct pds_core_dev_identity) can be found
+ * in cmd_regs->data. Driver should keep the devcmd interface locked
+ * while reading the results.
+ */
+struct pds_core_dev_identify_comp {
+ u8 status;
+ u8 ver;
+};
+
+/**
+ * struct pds_core_dev_reset_cmd - Device reset command
+ * @opcode: Opcode PDS_CORE_CMD_RESET
+ *
+ * Resets and clears all LIFs, VDevs, and VIFs on the device.
+ */
+struct pds_core_dev_reset_cmd {
+ u8 opcode;
+};
+
+/**
+ * struct pds_core_dev_reset_comp - Reset command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ */
+struct pds_core_dev_reset_comp {
+ u8 status;
+};
+
+/*
+ * struct pds_core_dev_init_data - Pointers and info needed for the Core
+ * initialization PDS_CORE_CMD_INIT command. The in and out structs are
+ * overlays on the pds_core_dev_cmd_regs.data space for passing data down
+ * to the firmware on init, and then returning initialization results.
+ */
+struct pds_core_dev_init_data_in {
+ __le64 adminq_q_base;
+ __le64 adminq_cq_base;
+ __le64 notifyq_cq_base;
+ __le32 flags;
+ __le16 intr_index;
+ u8 adminq_ring_size;
+ u8 notifyq_ring_size;
+};
+
+struct pds_core_dev_init_data_out {
+ __le32 core_hw_index;
+ __le32 adminq_hw_index;
+ __le32 notifyq_hw_index;
+ u8 adminq_hw_type;
+ u8 notifyq_hw_type;
+};
+
+/**
+ * struct pds_core_dev_init_cmd - Core device initialize
+ * @opcode: opcode PDS_CORE_CMD_INIT
+ *
+ * Initializes the core device and sets up the AdminQ and NotifyQ.
+ * Expects to find initialization data (struct pds_core_dev_init_data_in)
+ * in cmd_regs->data. Driver should keep the devcmd interface locked
+ * while preparing the driver info.
+ */
+struct pds_core_dev_init_cmd {
+ u8 opcode;
+};
+
+/**
+ * struct pds_core_dev_init_comp - Core init completion
+ * @status: Status of the command (enum pds_core_status_code)
+ *
+ * Initialization result data (struct pds_core_dev_init_data_out)
+ * is found in cmd_regs->data.
+ */
+struct pds_core_dev_init_comp {
+ u8 status;
+};
+
+/**
+ * struct pds_core_fw_download_cmd - Firmware download command
+ * @opcode: opcode PDS_CORE_CMD_FW_DOWNLOAD
+ * @rsvd: Word boundary padding
+ * @offset: offset of the firmware buffer within the full image
+ * @addr: DMA address of the firmware buffer
+ * @length: number of valid bytes in the firmware buffer
+ */
+struct pds_core_fw_download_cmd {
+ u8 opcode;
+ u8 rsvd[3];
+ __le32 offset;
+ __le64 addr;
+ __le32 length;
+};
+
+/**
+ * struct pds_core_fw_download_comp - Firmware download completion
+ * @status: Status of the command (enum pds_core_status_code)
+ */
+struct pds_core_fw_download_comp {
+ u8 status;
+};
+
+/**
+ * enum pds_core_fw_control_oper - FW control operations
+ * @PDS_CORE_FW_INSTALL_ASYNC: Install firmware asynchronously
+ * @PDS_CORE_FW_INSTALL_STATUS: Firmware installation status
+ * @PDS_CORE_FW_ACTIVATE_ASYNC: Activate firmware asynchronously
+ * @PDS_CORE_FW_ACTIVATE_STATUS: Firmware activate status
+ * @PDS_CORE_FW_UPDATE_CLEANUP: Cleanup any firmware update leftovers
+ * @PDS_CORE_FW_GET_BOOT: Return current active firmware slot
+ * @PDS_CORE_FW_SET_BOOT: Set active firmware slot for next boot
+ * @PDS_CORE_FW_GET_LIST: Return list of installed firmware images
+ */
+enum pds_core_fw_control_oper {
+ PDS_CORE_FW_INSTALL_ASYNC = 0,
+ PDS_CORE_FW_INSTALL_STATUS = 1,
+ PDS_CORE_FW_ACTIVATE_ASYNC = 2,
+ PDS_CORE_FW_ACTIVATE_STATUS = 3,
+ PDS_CORE_FW_UPDATE_CLEANUP = 4,
+ PDS_CORE_FW_GET_BOOT = 5,
+ PDS_CORE_FW_SET_BOOT = 6,
+ PDS_CORE_FW_GET_LIST = 7,
+};
+
+enum pds_core_fw_slot {
+ PDS_CORE_FW_SLOT_INVALID = 0,
+ PDS_CORE_FW_SLOT_A = 1,
+ PDS_CORE_FW_SLOT_B = 2,
+ PDS_CORE_FW_SLOT_GOLD = 3,
+};
+
+/**
+ * struct pds_core_fw_control_cmd - Firmware control command
+ * @opcode: opcode
+ * @rsvd: Word boundary padding
+ * @oper: firmware control operation (enum pds_core_fw_control_oper)
+ * @slot: slot to operate on (enum pds_core_fw_slot)
+ */
+struct pds_core_fw_control_cmd {
+ u8 opcode;
+ u8 rsvd[3];
+ u8 oper;
+ u8 slot;
+};
+
+/**
+ * struct pds_core_fw_control_comp - Firmware control completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word alignment space
+ * @slot: Slot number (enum pds_core_fw_slot)
+ * @rsvd1: Struct padding
+ * @color: Color bit
+ */
+struct pds_core_fw_control_comp {
+ u8 status;
+ u8 rsvd[3];
+ u8 slot;
+ u8 rsvd1[10];
+ u8 color;
+};
+
+struct pds_core_fw_name_info {
+#define PDS_CORE_FWSLOT_BUFLEN 8
+#define PDS_CORE_FWVERS_BUFLEN 32
+ char slotname[PDS_CORE_FWSLOT_BUFLEN];
+ char fw_version[PDS_CORE_FWVERS_BUFLEN];
+};
+
+struct pds_core_fw_list_info {
+#define PDS_CORE_FWVERS_LIST_LEN 16
+ u8 num_fw_slots;
+ struct pds_core_fw_name_info fw_names[PDS_CORE_FWVERS_LIST_LEN];
+} __packed;
+
+enum pds_core_vf_attr {
+ PDS_CORE_VF_ATTR_SPOOFCHK = 1,
+ PDS_CORE_VF_ATTR_TRUST = 2,
+ PDS_CORE_VF_ATTR_MAC = 3,
+ PDS_CORE_VF_ATTR_LINKSTATE = 4,
+ PDS_CORE_VF_ATTR_VLAN = 5,
+ PDS_CORE_VF_ATTR_RATE = 6,
+ PDS_CORE_VF_ATTR_STATSADDR = 7,
+};
+
+/**
+ * enum pds_core_vf_link_status - Virtual Function link status
+ * @PDS_CORE_VF_LINK_STATUS_AUTO: Use link state of the uplink
+ * @PDS_CORE_VF_LINK_STATUS_UP: Link always up
+ * @PDS_CORE_VF_LINK_STATUS_DOWN: Link always down
+ */
+enum pds_core_vf_link_status {
+ PDS_CORE_VF_LINK_STATUS_AUTO = 0,
+ PDS_CORE_VF_LINK_STATUS_UP = 1,
+ PDS_CORE_VF_LINK_STATUS_DOWN = 2,
+};
+
+/**
+ * struct pds_core_vf_setattr_cmd - Set VF attributes on the NIC
+ * @opcode: Opcode
+ * @attr: Attribute type (enum pds_core_vf_attr)
+ * @vf_index: VF index
+ * @macaddr: mac address
+ * @vlanid: vlan ID
+ * @maxrate: max Tx rate in Mbps
+ * @spoofchk: enable address spoof checking
+ * @trust: enable VF trust
+ * @linkstate: set link up or down
+ * @stats: stats addr struct
+ * @stats.pa: set DMA address for VF stats
+ * @stats.len: length of VF stats space
+ * @pad: force union to specific size
+ */
+struct pds_core_vf_setattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 vf_index;
+ union {
+ u8 macaddr[6];
+ __le16 vlanid;
+ __le32 maxrate;
+ u8 spoofchk;
+ u8 trust;
+ u8 linkstate;
+ struct {
+ __le64 pa;
+ __le32 len;
+ } stats;
+ u8 pad[60];
+ } __packed;
+};
+
+struct pds_core_vf_setattr_comp {
+ u8 status;
+ u8 attr;
+ __le16 vf_index;
+ __le16 comp_index;
+ u8 rsvd[9];
+ u8 color;
+};
+
+/**
+ * struct pds_core_vf_getattr_cmd - Get VF attributes from the NIC
+ * @opcode: Opcode
+ * @attr: Attribute type (enum pds_core_vf_attr)
+ * @vf_index: VF index
+ */
+struct pds_core_vf_getattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 vf_index;
+};
+
+struct pds_core_vf_getattr_comp {
+ u8 status;
+ u8 attr;
+ __le16 vf_index;
+ union {
+ u8 macaddr[6];
+ __le16 vlanid;
+ __le32 maxrate;
+ u8 spoofchk;
+ u8 trust;
+ u8 linkstate;
+ __le64 stats_pa;
+ u8 pad[11];
+ } __packed;
+ u8 color;
+};
+
+enum pds_core_vf_ctrl_opcode {
+ PDS_CORE_VF_CTRL_START_ALL = 0,
+ PDS_CORE_VF_CTRL_START = 1,
+};
+
+/**
+ * struct pds_core_vf_ctrl_cmd - VF control command
+ * @opcode: Opcode for the command
+ * @ctrl_opcode: VF control operation type
+ * @vf_index: VF index; unused when @ctrl_opcode is PDS_CORE_VF_CTRL_START_ALL
+ */
+struct pds_core_vf_ctrl_cmd {
+ u8 opcode;
+ u8 ctrl_opcode;
+ __le16 vf_index;
+};
+
+/**
+ * struct pds_core_vf_ctrl_comp - VF_CTRL command completion.
+ * @status: Status of the command (enum pds_core_status_code)
+ */
+struct pds_core_vf_ctrl_comp {
+ u8 status;
+};
+
+/*
+ * union pds_core_dev_cmd - Overlay of core device command structures
+ */
+union pds_core_dev_cmd {
+ u8 opcode;
+ u32 words[16];
+
+ struct pds_core_dev_identify_cmd identify;
+ struct pds_core_dev_init_cmd init;
+ struct pds_core_dev_reset_cmd reset;
+ struct pds_core_fw_download_cmd fw_download;
+ struct pds_core_fw_control_cmd fw_control;
+
+ struct pds_core_vf_setattr_cmd vf_setattr;
+ struct pds_core_vf_getattr_cmd vf_getattr;
+ struct pds_core_vf_ctrl_cmd vf_ctrl;
+};
+
+/*
+ * union pds_core_dev_comp - Overlay of core device completion structures
+ */
+union pds_core_dev_comp {
+ u8 status;
+ u8 bytes[16];
+
+ struct pds_core_dev_identify_comp identify;
+ struct pds_core_dev_reset_comp reset;
+ struct pds_core_dev_init_comp init;
+ struct pds_core_fw_download_comp fw_download;
+ struct pds_core_fw_control_comp fw_control;
+
+ struct pds_core_vf_setattr_comp vf_setattr;
+ struct pds_core_vf_getattr_comp vf_getattr;
+ struct pds_core_vf_ctrl_comp vf_ctrl;
+};
+
+/**
+ * struct pds_core_dev_hwstamp_regs - Hardware current timestamp registers
+ * @tick_low: Low 32 bits of hardware timestamp
+ * @tick_high: High 32 bits of hardware timestamp
+ */
+struct pds_core_dev_hwstamp_regs {
+ u32 tick_low;
+ u32 tick_high;
+};
+
+/**
+ * struct pds_core_dev_info_regs - Device info register format (read-only)
+ * @signature: Signature value of 0x44455649 ('DEVI')
+ * @version: Current version of info
+ * @asic_type: Asic type
+ * @asic_rev: Asic revision
+ * @fw_status: Firmware status
+ * bit 0 - 1 = fw running
+ * bit 4-7 - 4 bit generation number, changes on fw restart
+ * @fw_heartbeat: Firmware heartbeat counter
+ * @serial_num: Serial number
+ * @fw_version: Firmware version
+ * @oprom_regs: Option ROM debug enable/disable and bitmap registers (reserved)
+ * @rsvd_pad1024: Struct padding
+ * @hwstamp: Hardware current timestamp registers
+ * @rsvd_pad2048: Struct padding
+ */
+struct pds_core_dev_info_regs {
+#define PDS_CORE_DEVINFO_FWVERS_BUFLEN 32
+#define PDS_CORE_DEVINFO_SERIAL_BUFLEN 32
+ u32 signature;
+ u8 version;
+ u8 asic_type;
+ u8 asic_rev;
+#define PDS_CORE_FW_STS_F_STOPPED 0x00
+#define PDS_CORE_FW_STS_F_RUNNING 0x01
+#define PDS_CORE_FW_STS_F_GENERATION 0xF0
+ u8 fw_status;
+ __le32 fw_heartbeat;
+ char fw_version[PDS_CORE_DEVINFO_FWVERS_BUFLEN];
+ char serial_num[PDS_CORE_DEVINFO_SERIAL_BUFLEN];
+ u8 oprom_regs[32]; /* reserved */
+ u8 rsvd_pad1024[916];
+ struct pds_core_dev_hwstamp_regs hwstamp; /* on 1k boundary */
+ u8 rsvd_pad2048[1016];
+} __packed;
+
+/**
+ * struct pds_core_dev_cmd_regs - Device command register format (read-write)
+ * @doorbell: Device Cmd Doorbell, write-only
+ * Write a 1 to signal device to process cmd
+ * @done: Command completed indicator, poll for completion
+ * bit 0 == 1 when command is complete
+ * @cmd: Opcode-specific command bytes
+ * @comp: Opcode-specific response bytes
+ * @rsvd: Struct padding
+ * @data: Opcode-specific side-data
+ */
+struct pds_core_dev_cmd_regs {
+ u32 doorbell;
+ u32 done;
+ union pds_core_dev_cmd cmd;
+ union pds_core_dev_comp comp;
+ u8 rsvd[48];
+ u32 data[478];
+} __packed;
+
+/**
+ * struct pds_core_dev_regs - Device register format for bar 0 page 0
+ * @info: Device info registers
+ * @devcmd: Device command registers
+ */
+struct pds_core_dev_regs {
+ struct pds_core_dev_info_regs info;
+ struct pds_core_dev_cmd_regs devcmd;
+} __packed;
+
+#ifndef __CHECKER__
+static_assert(sizeof(struct pds_core_drv_identity) <= 1912);
+static_assert(sizeof(struct pds_core_dev_identity) <= 1912);
+static_assert(sizeof(union pds_core_dev_cmd) == 64);
+static_assert(sizeof(union pds_core_dev_comp) == 16);
+static_assert(sizeof(struct pds_core_dev_info_regs) == 2048);
+static_assert(sizeof(struct pds_core_dev_cmd_regs) == 2048);
+static_assert(sizeof(struct pds_core_dev_regs) == 4096);
+#endif /* __CHECKER__ */
+
+#endif /* _PDS_CORE_IF_H_ */
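Tying the doorbell/done register description together, a simplified devcmd sequence might look like the sketch below; it assumes PDS_CORE_DEVCMD_TIMEOUT is in seconds, and the driver's real implementation is more involved:

static int example_dev_cmd(struct pds_core_dev_cmd_regs __iomem *regs,
			   union pds_core_dev_cmd *cmd)
{
	unsigned long expires = jiffies + PDS_CORE_DEVCMD_TIMEOUT * HZ;

	memcpy_toio(&regs->cmd, cmd, sizeof(*cmd));
	iowrite32(0, &regs->done);
	iowrite32(1, &regs->doorbell);	/* write 1 to start processing */

	while (!(ioread32(&regs->done) & PDS_CORE_DEV_CMD_DONE)) {
		if (time_after(jiffies, expires))
			return -ETIMEDOUT;
		usleep_range(100, 200);
	}

	return 0;
}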
diff --git a/include/linux/pds/pds_intr.h b/include/linux/pds/pds_intr.h
new file mode 100644
index 000000000000..56277c37248c
--- /dev/null
+++ b/include/linux/pds/pds_intr.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
+
+#ifndef _PDS_INTR_H_
+#define _PDS_INTR_H_
+
+/*
+ * Interrupt control register
+ * @coal_init: Coalescing timer initial value, in
+ * device units. Use @identity->intr_coal_mult
+ * and @identity->intr_coal_div to convert from
+ * usecs to device units:
+ *
+ * coal_init = coal_usecs * coal_mult / coal_div
+ *
+ * When an interrupt is sent the interrupt
+ * coalescing timer current value
+ * (@coalescing_curr) is initialized with this
+ * value and begins counting down. No more
+ * interrupts are sent until the coalescing
+ * timer reaches 0. When @coal_init=0
+ * interrupt coalescing is effectively disabled
+ * and every interrupt assert results in an
+ * interrupt. Reset value: 0
+ * @mask: Interrupt mask. When @mask=1 the interrupt
+ * resource will not send an interrupt. When
+ * @mask=0 the interrupt resource will send an
+ * interrupt if an interrupt event is pending
+ * or on the next interrupt assertion event.
+ * Reset value: 1
+ * @credits: Interrupt credits. This register indicates
+ * how many interrupt events the hardware has
+ * sent. When written by software this
+ * register atomically decrements @credits
+ * by the value written. When @credits
+ * becomes 0 then the "pending interrupt" bit
+ * in the Interrupt Status register is cleared
+ * by the hardware and any pending but unsent
+ * interrupts are cleared.
+ * !!!IMPORTANT!!! This is a signed register.
+ * @flags: Interrupt control flags
+ * @unmask -- When this bit is written with a 1
+ * the interrupt resource will set mask=0.
+ * @coal_timer_reset -- When this
+ * bit is written with a 1 the
+ * @coalescing_curr will be reloaded with
+ * @coal_init to reset the coalescing
+ * timer.
+ * @mask_on_assert: Automatically mask on assertion. When
+ * @mask_on_assert=1 the interrupt resource
+ * will set @mask=1 whenever an interrupt is
+ * sent. When using interrupts in Legacy
+ * Interrupt mode the driver must select
+ * @mask_on_assert=0 for proper interrupt
+ * operation.
+ * @coalescing_curr: Coalescing timer current value, in
+ * microseconds. When this value reaches 0
+ * the interrupt resource is again eligible to
+ * send an interrupt. If an interrupt event
+ * is already pending when @coalescing_curr
+ * reaches 0 the pending interrupt will be
+ * sent, otherwise an interrupt will be sent
+ * on the next interrupt assertion event.
+ */
+struct pds_core_intr {
+ u32 coal_init;
+ u32 mask;
+ u16 credits;
+ u16 flags;
+#define PDS_CORE_INTR_F_UNMASK 0x0001
+#define PDS_CORE_INTR_F_TIMER_RESET 0x0002
+ u32 mask_on_assert;
+ u32 coalescing_curr;
+ u32 rsvd6[3];
+};
+
+#ifndef __CHECKER__
+static_assert(sizeof(struct pds_core_intr) == 32);
+#endif /* __CHECKER__ */
+
+#define PDS_CORE_INTR_CTRL_REGS_MAX 2048
+#define PDS_CORE_INTR_CTRL_COAL_MAX 0x3F
+#define PDS_CORE_INTR_INDEX_NOT_ASSIGNED -1
+
+struct pds_core_intr_status {
+ u32 status[2];
+};
+
+/**
+ * enum pds_core_intr_mask_vals - valid values for mask and mask_assert.
+ * @PDS_CORE_INTR_MASK_CLEAR: unmask interrupt.
+ * @PDS_CORE_INTR_MASK_SET: mask interrupt.
+ */
+enum pds_core_intr_mask_vals {
+ PDS_CORE_INTR_MASK_CLEAR = 0,
+ PDS_CORE_INTR_MASK_SET = 1,
+};
+
+/**
+ * enum pds_core_intr_credits_bits - Bitwise composition of credits values.
+ * @PDS_CORE_INTR_CRED_COUNT: bit mask of credit count, no shift needed.
+ * @PDS_CORE_INTR_CRED_COUNT_SIGNED: bit mask of credit count, including sign bit.
+ * @PDS_CORE_INTR_CRED_UNMASK: unmask the interrupt.
+ * @PDS_CORE_INTR_CRED_RESET_COALESCE: reset the coalesce timer.
+ * @PDS_CORE_INTR_CRED_REARM: unmask the interrupt and reset the coalesce timer.
+ */
+enum pds_core_intr_credits_bits {
+ PDS_CORE_INTR_CRED_COUNT = 0x7fffu,
+ PDS_CORE_INTR_CRED_COUNT_SIGNED = 0xffffu,
+ PDS_CORE_INTR_CRED_UNMASK = 0x10000u,
+ PDS_CORE_INTR_CRED_RESET_COALESCE = 0x20000u,
+ PDS_CORE_INTR_CRED_REARM = (PDS_CORE_INTR_CRED_UNMASK |
+ PDS_CORE_INTR_CRED_RESET_COALESCE),
+};
+
+static inline void
+pds_core_intr_coal_init(struct pds_core_intr __iomem *intr_ctrl, u32 coal)
+{
+ iowrite32(coal, &intr_ctrl->coal_init);
+}
+
+static inline void
+pds_core_intr_mask(struct pds_core_intr __iomem *intr_ctrl, u32 mask)
+{
+ iowrite32(mask, &intr_ctrl->mask);
+}
+
+static inline void
+pds_core_intr_credits(struct pds_core_intr __iomem *intr_ctrl,
+ u32 cred, u32 flags)
+{
+ if (WARN_ON_ONCE(cred > PDS_CORE_INTR_CRED_COUNT)) {
+ cred = ioread32(&intr_ctrl->credits);
+ cred &= PDS_CORE_INTR_CRED_COUNT_SIGNED;
+ }
+
+ iowrite32(cred | flags, &intr_ctrl->credits);
+}
+
+static inline void
+pds_core_intr_clean_flags(struct pds_core_intr __iomem *intr_ctrl, u32 flags)
+{
+ u32 cred;
+
+ cred = ioread32(&intr_ctrl->credits);
+ cred &= PDS_CORE_INTR_CRED_COUNT_SIGNED;
+ cred |= flags;
+ iowrite32(cred, &intr_ctrl->credits);
+}
+
+static inline void
+pds_core_intr_clean(struct pds_core_intr __iomem *intr_ctrl)
+{
+ pds_core_intr_clean_flags(intr_ctrl, PDS_CORE_INTR_CRED_RESET_COALESCE);
+}
+
+static inline void
+pds_core_intr_mask_assert(struct pds_core_intr __iomem *intr_ctrl, u32 mask)
+{
+ iowrite32(mask, &intr_ctrl->mask_on_assert);
+}
+
+#endif /* _PDS_INTR_H_ */
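To illustrate the credits flow, a hedged interrupt-handler sketch: return the number of serviced events and re-arm in a single register write (the qcq structure and service helper are hypothetical):

static irqreturn_t example_isr(int irq, void *data)
{
	struct example_qcq *qcq = data;
	u32 nwork;

	nwork = example_service_completions(qcq);

	/* give back the serviced credits, unmask, and restart the
	 * coalescing timer in one write
	 */
	pds_core_intr_credits(qcq->intr_ctrl, nwork,
			      PDS_CORE_INTR_CRED_REARM);

	return IRQ_HANDLED;
}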
diff --git a/include/linux/pe.h b/include/linux/pe.h
index 143ce75be5f0..5e1e11540870 100644
--- a/include/linux/pe.h
+++ b/include/linux/pe.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2011 Red Hat, Inc.
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
* Author(s): Peter Jones <pjones@redhat.com>
*/
#ifndef __LINUX_PE_H
@@ -21,6 +10,34 @@
#include <linux/types.h>
+/*
+ * Linux EFI stub v1.0 adds the following functionality:
+ * - Loading initrd from the LINUX_EFI_INITRD_MEDIA_GUID device path,
+ * - Loading/starting the kernel from firmware that targets a different
+ * machine type, via the entrypoint exposed in the .compat PE/COFF section.
+ *
+ * The recommended way of loading and starting v1.0 or later kernels is to use
+ * the LoadImage() and StartImage() EFI boot services, and expose the initrd
+ * via the LINUX_EFI_INITRD_MEDIA_GUID device path.
+ *
+ * Versions older than v1.0 support initrd loading via the image load options
+ * (using initrd=, limited to the volume from which the kernel itself was
+ * loaded), or via arch specific means (bootparams, DT, etc).
+ *
+ * On x86, LoadImage() and StartImage() can be omitted if the EFI handover
+ * protocol is implemented, which can be inferred from the version,
+ * handover_offset and xloadflags fields in the bootparams structure.
+ */
+#define LINUX_EFISTUB_MAJOR_VERSION 0x1
+#define LINUX_EFISTUB_MINOR_VERSION 0x1
+
+/*
+ * LINUX_PE_MAGIC appears at offset 0x38 into the MS-DOS header of EFI bootable
+ * Linux kernel images that target the architecture as specified by the PE/COFF
+ * header machine type field.
+ */
+#define LINUX_PE_MAGIC 0x818223cd
+
#define MZ_MAGIC 0x5a4d /* "MZ" */
#define PE_MAGIC 0x00004550 /* "PE\0\0" */
@@ -45,6 +62,9 @@
#define IMAGE_FILE_MACHINE_POWERPC 0x01f0
#define IMAGE_FILE_MACHINE_POWERPCFP 0x01f1
#define IMAGE_FILE_MACHINE_R4000 0x0166
+#define IMAGE_FILE_MACHINE_RISCV32 0x5032
+#define IMAGE_FILE_MACHINE_RISCV64 0x5064
+#define IMAGE_FILE_MACHINE_RISCV128 0x5128
#define IMAGE_FILE_MACHINE_SH3 0x01a2
#define IMAGE_FILE_MACHINE_SH3DSP 0x01a3
#define IMAGE_FILE_MACHINE_SH3E 0x01a4
@@ -52,6 +72,8 @@
#define IMAGE_FILE_MACHINE_SH5 0x01a8
#define IMAGE_FILE_MACHINE_THUMB 0x01c2
#define IMAGE_FILE_MACHINE_WCEMIPSV2 0x0169
+#define IMAGE_FILE_MACHINE_LOONGARCH32 0x6232
+#define IMAGE_FILE_MACHINE_LOONGARCH64 0x6264
/* flags */
#define IMAGE_FILE_RELOCS_STRIPPED 0x0001
@@ -96,6 +118,9 @@
#define IMAGE_DLLCHARACTERISTICS_WDM_DRIVER 0x2000
#define IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE 0x8000
+#define IMAGE_DLLCHARACTERISTICS_EX_CET_COMPAT 0x0001
+#define IMAGE_DLLCHARACTERISTICS_EX_FORWARD_CFI_COMPAT 0x0040
+
/* they actually defined 0x00000000 as well, but I think we'll skip that one. */
#define IMAGE_SCN_RESERVED_0 0x00000001
#define IMAGE_SCN_RESERVED_1 0x00000002
@@ -143,6 +168,7 @@
#define IMAGE_SCN_MEM_WRITE 0x80000000 /* writeable */
#define IMAGE_DEBUG_TYPE_CODEVIEW 2
+#define IMAGE_DEBUG_TYPE_EX_DLLCHARACTERISTICS 20
#ifndef __ASSEMBLY__
@@ -166,7 +192,7 @@ struct mz_hdr {
uint16_t oem_info; /* oem specific */
uint16_t reserved1[10]; /* reserved */
uint32_t peaddr; /* address of pe header */
- char message[64]; /* message to print */
+ char message[]; /* message to print */
};
struct mz_reloc {
diff --git a/include/linux/peci-cpu.h b/include/linux/peci-cpu.h
new file mode 100644
index 000000000000..ff8ae9c26c80
--- /dev/null
+++ b/include/linux/peci-cpu.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2021 Intel Corporation */
+
+#ifndef __LINUX_PECI_CPU_H
+#define __LINUX_PECI_CPU_H
+
+#include <linux/types.h>
+
+#include "../../arch/x86/include/asm/intel-family.h"
+
+#define PECI_PCS_PKG_ID 0 /* Package Identifier Read */
+#define PECI_PKG_ID_CPU_ID 0x0000 /* CPUID Info */
+#define PECI_PKG_ID_PLATFORM_ID 0x0001 /* Platform ID */
+#define PECI_PKG_ID_DEVICE_ID 0x0002 /* Uncore Device ID */
+#define PECI_PKG_ID_MAX_THREAD_ID 0x0003 /* Max Thread ID */
+#define PECI_PKG_ID_MICROCODE_REV 0x0004 /* CPU Microcode Update Revision */
+#define PECI_PKG_ID_MCA_ERROR_LOG 0x0005 /* Machine Check Status */
+#define PECI_PCS_MODULE_TEMP 9 /* Per Core DTS Temperature Read */
+#define PECI_PCS_THERMAL_MARGIN 10 /* DTS thermal margin */
+#define PECI_PCS_DDR_DIMM_TEMP 14 /* DDR DIMM Temperature */
+#define PECI_PCS_TEMP_TARGET 16 /* Temperature Target Read */
+#define PECI_PCS_TDP_UNITS 30 /* Units for power/energy registers */
+
+struct peci_device;
+
+int peci_temp_read(struct peci_device *device, s16 *temp_raw);
+
+int peci_pcs_read(struct peci_device *device, u8 index,
+ u16 param, u32 *data);
+
+int peci_pci_local_read(struct peci_device *device, u8 bus, u8 dev,
+ u8 func, u16 reg, u32 *data);
+
+int peci_ep_pci_local_read(struct peci_device *device, u8 seg,
+ u8 bus, u8 dev, u8 func, u16 reg, u32 *data);
+
+int peci_mmio_read(struct peci_device *device, u8 bar, u8 seg,
+ u8 bus, u8 dev, u8 func, u64 address, u32 *data);
+
+#endif /* __LINUX_PECI_CPU_H */
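A short usage sketch for the read helpers above; note the 1/64 degree C scaling of the raw value is an assumption made for illustration, not something this header specifies:

static int example_read_margin_mc(struct peci_device *device, long *temp_mc)
{
	s16 raw;
	int ret;

	ret = peci_temp_read(device, &raw);
	if (ret)
		return ret;

	/* assumed: raw is a signed thermal margin in 1/64 deg C units */
	*temp_mc = (long)raw * 1000 / 64;

	return 0;
}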
diff --git a/include/linux/peci.h b/include/linux/peci.h
new file mode 100644
index 000000000000..06e6ef935297
--- /dev/null
+++ b/include/linux/peci.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2018-2021 Intel Corporation */
+
+#ifndef __LINUX_PECI_H
+#define __LINUX_PECI_H
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/*
+ * Currently we don't support any PECI command over 32 bytes.
+ */
+#define PECI_REQUEST_MAX_BUF_SIZE 32
+
+struct peci_controller;
+struct peci_request;
+
+/**
+ * struct peci_controller_ops - PECI controller specific methods
+ * @xfer: PECI transfer function
+ *
+ * PECI controllers may have different hardware interfaces - the drivers
+ * implementing PECI controllers can use this structure to abstract away those
+ * differences by exposing a common interface for PECI core.
+ */
+struct peci_controller_ops {
+ int (*xfer)(struct peci_controller *controller, u8 addr, struct peci_request *req);
+};
+
+/**
+ * struct peci_controller - PECI controller
+ * @dev: device object to register PECI controller to the device model
+ * @ops: pointer to device specific controller operations
+ * @bus_lock: lock used to protect multiple callers
+ * @id: PECI controller ID
+ *
+ * PECI controllers usually connect to their drivers using a non-PECI bus,
+ * such as the platform bus.
+ * Each PECI controller can communicate with one or more PECI devices.
+ */
+struct peci_controller {
+ struct device dev;
+ struct peci_controller_ops *ops;
+ struct mutex bus_lock; /* held for the duration of xfer */
+ u8 id;
+};
+
+struct peci_controller *devm_peci_controller_add(struct device *parent,
+ struct peci_controller_ops *ops);
+
+static inline struct peci_controller *to_peci_controller(void *d)
+{
+ return container_of(d, struct peci_controller, dev);
+}
+
+/**
+ * struct peci_device - PECI device
+ * @dev: device object to register PECI device to the device model
+ * @controller: manages the bus segment hosting this PECI device
+ * @info: PECI device characteristics
+ * @info.family: device family
+ * @info.model: device model
+ * @info.peci_revision: PECI revision supported by the PECI device
+ * @info.socket_id: the socket ID represented by the PECI device
+ * @addr: address used on the PECI bus connected to the parent controller
+ * @deleted: indicates that PECI device was already deleted
+ *
+ * A peci_device identifies a single device (i.e. CPU) connected to a PECI bus.
+ * The behaviour exposed to the rest of the system is defined by the PECI driver
+ * managing the device.
+ */
+struct peci_device {
+ struct device dev;
+ struct {
+ u16 family;
+ u8 model;
+ u8 peci_revision;
+ u8 socket_id;
+ } info;
+ u8 addr;
+ bool deleted;
+};
+
+static inline struct peci_device *to_peci_device(struct device *d)
+{
+ return container_of(d, struct peci_device, dev);
+}
+
+/**
+ * struct peci_request - PECI request
+ * @device: PECI device to which the request is sent
+ * @tx: TX buffer specific data
+ * @tx.buf: TX buffer
+ * @tx.len: transfer data length in bytes
+ * @rx: RX buffer specific data
+ * @rx.buf: RX buffer
+ * @rx.len: received data length in bytes
+ *
+ * A peci_request represents a request issued by the PECI originator (TX)
+ * and a response received from the PECI responder (RX).
+ */
+struct peci_request {
+ struct peci_device *device;
+ struct {
+ u8 buf[PECI_REQUEST_MAX_BUF_SIZE];
+ u8 len;
+ } rx, tx;
+};
+
+#endif /* __LINUX_PECI_H */
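For context, a minimal controller-driver sketch built on these ops (platform-bus probe assumed; the transfer body is elided):

static int example_xfer(struct peci_controller *controller, u8 addr,
			struct peci_request *req)
{
	/* drive the bus: send req->tx.buf/req->tx.len,
	 * then fill req->rx.buf and req->rx.len
	 */
	return 0;
}

static struct peci_controller_ops example_ops = {
	.xfer = example_xfer,
};

static int example_probe(struct platform_device *pdev)
{
	struct peci_controller *controller;

	controller = devm_peci_controller_add(&pdev->dev, &example_ops);

	return PTR_ERR_OR_ZERO(controller);
}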
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 8f16299ca068..e60727be79c4 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/percpu-defs.h - basic definitions for percpu areas
*
@@ -50,7 +51,7 @@
PER_CPU_ATTRIBUTES
#define __PCPU_DUMMY_ATTRS \
- __attribute__((section(".discard"), unused))
+ __section(".discard") __attribute__((unused))
/*
* s390 and alpha modules require percpu variables to be defined as
@@ -91,8 +92,7 @@
extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
extern __PCPU_ATTRS(sec) __typeof__(type) name; \
- __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
- __typeof__(type) name
+ __PCPU_ATTRS(sec) __weak __typeof__(type) name
#else
/*
* Normal declaration and definition macros.
@@ -101,8 +101,7 @@
extern __PCPU_ATTRS(sec) __typeof__(type) name
#define DEFINE_PER_CPU_SECTION(type, name, sec) \
- __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES \
- __typeof__(type) name
+ __PCPU_ATTRS(sec) __typeof__(type) name
#endif
/*
@@ -173,6 +172,20 @@
DEFINE_PER_CPU_SECTION(type, name, "..read_mostly")
/*
+ * Declaration/definition used for per-CPU variables that should be accessed
+ * as decrypted when memory encryption is enabled in the guest.
+ */
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+#define DECLARE_PER_CPU_DECRYPTED(type, name) \
+ DECLARE_PER_CPU_SECTION(type, name, "..decrypted")
+
+#define DEFINE_PER_CPU_DECRYPTED(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, "..decrypted")
+#else
+#define DEFINE_PER_CPU_DECRYPTED(type, name) DEFINE_PER_CPU(type, name)
+#endif
+
+/*
* Intermodule exports for per-CPU variables. sparse forgets about
* address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to
* noop if __CHECKER__.
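For illustration, a hedged usage sketch of the decrypted per-CPU machinery added above; struct example_hv_page is a hypothetical type shared with the hypervisor:

struct example_hv_page {
	u64 msg[64];	/* hypothetical data the hypervisor must read */
};

/* lands in the "..decrypted" per-CPU section when CONFIG_AMD_MEM_ENCRYPT=y,
 * otherwise behaves like a plain DEFINE_PER_CPU variable
 */
static DEFINE_PER_CPU_DECRYPTED(struct example_hv_page, example_hv_page);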
@@ -297,7 +310,7 @@ extern void __bad_size_call_parameter(void);
#ifdef CONFIG_DEBUG_PREEMPT
extern void __this_cpu_preempt_check(const char *op);
#else
-static inline void __this_cpu_preempt_check(const char *op) { }
+static __always_inline void __this_cpu_preempt_check(const char *op) { }
#endif
#define __pcpu_size_call_return(stem, variable) \
@@ -399,7 +412,7 @@ do { \
* instead.
*
* If there is no other protection through preempt disable and/or disabling
- * interupts then one of these RMW operations can show unexpected behavior
+ * interrupts then one of these RMW operations can show unexpected behavior
* because the execution thread was rescheduled on another processor or an
* interrupt occurred and the same percpu variable was modified from the
* interrupt context.
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index c13dceb87b60..d73a1c08c3e3 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Percpu refcounts:
* (C) 2012 Google, Inc.
@@ -29,10 +30,14 @@
* calls io_destroy() or the process exits.
*
* In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
- * calls percpu_ref_kill(), then hlist_del_rcu() and synchronize_rcu() to remove
- * the kioctx from the proccess's list of kioctxs - after that, there can't be
- * any new users of the kioctx (from lookup_ioctx()) and it's then safe to drop
- * the initial ref with percpu_ref_put().
+ * removes the kioctx from the process's table of kioctxs and kills the percpu_ref.
+ * After that, there can't be any new users of the kioctx (from lookup_ioctx())
+ * and it's then safe to drop the initial ref with percpu_ref_put().
+ *
+ * Note that the free path, free_ioctx(), needs to go through explicit call_rcu()
+ * to synchronize with RCU protected lookup_ioctx(). percpu_ref operations don't
+ * imply RCU grace periods of any kind and if a user wants to combine percpu_ref
+ * with RCU protection, it must be done explicitly.
*
* Code that does a two stage shutdown like this often needs some kind of
* explicit synchronization to ensure the initial refcount can only be dropped
@@ -46,9 +51,9 @@
#define _LINUX_PERCPU_REFCOUNT_H
#include <linux/atomic.h>
-#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
+#include <linux/types.h>
#include <linux/gfp.h>
struct percpu_ref;
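To ground the two-stage shutdown discussion above, a minimal percpu_ref lifecycle sketch (all names hypothetical):

struct example_ctx {
	struct percpu_ref ref;
	/* ... payload ... */
};

static void example_release(struct percpu_ref *ref)
{
	struct example_ctx *ctx = container_of(ref, struct example_ctx, ref);

	kfree(ctx);
}

/* setup:    percpu_ref_init(&ctx->ref, example_release, 0, GFP_KERNEL);
 * users:    percpu_ref_tryget_live(&ctx->ref) ... percpu_ref_put(&ctx->ref);
 * shutdown: percpu_ref_kill(&ctx->ref) exactly once; example_release()
 *           runs after the last reference is dropped.
 */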
@@ -70,27 +75,47 @@ enum {
* operation using percpu_ref_switch_to_percpu(). If initialized
* with this flag, the ref will stay in atomic mode until
* percpu_ref_switch_to_percpu() is invoked on it.
+ * Implies ALLOW_REINIT.
*/
PERCPU_REF_INIT_ATOMIC = 1 << 0,
/*
* Start dead w/ ref == 0 in atomic mode. Must be revived with
- * percpu_ref_reinit() before used. Implies INIT_ATOMIC.
+ * percpu_ref_reinit() before used. Implies INIT_ATOMIC and
+ * ALLOW_REINIT.
*/
PERCPU_REF_INIT_DEAD = 1 << 1,
+
+ /*
+ * Allow switching from atomic mode to percpu mode.
+ */
+ PERCPU_REF_ALLOW_REINIT = 1 << 2,
};
-struct percpu_ref {
+struct percpu_ref_data {
atomic_long_t count;
+ percpu_ref_func_t *release;
+ percpu_ref_func_t *confirm_switch;
+ bool force_atomic:1;
+ bool allow_reinit:1;
+ struct rcu_head rcu;
+ struct percpu_ref *ref;
+};
+
+struct percpu_ref {
/*
* The low bit of the pointer indicates whether the ref is in percpu
* mode; if set, then get/put will manipulate the atomic_t.
*/
unsigned long percpu_count_ptr;
- percpu_ref_func_t *release;
- percpu_ref_func_t *confirm_switch;
- bool force_atomic:1;
- struct rcu_head rcu;
+
+ /*
+	 * 'percpu_ref' is often embedded into a user structure, and only
+	 * 'percpu_count_ptr' is required in the fast path; the other fields
+	 * are moved into 'percpu_ref_data' to reduce the memory footprint
+	 * of the fast path.
+ */
+ struct percpu_ref_data *data;
};
int __must_check percpu_ref_init(struct percpu_ref *ref,
@@ -103,7 +128,9 @@ void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill);
+void percpu_ref_resurrect(struct percpu_ref *ref);
void percpu_ref_reinit(struct percpu_ref *ref);
+bool percpu_ref_is_zero(struct percpu_ref *ref);
/**
* percpu_ref_kill - drop the initial ref
@@ -112,8 +139,10 @@ void percpu_ref_reinit(struct percpu_ref *ref);
* Must be used to drop the initial ref on a percpu refcount; must be called
* precisely once before shutdown.
*
- * Puts @ref in non percpu mode, then does a call_rcu() before gathering up the
- * percpu counters and dropping the initial ref.
+ * Switches @ref into atomic mode before gathering up the percpu counters
+ * and dropping the initial ref.
+ *
+ * There are no implied RCU grace periods between kill and release.
*/
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
@@ -138,12 +167,12 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
* when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
* between contaminating the pointer value, meaning that
* READ_ONCE() is required when fetching it.
+ *
+ * The dependency ordering from the READ_ONCE() pairs
+ * with smp_store_release() in __percpu_ref_switch_to_percpu().
*/
percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
- /* paired with smp_store_release() in __percpu_ref_switch_to_percpu() */
- smp_read_barrier_depends();
-
/*
* Theoretically, the following could test just ATOMIC; however,
* then we'd have to mask off DEAD separately as DEAD may be
@@ -170,21 +199,21 @@ static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
unsigned long __percpu *percpu_count;
- rcu_read_lock_sched();
+ rcu_read_lock();
if (__ref_is_percpu(ref, &percpu_count))
this_cpu_add(*percpu_count, nr);
else
- atomic_long_add(nr, &ref->count);
+ atomic_long_add(nr, &ref->data->count);
- rcu_read_unlock_sched();
+ rcu_read_unlock();
}
/**
* percpu_ref_get - increment a percpu refcount
* @ref: percpu_ref to get
*
- * Analagous to atomic_long_inc().
+ * Analogous to atomic_long_inc().
*
* This function is safe to call as long as @ref is between init and exit.
*/
@@ -194,30 +223,68 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
}
/**
- * percpu_ref_tryget - try to increment a percpu refcount
+ * percpu_ref_tryget_many - try to increment a percpu refcount
* @ref: percpu_ref to try-get
+ * @nr: number of references to get
*
- * Increment a percpu refcount unless its count already reached zero.
+ * Increment a percpu refcount by @nr unless its count already reached zero.
* Returns %true on success; %false on failure.
*
* This function is safe to call as long as @ref is between init and exit.
*/
-static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+static inline bool percpu_ref_tryget_many(struct percpu_ref *ref,
+ unsigned long nr)
{
unsigned long __percpu *percpu_count;
bool ret;
- rcu_read_lock_sched();
+ rcu_read_lock();
if (__ref_is_percpu(ref, &percpu_count)) {
- this_cpu_inc(*percpu_count);
+ this_cpu_add(*percpu_count, nr);
ret = true;
} else {
- ret = atomic_long_inc_not_zero(&ref->count);
+ ret = atomic_long_add_unless(&ref->data->count, nr, 0);
}
- rcu_read_unlock_sched();
+ rcu_read_unlock();
+
+ return ret;
+}
+
+/**
+ * percpu_ref_tryget - try to increment a percpu refcount
+ * @ref: percpu_ref to try-get
+ *
+ * Increment a percpu refcount unless its count already reached zero.
+ * Returns %true on success; %false on failure.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+{
+ return percpu_ref_tryget_many(ref, 1);
+}
+
+/**
+ * percpu_ref_tryget_live_rcu - same as percpu_ref_tryget_live() but the
+ * caller is responsible for taking RCU.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget_live_rcu(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count;
+ bool ret = false;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ if (likely(__ref_is_percpu(ref, &percpu_count))) {
+ this_cpu_inc(*percpu_count);
+ ret = true;
+ } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
+ ret = atomic_long_inc_not_zero(&ref->data->count);
+ }
return ret;
}
@@ -238,20 +305,11 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
*/
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
- unsigned long __percpu *percpu_count;
bool ret = false;
- rcu_read_lock_sched();
-
- if (__ref_is_percpu(ref, &percpu_count)) {
- this_cpu_inc(*percpu_count);
- ret = true;
- } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
- ret = atomic_long_inc_not_zero(&ref->count);
- }
-
- rcu_read_unlock_sched();
-
+ rcu_read_lock();
+ ret = percpu_ref_tryget_live_rcu(ref);
+ rcu_read_unlock();
return ret;
}
@@ -269,14 +327,14 @@ static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
unsigned long __percpu *percpu_count;
- rcu_read_lock_sched();
+ rcu_read_lock();
if (__ref_is_percpu(ref, &percpu_count))
this_cpu_sub(*percpu_count, nr);
- else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
- ref->release(ref);
+ else if (unlikely(atomic_long_sub_and_test(nr, &ref->data->count)))
+ ref->data->release(ref);
- rcu_read_unlock_sched();
+ rcu_read_unlock();
}
/**
@@ -307,21 +365,4 @@ static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}
-/**
- * percpu_ref_is_zero - test whether a percpu refcount reached zero
- * @ref: percpu_ref to test
- *
- * Returns %true if @ref reached zero.
- *
- * This function is safe to call as long as @ref is between init and exit.
- */
-static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
-{
- unsigned long __percpu *percpu_count;
-
- if (__ref_is_percpu(ref, &percpu_count))
- return false;
- return !atomic_long_read(&ref->count);
-}
-
#endif
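A minimal sketch of how the reworked pieces combine, assuming a hypothetical my_obj user embedding the ref; PERCPU_REF_ALLOW_REINIT is assumed to be what permits the later percpu_ref_resurrect():

	struct my_obj {
		struct percpu_ref ref;	/* fast path touches only percpu_count_ptr */
	};

	static void my_obj_release(struct percpu_ref *ref)
	{
		struct my_obj *obj = container_of(ref, struct my_obj, ref);

		kfree(obj);
	}

	static int my_obj_setup(struct my_obj *obj)
	{
		/* keep the percpu counters around so the ref can be revived */
		return percpu_ref_init(&obj->ref, my_obj_release,
				       PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	}

	static void my_obj_abort_shutdown(struct my_obj *obj)
	{
		/* ref was percpu_ref_kill()ed but not yet exhausted */
		percpu_ref_resurrect(&obj->ref);
	}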
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 93664f022ecf..36b942b67b7d 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -1,38 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H
#include <linux/atomic.h>
-#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/rcuwait.h>
+#include <linux/wait.h>
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>
struct percpu_rw_semaphore {
struct rcu_sync rss;
unsigned int __percpu *read_count;
- struct rw_semaphore rw_sem; /* slowpath */
- struct rcuwait writer; /* blocked writer */
- int readers_block;
+ struct rcuwait writer;
+ wait_queue_head_t waiters;
+ atomic_t block;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
};
-#define DEFINE_STATIC_PERCPU_RWSEM(name) \
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname },
+#else
+#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
+#define __DEFINE_PERCPU_RWSEM(name, is_static) \
static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name); \
-static struct percpu_rw_semaphore name = { \
- .rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC), \
+is_static struct percpu_rw_semaphore name = { \
+ .rss = __RCU_SYNC_INITIALIZER(name.rss), \
.read_count = &__percpu_rwsem_rc_##name, \
- .rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \
.writer = __RCUWAIT_INITIALIZER(name.writer), \
+ .waiters = __WAIT_QUEUE_HEAD_INITIALIZER(name.waiters), \
+ .block = ATOMIC_INIT(0), \
+ __PERCPU_RWSEM_DEP_MAP_INIT(name) \
}
-extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
-extern void __percpu_up_read(struct percpu_rw_semaphore *);
+#define DEFINE_PERCPU_RWSEM(name) \
+ __DEFINE_PERCPU_RWSEM(name, /* not static */)
+#define DEFINE_STATIC_PERCPU_RWSEM(name) \
+ __DEFINE_PERCPU_RWSEM(name, static)
+
+extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool);
-static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
+static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
might_sleep();
- rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);
+ rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
preempt_disable();
/*
@@ -40,35 +56,31 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *
* cannot both change sem->state from readers_fast and start checking
* counters while we are here. So if we see !sem->state, we know that
* the writer won't be checking until we're past the preempt_enable()
- * and that one the synchronize_sched() is done, the writer will see
+ * and that once the synchronize_rcu() is done, the writer will see
 * anything we did within this RCU-sched read-side critical section.
*/
- __this_cpu_inc(*sem->read_count);
- if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+ if (likely(rcu_sync_is_idle(&sem->rss)))
+ this_cpu_inc(*sem->read_count);
+ else
__percpu_down_read(sem, false); /* Unconditional memory barrier */
- barrier();
/*
- * The barrier() prevents the compiler from
+ * The preempt_enable() prevents the compiler from
* bleeding the critical section out.
*/
-}
-
-static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
-{
- percpu_down_read_preempt_disable(sem);
preempt_enable();
}
-static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
+static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
- int ret = 1;
+ bool ret = true;
preempt_disable();
/*
* Same as in percpu_down_read().
*/
- __this_cpu_inc(*sem->read_count);
- if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+ if (likely(rcu_sync_is_idle(&sem->rss)))
+ this_cpu_inc(*sem->read_count);
+ else
ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
preempt_enable();
/*
@@ -77,39 +89,47 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
*/
if (ret)
- rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_);
+ rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
return ret;
}
-static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
+static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
- /*
- * The barrier() prevents the compiler from
- * bleeding the critical section out.
- */
- barrier();
+ rwsem_release(&sem->dep_map, _RET_IP_);
+
+ preempt_disable();
/*
* Same as in percpu_down_read().
*/
- if (likely(rcu_sync_is_idle(&sem->rss)))
- __this_cpu_dec(*sem->read_count);
- else
- __percpu_up_read(sem); /* Unconditional memory barrier */
+ if (likely(rcu_sync_is_idle(&sem->rss))) {
+ this_cpu_dec(*sem->read_count);
+ } else {
+ /*
+ * slowpath; reader will only ever wake a single blocked
+ * writer.
+ */
+ smp_mb(); /* B matches C */
+ /*
+ * In other words, if they see our decrement (presumably to
+ * aggregate zero, as that is the only time it matters) they
+ * will also see our critical section.
+ */
+ this_cpu_dec(*sem->read_count);
+ rcuwait_wake_up(&sem->writer);
+ }
preempt_enable();
-
- rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
-}
-
-static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
-{
- preempt_disable();
- percpu_up_read_preempt_enable(sem);
}
+extern bool percpu_is_read_locked(struct percpu_rw_semaphore *);
extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);
+static inline bool percpu_is_write_locked(struct percpu_rw_semaphore *sem)
+{
+ return atomic_read(&sem->block);
+}
+
extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
const char *, struct lock_class_key *);
@@ -121,25 +141,19 @@ extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
__percpu_init_rwsem(sem, #sem, &rwsem_key); \
})
-#define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)
-
-#define percpu_rwsem_assert_held(sem) \
- lockdep_assert_held(&(sem)->rw_sem)
+#define percpu_rwsem_is_held(sem) lockdep_is_held(sem)
+#define percpu_rwsem_assert_held(sem) lockdep_assert_held(sem)
static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
bool read, unsigned long ip)
{
- lock_release(&sem->rw_sem.dep_map, 1, ip);
-#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
- if (!read)
- sem->rw_sem.owner = NULL;
-#endif
+ lock_release(&sem->dep_map, ip);
}
static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
bool read, unsigned long ip)
{
- lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip);
+ lock_acquire(&sem->dep_map, 0, 1, read, 1, NULL, ip);
}
#endif
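A minimal sketch of the slimmed-down semaphore in use (my_sem and both functions are hypothetical): readers pay only a per-cpu increment while the rcu_sync state is idle, and writers set ->block and sleep on ->writer/->waiters:

	DEFINE_STATIC_PERCPU_RWSEM(my_sem);

	static void reader_path(void)
	{
		percpu_down_read(&my_sem);	/* fast path: this_cpu_inc() */
		/* read-side critical section */
		percpu_up_read(&my_sem);
	}

	static void writer_path(void)
	{
		percpu_down_write(&my_sem);	/* waits for readers to drain */
		/* exclusive critical section */
		percpu_up_write(&my_sem);
	}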
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 491b3f5a5f8a..1338ea2aa720 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H
@@ -5,7 +6,6 @@
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
-#include <linux/printk.h>
#include <linux/pfn.h>
#include <linux/init.h>
@@ -21,15 +21,27 @@
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
+/* minimum allocation size and shift in bytes */
+#define PCPU_MIN_ALLOC_SHIFT 2
+#define PCPU_MIN_ALLOC_SIZE (1 << PCPU_MIN_ALLOC_SHIFT)
+
+/*
+ * PCPU_BITMAP_BLOCK_SIZE must equal PAGE_SIZE because the bitmap-block
+ * hint updates are used to manage nr_empty_pop_pages both within each
+ * chunk and globally.
+ */
+#define PCPU_BITMAP_BLOCK_SIZE PAGE_SIZE
+#define PCPU_BITMAP_BLOCK_BITS (PCPU_BITMAP_BLOCK_SIZE >> \
+ PCPU_MIN_ALLOC_SHIFT)
+
/*
* Percpu allocator can serve percpu allocations before slab is
* initialized which allows slab to depend on the percpu allocator.
- * The following two parameters decide how much resource to
- * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
- * larger than PERCPU_DYNAMIC_EARLY_SIZE.
+ * The following parameter decides how much resource to preallocate
+ * for this. Keep PERCPU_DYNAMIC_RESERVE equal to or larger than
+ * PERCPU_DYNAMIC_EARLY_SIZE.
*/
-#define PERCPU_DYNAMIC_EARLY_SLOTS 128
-#define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
+#define PERCPU_DYNAMIC_EARLY_SIZE (20 << 10)
/*
* PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
@@ -81,45 +93,39 @@ extern const char * const pcpu_fc_names[PCPU_FC_NR];
extern enum pcpu_fc pcpu_chosen_fc;
-typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
- size_t align);
-typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
-typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
+typedef int (pcpu_fc_cpu_to_node_fn_t)(int cpu);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);
-extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
+extern void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
void *base_addr);
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
size_t atom_size,
pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
- pcpu_fc_alloc_fn_t alloc_fn,
- pcpu_fc_free_fn_t free_fn);
+ pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn);
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+void __init pcpu_populate_pte(unsigned long addr);
extern int __init pcpu_page_first_chunk(size_t reserved_size,
- pcpu_fc_alloc_fn_t alloc_fn,
- pcpu_fc_free_fn_t free_fn,
- pcpu_fc_populate_pte_fn_t populate_pte_fn);
+ pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn);
#endif
-extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
+extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align) __alloc_size(1);
extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
extern bool is_kernel_percpu_address(unsigned long addr);
#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
extern void __init setup_per_cpu_areas(void);
#endif
-extern void __init percpu_init_late(void);
-extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
-extern void __percpu *__alloc_percpu(size_t size, size_t align);
+extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1);
+extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
@@ -130,4 +136,6 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
(typeof(type) __percpu *)__alloc_percpu(sizeof(type), \
__alignof__(type))
+extern unsigned long pcpu_nr_pages(void);
+
#endif /* __LINUX_PERCPU_H */
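With 4 KiB pages, the new constants work out to PCPU_BITMAP_BLOCK_BITS = 4096 >> 2 = 1024 bitmap bits per block, one bit per 4-byte minimum allocation. A sketch of the allocator interface itself, assuming a hypothetical per-cpu counter:

	static int percpu_demo(void)
	{
		unsigned long __percpu *hits;
		unsigned long total = 0;
		int cpu;

		hits = alloc_percpu(unsigned long);	/* size checked via __alloc_size() */
		if (!hits)
			return -ENOMEM;

		this_cpu_inc(*hits);			/* update this CPU's copy */

		for_each_possible_cpu(cpu)		/* fold all copies for a read */
			total += *per_cpu_ptr(hits, cpu);

		free_percpu(hits);
		return 0;
	}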
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index ec065387f443..75b73c83bc9d 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
@@ -12,7 +13,9 @@
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
-#include <linux/gfp.h>
+
+/* percpu_counter batch for local add or sub */
+#define PERCPU_COUNTER_LOCAL_BATCH INT_MAX
#ifdef CONFIG_SMP
@@ -43,6 +46,7 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
+void percpu_counter_sync(struct percpu_counter *fbc);
static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
@@ -54,6 +58,22 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}
+/*
+ * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
+ * are accumulated in the local per-cpu counter rather than in fbc->count
+ * until the local count overflows PERCPU_COUNTER_LOCAL_BATCH, which makes
+ * counter writes efficient.
+ * But percpu_counter_sum(), instead of percpu_counter_read(), must then be
+ * used to fold in the counts from each CPU. So percpu_counter_add_local()
+ * and percpu_counter_sub_local() should be used for counters that are
+ * updated frequently and read rarely.
+ */
+static inline void
+percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
+{
+ percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH);
+}
+
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
s64 ret = __percpu_counter_sum(fbc);
@@ -77,15 +97,15 @@ static inline s64 percpu_counter_read(struct percpu_counter *fbc)
*/
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
- s64 ret = fbc->count;
+ /* Prevent reloads of fbc->count */
+ s64 ret = READ_ONCE(fbc->count);
- barrier(); /* Prevent reloads of fbc->count */
if (ret >= 0)
return ret;
return 0;
}
-static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
return (fbc->counters != NULL);
}
@@ -131,9 +151,18 @@ __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
- preempt_disable();
+ unsigned long flags;
+
+ local_irq_save(flags);
fbc->count += amount;
- preempt_enable();
+ local_irq_restore(flags);
+}
+
+/* non-SMP percpu_counter_add_local is the same as percpu_counter_add */
+static inline void
+percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
+{
+ percpu_counter_add(fbc, amount);
}
static inline void
@@ -166,11 +195,14 @@ static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
return percpu_counter_read(fbc);
}
-static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
- return 1;
+ return true;
}
+static inline void percpu_counter_sync(struct percpu_counter *fbc)
+{
+}
#endif /* CONFIG_SMP */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
@@ -188,4 +220,10 @@ static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
percpu_counter_add(fbc, -amount);
}
+static inline void
+percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount)
+{
+ percpu_counter_add_local(fbc, -amount);
+}
+
#endif /* _LINUX_PERCPU_COUNTER_H */
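A sketch of the add_local/sub_local pattern described above, assuming a hypothetical nr_items counter: writes stay in the per-cpu deltas, so readers must fold them with percpu_counter_sum():

	static struct percpu_counter nr_items;

	static int items_setup(void)
	{
		return percpu_counter_init(&nr_items, 0, GFP_KERNEL);
	}

	static void item_added(void)
	{
		/* cheap: accumulates locally up to PERCPU_COUNTER_LOCAL_BATCH */
		percpu_counter_add_local(&nr_items, 1);
	}

	static s64 items_count(void)
	{
		/* rare reader: folds in every CPU's local delta */
		return percpu_counter_sum(&nr_items);
	}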
diff --git a/include/linux/percpu_ida.h b/include/linux/percpu_ida.h
deleted file mode 100644
index f5cfdd6a5539..000000000000
--- a/include/linux/percpu_ida.h
+++ /dev/null
@@ -1,82 +0,0 @@
-#ifndef __PERCPU_IDA_H__
-#define __PERCPU_IDA_H__
-
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/spinlock_types.h>
-#include <linux/wait.h>
-#include <linux/cpumask.h>
-
-struct percpu_ida_cpu;
-
-struct percpu_ida {
- /*
- * number of tags available to be allocated, as passed to
- * percpu_ida_init()
- */
- unsigned nr_tags;
- unsigned percpu_max_size;
- unsigned percpu_batch_size;
-
- struct percpu_ida_cpu __percpu *tag_cpu;
-
- /*
- * Bitmap of cpus that (may) have tags on their percpu freelists:
- * steal_tags() uses this to decide when to steal tags, and which cpus
- * to try stealing from.
- *
- * It's ok for a freelist to be empty when its bit is set - steal_tags()
- * will just keep looking - but the bitmap _must_ be set whenever a
- * percpu freelist does have tags.
- */
- cpumask_t cpus_have_tags;
-
- struct {
- spinlock_t lock;
- /*
- * When we go to steal tags from another cpu (see steal_tags()),
- * we want to pick a cpu at random. Cycling through them every
- * time we steal is a bit easier and more or less equivalent:
- */
- unsigned cpu_last_stolen;
-
- /* For sleeping on allocation failure */
- wait_queue_head_t wait;
-
- /*
- * Global freelist - it's a stack where nr_free points to the
- * top
- */
- unsigned nr_free;
- unsigned *freelist;
- } ____cacheline_aligned_in_smp;
-};
-
-/*
- * Number of tags we move between the percpu freelist and the global freelist at
- * a time
- */
-#define IDA_DEFAULT_PCPU_BATCH_MOVE 32U
-/* Max size of percpu freelist, */
-#define IDA_DEFAULT_PCPU_SIZE ((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2)
-
-int percpu_ida_alloc(struct percpu_ida *pool, int state);
-void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
-
-void percpu_ida_destroy(struct percpu_ida *pool);
-int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
- unsigned long max_size, unsigned long batch_size);
-static inline int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
-{
- return __percpu_ida_init(pool, nr_tags, IDA_DEFAULT_PCPU_SIZE,
- IDA_DEFAULT_PCPU_BATCH_MOVE);
-}
-
-typedef int (*percpu_ida_cb)(unsigned, void *);
-int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
- void *data);
-
-unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu);
-#endif /* __PERCPU_IDA_H__ */
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 1360dd6d5e61..525b5d64e394 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/include/asm/pmu.h
*
* Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __ARM_PMU_H__
@@ -14,22 +10,10 @@
#include <linux/interrupt.h>
#include <linux/perf_event.h>
+#include <linux/platform_device.h>
#include <linux/sysfs.h>
#include <asm/cputype.h>
-/*
- * struct arm_pmu_platdata - ARM PMU platform data
- *
- * @handle_irq: an optional handler which will be called from the
- * interrupt and passed the address of the low level handler,
- * and can be used to implement any platform specific handling
- * before or after calling it.
- */
-struct arm_pmu_platdata {
- irqreturn_t (*handle_irq)(int irq, void *dev,
- irq_handler_t pmu_handler);
-};
-
#ifdef CONFIG_ARM_PMU
/*
@@ -37,6 +21,15 @@ struct arm_pmu_platdata {
*/
#define ARMPMU_MAX_HWEVENTS 32
+/*
+ * ARM PMU hw_event flags
+ */
+#define ARMPMU_EVT_64BIT 0x00001 /* Event uses a 64bit counter */
+#define ARMPMU_EVT_47BIT 0x00002 /* Event uses a 47bit counter */
+
+static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_64BIT) == ARMPMU_EVT_64BIT);
+static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_47BIT) == ARMPMU_EVT_47BIT);
+
#define HW_OP_UNSUPPORTED 0xFFFF
#define C(_x) PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED 0xFFFF
@@ -83,15 +76,16 @@ enum armpmu_attr_groups {
ARMPMU_ATTR_GROUP_COMMON,
ARMPMU_ATTR_GROUP_EVENTS,
ARMPMU_ATTR_GROUP_FORMATS,
+ ARMPMU_ATTR_GROUP_CAPS,
ARMPMU_NR_ATTR_GROUPS
};
struct arm_pmu {
struct pmu pmu;
- cpumask_t active_irqs;
cpumask_t supported_cpus;
char *name;
- irqreturn_t (*handle_irq)(int irq_num, void *dev);
+ int pmuver;
+ irqreturn_t (*handle_irq)(struct arm_pmu *pmu);
void (*enable)(struct perf_event *event);
void (*disable)(struct perf_event *event);
int (*get_event_idx)(struct pmu_hw_events *hw_events,
@@ -100,23 +94,26 @@ struct arm_pmu {
struct perf_event *event);
int (*set_event_filter)(struct hw_perf_event *evt,
struct perf_event_attr *attr);
- u32 (*read_counter)(struct perf_event *event);
- void (*write_counter)(struct perf_event *event, u32 val);
+ u64 (*read_counter)(struct perf_event *event);
+ void (*write_counter)(struct perf_event *event, u64 val);
void (*start)(struct arm_pmu *);
void (*stop)(struct arm_pmu *);
void (*reset)(void *);
int (*map_event)(struct perf_event *event);
int num_events;
- u64 max_period;
bool secure_access; /* 32-bit ARM only */
-#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
+#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
+#define ARMV8_PMUV3_EXT_COMMON_EVENT_BASE 0x4000
+ DECLARE_BITMAP(pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
struct platform_device *plat_device;
struct pmu_hw_events __percpu *hw_events;
struct hlist_node node;
struct notifier_block cpu_pm_nb;
/* the attr_groups array must be NULL-terminated */
const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1];
+ /* store the PMMIR_EL1 to expose slots */
+ u64 reg_pmmir;
/* Only to be used by ACPI probing code */
unsigned long acpi_cpuid;
@@ -168,17 +165,23 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
#endif
+#ifdef CONFIG_KVM
+void kvm_host_pmu_init(struct arm_pmu *pmu);
+#else
+#define kvm_host_pmu_init(x) do { } while (0)
+#endif
+
/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
void armpmu_free(struct arm_pmu *pmu);
int armpmu_register(struct arm_pmu *pmu);
-int armpmu_request_irqs(struct arm_pmu *armpmu);
-void armpmu_free_irqs(struct arm_pmu *armpmu);
-int armpmu_request_irq(struct arm_pmu *armpmu, int cpu);
-void armpmu_free_irq(struct arm_pmu *armpmu, int cpu);
+int armpmu_request_irq(int irq, int cpu);
+void armpmu_free_irq(int irq, int cpu);
#define ARMV8_PMU_PDEV_NAME "armv8-pmu"
#endif /* CONFIG_ARM_PMU */
+#define ARMV8_SPE_PDEV_NAME "arm,spe-v1"
+
#endif /* __ARM_PMU_H__ */
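A sketch of how a driver might use the new hw_event flags (the event encoding and MY_CYCLES are assumptions, not from this patch); the static_asserts above guarantee the flags fit in the PERF_EVENT_FLAG_ARCH bits reserved in perf_event.h:

	static int my_map_event(struct perf_event *event)
	{
		int hw_event = event->attr.config & 0xffff;	/* assumed encoding */

		if (hw_event == MY_CYCLES)
			event->hw.flags |= ARMPMU_EVT_64BIT;	/* needs a 64-bit counter */
		return hw_event;
	}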
diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h
new file mode 100644
index 000000000000..e3899bd77f5c
--- /dev/null
+++ b/include/linux/perf/arm_pmuv3.h
@@ -0,0 +1,303 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#ifndef __PERF_ARM_PMUV3_H
+#define __PERF_ARM_PMUV3_H
+
+#define ARMV8_PMU_MAX_COUNTERS 32
+#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1)
+
+/*
+ * Common architectural and microarchitectural event numbers.
+ */
+#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x0000
+#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL 0x0001
+#define ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL 0x0002
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x0003
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x0004
+#define ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL 0x0005
+#define ARMV8_PMUV3_PERFCTR_LD_RETIRED 0x0006
+#define ARMV8_PMUV3_PERFCTR_ST_RETIRED 0x0007
+#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x0008
+#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN 0x0009
+#define ARMV8_PMUV3_PERFCTR_EXC_RETURN 0x000A
+#define ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED 0x000B
+#define ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED 0x000C
+#define ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED 0x000D
+#define ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED 0x000E
+#define ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED 0x000F
+#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x0010
+#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x0011
+#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x0012
+#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS 0x0013
+#define ARMV8_PMUV3_PERFCTR_L1I_CACHE 0x0014
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB 0x0015
+#define ARMV8_PMUV3_PERFCTR_L2D_CACHE 0x0016
+#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL 0x0017
+#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB 0x0018
+#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS 0x0019
+#define ARMV8_PMUV3_PERFCTR_MEMORY_ERROR 0x001A
+#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x001B
+#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED 0x001C
+#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES 0x001D
+#define ARMV8_PMUV3_PERFCTR_CHAIN 0x001E
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE 0x001F
+#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE 0x0020
+#define ARMV8_PMUV3_PERFCTR_BR_RETIRED 0x0021
+#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED 0x0022
+#define ARMV8_PMUV3_PERFCTR_STALL_FRONTEND 0x0023
+#define ARMV8_PMUV3_PERFCTR_STALL_BACKEND 0x0024
+#define ARMV8_PMUV3_PERFCTR_L1D_TLB 0x0025
+#define ARMV8_PMUV3_PERFCTR_L1I_TLB 0x0026
+#define ARMV8_PMUV3_PERFCTR_L2I_CACHE 0x0027
+#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL 0x0028
+#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE 0x0029
+#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL 0x002A
+#define ARMV8_PMUV3_PERFCTR_L3D_CACHE 0x002B
+#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB 0x002C
+#define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL 0x002D
+#define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL 0x002E
+#define ARMV8_PMUV3_PERFCTR_L2D_TLB 0x002F
+#define ARMV8_PMUV3_PERFCTR_L2I_TLB 0x0030
+#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS 0x0031
+#define ARMV8_PMUV3_PERFCTR_LL_CACHE 0x0032
+#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS 0x0033
+#define ARMV8_PMUV3_PERFCTR_DTLB_WALK 0x0034
+#define ARMV8_PMUV3_PERFCTR_ITLB_WALK 0x0035
+#define ARMV8_PMUV3_PERFCTR_LL_CACHE_RD 0x0036
+#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD 0x0037
+#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD 0x0038
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD 0x0039
+#define ARMV8_PMUV3_PERFCTR_OP_RETIRED 0x003A
+#define ARMV8_PMUV3_PERFCTR_OP_SPEC 0x003B
+#define ARMV8_PMUV3_PERFCTR_STALL 0x003C
+#define ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND 0x003D
+#define ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND 0x003E
+#define ARMV8_PMUV3_PERFCTR_STALL_SLOT 0x003F
+
+/* Statistical profiling extension microarchitectural events */
+#define ARMV8_SPE_PERFCTR_SAMPLE_POP 0x4000
+#define ARMV8_SPE_PERFCTR_SAMPLE_FEED 0x4001
+#define ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE 0x4002
+#define ARMV8_SPE_PERFCTR_SAMPLE_COLLISION 0x4003
+
+/* AMUv1 architecture events */
+#define ARMV8_AMU_PERFCTR_CNT_CYCLES 0x4004
+#define ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM 0x4005
+
+/* long-latency read miss events */
+#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS 0x4006
+#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD 0x4009
+#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS 0x400A
+#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD 0x400B
+
+/* Trace buffer events */
+#define ARMV8_PMUV3_PERFCTR_TRB_WRAP 0x400C
+#define ARMV8_PMUV3_PERFCTR_TRB_TRIG 0x400E
+
+/* Trace unit events */
+#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT0 0x4010
+#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT1 0x4011
+#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT2 0x4012
+#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT3 0x4013
+#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT4 0x4018
+#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT5 0x4019
+#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT6 0x401A
+#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT7 0x401B
+
+/* additional latency from alignment events */
+#define ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT 0x4020
+#define ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT 0x4021
+#define ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT 0x4022
+
+/* Armv8.5 Memory Tagging Extension events */
+#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED 0x4024
+#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD 0x4025
+#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR 0x4026
+
+/* ARMv8 recommended implementation defined event types */
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD 0x0040
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR 0x0041
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD 0x0042
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR 0x0043
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_INNER 0x0044
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_OUTER 0x0045
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_VICTIM 0x0046
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_CLEAN 0x0047
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_INVAL 0x0048
+
+#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD 0x004C
+#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR 0x004D
+#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD 0x004E
+#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR 0x004F
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_RD 0x0050
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WR 0x0051
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_RD 0x0052
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_WR 0x0053
+
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_VICTIM 0x0056
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_CLEAN 0x0057
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_INVAL 0x0058
+
+#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_RD 0x005C
+#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_WR 0x005D
+#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_RD 0x005E
+#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_WR 0x005F
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD 0x0060
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR 0x0061
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_SHARED 0x0062
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NOT_SHARED 0x0063
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NORMAL 0x0064
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_PERIPH 0x0065
+#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_RD 0x0066
+#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_WR 0x0067
+#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LD_SPEC 0x0068
+#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_ST_SPEC 0x0069
+#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LDST_SPEC 0x006A
+
+#define ARMV8_IMPDEF_PERFCTR_LDREX_SPEC 0x006C
+#define ARMV8_IMPDEF_PERFCTR_STREX_PASS_SPEC 0x006D
+#define ARMV8_IMPDEF_PERFCTR_STREX_FAIL_SPEC 0x006E
+#define ARMV8_IMPDEF_PERFCTR_STREX_SPEC 0x006F
+#define ARMV8_IMPDEF_PERFCTR_LD_SPEC 0x0070
+#define ARMV8_IMPDEF_PERFCTR_ST_SPEC 0x0071
+#define ARMV8_IMPDEF_PERFCTR_LDST_SPEC 0x0072
+#define ARMV8_IMPDEF_PERFCTR_DP_SPEC 0x0073
+#define ARMV8_IMPDEF_PERFCTR_ASE_SPEC 0x0074
+#define ARMV8_IMPDEF_PERFCTR_VFP_SPEC 0x0075
+#define ARMV8_IMPDEF_PERFCTR_PC_WRITE_SPEC 0x0076
+#define ARMV8_IMPDEF_PERFCTR_CRYPTO_SPEC 0x0077
+#define ARMV8_IMPDEF_PERFCTR_BR_IMMED_SPEC 0x0078
+#define ARMV8_IMPDEF_PERFCTR_BR_RETURN_SPEC 0x0079
+#define ARMV8_IMPDEF_PERFCTR_BR_INDIRECT_SPEC 0x007A
+
+#define ARMV8_IMPDEF_PERFCTR_ISB_SPEC 0x007C
+#define ARMV8_IMPDEF_PERFCTR_DSB_SPEC 0x007D
+#define ARMV8_IMPDEF_PERFCTR_DMB_SPEC 0x007E
+
+#define ARMV8_IMPDEF_PERFCTR_EXC_UNDEF 0x0081
+#define ARMV8_IMPDEF_PERFCTR_EXC_SVC 0x0082
+#define ARMV8_IMPDEF_PERFCTR_EXC_PABORT 0x0083
+#define ARMV8_IMPDEF_PERFCTR_EXC_DABORT 0x0084
+
+#define ARMV8_IMPDEF_PERFCTR_EXC_IRQ 0x0086
+#define ARMV8_IMPDEF_PERFCTR_EXC_FIQ 0x0087
+#define ARMV8_IMPDEF_PERFCTR_EXC_SMC 0x0088
+
+#define ARMV8_IMPDEF_PERFCTR_EXC_HVC 0x008A
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_PABORT 0x008B
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_DABORT 0x008C
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_OTHER 0x008D
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_IRQ 0x008E
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_FIQ 0x008F
+#define ARMV8_IMPDEF_PERFCTR_RC_LD_SPEC 0x0090
+#define ARMV8_IMPDEF_PERFCTR_RC_ST_SPEC 0x0091
+
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_RD 0x00A0
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WR 0x00A1
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_RD 0x00A2
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_WR 0x00A3
+
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_VICTIM 0x00A6
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_CLEAN 0x00A7
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_INVAL 0x00A8
+
+/*
+ * Per-CPU PMCR: config reg
+ */
+#define ARMV8_PMU_PMCR_E (1 << 0) /* Enable all counters */
+#define ARMV8_PMU_PMCR_P (1 << 1) /* Reset all counters */
+#define ARMV8_PMU_PMCR_C (1 << 2) /* Cycle counter reset */
+#define ARMV8_PMU_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
+#define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */
+#define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
+#define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */
+#define ARMV8_PMU_PMCR_LP (1 << 7) /* Long event counter enable */
+#define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */
+#define ARMV8_PMU_PMCR_N_MASK 0x1f
+#define ARMV8_PMU_PMCR_MASK 0xff /* Mask for writable bits */
+
+/*
+ * PMOVSR: counters overflow flag status reg
+ */
+#define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */
+#define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK
+
+/*
+ * PMXEVTYPER: Event selection reg
+ */
+#define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
+#define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
+
+/*
+ * Event filters for PMUv3
+ */
+#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31)
+#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30)
+#define ARMV8_PMU_INCLUDE_EL2 (1U << 27)
+
+/*
+ * PMUSERENR: user enable reg
+ */
+#define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */
+#define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */
+#define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */
+#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */
+#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */
+
+/* PMMIR_EL1.SLOTS mask */
+#define ARMV8_PMU_SLOTS_MASK 0xff
+
+#define ARMV8_PMU_BUS_SLOTS_SHIFT 8
+#define ARMV8_PMU_BUS_SLOTS_MASK 0xff
+#define ARMV8_PMU_BUS_WIDTH_SHIFT 16
+#define ARMV8_PMU_BUS_WIDTH_MASK 0xf
+
+/*
+ * PMEVN_SWITCH() turns a run-time PMEV* counter index into a
+ * compile-time literal, as the sysreg accessors require.
+ */
+
+#define PMEVN_CASE(n, case_macro) \
+ case n: case_macro(n); break
+
+#define PMEVN_SWITCH(x, case_macro) \
+ do { \
+ switch (x) { \
+ PMEVN_CASE(0, case_macro); \
+ PMEVN_CASE(1, case_macro); \
+ PMEVN_CASE(2, case_macro); \
+ PMEVN_CASE(3, case_macro); \
+ PMEVN_CASE(4, case_macro); \
+ PMEVN_CASE(5, case_macro); \
+ PMEVN_CASE(6, case_macro); \
+ PMEVN_CASE(7, case_macro); \
+ PMEVN_CASE(8, case_macro); \
+ PMEVN_CASE(9, case_macro); \
+ PMEVN_CASE(10, case_macro); \
+ PMEVN_CASE(11, case_macro); \
+ PMEVN_CASE(12, case_macro); \
+ PMEVN_CASE(13, case_macro); \
+ PMEVN_CASE(14, case_macro); \
+ PMEVN_CASE(15, case_macro); \
+ PMEVN_CASE(16, case_macro); \
+ PMEVN_CASE(17, case_macro); \
+ PMEVN_CASE(18, case_macro); \
+ PMEVN_CASE(19, case_macro); \
+ PMEVN_CASE(20, case_macro); \
+ PMEVN_CASE(21, case_macro); \
+ PMEVN_CASE(22, case_macro); \
+ PMEVN_CASE(23, case_macro); \
+ PMEVN_CASE(24, case_macro); \
+ PMEVN_CASE(25, case_macro); \
+ PMEVN_CASE(26, case_macro); \
+ PMEVN_CASE(27, case_macro); \
+ PMEVN_CASE(28, case_macro); \
+ PMEVN_CASE(29, case_macro); \
+ PMEVN_CASE(30, case_macro); \
+ default: WARN(1, "Invalid PMEV* index\n"); \
+ } \
+ } while (0)
+
+#endif
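The switch exists so that a run-time counter index becomes a compile-time literal, which the sysreg accessors require; the arm64 read accessor uses it essentially as below (a sketch of that usage):

	#define RETURN_READ_PMEVCNTRN(n) \
		return read_sysreg(pmevcntr##n##_el0)

	static inline unsigned long read_pmevcntrn(int n)
	{
		PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
		return 0;	/* unreachable; WARN() fired for a bad index */
	}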
diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h
new file mode 100644
index 000000000000..43fc892aa7d9
--- /dev/null
+++ b/include/linux/perf/riscv_pmu.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 SiFive
+ * Copyright (C) 2018 Andes Technology Corporation
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ *
+ */
+
+#ifndef _ASM_RISCV_PERF_EVENT_H
+#define _ASM_RISCV_PERF_EVENT_H
+
+#include <linux/perf_event.h>
+#include <linux/ptrace.h>
+#include <linux/interrupt.h>
+
+#ifdef CONFIG_RISCV_PMU
+
+/*
+ * RISCV_MAX_COUNTERS bounds the hardware and firmware counters a PMU can expose.
+ */
+
+#define RISCV_MAX_COUNTERS 64
+#define RISCV_OP_UNSUPP (-EOPNOTSUPP)
+#define RISCV_PMU_PDEV_NAME "riscv-pmu"
+#define RISCV_PMU_LEGACY_PDEV_NAME "riscv-pmu-legacy"
+
+#define RISCV_PMU_STOP_FLAG_RESET 1
+
+#define RISCV_PMU_CONFIG1_GUEST_EVENTS 0x1
+
+struct cpu_hw_events {
+ /* number of currently enabled events */
+ int n_events;
+ /* Counter overflow interrupt */
+ int irq;
+ /* currently enabled events */
+ struct perf_event *events[RISCV_MAX_COUNTERS];
+ /* currently enabled hardware counters */
+ DECLARE_BITMAP(used_hw_ctrs, RISCV_MAX_COUNTERS);
+ /* currently enabled firmware counters */
+ DECLARE_BITMAP(used_fw_ctrs, RISCV_MAX_COUNTERS);
+};
+
+struct riscv_pmu {
+ struct pmu pmu;
+ char *name;
+
+ irqreturn_t (*handle_irq)(int irq_num, void *dev);
+
+ unsigned long cmask;
+ u64 (*ctr_read)(struct perf_event *event);
+ int (*ctr_get_idx)(struct perf_event *event);
+ int (*ctr_get_width)(int idx);
+ void (*ctr_clear_idx)(struct perf_event *event);
+ void (*ctr_start)(struct perf_event *event, u64 init_val);
+ void (*ctr_stop)(struct perf_event *event, unsigned long flag);
+ int (*event_map)(struct perf_event *event, u64 *config);
+
+ struct cpu_hw_events __percpu *hw_events;
+ struct hlist_node node;
+ struct notifier_block riscv_pm_nb;
+};
+
+#define to_riscv_pmu(p) (container_of(p, struct riscv_pmu, pmu))
+
+void riscv_pmu_start(struct perf_event *event, int flags);
+void riscv_pmu_stop(struct perf_event *event, int flags);
+unsigned long riscv_pmu_ctr_read_csr(unsigned long csr);
+int riscv_pmu_event_set_period(struct perf_event *event);
+uint64_t riscv_pmu_ctr_get_width_mask(struct perf_event *event);
+u64 riscv_pmu_event_update(struct perf_event *event);
+#ifdef CONFIG_RISCV_PMU_LEGACY
+void riscv_pmu_legacy_skip_init(void);
+#else
+static inline void riscv_pmu_legacy_skip_init(void) { }
+#endif
+struct riscv_pmu *riscv_pmu_alloc(void);
+#ifdef CONFIG_RISCV_PMU_SBI
+int riscv_pmu_get_hpm_info(u32 *hw_ctr_width, u32 *num_hw_ctr);
+#endif
+
+#endif /* CONFIG_RISCV_PMU */
+
+#endif /* _ASM_RISCV_PERF_EVENT_H */
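A sketch of a backend wiring its counter operations into struct riscv_pmu (all my_* helpers are hypothetical; riscv_pmu_alloc() and perf_pmu_register() are the real entry points):

	static int my_pmu_probe(struct platform_device *pdev)
	{
		struct riscv_pmu *pmu = riscv_pmu_alloc();

		if (!pmu)
			return -ENOMEM;

		pmu->ctr_read    = my_ctr_read;
		pmu->ctr_start   = my_ctr_start;
		pmu->ctr_stop    = my_ctr_stop;
		pmu->ctr_get_idx = my_ctr_get_idx;
		pmu->event_map   = my_event_map;

		return perf_pmu_register(&pmu->pmu, RISCV_PMU_PDEV_NAME, -1);
	}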
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index a3b873fc59e4..d5628a7b5eaa 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -15,6 +15,7 @@
#define _LINUX_PERF_EVENT_H
#include <uapi/linux/perf_event.h>
+#include <uapi/linux/bpf_perf_event.h>
/*
* Kernel-internal data types and definitions:
@@ -25,13 +26,17 @@
# include <asm/local64.h>
#endif
+#define PERF_GUEST_ACTIVE 0x01
+#define PERF_GUEST_USER 0x02
+
struct perf_guest_info_callbacks {
- int (*is_in_guest)(void);
- int (*is_user_mode)(void);
- unsigned long (*get_guest_ip)(void);
+ unsigned int (*state)(void);
+ unsigned long (*get_ip)(void);
+ unsigned int (*handle_intel_pt_intr)(void);
};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
+#include <linux/rhashtable-types.h>
#include <asm/hw_breakpoint.h>
#endif
@@ -52,13 +57,16 @@ struct perf_guest_info_callbacks {
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
-#include <linux/workqueue.h>
#include <linux/cgroup.h>
+#include <linux/refcount.h>
+#include <linux/security.h>
+#include <linux/static_call.h>
+#include <linux/lockdep.h>
#include <asm/local.h>
struct perf_callchain_entry {
__u64 nr;
- __u64 ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
+ __u64 ip[]; /* /proc/sys/kernel/perf_event_max_stack */
};
struct perf_callchain_entry_ctx {
@@ -87,18 +95,35 @@ struct perf_raw_record {
u32 size;
};
+static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
+{
+ return frag->pad < sizeof(u64);
+}
+
/*
* branch stack layout:
* nr: number of taken branches stored in entries[]
+ * hw_idx: The low level index of raw branch records
+ * for the most recent branch.
+ * -1ULL means invalid/unknown.
*
* Note that nr can vary from sample to sample
* branches (to, from) are stored from most recent
* to least recent, i.e., entries[0] contains the most
* recent branch.
+ * The entries[] is an abstraction of raw branch records,
+ * which may not be stored in age order in HW, e.g. Intel LBR.
+ * The hw_idx is to expose the low level index of raw
+ * branch record for the most recent branch aka entries[0].
+ * The hw_idx index is between -1 (unknown) and max depth,
+ * which can be retrieved in /sys/devices/cpu/caps/branches.
+ * For the architectures whose raw branch records are
+ * already stored in age order, the hw_idx should be 0.
*/
struct perf_branch_stack {
__u64 nr;
- struct perf_branch_entry entries[0];
+ __u64 hw_idx;
+ struct perf_branch_entry entries[];
};
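A sketch of consuming a sampled stack under the new layout (walk_branches() is hypothetical): entries[0] is the most recent branch, and hw_idx is its raw hardware slot, or -1ULL when unknown:

	static void walk_branches(const struct perf_branch_stack *bs)
	{
		u64 i;

		for (i = 0; i < bs->nr; i++)	/* newest branch first */
			pr_debug("branch: %llx -> %llx\n",
				 bs->entries[i].from, bs->entries[i].to);
	}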
struct task_struct;
@@ -114,6 +139,17 @@ struct hw_perf_event_extra {
};
/**
+ * hw_perf_event::flag values
+ *
+ * PERF_EVENT_FLAG_ARCH bits are reserved for architecture-specific
+ * usage.
+ */
+#define PERF_EVENT_FLAG_ARCH 0x000fffff
+#define PERF_EVENT_FLAG_USER_READ_CNT 0x80000000
+
+static_assert((PERF_EVENT_FLAG_USER_READ_CNT & PERF_EVENT_FLAG_ARCH) == 0);
+
+/**
* struct hw_perf_event - performance event hardware details:
*/
struct hw_perf_event {
@@ -139,17 +175,6 @@ struct hw_perf_event {
/* for tp_event->class */
struct list_head tp_list;
};
- struct { /* intel_cqm */
- int cqm_state;
- u32 cqm_rmid;
- int is_group_event;
- struct list_head cqm_events_entry;
- struct list_head cqm_groups_entry;
- struct list_head cqm_group_entry;
- };
- struct { /* itrace */
- int itrace_started;
- };
struct { /* amd_power */
u64 pwr_acc;
u64 ptsc;
@@ -162,7 +187,7 @@ struct hw_perf_event {
 * creation and event initialization.
*/
struct arch_hw_breakpoint info;
- struct list_head bp_list;
+ struct rhlist_head bp_list;
};
#endif
struct { /* amd_iommu */
@@ -208,17 +233,26 @@ struct hw_perf_event {
*/
u64 sample_period;
- /*
- * The period we started this sample with.
- */
- u64 last_period;
+ union {
+ struct { /* Sampling */
+ /*
+ * The period we started this sample with.
+ */
+ u64 last_period;
- /*
- * However much is left of the current period; note that this is
- * a full 64bit value and allows for generation of periods longer
- * than hardware might allow.
- */
- local64_t period_left;
+ /*
+ * However much is left of the current period;
+ * note that this is a full 64bit value and
+ * allows for generation of periods longer
+ * than hardware might allow.
+ */
+ local64_t period_left;
+ };
+ struct { /* Topdown events counting for context switch */
+ u64 saved_metric;
+ u64 saved_slots;
+ };
+ };
/*
* State for throttling the event, see __perf_event_overflow() and
@@ -237,6 +271,7 @@ struct hw_perf_event {
};
struct perf_event;
+struct perf_event_pmu_context;
/*
* Common implementation detail of pmu::{start,commit,cancel}_txn
@@ -247,13 +282,18 @@ struct perf_event;
/**
* pmu::capabilities flags
*/
-#define PERF_PMU_CAP_NO_INTERRUPT 0x01
-#define PERF_PMU_CAP_NO_NMI 0x02
-#define PERF_PMU_CAP_AUX_NO_SG 0x04
-#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF 0x08
-#define PERF_PMU_CAP_EXCLUSIVE 0x10
-#define PERF_PMU_CAP_ITRACE 0x20
-#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40
+#define PERF_PMU_CAP_NO_INTERRUPT 0x0001
+#define PERF_PMU_CAP_NO_NMI 0x0002
+#define PERF_PMU_CAP_AUX_NO_SG 0x0004
+#define PERF_PMU_CAP_EXTENDED_REGS 0x0008
+#define PERF_PMU_CAP_EXCLUSIVE 0x0010
+#define PERF_PMU_CAP_ITRACE 0x0020
+#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x0040
+#define PERF_PMU_CAP_NO_EXCLUDE 0x0080
+#define PERF_PMU_CAP_AUX_OUTPUT 0x0100
+#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0200
+
+struct perf_output_handle;
/**
* struct pmu - generic performance monitoring unit
@@ -264,6 +304,7 @@ struct pmu {
struct module *module;
struct device *dev;
const struct attribute_group **attr_groups;
+ const struct attribute_group **attr_update;
const char *name;
int type;
@@ -272,8 +313,8 @@ struct pmu {
*/
int capabilities;
- int * __percpu pmu_disable_count;
- struct perf_cpu_context * __percpu pmu_cpu_context;
+ int __percpu *pmu_disable_count;
+ struct perf_cpu_pmu_context __percpu *cpu_pmu_context;
atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
int task_ctx_nr;
int hrtimer_interval_ms;
@@ -298,7 +339,7 @@ struct pmu {
* -EBUSY -- @event is for this PMU but PMU temporarily unavailable
* -EINVAL -- @event is for this PMU but @event is not valid
* -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
- * -EACCESS -- @event is for this PMU, @event is valid, but no privilidges
+ * -EACCES -- @event is for this PMU, @event is valid, but no privileges
*
* 0 -- @event is for this PMU and valid
*
@@ -310,8 +351,8 @@ struct pmu {
* Notification that the event was mapped or unmapped. Called
* in the context of the mapping task.
*/
- void (*event_mapped) (struct perf_event *event); /*optional*/
- void (*event_unmapped) (struct perf_event *event); /*optional*/
+ void (*event_mapped) (struct perf_event *event, struct mm_struct *mm); /* optional */
+ void (*event_unmapped) (struct perf_event *event, struct mm_struct *mm); /* optional */
/*
* Flags for ->add()/->del()/ ->start()/->stop(). There are
@@ -357,7 +398,7 @@ struct pmu {
* ->stop() with PERF_EF_UPDATE will read the counter and update
* period/count values like ->read() would.
*
- * ->start() with PERF_EF_RELOAD will reprogram the the counter
+ * ->start() with PERF_EF_RELOAD will reprogram the counter
* value, must be preceded by a ->stop() with PERF_EF_UPDATE.
*/
void (*start) (struct perf_event *event, int flags);
@@ -408,23 +449,28 @@ struct pmu {
/*
* context-switches callback
*/
- void (*sched_task) (struct perf_event_context *ctx,
+ void (*sched_task) (struct perf_event_pmu_context *pmu_ctx,
bool sched_in);
+
/*
- * PMU specific data size
+ * Kmem cache of PMU specific data
*/
- size_t task_ctx_size;
-
+ struct kmem_cache *task_ctx_cache;
/*
- * Return the count value for a counter.
+ * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
+ * can be synchronized using this function. See Intel LBR callstack support
+ * implementation and Perf core context switch handling callbacks for usage
+ * examples.
*/
- u64 (*count) (struct perf_event *event); /*optional*/
+ void (*swap_task_ctx) (struct perf_event_pmu_context *prev_epc,
+ struct perf_event_pmu_context *next_epc);
+ /* optional */
/*
* Set up pmu-private data structures for an AUX area
*/
- void *(*setup_aux) (int cpu, void **pages,
+ void *(*setup_aux) (struct perf_event *event, void **pages,
int nr_pages, bool overwrite);
/* optional */
@@ -434,6 +480,19 @@ struct pmu {
void (*free_aux) (void *aux); /* optional */
/*
+ * Take a snapshot of the AUX buffer without touching the event
+ * state, so that preempting ->start()/->stop() callbacks does
+ * not interfere with their logic. Called in PMI context.
+ *
+ * Returns the size of AUX data copied to the output handle.
+ *
+ * Optional.
+ */
+ long (*snapshot_aux) (struct perf_event *event,
+ struct perf_output_handle *handle,
+ unsigned long size);
+
+ /*
* Validate address range filters: make sure the HW supports the
* requested configuration and number of filters; return 0 if the
* supplied filters are valid, -errno otherwise.
@@ -459,29 +518,49 @@ struct pmu {
/* optional */
/*
- * Filter events for PMU-specific reasons.
+ * Check if event can be used for aux_output purposes for
+ * events of this PMU.
+ *
+ * Runs from perf_event_open(). Should return 0 for "no match"
+ * or non-zero for "match".
+ */
+ int (*aux_output_match) (struct perf_event *event);
+ /* optional */
+
+ /*
+ * Skip programming this PMU on the given CPU. Typically needed for
+ * big.LITTLE things.
*/
- int (*filter_match) (struct perf_event *event); /* optional */
+ bool (*filter) (struct pmu *pmu, int cpu); /* optional */
+
+ /*
+ * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
+ */
+ int (*check_period) (struct perf_event *event, u64 value); /* optional */
+};
+
+enum perf_addr_filter_action_t {
+ PERF_ADDR_FILTER_ACTION_STOP = 0,
+ PERF_ADDR_FILTER_ACTION_START,
+ PERF_ADDR_FILTER_ACTION_FILTER,
};
/**
* struct perf_addr_filter - address range filter definition
* @entry: event's filter list linkage
- * @inode: object file's inode for file-based filters
+ * @path: object file's path for file-based filters
* @offset: filter range offset
- * @size: filter range size
- * @range: 1: range, 0: address
- * @filter: 1: filter/start, 0: stop
+ * @size: filter range size (size==0 means single address trigger)
+ * @action: filter/start/stop
*
* This is a hardware-agnostic filter configuration as specified by the user.
*/
struct perf_addr_filter {
struct list_head entry;
- struct inode *inode;
+ struct path path;
unsigned long offset;
unsigned long size;
- unsigned int range : 1,
- filter : 1;
+ enum perf_addr_filter_action_t action;
};
/**
@@ -500,10 +579,15 @@ struct perf_addr_filters_head {
unsigned int nr_file_filters;
};
+struct perf_addr_filter_range {
+ unsigned long start;
+ unsigned long size;
+};
+
/**
- * enum perf_event_active_state - the states of a event
+ * enum perf_event_state - the states of an event:
*/
-enum perf_event_active_state {
+enum perf_event_state {
PERF_EVENT_STATE_DEAD = -4,
PERF_EVENT_STATE_EXIT = -3,
PERF_EVENT_STATE_ERROR = -2,
@@ -525,9 +609,13 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *,
* PERF_EV_CAP_SOFTWARE: Is a software event.
* PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
* from any CPU in the package where it is active.
+ * PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and
+ * cannot be a group leader. If an event with this flag is detached from the
+ * group it is scheduled out and moved into an unrecoverable ERROR state.
*/
#define PERF_EV_CAP_SOFTWARE BIT(0)
#define PERF_EV_CAP_READ_ACTIVE_PKG BIT(1)
+#define PERF_EV_CAP_SIBLING BIT(2)
#define SWEVENT_HLIST_BITS 8
#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
@@ -541,15 +629,39 @@ struct swevent_hlist {
#define PERF_ATTACH_GROUP 0x02
#define PERF_ATTACH_TASK 0x04
#define PERF_ATTACH_TASK_DATA 0x08
+#define PERF_ATTACH_ITRACE 0x10
+#define PERF_ATTACH_SCHED_CB 0x20
+#define PERF_ATTACH_CHILD 0x40
+struct bpf_prog;
struct perf_cgroup;
-struct ring_buffer;
+struct perf_buffer;
struct pmu_event_list {
raw_spinlock_t lock;
struct list_head list;
};
+/*
+ * event->sibling_list is modified while holding both ctx->lock and ctx->mutex;
+ * as such, iteration must hold either lock. However, since ctx->lock is an
+ * IRQ-safe lock held only by the CPU doing the modification, having IRQs
+ * disabled is sufficient since it will hold off the IPIs.
+ */
+#ifdef CONFIG_PROVE_LOCKING
+#define lockdep_assert_event_ctx(event) \
+ WARN_ON_ONCE(__lockdep_enabled && \
+ (this_cpu_read(hardirqs_enabled) && \
+ lockdep_is_held(&(event)->ctx->mutex) != LOCK_STATE_HELD))
+#else
+#define lockdep_assert_event_ctx(event)
+#endif
+
+#define for_each_sibling_event(sibling, event) \
+ lockdep_assert_event_ctx(event); \
+ if ((event)->group_leader == (event)) \
+ list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
+
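A usage sketch for the iterator (visit() and leader are hypothetical): the macro only iterates when passed the group leader, and lockdep_assert_event_ctx() enforces the locking rules above:

	static void visit_group(struct perf_event *leader)
	{
		struct perf_event *sibling;

		visit(leader);
		for_each_sibling_event(sibling, leader)
			visit(sibling);
	}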
/**
* struct perf_event - performance event kernel representation:
*/
@@ -563,16 +675,16 @@ struct perf_event {
struct list_head event_entry;
/*
- * XXX: group_entry and sibling_list should be mutually exclusive;
- * either you're a sibling on a group, or you're the group leader.
- * Rework the code to always use the same list element.
- *
* Locked for modification by both ctx->mutex and ctx->lock; holding
 * either suffices for read.
*/
- struct list_head group_entry;
struct list_head sibling_list;
-
+ struct list_head active_list;
+ /*
+ * Node in the pinned or flexible tree located in the event context.
+ */
+ struct rb_node group_node;
+ u64 group_index;
/*
* We need storage to track the entries in perf_pmu_migrate_context; we
* cannot use the event_entry because of RCU and we want to keep the
@@ -590,10 +702,15 @@ struct perf_event {
int group_caps;
struct perf_event *group_leader;
+ /*
+ * event->pmu will always point to pmu in which this event belongs.
+ * Whereas event->pmu_ctx->pmu may point to other pmu when group of
+ * different pmu events is created.
+ */
struct pmu *pmu;
void *pmu_private;
- enum perf_event_active_state state;
+ enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
@@ -603,36 +720,10 @@ struct perf_event {
* has been enabled (i.e. eligible to run, and the task has
* been scheduled in, if this is a per-task event)
* and running (scheduled onto the CPU), respectively.
- *
- * They are computed from tstamp_enabled, tstamp_running and
- * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
*/
u64 total_time_enabled;
u64 total_time_running;
-
- /*
- * These are timestamps used for computing total_time_enabled
- * and total_time_running when the event is in INACTIVE or
- * ACTIVE state, measured in nanoseconds from an arbitrary point
- * in time.
- * tstamp_enabled: the notional time when the event was enabled
- * tstamp_running: the notional time when the event was scheduled on
- * tstamp_stopped: in INACTIVE state, the notional time when the
- * event was scheduled off.
- */
- u64 tstamp_enabled;
- u64 tstamp_running;
- u64 tstamp_stopped;
-
- /*
- * timestamp shadows the actual context timing but it can
- * be safely used in NMI interrupt context. It reflects the
- * context time as it was when the event was last scheduled in.
- *
- * ctx_time already accounts for ctx->timestamp. Therefore to
- * compute ctx_time for a sample, simply add perf_clock().
- */
- u64 shadow_ctx_time;
+ u64 tstamp;
struct perf_event_attr attr;
u16 header_size;
@@ -641,6 +732,12 @@ struct perf_event {
struct hw_perf_event hw;
struct perf_event_context *ctx;
+ /*
+ * event->pmu_ctx points to the perf_event_pmu_context the event is
+ * added to. For a software event, this pmu_ctx may belong to another
+ * pmu when the event is part of a group that also contains
+ * non-software events.
+ */
+ struct perf_event_pmu_context *pmu_ctx;
atomic_long_t refcount;
/*
@@ -667,7 +764,7 @@ struct perf_event {
struct mutex mmap_mutex;
atomic_t mmap_count;
- struct ring_buffer *rb;
+ struct perf_buffer *rb;
struct list_head rb_entry;
unsigned long rcu_batches;
int rcu_pending;
@@ -677,31 +774,41 @@ struct perf_event {
struct fasync_struct *fasync;
/* delayed work for NMIs and such */
- int pending_wakeup;
- int pending_kill;
- int pending_disable;
- struct irq_work pending;
+ unsigned int pending_wakeup;
+ unsigned int pending_kill;
+ unsigned int pending_disable;
+ unsigned int pending_sigtrap;
+ unsigned long pending_addr; /* SIGTRAP */
+ struct irq_work pending_irq;
+ struct callback_head pending_task;
+ unsigned int pending_work;
atomic_t event_limit;
/* address range filters */
struct perf_addr_filters_head addr_filters;
 /* vma address array for file-based filters */
- unsigned long *addr_filters_offs;
+ struct perf_addr_filter_range *addr_filter_ranges;
unsigned long addr_filters_gen;
+ /* for aux_output events */
+ struct perf_event *aux_event;
+
void (*destroy)(struct perf_event *);
struct rcu_head rcu_head;
struct pid_namespace *ns;
u64 id;
+ atomic64_t lost_samples;
+
u64 (*clock)(void);
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
#ifdef CONFIG_BPF_SYSCALL
perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
+ u64 bpf_cookie;
#endif
#ifdef CONFIG_EVENT_TRACING
@@ -714,20 +821,78 @@ struct perf_event {
#ifdef CONFIG_CGROUP_PERF
struct perf_cgroup *cgrp; /* cgroup event is attach to */
- int cgrp_defer_enabled;
#endif
+#ifdef CONFIG_SECURITY
+ void *security;
+#endif
struct list_head sb_list;
#endif /* CONFIG_PERF_EVENTS */
};
+/*
+ * ,-----------------------[1:n]----------------------.
+ * V V
+ * perf_event_context <-[1:n]-> perf_event_pmu_context <--- perf_event
+ * ^ ^ | |
+ * `--------[1:n]---------' `-[n:1]-> pmu <-[1:n]-'
+ *
+ *
+ * struct perf_event_pmu_context lifetime is refcount based and RCU freed
+ * (similar to perf_event_context). Locking is as if it were a member of
+ * perf_event_context; specifically:
+ *
+ * modification, both: ctx->mutex && ctx->lock
+ * reading, either: ctx->mutex || ctx->lock
+ *
+ * There is one exception to this; namely put_pmu_ctx() isn't always called
+ * with ctx->mutex held; this means that as long as we can guarantee the epc
+ * has events the above rules hold.
+ *
+ * Specifically, sys_perf_event_open()'s group_leader case depends on
+ * ctx->mutex pinning the configuration. Since we hold a reference on
+ * group_leader (through the filedesc) it can't go away, therefore its
+ * associated pmu_ctx must exist and cannot change due to ctx->mutex.
+ */
+struct perf_event_pmu_context {
+ struct pmu *pmu;
+ struct perf_event_context *ctx;
+
+ struct list_head pmu_ctx_entry;
+
+ struct list_head pinned_active;
+ struct list_head flexible_active;
+
+ /* Used to avoid freeing per-cpu perf_event_pmu_context */
+ unsigned int embedded : 1;
+
+ unsigned int nr_events;
+
+ atomic_t refcount; /* event <-> epc */
+ struct rcu_head rcu_head;
+
+ void *task_ctx_data; /* pmu specific data */
+ /*
+ * Set when one or more (plausibly active) events can't be scheduled
+ * due to pmu overcommit or pmu constraints, while tolerating events
+ * that need not be active due to scheduling constraints, such as
+ * cgroups.
+ */
+ int rotate_necessary;
+};
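To make the locking rules above concrete, here is a minimal sketch (not part of this patch; the helper name is hypothetical) of reading an event's pmu_ctx under either lock:

	/* Hypothetical helper: per the rules above, holding either
	 * ctx->mutex or ctx->lock makes the read safe. */
	static struct perf_event_pmu_context *
	event_pmu_ctx(struct perf_event *event)
	{
		lockdep_assert_held(&event->ctx->lock);
		return event->pmu_ctx;
	}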
+
+struct perf_event_groups {
+ struct rb_root tree;
+ u64 index;
+};
+
+
/**
* struct perf_event_context - event context structure
*
* Used as a container for task events and CPU events as well:
*/
struct perf_event_context {
- struct pmu *pmu;
/*
* Protect the states of the events in the list,
* nr_active, and the list:
@@ -740,17 +905,21 @@ struct perf_event_context {
*/
struct mutex mutex;
- struct list_head active_ctx_list;
- struct list_head pinned_groups;
- struct list_head flexible_groups;
+ struct list_head pmu_ctx_list;
+ struct perf_event_groups pinned_groups;
+ struct perf_event_groups flexible_groups;
struct list_head event_list;
+
int nr_events;
- int nr_active;
+ int nr_user;
int is_active;
+
+ int nr_task_data;
int nr_stat;
int nr_freq;
int rotate_disable;
- atomic_t refcount;
+
+ refcount_t refcount; /* event <-> ctx */
struct task_struct *task;
/*
@@ -758,6 +927,7 @@ struct perf_event_context {
*/
u64 time;
u64 timestamp;
+ u64 timeoffset;
/*
* These fields let us detect when two contexts have both
@@ -770,8 +940,15 @@ struct perf_event_context {
#ifdef CONFIG_CGROUP_PERF
int nr_cgroups; /* cgroup evts */
#endif
- void *task_ctx_data; /* pmu specific data */
struct rcu_head rcu_head;
+
+ /*
+ * Sum (event->pending_sigtrap + event->pending_work)
+ *
+ * The SIGTRAP is targeted at ctx->task; as such, ctx->task must not
+ * change until the signal is delivered.
+ */
+ local_t nr_pending;
};
/*
@@ -780,12 +957,13 @@ struct perf_event_context {
*/
#define PERF_NR_CONTEXTS 4
-/**
- * struct perf_event_cpu_context - per cpu event context structure
- */
-struct perf_cpu_context {
- struct perf_event_context ctx;
- struct perf_event_context *task_ctx;
+struct perf_cpu_pmu_context {
+ struct perf_event_pmu_context epc;
+ struct perf_event_pmu_context *task_epc;
+
+ struct list_head sched_cb_entry;
+ int sched_cb_usage;
+
int active_oncpu;
int exclusive;
@@ -793,21 +971,32 @@ struct perf_cpu_context {
struct hrtimer hrtimer;
ktime_t hrtimer_interval;
unsigned int hrtimer_active;
+};
+
+/**
+ * struct perf_event_cpu_context - per cpu event context structure
+ */
+struct perf_cpu_context {
+ struct perf_event_context ctx;
+ struct perf_event_context *task_ctx;
+ int online;
#ifdef CONFIG_CGROUP_PERF
struct perf_cgroup *cgrp;
- struct list_head cgrp_cpuctx_entry;
#endif
- struct list_head sched_cb_entry;
- int sched_cb_usage;
-
- int online;
+ /*
+ * Per-CPU storage for iterators used in visit_groups_merge. The default
+ * storage is of size 2 to hold the CPU and any CPU event iterators.
+ */
+ int heap_size;
+ struct perf_event **heap;
+ struct perf_event *heap_default[2];
};
struct perf_output_handle {
struct perf_event *event;
- struct ring_buffer *rb;
+ struct perf_buffer *rb;
unsigned long wakeup;
unsigned long size;
u64 aux_flags;
@@ -819,8 +1008,9 @@ struct perf_output_handle {
};
struct bpf_perf_event_data_kern {
- struct pt_regs *regs;
+ bpf_user_pt_regs_t *regs;
struct perf_sample_data *data;
+ struct perf_event *event;
};
#ifdef CONFIG_CGROUP_PERF
@@ -832,6 +1022,8 @@ struct bpf_perf_event_data_kern {
struct perf_cgroup_info {
u64 time;
u64 timestamp;
+ u64 timeoffset;
+ int active;
};
struct perf_cgroup {
@@ -856,6 +1048,8 @@ perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
#ifdef CONFIG_PERF_EVENTS
+extern struct perf_event_context *perf_cpu_task_ctx(void);
+
extern void *perf_aux_output_begin(struct perf_output_handle *handle,
struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
@@ -864,21 +1058,21 @@ extern int perf_aux_output_skip(struct perf_output_handle *handle,
unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);
extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
+extern void perf_event_itrace_started(struct perf_event *event);
extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);
-extern int perf_num_counters(void);
-extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
struct task_struct *next);
-extern int perf_event_init_task(struct task_struct *child);
+extern int perf_event_init_task(struct task_struct *child, u64 clone_flags);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern struct file *perf_event_get(unsigned int fd);
+extern const struct perf_event *perf_get_event(struct file *file);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
@@ -887,6 +1081,9 @@ extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
+
+extern void perf_pmu_resched(struct pmu *pmu);
+
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
@@ -898,52 +1095,87 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
int src_cpu, int dst_cpu);
-int perf_event_read_local(struct perf_event *event, u64 *value);
+int perf_event_read_local(struct perf_event *event, u64 *value,
+ u64 *enabled, u64 *running);
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
+extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
+
+static inline bool branch_sample_no_flags(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_FLAGS;
+}
+
+static inline bool branch_sample_no_cycles(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_CYCLES;
+}
+
+static inline bool branch_sample_type(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_TYPE_SAVE;
+}
+
+static inline bool branch_sample_hw_index(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX;
+}
+
+static inline bool branch_sample_priv(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE;
+}
+
struct perf_sample_data {
/*
- * Fields set by perf_sample_data_init(), group so as to
- * minimize the cachelines touched.
+ * Fields set by perf_sample_data_init() unconditionally,
+ * group so as to minimize the cachelines touched.
*/
- u64 addr;
- struct perf_raw_record *raw;
- struct perf_branch_stack *br_stack;
+ u64 sample_flags;
u64 period;
- u64 weight;
- u64 txn;
- union perf_mem_data_src data_src;
+ u64 dyn_size;
/*
- * The other fields, optionally {set,used} by
- * perf_{prepare,output}_sample().
+ * Fields commonly set by __perf_event_header__init_id(),
+ * group so as to minimize the cachelines touched.
*/
u64 type;
- u64 ip;
struct {
u32 pid;
u32 tid;
} tid_entry;
u64 time;
u64 id;
- u64 stream_id;
struct {
u32 cpu;
u32 reserved;
} cpu_entry;
- struct perf_callchain_entry *callchain;
/*
- * regs_user may point to task_pt_regs or to regs_user_copy, depending
- * on arch details.
+ * The other fields, optionally {set,used} by
+ * perf_{prepare,output}_sample().
*/
- struct perf_regs regs_user;
- struct pt_regs regs_user_copy;
+ u64 ip;
+ struct perf_callchain_entry *callchain;
+ struct perf_raw_record *raw;
+ struct perf_branch_stack *br_stack;
+ union perf_sample_weight weight;
+ union perf_mem_data_src data_src;
+ u64 txn;
+ struct perf_regs regs_user;
struct perf_regs regs_intr;
u64 stack_user_size;
+
+ u64 stream_id;
+ u64 cgroup;
+ u64 addr;
+ u64 phys_addr;
+ u64 data_page_size;
+ u64 code_page_size;
+ u64 aux_size;
} ____cacheline_aligned;
/* default value for data source */
@@ -957,20 +1189,103 @@ static inline void perf_sample_data_init(struct perf_sample_data *data,
u64 addr, u64 period)
{
/* remaining struct members initialized in perf_prepare_sample() */
- data->addr = addr;
- data->raw = NULL;
- data->br_stack = NULL;
+ data->sample_flags = PERF_SAMPLE_PERIOD;
data->period = period;
- data->weight = 0;
- data->data_src.val = PERF_MEM_NA;
- data->txn = 0;
+ data->dyn_size = 0;
+
+ if (addr) {
+ data->addr = addr;
+ data->sample_flags |= PERF_SAMPLE_ADDR;
+ }
+}
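As a usage sketch (the driver routine is hypothetical; perf_event_overflow() is the generic overflow entry point), a PMU overflow handler typically initializes its sample data like this:

	static void my_pmu_handle_overflow(struct perf_event *event, u64 period,
					   struct pt_regs *regs)
	{
		struct perf_sample_data data;

		/* Sets PERF_SAMPLE_PERIOD; PERF_SAMPLE_ADDR only if addr != 0. */
		perf_sample_data_init(&data, 0, period);

		if (perf_event_overflow(event, &data, regs))
			; /* the driver would stop the counter here */
	}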
+
+static inline void perf_sample_save_callchain(struct perf_sample_data *data,
+ struct perf_event *event,
+ struct pt_regs *regs)
+{
+ int size = 1;
+
+ data->callchain = perf_callchain(event, regs);
+ size += data->callchain->nr;
+
+ data->dyn_size += size * sizeof(u64);
+ data->sample_flags |= PERF_SAMPLE_CALLCHAIN;
+}
+
+static inline void perf_sample_save_raw_data(struct perf_sample_data *data,
+ struct perf_raw_record *raw)
+{
+ struct perf_raw_frag *frag = &raw->frag;
+ u32 sum = 0;
+ int size;
+
+ do {
+ sum += frag->size;
+ if (perf_raw_frag_last(frag))
+ break;
+ frag = frag->next;
+ } while (1);
+
+ size = round_up(sum + sizeof(u32), sizeof(u64));
+ raw->size = size - sizeof(u32);
+ frag->pad = raw->size - sum;
+
+ data->raw = raw;
+ data->dyn_size += size;
+ data->sample_flags |= PERF_SAMPLE_RAW;
+}
+
+static inline void perf_sample_save_brstack(struct perf_sample_data *data,
+ struct perf_event *event,
+ struct perf_branch_stack *brs)
+{
+ int size = sizeof(u64); /* nr */
+
+ if (branch_sample_hw_index(event))
+ size += sizeof(u64);
+ size += brs->nr * sizeof(struct perf_branch_entry);
+
+ data->br_stack = brs;
+ data->dyn_size += size;
+ data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
+}
+
+static inline u32 perf_sample_data_size(struct perf_sample_data *data,
+ struct perf_event *event)
+{
+ u32 size = sizeof(struct perf_event_header);
+
+ size += event->header_size + event->id_header_size;
+ size += data->dyn_size;
+
+ return size;
+}
+
+/*
+ * Clear all bitfields in the perf_branch_entry.
+ * The to and from fields are not cleared because they are
+ * systematically modified by the caller.
+ */
+static inline void perf_clear_branch_entry_bitfields(struct perf_branch_entry *br)
+{
+ br->mispred = 0;
+ br->predicted = 0;
+ br->in_tx = 0;
+ br->abort = 0;
+ br->cycles = 0;
+ br->type = 0;
+ br->spec = PERF_BR_SPEC_NA;
+ br->reserved = 0;
}
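A brief sketch of the intended call pattern (the driver helper is hypothetical): clear the bitfields first, then set from/to and whichever flags the hardware reports:

	static void my_pmu_store_branch(struct perf_branch_entry *br,
					u64 from, u64 to, bool mispred)
	{
		perf_clear_branch_entry_bitfields(br);
		br->from = from;
		br->to = to;
		br->mispred = mispred;
		br->predicted = !mispred;
	}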
extern void perf_output_sample(struct perf_output_handle *handle,
struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event);
-extern void perf_prepare_sample(struct perf_event_header *header,
+extern void perf_prepare_sample(struct perf_sample_data *data,
+ struct perf_event *event,
+ struct pt_regs *regs);
+extern void perf_prepare_header(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event,
struct pt_regs *regs);
@@ -985,9 +1300,9 @@ extern void perf_event_output_forward(struct perf_event *event,
extern void perf_event_output_backward(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs);
-extern void perf_event_output(struct perf_event *event,
- struct perf_sample_data *data,
- struct pt_regs *regs);
+extern int perf_event_output(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs);
static inline bool
is_default_overflow_handler(struct perf_event *event)
@@ -1011,6 +1326,15 @@ perf_event__output_id_sample(struct perf_event *event,
extern void
perf_log_lost_samples(struct perf_event *event, u64 lost);
+static inline bool event_has_any_exclude_flag(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+
+ return attr->exclude_idle || attr->exclude_user ||
+ attr->exclude_kernel || attr->exclude_hv ||
+ attr->exclude_guest || attr->exclude_host;
+}
+
static inline bool is_sampling_event(struct perf_event *event)
{
return event->attr.sample_period != 0;
@@ -1024,6 +1348,19 @@ static inline int is_software_event(struct perf_event *event)
return event->event_caps & PERF_EV_CAP_SOFTWARE;
}
+/*
+ * Return 1 if the event is in a software context, 0 if it is in a
+ * hardware context.
+ */
+static inline int in_software_context(struct perf_event *event)
+{
+ return event->pmu_ctx->pmu->task_ctx_nr == perf_sw_context;
+}
+
+static inline int is_exclusive_pmu(struct pmu *pmu)
+{
+ return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
+}
+
extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
@@ -1034,12 +1371,18 @@ static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned lo
#endif
/*
- * Take a snapshot of the regs. Skip ip and frame pointer to
- * the nth caller. We only need a few of the regs:
+ * When generating a perf sample in-line, instead of from an interrupt /
+ * exception, we lack a pt_regs. This is typically used from software events
+ * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints.
+ *
+ * We typically don't need a full set, but (for x86) do require:
* - ip for PERF_SAMPLE_IP
* - cs for user_mode() tests
- * - bp for callchains
- * - eflags, for future purposes, just in case
+ * - sp for PERF_SAMPLE_CALLCHAIN
+ * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
+ *
+ * NOTE: assumes @regs is otherwise already 0 filled; this is important for
+ * things like PERF_SAMPLE_REGS_INTR.
*/
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
@@ -1060,30 +1403,24 @@ DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
* which is guaranteed by us not actually scheduling inside other swevents
* because those disable preemption.
*/
-static __always_inline void
-perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
+static __always_inline void __perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
- if (static_key_false(&perf_swevent_enabled[event_id])) {
- struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
+ struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
- perf_fetch_caller_regs(regs);
- ___perf_sw_event(event_id, nr, regs, addr);
- }
+ perf_fetch_caller_regs(regs);
+ ___perf_sw_event(event_id, nr, regs, addr);
}
extern struct static_key_false perf_sched_events;
-static __always_inline bool
-perf_sw_migrate_enabled(void)
+static __always_inline bool __perf_sw_enabled(int swevt)
{
- if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
- return true;
- return false;
+ return static_key_false(&perf_swevent_enabled[swevt]);
}
static inline void perf_event_task_migrate(struct task_struct *task)
{
- if (perf_sw_migrate_enabled())
+ if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS))
task->sched_migrated = 1;
}
@@ -1093,11 +1430,9 @@ static inline void perf_event_task_sched_in(struct task_struct *prev,
if (static_branch_unlikely(&perf_sched_events))
__perf_event_task_sched_in(prev, task);
- if (perf_sw_migrate_enabled() && task->sched_migrated) {
- struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
-
- perf_fetch_caller_regs(regs);
- ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
+ if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS) &&
+ task->sched_migrated) {
+ __perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
task->sched_migrated = 0;
}
}
@@ -1105,26 +1440,62 @@ static inline void perf_event_task_sched_in(struct task_struct *prev,
static inline void perf_event_task_sched_out(struct task_struct *prev,
struct task_struct *next)
{
- perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
+ if (__perf_sw_enabled(PERF_COUNT_SW_CONTEXT_SWITCHES))
+ __perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
+
+#ifdef CONFIG_CGROUP_PERF
+ if (__perf_sw_enabled(PERF_COUNT_SW_CGROUP_SWITCHES) &&
+ perf_cgroup_from_task(prev, NULL) !=
+ perf_cgroup_from_task(next, NULL))
+ __perf_sw_event_sched(PERF_COUNT_SW_CGROUP_SWITCHES, 1, 0);
+#endif
if (static_branch_unlikely(&perf_sched_events))
__perf_event_task_sched_out(prev, next);
}
-static inline u64 __perf_event_count(struct perf_event *event)
+extern void perf_event_mmap(struct vm_area_struct *vma);
+
+extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
+ bool unregister, const char *sym);
+extern void perf_event_bpf_event(struct bpf_prog *prog,
+ enum perf_bpf_event_type type,
+ u16 flags);
+
+#ifdef CONFIG_GUEST_PERF_EVENTS
+extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
+
+DECLARE_STATIC_CALL(__perf_guest_state, *perf_guest_cbs->state);
+DECLARE_STATIC_CALL(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
+DECLARE_STATIC_CALL(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);
+
+static inline unsigned int perf_guest_state(void)
{
- return local64_read(&event->count) + atomic64_read(&event->child_count);
+ return static_call(__perf_guest_state)();
}
-
-extern void perf_event_mmap(struct vm_area_struct *vma);
-extern struct perf_guest_info_callbacks *perf_guest_cbs;
-extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
-extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+static inline unsigned long perf_guest_get_ip(void)
+{
+ return static_call(__perf_guest_get_ip)();
+}
+static inline unsigned int perf_guest_handle_intel_pt_intr(void)
+{
+ return static_call(__perf_guest_handle_intel_pt_intr)();
+}
+extern void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
+extern void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
+#else
+static inline unsigned int perf_guest_state(void) { return 0; }
+static inline unsigned long perf_guest_get_ip(void) { return 0; }
+static inline unsigned int perf_guest_handle_intel_pt_intr(void) { return 0; }
+#endif /* CONFIG_GUEST_PERF_EVENTS */
extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_namespaces(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);
+extern void perf_event_text_poke(const void *addr,
+ const void *old_bytes, size_t old_len,
+ const void *new_bytes, size_t new_len);
/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
@@ -1136,6 +1507,8 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
u32 max_stack, bool crosstask, bool add_mark);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);
+extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
+extern void put_callchain_entry(int rctx);
extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;
@@ -1172,29 +1545,48 @@ extern int sysctl_perf_cpu_time_max_percent;
extern void perf_sample_event_took(u64 sample_len_ns);
-extern int perf_proc_update_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
-extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
-
+int perf_proc_update_handler(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
+int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
int perf_event_max_stack_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+ void *buffer, size_t *lenp, loff_t *ppos);
+
+/* Access to perf_event_open(2) syscall. */
+#define PERF_SECURITY_OPEN 0
+
+/* Finer grained perf_event_open(2) access control. */
+#define PERF_SECURITY_CPU 1
+#define PERF_SECURITY_KERNEL 2
+#define PERF_SECURITY_TRACEPOINT 3
-static inline bool perf_paranoid_tracepoint_raw(void)
+static inline int perf_is_paranoid(void)
{
return sysctl_perf_event_paranoid > -1;
}
-static inline bool perf_paranoid_cpu(void)
+static inline int perf_allow_kernel(struct perf_event_attr *attr)
{
- return sysctl_perf_event_paranoid > 0;
+ if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
+ return -EACCES;
+
+ return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
}
-static inline bool perf_paranoid_kernel(void)
+static inline int perf_allow_cpu(struct perf_event_attr *attr)
{
- return sysctl_perf_event_paranoid > 1;
+ if (sysctl_perf_event_paranoid > 0 && !perfmon_capable())
+ return -EACCES;
+
+ return security_perf_event_open(attr, PERF_SECURITY_CPU);
+}
+
+static inline int perf_allow_tracepoint(struct perf_event_attr *attr)
+{
+ if (sysctl_perf_event_paranoid > -1 && !perfmon_capable())
+ return -EPERM;
+
+ return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT);
}
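For illustration (the event-init routine is hypothetical), a PMU driver gating a privileged feature would use these helpers as follows; a non-zero return propagates -EACCES/-EPERM or the LSM's verdict:

	static int my_pmu_event_init(struct perf_event *event)
	{
		/* Counting kernel state requires elevated privilege. */
		if (!event->attr.exclude_kernel) {
			int ret = perf_allow_kernel(&event->attr);

			if (ret)
				return ret;
		}
		return 0;
	}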
extern void perf_event_init(void);
@@ -1209,6 +1601,9 @@ extern void perf_bp_event(struct perf_event *event, void *data);
(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs) instruction_pointer(regs)
#endif
+#ifndef perf_arch_bpf_user_pt_regs
+# define perf_arch_bpf_user_pt_regs(regs) regs
+#endif
static inline bool has_branch_stack(struct perf_event *event)
{
@@ -1250,13 +1645,17 @@ perf_event_addr_filters(struct perf_event *event)
}
extern void perf_event_addr_filters_sync(struct perf_event *event);
+extern void perf_report_aux_output_id(struct perf_event *event, u64 hw_id);
extern int perf_output_begin(struct perf_output_handle *handle,
+ struct perf_sample_data *data,
struct perf_event *event, unsigned int size);
extern int perf_output_begin_forward(struct perf_output_handle *handle,
- struct perf_event *event,
- unsigned int size);
+ struct perf_sample_data *data,
+ struct perf_event *event,
+ unsigned int size);
extern int perf_output_begin_backward(struct perf_output_handle *handle,
+ struct perf_sample_data *data,
struct perf_event *event,
unsigned int size);
@@ -1265,6 +1664,9 @@ extern unsigned int perf_output_copy(struct perf_output_handle *handle,
const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
unsigned int len);
+extern long perf_output_copy_aux(struct perf_output_handle *aux_handle,
+ struct perf_output_handle *handle,
+ unsigned long from, unsigned long to);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
@@ -1274,6 +1676,8 @@ extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
extern int perf_event_account_interrupt(struct perf_event *event);
+extern int perf_event_period(struct perf_event *event, u64 value);
+extern u64 perf_event_pause(struct perf_event *event, bool reset);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
@@ -1294,16 +1698,22 @@ perf_event_task_sched_in(struct task_struct *prev,
static inline void
perf_event_task_sched_out(struct task_struct *prev,
struct task_struct *next) { }
-static inline int perf_event_init_task(struct task_struct *child) { return 0; }
+static inline int perf_event_init_task(struct task_struct *child,
+ u64 clone_flags) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
static inline struct file *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
+static inline const struct perf_event *perf_get_event(struct file *file)
+{
+ return ERR_PTR(-EINVAL);
+}
static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
return ERR_PTR(-EINVAL);
}
-static inline int perf_event_read_local(struct perf_event *event, u64 *value)
+static inline int perf_event_read_local(struct perf_event *event, u64 *value,
+ u64 *enabled, u64 *running)
{
return -EINVAL;
}
@@ -1318,20 +1728,25 @@ static inline int perf_event_refresh(struct perf_event *event, int refresh)
static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
static inline void
-perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { }
-static inline void
perf_bp_event(struct perf_event *event, void *data) { }
-static inline int perf_register_guest_info_callbacks
-(struct perf_guest_info_callbacks *callbacks) { return 0; }
-static inline int perf_unregister_guest_info_callbacks
-(struct perf_guest_info_callbacks *callbacks) { return 0; }
-
static inline void perf_event_mmap(struct vm_area_struct *vma) { }
+
+typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data);
+static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
+ bool unregister, const char *sym) { }
+static inline void perf_event_bpf_event(struct bpf_prog *prog,
+ enum perf_bpf_event_type type,
+ u16 flags) { }
static inline void perf_event_exec(void) { }
static inline void perf_event_comm(struct task_struct *tsk, bool exec) { }
static inline void perf_event_namespaces(struct task_struct *tsk) { }
static inline void perf_event_fork(struct task_struct *tsk) { }
+static inline void perf_event_text_poke(const void *addr,
+ const void *old_bytes,
+ size_t old_len,
+ const void *new_bytes,
+ size_t new_len) { }
static inline void perf_event_init(void) { }
static inline int perf_swevent_get_recursion_context(void) { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx) { }
@@ -1341,6 +1756,14 @@ static inline void perf_event_disable(struct perf_event *event) { }
static inline int __perf_event_disable(void *info) { return -1; }
static inline void perf_event_task_tick(void) { }
static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
+static inline int perf_event_period(struct perf_event *event, u64 value)
+{
+ return -EINVAL;
+}
+static inline u64 perf_event_pause(struct perf_event *event, bool reset)
+{
+ return 0;
+}
#endif
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
@@ -1349,11 +1772,6 @@ extern void perf_restore_debug_store(void);
static inline void perf_restore_debug_store(void) { }
#endif
-static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
-{
- return frag->pad < sizeof(u64);
-}
-
#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
struct perf_pmu_events_attr {
@@ -1369,6 +1787,18 @@ struct perf_pmu_events_ht_attr {
const char *event_str_noht;
};
+struct perf_pmu_events_hybrid_attr {
+ struct device_attribute attr;
+ u64 id;
+ const char *event_str;
+ u64 pmu_type;
+};
+
+struct perf_pmu_format_hybrid_attr {
+ struct device_attribute attr;
+ u64 pmu_type;
+};
+
ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
char *page);
@@ -1385,7 +1815,13 @@ static struct perf_pmu_events_attr _var = { \
.event_str = _str, \
};
-#define PMU_FORMAT_ATTR(_name, _format) \
+#define PMU_EVENT_ATTR_ID(_name, _show, _id) \
+ (&((struct perf_pmu_events_attr[]) { \
+ { .attr = __ATTR(_name, 0444, _show, NULL), \
+ .id = _id, } \
+ })[0].attr.attr)
+
+#define PMU_FORMAT_ATTR_SHOW(_name, _format) \
static ssize_t \
_name##_show(struct device *dev, \
struct device_attribute *attr, \
@@ -1394,6 +1830,9 @@ _name##_show(struct device *dev, \
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
return sprintf(page, _format "\n"); \
} \
+
+#define PMU_FORMAT_ATTR(_name, _format) \
+ PMU_FORMAT_ATTR_SHOW(_name, _format) \
\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
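Typical use of this macro pair in a driver (the attribute names below are examples only) is to describe config bitfields and collect them into a sysfs format group:

	PMU_FORMAT_ATTR(event, "config:0-7");
	PMU_FORMAT_ATTR(umask, "config:8-15");

	static struct attribute *my_pmu_format_attrs[] = {
		&format_attr_event.attr,
		&format_attr_umask.attr,
		NULL,
	};

	static const struct attribute_group my_pmu_format_group = {
		.name  = "format",
		.attrs = my_pmu_format_attrs,
	};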
@@ -1406,4 +1845,40 @@ int perf_event_exit_cpu(unsigned int cpu);
#define perf_event_exit_cpu NULL
#endif
+extern void __weak arch_perf_update_userpage(struct perf_event *event,
+ struct perf_event_mmap_page *userpg,
+ u64 now);
+
+#ifdef CONFIG_MMU
+extern __weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr);
+#endif
+
+/*
+ * Snapshot branch stack on software events.
+ *
+ * Branch stack can be very useful in understanding software events. For
+ * example, when a long function, e.g. sys_perf_event_open, returns an
+ * errno, it is not obvious why the function failed. Branch stack could
+ * provide very helpful information in this type of scenario.
+ *
+ * For a software event, it is necessary to stop the hardware branch
+ * recorder quickly. Otherwise, the hardware register/buffer will be
+ * flooded with entries from the triggering event itself. Therefore, a
+ * static call is used to stop the hardware recorder.
+ */
+
+/*
+ * cnt is the number of entries allocated in entries.
+ * Returns the number of entries copied into entries.
+ */
+typedef int (perf_snapshot_branch_stack_t)(struct perf_branch_entry *entries,
+ unsigned int cnt);
+DECLARE_STATIC_CALL(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t);
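An arch PMU that can stop and read its branch recorder quickly would plug in via static_call_update(); a sketch with a hypothetical callback:

	static int my_snapshot_branch_stack(struct perf_branch_entry *entries,
					    unsigned int cnt)
	{
		/* Stop the recorder, copy up to cnt entries, restart it. */
		return 0; /* number of entries actually copied */
	}

	static int __init my_pmu_init(void)
	{
		static_call_update(perf_snapshot_branch_stack,
				   my_snapshot_branch_stack);
		return 0;
	}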
+
+#ifndef PERF_NEEDS_LOPWR_CB
+static inline void perf_lopwr_cb(bool mode)
+{
+}
+#endif
+
#endif /* _LINUX_PERF_EVENT_H */
diff --git a/include/linux/perf_event_api.h b/include/linux/perf_event_api.h
new file mode 100644
index 000000000000..c2fd6048b790
--- /dev/null
+++ b/include/linux/perf_event_api.h
@@ -0,0 +1 @@
+#include <linux/perf_event.h>
diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h
index 9b7dd59fe28d..f632c5725f16 100644
--- a/include/linux/perf_regs.h
+++ b/include/linux/perf_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERF_REGS_H
#define _LINUX_PERF_REGS_H
@@ -10,13 +11,20 @@ struct perf_regs {
#ifdef CONFIG_HAVE_PERF_REGS
#include <asm/perf_regs.h>
+
+#ifndef PERF_REG_EXTENDED_MASK
+#define PERF_REG_EXTENDED_MASK 0
+#endif
+
u64 perf_reg_value(struct pt_regs *regs, int idx);
int perf_reg_validate(u64 mask);
u64 perf_reg_abi(struct task_struct *task);
void perf_get_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs,
- struct pt_regs *regs_user_copy);
+ struct pt_regs *regs);
#else
+
+#define PERF_REG_EXTENDED_MASK 0
+
static inline u64 perf_reg_value(struct pt_regs *regs, int idx)
{
return 0;
@@ -33,8 +41,7 @@ static inline u64 perf_reg_abi(struct task_struct *task)
}
static inline void perf_get_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs,
- struct pt_regs *regs_user_copy)
+ struct pt_regs *regs)
{
regs_user->regs = task_pt_regs(current);
regs_user->abi = perf_reg_abi(current);
diff --git a/include/linux/personality.h b/include/linux/personality.h
index aeb7892b2468..fc16fbc659c7 100644
--- a/include/linux/personality.h
+++ b/include/linux/personality.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERSONALITY_H
#define _LINUX_PERSONALITY_H
diff --git a/include/linux/pfn.h b/include/linux/pfn.h
index 1132953235c0..14bc053c53d8 100644
--- a/include/linux/pfn.h
+++ b/include/linux/pfn.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PFN_H_
#define _LINUX_PFN_H_
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
index a49b3259cad7..2d9148221e9a 100644
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PFN_T_H_
#define _LINUX_PFN_T_H_
#include <linux/mm.h>
@@ -8,14 +9,18 @@
* PFN_SG_LAST - pfn references a page and is the last scatterlist entry
* PFN_DEV - pfn is not covered by system memmap by default
* PFN_MAP - pfn has a dynamic page mapping established by a device driver
+ * PFN_SPECIAL - for CONFIG_FS_DAX_LIMITED builds to allow XIP, but not
+ * get_user_pages
*/
-#define PFN_FLAGS_MASK (((u64) ~PAGE_MASK) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
+#define PFN_FLAGS_MASK (((u64) (~PAGE_MASK)) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
#define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1))
#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2))
#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))
#define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4))
+#define PFN_SPECIAL (1ULL << (BITS_PER_LONG_LONG - 5))
#define PFN_FLAGS_TRACE \
+ { PFN_SPECIAL, "SPECIAL" }, \
{ PFN_SG_CHAIN, "SG_CHAIN" }, \
{ PFN_SG_LAST, "SG_LAST" }, \
{ PFN_DEV, "DEV" }, \
@@ -61,13 +66,6 @@ static inline phys_addr_t pfn_t_to_phys(pfn_t pfn)
return PFN_PHYS(pfn_t_to_pfn(pfn));
}
-static inline void *pfn_t_to_virt(pfn_t pfn)
-{
- if (pfn_t_has_page(pfn))
- return __va(pfn_t_to_phys(pfn));
- return NULL;
-}
-
static inline pfn_t page_to_pfn_t(struct page *page)
{
return pfn_to_pfn_t(page_to_pfn(page));
@@ -99,7 +97,7 @@ static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot)
#endif
#endif
-#ifdef __HAVE_ARCH_PTE_DEVMAP
+#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline bool pfn_t_devmap(pfn_t pfn)
{
const u64 flags = PFN_DEV|PFN_MAP;
@@ -117,6 +115,17 @@ pmd_t pmd_mkdevmap(pmd_t pmd);
defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
pud_t pud_mkdevmap(pud_t pud);
#endif
-#endif /* __HAVE_ARCH_PTE_DEVMAP */
+#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */
+#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
+static inline bool pfn_t_special(pfn_t pfn)
+{
+ return (pfn.val & PFN_SPECIAL) == PFN_SPECIAL;
+}
+#else
+static inline bool pfn_t_special(pfn_t pfn)
+{
+ return false;
+}
+#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
#endif /* _LINUX_PFN_T_H_ */
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
new file mode 100644
index 000000000000..c5a51481bbb9
--- /dev/null
+++ b/include/linux/pgtable.h
@@ -0,0 +1,1712 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PGTABLE_H
+#define _LINUX_PGTABLE_H
+
+#include <linux/pfn.h>
+#include <asm/pgtable.h>
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_MMU
+
+#include <linux/mm_types.h>
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <asm-generic/pgtable_uffd.h>
+#include <linux/page_table_check.h>
+
+#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
+ defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
+#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
+#endif
+
+/*
+ * On almost all architectures and configurations, 0 can be used as the
+ * upper ceiling to free_pgtables(): on many architectures it has the same
+ * effect as using TASK_SIZE. However, there is one configuration which
+ * must impose a more careful limit, to avoid freeing kernel pgtables.
+ */
+#ifndef USER_PGTABLES_CEILING
+#define USER_PGTABLES_CEILING 0UL
+#endif
+
+/*
+ * This defines the first usable user address. Platforms
+ * can override its value with custom FIRST_USER_ADDRESS
+ * defined in their respective <asm/pgtable.h>.
+ */
+#ifndef FIRST_USER_ADDRESS
+#define FIRST_USER_ADDRESS 0UL
+#endif
+
+/*
+ * This defines the generic helper for accessing the PMD page
+ * table page, although platforms can still override it
+ * via their respective <asm/pgtable.h>.
+ */
+#ifndef pmd_pgtable
+#define pmd_pgtable(pmd) pmd_page(pmd)
+#endif
+
+/*
+ * A page table page can be thought of as an array like this: pXd_t[PTRS_PER_PxD]
+ *
+ * The pXx_index() functions return the index of the entry in the page
+ * table page which would control the given virtual address.
+ *
+ * As these functions may be used by the same code for different levels of
+ * the page table folding, they are always available, regardless of
+ * CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0
+ * because in such cases PTRS_PER_PxD equals 1.
+ */
+
+static inline unsigned long pte_index(unsigned long address)
+{
+ return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+}
+#define pte_index pte_index
+
+#ifndef pmd_index
+static inline unsigned long pmd_index(unsigned long address)
+{
+ return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+}
+#define pmd_index pmd_index
+#endif
+
+#ifndef pud_index
+static inline unsigned long pud_index(unsigned long address)
+{
+ return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
+}
+#define pud_index pud_index
+#endif
+
+#ifndef pgd_index
+/* Must be a compile-time constant, so implement it as a macro */
+#define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+#endif
+
+#ifndef pte_offset_kernel
+static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
+{
+ return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
+}
+#define pte_offset_kernel pte_offset_kernel
+#endif
+
+#if defined(CONFIG_HIGHPTE)
+#define pte_offset_map(dir, address) \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
+ pte_index((address)))
+#define pte_unmap(pte) kunmap_atomic((pte))
+#else
+#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
+#define pte_unmap(pte) ((void)(pte)) /* NOP */
+#endif
+
+/* Find an entry in the second-level page table.. */
+#ifndef pmd_offset
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+{
+ return pud_pgtable(*pud) + pmd_index(address);
+}
+#define pmd_offset pmd_offset
+#endif
+
+#ifndef pud_offset
+static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+ return p4d_pgtable(*p4d) + pud_index(address);
+}
+#define pud_offset pud_offset
+#endif
+
+static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
+{
+ return (pgd + pgd_index(address));
+}
+
+/*
+ * a shortcut to get a pgd_t in a given mm
+ */
+#ifndef pgd_offset
+#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
+#endif
+
+/*
+ * a shortcut which implies the use of the kernel's pgd, instead
+ * of a process's
+ */
+#ifndef pgd_offset_k
+#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
+#endif
+
+/*
+ * In many cases it is known that a virtual address is mapped at PMD or PTE
+ * level, so instead of traversing all the page table levels, we can get a
+ * pointer to the PMD entry in a user or kernel page table, or translate a
+ * virtual address to a pointer to the PTE in the kernel page tables, with
+ * these simple helpers.
+ */
+static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
+{
+ return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
+}
+
+static inline pmd_t *pmd_off_k(unsigned long va)
+{
+ return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
+}
+
+static inline pte_t *virt_to_kpte(unsigned long vaddr)
+{
+ pmd_t *pmd = pmd_off_k(vaddr);
+
+ return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
+}
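Pulling the offset helpers together, a sketch of a full top-down walk to a kernel PTE (illustrative only; a real walker must also hold the appropriate locks and handle huge mappings):

	static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd) || pgd_bad(*pgd))
			return NULL;
		p4d = p4d_offset(pgd, addr);
		if (p4d_none(*p4d) || p4d_bad(*p4d))
			return NULL;
		pud = pud_offset(p4d, addr);
		if (pud_none(*pud) || pud_bad(*pud))
			return NULL;
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			return NULL;
		return pte_offset_kernel(pmd, addr);
	}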
+
+#ifndef pmd_young
+static inline int pmd_young(pmd_t pmd)
+{
+ return 0;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+extern int ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep,
+ pte_t entry, int dirty);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty);
+extern int pudp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pud_t *pudp,
+ pud_t entry, int dirty);
+#else
+static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty)
+{
+ BUILD_BUG();
+ return 0;
+}
+static inline int pudp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pud_t *pudp,
+ pud_t entry, int dirty)
+{
+ BUILD_BUG();
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ int r = 1;
+ if (!pte_young(pte))
+ r = 0;
+ else
+ set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
+ return r;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ pmd_t pmd = *pmdp;
+ int r = 1;
+ if (!pmd_young(pmd))
+ r = 0;
+ else
+ set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
+ return r;
+}
+#else
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ BUILD_BUG();
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+int ptep_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#else
+/*
+ * Although relevant to THP only, this API is called from generic rmap code
+ * under PageTransHuge(), hence it needs a dummy implementation for !THP.
+ */
+static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ BUILD_BUG();
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef arch_has_hw_nonleaf_pmd_young
+/*
+ * Return whether the accessed bit in non-leaf PMD entries is supported on the
+ * local CPU.
+ */
+static inline bool arch_has_hw_nonleaf_pmd_young(void)
+{
+ return IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG);
+}
+#endif
+
+#ifndef arch_has_hw_pte_young
+/*
+ * Return whether the accessed bit is supported on the local CPU.
+ *
+ * This stub assumes accessing through an old PTE triggers a page fault.
+ * Architectures that automatically set the access bit should override it.
+ */
+static inline bool arch_has_hw_pte_young(void)
+{
+ return false;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ pte_clear(mm, address, ptep);
+ page_table_check_pte_clear(mm, address, pte);
+ return pte;
+}
+#endif
+
+static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ ptep_get_and_clear(mm, addr, ptep);
+}
+
+#ifndef ptep_get
+static inline pte_t ptep_get(pte_t *ptep)
+{
+ return READ_ONCE(*ptep);
+}
+#endif
+
+#ifndef pmdp_get
+static inline pmd_t pmdp_get(pmd_t *pmdp)
+{
+ return READ_ONCE(*pmdp);
+}
+#endif
+
+#ifdef CONFIG_GUP_GET_PXX_LOW_HIGH
+/*
+ * For walking the pagetables without holding any locks. Some architectures
+ * (eg x86-32 PAE) cannot load the entries atomically without using expensive
+ * instructions. We are guaranteed that a PTE will only either go from not
+ * present to present, or present to not present -- it will not switch to a
+ * completely different present page without a TLB flush in between, which
+ * we prevent by holding interrupts off.
+ *
+ * Setting ptes from not present to present goes:
+ *
+ * ptep->pte_high = h;
+ * smp_wmb();
+ * ptep->pte_low = l;
+ *
+ * And present to not present goes:
+ *
+ * ptep->pte_low = 0;
+ * smp_wmb();
+ * ptep->pte_high = 0;
+ *
+ * We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'.
+ * We load pte_high *after* loading pte_low, which ensures we don't see an older
+ * value of pte_high. *Then* we recheck pte_low, which ensures that we haven't
+ * picked up a changed pte high. We might have gotten rubbish values from
+ * pte_low and pte_high, but we are guaranteed that pte_low will not have the
+ * present bit set *unless* it is 'l'. Because get_user_pages_fast() only
+ * operates on present ptes we're safe.
+ */
+static inline pte_t ptep_get_lockless(pte_t *ptep)
+{
+ pte_t pte;
+
+ do {
+ pte.pte_low = ptep->pte_low;
+ smp_rmb();
+ pte.pte_high = ptep->pte_high;
+ smp_rmb();
+ } while (unlikely(pte.pte_low != ptep->pte_low));
+
+ return pte;
+}
+#define ptep_get_lockless ptep_get_lockless
+
+#if CONFIG_PGTABLE_LEVELS > 2
+static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
+{
+ pmd_t pmd;
+
+ do {
+ pmd.pmd_low = pmdp->pmd_low;
+ smp_rmb();
+ pmd.pmd_high = pmdp->pmd_high;
+ smp_rmb();
+ } while (unlikely(pmd.pmd_low != pmdp->pmd_low));
+
+ return pmd;
+}
+#define pmdp_get_lockless pmdp_get_lockless
+#endif /* CONFIG_PGTABLE_LEVELS > 2 */
+#endif /* CONFIG_GUP_GET_PXX_LOW_HIGH */
+
+/*
+ * We require that the PTE can be read atomically.
+ */
+#ifndef ptep_get_lockless
+static inline pte_t ptep_get_lockless(pte_t *ptep)
+{
+ return ptep_get(ptep);
+}
+#endif
+
+#ifndef pmdp_get_lockless
+static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
+{
+ return pmdp_get(pmdp);
+}
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ pmd_t pmd = *pmdp;
+
+ pmd_clear(pmdp);
+ page_table_check_pmd_clear(mm, address, pmd);
+
+ return pmd;
+}
+#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
+#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
+static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pud_t *pudp)
+{
+ pud_t pud = *pudp;
+
+ pud_clear(pudp);
+ page_table_check_pud_clear(mm, address, pud);
+
+ return pud;
+}
+#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
+static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ int full)
+{
+ return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
+static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
+ unsigned long address, pud_t *pudp,
+ int full)
+{
+ return pudp_huge_get_and_clear(mm, address, pudp);
+}
+#endif
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep,
+ int full)
+{
+ return ptep_get_and_clear(mm, address, ptep);
+}
+#endif
+
+
+/*
+ * If two threads concurrently fault at the same page, the thread that
+ * won the race updates the PTE and its local TLB/cache. The other thread
+ * gives up, simply does nothing, and continues; on architectures where
+ * software can update the TLB, the local TLB can be updated here to avoid
+ * the next page fault. This function updates the TLB only; it does nothing
+ * with the cache or anything else, which is what distinguishes it from
+ * update_mmu_cache().
+ */
+#ifndef __HAVE_ARCH_UPDATE_MMU_TLB
+static inline void update_mmu_tlb(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+}
+#define __HAVE_ARCH_UPDATE_MMU_TLB
+#endif
+
+/*
+ * Some architectures may be able to avoid expensive synchronization
+ * primitives when modifications are made to PTEs which are already
+ * not present, or during address space destruction.
+ */
+#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
+static inline void pte_clear_not_present_full(struct mm_struct *mm,
+ unsigned long address,
+ pte_t *ptep,
+ int full)
+{
+ pte_clear(mm, address, ptep);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
+extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
+extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp);
+extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pud_t *pudp);
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
+struct mm_struct;
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
+{
+ pte_t old_pte = *ptep;
+ set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
+}
+#endif
+
+/*
+ * On some architectures the hardware does not set the page access bit when
+ * accessing a memory page; it is the responsibility of software to set it,
+ * which incurs an extra page fault penalty for tracking the access bit. As
+ * an optimization, the access bit can be set during the entire page fault
+ * flow on these arches. To differentiate it from the macro pte_mkyoung,
+ * this macro is used on platforms where software maintains the access bit.
+ */
+#ifndef pte_sw_mkyoung
+static inline pte_t pte_sw_mkyoung(pte_t pte)
+{
+ return pte;
+}
+#define pte_sw_mkyoung pte_sw_mkyoung
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ pmd_t old_pmd = *pmdp;
+ set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
+}
+#else
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ BUILD_BUG();
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static inline void pudp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pud_t *pudp)
+{
+ pud_t old_pud = *pudp;
+
+ set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
+}
+#else
+static inline void pudp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pud_t *pudp)
+{
+ BUILD_BUG();
+}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+#endif
+
+#ifndef pmdp_collapse_flush
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#else
+static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ BUILD_BUG();
+ return *pmdp;
+}
+#define pmdp_collapse_flush pmdp_collapse_flush
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pgtable);
+#endif
+
+#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * This is an implementation of pmdp_establish() that is only suitable for an
+ * architecture that doesn't have hardware dirty/accessed bits. In this case we
+ * can't race with CPU which sets these bits and non-atomic approach is fine.
+ */
+static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+ pmd_t old_pmd = *pmdp;
+ set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+ return old_pmd;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_INVALIDATE
+extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
+
+/*
+ * pmdp_invalidate_ad() invalidates the PMD while changing a transparent
+ * hugepage mapping in the page tables. This function is similar to
+ * pmdp_invalidate(), but should only be used if the access and dirty bits
+ * would not be cleared by the software in the new PMD value. The function
+ * ensures that hardware updates of the access and dirty bits are not lost.
+ *
+ * Doing so allows certain architectures to avoid a TLB flush in most cases.
+ * Another TLB flush might still be necessary later if the PMD update itself
+ * requires one (e.g., if protection was made stricter). Even then, the
+ * caller may be able to batch these flushing operations, so that fewer TLB
+ * flushes are needed overall.
+ */
+extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#endif
+
+#ifndef __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+ return pte_val(pte_a) == pte_val(pte_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTE_UNUSED
+/*
+ * Some architectures provide facilities to virtualization guests
+ * so that they can flag allocated pages as unused. This allows the
+ * host to transparently reclaim unused pages. This function returns
+ * whether the pte's page is unused.
+ */
+static inline int pte_unused(pte_t pte)
+{
+ return 0;
+}
+#endif
+
+#ifndef pte_access_permitted
+#define pte_access_permitted(pte, write) \
+ (pte_present(pte) && (!(write) || pte_write(pte)))
+#endif
+
+#ifndef pmd_access_permitted
+#define pmd_access_permitted(pmd, write) \
+ (pmd_present(pmd) && (!(write) || pmd_write(pmd)))
+#endif
+
+#ifndef pud_access_permitted
+#define pud_access_permitted(pud, write) \
+ (pud_present(pud) && (!(write) || pud_write(pud)))
+#endif
+
+#ifndef p4d_access_permitted
+#define p4d_access_permitted(p4d, write) \
+ (p4d_present(p4d) && (!(write) || p4d_write(p4d)))
+#endif
+
+#ifndef pgd_access_permitted
+#define pgd_access_permitted(pgd, write) \
+ (pgd_present(pgd) && (!(write) || pgd_write(pgd)))
+#endif
+
+#ifndef __HAVE_ARCH_PMD_SAME
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+ return pmd_val(pmd_a) == pmd_val(pmd_b);
+}
+
+static inline int pud_same(pud_t pud_a, pud_t pud_b)
+{
+ return pud_val(pud_a) == pud_val(pud_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_P4D_SAME
+static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
+{
+ return p4d_val(p4d_a) == p4d_val(p4d_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PGD_SAME
+static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
+{
+ return pgd_val(pgd_a) == pgd_val(pgd_b);
+}
+#endif
+
+/*
+ * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
+ * TLB flush will be required as a result of the "set". For example, use
+ * in scenarios where it is known ahead of time that the routine is
+ * setting non-present entries, or re-setting an existing entry to the
+ * same value. Otherwise, use the typical "set" helpers and flush the
+ * TLB.
+ */
+#define set_pte_safe(ptep, pte) \
+({ \
+ WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
+ set_pte(ptep, pte); \
+})
+
+#define set_pmd_safe(pmdp, pmd) \
+({ \
+ WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
+ set_pmd(pmdp, pmd); \
+})
+
+#define set_pud_safe(pudp, pud) \
+({ \
+ WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
+ set_pud(pudp, pud); \
+})
+
+#define set_p4d_safe(p4dp, p4d) \
+({ \
+ WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
+ set_p4d(p4dp, p4d); \
+})
+
+#define set_pgd_safe(pgdp, pgd) \
+({ \
+ WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
+ set_pgd(pgdp, pgd); \
+})
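For example (a hypothetical early-boot path), re-installing an entry known not to require a flush would use the safe variant:

	/* Hypothetical: install a kernel PTE that is either not present
	 * or already identical, so no TLB flush is required. */
	static void __init install_kernel_pte(pte_t *ptep, phys_addr_t phys,
					      pgprot_t prot)
	{
		set_pte_safe(ptep, pfn_pte(PHYS_PFN(phys), prot));
	}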
+
+#ifndef __HAVE_ARCH_DO_SWAP_PAGE
+/*
+ * Some architectures support metadata associated with a page. When a
+ * page is being swapped out, this metadata must be saved so it can be
+ * restored when the page is swapped back in. SPARC M7 and newer
+ * processors support an ADI (Application Data Integrity) tag for the
+ * page as metadata for the page. arch_do_swap_page() can restore this
+ * metadata when a page is swapped back in.
+ */
+static inline void arch_do_swap_page(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t pte, pte_t oldpte)
+{
+
+}
+#endif
+
+#ifndef __HAVE_ARCH_UNMAP_ONE
+/*
+ * Some architectures support metadata associated with a page. When a
+ * page is being swapped out, this metadata must be saved so it can be
+ * restored when the page is swapped back in. SPARC M7 and newer
+ * processors support an ADI (Application Data Integrity) tag for the
+ * page as metadata for the page. arch_unmap_one() can save this
+ * metadata on a swap-out of a page.
+ */
+static inline int arch_unmap_one(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t orig_pte)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Allow architectures to preserve additional metadata associated with
+ * swapped-out pages. The corresponding __HAVE_ARCH_SWAP_* macros and function
+ * prototypes must be defined in the arch-specific asm/pgtable.h file.
+ */
+#ifndef __HAVE_ARCH_PREPARE_TO_SWAP
+static inline int arch_prepare_to_swap(struct page *page)
+{
+ return 0;
+}
+#endif
+
+#ifndef __HAVE_ARCH_SWAP_INVALIDATE
+static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
+{
+}
+
+static inline void arch_swap_invalidate_area(int type)
+{
+}
+#endif
+
+#ifndef __HAVE_ARCH_SWAP_RESTORE
+static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
+{
+}
+#endif
+
+#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
+#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
+#endif
+
+#ifndef __HAVE_ARCH_MOVE_PTE
+#define move_pte(pte, prot, old_addr, new_addr) (pte)
+#endif
+
+#ifndef pte_accessible
+# define pte_accessible(mm, pte) ((void)(pte), 1)
+#endif
+
+#ifndef flush_tlb_fix_spurious_fault
+#define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address)
+#endif
+
+/*
+ * When walking page tables, get the address of the next boundary,
+ * or the end address of the range if that comes earlier. Although no
+ * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
+ */
+
+#define pgd_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+
+#ifndef p4d_addr_end
+#define p4d_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
+
+#ifndef pud_addr_end
+#define pud_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
+
+#ifndef pmd_addr_end
+#define pmd_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
+
+/*
+ * When walking page tables, we usually want to skip any p?d_none entries;
+ * and any p?d_bad entries - reporting the error before resetting to none.
+ * Do the tests inline, but report and clear the bad entry in mm/memory.c.
+ */
+void pgd_clear_bad(pgd_t *);
+
+#ifndef __PAGETABLE_P4D_FOLDED
+void p4d_clear_bad(p4d_t *);
+#else
+#define p4d_clear_bad(p4d) do { } while (0)
+#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
+void pud_clear_bad(pud_t *);
+#else
+#define pud_clear_bad(pud) do { } while (0)
+#endif
+
+void pmd_clear_bad(pmd_t *);
+
+static inline int pgd_none_or_clear_bad(pgd_t *pgd)
+{
+ if (pgd_none(*pgd))
+ return 1;
+ if (unlikely(pgd_bad(*pgd))) {
+ pgd_clear_bad(pgd);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int p4d_none_or_clear_bad(p4d_t *p4d)
+{
+ if (p4d_none(*p4d))
+ return 1;
+ if (unlikely(p4d_bad(*p4d))) {
+ p4d_clear_bad(p4d);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int pud_none_or_clear_bad(pud_t *pud)
+{
+ if (pud_none(*pud))
+ return 1;
+ if (unlikely(pud_bad(*pud))) {
+ pud_clear_bad(pud);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int pmd_none_or_clear_bad(pmd_t *pmd)
+{
+ if (pmd_none(*pmd))
+ return 1;
+ if (unlikely(pmd_bad(*pmd))) {
+ pmd_clear_bad(pmd);
+ return 1;
+ }
+ return 0;
+}
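
Taken together, the p?d_addr_end() helpers above and these
p?d_none_or_clear_bad() checks yield the canonical nested walking pattern
used throughout mm/. A minimal sketch of one level follows;
walk_pte_level() is a hypothetical per-PTE-table callback, not a kernel
API.

/* Sketch: walk [addr, end) one PMD entry at a time. */
static void walk_pte_level(pmd_t *pmd, unsigned long addr, unsigned long end);

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		/* Clamp to the next PMD boundary or the range end. */
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;	/* empty or corrupt entry: skip */
		walk_pte_level(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);

	return 0;
}
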
+
+static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep)
+{
+ /*
+ * Get the current pte state, but zero it out to make it
+ * non-present, preventing the hardware from asynchronously
+ * updating it.
+ */
+ return ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+
+static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ /*
+ * The pte is non-present, so there's no hardware state to
+ * preserve.
+ */
+ set_pte_at(vma->vm_mm, addr, ptep, pte);
+}
+
+#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+/*
+ * Start a pte protection read-modify-write transaction, which
+ * protects against asynchronous hardware modifications to the pte.
+ * The intention is not to prevent the hardware from making pte
+ * updates, but to prevent any updates it may make from being lost.
+ *
+ * This does not protect against other software modifications of the
+ * pte; the appropriate pte lock must be held over the transaction.
+ *
+ * Note that this interface is intended to be batchable, meaning that
+ * ptep_modify_prot_commit may not actually update the pte, but merely
+ * queue the update to be done at some later time. The update must be
+ * actually committed before the pte lock is released, however.
+ */
+static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep)
+{
+ return __ptep_modify_prot_start(vma, addr, ptep);
+}
+
+/*
+ * Commit an update to a pte, leaving any hardware-controlled bits in
+ * the PTE unmodified.
+ */
+static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep, pte_t old_pte, pte_t pte)
+{
+ __ptep_modify_prot_commit(vma, addr, ptep, pte);
+}
+#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
+#endif /* CONFIG_MMU */
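
The transaction above is what mprotect-style code is expected to use. A
hedged sketch of the call pattern, assuming the caller holds the pte lock
and that recomputing the protection bits via pte_modify() is the desired
change; the wrapper name is invented:

/* Sketch: change the protection of one pte without losing HW updates. */
static void change_pte_prot(struct vm_area_struct *vma, unsigned long addr,
			    pte_t *ptep, pgprot_t newprot)
{
	pte_t oldpte, newpte;

	oldpte = ptep_modify_prot_start(vma, addr, ptep);
	newpte = pte_modify(oldpte, newprot);
	ptep_modify_prot_commit(vma, addr, ptep, oldpte, newpte);
}
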
+
+/*
+ * No-op macros that just return the current protection value. Defined here
+ * because these macros can be used even if CONFIG_MMU is not defined.
+ */
+
+#ifndef pgprot_nx
+#define pgprot_nx(prot) (prot)
+#endif
+
+#ifndef pgprot_noncached
+#define pgprot_noncached(prot) (prot)
+#endif
+
+#ifndef pgprot_writecombine
+#define pgprot_writecombine pgprot_noncached
+#endif
+
+#ifndef pgprot_writethrough
+#define pgprot_writethrough pgprot_noncached
+#endif
+
+#ifndef pgprot_device
+#define pgprot_device pgprot_noncached
+#endif
+
+#ifndef pgprot_mhp
+#define pgprot_mhp(prot) (prot)
+#endif
+
+#ifdef CONFIG_MMU
+#ifndef pgprot_modify
+#define pgprot_modify pgprot_modify
+static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+{
+ if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
+ newprot = pgprot_noncached(newprot);
+ if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
+ newprot = pgprot_writecombine(newprot);
+ if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
+ newprot = pgprot_device(newprot);
+ return newprot;
+}
+#endif
+#endif /* CONFIG_MMU */
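
A hedged usage sketch of pgprot_modify(): recompute a VMA's protection
for new flags while carrying over a non-default caching mode (such as
write-combine) from the old value. The helper name is invented.

static pgprot_t recompute_vma_prot(struct vm_area_struct *vma,
				   unsigned long new_flags)
{
	/* Preserve noncached/writecombine/device attributes, if any. */
	return pgprot_modify(vma->vm_page_prot, vm_get_page_prot(new_flags));
}
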
+
+#ifndef pgprot_encrypted
+#define pgprot_encrypted(prot) (prot)
+#endif
+
+#ifndef pgprot_decrypted
+#define pgprot_decrypted(prot) (prot)
+#endif
+
+/*
+ * A facility to provide lazy MMU batching. This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued. Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window. Note that using this
+ * interface requires that read hazards be removed from the code. A read
+ * hazard could result in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads through
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date. This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified. In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode() do {} while (0)
+#define arch_leave_lazy_mmu_mode() do {} while (0)
+#define arch_flush_lazy_mmu_mode() do {} while (0)
+#endif
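
A minimal sketch of the batching pattern, assuming the caller already
holds the pte lock for the whole range and (per the read-hazard caveat
above) does not read the PTEs back inside the window; the helper name is
invented:

/* Sketch: let a paravirt backend coalesce a run of PTE updates. */
static void set_ptes_batched(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, const pte_t *ptes, unsigned int nr)
{
	unsigned int i;

	arch_enter_lazy_mmu_mode();
	for (i = 0; i < nr; i++)
		set_pte_at(mm, addr + i * PAGE_SIZE, ptep + i, ptes[i]);
	arch_leave_lazy_mmu_mode();	/* queued updates land by here */
}
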
+
+/*
+ * A facility to provide batching of the reload of page tables and
+ * other process state with the actual context switch code for
+ * paravirtualized guests. By convention, only one of the batched
+ * update (lazy) modes (CPU, MMU) should be active at any given time,
+ * entry should never be nested, and entry and exits should always be
+ * paired. This is for sanity of maintaining and reasoning about the
+ * kernel code. In this case, the exit (end of the context switch) is
+ * in architecture-specific code, and so doesn't need a generic
+ * definition.
+ */
+#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
+#define arch_start_context_switch(prev) do {} while (0)
+#endif
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+#endif
+#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
+static inline int pte_soft_dirty(pte_t pte)
+{
+ return 0;
+}
+
+static inline int pmd_soft_dirty(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline pte_t pte_mksoft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline pte_t pte_clear_soft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+ return 0;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+#endif
+
+#ifndef __HAVE_PFNMAP_TRACKING
+/*
+ * Interfaces that can be used by architecture code to keep track of
+ * memory type of pfn mappings specified by the remap_pfn_range,
+ * vmf_insert_pfn.
+ */
+
+/*
+ * track_pfn_remap is called when a _new_ pfn mapping is being established
+ * by remap_pfn_range() for physical range indicated by pfn and size.
+ */
+static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
+ unsigned long pfn, unsigned long addr,
+ unsigned long size)
+{
+ return 0;
+}
+
+/*
+ * track_pfn_insert is called when a _new_ single pfn is established
+ * by vmf_insert_pfn().
+ */
+static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+ pfn_t pfn)
+{
+}
+
+/*
+ * track_pfn_copy is called when vma that is covering the pfnmap gets
+ * copied through copy_page_range().
+ */
+static inline int track_pfn_copy(struct vm_area_struct *vma)
+{
+ return 0;
+}
+
+/*
+ * untrack_pfn is called while unmapping a pfnmap for a region.
+ * untrack can be called for a specific region indicated by pfn and size or
+ * can be for the entire vma (in which case pfn, size are zero).
+ */
+static inline void untrack_pfn(struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size,
+ bool mm_wr_locked)
+{
+}
+
+/*
+ * untrack_pfn_clear is called while mremapping a pfnmap for a new region,
+ * or when copying the page tables fails during vm area duplication.
+ */
+static inline void untrack_pfn_clear(struct vm_area_struct *vma)
+{
+}
+#else
+extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
+ unsigned long pfn, unsigned long addr,
+ unsigned long size);
+extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+ pfn_t pfn);
+extern int track_pfn_copy(struct vm_area_struct *vma);
+extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
+ unsigned long size, bool mm_wr_locked);
+extern void untrack_pfn_clear(struct vm_area_struct *vma);
+#endif
+
+#ifdef CONFIG_MMU
+#ifdef __HAVE_COLOR_ZERO_PAGE
+static inline int is_zero_pfn(unsigned long pfn)
+{
+ extern unsigned long zero_pfn;
+ unsigned long offset_from_zero_pfn = pfn - zero_pfn;
+ return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
+}
+
+#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
+
+#else
+static inline int is_zero_pfn(unsigned long pfn)
+{
+ extern unsigned long zero_pfn;
+ return pfn == zero_pfn;
+}
+
+static inline unsigned long my_zero_pfn(unsigned long addr)
+{
+ extern unsigned long zero_pfn;
+ return zero_pfn;
+}
+#endif
+#else
+static inline int is_zero_pfn(unsigned long pfn)
+{
+ return 0;
+}
+
+static inline unsigned long my_zero_pfn(unsigned long addr)
+{
+ return 0;
+}
+#endif /* CONFIG_MMU */
+
+#ifdef CONFIG_MMU
+
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+ return 0;
+}
+#ifndef pmd_write
+static inline int pmd_write(pmd_t pmd)
+{
+ BUG();
+ return 0;
+}
+#endif /* pmd_write */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifndef pud_write
+static inline int pud_write(pud_t pud)
+{
+ BUG();
+ return 0;
+}
+#endif /* pud_write */
+
+#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static inline int pmd_devmap(pmd_t pmd)
+{
+ return 0;
+}
+static inline int pud_devmap(pud_t pud)
+{
+ return 0;
+}
+static inline int pgd_devmap(pgd_t pgd)
+{
+ return 0;
+}
+#endif
+
+#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
+ !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+static inline int pud_trans_huge(pud_t pud)
+{
+ return 0;
+}
+#endif
+
+/* See pmd_none_or_trans_huge_or_clear_bad for discussion. */
+static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud)
+{
+ pud_t pudval = READ_ONCE(*pud);
+
+ if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
+ return 1;
+ if (unlikely(pud_bad(pudval))) {
+ pud_clear_bad(pud);
+ return 1;
+ }
+ return 0;
+}
+
+/* See pmd_trans_unstable for discussion. */
+static inline int pud_trans_unstable(pud_t *pud)
+{
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+ return pud_none_or_trans_huge_or_dev_or_clear_bad(pud);
+#else
+ return 0;
+#endif
+}
+
+#ifndef arch_needs_pgtable_deposit
+#define arch_needs_pgtable_deposit() (false)
+#endif
+/*
+ * This function is meant to be used by sites walking pagetables with
+ * the mmap_lock held in read mode to protect against MADV_DONTNEED and
+ * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
+ * into a null pmd and the transhuge page fault can convert a null pmd
+ * into a hugepmd or into a regular pmd (if the hugepage allocation
+ * fails). While holding the mmap_lock in read mode the pmd becomes
+ * stable and stops changing under us only if it's not null and not a
+ * transhuge pmd. When those races occur and this function makes a
+ * difference vs the standard pmd_none_or_clear_bad, the result is
+ * undefined, so behaving as if the pmd were none is safe (because it
+ * can return none anyway). The compiler-level barrier() is critically
+ * important to compute the two checks atomically on the same pmdval.
+ *
+ * For 32-bit kernels with a 64-bit large pmd_t this automatically takes
+ * care of reading the pmd atomically to avoid SMP race conditions
+ * against pmd_populate() when the mmap_lock is held for reading by the
+ * caller (a special atomic read, not done by "gcc" as in the generic
+ * version above, is also needed when THP is disabled because the page
+ * fault can populate the pmd from under us).
+ */
+static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
+{
+ pmd_t pmdval = pmdp_get_lockless(pmd);
+ /*
+ * The barrier will stabilize the pmdval in a register or on
+ * the stack so that it will stop changing under the code.
+ *
+ * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32-bit PAE,
+ * pmdp_get_lockless is allowed to return a non-atomic pmdval
+ * (for example, pointing to a hugepage that has never been
+ * mapped in the pmd). The checks below only care about
+ * the low part of the pmd with 32-bit PAE x86 anyway, with the
+ * exception of pmd_none(). So the important thing is that if
+ * the low part of the pmd is found null, the high part will
+ * also be null, or the pmd_none() check below would be
+ * confused.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ barrier();
+#endif
+ /*
+ * !pmd_present() checks for pmd migration entries
+ *
+ * The complete check uses is_pmd_migration_entry() in linux/swapops.h.
+ * But using that would require moving this function and pmd_trans_unstable()
+ * to linux/swapops.h to resolve the dependency, which is too much code
+ * movement.
+ *
+ * !pmd_present() is equivalent to is_pmd_migration_entry() currently,
+ * because !pmd_present() pages can only be under migration, not swapped
+ * out.
+ *
+ * pmd_none() is preserved for future condition checks on pmd migration
+ * entries, and to avoid confusion with this function's name, although it
+ * is redundant with !pmd_present().
+ */
+ if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
+ (IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
+ return 1;
+ if (unlikely(pmd_bad(pmdval))) {
+ pmd_clear_bad(pmd);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * This is a noop if Transparent Hugepage Support is not built into
+ * the kernel. Otherwise it is equivalent to
+ * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
+ * places that have already verified the pmd is not none and want to
+ * walk ptes while holding the mmap lock in read mode (write mode doesn't
+ * need this). If THP is not enabled, the pmd can't go away under the
+ * code even if MADV_DONTNEED runs, but if THP is enabled we need to
+ * run pmd_trans_unstable before walking the ptes after
+ * split_huge_pmd returns (because it may have run while the pmd was
+ * null, after which a page fault can map in a THP and not a regular page).
+ */
+static inline int pmd_trans_unstable(pmd_t *pmd)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return pmd_none_or_trans_huge_or_clear_bad(pmd);
+#else
+ return 0;
+#endif
+}
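
A hedged sketch of the pattern described above: re-check pmd stability
after split_huge_pmd() before mapping the PTE table, with mmap_lock held
for read. The function name and the elided per-pte work are illustrative.

static int walk_ptes_checked(struct vm_area_struct *vma, pmd_t *pmd,
			     unsigned long addr, unsigned long end)
{
	pte_t *start_pte, *pte;
	spinlock_t *ptl;

	split_huge_pmd(vma, pmd, addr);
	if (pmd_trans_unstable(pmd))
		return 0;	/* raced with MADV_DONTNEED or a THP fault */

	start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		/* ... per-pte work on *pte ... */
	}
	pte_unmap_unlock(start_pte, ptl);
	return 0;
}
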
+
+/*
+ * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
+ * If we check pmd_trans_unstable() first we will trip the bad_pmd() check
+ * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly
+ * returning 1, but not before it spams dmesg with the pmd_clear_bad() output.
+ */
+static inline int pmd_devmap_trans_unstable(pmd_t *pmd)
+{
+ return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
+}
+
+#ifndef CONFIG_NUMA_BALANCING
+/*
+ * Technically a PTE can be PROTNONE even when not doing NUMA balancing, but
+ * the only case the kernel cares about is NUMA balancing, and the bit is
+ * only ever set when the VMA is accessible. For PROT_NONE VMAs, the PTEs
+ * are not marked _PAGE_PROTNONE, so by default implement the helper as
+ * "always no". It is the responsibility of the caller to distinguish
+ * between PROT_NONE protections and NUMA hinting fault protections.
+ */
+static inline int pte_protnone(pte_t pte)
+{
+ return 0;
+}
+
+static inline int pmd_protnone(pmd_t pmd)
+{
+ return 0;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#endif /* CONFIG_MMU */
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+
+#ifndef __PAGETABLE_P4D_FOLDED
+int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
+void p4d_clear_huge(p4d_t *p4d);
+#else
+static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline void p4d_clear_huge(p4d_t *p4d) { }
+#endif /* !__PAGETABLE_P4D_FOLDED */
+
+int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
+int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
+int pud_clear_huge(pud_t *pud);
+int pmd_clear_huge(pmd_t *pmd);
+int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
+int pud_free_pmd_page(pud_t *pud, unsigned long addr);
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
+#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline void p4d_clear_huge(p4d_t *p4d) { }
+static inline int pud_clear_huge(pud_t *pud)
+{
+ return 0;
+}
+static inline int pmd_clear_huge(pmd_t *pmd)
+{
+ return 0;
+}
+static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
+{
+ return 0;
+}
+static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+{
+ return 0;
+}
+static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+{
+ return 0;
+}
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+
+#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * ARCHes with special requirements for evicting THP backing TLB entries can
+ * implement this. Even otherwise, it can help optimize normal TLB flushes in
+ * the THP regime. Stock flush_tlb_range() typically has an optimization to
+ * nuke the entire TLB if the flush span is greater than a threshold, which
+ * will likely be true for a single huge page. Thus a single THP flush will
+ * invalidate the entire TLB, which is not desirable.
+ * e.g. see arch/arc: flush_pmd_tlb_range
+ */
+#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#else
+#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG()
+#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG()
+#endif
+#endif
+
+struct file;
+int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t *vma_prot);
+
+#ifndef CONFIG_X86_ESPFIX64
+static inline void init_espfix_bsp(void) { }
+#endif
+
+extern void __init pgtable_cache_init(void);
+
+#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
+static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
+{
+ return true;
+}
+
+static inline bool arch_has_pfn_modify_check(void)
+{
+ return false;
+}
+#endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */
+
+/*
+ * Architecture PAGE_KERNEL_* fallbacks
+ *
+ * Some architectures don't define certain PAGE_KERNEL_* flags. This is either
+ * because they really don't support them, or the port needs to be updated to
+ * reflect the required functionality. Below is a set of relatively safe,
+ * best-effort fallbacks to rely on until the architectures define the
+ * flags on their own.
+ */
+
+#ifndef PAGE_KERNEL_RO
+# define PAGE_KERNEL_RO PAGE_KERNEL
+#endif
+
+#ifndef PAGE_KERNEL_EXEC
+# define PAGE_KERNEL_EXEC PAGE_KERNEL
+#endif
+
+/*
+ * Page Table Modification bits for pgtbl_mod_mask.
+ *
+ * These are used by the p?d_alloc_track*() set of functions and in the generic
+ * vmalloc/ioremap code to track at which page-table levels entries have been
+ * modified. Based on that the code can better decide when vmalloc and ioremap
+ * mapping changes need to be synchronized to other page-tables in the system.
+ */
+#define __PGTBL_PGD_MODIFIED 0
+#define __PGTBL_P4D_MODIFIED 1
+#define __PGTBL_PUD_MODIFIED 2
+#define __PGTBL_PMD_MODIFIED 3
+#define __PGTBL_PTE_MODIFIED 4
+
+#define PGTBL_PGD_MODIFIED BIT(__PGTBL_PGD_MODIFIED)
+#define PGTBL_P4D_MODIFIED BIT(__PGTBL_P4D_MODIFIED)
+#define PGTBL_PUD_MODIFIED BIT(__PGTBL_PUD_MODIFIED)
+#define PGTBL_PMD_MODIFIED BIT(__PGTBL_PMD_MODIFIED)
+#define PGTBL_PTE_MODIFIED BIT(__PGTBL_PTE_MODIFIED)
+
+/* Page-Table Modification Mask */
+typedef unsigned int pgtbl_mod_mask;
+
+#endif /* !__ASSEMBLY__ */
+
+#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+/*
+ * ZSMALLOC needs to know the highest PFN on 32-bit architectures
+ * with physical address space extension, but falls back to
+ * BITS_PER_LONG otherwise.
+ */
+#error Missing MAX_POSSIBLE_PHYSMEM_BITS definition
+#else
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+#endif
+#endif
+
+#ifndef has_transparent_hugepage
+#define has_transparent_hugepage() IS_BUILTIN(CONFIG_TRANSPARENT_HUGEPAGE)
+#endif
+
+/*
+ * On some architectures it depends on the mm if the p4d/pud or pmd
+ * layer of the page table hierarchy is folded or not.
+ */
+#ifndef mm_p4d_folded
+#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED)
+#endif
+
+#ifndef mm_pud_folded
+#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED)
+#endif
+
+#ifndef mm_pmd_folded
+#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
+#endif
+
+#ifndef p4d_offset_lockless
+#define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address)
+#endif
+#ifndef pud_offset_lockless
+#define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address)
+#endif
+#ifndef pmd_offset_lockless
+#define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address)
+#endif
+
+/*
+ * p?d_leaf() - true if this entry is a final mapping to a physical address.
+ * This differs from p?d_huge() in that these helpers are always available
+ * (if the architecture supports large pages at the appropriate level), even
+ * if CONFIG_HUGETLB_PAGE is not defined.
+ * Only meaningful when called on a valid entry.
+ */
+#ifndef pgd_leaf
+#define pgd_leaf(x) 0
+#endif
+#ifndef p4d_leaf
+#define p4d_leaf(x) 0
+#endif
+#ifndef pud_leaf
+#define pud_leaf(x) 0
+#endif
+#ifndef pmd_leaf
+#define pmd_leaf(x) 0
+#endif
+
+#ifndef pgd_leaf_size
+#define pgd_leaf_size(x) (1ULL << PGDIR_SHIFT)
+#endif
+#ifndef p4d_leaf_size
+#define p4d_leaf_size(x) P4D_SIZE
+#endif
+#ifndef pud_leaf_size
+#define pud_leaf_size(x) PUD_SIZE
+#endif
+#ifndef pmd_leaf_size
+#define pmd_leaf_size(x) PMD_SIZE
+#endif
+#ifndef pte_leaf_size
+#define pte_leaf_size(x) PAGE_SIZE
+#endif
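
A short sketch of how a generic walker can combine the p?d_leaf() tests
with the size macros above, for example when reporting the page size
backing an address; the helper name is invented:

/* Sketch: size of the region mapped by one PMD entry. */
static u64 pmd_mapping_size(pmd_t pmd)
{
	if (pmd_leaf(pmd))
		return pmd_leaf_size(pmd);	/* block mapping at PMD */
	return PAGE_SIZE;			/* must descend to PTEs */
}
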
+
+/*
+ * Some architectures have MMUs that are configurable or selectable at boot
+ * time. These lead to variable PTRS_PER_x. For statically allocated arrays it
+ * helps to have a static maximum value.
+ */
+
+#ifndef MAX_PTRS_PER_PTE
+#define MAX_PTRS_PER_PTE PTRS_PER_PTE
+#endif
+
+#ifndef MAX_PTRS_PER_PMD
+#define MAX_PTRS_PER_PMD PTRS_PER_PMD
+#endif
+
+#ifndef MAX_PTRS_PER_PUD
+#define MAX_PTRS_PER_PUD PTRS_PER_PUD
+#endif
+
+#ifndef MAX_PTRS_PER_P4D
+#define MAX_PTRS_PER_P4D PTRS_PER_P4D
+#endif
+
+/* description of effects of mapping type and prot in current implementation.
+ * this is due to the limited x86 page protection hardware. The expected
+ * behavior is in parens:
+ *
+ * map_type prot
+ * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC
+ * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes
+ * w: (no) no w: (no) no w: (yes) yes w: (no) no
+ * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
+ *
+ * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
+ * w: (no) no w: (no) no w: (copy) copy w: (no) no
+ * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
+ *
+ * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
+ * MAP_PRIVATE (with Enhanced PAN supported):
+ * r: (no) no
+ * w: (no) no
+ * x: (yes) yes
+ */
+#define DECLARE_VM_GET_PAGE_PROT \
+pgprot_t vm_get_page_prot(unsigned long vm_flags) \
+{ \
+ return protection_map[vm_flags & \
+ (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]; \
+} \
+EXPORT_SYMBOL(vm_get_page_prot);
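
An architecture with no special vm_get_page_prot() requirements supplies
its protection_map[] table and instantiates the generic definition. A
hedged sketch of the arch-side usage; the PAGE_* values and the subset of
entries shown are illustrative placeholders:

/* Sketch only: would live in an architecture's mm/ code. */
static pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE]			= PAGE_NONE,
	[VM_READ]			= PAGE_READONLY,
	[VM_WRITE]			= PAGE_COPY,
	[VM_WRITE | VM_READ]		= PAGE_COPY,
	/* ... remaining VM_EXEC/VM_SHARED combinations ... */
};
DECLARE_VM_GET_PAGE_PROT
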
+
+#endif /* _LINUX_PGTABLE_H */
diff --git a/include/linux/pgtable_api.h b/include/linux/pgtable_api.h
new file mode 100644
index 000000000000..ff367a4ba8c4
--- /dev/null
+++ b/include/linux/pgtable_api.h
@@ -0,0 +1 @@
+#include <linux/pgtable.h>
diff --git a/include/linux/phonet.h b/include/linux/phonet.h
index f691b04fc5ce..bc7d1e529efc 100644
--- a/include/linux/phonet.h
+++ b/include/linux/phonet.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/**
* file phonet.h
*
* Phonet sockets kernel interface
*
* Copyright (C) 2008 Nokia Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef LINUX_PHONET_H
#define LINUX_PHONET_H
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 2a9567bb8186..c5a0dc829714 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -1,16 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Framework and drivers for configuring and reading different PHYs
- * Based on code in sungem_phy.c and gianfar_phy.c
+ * Based on code in sungem_phy.c and (long-removed) gianfar_phy.c
*
* Author: Andy Fleming
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __PHY_H
@@ -19,12 +14,20 @@
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
+#include <linux/leds.h>
+#include <linux/linkmode.h>
+#include <linux/netlink.h>
#include <linux/mdio.h>
#include <linux/mii.h>
+#include <linux/mii_timestamper.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mod_devicetable.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/irqreturn.h>
+#include <linux/iopoll.h>
+#include <linux/refcount.h>
#include <linux/atomic.h>
@@ -41,27 +44,89 @@
#define PHY_1000BT_FEATURES (SUPPORTED_1000baseT_Half | \
SUPPORTED_1000baseT_Full)
-#define PHY_BASIC_FEATURES (PHY_10BT_FEATURES | \
- PHY_100BT_FEATURES | \
- PHY_DEFAULT_FEATURES)
-
-#define PHY_GBIT_FEATURES (PHY_BASIC_FEATURES | \
- PHY_1000BT_FEATURES)
-
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1s_p2mp_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap1_features) __ro_after_init;
+
+#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features)
+#define PHY_BASIC_T1_FEATURES ((unsigned long *)&phy_basic_t1_features)
+#define PHY_BASIC_T1S_P2MP_FEATURES ((unsigned long *)&phy_basic_t1s_p2mp_features)
+#define PHY_GBIT_FEATURES ((unsigned long *)&phy_gbit_features)
+#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features)
+#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features)
+#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features)
+#define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features)
+#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
+#define PHY_EEE_CAP1_FEATURES ((unsigned long *)&phy_eee_cap1_features)
+
+extern const int phy_basic_ports_array[3];
+extern const int phy_fibre_port_array[1];
+extern const int phy_all_ports_features_array[7];
+extern const int phy_10_100_features_array[4];
+extern const int phy_basic_t1_features_array[3];
+extern const int phy_basic_t1s_p2mp_features_array[2];
+extern const int phy_gbit_features_array[2];
+extern const int phy_10gbit_features_array[1];
/*
* Set phydev->irq to PHY_POLL if interrupts are not supported,
- * or not desired for this PHY. Set to PHY_IGNORE_INTERRUPT if
- * the attached driver handles the interrupt
+ * or not desired for this PHY. Set to PHY_MAC_INTERRUPT if
+ * the attached MAC driver handles the interrupt
*/
#define PHY_POLL -1
-#define PHY_IGNORE_INTERRUPT -2
+#define PHY_MAC_INTERRUPT -2
-#define PHY_HAS_INTERRUPT 0x00000001
-#define PHY_IS_INTERNAL 0x00000002
+#define PHY_IS_INTERNAL 0x00000001
+#define PHY_RST_AFTER_CLK_EN 0x00000002
+#define PHY_POLL_CABLE_TEST 0x00000004
#define MDIO_DEVICE_IS_PHY 0x80000000
-/* Interface Mode definitions */
+/**
+ * enum phy_interface_t - Interface Mode definitions
+ *
+ * @PHY_INTERFACE_MODE_NA: Not Applicable - don't touch
+ * @PHY_INTERFACE_MODE_INTERNAL: No interface, MAC and PHY combined
+ * @PHY_INTERFACE_MODE_MII: Media-independent interface
+ * @PHY_INTERFACE_MODE_GMII: Gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_SGMII: Serial gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_TBI: Ten Bit Interface
+ * @PHY_INTERFACE_MODE_REVMII: Reverse Media Independent Interface
+ * @PHY_INTERFACE_MODE_RMII: Reduced Media Independent Interface
+ * @PHY_INTERFACE_MODE_REVRMII: Reduced Media Independent Interface in PHY role
+ * @PHY_INTERFACE_MODE_RGMII: Reduced gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_RGMII_ID: RGMII with Internal RX+TX delay
+ * @PHY_INTERFACE_MODE_RGMII_RXID: RGMII with Internal RX delay
+ * @PHY_INTERFACE_MODE_RGMII_TXID: RGMII with Internal TX delay
+ * @PHY_INTERFACE_MODE_RTBI: Reduced TBI
+ * @PHY_INTERFACE_MODE_SMII: Serial MII
+ * @PHY_INTERFACE_MODE_XGMII: 10 gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_XLGMII: 40 gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_MOCA: Multimedia over Coax
+ * @PHY_INTERFACE_MODE_QSGMII: Quad SGMII
+ * @PHY_INTERFACE_MODE_TRGMII: Turbo RGMII
+ * @PHY_INTERFACE_MODE_100BASEX: 100 BaseX
+ * @PHY_INTERFACE_MODE_1000BASEX: 1000 BaseX
+ * @PHY_INTERFACE_MODE_2500BASEX: 2500 BaseX
+ * @PHY_INTERFACE_MODE_5GBASER: 5G BaseR
+ * @PHY_INTERFACE_MODE_RXAUI: Reduced XAUI
+ * @PHY_INTERFACE_MODE_XAUI: 10 Gigabit Attachment Unit Interface
+ * @PHY_INTERFACE_MODE_10GBASER: 10G BaseR
+ * @PHY_INTERFACE_MODE_25GBASER: 25G BaseR
+ * @PHY_INTERFACE_MODE_USXGMII: Universal Serial 10GE MII
+ * @PHY_INTERFACE_MODE_10GKR: 10GBASE-KR - with Clause 73 AN
+ * @PHY_INTERFACE_MODE_QUSGMII: Quad Universal SGMII
+ * @PHY_INTERFACE_MODE_1000BASEKX: 1000Base-KX - with Clause 73 AN
+ * @PHY_INTERFACE_MODE_MAX: Bookkeeping
+ *
+ * Describes the interface between the MAC and PHY.
+ */
typedef enum {
PHY_INTERFACE_MODE_NA,
PHY_INTERFACE_MODE_INTERNAL,
@@ -71,6 +136,7 @@ typedef enum {
PHY_INTERFACE_MODE_TBI,
PHY_INTERFACE_MODE_REVMII,
PHY_INTERFACE_MODE_RMII,
+ PHY_INTERFACE_MODE_REVRMII,
PHY_INTERFACE_MODE_RGMII,
PHY_INTERFACE_MODE_RGMII_ID,
PHY_INTERFACE_MODE_RGMII_RXID,
@@ -78,37 +144,75 @@ typedef enum {
PHY_INTERFACE_MODE_RTBI,
PHY_INTERFACE_MODE_SMII,
PHY_INTERFACE_MODE_XGMII,
+ PHY_INTERFACE_MODE_XLGMII,
PHY_INTERFACE_MODE_MOCA,
PHY_INTERFACE_MODE_QSGMII,
PHY_INTERFACE_MODE_TRGMII,
+ PHY_INTERFACE_MODE_100BASEX,
PHY_INTERFACE_MODE_1000BASEX,
PHY_INTERFACE_MODE_2500BASEX,
+ PHY_INTERFACE_MODE_5GBASER,
PHY_INTERFACE_MODE_RXAUI,
PHY_INTERFACE_MODE_XAUI,
- /* 10GBASE-KR, XFI, SFI - single lane 10G Serdes */
+ /* 10GBASE-R, XFI, SFI - single lane 10G Serdes */
+ PHY_INTERFACE_MODE_10GBASER,
+ PHY_INTERFACE_MODE_25GBASER,
+ PHY_INTERFACE_MODE_USXGMII,
+ /* 10GBASE-KR - with Clause 73 AN */
PHY_INTERFACE_MODE_10GKR,
+ PHY_INTERFACE_MODE_QUSGMII,
+ PHY_INTERFACE_MODE_1000BASEKX,
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
-/**
- * phy_supported_speeds - return all speeds currently supported by a phy device
- * @phy: The phy device to return supported speeds of.
- * @speeds: buffer to store supported speeds in.
- * @size: size of speeds buffer.
- *
- * Description: Returns the number of supported speeds, and
- * fills the speeds * buffer with the supported speeds. If speeds buffer is
- * too small to contain * all currently supported speeds, will return as
- * many speeds as can fit.
+/* PHY interface mode bitmap handling */
+#define DECLARE_PHY_INTERFACE_MASK(name) \
+ DECLARE_BITMAP(name, PHY_INTERFACE_MODE_MAX)
+
+static inline void phy_interface_zero(unsigned long *intf)
+{
+ bitmap_zero(intf, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline bool phy_interface_empty(const unsigned long *intf)
+{
+ return bitmap_empty(intf, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline void phy_interface_and(unsigned long *dst, const unsigned long *a,
+ const unsigned long *b)
+{
+ bitmap_and(dst, a, b, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline void phy_interface_or(unsigned long *dst, const unsigned long *a,
+ const unsigned long *b)
+{
+ bitmap_or(dst, a, b, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline void phy_interface_set_rgmii(unsigned long *intf)
+{
+ __set_bit(PHY_INTERFACE_MODE_RGMII, intf);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_ID, intf);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_RXID, intf);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_TXID, intf);
+}
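
A hedged sketch of how a MAC driver might use these helpers to declare
which interface modes it supports; the destination bitmap would typically
live in a phylink configuration, and the function name is invented:

static void example_fill_interfaces(unsigned long *interfaces)
{
	phy_interface_zero(interfaces);
	__set_bit(PHY_INTERFACE_MODE_SGMII, interfaces);
	/* All four RGMII delay variants in one call. */
	phy_interface_set_rgmii(interfaces);
}
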
+
+/*
+ * phy_supported_speeds - return all speeds currently supported by a PHY device
*/
unsigned int phy_supported_speeds(struct phy_device *phy,
unsigned int *speeds,
unsigned int size);
/**
- * It maps 'enum phy_interface_t' found in include/linux/phy.h
+ * phy_modes - map phy_interface_t enum to device tree binding of phy-mode
+ * @interface: enum phy_interface_t value
+ *
+ * Description: maps enum &phy_interface_t defined in this file
* into the device tree binding of 'phy-mode', so that Ethernet
- * device driver can get phy interface from device tree.
+ * device driver can get PHY interface from device tree.
*/
static inline const char *phy_modes(phy_interface_t interface)
{
@@ -129,6 +233,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "rev-mii";
case PHY_INTERFACE_MODE_RMII:
return "rmii";
+ case PHY_INTERFACE_MODE_REVRMII:
+ return "rev-rmii";
case PHY_INTERFACE_MODE_RGMII:
return "rgmii";
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -143,6 +249,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "smii";
case PHY_INTERFACE_MODE_XGMII:
return "xgmii";
+ case PHY_INTERFACE_MODE_XLGMII:
+ return "xlgmii";
case PHY_INTERFACE_MODE_MOCA:
return "moca";
case PHY_INTERFACE_MODE_QSGMII:
@@ -151,24 +259,35 @@ static inline const char *phy_modes(phy_interface_t interface)
return "trgmii";
case PHY_INTERFACE_MODE_1000BASEX:
return "1000base-x";
+ case PHY_INTERFACE_MODE_1000BASEKX:
+ return "1000base-kx";
case PHY_INTERFACE_MODE_2500BASEX:
return "2500base-x";
+ case PHY_INTERFACE_MODE_5GBASER:
+ return "5gbase-r";
case PHY_INTERFACE_MODE_RXAUI:
return "rxaui";
case PHY_INTERFACE_MODE_XAUI:
return "xaui";
+ case PHY_INTERFACE_MODE_10GBASER:
+ return "10gbase-r";
+ case PHY_INTERFACE_MODE_25GBASER:
+ return "25gbase-r";
+ case PHY_INTERFACE_MODE_USXGMII:
+ return "usxgmii";
case PHY_INTERFACE_MODE_10GKR:
return "10gbase-kr";
+ case PHY_INTERFACE_MODE_100BASEX:
+ return "100base-x";
+ case PHY_INTERFACE_MODE_QUSGMII:
+ return "qusgmii";
default:
return "unknown";
}
}
-
#define PHY_INIT_TIMEOUT 100000
-#define PHY_STATE_TIME 1
#define PHY_FORCE_TIMEOUT 10
-#define PHY_AN_TIMEOUT 10
#define PHY_MAX_ADDR 32
@@ -177,14 +296,68 @@ static inline const char *phy_modes(phy_interface_t interface)
#define MII_BUS_ID_SIZE 61
-/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
- IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */
-#define MII_ADDR_C45 (1<<30)
-
struct device;
+struct phylink;
+struct sfp_bus;
+struct sfp_upstream_ops;
struct sk_buff;
-/*
+/**
+ * struct mdio_bus_stats - Statistics counters for MDIO busses
+ * @transfers: Total number of transfers, i.e. @writes + @reads
+ * @errors: Number of MDIO transfers that returned an error
+ * @writes: Number of write transfers
+ * @reads: Number of read transfers
+ * @syncp: Synchronisation for incrementing statistics
+ */
+struct mdio_bus_stats {
+ u64_stats_t transfers;
+ u64_stats_t errors;
+ u64_stats_t writes;
+ u64_stats_t reads;
+ /* Must be last, add new statistics above */
+ struct u64_stats_sync syncp;
+};
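
A sketch of the update pattern @syncp implies, following the usual
u64_stats convention; the helper name is invented:

/* Sketch: account one read transfer, with an error path. */
static void mdio_stats_account_read(struct mdio_bus_stats *stats, int ret)
{
	u64_stats_update_begin(&stats->syncp);
	u64_stats_inc(&stats->transfers);
	if (ret < 0)
		u64_stats_inc(&stats->errors);
	else
		u64_stats_inc(&stats->reads);
	u64_stats_update_end(&stats->syncp);
}
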
+
+/**
+ * struct phy_package_shared - Shared information in PHY packages
+ * @addr: Common PHY address used to combine PHYs in one package
+ * @refcnt: Number of PHYs connected to this shared data
+ * @flags: Flags tracking the initialization state of the PHY package
+ * @priv_size: Size of the shared private data @priv
+ * @priv: Driver private data shared across a PHY package
+ *
+ * Represents a shared structure between different phydevs in the same
+ * package, for example a quad PHY. See phy_package_join() and
+ * phy_package_leave().
+ */
+struct phy_package_shared {
+ int addr;
+ refcount_t refcnt;
+ unsigned long flags;
+ size_t priv_size;
+
+ /* private data pointer */
+ /* note that this pointer is shared between different phydevs and
+ * the user has to take care of appropriate locking. It is allocated
+ * and freed automatically by phy_package_join() and
+ * phy_package_leave().
+ */
+ void *priv;
+};
+
+/* used as bit number in atomic bitops */
+#define PHY_SHARED_F_INIT_DONE 0
+#define PHY_SHARED_F_PROBE_DONE 1
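
A sketch of the join/once-only-init lifecycle this structure supports, as
a quad-PHY driver's probe might use it. MY_BASE_ADDR, struct my_shared
and the init body are illustrative, and phy_package_join() /
phy_package_init_once() are assumed as provided elsewhere by this header:

static int my_quad_phy_probe(struct phy_device *phydev)
{
	int ret;

	/* All four PHYs join shared state keyed on one base address. */
	ret = phy_package_join(phydev, MY_BASE_ADDR, sizeof(struct my_shared));
	if (ret)
		return ret;

	if (phy_package_init_once(phydev)) {
		/* one-time, package-wide initialization goes here */
	}
	return 0;
}
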
+
+/**
+ * struct mii_bus - Represents an MDIO bus
+ *
+ * @owner: Who owns this device
+ * @name: User friendly name for this MDIO device, or driver name
+ * @id: Unique identifier for this bus, typically derived from the bus hierarchy
+ * @priv: Driver private data
+ *
* The Bus class for PHYs. Devices which provide access to
* PHYs should register using this structure
*/
@@ -193,55 +366,90 @@ struct mii_bus {
const char *name;
char id[MII_BUS_ID_SIZE];
void *priv;
+ /** @read: Perform a read transfer on the bus */
int (*read)(struct mii_bus *bus, int addr, int regnum);
+ /** @write: Perform a write transfer on the bus */
int (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
+ /** @read_c45: Perform a C45 read transfer on the bus */
+ int (*read_c45)(struct mii_bus *bus, int addr, int devnum, int regnum);
+ /** @write_c45: Perform a C45 write transfer on the bus */
+ int (*write_c45)(struct mii_bus *bus, int addr, int devnum,
+ int regnum, u16 val);
+ /** @reset: Perform a reset of the bus */
int (*reset)(struct mii_bus *bus);
- /*
- * A lock to ensure that only one thing can read/write
+ /** @stats: Statistic counters per device on the bus */
+ struct mdio_bus_stats stats[PHY_MAX_ADDR];
+
+ /**
+ * @mdio_lock: A lock to ensure that only one thing can read/write
* the MDIO bus at a time
*/
struct mutex mdio_lock;
+ /** @parent: Parent device of this bus */
struct device *parent;
+ /** @state: State of bus structure */
enum {
MDIOBUS_ALLOCATED = 1,
MDIOBUS_REGISTERED,
MDIOBUS_UNREGISTERED,
MDIOBUS_RELEASED,
} state;
+
+ /** @dev: Kernel device representation */
struct device dev;
- /* list of all PHYs on bus */
+ /** @mdio_map: list of all MDIO devices on bus */
struct mdio_device *mdio_map[PHY_MAX_ADDR];
- /* PHY addresses to be ignored when probing */
+ /** @phy_mask: PHY addresses to be ignored when probing */
u32 phy_mask;
- /* PHY addresses to ignore the TA/read failure */
+ /** @phy_ignore_ta_mask: PHY addresses to ignore the TA/read failure */
u32 phy_ignore_ta_mask;
- /*
- * An array of interrupts, each PHY's interrupt at the index
+ /**
+ * @irq: An array of interrupts, each PHY's interrupt at the index
* matching its address
*/
int irq[PHY_MAX_ADDR];
- /* GPIO reset pulse width in microseconds */
+ /** @reset_delay_us: GPIO reset pulse width in microseconds */
int reset_delay_us;
- /* RESET GPIO descriptor pointer */
+ /** @reset_post_delay_us: GPIO reset deassert delay in microseconds */
+ int reset_post_delay_us;
+ /** @reset_gpiod: Reset GPIO descriptor pointer */
struct gpio_desc *reset_gpiod;
+
+ /** @shared_lock: protect access to the shared element */
+ struct mutex shared_lock;
+
+ /** @shared: shared state across different PHYs */
+ struct phy_package_shared *shared[PHY_MAX_ADDR];
};
#define to_mii_bus(d) container_of(d, struct mii_bus, dev)
-struct mii_bus *mdiobus_alloc_size(size_t);
+struct mii_bus *mdiobus_alloc_size(size_t size);
+
+/**
+ * mdiobus_alloc - Allocate an MDIO bus structure
+ *
+ * The internal state of the MDIO bus will be set to MDIOBUS_ALLOCATED, ready
+ * for the driver to register the bus.
+ */
static inline struct mii_bus *mdiobus_alloc(void)
{
return mdiobus_alloc_size(0);
}
int __mdiobus_register(struct mii_bus *bus, struct module *owner);
+int __devm_mdiobus_register(struct device *dev, struct mii_bus *bus,
+ struct module *owner);
#define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE)
+#define devm_mdiobus_register(dev, bus) \
+ __devm_mdiobus_register(dev, bus, THIS_MODULE)
+
void mdiobus_unregister(struct mii_bus *bus);
void mdiobus_free(struct mii_bus *bus);
struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv);
@@ -250,142 +458,160 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev)
return devm_mdiobus_alloc_size(dev, 0);
}
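
A hedged sketch of the devm-managed allocation/registration lifecycle
these helpers provide, for a fictional platform driver; my_mdio_read,
my_mdio_write and struct my_priv are hypothetical driver internals:

static int my_mdio_probe(struct platform_device *pdev)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(struct my_priv));
	if (!bus)
		return -ENOMEM;

	bus->name = "my-mdio";
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(&pdev->dev));
	bus->parent = &pdev->dev;
	bus->read = my_mdio_read;
	bus->write = my_mdio_write;

	/* Unregistration and freeing follow the device's lifetime. */
	return devm_mdiobus_register(&pdev->dev, bus);
}
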
-void devm_mdiobus_free(struct device *dev, struct mii_bus *bus);
-struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
+struct mii_bus *mdio_find_bus(const char *mdio_name);
+struct phy_device *mdiobus_scan_c22(struct mii_bus *bus, int addr);
-#define PHY_INTERRUPT_DISABLED 0x0
-#define PHY_INTERRUPT_ENABLED 0x80000000
+#define PHY_INTERRUPT_DISABLED false
+#define PHY_INTERRUPT_ENABLED true
-/* PHY state machine states:
+/**
+ * enum phy_state - PHY state machine states:
*
- * DOWN: PHY device and driver are not ready for anything. probe
+ * @PHY_DOWN: PHY device and driver are not ready for anything. probe
* should be called if and only if the PHY is in this state,
* given that the PHY device exists.
- * - PHY driver probe function will, depending on the PHY, set
- * the state to STARTING or READY
- *
- * STARTING: PHY device is coming up, and the ethernet driver is
- * not ready. PHY drivers may set this in the probe function.
- * If they do, they are responsible for making sure the state is
- * eventually set to indicate whether the PHY is UP or READY,
- * depending on the state when the PHY is done starting up.
- * - PHY driver will set the state to READY
- * - start will set the state to PENDING
- *
- * READY: PHY is ready to send and receive packets, but the
+ * - PHY driver probe function will set the state to @PHY_READY
+ *
+ * @PHY_READY: PHY is ready to send and receive packets, but the
* controller is not. By default, PHYs which do not implement
- * probe will be set to this state by phy_probe(). If the PHY
- * driver knows the PHY is ready, and the PHY state is STARTING,
- * then it sets this STATE.
+ * probe will be set to this state by phy_probe().
* - start will set the state to UP
*
- * PENDING: PHY device is coming up, but the ethernet driver is
- * ready. phy_start will set this state if the PHY state is
- * STARTING.
- * - PHY driver will set the state to UP when the PHY is ready
- *
- * UP: The PHY and attached device are ready to do work.
+ * @PHY_UP: The PHY and attached device are ready to do work.
* Interrupts should be started here.
- * - timer moves to AN
- *
- * AN: The PHY is currently negotiating the link state. Link is
- * therefore down for now. phy_timer will set this state when it
- * detects the state is UP. config_aneg will set this state
- * whenever called with phydev->autoneg set to AUTONEG_ENABLE.
- * - If autonegotiation finishes, but there's no link, it sets
- * the state to NOLINK.
- * - If aneg finishes with link, it sets the state to RUNNING,
- * and calls adjust_link
- * - If autonegotiation did not finish after an arbitrary amount
- * of time, autonegotiation should be tried again if the PHY
- * supports "magic" autonegotiation (back to AN)
- * - If it didn't finish, and no magic_aneg, move to FORCING.
- *
- * NOLINK: PHY is up, but not currently plugged in.
- * - If the timer notes that the link comes back, we move to RUNNING
- * - config_aneg moves to AN
- * - phy_stop moves to HALTED
- *
- * FORCING: PHY is being configured with forced settings
- * - if link is up, move to RUNNING
- * - If link is down, we drop to the next highest setting, and
- * retry (FORCING) after a timeout
- * - phy_stop moves to HALTED
- *
- * RUNNING: PHY is currently up, running, and possibly sending
+ * - timer moves to @PHY_NOLINK or @PHY_RUNNING
+ *
+ * @PHY_NOLINK: PHY is up, but not currently plugged in.
+ * - irq or timer will set @PHY_RUNNING if link comes back
+ * - phy_stop moves to @PHY_HALTED
+ *
+ * @PHY_RUNNING: PHY is currently up, running, and possibly sending
* and/or receiving packets
- * - timer will set CHANGELINK if we're polling (this ensures the
- * link state is polled every other cycle of this state machine,
- * which makes it every other second)
- * - irq will set CHANGELINK
- * - config_aneg will set AN
- * - phy_stop moves to HALTED
- *
- * CHANGELINK: PHY experienced a change in link state
- * - timer moves to RUNNING if link
- * - timer moves to NOLINK if the link is down
- * - phy_stop moves to HALTED
- *
- * HALTED: PHY is up, but no polling or interrupts are done. Or
- * PHY is in an error state.
+ * - irq or timer will set @PHY_NOLINK if link goes down
+ * - phy_stop moves to @PHY_HALTED
*
- * - phy_start moves to RESUMING
+ * @PHY_CABLETEST: PHY is performing a cable test. Packet reception/sending
+ * is not expected to work, carrier will be indicated as down. The PHY will
+ * be polled once per second, or on interrupt, for its current state.
+ * Once complete, move to @PHY_UP to restart the PHY.
+ * - phy_stop aborts the running test and moves to @PHY_HALTED
*
- * RESUMING: PHY was halted, but now wants to run again.
- * - If we are forcing, or aneg is done, timer moves to RUNNING
- * - If aneg is not done, timer moves to AN
- * - phy_stop moves to HALTED
+ * @PHY_HALTED: PHY is up, but no polling or interrupts are done. Or
+ * PHY is in an error state.
+ * - phy_start moves to @PHY_UP
*/
enum phy_state {
PHY_DOWN = 0,
- PHY_STARTING,
PHY_READY,
- PHY_PENDING,
+ PHY_HALTED,
PHY_UP,
- PHY_AN,
PHY_RUNNING,
PHY_NOLINK,
- PHY_FORCING,
- PHY_CHANGELINK,
- PHY_HALTED,
- PHY_RESUMING
+ PHY_CABLETEST,
};
+#define MDIO_MMD_NUM 32
+
/**
* struct phy_c45_device_ids - 802.3-c45 Device Identifiers
- * @devices_in_package: Bit vector of devices present.
+ * @devices_in_package: IEEE 802.3 devices in package register value.
+ * @mmds_present: bit vector of MMDs present.
 * @device_ids: The device identifier for each present device.
*/
struct phy_c45_device_ids {
u32 devices_in_package;
- u32 device_ids[8];
+ u32 mmds_present;
+ u32 device_ids[MDIO_MMD_NUM];
};
-/* phy_device: An instance of a PHY
- *
- * drv: Pointer to the driver for this PHY instance
- * phy_id: UID for this device found during discovery
- * c45_ids: 802.3-c45 Device Identifers if is_c45.
- * is_c45: Set to true if this phy uses clause 45 addressing.
- * is_internal: Set to true if this phy is internal to a MAC.
- * is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc.
- * has_fixups: Set to true if this phy has fixups/quirks.
- * suspended: Set to true if this phy has been suspended successfully.
- * sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
- * loopback_enabled: Set true if this phy has been loopbacked successfully.
- * state: state of the PHY for management purposes
- * dev_flags: Device-specific flags used by the PHY driver.
- * link_timeout: The number of timer firings to wait before the
- * giving up on the current attempt at acquiring a link
- * irq: IRQ number of the PHY's interrupt (-1 if none)
- * phy_timer: The timer for handling the state machine
- * phy_queue: A work_queue for the phy_mac_interrupt
- * attached_dev: The attached enet driver's device instance ptr
- * adjust_link: Callback for the enet controller to respond to
- * changes in the link state.
- *
- * speed, duplex, pause, supported, advertising, lp_advertising,
- * and autoneg are used like in mii_if_info
+struct macsec_context;
+struct macsec_ops;
+
+/**
+ * struct phy_device - An instance of a PHY
+ *
+ * @mdio: MDIO bus this PHY is on
+ * @drv: Pointer to the driver for this PHY instance
+ * @devlink: Create a link between phy dev and mac dev, if the external phy
+ * used by the current mac interface is managed by another mac interface.
+ * @phy_id: UID for this device found during discovery
+ * @c45_ids: 802.3-c45 Device Identifiers if is_c45.
+ * @is_c45: Set to true if this PHY uses clause 45 addressing.
+ * @is_internal: Set to true if this PHY is internal to a MAC.
+ * @is_pseudo_fixed_link: Set to true if this PHY is an Ethernet switch, etc.
+ * @is_gigabit_capable: Set to true if PHY supports 1000Mbps
+ * @has_fixups: Set to true if this PHY has fixups/quirks.
+ * @suspended: Set to true if this PHY has been suspended successfully.
+ * @suspended_by_mdio_bus: Set to true if this PHY was suspended by MDIO bus.
+ * @sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
+ * @loopback_enabled: Set true if this PHY has been loopbacked successfully.
+ * @downshifted_rate: Set true if link speed has been downshifted.
+ * @is_on_sfp_module: Set true if PHY is located on an SFP module.
+ * @mac_managed_pm: Set true if MAC driver takes care of suspending/resuming PHY
+ * @state: State of the PHY for management purposes
+ * @dev_flags: Device-specific flags used by the PHY driver.
+ *
+ * - Bits [15:0] are free to use by the PHY driver to communicate
+ * driver specific behavior.
+ * - Bits [23:16] are currently reserved for future use.
+ * - Bits [31:24] are reserved for defining generic
+ * PHY driver behavior.
+ * @irq: IRQ number of the PHY's interrupt (-1 if none)
+ * @phy_timer: The timer for handling the state machine
+ * @phylink: Pointer to phylink instance for this PHY
+ * @sfp_bus_attached: Flag indicating whether the SFP bus has been attached
+ * @sfp_bus: SFP bus attached to this PHY's fiber port
+ * @attached_dev: The attached enet driver's device instance ptr
+ * @adjust_link: Callback for the enet controller to respond to changes in the
+ * link state.
+ * @phy_link_change: Callback for phylink for notification of link change
+ * @macsec_ops: MACsec offloading ops.
+ *
+ * @speed: Current link speed
+ * @duplex: Current duplex
+ * @port: Current port
+ * @pause: Current pause
+ * @asym_pause: Current asymmetric pause
+ * @supported: Combined MAC/PHY supported linkmodes
+ * @advertising: Currently advertised linkmodes
+ * @adv_old: Saved advertised while power saving for WoL
+ * @supported_eee: supported PHY EEE linkmodes
+ * @advertising_eee: Currently advertised EEE linkmodes
+ * @eee_enabled: Flag indicating whether the EEE feature is enabled
+ * @lp_advertising: Current link partner advertised linkmodes
+ * @host_interfaces: PHY interface modes supported by host
+ * @eee_broken_modes: Energy efficient ethernet modes which should be prohibited
+ * @autoneg: Flag autoneg being used
+ * @rate_matching: Current rate matching mode
+ * @link: Current link state
+ * @autoneg_complete: Flag auto negotiation of the link has completed
+ * @mdix: Current crossover
+ * @mdix_ctrl: User setting of crossover
+ * @pma_extable: Cached value of PMA/PMD Extended Abilities Register
+ * @interrupts: Flag interrupts have been enabled
+ * @irq_suspended: Flag indicating PHY is suspended and therefore interrupt
+ * handling shall be postponed until PHY has resumed
+ * @irq_rerun: Flag indicating interrupts occurred while PHY was suspended,
+ * requiring a rerun of the interrupt handler after resume
+ * @interface: enum phy_interface_t value
+ * @skb: Netlink message for cable diagnostics
+ * @nest: Netlink nest used for cable diagnostics
+ * @ehdr: Netlink header for cable diagnostics
+ * @phy_led_triggers: Array of LED triggers
+ * @phy_num_led_triggers: Number of triggers in @phy_led_triggers
+ * @led_link_trigger: LED trigger for link up/down
+ * @last_triggered: last LED trigger for link speed
+ * @leds: list of PHY LED structures
+ * @master_slave_set: User requested master/slave configuration
+ * @master_slave_get: Current master/slave advertisement
+ * @master_slave_state: Current master/slave configuration
+ * @mii_ts: Pointer to time stamper callbacks
+ * @psec: Pointer to Power Sourcing Equipment control struct
+ * @lock: Mutex for serializing access to the PHY
+ * @state_queue: Work queue for state machine
+ * @link_down_events: Number of times link was lost
+ * @shared: Pointer to private data shared by phys in one package
+ * @priv: Pointer to driver private data
*
* interrupts currently only supports enabled or disabled,
* but could be changed in the future to support enabling
@@ -401,16 +627,35 @@ struct phy_device {
/* And management functions */
struct phy_driver *drv;
+ struct device_link *devlink;
+
u32 phy_id;
struct phy_c45_device_ids c45_ids;
- bool is_c45;
- bool is_internal;
- bool is_pseudo_fixed_link;
- bool has_fixups;
- bool suspended;
- bool sysfs_links;
- bool loopback_enabled;
+ unsigned is_c45:1;
+ unsigned is_internal:1;
+ unsigned is_pseudo_fixed_link:1;
+ unsigned is_gigabit_capable:1;
+ unsigned has_fixups:1;
+ unsigned suspended:1;
+ unsigned suspended_by_mdio_bus:1;
+ unsigned sysfs_links:1;
+ unsigned loopback_enabled:1;
+ unsigned downshifted_rate:1;
+ unsigned is_on_sfp_module:1;
+ unsigned mac_managed_pm:1;
+
+ unsigned autoneg:1;
+ /* The most recently read link state */
+ unsigned link:1;
+ unsigned autoneg_complete:1;
+
+ /* Interrupts are enabled */
+ unsigned interrupts:1;
+ unsigned irq_suspended:1;
+ unsigned irq_rerun:1;
+
+ int rate_matching;
enum phy_state state;
@@ -424,33 +669,39 @@ struct phy_device {
*/
int speed;
int duplex;
+ int port;
int pause;
int asym_pause;
-
- /* The most recently read link state */
- int link;
-
- /* Enabled Interrupts */
- u32 interrupts;
-
- /* Union of PHY and Attached devices' supported modes */
- /* See mii.h for more info */
- u32 supported;
- u32 advertising;
- u32 lp_advertising;
+ u8 master_slave_get;
+ u8 master_slave_set;
+ u8 master_slave_state;
+
+ /* Union of PHY and Attached devices' supported link modes */
+ /* See ethtool.h for more info */
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
+ /* used with phy_speed_down */
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old);
+ /* used for eee validation */
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_eee);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising_eee);
+ bool eee_enabled;
+
+ /* Host supported PHY interface types. Should be ignored if empty. */
+ DECLARE_PHY_INTERFACE_MASK(host_interfaces);
/* Energy efficient ethernet modes which should be prohibited */
u32 eee_broken_modes;
- int autoneg;
-
- int link_timeout;
-
#ifdef CONFIG_LED_TRIGGER_PHY
struct phy_led_trigger *phy_led_triggers;
unsigned int phy_num_led_triggers;
struct phy_led_trigger *last_triggered;
+
+ struct phy_led_trigger *led_link_trigger;
#endif
+ struct list_head leds;
/*
* Interrupt number for this PHY
@@ -462,195 +713,390 @@ struct phy_device {
/* For use by PHYs to maintain extra state */
void *priv;
+ /* For use by PHYs inside the same package that need a shared state. */
+ struct phy_package_shared *shared;
+
+ /* Reporting cable test results */
+ struct sk_buff *skb;
+ void *ehdr;
+ struct nlattr *nest;
+
/* Interrupt and Polling infrastructure */
- struct work_struct phy_queue;
struct delayed_work state_queue;
- atomic_t irq_disable;
struct mutex lock;
+ /* This may be modified under the rtnl lock */
+ bool sfp_bus_attached;
+ struct sfp_bus *sfp_bus;
+ struct phylink *phylink;
struct net_device *attached_dev;
+ struct mii_timestamper *mii_ts;
+ struct pse_control *psec;
u8 mdix;
u8 mdix_ctrl;
+ int pma_extable;
+
+ unsigned int link_down_events;
+
+ void (*phy_link_change)(struct phy_device *phydev, bool up);
void (*adjust_link)(struct net_device *dev);
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ /* MACsec management functions */
+ const struct macsec_ops *macsec_ops;
+#endif
};
-#define to_phy_device(d) container_of(to_mdio_device(d), \
- struct phy_device, mdio)
-/* struct phy_driver: Driver structure for a particular PHY type
+/* Generic phy_device::dev_flags */
+#define PHY_F_NO_IRQ 0x80000000
+
+static inline struct phy_device *to_phy_device(const struct device *dev)
+{
+ return container_of(to_mdio_device(dev), struct phy_device, mdio);
+}
+
+/**
+ * struct phy_tdr_config - Configuration of a TDR raw test
+ *
+ * @first: Distance for first data collection point
+ * @last: Distance for last data collection point
+ * @step: Step between data collection points
+ * @pair: Bitmap of cable pairs to collect data for
+ *
+ * A structure containing possible configuration parameters
+ * for a TDR cable test. The driver does not need to implement
+ * all the parameters, but should report what is actually used.
+ * All distances are in centimeters.
+ */
+struct phy_tdr_config {
+ u32 first;
+ u32 last;
+ u32 step;
+ s8 pair;
+};
+#define PHY_PAIR_ALL -1
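
/*
 * Editorial sketch (not part of this patch): a hypothetical caller
 * could fill the structure like this to request TDR data from 0 m to
 * 100 m in 1 m steps on all pairs, then pass it to
 * phy_start_cable_test_tdr(), declared further down in this header.
 */
static const struct phy_tdr_config example_tdr_cfg = {
	.first	= 0,		/* start at the PHY itself */
	.last	= 10000,	/* 100 m, in centimeters */
	.step	= 100,		/* 1 m resolution */
	.pair	= PHY_PAIR_ALL,
};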
+
+/**
+ * struct phy_plca_cfg - Configuration of the PLCA (Physical Layer Collision
+ * Avoidance) Reconciliation Sublayer.
+ *
+ * @version: read-only PLCA register map version. -1 = not available. Ignored
+ * when setting the configuration. Format is the same as reported by the PLCA
+ * IDVER register (31.CA00).
+ * @enabled: PLCA configured mode (enabled/disabled). -1 = not available / don't
+ * set. 0 = disabled, anything else = enabled.
+ * @node_id: the PLCA local node identifier. -1 = not available / don't set.
+ * Allowed values [0 .. 254]. 255 = node disabled.
+ * @node_cnt: the PLCA node count (maximum number of nodes having a TO). Only
+ * meaningful for the coordinator (node_id = 0). -1 = not available / don't
+ * set. Allowed values [1 .. 255].
+ * @to_tmr: The value of the PLCA to_timer in bit-times, which determines the
+ * PLCA transmit opportunity window opening. See IEEE802.3 Clause 148 for
+ * more details. The to_timer shall be set to the same value on all nodes.
+ * -1 = not available / don't set. Allowed values [0 .. 255].
+ * @burst_cnt: controls how many additional frames a node is allowed to send in
+ * a single transmit opportunity (TO). The default value of 0 means that the
+ * node is allowed exactly one frame per TO. A value of 1 allows two frames
+ * per TO, and so on. -1 = not available / don't set.
+ * Allowed values [0 .. 255].
+ * @burst_tmr: controls how many bit times to wait for the MAC to send a new
+ * frame before interrupting the burst. This value should be set to a value
+ * greater than the MAC inter-packet gap (which is typically 96 bits).
+ * -1 = not available / don't set. Allowed values [0 .. 255].
+ *
+ * A structure containing configuration parameters for setting/getting the PLCA
+ * RS configuration. The driver does not need to implement all the parameters,
+ * but should report what is actually used.
+ */
+struct phy_plca_cfg {
+ int version;
+ int enabled;
+ int node_id;
+ int node_cnt;
+ int to_tmr;
+ int burst_cnt;
+ int burst_tmr;
+};
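
/*
 * Editorial sketch (not part of this patch): hypothetical settings
 * making this PHY the PLCA coordinator (node 0) of an 8-node
 * 10BASE-T1S segment. -1 leaves a parameter untouched; the structure
 * would be handed to phy_ethtool_set_plca_cfg(), declared further
 * down in this header.
 */
static const struct phy_plca_cfg example_plca_cfg = {
	.version	= -1,	/* read-only, ignored when setting */
	.enabled	= 1,
	.node_id	= 0,	/* coordinator */
	.node_cnt	= 8,
	.to_tmr		= -1,
	.burst_cnt	= -1,
	.burst_tmr	= -1,
};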
+
+/**
+ * struct phy_plca_status - Status of the PLCA (Physical Layer Collision
+ * Avoidance) Reconciliation Sublayer.
+ *
+ * @pst: The PLCA status as reported by the PST bit in the PLCA STATUS
+ * register (31.CA03), indicating BEACON activity.
*
- * driver_data: static driver data
- * phy_id: The result of reading the UID registers of this PHY
+ * A structure containing status information of the PLCA RS configuration.
+ * The driver does not need to implement all the parameters, but should report
+ * what is actually used.
+ */
+struct phy_plca_status {
+ bool pst;
+};
+
+/**
+ * struct phy_led - An LED driven by the PHY
+ *
+ * @list: List of LEDs
+ * @phydev: PHY this LED is attached to
+ * @led_cdev: Standard LED class structure
+ * @index: Number of the LED
+ */
+struct phy_led {
+ struct list_head list;
+ struct phy_device *phydev;
+ struct led_classdev led_cdev;
+ u8 index;
+};
+
+#define to_phy_led(d) container_of(d, struct phy_led, led_cdev)
+
+/**
+ * struct phy_driver - Driver structure for a particular PHY type
+ *
+ * @mdiodrv: Data common to all MDIO devices
+ * @phy_id: The result of reading the UID registers of this PHY
* type, and ANDing them with the phy_id_mask. This driver
* only works for PHYs with IDs which match this field
- * name: The friendly name of this PHY type
- * phy_id_mask: Defines the important bits of the phy_id
- * features: A list of features (speed, duplex, etc) supported
- * by this PHY
- * flags: A bitfield defining certain other features this PHY
+ * @name: The friendly name of this PHY type
+ * @phy_id_mask: Defines the important bits of the phy_id
+ * @features: A mandatory list of features (speed, duplex, etc)
+ * supported by this PHY
+ * @flags: A bitfield defining certain other features this PHY
* supports (like interrupts)
+ * @driver_data: Static driver data
*
- * The drivers must implement config_aneg and read_status. All
- * other functions are optional. Note that none of these
- * functions should be called from interrupt time. The goal is
- * for the bus read/write functions to be able to block when the
- * bus transaction is happening, and be freed up by an interrupt
- * (The MPC85xx has this ability, though it is not currently
- * supported in the driver).
+ * All functions are optional. If config_aneg or read_status
+ * are not implemented, the phy core uses the genphy versions.
+ * Note that none of these functions should be called from
+ * interrupt time. The goal is for the bus read/write functions
+ * to be able to block when the bus transaction is happening,
+ * and be freed up by an interrupt (The MPC85xx has this ability,
+ * though it is not currently supported in the driver).
*/
struct phy_driver {
struct mdio_driver_common mdiodrv;
u32 phy_id;
char *name;
- unsigned int phy_id_mask;
- u32 features;
+ u32 phy_id_mask;
+ const unsigned long * const features;
u32 flags;
const void *driver_data;
- /*
- * Called to issue a PHY software reset
+ /**
+ * @soft_reset: Called to issue a PHY software reset
*/
int (*soft_reset)(struct phy_device *phydev);
- /*
- * Called to initialize the PHY,
+ /**
+ * @config_init: Called to initialize the PHY,
* including after a reset
*/
int (*config_init)(struct phy_device *phydev);
- /*
- * Called during discovery. Used to set
+ /**
+ * @probe: Called during discovery. Used to set
* up device-specific structures, if any
*/
int (*probe)(struct phy_device *phydev);
+ /**
+ * @get_features: Probe the hardware to determine what
+ * abilities it has. Should only set phydev->supported.
+ */
+ int (*get_features)(struct phy_device *phydev);
+
+ /**
+ * @get_rate_matching: Get the supported type of rate matching for a
+ * particular phy interface. This is used by phy consumers to determine
+ * whether to advertise lower-speed modes for that interface. It is
+ * assumed that if a rate matching mode is supported on an interface,
+ * then that interface's rate can be adapted to all slower link speeds
+ * supported by the phy. If the interface is not supported, this should
+ * return %RATE_MATCH_NONE.
+ */
+ int (*get_rate_matching)(struct phy_device *phydev,
+ phy_interface_t iface);
+
/* PHY Power Management */
+ /** @suspend: Suspend the hardware, saving state if needed */
int (*suspend)(struct phy_device *phydev);
+ /** @resume: Resume the hardware, restoring state if needed */
int (*resume)(struct phy_device *phydev);
- /*
- * Configures the advertisement and resets
+ /**
+ * @config_aneg: Configures the advertisement and resets
* autonegotiation if phydev->autoneg is on,
* forces the speed to the current settings in phydev
* if phydev->autoneg is off
*/
int (*config_aneg)(struct phy_device *phydev);
- /* Determines the auto negotiation result */
+ /** @aneg_done: Determines the auto negotiation result */
int (*aneg_done)(struct phy_device *phydev);
- /* Determines the negotiated speed and duplex */
+ /** @read_status: Determines the negotiated speed and duplex */
int (*read_status)(struct phy_device *phydev);
- /* Clears any pending interrupts */
- int (*ack_interrupt)(struct phy_device *phydev);
-
- /* Enables or disables interrupts */
+ /**
+ * @config_intr: Enables or disables interrupts.
+ * It should also clear any pending interrupts prior to enabling the
+ * IRQs and after disabling them.
+ */
int (*config_intr)(struct phy_device *phydev);
- /*
- * Checks if the PHY generated an interrupt.
- * For multi-PHY devices with shared PHY interrupt pin
- */
- int (*did_interrupt)(struct phy_device *phydev);
+ /** @handle_interrupt: Override default interrupt handling */
+ irqreturn_t (*handle_interrupt)(struct phy_device *phydev);
- /* Clears up any memory if needed */
+ /** @remove: Clears up any memory if needed */
void (*remove)(struct phy_device *phydev);
- /* Returns true if this is a suitable driver for the given
- * phydev. If NULL, matching is based on phy_id and
- * phy_id_mask.
+ /**
+ * @match_phy_device: Returns true if this is a suitable
+ * driver for the given phydev. If NULL, matching is based on
+ * phy_id and phy_id_mask.
*/
int (*match_phy_device)(struct phy_device *phydev);
- /* Handles ethtool queries for hardware time stamping. */
- int (*ts_info)(struct phy_device *phydev, struct ethtool_ts_info *ti);
-
- /* Handles SIOCSHWTSTAMP ioctl for hardware time stamping. */
- int (*hwtstamp)(struct phy_device *phydev, struct ifreq *ifr);
-
- /*
- * Requests a Rx timestamp for 'skb'. If the skb is accepted,
- * the phy driver promises to deliver it using netif_rx() as
- * soon as a timestamp becomes available. One of the
- * PTP_CLASS_ values is passed in 'type'. The function must
- * return true if the skb is accepted for delivery.
- */
- bool (*rxtstamp)(struct phy_device *dev, struct sk_buff *skb, int type);
-
- /*
- * Requests a Tx timestamp for 'skb'. The phy driver promises
- * to deliver it using skb_complete_tx_timestamp() as soon as a
- * timestamp becomes available. One of the PTP_CLASS_ values
- * is passed in 'type'.
+ /**
+ * @set_wol: Some devices (e.g. qnap TS-119P II) require PHY
+ * register changes to enable Wake on LAN, so set_wol is
+ * provided to be called in the ethernet driver's set_wol
+ * function.
*/
- void (*txtstamp)(struct phy_device *dev, struct sk_buff *skb, int type);
-
- /* Some devices (e.g. qnap TS-119P II) require PHY register changes to
- * enable Wake on LAN, so set_wol is provided to be called in the
- * ethernet driver's set_wol function. */
int (*set_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol);
- /* See set_wol, but for checking whether Wake on LAN is enabled. */
+ /**
+ * @get_wol: See set_wol, but for checking whether Wake on LAN
+ * is enabled.
+ */
void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol);
- /*
- * Called to inform a PHY device driver when the core is about to
- * change the link state. This callback is supposed to be used as
- * fixup hook for drivers that need to take action when the link
- * state changes. Drivers are by no means allowed to mess with the
+ /**
+ * @link_change_notify: Called to inform a PHY device driver
+ * when the core is about to change the link state. This
+ * callback is supposed to be used as fixup hook for drivers
+ * that need to take action when the link state
+ * changes. Drivers are by no means allowed to mess with the
* PHY device structure in their implementations.
*/
void (*link_change_notify)(struct phy_device *dev);
- /*
- * Phy specific driver override for reading a MMD register.
- * This function is optional for PHY specific drivers. When
- * not provided, the default MMD read function will be used
- * by phy_read_mmd(), which will use either a direct read for
- * Clause 45 PHYs or an indirect read for Clause 22 PHYs.
- * devnum is the MMD device number within the PHY device,
- * regnum is the register within the selected MMD device.
+ /**
+ * @read_mmd: PHY specific driver override for reading a MMD
+ * register. This function is optional for PHY specific
+ * drivers. When not provided, the default MMD read function
+ * will be used by phy_read_mmd(), which will use either a
+ * direct read for Clause 45 PHYs or an indirect read for
+ * Clause 22 PHYs. devnum is the MMD device number within the
+ * PHY device, regnum is the register within the selected MMD
+ * device.
*/
int (*read_mmd)(struct phy_device *dev, int devnum, u16 regnum);
- /*
- * Phy specific driver override for writing a MMD register.
- * This function is optional for PHY specific drivers. When
- * not provided, the default MMD write function will be used
- * by phy_write_mmd(), which will use either a direct write for
- * Clause 45 PHYs, or an indirect write for Clause 22 PHYs.
- * devnum is the MMD device number within the PHY device,
- * regnum is the register within the selected MMD device.
- * val is the value to be written.
+ /**
+ * @write_mmd: PHY specific driver override for writing a MMD
+ * register. This function is optional for PHY specific
+ * drivers. When not provided, the default MMD write function
+ * will be used by phy_write_mmd(), which will use either a
+ * direct write for Clause 45 PHYs, or an indirect write for
+ * Clause 22 PHYs. devnum is the MMD device number within the
+ * PHY device, regnum is the register within the selected MMD
+ * device. val is the value to be written.
*/
int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum,
u16 val);
- /* Get the size and type of the eeprom contained within a plug-in
- * module */
+ /** @read_page: Return the current PHY register page number */
+ int (*read_page)(struct phy_device *dev);
+ /** @write_page: Set the current PHY register page number */
+ int (*write_page)(struct phy_device *dev, int page);
+
+ /**
+ * @module_info: Get the size and type of the eeprom contained
+ * within a plug-in module
+ */
int (*module_info)(struct phy_device *dev,
struct ethtool_modinfo *modinfo);
- /* Get the eeprom information from the plug-in module */
+ /**
+ * @module_eeprom: Get the eeprom information from the plug-in
+ * module
+ */
int (*module_eeprom)(struct phy_device *dev,
struct ethtool_eeprom *ee, u8 *data);
- /* Get statistics from the phy using ethtool */
+ /** @cable_test_start: Start a cable test */
+ int (*cable_test_start)(struct phy_device *dev);
+
+ /** @cable_test_tdr_start: Start a raw TDR cable test */
+ int (*cable_test_tdr_start)(struct phy_device *dev,
+ const struct phy_tdr_config *config);
+
+ /**
+ * @cable_test_get_status: Once per second, or on interrupt,
+ * request the status of the test.
+ */
+ int (*cable_test_get_status)(struct phy_device *dev, bool *finished);
+
+ /* Get statistics from the PHY using ethtool */
+ /** @get_sset_count: Number of statistic counters */
int (*get_sset_count)(struct phy_device *dev);
+ /** @get_strings: Names of the statistic counters */
void (*get_strings)(struct phy_device *dev, u8 *data);
+ /** @get_stats: Return the statistic counter values */
void (*get_stats)(struct phy_device *dev,
struct ethtool_stats *stats, u64 *data);
/* Get and Set PHY tunables */
+ /** @get_tunable: Return the value of a tunable */
int (*get_tunable)(struct phy_device *dev,
struct ethtool_tunable *tuna, void *data);
+ /** @set_tunable: Set the value of a tunable */
int (*set_tunable)(struct phy_device *dev,
struct ethtool_tunable *tuna,
const void *data);
+ /** @set_loopback: Set the loopback mode of the PHY */
int (*set_loopback)(struct phy_device *dev, bool enable);
+ /** @get_sqi: Get the signal quality indication */
+ int (*get_sqi)(struct phy_device *dev);
+ /** @get_sqi_max: Get the maximum signal quality indication */
+ int (*get_sqi_max)(struct phy_device *dev);
+
+ /* PLCA RS interface */
+ /** @get_plca_cfg: Return the current PLCA configuration */
+ int (*get_plca_cfg)(struct phy_device *dev,
+ struct phy_plca_cfg *plca_cfg);
+ /** @set_plca_cfg: Set the PLCA configuration */
+ int (*set_plca_cfg)(struct phy_device *dev,
+ const struct phy_plca_cfg *plca_cfg);
+ /** @get_plca_status: Return the current PLCA status info */
+ int (*get_plca_status)(struct phy_device *dev,
+ struct phy_plca_status *plca_st);
+
+ /**
+ * @led_brightness_set: Set a PHY LED brightness. Index
+ * indicates which of the PHY's LEDs should be set. Value
+ * follows the standard LED class meaning, e.g. LED_OFF,
+ * LED_HALF, LED_FULL.
+ */
+ int (*led_brightness_set)(struct phy_device *dev,
+ u8 index, enum led_brightness value);
+
+ /**
+ * @led_blink_set: Set a PHY LED to blink. Index indicates
+ * which of the PHY's LEDs should be configured to blink.
+ * Delays are in milliseconds and if both are zero then a
+ * sensible default should be chosen. The call should adjust
+ * the timings in that case if it cannot match the values
+ * specified exactly (a usage sketch follows this structure).
+ */
+ int (*led_blink_set)(struct phy_device *dev, u8 index,
+ unsigned long *delay_on,
+ unsigned long *delay_off);
};
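
/*
 * Editorial sketch (not part of this patch) of the @led_blink_set
 * semantics described above: apply a sensible default when both
 * delays are zero and report the timings actually used back through
 * the pointers. EXAMPLE_LED_REG and its bit layout are invented for
 * illustration.
 */
static int example_led_blink_set(struct phy_device *phydev, u8 index,
				 unsigned long *delay_on,
				 unsigned long *delay_off)
{
	if (index > 1)
		return -EINVAL;

	if (!*delay_on && !*delay_off) {
		*delay_on = 500;	/* ms */
		*delay_off = 500;	/* ms */
	}

	/* This imaginary PHY can only blink symmetrically. */
	*delay_off = *delay_on;

	return phy_modify(phydev, EXAMPLE_LED_REG,
			  EXAMPLE_LED_BLINK_MASK(index),
			  EXAMPLE_LED_BLINK_EN(index));
}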
#define to_phy_driver(d) container_of(to_mdio_common_driver(d), \
struct phy_driver, mdiodrv)
@@ -658,6 +1104,10 @@ struct phy_driver {
#define PHY_ANY_ID "MATCH ANY PHY"
#define PHY_ANY_UID 0xffffffff
+#define PHY_ID_MATCH_EXACT(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 0)
+#define PHY_ID_MATCH_MODEL(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 4)
+#define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 10)
+
/* A Structure for boards to register fixups with the PHY Lib */
struct phy_fixup {
struct list_head list;
@@ -667,16 +1117,42 @@ struct phy_fixup {
int (*run)(struct phy_device *phydev);
};
+const char *phy_speed_to_str(int speed);
+const char *phy_duplex_to_str(unsigned int duplex);
+const char *phy_rate_matching_to_str(int rate_matching);
+
+int phy_interface_num_ports(phy_interface_t interface);
+
+/* A structure for mapping a particular speed and duplex
+ * combination to a particular SUPPORTED and ADVERTISED value
+ */
+struct phy_setting {
+ u32 speed;
+ u8 duplex;
+ u8 bit;
+};
+
+const struct phy_setting *
+phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
+ bool exact);
+size_t phy_speeds(unsigned int *speeds, size_t size,
+ unsigned long *mask);
+void of_set_phy_supported(struct phy_device *phydev);
+void of_set_phy_eee_broken(struct phy_device *phydev);
+int phy_speed_down_core(struct phy_device *phydev);
+
/**
- * phy_read_mmd - Convenience function for reading a register
- * from an MMD on a given PHY.
+ * phy_is_started - Convenience function to check whether PHY is started
* @phydev: The phy_device struct
- * @devad: The MMD to read from
- * @regnum: The register on the MMD to read
- *
- * Same rules as for phy_read();
*/
-int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
+static inline bool phy_is_started(struct phy_device *phydev)
+{
+ return phydev->state >= PHY_UP;
+}
+
+void phy_resolve_aneg_pause(struct phy_device *phydev);
+void phy_resolve_aneg_linkmode(struct phy_device *phydev);
+void phy_check_downshift(struct phy_device *phydev);
/**
* phy_read - Convenience function for reading a given PHY register
@@ -692,6 +1168,30 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum)
return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum);
}
+#define phy_read_poll_timeout(phydev, regnum, val, cond, sleep_us, \
+ timeout_us, sleep_before_read) \
+({ \
+ int __ret = read_poll_timeout(phy_read, val, val < 0 || (cond), \
+ sleep_us, timeout_us, sleep_before_read, phydev, regnum); \
+ if (val < 0) \
+ __ret = val; \
+ if (__ret) \
+ phydev_err(phydev, "%s failed: %d\n", __func__, __ret); \
+ __ret; \
+})
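
/*
 * Editorial sketch (not part of this patch): a typical use of
 * phy_read_poll_timeout() is waiting for a software reset to
 * complete, polling MII_BMCR every millisecond for up to 100 ms.
 */
static int example_wait_for_reset(struct phy_device *phydev)
{
	int val;

	return phy_read_poll_timeout(phydev, MII_BMCR, val,
				     !(val & BMCR_RESET),
				     1000, 100000, true);
}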
+
+/**
+ * __phy_read - convenience function for reading a given PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to read
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_read(struct phy_device *phydev, u32 regnum)
+{
+ return __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum);
+}
+
/**
* phy_write - Convenience function for writing a given PHY register
* @phydev: the phy_device struct
@@ -708,15 +1208,302 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
}
/**
+ * __phy_write - Convenience function for writing a given PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val)
+{
+ return __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum,
+ val);
+}
+
+/**
+ * __phy_modify_changed() - Convenience function for modifying a PHY register
+ * @phydev: a pointer to a &struct phy_device
+ * @regnum: register number
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Unlocked helper function which allows a PHY register to be modified as
+ * new register value = (old register value & ~mask) | set
+ *
+ * Returns negative errno, 0 if there was no change, and 1 in case of change
+ */
+static inline int __phy_modify_changed(struct phy_device *phydev, u32 regnum,
+ u16 mask, u16 set)
+{
+ return __mdiobus_modify_changed(phydev->mdio.bus, phydev->mdio.addr,
+ regnum, mask, set);
+}
+
+/*
+ * phy_read_mmd - Convenience function for reading a register
+ * from an MMD on a given PHY.
+ */
+int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
+
+/**
+ * phy_read_mmd_poll_timeout - Periodically poll a PHY register until a
+ * condition is met or a timeout occurs
+ *
+ * @phydev: The phy_device struct
+ * @devaddr: The MMD to read from
+ * @regnum: The register on the MMD to read
+ * @val: Variable to read the register into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0
+ * tight-loops). Should be less than ~20ms since usleep_range
+ * is used (see Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ * @sleep_before_read: If true, sleep @sleep_us before the first read.
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
+ */
+#define phy_read_mmd_poll_timeout(phydev, devaddr, regnum, val, cond, \
+ sleep_us, timeout_us, sleep_before_read) \
+({ \
+ int __ret = read_poll_timeout(phy_read_mmd, val, (cond) || val < 0, \
+ sleep_us, timeout_us, sleep_before_read, \
+ phydev, devaddr, regnum); \
+ if (val < 0) \
+ __ret = val; \
+ if (__ret) \
+ phydev_err(phydev, "%s failed: %d\n", __func__, __ret); \
+ __ret; \
+})
+
+/*
+ * __phy_read_mmd - Convenience function for reading a register
+ * from an MMD on a given PHY.
+ */
+int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
+
+/*
+ * phy_write_mmd - Convenience function for writing a register
+ * on an MMD on a given PHY.
+ */
+int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
+
+/*
+ * __phy_write_mmd - Convenience function for writing a register
+ * on an MMD on a given PHY.
+ */
+int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
+
+int __phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask,
+ u16 set);
+int phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask,
+ u16 set);
+int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set);
+int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set);
+
+int __phy_modify_mmd_changed(struct phy_device *phydev, int devad, u32 regnum,
+ u16 mask, u16 set);
+int phy_modify_mmd_changed(struct phy_device *phydev, int devad, u32 regnum,
+ u16 mask, u16 set);
+int __phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum,
+ u16 mask, u16 set);
+int phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum,
+ u16 mask, u16 set);
+
+/**
+ * __phy_set_bits - Convenience function for setting bits in a PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: bits to set
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val)
+{
+ return __phy_modify(phydev, regnum, 0, val);
+}
+
+/**
+ * __phy_clear_bits - Convenience function for clearing bits in a PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: bits to clear
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_clear_bits(struct phy_device *phydev, u32 regnum,
+ u16 val)
+{
+ return __phy_modify(phydev, regnum, val, 0);
+}
+
+/**
+ * phy_set_bits - Convenience function for setting bits in a PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: bits to set
+ */
+static inline int phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val)
+{
+ return phy_modify(phydev, regnum, 0, val);
+}
+
+/**
+ * phy_clear_bits - Convenience function for clearing bits in a PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: bits to clear
+ */
+static inline int phy_clear_bits(struct phy_device *phydev, u32 regnum, u16 val)
+{
+ return phy_modify(phydev, regnum, val, 0);
+}
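
/*
 * Editorial sketch (not part of this patch): with the helpers above,
 * common register updates become one-liners. The register and bit
 * names are invented for illustration.
 */
static int example_tune_phy(struct phy_device *phydev)
{
	int err;

	err = phy_set_bits(phydev, EXAMPLE_CTRL_REG, EXAMPLE_CLK_EN);
	if (err < 0)
		return err;

	err = phy_clear_bits(phydev, EXAMPLE_CTRL_REG, EXAMPLE_PWR_DOWN);
	if (err < 0)
		return err;

	/* Replace a multi-bit field in one read-modify-write. */
	return phy_modify(phydev, EXAMPLE_CTRL_REG, EXAMPLE_MODE_MASK,
			  EXAMPLE_MODE_RGMII);
}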
+
+/**
+ * __phy_set_bits_mmd - Convenience function for setting bits in a register
+ * on MMD
+ * @phydev: the phy_device struct
+ * @devad: the MMD containing register to modify
+ * @regnum: register number to modify
+ * @val: bits to set
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_set_bits_mmd(struct phy_device *phydev, int devad,
+ u32 regnum, u16 val)
+{
+ return __phy_modify_mmd(phydev, devad, regnum, 0, val);
+}
+
+/**
+ * __phy_clear_bits_mmd - Convenience function for clearing bits in a register
+ * on MMD
+ * @phydev: the phy_device struct
+ * @devad: the MMD containing register to modify
+ * @regnum: register number to modify
+ * @val: bits to clear
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_clear_bits_mmd(struct phy_device *phydev, int devad,
+ u32 regnum, u16 val)
+{
+ return __phy_modify_mmd(phydev, devad, regnum, val, 0);
+}
+
+/**
+ * phy_set_bits_mmd - Convenience function for setting bits in a register
+ * on MMD
+ * @phydev: the phy_device struct
+ * @devad: the MMD containing register to modify
+ * @regnum: register number to modify
+ * @val: bits to set
+ */
+static inline int phy_set_bits_mmd(struct phy_device *phydev, int devad,
+ u32 regnum, u16 val)
+{
+ return phy_modify_mmd(phydev, devad, regnum, 0, val);
+}
+
+/**
+ * phy_clear_bits_mmd - Convenience function for clearing bits in a register
+ * on MMD
+ * @phydev: the phy_device struct
+ * @devad: the MMD containing register to modify
+ * @regnum: register number to modify
+ * @val: bits to clear
+ */
+static inline int phy_clear_bits_mmd(struct phy_device *phydev, int devad,
+ u32 regnum, u16 val)
+{
+ return phy_modify_mmd(phydev, devad, regnum, val, 0);
+}
+
+/**
* phy_interrupt_is_valid - Convenience function for testing a given PHY irq
* @phydev: the phy_device struct
*
* NOTE: must be kept in sync with addition/removal of PHY_POLL and
- * PHY_IGNORE_INTERRUPT
+ * PHY_MAC_INTERRUPT
*/
static inline bool phy_interrupt_is_valid(struct phy_device *phydev)
{
- return phydev->irq != PHY_POLL && phydev->irq != PHY_IGNORE_INTERRUPT;
+ return phydev->irq != PHY_POLL && phydev->irq != PHY_MAC_INTERRUPT;
+}
+
+/**
+ * phy_polling_mode - Convenience function for testing whether polling is
+ * used to detect PHY status changes
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_polling_mode(struct phy_device *phydev)
+{
+ if (phydev->state == PHY_CABLETEST)
+ if (phydev->drv->flags & PHY_POLL_CABLE_TEST)
+ return true;
+
+ return phydev->irq == PHY_POLL;
+}
+
+/**
+ * phy_has_hwtstamp - Tests whether a PHY supports time stamp configuration.
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_has_hwtstamp(struct phy_device *phydev)
+{
+ return phydev && phydev->mii_ts && phydev->mii_ts->hwtstamp;
+}
+
+/**
+ * phy_has_rxtstamp - Tests whether a PHY supports receive time stamping.
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_has_rxtstamp(struct phy_device *phydev)
+{
+ return phydev && phydev->mii_ts && phydev->mii_ts->rxtstamp;
+}
+
+/**
+ * phy_has_tsinfo - Tests whether a PHY reports time stamping and/or
+ * PTP hardware clock capabilities.
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_has_tsinfo(struct phy_device *phydev)
+{
+ return phydev && phydev->mii_ts && phydev->mii_ts->ts_info;
+}
+
+/**
+ * phy_has_txtstamp - Tests whether a PHY supports transmit time stamping.
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_has_txtstamp(struct phy_device *phydev)
+{
+ return phydev && phydev->mii_ts && phydev->mii_ts->txtstamp;
+}
+
+static inline int phy_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
+{
+ return phydev->mii_ts->hwtstamp(phydev->mii_ts, ifr);
+}
+
+static inline bool phy_rxtstamp(struct phy_device *phydev, struct sk_buff *skb,
+ int type)
+{
+ return phydev->mii_ts->rxtstamp(phydev->mii_ts, skb, type);
+}
+
+static inline int phy_ts_info(struct phy_device *phydev,
+ struct ethtool_ts_info *tsinfo)
+{
+ return phydev->mii_ts->ts_info(phydev->mii_ts, tsinfo);
+}
+
+static inline void phy_txtstamp(struct phy_device *phydev, struct sk_buff *skb,
+ int type)
+{
+ phydev->mii_ts->txtstamp(phydev->mii_ts, skb, type);
}
/**
@@ -729,9 +1516,18 @@ static inline bool phy_is_internal(struct phy_device *phydev)
}
/**
+ * phy_on_sfp - Convenience function for testing if a PHY is on an SFP module
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_on_sfp(struct phy_device *phydev)
+{
+ return phydev->is_on_sfp_module;
+}
+
+/**
* phy_interface_mode_is_rgmii - Convenience function for testing if a
* PHY interface mode is RGMII (all variants)
- * @mode: the phy_interface_t enum
+ * @mode: the &phy_interface_t enum
*/
static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
{
@@ -740,6 +1536,20 @@ static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
};
/**
+ * phy_interface_mode_is_8023z() - does the PHY interface mode use 802.3z
+ * negotiation
+ * @mode: one of &enum phy_interface_t
+ *
+ * Returns true if the PHY interface mode uses the 16-bit negotiation
+ * word as defined in 802.3z. (See 802.3-2015 37.2.1 Config_Reg encoding)
+ */
+static inline bool phy_interface_mode_is_8023z(phy_interface_t mode)
+{
+ return mode == PHY_INTERFACE_MODE_1000BASEX ||
+ mode == PHY_INTERFACE_MODE_2500BASEX;
+}
+
+/**
* phy_interface_is_rgmii - Convenience function for testing if a PHY interface
* is RGMII (all variants)
* @phydev: the phy_device struct
@@ -749,7 +1559,7 @@ static inline bool phy_interface_is_rgmii(struct phy_device *phydev)
return phy_interface_mode_is_rgmii(phydev->interface);
};
-/*
+/**
* phy_is_pseudo_fixed_link - Convenience function for testing if this
* PHY is the CPU port facing side of an Ethernet switch, or similar.
* @phydev: the phy_device struct
@@ -759,26 +1569,56 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
return phydev->is_pseudo_fixed_link;
}
-/**
- * phy_write_mmd - Convenience function for writing a register
- * on an MMD on a given PHY.
- * @phydev: The phy_device struct
- * @devad: The MMD to read from
- * @regnum: The register on the MMD to read
- * @val: value to write to @regnum
- *
- * Same rules as for phy_write();
- */
-int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
-
-struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
+int phy_save_page(struct phy_device *phydev);
+int phy_select_page(struct phy_device *phydev, int page);
+int phy_restore_page(struct phy_device *phydev, int oldpage, int ret);
+int phy_read_paged(struct phy_device *phydev, int page, u32 regnum);
+int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val);
+int phy_modify_paged_changed(struct phy_device *phydev, int page, u32 regnum,
+ u16 mask, u16 set);
+int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
+ u16 mask, u16 set);
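
/*
 * Editorial sketch (not part of this patch): the canonical open-coded
 * form of a paged read-modify-write. phy_select_page() takes the MDIO
 * bus lock, and phy_restore_page() must be called even when selection
 * failed, since it drops the lock and folds the errors together.
 * phy_modify_paged() wraps exactly this pattern; the page and register
 * numbers are invented.
 */
static int example_paged_rmw(struct phy_device *phydev)
{
	int oldpage, ret = 0;

	oldpage = phy_select_page(phydev, EXAMPLE_PAGE_EXT);
	if (oldpage >= 0)
		ret = __phy_modify(phydev, EXAMPLE_EXT_REG,
				   EXAMPLE_EXT_MASK, EXAMPLE_EXT_VAL);

	return phy_restore_page(phydev, oldpage, ret);
}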
+
+struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
bool is_c45,
struct phy_c45_device_ids *c45_ids);
#if IS_ENABLED(CONFIG_PHYLIB)
+int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id);
+struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode);
+struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode);
+struct phy_device *device_phy_find_device(struct device *dev);
+struct fwnode_handle *fwnode_get_phy_node(const struct fwnode_handle *fwnode);
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
int phy_device_register(struct phy_device *phy);
void phy_device_free(struct phy_device *phydev);
#else
+static inline int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id)
+{
+ return 0;
+}
+static inline
+struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline
+struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode)
+{
+ return NULL;
+}
+
+static inline struct phy_device *device_phy_find_device(struct device *dev)
+{
+ return NULL;
+}
+
+static inline
+struct fwnode_handle *fwnode_get_phy_node(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
static inline
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
{
@@ -793,10 +1633,16 @@ static inline int phy_device_register(struct phy_device *phy)
static inline void phy_device_free(struct phy_device *phydev) { }
#endif /* CONFIG_PHYLIB */
void phy_device_remove(struct phy_device *phydev);
+int phy_get_c45_ids(struct phy_device *phydev);
int phy_init_hw(struct phy_device *phydev);
int phy_suspend(struct phy_device *phydev);
int phy_resume(struct phy_device *phydev);
+int __phy_resume(struct phy_device *phydev);
int phy_loopback(struct phy_device *phydev, bool enable);
+void phy_sfp_attach(void *upstream, struct sfp_bus *bus);
+void phy_sfp_detach(void *upstream, struct sfp_bus *bus);
+int phy_sfp_probe(struct phy_device *phydev,
+ const struct sfp_upstream_ops *ops);
struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
phy_interface_t interface);
struct phy_device *phy_find_first(struct mii_bus *bus);
@@ -812,82 +1658,219 @@ void phy_disconnect(struct phy_device *phydev);
void phy_detach(struct phy_device *phydev);
void phy_start(struct phy_device *phydev);
void phy_stop(struct phy_device *phydev);
+int phy_config_aneg(struct phy_device *phydev);
int phy_start_aneg(struct phy_device *phydev);
int phy_aneg_done(struct phy_device *phydev);
+int phy_speed_down(struct phy_device *phydev, bool sync);
+int phy_speed_up(struct phy_device *phydev);
+bool phy_check_valid(int speed, int duplex, unsigned long *features);
-int phy_stop_interrupts(struct phy_device *phydev);
int phy_restart_aneg(struct phy_device *phydev);
+int phy_reset_after_clk_enable(struct phy_device *phydev);
-static inline int phy_read_status(struct phy_device *phydev)
+#if IS_ENABLED(CONFIG_PHYLIB)
+int phy_start_cable_test(struct phy_device *phydev,
+ struct netlink_ext_ack *extack);
+int phy_start_cable_test_tdr(struct phy_device *phydev,
+ struct netlink_ext_ack *extack,
+ const struct phy_tdr_config *config);
+#else
+static inline
+int phy_start_cable_test(struct phy_device *phydev,
+ struct netlink_ext_ack *extack)
{
- if (!phydev->drv)
- return -EIO;
+ NL_SET_ERR_MSG(extack, "Kernel not compiled with PHYLIB support");
+ return -EOPNOTSUPP;
+}
+static inline
+int phy_start_cable_test_tdr(struct phy_device *phydev,
+ struct netlink_ext_ack *extack,
+ const struct phy_tdr_config *config)
+{
+ NL_SET_ERR_MSG(extack, "Kernel not compiled with PHYLIB support");
+ return -EOPNOTSUPP;
+}
+#endif
- return phydev->drv->read_status(phydev);
+int phy_cable_test_result(struct phy_device *phydev, u8 pair, u16 result);
+int phy_cable_test_fault_length(struct phy_device *phydev, u8 pair,
+ u16 cm);
+
+static inline void phy_device_reset(struct phy_device *phydev, int value)
+{
+ mdio_device_reset(&phydev->mdio, value);
}
#define phydev_err(_phydev, format, args...) \
dev_err(&_phydev->mdio.dev, format, ##args)
+#define phydev_err_probe(_phydev, err, format, args...) \
+ dev_err_probe(&_phydev->mdio.dev, err, format, ##args)
+
+#define phydev_info(_phydev, format, args...) \
+ dev_info(&_phydev->mdio.dev, format, ##args)
+
+#define phydev_warn(_phydev, format, args...) \
+ dev_warn(&_phydev->mdio.dev, format, ##args)
+
#define phydev_dbg(_phydev, format, args...) \
- dev_dbg(&_phydev->mdio.dev, format, ##args);
+ dev_dbg(&_phydev->mdio.dev, format, ##args)
static inline const char *phydev_name(const struct phy_device *phydev)
{
return dev_name(&phydev->mdio.dev);
}
+static inline void phy_lock_mdio_bus(struct phy_device *phydev)
+{
+ mutex_lock(&phydev->mdio.bus->mdio_lock);
+}
+
+static inline void phy_unlock_mdio_bus(struct phy_device *phydev)
+{
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+}
+
void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
__printf(2, 3);
+char *phy_attached_info_irq(struct phy_device *phydev)
+ __malloc;
void phy_attached_info(struct phy_device *phydev);
/* Clause 22 PHY */
-int genphy_config_init(struct phy_device *phydev);
+int genphy_read_abilities(struct phy_device *phydev);
int genphy_setup_forced(struct phy_device *phydev);
int genphy_restart_aneg(struct phy_device *phydev);
-int genphy_config_aneg(struct phy_device *phydev);
+int genphy_check_and_restart_aneg(struct phy_device *phydev, bool restart);
+int genphy_config_eee_advert(struct phy_device *phydev);
+int __genphy_config_aneg(struct phy_device *phydev, bool changed);
int genphy_aneg_done(struct phy_device *phydev);
int genphy_update_link(struct phy_device *phydev);
+int genphy_read_lpa(struct phy_device *phydev);
+int genphy_read_status_fixed(struct phy_device *phydev);
int genphy_read_status(struct phy_device *phydev);
+int genphy_read_master_slave(struct phy_device *phydev);
int genphy_suspend(struct phy_device *phydev);
int genphy_resume(struct phy_device *phydev);
int genphy_loopback(struct phy_device *phydev, bool enable);
int genphy_soft_reset(struct phy_device *phydev);
-static inline int genphy_no_soft_reset(struct phy_device *phydev)
+irqreturn_t genphy_handle_interrupt_no_ack(struct phy_device *phydev);
+
+static inline int genphy_config_aneg(struct phy_device *phydev)
+{
+ return __genphy_config_aneg(phydev, false);
+}
+
+static inline int genphy_no_config_intr(struct phy_device *phydev)
{
return 0;
}
+int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad,
+ u16 regnum);
+int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
+ u16 regnum, u16 val);
+
+/* Clause 37 */
+int genphy_c37_config_aneg(struct phy_device *phydev);
+int genphy_c37_read_status(struct phy_device *phydev);
/* Clause 45 PHY */
int genphy_c45_restart_aneg(struct phy_device *phydev);
+int genphy_c45_check_and_restart_aneg(struct phy_device *phydev, bool restart);
int genphy_c45_aneg_done(struct phy_device *phydev);
-int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask);
+int genphy_c45_read_link(struct phy_device *phydev);
int genphy_c45_read_lpa(struct phy_device *phydev);
int genphy_c45_read_pma(struct phy_device *phydev);
int genphy_c45_pma_setup_forced(struct phy_device *phydev);
+int genphy_c45_pma_baset1_setup_master_slave(struct phy_device *phydev);
+int genphy_c45_an_config_aneg(struct phy_device *phydev);
int genphy_c45_an_disable_aneg(struct phy_device *phydev);
+int genphy_c45_read_mdix(struct phy_device *phydev);
+int genphy_c45_pma_read_abilities(struct phy_device *phydev);
+int genphy_c45_read_eee_abilities(struct phy_device *phydev);
+int genphy_c45_pma_baset1_read_master_slave(struct phy_device *phydev);
+int genphy_c45_read_status(struct phy_device *phydev);
+int genphy_c45_baset1_read_status(struct phy_device *phydev);
+int genphy_c45_config_aneg(struct phy_device *phydev);
+int genphy_c45_loopback(struct phy_device *phydev, bool enable);
+int genphy_c45_pma_resume(struct phy_device *phydev);
+int genphy_c45_pma_suspend(struct phy_device *phydev);
+int genphy_c45_fast_retrain(struct phy_device *phydev, bool enable);
+int genphy_c45_plca_get_cfg(struct phy_device *phydev,
+ struct phy_plca_cfg *plca_cfg);
+int genphy_c45_plca_set_cfg(struct phy_device *phydev,
+ const struct phy_plca_cfg *plca_cfg);
+int genphy_c45_plca_get_status(struct phy_device *phydev,
+ struct phy_plca_status *plca_st);
+int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv,
+ unsigned long *lp, bool *is_enabled);
+int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
+ struct ethtool_eee *data);
+int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
+ struct ethtool_eee *data);
+int genphy_c45_write_eee_adv(struct phy_device *phydev, unsigned long *adv);
+int genphy_c45_an_config_eee_aneg(struct phy_device *phydev);
+int genphy_c45_read_eee_adv(struct phy_device *phydev, unsigned long *adv);
+
+/* Generic C45 PHY driver */
+extern struct phy_driver genphy_c45_driver;
+
+/* The gen10g_* functions are the old Clause 45 stub */
+int gen10g_config_aneg(struct phy_device *phydev);
+
+static inline int phy_read_status(struct phy_device *phydev)
+{
+ if (!phydev->drv)
+ return -EIO;
+
+ if (phydev->drv->read_status)
+ return phydev->drv->read_status(phydev);
+ else
+ return genphy_read_status(phydev);
+}
void phy_driver_unregister(struct phy_driver *drv);
void phy_drivers_unregister(struct phy_driver *drv, int n);
int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
int phy_drivers_register(struct phy_driver *new_driver, int n,
struct module *owner);
+void phy_error(struct phy_device *phydev);
void phy_state_machine(struct work_struct *work);
-void phy_change(struct phy_device *phydev);
-void phy_change_work(struct work_struct *work);
-void phy_mac_interrupt(struct phy_device *phydev, int new_link);
+void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies);
+void phy_trigger_machine(struct phy_device *phydev);
+void phy_mac_interrupt(struct phy_device *phydev);
void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
-void phy_trigger_machine(struct phy_device *phydev, bool sync);
-int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
void phy_ethtool_ksettings_get(struct phy_device *phydev,
struct ethtool_link_ksettings *cmd);
int phy_ethtool_ksettings_set(struct phy_device *phydev,
const struct ethtool_link_ksettings *cmd);
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
-int phy_start_interrupts(struct phy_device *phydev);
+int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd);
+int phy_disable_interrupts(struct phy_device *phydev);
+void phy_request_interrupt(struct phy_device *phydev);
+void phy_free_interrupt(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
-int phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
+int phy_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface);
+void phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
+void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode);
+void phy_advertise_supported(struct phy_device *phydev);
+void phy_support_sym_pause(struct phy_device *phydev);
+void phy_support_asym_pause(struct phy_device *phydev);
+void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx,
+ bool autoneg);
+void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx);
+bool phy_validate_pause(struct phy_device *phydev,
+ struct ethtool_pauseparam *pp);
+void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause);
+
+s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
+ const int *delay_values, int size, bool is_rx);
+
+void phy_resolve_pause(unsigned long *local_adv, unsigned long *partner_adv,
+ bool *tx_pause, bool *rx_pause);
int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
int (*run)(struct phy_device *));
@@ -912,12 +1895,91 @@ int phy_ethtool_get_link_ksettings(struct net_device *ndev,
int phy_ethtool_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *cmd);
int phy_ethtool_nway_reset(struct net_device *ndev);
+int phy_package_join(struct phy_device *phydev, int addr, size_t priv_size);
+void phy_package_leave(struct phy_device *phydev);
+int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
+ int addr, size_t priv_size);
#if IS_ENABLED(CONFIG_PHYLIB)
int __init mdio_bus_init(void);
void mdio_bus_exit(void);
#endif
+int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data);
+int phy_ethtool_get_sset_count(struct phy_device *phydev);
+int phy_ethtool_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data);
+int phy_ethtool_get_plca_cfg(struct phy_device *phydev,
+ struct phy_plca_cfg *plca_cfg);
+int phy_ethtool_set_plca_cfg(struct phy_device *phydev,
+ const struct phy_plca_cfg *plca_cfg,
+ struct netlink_ext_ack *extack);
+int phy_ethtool_get_plca_status(struct phy_device *phydev,
+ struct phy_plca_status *plca_st);
+
+static inline int phy_package_read(struct phy_device *phydev, u32 regnum)
+{
+ struct phy_package_shared *shared = phydev->shared;
+
+ if (!shared)
+ return -EIO;
+
+ return mdiobus_read(phydev->mdio.bus, shared->addr, regnum);
+}
+
+static inline int __phy_package_read(struct phy_device *phydev, u32 regnum)
+{
+ struct phy_package_shared *shared = phydev->shared;
+
+ if (!shared)
+ return -EIO;
+
+ return __mdiobus_read(phydev->mdio.bus, shared->addr, regnum);
+}
+
+static inline int phy_package_write(struct phy_device *phydev,
+ u32 regnum, u16 val)
+{
+ struct phy_package_shared *shared = phydev->shared;
+
+ if (!shared)
+ return -EIO;
+
+ return mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val);
+}
+
+static inline int __phy_package_write(struct phy_device *phydev,
+ u32 regnum, u16 val)
+{
+ struct phy_package_shared *shared = phydev->shared;
+
+ if (!shared)
+ return -EIO;
+
+ return __mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val);
+}
+
+static inline bool __phy_package_set_once(struct phy_device *phydev,
+ unsigned int b)
+{
+ struct phy_package_shared *shared = phydev->shared;
+
+ if (!shared)
+ return false;
+
+ return !test_and_set_bit(b, &shared->flags);
+}
+
+static inline bool phy_package_init_once(struct phy_device *phydev)
+{
+ return __phy_package_set_once(phydev, PHY_SHARED_F_INIT_DONE);
+}
+
+static inline bool phy_package_probe_once(struct phy_device *phydev)
+{
+ return __phy_package_set_once(phydev, PHY_SHARED_F_PROBE_DONE);
+}
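
/*
 * Editorial sketch (not part of this patch): a quad-PHY probe() that
 * joins the shared state anchored at package address 0 and lets only
 * the first instance run the global setup. The shared structure and
 * init helper are invented for illustration.
 */
struct example_shared_priv {
	int dummy;
};

static int example_quad_phy_probe(struct phy_device *phydev)
{
	int ret;

	ret = devm_phy_package_join(&phydev->mdio.dev, phydev, 0,
				    sizeof(struct example_shared_priv));
	if (ret)
		return ret;

	if (phy_package_probe_once(phydev))
		ret = example_package_global_init(phydev);

	return ret;
}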
+
extern struct bus_type mdio_bus_type;
struct mdio_board_info {
@@ -940,8 +2002,9 @@ static inline int mdiobus_register_board_info(const struct mdio_board_info *i,
/**
- * module_phy_driver() - Helper macro for registering PHY drivers
+ * phy_module_driver() - Helper macro for registering PHY drivers
* @__phy_drivers: array of PHY drivers to register
+ * @__count: Number of members in the array
*
* Helper macro for PHY drivers which do not do anything special in module
* init/exit. Each module may only use this macro once, and calling it
@@ -962,4 +2025,7 @@ module_exit(phy_module_exit)
#define module_phy_driver(__phy_drivers) \
phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers))
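
/*
 * Editorial sketch (not part of this patch): a minimal driver tying
 * the pieces together with the PHY_ID_MATCH_* macros defined above.
 * The ID and name are placeholders; config_aneg/read_status fall back
 * to the genphy implementations when left unset.
 */
static struct phy_driver example_phy_drivers[] = {
	{
		PHY_ID_MATCH_MODEL(0x00112230),
		.name		= "Example Gigabit PHY",
		.suspend	= genphy_suspend,
		.resume		= genphy_resume,
	},
};

module_phy_driver(example_phy_drivers);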
+bool phy_driver_is_genphy(struct phy_device *phydev);
+bool phy_driver_is_genphy_10g(struct phy_device *phydev);
+
#endif /* __PHY_H */
diff --git a/include/linux/phy/omap_control_phy.h b/include/linux/phy/omap_control_phy.h
index eb7d4a135a9e..aec57dd784f7 100644
--- a/include/linux/phy/omap_control_phy.h
+++ b/include/linux/phy/omap_control_phy.h
@@ -1,19 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* omap_control_phy.h - Header file for the PHY part of control module.
*
* Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Author: Kishon Vijay Abraham I <kishon@ti.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __OMAP_CONTROL_PHY_H__
diff --git a/include/linux/phy/omap_usb.h b/include/linux/phy/omap_usb.h
index 2e5fb870efa9..e23b52df93ec 100644
--- a/include/linux/phy/omap_usb.h
+++ b/include/linux/phy/omap_usb.h
@@ -1,79 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* omap_usb.h -- omap usb2 phy header file
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
+ * Copyright (C) 2012-2020 Texas Instruments Incorporated - http://www.ti.com
* Author: Kishon Vijay Abraham I <kishon@ti.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __DRIVERS_OMAP_USB2_H
#define __DRIVERS_OMAP_USB2_H
-#include <linux/io.h>
-#include <linux/usb/otg.h>
-
-struct usb_dpll_params {
- u16 m;
- u8 n;
- u8 freq:3;
- u8 sd;
- u32 mf;
-};
-
-enum omap_usb_phy_type {
- TYPE_USB2, /* USB2_PHY, power down in CONTROL_DEV_CONF */
- TYPE_DRA7USB2, /* USB2 PHY, power and power_aux e.g. DRA7 */
- TYPE_AM437USB2, /* USB2 PHY, power e.g. AM437x */
-};
-
-struct omap_usb {
- struct usb_phy phy;
- struct phy_companion *comparator;
- void __iomem *pll_ctrl_base;
- void __iomem *phy_base;
- struct device *dev;
- struct device *control_dev;
- struct clk *wkupclk;
- struct clk *optclk;
- u8 flags;
- enum omap_usb_phy_type type;
- struct regmap *syscon_phy_power; /* ctrl. reg. acces */
- unsigned int power_reg; /* power reg. index within syscon */
- u32 mask;
- u32 power_on;
- u32 power_off;
-};
-
-struct usb_phy_data {
- const char *label;
- u8 flags;
- u32 mask;
- u32 power_on;
- u32 power_off;
-};
-
-/* Driver Flags */
-#define OMAP_USB2_HAS_START_SRP (1 << 0)
-#define OMAP_USB2_HAS_SET_VBUS (1 << 1)
-#define OMAP_USB2_CALIBRATE_FALSE_DISCONNECT (1 << 2)
-
-#define OMAP_DEV_PHY_PD BIT(0)
-#define OMAP_USB2_PHY_PD BIT(28)
-
-#define AM437X_USB2_PHY_PD BIT(0)
-#define AM437X_USB2_OTG_PD BIT(1)
-#define AM437X_USB2_OTGVDET_EN BIT(19)
-#define AM437X_USB2_OTGSESSEND_EN BIT(20)
+#include <linux/usb/phy_companion.h>
#define phy_to_omapusb(x) container_of((x), struct omap_usb, phy)
@@ -86,15 +22,4 @@ static inline int omap_usb2_set_comparator(struct phy_companion *comparator)
}
#endif
-static inline u32 omap_usb_readl(void __iomem *addr, unsigned offset)
-{
- return __raw_readl(addr + offset);
-}
-
-static inline void omap_usb_writel(void __iomem *addr, unsigned offset,
- u32 data)
-{
- __raw_writel(data, addr + offset);
-}
-
#endif /* __DRIVERS_OMAP_USB_H */
diff --git a/include/linux/phy/pcie.h b/include/linux/phy/pcie.h
new file mode 100644
index 000000000000..e7ac81764576
--- /dev/null
+++ b/include/linux/phy/pcie.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 Rockchip Electronics Co., Ltd.
+ */
+#ifndef __PHY_PCIE_H
+#define __PHY_PCIE_H
+
+#define PHY_MODE_PCIE_RC 20
+#define PHY_MODE_PCIE_EP 21
+#define PHY_MODE_PCIE_BIFURCATION 22
+
+#endif
diff --git a/include/linux/phy/phy-dp.h b/include/linux/phy/phy-dp.h
new file mode 100644
index 000000000000..18cad23642cd
--- /dev/null
+++ b/include/linux/phy/phy-dp.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Cadence Design Systems Inc.
+ */
+
+#ifndef __PHY_DP_H_
+#define __PHY_DP_H_
+
+#include <linux/types.h>
+
+/**
+ * struct phy_configure_opts_dp - DisplayPort PHY configuration set
+ *
+ * This structure is used to represent the configuration state of a
+ * DisplayPort phy.
+ */
+struct phy_configure_opts_dp {
+ /**
+ * @link_rate:
+ *
+ * Link Rate, in Mb/s, of the main link.
+ *
+ * Allowed values: 1620, 2160, 2430, 2700, 3240, 4320, 5400, 8100 Mb/s
+ */
+ unsigned int link_rate;
+
+ /**
+ * @lanes:
+ *
+ * Number of active, consecutive, data lanes, starting from
+ * lane 0, used for the transmissions on main link.
+ *
+ * Allowed values: 1, 2, 4
+ */
+ unsigned int lanes;
+
+ /**
+ * @voltage:
+ *
+ * Voltage swing levels, as specified by DisplayPort specification,
+ * to be used by particular lanes. One value per lane.
+ * voltage[0] is for lane 0, voltage[1] is for lane 1, etc.
+ *
+ * Maximum value: 3
+ */
+ unsigned int voltage[4];
+
+ /**
+ * @pre:
+ *
+ * Pre-emphasis levels, as specified by DisplayPort specification, to be
+ * used by particular lanes. One value per lane.
+ *
+ * Maximum value: 3
+ */
+ unsigned int pre[4];
+
+ /**
+ * @ssc:
+ *
+ * Flag indicating, whether or not to enable spread-spectrum clocking.
+ *
+ */
+ u8 ssc : 1;
+
+ /**
+ * @set_rate:
+ *
+	 * Flag indicating whether or not to reconfigure the link rate and
+	 * SSC to the requested values.
+ *
+ */
+ u8 set_rate : 1;
+
+ /**
+ * @set_lanes:
+ *
+	 * Flag indicating whether or not to reconfigure the lane count to
+	 * the requested value.
+ *
+ */
+ u8 set_lanes : 1;
+
+ /**
+ * @set_voltages:
+ *
+	 * Flag indicating whether or not to reconfigure the voltage swing
+	 * and pre-emphasis to the requested values. Only the lanes specified
+	 * by the "lanes" parameter will be affected.
+ *
+ */
+ u8 set_voltages : 1;
+};
+
+#endif /* __PHY_DP_H_ */
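For illustration, a hedged sketch of how a DisplayPort controller driver might fill this set and hand it to phy_configure(); the function name and the chosen rate and drive levels are assumptions, not part of the header:

#include <linux/phy/phy.h>

/* Hypothetical sketch: request 2-lane HBR2 with per-lane drive settings. */
static int example_dp_phy_configure(struct phy *phy)
{
	union phy_configure_opts opts = { };

	opts.dp.link_rate = 5400;	/* HBR2, in Mb/s */
	opts.dp.lanes = 2;
	opts.dp.voltage[0] = 1;		/* per-lane swing level */
	opts.dp.voltage[1] = 1;
	opts.dp.pre[0] = 0;		/* per-lane pre-emphasis level */
	opts.dp.pre[1] = 0;
	opts.dp.ssc = 1;
	opts.dp.set_rate = 1;
	opts.dp.set_lanes = 1;
	opts.dp.set_voltages = 1;

	return phy_configure(phy, &opts);
}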
diff --git a/include/linux/phy/phy-lvds.h b/include/linux/phy/phy-lvds.h
new file mode 100644
index 000000000000..09931d080a6d
--- /dev/null
+++ b/include/linux/phy/phy-lvds.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020,2022 NXP
+ */
+
+#ifndef __PHY_LVDS_H_
+#define __PHY_LVDS_H_
+
+/**
+ * struct phy_configure_opts_lvds - LVDS configuration set
+ * @bits_per_lane_and_dclk_cycle: Number of bits per lane per differential
+ * clock cycle.
+ * @differential_clk_rate: Clock rate, in Hertz, of the LVDS
+ * differential clock.
+ * @lanes: Number of active, consecutive,
+ * data lanes, starting from lane 0,
+ * used for the transmissions.
+ * @is_slave: Boolean, true if the phy is a slave
+ * which works together with a master
+ * phy to support dual link transmission,
+ * otherwise a regular phy or a master phy.
+ *
+ * This structure is used to represent the configuration state of a LVDS phy.
+ */
+struct phy_configure_opts_lvds {
+ unsigned int bits_per_lane_and_dclk_cycle;
+ unsigned long differential_clk_rate;
+ unsigned int lanes;
+ bool is_slave;
+};
+
+#endif /* __PHY_LVDS_H_ */
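A short sketch along the same lines for LVDS, assuming a typical 7-bits-per-lane, 4-lane panel link; all names and values here are illustrative only:

#include <linux/phy/phy.h>

/* Hypothetical sketch: single-link LVDS, 7 bits per lane per clock cycle. */
static int example_lvds_phy_configure(struct phy *phy, unsigned long dclk)
{
	union phy_configure_opts opts = { };

	opts.lvds.bits_per_lane_and_dclk_cycle = 7;
	opts.lvds.differential_clk_rate = dclk;	/* in Hz */
	opts.lvds.lanes = 4;
	opts.lvds.is_slave = false;	/* regular or master phy */

	return phy_configure(phy, &opts);
}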
diff --git a/include/linux/phy/phy-mipi-dphy.h b/include/linux/phy/phy-mipi-dphy.h
new file mode 100644
index 000000000000..1ac128d78dfe
--- /dev/null
+++ b/include/linux/phy/phy-mipi-dphy.h
@@ -0,0 +1,287 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ */
+
+#ifndef __PHY_MIPI_DPHY_H_
+#define __PHY_MIPI_DPHY_H_
+
+/**
+ * struct phy_configure_opts_mipi_dphy - MIPI D-PHY configuration set
+ *
+ * This structure is used to represent the configuration state of a
+ * MIPI D-PHY phy.
+ */
+struct phy_configure_opts_mipi_dphy {
+ /**
+ * @clk_miss:
+ *
+ * Timeout, in picoseconds, for receiver to detect absence of
+ * Clock transitions and disable the Clock Lane HS-RX.
+ *
+ * Maximum value: 60000 ps
+ */
+ unsigned int clk_miss;
+
+ /**
+ * @clk_post:
+ *
+ * Time, in picoseconds, that the transmitter continues to
+ * send HS clock after the last associated Data Lane has
+ * transitioned to LP Mode. Interval is defined as the period
+ * from the end of @hs_trail to the beginning of @clk_trail.
+ *
+ * Minimum value: 60000 ps + 52 * @hs_clk_rate period in ps
+ */
+ unsigned int clk_post;
+
+ /**
+ * @clk_pre:
+ *
+ * Time, in UI, that the HS clock shall be driven by
+ * the transmitter prior to any associated Data Lane beginning
+ * the transition from LP to HS mode.
+ *
+ * Minimum value: 8 UI
+ */
+ unsigned int clk_pre;
+
+ /**
+ * @clk_prepare:
+ *
+ * Time, in picoseconds, that the transmitter drives the Clock
+ * Lane LP-00 Line state immediately before the HS-0 Line
+ * state starting the HS transmission.
+ *
+ * Minimum value: 38000 ps
+ * Maximum value: 95000 ps
+ */
+ unsigned int clk_prepare;
+
+ /**
+ * @clk_settle:
+ *
+ * Time interval, in picoseconds, during which the HS receiver
+ * should ignore any Clock Lane HS transitions, starting from
+ * the beginning of @clk_prepare.
+ *
+ * Minimum value: 95000 ps
+ * Maximum value: 300000 ps
+ */
+ unsigned int clk_settle;
+
+ /**
+ * @clk_term_en:
+ *
+ * Time, in picoseconds, for the Clock Lane receiver to enable
+ * the HS line termination.
+ *
+ * Maximum value: 38000 ps
+ */
+ unsigned int clk_term_en;
+
+ /**
+ * @clk_trail:
+ *
+ * Time, in picoseconds, that the transmitter drives the HS-0
+ * state after the last payload clock bit of a HS transmission
+ * burst.
+ *
+ * Minimum value: 60000 ps
+ */
+ unsigned int clk_trail;
+
+ /**
+ * @clk_zero:
+ *
+ * Time, in picoseconds, that the transmitter drives the HS-0
+ * state prior to starting the Clock.
+ */
+ unsigned int clk_zero;
+
+ /**
+ * @d_term_en:
+ *
+ * Time, in picoseconds, for the Data Lane receiver to enable
+ * the HS line termination.
+ *
+ * Maximum value: 35000 ps + 4 * @hs_clk_rate period in ps
+ */
+ unsigned int d_term_en;
+
+ /**
+ * @eot:
+ *
+ * Transmitted time interval, in picoseconds, from the start
+	 * of @hs_trail or @clk_trail, to the start of the LP-11
+ * state following a HS burst.
+ *
+ * Maximum value: 105000 ps + 12 * @hs_clk_rate period in ps
+ */
+ unsigned int eot;
+
+ /**
+ * @hs_exit:
+ *
+ * Time, in picoseconds, that the transmitter drives LP-11
+ * following a HS burst.
+ *
+ * Minimum value: 100000 ps
+ */
+ unsigned int hs_exit;
+
+ /**
+ * @hs_prepare:
+ *
+ * Time, in picoseconds, that the transmitter drives the Data
+ * Lane LP-00 Line state immediately before the HS-0 Line
+ * state starting the HS transmission.
+ *
+ * Minimum value: 40000 ps + 4 * @hs_clk_rate period in ps
+ * Maximum value: 85000 ps + 6 * @hs_clk_rate period in ps
+ */
+ unsigned int hs_prepare;
+
+ /**
+ * @hs_settle:
+ *
+ * Time interval, in picoseconds, during which the HS receiver
+ * shall ignore any Data Lane HS transitions, starting from
+ * the beginning of @hs_prepare.
+ *
+ * Minimum value: 85000 ps + 6 * @hs_clk_rate period in ps
+ * Maximum value: 145000 ps + 10 * @hs_clk_rate period in ps
+ */
+ unsigned int hs_settle;
+
+ /**
+ * @hs_skip:
+ *
+ * Time interval, in picoseconds, during which the HS-RX
+ * should ignore any transitions on the Data Lane, following a
+ * HS burst. The end point of the interval is defined as the
+ * beginning of the LP-11 state following the HS burst.
+ *
+ * Minimum value: 40000 ps
+ * Maximum value: 55000 ps + 4 * @hs_clk_rate period in ps
+ */
+ unsigned int hs_skip;
+
+ /**
+ * @hs_trail:
+ *
+ * Time, in picoseconds, that the transmitter drives the
+	 * flipped differential state after the last payload data bit of a
+	 * HS transmission burst.
+ *
+ * Minimum value: max(8 * @hs_clk_rate period in ps,
+ * 60000 ps + 4 * @hs_clk_rate period in ps)
+ */
+ unsigned int hs_trail;
+
+ /**
+ * @hs_zero:
+ *
+ * Time, in picoseconds, that the transmitter drives the HS-0
+ * state prior to transmitting the Sync sequence.
+ */
+ unsigned int hs_zero;
+
+ /**
+ * @init:
+ *
+	 * Time, in microseconds, for the initialization period to
+ * complete.
+ *
+ * Minimum value: 100 us
+ */
+ unsigned int init;
+
+ /**
+ * @lpx:
+ *
+ * Transmitted length, in picoseconds, of any Low-Power state
+ * period.
+ *
+ * Minimum value: 50000 ps
+ */
+ unsigned int lpx;
+
+ /**
+ * @ta_get:
+ *
+ * Time, in picoseconds, that the new transmitter drives the
+ * Bridge state (LP-00) after accepting control during a Link
+ * Turnaround.
+ *
+ * Value: 5 * @lpx
+ */
+ unsigned int ta_get;
+
+ /**
+ * @ta_go:
+ *
+ * Time, in picoseconds, that the transmitter drives the
+ * Bridge state (LP-00) before releasing control during a Link
+ * Turnaround.
+ *
+ * Value: 4 * @lpx
+ */
+ unsigned int ta_go;
+
+ /**
+ * @ta_sure:
+ *
+ * Time, in picoseconds, that the new transmitter waits after
+ * the LP-10 state before transmitting the Bridge state
+ * (LP-00) during a Link Turnaround.
+ *
+ * Minimum value: @lpx
+ * Maximum value: 2 * @lpx
+ */
+ unsigned int ta_sure;
+
+ /**
+ * @wakeup:
+ *
+ * Time, in microseconds, that a transmitter drives a Mark-1
+ * state prior to a Stop state in order to initiate an exit
+ * from ULPS.
+ *
+ * Minimum value: 1000 us
+ */
+ unsigned int wakeup;
+
+ /**
+ * @hs_clk_rate:
+ *
+ * Clock rate, in Hertz, of the high-speed clock.
+ */
+ unsigned long hs_clk_rate;
+
+ /**
+ * @lp_clk_rate:
+ *
+ * Clock rate, in Hertz, of the low-power clock.
+ */
+ unsigned long lp_clk_rate;
+
+ /**
+ * @lanes:
+ *
+ * Number of active, consecutive, data lanes, starting from
+ * lane 0, used for the transmissions.
+ */
+ unsigned char lanes;
+};
+
+int phy_mipi_dphy_get_default_config(unsigned long pixel_clock,
+ unsigned int bpp,
+ unsigned int lanes,
+ struct phy_configure_opts_mipi_dphy *cfg);
+int phy_mipi_dphy_get_default_config_for_hsclk(unsigned long long hs_clk_rate,
+ unsigned int lanes,
+ struct phy_configure_opts_mipi_dphy *cfg);
+int phy_mipi_dphy_config_validate(struct phy_configure_opts_mipi_dphy *cfg);
+
+#endif /* __PHY_MIPI_DPHY_H_ */
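The helpers above keep consumer code short: derive a default timing set from the pixel clock, optionally validate it, then apply it. A hedged sketch; the wrapper name is an assumption:

#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>

/* Hypothetical sketch: derive D-PHY timings from the pixel clock, apply. */
static int example_dphy_configure(struct phy *phy, unsigned long pixel_clock,
				  unsigned int bpp, unsigned int lanes)
{
	union phy_configure_opts opts = { };
	int ret;

	ret = phy_mipi_dphy_get_default_config(pixel_clock, bpp, lanes,
					       &opts.mipi_dphy);
	if (ret)
		return ret;

	/* Optional sanity check before touching the hardware. */
	ret = phy_mipi_dphy_config_validate(&opts.mipi_dphy);
	if (ret)
		return ret;

	return phy_configure(phy, &opts);
}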
diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h
deleted file mode 100644
index 35c070ea6ea3..000000000000
--- a/include/linux/phy/phy-qcom-ufs.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef PHY_QCOM_UFS_H_
-#define PHY_QCOM_UFS_H_
-
-#include "phy.h"
-
-/**
- * ufs_qcom_phy_enable_dev_ref_clk() - Enable the device
- * ref clock.
- * @phy: reference to a generic phy.
- */
-void ufs_qcom_phy_enable_dev_ref_clk(struct phy *phy);
-
-/**
- * ufs_qcom_phy_disable_dev_ref_clk() - Disable the device
- * ref clock.
- * @phy: reference to a generic phy.
- */
-void ufs_qcom_phy_disable_dev_ref_clk(struct phy *phy);
-
-int ufs_qcom_phy_start_serdes(struct phy *phy);
-int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes);
-int ufs_qcom_phy_calibrate_phy(struct phy *phy, bool is_rate_B);
-int ufs_qcom_phy_is_pcs_ready(struct phy *phy);
-void ufs_qcom_phy_save_controller_version(struct phy *phy,
- u8 major, u16 minor, u16 step);
-
-#endif /* PHY_QCOM_UFS_H_ */
diff --git a/include/linux/phy/phy-sun4i-usb.h b/include/linux/phy/phy-sun4i-usb.h
index 50aed92ea89c..91eb755ee73b 100644
--- a/include/linux/phy/phy-sun4i-usb.h
+++ b/include/linux/phy/phy-sun4i-usb.h
@@ -1,14 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015 Hans de Goede <hdegoede@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef PHY_SUN4I_USB_H_
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 78bb0d7f6b11..3a570bc59fc7 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* phy.h -- generic phy header file
*
* Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
*
* Author: Kishon Vijay Abraham I <kishon@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __DRIVERS_PHY_H
@@ -20,13 +16,55 @@
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/phy/phy-dp.h>
+#include <linux/phy/phy-lvds.h>
+#include <linux/phy/phy-mipi-dphy.h>
+
struct phy;
enum phy_mode {
PHY_MODE_INVALID,
PHY_MODE_USB_HOST,
+ PHY_MODE_USB_HOST_LS,
+ PHY_MODE_USB_HOST_FS,
+ PHY_MODE_USB_HOST_HS,
+ PHY_MODE_USB_HOST_SS,
PHY_MODE_USB_DEVICE,
+ PHY_MODE_USB_DEVICE_LS,
+ PHY_MODE_USB_DEVICE_FS,
+ PHY_MODE_USB_DEVICE_HS,
+ PHY_MODE_USB_DEVICE_SS,
PHY_MODE_USB_OTG,
+ PHY_MODE_UFS_HS_A,
+ PHY_MODE_UFS_HS_B,
+ PHY_MODE_PCIE,
+ PHY_MODE_ETHERNET,
+ PHY_MODE_MIPI_DPHY,
+ PHY_MODE_SATA,
+ PHY_MODE_LVDS,
+ PHY_MODE_DP
+};
+
+enum phy_media {
+ PHY_MEDIA_DEFAULT,
+ PHY_MEDIA_SR,
+ PHY_MEDIA_DAC,
+};
+
+/**
+ * union phy_configure_opts - Opaque generic phy configuration
+ *
+ * @mipi_dphy: Configuration set applicable for phys supporting
+ * the MIPI_DPHY phy mode.
+ * @dp: Configuration set applicable for phys supporting
+ * the DisplayPort protocol.
+ * @lvds: Configuration set applicable for phys supporting
+ * the LVDS phy mode.
+ */
+union phy_configure_opts {
+ struct phy_configure_opts_mipi_dphy mipi_dphy;
+ struct phy_configure_opts_dp dp;
+ struct phy_configure_opts_lvds lvds;
};
/**
@@ -36,7 +74,11 @@ enum phy_mode {
* @power_on: powering on the phy
* @power_off: powering off the phy
* @set_mode: set the mode of the phy
+ * @set_media: set the media type of the phy (optional)
+ * @set_speed: set the speed of the phy (optional)
* @reset: resetting the phy
+ * @calibrate: calibrate the phy
+ * @release: ops to be performed while the consumer relinquishes the PHY
* @owner: the module owner containing the ops
*/
struct phy_ops {
@@ -44,17 +86,56 @@ struct phy_ops {
int (*exit)(struct phy *phy);
int (*power_on)(struct phy *phy);
int (*power_off)(struct phy *phy);
- int (*set_mode)(struct phy *phy, enum phy_mode mode);
+ int (*set_mode)(struct phy *phy, enum phy_mode mode, int submode);
+ int (*set_media)(struct phy *phy, enum phy_media media);
+ int (*set_speed)(struct phy *phy, int speed);
+
+ /**
+ * @configure:
+ *
+ * Optional.
+ *
+ * Used to change the PHY parameters. phy_init() must have
+ * been called on the phy.
+ *
+	 * Returns: 0 if successful, a negative error code otherwise
+ */
+ int (*configure)(struct phy *phy, union phy_configure_opts *opts);
+
+ /**
+ * @validate:
+ *
+ * Optional.
+ *
+ * Used to check that the current set of parameters can be
+ * handled by the phy. Implementations are free to tune the
+ * parameters passed as arguments if needed by some
+ * implementation detail or constraints. It must not change
+ * any actual configuration of the PHY, so calling it as many
+ * times as deemed fit by the consumer must have no side
+ * effect.
+ *
+	 * Returns: 0 if the configuration can be applied, a negative
+ * error code otherwise
+ */
+ int (*validate)(struct phy *phy, enum phy_mode mode, int submode,
+ union phy_configure_opts *opts);
int (*reset)(struct phy *phy);
+ int (*calibrate)(struct phy *phy);
+ void (*release)(struct phy *phy);
struct module *owner;
};
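Provider-side, a driver embeds a subset of these ops. A minimal hedged sketch; every example_* name is an assumption for illustration:

#include <linux/module.h>
#include <linux/phy/phy.h>

static int example_phy_power_on(struct phy *phy)
{
	/* enable clocks and regulators here */
	return 0;
}

static int example_phy_set_mode(struct phy *phy, enum phy_mode mode,
				int submode)
{
	if (mode != PHY_MODE_USB_HOST)
		return -EINVAL;
	/* program host mode; @submode may carry a speed variant */
	return 0;
}

static const struct phy_ops example_phy_ops = {
	.power_on = example_phy_power_on,
	.set_mode = example_phy_set_mode,
	.owner = THIS_MODULE,
};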
/**
* struct phy_attrs - represents phy attributes
* @bus_width: Data path width implemented by PHY
+ * @max_link_rate: Maximum link rate supported by PHY (units to be decided by producer and consumer)
+ * @mode: PHY mode
*/
struct phy_attrs {
u32 bus_width;
+ u32 max_link_rate;
+ enum phy_mode mode;
};
/**
@@ -62,11 +143,11 @@ struct phy_attrs {
* @dev: phy device
* @id: id of the phy device
* @ops: function pointers for performing phy operations
- * @init_data: list of PHY consumers (non-dt only)
* @mutex: mutex to protect phy_ops
* @init_count: used to protect when the PHY is used by multiple consumers
* @power_count: used to protect when the PHY is used by multiple consumers
- * @phy_attrs: used to specify PHY specific attributes
+ * @attrs: used to specify PHY specific attributes
+ * @pwr: power regulator associated with the phy
*/
struct phy {
struct device dev;
@@ -82,9 +163,10 @@ struct phy {
/**
* struct phy_provider - represents the phy provider
* @dev: phy provider device
+ * @children: can be used to override the default (dev->of_node) child node
* @owner: the module owner having of_xlate
- * @of_xlate: function pointer to obtain phy instance from phy pointer
* @list: to maintain a linked list of PHY providers
+ * @of_xlate: function pointer to obtain phy instance from phy pointer
*/
struct phy_provider {
struct device *dev;
@@ -95,6 +177,13 @@ struct phy_provider {
struct of_phandle_args *args);
};
+/**
+ * struct phy_lookup - PHY association in list of phys managed by the phy driver
+ * @node: list node
+ * @dev_id: the device of the association
+ * @con_id: connection ID string on device
+ * @phy: the phy of the association
+ */
struct phy_lookup {
struct list_head node;
const char *dev_id;
@@ -137,8 +226,21 @@ int phy_init(struct phy *phy);
int phy_exit(struct phy *phy);
int phy_power_on(struct phy *phy);
int phy_power_off(struct phy *phy);
-int phy_set_mode(struct phy *phy, enum phy_mode mode);
+int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode);
+#define phy_set_mode(phy, mode) \
+ phy_set_mode_ext(phy, mode, 0)
+int phy_set_media(struct phy *phy, enum phy_media media);
+int phy_set_speed(struct phy *phy, int speed);
+int phy_configure(struct phy *phy, union phy_configure_opts *opts);
+int phy_validate(struct phy *phy, enum phy_mode mode, int submode,
+ union phy_configure_opts *opts);
+
+static inline enum phy_mode phy_get_mode(struct phy *phy)
+{
+ return phy->attrs.mode;
+}
int phy_reset(struct phy *phy);
+int phy_calibrate(struct phy *phy);
static inline int phy_get_bus_width(struct phy *phy)
{
return phy->attrs.bus_width;
@@ -148,14 +250,16 @@ static inline void phy_set_bus_width(struct phy *phy, int bus_width)
phy->attrs.bus_width = bus_width;
}
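Consumer-side, the canonical bring-up order is get, init, set_mode, power_on. A hedged sketch using the optional getter (which yields NULL when the phy is absent; the phy_* calls tolerate a NULL phy); the function and consumer name are assumptions:

#include <linux/phy/phy.h>

/* Hypothetical sketch: bring up an optional "usb" phy for a device. */
static int example_phy_bringup(struct device *dev)
{
	struct phy *phy;
	int ret;

	phy = devm_phy_optional_get(dev, "usb");	/* NULL if absent */
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	ret = phy_init(phy);
	if (ret)
		return ret;

	ret = phy_set_mode(phy, PHY_MODE_USB_HOST);
	if (ret)
		goto err_exit;

	ret = phy_power_on(phy);
	if (ret)
		goto err_exit;

	return 0;

err_exit:
	phy_exit(phy);
	return ret;
}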
struct phy *phy_get(struct device *dev, const char *string);
-struct phy *phy_optional_get(struct device *dev, const char *string);
struct phy *devm_phy_get(struct device *dev, const char *string);
struct phy *devm_phy_optional_get(struct device *dev, const char *string);
struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
const char *con_id);
+struct phy *devm_of_phy_optional_get(struct device *dev, struct device_node *np,
+ const char *con_id);
struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
int index);
-void phy_put(struct phy *phy);
+void of_phy_put(struct phy *phy);
+void phy_put(struct device *dev, struct phy *phy);
void devm_phy_put(struct device *dev, struct phy *phy);
struct phy *of_phy_get(struct device_node *np, const char *con_id);
struct phy *of_phy_simple_xlate(struct device *dev,
@@ -246,13 +350,36 @@ static inline int phy_power_off(struct phy *phy)
return -ENOSYS;
}
-static inline int phy_set_mode(struct phy *phy, enum phy_mode mode)
+static inline int phy_set_mode_ext(struct phy *phy, enum phy_mode mode,
+ int submode)
{
if (!phy)
return 0;
return -ENOSYS;
}
+#define phy_set_mode(phy, mode) \
+ phy_set_mode_ext(phy, mode, 0)
+
+static inline int phy_set_media(struct phy *phy, enum phy_media media)
+{
+ if (!phy)
+ return 0;
+ return -ENODEV;
+}
+
+static inline int phy_set_speed(struct phy *phy, int speed)
+{
+ if (!phy)
+ return 0;
+ return -ENODEV;
+}
+
+static inline enum phy_mode phy_get_mode(struct phy *phy)
+{
+ return PHY_MODE_INVALID;
+}
+
static inline int phy_reset(struct phy *phy)
{
if (!phy)
@@ -260,6 +387,31 @@ static inline int phy_reset(struct phy *phy)
return -ENOSYS;
}
+static inline int phy_calibrate(struct phy *phy)
+{
+ if (!phy)
+ return 0;
+ return -ENOSYS;
+}
+
+static inline int phy_configure(struct phy *phy,
+ union phy_configure_opts *opts)
+{
+ if (!phy)
+ return 0;
+
+ return -ENOSYS;
+}
+
+static inline int phy_validate(struct phy *phy, enum phy_mode mode, int submode,
+ union phy_configure_opts *opts)
+{
+ if (!phy)
+ return 0;
+
+ return -ENOSYS;
+}
+
static inline int phy_get_bus_width(struct phy *phy)
{
return -ENOSYS;
@@ -275,12 +427,6 @@ static inline struct phy *phy_get(struct device *dev, const char *string)
return ERR_PTR(-ENOSYS);
}
-static inline struct phy *phy_optional_get(struct device *dev,
- const char *string)
-{
- return ERR_PTR(-ENOSYS);
-}
-
static inline struct phy *devm_phy_get(struct device *dev, const char *string)
{
return ERR_PTR(-ENOSYS);
@@ -289,7 +435,7 @@ static inline struct phy *devm_phy_get(struct device *dev, const char *string)
static inline struct phy *devm_phy_optional_get(struct device *dev,
const char *string)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline struct phy *devm_of_phy_get(struct device *dev,
@@ -299,6 +445,13 @@ static inline struct phy *devm_of_phy_get(struct device *dev,
return ERR_PTR(-ENOSYS);
}
+static inline struct phy *devm_of_phy_optional_get(struct device *dev,
+ struct device_node *np,
+ const char *con_id)
+{
+ return NULL;
+}
+
static inline struct phy *devm_of_phy_get_by_index(struct device *dev,
struct device_node *np,
int index)
@@ -306,7 +459,11 @@ static inline struct phy *devm_of_phy_get_by_index(struct device *dev,
return ERR_PTR(-ENOSYS);
}
-static inline void phy_put(struct phy *phy)
+static inline void of_phy_put(struct phy *phy)
+{
+}
+
+static inline void phy_put(struct device *dev, struct phy *phy)
{
}
diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h
index 8e1a57a78d9f..70998e6dd6fd 100644
--- a/include/linux/phy/tegra/xusb.h
+++ b/include/linux/phy/tegra/xusb.h
@@ -1,14 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef PHY_TEGRA_XUSB_H
@@ -16,6 +8,7 @@
struct tegra_xusb_padctl;
struct device;
+enum usb_device_speed;
struct tegra_xusb_padctl *tegra_xusb_padctl_get(struct device *dev);
void tegra_xusb_padctl_put(struct tegra_xusb_padctl *padctl);
@@ -26,5 +19,18 @@ int tegra_xusb_padctl_hsic_set_idle(struct tegra_xusb_padctl *padctl,
unsigned int port, bool idle);
int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
unsigned int port, bool enable);
+int tegra_xusb_padctl_set_vbus_override(struct tegra_xusb_padctl *padctl,
+ bool val);
+void tegra_phy_xusb_utmi_pad_power_on(struct phy *phy);
+void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy);
+int tegra_phy_xusb_utmi_port_reset(struct phy *phy);
+int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl,
+ unsigned int port);
+int tegra_xusb_padctl_enable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy,
+ enum usb_device_speed speed);
+int tegra_xusb_padctl_disable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy);
+int tegra_xusb_padctl_enable_phy_wake(struct tegra_xusb_padctl *padctl, struct phy *phy);
+int tegra_xusb_padctl_disable_phy_wake(struct tegra_xusb_padctl *padctl, struct phy *phy);
+bool tegra_xusb_padctl_remote_wake_detected(struct tegra_xusb_padctl *padctl, struct phy *phy);
#endif /* PHY_TEGRA_XUSB_H */
diff --git a/include/linux/phy/ulpi_phy.h b/include/linux/phy/ulpi_phy.h
index f2ebe490a4bc..7054b440347c 100644
--- a/include/linux/phy/ulpi_phy.h
+++ b/include/linux/phy/ulpi_phy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/phy/phy.h>
/**
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index 1d41ec44e39d..1acafd86ab13 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -1,6 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PHY_FIXED_H
#define __PHY_FIXED_H
+#include <linux/types.h>
+
struct fixed_phy_status {
int link;
int speed;
@@ -10,36 +13,47 @@ struct fixed_phy_status {
};
struct device_node;
+struct gpio_desc;
+struct net_device;
#if IS_ENABLED(CONFIG_FIXED_PHY)
+extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier);
extern int fixed_phy_add(unsigned int irq, int phy_id,
- struct fixed_phy_status *status,
- int link_gpio);
+ struct fixed_phy_status *status);
extern struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
- int link_gpio,
struct device_node *np);
+
+extern struct phy_device *
+fixed_phy_register_with_gpiod(unsigned int irq,
+ struct fixed_phy_status *status,
+ struct gpio_desc *gpiod);
+
extern void fixed_phy_unregister(struct phy_device *phydev);
extern int fixed_phy_set_link_update(struct phy_device *phydev,
int (*link_update)(struct net_device *,
struct fixed_phy_status *));
-extern int fixed_phy_update_state(struct phy_device *phydev,
- const struct fixed_phy_status *status,
- const struct fixed_phy_status *changed);
#else
static inline int fixed_phy_add(unsigned int irq, int phy_id,
- struct fixed_phy_status *status,
- int link_gpio)
+ struct fixed_phy_status *status)
{
return -ENODEV;
}
static inline struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
- int gpio_link,
struct device_node *np)
{
return ERR_PTR(-ENODEV);
}
+
+static inline struct phy_device *
+fixed_phy_register_with_gpiod(unsigned int irq,
+ struct fixed_phy_status *status,
+ struct gpio_desc *gpiod)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline void fixed_phy_unregister(struct phy_device *phydev)
{
}
@@ -49,11 +63,9 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev,
{
return -ENODEV;
}
-static inline int fixed_phy_update_state(struct phy_device *phydev,
- const struct fixed_phy_status *status,
- const struct fixed_phy_status *changed)
+static inline int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier)
{
- return -ENODEV;
+ return -EINVAL;
}
#endif /* CONFIG_FIXED_PHY */
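A hedged sketch of the new gpiod-based registration, for a fixed 1G full-duplex link whose carrier is sensed via a GPIO; the wrapper name is an assumption:

#include <linux/phy.h>
#include <linux/phy_fixed.h>

/* Hypothetical sketch: polled fixed "PHY", link state driven by a GPIO. */
static struct phy_device *example_fixed_phy(struct gpio_desc *link_gpiod)
{
	struct fixed_phy_status status = {
		.link = 1,
		.speed = 1000,
		.duplex = 1,
	};

	return fixed_phy_register_with_gpiod(PHY_POLL, &status, link_gpiod);
}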
diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h
index b37b05bfd1a6..5c4d7a755101 100644
--- a/include/linux/phy_led_triggers.h
+++ b/include/linux/phy_led_triggers.h
@@ -1,14 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Copyright (C) 2016 National Instruments Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __PHY_LED_TRIGGERS
#define __PHY_LED_TRIGGERS
@@ -20,10 +11,10 @@ struct phy_device;
#include <linux/leds.h>
#include <linux/phy.h>
-#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10
+#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 11
#define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \
- FIELD_SIZEOF(struct mdio_device, addr)+\
+ sizeof_field(struct mdio_device, addr)+\
PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE)
struct phy_led_trigger {
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
new file mode 100644
index 000000000000..71755c66c162
--- /dev/null
+++ b/include/linux/phylink.h
@@ -0,0 +1,663 @@
+#ifndef NETDEV_PCS_H
+#define NETDEV_PCS_H
+
+#include <linux/phy.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+struct device_node;
+struct ethtool_cmd;
+struct fwnode_handle;
+struct net_device;
+
+enum {
+ MLO_PAUSE_NONE,
+ MLO_PAUSE_RX = BIT(0),
+ MLO_PAUSE_TX = BIT(1),
+ MLO_PAUSE_TXRX_MASK = MLO_PAUSE_TX | MLO_PAUSE_RX,
+ MLO_PAUSE_AN = BIT(2),
+
+ MLO_AN_PHY = 0, /* Conventional PHY */
+ MLO_AN_FIXED, /* Fixed-link mode */
+ MLO_AN_INBAND, /* In-band protocol */
+
+ /* MAC_SYM_PAUSE and MAC_ASYM_PAUSE are used when configuring our
+ * autonegotiation advertisement. They correspond to the PAUSE and
+ * ASM_DIR bits defined by 802.3, respectively.
+ *
+ * The following table lists the values of tx_pause and rx_pause which
+ * might be requested in mac_link_up. The exact values depend on either
+	 * the results of autonegotiation (if MLO_PAUSE_AN is set) or user
+ * configuration (if MLO_PAUSE_AN is not set).
+ *
+ * MAC_SYM_PAUSE MAC_ASYM_PAUSE MLO_PAUSE_AN tx_pause/rx_pause
+ * ============= ============== ============ ==================
+ * 0 0 0 0/0
+ * 0 0 1 0/0
+ * 0 1 0 0/0, 0/1, 1/0, 1/1
+ * 0 1 1 0/0, 1/0
+ * 1 0 0 0/0, 1/1
+ * 1 0 1 0/0, 1/1
+ * 1 1 0 0/0, 0/1, 1/0, 1/1
+ * 1 1 1 0/0, 0/1, 1/1
+ *
+ * If you set MAC_ASYM_PAUSE, the user may request any combination of
+ * tx_pause and rx_pause. You do not have to support these
+ * combinations.
+ *
+ * However, you should support combinations of tx_pause and rx_pause
+	 * which might be the result of autonegotiation. For example, don't set
+ * MAC_SYM_PAUSE unless your device can support tx_pause and rx_pause
+ * at the same time.
+ */
+ MAC_SYM_PAUSE = BIT(0),
+ MAC_ASYM_PAUSE = BIT(1),
+ MAC_10HD = BIT(2),
+ MAC_10FD = BIT(3),
+ MAC_10 = MAC_10HD | MAC_10FD,
+ MAC_100HD = BIT(4),
+ MAC_100FD = BIT(5),
+ MAC_100 = MAC_100HD | MAC_100FD,
+ MAC_1000HD = BIT(6),
+ MAC_1000FD = BIT(7),
+ MAC_1000 = MAC_1000HD | MAC_1000FD,
+ MAC_2500FD = BIT(8),
+ MAC_5000FD = BIT(9),
+ MAC_10000FD = BIT(10),
+ MAC_20000FD = BIT(11),
+ MAC_25000FD = BIT(12),
+ MAC_40000FD = BIT(13),
+ MAC_50000FD = BIT(14),
+ MAC_56000FD = BIT(15),
+ MAC_100000FD = BIT(16),
+ MAC_200000FD = BIT(17),
+ MAC_400000FD = BIT(18),
+};
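The MAC_* bits above are meant to be OR-ed into the mac_capabilities field of &struct phylink_config (defined below). As a sketch, a 10/100/1000 MAC supporting both pause directions might advertise:

/* Illustrative only: capabilities of a hypothetical 10/100/1000 MAC. */
static const unsigned long example_mac_capabilities =
	MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10 | MAC_100 | MAC_1000;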
+
+static inline bool phylink_autoneg_inband(unsigned int mode)
+{
+ return mode == MLO_AN_INBAND;
+}
+
+/**
+ * struct phylink_link_state - link state structure
+ * @advertising: ethtool bitmask containing advertised link modes
+ * @lp_advertising: ethtool bitmask containing link partner advertised link
+ * modes
+ * @interface: link &typedef phy_interface_t mode
+ * @speed: link speed, one of the SPEED_* constants.
+ * @duplex: link duplex mode, one of DUPLEX_* constants.
+ * @pause: link pause state, described by MLO_PAUSE_* constants.
+ * @rate_matching: rate matching being performed, one of the RATE_MATCH_*
+ * constants. If rate matching is taking place, then the speed/duplex of
+ * the medium link mode (@speed and @duplex) and the speed/duplex of the phy
+ * interface mode (@interface) are different.
+ * @link: true if the link is up.
+ * @an_complete: true if autonegotiation has completed.
+ */
+struct phylink_link_state {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
+ phy_interface_t interface;
+ int speed;
+ int duplex;
+ int pause;
+ int rate_matching;
+ unsigned int link:1;
+ unsigned int an_complete:1;
+};
+
+enum phylink_op_type {
+ PHYLINK_NETDEV = 0,
+ PHYLINK_DEV,
+};
+
+/**
+ * struct phylink_config - PHYLINK configuration structure
+ * @dev: a pointer to a struct device associated with the MAC
+ * @type: operation type of PHYLINK instance
+ * @legacy_pre_march2020: driver has not been updated for March 2020 updates
+ *	(See commit 7cceb599d15d ("net: phylink: avoid mac_config calls"))
+ * @poll_fixed_state: if true, starts link_poll,
+ * if MAC link is at %MLO_AN_FIXED mode.
+ * @mac_managed_pm: if true, indicate the MAC driver is responsible for PHY PM.
+ * @ovr_an_inband: if true, override PCS to MLO_AN_INBAND
+ * @get_fixed_state: callback to execute to determine the fixed link state,
+ * if MAC link is at %MLO_AN_FIXED mode.
+ * @supported_interfaces: bitmap describing which PHY_INTERFACE_MODE_xxx
+ * are supported by the MAC/PCS.
+ * @mac_capabilities: MAC pause/speed/duplex capabilities.
+ */
+struct phylink_config {
+ struct device *dev;
+ enum phylink_op_type type;
+ bool legacy_pre_march2020;
+ bool poll_fixed_state;
+ bool mac_managed_pm;
+ bool ovr_an_inband;
+ void (*get_fixed_state)(struct phylink_config *config,
+ struct phylink_link_state *state);
+ DECLARE_PHY_INTERFACE_MASK(supported_interfaces);
+ unsigned long mac_capabilities;
+};
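A hedged sketch of filling this structure ahead of phylink_create(); the example_priv container and the chosen interface modes are assumptions:

struct example_priv {
	struct net_device *ndev;
	struct phylink_config phylink_config;
};

static void example_fill_phylink_config(struct example_priv *priv)
{
	priv->phylink_config.dev = &priv->ndev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE |
		MAC_ASYM_PAUSE | MAC_10 | MAC_100 | MAC_1000;
	__set_bit(PHY_INTERFACE_MODE_RGMII,
		  priv->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_SGMII,
		  priv->phylink_config.supported_interfaces);
}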
+
+/**
+ * struct phylink_mac_ops - MAC operations structure.
+ * @validate: Validate and update the link configuration.
+ * @mac_select_pcs: Select a PCS for the interface mode.
+ * @mac_pcs_get_state: Read the current link state from the hardware.
+ * @mac_prepare: prepare for a major reconfiguration of the interface.
+ * @mac_config: configure the MAC for the selected mode and state.
+ * @mac_finish: finish a major reconfiguration of the interface.
+ * @mac_an_restart: restart 802.3z BaseX autonegotiation.
+ * @mac_link_down: take the link down.
+ * @mac_link_up: allow the link to come up.
+ *
+ * The individual methods are described more fully below.
+ */
+struct phylink_mac_ops {
+ void (*validate)(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state);
+ struct phylink_pcs *(*mac_select_pcs)(struct phylink_config *config,
+ phy_interface_t interface);
+ void (*mac_pcs_get_state)(struct phylink_config *config,
+ struct phylink_link_state *state);
+ int (*mac_prepare)(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface);
+ void (*mac_config)(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state);
+ int (*mac_finish)(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface);
+ void (*mac_an_restart)(struct phylink_config *config);
+ void (*mac_link_down)(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface);
+ void (*mac_link_up)(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
+ bool tx_pause, bool rx_pause);
+};
+
+#if 0 /* For kernel-doc purposes only. */
+/**
+ * validate - Validate and update the link configuration
+ * @config: a pointer to a &struct phylink_config.
+ * @supported: ethtool bitmask for supported link modes.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Clear bits in the @supported and @state->advertising masks that
+ * are not supportable by the MAC.
+ *
+ * Note that the PHY may be able to transform from one connection
+ * technology to another, so, e.g., don't clear 1000BaseX just
+ * because the MAC is unable to operate in BaseX mode. This is more about
+ * clearing unsupported speeds and duplex settings. The port modes
+ * should not be cleared; phylink_set_port_modes() will help with this.
+ *
+ * When @config->supported_interfaces has been set, phylink will iterate
+ * over the supported interfaces to determine the full capability of the
+ * MAC. The validation function must not print errors if @state->interface
+ * is set to an unexpected value.
+ *
+ * When @config->supported_interfaces is empty, phylink will call this
+ * function with @state->interface set to %PHY_INTERFACE_MODE_NA, and
+ * expects the MAC driver to return all supported link modes.
+ *
+ * If the @state->interface mode is not supported, then the @supported
+ * mask must be cleared.
+ *
+ * This member is optional; if not set, the generic validator will be
+ * used making use of @config->mac_capabilities and
+ * @config->supported_interfaces to determine which link modes are
+ * supported.
+ */
+void validate(struct phylink_config *config, unsigned long *supported,
+ struct phylink_link_state *state);
+/**
+ * mac_select_pcs() - Select a PCS for the interface mode.
+ * @config: a pointer to a &struct phylink_config.
+ * @interface: PHY interface mode for PCS
+ *
+ * Return the &struct phylink_pcs for the specified interface mode, or
+ * NULL if none is required, or an error pointer on error.
+ *
+ * This must not modify any state. It is used to query which PCS should
+ * be used. Phylink will use this during validation to ensure that the
+ * configuration is valid, and when setting a configuration to internally
+ * set the PCS that will be used.
+ */
+struct phylink_pcs *mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface);
+
+/**
+ * mac_pcs_get_state() - Read the current inband link state from the hardware
+ * @config: a pointer to a &struct phylink_config.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Read the current inband link state from the MAC PCS, reporting the
+ * current speed in @state->speed, duplex mode in @state->duplex, pause
+ * mode in @state->pause using the %MLO_PAUSE_RX and %MLO_PAUSE_TX bits,
+ * negotiation completion state in @state->an_complete, and link up state
+ * in @state->link. If possible, @state->lp_advertising should also be
+ * populated.
+ *
+ * Note: This is a legacy method. This function will not be called unless
+ * legacy_pre_march2020 is set in &struct phylink_config and there is no
+ * PCS attached.
+ */
+void mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state);
+
+/**
+ * mac_prepare() - prepare to change the PHY interface mode
+ * @config: a pointer to a &struct phylink_config.
+ * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @iface: interface mode to switch to
+ *
+ * phylink will call this method at the beginning of a full initialisation
+ * of the link, which includes changing the interface mode or at initial
+ * startup time. It may be called for the current mode. The MAC driver
+ * should perform whatever actions are required, e.g. disabling the
+ * Serdes PHY.
+ *
+ * This will be the first call in the sequence:
+ * - mac_prepare()
+ * - mac_config()
+ * - pcs_config()
+ * - possible pcs_an_restart()
+ * - mac_finish()
+ *
+ * Returns zero on success, or negative errno on failure which will be
+ * reported to the kernel log.
+ */
+int mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface);
+
+/**
+ * mac_config() - configure the MAC for the selected mode and state
+ * @config: a pointer to a &struct phylink_config.
+ * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Note - not all members of @state are valid. In particular,
+ * @state->lp_advertising, @state->link, @state->an_complete are never
+ * guaranteed to be correct, and so any mac_config() implementation must
+ * never reference these fields.
+ *
+ * Note: For legacy March 2020 drivers (drivers with legacy_pre_march2020 set
+ * in their &phylink_config and which don't have a PCS), this function will be
+ * called on each link up event, and to also change the in-band advert. For
+ * non-legacy drivers, it will only be called to reconfigure the MAC for a
+ * "major" change in e.g. interface mode. It will not be called for changes
+ * in speed, duplex or pause modes or to change the in-band advertisement.
+ * In any case, it is strongly preferred that speed, duplex and pause settings
+ * are handled in the mac_link_up() method and not in this method.
+ *
+ * (this requires a rewrite - please refer to mac_link_up() for situations
+ * where the PCS and MAC are not tightly integrated.)
+ *
+ * In all negotiation modes, as defined by @mode, @state->pause indicates the
+ * pause settings which should be applied as follows. If %MLO_PAUSE_AN is not
+ * set, %MLO_PAUSE_TX and %MLO_PAUSE_RX indicate whether the MAC should send
+ * pause frames and/or act on received pause frames respectively. Otherwise,
+ * the results of in-band negotiation/status from the MAC PCS should be used
+ * to control the MAC pause mode settings.
+ *
+ * The action performed depends on the currently selected mode:
+ *
+ * %MLO_AN_FIXED, %MLO_AN_PHY:
+ * Configure for non-inband negotiation mode, where the link settings
+ * are completely communicated via mac_link_up(). The physical link
+ * protocol from the MAC is specified by @state->interface.
+ *
+ * @state->advertising may be used, but is not required.
+ *
+ * Older drivers (prior to the mac_link_up() change) may use @state->speed,
+ * @state->duplex and @state->pause to configure the MAC, but this is
+ * deprecated; such drivers should be converted to use mac_link_up().
+ *
+ * Other members of @state must be ignored.
+ *
+ * Valid state members: interface, advertising.
+ * Deprecated state members: speed, duplex, pause.
+ *
+ * %MLO_AN_INBAND:
+ * place the link in an inband negotiation mode (such as 802.3z
+ * 1000base-X or Cisco SGMII mode depending on the @state->interface
+ * mode). In both cases, link state management (whether the link
+ * is up or not) is performed by the MAC, and reported via the
+ * mac_pcs_get_state() callback. Changes in link state must be made
+ * by calling phylink_mac_change().
+ *
+ * Interface mode specific details are mentioned below.
+ *
+ * If in 802.3z mode, the link speed is fixed, dependent on the
+ * @state->interface. Duplex and pause modes are negotiated via
+ * the in-band configuration word. Advertised pause modes are set
+ * according to the @state->an_enabled and @state->advertising
+ * flags. Beware of MACs which only support full duplex at gigabit
+ * and higher speeds.
+ *
+ * If in Cisco SGMII mode, the link speed and duplex mode are passed
+ * in the serial bitstream 16-bit configuration word, and the MAC
+ * should be configured to read these bits and acknowledge the
+ * configuration word. Nothing is advertised by the MAC. The MAC is
+ * responsible for reading the configuration word and configuring
+ * itself accordingly.
+ *
+ * Valid state members: interface, an_enabled, pause, advertising.
+ *
+ * Implementations are expected to update the MAC to reflect the
+ *   requested settings; in other words, if nothing has changed between two
+ * calls, no action is expected. If only flow control settings have
+ * changed, flow control should be updated *without* taking the link
+ * down. This "update" behaviour is critical to avoid bouncing the
+ * link up status.
+ */
+void mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state);
+
+/**
+ * mac_finish() - finish a change of the PHY interface mode
+ * @config: a pointer to a &struct phylink_config.
+ * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @iface: interface mode to switch to
+ *
+ * phylink will call this if it called mac_prepare() to allow the MAC to
+ * complete any necessary steps after the MAC and PCS have been configured
+ * for the @mode and @iface. E.g. a MAC driver may wish to re-enable the
+ * Serdes PHY here if it was previously disabled by mac_prepare().
+ *
+ * Returns zero on success, or negative errno on failure which will be
+ * reported to the kernel log.
+ */
+int mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface);
+
+/**
+ * mac_an_restart() - restart 802.3z BaseX autonegotiation
+ * @config: a pointer to a &struct phylink_config.
+ *
+ * Note: This is a legacy method. This function will not be called unless
+ * legacy_pre_march2020 is set in &struct phylink_config and there is no
+ * PCS attached.
+ */
+void mac_an_restart(struct phylink_config *config);
+
+/**
+ * mac_link_down() - take the link down
+ * @config: a pointer to a &struct phylink_config.
+ * @mode: link autonegotiation mode
+ * @interface: link &typedef phy_interface_t mode
+ *
+ * If @mode is not an in-band negotiation mode (as defined by
+ * phylink_autoneg_inband()), force the link down and disable any
+ * Energy Efficient Ethernet MAC configuration. Interface type
+ * selection must be done in mac_config().
+ */
+void mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface);
+
+/**
+ * mac_link_up() - allow the link to come up
+ * @config: a pointer to a &struct phylink_config.
+ * @phy: any attached phy
+ * @mode: link autonegotiation mode
+ * @interface: link &typedef phy_interface_t mode
+ * @speed: link speed
+ * @duplex: link duplex
+ * @tx_pause: link transmit pause enablement status
+ * @rx_pause: link receive pause enablement status
+ *
+ * Configure the MAC for an established link.
+ *
+ * @speed, @duplex, @tx_pause and @rx_pause indicate the finalised link
+ * settings, and should be used to configure the MAC block appropriately
+ * where these settings are not automatically conveyed from the PCS block,
+ * or if in-band negotiation (as defined by phylink_autoneg_inband(@mode))
+ * is disabled.
+ *
+ * Note that when 802.3z in-band negotiation is in use, it is possible
+ * that the user wishes to override the pause settings, and this should
+ * be allowed when considering the implementation of this method.
+ *
+ * If in-band negotiation mode is disabled, allow the link to come up. If
+ * @phy is non-%NULL, configure Energy Efficient Ethernet by calling
+ * phy_init_eee() and perform appropriate MAC configuration for EEE.
+ * Interface type selection must be done in mac_config().
+ */
+void mac_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause);
+#endif
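Putting the mandatory methods together, a skeleton ops table; register programming is elided and every example_* name is an assumption:

static void example_mac_config(struct phylink_config *config,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	/* "major" reconfiguration only; see the mac_config() notes above */
}

static void example_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
	/* stop the MAC transmit and receive paths */
}

static void example_mac_link_up(struct phylink_config *config,
				struct phy_device *phy, unsigned int mode,
				phy_interface_t interface, int speed,
				int duplex, bool tx_pause, bool rx_pause)
{
	/* program speed, duplex and pause, then enable the MAC */
}

static const struct phylink_mac_ops example_mac_ops = {
	.mac_config = example_mac_config,
	.mac_link_down = example_mac_link_down,
	.mac_link_up = example_mac_link_up,
};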
+
+struct phylink_pcs_ops;
+
+/**
+ * struct phylink_pcs - PHYLINK PCS instance
+ * @ops: a pointer to the &struct phylink_pcs_ops structure
+ * @poll: poll the PCS for link changes
+ *
+ * This structure is designed to be embedded within the PCS private data,
+ * and will be passed between phylink and the PCS.
+ */
+struct phylink_pcs {
+ const struct phylink_pcs_ops *ops;
+ bool poll;
+};
+
+/**
+ * struct phylink_pcs_ops - MAC PCS operations structure.
+ * @pcs_validate: validate the link configuration.
+ * @pcs_get_state: read the current MAC PCS link state from the hardware.
+ * @pcs_config: configure the MAC PCS for the selected mode and state.
+ * @pcs_an_restart: restart 802.3z BaseX autonegotiation.
+ * @pcs_link_up: program the PCS for the resolved link configuration
+ * (where necessary).
+ */
+struct phylink_pcs_ops {
+ int (*pcs_validate)(struct phylink_pcs *pcs, unsigned long *supported,
+ const struct phylink_link_state *state);
+ void (*pcs_get_state)(struct phylink_pcs *pcs,
+ struct phylink_link_state *state);
+ int (*pcs_config)(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac);
+ void (*pcs_an_restart)(struct phylink_pcs *pcs);
+ void (*pcs_link_up)(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex);
+};
+
+#if 0 /* For kernel-doc purposes only. */
+/**
+ * pcs_validate() - validate the link configuration.
+ * @pcs: a pointer to a &struct phylink_pcs.
+ * @supported: ethtool bitmask for supported link modes.
+ * @state: a const pointer to a &struct phylink_link_state.
+ *
+ * Validate the interface mode, and advertising's autoneg bit, removing any
+ * media ethtool link modes that would not be supportable from the supported
+ * mask. Phylink will propagate the changes to the advertising mask. See the
+ * &struct phylink_mac_ops validate() method.
+ *
+ * Returns -EINVAL if the interface mode/autoneg mode is not supported.
+ * Returns a positive non-zero value if the link state can be supported.
+ */
+int pcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
+ const struct phylink_link_state *state);
+
+/**
+ * pcs_get_state() - Read the current inband link state from the hardware
+ * @pcs: a pointer to a &struct phylink_pcs.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Read the current inband link state from the MAC PCS, reporting the
+ * current speed in @state->speed, duplex mode in @state->duplex, pause
+ * mode in @state->pause using the %MLO_PAUSE_RX and %MLO_PAUSE_TX bits,
+ * negotiation completion state in @state->an_complete, and link up state
+ * in @state->link. If possible, @state->lp_advertising should also be
+ * populated.
+ *
+ * When present, this overrides mac_pcs_get_state() in &struct
+ * phylink_mac_ops.
+ */
+void pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state);
+
+/**
+ * pcs_config() - Configure the PCS mode and advertisement
+ * @pcs: a pointer to a &struct phylink_pcs.
+ * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @interface: interface mode to be used
+ * @advertising: advertisement ethtool link mode mask
+ * @permit_pause_to_mac: permit forwarding pause resolution to MAC
+ *
+ * Configure the PCS for the operating mode, the interface mode, and set
+ * the advertisement mask. @permit_pause_to_mac indicates whether the
+ * hardware may forward the pause mode resolution to the MAC.
+ *
+ * When operating in %MLO_AN_INBAND, inband should always be enabled,
+ * otherwise inband should be disabled.
+ *
+ * For SGMII, there is no advertisement from the MAC side, the PCS should
+ * be programmed to acknowledge the inband word from the PHY.
+ *
+ * For 1000BASE-X, the advertisement should be programmed into the PCS.
+ *
+ * For most 10GBASE-R, there is no advertisement.
+ */
+int pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, const unsigned long *advertising,
+ bool permit_pause_to_mac);
+
+/**
+ * pcs_an_restart() - restart 802.3z BaseX autonegotiation
+ * @pcs: a pointer to a &struct phylink_pcs.
+ *
+ * When PCS ops are present, this overrides mac_an_restart() in &struct
+ * phylink_mac_ops.
+ */
+void pcs_an_restart(struct phylink_pcs *pcs);
+
+/**
+ * pcs_link_up() - program the PCS for the resolved link configuration
+ * @pcs: a pointer to a &struct phylink_pcs.
+ * @mode: link autonegotiation mode
+ * @interface: link &typedef phy_interface_t mode
+ * @speed: link speed
+ * @duplex: link duplex
+ *
+ * This call will be made just before mac_link_up() to inform the PCS of
+ * the resolved link parameters. For example, a PCS operating in SGMII
+ * mode without in-band AN needs to be manually configured for the link
+ * and duplex setting. Otherwise, this should be a no-op.
+ */
+void pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex);
+#endif
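As a sketch, an MDIO-backed PCS can lean on the clause-22 helpers declared at the bottom of this header; the example_pcs container and names are assumptions:

struct example_pcs {
	struct phylink_pcs pcs;
	struct mdio_device *mdio;
};

static struct mdio_device *example_pcs_to_mdio(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct example_pcs, pcs)->mdio;
}

static void example_pcs_get_state(struct phylink_pcs *pcs,
				  struct phylink_link_state *state)
{
	phylink_mii_c22_pcs_get_state(example_pcs_to_mdio(pcs), state);
}

static int example_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
			      phy_interface_t interface,
			      const unsigned long *advertising,
			      bool permit_pause_to_mac)
{
	return phylink_mii_c22_pcs_config(example_pcs_to_mdio(pcs), mode,
					  interface, advertising);
}

static void example_pcs_an_restart(struct phylink_pcs *pcs)
{
	phylink_mii_c22_pcs_an_restart(example_pcs_to_mdio(pcs));
}

static const struct phylink_pcs_ops example_pcs_ops = {
	.pcs_get_state = example_pcs_get_state,
	.pcs_config = example_pcs_config,
	.pcs_an_restart = example_pcs_an_restart,
};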
+
+void phylink_caps_to_linkmodes(unsigned long *linkmodes, unsigned long caps);
+unsigned long phylink_get_capabilities(phy_interface_t interface,
+ unsigned long mac_capabilities,
+ int rate_matching);
+void phylink_validate_mask_caps(unsigned long *supported,
+ struct phylink_link_state *state,
+ unsigned long caps);
+void phylink_generic_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state);
+
+struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *,
+ phy_interface_t iface,
+ const struct phylink_mac_ops *mac_ops);
+void phylink_destroy(struct phylink *);
+bool phylink_expects_phy(struct phylink *pl);
+
+int phylink_connect_phy(struct phylink *, struct phy_device *);
+int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags);
+int phylink_fwnode_phy_connect(struct phylink *pl,
+ struct fwnode_handle *fwnode,
+ u32 flags);
+void phylink_disconnect_phy(struct phylink *);
+
+void phylink_mac_change(struct phylink *, bool up);
+
+void phylink_start(struct phylink *);
+void phylink_stop(struct phylink *);
+
+void phylink_suspend(struct phylink *pl, bool mac_wol);
+void phylink_resume(struct phylink *pl);
+
+void phylink_ethtool_get_wol(struct phylink *, struct ethtool_wolinfo *);
+int phylink_ethtool_set_wol(struct phylink *, struct ethtool_wolinfo *);
+
+int phylink_ethtool_ksettings_get(struct phylink *,
+ struct ethtool_link_ksettings *);
+int phylink_ethtool_ksettings_set(struct phylink *,
+ const struct ethtool_link_ksettings *);
+int phylink_ethtool_nway_reset(struct phylink *);
+void phylink_ethtool_get_pauseparam(struct phylink *,
+ struct ethtool_pauseparam *);
+int phylink_ethtool_set_pauseparam(struct phylink *,
+ struct ethtool_pauseparam *);
+int phylink_get_eee_err(struct phylink *);
+int phylink_init_eee(struct phylink *, bool);
+int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *);
+int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *);
+int phylink_mii_ioctl(struct phylink *, struct ifreq *, int);
+int phylink_speed_down(struct phylink *pl, bool sync);
+int phylink_speed_up(struct phylink *pl);
+
+#define phylink_zero(bm) \
+ bitmap_zero(bm, __ETHTOOL_LINK_MODE_MASK_NBITS)
+#define __phylink_do_bit(op, bm, mode) \
+ op(ETHTOOL_LINK_MODE_ ## mode ## _BIT, bm)
+
+#define phylink_set(bm, mode) __phylink_do_bit(__set_bit, bm, mode)
+#define phylink_clear(bm, mode) __phylink_do_bit(__clear_bit, bm, mode)
+#define phylink_test(bm, mode) __phylink_do_bit(test_bit, bm, mode)
+
+void phylink_set_port_modes(unsigned long *bits);
+
+/**
+ * phylink_get_link_timer_ns - return the PCS link timer value
+ * @interface: link &typedef phy_interface_t mode
+ *
+ * Return the PCS link timer setting in nanoseconds for the PHY @interface
+ * mode, or -EINVAL if not appropriate.
+ */
+static inline int phylink_get_link_timer_ns(phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_USXGMII:
+ return 1600000;
+
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return 10000000;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state,
+ u16 bmsr, u16 lpa);
+void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
+ struct phylink_link_state *state);
+int phylink_mii_c22_pcs_encode_advertisement(phy_interface_t interface,
+ const unsigned long *advertising);
+int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising);
+void phylink_mii_c22_pcs_an_restart(struct mdio_device *pcs);
+
+void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs,
+ struct phylink_link_state *state);
+
+void phylink_decode_usxgmii_word(struct phylink_link_state *state,
+ uint16_t lpa);
+#endif
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 4d179316e431..b75de288a8c2 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -1,14 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PID_H
#define _LINUX_PID_H
#include <linux/rculist.h>
+#include <linux/wait.h>
+#include <linux/refcount.h>
enum pid_type
{
PIDTYPE_PID,
+ PIDTYPE_TGID,
PIDTYPE_PGID,
PIDTYPE_SID,
- PIDTYPE_MAX
+ PIDTYPE_MAX,
};
/*
@@ -48,39 +52,49 @@ enum pid_type
*/
struct upid {
- /* Try to keep pid_chain in the same cacheline as nr for find_vpid */
int nr;
struct pid_namespace *ns;
- struct hlist_node pid_chain;
};
struct pid
{
- atomic_t count;
+ refcount_t count;
unsigned int level;
+ spinlock_t lock;
/* lists of tasks that use this pid */
struct hlist_head tasks[PIDTYPE_MAX];
+ struct hlist_head inodes;
+ /* wait queue for pidfd notifications */
+ wait_queue_head_t wait_pidfd;
struct rcu_head rcu;
struct upid numbers[1];
};
extern struct pid init_struct_pid;
-struct pid_link
-{
- struct hlist_node node;
- struct pid *pid;
-};
+extern const struct file_operations pidfd_fops;
+
+struct file;
+
+extern struct pid *pidfd_pid(const struct file *file);
+struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags);
+struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags);
+int pidfd_create(struct pid *pid, unsigned int flags);
+int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret);
static inline struct pid *get_pid(struct pid *pid)
{
if (pid)
- atomic_inc(&pid->count);
+ refcount_inc(&pid->count);
return pid;
}
extern void put_pid(struct pid *pid);
extern struct task_struct *pid_task(struct pid *pid, enum pid_type);
+static inline bool pid_has_task(struct pid *pid, enum pid_type type)
+{
+ return !hlist_empty(&pid->tasks[type]);
+}
extern struct task_struct *get_pid_task(struct pid *pid, enum pid_type);
extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type);
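The lookup pattern is unchanged by the refcount_t conversion. A hedged sketch resolving a namespace-local PID number to a task, using find_vpid() and pid_has_task() declared in this header; the helper name is an assumption:

#include <linux/pid.h>
#include <linux/rcupdate.h>

/* Hypothetical sketch: resolve a PID number to a task, safely. */
static struct task_struct *example_find_task(int nr)
{
	struct task_struct *task = NULL;
	struct pid *pid;

	rcu_read_lock();
	pid = find_vpid(nr);		/* valid only under RCU */
	if (pid && pid_has_task(pid, PIDTYPE_PID))
		task = get_pid_task(pid, PIDTYPE_PID);	/* takes a task ref */
	rcu_read_unlock();

	return task;	/* caller must put_task_struct() when done */
}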
@@ -92,12 +106,16 @@ extern void attach_pid(struct task_struct *task, enum pid_type);
extern void detach_pid(struct task_struct *task, enum pid_type);
extern void change_pid(struct task_struct *task, enum pid_type,
struct pid *pid);
+extern void exchange_tids(struct task_struct *task, struct task_struct *old);
extern void transfer_pid(struct task_struct *old, struct task_struct *new,
enum pid_type);
struct pid_namespace;
extern struct pid_namespace init_pid_ns;
+extern int pid_max;
+extern int pid_max_min, pid_max_max;
+
/*
* look up a PID in the hash table. Must be called with the tasklist_lock
* or rcu_read_lock() held.
@@ -115,9 +133,9 @@ extern struct pid *find_vpid(int nr);
*/
extern struct pid *find_get_pid(int nr);
extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
-int next_pidmap(struct pid_namespace *pid_ns, unsigned int last);
-extern struct pid *alloc_pid(struct pid_namespace *ns);
+extern struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
+ size_t set_tid_size);
extern void free_pid(struct pid *pid);
extern void disable_pid_allocation(struct pid_namespace *ns);
@@ -176,7 +194,7 @@ pid_t pid_vnr(struct pid *pid);
do { \
if ((pid) != NULL) \
hlist_for_each_entry_rcu((task), \
- &(pid)->tasks[type], pids[type].node) {
+ &(pid)->tasks[type], pid_links[type]) {
/*
* Both old and new leaders may be attached to
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index c2a989dee876..c758809d5bcf 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PID_NS_H
#define _LINUX_PID_NS_H
@@ -7,62 +8,59 @@
#include <linux/workqueue.h>
#include <linux/threads.h>
#include <linux/nsproxy.h>
-#include <linux/kref.h>
#include <linux/ns_common.h>
+#include <linux/idr.h>
-struct pidmap {
- atomic_t nr_free;
- void *page;
-};
-
-#define BITS_PER_PAGE (PAGE_SIZE * 8)
-#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
-#define PIDMAP_ENTRIES ((PID_MAX_LIMIT+BITS_PER_PAGE-1)/BITS_PER_PAGE)
+/* MAX_PID_NS_LEVEL is needed for limiting size of 'struct pid' */
+#define MAX_PID_NS_LEVEL 32
struct fs_pin;
-enum { /* definitions for pid_namespace's hide_pid field */
- HIDEPID_OFF = 0,
- HIDEPID_NO_ACCESS = 1,
- HIDEPID_INVISIBLE = 2,
-};
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
+/*
+ * sysctl for vm.memfd_noexec
+ * 0: memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL
+ * acts like MFD_EXEC was set.
+ * 1: memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL
+ * acts like MFD_NOEXEC_SEAL was set.
+ * 2: memfd_create() without MFD_NOEXEC_SEAL will be
+ * rejected.
+ */
+#define MEMFD_NOEXEC_SCOPE_EXEC 0
+#define MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL 1
+#define MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED 2
+#endif
struct pid_namespace {
- struct kref kref;
- struct pidmap pidmap[PIDMAP_ENTRIES];
+ struct idr idr;
struct rcu_head rcu;
- int last_pid;
- unsigned int nr_hashed;
+ unsigned int pid_allocated;
struct task_struct *child_reaper;
struct kmem_cache *pid_cachep;
unsigned int level;
struct pid_namespace *parent;
-#ifdef CONFIG_PROC_FS
- struct vfsmount *proc_mnt;
- struct dentry *proc_self;
- struct dentry *proc_thread_self;
-#endif
#ifdef CONFIG_BSD_PROCESS_ACCT
struct fs_pin *bacct;
#endif
struct user_namespace *user_ns;
struct ucounts *ucounts;
- struct work_struct proc_work;
- kgid_t pid_gid;
- int hide_pid;
int reboot; /* group exit code if this pidns was rebooted */
struct ns_common ns;
-};
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
+ /* sysctl for vm.memfd_noexec */
+ int memfd_noexec_scope;
+#endif
+} __randomize_layout;
extern struct pid_namespace init_pid_ns;
-#define PIDNS_HASH_ADDING (1U << 31)
+#define PIDNS_ADDING (1U << 31)
#ifdef CONFIG_PID_NS
static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
{
if (ns != &init_pid_ns)
- kref_get(&ns->kref);
+ refcount_inc(&ns->ns.count);
return ns;
}
@@ -105,6 +103,11 @@ static inline int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk);
void pidhash_init(void);
-void pidmap_init(void);
+void pid_idr_init(void);
+
+static inline bool task_is_in_init_pid_ns(struct task_struct *tsk)
+{
+ return task_active_pid_ns(tsk) == &init_pid_ns;
+}
#endif /* _LINUX_PID_NS_H */
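[Editor's note: the pidmap bitmap is gone; PID numbers now come from the per-namespace IDR. A hedged sketch of the allocation pattern the new idr field enables, loosely modeled on kernel/pid.c after this series — RESERVED_PIDS and pidmap_lock are private to that file, and error paths are elided:

/* Sketch only; the function name is illustrative. */
static int sketch_alloc_pid_nr(struct pid_namespace *ns)
{
	int nr;

	idr_preload(GFP_KERNEL);
	spin_lock_irq(&pidmap_lock);
	/* Cyclic allocation keeps pid numbers increasing, as last_pid did. */
	nr = idr_alloc_cyclic(&ns->idr, NULL, RESERVED_PIDS, pid_max,
			      GFP_ATOMIC);
	spin_unlock_irq(&pidmap_lock);
	idr_preload_end();

	return nr;	/* negative errno on failure */
}
]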
diff --git a/include/linux/pim.h b/include/linux/pim.h
index 0e81b2778ae0..290d4d2ed9b9 100644
--- a/include/linux/pim.h
+++ b/include/linux/pim.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PIM_H
#define __LINUX_PIM_H
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index a0f2aba72fa9..4729d54e8995 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Consumer interface the pin control subsystem
*
@@ -6,40 +7,40 @@
* Based on bits of regulator core, gpio core and clk core
*
* Author: Linus Walleij <linus.walleij@linaro.org>
- *
- * License terms: GNU General Public License (GPL) version 2
*/
#ifndef __LINUX_PINCTRL_CONSUMER_H
#define __LINUX_PINCTRL_CONSUMER_H
#include <linux/err.h>
-#include <linux/list.h>
-#include <linux/seq_file.h>
+#include <linux/types.h>
+
#include <linux/pinctrl/pinctrl-state.h>
+struct device;
+
/* This struct is private to the core and should be regarded as a cookie */
struct pinctrl;
struct pinctrl_state;
-struct device;
#ifdef CONFIG_PINCTRL
/* External interface to pin control */
-extern int pinctrl_request_gpio(unsigned gpio);
-extern void pinctrl_free_gpio(unsigned gpio);
+extern bool pinctrl_gpio_can_use_line(unsigned gpio);
+extern int pinctrl_gpio_request(unsigned gpio);
+extern void pinctrl_gpio_free(unsigned gpio);
extern int pinctrl_gpio_direction_input(unsigned gpio);
extern int pinctrl_gpio_direction_output(unsigned gpio);
extern int pinctrl_gpio_set_config(unsigned gpio, unsigned long config);
extern struct pinctrl * __must_check pinctrl_get(struct device *dev);
extern void pinctrl_put(struct pinctrl *p);
-extern struct pinctrl_state * __must_check pinctrl_lookup_state(
- struct pinctrl *p,
- const char *name);
+extern struct pinctrl_state * __must_check pinctrl_lookup_state(struct pinctrl *p,
+ const char *name);
extern int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s);
extern struct pinctrl * __must_check devm_pinctrl_get(struct device *dev);
extern void devm_pinctrl_put(struct pinctrl *p);
+extern int pinctrl_select_default_state(struct device *dev);
#ifdef CONFIG_PM
extern int pinctrl_pm_select_default_state(struct device *dev);
@@ -62,12 +63,17 @@ static inline int pinctrl_pm_select_idle_state(struct device *dev)
#else /* !CONFIG_PINCTRL */
-static inline int pinctrl_request_gpio(unsigned gpio)
+static inline bool pinctrl_gpio_can_use_line(unsigned gpio)
+{
+ return true;
+}
+
+static inline int pinctrl_gpio_request(unsigned gpio)
{
return 0;
}
-static inline void pinctrl_free_gpio(unsigned gpio)
+static inline void pinctrl_gpio_free(unsigned gpio)
{
}
@@ -95,9 +101,8 @@ static inline void pinctrl_put(struct pinctrl *p)
{
}
-static inline struct pinctrl_state * __must_check pinctrl_lookup_state(
- struct pinctrl *p,
- const char *name)
+static inline struct pinctrl_state * __must_check pinctrl_lookup_state(struct pinctrl *p,
+ const char *name)
{
return NULL;
}
@@ -117,6 +122,11 @@ static inline void devm_pinctrl_put(struct pinctrl *p)
{
}
+static inline int pinctrl_select_default_state(struct device *dev)
+{
+ return 0;
+}
+
static inline int pinctrl_pm_select_default_state(struct device *dev)
{
return 0;
@@ -134,8 +144,8 @@ static inline int pinctrl_pm_select_idle_state(struct device *dev)
#endif /* CONFIG_PINCTRL */
-static inline struct pinctrl * __must_check pinctrl_get_select(
- struct device *dev, const char *name)
+static inline struct pinctrl * __must_check pinctrl_get_select(struct device *dev,
+ const char *name)
{
struct pinctrl *p;
struct pinctrl_state *s;
@@ -160,14 +170,13 @@ static inline struct pinctrl * __must_check pinctrl_get_select(
return p;
}
-static inline struct pinctrl * __must_check pinctrl_get_select_default(
- struct device *dev)
+static inline struct pinctrl * __must_check pinctrl_get_select_default(struct device *dev)
{
return pinctrl_get_select(dev, PINCTRL_STATE_DEFAULT);
}
-static inline struct pinctrl * __must_check devm_pinctrl_get_select(
- struct device *dev, const char *name)
+static inline struct pinctrl * __must_check devm_pinctrl_get_select(struct device *dev,
+ const char *name)
{
struct pinctrl *p;
struct pinctrl_state *s;
@@ -192,8 +201,7 @@ static inline struct pinctrl * __must_check devm_pinctrl_get_select(
return p;
}
-static inline struct pinctrl * __must_check devm_pinctrl_get_select_default(
- struct device *dev)
+static inline struct pinctrl * __must_check devm_pinctrl_get_select_default(struct device *dev)
{
return devm_pinctrl_get_select(dev, PINCTRL_STATE_DEFAULT);
}
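[Editor's note: for reference, the consumer flow this header serves — an illustrative probe fragment, not from the patch. The new pinctrl_select_default_state() collapses exactly this sequence for the default state:

static int sketch_probe(struct device *dev)
{
	struct pinctrl *p;
	struct pinctrl_state *s;
	int ret;

	/* devm_* makes the matching put automatic on driver detach. */
	p = devm_pinctrl_get(dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	s = pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT);
	if (IS_ERR(s))
		return PTR_ERR(s);

	ret = pinctrl_select_state(p, s);
	if (ret < 0)
		return ret;

	return 0;
}
]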
diff --git a/include/linux/pinctrl/devinfo.h b/include/linux/pinctrl/devinfo.h
index 05082e407c4a..bb6653af4f92 100644
--- a/include/linux/pinctrl/devinfo.h
+++ b/include/linux/pinctrl/devinfo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Per-device information from the pin control system.
* This is the stuff that get included into the device
@@ -8,18 +9,22 @@
* This interface is used in the core to keep track of pins.
*
* Author: Linus Walleij <linus.walleij@linaro.org>
- *
- * License terms: GNU General Public License (GPL) version 2
*/
#ifndef PINCTRL_DEVINFO_H
#define PINCTRL_DEVINFO_H
+struct device;
+
#ifdef CONFIG_PINCTRL
+#include <linux/device.h>
+
/* The device core acts as a consumer toward pinctrl */
#include <linux/pinctrl/consumer.h>
+struct pinctrl;
+
/**
* struct dev_pin_info - pin state container for devices
* @p: pinctrl handle for the containing device
@@ -41,6 +46,14 @@ struct dev_pin_info {
extern int pinctrl_bind_pins(struct device *dev);
extern int pinctrl_init_done(struct device *dev);
+static inline struct pinctrl *dev_pinctrl(struct device *dev)
+{
+ if (!dev->pins)
+ return NULL;
+
+ return dev->pins->p;
+}
+
#else
/* Stubs if we're not using pinctrl */
@@ -55,5 +68,10 @@ static inline int pinctrl_init_done(struct device *dev)
return 0;
}
+static inline struct pinctrl *dev_pinctrl(struct device *dev)
+{
+ return NULL;
+}
+
#endif /* CONFIG_PINCTRL */
#endif /* PINCTRL_DEVINFO_H */
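[Editor's note: the new dev_pinctrl() accessor gives bus and core code a NULL-safe way to reach a device's bound pinctrl handle without poking at dev->pins directly. A trivial, hypothetical use:

static bool sketch_device_has_pinctrl(struct device *dev)
{
	return dev_pinctrl(dev) != NULL;	/* NULL when no pins are bound */
}
]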
diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h
index e5b1716f98cc..0639b36f43c5 100644
--- a/include/linux/pinctrl/machine.h
+++ b/include/linux/pinctrl/machine.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Machine interface for the pinctrl subsystem.
*
@@ -6,13 +7,11 @@
* Based on bits of regulator core, gpio core and clk core
*
* Author: Linus Walleij <linus.walleij@linaro.org>
- *
- * License terms: GNU General Public License (GPL) version 2
*/
#ifndef __LINUX_PINCTRL_MACHINE_H
#define __LINUX_PINCTRL_MACHINE_H
-#include <linux/bug.h>
+#include <linux/kernel.h> /* ARRAY_SIZE() */
#include <linux/pinctrl/pinctrl-state.h>
@@ -150,19 +149,26 @@ struct pinctrl_map {
#define PIN_MAP_CONFIGS_GROUP_HOG_DEFAULT(dev, grp, cfgs) \
PIN_MAP_CONFIGS_GROUP(dev, PINCTRL_STATE_DEFAULT, dev, grp, cfgs)
+struct pinctrl_map;
+
#ifdef CONFIG_PINCTRL
-extern int pinctrl_register_mappings(struct pinctrl_map const *map,
- unsigned num_maps);
+extern int pinctrl_register_mappings(const struct pinctrl_map *map,
+ unsigned num_maps);
+extern void pinctrl_unregister_mappings(const struct pinctrl_map *map);
extern void pinctrl_provide_dummies(void);
#else
-static inline int pinctrl_register_mappings(struct pinctrl_map const *map,
- unsigned num_maps)
+static inline int pinctrl_register_mappings(const struct pinctrl_map *map,
+ unsigned num_maps)
{
return 0;
}
+static inline void pinctrl_unregister_mappings(const struct pinctrl_map *map)
+{
+}
+
static inline void pinctrl_provide_dummies(void)
{
}
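[Editor's note: a hedged board-file sketch of this machine interface; the controller and device names below are invented. The ARRAY_SIZE() use is why the header now pulls in linux/kernel.h, and the new pinctrl_unregister_mappings() makes such registrations reversible:

static const struct pinctrl_map sketch_board_map[] = {
	/* Mux group "uart0_grp" to function "uart0" for device foo-uart.0 */
	PIN_MAP_MUX_GROUP_DEFAULT("foo-uart.0", "pinctrl-foo",
				  "uart0_grp", "uart0"),
};

static int __init sketch_board_pinmux_init(void)
{
	return pinctrl_register_mappings(sketch_board_map,
					 ARRAY_SIZE(sketch_board_map));
}
]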
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index 231d3075815a..d74b7a4ea154 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Interface the generic pinconfig portions of the pinctrl subsystem
*
@@ -6,12 +7,19 @@
* This interface is used in the core to keep track of pins.
*
* Author: Linus Walleij <linus.walleij@linaro.org>
- *
- * License terms: GNU General Public License (GPL) version 2
*/
#ifndef __LINUX_PINCTRL_PINCONF_GENERIC_H
#define __LINUX_PINCTRL_PINCONF_GENERIC_H
+#include <linux/types.h>
+
+#include <linux/pinctrl/machine.h>
+
+struct device_node;
+
+struct pinctrl_dev;
+struct pinctrl_map;
+
/**
* enum pin_config_param - possible pin configuration parameters
* @PIN_CONFIG_BIAS_BUS_HOLD: the pin will be set to weakly latch so that it
@@ -30,7 +38,8 @@
* impedance.
* @PIN_CONFIG_BIAS_PULL_DOWN: the pin will be pulled down (usually with high
* impedance to GROUND). If the argument is != 0 pull-down is enabled,
- * if it is 0, pull-down is total, i.e. the pin is connected to GROUND.
+ * the value is interpreted by the driver and can be custom or an SI unit
+ * such as Ohms.
* @PIN_CONFIG_BIAS_PULL_PIN_DEFAULT: the pin will be pulled up or down based
* on embedded knowledge of the controller hardware, like current mux
* function. The pull direction and possibly strength too will normally
@@ -41,7 +50,8 @@
* @PIN_CONFIG_BIAS_DISABLE.
* @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high
* impedance to VDD). If the argument is != 0 pull-up is enabled,
- * if it is 0, pull-up is total, i.e. the pin is connected to VDD.
+ * the value is interpreted by the driver and can be custom or an SI unit
+ * such as Ohms.
* @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open
* collector) which means it is usually wired with other output ports
* which are then pulled up with an external resistor. Setting this
@@ -55,6 +65,8 @@
* push-pull mode, the argument is ignored.
* @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current
* passed as argument. The argument is in mA.
+ * @PIN_CONFIG_DRIVE_STRENGTH_UA: the pin will sink or source at most the current
+ * passed as argument. The argument is in uA.
* @PIN_CONFIG_INPUT_DEBOUNCE: this will configure the pin to debounce mode,
* which means it will wait for signals to settle when reading inputs. The
* argument gives the debounce time in usecs. Setting the
@@ -69,23 +81,32 @@
* @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin.
* If the argument != 0, schmitt-trigger mode is enabled. If it's 0,
* schmitt-trigger mode is disabled.
- * @PIN_CONFIG_LOW_POWER_MODE: this will configure the pin for low power
+ * @PIN_CONFIG_MODE_LOW_POWER: this will configure the pin for low power
* operation, if several modes of operation are supported these can be
* passed in the argument on a custom form, else just use argument 1
* to indicate low power mode, argument 0 turns low power mode off.
+ * @PIN_CONFIG_MODE_PWM: this will configure the pin for PWM
+ * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a
+ * value on the line. Use argument 1 to indicate high level, argument 0 to
+ * indicate low level. (Please see Documentation/driver-api/pin-control.rst,
+ * section "GPIO mode pitfalls" for a discussion around this parameter.)
* @PIN_CONFIG_OUTPUT_ENABLE: this will enable the pin's output mode
* without driving a value there. For most platforms this reduces to
* enable the output buffers and then let the pin controller current
* configuration (eg. the currently selected mux function) drive values on
* the line. Use argument 1 to enable output mode, argument 0 to disable
* it.
- * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a
- * value on the line. Use argument 1 to indicate high level, argument 0 to
- * indicate low level. (Please see Documentation/pinctrl.txt, section
- * "GPIO mode pitfalls" for a discussion around this parameter.)
+ * @PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS: this will configure the output impedance
+ * of the pin with the value passed as argument. The argument is in ohms.
+ * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset
* @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
* supplies, the argument to this parameter (on a custom format) tells
* the driver which alternative power source to use.
+ * @PIN_CONFIG_SKEW_DELAY: if the pin has programmable skew rate (on inputs)
+ * or latch delay (on outputs) this parameter (in a custom format)
+ * specifies the clock skew or latch delay. It typically controls how
+ * many double inverters are put in front of the line.
+ * @PIN_CONFIG_SLEEP_HARDWARE_STATE: indicate this is sleep related state.
* @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to
* this parameter (on a custom format) tells the driver which alternative
* slew rate to use.
@@ -106,14 +127,20 @@ enum pin_config_param {
PIN_CONFIG_DRIVE_OPEN_SOURCE,
PIN_CONFIG_DRIVE_PUSH_PULL,
PIN_CONFIG_DRIVE_STRENGTH,
+ PIN_CONFIG_DRIVE_STRENGTH_UA,
PIN_CONFIG_INPUT_DEBOUNCE,
PIN_CONFIG_INPUT_ENABLE,
PIN_CONFIG_INPUT_SCHMITT,
PIN_CONFIG_INPUT_SCHMITT_ENABLE,
- PIN_CONFIG_LOW_POWER_MODE,
- PIN_CONFIG_OUTPUT_ENABLE,
+ PIN_CONFIG_MODE_LOW_POWER,
+ PIN_CONFIG_MODE_PWM,
PIN_CONFIG_OUTPUT,
+ PIN_CONFIG_OUTPUT_ENABLE,
+ PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS,
+ PIN_CONFIG_PERSIST_STATE,
PIN_CONFIG_POWER_SOURCE,
+ PIN_CONFIG_SKEW_DELAY,
+ PIN_CONFIG_SLEEP_HARDWARE_STATE,
PIN_CONFIG_SLEW_RATE,
PIN_CONFIG_END = 0x7F,
PIN_CONFIG_MAX = 0xFF,
@@ -147,9 +174,6 @@ static inline unsigned long pinconf_to_config_packed(enum pin_config_param param
return PIN_CONF_PACKED(param, argument);
}
-#ifdef CONFIG_GENERIC_PINCONF
-
-#ifdef CONFIG_DEBUG_FS
#define PCONFDUMP(a, b, c, d) { \
.param = a, .display = b, .format = c, .has_arg = d \
}
@@ -160,14 +184,6 @@ struct pin_config_item {
const char * const format;
bool has_arg;
};
-#endif /* CONFIG_DEBUG_FS */
-
-#ifdef CONFIG_OF
-
-#include <linux/device.h>
-#include <linux/pinctrl/machine.h>
-struct pinctrl_dev;
-struct pinctrl_map;
struct pinconf_generic_params {
const char * const property;
@@ -185,25 +201,25 @@ int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
void pinconf_generic_dt_free_map(struct pinctrl_dev *pctldev,
struct pinctrl_map *map, unsigned num_maps);
-static inline int pinconf_generic_dt_node_to_map_group(
- struct pinctrl_dev *pctldev, struct device_node *np_config,
- struct pinctrl_map **map, unsigned *num_maps)
+static inline int pinconf_generic_dt_node_to_map_group(struct pinctrl_dev *pctldev,
+ struct device_node *np_config, struct pinctrl_map **map,
+ unsigned *num_maps)
{
return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
PIN_MAP_TYPE_CONFIGS_GROUP);
}
-static inline int pinconf_generic_dt_node_to_map_pin(
- struct pinctrl_dev *pctldev, struct device_node *np_config,
- struct pinctrl_map **map, unsigned *num_maps)
+static inline int pinconf_generic_dt_node_to_map_pin(struct pinctrl_dev *pctldev,
+ struct device_node *np_config, struct pinctrl_map **map,
+ unsigned *num_maps)
{
return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
PIN_MAP_TYPE_CONFIGS_PIN);
}
-static inline int pinconf_generic_dt_node_to_map_all(
- struct pinctrl_dev *pctldev, struct device_node *np_config,
- struct pinctrl_map **map, unsigned *num_maps)
+static inline int pinconf_generic_dt_node_to_map_all(struct pinctrl_dev *pctldev,
+ struct device_node *np_config, struct pinctrl_map **map,
+ unsigned *num_maps)
{
/*
* passing the type as PIN_MAP_TYPE_INVALID causes the underlying parser
@@ -212,8 +228,5 @@ static inline int pinconf_generic_dt_node_to_map_all(
return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
PIN_MAP_TYPE_INVALID);
}
-#endif
-
-#endif /* CONFIG_GENERIC_PINCONF */
#endif /* __LINUX_PINCTRL_PINCONF_GENERIC_H */
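[Editor's note: an illustration of the packed-config format these parameters travel in — each unsigned long carries the parameter in its low bits and the argument above it; unpack with pinconf_to_config_param()/pinconf_to_config_argument(). The values below are examples only:

static const unsigned long sketch_configs[] = {
	PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 1),
	PIN_CONF_PACKED(PIN_CONFIG_DRIVE_STRENGTH, 8),		/* 8 mA */
	PIN_CONF_PACKED(PIN_CONFIG_INPUT_DEBOUNCE, 100),	/* 100 usec */
};
]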
diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h
index 09eb80f2574a..f8a8215e9021 100644
--- a/include/linux/pinctrl/pinconf.h
+++ b/include/linux/pinctrl/pinconf.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Interface the pinconfig portions of the pinctrl subsystem
*
@@ -6,15 +7,11 @@
* This interface is used in the core to keep track of pins.
*
* Author: Linus Walleij <linus.walleij@linaro.org>
- *
- * License terms: GNU General Public License (GPL) version 2
*/
#ifndef __LINUX_PINCTRL_PINCONF_H
#define __LINUX_PINCTRL_PINCONF_H
-#ifdef CONFIG_PINCONF
-
-#include <linux/pinctrl/machine.h>
+#include <linux/types.h>
struct pinctrl_dev;
struct seq_file;
@@ -28,9 +25,9 @@ struct seq_file;
* is not available on this controller this should return -ENOTSUPP
* and if it is available but disabled it should return -EINVAL
* @pin_config_set: configure an individual pin
- * @pin_config_group_get: get configurations for an entire pin group
+ * @pin_config_group_get: get configurations for an entire pin group; should
+ * return -ENOTSUPP and -EINVAL using the same rules as pin_config_get.
* @pin_config_group_set: configure all pins in a group
- * @pin_config_dbg_parse_modify: optional debugfs to modify a pin configuration
* @pin_config_dbg_show: optional debugfs display hook that will provide
* per-device info for a certain pin in debugfs
* @pin_config_group_dbg_show: optional debugfs display hook that will provide
@@ -56,9 +53,6 @@ struct pinconf_ops {
unsigned selector,
unsigned long *configs,
unsigned num_configs);
- int (*pin_config_dbg_parse_modify) (struct pinctrl_dev *pctldev,
- const char *arg,
- unsigned long *config);
void (*pin_config_dbg_show) (struct pinctrl_dev *pctldev,
struct seq_file *s,
unsigned offset);
@@ -70,6 +64,4 @@ struct pinconf_ops {
unsigned long config);
};
-#endif
-
#endif /* __LINUX_PINCTRL_PINCONF_H */
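[Editor's note: a skeleton pinconf_ops honoring the documented return convention (-ENOTSUPP when a parameter is not available on the controller, -EINVAL when available but disabled). Names are hypothetical; the packing helpers come from pinconf-generic.h:

static int sketch_pin_config_get(struct pinctrl_dev *pctldev,
				 unsigned pin, unsigned long *config)
{
	enum pin_config_param param = pinconf_to_config_param(*config);

	switch (param) {
	case PIN_CONFIG_BIAS_PULL_UP:
		/* query hardware here; report pull-up enabled with arg 1 */
		*config = pinconf_to_config_packed(param, 1);
		return 0;
	default:
		return -ENOTSUPP;	/* parameter not implemented */
	}
}

static const struct pinconf_ops sketch_pinconf_ops = {
	.pin_config_get = sketch_pin_config_get,
};
]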
diff --git a/include/linux/pinctrl/pinctrl-state.h b/include/linux/pinctrl/pinctrl-state.h
index 23073519339f..635d97e9285e 100644
--- a/include/linux/pinctrl/pinctrl-state.h
+++ b/include/linux/pinctrl/pinctrl-state.h
@@ -1,7 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Standard pin control state definitions
*/
+#ifndef __LINUX_PINCTRL_PINCTRL_STATE_H
+#define __LINUX_PINCTRL_PINCTRL_STATE_H
+
/**
* @PINCTRL_STATE_DEFAULT: the state the pinctrl handle shall be put
* into as default, usually this means the pins are up and ready to
@@ -30,3 +34,5 @@
#define PINCTRL_STATE_INIT "init"
#define PINCTRL_STATE_IDLE "idle"
#define PINCTRL_STATE_SLEEP "sleep"
+
+#endif /* __LINUX_PINCTRL_PINCTRL_STATE_H */
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 5e45385c5bdc..4d252ea00ed1 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Interface the pinctrl subsystem
*
@@ -6,27 +7,44 @@
* This interface is used in the core to keep track of pins.
*
* Author: Linus Walleij <linus.walleij@linaro.org>
- *
- * License terms: GNU General Public License (GPL) version 2
*/
#ifndef __LINUX_PINCTRL_PINCTRL_H
#define __LINUX_PINCTRL_PINCTRL_H
-#ifdef CONFIG_PINCTRL
-
-#include <linux/radix-tree.h>
-#include <linux/list.h>
-#include <linux/seq_file.h>
-#include <linux/pinctrl/pinctrl-state.h>
+#include <linux/types.h>
struct device;
+struct device_node;
+struct gpio_chip;
+struct module;
+struct seq_file;
+
+struct pin_config_item;
+struct pinconf_generic_params;
+struct pinconf_ops;
struct pinctrl_dev;
struct pinctrl_map;
struct pinmux_ops;
-struct pinconf_ops;
-struct pin_config_item;
-struct gpio_chip;
-struct device_node;
+
+/**
+ * struct pingroup - provides information on pingroup
+ * @name: a name for pingroup
+ * @pins: an array of pins in the pingroup
+ * @npins: number of pins in the pingroup
+ */
+struct pingroup {
+ const char *name;
+ const unsigned int *pins;
+ size_t npins;
+};
+
+/* Convenience macro to define a single named or anonymous pingroup */
+#define PINCTRL_PINGROUP(_name, _pins, _npins) \
+(struct pingroup) { \
+ .name = _name, \
+ .pins = _pins, \
+ .npins = _npins, \
+}
/**
* struct pinctrl_pin_desc - boards/machines provide information on their
@@ -53,8 +71,8 @@ struct pinctrl_pin_desc {
* @id: an ID number for the chip in this range
* @base: base offset of the GPIO range
* @pin_base: base pin number of the GPIO range if pins == NULL
- * @pins: enumeration of pins in GPIO range or NULL
* @npins: number of pins in the GPIO range, including the base number
+ * @pins: enumeration of pins in GPIO range or NULL
* @gc: an optional pointer to a gpio_chip
*/
struct pinctrl_gpio_range {
@@ -63,8 +81,8 @@ struct pinctrl_gpio_range {
unsigned int id;
unsigned int base;
unsigned int pin_base;
- unsigned const *pins;
unsigned int npins;
+ unsigned const *pins;
struct gpio_chip *gc;
};
@@ -124,6 +142,10 @@ struct pinctrl_ops {
* the hardware description
* @custom_conf_items: Information how to print @params in debugfs, must be
* the same size as the @custom_params, i.e. @num_custom_params
+ * @link_consumers: If true create a device link between pinctrl and its
+ * consumers (i.e. the devices requesting pin control states). This is
+ * sometimes necessary to ascertain the right suspend/resume order for
+ * example.
*/
struct pinctrl_desc {
const char *name;
@@ -138,6 +160,7 @@ struct pinctrl_desc {
const struct pinconf_generic_params *custom_params;
const struct pin_config_item *custom_conf_items;
#endif
+ bool link_consumers;
};
/* External interface to pin controller */
@@ -166,7 +189,6 @@ extern struct pinctrl_dev *devm_pinctrl_register(struct device *dev,
extern void devm_pinctrl_unregister(struct device *dev,
struct pinctrl_dev *pctldev);
-extern bool pin_is_valid(struct pinctrl_dev *pctldev, int pin);
extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range);
extern void pinctrl_add_gpio_ranges(struct pinctrl_dev *pctldev,
@@ -184,7 +206,27 @@ extern int pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
const char *pin_group, const unsigned **pins,
unsigned *num_pins);
-#ifdef CONFIG_OF
+/**
+ * struct pinfunction - Description about a function
+ * @name: Name of the function
+ * @groups: An array of groups for this function
+ * @ngroups: Number of groups in @groups
+ */
+struct pinfunction {
+ const char *name;
+ const char * const *groups;
+ size_t ngroups;
+};
+
+/* Convenience macro to define a single named pinfunction */
+#define PINCTRL_PINFUNCTION(_name, _groups, _ngroups) \
+(struct pinfunction) { \
+ .name = (_name), \
+ .groups = (_groups), \
+ .ngroups = (_ngroups), \
+ }
+
+#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PINCTRL)
extern struct pinctrl_dev *of_pinctrl_get(struct device_node *np);
#else
static inline
@@ -197,16 +239,5 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
extern const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev);
extern const char *pinctrl_dev_get_devname(struct pinctrl_dev *pctldev);
extern void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev);
-#else
-
-struct pinctrl_dev;
-
-/* Sufficiently stupid default functions when pinctrl is not in use */
-static inline bool pin_is_valid(struct pinctrl_dev *pctldev, int pin)
-{
- return pin >= 0;
-}
-
-#endif /* !CONFIG_PINCTRL */
#endif /* __LINUX_PINCTRL_PINCTRL_H */
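[Editor's note: illustrative use of the new pingroup/pinfunction types via their convenience macros, in the array-initializer style the macros are meant for; pin numbers and names are invented:

static const unsigned int sketch_uart0_pins[] = { 4, 5 };

static const struct pingroup sketch_groups[] = {
	PINCTRL_PINGROUP("uart0_grp", sketch_uart0_pins,
			 ARRAY_SIZE(sketch_uart0_pins)),
};

static const char * const sketch_uart0_groups[] = { "uart0_grp" };

static const struct pinfunction sketch_functions[] = {
	PINCTRL_PINFUNCTION("uart0", sketch_uart0_groups,
			    ARRAY_SIZE(sketch_uart0_groups)),
};
]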
diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h
index ace60d775b20..a7e370965c53 100644
--- a/include/linux/pinctrl/pinmux.h
+++ b/include/linux/pinctrl/pinmux.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Interface the pinmux subsystem
*
@@ -6,19 +7,14 @@
* Based on bits of regulator core, gpio core and clk core
*
* Author: Linus Walleij <linus.walleij@linaro.org>
- *
- * License terms: GNU General Public License (GPL) version 2
*/
#ifndef __LINUX_PINCTRL_PINMUX_H
#define __LINUX_PINCTRL_PINMUX_H
-#include <linux/list.h>
-#include <linux/seq_file.h>
-#include <linux/pinctrl/pinctrl.h>
-
-#ifdef CONFIG_PINMUX
+#include <linux/types.h>
struct pinctrl_dev;
+struct pinctrl_gpio_range;
/**
* struct pinmux_ops - pinmux operations, to be implemented by pin controller
@@ -85,6 +81,4 @@ struct pinmux_ops {
bool strict;
};
-#endif /* CONFIG_PINMUX */
-
#endif /* __LINUX_PINCTRL_PINMUX_H */
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index e7497c9dde7f..d2c3f16cf6b1 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PIPE_FS_I_H
#define _LINUX_PIPE_FS_I_H
@@ -7,6 +8,11 @@
#define PIPE_BUF_FLAG_ATOMIC 0x02 /* was atomically mapped */
#define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */
#define PIPE_BUF_FLAG_PACKET 0x08 /* read() as a packet */
+#define PIPE_BUF_FLAG_CAN_MERGE 0x10 /* can merge buffers */
+#define PIPE_BUF_FLAG_WHOLE 0x20 /* read() must return entire buffer or error */
+#ifdef CONFIG_WATCH_QUEUE
+#define PIPE_BUF_FLAG_LOSS 0x40 /* Message loss happened after this buffer */
+#endif
/**
* struct pipe_buffer - a linux kernel pipe buffer
@@ -28,63 +34,71 @@ struct pipe_buffer {
/**
* struct pipe_inode_info - a linux kernel pipe
* @mutex: mutex protecting the whole thing
- * @wait: reader/writer wait point in case of empty/full pipe
- * @nrbufs: the number of non-empty pipe buffers in this pipe
- * @buffers: total number of buffers (should be a power of 2)
- * @curbuf: the current pipe buffer entry
+ * @rd_wait: reader wait point in case of empty pipe
+ * @wr_wait: writer wait point in case of full pipe
+ * @head: The point of buffer production
+ * @tail: The point of buffer consumption
+ * @note_loss: The next read() should insert a data-lost message
+ * @max_usage: The maximum number of slots that may be used in the ring
+ * @ring_size: total number of buffers (should be a power of 2)
+ * @nr_accounted: The amount this pipe accounts for in user->pipe_bufs
* @tmp_page: cached released page
* @readers: number of current readers of this pipe
* @writers: number of current writers of this pipe
* @files: number of struct file referring this pipe (protected by ->i_lock)
- * @waiting_writers: number of writers blocked waiting for room
* @r_counter: reader counter
* @w_counter: writer counter
+ * @poll_usage: is this pipe used for epoll, which has crazy wakeups?
* @fasync_readers: reader side fasync
* @fasync_writers: writer side fasync
* @bufs: the circular array of pipe buffers
* @user: the user who created this pipe
+ * @watch_queue: If this pipe is a watch_queue, this is the stuff for that
**/
struct pipe_inode_info {
struct mutex mutex;
- wait_queue_head_t wait;
- unsigned int nrbufs, curbuf, buffers;
+ wait_queue_head_t rd_wait, wr_wait;
+ unsigned int head;
+ unsigned int tail;
+ unsigned int max_usage;
+ unsigned int ring_size;
+#ifdef CONFIG_WATCH_QUEUE
+ bool note_loss;
+#endif
+ unsigned int nr_accounted;
unsigned int readers;
unsigned int writers;
unsigned int files;
- unsigned int waiting_writers;
unsigned int r_counter;
unsigned int w_counter;
+ bool poll_usage;
struct page *tmp_page;
struct fasync_struct *fasync_readers;
struct fasync_struct *fasync_writers;
struct pipe_buffer *bufs;
struct user_struct *user;
+#ifdef CONFIG_WATCH_QUEUE
+ struct watch_queue *watch_queue;
+#endif
};
/*
* Note on the nesting of these functions:
*
* ->confirm()
- * ->steal()
+ * ->try_steal()
*
- * That is, ->steal() must be called on a confirmed buffer.
- * See below for the meaning of each operation. Also see kerneldoc
- * in fs/pipe.c for the pipe and generic variants of these hooks.
+ * That is, ->try_steal() must be called on a confirmed buffer. See below for
+ * the meaning of each operation. Also see the kerneldoc in fs/pipe.c for the
+ * pipe and generic variants of these hooks.
*/
struct pipe_buf_operations {
/*
- * This is set to 1, if the generic pipe read/write may coalesce
- * data into an existing buffer. If this is set to 0, a new pipe
- * page segment is always used for new data.
- */
- int can_merge;
-
- /*
* ->confirm() verifies that the data in the pipe buffer is there
* and that the contents are good. If the pages in the pipe belong
* to a file system, we may need to wait for IO completion in this
* hook. Returns 0 for good, or a negative error value in case of
- * error.
+ * error. If not present all pages are considered good.
*/
int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);
@@ -96,29 +110,83 @@ struct pipe_buf_operations {
/*
* Attempt to take ownership of the pipe buffer and its contents.
- * ->steal() returns 0 for success, in which case the contents
- * of the pipe (the buf->page) is locked and now completely owned
- * by the caller. The page may then be transferred to a different
- * mapping, the most often used case is insertion into different
- * file address space cache.
+ * ->try_steal() returns %true for success, in which case the contents
+ * of the pipe (the buf->page) is locked and now completely owned by the
+ * caller. The page may then be transferred to a different mapping, the
+ * most often used case is insertion into different file address space
+ * cache.
*/
- int (*steal)(struct pipe_inode_info *, struct pipe_buffer *);
+ bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *);
/*
* Get a reference to the pipe buffer.
*/
- void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+ bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
};
/**
+ * pipe_empty - Return true if the pipe is empty
+ * @head: The pipe ring head pointer
+ * @tail: The pipe ring tail pointer
+ */
+static inline bool pipe_empty(unsigned int head, unsigned int tail)
+{
+ return head == tail;
+}
+
+/**
+ * pipe_occupancy - Return number of slots used in the pipe
+ * @head: The pipe ring head pointer
+ * @tail: The pipe ring tail pointer
+ */
+static inline unsigned int pipe_occupancy(unsigned int head, unsigned int tail)
+{
+ return head - tail;
+}
+
+/**
+ * pipe_full - Return true if the pipe is full
+ * @head: The pipe ring head pointer
+ * @tail: The pipe ring tail pointer
+ * @limit: The maximum amount of slots available.
+ */
+static inline bool pipe_full(unsigned int head, unsigned int tail,
+ unsigned int limit)
+{
+ return pipe_occupancy(head, tail) >= limit;
+}
+
+/**
+ * pipe_buf - Return the pipe buffer for the specified slot in the pipe ring
+ * @pipe: The pipe to access
+ * @slot: The slot of interest
+ */
+static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe,
+ unsigned int slot)
+{
+ return &pipe->bufs[slot & (pipe->ring_size - 1)];
+}
+
+/**
+ * pipe_head_buf - Return the pipe buffer at the head of the pipe ring
+ * @pipe: The pipe to access
+ */
+static inline struct pipe_buffer *pipe_head_buf(const struct pipe_inode_info *pipe)
+{
+ return pipe_buf(pipe, pipe->head);
+}
+
+/**
* pipe_buf_get - get a reference to a pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to get a reference to
+ *
+ * Return: %true if the reference was successfully obtained.
*/
-static inline void pipe_buf_get(struct pipe_inode_info *pipe,
+static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
- buf->ops->get(pipe, buf);
+ return buf->ops->get(pipe, buf);
}
/**
@@ -143,18 +211,31 @@ static inline void pipe_buf_release(struct pipe_inode_info *pipe,
static inline int pipe_buf_confirm(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
+ if (!buf->ops->confirm)
+ return 0;
return buf->ops->confirm(pipe, buf);
}
/**
- * pipe_buf_steal - attempt to take ownership of a pipe_buffer
+ * pipe_buf_try_steal - attempt to take ownership of a pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to attempt to steal
*/
-static inline int pipe_buf_steal(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
+static inline bool pipe_buf_try_steal(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
+ if (!buf->ops->try_steal)
+ return false;
+ return buf->ops->try_steal(pipe, buf);
+}
+
+static inline void pipe_discard_from(struct pipe_inode_info *pipe,
+ unsigned int old_head)
{
- return buf->ops->steal(pipe, buf);
+ unsigned int mask = pipe->ring_size - 1;
+
+ while (pipe->head > old_head)
+ pipe_buf_release(pipe, &pipe->bufs[--pipe->head & mask]);
}
/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
@@ -166,29 +247,36 @@ void pipe_lock(struct pipe_inode_info *);
void pipe_unlock(struct pipe_inode_info *);
void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
-extern unsigned int pipe_max_size, pipe_min_size;
-extern unsigned long pipe_user_pages_hard;
-extern unsigned long pipe_user_pages_soft;
-int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *);
-
-/* Drop the inode semaphore and wait for a pipe event, atomically */
-void pipe_wait(struct pipe_inode_info *pipe);
+/* Wait for a pipe to be readable/writable while dropping the pipe lock */
+void pipe_wait_readable(struct pipe_inode_info *);
+void pipe_wait_writable(struct pipe_inode_info *);
struct pipe_inode_info *alloc_pipe_info(void);
void free_pipe_info(struct pipe_inode_info *);
/* Generic pipe buffer ops functions */
-void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
-int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
-int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
+bool generic_pipe_buf_try_steal(struct pipe_inode_info *, struct pipe_buffer *);
void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
+#ifdef CONFIG_WATCH_QUEUE
+unsigned long account_pipe_buffers(struct user_struct *user,
+ unsigned long old, unsigned long new);
+bool too_many_pipe_buffers_soft(unsigned long user_bufs);
+bool too_many_pipe_buffers_hard(unsigned long user_bufs);
+bool pipe_is_unprivileged_user(void);
+#endif
+
/* for F_SETPIPE_SZ and F_GETPIPE_SZ */
+#ifdef CONFIG_WATCH_QUEUE
+int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots);
+#endif
long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
-struct pipe_inode_info *get_pipe_info(struct file *file);
+struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice);
int create_pipe_files(struct file **, int);
+unsigned int round_pipe_size(unsigned long size);
#endif
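[Editor's note: a hedged sketch of consuming the new head/tail ring, using only the helpers defined above; real readers also hold pipe->mutex. Because ring_size is a power of two, the indices run free and are only masked when a slot is dereferenced:

static void sketch_drain_pipe(struct pipe_inode_info *pipe)
{
	unsigned int head = pipe->head;
	unsigned int tail = pipe->tail;

	while (!pipe_empty(head, tail)) {
		struct pipe_buffer *buf = pipe_buf(pipe, tail);

		/* consume buf->page[buf->offset .. buf->offset + buf->len) */
		pipe_buf_release(pipe, buf);
		tail++;
	}
	pipe->tail = tail;
}
]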
diff --git a/include/linux/pkeys.h b/include/linux/pkeys.h
index a1bacf1150b2..86be8bf27b41 100644
--- a/include/linux/pkeys.h
+++ b/include/linux/pkeys.h
@@ -1,8 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PKEYS_H
#define _LINUX_PKEYS_H
-#include <linux/mm_types.h>
-#include <asm/mmu_context.h>
+#include <linux/mm.h>
+
+#define ARCH_DEFAULT_PKEY 0
#ifdef CONFIG_ARCH_HAS_PKEYS
#include <asm/pkeys.h>
@@ -13,6 +15,11 @@
#define PKEY_DEDICATED_EXECUTE_ONLY 0
#define ARCH_VM_PKEY_FLAGS 0
+static inline int vma_pkey(struct vm_area_struct *vma)
+{
+ return 0;
+}
+
static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
{
return (pkey == 0);
@@ -34,8 +41,9 @@ static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
return 0;
}
-static inline void copy_init_pkru_to_fpregs(void)
+static inline bool arch_pkeys_enabled(void)
{
+ return false;
}
#endif /* ! CONFIG_ARCH_HAS_PKEYS */
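[Editor's note: for context, what arch_pkeys_enabled() gates from userspace is memory protection keys. An illustrative snippet, requiring pkey-capable hardware (e.g. x86 PKU) and glibc >= 2.27 for the wrappers:

#define _GNU_SOURCE
#include <stddef.h>
#include <sys/mman.h>

int sketch_protect_region(void *addr, size_t len)
{
	int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
	if (pkey < 0)
		return -1;	/* no kernel or hardware pkey support */

	/* Tag the region with the key; writes through it now fault. */
	if (pkey_mprotect(addr, len, PROT_READ | PROT_WRITE, pkey) < 0) {
		pkey_free(pkey);
		return -1;
	}
	return pkey;
}
]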
diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h
index 93d142ad1528..f9c5ac80d59b 100644
--- a/include/linux/pktcdvd.h
+++ b/include/linux/pktcdvd.h
@@ -152,14 +152,6 @@ struct packet_stacked_data
};
#define PSD_POOL_SIZE 64
-struct pktcdvd_kobj
-{
- struct kobject kobj;
- struct pktcdvd_device *pd;
-};
-#define to_pktcdvdkobj(_k) \
- ((struct pktcdvd_kobj*)container_of(_k,struct pktcdvd_kobj,kobj))
-
struct pktcdvd_device
{
struct block_device *bdev; /* dev attached */
@@ -183,10 +175,12 @@ struct pktcdvd_device
spinlock_t lock; /* Serialize access to bio_queue */
struct rb_root bio_queue; /* Work queue of bios we need to handle */
int bio_queue_size; /* Number of nodes in bio_queue */
+ bool congested; /* Someone is waiting for bio_queue_size
+ * to drop. */
sector_t current_sector; /* Keep track of where the elevator is */
atomic_t scan_queue; /* Set to non-zero when pkt_handle_queue */
/* needs to be run. */
- mempool_t *rb_pool; /* mempool for pkt_rb_node allocations */
+ mempool_t rb_pool; /* mempool for pkt_rb_node allocations */
struct packet_iosched iosched;
struct gendisk *disk;
@@ -195,8 +189,6 @@ struct pktcdvd_device
int write_congestion_on;
struct device *dev; /* sysfs pktcdvd[0-7] dev */
- struct pktcdvd_kobj *kobj_stat; /* sysfs pktcdvd[0-7]/stat/ */
- struct pktcdvd_kobj *kobj_wqueue; /* sysfs pktcdvd[0-7]/write_queue/ */
struct dentry *dfs_d_root; /* debugfs: devname directory */
struct dentry *dfs_f_info; /* debugfs: info file */
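[Editor's note: converting rb_pool from a mempool_t pointer to an embedded member changes the setup pattern in drivers/block/pktcdvd.c roughly as sketched below — the pool is initialized in place instead of allocated; the function name is illustrative:

static int sketch_setup_rb_pool(struct pktcdvd_device *pd)
{
	/* Before: pd->rb_pool = mempool_create_kmalloc_pool(...); */
	return mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE,
					 sizeof(struct pkt_rb_node));
}
]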
diff --git a/include/linux/pl320-ipc.h b/include/linux/pl320-ipc.h
index 5161f63ec1c8..4b29e172eea8 100644
--- a/include/linux/pl320-ipc.h
+++ b/include/linux/pl320-ipc.h
@@ -1,15 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
int pl320_ipc_transmit(u32 *data);
diff --git a/include/linux/platform_data/ad5449.h b/include/linux/platform_data/ad5449.h
index bd712bd4b94e..d687ef5726c2 100644
--- a/include/linux/platform_data/ad5449.h
+++ b/include/linux/platform_data/ad5449.h
@@ -1,11 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* AD5415, AD5426, AD5429, AD5432, AD5439, AD5443, AD5449 Digital to Analog
* Converter driver.
*
* Copyright 2012 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
- *
- * Licensed under the GPL-2.
*/
#ifndef __LINUX_PLATFORM_DATA_AD5449_H__
diff --git a/include/linux/platform_data/ad5755.h b/include/linux/platform_data/ad5755.h
deleted file mode 100644
index a5a1cb751874..000000000000
--- a/include/linux/platform_data/ad5755.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright 2012 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-#ifndef __LINUX_PLATFORM_DATA_AD5755_H__
-#define __LINUX_PLATFORM_DATA_AD5755_H__
-
-enum ad5755_mode {
- AD5755_MODE_VOLTAGE_0V_5V = 0,
- AD5755_MODE_VOLTAGE_0V_10V = 1,
- AD5755_MODE_VOLTAGE_PLUSMINUS_5V = 2,
- AD5755_MODE_VOLTAGE_PLUSMINUS_10V = 3,
- AD5755_MODE_CURRENT_4mA_20mA = 4,
- AD5755_MODE_CURRENT_0mA_20mA = 5,
- AD5755_MODE_CURRENT_0mA_24mA = 6,
-};
-
-enum ad5755_dc_dc_phase {
- AD5755_DC_DC_PHASE_ALL_SAME_EDGE = 0,
- AD5755_DC_DC_PHASE_A_B_SAME_EDGE_C_D_OPP_EDGE = 1,
- AD5755_DC_DC_PHASE_A_C_SAME_EDGE_B_D_OPP_EDGE = 2,
- AD5755_DC_DC_PHASE_90_DEGREE = 3,
-};
-
-enum ad5755_dc_dc_freq {
- AD5755_DC_DC_FREQ_250kHZ = 0,
- AD5755_DC_DC_FREQ_410kHZ = 1,
- AD5755_DC_DC_FREQ_650kHZ = 2,
-};
-
-enum ad5755_dc_dc_maxv {
- AD5755_DC_DC_MAXV_23V = 0,
- AD5755_DC_DC_MAXV_24V5 = 1,
- AD5755_DC_DC_MAXV_27V = 2,
- AD5755_DC_DC_MAXV_29V5 = 3,
-};
-
-enum ad5755_slew_rate {
- AD5755_SLEW_RATE_64k = 0,
- AD5755_SLEW_RATE_32k = 1,
- AD5755_SLEW_RATE_16k = 2,
- AD5755_SLEW_RATE_8k = 3,
- AD5755_SLEW_RATE_4k = 4,
- AD5755_SLEW_RATE_2k = 5,
- AD5755_SLEW_RATE_1k = 6,
- AD5755_SLEW_RATE_500 = 7,
- AD5755_SLEW_RATE_250 = 8,
- AD5755_SLEW_RATE_125 = 9,
- AD5755_SLEW_RATE_64 = 10,
- AD5755_SLEW_RATE_32 = 11,
- AD5755_SLEW_RATE_16 = 12,
- AD5755_SLEW_RATE_8 = 13,
- AD5755_SLEW_RATE_4 = 14,
- AD5755_SLEW_RATE_0_5 = 15,
-};
-
-enum ad5755_slew_step_size {
- AD5755_SLEW_STEP_SIZE_1 = 0,
- AD5755_SLEW_STEP_SIZE_2 = 1,
- AD5755_SLEW_STEP_SIZE_4 = 2,
- AD5755_SLEW_STEP_SIZE_8 = 3,
- AD5755_SLEW_STEP_SIZE_16 = 4,
- AD5755_SLEW_STEP_SIZE_32 = 5,
- AD5755_SLEW_STEP_SIZE_64 = 6,
- AD5755_SLEW_STEP_SIZE_128 = 7,
- AD5755_SLEW_STEP_SIZE_256 = 8,
-};
-
-/**
- * struct ad5755_platform_data - AD5755 DAC driver platform data
- * @ext_dc_dc_compenstation_resistor: Whether an external DC-DC converter
- * compensation register is used.
- * @dc_dc_phase: DC-DC converter phase.
- * @dc_dc_freq: DC-DC converter frequency.
- * @dc_dc_maxv: DC-DC maximum allowed boost voltage.
- * @dac.mode: The mode to be used for the DAC output.
- * @dac.ext_current_sense_resistor: Whether an external current sense resistor
- * is used.
- * @dac.enable_voltage_overrange: Whether to enable 20% voltage output overrange.
- * @dac.slew.enable: Whether to enable digital slew.
- * @dac.slew.rate: Slew rate of the digital slew.
- * @dac.slew.step_size: Slew step size of the digital slew.
- **/
-struct ad5755_platform_data {
- bool ext_dc_dc_compenstation_resistor;
- enum ad5755_dc_dc_phase dc_dc_phase;
- enum ad5755_dc_dc_freq dc_dc_freq;
- enum ad5755_dc_dc_maxv dc_dc_maxv;
-
- struct {
- enum ad5755_mode mode;
- bool ext_current_sense_resistor;
- bool enable_voltage_overrange;
- struct {
- bool enable;
- enum ad5755_slew_rate rate;
- enum ad5755_slew_step_size step_size;
- } slew;
- } dac[4];
-};
-
-#endif
diff --git a/include/linux/platform_data/ad5761.h b/include/linux/platform_data/ad5761.h
index 7bd8ed7d978e..69e261e2ca14 100644
--- a/include/linux/platform_data/ad5761.h
+++ b/include/linux/platform_data/ad5761.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* AD5721, AD5721R, AD5761, AD5761R, Voltage Output Digital to Analog Converter
*
* Copyright 2016 Qtechnology A/S
- * 2016 Ricardo Ribalda <ricardo.ribalda@gmail.com>
- *
- * Licensed under the GPL-2.
+ * 2016 Ricardo Ribalda <ribalda@kernel.org>
*/
#ifndef __LINUX_PLATFORM_DATA_AD5761_H__
#define __LINUX_PLATFORM_DATA_AD5761_H__
diff --git a/include/linux/platform_data/ad7266.h b/include/linux/platform_data/ad7266.h
index eabfdcb26992..f0652567afba 100644
--- a/include/linux/platform_data/ad7266.h
+++ b/include/linux/platform_data/ad7266.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* AD7266/65 SPI ADC driver
*
* Copyright 2012 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#ifndef __IIO_ADC_AD7266_H__
@@ -41,14 +40,11 @@ enum ad7266_mode {
* @range: Reference voltage range the device is configured for
* @mode: Sample mode the device is configured for
* @fixed_addr: Whether the address pins are hard-wired
- * @addr_gpios: GPIOs used for controlling the address pins, only used if
- * fixed_addr is set to false.
*/
struct ad7266_platform_data {
enum ad7266_range range;
enum ad7266_mode mode;
bool fixed_addr;
- unsigned int addr_gpios[3];
};
#endif
diff --git a/include/linux/platform_data/ad7291.h b/include/linux/platform_data/ad7291.h
deleted file mode 100644
index bbd89fa51188..000000000000
--- a/include/linux/platform_data/ad7291.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef __IIO_AD7291_H__
-#define __IIO_AD7291_H__
-
-/**
- * struct ad7291_platform_data - AD7291 platform data
- * @use_external_ref: Whether to use an external or internal reference voltage
- */
-struct ad7291_platform_data {
- bool use_external_ref;
-};
-
-#endif
diff --git a/include/linux/platform_data/ad7298.h b/include/linux/platform_data/ad7298.h
deleted file mode 100644
index fbf8adf1363a..000000000000
--- a/include/linux/platform_data/ad7298.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * AD7298 SPI ADC driver
- *
- * Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#ifndef __LINUX_PLATFORM_DATA_AD7298_H__
-#define __LINUX_PLATFORM_DATA_AD7298_H__
-
-/**
- * struct ad7298_platform_data - Platform data for the ad7298 ADC driver
- * @ext_ref: Whether to use an external reference voltage.
- **/
-struct ad7298_platform_data {
- bool ext_ref;
-};
-
-#endif /* IIO_ADC_AD7298_H_ */
diff --git a/include/linux/platform_data/ad7303.h b/include/linux/platform_data/ad7303.h
deleted file mode 100644
index de6a7a6b4bbf..000000000000
--- a/include/linux/platform_data/ad7303.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Analog Devices AD7303 DAC driver
- *
- * Copyright 2013 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#ifndef __IIO_ADC_AD7303_H__
-#define __IIO_ADC_AD7303_H__
-
-/**
- * struct ad7303_platform_data - AD7303 platform data
- * @use_external_ref: If set to true use an external voltage reference connected
- * to the REF pin, otherwise use the internal reference derived from Vdd.
- */
-struct ad7303_platform_data {
- bool use_external_ref;
-};
-
-#endif
diff --git a/include/linux/platform_data/ad7791.h b/include/linux/platform_data/ad7791.h
index f9e4db1b82ae..cc7533049b5b 100644
--- a/include/linux/platform_data/ad7791.h
+++ b/include/linux/platform_data/ad7791.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PLATFORM_DATA_AD7791__
#define __LINUX_PLATFORM_DATA_AD7791__
diff --git a/include/linux/platform_data/ad7793.h b/include/linux/platform_data/ad7793.h
index 7ea6751aae6d..7c697e58f02a 100644
--- a/include/linux/platform_data/ad7793.h
+++ b/include/linux/platform_data/ad7793.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* AD7792/AD7793 SPI ADC driver
*
* Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
*/
#ifndef __LINUX_PLATFORM_DATA_AD7793_H__
#define __LINUX_PLATFORM_DATA_AD7793_H__
@@ -41,7 +40,7 @@ enum ad7793_bias_voltage {
* enum ad7793_refsel - AD7793 reference voltage selection
* @AD7793_REFSEL_REFIN1: External reference applied between REFIN1(+)
* and REFIN1(-).
- * @AD7793_REFSEL_REFIN2: External reference applied between REFIN2(+) and
+ * @AD7793_REFSEL_REFIN2: External reference applied between REFIN2(+)
* and REFIN1(-). Only valid for AD7795/AD7796.
* @AD7793_REFSEL_INTERNAL: Internal 1.17 V reference.
*/
diff --git a/include/linux/platform_data/ad7879.h b/include/linux/platform_data/ad7879.h
deleted file mode 100644
index 69e2e1fd2bc8..000000000000
--- a/include/linux/platform_data/ad7879.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* linux/platform_data/ad7879.h */
-
-/* Touchscreen characteristics vary between boards and models. The
- * platform_data for the device's "struct device" holds this information.
- *
- * It's OK if the min/max values are zero.
- */
-struct ad7879_platform_data {
- u16 model; /* 7879 */
- u16 x_plate_ohms;
- u16 x_min, x_max;
- u16 y_min, y_max;
- u16 pressure_min, pressure_max;
-
- bool swap_xy; /* swap x and y axes */
-
- /* [0..255] 0=OFF Starts at 1=550us and goes
- * all the way to 9.440ms in steps of 35us.
- */
- u8 pen_down_acc_interval;
- /* [0..15] Starts at 0=128us and goes all the
- * way to 4.096ms in steps of 128us.
- */
- u8 first_conversion_delay;
- /* [0..3] 0 = 2us, 1 = 4us, 2 = 8us, 3 = 16us */
- u8 acquisition_time;
- /* [0..3] Average X middle samples 0 = 2, 1 = 4, 2 = 8, 3 = 16 */
- u8 averaging;
- /* [0..3] Perform X measurements 0 = OFF,
- * 1 = 4, 2 = 8, 3 = 16 (median > averaging)
- */
- u8 median;
- /* 1 = AUX/VBAT/GPIO export GPIO to gpiolib
- * requires CONFIG_GPIOLIB
- */
- bool gpio_export;
- /* identifies the first GPIO number handled by this chip;
- * or, if negative, requests dynamic ID allocation.
- */
- s32 gpio_base;
-};
diff --git a/include/linux/platform_data/ad7887.h b/include/linux/platform_data/ad7887.h
index 1e06eac3174d..9b4dca6ae70b 100644
--- a/include/linux/platform_data/ad7887.h
+++ b/include/linux/platform_data/ad7887.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* AD7887 SPI ADC driver
*
* Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef IIO_ADC_AD7887_H_
#define IIO_ADC_AD7887_H_
@@ -14,13 +13,9 @@
* second input channel, and Vref is internally connected to Vdd. If set to
* false the device is used in single channel mode and AIN1/Vref is used as
* VREF input.
- * @use_onchip_ref: Whether to use the onchip reference. If set to true the
- * internal 2.5V reference is used. If set to false a external reference is
- * used.
*/
struct ad7887_platform_data {
bool en_dual;
- bool use_onchip_ref;
};
#endif /* IIO_ADC_AD7887_H_ */
diff --git a/include/linux/platform_data/adau17x1.h b/include/linux/platform_data/adau17x1.h
index 9db1b905df24..27a39cc6faec 100644
--- a/include/linux/platform_data/adau17x1.h
+++ b/include/linux/platform_data/adau17x1.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Driver for ADAU1361/ADAU1461/ADAU1761/ADAU1961/ADAU1381/ADAU1781 codecs
*
* Copyright 2011-2014 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef __LINUX_PLATFORM_DATA_ADAU17X1_H__
diff --git a/include/linux/platform_data/adau1977.h b/include/linux/platform_data/adau1977.h
deleted file mode 100644
index bed11d908f92..000000000000
--- a/include/linux/platform_data/adau1977.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * ADAU1977/ADAU1978/ADAU1979 driver
- *
- * Copyright 2014 Analog Devices Inc.
- * Author: Lars-Peter Clausen <lars@metafoo.de>
- *
- * Licensed under the GPL-2.
- */
-
-#ifndef __LINUX_PLATFORM_DATA_ADAU1977_H__
-#define __LINUX_PLATFORM_DATA_ADAU1977_H__
-
-/**
- * enum adau1977_micbias - ADAU1977 MICBIAS pin voltage setting
- * @ADAU1977_MICBIAS_5V0: MICBIAS is set to 5.0 V
- * @ADAU1977_MICBIAS_5V5: MICBIAS is set to 5.5 V
- * @ADAU1977_MICBIAS_6V0: MICBIAS is set to 6.0 V
- * @ADAU1977_MICBIAS_6V5: MICBIAS is set to 6.5 V
- * @ADAU1977_MICBIAS_7V0: MICBIAS is set to 7.0 V
- * @ADAU1977_MICBIAS_7V5: MICBIAS is set to 7.5 V
- * @ADAU1977_MICBIAS_8V0: MICBIAS is set to 8.0 V
- * @ADAU1977_MICBIAS_8V5: MICBIAS is set to 8.5 V
- * @ADAU1977_MICBIAS_9V0: MICBIAS is set to 9.0 V
- */
-enum adau1977_micbias {
- ADAU1977_MICBIAS_5V0 = 0x0,
- ADAU1977_MICBIAS_5V5 = 0x1,
- ADAU1977_MICBIAS_6V0 = 0x2,
- ADAU1977_MICBIAS_6V5 = 0x3,
- ADAU1977_MICBIAS_7V0 = 0x4,
- ADAU1977_MICBIAS_7V5 = 0x5,
- ADAU1977_MICBIAS_8V0 = 0x6,
- ADAU1977_MICBIAS_8V5 = 0x7,
- ADAU1977_MICBIAS_9V0 = 0x8,
-};
-
-/**
- * struct adau1977_platform_data - Platform configuration data for the ADAU1977
- * @micbias: Specifies the voltage for the MICBIAS pin
- */
-struct adau1977_platform_data {
- enum adau1977_micbias micbias;
-};
-
-#endif
diff --git a/include/linux/platform_data/adp5588.h b/include/linux/platform_data/adp5588.h
deleted file mode 100644
index c2153049cfbd..000000000000
--- a/include/linux/platform_data/adp5588.h
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller
- *
- * Copyright 2009-2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef _ADP5588_H
-#define _ADP5588_H
-
-#define DEV_ID 0x00 /* Device ID */
-#define CFG 0x01 /* Configuration Register1 */
-#define INT_STAT 0x02 /* Interrupt Status Register */
-#define KEY_LCK_EC_STAT 0x03 /* Key Lock and Event Counter Register */
-#define Key_EVENTA 0x04 /* Key Event Register A */
-#define Key_EVENTB 0x05 /* Key Event Register B */
-#define Key_EVENTC 0x06 /* Key Event Register C */
-#define Key_EVENTD 0x07 /* Key Event Register D */
-#define Key_EVENTE 0x08 /* Key Event Register E */
-#define Key_EVENTF 0x09 /* Key Event Register F */
-#define Key_EVENTG 0x0A /* Key Event Register G */
-#define Key_EVENTH 0x0B /* Key Event Register H */
-#define Key_EVENTI 0x0C /* Key Event Register I */
-#define Key_EVENTJ 0x0D /* Key Event Register J */
-#define KP_LCK_TMR 0x0E /* Keypad Lock1 to Lock2 Timer */
-#define UNLOCK1 0x0F /* Unlock Key1 */
-#define UNLOCK2 0x10 /* Unlock Key2 */
-#define GPIO_INT_STAT1 0x11 /* GPIO Interrupt Status */
-#define GPIO_INT_STAT2 0x12 /* GPIO Interrupt Status */
-#define GPIO_INT_STAT3 0x13 /* GPIO Interrupt Status */
-#define GPIO_DAT_STAT1 0x14 /* GPIO Data Status, Read twice to clear */
-#define GPIO_DAT_STAT2 0x15 /* GPIO Data Status, Read twice to clear */
-#define GPIO_DAT_STAT3 0x16 /* GPIO Data Status, Read twice to clear */
-#define GPIO_DAT_OUT1 0x17 /* GPIO DATA OUT */
-#define GPIO_DAT_OUT2 0x18 /* GPIO DATA OUT */
-#define GPIO_DAT_OUT3 0x19 /* GPIO DATA OUT */
-#define GPIO_INT_EN1 0x1A /* GPIO Interrupt Enable */
-#define GPIO_INT_EN2 0x1B /* GPIO Interrupt Enable */
-#define GPIO_INT_EN3 0x1C /* GPIO Interrupt Enable */
-#define KP_GPIO1 0x1D /* Keypad or GPIO Selection */
-#define KP_GPIO2 0x1E /* Keypad or GPIO Selection */
-#define KP_GPIO3 0x1F /* Keypad or GPIO Selection */
-#define GPI_EM1 0x20 /* GPI Event Mode 1 */
-#define GPI_EM2 0x21 /* GPI Event Mode 2 */
-#define GPI_EM3 0x22 /* GPI Event Mode 3 */
-#define GPIO_DIR1 0x23 /* GPIO Data Direction */
-#define GPIO_DIR2 0x24 /* GPIO Data Direction */
-#define GPIO_DIR3 0x25 /* GPIO Data Direction */
-#define GPIO_INT_LVL1 0x26 /* GPIO Edge/Level Detect */
-#define GPIO_INT_LVL2 0x27 /* GPIO Edge/Level Detect */
-#define GPIO_INT_LVL3 0x28 /* GPIO Edge/Level Detect */
-#define Debounce_DIS1 0x29 /* Debounce Disable */
-#define Debounce_DIS2 0x2A /* Debounce Disable */
-#define Debounce_DIS3 0x2B /* Debounce Disable */
-#define GPIO_PULL1 0x2C /* GPIO Pull Disable */
-#define GPIO_PULL2 0x2D /* GPIO Pull Disable */
-#define GPIO_PULL3 0x2E /* GPIO Pull Disable */
-#define CMP_CFG_STAT 0x30 /* Comparator Configuration and Status Register */
-#define CMP_CONFG_SENS1 0x31 /* Sensor1 Comparator Configuration Register */
-#define CMP_CONFG_SENS2 0x32 /* L2 Light Sensor Reference Level, Output Falling for Sensor 1 */
-#define CMP1_LVL2_TRIP 0x33 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 1 */
-#define CMP1_LVL2_HYS 0x34 /* L3 Light Sensor Reference Level, Output Falling For Sensor 1 */
-#define CMP1_LVL3_TRIP 0x35 /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 1 */
-#define CMP1_LVL3_HYS 0x36 /* Sensor 2 Comparator Configuration Register */
-#define CMP2_LVL2_TRIP 0x37 /* L2 Light Sensor Reference Level, Output Falling for Sensor 2 */
-#define CMP2_LVL2_HYS 0x38 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 2 */
-#define CMP2_LVL3_TRIP 0x39 /* L3 Light Sensor Reference Level, Output Falling For Sensor 2 */
-#define CMP2_LVL3_HYS 0x3A /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 2 */
-#define CMP1_ADC_DAT_R1 0x3B /* Comparator 1 ADC data Register1 */
-#define CMP1_ADC_DAT_R2 0x3C /* Comparator 1 ADC data Register2 */
-#define CMP2_ADC_DAT_R1 0x3D /* Comparator 2 ADC data Register1 */
-#define CMP2_ADC_DAT_R2 0x3E /* Comparator 2 ADC data Register2 */
-
-#define ADP5588_DEVICE_ID_MASK 0xF
-
- /* Configuration Register1 */
-#define ADP5588_AUTO_INC (1 << 7)
-#define ADP5588_GPIEM_CFG (1 << 6)
-#define ADP5588_OVR_FLOW_M (1 << 5)
-#define ADP5588_INT_CFG (1 << 4)
-#define ADP5588_OVR_FLOW_IEN (1 << 3)
-#define ADP5588_K_LCK_IM (1 << 2)
-#define ADP5588_GPI_IEN (1 << 1)
-#define ADP5588_KE_IEN (1 << 0)
-
-/* Interrupt Status Register */
-#define ADP5588_CMP2_INT (1 << 5)
-#define ADP5588_CMP1_INT (1 << 4)
-#define ADP5588_OVR_FLOW_INT (1 << 3)
-#define ADP5588_K_LCK_INT (1 << 2)
-#define ADP5588_GPI_INT (1 << 1)
-#define ADP5588_KE_INT (1 << 0)
-
-/* Key Lock and Event Counter Register */
-#define ADP5588_K_LCK_EN (1 << 6)
-#define ADP5588_LCK21 0x30
-#define ADP5588_KEC 0xF
-
-#define ADP5588_MAXGPIO 18
-#define ADP5588_BANK(offs) ((offs) >> 3)
-#define ADP5588_BIT(offs) (1u << ((offs) & 0x7))
-
-/* Put one of these structures in i2c_board_info platform_data */
-
-#define ADP5588_KEYMAPSIZE 80
-
-#define GPI_PIN_ROW0 97
-#define GPI_PIN_ROW1 98
-#define GPI_PIN_ROW2 99
-#define GPI_PIN_ROW3 100
-#define GPI_PIN_ROW4 101
-#define GPI_PIN_ROW5 102
-#define GPI_PIN_ROW6 103
-#define GPI_PIN_ROW7 104
-#define GPI_PIN_COL0 105
-#define GPI_PIN_COL1 106
-#define GPI_PIN_COL2 107
-#define GPI_PIN_COL3 108
-#define GPI_PIN_COL4 109
-#define GPI_PIN_COL5 110
-#define GPI_PIN_COL6 111
-#define GPI_PIN_COL7 112
-#define GPI_PIN_COL8 113
-#define GPI_PIN_COL9 114
-
-#define GPI_PIN_ROW_BASE GPI_PIN_ROW0
-#define GPI_PIN_ROW_END GPI_PIN_ROW7
-#define GPI_PIN_COL_BASE GPI_PIN_COL0
-#define GPI_PIN_COL_END GPI_PIN_COL9
-
-#define GPI_PIN_BASE GPI_PIN_ROW_BASE
-#define GPI_PIN_END GPI_PIN_COL_END
-
-#define ADP5588_GPIMAPSIZE_MAX (GPI_PIN_END - GPI_PIN_BASE + 1)
-
-struct adp5588_gpi_map {
- unsigned short pin;
- unsigned short sw_evt;
-};
-
-struct adp5588_kpad_platform_data {
- int rows; /* Number of rows */
- int cols; /* Number of columns */
- const unsigned short *keymap; /* Pointer to keymap */
- unsigned short keymapsize; /* Keymap size */
- unsigned repeat:1; /* Enable key repeat */
- unsigned en_keylock:1; /* Enable Key Lock feature */
- unsigned short unlock_key1; /* Unlock Key 1 */
- unsigned short unlock_key2; /* Unlock Key 2 */
- const struct adp5588_gpi_map *gpimap;
- unsigned short gpimapsize;
- const struct adp5588_gpio_platform_data *gpio_data;
-};
-
-struct i2c_client; /* forward declaration */
-
-struct adp5588_gpio_platform_data {
- int gpio_start; /* GPIO Chip base # */
- const char *const *names;
- unsigned irq_base; /* interrupt base # */
- unsigned pullup_dis_mask; /* Pull-Up Disable Mask */
- int (*setup)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
- int (*teardown)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
- void *context;
-};
-
-#endif
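
For context, board code consumed this header by embedding the structures above in its i2c_board_info. A minimal sketch, with a made-up keymap and geometry; the device name and I2C address follow common adp5588-keys usage but should be treated as assumptions here:

    #include <linux/i2c.h>
    #include <linux/input.h>
    #include <linux/platform_data/adp5588.h>   /* the header removed above */

    static const unsigned short board_keymap[ADP5588_KEYMAPSIZE] = {
            [0] = KEY_A,            /* key positions are illustrative */
            [1] = KEY_B,
            /* unassigned entries stay 0 (no key) */
    };

    static struct adp5588_kpad_platform_data board_kpad_data = {
            .rows           = 8,
            .cols           = 10,
            .keymap         = board_keymap,
            .keymapsize     = ARRAY_SIZE(board_keymap),
            .repeat         = 1,
    };

    static struct i2c_board_info board_i2c_devs[] __initdata = {
            {
                    I2C_BOARD_INFO("adp5588-keys", 0x34),
                    .platform_data = &board_kpad_data,
            },
    };
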
diff --git a/include/linux/platform_data/adp8860.h b/include/linux/platform_data/adp8860.h
index 0b4d39855c91..523c43740ec6 100644
--- a/include/linux/platform_data/adp8860.h
+++ b/include/linux/platform_data/adp8860.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Definitions and platform data for Analog Devices
* Backlight drivers ADP8860
*
* Copyright 2009-2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef __LINUX_I2C_ADP8860_H
diff --git a/include/linux/platform_data/adp8870.h b/include/linux/platform_data/adp8870.h
index 624dceccbd5b..c5e55df2d809 100644
--- a/include/linux/platform_data/adp8870.h
+++ b/include/linux/platform_data/adp8870.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Definitions and platform data for Analog Devices
* Backlight drivers ADP8870
*
* Copyright 2009-2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef __LINUX_I2C_ADP8870_H
diff --git a/include/linux/platform_data/ads1015.h b/include/linux/platform_data/ads1015.h
deleted file mode 100644
index d5aa2a045669..000000000000
--- a/include/linux/platform_data/ads1015.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Platform Data for ADS1015 12-bit 4-input ADC
- * (C) Copyright 2010
- * Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef LINUX_ADS1015_H
-#define LINUX_ADS1015_H
-
-#define ADS1015_CHANNELS 8
-
-struct ads1015_channel_data {
- bool enabled;
- unsigned int pga;
- unsigned int data_rate;
-};
-
-struct ads1015_platform_data {
- struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
-};
-
-#endif /* LINUX_ADS1015_H */
diff --git a/include/linux/platform_data/ads7828.h b/include/linux/platform_data/ads7828.h
index 3245f45f9d77..0fa4186c6171 100644
--- a/include/linux/platform_data/ads7828.h
+++ b/include/linux/platform_data/ads7828.h
@@ -1,14 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI ADS7828 A/D Converter platform data definition
*
* Copyright (c) 2012 Savoir-faire Linux Inc.
* Vivien Didelot <vivien.didelot@savoirfairelinux.com>
*
- * For further information, see the Documentation/hwmon/ads7828 file.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * For further information, see the Documentation/hwmon/ads7828.rst file.
*/
#ifndef _PDATA_ADS7828_H
diff --git a/include/linux/platform_data/amd_xdma.h b/include/linux/platform_data/amd_xdma.h
new file mode 100644
index 000000000000..b5e23e14bac8
--- /dev/null
+++ b/include/linux/platform_data/amd_xdma.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _PLATDATA_AMD_XDMA_H
+#define _PLATDATA_AMD_XDMA_H
+
+#include <linux/dmaengine.h>
+
+/**
+ * struct xdma_chan_info - DMA channel information
+ * This information is used to match a channel when requesting a DMA channel.
+ * @dir: Channel transfer direction
+ */
+struct xdma_chan_info {
+ enum dma_transfer_direction dir;
+};
+
+#define XDMA_FILTER_PARAM(chan_info) ((void *)(chan_info))
+
+struct dma_slave_map;
+
+/**
+ * struct xdma_platdata - platform specific data for XDMA engine
+ * @max_dma_channels: Maximum DMA channels in each direction
+ * @device_map_cnt: Number of entries in @device_map
+ * @device_map: DMA slave map entries used to match requested channels
+ */
+struct xdma_platdata {
+ u32 max_dma_channels;
+ u32 device_map_cnt;
+ struct dma_slave_map *device_map;
+};
+
+#endif /* _PLATDATA_AMD_XDMA_H */
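
A sketch of how a carrier driver might populate this platform data; the client device name and channel labels are assumptions made up for illustration:

    #include <linux/dmaengine.h>
    #include <linux/platform_data/amd_xdma.h>

    static struct xdma_chan_info h2c_info = { .dir = DMA_MEM_TO_DEV };
    static struct xdma_chan_info c2h_info = { .dir = DMA_DEV_TO_MEM };

    static struct dma_slave_map xdma_map[] = {
            { "my-client", "h2c", XDMA_FILTER_PARAM(&h2c_info) },
            { "my-client", "c2h", XDMA_FILTER_PARAM(&c2h_info) },
    };

    static struct xdma_platdata xdma_pdata = {
            .max_dma_channels = 4,
            .device_map_cnt   = ARRAY_SIZE(xdma_map),
            .device_map       = xdma_map,
    };

When a slave later calls dma_request_chan(dev, "h2c"), the dmaengine core resolves the map entry and hands the xdma_chan_info wrapped by XDMA_FILTER_PARAM() to the XDMA driver for direction matching.
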
diff --git a/include/linux/platform_data/ams-delta-fiq.h b/include/linux/platform_data/ams-delta-fiq.h
new file mode 100644
index 000000000000..cf4589ccb720
--- /dev/null
+++ b/include/linux/platform_data/ams-delta-fiq.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * include/linux/platform_data/ams-delta-fiq.h
+ *
+ * Taken from the original Amstrad modifications to fiq.h
+ *
+ * Copyright (c) 2004 Amstrad Plc
+ * Copyright (c) 2006 Matt Callow
+ * Copyright (c) 2010 Janusz Krzysztofik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_PLATFORM_DATA_AMS_DELTA_FIQ_H
+#define __LINUX_PLATFORM_DATA_AMS_DELTA_FIQ_H
+
+/*
+ * These are the offsets from the beginning of the fiq_buffer. They are put here
+ * since the buffer and header need to be accessed by drivers servicing devices
+ * which generate GPIO interrupts - e.g. keyboard, modem, hook switch.
+ */
+#define FIQ_MASK 0
+#define FIQ_STATE 1
+#define FIQ_KEYS_CNT 2
+#define FIQ_TAIL_OFFSET 3
+#define FIQ_HEAD_OFFSET 4
+#define FIQ_BUF_LEN 5
+#define FIQ_KEY 6
+#define FIQ_MISSED_KEYS 7
+#define FIQ_BUFFER_START 8
+#define FIQ_GPIO_INT_MASK 9
+#define FIQ_KEYS_HICNT 10
+#define FIQ_IRQ_PEND 11
+#define FIQ_SIR_CODE_L1 12
+#define IRQ_SIR_CODE_L2 13
+
+#define FIQ_CNT_INT_00 14
+#define FIQ_CNT_INT_KEY 15
+#define FIQ_CNT_INT_MDM 16
+#define FIQ_CNT_INT_03 17
+#define FIQ_CNT_INT_HSW 18
+#define FIQ_CNT_INT_05 19
+#define FIQ_CNT_INT_06 20
+#define FIQ_CNT_INT_07 21
+#define FIQ_CNT_INT_08 22
+#define FIQ_CNT_INT_09 23
+#define FIQ_CNT_INT_10 24
+#define FIQ_CNT_INT_11 25
+#define FIQ_CNT_INT_12 26
+#define FIQ_CNT_INT_13 27
+#define FIQ_CNT_INT_14 28
+#define FIQ_CNT_INT_15 29
+
+#define FIQ_CIRC_BUFF 30 /* Start of circular buffer */
+
+#endif
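
These offsets index a flat array of words shared with the FIQ handler. A driver-side sketch, assuming the head/tail entries track a ring buffer as their names suggest (buffer setup by platform code, not shown):

    static unsigned int *fiq_buffer;    /* mapped elsewhere by platform code */

    static bool fiq_event_pending(void)
    {
            return fiq_buffer[FIQ_HEAD_OFFSET] != fiq_buffer[FIQ_TAIL_OFFSET];
    }
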
diff --git a/include/linux/platform_data/apds990x.h b/include/linux/platform_data/apds990x.h
index d186fcc5d257..94dfbaa365e1 100644
--- a/include/linux/platform_data/apds990x.h
+++ b/include/linux/platform_data/apds990x.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* This file is part of the APDS990x sensor driver.
* Chip is combined proximity and ambient light sensor.
@@ -5,21 +6,6 @@
* Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
*
* Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
*/
#ifndef __APDS990X_H__
diff --git a/include/linux/platform_data/arm-ux500-pm.h b/include/linux/platform_data/arm-ux500-pm.h
index 8dff64b29ec0..9f6f01cfdd6c 100644
--- a/include/linux/platform_data/arm-ux500-pm.h
+++ b/include/linux/platform_data/arm-ux500-pm.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) ST-Ericsson SA 2010-2013
* Author: Rickard Andersson <rickard.andersson@stericsson.com> for
* ST-Ericsson.
* Author: Daniel Lezcano <daniel.lezcano@linaro.org> for Linaro.
- * License terms: GNU General Public License (GPL) version 2
- *
*/
#ifndef ARM_UX500_PM_H
diff --git a/include/linux/platform_data/asoc-imx-ssi.h b/include/linux/platform_data/asoc-imx-ssi.h
index 92c7fd72f636..902851aeb0b4 100644
--- a/include/linux/platform_data/asoc-imx-ssi.h
+++ b/include/linux/platform_data/asoc-imx-ssi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MACH_SSI_H
#define __MACH_SSI_H
diff --git a/include/linux/platform_data/asoc-kirkwood.h b/include/linux/platform_data/asoc-kirkwood.h
index d6a55bd2e578..d442cefa3928 100644
--- a/include/linux/platform_data/asoc-kirkwood.h
+++ b/include/linux/platform_data/asoc-kirkwood.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PLAT_AUDIO_H
#define __PLAT_AUDIO_H
diff --git a/include/linux/platform_data/asoc-mx27vis.h b/include/linux/platform_data/asoc-mx27vis.h
deleted file mode 100644
index 409adcd04d04..000000000000
--- a/include/linux/platform_data/asoc-mx27vis.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __PLATFORM_DATA_ASOC_MX27VIS_H
-#define __PLATFORM_DATA_ASOC_MX27VIS_H
-
-struct snd_mx27vis_platform_data {
- int amp_gain0_gpio;
- int amp_gain1_gpio;
- int amp_mutel_gpio;
- int amp_muter_gpio;
-};
-
-#endif /* __PLATFORM_DATA_ASOC_MX27VIS_H */
diff --git a/include/linux/platform_data/asoc-palm27x.h b/include/linux/platform_data/asoc-palm27x.h
deleted file mode 100644
index 58afb30d5298..000000000000
--- a/include/linux/platform_data/asoc-palm27x.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _INCLUDE_PALMASOC_H_
-#define _INCLUDE_PALMASOC_H_
-
-struct palm27x_asoc_info {
- int jack_gpio;
-};
-
-#endif
diff --git a/include/linux/platform_data/asoc-pxa.h b/include/linux/platform_data/asoc-pxa.h
new file mode 100644
index 000000000000..327454cd8246
--- /dev/null
+++ b/include/linux/platform_data/asoc-pxa.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SOC_PXA_AUDIO_H__
+#define __SOC_PXA_AUDIO_H__
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/ac97_codec.h>
+
+/*
+ * @reset_gpio: AC97 reset gpio (normally gpio113 or gpio95)
+ * a -1 value means no gpio will be used for reset
+ * @codec_pdata: AC97 codec platform_data
+ *
+ * reset_gpio should only be specified for pxa27x CPUs where a silicon
+ * bug prevents correct operation of the reset line. If not specified,
+ * the default behaviour on these CPUs is to consider gpio 113 as the
+ * AC97 reset line, which is the default on most boards.
+ */
+typedef struct {
+ int (*startup)(struct snd_pcm_substream *, void *);
+ void (*shutdown)(struct snd_pcm_substream *, void *);
+ void (*suspend)(void *);
+ void (*resume)(void *);
+ void *priv;
+ int reset_gpio;
+ void *codec_pdata[AC97_BUS_MAX_DEVICES];
+} pxa2xx_audio_ops_t;
+
+extern void pxa_set_ac97_info(pxa2xx_audio_ops_t *ops);
+
+#endif
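
Board files provide these ops statically; a minimal sketch with placeholder callbacks and GPIO number:

    static int board_audio_startup(struct snd_pcm_substream *substream,
                                   void *priv)
    {
            /* e.g. power up the external codec here */
            return 0;
    }

    static pxa2xx_audio_ops_t board_audio_ops = {
            .startup    = board_audio_startup,
            .reset_gpio = 95,   /* or -1 to opt out of the reset workaround */
    };

    /* called from the board init code: */
    /* pxa_set_ac97_info(&board_audio_ops); */
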
diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h
index 90641a5daaf0..f9c00f839e9f 100644
--- a/include/linux/platform_data/asoc-s3c.h
+++ b/include/linux/platform_data/asoc-s3c.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2009 Samsung Electronics Co. Ltd
* Author: Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/* The machine init code calls s3c*_ac97_setup_gpio with
diff --git a/include/linux/platform_data/asoc-s3c24xx_simtec.h b/include/linux/platform_data/asoc-s3c24xx_simtec.h
deleted file mode 100644
index d220e54123aa..000000000000
--- a/include/linux/platform_data/asoc-s3c24xx_simtec.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright 2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Simtec Audio support.
-*/
-
-/**
- * struct s3c24xx_audio_simtec_pdata - platform data for simtec audio
- * @use_mpllin: Select codec clock from MPLLin
- * @output_cdclk: Need to output CDCLK to the codec
- * @have_mic: Set if we have a MIC socket
- * @have_lout: Set if we have a LineOut socket
- * @amp_gpio: GPIO pin to enable the AMP
- * @amp_gain: Option GPIO to control AMP gain
- */
-struct s3c24xx_audio_simtec_pdata {
- unsigned int use_mpllin:1;
- unsigned int output_cdclk:1;
-
- unsigned int have_mic:1;
- unsigned int have_lout:1;
-
- int amp_gpio;
- int amp_gain[2];
-
- void (*startup)(void);
-};
diff --git a/include/linux/platform_data/asoc-ti-mcbsp.h b/include/linux/platform_data/asoc-ti-mcbsp.h
index e684543254f3..cc8197760015 100644
--- a/include/linux/platform_data/asoc-ti-mcbsp.h
+++ b/include/linux/platform_data/asoc-ti-mcbsp.h
@@ -1,23 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Defines for Multi-Channel Buffered Serial Port
*
* Copyright (C) 2002 RidgeRun, Inc.
* Author: Steve Johnson
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __ASOC_TI_MCBSP_H
#define __ASOC_TI_MCBSP_H
@@ -25,10 +11,6 @@
#include <linux/spinlock.h>
#include <linux/clk.h>
-#define MCBSP_CONFIG_TYPE2 0x2
-#define MCBSP_CONFIG_TYPE3 0x3
-#define MCBSP_CONFIG_TYPE4 0x4
-
/* Platform specific configuration */
struct omap_mcbsp_ops {
void (*request)(unsigned int);
@@ -47,14 +29,6 @@ struct omap_mcbsp_platform_data {
int (*force_ick_on)(struct clk *clk, bool force_on);
};
-/**
- * omap_mcbsp_dev_attr - OMAP McBSP device attributes for omap_hwmod
- * @sidetone: name of the sidetone device
- */
-struct omap_mcbsp_dev_attr {
- const char *sidetone;
-};
-
void omap3_mcbsp_init_pdata_callback(struct omap_mcbsp_platform_data *pdata);
#endif
diff --git a/include/linux/platform_data/asoc-ux500-msp.h b/include/linux/platform_data/asoc-ux500-msp.h
deleted file mode 100644
index 2f34bb98fe2a..000000000000
--- a/include/linux/platform_data/asoc-ux500-msp.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
- * License terms: GNU General Public License (GPL), version 2.
- */
-
-#ifndef __MSP_H
-#define __MSP_H
-
-#include <linux/platform_data/dma-ste-dma40.h>
-
-/* Platform data structure for a MSP I2S-device */
-struct msp_i2s_platform_data {
- int id;
- struct stedma40_chan_cfg *msp_i2s_dma_rx;
- struct stedma40_chan_cfg *msp_i2s_dma_tx;
-};
-
-#endif
diff --git a/include/linux/platform_data/at24.h b/include/linux/platform_data/at24.h
deleted file mode 100644
index 271a4e25af67..000000000000
--- a/include/linux/platform_data/at24.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * at24.h - platform_data for the at24 (generic eeprom) driver
- * (C) Copyright 2008 by Pengutronix
- * (C) Copyright 2012 by Wolfram Sang
- * same license as the driver
- */
-
-#ifndef _LINUX_AT24_H
-#define _LINUX_AT24_H
-
-#include <linux/types.h>
-#include <linux/nvmem-consumer.h>
-#include <linux/bitops.h>
-
-/**
- * struct at24_platform_data - data to set up at24 (generic eeprom) driver
- * @byte_len: size of eeprom in bytes
- * @page_size: number of bytes which can be written in one go
- * @flags: tunable options, check AT24_FLAG_* defines
- * @setup: an optional callback invoked after eeprom is probed; enables kernel
- *	code to access eeprom via nvmem, see example
- * @context: optional parameter passed to setup()
- *
- * If you set up a custom eeprom type, please double-check the parameters.
- * Especially page_size needs extra care, as you risk data loss if your value
- * is bigger than what the chip actually supports!
- *
- * An example in pseudo code for a setup() callback:
- *
- * void get_mac_addr(struct nvmem_device *nvmem, void *context)
- * {
- * u8 *mac_addr = ethernet_pdata->mac_addr;
- * off_t offset = context;
- *
- * // Read MAC addr from EEPROM
- * if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN)
- * pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr);
- * }
- *
- * This function pointer and context can now be set up in at24_platform_data.
- */
-
-struct at24_platform_data {
- u32 byte_len; /* size (sum of all addr) */
- u16 page_size; /* for writes */
- u8 flags;
-#define AT24_FLAG_ADDR16 BIT(7) /* address pointer is 16 bit */
-#define AT24_FLAG_READONLY BIT(6) /* sysfs-entry will be read-only */
-#define AT24_FLAG_IRUGO BIT(5) /* sysfs-entry will be world-readable */
-#define AT24_FLAG_TAKE8ADDR BIT(4) /* take always 8 addresses (24c00) */
-#define AT24_FLAG_SERIAL BIT(3) /* factory-programmed serial number */
-#define AT24_FLAG_MAC BIT(2) /* factory-programmed mac address */
-
- void (*setup)(struct nvmem_device *nvmem, void *context);
- void *context;
-};
-
-#endif /* _LINUX_AT24_H */
diff --git a/include/linux/platform_data/at91_adc.h b/include/linux/platform_data/at91_adc.h
deleted file mode 100644
index 7819fc787731..000000000000
--- a/include/linux/platform_data/at91_adc.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2011 Free Electrons
- *
- * Licensed under the GPLv2 or later.
- */
-
-#ifndef _AT91_ADC_H_
-#define _AT91_ADC_H_
-
-enum atmel_adc_ts_type {
- ATMEL_ADC_TOUCHSCREEN_NONE = 0,
- ATMEL_ADC_TOUCHSCREEN_4WIRE = 4,
- ATMEL_ADC_TOUCHSCREEN_5WIRE = 5,
-};
-
-/**
- * struct at91_adc_trigger - description of triggers
- * @name: name of the trigger advertised to the user
- * @value: value to set in the ADC's trigger setup register
- *	to enable the trigger
- * @is_external: Does the trigger rely on an external pin?
- */
-struct at91_adc_trigger {
- const char *name;
- u8 value;
- bool is_external;
-};
-
-/**
- * struct at91_adc_data - platform data for ADC driver
- * @channels_used: channels in use on the board as a bitmask
- * @startup_time: startup time of the ADC in microseconds
- * @trigger_list: Triggers available in the ADC
- * @trigger_number: Number of triggers available in the ADC
- * @use_external_triggers: does the board have external triggers available
- * @vref: Reference voltage for the ADC in millivolts
- * @touchscreen_type: If a touchscreen is connected, its type (4 or 5 wires)
- */
-struct at91_adc_data {
- unsigned long channels_used;
- u8 startup_time;
- struct at91_adc_trigger *trigger_list;
- u8 trigger_number;
- bool use_external_triggers;
- u16 vref;
- enum atmel_adc_ts_type touchscreen_type;
-};
-
-extern void __init at91_add_device_adc(struct at91_adc_data *data);
-#endif
diff --git a/include/linux/platform_data/ata-pxa.h b/include/linux/platform_data/ata-pxa.h
index 6cf7df1d5830..0b65fd0aa509 100644
--- a/include/linux/platform_data/ata-pxa.h
+++ b/include/linux/platform_data/ata-pxa.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Generic PXA PATA driver
*
* Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __MACH_PATA_PXA_H__
diff --git a/include/linux/platform_data/ata-samsung_cf.h b/include/linux/platform_data/ata-samsung_cf.h
deleted file mode 100644
index 748e71642c4a..000000000000
--- a/include/linux/platform_data/ata-samsung_cf.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Samsung CF-ATA platform_device info
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ATA_SAMSUNG_CF_H
-#define __ATA_SAMSUNG_CF_H __FILE__
-
-/**
- * struct s3c_ide_platdata - S3C IDE driver platform data.
- * @setup_gpio: Setup the external GPIO pins to the right state for data
- * transfer in true-ide mode.
- */
-struct s3c_ide_platdata {
- void (*setup_gpio)(void);
-};
-
-/*
- * s3c_ide_set_platdata() - Setup the platform specific data for IDE driver.
- * @pdata: Platform data for IDE driver.
- */
-extern void s3c_ide_set_platdata(struct s3c_ide_platdata *pdata);
-
-/* architecture-specific IDE configuration */
-extern void s3c64xx_ide_setup_gpio(void);
-extern void s5pv210_ide_setup_gpio(void);
-
-#endif /*__ATA_SAMSUNG_CF_H */
diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h
index cdceb4d4ef9d..73f63be509c4 100644
--- a/include/linux/platform_data/atmel.h
+++ b/include/linux/platform_data/atmel.h
@@ -1,24 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* atmel platform data
- *
- * GPL v2 Only
*/
#ifndef __ATMEL_H__
#define __ATMEL_H__
- /* Compact Flash */
-struct at91_cf_data {
- int irq_pin; /* I/O IRQ */
- int det_pin; /* Card detect */
- int vcc_pin; /* power switching */
- int rst_pin; /* card reset */
- u8 chipselect; /* EBI Chip Select number */
- u8 flags;
-#define AT91_CF_TRUE_IDE 0x01
-#define AT91_IDE_SWAP_A0_A2 0x02
-};
-
/* FIXME: this needs a better location, but gets stuff building again */
#ifdef CONFIG_ATMEL_PM
extern int at91_suspend_entering_slow_clock(void);
diff --git a/include/linux/platform_data/atmel_mxt_ts.h b/include/linux/platform_data/atmel_mxt_ts.h
deleted file mode 100644
index 695035a8d7fb..000000000000
--- a/include/linux/platform_data/atmel_mxt_ts.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Atmel maXTouch Touchscreen driver
- *
- * Copyright (C) 2010 Samsung Electronics Co.Ltd
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H
-#define __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H
-
-#include <linux/types.h>
-
-enum mxt_suspend_mode {
- MXT_SUSPEND_DEEP_SLEEP = 0,
- MXT_SUSPEND_T9_CTRL = 1,
-};
-
-/* The platform data for the Atmel maXTouch touchscreen driver */
-struct mxt_platform_data {
- unsigned long irqflags;
- u8 t19_num_keys;
- const unsigned int *t19_keymap;
- enum mxt_suspend_mode suspend_mode;
-};
-
-#endif /* __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H */
diff --git a/include/linux/platform_data/b53.h b/include/linux/platform_data/b53.h
index 69d279c0da96..6f6fed2b171d 100644
--- a/include/linux/platform_data/b53.h
+++ b/include/linux/platform_data/b53.h
@@ -19,9 +19,13 @@
#ifndef __B53_H
#define __B53_H
-#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/platform_data/dsa.h>
struct b53_platform_data {
+ /* Must be first such that dsa_register_switch() can access it */
+ struct dsa_chip_data cd;
+
u32 chip_id;
u16 enabled_ports;
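
The "must be first" constraint works because a pointer to the structure and a pointer to its first member share the same address, so code that expects a plain dsa_chip_data in dev->platform_data still works. A sketch (chip id and port mask are illustrative):

    static struct b53_platform_data b53_pdata = {
            .cd            = { /* dsa_chip_data fields */ },
            .chip_id       = 0x5325,
            .enabled_ports = 0x1f,
    };

    /* both views of the same platform_data pointer are valid */
    struct dsa_chip_data *cd = (struct dsa_chip_data *)&b53_pdata;
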
diff --git a/include/linux/platform_data/bcm7038_wdt.h b/include/linux/platform_data/bcm7038_wdt.h
new file mode 100644
index 000000000000..e18cfd9ec8f9
--- /dev/null
+++ b/include/linux/platform_data/bcm7038_wdt.h
@@ -0,0 +1,8 @@
+#ifndef __BCM7038_WDT_PDATA_H
+#define __BCM7038_WDT_PDATA_H
+
+struct bcm7038_wdt_platform_data {
+ const char *clk_name;
+};
+
+#endif /* __BCM7038_WDT_PDATA_H */
diff --git a/include/linux/platform_data/bcmgenet.h b/include/linux/platform_data/bcmgenet.h
index 26af54321958..d8f8738629d2 100644
--- a/include/linux/platform_data/bcmgenet.h
+++ b/include/linux/platform_data/bcmgenet.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PLATFORM_DATA_BCMGENET_H__
#define __LINUX_PLATFORM_DATA_BCMGENET_H__
diff --git a/include/linux/platform_data/bd6107.h b/include/linux/platform_data/bd6107.h
index 671d6502d241..54a06a4d2618 100644
--- a/include/linux/platform_data/bd6107.h
+++ b/include/linux/platform_data/bd6107.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* bd6107.h - Rohm BD6107 LEDs Driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __BD6107_H__
#define __BD6107_H__
@@ -12,7 +9,6 @@ struct device;
struct bd6107_platform_data {
struct device *fbdev;
- int reset; /* Reset GPIO */
unsigned int def_value;
};
diff --git a/include/linux/platform_data/bfin_rotary.h b/include/linux/platform_data/bfin_rotary.h
deleted file mode 100644
index 98829370fee2..000000000000
--- a/include/linux/platform_data/bfin_rotary.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * board initialization should put one of these structures into platform_data
- * and place the bfin-rotary onto platform_bus named "bfin-rotary".
- *
- * Copyright 2008-2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef _BFIN_ROTARY_H
-#define _BFIN_ROTARY_H
-
-/* mode bitmasks */
-#define ROT_QUAD_ENC CNTMODE_QUADENC /* quadrature/grey code encoder mode */
-#define ROT_BIN_ENC CNTMODE_BINENC /* binary encoder mode */
-#define ROT_UD_CNT CNTMODE_UDCNT /* rotary counter mode */
-#define ROT_DIR_CNT CNTMODE_DIRCNT /* direction counter mode */
-
-#define ROT_DEBE DEBE /* Debounce Enable */
-
-#define ROT_CDGINV CDGINV /* CDG Pin Polarity Invert */
-#define ROT_CUDINV CUDINV /* CUD Pin Polarity Invert */
-#define ROT_CZMINV CZMINV /* CZM Pin Polarity Invert */
-
-struct bfin_rotary_platform_data {
- /* set rotary UP KEY_### or BTN_### in case you prefer
- * bfin-rotary to send EV_KEY otherwise set 0
- */
- unsigned int rotary_up_key;
- /* set rotary DOWN KEY_### or BTN_### in case you prefer
- * bfin-rotary to send EV_KEY otherwise set 0
- */
- unsigned int rotary_down_key;
- /* set rotary BUTTON KEY_### or BTN_### */
- unsigned int rotary_button_key;
- /* set rotary Relative Axis REL_### in case you prefer
- * bfin-rotary to send EV_REL otherwise set 0
- */
- unsigned int rotary_rel_code;
- unsigned short debounce; /* 0..17 */
- unsigned short mode;
- unsigned short pm_wakeup;
- unsigned short *pin_list;
-};
-
-/* CNT_CONFIG bitmasks */
-#define CNTE (1 << 0) /* Counter Enable */
-#define DEBE (1 << 1) /* Debounce Enable */
-#define CDGINV (1 << 4) /* CDG Pin Polarity Invert */
-#define CUDINV (1 << 5) /* CUD Pin Polarity Invert */
-#define CZMINV (1 << 6) /* CZM Pin Polarity Invert */
-#define CNTMODE_SHIFT 8
-#define CNTMODE (0x7 << CNTMODE_SHIFT) /* Counter Operating Mode */
-#define ZMZC (1 << 1) /* CZM Zeroes Counter Enable */
-#define BNDMODE_SHIFT 12
-#define BNDMODE (0x3 << BNDMODE_SHIFT) /* Boundary register Mode */
-#define INPDIS (1 << 15) /* CUG and CDG Input Disable */
-
-#define CNTMODE_QUADENC (0 << CNTMODE_SHIFT) /* quadrature encoder mode */
-#define CNTMODE_BINENC (1 << CNTMODE_SHIFT) /* binary encoder mode */
-#define CNTMODE_UDCNT (2 << CNTMODE_SHIFT) /* up/down counter mode */
-#define CNTMODE_DIRCNT (4 << CNTMODE_SHIFT) /* direction counter mode */
-#define CNTMODE_DIRTMR (5 << CNTMODE_SHIFT) /* direction timer mode */
-
-#define BNDMODE_COMP (0 << BNDMODE_SHIFT) /* boundary compare mode */
-#define BNDMODE_ZERO (1 << BNDMODE_SHIFT) /* boundary compare and zero mode */
-#define BNDMODE_CAPT (2 << BNDMODE_SHIFT) /* boundary capture mode */
-#define BNDMODE_AEXT (3 << BNDMODE_SHIFT) /* boundary auto-extend mode */
-
-/* CNT_IMASK bitmasks */
-#define ICIE (1 << 0) /* Illegal Gray/Binary Code Interrupt Enable */
-#define UCIE (1 << 1) /* Up count Interrupt Enable */
-#define DCIE (1 << 2) /* Down count Interrupt Enable */
-#define MINCIE (1 << 3) /* Min Count Interrupt Enable */
-#define MAXCIE (1 << 4) /* Max Count Interrupt Enable */
-#define COV31IE (1 << 5) /* Bit 31 Overflow Interrupt Enable */
-#define COV15IE (1 << 6) /* Bit 15 Overflow Interrupt Enable */
-#define CZEROIE (1 << 7) /* Count to Zero Interrupt Enable */
-#define CZMIE (1 << 8) /* CZM Pin Interrupt Enable */
-#define CZMEIE (1 << 9) /* CZM Error Interrupt Enable */
-#define CZMZIE (1 << 10) /* CZM Zeroes Counter Interrupt Enable */
-
-/* CNT_STATUS bitmasks */
-#define ICII (1 << 0) /* Illegal Gray/Binary Code Interrupt Identifier */
-#define UCII (1 << 1) /* Up count Interrupt Identifier */
-#define DCII (1 << 2) /* Down count Interrupt Identifier */
-#define MINCII (1 << 3) /* Min Count Interrupt Identifier */
-#define MAXCII (1 << 4) /* Max Count Interrupt Identifier */
-#define COV31II (1 << 5) /* Bit 31 Overflow Interrupt Identifier */
-#define COV15II (1 << 6) /* Bit 15 Overflow Interrupt Identifier */
-#define CZEROII (1 << 7) /* Count to Zero Interrupt Identifier */
-#define CZMII (1 << 8) /* CZM Pin Interrupt Identifier */
-#define CZMEII (1 << 9) /* CZM Error Interrupt Identifier */
-#define CZMZII (1 << 10) /* CZM Zeroes Counter Interrupt Identifier */
-
-/* CNT_COMMAND bitmasks */
-#define W1LCNT 0xf /* Load Counter Register */
-#define W1LMIN 0xf0 /* Load Min Register */
-#define W1LMAX 0xf00 /* Load Max Register */
-#define W1ZMONCE (1 << 12) /* Enable CZM Clear Counter Once */
-
-#define W1LCNT_ZERO (1 << 0) /* write 1 to load CNT_COUNTER with zero */
-#define W1LCNT_MIN (1 << 2) /* write 1 to load CNT_COUNTER from CNT_MIN */
-#define W1LCNT_MAX (1 << 3) /* write 1 to load CNT_COUNTER from CNT_MAX */
-
-#define W1LMIN_ZERO (1 << 4) /* write 1 to load CNT_MIN with zero */
-#define W1LMIN_CNT (1 << 5) /* write 1 to load CNT_MIN from CNT_COUNTER */
-#define W1LMIN_MAX (1 << 7) /* write 1 to load CNT_MIN from CNT_MAX */
-
-#define W1LMAX_ZERO (1 << 8) /* write 1 to load CNT_MAX with zero */
-#define W1LMAX_CNT (1 << 9) /* write 1 to load CNT_MAX from CNT_COUNTER */
-#define W1LMAX_MIN (1 << 10) /* write 1 to load CNT_MAX from CNT_MIN */
-
-/* CNT_DEBOUNCE bitmasks */
-#define DPRESCALE 0xf /* Debounce Prescale */
-
-#endif
diff --git a/include/linux/platform_data/bh1770glc.h b/include/linux/platform_data/bh1770glc.h
index 8b5e2df36c72..cbb613915f0d 100644
--- a/include/linux/platform_data/bh1770glc.h
+++ b/include/linux/platform_data/bh1770glc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver.
* Chip is combined proximity and ambient light sensor.
@@ -5,21 +6,6 @@
* Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
*
* Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
*/
#ifndef __BH1770_H__
diff --git a/include/linux/platform_data/brcmfmac.h b/include/linux/platform_data/brcmfmac.h
index 1d30bf278231..f922a192fe58 100644
--- a/include/linux/platform_data/brcmfmac.h
+++ b/include/linux/platform_data/brcmfmac.h
@@ -125,7 +125,7 @@ struct brcmfmac_pd_cc_entry {
*/
struct brcmfmac_pd_cc {
int table_size;
- struct brcmfmac_pd_cc_entry table[0];
+ struct brcmfmac_pd_cc_entry table[];
};
/**
@@ -178,7 +178,7 @@ struct brcmfmac_platform_data {
void (*power_off)(void);
char *fw_alternative_path;
int device_count;
- struct brcmfmac_pd_device devices[0];
+ struct brcmfmac_pd_device devices[];
};
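
The [0] to [] changes are the standard flexible-array-member conversion; callers then size allocations with struct_size(), roughly as in this sketch:

    struct brcmfmac_pd_cc *cc;

    cc = kzalloc(struct_size(cc, table, n_entries), GFP_KERNEL);
    if (cc)
            cc->table_size = n_entries;
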
diff --git a/include/linux/platform_data/brcmnand.h b/include/linux/platform_data/brcmnand.h
new file mode 100644
index 000000000000..8b8777985dce
--- /dev/null
+++ b/include/linux/platform_data/brcmnand.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef BRCMNAND_PLAT_DATA_H
+#define BRCMNAND_PLAT_DATA_H
+
+struct brcmnand_platform_data {
+ int chip_select;
+ const char * const *part_probe_types;
+ unsigned int ecc_stepsize;
+ unsigned int ecc_strength;
+};
+
+#endif /* BRCMNAND_PLAT_DATA_H */
diff --git a/include/linux/platform_data/bt-nokia-h4p.h b/include/linux/platform_data/bt-nokia-h4p.h
deleted file mode 100644
index 30d169dfadf3..000000000000
--- a/include/linux/platform_data/bt-nokia-h4p.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * This file is part of Nokia H4P bluetooth driver
- *
- * Copyright (C) 2010 Nokia Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-
-/**
- * struct hci_h4p_platform data - hci_h4p Platform data structure
- */
-struct hci_h4p_platform_data {
- int chip_type;
- int bt_sysclk;
- unsigned int bt_wakeup_gpio;
- unsigned int host_wakeup_gpio;
- unsigned int reset_gpio;
- int reset_gpio_shared;
- unsigned int uart_irq;
- phys_addr_t uart_base;
- const char *uart_iclk;
- const char *uart_fclk;
- void (*set_pm_limits)(struct device *dev, bool set);
-};
diff --git a/include/linux/platform_data/clk-da8xx-cfgchip.h b/include/linux/platform_data/clk-da8xx-cfgchip.h
new file mode 100644
index 000000000000..de0f77d38669
--- /dev/null
+++ b/include/linux/platform_data/clk-da8xx-cfgchip.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * clk-da8xx-cfgchip - TI DaVinci DA8xx CFGCHIP clock driver
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_CLK_DA8XX_CFGCHIP_H__
+#define __LINUX_PLATFORM_DATA_CLK_DA8XX_CFGCHIP_H__
+
+#include <linux/regmap.h>
+
+/**
+ * da8xx_cfgchip_clk_platform_data
+ * @cfgchip: CFGCHIP syscon regmap
+ */
+struct da8xx_cfgchip_clk_platform_data {
+ struct regmap *cfgchip;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_CLK_DA8XX_CFGCHIP_H__ */
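
Machine code hands the CFGCHIP syscon to this driver roughly as follows; the compatible string and the device registration step are assumptions in this sketch:

    #include <linux/mfd/syscon.h>

    static int __init board_register_cfgchip_clk(void)
    {
            static struct da8xx_cfgchip_clk_platform_data pdata;
            struct regmap *cfgchip;

            cfgchip = syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
            if (IS_ERR(cfgchip))
                    return PTR_ERR(cfgchip);

            pdata.cfgchip = cfgchip;
            /* then register the clock platform device with &pdata */
            return 0;
    }
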
diff --git a/include/linux/platform_data/clk-davinci-pll.h b/include/linux/platform_data/clk-davinci-pll.h
new file mode 100644
index 000000000000..e55dab1d578b
--- /dev/null
+++ b/include/linux/platform_data/clk-davinci-pll.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLL clock driver for TI Davinci SoCs
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__
+#define __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__
+
+#include <linux/regmap.h>
+
+/**
+ * davinci_pll_platform_data
+ * @cfgchip: CFGCHIP syscon regmap
+ */
+struct davinci_pll_platform_data {
+ struct regmap *cfgchip;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__ */
diff --git a/include/linux/platform_data/clk-fch.h b/include/linux/platform_data/clk-fch.h
new file mode 100644
index 000000000000..11a2a23fd9b2
--- /dev/null
+++ b/include/linux/platform_data/clk-fch.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * clock framework for AMD misc clocks
+ *
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ */
+
+#ifndef __CLK_FCH_H
+#define __CLK_FCH_H
+
+#include <linux/compiler.h>
+
+struct fch_clk_data {
+ void __iomem *base;
+ char *name;
+};
+
+#endif /* __CLK_FCH_H */
diff --git a/include/linux/platform_data/clk-integrator.h b/include/linux/platform_data/clk-integrator.h
deleted file mode 100644
index addd48cac625..000000000000
--- a/include/linux/platform_data/clk-integrator.h
+++ /dev/null
@@ -1,2 +0,0 @@
-void integrator_impd1_clk_init(void __iomem *base, unsigned int id);
-void integrator_impd1_clk_exit(unsigned int id);
diff --git a/include/linux/platform_data/clk-u300.h b/include/linux/platform_data/clk-u300.h
deleted file mode 100644
index 8429e73911a1..000000000000
--- a/include/linux/platform_data/clk-u300.h
+++ /dev/null
@@ -1 +0,0 @@
-void __init u300_clk_init(void __iomem *base);
diff --git a/include/linux/platform_data/cpuidle-exynos.h b/include/linux/platform_data/cpuidle-exynos.h
index bfa40e4c5d5f..075cbf0302a5 100644
--- a/include/linux/platform_data/cpuidle-exynos.h
+++ b/include/linux/platform_data/cpuidle-exynos.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __CPUIDLE_EXYNOS_H
diff --git a/include/linux/platform_data/cros_ec_chardev.h b/include/linux/platform_data/cros_ec_chardev.h
new file mode 100644
index 000000000000..7de8faaf77df
--- /dev/null
+++ b/include/linux/platform_data/cros_ec_chardev.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ChromeOS EC device interface.
+ *
+ * Copyright (C) 2014 Google, Inc.
+ */
+
+#ifndef _UAPI_LINUX_CROS_EC_DEV_H_
+#define _UAPI_LINUX_CROS_EC_DEV_H_
+
+#include <linux/bits.h>
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#include <linux/platform_data/cros_ec_commands.h>
+
+#define CROS_EC_DEV_VERSION "1.0.0"
+
+/**
+ * struct cros_ec_readmem - Struct used to read mapped memory.
+ * @offset: Within EC_LPC_ADDR_MEMMAP region.
+ * @bytes: Number of bytes to read. Zero means "read a string" (including '\0').
+ * At most EC_MEMMAP_SIZE bytes can be read.
+ * @buffer: Where to store the result. The ioctl returns the number of bytes
+ * read or negative on error.
+ */
+struct cros_ec_readmem {
+ uint32_t offset;
+ uint32_t bytes;
+ uint8_t buffer[EC_MEMMAP_SIZE];
+};
+
+#define CROS_EC_DEV_IOC 0xEC
+#define CROS_EC_DEV_IOCXCMD _IOWR(CROS_EC_DEV_IOC, 0, struct cros_ec_command)
+#define CROS_EC_DEV_IOCRDMEM _IOWR(CROS_EC_DEV_IOC, 1, struct cros_ec_readmem)
+#define CROS_EC_DEV_IOCEVENTMASK _IO(CROS_EC_DEV_IOC, 2)
+
+#endif /* _UAPI_LINUX_CROS_EC_DEV_H_ */
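
From userspace these ioctls target the character device exposed by the cros_ec driver; a hedged sketch reading the switch byte (the /dev/cros_ec path and error handling are kept minimal for illustration):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    int main(void)
    {
            struct cros_ec_readmem rdmem = {
                    .offset = EC_MEMMAP_SWITCHES,
                    .bytes  = 1,
            };
            int fd = open("/dev/cros_ec", O_RDWR);

            if (fd < 0 || ioctl(fd, CROS_EC_DEV_IOCRDMEM, &rdmem) < 0)
                    return 1;
            printf("lid open: %d\n",
                   !!(rdmem.buffer[0] & EC_SWITCH_LID_OPEN));
            return 0;
    }
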
diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
new file mode 100644
index 000000000000..ab721cf13a98
--- /dev/null
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -0,0 +1,6397 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Host communication command constants for ChromeOS EC
+ *
+ * Copyright (C) 2012 Google, Inc
+ *
+ * NOTE: This file is auto-generated from ChromeOS EC Open Source code from
+ * https://chromium.googlesource.com/chromiumos/platform/ec/+/master/include/ec_commands.h
+ */
+
+/* Host communication command constants for Chrome EC */
+
+#ifndef __CROS_EC_COMMANDS_H
+#define __CROS_EC_COMMANDS_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#define BUILD_ASSERT(_cond)
+
+/*
+ * Current version of this protocol
+ *
+ * TODO(crosbug.com/p/11223): This is effectively useless; protocol is
+ * determined in other ways. Remove this once the kernel code no longer
+ * depends on it.
+ */
+#define EC_PROTO_VERSION 0x00000002
+
+/* Command version mask */
+#define EC_VER_MASK(version) BIT(version)
+
+/* I/O addresses for ACPI commands */
+#define EC_LPC_ADDR_ACPI_DATA 0x62
+#define EC_LPC_ADDR_ACPI_CMD 0x66
+
+/* I/O addresses for host command */
+#define EC_LPC_ADDR_HOST_DATA 0x200
+#define EC_LPC_ADDR_HOST_CMD 0x204
+
+/* I/O addresses for host command args and params */
+/* Protocol version 2 */
+#define EC_LPC_ADDR_HOST_ARGS 0x800 /* And 0x801, 0x802, 0x803 */
+#define EC_LPC_ADDR_HOST_PARAM 0x804 /* For version 2 params; size is
+ * EC_PROTO2_MAX_PARAM_SIZE
+ */
+/* Protocol version 3 */
+#define EC_LPC_ADDR_HOST_PACKET 0x800 /* Offset of version 3 packet */
+#define EC_LPC_HOST_PACKET_SIZE 0x100 /* Max size of version 3 packet */
+
+/*
+ * The actual block is 0x800-0x8ff, but some BIOSes think it's 0x880-0x8ff
+ * and they tell the kernel that, so we have to think of it as two parts.
+ *
+ * Other BIOSes report only the I/O port region spanned by the Microchip
+ * MEC series EC; an attempt to address a larger region may fail.
+ */
+#define EC_HOST_CMD_REGION0 0x800
+#define EC_HOST_CMD_REGION1 0x880
+#define EC_HOST_CMD_REGION_SIZE 0x80
+#define EC_HOST_CMD_MEC_REGION_SIZE 0x8
+
+/* EC command register bit functions */
+#define EC_LPC_CMDR_DATA BIT(0) /* Data ready for host to read */
+#define EC_LPC_CMDR_PENDING BIT(1) /* Write pending to EC */
+#define EC_LPC_CMDR_BUSY BIT(2) /* EC is busy processing a command */
+#define EC_LPC_CMDR_CMD BIT(3) /* Last host write was a command */
+#define EC_LPC_CMDR_ACPI_BRST BIT(4) /* Burst mode (not used) */
+#define EC_LPC_CMDR_SCI BIT(5) /* SCI event is pending */
+#define EC_LPC_CMDR_SMI BIT(6) /* SMI event is pending */
+
+#define EC_LPC_ADDR_MEMMAP 0x900
+#define EC_MEMMAP_SIZE 255 /* ACPI IO buffer max is 255 bytes */
+#define EC_MEMMAP_TEXT_MAX 8 /* Size of a string in the memory map */
+
+/* The offset address of each type of data in mapped memory. */
+#define EC_MEMMAP_TEMP_SENSOR 0x00 /* Temp sensors 0x00 - 0x0f */
+#define EC_MEMMAP_FAN 0x10 /* Fan speeds 0x10 - 0x17 */
+#define EC_MEMMAP_TEMP_SENSOR_B 0x18 /* More temp sensors 0x18 - 0x1f */
+#define EC_MEMMAP_ID 0x20 /* 0x20 == 'E', 0x21 == 'C' */
+#define EC_MEMMAP_ID_VERSION 0x22 /* Version of data in 0x20 - 0x2f */
+#define EC_MEMMAP_THERMAL_VERSION 0x23 /* Version of data in 0x00 - 0x1f */
+#define EC_MEMMAP_BATTERY_VERSION 0x24 /* Version of data in 0x40 - 0x7f */
+#define EC_MEMMAP_SWITCHES_VERSION 0x25 /* Version of data in 0x30 - 0x33 */
+#define EC_MEMMAP_EVENTS_VERSION 0x26 /* Version of data in 0x34 - 0x3f */
+#define EC_MEMMAP_HOST_CMD_FLAGS 0x27 /* Host cmd interface flags (8 bits) */
+/* Unused 0x28 - 0x2f */
+#define EC_MEMMAP_SWITCHES 0x30 /* 8 bits */
+/* Unused 0x31 - 0x33 */
+#define EC_MEMMAP_HOST_EVENTS 0x34 /* 64 bits */
+/* Battery values are all 32 bits, unless otherwise noted. */
+#define EC_MEMMAP_BATT_VOLT 0x40 /* Battery Present Voltage */
+#define EC_MEMMAP_BATT_RATE 0x44 /* Battery Present Rate */
+#define EC_MEMMAP_BATT_CAP 0x48 /* Battery Remaining Capacity */
+#define EC_MEMMAP_BATT_FLAG 0x4c /* Battery State, see below (8-bit) */
+#define EC_MEMMAP_BATT_COUNT 0x4d /* Battery Count (8-bit) */
+#define EC_MEMMAP_BATT_INDEX 0x4e /* Current Battery Data Index (8-bit) */
+/* Unused 0x4f */
+#define EC_MEMMAP_BATT_DCAP 0x50 /* Battery Design Capacity */
+#define EC_MEMMAP_BATT_DVLT 0x54 /* Battery Design Voltage */
+#define EC_MEMMAP_BATT_LFCC 0x58 /* Battery Last Full Charge Capacity */
+#define EC_MEMMAP_BATT_CCNT 0x5c /* Battery Cycle Count */
+/* Strings are all 8 bytes (EC_MEMMAP_TEXT_MAX) */
+#define EC_MEMMAP_BATT_MFGR 0x60 /* Battery Manufacturer String */
+#define EC_MEMMAP_BATT_MODEL 0x68 /* Battery Model Number String */
+#define EC_MEMMAP_BATT_SERIAL 0x70 /* Battery Serial Number String */
+#define EC_MEMMAP_BATT_TYPE 0x78 /* Battery Type String */
+#define EC_MEMMAP_ALS 0x80 /* ALS readings in lux (2 X 16 bits) */
+/* Unused 0x84 - 0x8f */
+#define EC_MEMMAP_ACC_STATUS 0x90 /* Accelerometer status (8 bits) */
+/* Unused 0x91 */
+#define EC_MEMMAP_ACC_DATA 0x92 /* Accelerometers data 0x92 - 0x9f */
+/* 0x92: Lid Angle if available, LID_ANGLE_UNRELIABLE otherwise */
+/* 0x94 - 0x99: 1st Accelerometer */
+/* 0x9a - 0x9f: 2nd Accelerometer */
+#define EC_MEMMAP_GYRO_DATA 0xa0 /* Gyroscope data 0xa0 - 0xa5 */
+/* Unused 0xa6 - 0xdf */
+
+/*
+ * ACPI is unable to access memory mapped data at or above this offset due to
+ * limitations of the ACPI protocol. Do not place data in the range 0xe0 - 0xfe
+ * which might be needed by ACPI.
+ */
+#define EC_MEMMAP_NO_ACPI 0xe0
+
+/* Define the format of the accelerometer mapped memory status byte. */
+#define EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK 0x0f
+#define EC_MEMMAP_ACC_STATUS_BUSY_BIT BIT(4)
+#define EC_MEMMAP_ACC_STATUS_PRESENCE_BIT BIT(7)
+
+/* Number of temp sensors at EC_MEMMAP_TEMP_SENSOR */
+#define EC_TEMP_SENSOR_ENTRIES 16
+/*
+ * Number of temp sensors at EC_MEMMAP_TEMP_SENSOR_B.
+ *
+ * Valid only if EC_MEMMAP_THERMAL_VERSION returns >= 2.
+ */
+#define EC_TEMP_SENSOR_B_ENTRIES 8
+
+/* Special values for mapped temperature sensors */
+#define EC_TEMP_SENSOR_NOT_PRESENT 0xff
+#define EC_TEMP_SENSOR_ERROR 0xfe
+#define EC_TEMP_SENSOR_NOT_POWERED 0xfd
+#define EC_TEMP_SENSOR_NOT_CALIBRATED 0xfc
+/*
+ * The offset of temperature value stored in mapped memory. This allows
+ * reporting a temperature range of 200K to 454K = -73C to 181C.
+ */
+#define EC_TEMP_SENSOR_OFFSET 200
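
Converting a raw mapped byte back to Celsius is then plain arithmetic; a small helper for illustration, not part of this header:

    static inline int ec_temp_to_celsius(u8 raw)
    {
            /* raw + 200 gives Kelvin; subtract 273 for Celsius */
            return (int)raw + EC_TEMP_SENSOR_OFFSET - 273;
    }
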
+
+/*
+ * Number of ALS readings at EC_MEMMAP_ALS
+ */
+#define EC_ALS_ENTRIES 2
+
+/*
+ * The default value a temperature sensor will return when it is present but
+ * has not been read this boot. This is a reasonable number to avoid
+ * triggering alarms on the host.
+ */
+#define EC_TEMP_SENSOR_DEFAULT (296 - EC_TEMP_SENSOR_OFFSET)
+
+#define EC_FAN_SPEED_ENTRIES 4 /* Number of fans at EC_MEMMAP_FAN */
+#define EC_FAN_SPEED_NOT_PRESENT 0xffff /* Entry not present */
+#define EC_FAN_SPEED_STALLED 0xfffe /* Fan stalled */
+
+/* Battery bit flags at EC_MEMMAP_BATT_FLAG. */
+#define EC_BATT_FLAG_AC_PRESENT 0x01
+#define EC_BATT_FLAG_BATT_PRESENT 0x02
+#define EC_BATT_FLAG_DISCHARGING 0x04
+#define EC_BATT_FLAG_CHARGING 0x08
+#define EC_BATT_FLAG_LEVEL_CRITICAL 0x10
+/* Set if some of the static/dynamic data is invalid (or outdated). */
+#define EC_BATT_FLAG_INVALID_DATA 0x20
+
+/* Switch flags at EC_MEMMAP_SWITCHES */
+#define EC_SWITCH_LID_OPEN 0x01
+#define EC_SWITCH_POWER_BUTTON_PRESSED 0x02
+#define EC_SWITCH_WRITE_PROTECT_DISABLED 0x04
+/* Was recovery requested via keyboard; now unused. */
+#define EC_SWITCH_IGNORE1 0x08
+/* Recovery requested via dedicated signal (from servo board) */
+#define EC_SWITCH_DEDICATED_RECOVERY 0x10
+/* Was fake developer mode switch; now unused. Remove in next refactor. */
+#define EC_SWITCH_IGNORE0 0x20
+
+/* Host command interface flags */
+/* Host command interface supports LPC args (LPC interface only) */
+#define EC_HOST_CMD_FLAG_LPC_ARGS_SUPPORTED 0x01
+/* Host command interface supports version 3 protocol */
+#define EC_HOST_CMD_FLAG_VERSION_3 0x02
+
+/* Wireless switch flags */
+#define EC_WIRELESS_SWITCH_ALL ~0x00 /* All flags */
+#define EC_WIRELESS_SWITCH_WLAN 0x01 /* WLAN radio */
+#define EC_WIRELESS_SWITCH_BLUETOOTH 0x02 /* Bluetooth radio */
+#define EC_WIRELESS_SWITCH_WWAN 0x04 /* WWAN power */
+#define EC_WIRELESS_SWITCH_WLAN_POWER 0x08 /* WLAN power */
+
+/*****************************************************************************/
+/*
+ * ACPI commands
+ *
+ * These are valid ONLY on the ACPI command/data port.
+ */
+
+/*
+ * ACPI Read Embedded Controller
+ *
+ * This reads from ACPI memory space on the EC (EC_ACPI_MEM_*).
+ *
+ * Use the following sequence:
+ *
+ * - Write EC_CMD_ACPI_READ to EC_LPC_ADDR_ACPI_CMD
+ * - Wait for EC_LPC_CMDR_PENDING bit to clear
+ * - Write address to EC_LPC_ADDR_ACPI_DATA
+ * - Wait for EC_LPC_CMDR_DATA bit to set
+ * - Read value from EC_LPC_ADDR_ACPI_DATA
+ */
+#define EC_CMD_ACPI_READ 0x0080
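
That sequence maps directly onto port I/O; an illustrative polling sketch with no timeout handling (x86-style inb/outb):

    static u8 ec_acpi_read(u8 addr)
    {
            outb(EC_CMD_ACPI_READ, EC_LPC_ADDR_ACPI_CMD);
            while (inb(EC_LPC_ADDR_ACPI_CMD) & EC_LPC_CMDR_PENDING)
                    cpu_relax();
            outb(addr, EC_LPC_ADDR_ACPI_DATA);
            while (!(inb(EC_LPC_ADDR_ACPI_CMD) & EC_LPC_CMDR_DATA))
                    cpu_relax();
            return inb(EC_LPC_ADDR_ACPI_DATA);
    }
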
+
+/*
+ * ACPI Write Embedded Controller
+ *
+ * This reads from ACPI memory space on the EC (EC_ACPI_MEM_*).
+ *
+ * Use the following sequence:
+ *
+ * - Write EC_CMD_ACPI_WRITE to EC_LPC_ADDR_ACPI_CMD
+ * - Wait for EC_LPC_CMDR_PENDING bit to clear
+ * - Write address to EC_LPC_ADDR_ACPI_DATA
+ * - Wait for EC_LPC_CMDR_PENDING bit to clear
+ * - Write value to EC_LPC_ADDR_ACPI_DATA
+ */
+#define EC_CMD_ACPI_WRITE 0x0081
+
+/*
+ * ACPI Burst Enable Embedded Controller
+ *
+ * This enables burst mode on the EC to allow the host to issue several
+ * commands back-to-back. While in this mode, writes to mapped multi-byte
+ * data are locked out to ensure data consistency.
+ */
+#define EC_CMD_ACPI_BURST_ENABLE 0x0082
+
+/*
+ * ACPI Burst Disable Embedded Controller
+ *
+ * This disables burst mode on the EC and stops preventing EC writes to mapped
+ * multi-byte data.
+ */
+#define EC_CMD_ACPI_BURST_DISABLE 0x0083
+
+/*
+ * ACPI Query Embedded Controller
+ *
+ * This clears the lowest-order bit in the currently pending host events, and
+ * sets the result code to the 1-based index of the bit (event 0x00000001 = 1,
+ * event 0x80000000 = 32), or 0 if no event was pending.
+ */
+#define EC_CMD_ACPI_QUERY_EVENT 0x0084
+
+/* Valid addresses in ACPI memory space, for read/write commands */
+
+/* Memory space version; set to EC_ACPI_MEM_VERSION_CURRENT */
+#define EC_ACPI_MEM_VERSION 0x00
+/*
+ * Test location; writing a value here updates the test complement byte to
+ * (0xff - value).
+ */
+#define EC_ACPI_MEM_TEST 0x01
+/* Test complement; writes here are ignored. */
+#define EC_ACPI_MEM_TEST_COMPLIMENT 0x02
+
+/* Keyboard backlight brightness percent (0 - 100) */
+#define EC_ACPI_MEM_KEYBOARD_BACKLIGHT 0x03
+/* DPTF Target Fan Duty (0-100, 0xff for auto/none) */
+#define EC_ACPI_MEM_FAN_DUTY 0x04
+
+/*
+ * DPTF temp thresholds. Any of the EC's temp sensors can have up to two
+ * independent thresholds attached to them. The current value of the ID
+ * register determines which sensor is affected by the THRESHOLD and COMMIT
+ * registers. The THRESHOLD register uses the same EC_TEMP_SENSOR_OFFSET scheme
+ * as the memory-mapped sensors. The COMMIT register applies those settings.
+ *
+ * The spec does not mandate any way to read back the threshold settings
+ * themselves, but when a threshold is crossed the AP needs a way to determine
+ * which sensor(s) are responsible. Each reading of the ID register clears and
+ * returns one sensor ID that has crossed one of its threshold (in either
+ * direction) since the last read. A value of 0xFF means "no new thresholds
+ * have tripped". Setting or enabling the thresholds for a sensor will clear
+ * the unread event count for that sensor.
+ */
+#define EC_ACPI_MEM_TEMP_ID 0x05
+#define EC_ACPI_MEM_TEMP_THRESHOLD 0x06
+#define EC_ACPI_MEM_TEMP_COMMIT 0x07
+/*
+ * Here are the bits for the COMMIT register:
+ * bit 0 selects the threshold index for the chosen sensor (0/1)
+ * bit 1 enables/disables the selected threshold (0 = off, 1 = on)
+ * Each write to the commit register affects one threshold.
+ */
+#define EC_ACPI_MEM_TEMP_COMMIT_SELECT_MASK BIT(0)
+#define EC_ACPI_MEM_TEMP_COMMIT_ENABLE_MASK BIT(1)
+/*
+ * Example:
+ *
+ * Set the thresholds for sensor 2 to 50 C and 60 C:
+ * write 2 to [0x05] -- select temp sensor 2
+ * write 0x7b to [0x06] -- C_TO_K(50) - EC_TEMP_SENSOR_OFFSET
+ * write 0x2 to [0x07] -- enable threshold 0 with this value
+ * write 0x85 to [0x06] -- C_TO_K(60) - EC_TEMP_SENSOR_OFFSET
+ * write 0x3 to [0x07] -- enable threshold 1 with this value
+ *
+ * Disable the 60 C threshold, leaving the 50 C threshold unchanged:
+ * write 2 to [0x05] -- select temp sensor 2
+ * write 0x1 to [0x07] -- disable threshold 1
+ */
+
+/* DPTF battery charging current limit */
+#define EC_ACPI_MEM_CHARGING_LIMIT 0x08
+
+/* Charging limit is specified in 64 mA steps */
+#define EC_ACPI_MEM_CHARGING_LIMIT_STEP_MA 64
+/* Value to disable DPTF battery charging limit */
+#define EC_ACPI_MEM_CHARGING_LIMIT_DISABLED 0xff
+
+/*
+ * Report device orientation
+ * Bits Definition
+ * 3:1 Device DPTF Profile Number (DDPN)
+ * 0 = Reserved for backward compatibility (indicates no valid
+ * profile number. Host should fall back to using TBMD).
+ * 1..7 = DPTF Profile number to indicate to host which table needs
+ * to be loaded.
+ * 0 Tablet Mode Device Indicator (TBMD)
+ */
+#define EC_ACPI_MEM_DEVICE_ORIENTATION 0x09
+#define EC_ACPI_MEM_TBMD_SHIFT 0
+#define EC_ACPI_MEM_TBMD_MASK 0x1
+#define EC_ACPI_MEM_DDPN_SHIFT 1
+#define EC_ACPI_MEM_DDPN_MASK 0x7
+
+/*
+ * Report device features. Uses the same format as the host command, except:
+ *
+ * bit 0 (EC_FEATURE_LIMITED) changes meaning from "EC code has a limited set
+ * of features", which is of limited interest when the system is already
+ * interpreting ACPI bytecode, to "EC_FEATURES[0-7] is not supported". Since
+ * these are supported, it defaults to 0.
+ * This allows detecting the presence of this field since older versions of
+ * the EC codebase would simply return 0xff to that unknown address. Check
+ * FEATURES0 != 0xff (or FEATURES0[0] == 0) to make sure that the other bits
+ * are valid.
+ */
+#define EC_ACPI_MEM_DEVICE_FEATURES0 0x0a
+#define EC_ACPI_MEM_DEVICE_FEATURES1 0x0b
+#define EC_ACPI_MEM_DEVICE_FEATURES2 0x0c
+#define EC_ACPI_MEM_DEVICE_FEATURES3 0x0d
+#define EC_ACPI_MEM_DEVICE_FEATURES4 0x0e
+#define EC_ACPI_MEM_DEVICE_FEATURES5 0x0f
+#define EC_ACPI_MEM_DEVICE_FEATURES6 0x10
+#define EC_ACPI_MEM_DEVICE_FEATURES7 0x11
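
A sketch of the presence check the comment above describes:

    /* features0 read from EC_ACPI_MEM_DEVICE_FEATURES0 */
    static bool ec_features_fields_valid(u8 features0)
    {
            /* old EC code returns 0xff for unknown addresses */
            return features0 != 0xff;
    }
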
+
+#define EC_ACPI_MEM_BATTERY_INDEX 0x12
+
+/*
+ * USB Port Power. Each bit indicates whether the corresponding USB port's power
+ * is enabled (1) or disabled (0).
+ * bit 0 USB port ID 0
+ * ...
+ * bit 7 USB port ID 7
+ */
+#define EC_ACPI_MEM_USB_PORT_POWER 0x13
+
+/*
+ * ACPI addresses 0x20 - 0xff map to EC_MEMMAP offset 0x00 - 0xdf. This data
+ * is read-only from the AP. Added in EC_ACPI_MEM_VERSION 2.
+ */
+#define EC_ACPI_MEM_MAPPED_BEGIN 0x20
+#define EC_ACPI_MEM_MAPPED_SIZE 0xe0
+
+/* Current version of ACPI memory address space */
+#define EC_ACPI_MEM_VERSION_CURRENT 2
+
+
+/*
+ * This header file is used in coreboot both in C and ACPI code. The ACPI code
+ * is pre-processed to handle constants but the ASL compiler is unable to
+ * handle actual C code so keep it separate.
+ */
+
+
+/*
+ * Attributes for EC request and response packets. Just defining __packed
+ * results in inefficient assembly code on ARM, if the structure is actually
+ * 32-bit aligned, as it should be for all buffers.
+ *
+ * Be very careful when adding these to existing structures. They will round
+ * up the structure size to the specified boundary.
+ *
+ * Also be very careful to make sure that if a structure is included in some
+ * other parent structure, the alignment will still hold given the packing of
+ * the parent structure. This is particularly important if the sub-structure
+ * will be passed as a pointer to another function, since that function will
+ * not know about the misalignment caused by the parent structure's packing.
+ *
+ * Also be very careful using __packed - particularly when nesting non-packed
+ * structures inside packed ones. In fact, DO NOT use __packed directly;
+ * always use one of these attributes.
+ *
+ * Once everything is annotated properly, the following search strings should
+ * not return ANY matches in this file other than right here:
+ *
+ * "__packed" - generates inefficient code; all sub-structs must also be packed
+ *
+ * "struct [^_]" - all structs should be annotated, except for structs that are
+ * members of other structs/unions (and their original declarations should be
+ * annotated).
+ */
+
+/*
+ * Packed structures make no assumption about alignment, so they do inefficient
+ * byte-wise reads.
+ */
+#define __ec_align1 __packed
+#define __ec_align2 __packed
+#define __ec_align4 __packed
+#define __ec_align_size1 __packed
+#define __ec_align_offset1 __packed
+#define __ec_align_offset2 __packed
+#define __ec_todo_packed __packed
+#define __ec_todo_unpacked
+
+
+/* LPC command status byte masks */
+/* EC has written a byte in the data register and host hasn't read it yet */
+#define EC_LPC_STATUS_TO_HOST 0x01
+/* Host has written a command/data byte and the EC hasn't read it yet */
+#define EC_LPC_STATUS_FROM_HOST 0x02
+/* EC is processing a command */
+#define EC_LPC_STATUS_PROCESSING 0x04
+/* Last write to EC was a command, not data */
+#define EC_LPC_STATUS_LAST_CMD 0x08
+/* EC is in burst mode */
+#define EC_LPC_STATUS_BURST_MODE 0x10
+/* SCI event is pending (requesting SCI query) */
+#define EC_LPC_STATUS_SCI_PENDING 0x20
+/* SMI event is pending (requesting SMI query) */
+#define EC_LPC_STATUS_SMI_PENDING 0x40
+/* (reserved) */
+#define EC_LPC_STATUS_RESERVED 0x80
+
+/*
+ * EC is busy. This covers both the EC processing a command and the case where
+ * the host has written a new command that the EC hasn't picked up yet.
+ */
+#define EC_LPC_STATUS_BUSY_MASK \
+ (EC_LPC_STATUS_FROM_HOST | EC_LPC_STATUS_PROCESSING)
+
+/*
+ * Host command response codes (16-bit). Note that response codes should be
+ * stored in a uint16_t rather than directly in a value of this type.
+ */
+enum ec_status {
+ EC_RES_SUCCESS = 0,
+ EC_RES_INVALID_COMMAND = 1,
+ EC_RES_ERROR = 2,
+ EC_RES_INVALID_PARAM = 3,
+ EC_RES_ACCESS_DENIED = 4,
+ EC_RES_INVALID_RESPONSE = 5,
+ EC_RES_INVALID_VERSION = 6,
+ EC_RES_INVALID_CHECKSUM = 7,
+ EC_RES_IN_PROGRESS = 8, /* Accepted, command in progress */
+ EC_RES_UNAVAILABLE = 9, /* No response available */
+ EC_RES_TIMEOUT = 10, /* We got a timeout */
+ EC_RES_OVERFLOW = 11, /* Table / data overflow */
+ EC_RES_INVALID_HEADER = 12, /* Header contains invalid data */
+ EC_RES_REQUEST_TRUNCATED = 13, /* Didn't get the entire request */
+ EC_RES_RESPONSE_TOO_BIG = 14, /* Response was too big to handle */
+ EC_RES_BUS_ERROR = 15, /* Communications bus error */
+ EC_RES_BUSY = 16, /* Up but too busy. Should retry */
+ EC_RES_INVALID_HEADER_VERSION = 17, /* Header version invalid */
+ EC_RES_INVALID_HEADER_CRC = 18, /* Header CRC invalid */
+ EC_RES_INVALID_DATA_CRC = 19, /* Data CRC invalid */
+ EC_RES_DUP_UNAVAILABLE = 20, /* Can't resend response */
+};
+
+/*
+ * Host event codes. Note these are 1-based, not 0-based, because ACPI query
+ * EC command uses code 0 to mean "no event pending". We explicitly specify
+ * each value in the enum listing so they won't change if we delete/insert an
+ * item or rearrange the list (it needs to be stable across platforms, not
+ * just within a single compiled instance).
+ */
+enum host_event_code {
+ EC_HOST_EVENT_LID_CLOSED = 1,
+ EC_HOST_EVENT_LID_OPEN = 2,
+ EC_HOST_EVENT_POWER_BUTTON = 3,
+ EC_HOST_EVENT_AC_CONNECTED = 4,
+ EC_HOST_EVENT_AC_DISCONNECTED = 5,
+ EC_HOST_EVENT_BATTERY_LOW = 6,
+ EC_HOST_EVENT_BATTERY_CRITICAL = 7,
+ EC_HOST_EVENT_BATTERY = 8,
+ EC_HOST_EVENT_THERMAL_THRESHOLD = 9,
+ /* Event generated by a device attached to the EC */
+ EC_HOST_EVENT_DEVICE = 10,
+ EC_HOST_EVENT_THERMAL = 11,
+ EC_HOST_EVENT_USB_CHARGER = 12,
+ EC_HOST_EVENT_KEY_PRESSED = 13,
+ /*
+ * EC has finished initializing the host interface. The host can check
+	 * for this event after sending an EC_CMD_REBOOT_EC command to
+ * determine when the EC is ready to accept subsequent commands.
+ */
+ EC_HOST_EVENT_INTERFACE_READY = 14,
+ /* Keyboard recovery combo has been pressed */
+ EC_HOST_EVENT_KEYBOARD_RECOVERY = 15,
+
+ /* Shutdown due to thermal overload */
+ EC_HOST_EVENT_THERMAL_SHUTDOWN = 16,
+ /* Shutdown due to battery level too low */
+ EC_HOST_EVENT_BATTERY_SHUTDOWN = 17,
+
+ /* Suggest that the AP throttle itself */
+ EC_HOST_EVENT_THROTTLE_START = 18,
+ /* Suggest that the AP resume normal speed */
+ EC_HOST_EVENT_THROTTLE_STOP = 19,
+
+ /* Hang detect logic detected a hang and host event timeout expired */
+ EC_HOST_EVENT_HANG_DETECT = 20,
+ /* Hang detect logic detected a hang and warm rebooted the AP */
+ EC_HOST_EVENT_HANG_REBOOT = 21,
+
+ /* PD MCU triggering host event */
+ EC_HOST_EVENT_PD_MCU = 22,
+
+ /* Battery Status flags have changed */
+ EC_HOST_EVENT_BATTERY_STATUS = 23,
+
+ /* EC encountered a panic, triggering a reset */
+ EC_HOST_EVENT_PANIC = 24,
+
+ /* Keyboard fastboot combo has been pressed */
+ EC_HOST_EVENT_KEYBOARD_FASTBOOT = 25,
+
+ /* EC RTC event occurred */
+ EC_HOST_EVENT_RTC = 26,
+
+ /* Emulate MKBP event */
+ EC_HOST_EVENT_MKBP = 27,
+
+ /* EC desires to change state of host-controlled USB mux */
+ EC_HOST_EVENT_USB_MUX = 28,
+
+ /* TABLET/LAPTOP mode or detachable base attach/detach event */
+ EC_HOST_EVENT_MODE_CHANGE = 29,
+
+ /* Keyboard recovery combo with hardware reinitialization */
+ EC_HOST_EVENT_KEYBOARD_RECOVERY_HW_REINIT = 30,
+
+ /* WoV */
+ EC_HOST_EVENT_WOV = 31,
+
+ /*
+ * The high bit of the event mask is not used as a host event code. If
+ * it reads back as set, then the entire event mask should be
+ * considered invalid by the host. This can happen when reading the
+ * raw event status via EC_MEMMAP_HOST_EVENTS but the LPC interface is
+ * not initialized on the EC, or improperly configured on the host.
+ */
+ EC_HOST_EVENT_INVALID = 32
+};
+/* Host event mask */
+#define EC_HOST_EVENT_MASK(event_code) BIT_ULL((event_code) - 1)
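+
+/*
+ * Illustrative sketch: testing one event in a 64-bit event mask, e.g. one
+ * read from the raw event status.
+ */
+static inline int ec_host_event_is_set_sketch(uint64_t events,
+					      enum host_event_code code)
+{
+	return (events & EC_HOST_EVENT_MASK(code)) != 0;
+}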
+
+/**
+ * struct ec_lpc_host_args - Arguments at EC_LPC_ADDR_HOST_ARGS
+ * @flags: The host argument flags.
+ * @command_version: Command version.
+ * @data_size: The length of data.
+ * @checksum: Checksum; sum of command + flags + command_version + data_size +
+ * all params/response data bytes.
+ */
+struct ec_lpc_host_args {
+ uint8_t flags;
+ uint8_t command_version;
+ uint8_t data_size;
+ uint8_t checksum;
+} __ec_align4;
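+
+/*
+ * Illustrative sketch: computing the checksum defined above over the header
+ * fields plus the params/response data bytes.
+ */
+static inline uint8_t ec_lpc_host_args_checksum_sketch(
+		uint8_t command, const struct ec_lpc_host_args *args,
+		const uint8_t *data, uint8_t data_size)
+{
+	uint8_t csum = command + args->flags + args->command_version +
+		       args->data_size;
+	uint8_t i;
+
+	for (i = 0; i < data_size; i++)
+		csum += data[i];
+	return csum;
+}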
+
+/* Flags for ec_lpc_host_args.flags */
+/*
+ * Args are from host. Data area at EC_LPC_ADDR_HOST_PARAM contains command
+ * params.
+ *
+ * If EC gets a command and this flag is not set, this is an old-style command.
+ * Command version is 0 and params from host are at EC_LPC_ADDR_OLD_PARAM with
+ * unknown length. EC must respond with an old-style response (that is,
+ * without setting EC_HOST_ARGS_FLAG_TO_HOST).
+ */
+#define EC_HOST_ARGS_FLAG_FROM_HOST 0x01
+/*
+ * Args are from EC. Data area at EC_LPC_ADDR_HOST_PARAM contains response.
+ *
+ * If EC responds to a command and this flag is not set, this is an old-style
+ * response. Command version is 0 and response data from EC is at
+ * EC_LPC_ADDR_OLD_PARAM with unknown length.
+ */
+#define EC_HOST_ARGS_FLAG_TO_HOST 0x02
+
+/*****************************************************************************/
+/*
+ * Byte codes returned by EC over SPI interface.
+ *
+ * These can be used by the AP to debug the EC interface, and to determine
+ * when the EC is not in a state where it will ever get around to responding
+ * to the AP.
+ *
+ * Example sequence of bytes read from the EC for a typical good transfer:
+ * 1. - - AP asserts chip select (CS#)
+ * 2. EC_SPI_OLD_READY - AP sends first byte(s) of request
+ * 3. - - EC starts handling CS# interrupt
+ * 4. EC_SPI_RECEIVING - AP sends remaining byte(s) of request
+ * 5. EC_SPI_PROCESSING - EC starts processing request; AP is clocking in
+ * bytes looking for EC_SPI_FRAME_START
+ * 6. - - EC finishes processing and sets up response
+ * 7. EC_SPI_FRAME_START - AP reads frame byte
+ * 8. (response packet) - AP reads response packet
+ * 9. EC_SPI_PAST_END - Any additional bytes read by AP
+ * 10 - - AP deasserts chip select
+ * 11 - - EC processes CS# interrupt and sets up DMA for
+ * next request
+ *
+ * If the AP is waiting for EC_SPI_FRAME_START and sees any value other than
+ * the following byte values:
+ * EC_SPI_OLD_READY
+ * EC_SPI_RX_READY
+ * EC_SPI_RECEIVING
+ * EC_SPI_PROCESSING
+ *
+ * Then the EC found an error in the request, or was not ready for the request
+ * and lost data. The AP should give up waiting for EC_SPI_FRAME_START,
+ * because the EC is unable to tell when the AP is done sending its request.
+ */
+
+/*
+ * Framing byte which precedes a response packet from the EC. After sending a
+ * request, the AP will clock in bytes until it sees the framing byte, then
+ * clock in the response packet.
+ */
+#define EC_SPI_FRAME_START 0xec
+
+/*
+ * Padding bytes which are clocked out after the end of a response packet.
+ */
+#define EC_SPI_PAST_END 0xed
+
+/*
+ * EC is ready to receive, and has ignored the byte sent by the AP. EC expects
+ * that the AP will send a valid packet header (starting with
+ * EC_COMMAND_PROTOCOL_3) in the next 32 bytes.
+ */
+#define EC_SPI_RX_READY 0xf8
+
+/*
+ * EC has started receiving the request from the AP, but hasn't started
+ * processing it yet.
+ */
+#define EC_SPI_RECEIVING 0xf9
+
+/* EC has received the entire request from the AP and is processing it. */
+#define EC_SPI_PROCESSING 0xfa
+
+/*
+ * EC received bad data from the AP, such as a packet header with an invalid
+ * length. EC will ignore all data until chip select deasserts.
+ */
+#define EC_SPI_RX_BAD_DATA 0xfb
+
+/*
+ * EC received data from the AP before it was ready. That is, the AP asserted
+ * chip select and started clocking data before the EC was ready to receive it.
+ * EC will ignore all data until chip select deasserts.
+ */
+#define EC_SPI_NOT_READY 0xfc
+
+/*
+ * EC was ready to receive a request from the AP. EC has treated the byte sent
+ * by the AP as part of a request packet, or (for old-style ECs) is processing
+ * a fully received packet but is not ready to respond yet.
+ */
+#define EC_SPI_OLD_READY 0xfd
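+
+/*
+ * Illustrative sketch: while polling for EC_SPI_FRAME_START, the AP can use
+ * the byte codes above to decide whether the transfer is still healthy or
+ * should be abandoned.
+ */
+static inline int ec_spi_byte_is_ok_sketch(uint8_t b)
+{
+	return b == EC_SPI_OLD_READY || b == EC_SPI_RX_READY ||
+	       b == EC_SPI_RECEIVING || b == EC_SPI_PROCESSING;
+}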
+
+/*****************************************************************************/
+
+/*
+ * Protocol version 2 for I2C and SPI send a request this way:
+ *
+ * 0 EC_CMD_VERSION0 + (command version)
+ * 1 Command number
+ * 2 Length of params = N
+ * 3..N+2 Params, if any
+ * N+3 8-bit checksum of bytes 0..N+2
+ *
+ * The corresponding response is:
+ *
+ * 0 Result code (EC_RES_*)
+ * 1 Length of params = M
+ * 2..M+1 Params, if any
+ * M+2 8-bit checksum of bytes 0..M+1
+ */
+#define EC_PROTO2_REQUEST_HEADER_BYTES 3
+#define EC_PROTO2_REQUEST_TRAILER_BYTES 1
+#define EC_PROTO2_REQUEST_OVERHEAD (EC_PROTO2_REQUEST_HEADER_BYTES + \
+ EC_PROTO2_REQUEST_TRAILER_BYTES)
+
+#define EC_PROTO2_RESPONSE_HEADER_BYTES 2
+#define EC_PROTO2_RESPONSE_TRAILER_BYTES 1
+#define EC_PROTO2_RESPONSE_OVERHEAD (EC_PROTO2_RESPONSE_HEADER_BYTES + \
+ EC_PROTO2_RESPONSE_TRAILER_BYTES)
+
+/* Parameter length was limited by the LPC interface */
+#define EC_PROTO2_MAX_PARAM_SIZE 0xfc
+
+/* Maximum request and response packet sizes for protocol version 2 */
+#define EC_PROTO2_MAX_REQUEST_SIZE (EC_PROTO2_REQUEST_OVERHEAD + \
+ EC_PROTO2_MAX_PARAM_SIZE)
+#define EC_PROTO2_MAX_RESPONSE_SIZE (EC_PROTO2_RESPONSE_OVERHEAD + \
+ EC_PROTO2_MAX_PARAM_SIZE)
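+
+/*
+ * Illustrative sketch: framing a protocol version 2 request as laid out
+ * above. buf must hold at least EC_PROTO2_REQUEST_OVERHEAD + param_size
+ * bytes; EC_CMD_VERSION0 (defined elsewhere in this file) and memcpy() are
+ * assumed to be available. Returns the total packet length.
+ */
+static inline int ec_proto2_frame_request_sketch(uint8_t *buf,
+		uint8_t command, uint8_t version,
+		const uint8_t *params, uint8_t param_size)
+{
+	uint8_t csum = 0;
+	int i;
+
+	buf[0] = EC_CMD_VERSION0 + version;
+	buf[1] = command;
+	buf[2] = param_size;
+	memcpy(buf + EC_PROTO2_REQUEST_HEADER_BYTES, params, param_size);
+	/* 8-bit checksum of the header and param bytes */
+	for (i = 0; i < EC_PROTO2_REQUEST_HEADER_BYTES + param_size; i++)
+		csum += buf[i];
+	buf[EC_PROTO2_REQUEST_HEADER_BYTES + param_size] = csum;
+	return EC_PROTO2_REQUEST_OVERHEAD + param_size;
+}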
+
+/*****************************************************************************/
+
+/*
+ * Value written to legacy command port / prefix byte to indicate protocol
+ * 3+ structs are being used. Usage is bus-dependent.
+ */
+#define EC_COMMAND_PROTOCOL_3 0xda
+
+#define EC_HOST_REQUEST_VERSION 3
+
+/**
+ * struct ec_host_request - Version 3 request from host.
+ * @struct_version: Should be 3. The EC will return EC_RES_INVALID_HEADER if it
+ * receives a header with a version it doesn't know how to
+ * parse.
+ * @checksum: Checksum of request and data; sum of all bytes including checksum
+ * should total to 0.
+ * @command: Command to send (EC_CMD_...)
+ * @command_version: Command version.
+ * @reserved: Unused byte in current protocol version; set to 0.
+ * @data_len: Length of data which follows this header.
+ */
+struct ec_host_request {
+ uint8_t struct_version;
+ uint8_t checksum;
+ uint16_t command;
+ uint8_t command_version;
+ uint8_t reserved;
+ uint16_t data_len;
+} __ec_align4;
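+
+/*
+ * Illustrative sketch: computing the v3 checksum so that all bytes of the
+ * header plus data, including the checksum itself, sum to 0. Assumes
+ * offsetof() from <stddef.h>.
+ */
+static inline uint8_t ec_host_request_checksum_sketch(
+		const struct ec_host_request *req, const uint8_t *data)
+{
+	const uint8_t *p = (const uint8_t *)req;
+	uint8_t sum = 0;
+	size_t i;
+
+	/* Sum the header, treating the checksum field itself as 0 */
+	for (i = 0; i < sizeof(*req); i++)
+		if (i != offsetof(struct ec_host_request, checksum))
+			sum += p[i];
+	for (i = 0; i < req->data_len; i++)
+		sum += data[i];
+	/* Negate so the grand total, checksum included, is 0 mod 256 */
+	return (uint8_t)-sum;
+}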
+
+#define EC_HOST_RESPONSE_VERSION 3
+
+/**
+ * struct ec_host_response - Version 3 response from EC.
+ * @struct_version: Struct version (=3).
+ * @checksum: Checksum of response and data; sum of all bytes including
+ * checksum should total to 0.
+ * @result: EC's response to the command (separate from communication failure)
+ * @data_len: Length of data which follows this header.
+ * @reserved: Unused bytes in current protocol version; set to 0.
+ */
+struct ec_host_response {
+ uint8_t struct_version;
+ uint8_t checksum;
+ uint16_t result;
+ uint16_t data_len;
+ uint16_t reserved;
+} __ec_align4;
+
+/*****************************************************************************/
+
+/*
+ * Host command protocol V4.
+ *
+ * Packets always start with a request or response header. They are followed
+ * by data_len bytes of data. If the data_crc_present flag is set, the data
+ * bytes are followed by a CRC-8 of that data, using x^8 + x^2 + x + 1
+ * polynomial.
+ *
+ * Host algorithm when sending a request q:
+ *
+ * 101) tries_left=(some value, e.g. 3);
+ * 102) q.seq_num++
+ * 103) q.seq_dup=0
+ * 104) Calculate q.header_crc.
+ * 105) Send request q to EC.
+ * 106) Wait for response r. Go to 201 if received or 301 if timeout.
+ *
+ * 201) If r.struct_version != 4, go to 301.
+ * 202) If r.header_crc mismatches calculated CRC for r header, go to 301.
+ * 203) If r.data_crc_present and r.data_crc mismatches, go to 301.
+ * 204) If r.seq_num != q.seq_num, go to 301.
+ * 205) If r.seq_dup == q.seq_dup, return success.
+ * 206) If r.seq_dup == 1, go to 301.
+ * 207) Return error.
+ *
+ * 301) If --tries_left <= 0, return error.
+ * 302) If q.seq_dup == 1, go to 105.
+ * 303) q.seq_dup = 1
+ * 304) Go to 104.
+ *
+ * EC algorithm when receiving a request q.
+ * EC has response buffer r, error buffer e.
+ *
+ * 101) If q.struct_version != 4, set e.result = EC_RES_INVALID_HEADER_VERSION
+ * and go to 301
+ * 102) If q.header_crc mismatches calculated CRC, set e.result =
+ * EC_RES_INVALID_HEADER_CRC and go to 301
+ * 103) If q.data_crc_present, calculate data CRC. If that mismatches the CRC
+ * byte at the end of the packet, set e.result = EC_RES_INVALID_DATA_CRC
+ * and go to 301.
+ * 104) If q.seq_dup == 0, go to 201.
+ * 105) If q.seq_num != r.seq_num, go to 201.
+ * 106) If q.seq_dup == r.seq_dup, go to 205, else go to 203.
+ *
+ * 201) Process request q into response r.
+ * 202) r.seq_num = q.seq_num
+ * 203) r.seq_dup = q.seq_dup
+ * 204) Calculate r.header_crc
+ * 205) If r.data_len > 0 and data is no longer available, set e.result =
+ * EC_RES_DUP_UNAVAILABLE and go to 301.
+ * 206) Send response r.
+ *
+ * 301) e.seq_num = q.seq_num
+ * 302) e.seq_dup = q.seq_dup
+ * 303) Calculate e.header_crc.
+ * 304) Send error response e.
+ */
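+
+/*
+ * Illustrative sketch of the CRC-8 named above (polynomial
+ * x^8 + x^2 + x + 1, i.e. 0x07). MSB-first processing with an initial value
+ * of 0 is an assumption here, since those parameters are not spelled out.
+ */
+static inline uint8_t ec_crc8_sketch(const uint8_t *data, int len)
+{
+	uint8_t crc = 0;
+	int i, bit;
+
+	for (i = 0; i < len; i++) {
+		crc ^= data[i];
+		for (bit = 0; bit < 8; bit++)
+			crc = (crc & 0x80) ? (uint8_t)((crc << 1) ^ 0x07)
+					   : (uint8_t)(crc << 1);
+	}
+	return crc;
+}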
+
+/* Version 4 request from host */
+struct ec_host_request4 {
+ /*
+ * bits 0-3: struct_version: Structure version (=4)
+ * bit 4: is_response: Is response (=0)
+ * bits 5-6: seq_num: Sequence number
+ * bit 7: seq_dup: Sequence duplicate flag
+ */
+ uint8_t fields0;
+
+ /*
+ * bits 0-4: command_version: Command version
+ * bits 5-6: Reserved (set 0, ignore on read)
+ * bit 7: data_crc_present: Is data CRC present after data
+ */
+ uint8_t fields1;
+
+ /* Command code (EC_CMD_*) */
+ uint16_t command;
+
+ /* Length of data which follows this header (not including data CRC) */
+ uint16_t data_len;
+
+ /* Reserved (set 0, ignore on read) */
+ uint8_t reserved;
+
+ /* CRC-8 of above fields, using x^8 + x^2 + x + 1 polynomial */
+ uint8_t header_crc;
+} __ec_align4;
+
+/* Version 4 response from EC */
+struct ec_host_response4 {
+ /*
+ * bits 0-3: struct_version: Structure version (=4)
+ * bit 4: is_response: Is response (=1)
+ * bits 5-6: seq_num: Sequence number
+ * bit 7: seq_dup: Sequence duplicate flag
+ */
+ uint8_t fields0;
+
+ /*
+ * bits 0-6: Reserved (set 0, ignore on read)
+ * bit 7: data_crc_present: Is data CRC present after data
+ */
+ uint8_t fields1;
+
+ /* Result code (EC_RES_*) */
+ uint16_t result;
+
+ /* Length of data which follows this header (not including data CRC) */
+ uint16_t data_len;
+
+ /* Reserved (set 0, ignore on read) */
+ uint8_t reserved;
+
+ /* CRC-8 of above fields, using x^8 + x^2 + x + 1 polynomial */
+ uint8_t header_crc;
+} __ec_align4;
+
+/* Fields in fields0 byte */
+#define EC_PACKET4_0_STRUCT_VERSION_MASK 0x0f
+#define EC_PACKET4_0_IS_RESPONSE_MASK 0x10
+#define EC_PACKET4_0_SEQ_NUM_SHIFT 5
+#define EC_PACKET4_0_SEQ_NUM_MASK 0x60
+#define EC_PACKET4_0_SEQ_DUP_MASK 0x80
+
+/* Fields in fields1 byte */
+#define EC_PACKET4_1_COMMAND_VERSION_MASK 0x1f /* (request only) */
+#define EC_PACKET4_1_DATA_CRC_PRESENT_MASK 0x80
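+
+/*
+ * Illustrative sketch: packing the request fields0 byte from its parts
+ * using the masks above.
+ */
+static inline uint8_t ec_packet4_request_fields0_sketch(uint8_t seq_num,
+							uint8_t seq_dup)
+{
+	/* struct_version = 4, is_response = 0 for a request */
+	return (4 & EC_PACKET4_0_STRUCT_VERSION_MASK) |
+	       ((seq_num << EC_PACKET4_0_SEQ_NUM_SHIFT) &
+		EC_PACKET4_0_SEQ_NUM_MASK) |
+	       (seq_dup ? EC_PACKET4_0_SEQ_DUP_MASK : 0);
+}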
+
+/*****************************************************************************/
+/*
+ * Notes on commands:
+ *
+ * Each command is a 16-bit command value. Commands which take params or
+ * return response data specify structures for that data. If no structure is
+ * specified, the command does not input or output data, respectively.
+ * Parameter/response length is implicit in the structs. Some underlying
+ * communication protocols (I2C, SPI) may add length or checksum headers, but
+ * those are implementation-dependent and not defined here.
+ *
+ * All commands MUST be #defined to be 4-digit UPPER CASE hex values
+ * (e.g., 0x00AB, not 0xab) for CONFIG_HOSTCMD_SECTION_SORTED to work.
+ */
+
+/*****************************************************************************/
+/* General / test commands */
+
+/*
+ * Get protocol version, used to deal with non-backward compatible protocol
+ * changes.
+ */
+#define EC_CMD_PROTO_VERSION 0x0000
+
+/**
+ * struct ec_response_proto_version - Response to the proto version command.
+ * @version: The protocol version.
+ */
+struct ec_response_proto_version {
+ uint32_t version;
+} __ec_align4;
+
+/*
+ * Hello. This is a simple command to test that the EC is responsive to
+ * commands.
+ */
+#define EC_CMD_HELLO 0x0001
+
+/**
+ * struct ec_params_hello - Parameters to the hello command.
+ * @in_data: Pass anything here.
+ */
+struct ec_params_hello {
+ uint32_t in_data;
+} __ec_align4;
+
+/**
+ * struct ec_response_hello - Response to the hello command.
+ * @out_data: Output will be in_data + 0x01020304.
+ */
+struct ec_response_hello {
+ uint32_t out_data;
+} __ec_align4;
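+
+/*
+ * Illustrative sketch: verifying a hello round trip using the relation
+ * documented above.
+ */
+static inline int ec_hello_ok_sketch(const struct ec_params_hello *p,
+				     const struct ec_response_hello *r)
+{
+	return r->out_data == p->in_data + 0x01020304;
+}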
+
+/* Get version number */
+#define EC_CMD_GET_VERSION 0x0002
+
+enum ec_current_image {
+ EC_IMAGE_UNKNOWN = 0,
+ EC_IMAGE_RO,
+ EC_IMAGE_RW
+};
+
+/**
+ * struct ec_response_get_version - Response to the get version command.
+ * @version_string_ro: Null-terminated RO firmware version string.
+ * @version_string_rw: Null-terminated RW firmware version string.
+ * @reserved: Unused bytes; was previously RW-B firmware version string.
+ * @current_image: One of ec_current_image.
+ */
+struct ec_response_get_version {
+ char version_string_ro[32];
+ char version_string_rw[32];
+ char reserved[32];
+ uint32_t current_image;
+} __ec_align4;
+
+/* Read test */
+#define EC_CMD_READ_TEST 0x0003
+
+/**
+ * struct ec_params_read_test - Parameters for the read test command.
+ * @offset: Starting value for read buffer.
+ * @size: Size to read in bytes.
+ */
+struct ec_params_read_test {
+ uint32_t offset;
+ uint32_t size;
+} __ec_align4;
+
+/**
+ * struct ec_response_read_test - Response to the read test command.
+ * @data: Data returned by the read test command.
+ */
+struct ec_response_read_test {
+ uint32_t data[32];
+} __ec_align4;
+
+/*
+ * Get build information
+ *
+ * Response is null-terminated string.
+ */
+#define EC_CMD_GET_BUILD_INFO 0x0004
+
+/* Get chip info */
+#define EC_CMD_GET_CHIP_INFO 0x0005
+
+/**
+ * struct ec_response_get_chip_info - Response to the get chip info command.
+ * @vendor: Null-terminated string for chip vendor.
+ * @name: Null-terminated string for chip name.
+ * @revision: Null-terminated string for chip mask version.
+ */
+struct ec_response_get_chip_info {
+ char vendor[32];
+ char name[32];
+ char revision[32];
+} __ec_align4;
+
+/* Get board HW version */
+#define EC_CMD_GET_BOARD_VERSION 0x0006
+
+/**
+ * struct ec_response_board_version - Response to the board version command.
+ * @board_version: A monotonically increasing number.
+ */
+struct ec_response_board_version {
+ uint16_t board_version;
+} __ec_align2;
+
+/*
+ * Read memory-mapped data.
+ *
+ * This is an alternate interface to memory-mapped data for bus protocols
+ * which don't support direct-mapped memory - I2C, SPI, etc.
+ *
+ * Response is params.size bytes of data.
+ */
+#define EC_CMD_READ_MEMMAP 0x0007
+
+/**
+ * struct ec_params_read_memmap - Parameters for the read memory map command.
+ * @offset: Offset in memmap (EC_MEMMAP_*).
+ * @size: Size to read in bytes.
+ */
+struct ec_params_read_memmap {
+ uint8_t offset;
+ uint8_t size;
+} __ec_align1;
+
+/* Read versions supported for a command */
+#define EC_CMD_GET_CMD_VERSIONS 0x0008
+
+/**
+ * struct ec_params_get_cmd_versions - Parameters for the get command versions.
+ * @cmd: Command to check.
+ */
+struct ec_params_get_cmd_versions {
+ uint8_t cmd;
+} __ec_align1;
+
+/**
+ * struct ec_params_get_cmd_versions_v1 - Parameters for the get command
+ * versions (v1)
+ * @cmd: Command to check.
+ */
+struct ec_params_get_cmd_versions_v1 {
+ uint16_t cmd;
+} __ec_align2;
+
+/**
+ * struct ec_response_get_cmd_versions - Response to the get command versions.
+ * @version_mask: Mask of supported versions; use EC_VER_MASK() to compare with
+ * a desired version.
+ */
+struct ec_response_get_cmd_versions {
+ uint32_t version_mask;
+} __ec_align4;
+
+/*
+ * Check EC communications status (busy). This is needed on i2c/spi but not
+ * on lpc since it has its own out-of-band busy indicator.
+ *
+ * On lpc, read the status from the command register instead; attempting this
+ * command on lpc would overwrite the args/parameter space and corrupt its
+ * data.
+ */
+#define EC_CMD_GET_COMMS_STATUS 0x0009
+
+/* Avoid using ec_status which is for return values */
+enum ec_comms_status {
+ EC_COMMS_STATUS_PROCESSING = BIT(0), /* Processing cmd */
+};
+
+/**
+ * struct ec_response_get_comms_status - Response to the get comms status
+ * command.
+ * @flags: Mask of enum ec_comms_status.
+ */
+struct ec_response_get_comms_status {
+ uint32_t flags; /* Mask of enum ec_comms_status */
+} __ec_align4;
+
+/* Fake a variety of responses, purely for testing purposes. */
+#define EC_CMD_TEST_PROTOCOL 0x000A
+
+/* Tell the EC what to send back to us. */
+struct ec_params_test_protocol {
+ uint32_t ec_result;
+ uint32_t ret_len;
+ uint8_t buf[32];
+} __ec_align4;
+
+/* Here it comes... */
+struct ec_response_test_protocol {
+ uint8_t buf[32];
+} __ec_align4;
+
+/* Get protocol information */
+#define EC_CMD_GET_PROTOCOL_INFO 0x000B
+
+/* Flags for ec_response_get_protocol_info.flags */
+/* EC_RES_IN_PROGRESS may be returned if a command is slow */
+#define EC_PROTOCOL_INFO_IN_PROGRESS_SUPPORTED BIT(0)
+
+/**
+ * struct ec_response_get_protocol_info - Response to the get protocol info.
+ * @protocol_versions: Bitmask of protocol versions supported (1 << n means
+ * version n).
+ * @max_request_packet_size: Maximum request packet size in bytes.
+ * @max_response_packet_size: Maximum response packet size in bytes.
+ * @flags: see EC_PROTOCOL_INFO_*
+ */
+struct ec_response_get_protocol_info {
+ /* Fields which exist if at least protocol version 3 supported */
+ uint32_t protocol_versions;
+ uint16_t max_request_packet_size;
+ uint16_t max_response_packet_size;
+ uint32_t flags;
+} __ec_align4;
+
+
+/*****************************************************************************/
+/* Get/Set miscellaneous values */
+
+/* The upper byte of .flags tells what to do (nothing means "get") */
+#define EC_GSV_SET 0x80000000
+
+/*
+ * The lower three bytes of .flags identify the parameter, if that has
+ * meaning for an individual command.
+ */
+#define EC_GSV_PARAM_MASK 0x00ffffff
+
+struct ec_params_get_set_value {
+ uint32_t flags;
+ uint32_t value;
+} __ec_align4;
+
+struct ec_response_get_set_value {
+ uint32_t flags;
+ uint32_t value;
+} __ec_align4;
+
+/* More than one command can use these structs to get/set parameters. */
+#define EC_CMD_GSV_PAUSE_IN_S5 0x000C
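+
+/*
+ * Illustrative sketch: filling the params for a "set" (or, with do_set=0, a
+ * "get") using the flag layout above.
+ */
+static inline void ec_gsv_fill_sketch(struct ec_params_get_set_value *p,
+				      int do_set, uint32_t param,
+				      uint32_t value)
+{
+	p->flags = (do_set ? EC_GSV_SET : 0) | (param & EC_GSV_PARAM_MASK);
+	p->value = value;
+}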
+
+/*****************************************************************************/
+/* List the features supported by the firmware */
+#define EC_CMD_GET_FEATURES 0x000D
+
+/* Supported features */
+enum ec_feature_code {
+ /*
+ * This image contains a limited set of features. Another image
+ * in RW partition may support more features.
+ */
+ EC_FEATURE_LIMITED = 0,
+ /*
+ * Commands for probing/reading/writing/erasing the flash in the
+ * EC are present.
+ */
+ EC_FEATURE_FLASH = 1,
+ /*
+ * Can control the fan speed directly.
+ */
+ EC_FEATURE_PWM_FAN = 2,
+ /*
+ * Can control the intensity of the keyboard backlight.
+ */
+ EC_FEATURE_PWM_KEYB = 3,
+ /*
+ * Support Google lightbar, introduced on Pixel.
+ */
+ EC_FEATURE_LIGHTBAR = 4,
+ /* Control of LEDs */
+ EC_FEATURE_LED = 5,
+ /* Exposes an interface to control gyro and sensors.
+ * The host goes through the EC to access these sensors.
+ * In addition, the EC may provide composite sensors, like lid angle.
+ */
+ EC_FEATURE_MOTION_SENSE = 6,
+ /* The keyboard is controlled by the EC */
+ EC_FEATURE_KEYB = 7,
+ /* The AP can use part of the EC flash as persistent storage. */
+ EC_FEATURE_PSTORE = 8,
+ /* The EC monitors BIOS port 80h, and can return POST codes. */
+ EC_FEATURE_PORT80 = 9,
+ /*
+	 * Thermal management: includes TMP-specific commands.
+ * Higher level than direct fan control.
+ */
+ EC_FEATURE_THERMAL = 10,
+ /* Can switch the screen backlight on/off */
+ EC_FEATURE_BKLIGHT_SWITCH = 11,
+ /* Can switch the wifi module on/off */
+ EC_FEATURE_WIFI_SWITCH = 12,
+ /* Monitor host events, through for example SMI or SCI */
+ EC_FEATURE_HOST_EVENTS = 13,
+ /* The EC exposes GPIO commands to control/monitor connected devices. */
+ EC_FEATURE_GPIO = 14,
+ /* The EC can send i2c messages to downstream devices. */
+ EC_FEATURE_I2C = 15,
+	/* Commands to control the charger are included */
+ EC_FEATURE_CHARGER = 16,
+ /* Simple battery support. */
+ EC_FEATURE_BATTERY = 17,
+ /*
+ * Support Smart battery protocol
+ * (Common Smart Battery System Interface Specification)
+ */
+ EC_FEATURE_SMART_BATTERY = 18,
+ /* EC can detect when the host hangs. */
+ EC_FEATURE_HANG_DETECT = 19,
+ /* Report power information, for pit only */
+ EC_FEATURE_PMU = 20,
+ /* Another Cros EC device is present downstream of this one */
+ EC_FEATURE_SUB_MCU = 21,
+ /* Support USB Power delivery (PD) commands */
+ EC_FEATURE_USB_PD = 22,
+ /* Control USB multiplexer, for audio through USB port for instance. */
+ EC_FEATURE_USB_MUX = 23,
+ /* Motion Sensor code has an internal software FIFO */
+ EC_FEATURE_MOTION_SENSE_FIFO = 24,
+ /* Support temporary secure vstore */
+ EC_FEATURE_VSTORE = 25,
+ /* EC decides on USB-C SS mux state, muxes configured by host */
+ EC_FEATURE_USBC_SS_MUX_VIRTUAL = 26,
+ /* EC has RTC feature that can be controlled by host commands */
+ EC_FEATURE_RTC = 27,
+ /* The MCU exposes a Fingerprint sensor */
+ EC_FEATURE_FINGERPRINT = 28,
+ /* The MCU exposes a Touchpad */
+ EC_FEATURE_TOUCHPAD = 29,
+ /* The MCU has RWSIG task enabled */
+ EC_FEATURE_RWSIG = 30,
+ /* EC has device events support */
+ EC_FEATURE_DEVICE_EVENT = 31,
+ /* EC supports the unified wake masks for LPC/eSPI systems */
+ EC_FEATURE_UNIFIED_WAKE_MASKS = 32,
+ /* EC supports 64-bit host events */
+ EC_FEATURE_HOST_EVENT64 = 33,
+	/* EC runs code from RAM rather than executing in place (XIP) from flash */
+ EC_FEATURE_EXEC_IN_RAM = 34,
+ /* EC supports CEC commands */
+ EC_FEATURE_CEC = 35,
+ /* EC supports tight sensor timestamping. */
+ EC_FEATURE_MOTION_SENSE_TIGHT_TIMESTAMPS = 36,
+ /*
+ * EC supports tablet mode detection aligned to Chrome and allows
+ * setting of threshold by host command using
+ * MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE.
+ */
+ EC_FEATURE_REFINED_TABLET_MODE_HYSTERESIS = 37,
+ /* The MCU is a System Companion Processor (SCP). */
+ EC_FEATURE_SCP = 39,
+ /* The MCU is an Integrated Sensor Hub */
+ EC_FEATURE_ISH = 40,
+ /* New TCPMv2 TYPEC_ prefaced commands supported */
+ EC_FEATURE_TYPEC_CMD = 41,
+ /*
+ * The EC will wait for direction from the AP to enter Type-C alternate
+ * modes or USB4.
+ */
+ EC_FEATURE_TYPEC_REQUIRE_AP_MODE_ENTRY = 42,
+ /*
+ * The EC will wait for an acknowledge from the AP after setting the
+ * mux.
+ */
+ EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK = 43,
+ /*
+ * The EC supports entering and residing in S4.
+ */
+ EC_FEATURE_S4_RESIDENCY = 44,
+ /*
+ * The EC supports the AP directing mux sets for the board.
+ */
+ EC_FEATURE_TYPEC_AP_MUX_SET = 45,
+ /*
+ * The EC supports the AP composing VDMs for us to send.
+ */
+ EC_FEATURE_TYPEC_AP_VDM_SEND = 46,
+};
+
+#define EC_FEATURE_MASK_0(event_code) BIT(event_code % 32)
+#define EC_FEATURE_MASK_1(event_code) BIT(event_code - 32)
+
+struct ec_response_get_features {
+ uint32_t flags[2];
+} __ec_align4;
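+
+/*
+ * Illustrative sketch: testing one feature bit against the two 32-bit flag
+ * words using the mask macros above.
+ */
+static inline int ec_has_feature_sketch(
+		const struct ec_response_get_features *r,
+		enum ec_feature_code f)
+{
+	return f < 32 ? !!(r->flags[0] & EC_FEATURE_MASK_0(f))
+		      : !!(r->flags[1] & EC_FEATURE_MASK_1(f));
+}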
+
+/*****************************************************************************/
+/* Get the board's SKU ID from EC */
+#define EC_CMD_GET_SKU_ID 0x000E
+
+/* Set SKU ID from AP */
+#define EC_CMD_SET_SKU_ID 0x000F
+
+struct ec_sku_id_info {
+ uint32_t sku_id;
+} __ec_align4;
+
+/*****************************************************************************/
+/* Flash commands */
+
+/* Get flash info */
+#define EC_CMD_FLASH_INFO 0x0010
+#define EC_VER_FLASH_INFO 2
+
+/**
+ * struct ec_response_flash_info - Response to the flash info command.
+ * @flash_size: Usable flash size in bytes.
+ * @write_block_size: Write block size. Write offset and size must be a
+ * multiple of this.
+ * @erase_block_size: Erase block size. Erase offset and size must be a
+ * multiple of this.
+ * @protect_block_size: Protection block size. Protection offset and size
+ * must be a multiple of this.
+ *
+ * Version 0 returns these fields.
+ */
+struct ec_response_flash_info {
+ uint32_t flash_size;
+ uint32_t write_block_size;
+ uint32_t erase_block_size;
+ uint32_t protect_block_size;
+} __ec_align4;
+
+/*
+ * Flags for version 1+ flash info command
+ * EC flash erases bits to 0 instead of 1.
+ */
+#define EC_FLASH_INFO_ERASE_TO_0 BIT(0)
+
+/*
+ * Flash must be selected for read/write/erase operations to succeed. This may
+ * be necessary on a chip where write/erase can be corrupted by other board
+ * activity, or where the chip needs to enable some sort of programming voltage,
+ * or where the read/write/erase operations require cleanly suspending other
+ * chip functionality.
+ */
+#define EC_FLASH_INFO_SELECT_REQUIRED BIT(1)
+
+/**
+ * struct ec_response_flash_info_1 - Response to the flash info v1 command.
+ * @flash_size: Usable flash size in bytes.
+ * @write_block_size: Write block size. Write offset and size must be a
+ * multiple of this.
+ * @erase_block_size: Erase block size. Erase offset and size must be a
+ * multiple of this.
+ * @protect_block_size: Protection block size. Protection offset and size
+ * must be a multiple of this.
+ * @write_ideal_size: Ideal write size in bytes. Writes will be fastest if
+ * size is exactly this and offset is a multiple of this.
+ * For example, an EC may have a write buffer which can do
+ * half-page operations if data is aligned, and a slower
+ * word-at-a-time write mode.
+ * @flags: Flags; see EC_FLASH_INFO_*
+ *
+ * Version 1 returns the same initial fields as version 0, with additional
+ * fields following.
+ *
+ * gcc anonymous structs don't seem to get along with the __packed directive;
+ * if they did we'd define the version 0 structure as a sub-structure of this
+ * one.
+ *
+ * Version 2 supports flash banks of different sizes:
+ * The caller specifies the number of banks it has preallocated
+ * (num_banks_desc).
+ * The EC returns the number of banks describing the flash memory.
+ * It adds bank descriptions up to num_banks_desc.
+ */
+struct ec_response_flash_info_1 {
+ /* Version 0 fields; see above for description */
+ uint32_t flash_size;
+ uint32_t write_block_size;
+ uint32_t erase_block_size;
+ uint32_t protect_block_size;
+
+ /* Version 1 adds these fields: */
+ uint32_t write_ideal_size;
+ uint32_t flags;
+} __ec_align4;
+
+struct ec_params_flash_info_2 {
+ /* Number of banks to describe */
+ uint16_t num_banks_desc;
+ /* Reserved; set 0; ignore on read */
+ uint8_t reserved[2];
+} __ec_align4;
+
+struct ec_flash_bank {
+	/* Number of sectors in this bank. */
+ uint16_t count;
+ /* Size in power of 2 of each sector (8 --> 256 bytes) */
+ uint8_t size_exp;
+ /* Minimal write size for the sectors in this bank */
+ uint8_t write_size_exp;
+ /* Erase size for the sectors in this bank */
+ uint8_t erase_size_exp;
+ /* Size for write protection, usually identical to erase size. */
+ uint8_t protect_size_exp;
+ /* Reserved; set 0; ignore on read */
+ uint8_t reserved[2];
+};
+
+struct ec_response_flash_info_2 {
+ /* Total flash in the EC. */
+ uint32_t flash_size;
+ /* Flags; see EC_FLASH_INFO_* */
+ uint32_t flags;
+ /* Maximum size to use to send data to write to the EC. */
+ uint32_t write_ideal_size;
+ /* Number of banks present in the EC. */
+ uint16_t num_banks_total;
+ /* Number of banks described in banks array. */
+ uint16_t num_banks_desc;
+ struct ec_flash_bank banks[];
+} __ec_align4;
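+
+/*
+ * Illustrative sketch: sizes in one bank follow from the exponents above,
+ * e.g. size_exp == 8 means 256-byte sectors.
+ */
+static inline uint32_t ec_flash_bank_size_sketch(const struct ec_flash_bank *b)
+{
+	return (uint32_t)b->count << b->size_exp;
+}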
+
+/*
+ * Read flash
+ *
+ * Response is params.size bytes of data.
+ */
+#define EC_CMD_FLASH_READ 0x0011
+
+/**
+ * struct ec_params_flash_read - Parameters for the flash read command.
+ * @offset: Byte offset to read.
+ * @size: Size to read in bytes.
+ */
+struct ec_params_flash_read {
+ uint32_t offset;
+ uint32_t size;
+} __ec_align4;
+
+/* Write flash */
+#define EC_CMD_FLASH_WRITE 0x0012
+#define EC_VER_FLASH_WRITE 1
+
+/* Version 0 of the flash command supported only 64 bytes of data */
+#define EC_FLASH_WRITE_VER0_SIZE 64
+
+/**
+ * struct ec_params_flash_write - Parameters for the flash write command.
+ * @offset: Byte offset to write.
+ * @size: Size to write in bytes.
+ */
+struct ec_params_flash_write {
+ uint32_t offset;
+ uint32_t size;
+ /* Followed by data to write */
+} __ec_align4;
+
+/* Erase flash */
+#define EC_CMD_FLASH_ERASE 0x0013
+
+/**
+ * struct ec_params_flash_erase - Parameters for the flash erase command, v0.
+ * @offset: Byte offset to erase.
+ * @size: Size to erase in bytes.
+ */
+struct ec_params_flash_erase {
+ uint32_t offset;
+ uint32_t size;
+} __ec_align4;
+
+/*
+ * v1 adds async erase:
+ * subcommands can return:
+ * EC_RES_SUCCESS : erased (see ERASE_SECTOR_ASYNC case below).
+ * EC_RES_INVALID_PARAM : offset/size are not aligned on an erase boundary.
+ * EC_RES_ERROR : other errors.
+ * EC_RES_BUSY : an existing erase operation is in progress.
+ * EC_RES_ACCESS_DENIED: trying to erase the running image.
+ *
+ * When ERASE_SECTOR_ASYNC returns EC_RES_SUCCESS, the operation is just
+ * properly queued. The user must call ERASE_GET_RESULT subcommand to get
+ * the proper result.
+ * When ERASE_GET_RESULT returns EC_RES_BUSY, the caller must wait and send
+ * ERASE_GET_RESULT again to get the result of ERASE_SECTOR_ASYNC.
+ * The ERASE_GET_RESULT command may time out on ECs where flash access is not
+ * permitted while erasing (for instance, STM32F4).
+ */
+enum ec_flash_erase_cmd {
+ FLASH_ERASE_SECTOR, /* Erase and wait for result */
+ FLASH_ERASE_SECTOR_ASYNC, /* Erase and return immediately. */
+ FLASH_ERASE_GET_RESULT, /* Ask for last erase result */
+};
+
+/**
+ * struct ec_params_flash_erase_v1 - Parameters for the flash erase command, v1.
+ * @cmd: One of ec_flash_erase_cmd.
+ * @reserved: Pad byte; currently always contains 0.
+ * @flag: No flags defined yet; set to 0.
+ * @params: Same as v0 parameters.
+ */
+struct ec_params_flash_erase_v1 {
+ uint8_t cmd;
+ uint8_t reserved;
+ uint16_t flag;
+ struct ec_params_flash_erase params;
+} __ec_align4;
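+
+/*
+ * Illustrative sketch of the v1 async flow described above. ec_send_cmd()
+ * is a hypothetical transport callback returning an enum ec_status value;
+ * a real driver would plug in its own host-command path and a timeout.
+ */
+static inline int ec_flash_erase_async_sketch(
+		int (*ec_send_cmd)(uint16_t cmd, int version,
+				   const void *params, int params_size),
+		uint32_t offset, uint32_t size)
+{
+	struct ec_params_flash_erase_v1 p = {
+		.cmd = FLASH_ERASE_SECTOR_ASYNC,
+		.params = { .offset = offset, .size = size },
+	};
+	int ret = ec_send_cmd(EC_CMD_FLASH_ERASE, 1, &p, sizeof(p));
+
+	if (ret != EC_RES_SUCCESS)
+		return ret;
+	/* Queued successfully; poll for the real result */
+	p.cmd = FLASH_ERASE_GET_RESULT;
+	do {
+		ret = ec_send_cmd(EC_CMD_FLASH_ERASE, 1, &p, sizeof(p));
+	} while (ret == EC_RES_BUSY);
+	return ret;
+}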
+
+/*
+ * Get/set flash protection.
+ *
+ * If mask!=0, sets/clears the requested bits of flags. Depending on the
+ * firmware write protect GPIO, not all flags will take effect immediately;
+ * some flags require a subsequent hard reset to take effect. Check the
+ * returned flags bits to see what actually happened.
+ *
+ * If mask=0, simply returns the current flags state.
+ */
+#define EC_CMD_FLASH_PROTECT 0x0015
+#define EC_VER_FLASH_PROTECT 1 /* Command version 1 */
+
+/* Flags for flash protection */
+/* RO flash code protected when the EC boots */
+#define EC_FLASH_PROTECT_RO_AT_BOOT BIT(0)
+/*
+ * RO flash code protected now. If this bit is set, at-boot status cannot
+ * be changed.
+ */
+#define EC_FLASH_PROTECT_RO_NOW BIT(1)
+/* Entire flash code protected now, until reboot. */
+#define EC_FLASH_PROTECT_ALL_NOW BIT(2)
+/* Flash write protect GPIO is asserted now */
+#define EC_FLASH_PROTECT_GPIO_ASSERTED BIT(3)
+/* Error - at least one bank of flash is stuck locked, and cannot be unlocked */
+#define EC_FLASH_PROTECT_ERROR_STUCK BIT(4)
+/*
+ * Error - flash protection is in inconsistent state. At least one bank of
+ * flash which should be protected is not protected. Usually fixed by
+ * re-requesting the desired flags, or by a hard reset if that fails.
+ */
+#define EC_FLASH_PROTECT_ERROR_INCONSISTENT BIT(5)
+/* Entire flash code protected when the EC boots */
+#define EC_FLASH_PROTECT_ALL_AT_BOOT BIT(6)
+/* RW flash code protected when the EC boots */
+#define EC_FLASH_PROTECT_RW_AT_BOOT BIT(7)
+/* RW flash code protected now. */
+#define EC_FLASH_PROTECT_RW_NOW BIT(8)
+/* Rollback information flash region protected when the EC boots */
+#define EC_FLASH_PROTECT_ROLLBACK_AT_BOOT BIT(9)
+/* Rollback information flash region protected now */
+#define EC_FLASH_PROTECT_ROLLBACK_NOW BIT(10)
+
+
+/**
+ * struct ec_params_flash_protect - Parameters for the flash protect command.
+ * @mask: Bits in flags to apply.
+ * @flags: New flags to apply.
+ */
+struct ec_params_flash_protect {
+ uint32_t mask;
+ uint32_t flags;
+} __ec_align4;
+
+/**
+ * struct ec_response_flash_protect - Response to the flash protect command.
+ * @flags: Current value of flash protect flags.
+ * @valid_flags: Flags which are valid on this platform. This allows the
+ * caller to distinguish between flags which aren't set vs. flags
+ * which can't be set on this platform.
+ * @writable_flags: Flags which can be changed given the current protection
+ * state.
+ */
+struct ec_response_flash_protect {
+ uint32_t flags;
+ uint32_t valid_flags;
+ uint32_t writable_flags;
+} __ec_align4;
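+
+/*
+ * Illustrative sketch: requesting RO-at-boot protection. As described above,
+ * the caller should inspect the returned flags (and valid_flags /
+ * writable_flags) to see what actually took effect.
+ */
+static inline void ec_flash_protect_ro_at_boot_sketch(
+		struct ec_params_flash_protect *p)
+{
+	p->mask = EC_FLASH_PROTECT_RO_AT_BOOT;
+	p->flags = EC_FLASH_PROTECT_RO_AT_BOOT;
+}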
+
+/*
+ * Note: commands 0x14 - 0x19 version 0 were old commands to get/set flash
+ * write protect. These commands may be reused with version > 0.
+ */
+
+/* Get the region offset/size */
+#define EC_CMD_FLASH_REGION_INFO 0x0016
+#define EC_VER_FLASH_REGION_INFO 1
+
+enum ec_flash_region {
+ /* Region which holds read-only EC image */
+ EC_FLASH_REGION_RO = 0,
+ /*
+	 * Region which holds the active RW image. 'Active' is different from
+	 * 'running'. Active means 'scheduled-to-run'. Since the RO image is
+	 * always scheduled to run, active/non-active applies only to RW images
+	 * (for the same reason 'update' applies only to RW images). It's a
+	 * state of an image on flash. The running image can be RO, RW_A, or
+	 * RW_B, but the active image can only be RW_A or RW_B. In recovery
+	 * mode, an active RW image doesn't enter the 'running' state, but it's
+	 * still active on flash.
+ */
+ EC_FLASH_REGION_ACTIVE,
+ /*
+ * Region which should be write-protected in the factory (a superset of
+ * EC_FLASH_REGION_RO)
+ */
+ EC_FLASH_REGION_WP_RO,
+ /* Region which holds updatable (non-active) RW image */
+ EC_FLASH_REGION_UPDATE,
+ /* Number of regions */
+ EC_FLASH_REGION_COUNT,
+};
+/*
+ * 'RW' is vague if there are multiple RW images; we mean the active one,
+ * so the old constant is deprecated.
+ */
+#define EC_FLASH_REGION_RW EC_FLASH_REGION_ACTIVE
+
+/**
+ * struct ec_params_flash_region_info - Parameters for the flash region info
+ * command.
+ * @region: Flash region; see EC_FLASH_REGION_*
+ */
+struct ec_params_flash_region_info {
+ uint32_t region;
+} __ec_align4;
+
+struct ec_response_flash_region_info {
+ uint32_t offset;
+ uint32_t size;
+} __ec_align4;
+
+/* Read/write VbNvContext */
+#define EC_CMD_VBNV_CONTEXT 0x0017
+#define EC_VER_VBNV_CONTEXT 1
+#define EC_VBNV_BLOCK_SIZE 16
+
+enum ec_vbnvcontext_op {
+ EC_VBNV_CONTEXT_OP_READ,
+ EC_VBNV_CONTEXT_OP_WRITE,
+};
+
+struct ec_params_vbnvcontext {
+ uint32_t op;
+ uint8_t block[EC_VBNV_BLOCK_SIZE];
+} __ec_align4;
+
+struct ec_response_vbnvcontext {
+ uint8_t block[EC_VBNV_BLOCK_SIZE];
+} __ec_align4;
+
+
+/* Get SPI flash information */
+#define EC_CMD_FLASH_SPI_INFO 0x0018
+
+struct ec_response_flash_spi_info {
+ /* JEDEC info from command 0x9F (manufacturer, memory type, size) */
+ uint8_t jedec[3];
+
+ /* Pad byte; currently always contains 0 */
+ uint8_t reserved0;
+
+ /* Manufacturer / device ID from command 0x90 */
+ uint8_t mfr_dev_id[2];
+
+ /* Status registers from command 0x05 and 0x35 */
+ uint8_t sr1, sr2;
+} __ec_align1;
+
+
+/* Select flash during flash operations */
+#define EC_CMD_FLASH_SELECT 0x0019
+
+/**
+ * struct ec_params_flash_select - Parameters for the flash select command.
+ * @select: 1 to select flash, 0 to deselect flash
+ */
+struct ec_params_flash_select {
+ uint8_t select;
+} __ec_align4;
+
+
+/*****************************************************************************/
+/* PWM commands */
+
+/* Get fan target RPM */
+#define EC_CMD_PWM_GET_FAN_TARGET_RPM 0x0020
+
+struct ec_response_pwm_get_fan_rpm {
+ uint32_t rpm;
+} __ec_align4;
+
+/* Set target fan RPM */
+#define EC_CMD_PWM_SET_FAN_TARGET_RPM 0x0021
+
+/* Version 0 of input params */
+struct ec_params_pwm_set_fan_target_rpm_v0 {
+ uint32_t rpm;
+} __ec_align4;
+
+/* Version 1 of input params */
+struct ec_params_pwm_set_fan_target_rpm_v1 {
+ uint32_t rpm;
+ uint8_t fan_idx;
+} __ec_align_size1;
+
+/* Get keyboard backlight */
+/* OBSOLETE - Use EC_CMD_PWM_SET_DUTY */
+#define EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT 0x0022
+
+struct ec_response_pwm_get_keyboard_backlight {
+ uint8_t percent;
+ uint8_t enabled;
+} __ec_align1;
+
+/* Set keyboard backlight */
+/* OBSOLETE - Use EC_CMD_PWM_SET_DUTY */
+#define EC_CMD_PWM_SET_KEYBOARD_BACKLIGHT 0x0023
+
+struct ec_params_pwm_set_keyboard_backlight {
+ uint8_t percent;
+} __ec_align1;
+
+/* Set target fan PWM duty cycle */
+#define EC_CMD_PWM_SET_FAN_DUTY 0x0024
+
+/* Version 0 of input params */
+struct ec_params_pwm_set_fan_duty_v0 {
+ uint32_t percent;
+} __ec_align4;
+
+/* Version 1 of input params */
+struct ec_params_pwm_set_fan_duty_v1 {
+ uint32_t percent;
+ uint8_t fan_idx;
+} __ec_align_size1;
+
+#define EC_CMD_PWM_SET_DUTY 0x0025
+/* 16 bit duty cycle, 0xffff = 100% */
+#define EC_PWM_MAX_DUTY 0xffff
+
+enum ec_pwm_type {
+ /* All types, indexed by board-specific enum pwm_channel */
+ EC_PWM_TYPE_GENERIC = 0,
+ /* Keyboard backlight */
+ EC_PWM_TYPE_KB_LIGHT,
+ /* Display backlight */
+ EC_PWM_TYPE_DISPLAY_LIGHT,
+ EC_PWM_TYPE_COUNT,
+};
+
+struct ec_params_pwm_set_duty {
+ uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */
+ uint8_t pwm_type; /* ec_pwm_type */
+ uint8_t index; /* Type-specific index, or 0 if unique */
+} __ec_align4;
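+
+/*
+ * Illustrative sketch: converting a percentage to the 16-bit duty cycle used
+ * by EC_CMD_PWM_SET_DUTY.
+ */
+static inline uint16_t ec_pwm_percent_to_duty_sketch(unsigned int percent)
+{
+	return (uint16_t)((percent * EC_PWM_MAX_DUTY) / 100);
+}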
+
+#define EC_CMD_PWM_GET_DUTY 0x0026
+
+struct ec_params_pwm_get_duty {
+ uint8_t pwm_type; /* ec_pwm_type */
+ uint8_t index; /* Type-specific index, or 0 if unique */
+} __ec_align1;
+
+struct ec_response_pwm_get_duty {
+ uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */
+} __ec_align2;
+
+/*****************************************************************************/
+/*
+ * Lightbar commands. This looks worse than it is. Since we only use one HOST
+ * command to say "talk to the lightbar", we put the "and tell it to do X" part
+ * into a subcommand. We'll make separate structs for subcommands with
+ * different input args, so that we know how much to expect.
+ */
+#define EC_CMD_LIGHTBAR_CMD 0x0028
+
+struct rgb_s {
+ uint8_t r, g, b;
+} __ec_todo_unpacked;
+
+#define LB_BATTERY_LEVELS 4
+
+/*
+ * List of tweakable parameters. NOTE: It's __packed so it can be sent in a
+ * host command, but the alignment is the same regardless. Keep it that way.
+ */
+struct lightbar_params_v0 {
+ /* Timing */
+ int32_t google_ramp_up;
+ int32_t google_ramp_down;
+ int32_t s3s0_ramp_up;
+ int32_t s0_tick_delay[2]; /* AC=0/1 */
+ int32_t s0a_tick_delay[2]; /* AC=0/1 */
+ int32_t s0s3_ramp_down;
+ int32_t s3_sleep_for;
+ int32_t s3_ramp_up;
+ int32_t s3_ramp_down;
+
+ /* Oscillation */
+ uint8_t new_s0;
+ uint8_t osc_min[2]; /* AC=0/1 */
+ uint8_t osc_max[2]; /* AC=0/1 */
+ uint8_t w_ofs[2]; /* AC=0/1 */
+
+ /* Brightness limits based on the backlight and AC. */
+ uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */
+ uint8_t bright_bl_on_min[2]; /* AC=0/1 */
+ uint8_t bright_bl_on_max[2]; /* AC=0/1 */
+
+ /* Battery level thresholds */
+ uint8_t battery_threshold[LB_BATTERY_LEVELS - 1];
+
+ /* Map [AC][battery_level] to color index */
+ uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */
+ uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */
+
+ /* Color palette */
+ struct rgb_s color[8]; /* 0-3 are Google colors */
+} __ec_todo_packed;
+
+struct lightbar_params_v1 {
+ /* Timing */
+ int32_t google_ramp_up;
+ int32_t google_ramp_down;
+ int32_t s3s0_ramp_up;
+ int32_t s0_tick_delay[2]; /* AC=0/1 */
+ int32_t s0a_tick_delay[2]; /* AC=0/1 */
+ int32_t s0s3_ramp_down;
+ int32_t s3_sleep_for;
+ int32_t s3_ramp_up;
+ int32_t s3_ramp_down;
+ int32_t s5_ramp_up;
+ int32_t s5_ramp_down;
+ int32_t tap_tick_delay;
+ int32_t tap_gate_delay;
+ int32_t tap_display_time;
+
+ /* Tap-for-battery params */
+ uint8_t tap_pct_red;
+ uint8_t tap_pct_green;
+ uint8_t tap_seg_min_on;
+ uint8_t tap_seg_max_on;
+ uint8_t tap_seg_osc;
+ uint8_t tap_idx[3];
+
+ /* Oscillation */
+ uint8_t osc_min[2]; /* AC=0/1 */
+ uint8_t osc_max[2]; /* AC=0/1 */
+ uint8_t w_ofs[2]; /* AC=0/1 */
+
+ /* Brightness limits based on the backlight and AC. */
+ uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */
+ uint8_t bright_bl_on_min[2]; /* AC=0/1 */
+ uint8_t bright_bl_on_max[2]; /* AC=0/1 */
+
+ /* Battery level thresholds */
+ uint8_t battery_threshold[LB_BATTERY_LEVELS - 1];
+
+ /* Map [AC][battery_level] to color index */
+ uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */
+ uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */
+
+ /* s5: single color pulse on inhibited power-up */
+ uint8_t s5_idx;
+
+ /* Color palette */
+ struct rgb_s color[8]; /* 0-3 are Google colors */
+} __ec_todo_packed;
+
+/* Lightbar command params v2
+ * crbug.com/467716
+ *
+ * lightbar_params_v1 was too big for i2c, therefore in v2 we split the
+ * parameters into logical groups to keep each one manageable (< 120 bytes).
+ *
+ * NOTE: Each of these groups must be less than 120 bytes.
+ */
+
+struct lightbar_params_v2_timing {
+ /* Timing */
+ int32_t google_ramp_up;
+ int32_t google_ramp_down;
+ int32_t s3s0_ramp_up;
+ int32_t s0_tick_delay[2]; /* AC=0/1 */
+ int32_t s0a_tick_delay[2]; /* AC=0/1 */
+ int32_t s0s3_ramp_down;
+ int32_t s3_sleep_for;
+ int32_t s3_ramp_up;
+ int32_t s3_ramp_down;
+ int32_t s5_ramp_up;
+ int32_t s5_ramp_down;
+ int32_t tap_tick_delay;
+ int32_t tap_gate_delay;
+ int32_t tap_display_time;
+} __ec_todo_packed;
+
+struct lightbar_params_v2_tap {
+ /* Tap-for-battery params */
+ uint8_t tap_pct_red;
+ uint8_t tap_pct_green;
+ uint8_t tap_seg_min_on;
+ uint8_t tap_seg_max_on;
+ uint8_t tap_seg_osc;
+ uint8_t tap_idx[3];
+} __ec_todo_packed;
+
+struct lightbar_params_v2_oscillation {
+ /* Oscillation */
+ uint8_t osc_min[2]; /* AC=0/1 */
+ uint8_t osc_max[2]; /* AC=0/1 */
+ uint8_t w_ofs[2]; /* AC=0/1 */
+} __ec_todo_packed;
+
+struct lightbar_params_v2_brightness {
+ /* Brightness limits based on the backlight and AC. */
+ uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */
+ uint8_t bright_bl_on_min[2]; /* AC=0/1 */
+ uint8_t bright_bl_on_max[2]; /* AC=0/1 */
+} __ec_todo_packed;
+
+struct lightbar_params_v2_thresholds {
+ /* Battery level thresholds */
+ uint8_t battery_threshold[LB_BATTERY_LEVELS - 1];
+} __ec_todo_packed;
+
+struct lightbar_params_v2_colors {
+ /* Map [AC][battery_level] to color index */
+ uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */
+ uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */
+
+ /* s5: single color pulse on inhibited power-up */
+ uint8_t s5_idx;
+
+ /* Color palette */
+ struct rgb_s color[8]; /* 0-3 are Google colors */
+} __ec_todo_packed;
+
+/* Lightbar program. */
+#define EC_LB_PROG_LEN 192
+struct lightbar_program {
+ uint8_t size;
+ uint8_t data[EC_LB_PROG_LEN];
+} __ec_todo_unpacked;
+
+struct ec_params_lightbar {
+ uint8_t cmd; /* Command (see enum lightbar_command) */
+ union {
+ /*
+ * The following commands have no args:
+ *
+ * dump, off, on, init, get_seq, get_params_v0, get_params_v1,
+ * version, get_brightness, get_demo, suspend, resume,
+ * get_params_v2_timing, get_params_v2_tap, get_params_v2_osc,
+ * get_params_v2_bright, get_params_v2_thlds,
+ * get_params_v2_colors
+ *
+ * Don't use an empty struct, because C++ hates that.
+ */
+
+ struct __ec_todo_unpacked {
+ uint8_t num;
+ } set_brightness, seq, demo;
+
+ struct __ec_todo_unpacked {
+ uint8_t ctrl, reg, value;
+ } reg;
+
+ struct __ec_todo_unpacked {
+ uint8_t led, red, green, blue;
+ } set_rgb;
+
+ struct __ec_todo_unpacked {
+ uint8_t led;
+ } get_rgb;
+
+ struct __ec_todo_unpacked {
+ uint8_t enable;
+ } manual_suspend_ctrl;
+
+ struct lightbar_params_v0 set_params_v0;
+ struct lightbar_params_v1 set_params_v1;
+
+ struct lightbar_params_v2_timing set_v2par_timing;
+ struct lightbar_params_v2_tap set_v2par_tap;
+ struct lightbar_params_v2_oscillation set_v2par_osc;
+ struct lightbar_params_v2_brightness set_v2par_bright;
+ struct lightbar_params_v2_thresholds set_v2par_thlds;
+ struct lightbar_params_v2_colors set_v2par_colors;
+
+ struct lightbar_program set_program;
+ };
+} __ec_todo_packed;
+
+struct ec_response_lightbar {
+ union {
+ struct __ec_todo_unpacked {
+ struct __ec_todo_unpacked {
+ uint8_t reg;
+ uint8_t ic0;
+ uint8_t ic1;
+ } vals[23];
+ } dump;
+
+ struct __ec_todo_unpacked {
+ uint8_t num;
+ } get_seq, get_brightness, get_demo;
+
+ struct lightbar_params_v0 get_params_v0;
+ struct lightbar_params_v1 get_params_v1;
+
+
+ struct lightbar_params_v2_timing get_params_v2_timing;
+ struct lightbar_params_v2_tap get_params_v2_tap;
+ struct lightbar_params_v2_oscillation get_params_v2_osc;
+ struct lightbar_params_v2_brightness get_params_v2_bright;
+ struct lightbar_params_v2_thresholds get_params_v2_thlds;
+ struct lightbar_params_v2_colors get_params_v2_colors;
+
+ struct __ec_todo_unpacked {
+ uint32_t num;
+ uint32_t flags;
+ } version;
+
+ struct __ec_todo_unpacked {
+ uint8_t red, green, blue;
+ } get_rgb;
+
+ /*
+ * The following commands have no response:
+ *
+ * off, on, init, set_brightness, seq, reg, set_rgb, demo,
+ * set_params_v0, set_params_v1, set_program,
+ * manual_suspend_ctrl, suspend, resume, set_v2par_timing,
+ * set_v2par_tap, set_v2par_osc, set_v2par_bright,
+ * set_v2par_thlds, set_v2par_colors
+ */
+ };
+} __ec_todo_packed;
+
+/* Lightbar commands */
+enum lightbar_command {
+ LIGHTBAR_CMD_DUMP = 0,
+ LIGHTBAR_CMD_OFF = 1,
+ LIGHTBAR_CMD_ON = 2,
+ LIGHTBAR_CMD_INIT = 3,
+ LIGHTBAR_CMD_SET_BRIGHTNESS = 4,
+ LIGHTBAR_CMD_SEQ = 5,
+ LIGHTBAR_CMD_REG = 6,
+ LIGHTBAR_CMD_SET_RGB = 7,
+ LIGHTBAR_CMD_GET_SEQ = 8,
+ LIGHTBAR_CMD_DEMO = 9,
+ LIGHTBAR_CMD_GET_PARAMS_V0 = 10,
+ LIGHTBAR_CMD_SET_PARAMS_V0 = 11,
+ LIGHTBAR_CMD_VERSION = 12,
+ LIGHTBAR_CMD_GET_BRIGHTNESS = 13,
+ LIGHTBAR_CMD_GET_RGB = 14,
+ LIGHTBAR_CMD_GET_DEMO = 15,
+ LIGHTBAR_CMD_GET_PARAMS_V1 = 16,
+ LIGHTBAR_CMD_SET_PARAMS_V1 = 17,
+ LIGHTBAR_CMD_SET_PROGRAM = 18,
+ LIGHTBAR_CMD_MANUAL_SUSPEND_CTRL = 19,
+ LIGHTBAR_CMD_SUSPEND = 20,
+ LIGHTBAR_CMD_RESUME = 21,
+ LIGHTBAR_CMD_GET_PARAMS_V2_TIMING = 22,
+ LIGHTBAR_CMD_SET_PARAMS_V2_TIMING = 23,
+ LIGHTBAR_CMD_GET_PARAMS_V2_TAP = 24,
+ LIGHTBAR_CMD_SET_PARAMS_V2_TAP = 25,
+ LIGHTBAR_CMD_GET_PARAMS_V2_OSCILLATION = 26,
+ LIGHTBAR_CMD_SET_PARAMS_V2_OSCILLATION = 27,
+ LIGHTBAR_CMD_GET_PARAMS_V2_BRIGHTNESS = 28,
+ LIGHTBAR_CMD_SET_PARAMS_V2_BRIGHTNESS = 29,
+ LIGHTBAR_CMD_GET_PARAMS_V2_THRESHOLDS = 30,
+ LIGHTBAR_CMD_SET_PARAMS_V2_THRESHOLDS = 31,
+ LIGHTBAR_CMD_GET_PARAMS_V2_COLORS = 32,
+ LIGHTBAR_CMD_SET_PARAMS_V2_COLORS = 33,
+ LIGHTBAR_NUM_CMDS
+};
+
+/*****************************************************************************/
+/* LED control commands */
+
+#define EC_CMD_LED_CONTROL 0x0029
+
+enum ec_led_id {
+ /* LED to indicate battery state of charge */
+ EC_LED_ID_BATTERY_LED = 0,
+ /*
+ * LED to indicate system power state (on or in suspend).
+ * May be on power button or on C-panel.
+ */
+ EC_LED_ID_POWER_LED,
+ /* LED on power adapter or its plug */
+ EC_LED_ID_ADAPTER_LED,
+ /* LED to indicate left side */
+ EC_LED_ID_LEFT_LED,
+ /* LED to indicate right side */
+ EC_LED_ID_RIGHT_LED,
+ /* LED to indicate recovery mode with HW_REINIT */
+ EC_LED_ID_RECOVERY_HW_REINIT_LED,
+ /* LED to indicate sysrq debug mode. */
+ EC_LED_ID_SYSRQ_DEBUG_LED,
+
+ EC_LED_ID_COUNT
+};
+
+/* LED control flags */
+#define EC_LED_FLAGS_QUERY BIT(0) /* Query LED capability only */
+#define EC_LED_FLAGS_AUTO BIT(1) /* Switch LED back to automatic control */
+
+enum ec_led_colors {
+ EC_LED_COLOR_RED = 0,
+ EC_LED_COLOR_GREEN,
+ EC_LED_COLOR_BLUE,
+ EC_LED_COLOR_YELLOW,
+ EC_LED_COLOR_WHITE,
+ EC_LED_COLOR_AMBER,
+
+ EC_LED_COLOR_COUNT
+};
+
+struct ec_params_led_control {
+ uint8_t led_id; /* Which LED to control */
+ uint8_t flags; /* Control flags */
+
+ uint8_t brightness[EC_LED_COLOR_COUNT];
+} __ec_align1;
+
+struct ec_response_led_control {
+ /*
+ * Available brightness value range.
+ *
+ * Range 0 means color channel not present.
+ * Range 1 means on/off control.
+	 * Other values mean the LED is controlled by PWM.
+ */
+ uint8_t brightness_range[EC_LED_COLOR_COUNT];
+} __ec_align1;
+
+/*****************************************************************************/
+/* Verified boot commands */
+
+/*
+ * Note: command code 0x29 version 0 was VBOOT_CMD in Link EVT; it may be
+ * reused for other purposes with version > 0.
+ */
+
+/* Verified boot hash command */
+#define EC_CMD_VBOOT_HASH 0x002A
+
+struct ec_params_vboot_hash {
+ uint8_t cmd; /* enum ec_vboot_hash_cmd */
+ uint8_t hash_type; /* enum ec_vboot_hash_type */
+ uint8_t nonce_size; /* Nonce size; may be 0 */
+ uint8_t reserved0; /* Reserved; set 0 */
+ uint32_t offset; /* Offset in flash to hash */
+ uint32_t size; /* Number of bytes to hash */
+ uint8_t nonce_data[64]; /* Nonce data; ignored if nonce_size=0 */
+} __ec_align4;
+
+struct ec_response_vboot_hash {
+ uint8_t status; /* enum ec_vboot_hash_status */
+ uint8_t hash_type; /* enum ec_vboot_hash_type */
+ uint8_t digest_size; /* Size of hash digest in bytes */
+ uint8_t reserved0; /* Ignore; will be 0 */
+ uint32_t offset; /* Offset in flash which was hashed */
+ uint32_t size; /* Number of bytes hashed */
+ uint8_t hash_digest[64]; /* Hash digest data */
+} __ec_align4;
+
+enum ec_vboot_hash_cmd {
+ EC_VBOOT_HASH_GET = 0, /* Get current hash status */
+ EC_VBOOT_HASH_ABORT = 1, /* Abort calculating current hash */
+ EC_VBOOT_HASH_START = 2, /* Start computing a new hash */
+ EC_VBOOT_HASH_RECALC = 3, /* Synchronously compute a new hash */
+};
+
+enum ec_vboot_hash_type {
+ EC_VBOOT_HASH_TYPE_SHA256 = 0, /* SHA-256 */
+};
+
+enum ec_vboot_hash_status {
+ EC_VBOOT_HASH_STATUS_NONE = 0, /* No hash (not started, or aborted) */
+ EC_VBOOT_HASH_STATUS_DONE = 1, /* Finished computing a hash */
+ EC_VBOOT_HASH_STATUS_BUSY = 2, /* Busy computing a hash */
+};
+
+/*
+ * Special values for offset for EC_VBOOT_HASH_START and EC_VBOOT_HASH_RECALC.
+ * If one of these is specified, the EC will automatically update offset and
+ * size to the correct values for the specified image (RO or RW).
+ */
+#define EC_VBOOT_HASH_OFFSET_RO 0xfffffffe
+#define EC_VBOOT_HASH_OFFSET_ACTIVE 0xfffffffd
+#define EC_VBOOT_HASH_OFFSET_UPDATE 0xfffffffc
+
+/*
+ * 'RW' is vague if there are multiple RW images; we mean the active one,
+ * so the old constant is deprecated.
+ */
+#define EC_VBOOT_HASH_OFFSET_RW EC_VBOOT_HASH_OFFSET_ACTIVE
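+
+/*
+ * Usage sketch (illustrative only): start hashing the RO image, letting the
+ * EC fill in offset and size, then poll until the hash is done. ec_cmd() is
+ * a hypothetical transport helper, as in the other examples in this file.
+ *
+ * struct ec_params_vboot_hash p = {
+ *   .cmd = EC_VBOOT_HASH_START,
+ *   .hash_type = EC_VBOOT_HASH_TYPE_SHA256,
+ *   .offset = EC_VBOOT_HASH_OFFSET_RO,
+ * };
+ * struct ec_response_vboot_hash r;
+ *
+ * ec_cmd(EC_CMD_VBOOT_HASH, &p, sizeof(p), &r, sizeof(r));
+ * do {
+ *   p.cmd = EC_VBOOT_HASH_GET;
+ *   ec_cmd(EC_CMD_VBOOT_HASH, &p, sizeof(p), &r, sizeof(r));
+ * } while (r.status == EC_VBOOT_HASH_STATUS_BUSY);
+ */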
+
+/*****************************************************************************/
+/*
+ * Motion sense commands. We'll make separate structs for sub-commands with
+ * different input args, so that we know how much to expect.
+ */
+#define EC_CMD_MOTION_SENSE_CMD 0x002B
+
+/* Motion sense commands */
+enum motionsense_command {
+ /*
+ * Dump command returns all motion sensor data including motion sense
+ * module flags and individual sensor flags.
+ */
+ MOTIONSENSE_CMD_DUMP = 0,
+
+ /*
+ * Info command returns data describing the details of a given sensor,
+ * including enum motionsensor_type, enum motionsensor_location, and
+ * enum motionsensor_chip.
+ */
+ MOTIONSENSE_CMD_INFO = 1,
+
+ /*
+ * EC Rate command is a setter/getter command for the EC sampling rate
+ * in milliseconds.
+ * It is per sensor; the EC runs its sampling task at the minimum of
+ * all sensors' EC_RATE values.
+ * For sensors without a hardware FIFO, EC_RATE should equal 1/ODR to
+ * collect all the sensor samples.
+ * For sensors with a hardware FIFO, EC_RATE is the maximum delay, in
+ * milliseconds, before the samples of all motion sensors are processed.
+ */
+ MOTIONSENSE_CMD_EC_RATE = 2,
+
+ /*
+ * Sensor ODR command is a setter/getter command for the output data
+ * rate of a specific motion sensor in millihertz.
+ */
+ MOTIONSENSE_CMD_SENSOR_ODR = 3,
+
+ /*
+ * Sensor range command is a setter/getter command for the range of
+ * a specified motion sensor in +/-G's or +/- deg/s.
+ */
+ MOTIONSENSE_CMD_SENSOR_RANGE = 4,
+
+ /*
+ * Setter/getter command for the keyboard wake angle. When the lid
+ * angle is greater than this value, keyboard wake is disabled in S3,
+ * and when the lid angle drops below this value, keyboard wake is
+ * enabled. Note that the lid angle measurement is an approximate,
+ * uncalibrated value, so the wake angle isn't exact.
+ */
+ MOTIONSENSE_CMD_KB_WAKE_ANGLE = 5,
+
+ /*
+ * Returns data for a single sensor.
+ */
+ MOTIONSENSE_CMD_DATA = 6,
+
+ /*
+ * Return sensor fifo info.
+ */
+ MOTIONSENSE_CMD_FIFO_INFO = 7,
+
+ /*
+ * Insert a flush element in the fifo and return sensor fifo info.
+ * The host can use that element to synchronize its operation.
+ */
+ MOTIONSENSE_CMD_FIFO_FLUSH = 8,
+
+ /*
+ * Return a portion of the fifo.
+ */
+ MOTIONSENSE_CMD_FIFO_READ = 9,
+
+ /*
+ * Perform low level calibration.
+ * On sensors that support it, ask to do offset calibration.
+ */
+ MOTIONSENSE_CMD_PERFORM_CALIB = 10,
+
+ /*
+ * Sensor Offset command is a setter/getter command for the offset
+ * used for calibration.
+ * The offsets can be calculated by the host, or via
+ * PERFORM_CALIB command.
+ */
+ MOTIONSENSE_CMD_SENSOR_OFFSET = 11,
+
+ /*
+ * List available activities for a MOTION sensor.
+ * Indicates if they are enabled or disabled.
+ */
+ MOTIONSENSE_CMD_LIST_ACTIVITIES = 12,
+
+ /*
+ * Activity management
+ * Enable/Disable activity recognition.
+ */
+ MOTIONSENSE_CMD_SET_ACTIVITY = 13,
+
+ /*
+ * Lid Angle
+ */
+ MOTIONSENSE_CMD_LID_ANGLE = 14,
+
+ /*
+ * Allow the FIFO to trigger interrupt via MKBP events.
+ * By default the FIFO does not raise an interrupt to process its
+ * contents until the AP is ready or the data comes from a wakeup sensor.
+ */
+ MOTIONSENSE_CMD_FIFO_INT_ENABLE = 15,
+
+ /*
+ * Spoof the readings of the sensors. The spoofed readings can be set
+ * to arbitrary values, or locked to the last actual values read.
+ */
+ MOTIONSENSE_CMD_SPOOF = 16,
+
+ /* Set lid angle for tablet mode detection. */
+ MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE = 17,
+
+ /*
+ * Sensor Scale command is a setter/getter command for the calibration
+ * scale.
+ */
+ MOTIONSENSE_CMD_SENSOR_SCALE = 18,
+
+ /* Number of motionsense sub-commands. */
+ MOTIONSENSE_NUM_CMDS
+};
+
+/* List of motion sensor types. */
+enum motionsensor_type {
+ MOTIONSENSE_TYPE_ACCEL = 0,
+ MOTIONSENSE_TYPE_GYRO = 1,
+ MOTIONSENSE_TYPE_MAG = 2,
+ MOTIONSENSE_TYPE_PROX = 3,
+ MOTIONSENSE_TYPE_LIGHT = 4,
+ MOTIONSENSE_TYPE_ACTIVITY = 5,
+ MOTIONSENSE_TYPE_BARO = 6,
+ MOTIONSENSE_TYPE_SYNC = 7,
+ MOTIONSENSE_TYPE_MAX,
+};
+
+/* List of motion sensor locations. */
+enum motionsensor_location {
+ MOTIONSENSE_LOC_BASE = 0,
+ MOTIONSENSE_LOC_LID = 1,
+ MOTIONSENSE_LOC_CAMERA = 2,
+ MOTIONSENSE_LOC_MAX,
+};
+
+/* List of motion sensor chips. */
+enum motionsensor_chip {
+ MOTIONSENSE_CHIP_KXCJ9 = 0,
+ MOTIONSENSE_CHIP_LSM6DS0 = 1,
+ MOTIONSENSE_CHIP_BMI160 = 2,
+ MOTIONSENSE_CHIP_SI1141 = 3,
+ MOTIONSENSE_CHIP_SI1142 = 4,
+ MOTIONSENSE_CHIP_SI1143 = 5,
+ MOTIONSENSE_CHIP_KX022 = 6,
+ MOTIONSENSE_CHIP_L3GD20H = 7,
+ MOTIONSENSE_CHIP_BMA255 = 8,
+ MOTIONSENSE_CHIP_BMP280 = 9,
+ MOTIONSENSE_CHIP_OPT3001 = 10,
+ MOTIONSENSE_CHIP_BH1730 = 11,
+ MOTIONSENSE_CHIP_GPIO = 12,
+ MOTIONSENSE_CHIP_LIS2DH = 13,
+ MOTIONSENSE_CHIP_LSM6DSM = 14,
+ MOTIONSENSE_CHIP_LIS2DE = 15,
+ MOTIONSENSE_CHIP_LIS2MDL = 16,
+ MOTIONSENSE_CHIP_LSM6DS3 = 17,
+ MOTIONSENSE_CHIP_LSM6DSO = 18,
+ MOTIONSENSE_CHIP_LNG2DM = 19,
+ MOTIONSENSE_CHIP_MAX,
+};
+
+/* List of orientation positions */
+enum motionsensor_orientation {
+ MOTIONSENSE_ORIENTATION_LANDSCAPE = 0,
+ MOTIONSENSE_ORIENTATION_PORTRAIT = 1,
+ MOTIONSENSE_ORIENTATION_UPSIDE_DOWN_PORTRAIT = 2,
+ MOTIONSENSE_ORIENTATION_UPSIDE_DOWN_LANDSCAPE = 3,
+ MOTIONSENSE_ORIENTATION_UNKNOWN = 4,
+};
+
+struct ec_response_motion_sensor_data {
+ /* Flags for each sensor. */
+ uint8_t flags;
+ /* Sensor number the data comes from. */
+ uint8_t sensor_num;
+ /* Each sensor is up to 3-axis. */
+ union {
+ int16_t data[3];
+ struct __ec_todo_packed {
+ uint16_t reserved;
+ uint32_t timestamp;
+ };
+ struct __ec_todo_unpacked {
+ uint8_t activity; /* motionsensor_activity */
+ uint8_t state;
+ int16_t add_info[2];
+ };
+ };
+} __ec_todo_packed;
+
+/* Note: used in ec_response_get_next_data */
+struct ec_response_motion_sense_fifo_info {
+ /* Size of the fifo */
+ uint16_t size;
+ /* Amount of space used in the fifo */
+ uint16_t count;
+ /* Timestamp recorded in us,
+ * i.e. the accurate timestamp at which the host event was triggered.
+ */
+ uint32_t timestamp;
+ /* Total number of vectors lost */
+ uint16_t total_lost;
+ /* Lost events since the last fifo_info, per sensor */
+ uint16_t lost[];
+} __ec_todo_packed;
+
+struct ec_response_motion_sense_fifo_data {
+ uint32_t number_data;
+ struct ec_response_motion_sensor_data data[];
+} __ec_todo_packed;
+
+/* List supported activity recognition */
+enum motionsensor_activity {
+ MOTIONSENSE_ACTIVITY_RESERVED = 0,
+ MOTIONSENSE_ACTIVITY_SIG_MOTION = 1,
+ MOTIONSENSE_ACTIVITY_DOUBLE_TAP = 2,
+ MOTIONSENSE_ACTIVITY_ORIENTATION = 3,
+};
+
+struct ec_motion_sense_activity {
+ uint8_t sensor_num;
+ uint8_t activity; /* one of enum motionsensor_activity */
+ uint8_t enable; /* 1: enable, 0: disable */
+ uint8_t reserved;
+ uint16_t parameters[3]; /* activity dependent parameters */
+} __ec_todo_unpacked;
+
+/* Module flag masks used for the dump sub-command. */
+#define MOTIONSENSE_MODULE_FLAG_ACTIVE BIT(0)
+
+/* Sensor flag masks used for the dump sub-command. */
+#define MOTIONSENSE_SENSOR_FLAG_PRESENT BIT(0)
+
+/*
+ * Flush entry for synchronization.
+ * The data field contains a timestamp.
+ */
+#define MOTIONSENSE_SENSOR_FLAG_FLUSH BIT(0)
+#define MOTIONSENSE_SENSOR_FLAG_TIMESTAMP BIT(1)
+#define MOTIONSENSE_SENSOR_FLAG_WAKEUP BIT(2)
+#define MOTIONSENSE_SENSOR_FLAG_TABLET_MODE BIT(3)
+#define MOTIONSENSE_SENSOR_FLAG_ODR BIT(4)
+
+/*
+ * Send this value for the data element to only perform a read. If you
+ * send any other value, the EC will interpret it as data to set and will
+ * return the actual value set.
+ */
+#define EC_MOTION_SENSE_NO_VALUE -1
+
+#define EC_MOTION_SENSE_INVALID_CALIB_TEMP 0x8000
+
+/* MOTIONSENSE_CMD_SENSOR_OFFSET subcommand flag */
+/* Set Calibration information */
+#define MOTION_SENSE_SET_OFFSET BIT(0)
+
+/* Default Scale value, factor 1. */
+#define MOTION_SENSE_DEFAULT_SCALE BIT(15)
+
+#define LID_ANGLE_UNRELIABLE 500
+
+enum motionsense_spoof_mode {
+ /* Disable spoof mode. */
+ MOTIONSENSE_SPOOF_MODE_DISABLE = 0,
+
+ /* Enable spoof mode, but use provided component values. */
+ MOTIONSENSE_SPOOF_MODE_CUSTOM,
+
+ /* Enable spoof mode, but use the current sensor values. */
+ MOTIONSENSE_SPOOF_MODE_LOCK_CURRENT,
+
+ /* Query the current spoof mode status for the sensor. */
+ MOTIONSENSE_SPOOF_MODE_QUERY,
+};
+
+struct ec_params_motion_sense {
+ uint8_t cmd;
+ union {
+ /* Used for MOTIONSENSE_CMD_DUMP. */
+ struct __ec_todo_unpacked {
+ /*
+ * Maximum number of sensors the host is expecting.
+ * 0 means the host is only interested in the number
+ * of sensors controlled by the EC.
+ */
+ uint8_t max_sensor_count;
+ } dump;
+
+ /*
+ * Used for MOTIONSENSE_CMD_KB_WAKE_ANGLE.
+ */
+ struct __ec_todo_unpacked {
+ /* Data to set or EC_MOTION_SENSE_NO_VALUE to read.
+ * kb_wake_angle: angle at which to wake the AP.
+ */
+ int16_t data;
+ } kb_wake_angle;
+
+ /*
+ * Used for MOTIONSENSE_CMD_INFO, MOTIONSENSE_CMD_DATA
+ * and MOTIONSENSE_CMD_PERFORM_CALIB.
+ */
+ struct __ec_todo_unpacked {
+ uint8_t sensor_num;
+ } info, info_3, data, fifo_flush, perform_calib,
+ list_activities;
+
+ /*
+ * Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR
+ * and MOTIONSENSE_CMD_SENSOR_RANGE.
+ */
+ struct __ec_todo_unpacked {
+ uint8_t sensor_num;
+
+ /* Rounding flag, true for round-up, false for down. */
+ uint8_t roundup;
+
+ uint16_t reserved;
+
+ /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. */
+ int32_t data;
+ } ec_rate, sensor_odr, sensor_range;
+
+ /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */
+ struct __ec_todo_packed {
+ uint8_t sensor_num;
+
+ /*
+ * bit 0: If set (MOTION_SENSE_SET_OFFSET), set
+ * the calibration information in the EC.
+ * If unset, just retrieve calibration information.
+ */
+ uint16_t flags;
+
+ /*
+ * Temperature at calibration, in units of 0.01 C
+ * 0x8000: invalid / unknown.
+ * 0x0: 0C
+ * 0x7fff: +327.67C
+ */
+ int16_t temp;
+
+ /*
+ * Offset for calibration.
+ * Unit:
+ * Accelerometer: 1/1024 g
+ * Gyro: 1/1024 deg/s
+ * Compass: 1/16 uT
+ */
+ int16_t offset[3];
+ } sensor_offset;
+
+ /* Used for MOTIONSENSE_CMD_SENSOR_SCALE */
+ struct __ec_todo_packed {
+ uint8_t sensor_num;
+
+ /*
+ * bit 0: If set (MOTION_SENSE_SET_OFFSET), set
+ * the calibration information in the EC.
+ * If unset, just retrieve calibration information.
+ */
+ uint16_t flags;
+
+ /*
+ * Temperature at calibration, in units of 0.01 C
+ * 0x8000: invalid / unknown.
+ * 0x0: 0C
+ * 0x7fff: +327.67C
+ */
+ int16_t temp;
+
+ /*
+ * Scale for calibration:
+ * By default scale is 1, it is encoded on 16bits:
+ * 1 = BIT(15)
+ * ~2 = 0xFFFF
+ * ~0 = 0.
+ */
+ uint16_t scale[3];
+ } sensor_scale;
+
+
+ /* Used for MOTIONSENSE_CMD_FIFO_INFO */
+ /* (no params) */
+
+ /* Used for MOTIONSENSE_CMD_FIFO_READ */
+ struct __ec_todo_unpacked {
+ /*
+ * Number of vectors expected in the response.
+ * The EC may return fewer, or 0 if none are available.
+ */
+ uint32_t max_data_vector;
+ } fifo_read;
+
+ struct ec_motion_sense_activity set_activity;
+
+ /* Used for MOTIONSENSE_CMD_LID_ANGLE */
+ /* (no params) */
+
+ /* Used for MOTIONSENSE_CMD_FIFO_INT_ENABLE */
+ struct __ec_todo_unpacked {
+ /*
+ * 1: enable, 0: disable the FIFO;
+ * EC_MOTION_SENSE_NO_VALUE to read the current value.
+ */
+ int8_t enable;
+ } fifo_int_enable;
+
+ /* Used for MOTIONSENSE_CMD_SPOOF */
+ struct __ec_todo_packed {
+ uint8_t sensor_id;
+
+ /* See enum motionsense_spoof_mode. */
+ uint8_t spoof_enable;
+
+ /* Ignored, used for alignment. */
+ uint8_t reserved;
+
+ /* Individual component values to spoof. */
+ int16_t components[3];
+ } spoof;
+
+ /* Used for MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE. */
+ struct __ec_todo_unpacked {
+ /*
+ * Lid angle threshold for switching between tablet and
+ * clamshell mode.
+ */
+ int16_t lid_angle;
+
+ /*
+ * Hysteresis degree to prevent fluctuations between
+ * clamshell and tablet mode if lid angle keeps
+ * changing around the threshold. Lid motion driver will
+ * use lid_angle + hys_degree to trigger tablet mode and
+ * lid_angle - hys_degree to trigger clamshell mode.
+ */
+ int16_t hys_degree;
+ } tablet_mode_threshold;
+ };
+} __ec_todo_packed;
+
+struct ec_response_motion_sense {
+ union {
+ /* Used for MOTIONSENSE_CMD_DUMP */
+ struct __ec_todo_unpacked {
+ /* Flags representing the motion sensor module. */
+ uint8_t module_flags;
+
+ /* Number of sensors managed directly by the EC. */
+ uint8_t sensor_count;
+
+ /*
+ * Sensor data is truncated if response_max is too small
+ * for holding all the data.
+ */
+ DECLARE_FLEX_ARRAY(struct ec_response_motion_sensor_data, sensor);
+ } dump;
+
+ /* Used for MOTIONSENSE_CMD_INFO. */
+ struct __ec_todo_unpacked {
+ /* Should be element of enum motionsensor_type. */
+ uint8_t type;
+
+ /* Should be element of enum motionsensor_location. */
+ uint8_t location;
+
+ /* Should be element of enum motionsensor_chip. */
+ uint8_t chip;
+ } info;
+
+ /* Used for MOTIONSENSE_CMD_INFO version 3 */
+ struct __ec_todo_unpacked {
+ /* Should be element of enum motionsensor_type. */
+ uint8_t type;
+
+ /* Should be element of enum motionsensor_location. */
+ uint8_t location;
+
+ /* Should be element of enum motionsensor_chip. */
+ uint8_t chip;
+
+ /* Minimum sensor sampling frequency */
+ uint32_t min_frequency;
+
+ /* Maximum sensor sampling frequency */
+ uint32_t max_frequency;
+
+ /* Max number of sensor events that could be in fifo */
+ uint32_t fifo_max_event_count;
+ } info_3;
+
+ /* Used for MOTIONSENSE_CMD_DATA */
+ struct ec_response_motion_sensor_data data;
+
+ /*
+ * Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR,
+ * MOTIONSENSE_CMD_SENSOR_RANGE,
+ * MOTIONSENSE_CMD_KB_WAKE_ANGLE,
+ * MOTIONSENSE_CMD_FIFO_INT_ENABLE and
+ * MOTIONSENSE_CMD_SPOOF.
+ */
+ struct __ec_todo_unpacked {
+ /* Current value of the parameter queried. */
+ int32_t ret;
+ } ec_rate, sensor_odr, sensor_range, kb_wake_angle,
+ fifo_int_enable, spoof;
+
+ /*
+ * Used for MOTIONSENSE_CMD_SENSOR_OFFSET,
+ * PERFORM_CALIB.
+ */
+ struct __ec_todo_unpacked {
+ int16_t temp;
+ int16_t offset[3];
+ } sensor_offset, perform_calib;
+
+ /* Used for MOTIONSENSE_CMD_SENSOR_SCALE */
+ struct __ec_todo_unpacked {
+ int16_t temp;
+ uint16_t scale[3];
+ } sensor_scale;
+
+ struct ec_response_motion_sense_fifo_info fifo_info, fifo_flush;
+
+ struct ec_response_motion_sense_fifo_data fifo_read;
+
+ struct __ec_todo_packed {
+ uint16_t reserved;
+ uint32_t enabled;
+ uint32_t disabled;
+ } list_activities;
+
+ /* No params for set activity */
+
+ /* Used for MOTIONSENSE_CMD_LID_ANGLE */
+ struct __ec_todo_unpacked {
+ /*
+ * Angle between 0 and 360 degrees if available,
+ * LID_ANGLE_UNRELIABLE otherwise.
+ */
+ uint16_t value;
+ } lid_angle;
+
+ /* Used for MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE. */
+ struct __ec_todo_unpacked {
+ /*
+ * Lid angle threshold for switching between tablet and
+ * clamshell mode.
+ */
+ uint16_t lid_angle;
+
+ /* Hysteresis degree. */
+ uint16_t hys_degree;
+ } tablet_mode_threshold;
+
+ };
+} __ec_todo_packed;
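+
+/*
+ * Usage sketch (illustrative only): read sensor 0's current output data
+ * rate without changing it, by passing EC_MOTION_SENSE_NO_VALUE as the
+ * data field. ec_cmd() is a hypothetical transport helper.
+ *
+ * struct ec_params_motion_sense p = {
+ *   .cmd = MOTIONSENSE_CMD_SENSOR_ODR,
+ *   .sensor_odr = {
+ *     .sensor_num = 0,
+ *     .data = EC_MOTION_SENSE_NO_VALUE,
+ *   },
+ * };
+ * struct ec_response_motion_sense r;
+ *
+ * ec_cmd(EC_CMD_MOTION_SENSE_CMD, &p, sizeof(p), &r, sizeof(r));
+ * ... r.sensor_odr.ret now holds the ODR in millihertz ...
+ */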
+
+/*****************************************************************************/
+/* Force lid open command */
+
+/* Make lid event always open */
+#define EC_CMD_FORCE_LID_OPEN 0x002C
+
+struct ec_params_force_lid_open {
+ uint8_t enabled;
+} __ec_align1;
+
+/*****************************************************************************/
+/* Configure the behavior of the power button */
+#define EC_CMD_CONFIG_POWER_BUTTON 0x002D
+
+enum ec_config_power_button_flags {
+ /* Enable/Disable power button pulses for x86 devices */
+ EC_POWER_BUTTON_ENABLE_PULSE = BIT(0),
+};
+
+struct ec_params_config_power_button {
+ /* See enum ec_config_power_button_flags */
+ uint8_t flags;
+} __ec_align1;
+
+/*****************************************************************************/
+/* USB charging control commands */
+
+/* Set USB port charging mode */
+#define EC_CMD_USB_CHARGE_SET_MODE 0x0030
+
+struct ec_params_usb_charge_set_mode {
+ uint8_t usb_port_id;
+ uint8_t mode:7;
+ uint8_t inhibit_charge:1;
+} __ec_align1;
+
+/*****************************************************************************/
+/* Persistent storage for host */
+
+/* Maximum bytes that can be read/written in a single command */
+#define EC_PSTORE_SIZE_MAX 64
+
+/* Get persistent storage info */
+#define EC_CMD_PSTORE_INFO 0x0040
+
+struct ec_response_pstore_info {
+ /* Persistent storage size, in bytes */
+ uint32_t pstore_size;
+ /* Access size; read/write offset and size must be a multiple of this */
+ uint32_t access_size;
+} __ec_align4;
+
+/*
+ * Read persistent storage
+ *
+ * Response is params.size bytes of data.
+ */
+#define EC_CMD_PSTORE_READ 0x0041
+
+struct ec_params_pstore_read {
+ uint32_t offset; /* Byte offset to read */
+ uint32_t size; /* Size to read in bytes */
+} __ec_align4;
+
+/* Write persistent storage */
+#define EC_CMD_PSTORE_WRITE 0x0042
+
+struct ec_params_pstore_write {
+ uint32_t offset; /* Byte offset to write */
+ uint32_t size; /* Size to write in bytes */
+ uint8_t data[EC_PSTORE_SIZE_MAX];
+} __ec_align4;
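+
+/*
+ * Usage sketch (illustrative only): write a buffer in chunks of
+ * EC_PSTORE_SIZE_MAX. Offsets and sizes must be multiples of the
+ * access_size reported by EC_CMD_PSTORE_INFO; this sketch assumes the
+ * buffer length already satisfies that. ec_cmd() is a hypothetical
+ * transport helper and min() is the usual kernel helper.
+ *
+ * struct ec_params_pstore_write p;
+ * uint32_t off;
+ *
+ * for (off = 0; off < len; off += EC_PSTORE_SIZE_MAX) {
+ *   p.offset = off;
+ *   p.size = min(len - off, (uint32_t)EC_PSTORE_SIZE_MAX);
+ *   memcpy(p.data, buf + off, p.size);
+ *   ec_cmd(EC_CMD_PSTORE_WRITE, &p, sizeof(p), NULL, 0);
+ * }
+ */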
+
+/*****************************************************************************/
+/* Real-time clock */
+
+/* RTC params and response structures */
+struct ec_params_rtc {
+ uint32_t time;
+} __ec_align4;
+
+struct ec_response_rtc {
+ uint32_t time;
+} __ec_align4;
+
+/* These use ec_response_rtc */
+#define EC_CMD_RTC_GET_VALUE 0x0044
+#define EC_CMD_RTC_GET_ALARM 0x0045
+
+/* These all use ec_params_rtc */
+#define EC_CMD_RTC_SET_VALUE 0x0046
+#define EC_CMD_RTC_SET_ALARM 0x0047
+
+/* Pass as time param to SET_ALARM to clear the current alarm */
+#define EC_RTC_ALARM_CLEAR 0
+
+/*****************************************************************************/
+/* Port80 log access */
+
+/* Maximum entries that can be read/written in a single command */
+#define EC_PORT80_SIZE_MAX 32
+
+/* Get last port80 code from previous boot */
+#define EC_CMD_PORT80_LAST_BOOT 0x0048
+#define EC_CMD_PORT80_READ 0x0048
+
+enum ec_port80_subcmd {
+ EC_PORT80_GET_INFO = 0,
+ EC_PORT80_READ_BUFFER,
+};
+
+struct ec_params_port80_read {
+ uint16_t subcmd;
+ union {
+ struct __ec_todo_unpacked {
+ uint32_t offset;
+ uint32_t num_entries;
+ } read_buffer;
+ };
+} __ec_todo_packed;
+
+struct ec_response_port80_read {
+ union {
+ struct __ec_todo_unpacked {
+ uint32_t writes;
+ uint32_t history_size;
+ uint32_t last_boot;
+ } get_info;
+ struct __ec_todo_unpacked {
+ uint16_t codes[EC_PORT80_SIZE_MAX];
+ } data;
+ };
+} __ec_todo_packed;
+
+struct ec_response_port80_last_boot {
+ uint16_t code;
+} __ec_align2;
+
+/*****************************************************************************/
+/* Temporary secure storage for host verified boot use */
+
+/* Number of bytes in a vstore slot */
+#define EC_VSTORE_SLOT_SIZE 64
+
+/* Maximum number of vstore slots */
+#define EC_VSTORE_SLOT_MAX 32
+
+/* Get persistent storage info */
+#define EC_CMD_VSTORE_INFO 0x0049
+struct ec_response_vstore_info {
+ /* Indicates which slots are locked */
+ uint32_t slot_locked;
+ /* Total number of slots available */
+ uint8_t slot_count;
+} __ec_align_size1;
+
+/*
+ * Read temporary secure storage
+ *
+ * Response is EC_VSTORE_SLOT_SIZE bytes of data.
+ */
+#define EC_CMD_VSTORE_READ 0x004A
+
+struct ec_params_vstore_read {
+ uint8_t slot; /* Slot to read from */
+} __ec_align1;
+
+struct ec_response_vstore_read {
+ uint8_t data[EC_VSTORE_SLOT_SIZE];
+} __ec_align1;
+
+/*
+ * Write temporary secure storage and lock it.
+ */
+#define EC_CMD_VSTORE_WRITE 0x004B
+
+struct ec_params_vstore_write {
+ uint8_t slot; /* Slot to write to */
+ uint8_t data[EC_VSTORE_SLOT_SIZE];
+} __ec_align1;
+
+/*****************************************************************************/
+/* Thermal engine commands. Note that there are two implementations. We'll
+ * reuse the command number, but the data and behavior are incompatible.
+ * Version 0 is what originally shipped on Link.
+ * Version 1 separates the CPU thermal limits from the fan control.
+ */
+
+#define EC_CMD_THERMAL_SET_THRESHOLD 0x0050
+#define EC_CMD_THERMAL_GET_THRESHOLD 0x0051
+
+/* The version 0 structs are opaque. You have to know what they are for
+ * the get/set commands to make any sense.
+ */
+
+/* Version 0 - set */
+struct ec_params_thermal_set_threshold {
+ uint8_t sensor_type;
+ uint8_t threshold_id;
+ uint16_t value;
+} __ec_align2;
+
+/* Version 0 - get */
+struct ec_params_thermal_get_threshold {
+ uint8_t sensor_type;
+ uint8_t threshold_id;
+} __ec_align1;
+
+struct ec_response_thermal_get_threshold {
+ uint16_t value;
+} __ec_align2;
+
+
+/* The version 1 structs are visible. */
+enum ec_temp_thresholds {
+ EC_TEMP_THRESH_WARN = 0,
+ EC_TEMP_THRESH_HIGH,
+ EC_TEMP_THRESH_HALT,
+
+ EC_TEMP_THRESH_COUNT
+};
+
+/*
+ * Thermal configuration for one temperature sensor. Temps are in degrees K.
+ * Zero values will be silently ignored by the thermal task.
+ *
+ * Setting a 'temp_host' value lets the thermal task trigger an event with
+ * 1 degree of hysteresis.
+ * For example, with
+ * temp_host[EC_TEMP_THRESH_HIGH] = 300 K
+ * temp_host_release[EC_TEMP_THRESH_HIGH] = 0 K
+ * the EC will throttle the AP when temperature >= 301 K, and release
+ * throttling when temperature <= 299 K.
+ *
+ * Setting a 'temp_host_release' value gives the thermal task a custom
+ * hysteresis.
+ * For example, with
+ * temp_host[EC_TEMP_THRESH_HIGH] = 300 K
+ * temp_host_release[EC_TEMP_THRESH_HIGH] = 295 K
+ * the EC will throttle the AP when temperature >= 301 K, and release
+ * throttling when temperature <= 294 K.
+ *
+ * Note that this structure is a sub-structure of
+ * ec_params_thermal_set_threshold_v1, but maintains its alignment there.
+ */
+struct ec_thermal_config {
+ uint32_t temp_host[EC_TEMP_THRESH_COUNT]; /* levels of hotness */
+ uint32_t temp_host_release[EC_TEMP_THRESH_COUNT]; /* release levels */
+ uint32_t temp_fan_off; /* no active cooling needed */
+ uint32_t temp_fan_max; /* max active cooling needed */
+} __ec_align4;
+
+/* Version 1 - get config for one sensor. */
+struct ec_params_thermal_get_threshold_v1 {
+ uint32_t sensor_num;
+} __ec_align4;
+/* This returns a struct ec_thermal_config */
+
+/*
+ * Version 1 - set config for one sensor.
+ * Use read-modify-write for best results!
+ */
+struct ec_params_thermal_set_threshold_v1 {
+ uint32_t sensor_num;
+ struct ec_thermal_config cfg;
+} __ec_align4;
+/* This returns no data */
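+
+/*
+ * Usage sketch (illustrative only) of the read-modify-write flow suggested
+ * above: fetch the current config for a sensor, adjust one threshold, and
+ * write the whole struct back. ec_cmd() is a hypothetical transport helper
+ * and is assumed to select version 1 of both commands.
+ *
+ * struct ec_params_thermal_get_threshold_v1 g = { .sensor_num = 0 };
+ * struct ec_params_thermal_set_threshold_v1 s = { .sensor_num = 0 };
+ *
+ * ec_cmd(EC_CMD_THERMAL_GET_THRESHOLD, &g, sizeof(g),
+ *        &s.cfg, sizeof(s.cfg));
+ * s.cfg.temp_host[EC_TEMP_THRESH_HIGH] = 358; ... ~85 C, in kelvin ...
+ * ec_cmd(EC_CMD_THERMAL_SET_THRESHOLD, &s, sizeof(s), NULL, 0);
+ */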
+
+/****************************************************************************/
+
+/* Toggle automatic fan control */
+#define EC_CMD_THERMAL_AUTO_FAN_CTRL 0x0052
+
+/* Version 1 of input params */
+struct ec_params_auto_fan_ctrl_v1 {
+ uint8_t fan_idx;
+} __ec_align1;
+
+/* Get/Set TMP006 calibration data */
+#define EC_CMD_TMP006_GET_CALIBRATION 0x0053
+#define EC_CMD_TMP006_SET_CALIBRATION 0x0054
+
+/*
+ * The original TMP006 calibration only needed four params, but now we need
+ * more. Since the algorithm is nothing but magic numbers anyway, we'll leave
+ * the params opaque. The v1 "get" response will include the algorithm number
+ * and how many params it requires. That way we can change the EC code without
+ * needing to update this file. We can also use a different algorithm on each
+ * sensor.
+ */
+
+/* This is the same struct for both v0 and v1. */
+struct ec_params_tmp006_get_calibration {
+ uint8_t index;
+} __ec_align1;
+
+/* Version 0 */
+struct ec_response_tmp006_get_calibration_v0 {
+ float s0;
+ float b0;
+ float b1;
+ float b2;
+} __ec_align4;
+
+struct ec_params_tmp006_set_calibration_v0 {
+ uint8_t index;
+ uint8_t reserved[3];
+ float s0;
+ float b0;
+ float b1;
+ float b2;
+} __ec_align4;
+
+/* Version 1 */
+struct ec_response_tmp006_get_calibration_v1 {
+ uint8_t algorithm;
+ uint8_t num_params;
+ uint8_t reserved[2];
+ float val[];
+} __ec_align4;
+
+struct ec_params_tmp006_set_calibration_v1 {
+ uint8_t index;
+ uint8_t algorithm;
+ uint8_t num_params;
+ uint8_t reserved;
+ float val[];
+} __ec_align4;
+
+
+/* Read raw TMP006 data */
+#define EC_CMD_TMP006_GET_RAW 0x0055
+
+struct ec_params_tmp006_get_raw {
+ uint8_t index;
+} __ec_align1;
+
+struct ec_response_tmp006_get_raw {
+ int32_t t; /* In 1/100 K */
+ int32_t v; /* In nV */
+} __ec_align4;
+
+/*****************************************************************************/
+/* MKBP - Matrix KeyBoard Protocol */
+
+/*
+ * Read key state
+ *
+ * Returns raw data for keyboard cols; see ec_response_mkbp_info.cols for
+ * expected response size.
+ *
+ * NOTE: This has been superseded by EC_CMD_MKBP_GET_NEXT_EVENT. If you wish
+ * to obtain the instantaneous state, use EC_CMD_MKBP_INFO with the type
+ * EC_MKBP_INFO_CURRENT and event EC_MKBP_EVENT_KEY_MATRIX.
+ */
+#define EC_CMD_MKBP_STATE 0x0060
+
+/*
+ * Provide information about various MKBP things. See enum ec_mkbp_info_type.
+ */
+#define EC_CMD_MKBP_INFO 0x0061
+
+struct ec_response_mkbp_info {
+ uint32_t rows;
+ uint32_t cols;
+ /* Formerly "switches", which was 0. */
+ uint8_t reserved;
+} __ec_align_size1;
+
+struct ec_params_mkbp_info {
+ uint8_t info_type;
+ uint8_t event_type;
+} __ec_align1;
+
+enum ec_mkbp_info_type {
+ /*
+ * Info about the keyboard matrix: number of rows and columns.
+ *
+ * Returns struct ec_response_mkbp_info.
+ */
+ EC_MKBP_INFO_KBD = 0,
+
+ /*
+ * For buttons and switches, info about which specifically are
+ * supported. event_type must be set to one of the values in enum
+ * ec_mkbp_event.
+ *
+ * For EC_MKBP_EVENT_BUTTON and EC_MKBP_EVENT_SWITCH, returns a 4 byte
+ * bitmask indicating which buttons or switches are present. See the
+ * bit indices below.
+ */
+ EC_MKBP_INFO_SUPPORTED = 1,
+
+ /*
+ * Instantaneous state of buttons and switches.
+ *
+ * event_type must be set to one of the values in enum ec_mkbp_event.
+ *
+ * For EC_MKBP_EVENT_KEY_MATRIX, returns uint8_t key_matrix[13]
+ * indicating the current state of the keyboard matrix.
+ *
+ * For EC_MKBP_EVENT_HOST_EVENT, returns uint32_t host_event, the raw
+ * event state.
+ *
+ * For EC_MKBP_EVENT_BUTTON, returns uint32_t buttons, indicating the
+ * state of supported buttons.
+ *
+ * For EC_MKBP_EVENT_SWITCH, returns uint32_t switches, indicating the
+ * state of supported switches.
+ */
+ EC_MKBP_INFO_CURRENT = 2,
+};
+
+/* Simulate key press */
+#define EC_CMD_MKBP_SIMULATE_KEY 0x0062
+
+struct ec_params_mkbp_simulate_key {
+ uint8_t col;
+ uint8_t row;
+ uint8_t pressed;
+} __ec_align1;
+
+#define EC_CMD_GET_KEYBOARD_ID 0x0063
+
+struct ec_response_keyboard_id {
+ uint32_t keyboard_id;
+} __ec_align4;
+
+enum keyboard_id {
+ KEYBOARD_ID_UNSUPPORTED = 0,
+ KEYBOARD_ID_UNREADABLE = 0xffffffff,
+};
+
+/* Configure keyboard scanning */
+#define EC_CMD_MKBP_SET_CONFIG 0x0064
+#define EC_CMD_MKBP_GET_CONFIG 0x0065
+
+/* flags */
+enum mkbp_config_flags {
+ EC_MKBP_FLAGS_ENABLE = 1, /* Enable keyboard scanning */
+};
+
+enum mkbp_config_valid {
+ EC_MKBP_VALID_SCAN_PERIOD = BIT(0),
+ EC_MKBP_VALID_POLL_TIMEOUT = BIT(1),
+ EC_MKBP_VALID_MIN_POST_SCAN_DELAY = BIT(3),
+ EC_MKBP_VALID_OUTPUT_SETTLE = BIT(4),
+ EC_MKBP_VALID_DEBOUNCE_DOWN = BIT(5),
+ EC_MKBP_VALID_DEBOUNCE_UP = BIT(6),
+ EC_MKBP_VALID_FIFO_MAX_DEPTH = BIT(7),
+};
+
+/*
+ * Configuration for our key scanning algorithm.
+ *
+ * Note that this is used as a sub-structure of
+ * ec_{params/response}_mkbp_get_config.
+ */
+struct ec_mkbp_config {
+ uint32_t valid_mask; /* valid fields */
+ uint8_t flags; /* some flags (enum mkbp_config_flags) */
+ uint8_t valid_flags; /* which flags are valid */
+ uint16_t scan_period_us; /* period between start of scans */
+ /* revert to interrupt mode after no activity for this long */
+ uint32_t poll_timeout_us;
+ /*
+ * Minimum post-scan relax time. Once we finish a scan we check
+ * the time until we are due to start the next one. If this time is
+ * shorter than this field, we use this field instead.
+ */
+ uint16_t min_post_scan_delay_us;
+ /* delay between setting up output and waiting for it to settle */
+ uint16_t output_settle_us;
+ uint16_t debounce_down_us; /* time for debounce on key down */
+ uint16_t debounce_up_us; /* time for debounce on key up */
+ /* maximum depth to allow for fifo (0 = no keyscan output) */
+ uint8_t fifo_max_depth;
+} __ec_align_size1;
+
+struct ec_params_mkbp_set_config {
+ struct ec_mkbp_config config;
+} __ec_align_size1;
+
+struct ec_response_mkbp_get_config {
+ struct ec_mkbp_config config;
+} __ec_align_size1;
+
+/* Run the key scan emulation */
+#define EC_CMD_KEYSCAN_SEQ_CTRL 0x0066
+
+enum ec_keyscan_seq_cmd {
+ EC_KEYSCAN_SEQ_STATUS = 0, /* Get status information */
+ EC_KEYSCAN_SEQ_CLEAR = 1, /* Clear sequence */
+ EC_KEYSCAN_SEQ_ADD = 2, /* Add item to sequence */
+ EC_KEYSCAN_SEQ_START = 3, /* Start running sequence */
+ EC_KEYSCAN_SEQ_COLLECT = 4, /* Collect sequence summary data */
+};
+
+enum ec_collect_flags {
+ /*
+ * Indicates this scan was processed by the EC. Due to timing, some
+ * scans may be skipped.
+ */
+ EC_KEYSCAN_SEQ_FLAG_DONE = BIT(0),
+};
+
+struct ec_collect_item {
+ uint8_t flags; /* some flags (enum ec_collect_flags) */
+} __ec_align1;
+
+struct ec_params_keyscan_seq_ctrl {
+ uint8_t cmd; /* Command to send (enum ec_keyscan_seq_cmd) */
+ union {
+ struct __ec_align1 {
+ uint8_t active; /* still active */
+ uint8_t num_items; /* number of items */
+ /* Current item being presented */
+ uint8_t cur_item;
+ } status;
+ struct __ec_todo_unpacked {
+ /*
+ * Absolute time for this scan, measured from the
+ * start of the sequence.
+ */
+ uint32_t time_us;
+ uint8_t scan[0]; /* keyscan data */
+ } add;
+ struct __ec_align1 {
+ uint8_t start_item; /* First item to return */
+ uint8_t num_items; /* Number of items to return */
+ } collect;
+ };
+} __ec_todo_packed;
+
+struct ec_result_keyscan_seq_ctrl {
+ union {
+ struct __ec_todo_unpacked {
+ uint8_t num_items; /* Number of items */
+ /* Data for each item */
+ struct ec_collect_item item[0];
+ } collect;
+ };
+} __ec_todo_packed;
+
+/*
+ * Get the next pending MKBP event.
+ *
+ * Returns EC_RES_UNAVAILABLE if there is no event pending.
+ */
+#define EC_CMD_GET_NEXT_EVENT 0x0067
+
+#define EC_MKBP_HAS_MORE_EVENTS_SHIFT 7
+
+/*
+ * We use the most significant bit of the event type to indicate to the host
+ * that the EC has more MKBP events available to provide.
+ */
+#define EC_MKBP_HAS_MORE_EVENTS BIT(EC_MKBP_HAS_MORE_EVENTS_SHIFT)
+
+/* The mask to apply to get the raw event type */
+#define EC_MKBP_EVENT_TYPE_MASK (BIT(EC_MKBP_HAS_MORE_EVENTS_SHIFT) - 1)
+
+enum ec_mkbp_event {
+ /* Keyboard matrix changed. The event data is the new matrix state. */
+ EC_MKBP_EVENT_KEY_MATRIX = 0,
+
+ /* New host event. The event data is 4 bytes of host event flags. */
+ EC_MKBP_EVENT_HOST_EVENT = 1,
+
+ /* New Sensor FIFO data. The event data is fifo_info structure. */
+ EC_MKBP_EVENT_SENSOR_FIFO = 2,
+
+ /* The state of the non-matrixed buttons has changed. */
+ EC_MKBP_EVENT_BUTTON = 3,
+
+ /* The state of the switches has changed. */
+ EC_MKBP_EVENT_SWITCH = 4,
+
+ /* New Fingerprint sensor event, the event data is fp_events bitmap. */
+ EC_MKBP_EVENT_FINGERPRINT = 5,
+
+ /*
+ * Sysrq event: send emulated sysrq. The event data is sysrq,
+ * corresponding to the key to be pressed.
+ */
+ EC_MKBP_EVENT_SYSRQ = 6,
+
+ /*
+ * New 64-bit host event.
+ * The event data is 8 bytes of host event flags.
+ */
+ EC_MKBP_EVENT_HOST_EVENT64 = 7,
+
+ /* Notify the AP that something happened on CEC */
+ EC_MKBP_EVENT_CEC_EVENT = 8,
+
+ /* Send an incoming CEC message to the AP */
+ EC_MKBP_EVENT_CEC_MESSAGE = 9,
+
+ /* Peripheral device charger event */
+ EC_MKBP_EVENT_PCHG = 12,
+
+ /* Number of MKBP events */
+ EC_MKBP_EVENT_COUNT,
+};
+BUILD_ASSERT(EC_MKBP_EVENT_COUNT <= EC_MKBP_EVENT_TYPE_MASK);
+
+union __ec_align_offset1 ec_response_get_next_data {
+ uint8_t key_matrix[13];
+
+ /* Unaligned */
+ uint32_t host_event;
+ uint64_t host_event64;
+
+ struct __ec_todo_unpacked {
+ /* For aligning the fifo_info */
+ uint8_t reserved[3];
+ struct ec_response_motion_sense_fifo_info info;
+ } sensor_fifo;
+
+ uint32_t buttons;
+
+ uint32_t switches;
+
+ uint32_t fp_events;
+
+ uint32_t sysrq;
+
+ /* CEC events from enum mkbp_cec_event */
+ uint32_t cec_events;
+};
+
+union __ec_align_offset1 ec_response_get_next_data_v1 {
+ uint8_t key_matrix[16];
+
+ /* Unaligned */
+ uint32_t host_event;
+ uint64_t host_event64;
+
+ struct __ec_todo_unpacked {
+ /* For aligning the fifo_info */
+ uint8_t reserved[3];
+ struct ec_response_motion_sense_fifo_info info;
+ } sensor_fifo;
+
+ uint32_t buttons;
+
+ uint32_t switches;
+
+ uint32_t fp_events;
+
+ uint32_t sysrq;
+
+ /* CEC events from enum mkbp_cec_event */
+ uint32_t cec_events;
+
+ uint8_t cec_message[16];
+};
+BUILD_ASSERT(sizeof(union ec_response_get_next_data_v1) == 16);
+
+struct ec_response_get_next_event {
+ uint8_t event_type;
+ /* Followed by event data if any */
+ union ec_response_get_next_data data;
+} __ec_align1;
+
+struct ec_response_get_next_event_v1 {
+ uint8_t event_type;
+ /* Followed by event data if any */
+ union ec_response_get_next_data_v1 data;
+} __ec_align1;
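+
+/*
+ * Usage sketch (illustrative only): drain all pending MKBP events. The top
+ * bit of event_type reports whether more events are queued; mask it off to
+ * recover the raw type. ec_cmd() is a hypothetical transport helper assumed
+ * to return 0 on success and nonzero when the EC answers
+ * EC_RES_UNAVAILABLE.
+ *
+ * struct ec_response_get_next_event_v1 ev;
+ * bool more = true;
+ *
+ * while (more && ec_cmd(EC_CMD_GET_NEXT_EVENT, NULL, 0,
+ *                       &ev, sizeof(ev)) == 0) {
+ *   more = ev.event_type & EC_MKBP_HAS_MORE_EVENTS;
+ *   switch (ev.event_type & EC_MKBP_EVENT_TYPE_MASK) {
+ *   case EC_MKBP_EVENT_BUTTON:
+ *     ... ev.data.buttons is the current button bitmap ...
+ *     break;
+ *   }
+ * }
+ */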
+
+/* Bit indices for buttons and switches. */
+/* Buttons */
+#define EC_MKBP_POWER_BUTTON 0
+#define EC_MKBP_VOL_UP 1
+#define EC_MKBP_VOL_DOWN 2
+#define EC_MKBP_RECOVERY 3
+#define EC_MKBP_BRI_UP 4
+#define EC_MKBP_BRI_DOWN 5
+#define EC_MKBP_SCREEN_LOCK 6
+
+/* Switches */
+#define EC_MKBP_LID_OPEN 0
+#define EC_MKBP_TABLET_MODE 1
+#define EC_MKBP_BASE_ATTACHED 2
+#define EC_MKBP_FRONT_PROXIMITY 3
+
+/* Run keyboard factory test scanning */
+#define EC_CMD_KEYBOARD_FACTORY_TEST 0x0068
+
+struct ec_response_keyboard_factory_test {
+ uint16_t shorted; /* Keyboard pins are shorted */
+} __ec_align2;
+
+/* Fingerprint events in 'fp_events' for EC_MKBP_EVENT_FINGERPRINT */
+#define EC_MKBP_FP_RAW_EVENT(fp_events) ((fp_events) & 0x00FFFFFF)
+#define EC_MKBP_FP_ERRCODE(fp_events) ((fp_events) & 0x0000000F)
+#define EC_MKBP_FP_ENROLL_PROGRESS_OFFSET 4
+#define EC_MKBP_FP_ENROLL_PROGRESS(fpe) (((fpe) & 0x00000FF0) \
+ >> EC_MKBP_FP_ENROLL_PROGRESS_OFFSET)
+#define EC_MKBP_FP_MATCH_IDX_OFFSET 12
+#define EC_MKBP_FP_MATCH_IDX_MASK 0x0000F000
+#define EC_MKBP_FP_MATCH_IDX(fpe) (((fpe) & EC_MKBP_FP_MATCH_IDX_MASK) \
+ >> EC_MKBP_FP_MATCH_IDX_OFFSET)
+#define EC_MKBP_FP_ENROLL BIT(27)
+#define EC_MKBP_FP_MATCH BIT(28)
+#define EC_MKBP_FP_FINGER_DOWN BIT(29)
+#define EC_MKBP_FP_FINGER_UP BIT(30)
+#define EC_MKBP_FP_IMAGE_READY BIT(31)
+/* code given by EC_MKBP_FP_ERRCODE() when EC_MKBP_FP_ENROLL is set */
+#define EC_MKBP_FP_ERR_ENROLL_OK 0
+#define EC_MKBP_FP_ERR_ENROLL_LOW_QUALITY 1
+#define EC_MKBP_FP_ERR_ENROLL_IMMOBILE 2
+#define EC_MKBP_FP_ERR_ENROLL_LOW_COVERAGE 3
+#define EC_MKBP_FP_ERR_ENROLL_INTERNAL 5
+/* Can be used to detect if image was usable for enrollment or not. */
+#define EC_MKBP_FP_ERR_ENROLL_PROBLEM_MASK 1
+/* code given by EC_MKBP_FP_ERRCODE() when EC_MKBP_FP_MATCH is set */
+#define EC_MKBP_FP_ERR_MATCH_NO 0
+#define EC_MKBP_FP_ERR_MATCH_NO_INTERNAL 6
+#define EC_MKBP_FP_ERR_MATCH_NO_TEMPLATES 7
+#define EC_MKBP_FP_ERR_MATCH_NO_LOW_QUALITY 2
+#define EC_MKBP_FP_ERR_MATCH_NO_LOW_COVERAGE 4
+#define EC_MKBP_FP_ERR_MATCH_YES 1
+#define EC_MKBP_FP_ERR_MATCH_YES_UPDATED 3
+#define EC_MKBP_FP_ERR_MATCH_YES_UPDATE_FAILED 5
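+
+/*
+ * Usage sketch (illustrative only): decoding the fp_events word delivered
+ * with an EC_MKBP_EVENT_FINGERPRINT event, using the macros above.
+ *
+ * if (fp_events & EC_MKBP_FP_ENROLL) {
+ *   uint32_t progress = EC_MKBP_FP_ENROLL_PROGRESS(fp_events);
+ *   if (EC_MKBP_FP_ERRCODE(fp_events) &
+ *       EC_MKBP_FP_ERR_ENROLL_PROBLEM_MASK)
+ *     ... the image was not usable for enrollment ...
+ * } else if (fp_events & EC_MKBP_FP_MATCH) {
+ *   uint32_t idx = EC_MKBP_FP_MATCH_IDX(fp_events);
+ *   ... EC_MKBP_FP_ERRCODE() tells whether the match succeeded ...
+ * }
+ */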
+
+
+/*****************************************************************************/
+/* Temperature sensor commands */
+
+/* Read temperature sensor info */
+#define EC_CMD_TEMP_SENSOR_GET_INFO 0x0070
+
+struct ec_params_temp_sensor_get_info {
+ uint8_t id;
+} __ec_align1;
+
+struct ec_response_temp_sensor_get_info {
+ char sensor_name[32];
+ uint8_t sensor_type;
+} __ec_align1;
+
+/*****************************************************************************/
+
+/*
+ * Note: host commands 0x80 - 0x87 are reserved to avoid conflict with ACPI
+ * commands accidentally sent to the wrong interface. See the ACPI section
+ * below.
+ */
+
+/*****************************************************************************/
+/* Host event commands */
+
+
+/* Obsolete. New implementations should use EC_CMD_HOST_EVENT instead. */
+/*
+ * Host event mask params and response structures, shared by all of the host
+ * event commands below.
+ */
+struct ec_params_host_event_mask {
+ uint32_t mask;
+} __ec_align4;
+
+struct ec_response_host_event_mask {
+ uint32_t mask;
+} __ec_align4;
+
+/* These all use ec_response_host_event_mask */
+#define EC_CMD_HOST_EVENT_GET_B 0x0087
+#define EC_CMD_HOST_EVENT_GET_SMI_MASK 0x0088
+#define EC_CMD_HOST_EVENT_GET_SCI_MASK 0x0089
+#define EC_CMD_HOST_EVENT_GET_WAKE_MASK 0x008D
+
+/* These all use ec_params_host_event_mask */
+#define EC_CMD_HOST_EVENT_SET_SMI_MASK 0x008A
+#define EC_CMD_HOST_EVENT_SET_SCI_MASK 0x008B
+#define EC_CMD_HOST_EVENT_CLEAR 0x008C
+#define EC_CMD_HOST_EVENT_SET_WAKE_MASK 0x008E
+#define EC_CMD_HOST_EVENT_CLEAR_B 0x008F
+
+/*
+ * Unified host event programming interface - Should be used by newer versions
+ * of BIOS/OS to program host events and masks
+ */
+
+struct ec_params_host_event {
+
+ /* Action requested by host - one of enum ec_host_event_action. */
+ uint8_t action;
+
+ /*
+ * Mask type that the host requested the action on - one of
+ * enum ec_host_event_mask_type.
+ */
+ uint8_t mask_type;
+
+ /* Set to 0, ignore on read */
+ uint16_t reserved;
+
+ /* Value to be used in case of set operations. */
+ uint64_t value;
+} __ec_align4;
+
+/*
+ * Response structure returned by EC_CMD_HOST_EVENT.
+ * The value field is populated on a GET request and set to 0 on SET/CLEAR.
+ */
+
+struct ec_response_host_event {
+
+ /* Mask value in case of get operation */
+ uint64_t value;
+} __ec_align4;
+
+enum ec_host_event_action {
+ /*
+ * params.value is ignored. The value of the requested mask_type is
+ * populated in response.value.
+ */
+ EC_HOST_EVENT_GET,
+
+ /* Bits in params.value are set */
+ EC_HOST_EVENT_SET,
+
+ /* Bits in params.value are cleared */
+ EC_HOST_EVENT_CLEAR,
+};
+
+enum ec_host_event_mask_type {
+
+ /* Main host event copy */
+ EC_HOST_EVENT_MAIN,
+
+ /* Copy B of host events */
+ EC_HOST_EVENT_B,
+
+ /* SCI Mask */
+ EC_HOST_EVENT_SCI_MASK,
+
+ /* SMI Mask */
+ EC_HOST_EVENT_SMI_MASK,
+
+ /* Mask of events that should be always reported in hostevents */
+ EC_HOST_EVENT_ALWAYS_REPORT_MASK,
+
+ /* Active wake mask */
+ EC_HOST_EVENT_ACTIVE_WAKE_MASK,
+
+ /* Lazy wake mask for S0ix */
+ EC_HOST_EVENT_LAZY_WAKE_MASK_S0IX,
+
+ /* Lazy wake mask for S3 */
+ EC_HOST_EVENT_LAZY_WAKE_MASK_S3,
+
+ /* Lazy wake mask for S5 */
+ EC_HOST_EVENT_LAZY_WAKE_MASK_S5,
+};
+
+#define EC_CMD_HOST_EVENT 0x00A4
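+
+/*
+ * Usage sketch (illustrative only): set the active wake mask through the
+ * unified interface. ec_cmd() is a hypothetical transport helper and
+ * wake_mask a caller-supplied 64-bit host event mask.
+ *
+ * struct ec_params_host_event p = {
+ *   .action = EC_HOST_EVENT_SET,
+ *   .mask_type = EC_HOST_EVENT_ACTIVE_WAKE_MASK,
+ *   .value = wake_mask,
+ * };
+ *
+ * ec_cmd(EC_CMD_HOST_EVENT, &p, sizeof(p), NULL, 0);
+ */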
+
+/*****************************************************************************/
+/* Switch commands */
+
+/* Enable/disable LCD backlight */
+#define EC_CMD_SWITCH_ENABLE_BKLIGHT 0x0090
+
+struct ec_params_switch_enable_backlight {
+ uint8_t enabled;
+} __ec_align1;
+
+/* Enable/disable WLAN/Bluetooth */
+#define EC_CMD_SWITCH_ENABLE_WIRELESS 0x0091
+#define EC_VER_SWITCH_ENABLE_WIRELESS 1
+
+/* Version 0 params; no response */
+struct ec_params_switch_enable_wireless_v0 {
+ uint8_t enabled;
+} __ec_align1;
+
+/* Version 1 params */
+struct ec_params_switch_enable_wireless_v1 {
+ /* Flags to enable now */
+ uint8_t now_flags;
+
+ /* Which flags to copy from now_flags */
+ uint8_t now_mask;
+
+ /*
+ * Flags to leave enabled in S3, if they're on at the S0->S3
+ * transition. (Other flags will be disabled by the S0->S3
+ * transition.)
+ */
+ uint8_t suspend_flags;
+
+ /* Which flags to copy from suspend_flags */
+ uint8_t suspend_mask;
+} __ec_align1;
+
+/* Version 1 response */
+struct ec_response_switch_enable_wireless_v1 {
+ /* Flags to enable now */
+ uint8_t now_flags;
+
+ /* Flags to leave enabled in S3 */
+ uint8_t suspend_flags;
+} __ec_align1;
+
+/*****************************************************************************/
+/* GPIO commands. Only available on EC if write protect has been disabled. */
+
+/* Set GPIO output value */
+#define EC_CMD_GPIO_SET 0x0092
+
+struct ec_params_gpio_set {
+ char name[32];
+ uint8_t val;
+} __ec_align1;
+
+/* Get GPIO value */
+#define EC_CMD_GPIO_GET 0x0093
+
+/* Version 0 of input params and response */
+struct ec_params_gpio_get {
+ char name[32];
+} __ec_align1;
+
+struct ec_response_gpio_get {
+ uint8_t val;
+} __ec_align1;
+
+/* Version 1 of input params and response */
+struct ec_params_gpio_get_v1 {
+ uint8_t subcmd;
+ union {
+ struct __ec_align1 {
+ char name[32];
+ } get_value_by_name;
+ struct __ec_align1 {
+ uint8_t index;
+ } get_info;
+ };
+} __ec_align1;
+
+struct ec_response_gpio_get_v1 {
+ union {
+ struct __ec_align1 {
+ uint8_t val;
+ } get_value_by_name, get_count;
+ struct __ec_todo_unpacked {
+ uint8_t val;
+ char name[32];
+ uint32_t flags;
+ } get_info;
+ };
+} __ec_todo_packed;
+
+enum gpio_get_subcmd {
+ EC_GPIO_GET_BY_NAME = 0,
+ EC_GPIO_GET_COUNT = 1,
+ EC_GPIO_GET_INFO = 2,
+};
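+
+/*
+ * Usage sketch (illustrative only): enumerate every GPIO with the v1
+ * sub-commands. ec_cmd() is a hypothetical transport helper.
+ *
+ * struct ec_params_gpio_get_v1 p = { .subcmd = EC_GPIO_GET_COUNT };
+ * struct ec_response_gpio_get_v1 r;
+ * uint8_t i, count;
+ *
+ * ec_cmd(EC_CMD_GPIO_GET, &p, sizeof(p), &r, sizeof(r));
+ * count = r.get_count.val;
+ * for (i = 0; i < count; i++) {
+ *   p.subcmd = EC_GPIO_GET_INFO;
+ *   p.get_info.index = i;
+ *   ec_cmd(EC_CMD_GPIO_GET, &p, sizeof(p), &r, sizeof(r));
+ *   ... r.get_info has the name, value, and flags ...
+ * }
+ */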
+
+/*****************************************************************************/
+/* I2C commands. Only available when flash write protect is unlocked. */
+
+/*
+ * CAUTION: These commands are deprecated, and are not supported anymore in EC
+ * builds >= 8398.0.0 (see crosbug.com/p/23570).
+ *
+ * Use EC_CMD_I2C_PASSTHRU instead.
+ */
+
+/* Read I2C bus */
+#define EC_CMD_I2C_READ 0x0094
+
+struct ec_params_i2c_read {
+ uint16_t addr; /* 8-bit address (7-bit shifted << 1) */
+ uint8_t read_size; /* Either 8 or 16. */
+ uint8_t port;
+ uint8_t offset;
+} __ec_align_size1;
+
+struct ec_response_i2c_read {
+ uint16_t data;
+} __ec_align2;
+
+/* Write I2C bus */
+#define EC_CMD_I2C_WRITE 0x0095
+
+struct ec_params_i2c_write {
+ uint16_t data;
+ uint16_t addr; /* 8-bit address (7-bit shifted << 1) */
+ uint8_t write_size; /* Either 8 or 16. */
+ uint8_t port;
+ uint8_t offset;
+} __ec_align_size1;
+
+/*****************************************************************************/
+/* Charge state commands. Only available when flash write protect unlocked. */
+
+/* Force charge state machine to stop charging the battery or force it to
+ * discharge the battery.
+ */
+#define EC_CMD_CHARGE_CONTROL 0x0096
+#define EC_VER_CHARGE_CONTROL 1
+
+enum ec_charge_control_mode {
+ CHARGE_CONTROL_NORMAL = 0,
+ CHARGE_CONTROL_IDLE,
+ CHARGE_CONTROL_DISCHARGE,
+};
+
+struct ec_params_charge_control {
+ uint32_t mode; /* enum charge_control_mode */
+} __ec_align4;
+
+/*****************************************************************************/
+
+/* Snapshot console output buffer for use by EC_CMD_CONSOLE_READ. */
+#define EC_CMD_CONSOLE_SNAPSHOT 0x0097
+
+/*
+ * Read data from the saved snapshot. If the subcmd parameter is
+ * CONSOLE_READ_NEXT, this will return data starting from the beginning of
+ * the latest snapshot. If it is CONSOLE_READ_RECENT, it will start from the
+ * end of the previous snapshot.
+ *
+ * The params are only looked at in version >= 1 of this command. Prior
+ * versions will just default to CONSOLE_READ_NEXT behavior.
+ *
+ * The response is a null-terminated string; an empty string means no
+ * output remains.
+ */
+#define EC_CMD_CONSOLE_READ 0x0098
+
+enum ec_console_read_subcmd {
+ CONSOLE_READ_NEXT = 0,
+ CONSOLE_READ_RECENT
+};
+
+struct ec_params_console_read_v1 {
+ uint8_t subcmd; /* enum ec_console_read_subcmd */
+} __ec_align1;
+
+/*****************************************************************************/
+
+/*
+ * Cut off battery power immediately or after the host has shut down.
+ *
+ * Returns EC_RES_INVALID_COMMAND if unsupported by a board/battery,
+ * EC_RES_SUCCESS if the command was successful, or
+ * EC_RES_ERROR if the cutoff command failed.
+ */
+#define EC_CMD_BATTERY_CUT_OFF 0x0099
+
+#define EC_BATTERY_CUTOFF_FLAG_AT_SHUTDOWN BIT(0)
+
+struct ec_params_battery_cutoff {
+ uint8_t flags;
+} __ec_align1;
+
+/*****************************************************************************/
+/* USB port mux control. */
+
+/*
+ * Switch USB mux or return to automatic switching.
+ */
+#define EC_CMD_USB_MUX 0x009A
+
+struct ec_params_usb_mux {
+ uint8_t mux;
+} __ec_align1;
+
+/*****************************************************************************/
+/* LDOs / FETs control. */
+
+enum ec_ldo_state {
+ EC_LDO_STATE_OFF = 0, /* the LDO / FET is shut down */
+ EC_LDO_STATE_ON = 1, /* the LDO / FET is ON / providing power */
+};
+
+/*
+ * Switch an LDO on or off.
+ */
+#define EC_CMD_LDO_SET 0x009B
+
+struct ec_params_ldo_set {
+ uint8_t index;
+ uint8_t state;
+} __ec_align1;
+
+/*
+ * Get LDO state.
+ */
+#define EC_CMD_LDO_GET 0x009C
+
+struct ec_params_ldo_get {
+ uint8_t index;
+} __ec_align1;
+
+struct ec_response_ldo_get {
+ uint8_t state;
+} __ec_align1;
+
+/*****************************************************************************/
+/* Power info. */
+
+/*
+ * Get power info.
+ */
+#define EC_CMD_POWER_INFO 0x009D
+
+struct ec_response_power_info {
+ uint32_t usb_dev_type;
+ uint16_t voltage_ac;
+ uint16_t voltage_system;
+ uint16_t current_system;
+ uint16_t usb_current_limit;
+} __ec_align4;
+
+/*****************************************************************************/
+/* I2C passthru command */
+
+#define EC_CMD_I2C_PASSTHRU 0x009E
+
+/* Read data; if not present, message is a write */
+#define EC_I2C_FLAG_READ BIT(15)
+
+/* Mask for address */
+#define EC_I2C_ADDR_MASK 0x3ff
+
+#define EC_I2C_STATUS_NAK BIT(0) /* Transfer was not acknowledged */
+#define EC_I2C_STATUS_TIMEOUT BIT(1) /* Timeout during transfer */
+
+/* Any error */
+#define EC_I2C_STATUS_ERROR (EC_I2C_STATUS_NAK | EC_I2C_STATUS_TIMEOUT)
+
+struct ec_params_i2c_passthru_msg {
+ uint16_t addr_flags; /* I2C slave address (7 or 10 bits) and flags */
+ uint16_t len; /* Number of bytes to read or write */
+} __ec_align2;
+
+struct ec_params_i2c_passthru {
+ uint8_t port; /* I2C port number */
+ uint8_t num_msgs; /* Number of messages */
+ struct ec_params_i2c_passthru_msg msg[];
+ /* Data to write for all messages is concatenated here */
+} __ec_align2;
+
+struct ec_response_i2c_passthru {
+ uint8_t i2c_status; /* Status flags (EC_I2C_STATUS_...) */
+ uint8_t num_msgs; /* Number of messages processed */
+ uint8_t data[]; /* Data read by messages concatenated here */
+} __ec_align1;
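+
+/*
+ * Usage sketch (illustrative only): a two-message register read, writing
+ * one register-index byte and reading two bytes back. The write payload is
+ * concatenated after the msg[] headers, so a flat buffer is built by hand.
+ * ec_cmd() is a hypothetical transport helper, resp is a caller-sized
+ * ec_response_i2c_passthru buffer, and error handling is elided.
+ *
+ * uint8_t buf[sizeof(struct ec_params_i2c_passthru) +
+ *             2 * sizeof(struct ec_params_i2c_passthru_msg) + 1];
+ * struct ec_params_i2c_passthru *p = (void *)buf;
+ * uint8_t *payload = (uint8_t *)&p->msg[2];
+ *
+ * p->port = 0;
+ * p->num_msgs = 2;
+ * p->msg[0].addr_flags = 0x50;
+ * p->msg[0].len = 1;
+ * p->msg[1].addr_flags = 0x50 | EC_I2C_FLAG_READ;
+ * p->msg[1].len = 2;
+ * payload[0] = reg_index;
+ * ec_cmd(EC_CMD_I2C_PASSTHRU, p, sizeof(buf), &resp, sizeof(resp));
+ */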
+
+/*****************************************************************************/
+/* Power button hang detect */
+
+#define EC_CMD_HANG_DETECT 0x009F
+
+/* Reasons to start hang detection timer */
+/* Power button pressed */
+#define EC_HANG_START_ON_POWER_PRESS BIT(0)
+
+/* Lid closed */
+#define EC_HANG_START_ON_LID_CLOSE BIT(1)
+
+ /* Lid opened */
+#define EC_HANG_START_ON_LID_OPEN BIT(2)
+
+/* Start of AP S3->S0 transition (booting or resuming from suspend) */
+#define EC_HANG_START_ON_RESUME BIT(3)
+
+/* Reasons to cancel hang detection */
+
+/* Power button released */
+#define EC_HANG_STOP_ON_POWER_RELEASE BIT(8)
+
+/* Any host command from AP received */
+#define EC_HANG_STOP_ON_HOST_COMMAND BIT(9)
+
+/* Stop on end of AP S0->S3 transition (suspending or shutting down) */
+#define EC_HANG_STOP_ON_SUSPEND BIT(10)
+
+/*
+ * If this flag is set, all the other fields are ignored, and the hang detect
+ * timer is started. This provides the AP a way to start the hang timer
+ * without reconfiguring any of the other hang detect settings. Note that
+ * you must previously have configured the timeouts.
+ */
+#define EC_HANG_START_NOW BIT(30)
+
+/*
+ * If this flag is set, all the other fields are ignored (including
+ * EC_HANG_START_NOW). This provides the AP a way to stop the hang timer
+ * without reconfiguring any of the other hang detect settings.
+ */
+#define EC_HANG_STOP_NOW BIT(31)
+
+struct ec_params_hang_detect {
+ /* Flags; see EC_HANG_* */
+ uint32_t flags;
+
+ /* Timeout in msec before generating host event, if enabled */
+ uint16_t host_event_timeout_msec;
+
+ /* Timeout in msec before generating warm reboot, if enabled */
+ uint16_t warm_reboot_timeout_msec;
+} __ec_align4;
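+
+/*
+ * Usage sketch (illustrative only): once the timeouts have been configured,
+ * the AP can re-arm the hang timer without touching anything else, per
+ * EC_HANG_START_NOW above. ec_cmd() is a hypothetical transport helper.
+ *
+ * struct ec_params_hang_detect p = { .flags = EC_HANG_START_NOW };
+ *
+ * ec_cmd(EC_CMD_HANG_DETECT, &p, sizeof(p), NULL, 0);
+ */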
+
+/*****************************************************************************/
+/* Commands for battery charging */
+
+/*
+ * This is the single catch-all host command to exchange data regarding the
+ * charge state machine (v2 and up).
+ */
+#define EC_CMD_CHARGE_STATE 0x00A0
+
+/* Subcommands for this host command */
+enum charge_state_command {
+ CHARGE_STATE_CMD_GET_STATE,
+ CHARGE_STATE_CMD_GET_PARAM,
+ CHARGE_STATE_CMD_SET_PARAM,
+ CHARGE_STATE_NUM_CMDS
+};
+
+/*
+ * Known param numbers are defined here. Ranges are reserved for board-specific
+ * params, which are handled by the particular implementations.
+ */
+enum charge_state_params {
+ CS_PARAM_CHG_VOLTAGE, /* charger voltage limit */
+ CS_PARAM_CHG_CURRENT, /* charger current limit */
+ CS_PARAM_CHG_INPUT_CURRENT, /* charger input current limit */
+ CS_PARAM_CHG_STATUS, /* charger-specific status */
+ CS_PARAM_CHG_OPTION, /* charger-specific options */
+ CS_PARAM_LIMIT_POWER, /*
+ * Check if power is limited due to
+ * low battery and / or a weak external
+ * charger. READ ONLY.
+ */
+ /* How many so far? */
+ CS_NUM_BASE_PARAMS,
+
+ /* Range for CONFIG_CHARGER_PROFILE_OVERRIDE params */
+ CS_PARAM_CUSTOM_PROFILE_MIN = 0x10000,
+ CS_PARAM_CUSTOM_PROFILE_MAX = 0x1ffff,
+
+ /* Range for CONFIG_CHARGE_STATE_DEBUG params */
+ CS_PARAM_DEBUG_MIN = 0x20000,
+ CS_PARAM_DEBUG_CTL_MODE = 0x20000,
+ CS_PARAM_DEBUG_MANUAL_MODE,
+ CS_PARAM_DEBUG_SEEMS_DEAD,
+ CS_PARAM_DEBUG_SEEMS_DISCONNECTED,
+ CS_PARAM_DEBUG_BATT_REMOVED,
+ CS_PARAM_DEBUG_MANUAL_CURRENT,
+ CS_PARAM_DEBUG_MANUAL_VOLTAGE,
+ CS_PARAM_DEBUG_MAX = 0x2ffff,
+
+ /* Other custom param ranges go here... */
+};
+
+struct ec_params_charge_state {
+ uint8_t cmd; /* enum charge_state_command */
+ union {
+ /* get_state has no args */
+
+ struct __ec_todo_unpacked {
+ uint32_t param; /* enum charge_state_param */
+ } get_param;
+
+ struct __ec_todo_unpacked {
+ uint32_t param; /* param to set */
+ uint32_t value; /* value to set */
+ } set_param;
+ };
+} __ec_todo_packed;
+
+struct ec_response_charge_state {
+ union {
+ struct __ec_align4 {
+ int ac;
+ int chg_voltage;
+ int chg_current;
+ int chg_input_current;
+ int batt_state_of_charge;
+ } get_state;
+
+ struct __ec_align4 {
+ uint32_t value;
+ } get_param;
+
+ /* set_param returns no args */
+ };
+} __ec_align4;
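+
+/*
+ * Usage sketch (illustrative only): read the charger current limit via the
+ * catch-all command. ec_cmd() is a hypothetical transport helper.
+ *
+ * struct ec_params_charge_state p = {
+ *   .cmd = CHARGE_STATE_CMD_GET_PARAM,
+ *   .get_param.param = CS_PARAM_CHG_CURRENT,
+ * };
+ * struct ec_response_charge_state r;
+ *
+ * ec_cmd(EC_CMD_CHARGE_STATE, &p, sizeof(p), &r, sizeof(r));
+ * ... r.get_param.value is the current limit ...
+ */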
+
+
+/*
+ * Set maximum battery charging current.
+ */
+#define EC_CMD_CHARGE_CURRENT_LIMIT 0x00A1
+
+struct ec_params_current_limit {
+ uint32_t limit; /* in mA */
+} __ec_align4;
+
+/*
+ * Set maximum external voltage / current.
+ */
+#define EC_CMD_EXTERNAL_POWER_LIMIT 0x00A2
+
+/* Command v0 is used only on Spring and is obsolete + unsupported */
+struct ec_params_external_power_limit_v1 {
+ uint16_t current_lim; /* in mA, or EC_POWER_LIMIT_NONE to clear limit */
+ uint16_t voltage_lim; /* in mV, or EC_POWER_LIMIT_NONE to clear limit */
+} __ec_align2;
+
+#define EC_POWER_LIMIT_NONE 0xffff
+
+/*
+ * Set maximum voltage & current of a dedicated charge port
+ */
+#define EC_CMD_OVERRIDE_DEDICATED_CHARGER_LIMIT 0x00A3
+
+struct ec_params_dedicated_charger_limit {
+ uint16_t current_lim; /* in mA */
+ uint16_t voltage_lim; /* in mV */
+} __ec_align2;
+
+/*****************************************************************************/
+/* Hibernate/Deep Sleep Commands */
+
+/* Set the delay before going into hibernation. */
+#define EC_CMD_HIBERNATION_DELAY 0x00A8
+
+struct ec_params_hibernation_delay {
+ /*
+ * Seconds to wait in G3 before hibernate. Pass in 0 to read the
+ * current settings without changing them.
+ */
+ uint32_t seconds;
+} __ec_align4;
+
+struct ec_response_hibernation_delay {
+ /*
+ * The current time in seconds in which the system has been in the G3
+ * state. This value is reset if the EC transitions out of G3.
+ */
+ uint32_t time_g3;
+
+ /*
+ * The current time remaining in seconds until the EC should hibernate.
+ * This value is also reset if the EC transitions out of G3.
+ */
+ uint32_t time_remaining;
+
+ /*
+ * The current time in seconds that the EC should wait in G3 before
+ * hibernating.
+ */
+ uint32_t hibernate_delay;
+} __ec_align4;
+
+/* Inform the EC when entering a sleep state */
+#define EC_CMD_HOST_SLEEP_EVENT 0x00A9
+
+enum host_sleep_event {
+ HOST_SLEEP_EVENT_S3_SUSPEND = 1,
+ HOST_SLEEP_EVENT_S3_RESUME = 2,
+ HOST_SLEEP_EVENT_S0IX_SUSPEND = 3,
+ HOST_SLEEP_EVENT_S0IX_RESUME = 4,
+ /* S3 suspend with additional enabled wake sources */
+ HOST_SLEEP_EVENT_S3_WAKEABLE_SUSPEND = 5,
+};
+
+struct ec_params_host_sleep_event {
+ uint8_t sleep_event;
+} __ec_align1;
+
+/*
+ * Use a default timeout value (CONFIG_SLEEP_TIMEOUT_MS) for detecting sleep
+ * transition failures
+ */
+#define EC_HOST_SLEEP_TIMEOUT_DEFAULT 0
+
+/* Disable timeout detection for this sleep transition */
+#define EC_HOST_SLEEP_TIMEOUT_INFINITE 0xFFFF
+
+struct ec_params_host_sleep_event_v1 {
+ /* The type of sleep being entered or exited. */
+ uint8_t sleep_event;
+
+ /* Padding */
+ uint8_t reserved;
+ union {
+ /* Parameters that apply for suspend messages. */
+ struct {
+ /*
+ * The timeout in milliseconds between when this message
+ * is received and when the EC will declare sleep
+ * transition failure if the sleep signal is not
+ * asserted.
+ */
+ uint16_t sleep_timeout_ms;
+ } suspend_params;
+
+ /* No parameters for non-suspend messages. */
+ };
+} __ec_align2;
+
+/* A timeout occurred when this bit is set */
+#define EC_HOST_RESUME_SLEEP_TIMEOUT 0x80000000
+
+/*
+ * The mask defining which bits correspond to the number of sleep transitions,
+ * as well as the maximum number of suspend line transitions that will be
+ * reported back to the host.
+ */
+#define EC_HOST_RESUME_SLEEP_TRANSITIONS_MASK 0x7FFFFFFF
+
+struct ec_response_host_sleep_event_v1 {
+ union {
+ /* Response fields that apply for resume messages. */
+ struct {
+ /*
+ * The number of sleep power signal transitions that
+ * occurred since the suspend message. The high bit
+ * indicates a timeout occurred.
+ */
+ uint32_t sleep_transitions;
+ } resume_response;
+
+ /* No response fields for non-resume messages. */
+ };
+} __ec_align4;
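+
+/*
+ * Usage sketch (illustrative only): announce an S0ix suspend with the
+ * default failure timeout, then check for a flagged timeout on resume.
+ * ec_cmd() is a hypothetical transport helper assumed to select command
+ * version 1.
+ *
+ * struct ec_params_host_sleep_event_v1 p = {
+ *   .sleep_event = HOST_SLEEP_EVENT_S0IX_SUSPEND,
+ *   .suspend_params.sleep_timeout_ms = EC_HOST_SLEEP_TIMEOUT_DEFAULT,
+ * };
+ * struct ec_response_host_sleep_event_v1 r;
+ *
+ * ec_cmd(EC_CMD_HOST_SLEEP_EVENT, &p, sizeof(p), NULL, 0);
+ * ... system suspends and resumes ...
+ * p.sleep_event = HOST_SLEEP_EVENT_S0IX_RESUME;
+ * ec_cmd(EC_CMD_HOST_SLEEP_EVENT, &p, sizeof(p), &r, sizeof(r));
+ * if (r.resume_response.sleep_transitions & EC_HOST_RESUME_SLEEP_TIMEOUT)
+ *   ... the EC saw a failed sleep transition ...
+ */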
+
+/*****************************************************************************/
+/* Device events */
+#define EC_CMD_DEVICE_EVENT 0x00AA
+
+enum ec_device_event {
+ EC_DEVICE_EVENT_TRACKPAD,
+ EC_DEVICE_EVENT_DSP,
+ EC_DEVICE_EVENT_WIFI,
+ EC_DEVICE_EVENT_WLC,
+};
+
+enum ec_device_event_param {
+ /* Get and clear pending device events */
+ EC_DEVICE_EVENT_PARAM_GET_CURRENT_EVENTS,
+ /* Get device event mask */
+ EC_DEVICE_EVENT_PARAM_GET_ENABLED_EVENTS,
+ /* Set device event mask */
+ EC_DEVICE_EVENT_PARAM_SET_ENABLED_EVENTS,
+};
+
+#define EC_DEVICE_EVENT_MASK(event_code) BIT(event_code % 32)
+
+struct ec_params_device_event {
+ uint32_t event_mask;
+ uint8_t param;
+} __ec_align_size1;
+
+struct ec_response_device_event {
+ uint32_t event_mask;
+} __ec_align4;
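+
+/*
+ * Usage sketch (illustrative only): enable trackpad events, then fetch and
+ * clear whatever is pending. ec_cmd() is a hypothetical transport helper.
+ *
+ * struct ec_params_device_event p = {
+ *   .event_mask = EC_DEVICE_EVENT_MASK(EC_DEVICE_EVENT_TRACKPAD),
+ *   .param = EC_DEVICE_EVENT_PARAM_SET_ENABLED_EVENTS,
+ * };
+ * struct ec_response_device_event r;
+ *
+ * ec_cmd(EC_CMD_DEVICE_EVENT, &p, sizeof(p), &r, sizeof(r));
+ * p.param = EC_DEVICE_EVENT_PARAM_GET_CURRENT_EVENTS;
+ * ec_cmd(EC_CMD_DEVICE_EVENT, &p, sizeof(p), &r, sizeof(r));
+ * ... r.event_mask holds the pending (now cleared) events ...
+ */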
+
+/*****************************************************************************/
+/* Smart battery pass-through */
+
+/* Get / Set 16-bit smart battery registers */
+#define EC_CMD_SB_READ_WORD 0x00B0
+#define EC_CMD_SB_WRITE_WORD 0x00B1
+
+/* Get / Set string smart battery parameters
+ * formatted as SMBUS "block".
+ */
+#define EC_CMD_SB_READ_BLOCK 0x00B2
+#define EC_CMD_SB_WRITE_BLOCK 0x00B3
+
+struct ec_params_sb_rd {
+ uint8_t reg;
+} __ec_align1;
+
+struct ec_response_sb_rd_word {
+ uint16_t value;
+} __ec_align2;
+
+struct ec_params_sb_wr_word {
+ uint8_t reg;
+ uint16_t value;
+} __ec_align1;
+
+struct ec_response_sb_rd_block {
+ uint8_t data[32];
+} __ec_align1;
+
+struct ec_params_sb_wr_block {
+ uint8_t reg;
+ uint16_t data[32];
+} __ec_align1;
+
+/*****************************************************************************/
+/* Battery vendor parameters
+ *
+ * Get or set vendor-specific parameters in the battery. Implementations may
+ * differ between boards or batteries. On a set operation, the response
+ * contains the actual value set, which may be rounded or clipped from the
+ * requested value.
+ */
+
+#define EC_CMD_BATTERY_VENDOR_PARAM 0x00B4
+
+enum ec_battery_vendor_param_mode {
+ BATTERY_VENDOR_PARAM_MODE_GET = 0,
+ BATTERY_VENDOR_PARAM_MODE_SET,
+};
+
+struct ec_params_battery_vendor_param {
+ uint32_t param;
+ uint32_t value;
+ uint8_t mode;
+} __ec_align_size1;
+
+struct ec_response_battery_vendor_param {
+ uint32_t value;
+} __ec_align4;
+
+/*****************************************************************************/
+/*
+ * Smart Battery Firmware Update Commands
+ */
+#define EC_CMD_SB_FW_UPDATE 0x00B5
+
+enum ec_sb_fw_update_subcmd {
+ EC_SB_FW_UPDATE_PREPARE = 0x0,
+	EC_SB_FW_UPDATE_INFO = 0x1, /* query SB info */
+	EC_SB_FW_UPDATE_BEGIN = 0x2, /* check if protected */
+	EC_SB_FW_UPDATE_WRITE = 0x3, /* check if protected */
+ EC_SB_FW_UPDATE_END = 0x4,
+ EC_SB_FW_UPDATE_STATUS = 0x5,
+ EC_SB_FW_UPDATE_PROTECT = 0x6,
+ EC_SB_FW_UPDATE_MAX = 0x7,
+};
+
+#define SB_FW_UPDATE_CMD_WRITE_BLOCK_SIZE 32
+#define SB_FW_UPDATE_CMD_STATUS_SIZE 2
+#define SB_FW_UPDATE_CMD_INFO_SIZE 8
+
+struct ec_sb_fw_update_header {
+ uint16_t subcmd; /* enum ec_sb_fw_update_subcmd */
+ uint16_t fw_id; /* firmware id */
+} __ec_align4;
+
+struct ec_params_sb_fw_update {
+ struct ec_sb_fw_update_header hdr;
+ union {
+ /* EC_SB_FW_UPDATE_PREPARE = 0x0 */
+ /* EC_SB_FW_UPDATE_INFO = 0x1 */
+ /* EC_SB_FW_UPDATE_BEGIN = 0x2 */
+ /* EC_SB_FW_UPDATE_END = 0x4 */
+ /* EC_SB_FW_UPDATE_STATUS = 0x5 */
+ /* EC_SB_FW_UPDATE_PROTECT = 0x6 */
+ /* Those have no args */
+
+ /* EC_SB_FW_UPDATE_WRITE = 0x3 */
+ struct __ec_align4 {
+ uint8_t data[SB_FW_UPDATE_CMD_WRITE_BLOCK_SIZE];
+ } write;
+ };
+} __ec_align4;
+
+struct ec_response_sb_fw_update {
+ union {
+ /* EC_SB_FW_UPDATE_INFO = 0x1 */
+ struct __ec_align1 {
+ uint8_t data[SB_FW_UPDATE_CMD_INFO_SIZE];
+ } info;
+
+ /* EC_SB_FW_UPDATE_STATUS = 0x5 */
+ struct __ec_align1 {
+ uint8_t data[SB_FW_UPDATE_CMD_STATUS_SIZE];
+ } status;
+ };
+} __ec_align1;
+
+/*
+ * Entering Verified Boot Mode Command
+ * Default mode is VBOOT_MODE_NORMAL if EC did not receive this command.
+ * Valid Modes are: normal, developer, and recovery.
+ */
+#define EC_CMD_ENTERING_MODE 0x00B6
+
+struct ec_params_entering_mode {
+ int vboot_mode;
+} __ec_align4;
+
+#define VBOOT_MODE_NORMAL 0
+#define VBOOT_MODE_DEVELOPER 1
+#define VBOOT_MODE_RECOVERY 2
+
+/*****************************************************************************/
+/*
+ * I2C passthru protection command: Protects I2C tunnels against access on
+ * certain addresses (board-specific).
+ */
+#define EC_CMD_I2C_PASSTHRU_PROTECT 0x00B7
+
+enum ec_i2c_passthru_protect_subcmd {
+ EC_CMD_I2C_PASSTHRU_PROTECT_STATUS = 0x0,
+ EC_CMD_I2C_PASSTHRU_PROTECT_ENABLE = 0x1,
+};
+
+struct ec_params_i2c_passthru_protect {
+ uint8_t subcmd;
+ uint8_t port; /* I2C port number */
+} __ec_align1;
+
+struct ec_response_i2c_passthru_protect {
+ uint8_t status; /* Status flags (0: unlocked, 1: locked) */
+} __ec_align1;
+
+
+/*****************************************************************************/
+/*
+ * HDMI CEC commands
+ *
+ * These commands are for sending and receiving messages via HDMI CEC
+ */
+
+#define MAX_CEC_MSG_LEN 16
+
+/* CEC message from the AP to be written on the CEC bus */
+#define EC_CMD_CEC_WRITE_MSG 0x00B8
+
+/**
+ * struct ec_params_cec_write - Message to write to the CEC bus
+ * @msg: message content to write to the CEC bus
+ */
+struct ec_params_cec_write {
+ uint8_t msg[MAX_CEC_MSG_LEN];
+} __ec_align1;
+
+/* Set various CEC parameters */
+#define EC_CMD_CEC_SET 0x00BA
+
+/**
+ * struct ec_params_cec_set - CEC parameters set
+ * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS
+ * @val: in case cmd is CEC_CMD_ENABLE, this field can be 0 to disable CEC
+ * or 1 to enable CEC functionality; in case cmd is
+ * CEC_CMD_LOGICAL_ADDRESS, this field encodes the requested logical
+ * address between 0 and 15 or 0xff to unregister
+ */
+struct ec_params_cec_set {
+ uint8_t cmd; /* enum cec_command */
+ uint8_t val;
+} __ec_align1;
+
+/* Read various CEC parameters */
+#define EC_CMD_CEC_GET 0x00BB
+
+/**
+ * struct ec_params_cec_get - CEC parameters get
+ * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS
+ */
+struct ec_params_cec_get {
+ uint8_t cmd; /* enum cec_command */
+} __ec_align1;
+
+/**
+ * struct ec_response_cec_get - CEC parameters get response
+ * @val: in case cmd was CEC_CMD_ENABLE, this field will be 0 if CEC is
+ * disabled or 1 if CEC functionality is enabled;
+ * in case cmd was CEC_CMD_LOGICAL_ADDRESS, this will encode the
+ * configured logical address between 0 and 15 or 0xff if unregistered
+ */
+struct ec_response_cec_get {
+ uint8_t val;
+} __ec_align1;
+
+/* CEC parameters command */
+enum cec_command {
+ /* CEC reading, writing and events enable */
+ CEC_CMD_ENABLE,
+ /* CEC logical address */
+ CEC_CMD_LOGICAL_ADDRESS,
+};
+
+/* Events from CEC to AP */
+enum mkbp_cec_event {
+ /* Outgoing message was acknowledged by a follower */
+ EC_MKBP_CEC_SEND_OK = BIT(0),
+ /* Outgoing message was not acknowledged */
+ EC_MKBP_CEC_SEND_FAILED = BIT(1),
+};
+
+/*****************************************************************************/
+
+/* Commands for audio codec. */
+#define EC_CMD_EC_CODEC 0x00BC
+
+enum ec_codec_subcmd {
+ EC_CODEC_GET_CAPABILITIES = 0x0,
+ EC_CODEC_GET_SHM_ADDR = 0x1,
+ EC_CODEC_SET_SHM_ADDR = 0x2,
+ EC_CODEC_SUBCMD_COUNT,
+};
+
+enum ec_codec_cap {
+ EC_CODEC_CAP_WOV_AUDIO_SHM = 0,
+ EC_CODEC_CAP_WOV_LANG_SHM = 1,
+ EC_CODEC_CAP_LAST = 32,
+};
+
+enum ec_codec_shm_id {
+ EC_CODEC_SHM_ID_WOV_AUDIO = 0x0,
+ EC_CODEC_SHM_ID_WOV_LANG = 0x1,
+ EC_CODEC_SHM_ID_LAST,
+};
+
+enum ec_codec_shm_type {
+ EC_CODEC_SHM_TYPE_EC_RAM = 0x0,
+ EC_CODEC_SHM_TYPE_SYSTEM_RAM = 0x1,
+};
+
+struct __ec_align1 ec_param_ec_codec_get_shm_addr {
+ uint8_t shm_id;
+ uint8_t reserved[3];
+};
+
+struct __ec_align4 ec_param_ec_codec_set_shm_addr {
+ uint64_t phys_addr;
+ uint32_t len;
+ uint8_t shm_id;
+ uint8_t reserved[3];
+};
+
+struct __ec_align4 ec_param_ec_codec {
+ uint8_t cmd; /* enum ec_codec_subcmd */
+ uint8_t reserved[3];
+
+ union {
+ struct ec_param_ec_codec_get_shm_addr
+ get_shm_addr_param;
+ struct ec_param_ec_codec_set_shm_addr
+ set_shm_addr_param;
+ };
+};
+
+struct __ec_align4 ec_response_ec_codec_get_capabilities {
+ uint32_t capabilities;
+};
+
+struct __ec_align4 ec_response_ec_codec_get_shm_addr {
+ uint64_t phys_addr;
+ uint32_t len;
+ uint8_t type;
+ uint8_t reserved[3];
+};
+
+/*****************************************************************************/
+
+/* Commands for DMIC on audio codec. */
+#define EC_CMD_EC_CODEC_DMIC 0x00BD
+
+enum ec_codec_dmic_subcmd {
+ EC_CODEC_DMIC_GET_MAX_GAIN = 0x0,
+ EC_CODEC_DMIC_SET_GAIN_IDX = 0x1,
+ EC_CODEC_DMIC_GET_GAIN_IDX = 0x2,
+ EC_CODEC_DMIC_SUBCMD_COUNT,
+};
+
+enum ec_codec_dmic_channel {
+ EC_CODEC_DMIC_CHANNEL_0 = 0x0,
+ EC_CODEC_DMIC_CHANNEL_1 = 0x1,
+ EC_CODEC_DMIC_CHANNEL_2 = 0x2,
+ EC_CODEC_DMIC_CHANNEL_3 = 0x3,
+ EC_CODEC_DMIC_CHANNEL_4 = 0x4,
+ EC_CODEC_DMIC_CHANNEL_5 = 0x5,
+ EC_CODEC_DMIC_CHANNEL_6 = 0x6,
+ EC_CODEC_DMIC_CHANNEL_7 = 0x7,
+ EC_CODEC_DMIC_CHANNEL_COUNT,
+};
+
+struct __ec_align1 ec_param_ec_codec_dmic_set_gain_idx {
+ uint8_t channel; /* enum ec_codec_dmic_channel */
+ uint8_t gain;
+ uint8_t reserved[2];
+};
+
+struct __ec_align1 ec_param_ec_codec_dmic_get_gain_idx {
+ uint8_t channel; /* enum ec_codec_dmic_channel */
+ uint8_t reserved[3];
+};
+
+struct __ec_align4 ec_param_ec_codec_dmic {
+ uint8_t cmd; /* enum ec_codec_dmic_subcmd */
+ uint8_t reserved[3];
+
+ union {
+ struct ec_param_ec_codec_dmic_set_gain_idx
+ set_gain_idx_param;
+ struct ec_param_ec_codec_dmic_get_gain_idx
+ get_gain_idx_param;
+ };
+};
+
+struct __ec_align1 ec_response_ec_codec_dmic_get_max_gain {
+ uint8_t max_gain;
+};
+
+struct __ec_align1 ec_response_ec_codec_dmic_get_gain_idx {
+ uint8_t gain;
+};
+
+/*****************************************************************************/
+
+/* Commands for I2S RX on audio codec. */
+
+#define EC_CMD_EC_CODEC_I2S_RX 0x00BE
+
+enum ec_codec_i2s_rx_subcmd {
+ EC_CODEC_I2S_RX_ENABLE = 0x0,
+ EC_CODEC_I2S_RX_DISABLE = 0x1,
+ EC_CODEC_I2S_RX_SET_SAMPLE_DEPTH = 0x2,
+ EC_CODEC_I2S_RX_SET_DAIFMT = 0x3,
+ EC_CODEC_I2S_RX_SET_BCLK = 0x4,
+ EC_CODEC_I2S_RX_RESET = 0x5,
+ EC_CODEC_I2S_RX_SUBCMD_COUNT,
+};
+
+enum ec_codec_i2s_rx_sample_depth {
+ EC_CODEC_I2S_RX_SAMPLE_DEPTH_16 = 0x0,
+ EC_CODEC_I2S_RX_SAMPLE_DEPTH_24 = 0x1,
+ EC_CODEC_I2S_RX_SAMPLE_DEPTH_COUNT,
+};
+
+enum ec_codec_i2s_rx_daifmt {
+ EC_CODEC_I2S_RX_DAIFMT_I2S = 0x0,
+ EC_CODEC_I2S_RX_DAIFMT_RIGHT_J = 0x1,
+ EC_CODEC_I2S_RX_DAIFMT_LEFT_J = 0x2,
+ EC_CODEC_I2S_RX_DAIFMT_COUNT,
+};
+
+struct __ec_align1 ec_param_ec_codec_i2s_rx_set_sample_depth {
+ uint8_t depth;
+ uint8_t reserved[3];
+};
+
+struct __ec_align1 ec_param_ec_codec_i2s_rx_set_gain {
+ uint8_t left;
+ uint8_t right;
+ uint8_t reserved[2];
+};
+
+struct __ec_align1 ec_param_ec_codec_i2s_rx_set_daifmt {
+ uint8_t daifmt;
+ uint8_t reserved[3];
+};
+
+struct __ec_align4 ec_param_ec_codec_i2s_rx_set_bclk {
+ uint32_t bclk;
+};
+
+struct __ec_align4 ec_param_ec_codec_i2s_rx {
+ uint8_t cmd; /* enum ec_codec_i2s_rx_subcmd */
+ uint8_t reserved[3];
+
+ union {
+ struct ec_param_ec_codec_i2s_rx_set_sample_depth
+ set_sample_depth_param;
+ struct ec_param_ec_codec_i2s_rx_set_daifmt
+ set_daifmt_param;
+ struct ec_param_ec_codec_i2s_rx_set_bclk
+ set_bclk_param;
+ };
+};
+
+/*****************************************************************************/
+/* Commands for WoV on audio codec. */
+
+#define EC_CMD_EC_CODEC_WOV 0x00BF
+
+enum ec_codec_wov_subcmd {
+ EC_CODEC_WOV_SET_LANG = 0x0,
+ EC_CODEC_WOV_SET_LANG_SHM = 0x1,
+ EC_CODEC_WOV_GET_LANG = 0x2,
+ EC_CODEC_WOV_ENABLE = 0x3,
+ EC_CODEC_WOV_DISABLE = 0x4,
+ EC_CODEC_WOV_READ_AUDIO = 0x5,
+ EC_CODEC_WOV_READ_AUDIO_SHM = 0x6,
+ EC_CODEC_WOV_SUBCMD_COUNT,
+};
+
+/*
+ * @hash is SHA256 of the whole language model.
+ * @total_len indicates the length of whole language model.
+ * @offset is the cursor from the beginning of the model.
+ * @buf is the packet buffer.
+ * @len denotes how many bytes in the buf.
+ */
+struct __ec_align4 ec_param_ec_codec_wov_set_lang {
+ uint8_t hash[32];
+ uint32_t total_len;
+ uint32_t offset;
+ uint8_t buf[128];
+ uint32_t len;
+};
+
+struct __ec_align4 ec_param_ec_codec_wov_set_lang_shm {
+ uint8_t hash[32];
+ uint32_t total_len;
+};
+
+struct __ec_align4 ec_param_ec_codec_wov {
+ uint8_t cmd; /* enum ec_codec_wov_subcmd */
+ uint8_t reserved[3];
+
+ union {
+ struct ec_param_ec_codec_wov_set_lang
+ set_lang_param;
+ struct ec_param_ec_codec_wov_set_lang_shm
+ set_lang_shm_param;
+ };
+};
+
+struct __ec_align4 ec_response_ec_codec_wov_get_lang {
+ uint8_t hash[32];
+};
+
+struct __ec_align4 ec_response_ec_codec_wov_read_audio {
+ uint8_t buf[128];
+ uint32_t len;
+};
+
+struct __ec_align4 ec_response_ec_codec_wov_read_audio_shm {
+ uint32_t offset;
+ uint32_t len;
+};
+
+/*****************************************************************************/
+/* System commands */
+
+/*
+ * TODO(crosbug.com/p/23747): This is a confusing name, since it doesn't
+ * necessarily reboot the EC. Rename to "image" or something similar?
+ */
+#define EC_CMD_REBOOT_EC 0x00D2
+
+/* Command */
+enum ec_reboot_cmd {
+ EC_REBOOT_CANCEL = 0, /* Cancel a pending reboot */
+ EC_REBOOT_JUMP_RO = 1, /* Jump to RO without rebooting */
+ EC_REBOOT_JUMP_RW = 2, /* Jump to active RW without rebooting */
+ /* (command 3 was jump to RW-B) */
+ EC_REBOOT_COLD = 4, /* Cold-reboot */
+ EC_REBOOT_DISABLE_JUMP = 5, /* Disable jump until next reboot */
+ EC_REBOOT_HIBERNATE = 6, /* Hibernate EC */
+	EC_REBOOT_HIBERNATE_CLEAR_AP_OFF = 7, /* Hibernate EC and clear AP_OFF */
+ EC_REBOOT_COLD_AP_OFF = 8, /* Cold-reboot and don't boot AP */
+};
+
+/* Flags for ec_params_reboot_ec.reboot_flags */
+#define EC_REBOOT_FLAG_RESERVED0 BIT(0) /* Was recovery request */
+#define EC_REBOOT_FLAG_ON_AP_SHUTDOWN BIT(1) /* Reboot after AP shutdown */
+#define EC_REBOOT_FLAG_SWITCH_RW_SLOT BIT(2) /* Switch RW slot */
+
+struct ec_params_reboot_ec {
+ uint8_t cmd; /* enum ec_reboot_cmd */
+ uint8_t flags; /* See EC_REBOOT_FLAG_* */
+} __ec_align1;
+
+/*
+ * Get information on last EC panic.
+ *
+ * Returns variable-length platform-dependent panic information. See panic.h
+ * for details.
+ */
+#define EC_CMD_GET_PANIC_INFO 0x00D3
+
+/*****************************************************************************/
+/*
+ * Special commands
+ *
+ * These do not follow the normal rules for commands. See each command for
+ * details.
+ */
+
+/*
+ * Reboot NOW
+ *
+ * This command will work even when the EC LPC interface is busy, because the
+ * reboot command is processed at interrupt level. Note that when the EC
+ * reboots, the host will reboot too, so there is no response to this command.
+ *
+ * Use EC_CMD_REBOOT_EC to reboot the EC more politely.
+ */
+#define EC_CMD_REBOOT 0x00D1 /* Think "die" */
+
+/*
+ * Resend last response (not supported on LPC).
+ *
+ * Returns EC_RES_UNAVAILABLE if there is no response available - for example,
+ * there was no previous command, or the previous command's response was too
+ * big to save.
+ */
+#define EC_CMD_RESEND_RESPONSE 0x00DB
+
+/*
+ * This header byte on a command indicates version 0. Any header byte less
+ * than this means that we are talking to an old EC which doesn't support
+ * versioning. In that case, we assume version 0.
+ *
+ * Header bytes greater than this indicate a later version. For example,
+ * EC_CMD_VERSION0 + 1 means we are using version 1.
+ *
+ * The old EC interface must not use commands 0xdc or higher.
+ */
+#define EC_CMD_VERSION0 0x00DC
+
+/*****************************************************************************/
+/*
+ * PD commands
+ *
+ * These commands are for PD MCU communication.
+ */
+
+/* EC to PD MCU exchange status command */
+#define EC_CMD_PD_EXCHANGE_STATUS 0x0100
+#define EC_VER_PD_EXCHANGE_STATUS 2
+
+enum pd_charge_state {
+ PD_CHARGE_NO_CHANGE = 0, /* Don't change charge state */
+ PD_CHARGE_NONE, /* No charging allowed */
+ PD_CHARGE_5V, /* 5V charging only */
+ PD_CHARGE_MAX /* Charge at max voltage */
+};
+
+/* Status of EC being sent to PD */
+#define EC_STATUS_HIBERNATING BIT(0)
+
+struct ec_params_pd_status {
+ uint8_t status; /* EC status */
+ int8_t batt_soc; /* battery state of charge */
+ uint8_t charge_state; /* charging state (from enum pd_charge_state) */
+} __ec_align1;
+
+/* Status of PD being sent back to EC */
+#define PD_STATUS_HOST_EVENT BIT(0) /* Forward host event to AP */
+#define PD_STATUS_IN_RW BIT(1) /* Running RW image */
+#define PD_STATUS_JUMPED_TO_IMAGE BIT(2) /* Current image was jumped to */
+#define PD_STATUS_TCPC_ALERT_0 BIT(3) /* Alert active in port 0 TCPC */
+#define PD_STATUS_TCPC_ALERT_1 BIT(4) /* Alert active in port 1 TCPC */
+#define PD_STATUS_TCPC_ALERT_2 BIT(5) /* Alert active in port 2 TCPC */
+#define PD_STATUS_TCPC_ALERT_3 BIT(6) /* Alert active in port 3 TCPC */
+#define PD_STATUS_EC_INT_ACTIVE (PD_STATUS_TCPC_ALERT_0 | \
+ PD_STATUS_TCPC_ALERT_1 | \
+ PD_STATUS_HOST_EVENT)
+struct ec_response_pd_status {
+ uint32_t curr_lim_ma; /* input current limit */
+ uint16_t status; /* PD MCU status */
+ int8_t active_charge_port; /* active charging port */
+} __ec_align_size1;
+
+/* AP to PD MCU host event status command, cleared on read */
+#define EC_CMD_PD_HOST_EVENT_STATUS 0x0104
+
+/* PD MCU host event status bits */
+#define PD_EVENT_UPDATE_DEVICE BIT(0)
+#define PD_EVENT_POWER_CHANGE BIT(1)
+#define PD_EVENT_IDENTITY_RECEIVED BIT(2)
+#define PD_EVENT_DATA_SWAP BIT(3)
+struct ec_response_host_event_status {
+ uint32_t status; /* PD MCU host event status */
+} __ec_align4;
+
+/* Set USB type-C port role and muxes */
+#define EC_CMD_USB_PD_CONTROL 0x0101
+
+enum usb_pd_control_role {
+ USB_PD_CTRL_ROLE_NO_CHANGE = 0,
+ USB_PD_CTRL_ROLE_TOGGLE_ON = 1, /* == AUTO */
+ USB_PD_CTRL_ROLE_TOGGLE_OFF = 2,
+ USB_PD_CTRL_ROLE_FORCE_SINK = 3,
+ USB_PD_CTRL_ROLE_FORCE_SOURCE = 4,
+ USB_PD_CTRL_ROLE_FREEZE = 5,
+ USB_PD_CTRL_ROLE_COUNT
+};
+
+enum usb_pd_control_mux {
+ USB_PD_CTRL_MUX_NO_CHANGE = 0,
+ USB_PD_CTRL_MUX_NONE = 1,
+ USB_PD_CTRL_MUX_USB = 2,
+ USB_PD_CTRL_MUX_DP = 3,
+ USB_PD_CTRL_MUX_DOCK = 4,
+ USB_PD_CTRL_MUX_AUTO = 5,
+ USB_PD_CTRL_MUX_COUNT
+};
+
+enum usb_pd_control_swap {
+ USB_PD_CTRL_SWAP_NONE = 0,
+ USB_PD_CTRL_SWAP_DATA = 1,
+ USB_PD_CTRL_SWAP_POWER = 2,
+ USB_PD_CTRL_SWAP_VCONN = 3,
+ USB_PD_CTRL_SWAP_COUNT
+};
+
+struct ec_params_usb_pd_control {
+ uint8_t port;
+ uint8_t role;
+ uint8_t mux;
+ uint8_t swap;
+} __ec_align1;
+
+#define PD_CTRL_RESP_ENABLED_COMMS BIT(0) /* Communication enabled */
+#define PD_CTRL_RESP_ENABLED_CONNECTED BIT(1) /* Device connected */
+#define PD_CTRL_RESP_ENABLED_PD_CAPABLE BIT(2) /* Partner is PD capable */
+
+#define PD_CTRL_RESP_ROLE_POWER BIT(0) /* 0=SNK/1=SRC */
+#define PD_CTRL_RESP_ROLE_DATA BIT(1) /* 0=UFP/1=DFP */
+#define PD_CTRL_RESP_ROLE_VCONN BIT(2) /* Vconn status */
+#define PD_CTRL_RESP_ROLE_DR_POWER BIT(3) /* Partner is dualrole power */
+#define PD_CTRL_RESP_ROLE_DR_DATA BIT(4) /* Partner is dualrole data */
+#define PD_CTRL_RESP_ROLE_USB_COMM BIT(5) /* Partner USB comm capable */
+#define PD_CTRL_RESP_ROLE_EXT_POWERED BIT(6) /* Partner externally powered */
+
+struct ec_response_usb_pd_control {
+ uint8_t enabled;
+ uint8_t role;
+ uint8_t polarity;
+ uint8_t state;
+} __ec_align1;
+
+struct ec_response_usb_pd_control_v1 {
+ uint8_t enabled;
+ uint8_t role;
+ uint8_t polarity;
+ char state[32];
+} __ec_align1;
+
+/* Values representing usbc PD CC state */
+#define USBC_PD_CC_NONE 0 /* No accessory connected */
+#define USBC_PD_CC_NO_UFP 1 /* No UFP accessory connected */
+#define USBC_PD_CC_AUDIO_ACC 2 /* Audio accessory connected */
+#define USBC_PD_CC_DEBUG_ACC 3 /* Debug accessory connected */
+#define USBC_PD_CC_UFP_ATTACHED 4 /* UFP attached to usbc */
+#define USBC_PD_CC_DFP_ATTACHED 5 /* DFP attached to usbc */
+
+/* Active/Passive Cable */
+#define USB_PD_CTRL_ACTIVE_CABLE BIT(0)
+/* Optical/Non-optical cable */
+#define USB_PD_CTRL_OPTICAL_CABLE BIT(1)
+/* 3rd Gen TBT device (or AMA)/2nd Gen TBT adapter */
+#define USB_PD_CTRL_TBT_LEGACY_ADAPTER BIT(2)
+/* Active Link Uni-Direction */
+#define USB_PD_CTRL_ACTIVE_LINK_UNIDIR BIT(3)
+
+struct ec_response_usb_pd_control_v2 {
+ uint8_t enabled;
+ uint8_t role;
+ uint8_t polarity;
+ char state[32];
+ uint8_t cc_state; /* enum pd_cc_states representing cc state */
+ uint8_t dp_mode; /* Current DP pin mode (MODE_DP_PIN_[A-E]) */
+ uint8_t reserved; /* Reserved for future use */
+ uint8_t control_flags; /* USB_PD_CTRL_*flags */
+ uint8_t cable_speed; /* TBT_SS_* cable speed */
+ uint8_t cable_gen; /* TBT_GEN3_* cable rounded support */
+} __ec_align1;
+
+#define EC_CMD_USB_PD_PORTS 0x0102
+
+/* Maximum number of PD ports on a device, num_ports will be <= this */
+#define EC_USB_PD_MAX_PORTS 8
+
+struct ec_response_usb_pd_ports {
+ uint8_t num_ports;
+} __ec_align1;
+
+#define EC_CMD_USB_PD_POWER_INFO 0x0103
+
+#define PD_POWER_CHARGING_PORT 0xff
+struct ec_params_usb_pd_power_info {
+ uint8_t port;
+} __ec_align1;
+
+enum usb_chg_type {
+ USB_CHG_TYPE_NONE,
+ USB_CHG_TYPE_PD,
+ USB_CHG_TYPE_C,
+ USB_CHG_TYPE_PROPRIETARY,
+ USB_CHG_TYPE_BC12_DCP,
+ USB_CHG_TYPE_BC12_CDP,
+ USB_CHG_TYPE_BC12_SDP,
+ USB_CHG_TYPE_OTHER,
+ USB_CHG_TYPE_VBUS,
+ USB_CHG_TYPE_UNKNOWN,
+ USB_CHG_TYPE_DEDICATED,
+};
+enum usb_power_roles {
+ USB_PD_PORT_POWER_DISCONNECTED,
+ USB_PD_PORT_POWER_SOURCE,
+ USB_PD_PORT_POWER_SINK,
+ USB_PD_PORT_POWER_SINK_NOT_CHARGING,
+};
+
+struct usb_chg_measures {
+ uint16_t voltage_max;
+ uint16_t voltage_now;
+ uint16_t current_max;
+ uint16_t current_lim;
+} __ec_align2;
+
+struct ec_response_usb_pd_power_info {
+ uint8_t role;
+ uint8_t type;
+ uint8_t dualrole;
+ uint8_t reserved1;
+ struct usb_chg_measures meas;
+ uint32_t max_power;
+} __ec_align4;
+
+
+/*
+ * This command returns the number of USB PD charge ports plus the number
+ * of dedicated ports present.
+ * EC_CMD_USB_PD_PORTS does NOT include the dedicated ports.
+ */
+#define EC_CMD_CHARGE_PORT_COUNT 0x0105
+struct ec_response_charge_port_count {
+ uint8_t port_count;
+} __ec_align1;
+
+/* Write USB-PD device FW */
+#define EC_CMD_USB_PD_FW_UPDATE 0x0110
+
+enum usb_pd_fw_update_cmds {
+ USB_PD_FW_REBOOT,
+ USB_PD_FW_FLASH_ERASE,
+ USB_PD_FW_FLASH_WRITE,
+ USB_PD_FW_ERASE_SIG,
+};
+
+struct ec_params_usb_pd_fw_update {
+ uint16_t dev_id;
+ uint8_t cmd;
+ uint8_t port;
+ uint32_t size; /* Size to write in bytes */
+ /* Followed by data to write */
+} __ec_align4;
+
+/* Write USB-PD Accessory RW_HASH table entry */
+#define EC_CMD_USB_PD_RW_HASH_ENTRY 0x0111
+/* RW hash is first 20 bytes of SHA-256 of RW section */
+#define PD_RW_HASH_SIZE 20
+struct ec_params_usb_pd_rw_hash_entry {
+ uint16_t dev_id;
+ uint8_t dev_rw_hash[PD_RW_HASH_SIZE];
+ uint8_t reserved; /*
+ * For alignment of current_image
+ * TODO(rspangler) but it's not aligned!
+ * Should have been reserved[2].
+ */
+ uint32_t current_image; /* One of ec_current_image */
+} __ec_align1;
+
+/* Read USB-PD Accessory info */
+#define EC_CMD_USB_PD_DEV_INFO 0x0112
+
+struct ec_params_usb_pd_info_request {
+ uint8_t port;
+} __ec_align1;
+
+/* Read USB-PD Device discovery info */
+#define EC_CMD_USB_PD_DISCOVERY 0x0113
+struct ec_params_usb_pd_discovery_entry {
+ uint16_t vid; /* USB-IF VID */
+ uint16_t pid; /* USB-IF PID */
+ uint8_t ptype; /* product type (hub,periph,cable,ama) */
+} __ec_align_size1;
+
+/* Override default charge behavior */
+#define EC_CMD_PD_CHARGE_PORT_OVERRIDE 0x0114
+
+/* Negative port parameters have special meaning */
+enum usb_pd_override_ports {
+ OVERRIDE_DONT_CHARGE = -2,
+ OVERRIDE_OFF = -1,
+ /* [0, CONFIG_USB_PD_PORT_COUNT): Port# */
+};
+
+struct ec_params_charge_port_override {
+ int16_t override_port; /* Override port# */
+} __ec_align2;
+
+/*
+ * Read (and delete) one entry of PD event log.
+ * TODO(crbug.com/751742): Make this host command more generic to accommodate
+ * future non-PD logs that use the same internal EC event_log.
+ */
+#define EC_CMD_PD_GET_LOG_ENTRY 0x0115
+
+struct ec_response_pd_log {
+ uint32_t timestamp; /* relative timestamp in milliseconds */
+ uint8_t type; /* event type : see PD_EVENT_xx below */
+ uint8_t size_port; /* [7:5] port number [4:0] payload size in bytes */
+ uint16_t data; /* type-defined data payload */
+ uint8_t payload[]; /* optional additional data payload: 0..16 bytes */
+} __ec_align4;
+
+/* The timestamp is the microsecond counter shifted to get about a ms. */
+#define PD_LOG_TIMESTAMP_SHIFT 10 /* 1 LSB = 1024us */
+
+#define PD_LOG_SIZE_MASK 0x1f
+#define PD_LOG_PORT_MASK 0xe0
+#define PD_LOG_PORT_SHIFT 5
+#define PD_LOG_PORT_SIZE(port, size) (((port) << PD_LOG_PORT_SHIFT) | \
+ ((size) & PD_LOG_SIZE_MASK))
+#define PD_LOG_PORT(size_port) ((size_port) >> PD_LOG_PORT_SHIFT)
+#define PD_LOG_SIZE(size_port) ((size_port) & PD_LOG_SIZE_MASK)
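+
+/*
+ * Illustrative sketch (not part of the protocol itself): unpacking an
+ * entry returned by EC_CMD_PD_GET_LOG_ENTRY. The local name is
+ * hypothetical.
+ *
+ *    struct ec_response_pd_log *e;
+ *    int port = PD_LOG_PORT(e->size_port);
+ *    int payload_bytes = PD_LOG_SIZE(e->size_port);
+ *    (e->timestamp units: 1 LSB = 1024us, see PD_LOG_TIMESTAMP_SHIFT)
+ */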
+
+/* PD event log : entry types */
+/* PD MCU events */
+#define PD_EVENT_MCU_BASE 0x00
+#define PD_EVENT_MCU_CHARGE (PD_EVENT_MCU_BASE+0)
+#define PD_EVENT_MCU_CONNECT (PD_EVENT_MCU_BASE+1)
+/* Reserved for custom board event */
+#define PD_EVENT_MCU_BOARD_CUSTOM (PD_EVENT_MCU_BASE+2)
+/* PD generic accessory events */
+#define PD_EVENT_ACC_BASE 0x20
+#define PD_EVENT_ACC_RW_FAIL (PD_EVENT_ACC_BASE+0)
+#define PD_EVENT_ACC_RW_ERASE (PD_EVENT_ACC_BASE+1)
+/* PD power supply events */
+#define PD_EVENT_PS_BASE 0x40
+#define PD_EVENT_PS_FAULT (PD_EVENT_PS_BASE+0)
+/* PD video dongles events */
+#define PD_EVENT_VIDEO_BASE 0x60
+#define PD_EVENT_VIDEO_DP_MODE (PD_EVENT_VIDEO_BASE+0)
+#define PD_EVENT_VIDEO_CODEC (PD_EVENT_VIDEO_BASE+1)
+/* Returned in the "type" field, when there is no entry available */
+#define PD_EVENT_NO_ENTRY 0xff
+
+/*
+ * PD_EVENT_MCU_CHARGE event definition :
+ * the payload is "struct usb_chg_measures"
+ * the data field contains the port state flags as defined below :
+ */
+/* Port partner is a dual role device */
+#define CHARGE_FLAGS_DUAL_ROLE BIT(15)
+/* Port is the pending override port */
+#define CHARGE_FLAGS_DELAYED_OVERRIDE BIT(14)
+/* Port is the override port */
+#define CHARGE_FLAGS_OVERRIDE BIT(13)
+/* Charger type */
+#define CHARGE_FLAGS_TYPE_SHIFT 3
+#define CHARGE_FLAGS_TYPE_MASK (0xf << CHARGE_FLAGS_TYPE_SHIFT)
+/* Power delivery role */
+#define CHARGE_FLAGS_ROLE_MASK (7 << 0)
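+
+/*
+ * Illustrative sketch (not part of the protocol itself): decoding the
+ * data field of a PD_EVENT_MCU_CHARGE entry. Local names are
+ * hypothetical.
+ *
+ *    uint16_t data = e->data;
+ *    int type = (data & CHARGE_FLAGS_TYPE_MASK) >> CHARGE_FLAGS_TYPE_SHIFT;
+ *    int role = data & CHARGE_FLAGS_ROLE_MASK;
+ *    int dual_role = !!(data & CHARGE_FLAGS_DUAL_ROLE);
+ */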
+
+/*
+ * PD_EVENT_PS_FAULT data field flags definition :
+ */
+#define PS_FAULT_OCP 1
+#define PS_FAULT_FAST_OCP 2
+#define PS_FAULT_OVP 3
+#define PS_FAULT_DISCH 4
+
+/*
+ * PD_EVENT_VIDEO_CODEC payload is "struct mcdp_info".
+ */
+struct mcdp_version {
+ uint8_t major;
+ uint8_t minor;
+ uint16_t build;
+} __ec_align4;
+
+struct mcdp_info {
+ uint8_t family[2];
+ uint8_t chipid[2];
+ struct mcdp_version irom;
+ struct mcdp_version fw;
+} __ec_align4;
+
+/* struct mcdp_info field decoding */
+#define MCDP_CHIPID(chipid) ((chipid[0] << 8) | chipid[1])
+#define MCDP_FAMILY(family) ((family[0] << 8) | family[1])
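+
+/*
+ * Illustrative sketch: decoding the byte pairs above from a received
+ * struct mcdp_info. The local name is hypothetical.
+ *
+ *    struct mcdp_info *mi;
+ *    uint16_t chipid = MCDP_CHIPID(mi->chipid);
+ *    uint16_t family = MCDP_FAMILY(mi->family);
+ */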
+
+/* Get/Set USB-PD Alternate mode info */
+#define EC_CMD_USB_PD_GET_AMODE 0x0116
+struct ec_params_usb_pd_get_mode_request {
+ uint16_t svid_idx; /* SVID index to get */
+ uint8_t port; /* port */
+} __ec_align_size1;
+
+struct ec_params_usb_pd_get_mode_response {
+ uint16_t svid; /* SVID */
+ uint16_t opos; /* Object Position */
+ uint32_t vdo[6]; /* Mode VDOs */
+} __ec_align4;
+
+#define EC_CMD_USB_PD_SET_AMODE 0x0117
+
+enum pd_mode_cmd {
+ PD_EXIT_MODE = 0,
+ PD_ENTER_MODE = 1,
+ /* Not a command. Do NOT remove. */
+ PD_MODE_CMD_COUNT,
+};
+
+struct ec_params_usb_pd_set_mode_request {
+ uint32_t cmd; /* enum pd_mode_cmd */
+ uint16_t svid; /* SVID to set */
+ uint8_t opos; /* Object Position */
+ uint8_t port; /* port */
+} __ec_align4;
+
+/* Ask the PD MCU to record a log of a requested type */
+#define EC_CMD_PD_WRITE_LOG_ENTRY 0x0118
+
+struct ec_params_pd_write_log_entry {
+ uint8_t type; /* event type : see PD_EVENT_xx above */
+ uint8_t port; /* port#, or 0 for events unrelated to a given port */
+} __ec_align1;
+
+
+/* Control USB-PD chip */
+#define EC_CMD_PD_CONTROL 0x0119
+
+enum ec_pd_control_cmd {
+ PD_SUSPEND = 0, /* Suspend the PD chip (EC: stop talking to PD) */
+ PD_RESUME, /* Resume the PD chip (EC: start talking to PD) */
+ PD_RESET, /* Force reset the PD chip */
+ PD_CONTROL_DISABLE, /* Disable further calls to this command */
+ PD_CHIP_ON, /* Power on the PD chip */
+};
+
+struct ec_params_pd_control {
+ uint8_t chip; /* chip id */
+ uint8_t subcmd;
+} __ec_align1;
+
+/* Get info about USB-C SS muxes */
+#define EC_CMD_USB_PD_MUX_INFO 0x011A
+
+struct ec_params_usb_pd_mux_info {
+ uint8_t port; /* USB-C port number */
+} __ec_align1;
+
+/* Flags representing mux state */
+#define USB_PD_MUX_NONE 0 /* Open switch */
+#define USB_PD_MUX_USB_ENABLED BIT(0) /* USB connected */
+#define USB_PD_MUX_DP_ENABLED BIT(1) /* DP connected */
+#define USB_PD_MUX_POLARITY_INVERTED BIT(2) /* CC line Polarity inverted */
+#define USB_PD_MUX_HPD_IRQ BIT(3) /* HPD IRQ is asserted */
+#define USB_PD_MUX_HPD_LVL BIT(4) /* HPD level is asserted */
+#define USB_PD_MUX_SAFE_MODE BIT(5) /* DP is in safe mode */
+#define USB_PD_MUX_TBT_COMPAT_ENABLED BIT(6) /* TBT compat enabled */
+#define USB_PD_MUX_USB4_ENABLED BIT(7) /* USB4 enabled */
+
+struct ec_response_usb_pd_mux_info {
+ uint8_t flags; /* USB_PD_MUX_*-encoded USB mux state */
+} __ec_align1;
+
+#define EC_CMD_PD_CHIP_INFO 0x011B
+
+struct ec_params_pd_chip_info {
+ uint8_t port; /* USB-C port number */
+ uint8_t renew; /* Force renewal */
+} __ec_align1;
+
+struct ec_response_pd_chip_info {
+ uint16_t vendor_id;
+ uint16_t product_id;
+ uint16_t device_id;
+ union {
+ uint8_t fw_version_string[8];
+ uint64_t fw_version_number;
+ };
+} __ec_align2;
+
+struct ec_response_pd_chip_info_v1 {
+ uint16_t vendor_id;
+ uint16_t product_id;
+ uint16_t device_id;
+ union {
+ uint8_t fw_version_string[8];
+ uint64_t fw_version_number;
+ };
+ union {
+ uint8_t min_req_fw_version_string[8];
+ uint64_t min_req_fw_version_number;
+ };
+} __ec_align2;
+
+/* Run RW signature verification and get status */
+#define EC_CMD_RWSIG_CHECK_STATUS 0x011C
+
+struct ec_response_rwsig_check_status {
+ uint32_t status;
+} __ec_align4;
+
+/* For controlling RWSIG task */
+#define EC_CMD_RWSIG_ACTION 0x011D
+
+enum rwsig_action {
+ RWSIG_ACTION_ABORT = 0, /* Abort RWSIG and prevent jumping */
+ RWSIG_ACTION_CONTINUE = 1, /* Jump to RW immediately */
+};
+
+struct ec_params_rwsig_action {
+ uint32_t action;
+} __ec_align4;
+
+/* Run verification on a slot */
+#define EC_CMD_EFS_VERIFY 0x011E
+
+struct ec_params_efs_verify {
+ uint8_t region; /* enum ec_flash_region */
+} __ec_align1;
+
+/*
+ * Retrieve info from Cros Board Info store. Response is based on the data
+ * type. Integers return a uint32. Strings return a string, using the response
+ * size to determine how big it is.
+ */
+#define EC_CMD_GET_CROS_BOARD_INFO 0x011F
+/*
+ * Write info into Cros Board Info on EEPROM. Write fails if the board has
+ * hardware write-protect enabled.
+ */
+#define EC_CMD_SET_CROS_BOARD_INFO 0x0120
+
+enum cbi_data_tag {
+ CBI_TAG_BOARD_VERSION = 0, /* uint32_t or smaller */
+ CBI_TAG_OEM_ID = 1, /* uint32_t or smaller */
+ CBI_TAG_SKU_ID = 2, /* uint32_t or smaller */
+ CBI_TAG_DRAM_PART_NUM = 3, /* variable length ascii, nul terminated. */
+ CBI_TAG_OEM_NAME = 4, /* variable length ascii, nul terminated. */
+ CBI_TAG_MODEL_ID = 5, /* uint32_t or smaller */
+ CBI_TAG_COUNT,
+};
+
+/*
+ * Flags to control read operation
+ *
+ * RELOAD: Invalidate cache and read data from EEPROM. Useful to verify
+ * write was successful without reboot.
+ */
+#define CBI_GET_RELOAD BIT(0)
+
+struct ec_params_get_cbi {
+ uint32_t tag; /* enum cbi_data_tag */
+ uint32_t flag; /* CBI_GET_* */
+} __ec_align4;
+
+/*
+ * Flags to control write behavior.
+ *
+ * NO_SYNC: Makes EC update data in RAM but skip writing to EEPROM. It's
+ * useful when writing multiple fields in a row.
+ * INIT: Need to be set when creating a new CBI from scratch. All fields
+ * will be initialized to zero first.
+ */
+#define CBI_SET_NO_SYNC BIT(0)
+#define CBI_SET_INIT BIT(1)
+
+struct ec_params_set_cbi {
+ uint32_t tag; /* enum cbi_data_tag */
+ uint32_t flag; /* CBI_SET_* */
+ uint32_t size; /* Data size */
+ uint8_t data[]; /* For string and raw data */
+} __ec_align1;
+
+/*
+ * Information about resets of the AP by the EC and the EC's own uptime.
+ */
+#define EC_CMD_GET_UPTIME_INFO 0x0121
+
+struct ec_response_uptime_info {
+ /*
+ * Number of milliseconds since the last EC boot. Sysjump resets
+ * typically do not restart the EC's time_since_boot epoch.
+ *
+ * WARNING: The EC's sense of time is much less accurate than the AP's
+ * sense of time, in both phase and frequency. This timebase is similar
+ * to CLOCK_MONOTONIC_RAW, but with 1% or more frequency error.
+ */
+ uint32_t time_since_ec_boot_ms;
+
+ /*
+ * Number of times the AP was reset by the EC since the last EC boot.
+ * Note that the AP may be held in reset by the EC during the initial
+ * boot sequence, such that the very first AP boot may count as more
+ * than one here.
+ */
+ uint32_t ap_resets_since_ec_boot;
+
+ /*
+ * The set of flags which describe the EC's most recent reset. See
+ * include/system.h RESET_FLAG_* for details.
+ */
+ uint32_t ec_reset_flags;
+
+ /* Empty log entries have both the cause and timestamp set to zero. */
+ struct ap_reset_log_entry {
+ /*
+ * See include/chipset.h: enum chipset_{reset,shutdown}_reason
+ * for details.
+ */
+ uint16_t reset_cause;
+
+ /* Reserved for protocol growth. */
+ uint16_t reserved;
+
+ /*
+ * The time of the reset's assertion, in milliseconds since the
+ * last EC boot, in the same epoch as time_since_ec_boot_ms.
+ * Set to zero if the log entry is empty.
+ */
+ uint32_t reset_time_ms;
+ } recent_ap_reset[4];
+} __ec_align4;
+
+/*
+ * Add entropy to the device secret (stored in the rollback region).
+ *
+ * Depending on the chip, the operation may take a long time (e.g. to erase
+ * flash), so the commands are asynchronous.
+ */
+#define EC_CMD_ADD_ENTROPY 0x0122
+
+enum add_entropy_action {
+ /* Add entropy to the current secret. */
+ ADD_ENTROPY_ASYNC = 0,
+ /*
+ * Add entropy, and also make sure that the previous secret is erased.
+ * (this can be implemented by adding entropy multiple times until
+	 * all rollback blocks have been overwritten).
+ */
+ ADD_ENTROPY_RESET_ASYNC = 1,
+ /* Read back result from the previous operation. */
+ ADD_ENTROPY_GET_RESULT = 2,
+};
+
+struct ec_params_rollback_add_entropy {
+ uint8_t action;
+} __ec_align1;
+
+/*
+ * Perform a single read of a given ADC channel.
+ */
+#define EC_CMD_ADC_READ 0x0123
+
+struct ec_params_adc_read {
+ uint8_t adc_channel;
+} __ec_align1;
+
+struct ec_response_adc_read {
+ int32_t adc_value;
+} __ec_align4;
+
+/*
+ * Read back rollback info
+ */
+#define EC_CMD_ROLLBACK_INFO 0x0124
+
+struct ec_response_rollback_info {
+ int32_t id; /* Incrementing number to indicate which region to use. */
+ int32_t rollback_min_version;
+ int32_t rw_rollback_version;
+} __ec_align4;
+
+
+/* Issue AP reset */
+#define EC_CMD_AP_RESET 0x0125
+
+/*
+ * Get the number of peripheral charge ports
+ */
+#define EC_CMD_PCHG_COUNT 0x0134
+
+#define EC_PCHG_MAX_PORTS 8
+
+struct ec_response_pchg_count {
+ uint8_t port_count;
+} __ec_align1;
+
+/*
+ * Get the status of a peripheral charge port
+ */
+#define EC_CMD_PCHG 0x0135
+
+struct ec_params_pchg {
+ uint8_t port;
+} __ec_align1;
+
+struct ec_response_pchg {
+ uint32_t error; /* enum pchg_error */
+ uint8_t state; /* enum pchg_state state */
+ uint8_t battery_percentage;
+ uint8_t unused0;
+ uint8_t unused1;
+ /* Fields added in version 1 */
+ uint32_t fw_version;
+ uint32_t dropped_event_count;
+} __ec_align2;
+
+enum pchg_state {
+ /* Charger is reset and not initialized. */
+ PCHG_STATE_RESET = 0,
+ /* Charger is initialized or disabled. */
+ PCHG_STATE_INITIALIZED,
+ /* Charger is enabled and ready to detect a device. */
+ PCHG_STATE_ENABLED,
+ /* Device is in proximity. */
+ PCHG_STATE_DETECTED,
+ /* Device is being charged. */
+ PCHG_STATE_CHARGING,
+ /* Device is fully charged. It implies DETECTED (& not charging). */
+ PCHG_STATE_FULL,
+ /* In download (a.k.a. firmware update) mode */
+ PCHG_STATE_DOWNLOAD,
+ /* In download mode. Ready for receiving data. */
+ PCHG_STATE_DOWNLOADING,
+ /* Device is ready for data communication. */
+ PCHG_STATE_CONNECTED,
+ /* Put no more entry below */
+ PCHG_STATE_COUNT,
+};
+
+#define EC_PCHG_STATE_TEXT { \
+ [PCHG_STATE_RESET] = "RESET", \
+ [PCHG_STATE_INITIALIZED] = "INITIALIZED", \
+ [PCHG_STATE_ENABLED] = "ENABLED", \
+ [PCHG_STATE_DETECTED] = "DETECTED", \
+ [PCHG_STATE_CHARGING] = "CHARGING", \
+ [PCHG_STATE_FULL] = "FULL", \
+ [PCHG_STATE_DOWNLOAD] = "DOWNLOAD", \
+ [PCHG_STATE_DOWNLOADING] = "DOWNLOADING", \
+ [PCHG_STATE_CONNECTED] = "CONNECTED", \
+ }
+
+/*
+ * Update firmware of peripheral chip
+ */
+#define EC_CMD_PCHG_UPDATE 0x0136
+
+/* Port number is encoded in bit[28:31]. */
+#define EC_MKBP_PCHG_PORT_SHIFT 28
+/* Utility macro for converting MKBP event to port number. */
+#define EC_MKBP_PCHG_EVENT_TO_PORT(e) (((e) >> EC_MKBP_PCHG_PORT_SHIFT) & 0xf)
+/* Utility macro for extracting event bits. */
+#define EC_MKBP_PCHG_EVENT_MASK(e) ((e) \
+ & GENMASK(EC_MKBP_PCHG_PORT_SHIFT-1, 0))
+
+#define EC_MKBP_PCHG_UPDATE_OPENED BIT(0)
+#define EC_MKBP_PCHG_WRITE_COMPLETE BIT(1)
+#define EC_MKBP_PCHG_UPDATE_CLOSED BIT(2)
+#define EC_MKBP_PCHG_UPDATE_ERROR BIT(3)
+#define EC_MKBP_PCHG_DEVICE_EVENT BIT(4)
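+
+/*
+ * Illustrative sketch (not part of the protocol itself): splitting a raw
+ * MKBP PCHG event word into port number and event bits. The local name
+ * is hypothetical.
+ *
+ *    uint32_t ev;
+ *    int port = EC_MKBP_PCHG_EVENT_TO_PORT(ev);
+ *    uint32_t bits = EC_MKBP_PCHG_EVENT_MASK(ev);
+ *    if (bits & EC_MKBP_PCHG_WRITE_COMPLETE)
+ *            ...
+ */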
+
+enum ec_pchg_update_cmd {
+ /* Reset chip to normal mode. */
+ EC_PCHG_UPDATE_CMD_RESET_TO_NORMAL = 0,
+ /* Reset and put a chip in update (a.k.a. download) mode. */
+ EC_PCHG_UPDATE_CMD_OPEN,
+ /* Write a block of data containing FW image. */
+ EC_PCHG_UPDATE_CMD_WRITE,
+ /* Close update session. */
+ EC_PCHG_UPDATE_CMD_CLOSE,
+ /* End of commands */
+ EC_PCHG_UPDATE_CMD_COUNT,
+};
+
+struct ec_params_pchg_update {
+ /* PCHG port number */
+ uint8_t port;
+ /* enum ec_pchg_update_cmd */
+ uint8_t cmd;
+ /* Padding */
+ uint8_t reserved0;
+ uint8_t reserved1;
+ /* Version of new firmware */
+ uint32_t version;
+ /* CRC32 of new firmware */
+ uint32_t crc32;
+ /* Address in chip memory where <data> is written to */
+ uint32_t addr;
+ /* Size of <data> */
+ uint32_t size;
+ /* Partial data of new firmware */
+ uint8_t data[];
+} __ec_align4;
+
+BUILD_ASSERT(EC_PCHG_UPDATE_CMD_COUNT
+ < BIT(sizeof(((struct ec_params_pchg_update *)0)->cmd)*8));
+
+struct ec_response_pchg_update {
+ /* Block size */
+ uint32_t block_size;
+} __ec_align4;
+
+
+/*****************************************************************************/
+/* Voltage regulator controls */
+
+/*
+ * Get basic info of voltage regulator for given index.
+ *
+ * Returns the regulator name and supported voltage list in mV.
+ */
+#define EC_CMD_REGULATOR_GET_INFO 0x012C
+
+/* Maximum length of regulator name */
+#define EC_REGULATOR_NAME_MAX_LEN 16
+
+/* Maximum length of the supported voltage list. */
+#define EC_REGULATOR_VOLTAGE_MAX_COUNT 16
+
+struct ec_params_regulator_get_info {
+ uint32_t index;
+} __ec_align4;
+
+struct ec_response_regulator_get_info {
+ char name[EC_REGULATOR_NAME_MAX_LEN];
+ uint16_t num_voltages;
+ uint16_t voltages_mv[EC_REGULATOR_VOLTAGE_MAX_COUNT];
+} __ec_align2;
+
+/*
+ * Configure the regulator as enabled / disabled.
+ */
+#define EC_CMD_REGULATOR_ENABLE 0x012D
+
+struct ec_params_regulator_enable {
+ uint32_t index;
+ uint8_t enable;
+} __ec_align4;
+
+/*
+ * Query if the regulator is enabled.
+ *
+ * Returns 1 if the regulator is enabled, 0 if not.
+ */
+#define EC_CMD_REGULATOR_IS_ENABLED 0x012E
+
+struct ec_params_regulator_is_enabled {
+ uint32_t index;
+} __ec_align4;
+
+struct ec_response_regulator_is_enabled {
+ uint8_t enabled;
+} __ec_align1;
+
+/*
+ * Set voltage for the voltage regulator within the range specified.
+ *
+ * The driver should select the voltage in range closest to min_mv.
+ *
+ * Also note that this might be called before the regulator is enabled, and the
+ * setting should be in effect after the regulator is enabled.
+ */
+#define EC_CMD_REGULATOR_SET_VOLTAGE 0x012F
+
+struct ec_params_regulator_set_voltage {
+ uint32_t index;
+ uint32_t min_mv;
+ uint32_t max_mv;
+} __ec_align4;
+
+/*
+ * Get the currently configured voltage for the voltage regulator.
+ *
+ * Note that this might be called before the regulator is enabled, and this
+ * should return the configured output voltage if the regulator is enabled.
+ */
+#define EC_CMD_REGULATOR_GET_VOLTAGE 0x0130
+
+struct ec_params_regulator_get_voltage {
+ uint32_t index;
+} __ec_align4;
+
+struct ec_response_regulator_get_voltage {
+ uint32_t voltage_mv;
+} __ec_align4;
+
+/*
+ * Gather all discovery information for the given port and partner type.
+ *
+ * Note that if discovery has not yet completed, only the currently completed
+ * responses will be filled in. If the discovery data structures are changed
+ * in the process of the command running, BUSY will be returned.
+ *
+ * VDO field sizes are set to the maximum possible number of VDOs a VDM may
+ * contain, while the number of SVIDs here is selected to fit within the PROTO2
+ * maximum parameter size.
+ */
+#define EC_CMD_TYPEC_DISCOVERY 0x0131
+
+enum typec_partner_type {
+ TYPEC_PARTNER_SOP = 0,
+ TYPEC_PARTNER_SOP_PRIME = 1,
+};
+
+struct ec_params_typec_discovery {
+ uint8_t port;
+ uint8_t partner_type; /* enum typec_partner_type */
+} __ec_align1;
+
+struct svid_mode_info {
+ uint16_t svid;
+ uint16_t mode_count; /* Number of modes partner sent */
+ uint32_t mode_vdo[6]; /* Max VDOs allowed after VDM header is 6 */
+};
+
+struct ec_response_typec_discovery {
+ uint8_t identity_count; /* Number of identity VDOs partner sent */
+ uint8_t svid_count; /* Number of SVIDs partner sent */
+ uint16_t reserved;
+ uint32_t discovery_vdo[6]; /* Max VDOs allowed after VDM header is 6 */
+ struct svid_mode_info svids[];
+} __ec_align1;
+
+/* USB Type-C commands for AP-controlled device policy. */
+#define EC_CMD_TYPEC_CONTROL 0x0132
+
+enum typec_control_command {
+ TYPEC_CONTROL_COMMAND_EXIT_MODES,
+ TYPEC_CONTROL_COMMAND_CLEAR_EVENTS,
+ TYPEC_CONTROL_COMMAND_ENTER_MODE,
+ TYPEC_CONTROL_COMMAND_TBT_UFP_REPLY,
+ TYPEC_CONTROL_COMMAND_USB_MUX_SET,
+ TYPEC_CONTROL_COMMAND_BIST_SHARE_MODE,
+ TYPEC_CONTROL_COMMAND_SEND_VDM_REQ,
+};
+
+/* Replies the AP may specify to the TBT EnterMode command as a UFP */
+enum typec_tbt_ufp_reply {
+ TYPEC_TBT_UFP_REPLY_NAK,
+ TYPEC_TBT_UFP_REPLY_ACK,
+};
+
+struct typec_usb_mux_set {
+ uint8_t mux_index; /* Index of the mux to set in the chain */
+ uint8_t mux_flags; /* USB_PD_MUX_*-encoded USB mux state to set */
+} __ec_align1;
+
+#define VDO_MAX_SIZE 7
+
+struct typec_vdm_req {
+ /* VDM data, including VDM header */
+ uint32_t vdm_data[VDO_MAX_SIZE];
+ /* Number of 32-bit fields filled in */
+ uint8_t vdm_data_objects;
+ /* Partner to address - see enum typec_partner_type */
+ uint8_t partner_type;
+} __ec_align1;
+
+struct ec_params_typec_control {
+ uint8_t port;
+ uint8_t command; /* enum typec_control_command */
+ uint16_t reserved;
+
+ /*
+ * This section will be interpreted based on |command|. Define a
+ * placeholder structure to avoid having to increase the size and bump
+ * the command version when adding new sub-commands.
+ */
+ union {
+ uint32_t clear_events_mask;
+ uint8_t mode_to_enter; /* enum typec_mode */
+ uint8_t tbt_ufp_reply; /* enum typec_tbt_ufp_reply */
+ struct typec_usb_mux_set mux_params;
+		/* Used for VDM_REQ */
+ struct typec_vdm_req vdm_req_params;
+ uint8_t placeholder[128];
+ };
+} __ec_align1;
+
+/*
+ * Gather all status information for a port.
+ *
+ * Note: this covers many of the return fields from the deprecated
+ * EC_CMD_USB_PD_CONTROL command, except those that are redundant with the
+ * discovery data. The "enum pd_cc_states" is defined with the deprecated
+ * EC_CMD_USB_PD_CONTROL command.
+ *
+ * This also combines in the EC_CMD_USB_PD_MUX_INFO flags.
+ */
+#define EC_CMD_TYPEC_STATUS 0x0133
+
+/*
+ * Power role.
+ *
+ * Note this is also used for PD header creation, and values align to those in
+ * the Power Delivery Specification Revision 3.0 (See
+ * 6.2.1.1.4 Port Power Role).
+ */
+enum pd_power_role {
+ PD_ROLE_SINK = 0,
+ PD_ROLE_SOURCE = 1
+};
+
+/*
+ * Data role.
+ *
+ * Note this is also used for PD header creation, and the first two values
+ * align to those in the Power Delivery Specification Revision 3.0 (See
+ * 6.2.1.1.6 Port Data Role).
+ */
+enum pd_data_role {
+ PD_ROLE_UFP = 0,
+ PD_ROLE_DFP = 1,
+ PD_ROLE_DISCONNECTED = 2,
+};
+
+enum pd_vconn_role {
+ PD_ROLE_VCONN_OFF = 0,
+ PD_ROLE_VCONN_SRC = 1,
+};
+
+/*
+ * Note: BIT(0) may be used to determine whether the polarity is CC1 or CC2,
+ * regardless of whether a debug accessory is connected.
+ */
+enum tcpc_cc_polarity {
+ /*
+ * _CCx: is used to indicate the polarity while not connected to
+ * a Debug Accessory. Only one CC line will assert a resistor and
+ * the other will be open.
+ */
+ POLARITY_CC1 = 0,
+ POLARITY_CC2 = 1,
+
+ /*
+ * _CCx_DTS is used to indicate the polarity while connected to a
+ * SRC Debug Accessory. Assert resistors on both lines.
+ */
+ POLARITY_CC1_DTS = 2,
+ POLARITY_CC2_DTS = 3,
+
+ /*
+ * The current TCPC code relies on these specific POLARITY values.
+	 * A check is added so that if this list grows for any reason,
+	 * it will give a hint that other places need to be
+	 * adjusted.
+ */
+ POLARITY_COUNT
+};
+
+#define PD_STATUS_EVENT_SOP_DISC_DONE BIT(0)
+#define PD_STATUS_EVENT_SOP_PRIME_DISC_DONE BIT(1)
+#define PD_STATUS_EVENT_HARD_RESET BIT(2)
+#define PD_STATUS_EVENT_DISCONNECTED BIT(3)
+#define PD_STATUS_EVENT_MUX_0_SET_DONE BIT(4)
+#define PD_STATUS_EVENT_MUX_1_SET_DONE BIT(5)
+#define PD_STATUS_EVENT_VDM_REQ_REPLY BIT(6)
+#define PD_STATUS_EVENT_VDM_REQ_FAILED BIT(7)
+#define PD_STATUS_EVENT_VDM_ATTENTION BIT(8)
+
+struct ec_params_typec_status {
+ uint8_t port;
+} __ec_align1;
+
+struct ec_response_typec_status {
+ uint8_t pd_enabled; /* PD communication enabled - bool */
+ uint8_t dev_connected; /* Device connected - bool */
+ uint8_t sop_connected; /* Device is SOP PD capable - bool */
+ uint8_t source_cap_count; /* Number of Source Cap PDOs */
+
+ uint8_t power_role; /* enum pd_power_role */
+ uint8_t data_role; /* enum pd_data_role */
+ uint8_t vconn_role; /* enum pd_vconn_role */
+ uint8_t sink_cap_count; /* Number of Sink Cap PDOs */
+
+ uint8_t polarity; /* enum tcpc_cc_polarity */
+ uint8_t cc_state; /* enum pd_cc_states */
+	uint8_t dp_pin; /* DP pin mode (MODE_DP_PIN_[A-E]) */
+ uint8_t mux_state; /* USB_PD_MUX* - encoded mux state */
+
+ char tc_state[32]; /* TC state name */
+
+ uint32_t events; /* PD_STATUS_EVENT bitmask */
+
+ /*
+ * BCD PD revisions for partners
+ *
+	 * The format has the PD major revision in the upper nibble, and the
+	 * PD minor version in the next nibble. The following two nibbles are
+	 * currently 0.
+ * ex. PD 3.2 would map to 0x3200
+ *
+ * PD major/minor will be 0 if no PD device is connected.
+ */
+ uint16_t sop_revision;
+ uint16_t sop_prime_revision;
+
+ uint32_t source_cap_pdos[7]; /* Max 7 PDOs can be present */
+
+ uint32_t sink_cap_pdos[7]; /* Max 7 PDOs can be present */
+} __ec_align1;
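+
+/*
+ * Illustrative sketch (not part of the protocol itself): extracting the
+ * BCD PD revision fields described above. The local name is
+ * hypothetical.
+ *
+ *    uint16_t rev = resp->sop_revision;    (0x3200 for PD 3.2)
+ *    int major = (rev >> 12) & 0xf;
+ *    int minor = (rev >> 8) & 0xf;
+ */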
+
+/*
+ * Gather the response to the most recent VDM REQ from the AP, as well
+ * as popping the oldest VDM:Attention from the DPM queue
+ */
+#define EC_CMD_TYPEC_VDM_RESPONSE 0x013C
+
+struct ec_params_typec_vdm_response {
+ uint8_t port;
+} __ec_align1;
+
+struct ec_response_typec_vdm_response {
+ /* Number of 32-bit fields filled in */
+ uint8_t vdm_data_objects;
+ /* Partner to address - see enum typec_partner_type */
+ uint8_t partner_type;
+ /* enum ec_status describing VDM response */
+ uint16_t vdm_response_err;
+ /* VDM data, including VDM header */
+ uint32_t vdm_response[VDO_MAX_SIZE];
+ /* Number of 32-bit Attention fields filled in */
+ uint8_t vdm_attention_objects;
+ /* Number of remaining messages to consume */
+ uint8_t vdm_attention_left;
+ /* Reserved */
+ uint16_t reserved1;
+ /* VDM:Attention contents */
+ uint32_t vdm_attention[2];
+} __ec_align1;
+
+#undef VDO_MAX_SIZE
+
+/*****************************************************************************/
+/* The command range 0x200-0x2FF is reserved for Rotor. */
+
+/*****************************************************************************/
+/*
+ * Reserve a range of host commands for the CR51 firmware.
+ */
+#define EC_CMD_CR51_BASE 0x0300
+#define EC_CMD_CR51_LAST 0x03FF
+
+/*****************************************************************************/
+/* Fingerprint MCU commands: range 0x0400-0x04FF */
+
+/* Fingerprint SPI sensor passthru command: prototyping ONLY */
+#define EC_CMD_FP_PASSTHRU 0x0400
+
+#define EC_FP_FLAG_NOT_COMPLETE 0x1
+
+struct ec_params_fp_passthru {
+ uint16_t len; /* Number of bytes to write then read */
+ uint16_t flags; /* EC_FP_FLAG_xxx */
+ uint8_t data[]; /* Data to send */
+} __ec_align2;
+
+/* Configure the Fingerprint MCU behavior */
+#define EC_CMD_FP_MODE 0x0402
+
+/* Put the sensor in its lowest power mode */
+#define FP_MODE_DEEPSLEEP BIT(0)
+/* Wait to see a finger on the sensor */
+#define FP_MODE_FINGER_DOWN BIT(1)
+/* Poll until the finger has left the sensor */
+#define FP_MODE_FINGER_UP BIT(2)
+/* Capture the current finger image */
+#define FP_MODE_CAPTURE BIT(3)
+/* Finger enrollment session on-going */
+#define FP_MODE_ENROLL_SESSION BIT(4)
+/* Enroll the current finger image */
+#define FP_MODE_ENROLL_IMAGE BIT(5)
+/* Try to match the current finger image */
+#define FP_MODE_MATCH BIT(6)
+/* Reset and re-initialize the sensor. */
+#define FP_MODE_RESET_SENSOR BIT(7)
+/* special value: don't change anything just read back current mode */
+#define FP_MODE_DONT_CHANGE BIT(31)
+
+#define FP_VALID_MODES (FP_MODE_DEEPSLEEP | \
+ FP_MODE_FINGER_DOWN | \
+ FP_MODE_FINGER_UP | \
+ FP_MODE_CAPTURE | \
+ FP_MODE_ENROLL_SESSION | \
+ FP_MODE_ENROLL_IMAGE | \
+ FP_MODE_MATCH | \
+ FP_MODE_RESET_SENSOR | \
+ FP_MODE_DONT_CHANGE)
+
+/* Capture types defined in bits [30..28] */
+#define FP_MODE_CAPTURE_TYPE_SHIFT 28
+#define FP_MODE_CAPTURE_TYPE_MASK (0x7 << FP_MODE_CAPTURE_TYPE_SHIFT)
+/*
+ * This enum must remain ordered, if you add new values you must ensure that
+ * FP_CAPTURE_TYPE_MAX is still the last one.
+ */
+enum fp_capture_type {
+ /* Full blown vendor-defined capture (produces 'frame_size' bytes) */
+ FP_CAPTURE_VENDOR_FORMAT = 0,
+ /* Simple raw image capture (produces width x height x bpp bits) */
+ FP_CAPTURE_SIMPLE_IMAGE = 1,
+ /* Self test pattern (e.g. checkerboard) */
+ FP_CAPTURE_PATTERN0 = 2,
+ /* Self test pattern (e.g. inverted checkerboard) */
+ FP_CAPTURE_PATTERN1 = 3,
+ /* Capture for Quality test with fixed contrast */
+ FP_CAPTURE_QUALITY_TEST = 4,
+ /* Capture for pixel reset value test */
+ FP_CAPTURE_RESET_TEST = 5,
+ FP_CAPTURE_TYPE_MAX,
+};
+/* Extracts the capture type from the sensor 'mode' word */
+#define FP_CAPTURE_TYPE(mode) (((mode) & FP_MODE_CAPTURE_TYPE_MASK) \
+ >> FP_MODE_CAPTURE_TYPE_SHIFT)
+
+struct ec_params_fp_mode {
+ uint32_t mode; /* as defined by FP_MODE_ constants */
+} __ec_align4;
+
+struct ec_response_fp_mode {
+ uint32_t mode; /* as defined by FP_MODE_ constants */
+} __ec_align4;
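+
+/*
+ * Illustrative sketch (not part of the protocol itself): requesting a
+ * simple raw image capture by packing the capture type into the mode
+ * word.
+ *
+ *    struct ec_params_fp_mode p = {
+ *            .mode = FP_MODE_CAPTURE |
+ *                    (FP_CAPTURE_SIMPLE_IMAGE << FP_MODE_CAPTURE_TYPE_SHIFT),
+ *    };
+ */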
+
+/* Retrieve Fingerprint sensor information */
+#define EC_CMD_FP_INFO 0x0403
+
+/* Number of dead pixels detected on the last maintenance */
+#define FP_ERROR_DEAD_PIXELS(errors) ((errors) & 0x3FF)
+/* Unknown number of dead pixels detected on the last maintenance */
+#define FP_ERROR_DEAD_PIXELS_UNKNOWN (0x3FF)
+/* No interrupt from the sensor */
+#define FP_ERROR_NO_IRQ BIT(12)
+/* SPI communication error */
+#define FP_ERROR_SPI_COMM BIT(13)
+/* Invalid sensor Hardware ID */
+#define FP_ERROR_BAD_HWID BIT(14)
+/* Sensor initialization failed */
+#define FP_ERROR_INIT_FAIL BIT(15)
+
+struct ec_response_fp_info_v0 {
+ /* Sensor identification */
+ uint32_t vendor_id;
+ uint32_t product_id;
+ uint32_t model_id;
+ uint32_t version;
+ /* Image frame characteristics */
+ uint32_t frame_size;
+ uint32_t pixel_format; /* using V4L2_PIX_FMT_ */
+ uint16_t width;
+ uint16_t height;
+ uint16_t bpp;
+ uint16_t errors; /* see FP_ERROR_ flags above */
+} __ec_align4;
+
+struct ec_response_fp_info {
+ /* Sensor identification */
+ uint32_t vendor_id;
+ uint32_t product_id;
+ uint32_t model_id;
+ uint32_t version;
+ /* Image frame characteristics */
+ uint32_t frame_size;
+ uint32_t pixel_format; /* using V4L2_PIX_FMT_ */
+ uint16_t width;
+ uint16_t height;
+ uint16_t bpp;
+ uint16_t errors; /* see FP_ERROR_ flags above */
+ /* Template/finger current information */
+ uint32_t template_size; /* max template size in bytes */
+ uint16_t template_max; /* maximum number of fingers/templates */
+ uint16_t template_valid; /* number of valid fingers/templates */
+ uint32_t template_dirty; /* bitmap of templates with MCU side changes */
+ uint32_t template_version; /* version of the template format */
+} __ec_align4;
+
+/* Get the last captured finger frame or a template content */
+#define EC_CMD_FP_FRAME 0x0404
+
+/* constants defining the 'offset' field which also contains the frame index */
+#define FP_FRAME_INDEX_SHIFT 28
+/* Frame buffer where the captured image is stored */
+#define FP_FRAME_INDEX_RAW_IMAGE 0
+/* First frame buffer holding a template */
+#define FP_FRAME_INDEX_TEMPLATE 1
+#define FP_FRAME_GET_BUFFER_INDEX(offset) ((offset) >> FP_FRAME_INDEX_SHIFT)
+#define FP_FRAME_OFFSET_MASK 0x0FFFFFFF
+
+/* Version of the format of the encrypted templates. */
+#define FP_TEMPLATE_FORMAT_VERSION 3
+
+/* Constants for encryption parameters */
+#define FP_CONTEXT_NONCE_BYTES 12
+#define FP_CONTEXT_USERID_WORDS (32 / sizeof(uint32_t))
+#define FP_CONTEXT_TAG_BYTES 16
+#define FP_CONTEXT_SALT_BYTES 16
+#define FP_CONTEXT_TPM_BYTES 32
+
+struct ec_fp_template_encryption_metadata {
+ /*
+ * Version of the structure format (N=3).
+ */
+ uint16_t struct_version;
+ /* Reserved bytes, set to 0. */
+ uint16_t reserved;
+ /*
+	 * The salt is *only* ever used for key derivation. The nonce is
+	 * unique; a different one is used for every message.
+ */
+ uint8_t nonce[FP_CONTEXT_NONCE_BYTES];
+ uint8_t salt[FP_CONTEXT_SALT_BYTES];
+ uint8_t tag[FP_CONTEXT_TAG_BYTES];
+};
+
+struct ec_params_fp_frame {
+ /*
+ * The offset contains the template index or FP_FRAME_INDEX_RAW_IMAGE
+ * in the high nibble, and the real offset within the frame in
+ * FP_FRAME_OFFSET_MASK.
+ */
+ uint32_t offset;
+ uint32_t size;
+} __ec_align4;
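+
+/*
+ * Illustrative sketch (not part of the protocol itself): reading the
+ * first bytes of the first template buffer. The size here is an
+ * arbitrary example value.
+ *
+ *    struct ec_params_fp_frame p = {
+ *            .offset = (FP_FRAME_INDEX_TEMPLATE << FP_FRAME_INDEX_SHIFT) | 0,
+ *            .size = 128,
+ *    };
+ */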
+
+/* Load a template into the MCU */
+#define EC_CMD_FP_TEMPLATE 0x0405
+
+/* Flag in the 'size' field indicating that the full template has been sent */
+#define FP_TEMPLATE_COMMIT 0x80000000
+
+struct ec_params_fp_template {
+ uint32_t offset;
+ uint32_t size;
+ uint8_t data[];
+} __ec_align4;
+
+/* Clear the current fingerprint user context and set a new one */
+#define EC_CMD_FP_CONTEXT 0x0406
+
+struct ec_params_fp_context {
+ uint32_t userid[FP_CONTEXT_USERID_WORDS];
+} __ec_align4;
+
+#define EC_CMD_FP_STATS 0x0407
+
+#define FPSTATS_CAPTURE_INV BIT(0)
+#define FPSTATS_MATCHING_INV BIT(1)
+
+struct ec_response_fp_stats {
+ uint32_t capture_time_us;
+ uint32_t matching_time_us;
+ uint32_t overall_time_us;
+ struct {
+ uint32_t lo;
+ uint32_t hi;
+ } overall_t0;
+ uint8_t timestamps_invalid;
+ int8_t template_matched;
+} __ec_align2;
+
+#define EC_CMD_FP_SEED 0x0408
+struct ec_params_fp_seed {
+ /*
+ * Version of the structure format (N=3).
+ */
+ uint16_t struct_version;
+ /* Reserved bytes, set to 0. */
+ uint16_t reserved;
+ /* Seed from the TPM. */
+ uint8_t seed[FP_CONTEXT_TPM_BYTES];
+} __ec_align4;
+
+#define EC_CMD_FP_ENC_STATUS 0x0409
+
+/* FP TPM seed has been set or not */
+#define FP_ENC_STATUS_SEED_SET BIT(0)
+
+struct ec_response_fp_encryption_status {
+ /* Used bits in encryption engine status */
+ uint32_t valid_flags;
+ /* Encryption engine status */
+ uint32_t status;
+} __ec_align4;
+
+/*****************************************************************************/
+/* Touchpad MCU commands: range 0x0500-0x05FF */
+
+/* Perform touchpad self test */
+#define EC_CMD_TP_SELF_TEST 0x0500
+
+/* Get number of frame types, and the size of each type */
+#define EC_CMD_TP_FRAME_INFO 0x0501
+
+struct ec_response_tp_frame_info {
+ uint32_t n_frames;
+ uint32_t frame_sizes[];
+} __ec_align4;
+
+/* Create a snapshot of current frame readings */
+#define EC_CMD_TP_FRAME_SNAPSHOT 0x0502
+
+/* Read the frame */
+#define EC_CMD_TP_FRAME_GET 0x0503
+
+struct ec_params_tp_frame_get {
+ uint32_t frame_index;
+ uint32_t offset;
+ uint32_t size;
+} __ec_align4;
+
+/*****************************************************************************/
+/* EC-EC communication commands: range 0x0600-0x06FF */
+
+#define EC_COMM_TEXT_MAX 8
+
+/*
+ * Get battery static information, i.e. information that never changes, or
+ * changes only very infrequently.
+ */
+#define EC_CMD_BATTERY_GET_STATIC 0x0600
+
+/**
+ * struct ec_params_battery_static_info - Battery static info parameters
+ * @index: Battery index.
+ */
+struct ec_params_battery_static_info {
+ uint8_t index;
+} __ec_align_size1;
+
+/**
+ * struct ec_response_battery_static_info - Battery static info response
+ * @design_capacity: Battery Design Capacity (mAh)
+ * @design_voltage: Battery Design Voltage (mV)
+ * @manufacturer: Battery Manufacturer String
+ * @model: Battery Model Number String
+ * @serial: Battery Serial Number String
+ * @type: Battery Type String
+ * @cycle_count: Battery Cycle Count
+ */
+struct ec_response_battery_static_info {
+ uint16_t design_capacity;
+ uint16_t design_voltage;
+ char manufacturer[EC_COMM_TEXT_MAX];
+ char model[EC_COMM_TEXT_MAX];
+ char serial[EC_COMM_TEXT_MAX];
+ char type[EC_COMM_TEXT_MAX];
+ /* TODO(crbug.com/795991): Consider moving to dynamic structure. */
+ uint32_t cycle_count;
+} __ec_align4;
+
+/*
+ * Get battery dynamic information, i.e. information that is likely to change
+ * every time it is read.
+ */
+#define EC_CMD_BATTERY_GET_DYNAMIC 0x0601
+
+/**
+ * struct ec_params_battery_dynamic_info - Battery dynamic info parameters
+ * @index: Battery index.
+ */
+struct ec_params_battery_dynamic_info {
+ uint8_t index;
+} __ec_align_size1;
+
+/**
+ * struct ec_response_battery_dynamic_info - Battery dynamic info response
+ * @actual_voltage: Battery voltage (mV)
+ * @actual_current: Battery current (mA); negative=discharging
+ * @remaining_capacity: Remaining capacity (mAh)
+ * @full_capacity: Capacity (mAh, might change occasionally)
+ * @flags: Flags, see EC_BATT_FLAG_*
+ * @desired_voltage: Charging voltage desired by battery (mV)
+ * @desired_current: Charging current desired by battery (mA)
+ */
+struct ec_response_battery_dynamic_info {
+ int16_t actual_voltage;
+ int16_t actual_current;
+ int16_t remaining_capacity;
+ int16_t full_capacity;
+ int16_t flags;
+ int16_t desired_voltage;
+ int16_t desired_current;
+} __ec_align2;
+
+/*
+ * Control the charger chip. Used to control the charger chip on the slave EC.
+ */
+#define EC_CMD_CHARGER_CONTROL 0x0602
+
+/**
+ * struct ec_params_charger_control - Charger control parameters
+ * @max_current: Charger current (mA). Positive to allow base to draw up to
+ * max_current and (possibly) charge battery, negative to request current
+ * from base (OTG).
+ * @otg_voltage: Voltage (mV) to use in OTG mode, ignored if max_current is
+ * >= 0.
+ * @allow_charging: Allow base battery charging (only makes sense if
+ * max_current > 0).
+ */
+struct ec_params_charger_control {
+ int16_t max_current;
+ uint16_t otg_voltage;
+ uint8_t allow_charging;
+} __ec_align_size1;
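
For example (values are illustrative), letting the base draw up to 2.5 A and charge its battery would look like:

    struct ec_params_charger_control p = {
        .max_current = 2500,	/* mA the base may draw */
        .otg_voltage = 0,	/* ignored since max_current >= 0 */
        .allow_charging = 1,	/* base battery may charge */
    };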
+
+/* Get ACK from the USB-C SS muxes */
+#define EC_CMD_USB_PD_MUX_ACK 0x0603
+
+struct ec_params_usb_pd_mux_ack {
+ uint8_t port; /* USB-C port number */
+} __ec_align1;
+
+/*****************************************************************************/
+/*
+ * Reserve a range of host commands for board-specific, experimental, or
+ * special purpose features. These can be (re)used without updating this file.
+ *
+ * CAUTION: Don't go nuts with this. Shipping products should document ALL
+ * their EC commands for easier development, testing, debugging, and support.
+ *
+ * All commands MUST be #defined to be 4-digit UPPER CASE hex values
+ * (e.g., 0x00AB, not 0xab) for CONFIG_HOSTCMD_SECTION_SORTED to work.
+ *
+ * In your experimental code, you may want to do something like this:
+ *
+ * #define EC_CMD_MAGIC_FOO 0x0000
+ * #define EC_CMD_MAGIC_BAR 0x0001
+ * #define EC_CMD_MAGIC_HEY 0x0002
+ *
+ * DECLARE_PRIVATE_HOST_COMMAND(EC_CMD_MAGIC_FOO, magic_foo_handler,
+ *	EC_VER_MASK(0));
+ *
+ * DECLARE_PRIVATE_HOST_COMMAND(EC_CMD_MAGIC_BAR, magic_bar_handler,
+ *	EC_VER_MASK(0));
+ *
+ * DECLARE_PRIVATE_HOST_COMMAND(EC_CMD_MAGIC_HEY, magic_hey_handler,
+ *	EC_VER_MASK(0));
+ */
+#define EC_CMD_BOARD_SPECIFIC_BASE 0x3E00
+#define EC_CMD_BOARD_SPECIFIC_LAST 0x3FFF
+
+/*
+ * Given the private host command offset, calculate the true private host
+ * command value.
+ */
+#define EC_PRIVATE_HOST_COMMAND_VALUE(command) \
+ (EC_CMD_BOARD_SPECIFIC_BASE + (command))
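
So a board reserving, say, offset 0x0012 for a private feature would define (name is hypothetical):

    /* Expands to 0x3E00 + 0x0012 == 0x3E12, inside the reserved range. */
    #define EC_CMD_MY_BOARD_FEATURE EC_PRIVATE_HOST_COMMAND_VALUE(0x0012)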
+
+/*****************************************************************************/
+/*
+ * Passthru commands
+ *
+ * Some platforms have sub-processors chained to each other. For example:
+ *
+ * AP <--> EC <--> PD MCU
+ *
+ * The top 2 bits of the command number are used to indicate which device the
+ * command is intended for. Device 0 is always the device receiving the
+ * command; other device mapping is board-specific.
+ *
+ * When a device receives a command to be passed to a sub-processor, it passes
+ * it on with the device number set back to 0. This allows the sub-processor
+ * to remain blissfully unaware of whether the command originated on the next
+ * device up the chain, or was passed through from the AP.
+ *
+ * In the above example, if the AP wants to send command 0x0002 to the PD MCU,
+ * AP sends command 0x4002 to the EC
+ * EC sends command 0x0002 to the PD MCU
+ * EC forwards PD MCU response back to the AP
+ */
+
+/* Offset and max command number for sub-device n */
+#define EC_CMD_PASSTHRU_OFFSET(n) (0x4000 * (n))
+#define EC_CMD_PASSTHRU_MAX(n) (EC_CMD_PASSTHRU_OFFSET(n) + 0x3fff)
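
Following the worked example in the comment above, the AP derives the on-the-wire command number for device 1 (the PD MCU in that topology) like this:

    /* 0x4000 * 1 + 0x0002 == 0x4002; the EC unwraps it back to 0x0002. */
    uint16_t wire_cmd = EC_CMD_PASSTHRU_OFFSET(1) + 0x0002;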
+
+/*****************************************************************************/
+/*
+ * Deprecated constants. These constants have been renamed for clarity. The
+ * meaning and size have not changed. Programs that use the old names should
+ * switch to the new names soon, as the old names may not be carried forward
+ * forever.
+ */
+#define EC_HOST_PARAM_SIZE EC_PROTO2_MAX_PARAM_SIZE
+#define EC_LPC_ADDR_OLD_PARAM EC_HOST_CMD_REGION1
+#define EC_OLD_PARAM_SIZE EC_HOST_CMD_REGION_SIZE
+
+
+
+#endif /* __CROS_EC_COMMANDS_H */
diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h
new file mode 100644
index 000000000000..4f9f756bc17c
--- /dev/null
+++ b/include/linux/platform_data/cros_ec_proto.h
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ChromeOS Embedded Controller protocol interface.
+ *
+ * Copyright (C) 2012 Google, Inc
+ */
+
+#ifndef __LINUX_CROS_EC_PROTO_H
+#define __LINUX_CROS_EC_PROTO_H
+
+#include <linux/device.h>
+#include <linux/lockdep_types.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+
+#include <linux/platform_data/cros_ec_commands.h>
+
+#define CROS_EC_DEV_NAME "cros_ec"
+#define CROS_EC_DEV_FP_NAME "cros_fp"
+#define CROS_EC_DEV_ISH_NAME "cros_ish"
+#define CROS_EC_DEV_PD_NAME "cros_pd"
+#define CROS_EC_DEV_SCP_NAME "cros_scp"
+#define CROS_EC_DEV_TP_NAME "cros_tp"
+
+#define CROS_EC_DEV_EC_INDEX 0
+#define CROS_EC_DEV_PD_INDEX 1
+
+/*
+ * The EC is unresponsive for a time after a reboot command. Add a
+ * simple delay to make sure that the bus stays locked.
+ */
+#define EC_REBOOT_DELAY_MS 50
+
+/*
+ * Max bus-specific overhead incurred by request/responses.
+ * I2C requires 1 additional byte for requests.
+ * I2C requires 2 additional bytes for responses.
+ * SPI requires up to 32 additional bytes for responses.
+ */
+#define EC_PROTO_VERSION_UNKNOWN 0
+#define EC_MAX_REQUEST_OVERHEAD 1
+#define EC_MAX_RESPONSE_OVERHEAD 32
+
+/*
+ * EC panic is not covered by the standard (0-F) ACPI notify values.
+ * Arbitrarily choosing B0 to notify ec panic, which is in the 84-BF
+ * device specific ACPI notify range.
+ */
+#define ACPI_NOTIFY_CROS_EC_PANIC 0xB0
+
+/*
+ * Command interface between EC and AP, for LPC, I2C and SPI interfaces.
+ */
+enum {
+ EC_MSG_TX_HEADER_BYTES = 3,
+ EC_MSG_TX_TRAILER_BYTES = 1,
+ EC_MSG_TX_PROTO_BYTES = EC_MSG_TX_HEADER_BYTES +
+ EC_MSG_TX_TRAILER_BYTES,
+ EC_MSG_RX_PROTO_BYTES = 3,
+
+	/* Max length of messages for proto 2 */
+ EC_PROTO2_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE +
+ EC_MSG_TX_PROTO_BYTES,
+
+ EC_MAX_MSG_BYTES = 64 * 1024,
+};
+
+/**
+ * struct cros_ec_command - Information about a ChromeOS EC command.
+ * @version: Command version number (often 0).
+ * @command: Command to send (EC_CMD_...).
+ * @outsize: Outgoing length in bytes.
+ * @insize: Max number of bytes to accept from the EC.
+ * @result: EC's response to the command (separate from communication failure).
+ * @data: Where to put the incoming data from EC and outgoing data to EC.
+ */
+struct cros_ec_command {
+ uint32_t version;
+ uint32_t command;
+ uint32_t outsize;
+ uint32_t insize;
+ uint32_t result;
+ uint8_t data[];
+};
+
+/**
+ * struct cros_ec_device - Information about a ChromeOS EC device.
+ * @phys_name: Name of physical comms layer (e.g. 'i2c-4').
+ * @dev: Device pointer for physical comms device
+ * @cros_class: The class structure for this device.
+ * @cmd_readmem: Direct read of the EC memory-mapped region, if supported.
+ * @offset: Offset within the EC_LPC_ADDR_MEMMAP region.
+ * @bytes: Number of bytes to read. Zero means "read a string" (including
+ *         the trailing '\0'). At most EC_MEMMAP_SIZE bytes can be
+ *         read. The caller must ensure that the buffer is large enough
+ *         for the result when reading a string.
+ * @max_request: Max size of message requested.
+ * @max_response: Max size of message response.
+ * @max_passthru: Max size of passthru message.
+ * @proto_version: The protocol version used for this device.
+ * @priv: Private data.
+ * @irq: Interrupt to use.
+ * @id: Device id.
+ * @din: Input buffer (for data from EC). This buffer will always be
+ * dword-aligned and include enough space for up to 7 word-alignment
+ * bytes also, so we can ensure that the body of the message is always
+ * dword-aligned (64-bit). We use this alignment to keep ARM and x86
+ *	happy. Probably word alignment would be OK; there might be a small
+ * performance advantage to using dword.
+ * @dout: Output buffer (for data to EC). This buffer will always be
+ * dword-aligned and include enough space for up to 7 word-alignment
+ * bytes also, so we can ensure that the body of the message is always
+ * dword-aligned (64-bit). We use this alignment to keep ARM and x86
+ *	happy. Probably word alignment would be OK; there might be a small
+ * performance advantage to using dword.
+ * @din_size: Size of din buffer to allocate (zero to use static din).
+ * @dout_size: Size of dout buffer to allocate (zero to use static dout).
+ * @wake_enabled: True if this device can wake the system from sleep.
+ * @suspended: True if this device had been suspended.
+ * @cmd_xfer: Send command to EC and get response.
+ * Returns the number of bytes received if the communication
+ * succeeded, but that doesn't mean the EC was happy with the
+ * command. The caller should check msg.result for the EC's result
+ * code.
+ * @pkt_xfer: Send packet to EC and get response.
+ * @lockdep_key: Lockdep class for each instance. Unused if CONFIG_LOCKDEP is
+ * not enabled.
+ * @lock: One transaction at a time.
+ * @mkbp_event_supported: 0 if MKBP not supported. Otherwise its value is
+ * the maximum supported version of the MKBP host event
+ * command + 1.
+ * @host_sleep_v1: True if this EC supports the sleep v1 command.
+ * @event_notifier: Interrupt event notifier for transport devices.
+ * @event_data: Raw payload transferred with the MKBP event.
+ * @event_size: Size in bytes of the event data.
+ * @host_event_wake_mask: Mask of host events that cause wake from suspend.
+ * @suspend_timeout_ms: The timeout in milliseconds between when sleep event
+ * is received and when the EC will declare sleep
+ * transition failure if the sleep signal is not
+ * asserted. See also struct
+ * ec_params_host_sleep_event_v1 in cros_ec_commands.h.
+ * @last_resume_result: The number of sleep power signal transitions that
+ * occurred since the suspend message. The high bit
+ * indicates a timeout occurred. See also struct
+ * ec_response_host_sleep_event_v1 in cros_ec_commands.h.
+ * @last_event_time: Exact time from the hard irq when we got notified of
+ * a new event.
+ * @notifier_ready: The notifier_block to let the kernel re-query EC
+ * communication protocol when the EC sends
+ * EC_HOST_EVENT_INTERFACE_READY.
+ * @ec: The platform_device used by the mfd driver to interface with the
+ * main EC.
+ * @pd: The platform_device used by the mfd driver to interface with the
+ * PD behind an EC.
+ * @panic_notifier: EC panic notifier.
+ */
+struct cros_ec_device {
+ /* These are used by other drivers that want to talk to the EC */
+ const char *phys_name;
+ struct device *dev;
+ struct class *cros_class;
+ int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset,
+ unsigned int bytes, void *dest);
+
+ /* These are used to implement the platform-specific interface */
+ u16 max_request;
+ u16 max_response;
+ u16 max_passthru;
+ u16 proto_version;
+ void *priv;
+ int irq;
+ u8 *din;
+ u8 *dout;
+ int din_size;
+ int dout_size;
+ bool wake_enabled;
+ bool suspended;
+ int (*cmd_xfer)(struct cros_ec_device *ec,
+ struct cros_ec_command *msg);
+ int (*pkt_xfer)(struct cros_ec_device *ec,
+ struct cros_ec_command *msg);
+ struct lock_class_key lockdep_key;
+ struct mutex lock;
+ u8 mkbp_event_supported;
+ bool host_sleep_v1;
+ struct blocking_notifier_head event_notifier;
+
+ struct ec_response_get_next_event_v1 event_data;
+ int event_size;
+ u32 host_event_wake_mask;
+ u32 last_resume_result;
+ u16 suspend_timeout_ms;
+ ktime_t last_event_time;
+ struct notifier_block notifier_ready;
+
+ /* The platform devices used by the mfd driver */
+ struct platform_device *ec;
+ struct platform_device *pd;
+
+ struct blocking_notifier_head panic_notifier;
+};
+
+/**
+ * struct cros_ec_platform - ChromeOS EC platform information.
+ * @ec_name: Name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
+ * used in /dev/ and sysfs.
+ * @cmd_offset: Offset to apply for each command. Set when
+ * registering a device behind another one.
+ */
+struct cros_ec_platform {
+ const char *ec_name;
+ u16 cmd_offset;
+};
+
+/**
+ * struct cros_ec_dev - ChromeOS EC device entry point.
+ * @class_dev: Device structure used in sysfs.
+ * @ec_dev: cros_ec_device structure to talk to the physical device.
+ * @dev: Pointer to the platform device.
+ * @debug_info: cros_ec_debugfs structure for debugging information.
+ * @has_kb_wake_angle: True if at least 2 accelerometers are connected to the EC.
+ * @cmd_offset: Offset to apply for each command.
+ * @features: Features supported by the EC.
+ */
+struct cros_ec_dev {
+ struct device class_dev;
+ struct cros_ec_device *ec_dev;
+ struct device *dev;
+ struct cros_ec_debugfs *debug_info;
+ bool has_kb_wake_angle;
+ u16 cmd_offset;
+ struct ec_response_get_features features;
+};
+
+#define to_cros_ec_dev(dev) container_of(dev, struct cros_ec_dev, class_dev)
+
+int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg);
+
+int cros_ec_check_result(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg);
+
+int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg);
+
+int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg);
+
+int cros_ec_query_all(struct cros_ec_device *ec_dev);
+
+int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
+ bool *wake_event,
+ bool *has_more_events);
+
+u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev);
+
+bool cros_ec_check_features(struct cros_ec_dev *ec, int feature);
+
+int cros_ec_get_sensor_count(struct cros_ec_dev *ec);
+
+int cros_ec_cmd(struct cros_ec_device *ec_dev, unsigned int version, int command, void *outdata,
+ size_t outsize, void *indata, size_t insize);
+
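+/*
+ * A minimal round-trip sketch with these helpers, fetching the battery
+ * static info defined in cros_ec_commands.h (error handling trimmed):
+ *
+ *	struct ec_params_battery_static_info p = { .index = 0 };
+ *	struct ec_response_battery_static_info r;
+ *	int ret;
+ *
+ *	ret = cros_ec_cmd(ec_dev, 0, EC_CMD_BATTERY_GET_STATIC,
+ *			  &p, sizeof(p), &r, sizeof(r));
+ *	if (ret >= 0)
+ *		dev_info(ec_dev->dev, "design capacity: %u mAh\n",
+ *			 r.design_capacity);
+ */
+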
+/**
+ * cros_ec_get_time_ns() - Return time in ns.
+ *
+ * This is the function used to record the time for last_event_time in struct
+ * cros_ec_device during the hard irq.
+ *
+ * Return: ktime_t format since boot.
+ */
+static inline ktime_t cros_ec_get_time_ns(void)
+{
+ return ktime_get_boottime_ns();
+}
+
+#endif /* __LINUX_CROS_EC_PROTO_H */
diff --git a/include/linux/platform_data/cros_ec_sensorhub.h b/include/linux/platform_data/cros_ec_sensorhub.h
new file mode 100644
index 000000000000..0ecce6aa69d5
--- /dev/null
+++ b/include/linux/platform_data/cros_ec_sensorhub.h
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Chrome OS EC MEMS Sensor Hub driver.
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_CROS_EC_SENSORHUB_H
+#define __LINUX_PLATFORM_DATA_CROS_EC_SENSORHUB_H
+
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/platform_data/cros_ec_commands.h>
+
+struct iio_dev;
+
+/**
+ * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information.
+ * @sensor_num: ID of the sensor, as reported by the EC.
+ */
+struct cros_ec_sensor_platform {
+ u8 sensor_num;
+};
+
+/**
+ * typedef cros_ec_sensorhub_push_data_cb_t - Callback function to send datum
+ * to specific sensors.
+ *
+ * @indio_dev: The IIO device that will process the sample.
+ * @data: Vector array of the ring sample.
+ * @timestamp: Timestamp in host timespace when the sample was acquired by
+ * the EC.
+ */
+typedef int (*cros_ec_sensorhub_push_data_cb_t)(struct iio_dev *indio_dev,
+ s16 *data,
+ s64 timestamp);
+
+struct cros_ec_sensorhub_sensor_push_data {
+ struct iio_dev *indio_dev;
+ cros_ec_sensorhub_push_data_cb_t push_data_cb;
+};
+
+enum {
+ CROS_EC_SENSOR_LAST_TS,
+ CROS_EC_SENSOR_NEW_TS,
+ CROS_EC_SENSOR_ALL_TS
+};
+
+struct cros_ec_sensors_ring_sample {
+ u8 sensor_id;
+ u8 flag;
+ s16 vector[3];
+ s64 timestamp;
+} __packed;
+
+/* State used for cros_ec_ring_fix_overflow */
+struct cros_ec_sensors_ec_overflow_state {
+ s64 offset;
+ s64 last;
+};
+
+/* Length of the filter, how long to remember entries for */
+#define CROS_EC_SENSORHUB_TS_HISTORY_SIZE 64
+
+/**
+ * struct cros_ec_sensors_ts_filter_state - Timestamp filter state.
+ *
+ * @x_offset: x is EC interrupt time. x_offset is its last value.
+ * @y_offset: y is the difference between AP and EC time, y_offset is its
+ *            last value.
+ * @x_history: The past history of x, relative to x_offset.
+ * @y_history: The past history of y, relative to y_offset.
+ * @m_history: Rate between y and x.
+ * @history_len: Amount of valid historic data in the arrays.
+ * @temp_buf: Temporary buffer used when updating the filter.
+ * @median_m: Median value of m_history.
+ * @median_error: Final error to apply to the AP interrupt timestamp to get
+ *                the "true timestamp" at which the event occurred.
+ */
+struct cros_ec_sensors_ts_filter_state {
+ s64 x_offset, y_offset;
+ s64 x_history[CROS_EC_SENSORHUB_TS_HISTORY_SIZE];
+ s64 y_history[CROS_EC_SENSORHUB_TS_HISTORY_SIZE];
+ s64 m_history[CROS_EC_SENSORHUB_TS_HISTORY_SIZE];
+ int history_len;
+
+ s64 temp_buf[CROS_EC_SENSORHUB_TS_HISTORY_SIZE];
+
+ s64 median_m;
+ s64 median_error;
+};
+
+/* struct cros_ec_sensors_ts_batch_state - State of batch of a single sensor.
+ *
+ * Used to store the information needed to batch data using the median filter.
+ *
+ * @penul_ts: last but one batch timestamp (penultimate timestamp).
+ * Used for timestamp spreading calculations
+ * when a batch shows up.
+ * @penul_len: last but one batch length.
+ * @last_ts: Last batch timestamp.
+ * @last_len: Last batch length.
+ * @newest_sensor_event: Last sensor timestamp.
+ */
+struct cros_ec_sensors_ts_batch_state {
+ s64 penul_ts;
+ int penul_len;
+ s64 last_ts;
+ int last_len;
+ s64 newest_sensor_event;
+};
+
+/*
+ * struct cros_ec_sensorhub - Sensor Hub device data.
+ *
+ * @dev: Device object, mostly used for logging.
+ * @ec: Embedded Controller where the hub is located.
+ * @sensor_num: Number of MEMS sensors present in the EC.
+ * @msg: Structure to send FIFO requests.
+ * @params: Pointer to parameters in msg.
+ * @resp: Pointer to responses in msg.
+ * @cmd_lock: Lock for sending msg.
+ * @notifier: Notifier to kick the FIFO interrupt.
+ * @ring: Preprocessed ring to store events.
+ * @fifo_timestamp: Array for event timestamp and spreading.
+ * @fifo_info: Copy of FIFO information coming from the EC.
+ * @fifo_size: Size of the ring.
+ * @batch_state: Per sensor information of the last batches received.
+ * @overflow_a: For handling timestamp overflow for a time (sensor events).
+ * @overflow_b: For handling timestamp overflow for b time (ec interrupts).
+ * @filter: Median filter structure.
+ * @tight_timestamps: Set to true when the EC supports tight timestamping:
+ * The timestamps reported from the EC have low jitter.
+ * Timestamps also come before every sample. Set either
+ * by feature bits coming from the EC or userspace.
+ * @future_timestamp_count: Statistics used to compute shaved time.
+ * This occurs when timestamp interpolation from EC
+ * time to AP time accidentally puts timestamps in
+ * the future. These timestamps are clamped to
+ * `now` and these count/total_ns maintain the
+ * statistics for how much time was removed in a
+ * given period.
+ * @future_timestamp_total_ns: Total amount of time shaved.
+ * @push_data: Array of callbacks to send datums to the IIO sensor object.
+ */
+struct cros_ec_sensorhub {
+ struct device *dev;
+ struct cros_ec_dev *ec;
+ int sensor_num;
+
+ struct cros_ec_command *msg;
+ struct ec_params_motion_sense *params;
+ struct ec_response_motion_sense *resp;
+ struct mutex cmd_lock; /* Lock for protecting msg structure. */
+
+ struct notifier_block notifier;
+
+ struct cros_ec_sensors_ring_sample *ring;
+
+ ktime_t fifo_timestamp[CROS_EC_SENSOR_ALL_TS];
+ struct ec_response_motion_sense_fifo_info *fifo_info;
+ int fifo_size;
+
+ struct cros_ec_sensors_ts_batch_state *batch_state;
+
+ struct cros_ec_sensors_ec_overflow_state overflow_a;
+ struct cros_ec_sensors_ec_overflow_state overflow_b;
+
+ struct cros_ec_sensors_ts_filter_state filter;
+
+ int tight_timestamps;
+
+ s32 future_timestamp_count;
+ s64 future_timestamp_total_ns;
+
+ struct cros_ec_sensorhub_sensor_push_data *push_data;
+};
+
+int cros_ec_sensorhub_register_push_data(struct cros_ec_sensorhub *sensorhub,
+ u8 sensor_num,
+ struct iio_dev *indio_dev,
+ cros_ec_sensorhub_push_data_cb_t cb);
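
Hedged sketch of how an IIO sensor driver hooks itself up (names are illustrative); the callback receives each decoded FIFO sample for its sensor number:

    static int my_push(struct iio_dev *indio_dev, s16 *data, s64 timestamp)
    {
        /* push the 3-axis vector and timestamp into the IIO buffer */
        return 0;
    }

    ret = cros_ec_sensorhub_register_push_data(sensorhub, sensor_num,
                                               indio_dev, my_push);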
+
+void cros_ec_sensorhub_unregister_push_data(struct cros_ec_sensorhub *sensorhub,
+ u8 sensor_num);
+
+int cros_ec_sensorhub_ring_allocate(struct cros_ec_sensorhub *sensorhub);
+int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub);
+void cros_ec_sensorhub_ring_remove(void *arg);
+int cros_ec_sensorhub_ring_fifo_enable(struct cros_ec_sensorhub *sensorhub,
+ bool on);
+
+#endif /* __LINUX_PLATFORM_DATA_CROS_EC_SENSORHUB_H */
diff --git a/include/linux/platform_data/cros_usbpd_notify.h b/include/linux/platform_data/cros_usbpd_notify.h
new file mode 100644
index 000000000000..4f2791722b6d
--- /dev/null
+++ b/include/linux/platform_data/cros_usbpd_notify.h
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ChromeOS EC Power Delivery Notifier Driver
+ *
+ * Copyright 2020 Google LLC
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_CROS_USBPD_NOTIFY_H
+#define __LINUX_PLATFORM_DATA_CROS_USBPD_NOTIFY_H
+
+#include <linux/notifier.h>
+
+int cros_usbpd_register_notify(struct notifier_block *nb);
+
+void cros_usbpd_unregister_notify(struct notifier_block *nb);
+
+#endif /* __LINUX_PLATFORM_DATA_CROS_USBPD_NOTIFY_H */
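
Consumers follow the usual notifier pattern; a hedged sketch:

    static int my_pd_event(struct notifier_block *nb,
                           unsigned long host_event, void *data)
    {
        /* react to the USB PD event here */
        return NOTIFY_OK;
    }

    static struct notifier_block my_nb = { .notifier_call = my_pd_event };

    /* ... during probe ... */
    cros_usbpd_register_notify(&my_nb);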
diff --git a/include/linux/platform_data/crypto-atmel.h b/include/linux/platform_data/crypto-atmel.h
deleted file mode 100644
index b46e0d9062a0..000000000000
--- a/include/linux/platform_data/crypto-atmel.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef __LINUX_CRYPTO_ATMEL_H
-#define __LINUX_CRYPTO_ATMEL_H
-
-#include <linux/platform_data/dma-atmel.h>
-
-/**
- * struct crypto_dma_data - DMA data for AES/TDES/SHA
- */
-struct crypto_dma_data {
- struct at_dma_slave txdata;
- struct at_dma_slave rxdata;
-};
-
-/**
- * struct crypto_platform_data - board-specific AES/TDES/SHA configuration
- * @dma_slave: DMA slave interface to use in data transfers.
- */
-struct crypto_platform_data {
- struct crypto_dma_data *dma_slave;
-};
-
-#endif /* __LINUX_CRYPTO_ATMEL_H */
diff --git a/include/linux/platform_data/crypto-ux500.h b/include/linux/platform_data/crypto-ux500.h
index 94df96d9a336..5d43350e32cc 100644
--- a/include/linux/platform_data/crypto-ux500.h
+++ b/include/linux/platform_data/crypto-ux500.h
@@ -1,8 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) ST-Ericsson SA 2011
*
* Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
- * License terms: GNU General Public License (GPL) version 2
*/
#ifndef _CRYPTO_UX500_H
#define _CRYPTO_UX500_H
diff --git a/include/linux/platform_data/cyttsp4.h b/include/linux/platform_data/cyttsp4.h
index 6eba54aff1dc..5dc9d2be384b 100644
--- a/include/linux/platform_data/cyttsp4.h
+++ b/include/linux/platform_data/cyttsp4.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Header file for:
* Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers.
@@ -9,22 +10,7 @@
* Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc.
* Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, and only version 2, as published by the
- * Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
* Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com)
- *
*/
#ifndef _CYTTSP4_H_
#define _CYTTSP4_H_
diff --git a/include/linux/platform_data/davinci-cpufreq.h b/include/linux/platform_data/davinci-cpufreq.h
new file mode 100644
index 000000000000..bc208c64e3d7
--- /dev/null
+++ b/include/linux/platform_data/davinci-cpufreq.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TI DaVinci CPUFreq platform support.
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc. https://www.ti.com/
+ */
+
+#ifndef _MACH_DAVINCI_CPUFREQ_H
+#define _MACH_DAVINCI_CPUFREQ_H
+
+#include <linux/cpufreq.h>
+
+struct davinci_cpufreq_config {
+ struct cpufreq_frequency_table *freq_table;
+ int (*set_voltage)(unsigned int index);
+ int (*init)(void);
+};
+
+#endif /* _MACH_DAVINCI_CPUFREQ_H */
diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h
index 85ad68f9206a..c8645b2ed3c0 100644
--- a/include/linux/platform_data/davinci_asp.h
+++ b/include/linux/platform_data/davinci_asp.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI DaVinci Audio Serial Port support
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __DAVINCI_ASP_H
@@ -79,6 +71,7 @@ struct davinci_mcasp_pdata {
/* McASP specific fields */
int tdm_slots;
u8 op_mode;
+ u8 dismod;
u8 num_serializer;
u8 *serial_dir;
u8 version;
@@ -95,6 +88,7 @@ enum {
MCASP_VERSION_2, /* DA8xx/OMAPL1x */
MCASP_VERSION_3, /* TI81xx/AM33xx */
MCASP_VERSION_4, /* DRA7xxx */
+ MCASP_VERSION_OMAP, /* OMAP4/5 */
};
enum mcbsp_clk_input_pin {
diff --git a/include/linux/platform_data/db8500_thermal.h b/include/linux/platform_data/db8500_thermal.h
deleted file mode 100644
index 3bf60902e902..000000000000
--- a/include/linux/platform_data/db8500_thermal.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * db8500_thermal.h - DB8500 Thermal Management Implementation
- *
- * Copyright (C) 2012 ST-Ericsson
- * Copyright (C) 2012 Linaro Ltd.
- *
- * Author: Hongbo Zhang <hongbo.zhang@linaro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _DB8500_THERMAL_H_
-#define _DB8500_THERMAL_H_
-
-#include <linux/thermal.h>
-
-#define COOLING_DEV_MAX 8
-
-struct db8500_trip_point {
- unsigned long temp;
- enum thermal_trip_type type;
- char cdev_name[COOLING_DEV_MAX][THERMAL_NAME_LENGTH];
-};
-
-struct db8500_thsens_platform_data {
- struct db8500_trip_point trip_points[THERMAL_MAX_TRIPS];
- int num_trips;
-};
-
-#endif /* _DB8500_THERMAL_H_ */
diff --git a/include/linux/platform_data/dma-atmel.h b/include/linux/platform_data/dma-atmel.h
deleted file mode 100644
index e95f19c65873..000000000000
--- a/include/linux/platform_data/dma-atmel.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Header file for the Atmel AHB DMA Controller driver
- *
- * Copyright (C) 2008 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#ifndef AT_HDMAC_H
-#define AT_HDMAC_H
-
-#include <linux/dmaengine.h>
-
-/**
- * struct at_dma_platform_data - Controller configuration parameters
- * @nr_channels: Number of channels supported by hardware (max 8)
- * @cap_mask: dma_capability flags supported by the platform
- */
-struct at_dma_platform_data {
- unsigned int nr_channels;
- dma_cap_mask_t cap_mask;
-};
-
-/**
- * struct at_dma_slave - Controller-specific information about a slave
- * @dma_dev: required DMA master device
- * @cfg: Platform-specific initializer for the CFG register
- */
-struct at_dma_slave {
- struct device *dma_dev;
- u32 cfg;
-};
-
-
-/* Platform-configurable bits in CFG */
-#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */
-
-#define ATC_SRC_PER(h) (0xFU & (h)) /* Channel src rq associated with periph handshaking ifc h */
-#define ATC_DST_PER(h) ((0xFU & (h)) << 4) /* Channel dst rq associated with periph handshaking ifc h */
-#define ATC_SRC_REP (0x1 << 8) /* Source Replay Mod */
-#define ATC_SRC_H2SEL (0x1 << 9) /* Source Handshaking Mod */
-#define ATC_SRC_H2SEL_SW (0x0 << 9)
-#define ATC_SRC_H2SEL_HW (0x1 << 9)
-#define ATC_SRC_PER_MSB(h) (ATC_PER_MSB(h) << 10) /* Channel src rq (most significant bits) */
-#define ATC_DST_REP (0x1 << 12) /* Destination Replay Mod */
-#define ATC_DST_H2SEL (0x1 << 13) /* Destination Handshaking Mod */
-#define ATC_DST_H2SEL_SW (0x0 << 13)
-#define ATC_DST_H2SEL_HW (0x1 << 13)
-#define ATC_DST_PER_MSB(h) (ATC_PER_MSB(h) << 14) /* Channel dst rq (most significant bits) */
-#define ATC_SOD (0x1 << 16) /* Stop On Done */
-#define ATC_LOCK_IF (0x1 << 20) /* Interface Lock */
-#define ATC_LOCK_B (0x1 << 21) /* AHB Bus Lock */
-#define ATC_LOCK_IF_L (0x1 << 22) /* Master Interface Arbiter Lock */
-#define ATC_LOCK_IF_L_CHUNK (0x0 << 22)
-#define ATC_LOCK_IF_L_BUFFER (0x1 << 22)
-#define ATC_AHB_PROT_MASK (0x7 << 24) /* AHB Protection */
-#define ATC_FIFOCFG_MASK (0x3 << 28) /* FIFO Request Configuration */
-#define ATC_FIFOCFG_LARGESTBURST (0x0 << 28)
-#define ATC_FIFOCFG_HALFFIFO (0x1 << 28)
-#define ATC_FIFOCFG_ENOUGHSPACE (0x2 << 28)
-
-
-#endif /* AT_HDMAC_H */
diff --git a/include/linux/platform_data/dma-coh901318.h b/include/linux/platform_data/dma-coh901318.h
deleted file mode 100644
index c4cb9590d115..000000000000
--- a/include/linux/platform_data/dma-coh901318.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Platform data for the COH901318 DMA controller
- * Copyright (C) 2007-2013 ST-Ericsson
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#ifndef PLAT_COH901318_H
-#define PLAT_COH901318_H
-
-#ifdef CONFIG_COH901318
-
-/* We only support the U300 DMA channels */
-#define U300_DMA_MSL_TX_0 0
-#define U300_DMA_MSL_TX_1 1
-#define U300_DMA_MSL_TX_2 2
-#define U300_DMA_MSL_TX_3 3
-#define U300_DMA_MSL_TX_4 4
-#define U300_DMA_MSL_TX_5 5
-#define U300_DMA_MSL_TX_6 6
-#define U300_DMA_MSL_RX_0 7
-#define U300_DMA_MSL_RX_1 8
-#define U300_DMA_MSL_RX_2 9
-#define U300_DMA_MSL_RX_3 10
-#define U300_DMA_MSL_RX_4 11
-#define U300_DMA_MSL_RX_5 12
-#define U300_DMA_MSL_RX_6 13
-#define U300_DMA_MMCSD_RX_TX 14
-#define U300_DMA_MSPRO_TX 15
-#define U300_DMA_MSPRO_RX 16
-#define U300_DMA_UART0_TX 17
-#define U300_DMA_UART0_RX 18
-#define U300_DMA_APEX_TX 19
-#define U300_DMA_APEX_RX 20
-#define U300_DMA_PCM_I2S0_TX 21
-#define U300_DMA_PCM_I2S0_RX 22
-#define U300_DMA_PCM_I2S1_TX 23
-#define U300_DMA_PCM_I2S1_RX 24
-#define U300_DMA_XGAM_CDI 25
-#define U300_DMA_XGAM_PDI 26
-#define U300_DMA_SPI_TX 27
-#define U300_DMA_SPI_RX 28
-#define U300_DMA_GENERAL_PURPOSE_0 29
-#define U300_DMA_GENERAL_PURPOSE_1 30
-#define U300_DMA_GENERAL_PURPOSE_2 31
-#define U300_DMA_GENERAL_PURPOSE_3 32
-#define U300_DMA_GENERAL_PURPOSE_4 33
-#define U300_DMA_GENERAL_PURPOSE_5 34
-#define U300_DMA_GENERAL_PURPOSE_6 35
-#define U300_DMA_GENERAL_PURPOSE_7 36
-#define U300_DMA_GENERAL_PURPOSE_8 37
-#define U300_DMA_UART1_TX 38
-#define U300_DMA_UART1_RX 39
-
-#define U300_DMA_DEVICE_CHANNELS 32
-#define U300_DMA_CHANNELS 40
-
-/**
- * coh901318_filter_id() - DMA channel filter function
- * @chan: dma channel handle
- * @chan_id: id of dma channel to be filter out
- *
- * In dma_request_channel() it specifies what channel id to be requested
- */
-bool coh901318_filter_id(struct dma_chan *chan, void *chan_id);
-#else
-static inline bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
-{
- return false;
-}
-#endif
-
-#endif /* PLAT_COH901318_H */
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index 896cb71a382c..860ba4bc5ead 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -1,20 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for the Synopsys DesignWare DMA Controller
*
* Copyright (C) 2007 Atmel Corporation
* Copyright (C) 2010-2011 ST Microelectronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _PLATFORM_DATA_DMA_DW_H
#define _PLATFORM_DATA_DMA_DW_H
-#include <linux/device.h>
+#include <linux/bits.h>
+#include <linux/types.h>
#define DW_DMA_MAX_NR_MASTERS 4
#define DW_DMA_MAX_NR_CHANNELS 8
+#define DW_DMA_MIN_BURST 1
+#define DW_DMA_MAX_BURST 256
+
+struct device;
/**
* struct dw_dma_slave - Controller-specific information about a slave
@@ -24,6 +26,7 @@
* @dst_id: dst request line
* @m_master: memory master for transfers on allocated channel
* @p_master: peripheral master for transfers on allocated channel
+ * @channels: mask of the channels permitted for allocation (zero value means any)
 * @hs_polarity: set active low polarity of handshake interface
*/
struct dw_dma_slave {
@@ -32,39 +35,45 @@ struct dw_dma_slave {
u8 dst_id;
u8 m_master;
u8 p_master;
+ u8 channels;
bool hs_polarity;
};
/**
* struct dw_dma_platform_data - Controller configuration parameters
+ * @nr_masters: Number of AHB masters supported by the controller
* @nr_channels: Number of channels supported by hardware (max 8)
- * @is_private: The device channels should be marked as private and not for
- * by the general purpose DMA channel allocator.
- * @is_memcpy: The device channels do support memory-to-memory transfers.
- * @is_idma32: The type of the DMA controller is iDMA32
* @chan_allocation_order: Allocate channels starting from 0 or 7
* @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
* @block_size: Maximum block size supported by the controller
- * @nr_masters: Number of AHB masters supported by the controller
* @data_width: Maximum data width supported by hardware per AHB master
* (in bytes, power of 2)
* @multi_block: Multi block transfers supported by hardware per channel.
+ * @max_burst: Maximum value of burst transaction size supported by hardware
+ * per channel (in units of CTL.SRC_TR_WIDTH/CTL.DST_TR_WIDTH).
+ * @protctl: Protection control signals setting per channel.
+ * @quirks: Optional platform quirks.
*/
struct dw_dma_platform_data {
- unsigned int nr_channels;
- bool is_private;
- bool is_memcpy;
- bool is_idma32;
+ u32 nr_masters;
+ u32 nr_channels;
#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
- unsigned char chan_allocation_order;
+ u32 chan_allocation_order;
#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */
#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */
- unsigned char chan_priority;
- unsigned int block_size;
- unsigned char nr_masters;
- unsigned char data_width[DW_DMA_MAX_NR_MASTERS];
- unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS];
+ u32 chan_priority;
+ u32 block_size;
+ u32 data_width[DW_DMA_MAX_NR_MASTERS];
+ u32 multi_block[DW_DMA_MAX_NR_CHANNELS];
+ u32 max_burst[DW_DMA_MAX_NR_CHANNELS];
+#define CHAN_PROTCTL_PRIVILEGED BIT(0)
+#define CHAN_PROTCTL_BUFFERABLE BIT(1)
+#define CHAN_PROTCTL_CACHEABLE BIT(2)
+#define CHAN_PROTCTL_MASK GENMASK(2, 0)
+ u32 protctl;
+#define DW_DMA_QUIRK_XBAR_PRESENT BIT(0)
+ u32 quirks;
};
#endif /* _PLATFORM_DATA_DMA_DW_H */
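
With the reworked u32 fields and the new protection-control knobs, a board file might describe its controller like this (all values hypothetical):

    static struct dw_dma_platform_data my_dw_pdata = {
        .nr_masters		= 2,
        .nr_channels		= 4,
        .chan_allocation_order	= CHAN_ALLOCATION_ASCENDING,
        .chan_priority		= CHAN_PRIORITY_ASCENDING,
        .block_size		= 4095,
        .data_width		= { 4, 4 },	/* bytes, per AHB master */
        .protctl		= CHAN_PROTCTL_PRIVILEGED,
    };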
diff --git a/include/linux/platform_data/dma-ep93xx.h b/include/linux/platform_data/dma-ep93xx.h
index e82c642fa53c..eb9805bb3fe8 100644
--- a/include/linux/platform_data/dma-ep93xx.h
+++ b/include/linux/platform_data/dma-ep93xx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARCH_DMA_H
#define __ASM_ARCH_DMA_H
@@ -84,7 +85,7 @@ static inline enum dma_transfer_direction
ep93xx_dma_chan_direction(struct dma_chan *chan)
{
if (!ep93xx_dma_chan_is_m2p(chan))
- return DMA_NONE;
+ return DMA_TRANS_NONE;
/* even channels are for TX, odd for RX */
return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
diff --git a/include/linux/platform_data/dma-hsu.h b/include/linux/platform_data/dma-hsu.h
index 3453fa655502..611bae193c1c 100644
--- a/include/linux/platform_data/dma-hsu.h
+++ b/include/linux/platform_data/dma-hsu.h
@@ -1,17 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Driver for the High Speed UART DMA
*
* Copyright (C) 2015 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _PLATFORM_DATA_DMA_HSU_H
#define _PLATFORM_DATA_DMA_HSU_H
-#include <linux/device.h>
+struct device;
struct hsu_dma_slave {
struct device *dma_dev;
diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h
deleted file mode 100644
index 2d08816720f6..000000000000
--- a/include/linux/platform_data/dma-imx-sdma.h
+++ /dev/null
@@ -1,67 +0,0 @@
-#ifndef __MACH_MXC_SDMA_H__
-#define __MACH_MXC_SDMA_H__
-
-/**
- * struct sdma_script_start_addrs - SDMA script start pointers
- *
- * start addresses of the different functions in the physical
- * address space of the SDMA engine.
- */
-struct sdma_script_start_addrs {
- s32 ap_2_ap_addr;
- s32 ap_2_bp_addr;
- s32 ap_2_ap_fixed_addr;
- s32 bp_2_ap_addr;
- s32 loopback_on_dsp_side_addr;
- s32 mcu_interrupt_only_addr;
- s32 firi_2_per_addr;
- s32 firi_2_mcu_addr;
- s32 per_2_firi_addr;
- s32 mcu_2_firi_addr;
- s32 uart_2_per_addr;
- s32 uart_2_mcu_addr;
- s32 per_2_app_addr;
- s32 mcu_2_app_addr;
- s32 per_2_per_addr;
- s32 uartsh_2_per_addr;
- s32 uartsh_2_mcu_addr;
- s32 per_2_shp_addr;
- s32 mcu_2_shp_addr;
- s32 ata_2_mcu_addr;
- s32 mcu_2_ata_addr;
- s32 app_2_per_addr;
- s32 app_2_mcu_addr;
- s32 shp_2_per_addr;
- s32 shp_2_mcu_addr;
- s32 mshc_2_mcu_addr;
- s32 mcu_2_mshc_addr;
- s32 spdif_2_mcu_addr;
- s32 mcu_2_spdif_addr;
- s32 asrc_2_mcu_addr;
- s32 ext_mem_2_ipu_addr;
- s32 descrambler_addr;
- s32 dptc_dvfs_addr;
- s32 utra_addr;
- s32 ram_code_start_addr;
- /* End of v1 array */
- s32 mcu_2_ssish_addr;
- s32 ssish_2_mcu_addr;
- s32 hdmi_dma_addr;
- /* End of v2 array */
- s32 zcanfd_2_mcu_addr;
- s32 zqspi_2_mcu_addr;
- /* End of v3 array */
-};
-
-/**
- * struct sdma_platform_data - platform specific data for SDMA engine
- *
- * @fw_name The firmware name
- * @script_addrs SDMA scripts addresses in SDMA ROM
- */
-struct sdma_platform_data {
- char *fw_name;
- struct sdma_script_start_addrs *script_addrs;
-};
-
-#endif /* __MACH_MXC_SDMA_H__ */
diff --git a/include/linux/platform_data/dma-iop32x.h b/include/linux/platform_data/dma-iop32x.h
new file mode 100644
index 000000000000..ac83cff89549
--- /dev/null
+++ b/include/linux/platform_data/dma-iop32x.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2006, Intel Corporation.
+ */
+#ifndef IOP_ADMA_H
+#define IOP_ADMA_H
+#include <linux/types.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+
+#define IOP_ADMA_SLOT_SIZE 32
+#define IOP_ADMA_THRESHOLD 4
+#ifdef DEBUG
+#define IOP_PARANOIA 1
+#else
+#define IOP_PARANOIA 0
+#endif
+#define iop_paranoia(x) BUG_ON(IOP_PARANOIA && (x))
+
+#define DMA0_ID 0
+#define DMA1_ID 1
+#define AAU_ID 2
+
+/**
+ * struct iop_adma_device - internal representation of an ADMA device
+ * @pdev: Platform device
+ * @id: HW ADMA Device selector
+ * @dma_desc_pool: base of DMA descriptor region (DMA address)
+ * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
+ * @common: embedded struct dma_device
+ */
+struct iop_adma_device {
+ struct platform_device *pdev;
+ int id;
+ dma_addr_t dma_desc_pool;
+ void *dma_desc_pool_virt;
+ struct dma_device common;
+};
+
+/**
+ * struct iop_adma_chan - internal representation of an ADMA channel
+ * @pending: allows batching of hardware operations
+ * @lock: serializes enqueue/dequeue operations to the slot pool
+ * @mmr_base: memory mapped register base
+ * @chain: device chain view of the descriptors
+ * @device: parent device
+ * @common: common dmaengine channel object members
+ * @last_used: place holder for allocation to continue from where it left off
+ * @all_slots: complete domain of slots usable by the channel
+ * @slots_allocated: records the actual size of the descriptor slot pool
+ * @irq_tasklet: bottom half where iop_adma_slot_cleanup runs
+ */
+struct iop_adma_chan {
+ int pending;
+ spinlock_t lock; /* protects the descriptor slot pool */
+ void __iomem *mmr_base;
+ struct list_head chain;
+ struct iop_adma_device *device;
+ struct dma_chan common;
+ struct iop_adma_desc_slot *last_used;
+ struct list_head all_slots;
+ int slots_allocated;
+ struct tasklet_struct irq_tasklet;
+};
+
+/**
+ * struct iop_adma_desc_slot - IOP-ADMA software descriptor
+ * @slot_node: node on the iop_adma_chan.all_slots list
+ * @chain_node: node on the iop_adma_chan.chain list
+ * @hw_desc: virtual address of the hardware descriptor chain
+ * @phys: hardware address of the hardware descriptor chain
+ * @group_head: first operation in a transaction
+ * @slot_cnt: total slots used in a transaction (group of operations)
+ * @slots_per_op: number of slots per operation
+ * @idx: pool index
+ * @tx_list: list of descriptors that are associated with one operation
+ * @async_tx: support for the async_tx api
+ * @group_list: list of slots that make up a multi-descriptor transaction
+ * for example transfer lengths larger than the supported hw max
+ * @xor_check_result: result of zero sum
+ * @crc32_result: result of CRC calculation
+ */
+struct iop_adma_desc_slot {
+ struct list_head slot_node;
+ struct list_head chain_node;
+ void *hw_desc;
+ struct iop_adma_desc_slot *group_head;
+ u16 slot_cnt;
+ u16 slots_per_op;
+ u16 idx;
+ struct list_head tx_list;
+ struct dma_async_tx_descriptor async_tx;
+ union {
+ u32 *xor_check_result;
+ u32 *crc32_result;
+ u32 *pq_check_result;
+ };
+};
+
+struct iop_adma_platform_data {
+ int hw_id;
+ dma_cap_mask_t cap_mask;
+ size_t pool_size;
+};
+
+#define to_iop_sw_desc(addr_hw_desc) \
+ container_of(addr_hw_desc, struct iop_adma_desc_slot, hw_desc)
+#define iop_hw_desc_slot_idx(hw_desc, idx) \
+ ( (void *) (((unsigned long) hw_desc) + ((idx) << 5)) )
+#endif
diff --git a/include/linux/platform_data/dma-mcf-edma.h b/include/linux/platform_data/dma-mcf-edma.h
new file mode 100644
index 000000000000..d718ccfa3421
--- /dev/null
+++ b/include/linux/platform_data/dma-mcf-edma.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Freescale eDMA platform data, ColdFire SoC family.
+ *
+ * Copyright (c) 2017 Angelo Dureghello <angelo@sysam.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_MCF_EDMA_H__
+#define __LINUX_PLATFORM_DATA_MCF_EDMA_H__
+
+struct dma_slave_map;
+
+bool mcf_edma_filter_fn(struct dma_chan *chan, void *param);
+
+#define MCF_EDMA_FILTER_PARAM(ch) ((void *)ch)
+
+/**
+ * struct mcf_edma_platform_data - platform specific data for eDMA engine
+ *
+ * @dma_channels: The number of eDMA channels.
+ * @slave_map: DMA slave map matching table.
+ * @slavecnt: Number of elements in slave_map.
+ */
+struct mcf_edma_platform_data {
+ int dma_channels;
+ const struct dma_slave_map *slave_map;
+ int slavecnt;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_MCF_EDMA_H__ */
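
A hedged sketch of requesting a specific ColdFire eDMA channel through the filter (channel number is illustrative):

    dma_cap_mask_t mask;
    struct dma_chan *chan;

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);
    chan = dma_request_channel(mask, mcf_edma_filter_fn,
                               MCF_EDMA_FILTER_PARAM(8));	/* eDMA ch 8 */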
diff --git a/include/linux/platform_data/dma-mmp_tdma.h b/include/linux/platform_data/dma-mmp_tdma.h
deleted file mode 100644
index 422d4504dbac..000000000000
--- a/include/linux/platform_data/dma-mmp_tdma.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * SRAM Memory Management
- *
- * Copyright (c) 2011 Marvell Semiconductors Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#ifndef __DMA_MMP_TDMA_H
-#define __DMA_MMP_TDMA_H
-
-#include <linux/genalloc.h>
-
-/* ARBITRARY: SRAM allocations are multiples of this 2^N size */
-#define SRAM_GRANULARITY 512
-
-enum sram_type {
- MMP_SRAM_UNDEFINED = 0,
- MMP_ASRAM,
- MMP_ISRAM,
-};
-
-struct sram_platdata {
- char *pool_name;
- int granularity;
-};
-
-#ifdef CONFIG_MMP_SRAM
-extern struct gen_pool *sram_get_gpool(char *pool_name);
-#else
-static inline struct gen_pool *sram_get_gpool(char *pool_name)
-{
- return NULL;
-}
-#endif
-
-#endif /* __DMA_MMP_TDMA_H */
diff --git a/include/linux/platform_data/dma-mv_xor.h b/include/linux/platform_data/dma-mv_xor.h
index 92ffd3245f76..6867a7ea329b 100644
--- a/include/linux/platform_data/dma-mv_xor.h
+++ b/include/linux/platform_data/dma-mv_xor.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Marvell XOR platform device data definition file.
*/
diff --git a/include/linux/platform_data/dma-s3c24xx.h b/include/linux/platform_data/dma-s3c24xx.h
deleted file mode 100644
index 4f9aba405e96..000000000000
--- a/include/linux/platform_data/dma-s3c24xx.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * S3C24XX DMA handling
- *
- * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-/* Helper to encode the source selection constraints for early s3c socs. */
-#define S3C24XX_DMA_CHANREQ(src, chan) ((BIT(3) | src) << chan * 4)
-
-enum s3c24xx_dma_bus {
- S3C24XX_DMA_APB,
- S3C24XX_DMA_AHB,
-};
-
-/**
- * @bus: on which bus does the peripheral reside - AHB or APB.
- * @handshake: is a handshake with the peripheral necessary
- * @chansel: channel selection information, depending on variant; reqsel for
- * s3c2443 and later and channel-selection map for earlier SoCs
- * see CHANSEL doc in s3c2443-dma.c
- */
-struct s3c24xx_dma_channel {
- enum s3c24xx_dma_bus bus;
- bool handshake;
- u16 chansel;
-};
-
-struct dma_slave_map;
-
-/**
- * struct s3c24xx_dma_platdata - platform specific settings
- * @num_phy_channels: number of physical channels
- * @channels: array of virtual channel descriptions
- * @num_channels: number of virtual channels
- * @slave_map: dma slave map matching table
- * @slavecnt: number of elements in slave_map
- */
-struct s3c24xx_dma_platdata {
- int num_phy_channels;
- struct s3c24xx_dma_channel *channels;
- int num_channels;
- const struct dma_slave_map *slave_map;
- int slavecnt;
-};
-
-struct dma_chan;
-bool s3c24xx_dma_filter(struct dma_chan *chan, void *param);
diff --git a/include/linux/platform_data/dma-ste-dma40.h b/include/linux/platform_data/dma-ste-dma40.h
index 1bb9b1852256..10641633facc 100644
--- a/include/linux/platform_data/dma-ste-dma40.h
+++ b/include/linux/platform_data/dma-ste-dma40.h
@@ -1,8 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) ST-Ericsson SA 2007-2010
* Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
* Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
- * License terms: GNU General Public License (GPL) version 2
*/
diff --git a/include/linux/platform_data/dmtimer-omap.h b/include/linux/platform_data/dmtimer-omap.h
index a19b78d826e9..95d852aef130 100644
--- a/include/linux/platform_data/dmtimer-omap.h
+++ b/include/linux/platform_data/dmtimer-omap.h
@@ -1,31 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* DMTIMER platform data for TI OMAP platforms
*
* Copyright (C) 2012 Texas Instruments
* Author: Jon Hunter <jon-hunter@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __PLATFORM_DATA_DMTIMER_OMAP_H__
#define __PLATFORM_DATA_DMTIMER_OMAP_H__
+struct omap_dm_timer_ops {
+ struct omap_dm_timer *(*request_by_node)(struct device_node *np);
+ struct omap_dm_timer *(*request_specific)(int timer_id);
+ struct omap_dm_timer *(*request)(void);
+
+ int (*free)(struct omap_dm_timer *timer);
+
+ void (*enable)(struct omap_dm_timer *timer);
+ void (*disable)(struct omap_dm_timer *timer);
+
+ int (*get_irq)(struct omap_dm_timer *timer);
+ int (*set_int_enable)(struct omap_dm_timer *timer,
+ unsigned int value);
+ int (*set_int_disable)(struct omap_dm_timer *timer, u32 mask);
+
+ struct clk *(*get_fclk)(struct omap_dm_timer *timer);
+
+ int (*start)(struct omap_dm_timer *timer);
+ int (*stop)(struct omap_dm_timer *timer);
+ int (*set_source)(struct omap_dm_timer *timer, int source);
+
+ int (*set_load)(struct omap_dm_timer *timer, unsigned int value);
+ int (*set_match)(struct omap_dm_timer *timer, int enable,
+ unsigned int match);
+ int (*set_pwm)(struct omap_dm_timer *timer, int def_on,
+ int toggle, int trigger, int autoreload);
+ int (*get_pwm_status)(struct omap_dm_timer *timer);
+ int (*set_prescaler)(struct omap_dm_timer *timer, int prescaler);
+
+ unsigned int (*read_counter)(struct omap_dm_timer *timer);
+ int (*write_counter)(struct omap_dm_timer *timer,
+ unsigned int value);
+ unsigned int (*read_status)(struct omap_dm_timer *timer);
+ int (*write_status)(struct omap_dm_timer *timer,
+ unsigned int value);
+};
+
struct dmtimer_platform_data {
/* set_timer_src - Only used for OMAP1 devices */
int (*set_timer_src)(struct platform_device *pdev, int source);
u32 timer_capability;
u32 timer_errata;
int (*get_context_loss_count)(struct device *);
+ const struct omap_dm_timer_ops *timer_ops;
};
#endif /* __PLATFORM_DATA_DMTIMER_OMAP_H__ */
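
Client drivers reach the timer through the new ops table rather than direct calls; a hedged sketch of driving a PWM output:

    const struct omap_dm_timer_ops *ops = pdata->timer_ops;
    struct omap_dm_timer *timer;

    timer = ops->request_by_node(np);	/* np: the timer's device_node */
    if (timer) {
        /* def_on, toggle, trigger, autoreload */
        ops->set_pwm(timer, 1, 1, 1, 1);
        ops->start(timer);
    }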
diff --git a/include/linux/platform_data/ds620.h b/include/linux/platform_data/ds620.h
index 736bb87ac0fc..f0ce22a78bb8 100644
--- a/include/linux/platform_data/ds620.h
+++ b/include/linux/platform_data/ds620.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DS620_H
#define _LINUX_DS620_H
@@ -13,7 +14,7 @@ struct ds620_platform_data {
* 1 = PO_LOW
* 2 = PO_HIGH
*
- * (see Documentation/hwmon/ds620)
+ * (see Documentation/hwmon/ds620.rst)
*/
int pomode;
};
diff --git a/include/linux/platform_data/dsa.h b/include/linux/platform_data/dsa.h
new file mode 100644
index 000000000000..d4d9bf2060a6
--- /dev/null
+++ b/include/linux/platform_data/dsa.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DSA_PDATA_H
+#define __DSA_PDATA_H
+
+struct device;
+struct net_device;
+
+#define DSA_MAX_SWITCHES 4
+#define DSA_MAX_PORTS 12
+#define DSA_RTABLE_NONE -1
+
+struct dsa_chip_data {
+ /*
+ * How to access the switch configuration registers.
+ */
+ struct device *host_dev;
+ int sw_addr;
+
+ /*
+ * Reference to network devices
+ */
+ struct device *netdev[DSA_MAX_PORTS];
+
+ /* set to size of eeprom if supported by the switch */
+ int eeprom_len;
+
+ /* Device tree node pointer for this specific switch chip
+ * used during switch setup in case additional properties
+	 * and resources need to be used
+ */
+ struct device_node *of_node;
+
+ /*
+ * The names of the switch's ports. Use "cpu" to
+ * designate the switch port that the cpu is connected to,
+ * "dsa" to indicate that this port is a DSA link to
+ * another switch, NULL to indicate the port is unused,
+ * or any other string to indicate this is a physical port.
+ */
+ char *port_names[DSA_MAX_PORTS];
+ struct device_node *port_dn[DSA_MAX_PORTS];
+
+ /*
+ * An array of which element [a] indicates which port on this
+	 * An array in which element [a] indicates which port on this
+	 * switch should be used to send packets that are destined for
+	 * switch a. Can be NULL if there is only one switch chip.
+ s8 rtable[DSA_MAX_SWITCHES];
+};
+
+struct dsa_platform_data {
+ /*
+ * Reference to a Linux network interface that connects
+ * to the root switch chip of the tree.
+ */
+ struct device *netdev;
+ struct net_device *of_netdev;
+
+ /*
+ * Info structs describing each of the switch chips
+ * connected via this network interface.
+ */
+ int nr_chips;
+ struct dsa_chip_data *chip;
+};
+
+
+#endif /* __DSA_PDATA_H */
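
In the style of the legacy board files this header serves, a single-switch setup might look like this (names hypothetical):

    static struct dsa_chip_data my_switch = {
        .port_names[0]	= "cpu",	/* port wired to the host CPU */
        .port_names[1]	= "lan1",
        .port_names[2]	= "lan2",
        .port_names[3]	= NULL,		/* unused */
    };

    static struct dsa_platform_data my_dsa_pdata = {
        .nr_chips	= 1,
        .chip		= &my_switch,
    };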
diff --git a/include/linux/platform_data/dwc3-omap.h b/include/linux/platform_data/dwc3-omap.h
deleted file mode 100644
index 1d36ca874cc8..000000000000
--- a/include/linux/platform_data/dwc3-omap.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * dwc3-omap.h - OMAP Specific Glue layer, header.
- *
- * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
- * All rights reserved.
- *
- * Author: Felipe Balbi <balbi@ti.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-enum dwc3_omap_utmi_mode {
- DWC3_OMAP_UTMI_MODE_UNKNOWN = 0,
- DWC3_OMAP_UTMI_MODE_HW,
- DWC3_OMAP_UTMI_MODE_SW,
-};
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
index 0a533f94438f..ee13d5ca630d 100644
--- a/include/linux/platform_data/edma.h
+++ b/include/linux/platform_data/edma.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* TI EDMA definitions
*
* Copyright (C) 2006-2013 Texas Instruments.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
/*
diff --git a/include/linux/platform_data/efm32-spi.h b/include/linux/platform_data/efm32-spi.h
deleted file mode 100644
index 31b19ca1d73a..000000000000
--- a/include/linux/platform_data/efm32-spi.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__
-#define __LINUX_PLATFORM_DATA_EFM32_SPI_H__
-
-#include <linux/types.h>
-
-/**
- * struct efm32_spi_pdata
- * @location: pinmux location for the I/O pins (to be written to the ROUTE
- * register)
- */
-struct efm32_spi_pdata {
- u8 location;
-};
-#endif /* ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__ */
diff --git a/include/linux/platform_data/efm32-uart.h b/include/linux/platform_data/efm32-uart.h
deleted file mode 100644
index ed0e975b3c54..000000000000
--- a/include/linux/platform_data/efm32-uart.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- *
- *
- */
-#ifndef __LINUX_PLATFORM_DATA_EFM32_UART_H__
-#define __LINUX_PLATFORM_DATA_EFM32_UART_H__
-
-#include <linux/types.h>
-
-/**
- * struct efm32_uart_pdata
- * @location: pinmux location for the I/O pins (to be written to the ROUTE
- * register)
- */
-struct efm32_uart_pdata {
- u8 location;
-};
-#endif /* ifndef __LINUX_PLATFORM_DATA_EFM32_UART_H__ */
diff --git a/include/linux/platform_data/ehci-sh.h b/include/linux/platform_data/ehci-sh.h
deleted file mode 100644
index 5c15a738e116..000000000000
--- a/include/linux/platform_data/ehci-sh.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * EHCI SuperH driver platform data
- *
- * Copyright (C) 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com>
- * Copyright (C) 2012 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef __USB_EHCI_SH_H
-#define __USB_EHCI_SH_H
-
-struct ehci_sh_platdata {
- void (*phy_init)(void); /* Phy init function */
-};
-
-#endif /* __USB_EHCI_SH_H */
diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h
index b8686c00f15f..3cc78f0447b1 100644
--- a/include/linux/platform_data/elm.h
+++ b/include/linux/platform_data/elm.h
@@ -1,18 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* BCH Error Location Module
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __ELM_H
@@ -60,6 +50,6 @@ static inline int elm_config(struct device *dev, enum bch_ecc bch_type,
{
return -ENOSYS;
}
-#endif /* CONFIG_MTD_NAND_ECC_BCH */
+#endif /* CONFIG_MTD_NAND_OMAP_BCH */
#endif /* __ELM_H */
diff --git a/include/linux/platform_data/emc2305.h b/include/linux/platform_data/emc2305.h
new file mode 100644
index 000000000000..54d672dd6f7d
--- /dev/null
+++ b/include/linux/platform_data/emc2305.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_PLATFORM_DATA_EMC2305__
+#define __LINUX_PLATFORM_DATA_EMC2305__
+
+#define EMC2305_PWM_MAX 5
+
+/**
+ * struct emc2305_platform_data - EMC2305 driver platform data
+ * @max_state: maximum cooling state of the cooling device;
+ * @pwm_num: number of active channels;
+ * @pwm_separate: separate PWM settings for every channel;
+ * @pwm_min: array of minimum PWM per channel;
+ */
+struct emc2305_platform_data {
+ u8 max_state;
+ u8 pwm_num;
+ bool pwm_separate;
+ u8 pwm_min[EMC2305_PWM_MAX];
+};
+
+#endif
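An illustrative aside, not part of the patch: a board file could hand the new emc2305 platform data to the I2C core roughly as below. The "emc2305" device name, the 0x4d address, and every board_* identifier are assumptions made for the sketch.

#include <linux/i2c.h>
#include <linux/platform_data/emc2305.h>

/* Two fan channels sharing one minimum PWM; unused array slots stay 0. */
static struct emc2305_platform_data board_emc2305_pdata = {
	.max_state    = 10,
	.pwm_num      = 2,
	.pwm_separate = false,
	.pwm_min      = { 51, 51 },	/* ~20% duty as a floor */
};

/* Handed to the I2C core at board-init time. */
static struct i2c_board_info board_emc2305_info = {
	I2C_BOARD_INFO("emc2305", 0x4d),
	.platform_data = &board_emc2305_pdata,
};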
diff --git a/include/linux/platform_data/emif_plat.h b/include/linux/platform_data/emif_plat.h
index 5c19a2a647c4..b93feef5d586 100644
--- a/include/linux/platform_data/emif_plat.h
+++ b/include/linux/platform_data/emif_plat.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Definitions for TI EMIF device platform data
*
* Copyright (C) 2012 Texas Instruments, Inc.
*
* Aneesh V <aneesh@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __EMIF_PLAT_H
#define __EMIF_PLAT_H
diff --git a/include/linux/platform_data/eth-ep93xx.h b/include/linux/platform_data/eth-ep93xx.h
new file mode 100644
index 000000000000..8eef637a804d
--- /dev/null
+++ b/include/linux/platform_data/eth-ep93xx.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PLATFORM_DATA_ETH_EP93XX
+#define _LINUX_PLATFORM_DATA_ETH_EP93XX
+
+struct ep93xx_eth_data {
+ unsigned char dev_addr[6];
+ unsigned char phy_id;
+};
+
+#endif
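For context, a minimal sketch of how the new ep93xx_eth_data might be wired up from a machine file; the "ep93xx-eth" device name and the MAC/PHY values are assumed, not taken from this commit.

#include <linux/platform_device.h>
#include <linux/platform_data/eth-ep93xx.h>

static struct ep93xx_eth_data board_eth_data = {
	.dev_addr = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	.phy_id   = 1,	/* PHY address on the MDIO bus */
};

static struct platform_device board_eth_device = {
	.name = "ep93xx-eth",	/* assumed driver name */
	.id   = -1,
	.dev  = { .platform_data = &board_eth_data },
};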
diff --git a/include/linux/platform_data/eth-netx.h b/include/linux/platform_data/eth-netx.h
deleted file mode 100644
index a395159725d5..000000000000
--- a/include/linux/platform_data/eth-netx.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __ETH_NETX_H
-#define __ETH_NETX_H
-
-struct netxeth_platform_data {
- unsigned int xcno; /* number of xmac/xpec engine this eth uses */
-};
-
-#endif
diff --git a/include/linux/platform_data/fsa9480.h b/include/linux/platform_data/fsa9480.h
deleted file mode 100644
index 72dddcb4bed1..000000000000
--- a/include/linux/platform_data/fsa9480.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2010 Samsung Electronics
- * Minkyu Kang <mk7.kang@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _FSA9480_H_
-#define _FSA9480_H_
-
-#define FSA9480_ATTACHED 1
-#define FSA9480_DETACHED 0
-
-struct fsa9480_platform_data {
- void (*cfg_gpio) (void);
- void (*usb_cb) (u8 attached);
- void (*uart_cb) (u8 attached);
- void (*charger_cb) (u8 attached);
- void (*jig_cb) (u8 attached);
- void (*reset_cb) (void);
- void (*usb_power) (u8 on);
- int wakeup;
-};
-
-#endif /* _FSA9480_H_ */
diff --git a/include/linux/platform_data/g762.h b/include/linux/platform_data/g762.h
index d3c51283764d..249257ee2132 100644
--- a/include/linux/platform_data/g762.h
+++ b/include/linux/platform_data/g762.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Platform data structure for g762 fan controller driver
*
* Copyright (C) 2013, Arnaud EBALARD <arno@natisbad.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_PLATFORM_DATA_G762_H__
#define __LINUX_PLATFORM_DATA_G762_H__
diff --git a/include/linux/platform_data/gpio-ath79.h b/include/linux/platform_data/gpio-ath79.h
index 88b0db7bee74..3ea6dd942c27 100644
--- a/include/linux/platform_data/gpio-ath79.h
+++ b/include/linux/platform_data/gpio-ath79.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Atheros AR7XXX/AR9XXX GPIO controller platform data
*
* Copyright (C) 2015 Alban Bedel <albeu@free.fr>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_PLATFORM_DATA_GPIO_ATH79_H
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
index 90ae19ca828f..b82e44662efe 100644
--- a/include/linux/platform_data/gpio-davinci.h
+++ b/include/linux/platform_data/gpio-davinci.h
@@ -1,60 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* DaVinci GPIO Platform Related Defines
*
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __DAVINCI_GPIO_PLATFORM_H
#define __DAVINCI_GPIO_PLATFORM_H
-#include <linux/io.h>
-#include <linux/spinlock.h>
-
-#include <asm-generic/gpio.h>
-
-#define MAX_REGS_BANKS 5
-
struct davinci_gpio_platform_data {
+ bool no_auto_base;
+ u32 base;
u32 ngpio;
u32 gpio_unbanked;
};
-struct davinci_gpio_irq_data {
- void __iomem *regs;
- struct davinci_gpio_controller *chip;
- int bank_num;
-};
-
-struct davinci_gpio_controller {
- struct gpio_chip chip;
- struct irq_domain *irq_domain;
- /* Serialize access to GPIO registers */
- spinlock_t lock;
- void __iomem *regs[MAX_REGS_BANKS];
- int gpio_unbanked;
- unsigned int base_irq;
- unsigned int base;
-};
-
-/*
- * basic gpio routines
- */
-#define GPIO(X) (X) /* 0 <= X <= (DAVINCI_N_GPIO - 1) */
-
/* Convert GPIO signal to GPIO pin number */
#define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio))
-static inline u32 __gpio_mask(unsigned gpio)
-{
- return 1 << (gpio % 32);
-}
#endif
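With the controller internals moved out of this header, platform code now fills only the remaining fields. A hedged sketch follows; the "davinci_gpio" name and the line counts are assumptions.

#include <linux/platform_device.h>
#include <linux/platform_data/gpio-davinci.h>

/* 144 lines, the first 8 unbanked (each with a dedicated IRQ);
 * no_auto_base left false so gpiolib assigns the base number. */
static struct davinci_gpio_platform_data board_gpio_pdata = {
	.ngpio         = 144,
	.gpio_unbanked = 8,
};

static struct platform_device board_gpio_device = {
	.name = "davinci_gpio",	/* assumed driver name */
	.id   = -1,
	.dev  = { .platform_data = &board_gpio_pdata },
};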
diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h
deleted file mode 100644
index 2dc7f4a8ab09..000000000000
--- a/include/linux/platform_data/gpio-dwapb.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef GPIO_DW_APB_H
-#define GPIO_DW_APB_H
-
-struct dwapb_port_property {
- struct fwnode_handle *fwnode;
- unsigned int idx;
- unsigned int ngpio;
- unsigned int gpio_base;
- unsigned int irq;
- bool irq_shared;
-};
-
-struct dwapb_platform_data {
- struct dwapb_port_property *properties;
- unsigned int nports;
-};
-
-#endif
diff --git a/include/linux/platform_data/gpio-htc-egpio.h b/include/linux/platform_data/gpio-htc-egpio.h
index b4201c971367..eaefba0b6465 100644
--- a/include/linux/platform_data/gpio-htc-egpio.h
+++ b/include/linux/platform_data/gpio-htc-egpio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* HTC simple EGPIO irq and gpio extender
*/
@@ -5,8 +6,6 @@
#ifndef __HTC_EGPIO_H__
#define __HTC_EGPIO_H__
-#include <linux/gpio.h>
-
/* Descriptive values for all-in or all-out htc_egpio_chip descriptors. */
#define HTC_EGPIO_OUTPUT (~0)
#define HTC_EGPIO_INPUT 0
@@ -51,7 +50,4 @@ struct htc_egpio_platform_data {
int num_chips;
};
-/* Determine the wakeup irq, to be called during early resume */
-extern int htc_egpio_get_wakeup_irq(struct device *dev);
-
#endif
diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h
index cb2618147c34..f377817ce75c 100644
--- a/include/linux/platform_data/gpio-omap.h
+++ b/include/linux/platform_data/gpio-omap.h
@@ -1,31 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* OMAP GPIO handling defines and functions
*
* Copyright (C) 2003-2005 Nokia Corporation
*
* Written by Juha Yrjölä <juha.yrjola@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef __ASM_ARCH_OMAP_GPIO_H
#define __ASM_ARCH_OMAP_GPIO_H
+#ifndef __ASSEMBLER__
#include <linux/io.h>
#include <linux/platform_device.h>
+#endif
#define OMAP1_MPUIO_BASE 0xfffb5000
@@ -97,6 +85,7 @@
* omap2+ specific GPIO registers
*/
#define OMAP24XX_GPIO_REVISION 0x0000
+#define OMAP24XX_GPIO_SYSCONFIG 0x0010
#define OMAP24XX_GPIO_IRQSTATUS1 0x0018
#define OMAP24XX_GPIO_IRQSTATUS2 0x0028
#define OMAP24XX_GPIO_IRQENABLE2 0x002c
@@ -120,6 +109,7 @@
#define OMAP24XX_GPIO_SETDATAOUT 0x0094
#define OMAP4_GPIO_REVISION 0x0000
+#define OMAP4_GPIO_SYSCONFIG 0x0010
#define OMAP4_GPIO_EOI 0x0020
#define OMAP4_GPIO_IRQSTATUSRAW0 0x0024
#define OMAP4_GPIO_IRQSTATUSRAW1 0x0028
@@ -157,13 +147,10 @@
#define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr))
#define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES)
-struct omap_gpio_dev_attr {
- int bank_width; /* GPIO bank width */
- bool dbck_flag; /* dbck required or not - True for OMAP3&4 */
-};
-
+#ifndef __ASSEMBLER__
struct omap_gpio_reg_offs {
u16 revision;
+ u16 sysconfig;
u16 direction;
u16 datain;
u16 dataout;
@@ -202,23 +189,12 @@ struct omap_gpio_platform_data {
bool is_mpuio; /* whether the bank is of type MPUIO */
u32 non_wakeup_gpios;
- struct omap_gpio_reg_offs *regs;
+ const struct omap_gpio_reg_offs *regs;
/* Return context loss count due to PM states changing */
int (*get_context_loss_count)(struct device *dev);
};
-#if IS_BUILTIN(CONFIG_GPIO_OMAP)
-extern void omap2_gpio_prepare_for_idle(int off_mode);
-extern void omap2_gpio_resume_after_idle(void);
-#else
-static inline void omap2_gpio_prepare_for_idle(int off_mode)
-{
-}
-
-static inline void omap2_gpio_resume_after_idle(void)
-{
-}
-#endif
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/include/linux/platform_data/gpio-ts5500.h b/include/linux/platform_data/gpio-ts5500.h
deleted file mode 100644
index b10d11c9bb49..000000000000
--- a/include/linux/platform_data/gpio-ts5500.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * GPIO (DIO) header for Technologic Systems TS-5500
- *
- * Copyright (c) 2012 Savoir-faire Linux Inc.
- * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _PDATA_GPIO_TS5500_H
-#define _PDATA_GPIO_TS5500_H
-
-/**
- * struct ts5500_dio_platform_data - TS-5500 pin block configuration
- * @base: The GPIO base number to use.
- * @strap: The only pin connected to an interrupt in a block is input-only.
- * If you need a bidirectional line which can trigger an IRQ, you
- * may strap it with an in/out pin. This flag indicates this case.
- */
-struct ts5500_dio_platform_data {
- int base;
- bool strap;
-};
-
-#endif /* _PDATA_GPIO_TS5500_H */
diff --git a/include/linux/platform_data/gpio/gpio-amd-fch.h b/include/linux/platform_data/gpio/gpio-amd-fch.h
new file mode 100644
index 000000000000..255d51c9d36d
--- /dev/null
+++ b/include/linux/platform_data/gpio/gpio-amd-fch.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+/*
+ * AMD FCH gpio driver platform-data
+ *
+ * Copyright (C) 2018 metux IT consult
+ * Author: Enrico Weigelt <info@metux.net>
+ *
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_GPIO_AMD_FCH_H
+#define __LINUX_PLATFORM_DATA_GPIO_AMD_FCH_H
+
+#define AMD_FCH_GPIO_DRIVER_NAME "gpio_amd_fch"
+
+/*
+ * gpio register index definitions
+ */
+#define AMD_FCH_GPIO_REG_GPIO49 0x40
+#define AMD_FCH_GPIO_REG_GPIO50 0x41
+#define AMD_FCH_GPIO_REG_GPIO51 0x42
+#define AMD_FCH_GPIO_REG_GPIO55_DEVSLP0 0x43
+#define AMD_FCH_GPIO_REG_GPIO57 0x44
+#define AMD_FCH_GPIO_REG_GPIO58 0x45
+#define AMD_FCH_GPIO_REG_GPIO59_DEVSLP1 0x46
+#define AMD_FCH_GPIO_REG_GPIO64 0x47
+#define AMD_FCH_GPIO_REG_GPIO68 0x48
+#define AMD_FCH_GPIO_REG_GPIO66_SPKR 0x5B
+#define AMD_FCH_GPIO_REG_GPIO71 0x4D
+#define AMD_FCH_GPIO_REG_GPIO32_GE1 0x59
+#define AMD_FCH_GPIO_REG_GPIO33_GE2 0x5A
+#define AMT_FCH_GPIO_REG_GEVT22 0x09
+
+/*
+ * struct amd_fch_gpio_pdata - GPIO chip platform data
+ * @gpio_num: number of entries
+ * @gpio_reg: array of gpio registers
+ * @gpio_names: array of gpio names
+ */
+struct amd_fch_gpio_pdata {
+ int gpio_num;
+ int *gpio_reg;
+ const char * const *gpio_names;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_GPIO_AMD_FCH_H */
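A sketch of how a board driver might consume this new header, modelled loosely on typical platform-device registration; the register picks and line names below are invented.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/platform_data/gpio/gpio-amd-fch.h>

/* Expose two FCH registers as named GPIO lines. */
static int board_fch_gpio_regs[] = {
	AMD_FCH_GPIO_REG_GPIO49,
	AMD_FCH_GPIO_REG_GPIO50,
};

static const char * const board_fch_gpio_names[] = {
	"power-led",
	"status-led",
};

static struct amd_fch_gpio_pdata board_fch_gpio_pdata = {
	.gpio_num   = ARRAY_SIZE(board_fch_gpio_regs),
	.gpio_reg   = board_fch_gpio_regs,
	.gpio_names = board_fch_gpio_names,
};

static struct platform_device board_fch_gpio_device = {
	.name = AMD_FCH_GPIO_DRIVER_NAME,
	.id   = -1,
	.dev  = { .platform_data = &board_fch_gpio_pdata },
};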
diff --git a/include/linux/platform_data/gpio_backlight.h b/include/linux/platform_data/gpio_backlight.h
index 5ae0d9c80d4d..1a8b5b1946fe 100644
--- a/include/linux/platform_data/gpio_backlight.h
+++ b/include/linux/platform_data/gpio_backlight.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* gpio_backlight.h - Simple GPIO-controlled backlight
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __GPIO_BACKLIGHT_H__
#define __GPIO_BACKLIGHT_H__
@@ -12,10 +9,6 @@ struct device;
struct gpio_backlight_platform_data {
struct device *fbdev;
- int gpio;
- int def_value;
- bool active_low;
- const char *name;
};
#endif
diff --git a/include/linux/platform_data/gpmc-omap.h b/include/linux/platform_data/gpmc-omap.h
index 67ccdb0e1606..dcca6c5e23bb 100644
--- a/include/linux/platform_data/gpmc-omap.h
+++ b/include/linux/platform_data/gpmc-omap.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* OMAP GPMC Platform data
*
- * Copyright (C) 2014 Texas Instruments, Inc. - http://www.ti.com
+ * Copyright (C) 2014 Texas Instruments, Inc. - https://www.ti.com
* Roger Quadros <rogerq@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
*/
#ifndef _GPMC_OMAP_H_
@@ -139,6 +136,13 @@ struct gpmc_device_timings {
#define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */
#define GPMC_MUX_AD 2 /* Addr-Data multiplex */
+/* Wait pin polarity values */
+#define GPMC_WAITPINPOLARITY_INVALID UINT_MAX
+#define GPMC_WAITPINPOLARITY_ACTIVE_LOW 0
+#define GPMC_WAITPINPOLARITY_ACTIVE_HIGH 1
+
+#define GPMC_WAITPIN_INVALID UINT_MAX
+
struct gpmc_settings {
bool burst_wrap; /* enables wrap bursting */
bool burst_read; /* enables read page/burst mode */
@@ -152,6 +156,7 @@ struct gpmc_settings {
u32 device_width; /* device bus width (8 or 16 bit) */
u32 mux_add_data; /* multiplex address & data */
u32 wait_pin; /* wait-pin to be used */
+ u32 wait_pin_polarity;
};
/* Data for each chip select */
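To illustrate the new wait-pin polarity handling, a hypothetical gpmc_settings initialiser; the values are made up, and a caller that does not manage polarity would set wait_pin_polarity to GPMC_WAITPINPOLARITY_INVALID instead.

#include <linux/platform_data/gpmc-omap.h>

/* 16-bit device on an address/data-muxed bus, gated by wait pin 0
 * treated as an active-high ready signal. */
static struct gpmc_settings board_nor_gpmc = {
	.device_width      = 16,
	.mux_add_data      = GPMC_MUX_AD,
	.wait_pin          = 0,
	.wait_pin_polarity = GPMC_WAITPINPOLARITY_ACTIVE_HIGH,
};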
diff --git a/include/linux/platform_data/gsc_hwmon.h b/include/linux/platform_data/gsc_hwmon.h
new file mode 100644
index 000000000000..f2781aa7eff8
--- /dev/null
+++ b/include/linux/platform_data/gsc_hwmon.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _GSC_HWMON_H
+#define _GSC_HWMON_H
+
+enum gsc_hwmon_mode {
+ mode_temperature,
+ mode_voltage_24bit,
+ mode_voltage_raw,
+ mode_voltage_16bit,
+ mode_fan,
+ mode_max,
+};
+
+/**
+ * struct gsc_hwmon_channel - configuration parameters
+ * @reg: I2C register offset
+ * @mode: channel mode
+ * @name: channel name
+ * @mvoffset: voltage offset
+ * @vdiv: voltage divider array (2 resistor values in milli-ohms)
+ */
+struct gsc_hwmon_channel {
+ unsigned int reg;
+ unsigned int mode;
+ const char *name;
+ unsigned int mvoffset;
+ unsigned int vdiv[2];
+};
+
+/**
+ * struct gsc_hwmon_platform_data - platform data for gsc_hwmon driver
+ * @nchannels: number of elements in @channels array
+ * @vreference: voltage reference (mV)
+ * @resolution: ADC bit resolution
+ * @fan_base: register base for FAN controller
+ * @channels: array of gsc_hwmon_channel structures describing channels
+ */
+struct gsc_hwmon_platform_data {
+ int nchannels;
+ unsigned int resolution;
+ unsigned int vreference;
+ unsigned int fan_base;
+ struct gsc_hwmon_channel channels[];
+};
+#endif
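A hedged sketch of the new platform data; the channel registers and scaling values are invented, and the static initialisation of the trailing flexible array relies on a GCC extension that kernel board files commonly use.

#include <linux/platform_data/gsc_hwmon.h>

static struct gsc_hwmon_platform_data board_hwmon_pdata = {
	.nchannels  = 2,
	.resolution = 12,
	.vreference = 2500,	/* mV */
	.channels   = {
		{ .reg = 0x00, .mode = mode_temperature, .name = "cpu-temp" },
		{ .reg = 0x02, .mode = mode_voltage_raw, .name = "vin",
		  .vdiv = { 10000, 10000 } },	/* milli-ohms */
	},
};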
diff --git a/include/linux/platform_data/hirschmann-hellcreek.h b/include/linux/platform_data/hirschmann-hellcreek.h
new file mode 100644
index 000000000000..6a000df5541f
--- /dev/null
+++ b/include/linux/platform_data/hirschmann-hellcreek.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: (GPL-2.0 or MIT) */
+/*
+ * Hirschmann Hellcreek TSN switch platform data.
+ *
+ * Copyright (C) 2020 Linutronix GmbH
+ * Author Kurt Kanzenbach <kurt@linutronix.de>
+ */
+
+#ifndef _HIRSCHMANN_HELLCREEK_H_
+#define _HIRSCHMANN_HELLCREEK_H_
+
+#include <linux/types.h>
+
+struct hellcreek_platform_data {
+ const char *name; /* Switch name */
+ int num_ports; /* Amount of switch ports */
+ int is_100_mbits; /* Is it configured to 100 or 1000 mbit/s */
+ int qbv_support; /* Qbv support on front TSN ports */
+ int qbv_on_cpu_port; /* Qbv support on the CPU port */
+ int qbu_support; /* Qbu support on front TSN ports */
+	u16 module_id;		/* Module identification */
+};
+
+#endif /* _HIRSCHMANN_HELLCREEK_H_ */
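For illustration only, a hypothetical instantiation of the new hellcreek_platform_data; every value below is assumed, not taken from the commit.

#include <linux/platform_data/hirschmann-hellcreek.h>

/* Four front ports at gigabit speed, Qbv on the front ports only. */
static struct hellcreek_platform_data board_hellcreek_pdata = {
	.name            = "hellcreek",
	.num_ports       = 4,
	.is_100_mbits    = 0,	/* 1000 mbit/s */
	.qbv_support     = 1,
	.qbv_on_cpu_port = 0,
	.qbu_support     = 0,
	.module_id       = 0x4d48,	/* invented ID */
};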
diff --git a/include/linux/platform_data/hsmmc-omap.h b/include/linux/platform_data/hsmmc-omap.h
index 8e981be2e2c2..7124a5f4bf06 100644
--- a/include/linux/platform_data/hsmmc-omap.h
+++ b/include/linux/platform_data/hsmmc-omap.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* MMC definitions for OMAP2
*
* Copyright (C) 2006 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/*
@@ -55,9 +52,6 @@ struct omap_hsmmc_platform_data {
u32 caps; /* Used for the MMC driver on 2430 and later */
u32 pm_caps; /* PM capabilities of the mmc */
- /* use the internal clock */
- unsigned internal_clock:1;
-
/* nonremovable e.g. eMMC */
unsigned nonremovable:1;
@@ -70,18 +64,8 @@ struct omap_hsmmc_platform_data {
#define HSMMC_HAS_HSPE_SUPPORT (1 << 2)
unsigned features;
- int gpio_cd; /* gpio (card detect) */
- int gpio_cod; /* gpio (cover detect) */
- int gpio_wp; /* gpio (write protect) */
-
- int (*set_power)(struct device *dev, int power_on, int vdd);
- void (*remux)(struct device *dev, int power_on);
- /* Call back before enabling / disabling regulators */
- void (*before_set_reg)(struct device *dev, int power_on, int vdd);
- /* Call back after enabling / disabling regulators */
- void (*after_set_reg)(struct device *dev, int power_on, int vdd);
- /* if we have special card, init it using this callback */
- void (*init_card)(struct mmc_card *card);
+ /* string specifying a particular variant of hardware */
+ char *version;
const char *name;
u32 ocr_mask;
diff --git a/include/linux/platform_data/hwmon-s3c.h b/include/linux/platform_data/hwmon-s3c.h
index 0e3cce130fe2..1707ad4147df 100644
--- a/include/linux/platform_data/hwmon-s3c.h
+++ b/include/linux/platform_data/hwmon-s3c.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2005 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
*
* S3C - HWMon interface for ADC
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __HWMON_S3C_H__
diff --git a/include/linux/platform_data/i2c-cbus-gpio.h b/include/linux/platform_data/i2c-cbus-gpio.h
deleted file mode 100644
index 6faa992a9502..000000000000
--- a/include/linux/platform_data/i2c-cbus-gpio.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * i2c-cbus-gpio.h - CBUS I2C platform_data definition
- *
- * Copyright (C) 2004-2009 Nokia Corporation
- *
- * Written by Felipe Balbi and Aaro Koskinen.
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file "COPYING" in the main directory of this
- * archive for more details.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __INCLUDE_LINUX_I2C_CBUS_GPIO_H
-#define __INCLUDE_LINUX_I2C_CBUS_GPIO_H
-
-struct i2c_cbus_platform_data {
- int dat_gpio;
- int clk_gpio;
- int sel_gpio;
-};
-
-#endif /* __INCLUDE_LINUX_I2C_CBUS_GPIO_H */
diff --git a/include/linux/platform_data/i2c-davinci.h b/include/linux/platform_data/i2c-davinci.h
index 89fd34727a24..98967df07468 100644
--- a/include/linux/platform_data/i2c-davinci.h
+++ b/include/linux/platform_data/i2c-davinci.h
@@ -16,9 +16,8 @@
struct davinci_i2c_platform_data {
unsigned int bus_freq; /* standard bus frequency (kHz) */
unsigned int bus_delay; /* post-transaction delay (usec) */
- unsigned int sda_pin; /* GPIO pin ID to use for SDA */
- unsigned int scl_pin; /* GPIO pin ID to use for SCL */
- bool has_pfunc; /*chip has a ICPFUNC register */
+ bool gpio_recovery; /* Use GPIO recovery method */
+	bool has_pfunc;		/* Chip has an ICPFUNC register */
};
/* for board setup code */
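A minimal sketch of the reworked DaVinci I2C platform data, assuming a 400 kHz bus; with the raw sda_pin/scl_pin numbers gone, bus recovery is reduced to a flag for the I2C core's GPIO recovery method.

#include <linux/types.h>
#include <linux/platform_data/i2c-davinci.h>

static struct davinci_i2c_platform_data board_i2c_pdata = {
	.bus_freq      = 400,	/* kHz */
	.gpio_recovery = true,
	.has_pfunc     = true,
};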
diff --git a/include/linux/platform_data/i2c-designware.h b/include/linux/platform_data/i2c-designware.h
deleted file mode 100644
index 7a61fb27c25b..000000000000
--- a/include/linux/platform_data/i2c-designware.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef I2C_DESIGNWARE_H
-#define I2C_DESIGNWARE_H
-
-struct dw_i2c_platform_data {
- unsigned int i2c_scl_freq;
-};
-
-#endif
diff --git a/include/linux/i2c-gpio.h b/include/linux/platform_data/i2c-gpio.h
index c1bcb1f1d73b..545639bcca72 100644
--- a/include/linux/i2c-gpio.h
+++ b/include/linux/platform_data/i2c-gpio.h
@@ -1,19 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* i2c-gpio interface to platform code
*
* Copyright (C) 2007 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LINUX_I2C_GPIO_H
#define _LINUX_I2C_GPIO_H
/**
* struct i2c_gpio_platform_data - Platform-dependent data for i2c-gpio
- * @sda_pin: GPIO pin ID to use for SDA
- * @scl_pin: GPIO pin ID to use for SCL
* @udelay: signal toggle delay. SCL frequency is (500 / udelay) kHz
* @timeout: clock stretching timeout in jiffies. If the slave keeps
* SCL low for longer than this, the transfer will time out.
@@ -21,18 +16,25 @@
* isn't actively driven high when setting the output value high.
* gpio_get_value() must return the actual pin state even if the
* pin is configured as an output.
+ * @sda_is_output_only: SDA output drivers can't be turned off.
+ * This is for clients that can only read SDA/SCL.
+ * @sda_has_no_pullup: SDA is used in a non-compliant way and has no pull-up.
+ * Therefore disable open-drain.
* @scl_is_open_drain: SCL is set up as open drain. Same requirements
* as for sda_is_open_drain apply.
* @scl_is_output_only: SCL output drivers cannot be turned off.
+ * @scl_has_no_pullup: SCL is used in a non-compliant way and has no pull-up.
+ * Therefore disable open-drain.
*/
struct i2c_gpio_platform_data {
- unsigned int sda_pin;
- unsigned int scl_pin;
int udelay;
int timeout;
unsigned int sda_is_open_drain:1;
+ unsigned int sda_is_output_only:1;
+ unsigned int sda_has_no_pullup:1;
unsigned int scl_is_open_drain:1;
unsigned int scl_is_output_only:1;
+ unsigned int scl_has_no_pullup:1;
};
#endif /* _LINUX_I2C_GPIO_H */
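With sda_pin/scl_pin removed from the struct, board code passes the lines through a GPIO descriptor lookup table instead, keyed to the device name; only timing and line quirks stay in the platform data. A hedged sketch follows — the chip label, line offsets, and device id are assumptions.

#include <linux/gpio/machine.h>
#include <linux/platform_device.h>
#include <linux/platform_data/i2c-gpio.h>

static struct gpiod_lookup_table board_i2c_gpiod_table = {
	.dev_id = "i2c-gpio.0",
	.table  = {
		GPIO_LOOKUP_IDX("gpiochip0", 23, NULL, 0,
				GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), /* SDA */
		GPIO_LOOKUP_IDX("gpiochip0", 24, NULL, 1,
				GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), /* SCL */
		{ },
	},
};

static struct i2c_gpio_platform_data board_i2c_gpio_pdata = {
	.udelay  = 5,	/* 500/5 = 100 kHz, per the header comment */
	.timeout = 100,	/* jiffies */
};

static struct platform_device board_i2c_gpio_device = {
	.name = "i2c-gpio",
	.id   = 0,
	.dev  = { .platform_data = &board_i2c_gpio_pdata },
};

/* At board init:
 *	gpiod_add_lookup_table(&board_i2c_gpiod_table);
 *	platform_device_register(&board_i2c_gpio_device);
 */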
diff --git a/include/linux/platform_data/i2c-hid.h b/include/linux/platform_data/i2c-hid.h
deleted file mode 100644
index 1fb088239d12..000000000000
--- a/include/linux/platform_data/i2c-hid.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * HID over I2C protocol implementation
- *
- * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
- * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- */
-
-#ifndef __LINUX_I2C_HID_H
-#define __LINUX_I2C_HID_H
-
-#include <linux/types.h>
-
-struct regulator;
-
-/**
- * struct i2chid_platform_data - used by hid over i2c implementation.
- * @hid_descriptor_address: i2c register where the HID descriptor is stored.
- * @supply: regulator for powering on the device.
- * @post_power_delay_ms: delay after powering on before device is usable.
- *
- * Note that it is the responsibility of the platform driver (or the acpi 5.0
- * driver, or the flattened device tree) to setup the irq related to the gpio in
- * the struct i2c_board_info.
- * The platform driver should also setup the gpio according to the device:
- *
- * A typical example is the following:
- * irq = gpio_to_irq(intr_gpio);
- * hkdk4412_i2c_devs5[0].irq = irq; // store the irq in i2c_board_info
- * gpio_request(intr_gpio, "elan-irq");
- * s3c_gpio_setpull(intr_gpio, S3C_GPIO_PULL_UP);
- */
-struct i2c_hid_platform_data {
- u16 hid_descriptor_address;
- struct regulator *supply;
- int post_power_delay_ms;
-};
-
-#endif /* __LINUX_I2C_HID_H */
diff --git a/include/linux/platform_data/i2c-imx.h b/include/linux/platform_data/i2c-imx.h
index 8289d915e615..962bfc3273eb 100644
--- a/include/linux/platform_data/i2c-imx.h
+++ b/include/linux/platform_data/i2c-imx.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* i2c.h - i.MX I2C driver header file
*
* Copyright (c) 2008, Darius Augulis <augulis.darius@gmail.com>
- *
- * This file is released under the GPLv2
*/
#ifndef __ASM_ARCH_I2C_H_
diff --git a/include/linux/i2c-mux-gpio.h b/include/linux/platform_data/i2c-mux-gpio.h
index 4406108201fe..5e4c2c272a73 100644
--- a/include/linux/i2c-mux-gpio.h
+++ b/include/linux/platform_data/i2c-mux-gpio.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* i2c-mux-gpio interface to platform code
*
* Peter Korsgaard <peter.korsgaard@barco.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LINUX_I2C_MUX_GPIO_H
@@ -22,10 +19,6 @@
* position
* @n_values: Number of multiplexer positions (busses to instantiate)
* @classes: Optional I2C auto-detection classes
- * @gpio_chip: Optional GPIO chip name; if set, GPIO pin numbers are given
- * relative to the base GPIO number of that chip
- * @gpios: Array of GPIO numbers used to control MUX
- * @n_gpios: Number of GPIOs used to control MUX
* @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used
*/
struct i2c_mux_gpio_platform_data {
@@ -34,9 +27,6 @@ struct i2c_mux_gpio_platform_data {
const unsigned *values;
int n_values;
const unsigned *classes;
- char *gpio_chip;
- const unsigned *gpios;
- int n_gpios;
unsigned idle;
};
diff --git a/include/linux/platform_data/i2c-mux-reg.h b/include/linux/platform_data/i2c-mux-reg.h
index c68712aadf43..2543c2a1c9ae 100644
--- a/include/linux/platform_data/i2c-mux-reg.h
+++ b/include/linux/platform_data/i2c-mux-reg.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* I2C multiplexer using a single register
*
* Copyright 2015 Freescale Semiconductor
* York Sun <yorksun@freescale.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __LINUX_PLATFORM_DATA_I2C_MUX_REG_H
diff --git a/include/linux/platform_data/i2c-nuc900.h b/include/linux/platform_data/i2c-nuc900.h
deleted file mode 100644
index 9ffb12d06e91..000000000000
--- a/include/linux/platform_data/i2c-nuc900.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __ASM_ARCH_NUC900_I2C_H
-#define __ASM_ARCH_NUC900_I2C_H
-
-struct nuc900_platform_i2c {
- int bus_num;
- unsigned long bus_freq;
-};
-
-#endif /* __ASM_ARCH_NUC900_I2C_H */
diff --git a/include/linux/i2c-ocores.h b/include/linux/platform_data/i2c-ocores.h
index 01edd96fe1f7..e6326cbafe59 100644
--- a/include/linux/i2c-ocores.h
+++ b/include/linux/platform_data/i2c-ocores.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* i2c-ocores.h - definitions for the i2c-ocores interface
*
- * Peter Korsgaard <jacmet@sunsite.dk>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
+ * Peter Korsgaard <peter@korsgaard.com>
*/
#ifndef _LINUX_I2C_OCORES_H
@@ -15,6 +12,7 @@ struct ocores_i2c_platform_data {
u32 reg_shift; /* register offset shift value */
u32 reg_io_width; /* register io read/write width */
u32 clock_khz; /* input clock in kHz */
+ u32 bus_khz; /* bus clock in kHz */
bool big_endian; /* registers are big endian */
u8 num_devices; /* number of devices in the devices list */
struct i2c_board_info const *devices; /* devices connected to the bus */
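An assumed example of the new bus_khz field alongside the existing clock fields; the frequencies and register layout are illustrative only.

#include <linux/platform_data/i2c-ocores.h>

/* 62.5 MHz input clock, divided down for a 100 kHz bus. */
static struct ocores_i2c_platform_data board_ocores_pdata = {
	.reg_shift    = 2,	/* registers on 32-bit boundaries */
	.reg_io_width = 1,	/* 8-bit accesses */
	.clock_khz    = 62500,
	.bus_khz      = 100,
};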
diff --git a/include/linux/i2c-omap.h b/include/linux/platform_data/i2c-omap.h
index babe0cf6d56b..3444265ee8ee 100644
--- a/include/linux/i2c-omap.h
+++ b/include/linux/platform_data/i2c-omap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __I2C_OMAP_H__
#define __I2C_OMAP_H__
diff --git a/include/linux/i2c-pca-platform.h b/include/linux/platform_data/i2c-pca-platform.h
index aba33759dec4..c37329432a8e 100644
--- a/include/linux/i2c-pca-platform.h
+++ b/include/linux/platform_data/i2c-pca-platform.h
@@ -1,10 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef I2C_PCA9564_PLATFORM_H
#define I2C_PCA9564_PLATFORM_H
struct i2c_pca9564_pf_platform_data {
- int gpio; /* pin to reset chip. driver will work when
- * not supplied (negative value), but it
- * cannot exit some error conditions then */
int i2c_clock_speed; /* values are defined in linux/i2c-algo-pca.h */
int timeout; /* timeout in jiffies */
};
diff --git a/include/linux/platform_data/i2c-pxa.h b/include/linux/platform_data/i2c-pxa.h
new file mode 100644
index 000000000000..24953981bd9f
--- /dev/null
+++ b/include/linux/platform_data/i2c-pxa.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * i2c_pxa.h
+ *
+ * Copyright (C) 2002 Intrinsyc Software Inc.
+ */
+#ifndef _I2C_PXA_H_
+#define _I2C_PXA_H_
+
+struct i2c_pxa_platform_data {
+ unsigned int class;
+ unsigned int use_pio :1;
+ unsigned int fast_mode :1;
+ unsigned int high_mode:1;
+ unsigned char master_code;
+ unsigned long rate;
+};
+#endif
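A speculative use of the relocated i2c-pxa platform data, filling only fields shown in this header; I2C_CLASS_HWMON comes from linux/i2c.h.

#include <linux/i2c.h>
#include <linux/platform_data/i2c-pxa.h>

/* Polled fast-mode (400 kHz) bus that hwmon drivers may probe. */
static struct i2c_pxa_platform_data board_i2c_pxa_pdata = {
	.class     = I2C_CLASS_HWMON,
	.use_pio   = 1,
	.fast_mode = 1,
};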
diff --git a/include/linux/platform_data/i2c-s3c2410.h b/include/linux/platform_data/i2c-s3c2410.h
index 05af66b840b9..550746715139 100644
--- a/include/linux/platform_data/i2c-s3c2410.h
+++ b/include/linux/platform_data/i2c-s3c2410.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2004-2009 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C - I2C Controller platform_device info
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __I2C_S3C2410_H
diff --git a/include/linux/i2c-xiic.h b/include/linux/platform_data/i2c-xiic.h
index 4f9f2256a97e..2e3683c2398d 100644
--- a/include/linux/i2c-xiic.h
+++ b/include/linux/platform_data/i2c-xiic.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* i2c-xiic.h
* Copyright (c) 2009 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Supports:
diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h
index 9abc0ca7259b..2aa5ee9a9050 100644
--- a/include/linux/platform_data/ina2xx.h
+++ b/include/linux/platform_data/ina2xx.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Driver for Texas Instruments INA219, INA226 power monitor chips
*
- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * For further information, see the Documentation/hwmon/ina2xx file.
+ * For further information, see the Documentation/hwmon/ina2xx.rst file.
*/
/**
diff --git a/include/linux/platform_data/intel-mid_wdt.h b/include/linux/platform_data/intel-mid_wdt.h
index b98253466ace..8dba70b4b020 100644
--- a/include/linux/platform_data/intel-mid_wdt.h
+++ b/include/linux/platform_data/intel-mid_wdt.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* intel-mid_wdt: generic Intel MID SCU watchdog driver
*
* Copyright (C) 2014 Intel Corporation. All rights reserved.
* Contact: David Cohen <david.a.cohen@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General
- * Public License as published by the Free Software Foundation.
*/
#ifndef __INTEL_MID_WDT_H__
diff --git a/include/linux/platform_data/invensense_mpu6050.h b/include/linux/platform_data/invensense_mpu6050.h
index 554b59801aa8..f05b37521f67 100644
--- a/include/linux/platform_data/invensense_mpu6050.h
+++ b/include/linux/platform_data/invensense_mpu6050.h
@@ -1,14 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Invensense, Inc.
-*
-* This software is licensed under the terms of the GNU General Public
-* License version 2, as published by the Free Software Foundation, and
-* may be copied, distributed, and modified under those terms.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU General Public License for more details.
*/
#ifndef __INV_MPU6050_PLATFORM_H_
@@ -20,7 +12,7 @@
* mounting matrix retrieved from device-tree)
*
* Contains platform specific information on how to configure the MPU6050 to
- * work on this platform. The orientation matricies are 3x3 rotation matricies
+ * work on this platform. The orientation matrices are 3x3 rotation matrices
* that are applied to the data to rotate from the mounting orientation to the
* platform orientation. The values must be one of 0, 1, or -1 and each row and
* column should have exactly 1 non-zero value.
diff --git a/include/linux/platform_data/iommu-omap.h b/include/linux/platform_data/iommu-omap.h
index e8b12dbf6170..8474a0208b34 100644
--- a/include/linux/platform_data/iommu-omap.h
+++ b/include/linux/platform_data/iommu-omap.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* omap iommu: main structures
*
* Copyright (C) 2008-2009 Nokia Corporation
*
* Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/platform_device.h>
@@ -16,4 +13,8 @@ struct iommu_platform_data {
const char *reset_name;
int (*assert_reset)(struct platform_device *pdev, const char *name);
int (*deassert_reset)(struct platform_device *pdev, const char *name);
+ int (*device_enable)(struct platform_device *pdev);
+ int (*device_idle)(struct platform_device *pdev);
+ int (*set_pwrdm_constraint)(struct platform_device *pdev, bool request,
+ u8 *pwrst);
};
diff --git a/include/linux/platform_data/irda-pxaficp.h b/include/linux/platform_data/irda-pxaficp.h
deleted file mode 100644
index 3cd41f77dda4..000000000000
--- a/include/linux/platform_data/irda-pxaficp.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef ASMARM_ARCH_IRDA_H
-#define ASMARM_ARCH_IRDA_H
-
-/* board specific transceiver capabilities */
-
-#define IR_OFF 1
-#define IR_SIRMODE 2
-#define IR_FIRMODE 4
-
-struct pxaficp_platform_data {
- int transceiver_cap;
- void (*transceiver_mode)(struct device *dev, int mode);
- int (*startup)(struct device *dev);
- void (*shutdown)(struct device *dev);
- int gpio_pwdown; /* powerdown GPIO for the IrDA chip */
- bool gpio_pwdown_inverted; /* gpio_pwdown is inverted */
-};
-
-extern void pxa_set_ficp_info(struct pxaficp_platform_data *info);
-
-#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
-void pxa2xx_transceiver_mode(struct device *dev, int mode);
-#endif
-
-#endif
diff --git a/include/linux/platform_data/irda-sa11x0.h b/include/linux/platform_data/irda-sa11x0.h
deleted file mode 100644
index 38f77b5e56cf..000000000000
--- a/include/linux/platform_data/irda-sa11x0.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * arch/arm/include/asm/mach/irda.h
- *
- * Copyright (C) 2004 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_ARM_MACH_IRDA_H
-#define __ASM_ARM_MACH_IRDA_H
-
-struct irda_platform_data {
- int (*startup)(struct device *);
- void (*shutdown)(struct device *);
- int (*set_power)(struct device *, unsigned int state);
- void (*set_speed)(struct device *, unsigned int speed);
-};
-
-#endif
diff --git a/include/linux/platform_data/isl9305.h b/include/linux/platform_data/isl9305.h
index 4ac1a070af0a..6893fdaae7d4 100644
--- a/include/linux/platform_data/isl9305.h
+++ b/include/linux/platform_data/isl9305.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* isl9305 - Intersil ISL9305 DCDC regulator
*
* Copyright 2014 Linaro Ltd
*
* Author: Mark Brown <broonie@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __ISL9305_H
diff --git a/include/linux/platform_data/itco_wdt.h b/include/linux/platform_data/itco_wdt.h
index 0e95527edf25..45d860cac2b0 100644
--- a/include/linux/platform_data/itco_wdt.h
+++ b/include/linux/platform_data/itco_wdt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Platform data for the Intel TCO Watchdog
*/
@@ -11,13 +12,16 @@
#define ICH_RES_MEM_OFF 2
#define ICH_RES_MEM_GCS_PMC 0
+/**
+ * struct itco_wdt_platform_data - iTCO_wdt platform data
+ * @name: Name of the platform
+ * @version: iTCO version
+ * @no_reboot_use_pmc: Use PMC BXT API to set and clear NO_REBOOT bit
+ */
struct itco_wdt_platform_data {
char name[32];
unsigned int version;
- /* private data to be passed to update_no_reboot_bit API */
- void *no_reboot_priv;
- /* pointer for platform specific no reboot update function */
- int (*update_no_reboot_bit)(void *priv, bool set);
+ bool no_reboot_use_pmc;
};
#endif /* _ITCO_WDT_H_ */
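A sketch of the simplified platform data, assuming a platform whose NO_REBOOT bit is reached through the Broxton PMC API rather than a private callback; the name and version are placeholders.

#include <linux/types.h>
#include <linux/platform_data/itco_wdt.h>

static struct itco_wdt_platform_data board_tco_pdata = {
	.name              = "Intel PCH",
	.version           = 6,
	.no_reboot_use_pmc = true,
};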
diff --git a/include/linux/platform_data/keyboard-pxa930_rotary.h b/include/linux/platform_data/keyboard-pxa930_rotary.h
deleted file mode 100644
index 053587caffdd..000000000000
--- a/include/linux/platform_data/keyboard-pxa930_rotary.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __ASM_ARCH_PXA930_ROTARY_H
-#define __ASM_ARCH_PXA930_ROTARY_H
-
-/* NOTE:
- *
- * rotary can be either interpreted as a relative input event (e.g.
- * REL_WHEEL or REL_HWHEEL) or a specific key event (e.g. UP/DOWN
- * or LEFT/RIGHT), depending on if up_key & down_key are assigned
- * or rel_code is assigned a non-zero value. When all are non-zero,
- * up_key and down_key will be preferred.
- */
-struct pxa930_rotary_platform_data {
- int up_key;
- int down_key;
- int rel_code;
-};
-
-void __init pxa930_set_rotarykey_info(struct pxa930_rotary_platform_data *info);
-
-#endif /* __ASM_ARCH_PXA930_ROTARY_H */
diff --git a/include/linux/platform_data/keypad-ep93xx.h b/include/linux/platform_data/keypad-ep93xx.h
index adccee25b162..3054fced8509 100644
--- a/include/linux/platform_data/keypad-ep93xx.h
+++ b/include/linux/platform_data/keypad-ep93xx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KEYPAD_EP93XX_H
#define __KEYPAD_EP93XX_H
@@ -8,8 +9,7 @@ struct matrix_keymap_data;
#define EP93XX_KEYPAD_DIAG_MODE (1<<1) /* diagnostic mode */
#define EP93XX_KEYPAD_BACK_DRIVE (1<<2) /* back driving mode */
#define EP93XX_KEYPAD_TEST_MODE (1<<3) /* scan only column 0 */
-#define EP93XX_KEYPAD_KDIV (1<<4) /* 1/4 clock or 1/16 clock */
-#define EP93XX_KEYPAD_AUTOREPEAT (1<<5) /* enable key autorepeat */
+#define EP93XX_KEYPAD_AUTOREPEAT (1<<4) /* enable key autorepeat */
/**
* struct ep93xx_keypad_platform_data - platform specific device structure
@@ -23,6 +23,7 @@ struct ep93xx_keypad_platform_data {
unsigned int debounce;
unsigned int prescale;
unsigned int flags;
+ unsigned int clk_rate;
};
#define EP93XX_MATRIX_ROWS (8)
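Since EP93XX_KEYPAD_KDIV is gone and autorepeat moved down to bit 4, a hedged example of platform data using the new clk_rate field; the keymap and rate are invented for the sketch.

#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/platform_data/keypad-ep93xx.h>

static const uint32_t board_keymap[] = {
	KEY(0, 0, KEY_UP),
	KEY(0, 1, KEY_DOWN),
};

static struct matrix_keymap_data board_keymap_data = {
	.keymap      = board_keymap,
	.keymap_size = ARRAY_SIZE(board_keymap),
};

static struct ep93xx_keypad_platform_data board_keypad_pdata = {
	.keymap_data = &board_keymap_data,
	.flags       = EP93XX_KEYPAD_AUTOREPEAT,
	.clk_rate    = 1200,	/* Hz; replaces the old KDIV divider flag */
};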
diff --git a/include/linux/platform_data/keypad-nomadik-ske.h b/include/linux/platform_data/keypad-nomadik-ske.h
index 31382fbc07dc..7efabbca1dca 100644
--- a/include/linux/platform_data/keypad-nomadik-ske.h
+++ b/include/linux/platform_data/keypad-nomadik-ske.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) ST-Ericsson SA 2010
*
- * License Terms: GNU General Public License v2
* Author: Naveen Kumar Gaddipati <naveen.gaddipati@stericsson.com>
*
* ux500 Scroll key and Keypad Encoder (SKE) header
diff --git a/include/linux/platform_data/keypad-omap.h b/include/linux/platform_data/keypad-omap.h
index c3a3abae98f0..3e7c64c854f4 100644
--- a/include/linux/platform_data/keypad-omap.h
+++ b/include/linux/platform_data/keypad-omap.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2006 Komal Shah <komal_shah802003@yahoo.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __KEYPAD_OMAP_H
#define __KEYPAD_OMAP_H
diff --git a/include/linux/platform_data/keypad-pxa27x.h b/include/linux/platform_data/keypad-pxa27x.h
index 24625569d16d..a376442b9935 100644
--- a/include/linux/platform_data/keypad-pxa27x.h
+++ b/include/linux/platform_data/keypad-pxa27x.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARCH_PXA27x_KEYPAD_H
#define __ASM_ARCH_PXA27x_KEYPAD_H
diff --git a/include/linux/platform_data/keypad-w90p910.h b/include/linux/platform_data/keypad-w90p910.h
deleted file mode 100644
index 556778e8ddaa..000000000000
--- a/include/linux/platform_data/keypad-w90p910.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef __ASM_ARCH_W90P910_KEYPAD_H
-#define __ASM_ARCH_W90P910_KEYPAD_H
-
-#include <linux/input/matrix_keypad.h>
-
-extern void mfp_set_groupi(struct device *dev);
-
-struct w90p910_keypad_platform_data {
- const struct matrix_keymap_data *keymap_data;
-
- unsigned int prescale;
- unsigned int debounce;
-};
-
-#endif /* __ASM_ARCH_W90P910_KEYPAD_H */
diff --git a/include/linux/platform_data/keyscan-davinci.h b/include/linux/platform_data/keyscan-davinci.h
index 7a560e05bda8..260d596ba0af 100644
--- a/include/linux/platform_data/keyscan-davinci.h
+++ b/include/linux/platform_data/keyscan-davinci.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2009 Texas Instruments, Inc
*
* Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef DAVINCI_KEYSCAN_H
diff --git a/include/linux/platform_data/lcd-mipid.h b/include/linux/platform_data/lcd-mipid.h
index 8e52c6572281..63f05eb23827 100644
--- a/include/linux/platform_data/lcd-mipid.h
+++ b/include/linux/platform_data/lcd-mipid.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LCD_MIPID_H
#define __LCD_MIPID_H
diff --git a/include/linux/platform_data/leds-kirkwood-netxbig.h b/include/linux/platform_data/leds-kirkwood-netxbig.h
deleted file mode 100644
index 3c85a735c380..000000000000
--- a/include/linux/platform_data/leds-kirkwood-netxbig.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Platform data structure for netxbig LED driver
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __LEDS_KIRKWOOD_NETXBIG_H
-#define __LEDS_KIRKWOOD_NETXBIG_H
-
-struct netxbig_gpio_ext {
- unsigned *addr;
- int num_addr;
- unsigned *data;
- int num_data;
- unsigned enable;
-};
-
-enum netxbig_led_mode {
- NETXBIG_LED_OFF,
- NETXBIG_LED_ON,
- NETXBIG_LED_SATA,
- NETXBIG_LED_TIMER1,
- NETXBIG_LED_TIMER2,
- NETXBIG_LED_MODE_NUM,
-};
-
-#define NETXBIG_LED_INVALID_MODE NETXBIG_LED_MODE_NUM
-
-struct netxbig_led_timer {
- unsigned long delay_on;
- unsigned long delay_off;
- enum netxbig_led_mode mode;
-};
-
-struct netxbig_led {
- const char *name;
- const char *default_trigger;
- int mode_addr;
- int *mode_val;
- int bright_addr;
- int bright_max;
-};
-
-struct netxbig_led_platform_data {
- struct netxbig_gpio_ext *gpio_ext;
- struct netxbig_led_timer *timer;
- int num_timer;
- struct netxbig_led *leds;
- int num_leds;
-};
-
-#endif /* __LEDS_KIRKWOOD_NETXBIG_H */
diff --git a/include/linux/platform_data/leds-kirkwood-ns2.h b/include/linux/platform_data/leds-kirkwood-ns2.h
deleted file mode 100644
index eb8a6860e816..000000000000
--- a/include/linux/platform_data/leds-kirkwood-ns2.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Platform data structure for Network Space v2 LED driver
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __LEDS_KIRKWOOD_NS2_H
-#define __LEDS_KIRKWOOD_NS2_H
-
-enum ns2_led_modes {
- NS_V2_LED_OFF,
- NS_V2_LED_ON,
- NS_V2_LED_SATA,
-};
-
-struct ns2_led_modval {
- enum ns2_led_modes mode;
- int cmd_level;
- int slow_level;
-};
-
-struct ns2_led {
- const char *name;
- const char *default_trigger;
- unsigned cmd;
- unsigned slow;
- int num_modes;
- struct ns2_led_modval *modval;
-};
-
-struct ns2_led_platform_data {
- int num_leds;
- struct ns2_led *leds;
-};
-
-#endif /* __LEDS_KIRKWOOD_NS2_H */
diff --git a/include/linux/platform_data/leds-lm355x.h b/include/linux/platform_data/leds-lm355x.h
index b88724bb0b46..b1090487b4b0 100644
--- a/include/linux/platform_data/leds-lm355x.h
+++ b/include/linux/platform_data/leds-lm355x.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Texas Instruments
*
- * License Terms: GNU General Public License v2
- *
* Simple driver for Texas Instruments LM355x LED driver chip
*
* Author: G.Shark Jeong <gshark.jeong@gmail.com>
diff --git a/include/linux/platform_data/leds-lm3642.h b/include/linux/platform_data/leds-lm3642.h
index 72d6ee6ade57..2490a2fb6549 100644
--- a/include/linux/platform_data/leds-lm3642.h
+++ b/include/linux/platform_data/leds-lm3642.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Texas Instruments
*
-* License Terms: GNU General Public License v2
-*
* Simple driver for Texas Instruments LM3642 LED driver chip
*
* Author: G.Shark Jeong <gshark.jeong@gmail.com>
diff --git a/include/linux/platform_data/leds-lp55xx.h b/include/linux/platform_data/leds-lp55xx.h
index 624ff9edad6f..3441064713a3 100644
--- a/include/linux/platform_data/leds-lp55xx.h
+++ b/include/linux/platform_data/leds-lp55xx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* LP55XX Platform Data Header
*
@@ -5,27 +6,32 @@
*
* Author: Milo(Woogyom) Kim <milo.kim@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
* Derived from leds-lp5521.h, leds-lp5523.h
*/
#ifndef _LEDS_LP55XX_H
#define _LEDS_LP55XX_H
+#include <linux/gpio/consumer.h>
+#include <linux/led-class-multicolor.h>
+
/* Clock configuration */
#define LP55XX_CLOCK_AUTO 0
#define LP55XX_CLOCK_INT 1
#define LP55XX_CLOCK_EXT 2
+#define LP55XX_MAX_GROUPED_CHAN 4
+
struct lp55xx_led_config {
const char *name;
const char *default_trigger;
u8 chan_nr;
u8 led_current; /* mA x10, 0 if led is not connected */
u8 max_current;
+ int num_colors;
+ unsigned int max_channel;
+ int color_id[LED_COLOR_ID_MAX];
+ int output_num[LED_COLOR_ID_MAX];
};
struct lp55xx_predef_pattern {
@@ -52,7 +58,7 @@ enum lp8501_pwr_sel {
* @clock_mode : Input clock mode. LP55XX_CLOCK_AUTO or _INT or _EXT
* @setup_resources : Platform specific function before enabling the chip
* @release_resources : Platform specific function after disabling the chip
- * @enable : EN pin control by platform side
+ * @enable_gpiod : enable GPIO descriptor
* @patterns : Predefined pattern data for RGB channels
* @num_patterns : Number of patterns
* @update_config : Value of CONFIG register
@@ -68,7 +74,7 @@ struct lp55xx_platform_data {
u8 clock_mode;
/* optional enable GPIO */
- int enable_gpio;
+ struct gpio_desc *enable_gpiod;
/* Predefined pattern data */
struct lp55xx_predef_pattern *patterns;
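A sketch of the descriptor-based enable line in lp55xx_platform_data; the LED names and currents are assumptions, and enable_gpiod would be resolved at runtime (for example via gpiod_get(dev, "enable", GPIOD_OUT_LOW)) instead of carrying a legacy pin number.

#include <linux/kernel.h>
#include <linux/platform_data/leds-lp55xx.h>

static struct lp55xx_led_config board_lp55xx_leds[] = {
	{
		.name        = "status",
		.chan_nr     = 0,
		.led_current = 100,	/* 10.0 mA */
		.max_current = 120,
	},
};

static struct lp55xx_platform_data board_lp55xx_pdata = {
	.led_config   = board_lp55xx_leds,
	.num_channels = ARRAY_SIZE(board_lp55xx_leds),
	.clock_mode   = LP55XX_CLOCK_INT,
	/* .enable_gpiod filled in at probe/init time */
};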
diff --git a/include/linux/platform_data/leds-omap.h b/include/linux/platform_data/leds-omap.h
deleted file mode 100644
index 56c9b2a0ada5..000000000000
--- a/include/linux/platform_data/leds-omap.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2006 Samsung Electronics
- * Kyungmin Park <kyungmin.park@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef ASMARM_ARCH_LED_H
-#define ASMARM_ARCH_LED_H
-
-struct omap_led_config {
- struct led_classdev cdev;
- s16 gpio;
-};
-
-struct omap_led_platform_data {
- s16 nr_leds;
- struct omap_led_config *leds;
-};
-
-#endif
diff --git a/include/linux/platform_data/leds-pca963x.h b/include/linux/platform_data/leds-pca963x.h
deleted file mode 100644
index 54e845ffb5ed..000000000000
--- a/include/linux/platform_data/leds-pca963x.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * PCA963X LED chip driver.
- *
- * Copyright 2012 bct electronic GmbH
- * Copyright 2013 Qtechnology A/S
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#ifndef __LINUX_PCA963X_H
-#define __LINUX_PCA963X_H
-#include <linux/leds.h>
-
-enum pca963x_outdrv {
- PCA963X_OPEN_DRAIN,
- PCA963X_TOTEM_POLE, /* aka push-pull */
-};
-
-enum pca963x_blink_type {
- PCA963X_SW_BLINK,
- PCA963X_HW_BLINK,
-};
-
-enum pca963x_direction {
- PCA963X_NORMAL,
- PCA963X_INVERTED,
-};
-
-struct pca963x_platform_data {
- struct led_platform_data leds;
- enum pca963x_outdrv outdrv;
- enum pca963x_blink_type blink_type;
- enum pca963x_direction dir;
-};
-
-#endif /* __LINUX_PCA963X_H*/
diff --git a/include/linux/platform_data/leds-s3c24xx.h b/include/linux/platform_data/leds-s3c24xx.h
deleted file mode 100644
index 441a6f290649..000000000000
--- a/include/linux/platform_data/leds-s3c24xx.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (c) 2006 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C24XX - LEDs GPIO connector
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __LEDS_S3C24XX_H
-#define __LEDS_S3C24XX_H
-
-#define S3C24XX_LEDF_ACTLOW (1<<0) /* LED is on when GPIO low */
-#define S3C24XX_LEDF_TRISTATE (1<<1) /* tristate to turn off */
-
-struct s3c24xx_led_platdata {
- unsigned int gpio;
- unsigned int flags;
-
- char *name;
- char *def_trigger;
-};
-
-#endif /* __LEDS_S3C24XX_H */
diff --git a/include/linux/platform_data/lm3630a_bl.h b/include/linux/platform_data/lm3630a_bl.h
index 7538e38e270b..530be9318711 100644
--- a/include/linux/platform_data/lm3630a_bl.h
+++ b/include/linux/platform_data/lm3630a_bl.h
@@ -1,11 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Simple driver for Texas Instruments LM3630A LED Flash driver chip
* Copyright (C) 2012 Texas Instruments
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License version 2 as
-* published by the Free Software Foundation.
-*
*/
#ifndef __LINUX_LM3630A_H
@@ -38,9 +34,11 @@ enum lm3630a_ledb_ctrl {
#define LM3630A_MAX_BRIGHTNESS 255
/*
+ *@leda_label : optional led a label.
*@leda_init_brt : led a init brightness. 4~255
*@leda_max_brt : led a max brightness. 4~255
*@leda_ctrl : led a disable, enable linear, enable exponential
+ *@ledb_label : optional led b label.
*@ledb_init_brt : led b init brightness. 4~255
*@ledb_max_brt : led b max brightness. 4~255
*@ledb_ctrl : led b disable, enable linear, enable exponential
@@ -50,10 +48,12 @@ enum lm3630a_ledb_ctrl {
struct lm3630a_platform_data {
/* led a config. */
+ const char *leda_label;
int leda_init_brt;
int leda_max_brt;
enum lm3630a_leda_ctrl leda_ctrl;
/* led b config. */
+ const char *ledb_label;
int ledb_init_brt;
int ledb_max_brt;
enum lm3630a_ledb_ctrl ledb_ctrl;
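
A minimal sketch of how the new label fields might be used from a board file follows. The enum constant names are assumed from the header's lm3630a_leda_ctrl/lm3630a_ledb_ctrl enums, and the label strings and brightness values are illustrative.

#include <linux/platform_data/lm3630a_bl.h>

static struct lm3630a_platform_data board_lm3630a_pdata = {
	.leda_label    = "lcd:backlight",	/* overrides default sysfs name */
	.leda_init_brt = 100,
	.leda_max_brt  = 255,
	.leda_ctrl     = LM3630A_LEDA_ENABLE,
	.ledb_label    = "keypad:backlight",
	.ledb_init_brt = 50,
	.ledb_max_brt  = 255,
	.ledb_ctrl     = LM3630A_LEDB_ENABLE,
};
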
diff --git a/include/linux/platform_data/lm3639_bl.h b/include/linux/platform_data/lm3639_bl.h
index 5234cd5ed166..341f24051db4 100644
--- a/include/linux/platform_data/lm3639_bl.h
+++ b/include/linux/platform_data/lm3639_bl.h
@@ -1,11 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Simple driver for Texas Instruments LM3639 LED Flash driver chip
* Copyright (C) 2012 Texas Instruments
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License version 2 as
-* published by the Free Software Foundation.
-*
*/
#ifndef __LINUX_LM3639_H
diff --git a/include/linux/platform_data/lm8323.h b/include/linux/platform_data/lm8323.h
index 478d668bc590..311999260614 100644
--- a/include/linux/platform_data/lm8323.h
+++ b/include/linux/platform_data/lm8323.h
@@ -1,18 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* lm8323.h - Configuration for LM8323 keypad driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation (version 2 of the License only).
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_LM8323_H
diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h
index 1b2ba24e4e03..ab222dd05bbc 100644
--- a/include/linux/platform_data/lp855x.h
+++ b/include/linux/platform_data/lp855x.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* LP855x Backlight Driver
*
* Copyright (C) 2011 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef _LP855X_H
diff --git a/include/linux/platform_data/lp8727.h b/include/linux/platform_data/lp8727.h
index 47128a50e04e..c701a7b96f0b 100644
--- a/include/linux/platform_data/lp8727.h
+++ b/include/linux/platform_data/lp8727.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* LP8727 Micro/Mini USB IC with integrated charger
*
* Copyright (C) 2011 Texas Instruments
* Copyright (C) 2011 National Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LP8727_H
diff --git a/include/linux/platform_data/lp8755.h b/include/linux/platform_data/lp8755.h
index a7fd0776c9bf..7bf4221d62dd 100644
--- a/include/linux/platform_data/lp8755.h
+++ b/include/linux/platform_data/lp8755.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * LP8755 High Performance Power Management Unit Driver: System Interface Driver
*
@@ -5,11 +6,6 @@
*
* Author: Daniel(Geon Si) Jeong <daniel.jeong@ti.com>
* G.Shark Jeong <gshark.jeong@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef _LP8755_H
diff --git a/include/linux/platform_data/ltc4245.h b/include/linux/platform_data/ltc4245.h
index 56bda4be0016..f07fa05ea6c4 100644
--- a/include/linux/platform_data/ltc4245.h
+++ b/include/linux/platform_data/ltc4245.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Platform Data for LTC4245 hardware monitor chip
*
* Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef LINUX_LTC4245_H
diff --git a/include/linux/platform_data/lv5207lp.h b/include/linux/platform_data/lv5207lp.h
index 7dc4d9a219a6..c9da8d402750 100644
--- a/include/linux/platform_data/lv5207lp.h
+++ b/include/linux/platform_data/lv5207lp.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* lv5207lp.h - Sanyo LV5207LP LEDs Driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LV5207LP_H__
#define __LV5207LP_H__
diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h
deleted file mode 100644
index 7815d50c26ff..000000000000
--- a/include/linux/platform_data/macb.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2004-2006 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __MACB_PDATA_H__
-#define __MACB_PDATA_H__
-
-#include <linux/clk.h>
-
-/**
- * struct macb_platform_data - platform data for MACB Ethernet
- * @phy_mask: phy mask passed when register the MDIO bus
- * within the driver
- * @phy_irq_pin: PHY IRQ
- * @is_rmii: using RMII interface?
- * @rev_eth_addr: reverse Ethernet address byte order
- * @pclk: platform clock
- * @hclk: AHB clock
- */
-struct macb_platform_data {
- u32 phy_mask;
- int phy_irq_pin;
- u8 is_rmii;
- u8 rev_eth_addr;
- struct clk *pclk;
- struct clk *hclk;
-};
-
-#endif /* __MACB_PDATA_H__ */
diff --git a/include/linux/platform_data/max197.h b/include/linux/platform_data/max197.h
index 8da8f94ee15c..03ef46f9cd65 100644
--- a/include/linux/platform_data/max197.h
+++ b/include/linux/platform_data/max197.h
@@ -1,14 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Maxim MAX197 A/D Converter Driver
*
* Copyright (c) 2012 Savoir-faire Linux Inc.
* Vivien Didelot <vivien.didelot@savoirfairelinux.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * For further information, see the Documentation/hwmon/max197 file.
+ * For further information, see the Documentation/hwmon/max197.rst file.
*/
#ifndef _PDATA_MAX197_H
diff --git a/include/linux/platform_data/max3421-hcd.h b/include/linux/platform_data/max3421-hcd.h
index 0303d1970084..5947a6f43d60 100644
--- a/include/linux/platform_data/max3421-hcd.h
+++ b/include/linux/platform_data/max3421-hcd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014 eGauge Systems LLC
* Contributed by David Mosberger-Tang <davidm@egauge.net>
diff --git a/include/linux/platform_data/max6639.h b/include/linux/platform_data/max6639.h
index 6011c42034da..65bfdb4fdc15 100644
--- a/include/linux/platform_data/max6639.h
+++ b/include/linux/platform_data/max6639.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MAX6639_H
#define _LINUX_MAX6639_H
diff --git a/include/linux/platform_data/max6697.h b/include/linux/platform_data/max6697.h
index ed9d3b3daf02..6fbb70005541 100644
--- a/include/linux/platform_data/max6697.h
+++ b/include/linux/platform_data/max6697.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* max6697.h
* Copyright (c) 2012 Guenter Roeck <linux@roeck-us.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef MAX6697_H
diff --git a/include/linux/platform_data/max732x.h b/include/linux/platform_data/max732x.h
index c04bac8bf2fe..423999207cd5 100644
--- a/include/linux/platform_data/max732x.h
+++ b/include/linux/platform_data/max732x.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_I2C_MAX732X_H
#define __LINUX_I2C_MAX732X_H
@@ -6,17 +7,5 @@
struct max732x_platform_data {
/* number of the first GPIO */
unsigned gpio_base;
-
- /* interrupt base */
- int irq_base;
-
- void *context; /* param to setup/teardown */
-
- int (*setup)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
- int (*teardown)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
};
#endif /* __LINUX_I2C_MAX732X_H */
diff --git a/include/linux/platform_data/mcs.h b/include/linux/platform_data/mcs.h
index 61bb18a4fd3c..fcc6f2a1f5c3 100644
--- a/include/linux/platform_data/mcs.h
+++ b/include/linux/platform_data/mcs.h
@@ -1,13 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2009 - 2010 Samsung Electronics Co.Ltd
* Author: Joonyoung Shim <jy0922.shim@samsung.com>
* Author: HeungJun Kim <riverful.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MCS_H
diff --git a/include/linux/platform_data/mdio-bcm-unimac.h b/include/linux/platform_data/mdio-bcm-unimac.h
new file mode 100644
index 000000000000..8a5f9f0b2c52
--- /dev/null
+++ b/include/linux/platform_data/mdio-bcm-unimac.h
@@ -0,0 +1,13 @@
+#ifndef __MDIO_BCM_UNIMAC_PDATA_H
+#define __MDIO_BCM_UNIMAC_PDATA_H
+
+struct unimac_mdio_pdata {
+ u32 phy_mask;
+ int (*wait_func)(void *data);
+ void *wait_func_data;
+ const char *bus_name;
+};
+
+#define UNIMAC_MDIO_DRV_NAME "unimac-mdio"
+
+#endif /* __MDIO_BCM_UNIMAC_PDATA_H */
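
For context, a SoC or board setup file could hand this pdata to the driver as below — a hedged sketch in which the device name and PHY address are hypothetical. The phy_mask follows the usual mii_bus convention: bits set mark PHY addresses to skip during the scan.

#include <linux/bits.h>
#include <linux/platform_device.h>
#include <linux/platform_data/mdio-bcm-unimac.h>

/* Probe only the PHY at address 1; everything else is masked out. */
static struct unimac_mdio_pdata board_mdio_pdata = {
	.phy_mask = ~BIT(1),
	.bus_name = "unimac mii bus",
};

static struct platform_device board_mdio_dev = {
	.name = UNIMAC_MDIO_DRV_NAME,
	.id   = -1,
	.dev  = {
		.platform_data = &board_mdio_pdata,
	},
};

/* Registered from board init code with platform_device_register(). */
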
diff --git a/include/linux/platform_data/mdio-gpio.h b/include/linux/platform_data/mdio-gpio.h
index 11f00cdabe3d..13874fa6e767 100644
--- a/include/linux/platform_data/mdio-gpio.h
+++ b/include/linux/platform_data/mdio-gpio.h
@@ -1,33 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * MDIO-GPIO bus platform data structures
- *
- * Copyright (C) 2008, Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
+ * MDIO-GPIO bus platform data structure
*/
-#ifndef __LINUX_MDIO_GPIO_H
-#define __LINUX_MDIO_GPIO_H
-
-#include <linux/mdio-bitbang.h>
+#ifndef __LINUX_MDIO_GPIO_PDATA_H
+#define __LINUX_MDIO_GPIO_PDATA_H
struct mdio_gpio_platform_data {
- /* GPIO numbers for bus pins */
- unsigned int mdc;
- unsigned int mdio;
- unsigned int mdo;
-
- bool mdc_active_low;
- bool mdio_active_low;
- bool mdo_active_low;
-
u32 phy_mask;
u32 phy_ignore_ta_mask;
- int irqs[PHY_MAX_ADDR];
- /* reset callback */
- int (*reset)(struct mii_bus *bus);
};
-#endif /* __LINUX_MDIO_GPIO_H */
+#endif /* __LINUX_MDIO_GPIO_PDATA_H */
diff --git a/include/linux/platform_data/media/camera-mx2.h b/include/linux/platform_data/media/camera-mx2.h
deleted file mode 100644
index 7ded6f1f74bc..000000000000
--- a/include/linux/platform_data/media/camera-mx2.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * mx2-cam.h - i.MX27/i.MX25 camera driver header file
- *
- * Copyright (C) 2003, Intel Corporation
- * Copyright (C) 2008, Sascha Hauer <s.hauer@pengutronix.de>
- * Copyright (C) 2010, Baruch Siach <baruch@tkos.co.il>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef __MACH_MX2_CAM_H_
-#define __MACH_MX2_CAM_H_
-
-#define MX2_CAMERA_EXT_VSYNC (1 << 1)
-#define MX2_CAMERA_CCIR (1 << 2)
-#define MX2_CAMERA_CCIR_INTERLACE (1 << 3)
-#define MX2_CAMERA_HSYNC_HIGH (1 << 4)
-#define MX2_CAMERA_GATED_CLOCK (1 << 5)
-#define MX2_CAMERA_INV_DATA (1 << 6)
-#define MX2_CAMERA_PCLK_SAMPLE_RISING (1 << 7)
-
-/**
- * struct mx2_camera_platform_data - optional platform data for mx2_camera
- * @flags: any combination of MX2_CAMERA_*
- * @clk: clock rate of the csi block / 2
- */
-struct mx2_camera_platform_data {
- unsigned long flags;
- unsigned long clk;
-};
-
-#endif /* __MACH_MX2_CAM_H_ */
diff --git a/include/linux/platform_data/media/camera-mx3.h b/include/linux/platform_data/media/camera-mx3.h
deleted file mode 100644
index a910dadc8258..000000000000
--- a/include/linux/platform_data/media/camera-mx3.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * mx3_camera.h - i.MX3x camera driver header file
- *
- * Copyright (C) 2008, Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _MX3_CAMERA_H_
-#define _MX3_CAMERA_H_
-
-#include <linux/device.h>
-
-#define MX3_CAMERA_CLK_SRC 1
-#define MX3_CAMERA_EXT_VSYNC 2
-#define MX3_CAMERA_DP 4
-#define MX3_CAMERA_PCP 8
-#define MX3_CAMERA_HSP 0x10
-#define MX3_CAMERA_VSP 0x20
-#define MX3_CAMERA_DATAWIDTH_4 0x40
-#define MX3_CAMERA_DATAWIDTH_8 0x80
-#define MX3_CAMERA_DATAWIDTH_10 0x100
-#define MX3_CAMERA_DATAWIDTH_15 0x200
-
-#define MX3_CAMERA_DATAWIDTH_MASK (MX3_CAMERA_DATAWIDTH_4 | MX3_CAMERA_DATAWIDTH_8 | \
- MX3_CAMERA_DATAWIDTH_10 | MX3_CAMERA_DATAWIDTH_15)
-
-struct v4l2_async_subdev;
-
-/**
- * struct mx3_camera_pdata - i.MX3x camera platform data
- * @flags: MX3_CAMERA_* flags
- * @mclk_10khz: master clock frequency in 10kHz units
- * @dma_dev: IPU DMA device to match against in channel allocation
- */
-struct mx3_camera_pdata {
- unsigned long flags;
- unsigned long mclk_10khz;
- struct device *dma_dev;
- struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */
- int *asd_sizes; /* 0-terminated array of asd group sizes */
-};
-
-#endif
diff --git a/include/linux/platform_data/media/camera-pxa.h b/include/linux/platform_data/media/camera-pxa.h
index ce5d90e1a6e4..846a47b8c540 100644
--- a/include/linux/platform_data/media/camera-pxa.h
+++ b/include/linux/platform_data/media/camera-pxa.h
@@ -1,22 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
camera.h - PXA camera driver header file
Copyright (C) 2003, Intel Corporation
Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __ASM_ARCH_CAMERA_H_
diff --git a/include/linux/platform_data/media/coda.h b/include/linux/platform_data/media/coda.h
deleted file mode 100644
index 6ad4410d9e20..000000000000
--- a/include/linux/platform_data/media/coda.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (C) 2013 Philipp Zabel, Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#ifndef PLATFORM_CODA_H
-#define PLATFORM_CODA_H
-
-struct device;
-
-struct coda_platform_data {
- struct device *iram_dev;
-};
-
-#endif
diff --git a/include/linux/platform_data/media/gpio-ir-recv.h b/include/linux/platform_data/media/gpio-ir-recv.h
deleted file mode 100644
index 0c298f569d5a..000000000000
--- a/include/linux/platform_data/media/gpio-ir-recv.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GPIO_IR_RECV_H__
-#define __GPIO_IR_RECV_H__
-
-struct gpio_ir_recv_platform_data {
- int gpio_nr;
- bool active_low;
- u64 allowed_protos;
- const char *map_name;
-};
-
-#endif /* __GPIO_IR_RECV_H__ */
diff --git a/include/linux/platform_data/media/ir-rx51.h b/include/linux/platform_data/media/ir-rx51.h
deleted file mode 100644
index 2c94ab568bfa..000000000000
--- a/include/linux/platform_data/media/ir-rx51.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _IR_RX51_H
-#define _IR_RX51_H
-
-struct ir_rx51_platform_data {
- int(*set_max_mpu_wakeup_lat)(struct device *dev, long t);
-};
-
-#endif
diff --git a/include/linux/platform_data/media/mmp-camera.h b/include/linux/platform_data/media/mmp-camera.h
index 7611963a257f..53adaab64f28 100644
--- a/include/linux/platform_data/media/mmp-camera.h
+++ b/include/linux/platform_data/media/mmp-camera.h
@@ -1,9 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Information for the Marvell Armada MMP camera
*/
+#include <media/v4l2-mediabus.h>
+
+enum dphy3_algo {
+ DPHY3_ALGO_DEFAULT = 0,
+ DPHY3_ALGO_PXA910,
+ DPHY3_ALGO_PXA2128
+};
+
struct mmp_camera_platform_data {
- struct platform_device *i2c_device;
- int sensor_power_gpio;
- int sensor_reset_gpio;
+ enum v4l2_mbus_type bus_type;
+ int mclk_src; /* which clock source the MCLK derives from */
+ int mclk_div; /* Clock Divider Value for MCLK */
+ /*
+ * MIPI support
+ */
+ int dphy[3]; /* DPHY: CSI2_DPHY3, CSI2_DPHY5, CSI2_DPHY6 */
+ enum dphy3_algo dphy3_algo; /* algorithm used to calculate CSI2_DPHY3 */
+ int lane; /* number of CCIC lanes in use; 0 means DVP mode */
+ int lane_clk;
};
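
A short sketch of filling the reworked pdata for a parallel-bus (DVP) sensor follows; all values are illustrative, and V4L2_MBUS_PARALLEL comes from the v4l2-mediabus.h header this file now includes.

#include <linux/platform_data/media/mmp-camera.h>

static struct mmp_camera_platform_data board_cam_pdata = {
	.bus_type   = V4L2_MBUS_PARALLEL,
	.mclk_src   = 3,	/* illustrative clock source selector */
	.mclk_div   = 13,	/* illustrative MCLK divider */
	.dphy       = { 0, 0, 0 },	/* DPHY settings unused in DVP mode */
	.dphy3_algo = DPHY3_ALGO_DEFAULT,
	.lane       = 0,	/* 0 selects DVP (parallel) mode */
};
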
diff --git a/include/linux/platform_data/media/omap1_camera.h b/include/linux/platform_data/media/omap1_camera.h
deleted file mode 100644
index 819767cf04d4..000000000000
--- a/include/linux/platform_data/media/omap1_camera.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Header for V4L2 SoC Camera driver for OMAP1 Camera Interface
- *
- * Copyright (C) 2010, Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MEDIA_OMAP1_CAMERA_H_
-#define __MEDIA_OMAP1_CAMERA_H_
-
-#include <linux/bitops.h>
-
-#define OMAP1_CAMERA_IOSIZE 0x1c
-
-enum omap1_cam_vb_mode {
- OMAP1_CAM_DMA_CONTIG = 0,
- OMAP1_CAM_DMA_SG,
-};
-
-#define OMAP1_CAMERA_MIN_BUF_COUNT(x) ((x) == OMAP1_CAM_DMA_CONTIG ? 3 : 2)
-
-struct omap1_cam_platform_data {
- unsigned long camexclk_khz;
- unsigned long lclk_khz_max;
- unsigned long flags;
-};
-
-#define OMAP1_CAMERA_LCLK_RISING BIT(0)
-#define OMAP1_CAMERA_RST_LOW BIT(1)
-#define OMAP1_CAMERA_RST_HIGH BIT(2)
-
-#endif /* __MEDIA_OMAP1_CAMERA_H_ */
diff --git a/include/linux/platform_data/media/omap4iss.h b/include/linux/platform_data/media/omap4iss.h
index 0d7620db5e32..2a511a8fcda7 100644
--- a/include/linux/platform_data/media/omap4iss.h
+++ b/include/linux/platform_data/media/omap4iss.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_ARM_PLAT_OMAP4_ISS_H
#define ARCH_ARM_PLAT_OMAP4_ISS_H
diff --git a/include/linux/platform_data/media/s5p_hdmi.h b/include/linux/platform_data/media/s5p_hdmi.h
deleted file mode 100644
index bb9cacb0cbb0..000000000000
--- a/include/linux/platform_data/media/s5p_hdmi.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Driver header for S5P HDMI chip.
- *
- * Copyright (c) 2011 Samsung Electronics, Co. Ltd
- * Contact: Tomasz Stanislawski <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef S5P_HDMI_H
-#define S5P_HDMI_H
-
-struct i2c_board_info;
-
-/**
- * @hdmiphy_bus: controller id for HDMIPHY bus
- * @hdmiphy_info: template for HDMIPHY I2C device
- * @mhl_bus: controller id for MHL control bus
- * @mhl_info: template for MHL I2C device
- * @hpd_gpio: GPIO for Hot-Plug-Detect pin
- *
- * NULL pointer for *_info fields indicates that
- * the corresponding chip is not present
- */
-struct s5p_hdmi_platform_data {
- int hdmiphy_bus;
- struct i2c_board_info *hdmiphy_info;
- int mhl_bus;
- struct i2c_board_info *mhl_info;
- int hpd_gpio;
-};
-
-#endif /* S5P_HDMI_H */
diff --git a/include/linux/platform_data/media/si4713.h b/include/linux/platform_data/media/si4713.h
index 932668ad54f7..13b3eb7a9059 100644
--- a/include/linux/platform_data/media/si4713.h
+++ b/include/linux/platform_data/media/si4713.h
@@ -31,7 +31,7 @@ struct si4713_platform_data {
*/
struct si4713_rnl {
__u32 index; /* modulator index */
- __u32 frequency; /* frequency to peform rnl measurement */
+ __u32 frequency; /* frequency to perform rnl measurement */
__s32 rnl; /* result of measurement in dBuV */
__u32 reserved[4]; /* drivers and apps must init this to 0 */
};
@@ -40,7 +40,7 @@ struct si4713_rnl {
* This is the ioctl number to query for rnl. Users must pass a
* struct si4713_rnl pointer specifying desired frequency in 'frequency' field
 * following driver capabilities (i.e. V4L2_TUNER_CAP_LOW).
- * Driver must return measured value in the same struture, filling 'rnl' field.
+ * Driver must return measured value in the same structure, filling 'rnl' field.
*/
#define SI4713_IOC_MEASURE_RNL _IOWR('V', BASE_VIDIOC_PRIVATE + 0, \
struct si4713_rnl)
diff --git a/include/linux/platform_data/media/sii9234.h b/include/linux/platform_data/media/sii9234.h
deleted file mode 100644
index 6a4a809fe9a3..000000000000
--- a/include/linux/platform_data/media/sii9234.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Driver header for SII9234 MHL converter chip.
- *
- * Copyright (c) 2011 Samsung Electronics, Co. Ltd
- * Contact: Tomasz Stanislawski <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef SII9234_H
-#define SII9234_H
-
-/**
- * @gpio_n_reset: GPIO driving nRESET pin
- */
-
-struct sii9234_platform_data {
- int gpio_n_reset;
-};
-
-#endif /* SII9234_H */
diff --git a/include/linux/platform_data/media/soc_camera_platform.h b/include/linux/platform_data/media/soc_camera_platform.h
deleted file mode 100644
index 1e5065dab430..000000000000
--- a/include/linux/platform_data/media/soc_camera_platform.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Generic Platform Camera Driver Header
- *
- * Copyright (C) 2008 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __SOC_CAMERA_H__
-#define __SOC_CAMERA_H__
-
-#include <linux/videodev2.h>
-#include <media/soc_camera.h>
-#include <media/v4l2-mediabus.h>
-
-struct device;
-
-struct soc_camera_platform_info {
- const char *format_name;
- unsigned long format_depth;
- struct v4l2_mbus_framefmt format;
- unsigned long mbus_param;
- enum v4l2_mbus_type mbus_type;
- struct soc_camera_device *icd;
- int (*set_capture)(struct soc_camera_platform_info *info, int enable);
-};
-
-static inline void soc_camera_platform_release(struct platform_device **pdev)
-{
- *pdev = NULL;
-}
-
-static inline int soc_camera_platform_add(struct soc_camera_device *icd,
- struct platform_device **pdev,
- struct soc_camera_link *plink,
- void (*release)(struct device *dev),
- int id)
-{
- struct soc_camera_subdev_desc *ssdd =
- (struct soc_camera_subdev_desc *)plink;
- struct soc_camera_platform_info *info = ssdd->drv_priv;
- int ret;
-
- if (&icd->sdesc->subdev_desc != ssdd)
- return -ENODEV;
-
- if (*pdev)
- return -EBUSY;
-
- *pdev = platform_device_alloc("soc_camera_platform", id);
- if (!*pdev)
- return -ENOMEM;
-
- info->icd = icd;
-
- (*pdev)->dev.platform_data = info;
- (*pdev)->dev.release = release;
-
- ret = platform_device_add(*pdev);
- if (ret < 0) {
- platform_device_put(*pdev);
- *pdev = NULL;
- info->icd = NULL;
- }
-
- return ret;
-}
-
-static inline void soc_camera_platform_del(const struct soc_camera_device *icd,
- struct platform_device *pdev,
- const struct soc_camera_link *plink)
-{
- const struct soc_camera_subdev_desc *ssdd =
- (const struct soc_camera_subdev_desc *)plink;
- if (&icd->sdesc->subdev_desc != ssdd || !pdev)
- return;
-
- platform_device_unregister(pdev);
-}
-
-#endif /* __SOC_CAMERA_H__ */
diff --git a/include/linux/platform_data/media/timb_radio.h b/include/linux/platform_data/media/timb_radio.h
index a40a6a348d21..109a0d4a4f07 100644
--- a/include/linux/platform_data/media/timb_radio.h
+++ b/include/linux/platform_data/media/timb_radio.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* timb_radio.h Platform struct for the Timberdale radio driver
* Copyright (c) 2009 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _TIMB_RADIO_
diff --git a/include/linux/platform_data/media/timb_video.h b/include/linux/platform_data/media/timb_video.h
index 70ae43970a49..38764cc09b4f 100644
--- a/include/linux/platform_data/media/timb_video.h
+++ b/include/linux/platform_data/media/timb_video.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* timb_video.h Platform struct for the Timberdale video driver
* Copyright (c) 2009-2010 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _TIMB_VIDEO_
diff --git a/include/linux/platform_data/mfd-mcp-sa11x0.h b/include/linux/platform_data/mfd-mcp-sa11x0.h
index 747cd6baf711..b589e61bbc2e 100644
--- a/include/linux/platform_data/mfd-mcp-sa11x0.h
+++ b/include/linux/platform_data/mfd-mcp-sa11x0.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2005 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __MFD_MCP_SA11X0_H
#define __MFD_MCP_SA11X0_H
diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h
index 84789ca634aa..ea1cc6d829e9 100644
--- a/include/linux/platform_data/microchip-ksz.h
+++ b/include/linux/platform_data/microchip-ksz.h
@@ -19,7 +19,7 @@
#ifndef __MICROCHIP_KSZ_H
#define __MICROCHIP_KSZ_H
-#include <linux/kernel.h>
+#include <linux/types.h>
struct ksz_platform_data {
u32 chip_id;
diff --git a/include/linux/platform_data/mlxcpld-hotplug.h b/include/linux/platform_data/mlxcpld-hotplug.h
deleted file mode 100644
index e4cfcffaa6f4..000000000000
--- a/include/linux/platform_data/mlxcpld-hotplug.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * include/linux/platform_data/mlxcpld-hotplug.h
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H
-#define __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H
-
-/**
- * struct mlxcpld_hotplug_device - I2C device data:
- * @adapter: I2C device adapter;
- * @client: I2C device client;
- * @brdinfo: device board information;
- * @bus: I2C bus, where device is attached;
- *
- * Structure represents I2C hotplug device static data (board topology) and
- * dynamic data (related kernel objects handles).
- */
-struct mlxcpld_hotplug_device {
- struct i2c_adapter *adapter;
- struct i2c_client *client;
- struct i2c_board_info brdinfo;
- u16 bus;
-};
-
-/**
- * struct mlxcpld_hotplug_platform_data - device platform data:
- * @top_aggr_offset: offset of top aggregation interrupt register;
- * @top_aggr_mask: top aggregation interrupt common mask;
- * @top_aggr_psu_mask: top aggregation interrupt PSU mask;
- * @psu_reg_offset: offset of PSU interrupt register;
- * @psu_mask: PSU interrupt mask;
- * @psu_count: number of equipped replaceable PSUs;
- * @psu: pointer to PSU devices data array;
- * @top_aggr_pwr_mask: top aggregation interrupt power mask;
- * @pwr_reg_offset: offset of power interrupt register
- * @pwr_mask: power interrupt mask;
- * @pwr_count: number of power sources;
- * @pwr: pointer to power devices data array;
- * @top_aggr_fan_mask: top aggregation interrupt FAN mask;
- * @fan_reg_offset: offset of FAN interrupt register;
- * @fan_mask: FAN interrupt mask;
- * @fan_count: number of equipped replaceable FANs;
- * @fan: pointer to FAN devices data array;
- *
- * Structure represents board platform data, related to system hotplug events,
- * like FAN, PSU, power cable insertion and removing. This data provides the
- * number of hot-pluggable devices and hardware description for event handling.
- */
-struct mlxcpld_hotplug_platform_data {
- u16 top_aggr_offset;
- u8 top_aggr_mask;
- u8 top_aggr_psu_mask;
- u16 psu_reg_offset;
- u8 psu_mask;
- u8 psu_count;
- struct mlxcpld_hotplug_device *psu;
- u8 top_aggr_pwr_mask;
- u16 pwr_reg_offset;
- u8 pwr_mask;
- u8 pwr_count;
- struct mlxcpld_hotplug_device *pwr;
- u8 top_aggr_fan_mask;
- u16 fan_reg_offset;
- u8 fan_mask;
- u8 fan_count;
- struct mlxcpld_hotplug_device *fan;
-};
-
-#endif /* __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H */
diff --git a/include/linux/platform_data/mlxcpld.h b/include/linux/platform_data/mlxcpld.h
new file mode 100644
index 000000000000..d7610b528856
--- /dev/null
+++ b/include/linux/platform_data/mlxcpld.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/*
+ * Mellanox I2C multiplexer support in CPLD
+ *
+ * Copyright (C) 2016-2020 Mellanox Technologies
+ */
+
+#ifndef _LINUX_I2C_MLXCPLD_H
+#define _LINUX_I2C_MLXCPLD_H
+
+/* Platform data for the CPLD I2C multiplexers */
+
+/* mlxcpld_mux_plat_data - per mux data, used with i2c_register_board_info
+ * @chan_ids: channels array
+ * @num_adaps: number of adapters
+ * @sel_reg_addr: mux select register offset in CPLD space
+ * @reg_size: register size in bytes
+ * @handle: handle to be passed by callback
+ * @completion_notify: callback to notify when all the adapters are created
+ */
+struct mlxcpld_mux_plat_data {
+ int *chan_ids;
+ int num_adaps;
+ int sel_reg_addr;
+ u8 reg_size;
+ void *handle;
+ int (*completion_notify)(void *handle, struct i2c_adapter *parent,
+ struct i2c_adapter *adapters[]);
+};
+
+#endif /* _LINUX_I2C_MLXCPLD_H */
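
As a usage sketch, the completion_notify callback gives platform code a point to instantiate devices once all mux channels exist. Channel IDs and the register offset below are illustrative.

#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/platform_data/mlxcpld.h>

static int board_mux_ready(void *handle, struct i2c_adapter *parent,
			   struct i2c_adapter *adapters[])
{
	/* All child adapters now exist; devices behind the mux can be
	 * instantiated from here. */
	return 0;
}

static int board_mux_chans[] = { 2, 3, 4, 5 };

static struct mlxcpld_mux_plat_data board_mux_pdata = {
	.chan_ids          = board_mux_chans,
	.num_adaps         = ARRAY_SIZE(board_mux_chans),
	.sel_reg_addr      = 0xdb,	/* illustrative CPLD register offset */
	.reg_size          = 1,
	.completion_notify = board_mux_ready,
};
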
diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h
new file mode 100644
index 000000000000..0b9f81a6f753
--- /dev/null
+++ b/include/linux/platform_data/mlxreg.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/*
+ * Copyright (C) 2017-2020 Mellanox Technologies Ltd.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_MLXREG_H
+#define __LINUX_PLATFORM_DATA_MLXREG_H
+
+#define MLXREG_CORE_LABEL_MAX_SIZE 32
+#define MLXREG_CORE_WD_FEATURE_NOWAYOUT BIT(0)
+#define MLXREG_CORE_WD_FEATURE_START_AT_BOOT BIT(1)
+
+/**
+ * enum mlxreg_wdt_type - type of HW watchdog
+ *
+ * TYPE1 HW watchdog implementations exist in old systems.
+ * All new systems have TYPE2 HW watchdog.
+ * TYPE3 HW watchdog can exist on all systems with new CPLD.
+ * TYPE3 is selected by WD capability bit.
+ */
+enum mlxreg_wdt_type {
+ MLX_WDT_TYPE1,
+ MLX_WDT_TYPE2,
+ MLX_WDT_TYPE3,
+};
+
+/**
+ * enum mlxreg_hotplug_kind - kind of hotplug entry
+ *
+ * @MLXREG_HOTPLUG_DEVICE_NA: do not care;
+ * @MLXREG_HOTPLUG_LC_PRESENT: entry for line card presence in/out events;
+ * @MLXREG_HOTPLUG_LC_VERIFIED: entry for line card verification status events
+ * coming after line card security signature validation;
+ * @MLXREG_HOTPLUG_LC_POWERED: entry for line card power on/off events;
+ * @MLXREG_HOTPLUG_LC_SYNCED: entry for line card synchronization events, coming
+ * after hardware-firmware synchronization handshake;
+ * @MLXREG_HOTPLUG_LC_READY: entry for line card ready events, indicating line card
+ * PHYs ready / unready state;
+ * @MLXREG_HOTPLUG_LC_ACTIVE: entry for line card active events, indicating firmware
+ * availability / unavailability for the ports on line card;
+ * @MLXREG_HOTPLUG_LC_THERMAL: entry for line card thermal shutdown events, positive
+ * event indicates that system should power off the line
+ * card for which this event has been received;
+ */
+enum mlxreg_hotplug_kind {
+ MLXREG_HOTPLUG_DEVICE_NA = 0,
+ MLXREG_HOTPLUG_LC_PRESENT = 1,
+ MLXREG_HOTPLUG_LC_VERIFIED = 2,
+ MLXREG_HOTPLUG_LC_POWERED = 3,
+ MLXREG_HOTPLUG_LC_SYNCED = 4,
+ MLXREG_HOTPLUG_LC_READY = 5,
+ MLXREG_HOTPLUG_LC_ACTIVE = 6,
+ MLXREG_HOTPLUG_LC_THERMAL = 7,
+};
+
+/**
+ * enum mlxreg_hotplug_device_action - hotplug device action required for
+ * driver's connectivity
+ *
+ * @MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION: probe device for 'on' event, remove
+ * for 'off' event;
+ * @MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION: probe platform device for 'on'
+ * event, remove for 'off' event;
+ * @MLXREG_HOTPLUG_DEVICE_NO_ACTION: no connectivity action is required;
+ */
+enum mlxreg_hotplug_device_action {
+ MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION = 0,
+ MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION = 1,
+ MLXREG_HOTPLUG_DEVICE_NO_ACTION = 2,
+};
+
+/**
+ * struct mlxreg_core_hotplug_notifier - hotplug notifier block:
+ *
+ * @identity: notifier identity name;
+ * @handle: user handle to be passed by user handler function;
+ * @user_handler: user handler function associated with the event;
+ */
+struct mlxreg_core_hotplug_notifier {
+ char identity[MLXREG_CORE_LABEL_MAX_SIZE];
+ void *handle;
+ int (*user_handler)(void *handle, enum mlxreg_hotplug_kind kind, u8 action);
+};
+
+/**
+ * struct mlxreg_hotplug_device - I2C device data:
+ *
+ * @adapter: I2C device adapter;
+ * @client: I2C device client;
+ * @brdinfo: device board information;
+ * @nr: I2C adapter number to which the device is to be attached;
+ * @pdev: platform device, if device is instantiated as a platform device;
+ * @action: action to be performed upon event receiving;
+ * @handle: user handle to be passed by user handler function;
+ * @user_handler: user handler function associated with the event;
+ * @notifier: pointer to event notifier block;
+ *
+ * Structure represents I2C hotplug device static data (board topology) and
+ * dynamic data (related kernel objects handles).
+ */
+struct mlxreg_hotplug_device {
+ struct i2c_adapter *adapter;
+ struct i2c_client *client;
+ struct i2c_board_info *brdinfo;
+ int nr;
+ struct platform_device *pdev;
+ enum mlxreg_hotplug_device_action action;
+ void *handle;
+ int (*user_handler)(void *handle, enum mlxreg_hotplug_kind kind, u8 action);
+ struct mlxreg_core_hotplug_notifier *notifier;
+};
+
+/**
+ * struct mlxreg_core_data - attributes control data:
+ *
+ * @label: attribute label;
+ * @reg: attribute register;
+ * @mask: attribute access mask;
+ * @bit: attribute effective bit;
+ * @capability: attribute capability register;
+ * @reg_prsnt: attribute presence register;
+ * @reg_sync: attribute sync register;
+ * @reg_pwr: attribute power register;
+ * @reg_ena: attribute enable register;
+ * @mode: access mode;
+ * @np: pointer to the platform node associated with the attribute;
+ * @hpdev: hotplug device data;
+ * @notifier: pointer to event notifier block;
+ * @health_cntr: dynamic device health indication counter;
+ * @attached: true if device has been attached after good health indication;
+ * @regnum: number of registers occupied by multi-register attribute;
+ * @slot: slot number, at which device is located;
+ * @secured: if set indicates that entry access is secured;
+ */
+struct mlxreg_core_data {
+ char label[MLXREG_CORE_LABEL_MAX_SIZE];
+ u32 reg;
+ u32 mask;
+ u32 bit;
+ u32 capability;
+ u32 reg_prsnt;
+ u32 reg_sync;
+ u32 reg_pwr;
+ u32 reg_ena;
+ umode_t mode;
+ struct device_node *np;
+ struct mlxreg_hotplug_device hpdev;
+ struct mlxreg_core_hotplug_notifier *notifier;
+ u32 health_cntr;
+ bool attached;
+ u8 regnum;
+ u8 slot;
+ u8 secured;
+};
+
+/**
+ * struct mlxreg_core_item - same type components controlled by the driver:
+ *
+ * @data: component data;
+ * @kind: kind of hotplug attribute;
+ * @aggr_mask: group aggregation mask;
+ * @reg: group interrupt status register;
+ * @mask: group interrupt mask;
+ * @capability: group capability register;
+ * @cache: last status value for elements from the same group;
+ * @count: number of available elements in the group;
+ * @ind: element's index inside the group;
+ * @inversed: polarity; if 0, a signal status of 0 is OK, if 1, a status of 1 is OK;
+ * @health: true if the device has health indication, false otherwise;
+ */
+struct mlxreg_core_item {
+ struct mlxreg_core_data *data;
+ enum mlxreg_hotplug_kind kind;
+ u32 aggr_mask;
+ u32 reg;
+ u32 mask;
+ u32 capability;
+ u32 cache;
+ u8 count;
+ u8 ind;
+ u8 inversed;
+ u8 health;
+};
+
+/**
+ * struct mlxreg_core_platform_data - platform data:
+ *
+ * @data: instance private data;
+ * @regmap: register map of parent device;
+ * @counter: number of instances;
+ * @features: supported features of device;
+ * @version: implementation version;
+ * @identity: device identity name;
+ * @capability: device capability register;
+ */
+struct mlxreg_core_platform_data {
+ struct mlxreg_core_data *data;
+ void *regmap;
+ int counter;
+ u32 features;
+ u32 version;
+ char identity[MLXREG_CORE_LABEL_MAX_SIZE];
+ u32 capability;
+};
+
+/**
+ * struct mlxreg_core_hotplug_platform_data - hotplug platform data:
+ *
+ * @items: same type components with the hotplug capability;
+ * @irq: platform interrupt number;
+ * @regmap: register map of parent device;
+ * @counter: number of the components with the hotplug capability;
+ * @cell: location of top aggregation interrupt register;
+ * @mask: top aggregation interrupt common mask;
+ * @cell_low: location of low aggregation interrupt register;
+ * @mask_low: low aggregation interrupt common mask;
+ * @deferred_nr: I2C adapter number that must exist before probing executes;
+ * @shift_nr: value by which I2C adapter numbers are shifted;
+ * @addr: mapped resource address;
+ * @handle: handle to be passed by callback;
+ * @completion_notify: callback to notify when platform driver probing is done;
+ */
+struct mlxreg_core_hotplug_platform_data {
+ struct mlxreg_core_item *items;
+ int irq;
+ void *regmap;
+ int counter;
+ u32 cell;
+ u32 mask;
+ u32 cell_low;
+ u32 mask_low;
+ int deferred_nr;
+ int shift_nr;
+ void __iomem *addr;
+ void *handle;
+ int (*completion_notify)(void *handle, int id);
+};
+
+#endif /* __LINUX_PLATFORM_DATA_MLXREG_H */
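
To illustrate the notifier interface, a platform driver might register a handler for line card events roughly as below. This is a sketch under one stated assumption: a non-zero action is taken to mean the event is asserted (e.g. line card inserted).

#include <linux/platform_data/mlxreg.h>

static int board_lc_event(void *handle, enum mlxreg_hotplug_kind kind,
			  u8 action)
{
	/* Assumption: non-zero action = asserted, zero = de-asserted. */
	if (kind == MLXREG_HOTPLUG_LC_PRESENT && action)
		;	/* line card inserted: start its configuration */
	return 0;
}

static struct mlxreg_core_hotplug_notifier board_lc_notifier = {
	.identity     = "line-card-1",	/* illustrative identity name */
	.user_handler = board_lc_event,
};
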
diff --git a/include/linux/platform_data/mmc-davinci.h b/include/linux/platform_data/mmc-davinci.h
index 9cea4ee377b5..87a8bed3b6cb 100644
--- a/include/linux/platform_data/mmc-davinci.h
+++ b/include/linux/platform_data/mmc-davinci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Board-specific MMC configuration
*/
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
deleted file mode 100644
index 7daa78a2f342..000000000000
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright 2010 Wolfram Sang <w.sang@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#ifndef __ASM_ARCH_IMX_ESDHC_H
-#define __ASM_ARCH_IMX_ESDHC_H
-
-#include <linux/types.h>
-
-enum wp_types {
- ESDHC_WP_NONE, /* no WP, neither controller nor gpio */
- ESDHC_WP_CONTROLLER, /* mmc controller internal WP */
- ESDHC_WP_GPIO, /* external gpio pin for WP */
-};
-
-enum cd_types {
- ESDHC_CD_NONE, /* no CD, neither controller nor gpio */
- ESDHC_CD_CONTROLLER, /* mmc controller internal CD */
- ESDHC_CD_GPIO, /* external gpio pin for CD */
- ESDHC_CD_PERMANENT, /* no CD, card permanently wired to host */
-};
-
-/**
- * struct esdhc_platform_data - platform data for esdhc on i.MX
- *
- * ESDHC_WP(CD)_CONTROLLER type is not available on i.MX25/35.
- *
- * @wp_gpio: gpio for write_protect
- * @cd_gpio: gpio for card_detect interrupt
- * @wp_type: type of write_protect method (see wp_types enum above)
- * @cd_type: type of card_detect method (see cd_types enum above)
- * @support_vsel: indicate it supports 1.8v switching
- */
-
-struct esdhc_platform_data {
- unsigned int wp_gpio;
- unsigned int cd_gpio;
- enum wp_types wp_type;
- enum cd_types cd_type;
- int max_bus_width;
- bool support_vsel;
- unsigned int delay_line;
- unsigned int tuning_step; /* The delay cell steps in tuning procedure */
- unsigned int tuning_start_tap; /* The start delay cell point in tuning procedure */
-};
-#endif /* __ASM_ARCH_IMX_ESDHC_H */
diff --git a/include/linux/platform_data/mmc-esdhc-mcf.h b/include/linux/platform_data/mmc-esdhc-mcf.h
new file mode 100644
index 000000000000..85cb786a62fe
--- /dev/null
+++ b/include/linux/platform_data/mmc-esdhc-mcf.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_PLATFORM_DATA_MCF_ESDHC_H__
+#define __LINUX_PLATFORM_DATA_MCF_ESDHC_H__
+
+enum cd_types {
+ ESDHC_CD_NONE, /* no CD, neither controller nor gpio */
+ ESDHC_CD_CONTROLLER, /* mmc controller internal CD */
+ ESDHC_CD_PERMANENT, /* no CD, card permanently wired to host */
+};
+
+struct mcf_esdhc_platform_data {
+ int max_bus_width;
+ int cd_type;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_MCF_ESDHC_H__ */
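
A minimal sketch of a ColdFire board filling this pdata — attached to the eSDHC platform device as dev.platform_data; the values are illustrative.

#include <linux/platform_data/mmc-esdhc-mcf.h>

static struct mcf_esdhc_platform_data mcf_esdhc_data = {
	.max_bus_width = 4,
	.cd_type = ESDHC_CD_CONTROLLER,	/* controller-internal card detect */
};
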
diff --git a/include/linux/platform_data/mmc-mxcmmc.h b/include/linux/platform_data/mmc-mxcmmc.h
index b0fdaa9bd185..ac677351316a 100644
--- a/include/linux/platform_data/mmc-mxcmmc.h
+++ b/include/linux/platform_data/mmc-mxcmmc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASMARM_ARCH_MMC_H
#define ASMARM_ARCH_MMC_H
diff --git a/include/linux/platform_data/mmc-omap.h b/include/linux/platform_data/mmc-omap.h
index 929469291406..91051e9907f3 100644
--- a/include/linux/platform_data/mmc-omap.h
+++ b/include/linux/platform_data/mmc-omap.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* MMC definitions for OMAP2
*
* Copyright (C) 2006 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define OMAP_MMC_MAX_SLOTS 2
@@ -111,11 +108,13 @@ struct omap_mmc_platform_data {
const char *name;
u32 ocr_mask;
- /* Card detection IRQs */
- int card_detect_irq;
+ /* Card detection */
int (*card_detect)(struct device *dev, int slot);
unsigned int ban_openended:1;
} slots[OMAP_MMC_MAX_SLOTS];
};
+
+extern void omap_mmc_notify_cover_event(struct device *dev, int slot,
+ int is_closed);
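
For context, the newly exported omap_mmc_notify_cover_event() is intended to be called from board code when the slot cover switch changes state. The sketch below assumes a hypothetical active-low cover GPIO named COVER_GPIO; the actual polarity depends on board wiring.

#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/platform_data/mmc-omap.h>

#define COVER_GPIO 160	/* hypothetical cover-switch GPIO */

static irqreturn_t board_cover_irq(int irq, void *data)
{
	struct device *mmc_dev = data;	/* the MMC controller device */

	/* Third argument is non-zero when the cover is closed. */
	omap_mmc_notify_cover_event(mmc_dev, 0,
				    !gpio_get_value(COVER_GPIO));
	return IRQ_HANDLED;
}
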
diff --git a/include/linux/platform_data/mmc-pxamci.h b/include/linux/platform_data/mmc-pxamci.h
index 1706b3597ce0..7e44e84e7150 100644
--- a/include/linux/platform_data/mmc-pxamci.h
+++ b/include/linux/platform_data/mmc-pxamci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASMARM_ARCH_MMC_H
#define ASMARM_ARCH_MMC_H
@@ -14,11 +15,7 @@ struct pxamci_platform_data {
int (*get_ro)(struct device *);
int (*setpower)(struct device *, unsigned int);
void (*exit)(struct device *, void *);
- int gpio_card_detect; /* gpio detecting card insertion */
- int gpio_card_ro; /* gpio detecting read only toggle */
bool gpio_card_ro_invert; /* gpio ro is inverted */
- int gpio_power; /* gpio powering up MMC bus */
- bool gpio_power_invert; /* gpio power is inverted */
};
extern void pxa_set_mci_info(struct pxamci_platform_data *info);
diff --git a/include/linux/platform_data/mmc-s3cmci.h b/include/linux/platform_data/mmc-s3cmci.h
deleted file mode 100644
index c42d31711944..000000000000
--- a/include/linux/platform_data/mmc-s3cmci.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef _ARCH_MCI_H
-#define _ARCH_MCI_H
-
-/**
- * struct s3c24xx_mci_pdata - sd/mmc controller platform data
- * @no_wprotect: Set this to indicate there is no write-protect switch.
- * @no_detect: Set this if there is no detect switch.
- * @wprotect_invert: Invert the default sense of the write protect switch.
- * @detect_invert: Invert the default sense of the write protect switch.
- * @use_dma: Set to allow the use of DMA.
- * @gpio_detect: GPIO number for the card detect line.
- * @gpio_wprotect: GPIO number for the write protect line.
- * @ocr_avail: The mask of the available power states, non-zero to use.
- * @set_power: Callback to control the power mode.
- *
- * The @gpio_detect is used for card detection when @no_wprotect is unset,
- * and the default sense is that 0 returned from gpio_get_value() means
- * that a card is inserted. If @detect_invert is set, then the value from
- * gpio_get_value() is inverted, which makes 1 mean card inserted.
- *
- * The driver will use @gpio_wprotect to signal whether the card is write
- * protected if @no_wprotect is not set. A 0 returned from gpio_get_value()
- * means the card is read/write, and 1 means read-only. The @wprotect_invert
- * will invert the value returned from gpio_get_value().
- *
- * Card power is set by @ocr_availa, using MCC_VDD_ constants if it is set
- * to a non-zero value, otherwise the default of 3.2-3.4V is used.
- */
-struct s3c24xx_mci_pdata {
- unsigned int no_wprotect:1;
- unsigned int no_detect:1;
- unsigned int wprotect_invert:1;
- unsigned int detect_invert:1; /* set => detect active high */
- unsigned int use_dma:1;
-
- unsigned int gpio_detect;
- unsigned int gpio_wprotect;
- unsigned long ocr_avail;
- void (*set_power)(unsigned char power_mode,
- unsigned short vdd);
-};
-
-/**
- * s3c24xx_mci_set_platdata - set platform data for mmc/sdi device
- * @pdata: The platform data
- *
- * Copy the platform data supplied by @pdata so that this can be marked
- * __initdata.
- */
-extern void s3c24xx_mci_set_platdata(struct s3c24xx_mci_pdata *pdata);
-
-#endif /* _ARCH_NCI_H */
diff --git a/include/linux/platform_data/mmc-sdhci-s3c.h b/include/linux/platform_data/mmc-sdhci-s3c.h
index 249f02387a35..74a54eeb27b1 100644
--- a/include/linux/platform_data/mmc-sdhci-s3c.h
+++ b/include/linux/platform_data/mmc-sdhci-s3c.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PLATFORM_DATA_SDHCI_S3C_H
#define __PLATFORM_DATA_SDHCI_S3C_H
diff --git a/include/linux/platform_data/mmp_audio.h b/include/linux/platform_data/mmp_audio.h
deleted file mode 100644
index 0f25d165abd6..000000000000
--- a/include/linux/platform_data/mmp_audio.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * MMP Platform AUDIO Management
- *
- * Copyright (c) 2011 Marvell Semiconductors Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#ifndef MMP_AUDIO_H
-#define MMP_AUDIO_H
-
-struct mmp_audio_platdata {
- u32 period_max_capture;
- u32 buffer_max_capture;
- u32 period_max_playback;
- u32 buffer_max_playback;
-};
-
-#endif /* MMP_AUDIO_H */
diff --git a/include/linux/platform_data/mmp_dma.h b/include/linux/platform_data/mmp_dma.h
index d1397c8ed94e..030241cb9cc1 100644
--- a/include/linux/platform_data/mmp_dma.h
+++ b/include/linux/platform_data/mmp_dma.h
@@ -1,20 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* MMP Platform DMA Management
*
* Copyright (c) 2011 Marvell Semiconductors Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef MMP_DMA_H
#define MMP_DMA_H
+struct dma_slave_map;
+
struct mmp_dma_platdata {
int dma_channels;
int nb_requestors;
+ int slave_map_cnt;
+ const struct dma_slave_map *slave_map;
};
#endif /* MMP_DMA_H */
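
The new slave_map fields let board code route DMA request lines to client devices via the standard dma_slave_map table, as sketched below. The device and channel names are hypothetical; they must match what the client drivers pass to dma_request_chan().

#include <linux/dmaengine.h>
#include <linux/kernel.h>
#include <linux/platform_data/mmp_dma.h>

static const struct dma_slave_map board_dma_map[] = {
	{ "pxa2xx-uart.0", "rx", (void *)22 },	/* request line 22 */
	{ "pxa2xx-uart.0", "tx", (void *)21 },	/* request line 21 */
};

static struct mmp_dma_platdata board_dma_pdata = {
	.dma_channels  = 16,
	.nb_requestors = 32,
	.slave_map     = board_dma_map,
	.slave_map_cnt = ARRAY_SIZE(board_dma_map),
};
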
diff --git a/include/linux/platform_data/mms114.h b/include/linux/platform_data/mms114.h
deleted file mode 100644
index 5722ebfb2738..000000000000
--- a/include/linux/platform_data/mms114.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (C) 2012 Samsung Electronics Co.Ltd
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundationr
- */
-
-#ifndef __LINUX_MMS114_H
-#define __LINUX_MMS114_H
-
-struct mms114_platform_data {
- unsigned int x_size;
- unsigned int y_size;
- unsigned int contact_threshold;
- unsigned int moving_threshold;
- bool x_invert;
- bool y_invert;
-
- void (*cfg_pin)(bool);
-};
-
-#endif /* __LINUX_MMS114_H */
diff --git a/include/linux/platform_data/mouse-pxa930_trkball.h b/include/linux/platform_data/mouse-pxa930_trkball.h
deleted file mode 100644
index 5e0789bc4729..000000000000
--- a/include/linux/platform_data/mouse-pxa930_trkball.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __ASM_ARCH_PXA930_TRKBALL_H
-#define __ASM_ARCH_PXA930_TRKBALL_H
-
-struct pxa930_trkball_platform_data {
- int x_filter;
- int y_filter;
-};
-
-#endif /* __ASM_ARCH_PXA930_TRKBALL_H */
-
diff --git a/include/linux/platform_data/mtd-davinci-aemif.h b/include/linux/platform_data/mtd-davinci-aemif.h
index 97948ac2bb9b..a49826214a39 100644
--- a/include/linux/platform_data/mtd-davinci-aemif.h
+++ b/include/linux/platform_data/mtd-davinci-aemif.h
@@ -1,7 +1,7 @@
/*
* TI DaVinci AEMIF support
*
- * Copyright 2010 (C) Texas Instruments, Inc. http://www.ti.com/
+ * Copyright 2010 (C) Texas Instruments, Inc. https://www.ti.com/
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
@@ -33,5 +33,4 @@ struct davinci_aemif_timing {
u8 ta;
};
-int davinci_aemif_setup(struct platform_device *pdev);
#endif
diff --git a/include/linux/platform_data/mtd-davinci.h b/include/linux/platform_data/mtd-davinci.h
index f1a2cf655bdb..dd474dd44848 100644
--- a/include/linux/platform_data/mtd-davinci.h
+++ b/include/linux/platform_data/mtd-davinci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* mach-davinci/nand.h
*
@@ -9,20 +10,6 @@
* Dirk Behme <Dirk.Behme@gmail.com>
*
* --------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __ARCH_ARM_DAVINCI_NAND_H
@@ -56,6 +43,16 @@ struct davinci_nand_pdata { /* platform_data */
uint32_t mask_ale;
uint32_t mask_cle;
+ /*
+ * 0-indexed chip-select number of the asynchronous
+ * interface to which the NAND device has been connected.
+ *
+ * So, if you have NAND connected to CS3 of DA850, you
+	 * will pass '1' here, since the asynchronous interface
+ * on DA850 starts from CS2.
+ */
+ uint32_t core_chipsel;
+
/* for packages using two chipselects */
uint32_t mask_chipsel;
@@ -63,15 +60,16 @@ struct davinci_nand_pdata { /* platform_data */
struct mtd_partition *parts;
unsigned nr_parts;
- /* none == NAND_ECC_NONE (strongly *not* advised!!)
- * soft == NAND_ECC_SOFT
- * else == NAND_ECC_HW, according to ecc_bits
+ /* none == NAND_ECC_ENGINE_TYPE_NONE (strongly *not* advised!!)
+ * soft == NAND_ECC_ENGINE_TYPE_SOFT
+ * else == NAND_ECC_ENGINE_TYPE_ON_HOST, according to ecc_bits
*
* All DaVinci-family chips support 1-bit hardware ECC.
* Newer ones also support 4-bit ECC, but are awkward
* using it with large page chips.
*/
- nand_ecc_modes_t ecc_mode;
+ enum nand_ecc_engine_type engine_type;
+ enum nand_ecc_placement ecc_placement;
u8 ecc_bits;
/* e.g. NAND_BUSWIDTH_16 */
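
To make the core_chipsel rule concrete, here is a hedged board sketch for NAND wired to CS3 on DA850: the asynchronous interface starts at CS2, so the 0-indexed value is 1. All other field values are illustrative, not taken from a specific board file:

#include <linux/mtd/rawnand.h>
#include <linux/platform_data/mtd-davinci.h>

static struct davinci_nand_pdata board_nand_pdata = {
	.core_chipsel	= 1,	/* CS3 - CS2 = 1 on DA850 */
	.mask_ale	= 0x8,
	.mask_cle	= 0x10,
	.engine_type	= NAND_ECC_ENGINE_TYPE_ON_HOST,
	.ecc_bits	= 4,	/* 4-bit hardware ECC on newer SoCs */
};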
diff --git a/include/linux/platform_data/mtd-mxc_nand.h b/include/linux/platform_data/mtd-mxc_nand.h
deleted file mode 100644
index 6bb96ef1600b..000000000000
--- a/include/linux/platform_data/mtd-mxc_nand.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Sascha Hauer, kernel@pengutronix.de
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#ifndef __ASM_ARCH_NAND_H
-#define __ASM_ARCH_NAND_H
-
-#include <linux/mtd/partitions.h>
-
-struct mxc_nand_platform_data {
- unsigned int width; /* data bus width in bytes */
- unsigned int hw_ecc:1; /* 0 if suppress hardware ECC */
- unsigned int flash_bbt:1; /* set to 1 to use a flash based bbt */
- struct mtd_partition *parts; /* partition table */
- int nr_parts; /* size of parts */
-};
-#endif /* __ASM_ARCH_NAND_H */
diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h
index 17d57a18bac5..8c2f1f185353 100644
--- a/include/linux/platform_data/mtd-nand-omap2.h
+++ b/include/linux/platform_data/mtd-nand-omap2.h
@@ -1,15 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2006 Micron Technology Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _MTD_NAND_OMAP2_H
#define _MTD_NAND_OMAP2_H
#include <linux/mtd/partitions.h>
+#include <linux/mod_devicetable.h>
#define GPMC_BCH_NUM_REMAINDER 8
@@ -63,24 +61,12 @@ struct gpmc_nand_regs {
void __iomem *gpmc_bch_result4[GPMC_BCH_NUM_REMAINDER];
void __iomem *gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER];
void __iomem *gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER];
- /* Deprecated. Do not use */
- void __iomem *gpmc_status;
};
-struct omap_nand_platform_data {
- int cs;
- struct mtd_partition *parts;
- int nr_parts;
- bool flash_bbt;
- enum nand_io xfer_type;
- int devsize;
- enum omap_ecc ecc_opt;
-
- struct device_node *elm_of_node;
-
- /* deprecated */
- struct gpmc_nand_regs reg;
- struct device_node *of_node;
- bool dev_ready;
+static const struct of_device_id omap_nand_ids[] = {
+ { .compatible = "ti,omap2-nand", },
+ { .compatible = "ti,am64-nand", },
+ {},
};
-#endif
+
+#endif /* _MTD_NAND_OMAP2_H */
diff --git a/include/linux/platform_data/mtd-nand-pxa3xx.h b/include/linux/platform_data/mtd-nand-pxa3xx.h
index 394d15597dc7..4fd0f592a2d2 100644
--- a/include/linux/platform_data/mtd-nand-pxa3xx.h
+++ b/include/linux/platform_data/mtd-nand-pxa3xx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARCH_PXA3XX_NAND_H
#define __ASM_ARCH_PXA3XX_NAND_H
@@ -5,41 +6,22 @@
#include <linux/mtd/partitions.h>
/*
- * Current pxa3xx_nand controller has two chip select which
- * both be workable.
- *
- * Notice should be taken that:
- * When you want to use this feature, you should not enable the
- * keep configuration feature, for two chip select could be
- * attached with different nand chip. The different page size
- * and timing requirement make the keep configuration impossible.
+ * The current pxa3xx_nand controller has two chip selects, both of which are
+ * usable, but historically all platforms remaining on platform data used only
+ * one. Switch to device tree if you need more.
*/
-
-/* The max num of chip select current support */
-#define NUM_CHIP_SELECT (2)
struct pxa3xx_nand_platform_data {
-
- /* the data flash bus is shared between the Static Memory
- * Controller and the Data Flash Controller, the arbiter
- * controls the ownership of the bus
- */
- int enable_arbiter;
-
- /* allow platform code to keep OBM/bootloader defined NFC config */
- int keep_config;
-
- /* indicate how many chip selects will be used */
- int num_cs;
-
- /* use an flash-based bad block table */
- bool flash_bbt;
-
- /* requested ECC strength and ECC step size */
+ /* Keep OBM/bootloader NFC timing configuration */
+ bool keep_config;
+ /* Use a flash-based bad block table */
+ bool flash_bbt;
+ /* Requested ECC strength and ECC step size */
int ecc_strength, ecc_step_size;
-
- const struct mtd_partition *parts[NUM_CHIP_SELECT];
- unsigned int nr_parts[NUM_CHIP_SELECT];
+ /* Partitions */
+ const struct mtd_partition *parts;
+ unsigned int nr_parts;
};
extern void pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info);
+
#endif /* __ASM_ARCH_PXA3XX_NAND_H */
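
After the trim, a board fills exactly one partition table plus two booleans. A sketch assuming a hypothetical two-partition layout; the partition names and sizes are made up:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sizes.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h>

static const struct mtd_partition board_nand_parts[] = {
	{ .name = "bootloader", .offset = 0, .size = SZ_1M },
	{ .name = "rootfs", .offset = MTDPART_OFS_APPEND,
	  .size = MTDPART_SIZ_FULL },
};

static struct pxa3xx_nand_platform_data board_nand_info = {
	.keep_config	= true,		/* reuse bootloader NFC timings */
	.flash_bbt	= true,
	.parts		= board_nand_parts,
	.nr_parts	= ARRAY_SIZE(board_nand_parts),
};

static void __init board_init_nand(void)
{
	pxa3xx_set_nand_info(&board_nand_info);
}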
diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h
index f8c553f92655..25390fc3e795 100644
--- a/include/linux/platform_data/mtd-nand-s3c2410.h
+++ b/include/linux/platform_data/mtd-nand-s3c2410.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2004 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C2410 - NAND device controller platform_device info
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __MTD_NAND_S3C2410_H
@@ -52,7 +49,7 @@ struct s3c2410_platform_nand {
unsigned int ignore_unset_ecc:1;
- nand_ecc_modes_t ecc_mode;
+ enum nand_ecc_engine_type engine_type;
int nr_sets;
struct s3c2410_nand_set *sets;
diff --git a/include/linux/platform_data/mtd-onenand-omap2.h b/include/linux/platform_data/mtd-onenand-omap2.h
deleted file mode 100644
index 56ff0e6f5ad1..000000000000
--- a/include/linux/platform_data/mtd-onenand-omap2.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) 2006 Nokia Corporation
- * Author: Juha Yrjola
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MTD_ONENAND_OMAP2_H
-#define __MTD_ONENAND_OMAP2_H
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-
-#define ONENAND_SYNC_READ (1 << 0)
-#define ONENAND_SYNC_READWRITE (1 << 1)
-#define ONENAND_IN_OMAP34XX (1 << 2)
-
-struct omap_onenand_platform_data {
- int cs;
- int gpio_irq;
- struct mtd_partition *parts;
- int nr_parts;
- int (*onenand_setup)(void __iomem *, int *freq_ptr);
- int dma_channel;
- u8 flags;
- u8 regulator_can_sleep;
- u8 skip_initial_unlocking;
-
- /* for passing the partitions */
- struct device_node *of_node;
-};
-#endif
diff --git a/include/linux/platform_data/mtd-orion_nand.h b/include/linux/platform_data/mtd-orion_nand.h
index a7ce77c7c1a8..34828eb85982 100644
--- a/include/linux/platform_data/mtd-orion_nand.h
+++ b/include/linux/platform_data/mtd-orion_nand.h
@@ -12,7 +12,6 @@
*/
struct orion_nand_data {
struct mtd_partition *parts;
- int (*dev_ready)(struct mtd_info *mtd);
u32 nr_parts;
u8 ale; /* address line number connected to ALE */
u8 cle; /* address line number connected to CLE */
diff --git a/include/linux/platform_data/mv88e6xxx.h b/include/linux/platform_data/mv88e6xxx.h
new file mode 100644
index 000000000000..21452a9365e1
--- /dev/null
+++ b/include/linux/platform_data/mv88e6xxx.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DSA_MV88E6XXX_H
+#define __DSA_MV88E6XXX_H
+
+#include <linux/platform_data/dsa.h>
+
+struct dsa_mv88e6xxx_pdata {
+ /* Must be first, such that dsa_register_switch() can access this
+ * without gory pointer manipulations
+ */
+ struct dsa_chip_data cd;
+ const char *compatible;
+ unsigned int enabled_ports;
+ struct net_device *netdev;
+ u32 eeprom_len;
+ int irq;
+};
+
+#endif
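
Because struct dsa_chip_data must stay the first member, the DSA core can treat a pointer to the whole structure as a pointer to the chip data. A sketch of a board-level instance; the port names, compatible string, and port mask are invented, and .netdev/.irq would be filled in by board code at registration time:

#include <linux/platform_data/mv88e6xxx.h>

static struct dsa_mv88e6xxx_pdata board_switch_pdata = {
	.cd = {
		.port_names[0]	= "cpu",
		.port_names[1]	= "lan1",
		.port_names[2]	= "lan2",
	},
	.compatible	= "marvell,mv88e6085",
	.enabled_ports	= 0x07,
	.eeprom_len	= 512,
};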
diff --git a/include/linux/platform_data/mv_usb.h b/include/linux/platform_data/mv_usb.h
index 98b7925f1a2d..20d239c02bf3 100644
--- a/include/linux/platform_data/mv_usb.h
+++ b/include/linux/platform_data/mv_usb.h
@@ -1,23 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2011 Marvell International Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __MV_PLATFORM_USB_H
#define __MV_PLATFORM_USB_H
-enum pxa_ehci_type {
- EHCI_UNDEFINED = 0,
- PXA_U2OEHCI, /* pxa 168, 9xx */
- PXA_SPH, /* pxa 168, 9xx SPH */
- MMP3_HSIC, /* mmp3 hsic */
- MMP3_FSIC, /* mmp3 fsic */
-};
-
enum {
MV_USB_MODE_OTG,
MV_USB_MODE_HOST,
@@ -48,6 +36,5 @@ struct mv_usb_platform_data {
int (*phy_init)(void __iomem *regbase);
void (*phy_deinit)(void __iomem *regbase);
int (*set_vbus)(unsigned int vbus);
- int (*private_init)(void __iomem *opregs, void __iomem *phyregs);
};
#endif
diff --git a/include/linux/platform_data/net-cw1200.h b/include/linux/platform_data/net-cw1200.h
index c6fbc3ce4ab0..c510734405bb 100644
--- a/include/linux/platform_data/net-cw1200.h
+++ b/include/linux/platform_data/net-cw1200.h
@@ -1,8 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) ST-Ericsson SA 2011
*
* Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
- * License terms: GNU General Public License (GPL) version 2
*/
#ifndef CW1200_PLAT_H_INCLUDED
diff --git a/include/linux/platform_data/nfcmrvl.h b/include/linux/platform_data/nfcmrvl.h
deleted file mode 100644
index 9e75ac8d19be..000000000000
--- a/include/linux/platform_data/nfcmrvl.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2015, Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
- */
-
-#ifndef _NFCMRVL_PTF_H_
-#define _NFCMRVL_PTF_H_
-
-struct nfcmrvl_platform_data {
- /*
- * Generic
- */
-
- /* GPIO that is wired to RESET_N signal */
- int reset_n_io;
- /* Tell if transport is muxed in HCI one */
- unsigned int hci_muxed;
-
- /*
- * UART specific
- */
-
- /* Tell if UART needs flow control at init */
- unsigned int flow_control;
- /* Tell if firmware supports break control for power management */
- unsigned int break_control;
-
-
- /*
- * I2C specific
- */
-
- unsigned int irq;
- unsigned int irq_polarity;
-};
-
-#endif /* _NFCMRVL_PTF_H_ */
diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h
deleted file mode 100644
index 698d0d59db76..000000000000
--- a/include/linux/platform_data/ntc_thermistor.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * ntc_thermistor.h - NTC Thermistors
- *
- * Copyright (C) 2010 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _LINUX_NTC_H
-#define _LINUX_NTC_H
-
-struct iio_channel;
-
-enum ntc_thermistor_type {
- TYPE_NCPXXWB473,
- TYPE_NCPXXWL333,
- TYPE_B57330V2103,
- TYPE_NCPXXWF104,
- TYPE_NCPXXXH103,
-};
-
-struct ntc_thermistor_platform_data {
- /*
-	 * Only one of read_uV and read_ohm should be provided.
-	 * Either function should return a negative value for an error case.
- *
- * pullup_uV, pullup_ohm, pulldown_ohm, and connect are required to use
- * read_uV()
- *
- * How to setup pullup_ohm, pulldown_ohm, and connect is
- * described at Documentation/hwmon/ntc_thermistor
- *
- * pullup/down_ohm: 0 for infinite / not-connected
- *
- * chan: iio_channel pointer to communicate with the ADC which the
- * thermistor is using for conversion of the analog values.
- */
- int (*read_uv)(struct ntc_thermistor_platform_data *);
- unsigned int pullup_uv;
-
- unsigned int pullup_ohm;
- unsigned int pulldown_ohm;
- enum { NTC_CONNECTED_POSITIVE, NTC_CONNECTED_GROUND } connect;
- struct iio_channel *chan;
-
- int (*read_ohm)(void);
-};
-
-#endif /* _LINUX_NTC_H */
diff --git a/include/linux/platform_data/nxp-nci.h b/include/linux/platform_data/nxp-nci.h
deleted file mode 100644
index d6ed28679bb2..000000000000
--- a/include/linux/platform_data/nxp-nci.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Generic platform data for the NXP NCI NFC chips.
- *
- * Copyright (C) 2014 NXP Semiconductors All rights reserved.
- *
- * Authors: Clément Perrochaud <clement.perrochaud@nxp.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _NXP_NCI_H_
-#define _NXP_NCI_H_
-
-struct nxp_nci_nfc_platform_data {
- unsigned int gpio_en;
- unsigned int gpio_fw;
- unsigned int irq;
-};
-
-#endif /* _NXP_NCI_H_ */
diff --git a/include/linux/platform_data/omap-twl4030.h b/include/linux/platform_data/omap-twl4030.h
index ee60ef79d792..0dd851ea1c72 100644
--- a/include/linux/platform_data/omap-twl4030.h
+++ b/include/linux/platform_data/omap-twl4030.h
@@ -1,25 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/**
* omap-twl4030.h - ASoC machine driver for TI SoC based boards with twl4030
* codec, header.
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com
* All rights reserved.
*
* Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef _OMAP_TWL4030_H_
diff --git a/include/linux/platform_data/omap-wd-timer.h b/include/linux/platform_data/omap-wd-timer.h
index d75f5f802d98..f2788ec984e9 100644
--- a/include/linux/platform_data/omap-wd-timer.h
+++ b/include/linux/platform_data/omap-wd-timer.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* OMAP2+ WDTIMER-specific function prototypes
*
* Copyright (C) 2012 Texas Instruments, Inc.
* Paul Walmsley
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_PLATFORM_DATA_OMAP_WD_TIMER_H
diff --git a/include/linux/platform_data/omap1_bl.h b/include/linux/platform_data/omap1_bl.h
index 881a8e92d605..5e8b17d77a5f 100644
--- a/include/linux/platform_data/omap1_bl.h
+++ b/include/linux/platform_data/omap1_bl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __OMAP1_BL_H__
#define __OMAP1_BL_H__
diff --git a/include/linux/platform_data/omap_drm.h b/include/linux/platform_data/omap_drm.h
deleted file mode 100644
index f4e4a237ebd2..000000000000
--- a/include/linux/platform_data/omap_drm.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * DRM/KMS platform data for TI OMAP platforms
- *
- * Copyright (C) 2012 Texas Instruments
- * Author: Rob Clark <rob.clark@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __PLATFORM_DATA_OMAP_DRM_H__
-#define __PLATFORM_DATA_OMAP_DRM_H__
-
-/*
- * Optional platform data to configure the default configuration of which
- * pipes/overlays/CRTCs are used. If this is not provided, then instead the
- * first CONFIG_DRM_OMAP_NUM_CRTCS are used, and they are each connected to
- * one manager, with priority given to managers that are connected to
- * detected devices. Remaining overlays are used as video planes. This
- * should be a good default behavior for most cases, but yet there still
- * might be times when you wish to do something different.
- */
-struct omap_kms_platform_data {
- /* overlays to use as CRTCs: */
- int ovl_cnt;
- const int *ovl_ids;
-
- /* overlays to use as video planes: */
- int pln_cnt;
- const int *pln_ids;
-
- int mgr_cnt;
- const int *mgr_ids;
-
- int dev_cnt;
- const char **dev_names;
-};
-
-struct omap_drm_platform_data {
- uint32_t omaprev;
- struct omap_kms_platform_data *kms_pdata;
-};
-
-#endif /* __PLATFORM_DATA_OMAP_DRM_H__ */
diff --git a/include/linux/platform_data/omapdss.h b/include/linux/platform_data/omapdss.h
index 7feb011ed500..a377090d90e8 100644
--- a/include/linux/platform_data/omapdss.h
+++ b/include/linux/platform_data/omapdss.h
@@ -1,10 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2016 Texas Instruments, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __OMAPDSS_PDATA_H
diff --git a/include/linux/platform_data/pca953x.h b/include/linux/platform_data/pca953x.h
index 3c98dd4f901f..96c1a14ab365 100644
--- a/include/linux/platform_data/pca953x.h
+++ b/include/linux/platform_data/pca953x.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PCA953X_H
#define _LINUX_PCA953X_H
@@ -21,7 +22,7 @@ struct pca953x_platform_data {
int (*setup)(struct i2c_client *client,
unsigned gpio, unsigned ngpio,
void *context);
- int (*teardown)(struct i2c_client *client,
+	void (*teardown)(struct i2c_client *client,
unsigned gpio, unsigned ngpio,
void *context);
const char *const *names;
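
The teardown callback now returns void because no caller ever consumed its status. A minimal sketch of the updated callback pair; the board hooks are placeholders:

#include <linux/i2c.h>
#include <linux/platform_data/pca953x.h>

static int board_pca953x_setup(struct i2c_client *client, unsigned gpio,
			       unsigned ngpio, void *context)
{
	/* claim or preconfigure the expander lines here */
	return 0;
}

static void board_pca953x_teardown(struct i2c_client *client, unsigned gpio,
				   unsigned ngpio, void *context)
{
	/* release the expander lines; nothing to report back */
}

static struct pca953x_platform_data board_pca953x_pdata = {
	.gpio_base	= 200,	/* board specific */
	.setup		= board_pca953x_setup,
	.teardown	= board_pca953x_teardown,
};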
diff --git a/include/linux/platform_data/pcf857x.h b/include/linux/platform_data/pcf857x.h
deleted file mode 100644
index 0767a2a6b2f1..000000000000
--- a/include/linux/platform_data/pcf857x.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef __LINUX_PCF857X_H
-#define __LINUX_PCF857X_H
-
-/**
- * struct pcf857x_platform_data - data to set up pcf857x driver
- * @gpio_base: number of the chip's first GPIO
- * @n_latch: optional bit-inverse of initial register value; if
- * you leave this initialized to zero the driver will act
- * like the chip was just reset
- * @setup: optional callback issued once the GPIOs are valid
- * @teardown: optional callback issued before the GPIOs are invalidated
- * @context: optional parameter passed to setup() and teardown()
- *
- * In addition to the I2C_BOARD_INFO() state appropriate to each chip,
- * the i2c_board_info used with the pcf857x driver must provide its
- * platform_data (pointer to one of these structures) with at least
- * the gpio_base value initialized.
- *
- * The @setup callback may be used with the kind of board-specific glue
- * which hands the (now-valid) GPIOs to other drivers, or which puts
- * devices in their initial states using these GPIOs.
- *
- * These GPIO chips are only "quasi-bidirectional"; read the chip specs
- * to understand the behavior. They don't have separate registers to
- * record which pins are used for input or output, record which output
- * values are driven, or provide access to input values. That must be
- * inferred by reading the chip's value and knowing the last value written
- * to it. If you leave n_latch initialized to zero, that last written
- * value is presumed to be all ones (as if the chip were just reset).
- */
-struct pcf857x_platform_data {
- unsigned gpio_base;
- unsigned n_latch;
-
- int (*setup)(struct i2c_client *client,
- int gpio, unsigned ngpio,
- void *context);
- int (*teardown)(struct i2c_client *client,
- int gpio, unsigned ngpio,
- void *context);
- void *context;
-};
-
-#endif /* __LINUX_PCF857X_H */
diff --git a/include/linux/platform_data/pcmcia-pxa2xx_viper.h b/include/linux/platform_data/pcmcia-pxa2xx_viper.h
deleted file mode 100644
index d428be4db44c..000000000000
--- a/include/linux/platform_data/pcmcia-pxa2xx_viper.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __ARCOM_PCMCIA_H
-#define __ARCOM_PCMCIA_H
-
-struct arcom_pcmcia_pdata {
- int cd_gpio;
- int rdy_gpio;
- int pwr_gpio;
- void (*reset)(int state);
-};
-
-#endif
diff --git a/include/linux/platform_data/phy-da8xx-usb.h b/include/linux/platform_data/phy-da8xx-usb.h
new file mode 100644
index 000000000000..85c2b99381b2
--- /dev/null
+++ b/include/linux/platform_data/phy-da8xx-usb.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-da8xx-usb - TI DaVinci DA8xx USB PHY driver
+ *
+ * Copyright (C) 2018 David Lechner <david@lechnology.com>
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_PHY_DA8XX_USB_H__
+#define __LINUX_PLATFORM_DATA_PHY_DA8XX_USB_H__
+
+#include <linux/regmap.h>
+
+/**
+ * da8xx_usb_phy_platform_data
+ * @cfgchip: CFGCHIP syscon regmap
+ */
+struct da8xx_usb_phy_platform_data {
+ struct regmap *cfgchip;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_PHY_DA8XX_USB_H__ */
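
The PHY needs only the CFGCHIP syscon regmap, so handing it platform data reduces to a single lookup. A sketch with minimal error handling; the compatible string is an assumption about how the board exposes CFGCHIP:

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/platform_data/phy-da8xx-usb.h>

static int board_register_usb_phy(struct platform_device *phy_pdev)
{
	struct da8xx_usb_phy_platform_data pdata;

	pdata.cfgchip = syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
	if (IS_ERR(pdata.cfgchip))
		return PTR_ERR(pdata.cfgchip);

	/* platform_device_add_data() copies pdata, so a stack variable is fine */
	return platform_device_add_data(phy_pdev, &pdata, sizeof(pdata));
}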
diff --git a/include/linux/platform_data/pinctrl-adi2.h b/include/linux/platform_data/pinctrl-adi2.h
deleted file mode 100644
index 8f91300617ec..000000000000
--- a/include/linux/platform_data/pinctrl-adi2.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Pinctrl Driver for ADI GPIO2 controller
- *
- * Copyright 2007-2013 Analog Devices Inc.
- *
- * Licensed under the GPLv2 or later
- */
-
-
-#ifndef PINCTRL_ADI2_H
-#define PINCTRL_ADI2_H
-
-#include <linux/io.h>
-#include <linux/platform_device.h>
-
-/**
- * struct adi_pinctrl_gpio_platform_data - Pinctrl gpio platform data
- * for ADI GPIO2 device.
- *
- * @port_gpio_base: Optional global GPIO index of the GPIO bank.
- * 0 means driver decides.
- * @port_pin_base: Pin index of the pin controller device.
- * @port_width: PIN number of the GPIO bank device
- * @pint_id: GPIO PINT device id that this GPIO bank should map to.
- * @pint_assign: The 32-bit GPIO PINT registers can be divided into 2 parts. A
- * GPIO bank can be mapped into either low 16 bits[0] or high 16
- * bits[1] of each PINT register.
- * @pint_map: GPIO bank mapping code in PINT device
- */
-struct adi_pinctrl_gpio_platform_data {
- unsigned int port_gpio_base;
- unsigned int port_pin_base;
- unsigned int port_width;
- u8 pinctrl_id;
- u8 pint_id;
- bool pint_assign;
- u8 pint_map;
-};
-
-#endif
diff --git a/include/linux/platform_data/pinctrl-single.h b/include/linux/platform_data/pinctrl-single.h
index 72eacda9b360..7473d3c4cabf 100644
--- a/include/linux/platform_data/pinctrl-single.h
+++ b/include/linux/platform_data/pinctrl-single.h
@@ -1,3 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _PINCTRL_SINGLE_H
+#define _PINCTRL_SINGLE_H
+
/**
* irq: optional wake-up interrupt
* rearm: optional soc specific rearm function
@@ -10,3 +15,5 @@ struct pcs_pdata {
int irq;
void (*rearm)(void);
};
+
+#endif /* _PINCTRL_SINGLE_H */
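
A short sketch of how SoC glue might fill pcs_pdata now that the header is self-guarded; the IRQ number and the rearm body are placeholders for board-specific wake-up handling:

#include <linux/platform_data/pinctrl-single.h>

static void board_pcs_rearm(void)
{
	/* SoC specific: re-enable pinctrl wake-up paths before idle */
}

static struct pcs_pdata board_pcs_pdata = {
	.irq	= 123,			/* wake-up interrupt, board specific */
	.rearm	= board_pcs_rearm,
};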
diff --git a/include/linux/platform_data/pixcir_i2c_ts.h b/include/linux/platform_data/pixcir_i2c_ts.h
deleted file mode 100644
index 646af6f8b838..000000000000
--- a/include/linux/platform_data/pixcir_i2c_ts.h
+++ /dev/null
@@ -1,63 +0,0 @@
-#ifndef _PIXCIR_I2C_TS_H
-#define _PIXCIR_I2C_TS_H
-
-/*
- * Register map
- */
-#define PIXCIR_REG_POWER_MODE 51
-#define PIXCIR_REG_INT_MODE 52
-
-/*
- * Power modes:
- * active: max scan speed
- * idle: lower scan speed with automatic transition to active on touch
- * halt: datasheet says sleep but this is more like halt as the chip
- * clocks are cut and it can only be brought out of this mode
- * using the RESET pin.
- */
-enum pixcir_power_mode {
- PIXCIR_POWER_ACTIVE,
- PIXCIR_POWER_IDLE,
- PIXCIR_POWER_HALT,
-};
-
-#define PIXCIR_POWER_MODE_MASK 0x03
-#define PIXCIR_POWER_ALLOW_IDLE (1UL << 2)
-
-/*
- * Interrupt modes:
- * periodical: interrupt is asserted periodically
- * diff coordinates: interrupt is asserted when coordinates change
- * level on touch: interrupt level asserted during touch
- * pulse on touch: interrupt pulse asserted during touch
- *
- */
-enum pixcir_int_mode {
- PIXCIR_INT_PERIODICAL,
- PIXCIR_INT_DIFF_COORD,
- PIXCIR_INT_LEVEL_TOUCH,
- PIXCIR_INT_PULSE_TOUCH,
-};
-
-#define PIXCIR_INT_MODE_MASK 0x03
-#define PIXCIR_INT_ENABLE (1UL << 3)
-#define PIXCIR_INT_POL_HIGH (1UL << 2)
-
-/**
- * struct pixcir_i2c_chip_data - chip related data
- * @max_fingers: Max number of fingers reported simultaneously by h/w
- * @has_hw_ids: Hardware supports finger tracking IDs
- *
- */
-struct pixcir_i2c_chip_data {
- u8 max_fingers;
- bool has_hw_ids;
-};
-
-struct pixcir_ts_platform_data {
- int x_max;
- int y_max;
- struct pixcir_i2c_chip_data chip;
-};
-
-#endif
diff --git a/include/linux/platform_data/pm33xx.h b/include/linux/platform_data/pm33xx.h
new file mode 100644
index 000000000000..7037ba7a53ca
--- /dev/null
+++ b/include/linux/platform_data/pm33xx.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TI pm33xx platform data
+ *
+ * Copyright (C) 2016-2018 Texas Instruments, Inc.
+ * Dave Gerlach <d-gerlach@ti.com>
+ */
+
+#ifndef _LINUX_PLATFORM_DATA_PM33XX_H
+#define _LINUX_PLATFORM_DATA_PM33XX_H
+
+#include <linux/kbuild.h>
+#include <linux/types.h>
+
+/*
+ * WFI Flags for sleep code control
+ *
+ * These flags allow PM code to exclude certain operations from happening
+ * in the low level ASM code found in sleep33xx.S and sleep43xx.S
+ *
+ * WFI_FLAG_FLUSH_CACHE: Flush the ARM caches and disable caching. Only
+ * needed when MPU will lose context.
+ * WFI_FLAG_SELF_REFRESH: Let EMIF place DDR memory into self-refresh and
+ * disable EMIF.
+ * WFI_FLAG_SAVE_EMIF: Save context of all EMIF registers and restore in
+ * resume path. Only needed if PER domain loses context
+ * and must also have WFI_FLAG_SELF_REFRESH set.
+ * WFI_FLAG_WAKE_M3: Disable MPU clock or clockdomain to cause wkup_m3 to
+ * execute when WFI instruction executes.
+ * WFI_FLAG_RTC_ONLY: Configure the RTC to enter RTC+DDR mode.
+ */
+#define WFI_FLAG_FLUSH_CACHE BIT(0)
+#define WFI_FLAG_SELF_REFRESH BIT(1)
+#define WFI_FLAG_SAVE_EMIF BIT(2)
+#define WFI_FLAG_WAKE_M3 BIT(3)
+#define WFI_FLAG_RTC_ONLY BIT(4)
+
+#ifndef __ASSEMBLER__
+struct am33xx_pm_sram_addr {
+ void (*do_wfi)(void);
+ unsigned long *do_wfi_sz;
+ unsigned long *resume_offset;
+ unsigned long *emif_sram_table;
+ unsigned long *ro_sram_data;
+ unsigned long resume_address;
+};
+
+struct am33xx_pm_platform_data {
+ int (*init)(int (*idle)(u32 wfi_flags));
+ int (*deinit)(void);
+ int (*soc_suspend)(unsigned int state, int (*fn)(unsigned long),
+ unsigned long args);
+ int (*cpu_suspend)(int (*fn)(unsigned long), unsigned long args);
+ void (*begin_suspend)(void);
+ void (*finish_suspend)(void);
+ struct am33xx_pm_sram_addr *(*get_sram_addrs)(void);
+ void (*save_context)(void);
+ void (*restore_context)(void);
+ int (*check_off_mode_enable)(void);
+};
+
+struct am33xx_pm_sram_data {
+ u32 wfi_flags;
+ u32 l2_aux_ctrl_val;
+ u32 l2_prefetch_ctrl_val;
+} __packed __aligned(8);
+
+struct am33xx_pm_ro_sram_data {
+ u32 amx3_pm_sram_data_virt;
+ u32 amx3_pm_sram_data_phys;
+ void __iomem *rtc_base_virt;
+} __packed __aligned(8);
+
+#endif /* __ASSEMBLER__ */
+#endif /* _LINUX_PLATFORM_DATA_PM33XX_H */
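
The WFI flags are meant to compose: a sleep state that loses both MPU and PER context needs the cache flush, DDR self-refresh, EMIF save, and the wkup_m3 handshake together. An illustrative combination; which flags a given sleep state actually needs is decided by the SoC PM code:

#include <linux/platform_data/pm33xx.h>

static const u32 deep_sleep_wfi_flags = WFI_FLAG_FLUSH_CACHE |
					WFI_FLAG_SELF_REFRESH |
					WFI_FLAG_SAVE_EMIF |
					WFI_FLAG_WAKE_M3;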
diff --git a/include/linux/platform_data/pwm_omap_dmtimer.h b/include/linux/platform_data/pwm_omap_dmtimer.h
deleted file mode 100644
index e7d521e48855..000000000000
--- a/include/linux/platform_data/pwm_omap_dmtimer.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * include/linux/platform_data/pwm_omap_dmtimer.h
- *
- * OMAP Dual-Mode Timer PWM platform data
- *
- * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
- * Tarun Kanti DebBarma <tarun.kanti@ti.com>
- * Thara Gopinath <thara@ti.com>
- *
- * Platform device conversion and hwmod support.
- *
- * Copyright (C) 2005 Nokia Corporation
- * Author: Lauri Leukkunen <lauri.leukkunen@nokia.com>
- * PWM and clock framework support by Timo Teras.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __PWM_OMAP_DMTIMER_PDATA_H
-#define __PWM_OMAP_DMTIMER_PDATA_H
-
-/* clock sources */
-#define PWM_OMAP_DMTIMER_SRC_SYS_CLK 0x00
-#define PWM_OMAP_DMTIMER_SRC_32_KHZ 0x01
-#define PWM_OMAP_DMTIMER_SRC_EXT_CLK 0x02
-
-/* timer interrupt enable bits */
-#define PWM_OMAP_DMTIMER_INT_CAPTURE (1 << 2)
-#define PWM_OMAP_DMTIMER_INT_OVERFLOW (1 << 1)
-#define PWM_OMAP_DMTIMER_INT_MATCH (1 << 0)
-
-/* trigger types */
-#define PWM_OMAP_DMTIMER_TRIGGER_NONE 0x00
-#define PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW 0x01
-#define PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW_AND_COMPARE 0x02
-
-struct omap_dm_timer;
-typedef struct omap_dm_timer pwm_omap_dmtimer;
-
-struct pwm_omap_dmtimer_pdata {
- pwm_omap_dmtimer *(*request_by_node)(struct device_node *np);
- pwm_omap_dmtimer *(*request_specific)(int timer_id);
- pwm_omap_dmtimer *(*request)(void);
-
- int (*free)(pwm_omap_dmtimer *timer);
-
- void (*enable)(pwm_omap_dmtimer *timer);
- void (*disable)(pwm_omap_dmtimer *timer);
-
- int (*get_irq)(pwm_omap_dmtimer *timer);
- int (*set_int_enable)(pwm_omap_dmtimer *timer, unsigned int value);
- int (*set_int_disable)(pwm_omap_dmtimer *timer, u32 mask);
-
- struct clk *(*get_fclk)(pwm_omap_dmtimer *timer);
-
- int (*start)(pwm_omap_dmtimer *timer);
- int (*stop)(pwm_omap_dmtimer *timer);
- int (*set_source)(pwm_omap_dmtimer *timer, int source);
-
- int (*set_load)(pwm_omap_dmtimer *timer, int autoreload,
- unsigned int value);
- int (*set_match)(pwm_omap_dmtimer *timer, int enable,
- unsigned int match);
- int (*set_pwm)(pwm_omap_dmtimer *timer, int def_on,
- int toggle, int trigger);
- int (*set_prescaler)(pwm_omap_dmtimer *timer, int prescaler);
-
- unsigned int (*read_counter)(pwm_omap_dmtimer *timer);
- int (*write_counter)(pwm_omap_dmtimer *timer, unsigned int value);
- unsigned int (*read_status)(pwm_omap_dmtimer *timer);
- int (*write_status)(pwm_omap_dmtimer *timer, unsigned int value);
-};
-
-#endif /* __PWM_OMAP_DMTIMER_PDATA_H */
diff --git a/include/linux/platform_data/pxa2xx_udc.h b/include/linux/platform_data/pxa2xx_udc.h
index c6c5e98b5b82..ff9c35dca59d 100644
--- a/include/linux/platform_data/pxa2xx_udc.h
+++ b/include/linux/platform_data/pxa2xx_udc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This supports machine-specific differences in how the PXA2xx
* USB Device Controller (UDC) is wired.
diff --git a/include/linux/platform_data/pxa_sdhci.h b/include/linux/platform_data/pxa_sdhci.h
index 9e20c2fb4ffd..899457cee425 100644
--- a/include/linux/platform_data/pxa_sdhci.h
+++ b/include/linux/platform_data/pxa_sdhci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/platform_data/pxa_sdhci.h
*
@@ -5,10 +6,6 @@
* Zhangfei Gao <zhangfei.gao@marvell.com>
*
* PXA Platform - SDHCI platform data definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _PXA_SDHCI_H_
@@ -33,8 +30,6 @@
* 1: choose feedback clk + delay value
* 2: choose internal clk
* @clk_delay_enable: enable clk_delay or not, used on pxa910
- * @ext_cd_gpio: gpio pin used for external CD line
- * @ext_cd_gpio_invert: invert values for external CD gpio line
* @max_speed: the maximum speed supported
* @host_caps: Standard MMC host capabilities bit field.
 * @quirks: quirks of platform
@@ -46,8 +41,6 @@ struct sdhci_pxa_platdata {
unsigned int clk_delay_cycles;
unsigned int clk_delay_sel;
bool clk_delay_enable;
- unsigned int ext_cd_gpio;
- bool ext_cd_gpio_invert;
unsigned int max_speed;
u32 host_caps;
u32 host_caps2;
diff --git a/include/linux/platform_data/regulator-haptic.h b/include/linux/platform_data/regulator-haptic.h
index 5658e58e0738..4213e1b01316 100644
--- a/include/linux/platform_data/regulator-haptic.h
+++ b/include/linux/platform_data/regulator-haptic.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Regulator Haptic Platform Data
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author: Jaewon Kim <jaewon02.kim@samsung.com>
* Author: Hyunhee Kim <hyunhee.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _REGULATOR_HAPTIC_H
diff --git a/include/linux/platform_data/remoteproc-omap.h b/include/linux/platform_data/remoteproc-omap.h
deleted file mode 100644
index 71a1b2399c48..000000000000
--- a/include/linux/platform_data/remoteproc-omap.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Remote Processor - omap-specific bits
- *
- * Copyright (C) 2011 Texas Instruments, Inc.
- * Copyright (C) 2011 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _PLAT_REMOTEPROC_H
-#define _PLAT_REMOTEPROC_H
-
-struct rproc_ops;
-struct platform_device;
-
-/*
- * struct omap_rproc_pdata - omap remoteproc's platform data
- * @name: the remoteproc's name
- * @oh_name: omap hwmod device
- * @oh_name_opt: optional, secondary omap hwmod device
- * @firmware: name of firmware file to load
- * @mbox_name: name of omap mailbox device to use with this rproc
- * @ops: start/stop rproc handlers
- * @device_enable: omap-specific handler for enabling a device
- * @device_shutdown: omap-specific handler for shutting down a device
- * @set_bootaddr: omap-specific handler for setting the rproc boot address
- */
-struct omap_rproc_pdata {
- const char *name;
- const char *oh_name;
- const char *oh_name_opt;
- const char *firmware;
- const char *mbox_name;
- const struct rproc_ops *ops;
- int (*device_enable)(struct platform_device *pdev);
- int (*device_shutdown)(struct platform_device *pdev);
- void (*set_bootaddr)(u32);
-};
-
-#if defined(CONFIG_OMAP_REMOTEPROC) || defined(CONFIG_OMAP_REMOTEPROC_MODULE)
-
-void __init omap_rproc_reserve_cma(void);
-
-#else
-
-static inline void __init omap_rproc_reserve_cma(void)
-{
-}
-
-#endif
-
-#endif /* _PLAT_REMOTEPROC_H */
diff --git a/include/linux/platform_data/rtc-v3020.h b/include/linux/platform_data/rtc-v3020.h
deleted file mode 100644
index e55d82cebf80..000000000000
--- a/include/linux/platform_data/rtc-v3020.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * v3020.h - Registers definition and platform data structure for the v3020 RTC.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006, 8D Technologies inc.
- */
-#ifndef __LINUX_V3020_H
-#define __LINUX_V3020_H
-
-/* The v3020 has only one data pin but which one
- * is used depends on the board. */
-struct v3020_platform_data {
- int leftshift; /* (1<<(leftshift)) & readl() */
-
- unsigned int use_gpio:1;
- unsigned int gpio_cs;
- unsigned int gpio_wr;
- unsigned int gpio_rd;
- unsigned int gpio_io;
-};
-
-#define V3020_STATUS_0 0x00
-#define V3020_STATUS_1 0x01
-#define V3020_SECONDS 0x02
-#define V3020_MINUTES 0x03
-#define V3020_HOURS 0x04
-#define V3020_MONTH_DAY 0x05
-#define V3020_MONTH 0x06
-#define V3020_YEAR 0x07
-#define V3020_WEEK_DAY 0x08
-#define V3020_WEEK 0x09
-
-#define V3020_IS_COMMAND(val) ((val)>=0x0E)
-
-#define V3020_CMD_RAM2CLOCK 0x0E
-#define V3020_CMD_CLOCK2RAM 0x0F
-
-#endif /* __LINUX_V3020_H */
diff --git a/include/linux/platform_data/s3c-hsotg.h b/include/linux/platform_data/s3c-hsotg.h
index 3982586ba6df..004ddaf650cd 100644
--- a/include/linux/platform_data/s3c-hsotg.h
+++ b/include/linux/platform_data/s3c-hsotg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* include/linux/platform_data/s3c-hsotg.h
*
* Copyright 2008 Openmoko, Inc.
@@ -6,10 +7,6 @@
* http://armlinux.simtec.co.uk/
*
* S3C USB2.0 High-speed / OtG platform information
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_USB_S3C_HSOTG_H
diff --git a/include/linux/platform_data/s3c-hsudc.h b/include/linux/platform_data/s3c-hsudc.h
deleted file mode 100644
index 6fa109339bf9..000000000000
--- a/include/linux/platform_data/s3c-hsudc.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * S3C24XX USB 2.0 High-speed USB controller gadget driver
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * The S3C24XX USB 2.0 high-speed USB controller supports up to 9 endpoints.
- * Each endpoint can be configured as either in or out endpoint. Endpoints
- * can be configured for Bulk or Interrupt transfer mode.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __LINUX_USB_S3C_HSUDC_H
-#define __LINUX_USB_S3C_HSUDC_H
-
-/**
- * s3c24xx_hsudc_platdata - Platform data for USB High-Speed gadget controller.
- * @epnum: Number of endpoints to be instantiated by the controller driver.
- * @gpio_init: Platform specific USB related GPIO initialization.
- * @gpio_uninit: Platform specific USB related GPIO uninitialization.
- *
- * Representation of platform data for the S3C24XX USB 2.0 High Speed gadget
- * controllers.
- */
-struct s3c24xx_hsudc_platdata {
- unsigned int epnum;
- void (*gpio_init)(void);
- void (*gpio_uninit)(void);
-};
-
-#endif /* __LINUX_USB_S3C_HSUDC_H */
diff --git a/include/linux/platform_data/sa11x0-serial.h b/include/linux/platform_data/sa11x0-serial.h
index 009e1d83fe39..8b79ab08af45 100644
--- a/include/linux/platform_data/sa11x0-serial.h
+++ b/include/linux/platform_data/sa11x0-serial.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Author: Nicolas Pitre
*
diff --git a/include/linux/platform_data/sc18is602.h b/include/linux/platform_data/sc18is602.h
index 997b06634152..0e91489edfe6 100644
--- a/include/linux/platform_data/sc18is602.h
+++ b/include/linux/platform_data/sc18is602.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Platform data for NXP SC18IS602/603
*
* Copyright (C) 2012 Guenter Roeck <linux@roeck-us.net>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * For further information, see the Documentation/spi/sc18is602 file.
+ * For further information, see the Documentation/spi/spi-sc18is602.rst file.
*/
/**
diff --git a/include/linux/platform_data/sdhci-pic32.h b/include/linux/platform_data/sdhci-pic32.h
index 7e0efe64c8c5..8a53fd34daf7 100644
--- a/include/linux/platform_data/sdhci-pic32.h
+++ b/include/linux/platform_data/sdhci-pic32.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Purna Chandra Mandal, purna.mandal@microchip.com
* Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
*/
#ifndef __PIC32_SDHCI_PDATA_H__
#define __PIC32_SDHCI_PDATA_H__
diff --git a/include/linux/platform_data/serial-imx.h b/include/linux/platform_data/serial-imx.h
deleted file mode 100644
index a938eba2f18e..000000000000
--- a/include/linux/platform_data/serial-imx.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2008 by Sascha Hauer <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#ifndef ASMARM_ARCH_UART_H
-#define ASMARM_ARCH_UART_H
-
-#define IMXUART_HAVE_RTSCTS (1<<0)
-
-struct imxuart_platform_data {
- unsigned int flags;
-};
-
-#endif
diff --git a/include/linux/platform_data/serial-omap.h b/include/linux/platform_data/serial-omap.h
index 2ba2c34ca3d3..0061d2451900 100644
--- a/include/linux/platform_data/serial-omap.h
+++ b/include/linux/platform_data/serial-omap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Driver for OMAP-UART controller.
* Based on drivers/serial/8250.c
@@ -7,11 +8,6 @@
* Authors:
* Govindraj R <govindraj.raja@ti.com>
* Thara Gopinath <thara@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __OMAP_SERIAL_H__
diff --git a/include/linux/platform_data/serial-sccnxp.h b/include/linux/platform_data/serial-sccnxp.h
index af0c8c3b89ae..dc670f24e710 100644
--- a/include/linux/platform_data/serial-sccnxp.h
+++ b/include/linux/platform_data/serial-sccnxp.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* NXP (Philips) SCC+++(SCN+++) serial driver
*
* Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru>
*
* Based on sc26xx.c, by Thomas Bogendörfer (tsbogend@alpha.franken.de)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _PLATFORM_DATA_SERIAL_SCCNXP_H_
diff --git a/include/linux/platform_data/sgi-w1.h b/include/linux/platform_data/sgi-w1.h
new file mode 100644
index 000000000000..e28c8a90ff84
--- /dev/null
+++ b/include/linux/platform_data/sgi-w1.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SGI One-Wire (W1) IP
+ */
+
+#ifndef PLATFORM_DATA_SGI_W1_H
+#define PLATFORM_DATA_SGI_W1_H
+
+struct sgi_w1_platform_data {
+ char dev_id[64];
+};
+
+#endif /* PLATFORM_DATA_SGI_W1_H */
diff --git a/include/linux/platform_data/sh_ipmmu.h b/include/linux/platform_data/sh_ipmmu.h
deleted file mode 100644
index 39f7405cdac5..000000000000
--- a/include/linux/platform_data/sh_ipmmu.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* sh_ipmmu.h
- *
- * Copyright (C) 2012 Hideki EIRAKU
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- */
-
-#ifndef __SH_IPMMU_H__
-#define __SH_IPMMU_H__
-
-struct shmobile_ipmmu_platform_data {
- const char * const *dev_names;
- unsigned int num_dev_names;
-};
-
-#endif /* __SH_IPMMU_H__ */
diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/platform_data/sh_mmcif.h
index a7baa29484c3..6eb914f958f9 100644
--- a/include/linux/mmc/sh_mmcif.h
+++ b/include/linux/platform_data/sh_mmcif.h
@@ -1,14 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * include/linux/mmc/sh_mmcif.h
- *
* platform data for eMMC driver
*
* Copyright (C) 2010 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
- *
*/
#ifndef LINUX_MMC_SH_MMCIF_H
diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h
index 7c686d335c12..d661399b217d 100644
--- a/include/linux/platform_data/shmob_drm.h
+++ b/include/linux/platform_data/shmob_drm.h
@@ -1,26 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* shmob_drm.h -- SH Mobile DRM driver
*
* Copyright (C) 2012 Renesas Corporation
*
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __SHMOB_DRM_H__
#define __SHMOB_DRM_H__
-#include <linux/kernel.h>
-
#include <drm/drm_mode.h>
-struct sh_mobile_meram_cfg;
-struct sh_mobile_meram_info;
-
enum shmob_drm_clk_source {
SHMOB_DRM_CLK_BUS,
SHMOB_DRM_CLK_PERIPHERAL,
@@ -93,7 +84,6 @@ struct shmob_drm_platform_data {
struct shmob_drm_interface_data iface;
struct shmob_drm_panel_data panel;
struct shmob_drm_backlight_data backlight;
- const struct sh_mobile_meram_cfg *meram;
};
#endif /* __SHMOB_DRM_H__ */
diff --git a/include/linux/platform_data/sht15.h b/include/linux/platform_data/sht15.h
deleted file mode 100644
index 12289c1e9413..000000000000
--- a/include/linux/platform_data/sht15.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * sht15.h - support for the SHT15 Temperature and Humidity Sensor
- *
- * Copyright (c) 2009 Jonathan Cameron
- *
- * Copyright (c) 2007 Wouter Horre
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * For further information, see the Documentation/hwmon/sht15 file.
- */
-
-#ifndef _PDATA_SHT15_H
-#define _PDATA_SHT15_H
-
-/**
- * struct sht15_platform_data - sht15 connectivity info
- * @gpio_data: no. of gpio to which bidirectional data line is
- * connected.
- * @gpio_sck: no. of gpio to which the data clock is connected.
- * @supply_mv: supply voltage in mv. Overridden by regulator if
- * available.
- * @checksum: flag to indicate the checksum should be validated.
- * @no_otp_reload: flag to indicate no reload from OTP.
- * @low_resolution: flag to indicate the temp/humidity resolution to use.
- */
-struct sht15_platform_data {
- int gpio_data;
- int gpio_sck;
- int supply_mv;
- bool checksum;
- bool no_otp_reload;
- bool low_resolution;
-};
-
-#endif /* _PDATA_SHT15_H */
diff --git a/include/linux/platform_data/sht3x.h b/include/linux/platform_data/sht3x.h
index 2e5eea358194..14680d2a98f7 100644
--- a/include/linux/platform_data/sht3x.h
+++ b/include/linux/platform_data/sht3x.h
@@ -1,18 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2016 Sensirion AG, Switzerland
* Author: David Frey <david.frey@sensirion.com>
* Author: Pascal Sachs <pascal.sachs@sensirion.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __SHT3X_H_
diff --git a/include/linux/platform_data/shtc1.h b/include/linux/platform_data/shtc1.h
index 7b8c353f7dc8..5ba6f8f9a9a1 100644
--- a/include/linux/platform_data/shtc1.h
+++ b/include/linux/platform_data/shtc1.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2014 Sensirion AG, Switzerland
* Author: Johannes Winkelmann <johannes.winkelmann@sensirion.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __SHTC1_H_
diff --git a/include/linux/platform_data/si5351.h b/include/linux/platform_data/si5351.h
index 533d9807e543..c71a2dd66143 100644
--- a/include/linux/platform_data/si5351.h
+++ b/include/linux/platform_data/si5351.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Si5351A/B/C programmable clock generator platform_data.
*/
@@ -85,6 +86,7 @@ enum si5351_disable_state {
* @multisynth_src: multisynth source clock
* @clkout_src: clkout source clock
* @pll_master: if true, clkout can also change pll rate
+ * @pll_reset: if true, clkout can reset its pll
* @drive: output drive strength
* @rate: initial clkout rate, or default if 0
*/
@@ -94,6 +96,7 @@ struct si5351_clkout_config {
enum si5351_drive_strength drive;
enum si5351_disable_state disable_state;
bool pll_master;
+ bool pll_reset;
unsigned long rate;
};
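
With the new flag, a clkout that is also pll_master can additionally request a PLL reset when its rate changes, giving a deterministic output phase. An illustrative config; the values are not taken from a real board:

#include <linux/platform_data/si5351.h>

static struct si5351_clkout_config board_clkout0 = {
	.drive		= SI5351_DRIVE_8MA,
	.pll_master	= true,
	.pll_reset	= true,		/* reset the PLL on rate changes */
	.rate		= 25000000,
};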
diff --git a/include/linux/platform_data/simplefb.h b/include/linux/platform_data/simplefb.h
index 077303cedbf4..4f94d52ac99f 100644
--- a/include/linux/platform_data/simplefb.h
+++ b/include/linux/platform_data/simplefb.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* simplefb.h - Simple Framebuffer Device
*
* Copyright (C) 2013 David Herrmann <dh.herrmann@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __PLATFORM_DATA_SIMPLEFB_H__
@@ -14,17 +10,19 @@
#include <drm/drm_fourcc.h>
#include <linux/fb.h>
-#include <linux/kernel.h>
+#include <linux/types.h>
/* format array, use it to initialize a "struct simplefb_format" array */
#define SIMPLEFB_FORMATS \
{ \
{ "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0}, DRM_FORMAT_RGB565 }, \
+ { "r5g5b5a1", 16, {11, 5}, {6, 5}, {1, 5}, {0, 1}, DRM_FORMAT_RGBA5551 }, \
{ "x1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {0, 0}, DRM_FORMAT_XRGB1555 }, \
{ "a1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {15, 1}, DRM_FORMAT_ARGB1555 }, \
{ "r8g8b8", 24, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_RGB888 }, \
{ "x8r8g8b8", 32, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_XRGB8888 }, \
{ "a8r8g8b8", 32, {16, 8}, {8, 8}, {0, 8}, {24, 8}, DRM_FORMAT_ARGB8888 }, \
+ { "x8b8g8r8", 32, {0, 8}, {8, 8}, {16, 8}, {0, 0}, DRM_FORMAT_XBGR8888 }, \
{ "a8b8g8r8", 32, {0, 8}, {8, 8}, {16, 8}, {24, 8}, DRM_FORMAT_ABGR8888 }, \
{ "x2r10g10b10", 32, {20, 10}, {10, 10}, {0, 10}, {0, 0}, DRM_FORMAT_XRGB2101010 }, \
{ "a2r10g10b10", 32, {20, 10}, {10, 10}, {0, 10}, {30, 2}, DRM_FORMAT_ARGB2101010 }, \
diff --git a/include/linux/platform_data/sky81452-backlight.h b/include/linux/platform_data/sky81452-backlight.h
deleted file mode 100644
index 1231e9bb00f1..000000000000
--- a/include/linux/platform_data/sky81452-backlight.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * sky81452.h SKY81452 backlight driver
- *
- * Copyright 2014 Skyworks Solutions Inc.
- * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _SKY81452_BACKLIGHT_H
-#define _SKY81452_BACKLIGHT_H
-
-/**
- * struct sky81452_platform_data
- * @name: backlight driver name.
- If it is not defined, default name is lcd-backlight.
- * @gpio_enable:GPIO number which control EN pin
- * @enable: Enable mask for current sink channel 1, 2, 3, 4, 5 and 6.
- * @ignore_pwm: true if DPWMI should be ignored.
- * @dpwm_mode: true is DPWM dimming mode, otherwise Analog dimming mode.
- * @phase_shift:true is phase shift mode.
- * @short_detecion_threshold: It should be one of 4, 5, 6 and 7V.
- * @boost_current_limit: It should be one of 2300, 2750mA.
- */
-struct sky81452_bl_platform_data {
- const char *name;
- int gpio_enable;
- unsigned int enable;
- bool ignore_pwm;
- bool dpwm_mode;
- bool phase_shift;
- unsigned int short_detection_threshold;
- unsigned int boost_current_limit;
-};
-
-#endif
diff --git a/include/linux/platform_data/spi-clps711x.h b/include/linux/platform_data/spi-clps711x.h
deleted file mode 100644
index 301956e63143..000000000000
--- a/include/linux/platform_data/spi-clps711x.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * CLPS711X SPI bus driver definitions
- *
- * Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef ____LINUX_PLATFORM_DATA_SPI_CLPS711X_H
-#define ____LINUX_PLATFORM_DATA_SPI_CLPS711X_H
-
-/* Board specific platform_data */
-struct spi_clps711x_pdata {
- int *chipselect; /* Array of GPIO-numbers */
- int num_chipselect; /* Total count of GPIOs */
-};
-
-#endif
diff --git a/include/linux/platform_data/spi-davinci.h b/include/linux/platform_data/spi-davinci.h
index f4edcb03c40c..2cb5cc70fd9d 100644
--- a/include/linux/platform_data/spi-davinci.h
+++ b/include/linux/platform_data/spi-davinci.h
@@ -1,19 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2009 Texas Instruments.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __ARCH_ARM_DAVINCI_SPI_H
@@ -36,9 +23,6 @@ enum {
* @num_chipselect: number of chipselects supported by this SPI master
* @intr_line: interrupt line used to connect the SPI IP to the ARM interrupt
 * controller within the SoC. Possible values are 0 and 1.
- * @chip_sel: list of GPIOs which can act as chip-selects for the SPI.
- * SPI_INTERN_CS denotes internal SPI chip-select. Not necessary
- * to populate if all chip-selects are internal.
* @cshold_bug: set this to true if the SPI controller on your chip requires
* a write to CSHOLD bit in between transfers (like in DM355).
* @dma_event_q: DMA event queue to use if SPI_IO_TYPE_DMA is used for any
@@ -48,7 +32,6 @@ struct davinci_spi_platform_data {
u8 version;
u8 num_chipselect;
u8 intr_line;
- u8 *chip_sel;
u8 prescaler_limit;
bool cshold_bug;
enum dma_event_q dma_event_q;
diff --git a/include/linux/platform_data/spi-ep93xx.h b/include/linux/platform_data/spi-ep93xx.h
index 171a271c2cbd..b439f2a896e0 100644
--- a/include/linux/platform_data/spi-ep93xx.h
+++ b/include/linux/platform_data/spi-ep93xx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_MACH_EP93XX_SPI_H
#define __ASM_MACH_EP93XX_SPI_H
@@ -5,13 +6,9 @@ struct spi_device;
/**
* struct ep93xx_spi_info - EP93xx specific SPI descriptor
- * @chipselect: array of gpio numbers to use as chip selects
- * @num_chipselect: ARRAY_SIZE(chipselect)
* @use_dma: use DMA for the transfers
*/
struct ep93xx_spi_info {
- int *chipselect;
- int num_chipselect;
bool use_dma;
};
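
With the GPIO chip selects gone from the platform data, a board now only decides whether to use DMA; a minimal sketch:

#include <linux/platform_data/spi-ep93xx.h>

/* Hypothetical board data: enable DMA; chip selects are described elsewhere */
static struct ep93xx_spi_info board_ep93xx_spi_info = {
	.use_dma = true,
};
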
diff --git a/include/linux/platform_data/spi-imx.h b/include/linux/platform_data/spi-imx.h
deleted file mode 100644
index 08be445e8eb8..000000000000
--- a/include/linux/platform_data/spi-imx.h
+++ /dev/null
@@ -1,27 +0,0 @@
-
-#ifndef __MACH_SPI_H_
-#define __MACH_SPI_H_
-
-/*
- * struct spi_imx_master - device.platform_data for SPI controller devices.
- * @chipselect: Array of chipselects for this master. Numbers >= 0 mean gpio
- * pins, numbers < 0 mean internal CSPI chipselects according
- * to MXC_SPI_CS(). Normally you want to use gpio based chip
- * selects as the CSPI module tries to be intelligent about
- * when to assert the chipselect: The CSPI module deasserts the
- * chipselect once it runs out of input data. The other problem
- * is that it is not possible to mix between high active and low
- * active chipselects on one single bus using the internal
- * chipselects. Unfortunately Freescale decided to put some
- * chipselects on dedicated pins which are not usable as gpios,
- * so we have to support the internal chipselects.
- * @num_chipselect: ARRAY_SIZE(chipselect)
- */
-struct spi_imx_master {
- int *chipselect;
- int num_chipselect;
-};
-
-#define MXC_SPI_CS(no) ((no) - 32)
-
-#endif /* __MACH_SPI_H_*/
diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h
index ba4e4bb70262..f0db674f07b8 100644
--- a/include/linux/platform_data/spi-mt65xx.h
+++ b/include/linux/platform_data/spi-mt65xx.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* MTK SPI bus driver definitions
*
* Copyright (c) 2015 MediaTek Inc.
* Author: Leilk Liu <leilk.liu@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef ____LINUX_PLATFORM_DATA_SPI_MTK_H
@@ -14,9 +11,7 @@
/* Board specific platform_data */
struct mtk_chip_config {
- u32 tx_mlsb;
- u32 rx_mlsb;
- u32 cs_pol;
u32 sample_sel;
+ u32 tick_delay;
};
#endif
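
A sketch of per-chip configuration under the new layout; the tick_delay value is a made-up example:

#include <linux/platform_data/spi-mt65xx.h>

static struct mtk_chip_config board_mtk_spi_chip_config = {
	.sample_sel = 0,	/* sample on the default clock edge */
	.tick_delay = 2,	/* hypothetical extra CS timing, in controller ticks */
};
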
diff --git a/include/linux/platform_data/spi-nuc900.h b/include/linux/platform_data/spi-nuc900.h
deleted file mode 100644
index 4b3f46832e19..000000000000
--- a/include/linux/platform_data/spi-nuc900.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2009 Nuvoton technology corporation.
- *
- * Wan ZongShun <mcuos.com@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation;version 2 of the License.
- *
- */
-
-#ifndef __SPI_NUC900_H
-#define __SPI_NUC900_H
-
-extern void mfp_set_groupg(struct device *dev, const char *subname);
-
-struct nuc900_spi_info {
- unsigned int num_cs;
- unsigned int lsb;
- unsigned int txneg;
- unsigned int rxneg;
- unsigned int divider;
- unsigned int sleep;
- unsigned int txnum;
- unsigned int txbitlen;
- int bus_num;
-};
-
-struct nuc900_spi_chip {
- unsigned char bits_per_word;
-};
-
-#endif /* __SPI_NUC900_H */
diff --git a/include/linux/platform_data/spi-omap2-mcspi.h b/include/linux/platform_data/spi-omap2-mcspi.h
index c100456eab17..3b400b1919a9 100644
--- a/include/linux/platform_data/spi-omap2-mcspi.h
+++ b/include/linux/platform_data/spi-omap2-mcspi.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _OMAP2_MCSPI_H
#define _OMAP2_MCSPI_H
-#define OMAP2_MCSPI_REV 0
-#define OMAP3_MCSPI_REV 1
-#define OMAP4_MCSPI_REV 2
-
#define OMAP4_MCSPI_REG_OFFSET 0x100
#define MCSPI_PINDIR_D0_IN_D1_OUT 0
@@ -14,10 +11,7 @@ struct omap2_mcspi_platform_config {
unsigned short num_cs;
unsigned int regs_offset;
unsigned int pin_dir:1;
-};
-
-struct omap2_mcspi_dev_attr {
- unsigned short num_chipselect;
+ size_t max_xfer_len;
};
struct omap2_mcspi_device_config {
diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h
index da79774078a7..3101152ce449 100644
--- a/include/linux/platform_data/spi-s3c64xx.h
+++ b/include/linux/platform_data/spi-s3c64xx.h
@@ -1,10 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
/*
* Copyright (C) 2009 Samsung Electronics Ltd.
* Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __SPI_S3C64XX_H
@@ -18,7 +16,6 @@ struct platform_device;
* struct s3c64xx_spi_csinfo - ChipSelect description
* @fb_delay: Slave specific feedback delay.
* Refer to FB_CLK_SEL register definition in SPI chapter.
- * @line: Custom 'identity' of the CS line.
*
* This is per SPI-Slave Chipselect information.
* Allocate and initialize one in machine init code and make the
@@ -26,13 +23,13 @@ struct platform_device;
*/
struct s3c64xx_spi_csinfo {
u8 fb_delay;
- unsigned line;
};
/**
* struct s3c64xx_spi_info - SPI Controller defining structure
* @src_clk_nr: Clock source index for the CLK_CFG[SPI_CLKSEL] field.
* @num_cs: Number of CS this controller emulates.
+ * @no_cs: Used when CS line is not connected.
* @cfg_gpio: Configure pins for this SPI controller.
*/
struct s3c64xx_spi_info {
@@ -45,26 +42,16 @@ struct s3c64xx_spi_info {
/**
* s3c64xx_spi_set_platdata - SPI Controller configure callback by the board
* initialization code.
- * @cfg_gpio: Pointer to gpio setup function.
* @src_clk_nr: Clock the SPI controller is to use to generate SPI clocks.
* @num_cs: Number of elements in the 'cs' array.
*
* Call this from machine init code for each SPI Controller that
* has some chips attached to it.
*/
-extern void s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
- int num_cs);
-extern void s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
- int num_cs);
-extern void s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
- int num_cs);
+extern void s3c64xx_spi0_set_platdata(int src_clk_nr, int num_cs);
/* defined by architecture to configure gpio */
extern int s3c64xx_spi0_cfg_gpio(void);
-extern int s3c64xx_spi1_cfg_gpio(void);
-extern int s3c64xx_spi2_cfg_gpio(void);
extern struct s3c64xx_spi_info s3c64xx_spi0_pdata;
-extern struct s3c64xx_spi_info s3c64xx_spi1_pdata;
-extern struct s3c64xx_spi_info s3c64xx_spi2_pdata;
#endif /*__SPI_S3C64XX_H */
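
Callers lose the cfg_gpio argument (pin setup now goes through the architecture-provided s3c64xx_spi0_cfg_gpio()), and only SPI0 keeps a set_platdata helper; a sketch of the updated machine-init call, with illustrative values:

#include <linux/init.h>
#include <linux/platform_data/spi-s3c64xx.h>

static void __init board_init_spi(void)
{
	/* SPI0: clock source 0, one chip select */
	s3c64xx_spi0_set_platdata(0, 1);
}
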
diff --git a/include/linux/platform_data/ssm2518.h b/include/linux/platform_data/ssm2518.h
deleted file mode 100644
index 9a8e3ea287e3..000000000000
--- a/include/linux/platform_data/ssm2518.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * SSM2518 amplifier audio driver
- *
- * Copyright 2013 Analog Devices Inc.
- * Author: Lars-Peter Clausen <lars@metafoo.de>
- *
- * Licensed under the GPL-2.
- */
-
-#ifndef __LINUX_PLATFORM_DATA_SSM2518_H__
-#define __LINUX_PLATFORM_DATA_SSM2518_H__
-
-/**
- * struct ssm2518_platform_data - Platform data for the ssm2518 driver
- * @enable_gpio: GPIO connected to the nSD pin. Set to -1 if the nSD pin is
- * hardwired.
- */
-struct ssm2518_platform_data {
- int enable_gpio;
-};
-
-#endif
diff --git a/include/linux/platform_data/st1232_pdata.h b/include/linux/platform_data/st1232_pdata.h
deleted file mode 100644
index cac3e7b4c454..000000000000
--- a/include/linux/platform_data/st1232_pdata.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef _LINUX_ST1232_PDATA_H
-#define _LINUX_ST1232_PDATA_H
-
-/*
- * Optional platform data
- *
- * Use this if you want the driver to drive the reset pin.
- */
-struct st1232_pdata {
- int reset_gpio;
-};
-
-#endif
diff --git a/include/linux/platform_data/st33zp24.h b/include/linux/platform_data/st33zp24.h
deleted file mode 100644
index 6f0fb6ebd7db..000000000000
--- a/include/linux/platform_data/st33zp24.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * STMicroelectronics TPM Linux driver for TPM 1.2 ST33ZP24
- * Copyright (C) 2009 - 2016 STMicroelectronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ST33ZP24_H__
-#define __ST33ZP24_H__
-
-#define TPM_ST33_I2C "st33zp24-i2c"
-#define TPM_ST33_SPI "st33zp24-spi"
-
-struct st33zp24_platform_data {
- int io_lpcpd;
-};
-
-#endif /* __ST33ZP24_H__ */
diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h
index 79b0e4cdb814..897051e51b78 100644
--- a/include/linux/platform_data/st_sensors_pdata.h
+++ b/include/linux/platform_data/st_sensors_pdata.h
@@ -1,11 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* STMicroelectronics sensors platform-data driver
*
* Copyright 2013 STMicroelectronics Inc.
*
* Denis Ciocca <denis.ciocca@st.com>
- *
- * Licensed under the GPL-2.
*/
#ifndef ST_SENSORS_PDATA_H
@@ -14,13 +13,20 @@
/**
* struct st_sensors_platform_data - Platform data for the ST sensors
* @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2).
- * Available only for accelerometer and pressure sensors.
+ * Available only for accelerometer, magnetometer and pressure sensors.
* Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).
+ * Magnetometer DRDY is supported only on LSM9DS0.
* @open_drain: set the interrupt line to be open drain if possible.
+ * @spi_3wire: enable spi-3wire mode.
+ * @pullups: enable/disable i2c controller pullup resistors.
+ * @wakeup_source: enable/disable device as wakeup generator.
*/
struct st_sensors_platform_data {
u8 drdy_int_pin;
bool open_drain;
+ bool spi_3wire;
+ bool pullups;
+ bool wakeup_source;
};
#endif /* ST_SENSORS_PDATA_H */
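
A sketch of board data exercising the new fields; all values are illustrative:

#include <linux/platform_data/st_sensors_pdata.h>

static struct st_sensors_platform_data board_st_sensor_pdata = {
	.drdy_int_pin  = 1,	/* route DRDY to interrupt pin 1 */
	.open_drain    = true,
	.spi_3wire     = false,
	.pullups       = true,	/* keep the I2C controller pullups enabled */
	.wakeup_source = false,
};
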
diff --git a/include/linux/platform_data/syscon.h b/include/linux/platform_data/syscon.h
index 2354c6fa3726..2c089dd3e2bd 100644
--- a/include/linux/platform_data/syscon.h
+++ b/include/linux/platform_data/syscon.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef PLATFORM_DATA_SYSCON_H
#define PLATFORM_DATA_SYSCON_H
diff --git a/include/linux/platform_data/tda9950.h b/include/linux/platform_data/tda9950.h
new file mode 100644
index 000000000000..c65efd461102
--- /dev/null
+++ b/include/linux/platform_data/tda9950.h
@@ -0,0 +1,16 @@
+#ifndef LINUX_PLATFORM_DATA_TDA9950_H
+#define LINUX_PLATFORM_DATA_TDA9950_H
+
+struct device;
+
+struct tda9950_glue {
+ struct device *parent;
+ unsigned long irq_flags;
+ void *data;
+ int (*init)(void *);
+ void (*exit)(void *);
+ int (*open)(void *);
+ void (*release)(void *);
+};
+
+#endif
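
The glue structure lets a parent device power and gate the CEC block around use; a sketch with hypothetical callbacks (real glue would manage clocks or supplies in init/exit):

#include <linux/interrupt.h>
#include <linux/platform_data/tda9950.h>

/* Hypothetical hooks; real glue would gate power for the CEC block here */
static int board_cec_init(void *data)
{
	return 0;
}

static void board_cec_exit(void *data)
{
}

static struct tda9950_glue board_tda9950_glue = {
	.irq_flags = IRQF_TRIGGER_LOW,
	.init      = board_cec_init,
	.exit      = board_cec_exit,
};
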
diff --git a/include/linux/platform_data/ti-aemif.h b/include/linux/platform_data/ti-aemif.h
index ac72e115093c..77625251df07 100644
--- a/include/linux/platform_data/ti-aemif.h
+++ b/include/linux/platform_data/ti-aemif.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI DaVinci AEMIF platform glue.
*
@@ -5,10 +6,6 @@
*
* Author:
* Bartosz Golaszewski <bgolaszewski@baylibre.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __TI_DAVINCI_AEMIF_DATA_H__
@@ -16,8 +13,33 @@
#include <linux/of_platform.h>
+/**
+ * struct aemif_abus_data - Async bus configuration parameters.
+ *
+ * @cs: Chip-select number.
+ */
+struct aemif_abus_data {
+ u32 cs;
+};
+
+/**
+ * struct aemif_platform_data - Data to set up the TI aemif driver.
+ *
+ * @dev_lookup: of_dev_auxdata passed to of_platform_populate() for aemif
+ * subdevices.
+ * @cs_offset: Lowest allowed chip-select number.
+ * @abus_data: Array of async bus configuration entries.
+ * @num_abus_data: Number of abus entries.
+ * @sub_devices: Array of platform subdevices.
+ * @num_sub_devices: Number of subdevices.
+ */
struct aemif_platform_data {
struct of_dev_auxdata *dev_lookup;
+ u32 cs_offset;
+ struct aemif_abus_data *abus_data;
+ size_t num_abus_data;
+ struct platform_device *sub_devices;
+ size_t num_sub_devices;
};
#endif /* __TI_DAVINCI_AEMIF_DATA_H__ */
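
A board-file sketch of the expanded structure; the chip-select values are illustrative:

#include <linux/kernel.h>
#include <linux/platform_data/ti-aemif.h>

static struct aemif_abus_data board_aemif_abus_data[] = {
	{ .cs = 2 },	/* hypothetical: NAND wired to chip select 2 */
};

static struct aemif_platform_data board_aemif_pdata = {
	.cs_offset     = 2,	/* lowest CS number on this SoC */
	.abus_data     = board_aemif_abus_data,
	.num_abus_data = ARRAY_SIZE(board_aemif_abus_data),
};
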
diff --git a/include/linux/platform_data/ti-prm.h b/include/linux/platform_data/ti-prm.h
new file mode 100644
index 000000000000..28154c3226c2
--- /dev/null
+++ b/include/linux/platform_data/ti-prm.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * TI PRM (Power & Reset Manager) platform data
+ *
+ * Copyright (C) 2019 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ */
+
+#ifndef _LINUX_PLATFORM_DATA_TI_PRM_H
+#define _LINUX_PLATFORM_DATA_TI_PRM_H
+
+struct clockdomain;
+
+struct ti_prm_platform_data {
+ void (*clkdm_deny_idle)(struct clockdomain *clkdm);
+ void (*clkdm_allow_idle)(struct clockdomain *clkdm);
+ struct clockdomain * (*clkdm_lookup)(const char *name);
+};
+
+#endif /* _LINUX_PLATFORM_DATA_TI_PRM_H */
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
new file mode 100644
index 000000000000..eb556f988d57
--- /dev/null
+++ b/include/linux/platform_data/ti-sysc.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __TI_SYSC_DATA_H__
+#define __TI_SYSC_DATA_H__
+
+enum ti_sysc_module_type {
+ TI_SYSC_OMAP2,
+ TI_SYSC_OMAP2_TIMER,
+ TI_SYSC_OMAP3_SHAM,
+ TI_SYSC_OMAP3_AES,
+ TI_SYSC_OMAP4,
+ TI_SYSC_OMAP4_TIMER,
+ TI_SYSC_OMAP4_SIMPLE,
+ TI_SYSC_OMAP34XX_SR,
+ TI_SYSC_OMAP36XX_SR,
+ TI_SYSC_OMAP4_SR,
+ TI_SYSC_OMAP4_MCASP,
+ TI_SYSC_OMAP4_USB_HOST_FS,
+ TI_SYSC_DRA7_MCAN,
+ TI_SYSC_PRUSS,
+};
+
+struct ti_sysc_cookie {
+ void *data;
+ void *clkdm;
+};
+
+/**
+ * struct sysc_regbits - TI OCP_SYSCONFIG register field offsets
+ * @midle_shift: Offset of the midle bit
+ * @clkact_shift: Offset of the clockactivity bit
+ * @sidle_shift: Offset of the sidle bit
+ * @enwkup_shift: Offset of the enawakeup bit
+ * @srst_shift: Offset of the softreset bit
+ * @autoidle_shift: Offset of the autoidle bit
+ * @dmadisable_shift: Offset of the dmadisable bit
+ * @emufree_shift: Offset of the emufree bit
+ *
+ * Note that 0 is a valid shift, and for ti-sysc.c -ENODEV can be used if a
+ * feature is not available.
+ */
+struct sysc_regbits {
+ s8 midle_shift;
+ s8 clkact_shift;
+ s8 sidle_shift;
+ s8 enwkup_shift;
+ s8 srst_shift;
+ s8 autoidle_shift;
+ s8 dmadisable_shift;
+ s8 emufree_shift;
+};
+
+#define SYSC_MODULE_QUIRK_OTG BIT(30)
+#define SYSC_QUIRK_RESET_ON_CTX_LOST BIT(29)
+#define SYSC_QUIRK_REINIT_ON_CTX_LOST BIT(28)
+#define SYSC_QUIRK_REINIT_ON_RESUME BIT(27)
+#define SYSC_QUIRK_GPMC_DEBUG BIT(26)
+#define SYSC_MODULE_QUIRK_ENA_RESETDONE BIT(25)
+#define SYSC_MODULE_QUIRK_PRUSS BIT(24)
+#define SYSC_MODULE_QUIRK_DSS_RESET BIT(23)
+#define SYSC_MODULE_QUIRK_RTC_UNLOCK BIT(22)
+#define SYSC_QUIRK_CLKDM_NOAUTO BIT(21)
+#define SYSC_QUIRK_FORCE_MSTANDBY BIT(20)
+#define SYSC_MODULE_QUIRK_AESS BIT(19)
+#define SYSC_MODULE_QUIRK_SGX BIT(18)
+#define SYSC_MODULE_QUIRK_HDQ1W BIT(17)
+#define SYSC_MODULE_QUIRK_I2C BIT(16)
+#define SYSC_MODULE_QUIRK_WDT BIT(15)
+#define SYSS_QUIRK_RESETDONE_INVERTED BIT(14)
+#define SYSC_QUIRK_SWSUP_MSTANDBY BIT(13)
+#define SYSC_QUIRK_SWSUP_SIDLE_ACT BIT(12)
+#define SYSC_QUIRK_SWSUP_SIDLE BIT(11)
+#define SYSC_QUIRK_EXT_OPT_CLOCK BIT(10)
+#define SYSC_QUIRK_LEGACY_IDLE BIT(9)
+#define SYSC_QUIRK_RESET_STATUS BIT(8)
+#define SYSC_QUIRK_NO_IDLE BIT(7)
+#define SYSC_QUIRK_NO_IDLE_ON_INIT BIT(6)
+#define SYSC_QUIRK_NO_RESET_ON_INIT BIT(5)
+#define SYSC_QUIRK_OPT_CLKS_NEEDED BIT(4)
+#define SYSC_QUIRK_OPT_CLKS_IN_RESET BIT(3)
+#define SYSC_QUIRK_16BIT BIT(2)
+#define SYSC_QUIRK_UNCACHED BIT(1)
+#define SYSC_QUIRK_USE_CLOCKACT BIT(0)
+
+#define SYSC_NR_IDLEMODES 4
+
+/**
+ * struct sysc_capabilities - capabilities for an interconnect target module
+ * @type: sysc type identifier for the module
+ * @sysc_mask: bitmask of supported SYSCONFIG register bits
+ * @regbits: bitmask of SYSCONFIG register bits
+ * @mod_quirks: bitmask of module specific quirks
+ */
+struct sysc_capabilities {
+ const enum ti_sysc_module_type type;
+ const u32 sysc_mask;
+ const struct sysc_regbits *regbits;
+ const u32 mod_quirks;
+};
+
+/**
+ * struct sysc_config - configuration for an interconnect target module
+ * @sysc_val: configured value for sysc register
+ * @syss_mask: configured mask value for SYSSTATUS register
+ * @midlemodes: bitmask of supported master idle modes
+ * @sidlemodes: bitmask of supported slave idle modes
+ * @srst_udelay: optional delay needed after OCP soft reset
+ * @quirks: bitmask of enabled quirks
+ */
+struct sysc_config {
+ u32 sysc_val;
+ u32 syss_mask;
+ u8 midlemodes;
+ u8 sidlemodes;
+ u8 srst_udelay;
+ u32 quirks;
+};
+
+enum sysc_registers {
+ SYSC_REVISION,
+ SYSC_SYSCONFIG,
+ SYSC_SYSSTATUS,
+ SYSC_MAX_REGS,
+};
+
+/**
+ * struct ti_sysc_module_data - ti-sysc to hwmod translation data for a module
+ * @name: legacy "ti,hwmods" module name
+ * @module_pa: physical address of the interconnect target module
+ * @module_size: size of the interconnect target module
+ * @offsets: array of register offsets as listed in enum sysc_registers
+ * @nr_offsets: number of registers
+ * @cap: interconnect target module capabilities
+ * @cfg: interconnect target module configuration
+ *
+ * This data is enough to allocate a new struct omap_hwmod_class_sysconfig
+ * based on device tree data parsed by the ti-sysc driver.
+ */
+struct ti_sysc_module_data {
+ const char *name;
+ u64 module_pa;
+ u32 module_size;
+ int *offsets;
+ int nr_offsets;
+ const struct sysc_capabilities *cap;
+ struct sysc_config *cfg;
+};
+
+struct device;
+struct clk;
+
+struct ti_sysc_platform_data {
+ struct of_dev_auxdata *auxdata;
+ bool (*soc_type_gp)(void);
+ int (*init_clockdomain)(struct device *dev, struct clk *fck,
+ struct clk *ick, struct ti_sysc_cookie *cookie);
+ void (*clkdm_deny_idle)(struct device *dev,
+ const struct ti_sysc_cookie *cookie);
+ void (*clkdm_allow_idle)(struct device *dev,
+ const struct ti_sysc_cookie *cookie);
+ int (*init_module)(struct device *dev,
+ const struct ti_sysc_module_data *data,
+ struct ti_sysc_cookie *cookie);
+ int (*enable_module)(struct device *dev,
+ const struct ti_sysc_cookie *cookie);
+ int (*idle_module)(struct device *dev,
+ const struct ti_sysc_cookie *cookie);
+ int (*shutdown_module)(struct device *dev,
+ const struct ti_sysc_cookie *cookie);
+};
+
+#endif /* __TI_SYSC_DATA_H__ */
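
To make the register-bits convention concrete (0 is a valid shift, so unavailable fields must be set to -ENODEV rather than left at zero), here is a sketch of capabilities for a hypothetical simple OMAP4-style module; the shift values are made up:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/platform_data/ti-sysc.h>

/* Hypothetical module: only sidle, midle and softreset bits implemented */
static const struct sysc_regbits board_sysc_regbits = {
	.midle_shift      = 4,
	.sidle_shift      = 2,
	.srst_shift       = 0,		/* 0 is a valid shift */
	.clkact_shift     = -ENODEV,	/* feature not available */
	.enwkup_shift     = -ENODEV,
	.autoidle_shift   = -ENODEV,
	.dmadisable_shift = -ENODEV,
	.emufree_shift    = -ENODEV,
};

static const struct sysc_capabilities board_sysc_caps = {
	.type       = TI_SYSC_OMAP4_SIMPLE,
	.regbits    = &board_sysc_regbits,
	.mod_quirks = SYSC_QUIRK_LEGACY_IDLE,
};
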
diff --git a/include/linux/platform_data/touchscreen-s3c2410.h b/include/linux/platform_data/touchscreen-s3c2410.h
index 71eccaa9835d..bf8d3b9d7c6a 100644
--- a/include/linux/platform_data/touchscreen-s3c2410.h
+++ b/include/linux/platform_data/touchscreen-s3c2410.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2005 Arnaud Patard <arnaud.patard@rtp-net.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __TOUCHSCREEN_S3C2410_H
diff --git a/include/linux/platform_data/tps68470.h b/include/linux/platform_data/tps68470.h
new file mode 100644
index 000000000000..e605a2cab07f
--- /dev/null
+++ b/include/linux/platform_data/tps68470.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * TI TPS68470 PMIC platform data definition.
+ *
+ * Copyright (c) 2021 Red Hat Inc.
+ *
+ * Red Hat authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+#ifndef __PDATA_TPS68470_H
+#define __PDATA_TPS68470_H
+
+enum tps68470_regulators {
+ TPS68470_CORE,
+ TPS68470_ANA,
+ TPS68470_VCM,
+ TPS68470_VIO,
+ TPS68470_VSIO,
+ TPS68470_AUX1,
+ TPS68470_AUX2,
+ TPS68470_NUM_REGULATORS
+};
+
+struct regulator_init_data;
+
+struct tps68470_regulator_platform_data {
+ const struct regulator_init_data *reg_init_data[TPS68470_NUM_REGULATORS];
+};
+
+struct tps68470_clk_consumer {
+ const char *consumer_dev_name;
+ const char *consumer_con_id;
+};
+
+struct tps68470_clk_platform_data {
+ unsigned int n_consumers;
+ struct tps68470_clk_consumer consumers[];
+};
+
+#endif
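
A sketch of wiring one regulator's init data into the platform data via the enum index; the constraint values are placeholders a board would set for real:

#include <linux/regulator/machine.h>
#include <linux/platform_data/tps68470.h>

/* Hypothetical constraints for the core rail */
static struct regulator_init_data board_tps68470_core_init_data = {
	.constraints = {
		.min_uV = 900000,
		.max_uV = 1200000,
	},
};

static struct tps68470_regulator_platform_data board_tps68470_reg_pdata = {
	.reg_init_data = {
		[TPS68470_CORE] = &board_tps68470_core_init_data,
	},
};
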
diff --git a/include/linux/platform_data/tsc2007.h b/include/linux/platform_data/tsc2007.h
index c2d3aa1dadd4..a0ca52c41ccb 100644
--- a/include/linux/platform_data/tsc2007.h
+++ b/include/linux/platform_data/tsc2007.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_I2C_TSC2007_H
#define __LINUX_I2C_TSC2007_H
diff --git a/include/linux/platform_data/tsl2563.h b/include/linux/platform_data/tsl2563.h
deleted file mode 100644
index c90d7a09dda7..000000000000
--- a/include/linux/platform_data/tsl2563.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef __LINUX_TSL2563_H
-#define __LINUX_TSL2563_H
-
-struct tsl2563_platform_data {
- int cover_comp_gain;
-};
-
-#endif /* __LINUX_TSL2563_H */
diff --git a/include/linux/platform_data/tsl2772.h b/include/linux/platform_data/tsl2772.h
new file mode 100644
index 000000000000..f8ade15a35e2
--- /dev/null
+++ b/include/linux/platform_data/tsl2772.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Device driver for monitoring ambient light intensity (lux)
+ * and proximity (prox) within the TAOS TSL2772 family of devices.
+ *
+ * Copyright (c) 2012, TAOS Corporation.
+ * Copyright (c) 2017-2018 Brian Masney <masneyb@onstation.org>
+ */
+
+#ifndef __TSL2772_H
+#define __TSL2772_H
+
+struct tsl2772_lux {
+ unsigned int ch0;
+ unsigned int ch1;
+};
+
+/* Max number of segments allowable in LUX table */
+#define TSL2772_MAX_LUX_TABLE_SIZE 6
+/* The default LUX tables all have 3 elements. */
+#define TSL2772_DEF_LUX_TABLE_SZ 3
+#define TSL2772_DEFAULT_TABLE_BYTES (sizeof(struct tsl2772_lux) * \
+ TSL2772_DEF_LUX_TABLE_SZ)
+
+/* Proximity diode to use */
+#define TSL2772_DIODE0 0x01
+#define TSL2772_DIODE1 0x02
+#define TSL2772_DIODE_BOTH 0x03
+
+/* LED Power */
+#define TSL2772_100_mA 0x00
+#define TSL2772_50_mA 0x01
+#define TSL2772_25_mA 0x02
+#define TSL2772_13_mA 0x03
+
+/**
+ * struct tsl2772_settings - Settings for the tsl2772 driver
+ * @als_time: Integration time of the ALS channel ADCs in 2.73 ms
+ * increments. Total integration time is
+ * (256 - als_time) * 2.73.
+ * @als_gain: Index into the tsl2772_als_gain array.
+ * @als_gain_trim: Default gain trim to account for aperture effects.
+ * @wait_time: Time between proximity and ALS cycles in 2.73 ms
+ * periods.
+ * @prox_time: Integration time of the proximity ADC in 2.73 ms
+ * increments. Total integration time is
+ * (256 - prx_time) * 2.73.
+ * @prox_gain: Index into the tsl2772_prx_gain array.
+ * @als_prox_config: The value of the ALS / Proximity configuration
+ * register.
+ * @als_cal_target: Known external ALS reading for calibration.
+ * @als_persistence: H/W Filters, Number of 'out of limits' ALS readings.
+ * @als_interrupt_en: Enable/Disable ALS interrupts
+ * @als_thresh_low: CH0 'low' count to trigger interrupt.
+ * @als_thresh_high: CH0 'high' count to trigger interrupt.
+ * @prox_persistence: H/W Filters, Number of 'out of limits' proximity
+ * readings.
+ * @prox_interrupt_en: Enable/Disable proximity interrupts.
+ * @prox_thres_low: Low threshold proximity detection.
+ * @prox_thres_high: High threshold proximity detection.
+ * @prox_pulse_count: Number of proximity emitter pulses.
+ * @prox_max_samples_cal: The number of samples that are taken when performing
+ * a proximity calibration.
+ * @prox_diode: Which diode(s) to use for driving the external
+ * LED(s) for proximity sensing.
+ * @prox_power: The amount of power to use for the external LED(s).
+ */
+struct tsl2772_settings {
+ int als_time;
+ int als_gain;
+ int als_gain_trim;
+ int wait_time;
+ int prox_time;
+ int prox_gain;
+ int als_prox_config;
+ int als_cal_target;
+ u8 als_persistence;
+ bool als_interrupt_en;
+ int als_thresh_low;
+ int als_thresh_high;
+ u8 prox_persistence;
+ bool prox_interrupt_en;
+ int prox_thres_low;
+ int prox_thres_high;
+ int prox_pulse_count;
+ int prox_max_samples_cal;
+ int prox_diode;
+ int prox_power;
+};
+
+/**
+ * struct tsl2772_platform_data - Platform callback, glass and defaults
+ * @platform_lux_table: Device specific glass coefficients
+ * @platform_default_settings: Device specific power on defaults
+ */
+struct tsl2772_platform_data {
+ struct tsl2772_lux platform_lux_table[TSL2772_MAX_LUX_TABLE_SIZE];
+ struct tsl2772_settings *platform_default_settings;
+};
+
+#endif /* __TSL2772_H */
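
A sketch of device-specific power-on defaults using this header; the values are illustrative and only a few of the settings are set:

#include <linux/platform_data/tsl2772.h>

static struct tsl2772_settings board_tsl2772_settings = {
	.als_time   = 219,			/* (256 - 219) * 2.73 ms ~ 101 ms */
	.prox_diode = TSL2772_DIODE_BOTH,	/* drive both emitter diodes */
	.prox_power = TSL2772_100_mA,		/* full LED drive strength */
};

static struct tsl2772_platform_data board_tsl2772_pdata = {
	.platform_default_settings = &board_tsl2772_settings,
};
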
diff --git a/include/linux/platform_data/txx9/ndfmc.h b/include/linux/platform_data/txx9/ndfmc.h
new file mode 100644
index 000000000000..7aaa4cd34d31
--- /dev/null
+++ b/include/linux/platform_data/txx9/ndfmc.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * (C) Copyright TOSHIBA CORPORATION 2007
+ */
+#ifndef __TXX9_NDFMC_H
+#define __TXX9_NDFMC_H
+
+#define NDFMC_PLAT_FLAG_USE_BSPRT 0x01
+#define NDFMC_PLAT_FLAG_NO_RSTR 0x02
+#define NDFMC_PLAT_FLAG_HOLDADD 0x04
+#define NDFMC_PLAT_FLAG_DUMMYWRITE 0x08
+
+struct txx9ndfmc_platform_data {
+ unsigned int shift;
+ unsigned int gbus_clock;
+	unsigned int hold;	/* hold time in nanoseconds */
+	unsigned int spw;	/* strobe pulse width in nanoseconds */
+ unsigned int flags;
+ unsigned char ch_mask; /* available channel bitmask */
+ unsigned char wp_mask; /* write-protect bitmask */
+ unsigned char wide_mask; /* 16bit-nand bitmask */
+};
+
+void txx9_ndfmc_init(unsigned long baseaddr,
+ const struct txx9ndfmc_platform_data *plat_data);
+
+#endif /* __TXX9_NDFMC_H */
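
A sketch of board setup using this header; the base address and timing values are made up for illustration:

#include <linux/init.h>
#include <linux/platform_data/txx9/ndfmc.h>

/* BOARD_NDFMC_BASE is a hypothetical address for this sketch */
#define BOARD_NDFMC_BASE	0x1d010000UL

static const struct txx9ndfmc_platform_data board_ndfmc_pdata = {
	.gbus_clock = 200000000,		/* 200 MHz GBUS */
	.hold       = 20,			/* hold time in nanoseconds */
	.spw        = 50,			/* strobe pulse width in nanoseconds */
	.flags      = NDFMC_PLAT_FLAG_HOLDADD,
	.ch_mask    = 0x1,			/* only channel 0 is wired up */
};

static void __init board_setup_nand(void)
{
	txx9_ndfmc_init(BOARD_NDFMC_BASE, &board_ndfmc_pdata);
}
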
diff --git a/include/linux/platform_data/uio_dmem_genirq.h b/include/linux/platform_data/uio_dmem_genirq.h
index 973c1bb32168..c8f6de685306 100644
--- a/include/linux/platform_data/uio_dmem_genirq.h
+++ b/include/linux/platform_data/uio_dmem_genirq.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/platform_data/uio_dmem_genirq.h
*
* Copyright (C) 2012 Damian Hobson-Garcia
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _UIO_DMEM_GENIRQ_H
diff --git a/include/linux/platform_data/uio_pruss.h b/include/linux/platform_data/uio_pruss.h
index 3d47d219827f..f76fa393b802 100644
--- a/include/linux/platform_data/uio_pruss.h
+++ b/include/linux/platform_data/uio_pruss.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/platform_data/uio_pruss.h
*
* Platform data for uio_pruss driver
*
- * Copyright (C) 2010-11 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2010-11 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef _UIO_PRUSS_H_
diff --git a/include/linux/platform_data/usb-davinci.h b/include/linux/platform_data/usb-davinci.h
index 0926e99f2e8f..879f5c78b91a 100644
--- a/include/linux/platform_data/usb-davinci.h
+++ b/include/linux/platform_data/usb-davinci.h
@@ -11,22 +11,8 @@
#ifndef __ASM_ARCH_USB_H
#define __ASM_ARCH_USB_H
-struct da8xx_ohci_root_hub;
-
-typedef void (*da8xx_ocic_handler_t)(struct da8xx_ohci_root_hub *hub,
- unsigned port);
-
/* Passed as the platform data to the OHCI driver */
struct da8xx_ohci_root_hub {
- /* Switch the port power on/off */
- int (*set_power)(unsigned port, int on);
- /* Read the port power status */
- int (*get_power)(unsigned port);
- /* Read the port over-current indicator */
- int (*get_oci)(unsigned port);
- /* Over-current indicator change notification (pass NULL to disable) */
- int (*ocic_notify)(da8xx_ocic_handler_t handler);
-
/* Time from power on to power good (in 2 ms units) */
u8 potpgt;
};
diff --git a/include/linux/platform_data/usb-ehci-mxc.h b/include/linux/platform_data/usb-ehci-mxc.h
deleted file mode 100644
index 157e71f79f99..000000000000
--- a/include/linux/platform_data/usb-ehci-mxc.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __INCLUDE_ASM_ARCH_MXC_EHCI_H
-#define __INCLUDE_ASM_ARCH_MXC_EHCI_H
-
-struct mxc_usbh_platform_data {
- int (*init)(struct platform_device *pdev);
- int (*exit)(struct platform_device *pdev);
-
- unsigned int portsc;
- struct usb_phy *otg;
-};
-
-#endif /* __INCLUDE_ASM_ARCH_MXC_EHCI_H */
-
diff --git a/include/linux/platform_data/usb-musb-ux500.h b/include/linux/platform_data/usb-musb-ux500.h
index dd9c83ac7de0..8909f396febc 100644
--- a/include/linux/platform_data/usb-musb-ux500.h
+++ b/include/linux/platform_data/usb-musb-ux500.h
@@ -1,8 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) ST-Ericsson SA 2011
*
* Author: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
- * License terms: GNU General Public License (GPL) version 2
*/
#ifndef __ASM_ARCH_USB_H
#define __ASM_ARCH_USB_H
diff --git a/include/linux/platform_data/usb-mx2.h b/include/linux/platform_data/usb-mx2.h
deleted file mode 100644
index 22d0b596262c..000000000000
--- a/include/linux/platform_data/usb-mx2.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (C) 2009 Martin Fuzzey <mfuzzey@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_ARCH_MX21_USBH
-#define __ASM_ARCH_MX21_USBH
-
-enum mx21_usbh_xcvr {
- /* Values below as used by hardware (HWMODE register) */
- MX21_USBXCVR_TXDIF_RXDIF = 0,
- MX21_USBXCVR_TXDIF_RXSE = 1,
- MX21_USBXCVR_TXSE_RXDIF = 2,
- MX21_USBXCVR_TXSE_RXSE = 3,
-};
-
-struct mx21_usbh_platform_data {
- enum mx21_usbh_xcvr host_xcvr; /* tranceiver mode host 1,2 ports */
- enum mx21_usbh_xcvr otg_xcvr; /* tranceiver mode otg (as host) port */
- u16 enable_host1:1,
- enable_host2:1,
- enable_otg_host:1, /* enable "OTG" port (as host) */
- host1_xcverless:1, /* traceiverless host1 port */
- host1_txenoe:1, /* output enable host1 transmit enable */
- otg_ext_xcvr:1, /* external tranceiver for OTG port */
- unused:10;
-};
-
-#endif /* __ASM_ARCH_MX21_USBH */
diff --git a/include/linux/platform_data/usb-ohci-pxa27x.h b/include/linux/platform_data/usb-ohci-pxa27x.h
index 95b6e2a6e514..69adea7694f3 100644
--- a/include/linux/platform_data/usb-ohci-pxa27x.h
+++ b/include/linux/platform_data/usb-ohci-pxa27x.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASMARM_ARCH_OHCI_H
#define ASMARM_ARCH_OHCI_H
diff --git a/include/linux/platform_data/usb-ohci-s3c2410.h b/include/linux/platform_data/usb-ohci-s3c2410.h
index cc7554ae6e8b..558a9605be78 100644
--- a/include/linux/platform_data/usb-ohci-s3c2410.h
+++ b/include/linux/platform_data/usb-ohci-s3c2410.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* arch/arm/plat-samsung/include/plat/usb-control.h
*
* Copyright (c) 2004 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C - USB host port information
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARCH_USBCONTROL_H
diff --git a/include/linux/platform_data/usb-omap.h b/include/linux/platform_data/usb-omap.h
index fa579b4c666b..580978e468f8 100644
--- a/include/linux/platform_data/usb-omap.h
+++ b/include/linux/platform_data/usb-omap.h
@@ -1,22 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* usb-omap.h - Platform data for the various OMAP USB IPs
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
- *
- * This software is distributed under the terms of the GNU General Public
- * License ("GPL") version 2, as published by the Free Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com
*/
#define OMAP3_HS_USB_PORTS 3
diff --git a/include/linux/platform_data/usb-omap1.h b/include/linux/platform_data/usb-omap1.h
index 43b5ce139c37..e7b8dc92a269 100644
--- a/include/linux/platform_data/usb-omap1.h
+++ b/include/linux/platform_data/usb-omap1.h
@@ -48,6 +48,10 @@ struct omap_usb_config {
u32 (*usb2_init)(unsigned nwires, unsigned alt_pingroup);
int (*ocpi_enable)(void);
+
+ void (*lb_reset)(void);
+
+ int (*transceiver_power)(int on);
};
#endif /* __LINUX_USB_OMAP1_H */
diff --git a/include/linux/platform_data/usb-pxa3xx-ulpi.h b/include/linux/platform_data/usb-pxa3xx-ulpi.h
deleted file mode 100644
index 9d82cb65ea56..000000000000
--- a/include/linux/platform_data/usb-pxa3xx-ulpi.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * PXA3xx U2D header
- *
- * Copyright (C) 2010 CompuLab Ltd.
- *
- * Igor Grinberg <grinberg@compulab.co.il>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __PXA310_U2D__
-#define __PXA310_U2D__
-
-#include <linux/usb/ulpi.h>
-
-struct pxa3xx_u2d_platform_data {
-
-#define ULPI_SER_6PIN (1 << 0)
-#define ULPI_SER_3PIN (1 << 1)
- unsigned int ulpi_mode;
-
- int (*init)(struct device *);
- void (*exit)(struct device *);
-};
-
-
-/* Start PXA3xx U2D host */
-int pxa3xx_u2d_start_hc(struct usb_bus *host);
-/* Stop PXA3xx U2D host */
-void pxa3xx_u2d_stop_hc(struct usb_bus *host);
-
-extern void pxa3xx_set_u2d_info(struct pxa3xx_u2d_platform_data *info);
-
-#endif /* __PXA310_U2D__ */
diff --git a/include/linux/platform_data/usb-s3c2410_udc.h b/include/linux/platform_data/usb-s3c2410_udc.h
deleted file mode 100644
index de8e2288a509..000000000000
--- a/include/linux/platform_data/usb-s3c2410_udc.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* arch/arm/plat-samsung/include/plat/udc.h
- *
- * Copyright (c) 2005 Arnaud Patard <arnaud.patard@rtp-net.org>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *
- * Changelog:
- * 14-Mar-2005 RTP Created file
- * 02-Aug-2005 RTP File rename
- * 07-Sep-2005 BJD Minor cleanups, changed cmd to enum
- * 18-Jan-2007 HMW Add per-platform vbus_draw function
-*/
-
-#ifndef __ASM_ARM_ARCH_UDC_H
-#define __ASM_ARM_ARCH_UDC_H
-
-enum s3c2410_udc_cmd_e {
- S3C2410_UDC_P_ENABLE = 1, /* Pull-up enable */
- S3C2410_UDC_P_DISABLE = 2, /* Pull-up disable */
- S3C2410_UDC_P_RESET = 3, /* UDC reset, in case of */
-};
-
-struct s3c2410_udc_mach_info {
- void (*udc_command)(enum s3c2410_udc_cmd_e);
- void (*vbus_draw)(unsigned int ma);
-
- unsigned int pullup_pin;
- unsigned int pullup_pin_inverted;
-
- unsigned int vbus_pin;
- unsigned char vbus_pin_inverted;
-};
-
-extern void __init s3c24xx_udc_set_platdata(struct s3c2410_udc_mach_info *);
-
-struct s3c24xx_hsudc_platdata;
-
-extern void __init s3c24xx_hsudc_set_platdata(struct s3c24xx_hsudc_platdata *pd);
-
-#endif /* __ASM_ARM_ARCH_UDC_H */
diff --git a/include/linux/platform_data/usb3503.h b/include/linux/platform_data/usb3503.h
index 1d1b6ef871f6..f3c942f396f8 100644
--- a/include/linux/platform_data/usb3503.h
+++ b/include/linux/platform_data/usb3503.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __USB3503_H__
#define __USB3503_H__
@@ -11,14 +12,12 @@ enum usb3503_mode {
USB3503_MODE_UNKNOWN,
USB3503_MODE_HUB,
USB3503_MODE_STANDBY,
+ USB3503_MODE_BYPASS,
};
struct usb3503_platform_data {
enum usb3503_mode initial_mode;
u8 port_off_mask;
- int gpio_intn;
- int gpio_connect;
- int gpio_reset;
};
#endif
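
With the GPIO numbers dropped (the driver now finds them as GPIO descriptors), the platform data only selects the startup behavior; a minimal sketch using fields shown in this hunk:

#include <linux/platform_data/usb3503.h>

static struct usb3503_platform_data board_usb3503_pdata = {
	.initial_mode  = USB3503_MODE_HUB,	/* come up as a hub */
	.port_off_mask = 0,			/* no downstream ports forced off */
};
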
diff --git a/include/linux/platform_data/ux500_wdt.h b/include/linux/platform_data/ux500_wdt.h
deleted file mode 100644
index 1689ff4c3bfd..000000000000
--- a/include/linux/platform_data/ux500_wdt.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (C) ST Ericsson SA 2011
- *
- * License Terms: GNU General Public License v2
- *
- * STE Ux500 Watchdog platform data
- */
-#ifndef __UX500_WDT_H
-#define __UX500_WDT_H
-
-/**
- * struct ux500_wdt_data
- */
-struct ux500_wdt_data {
- unsigned int timeout;
- bool has_28_bits_resolution;
-};
-
-#endif /* __UX500_WDT_H */
diff --git a/include/linux/platform_data/video-clcd-versatile.h b/include/linux/platform_data/video-clcd-versatile.h
deleted file mode 100644
index 09ccf182af4d..000000000000
--- a/include/linux/platform_data/video-clcd-versatile.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef PLAT_CLCD_H
-#define PLAT_CLCD_H
-
-#ifdef CONFIG_PLAT_VERSATILE_CLCD
-struct clcd_panel *versatile_clcd_get_panel(const char *);
-int versatile_clcd_setup_dma(struct clcd_fb *, unsigned long);
-int versatile_clcd_mmap_dma(struct clcd_fb *, struct vm_area_struct *);
-void versatile_clcd_remove_dma(struct clcd_fb *);
-#else
-static inline struct clcd_panel *versatile_clcd_get_panel(const char *s)
-{
- return NULL;
-}
-static inline int versatile_clcd_setup_dma(struct clcd_fb *fb, unsigned long framesize)
-{
- return -ENODEV;
-}
-static inline int versatile_clcd_mmap_dma(struct clcd_fb *fb, struct vm_area_struct *vm)
-{
- return -ENODEV;
-}
-static inline void versatile_clcd_remove_dma(struct clcd_fb *fb)
-{
-}
-#endif
-
-#endif
diff --git a/include/linux/platform_data/video-ep93xx.h b/include/linux/platform_data/video-ep93xx.h
index 699ac4109366..a6f3ccdec1cb 100644
--- a/include/linux/platform_data/video-ep93xx.h
+++ b/include/linux/platform_data/video-ep93xx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __VIDEO_EP93XX_H
#define __VIDEO_EP93XX_H
diff --git a/include/linux/platform_data/video-imxfb.h b/include/linux/platform_data/video-imxfb.h
deleted file mode 100644
index cf9348b376ac..000000000000
--- a/include/linux/platform_data/video-imxfb.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * This structure describes the machine which we are running on.
- */
-#ifndef __MACH_IMXFB_H__
-#define __MACH_IMXFB_H__
-
-#include <linux/fb.h>
-
-#define PCR_TFT (1 << 31)
-#define PCR_COLOR (1 << 30)
-#define PCR_PBSIZ_1 (0 << 28)
-#define PCR_PBSIZ_2 (1 << 28)
-#define PCR_PBSIZ_4 (2 << 28)
-#define PCR_PBSIZ_8 (3 << 28)
-#define PCR_BPIX_1 (0 << 25)
-#define PCR_BPIX_2 (1 << 25)
-#define PCR_BPIX_4 (2 << 25)
-#define PCR_BPIX_8 (3 << 25)
-#define PCR_BPIX_12 (4 << 25)
-#define PCR_BPIX_16 (5 << 25)
-#define PCR_BPIX_18 (6 << 25)
-#define PCR_PIXPOL (1 << 24)
-#define PCR_FLMPOL (1 << 23)
-#define PCR_LPPOL (1 << 22)
-#define PCR_CLKPOL (1 << 21)
-#define PCR_OEPOL (1 << 20)
-#define PCR_SCLKIDLE (1 << 19)
-#define PCR_END_SEL (1 << 18)
-#define PCR_END_BYTE_SWAP (1 << 17)
-#define PCR_REV_VS (1 << 16)
-#define PCR_ACD_SEL (1 << 15)
-#define PCR_ACD(x) (((x) & 0x7f) << 8)
-#define PCR_SCLK_SEL (1 << 7)
-#define PCR_SHARP (1 << 6)
-#define PCR_PCD(x) ((x) & 0x3f)
-
-#define PWMR_CLS(x) (((x) & 0x1ff) << 16)
-#define PWMR_LDMSK (1 << 15)
-#define PWMR_SCR1 (1 << 10)
-#define PWMR_SCR0 (1 << 9)
-#define PWMR_CC_EN (1 << 8)
-#define PWMR_PW(x) ((x) & 0xff)
-
-#define LSCR1_PS_RISE_DELAY(x) (((x) & 0x7f) << 26)
-#define LSCR1_CLS_RISE_DELAY(x) (((x) & 0x3f) << 16)
-#define LSCR1_REV_TOGGLE_DELAY(x) (((x) & 0xf) << 8)
-#define LSCR1_GRAY2(x) (((x) & 0xf) << 4)
-#define LSCR1_GRAY1(x) (((x) & 0xf))
-
-struct imx_fb_videomode {
- struct fb_videomode mode;
- u32 pcr;
- bool aus_mode;
- unsigned char bpp;
-};
-
-struct imx_fb_platform_data {
- struct imx_fb_videomode *mode;
- int num_modes;
-
- u_int pwmr;
- u_int lscr1;
- u_int dmacr;
-
- int (*init)(struct platform_device *);
- void (*exit)(struct platform_device *);
-};
-
-#endif /* ifndef __MACH_IMXFB_H__ */
diff --git a/include/linux/platform_data/video-mx3fb.h b/include/linux/platform_data/video-mx3fb.h
index fdbe60001542..d03dc322a616 100644
--- a/include/linux/platform_data/video-mx3fb.h
+++ b/include/linux/platform_data/video-mx3fb.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2008
* Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARCH_MX3FB_H__
diff --git a/include/linux/platform_data/video-nuc900fb.h b/include/linux/platform_data/video-nuc900fb.h
deleted file mode 100644
index cec5ece765ed..000000000000
--- a/include/linux/platform_data/video-nuc900fb.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/* linux/include/asm/arch-nuc900/fb.h
- *
- * Copyright (c) 2008 Nuvoton technology corporation
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Changelog:
- *
- * 2008/08/26 vincen.zswan modify this file for LCD.
- */
-
-#ifndef __ASM_ARM_FB_H
-#define __ASM_ARM_FB_H
-
-
-
-/* LCD Controller Hardware Desc */
-struct nuc900fb_hw {
- unsigned int lcd_dccs;
- unsigned int lcd_device_ctrl;
- unsigned int lcd_mpulcd_cmd;
- unsigned int lcd_int_cs;
- unsigned int lcd_crtc_size;
- unsigned int lcd_crtc_dend;
- unsigned int lcd_crtc_hr;
- unsigned int lcd_crtc_hsync;
- unsigned int lcd_crtc_vr;
- unsigned int lcd_va_baddr0;
- unsigned int lcd_va_baddr1;
- unsigned int lcd_va_fbctrl;
- unsigned int lcd_va_scale;
- unsigned int lcd_va_test;
- unsigned int lcd_va_win;
- unsigned int lcd_va_stuff;
-};
-
-/* LCD Display Description */
-struct nuc900fb_display {
- /* LCD Image type */
- unsigned type;
-
- /* LCD Screen Size */
- unsigned short width;
- unsigned short height;
-
- /* LCD Screen Info */
- unsigned short xres;
- unsigned short yres;
- unsigned short bpp;
-
- unsigned long pixclock;
- unsigned short left_margin;
- unsigned short right_margin;
- unsigned short hsync_len;
- unsigned short upper_margin;
- unsigned short lower_margin;
- unsigned short vsync_len;
-
- /* hardware special register value */
- unsigned int dccs;
- unsigned int devctl;
- unsigned int fbctrl;
- unsigned int scale;
-};
-
-struct nuc900fb_mach_info {
- struct nuc900fb_display *displays;
- unsigned num_displays;
- unsigned default_display;
- /* GPIO Setting Info */
- unsigned gpio_dir;
- unsigned gpio_dir_mask;
- unsigned gpio_data;
- unsigned gpio_data_mask;
-};
-
-extern void __init nuc900_fb_set_platdata(struct nuc900fb_mach_info *);
-
-#endif /* __ASM_ARM_FB_H */
diff --git a/include/linux/platform_data/video-pxafb.h b/include/linux/platform_data/video-pxafb.h
index 07c6c1e153f8..6333bac166a5 100644
--- a/include/linux/platform_data/video-pxafb.h
+++ b/include/linux/platform_data/video-pxafb.h
@@ -1,17 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Support for the xscale frame buffer.
*
* Author: Jean-Frederic Clere
* Created: Sep 22, 2003
* Copyright: jfclere@sinix.net
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fb.h>
-#include <mach/regs-lcd.h>
/*
* Supported LCD connections
@@ -156,6 +152,27 @@ struct pxafb_mach_info {
void pxa_set_fb_info(struct device *, struct pxafb_mach_info *);
unsigned long pxafb_get_hsync_time(struct device *dev);
+/* smartpanel related */
+#define SMART_CMD_A0 (0x1 << 8)
+#define SMART_CMD_READ_STATUS_REG (0x0 << 9)
+#define SMART_CMD_READ_FRAME_BUFFER ((0x0 << 9) | SMART_CMD_A0)
+#define SMART_CMD_WRITE_COMMAND (0x1 << 9)
+#define SMART_CMD_WRITE_DATA ((0x1 << 9) | SMART_CMD_A0)
+#define SMART_CMD_WRITE_FRAME ((0x2 << 9) | SMART_CMD_A0)
+#define SMART_CMD_WAIT_FOR_VSYNC (0x3 << 9)
+#define SMART_CMD_NOOP (0x4 << 9)
+#define SMART_CMD_INTERRUPT (0x5 << 9)
+
+#define SMART_CMD(x) (SMART_CMD_WRITE_COMMAND | ((x) & 0xff))
+#define SMART_DAT(x) (SMART_CMD_WRITE_DATA | ((x) & 0xff))
+
+/* SMART_DELAY() is a software-controlled delay primitive that can be
+ * inserted between command sequences; the otherwise unused command 0x6
+ * encodes it, and the delay ranges from 0 ms to 255 ms.
+ */
+#define SMART_CMD_DELAY (0x6 << 9)
+#define SMART_DELAY(ms) (SMART_CMD_DELAY | ((ms) & 0xff))
+
#ifdef CONFIG_FB_PXA_SMARTPANEL
extern int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int);
extern int pxafb_smart_flush(struct fb_info *info);
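
A sketch of a smart-panel update built from the new helpers; the command byte 0x29 and the delay are made up for illustration, and the queue/flush calls assume CONFIG_FB_PXA_SMARTPANEL:

#include <linux/fb.h>
#include <linux/kernel.h>
#include <linux/platform_data/video-pxafb.h>

/* Hypothetical "display on" sequence: command, settle delay, one data byte */
static uint16_t board_panel_on_seq[] = {
	SMART_CMD(0x29),
	SMART_DELAY(10),	/* wait 10 ms before sending data */
	SMART_DAT(0x01),
};

static int board_panel_display_on(struct fb_info *info)
{
	pxafb_smart_queue(info, board_panel_on_seq,
			  ARRAY_SIZE(board_panel_on_seq));
	return pxafb_smart_flush(info);
}
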
diff --git a/include/linux/platform_data/video_s3c.h b/include/linux/platform_data/video_s3c.h
index 48883995f47f..dd7747ba3269 100644
--- a/include/linux/platform_data/video_s3c.h
+++ b/include/linux/platform_data/video_s3c.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PLATFORM_DATA_VIDEO_S3C
#define __PLATFORM_DATA_VIDEO_S3C
diff --git a/include/linux/platform_data/voltage-omap.h b/include/linux/platform_data/voltage-omap.h
index 5be4d5def427..6d74e507dbd2 100644
--- a/include/linux/platform_data/voltage-omap.h
+++ b/include/linux/platform_data/voltage-omap.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* OMAP Voltage Management Routines
*
* Copyright (C) 2011, Texas Instruments, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ARCH_ARM_OMAP_VOLTAGE_H
@@ -32,7 +29,6 @@ struct omap_volt_data {
struct voltagedomain;
struct voltagedomain *voltdm_lookup(const char *name);
-int voltdm_scale(struct voltagedomain *voltdm, unsigned long target_volt);
unsigned long voltdm_get_voltage(struct voltagedomain *voltdm);
struct omap_volt_data *omap_voltage_get_voltdata(struct voltagedomain *voltdm,
unsigned long volt);
diff --git a/include/linux/platform_data/wilco-ec.h b/include/linux/platform_data/wilco-ec.h
new file mode 100644
index 000000000000..3e268e636b5b
--- /dev/null
+++ b/include/linux/platform_data/wilco-ec.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ChromeOS Wilco Embedded Controller
+ *
+ * Copyright 2018 Google LLC
+ */
+
+#ifndef WILCO_EC_H
+#define WILCO_EC_H
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/* Message flags for using the mailbox() interface */
+#define WILCO_EC_FLAG_NO_RESPONSE BIT(0) /* EC does not respond */
+
+/* Normal commands have a maximum of 32 bytes of data */
+#define EC_MAILBOX_DATA_SIZE 32
+
+struct device;
+struct resource;
+struct platform_device;
+
+/**
+ * struct wilco_ec_device - Wilco Embedded Controller handle.
+ * @dev: Device handle.
+ * @mailbox_lock: Mutex to ensure one mailbox command at a time.
+ * @io_command: I/O port for mailbox command. Provided by ACPI.
+ * @io_data: I/O port for mailbox data. Provided by ACPI.
+ * @io_packet: I/O port for mailbox packet data. Provided by ACPI.
+ * @data_buffer: Buffer used for EC communication. The same buffer
+ * is used to hold the request and the response.
+ * @data_size: Size of the data buffer used for EC communication.
+ * @debugfs_pdev: The child platform_device used by the debugfs sub-driver.
+ * @rtc_pdev: The child platform_device used by the RTC sub-driver.
+ * @charger_pdev: Child platform_device used by the charger config sub-driver.
+ * @telem_pdev: The child platform_device used by the telemetry sub-driver.
+ */
+struct wilco_ec_device {
+ struct device *dev;
+ struct mutex mailbox_lock;
+ struct resource *io_command;
+ struct resource *io_data;
+ struct resource *io_packet;
+ void *data_buffer;
+ size_t data_size;
+ struct platform_device *debugfs_pdev;
+ struct platform_device *rtc_pdev;
+ struct platform_device *charger_pdev;
+ struct platform_device *telem_pdev;
+};
+
+/**
+ * struct wilco_ec_request - Mailbox request message format.
+ * @struct_version: Should be %EC_MAILBOX_PROTO_VERSION
+ * @checksum: Sum of all bytes must be 0.
+ * @mailbox_id: Mailbox identifier, specifies the command set.
+ * @mailbox_version: Mailbox interface version %EC_MAILBOX_VERSION
+ * @reserved: Set to zero.
+ * @data_size: Length of following data.
+ */
+struct wilco_ec_request {
+ u8 struct_version;
+ u8 checksum;
+ u16 mailbox_id;
+ u8 mailbox_version;
+ u8 reserved;
+ u16 data_size;
+} __packed;
+
+/**
+ * struct wilco_ec_response - Mailbox response message format.
+ * @struct_version: Should be %EC_MAILBOX_PROTO_VERSION
+ * @checksum: Sum of all bytes must be 0.
+ * @result: Result code from the EC. Non-zero indicates an error.
+ * @data_size: Length of the response data buffer.
+ * @reserved: Set to zero.
+ * @data: Response data buffer. Max size is %EC_MAILBOX_DATA_SIZE_EXTENDED.
+ */
+struct wilco_ec_response {
+ u8 struct_version;
+ u8 checksum;
+ u16 result;
+ u16 data_size;
+ u8 reserved[2];
+ u8 data[];
+} __packed;
+
+/**
+ * enum wilco_ec_msg_type - Message type to select a set of command codes.
+ * @WILCO_EC_MSG_LEGACY: Legacy EC messages for standard EC behavior.
+ * @WILCO_EC_MSG_PROPERTY: Get/Set/Sync EC controlled NVRAM property.
+ * @WILCO_EC_MSG_TELEMETRY: Request telemetry data from the EC.
+ */
+enum wilco_ec_msg_type {
+ WILCO_EC_MSG_LEGACY = 0x00f0,
+ WILCO_EC_MSG_PROPERTY = 0x00f2,
+ WILCO_EC_MSG_TELEMETRY = 0x00f5,
+};
+
+/**
+ * struct wilco_ec_message - Request and response message.
+ * @type: Mailbox message type.
+ * @flags: Message flags, e.g. %WILCO_EC_FLAG_NO_RESPONSE.
+ * @request_size: Number of bytes to send to the EC.
+ * @request_data: Buffer containing the request data.
+ * @response_size: Number of bytes to read from EC.
+ * @response_data: Buffer containing the response data, should be
+ * response_size bytes and allocated by the caller.
+ */
+struct wilco_ec_message {
+ enum wilco_ec_msg_type type;
+ u8 flags;
+ size_t request_size;
+ void *request_data;
+ size_t response_size;
+ void *response_data;
+};
+
+/**
+ * wilco_ec_mailbox() - Send request to the EC and receive the response.
+ * @ec: Wilco EC device.
+ * @msg: Wilco EC message.
+ *
+ * Return: Number of bytes received or negative error code on failure.
+ */
+int wilco_ec_mailbox(struct wilco_ec_device *ec, struct wilco_ec_message *msg);
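
For illustration, a minimal caller sketch (hypothetical driver code, not part of this patch; the command byte is invented):

#include <linux/platform_data/wilco-ec.h>

static int example_read_ec_byte(struct wilco_ec_device *ec, u8 cmd, u8 *out)
{
	struct wilco_ec_message msg = {
		.type = WILCO_EC_MSG_LEGACY,
		.request_size = sizeof(cmd),
		.request_data = &cmd,
		.response_size = sizeof(*out),
		.response_data = out,	/* caller-allocated, response_size bytes */
	};
	int ret;

	ret = wilco_ec_mailbox(ec, &msg);	/* bytes received, or -errno */
	return ret < 0 ? ret : 0;
}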
+
+/**
+ * wilco_keyboard_leds_init() - Set up the keyboard backlight LEDs.
+ * @ec: EC device to query.
+ *
+ * After this call, the keyboard backlight will be exposed through an LED
+ * device at /sys/class/leds.
+ *
+ * This may sleep because it uses wilco_ec_mailbox().
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int wilco_keyboard_leds_init(struct wilco_ec_device *ec);
+
+/*
+ * A Property is typically a data item that is stored to NVRAM
+ * by the EC. Each of these data items has an index associated
+ * with it, known as the Property ID (PID). Properties may have
+ * variable lengths, up to a max of WILCO_EC_PROPERTY_MAX_SIZE
+ * bytes. Properties can be simple integers, or they may be more
+ * complex binary data.
+ */
+
+#define WILCO_EC_PROPERTY_MAX_SIZE 4
+
+/**
+ * struct wilco_ec_property_msg - Message to get or set a property.
+ * @property_id: Which property to get or set.
+ * @length: Number of bytes of |data| that are used.
+ * @data: Actual property data.
+ */
+struct wilco_ec_property_msg {
+ u32 property_id;
+ int length;
+ u8 data[WILCO_EC_PROPERTY_MAX_SIZE];
+};
+
+/**
+ * wilco_ec_get_property() - Retrieve a property from the EC.
+ * @ec: Embedded Controller device.
+ * @prop_msg: Message for request and response.
+ *
+ * The property_id field of |prop_msg| should be filled before calling this
+ * function. The result will be stored in the data and length fields.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int wilco_ec_get_property(struct wilco_ec_device *ec,
+ struct wilco_ec_property_msg *prop_msg);
+
+/**
+ * wilco_ec_set_property() - Store a property on the EC.
+ * @ec: Embedded Controller device.
+ * @prop_msg: Message for request and response.
+ *
+ * The property_id, length, and data fields of |prop_msg| should be
+ * filled before calling this function.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int wilco_ec_set_property(struct wilco_ec_device *ec,
+ struct wilco_ec_property_msg *prop_msg);
+
+/**
+ * wilco_ec_get_byte_property() - Retrieve a byte-size property from the EC.
+ * @ec: Embedded Controller device.
+ * @property_id: Which property to retrieve.
+ * @val: The result value, will be filled by this function.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int wilco_ec_get_byte_property(struct wilco_ec_device *ec, u32 property_id,
+ u8 *val);
+
+/**
+ * wilco_ec_set_byte_property() - Store a byte-size property on the EC.
+ * @ec: Embedded Controller device.
+ * @property_id: Which property to store.
+ * @val: Value to store.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int wilco_ec_set_byte_property(struct wilco_ec_device *ec, u32 property_id,
+ u8 val);
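
A hypothetical read-modify-write of a one-byte NVRAM property (the property ID below is invented for illustration, not a real Wilco PID):

#define EXAMPLE_PROPERTY_ID	0x0123	/* hypothetical PID */

static int example_toggle_property(struct wilco_ec_device *ec)
{
	u8 val;
	int ret;

	ret = wilco_ec_get_byte_property(ec, EXAMPLE_PROPERTY_ID, &val);
	if (ret)
		return ret;

	return wilco_ec_set_byte_property(ec, EXAMPLE_PROPERTY_ID, !val);
}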
+
+/**
+ * wilco_ec_add_sysfs() - Create sysfs entries
+ * @ec: Wilco EC device
+ *
+ * wilco_ec_remove_sysfs() needs to be called afterwards
+ * to perform the necessary cleanup.
+ *
+ * Return: 0 on success or negative error code on failure.
+ */
+int wilco_ec_add_sysfs(struct wilco_ec_device *ec);
+void wilco_ec_remove_sysfs(struct wilco_ec_device *ec);
+
+#endif /* WILCO_EC_H */
diff --git a/include/linux/platform_data/wiznet.h b/include/linux/platform_data/wiznet.h
index b5d8c192d84d..1154c4db8a13 100644
--- a/include/linux/platform_data/wiznet.h
+++ b/include/linux/platform_data/wiznet.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Ethernet driver for the WIZnet W5x00 chip.
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef PLATFORM_DATA_WIZNET_H
diff --git a/include/linux/platform_data/wkup_m3.h b/include/linux/platform_data/wkup_m3.h
index 3f1d77effd71..629660ff5806 100644
--- a/include/linux/platform_data/wkup_m3.h
+++ b/include/linux/platform_data/wkup_m3.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI Wakeup M3 remote processor platform data
*
* Copyright (C) 2014-2015 Texas Instruments, Inc.
*
* Dave Gerlach <d-gerlach@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _LINUX_PLATFORM_DATA_WKUP_M3_H
diff --git a/include/linux/platform_data/x86/apple.h b/include/linux/platform_data/x86/apple.h
new file mode 100644
index 000000000000..079e816c3c21
--- /dev/null
+++ b/include/linux/platform_data/x86/apple.h
@@ -0,0 +1,13 @@
+#ifndef PLATFORM_DATA_X86_APPLE_H
+#define PLATFORM_DATA_X86_APPLE_H
+
+#ifdef CONFIG_X86
+/**
+ * x86_apple_machine - whether the machine is an x86 Apple Macintosh
+ */
+extern bool x86_apple_machine;
+#else
+#define x86_apple_machine false
+#endif
+
+#endif
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
new file mode 100644
index 000000000000..28234dc9fa6a
--- /dev/null
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PLATFORM_DATA_X86_ASUS_WMI_H
+#define __PLATFORM_DATA_X86_ASUS_WMI_H
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+/* WMI Methods */
+#define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */
+#define ASUS_WMI_METHODID_SFBD 0x44424653 /* Set First Boot Device */
+#define ASUS_WMI_METHODID_GLCD 0x44434C47 /* Get LCD status */
+#define ASUS_WMI_METHODID_GPID 0x44495047 /* Get Panel ID?? (Resol) */
+#define ASUS_WMI_METHODID_QMOD 0x444F4D51 /* Quiet MODe */
+#define ASUS_WMI_METHODID_SPLV 0x4C425053 /* Set Panel Light Value */
+#define ASUS_WMI_METHODID_AGFN 0x4E464741 /* Atk Generic FuNction */
+#define ASUS_WMI_METHODID_SFUN 0x4E554653 /* FUNCtionalities */
+#define ASUS_WMI_METHODID_SDSP 0x50534453 /* Set DiSPlay output */
+#define ASUS_WMI_METHODID_GDSP 0x50534447 /* Get DiSPlay output */
+#define ASUS_WMI_METHODID_DEVP 0x50564544 /* DEVice Policy */
+#define ASUS_WMI_METHODID_OSVR 0x5256534F /* OS VeRsion */
+#define ASUS_WMI_METHODID_DCTS 0x53544344 /* Device status (DCTS) */
+#define ASUS_WMI_METHODID_DSTS 0x53545344 /* Device status (DSTS) */
+#define ASUS_WMI_METHODID_BSTS 0x53545342 /* Bios STatuS ? */
+#define ASUS_WMI_METHODID_DEVS 0x53564544 /* DEVice Set */
+#define ASUS_WMI_METHODID_CFVS 0x53564643 /* CPU Frequency Volt Set */
+#define ASUS_WMI_METHODID_KBFT 0x5446424B /* KeyBoard FilTer */
+#define ASUS_WMI_METHODID_INIT 0x54494E49 /* INITialize */
+#define ASUS_WMI_METHODID_HKEY 0x59454B48 /* Hot KEY ?? */
+
+#define ASUS_WMI_UNSUPPORTED_METHOD 0xFFFFFFFE
+
+/* Wireless */
+#define ASUS_WMI_DEVID_HW_SWITCH 0x00010001
+#define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002
+#define ASUS_WMI_DEVID_CWAP 0x00010003
+#define ASUS_WMI_DEVID_WLAN 0x00010011
+#define ASUS_WMI_DEVID_WLAN_LED 0x00010012
+#define ASUS_WMI_DEVID_BLUETOOTH 0x00010013
+#define ASUS_WMI_DEVID_GPS 0x00010015
+#define ASUS_WMI_DEVID_WIMAX 0x00010017
+#define ASUS_WMI_DEVID_WWAN3G 0x00010019
+#define ASUS_WMI_DEVID_UWB 0x00010021
+
+/* Leds */
+/* 0x000200XX and 0x000400XX */
+#define ASUS_WMI_DEVID_LED1 0x00020011
+#define ASUS_WMI_DEVID_LED2 0x00020012
+#define ASUS_WMI_DEVID_LED3 0x00020013
+#define ASUS_WMI_DEVID_LED4 0x00020014
+#define ASUS_WMI_DEVID_LED5 0x00020015
+#define ASUS_WMI_DEVID_LED6 0x00020016
+#define ASUS_WMI_DEVID_MICMUTE_LED 0x00040017
+
+/* Backlight and Brightness */
+#define ASUS_WMI_DEVID_ALS_ENABLE 0x00050001 /* Ambient Light Sensor */
+#define ASUS_WMI_DEVID_BACKLIGHT 0x00050011
+#define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012
+#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021
+#define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */
+#define ASUS_WMI_DEVID_LIGHTBAR 0x00050025
+#define ASUS_WMI_DEVID_FAN_BOOST_MODE 0x00110018
+#define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY 0x00120075
+
+/* Misc */
+#define ASUS_WMI_DEVID_PANEL_OD 0x00050019
+#define ASUS_WMI_DEVID_CAMERA 0x00060013
+#define ASUS_WMI_DEVID_LID_FLIP 0x00060062
+#define ASUS_WMI_DEVID_LID_FLIP_ROG 0x00060077
+
+/* Storage */
+#define ASUS_WMI_DEVID_CARDREADER 0x00080013
+
+/* Input */
+#define ASUS_WMI_DEVID_TOUCHPAD 0x00100011
+#define ASUS_WMI_DEVID_TOUCHPAD_LED 0x00100012
+#define ASUS_WMI_DEVID_FNLOCK 0x00100023
+
+/* Fan, Thermal */
+#define ASUS_WMI_DEVID_THERMAL_CTRL 0x00110011
+#define ASUS_WMI_DEVID_FAN_CTRL 0x00110012 /* deprecated */
+#define ASUS_WMI_DEVID_CPU_FAN_CTRL 0x00110013
+#define ASUS_WMI_DEVID_GPU_FAN_CTRL 0x00110014
+#define ASUS_WMI_DEVID_CPU_FAN_CURVE 0x00110024
+#define ASUS_WMI_DEVID_GPU_FAN_CURVE 0x00110025
+
+/* Power */
+#define ASUS_WMI_DEVID_PROCESSOR_STATE 0x00120012
+
+/* Deep S3 / Resume on LID open */
+#define ASUS_WMI_DEVID_LID_RESUME 0x00120031
+
+/* Maximum charging percentage */
+#define ASUS_WMI_DEVID_RSOC 0x00120057
+
+/* Keyboard dock */
+#define ASUS_WMI_DEVID_KBD_DOCK 0x00120063
+
+/* egpu on/off */
+#define ASUS_WMI_DEVID_EGPU 0x00090019
+
+/* dgpu on/off */
+#define ASUS_WMI_DEVID_DGPU 0x00090020
+
+/* gpu mux switch, 0 = dGPU, 1 = Optimus */
+#define ASUS_WMI_DEVID_GPU_MUX 0x00090016
+
+/* TUF laptop RGB modes/colours */
+#define ASUS_WMI_DEVID_TUF_RGB_MODE 0x00100056
+
+/* TUF laptop RGB power/state */
+#define ASUS_WMI_DEVID_TUF_RGB_STATE 0x00100057
+
+/* DSTS masks */
+#define ASUS_WMI_DSTS_STATUS_BIT 0x00000001
+#define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002
+#define ASUS_WMI_DSTS_PRESENCE_BIT 0x00010000
+#define ASUS_WMI_DSTS_USER_BIT 0x00020000
+#define ASUS_WMI_DSTS_BIOS_BIT 0x00040000
+#define ASUS_WMI_DSTS_BRIGHTNESS_MASK 0x000000FF
+#define ASUS_WMI_DSTS_MAX_BRIGTH_MASK 0x0000FF00
+#define ASUS_WMI_DSTS_LIGHTBAR_MASK 0x0000000F
+
+#if IS_REACHABLE(CONFIG_ASUS_WMI)
+int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval);
+#else
+static inline int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1,
+ u32 *retval)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __PLATFORM_DATA_X86_ASUS_WMI_H */
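
A short usage sketch, assuming only the declarations above: query the DSTS status of the WLAN device and test the presence bit.

static bool example_wlan_present(void)
{
	u32 retval;

	if (asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS,
				     ASUS_WMI_DEVID_WLAN, 0, &retval))
		return false;	/* stub returns -ENODEV without ASUS_WMI */

	return retval & ASUS_WMI_DSTS_PRESENCE_BIT;
}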
diff --git a/include/linux/platform_data/clk-lpss.h b/include/linux/platform_data/x86/clk-lpss.h
index 23901992b9dd..41df326583f9 100644
--- a/include/linux/platform_data/clk-lpss.h
+++ b/include/linux/platform_data/x86/clk-lpss.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Intel Low Power Subsystem clocks.
*
* Copyright (C) 2013, Intel Corporation
* Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
* Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __CLK_LPSS_H
@@ -18,6 +15,6 @@ struct lpss_clk_data {
struct clk *clk;
};
-extern int lpt_clk_init(void);
+extern int lpss_atom_clk_init(void);
#endif /* __CLK_LPSS_H */
diff --git a/include/linux/platform_data/x86/clk-pmc-atom.h b/include/linux/platform_data/x86/clk-pmc-atom.h
index 3ab892208343..2bdcf39b13ed 100644
--- a/include/linux/platform_data/x86/clk-pmc-atom.h
+++ b/include/linux/platform_data/x86/clk-pmc-atom.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Intel Atom platform clocks for BayTrail and CherryTrail SoC.
*
* Copyright (C) 2016, Intel Corporation
* Author: Irina Tirdea <irina.tirdea@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef __PLATFORM_DATA_X86_CLK_PMC_ATOM_H
@@ -35,10 +27,13 @@ struct pmc_clk {
*
* @base: PMC clock register base offset
* @clks: pointer to set of registered clocks, typically 0..5
+ * @critical: flag to indicate whether firmware-enabled pmc_plt_clks
+ * should be marked as critical or not
*/
struct pmc_clk_data {
void __iomem *base;
const struct pmc_clk *clks;
+ bool critical;
};
#endif /* __PLATFORM_DATA_X86_CLK_PMC_ATOM_H */
diff --git a/include/linux/platform_data/x86/nvidia-wmi-ec-backlight.h b/include/linux/platform_data/x86/nvidia-wmi-ec-backlight.h
new file mode 100644
index 000000000000..23d60130272c
--- /dev/null
+++ b/include/linux/platform_data/x86/nvidia-wmi-ec-backlight.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef __PLATFORM_DATA_X86_NVIDIA_WMI_EC_BACKLIGHT_H
+#define __PLATFORM_DATA_X86_NVIDIA_WMI_EC_BACKLIGHT_H
+
+#define WMI_BRIGHTNESS_GUID "603E9613-EF25-4338-A3D0-C46177516DB7"
+
+/**
+ * enum wmi_brightness_method - WMI method IDs
+ * @WMI_BRIGHTNESS_METHOD_LEVEL: Get/Set EC brightness level status
+ * @WMI_BRIGHTNESS_METHOD_SOURCE: Get/Set EC Brightness Source
+ */
+enum wmi_brightness_method {
+ WMI_BRIGHTNESS_METHOD_LEVEL = 1,
+ WMI_BRIGHTNESS_METHOD_SOURCE = 2,
+ WMI_BRIGHTNESS_METHOD_MAX
+};
+
+/**
+ * enum wmi_brightness_mode - Operation mode for WMI-wrapped method
+ * @WMI_BRIGHTNESS_MODE_GET: Get the current brightness level/source.
+ * @WMI_BRIGHTNESS_MODE_SET: Set the brightness level.
+ * @WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL: Get the maximum brightness level. This
+ * is only valid when the WMI method is
+ * %WMI_BRIGHTNESS_METHOD_LEVEL.
+ */
+enum wmi_brightness_mode {
+ WMI_BRIGHTNESS_MODE_GET = 0,
+ WMI_BRIGHTNESS_MODE_SET = 1,
+ WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL = 2,
+ WMI_BRIGHTNESS_MODE_MAX
+};
+
+/**
+ * enum wmi_brightness_source - Backlight brightness control source selection
+ * @WMI_BRIGHTNESS_SOURCE_GPU: Backlight brightness is controlled by the GPU.
+ * @WMI_BRIGHTNESS_SOURCE_EC: Backlight brightness is controlled by the
+ * system's Embedded Controller (EC).
+ * @WMI_BRIGHTNESS_SOURCE_AUX: Backlight brightness is controlled over the
+ * DisplayPort AUX channel.
+ */
+enum wmi_brightness_source {
+ WMI_BRIGHTNESS_SOURCE_GPU = 1,
+ WMI_BRIGHTNESS_SOURCE_EC = 2,
+ WMI_BRIGHTNESS_SOURCE_AUX = 3,
+ WMI_BRIGHTNESS_SOURCE_MAX
+};
+
+/**
+ * struct wmi_brightness_args - arguments for the WMI-wrapped ACPI method
+ * @mode: Pass in an &enum wmi_brightness_mode value to select between
+ * getting or setting a value.
+ * @val: In parameter for value to set when using %WMI_BRIGHTNESS_MODE_SET
+ * mode. Not used in conjunction with %WMI_BRIGHTNESS_MODE_GET or
+ * %WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL mode.
+ * @ret: Out parameter returning retrieved value when operating in
+ * %WMI_BRIGHTNESS_MODE_GET or %WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL
+ * mode. Not used in %WMI_BRIGHTNESS_MODE_SET mode.
+ * @ignored: Padding; not used. The ACPI method expects a 24-byte params struct.
+ *
+ * This is the parameters structure for the WmiBrightnessNotify ACPI method as
+ * wrapped by WMI. The value passed in to @val or returned by @ret will be a
+ * brightness value when the WMI method ID is %WMI_BRIGHTNESS_METHOD_LEVEL, or
+ * an &enum wmi_brightness_source value with %WMI_BRIGHTNESS_METHOD_SOURCE.
+ */
+struct wmi_brightness_args {
+ u32 mode;
+ u32 val;
+ u32 ret;
+ u32 ignored[3];
+};
+
+#endif
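
A hedged sketch of driving this interface through the GUID-based ACPI-WMI call; the same-buffer in/out pattern and the result coming back in @ret are assumptions modeled on typical WMI consumers:

#include <linux/acpi.h>

static int example_get_max_brightness(u32 *max_level)
{
	struct wmi_brightness_args args = {
		.mode = WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL,
	};
	struct acpi_buffer buf = { sizeof(args), &args };

	if (ACPI_FAILURE(wmi_evaluate_method(WMI_BRIGHTNESS_GUID, 0,
					     WMI_BRIGHTNESS_METHOD_LEVEL,
					     &buf, &buf)))
		return -EIO;

	*max_level = args.ret;	/* assumed: firmware writes back into args */
	return 0;
}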
diff --git a/include/linux/platform_data/x86/p2sb.h b/include/linux/platform_data/x86/p2sb.h
new file mode 100644
index 000000000000..a1d5fddc8f13
--- /dev/null
+++ b/include/linux/platform_data/x86/p2sb.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Primary to Sideband (P2SB) bridge access support
+ */
+
+#ifndef _PLATFORM_DATA_X86_P2SB_H
+#define _PLATFORM_DATA_X86_P2SB_H
+
+#include <linux/errno.h>
+#include <linux/kconfig.h>
+
+struct pci_bus;
+struct resource;
+
+#if IS_BUILTIN(CONFIG_P2SB)
+
+int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem);
+
+#else /* CONFIG_P2SB */
+
+static inline int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_P2SB is not set */
+
+#endif /* _PLATFORM_DATA_X86_P2SB_H */
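
A minimal sketch of looking up the hidden P2SB BAR (device 31, function 1 is the conventional P2SB slot on Intel PCHs; illustrative only):

#include <linux/pci.h>
#include <linux/platform_data/x86/p2sb.h>

static int example_find_p2sb_bar(struct pci_bus *bus)
{
	struct resource mem;
	int ret;

	ret = p2sb_bar(bus, PCI_DEVFN(31, 1), &mem);
	if (ret)
		return ret;

	pr_info("P2SB BAR: %pR\n", &mem);
	return 0;
}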
diff --git a/include/linux/platform_data/x86/pmc_atom.h b/include/linux/platform_data/x86/pmc_atom.h
index e4905fe69c38..b8a701c77fd0 100644
--- a/include/linux/platform_data/x86/pmc_atom.h
+++ b/include/linux/platform_data/x86/pmc_atom.h
@@ -1,21 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Intel Atom SOC Power Management Controller Header File
- * Copyright (c) 2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
+ * Intel Atom SoC Power Management Controller Header File
+ * Copyright (c) 2014-2015,2022 Intel Corporation.
*/
#ifndef PMC_ATOM_H
#define PMC_ATOM_H
+#include <linux/bits.h>
+
/* ValleyView Power Control Unit PCI Device ID */
#define PCI_DEVICE_ID_VLV_PMC 0x0F1C
/* CherryTrail Power Control Unit PCI Device ID */
@@ -56,7 +49,7 @@
#define PMC_S0I2_TMR 0x88
#define PMC_S0I3_TMR 0x8C
#define PMC_S0_TMR 0x90
-/* Sleep state counter is in units of of 32us */
+/* Sleep state counter is in units of 32us */
#define PMC_TMR_SHIFT 5
/* Power status of power islands */
@@ -148,11 +141,10 @@
#define ACPI_MMIO_REG_LEN 0x100
#define PM1_CNT 0x4
-#define SLEEP_TYPE_MASK 0xFFFFECFF
+#define SLEEP_TYPE_MASK GENMASK(12, 10)
#define SLEEP_TYPE_S5 0x1C00
-#define SLEEP_ENABLE 0x2000
+#define SLEEP_ENABLE BIT(13)
extern int pmc_atom_read(int offset, u32 *value);
-extern int pmc_atom_write(int offset, u32 value);
#endif /* PMC_ATOM_H */
diff --git a/include/linux/platform_data/x86/pwm-lpss.h b/include/linux/platform_data/x86/pwm-lpss.h
new file mode 100644
index 000000000000..c852fe24fe2a
--- /dev/null
+++ b/include/linux/platform_data/x86/pwm-lpss.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Intel Low Power Subsystem PWM controller driver */
+
+#ifndef __PLATFORM_DATA_X86_PWM_LPSS_H
+#define __PLATFORM_DATA_X86_PWM_LPSS_H
+
+#include <linux/types.h>
+
+struct device;
+
+struct pwm_lpss_chip;
+
+struct pwm_lpss_boardinfo {
+ unsigned long clk_rate;
+ unsigned int npwm;
+ unsigned long base_unit_bits;
+ /*
+	 * Some versions of the IP may get stuck in the state machine if the
+	 * enable bit is not set, in which case the update bit will report
+	 * busy status until the controller is reset. Other versions behave
+	 * differently.
+ */
+ bool bypass;
+ /*
+ * On some devices the _PS0/_PS3 AML code of the GPU (GFX0) device
+ * messes with the PWM0 controllers state,
+ */
+ bool other_devices_aml_touches_pwm_regs;
+};
+
+struct pwm_lpss_chip *devm_pwm_lpss_probe(struct device *dev, void __iomem *base,
+ const struct pwm_lpss_boardinfo *info);
+
+#endif /* __PLATFORM_DATA_X86_PWM_LPSS_H */
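
A sketch of the board info a glue driver might pass in; the numbers are illustrative, not values from any real platform table:

static const struct pwm_lpss_boardinfo example_info = {
	.clk_rate = 19200000,	/* assumed 19.2 MHz input clock */
	.npwm = 1,
	.base_unit_bits = 22,
};

/* in probe, after mapping MMIO: chip = devm_pwm_lpss_probe(dev, base, &example_info); */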
diff --git a/include/linux/platform_data/x86/simatic-ipc-base.h b/include/linux/platform_data/x86/simatic-ipc-base.h
new file mode 100644
index 000000000000..57d6a10dfc9e
--- /dev/null
+++ b/include/linux/platform_data/x86/simatic-ipc-base.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Siemens SIMATIC IPC drivers
+ *
+ * Copyright (c) Siemens AG, 2018-2021
+ *
+ * Authors:
+ * Henning Schild <henning.schild@siemens.com>
+ * Gerd Haeussler <gerd.haeussler.ext@siemens.com>
+ */
+
+#ifndef __PLATFORM_DATA_X86_SIMATIC_IPC_BASE_H
+#define __PLATFORM_DATA_X86_SIMATIC_IPC_BASE_H
+
+#include <linux/types.h>
+
+#define SIMATIC_IPC_DEVICE_NONE 0
+#define SIMATIC_IPC_DEVICE_227D 1
+#define SIMATIC_IPC_DEVICE_427E 2
+#define SIMATIC_IPC_DEVICE_127E 3
+#define SIMATIC_IPC_DEVICE_227E 4
+#define SIMATIC_IPC_DEVICE_227G 5
+
+struct simatic_ipc_platform {
+ u8 devmode;
+};
+
+#endif /* __PLATFORM_DATA_X86_SIMATIC_IPC_BASE_H */
diff --git a/include/linux/platform_data/x86/simatic-ipc.h b/include/linux/platform_data/x86/simatic-ipc.h
new file mode 100644
index 000000000000..a48bb5240977
--- /dev/null
+++ b/include/linux/platform_data/x86/simatic-ipc.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Siemens SIMATIC IPC drivers
+ *
+ * Copyright (c) Siemens AG, 2018-2021
+ *
+ * Authors:
+ * Henning Schild <henning.schild@siemens.com>
+ * Gerd Haeussler <gerd.haeussler.ext@siemens.com>
+ */
+
+#ifndef __PLATFORM_DATA_X86_SIMATIC_IPC_H
+#define __PLATFORM_DATA_X86_SIMATIC_IPC_H
+
+#include <linux/dmi.h>
+#include <linux/platform_data/x86/simatic-ipc-base.h>
+
+#define SIMATIC_IPC_DMI_ENTRY_OEM 129
+/* binary type */
+#define SIMATIC_IPC_DMI_TYPE 0xff
+#define SIMATIC_IPC_DMI_GROUP 0x05
+#define SIMATIC_IPC_DMI_ENTRY 0x02
+#define SIMATIC_IPC_DMI_TID 0x02
+
+enum simatic_ipc_station_ids {
+ SIMATIC_IPC_INVALID_STATION_ID = 0,
+ SIMATIC_IPC_IPC227D = 0x00000501,
+ SIMATIC_IPC_IPC427D = 0x00000701,
+ SIMATIC_IPC_IPC227E = 0x00000901,
+ SIMATIC_IPC_IPC277E = 0x00000902,
+ SIMATIC_IPC_IPC427E = 0x00000A01,
+ SIMATIC_IPC_IPC477E = 0x00000A02,
+ SIMATIC_IPC_IPC127E = 0x00000D01,
+ SIMATIC_IPC_IPC227G = 0x00000F01,
+ SIMATIC_IPC_IPCBX_39A = 0x00001001,
+ SIMATIC_IPC_IPCPX_39A = 0x00001002,
+};
+
+static inline u32 simatic_ipc_get_station_id(u8 *data, int max_len)
+{
+ struct {
+ u8 type; /* type (0xff = binary) */
+ u8 len; /* len of data entry */
+ u8 group;
+ u8 entry;
+ u8 tid;
+ __le32 station_id; /* station id (LE) */
+ } __packed * data_entry = (void *)data + sizeof(struct dmi_header);
+
+ while ((u8 *)data_entry < data + max_len) {
+ if (data_entry->type == SIMATIC_IPC_DMI_TYPE &&
+ data_entry->len == sizeof(*data_entry) &&
+ data_entry->group == SIMATIC_IPC_DMI_GROUP &&
+ data_entry->entry == SIMATIC_IPC_DMI_ENTRY &&
+ data_entry->tid == SIMATIC_IPC_DMI_TID) {
+ return le32_to_cpu(data_entry->station_id);
+ }
+ data_entry = (void *)((u8 *)(data_entry) + data_entry->len);
+ }
+
+ return SIMATIC_IPC_INVALID_STATION_ID;
+}
+
+static inline void
+simatic_ipc_find_dmi_entry_helper(const struct dmi_header *dh, void *_data)
+{
+ u32 *id = _data;
+
+ if (dh->type != SIMATIC_IPC_DMI_ENTRY_OEM)
+ return;
+
+ *id = simatic_ipc_get_station_id((u8 *)dh, dh->length);
+}
+
+#endif /* __PLATFORM_DATA_X86_SIMATIC_IPC_H */
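
Usage sketch: the helper above is meant to be handed to dmi_walk(), which invokes it for every DMI table entry.

static u32 example_station_id(void)
{
	u32 id = SIMATIC_IPC_INVALID_STATION_ID;

	if (dmi_walk(simatic_ipc_find_dmi_entry_helper, &id))
		return SIMATIC_IPC_INVALID_STATION_ID;

	return id;
}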
diff --git a/include/linux/platform_data/x86/soc.h b/include/linux/platform_data/x86/soc.h
new file mode 100644
index 000000000000..a5705189e2ac
--- /dev/null
+++ b/include/linux/platform_data/x86/soc.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Helpers for Intel SoC model detection
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ */
+
+#ifndef __PLATFORM_DATA_X86_SOC_H
+#define __PLATFORM_DATA_X86_SOC_H
+
+#include <linux/types.h>
+
+#if IS_ENABLED(CONFIG_X86)
+
+#include <linux/mod_devicetable.h>
+
+#include <asm/cpu_device_id.h>
+
+#define SOC_INTEL_IS_CPU(soc, type) \
+static inline bool soc_intel_is_##soc(void) \
+{ \
+ static const struct x86_cpu_id soc##_cpu_ids[] = { \
+ X86_MATCH_INTEL_FAM6_MODEL(type, NULL), \
+ {} \
+ }; \
+ const struct x86_cpu_id *id; \
+ \
+ id = x86_match_cpu(soc##_cpu_ids); \
+ if (id) \
+ return true; \
+ return false; \
+}
+
+SOC_INTEL_IS_CPU(byt, ATOM_SILVERMONT);
+SOC_INTEL_IS_CPU(cht, ATOM_AIRMONT);
+SOC_INTEL_IS_CPU(apl, ATOM_GOLDMONT);
+SOC_INTEL_IS_CPU(glk, ATOM_GOLDMONT_PLUS);
+SOC_INTEL_IS_CPU(cml, KABYLAKE_L);
+
+#undef SOC_INTEL_IS_CPU
+
+#else /* IS_ENABLED(CONFIG_X86) */
+
+static inline bool soc_intel_is_byt(void)
+{
+ return false;
+}
+
+static inline bool soc_intel_is_cht(void)
+{
+ return false;
+}
+
+static inline bool soc_intel_is_apl(void)
+{
+ return false;
+}
+
+static inline bool soc_intel_is_glk(void)
+{
+ return false;
+}
+
+static inline bool soc_intel_is_cml(void)
+{
+ return false;
+}
+#endif /* IS_ENABLED(CONFIG_X86) */
+
+#endif /* __PLATFORM_DATA_X86_SOC_H */
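
A short sketch of the intended use, gating a hypothetical quirk on the SoC generation; the non-x86 stubs make the same code compile away elsewhere:

static void example_apply_quirk(void)
{
	if (soc_intel_is_byt() || soc_intel_is_cht())
		pr_info("example: applying hypothetical BYT/CHT quirk\n");
}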
diff --git a/include/linux/platform_data/intel-spi.h b/include/linux/platform_data/x86/spi-intel.h
index 942b0c3f8f08..a512ec37abbb 100644
--- a/include/linux/platform_data/intel-spi.h
+++ b/include/linux/platform_data/x86/spi-intel.h
@@ -1,31 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Intel PCH/PCU SPI flash driver.
*
* Copyright (C) 2016, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-#ifndef INTEL_SPI_PDATA_H
-#define INTEL_SPI_PDATA_H
+#ifndef SPI_INTEL_PDATA_H
+#define SPI_INTEL_PDATA_H
enum intel_spi_type {
INTEL_SPI_BYT = 1,
INTEL_SPI_LPT,
INTEL_SPI_BXT,
+ INTEL_SPI_CNL,
};
/**
* struct intel_spi_boardinfo - Board specific data for Intel SPI driver
* @type: Type which this controller is compatible with
- * @writeable: The chip is writeable
+ * @set_writeable: Try to make the chip writeable (optional)
+ * @data: Data to be passed to @set_writeable, can be %NULL
*/
struct intel_spi_boardinfo {
enum intel_spi_type type;
- bool writeable;
+ bool (*set_writeable)(void __iomem *base, void *data);
+ void *data;
};
-#endif /* INTEL_SPI_PDATA_H */
+#endif /* SPI_INTEL_PDATA_H */
diff --git a/include/linux/platform_data/xilinx-ll-temac.h b/include/linux/platform_data/xilinx-ll-temac.h
new file mode 100644
index 000000000000..f4a68136afa6
--- /dev/null
+++ b/include/linux/platform_data/xilinx-ll-temac.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_XILINX_LL_TEMAC_H
+#define __LINUX_XILINX_LL_TEMAC_H
+
+#include <linux/if_ether.h>
+#include <linux/phy.h>
+#include <linux/spinlock.h>
+
+struct ll_temac_platform_data {
+ bool txcsum; /* Enable/disable TX checksum */
+ bool rxcsum; /* Enable/disable RX checksum */
+ u8 mac_addr[ETH_ALEN]; /* MAC address (6 bytes) */
+ /* Clock frequency for input to MDIO clock generator */
+ u32 mdio_clk_freq;
+ unsigned long long mdio_bus_id; /* Unique id for MDIO bus */
+ int phy_addr; /* Address of the PHY to connect to */
+ phy_interface_t phy_interface; /* PHY interface mode */
+ bool reg_little_endian; /* Little endian TEMAC register access */
+ bool dma_little_endian; /* Little endian DMA register access */
+	/* Pre-initialized lock (a spinlock) used to synchronize indirect
+	 * register access. When using both interfaces of a single
+	 * TEMAC IP block, the same lock should be passed here, as
+ * they share the same DCR bus bridge.
+ */
+ spinlock_t *indirect_lock;
+ /* DMA channel control setup */
+ u8 tx_irq_timeout; /* TX Interrupt Delay Time-out */
+ u8 tx_irq_count; /* TX Interrupt Coalescing Threshold Count */
+ u8 rx_irq_timeout; /* RX Interrupt Delay Time-out */
+ u8 rx_irq_count; /* RX Interrupt Coalescing Threshold Count */
+};
+
+#endif /* __LINUX_XILINX_LL_TEMAC_H */
diff --git a/include/linux/platform_data/xtalk-bridge.h b/include/linux/platform_data/xtalk-bridge.h
new file mode 100644
index 000000000000..51e5001f2c05
--- /dev/null
+++ b/include/linux/platform_data/xtalk-bridge.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SGI PCI Xtalk Bridge
+ */
+
+#ifndef PLATFORM_DATA_XTALK_BRIDGE_H
+#define PLATFORM_DATA_XTALK_BRIDGE_H
+
+#include <asm/sn/types.h>
+
+struct xtalk_bridge_platform_data {
+ struct resource mem;
+ struct resource io;
+ unsigned long bridge_addr;
+ unsigned long intr_addr;
+ unsigned long mem_offset;
+ unsigned long io_offset;
+ nasid_t nasid;
+ int masterwid;
+};
+
+#endif /* PLATFORM_DATA_XTALK_BRIDGE_H */
diff --git a/include/linux/platform_data/zforce_ts.h b/include/linux/platform_data/zforce_ts.h
index 7bdece8ef33e..2463a4a856a6 100644
--- a/include/linux/platform_data/zforce_ts.h
+++ b/include/linux/platform_data/zforce_ts.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* drivers/input/touchscreen/zforce.c
*
* Copyright (C) 2012-2013 MundoReader S.L.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _LINUX_INPUT_ZFORCE_TS_H
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 49f634d96118..b845fd83f429 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -1,35 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* platform_device.h - generic, centralized driver model
*
* Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
*
- * This file is released under the GPLv2
- *
- * See Documentation/driver-model/ for more information.
+ * See Documentation/driver-api/driver-model/ for more information.
*/
#ifndef _PLATFORM_DEVICE_H_
#define _PLATFORM_DEVICE_H_
#include <linux/device.h>
-#include <linux/mod_devicetable.h>
#define PLATFORM_DEVID_NONE (-1)
#define PLATFORM_DEVID_AUTO (-2)
+struct irq_affinity;
struct mfd_cell;
struct property_entry;
+struct platform_device_id;
struct platform_device {
const char *name;
int id;
bool id_auto;
struct device dev;
+ u64 platform_dma_mask;
+ struct device_dma_parameters dma_parms;
u32 num_resources;
struct resource *resource;
const struct platform_device_id *id_entry;
- char *driver_override; /* Driver name to force a match */
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
/* MFD cell pointer */
struct mfd_cell *mfd_cell;
@@ -40,6 +46,7 @@ struct platform_device {
#define platform_get_device_id(pdev) ((pdev)->id_entry)
+#define dev_is_platform(dev) ((dev)->bus == &platform_bus_type)
#define to_platform_device(x) container_of((x), struct platform_device, dev)
extern int platform_device_register(struct platform_device *);
@@ -48,20 +55,43 @@ extern void platform_device_unregister(struct platform_device *);
extern struct bus_type platform_bus_type;
extern struct device platform_bus;
-extern void arch_setup_pdev_archdata(struct platform_device *);
extern struct resource *platform_get_resource(struct platform_device *,
unsigned int, unsigned int);
+extern struct resource *platform_get_mem_or_io(struct platform_device *,
+ unsigned int);
+
+extern struct device *
+platform_find_device_by_driver(struct device *start,
+ const struct device_driver *drv);
+extern void __iomem *
+devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
+ unsigned int index, struct resource **res);
+extern void __iomem *
+devm_platform_ioremap_resource(struct platform_device *pdev,
+ unsigned int index);
+extern void __iomem *
+devm_platform_ioremap_resource_byname(struct platform_device *pdev,
+ const char *name);
extern int platform_get_irq(struct platform_device *, unsigned int);
+extern int platform_get_irq_optional(struct platform_device *, unsigned int);
extern int platform_irq_count(struct platform_device *);
+extern int devm_platform_get_irqs_affinity(struct platform_device *dev,
+ struct irq_affinity *affd,
+ unsigned int minvec,
+ unsigned int maxvec,
+ int **irqs);
extern struct resource *platform_get_resource_byname(struct platform_device *,
unsigned int,
const char *);
extern int platform_get_irq_byname(struct platform_device *, const char *);
+extern int platform_get_irq_byname_optional(struct platform_device *dev,
+ const char *name);
extern int platform_add_devices(struct platform_device **, int);
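
A minimal probe sketch using the devm helper and IRQ lookup declared above (hypothetical driver code):

#include <linux/err.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;
	int irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* propagates -EPROBE_DEFER as well */

	return 0;
}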
struct platform_device_info {
struct device *parent;
struct fwnode_handle *fwnode;
+ bool of_node_reused;
const char *name;
int id;
@@ -73,7 +103,7 @@ struct platform_device_info {
size_t size_data;
u64 dma_mask;
- struct property_entry *properties;
+ const struct property_entry *properties;
};
extern struct platform_device *platform_device_register_full(
const struct platform_device_info *pdevinfo);
@@ -171,21 +201,38 @@ extern int platform_device_add_resources(struct platform_device *pdev,
unsigned int num);
extern int platform_device_add_data(struct platform_device *pdev,
const void *data, size_t size);
-extern int platform_device_add_properties(struct platform_device *pdev,
- const struct property_entry *properties);
extern int platform_device_add(struct platform_device *pdev);
extern void platform_device_del(struct platform_device *pdev);
extern void platform_device_put(struct platform_device *pdev);
struct platform_driver {
int (*probe)(struct platform_device *);
+
+ /*
+	 * Traditionally the remove callback returned an int, which is
+	 * ignored by the driver core. This led to wrong expectations by driver
+	 * authors who thought returning an error code was a valid error
+	 * handling strategy. To convert to a callback returning void, new
+	 * drivers should implement .remove_new() until the conversion is done
+	 * and .remove() itself is changed to return void.
+ */
int (*remove)(struct platform_device *);
+ void (*remove_new)(struct platform_device *);
+
void (*shutdown)(struct platform_device *);
int (*suspend)(struct platform_device *, pm_message_t state);
int (*resume)(struct platform_device *);
struct device_driver driver;
const struct platform_device_id *id_table;
bool prevent_deferred_probe;
+ /*
+ * For most device drivers, no need to care about this flag as long as
+ * all DMAs are handled through the kernel DMA API. For some special
+ * ones, for example VFIO drivers, they know how to manage the DMA
+ * themselves and set this flag so that the IOMMU layer will allow them
+ * to setup and manage their own I/O address space.
+ */
+ bool driver_managed_dma;
};
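
A sketch of a driver wired to the void-returning callback, reusing the probe sketch above (names hypothetical):

#include <linux/module.h>

static void example_remove(struct platform_device *pdev)
{
	/* tear down resources; no return value to misuse */
}

static struct platform_driver example_driver = {
	.probe		= example_probe,
	.remove_new	= example_remove,
	.driver = {
		.name = "example",
	},
};
module_platform_driver(example_driver);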
#define to_platform_driver(drv) (container_of((drv), struct platform_driver, \
@@ -284,58 +331,6 @@ void platform_unregister_drivers(struct platform_driver * const *drivers,
#define platform_register_drivers(drivers, count) \
__platform_register_drivers(drivers, count, THIS_MODULE)
-/* early platform driver interface */
-struct early_platform_driver {
- const char *class_str;
- struct platform_driver *pdrv;
- struct list_head list;
- int requested_id;
- char *buffer;
- int bufsize;
-};
-
-#define EARLY_PLATFORM_ID_UNSET -2
-#define EARLY_PLATFORM_ID_ERROR -3
-
-extern int early_platform_driver_register(struct early_platform_driver *epdrv,
- char *buf);
-extern void early_platform_add_devices(struct platform_device **devs, int num);
-
-static inline int is_early_platform_device(struct platform_device *pdev)
-{
- return !pdev->dev.driver;
-}
-
-extern void early_platform_driver_register_all(char *class_str);
-extern int early_platform_driver_probe(char *class_str,
- int nr_probe, int user_only);
-extern void early_platform_cleanup(void);
-
-#define early_platform_init(class_string, platdrv) \
- early_platform_init_buffer(class_string, platdrv, NULL, 0)
-
-#ifndef MODULE
-#define early_platform_init_buffer(class_string, platdrv, buf, bufsiz) \
-static __initdata struct early_platform_driver early_driver = { \
- .class_str = class_string, \
- .buffer = buf, \
- .bufsize = bufsiz, \
- .pdrv = platdrv, \
- .requested_id = EARLY_PLATFORM_ID_UNSET, \
-}; \
-static int __init early_platform_driver_setup_func(char *buffer) \
-{ \
- return early_platform_driver_register(&early_driver, buffer); \
-} \
-early_param(class_string, early_platform_driver_setup_func)
-#else /* MODULE */
-#define early_platform_init_buffer(class_string, platdrv, buf, bufsiz) \
-static inline char *early_platform_driver_setup_func(void) \
-{ \
- return bufsiz ? buf : NULL; \
-}
-#endif /* MODULE */
-
#ifdef CONFIG_SUSPEND
extern int platform_pm_suspend(struct device *dev);
extern int platform_pm_resume(struct device *dev);
@@ -368,4 +363,19 @@ extern int platform_pm_restore(struct device *dev);
#define USE_PLATFORM_PM_SLEEP_OPS
#endif
+#ifndef CONFIG_SUPERH
+/*
+ * REVISIT: This stub is needed for all non-SuperH users of early platform
+ * drivers. It should go away once we introduce the new platform_device-based
+ * early driver framework.
+ */
+static inline int is_sh_early_platform_device(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif /* CONFIG_SUPERH */
+
+/* For now only SuperH uses it */
+void early_platform_cleanup(void);
+
#endif /* _PLATFORM_DEVICE_H_ */
diff --git a/include/linux/platform_profile.h b/include/linux/platform_profile.h
new file mode 100644
index 000000000000..e5cbb6841f3a
--- /dev/null
+++ b/include/linux/platform_profile.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Platform profile sysfs interface
+ *
+ * See Documentation/userspace-api/sysfs-platform_profile.rst for more
+ * information.
+ */
+
+#ifndef _PLATFORM_PROFILE_H_
+#define _PLATFORM_PROFILE_H_
+
+#include <linux/bitops.h>
+
+/*
+ * If more options are added please update profile_names array in
+ * platform_profile.c and sysfs-platform_profile documentation.
+ */
+
+enum platform_profile_option {
+ PLATFORM_PROFILE_LOW_POWER,
+ PLATFORM_PROFILE_COOL,
+ PLATFORM_PROFILE_QUIET,
+ PLATFORM_PROFILE_BALANCED,
+ PLATFORM_PROFILE_BALANCED_PERFORMANCE,
+ PLATFORM_PROFILE_PERFORMANCE,
+	PLATFORM_PROFILE_LAST, /* must always be last */
+};
+
+struct platform_profile_handler {
+ unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ int (*profile_get)(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile);
+ int (*profile_set)(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile);
+};
+
+int platform_profile_register(struct platform_profile_handler *pprof);
+int platform_profile_remove(void);
+void platform_profile_notify(void);
+
+#endif /* _PLATFORM_PROFILE_H_ */
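
A minimal provider sketch under the API above (handlers hypothetical; a real provider would talk to firmware):

static int example_profile_get(struct platform_profile_handler *pprof,
			       enum platform_profile_option *profile)
{
	*profile = PLATFORM_PROFILE_BALANCED;	/* report current mode */
	return 0;
}

static int example_profile_set(struct platform_profile_handler *pprof,
			       enum platform_profile_option profile)
{
	return 0;	/* push the new mode to firmware here */
}

static struct platform_profile_handler example_pprof = {
	.profile_get = example_profile_get,
	.profile_set = example_profile_set,
};

static int example_register(void)
{
	set_bit(PLATFORM_PROFILE_LOW_POWER, example_pprof.choices);
	set_bit(PLATFORM_PROFILE_BALANCED, example_pprof.choices);
	return platform_profile_register(&example_pprof);
}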
diff --git a/include/linux/pldmfw.h b/include/linux/pldmfw.h
new file mode 100644
index 000000000000..0fc831338226
--- /dev/null
+++ b/include/linux/pldmfw.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2019, Intel Corporation. */
+
+#ifndef _PLDMFW_H_
+#define _PLDMFW_H_
+
+#include <linux/list.h>
+#include <linux/firmware.h>
+
+#define PLDM_DEVICE_UPDATE_CONTINUE_AFTER_FAIL BIT(0)
+
+#define PLDM_STRING_TYPE_UNKNOWN 0
+#define PLDM_STRING_TYPE_ASCII 1
+#define PLDM_STRING_TYPE_UTF8 2
+#define PLDM_STRING_TYPE_UTF16 3
+#define PLDM_STRING_TYPE_UTF16LE 4
+#define PLDM_STRING_TYPE_UTF16BE 5
+
+struct pldmfw_record {
+ struct list_head entry;
+
+ /* List of descriptor TLVs */
+ struct list_head descs;
+
+	/* Component Set version string */
+ const u8 *version_string;
+ u8 version_type;
+ u8 version_len;
+
+ /* Package Data length */
+ u16 package_data_len;
+
+ /* Bitfield of Device Update Flags */
+ u32 device_update_flags;
+
+ /* Package Data block */
+ const u8 *package_data;
+
+ /* Bitmap of components applicable to this record */
+ unsigned long *component_bitmap;
+ u16 component_bitmap_len;
+};
+
+/* Standard descriptor TLV identifiers */
+#define PLDM_DESC_ID_PCI_VENDOR_ID 0x0000
+#define PLDM_DESC_ID_IANA_ENTERPRISE_ID 0x0001
+#define PLDM_DESC_ID_UUID 0x0002
+#define PLDM_DESC_ID_PNP_VENDOR_ID 0x0003
+#define PLDM_DESC_ID_ACPI_VENDOR_ID 0x0004
+#define PLDM_DESC_ID_PCI_DEVICE_ID 0x0100
+#define PLDM_DESC_ID_PCI_SUBVENDOR_ID 0x0101
+#define PLDM_DESC_ID_PCI_SUBDEV_ID 0x0102
+#define PLDM_DESC_ID_PCI_REVISION_ID 0x0103
+#define PLDM_DESC_ID_PNP_PRODUCT_ID 0x0104
+#define PLDM_DESC_ID_ACPI_PRODUCT_ID 0x0105
+#define PLDM_DESC_ID_VENDOR_DEFINED 0xFFFF
+
+struct pldmfw_desc_tlv {
+ struct list_head entry;
+
+ const u8 *data;
+ u16 type;
+ u16 size;
+};
+
+#define PLDM_CLASSIFICATION_UNKNOWN 0x0000
+#define PLDM_CLASSIFICATION_OTHER 0x0001
+#define PLDM_CLASSIFICATION_DRIVER 0x0002
+#define PLDM_CLASSIFICATION_CONFIG_SW 0x0003
+#define PLDM_CLASSIFICATION_APP_SW 0x0004
+#define PLDM_CLASSIFICATION_INSTRUMENTATION 0x0005
+#define PLDM_CLASSIFICATION_BIOS 0x0006
+#define PLDM_CLASSIFICATION_DIAGNOSTIC_SW 0x0007
+#define PLDM_CLASSIFICATION_OS 0x0008
+#define PLDM_CLASSIFICATION_MIDDLEWARE 0x0009
+#define PLDM_CLASSIFICATION_FIRMWARE 0x000A
+#define PLDM_CLASSIFICATION_CODE 0x000B
+#define PLDM_CLASSIFICATION_SERVICE_PACK 0x000C
+#define PLDM_CLASSIFICATION_SOFTWARE_BUNDLE 0x000D
+
+#define PLDM_ACTIVATION_METHOD_AUTO BIT(0)
+#define PLDM_ACTIVATION_METHOD_SELF_CONTAINED BIT(1)
+#define PLDM_ACTIVATION_METHOD_MEDIUM_SPECIFIC BIT(2)
+#define PLDM_ACTIVATION_METHOD_REBOOT BIT(3)
+#define PLDM_ACTIVATION_METHOD_DC_CYCLE BIT(4)
+#define PLDM_ACTIVATION_METHOD_AC_CYCLE BIT(5)
+
+#define PLDMFW_COMPONENT_OPTION_FORCE_UPDATE BIT(0)
+#define PLDMFW_COMPONENT_OPTION_USE_COMPARISON_STAMP BIT(1)
+
+struct pldmfw_component {
+ struct list_head entry;
+
+ /* component identifier */
+ u16 classification;
+ u16 identifier;
+
+ u16 options;
+ u16 activation_method;
+
+ u32 comparison_stamp;
+
+ u32 component_size;
+ const u8 *component_data;
+
+ /* Component version string */
+ const u8 *version_string;
+ u8 version_type;
+ u8 version_len;
+
+ /* component index */
+ u8 index;
+};
+
+/* Transfer flag used for sending components to the firmware */
+#define PLDM_TRANSFER_FLAG_START BIT(0)
+#define PLDM_TRANSFER_FLAG_MIDDLE BIT(1)
+#define PLDM_TRANSFER_FLAG_END BIT(2)
+
+struct pldmfw_ops;
+
+/* Main entry point to the PLDM firmware update engine. Device drivers
+ * should embed this in a private structure and use container_of to obtain
+ * a pointer to their own data, used to implement the device specific
+ * operations.
+ */
+struct pldmfw {
+ const struct pldmfw_ops *ops;
+ struct device *dev;
+};
+
+bool pldmfw_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record);
+
+/* Operations invoked by the generic PLDM firmware update engine. Used to
+ * implement device specific logic.
+ *
+ * @match_record: check if the device matches the given record. For
+ * convenience, a standard implementation is provided for PCI devices.
+ *
+ * @send_package_data: send the package data associated with the matching
+ * record to firmware.
+ *
+ * @send_component_table: send the component data associated with a given
+ * component to firmware. Called once for each applicable component.
+ *
+ * @flash_component: Flash the data for a given component to the device.
+ * Called once for each applicable component, after all component tables have
+ * been sent.
+ *
+ * @finalize_update: (optional) Finish the update. Called after all components
+ * have been flashed.
+ */
+struct pldmfw_ops {
+ bool (*match_record)(struct pldmfw *context, struct pldmfw_record *record);
+ int (*send_package_data)(struct pldmfw *context, const u8 *data, u16 length);
+ int (*send_component_table)(struct pldmfw *context, struct pldmfw_component *component,
+ u8 transfer_flag);
+ int (*flash_component)(struct pldmfw *context, struct pldmfw_component *component);
+ int (*finalize_update)(struct pldmfw *context);
+};
+
+int pldmfw_flash_image(struct pldmfw *context, const struct firmware *fw);
+
+#endif
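
An embedding sketch as the comment above suggests (ops table abbreviated; only match_record shown, delegating to the PCI helper):

struct example_dev {
	struct pldmfw context;
	/* driver-private state follows */
};

static const struct pldmfw_ops example_ops = {
	.match_record = pldmfw_op_pci_match_record,
	/* .send_package_data, .send_component_table and .flash_component
	 * omitted here; a real driver must implement them.
	 */
};

static int example_flash(struct device *dev, struct example_dev *ed,
			 const struct firmware *fw)
{
	ed->context.ops = &example_ops;
	ed->context.dev = dev;
	return pldmfw_flash_image(&ed->context, fw);
}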
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 97883604a3c5..0f352c1d3c80 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Descending-priority-sorted double-linked list
*
@@ -12,8 +13,6 @@
* Simplifications of the original code by
* Oleg Nesterov <oleg@tv-sign.ru>
*
- * Licensed under the FSF's GNU Public License v2 or later.
- *
* Based on simple lists (include/linux/list.h).
*
* This is a priority-sorted list of nodes; each node has a
@@ -70,13 +69,15 @@
* is lowest priority.
*
* No locking is done, up to the caller.
- *
*/
#ifndef _LINUX_PLIST_H_
#define _LINUX_PLIST_H_
-#include <linux/kernel.h>
+#include <linux/container_of.h>
#include <linux/list.h>
+#include <linux/types.h>
+
+#include <asm/bug.h>
struct plist_head {
struct list_head node_list;
@@ -231,7 +232,7 @@ static inline int plist_node_empty(const struct plist_node *node)
* @type: the type of the struct this is embedded in
* @member: the name of the list_head within the struct
*/
-#ifdef CONFIG_DEBUG_PI_LIST
+#ifdef CONFIG_DEBUG_PLIST
# define plist_first_entry(head, type, member) \
({ \
WARN_ON(plist_head_empty(head)); \
@@ -248,7 +249,7 @@ static inline int plist_node_empty(const struct plist_node *node)
* @type: the type of the struct this is embedded in
* @member: the name of the list_head within the struct
*/
-#ifdef CONFIG_DEBUG_PI_LIST
+#ifdef CONFIG_DEBUG_PLIST
# define plist_last_entry(head, type, member) \
({ \
WARN_ON(plist_head_empty(head)); \
diff --git a/include/linux/pm-trace.h b/include/linux/pm-trace.h
index 7b78793f07d7..b8604f8847d9 100644
--- a/include/linux/pm-trace.h
+++ b/include/linux/pm-trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef PM_TRACE_H
#define PM_TRACE_H
diff --git a/include/linux/pm.h b/include/linux/pm.h
index b8b4df09fd8f..035d9649eba4 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -1,38 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* pm.h - Power management interface
*
* Copyright (C) 2000 Andrew Henroid
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _LINUX_PM_H
#define _LINUX_PM_H
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/timer.h>
+#include <linux/hrtimer.h>
#include <linux/completion.h>
/*
* Callbacks for platform drivers to implement.
*/
extern void (*pm_power_off)(void);
-extern void (*pm_power_off_prepare)(void);
struct device; /* we have a circular dep with device.h */
#ifdef CONFIG_VT_CONSOLE_SLEEP
@@ -47,11 +35,19 @@ static inline void pm_vt_switch_unregister(struct device *dev)
}
#endif /* CONFIG_VT_CONSOLE_SLEEP */
+#ifdef CONFIG_CXL_SUSPEND
+bool cxl_mem_active(void);
+#else
+static inline bool cxl_mem_active(void)
+{
+ return false;
+}
+#endif
+
/*
* Device power management
*/
-struct device;
#ifdef CONFIG_PM
extern const char power_group_name[]; /* = "power" */
@@ -283,7 +279,7 @@ typedef struct pm_message {
* actions to be performed by a device driver's callbacks generally depend on
* the platform and subsystem the device belongs to.
*
- * Refer to Documentation/power/runtime_pm.txt for more information about the
+ * Refer to Documentation/power/runtime_pm.rst for more information about the
* role of the @runtime_suspend(), @runtime_resume() and @runtime_idle()
* callbacks in device runtime power management.
*/
@@ -313,57 +309,120 @@ struct dev_pm_ops {
int (*runtime_idle)(struct device *dev);
};
+#define SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ .suspend = pm_sleep_ptr(suspend_fn), \
+ .resume = pm_sleep_ptr(resume_fn), \
+ .freeze = pm_sleep_ptr(suspend_fn), \
+ .thaw = pm_sleep_ptr(resume_fn), \
+ .poweroff = pm_sleep_ptr(suspend_fn), \
+ .restore = pm_sleep_ptr(resume_fn),
+
+#define LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ .suspend_late = pm_sleep_ptr(suspend_fn), \
+ .resume_early = pm_sleep_ptr(resume_fn), \
+ .freeze_late = pm_sleep_ptr(suspend_fn), \
+ .thaw_early = pm_sleep_ptr(resume_fn), \
+ .poweroff_late = pm_sleep_ptr(suspend_fn), \
+ .restore_early = pm_sleep_ptr(resume_fn),
+
+#define NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ .suspend_noirq = pm_sleep_ptr(suspend_fn), \
+ .resume_noirq = pm_sleep_ptr(resume_fn), \
+ .freeze_noirq = pm_sleep_ptr(suspend_fn), \
+ .thaw_noirq = pm_sleep_ptr(resume_fn), \
+ .poweroff_noirq = pm_sleep_ptr(suspend_fn), \
+ .restore_noirq = pm_sleep_ptr(resume_fn),
+
+#define RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ .runtime_suspend = suspend_fn, \
+ .runtime_resume = resume_fn, \
+ .runtime_idle = idle_fn,
+
#ifdef CONFIG_PM_SLEEP
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
- .suspend = suspend_fn, \
- .resume = resume_fn, \
- .freeze = suspend_fn, \
- .thaw = resume_fn, \
- .poweroff = suspend_fn, \
- .restore = resume_fn,
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#else
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
#ifdef CONFIG_PM_SLEEP
#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
- .suspend_late = suspend_fn, \
- .resume_early = resume_fn, \
- .freeze_late = suspend_fn, \
- .thaw_early = resume_fn, \
- .poweroff_late = suspend_fn, \
- .restore_early = resume_fn,
+ LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#else
#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
#ifdef CONFIG_PM_SLEEP
#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
- .suspend_noirq = suspend_fn, \
- .resume_noirq = resume_fn, \
- .freeze_noirq = suspend_fn, \
- .thaw_noirq = resume_fn, \
- .poweroff_noirq = suspend_fn, \
- .restore_noirq = resume_fn,
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#else
#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
#ifdef CONFIG_PM
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
- .runtime_suspend = suspend_fn, \
- .runtime_resume = resume_fn, \
- .runtime_idle = idle_fn,
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
#else
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
#endif
+#define _DEFINE_DEV_PM_OPS(name, \
+ suspend_fn, resume_fn, \
+ runtime_suspend_fn, runtime_resume_fn, idle_fn) \
+const struct dev_pm_ops name = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ RUNTIME_PM_OPS(runtime_suspend_fn, runtime_resume_fn, idle_fn) \
+}
+
+#ifdef CONFIG_PM
+#define _EXPORT_DEV_PM_OPS(name, sec, ns) \
+ const struct dev_pm_ops name; \
+ __EXPORT_SYMBOL(name, sec, ns); \
+ const struct dev_pm_ops name
+#define EXPORT_PM_FN_GPL(name) EXPORT_SYMBOL_GPL(name)
+#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, ns)
+#else
+#define _EXPORT_DEV_PM_OPS(name, sec, ns) \
+ static __maybe_unused const struct dev_pm_ops __static_##name
+#define EXPORT_PM_FN_GPL(name)
+#define EXPORT_PM_FN_NS_GPL(name, ns)
+#endif
+
+#define EXPORT_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "", "")
+#define EXPORT_GPL_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "_gpl", "")
+#define EXPORT_NS_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "", #ns)
+#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "_gpl", #ns)
+
/*
* Use this if you want to use the same suspend and resume callbacks for suspend
* to RAM and hibernation.
+ *
+ * If the underlying dev_pm_ops struct symbol has to be exported, use
+ * EXPORT_SIMPLE_DEV_PM_OPS() or EXPORT_GPL_SIMPLE_DEV_PM_OPS() instead.
*/
+#define DEFINE_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+ _DEFINE_DEV_PM_OPS(name, suspend_fn, resume_fn, NULL, NULL, NULL)
+
+#define EXPORT_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+ EXPORT_DEV_PM_OPS(name) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+#define EXPORT_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+ EXPORT_GPL_DEV_PM_OPS(name) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+#define EXPORT_NS_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \
+ EXPORT_NS_DEV_PM_OPS(name, ns) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+#define EXPORT_NS_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \
+ EXPORT_NS_GPL_DEV_PM_OPS(name, ns) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+
+/* Deprecated. Use DEFINE_SIMPLE_DEV_PM_OPS() instead. */
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
-const struct dev_pm_ops name = { \
+const struct dev_pm_ops __maybe_unused name = { \
SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}
@@ -379,13 +438,19 @@ const struct dev_pm_ops name = { \
* suspend and "early" resume callback pointers, .suspend_late() and
* .resume_early(), to the same routines as .runtime_suspend() and
* .runtime_resume(), respectively (and analogously for hibernation).
+ *
+ * Deprecated. You most likely don't want this macro. Use
+ * DEFINE_RUNTIME_DEV_PM_OPS() instead.
*/
#define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
-const struct dev_pm_ops name = { \
+const struct dev_pm_ops __maybe_unused name = { \
SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}
+#define pm_ptr(_ptr) PTR_IF(IS_ENABLED(CONFIG_PM), (_ptr))
+#define pm_sleep_ptr(_ptr) PTR_IF(IS_ENABLED(CONFIG_PM_SLEEP), (_ptr))
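
A sketch tying the pieces together: the callbacks are defined unconditionally, referenced through pm_sleep_ptr(), and optimized away when CONFIG_PM_SLEEP is off (names hypothetical):

static int example_suspend(struct device *dev)
{
	return 0;	/* quiesce the hypothetical device */
}

static int example_resume(struct device *dev)
{
	return 0;	/* bring it back up */
}

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend,
				example_resume);

/* in the driver definition: .driver.pm = pm_sleep_ptr(&example_pm_ops) */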
+
/*
* PM_EVENT_ messages
*
@@ -506,6 +571,7 @@ const struct dev_pm_ops name = { \
*/
enum rpm_status {
+ RPM_INVALID = -1,
RPM_ACTIVE = 0,
RPM_RESUMING,
RPM_SUSPENDED,
@@ -543,6 +609,8 @@ struct pm_subsys_data {
spinlock_t lock;
unsigned int refcount;
#ifdef CONFIG_PM_CLK
+ unsigned int clock_op_might_sleep;
+ struct mutex clock_mutex;
struct list_head clock_list;
#endif
#ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -550,6 +618,24 @@ struct pm_subsys_data {
#endif
};
+/*
+ * Driver flags to control system suspend/resume behavior.
+ *
+ * These flags can be set by device drivers at the probe time. They need not be
+ * cleared by the drivers as the driver core will take care of that.
+ *
+ * NO_DIRECT_COMPLETE: Do not apply direct-complete optimization to the device.
+ * SMART_PREPARE: Take the driver ->prepare callback return value into account.
+ * SMART_SUSPEND: Avoid resuming the device from runtime suspend.
+ * MAY_SKIP_RESUME: Allow driver "noirq" and "early" callbacks to be skipped.
+ *
+ * See Documentation/driver-api/pm/devices.rst for details.
+ */
+#define DPM_FLAG_NO_DIRECT_COMPLETE BIT(0)
+#define DPM_FLAG_SMART_PREPARE BIT(1)
+#define DPM_FLAG_SMART_SUSPEND BIT(2)
+#define DPM_FLAG_MAY_SKIP_RESUME BIT(3)
+
struct dev_pm_info {
pm_message_t power_state;
unsigned int can_wakeup:1;
@@ -559,8 +645,10 @@ struct dev_pm_info {
bool is_suspended:1; /* Ditto */
bool is_noirq_suspended:1;
bool is_late_suspended:1;
+ bool no_pm:1;
bool early_init:1; /* Owned by the PM core */
bool direct_complete:1; /* Owned by the PM core */
+ u32 driver_flags;
spinlock_t lock;
#ifdef CONFIG_PM_SLEEP
struct list_head entry;
@@ -569,12 +657,14 @@ struct dev_pm_info {
bool wakeup_path:1;
bool syscore:1;
bool no_pm_callbacks:1; /* Owned by the PM core */
+ unsigned int must_resume:1; /* Owned by the PM core */
+ unsigned int may_skip_resume:1; /* Set by subsystems */
#else
unsigned int should_wakeup:1;
#endif
#ifdef CONFIG_PM
- struct timer_list suspend_timer;
- unsigned long timer_expires;
+ struct hrtimer suspend_timer;
+ u64 timer_expires;
struct work_struct work;
wait_queue_head_t wait_queue;
struct wake_irq *wakeirq;
@@ -584,6 +674,7 @@ struct dev_pm_info {
unsigned int idle_notification:1;
unsigned int request_pending:1;
unsigned int deferred_resume:1;
+ unsigned int needs_force_resume:1;
unsigned int runtime_auto:1;
bool ignore_children:1;
unsigned int no_callbacks:1;
@@ -594,19 +685,19 @@ struct dev_pm_info {
unsigned int links_count;
enum rpm_request request;
enum rpm_status runtime_status;
+ enum rpm_status last_status;
int runtime_error;
int autosuspend_delay;
- unsigned long last_busy;
- unsigned long active_jiffies;
- unsigned long suspended_jiffies;
- unsigned long accounting_timestamp;
+ u64 last_busy;
+ u64 active_time;
+ u64 suspended_time;
+ u64 accounting_timestamp;
#endif
struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */
void (*set_latency_tolerance)(struct device *, s32);
struct dev_pm_qos *qos;
};
-extern void update_pm_runtime_accounting(struct device *dev);
extern int dev_pm_get_subsys_data(struct device *dev);
extern void dev_pm_put_subsys_data(struct device *dev);
@@ -614,6 +705,7 @@ extern void dev_pm_put_subsys_data(struct device *dev);
* struct dev_pm_domain - power management domain representation.
*
* @ops: Power management operations associated with this domain.
+ * @start: Called when a user needs to start the device via the domain.
* @detach: Called when removing a device from the domain.
* @activate: Called before executing probe routines for bus types and drivers.
* @sync: Called after successful driver probe.
@@ -625,6 +717,7 @@ extern void dev_pm_put_subsys_data(struct device *dev);
*/
struct dev_pm_domain {
struct dev_pm_ops ops;
+ int (*start)(struct device *dev);
void (*detach)(struct device *dev, bool power_off);
int (*activate)(struct device *dev);
void (*sync)(struct device *dev);
@@ -702,11 +795,11 @@ extern int dpm_suspend_late(pm_message_t state);
extern int dpm_suspend(pm_message_t state);
extern int dpm_prepare(pm_message_t state);
-extern void __suspend_report_result(const char *function, void *fn, int ret);
+extern void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret);
-#define suspend_report_result(fn, ret) \
+#define suspend_report_result(dev, fn, ret) \
do { \
- __suspend_report_result(__func__, fn, ret); \
+ __suspend_report_result(__func__, dev, fn, ret); \
} while (0)
extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);
@@ -732,7 +825,9 @@ extern int pm_generic_poweroff_noirq(struct device *dev);
extern int pm_generic_poweroff_late(struct device *dev);
extern int pm_generic_poweroff(struct device *dev);
extern void pm_generic_complete(struct device *dev);
-extern void pm_complete_with_resume_check(struct device *dev);
+
+extern bool dev_pm_skip_resume(struct device *dev);
+extern bool dev_pm_skip_suspend(struct device *dev);
#else /* !CONFIG_PM_SLEEP */
@@ -744,7 +839,7 @@ static inline int dpm_suspend_start(pm_message_t state)
return 0;
}
-#define suspend_report_result(fn, ret) do {} while (0)
+#define suspend_report_result(dev, fn, ret) do {} while (0)
static inline int device_pm_wait_for_dev(struct device *a, struct device *b)
{
diff --git a/include/linux/pm2301_charger.h b/include/linux/pm2301_charger.h
deleted file mode 100644
index 85c16defe11a..000000000000
--- a/include/linux/pm2301_charger.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * PM2301 charger driver.
- *
- * Copyright (C) 2012 ST Ericsson Corporation
- *
- * Contact: Olivier LAUNAY (olivier.launay@stericsson.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#ifndef __LINUX_PM2301_H
-#define __LINUX_PM2301_H
-
-/**
- * struct pm2xxx_bm_charger_parameters - Charger specific parameters
- * @ac_volt_max: maximum allowed AC charger voltage in mV
- * @ac_curr_max: maximum allowed AC charger current in mA
- */
-struct pm2xxx_bm_charger_parameters {
- int ac_volt_max;
- int ac_curr_max;
-};
-
-/**
- * struct pm2xxx_bm_data - pm2xxx battery management data
- * @enable_overshoot flag to enable VBAT overshoot control
- * @chg_params charger parameters
- */
-struct pm2xxx_bm_data {
- bool enable_overshoot;
- const struct pm2xxx_bm_charger_parameters *chg_params;
-};
-
-struct pm2xxx_charger_platform_data {
- char **supplied_to;
- size_t num_supplicants;
- int i2c_bus;
- const char *label;
- int gpio_irq_number;
- unsigned int lpn_gpio;
- int irq_type;
-};
-
-struct pm2xxx_platform_data {
- struct pm2xxx_charger_platform_data *wall_charger;
- struct pm2xxx_bm_data *battery;
-};
-
-#endif /* __LINUX_PM2301_H */
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
index 09779b0ae720..ada3a0ab10bf 100644
--- a/include/linux/pm_clock.h
+++ b/include/linux/pm_clock.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* pm_clock.h - Definitions and headers related to device clocks.
*
* Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
- *
- * This file is released under the GPLv2.
*/
#ifndef _LINUX_PM_CLOCK_H
@@ -48,6 +47,7 @@ extern void pm_clk_remove(struct device *dev, const char *con_id);
extern void pm_clk_remove_clk(struct device *dev, struct clk *clk);
extern int pm_clk_suspend(struct device *dev);
extern int pm_clk_resume(struct device *dev);
+extern int devm_pm_clk_create(struct device *dev);
#else
static inline bool pm_clk_no_clocks(struct device *dev)
{
@@ -84,6 +84,10 @@ static inline void pm_clk_remove(struct device *dev, const char *con_id)
static inline void pm_clk_remove_clk(struct device *dev, struct clk *clk)
{
}
+static inline int devm_pm_clk_create(struct device *dev)
+{
+ return -EINVAL;
+}
#endif
#ifdef CONFIG_HAVE_CLK
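The new devm_pm_clk_create() removes the need for a matching pm_clk_destroy() on the error and remove paths. A minimal probe-path sketch, assuming the existing pm_clk_add() helper from this header; foo_probe() is hypothetical:

#include <linux/pm_clock.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* devm-managed equivalent of pm_clk_create(); torn down on unbind */
	ret = devm_pm_clk_create(&pdev->dev);
	if (ret)
		return ret;

	/* add the device's first clock (con_id NULL) to the PM clock list */
	return pm_clk_add(&pdev->dev, NULL);
}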
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 41004d97cefa..f776fb93eaa0 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -1,30 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* pm_domain.h - Definitions and headers related to device power domains.
*
* Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
- *
- * This file is released under the GPLv2.
*/
#ifndef _LINUX_PM_DOMAIN_H
#define _LINUX_PM_DOMAIN_H
#include <linux/device.h>
+#include <linux/ktime.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
+#include <linux/cpumask.h>
+#include <linux/time64.h>
-/* Defines used for the flags field in the struct generic_pm_domain */
-#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */
-#define GENPD_FLAG_IRQ_SAFE (1U << 1) /* PM domain operates in atomic */
-#define GENPD_FLAG_ALWAYS_ON (1U << 2) /* PM domain is always powered on */
+/*
+ * Flags to control the behaviour of a genpd.
+ *
+ * These flags may be set in the struct generic_pm_domain's flags field by a
+ * genpd backend driver. The flags must be set before it calls pm_genpd_init(),
+ * which initializes a genpd.
+ *
+ * GENPD_FLAG_PM_CLK: Instructs genpd to use the PM clk framework,
+ * while powering on/off attached devices.
+ *
+ * GENPD_FLAG_IRQ_SAFE: This informs genpd that its backend callbacks,
+ * ->power_on|off(), don't sleep. Hence, they
+ * can be invoked from within atomic context, which
+ * enables genpd to power on/off the PM domain,
+ * even when pm_runtime_is_irq_safe() returns true
+ * for any of its attached devices. Note that a
+ * genpd having this flag set requires its
+ * master domains to also have it set.
+ *
+ * GENPD_FLAG_ALWAYS_ON: Instructs genpd to always keep the PM domain
+ * powered on.
+ *
+ * GENPD_FLAG_ACTIVE_WAKEUP: Instructs genpd to keep the PM domain powered
+ * on, in case any of its attached devices is used
+ * in the wakeup path to serve system wakeups.
+ *
+ * GENPD_FLAG_CPU_DOMAIN: Instructs genpd that it should expect to get
+ * devices attached, which may belong to CPUs or
+ * possibly have subdomains with CPUs attached.
+ * This flag enables the genpd backend driver to
+ * deploy idle power management support for CPUs
+ * and groups of CPUs. Note that the backend
+ * driver must then comply with the so-called
+ * last-man-standing algorithm for the CPUs in the
+ * PM domain.
+ *
+ * GENPD_FLAG_RPM_ALWAYS_ON: Instructs genpd to always keep the PM domain
+ * powered on except for system suspend.
+ *
+ * GENPD_FLAG_MIN_RESIDENCY: Enable the genpd governor to consider its
+ * components' next wakeup when determining the
+ * optimal idle state.
+ */
+#define GENPD_FLAG_PM_CLK (1U << 0)
+#define GENPD_FLAG_IRQ_SAFE (1U << 1)
+#define GENPD_FLAG_ALWAYS_ON (1U << 2)
+#define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3)
+#define GENPD_FLAG_CPU_DOMAIN (1U << 4)
+#define GENPD_FLAG_RPM_ALWAYS_ON (1U << 5)
+#define GENPD_FLAG_MIN_RESIDENCY (1U << 6)
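As the comment above notes, a genpd backend must populate flags before registering the domain. A minimal sketch of a provider doing so, using only the pm_genpd_init() signature declared later in this header; the foo_* names and the power-rail callbacks are hypothetical:

#include <linux/pm_domain.h>

static int foo_pd_power_on(struct generic_pm_domain *genpd)
{
	/* enable the domain's power rail (hypothetical hardware) */
	return 0;
}

static int foo_pd_power_off(struct generic_pm_domain *genpd)
{
	/* disable the rail */
	return 0;
}

static struct generic_pm_domain foo_pd = {
	.power_on = foo_pd_power_on,
	.power_off = foo_pd_power_off,
	/* must be set before pm_genpd_init() runs */
	.flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP,
};

static int foo_pd_register(void)
{
	/* NULL selects the default governor; true = starts powered off */
	return pm_genpd_init(&foo_pd, NULL, true);
}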
enum gpd_status {
- GPD_STATE_ACTIVE = 0, /* PM domain is active */
- GPD_STATE_POWER_OFF, /* PM domain is off */
+ GENPD_STATE_ON = 0, /* PM domain is on */
+ GENPD_STATE_OFF, /* PM domain is off */
+};
+
+enum genpd_notication {
+ GENPD_NOTIFY_PRE_OFF = 0,
+ GENPD_NOTIFY_OFF,
+ GENPD_NOTIFY_PRE_ON,
+ GENPD_NOTIFY_ON,
};
struct dev_power_governor {
@@ -35,25 +90,41 @@ struct dev_power_governor {
struct gpd_dev_ops {
int (*start)(struct device *dev);
int (*stop)(struct device *dev);
- bool (*active_wakeup)(struct device *dev);
+};
+
+struct genpd_governor_data {
+ s64 max_off_time_ns;
+ bool max_off_time_changed;
+ ktime_t next_wakeup;
+ ktime_t next_hrtimer;
+ bool cached_power_down_ok;
+ bool cached_power_down_state_idx;
};
struct genpd_power_state {
s64 power_off_latency_ns;
s64 power_on_latency_ns;
s64 residency_ns;
+ u64 usage;
+ u64 rejected;
struct fwnode_handle *fwnode;
+ u64 idle_time;
+ void *data;
};
struct genpd_lock_ops;
+struct dev_pm_opp;
+struct opp_table;
struct generic_pm_domain {
+ struct device dev;
struct dev_pm_domain domain; /* PM domain operations */
struct list_head gpd_list_node; /* Node in the global PM domains list */
- struct list_head master_links; /* Links with PM domain as a master */
- struct list_head slave_links; /* Links with PM domain as a slave */
+ struct list_head parent_links; /* Links with PM domain as a parent */
+ struct list_head child_links; /* Links with PM domain as a child */
struct list_head dev_list; /* List of devices */
struct dev_power_governor *gov;
+ struct genpd_governor_data *gd; /* Data used by a genpd governor. */
struct work_struct power_off_work;
struct fwnode_handle *provider; /* Identity of the domain provider */
bool has_provider;
@@ -63,21 +134,30 @@ struct generic_pm_domain {
unsigned int device_count; /* Number of devices */
unsigned int suspended_count; /* System suspend device counter */
unsigned int prepared_count; /* Suspend counter of prepared devices */
+ unsigned int performance_state; /* Aggregated max performance state */
+ cpumask_var_t cpus; /* A cpumask of the attached CPUs */
+ bool synced_poweroff; /* A consumer needs a synced poweroff */
int (*power_off)(struct generic_pm_domain *domain);
int (*power_on)(struct generic_pm_domain *domain);
+ struct raw_notifier_head power_notifiers; /* Power on/off notifiers */
+ struct opp_table *opp_table; /* OPP table of the genpd */
+ unsigned int (*opp_to_performance_state)(struct generic_pm_domain *genpd,
+ struct dev_pm_opp *opp);
+ int (*set_performance_state)(struct generic_pm_domain *genpd,
+ unsigned int state);
struct gpd_dev_ops dev_ops;
- s64 max_off_time_ns; /* Maximum allowed "suspended" time. */
- bool max_off_time_changed;
- bool cached_power_down_ok;
int (*attach_dev)(struct generic_pm_domain *domain,
struct device *dev);
void (*detach_dev)(struct generic_pm_domain *domain,
struct device *dev);
unsigned int flags; /* Bit field of configs for genpd */
struct genpd_power_state *states;
+ void (*free_states)(struct genpd_power_state *states,
+ unsigned int state_count);
unsigned int state_count; /* number of states */
unsigned int state_idx; /* state that genpd will go to when off */
- void *free; /* Free the state that was allocated for default */
+ u64 on_time;
+ u64 accounting_time;
const struct genpd_lock_ops *lock_ops;
union {
struct mutex mlock;
@@ -95,16 +175,21 @@ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
}
struct gpd_link {
- struct generic_pm_domain *master;
- struct list_head master_node;
- struct generic_pm_domain *slave;
- struct list_head slave_node;
+ struct generic_pm_domain *parent;
+ struct list_head parent_node;
+ struct generic_pm_domain *child;
+ struct list_head child_node;
+
+ /* Sub-domain's per-master domain performance state */
+ unsigned int performance_state;
+ unsigned int prev_performance_state;
};
struct gpd_timing_data {
s64 suspend_latency_ns;
s64 resume_latency_ns;
s64 effective_constraint_ns;
+ ktime_t next_wakeup;
bool constraint_changed;
bool cached_suspend_ok;
};
@@ -116,8 +201,13 @@ struct pm_domain_data {
struct generic_pm_domain_data {
struct pm_domain_data base;
- struct gpd_timing_data td;
+ struct gpd_timing_data *td;
struct notifier_block nb;
+ struct notifier_block *power_nb;
+ int cpu;
+ unsigned int performance_state;
+ unsigned int default_pstate;
+ unsigned int rpm_pstate;
void *data;
};
@@ -132,46 +222,49 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
return to_gpd_data(dev->power.subsys_data->domain_data);
}
-extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
- struct device *dev,
- struct gpd_timing_data *td);
-
-extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
- struct device *dev);
-extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
- struct generic_pm_domain *new_subdomain);
-extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
- struct generic_pm_domain *target);
-extern int pm_genpd_init(struct generic_pm_domain *genpd,
- struct dev_power_governor *gov, bool is_off);
-extern int pm_genpd_remove(struct generic_pm_domain *genpd);
+int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev);
+int pm_genpd_remove_device(struct device *dev);
+int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *subdomain);
+int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *subdomain);
+int pm_genpd_init(struct generic_pm_domain *genpd,
+ struct dev_power_governor *gov, bool is_off);
+int pm_genpd_remove(struct generic_pm_domain *genpd);
+int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state);
+int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb);
+int dev_pm_genpd_remove_notifier(struct device *dev);
+void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next);
+ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev);
+void dev_pm_genpd_synced_poweroff(struct device *dev);
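The notifier interface added here delivers the GENPD_NOTIFY_* events defined earlier in this file. A sketch of a consumer registering for them, assuming the device is already attached to a genpd; foo_* names are hypothetical:

#include <linux/notifier.h>
#include <linux/pm_domain.h>

static int foo_genpd_notify(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	switch (action) {
	case GENPD_NOTIFY_PRE_OFF:
		/* save hardware context before the domain powers down */
		break;
	case GENPD_NOTIFY_ON:
		/* domain power restored; reinitialize if needed */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block foo_genpd_nb = {
	.notifier_call = foo_genpd_notify,
};

static int foo_register_pd_notifier(struct device *dev)
{
	/* fails unless dev is attached to a single genpd */
	return dev_pm_genpd_add_notifier(dev, &foo_genpd_nb);
}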
extern struct dev_power_governor simple_qos_governor;
extern struct dev_power_governor pm_domain_always_on_gov;
+#ifdef CONFIG_CPU_IDLE
+extern struct dev_power_governor pm_domain_cpu_gov;
+#endif
#else
static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
{
return ERR_PTR(-ENOSYS);
}
-static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
- struct device *dev,
- struct gpd_timing_data *td)
+static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
+ struct device *dev)
{
return -ENOSYS;
}
-static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd,
- struct device *dev)
+static inline int pm_genpd_remove_device(struct device *dev)
{
return -ENOSYS;
}
static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
- struct generic_pm_domain *new_sd)
+ struct generic_pm_domain *subdomain)
{
return -ENOSYS;
}
static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
- struct generic_pm_domain *target)
+ struct generic_pm_domain *subdomain)
{
return -ENOSYS;
}
@@ -182,25 +275,46 @@ static inline int pm_genpd_init(struct generic_pm_domain *genpd,
}
static inline int pm_genpd_remove(struct generic_pm_domain *genpd)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
-#define simple_qos_governor (*(struct dev_power_governor *)(NULL))
-#define pm_domain_always_on_gov (*(struct dev_power_governor *)(NULL))
-#endif
+static inline int dev_pm_genpd_set_performance_state(struct device *dev,
+ unsigned int state)
+{
+ return -EOPNOTSUPP;
+}
-static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
- struct device *dev)
+static inline int dev_pm_genpd_add_notifier(struct device *dev,
+ struct notifier_block *nb)
{
- return __pm_genpd_add_device(genpd, dev, NULL);
+ return -EOPNOTSUPP;
}
+static inline int dev_pm_genpd_remove_notifier(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
+{ }
+
+static inline ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
+{
+ return KTIME_MAX;
+}
+static inline void dev_pm_genpd_synced_poweroff(struct device *dev)
+{ }
+
+#define simple_qos_governor (*(struct dev_power_governor *)(NULL))
+#define pm_domain_always_on_gov (*(struct dev_power_governor *)(NULL))
+#endif
+
#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP
-extern void pm_genpd_syscore_poweroff(struct device *dev);
-extern void pm_genpd_syscore_poweron(struct device *dev);
+void dev_pm_genpd_suspend(struct device *dev);
+void dev_pm_genpd_resume(struct device *dev);
#else
-static inline void pm_genpd_syscore_poweroff(struct device *dev) {}
-static inline void pm_genpd_syscore_poweron(struct device *dev) {}
+static inline void dev_pm_genpd_suspend(struct device *dev) {}
+static inline void dev_pm_genpd_resume(struct device *dev) {}
#endif
/* OF PM domain providers */
@@ -221,26 +335,33 @@ int of_genpd_add_provider_simple(struct device_node *np,
int of_genpd_add_provider_onecell(struct device_node *np,
struct genpd_onecell_data *data);
void of_genpd_del_provider(struct device_node *np);
-extern int of_genpd_add_device(struct of_phandle_args *args,
- struct device *dev);
-extern int of_genpd_add_subdomain(struct of_phandle_args *parent,
- struct of_phandle_args *new_subdomain);
-extern struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
-extern int of_genpd_parse_idle_states(struct device_node *dn,
- struct genpd_power_state **states, int *n);
+int of_genpd_add_device(struct of_phandle_args *args, struct device *dev);
+int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
+ struct of_phandle_args *subdomain_spec);
+int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
+ struct of_phandle_args *subdomain_spec);
+struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
+int of_genpd_parse_idle_states(struct device_node *dn,
+ struct genpd_power_state **states, int *n);
+unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
+ struct dev_pm_opp *opp);
int genpd_dev_pm_attach(struct device *dev);
+struct device *genpd_dev_pm_attach_by_id(struct device *dev,
+ unsigned int index);
+struct device *genpd_dev_pm_attach_by_name(struct device *dev,
+ const char *name);
#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
static inline int of_genpd_add_provider_simple(struct device_node *np,
struct generic_pm_domain *genpd)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int of_genpd_add_provider_onecell(struct device_node *np,
struct genpd_onecell_data *data)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline void of_genpd_del_provider(struct device_node *np) {}
@@ -251,8 +372,14 @@ static inline int of_genpd_add_device(struct of_phandle_args *args,
return -ENODEV;
}
-static inline int of_genpd_add_subdomain(struct of_phandle_args *parent,
- struct of_phandle_args *new_subdomain)
+static inline int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
+ struct of_phandle_args *subdomain_spec)
+{
+ return -ENODEV;
+}
+
+static inline int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
+ struct of_phandle_args *subdomain_spec)
{
return -ENODEV;
}
@@ -263,28 +390,66 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn,
return -ENODEV;
}
+static inline unsigned int
+pm_genpd_opp_to_performance_state(struct device *genpd_dev,
+ struct dev_pm_opp *opp)
+{
+ return 0;
+}
+
static inline int genpd_dev_pm_attach(struct device *dev)
{
- return -ENODEV;
+ return 0;
+}
+
+static inline struct device *genpd_dev_pm_attach_by_id(struct device *dev,
+ unsigned int index)
+{
+ return NULL;
+}
+
+static inline struct device *genpd_dev_pm_attach_by_name(struct device *dev,
+ const char *name)
+{
+ return NULL;
}
static inline
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
#ifdef CONFIG_PM
-extern int dev_pm_domain_attach(struct device *dev, bool power_on);
-extern void dev_pm_domain_detach(struct device *dev, bool power_off);
-extern void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
+int dev_pm_domain_attach(struct device *dev, bool power_on);
+struct device *dev_pm_domain_attach_by_id(struct device *dev,
+ unsigned int index);
+struct device *dev_pm_domain_attach_by_name(struct device *dev,
+ const char *name);
+void dev_pm_domain_detach(struct device *dev, bool power_off);
+int dev_pm_domain_start(struct device *dev);
+void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
#else
static inline int dev_pm_domain_attach(struct device *dev, bool power_on)
{
- return -ENODEV;
+ return 0;
+}
+static inline struct device *dev_pm_domain_attach_by_id(struct device *dev,
+ unsigned int index)
+{
+ return NULL;
+}
+static inline struct device *dev_pm_domain_attach_by_name(struct device *dev,
+ const char *name)
+{
+ return NULL;
}
static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
+static inline int dev_pm_domain_start(struct device *dev)
+{
+ return 0;
+}
static inline void dev_pm_domain_set(struct device *dev,
struct dev_pm_domain *pd) {}
#endif
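The new by-name attach variant serves devices spanning multiple PM domains, where the core cannot pick one automatically. A sketch under the usual convention that the name matches an entry in the device's power-domain-names DT property; "perf" is a hypothetical domain name:

#include <linux/err.h>
#include <linux/pm_domain.h>

static int foo_attach_perf_domain(struct device *dev)
{
	struct device *perf_dev;

	perf_dev = dev_pm_domain_attach_by_name(dev, "perf");
	if (IS_ERR(perf_dev))
		return PTR_ERR(perf_dev);
	if (!perf_dev)
		return -ENODEV;	/* no such domain described for this device */

	/* use the returned virtual device for runtime PM of that domain,
	 * then detach it on teardown: */
	dev_pm_domain_detach(perf_dev, true);
	return 0;
}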
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 51ec727b4824..dc1fb5890792 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Generic OPP Interface
*
@@ -5,15 +6,12 @@
* Nishanth Menon
* Romit Dasgupta
* Kevin Hilman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_OPP_H__
#define __LINUX_OPP_H__
+#include <linux/energy_model.h>
#include <linux/err.h>
#include <linux/notifier.h>
@@ -25,6 +23,7 @@ struct opp_table;
enum dev_pm_opp_event {
OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
+ OPP_EVENT_ADJUST_VOLTAGE,
};
/**
@@ -33,47 +32,64 @@ enum dev_pm_opp_event {
* @u_volt_min: Minimum voltage in microvolts corresponding to this OPP
* @u_volt_max: Maximum voltage in microvolts corresponding to this OPP
* @u_amp: Maximum current drawn by the device in microamperes
+ * @u_watt: Power used by the device in microwatts
*
- * This structure stores the voltage/current values for a single power supply.
+ * This structure stores the voltage/current/power values for a single power
+ * supply.
*/
struct dev_pm_opp_supply {
unsigned long u_volt;
unsigned long u_volt_min;
unsigned long u_volt_max;
unsigned long u_amp;
+ unsigned long u_watt;
};
/**
- * struct dev_pm_opp_info - OPP freq/voltage/current values
- * @rate: Target clk rate in hz
- * @supplies: Array of voltage/current values for all power supplies
+ * struct dev_pm_opp_icc_bw - Interconnect bandwidth values
+ * @avg: Average bandwidth corresponding to this OPP (in icc units)
+ * @peak: Peak bandwidth corresponding to this OPP (in icc units)
*
- * This structure stores the freq/voltage/current values for a single OPP.
+ * This structure stores the bandwidth values for a single interconnect path.
*/
-struct dev_pm_opp_info {
- unsigned long rate;
- struct dev_pm_opp_supply *supplies;
+struct dev_pm_opp_icc_bw {
+ u32 avg;
+ u32 peak;
};
+typedef int (*config_regulators_t)(struct device *dev,
+ struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
+ struct regulator **regulators, unsigned int count);
+
+typedef int (*config_clks_t)(struct device *dev, struct opp_table *opp_table,
+ struct dev_pm_opp *opp, void *data, bool scaling_down);
+
/**
- * struct dev_pm_set_opp_data - Set OPP data
- * @old_opp: Old OPP info
- * @new_opp: New OPP info
- * @regulators: Array of regulator pointers
- * @regulator_count: Number of regulators
- * @clk: Pointer to clk
- * @dev: Pointer to the struct device
+ * struct dev_pm_opp_config - Device OPP configuration values
+ * @clk_names: Clk names, NULL terminated array.
+ * @config_clks: Custom set clk helper.
+ * @prop_name: Name to postfix to properties.
+ * @config_regulators: Custom set regulator helper.
+ * @supported_hw: Array of hierarchy of versions to match.
+ * @supported_hw_count: Number of elements in the array.
+ * @regulator_names: Array of pointers to the names of the regulators, NULL terminated.
+ * @genpd_names: NULL terminated array of pointers containing names of genpds to
+ * attach.
+ * @virt_devs: Pointer to return the array of virtual devices.
*
- * This structure contains all information required for setting an OPP.
+ * This structure contains platform specific OPP configurations for the device.
*/
-struct dev_pm_set_opp_data {
- struct dev_pm_opp_info old_opp;
- struct dev_pm_opp_info new_opp;
-
- struct regulator **regulators;
- unsigned int regulator_count;
- struct clk *clk;
- struct device *dev;
+struct dev_pm_opp_config {
+ /* NULL terminated */
+ const char * const *clk_names;
+ config_clks_t config_clks;
+ const char *prop_name;
+ config_regulators_t config_regulators;
+ const unsigned int *supported_hw;
+ unsigned int supported_hw_count;
+ const char * const *regulator_names;
+ const char * const *genpd_names;
+ struct device ***virt_devs;
};
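This single config structure replaces the per-resource helper pairs removed below. A sketch of a driver filling it in and applying it through the devm variant declared further down; the clock and regulator names are hypothetical:

#include <linux/pm_opp.h>

static int foo_init_opp(struct device *dev)
{
	/* both name arrays must be NULL terminated */
	static const char * const clk_names[] = { "core", NULL };
	static const char * const reg_names[] = { "vdd-core", NULL };
	struct dev_pm_opp_config config = {
		.clk_names = clk_names,
		.regulator_names = reg_names,
	};

	/* devm variant: configuration is dropped automatically on unbind */
	return devm_pm_opp_set_config(dev, &config);
}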
#if defined(CONFIG_PM_OPP)
@@ -83,8 +99,17 @@ void dev_pm_opp_put_opp_table(struct opp_table *opp_table);
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
+int dev_pm_opp_get_supplies(struct dev_pm_opp *opp, struct dev_pm_opp_supply *supplies);
+
+unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp);
+
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
+unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp);
+
+unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
+ unsigned int index);
+
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
int dev_pm_opp_get_opp_count(struct device *dev);
@@ -96,17 +121,33 @@ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev);
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq,
bool available);
-
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq);
+struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
+ unsigned int level);
+struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
+ unsigned int *level);
+
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq);
+
+struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
+ unsigned int *bw, int index);
+
+struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
+ unsigned int *bw, int index);
+
void dev_pm_opp_put(struct dev_pm_opp *opp);
int dev_pm_opp_add(struct device *dev, unsigned long freq,
unsigned long u_volt);
void dev_pm_opp_remove(struct device *dev, unsigned long freq);
+void dev_pm_opp_remove_all_dynamic(struct device *dev);
+
+int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+ unsigned long u_volt, unsigned long u_volt_min,
+ unsigned long u_volt_max);
int dev_pm_opp_enable(struct device *dev, unsigned long freq);
@@ -115,25 +156,31 @@ int dev_pm_opp_disable(struct device *dev, unsigned long freq);
int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb);
int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb);
-struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count);
-void dev_pm_opp_put_supported_hw(struct opp_table *opp_table);
-struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name);
-void dev_pm_opp_put_prop_name(struct opp_table *opp_table);
-struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count);
-void dev_pm_opp_put_regulators(struct opp_table *opp_table);
-struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name);
-void dev_pm_opp_put_clkname(struct opp_table *opp_table);
-struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
-void dev_pm_opp_register_put_opp_helper(struct opp_table *opp_table);
+int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config);
+int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config);
+void dev_pm_opp_clear_config(int token);
+int dev_pm_opp_config_clks_simple(struct device *dev,
+ struct opp_table *opp_table, struct dev_pm_opp *opp, void *data,
+ bool scaling_down);
+
+struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, struct opp_table *dst_table, struct dev_pm_opp *src_opp);
+int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate);
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
+int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp);
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
void dev_pm_opp_remove_table(struct device *dev);
void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask);
+int dev_pm_opp_sync_regulators(struct device *dev);
#else
static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index)
+{
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {}
@@ -143,11 +190,33 @@ static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
return 0;
}
+static inline int dev_pm_opp_get_supplies(struct dev_pm_opp *opp, struct dev_pm_opp_supply *supplies)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp)
+{
+ return 0;
+}
+
static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
return 0;
}
+static inline unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
+{
+ return 0;
+}
+
+static inline
+unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
+ unsigned int index)
+{
+ return 0;
+}
+
static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
return false;
@@ -178,22 +247,46 @@ static inline unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
return 0;
}
+static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
+ unsigned int level)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
+ unsigned int *level)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq, bool available)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
+ unsigned int *bw, int index)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
+ unsigned int *bw, int index)
+{
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {}
@@ -201,13 +294,25 @@ static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {}
static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
unsigned long u_volt)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
}
+static inline void dev_pm_opp_remove_all_dynamic(struct device *dev)
+{
+}
+
+static inline int
+dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+ unsigned long u_volt, unsigned long u_volt_min,
+ unsigned long u_volt_max)
+{
+ return 0;
+}
+
static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
return 0;
@@ -220,60 +325,57 @@ static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq)
static inline int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
-static inline struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
- const u32 *versions,
- unsigned int count)
+static inline int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
{
- return ERR_PTR(-ENOTSUPP);
+ return -EOPNOTSUPP;
}
-static inline void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) {}
-
-static inline struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
- int (*set_opp)(struct dev_pm_set_opp_data *data))
+static inline int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
{
- return ERR_PTR(-ENOTSUPP);
+ return -EOPNOTSUPP;
}
-static inline void dev_pm_opp_register_put_opp_helper(struct opp_table *opp_table) {}
+static inline void dev_pm_opp_clear_config(int token) {}
-static inline struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+static inline int dev_pm_opp_config_clks_simple(struct device *dev,
+ struct opp_table *opp_table, struct dev_pm_opp *opp, void *data,
+ bool scaling_down)
{
- return ERR_PTR(-ENOTSUPP);
+ return -EOPNOTSUPP;
}
-static inline void dev_pm_opp_put_prop_name(struct opp_table *opp_table) {}
-
-static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count)
+static inline struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table,
+ struct opp_table *dst_table, struct dev_pm_opp *src_opp)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
-static inline void dev_pm_opp_put_regulators(struct opp_table *opp_table) {}
-
-static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name)
+static inline int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate)
{
- return ERR_PTR(-ENOTSUPP);
+ return -EOPNOTSUPP;
}
-static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {}
-
static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+
+static inline int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
+{
+ return -EOPNOTSUPP;
}
static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
@@ -289,28 +391,59 @@ static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask
{
}
+static inline int dev_pm_opp_sync_regulators(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_PM_OPP */
#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
int dev_pm_opp_of_add_table(struct device *dev);
+int dev_pm_opp_of_add_table_indexed(struct device *dev, int index);
+int devm_pm_opp_of_add_table_indexed(struct device *dev, int index);
void dev_pm_opp_of_remove_table(struct device *dev);
+int devm_pm_opp_of_add_table(struct device *dev);
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
+struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
+int of_get_required_opp_performance_state(struct device_node *np, int index);
+int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table);
+int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus);
+static inline void dev_pm_opp_of_unregister_em(struct device *dev)
+{
+ em_dev_unregister_perf_domain(dev);
+}
#else
static inline int dev_pm_opp_of_add_table(struct device *dev)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+
+static inline int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int devm_pm_opp_of_add_table_indexed(struct device *dev, int index)
+{
+ return -EOPNOTSUPP;
}
static inline void dev_pm_opp_of_remove_table(struct device *dev)
{
}
+static inline int devm_pm_opp_of_add_table(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
@@ -319,13 +452,183 @@ static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpum
static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
{
return NULL;
}
+
+static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
+{
+ return NULL;
+}
+
+static inline int dev_pm_opp_of_register_em(struct device *dev,
+ struct cpumask *cpus)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void dev_pm_opp_of_unregister_em(struct device *dev)
+{
+}
+
+static inline int of_get_required_opp_performance_state(struct device_node *np, int index)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table)
+{
+ return -EOPNOTSUPP;
+}
#endif
+/* OPP Configuration helpers */
+
+/* Regulators helpers */
+static inline int dev_pm_opp_set_regulators(struct device *dev,
+ const char * const names[])
+{
+ struct dev_pm_opp_config config = {
+ .regulator_names = names,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_regulators(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+static inline int devm_pm_opp_set_regulators(struct device *dev,
+ const char * const names[])
+{
+ struct dev_pm_opp_config config = {
+ .regulator_names = names,
+ };
+
+ return devm_pm_opp_set_config(dev, &config);
+}
+
+/* Supported-hw helpers */
+static inline int dev_pm_opp_set_supported_hw(struct device *dev,
+ const u32 *versions,
+ unsigned int count)
+{
+ struct dev_pm_opp_config config = {
+ .supported_hw = versions,
+ .supported_hw_count = count,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_supported_hw(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+static inline int devm_pm_opp_set_supported_hw(struct device *dev,
+ const u32 *versions,
+ unsigned int count)
+{
+ struct dev_pm_opp_config config = {
+ .supported_hw = versions,
+ .supported_hw_count = count,
+ };
+
+ return devm_pm_opp_set_config(dev, &config);
+}
+
+/* clkname helpers */
+static inline int dev_pm_opp_set_clkname(struct device *dev, const char *name)
+{
+ const char *names[] = { name, NULL };
+ struct dev_pm_opp_config config = {
+ .clk_names = names,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_clkname(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+static inline int devm_pm_opp_set_clkname(struct device *dev, const char *name)
+{
+ const char *names[] = { name, NULL };
+ struct dev_pm_opp_config config = {
+ .clk_names = names,
+ };
+
+ return devm_pm_opp_set_config(dev, &config);
+}
+
+/* config-regulators helpers */
+static inline int dev_pm_opp_set_config_regulators(struct device *dev,
+ config_regulators_t helper)
+{
+ struct dev_pm_opp_config config = {
+ .config_regulators = helper,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_config_regulators(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+/* genpd helpers */
+static inline int dev_pm_opp_attach_genpd(struct device *dev,
+ const char * const *names,
+ struct device ***virt_devs)
+{
+ struct dev_pm_opp_config config = {
+ .genpd_names = names,
+ .virt_devs = virt_devs,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_detach_genpd(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+static inline int devm_pm_opp_attach_genpd(struct device *dev,
+ const char * const *names,
+ struct device ***virt_devs)
+{
+ struct dev_pm_opp_config config = {
+ .genpd_names = names,
+ .virt_devs = virt_devs,
+ };
+
+ return devm_pm_opp_set_config(dev, &config);
+}
+
+/* prop-name helpers */
+static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+{
+ struct dev_pm_opp_config config = {
+ .prop_name = name,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_prop_name(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
#endif /* __LINUX_OPP_H__ */
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 032b55909145..4a69d4af3ff8 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -1,24 +1,20 @@
-#ifndef _LINUX_PM_QOS_H
-#define _LINUX_PM_QOS_H
-/* interface for the pm_qos_power infrastructure of the linux kernel.
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions related to Power Management Quality of Service (PM QoS).
*
- * Mark Gross <mgross@linux.intel.com>
+ * Copyright (C) 2020 Intel Corporation
+ *
+ * Authors:
+ * Mark Gross <mgross@linux.intel.com>
+ * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*/
+
+#ifndef _LINUX_PM_QOS_H
+#define _LINUX_PM_QOS_H
+
#include <linux/plist.h>
#include <linux/notifier.h>
#include <linux/device.h>
-#include <linux/workqueue.h>
-
-enum {
- PM_QOS_RESERVED = 0,
- PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_NETWORK_LATENCY,
- PM_QOS_NETWORK_THROUGHPUT,
- PM_QOS_MEMORY_BANDWIDTH,
-
- /* insert new class ID */
- PM_QOS_NUM_CLASSES,
-};
enum pm_qos_flags_status {
PM_QOS_FLAGS_UNDEFINED = -1,
@@ -27,51 +23,25 @@ enum pm_qos_flags_status {
PM_QOS_FLAGS_ALL,
};
-#define PM_QOS_DEFAULT_VALUE -1
+#define PM_QOS_DEFAULT_VALUE (-1)
+#define PM_QOS_LATENCY_ANY S32_MAX
+#define PM_QOS_LATENCY_ANY_NS ((s64)PM_QOS_LATENCY_ANY * NSEC_PER_USEC)
-#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
-#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
-#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
-#define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0
-#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0
+#define PM_QOS_CPU_LATENCY_DEFAULT_VALUE (2000 * USEC_PER_SEC)
+#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE PM_QOS_LATENCY_ANY
+#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY
+#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
+#define PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE 0
+#define PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE FREQ_QOS_MAX_DEFAULT_VALUE
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
-#define PM_QOS_LATENCY_ANY ((s32)(~(__u32)0 >> 1))
#define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
-#define PM_QOS_FLAG_REMOTE_WAKEUP (1 << 1)
-
-struct pm_qos_request {
- struct plist_node node;
- int pm_qos_class;
- struct delayed_work work; /* for pm_qos_update_request_timeout */
-};
-
-struct pm_qos_flags_request {
- struct list_head node;
- s32 flags; /* Do not change to 64 bit */
-};
-
-enum dev_pm_qos_req_type {
- DEV_PM_QOS_RESUME_LATENCY = 1,
- DEV_PM_QOS_LATENCY_TOLERANCE,
- DEV_PM_QOS_FLAGS,
-};
-
-struct dev_pm_qos_request {
- enum dev_pm_qos_req_type type;
- union {
- struct plist_node pnode;
- struct pm_qos_flags_request flr;
- } data;
- struct device *dev;
-};
enum pm_qos_type {
PM_QOS_UNITIALIZED,
PM_QOS_MAX, /* return the largest value */
PM_QOS_MIN, /* return the smallest value */
- PM_QOS_SUM /* return the sum */
};
/*
@@ -88,14 +58,66 @@ struct pm_qos_constraints {
struct blocking_notifier_head *notifiers;
};
+struct pm_qos_request {
+ struct plist_node node;
+ struct pm_qos_constraints *qos;
+};
+
+struct pm_qos_flags_request {
+ struct list_head node;
+ s32 flags; /* Do not change to 64 bit */
+};
+
struct pm_qos_flags {
struct list_head list;
s32 effective_flags; /* Do not change to 64 bit */
};
+
+#define FREQ_QOS_MIN_DEFAULT_VALUE 0
+#define FREQ_QOS_MAX_DEFAULT_VALUE S32_MAX
+
+enum freq_qos_req_type {
+ FREQ_QOS_MIN = 1,
+ FREQ_QOS_MAX,
+};
+
+struct freq_constraints {
+ struct pm_qos_constraints min_freq;
+ struct blocking_notifier_head min_freq_notifiers;
+ struct pm_qos_constraints max_freq;
+ struct blocking_notifier_head max_freq_notifiers;
+};
+
+struct freq_qos_request {
+ enum freq_qos_req_type type;
+ struct plist_node pnode;
+ struct freq_constraints *qos;
+};
+
+
+enum dev_pm_qos_req_type {
+ DEV_PM_QOS_RESUME_LATENCY = 1,
+ DEV_PM_QOS_LATENCY_TOLERANCE,
+ DEV_PM_QOS_MIN_FREQUENCY,
+ DEV_PM_QOS_MAX_FREQUENCY,
+ DEV_PM_QOS_FLAGS,
+};
+
+struct dev_pm_qos_request {
+ enum dev_pm_qos_req_type type;
+ union {
+ struct plist_node pnode;
+ struct pm_qos_flags_request flr;
+ struct freq_qos_request freq;
+ } data;
+ struct device *dev;
+};
+
struct dev_pm_qos {
struct pm_qos_constraints resume_latency;
struct pm_qos_constraints latency_tolerance;
+ struct freq_constraints freq;
struct pm_qos_flags flags;
struct dev_pm_qos_request *resume_latency_req;
struct dev_pm_qos_request *latency_tolerance_req;
@@ -114,38 +136,47 @@ static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
return req->dev != NULL;
}
+s32 pm_qos_read_value(struct pm_qos_constraints *c);
int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
enum pm_qos_req_action action, int value);
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
struct pm_qos_flags_request *req,
enum pm_qos_req_action action, s32 val);
-void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
- s32 value);
-void pm_qos_update_request(struct pm_qos_request *req,
- s32 new_value);
-void pm_qos_update_request_timeout(struct pm_qos_request *req,
- s32 new_value, unsigned long timeout_us);
-void pm_qos_remove_request(struct pm_qos_request *req);
-
-int pm_qos_request(int pm_qos_class);
-int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
-int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
-int pm_qos_request_active(struct pm_qos_request *req);
-s32 pm_qos_read_value(struct pm_qos_constraints *c);
+
+#ifdef CONFIG_CPU_IDLE
+s32 cpu_latency_qos_limit(void);
+bool cpu_latency_qos_request_active(struct pm_qos_request *req);
+void cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value);
+void cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value);
+void cpu_latency_qos_remove_request(struct pm_qos_request *req);
+#else
+static inline s32 cpu_latency_qos_limit(void) { return INT_MAX; }
+static inline bool cpu_latency_qos_request_active(struct pm_qos_request *req)
+{
+ return false;
+}
+static inline void cpu_latency_qos_add_request(struct pm_qos_request *req,
+ s32 value) {}
+static inline void cpu_latency_qos_update_request(struct pm_qos_request *req,
+ s32 new_value) {}
+static inline void cpu_latency_qos_remove_request(struct pm_qos_request *req) {}
+#endif
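The cpu_latency_qos_* interface replaces the old class-based pm_qos_add_request() API for the CPU DMA latency case. A sketch of the intended request lifecycle; the 20 us bound and the foo_* functions are arbitrary examples:

#include <linux/pm_qos.h>

static struct pm_qos_request foo_qos;

static void foo_latency_critical_begin(void)
{
	/* cap CPU wakeup latency at 20 us for the duration of the burst */
	cpu_latency_qos_add_request(&foo_qos, 20);
}

static void foo_latency_critical_end(void)
{
	cpu_latency_qos_remove_request(&foo_qos);
}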
#ifdef CONFIG_PM
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
-s32 __dev_pm_qos_read_value(struct device *dev);
-s32 dev_pm_qos_read_value(struct device *dev);
+s32 __dev_pm_qos_resume_latency(struct device *dev);
+s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type);
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
enum dev_pm_qos_req_type type, s32 value);
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
int dev_pm_qos_add_notifier(struct device *dev,
- struct notifier_block *notifier);
+ struct notifier_block *notifier,
+ enum dev_pm_qos_req_type type);
int dev_pm_qos_remove_notifier(struct device *dev,
- struct notifier_block *notifier);
+ struct notifier_block *notifier,
+ enum dev_pm_qos_req_type type);
void dev_pm_qos_constraints_init(struct device *dev);
void dev_pm_qos_constraints_destroy(struct device *dev);
int dev_pm_qos_add_ancestor_request(struct device *dev,
@@ -171,10 +202,11 @@ static inline s32 dev_pm_qos_requested_flags(struct device *dev)
return dev->power.qos->flags_req->data.flr.flags;
}
-static inline s32 dev_pm_qos_raw_read_value(struct device *dev)
+static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev)
{
return IS_ERR_OR_NULL(dev->power.qos) ?
- 0 : pm_qos_read_value(&dev->power.qos->resume_latency);
+ PM_QOS_RESUME_LATENCY_NO_CONSTRAINT :
+ pm_qos_read_value(&dev->power.qos->resume_latency);
}
#else
static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
@@ -183,10 +215,24 @@ static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev,
s32 mask)
{ return PM_QOS_FLAGS_UNDEFINED; }
-static inline s32 __dev_pm_qos_read_value(struct device *dev)
- { return 0; }
-static inline s32 dev_pm_qos_read_value(struct device *dev)
- { return 0; }
+static inline s32 __dev_pm_qos_resume_latency(struct device *dev)
+ { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; }
+static inline s32 dev_pm_qos_read_value(struct device *dev,
+ enum dev_pm_qos_req_type type)
+{
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ return PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ return PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
static inline int dev_pm_qos_add_request(struct device *dev,
struct dev_pm_qos_request *req,
enum dev_pm_qos_req_type type,
@@ -198,10 +244,12 @@ static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{ return 0; }
static inline int dev_pm_qos_add_notifier(struct device *dev,
- struct notifier_block *notifier)
+ struct notifier_block *notifier,
+ enum dev_pm_qos_req_type type)
{ return 0; }
static inline int dev_pm_qos_remove_notifier(struct device *dev,
- struct notifier_block *notifier)
+ struct notifier_block *notifier,
+ enum dev_pm_qos_req_type type)
{ return 0; }
static inline void dev_pm_qos_constraints_init(struct device *dev)
{
@@ -232,9 +280,40 @@ static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{ return 0; }
static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}
-static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
+static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
+{
+ return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+}
static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
-static inline s32 dev_pm_qos_raw_read_value(struct device *dev) { return 0; }
+static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev)
+{
+ return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+}
#endif
+static inline int freq_qos_request_active(struct freq_qos_request *req)
+{
+ return !IS_ERR_OR_NULL(req->qos);
+}
+
+void freq_constraints_init(struct freq_constraints *qos);
+
+s32 freq_qos_read_value(struct freq_constraints *qos,
+ enum freq_qos_req_type type);
+
+int freq_qos_add_request(struct freq_constraints *qos,
+ struct freq_qos_request *req,
+ enum freq_qos_req_type type, s32 value);
+int freq_qos_update_request(struct freq_qos_request *req, s32 new_value);
+int freq_qos_remove_request(struct freq_qos_request *req);
+int freq_qos_apply(struct freq_qos_request *req,
+ enum pm_qos_req_action action, s32 value);
+
+int freq_qos_add_notifier(struct freq_constraints *qos,
+ enum freq_qos_req_type type,
+ struct notifier_block *notifier);
+int freq_qos_remove_notifier(struct freq_constraints *qos,
+ enum freq_qos_req_type type,
+ struct notifier_block *notifier);
+
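A sketch of a min-frequency constraint using this interface. It assumes the freq_constraints instance embedded in struct cpufreq_policy (its constraints field), which is the usual anchor for these requests; the 800000 kHz floor is an arbitrary example:

#include <linux/cpufreq.h>
#include <linux/pm_qos.h>

static struct freq_qos_request foo_min_req;

static int foo_pin_min_freq(struct cpufreq_policy *policy)
{
	/* later adjusted with freq_qos_update_request() and dropped
	 * with freq_qos_remove_request() */
	return freq_qos_add_request(&policy->constraints, &foo_min_req,
				    FREQ_QOS_MIN, 800000);
}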
#endif
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 2efb08a60e63..9a8151a2bdea 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* pm_runtime.h - Device run-time power management helper functions.
*
* Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>
- *
- * This file is released under the GPLv2.
*/
#ifndef _LINUX_PM_RUNTIME_H
@@ -23,6 +22,40 @@
usage_count */
#define RPM_AUTO 0x08 /* Use autosuspend_delay */
+/*
+ * Use this for defining a set of PM operations to be used in all situations
+ * (system suspend, hibernation or runtime PM).
+ *
+ * Note that the behaviour differs from the deprecated UNIVERSAL_DEV_PM_OPS()
+ * macro, which uses the provided callbacks for both runtime PM and system
+ * sleep, while DEFINE_RUNTIME_DEV_PM_OPS() uses pm_runtime_force_suspend()
+ * and pm_runtime_force_resume() for its system sleep callbacks.
+ *
+ * If the underlying dev_pm_ops struct symbol has to be exported, use
+ * EXPORT_RUNTIME_DEV_PM_OPS() or EXPORT_GPL_RUNTIME_DEV_PM_OPS() instead.
+ */
+#define DEFINE_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
+ _DEFINE_DEV_PM_OPS(name, pm_runtime_force_suspend, \
+ pm_runtime_force_resume, suspend_fn, \
+ resume_fn, idle_fn)
+
+#define EXPORT_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
+ EXPORT_DEV_PM_OPS(name) = { \
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ }
+#define EXPORT_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
+ EXPORT_GPL_DEV_PM_OPS(name) = { \
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ }
+#define EXPORT_NS_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn, ns) \
+ EXPORT_NS_DEV_PM_OPS(name, ns) = { \
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ }
+#define EXPORT_NS_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn, ns) \
+ EXPORT_NS_GPL_DEV_PM_OPS(name, ns) = { \
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ }
+
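A sketch of the pattern this macro encourages: the driver supplies only runtime PM callbacks, and system sleep is routed through pm_runtime_force_suspend()/pm_runtime_force_resume() as the comment above describes. The foo_* callbacks are hypothetical:

#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* gate clocks, drop power (hypothetical hardware) */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* ungate clocks, restore state */
	return 0;
}

/* no separate system sleep callbacks needed; NULL = no idle callback */
static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_runtime_suspend,
				 foo_runtime_resume, NULL);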
#ifdef CONFIG_PM
extern struct workqueue_struct *pm_wq;
@@ -39,7 +72,7 @@ extern int pm_runtime_force_resume(struct device *dev);
extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
extern int __pm_runtime_resume(struct device *dev, int rpmflags);
-extern int pm_runtime_get_if_in_use(struct device *dev);
+extern int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
extern int pm_runtime_barrier(struct device *dev);
@@ -51,68 +84,170 @@ extern void pm_runtime_no_callbacks(struct device *dev);
extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
-extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
+extern u64 pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_update_max_time_suspended(struct device *dev,
s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
-extern void pm_runtime_clean_up_links(struct device *dev);
extern void pm_runtime_get_suppliers(struct device *dev);
extern void pm_runtime_put_suppliers(struct device *dev);
extern void pm_runtime_new_link(struct device *dev);
-extern void pm_runtime_drop_link(struct device *dev);
+extern void pm_runtime_drop_link(struct device_link *link);
+extern void pm_runtime_release_supplier(struct device_link *link);
+extern int devm_pm_runtime_enable(struct device *dev);
+
+/**
+ * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
+ * @dev: Target device.
+ *
+ * Increment the runtime PM usage counter of @dev if its runtime PM status is
+ * %RPM_ACTIVE and its runtime PM usage counter is greater than 0.
+ */
+static inline int pm_runtime_get_if_in_use(struct device *dev)
+{
+ return pm_runtime_get_if_active(dev, false);
+}
+
+/**
+ * pm_suspend_ignore_children - Set runtime PM behavior regarding children.
+ * @dev: Target device.
+ * @enable: Whether or not to ignore possible dependencies on children.
+ *
+ * If @enable is %true, the runtime PM framework will not take the
+ * dependencies of @dev on its children into account going forward;
+ * otherwise those dependencies will be taken into account.
+ */
static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
{
dev->power.ignore_children = enable;
}
+/**
+ * pm_runtime_get_noresume - Bump up runtime PM usage counter of a device.
+ * @dev: Target device.
+ */
static inline void pm_runtime_get_noresume(struct device *dev)
{
atomic_inc(&dev->power.usage_count);
}
+/**
+ * pm_runtime_put_noidle - Drop runtime PM usage counter of a device.
+ * @dev: Target device.
+ *
+ * Decrement the runtime PM usage counter of @dev unless it is 0 already.
+ */
static inline void pm_runtime_put_noidle(struct device *dev)
{
atomic_add_unless(&dev->power.usage_count, -1, 0);
}
+/**
+ * pm_runtime_suspended - Check whether or not a device is runtime-suspended.
+ * @dev: Target device.
+ *
+ * Return %true if runtime PM is enabled for @dev and its runtime PM status is
+ * %RPM_SUSPENDED, or %false otherwise.
+ *
+ * Note that the return value of this function can only be trusted if it is
+ * called under the runtime PM lock of @dev or under conditions in which
+ * runtime PM cannot be either disabled or enabled for @dev and its runtime PM
+ * status cannot change.
+ */
static inline bool pm_runtime_suspended(struct device *dev)
{
return dev->power.runtime_status == RPM_SUSPENDED
&& !dev->power.disable_depth;
}
+/**
+ * pm_runtime_active - Check whether or not a device is runtime-active.
+ * @dev: Target device.
+ *
+ * Return %true if runtime PM is disabled for @dev or its runtime PM status is
+ * %RPM_ACTIVE, or %false otherwise.
+ *
+ * Note that the return value of this function can only be trusted if it is
+ * called under the runtime PM lock of @dev or under conditions in which
+ * runtime PM cannot be either disabled or enabled for @dev and its runtime PM
+ * status cannot change.
+ */
static inline bool pm_runtime_active(struct device *dev)
{
return dev->power.runtime_status == RPM_ACTIVE
|| dev->power.disable_depth;
}
+/**
+ * pm_runtime_status_suspended - Check if runtime PM status is "suspended".
+ * @dev: Target device.
+ *
+ * Return %true if the runtime PM status of @dev is %RPM_SUSPENDED, or %false
+ * otherwise, regardless of whether or not runtime PM has been enabled for @dev.
+ *
+ * Note that the return value of this function can only be trusted if it is
+ * called under the runtime PM lock of @dev or under conditions in which the
+ * runtime PM status of @dev cannot change.
+ */
static inline bool pm_runtime_status_suspended(struct device *dev)
{
return dev->power.runtime_status == RPM_SUSPENDED;
}
+/**
+ * pm_runtime_enabled - Check if runtime PM is enabled.
+ * @dev: Target device.
+ *
+ * Return %true if runtime PM is enabled for @dev or %false otherwise.
+ *
+ * Note that the return value of this function can only be trusted if it is
+ * called under the runtime PM lock of @dev or under conditions in which
+ * runtime PM cannot be either disabled or enabled for @dev.
+ */
static inline bool pm_runtime_enabled(struct device *dev)
{
return !dev->power.disable_depth;
}
-static inline bool pm_runtime_callbacks_present(struct device *dev)
+/**
+ * pm_runtime_has_no_callbacks - Check if a device has no runtime PM callbacks.
+ * @dev: Target device.
+ *
+ * Return %true if @dev is a special device without runtime PM callbacks or
+ * %false otherwise.
+ */
+static inline bool pm_runtime_has_no_callbacks(struct device *dev)
{
- return !dev->power.no_callbacks;
+ return dev->power.no_callbacks;
}
+/**
+ * pm_runtime_mark_last_busy - Update the last access time of a device.
+ * @dev: Target device.
+ *
+ * Update the last access time of @dev used by the runtime PM autosuspend
+ * mechanism to the current time as returned by ktime_get_mono_fast_ns().
+ */
static inline void pm_runtime_mark_last_busy(struct device *dev)
{
- ACCESS_ONCE(dev->power.last_busy) = jiffies;
+ WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns());
}
+/**
+ * pm_runtime_is_irq_safe - Check if runtime PM can work in interrupt context.
+ * @dev: Target device.
+ *
+ * Return %true if @dev has been marked as an "IRQ-safe" device (with respect
+ * to runtime PM), in which case its runtime PM callbacks can be expected to
+ * work correctly when invoked from interrupt handlers.
+ */
static inline bool pm_runtime_is_irq_safe(struct device *dev)
{
return dev->power.irq_safe;
}
+extern u64 pm_runtime_suspended_time(struct device *dev);
+
#else /* !CONFIG_PM */
static inline bool queue_pm_work(struct work_struct *work) { return false; }
@@ -142,6 +277,11 @@ static inline int pm_runtime_get_if_in_use(struct device *dev)
{
return -EINVAL;
}
+static inline int pm_runtime_get_if_active(struct device *dev,
+ bool ign_usage_count)
+{
+ return -EINVAL;
+}
static inline int __pm_runtime_set_status(struct device *dev,
unsigned int status) { return 0; }
static inline int pm_runtime_barrier(struct device *dev) { return 0; }
@@ -150,6 +290,8 @@ static inline void __pm_runtime_disable(struct device *dev, bool c) {}
static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {}
+static inline int devm_pm_runtime_enable(struct device *dev) { return 0; }
+
static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
static inline void pm_runtime_get_noresume(struct device *dev) {}
static inline void pm_runtime_put_noidle(struct device *dev) {}
@@ -162,115 +304,296 @@ static inline void pm_runtime_no_callbacks(struct device *dev) {}
static inline void pm_runtime_irq_safe(struct device *dev) {}
static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }
-static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; }
+static inline bool pm_runtime_has_no_callbacks(struct device *dev) { return false; }
static inline void pm_runtime_mark_last_busy(struct device *dev) {}
static inline void __pm_runtime_use_autosuspend(struct device *dev,
bool use) {}
static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
int delay) {}
-static inline unsigned long pm_runtime_autosuspend_expiration(
+static inline u64 pm_runtime_autosuspend_expiration(
struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
bool enable){}
-static inline void pm_runtime_clean_up_links(struct device *dev) {}
static inline void pm_runtime_get_suppliers(struct device *dev) {}
static inline void pm_runtime_put_suppliers(struct device *dev) {}
static inline void pm_runtime_new_link(struct device *dev) {}
-static inline void pm_runtime_drop_link(struct device *dev) {}
+static inline void pm_runtime_drop_link(struct device_link *link) {}
+static inline void pm_runtime_release_supplier(struct device_link *link) {}
#endif /* !CONFIG_PM */
+/**
+ * pm_runtime_idle - Conditionally set up autosuspend of a device or suspend it.
+ * @dev: Target device.
+ *
+ * Invoke the "idle check" callback of @dev and, depending on its return value,
+ * set up autosuspend of @dev or suspend it (depending on whether or not
+ * autosuspend has been enabled for it).
+ */
static inline int pm_runtime_idle(struct device *dev)
{
return __pm_runtime_idle(dev, 0);
}
+/**
+ * pm_runtime_suspend - Suspend a device synchronously.
+ * @dev: Target device.
+ */
static inline int pm_runtime_suspend(struct device *dev)
{
return __pm_runtime_suspend(dev, 0);
}
+/**
+ * pm_runtime_autosuspend - Set up autosuspend of a device or suspend it.
+ * @dev: Target device.
+ *
+ * Set up autosuspend of @dev or suspend it (depending on whether or not
+ * autosuspend is enabled for it) without engaging its "idle check" callback.
+ */
static inline int pm_runtime_autosuspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_AUTO);
}
+/**
+ * pm_runtime_resume - Resume a device synchronously.
+ * @dev: Target device.
+ */
static inline int pm_runtime_resume(struct device *dev)
{
return __pm_runtime_resume(dev, 0);
}
+/**
+ * pm_request_idle - Queue up "idle check" execution for a device.
+ * @dev: Target device.
+ *
+ * Queue up a work item to run an equivalent of pm_runtime_idle() for @dev
+ * asynchronously.
+ */
static inline int pm_request_idle(struct device *dev)
{
return __pm_runtime_idle(dev, RPM_ASYNC);
}
+/**
+ * pm_request_resume - Queue up runtime-resume of a device.
+ * @dev: Target device.
+ */
static inline int pm_request_resume(struct device *dev)
{
return __pm_runtime_resume(dev, RPM_ASYNC);
}
+/**
+ * pm_request_autosuspend - Queue up autosuspend of a device.
+ * @dev: Target device.
+ *
+ * Queue up a work item to run an equivalent of pm_runtime_autosuspend() for @dev
+ * asynchronously.
+ */
static inline int pm_request_autosuspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO);
}
+/**
+ * pm_runtime_get - Bump up usage counter and queue up resume of a device.
+ * @dev: Target device.
+ *
+ * Bump up the runtime PM usage counter of @dev and queue up a work item to
+ * carry out runtime-resume of it.
+ */
static inline int pm_runtime_get(struct device *dev)
{
return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
}
+/**
+ * pm_runtime_get_sync - Bump up usage counter of a device and resume it.
+ * @dev: Target device.
+ *
+ * Bump up the runtime PM usage counter of @dev and carry out runtime-resume of
+ * it synchronously.
+ *
+ * The possible return values of this function are the same as for
+ * pm_runtime_resume() and the runtime PM usage counter of @dev remains
+ * incremented in all cases, even if it returns an error code.
+ * Consider using pm_runtime_resume_and_get() instead of it, especially
+ * if its return value is checked by the caller, as this is likely to result
+ * in cleaner code.
+ */
static inline int pm_runtime_get_sync(struct device *dev)
{
return __pm_runtime_resume(dev, RPM_GET_PUT);
}
+/**
+ * pm_runtime_resume_and_get - Bump up usage counter of a device and resume it.
+ * @dev: Target device.
+ *
+ * Resume @dev synchronously and if that is successful, increment its runtime
+ * PM usage counter. Return 0 if the runtime PM usage counter of @dev has been
+ * incremented or a negative error code otherwise.
+ */
+static inline int pm_runtime_resume_and_get(struct device *dev)
+{
+ int ret;
+
+ ret = __pm_runtime_resume(dev, RPM_GET_PUT);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * pm_runtime_put - Drop device usage counter and queue up "idle check" if 0.
+ * @dev: Target device.
+ *
+ * Decrement the runtime PM usage counter of @dev and if it turns out to be
+ * equal to 0, queue up a work item for @dev like in pm_request_idle().
+ */
static inline int pm_runtime_put(struct device *dev)
{
return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
}
+/**
+ * pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0.
+ * @dev: Target device.
+ *
+ * Decrement the runtime PM usage counter of @dev and if it turns out to be
+ * equal to 0, queue up a work item for @dev like in pm_request_autosuspend().
+ */
static inline int pm_runtime_put_autosuspend(struct device *dev)
{
return __pm_runtime_suspend(dev,
RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
}
+/**
+ * pm_runtime_put_sync - Drop device usage counter and run "idle check" if 0.
+ * @dev: Target device.
+ *
+ * Decrement the runtime PM usage counter of @dev and if it turns out to be
+ * equal to 0, invoke the "idle check" callback of @dev and, depending on its
+ * return value, set up autosuspend of @dev or suspend it (depending on whether
+ * or not autosuspend has been enabled for it).
+ *
+ * The possible return values of this function are the same as for
+ * pm_runtime_idle() and the runtime PM usage counter of @dev remains
+ * decremented in all cases, even if it returns an error code.
+ */
static inline int pm_runtime_put_sync(struct device *dev)
{
return __pm_runtime_idle(dev, RPM_GET_PUT);
}
+/**
+ * pm_runtime_put_sync_suspend - Drop device usage counter and suspend if 0.
+ * @dev: Target device.
+ *
+ * Decrement the runtime PM usage counter of @dev and if it turns out to be
+ * equal to 0, carry out runtime-suspend of @dev synchronously.
+ *
+ * The possible return values of this function are the same as for
+ * pm_runtime_suspend() and the runtime PM usage counter of @dev remains
+ * decremented in all cases, even if it returns an error code.
+ */
static inline int pm_runtime_put_sync_suspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_GET_PUT);
}
+/**
+ * pm_runtime_put_sync_autosuspend - Drop device usage counter and autosuspend if 0.
+ * @dev: Target device.
+ *
+ * Decrement the runtime PM usage counter of @dev and if it turns out to be
+ * equal to 0, set up autosuspend of @dev or suspend it synchronously (depending
+ * on whether or not autosuspend has been enabled for it).
+ *
+ * The possible return values of this function are the same as for
+ * pm_runtime_autosuspend() and the runtime PM usage counter of @dev remains
+ * decremented in all cases, even if it returns an error code.
+ */
static inline int pm_runtime_put_sync_autosuspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO);
}
+/**
+ * pm_runtime_set_active - Set runtime PM status to "active".
+ * @dev: Target device.
+ *
+ * Set the runtime PM status of @dev to %RPM_ACTIVE and ensure that its
+ * dependencies will be taken into account.
+ *
+ * It is not valid to call this function for devices with runtime PM enabled.
+ */
static inline int pm_runtime_set_active(struct device *dev)
{
return __pm_runtime_set_status(dev, RPM_ACTIVE);
}
+/**
+ * pm_runtime_set_suspended - Set runtime PM status to "suspended".
+ * @dev: Target device.
+ *
+ * Set the runtime PM status of @dev to %RPM_SUSPENDED and ensure that
+ * its dependencies will be taken into account.
+ *
+ * It is not valid to call this function for devices with runtime PM enabled.
+ */
static inline int pm_runtime_set_suspended(struct device *dev)
{
return __pm_runtime_set_status(dev, RPM_SUSPENDED);
}
+/**
+ * pm_runtime_disable - Disable runtime PM for a device.
+ * @dev: Target device.
+ *
+ * Prevent the runtime PM framework from working with @dev (by incrementing its
+ * "blocking" counter).
+ *
+ * For each invocation of this function for @dev there must be a matching
+ * pm_runtime_enable() call in order for runtime PM to be enabled for it.
+ */
static inline void pm_runtime_disable(struct device *dev)
{
__pm_runtime_disable(dev, true);
}
+/**
+ * pm_runtime_use_autosuspend - Allow autosuspend to be used for a device.
+ * @dev: Target device.
+ *
+ * Allow the runtime PM autosuspend mechanism to be used for @dev whenever
+ * requested (or "autosuspend" will be handled as direct runtime-suspend for
+ * it).
+ *
+ * NOTE: It's important to undo this with pm_runtime_dont_use_autosuspend()
+ * at driver exit time unless your driver initially enabled pm_runtime
+ * with devm_pm_runtime_enable() (which handles it for you).
+ */
static inline void pm_runtime_use_autosuspend(struct device *dev)
{
__pm_runtime_use_autosuspend(dev, true);
}
+/**
+ * pm_runtime_dont_use_autosuspend - Prevent autosuspend from being used.
+ * @dev: Target device.
+ *
+ * Prevent the runtime PM autosuspend mechanism from being used for @dev which
+ * means that "autosuspend" will be handled as direct runtime-suspend for it
+ * going forward.
+ */
static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
{
__pm_runtime_use_autosuspend(dev, false);
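
Putting the new pieces together, a driver would combine
DEFINE_RUNTIME_DEV_PM_OPS(), devm_pm_runtime_enable() and
pm_runtime_resume_and_get() roughly as in the sketch below. This is
illustrative only, not part of the patch; all "foo" identifiers are
hypothetical.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* Hypothetical: gate clocks, drop regulators, and so on. */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* Hypothetical: power the hardware back up. */
	return 0;
}

/*
 * System sleep reuses the runtime PM callbacks through
 * pm_runtime_force_suspend()/pm_runtime_force_resume(), which is what
 * distinguishes this macro from the deprecated UNIVERSAL_DEV_PM_OPS().
 */
DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_runtime_suspend,
			  foo_runtime_resume, NULL);

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* devm-managed: pm_runtime_disable() runs automatically on unbind. */
	ret = devm_pm_runtime_enable(&pdev->dev);
	if (ret)
		return ret;

	/*
	 * Preferred over pm_runtime_get_sync(): on failure the usage
	 * counter has already been dropped, so no put is needed here.
	 */
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0)
		return ret;

	/* ... hardware setup ... */

	pm_runtime_put(&pdev->dev);
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.driver	= {
		.name	= "foo",
		.pm	= pm_ptr(&foo_pm_ops),
	},
};
module_platform_driver(foo_driver);
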
diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h
index cd5b62db9084..dd42d16945d0 100644
--- a/include/linux/pm_wakeirq.h
+++ b/include/linux/pm_wakeirq.h
@@ -1,15 +1,5 @@
-/*
- * pm_wakeirq.h - Device wakeirq helper functions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* pm_wakeirq.h - Device wakeirq helper functions */
#ifndef _LINUX_PM_WAKEIRQ_H
#define _LINUX_PM_WAKEIRQ_H
@@ -17,8 +7,8 @@
#ifdef CONFIG_PM
extern int dev_pm_set_wake_irq(struct device *dev, int irq);
-extern int dev_pm_set_dedicated_wake_irq(struct device *dev,
- int irq);
+extern int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq);
+extern int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq);
extern void dev_pm_clear_wake_irq(struct device *dev);
extern void dev_pm_enable_wake_irq(struct device *dev);
extern void dev_pm_disable_wake_irq(struct device *dev);
@@ -35,6 +25,11 @@ static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
return 0;
}
+static inline int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
+{
+ return 0;
+}
+
static inline void dev_pm_clear_wake_irq(struct device *dev)
{
}
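
A short sketch of how a driver typically wires up the dedicated wake IRQ
helpers declared above. The "foo" names are hypothetical; pairing the call
with device_init_wakeup() follows common driver practice.

#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int foo_setup_wakeirq(struct device *dev, int irq)
{
	int err;

	/* The device must be wakeup-capable before a wake IRQ is attached. */
	device_init_wakeup(dev, true);

	/*
	 * A dedicated wake IRQ is managed by the PM core: it is armed when
	 * the device suspends and disarmed again on resume.
	 */
	err = dev_pm_set_dedicated_wake_irq(dev, irq);
	if (err)
		device_init_wakeup(dev, false);

	return err;
}
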
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index 4c2cba7ec1d4..77f4849e3418 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* pm_wakeup.h - Power management wakeup interface
*
* Copyright (C) 2008 Alan Stern
* Copyright (C) 2010 Rafael J. Wysocki, Novell Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _LINUX_PM_WAKEUP_H
@@ -34,6 +21,7 @@ struct wake_irq;
* struct wakeup_source - Representation of wakeup sources
*
* @name: Name of the wakeup source
+ * @id: Wakeup source id
* @entry: Wakeup source list entry
* @lock: Wakeup source lock
* @wakeirq: Optional device specific wakeirq
@@ -48,11 +36,13 @@ struct wake_irq;
* @relax_count: Number of times the wakeup source was deactivated.
* @expire_count: Number of times the wakeup source's timeout has expired.
* @wakeup_count: Number of times the wakeup source might abort suspend.
+ * @dev: Struct device for sysfs statistics about the wakeup source.
* @active: Status of the wakeup source.
- * @has_timeout: The wakeup source has been activated with a timeout.
+ * @autosleep_enabled: Autosleep is active, so update @prevent_sleep_time.
*/
struct wakeup_source {
const char *name;
+ int id;
struct list_head entry;
spinlock_t lock;
struct wake_irq *wakeirq;
@@ -68,10 +58,16 @@ struct wakeup_source {
unsigned long relax_count;
unsigned long expire_count;
unsigned long wakeup_count;
+ struct device *dev;
bool active:1;
bool autosleep_enabled:1;
};
+#define for_each_wakeup_source(ws) \
+ for ((ws) = wakeup_sources_walk_start(); \
+ (ws); \
+ (ws) = wakeup_sources_walk_next((ws)))
+
#ifdef CONFIG_PM_SLEEP
/*
@@ -88,19 +84,31 @@ static inline bool device_may_wakeup(struct device *dev)
return dev->power.can_wakeup && !!dev->power.wakeup;
}
+static inline bool device_wakeup_path(struct device *dev)
+{
+ return dev->power.wakeup_path;
+}
+
+static inline void device_set_wakeup_path(struct device *dev)
+{
+ dev->power.wakeup_path = true;
+}
+
/* drivers/base/power/wakeup.c */
-extern void wakeup_source_prepare(struct wakeup_source *ws, const char *name);
extern struct wakeup_source *wakeup_source_create(const char *name);
-extern void wakeup_source_drop(struct wakeup_source *ws);
extern void wakeup_source_destroy(struct wakeup_source *ws);
extern void wakeup_source_add(struct wakeup_source *ws);
extern void wakeup_source_remove(struct wakeup_source *ws);
-extern struct wakeup_source *wakeup_source_register(const char *name);
+extern struct wakeup_source *wakeup_source_register(struct device *dev,
+ const char *name);
extern void wakeup_source_unregister(struct wakeup_source *ws);
+extern int wakeup_sources_read_lock(void);
+extern void wakeup_sources_read_unlock(int idx);
+extern struct wakeup_source *wakeup_sources_walk_start(void);
+extern struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws);
extern int device_wakeup_enable(struct device *dev);
extern int device_wakeup_disable(struct device *dev);
extern void device_set_wakeup_capable(struct device *dev, bool capable);
-extern int device_init_wakeup(struct device *dev, bool val);
extern int device_set_wakeup_enable(struct device *dev, bool enable);
extern void __pm_stay_awake(struct wakeup_source *ws);
extern void pm_stay_awake(struct device *dev);
@@ -121,23 +129,19 @@ static inline bool device_can_wakeup(struct device *dev)
return dev->power.can_wakeup;
}
-static inline void wakeup_source_prepare(struct wakeup_source *ws,
- const char *name) {}
-
static inline struct wakeup_source *wakeup_source_create(const char *name)
{
return NULL;
}
-static inline void wakeup_source_drop(struct wakeup_source *ws) {}
-
static inline void wakeup_source_destroy(struct wakeup_source *ws) {}
static inline void wakeup_source_add(struct wakeup_source *ws) {}
static inline void wakeup_source_remove(struct wakeup_source *ws) {}
-static inline struct wakeup_source *wakeup_source_register(const char *name)
+static inline struct wakeup_source *wakeup_source_register(struct device *dev,
+ const char *name)
{
return NULL;
}
@@ -162,18 +166,18 @@ static inline int device_set_wakeup_enable(struct device *dev, bool enable)
return 0;
}
-static inline int device_init_wakeup(struct device *dev, bool val)
+static inline bool device_may_wakeup(struct device *dev)
{
- device_set_wakeup_capable(dev, val);
- device_set_wakeup_enable(dev, val);
- return 0;
+ return dev->power.can_wakeup && dev->power.should_wakeup;
}
-static inline bool device_may_wakeup(struct device *dev)
+static inline bool device_wakeup_path(struct device *dev)
{
- return dev->power.can_wakeup && dev->power.should_wakeup;
+ return false;
}
+static inline void device_set_wakeup_path(struct device *dev) {}
+
static inline void __pm_stay_awake(struct wakeup_source *ws) {}
static inline void pm_stay_awake(struct device *dev) {}
@@ -190,19 +194,6 @@ static inline void pm_wakeup_dev_event(struct device *dev, unsigned int msec,
#endif /* !CONFIG_PM_SLEEP */
-static inline void wakeup_source_init(struct wakeup_source *ws,
- const char *name)
-{
- wakeup_source_prepare(ws, name);
- wakeup_source_add(ws);
-}
-
-static inline void wakeup_source_trash(struct wakeup_source *ws)
-{
- wakeup_source_remove(ws);
- wakeup_source_drop(ws);
-}
-
static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
{
return pm_wakeup_ws_event(ws, msec, false);
@@ -218,4 +209,27 @@ static inline void pm_wakeup_hard_event(struct device *dev)
return pm_wakeup_dev_event(dev, 0, true);
}
+/**
+ * device_init_wakeup - Device wakeup initialization.
+ * @dev: Device to handle.
+ * @enable: Whether or not to enable @dev as a wakeup device.
+ *
+ * By default, most devices should leave wakeup disabled. The exceptions are
+ * devices that everyone expects to be wakeup sources: keyboards, power buttons,
+ * possibly network interfaces, etc. Also, devices that don't generate their
+ * own wakeup requests but merely forward requests from one bus to another
+ * (like PCI bridges) should have wakeup enabled by default.
+ */
+static inline int device_init_wakeup(struct device *dev, bool enable)
+{
+ if (enable) {
+ device_set_wakeup_capable(dev, true);
+ return device_wakeup_enable(dev);
+ } else {
+ device_wakeup_disable(dev);
+ device_set_wakeup_capable(dev, false);
+ return 0;
+ }
+}
+
#endif /* _LINUX_PM_WAKEUP_H */
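
To illustrate the reworked wakeup-source API, where wakeup_source_register()
now takes the device whose sysfs directory should carry the statistics, here
is a minimal sketch; the names and the 100 ms grace period are made up.

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

static struct wakeup_source *foo_ws;

static int foo_wakeup_init(struct device *dev)
{
	/* @dev may be NULL for a purely virtual wakeup source. */
	foo_ws = wakeup_source_register(dev, "foo_events");
	return foo_ws ? 0 : -ENOMEM;
}

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	/* Keep the system awake for 100 ms while the event is processed. */
	__pm_wakeup_event(foo_ws, 100);
	return IRQ_HANDLED;
}

static void foo_wakeup_exit(void)
{
	wakeup_source_unregister(foo_ws);
}
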
diff --git a/include/linux/pmbus.h b/include/linux/pmbus.h
index ee3c2aba2a8e..fa9f08164c36 100644
--- a/include/linux/pmbus.h
+++ b/include/linux/pmbus.h
@@ -1,26 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Hardware monitoring driver for PMBus devices
*
* Copyright (c) 2010, 2011 Ericsson AB.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _PMBUS_H_
#define _PMBUS_H_
+#include <linux/bits.h>
+
/* flags */
/*
@@ -36,7 +25,53 @@
* communication errors for no explicable reason. For such chips, checking
* the status register must be disabled.
*/
-#define PMBUS_SKIP_STATUS_CHECK (1 << 0)
+#define PMBUS_SKIP_STATUS_CHECK BIT(0)
+
+/*
+ * PMBUS_WRITE_PROTECTED
+ * Set if the chip is write protected and write protection is not determined
+ * by the standard WRITE_PROTECT command.
+ */
+#define PMBUS_WRITE_PROTECTED BIT(1)
+
+/*
+ * PMBUS_NO_CAPABILITY
+ *
+ * Some PMBus chips don't respond with valid data when reading the CAPABILITY
+ * register. For such chips, this flag should be set so that the PMBus core
+ * driver doesn't use CAPABILITY to determine its behavior.
+ */
+#define PMBUS_NO_CAPABILITY BIT(2)
+
+/*
+ * PMBUS_READ_STATUS_AFTER_FAILED_CHECK
+ *
+ * Some PMBus chips end up in an undefined state when trying to read an
+ * unsupported register. For such chips, it is necessary to reset the
+ * chip's PMBus controller to a known state after a failed register check.
+ * This can be done by reading a known register. By setting this flag the
+ * driver will try to read the STATUS register after each failed
+ * register check. This read may fail, but it will put the chip in a
+ * known state.
+ */
+#define PMBUS_READ_STATUS_AFTER_FAILED_CHECK BIT(3)
+
+/*
+ * PMBUS_NO_WRITE_PROTECT
+ *
+ * Some PMBus chips respond with invalid data when reading the WRITE_PROTECT
+ * register. For such chips, this flag should be set so that the PMBus core
+ * driver doesn't use the WRITE_PROTECT command to determine its behavior.
+ */
+#define PMBUS_NO_WRITE_PROTECT BIT(4)
+
+/*
+ * PMBUS_USE_COEFFICIENTS_CMD
+ *
+ * When this flag is set the PMBus core driver will use the COEFFICIENTS
+ * register to initialize the coefficients for the direct mode format.
+ */
+#define PMBUS_USE_COEFFICIENTS_CMD BIT(5)
struct pmbus_platform_data {
u32 flags; /* Device specific flags */
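
The flags above reach the core through struct pmbus_platform_data. As a
sketch (the I2C address and the particular quirk combination are
hypothetical), board code for a chip that mishandles both STATUS and
CAPABILITY reads might look like:

#include <linux/i2c.h>
#include <linux/pmbus.h>

static struct pmbus_platform_data foo_pmbus_pdata = {
	/* Chip misbehaves on status reads and returns junk for CAPABILITY. */
	.flags = PMBUS_SKIP_STATUS_CHECK | PMBUS_NO_CAPABILITY,
};

static struct i2c_board_info foo_pmbus_board_info = {
	I2C_BOARD_INFO("pmbus", 0x10),
	.platform_data = &foo_pmbus_pdata,
};
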
diff --git a/include/linux/pmu.h b/include/linux/pmu.h
index 99b400b8a241..c677442d007c 100644
--- a/include/linux/pmu.h
+++ b/include/linux/pmu.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for talking to the PMU. The PMU is a microcontroller
* which controls battery charging and system power on PowerBook 3400
@@ -8,10 +9,11 @@
#ifndef _LINUX_PMU_H
#define _LINUX_PMU_H
+#include <linux/rtc.h>
#include <uapi/linux/pmu.h>
-extern int find_via_pmu(void);
+extern int __init find_via_pmu(void);
extern int pmu_request(struct adb_request *req,
void (*done)(struct adb_request *), int nbytes, ...);
@@ -35,6 +37,9 @@ static inline void pmu_resume(void)
extern void pmu_enable_irled(int on);
+extern time64_t pmu_get_time(void);
+extern int pmu_set_rtc_time(struct rtc_time *tm);
+
extern void pmu_restart(void);
extern void pmu_shutdown(void);
extern void pmu_unlock(void);
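
The new pmu_get_time()/pmu_set_rtc_time() pair slots naturally into RTC
class operations. A hedged sketch (driver names hypothetical; it assumes
pmu_get_time() reports seconds in the Unix epoch, as its time64_t return
type suggests):

#include <linux/pmu.h>
#include <linux/rtc.h>

static int foo_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	rtc_time64_to_tm(pmu_get_time(), tm);
	return 0;
}

static int foo_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	return pmu_set_rtc_time(tm);
}
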
diff --git a/include/linux/pnfs_osd_xdr.h b/include/linux/pnfs_osd_xdr.h
deleted file mode 100644
index 17d7d0d20eca..000000000000
--- a/include/linux/pnfs_osd_xdr.h
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * pNFS-osd on-the-wire data structures
- *
- * Copyright (C) 2007 Panasas Inc. [year of first publication]
- * All rights reserved.
- *
- * Benny Halevy <bhalevy@panasas.com>
- * Boaz Harrosh <ooo@electrozaur.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * See the file COPYING included with this distribution for more details.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the Panasas company nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __PNFS_OSD_XDR_H__
-#define __PNFS_OSD_XDR_H__
-
-#include <linux/nfs_fs.h>
-
-/*
- * draft-ietf-nfsv4-minorversion-22
- * draft-ietf-nfsv4-pnfs-obj-12
- */
-
-/* Layout Structure */
-
-enum pnfs_osd_raid_algorithm4 {
- PNFS_OSD_RAID_0 = 1,
- PNFS_OSD_RAID_4 = 2,
- PNFS_OSD_RAID_5 = 3,
- PNFS_OSD_RAID_PQ = 4 /* Reed-Solomon P+Q */
-};
-
-/* struct pnfs_osd_data_map4 {
- * uint32_t odm_num_comps;
- * length4 odm_stripe_unit;
- * uint32_t odm_group_width;
- * uint32_t odm_group_depth;
- * uint32_t odm_mirror_cnt;
- * pnfs_osd_raid_algorithm4 odm_raid_algorithm;
- * };
- */
-struct pnfs_osd_data_map {
- u32 odm_num_comps;
- u64 odm_stripe_unit;
- u32 odm_group_width;
- u32 odm_group_depth;
- u32 odm_mirror_cnt;
- u32 odm_raid_algorithm;
-};
-
-/* struct pnfs_osd_objid4 {
- * deviceid4 oid_device_id;
- * uint64_t oid_partition_id;
- * uint64_t oid_object_id;
- * };
- */
-struct pnfs_osd_objid {
- struct nfs4_deviceid oid_device_id;
- u64 oid_partition_id;
- u64 oid_object_id;
-};
-
-/* For printout. I use:
- * kprint("dev(%llx:%llx)", _DEVID_LO(pointer), _DEVID_HI(pointer));
- * BE style
- */
-#define _DEVID_LO(oid_device_id) \
- (unsigned long long)be64_to_cpup((__be64 *)(oid_device_id)->data)
-
-#define _DEVID_HI(oid_device_id) \
- (unsigned long long)be64_to_cpup(((__be64 *)(oid_device_id)->data) + 1)
-
-enum pnfs_osd_version {
- PNFS_OSD_MISSING = 0,
- PNFS_OSD_VERSION_1 = 1,
- PNFS_OSD_VERSION_2 = 2
-};
-
-struct pnfs_osd_opaque_cred {
- u32 cred_len;
- void *cred;
-};
-
-enum pnfs_osd_cap_key_sec {
- PNFS_OSD_CAP_KEY_SEC_NONE = 0,
- PNFS_OSD_CAP_KEY_SEC_SSV = 1,
-};
-
-/* struct pnfs_osd_object_cred4 {
- * pnfs_osd_objid4 oc_object_id;
- * pnfs_osd_version4 oc_osd_version;
- * pnfs_osd_cap_key_sec4 oc_cap_key_sec;
- * opaque oc_capability_key<>;
- * opaque oc_capability<>;
- * };
- */
-struct pnfs_osd_object_cred {
- struct pnfs_osd_objid oc_object_id;
- u32 oc_osd_version;
- u32 oc_cap_key_sec;
- struct pnfs_osd_opaque_cred oc_cap_key;
- struct pnfs_osd_opaque_cred oc_cap;
-};
-
-/* struct pnfs_osd_layout4 {
- * pnfs_osd_data_map4 olo_map;
- * uint32_t olo_comps_index;
- * pnfs_osd_object_cred4 olo_components<>;
- * };
- */
-struct pnfs_osd_layout {
- struct pnfs_osd_data_map olo_map;
- u32 olo_comps_index;
- u32 olo_num_comps;
- struct pnfs_osd_object_cred *olo_comps;
-};
-
-/* Device Address */
-enum pnfs_osd_targetid_type {
- OBJ_TARGET_ANON = 1,
- OBJ_TARGET_SCSI_NAME = 2,
- OBJ_TARGET_SCSI_DEVICE_ID = 3,
-};
-
-/* union pnfs_osd_targetid4 switch (pnfs_osd_targetid_type4 oti_type) {
- * case OBJ_TARGET_SCSI_NAME:
- * string oti_scsi_name<>;
- *
- * case OBJ_TARGET_SCSI_DEVICE_ID:
- * opaque oti_scsi_device_id<>;
- *
- * default:
- * void;
- * };
- *
- * union pnfs_osd_targetaddr4 switch (bool ota_available) {
- * case TRUE:
- * netaddr4 ota_netaddr;
- * case FALSE:
- * void;
- * };
- *
- * struct pnfs_osd_deviceaddr4 {
- * pnfs_osd_targetid4 oda_targetid;
- * pnfs_osd_targetaddr4 oda_targetaddr;
- * uint64_t oda_lun;
- * opaque oda_systemid<>;
- * pnfs_osd_object_cred4 oda_root_obj_cred;
- * opaque oda_osdname<>;
- * };
- */
-struct pnfs_osd_targetid {
- u32 oti_type;
- struct nfs4_string oti_scsi_device_id;
-};
-
-/* struct netaddr4 {
- * // see struct rpcb in RFC1833
- * string r_netid<>; // network id
- * string r_addr<>; // universal address
- * };
- */
-struct pnfs_osd_net_addr {
- struct nfs4_string r_netid;
- struct nfs4_string r_addr;
-};
-
-struct pnfs_osd_targetaddr {
- u32 ota_available;
- struct pnfs_osd_net_addr ota_netaddr;
-};
-
-struct pnfs_osd_deviceaddr {
- struct pnfs_osd_targetid oda_targetid;
- struct pnfs_osd_targetaddr oda_targetaddr;
- u8 oda_lun[8];
- struct nfs4_string oda_systemid;
- struct pnfs_osd_object_cred oda_root_obj_cred;
- struct nfs4_string oda_osdname;
-};
-
-/* LAYOUTCOMMIT: layoutupdate */
-
-/* union pnfs_osd_deltaspaceused4 switch (bool dsu_valid) {
- * case TRUE:
- * int64_t dsu_delta;
- * case FALSE:
- * void;
- * };
- *
- * struct pnfs_osd_layoutupdate4 {
- * pnfs_osd_deltaspaceused4 olu_delta_space_used;
- * bool olu_ioerr_flag;
- * };
- */
-struct pnfs_osd_layoutupdate {
- u32 dsu_valid;
- s64 dsu_delta;
- u32 olu_ioerr_flag;
-};
-
-/* LAYOUTRETURN: I/O Rrror Report */
-
-enum pnfs_osd_errno {
- PNFS_OSD_ERR_EIO = 1,
- PNFS_OSD_ERR_NOT_FOUND = 2,
- PNFS_OSD_ERR_NO_SPACE = 3,
- PNFS_OSD_ERR_BAD_CRED = 4,
- PNFS_OSD_ERR_NO_ACCESS = 5,
- PNFS_OSD_ERR_UNREACHABLE = 6,
- PNFS_OSD_ERR_RESOURCE = 7
-};
-
-/* struct pnfs_osd_ioerr4 {
- * pnfs_osd_objid4 oer_component;
- * length4 oer_comp_offset;
- * length4 oer_comp_length;
- * bool oer_iswrite;
- * pnfs_osd_errno4 oer_errno;
- * };
- */
-struct pnfs_osd_ioerr {
- struct pnfs_osd_objid oer_component;
- u64 oer_comp_offset;
- u64 oer_comp_length;
- u32 oer_iswrite;
- u32 oer_errno;
-};
-
-/* OSD XDR Client API */
-/* Layout helpers */
-/* Layout decoding is done in two parts:
- * 1. First Call pnfs_osd_xdr_decode_layout_map to read in only the header part
- * of the layout. @iter members need not be initialized.
- * Returned:
- * @layout members are set. (@layout->olo_comps set to NULL).
- *
- * Zero on success, or negative error if passed xdr is broken.
- *
- * 2. 2nd Call pnfs_osd_xdr_decode_layout_comp() in a loop until it returns
- * false, to decode the next component.
- * Returned:
- * true if there is more to decode or false if we are done or error.
- *
- * Example:
- * struct pnfs_osd_xdr_decode_layout_iter iter;
- * struct pnfs_osd_layout layout;
- * struct pnfs_osd_object_cred comp;
- * int status;
- *
- * status = pnfs_osd_xdr_decode_layout_map(&layout, &iter, xdr);
- * if (unlikely(status))
- * goto err;
- * while(pnfs_osd_xdr_decode_layout_comp(&comp, &iter, xdr, &status)) {
- * // All of @comp strings point to inside the xdr_buffer
- * // or scrach buffer. Copy them out to user memory eg.
- * copy_single_comp(dest_comp++, &comp);
- * }
- * if (unlikely(status))
- * goto err;
- */
-
-struct pnfs_osd_xdr_decode_layout_iter {
- unsigned total_comps;
- unsigned decoded_comps;
-};
-
-extern int pnfs_osd_xdr_decode_layout_map(struct pnfs_osd_layout *layout,
- struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr);
-
-extern bool pnfs_osd_xdr_decode_layout_comp(struct pnfs_osd_object_cred *comp,
- struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr,
- int *err);
-
-/* Device Info helpers */
-
-/* Note: All strings inside @deviceaddr point to space inside @p.
- * @p should stay valid while @deviceaddr is in use.
- */
-extern void pnfs_osd_xdr_decode_deviceaddr(
- struct pnfs_osd_deviceaddr *deviceaddr, __be32 *p);
-
-/* layoutupdate (layout_commit) xdr helpers */
-extern int
-pnfs_osd_xdr_encode_layoutupdate(struct xdr_stream *xdr,
- struct pnfs_osd_layoutupdate *lou);
-
-/* osd_ioerror encoding (layout_return) */
-extern __be32 *pnfs_osd_xdr_ioerr_reserve_space(struct xdr_stream *xdr);
-extern void pnfs_osd_xdr_encode_ioerr(__be32 *p, struct pnfs_osd_ioerr *ioerr);
-
-#endif /* __PNFS_OSD_XDR_H__ */
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 2588ca6a9028..c2a7cfbca713 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Linux Plug and Play Support
* Copyright by Adam Belay <ambx1@neo.rr.com>
@@ -219,10 +220,8 @@ struct pnp_card {
#define global_to_pnp_card(n) list_entry(n, struct pnp_card, global_list)
#define protocol_to_pnp_card(n) list_entry(n, struct pnp_card, protocol_list)
#define to_pnp_card(n) container_of(n, struct pnp_card, dev)
-#define pnp_for_each_card(card) \
- for((card) = global_to_pnp_card(pnp_cards.next); \
- (card) != global_to_pnp_card(&pnp_cards); \
- (card) = global_to_pnp_card((card)->global_list.next))
+#define pnp_for_each_card(card) \
+ list_for_each_entry(card, &pnp_cards, global_list)
struct pnp_card_link {
struct pnp_card *card;
@@ -275,14 +274,9 @@ struct pnp_dev {
#define card_to_pnp_dev(n) list_entry(n, struct pnp_dev, card_list)
#define protocol_to_pnp_dev(n) list_entry(n, struct pnp_dev, protocol_list)
#define to_pnp_dev(n) container_of(n, struct pnp_dev, dev)
-#define pnp_for_each_dev(dev) \
- for((dev) = global_to_pnp_dev(pnp_global.next); \
- (dev) != global_to_pnp_dev(&pnp_global); \
- (dev) = global_to_pnp_dev((dev)->global_list.next))
-#define card_for_each_dev(card,dev) \
- for((dev) = card_to_pnp_dev((card)->devices.next); \
- (dev) != card_to_pnp_dev(&(card)->devices); \
- (dev) = card_to_pnp_dev((dev)->card_list.next))
+#define pnp_for_each_dev(dev) list_for_each_entry(dev, &pnp_global, global_list)
+#define card_for_each_dev(card, dev) \
+ list_for_each_entry(dev, &(card)->devices, card_list)
#define pnp_dev_name(dev) (dev)->name
static inline void *pnp_get_drvdata(struct pnp_dev *pdev)
@@ -378,7 +372,7 @@ struct pnp_id {
};
struct pnp_driver {
- char *name;
+ const char *name;
const struct pnp_device_id *id_table;
unsigned int flags;
int (*probe) (struct pnp_dev *dev, const struct pnp_device_id *dev_id);
@@ -436,14 +430,10 @@ struct pnp_protocol {
};
#define to_pnp_protocol(n) list_entry(n, struct pnp_protocol, protocol_list)
-#define protocol_for_each_card(protocol,card) \
- for((card) = protocol_to_pnp_card((protocol)->cards.next); \
- (card) != protocol_to_pnp_card(&(protocol)->cards); \
- (card) = protocol_to_pnp_card((card)->protocol_list.next))
-#define protocol_for_each_dev(protocol,dev) \
- for((dev) = protocol_to_pnp_dev((protocol)->devices.next); \
- (dev) != protocol_to_pnp_dev(&(protocol)->devices); \
- (dev) = protocol_to_pnp_dev((dev)->protocol_list.next))
+#define protocol_for_each_card(protocol, card) \
+ list_for_each_entry(card, &(protocol)->cards, protocol_list)
+#define protocol_for_each_dev(protocol, dev) \
+ list_for_each_entry(dev, &(protocol)->devices, protocol_list)
extern struct bus_type pnp_bus_type;
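
The list_for_each_entry()-based rewrites behave exactly like the old
open-coded loops. A sketch (note that pnp_global and pnp_cards are PnP-core
internals, so this pattern is mostly useful inside the core itself):

#include <linux/pnp.h>
#include <linux/printk.h>

static void foo_dump_pnp_devices(void)
{
	struct pnp_dev *dev;

	pnp_for_each_dev(dev)
		pr_info("pnp: %s\n", pnp_dev_name(dev));
}
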
diff --git a/include/linux/poison.h b/include/linux/poison.h
index a39540326417..851a855d3868 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_POISON_H
#define _LINUX_POISON_H
@@ -20,21 +21,13 @@
* non-initialized list entries.
*/
#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
-#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)
+#define LIST_POISON2 ((void *) 0x122 + POISON_POINTER_DELTA)
/********** include/linux/timer.h **********/
-/*
- * Magic number "tsta" to indicate a static timer initializer
- * for the object debugging code.
- */
#define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA)
-/********** mm/debug-pagealloc.c **********/
-#ifdef CONFIG_PAGE_POISONING_ZERO
-#define PAGE_POISON 0x00
-#else
+/********** mm/page_poison.c **********/
#define PAGE_POISON 0xaa
-#endif
/********** mm/page_alloc.c ************/
@@ -82,10 +75,19 @@
#define MUTEX_DEBUG_FREE 0x22
#define MUTEX_POISON_WW_CTX ((void *) 0x500 + POISON_POINTER_DELTA)
-/********** lib/flex_array.c **********/
-#define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */
-
/********** security/ **********/
#define KEY_DESTROY 0xbd
+/********** net/core/page_pool.c **********/
+#define PP_SIGNATURE (0x40 + POISON_POINTER_DELTA)
+
+/********** net/core/skbuff.c **********/
+#define SKB_LIST_POISON_NEXT ((void *)(0x800 + POISON_POINTER_DELTA))
+
+/********** kernel/bpf/ **********/
+#define BPF_PTR_POISON ((void *)(0xeB9FUL + POISON_POINTER_DELTA))
+
+/********** VFS **********/
+#define VFS_PTR_POISON ((void *)(0xF5 + POISON_POINTER_DELTA))
+
#endif
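
As a reminder of how these constants are consumed: list_del() writes
LIST_POISON1/LIST_POISON2 into a removed entry so that a stale dereference
faults instead of silently corrupting memory. A minimal debug-check sketch
(the helper is hypothetical):

#include <linux/list.h>
#include <linux/poison.h>

static bool foo_entry_was_deleted(const struct list_head *entry)
{
	/* list_del() leaves exactly these poison values behind. */
	return entry->next == LIST_POISON1 && entry->prev == LIST_POISON2;
}
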
diff --git a/include/linux/poll.h b/include/linux/poll.h
index 2889f09a1c60..a9e0e1c2d1f2 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_POLL_H
#define _LINUX_POLL_H
@@ -7,21 +8,24 @@
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/fs.h>
-#include <linux/sysctl.h>
#include <linux/uaccess.h>
#include <uapi/linux/poll.h>
+#include <uapi/linux/eventpoll.h>
-extern struct ctl_table epoll_table[]; /* for sysctl */
/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
additional memory. */
+#ifdef __clang__
+#define MAX_STACK_ALLOC 768
+#else
#define MAX_STACK_ALLOC 832
+#endif
#define FRONTEND_STACK_ALLOC 256
#define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC
#define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC
#define WQUEUES_STACK_ALLOC (MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
#define N_INLINE_POLL_ENTRIES (WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))
-#define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)
+#define DEFAULT_POLLMASK (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM)
struct poll_table_struct;
@@ -36,7 +40,7 @@ typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_
*/
typedef struct poll_table_struct {
poll_queue_proc _qproc;
- unsigned long _key;
+ __poll_t _key;
} poll_table;
static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
@@ -61,20 +65,32 @@ static inline bool poll_does_not_wait(const poll_table *p)
* to be started implicitly on poll(). You typically only want to do that
* if the application is actually polling for POLLIN and/or POLLOUT.
*/
-static inline unsigned long poll_requested_events(const poll_table *p)
+static inline __poll_t poll_requested_events(const poll_table *p)
{
- return p ? p->_key : ~0UL;
+ return p ? p->_key : ~(__poll_t)0;
}
static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
{
pt->_qproc = qproc;
- pt->_key = ~0UL; /* all events enabled */
+ pt->_key = ~(__poll_t)0; /* all events enabled */
+}
+
+static inline bool file_can_poll(struct file *file)
+{
+ return file->f_op->poll;
+}
+
+static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
+{
+ if (unlikely(!file->f_op->poll))
+ return DEFAULT_POLLMASK;
+ return file->f_op->poll(file, pt);
}
struct poll_table_entry {
struct file *filp;
- unsigned long key;
+ __poll_t key;
wait_queue_entry_t wait;
wait_queue_head_t *wait_address;
};
@@ -94,8 +110,6 @@ struct poll_wqueues {
extern void poll_initwait(struct poll_wqueues *pwq);
extern void poll_freewait(struct poll_wqueues *pwq);
-extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
- ktime_t *expires, unsigned long slack);
extern u64 select_estimate_accuracy(struct timespec64 *tv);
#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)
@@ -106,4 +120,28 @@ extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec,
long nsec);
+#define __MAP(v, from, to) \
+ (from < to ? (v & from) * (to/from) : (v & from) / (from/to))
+
+static inline __u16 mangle_poll(__poll_t val)
+{
+ __u16 v = (__force __u16)val;
+#define M(X) __MAP(v, (__force __u16)EPOLL##X, POLL##X)
+ return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) |
+ M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) |
+ M(HUP) | M(RDHUP) | M(MSG);
+#undef M
+}
+
+static inline __poll_t demangle_poll(u16 val)
+{
+#define M(X) (__force __poll_t)__MAP(val, POLL##X, (__force __u16)EPOLL##X)
+ return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) |
+ M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) |
+ M(HUP) | M(RDHUP) | M(MSG);
+#undef M
+}
+#undef __MAP
+
+
#endif /* _LINUX_POLL_H */
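
With the switch to __poll_t, a driver's .poll method is written against the
EPOLL* constants; mangle_poll()/demangle_poll() above translate between
those fixed values and the architecture-dependent POLL* numbers at the ABI
boundary. A minimal sketch of the typed style (names hypothetical):

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(foo_waitq);
static bool foo_data_ready;

static __poll_t foo_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = 0;

	/* Register on the wait queue; this never blocks. */
	poll_wait(file, &foo_waitq, wait);

	if (foo_data_ready)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
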
diff --git a/include/linux/polynomial.h b/include/linux/polynomial.h
new file mode 100644
index 000000000000..9e074a0bb6fa
--- /dev/null
+++ b/include/linux/polynomial.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ */
+
+#ifndef _POLYNOMIAL_H
+#define _POLYNOMIAL_H
+
+/*
+ * struct polynomial_term - one term descriptor of a polynomial
+ * @deg: degree of the term.
+ * @coef: multiplication factor of the term.
+ * @divider: distributed divider per each degree.
+ * @divider_leftover: divider leftover, which couldn't be redistributed.
+ */
+struct polynomial_term {
+ unsigned int deg;
+ long coef;
+ long divider;
+ long divider_leftover;
+};
+
+/*
+ * struct polynomial - a polynomial descriptor
+ * @total_divider: total data divider.
+ * @terms: polynomial terms, last term must have degree of 0
+ */
+struct polynomial {
+ long total_divider;
+ struct polynomial_term terms[];
+};
+
+long polynomial_calc(const struct polynomial *poly, long data);
+
+#endif
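
A sketch of how a descriptor might be laid out, assuming the evaluation
reduces to the sum of coef * data^deg over all terms divided by
total_divider when the per-term dividers are 1 (the per-term dividers exist
to keep intermediate products from overflowing). The transfer function and
names are hypothetical:

#include <linux/polynomial.h>

/* Hypothetical sensor transfer: T(N) = (3500 * N - 770000) / 1000. */
static const struct polynomial foo_temp_poly = {
	.total_divider = 1000,
	.terms = {
		{ .deg = 1, .coef = 3500, .divider = 1, .divider_leftover = 1 },
		{ .deg = 0, .coef = -770000, .divider = 1, .divider_leftover = 1 },
	},
};

static long foo_code_to_mdeg_c(long raw)
{
	return polynomial_calc(&foo_temp_poly, raw);
}
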
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index 38d8225510f1..468328b1e1dd 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* posix-clock.h - support for dynamic clock devices
*
* Copyright (C) 2010 OMICRON electronics GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _LINUX_POSIX_CLOCK_H_
#define _LINUX_POSIX_CLOCK_H_
@@ -51,7 +38,7 @@ struct posix_clock;
struct posix_clock_operations {
struct module *owner;
- int (*clock_adjtime)(struct posix_clock *pc, struct timex *tx);
+ int (*clock_adjtime)(struct posix_clock *pc, struct __kernel_timex *tx);
int (*clock_gettime)(struct posix_clock *pc, struct timespec64 *ts);
@@ -68,7 +55,7 @@ struct posix_clock_operations {
int (*open) (struct posix_clock *pc, fmode_t f_mode);
- uint (*poll) (struct posix_clock *pc,
+ __poll_t (*poll) (struct posix_clock *pc,
struct file *file, poll_table *wait);
int (*release) (struct posix_clock *pc);
@@ -82,29 +69,32 @@ struct posix_clock_operations {
*
* @ops: Functional interface to the clock
* @cdev: Character device instance for this clock
- * @kref: Reference count.
+ * @dev: Pointer to the clock's device.
* @rwsem: Protects the 'zombie' field from concurrent access.
* @zombie: If 'zombie' is true, then the hardware has disappeared.
- * @release: A function to free the structure when the reference count reaches
- * zero. May be NULL if structure is statically allocated.
*
* Drivers should embed their struct posix_clock within a private
* structure, obtaining a reference to it during callbacks using
* container_of().
+ *
+ * Drivers should supply an initialized but not exposed struct device
+ * to posix_clock_register(). It is used to manage the lifetime of the
+ * driver's private structure. Its 'release' field should be set to
+ * a release function for this private structure.
*/
struct posix_clock {
struct posix_clock_operations ops;
struct cdev cdev;
- struct kref kref;
+ struct device *dev;
struct rw_semaphore rwsem;
bool zombie;
- void (*release)(struct posix_clock *clk);
};
/**
* posix_clock_register() - register a new clock
- * @clk: Pointer to the clock. Caller must provide 'ops' and 'release'
- * @devid: Allocated device id
+ * @clk: Pointer to the clock. Caller must provide 'ops' field
+ * @dev: Pointer to the initialized device. Caller must provide
+ * 'release' field
*
* A clock driver calls this function to register itself with the
* clock device subsystem. If 'clk' points to dynamically allocated
@@ -113,7 +103,7 @@ struct posix_clock {
*
* Returns zero on success, non-zero otherwise.
*/
-int posix_clock_register(struct posix_clock *clk, dev_t devid);
+int posix_clock_register(struct posix_clock *clk, struct device *dev);
/**
* posix_clock_unregister() - unregister a clock
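
The reworked registration contract, condensed into a sketch: the driver
embeds both the posix_clock and the struct device in its private structure,
and the device's release() frees that structure once all references are
gone. All "foo" names are hypothetical:

#include <linux/device.h>
#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/slab.h>

struct foo_clock {
	struct posix_clock clock;
	struct device dev;
};

static void foo_clock_release(struct device *dev)
{
	kfree(container_of(dev, struct foo_clock, dev));
}

static int foo_clock_gettime(struct posix_clock *pc, struct timespec64 *ts)
{
	/* Hypothetical: read the hardware clock into *ts. */
	return 0;
}

static const struct posix_clock_operations foo_clock_ops = {
	.owner		= THIS_MODULE,
	.clock_gettime	= foo_clock_gettime,
};

static int foo_clock_create(dev_t devid)
{
	struct foo_clock *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return -ENOMEM;

	foo->clock.ops = foo_clock_ops;

	device_initialize(&foo->dev);
	foo->dev.devt = devid;
	foo->dev.release = foo_clock_release;	/* manages foo's lifetime */
	dev_set_name(&foo->dev, "foo_clock");

	return posix_clock_register(&foo->clock, &foo->dev);
}
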
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 62839fd04dce..d607f51404fc 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -1,20 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _linux_POSIX_TIMERS_H
#define _linux_POSIX_TIMERS_H
#include <linux/spinlock.h>
#include <linux/list.h>
-#include <linux/sched.h>
-#include <linux/timex.h>
+#include <linux/mutex.h>
#include <linux/alarmtimer.h>
+#include <linux/timerqueue.h>
-struct siginfo;
-
-struct cpu_timer_list {
- struct list_head entry;
- u64 expires, incr;
- struct task_struct *task;
- int firing;
-};
+struct kernel_siginfo;
+struct task_struct;
/*
* Bit fields within a clockid:
@@ -41,13 +36,164 @@ struct cpu_timer_list {
#define CLOCKFD CPUCLOCK_MAX
#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK)
-#define MAKE_PROCESS_CPUCLOCK(pid, clock) \
- ((~(clockid_t) (pid) << 3) | (clockid_t) (clock))
-#define MAKE_THREAD_CPUCLOCK(tid, clock) \
- MAKE_PROCESS_CPUCLOCK((tid), (clock) | CPUCLOCK_PERTHREAD_MASK)
+static inline clockid_t make_process_cpuclock(const unsigned int pid,
+ const clockid_t clock)
+{
+ return ((~pid) << 3) | clock;
+}
+static inline clockid_t make_thread_cpuclock(const unsigned int tid,
+ const clockid_t clock)
+{
+ return make_process_cpuclock(tid, clock | CPUCLOCK_PERTHREAD_MASK);
+}
+
+static inline clockid_t fd_to_clockid(const int fd)
+{
+ return make_process_cpuclock((unsigned int) fd, CLOCKFD);
+}
+
+static inline int clockid_to_fd(const clockid_t clk)
+{
+ return ~(clk >> 3);
+}
+
+#ifdef CONFIG_POSIX_TIMERS
+
+/**
+ * cpu_timer - Posix CPU timer representation for k_itimer
+ * @node: timerqueue node to queue in the task/sig
+ * @head: timerqueue head on which this timer is queued
+ * @pid: Pointer to target task PID
+ * @elist: List head for the expiry list
+ * @firing: Timer is currently firing
+ * @handling: Pointer to the task which handles expiry
+ */
+struct cpu_timer {
+ struct timerqueue_node node;
+ struct timerqueue_head *head;
+ struct pid *pid;
+ struct list_head elist;
+ int firing;
+ struct task_struct __rcu *handling;
+};
+
+static inline bool cpu_timer_enqueue(struct timerqueue_head *head,
+ struct cpu_timer *ctmr)
+{
+ ctmr->head = head;
+ return timerqueue_add(head, &ctmr->node);
+}
+
+static inline bool cpu_timer_queued(struct cpu_timer *ctmr)
+{
+ return !!ctmr->head;
+}
+
+static inline bool cpu_timer_dequeue(struct cpu_timer *ctmr)
+{
+ if (cpu_timer_queued(ctmr)) {
+ timerqueue_del(ctmr->head, &ctmr->node);
+ ctmr->head = NULL;
+ return true;
+ }
+ return false;
+}
+
+static inline u64 cpu_timer_getexpires(struct cpu_timer *ctmr)
+{
+ return ctmr->node.expires;
+}
+
+static inline void cpu_timer_setexpires(struct cpu_timer *ctmr, u64 exp)
+{
+ ctmr->node.expires = exp;
+}
+
+/**
+ * posix_cputimer_base - Container per posix CPU clock
+ * @nextevt: Earliest-expiration cache
+ * @tqhead: timerqueue head for cpu_timers
+ */
+struct posix_cputimer_base {
+ u64 nextevt;
+ struct timerqueue_head tqhead;
+};
-#define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD)
-#define CLOCKID_TO_FD(clk) ((unsigned int) ~((clk) >> 3))
+/**
+ * posix_cputimers - Container for posix CPU timer related data
+ * @bases: Base container for posix CPU clocks
+ * @timers_active: Timers are queued.
+ * @expiry_active: Timer expiry is active. Used for
+ * process-wide timers to avoid multiple
+ * tasks trying to handle expiry concurrently
+ *
+ * Used in task_struct and signal_struct
+ */
+struct posix_cputimers {
+ struct posix_cputimer_base bases[CPUCLOCK_MAX];
+ unsigned int timers_active;
+ unsigned int expiry_active;
+};
+
+/**
+ * posix_cputimers_work - Container for task work based posix CPU timer expiry
+ * @work: The task work to be scheduled
+ * @mutex: Mutex held around expiry in context of this task work
+ * @scheduled: @work has been scheduled already, no further processing
+ */
+struct posix_cputimers_work {
+ struct callback_head work;
+ struct mutex mutex;
+ unsigned int scheduled;
+};
+
+static inline void posix_cputimers_init(struct posix_cputimers *pct)
+{
+ memset(pct, 0, sizeof(*pct));
+ pct->bases[0].nextevt = U64_MAX;
+ pct->bases[1].nextevt = U64_MAX;
+ pct->bases[2].nextevt = U64_MAX;
+}
+
+void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit);
+
+static inline void posix_cputimers_rt_watchdog(struct posix_cputimers *pct,
+ u64 runtime)
+{
+ pct->bases[CPUCLOCK_SCHED].nextevt = runtime;
+}
+
+/* Init task static initializer */
+#define INIT_CPU_TIMERBASE(b) { \
+ .nextevt = U64_MAX, \
+}
+
+#define INIT_CPU_TIMERBASES(b) { \
+ INIT_CPU_TIMERBASE(b[0]), \
+ INIT_CPU_TIMERBASE(b[1]), \
+ INIT_CPU_TIMERBASE(b[2]), \
+}
+
+#define INIT_CPU_TIMERS(s) \
+ .posix_cputimers = { \
+ .bases = INIT_CPU_TIMERBASES(s.posix_cputimers.bases), \
+ },
+#else
+struct posix_cputimers { };
+struct cpu_timer { };
+#define INIT_CPU_TIMERS(s)
+static inline void posix_cputimers_init(struct posix_cputimers *pct) { }
+static inline void posix_cputimers_group_init(struct posix_cputimers *pct,
+ u64 cpu_limit) { }
+#endif
+
+#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
+void clear_posix_cputimers_work(struct task_struct *p);
+void posix_cputimers_init_work(void);
+#else
+static inline void clear_posix_cputimers_work(struct task_struct *p) { }
+static inline void posix_cputimers_init_work(void) { }
+#endif
#define REQUEUE_PENDING 1
@@ -71,7 +217,8 @@ struct cpu_timer_list {
* @it_process: The task to wakeup on clock_nanosleep (CPU timers)
* @sigq: Pointer to preallocated sigqueue
* @it: Union representing the various posix timer type
- * internals. Also used for rcu freeing the timer.
+ * internals.
+ * @rcu: RCU head for freeing the timer.
*/
struct k_itimer {
struct list_head list;
@@ -81,8 +228,8 @@ struct k_itimer {
clockid_t it_clock;
timer_t it_id;
int it_active;
- int it_overrun;
- int it_overrun_last;
+ s64 it_overrun;
+ s64 it_overrun_last;
int it_requeue_pending;
int it_sigev_notify;
ktime_t it_interval;
@@ -96,21 +243,21 @@ struct k_itimer {
struct {
struct hrtimer timer;
} real;
- struct cpu_timer_list cpu;
+ struct cpu_timer cpu;
struct {
struct alarm alarmtimer;
} alarm;
- struct rcu_head rcu;
} it;
+ struct rcu_head rcu;
};
-void run_posix_cpu_timers(struct task_struct *task);
+void run_posix_cpu_timers(void);
void posix_cpu_timers_exit(struct task_struct *task);
void posix_cpu_timers_exit_group(struct task_struct *task);
void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
u64 *newval, u64 *oldval);
-void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new);
+int update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new);
-void posixtimer_rearm(struct siginfo *info);
+void posixtimer_rearm(struct kernel_siginfo *info);
#endif
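
The reworked k_itimer (s64 overrun counters, the cpu_timer member) backs the
timer_create(2) family for CPU-time clocks. For reference, a minimal userspace
exercise of that path, a sketch with error handling trimmed (link with -lrt on
older glibc):

	#include <signal.h>
	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		timer_t id;
		struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
					.sigev_signo  = SIGALRM };
		struct itimerspec its = { .it_value    = { .tv_sec = 1 },
					  .it_interval = { .tv_sec = 1 } };

		/* A process-wide CPU clock timer: expiry follows consumed CPU time */
		if (timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &id))
			return 1;
		timer_settime(id, 0, &its, NULL);
		/* the kernel tracks overruns in the (now s64) it_overrun field */
		printf("overruns so far: %d\n", timer_getoverrun(id));
		timer_delete(id);
		return 0;
	}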
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 5a9a739acdd5..0e65b3d634d9 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
File: linux/posix_acl.h
@@ -11,8 +12,11 @@
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
+#include <linux/refcount.h>
#include <uapi/linux/posix_acl.h>
+struct user_namespace;
+
struct posix_acl_entry {
short e_tag;
unsigned short e_perm;
@@ -23,10 +27,10 @@ struct posix_acl_entry {
};
struct posix_acl {
- atomic_t a_refcount;
+ refcount_t a_refcount;
struct rcu_head a_rcu;
unsigned int a_count;
- struct posix_acl_entry a_entries[0];
+ struct posix_acl_entry a_entries[];
};
#define FOREACH_ACL_ENTRY(pa, acl, pe) \
@@ -40,7 +44,7 @@ static inline struct posix_acl *
posix_acl_dup(struct posix_acl *acl)
{
if (acl)
- atomic_inc(&acl->a_refcount);
+ refcount_inc(&acl->a_refcount);
return acl;
}
@@ -50,7 +54,7 @@ posix_acl_dup(struct posix_acl *acl)
static inline void
posix_acl_release(struct posix_acl *acl)
{
- if (acl && atomic_dec_and_test(&acl->a_refcount))
+ if (acl && refcount_dec_and_test(&acl->a_refcount))
kfree_rcu(acl, a_rcu);
}
@@ -59,38 +63,54 @@ posix_acl_release(struct posix_acl *acl)
extern void posix_acl_init(struct posix_acl *, int);
extern struct posix_acl *posix_acl_alloc(int, gfp_t);
-extern int posix_acl_valid(struct user_namespace *, const struct posix_acl *);
-extern int posix_acl_permission(struct inode *, const struct posix_acl *, int);
extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t);
extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *);
extern int __posix_acl_create(struct posix_acl **, gfp_t, umode_t *);
extern int __posix_acl_chmod(struct posix_acl **, gfp_t, umode_t);
extern struct posix_acl *get_posix_acl(struct inode *, int);
-extern int set_posix_acl(struct inode *, int, struct posix_acl *);
+int set_posix_acl(struct mnt_idmap *, struct dentry *, int,
+ struct posix_acl *);
+
+struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type);
+struct posix_acl *posix_acl_clone(const struct posix_acl *acl, gfp_t flags);
#ifdef CONFIG_FS_POSIX_ACL
-extern int posix_acl_chmod(struct inode *, umode_t);
+int posix_acl_chmod(struct mnt_idmap *, struct dentry *, umode_t);
extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **,
struct posix_acl **);
-extern int posix_acl_update_mode(struct inode *, umode_t *, struct posix_acl **);
+int posix_acl_update_mode(struct mnt_idmap *, struct inode *, umode_t *,
+ struct posix_acl **);
-extern int simple_set_acl(struct inode *, struct posix_acl *, int);
+int simple_set_acl(struct mnt_idmap *, struct dentry *,
+ struct posix_acl *, int);
extern int simple_acl_create(struct inode *, struct inode *);
struct posix_acl *get_cached_acl(struct inode *inode, int type);
-struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type);
void set_cached_acl(struct inode *inode, int type, struct posix_acl *acl);
void forget_cached_acl(struct inode *inode, int type);
void forget_all_cached_acls(struct inode *inode);
+int posix_acl_valid(struct user_namespace *, const struct posix_acl *);
+int posix_acl_permission(struct mnt_idmap *, struct inode *,
+ const struct posix_acl *, int);
static inline void cache_no_acl(struct inode *inode)
{
inode->i_acl = NULL;
inode->i_default_acl = NULL;
}
+
+int vfs_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
+ const char *acl_name, struct posix_acl *kacl);
+struct posix_acl *vfs_get_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name);
+int vfs_remove_acl(struct mnt_idmap *idmap, struct dentry *dentry,
+ const char *acl_name);
+int posix_acl_listxattr(struct inode *inode, char **buffer,
+ ssize_t *remaining_size);
#else
-static inline int posix_acl_chmod(struct inode *inode, umode_t mode)
+static inline int posix_acl_chmod(struct mnt_idmap *idmap,
+ struct dentry *dentry, umode_t mode)
{
return 0;
}
@@ -115,8 +135,33 @@ static inline int posix_acl_create(struct inode *inode, umode_t *mode,
static inline void forget_all_cached_acls(struct inode *inode)
{
}
+
+static inline int vfs_set_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *name,
+ struct posix_acl *acl)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct posix_acl *vfs_get_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int vfs_remove_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name)
+{
+ return -EOPNOTSUPP;
+}
+static inline int posix_acl_listxattr(struct inode *inode, char **buffer,
+ ssize_t *remaining_size)
+{
+ return 0;
+}
#endif /* CONFIG_FS_POSIX_ACL */
-struct posix_acl *get_acl(struct inode *inode, int type);
+struct posix_acl *get_inode_acl(struct inode *inode, int type);
#endif /* __LINUX_POSIX_ACL_H */
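
The a_entries[0] to a_entries[] change above is the C99 flexible-array-member
conversion; the allocation pattern is unchanged. A standalone illustration of
the idiom (the names are ours, not the kernel's):

	#include <stdlib.h>

	struct acl_like {
		unsigned int count;
		struct { short tag; unsigned short perm; } entries[]; /* flexible array */
	};

	static struct acl_like *acl_like_alloc(unsigned int n)
	{
		/* one allocation covers the header plus n trailing entries */
		struct acl_like *a = malloc(sizeof(*a) + n * sizeof(a->entries[0]));
		if (a)
			a->count = n;
		return a;
	}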
diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h
index 8b867e3bf3aa..e86f3b731da2 100644
--- a/include/linux/posix_acl_xattr.h
+++ b/include/linux/posix_acl_xattr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
File: linux/posix_acl_xattr.h
@@ -32,23 +33,43 @@ posix_acl_xattr_count(size_t size)
}
#ifdef CONFIG_FS_POSIX_ACL
-void posix_acl_fix_xattr_from_user(void *value, size_t size);
-void posix_acl_fix_xattr_to_user(void *value, size_t size);
+struct posix_acl *posix_acl_from_xattr(struct user_namespace *user_ns,
+ const void *value, size_t size);
#else
-static inline void posix_acl_fix_xattr_from_user(void *value, size_t size)
-{
-}
-static inline void posix_acl_fix_xattr_to_user(void *value, size_t size)
+static inline struct posix_acl *
+posix_acl_from_xattr(struct user_namespace *user_ns, const void *value,
+ size_t size)
{
+ return ERR_PTR(-EOPNOTSUPP);
}
#endif
-struct posix_acl *posix_acl_from_xattr(struct user_namespace *user_ns,
- const void *value, size_t size);
int posix_acl_to_xattr(struct user_namespace *user_ns,
const struct posix_acl *acl, void *buffer, size_t size);
+static inline const char *posix_acl_xattr_name(int type)
+{
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ return XATTR_NAME_POSIX_ACL_ACCESS;
+ case ACL_TYPE_DEFAULT:
+ return XATTR_NAME_POSIX_ACL_DEFAULT;
+ }
+
+ return "";
+}
+
+static inline int posix_acl_type(const char *name)
+{
+ if (strcmp(name, XATTR_NAME_POSIX_ACL_ACCESS) == 0)
+ return ACL_TYPE_ACCESS;
+ else if (strcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT) == 0)
+ return ACL_TYPE_DEFAULT;
+
+ return -1;
+}
-extern const struct xattr_handler posix_acl_access_xattr_handler;
-extern const struct xattr_handler posix_acl_default_xattr_handler;
+/* These are legacy handlers. Don't use them for new code. */
+extern const struct xattr_handler nop_posix_acl_access;
+extern const struct xattr_handler nop_posix_acl_default;
#endif /* _POSIX_ACL_XATTR_H */
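
The xattr names mapped by posix_acl_xattr_name() are the userspace-visible
interface. A hedged userspace peek at the raw blob; it assumes the target path
carries an access ACL, and the string literal is what XATTR_NAME_POSIX_ACL_ACCESS
expands to:

	#include <stdio.h>
	#include <sys/xattr.h>

	int main(int argc, char **argv)
	{
		char buf[512];
		ssize_t n = getxattr(argc > 1 ? argv[1] : ".",
				     "system.posix_acl_access", buf, sizeof(buf));
		if (n < 0)
			perror("getxattr");
		else
			printf("ACL xattr blob: %zd bytes\n", n);
		return 0;
	}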
diff --git a/include/linux/power/ab8500.h b/include/linux/power/ab8500.h
deleted file mode 100644
index cdbb6c2a8c51..000000000000
--- a/include/linux/power/ab8500.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson 2013
- * Author: Hongbo Zhang <hongbo.zhang@linaro.com>
- * License terms: GNU General Public License v2
- */
-
-#ifndef PWR_AB8500_H
-#define PWR_AB8500_H
-
-extern const struct abx500_res_to_temp ab8500_temp_tbl_a_thermistor[];
-extern const int ab8500_temp_tbl_a_size;
-
-extern const struct abx500_res_to_temp ab8500_temp_tbl_b_thermistor[];
-extern const int ab8500_temp_tbl_b_size;
-
-#endif /* PWR_AB8500_H */
diff --git a/include/linux/power/bq2415x_charger.h b/include/linux/power/bq2415x_charger.h
index 50762af8b834..f3c267f2a467 100644
--- a/include/linux/power/bq2415x_charger.h
+++ b/include/linux/power/bq2415x_charger.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* bq2415x charger driver
*
- * Copyright (C) 2011-2013 Pali Rohár <pali.rohar@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Copyright (C) 2011-2013 Pali Rohár <pali@kernel.org>
*/
#ifndef BQ2415X_CHARGER_H
@@ -27,8 +14,8 @@
* value is -1 then default chip value (specified in datasheet) will be
* used.
*
- * Value resistor_sense is needed for for configuring charge and
- * termination current. It it is less or equal to zero, configuring charge
+ * Value resistor_sense is needed for configuring charge and
+ * termination current. If it is less than or equal to zero, configuring charge
* and termination current will not be possible.
*
 * For automode support, the name of the power supply device must be provided
diff --git a/include/linux/power/bq24190_charger.h b/include/linux/power/bq24190_charger.h
new file mode 100644
index 000000000000..313e6fbcb7db
--- /dev/null
+++ b/include/linux/power/bq24190_charger.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Platform data for the TI bq24190 battery charger driver.
+ */
+
+#ifndef _BQ24190_CHARGER_H_
+#define _BQ24190_CHARGER_H_
+
+#include <linux/regulator/machine.h>
+
+struct bq24190_platform_data {
+ const struct regulator_init_data *regulator_init_data;
+};
+
+#endif
diff --git a/include/linux/power/bq24735-charger.h b/include/linux/power/bq24735-charger.h
index b04be59f914c..321dd009ce66 100644
--- a/include/linux/power/bq24735-charger.h
+++ b/include/linux/power/bq24735-charger.h
@@ -1,18 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __CHARGER_BQ24735_H_
diff --git a/include/linux/power/bq25890_charger.h b/include/linux/power/bq25890_charger.h
new file mode 100644
index 000000000000..c706ddb77a08
--- /dev/null
+++ b/include/linux/power/bq25890_charger.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Platform data for the TI bq25890 battery charger driver.
+ */
+
+#ifndef _BQ25890_CHARGER_H_
+#define _BQ25890_CHARGER_H_
+
+struct regulator_init_data;
+
+struct bq25890_platform_data {
+ const struct regulator_init_data *regulator_init_data;
+};
+
+#endif
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index 11e11685dd1d..a1aa68141d0b 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BQ27X00_BATTERY_H__
#define __LINUX_BQ27X00_BATTERY_H__
@@ -6,6 +7,7 @@ enum bq27xxx_chip {
BQ27010, /* bq27010, bq27210 */
BQ2750X, /* bq27500 deprecated alias */
BQ2751X, /* bq27510, bq27520 deprecated alias */
+ BQ2752X,
BQ27500, /* bq27500/1 */
BQ27510G1, /* bq27510G1 */
BQ27510G2, /* bq27510G2 */
@@ -14,27 +16,24 @@ enum bq27xxx_chip {
BQ27520G2, /* bq27520G2 */
BQ27520G3, /* bq27520G3 */
BQ27520G4, /* bq27520G4 */
+ BQ27521, /* bq27521 */
BQ27530, /* bq27530, bq27531 */
+ BQ27531,
BQ27541, /* bq27541, bq27542, bq27546, bq27742 */
+ BQ27542,
+ BQ27546,
+ BQ27742,
BQ27545, /* bq27545 */
- BQ27421, /* bq27421, bq27425, bq27441, bq27621 */
-};
-
-/**
- * struct bq27xxx_plaform_data - Platform data for bq27xxx devices
- * @name: Name of the battery.
- * @chip: Chip class number of this device.
- * @read: HDQ read callback.
- * This function should provide access to the HDQ bus the battery is
- * connected to.
- * The first parameter is a pointer to the battery device, the second the
- * register to be read. The return value should either be the content of
- * the passed register or an error value.
- */
-struct bq27xxx_platform_data {
- const char *name;
- enum bq27xxx_chip chip;
- int (*read)(struct device *dev, unsigned int);
+ BQ27411,
+ BQ27421, /* bq27421, bq27441, bq27621 */
+ BQ27425,
+ BQ27426,
+ BQ27441,
+ BQ27621,
+ BQ27Z561,
+ BQ28Z610,
+ BQ34Z100,
+ BQ78Z100,
};
struct bq27xxx_device_info;
@@ -55,7 +54,6 @@ struct bq27xxx_reg_cache {
int capacity;
int energy;
int flags;
- int power_avg;
int health;
};
@@ -63,7 +61,7 @@ struct bq27xxx_device_info {
struct device *dev;
int id;
enum bq27xxx_chip chip;
- bool ram_chip;
+ u32 opts;
const char *name;
struct bq27xxx_dm_reg *dm_regs;
u32 unseal_key;
diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h
index c4fa907c8f14..45e228b353ea 100644
--- a/include/linux/power/charger-manager.h
+++ b/include/linux/power/charger-manager.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
* MyungJoo.Ham <myungjoo.ham@samsung.com>
@@ -7,9 +8,6 @@
* monitor charging even in the context of suspend-to-RAM with
* an interface combining the chargers.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
**/
#ifndef _CHARGER_MANAGER_H
@@ -33,22 +31,16 @@ enum polling_modes {
CM_POLL_CHARGING_ONLY,
};
-enum cm_event_types {
- CM_EVENT_UNKNOWN = 0,
- CM_EVENT_BATT_FULL,
- CM_EVENT_BATT_IN,
- CM_EVENT_BATT_OUT,
- CM_EVENT_BATT_OVERHEAT,
- CM_EVENT_BATT_COLD,
- CM_EVENT_EXT_PWR_IN_OUT,
- CM_EVENT_CHG_START_STOP,
- CM_EVENT_OTHERS,
+enum cm_batt_temp {
+ CM_BATT_OK = 0,
+ CM_BATT_OVERHEAT,
+ CM_BATT_COLD,
};
/**
* struct charger_cable
* @extcon_name: the name of extcon device.
- * @name: the name of charger cable(external connector).
+ * @name: the name of the cable connector.
* @extcon_dev: the extcon device.
* @wq: the workqueue to control charger according to the state of
* charger cable. If charger cable is attached, enable charger.
@@ -64,9 +56,10 @@ enum cm_event_types {
struct charger_cable {
const char *extcon_name;
const char *name;
+ struct extcon_dev *extcon_dev;
+ u64 extcon_type;
/* The charger-manager use Extcon framework */
- struct extcon_specific_cable_nb extcon_dev;
struct work_struct wq;
struct notifier_block nb;
@@ -119,7 +112,7 @@ struct charger_regulator {
struct charger_cable *cables;
int num_cables;
- struct attribute_group attr_g;
+ struct attribute_group attr_grp;
struct device_attribute attr_name;
struct device_attribute attr_state;
struct device_attribute attr_externally_control;
@@ -133,11 +126,10 @@ struct charger_regulator {
* @psy_name: the name of power-supply-class for charger manager
* @polling_mode:
* Determine which polling mode will be used
- * @fullbatt_vchkdrop_ms:
* @fullbatt_vchkdrop_uV:
* Check voltage drop after the battery is fully charged.
- * If it has dropped more than fullbatt_vchkdrop_uV after
- * fullbatt_vchkdrop_ms, CM will restart charging.
+ * If it has dropped by more than fullbatt_vchkdrop_uV,
+ * CM will restart charging.
* @fullbatt_uV: voltage in microvolt
* If VBATT >= fullbatt_uV, it is assumed to be full.
* @fullbatt_soc: state of Charge in %
@@ -174,7 +166,6 @@ struct charger_desc {
enum polling_modes polling_mode;
unsigned int polling_interval_ms;
- unsigned int fullbatt_vchkdrop_ms;
unsigned int fullbatt_vchkdrop_uV;
unsigned int fullbatt_uV;
unsigned int fullbatt_soc;
@@ -186,6 +177,7 @@ struct charger_desc {
int num_charger_regulators;
struct charger_regulator *charger_regulators;
+ const struct attribute_group **sysfs_groups;
const char *psy_fuel_gauge;
@@ -212,9 +204,6 @@ struct charger_desc {
* @charger_stat: array of power_supply for chargers
* @tzd_batt : thermal zone device for battery
* @charger_enabled: the state of charger
- * @fullbatt_vchk_jiffies_at:
- * jiffies at the time full battery check will occur.
- * @fullbatt_vchk_work: work queue for full battery check
* @emergency_stop:
* When setting true, stop charging
* @psy_name_buf: the name of power-supply-class for charger manager
@@ -225,6 +214,7 @@ struct charger_desc {
* saved status of battery before entering suspend-to-RAM
* @charging_start_time: saved start time of enabling charging
* @charging_end_time: saved end time of disabling charging
+ * @battery_status: Current battery status
*/
struct charger_manager {
struct list_head entry;
@@ -236,9 +226,6 @@ struct charger_manager {
#endif
bool charger_enabled;
- unsigned long fullbatt_vchk_jiffies_at;
- struct delayed_work fullbatt_vchk_work;
-
int emergency_stop;
char psy_name_buf[PSY_NAME_MAX + 1];
@@ -247,13 +234,8 @@ struct charger_manager {
u64 charging_start_time;
u64 charging_end_time;
+
+ int battery_status;
};
-#ifdef CONFIG_CHARGER_MANAGER
-extern void cm_notify_event(struct power_supply *psy,
- enum cm_event_types type, char *msg);
-#else
-static inline void cm_notify_event(struct power_supply *psy,
- enum cm_event_types type, char *msg) { }
-#endif
#endif /* _CHARGER_MANAGER_H */
diff --git a/include/linux/power/generic-adc-battery.h b/include/linux/power/generic-adc-battery.h
deleted file mode 100644
index b1ebe08533b6..000000000000
--- a/include/linux/power/generic-adc-battery.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (C) 2012, Anish Kumar <anish198519851985@gmail.com>
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef GENERIC_ADC_BATTERY_H
-#define GENERIC_ADC_BATTERY_H
-
-/**
- * struct gab_platform_data - platform_data for generic adc iio battery driver.
- * @battery_info: recommended structure to specify static power supply
- * parameters
- * @cal_charge: calculate charge level.
- * @gpio_charge_finished: gpio for the charger.
- * @gpio_inverted: Should be 1 if the GPIO is active low otherwise 0
- * @jitter_delay: delay required after the interrupt to check battery
- * status.Default set is 10ms.
- */
-struct gab_platform_data {
- struct power_supply_info battery_info;
- int (*cal_charge)(long value);
- int gpio_charge_finished;
- bool gpio_inverted;
- int jitter_delay;
-};
-
-#endif /* GENERIC_ADC_BATTERY_H */
diff --git a/include/linux/power/gpio-charger.h b/include/linux/power/gpio-charger.h
index de1dfe09a03d..c0b7657ac1df 100644
--- a/include/linux/power/gpio-charger.h
+++ b/include/linux/power/gpio-charger.h
@@ -1,15 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef __LINUX_POWER_GPIO_CHARGER_H__
@@ -22,18 +13,12 @@
* struct gpio_charger_platform_data - platform_data for gpio_charger devices
* @name: Name for the chargers power_supply device
* @type: Type of the charger
- * @gpio: GPIO which is used to indicate the chargers status
- * @gpio_active_low: Should be set to 1 if the GPIO is active low otherwise 0
* @supplied_to: Array of battery names to which this chargers supplies power
* @num_supplicants: Number of entries in the supplied_to array
*/
struct gpio_charger_platform_data {
const char *name;
enum power_supply_type type;
-
- int gpio;
- int gpio_active_low;
-
char **supplied_to;
size_t num_supplicants;
};
diff --git a/include/linux/power/isp1704_charger.h b/include/linux/power/isp1704_charger.h
deleted file mode 100644
index 0105d9e7af85..000000000000
--- a/include/linux/power/isp1704_charger.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * ISP1704 USB Charger Detection driver
- *
- * Copyright (C) 2011 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-#ifndef __ISP1704_CHARGER_H
-#define __ISP1704_CHARGER_H
-
-struct isp1704_charger_data {
- void (*set_power)(bool on);
- int enable_gpio;
-};
-
-#endif
diff --git a/include/linux/power/jz4740-battery.h b/include/linux/power/jz4740-battery.h
index 19c9610c720a..10da211678c8 100644
--- a/include/linux/power/jz4740-battery.h
+++ b/include/linux/power/jz4740-battery.h
@@ -1,15 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2009, Jiejing Zhang <kzjeef@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef __JZ4740_BATTERY_H
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index a7ed29baf44a..c417abd2ab70 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Fuel gauge driver for Maxim 17042 / 8966 / 8997
* Note that Maxim 8966 and 8997 are mfd and this is its subdevice.
*
* Copyright (C) 2011 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __MAX17042_BATTERY_H_
@@ -82,7 +69,7 @@ enum max17042_register {
MAX17042_RelaxCFG = 0x2A,
MAX17042_MiscCFG = 0x2B,
MAX17042_TGAIN = 0x2C,
- MAx17042_TOFF = 0x2D,
+ MAX17042_TOFF = 0x2D,
MAX17042_CGAIN = 0x2E,
MAX17042_COFF = 0x2F,
@@ -91,7 +78,7 @@ enum max17042_register {
MAX17042_T_empty = 0x34,
MAX17042_FullCAP0 = 0x35,
- MAX17042_LAvg_empty = 0x36,
+ MAX17042_IAvg_empty = 0x36,
MAX17042_FCTC = 0x37,
MAX17042_RCOMP0 = 0x38,
MAX17042_TempCo = 0x39,
@@ -118,18 +105,65 @@ enum max17042_register {
MAX17042_OCV = 0xEE,
- MAX17042_OCVInternal = 0xFB,
+ MAX17042_OCVInternal = 0xFB, /* MAX17055 VFOCV */
MAX17042_VFSOC = 0xFF,
};
-/* Registers specific to max17047/50 */
+/* Registers specific to max17055 only */
+enum max17055_register {
+ MAX17055_QRes = 0x0C,
+ MAX17055_RCell = 0x14,
+ MAX17055_TTF = 0x20,
+ MAX17055_DieTemp = 0x34,
+ MAX17055_USER_MEM = 0x40,
+ MAX17055_RGAIN = 0x43,
+
+ MAX17055_ConvgCfg = 0x49,
+ MAX17055_VFRemCap = 0x4A,
+
+ MAX17055_STATUS2 = 0xB0,
+ MAX17055_POWER = 0xB1,
+ MAX17055_ID = 0xB2,
+ MAX17055_AvgPower = 0xB3,
+ MAX17055_IAlrtTh = 0xB4,
+ MAX17055_TTFCfg = 0xB5,
+ MAX17055_CVMixCap = 0xB6,
+ MAX17055_CVHalfTime = 0xB7,
+ MAX17055_CGTempCo = 0xB8,
+ MAX17055_Curve = 0xB9,
+ MAX17055_HibCfg = 0xBA,
+ MAX17055_Config2 = 0xBB,
+ MAX17055_VRipple = 0xBC,
+ MAX17055_RippleCfg = 0xBD,
+ MAX17055_TimerH = 0xBE,
+
+ MAX17055_RSense = 0xD0,
+ MAX17055_ScOcvLim = 0xD1,
+
+ MAX17055_SOCHold = 0xD3,
+ MAX17055_MaxPeakPwr = 0xD4,
+ MAX17055_SusPeakPwr = 0xD5,
+ MAX17055_PackResistance = 0xD6,
+ MAX17055_SysResistance = 0xD7,
+ MAX17055_MinSysV = 0xD8,
+ MAX17055_MPPCurrent = 0xD9,
+ MAX17055_SPPCurrent = 0xDA,
+ MAX17055_ModelCfg = 0xDB,
+ MAX17055_AtQResidual = 0xDC,
+ MAX17055_AtTTE = 0xDD,
+ MAX17055_AtAvSOC = 0xDE,
+ MAX17055_AtAvCap = 0xDF,
+};
+
+/* Registers specific to max17047/50/55 */
enum max17047_register {
MAX17047_QRTbl00 = 0x12,
MAX17047_FullSOCThr = 0x13,
MAX17047_QRTbl10 = 0x22,
MAX17047_QRTbl20 = 0x32,
MAX17047_V_empty = 0x3A,
+ MAX17047_TIMER = 0x3E,
MAX17047_QRTbl30 = 0x42,
};
@@ -138,6 +172,7 @@ enum max170xx_chip_type {
MAXIM_DEVICE_TYPE_MAX17042,
MAXIM_DEVICE_TYPE_MAX17047,
MAXIM_DEVICE_TYPE_MAX17050,
+ MAXIM_DEVICE_TYPE_MAX17055,
MAXIM_DEVICE_TYPE_NUM
};
@@ -186,7 +221,7 @@ struct max17042_config_data {
u16 fullcap; /* 0x10 */
u16 fullcapnom; /* 0x23 */
u16 socempty; /* 0x33 */
- u16 lavg_empty; /* 0x36 */
+ u16 iavg_empty; /* 0x36 */
u16 dqacc; /* 0x45 */
u16 dpacc; /* 0x46 */
u16 qrtbl00; /* 0x12 */
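
The register map above is word-oriented: 16-bit little-endian values over I2C.
A hedged userspace sketch of reading one register via i2c-dev — the bus number
is an assumption, and 0x36 is this part's usual 7-bit address:

	#include <fcntl.h>
	#include <linux/i2c-dev.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/i2c-1", O_RDWR);	/* bus number is an assumption */
		unsigned char reg = 0x2D, val[2];	/* MAX17042_TOFF from the enum */

		if (fd < 0 || ioctl(fd, I2C_SLAVE, 0x36) < 0)
			return 1;
		if (write(fd, &reg, 1) != 1 || read(fd, val, 2) != 2)
			return 1;
		printf("TOFF = 0x%04x\n", val[0] | (val[1] << 8)); /* LE word */
		close(fd);
		return 0;
	}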
diff --git a/include/linux/power/max8903_charger.h b/include/linux/power/max8903_charger.h
deleted file mode 100644
index 89d3f1cb3433..000000000000
--- a/include/linux/power/max8903_charger.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * max8903_charger.h - Maxim 8903 USB/Adapter Charger Driver
- *
- * Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef __MAX8903_CHARGER_H__
-#define __MAX8903_CHARGER_H__
-
-struct max8903_pdata {
- /*
- * GPIOs
- * cen, chg, flt, dcm and usus are optional.
- * dok and uok are not optional depending on the status of
- * dc_valid and usb_valid.
- */
- int cen; /* Charger Enable input */
- int dok; /* DC(Adapter) Power OK output */
- int uok; /* USB Power OK output */
- int chg; /* Charger status output */
- int flt; /* Fault output */
- int dcm; /* Current-Limit Mode input (1: DC, 2: USB) */
- int usus; /* USB Suspend Input (1: suspended) */
-
- /*
- * DC(Adapter/TA) is wired
- * When dc_valid is true,
- * dok should be valid.
- *
- * At least one of dc_valid or usb_valid should be true.
- */
- bool dc_valid;
- /*
- * USB is wired
- * When usb_valid is true,
- * uok should be valid.
- */
- bool usb_valid;
-};
-
-#endif /* __MAX8903_CHARGER_H__ */
diff --git a/include/linux/power/sbs-battery.h b/include/linux/power/sbs-battery.h
index 519b8b43239a..ccfe79783cb1 100644
--- a/include/linux/power/sbs-battery.h
+++ b/include/linux/power/sbs-battery.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Gas Gauge driver for SBS Compliant Gas Gauges
*
* Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __LINUX_POWER_SBS_BATTERY_H_
diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
index d8b187c3925d..3a2c79dfc1ff 100644
--- a/include/linux/power/smartreflex.h
+++ b/include/linux/power/smartreflex.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* OMAP Smartreflex Defines and Routines
*
@@ -11,10 +12,6 @@
*
* Copyright (C) 2007 Texas Instruments, Inc.
* Lesly A M <x0080970@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __POWER_SMARTREFLEX_H
@@ -143,6 +140,13 @@
#define OMAP3430_SR_ERRWEIGHT 0x04
#define OMAP3430_SR_ERRMAXLIMIT 0x02
+enum sr_instance {
+ OMAP_SR_MPU, /* shared with iva on omap3 */
+ OMAP_SR_CORE,
+ OMAP_SR_IVA,
+ OMAP_SR_NR,
+};
+
struct omap_sr {
char *name;
struct list_head node;
@@ -151,6 +155,7 @@ struct omap_sr {
struct voltagedomain *voltdm;
struct dentry *dbg_dir;
unsigned int irq;
+ struct clk *fck;
int srid;
int ip_type;
int nvalue_count;
@@ -165,6 +170,7 @@ struct omap_sr {
u32 senp_mod;
u32 senn_mod;
void __iomem *base;
+ unsigned long enabled:1;
};
/**
@@ -207,7 +213,6 @@ struct omap_smartreflex_dev_attr {
const char *sensor_voltdm_name;
};
-#ifdef CONFIG_POWER_AVS_OMAP
/*
* The smart reflex driver supports CLASS1 CLASS2 and CLASS3 SR.
* The smartreflex class driver should pass the class type.
@@ -268,8 +273,6 @@ struct omap_sr_nvalue_table {
* @senn_avgweight SENNAVGWEIGHT value of the sr AVGWEIGHT register
* @senp_avgweight SENPAVGWEIGHT value of the sr AVGWEIGHT register
* @nvalue_count: Number of distinct nvalues in the nvalue table
- * @enable_on_init: whether this sr module needs to enabled at
- * boot up or not.
* @nvalue_table: table containing the efuse offsets and nvalues
* corresponding to them.
* @voltdm: Pointer to the voltage domain associated with the SR
@@ -285,19 +288,20 @@ struct omap_sr_data {
u32 senn_avgweight;
u32 senp_avgweight;
int nvalue_count;
- bool enable_on_init;
struct omap_sr_nvalue_table *nvalue_table;
struct voltagedomain *voltdm;
};
+
+extern struct omap_sr_data omap_sr_pdata[OMAP_SR_NR];
+
+#ifdef CONFIG_POWER_AVS_OMAP
+
/* Smartreflex module enable/disable interface */
void omap_sr_enable(struct voltagedomain *voltdm);
void omap_sr_disable(struct voltagedomain *voltdm);
void omap_sr_disable_reset_volt(struct voltagedomain *voltdm);
-/* API to register the pmic specific data with the smartreflex driver. */
-void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data);
-
/* Smartreflex driver hooks to be called from Smartreflex class driver */
int sr_enable(struct omap_sr *sr, unsigned long volt);
void sr_disable(struct omap_sr *sr);
@@ -312,7 +316,5 @@ static inline void omap_sr_enable(struct voltagedomain *voltdm) {}
static inline void omap_sr_disable(struct voltagedomain *voltdm) {}
static inline void omap_sr_disable_reset_volt(
struct voltagedomain *voltdm) {}
-static inline void omap_sr_register_pmic(
- struct omap_sr_pmic_data *pmic_data) {}
#endif
#endif
diff --git a/include/linux/power/smb347-charger.h b/include/linux/power/smb347-charger.h
deleted file mode 100644
index b3cb20dab55f..000000000000
--- a/include/linux/power/smb347-charger.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Summit Microelectronics SMB347 Battery Charger Driver
- *
- * Copyright (C) 2011, Intel Corporation
- *
- * Authors: Bruce E. Robertson <bruce.e.robertson@intel.com>
- * Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef SMB347_CHARGER_H
-#define SMB347_CHARGER_H
-
-#include <linux/types.h>
-#include <linux/power_supply.h>
-
-enum {
- /* use the default compensation method */
- SMB347_SOFT_TEMP_COMPENSATE_DEFAULT = -1,
-
- SMB347_SOFT_TEMP_COMPENSATE_NONE,
- SMB347_SOFT_TEMP_COMPENSATE_CURRENT,
- SMB347_SOFT_TEMP_COMPENSATE_VOLTAGE,
-};
-
-/* Use default factory programmed value for hard/soft temperature limit */
-#define SMB347_TEMP_USE_DEFAULT -273
-
-/*
- * Charging enable can be controlled by software (via i2c) by
- * smb347-charger driver or by EN pin (active low/high).
- */
-enum smb347_chg_enable {
- SMB347_CHG_ENABLE_SW,
- SMB347_CHG_ENABLE_PIN_ACTIVE_LOW,
- SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH,
-};
-
-/**
- * struct smb347_charger_platform_data - platform data for SMB347 charger
- * @battery_info: Information about the battery
- * @max_charge_current: maximum current (in uA) the battery can be charged
- * @max_charge_voltage: maximum voltage (in uV) the battery can be charged
- * @pre_charge_current: current (in uA) to use in pre-charging phase
- * @termination_current: current (in uA) used to determine when the
- * charging cycle terminates
- * @pre_to_fast_voltage: voltage (in uV) treshold used for transitioning to
- * pre-charge to fast charge mode
- * @mains_current_limit: maximum input current drawn from AC/DC input (in uA)
- * @usb_hc_current_limit: maximum input high current (in uA) drawn from USB
- * input
- * @chip_temp_threshold: die temperature where device starts limiting charge
- * current [%100 - %130] (in degree C)
- * @soft_cold_temp_limit: soft cold temperature limit [%0 - %15] (in degree C),
- * granularity is 5 deg C.
- * @soft_hot_temp_limit: soft hot temperature limit [%40 - %55] (in degree C),
- * granularity is 5 deg C.
- * @hard_cold_temp_limit: hard cold temperature limit [%-5 - %10] (in degree C),
- * granularity is 5 deg C.
- * @hard_hot_temp_limit: hard hot temperature limit [%50 - %65] (in degree C),
- * granularity is 5 deg C.
- * @suspend_on_hard_temp_limit: suspend charging when hard limit is hit
- * @soft_temp_limit_compensation: compensation method when soft temperature
- * limit is hit
- * @charge_current_compensation: current (in uA) for charging compensation
- * current when temperature hits soft limits
- * @use_mains: AC/DC input can be used
- * @use_usb: USB input can be used
- * @use_usb_otg: USB OTG output can be used (not implemented yet)
- * @irq_gpio: GPIO number used for interrupts (%-1 if not used)
- * @enable_control: how charging enable/disable is controlled
- * (driver/pin controls)
- *
- * @use_main, @use_usb, and @use_usb_otg are means to enable/disable
- * hardware support for these. This is useful when we want to have for
- * example OTG charging controlled via OTG transceiver driver and not by
- * the SMB347 hardware.
- *
- * Hard and soft temperature limit values are given as described in the
- * device data sheet and assuming NTC beta value is %3750. Even if this is
- * not the case, these values should be used. They can be mapped to the
- * corresponding NTC beta values with the help of table %2 in the data
- * sheet. So for example if NTC beta is %3375 and we want to program hard
- * hot limit to be %53 deg C, @hard_hot_temp_limit should be set to %50.
- *
- * If zero value is given in any of the current and voltage values, the
- * factory programmed default will be used. For soft/hard temperature
- * values, pass in %SMB347_TEMP_USE_DEFAULT instead.
- */
-struct smb347_charger_platform_data {
- struct power_supply_info battery_info;
- unsigned int max_charge_current;
- unsigned int max_charge_voltage;
- unsigned int pre_charge_current;
- unsigned int termination_current;
- unsigned int pre_to_fast_voltage;
- unsigned int mains_current_limit;
- unsigned int usb_hc_current_limit;
- unsigned int chip_temp_threshold;
- int soft_cold_temp_limit;
- int soft_hot_temp_limit;
- int hard_cold_temp_limit;
- int hard_hot_temp_limit;
- bool suspend_on_hard_temp_limit;
- unsigned int soft_temp_limit_compensation;
- unsigned int charge_current_compensation;
- bool use_mains;
- bool use_usb;
- bool use_usb_otg;
- int irq_gpio;
- enum smb347_chg_enable enable_control;
-};
-
-#endif /* SMB347_CHARGER_H */
diff --git a/include/linux/power/twl4030_madc_battery.h b/include/linux/power/twl4030_madc_battery.h
index 23110dc77726..26517e9dfaa9 100644
--- a/include/linux/power/twl4030_madc_battery.h
+++ b/include/linux/power/twl4030_madc_battery.h
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Dumb driver for LiIon batteries using TWL4030 madc.
*
* Copyright 2013 Golden Delicious Computers
* Nikolaus Schaller <hns@goldelico.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef __TWL4030_MADC_BATTERY_H
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index de89066b72b1..a427f13c757f 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Universal power supply monitor class
*
@@ -6,8 +7,6 @@
* Copyright © 2003 Ian Molton <spyro@f2s.com>
*
* Modified: 2004, Oct Szabolcs Gyurko
- *
- * You may use this code as per GPL version 2
*/
#ifndef __LINUX_POWER_SUPPLY_H__
@@ -40,11 +39,17 @@ enum {
POWER_SUPPLY_STATUS_FULL,
};
+/* What algorithm is the charger using? */
enum {
POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0,
POWER_SUPPLY_CHARGE_TYPE_NONE,
- POWER_SUPPLY_CHARGE_TYPE_TRICKLE,
- POWER_SUPPLY_CHARGE_TYPE_FAST,
+ POWER_SUPPLY_CHARGE_TYPE_TRICKLE, /* slow speed */
+ POWER_SUPPLY_CHARGE_TYPE_FAST, /* fast speed */
+ POWER_SUPPLY_CHARGE_TYPE_STANDARD, /* normal speed */
+ POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE, /* dynamically adjusted speed */
+ POWER_SUPPLY_CHARGE_TYPE_CUSTOM, /* use CHARGE_CONTROL_* props */
+ POWER_SUPPLY_CHARGE_TYPE_LONGLIFE, /* slow speed, longer life */
+ POWER_SUPPLY_CHARGE_TYPE_BYPASS, /* bypassing the charger */
};
enum {
@@ -57,6 +62,12 @@ enum {
POWER_SUPPLY_HEALTH_COLD,
POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE,
POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE,
+ POWER_SUPPLY_HEALTH_OVERCURRENT,
+ POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED,
+ POWER_SUPPLY_HEALTH_WARM,
+ POWER_SUPPLY_HEALTH_COOL,
+ POWER_SUPPLY_HEALTH_HOT,
+ POWER_SUPPLY_HEALTH_NO_BATTERY,
};
enum {
@@ -121,7 +132,12 @@ enum power_supply_property {
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD, /* in percents! */
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD, /* in percents! */
+ POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT,
+ POWER_SUPPLY_PROP_INPUT_POWER_LIMIT,
POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN,
POWER_SUPPLY_PROP_ENERGY_FULL,
@@ -131,6 +147,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_CAPACITY, /* in percents! */
POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN, /* in percents! */
POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX, /* in percents! */
+ POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN, /* in percents! */
POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TEMP_MAX,
@@ -145,10 +162,14 @@ enum power_supply_property {
POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
POWER_SUPPLY_PROP_TYPE, /* use power_supply.type instead */
+ POWER_SUPPLY_PROP_USB_TYPE,
POWER_SUPPLY_PROP_SCOPE,
POWER_SUPPLY_PROP_PRECHARGE_CURRENT,
POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
POWER_SUPPLY_PROP_CALIBRATE,
+ POWER_SUPPLY_PROP_MANUFACTURE_YEAR,
+ POWER_SUPPLY_PROP_MANUFACTURE_MONTH,
+ POWER_SUPPLY_PROP_MANUFACTURE_DAY,
/* Properties of type `const char *' */
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
@@ -168,6 +189,26 @@ enum power_supply_type {
POWER_SUPPLY_TYPE_USB_PD, /* Power Delivery Port */
POWER_SUPPLY_TYPE_USB_PD_DRP, /* PD Dual Role Port */
POWER_SUPPLY_TYPE_APPLE_BRICK_ID, /* Apple Charging Method */
+ POWER_SUPPLY_TYPE_WIRELESS, /* Wireless */
+};
+
+enum power_supply_usb_type {
+ POWER_SUPPLY_USB_TYPE_UNKNOWN = 0,
+ POWER_SUPPLY_USB_TYPE_SDP, /* Standard Downstream Port */
+ POWER_SUPPLY_USB_TYPE_DCP, /* Dedicated Charging Port */
+ POWER_SUPPLY_USB_TYPE_CDP, /* Charging Downstream Port */
+ POWER_SUPPLY_USB_TYPE_ACA, /* Accessory Charger Adapters */
+ POWER_SUPPLY_USB_TYPE_C, /* Type C Port */
+ POWER_SUPPLY_USB_TYPE_PD, /* Power Delivery Port */
+ POWER_SUPPLY_USB_TYPE_PD_DRP, /* PD Dual Role Port */
+ POWER_SUPPLY_USB_TYPE_PD_PPS, /* PD Programmable Power Supply */
+ POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID, /* Apple Charging Method */
+};
+
+enum power_supply_charge_behaviour {
+ POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO = 0,
+ POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE,
+ POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE,
};
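
These enums surface through the power-supply sysfs ABI as lowercase strings
(status, health, charge_behaviour, usb_type, ...). A small reader sketch; the
supply name BAT0 is an assumption, and not every supply exposes every property:

	#include <stdio.h>

	static void show(const char *prop)
	{
		char path[128], buf[64];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/class/power_supply/BAT0/%s", prop);
		f = fopen(path, "r");
		if (f && fgets(buf, sizeof(buf), f))
			printf("%-16s %s", prop, buf);	/* sysfs value ends in \n */
		if (f)
			fclose(f);
	}

	int main(void)
	{
		show("status");
		show("health");
		show("charge_behaviour");	/* may be absent on some supplies */
		return 0;
	}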
enum power_supply_notifier_events {
@@ -185,9 +226,14 @@ struct power_supply;
/* Run-time specific power supply configuration */
struct power_supply_config {
struct device_node *of_node;
+ struct fwnode_handle *fwnode;
+
/* Driver private data */
void *drv_data;
+ /* Device specific sysfs attributes */
+ const struct attribute_group **attr_grp;
+
char **supplied_to;
size_t num_supplicants;
};
@@ -196,7 +242,9 @@ struct power_supply_config {
struct power_supply_desc {
const char *name;
enum power_supply_type type;
- enum power_supply_property *properties;
+ const enum power_supply_usb_type *usb_types;
+ size_t num_usb_types;
+ const enum power_supply_property *properties;
size_t num_properties;
/*
@@ -251,7 +299,9 @@ struct power_supply {
spinlock_t changed_lock;
bool changed;
bool initialized;
+ bool removing;
atomic_t use_cnt;
+ struct power_supply_battery_info *battery_info;
#ifdef CONFIG_THERMAL
struct thermal_zone_device *tzd;
struct thermal_cooling_device *tcd;
@@ -290,30 +340,444 @@ struct power_supply_info {
int use_for_apm;
};
-/*
+struct power_supply_battery_ocv_table {
+ int ocv; /* microVolts */
+ int capacity; /* percent */
+};
+
+struct power_supply_resistance_temp_table {
+ int temp; /* celsius */
+ int resistance; /* internal resistance percent */
+};
+
+struct power_supply_vbat_ri_table {
+ int vbat_uv; /* Battery voltage in microvolt */
+ int ri_uohm; /* Internal resistance in microohm */
+};
+
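The OCV table pairs open-circuit voltage with remaining capacity, and lookups
interpolate between neighbouring rows of a table ordered by descending OCV. A
standalone sketch of that lookup, modelled on (but not identical to) the
kernel's simple OCV-to-capacity helper:

	struct ocv_cap { int ocv_uv; int cap_pct; };	/* descending by ocv_uv */

	static int ocv2cap(const struct ocv_cap *t, int len, int ocv_uv)
	{
		int i;

		if (ocv_uv >= t[0].ocv_uv)
			return t[0].cap_pct;
		for (i = 1; i < len; i++) {
			if (ocv_uv >= t[i].ocv_uv)	/* between rows i-1 and i */
				return t[i].cap_pct +
				       (t[i - 1].cap_pct - t[i].cap_pct) *
				       (ocv_uv - t[i].ocv_uv) /
				       (t[i - 1].ocv_uv - t[i].ocv_uv);
		}
		return t[len - 1].cap_pct;	/* below the table: clamp */
	}
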
+/**
+ * struct power_supply_maintenance_charge_table - settings for maintenance charging
+ * @charge_current_max_ua: maintenance charging current that is used to keep
+ * the charge of the battery full as current is consumed after full charging.
+ * The corresponding charge_voltage_max_uv is used as a safeguard: when we
+ * reach this voltage the maintenance charging current is turned off. It is
+ * turned back on if we fall below this voltage.
+ * @charge_voltage_max_uv: maintenance charging voltage that is usually a bit
+ * lower than the constant_charge_voltage_max_uv. We can apply this setting's
+ * charge_current_max_ua until we get back up to this voltage.
+ * @charge_safety_timer_minutes: maintenance charging safety timer, with an expiry
+ * time in minutes. We will only use maintenance charging in this setting
+ * for a certain amount of time, then we will first move to the next
+ * maintenance charge current and voltage pair in respective array and wait
+ * for the next safety timer timeout, or, if we reached the last maintenance
+ * charging setting, disable charging until we reach
+ * charge_restart_voltage_uv and restart ordinary CC/CV charging from there.
+ * These timers should be chosen to align with the typical discharge curve
+ * for the battery.
+ *
+ * Ordinary CC/CV charging will stop charging when the charge current goes
+ * below charge_term_current_ua, and then restart it (if the device is still
+ * plugged into the charger) at charge_restart_voltage_uv. This happens in most
+ * consumer products because the power usage while connected to a charger is
+ * not zero, and devices are not manufactured to draw power directly from the
+ * charger: instead they will at all times drain the battery a little, like
+ * the power used in standby mode. This will over time give a charge graph
+ * such as this:
+ *
+ * Energy
+ * ^ ... ... ... ... ... ... ...
+ * | . . . . . . . . . . . . .
+ * | .. . .. . .. . .. . .. . .. . ..
+ * |. .. .. .. .. .. ..
+ * +-------------------------------------------------------------------> t
+ *
+ * Practically this means that the Li-ions are wandering back and forth in the
+ * battery and this causes degeneration of the battery anode and cathode.
+ * To prolong the life of the battery, maintenance charging is applied after
+ * reaching charge_term_current_ua to hold up the charge in the battery while
+ * consuming power, thus lowering the wear on the battery:
+ *
+ * Energy
+ * ^ .......................................
+ * | . ......................
+ * | ..
+ * |.
+ * +-------------------------------------------------------------------> t
+ *
+ * Maintenance charging uses the voltages from this table: a table of settings
+ * is traversed using a slightly lower current and voltage than what is used for
+ * CC/CV charging. The maintenance charging will for safety reasons not go on
+ * indefinitely: we lower the current and voltage with successive maintenance
+ * settings, then disable charging completely after we reach the last one,
+ * and after that we do not restart charging until we reach
+ * charge_restart_voltage_uv (see struct power_supply_battery_info) and restart
+ * ordinary CC/CV charging from there.
+ *
+ * As an example, a Samsung EB425161LA Lithium-Ion battery is CC/CV charged
+ * at 900mA to 4340mV, then maintenance charged at 600mA and 4150mV for up to
+ * 60 hours, then maintenance charged at 600mA and 4100mV for up to 200 hours.
+ * After this the charge cycle is restarted waiting for
+ * charge_restart_voltage_uv.
+ *
+ * For most mobile electronics this type of maintenance charging is enough for
+ * the user to disconnect the device and make use of it before both maintenance
+ * charging cycles are complete, if the current and voltage has been chosen
+ * appropriately. These need to be determined from battery discharge curves
+ * and expected standby current.
+ *
+ * If the voltage anyway drops to charge_restart_voltage_uv during maintenance
+ * charging, ordinary CC/CV charging is restarted. This can happen if the
+ * device is e.g. actively used during charging, so more current is drawn than
+ * the expected stand-by current. Also overvoltage protection will be applied
+ * as usual.
+ */
+struct power_supply_maintenance_charge_table {
+ int charge_current_max_ua;
+ int charge_voltage_max_uv;
+ int charge_safety_timer_minutes;
+};
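
In pseudologic, the documented traversal of the maintenance settings looks like
the sketch below. The charger-control helpers are hypothetical placeholders,
declared here only so the sketch is self-contained:

	/* Hypothetical charger-control hooks, for the sketch only */
	extern void set_charger(int current_ua, int voltage_uv);
	extern void safety_timer_wait(int minutes);
	extern void charger_off_until_voltage_below(int voltage_uv);

	static void maintenance_charge(
		const struct power_supply_maintenance_charge_table *mt,
		int n, int charge_restart_voltage_uv)
	{
		int i;

		for (i = 0; i < n; i++) {
			/* each step applies a lower current/voltage pair... */
			set_charger(mt[i].charge_current_max_ua,
				    mt[i].charge_voltage_max_uv);
			/* ...until its safety timer expires */
			safety_timer_wait(mt[i].charge_safety_timer_minutes);
		}
		/* after the last setting, stay off until the restart threshold */
		charger_off_until_voltage_below(charge_restart_voltage_uv);
	}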
+
+#define POWER_SUPPLY_OCV_TEMP_MAX 20
+
+/**
+ * struct power_supply_battery_info - information about batteries
+ * @technology: from the POWER_SUPPLY_TECHNOLOGY_* enum
+ * @energy_full_design_uwh: energy content when fully charged in microwatt
+ * hours
+ * @charge_full_design_uah: charge content when fully charged in microampere
+ * hours
+ * @voltage_min_design_uv: minimum voltage across the poles when the battery
+ * is at minimum voltage level in microvolts. If the voltage drops below this
+ * level the battery will need precharging when using CC/CV charging.
+ * @voltage_max_design_uv: voltage across the poles when the battery is fully
+ * charged in microvolts. This is the "nominal voltage" i.e. the voltage
+ * printed on the label of the battery.
+ * @tricklecharge_current_ua: the tricklecharge current used when trickle
+ * charging the battery in microamperes. This is the charging phase when the
+ * battery is completely empty and we need to carefully trickle in some
+ * charge until we reach the precharging voltage.
+ * @precharge_current_ua: current to use in the precharge phase in microamperes,
+ * the precharge rate is limited by limiting the current to this value.
+ * @precharge_voltage_max_uv: the maximum voltage allowed when precharging in
+ * microvolts. When we pass this voltage we will nominally switch over to the
+ * CC (constant current) charging phase defined by constant_charge_current_ua
+ * and constant_charge_voltage_max_uv.
+ * @charge_term_current_ua: when the current in the CV (constant voltage)
+ * charging phase drops below this value in microamperes the charging will
+ * terminate completely and not restart until the voltage over the battery
+ * poles reach charge_restart_voltage_uv unless we use maintenance charging.
+ * @charge_restart_voltage_uv: when the battery has been fully charged by
+ * CC/CV charging and charging has been disabled, and the voltage subsequently
+ * drops below this value in microvolts, the charging will be restarted
+ * (typically using CV charging).
+ * @overvoltage_limit_uv: If the voltage exceeds the nominal voltage
+ * voltage_max_design_uv and we reach this voltage level, all charging must
+ * stop and emergency procedures take place, such as shutting down the system
+ * in some cases.
+ * @constant_charge_current_max_ua: current in microamperes to use in the CC
+ * (constant current) charging phase. The charging rate is limited
+ * by this current. This is the main charging phase and as the current is
+ * constant into the battery the voltage slowly ascends to
+ * constant_charge_voltage_max_uv.
+ * @constant_charge_voltage_max_uv: voltage in microvolts signifying the end of
+ * the CC (constant current) charging phase and the beginning of the CV
+ * (constant voltage) charging phase.
+ * @maintenance_charge: an array of maintenance charging settings to be used
+ * after the main CC/CV charging phase is complete.
+ * @maintenance_charge_size: the number of maintenance charging settings in
+ * maintenance_charge.
+ * @alert_low_temp_charge_current_ua: The charging current to use if the battery
+ * enters low alert temperature, i.e. if the internal temperature is between
+ * temp_alert_min and temp_min. No matter the charging phase, this
+ * and alert_high_temp_charge_voltage_uv will be applied.
+ * @alert_low_temp_charge_voltage_uv: Same as alert_low_temp_charge_current_ua,
+ * but for the charging voltage.
+ * @alert_high_temp_charge_current_ua: The charging current to use if the
+ * battery enters high alert temperature, i.e. if the internal temperature is
+ * between temp_alert_max and temp_max. No matter the charging phase, this
+ * and alert_high_temp_charge_voltage_uv will be applied, usually lowering
+ * the charging current as an evasive manoeuvre.
+ * @alert_high_temp_charge_voltage_uv: Same as
+ * alert_high_temp_charge_current_ua, but for the charging voltage.
+ * @factory_internal_resistance_uohm: the internal resistance of the battery
+ * at fabrication time, expressed in microohms. This resistance will vary
+ * depending on the lifetime and charge of the battery, so this is just a
+ * nominal ballpark figure. This internal resistance is given for the state
+ * when the battery is discharging.
+ * @factory_internal_resistance_charging_uohm: the internal resistance of the
+ * battery at fabrication time while charging, expressed in microohms.
+ * The charging process will affect the internal resistance of the battery
+ * so this value provides a better resistance under these circumstances.
+ * This resistance will vary depending on the lifetime and charge of the
+ * battery, so this is just a nominal ballpark figure.
+ * @ocv_temp: array indicating the open circuit voltage (OCV) capacity
+ * temperature indices. This is an array of temperatures in degrees Celsius
+ * indicating which capacity table to use for a certain temperature, since
+ * the capacity for reasons of chemistry will be different at different
+ * temperatures. Determining capacity is a multivariate problem and the
+ * temperature is the first variable we determine.
+ * @temp_ambient_alert_min: the battery will go outside of operating conditions
+ * when the ambient temperature goes below this temperature in degrees
+ * Celsius.
+ * @temp_ambient_alert_max: the battery will go outside of operating conditions
+ * when the ambient temperature goes above this temperature in degrees
+ * Celsius.
+ * @temp_alert_min: the battery should issue an alert if the internal
+ * temperature goes below this temperature in degrees Celsius.
+ * @temp_alert_max: the battery should issue an alert if the internal
+ * temperature goes above this temperature in degrees Celsius.
+ * @temp_min: the battery will go outside of operating conditions when
+ * the internal temperature goes below this temperature in degrees Celsius.
+ * Normally this means the system should shut down.
+ * @temp_max: the battery will go outside of operating conditions when
+ * the internal temperature goes above this temperature in degrees Celsius.
+ * Normally this means the system should shut down.
+ * @ocv_table: for each entry in ocv_temp there is a corresponding entry in
+ * ocv_table and a size for each entry in ocv_table_size. These arrays
+ * determine the capacity in percent in relation to the voltage in microvolts
+ * at the indexed temperature.
+ * @ocv_table_size: for each entry in ocv_temp this array is giving the size of
+ * each entry in the array of capacity arrays in ocv_table.
+ * @resist_table: this is a table that correlates a battery temperature to the
+ * expected internal resistance at this temperature. The resistance is given
+ * as a percentage of factory_internal_resistance_uohm. Knowing the
+ * resistance of the battery is usually necessary for calculating the open
+ * circuit voltage (OCV) that is then used with the ocv_table to calculate
+ * the capacity of the battery. The resist_table must be ordered descending
+ * by temperature: highest temperature with lowest resistance first, lowest
+ * temperature with highest resistance last.
+ * @resist_table_size: the number of items in the resist_table.
+ * @vbat2ri_discharging: this is a table that correlates Battery voltage (VBAT)
+ * to internal resistance (Ri). The resistance is given in microohm for the
+ * corresponding voltage in microvolts. The internal resistance is used to
+ * determine the open circuit voltage so that we can determine the capacity
+ * of the battery. These voltages to resistance tables apply when the battery
+ * is discharging. The table must be ordered descending by voltage: highest
+ * voltage first.
+ * @vbat2ri_discharging_size: the number of items in the vbat2ri_discharging
+ * table.
+ * @vbat2ri_charging: same function as vbat2ri_discharging but for the state
+ * when the battery is charging. Being under charge changes the battery's
+ * internal resistance characteristics so a separate table is needed.
+ * The table must be ordered descending by voltage: highest voltage first.
+ * @vbat2ri_charging_size: the number of items in the vbat2ri_charging
+ * table.
+ * @bti_resistance_ohm: The Battery Type Indicator (BTI) nominal resistance
+ * in ohms for this battery, if an identification resistor is mounted
+ * between a third battery terminal and ground. This scheme is used by a lot
+ * of mobile device batteries.
+ * @bti_resistance_tolerance: The tolerance in percent of the BTI resistance,
+ *   for example 10 for +/- 10%. If the bti_resistance is set to 7000 and the
+ *   tolerance is 10%, we will detect a proper battery if the BTI resistance
+ *   is between 6300 and 7700 Ohm.
+ *
* This is the recommended struct to manage static battery parameters,
* populated by power_supply_get_battery_info(). Most platform drivers should
* use these for consistency.
+ *
* Its field names must correspond to elements in enum power_supply_property.
- * The default field value is -EINVAL.
- * Power supply class itself doesn't use this.
+ * The default field value is -EINVAL or NULL for pointers.
+ *
+ * CC/CV CHARGING:
+ *
+ * The charging parameters here assume a CC/CV charging scheme. This method
+ * is most common with Lithium Ion batteries (other methods are possible) and
+ * looks as follows:
+ *
+ * ^ Battery voltage
+ * | --- overvoltage_limit_uv
+ * |
+ * | ...................................................
+ * | .. constant_charge_voltage_max_uv
+ * | ..
+ * | .
+ * | .
+ * | .
+ * | .
+ * | .
+ * | .. precharge_voltage_max_uv
+ * | ..
+ * |. (trickle charging)
+ * +------------------------------------------------------------------> time
+ *
+ * ^ Current into the battery
+ * |
+ * | ............. constant_charge_current_max_ua
+ * | . .
+ * | . .
+ * | . .
+ * | . .
+ * | . ..
+ * | . ....
+ * | . .....
+ * | ... precharge_current_ua ....... charge_term_current_ua
+ * | . .
+ * | . .
+ * |.... tricklecharge_current_ua .
+ * | .
+ * +-----------------------------------------------------------------> time
+ *
+ * These diagrams are synchronized on time and the voltage and current
+ * follow each other.
+ *
+ * With CC/CV, charging commences over time like this for an empty battery:
+ *
+ * 1. When the battery is completely empty it may need to be charged with
+ *    an especially small current so that electrons just "trickle in";
+ * this is the tricklecharge_current_ua.
+ *
+ * 2. Next a small initial pre-charge current (precharge_current_ua)
+ * is applied if the voltage is below precharge_voltage_max_uv until we
+ * reach precharge_voltage_max_uv. CAUTION: in some texts this is referred
+ *    to as "trickle charging", but the term is used differently in the Linux
+ *    kernel; see the trickle charging step above!
+ *
+ * 3. Then the main charging current is applied, which is called the constant
+ * current (CC) phase. A current regulator is set up to allow
+ * constant_charge_current_max_ua of current to flow into the battery.
+ * The chemical reaction in the battery will make the voltage go up as
+ * charge goes into the battery. This current is applied until we reach
+ * the constant_charge_voltage_max_uv voltage.
+ *
+ * 4. At this voltage we switch over to the constant voltage (CV) phase. This
+ * means we allow current to go into the battery, but we keep the voltage
+ * fixed. This current will continue to charge the battery while keeping
+ * the voltage the same. A chemical reaction in the battery goes on
+ * storing energy without affecting the voltage. Over time the current
+ * will slowly drop and when we reach charge_term_current_ua we will
+ * end the constant voltage phase.
+ *
+ * After this the battery is fully charged. If we do not support maintenance
+ * charging, charging will not restart until power dissipation makes the
+ * voltage fall so that we reach charge_restart_voltage_uv; at this point
+ * we restart charging at the appropriate phase, usually inside the CV phase.
+ *
+ * If we do support maintenance charging, the voltage is instead kept high
+ * after the CV phase, with a very low current. This is meant to let the same
+ * charge go in for usage while the charger is still connected, mainly to
+ * cover the dissipation of the power-consuming device while it remains
+ * connected to the charger.
+ *
+ * All charging MUST terminate if the overvoltage_limit_uv is ever reached.
+ * Overcharging Lithium Ion cells can be DANGEROUS and lead to fire or
+ * explosions.
+ *
+ * DETERMINING BATTERY CAPACITY:
+ *
+ * Several members of the struct deal with trying to determine the remaining
+ * capacity in the battery, usually as a percentage of charge. In practice
+ * many chargers use a so-called fuel gauge or coulomb counter that measures
+ * how much charge goes into the battery and how much goes out (+/- leak
+ * consumption). This does not help if we do not know how much capacity the
+ * battery has to begin with, such as when it is first used or was taken out
+ * and charged in a separate charger. Therefore many capacity algorithms use
+ * the open circuit voltage with a look-up table to determine the rough
+ * capacity of the battery. The open circuit voltage can be conceptualized
+ * with an ideal voltage source (V) in series with an internal resistance (Ri)
+ * like this:
+ *
+ * +-------> IBAT >----------------+
+ * | ^ |
+ * [ ] Ri | |
+ * | | VBAT |
+ * o <---------- | |
+ * +| ^ | [ ] Rload
+ * .---. | | |
+ * | V | | OCV | |
+ * '---' | | |
+ * | | | |
+ * GND +-------------------------------+
+ *
+ * If we disconnect the load (here simplified as a fixed resistance Rload)
+ * and measure VBAT with an infinite impedance voltage meter, we will get
+ * VBAT = OCV; this assumption is sometimes made even under load, assuming
+ * Rload is insignificant. However, this will be of dubious quality because the
+ * load is rarely that small and Ri is strongly nonlinear depending on
+ * temperature and how much capacity is left in the battery due to the
+ * chemistry involved.
+ *
+ * In many practical applications we cannot just disconnect the battery from
+ * the load, so instead we often try to measure the instantaneous IBAT (the
+ * current out from the battery), estimate the Ri and thus calculate the
+ * voltage drop over Ri and compensate like this:
+ *
+ * OCV = VBAT - (IBAT * Ri)
+ *
+ * The tables vbat2ri_discharging and vbat2ri_charging are used to determine
+ * (by interpolation) the Ri from the VBAT under load. These curves are highly
+ * nonlinear and may need many datapoints but can be found in datasheets for
+ * some batteries. This gives the compensated open circuit voltage (OCV) for
+ * the battery even under load. Using this method will also compensate for
+ * temperature changes in the environment: this will also make the internal
+ * resistance change, and it will affect the VBAT under load, so correlating
+ * VBAT to Ri takes both remaining capacity and temperature into consideration.
+ *
+ * Alternatively a manufacturer can specify how the capacity of the battery
+ * is dependent on the battery temperature which is the main factor affecting
+ * Ri. As we know, all chemical reactions are faster when it is warm and slower
+ * when it is cold: you can, for example, put in 1500mAh and only get 800mAh
+ * out before the voltage drops too low. This effect is also highly nonlinear,
+ * and this is the purpose of resist_table: it takes a temperature and tells
+ * us how big a percentage of Ri the specified temperature correlates to.
+ * Usually we have 100% of the factory_internal_resistance_uohm at 25 degrees
+ * Celsius.
+ *
+ * The power supply class itself doesn't use this struct as of now.
*/
struct power_supply_battery_info {
- int energy_full_design_uwh; /* microWatt-hours */
- int charge_full_design_uah; /* microAmp-hours */
- int voltage_min_design_uv; /* microVolts */
- int precharge_current_ua; /* microAmps */
- int charge_term_current_ua; /* microAmps */
- int constant_charge_current_max_ua; /* microAmps */
- int constant_charge_voltage_max_uv; /* microVolts */
+ unsigned int technology;
+ int energy_full_design_uwh;
+ int charge_full_design_uah;
+ int voltage_min_design_uv;
+ int voltage_max_design_uv;
+ int tricklecharge_current_ua;
+ int precharge_current_ua;
+ int precharge_voltage_max_uv;
+ int charge_term_current_ua;
+ int charge_restart_voltage_uv;
+ int overvoltage_limit_uv;
+ int constant_charge_current_max_ua;
+ int constant_charge_voltage_max_uv;
+ struct power_supply_maintenance_charge_table *maintenance_charge;
+ int maintenance_charge_size;
+ int alert_low_temp_charge_current_ua;
+ int alert_low_temp_charge_voltage_uv;
+ int alert_high_temp_charge_current_ua;
+ int alert_high_temp_charge_voltage_uv;
+ int factory_internal_resistance_uohm;
+ int factory_internal_resistance_charging_uohm;
+ int ocv_temp[POWER_SUPPLY_OCV_TEMP_MAX];
+ int temp_ambient_alert_min;
+ int temp_ambient_alert_max;
+ int temp_alert_min;
+ int temp_alert_max;
+ int temp_min;
+ int temp_max;
+ struct power_supply_battery_ocv_table *ocv_table[POWER_SUPPLY_OCV_TEMP_MAX];
+ int ocv_table_size[POWER_SUPPLY_OCV_TEMP_MAX];
+ struct power_supply_resistance_temp_table *resist_table;
+ int resist_table_size;
+ struct power_supply_vbat_ri_table *vbat2ri_discharging;
+ int vbat2ri_discharging_size;
+ struct power_supply_vbat_ri_table *vbat2ri_charging;
+ int vbat2ri_charging_size;
+ int bti_resistance_ohm;
+ int bti_resistance_tolerance;
};
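
For illustration only (not part of this patch): a minimal sketch of how a
charger driver might step through the CC/CV phases described above. The
set_current_ua(), set_voltage_uv() and stop_charging() hooks are hypothetical
hardware callbacks, and the trickle/precharge boundary is an assumption since
the struct does not define one.

/* Hypothetical hardware hooks, for illustration only: */
extern void set_current_ua(int ua);
extern void set_voltage_uv(int uv);
extern void stop_charging(void);

/* Sketch: select a CC/CV charging phase from battery_info thresholds. */
static void select_charge_phase(struct power_supply_battery_info *info,
				int vbat_uv, int ibat_ua)
{
	if (vbat_uv >= info->overvoltage_limit_uv) {
		stop_charging();	/* charging MUST terminate here */
	} else if (vbat_uv < info->precharge_voltage_max_uv / 2) {
		/* assumed boundary for a "completely empty" battery */
		set_current_ua(info->tricklecharge_current_ua);
	} else if (vbat_uv < info->precharge_voltage_max_uv) {
		set_current_ua(info->precharge_current_ua);
	} else if (vbat_uv < info->constant_charge_voltage_max_uv) {
		/* CC phase: regulate current, let the voltage rise */
		set_current_ua(info->constant_charge_current_max_ua);
	} else if (ibat_ua > info->charge_term_current_ua) {
		/* CV phase: hold the voltage, the current tapers off */
		set_voltage_uv(info->constant_charge_voltage_max_uv);
	} else {
		stop_charging();	/* fully charged */
	}
}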
extern struct atomic_notifier_head power_supply_notifier;
extern int power_supply_reg_notifier(struct notifier_block *nb);
extern void power_supply_unreg_notifier(struct notifier_block *nb);
+#if IS_ENABLED(CONFIG_POWER_SUPPLY)
extern struct power_supply *power_supply_get_by_name(const char *name);
extern void power_supply_put(struct power_supply *psy);
+#else
+static inline void power_supply_put(struct power_supply *psy) {}
+static inline struct power_supply *power_supply_get_by_name(const char *name)
+{ return NULL; }
+#endif
#ifdef CONFIG_OF
extern struct power_supply *power_supply_get_by_phandle(struct device_node *np,
const char *property);
@@ -328,12 +792,64 @@ devm_power_supply_get_by_phandle(struct device *dev, const char *property)
{ return NULL; }
#endif /* CONFIG_OF */
+extern const enum power_supply_property power_supply_battery_info_properties[];
+extern const size_t power_supply_battery_info_properties_size;
extern int power_supply_get_battery_info(struct power_supply *psy,
- struct power_supply_battery_info *info);
+ struct power_supply_battery_info **info_out);
+extern void power_supply_put_battery_info(struct power_supply *psy,
+ struct power_supply_battery_info *info);
+extern bool power_supply_battery_info_has_prop(struct power_supply_battery_info *info,
+ enum power_supply_property psp);
+extern int power_supply_battery_info_get_prop(struct power_supply_battery_info *info,
+ enum power_supply_property psp,
+ union power_supply_propval *val);
+extern int power_supply_ocv2cap_simple(struct power_supply_battery_ocv_table *table,
+ int table_len, int ocv);
+extern struct power_supply_battery_ocv_table *
+power_supply_find_ocv2cap_table(struct power_supply_battery_info *info,
+ int temp, int *table_len);
+extern int power_supply_batinfo_ocv2cap(struct power_supply_battery_info *info,
+ int ocv, int temp);
+extern int
+power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table,
+ int table_len, int temp);
+extern int power_supply_vbat2ri(struct power_supply_battery_info *info,
+ int vbat_uv, bool charging);
+extern struct power_supply_maintenance_charge_table *
+power_supply_get_maintenance_charging_setting(struct power_supply_battery_info *info, int index);
+extern bool power_supply_battery_bti_in_range(struct power_supply_battery_info *info,
+ int resistance);
extern void power_supply_changed(struct power_supply *psy);
extern int power_supply_am_i_supplied(struct power_supply *psy);
+int power_supply_get_property_from_supplier(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val);
extern int power_supply_set_battery_charged(struct power_supply *psy);
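
For illustration (not part of the patch): with the reworked
power_supply_get_battery_info() above returning an allocated struct through
info_out, a driver pairs it with power_supply_put_battery_info(), roughly:

/* Sketch: fetch, use and release static battery data in a driver. */
static int demo_use_battery_info(struct power_supply *psy)
{
	struct power_supply_battery_info *info;
	int ret;

	ret = power_supply_get_battery_info(psy, &info);
	if (ret)
		return ret;

	/* unpopulated integer fields default to -EINVAL */
	if (info->charge_full_design_uah != -EINVAL)
		pr_info("design capacity: %d uAh\n",
			info->charge_full_design_uah);

	power_supply_put_battery_info(psy, info);
	return 0;
}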
+static inline bool
+power_supply_supports_maintenance_charging(struct power_supply_battery_info *info)
+{
+ struct power_supply_maintenance_charge_table *mt;
+
+ mt = power_supply_get_maintenance_charging_setting(info, 0);
+
+ return (mt != NULL);
+}
+
+static inline bool
+power_supply_supports_vbat2ri(struct power_supply_battery_info *info)
+{
+ return ((info->vbat2ri_discharging != NULL) &&
+ info->vbat2ri_discharging_size > 0);
+}
+
+static inline bool
+power_supply_supports_temp2ri(struct power_supply_battery_info *info)
+{
+ return ((info->resist_table != NULL) &&
+ info->resist_table_size > 0);
+}
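
Again for illustration only: the helpers above can be combined into the OCV
compensation described in the struct documentation (OCV = VBAT - IBAT * Ri).
The unit handling (uA * uOhm = pV, scaled down by 10^6 to uV, using div_s64()
from <linux/math64.h>) and the sign convention of ibat_ua are assumptions of
this sketch.

/* Sketch: estimate capacity (%) from a loaded VBAT measurement. */
static int demo_guess_capacity(struct power_supply_battery_info *info,
			       int vbat_uv, int ibat_ua, int temp)
{
	int ri_uohm, ocv_uv;

	if (!power_supply_supports_vbat2ri(info))
		return -EINVAL;

	ri_uohm = power_supply_vbat2ri(info, vbat_uv, false); /* discharging */
	if (ri_uohm < 0)
		return ri_uohm;

	/* uA * uOhm gives picovolts; divide by 10^6 to get back to uV */
	ocv_uv = vbat_uv - (int)div_s64((s64)ibat_ua * ri_uohm, 1000000);

	return power_supply_batinfo_ocv2cap(info, ocv_uv, temp);
}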
+
#ifdef CONFIG_POWER_SUPPLY
extern int power_supply_is_system_supplied(void);
#else
@@ -343,9 +859,16 @@ static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
extern int power_supply_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val);
+#if IS_ENABLED(CONFIG_POWER_SUPPLY)
extern int power_supply_set_property(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val);
+#else
+static inline int power_supply_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{ return 0; }
+#endif
extern int power_supply_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp);
extern void power_supply_external_power_changed(struct power_supply *psy);
@@ -369,6 +892,8 @@ devm_power_supply_register_no_ws(struct device *parent,
extern void power_supply_unregister(struct power_supply *psy);
extern int power_supply_powers(struct power_supply *psy, struct device *dev);
+#define to_power_supply(device) container_of(device, struct power_supply, dev)
+
extern void *power_supply_get_drvdata(struct power_supply *psy);
/* For APM emulation, think legacy userspace. */
extern struct class *power_supply_class;
@@ -391,12 +916,12 @@ static inline bool power_supply_is_amp_property(enum power_supply_property psp)
case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_CURRENT_AVG:
case POWER_SUPPLY_PROP_CURRENT_BOOT:
- return 1;
+ return true;
default:
break;
}
- return 0;
+ return false;
}
static inline bool power_supply_is_watt_property(enum power_supply_property psp)
@@ -419,12 +944,49 @@ static inline bool power_supply_is_watt_property(enum power_supply_property psp)
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
case POWER_SUPPLY_PROP_POWER_NOW:
- return 1;
+ return true;
default:
break;
}
+ return false;
+}
+
+#ifdef CONFIG_POWER_SUPPLY_HWMON
+int power_supply_add_hwmon_sysfs(struct power_supply *psy);
+void power_supply_remove_hwmon_sysfs(struct power_supply *psy);
+#else
+static inline int power_supply_add_hwmon_sysfs(struct power_supply *psy)
+{
return 0;
}
+static inline
+void power_supply_remove_hwmon_sysfs(struct power_supply *psy) {}
+#endif
+
+#ifdef CONFIG_SYSFS
+ssize_t power_supply_charge_behaviour_show(struct device *dev,
+ unsigned int available_behaviours,
+ enum power_supply_charge_behaviour behaviour,
+ char *buf);
+
+int power_supply_charge_behaviour_parse(unsigned int available_behaviours, const char *buf);
+#else
+static inline
+ssize_t power_supply_charge_behaviour_show(struct device *dev,
+ unsigned int available_behaviours,
+ enum power_supply_charge_behaviour behaviour,
+ char *buf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int power_supply_charge_behaviour_parse(unsigned int available_behaviours,
+ const char *buf)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
#endif /* __LINUX_POWER_SUPPLY_H__ */
diff --git a/include/linux/powercap.h b/include/linux/powercap.h
index f0a4e6257dcc..3d557bbcd2c7 100644
--- a/include/linux/powercap.h
+++ b/include/linux/powercap.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* powercap.h: Data types and headers for sysfs power capping interface
* Copyright (c) 2013, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.
- *
*/
#ifndef __POWERCAP_H__
@@ -56,19 +44,18 @@ struct powercap_control_type_ops {
};
/**
- * struct powercap_control_type- Defines a powercap control_type
- * @name: name of control_type
+ * struct powercap_control_type - Defines a powercap control_type
* @dev: device for this control_type
* @idr: idr to have unique id for its child
- * @root_node: Root holding power zones for this control_type
+ * @nr_zones: counter for number of zones of this type
* @ops: Pointer to callback struct
- * @node_lock: mutex for control type
+ * @lock: mutex for control type
 * @allocated: It is possible that the client owns the memory
* used by this structure. In this case
* this flag is set to false by framework to
* prevent deallocation during release process.
* Otherwise this flag is set to true.
- * @ctrl_inst: link to the control_type list
+ * @node: linked-list node
*
* Defines powercap control_type. This acts as a container for power
* zones, which use same method to control power. E.g. RAPL, RAPL-PCI etc.
@@ -141,7 +128,7 @@ struct powercap_zone_ops {
* this flag is set to false by framework to
* prevent deallocation during release process.
* Otherwise this flag is set to true.
- * @constraint_ptr: List of constraints for this zone.
+ * @constraints: List of constraints for this zone.
*
* This defines a power zone instance. The fields of this structure are
* private, and should not be used by client drivers.
diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
index 4ea1d377e1ad..fb847e47f148 100644
--- a/include/linux/ppp-comp.h
+++ b/include/linux/ppp-comp.h
@@ -1,18 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ppp-comp.h - Definitions for doing PPP packet compression.
*
* Copyright 1994-1998 Paul Mackerras.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#ifndef _NET_PPP_COMP_H
#define _NET_PPP_COMP_H
#include <uapi/linux/ppp-comp.h>
-
+struct compstat;
struct module;
/*
diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h
index 5d87f810a3b7..45e6e427ceb8 100644
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _PPP_CHANNEL_H_
#define _PPP_CHANNEL_H_
/*
@@ -11,11 +12,6 @@
*
* Copyright 1999 Paul Mackerras.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* ==FILEVERSION 20000322==
*/
@@ -24,6 +20,8 @@
#include <linux/poll.h>
#include <net/net_namespace.h>
+struct net_device_path;
+struct net_device_path_ctx;
struct ppp_channel;
struct ppp_channel_ops {
@@ -32,6 +30,9 @@ struct ppp_channel_ops {
int (*start_xmit)(struct ppp_channel *, struct sk_buff *);
/* Handle an ioctl call that has come in via /dev/ppp. */
int (*ioctl)(struct ppp_channel *, unsigned int, unsigned long);
+ int (*fill_forward_path)(struct net_device_path_ctx *,
+ struct net_device_path *,
+ const struct ppp_channel *);
};
struct ppp_channel {
diff --git a/include/linux/ppp_defs.h b/include/linux/ppp_defs.h
index 28aa0237c8c3..b7e57fdbd413 100644
--- a/include/linux/ppp_defs.h
+++ b/include/linux/ppp_defs.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ppp_defs.h - PPP definitions.
*
* Copyright 1994-2000 Paul Mackerras.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#ifndef _PPP_DEFS_H_
#define _PPP_DEFS_H_
@@ -14,4 +11,18 @@
#include <uapi/linux/ppp_defs.h>
#define PPP_FCS(fcs, c) crc_ccitt_byte(fcs, c)
+
+/**
+ * ppp_proto_is_valid - checks if PPP protocol is valid
+ * @proto: PPP protocol
+ *
+ * Assumes proto is not compressed.
+ * Protocol is valid if the value is odd and the least significant bit of the
+ * most significant octet is 0 (see RFC 1661, section 2).
+ */
+static inline bool ppp_proto_is_valid(u16 proto)
+{
+ return !!((proto & 0x0101) == 0x0001);
+}
+
#endif /* _PPP_DEFS_H_ */
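
As an illustration of the rule (not part of the patch), using the PPP_IP and
PPP_IPCP constants from uapi/linux/ppp_defs.h:

/* Hypothetical self-test: valid protocols are odd with bit 8 clear. */
static void ppp_proto_demo(void)
{
	WARN_ON(!ppp_proto_is_valid(PPP_IP));	/* 0x0021: valid */
	WARN_ON(!ppp_proto_is_valid(PPP_IPCP));	/* 0x8021: valid */
	WARN_ON(ppp_proto_is_valid(0x0100));	/* even value: invalid */
	WARN_ON(ppp_proto_is_valid(0x0121));	/* bit 8 set: invalid */
}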
diff --git a/include/linux/pps-gpio.h b/include/linux/pps-gpio.h
deleted file mode 100644
index 0035abe41b9a..000000000000
--- a/include/linux/pps-gpio.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * pps-gpio.h -- PPS client for GPIOs
- *
- *
- * Copyright (C) 2011 James Nuss <jamesnuss@nanometrics.ca>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef _PPS_GPIO_H
-#define _PPS_GPIO_H
-
-struct pps_gpio_platform_data {
- bool assert_falling_edge;
- bool capture_clear;
- unsigned int gpio_pin;
- const char *gpio_label;
-};
-
-#endif
diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
index 35ac903956c7..78c8ac4951b5 100644
--- a/include/linux/pps_kernel.h
+++ b/include/linux/pps_kernel.h
@@ -1,28 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* PPS API kernel header
*
* Copyright (C) 2009 Rodolfo Giometti <giometti@linux.it>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef LINUX_PPS_KERNEL_H
#define LINUX_PPS_KERNEL_H
#include <linux/pps.h>
-
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/time.h>
@@ -35,9 +21,9 @@ struct pps_device;
/* The specific PPS source info */
struct pps_source_info {
- char name[PPS_MAX_NAME_LEN]; /* simbolic name */
+ char name[PPS_MAX_NAME_LEN]; /* symbolic name */
char path[PPS_MAX_NAME_LEN]; /* path of connected device */
- int mode; /* PPS's allowed mode */
+ int mode; /* PPS allowed mode */
void (*echo)(struct pps_device *pps,
int event, void *data); /* PPS echo function */
@@ -57,10 +43,10 @@ struct pps_event_time {
struct pps_device {
	struct pps_source_info info;		/* PPS source info */
- struct pps_kparams params; /* PPS's current params */
+ struct pps_kparams params; /* PPS current params */
- __u32 assert_sequence; /* PPS' assert event seq # */
- __u32 clear_sequence; /* PPS' clear event seq # */
+ __u32 assert_sequence; /* PPS assert event seq # */
+ __u32 clear_sequence; /* PPS clear event seq # */
struct pps_ktime assert_tu;
struct pps_ktime clear_tu;
int current_mode; /* PPS mode at event time */
@@ -69,7 +55,7 @@ struct pps_device {
wait_queue_head_t queue; /* PPS event queue */
unsigned int id; /* PPS source unique ID */
- void const *lookup_cookie; /* pps_lookup_dev only */
+ void const *lookup_cookie; /* For pps_lookup_dev() only */
struct cdev cdev;
struct device *dev;
struct fasync_struct *async_queue; /* fasync method */
@@ -101,7 +87,7 @@ extern struct pps_device *pps_register_source(
extern void pps_unregister_source(struct pps_device *pps);
extern void pps_event(struct pps_device *pps,
struct pps_event_time *ts, int event, void *data);
-/* Look up a pps device by magic cookie */
+/* Look up a pps_device by magic cookie */
struct pps_device *pps_lookup_dev(void const *cookie);
static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
@@ -132,4 +118,3 @@ static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta
}
#endif /* LINUX_PPS_KERNEL_H */
-
diff --git a/include/linux/pr.h b/include/linux/pr.h
index 65c01c10b335..94ceec713afe 100644
--- a/include/linux/pr.h
+++ b/include/linux/pr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_PR_H
#define LINUX_PR_H
diff --git a/include/linux/prandom.h b/include/linux/prandom.h
new file mode 100644
index 000000000000..f2ed5b72b3d6
--- /dev/null
+++ b/include/linux/prandom.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * include/linux/prandom.h
+ *
+ * Include file for the fast pseudo-random 32-bit
+ * generation.
+ */
+#ifndef _LINUX_PRANDOM_H
+#define _LINUX_PRANDOM_H
+
+#include <linux/types.h>
+#include <linux/once.h>
+#include <linux/percpu.h>
+#include <linux/random.h>
+
+struct rnd_state {
+ __u32 s1, s2, s3, s4;
+};
+
+u32 prandom_u32_state(struct rnd_state *state);
+void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
+void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
+
+#define prandom_init_once(pcpu_state) \
+ DO_ONCE(prandom_seed_full_state, (pcpu_state))
+
+/*
+ * Handle minimum values for seeds
+ */
+static inline u32 __seed(u32 x, u32 m)
+{
+ return (x < m) ? x + m : x;
+}
+
+/**
+ * prandom_seed_state - set seed for prandom_u32_state().
+ * @state: pointer to state structure to receive the seed.
+ * @seed: arbitrary 64-bit value to use as a seed.
+ */
+static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
+{
+ u32 i = ((seed >> 32) ^ (seed << 10) ^ seed) & 0xffffffffUL;
+
+ state->s1 = __seed(i, 2U);
+ state->s2 = __seed(i, 8U);
+ state->s3 = __seed(i, 16U);
+ state->s4 = __seed(i, 128U);
+}
+
+/* Pseudo random number generator from numerical recipes. */
+static inline u32 next_pseudo_random32(u32 seed)
+{
+ return seed * 1664525 + 1013904223;
+}
+
+#endif
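
For illustration only: the point of the state-based API is a caller-owned,
reproducible sequence, e.g. for tests (sketch):

/* Sketch: same seed, same sequence - useful for reproducible tests. */
static void demo_prandom(void)
{
	struct rnd_state state;
	u32 a, b;

	prandom_seed_state(&state, 42);
	a = prandom_u32_state(&state);

	prandom_seed_state(&state, 42);
	b = prandom_u32_state(&state);

	WARN_ON(a != b);
}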
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index cae461224948..0df425bf9bd7 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H
@@ -25,13 +26,13 @@
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
* HARDIRQ_MASK: 0x000f0000
- * NMI_MASK: 0x00100000
+ * NMI_MASK: 0x00f00000
* PREEMPT_NEED_RESCHED: 0x80000000
*/
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define HARDIRQ_BITS 4
-#define NMI_BITS 1
+#define NMI_BITS 4
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
@@ -52,9 +53,6 @@
#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
-/* We use the MSB mostly because its available */
-#define PREEMPT_NEED_RESCHED 0x80000000
-
#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
/*
@@ -79,31 +77,58 @@
/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>
+/**
+ * interrupt_context_level - return interrupt context level
+ *
+ * Returns the current interrupt context level.
+ * 0 - normal context
+ * 1 - softirq context
+ * 2 - hardirq context
+ * 3 - NMI context
+ */
+static __always_inline unsigned char interrupt_context_level(void)
+{
+ unsigned long pc = preempt_count();
+ unsigned char level = 0;
+
+ level += !!(pc & (NMI_MASK));
+ level += !!(pc & (NMI_MASK | HARDIRQ_MASK));
+ level += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+
+ return level;
+}
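
This can be used, e.g., to keep separate per-context scratch storage; an
illustrative sketch with hypothetical names:

/* Sketch: index a per-context buffer by nesting level (0-3). */
struct demo_ctx_buf {
	char buf[4][256];	/* task, softirq, hardirq, NMI */
};

static char *demo_get_buf(struct demo_ctx_buf *b)
{
	return b->buf[interrupt_context_level()];
}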
+
+#define nmi_count() (preempt_count() & NMI_MASK)
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
-#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
-#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
- | NMI_MASK))
+#ifdef CONFIG_PREEMPT_RT
+# define softirq_count() (current->softirq_disable_cnt & SOFTIRQ_MASK)
+#else
+# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+#endif
+#define irq_count() (nmi_count() | hardirq_count() | softirq_count())
/*
- * Are we doing bottom half or hardware interrupt processing?
+ * Macros to retrieve the current execution context:
*
- * in_irq() - We're in (hard) IRQ context
+ * in_nmi() - We're in NMI context
+ * in_hardirq() - We're in hard IRQ context
+ * in_serving_softirq() - We're in softirq context
+ * in_task() - We're in task context
+ */
+#define in_nmi() (nmi_count())
+#define in_hardirq() (hardirq_count())
+#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+#define in_task() (!(in_nmi() | in_hardirq() | in_serving_softirq()))
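
A common pattern these predicates enable, sketched here for illustration (not
from this patch):

/* Sketch: choose allocation behaviour based on execution context. */
static void *demo_ctx_alloc(size_t len)
{
	/* sleeping allocations are only safe in task context */
	return kmalloc(len, in_task() ? GFP_KERNEL : GFP_ATOMIC);
}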
+
+/*
+ * The following macros are deprecated and should not be used in new code:
+ * in_irq() - Obsolete version of in_hardirq()
* in_softirq() - We have BH disabled, or are processing softirqs
* in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
- * in_serving_softirq() - We're in softirq context
- * in_nmi() - We're in NMI context
- * in_task() - We're in task context
- *
- * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really
- * should not be used in new code.
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-#define in_nmi() (preempt_count() & NMI_MASK)
-#define in_task() (!(preempt_count() & \
- (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
/*
* The preempt_count offset after preempt_disable();
@@ -117,7 +142,12 @@
/*
* The preempt_count offset after spin_lock()
*/
-#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
+#if !defined(CONFIG_PREEMPT_RT)
+#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
+#else
+/* Locks on RT do not disable preemption */
+#define PREEMPT_LOCK_OFFSET 0
+#endif
/*
* The preempt_count offset needed for things like:
@@ -149,7 +179,7 @@
*/
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)
-#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
+#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
@@ -184,7 +214,7 @@ do { \
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
#define preempt_enable() \
do { \
barrier(); \
@@ -205,7 +235,7 @@ do { \
__preempt_schedule(); \
} while (0)
-#else /* !CONFIG_PREEMPT */
+#else /* !CONFIG_PREEMPTION */
#define preempt_enable() \
do { \
barrier(); \
@@ -219,7 +249,7 @@ do { \
} while (0)
#define preempt_check_resched() do { } while (0)
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
#define preempt_disable_notrace() \
do { \
@@ -324,4 +354,113 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
#endif
+#ifdef CONFIG_SMP
+
+/*
+ * Migrate-Disable and why it is undesired.
+ *
+ * When a preempted task becomes eligible to run under the ideal model (IOW it
+ * becomes one of the M highest priority tasks), it might still have to wait
+ * for the preemptee's migrate_disable() section to complete. Thereby suffering
+ * a reduction in bandwidth in the exact duration of the migrate_disable()
+ * section.
+ *
+ * Per this argument, the change from preempt_disable() to migrate_disable()
+ * gets us:
+ *
+ * - a higher priority task gains reduced wake-up latency; with preempt_disable()
+ *   it would have had to wait for the lower priority task.
+ *
+ * - a lower priority task, which under preempt_disable() could've instantly
+ *   migrated away when another CPU becomes available, is now constrained
+ *   by the ability to push the higher priority task away, which might itself be
+ *   in a migrate_disable() section, reducing its available bandwidth.
+ *
+ * IOW it trades latency / moves the interference term, but it stays in the
+ * system, and as long as it remains unbounded, the system is not fully
+ * deterministic.
+ *
+ *
+ * The reason we have it anyway.
+ *
+ * PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a
+ * number of primitives into becoming preemptible, they would also allow
+ * migration. This turns out to break a bunch of per-cpu usage. To this end,
+ * all these primitives employ migrate_disable() to restore this implicit
+ * assumption.
+ *
+ * This is a 'temporary' work-around at best. The correct solution is getting
+ * rid of the above assumptions and reworking the code to employ explicit
+ * per-cpu locking or short preempt-disable regions.
+ *
+ * The end goal must be to get rid of migrate_disable(), alternatively we need
+ * a schedulability theory that does not depend on arbitrary migration.
+ *
+ *
+ * Notes on the implementation.
+ *
+ * The implementation is particularly tricky since existing code patterns
+ * dictate neither migrate_disable() nor migrate_enable() is allowed to block.
+ * This means that it cannot use cpus_read_lock() to serialize against hotplug,
+ * nor can it easily migrate itself into a pending affinity mask change on
+ * migrate_enable().
+ *
+ *
+ * Note: even non-work-conserving schedulers like semi-partitioned ones depend on
+ * migration, so migrate_disable() is not only a problem for
+ * work-conserving schedulers.
+ *
+ */
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+
+#else
+
+static inline void migrate_disable(void) { }
+static inline void migrate_enable(void) { }
+
+#endif /* CONFIG_SMP */
+
+/**
+ * preempt_disable_nested - Disable preemption inside a normally preempt disabled section
+ *
+ * Use for code which requires preemption protection inside a critical
+ * section which has preemption disabled implicitly on non-PREEMPT_RT
+ * enabled kernels, by e.g.:
+ * - holding a spinlock/rwlock
+ * - soft interrupt context
+ * - regular interrupt handlers
+ *
+ * On PREEMPT_RT enabled kernels spinlock/rwlock held sections, soft
+ * interrupt context and regular interrupt handlers are preemptible and
+ * only prevent migration. preempt_disable_nested() ensures that preemption
+ * is disabled for cases which require CPU local serialization even on
+ * PREEMPT_RT. For non-PREEMPT_RT kernels this is a NOP.
+ *
+ * The use cases are code sequences which are not serialized by a
+ * particular lock instance, e.g.:
+ * - seqcount write side critical sections where the seqcount is not
+ * associated to a particular lock and therefore the automatic
+ * protection mechanism does not work. This prevents a live lock
+ * against a preempting high priority reader.
+ * - RMW per CPU variable updates like vmstat.
+ */
+/* Macro to avoid header recursion hell vs. lockdep */
+#define preempt_disable_nested() \
+do { \
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) \
+ preempt_disable(); \
+ else \
+ lockdep_assert_preemption_disabled(); \
+} while (0)
+
+/**
+ * preempt_enable_nested - Undo the effect of preempt_disable_nested()
+ */
+static __always_inline void preempt_enable_nested(void)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
+}
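
An illustrative sketch of the per-CPU RMW case mentioned in the kerneldoc
above; the counter and the surrounding lock are hypothetical:

static DEFINE_PER_CPU(unsigned long, demo_events);	/* hypothetical */

/*
 * Called with a spinlock held: preemption is implicitly off on
 * !PREEMPT_RT, but must be disabled explicitly on PREEMPT_RT.
 */
static void demo_count_event(void)
{
	preempt_disable_nested();
	__this_cpu_inc(demo_events);
	preempt_enable_nested();
}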
+
#endif /* __LINUX_PREEMPT_H */
diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h
index a3bfbdf63d32..b83a3f944f28 100644
--- a/include/linux/prefetch.h
+++ b/include/linux/prefetch.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Generic cache management functions. Everything is arch-specific,
* but this header exists to make sure the defines/functions can be
@@ -14,6 +15,7 @@
#include <asm/processor.h>
#include <asm/cache.h>
+struct page;
/*
prefetch(x) attempts to pre-emptively get the memory pointed to
by address "x" into the CPU L1 cache.
@@ -61,4 +63,11 @@ static inline void prefetch_range(void *addr, size_t len)
#endif
}
+static inline void prefetch_page_address(struct page *page)
+{
+#if defined(WANT_PAGE_VIRTUAL) || defined(HASHED_PAGE_VIRTUAL)
+ prefetch(page);
+#endif
+}
+
#endif
diff --git a/include/linux/prime_numbers.h b/include/linux/prime_numbers.h
index 14ec4f567342..2b8e99c948d3 100644
--- a/include/linux/prime_numbers.h
+++ b/include/linux/prime_numbers.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PRIME_NUMBERS_H
#define __LINUX_PRIME_NUMBERS_H
diff --git a/include/linux/printk.h b/include/linux/printk.h
index e10f27468322..8ef499ab3c1e 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -1,15 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KERNEL_PRINTK__
#define __KERNEL_PRINTK__
-#include <stdarg.h>
+#include <linux/stdarg.h>
#include <linux/init.h>
#include <linux/kern_levels.h>
#include <linux/linkage.h>
-#include <linux/cache.h>
+#include <linux/ratelimit_types.h>
+#include <linux/once_lite.h>
extern const char linux_banner[];
extern const char linux_proc_banner[];
+extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */
+
#define PRINTK_MAX_SINGLE_HEADER_LEN 2
static inline int printk_get_level(const char *buffer)
@@ -17,7 +21,6 @@ static inline int printk_get_level(const char *buffer)
if (buffer[0] == KERN_SOH_ASCII && buffer[1]) {
switch (buffer[1]) {
case '0' ... '7':
- case 'd': /* KERN_DEFAULT */
case 'c': /* KERN_CONT */
return buffer[1];
}
@@ -41,23 +44,21 @@ static inline const char *printk_skip_headers(const char *buffer)
return buffer;
}
-#define CONSOLE_EXT_LOG_MAX 8192
-
/* printk's without a loglevel use this.. */
#define MESSAGE_LOGLEVEL_DEFAULT CONFIG_MESSAGE_LOGLEVEL_DEFAULT
/* We show everything that is MORE important than this.. */
#define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */
#define CONSOLE_LOGLEVEL_MIN 1 /* Minimum loglevel we let people use */
-#define CONSOLE_LOGLEVEL_QUIET 4 /* Shhh ..., when booted with "quiet" */
#define CONSOLE_LOGLEVEL_DEBUG 10 /* issue debug messages */
#define CONSOLE_LOGLEVEL_MOTORMOUTH 15 /* You can't shut this one up */
/*
- * Default used to be hard-coded at 7, we're now allowing it to be set from
- * kernel config.
+ * Default used to be hard-coded at 7, quiet used to be hardcoded at 4,
+ * we're now allowing both to be set from kernel config.
*/
#define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT
+#define CONSOLE_LOGLEVEL_QUIET CONFIG_CONSOLE_LOGLEVEL_QUIET
extern int console_printk[];
@@ -66,22 +67,15 @@ extern int console_printk[];
#define minimum_console_loglevel (console_printk[2])
#define default_console_loglevel (console_printk[3])
-static inline void console_silent(void)
-{
- console_loglevel = CONSOLE_LOGLEVEL_SILENT;
-}
-
-static inline void console_verbose(void)
-{
- if (console_loglevel)
- console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
-}
+extern void console_verbose(void);
/* strlen("ratelimit") + 1 */
#define DEVKMSG_STR_MAX_SIZE 10
extern char devkmsg_log_str[];
struct ctl_table;
+extern int suppress_printk;
+
struct va_format {
const char *fmt;
va_list *va;
@@ -131,10 +125,8 @@ struct va_format {
*/
#define no_printk(fmt, ...) \
({ \
- do { \
- if (0) \
- printk(fmt, ##__VA_ARGS__); \
- } while (0); \
+ if (0) \
+ printk(fmt, ##__VA_ARGS__); \
0; \
})
@@ -146,35 +138,34 @@ static inline __printf(1, 2) __cold
void early_printk(const char *s, ...) { }
#endif
-#ifdef CONFIG_PRINTK_NMI
-extern void printk_nmi_enter(void);
-extern void printk_nmi_exit(void);
-#else
-static inline void printk_nmi_enter(void) { }
-static inline void printk_nmi_exit(void) { }
-#endif /* PRINTK_NMI */
+struct dev_printk_info;
#ifdef CONFIG_PRINTK
-asmlinkage __printf(5, 0)
+asmlinkage __printf(4, 0)
int vprintk_emit(int facility, int level,
- const char *dict, size_t dictlen,
+ const struct dev_printk_info *dev_info,
const char *fmt, va_list args);
asmlinkage __printf(1, 0)
int vprintk(const char *fmt, va_list args);
-asmlinkage __printf(5, 6) __cold
-int printk_emit(int facility, int level,
- const char *dict, size_t dictlen,
- const char *fmt, ...);
-
asmlinkage __printf(1, 2) __cold
-int printk(const char *fmt, ...);
+int _printk(const char *fmt, ...);
/*
* Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ !
*/
-__printf(1, 2) __cold int printk_deferred(const char *fmt, ...);
+__printf(1, 2) __cold int _printk_deferred(const char *fmt, ...);
+
+extern void __printk_safe_enter(void);
+extern void __printk_safe_exit(void);
+/*
+ * The printk_deferred_enter/exit macros are available only as a hack for
+ * some code paths that need to defer all printk console printing. Interrupts
+ * must be disabled for the deferred duration.
+ */
+#define printk_deferred_enter __printk_safe_enter
+#define printk_deferred_exit __printk_safe_exit
/*
* Please don't use printk_ratelimit(), because it shares ratelimiting state
@@ -188,11 +179,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
extern int printk_delay_msec;
extern int dmesg_restrict;
-extern int kptr_restrict;
-
-extern int
-devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void __user *buf,
- size_t *lenp, loff_t *ppos);
extern void wake_up_klogd(void);
@@ -203,9 +189,9 @@ void __init setup_log_buf(int early);
__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
void dump_stack_print_info(const char *log_lvl);
void show_regs_print_info(const char *log_lvl);
-extern void printk_safe_init(void);
-extern void printk_safe_flush(void);
-extern void printk_safe_flush_on_panic(void);
+extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
+extern asmlinkage void dump_stack(void) __cold;
+void printk_trigger_flush(void);
#else
static inline __printf(1, 0)
int vprintk(const char *s, va_list args)
@@ -213,15 +199,24 @@ int vprintk(const char *s, va_list args)
return 0;
}
static inline __printf(1, 2) __cold
-int printk(const char *s, ...)
+int _printk(const char *s, ...)
{
return 0;
}
static inline __printf(1, 2) __cold
-int printk_deferred(const char *s, ...)
+int _printk_deferred(const char *s, ...)
{
return 0;
}
+
+static inline void printk_deferred_enter(void)
+{
+}
+
+static inline void printk_deferred_exit(void)
+{
+}
+
static inline int printk_ratelimit(void)
{
return 0;
@@ -266,55 +261,294 @@ static inline void show_regs_print_info(const char *log_lvl)
{
}
-static inline void printk_safe_init(void)
+static inline void dump_stack_lvl(const char *log_lvl)
{
}
-static inline void printk_safe_flush(void)
+static inline void dump_stack(void)
{
}
-
-static inline void printk_safe_flush_on_panic(void)
+static inline void printk_trigger_flush(void)
{
}
#endif
-extern asmlinkage void dump_stack(void) __cold;
+#ifdef CONFIG_SMP
+extern int __printk_cpu_sync_try_get(void);
+extern void __printk_cpu_sync_wait(void);
+extern void __printk_cpu_sync_put(void);
+
+#else
+
+#define __printk_cpu_sync_try_get() true
+#define __printk_cpu_sync_wait()
+#define __printk_cpu_sync_put()
+#endif /* CONFIG_SMP */
+
+/**
+ * printk_cpu_sync_get_irqsave() - Disable interrupts and acquire the printk
+ * cpu-reentrant spinning lock.
+ * @flags: Stack-allocated storage for saving local interrupt state,
+ * to be passed to printk_cpu_sync_put_irqrestore().
+ *
+ * If the lock is owned by another CPU, spin until it becomes available.
+ * Interrupts are restored while spinning.
+ *
+ * CAUTION: This function must be used carefully. It does not behave like a
+ * typical lock. Here are important things to watch out for...
+ *
+ * * This function is reentrant on the same CPU. Therefore the calling
+ * code must not assume exclusive access to data if code accessing the
+ * data can run reentrant or within NMI context on the same CPU.
+ *
+ * * If there exists usage of this function from NMI context, it becomes
+ * unsafe to perform any type of locking or spinning to wait for other
+ * CPUs after calling this function from any context. This includes
+ * using spinlocks or any other busy-waiting synchronization methods.
+ */
+#define printk_cpu_sync_get_irqsave(flags) \
+ for (;;) { \
+ local_irq_save(flags); \
+ if (__printk_cpu_sync_try_get()) \
+ break; \
+ local_irq_restore(flags); \
+ __printk_cpu_sync_wait(); \
+ }
+/**
+ * printk_cpu_sync_put_irqrestore() - Release the printk cpu-reentrant spinning
+ * lock and restore interrupts.
+ * @flags: Caller's saved interrupt state, from printk_cpu_sync_get_irqsave().
+ */
+#define printk_cpu_sync_put_irqrestore(flags) \
+ do { \
+ __printk_cpu_sync_put(); \
+ local_irq_restore(flags); \
+ } while (0)
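
Usage follows the usual save/restore pairing; a sketch (not from the patch) of
keeping a multi-line report from interleaving with output from other CPUs:

/* Sketch: emit a multi-line report atomically w.r.t. other CPUs. */
static void demo_report(void)
{
	unsigned long flags;

	printk_cpu_sync_get_irqsave(flags);
	pr_err("demo: something odd happened:\n");
	pr_err("demo:   detail one\n");
	pr_err("demo:   detail two\n");
	printk_cpu_sync_put_irqrestore(flags);
}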
+
+extern int kptr_restrict;
+
+/**
+ * pr_fmt - used by the pr_*() macros to generate the printk format string
+ * @fmt: format string passed from a pr_*() macro
+ *
+ * This macro can be used to generate a unified format string for pr_*()
+ * macros. A common use is to prefix all pr_*() messages in a file with a common
+ * string. For example, defining this at the top of a source file:
+ *
+ * #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ *
+ * would prefix all pr_info, pr_emerg... messages in the file with the module
+ * name.
+ */
#ifndef pr_fmt
#define pr_fmt(fmt) fmt
#endif
+struct module;
+
+#ifdef CONFIG_PRINTK_INDEX
+struct pi_entry {
+ const char *fmt;
+ const char *func;
+ const char *file;
+ unsigned int line;
+
+ /*
+ * While printk and pr_* have the level stored in the string at compile
+ * time, some subsystems dynamically add it at runtime through the
+ * format string. For these dynamic cases, we allow the subsystem to
+ * tell us the level at compile time.
+ *
+ * NULL indicates that the level, if any, is stored in fmt.
+ */
+ const char *level;
+
+ /*
+ * The format string used by various subsystem specific printk()
+ * wrappers to prefix the message.
+ *
+ * Note that the static prefix defined by the pr_fmt() macro is stored
+ * directly in the message format (@fmt), not here.
+ */
+ const char *subsys_fmt_prefix;
+} __packed;
+
+#define __printk_index_emit(_fmt, _level, _subsys_fmt_prefix) \
+ do { \
+ if (__builtin_constant_p(_fmt) && __builtin_constant_p(_level)) { \
+ /*
+ * We check __builtin_constant_p multiple times here
+ * for the same input because GCC will produce an error
+ * if we try to assign a static variable to fmt if it
+ * is not a constant, even with the outer if statement.
+ */ \
+ static const struct pi_entry _entry \
+ __used = { \
+ .fmt = __builtin_constant_p(_fmt) ? (_fmt) : NULL, \
+ .func = __func__, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ .level = __builtin_constant_p(_level) ? (_level) : NULL, \
+ .subsys_fmt_prefix = _subsys_fmt_prefix,\
+ }; \
+ static const struct pi_entry *_entry_ptr \
+ __used __section(".printk_index") = &_entry; \
+ } \
+ } while (0)
+
+#else /* !CONFIG_PRINTK_INDEX */
+#define __printk_index_emit(...) do {} while (0)
+#endif /* CONFIG_PRINTK_INDEX */
+
/*
- * These can be used to print at the various log levels.
- * All of these will print unconditionally, although note that pr_debug()
- * and other debug macros are compiled out unless either DEBUG is defined
- * or CONFIG_DYNAMIC_DEBUG is set.
+ * Some subsystems have their own custom printk that applies a va_format to a
+ * generic format, for example, to include a device number or other metadata
+ * alongside the format supplied by the caller.
+ *
+ * In order to store these in the way they would be emitted by the printk
+ * infrastructure, the subsystem provides us with the start, fixed string, and
+ * any subsequent text in the format string.
+ *
+ * We take a variable argument list as pr_fmt/dev_fmt/etc are sometimes passed
+ * as multiple arguments (eg: `"%s: ", "blah"`), and we must only take the
+ * first one.
+ *
+ * subsys_fmt_prefix must be known at compile time, or compilation will fail
+ * (since this is a mistake). If fmt or level is not known at compile time, no
+ * index entry will be made (since this can legitimately happen).
+ */
+#define printk_index_subsys_emit(subsys_fmt_prefix, level, fmt, ...) \
+ __printk_index_emit(fmt, level, subsys_fmt_prefix)
+
+#define printk_index_wrap(_p_func, _fmt, ...) \
+ ({ \
+ __printk_index_emit(_fmt, NULL, NULL); \
+ _p_func(_fmt, ##__VA_ARGS__); \
+ })
+
+
+/**
+ * printk - print a kernel message
+ * @fmt: format string
+ *
+ * This is printk(). It can be called from any context. We want it to work.
+ *
+ * If printk indexing is enabled, _printk() is called from printk_index_wrap.
+ * Otherwise, printk is simply #defined to _printk.
+ *
+ * We try to grab the console_lock. If we succeed, it's easy - we log the
+ * output and call the console drivers. If we fail to get the semaphore, we
+ * place the output into the log buffer and return. The current holder of
+ * the console_sem will notice the new output in console_unlock(); and will
+ * send it to the consoles before releasing the lock.
+ *
+ * One effect of this deferred printing is that code which calls printk() and
+ * then changes console_loglevel may break. This is because console_loglevel
+ * is inspected when the actual printing occurs.
+ *
+ * See also:
+ * printf(3)
+ *
+ * See the vsnprintf() documentation for format string extensions over C99.
+ */
+#define printk(fmt, ...) printk_index_wrap(_printk, fmt, ##__VA_ARGS__)
+#define printk_deferred(fmt, ...) \
+ printk_index_wrap(_printk_deferred, fmt, ##__VA_ARGS__)
+
+/**
+ * pr_emerg - Print an emergency-level message
+ * @fmt: format string
+ * @...: arguments for the format string
+ *
+ * This macro expands to a printk with KERN_EMERG loglevel. It uses pr_fmt() to
+ * generate the format string.
*/
#define pr_emerg(fmt, ...) \
printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+/**
+ * pr_alert - Print an alert-level message
+ * @fmt: format string
+ * @...: arguments for the format string
+ *
+ * This macro expands to a printk with KERN_ALERT loglevel. It uses pr_fmt() to
+ * generate the format string.
+ */
#define pr_alert(fmt, ...) \
printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+/**
+ * pr_crit - Print a critical-level message
+ * @fmt: format string
+ * @...: arguments for the format string
+ *
+ * This macro expands to a printk with KERN_CRIT loglevel. It uses pr_fmt() to
+ * generate the format string.
+ */
#define pr_crit(fmt, ...) \
printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+/**
+ * pr_err - Print an error-level message
+ * @fmt: format string
+ * @...: arguments for the format string
+ *
+ * This macro expands to a printk with KERN_ERR loglevel. It uses pr_fmt() to
+ * generate the format string.
+ */
#define pr_err(fmt, ...) \
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warning(fmt, ...) \
+/**
+ * pr_warn - Print a warning-level message
+ * @fmt: format string
+ * @...: arguments for the format string
+ *
+ * This macro expands to a printk with KERN_WARNING loglevel. It uses pr_fmt()
+ * to generate the format string.
+ */
+#define pr_warn(fmt, ...) \
printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warn pr_warning
+/**
+ * pr_notice - Print a notice-level message
+ * @fmt: format string
+ * @...: arguments for the format string
+ *
+ * This macro expands to a printk with KERN_NOTICE loglevel. It uses pr_fmt() to
+ * generate the format string.
+ */
#define pr_notice(fmt, ...) \
printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+/**
+ * pr_info - Print an info-level message
+ * @fmt: format string
+ * @...: arguments for the format string
+ *
+ * This macro expands to a printk with KERN_INFO loglevel. It uses pr_fmt() to
+ * generate the format string.
+ */
#define pr_info(fmt, ...) \
printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
-/*
- * Like KERN_CONT, pr_cont() should only be used when continuing
- * a line with no newline ('\n') enclosed. Otherwise it defaults
- * back to KERN_DEFAULT.
+
+/**
+ * pr_cont - Continues a previous log message in the same line.
+ * @fmt: format string
+ * @...: arguments for the format string
+ *
+ * This macro expands to a printk with KERN_CONT loglevel. It should only be
+ * used when continuing a log message with no newline ('\n') enclosed. Otherwise
+ * it defaults back to KERN_DEFAULT loglevel.
*/
#define pr_cont(fmt, ...) \
printk(KERN_CONT fmt, ##__VA_ARGS__)
-/* pr_devel() should produce zero code unless DEBUG is defined */
+/**
+ * pr_devel - Print a debug-level message conditionally
+ * @fmt: format string
+ * @...: arguments for the format string
+ *
+ * This macro expands to a printk with KERN_DEBUG loglevel if DEBUG is
+ * defined. Otherwise it does nothing.
+ *
+ * It uses pr_fmt() to generate the format string.
+ */
#ifdef DEBUG
#define pr_devel(fmt, ...) \
printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
@@ -325,11 +559,23 @@ extern asmlinkage void dump_stack(void) __cold;
/* If you are writing a driver, please use dev_dbg instead */
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#include <linux/dynamic_debug.h>
-/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */
-#define pr_debug(fmt, ...) \
+/**
+ * pr_debug - Print a debug-level message conditionally
+ * @fmt: format string
+ * @...: arguments for the format string
+ *
+ * This macro expands to dynamic_pr_debug() if CONFIG_DYNAMIC_DEBUG is
+ * set. Otherwise, if DEBUG is defined, it's equivalent to a printk with
+ * KERN_DEBUG loglevel. If DEBUG is not defined it does nothing.
+ *
+ * It uses pr_fmt() to generate the format string (dynamic_pr_debug() uses
+ * pr_fmt() internally).
+ */
+#define pr_debug(fmt, ...) \
dynamic_pr_debug(fmt, ##__VA_ARGS__)
#elif defined(DEBUG)
#define pr_debug(fmt, ...) \
@@ -345,27 +591,9 @@ extern asmlinkage void dump_stack(void) __cold;
#ifdef CONFIG_PRINTK
#define printk_once(fmt, ...) \
-({ \
- static bool __print_once __read_mostly; \
- bool __ret_print_once = !__print_once; \
- \
- if (!__print_once) { \
- __print_once = true; \
- printk(fmt, ##__VA_ARGS__); \
- } \
- unlikely(__ret_print_once); \
-})
+ DO_ONCE_LITE(printk, fmt, ##__VA_ARGS__)
#define printk_deferred_once(fmt, ...) \
-({ \
- static bool __print_once __read_mostly; \
- bool __ret_print_once = !__print_once; \
- \
- if (!__print_once) { \
- __print_once = true; \
- printk_deferred(fmt, ##__VA_ARGS__); \
- } \
- unlikely(__ret_print_once); \
-})
+ DO_ONCE_LITE(printk_deferred, fmt, ##__VA_ARGS__)
#else
#define printk_once(fmt, ...) \
no_printk(fmt, ##__VA_ARGS__)
@@ -387,8 +615,7 @@ extern asmlinkage void dump_stack(void) __cold;
printk_once(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info_once(fmt, ...) \
printk_once(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_cont_once(fmt, ...) \
- printk_once(KERN_CONT pr_fmt(fmt), ##__VA_ARGS__)
+/* no pr_cont_once, don't do that... */
#if defined(DEBUG)
#define pr_devel_once(fmt, ...) \
@@ -451,7 +678,8 @@ extern asmlinkage void dump_stack(void) __cold;
#endif
/* If you are writing a driver, please use dev_dbg instead */
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define pr_debug_ratelimited(fmt, ...) \
do { \
@@ -459,7 +687,7 @@ do { \
DEFAULT_RATELIMIT_INTERVAL, \
DEFAULT_RATELIMIT_BURST); \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, pr_fmt(fmt)); \
- if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
+ if (DYNAMIC_DEBUG_BRANCH(descriptor) && \
__ratelimit(&_rs)) \
__dynamic_pr_debug(&descriptor, pr_fmt(fmt), ##__VA_ARGS__); \
} while (0)
@@ -485,13 +713,6 @@ extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
extern void print_hex_dump(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
const void *buf, size_t len, bool ascii);
-#if defined(CONFIG_DYNAMIC_DEBUG)
-#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \
- dynamic_hex_dump(prefix_str, prefix_type, 16, 1, buf, len, true)
-#else
-extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
- const void *buf, size_t len);
-#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
#else
static inline void print_hex_dump(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
@@ -505,7 +726,8 @@ static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
#endif
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii) \
dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
@@ -523,4 +745,19 @@ static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type,
}
#endif
+/**
+ * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params
+ * @prefix_str: string to prefix each line with;
+ * caller supplies trailing spaces for alignment if desired
+ * @prefix_type: controls whether prefix of an offset, address, or none
+ * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
+ * @buf: data blob to dump
+ * @len: number of bytes in the @buf
+ *
+ * Calls print_hex_dump(), with log level of KERN_DEBUG,
+ * rowsize of 16, groupsize of 1, and ASCII output included.
+ */
+#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \
+ print_hex_dump_debug(prefix_str, prefix_type, 16, 1, buf, len, true)
+
#endif
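A usage sketch of the relocated wrapper (buffer name and size are illustrative):

	u8 resp[32];

	/* Dumps 16 bytes per row at KERN_DEBUG, with offsets and ASCII. */
	print_hex_dump_bytes("mydrv resp: ", DUMP_PREFIX_OFFSET,
			     resp, sizeof(resp));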
diff --git a/include/linux/prmt.h b/include/linux/prmt.h
new file mode 100644
index 000000000000..24da8364b919
--- /dev/null
+++ b/include/linux/prmt.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifdef CONFIG_ACPI_PRMT
+void init_prmt(void);
+#else
+static inline void init_prmt(void) { }
+#endif
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 2d2bf592d9db..0260f5ea98fe 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -1,55 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* The proc filesystem constants/structures
*/
#ifndef _LINUX_PROC_FS_H
#define _LINUX_PROC_FS_H
+#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/fs.h>
struct proc_dir_entry;
+struct seq_file;
+struct seq_operations;
+
+enum {
+ /*
+ * All /proc entries using this ->proc_ops instance are never removed.
+ *
+ * If in doubt, ignore this flag.
+ */
+#ifdef MODULE
+ PROC_ENTRY_PERMANENT = 0U,
+#else
+ PROC_ENTRY_PERMANENT = 1U << 0,
+#endif
+};
+
+struct proc_ops {
+ unsigned int proc_flags;
+ int (*proc_open)(struct inode *, struct file *);
+ ssize_t (*proc_read)(struct file *, char __user *, size_t, loff_t *);
+ ssize_t (*proc_read_iter)(struct kiocb *, struct iov_iter *);
+ ssize_t (*proc_write)(struct file *, const char __user *, size_t, loff_t *);
+ /* mandatory unless nonseekable_open() or equivalent is used */
+ loff_t (*proc_lseek)(struct file *, loff_t, int);
+ int (*proc_release)(struct inode *, struct file *);
+ __poll_t (*proc_poll)(struct file *, struct poll_table_struct *);
+ long (*proc_ioctl)(struct file *, unsigned int, unsigned long);
+#ifdef CONFIG_COMPAT
+ long (*proc_compat_ioctl)(struct file *, unsigned int, unsigned long);
+#endif
+ int (*proc_mmap)(struct file *, struct vm_area_struct *);
+ unsigned long (*proc_get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+} __randomize_layout;
+
+/* definitions for hide_pid field */
+enum proc_hidepid {
+ HIDEPID_OFF = 0,
+ HIDEPID_NO_ACCESS = 1,
+ HIDEPID_INVISIBLE = 2,
+ HIDEPID_NOT_PTRACEABLE = 4, /* Limit pids to only ptraceable pids */
+};
+
+/* definitions for proc mount option pidonly */
+enum proc_pidonly {
+ PROC_PIDONLY_OFF = 0,
+ PROC_PIDONLY_ON = 1,
+};
+
+struct proc_fs_info {
+ struct pid_namespace *pid_ns;
+ struct dentry *proc_self; /* For /proc/self */
+ struct dentry *proc_thread_self; /* For /proc/thread-self */
+ kgid_t pid_gid;
+ enum proc_hidepid hide_pid;
+ enum proc_pidonly pidonly;
+};
+
+static inline struct proc_fs_info *proc_sb_info(struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
#ifdef CONFIG_PROC_FS
+typedef int (*proc_write_t)(struct file *, char *, size_t);
+
extern void proc_root_init(void);
-extern void proc_flush_task(struct task_struct *);
+extern void proc_flush_pid(struct pid *);
extern struct proc_dir_entry *proc_symlink(const char *,
struct proc_dir_entry *, const char *);
+struct proc_dir_entry *_proc_mkdir(const char *, umode_t, struct proc_dir_entry *, void *, bool);
extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
struct proc_dir_entry *, void *);
extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
struct proc_dir_entry *);
struct proc_dir_entry *proc_create_mount_point(const char *name);
+
+struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode,
+ struct proc_dir_entry *parent, const struct seq_operations *ops,
+ unsigned int state_size, void *data);
+#define proc_create_seq_data(name, mode, parent, ops, data) \
+ proc_create_seq_private(name, mode, parent, ops, 0, data)
+#define proc_create_seq(name, mode, parent, ops) \
+ proc_create_seq_private(name, mode, parent, ops, 0, NULL)
+struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode,
+ struct proc_dir_entry *parent,
+ int (*show)(struct seq_file *, void *), void *data);
+#define proc_create_single(name, mode, parent, show) \
+ proc_create_single_data(name, mode, parent, show, NULL)
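A minimal sketch of the single-show helper; the entry name and show function are assumptions, not part of this header:

	#include <linux/seq_file.h>

	static int mydrv_stats_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "resets: %u\n", 42U);	/* illustrative value */
		return 0;
	}

	/* in module init: */
	proc_create_single("mydrv_stats", 0444, NULL, mydrv_stats_show);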
extern struct proc_dir_entry *proc_create_data(const char *, umode_t,
struct proc_dir_entry *,
- const struct file_operations *,
+ const struct proc_ops *,
void *);
-static inline struct proc_dir_entry *proc_create(
- const char *name, umode_t mode, struct proc_dir_entry *parent,
- const struct file_operations *proc_fops)
+struct proc_dir_entry *proc_create(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct proc_ops *proc_ops);
+extern void proc_set_size(struct proc_dir_entry *, loff_t);
+extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
+
+/*
+ * Obtain the private data passed by user through proc_create_data() or
+ * related.
+ */
+static inline void *pde_data(const struct inode *inode)
{
- return proc_create_data(name, mode, parent, proc_fops, NULL);
+ return inode->i_private;
}
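A hedged sketch of the proc_ops/pde_data() pairing (all "mydrv" names are hypothetical): the pointer passed to proc_create_data() comes back through pde_data() on the entry's inode:

	static ssize_t mydrv_read(struct file *file, char __user *buf,
				  size_t count, loff_t *ppos)
	{
		struct mydrv *drv = pde_data(file_inode(file));

		return simple_read_from_buffer(buf, count, ppos,
					       drv->msg, strlen(drv->msg));
	}

	static const struct proc_ops mydrv_proc_ops = {
		.proc_read	= mydrv_read,
		.proc_lseek	= default_llseek,
	};

	/* in probe: */
	proc_create_data("mydrv", 0444, NULL, &mydrv_proc_ops, drv);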
-extern void proc_set_size(struct proc_dir_entry *, loff_t);
-extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
-extern void *PDE_DATA(const struct inode *);
extern void *proc_get_parent_data(const struct inode *);
extern void proc_remove(struct proc_dir_entry *);
extern void remove_proc_entry(const char *, struct proc_dir_entry *);
extern int remove_proc_subtree(const char *, struct proc_dir_entry *);
+struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode,
+ struct proc_dir_entry *parent, const struct seq_operations *ops,
+ unsigned int state_size, void *data);
+#define proc_create_net(name, mode, parent, ops, state_size) \
+ proc_create_net_data(name, mode, parent, ops, state_size, NULL)
+struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode,
+ struct proc_dir_entry *parent,
+ int (*show)(struct seq_file *, void *), void *data);
+struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode,
+ struct proc_dir_entry *parent,
+ const struct seq_operations *ops,
+ proc_write_t write,
+ unsigned int state_size, void *data);
+struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mode,
+ struct proc_dir_entry *parent,
+ int (*show)(struct seq_file *, void *),
+ proc_write_t write,
+ void *data);
+extern struct pid *tgid_pidfd_to_pid(const struct file *file);
+
+struct bpf_iter_aux_info;
+extern int bpf_iter_init_seq_net(void *priv_data, struct bpf_iter_aux_info *aux);
+extern void bpf_iter_fini_seq_net(void *priv_data);
+
+#ifdef CONFIG_PROC_PID_ARCH_STATUS
+/*
+ * The architecture which selects CONFIG_PROC_PID_ARCH_STATUS must
+ * provide proc_pid_arch_status() definition.
+ */
+int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task);
+#endif /* CONFIG_PROC_PID_ARCH_STATUS */
+
#else /* CONFIG_PROC_FS */
static inline void proc_root_init(void)
{
}
-static inline void proc_flush_task(struct task_struct *task)
+static inline void proc_flush_pid(struct pid *pid)
{
}
@@ -58,22 +173,51 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
static inline struct proc_dir_entry *proc_mkdir(const char *name,
struct proc_dir_entry *parent) {return NULL;}
static inline struct proc_dir_entry *proc_create_mount_point(const char *name) { return NULL; }
+static inline struct proc_dir_entry *_proc_mkdir(const char *name, umode_t mode,
+ struct proc_dir_entry *parent, void *data, bool force_lookup)
+{
+ return NULL;
+}
static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
umode_t mode, struct proc_dir_entry *parent) { return NULL; }
-#define proc_create(name, mode, parent, proc_fops) ({NULL;})
-#define proc_create_data(name, mode, parent, proc_fops, data) ({NULL;})
+#define proc_create_seq_private(name, mode, parent, ops, size, data) ({NULL;})
+#define proc_create_seq_data(name, mode, parent, ops, data) ({NULL;})
+#define proc_create_seq(name, mode, parent, ops) ({NULL;})
+#define proc_create_single(name, mode, parent, show) ({NULL;})
+#define proc_create_single_data(name, mode, parent, show, data) ({NULL;})
+
+static inline struct proc_dir_entry *
+proc_create(const char *name, umode_t mode, struct proc_dir_entry *parent,
+ const struct proc_ops *proc_ops)
+{ return NULL; }
+
+static inline struct proc_dir_entry *
+proc_create_data(const char *name, umode_t mode, struct proc_dir_entry *parent,
+ const struct proc_ops *proc_ops, void *data)
+{ return NULL; }
static inline void proc_set_size(struct proc_dir_entry *de, loff_t size) {}
static inline void proc_set_user(struct proc_dir_entry *de, kuid_t uid, kgid_t gid) {}
-static inline void *PDE_DATA(const struct inode *inode) {BUG(); return NULL;}
+static inline void *pde_data(const struct inode *inode) {BUG(); return NULL;}
static inline void *proc_get_parent_data(const struct inode *inode) { BUG(); return NULL; }
static inline void proc_remove(struct proc_dir_entry *de) {}
#define remove_proc_entry(name, parent) do {} while (0)
static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) { return 0; }
+#define proc_create_net_data(name, mode, parent, ops, state_size, data) ({NULL;})
+#define proc_create_net_data_write(name, mode, parent, ops, write, state_size, data) ({NULL;})
+#define proc_create_net(name, mode, parent, state_size, ops) ({NULL;})
+#define proc_create_net_single(name, mode, parent, show, data) ({NULL;})
+#define proc_create_net_single_write(name, mode, parent, show, write, data) ({NULL;})
+
+static inline struct pid *tgid_pidfd_to_pid(const struct file *file)
+{
+ return ERR_PTR(-EBADF);
+}
+
#endif /* CONFIG_PROC_FS */
struct net;
@@ -81,11 +225,19 @@ struct net;
static inline struct proc_dir_entry *proc_net_mkdir(
struct net *net, const char *name, struct proc_dir_entry *parent)
{
- return proc_mkdir_data(name, 0, parent, net);
+ return _proc_mkdir(name, 0, parent, net, true);
}
struct ns_common;
int open_related_ns(struct ns_common *ns,
struct ns_common *(*get_ns)(struct ns_common *ns));
+/* get the associated pid namespace for a file in procfs */
+static inline struct pid_namespace *proc_pid_ns(struct super_block *sb)
+{
+ return proc_sb_info(sb)->pid_ns;
+}
+
+bool proc_ns_file(const struct file *file);
+
#endif /* _LINUX_PROC_FS_H */
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 58ab28d81fc2..49539bc416ce 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* procfs namespace bits
*/
@@ -7,7 +8,7 @@
#include <linux/ns_common.h>
struct pid_namespace;
-struct nsproxy;
+struct nsset;
struct path;
struct task_struct;
struct inode;
@@ -18,10 +19,10 @@ struct proc_ns_operations {
int type;
struct ns_common *(*get)(struct task_struct *task);
void (*put)(struct ns_common *ns);
- int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
+ int (*install)(struct nsset *nsset, struct ns_common *ns);
struct user_namespace *(*owner)(struct ns_common *ns);
struct ns_common *(*get_parent)(struct ns_common *ns);
-};
+} __randomize_layout;
extern const struct proc_ns_operations netns_operations;
extern const struct proc_ns_operations utsns_operations;
@@ -31,6 +32,8 @@ extern const struct proc_ns_operations pidns_for_children_operations;
extern const struct proc_ns_operations userns_operations;
extern const struct proc_ns_operations mntns_operations;
extern const struct proc_ns_operations cgroupns_operations;
+extern const struct proc_ns_operations timens_operations;
+extern const struct proc_ns_operations timens_for_children_operations;
/*
* We always define these enumerators
@@ -42,20 +45,16 @@ enum {
PROC_USER_INIT_INO = 0xEFFFFFFDU,
PROC_PID_INIT_INO = 0xEFFFFFFCU,
PROC_CGROUP_INIT_INO = 0xEFFFFFFBU,
+ PROC_TIME_INIT_INO = 0xEFFFFFFAU,
};
#ifdef CONFIG_PROC_FS
-extern int pid_ns_prepare_proc(struct pid_namespace *ns);
-extern void pid_ns_release_proc(struct pid_namespace *ns);
extern int proc_alloc_inum(unsigned int *pino);
extern void proc_free_inum(unsigned int inum);
#else /* CONFIG_PROC_FS */
-static inline int pid_ns_prepare_proc(struct pid_namespace *ns) { return 0; }
-static inline void pid_ns_release_proc(struct pid_namespace *ns) {}
-
static inline int proc_alloc_inum(unsigned int *inum)
{
*inum = 1;
@@ -73,10 +72,14 @@ static inline int ns_alloc_inum(struct ns_common *ns)
#define ns_free_inum(ns) proc_free_inum((ns)->inum)
-extern struct file *proc_ns_fget(int fd);
#define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private)
-extern void *ns_get_path(struct path *path, struct task_struct *task,
+extern int ns_get_path(struct path *path, struct task_struct *task,
const struct proc_ns_operations *ns_ops);
+typedef struct ns_common *ns_get_path_helper_t(void *);
+extern int ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb,
+ void *private_data);
+
+extern bool ns_match(const struct ns_common *ns, dev_t dev, ino_t ino);
extern int ns_get_name(char *buf, size_t size, struct task_struct *task,
const struct proc_ns_operations *ns_ops);
diff --git a/include/linux/processor.h b/include/linux/processor.h
index da0c5e56ca02..dc78bdc7079a 100644
--- a/include/linux/processor.h
+++ b/include/linux/processor.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Misc low level processor primitives */
#ifndef _LINUX_PROCESSOR_H
#define _LINUX_PROCESSOR_H
@@ -31,15 +32,6 @@
#define spin_cpu_relax() cpu_relax()
#endif
-/*
- * spin_cpu_yield may be called to yield (undirected) to the hypervisor if
- * necessary. This should be used if the wait is expected to take longer
- * than context switch overhead, but we can't sleep or do a directed yield.
- */
-#ifndef spin_cpu_yield
-#define spin_cpu_yield() cpu_relax_yield()
-#endif
-
#ifndef spin_end
#define spin_end()
#endif
diff --git a/include/linux/profile.h b/include/linux/profile.h
index b537a25ffa17..11db1ec516e2 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PROFILE_H
#define _LINUX_PROFILE_H
@@ -14,7 +15,6 @@
#define KVM_PROFILING 4
struct proc_dir_entry;
-struct pt_regs;
struct notifier_block;
#if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS)
@@ -31,11 +31,6 @@ static inline int create_proc_profile(void)
}
#endif
-enum profile_type {
- PROFILE_TASK_EXIT,
- PROFILE_MUNMAP
-};
-
#ifdef CONFIG_PROFILING
extern int prof_on __read_mostly;
@@ -66,25 +61,6 @@ static inline void profile_hit(int type, void *ip)
struct task_struct;
struct mm_struct;
-/* task is in do_exit() */
-void profile_task_exit(struct task_struct * task);
-
-/* task is dead, free task struct ? Returns 1 if
- * the task was taken, 0 if the task should be freed.
- */
-int profile_handoff_task(struct task_struct * task);
-
-/* sys_munmap */
-void profile_munmap(unsigned long addr);
-
-int task_handoff_register(struct notifier_block * n);
-int task_handoff_unregister(struct notifier_block * n);
-
-int profile_event_register(enum profile_type, struct notifier_block * n);
-int profile_event_unregister(enum profile_type, struct notifier_block * n);
-
-struct pt_regs;
-
#else
#define prof_on 0
@@ -109,29 +85,6 @@ static inline void profile_hit(int type, void *ip)
return;
}
-static inline int task_handoff_register(struct notifier_block * n)
-{
- return -ENOSYS;
-}
-
-static inline int task_handoff_unregister(struct notifier_block * n)
-{
- return -ENOSYS;
-}
-
-static inline int profile_event_register(enum profile_type t, struct notifier_block * n)
-{
- return -ENOSYS;
-}
-
-static inline int profile_event_unregister(enum profile_type t, struct notifier_block * n)
-{
- return -ENOSYS;
-}
-
-#define profile_task_exit(a) do { } while (0)
-#define profile_handoff_task(a) (0)
-#define profile_munmap(a) do { } while (0)
#endif /* CONFIG_PROFILING */
diff --git a/include/linux/projid.h b/include/linux/projid.h
index 8c1f2c55226d..613730622a1a 100644
--- a/include/linux/projid.h
+++ b/include/linux/projid.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PROJID_H
#define _LINUX_PROJID_H
diff --git a/include/linux/property.h b/include/linux/property.h
index 7e77039e6b81..66df1a15d518 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -1,19 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* property.h - Unified device property interface.
*
* Copyright (C) 2014, Intel Corporation
* Authors: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LINUX_PROPERTY_H_
#define _LINUX_PROPERTY_H_
+#include <linux/bits.h>
#include <linux/fwnode.h>
+#include <linux/stddef.h>
#include <linux/types.h>
struct device;
@@ -24,7 +23,7 @@ enum dev_prop_type {
DEV_PROP_U32,
DEV_PROP_U64,
DEV_PROP_STRING,
- DEV_PROP_MAX,
+ DEV_PROP_REF,
};
enum dev_dma_attr {
@@ -33,260 +32,475 @@ enum dev_dma_attr {
DEV_DMA_COHERENT,
};
-struct fwnode_handle *dev_fwnode(struct device *dev);
+const struct fwnode_handle *__dev_fwnode_const(const struct device *dev);
+struct fwnode_handle *__dev_fwnode(struct device *dev);
+#define dev_fwnode(dev) \
+ _Generic((dev), \
+ const struct device *: __dev_fwnode_const, \
+ struct device *: __dev_fwnode)(dev)
-bool device_property_present(struct device *dev, const char *propname);
-int device_property_read_u8_array(struct device *dev, const char *propname,
+bool device_property_present(const struct device *dev, const char *propname);
+int device_property_read_u8_array(const struct device *dev, const char *propname,
u8 *val, size_t nval);
-int device_property_read_u16_array(struct device *dev, const char *propname,
+int device_property_read_u16_array(const struct device *dev, const char *propname,
u16 *val, size_t nval);
-int device_property_read_u32_array(struct device *dev, const char *propname,
+int device_property_read_u32_array(const struct device *dev, const char *propname,
u32 *val, size_t nval);
-int device_property_read_u64_array(struct device *dev, const char *propname,
+int device_property_read_u64_array(const struct device *dev, const char *propname,
u64 *val, size_t nval);
-int device_property_read_string_array(struct device *dev, const char *propname,
+int device_property_read_string_array(const struct device *dev, const char *propname,
const char **val, size_t nval);
-int device_property_read_string(struct device *dev, const char *propname,
+int device_property_read_string(const struct device *dev, const char *propname,
const char **val);
-int device_property_match_string(struct device *dev,
+int device_property_match_string(const struct device *dev,
const char *propname, const char *string);
-bool fwnode_device_is_available(struct fwnode_handle *fwnode);
-bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname);
-int fwnode_property_read_u8_array(struct fwnode_handle *fwnode,
+bool fwnode_property_present(const struct fwnode_handle *fwnode,
+ const char *propname);
+int fwnode_property_read_u8_array(const struct fwnode_handle *fwnode,
const char *propname, u8 *val,
size_t nval);
-int fwnode_property_read_u16_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_u16_array(const struct fwnode_handle *fwnode,
const char *propname, u16 *val,
size_t nval);
-int fwnode_property_read_u32_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_u32_array(const struct fwnode_handle *fwnode,
const char *propname, u32 *val,
size_t nval);
-int fwnode_property_read_u64_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_u64_array(const struct fwnode_handle *fwnode,
const char *propname, u64 *val,
size_t nval);
-int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
const char *propname, const char **val,
size_t nval);
-int fwnode_property_read_string(struct fwnode_handle *fwnode,
+int fwnode_property_read_string(const struct fwnode_handle *fwnode,
const char *propname, const char **val);
-int fwnode_property_match_string(struct fwnode_handle *fwnode,
+int fwnode_property_match_string(const struct fwnode_handle *fwnode,
const char *propname, const char *string);
-struct fwnode_handle *fwnode_get_parent(struct fwnode_handle *fwnode);
+bool fwnode_device_is_available(const struct fwnode_handle *fwnode);
+
+static inline
+bool fwnode_device_is_compatible(const struct fwnode_handle *fwnode, const char *compat)
+{
+ return fwnode_property_match_string(fwnode, "compatible", compat) >= 0;
+}
+
+int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
+ const char *prop, const char *nargs_prop,
+ unsigned int nargs, unsigned int index,
+ struct fwnode_reference_args *args);
+
+struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode,
+ const char *name,
+ unsigned int index);
+
+const char *fwnode_get_name(const struct fwnode_handle *fwnode);
+const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode);
+
+struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode);
-struct fwnode_handle *fwnode_get_next_child_node(struct fwnode_handle *fwnode,
- struct fwnode_handle *child);
+
+#define fwnode_for_each_parent_node(fwnode, parent) \
+ for (parent = fwnode_get_parent(fwnode); parent; \
+ parent = fwnode_get_next_parent(parent))
+
+struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwnode);
+unsigned int fwnode_count_parents(const struct fwnode_handle *fwn);
+struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwn,
+ unsigned int depth);
+bool fwnode_is_ancestor_of(const struct fwnode_handle *ancestor, const struct fwnode_handle *child);
+struct fwnode_handle *fwnode_get_next_child_node(
+ const struct fwnode_handle *fwnode, struct fwnode_handle *child);
+struct fwnode_handle *fwnode_get_next_available_child_node(
+ const struct fwnode_handle *fwnode, struct fwnode_handle *child);
#define fwnode_for_each_child_node(fwnode, child) \
for (child = fwnode_get_next_child_node(fwnode, NULL); child; \
child = fwnode_get_next_child_node(fwnode, child))
-struct fwnode_handle *device_get_next_child_node(struct device *dev,
+#define fwnode_for_each_available_child_node(fwnode, child) \
+ for (child = fwnode_get_next_available_child_node(fwnode, NULL); child;\
+ child = fwnode_get_next_available_child_node(fwnode, child))
+
+struct fwnode_handle *device_get_next_child_node(const struct device *dev,
struct fwnode_handle *child);
#define device_for_each_child_node(dev, child) \
for (child = device_get_next_child_node(dev, NULL); child; \
child = device_get_next_child_node(dev, child))
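These iterators take a reference on each child as they walk; a caller that breaks out early still holds the last reference and must drop it. A sketch (the property name is made up):

	struct fwnode_handle *child;

	device_for_each_child_node(dev, child) {
		if (fwnode_property_present(child, "vendor,enable"))
			break;	/* still holding a reference on child */
	}
	/* ... use child ... */
	fwnode_handle_put(child);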
-struct fwnode_handle *fwnode_get_named_child_node(struct fwnode_handle *fwnode,
+struct fwnode_handle *fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
const char *childname);
-struct fwnode_handle *device_get_named_child_node(struct device *dev,
+struct fwnode_handle *device_get_named_child_node(const struct device *dev,
const char *childname);
-void fwnode_handle_get(struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode);
void fwnode_handle_put(struct fwnode_handle *fwnode);
-unsigned int device_get_child_node_count(struct device *dev);
+int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index);
+int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name);
+
+unsigned int device_get_child_node_count(const struct device *dev);
-static inline bool device_property_read_bool(struct device *dev,
+static inline bool device_property_read_bool(const struct device *dev,
const char *propname)
{
return device_property_present(dev, propname);
}
-static inline int device_property_read_u8(struct device *dev,
+static inline int device_property_read_u8(const struct device *dev,
const char *propname, u8 *val)
{
return device_property_read_u8_array(dev, propname, val, 1);
}
-static inline int device_property_read_u16(struct device *dev,
+static inline int device_property_read_u16(const struct device *dev,
const char *propname, u16 *val)
{
return device_property_read_u16_array(dev, propname, val, 1);
}
-static inline int device_property_read_u32(struct device *dev,
+static inline int device_property_read_u32(const struct device *dev,
const char *propname, u32 *val)
{
return device_property_read_u32_array(dev, propname, val, 1);
}
-static inline int device_property_read_u64(struct device *dev,
+static inline int device_property_read_u64(const struct device *dev,
const char *propname, u64 *val)
{
return device_property_read_u64_array(dev, propname, val, 1);
}
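Typical probe-time use of the scalar wrappers, falling back to a default when the property is absent (values are illustrative):

	u32 rate;

	if (device_property_read_u32(dev, "clock-frequency", &rate))
		rate = 100000;	/* property missing: use a default */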
-static inline bool fwnode_property_read_bool(struct fwnode_handle *fwnode,
+static inline int device_property_count_u8(const struct device *dev, const char *propname)
+{
+ return device_property_read_u8_array(dev, propname, NULL, 0);
+}
+
+static inline int device_property_count_u16(const struct device *dev, const char *propname)
+{
+ return device_property_read_u16_array(dev, propname, NULL, 0);
+}
+
+static inline int device_property_count_u32(const struct device *dev, const char *propname)
+{
+ return device_property_read_u32_array(dev, propname, NULL, 0);
+}
+
+static inline int device_property_count_u64(const struct device *dev, const char *propname)
+{
+ return device_property_read_u64_array(dev, propname, NULL, 0);
+}
+
+static inline int device_property_string_array_count(const struct device *dev,
+ const char *propname)
+{
+ return device_property_read_string_array(dev, propname, NULL, 0);
+}
+
+static inline bool fwnode_property_read_bool(const struct fwnode_handle *fwnode,
const char *propname)
{
return fwnode_property_present(fwnode, propname);
}
-static inline int fwnode_property_read_u8(struct fwnode_handle *fwnode,
+static inline int fwnode_property_read_u8(const struct fwnode_handle *fwnode,
const char *propname, u8 *val)
{
return fwnode_property_read_u8_array(fwnode, propname, val, 1);
}
-static inline int fwnode_property_read_u16(struct fwnode_handle *fwnode,
+static inline int fwnode_property_read_u16(const struct fwnode_handle *fwnode,
const char *propname, u16 *val)
{
return fwnode_property_read_u16_array(fwnode, propname, val, 1);
}
-static inline int fwnode_property_read_u32(struct fwnode_handle *fwnode,
+static inline int fwnode_property_read_u32(const struct fwnode_handle *fwnode,
const char *propname, u32 *val)
{
return fwnode_property_read_u32_array(fwnode, propname, val, 1);
}
-static inline int fwnode_property_read_u64(struct fwnode_handle *fwnode,
+static inline int fwnode_property_read_u64(const struct fwnode_handle *fwnode,
const char *propname, u64 *val)
{
return fwnode_property_read_u64_array(fwnode, propname, val, 1);
}
+static inline int fwnode_property_count_u8(const struct fwnode_handle *fwnode,
+ const char *propname)
+{
+ return fwnode_property_read_u8_array(fwnode, propname, NULL, 0);
+}
+
+static inline int fwnode_property_count_u16(const struct fwnode_handle *fwnode,
+ const char *propname)
+{
+ return fwnode_property_read_u16_array(fwnode, propname, NULL, 0);
+}
+
+static inline int fwnode_property_count_u32(const struct fwnode_handle *fwnode,
+ const char *propname)
+{
+ return fwnode_property_read_u32_array(fwnode, propname, NULL, 0);
+}
+
+static inline int fwnode_property_count_u64(const struct fwnode_handle *fwnode,
+ const char *propname)
+{
+ return fwnode_property_read_u64_array(fwnode, propname, NULL, 0);
+}
+
+static inline int
+fwnode_property_string_array_count(const struct fwnode_handle *fwnode,
+ const char *propname)
+{
+ return fwnode_property_read_string_array(fwnode, propname, NULL, 0);
+}
+
+struct software_node;
+
+/**
+ * struct software_node_ref_args - Reference property with additional arguments
+ * @node: Reference to a software node
+ * @nargs: Number of elements in @args array
+ * @args: Integer arguments
+ */
+struct software_node_ref_args {
+ const struct software_node *node;
+ unsigned int nargs;
+ u64 args[NR_FWNODE_REFERENCE_ARGS];
+};
+
+#define SOFTWARE_NODE_REFERENCE(_ref_, ...) \
+(const struct software_node_ref_args) { \
+ .node = _ref_, \
+ .nargs = ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1, \
+ .args = { __VA_ARGS__ }, \
+}
+
/**
* struct property_entry - "Built-in" device property representation.
* @name: Name of the property.
* @length: Length of data making up the value.
- * @is_array: True when the property is an array.
- * @is_string: True when property is a string.
- * @pointer: Pointer to the property (an array of items of the given type).
- * @value: Value of the property (when it is a single item of the given type).
+ * @is_inline: True when the property value is stored inline.
+ * @type: Type of the data in unions.
+ * @pointer: Pointer to the property when it is not stored inline.
+ * @value: Value of the property when it is stored inline.
*/
struct property_entry {
const char *name;
size_t length;
- bool is_array;
- bool is_string;
+ bool is_inline;
+ enum dev_prop_type type;
union {
+ const void *pointer;
union {
- const void *raw_data;
- const u8 *u8_data;
- const u16 *u16_data;
- const u32 *u32_data;
- const u64 *u64_data;
- const char * const *str;
- } pointer;
- union {
- unsigned long long raw_data;
- u8 u8_data;
- u16 u16_data;
- u32 u32_data;
- u64 u64_data;
- const char *str;
+ u8 u8_data[sizeof(u64) / sizeof(u8)];
+ u16 u16_data[sizeof(u64) / sizeof(u16)];
+ u32 u32_data[sizeof(u64) / sizeof(u32)];
+ u64 u64_data[sizeof(u64) / sizeof(u64)];
+ const char *str[sizeof(u64) / sizeof(char *)];
} value;
};
};
/*
- * Note: the below four initializers for the anonymous union are carefully
+ * Note: the below initializers for the anonymous union are carefully
* crafted to avoid gcc-4.4.4's problems with initialization of anon unions
* and structs.
*/
-
-#define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _val_) \
-{ \
- .name = _name_, \
- .length = ARRAY_SIZE(_val_) * sizeof(_type_), \
- .is_array = true, \
- .is_string = false, \
- { .pointer = { ._type_##_data = _val_ } }, \
+#define __PROPERTY_ENTRY_ARRAY_LEN(_name_, _elem_, _Type_, _val_, _len_) \
+(struct property_entry) { \
+ .name = _name_, \
+ .length = (_len_) * sizeof_field(struct property_entry, value._elem_[0]), \
+ .type = DEV_PROP_##_Type_, \
+ { .pointer = _val_ }, \
}
-#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u8, _val_)
-#define PROPERTY_ENTRY_U16_ARRAY(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u16, _val_)
-#define PROPERTY_ENTRY_U32_ARRAY(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u32, _val_)
-#define PROPERTY_ENTRY_U64_ARRAY(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, _val_)
-
-#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \
-{ \
- .name = _name_, \
- .length = ARRAY_SIZE(_val_) * sizeof(const char *), \
- .is_array = true, \
- .is_string = true, \
- { .pointer = { .str = _val_ } }, \
+#define PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, _len_) \
+ __PROPERTY_ENTRY_ARRAY_LEN(_name_, u8_data, U8, _val_, _len_)
+#define PROPERTY_ENTRY_U16_ARRAY_LEN(_name_, _val_, _len_) \
+ __PROPERTY_ENTRY_ARRAY_LEN(_name_, u16_data, U16, _val_, _len_)
+#define PROPERTY_ENTRY_U32_ARRAY_LEN(_name_, _val_, _len_) \
+ __PROPERTY_ENTRY_ARRAY_LEN(_name_, u32_data, U32, _val_, _len_)
+#define PROPERTY_ENTRY_U64_ARRAY_LEN(_name_, _val_, _len_) \
+ __PROPERTY_ENTRY_ARRAY_LEN(_name_, u64_data, U64, _val_, _len_)
+#define PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, _len_) \
+ __PROPERTY_ENTRY_ARRAY_LEN(_name_, str, STRING, _val_, _len_)
+
+#define PROPERTY_ENTRY_REF_ARRAY_LEN(_name_, _val_, _len_) \
+(struct property_entry) { \
+ .name = _name_, \
+ .length = (_len_) * sizeof(struct software_node_ref_args), \
+ .type = DEV_PROP_REF, \
+ { .pointer = _val_ }, \
}
-#define PROPERTY_ENTRY_INTEGER(_name_, _type_, _val_) \
-{ \
- .name = _name_, \
- .length = sizeof(_type_), \
- .is_string = false, \
- { .value = { ._type_##_data = _val_ } }, \
+#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+#define PROPERTY_ENTRY_U16_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_U16_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+#define PROPERTY_ENTRY_U32_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_U32_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+#define PROPERTY_ENTRY_U64_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_U64_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+#define PROPERTY_ENTRY_REF_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_REF_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+
+#define __PROPERTY_ENTRY_ELEMENT(_name_, _elem_, _Type_, _val_) \
+(struct property_entry) { \
+ .name = _name_, \
+ .length = sizeof_field(struct property_entry, value._elem_[0]), \
+ .is_inline = true, \
+ .type = DEV_PROP_##_Type_, \
+ { .value = { ._elem_[0] = _val_ } }, \
}
-#define PROPERTY_ENTRY_U8(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER(_name_, u8, _val_)
-#define PROPERTY_ENTRY_U16(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER(_name_, u16, _val_)
-#define PROPERTY_ENTRY_U32(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER(_name_, u32, _val_)
-#define PROPERTY_ENTRY_U64(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER(_name_, u64, _val_)
-
-#define PROPERTY_ENTRY_STRING(_name_, _val_) \
-{ \
- .name = _name_, \
- .length = sizeof(_val_), \
- .is_string = true, \
- { .value = { .str = _val_ } }, \
+#define PROPERTY_ENTRY_U8(_name_, _val_) \
+ __PROPERTY_ENTRY_ELEMENT(_name_, u8_data, U8, _val_)
+#define PROPERTY_ENTRY_U16(_name_, _val_) \
+ __PROPERTY_ENTRY_ELEMENT(_name_, u16_data, U16, _val_)
+#define PROPERTY_ENTRY_U32(_name_, _val_) \
+ __PROPERTY_ENTRY_ELEMENT(_name_, u32_data, U32, _val_)
+#define PROPERTY_ENTRY_U64(_name_, _val_) \
+ __PROPERTY_ENTRY_ELEMENT(_name_, u64_data, U64, _val_)
+#define PROPERTY_ENTRY_STRING(_name_, _val_) \
+ __PROPERTY_ENTRY_ELEMENT(_name_, str, STRING, _val_)
+
+#define PROPERTY_ENTRY_REF(_name_, _ref_, ...) \
+(struct property_entry) { \
+ .name = _name_, \
+ .length = sizeof(struct software_node_ref_args), \
+ .type = DEV_PROP_REF, \
+ { .pointer = &SOFTWARE_NODE_REFERENCE(_ref_, ##__VA_ARGS__), }, \
}
#define PROPERTY_ENTRY_BOOL(_name_) \
-{ \
+(struct property_entry) { \
.name = _name_, \
+ .is_inline = true, \
}
struct property_entry *
property_entries_dup(const struct property_entry *properties);
-
void property_entries_free(const struct property_entry *properties);
-int device_add_properties(struct device *dev,
- const struct property_entry *properties);
-void device_remove_properties(struct device *dev);
+bool device_dma_supported(const struct device *dev);
+enum dev_dma_attr device_get_dma_attr(const struct device *dev);
-bool device_dma_supported(struct device *dev);
-
-enum dev_dma_attr device_get_dma_attr(struct device *dev);
+const void *device_get_match_data(const struct device *dev);
int device_get_phy_mode(struct device *dev);
+int fwnode_get_phy_mode(const struct fwnode_handle *fwnode);
-void *device_get_mac_address(struct device *dev, char *addr, int alen);
+void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index);
struct fwnode_handle *fwnode_graph_get_next_endpoint(
- struct fwnode_handle *fwnode, struct fwnode_handle *prev);
+ const struct fwnode_handle *fwnode, struct fwnode_handle *prev);
struct fwnode_handle *
-fwnode_graph_get_port_parent(struct fwnode_handle *fwnode);
+fwnode_graph_get_port_parent(const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_graph_get_remote_port_parent(
- struct fwnode_handle *fwnode);
+ const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_graph_get_remote_port(
- struct fwnode_handle *fwnode);
+ const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_graph_get_remote_endpoint(
- struct fwnode_handle *fwnode);
-struct fwnode_handle *fwnode_graph_get_remote_node(struct fwnode_handle *fwnode,
- u32 port, u32 endpoint);
+ const struct fwnode_handle *fwnode);
+
+static inline bool fwnode_graph_is_endpoint(const struct fwnode_handle *fwnode)
+{
+ return fwnode_property_present(fwnode, "remote-endpoint");
+}
+
+/*
+ * Fwnode lookup flags
+ *
+ * @FWNODE_GRAPH_ENDPOINT_NEXT: In the case of no exact match, look for the
+ * closest endpoint ID greater than the specified
+ * one.
+ * @FWNODE_GRAPH_DEVICE_DISABLED: The device to which the remote
+ *                                endpoint of the given endpoint belongs
+ *                                may be disabled, or the endpoint may
+ *                                not be connected.
+ */
+#define FWNODE_GRAPH_ENDPOINT_NEXT BIT(0)
+#define FWNODE_GRAPH_DEVICE_DISABLED BIT(1)
+
+struct fwnode_handle *
+fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
+ u32 port, u32 endpoint, unsigned long flags);
+unsigned int fwnode_graph_get_endpoint_count(const struct fwnode_handle *fwnode,
+ unsigned long flags);
-int fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode,
+#define fwnode_graph_for_each_endpoint(fwnode, child) \
+ for (child = fwnode_graph_get_next_endpoint(fwnode, NULL); child; \
+ child = fwnode_graph_get_next_endpoint(fwnode, child))
+
+int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
+typedef void *(*devcon_match_fn_t)(const struct fwnode_handle *fwnode, const char *id,
+ void *data);
+
+void *fwnode_connection_find_match(const struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match);
+
+static inline void *device_connection_find_match(const struct device *dev,
+ const char *con_id, void *data,
+ devcon_match_fn_t match)
+{
+ return fwnode_connection_find_match(dev_fwnode(dev), con_id, data, match);
+}
+
+int fwnode_connection_find_matches(const struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match,
+ void **matches, unsigned int matches_len);
+
+/* -------------------------------------------------------------------------- */
+/* Software fwnode support - when HW description is incomplete or missing */
+
+/**
+ * struct software_node - Software node description
+ * @name: Name of the software node
+ * @parent: Parent of the software node
+ * @properties: Array of device properties
+ */
+struct software_node {
+ const char *name;
+ const struct software_node *parent;
+ const struct property_entry *properties;
+};
+
+bool is_software_node(const struct fwnode_handle *fwnode);
+const struct software_node *
+to_software_node(const struct fwnode_handle *fwnode);
+struct fwnode_handle *software_node_fwnode(const struct software_node *node);
+
+const struct software_node *
+software_node_find_by_name(const struct software_node *parent,
+ const char *name);
+
+int software_node_register_node_group(const struct software_node **node_group);
+void software_node_unregister_node_group(const struct software_node **node_group);
+
+int software_node_register(const struct software_node *node);
+void software_node_unregister(const struct software_node *node);
+
+struct fwnode_handle *
+fwnode_create_software_node(const struct property_entry *properties,
+ const struct fwnode_handle *parent);
+void fwnode_remove_software_node(struct fwnode_handle *fwnode);
+
+int device_add_software_node(struct device *dev, const struct software_node *node);
+void device_remove_software_node(struct device *dev);
+
+int device_create_managed_software_node(struct device *dev,
+ const struct property_entry *properties,
+ const struct software_node *parent);
+
#endif /* _LINUX_PROPERTY_H_ */
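A sketch of describing extra properties with a software node; the property names are made up. device_create_managed_software_node() ties the node's lifetime to the device, so no explicit removal is needed:

	static const struct property_entry mydrv_props[] = {
		PROPERTY_ENTRY_U32("reg-shift", 2),
		PROPERTY_ENTRY_BOOL("wakeup-source"),
		{ }	/* sentinel */
	};

	/* in probe: */
	ret = device_create_managed_software_node(dev, mydrv_props, NULL);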
diff --git a/include/linux/pruss_driver.h b/include/linux/pruss_driver.h
new file mode 100644
index 000000000000..ecfded30ed05
--- /dev/null
+++ b/include/linux/pruss_driver.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * PRU-ICSS sub-system specific definitions
+ *
+ * Copyright (C) 2014-2020 Texas Instruments Incorporated - http://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#ifndef _PRUSS_DRIVER_H_
+#define _PRUSS_DRIVER_H_
+
+#include <linux/types.h>
+
+/*
+ * enum pruss_mem - PRUSS memory range identifiers
+ */
+enum pruss_mem {
+ PRUSS_MEM_DRAM0 = 0,
+ PRUSS_MEM_DRAM1,
+ PRUSS_MEM_SHRD_RAM2,
+ PRUSS_MEM_MAX,
+};
+
+/**
+ * struct pruss_mem_region - PRUSS memory region structure
+ * @va: kernel virtual address of the PRUSS memory region
+ * @pa: physical (bus) address of the PRUSS memory region
+ * @size: size of the PRUSS memory region
+ */
+struct pruss_mem_region {
+ void __iomem *va;
+ phys_addr_t pa;
+ size_t size;
+};
+
+/**
+ * struct pruss - PRUSS parent structure
+ * @dev: pruss device pointer
+ * @cfg_base: base iomap for CFG region
+ * @cfg_regmap: regmap for config region
+ * @mem_regions: data for each of the PRUSS memory regions
+ * @core_clk_mux: clk handle for PRUSS CORE_CLK_MUX
+ * @iep_clk_mux: clk handle for PRUSS IEP_CLK_MUX
+ */
+struct pruss {
+ struct device *dev;
+ void __iomem *cfg_base;
+ struct regmap *cfg_regmap;
+ struct pruss_mem_region mem_regions[PRUSS_MEM_MAX];
+ struct clk *core_clk_mux;
+ struct clk *iep_clk_mux;
+};
+
+#endif /* _PRUSS_DRIVER_H_ */
diff --git a/include/linux/psci.h b/include/linux/psci.h
index bdea1cb5e1db..4ca0060a3fc4 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -1,12 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*
* Copyright (C) 2015 ARM Limited
*/
@@ -14,6 +7,7 @@
#ifndef __LINUX_PSCI_H
#define __LINUX_PSCI_H
+#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -22,10 +16,13 @@
bool psci_tos_resident_on(int cpu);
-int psci_cpu_init_idle(unsigned int cpu);
-int psci_cpu_suspend_enter(unsigned long index);
+int psci_cpu_suspend_enter(u32 state);
+bool psci_power_state_is_valid(u32 state);
+int psci_set_osi_mode(bool enable);
+bool psci_has_osi_support(void);
struct psci_operations {
+ u32 (*get_version)(void);
int (*cpu_suspend)(u32 state, unsigned long entry_point);
int (*cpu_off)(u32 state);
int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
@@ -37,6 +34,15 @@ struct psci_operations {
extern struct psci_operations psci_ops;
+struct psci_0_1_function_ids {
+ u32 cpu_suspend;
+ u32 cpu_on;
+ u32 cpu_off;
+ u32 migrate;
+};
+
+struct psci_0_1_function_ids get_psci_0_1_function_ids(void);
+
#if defined(CONFIG_ARM_PSCI_FW)
int __init psci_dt_init(void);
#else
@@ -46,10 +52,11 @@ static inline int psci_dt_init(void) { return 0; }
#if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI)
int __init psci_acpi_init(void);
bool __init acpi_psci_present(void);
-bool __init acpi_psci_use_hvc(void);
+bool acpi_psci_use_hvc(void);
#else
static inline int psci_acpi_init(void) { return 0; }
static inline bool acpi_psci_present(void) { return false; }
+static inline bool acpi_psci_use_hvc(void) {return false; }
#endif
#endif /* __LINUX_PSCI_H */
diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h
new file mode 100644
index 000000000000..fb724c65c77b
--- /dev/null
+++ b/include/linux/pse-pd/pse.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+ */
+#ifndef _LINUX_PSE_CONTROLLER_H
+#define _LINUX_PSE_CONTROLLER_H
+
+#include <linux/ethtool.h>
+#include <linux/list.h>
+#include <uapi/linux/ethtool.h>
+
+struct phy_device;
+struct pse_controller_dev;
+
+/**
+ * struct pse_control_config - PSE control/channel configuration.
+ *
+ * @admin_control: set PoDL PSE admin control as described in
+ *	IEEE 802.3-2018 30.15.1.2.1 acPoDLPSEAdminControl
+ */
+struct pse_control_config {
+	enum ethtool_podl_pse_admin_state admin_control;
+};
+
+/**
+ * struct pse_control_status - PSE control/channel status.
+ *
+ * @podl_admin_state: operational state of the PoDL PSE
+ * functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState
+ * @podl_pw_status: power detection status of the PoDL PSE.
+ *	IEEE 802.3-2018 30.15.1.1.3 aPoDLPSEPowerDetectionStatus
+ */
+struct pse_control_status {
+ enum ethtool_podl_pse_admin_state podl_admin_state;
+ enum ethtool_podl_pse_pw_d_status podl_pw_status;
+};
+
+/**
+ * struct pse_controller_ops - PSE controller driver callbacks
+ *
+ * @ethtool_get_status: get PSE control status for ethtool interface
+ * @ethtool_set_config: set PSE control configuration over ethtool interface
+ */
+struct pse_controller_ops {
+ int (*ethtool_get_status)(struct pse_controller_dev *pcdev,
+ unsigned long id, struct netlink_ext_ack *extack,
+ struct pse_control_status *status);
+ int (*ethtool_set_config)(struct pse_controller_dev *pcdev,
+ unsigned long id, struct netlink_ext_ack *extack,
+ const struct pse_control_config *config);
+};
+
+struct module;
+struct device_node;
+struct of_phandle_args;
+struct pse_control;
+
+/**
+ * struct pse_controller_dev - PSE controller entity that might
+ * provide multiple PSE controls
+ * @ops: a pointer to device specific struct pse_controller_ops
+ * @owner: kernel module of the PSE controller driver
+ * @list: internal list of PSE controller devices
+ * @pse_control_head: head of internal list of requested PSE controls
+ * @dev: corresponding driver model device struct
+ * @of_pse_n_cells: number of cells in PSE line specifiers
+ * @of_xlate: translation function to translate from specifier as found in the
+ * device tree to id as given to the PSE control ops
+ * @nr_lines: number of PSE controls in this controller device
+ * @lock: Mutex for serialization access to the PSE controller
+ */
+struct pse_controller_dev {
+ const struct pse_controller_ops *ops;
+ struct module *owner;
+ struct list_head list;
+ struct list_head pse_control_head;
+ struct device *dev;
+ int of_pse_n_cells;
+ int (*of_xlate)(struct pse_controller_dev *pcdev,
+ const struct of_phandle_args *pse_spec);
+ unsigned int nr_lines;
+ struct mutex lock;
+};
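A hedged sketch of a PSE driver filling in the controller device and registering it; all "mydrv" names are hypothetical and the ops handlers are not shown:

	static const struct pse_controller_ops mydrv_pse_ops = {
		.ethtool_get_status	= mydrv_pse_get_status,
		.ethtool_set_config	= mydrv_pse_set_config,
	};

	/* in probe: */
	priv->pcdev.ops = &mydrv_pse_ops;
	priv->pcdev.owner = THIS_MODULE;
	priv->pcdev.dev = dev;
	priv->pcdev.nr_lines = 1;
	ret = devm_pse_controller_register(dev, &priv->pcdev);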
+
+#if IS_ENABLED(CONFIG_PSE_CONTROLLER)
+int pse_controller_register(struct pse_controller_dev *pcdev);
+void pse_controller_unregister(struct pse_controller_dev *pcdev);
+struct device;
+int devm_pse_controller_register(struct device *dev,
+ struct pse_controller_dev *pcdev);
+
+struct pse_control *of_pse_control_get(struct device_node *node);
+void pse_control_put(struct pse_control *psec);
+
+int pse_ethtool_get_status(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status);
+int pse_ethtool_set_config(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ const struct pse_control_config *config);
+
+#else
+
+static inline struct pse_control *of_pse_control_get(struct device_node *node)
+{
+ return ERR_PTR(-ENOENT);
+}
+
+static inline void pse_control_put(struct pse_control *psec)
+{
+}
+
+static inline int pse_ethtool_get_status(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status)
+{
+ return -ENOTSUPP;
+}
+
+static inline int pse_ethtool_set_config(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ const struct pse_control_config *config)
+{
+ return -ENOTSUPP;
+}
+
+#endif
+
+#endif
diff --git a/include/linux/pseudo_fs.h b/include/linux/pseudo_fs.h
new file mode 100644
index 000000000000..eceda1d1407a
--- /dev/null
+++ b/include/linux/pseudo_fs.h
@@ -0,0 +1,16 @@
+#ifndef __LINUX_PSEUDO_FS__
+#define __LINUX_PSEUDO_FS__
+
+#include <linux/fs_context.h>
+
+struct pseudo_fs_context {
+ const struct super_operations *ops;
+ const struct xattr_handler **xattr;
+ const struct dentry_operations *dops;
+ unsigned long magic;
+};
+
+struct pseudo_fs_context *init_pseudo(struct fs_context *fc,
+ unsigned long magic);
+
+#endif
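init_pseudo() is meant to be called from a file system's init_fs_context hook; a minimal sketch (the magic constant and super_operations are placeholders):

	static int mydrv_init_fs_context(struct fs_context *fc)
	{
		struct pseudo_fs_context *ctx;

		ctx = init_pseudo(fc, 0x6d796673);	/* placeholder magic */
		if (!ctx)
			return -ENOMEM;
		ctx->ops = &mydrv_super_ops;		/* optional override */
		return 0;
	}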
diff --git a/include/linux/psi.h b/include/linux/psi.h
new file mode 100644
index 000000000000..ab26200c2803
--- /dev/null
+++ b/include/linux/psi.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PSI_H
+#define _LINUX_PSI_H
+
+#include <linux/jump_label.h>
+#include <linux/psi_types.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <linux/cgroup-defs.h>
+#include <linux/cgroup.h>
+
+struct seq_file;
+struct css_set;
+
+#ifdef CONFIG_PSI
+
+extern struct static_key_false psi_disabled;
+extern struct psi_group psi_system;
+
+void psi_init(void);
+
+void psi_memstall_enter(unsigned long *flags);
+void psi_memstall_leave(unsigned long *flags);
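Callers bracket sections that stall on memory with the enter/leave pair, passing the same flags word to both calls; a sketch:

	unsigned long pflags;

	psi_memstall_enter(&pflags);
	/* ... work that stalls on memory, e.g. direct reclaim ... */
	psi_memstall_leave(&pflags);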
+
+int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
+struct psi_trigger *psi_trigger_create(struct psi_group *group,
+ char *buf, enum psi_res res, struct file *file);
+void psi_trigger_destroy(struct psi_trigger *t);
+
+__poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
+ poll_table *wait);
+
+#ifdef CONFIG_CGROUPS
+static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
+{
+ return cgroup_ino(cgrp) == 1 ? &psi_system : cgrp->psi;
+}
+
+int psi_cgroup_alloc(struct cgroup *cgrp);
+void psi_cgroup_free(struct cgroup *cgrp);
+void cgroup_move_task(struct task_struct *p, struct css_set *to);
+void psi_cgroup_restart(struct psi_group *group);
+#endif
+
+#else /* CONFIG_PSI */
+
+static inline void psi_init(void) {}
+
+static inline void psi_memstall_enter(unsigned long *flags) {}
+static inline void psi_memstall_leave(unsigned long *flags) {}
+
+#ifdef CONFIG_CGROUPS
+static inline int psi_cgroup_alloc(struct cgroup *cgrp)
+{
+ return 0;
+}
+static inline void psi_cgroup_free(struct cgroup *cgrp)
+{
+}
+static inline void cgroup_move_task(struct task_struct *p, struct css_set *to)
+{
+ rcu_assign_pointer(p->cgroups, to);
+}
+static inline void psi_cgroup_restart(struct psi_group *group) {}
+#endif
+
+#endif /* CONFIG_PSI */
+
+#endif /* _LINUX_PSI_H */
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
new file mode 100644
index 000000000000..040c089581c6
--- /dev/null
+++ b/include/linux/psi_types.h
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PSI_TYPES_H
+#define _LINUX_PSI_TYPES_H
+
+#include <linux/kthread.h>
+#include <linux/seqlock.h>
+#include <linux/types.h>
+#include <linux/kref.h>
+#include <linux/wait.h>
+
+#ifdef CONFIG_PSI
+
+/* Tracked task states */
+enum psi_task_count {
+ NR_IOWAIT,
+ NR_MEMSTALL,
+ NR_RUNNING,
+ /*
+ * For IO and CPU stalls the presence of running/oncpu tasks
+ * in the domain means a partial rather than a full stall.
+ * For memory it's not so simple because of page reclaimers:
+ * they are running/oncpu while representing a stall. To tell
+ * whether a domain has productivity left or not, we need to
+ * distinguish between regular running (i.e. productive)
+ * threads and memstall ones.
+ */
+ NR_MEMSTALL_RUNNING,
+ NR_PSI_TASK_COUNTS = 4,
+};
+
+/* Task state bitmasks */
+#define TSK_IOWAIT (1 << NR_IOWAIT)
+#define TSK_MEMSTALL (1 << NR_MEMSTALL)
+#define TSK_RUNNING (1 << NR_RUNNING)
+#define TSK_MEMSTALL_RUNNING (1 << NR_MEMSTALL_RUNNING)
+
+/* Only one task can be scheduled, no corresponding task count */
+#define TSK_ONCPU (1 << NR_PSI_TASK_COUNTS)
+
+/* Resources that workloads could be stalled on */
+enum psi_res {
+ PSI_IO,
+ PSI_MEM,
+ PSI_CPU,
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ PSI_IRQ,
+#endif
+ NR_PSI_RESOURCES,
+};
+
+/*
+ * Pressure states for each resource:
+ *
+ * SOME: Stalled tasks & working tasks
+ * FULL: Stalled tasks & no working tasks
+ */
+enum psi_states {
+ PSI_IO_SOME,
+ PSI_IO_FULL,
+ PSI_MEM_SOME,
+ PSI_MEM_FULL,
+ PSI_CPU_SOME,
+ PSI_CPU_FULL,
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ PSI_IRQ_FULL,
+#endif
+ /* Only per-CPU, to weigh the CPU in the global average: */
+ PSI_NONIDLE,
+ NR_PSI_STATES,
+};
+
+/* Use one bit in the state mask to track TSK_ONCPU */
+#define PSI_ONCPU (1 << NR_PSI_STATES)
+
+/* Flag whether to re-arm avgs_work, see details in get_recent_times() */
+#define PSI_STATE_RESCHEDULE (1 << (NR_PSI_STATES + 1))
+
+enum psi_aggregators {
+ PSI_AVGS = 0,
+ PSI_POLL,
+ NR_PSI_AGGREGATORS,
+};
+
+struct psi_group_cpu {
+ /* 1st cacheline updated by the scheduler */
+
+ /* Aggregator needs to know of concurrent changes */
+ seqcount_t seq ____cacheline_aligned_in_smp;
+
+ /* States of the tasks belonging to this group */
+ unsigned int tasks[NR_PSI_TASK_COUNTS];
+
+ /* Aggregate pressure state derived from the tasks */
+ u32 state_mask;
+
+ /* Period time sampling buckets for each state of interest (ns) */
+ u32 times[NR_PSI_STATES];
+
+ /* Time of last task change in this group (rq_clock) */
+ u64 state_start;
+
+ /* 2nd cacheline updated by the aggregator */
+
+ /* Delta detection against the sampling buckets */
+ u32 times_prev[NR_PSI_AGGREGATORS][NR_PSI_STATES]
+ ____cacheline_aligned_in_smp;
+};
+
+/* PSI growth tracking window */
+struct psi_window {
+ /* Window size in ns */
+ u64 size;
+
+ /* Start time of the current window in ns */
+ u64 start_time;
+
+ /* Value at the start of the window */
+ u64 start_value;
+
+ /* Value growth in the previous window */
+ u64 prev_growth;
+};
+
+struct psi_trigger {
+ /* PSI state being monitored by the trigger */
+ enum psi_states state;
+
+	/* User-specified threshold in ns */
+ u64 threshold;
+
+ /* List node inside triggers list */
+ struct list_head node;
+
+ /* Backpointer needed during trigger destruction */
+ struct psi_group *group;
+
+ /* Wait queue for polling */
+ wait_queue_head_t event_wait;
+
+ /* Pending event flag */
+ int event;
+
+ /* Tracking window */
+ struct psi_window win;
+
+ /*
+ * Time last event was generated. Used for rate-limiting
+ * events to one per window
+ */
+ u64 last_event_time;
+
+ /* Deferred event(s) from previous ratelimit window */
+ bool pending_event;
+
+ /* Trigger type - PSI_AVGS for unprivileged, PSI_POLL for RT */
+ enum psi_aggregators aggregator;
+};
+
+struct psi_group {
+ struct psi_group *parent;
+ bool enabled;
+
+ /* Protects data used by the aggregator */
+ struct mutex avgs_lock;
+
+ /* Per-cpu task state & time tracking */
+ struct psi_group_cpu __percpu *pcpu;
+
+ /* Running pressure averages */
+ u64 avg_total[NR_PSI_STATES - 1];
+ u64 avg_last_update;
+ u64 avg_next_update;
+
+ /* Aggregator work control */
+ struct delayed_work avgs_work;
+
+ /* Unprivileged triggers against N*PSI_FREQ windows */
+ struct list_head avg_triggers;
+ u32 avg_nr_triggers[NR_PSI_STATES - 1];
+
+ /* Total stall times and sampled pressure averages */
+ u64 total[NR_PSI_AGGREGATORS][NR_PSI_STATES - 1];
+ unsigned long avg[NR_PSI_STATES - 1][3];
+
+ /* Monitor RT polling work control */
+ struct task_struct __rcu *rtpoll_task;
+ struct timer_list rtpoll_timer;
+ wait_queue_head_t rtpoll_wait;
+ atomic_t rtpoll_wakeup;
+ atomic_t rtpoll_scheduled;
+
+ /* Protects data used by the monitor */
+ struct mutex rtpoll_trigger_lock;
+
+ /* Configured RT polling triggers */
+ struct list_head rtpoll_triggers;
+ u32 rtpoll_nr_triggers[NR_PSI_STATES - 1];
+ u32 rtpoll_states;
+ u64 rtpoll_min_period;
+
+ /* Total stall times at the start of RT polling monitor activation */
+ u64 rtpoll_total[NR_PSI_STATES - 1];
+ u64 rtpoll_next_update;
+ u64 rtpoll_until;
+};
+
+#else /* CONFIG_PSI */
+
+#define NR_PSI_RESOURCES 0
+
+struct psi_group { };
+
+#endif /* CONFIG_PSI */
+
+#endif /* _LINUX_PSI_TYPES_H */
diff --git a/include/linux/psp-platform-access.h b/include/linux/psp-platform-access.h
new file mode 100644
index 000000000000..75da8f5f7ad8
--- /dev/null
+++ b/include/linux/psp-platform-access.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __PSP_PLATFORM_ACCESS_H
+#define __PSP_PLATFORM_ACCESS_H
+
+#include <linux/psp.h>
+
+enum psp_platform_access_msg {
+ PSP_CMD_NONE = 0x0,
+ PSP_I2C_REQ_BUS_CMD = 0x64,
+};
+
+struct psp_req_buffer_hdr {
+ u32 payload_size;
+ u32 status;
+} __packed;
+
+struct psp_request {
+ struct psp_req_buffer_hdr header;
+ void *buf;
+} __packed;
+
+/**
+ * psp_send_platform_access_msg() - Send a message to control platform features
+ *
+ * This function is intended to be used by drivers outside of ccp to communicate
+ * with the platform.
+ *
+ * Returns:
+ * 0: success
+ * -%EBUSY: mailbox in recovery or in use
+ * -%ENODEV: driver not bound with PSP device
+ * -%ETIMEDOUT: request timed out
+ * -%EIO: unknown error (see kernel log)
+ */
+int psp_send_platform_access_msg(enum psp_platform_access_msg msg, struct psp_request *req);
+
+/**
+ * psp_ring_platform_doorbell() - Ring platform doorbell
+ *
+ * This function is intended to be used by drivers outside of ccp to ring the
+ * platform doorbell with a message.
+ *
+ * Returns:
+ * 0: success
+ * -%EBUSY: mailbox in recovery or in use
+ * -%ENODEV: driver not bound with PSP device
+ * -%ETIMEDOUT: request timed out
+ * -%EIO: error will be stored in result argument
+ */
+int psp_ring_platform_doorbell(int msg, u32 *result);
+
+/**
+ * psp_check_platform_access_status() - Checks whether platform features are ready
+ *
+ * This function is intended to be used by drivers outside of ccp to determine
+ * if platform features have initialized.
+ *
+ * Returns:
+ * 0          platform features are ready
+ * -%ENODEV   platform features are not ready or present
+ */
+int psp_check_platform_access_status(void);
+
+#endif /* __PSP_PLATFORM_ACCESS_H */
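
A driver-side caller of this interface might look like the following sketch.
The psp_i2c_req layout and its opcode are assumptions for illustration; only
the leading psp_req_buffer_hdr is mandated by this header:

    #include <linux/errno.h>
    #include <linux/psp-platform-access.h>

    /* Hypothetical request: a psp_req_buffer_hdr followed by the payload. */
    struct psp_i2c_req {
            struct psp_req_buffer_hdr hdr;  /* must come first */
            u32 type;                       /* command-specific payload */
    } __packed;

    static int example_acquire_i2c_bus(void)
    {
            struct psp_i2c_req req = {
                    .hdr.payload_size = sizeof(req),
                    .type = 0,              /* hypothetical "acquire" opcode */
            };
            int ret;

            ret = psp_send_platform_access_msg(PSP_I2C_REQ_BUS_CMD,
                                               (struct psp_request *)&req);
            if (ret)
                    return ret;     /* -ENODEV, -EBUSY, -ETIMEDOUT or -EIO */

            /* on success the firmware status lives in the header */
            return req.hdr.status ? -EIO : 0;
    }
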
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
new file mode 100644
index 000000000000..7fd17e82bab4
--- /dev/null
+++ b/include/linux/psp-sev.h
@@ -0,0 +1,667 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Secure Encrypted Virtualization (SEV) driver interface
+ *
+ * Copyright (C) 2016-2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * SEV API spec is available at https://developer.amd.com/sev
+ */
+
+#ifndef __PSP_SEV_H__
+#define __PSP_SEV_H__
+
+#include <uapi/linux/psp-sev.h>
+
+#define SEV_FW_BLOB_MAX_SIZE 0x4000 /* 16KB */
+
+/**
+ * SEV platform state
+ */
+enum sev_state {
+ SEV_STATE_UNINIT = 0x0,
+ SEV_STATE_INIT = 0x1,
+ SEV_STATE_WORKING = 0x2,
+
+ SEV_STATE_MAX
+};
+
+/**
+ * SEV platform and guest management commands
+ */
+enum sev_cmd {
+ /* platform commands */
+ SEV_CMD_INIT = 0x001,
+ SEV_CMD_SHUTDOWN = 0x002,
+ SEV_CMD_FACTORY_RESET = 0x003,
+ SEV_CMD_PLATFORM_STATUS = 0x004,
+ SEV_CMD_PEK_GEN = 0x005,
+ SEV_CMD_PEK_CSR = 0x006,
+ SEV_CMD_PEK_CERT_IMPORT = 0x007,
+ SEV_CMD_PDH_CERT_EXPORT = 0x008,
+ SEV_CMD_PDH_GEN = 0x009,
+ SEV_CMD_DF_FLUSH = 0x00A,
+ SEV_CMD_DOWNLOAD_FIRMWARE = 0x00B,
+ SEV_CMD_GET_ID = 0x00C,
+ SEV_CMD_INIT_EX = 0x00D,
+
+ /* Guest commands */
+ SEV_CMD_DECOMMISSION = 0x020,
+ SEV_CMD_ACTIVATE = 0x021,
+ SEV_CMD_DEACTIVATE = 0x022,
+ SEV_CMD_GUEST_STATUS = 0x023,
+
+ /* Guest launch commands */
+ SEV_CMD_LAUNCH_START = 0x030,
+ SEV_CMD_LAUNCH_UPDATE_DATA = 0x031,
+ SEV_CMD_LAUNCH_UPDATE_VMSA = 0x032,
+ SEV_CMD_LAUNCH_MEASURE = 0x033,
+ SEV_CMD_LAUNCH_UPDATE_SECRET = 0x034,
+ SEV_CMD_LAUNCH_FINISH = 0x035,
+ SEV_CMD_ATTESTATION_REPORT = 0x036,
+
+ /* Guest migration commands (outgoing) */
+ SEV_CMD_SEND_START = 0x040,
+ SEV_CMD_SEND_UPDATE_DATA = 0x041,
+ SEV_CMD_SEND_UPDATE_VMSA = 0x042,
+ SEV_CMD_SEND_FINISH = 0x043,
+ SEV_CMD_SEND_CANCEL = 0x044,
+
+ /* Guest migration commands (incoming) */
+ SEV_CMD_RECEIVE_START = 0x050,
+ SEV_CMD_RECEIVE_UPDATE_DATA = 0x051,
+ SEV_CMD_RECEIVE_UPDATE_VMSA = 0x052,
+ SEV_CMD_RECEIVE_FINISH = 0x053,
+
+ /* Guest debug commands */
+ SEV_CMD_DBG_DECRYPT = 0x060,
+ SEV_CMD_DBG_ENCRYPT = 0x061,
+
+ SEV_CMD_MAX,
+};
+
+/**
+ * struct sev_data_init - INIT command parameters
+ *
+ * @flags: processing flags
+ * @tmr_address: system physical address used for SEV-ES
+ * @tmr_len: len of tmr_address
+ */
+struct sev_data_init {
+ u32 flags; /* In */
+ u32 reserved; /* In */
+ u64 tmr_address; /* In */
+ u32 tmr_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_init_ex - INIT_EX command parameters
+ *
+ * @length: len of the command buffer read by the PSP
+ * @flags: processing flags
+ * @tmr_address: system physical address used for SEV-ES
+ * @tmr_len: len of tmr_address
+ * @nv_address: system physical address used for PSP NV storage
+ * @nv_len: len of nv_address
+ */
+struct sev_data_init_ex {
+ u32 length; /* In */
+ u32 flags; /* In */
+ u64 tmr_address; /* In */
+ u32 tmr_len; /* In */
+ u32 reserved; /* In */
+ u64 nv_address; /* In/Out */
+ u32 nv_len; /* In */
+} __packed;
+
+#define SEV_INIT_FLAGS_SEV_ES 0x01
+
+/**
+ * struct sev_data_pek_csr - PEK_CSR command parameters
+ *
+ * @address: PEK certificate chain
+ * @len: len of certificate
+ */
+struct sev_data_pek_csr {
+ u64 address; /* In */
+ u32 len; /* In/Out */
+} __packed;
+
+/**
+ * struct sev_data_pek_cert_import - PEK_CERT_IMPORT command parameters
+ *
+ * @pek_cert_address: PEK certificate chain
+ * @pek_cert_len: len of PEK certificate
+ * @oca_cert_address: OCA certificate chain
+ * @oca_cert_len: len of OCA certificate
+ */
+struct sev_data_pek_cert_import {
+ u64 pek_cert_address; /* In */
+ u32 pek_cert_len; /* In */
+ u32 reserved; /* In */
+ u64 oca_cert_address; /* In */
+ u32 oca_cert_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_download_firmware - DOWNLOAD_FIRMWARE command parameters
+ *
+ * @address: physical address of firmware image
+ * @len: len of the firmware image
+ */
+struct sev_data_download_firmware {
+ u64 address; /* In */
+ u32 len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_get_id - GET_ID command parameters
+ *
+ * @address: physical address of region to place unique CPU ID(s)
+ * @len: len of the region
+ */
+struct sev_data_get_id {
+ u64 address; /* In */
+ u32 len; /* In/Out */
+} __packed;
+
+/**
+ * struct sev_data_pdh_cert_export - PDH_CERT_EXPORT command parameters
+ *
+ * @pdh_address: PDH certificate address
+ * @pdh_len: len of PDH certificate
+ * @cert_chain_address: PDH certificate chain
+ * @cert_chain_len: len of PDH certificate chain
+ */
+struct sev_data_pdh_cert_export {
+ u64 pdh_cert_address; /* In */
+ u32 pdh_cert_len; /* In/Out */
+ u32 reserved; /* In */
+ u64 cert_chain_address; /* In */
+ u32 cert_chain_len; /* In/Out */
+} __packed;
+
+/**
+ * struct sev_data_decommission - DECOMMISSION command parameters
+ *
+ * @handle: handle of the VM to decommission
+ */
+struct sev_data_decommission {
+ u32 handle; /* In */
+} __packed;
+
+/**
+ * struct sev_data_activate - ACTIVATE command parameters
+ *
+ * @handle: handle of the VM to activate
+ * @asid: asid assigned to the VM
+ */
+struct sev_data_activate {
+ u32 handle; /* In */
+ u32 asid; /* In */
+} __packed;
+
+/**
+ * struct sev_data_deactivate - DEACTIVATE command parameters
+ *
+ * @handle: handle of the VM to deactivate
+ */
+struct sev_data_deactivate {
+ u32 handle; /* In */
+} __packed;
+
+/**
+ * struct sev_data_guest_status - SEV GUEST_STATUS command parameters
+ *
+ * @handle: handle of the VM to retrieve status
+ * @policy: policy information for the VM
+ * @asid: current ASID of the VM
+ * @state: current state of the VM
+ */
+struct sev_data_guest_status {
+ u32 handle; /* In */
+ u32 policy; /* Out */
+ u32 asid; /* Out */
+ u8 state; /* Out */
+} __packed;
+
+/**
+ * struct sev_data_launch_start - LAUNCH_START command parameters
+ *
+ * @handle: handle assigned to the VM
+ * @policy: guest launch policy
+ * @dh_cert_address: physical address of DH certificate blob
+ * @dh_cert_len: len of DH certificate blob
+ * @session_address: physical address of session parameters
+ * @session_len: len of session parameters
+ */
+struct sev_data_launch_start {
+ u32 handle; /* In/Out */
+ u32 policy; /* In */
+ u64 dh_cert_address; /* In */
+ u32 dh_cert_len; /* In */
+ u32 reserved; /* In */
+ u64 session_address; /* In */
+ u32 session_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_launch_update_data - LAUNCH_UPDATE_DATA command parameters
+ *
+ * @handle: handle of the VM to update
+ * @len: len of memory to be encrypted
+ * @address: physical address of memory region to encrypt
+ */
+struct sev_data_launch_update_data {
+ u32 handle; /* In */
+ u32 reserved;
+ u64 address; /* In */
+ u32 len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_launch_update_vmsa - LAUNCH_UPDATE_VMSA command
+ *
+ * @handle: handle of the VM
+ * @address: physical address of memory region to encrypt
+ * @len: len of memory region to encrypt
+ */
+struct sev_data_launch_update_vmsa {
+ u32 handle; /* In */
+ u32 reserved;
+ u64 address; /* In */
+ u32 len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_launch_measure - LAUNCH_MEASURE command parameters
+ *
+ * @handle: handle of the VM to process
+ * @address: physical address containing the measurement blob
+ * @len: len of measurement blob
+ */
+struct sev_data_launch_measure {
+ u32 handle; /* In */
+ u32 reserved;
+ u64 address; /* In */
+ u32 len; /* In/Out */
+} __packed;
+
+/**
+ * struct sev_data_launch_secret - LAUNCH_SECRET command parameters
+ *
+ * @handle: handle of the VM to process
+ * @hdr_address: physical address containing the packet header
+ * @hdr_len: len of packet header
+ * @guest_address: system physical address of guest memory region
+ * @guest_len: len of guest_paddr
+ * @trans_address: physical address of transport memory buffer
+ * @trans_len: len of transport memory buffer
+ */
+struct sev_data_launch_secret {
+ u32 handle; /* In */
+ u32 reserved1;
+ u64 hdr_address; /* In */
+ u32 hdr_len; /* In */
+ u32 reserved2;
+ u64 guest_address; /* In */
+ u32 guest_len; /* In */
+ u32 reserved3;
+ u64 trans_address; /* In */
+ u32 trans_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_launch_finish - LAUNCH_FINISH command parameters
+ *
+ * @handle: handle of the VM to process
+ */
+struct sev_data_launch_finish {
+ u32 handle; /* In */
+} __packed;
+
+/**
+ * struct sev_data_send_start - SEND_START command parameters
+ *
+ * @handle: handle of the VM to process
+ * @policy: policy information for the VM
+ * @pdh_cert_address: physical address containing PDH certificate
+ * @pdh_cert_len: len of PDH certificate
+ * @plat_certs_address: physical address containing platform certificate
+ * @plat_certs_len: len of platform certificate
+ * @amd_certs_address: physical address containing AMD certificate
+ * @amd_certs_len: len of AMD certificate
+ * @session_address: physical address containing Session data
+ * @session_len: len of session data
+ */
+struct sev_data_send_start {
+ u32 handle; /* In */
+ u32 policy; /* Out */
+ u64 pdh_cert_address; /* In */
+ u32 pdh_cert_len; /* In */
+ u32 reserved1;
+ u64 plat_certs_address; /* In */
+ u32 plat_certs_len; /* In */
+ u32 reserved2;
+ u64 amd_certs_address; /* In */
+ u32 amd_certs_len; /* In */
+ u32 reserved3;
+ u64 session_address; /* In */
+ u32 session_len; /* In/Out */
+} __packed;
+
+/**
+ * struct sev_data_send_update_data - SEND_UPDATE_DATA command parameters
+ *
+ * @handle: handle of the VM to process
+ * @hdr_address: physical address containing packet header
+ * @hdr_len: len of packet header
+ * @guest_address: physical address of guest memory region to send
+ * @guest_len: len of guest memory region to send
+ * @trans_address: physical address of host memory region
+ * @trans_len: len of host memory region
+ */
+struct sev_data_send_update_data {
+ u32 handle; /* In */
+ u32 reserved1;
+ u64 hdr_address; /* In */
+ u32 hdr_len; /* In/Out */
+ u32 reserved2;
+ u64 guest_address; /* In */
+ u32 guest_len; /* In */
+ u32 reserved3;
+ u64 trans_address; /* In */
+ u32 trans_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_send_update_vmsa - SEND_UPDATE_VMSA command parameters
+ *
+ * @handle: handle of the VM to process
+ * @hdr_address: physical address containing packet header
+ * @hdr_len: len of packet header
+ * @guest_address: physical address of guest memory region to send
+ * @guest_len: len of guest memory region to send
+ * @trans_address: physical address of host memory region
+ * @trans_len: len of host memory region
+ */
+struct sev_data_send_update_vmsa {
+ u32 handle; /* In */
+ u64 hdr_address; /* In */
+ u32 hdr_len; /* In/Out */
+ u32 reserved2;
+ u64 guest_address; /* In */
+ u32 guest_len; /* In */
+ u32 reserved3;
+ u64 trans_address; /* In */
+ u32 trans_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_send_finish - SEND_FINISH command parameters
+ *
+ * @handle: handle of the VM to process
+ */
+struct sev_data_send_finish {
+ u32 handle; /* In */
+} __packed;
+
+/**
+ * struct sev_data_send_cancel - SEND_CANCEL command parameters
+ *
+ * @handle: handle of the VM to process
+ */
+struct sev_data_send_cancel {
+ u32 handle; /* In */
+} __packed;
+
+/**
+ * struct sev_data_receive_start - RECEIVE_START command parameters
+ *
+ * @handle: handle of the VM to perform receive operation
+ * @policy: policy information for the VM
+ * @pdh_cert_address: system physical address containing PDH certificate blob
+ * @pdh_cert_len: len of PDH certificate blob
+ * @session_address: system physical address containing session blob
+ * @session_len: len of session blob
+ */
+struct sev_data_receive_start {
+ u32 handle; /* In/Out */
+ u32 policy; /* In */
+ u64 pdh_cert_address; /* In */
+ u32 pdh_cert_len; /* In */
+ u32 reserved1;
+ u64 session_address; /* In */
+ u32 session_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_receive_update_data - RECEIVE_UPDATE_DATA command parameters
+ *
+ * @handle: handle of the VM to update
+ * @hdr_address: physical address containing packet header blob
+ * @hdr_len: len of packet header
+ * @guest_address: system physical address of guest memory region
+ * @guest_len: len of guest memory region
+ * @trans_address: system physical address of transport buffer
+ * @trans_len: len of transport buffer
+ */
+struct sev_data_receive_update_data {
+ u32 handle; /* In */
+ u32 reserved1;
+ u64 hdr_address; /* In */
+ u32 hdr_len; /* In */
+ u32 reserved2;
+ u64 guest_address; /* In */
+ u32 guest_len; /* In */
+ u32 reserved3;
+ u64 trans_address; /* In */
+ u32 trans_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_receive_update_vmsa - RECEIVE_UPDATE_VMSA command parameters
+ *
+ * @handle: handle of the VM to update
+ * @hdr_address: physical address containing packet header blob
+ * @hdr_len: len of packet header
+ * @guest_address: system physical address of guest memory region
+ * @guest_len: len of guest memory region
+ * @trans_address: system physical address of transport buffer
+ * @trans_len: len of transport buffer
+ */
+struct sev_data_receive_update_vmsa {
+ u32 handle; /* In */
+ u32 reserved1;
+ u64 hdr_address; /* In */
+ u32 hdr_len; /* In */
+ u32 reserved2;
+ u64 guest_address; /* In */
+ u32 guest_len; /* In */
+ u32 reserved3;
+ u64 trans_address; /* In */
+ u32 trans_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_receive_finish - RECEIVE_FINISH command parameters
+ *
+ * @handle: handle of the VM to finish
+ */
+struct sev_data_receive_finish {
+ u32 handle; /* In */
+} __packed;
+
+/**
+ * struct sev_data_dbg - DBG_ENCRYPT/DBG_DECRYPT command parameters
+ *
+ * @handle: handle of the VM to perform debug operation
+ * @src_addr: source address of data to operate on
+ * @dst_addr: destination address of data to operate on
+ * @len: len of data to operate on
+ */
+struct sev_data_dbg {
+ u32 handle; /* In */
+ u32 reserved;
+ u64 src_addr; /* In */
+ u64 dst_addr; /* In */
+ u32 len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_attestation_report - SEV_ATTESTATION_REPORT command parameters
+ *
+ * @handle: handle of the VM
+ * @mnonce: a random nonce that will be included in the report.
+ * @address: physical address where the report will be copied.
+ * @len: length of the physical buffer.
+ */
+struct sev_data_attestation_report {
+ u32 handle; /* In */
+ u32 reserved;
+ u64 address; /* In */
+ u8 mnonce[16]; /* In */
+ u32 len; /* In/Out */
+} __packed;
+
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+
+/**
+ * sev_platform_init - perform SEV INIT command
+ *
+ * @error: SEV command return code
+ *
+ * Returns:
+ * 0 if the SEV successfully processed the command
+ * -%ENODEV if the SEV device is not available
+ * -%ENOTSUPP if the PSP device does not support SEV
+ * -%ETIMEDOUT if the SEV command timed out
+ * -%EIO if the SEV returned a non-zero return code
+ */
+int sev_platform_init(int *error);
+
+/**
+ * sev_platform_status - perform SEV PLATFORM_STATUS command
+ *
+ * @status: sev_user_data_status structure to be processed
+ * @error: SEV command return code
+ *
+ * Returns:
+ * 0 if the SEV successfully processed the command
+ * -%ENODEV if the SEV device is not available
+ * -%ENOTSUPP if the PSP device does not support SEV
+ * -%ETIMEDOUT if the SEV command timed out
+ * -%EIO if the SEV returned a non-zero return code
+ */
+int sev_platform_status(struct sev_user_data_status *status, int *error);
+
+/**
+ * sev_issue_cmd_external_user - issue SEV command by other driver with a file
+ * handle.
+ *
+ * This function can be used by other drivers to issue a SEV command on
+ * behalf of userspace. The caller must pass a valid SEV file descriptor
+ * so that we know that it has access to the SEV device.
+ *
+ * @filep: SEV device file pointer
+ * @id: command to issue
+ * @data: command buffer
+ * @error: SEV command return code
+ *
+ * Returns:
+ * 0 if the SEV successfully processed the command
+ * -%ENODEV if the SEV device is not available
+ * -%ENOTSUPP if the PSP device does not support SEV
+ * -%ETIMEDOUT if the SEV command timed out
+ * -%EIO if the SEV returned a non-zero return code
+ * -%EINVAL if the SEV file descriptor is not valid
+ */
+int sev_issue_cmd_external_user(struct file *filep, unsigned int id,
+ void *data, int *error);
+
+/**
+ * sev_guest_deactivate - perform SEV DEACTIVATE command
+ *
+ * @data: sev_data_deactivate structure to be processed
+ * @error: SEV command return code
+ *
+ * Returns:
+ * 0 if the SEV successfully processed the command
+ * -%ENODEV if the SEV device is not available
+ * -%ENOTSUPP if the PSP device does not support SEV
+ * -%ETIMEDOUT if the SEV command timed out
+ * -%EIO if the SEV returned a non-zero return code
+ */
+int sev_guest_deactivate(struct sev_data_deactivate *data, int *error);
+
+/**
+ * sev_guest_activate - perform SEV ACTIVATE command
+ *
+ * @data: sev_data_activate structure to be processed
+ * @error: SEV command return code
+ *
+ * Returns:
+ * 0 if the SEV successfully processed the command
+ * -%ENODEV if the SEV device is not available
+ * -%ENOTSUPP if the PSP device does not support SEV
+ * -%ETIMEDOUT if the SEV command timed out
+ * -%EIO if the SEV returned a non-zero return code
+ */
+int sev_guest_activate(struct sev_data_activate *data, int *error);
+
+/**
+ * sev_guest_df_flush - perform SEV DF_FLUSH command
+ *
+ * @error: SEV command return code
+ *
+ * Returns:
+ * 0 if the SEV successfully processed the command
+ * -%ENODEV if the SEV device is not available
+ * -%ENOTSUPP if the PSP device does not support SEV
+ * -%ETIMEDOUT if the SEV command timed out
+ * -%EIO if the SEV returned a non-zero return code
+ */
+int sev_guest_df_flush(int *error);
+
+/**
+ * sev_guest_decommission - perform SEV DECOMMISSION command
+ *
+ * @data: sev_data_decommission structure to be processed
+ * @error: SEV command return code
+ *
+ * Returns:
+ * 0 if the SEV successfully processed the command
+ * -%ENODEV if the SEV device is not available
+ * -%ENOTSUPP if the PSP device does not support SEV
+ * -%ETIMEDOUT if the SEV command timed out
+ * -%EIO if the SEV returned a non-zero return code
+ */
+int sev_guest_decommission(struct sev_data_decommission *data, int *error);
+
+void *psp_copy_user_blob(u64 uaddr, u32 len);
+
+#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
+
+static inline int
+sev_platform_status(struct sev_user_data_status *status, int *error) { return -ENODEV; }
+
+static inline int sev_platform_init(int *error) { return -ENODEV; }
+
+static inline int
+sev_guest_deactivate(struct sev_data_deactivate *data, int *error) { return -ENODEV; }
+
+static inline int
+sev_guest_decommission(struct sev_data_decommission *data, int *error) { return -ENODEV; }
+
+static inline int
+sev_guest_activate(struct sev_data_activate *data, int *error) { return -ENODEV; }
+
+static inline int sev_guest_df_flush(int *error) { return -ENODEV; }
+
+static inline int
+sev_issue_cmd_external_user(struct file *filep, unsigned int id, void *data, int *error) { return -ENODEV; }
+
+static inline void *psp_copy_user_blob(u64 __user uaddr, u32 len) { return ERR_PTR(-EINVAL); }
+
+#endif /* CONFIG_CRYPTO_DEV_SP_PSP */
+
+#endif /* __PSP_SEV_H__ */
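
All of these helpers share one call convention: the return value reports
transport-level problems, while *error carries the SEV firmware status code
when -EIO is returned. A minimal sketch of that pattern (the pr_err()
reporting is illustrative):

    #include <linux/printk.h>
    #include <linux/psp-sev.h>

    static int example_sev_setup(void)
    {
            int error, rc;

            rc = sev_platform_init(&error);
            if (rc) {
                    pr_err("SEV init failed: rc=%d fw_error=%#x\n", rc, error);
                    return rc;
            }

            /* flush the data fabric before (de)activating guests */
            rc = sev_guest_df_flush(&error);
            if (rc)
                    pr_err("DF_FLUSH failed: rc=%d fw_error=%#x\n", rc, error);

            return rc;
    }
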
diff --git a/include/linux/psp-tee.h b/include/linux/psp-tee.h
new file mode 100644
index 000000000000..cb0c95d6d76b
--- /dev/null
+++ b/include/linux/psp-tee.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * AMD Trusted Execution Environment (TEE) interface
+ *
+ * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
+ *
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ */
+
+#ifndef __PSP_TEE_H_
+#define __PSP_TEE_H_
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+/* This file defines the Trusted Execution Environment (TEE) interface commands
+ * and the API exported by AMD Secure Processor driver to communicate with
+ * AMD-TEE Trusted OS.
+ */
+
+/**
+ * enum tee_cmd_id - TEE Interface Command IDs
+ * @TEE_CMD_ID_LOAD_TA: Load Trusted Application (TA) binary into
+ * TEE environment
+ * @TEE_CMD_ID_UNLOAD_TA: Unload TA binary from TEE environment
+ * @TEE_CMD_ID_OPEN_SESSION: Open session with loaded TA
+ * @TEE_CMD_ID_CLOSE_SESSION: Close session with loaded TA
+ * @TEE_CMD_ID_INVOKE_CMD: Invoke a command with loaded TA
+ * @TEE_CMD_ID_MAP_SHARED_MEM: Map shared memory
+ * @TEE_CMD_ID_UNMAP_SHARED_MEM: Unmap shared memory
+ */
+enum tee_cmd_id {
+ TEE_CMD_ID_LOAD_TA = 1,
+ TEE_CMD_ID_UNLOAD_TA,
+ TEE_CMD_ID_OPEN_SESSION,
+ TEE_CMD_ID_CLOSE_SESSION,
+ TEE_CMD_ID_INVOKE_CMD,
+ TEE_CMD_ID_MAP_SHARED_MEM,
+ TEE_CMD_ID_UNMAP_SHARED_MEM,
+};
+
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+/**
+ * psp_tee_process_cmd() - Process command in Trusted Execution Environment
+ * @cmd_id: TEE command ID (&enum tee_cmd_id)
+ * @buf: Command buffer for TEE processing. On success, is updated
+ * with the response
+ * @len: Length of command buffer in bytes
+ * @status: On success, holds the TEE command execution status
+ *
+ * This function submits a command to the Trusted OS for processing in the
+ * TEE environment and waits for a response or until the command times out.
+ *
+ * Returns:
+ * 0 if TEE successfully processed the command
+ * -%ENODEV if PSP device not available
+ * -%EINVAL if invalid input
+ * -%ETIMEDOUT if TEE command timed out
+ * -%EBUSY if PSP device is not responsive
+ */
+int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
+ u32 *status);
+
+/**
+ * psp_check_tee_status() - Checks whether there is a TEE which a driver can
+ * talk to.
+ *
+ * This function can be used by the AMD-TEE driver to query whether there is a
+ * TEE with which it can communicate.
+ *
+ * Returns:
+ * 0 if the device has a TEE
+ * -%ENODEV if there is no TEE available
+ */
+int psp_check_tee_status(void);
+
+#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
+
+static inline int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf,
+ size_t len, u32 *status)
+{
+ return -ENODEV;
+}
+
+static inline int psp_check_tee_status(void)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_CRYPTO_DEV_SP_PSP */
+#endif /* __PSP_TEE_H_ */
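
A sketch of submitting a LOAD_TA command through this interface follows. The
example_cmd_load_ta layout is illustrative only — the real command buffers are
defined by the AMD-TEE driver, not by this header:

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/psp-tee.h>

    struct example_cmd_load_ta {
            u32 low_addr;
            u32 hi_addr;
            u32 size;
            u32 ta_handle;  /* filled in by the Trusted OS */
    };

    static int example_load_ta(phys_addr_t blob, u32 size, u32 *handle)
    {
            struct example_cmd_load_ta cmd = {
                    .low_addr = lower_32_bits(blob),
                    .hi_addr = upper_32_bits(blob),
                    .size = size,
            };
            u32 status = 0;
            int ret;

            ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, &cmd, sizeof(cmd),
                                      &status);
            if (ret || status)      /* transport error or TEE status code */
                    return ret ? ret : -EIO;

            *handle = cmd.ta_handle;
            return 0;
    }
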
diff --git a/include/linux/psp.h b/include/linux/psp.h
new file mode 100644
index 000000000000..92e60aeef21e
--- /dev/null
+++ b/include/linux/psp.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __PSP_H
+#define __PSP_H
+
+#ifdef CONFIG_X86
+#include <linux/mem_encrypt.h>
+
+#define __psp_pa(x) __sme_pa(x)
+#else
+#define __psp_pa(x) __pa(x)
+#endif
+
+/*
+ * Fields and bits used by most PSP mailboxes
+ *
+ * Note: Some mailboxes (such as SEV) have extra bits or different meanings
+ * and should include an appropriate local definition in their source file.
+ */
+#define PSP_CMDRESP_STS GENMASK(15, 0)
+#define PSP_CMDRESP_CMD GENMASK(23, 16)
+#define PSP_CMDRESP_RESERVED GENMASK(29, 24)
+#define PSP_CMDRESP_RECOVERY BIT(30)
+#define PSP_CMDRESP_RESP BIT(31)
+
+#define PSP_DRBL_MSG PSP_CMDRESP_CMD
+#define PSP_DRBL_RING BIT(0)
+
+#endif /* __PSP_H */
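
To illustrate how these masks are meant to be consumed (a sketch, not code
from this patch), a status check over a raw command/response register value
could be written with the standard bitfield helpers:

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/errno.h>
    #include <linux/psp.h>

    static inline int example_psp_mbox_status(u32 reg)
    {
            if (reg & PSP_CMDRESP_RECOVERY)
                    return -EBUSY;          /* mailbox in recovery mode */
            if (!(reg & PSP_CMDRESP_RESP))
                    return -ETIMEDOUT;      /* response bit not set yet */

            /* the low 16 bits carry the firmware status code */
            return FIELD_GET(PSP_CMDRESP_STS, reg) ? -EIO : 0;
    }
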
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 61f806a7fe29..638507a3c8ff 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Persistent Storage - pstore.h
*
@@ -5,19 +6,6 @@
*
* This code is the generic layer to export data records from platform
* level persistent storage via a file system.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _LINUX_PSTORE_H
#define _LINUX_PSTORE_H
@@ -32,21 +20,32 @@
struct module;
-/* pstore record types (see fs/pstore/inode.c for filename templates) */
+/*
+ * pstore record types (see fs/pstore/platform.c for pstore_type_names[])
+ * These values may be written to storage (see EFI vars backend), so
+ * they are kind of an ABI. Be careful changing the mappings.
+ */
enum pstore_type_id {
+ /* Frontend storage types */
PSTORE_TYPE_DMESG = 0,
PSTORE_TYPE_MCE = 1,
PSTORE_TYPE_CONSOLE = 2,
PSTORE_TYPE_FTRACE = 3,
- /* PPC64 partition types */
+
+ /* PPC64-specific partition types */
PSTORE_TYPE_PPC_RTAS = 4,
PSTORE_TYPE_PPC_OF = 5,
PSTORE_TYPE_PPC_COMMON = 6,
PSTORE_TYPE_PMSG = 7,
PSTORE_TYPE_PPC_OPAL = 8,
- PSTORE_TYPE_UNKNOWN = 255
+
+ /* End of the list */
+ PSTORE_TYPE_MAX
};
+const char *pstore_type_to_name(enum pstore_type_id type);
+enum pstore_type_id pstore_name_to_type(const char *name);
+
struct pstore_info;
/**
* struct pstore_record - details of a pstore record entry
@@ -58,6 +57,9 @@ struct pstore_info;
* @size: size of @buf
* @ecc_notice_size:
* ECC information for @buf
+ * @priv: pointer for backend specific use, will be
+ * kfree()d by the pstore core if non-NULL
+ * when the record is freed.
*
* Valid for PSTORE_TYPE_DMESG @type:
*
@@ -71,10 +73,11 @@ struct pstore_record {
struct pstore_info *psi;
enum pstore_type_id type;
u64 id;
- struct timespec time;
+ struct timespec64 time;
char *buf;
ssize_t size;
ssize_t ecc_notice_size;
+ void *priv;
int count;
enum kmsg_dump_reason reason;
@@ -85,15 +88,24 @@ struct pstore_record {
/**
* struct pstore_info - backend pstore driver structure
*
- * @owner: module which is repsonsible for this backend driver
+ * @owner: module which is responsible for this backend driver
* @name: name of the backend driver
*
* @buf_lock: spinlock to serialize access to @buf
* @buf: preallocated crash dump buffer
- * @bufsize: size of @buf available for crash dump writes
+ * @bufsize: size of @buf available for crash dump bytes (must match
+ * smallest number of bytes available for writing to a
+ * backend entry, since compressed bytes don't take kindly
+ * to being truncated)
*
* @read_mutex: serializes @open, @read, @close, and @erase callbacks
* @flags: bitfield of frontends the backend can accept writes for
+ * @max_reason: Used when PSTORE_FLAGS_DMESG is set. Contains the
+ * kmsg_dump_reason enum value. KMSG_DUMP_UNDEF means
+ * "use existing kmsg_dump() filtering, based on the
+ * printk.always_kmsg_dump boot param" (which is either
+ * KMSG_DUMP_OOPS when false, or KMSG_DUMP_MAX when
+ * true); see printk.always_kmsg_dump for more details.
* @data: backend-private pointer passed back during callbacks
*
* Callbacks:
@@ -168,7 +180,7 @@ struct pstore_record {
*/
struct pstore_info {
struct module *owner;
- char *name;
+ const char *name;
spinlock_t buf_lock;
char *buf;
@@ -177,6 +189,7 @@ struct pstore_info {
struct mutex read_mutex;
int flags;
+ int max_reason;
void *data;
int (*open)(struct pstore_info *psi);
@@ -189,14 +202,13 @@ struct pstore_info {
};
/* Supported frontends */
-#define PSTORE_FLAGS_DMESG (1 << 0)
-#define PSTORE_FLAGS_CONSOLE (1 << 1)
-#define PSTORE_FLAGS_FTRACE (1 << 2)
-#define PSTORE_FLAGS_PMSG (1 << 3)
+#define PSTORE_FLAGS_DMESG BIT(0)
+#define PSTORE_FLAGS_CONSOLE BIT(1)
+#define PSTORE_FLAGS_FTRACE BIT(2)
+#define PSTORE_FLAGS_PMSG BIT(3)
extern int pstore_register(struct pstore_info *);
extern void pstore_unregister(struct pstore_info *);
-extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
struct pstore_ftrace_record {
unsigned long ip;
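
A minimal backend registration using the reworked fields above might look like
the following sketch (the backend name and stubbed callbacks are illustrative;
a real backend would also provide @buf/@bufsize for dmesg dumps):

    #include <linux/module.h>
    #include <linux/pstore.h>

    static int example_open(struct pstore_info *psi) { return 0; }

    static ssize_t example_read(struct pstore_record *record)
    {
            return 0;       /* no more records */
    }

    static int example_write(struct pstore_record *record)
    {
            return 0;       /* pretend the record was persisted */
    }

    static struct pstore_info example_psinfo = {
            .owner          = THIS_MODULE,
            .name           = "example",            /* @name is now const char * */
            .flags          = PSTORE_FLAGS_DMESG,
            .max_reason     = KMSG_DUMP_OOPS,       /* record panics and oopses */
            .open           = example_open,
            .read           = example_read,
            .write          = example_write,
    };

    static int __init example_pstore_init(void)
    {
            return pstore_register(&example_psinfo);
    }
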
diff --git a/include/linux/pstore_blk.h b/include/linux/pstore_blk.h
new file mode 100644
index 000000000000..924ca07aafbd
--- /dev/null
+++ b/include/linux/pstore_blk.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __PSTORE_BLK_H_
+#define __PSTORE_BLK_H_
+
+#include <linux/types.h>
+#include <linux/pstore.h>
+#include <linux/pstore_zone.h>
+
+/**
+ * struct pstore_device_info - back-end pstore/blk driver structure.
+ *
+ * @flags:	Refer to the macros starting with PSTORE_FLAGS defined in
+ *		linux/pstore.h. They indicate which frontends this device
+ *		supports. Zero means all frontends, for compatibility.
+ * @zone: The struct pstore_zone_info details.
+ *
+ */
+struct pstore_device_info {
+ unsigned int flags;
+ struct pstore_zone_info zone;
+};
+
+int register_pstore_device(struct pstore_device_info *dev);
+void unregister_pstore_device(struct pstore_device_info *dev);
+
+/**
+ * struct pstore_blk_config - the pstore_blk backend configuration
+ *
+ * @device: Name of the desired block device
+ * @max_reason: Maximum kmsg dump reason to store to block device
+ * @kmsg_size:	Total size for kmsg dumps
+ * @pmsg_size: Total size of the pmsg storage area
+ * @console_size: Total size of the console storage area
+ * @ftrace_size: Total size for ftrace logging data (for all CPUs)
+ */
+struct pstore_blk_config {
+ char device[80];
+ enum kmsg_dump_reason max_reason;
+ unsigned long kmsg_size;
+ unsigned long pmsg_size;
+ unsigned long console_size;
+ unsigned long ftrace_size;
+};
+
+/**
+ * pstore_blk_get_config - get a copy of the pstore_blk backend configuration
+ *
+ * @info: The struct pstore_blk_config to be filled in
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+int pstore_blk_get_config(struct pstore_blk_config *info);
+
+#endif
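
Usage sketch: a block driver that wants to mirror the active pstore/blk
settings can fetch a copy of the configuration (the reporting here is
illustrative):

    #include <linux/printk.h>
    #include <linux/pstore_blk.h>

    static void example_report_config(void)
    {
            struct pstore_blk_config cfg;

            if (pstore_blk_get_config(&cfg))
                    return;

            pr_info("pstore/blk on %s: kmsg=%lu pmsg=%lu console=%lu\n",
                    cfg.device, cfg.kmsg_size, cfg.pmsg_size,
                    cfg.console_size);
    }
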
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 9395f06e8372..9d65ff94e216 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -1,85 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com>
* Copyright (C) 2011 Kees Cook <keescook@chromium.org>
* Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_PSTORE_RAM_H__
#define __LINUX_PSTORE_RAM_H__
-#include <linux/compiler.h>
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/types.h>
-
-/*
- * Choose whether access to the RAM zone requires locking or not. If a zone
- * can be written to from different CPUs like with ftrace for example, then
- * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required.
- */
-#define PRZ_FLAG_NO_LOCK BIT(0)
-
-struct persistent_ram_buffer;
-struct rs_control;
+#include <linux/pstore.h>
struct persistent_ram_ecc_info {
int block_size;
int ecc_size;
int symsize;
int poly;
+ uint16_t *par;
};
-struct persistent_ram_zone {
- phys_addr_t paddr;
- size_t size;
- void *vaddr;
- struct persistent_ram_buffer *buffer;
- size_t buffer_size;
- u32 flags;
- raw_spinlock_t buffer_lock;
-
- /* ECC correction */
- char *par_buffer;
- char *par_header;
- struct rs_control *rs_decoder;
- int corrected_bytes;
- int bad_blocks;
- struct persistent_ram_ecc_info ecc_info;
-
- char *old_log;
- size_t old_log_size;
-};
-
-struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
- u32 sig, struct persistent_ram_ecc_info *ecc_info,
- unsigned int memtype, u32 flags);
-void persistent_ram_free(struct persistent_ram_zone *prz);
-void persistent_ram_zap(struct persistent_ram_zone *prz);
-
-int persistent_ram_write(struct persistent_ram_zone *prz, const void *s,
- unsigned int count);
-int persistent_ram_write_user(struct persistent_ram_zone *prz,
- const void __user *s, unsigned int count);
-
-void persistent_ram_save_old(struct persistent_ram_zone *prz);
-size_t persistent_ram_old_size(struct persistent_ram_zone *prz);
-void *persistent_ram_old(struct persistent_ram_zone *prz);
-void persistent_ram_free_old(struct persistent_ram_zone *prz);
-ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
- char *str, size_t len);
-
/*
* Ramoops platform data
* @mem_size memory size for ramoops
@@ -96,7 +34,7 @@ struct ramoops_platform_data {
unsigned long console_size;
unsigned long ftrace_size;
unsigned long pmsg_size;
- int dump_oops;
+ int max_reason;
u32 flags;
struct persistent_ram_ecc_info ecc_info;
};
diff --git a/include/linux/pstore_zone.h b/include/linux/pstore_zone.h
new file mode 100644
index 000000000000..1e35eaa33e5e
--- /dev/null
+++ b/include/linux/pstore_zone.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __PSTORE_ZONE_H_
+#define __PSTORE_ZONE_H_
+
+#include <linux/types.h>
+
+typedef ssize_t (*pstore_zone_read_op)(char *, size_t, loff_t);
+typedef ssize_t (*pstore_zone_write_op)(const char *, size_t, loff_t);
+typedef ssize_t (*pstore_zone_erase_op)(size_t, loff_t);
+/**
+ * struct pstore_zone_info - pstore/zone back-end driver structure
+ *
+ * @owner: Module which is responsible for this back-end driver.
+ * @name: Name of the back-end driver.
+ * @total_size: The total size in bytes pstore/zone can use. It must be
+ *		greater than 4096 and a multiple of 4096.
+ * @kmsg_size:	The size of the oops/panic zone. Zero means disabled;
+ *		otherwise it must be a multiple of SECTOR_SIZE (512 bytes).
+ * @max_reason: Maximum kmsg dump reason to store.
+ * @pmsg_size:	The size of the pmsg zone; same constraints as @kmsg_size.
+ * @console_size: The size of the console zone; same constraints as
+ *		@kmsg_size.
+ * @ftrace_size: The size of the ftrace zone; same constraints as
+ *		@kmsg_size.
+ * @read:	The general read operation. Both function parameters,
+ *		@size and @offset, are relative to the storage.
+ *		On success the number of bytes read should be returned;
+ *		anything else means error.
+ * @write:	The same as @read, with the following error semantics:
+ *		-EBUSY means try to write again later.
+ *		-ENOMSG means try the next zone.
+ * @erase:	The general erase operation, for devices that need a
+ *		special erase step. Both function parameters, @size and
+ *		@offset, are relative to the storage.
+ *		Returns 0 on success, anything else on failure.
+ * @panic_write: The write operation used only in the panic path. It is
+ *		optional if you do not need panic logs. The parameters are
+ *		relative to the storage.
+ *		On success the number of bytes written should be returned;
+ *		anything else excluding -ENOMSG means error. -ENOMSG means
+ *		try the next zone.
+ */
+struct pstore_zone_info {
+ struct module *owner;
+ const char *name;
+
+ unsigned long total_size;
+ unsigned long kmsg_size;
+ int max_reason;
+ unsigned long pmsg_size;
+ unsigned long console_size;
+ unsigned long ftrace_size;
+ pstore_zone_read_op read;
+ pstore_zone_write_op write;
+ pstore_zone_erase_op erase;
+ pstore_zone_write_op panic_write;
+};
+
+extern int register_pstore_zone(struct pstore_zone_info *info);
+extern void unregister_pstore_zone(struct pstore_zone_info *info);
+
+#endif
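
A minimal pstore/zone backend built on a flat in-memory buffer might look like
this sketch. The example_buf storage and the zone sizes are assumptions for
illustration; a real backend would read and write its device here:

    #include <linux/kmsg_dump.h>
    #include <linux/module.h>
    #include <linux/pstore_zone.h>
    #include <linux/string.h>

    static char example_buf[1 << 20];       /* 1 MiB, a multiple of 4096 */

    static ssize_t example_read(char *buf, size_t bytes, loff_t pos)
    {
            memcpy(buf, example_buf + pos, bytes);
            return bytes;
    }

    static ssize_t example_write(const char *buf, size_t bytes, loff_t pos)
    {
            memcpy(example_buf + pos, buf, bytes);
            return bytes;
    }

    static struct pstore_zone_info example_zone_info = {
            .owner          = THIS_MODULE,
            .name           = "example",
            .total_size     = sizeof(example_buf),
            .kmsg_size      = 64 * 1024,    /* multiple of SECTOR_SIZE */
            .max_reason     = KMSG_DUMP_OOPS,
            .read           = example_read,
            .write          = example_write,
    };

    static int __init example_zone_init(void)
    {
            return register_pstore_zone(&example_zone_info);
    }
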
diff --git a/include/linux/ptdump.h b/include/linux/ptdump.h
new file mode 100644
index 000000000000..2a3a95586425
--- /dev/null
+++ b/include/linux/ptdump.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_PTDUMP_H
+#define _LINUX_PTDUMP_H
+
+#include <linux/mm_types.h>
+
+struct ptdump_range {
+ unsigned long start;
+ unsigned long end;
+};
+
+struct ptdump_state {
+ /* level is 0:PGD to 4:PTE, or -1 if unknown */
+ void (*note_page)(struct ptdump_state *st, unsigned long addr,
+ int level, u64 val);
+ void (*effective_prot)(struct ptdump_state *st, int level, u64 val);
+ const struct ptdump_range *range;
+};
+
+void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd);
+
+#endif /* _LINUX_PTDUMP_H */
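
As a usage sketch, a minimal walker that counts nonzero PTE-level entries can
be built on this interface. The direct-map range and the counting policy are
assumptions for illustration:

    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/ptdump.h>

    struct example_counter {
            struct ptdump_state st;
            unsigned long ptes;
    };

    static void example_note_page(struct ptdump_state *st, unsigned long addr,
                                  int level, u64 val)
    {
            struct example_counter *c =
                    container_of(st, struct example_counter, st);

            /* level 4 is PTE; a nonzero value means the entry is populated */
            if (level == 4 && val)
                    c->ptes++;
    }

    static const struct ptdump_range example_ranges[] = {
            {PAGE_OFFSET, ~0UL},    /* kernel direct map (assumed range) */
            {0, 0}                  /* terminator: start == end ends the walk */
    };

    static void example_count_ptes(void)
    {
            struct example_counter c = {
                    .st = {
                            .note_page = example_note_page,
                            .range     = example_ranges,
                    },
            };

            ptdump_walk_pgd(&c.st, &init_mm, NULL);
            pr_info("ptdump: %lu populated PTE entries\n", c.ptes);
    }
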
diff --git a/include/linux/pti.h b/include/linux/pti.h
index b3ea01a3197e..1a941efcaa62 100644
--- a/include/linux/pti.h
+++ b/include/linux/pti.h
@@ -1,43 +1,12 @@
-/*
- * Copyright (C) Intel 2011
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * The PTI (Parallel Trace Interface) driver directs trace data routed from
- * various parts in the system out through the Intel Penwell PTI port and
- * out of the mobile device for analysis with a debugging tool
- * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
- * compact JTAG, standard.
- *
- * This header file will allow other parts of the OS to use the
- * interface to write out it's contents for debugging a mobile system.
- */
-
-#ifndef PTI_H_
-#define PTI_H_
-
-/* offset for last dword of any PTI message. Part of MIPI P1149.7 */
-#define PTI_LASTDWORD_DTS 0x30
-
-/* basic structure used as a write address to the PTI HW */
-struct pti_masterchannel {
- u8 master;
- u8 channel;
-};
-
-/* the following functions are defined in misc/pti.c */
-void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count);
-struct pti_masterchannel *pti_request_masterchannel(u8 type,
- const char *thread_name);
-void pti_release_masterchannel(struct pti_masterchannel *mc);
-
-#endif /*PTI_H_*/
+// SPDX-License-Identifier: GPL-2.0
+#ifndef _INCLUDE_PTI_H
+#define _INCLUDE_PTI_H
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+#include <asm/pti.h>
+#else
+static inline void pti_init(void) { }
+static inline void pti_finalize(void) { }
+#endif
+
+#endif
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
index a079656b614c..1b5a953c6bbc 100644
--- a/include/linux/ptp_classify.h
+++ b/include/linux/ptp_classify.h
@@ -1,30 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* PTP 1588 support
*
* This file implements a BPF that recognizes PTP event messages.
*
* Copyright (C) 2010 OMICRON electronics GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _PTP_CLASSIFY_H_
#define _PTP_CLASSIFY_H_
+#include <asm/unaligned.h>
#include <linux/ip.h>
+#include <linux/ktime.h>
#include <linux/skbuff.h>
+#include <linux/udp.h>
+#include <net/checksum.h>
#define PTP_CLASS_NONE 0x00 /* not a PTP event message */
#define PTP_CLASS_V1 0x01 /* protocol version 1 */
@@ -44,12 +35,20 @@
#define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN)
#define PTP_CLASS_L4 (PTP_CLASS_IPV4 | PTP_CLASS_IPV6)
+#define PTP_MSGTYPE_SYNC 0x0
+#define PTP_MSGTYPE_DELAY_REQ 0x1
+#define PTP_MSGTYPE_PDELAY_REQ 0x2
+#define PTP_MSGTYPE_PDELAY_RESP 0x3
+
#define PTP_EV_PORT 319
+#define PTP_GEN_PORT 320
#define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */
#define OFF_PTP_SOURCE_UUID 22 /* PTPv1 only */
#define OFF_PTP_SEQUENCE_ID 30
-#define OFF_PTP_CONTROL 32 /* PTPv1 only */
+
+/* PTP header flag fields */
+#define PTP_FLAG_TWOSTEP BIT(1)
/* Below defines should actually be removed at some point in time. */
#define IP6_HLEN 40
@@ -57,6 +56,30 @@
#define OFF_IHL 14
#define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2)
+struct clock_identity {
+ u8 id[8];
+} __packed;
+
+struct port_identity {
+ struct clock_identity clock_identity;
+ __be16 port_number;
+} __packed;
+
+struct ptp_header {
+ u8 tsmt; /* transportSpecific | messageType */
+ u8 ver; /* reserved | versionPTP */
+ __be16 message_length;
+ u8 domain_number;
+ u8 reserved1;
+ u8 flag_field[2];
+ __be64 correction;
+ __be32 reserved2;
+ struct port_identity source_port_identity;
+ __be16 sequence_id;
+ u8 control;
+ u8 log_message_interval;
+} __packed;
+
#if defined(CONFIG_NET_PTP_CLASSIFY)
/**
* ptp_classify_raw - classify a PTP packet
@@ -70,10 +93,151 @@
*/
unsigned int ptp_classify_raw(const struct sk_buff *skb);
+/**
+ * ptp_parse_header - Get pointer to the PTP v2 header
+ * @skb: packet buffer
+ * @type: type of the packet (see ptp_classify_raw())
+ *
+ * This function takes care of the VLAN, UDP, IPv4 and IPv6 headers. The length
+ * is checked.
+ *
+ * Note, internally skb_mac_header() is used. Make sure that the @skb is
+ * initialized accordingly.
+ *
+ * Return: Pointer to the ptp v2 header or NULL if not found
+ */
+struct ptp_header *ptp_parse_header(struct sk_buff *skb, unsigned int type);
+
+/**
+ * ptp_get_msgtype - Extract ptp message type from given header
+ * @hdr: ptp header
+ * @type: type of the packet (see ptp_classify_raw())
+ *
+ * This function returns the message type for a given ptp header. It takes care
+ * of the different ptp header versions (v1 or v2).
+ *
+ * Return: The message type
+ */
+static inline u8 ptp_get_msgtype(const struct ptp_header *hdr,
+ unsigned int type)
+{
+ u8 msgtype;
+
+ if (unlikely(type & PTP_CLASS_V1)) {
+ /* msg type is located at the control field for ptp v1 */
+ msgtype = hdr->control;
+ } else {
+ msgtype = hdr->tsmt & 0x0f;
+ }
+
+ return msgtype;
+}
+
+/**
+ * ptp_check_diff8 - Computes new checksum (when altering a 64-bit field)
+ * @old: old field value
+ * @new: new field value
+ * @oldsum: previous checksum
+ *
+ * This function can be used to calculate a new checksum when only a single
+ * field is changed. Similar to ip_vs_check_diff*() in ip_vs.h.
+ *
+ * Return: Updated checksum
+ */
+static inline __wsum ptp_check_diff8(__be64 old, __be64 new, __wsum oldsum)
+{
+ __be64 diff[2] = { ~old, new };
+
+ return csum_partial(diff, sizeof(diff), oldsum);
+}
+
+/**
+ * ptp_header_update_correction - Update PTP header's correction field
+ * @skb: packet buffer
+ * @type: type of the packet (see ptp_classify_raw())
+ * @hdr: ptp header
+ * @correction: new correction value
+ *
+ * This updates the correction field of a PTP header and updates the UDP
+ * checksum (if UDP is used as transport). It is needed for hardware capable of
+ * one-step P2P that does not already modify the correction field of Pdelay_Req
+ * event messages on ingress.
+ */
+static inline
+void ptp_header_update_correction(struct sk_buff *skb, unsigned int type,
+ struct ptp_header *hdr, s64 correction)
+{
+ __be64 correction_old;
+ struct udphdr *uhdr;
+
+ /* previous correction value is required for checksum update. */
+ memcpy(&correction_old, &hdr->correction, sizeof(correction_old));
+
+ /* write new correction value */
+ put_unaligned_be64((u64)correction, &hdr->correction);
+
+ switch (type & PTP_CLASS_PMASK) {
+ case PTP_CLASS_IPV4:
+ case PTP_CLASS_IPV6:
+ /* locate udp header */
+ uhdr = (struct udphdr *)((char *)hdr - sizeof(struct udphdr));
+ break;
+ default:
+ return;
+ }
+
+ /* update checksum */
+ uhdr->check = csum_fold(ptp_check_diff8(correction_old,
+ hdr->correction,
+ ~csum_unfold(uhdr->check)));
+ if (!uhdr->check)
+ uhdr->check = CSUM_MANGLED_0;
+
+ skb->ip_summed = CHECKSUM_NONE;
+}
+
+/**
+ * ptp_msg_is_sync - Evaluates whether the given skb is a PTP Sync message
+ * @skb: packet buffer
+ * @type: type of the packet (see ptp_classify_raw())
+ *
+ * This function evaluates whether the given skb is a PTP Sync message.
+ *
+ * Return: true if sync message, false otherwise
+ */
+bool ptp_msg_is_sync(struct sk_buff *skb, unsigned int type);
+
void __init ptp_classifier_init(void);
#else
static inline void ptp_classifier_init(void)
{
}
+static inline unsigned int ptp_classify_raw(const struct sk_buff *skb)
+{
+ return PTP_CLASS_NONE;
+}
+static inline struct ptp_header *ptp_parse_header(struct sk_buff *skb,
+ unsigned int type)
+{
+ return NULL;
+}
+static inline u8 ptp_get_msgtype(const struct ptp_header *hdr,
+ unsigned int type)
+{
+	/* The return value is meaningless here; this stub can never be
+	 * reached because ptp_parse_header() returns NULL in this
+	 * configuration, so there is no header to inspect.
+	 */
+ return PTP_MSGTYPE_SYNC;
+}
+static inline bool ptp_msg_is_sync(struct sk_buff *skb, unsigned int type)
+{
+ return false;
+}
+
+static inline
+void ptp_header_update_correction(struct sk_buff *skb, unsigned int type,
+ struct ptp_header *hdr, s64 correction)
+{
+}
#endif
#endif /* _PTP_CLASSIFY_H_ */
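
Putting the helpers together, a typical receive-path check (sketch only) for a
one-step Sync message looks like this — classify the frame, locate the PTP
header, then branch on the message type and flags:

    #include <linux/ptp_classify.h>

    static bool example_is_onestep_sync(struct sk_buff *skb)
    {
            unsigned int type = ptp_classify_raw(skb);
            struct ptp_header *hdr;

            if (type == PTP_CLASS_NONE)
                    return false;

            hdr = ptp_parse_header(skb, type);
            if (!hdr)
                    return false;

            /* Sync with the two-step flag clear => one-step Sync */
            return ptp_get_msgtype(hdr, type) == PTP_MSGTYPE_SYNC &&
                   !(hdr->flag_field[0] & PTP_FLAG_TWOSTEP);
    }
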
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index a026bfd089db..fdffa6a98d79 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* PTP 1588 clock support
*
* Copyright (C) 2010 OMICRON electronics GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _PTP_CLOCK_KERNEL_H_
@@ -24,7 +11,23 @@
#include <linux/device.h>
#include <linux/pps_kernel.h>
#include <linux/ptp_clock.h>
+#include <linux/timecounter.h>
+#include <linux/skbuff.h>
+#define PTP_CLOCK_NAME_LEN 32
+/**
+ * struct ptp_clock_request - request PTP clock event
+ *
+ * @type: The type of the request.
+ * EXTTS: Configure external trigger timestamping
+ * PEROUT: Configure periodic output signal (e.g. PPS)
+ * PPS: trigger internal PPS event for input
+ * into kernel PPS subsystem
+ * @extts: describes configuration for external trigger timestamping.
+ * This is only valid when event == PTP_CLK_REQ_EXTTS.
+ * @perout: describes configuration for periodic output.
+ * This is only valid when event == PTP_CLK_REQ_PEROUT.
+ */
struct ptp_clock_request {
enum {
@@ -39,8 +42,19 @@ struct ptp_clock_request {
};
struct system_device_crosststamp;
+
+/**
+ * struct ptp_system_timestamp - system time corresponding to a PHC timestamp
+ * @pre_ts: system timestamp before capturing PHC
+ * @post_ts: system timestamp after capturing PHC
+ */
+struct ptp_system_timestamp {
+ struct timespec64 pre_ts;
+ struct timespec64 post_ts;
+};
+
/**
- * struct ptp_clock_info - decribes a PTP hardware clock
+ * struct ptp_clock_info - describes a PTP hardware clock
*
* @owner: The clock driver should set to THIS_MODULE.
* @name: A short "friendly name" to identify the clock and to
@@ -63,18 +77,25 @@ struct system_device_crosststamp;
* nominal frequency in parts per million, but with a
* 16 bit binary fractional field.
*
- * @adjfreq: Adjusts the frequency of the hardware clock.
- * This method is deprecated. New drivers should implement
- * the @adjfine method instead.
- * parameter delta: Desired frequency offset from nominal frequency
- * in parts per billion
+ * @adjphase: Adjusts the phase offset of the hardware clock.
+ * parameter delta: Desired change in nanoseconds.
*
* @adjtime: Shifts the time of the hardware clock.
* parameter delta: Desired change in nanoseconds.
*
* @gettime64: Reads the current time from the hardware clock.
+ * This method is deprecated. New drivers should implement
+ * the @gettimex64 method instead.
* parameter ts: Holds the result.
*
+ * @gettimex64: Reads the current time from the hardware clock and optionally
+ * also the system clock.
+ * parameter ts: Holds the PHC timestamp.
+ * parameter sts: If not NULL, it holds a pair of timestamps from
+ * the system clock. The first reading is made right before
+ * reading the lowest bits of the PHC timestamp and the second
+ * reading immediately follows that.
+ *
* @getcrosststamp: Reads the current time from the hardware clock and
* system clock simultaneously.
* parameter cts: Contains timestamp (device,system) pair,
@@ -83,6 +104,32 @@ struct system_device_crosststamp;
* @settime64: Set the current time on the hardware clock.
* parameter ts: Time value to set.
*
+ * @getcycles64: Reads the current free running cycle counter from the hardware
+ * clock.
+ * If @getcycles64 and @getcyclesx64 are not supported, then
+ * @gettime64 or @gettimex64 will be used as default
+ * implementation.
+ * parameter ts: Holds the result.
+ *
+ * @getcyclesx64: Reads the current free running cycle counter from the
+ * hardware clock and optionally also the system clock.
+ * If @getcycles64 and @getcyclesx64 are not supported, then
+ * @gettimex64 will be used as default implementation if
+ * available.
+ * parameter ts: Holds the PHC timestamp.
+ * parameter sts: If not NULL, it holds a pair of timestamps
+ * from the system clock. The first reading is made right before
+ * reading the lowest bits of the PHC timestamp and the second
+ * reading immediately follows that.
+ *
+ * @getcrosscycles: Reads the current free running cycle counter from the
+ * hardware clock and system clock simultaneously.
+ * If @getcycles64 and @getcyclesx64 are not supported, then
+ * @getcrosststamp will be used as default implementation if
+ * available.
+ * parameter cts: Contains timestamp (device,system) pair,
+ * where system time is realtime and monotonic.
+ *
* @enable: Request driver to enable or disable an ancillary feature.
* parameter request: Desired resource to enable or disable.
* parameter on: Caller passes one to enable or zero to disable.
@@ -99,6 +146,11 @@ struct system_device_crosststamp;
* parameter func: the desired function to use.
* parameter chan: the function channel index to use.
*
+ * @do_aux_work: Request driver to perform auxiliary (periodic) operations
+ * Driver should return delay of the next auxiliary work
+ * scheduling time (>=0) or negative value in case further
+ * scheduling is not required.
+ *
* Drivers should embed their ptp_clock_info within a private
* structure, obtaining a reference to it using container_of().
*
@@ -107,7 +159,7 @@ struct system_device_crosststamp;
struct ptp_clock_info {
struct module *owner;
- char name[16];
+ char name[PTP_CLOCK_NAME_LEN];
s32 max_adj;
int n_alarm;
int n_ext_ts;
@@ -116,16 +168,24 @@ struct ptp_clock_info {
int pps;
struct ptp_pin_desc *pin_config;
int (*adjfine)(struct ptp_clock_info *ptp, long scaled_ppm);
- int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
+ int (*adjphase)(struct ptp_clock_info *ptp, s32 phase);
int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
+ int (*gettimex64)(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts);
int (*getcrosststamp)(struct ptp_clock_info *ptp,
struct system_device_crosststamp *cts);
int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts);
+ int (*getcycles64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
+ int (*getcyclesx64)(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts);
+ int (*getcrosscycles)(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *cts);
int (*enable)(struct ptp_clock_info *ptp,
struct ptp_clock_request *request, int on);
int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan);
+ long (*do_aux_work)(struct ptp_clock_info *ptp);
};
struct ptp_clock;
@@ -155,7 +215,79 @@ struct ptp_clock_event {
};
};
-#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+/**
+ * scaled_ppm_to_ppb() - convert scaled ppm to ppb
+ *
+ * @ppm: Parts per million, but with a 16 bit binary fractional field
+ */
+static inline long scaled_ppm_to_ppb(long ppm)
+{
+ /*
+ * The 'freq' field in the 'struct timex' is in parts per
+ * million, but with a 16 bit binary fractional field.
+ *
+ * We want to calculate
+ *
+ * ppb = scaled_ppm * 1000 / 2^16
+ *
+ * which simplifies to
+ *
+ * ppb = scaled_ppm * 125 / 2^13
+ */
+ s64 ppb = 1 + ppm;
+
+ ppb *= 125;
+ ppb >>= 13;
+ return (long)ppb;
+}
+
+/**
+ * diff_by_scaled_ppm - Calculate difference using scaled ppm
+ * @base: the base increment value to adjust
+ * @scaled_ppm: scaled parts per million to adjust by
+ * @diff: on return, the absolute value of calculated diff
+ *
+ * Calculate the difference to adjust the base increment using scaled parts
+ * per million.
+ *
+ * Use mul_u64_u64_div_u64 to perform the difference calculation while
+ * avoiding possible overflow.
+ *
+ * Returns: true if scaled_ppm is negative, false otherwise
+ */
+static inline bool diff_by_scaled_ppm(u64 base, long scaled_ppm, u64 *diff)
+{
+ bool negative = false;
+
+ if (scaled_ppm < 0) {
+ negative = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ *diff = mul_u64_u64_div_u64(base, (u64)scaled_ppm, 1000000ULL << 16);
+
+ return negative;
+}
+
+/**
+ * adjust_by_scaled_ppm - Adjust a base increment by scaled parts per million
+ * @base: the base increment value to adjust
+ * @scaled_ppm: scaled parts per million frequency adjustment
+ *
+ * Helper function which calculates a new increment value based on the
+ * requested scaled parts per million adjustment.
+ */
+static inline u64 adjust_by_scaled_ppm(u64 base, long scaled_ppm)
+{
+ u64 diff;
+
+ if (diff_by_scaled_ppm(base, scaled_ppm, &diff))
+ return base - diff;
+
+ return base + diff;
+}
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
/**
* ptp_clock_register() - register a PTP hardware clock driver
@@ -201,6 +333,12 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
/**
* ptp_find_pin() - obtain the pin index of a given auxiliary function
*
+ * The caller must hold ptp_clock::pincfg_mux. Drivers do not have
+ * access to that mutex as ptp_clock is an opaque type. However, the
+ * core code acquires the mutex before invoking the driver's
+ * ptp_clock_info::enable() callback, and so drivers may call this
+ * function from that context.
+ *
* @ptp: The clock obtained from ptp_clock_register().
* @func: One of the ptp_pin_function enumerated values.
* @chan: The particular functional channel to find.
@@ -211,6 +349,41 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan);
+/**
+ * ptp_find_pin_unlocked() - wrapper for ptp_find_pin()
+ *
+ * This function acquires the ptp_clock::pincfg_mux mutex before
+ * invoking ptp_find_pin(). Instead of using this function, drivers
+ * should most likely call ptp_find_pin() directly from their
+ * ptp_clock_info::enable() method.
+ *
+ * @ptp: The clock obtained from ptp_clock_register().
+ * @func: One of the ptp_pin_function enumerated values.
+ * @chan: The particular functional channel to find.
+ * Return: Pin index in the range of zero to ptp_clock_caps.n_pins - 1,
+ *         or -1 if the auxiliary function cannot be found.
+ */
+
+int ptp_find_pin_unlocked(struct ptp_clock *ptp,
+ enum ptp_pin_function func, unsigned int chan);
+
+/**
+ * ptp_schedule_worker() - schedule ptp auxiliary work
+ *
+ * @ptp: The clock obtained from ptp_clock_register().
+ * @delay: number of jiffies to wait before queuing
+ * See kthread_queue_delayed_work() for more info.
+ */
+
+int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay);
+
+/**
+ * ptp_cancel_worker_sync() - cancel ptp auxiliary work
+ *
+ * @ptp: The clock obtained from ptp_clock_register().
+ */
+void ptp_cancel_worker_sync(struct ptp_clock *ptp);
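
Taken together, do_aux_work and the two declarations above form a periodic-work pattern: the callback returns the delay (in jiffies) until its next run, or a negative value to stop rescheduling, and the driver kicks it off once after registration with ptp_schedule_worker(ptp, 0). A hedged sketch, with all foo_* names hypothetical:

static long foo_do_aux_work(struct ptp_clock_info *info)
{
	struct foo_priv *priv = container_of(info, struct foo_priv, ptp_info);

	foo_check_counter_wrap(priv);	/* hypothetical overflow check */

	/* Re-arm: run again in roughly one second. */
	return msecs_to_jiffies(1000);
}

On teardown, a driver would pair this with ptp_cancel_worker_sync() before unregistering the clock.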
+
#else
static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
struct device *parent)
@@ -225,6 +398,63 @@ static inline int ptp_clock_index(struct ptp_clock *ptp)
static inline int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan)
{ return -1; }
+static inline int ptp_find_pin_unlocked(struct ptp_clock *ptp,
+ enum ptp_pin_function func,
+ unsigned int chan)
+{ return -1; }
+static inline int ptp_schedule_worker(struct ptp_clock *ptp,
+ unsigned long delay)
+{ return -EOPNOTSUPP; }
+static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp)
+{ }
#endif
+#if IS_BUILTIN(CONFIG_PTP_1588_CLOCK)
+/*
+ * These are called by the network core, and don't work if PTP is in
+ * a loadable module.
+ */
+
+/**
+ * ptp_get_vclocks_index() - get all vclock indices on a pclock; the
+ *                           caller is responsible for freeing the
+ *                           memory behind vclock_index
+ *
+ * @pclock_index: phc index of ptp pclock.
+ * @vclock_index: on return, points to an allocated array of vclock indices.
+ *
+ * Returns: number of vclocks.
+ */
+int ptp_get_vclocks_index(int pclock_index, int **vclock_index);
+
+/**
+ * ptp_convert_timestamp() - convert timestamp to a ptp vclock time
+ *
+ * @hwtstamp: hardware timestamp to convert.
+ * @vclock_index: phc index of ptp vclock.
+ *
+ * Returns converted timestamp, or 0 on error.
+ */
+ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index);
+#else
+static inline int ptp_get_vclocks_index(int pclock_index, int **vclock_index)
+{ return 0; }
+static inline ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp,
+ int vclock_index)
+{ return 0; }
+
+#endif
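
One plausible consumer of ptp_convert_timestamp() is an RX path that remaps a hardware timestamp onto a virtual clock before the packet reaches user space. A sketch under that assumption (foo_* is hypothetical; requires <linux/skbuff.h>):

static void foo_remap_rx_tstamp(struct sk_buff *skb, int vclock_index)
{
	struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
	ktime_t vts;

	vts = ptp_convert_timestamp(&hwts->hwtstamp, vclock_index);
	if (vts)	/* 0 means the conversion failed; keep the original */
		hwts->hwtstamp = vts;
}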
+
+static inline void ptp_read_system_prets(struct ptp_system_timestamp *sts)
+{
+ if (sts)
+ ktime_get_real_ts64(&sts->pre_ts);
+}
+
+static inline void ptp_read_system_postts(struct ptp_system_timestamp *sts)
+{
+ if (sts)
+ ktime_get_real_ts64(&sts->post_ts);
+}
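
These two helpers are meant to sandwich a device register read, so that a gettimex64() implementation can report the system time just before and just after the hardware was sampled. A minimal sketch, assuming a hypothetical foo_read_hw_ns() that returns the device counter in nanoseconds:

static int foo_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
			  struct ptp_system_timestamp *sts)
{
	struct foo_priv *priv = container_of(info, struct foo_priv, ptp_info);
	u64 ns;

	ptp_read_system_prets(sts);	/* system time just before the read */
	ns = foo_read_hw_ns(priv);	/* hypothetical device counter read */
	ptp_read_system_postts(sts);	/* system time just after the read */

	*ts = ns_to_timespec64(ns);
	return 0;
}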
+
#endif
diff --git a/include/linux/ptp_kvm.h b/include/linux/ptp_kvm.h
new file mode 100644
index 000000000000..746fd67c3480
--- /dev/null
+++ b/include/linux/ptp_kvm.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Virtual PTP 1588 clock for use with KVM guests
+ *
+ * Copyright (C) 2017 Red Hat Inc.
+ */
+
+#ifndef _PTP_KVM_H_
+#define _PTP_KVM_H_
+
+#include <linux/types.h>
+
+struct timespec64;
+struct clocksource;
+
+int kvm_arch_ptp_init(void);
+void kvm_arch_ptp_exit(void);
+int kvm_arch_ptp_get_clock(struct timespec64 *ts);
+int kvm_arch_ptp_get_crosststamp(u64 *cycle,
+ struct timespec64 *tspec, struct clocksource **cs);
+
+#endif /* _PTP_KVM_H_ */
diff --git a/include/linux/ptp_pch.h b/include/linux/ptp_pch.h
new file mode 100644
index 000000000000..7ba643b62c15
--- /dev/null
+++ b/include/linux/ptp_pch.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * PTP PCH
+ *
+ * Copyright 2019 Linaro Ltd.
+ *
+ * Author Lee Jones <lee.jones@linaro.org>
+ */
+
+#ifndef _PTP_PCH_H_
+#define _PTP_PCH_H_
+
+#include <linux/types.h>
+
+struct pci_dev;
+
+void pch_ch_control_write(struct pci_dev *pdev, u32 val);
+u32 pch_ch_event_read(struct pci_dev *pdev);
+void pch_ch_event_write(struct pci_dev *pdev, u32 val);
+u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
+u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
+u64 pch_rx_snap_read(struct pci_dev *pdev);
+u64 pch_tx_snap_read(struct pci_dev *pdev);
+int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
+
+#endif /* _PTP_PCH_H_ */
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index d8c97ec8a8e6..808f9d3ee546 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Definitions for the 'struct ptr_ring' datastructure.
*
@@ -6,11 +7,6 @@
*
* Copyright (C) 2016 Red Hat, Inc.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
* This is a limited-size FIFO maintaining pointers in FIFO order, with
* one CPU producing entries and another consuming entries from a FIFO.
*
@@ -26,8 +22,8 @@
#include <linux/cache.h>
#include <linux/types.h>
#include <linux/compiler.h>
-#include <linux/cache.h>
#include <linux/slab.h>
+#include <linux/mm.h>
#include <asm/errno.h>
#endif
@@ -45,9 +41,10 @@ struct ptr_ring {
};
/* Note: callers invoking this in a loop must use a compiler barrier,
- * for example cpu_relax(). If ring is ever resized, callers must hold
- * producer_lock - see e.g. ptr_ring_full. Otherwise, if callers don't hold
- * producer_lock, the next call to __ptr_ring_produce may fail.
+ * for example cpu_relax().
+ *
+ * NB: this is unlike __ptr_ring_empty in that callers must hold producer_lock:
+ * see e.g. ptr_ring_full.
*/
static inline bool __ptr_ring_full(struct ptr_ring *r)
{
@@ -101,13 +98,19 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r)
/* Note: callers invoking this in a loop must use a compiler barrier,
* for example cpu_relax(). Callers must hold producer_lock.
+ * Callers are responsible for making sure the pointer being queued
+ * points to valid data.
*/
static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
if (unlikely(!r->size) || r->queue[r->producer])
return -ENOSPC;
- r->queue[r->producer++] = ptr;
+ /* Make sure the pointer we are storing points to valid data. */
+ /* Pairs with the dependency ordering in __ptr_ring_consume. */
+ smp_wmb();
+
+ WRITE_ONCE(r->queue[r->producer++], ptr);
if (unlikely(r->producer >= r->size))
r->producer = 0;
return 0;
@@ -163,26 +166,36 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
return ret;
}
-/* Note: callers invoking this in a loop must use a compiler barrier,
- * for example cpu_relax(). Callers must take consumer_lock
- * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
- * If ring is never resized, and if the pointer is merely
- * tested, there's no need to take the lock - see e.g. __ptr_ring_empty.
- */
static inline void *__ptr_ring_peek(struct ptr_ring *r)
{
if (likely(r->size))
- return r->queue[r->consumer_head];
+ return READ_ONCE(r->queue[r->consumer_head]);
return NULL;
}
-/* Note: callers invoking this in a loop must use a compiler barrier,
- * for example cpu_relax(). Callers must take consumer_lock
- * if the ring is ever resized - see e.g. ptr_ring_empty.
+/*
+ * Test ring empty status without taking any locks.
+ *
+ * NB: This is only safe to call if ring is never resized.
+ *
+ * However, if some other CPU consumes ring entries at the same time, the value
+ * returned is not guaranteed to be correct.
+ *
+ * In this case - to avoid incorrectly detecting the ring
+ * as empty - the CPU consuming the ring entries is responsible
+ * for either consuming all ring entries until the ring is empty,
+ * or synchronizing with some other CPU and causing it to
+ * re-test __ptr_ring_empty and/or consume the ring entries
+ * after the synchronization point.
+ *
+ * Note: callers invoking this in a loop must use a compiler barrier,
+ * for example cpu_relax().
*/
static inline bool __ptr_ring_empty(struct ptr_ring *r)
{
- return !__ptr_ring_peek(r);
+ if (likely(r->size))
+ return !r->queue[READ_ONCE(r->consumer_head)];
+ return true;
}
static inline bool ptr_ring_empty(struct ptr_ring *r)
@@ -236,22 +249,28 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r)
/* Fundamentally, what we want to do is update consumer
* index and zero out the entry so producer can reuse it.
* Doing it naively at each consume would be as simple as:
- * r->queue[r->consumer++] = NULL;
- * if (unlikely(r->consumer >= r->size))
- * r->consumer = 0;
+ * consumer = r->consumer;
+ * r->queue[consumer++] = NULL;
+ * if (unlikely(consumer >= r->size))
+ * consumer = 0;
+ * r->consumer = consumer;
* but that is suboptimal when the ring is full as producer is writing
* out new entries in the same cache line. Defer these updates until a
* batch of entries has been consumed.
*/
- int head = r->consumer_head++;
+ /* Note: we must keep consumer_head valid at all times for __ptr_ring_empty
+ * to work correctly.
+ */
+ int consumer_head = r->consumer_head;
+ int head = consumer_head++;
/* Once we have processed enough entries invalidate them in
* the ring all at once so producer can reuse their space in the ring.
* We also do this when we reach end of the ring - not mandatory
* but helps keep the implementation simple.
*/
- if (unlikely(r->consumer_head - r->consumer_tail >= r->batch ||
- r->consumer_head >= r->size)) {
+ if (unlikely(consumer_head - r->consumer_tail >= r->batch ||
+ consumer_head >= r->size)) {
/* Zero out entries in the reverse order: this way we touch the
* cache line that producer might currently be reading the last;
* producer won't make progress and touch other cache lines
@@ -259,18 +278,24 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r)
*/
while (likely(head >= r->consumer_tail))
r->queue[head--] = NULL;
- r->consumer_tail = r->consumer_head;
+ r->consumer_tail = consumer_head;
}
- if (unlikely(r->consumer_head >= r->size)) {
- r->consumer_head = 0;
+ if (unlikely(consumer_head >= r->size)) {
+ consumer_head = 0;
r->consumer_tail = 0;
}
+ /* matching READ_ONCE in __ptr_ring_empty for lockless tests */
+ WRITE_ONCE(r->consumer_head, consumer_head);
}
static inline void *__ptr_ring_consume(struct ptr_ring *r)
{
void *ptr;
+ /* The READ_ONCE in __ptr_ring_peek guarantees that anyone
+ * accessing data through the pointer is up to date. Pairs
+ * with smp_wmb in __ptr_ring_produce.
+ */
ptr = __ptr_ring_peek(r);
if (ptr)
__ptr_ring_discard_one(r);
@@ -436,9 +461,14 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
__PTR_RING_PEEK_CALL_v; \
})
-static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp)
+/* Not all gfp_t flags (besides GFP_KERNEL) are allowed here. See the
+ * vmalloc documentation for which of them are legal.
+ */
+static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
{
- return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp);
+ if (size > KMALLOC_MAX_SIZE / sizeof(void *))
+ return NULL;
+ return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO);
}
static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
@@ -512,7 +542,9 @@ static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,
goto done;
}
r->queue[head] = batch[--n];
- r->consumer_tail = r->consumer_head = head;
+ r->consumer_tail = head;
+ /* matching READ_ONCE in __ptr_ring_empty for lockless tests */
+ WRITE_ONCE(r->consumer_head, head);
}
done:
@@ -537,6 +569,8 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
else if (destroy)
destroy(ptr);
+ if (producer >= size)
+ producer = 0;
__ptr_ring_set_size(r, size);
r->producer = producer;
r->consumer_head = 0;
@@ -571,7 +605,7 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
spin_unlock(&(r)->producer_lock);
spin_unlock_irqrestore(&(r)->consumer_lock, flags);
- kfree(old);
+ kvfree(old);
return 0;
}
@@ -582,7 +616,8 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
* In particular if you consume ring in interrupt or BH context, you must
* disable interrupts/BH when doing so.
*/
-static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
+static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
+ unsigned int nrings,
int size,
gfp_t gfp, void (*destroy)(void *))
{
@@ -590,7 +625,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
void ***queues;
int i;
- queues = kmalloc(nrings * sizeof *queues, gfp);
+ queues = kmalloc_array(nrings, sizeof(*queues), gfp);
if (!queues)
goto noqueues;
@@ -610,7 +645,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
}
for (i = 0; i < nrings; ++i)
- kfree(queues[i]);
+ kvfree(queues[i]);
kfree(queues);
@@ -618,7 +653,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
nomem:
while (--i >= 0)
- kfree(queues[i]);
+ kvfree(queues[i]);
kfree(queues);
@@ -633,7 +668,7 @@ static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
if (destroy)
while ((ptr = ptr_ring_consume(r)))
destroy(ptr);
- kfree(r->queue);
+ kvfree(r->queue);
}
#endif /* _LINUX_PTR_RING_H */
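
To make the locking rules above concrete, here is a minimal single-producer/single-consumer sketch against the public ptr_ring API (driver-side code, not part of the diff). Entries must be non-NULL, because NULL marks an empty slot:

static struct ptr_ring foo_ring;

static int foo_ring_setup(void)
{
	/* 512 zeroed slots; large rings may fall back to vmalloc. */
	return ptr_ring_init(&foo_ring, 512, GFP_KERNEL);
}

/* Producer side: takes producer_lock internally; -ENOSPC when full. */
static int foo_ring_push(void *item)
{
	return ptr_ring_produce(&foo_ring, item);
}

/* Consumer side: takes consumer_lock internally; NULL when empty. */
static void *foo_ring_pop(void)
{
	return ptr_ring_consume(&foo_ring);
}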
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 0e5fcc11b1b8..eaaef3ffec22 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PTRACE_H
#define _LINUX_PTRACE_H
@@ -8,6 +9,13 @@
#include <linux/bug.h> /* For BUG_ON. */
#include <linux/pid_namespace.h> /* For task_active_pid_ns. */
#include <uapi/linux/ptrace.h>
+#include <linux/seccomp.h>
+
+/* Add sp to seccomp_data; since seccomp is a user API, we don't want to modify it */
+struct syscall_info {
+ __u64 sp;
+ struct seccomp_data data;
+};
extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
@@ -22,7 +30,6 @@ extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
#define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */
#define PT_PTRACED 0x00000001
-#define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */
#define PT_OPT_FLAG_SHIFT 3
/* PT_TRACE_* event enable flags */
@@ -39,12 +46,6 @@ extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
#define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
#define PT_SUSPEND_SECCOMP (PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT)
-/* single stepping state bits (used on ARM and PA-RISC) */
-#define PT_SINGLESTEP_BIT 31
-#define PT_SINGLESTEP (1<<PT_SINGLESTEP_BIT)
-#define PT_BLOCKSTEP_BIT 30
-#define PT_BLOCKSTEP (1<<PT_BLOCKSTEP_BIT)
-
extern long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
@@ -52,7 +53,7 @@ extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned
extern void ptrace_disable(struct task_struct *);
extern int ptrace_request(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
-extern void ptrace_notify(int exit_code);
+extern int ptrace_notify(int exit_code, unsigned long message);
extern void __ptrace_link(struct task_struct *child,
struct task_struct *new_parent,
const struct cred *ptracer_cred);
@@ -61,8 +62,8 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_READ 0x01
#define PTRACE_MODE_ATTACH 0x02
#define PTRACE_MODE_NOAUDIT 0x04
-#define PTRACE_MODE_FSCREDS 0x08
-#define PTRACE_MODE_REALCREDS 0x10
+#define PTRACE_MODE_FSCREDS 0x08
+#define PTRACE_MODE_REALCREDS 0x10
/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
@@ -147,8 +148,7 @@ static inline bool ptrace_event_enabled(struct task_struct *task, int event)
static inline void ptrace_event(int event, unsigned long message)
{
if (unlikely(ptrace_event_enabled(current, event))) {
- current->ptrace_message = message;
- ptrace_notify((event << 8) | SIGTRAP);
+ ptrace_notify((event << 8) | SIGTRAP, message);
} else if (event == PTRACE_EVENT_EXEC) {
/* legacy EXEC report via SIGTRAP */
if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
@@ -163,7 +163,7 @@ static inline void ptrace_event(int event, unsigned long message)
*
* Check whether @event is enabled and, if so, report @event and @pid
* to the ptrace parent. @pid is reported as the pid_t seen from the
- * the ptrace parent's pid namespace.
+ * ptrace parent's pid namespace.
*
* Called without locks.
*/
@@ -213,8 +213,6 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
else
sigaddset(&child->pending.signal, SIGSTOP);
-
- set_tsk_thread_flag(child, TIF_SIGPENDING);
}
else
child->ptracer_cred = NULL;
@@ -337,44 +335,44 @@ static inline void user_enable_block_step(struct task_struct *task)
extern void user_enable_block_step(struct task_struct *);
#endif /* arch_has_block_step */
-#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
-extern void user_single_step_siginfo(struct task_struct *tsk,
- struct pt_regs *regs, siginfo_t *info);
+#ifdef ARCH_HAS_USER_SINGLE_STEP_REPORT
+extern void user_single_step_report(struct pt_regs *regs);
#else
-static inline void user_single_step_siginfo(struct task_struct *tsk,
- struct pt_regs *regs, siginfo_t *info)
+static inline void user_single_step_report(struct pt_regs *regs)
{
- memset(info, 0, sizeof(*info));
- info->si_signo = SIGTRAP;
+ kernel_siginfo_t info;
+ clear_siginfo(&info);
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = SI_USER;
+ info.si_pid = 0;
+ info.si_uid = 0;
+ force_sig_info(&info);
}
#endif
#ifndef arch_ptrace_stop_needed
/**
* arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
- * @code: current->exit_code value ptrace will stop with
- * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
*
* This is called with the siglock held, to decide whether or not it's
- * necessary to release the siglock and call arch_ptrace_stop() with the
- * same @code and @info arguments. It can be defined to a constant if
- * arch_ptrace_stop() is never required, or always is. On machines where
- * this makes sense, it should be defined to a quick test to optimize out
- * calling arch_ptrace_stop() when it would be superfluous. For example,
- * if the thread has not been back to user mode since the last stop, the
- * thread state might indicate that nothing needs to be done.
+ * necessary to release the siglock and call arch_ptrace_stop(). It can be
+ * defined to a constant if arch_ptrace_stop() is never required, or always
+ * is. On machines where this makes sense, it should be defined to a quick
+ * test to optimize out calling arch_ptrace_stop() when it would be
+ * superfluous. For example, if the thread has not been back to user mode
+ * since the last stop, the thread state might indicate that nothing needs
+ * to be done.
*
* This is guaranteed to be invoked once before a task stops for ptrace and
* may include arch-specific operations necessary prior to a ptrace stop.
*/
-#define arch_ptrace_stop_needed(code, info) (0)
+#define arch_ptrace_stop_needed() (0)
#endif
#ifndef arch_ptrace_stop
/**
* arch_ptrace_stop - Do machine-specific work before stopping for ptrace
- * @code: current->exit_code value ptrace will stop with
- * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
*
* This is called with no locks held when arch_ptrace_stop_needed() has
* just returned nonzero. It is allowed to block, e.g. for user memory
@@ -384,28 +382,94 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
* we only do it when the arch requires it for this particular stop, as
* indicated by arch_ptrace_stop_needed().
*/
-#define arch_ptrace_stop(code, info) do { } while (0)
+#define arch_ptrace_stop() do { } while (0)
#endif
#ifndef current_pt_regs
#define current_pt_regs() task_pt_regs(current)
#endif
-/*
- * unlike current_pt_regs(), this one is equal to task_pt_regs(current)
- * on *all* architectures; the only reason to have a per-arch definition
- * is optimisation.
- */
-#ifndef signal_pt_regs
-#define signal_pt_regs() task_pt_regs(current)
-#endif
-
#ifndef current_user_stack_pointer
#define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
#endif
-extern int task_current_syscall(struct task_struct *target, long *callno,
- unsigned long args[6], unsigned int maxargs,
- unsigned long *sp, unsigned long *pc);
+extern int task_current_syscall(struct task_struct *target, struct syscall_info *info);
+
+extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);
+
+/*
+ * ptrace report for syscall entry and exit looks identical.
+ */
+static inline int ptrace_report_syscall(unsigned long message)
+{
+ int ptrace = current->ptrace;
+ int signr;
+
+ if (!(ptrace & PT_PTRACED))
+ return 0;
+
+ signr = ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0),
+ message);
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (signr)
+ send_sig(signr, current, 1);
+
+ return fatal_signal_pending(current);
+}
+
+/**
+ * ptrace_report_syscall_entry - task is about to attempt a system call
+ * @regs: user register state of current task
+ *
+ * This will be called if %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_EMU have been set, when the current task has just
+ * entered the kernel for a system call. Full user register state is
+ * available here. Changing the values in @regs can affect the system
+ * call number and arguments to be tried. It is safe to block here,
+ * preventing the system call from beginning.
+ *
+ * Returns zero normally, or nonzero if the calling arch code should abort
+ * the system call. That must prevent normal entry so no system call is
+ * made. If @task ever returns to user mode after this, its register state
+ * is unspecified, but should be something harmless like an %ENOSYS error
+ * return. It should preserve enough information so that syscall_rollback()
+ * can work (see asm-generic/syscall.h).
+ *
+ * Called without locks, just after entering kernel mode.
+ */
+static inline __must_check int ptrace_report_syscall_entry(
+ struct pt_regs *regs)
+{
+ return ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_ENTRY);
+}
+
+/**
+ * ptrace_report_syscall_exit - task has just finished a system call
+ * @regs: user register state of current task
+ * @step: nonzero if simulating single-step or block-step
+ *
+ * This will be called if %SYSCALL_WORK_SYSCALL_TRACE has been set, when
+ * the current task has just finished an attempted system call. Full
+ * user register state is available here. It is safe to block here,
+ * preventing signals from being processed.
+ *
+ * If @step is nonzero, this report is also in lieu of the normal
+ * trap that would follow the system call instruction because
+ * user_enable_block_step() or user_enable_single_step() was used.
+ * In this case, %SYSCALL_WORK_SYSCALL_TRACE might not be set.
+ *
+ * Called without locks, just before checking for pending signals.
+ */
+static inline void ptrace_report_syscall_exit(struct pt_regs *regs, int step)
+{
+ if (step)
+ user_single_step_report(regs);
+ else
+ ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_EXIT);
+}
#endif
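
For context, arch entry code typically wires the two reports in roughly this shape; the sketch below is hypothetical (flag names and accessors vary per architecture) and is not part of the patch:

static long foo_syscall_trace_enter(struct pt_regs *regs)
{
	/* A nonzero return means the arch must abort the syscall. */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    ptrace_report_syscall_entry(regs))
		return -1L;

	return foo_syscall_nr(regs);	/* hypothetical syscall-number accessor */
}

static void foo_syscall_trace_exit(struct pt_regs *regs, int step)
{
	/* With single/block step active, the exit report replaces the trap. */
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		ptrace_report_syscall_exit(regs, step);
}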
diff --git a/include/linux/ptrace_api.h b/include/linux/ptrace_api.h
new file mode 100644
index 000000000000..26e7d275ad8d
--- /dev/null
+++ b/include/linux/ptrace_api.h
@@ -0,0 +1 @@
+#include <linux/ptrace.h>
diff --git a/include/linux/purgatory.h b/include/linux/purgatory.h
index d60d4e278609..d7dc1559427f 100644
--- a/include/linux/purgatory.h
+++ b/include/linux/purgatory.h
@@ -1,8 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PURGATORY_H
#define _LINUX_PURGATORY_H
#include <linux/types.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
#include <uapi/linux/kexec.h>
struct kexec_sha_region {
diff --git a/include/linux/pvclock_gtod.h b/include/linux/pvclock_gtod.h
index a71d2dbd3610..f63549581f3e 100644
--- a/include/linux/pvclock_gtod.h
+++ b/include/linux/pvclock_gtod.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PVCLOCK_GTOD_H
#define _PVCLOCK_GTOD_H
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 08fad7c6a471..04ae1d9073a7 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PWM_H
#define __LINUX_PWM_H
@@ -5,9 +6,6 @@
#include <linux/mutex.h>
#include <linux/of.h>
-struct pwm_capture;
-struct seq_file;
-
struct pwm_chip;
/**
@@ -38,7 +36,7 @@ enum pwm_polarity {
* current PWM hardware state.
*/
struct pwm_args {
- unsigned int period;
+ u64 period;
enum pwm_polarity polarity;
};
@@ -53,12 +51,17 @@ enum {
* @duty_cycle: PWM duty cycle (in nanoseconds)
* @polarity: PWM polarity
* @enabled: PWM enabled status
+ * @usage_power: If set, the PWM driver is only required to maintain the power
+ * output but has more freedom regarding signal form.
+ * If supported, the signal can be optimized, for example to
+ * improve EMI by phase shifting individual channels.
*/
struct pwm_state {
- unsigned int period;
- unsigned int duty_cycle;
+ u64 period;
+ u64 duty_cycle;
enum pwm_polarity polarity;
bool enabled;
+ bool usage_power;
};
/**
@@ -70,7 +73,8 @@ struct pwm_state {
* @chip: PWM chip providing this PWM device
* @chip_data: chip-private data associated with the PWM device
* @args: PWM arguments
- * @state: curent PWM channel state
+ * @state: last applied state
+ * @last: last implemented state (for PWM_DEBUG)
*/
struct pwm_device {
const char *label;
@@ -82,12 +86,18 @@ struct pwm_device {
struct pwm_args args;
struct pwm_state state;
+ struct pwm_state last;
};
/**
* pwm_get_state() - retrieve the current PWM state
* @pwm: PWM device
* @state: state to fill with the current PWM state
+ *
+ * The returned PWM state represents the state that was applied by a previous call to
+ * pwm_apply_state(). Drivers may have to slightly tweak that state before programming it to
+ * hardware. If pwm_apply_state() was never called, this returns either the current hardware
+ * state (if supported) or the default settings.
*/
static inline void pwm_get_state(const struct pwm_device *pwm,
struct pwm_state *state)
@@ -104,13 +114,13 @@ static inline bool pwm_is_enabled(const struct pwm_device *pwm)
return state.enabled;
}
-static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period)
+static inline void pwm_set_period(struct pwm_device *pwm, u64 period)
{
if (pwm)
pwm->state.period = period;
}
-static inline unsigned int pwm_get_period(const struct pwm_device *pwm)
+static inline u64 pwm_get_period(const struct pwm_device *pwm)
{
struct pwm_state state;
@@ -125,7 +135,7 @@ static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty)
pwm->state.duty_cycle = duty;
}
-static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm)
+static inline u64 pwm_get_duty_cycle(const struct pwm_device *pwm)
{
struct pwm_state state;
@@ -180,6 +190,7 @@ static inline void pwm_init_state(const struct pwm_device *pwm,
state->period = args.period;
state->polarity = args.polarity;
state->duty_cycle = 0;
+ state->usage_power = false;
}
/**
@@ -238,85 +249,67 @@ pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle,
}
/**
+ * struct pwm_capture - PWM capture data
+ * @period: period of the PWM signal (in nanoseconds)
+ * @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
+ */
+struct pwm_capture {
+ unsigned int period;
+ unsigned int duty_cycle;
+};
+
+/**
* struct pwm_ops - PWM controller operations
* @request: optional hook for requesting a PWM
* @free: optional hook for freeing a PWM
- * @config: configure duty cycles and period length for this PWM
- * @set_polarity: configure the polarity of this PWM
* @capture: capture and report PWM signal
- * @enable: enable PWM output toggling
- * @disable: disable PWM output toggling
- * @apply: atomically apply a new PWM config. The state argument
- * should be adjusted with the real hardware config (if the
- * approximate the period or duty_cycle value, state should
- * reflect it)
+ * @apply: atomically apply a new PWM config
* @get_state: get the current PWM state. This function is only
* called once per PWM device when the PWM chip is
* registered.
- * @dbg_show: optional routine to show contents in debugfs
* @owner: helps prevent removal of modules exporting active PWMs
*/
struct pwm_ops {
int (*request)(struct pwm_chip *chip, struct pwm_device *pwm);
void (*free)(struct pwm_chip *chip, struct pwm_device *pwm);
- int (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns);
- int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm,
- enum pwm_polarity polarity);
int (*capture)(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_capture *result, unsigned long timeout);
- int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm);
- void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm);
int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state);
- void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state);
-#ifdef CONFIG_DEBUG_FS
- void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s);
-#endif
+ const struct pwm_state *state);
+ int (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state);
struct module *owner;
};
/**
* struct pwm_chip - abstract a PWM controller
* @dev: device providing the PWMs
- * @list: list node for internal use
* @ops: callbacks for this PWM controller
* @base: number of first PWM controlled by this chip
* @npwm: number of PWMs controlled by this chip
- * @pwms: array of PWM devices allocated by the framework
* @of_xlate: request a PWM device given a device tree PWM specifier
* @of_pwm_n_cells: number of cells expected in the device tree PWM specifier
+ * @list: list node for internal use
+ * @pwms: array of PWM devices allocated by the framework
*/
struct pwm_chip {
struct device *dev;
- struct list_head list;
const struct pwm_ops *ops;
int base;
unsigned int npwm;
- struct pwm_device *pwms;
-
struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
const struct of_phandle_args *args);
unsigned int of_pwm_n_cells;
-};
-/**
- * struct pwm_capture - PWM capture data
- * @period: period of the PWM signal (in nanoseconds)
- * @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
- */
-struct pwm_capture {
- unsigned int period;
- unsigned int duty_cycle;
+ /* only used internally by the PWM framework */
+ struct list_head list;
+ struct pwm_device *pwms;
};
#if IS_ENABLED(CONFIG_PWM)
/* PWM user APIs */
-struct pwm_device *pwm_request(int pwm_id, const char *label);
-void pwm_free(struct pwm_device *pwm);
-int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state);
+int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state);
int pwm_adjust_config(struct pwm_device *pwm);
/**
@@ -348,42 +341,6 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
}
/**
- * pwm_set_polarity() - configure the polarity of a PWM signal
- * @pwm: PWM device
- * @polarity: new polarity of the PWM signal
- *
- * Note that the polarity cannot be configured while the PWM device is
- * enabled.
- *
- * Returns: 0 on success or a negative error code on failure.
- */
-static inline int pwm_set_polarity(struct pwm_device *pwm,
- enum pwm_polarity polarity)
-{
- struct pwm_state state;
-
- if (!pwm)
- return -EINVAL;
-
- pwm_get_state(pwm, &state);
- if (state.polarity == polarity)
- return 0;
-
- /*
- * Changing the polarity of a running PWM without adjusting the
- * dutycycle/period value is a bit risky (can introduce glitches).
- * Return -EBUSY in this case.
- * Note that this is allowed when using pwm_apply_state() because
- * the user specifies all the parameters.
- */
- if (state.enabled)
- return -EBUSY;
-
- state.polarity = polarity;
- return pwm_apply_state(pwm, &state);
-}
-
-/**
* pwm_enable() - start a PWM output toggling
* @pwm: PWM device
*
@@ -429,38 +386,32 @@ int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result,
int pwm_set_chip_data(struct pwm_device *pwm, void *data);
void *pwm_get_chip_data(struct pwm_device *pwm);
-int pwmchip_add_with_polarity(struct pwm_chip *chip,
- enum pwm_polarity polarity);
int pwmchip_add(struct pwm_chip *chip);
-int pwmchip_remove(struct pwm_chip *chip);
+void pwmchip_remove(struct pwm_chip *chip);
+
+int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip);
+
struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
unsigned int index,
const char *label);
struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *pc,
const struct of_phandle_args *args);
+struct pwm_device *of_pwm_single_xlate(struct pwm_chip *pc,
+ const struct of_phandle_args *args);
struct pwm_device *pwm_get(struct device *dev, const char *con_id);
-struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id);
void pwm_put(struct pwm_device *pwm);
struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id);
-struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np,
- const char *con_id);
-void devm_pwm_put(struct device *dev, struct pwm_device *pwm);
+struct pwm_device *devm_fwnode_pwm_get(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *con_id);
#else
-static inline struct pwm_device *pwm_request(int pwm_id, const char *label)
-{
- return ERR_PTR(-ENODEV);
-}
-
-static inline void pwm_free(struct pwm_device *pwm)
-{
-}
-
static inline int pwm_apply_state(struct pwm_device *pwm,
const struct pwm_state *state)
{
+ might_sleep();
return -ENOTSUPP;
}
@@ -472,31 +423,28 @@ static inline int pwm_adjust_config(struct pwm_device *pwm)
static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
int period_ns)
{
+ might_sleep();
return -EINVAL;
}
-static inline int pwm_capture(struct pwm_device *pwm,
- struct pwm_capture *result,
- unsigned long timeout)
+static inline int pwm_enable(struct pwm_device *pwm)
{
+ might_sleep();
return -EINVAL;
}
-static inline int pwm_set_polarity(struct pwm_device *pwm,
- enum pwm_polarity polarity)
+static inline void pwm_disable(struct pwm_device *pwm)
{
- return -ENOTSUPP;
+ might_sleep();
}
-static inline int pwm_enable(struct pwm_device *pwm)
+static inline int pwm_capture(struct pwm_device *pwm,
+ struct pwm_capture *result,
+ unsigned long timeout)
{
return -EINVAL;
}
-static inline void pwm_disable(struct pwm_device *pwm)
-{
-}
-
static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data)
{
return -EINVAL;
@@ -512,12 +460,12 @@ static inline int pwmchip_add(struct pwm_chip *chip)
return -EINVAL;
}
-static inline int pwmchip_add_inversed(struct pwm_chip *chip)
+static inline int pwmchip_remove(struct pwm_chip *chip)
{
return -EINVAL;
}
-static inline int pwmchip_remove(struct pwm_chip *chip)
+static inline int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip)
{
return -EINVAL;
}
@@ -526,41 +474,36 @@ static inline struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
unsigned int index,
const char *label)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
static inline struct pwm_device *pwm_get(struct device *dev,
const char *consumer)
{
- return ERR_PTR(-ENODEV);
-}
-
-static inline struct pwm_device *of_pwm_get(struct device_node *np,
- const char *con_id)
-{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
static inline void pwm_put(struct pwm_device *pwm)
{
+ might_sleep();
}
static inline struct pwm_device *devm_pwm_get(struct device *dev,
const char *consumer)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
-static inline struct pwm_device *devm_of_pwm_get(struct device *dev,
- struct device_node *np,
- const char *con_id)
+static inline struct pwm_device *
+devm_fwnode_pwm_get(struct device *dev, struct fwnode_handle *fwnode,
+ const char *con_id)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
-
-static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm)
-{
-}
#endif
static inline void pwm_apply_args(struct pwm_device *pwm)
@@ -591,6 +534,7 @@ static inline void pwm_apply_args(struct pwm_device *pwm)
state.enabled = false;
state.polarity = pwm->args.polarity;
state.period = pwm->args.period;
+ state.usage_power = false;
pwm_apply_state(pwm, &state);
}
@@ -638,7 +582,6 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
#ifdef CONFIG_PWM_SYSFS
void pwmchip_sysfs_export(struct pwm_chip *chip);
void pwmchip_sysfs_unexport(struct pwm_chip *chip);
-void pwmchip_sysfs_unexport_children(struct pwm_chip *chip);
#else
static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
{
@@ -647,10 +590,6 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
{
}
-
-static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
-{
-}
#endif /* CONFIG_PWM_SYSFS */
#endif /* __LINUX_PWM_H */
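
After this rework, the intended consumer flow is purely atomic: seed a pwm_state from the device's reference arguments, tweak it, and apply it in one call. A minimal sketch (foo_* is hypothetical; the 50/100 arguments are valid constants, so the relative-duty helper cannot fail here):

static int foo_pwm_setup(struct device *dev)
{
	struct pwm_device *pwm;
	struct pwm_state state;

	pwm = devm_pwm_get(dev, NULL);
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	pwm_init_state(pwm, &state);	/* period/polarity from reference args */
	pwm_set_relative_duty_cycle(&state, 50, 100);	/* 50% duty cycle */
	state.enabled = true;

	return pwm_apply_state(pwm, &state);
}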
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
index efdd9227a49c..cdd2ac366bc7 100644
--- a/include/linux/pwm_backlight.h
+++ b/include/linux/pwm_backlight.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Generic PWM backlight driver data - see drivers/video/backlight/pwm_bl.c
*/
@@ -7,14 +8,13 @@
#include <linux/backlight.h>
struct platform_pwm_backlight_data {
- int pwm_id;
unsigned int max_brightness;
unsigned int dft_brightness;
unsigned int lth_brightness;
unsigned int pwm_period_ns;
unsigned int *levels;
- /* TODO remove once all users are switched to gpiod_* API */
- int enable_gpio;
+ unsigned int post_pwm_on_delay;
+ unsigned int pwm_off_delay;
int (*init)(struct device *dev);
int (*notify)(struct device *dev, int brightness);
void (*notify_after)(struct device *dev, int brightness);
diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h
index e1ab6e86cdb3..fb09c2c7cb75 100644
--- a/include/linux/pxa168_eth.h
+++ b/include/linux/pxa168_eth.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
*pxa168 ethernet platform device data definition file.
*/
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 8461b18e4608..cd1973e6ac4b 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -1,11 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * pxa2xx_ssp.h
- *
- * Copyright (C) 2003 Russell King, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
*
* This driver supports the following PXA CPU/SSP ports:-
*
@@ -16,13 +11,19 @@
* PXA3xx SSP1, SSP2, SSP3, SSP4
*/
-#ifndef __LINUX_SSP_H
-#define __LINUX_SSP_H
+#ifndef __LINUX_PXA2XX_SSP_H
+#define __LINUX_PXA2XX_SSP_H
-#include <linux/list.h>
+#include <linux/bits.h>
+#include <linux/compiler_types.h>
#include <linux/io.h>
-#include <linux/of.h>
+#include <linux/kconfig.h>
+#include <linux/list.h>
+#include <linux/types.h>
+struct clk;
+struct device;
+struct device_node;
/*
* SSP Serial Port Registers
@@ -37,7 +38,6 @@
#define SSDR (0x10) /* SSP Data Write/Data Read Register */
#define SSTO (0x28) /* SSP Time Out Register */
-#define DDS_RATE (0x28) /* SSP DDS Clock Rate Register (Intel Quark) */
#define SSPSP (0x2C) /* SSP Programmable Serial Protocol */
#define SSTSA (0x30) /* SSP Tx Timeslot Active */
#define SSRSA (0x34) /* SSP Rx Timeslot Active */
@@ -46,141 +46,170 @@
#define SSACDD (0x40) /* SSP Audio Clock Dither Divider */
/* Common PXA2xx bits first */
-#define SSCR0_DSS (0x0000000f) /* Data Size Select (mask) */
+#define SSCR0_DSS GENMASK(3, 0) /* Data Size Select (mask) */
#define SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..16] */
-#define SSCR0_FRF (0x00000030) /* FRame Format (mask) */
+#define SSCR0_FRF GENMASK(5, 4) /* FRame Format (mask) */
#define SSCR0_Motorola (0x0 << 4) /* Motorola's Serial Peripheral Interface (SPI) */
#define SSCR0_TI (0x1 << 4) /* Texas Instruments' Synchronous Serial Protocol (SSP) */
#define SSCR0_National (0x2 << 4) /* National Microwire */
-#define SSCR0_ECS (1 << 6) /* External clock select */
-#define SSCR0_SSE (1 << 7) /* Synchronous Serial Port Enable */
+#define SSCR0_ECS BIT(6) /* External clock select */
+#define SSCR0_SSE BIT(7) /* Synchronous Serial Port Enable */
#define SSCR0_SCR(x) ((x) << 8) /* Serial Clock Rate (mask) */
/* PXA27x, PXA3xx */
-#define SSCR0_EDSS (1 << 20) /* Extended data size select */
-#define SSCR0_NCS (1 << 21) /* Network clock select */
-#define SSCR0_RIM (1 << 22) /* Receive FIFO overrrun interrupt mask */
-#define SSCR0_TUM (1 << 23) /* Transmit FIFO underrun interrupt mask */
-#define SSCR0_FRDC (0x07000000) /* Frame rate divider control (mask) */
+#define SSCR0_EDSS BIT(20) /* Extended data size select */
+#define SSCR0_NCS BIT(21) /* Network clock select */
+#define SSCR0_RIM BIT(22) /* Receive FIFO overrun interrupt mask */
+#define SSCR0_TUM BIT(23) /* Transmit FIFO underrun interrupt mask */
+#define SSCR0_FRDC GENMASK(26, 24) /* Frame rate divider control (mask) */
#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame [1..8] */
-#define SSCR0_FPCKE (1 << 29) /* FIFO packing enable */
-#define SSCR0_ACS (1 << 30) /* Audio clock select */
-#define SSCR0_MOD (1 << 31) /* Mode (normal or network) */
-
-
-#define SSCR1_RIE (1 << 0) /* Receive FIFO Interrupt Enable */
-#define SSCR1_TIE (1 << 1) /* Transmit FIFO Interrupt Enable */
-#define SSCR1_LBM (1 << 2) /* Loop-Back Mode */
-#define SSCR1_SPO (1 << 3) /* Motorola SPI SSPSCLK polarity setting */
-#define SSCR1_SPH (1 << 4) /* Motorola SPI SSPSCLK phase setting */
-#define SSCR1_MWDS (1 << 5) /* Microwire Transmit Data Size */
-
-#define SSSR_ALT_FRM_MASK 3 /* Masks the SFRM signal number */
-#define SSSR_TNF (1 << 2) /* Transmit FIFO Not Full */
-#define SSSR_RNE (1 << 3) /* Receive FIFO Not Empty */
-#define SSSR_BSY (1 << 4) /* SSP Busy */
-#define SSSR_TFS (1 << 5) /* Transmit FIFO Service Request */
-#define SSSR_RFS (1 << 6) /* Receive FIFO Service Request */
-#define SSSR_ROR (1 << 7) /* Receive FIFO Overrun */
+#define SSCR0_FPCKE BIT(29) /* FIFO packing enable */
+#define SSCR0_ACS BIT(30) /* Audio clock select */
+#define SSCR0_MOD BIT(31) /* Mode (normal or network) */
+
+#define SSCR1_RIE BIT(0) /* Receive FIFO Interrupt Enable */
+#define SSCR1_TIE BIT(1) /* Transmit FIFO Interrupt Enable */
+#define SSCR1_LBM BIT(2) /* Loop-Back Mode */
+#define SSCR1_SPO BIT(3) /* Motorola SPI SSPSCLK polarity setting */
+#define SSCR1_SPH BIT(4) /* Motorola SPI SSPSCLK phase setting */
+#define SSCR1_MWDS BIT(5) /* Microwire Transmit Data Size */
+
+#define SSSR_ALT_FRM_MASK GENMASK(1, 0) /* Masks the SFRM signal number */
+#define SSSR_TNF BIT(2) /* Transmit FIFO Not Full */
+#define SSSR_RNE BIT(3) /* Receive FIFO Not Empty */
+#define SSSR_BSY BIT(4) /* SSP Busy */
+#define SSSR_TFS BIT(5) /* Transmit FIFO Service Request */
+#define SSSR_RFS BIT(6) /* Receive FIFO Service Request */
+#define SSSR_ROR BIT(7) /* Receive FIFO Overrun */
#define RX_THRESH_DFLT 8
#define TX_THRESH_DFLT 8
-#define SSSR_TFL_MASK (0xf << 8) /* Transmit FIFO Level mask */
-#define SSSR_RFL_MASK (0xf << 12) /* Receive FIFO Level mask */
+#define SSSR_TFL_MASK GENMASK(11, 8) /* Transmit FIFO Level mask */
+#define SSSR_RFL_MASK GENMASK(15, 12) /* Receive FIFO Level mask */
-#define SSCR1_TFT (0x000003c0) /* Transmit FIFO Threshold (mask) */
+#define SSCR1_TFT GENMASK(9, 6) /* Transmit FIFO Threshold (mask) */
#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
-#define SSCR1_RFT (0x00003c00) /* Receive FIFO Threshold (mask) */
+#define SSCR1_RFT GENMASK(13, 10) /* Receive FIFO Threshold (mask) */
#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
#define RX_THRESH_CE4100_DFLT 2
#define TX_THRESH_CE4100_DFLT 2
-#define CE4100_SSSR_TFL_MASK (0x3 << 8) /* Transmit FIFO Level mask */
-#define CE4100_SSSR_RFL_MASK (0x3 << 12) /* Receive FIFO Level mask */
+#define CE4100_SSSR_TFL_MASK GENMASK(9, 8) /* Transmit FIFO Level mask */
+#define CE4100_SSSR_RFL_MASK GENMASK(13, 12) /* Receive FIFO Level mask */
-#define CE4100_SSCR1_TFT (0x000000c0) /* Transmit FIFO Threshold (mask) */
+#define CE4100_SSCR1_TFT GENMASK(7, 6) /* Transmit FIFO Threshold (mask) */
#define CE4100_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..4] */
-#define CE4100_SSCR1_RFT (0x00000c00) /* Receive FIFO Threshold (mask) */
+#define CE4100_SSCR1_RFT GENMASK(11, 10) /* Receive FIFO Threshold (mask) */
#define CE4100_SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */
+/* Intel Quark X1000 */
+#define DDS_RATE 0x28 /* SSP DDS Clock Rate Register */
+
/* QUARK_X1000 SSCR0 bit definition */
-#define QUARK_X1000_SSCR0_DSS (0x1F << 0) /* Data Size Select (mask) */
+#define QUARK_X1000_SSCR0_DSS GENMASK(4, 0) /* Data Size Select (mask) */
#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */
-#define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */
+#define QUARK_X1000_SSCR0_FRF GENMASK(6, 5) /* FRame Format (mask) */
#define QUARK_X1000_SSCR0_Motorola (0x0 << 5) /* Motorola's Serial Peripheral Interface (SPI) */
#define RX_THRESH_QUARK_X1000_DFLT 1
#define TX_THRESH_QUARK_X1000_DFLT 16
-#define QUARK_X1000_SSSR_TFL_MASK (0x1F << 8) /* Transmit FIFO Level mask */
-#define QUARK_X1000_SSSR_RFL_MASK (0x1F << 13) /* Receive FIFO Level mask */
+#define QUARK_X1000_SSSR_TFL_MASK GENMASK(12, 8) /* Transmit FIFO Level mask */
+#define QUARK_X1000_SSSR_RFL_MASK GENMASK(17, 13) /* Receive FIFO Level mask */
-#define QUARK_X1000_SSCR1_TFT (0x1F << 6) /* Transmit FIFO Threshold (mask) */
+#define QUARK_X1000_SSCR1_TFT GENMASK(10, 6) /* Transmit FIFO Threshold (mask) */
#define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..32] */
-#define QUARK_X1000_SSCR1_RFT (0x1F << 11) /* Receive FIFO Threshold (mask) */
+#define QUARK_X1000_SSCR1_RFT GENMASK(15, 11) /* Receive FIFO Threshold (mask) */
#define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */
-#define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */
-#define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */
+#define QUARK_X1000_SSCR1_EFWR BIT(16) /* Enable FIFO Write/Read */
+#define QUARK_X1000_SSCR1_STRF BIT(17) /* Select FIFO or EFWR */
-/* extra bits in PXA255, PXA26x and PXA27x SSP ports */
+/* Extra bits in PXA255, PXA26x and PXA27x SSP ports */
#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */
#define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */
-#define SSCR1_TTELP (1 << 31) /* TXD Tristate Enable Last Phase */
-#define SSCR1_TTE (1 << 30) /* TXD Tristate Enable */
-#define SSCR1_EBCEI (1 << 29) /* Enable Bit Count Error interrupt */
-#define SSCR1_SCFR (1 << 28) /* Slave Clock free Running */
-#define SSCR1_ECRA (1 << 27) /* Enable Clock Request A */
-#define SSCR1_ECRB (1 << 26) /* Enable Clock request B */
-#define SSCR1_SCLKDIR (1 << 25) /* Serial Bit Rate Clock Direction */
-#define SSCR1_SFRMDIR (1 << 24) /* Frame Direction */
-#define SSCR1_RWOT (1 << 23) /* Receive Without Transmit */
-#define SSCR1_TRAIL (1 << 22) /* Trailing Byte */
-#define SSCR1_TSRE (1 << 21) /* Transmit Service Request Enable */
-#define SSCR1_RSRE (1 << 20) /* Receive Service Request Enable */
-#define SSCR1_TINTE (1 << 19) /* Receiver Time-out Interrupt enable */
-#define SSCR1_PINTE (1 << 18) /* Peripheral Trailing Byte Interrupt Enable */
-#define SSCR1_IFS (1 << 16) /* Invert Frame Signal */
-#define SSCR1_STRF (1 << 15) /* Select FIFO or EFWR */
-#define SSCR1_EFWR (1 << 14) /* Enable FIFO Write/Read */
-
-#define SSSR_BCE (1 << 23) /* Bit Count Error */
-#define SSSR_CSS (1 << 22) /* Clock Synchronisation Status */
-#define SSSR_TUR (1 << 21) /* Transmit FIFO Under Run */
-#define SSSR_EOC (1 << 20) /* End Of Chain */
-#define SSSR_TINT (1 << 19) /* Receiver Time-out Interrupt */
-#define SSSR_PINT (1 << 18) /* Peripheral Trailing Byte Interrupt */
+#define SSCR1_EFWR BIT(14) /* Enable FIFO Write/Read */
+#define SSCR1_STRF BIT(15) /* Select FIFO or EFWR */
+#define SSCR1_IFS BIT(16) /* Invert Frame Signal */
+#define SSCR1_PINTE BIT(18) /* Peripheral Trailing Byte Interrupt Enable */
+#define SSCR1_TINTE BIT(19) /* Receiver Time-out Interrupt enable */
+#define SSCR1_RSRE BIT(20) /* Receive Service Request Enable */
+#define SSCR1_TSRE BIT(21) /* Transmit Service Request Enable */
+#define SSCR1_TRAIL BIT(22) /* Trailing Byte */
+#define SSCR1_RWOT BIT(23) /* Receive Without Transmit */
+#define SSCR1_SFRMDIR BIT(24) /* Frame Direction */
+#define SSCR1_SCLKDIR BIT(25) /* Serial Bit Rate Clock Direction */
+#define SSCR1_ECRB BIT(26) /* Enable Clock request B */
+#define SSCR1_ECRA BIT(27) /* Enable Clock Request A */
+#define SSCR1_SCFR BIT(28) /* Slave Clock free Running */
+#define SSCR1_EBCEI BIT(29) /* Enable Bit Count Error interrupt */
+#define SSCR1_TTE BIT(30) /* TXD Tristate Enable */
+#define SSCR1_TTELP BIT(31) /* TXD Tristate Enable Last Phase */
+
+#define SSSR_PINT BIT(18) /* Peripheral Trailing Byte Interrupt */
+#define SSSR_TINT BIT(19) /* Receiver Time-out Interrupt */
+#define SSSR_EOC BIT(20) /* End Of Chain */
+#define SSSR_TUR BIT(21) /* Transmit FIFO Under Run */
+#define SSSR_CSS BIT(22) /* Clock Synchronisation Status */
+#define SSSR_BCE BIT(23) /* Bit Count Error */
#define SSPSP_SCMODE(x) ((x) << 0) /* Serial Bit Rate Clock Mode */
-#define SSPSP_SFRMP (1 << 2) /* Serial Frame Polarity */
-#define SSPSP_ETDS (1 << 3) /* End of Transfer data State */
+#define SSPSP_SFRMP BIT(2) /* Serial Frame Polarity */
+#define SSPSP_ETDS BIT(3) /* End of Transfer data State */
#define SSPSP_STRTDLY(x) ((x) << 4) /* Start Delay */
#define SSPSP_DMYSTRT(x) ((x) << 7) /* Dummy Start */
#define SSPSP_SFRMDLY(x) ((x) << 9) /* Serial Frame Delay */
#define SSPSP_SFRMWDTH(x) ((x) << 16) /* Serial Frame Width */
#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */
-#define SSPSP_FSRT (1 << 25) /* Frame Sync Relative Timing */
+#define SSPSP_FSRT BIT(25) /* Frame Sync Relative Timing */
/* PXA3xx */
#define SSPSP_EDMYSTRT(x) ((x) << 26) /* Extended Dummy Start */
#define SSPSP_EDMYSTOP(x) ((x) << 28) /* Extended Dummy Stop */
#define SSPSP_TIMING_MASK (0x7f8001f0)
-#define SSACD_SCDB (1 << 3) /* SSPSYSCLK Divider Bypass */
-#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */
#define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */
-#define SSACD_SCDX8 (1 << 7) /* SYSCLK division ratio select */
+#define SSACD_ACDS_1 (0)
+#define SSACD_ACDS_2 (1)
+#define SSACD_ACDS_4 (2)
+#define SSACD_ACDS_8 (3)
+#define SSACD_ACDS_16 (4)
+#define SSACD_ACDS_32 (5)
+#define SSACD_SCDB BIT(3) /* SSPSYSCLK Divider Bypass */
+#define SSACD_SCDB_4X (0)
+#define SSACD_SCDB_1X (1)
+#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */
+#define SSACD_SCDX8 BIT(7) /* SYSCLK division ratio select */
+
+/* Intel Merrifield SSP */
+#define SFIFOL 0x68 /* FIFO level */
+#define SFIFOTT 0x6c /* FIFO trigger threshold */
+
+#define RX_THRESH_MRFLD_DFLT 16
+#define TX_THRESH_MRFLD_DFLT 16
+
+#define SFIFOL_TFL_MASK GENMASK(15, 0) /* Transmit FIFO Level mask */
+#define SFIFOL_RFL_MASK GENMASK(31, 16) /* Receive FIFO Level mask */
+
+#define SFIFOTT_TFT GENMASK(15, 0) /* Transmit FIFO Threshold (mask) */
+#define SFIFOTT_TxThresh(x) (((x) - 1) << 0) /* TX FIFO trigger threshold / level */
+#define SFIFOTT_RFT GENMASK(31, 16) /* Receive FIFO Threshold (mask) */
+#define SFIFOTT_RxThresh(x) (((x) - 1) << 16) /* RX FIFO trigger threshold / level */
/* LPSS SSP */
#define SSITF 0x44 /* TX FIFO trigger level */
+#define SSITF_TxHiThresh(x) (((x) - 1) << 0)
#define SSITF_TxLoThresh(x) (((x) - 1) << 8)
-#define SSITF_TxHiThresh(x) ((x) - 1)
#define SSIRF 0x48 /* RX FIFO trigger level */
#define SSIRF_RxThresh(x) ((x) - 1)
+/* LPT/WPT SSP */
+#define SSCR2 (0x40) /* SSP Command / Status 2 */
+#define SSPSP2 (0x44) /* SSP Programmable Serial Protocol 2 */
+
enum pxa_ssp_type {
SSP_UNDEFINED = 0,
PXA25x_SSP, /* pxa 210, 250, 255, 26x */
@@ -188,19 +217,23 @@ enum pxa_ssp_type {
PXA27x_SSP,
PXA3xx_SSP,
PXA168_SSP,
+ MMP2_SSP,
PXA910_SSP,
CE4100_SSP,
+ MRFLD_SSP,
QUARK_X1000_SSP,
- LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
+ /* Keep LPSS types sorted with lpss_platforms[] */
+ LPSS_LPT_SSP,
LPSS_BYT_SSP,
LPSS_BSW_SSP,
LPSS_SPT_SSP,
LPSS_BXT_SSP,
LPSS_CNL_SSP,
+ SSP_MAX
};
struct ssp_device {
- struct platform_device *pdev;
+ struct device *dev;
struct list_head node;
struct clk *clk;
@@ -209,11 +242,9 @@ struct ssp_device {
const char *label;
int port_id;
- int type;
+ enum pxa_ssp_type type;
int use_count;
int irq;
- int drcmr_rx;
- int drcmr_tx;
struct device_node *of_node;
};
@@ -241,6 +272,22 @@ static inline u32 pxa_ssp_read_reg(struct ssp_device *dev, u32 reg)
return __raw_readl(dev->mmio_base + reg);
}
+static inline void pxa_ssp_enable(struct ssp_device *ssp)
+{
+ u32 sscr0;
+
+ sscr0 = pxa_ssp_read_reg(ssp, SSCR0) | SSCR0_SSE;
+ pxa_ssp_write_reg(ssp, SSCR0, sscr0);
+}
+
+static inline void pxa_ssp_disable(struct ssp_device *ssp)
+{
+ u32 sscr0;
+
+ sscr0 = pxa_ssp_read_reg(ssp, SSCR0) & ~SSCR0_SSE;
+ pxa_ssp_write_reg(ssp, SSCR0, sscr0);
+}
+
#if IS_ENABLED(CONFIG_PXA_SSP)
struct ssp_device *pxa_ssp_request(int port, const char *label);
void pxa_ssp_free(struct ssp_device *);
@@ -259,4 +306,4 @@ static inline struct ssp_device *pxa_ssp_request_of(const struct device_node *n,
static inline void pxa_ssp_free(struct ssp_device *ssp) {}
#endif
-#endif
+#endif /* __LINUX_PXA2XX_SSP_H */
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
deleted file mode 100644
index e5380471c2cd..000000000000
--- a/include/linux/qcom_scm.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef __QCOM_SCM_H
-#define __QCOM_SCM_H
-
-#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
-#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0
-#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1
-#define QCOM_SCM_HDCP_MAX_REQ_CNT 5
-
-struct qcom_scm_hdcp_req {
- u32 addr;
- u32 val;
-};
-
-#if IS_ENABLED(CONFIG_QCOM_SCM)
-extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
-extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus);
-extern bool qcom_scm_is_available(void);
-extern bool qcom_scm_hdcp_available(void);
-extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
- u32 *resp);
-extern bool qcom_scm_pas_supported(u32 peripheral);
-extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
- size_t size);
-extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
- phys_addr_t size);
-extern int qcom_scm_pas_auth_and_reset(u32 peripheral);
-extern int qcom_scm_pas_shutdown(u32 peripheral);
-extern void qcom_scm_cpu_power_down(u32 flags);
-extern u32 qcom_scm_get_version(void);
-extern int qcom_scm_set_remote_state(u32 state, u32 id);
-extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
-extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
-extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
-#else
-static inline
-int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
-{
- return -ENODEV;
-}
-static inline
-int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
-{
- return -ENODEV;
-}
-static inline bool qcom_scm_is_available(void) { return false; }
-static inline bool qcom_scm_hdcp_available(void) { return false; }
-static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
- u32 *resp) { return -ENODEV; }
-static inline bool qcom_scm_pas_supported(u32 peripheral) { return false; }
-static inline int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
- size_t size) { return -ENODEV; }
-static inline int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
- phys_addr_t size) { return -ENODEV; }
-static inline int
-qcom_scm_pas_auth_and_reset(u32 peripheral) { return -ENODEV; }
-static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; }
-static inline void qcom_scm_cpu_power_down(u32 flags) {}
-static inline u32 qcom_scm_get_version(void) { return 0; }
-static inline u32
-qcom_scm_set_remote_state(u32 state,u32 id) { return -ENODEV; }
-static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) { return -ENODEV; }
-static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) { return -ENODEV; }
-static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) { return -ENODEV; }
-#endif
-#endif
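The deleted header followed the usual kernel pattern for optional subsystems: real prototypes under IS_ENABLED(CONFIG_QCOM_SCM), and static inline stubs returning -ENODEV (or false) otherwise, so callers compile and link either way. A generic sketch of that pattern, with placeholder names (CONFIG_FOO and foo_do_thing are hypothetical):

#if IS_ENABLED(CONFIG_FOO)
int foo_do_thing(u32 arg);
#else
/* Stub keeps callers compiling when the subsystem is configured out. */
static inline int foo_do_thing(u32 arg) { return -ENODEV; }
#endif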
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 39e2a2ac2471..827624840ee2 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -1,45 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2016 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2021 Marvell International Ltd.
*/
#ifndef _COMMON_HSI_H
#define _COMMON_HSI_H
+
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/slab.h>
/* dma_addr_t manip */
-#define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff))
-#define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16))
+#define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff))
+#define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16))
#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
#define DMA_REGPAIR_LE(x, val) do { \
@@ -47,42 +22,50 @@
(x).lo = DMA_LO_LE((val)); \
} while (0)
-#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
-#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
-#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
+#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
+#define HILO_64(hi, lo) \
+ HILO_GEN(le32_to_cpu(hi), le32_to_cpu(lo), u64)
+#define HILO_64_REGPAIR(regpair) ({ \
+ typeof(regpair) __regpair = (regpair); \
+ HILO_64(__regpair.hi, __regpair.lo); })
#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair))
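The reworked HILO_64_REGPAIR uses a GCC statement expression with typeof so its argument is evaluated exactly once; the old expansion read regpair.hi and regpair.lo as two separate accesses. A hypothetical caller showing why that matters (ring and idx are illustrative names):

static u64 next_regpair_val(struct regpair *ring, int *idx)
{
	/* ring[(*idx)++] is expanded once, so hi and lo are taken from
	 * the same element even though the argument has a side effect.
	 */
	return HILO_64_REGPAIR(ring[(*idx)++]);
}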
#ifndef __COMMON_HSI__
#define __COMMON_HSI__
+/********************************/
+/* PROTOCOL COMMON FW CONSTANTS */
+/********************************/
-#define X_FINAL_CLEANUP_AGG_INT 1
-
-#define EVENT_RING_PAGE_SIZE_BYTES 4096
+#define X_FINAL_CLEANUP_AGG_INT 1
-#define NUM_OF_GLOBAL_QUEUES 128
-#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64
+#define EVENT_RING_PAGE_SIZE_BYTES 4096
-#define ISCSI_CDU_TASK_SEG_TYPE 0
-#define FCOE_CDU_TASK_SEG_TYPE 0
-#define RDMA_CDU_TASK_SEG_TYPE 1
+#define NUM_OF_GLOBAL_QUEUES 128
+#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64
-#define FW_ASSERT_GENERAL_ATTN_IDX 32
+#define ISCSI_CDU_TASK_SEG_TYPE 0
+#define FCOE_CDU_TASK_SEG_TYPE 0
+#define RDMA_CDU_TASK_SEG_TYPE 1
+#define ETH_CDU_TASK_SEG_TYPE 2
-#define MAX_PINNED_CCFC 32
+#define FW_ASSERT_GENERAL_ATTN_IDX 32
/* Queue Zone sizes in bytes */
-#define TSTORM_QZONE_SIZE 8
-#define MSTORM_QZONE_SIZE 16
-#define USTORM_QZONE_SIZE 8
-#define XSTORM_QZONE_SIZE 8
-#define YSTORM_QZONE_SIZE 0
-#define PSTORM_QZONE_SIZE 0
-
-#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7
-#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16
-#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48
-#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112
+#define TSTORM_QZONE_SIZE 8
+#define MSTORM_QZONE_SIZE 16
+#define USTORM_QZONE_SIZE 8
+#define XSTORM_QZONE_SIZE 8
+#define YSTORM_QZONE_SIZE 0
+#define PSTORM_QZONE_SIZE 0
+
+#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7
+#define ETH_MAX_RXQ_VF_DEFAULT 16
+#define ETH_MAX_RXQ_VF_DOUBLE 48
+#define ETH_MAX_RXQ_VF_QUAD 112
+
+#define ETH_RGSRC_CTX_SIZE 6
+#define ETH_TGSRC_CTX_SIZE 6
/********************************/
/* CORE (LIGHT L2) FW CONSTANTS */
@@ -98,12 +81,19 @@
#define CORE_SPQE_PAGE_SIZE_BYTES 4096
-#define MAX_NUM_LL2_RX_QUEUES 48
-#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
+/* Number of LL2 RAM based queues */
+#define MAX_NUM_LL2_RX_RAM_QUEUES 32
+
+/* Number of LL2 context based queues */
+#define MAX_NUM_LL2_RX_CTX_QUEUES 208
+#define MAX_NUM_LL2_RX_QUEUES \
+ (MAX_NUM_LL2_RX_RAM_QUEUES + MAX_NUM_LL2_RX_CTX_QUEUES)
+
+#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 20
-#define FW_REVISION_VERSION 0
+#define FW_MINOR_VERSION 59
+#define FW_REVISION_VERSION 1
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -115,20 +105,21 @@
#define MAX_NUM_PORTS_BB (2)
#define MAX_NUM_PORTS (MAX_NUM_PORTS_K2)
-#define MAX_NUM_PFS_K2 (16)
-#define MAX_NUM_PFS_BB (8)
-#define MAX_NUM_PFS (MAX_NUM_PFS_K2)
-#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
+#define MAX_NUM_PFS_K2 (16)
+#define MAX_NUM_PFS_BB (8)
+#define MAX_NUM_PFS (MAX_NUM_PFS_K2)
+#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
#define MAX_NUM_VFS_K2 (192)
#define MAX_NUM_VFS_BB (120)
#define MAX_NUM_VFS (MAX_NUM_VFS_K2)
#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
-#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS)
+#define MAX_NUM_FUNCTIONS_K2 (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2)
#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
-#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS)
+#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
+#define MAX_NUM_FUNCTIONS (MAX_FUNCTION_NUMBER_K2)
#define MAX_NUM_VPORTS_K2 (208)
#define MAX_NUM_VPORTS_BB (160)
@@ -141,29 +132,14 @@
/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
#define NUM_PHYS_TCS_4PORT_K2 (4)
#define NUM_OF_PHYS_TCS (8)
-
+#define PURE_LB_TC NUM_OF_PHYS_TCS
#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1)
#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
-#define LB_TC (NUM_OF_PHYS_TCS)
-
-/* Num of possible traffic priority values */
-#define NUM_OF_PRIO (8)
-
-#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
-#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB)
-#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2)
-#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
-
/* CIDs */
#define NUM_OF_CONNECTION_TYPES (8)
-#define NUM_OF_LCIDS (320)
-#define NUM_OF_LTIDS (320)
-
-/* Clock values */
-#define MASTER_CLK_FREQ_E4 (375e6)
-#define STORM_CLK_FREQ_E4 (1000e6)
-#define CLK25M_CLK_FREQ_E4 (25e6)
+#define NUM_OF_LCIDS (320)
+#define NUM_OF_LTIDS (320)
/* Global PXP windows (GTT) */
#define NUM_OF_GTT 19
@@ -172,17 +148,17 @@
#define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS)
/* Tools Version */
-#define TOOLS_VERSION 10
+#define TOOLS_VERSION 11
/*****************/
/* CDU CONSTANTS */
/*****************/
-#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17)
-#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff)
+#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17)
+#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff)
-#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12)
-#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff)
#define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0)
#define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1)
@@ -190,6 +166,7 @@
#define CDU_CONTEXT_VALIDATION_CFG_USE_REGION (3)
#define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4)
#define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5)
+#define CDU_CONTEXT_VALIDATION_DEFAULT_CFG (0x3d)
/*****************/
/* DQ CONSTANTS */
@@ -201,45 +178,46 @@
#define DQ_DEMS_TOE_LOCAL_ADV_WND 4
#define DQ_DEMS_ROCE_CQ_CONS 7
-/* XCM agg val selection */
-#define DQ_XCM_AGG_VAL_SEL_WORD2 0
-#define DQ_XCM_AGG_VAL_SEL_WORD3 1
-#define DQ_XCM_AGG_VAL_SEL_WORD4 2
-#define DQ_XCM_AGG_VAL_SEL_WORD5 3
-#define DQ_XCM_AGG_VAL_SEL_REG3 4
-#define DQ_XCM_AGG_VAL_SEL_REG4 5
-#define DQ_XCM_AGG_VAL_SEL_REG5 6
-#define DQ_XCM_AGG_VAL_SEL_REG6 7
-
-/* XCM agg val selection */
-#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2
-#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
-#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5
-#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
-#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6
-#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
-#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
+/* XCM agg val selection (HW) */
+#define DQ_XCM_AGG_VAL_SEL_WORD2 0
+#define DQ_XCM_AGG_VAL_SEL_WORD3 1
+#define DQ_XCM_AGG_VAL_SEL_WORD4 2
+#define DQ_XCM_AGG_VAL_SEL_WORD5 3
+#define DQ_XCM_AGG_VAL_SEL_REG3 4
+#define DQ_XCM_AGG_VAL_SEL_REG4 5
+#define DQ_XCM_AGG_VAL_SEL_REG5 6
+#define DQ_XCM_AGG_VAL_SEL_REG6 7
+
+/* XCM agg val selection (FW) */
+#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2
+#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6
+#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
+#define DQ_XCM_ROCE_ACK_EDPM_DORQ_SEQ_CMD DQ_XCM_AGG_VAL_SEL_WORD5
/* UCM agg val selection (HW) */
#define DQ_UCM_AGG_VAL_SEL_WORD0 0
#define DQ_UCM_AGG_VAL_SEL_WORD1 1
#define DQ_UCM_AGG_VAL_SEL_WORD2 2
#define DQ_UCM_AGG_VAL_SEL_WORD3 3
-#define DQ_UCM_AGG_VAL_SEL_REG0 4
-#define DQ_UCM_AGG_VAL_SEL_REG1 5
-#define DQ_UCM_AGG_VAL_SEL_REG2 6
-#define DQ_UCM_AGG_VAL_SEL_REG3 7
+#define DQ_UCM_AGG_VAL_SEL_REG0 4
+#define DQ_UCM_AGG_VAL_SEL_REG1 5
+#define DQ_UCM_AGG_VAL_SEL_REG2 6
+#define DQ_UCM_AGG_VAL_SEL_REG3 7
/* UCM agg val selection (FW) */
#define DQ_UCM_ETH_PMD_TX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD2
@@ -263,7 +241,7 @@
#define DQ_TCM_ROCE_RQ_PROD_CMD \
DQ_TCM_AGG_VAL_SEL_WORD0
-/* XCM agg counter flag selection */
+/* XCM agg counter flag selection (HW) */
#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0
#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1
#define DQ_XCM_AGG_FLG_SHIFT_CF12 2
@@ -273,20 +251,20 @@
#define DQ_XCM_AGG_FLG_SHIFT_CF22 6
#define DQ_XCM_AGG_FLG_SHIFT_CF23 7
-/* XCM agg counter flag selection */
-#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
-#define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
-#define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+/* XCM agg counter flag selection (FW) */
+#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
/* UCM agg counter flag selection (HW) */
#define DQ_UCM_AGG_FLG_SHIFT_CF0 0
@@ -317,9 +295,9 @@
#define DQ_TCM_AGG_FLG_SHIFT_CF6 6
#define DQ_TCM_AGG_FLG_SHIFT_CF7 7
/* TCM agg counter flag selection (FW) */
-#define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
-#define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2)
-#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2)
+#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
#define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
#define DQ_TCM_TOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
@@ -327,18 +305,21 @@
#define DQ_TCM_IWARP_POST_RQ_CF_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
/* PWM address mapping */
-#define DQ_PWM_OFFSET_DPM_BASE 0x0
-#define DQ_PWM_OFFSET_DPM_END 0x27
+#define DQ_PWM_OFFSET_DPM_BASE 0x0
+#define DQ_PWM_OFFSET_DPM_END 0x27
+#define DQ_PWM_OFFSET_XCM32_24ICID_BASE 0x28
+#define DQ_PWM_OFFSET_UCM32_24ICID_BASE 0x30
+#define DQ_PWM_OFFSET_TCM32_24ICID_BASE 0x38
#define DQ_PWM_OFFSET_XCM16_BASE 0x40
#define DQ_PWM_OFFSET_XCM32_BASE 0x44
#define DQ_PWM_OFFSET_UCM16_BASE 0x48
#define DQ_PWM_OFFSET_UCM32_BASE 0x4C
-#define DQ_PWM_OFFSET_UCM16_4 0x50
+#define DQ_PWM_OFFSET_UCM16_4 0x50
#define DQ_PWM_OFFSET_TCM16_BASE 0x58
#define DQ_PWM_OFFSET_TCM32_BASE 0x5C
-#define DQ_PWM_OFFSET_XCM_FLAGS 0x68
-#define DQ_PWM_OFFSET_UCM_FLAGS 0x69
-#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B
+#define DQ_PWM_OFFSET_XCM_FLAGS 0x68
+#define DQ_PWM_OFFSET_UCM_FLAGS 0x69
+#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B
#define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2)
#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE)
@@ -347,10 +328,22 @@
#define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS)
#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1)
#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3)
-#define DQ_REGION_SHIFT (12)
+
+/* DQ_DEMS_AGG_VAL_BASE */
+#define DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE \
+ (DQ_PWM_OFFSET_TCM32_BASE + DQ_TCM_AGG_VAL_SEL_REG9 - 4)
+
+#define DQ_PWM_OFFSET_XCM_RDMA_24B_ICID_SQ_PROD \
+ (DQ_PWM_OFFSET_XCM32_24ICID_BASE + 2)
+#define DQ_PWM_OFFSET_UCM_RDMA_24B_ICID_CQ_CONS_32BIT \
+ (DQ_PWM_OFFSET_UCM32_24ICID_BASE + 4)
+#define DQ_PWM_OFFSET_TCM_ROCE_24B_ICID_RQ_PROD \
+ (DQ_PWM_OFFSET_TCM32_24ICID_BASE + 1)
+
+#define DQ_REGION_SHIFT (12)
/* DPM */
-#define DQ_DPM_WQE_BUFF_SIZE (320)
+#define DQ_DPM_WQE_BUFF_SIZE (320)
/* Conn type ranges */
#define DQ_CONN_TYPE_RANGE_SHIFT (4)
@@ -359,29 +352,31 @@
/* QM CONSTANTS */
/*****************/
-/* number of TX queues in the QM */
+/* Number of TX queues in the QM */
#define MAX_QM_TX_QUEUES_K2 512
#define MAX_QM_TX_QUEUES_BB 448
#define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2
-/* number of Other queues in the QM */
+/* Number of Other queues in the QM */
#define MAX_QM_OTHER_QUEUES_BB 64
#define MAX_QM_OTHER_QUEUES_K2 128
#define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2
-/* number of queues in a PF queue group */
+/* Number of queues in a PF queue group */
#define QM_PF_QUEUE_GROUP_SIZE 8
-/* the size of a single queue element in bytes */
-#define QM_PQ_ELEMENT_SIZE 4
+/* The size of a single queue element in bytes */
+#define QM_PQ_ELEMENT_SIZE 4
-/* base number of Tx PQs in the CM PQ representation.
- * should be used when storing PQ IDs in CM PQ registers and context
+/* Base number of Tx PQs in the CM PQ representation.
+ * Should be used when storing PQ IDs in CM PQ registers and context.
*/
-#define CM_TX_PQ_BASE 0x200
+#define CM_TX_PQ_BASE 0x200
-/* number of global Vport/QCN rate limiters */
+/* Number of global Vport/QCN rate limiters */
#define MAX_QM_GLOBAL_RLS 256
+#define COMMON_MAX_QM_GLOBAL_RLS MAX_QM_GLOBAL_RLS
+
/* QM registers data */
#define QM_LINE_CRD_REG_WIDTH 16
#define QM_LINE_CRD_REG_SIGN_BIT BIT((QM_LINE_CRD_REG_WIDTH - 1))
@@ -400,7 +395,8 @@
#define CAU_FSM_ETH_TX 1
/* Number of Protocol Indices per Status Block */
-#define PIS_PER_SB 12
+#define PIS_PER_SB 12
+#define MAX_PIS_PER_SB PIS_PER_SB
#define CAU_HC_STOPPED_STATE 3
#define CAU_HC_DISABLE_STATE 4
@@ -431,9 +427,6 @@
#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
#define IGU_CMD_INT_ACK_BASE 0x0400
-#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
- MAX_TOT_SB_PER_PATH - \
- 1)
#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
@@ -446,9 +439,6 @@
#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6
#define IGU_CMD_PROD_UPD_BASE 0x0600
-#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\
- MAX_TOT_SB_PER_PATH - \
- 1)
#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff
/*****************/
@@ -514,129 +504,126 @@
PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
/* PF BAR */
-#define PXP_BAR0_START_GRC 0x0000
-#define PXP_BAR0_GRC_LENGTH 0x1C00000
-#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \
- PXP_BAR0_GRC_LENGTH - 1)
-
-#define PXP_BAR0_START_IGU 0x1C00000
-#define PXP_BAR0_IGU_LENGTH 0x10000
-#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \
- PXP_BAR0_IGU_LENGTH - 1)
-
-#define PXP_BAR0_START_TSDM 0x1C80000
-#define PXP_BAR0_SDM_LENGTH 0x40000
+#define PXP_BAR0_START_GRC 0x0000
+#define PXP_BAR0_GRC_LENGTH 0x1C00000
+#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \
+ PXP_BAR0_GRC_LENGTH - 1)
+
+#define PXP_BAR0_START_IGU 0x1C00000
+#define PXP_BAR0_IGU_LENGTH 0x10000
+#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \
+ PXP_BAR0_IGU_LENGTH - 1)
+
+#define PXP_BAR0_START_TSDM 0x1C80000
+#define PXP_BAR0_SDM_LENGTH 0x40000
#define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000
-#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \
- PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
-#define PXP_BAR0_START_MSDM 0x1D00000
-#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \
- PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_MSDM 0x1D00000
+#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
-#define PXP_BAR0_START_USDM 0x1D80000
-#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \
- PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_USDM 0x1D80000
+#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
-#define PXP_BAR0_START_XSDM 0x1E00000
-#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \
- PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_XSDM 0x1E00000
+#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
-#define PXP_BAR0_START_YSDM 0x1E80000
-#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \
- PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_YSDM 0x1E80000
+#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
-#define PXP_BAR0_START_PSDM 0x1F00000
-#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \
- PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_PSDM 0x1F00000
+#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
#define PXP_BAR0_FIRST_INVALID_ADDRESS (PXP_BAR0_END_PSDM + 1)
/* VF BAR */
-#define PXP_VF_BAR0 0
-
-#define PXP_VF_BAR0_START_GRC 0x3E00
-#define PXP_VF_BAR0_GRC_LENGTH 0x200
-#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \
- PXP_VF_BAR0_GRC_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_IGU 0
-#define PXP_VF_BAR0_IGU_LENGTH 0x3000
-#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \
- PXP_VF_BAR0_IGU_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_DQ 0x3000
-#define PXP_VF_BAR0_DQ_LENGTH 0x200
-#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0
-#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \
- PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
-#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
- + 4)
-#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \
- PXP_VF_BAR0_DQ_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200
-#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200
-#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B \
- + \
- PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- - 1)
-
-#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400
-#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B \
- + \
- PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- - 1)
-
-#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600
-#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B \
- + \
- PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- - 1)
-
-#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800
-#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B \
- + \
- PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- - 1)
-
-#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00
-#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B \
- + \
- PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- - 1)
-
-#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
-#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B \
- + \
- PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- - 1)
-
-#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
-#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
-
-#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
-
-#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
-#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
+#define PXP_VF_BAR0 0
+
+#define PXP_VF_BAR0_START_IGU 0
+#define PXP_VF_BAR0_IGU_LENGTH 0x3000
+#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \
+ PXP_VF_BAR0_IGU_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_DQ 0x3000
+#define PXP_VF_BAR0_DQ_LENGTH 0x200
+#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0
+#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \
+ PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
+#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
+ + 4)
+#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \
+ PXP_VF_BAR0_DQ_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200
+#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200
+#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400
+#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600
+#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800
+#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00
+#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
+#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_GRC 0x3E00
+#define PXP_VF_BAR0_GRC_LENGTH 0x200
+#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \
+ PXP_VF_BAR0_GRC_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
+#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
+
+#define PXP_VF_BAR0_START_IGU2 0x10000
+#define PXP_VF_BAR0_IGU2_LENGTH 0xD000
+#define PXP_VF_BAR0_END_IGU2 (PXP_VF_BAR0_START_IGU2 + \
+ PXP_VF_BAR0_IGU2_LENGTH - 1)
+
+#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
+
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
/* ILT Records */
#define PXP_NUM_ILT_RECORDS_BB 7600
#define PXP_NUM_ILT_RECORDS_K2 11000
#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
-#define PXP_QUEUES_ZONE_MAX_NUM 320
+
+/* Host Interface */
+#define PXP_QUEUES_ZONE_MAX_NUM 320
+
/*****************/
/* PRM CONSTANTS */
/*****************/
-#define PRM_DMA_PAD_BYTES_NUM 2
+#define PRM_DMA_PAD_BYTES_NUM 2
+
/*****************/
/* SDMs CONSTANTS */
/*****************/
-#define SDM_OP_GEN_TRIG_NONE 0
-#define SDM_OP_GEN_TRIG_WAKE_THREAD 1
-#define SDM_OP_GEN_TRIG_AGG_INT 2
-#define SDM_OP_GEN_TRIG_LOADER 4
+#define SDM_OP_GEN_TRIG_NONE 0
+#define SDM_OP_GEN_TRIG_WAKE_THREAD 1
+#define SDM_OP_GEN_TRIG_AGG_INT 2
+#define SDM_OP_GEN_TRIG_LOADER 4
#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6
#define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9
@@ -644,41 +631,34 @@
/* Completion types */
/********************/
-#define SDM_COMP_TYPE_NONE 0
-#define SDM_COMP_TYPE_WAKE_THREAD 1
-#define SDM_COMP_TYPE_AGG_INT 2
-#define SDM_COMP_TYPE_CM 3
-#define SDM_COMP_TYPE_LOADER 4
-#define SDM_COMP_TYPE_PXP 5
-#define SDM_COMP_TYPE_INDICATE_ERROR 6
-#define SDM_COMP_TYPE_RELEASE_THREAD 7
-#define SDM_COMP_TYPE_RAM 8
-#define SDM_COMP_TYPE_INC_ORDER_CNT 9
+#define SDM_COMP_TYPE_NONE 0
+#define SDM_COMP_TYPE_WAKE_THREAD 1
+#define SDM_COMP_TYPE_AGG_INT 2
+#define SDM_COMP_TYPE_CM 3
+#define SDM_COMP_TYPE_LOADER 4
+#define SDM_COMP_TYPE_PXP 5
+#define SDM_COMP_TYPE_INDICATE_ERROR 6
+#define SDM_COMP_TYPE_RELEASE_THREAD 7
+#define SDM_COMP_TYPE_RAM 8
+#define SDM_COMP_TYPE_INC_ORDER_CNT 9
/*****************/
-/* PBF Constants */
+/* PBF CONSTANTS */
/*****************/
/* Number of PBF command queue lines. Each line is 32B. */
-#define PBF_MAX_CMD_LINES 3328
+#define PBF_MAX_CMD_LINES 3328
/* Number of BTB blocks. Each block is 256B. */
-#define BTB_MAX_BLOCKS 1440
-
+#define BTB_MAX_BLOCKS_BB 1440
+#define BTB_MAX_BLOCKS_K2 1840
/*****************/
/* PRS CONSTANTS */
/*****************/
#define PRS_GFT_CAM_LINES_NO_MATCH 31
-/* Async data KCQ CQE */
-struct async_data {
- __le32 cid;
- __le16 itid;
- u8 error_code;
- u8 fw_debug_param;
-};
-
+/* Interrupt coalescing TimeSet */
struct coalescing_timeset {
u8 value;
#define COALESCING_TIMESET_TIMESET_MASK 0x7F
@@ -692,23 +672,32 @@ struct common_queue_zone {
__le16 reserved;
};
+/* ETH Rx producers data */
struct eth_rx_prod_data {
__le16 bd_prod;
__le16 cqe_prod;
};
-struct regpair {
- __le32 lo;
- __le32 hi;
+struct tcp_ulp_connect_done_params {
+ __le16 mss;
+ u8 snd_wnd_scale;
+ u8 flags;
+#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK 0x1
+#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT 0
+#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK 0x7F
+#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT 1
};
-struct vf_pf_channel_eqe_data {
- struct regpair msg_addr;
+struct iscsi_connect_done_results {
+ __le16 icid;
+ __le16 conn_id;
+ struct tcp_ulp_connect_done_params params;
};
struct iscsi_eqe_data {
- __le32 cid;
+ __le16 icid;
__le16 conn_id;
+ __le16 reserved;
u8 error_code;
u8 error_pdu_opcode_reserved;
#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F
@@ -719,52 +708,6 @@ struct iscsi_eqe_data {
#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
};
-struct rdma_eqe_destroy_qp {
- __le32 cid;
- u8 reserved[4];
-};
-
-union rdma_eqe_data {
- struct regpair async_handle;
- struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
-};
-
-struct malicious_vf_eqe_data {
- u8 vf_id;
- u8 err_id;
- __le16 reserved[3];
-};
-
-struct initial_cleanup_eqe_data {
- u8 vf_id;
- u8 reserved[7];
-};
-
-/* Event Data Union */
-union event_ring_data {
- u8 bytes[8];
- struct vf_pf_channel_eqe_data vf_pf_channel;
- struct iscsi_eqe_data iscsi_info;
- union rdma_eqe_data rdma_data;
- struct malicious_vf_eqe_data malicious_vf;
- struct initial_cleanup_eqe_data vf_init_cleanup;
-};
-
-/* Event Ring Entry */
-struct event_ring_entry {
- u8 protocol_id;
- u8 opcode;
- __le16 reserved0;
- __le16 echo;
- u8 fw_return_code;
- u8 flags;
-#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
-#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
-#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
-#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
- union event_ring_data data;
-};
-
/* Multi function mode */
enum mf_mode {
ERROR_MODE /* Unsupported mode */,
@@ -773,21 +716,72 @@ enum mf_mode {
MAX_MF_MODE
};
+/* Per-protocol packet duplication enable bit vector. If set, duplicate
+ * offloaded traffic to the LL2 debug queue.
+ */
+struct offload_pkt_dup_enable {
+ __le16 enable_vector;
+};
+
/* Per-protocol connection types */
enum protocol_type {
- PROTOCOLID_ISCSI,
+ PROTOCOLID_TCP_ULP,
PROTOCOLID_FCOE,
PROTOCOLID_ROCE,
PROTOCOLID_CORE,
PROTOCOLID_ETH,
PROTOCOLID_IWARP,
- PROTOCOLID_RESERVED5,
+ PROTOCOLID_RESERVED0,
PROTOCOLID_PREROCE,
PROTOCOLID_COMMON,
- PROTOCOLID_RESERVED6,
+ PROTOCOLID_RESERVED1,
+ PROTOCOLID_RDMA,
+ PROTOCOLID_SCSI,
MAX_PROTOCOL_TYPE
};
+/* Pstorm packet duplication config */
+struct pstorm_pkt_dup_cfg {
+ struct offload_pkt_dup_enable enable;
+ __le16 reserved[3];
+};
+
+struct regpair {
+ __le32 lo;
+ __le32 hi;
+};
+
+/* RoCE Destroy Event Data */
+struct rdma_eqe_destroy_qp {
+ __le32 cid;
+ u8 reserved[4];
+};
+
+/* RoCE Suspend Event Data */
+struct rdma_eqe_suspend_qp {
+ __le32 cid;
+ u8 reserved[4];
+};
+
+/* RDMA Event Data Union */
+union rdma_eqe_data {
+ struct regpair async_handle;
+ struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
+ struct rdma_eqe_suspend_qp rdma_suspend_qp_data;
+};
+
+/* Tstorm packet duplication config */
+struct tstorm_pkt_dup_cfg {
+ struct offload_pkt_dup_enable enable;
+ __le16 reserved;
+ __le32 cid;
+};
+
+struct tstorm_queue_zone {
+ __le32 reserved[2];
+};
+
+/* Ustorm Queue Zone */
struct ustorm_eth_queue_zone {
struct coalescing_timeset int_coalescing_timeset;
u8 reserved[3];
@@ -798,62 +792,71 @@ struct ustorm_queue_zone {
struct common_queue_zone common;
};
-/* status block structure */
+/* Status block structure */
struct cau_pi_entry {
- u32 prod;
-#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF
-#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0
-#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F
-#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
-#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1
-#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23
-#define CAU_PI_ENTRY_RESERVED_MASK 0xFF
-#define CAU_PI_ENTRY_RESERVED_SHIFT 24
+ __le32 prod;
+#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF
+#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0
+#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F
+#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
+#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1
+#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23
+#define CAU_PI_ENTRY_RESERVED_MASK 0xFF
+#define CAU_PI_ENTRY_RESERVED_SHIFT 24
};
-/* status block structure */
+/* Status block structure */
struct cau_sb_entry {
- u32 data;
-#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF
-#define CAU_SB_ENTRY_SB_PROD_SHIFT 0
-#define CAU_SB_ENTRY_STATE0_MASK 0xF
-#define CAU_SB_ENTRY_STATE0_SHIFT 24
-#define CAU_SB_ENTRY_STATE1_MASK 0xF
-#define CAU_SB_ENTRY_STATE1_SHIFT 28
- u32 params;
-#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F
-#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
-#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F
-#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
-#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3
-#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14
-#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3
-#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16
-#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF
-#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18
-#define CAU_SB_ENTRY_VF_VALID_MASK 0x1
-#define CAU_SB_ENTRY_VF_VALID_SHIFT 26
-#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF
-#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27
-#define CAU_SB_ENTRY_TPH_MASK 0x1
-#define CAU_SB_ENTRY_TPH_SHIFT 31
+ __le32 data;
+#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF
+#define CAU_SB_ENTRY_SB_PROD_SHIFT 0
+#define CAU_SB_ENTRY_STATE0_MASK 0xF
+#define CAU_SB_ENTRY_STATE0_SHIFT 24
+#define CAU_SB_ENTRY_STATE1_MASK 0xF
+#define CAU_SB_ENTRY_STATE1_SHIFT 28
+ __le32 params;
+#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F
+#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
+#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F
+#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
+#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3
+#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14
+#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3
+#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16
+#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF
+#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18
+#define CAU_SB_ENTRY_VF_VALID_MASK 0x1
+#define CAU_SB_ENTRY_VF_VALID_SHIFT 26
+#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF
+#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27
+#define CAU_SB_ENTRY_TPH_MASK 0x1
+#define CAU_SB_ENTRY_TPH_SHIFT 31
};
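The cau_sb_entry fields change from u32 to __le32, making the endianness contract explicit: the host composes the word in CPU byte order and converts on assignment. A sketch, assuming the SET_FIELD helper conventionally used with these MASK/SHIFT pairs in the qed code:

static void cau_sb_init(struct cau_sb_entry *entry, u8 pf_id)
{
	u32 data = 0;

	SET_FIELD(data, CAU_SB_ENTRY_SB_PROD, 0);	/* initial producer */
	SET_FIELD(data, CAU_SB_ENTRY_PF_NUMBER, pf_id);
	entry->data = cpu_to_le32(data);	/* __le32 makes the swap explicit */
}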
-/* core doorbell data */
+/* IGU cleanup bit values, to distinguish between a cleanup command and a
+ * producer/consumer update.
+ */
+enum command_type_bit {
+ IGU_COMMAND_TYPE_NOP = 0,
+ IGU_COMMAND_TYPE_SET = 1,
+ MAX_COMMAND_TYPE_BIT
+};
+
+/* Core doorbell data */
struct core_db_data {
u8 params;
-#define CORE_DB_DATA_DEST_MASK 0x3
-#define CORE_DB_DATA_DEST_SHIFT 0
-#define CORE_DB_DATA_AGG_CMD_MASK 0x3
-#define CORE_DB_DATA_AGG_CMD_SHIFT 2
-#define CORE_DB_DATA_BYPASS_EN_MASK 0x1
-#define CORE_DB_DATA_BYPASS_EN_SHIFT 4
-#define CORE_DB_DATA_RESERVED_MASK 0x1
-#define CORE_DB_DATA_RESERVED_SHIFT 5
-#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3
-#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
- u8 agg_flags;
- __le16 spq_prod;
+#define CORE_DB_DATA_DEST_MASK 0x3
+#define CORE_DB_DATA_DEST_SHIFT 0
+#define CORE_DB_DATA_AGG_CMD_MASK 0x3
+#define CORE_DB_DATA_AGG_CMD_SHIFT 2
+#define CORE_DB_DATA_BYPASS_EN_MASK 0x1
+#define CORE_DB_DATA_BYPASS_EN_SHIFT 4
+#define CORE_DB_DATA_RESERVED_MASK 0x1
+#define CORE_DB_DATA_RESERVED_SHIFT 5
+#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags;
+ __le16 spq_prod;
};
/* Enum of doorbell aggregative command selection */
@@ -900,8 +903,8 @@ struct db_l2_dpm_data {
#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
-#define DB_L2_DPM_DATA_GFS_SRC_EN_MASK 0x1
-#define DB_L2_DPM_DATA_GFS_SRC_EN_SHIFT 31
+#define DB_L2_DPM_DATA_TGFS_SRC_EN_MASK 0x1
+#define DB_L2_DPM_DATA_TGFS_SRC_EN_SHIFT 31
};
/* Structure for SGE in a DPM doorbell of type DPM_L2_BD */
@@ -909,67 +912,103 @@ struct db_l2_dpm_sge {
struct regpair addr;
__le16 nbytes;
__le16 bitfields;
-#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF
-#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0
-#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3
-#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9
-#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1
-#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11
-#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF
-#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0
+#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3
+#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9
+#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1
+#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11
+#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF
+#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12
__le32 reserved2;
};
/* Structure for doorbell address, in legacy mode */
struct db_legacy_addr {
__le32 addr;
-#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3
-#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
-#define DB_LEGACY_ADDR_DEMS_MASK 0x7
-#define DB_LEGACY_ADDR_DEMS_SHIFT 2
-#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF
-#define DB_LEGACY_ADDR_ICID_SHIFT 5
+#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3
+#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
+#define DB_LEGACY_ADDR_DEMS_MASK 0x7
+#define DB_LEGACY_ADDR_DEMS_SHIFT 2
+#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF
+#define DB_LEGACY_ADDR_ICID_SHIFT 5
+};
+
+/* Structure for doorbell address, in legacy mode, without DEMS */
+struct db_legacy_wo_dems_addr {
+ __le32 addr;
+#define DB_LEGACY_WO_DEMS_ADDR_RESERVED0_MASK 0x3
+#define DB_LEGACY_WO_DEMS_ADDR_RESERVED0_SHIFT 0
+#define DB_LEGACY_WO_DEMS_ADDR_ICID_MASK 0x3FFFFFFF
+#define DB_LEGACY_WO_DEMS_ADDR_ICID_SHIFT 2
};
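db_legacy_wo_dems_addr widens the ICID field to 30 bits by dropping the DEMS selector from the legacy doorbell address layout. A sketch of composing the original legacy address from the masks defined above (SET_FIELD assumed, as before):

static u32 make_legacy_db_addr(u32 icid, u8 dems)
{
	u32 addr = 0;

	SET_FIELD(addr, DB_LEGACY_ADDR_DEMS, dems);	/* 3-bit DEMS selector */
	SET_FIELD(addr, DB_LEGACY_ADDR_ICID, icid);	/* 27-bit connection id */
	return addr;
}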
/* Structure for doorbell address, in PWM mode */
struct db_pwm_addr {
__le32 addr;
#define DB_PWM_ADDR_RESERVED0_MASK 0x7
-#define DB_PWM_ADDR_RESERVED0_SHIFT 0
-#define DB_PWM_ADDR_OFFSET_MASK 0x7F
+#define DB_PWM_ADDR_RESERVED0_SHIFT 0
+#define DB_PWM_ADDR_OFFSET_MASK 0x7F
#define DB_PWM_ADDR_OFFSET_SHIFT 3
-#define DB_PWM_ADDR_WID_MASK 0x3
-#define DB_PWM_ADDR_WID_SHIFT 10
-#define DB_PWM_ADDR_DPI_MASK 0xFFFF
-#define DB_PWM_ADDR_DPI_SHIFT 12
+#define DB_PWM_ADDR_WID_MASK 0x3
+#define DB_PWM_ADDR_WID_SHIFT 10
+#define DB_PWM_ADDR_DPI_MASK 0xFFFF
+#define DB_PWM_ADDR_DPI_SHIFT 12
#define DB_PWM_ADDR_RESERVED1_MASK 0xF
-#define DB_PWM_ADDR_RESERVED1_SHIFT 28
+#define DB_PWM_ADDR_RESERVED1_SHIFT 28
};
-/* Parameters to RoCE firmware, passed in EDPM doorbell */
+/* Parameters to RDMA firmware, passed in EDPM doorbell */
+struct db_rdma_24b_icid_dpm_params {
+ __le32 params;
+#define DB_RDMA_24B_ICID_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_RDMA_24B_ICID_DPM_PARAMS_SIZE_SHIFT 0
+#define DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_SHIFT 6
+#define DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_SHIFT 8
+#define DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_MASK 0xFF
+#define DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_SHIFT 16
+#define DB_RDMA_24B_ICID_DPM_PARAMS_INV_BYTE_CNT_MASK 0x7
+#define DB_RDMA_24B_ICID_DPM_PARAMS_INV_BYTE_CNT_SHIFT 24
+#define DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_SHIFT 27
+#define DB_RDMA_24B_ICID_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_RDMA_24B_ICID_DPM_PARAMS_S_FLG_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_RDMA_24B_ICID_DPM_PARAMS_RESERVED1_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_RESERVED1_SHIFT 30
+#define DB_RDMA_24B_ICID_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
+};
+
+/* Parameters to RDMA firmware, passed in EDPM doorbell */
struct db_rdma_dpm_params {
__le32 params;
-#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
-#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0
-#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3
-#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6
-#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF
-#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8
-#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
-#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
-#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
-#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
-#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
+#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6
+#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
+#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
+#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_SHIFT 28
+#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 30
#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
};
-/* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */
+/* Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a
+ * DPM burst.
+ */
struct db_rdma_dpm_data {
__le16 icid;
__le16 prod_val;
@@ -987,22 +1026,22 @@ enum igu_int_cmd {
/* IGU producer or consumer update command */
struct igu_prod_cons_update {
- u32 sb_id_and_flags;
-#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF
-#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0
-#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1
-#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24
-#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3
-#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25
-#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1
-#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
-#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1
-#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28
-#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3
-#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29
-#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1
-#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31
- u32 reserved1;
+ __le32 sb_id_and_flags;
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28
+#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3
+#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31
+ __le32 reserved1;
};
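igu_prod_cons_update likewise becomes explicitly little-endian. A sketch of building the command word with the MASK/SHIFT pairs above (SET_FIELD assumed; int_cmd would be one of the igu_int_cmd values from the surrounding context):

static void igu_ack(struct igu_prod_cons_update *upd, u32 prod, u8 int_cmd)
{
	u32 cmd = 0;

	SET_FIELD(cmd, IGU_PROD_CONS_UPDATE_SB_INDEX, prod);
	SET_FIELD(cmd, IGU_PROD_CONS_UPDATE_UPDATE_FLAG, 1);
	SET_FIELD(cmd, IGU_PROD_CONS_UPDATE_ENABLE_INT, int_cmd);
	upd->sb_id_and_flags = cpu_to_le32(cmd);
	upd->reserved1 = 0;
}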
/* Igu segments access for default status block only */
@@ -1012,38 +1051,63 @@ enum igu_seg_access {
MAX_IGU_SEG_ACCESS
};
+/* Enumeration for the L3 type field of parsing_and_err_flags.
+ * L3 type: 0 - unknown (not IP), 1 - IPv4, 2 - IPv6.
+ * (This field can be filled according to the last ethertype.)
+ */
+enum l3_type {
+ e_l3_type_unknown,
+ e_l3_type_ipv4,
+ e_l3_type_ipv6,
+ MAX_L3_TYPE
+};
+
+/* Enumeration for the L4 protocol field of parsing_and_err_flags.
+ * L4 protocol: 0 - none, 1 - TCP, 2 - UDP.
+ * If the packet is an IPv4 fragment and is not the first fragment, the
+ * protocol type should be set to none.
+ */
+enum l4_protocol {
+ e_l4_protocol_none,
+ e_l4_protocol_tcp,
+ e_l4_protocol_udp,
+ MAX_L4_PROTOCOL
+};
+
+/* Parsing and error flags field */
struct parsing_and_err_flags {
__le16 flags;
-#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3
-#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0
-#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3
-#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2
-#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4
-#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5
-#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6
-#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7
-#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8
-#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9
-#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10
-#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11
-#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12
-#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
+#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3
+#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
};
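The new l3_type and l4_protocol enums provide symbolic values for the L3TYPE and L4PROTOCOL subfields above. A sketch of decoding them from a received packet's flags (GET_FIELD assumed, matching the MASK/SHIFT convention):

static bool pkt_is_tcp_ipv4(const struct parsing_and_err_flags *pf)
{
	u16 flags = le16_to_cpu(pf->flags);

	return GET_FIELD(flags, PARSING_AND_ERR_FLAGS_L3TYPE) ==
			e_l3_type_ipv4 &&
	       GET_FIELD(flags, PARSING_AND_ERR_FLAGS_L4PROTOCOL) ==
			e_l4_protocol_tcp;
}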
+/* Parsing error flags bitmap */
struct parsing_err_flags {
__le16 flags;
#define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1
@@ -1080,266 +1144,280 @@ struct parsing_err_flags {
#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15
};
+/* Pb context */
struct pb_context {
__le32 crc[4];
};
+/* Concrete Function ID */
struct pxp_concrete_fid {
__le16 fid;
-#define PXP_CONCRETE_FID_PFID_MASK 0xF
-#define PXP_CONCRETE_FID_PFID_SHIFT 0
-#define PXP_CONCRETE_FID_PORT_MASK 0x3
-#define PXP_CONCRETE_FID_PORT_SHIFT 4
-#define PXP_CONCRETE_FID_PATH_MASK 0x1
-#define PXP_CONCRETE_FID_PATH_SHIFT 6
-#define PXP_CONCRETE_FID_VFVALID_MASK 0x1
-#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
-#define PXP_CONCRETE_FID_VFID_MASK 0xFF
-#define PXP_CONCRETE_FID_VFID_SHIFT 8
+#define PXP_CONCRETE_FID_PFID_MASK 0xF
+#define PXP_CONCRETE_FID_PFID_SHIFT 0
+#define PXP_CONCRETE_FID_PORT_MASK 0x3
+#define PXP_CONCRETE_FID_PORT_SHIFT 4
+#define PXP_CONCRETE_FID_PATH_MASK 0x1
+#define PXP_CONCRETE_FID_PATH_SHIFT 6
+#define PXP_CONCRETE_FID_VFVALID_MASK 0x1
+#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_CONCRETE_FID_VFID_MASK 0xFF
+#define PXP_CONCRETE_FID_VFID_SHIFT 8
};
+/* Concrete Function ID */
struct pxp_pretend_concrete_fid {
__le16 fid;
-#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF
-#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0
-#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7
-#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
-#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1
-#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7
-#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF
-#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8
+#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF
+#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF
+#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8
};
+/* Function ID */
union pxp_pretend_fid {
struct pxp_pretend_concrete_fid concrete_fid;
- __le16 opaque_fid;
+ __le16 opaque_fid;
};
-/* Pxp Pretend Command Register. */
+/* Pxp Pretend Command Register */
struct pxp_pretend_cmd {
- union pxp_pretend_fid fid;
- __le16 control;
-#define PXP_PRETEND_CMD_PATH_MASK 0x1
-#define PXP_PRETEND_CMD_PATH_SHIFT 0
-#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1
-#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1
-#define PXP_PRETEND_CMD_PORT_MASK 0x3
-#define PXP_PRETEND_CMD_PORT_SHIFT 2
-#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF
-#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4
-#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF
-#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8
-#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1
-#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12
-#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1
-#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13
-#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1
-#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
-#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1
-#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15
+ union pxp_pretend_fid fid;
+ __le16 control;
+#define PXP_PRETEND_CMD_PATH_MASK 0x1
+#define PXP_PRETEND_CMD_PATH_SHIFT 0
+#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1
+#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1
+#define PXP_PRETEND_CMD_PORT_MASK 0x3
+#define PXP_PRETEND_CMD_PORT_SHIFT 2
+#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF
+#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4
+#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF
+#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8
+#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12
+#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
+#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1
+#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15
};
-/* PTT Record in PXP Admin Window. */
+/* PTT Record in PXP Admin Window */
struct pxp_ptt_entry {
- __le32 offset;
-#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF
-#define PXP_PTT_ENTRY_OFFSET_SHIFT 0
-#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF
-#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
- struct pxp_pretend_cmd pretend;
+ __le32 offset;
+#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF
+#define PXP_PTT_ENTRY_OFFSET_SHIFT 0
+#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF
+#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
+ struct pxp_pretend_cmd pretend;
};
-/* VF Zone A Permission Register. */
+/* VF Zone A Permission Register */
struct pxp_vf_zone_a_permission {
__le32 control;
-#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF
-#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0
-#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1
-#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16
+#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF
+#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0
+#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1
+#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16
};
-/* RSS hash type */
+/* Rdif context */
struct rdif_task_context {
__le32 initial_ref_tag;
__le16 app_tag_value;
__le16 app_tag_mask;
u8 flags0;
-#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0
-#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1
-#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1
-#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1
-#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2
-#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1
-#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3
-#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3
-#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4
-#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
-#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
-#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1
-#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 7
+#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0
+#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1
+#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1
+#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1
+#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2
+#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1
+#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3
+#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3
+#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4
+#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
+#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
+#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1
+#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 7
u8 partial_dif_data[7];
__le16 partial_crc_value;
__le16 partial_checksum_value;
__le32 offset_in_io;
__le16 flags1;
-#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1
-#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0
-#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1
-#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2
-#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4
-#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5
-#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7
-#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6
-#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3
-#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9
-#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1
-#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11
-#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1
-#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12
-#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1
-#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 14
-#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 15
+#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0
+#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1
+#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2
+#define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5
+#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7
+#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6
+#define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3
+#define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9
+#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1
+#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11
+#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1
+#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12
+#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1
+#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 14
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 15
__le16 state;
-#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK 0xF
-#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT 0
-#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK 0xF
-#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4
-#define RDIF_TASK_CONTEXT_ERRORINIO_MASK 0x1
-#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT 8
-#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1
-#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9
-#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF
-#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 10
-#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3
-#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14
+#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK 0xF
+#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT 0
+#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK 0xF
+#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT 4
+#define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK 0x1
+#define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT 8
+#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK 0x1
+#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT 9
+#define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF
+#define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 10
+#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3
+#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14
__le32 reserved2;
};
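
Every flags/state word in these task contexts uses the same packing convention: a field FOO occupies MASK-many bits starting at bit FOO_SHIFT. The short stand-alone C sketch below illustrates that convention; the GET/SET helpers are local stand-ins for the equivalent GET_FIELD()/SET_FIELD() macros the qed headers provide, and the le16_to_cpu()/cpu_to_le16() conversions a kernel driver would apply to the __le16 words are omitted for brevity.

#include <stdint.h>
#include <stdio.h>

#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK	0x1
#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT	0
#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK	0x7
#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT	6

/* Local stand-ins for the driver's GET_FIELD()/SET_FIELD() helpers */
#define GET_FLD(val, name)	(((val) >> name##_SHIFT) & name##_MASK)
#define SET_FLD(val, name, v) \
	((val) = ((val) & ~(name##_MASK << name##_SHIFT)) | \
		 (((v) & name##_MASK) << name##_SHIFT))

int main(void)
{
	uint16_t flags1 = 0;

	SET_FLD(flags1, RDIF_TASK_CONTEXT_VALIDATE_GUARD, 1);
	SET_FLD(flags1, RDIF_TASK_CONTEXT_INTERVAL_SIZE, 5);

	printf("flags1 = 0x%04x, interval_size = %u\n", (unsigned)flags1,
	       (unsigned)GET_FLD(flags1, RDIF_TASK_CONTEXT_INTERVAL_SIZE));
	return 0;
}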
-/* RSS hash type */
-enum rss_hash_type {
- RSS_HASH_TYPE_DEFAULT = 0,
- RSS_HASH_TYPE_IPV4 = 1,
- RSS_HASH_TYPE_TCP_IPV4 = 2,
- RSS_HASH_TYPE_IPV6 = 3,
- RSS_HASH_TYPE_TCP_IPV6 = 4,
- RSS_HASH_TYPE_UDP_IPV4 = 5,
- RSS_HASH_TYPE_UDP_IPV6 = 6,
- MAX_RSS_HASH_TYPE
+/* Searcher Table struct */
+struct src_entry_header {
+ __le32 flags;
+#define SRC_ENTRY_HEADER_NEXT_PTR_TYPE_MASK 0x1
+#define SRC_ENTRY_HEADER_NEXT_PTR_TYPE_SHIFT 0
+#define SRC_ENTRY_HEADER_EMPTY_MASK 0x1
+#define SRC_ENTRY_HEADER_EMPTY_SHIFT 1
+#define SRC_ENTRY_HEADER_RESERVED_MASK 0x3FFFFFFF
+#define SRC_ENTRY_HEADER_RESERVED_SHIFT 2
+ __le32 magic_number;
+ struct regpair next_ptr;
+};
+
+/* Enumeration for address type */
+enum src_header_next_ptr_type_enum {
+ e_physical_addr,
+ e_logical_addr,
+ MAX_SRC_HEADER_NEXT_PTR_TYPE_ENUM
};
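
The searcher entry's flags word tells the table walker whether the entry is empty and how to interpret next_ptr, per src_header_next_ptr_type_enum (e_physical_addr = 0, e_logical_addr = 1). A minimal decode sketch, assuming flags has already been converted from little-endian:

#include <stdint.h>

#define SRC_ENTRY_HEADER_NEXT_PTR_TYPE_MASK	0x1
#define SRC_ENTRY_HEADER_NEXT_PTR_TYPE_SHIFT	0
#define SRC_ENTRY_HEADER_EMPTY_MASK		0x1
#define SRC_ENTRY_HEADER_EMPTY_SHIFT		1

/* Returns nonzero when the entry is unused; *physical is set when
 * next_ptr should be treated as a physical address (e_physical_addr).
 */
static int src_entry_decode(uint32_t flags, int *physical)
{
	*physical = ((flags >> SRC_ENTRY_HEADER_NEXT_PTR_TYPE_SHIFT) &
		     SRC_ENTRY_HEADER_NEXT_PTR_TYPE_MASK) == 0;
	return (flags >> SRC_ENTRY_HEADER_EMPTY_SHIFT) &
	       SRC_ENTRY_HEADER_EMPTY_MASK;
}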
-/* status block structure */
+/* Status block structure */
struct status_block {
__le16 pi_array[PIS_PER_SB];
__le32 sb_num;
-#define STATUS_BLOCK_SB_NUM_MASK 0x1FF
-#define STATUS_BLOCK_SB_NUM_SHIFT 0
-#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F
-#define STATUS_BLOCK_ZERO_PAD_SHIFT 9
-#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF
-#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16
+#define STATUS_BLOCK_SB_NUM_MASK 0x1FF
+#define STATUS_BLOCK_SB_NUM_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F
+#define STATUS_BLOCK_ZERO_PAD_SHIFT 9
+#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF
+#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16
__le32 prod_index;
-#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF
-#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
-#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF
-#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
+#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF
+#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF
+#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
};
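
sb_num packs a 9-bit status block number and prod_index a 24-bit producer index, each padded out to a full dword with must-be-zero bits. A sketch of unpacking both, assuming the caller has already done the le32_to_cpu() conversion:

#include <stdint.h>

#define STATUS_BLOCK_SB_NUM_MASK	0x1FF
#define STATUS_BLOCK_SB_NUM_SHIFT	0
#define STATUS_BLOCK_PROD_INDEX_MASK	0xFFFFFF
#define STATUS_BLOCK_PROD_INDEX_SHIFT	0

static uint16_t sb_get_num(uint32_t sb_num_word)
{
	return (sb_num_word >> STATUS_BLOCK_SB_NUM_SHIFT) &
	       STATUS_BLOCK_SB_NUM_MASK;
}

static uint32_t sb_get_prod(uint32_t prod_word)
{
	return (prod_word >> STATUS_BLOCK_PROD_INDEX_SHIFT) &
	       STATUS_BLOCK_PROD_INDEX_MASK;
}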
+/* Tdif context */
struct tdif_task_context {
__le32 initial_ref_tag;
__le16 app_tag_value;
__le16 app_tag_mask;
- __le16 partial_crc_valueB;
- __le16 partial_checksum_valueB;
+ __le16 partial_crc_value_b;
+ __le16 partial_checksum_value_b;
__le16 stateB;
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK 0xF
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT 0
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK 0xF
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4
-#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK 0x1
-#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT 8
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9
-#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F
-#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK 0xF
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT 0
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK 0xF
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT 4
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK 0x1
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT 8
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK		0x1
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT		9
+#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F
+#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10
u8 reserved1;
u8 flags0;
-#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0
-#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1
-#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1
-#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1
-#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2
-#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1
-#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3
-#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3
-#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4
-#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
-#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
-#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1
-#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7
+#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0
+#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1
+#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1
+#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1
+#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2
+#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1
+#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3
+#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3
+#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4
+#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
+#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
+#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7
__le32 flags1;
-#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1
-#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0
-#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1
-#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2
-#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4
-#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5
-#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7
-#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6
-#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3
-#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9
-#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1
-#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11
-#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1
-#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12
-#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1
-#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK 0xF
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT 14
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK 0xF
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18
-#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK 0x1
-#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT 22
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK 0x1
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT 23
-#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF
-#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 24
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 28
-#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 29
-#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1
-#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 30
-#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1
-#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31
- __le32 offset_in_iob;
+#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0
+#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1
+#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2
+#define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5
+#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7
+#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6
+#define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3
+#define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9
+#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1
+#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11
+#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12
+#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1
+#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK 0xF
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT 14
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK 0xF
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT 18
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK 0x1
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT 22
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK 0x1
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT 23
+#define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF
+#define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 24
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 28
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 29
+#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1
+#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 30
+#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31
+ __le32 offset_in_io_b;
__le16 partial_crc_value_a;
- __le16 partial_checksum_valuea_;
- __le32 offset_in_ioa;
+ __le16 partial_checksum_value_a;
+ __le32 offset_in_io_a;
u8 partial_dif_data_a[8];
u8 partial_dif_data_b[8];
};
+/* Timers context */
struct timers_context {
__le32 logical_client_0;
#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF
@@ -1385,6 +1463,7 @@ struct timers_context {
#define TIMERS_CONTEXT_RESERVED7_SHIFT 29
};
+/* Enum for next_protocol field of tunnel_parsing_flags / tunnelTypeDesc */
enum tunnel_next_protocol {
e_unknown = 0,
e_l2 = 1,
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index cb06e6e368e1..c84e08bc6802 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __ETH_COMMON__
@@ -36,150 +10,171 @@
/********************/
/* ETH FW CONSTANTS */
/********************/
-#define ETH_HSI_VER_MAJOR 3
-#define ETH_HSI_VER_MINOR 10
-
-#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
-
-#define ETH_CACHE_LINE_SIZE 64
-#define ETH_RX_CQE_GAP 32
-#define ETH_MAX_RAMROD_PER_CON 8
-#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
-#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
-#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
-#define ETH_RX_NUM_NEXT_PAGE_BDS 2
-
-#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253
-#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251
-
-#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
-#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
-#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
-#define ETH_TX_MAX_LSO_HDR_NBD 4
-#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
-#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
-#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
-#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
-#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
-#define ETH_TX_MAX_LSO_HDR_BYTES 510
-#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
-#define ETH_TX_LSO_WINDOW_MIN_LEN 9700
-#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000
-#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320
-#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF
-
-#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
+
+#define ETH_HSI_VER_MAJOR 3
+#define ETH_HSI_VER_MINOR 11
+
+#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
+/* Maximum number of pinned L2 connections (CIDs) */
+#define ETH_PINNED_CONN_MAX_NUM 32
+
+#define ETH_CACHE_LINE_SIZE 64
+#define ETH_RX_CQE_GAP 32
+#define ETH_MAX_RAMROD_PER_CON 8
+#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
+#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
+#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
+#define ETH_RX_NUM_NEXT_PAGE_BDS 2
+
+#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253
+#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251
+
+#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
+#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
+#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
+#define ETH_TX_MAX_LSO_HDR_NBD 4
+#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
+#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
+#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
+#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
+#define ETH_TX_MIN_BDS_PER_PKT_W_VPORT_FORWARDING 4
+#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
+#define ETH_TX_MAX_LSO_HDR_BYTES 510
+#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
+#define ETH_TX_LSO_WINDOW_MIN_LEN 9700
+#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000
+#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320
+#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF
+
+#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
#define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \
(ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2)
#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
(ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
-/* Maximum number of buffers, used for RX packet placement */
-#define ETH_RX_MAX_BUFF_PER_PKT 5
-#define ETH_RX_BD_THRESHOLD 12
+#define ETH_RX_MAX_BUFF_PER_PKT 5
+#define ETH_RX_BD_THRESHOLD 16
-/* num of MAC/VLAN filters */
-#define ETH_NUM_MAC_FILTERS 512
-#define ETH_NUM_VLAN_FILTERS 512
+/* Num of MAC/VLAN filters */
+#define ETH_NUM_MAC_FILTERS 512
+#define ETH_NUM_VLAN_FILTERS 512
-/* approx. multicast constants */
-#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0
-#define ETH_MULTICAST_MAC_BINS 256
-#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)
+/* Approx. multicast constants */
+#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0
+#define ETH_MULTICAST_MAC_BINS 256
+#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)
-/* ethernet vport update constants */
-#define ETH_FILTER_RULES_COUNT 10
-#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
-#define ETH_RSS_KEY_SIZE_REGS 10
-#define ETH_RSS_ENGINE_NUM_K2 207
-#define ETH_RSS_ENGINE_NUM_BB 127
+/* Ethernet vport update constants */
+#define ETH_FILTER_RULES_COUNT 10
+#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
+#define ETH_RSS_IND_TABLE_MASK_SIZE_REGS (ETH_RSS_IND_TABLE_ENTRIES_NUM / 32)
+#define ETH_RSS_KEY_SIZE_REGS 10
+#define ETH_RSS_ENGINE_NUM_K2 207
+#define ETH_RSS_ENGINE_NUM_BB 127
/* TPA constants */
-#define ETH_TPA_MAX_AGGS_NUM 64
-#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT
-#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
-#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
+#define ETH_TPA_MAX_AGGS_NUM 64
+#define ETH_TPA_CQE_START_BW_LEN_LIST_SIZE 2
+#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
+#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
/* Control frame check constants */
-#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
+#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
+
+/* GFS constants */
+#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */
+
+/* Destination port mode */
+enum dst_port_mode {
+ DST_PORT_PHY,
+ DST_PORT_LOOPBACK,
+ DST_PORT_PHY_LOOPBACK,
+ DST_PORT_DROP,
+ MAX_DST_PORT_MODE
+};
+
+/* Ethernet address type */
+enum eth_addr_type {
+ BROADCAST_ADDRESS,
+ MULTICAST_ADDRESS,
+ UNICAST_ADDRESS,
+ UNKNOWN_ADDRESS,
+ MAX_ETH_ADDR_TYPE
+};
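
These values follow standard Ethernet addressing, so a plausible classifier (an illustration, not the firmware's actual logic) treats the all-ones address as broadcast, any address with the I/G bit of the first octet set as multicast, and everything else as unicast:

#include <stdint.h>
#include <string.h>

enum eth_addr_type_sketch {
	BCAST_ADDR,
	MCAST_ADDR,
	UCAST_ADDR,
};

static enum eth_addr_type_sketch classify_mac(const uint8_t mac[6])
{
	static const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	if (!memcmp(mac, bcast, 6))
		return BCAST_ADDR;
	if (mac[0] & 0x01)		/* I/G bit set: group (multicast) */
		return MCAST_ADDR;
	return UCAST_ADDR;
}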
struct eth_tx_1st_bd_flags {
u8 bitfields;
-#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0
-#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
-#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2
-#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3
-#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4
-#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5
-#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6
-#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1
-#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7
-};
-
-/* The parsing information data fo rthe first tx bd of a given packet. */
+#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4
+#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7
+};
+
+/* The parsing information data for the first tx bd of a given packet */
struct eth_tx_data_1st_bd {
__le16 vlan;
u8 nbds;
struct eth_tx_1st_bd_flags bd_flags;
__le16 bitfields;
-#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1
-#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
-#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
-#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1
-#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF
-#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
+#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
+#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2
};
-/* The parsing information data for the second tx bd of a given packet. */
+/* The parsing information data for the second tx bd of a given packet */
struct eth_tx_data_2nd_bd {
__le16 tunn_ip_size;
__le16 bitfields1;
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
-#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
-#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
-#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
-#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
-#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11
-#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12
-#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13
-#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14
-#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1
-#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
+#define ETH_TX_DATA_2ND_BD_DST_PORT_MODE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_DST_PORT_MODE_SHIFT 6
+#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13
+#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15
__le16 bitfields2;
-#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
-#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
-#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
-#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
+#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
};
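
The DST_PORT_MODE field of bitfields1 selects one of the dst_port_mode destinations defined earlier in this file. A hedged sketch of stamping a destination into the word; byte-order conversion is left to the caller:

#include <stdint.h>

#define ETH_TX_DATA_2ND_BD_DST_PORT_MODE_MASK	0x3
#define ETH_TX_DATA_2ND_BD_DST_PORT_MODE_SHIFT	6

enum dst_port_mode_sketch {
	PORT_PHY, PORT_LOOPBACK, PORT_PHY_LOOPBACK, PORT_DROP
};

static uint16_t set_dst_port_mode(uint16_t bitfields1,
				  enum dst_port_mode_sketch m)
{
	bitfields1 &= ~(ETH_TX_DATA_2ND_BD_DST_PORT_MODE_MASK <<
			ETH_TX_DATA_2ND_BD_DST_PORT_MODE_SHIFT);
	bitfields1 |= ((uint16_t)m & ETH_TX_DATA_2ND_BD_DST_PORT_MODE_MASK) <<
		      ETH_TX_DATA_2ND_BD_DST_PORT_MODE_SHIFT;
	return bitfields1;
}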
-/* Firmware data for L2-EDPM packet. */
+/* Firmware data for L2-EDPM packet */
struct eth_edpm_fw_data {
struct eth_tx_data_1st_bd data_1st_bd;
struct eth_tx_data_2nd_bd data_2nd_bd;
__le32 reserved;
};
-struct eth_fast_path_cqe_fw_debug {
- __le16 reserved2;
-};
-
-/* tunneling parsing flags */
+/* Tunneling parsing flags */
struct eth_tunnel_parsing_flags {
u8 flags;
#define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3
@@ -199,24 +194,24 @@ struct eth_tunnel_parsing_flags {
/* PMD flow control bits */
struct eth_pmd_flow_flags {
u8 flags;
-#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1
-#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0
-#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1
-#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1
-#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F
-#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
+#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1
+#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1
+#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F
+#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
};
-/* Regular ETH Rx FP CQE. */
+/* Regular ETH Rx FP CQE */
struct eth_fast_path_rx_reg_cqe {
u8 type;
u8 bitfields;
-#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7
-#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
-#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF
-#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3
-#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1
-#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF
+#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7
__le16 pkt_len;
struct parsing_and_err_flags pars_flags;
__le16 vlan_tag;
@@ -225,13 +220,14 @@ struct eth_fast_path_rx_reg_cqe {
u8 placement_offset;
struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 bd_num;
- u8 reserved[9];
- struct eth_fast_path_cqe_fw_debug fw_debug;
- u8 reserved1[3];
+ u8 reserved;
+ __le16 reserved2;
+ __le32 flow_id_or_resource_id;
+ u8 reserved1[7];
struct eth_pmd_flow_flags pmd_flags;
};
-/* TPA-continue ETH Rx FP CQE. */
+/* TPA-continue ETH Rx FP CQE */
struct eth_fast_path_rx_tpa_cont_cqe {
u8 type;
u8 tpa_agg_index;
@@ -243,7 +239,7 @@ struct eth_fast_path_rx_tpa_cont_cqe {
struct eth_pmd_flow_flags pmd_flags;
};
-/* TPA-end ETH Rx FP CQE. */
+/* TPA-end ETH Rx FP CQE */
struct eth_fast_path_rx_tpa_end_cqe {
u8 type;
u8 tpa_agg_index;
@@ -259,16 +255,16 @@ struct eth_fast_path_rx_tpa_end_cqe {
struct eth_pmd_flow_flags pmd_flags;
};
-/* TPA-start ETH Rx FP CQE. */
+/* TPA-start ETH Rx FP CQE */
struct eth_fast_path_rx_tpa_start_cqe {
u8 type;
u8 bitfields;
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
-#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF
-#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
__le16 seg_len;
struct parsing_and_err_flags pars_flags;
__le16 vlan_tag;
@@ -278,9 +274,10 @@ struct eth_fast_path_rx_tpa_start_cqe {
struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 tpa_agg_index;
u8 header_len;
- __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
- struct eth_fast_path_cqe_fw_debug fw_debug;
- u8 reserved;
+ __le16 bw_ext_bd_len_list[ETH_TPA_CQE_START_BW_LEN_LIST_SIZE];
+ __le16 reserved2;
+ __le32 flow_id_or_resource_id;
+ u8 reserved[3];
struct eth_pmd_flow_flags pmd_flags;
};
@@ -295,24 +292,24 @@ struct eth_rx_bd {
struct regpair addr;
};
-/* regular ETH Rx SP CQE */
+/* Regular ETH Rx SP CQE */
struct eth_slow_path_rx_cqe {
- u8 type;
- u8 ramrod_cmd_id;
- u8 error_flag;
- u8 reserved[25];
- __le16 echo;
- u8 reserved1;
+ u8 type;
+ u8 ramrod_cmd_id;
+ u8 error_flag;
+ u8 reserved[25];
+ __le16 echo;
+ u8 reserved1;
struct eth_pmd_flow_flags pmd_flags;
};
-/* union for all ETH Rx CQE types */
+/* Union for all ETH Rx CQE types */
union eth_rx_cqe {
- struct eth_fast_path_rx_reg_cqe fast_path_regular;
- struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start;
- struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont;
- struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end;
- struct eth_slow_path_rx_cqe slow_path;
+ struct eth_fast_path_rx_reg_cqe fast_path_regular;
+ struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start;
+ struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont;
+ struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end;
+ struct eth_slow_path_rx_cqe slow_path;
};
/* ETH Rx CQE type */
@@ -339,7 +336,7 @@ enum eth_rx_tunn_type {
MAX_ETH_RX_TUNN_TYPE
};
-/* Aggregation end reason. */
+/* Aggregation end reason. */
enum eth_tpa_end_reason {
ETH_AGG_END_UNUSED,
ETH_AGG_END_SP_UPDATE,
@@ -354,65 +351,89 @@ enum eth_tpa_end_reason {
/* The first tx bd of a given packet */
struct eth_tx_1st_bd {
- struct regpair addr;
- __le16 nbytes;
- struct eth_tx_data_1st_bd data;
+ struct regpair addr;
+ __le16 nbytes;
+ struct eth_tx_data_1st_bd data;
};
/* The second tx bd of a given packet */
struct eth_tx_2nd_bd {
- struct regpair addr;
- __le16 nbytes;
- struct eth_tx_data_2nd_bd data;
+ struct regpair addr;
+ __le16 nbytes;
+ struct eth_tx_data_2nd_bd data;
};
-/* The parsing information data for the third tx bd of a given packet. */
+/* The parsing information data for the third tx bd of a given packet */
struct eth_tx_data_3rd_bd {
__le16 lso_mss;
__le16 bitfields;
-#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
-#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
-#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
-#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4
-#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1
-#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
-#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
-#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4
+#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
+#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
u8 tunn_l4_hdr_start_offset_w;
u8 tunn_hdr_size_w;
};
/* The third tx bd of a given packet */
struct eth_tx_3rd_bd {
- struct regpair addr;
- __le16 nbytes;
- struct eth_tx_data_3rd_bd data;
+ struct regpair addr;
+ __le16 nbytes;
+ struct eth_tx_data_3rd_bd data;
+};
+
+/* The parsing information data for the fourth tx bd of a given packet */
+struct eth_tx_data_4th_bd {
+ u8 dst_vport_id;
+ u8 reserved4;
+ __le16 bitfields;
+#define ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_MASK 0x1
+#define ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_SHIFT 0
+#define ETH_TX_DATA_4TH_BD_RESERVED1_MASK 0x7F
+#define ETH_TX_DATA_4TH_BD_RESERVED1_SHIFT 1
+#define ETH_TX_DATA_4TH_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_4TH_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_4TH_BD_RESERVED2_MASK 0x7F
+#define ETH_TX_DATA_4TH_BD_RESERVED2_SHIFT 9
+ __le16 reserved3;
+};
+
+/* The fourth tx bd of a given packet */
+struct eth_tx_4th_bd {
+ struct regpair addr; /* Single continuous buffer */
+ __le16 nbytes; /* Number of bytes in this BD */
+ struct eth_tx_data_4th_bd data; /* Parsing information data */
};
-/* Complementary information for the regular tx bd of a given packet. */
+/* Complementary information for the regular tx bd of a given packet */
struct eth_tx_data_bd {
- __le16 reserved0;
- __le16 bitfields;
-#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF
-#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
-#define ETH_TX_DATA_BD_START_BD_MASK 0x1
-#define ETH_TX_DATA_BD_START_BD_SHIFT 8
-#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F
-#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
+ __le16 reserved0;
+ __le16 bitfields;
+#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF
+#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
+#define ETH_TX_DATA_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F
+#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
__le16 reserved3;
};
/* The common non-special TX BD ring element */
struct eth_tx_bd {
- struct regpair addr;
- __le16 nbytes;
- struct eth_tx_data_bd data;
+ struct regpair addr;
+ __le16 nbytes;
+ struct eth_tx_data_bd data;
};
union eth_tx_bd_types {
struct eth_tx_1st_bd first_bd;
struct eth_tx_2nd_bd second_bd;
struct eth_tx_3rd_bd third_bd;
+ struct eth_tx_4th_bd fourth_bd;
struct eth_tx_bd reg_bd;
};
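
Per the constants above, a non-LSO frame needs at least one BD (ETH_TX_MIN_BDS_PER_NON_LSO_PKT); the first BD carries the flag bits and the 14-bit packet length, and extra fragments ride in regular BDs. A minimal sketch of filling the first BD's two bitfield words; endianness handling is omitted:

#include <stdint.h>

#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK	0x1
#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT	0
#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK		0x3FFF
#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT	2

static void fill_first_bd(uint8_t *bd_flags, uint16_t *bitfields,
			  uint16_t pkt_len)
{
	*bd_flags |= ETH_TX_1ST_BD_FLAGS_START_BD_MASK <<
		     ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
	*bitfields &= ~(ETH_TX_DATA_1ST_BD_PKT_LEN_MASK <<
			ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
	*bitfields |= (pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
		      ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
}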
@@ -425,6 +446,12 @@ enum eth_tx_tunn_type {
MAX_ETH_TX_TUNN_TYPE
};
+/* Mstorm Queue Zone */
+struct mstorm_eth_queue_zone {
+ struct eth_rx_prod_data rx_producers;
+ __le32 reserved[3];
+};
+
/* Ystorm Queue Zone */
struct xstorm_eth_queue_zone {
struct coalescing_timeset int_coalescing_timeset;
@@ -434,18 +461,30 @@ struct xstorm_eth_queue_zone {
/* ETH doorbell data */
struct eth_db_data {
u8 params;
-#define ETH_DB_DATA_DEST_MASK 0x3
-#define ETH_DB_DATA_DEST_SHIFT 0
-#define ETH_DB_DATA_AGG_CMD_MASK 0x3
-#define ETH_DB_DATA_AGG_CMD_SHIFT 2
-#define ETH_DB_DATA_BYPASS_EN_MASK 0x1
-#define ETH_DB_DATA_BYPASS_EN_SHIFT 4
-#define ETH_DB_DATA_RESERVED_MASK 0x1
-#define ETH_DB_DATA_RESERVED_SHIFT 5
-#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3
-#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
+#define ETH_DB_DATA_DEST_MASK 0x3
+#define ETH_DB_DATA_DEST_SHIFT 0
+#define ETH_DB_DATA_AGG_CMD_MASK 0x3
+#define ETH_DB_DATA_AGG_CMD_SHIFT 2
+#define ETH_DB_DATA_BYPASS_EN_MASK 0x1
+#define ETH_DB_DATA_BYPASS_EN_SHIFT 4
+#define ETH_DB_DATA_RESERVED_MASK 0x1
+#define ETH_DB_DATA_RESERVED_SHIFT 5
+#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
u8 agg_flags;
__le16 bd_prod;
};
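
eth_db_data is the doorbell payload: params selects the destination and aggregation command, and bd_prod publishes the new BD producer. A hedged composition sketch with a plain-integer stand-in for the structure:

#include <stdint.h>

#define ETH_DB_DATA_DEST_MASK		0x3
#define ETH_DB_DATA_DEST_SHIFT		0
#define ETH_DB_DATA_AGG_CMD_MASK	0x3
#define ETH_DB_DATA_AGG_CMD_SHIFT	2

struct eth_db_data_sketch {
	uint8_t params;
	uint8_t agg_flags;
	uint16_t bd_prod;	/* __le16 in the real structure */
};

static void db_fill(struct eth_db_data_sketch *db, uint8_t dest,
		    uint8_t agg_cmd, uint16_t bd_prod)
{
	db->params = ((dest & ETH_DB_DATA_DEST_MASK) <<
		      ETH_DB_DATA_DEST_SHIFT) |
		     ((agg_cmd & ETH_DB_DATA_AGG_CMD_MASK) <<
		      ETH_DB_DATA_AGG_CMD_SHIFT);
	db->agg_flags = 0;
	db->bd_prod = bd_prod;	/* cpu_to_le16() in kernel code */
}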
+/* RSS hash type */
+enum rss_hash_type {
+ RSS_HASH_TYPE_DEFAULT = 0,
+ RSS_HASH_TYPE_IPV4 = 1,
+ RSS_HASH_TYPE_TCP_IPV4 = 2,
+ RSS_HASH_TYPE_IPV6 = 3,
+ RSS_HASH_TYPE_TCP_IPV6 = 4,
+ RSS_HASH_TYPE_UDP_IPV4 = 5,
+ RSS_HASH_TYPE_UDP_IPV6 = 6,
+ MAX_RSS_HASH_TYPE
+};
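
On the receive side, the 3-bit RSS_HASH_TYPE field of a fast-path CQE reports which of these rss_hash_type variants the hardware hashed on. Sketch of the lookup:

#include <stdint.h>

#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK	0x7
#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT	0

enum rss_hash_type_sketch {
	HASH_DEFAULT, HASH_IPV4, HASH_TCP_IPV4, HASH_IPV6,
	HASH_TCP_IPV6, HASH_UDP_IPV4, HASH_UDP_IPV6,
};

static enum rss_hash_type_sketch cqe_rss_type(uint8_t cqe_bitfields)
{
	return (enum rss_hash_type_sketch)
		((cqe_bitfields >>
		  ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT) &
		 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK);
}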
+
#endif /* __ETH_COMMON__ */
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
index 12fc9e788eea..7ba0abc867f1 100644
--- a/include/linux/qed/fcoe_common.h
+++ b/include/linux/qed/fcoe_common.h
@@ -1,224 +1,83 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __FCOE_COMMON__
#define __FCOE_COMMON__
+
/*********************/
/* FCOE FW CONSTANTS */
/*********************/
#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12
-struct fcoe_abts_pkt {
- __le32 abts_rsp_fc_payload_lo;
- __le16 abts_rsp_rx_id;
- u8 abts_rsp_rctl;
- u8 reserved2;
-};
-
-/* FCoE additional WQE (Sq/XferQ) information */
-union fcoe_additional_info_union {
- __le32 previous_tid;
- __le32 parent_tid;
- __le32 burst_length;
- __le32 seq_rec_updated_offset;
-};
-
-struct fcoe_exp_ro {
- __le32 data_offset;
- __le32 reserved;
-};
-
-union fcoe_cleanup_addr_exp_ro_union {
- struct regpair abts_rsp_fc_payload_hi;
- struct fcoe_exp_ro exp_ro;
-};
-
-/* FCoE Ramrod Command IDs */
-enum fcoe_completion_status {
- FCOE_COMPLETION_STATUS_SUCCESS,
- FCOE_COMPLETION_STATUS_FCOE_VER_ERR,
- FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR,
- MAX_FCOE_COMPLETION_STATUS
-};
-
-struct fc_addr_nw {
- u8 addr_lo;
- u8 addr_mid;
- u8 addr_hi;
-};
-
-/* FCoE connection offload */
-struct fcoe_conn_offload_ramrod_data {
- struct regpair sq_pbl_addr;
- struct regpair sq_curr_page_addr;
- struct regpair sq_next_page_addr;
- struct regpair xferq_pbl_addr;
- struct regpair xferq_curr_page_addr;
- struct regpair xferq_next_page_addr;
- struct regpair respq_pbl_addr;
- struct regpair respq_curr_page_addr;
- struct regpair respq_next_page_addr;
- __le16 dst_mac_addr_lo;
- __le16 dst_mac_addr_mid;
- __le16 dst_mac_addr_hi;
- __le16 src_mac_addr_lo;
- __le16 src_mac_addr_mid;
- __le16 src_mac_addr_hi;
- __le16 tx_max_fc_pay_len;
- __le16 e_d_tov_timer_val;
- __le16 rx_max_fc_pay_len;
- __le16 vlan_tag;
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13
- __le16 physical_q0;
- __le16 rec_rr_tov_timer_val;
- struct fc_addr_nw s_id;
- u8 max_conc_seqs_c3;
- struct fc_addr_nw d_id;
- u8 flags;
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 4
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x3
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 6
- __le16 conn_id;
- u8 def_q_idx;
- u8 reserved[5];
-};
-
-/* FCoE terminate connection request */
-struct fcoe_conn_terminate_ramrod_data {
- struct regpair terminate_params_addr;
-};
-
-struct fcoe_slow_sgl_ctx {
- struct regpair base_sgl_addr;
- __le16 curr_sge_off;
- __le16 remainder_num_sges;
- __le16 curr_sgl_index;
- __le16 reserved;
-};
-
-union fcoe_dix_desc_ctx {
- struct fcoe_slow_sgl_ctx dix_sgl;
- struct scsi_sge cached_dix_sge;
+/* The fcoe task storm context protection-information of Ystorm */
+struct protection_info_ctx {
+ __le16 flags;
+#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3
+#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0
+#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1
+#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3
+#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF
+#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8
+#define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F
+#define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9
+ u8 dix_block_size;
+ u8 dst_size;
};
-struct fcoe_fast_sgl_ctx {
- struct regpair sgl_start_addr;
- __le32 sgl_byte_offset;
- __le16 task_reuse_cnt;
- __le16 init_offset_in_first_sge;
+/* Union of the fcoe task storm context protection-information of Ystorm */
+union protection_info_union_ctx {
+ struct protection_info_ctx info;
+ __le32 value;
};
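
The INTERVAL_SIZE_LOG name suggests the protection interval size is stored as a log2; under that assumption (an inference from the name, not something this header states) the interval in bytes would be recovered as follows:

#include <stdint.h>

#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK	0xF
#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT	4

/* Assumption: the field holds log2 of the protection interval size. */
static uint32_t prot_interval_bytes(uint16_t flags)
{
	uint16_t lg = (flags >> PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT) &
		      PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK;

	return 1u << lg;
}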
+/* FCP CMD payload */
struct fcoe_fcp_cmd_payload {
__le32 opaque[8];
};
+/* FCP RSP payload */
struct fcoe_fcp_rsp_payload {
__le32 opaque[6];
};
-struct fcoe_fcp_xfer_payload {
- __le32 opaque[3];
-};
-
-/* FCoE firmware function init */
-struct fcoe_init_func_ramrod_data {
- struct scsi_init_func_params func_params;
- struct scsi_init_func_queues q_params;
- __le16 mtu;
- __le16 sq_num_pages_in_pbl;
- __le32 reserved;
-};
-
-/* FCoE: Mode of the connection: Target or Initiator or both */
-enum fcoe_mode_type {
- FCOE_INITIATOR_MODE = 0x0,
- FCOE_TARGET_MODE = 0x1,
- FCOE_BOTH_OR_NOT_CHOSEN = 0x3,
- MAX_FCOE_MODE_TYPE
-};
-
-struct fcoe_rx_stat {
- struct regpair fcoe_rx_byte_cnt;
- struct regpair fcoe_rx_data_pkt_cnt;
- struct regpair fcoe_rx_xfer_pkt_cnt;
- struct regpair fcoe_rx_other_pkt_cnt;
- __le32 fcoe_silent_drop_pkt_cmdq_full_cnt;
- __le32 fcoe_silent_drop_pkt_rq_full_cnt;
- __le32 fcoe_silent_drop_pkt_crc_error_cnt;
- __le32 fcoe_silent_drop_pkt_task_invalid_cnt;
- __le32 fcoe_silent_drop_total_pkt_cnt;
- __le32 rsrv;
-};
-
-struct fcoe_stat_ramrod_data {
- struct regpair stat_params_addr;
-};
-
-struct protection_info_ctx {
- __le16 flags;
-#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3
-#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0
-#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1
-#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3
-#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF
-#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8
-#define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F
-#define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9
- u8 dix_block_size;
- u8 dst_size;
-};
-
-union protection_info_union_ctx {
- struct protection_info_ctx info;
- __le32 value;
-};
-
+/* Padded FCP RSP payload */
struct fcp_rsp_payload_padded {
struct fcoe_fcp_rsp_payload rsp_payload;
__le32 reserved[2];
};
+/* FCP XFER payload */
+struct fcoe_fcp_xfer_payload {
+ __le32 opaque[3];
+};
+
+/* Padded FCP XFER payload */
struct fcp_xfer_payload_padded {
struct fcoe_fcp_xfer_payload xfer_payload;
__le32 reserved[5];
};
+/* Task params */
struct fcoe_tx_data_params {
__le32 data_offset;
__le32 offset_in_io;
u8 flags;
-#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1
-#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0
-#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1
-#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1
-#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1
-#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2
-#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F
-#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3
+#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1
+#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0
+#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1
+#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1
+#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1
+#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2
+#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F
+#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3
u8 dif_residual;
__le16 seq_cnt;
__le16 single_sge_saved_offset;
@@ -227,6 +86,7 @@ struct fcoe_tx_data_params {
__le16 reserved3;
};
+/* Middle path parameters: FC header fields provided by the driver */
struct fcoe_tx_mid_path_params {
__le32 parameter;
u8 r_ctl;
@@ -237,11 +97,13 @@ struct fcoe_tx_mid_path_params {
__le16 ox_id;
};
+/* Task params */
struct fcoe_tx_params {
struct fcoe_tx_data_params data;
struct fcoe_tx_mid_path_params mid_path;
};
+/* Union of FCP CMD payload \ TX params \ ABTS \ Cleanup */
union fcoe_tx_info_union_ctx {
struct fcoe_fcp_cmd_payload fcp_cmd_payload;
struct fcp_rsp_payload_padded fcp_rsp_payload;
@@ -249,13 +111,29 @@ union fcoe_tx_info_union_ctx {
struct fcoe_tx_params tx_params;
};
+/* Data sgl */
+struct fcoe_slow_sgl_ctx {
+ struct regpair base_sgl_addr;
+ __le16 curr_sge_off;
+ __le16 remainder_num_sges;
+ __le16 curr_sgl_index;
+ __le16 reserved;
+};
+
+/* Union of DIX SGL \ cached DIX sges */
+union fcoe_dix_desc_ctx {
+ struct fcoe_slow_sgl_ctx dix_sgl;
+ struct scsi_sge cached_dix_sge;
+};
+
+/* The fcoe task storm context of Ystorm */
struct ystorm_fcoe_task_st_ctx {
u8 task_type;
u8 sgl_mode;
-#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
-#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
-#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F
-#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1
u8 cached_dix_sge;
u8 expect_first_xfer;
__le32 num_pbf_zero_write;
@@ -277,44 +155,44 @@ struct ystorm_fcoe_task_ag_ctx {
u8 byte1;
__le16 word0;
u8 flags0;
-#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
-#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
-#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
+#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
-#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
-#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
-#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
-#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
+#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
-#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 byte2;
__le32 reg0;
u8 byte3;
@@ -333,68 +211,68 @@ struct tstorm_fcoe_task_ag_ctx {
u8 byte1;
__le16 icid;
u8 flags0;
-#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6
-#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7
+#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7
u8 flags1;
-#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0
-#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4
-#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6
u8 flags3;
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3
-#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3
+#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7
u8 flags4;
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 cleanup_state;
__le16 last_sent_tid;
__le32 rec_rr_tov_exp_timeout;
@@ -407,25 +285,46 @@ struct tstorm_fcoe_task_ag_ctx {
__le32 data_offset_next;
};
+/* Expected relative offsets */
+struct fcoe_exp_ro {
+ __le32 data_offset;
+ __le32 reserved;
+};
+
+/* Union of Cleanup address \ expected relative offsets */
+union fcoe_cleanup_addr_exp_ro_union {
+ struct regpair abts_rsp_fc_payload_hi;
+ struct fcoe_exp_ro exp_ro;
+};
+
+/* Fields copied from the ABTS rsp packet */
+struct fcoe_abts_pkt {
+ __le32 abts_rsp_fc_payload_lo;
+ __le16 abts_rsp_rx_id;
+ u8 abts_rsp_rctl;
+ u8 reserved2;
+};
+
+/* FW read-write (modifiable) part of the fcoe task storm context of Tstorm */
struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union;
__le16 flags;
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8
__le16 seq_cnt;
u8 seq_id;
u8 ooo_rx_seq_id;
@@ -436,6 +335,7 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
__le16 reserved1;
};
+/* FW read-only part of the fcoe task storm context of Tstorm */
struct fcoe_tstorm_fcoe_task_st_ctx_read_only {
u8 task_type;
u8 dev_type;
@@ -446,6 +346,7 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_only {
__le32 rsrv;
};
+/* The fcoe task storm context of Tstorm */
struct tstorm_fcoe_task_st_ctx {
struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write;
struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only;
@@ -456,44 +357,44 @@ struct mstorm_fcoe_task_ag_ctx {
u8 byte1;
__le16 icid;
u8 flags0;
-#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
-#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
-#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
+#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
+#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
-#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
-#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
-#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
-#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
-#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 cleanup_state;
__le32 received_bytes;
u8 byte3;
@@ -507,6 +408,7 @@ struct mstorm_fcoe_task_ag_ctx {
__le32 reg2;
};
+/* The fcoe task storm context of Mstorm */
struct mstorm_fcoe_task_st_ctx {
struct regpair rsp_buf_addr;
__le32 rsrv[2];
@@ -515,26 +417,26 @@ struct mstorm_fcoe_task_st_ctx {
__le32 data_buffer_offset;
__le16 parent_id;
__le16 flags;
-#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF
-#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0
-#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3
-#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6
-#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1
-#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8
-#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
-#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12
-#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
-#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13
-#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3
-#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14
struct scsi_cached_sges data_desc;
};
@@ -543,51 +445,51 @@ struct ustorm_fcoe_task_ag_ctx {
u8 byte1;
__le16 icid;
u8 flags0;
-#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6
+#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0
-#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2
-#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
-#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5
-#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7
+#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7
u8 flags3;
-#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
__le32 global_cq_num;
@@ -596,6 +498,7 @@ struct ustorm_fcoe_task_ag_ctx {
__le32 reg5;
};
+/* FCoE task context */
struct fcoe_task_context {
struct ystorm_fcoe_task_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
@@ -611,6 +514,173 @@ struct fcoe_task_context {
struct rdif_task_context rdif_context;
};
+/* FCoE additional WQE (Sq/XferQ) information */
+union fcoe_additional_info_union {
+ __le32 previous_tid;
+ __le32 parent_tid;
+ __le32 burst_length;
+ __le32 seq_rec_updated_offset;
+};
+
+/* FCoE ramrod completion statuses */
+enum fcoe_completion_status {
+ FCOE_COMPLETION_STATUS_SUCCESS,
+ FCOE_COMPLETION_STATUS_FCOE_VER_ERR,
+ FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR,
+ MAX_FCOE_COMPLETION_STATUS
+};
+
+/* FC address (SID/DID) network presentation */
+struct fc_addr_nw {
+ u8 addr_lo;
+ u8 addr_mid;
+ u8 addr_hi;
+};
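
The three bytes above hold a 24-bit FC ID (S_ID/D_ID) in network order. A minimal packing sketch, assuming the usual domain/area/port split; the helper name is hypothetical and not part of this patch:

	/* Hypothetical helper: pack a 24-bit FC ID, e.g. 0x010203,
	 * into the byte layout of struct fc_addr_nw.
	 */
	static void fc_id_to_addr_nw(u32 fc_id, struct fc_addr_nw *addr)
	{
		addr->addr_hi  = (fc_id >> 16) & 0xff;	/* domain */
		addr->addr_mid = (fc_id >> 8) & 0xff;	/* area   */
		addr->addr_lo  = fc_id & 0xff;		/* port   */
	}
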
+
+/* FCoE connection offload */
+struct fcoe_conn_offload_ramrod_data {
+ struct regpair sq_pbl_addr;
+ struct regpair sq_curr_page_addr;
+ struct regpair sq_next_page_addr;
+ struct regpair xferq_pbl_addr;
+ struct regpair xferq_curr_page_addr;
+ struct regpair xferq_next_page_addr;
+ struct regpair respq_pbl_addr;
+ struct regpair respq_curr_page_addr;
+ struct regpair respq_next_page_addr;
+ __le16 dst_mac_addr_lo;
+ __le16 dst_mac_addr_mid;
+ __le16 dst_mac_addr_hi;
+ __le16 src_mac_addr_lo;
+ __le16 src_mac_addr_mid;
+ __le16 src_mac_addr_hi;
+ __le16 tx_max_fc_pay_len;
+ __le16 e_d_tov_timer_val;
+ __le16 rx_max_fc_pay_len;
+ __le16 vlan_tag;
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13
+ __le16 physical_q0;
+ __le16 rec_rr_tov_timer_val;
+ struct fc_addr_nw s_id;
+ u8 max_conc_seqs_c3;
+ struct fc_addr_nw d_id;
+ u8 flags;
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_SHIFT 4
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 5
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 7
+ __le16 conn_id;
+ u8 def_q_idx;
+ u8 reserved[5];
+};
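
The MASK/SHIFT pairs above pack VLAN ID, CFI and priority into the single little-endian vlan_tag word. A composition sketch; the function name and the vlan_id/cfi/prio inputs are hypothetical:

	static void fcoe_set_vlan_tag(struct fcoe_conn_offload_ramrod_data *ramrod,
				      u16 vlan_id, u8 cfi, u8 prio)
	{
		u16 tag = 0;

		tag |= (vlan_id & FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK) <<
			FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT;
		tag |= (cfi & FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK) <<
			FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT;
		tag |= (prio & FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK) <<
			FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;
		ramrod->vlan_tag = cpu_to_le16(tag);	/* stored little-endian */
	}
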
+
+/* FCoE terminate connection request */
+struct fcoe_conn_terminate_ramrod_data {
+ struct regpair terminate_params_addr;
+};
+
+/* FCoE device type */
+enum fcoe_device_type {
+ FCOE_TASK_DEV_TYPE_DISK,
+ FCOE_TASK_DEV_TYPE_TAPE,
+ MAX_FCOE_DEVICE_TYPE
+};
+
+/* Data SGL */
+struct fcoe_fast_sgl_ctx {
+ struct regpair sgl_start_addr;
+ __le32 sgl_byte_offset;
+ __le16 task_reuse_cnt;
+ __le16 init_offset_in_first_sge;
+};
+
+/* FCoE firmware function init */
+struct fcoe_init_func_ramrod_data {
+ struct scsi_init_func_params func_params;
+ struct scsi_init_func_queues q_params;
+ __le16 mtu;
+ __le16 sq_num_pages_in_pbl;
+ __le32 reserved[3];
+};
+
+/* FCoE connection mode: target, initiator, or both */
+enum fcoe_mode_type {
+ FCOE_INITIATOR_MODE = 0x0,
+ FCOE_TARGET_MODE = 0x1,
+ FCOE_BOTH_OR_NOT_CHOSEN = 0x3,
+ MAX_FCOE_MODE_TYPE
+};
+
+/* Per PF FCoE receive path statistics - tStorm RAM structure */
+struct fcoe_rx_stat {
+ struct regpair fcoe_rx_byte_cnt;
+ struct regpair fcoe_rx_data_pkt_cnt;
+ struct regpair fcoe_rx_xfer_pkt_cnt;
+ struct regpair fcoe_rx_other_pkt_cnt;
+ __le32 fcoe_silent_drop_pkt_cmdq_full_cnt;
+ __le32 fcoe_silent_drop_pkt_rq_full_cnt;
+ __le32 fcoe_silent_drop_pkt_crc_error_cnt;
+ __le32 fcoe_silent_drop_pkt_task_invalid_cnt;
+ __le32 fcoe_silent_drop_total_pkt_cnt;
+ __le32 rsrv;
+};
+
+/* FCoE SQE request type */
+enum fcoe_sqe_request_type {
+ SEND_FCOE_CMD,
+ SEND_FCOE_MIDPATH,
+ SEND_FCOE_ABTS_REQUEST,
+ FCOE_EXCHANGE_CLEANUP,
+ FCOE_SEQUENCE_RECOVERY,
+ SEND_FCOE_XFER_RDY,
+ SEND_FCOE_RSP,
+ SEND_FCOE_RSP_WITH_SENSE_DATA,
+ SEND_FCOE_TARGET_DATA,
+ SEND_FCOE_INITIATOR_DATA,
+ SEND_FCOE_XFER_CONTINUATION_RDY,
+ SEND_FCOE_TARGET_ABTS_RSP,
+ MAX_FCOE_SQE_REQUEST_TYPE
+};
+
+/* FCoE statistics request */
+struct fcoe_stat_ramrod_data {
+ struct regpair stat_params_addr;
+};
+
+/* FCoE task type */
+enum fcoe_task_type {
+ FCOE_TASK_TYPE_WRITE_INITIATOR,
+ FCOE_TASK_TYPE_READ_INITIATOR,
+ FCOE_TASK_TYPE_MIDPATH,
+ FCOE_TASK_TYPE_UNSOLICITED,
+ FCOE_TASK_TYPE_ABTS,
+ FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
+ FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
+ FCOE_TASK_TYPE_WRITE_TARGET,
+ FCOE_TASK_TYPE_READ_TARGET,
+ FCOE_TASK_TYPE_RSP,
+ FCOE_TASK_TYPE_RSP_SENSE_DATA,
+ FCOE_TASK_TYPE_ABTS_TARGET,
+ FCOE_TASK_TYPE_ENUM_SIZE,
+ MAX_FCOE_TASK_TYPE
+};
+
+/* Per PF FCoE transmit path statistics - pStorm RAM structure */
struct fcoe_tx_stat {
struct regpair fcoe_tx_byte_cnt;
struct regpair fcoe_tx_data_pkt_cnt;
@@ -618,51 +688,55 @@ struct fcoe_tx_stat {
struct regpair fcoe_tx_other_pkt_cnt;
};
+/* FCoE SQ/XferQ element */
struct fcoe_wqe {
__le16 task_id;
__le16 flags;
-#define FCOE_WQE_REQ_TYPE_MASK 0xF
-#define FCOE_WQE_REQ_TYPE_SHIFT 0
-#define FCOE_WQE_SGL_MODE_MASK 0x1
-#define FCOE_WQE_SGL_MODE_SHIFT 4
-#define FCOE_WQE_CONTINUATION_MASK 0x1
-#define FCOE_WQE_CONTINUATION_SHIFT 5
-#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1
-#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6
-#define FCOE_WQE_RESERVED_MASK 0x1
-#define FCOE_WQE_RESERVED_SHIFT 7
-#define FCOE_WQE_NUM_SGES_MASK 0xF
-#define FCOE_WQE_NUM_SGES_SHIFT 8
-#define FCOE_WQE_RESERVED1_MASK 0xF
-#define FCOE_WQE_RESERVED1_SHIFT 12
+#define FCOE_WQE_REQ_TYPE_MASK 0xF
+#define FCOE_WQE_REQ_TYPE_SHIFT 0
+#define FCOE_WQE_SGL_MODE_MASK 0x1
+#define FCOE_WQE_SGL_MODE_SHIFT 4
+#define FCOE_WQE_CONTINUATION_MASK 0x1
+#define FCOE_WQE_CONTINUATION_SHIFT 5
+#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1
+#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6
+#define FCOE_WQE_RESERVED_MASK 0x1
+#define FCOE_WQE_RESERVED_SHIFT 7
+#define FCOE_WQE_NUM_SGES_MASK 0xF
+#define FCOE_WQE_NUM_SGES_SHIFT 8
+#define FCOE_WQE_RESERVED1_MASK 0xF
+#define FCOE_WQE_RESERVED1_SHIFT 12
union fcoe_additional_info_union additional_info_union;
};
+/* FCoE XFRQ element protection flags */
struct xfrqe_prot_flags {
u8 flags;
-#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
-#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
-#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1
-#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4
-#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3
-#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5
-#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1
-#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7
+#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
+#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
+#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1
+#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4
+#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3
+#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5
+#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1
+#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7
};
+/* FCoE doorbell data */
struct fcoe_db_data {
u8 params;
-#define FCOE_DB_DATA_DEST_MASK 0x3
-#define FCOE_DB_DATA_DEST_SHIFT 0
-#define FCOE_DB_DATA_AGG_CMD_MASK 0x3
-#define FCOE_DB_DATA_AGG_CMD_SHIFT 2
-#define FCOE_DB_DATA_BYPASS_EN_MASK 0x1
-#define FCOE_DB_DATA_BYPASS_EN_SHIFT 4
-#define FCOE_DB_DATA_RESERVED_MASK 0x1
-#define FCOE_DB_DATA_RESERVED_SHIFT 5
-#define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3
-#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+#define FCOE_DB_DATA_DEST_MASK 0x3
+#define FCOE_DB_DATA_DEST_SHIFT 0
+#define FCOE_DB_DATA_AGG_CMD_MASK 0x3
+#define FCOE_DB_DATA_AGG_CMD_SHIFT 2
+#define FCOE_DB_DATA_BYPASS_EN_MASK 0x1
+#define FCOE_DB_DATA_BYPASS_EN_SHIFT 4
+#define FCOE_DB_DATA_RESERVED_MASK 0x1
+#define FCOE_DB_DATA_RESERVED_SHIFT 5
+#define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6
u8 agg_flags;
__le16 sq_prod;
};
+
#endif /* __FCOE_COMMON__ */
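
Before the diff moves on to iscsi_common.h, one usage note: consumers of fcoe_db_data typically go through the generic SET_FIELD() helper rather than open-coding the shifts. A doorbell-fill sketch, assuming SET_FIELD() and the DB_DEST_XCM/DB_AGG_CMD_SET values from common_hsi.h; the function name is hypothetical:

	static void fcoe_fill_doorbell(struct fcoe_db_data *db, u16 sq_prod)
	{
		db->params = 0;
		SET_FIELD(db->params, FCOE_DB_DATA_DEST, DB_DEST_XCM);
		SET_FIELD(db->params, FCOE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
		SET_FIELD(db->params, FCOE_DB_DATA_BYPASS_EN, 1);
		db->sq_prod = cpu_to_le16(sq_prod);	/* new SQ producer index */
	}
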
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 85e086cba639..1a60285a01e3 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -1,78 +1,53 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __ISCSI_COMMON__
#define __ISCSI_COMMON__
+
/**********************/
/* ISCSI FW CONSTANTS */
/**********************/
/* iSCSI HSI constants */
-#define ISCSI_DEFAULT_MTU (1500)
+#define ISCSI_DEFAULT_MTU (1500)
/* KWQ (kernel work queue) layer codes */
-#define ISCSI_SLOW_PATH_LAYER_CODE (6)
+#define ISCSI_SLOW_PATH_LAYER_CODE (6)
/* iSCSI parameter defaults */
-#define ISCSI_DEFAULT_HEADER_DIGEST (0)
-#define ISCSI_DEFAULT_DATA_DIGEST (0)
-#define ISCSI_DEFAULT_INITIAL_R2T (1)
-#define ISCSI_DEFAULT_IMMEDIATE_DATA (1)
-#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000)
-#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000)
-#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000)
-#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1)
+#define ISCSI_DEFAULT_HEADER_DIGEST (0)
+#define ISCSI_DEFAULT_DATA_DIGEST (0)
+#define ISCSI_DEFAULT_INITIAL_R2T (1)
+#define ISCSI_DEFAULT_IMMEDIATE_DATA (1)
+#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000)
+#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000)
+#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000)
+#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1)
/* iSCSI parameter limits */
-#define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200)
-#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff)
-#define ISCSI_MIN_VAL_BURST_LENGTH (0x200)
-#define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff)
-#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1)
-#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff)
+#define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200)
+#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff)
+#define ISCSI_MIN_VAL_BURST_LENGTH (0x200)
+#define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff)
+#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1)
+#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff)
-#define ISCSI_AHS_CNTL_SIZE 4
+#define ISCSI_AHS_CNTL_SIZE 4
-#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf)
+#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf)
/* iSCSI reserved params */
#define ISCSI_ITT_ALL_ONES (0xffffffff)
#define ISCSI_TTT_ALL_ONES (0xffffffff)
-#define ISCSI_OPTION_1_OFF_CHIP_TCP 1
-#define ISCSI_OPTION_2_ON_CHIP_TCP 2
+#define ISCSI_OPTION_1_OFF_CHIP_TCP 1
+#define ISCSI_OPTION_2_ON_CHIP_TCP 2
-#define ISCSI_INITIATOR_MODE 0
-#define ISCSI_TARGET_MODE 1
+#define ISCSI_INITIATOR_MODE 0
+#define ISCSI_TARGET_MODE 1
/* iSCSI request op codes */
#define ISCSI_OPCODE_NOP_OUT (0)
@@ -84,41 +59,48 @@
#define ISCSI_OPCODE_LOGOUT_REQUEST (6)
/* iSCSI response/messages op codes */
-#define ISCSI_OPCODE_NOP_IN (0x20)
-#define ISCSI_OPCODE_SCSI_RESPONSE (0x21)
-#define ISCSI_OPCODE_TMF_RESPONSE (0x22)
-#define ISCSI_OPCODE_LOGIN_RESPONSE (0x23)
-#define ISCSI_OPCODE_TEXT_RESPONSE (0x24)
-#define ISCSI_OPCODE_DATA_IN (0x25)
-#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26)
-#define ISCSI_OPCODE_R2T (0x31)
-#define ISCSI_OPCODE_ASYNC_MSG (0x32)
-#define ISCSI_OPCODE_REJECT (0x3f)
+#define ISCSI_OPCODE_NOP_IN (0x20)
+#define ISCSI_OPCODE_SCSI_RESPONSE (0x21)
+#define ISCSI_OPCODE_TMF_RESPONSE (0x22)
+#define ISCSI_OPCODE_LOGIN_RESPONSE (0x23)
+#define ISCSI_OPCODE_TEXT_RESPONSE (0x24)
+#define ISCSI_OPCODE_DATA_IN (0x25)
+#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26)
+#define ISCSI_OPCODE_R2T (0x31)
+#define ISCSI_OPCODE_ASYNC_MSG (0x32)
+#define ISCSI_OPCODE_REJECT (0x3f)
/* iSCSI stages */
-#define ISCSI_STAGE_SECURITY_NEGOTIATION (0)
-#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1)
-#define ISCSI_STAGE_FULL_FEATURE_PHASE (3)
+#define ISCSI_STAGE_SECURITY_NEGOTIATION (0)
+#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1)
+#define ISCSI_STAGE_FULL_FEATURE_PHASE (3)
/* iSCSI CQE errors */
-#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08)
-#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10)
-#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20)
+#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08)
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10)
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20)
+
+/* Union of data bd_opaque/tq_tid */
+union bd_opaque_tq_union {
+ __le16 bd_opaque;
+ __le16 tq_tid;
+};
+/* iSCSI CQE error bitmap */
struct cqe_error_bitmap {
u8 cqe_error_status_bits;
-#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7
-#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0
-#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1
-#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3
-#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1
-#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4
-#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1
-#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5
-#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1
-#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6
-#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1
-#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5
+#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1
+#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6
+#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1
+#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7
};
union cqe_error_status {
@@ -126,86 +108,133 @@ union cqe_error_status {
struct cqe_error_bitmap error_bits;
};
+/* Generic iSCSI PDU header viewed as raw dwords */
struct data_hdr {
__le32 data[12];
};
-struct iscsi_async_msg_hdr {
- __le16 reserved0;
- u8 flags_attr;
-#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F
-#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0
-#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1
-#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7
- u8 opcode;
- __le32 hdr_second_dword;
-#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24
- struct regpair lun;
- __le32 all_ones;
- __le32 reserved1;
- __le32 stat_sn;
- __le32 exp_cmd_sn;
- __le32 max_cmd_sn;
- __le16 param1_rsrv;
- u8 async_vcode;
- u8 async_event;
- __le16 param3_rsrv;
- __le16 param2_rsrv;
- __le32 reserved7;
+struct lun_mapper_addr_reserved {
+ struct regpair lun_mapper_addr;
+ u8 reserved0[8];
+};
+
+/* RDIF context for DIF on immediate */
+struct dif_on_immediate_params {
+ __le32 initial_ref_tag;
+ __le16 application_tag;
+ __le16 application_tag_mask;
+ __le16 flags1;
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_SHIFT 0
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_SHIFT 1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_SHIFT 2
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_SHIFT 3
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_SHIFT 4
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_SHIFT 5
+#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_SHIFT 6
+#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_SHIFT 7
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_MASK 0x3
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_SHIFT 8
+#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_MASK 0xF
+#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_SHIFT 10
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_SHIFT 14
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_SHIFT 15
+ u8 flags0;
+#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_SHIFT 0
+#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_SHIFT 1
+#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_SHIFT 2
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_SHIFT 3
+#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_MASK 0x3
+#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_SHIFT 4
+#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_SHIFT 6
+#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_SHIFT 7
+ u8 reserved_zero[5];
+};
+
+/* iSCSI DIF on immediate mode attributes union */
+union dif_configuration_params {
+ struct lun_mapper_addr_reserved lun_mapper_address;
+ struct dif_on_immediate_params def_dif_conf;
+};
+
+/* Union of data/r2t sequence number */
+union iscsi_seq_num {
+ __le16 data_sn;
+ __le16 r2t_sn;
};
-struct iscsi_cmd_hdr {
- __le16 reserved1;
- u8 flags_attr;
-#define ISCSI_CMD_HDR_ATTR_MASK 0x7
-#define ISCSI_CMD_HDR_ATTR_SHIFT 0
-#define ISCSI_CMD_HDR_RSRV_MASK 0x3
-#define ISCSI_CMD_HDR_RSRV_SHIFT 3
-#define ISCSI_CMD_HDR_WRITE_MASK 0x1
-#define ISCSI_CMD_HDR_WRITE_SHIFT 5
-#define ISCSI_CMD_HDR_READ_MASK 0x1
-#define ISCSI_CMD_HDR_READ_SHIFT 6
-#define ISCSI_CMD_HDR_FINAL_MASK 0x1
-#define ISCSI_CMD_HDR_FINAL_SHIFT 7
- u8 hdr_first_byte;
-#define ISCSI_CMD_HDR_OPCODE_MASK 0x3F
-#define ISCSI_CMD_HDR_OPCODE_SHIFT 0
-#define ISCSI_CMD_HDR_IMM_MASK 0x1
-#define ISCSI_CMD_HDR_IMM_SHIFT 6
-#define ISCSI_CMD_HDR_RSRV1_MASK 0x1
-#define ISCSI_CMD_HDR_RSRV1_SHIFT 7
- __le32 hdr_second_dword;
-#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24
- struct regpair lun;
- __le32 itt;
- __le32 expected_transfer_length;
- __le32 cmd_sn;
- __le32 exp_stat_sn;
- __le32 cdb[4];
+/* iSCSI DIF flags */
+struct iscsi_dif_flags {
+ u8 flags;
+#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
+#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
+#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1
+#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4
+#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7
+#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5
+};
+
+/* The iscsi task state of Ystorm */
+struct ystorm_iscsi_task_state {
+ struct scsi_cached_sges data_desc;
+ struct scsi_sgl_params sgl_params;
+ __le32 exp_r2t_sn;
+ __le32 buffer_offset;
+ union iscsi_seq_num seq_num;
+ struct iscsi_dif_flags dif_flags;
+ u8 flags;
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1
+#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_MASK 0x1
+#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_SHIFT 2
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x1F
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 3
};
+/* The iscsi task retransmit options of Ystorm */
+struct ystorm_iscsi_task_rxmit_opt {
+ __le32 fast_rxmit_sge_offset;
+ __le32 scan_start_buffer_offset;
+ __le32 fast_rxmit_buffer_offset;
+ u8 scan_start_sgl_index;
+ u8 fast_rxmit_sgl_index;
+ __le16 reserved;
+};
+
+/* iSCSI Common PDU header */
struct iscsi_common_hdr {
u8 hdr_status;
u8 hdr_response;
u8 hdr_flags;
u8 hdr_first_byte;
-#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F
-#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0
-#define ISCSI_COMMON_HDR_IMM_MASK 0x1
-#define ISCSI_COMMON_HDR_IMM_SHIFT 6
-#define ISCSI_COMMON_HDR_RSRV_MASK 0x1
-#define ISCSI_COMMON_HDR_RSRV_SHIFT 7
+#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F
+#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0
+#define ISCSI_COMMON_HDR_IMM_MASK 0x1
+#define ISCSI_COMMON_HDR_IMM_SHIFT 6
+#define ISCSI_COMMON_HDR_RSRV_MASK 0x1
+#define ISCSI_COMMON_HDR_RSRV_SHIFT 7
__le32 hdr_second_dword;
-#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun_reserved;
__le32 itt;
__le32 ttt;
@@ -215,86 +244,60 @@ struct iscsi_common_hdr {
__le32 data[3];
};
-struct iscsi_conn_offload_params {
- struct regpair sq_pbl_addr;
- struct regpair r2tq_pbl_addr;
- struct regpair xhq_pbl_addr;
- struct regpair uhq_pbl_addr;
- __le32 initial_ack;
- __le16 physical_q0;
- __le16 physical_q1;
- u8 flags;
-#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1
-#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
-#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
-#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3
- u8 pbl_page_size_log;
- u8 pbe_page_size_log;
- u8 default_cq;
- __le32 stat_sn;
-};
-
-struct iscsi_slow_path_hdr {
- u8 op_code;
- u8 flags;
-#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF
-#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0
-#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7
-#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4
-#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1
-#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7
-};
-
-struct iscsi_conn_update_ramrod_params {
- struct iscsi_slow_path_hdr hdr;
- __le16 conn_id;
- __le32 fw_cid;
- u8 flags;
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x3
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 6
- u8 reserved0[3];
- __le32 max_seq_size;
- __le32 max_send_pdu_length;
- __le32 max_recv_pdu_length;
- __le32 first_seq_length;
+/* iSCSI Command PDU header */
+struct iscsi_cmd_hdr {
+ __le16 reserved1;
+ u8 flags_attr;
+#define ISCSI_CMD_HDR_ATTR_MASK 0x7
+#define ISCSI_CMD_HDR_ATTR_SHIFT 0
+#define ISCSI_CMD_HDR_RSRV_MASK 0x3
+#define ISCSI_CMD_HDR_RSRV_SHIFT 3
+#define ISCSI_CMD_HDR_WRITE_MASK 0x1
+#define ISCSI_CMD_HDR_WRITE_SHIFT 5
+#define ISCSI_CMD_HDR_READ_MASK 0x1
+#define ISCSI_CMD_HDR_READ_SHIFT 6
+#define ISCSI_CMD_HDR_FINAL_MASK 0x1
+#define ISCSI_CMD_HDR_FINAL_SHIFT 7
+ u8 hdr_first_byte;
+#define ISCSI_CMD_HDR_OPCODE_MASK 0x3F
+#define ISCSI_CMD_HDR_OPCODE_SHIFT 0
+#define ISCSI_CMD_HDR_IMM_MASK 0x1
+#define ISCSI_CMD_HDR_IMM_SHIFT 6
+#define ISCSI_CMD_HDR_RSRV1_MASK 0x1
+#define ISCSI_CMD_HDR_RSRV1_SHIFT 7
+ __le32 hdr_second_dword;
+#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair lun;
+ __le32 itt;
+ __le32 expected_transfer_length;
+ __le32 cmd_sn;
__le32 exp_stat_sn;
+ __le32 cdb[4];
};
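
Reading the packed hdr_second_dword back out goes through the matching GET_FIELD() helper. A decoding sketch, assuming GET_FIELD() from common_hsi.h; the function name is hypothetical:

	static u32 iscsi_cmd_data_seg_len(const struct iscsi_cmd_hdr *hdr)
	{
		u32 dword = le32_to_cpu(hdr->hdr_second_dword);

		return GET_FIELD(dword, ISCSI_CMD_HDR_DATA_SEG_LEN);	/* low 24 bits */
	}
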
+/* iSCSI Command PDU header with Extended CDB (Initiator Mode) */
struct iscsi_ext_cdb_cmd_hdr {
__le16 reserved1;
u8 flags_attr;
-#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7
-#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0
-#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3
-#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3
-#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1
-#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5
-#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1
-#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6
-#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1
-#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7
+#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7
+#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0
+#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3
+#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3
+#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1
+#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5
+#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1
+#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6
+#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1
+#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF
-#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24
+#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF
+#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 expected_transfer_length;
@@ -303,26 +306,27 @@ struct iscsi_ext_cdb_cmd_hdr {
struct scsi_sge cdb_sge;
};
+/* iSCSI login request PDU header */
struct iscsi_login_req_hdr {
u8 version_min;
u8 version_max;
u8 flags_attr;
-#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3
-#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0
-#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3
-#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2
-#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3
-#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4
-#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1
-#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6
-#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1
-#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7
+#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3
+#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3
+#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2
+#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3
+#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4
+#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1
+#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6
+#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1
+#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24
__le32 isid_tabc;
__le16 tsih;
__le16 isid_d;
@@ -334,6 +338,7 @@ struct iscsi_login_req_hdr {
__le32 reserved2[4];
};
+/* iSCSI logout request PDU header */
struct iscsi_logout_req_hdr {
__le16 reserved0;
u8 reason_code;
@@ -348,13 +353,14 @@ struct iscsi_logout_req_hdr {
__le32 reserved4[4];
};
+/* iSCSI Data-out PDU header */
struct iscsi_data_out_hdr {
__le16 reserved1;
u8 flags_attr;
-#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F
-#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0
-#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1
-#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7
+#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F
+#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0
+#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1
+#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7
u8 opcode;
__le32 reserved2;
struct regpair lun;
@@ -368,22 +374,23 @@ struct iscsi_data_out_hdr {
__le32 reserved5;
};
+/* iSCSI Data-in PDU header */
struct iscsi_data_in_hdr {
u8 status_rsvd;
u8 reserved1;
u8 flags;
-#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1
-#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0
-#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1
-#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1
-#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1
-#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2
-#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7
-#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3
-#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1
-#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6
-#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1
-#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7
+#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1
+#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0
+#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1
+#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1
+#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1
+#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2
+#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7
+#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3
+#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1
+#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6
+#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1
+#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7
u8 opcode;
__le32 reserved2;
struct regpair lun;
@@ -397,6 +404,7 @@ struct iscsi_data_in_hdr {
__le32 residual_count;
};
+/* iSCSI R2T PDU header */
struct iscsi_r2t_hdr {
u8 reserved0[3];
u8 opcode;
@@ -412,13 +420,14 @@ struct iscsi_r2t_hdr {
__le32 desired_data_trns_len;
};
+/* iSCSI NOP-out PDU header */
struct iscsi_nop_out_hdr {
__le16 reserved1;
u8 flags_attr;
-#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F
-#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0
-#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1
-#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7
+#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F
+#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0
+#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1
+#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7
u8 opcode;
__le32 reserved2;
struct regpair lun;
@@ -432,19 +441,20 @@ struct iscsi_nop_out_hdr {
__le32 reserved6;
};
+/* iSCSI NOP-in PDU header */
struct iscsi_nop_in_hdr {
__le16 reserved0;
u8 flags_attr;
-#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F
-#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0
-#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1
-#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7
+#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F
+#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0
+#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1
+#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 ttt;
@@ -456,26 +466,27 @@ struct iscsi_nop_in_hdr {
__le32 reserved7;
};
+/* iSCSI Login Response PDU header */
struct iscsi_login_response_hdr {
u8 version_active;
u8 version_max;
u8 flags_attr;
-#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3
-#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0
-#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3
-#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2
-#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3
-#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4
-#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1
-#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6
-#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1
-#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7
+#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2
+#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4
+#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1
+#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6
+#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1
+#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
__le32 isid_tabc;
__le16 tsih;
__le16 isid_d;
@@ -490,16 +501,17 @@ struct iscsi_login_response_hdr {
__le32 reserved4[2];
};
+/* iSCSI Logout Response PDU header */
struct iscsi_logout_response_hdr {
u8 reserved1;
u8 response;
u8 flags;
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
__le32 reserved2[2];
__le32 itt;
__le32 reserved3;
@@ -512,21 +524,22 @@ struct iscsi_logout_response_hdr {
__le32 reserved5[1];
};
+/* iSCSI Text Request PDU header */
struct iscsi_text_request_hdr {
__le16 reserved0;
u8 flags_attr;
-#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F
-#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0
-#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1
-#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6
-#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1
-#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7
+#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F
+#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0
+#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1
+#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6
+#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1
+#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 ttt;
@@ -535,21 +548,22 @@ struct iscsi_text_request_hdr {
__le32 reserved4[4];
};
+/* iSCSI Text Response PDU header */
struct iscsi_text_response_hdr {
__le16 reserved1;
u8 flags;
-#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F
-#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0
-#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1
-#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6
-#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1
-#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7
+#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F
+#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1
+#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6
+#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1
+#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 ttt;
@@ -559,15 +573,16 @@ struct iscsi_text_response_hdr {
__le32 reserved4[3];
};
+/* iSCSI TMF Request PDU header */
struct iscsi_tmf_request_hdr {
__le16 reserved0;
u8 function;
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 rtt;
@@ -584,10 +599,10 @@ struct iscsi_tmf_response_hdr {
u8 hdr_flags;
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair reserved0;
__le32 itt;
__le32 reserved1;
@@ -597,16 +612,17 @@ struct iscsi_tmf_response_hdr {
__le32 reserved4[3];
};
+/* iSCSI Response PDU header */
struct iscsi_response_hdr {
u8 hdr_status;
u8 hdr_response;
u8 hdr_flags;
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair lun;
__le32 itt;
__le32 snack_tag;
@@ -618,16 +634,17 @@ struct iscsi_response_hdr {
__le32 residual_count;
};
+/* iSCSI Reject PDU header */
struct iscsi_reject_hdr {
u8 reserved4;
u8 hdr_reason;
u8 hdr_flags;
u8 opcode;
__le32 hdr_second_dword;
-#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
-#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF
-#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair reserved0;
__le32 all_ones;
__le32 reserved2;
@@ -638,6 +655,35 @@ struct iscsi_reject_hdr {
__le32 reserved3[2];
};
+/* iSCSI Asynchronous Message PDU header */
+struct iscsi_async_msg_hdr {
+ __le16 reserved0;
+ u8 flags_attr;
+#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F
+#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0
+#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1
+#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7
+ u8 opcode;
+ __le32 hdr_second_dword;
+#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
+#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24
+ struct regpair lun;
+ __le32 all_ones;
+ __le32 reserved1;
+ __le32 stat_sn;
+ __le32 exp_cmd_sn;
+ __le32 max_cmd_sn;
+ __le16 param1_rsrv;
+ u8 async_vcode;
+ u8 async_event;
+ __le16 param3_rsrv;
+ __le16 param2_rsrv;
+ __le32 reserved7;
+};
+
+/* PDU header part of Ystorm task context */
union iscsi_task_hdr {
struct iscsi_common_hdr common;
struct data_hdr data;
@@ -661,6 +707,336 @@ union iscsi_task_hdr {
struct iscsi_async_msg_hdr async_msg;
};
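
The union gives firmware and driver alternative layouts over one stored PDU header, selected by the opcode carried in the common view. A minimal sketch that stays within the fields visible in this hunk, again assuming GET_FIELD() from common_hsi.h; iscsi_dump_hdr() is hypothetical:

	static void iscsi_dump_hdr(union iscsi_task_hdr *hdr)
	{
		u8 opcode = GET_FIELD(hdr->common.hdr_first_byte,
				      ISCSI_COMMON_HDR_OPCODE);

		pr_debug("iscsi pdu: opcode 0x%x itt 0x%x\n",
			 opcode, le32_to_cpu(hdr->common.itt));
	}
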
+/* The iscsi storm task context of Ystorm */
+struct ystorm_iscsi_task_st_ctx {
+ struct ystorm_iscsi_task_state state;
+ struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
+ union iscsi_task_hdr pdu_hdr;
+};
+
+struct ystorm_iscsi_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 word0;
+ u8 flags0;
+#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK 0x1 /* bit3 */
+#define YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT 7
+ u8 flags1;
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 byte2;
+ __le32 TTT;
+ u8 byte3;
+ u8 byte4;
+ __le16 word1;
+};
+
+struct mstorm_iscsi_task_ag_ctx {
+ u8 cdu_validation;
+ u8 byte1;
+ __le16 task_cid;
+ u8 flags0;
+#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
+#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
+ u8 flags1;
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 byte2;
+ __le32 reg0;
+ u8 byte3;
+ u8 byte4;
+ __le16 word1;
+};
+
+struct ustorm_iscsi_task_ag_ctx {
+ u8 reserved;
+ u8 state;
+ __le16 icid;
+ u8 flags0;
+#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
+ u8 flags1;
+#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+ u8 flags2;
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
+#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+ __le32 dif_err_intervals;
+ __le32 dif_error_1st_interval;
+ __le32 rcv_cont_len;
+ __le32 exp_cont_len;
+ __le32 total_data_acked;
+ __le32 exp_data_acked;
+ u8 byte2;
+ u8 byte3;
+ __le16 word1;
+ __le16 next_tid;
+ __le16 word3;
+ __le32 hdr_residual_count;
+ __le32 exp_r2t_sn;
+};
+
+/* The iscsi storm task context of Mstorm */
+struct mstorm_iscsi_task_st_ctx {
+ struct scsi_cached_sges data_desc;
+ struct scsi_sgl_params sgl_params;
+ __le32 rem_task_size;
+ __le32 data_buffer_offset;
+ u8 task_type;
+ struct iscsi_dif_flags dif_flags;
+ __le16 dif_task_icid;
+ struct regpair sense_db;
+ __le32 expected_itt;
+ __le32 reserved1;
+};
+
+struct iscsi_reg1 {
+ __le32 reg1_map;
+#define ISCSI_REG1_NUM_SGES_MASK 0xF
+#define ISCSI_REG1_NUM_SGES_SHIFT 0
+#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF
+#define ISCSI_REG1_RESERVED1_SHIFT 4
+};
+
+struct tqe_opaque {
+ __le16 opaque[2];
+};
+
+/* The iscsi storm task context of Ustorm */
+struct ustorm_iscsi_task_st_ctx {
+ __le32 rem_rcv_len;
+ __le32 exp_data_transfer_len;
+ __le32 exp_data_sn;
+ struct regpair lun;
+ struct iscsi_reg1 reg1;
+ u8 flags2;
+#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1
+ struct iscsi_dif_flags dif_flags;
+ __le16 reserved3;
+ struct tqe_opaque tqe_opaque_list;
+ __le32 reserved5;
+ __le32 reserved6;
+ __le32 reserved7;
+ u8 task_type;
+ u8 error_flags;
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
+#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3
+ u8 flags;
+#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3
+#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0
+#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
+#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5
+#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7
+ u8 cq_rss_number;
+};
+
+/* iscsi task context */
+struct iscsi_task_context {
+ struct ystorm_iscsi_task_st_ctx ystorm_st_context;
+ struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
+ struct regpair ystorm_ag_padding[2];
+ struct tdif_task_context tdif_context;
+ struct mstorm_iscsi_task_ag_ctx mstorm_ag_context;
+ struct regpair mstorm_ag_padding[2];
+ struct ustorm_iscsi_task_ag_ctx ustorm_ag_context;
+ struct mstorm_iscsi_task_st_ctx mstorm_st_context;
+ struct ustorm_iscsi_task_st_ctx ustorm_st_context;
+ struct rdif_task_context rdif_context;
+};
+
+/* iSCSI connection offload params passed by driver to FW in ISCSI offload
+ * ramrod.
+ */
+struct iscsi_conn_offload_params {
+ struct regpair sq_pbl_addr;
+ struct regpair r2tq_pbl_addr;
+ struct regpair xhq_pbl_addr;
+ struct regpair uhq_pbl_addr;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ u8 flags;
+#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
+#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3
+ u8 default_cq;
+ __le16 reserved0;
+ __le32 stat_sn;
+ __le32 initial_ack;
+};
+
+/* iSCSI connection statistics */
+struct iscsi_conn_stats_params {
+ struct regpair iscsi_tcp_tx_packets_cnt;
+ struct regpair iscsi_tcp_tx_bytes_cnt;
+ struct regpair iscsi_tcp_tx_rxmit_cnt;
+ struct regpair iscsi_tcp_rx_packets_cnt;
+ struct regpair iscsi_tcp_rx_bytes_cnt;
+ struct regpair iscsi_tcp_rx_dup_ack_cnt;
+ __le32 iscsi_tcp_rx_chksum_err_cnt;
+ __le32 reserved;
+};
+
+/* iSCSI connection update params passed by driver to FW in ISCSI update
+ * ramrod.
+ */
+struct iscsi_conn_update_ramrod_params {
+ __le16 reserved0;
+ __le16 conn_id;
+ __le32 reserved1;
+ u8 flags;
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT 6
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_SHIFT 7
+ u8 reserved3[3];
+ __le32 max_seq_size;
+ __le32 max_send_pdu_length;
+ __le32 max_recv_pdu_length;
+ __le32 first_seq_length;
+ __le32 exp_stat_sn;
+ union dif_configuration_params dif_on_imme_params;
+};
+
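/*
 * Illustrative sketch (editor's example, not part of this patch): one
 * plausible way a driver could pack the update-ramrod flag bits defined
 * above with SET_FIELD(); the helper name and its boolean parameters are
 * hypothetical.
 */
static void iscsi_fill_update_flags(struct iscsi_conn_update_ramrod_params *p,
                                    bool hd_en, bool dd_en, bool initial_r2t)
{
        p->flags = 0;
        SET_FIELD(p->flags, ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, hd_en);
        SET_FIELD(p->flags, ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, dd_en);
        SET_FIELD(p->flags, ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T,
                  initial_r2t);
}
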
+/* iSCSI CQ element */
struct iscsi_cqe_common {
__le16 conn_id;
u8 cqe_type;
@@ -669,6 +1045,7 @@ struct iscsi_cqe_common {
union iscsi_task_hdr iscsi_hdr;
};
+/* iSCSI CQ element */
struct iscsi_cqe_solicited {
__le16 conn_id;
u8 cqe_type;
@@ -678,10 +1055,11 @@ struct iscsi_cqe_solicited {
u8 fw_dbg_field;
u8 caused_conn_err;
u8 reserved0[3];
- __le32 reserved1[1];
+ __le32 data_truncated_bytes;
union iscsi_task_hdr iscsi_hdr;
};
+/* iSCSI CQ element */
struct iscsi_cqe_unsolicited {
__le16 conn_id;
u8 cqe_type;
@@ -689,16 +1067,19 @@ struct iscsi_cqe_unsolicited {
__le16 reserved0;
u8 reserved1;
u8 unsol_cqe_type;
- struct regpair rqe_opaque;
+ __le16 rqe_opaque;
+ __le16 reserved2[3];
union iscsi_task_hdr iscsi_hdr;
};
+/* iSCSI CQ element */
union iscsi_cqe {
struct iscsi_cqe_common cqe_common;
struct iscsi_cqe_solicited cqe_solicited;
struct iscsi_cqe_unsolicited cqe_unsolicited;
};
+/* iSCSI CQE type */
enum iscsi_cqes_type {
ISCSI_CQE_TYPE_SOLICITED = 1,
ISCSI_CQE_TYPE_UNSOLICITED,
@@ -708,6 +1089,7 @@ enum iscsi_cqes_type {
MAX_ISCSI_CQES_TYPE
};
+/* iSCSI unsolicited CQE type */
enum iscsi_cqe_unsolicited_type {
ISCSI_CQE_UNSOLICITED_NONE,
ISCSI_CQE_UNSOLICITED_SINGLE,
@@ -717,37 +1099,28 @@ enum iscsi_cqe_unsolicited_type {
MAX_ISCSI_CQE_UNSOLICITED_TYPE
};
-
+/* iscsi debug modes */
struct iscsi_debug_modes {
u8 flags;
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT 6
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK 0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT 7
-};
-
-struct iscsi_dif_flags {
- u8 flags;
-#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
-#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
-#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1
-#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4
-#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7
-#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5
-};
-
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_SHIFT 6
+#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_SHIFT 7
+};
+
+/* iSCSI kernel completion queue IDs */
enum iscsi_eqe_opcode {
ISCSI_EVENT_TYPE_INIT_FUNC = 0,
ISCSI_EVENT_TYPE_DESTROY_FUNC,
@@ -756,9 +1129,9 @@ enum iscsi_eqe_opcode {
ISCSI_EVENT_TYPE_CLEAR_SQ,
ISCSI_EVENT_TYPE_TERMINATE_CONN,
ISCSI_EVENT_TYPE_MAC_UPDATE_CONN,
+ ISCSI_EVENT_TYPE_COLLECT_STATS_CONN,
ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
- RESERVED9,
ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD,
@@ -772,6 +1145,7 @@ enum iscsi_eqe_opcode {
MAX_ISCSI_EQE_OPCODE
};
+/* iSCSI EQE and CQE completion status */
enum iscsi_error_types {
ISCSI_STATUS_NONE = 0,
ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1,
@@ -823,7 +1197,7 @@ enum iscsi_error_types {
MAX_ISCSI_ERROR_TYPES
};
-
+/* iSCSI Ramrod Command IDs */
enum iscsi_ramrod_cmd_id {
ISCSI_RAMROD_CMD_ID_UNUSED = 0,
ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1,
@@ -833,348 +1207,88 @@ enum iscsi_ramrod_cmd_id {
ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7,
+ ISCSI_RAMROD_CMD_ID_CONN_STATS = 8,
MAX_ISCSI_RAMROD_CMD_ID
};
-struct iscsi_reg1 {
- __le32 reg1_map;
-#define ISCSI_REG1_NUM_SGES_MASK 0xF
-#define ISCSI_REG1_NUM_SGES_SHIFT 0
-#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF
-#define ISCSI_REG1_RESERVED1_SHIFT 4
-};
-
-union iscsi_seq_num {
- __le16 data_sn;
- __le16 r2t_sn;
-};
-
+/* iSCSI connection MAC update request */
struct iscsi_spe_conn_mac_update {
- struct iscsi_slow_path_hdr hdr;
+ __le16 reserved0;
__le16 conn_id;
- __le32 fw_cid;
+ __le32 reserved1;
__le16 remote_mac_addr_lo;
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
- u8 reserved0[2];
+ u8 reserved2[2];
};
+/* iSCSI and TCP connection (Option 1) offload params passed by driver to FW in
+ * iSCSI offload ramrod.
+ */
struct iscsi_spe_conn_offload {
- struct iscsi_slow_path_hdr hdr;
+ __le16 reserved0;
__le16 conn_id;
- __le32 fw_cid;
+ __le32 reserved1;
struct iscsi_conn_offload_params iscsi;
struct tcp_offload_params tcp;
};
+/* iSCSI and TCP connection (Option 2) offload params passed by driver to FW in
+ * iSCSI offload ramrod.
+ */
struct iscsi_spe_conn_offload_option2 {
- struct iscsi_slow_path_hdr hdr;
+ __le16 reserved0;
__le16 conn_id;
- __le32 fw_cid;
+ __le32 reserved1;
struct iscsi_conn_offload_params iscsi;
struct tcp_offload_params_opt2 tcp;
};
-struct iscsi_spe_conn_termination {
- struct iscsi_slow_path_hdr hdr;
+/* iSCSI collect connection statistics request */
+struct iscsi_spe_conn_statistics {
+ __le16 reserved0;
__le16 conn_id;
- __le32 fw_cid;
- u8 abortive;
- u8 reserved0[7];
- struct regpair queue_cnts_addr;
- struct regpair query_params_addr;
+ __le32 reserved1;
+ u8 reset_stats;
+ u8 reserved2[7];
+ struct regpair stats_cnts_addr;
};
-struct iscsi_spe_func_dstry {
- struct iscsi_slow_path_hdr hdr;
+/* iSCSI connection termination request */
+struct iscsi_spe_conn_termination {
__le16 reserved0;
+ __le16 conn_id;
__le32 reserved1;
+ u8 abortive;
+ u8 reserved2[7];
+ struct regpair queue_cnts_addr;
+ struct regpair query_params_addr;
};
+/* iSCSI firmware function init parameters */
struct iscsi_spe_func_init {
- struct iscsi_slow_path_hdr hdr;
__le16 half_way_close_timeout;
u8 num_sq_pages_in_ring;
u8 num_r2tq_pages_in_ring;
u8 num_uhq_pages_in_ring;
u8 ll2_rx_queue_id;
- u8 ooo_enable;
+ u8 flags;
+#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1
+#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0
+#define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK 0x7F
+#define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT 1
struct iscsi_debug_modes debug_mode;
- __le16 reserved1;
- __le32 reserved2;
- __le32 reserved3;
- __le32 reserved4;
+ u8 params;
+#define ISCSI_SPE_FUNC_INIT_MAX_SYN_RT_MASK 0xF
+#define ISCSI_SPE_FUNC_INIT_MAX_SYN_RT_SHIFT 0
+#define ISCSI_SPE_FUNC_INIT_RESERVED1_MASK 0xF
+#define ISCSI_SPE_FUNC_INIT_RESERVED1_SHIFT 4
+ u8 reserved2[7];
struct scsi_init_func_params func_params;
struct scsi_init_func_queues q_params;
};
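/*
 * Illustrative sketch (editor's example, not part of this patch): the old
 * ooo_enable byte is replaced above, so enabling statistics counters and
 * setting the max SYN retransmit count now go through the new flags/params
 * bitfields; the function name and the value 4 are hypothetical.
 */
static void iscsi_func_init_example(struct iscsi_spe_func_init *init)
{
        SET_FIELD(init->flags, ISCSI_SPE_FUNC_INIT_COUNTERS_EN, 1);
        SET_FIELD(init->params, ISCSI_SPE_FUNC_INIT_MAX_SYN_RT, 4);
}
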
-struct ystorm_iscsi_task_state {
- struct scsi_cached_sges data_desc;
- struct scsi_sgl_params sgl_params;
- __le32 exp_r2t_sn;
- __le32 buffer_offset;
- union iscsi_seq_num seq_num;
- struct iscsi_dif_flags dif_flags;
- u8 flags;
-#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1
-#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
-#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1
-#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1
-#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x3F
-#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 2
-};
-
-struct ystorm_iscsi_task_rxmit_opt {
- __le32 fast_rxmit_sge_offset;
- __le32 scan_start_buffer_offset;
- __le32 fast_rxmit_buffer_offset;
- u8 scan_start_sgl_index;
- u8 fast_rxmit_sgl_index;
- __le16 reserved;
-};
-
-struct ystorm_iscsi_task_st_ctx {
- struct ystorm_iscsi_task_state state;
- struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
- union iscsi_task_hdr pdu_hdr;
-};
-
-struct ystorm_iscsi_task_ag_ctx {
- u8 reserved;
- u8 byte1;
- __le16 word0;
- u8 flags0;
-#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
-#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
- u8 flags1;
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
-#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
- u8 flags2;
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
- u8 byte2;
- __le32 TTT;
- u8 byte3;
- u8 byte4;
- __le16 word1;
-};
-
-struct mstorm_iscsi_task_ag_ctx {
- u8 cdu_validation;
- u8 byte1;
- __le16 task_cid;
- u8 flags0;
-#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
-#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
- u8 flags1;
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
- u8 flags2;
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
- u8 byte2;
- __le32 reg0;
- u8 byte3;
- u8 byte4;
- __le16 word1;
-};
-
-struct ustorm_iscsi_task_ag_ctx {
- u8 reserved;
- u8 state;
- __le16 icid;
- u8 flags0;
-#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
- u8 flags1;
-#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3
-#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2
-#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
-#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
- u8 flags2;
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
-#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
-#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
-#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
- u8 flags3;
-#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
- __le32 dif_err_intervals;
- __le32 dif_error_1st_interval;
- __le32 rcv_cont_len;
- __le32 exp_cont_len;
- __le32 total_data_acked;
- __le32 exp_data_acked;
- u8 next_tid_valid;
- u8 byte3;
- __le16 word1;
- __le16 next_tid;
- __le16 word3;
- __le32 hdr_residual_count;
- __le32 exp_r2t_sn;
-};
-
-struct mstorm_iscsi_task_st_ctx {
- struct scsi_cached_sges data_desc;
- struct scsi_sgl_params sgl_params;
- __le32 rem_task_size;
- __le32 data_buffer_offset;
- u8 task_type;
- struct iscsi_dif_flags dif_flags;
- u8 reserved0[2];
- struct regpair sense_db;
- __le32 expected_itt;
- __le32 reserved1;
-};
-
-struct ustorm_iscsi_task_st_ctx {
- __le32 rem_rcv_len;
- __le32 exp_data_transfer_len;
- __le32 exp_data_sn;
- struct regpair lun;
- struct iscsi_reg1 reg1;
- u8 flags2;
-#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1
- struct iscsi_dif_flags dif_flags;
- __le16 reserved3;
- __le32 reserved4;
- __le32 reserved5;
- __le32 reserved6;
- __le32 reserved7;
- u8 task_type;
- u8 error_flags;
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
-#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3
- u8 flags;
-#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3
-#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0
-#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
-#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
-#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
-#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5
-#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7
- u8 cq_rss_number;
-};
-
-struct iscsi_task_context {
- struct ystorm_iscsi_task_st_ctx ystorm_st_context;
- struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
- struct regpair ystorm_ag_padding[2];
- struct tdif_task_context tdif_context;
- struct mstorm_iscsi_task_ag_ctx mstorm_ag_context;
- struct regpair mstorm_ag_padding[2];
- struct ustorm_iscsi_task_ag_ctx ustorm_ag_context;
- struct mstorm_iscsi_task_st_ctx mstorm_st_context;
- struct ustorm_iscsi_task_st_ctx ustorm_st_context;
- struct rdif_task_context rdif_context;
-};
-
+/* iSCSI task type */
enum iscsi_task_type {
ISCSI_TASK_TYPE_INITIATOR_WRITE,
ISCSI_TASK_TYPE_INITIATOR_READ,
@@ -1186,53 +1300,57 @@ enum iscsi_task_type {
ISCSI_TASK_TYPE_TARGET_READ,
ISCSI_TASK_TYPE_TARGET_RESPONSE,
ISCSI_TASK_TYPE_LOGIN_RESPONSE,
+ ISCSI_TASK_TYPE_TARGET_IMM_W_DIF,
MAX_ISCSI_TASK_TYPE
};
+/* iSCSI DesiredDataTransferLength/ttt union */
union iscsi_ttt_txlen_union {
__le32 desired_tx_len;
__le32 ttt;
};
+/* iSCSI uHQ element */
struct iscsi_uhqe {
__le32 reg1;
-#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF
-#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0
-#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1
-#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20
-#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1
-#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21
-#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1
-#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22
-#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1
-#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23
-#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF
-#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24
+#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF
+#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0
+#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1
+#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20
+#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1
+#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21
+#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1
+#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22
+#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1
+#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23
+#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF
+#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24
__le32 reg2;
-#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF
-#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0
-#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF
-#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24
+#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF
+#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0
+#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF
+#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24
};
-
+/* iSCSI WQ element */
struct iscsi_wqe {
__le16 task_id;
u8 flags;
-#define ISCSI_WQE_WQE_TYPE_MASK 0x7
-#define ISCSI_WQE_WQE_TYPE_SHIFT 0
-#define ISCSI_WQE_NUM_SGES_MASK 0xF
-#define ISCSI_WQE_NUM_SGES_SHIFT 3
-#define ISCSI_WQE_RESPONSE_MASK 0x1
-#define ISCSI_WQE_RESPONSE_SHIFT 7
+#define ISCSI_WQE_WQE_TYPE_MASK 0x7
+#define ISCSI_WQE_WQE_TYPE_SHIFT 0
+#define ISCSI_WQE_NUM_SGES_MASK 0xF
+#define ISCSI_WQE_NUM_SGES_SHIFT 3
+#define ISCSI_WQE_RESPONSE_MASK 0x1
+#define ISCSI_WQE_RESPONSE_SHIFT 7
struct iscsi_dif_flags prot_flags;
__le32 contlen_cdbsize;
-#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF
-#define ISCSI_WQE_CONT_LEN_SHIFT 0
-#define ISCSI_WQE_CDB_SIZE_MASK 0xFF
-#define ISCSI_WQE_CDB_SIZE_SHIFT 24
+#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF
+#define ISCSI_WQE_CONT_LEN_SHIFT 0
+#define ISCSI_WQE_CDB_SIZE_MASK 0xFF
+#define ISCSI_WQE_CDB_SIZE_SHIFT 24
};
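/*
 * Illustrative sketch (editor's example, not part of this patch): packing a
 * normal SQ WQE with the bitfields above, using the iscsi_wqe_type enum that
 * follows; the function and its parameters are hypothetical.
 */
static void iscsi_build_wqe(struct iscsi_wqe *wqe, u16 task_id, u8 num_sges,
                            u32 cont_len)
{
        u32 dword = 0;

        wqe->task_id = cpu_to_le16(task_id);
        wqe->flags = 0;
        SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE, ISCSI_WQE_TYPE_NORMAL);
        SET_FIELD(wqe->flags, ISCSI_WQE_NUM_SGES, num_sges);

        /* Continuation length lives in the low 24 bits of contlen_cdbsize. */
        SET_FIELD(dword, ISCSI_WQE_CONT_LEN, cont_len);
        wqe->contlen_cdbsize = cpu_to_le32(dword);
}
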
+/* iSCSI wqe type */
enum iscsi_wqe_type {
ISCSI_WQE_TYPE_NORMAL,
ISCSI_WQE_TYPE_TASK_CLEANUP,
@@ -1244,6 +1362,7 @@ enum iscsi_wqe_type {
MAX_ISCSI_WQE_TYPE
};
+/* iSCSI xHQ element */
struct iscsi_xhqe {
union iscsi_ttt_txlen_union ttt_or_txlen;
__le32 exp_stat_sn;
@@ -1251,51 +1370,65 @@ struct iscsi_xhqe {
u8 total_ahs_length;
u8 opcode;
u8 flags;
-#define ISCSI_XHQE_FINAL_MASK 0x1
-#define ISCSI_XHQE_FINAL_SHIFT 0
-#define ISCSI_XHQE_STATUS_BIT_MASK 0x1
-#define ISCSI_XHQE_STATUS_BIT_SHIFT 1
-#define ISCSI_XHQE_NUM_SGES_MASK 0xF
-#define ISCSI_XHQE_NUM_SGES_SHIFT 2
-#define ISCSI_XHQE_RESERVED0_MASK 0x3
-#define ISCSI_XHQE_RESERVED0_SHIFT 6
+#define ISCSI_XHQE_FINAL_MASK 0x1
+#define ISCSI_XHQE_FINAL_SHIFT 0
+#define ISCSI_XHQE_STATUS_BIT_MASK 0x1
+#define ISCSI_XHQE_STATUS_BIT_SHIFT 1
+#define ISCSI_XHQE_NUM_SGES_MASK 0xF
+#define ISCSI_XHQE_NUM_SGES_SHIFT 2
+#define ISCSI_XHQE_RESERVED0_MASK 0x3
+#define ISCSI_XHQE_RESERVED0_SHIFT 6
union iscsi_seq_num seq_num;
__le16 reserved1;
};
+/* Per PF iSCSI receive path statistics - mStorm RAM structure */
struct mstorm_iscsi_stats_drv {
struct regpair iscsi_rx_dropped_pdus_task_not_valid;
+ struct regpair iscsi_rx_dup_ack_cnt;
};
+/* Per PF iSCSI transmit path statistics - pStorm RAM structure */
struct pstorm_iscsi_stats_drv {
struct regpair iscsi_tx_bytes_cnt;
struct regpair iscsi_tx_packet_cnt;
};
+/* Per PF iSCSI receive path statistics - tStorm RAM structure */
struct tstorm_iscsi_stats_drv {
struct regpair iscsi_rx_bytes_cnt;
struct regpair iscsi_rx_packet_cnt;
struct regpair iscsi_rx_new_ooo_isle_events_cnt;
+ struct regpair iscsi_rx_tcp_payload_bytes_cnt;
+ struct regpair iscsi_rx_tcp_pkt_cnt;
+ struct regpair iscsi_rx_pure_ack_cnt;
__le32 iscsi_cmdq_threshold_cnt;
__le32 iscsi_rq_threshold_cnt;
__le32 iscsi_immq_threshold_cnt;
};
+/* Per PF iSCSI receive path statistics - uStorm RAM structure */
struct ustorm_iscsi_stats_drv {
struct regpair iscsi_rx_data_pdu_cnt;
struct regpair iscsi_rx_r2t_pdu_cnt;
struct regpair iscsi_rx_total_pdu_cnt;
};
+/* Per PF iSCSI transmit path statistics - xStorm RAM structure */
struct xstorm_iscsi_stats_drv {
struct regpair iscsi_tx_go_to_slow_start_event_cnt;
struct regpair iscsi_tx_fast_retransmit_event_cnt;
+ struct regpair iscsi_tx_pure_ack_cnt;
+ struct regpair iscsi_tx_delayed_ack_cnt;
};
+/* Per PF iSCSI transmit path statistics - yStorm RAM structure */
struct ystorm_iscsi_stats_drv {
struct regpair iscsi_tx_data_pdu_cnt;
struct regpair iscsi_tx_r2t_pdu_cnt;
struct regpair iscsi_tx_total_pdu_cnt;
+ struct regpair iscsi_tx_tcp_payload_bytes_cnt;
+ struct regpair iscsi_tx_tcp_pkt_cnt;
};
struct tstorm_iscsi_task_ag_ctx {
@@ -1303,68 +1436,68 @@ struct tstorm_iscsi_task_ag_ctx {
u8 byte1;
__le16 word0;
u8 flags0;
-#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
+#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6
u8 flags3;
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7
u8 flags4;
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 byte2;
__le16 word1;
__le32 reg0;
@@ -1376,18 +1509,20 @@ struct tstorm_iscsi_task_ag_ctx {
__le32 reg1;
__le32 reg2;
};
+
+/* iSCSI doorbell data */
struct iscsi_db_data {
u8 params;
-#define ISCSI_DB_DATA_DEST_MASK 0x3
-#define ISCSI_DB_DATA_DEST_SHIFT 0
-#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3
-#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2
-#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1
-#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4
-#define ISCSI_DB_DATA_RESERVED_MASK 0x1
-#define ISCSI_DB_DATA_RESERVED_SHIFT 5
-#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3
-#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6
+#define ISCSI_DB_DATA_DEST_MASK 0x3
+#define ISCSI_DB_DATA_DEST_SHIFT 0
+#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3
+#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2
+#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1
+#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4
+#define ISCSI_DB_DATA_RESERVED_MASK 0x1
+#define ISCSI_DB_DATA_RESERVED_SHIFT 5
+#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6
u8 agg_flags;
__le16 sq_prod;
};
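/*
 * Illustrative sketch (editor's example, not part of this patch): filling
 * the doorbell data above before ringing the SQ doorbell. DB_DEST_XCM,
 * DB_AGG_CMD_SET and DQ_XCM_ISCSI_SQ_PROD_CMD are assumed to come from
 * common_hsi.h; the function itself is hypothetical.
 */
static void iscsi_init_db_data(struct iscsi_db_data *db, u16 sq_prod)
{
        db->params = 0;
        SET_FIELD(db->params, ISCSI_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(db->params, ISCSI_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
        SET_FIELD(db->params, ISCSI_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_ISCSI_SQ_PROD_CMD);
        db->agg_flags = 0;
        db->sq_prod = cpu_to_le16(sq_prod);
}
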
diff --git a/include/linux/qed/iwarp_common.h b/include/linux/qed/iwarp_common.h
index b8b3e1cfae90..14f9e4c0ddd9 100644
--- a/include/linux/qed/iwarp_common.h
+++ b/include/linux/qed/iwarp_common.h
@@ -1,37 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
+
#ifndef __IWARP_COMMON__
#define __IWARP_COMMON__
+
#include <linux/qed/rdma_common.h>
+
/************************/
/* IWARP FW CONSTANTS */
/************************/
@@ -40,14 +17,14 @@
#define IWARP_PASSIVE_MODE 1
#define IWARP_SHARED_QUEUE_PAGE_SIZE (0x8000)
-#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000)
-#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000)
-#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000)
-#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000)
+#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000)
+#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000)
+#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000)
+#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000)
-#define IWARP_REQ_MAX_INLINE_DATA_SIZE (128)
-#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176)
+#define IWARP_REQ_MAX_INLINE_DATA_SIZE (128)
+#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176)
-#define IWARP_MAX_QPS (64 * 1024)
+#define IWARP_MAX_QPS (64 * 1024)
#endif /* __IWARP_COMMON__ */
diff --git a/include/linux/qed/nvmetcp_common.h b/include/linux/qed/nvmetcp_common.h
new file mode 100644
index 000000000000..cc7c7481a0e0
--- /dev/null
+++ b/include/linux/qed/nvmetcp_common.h
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#ifndef __NVMETCP_COMMON__
+#define __NVMETCP_COMMON__
+
+#include "tcp_common.h"
+#include <linux/nvme-tcp.h>
+
+#define NVMETCP_SLOW_PATH_LAYER_CODE (6)
+#define NVMETCP_WQE_NUM_SGES_SLOWIO (0xf)
+
+/* NVMeTCP firmware function init parameters */
+struct nvmetcp_spe_func_init {
+ __le16 half_way_close_timeout;
+ u8 num_sq_pages_in_ring;
+ u8 num_r2tq_pages_in_ring;
+ u8 num_uhq_pages_in_ring;
+ u8 ll2_rx_queue_id;
+ u8 flags;
+#define NVMETCP_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1
+#define NVMETCP_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0
+#define NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE_MASK 0x1
+#define NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE_SHIFT 1
+#define NVMETCP_SPE_FUNC_INIT_RESERVED0_MASK 0x3F
+#define NVMETCP_SPE_FUNC_INIT_RESERVED0_SHIFT 2
+ u8 debug_flags;
+ __le16 reserved1;
+ u8 params;
+#define NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT_MASK 0xF
+#define NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT_SHIFT 0
+#define NVMETCP_SPE_FUNC_INIT_RESERVED1_MASK 0xF
+#define NVMETCP_SPE_FUNC_INIT_RESERVED1_SHIFT 4
+ u8 reserved2[5];
+ struct scsi_init_func_params func_params;
+ struct scsi_init_func_queues q_params;
+};
+
+/* NVMeTCP init params passed by driver to FW in NVMeTCP init ramrod. */
+struct nvmetcp_init_ramrod_params {
+ struct nvmetcp_spe_func_init nvmetcp_init_spe;
+ struct tcp_init_params tcp_init;
+};
+
+/* NVMeTCP Ramrod Command IDs */
+enum nvmetcp_ramrod_cmd_id {
+ NVMETCP_RAMROD_CMD_ID_UNUSED = 0,
+ NVMETCP_RAMROD_CMD_ID_INIT_FUNC = 1,
+ NVMETCP_RAMROD_CMD_ID_DESTROY_FUNC = 2,
+ NVMETCP_RAMROD_CMD_ID_OFFLOAD_CONN = 3,
+ NVMETCP_RAMROD_CMD_ID_UPDATE_CONN = 4,
+ NVMETCP_RAMROD_CMD_ID_TERMINATION_CONN = 5,
+ NVMETCP_RAMROD_CMD_ID_CLEAR_SQ = 6,
+ MAX_NVMETCP_RAMROD_CMD_ID
+};
+
+struct nvmetcp_glbl_queue_entry {
+ struct regpair cq_pbl_addr;
+ struct regpair reserved;
+};
+
+/* NVMeTCP conn level EQEs */
+enum nvmetcp_eqe_opcode {
+ NVMETCP_EVENT_TYPE_INIT_FUNC = 0, /* Response after init Ramrod */
+ NVMETCP_EVENT_TYPE_DESTROY_FUNC, /* Response after destroy Ramrod */
+ NVMETCP_EVENT_TYPE_OFFLOAD_CONN, /* Response after option 2 offload Ramrod */
+ NVMETCP_EVENT_TYPE_UPDATE_CONN, /* Response after update Ramrod */
+ NVMETCP_EVENT_TYPE_CLEAR_SQ, /* Response after clear sq Ramrod */
+ NVMETCP_EVENT_TYPE_TERMINATE_CONN, /* Response after termination Ramrod */
+ NVMETCP_EVENT_TYPE_RESERVED0,
+ NVMETCP_EVENT_TYPE_RESERVED1,
+ NVMETCP_EVENT_TYPE_ASYN_CONNECT_COMPLETE, /* Connect completed (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_TERMINATE_DONE, /* Termination completed (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_START_OF_ERROR_TYPES = 10, /* Separate EQs from err EQs */
+ NVMETCP_EVENT_TYPE_ASYN_ABORT_RCVD, /* TCP RST packet receive (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_CLOSE_RCVD, /* TCP FIN packet receive (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_SYN_RCVD, /* TCP SYN+ACK packet receive (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_MAX_RT_TIME, /* TCP max retransmit time (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_MAX_RT_CNT, /* TCP max retransmit count (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT, /* TCP ka probes count (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_FIN_WAIT2, /* TCP fin wait 2 (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_NVMETCP_CONN_ERROR, /* NVMeTCP error response (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_TCP_CONN_ERROR, /* NVMeTCP error - tcp error (A-syn EQE) */
+ MAX_NVMETCP_EQE_OPCODE
+};
+
+struct nvmetcp_conn_offload_section {
+ struct regpair cccid_itid_table_addr; /* CCCID to iTID table address */
+ __le16 cccid_max_range; /* CCCID max value - used for validation */
+ __le16 reserved[3];
+};
+
+/* NVMe TCP connection offload params passed by driver to FW in NVMeTCP offload ramrod */
+struct nvmetcp_conn_offload_params {
+ struct regpair sq_pbl_addr;
+ struct regpair r2tq_pbl_addr;
+ struct regpair xhq_pbl_addr;
+ struct regpair uhq_pbl_addr;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ u8 flags;
+#define NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
+#define NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
+#define NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE_MASK 0x1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE_SHIFT 3
+#define NVMETCP_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0xF
+#define NVMETCP_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 4
+ u8 default_cq;
+ __le16 reserved0;
+ __le32 reserved1;
+ __le32 initial_ack;
+
+ struct nvmetcp_conn_offload_section nvmetcp; /* NVMe/TCP section */
+};
+
+/* NVMe TCP and TCP connection offload params passed by driver to FW in NVMeTCP offload ramrod. */
+struct nvmetcp_spe_conn_offload {
+ __le16 reserved;
+ __le16 conn_id;
+ __le32 fw_cid;
+ struct nvmetcp_conn_offload_params nvmetcp;
+ struct tcp_offload_params_opt2 tcp;
+};
+
+/* NVMeTCP connection update params passed by driver to FW in NVMETCP update ramrod. */
+struct nvmetcp_conn_update_ramrod_params {
+ __le16 reserved0;
+ __le16 conn_id;
+ __le32 reserved1;
+ u8 flags;
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED0_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED0_SHIFT 2
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 3
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED2_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED2_SHIFT 4
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED3_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED3_SHIFT 5
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED4_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED4_SHIFT 6
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED5_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED5_SHIFT 7
+ u8 reserved3[3];
+ __le32 max_seq_size;
+ __le32 max_send_pdu_length;
+ __le32 max_recv_pdu_length;
+ __le32 first_seq_length;
+ __le32 reserved4[5];
+};
+
+/* NVMeTCP connection termination request */
+struct nvmetcp_spe_conn_termination {
+ __le16 reserved0;
+ __le16 conn_id;
+ __le32 reserved1;
+ u8 abortive;
+ u8 reserved2[7];
+ struct regpair reserved3;
+ struct regpair reserved4;
+};
+
+struct nvmetcp_dif_flags {
+ u8 flags;
+};
+
+enum nvmetcp_wqe_type {
+ NVMETCP_WQE_TYPE_NORMAL,
+ NVMETCP_WQE_TYPE_TASK_CLEANUP,
+ NVMETCP_WQE_TYPE_MIDDLE_PATH,
+ NVMETCP_WQE_TYPE_IC,
+ MAX_NVMETCP_WQE_TYPE
+};
+
+struct nvmetcp_wqe {
+ __le16 task_id;
+ u8 flags;
+#define NVMETCP_WQE_WQE_TYPE_MASK 0x7 /* [use nvmetcp_wqe_type] */
+#define NVMETCP_WQE_WQE_TYPE_SHIFT 0
+#define NVMETCP_WQE_NUM_SGES_MASK 0xF
+#define NVMETCP_WQE_NUM_SGES_SHIFT 3
+#define NVMETCP_WQE_RESPONSE_MASK 0x1
+#define NVMETCP_WQE_RESPONSE_SHIFT 7
+ struct nvmetcp_dif_flags prot_flags;
+ __le32 contlen_cdbsize;
+#define NVMETCP_WQE_CONT_LEN_MASK 0xFFFFFF
+#define NVMETCP_WQE_CONT_LEN_SHIFT 0
+#define NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD_MASK 0xFF
+#define NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD_SHIFT 24
+};
+
+struct nvmetcp_host_cccid_itid_entry {
+ __le16 itid;
+};
+
+struct nvmetcp_connect_done_results {
+ __le16 icid;
+ __le16 conn_id;
+ struct tcp_ulp_connect_done_params params;
+};
+
+struct nvmetcp_eqe_data {
+ __le16 icid;
+ __le16 conn_id;
+ __le16 reserved;
+ u8 error_code;
+ u8 error_pdu_opcode_reserved;
+#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F
+#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_SHIFT 0
+#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK 0x1
+#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT 6
+#define NVMETCP_EQE_DATA_RESERVED0_MASK 0x1
+#define NVMETCP_EQE_DATA_RESERVED0_SHIFT 7
+};
+
+enum nvmetcp_task_type {
+ NVMETCP_TASK_TYPE_HOST_WRITE,
+ NVMETCP_TASK_TYPE_HOST_READ,
+ NVMETCP_TASK_TYPE_INIT_CONN_REQUEST,
+ NVMETCP_TASK_TYPE_RESERVED0,
+ NVMETCP_TASK_TYPE_CLEANUP,
+ NVMETCP_TASK_TYPE_HOST_READ_NO_CQE,
+ MAX_NVMETCP_TASK_TYPE
+};
+
+struct nvmetcp_db_data {
+ u8 params;
+#define NVMETCP_DB_DATA_DEST_MASK 0x3 /* destination of doorbell (use enum db_dest) */
+#define NVMETCP_DB_DATA_DEST_SHIFT 0
+#define NVMETCP_DB_DATA_AGG_CMD_MASK 0x3 /* aggregative command to CM (use enum db_agg_cmd_sel) */
+#define NVMETCP_DB_DATA_AGG_CMD_SHIFT 2
+#define NVMETCP_DB_DATA_BYPASS_EN_MASK 0x1 /* enable QM bypass */
+#define NVMETCP_DB_DATA_BYPASS_EN_SHIFT 4
+#define NVMETCP_DB_DATA_RESERVED_MASK 0x1
+#define NVMETCP_DB_DATA_RESERVED_SHIFT 5
+#define NVMETCP_DB_DATA_AGG_VAL_SEL_MASK 0x3 /* aggregative value selection */
+#define NVMETCP_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags; /* bit for every DQ counter flags in CM context that DQ can increment */
+ __le16 sq_prod;
+};
+
+struct nvmetcp_fw_nvmf_cqe {
+ __le32 reserved[4];
+};
+
+struct nvmetcp_icresp_mdata {
+ u8 digest;
+ u8 cpda;
+ __le16 pfv;
+ __le32 maxdata;
+ __le16 rsvd[4];
+};
+
+union nvmetcp_fw_cqe_data {
+ struct nvmetcp_fw_nvmf_cqe nvme_cqe;
+ struct nvmetcp_icresp_mdata icresp_mdata;
+};
+
+struct nvmetcp_fw_cqe {
+ __le16 conn_id;
+ u8 cqe_type;
+ u8 cqe_error_status_bits;
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4
+ __le16 itid;
+ u8 task_type;
+ u8 fw_dbg_field;
+ u8 caused_conn_err;
+ u8 reserved0[3];
+ __le32 reserved1;
+ union nvmetcp_fw_cqe_data cqe_data;
+ struct regpair task_opaque;
+ __le32 reserved[6];
+};
+
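/*
 * Illustrative sketch (editor's example, not part of this patch): decoding
 * the CQE error bitmap above on the completion path with GET_FIELD(); the
 * predicate function is hypothetical.
 */
static bool nvmetcp_cqe_has_dif_err(const struct nvmetcp_fw_cqe *cqe)
{
        return GET_FIELD(cqe->cqe_error_status_bits,
                         CQE_ERROR_BITMAP_DIF_ERR_BITS) != 0;
}
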
+enum nvmetcp_fw_cqes_type {
+ NVMETCP_FW_CQE_TYPE_NORMAL = 1,
+ NVMETCP_FW_CQE_TYPE_RESERVED0,
+ NVMETCP_FW_CQE_TYPE_RESERVED1,
+ NVMETCP_FW_CQE_TYPE_CLEANUP,
+ NVMETCP_FW_CQE_TYPE_DUMMY,
+ MAX_NVMETCP_FW_CQES_TYPE
+};
+
+struct ystorm_nvmetcp_task_state {
+ struct scsi_cached_sges data_desc;
+ struct scsi_sgl_params sgl_params;
+ __le32 reserved0;
+ __le32 buffer_offset;
+ __le16 cccid;
+ struct nvmetcp_dif_flags dif_flags;
+ u8 flags;
+#define YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP_MASK 0x1
+#define YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP_SHIFT 0
+#define YSTORM_NVMETCP_TASK_STATE_SLOW_IO_MASK 0x1
+#define YSTORM_NVMETCP_TASK_STATE_SLOW_IO_SHIFT 1
+#define YSTORM_NVMETCP_TASK_STATE_SET_DIF_OFFSET_MASK 0x1
+#define YSTORM_NVMETCP_TASK_STATE_SET_DIF_OFFSET_SHIFT 2
+#define YSTORM_NVMETCP_TASK_STATE_SEND_W_RSP_MASK 0x1
+#define YSTORM_NVMETCP_TASK_STATE_SEND_W_RSP_SHIFT 3
+};
+
+struct ystorm_nvmetcp_task_rxmit_opt {
+ __le32 reserved[4];
+};
+
+struct nvmetcp_task_hdr {
+ __le32 reg[18];
+};
+
+struct nvmetcp_task_hdr_aligned {
+ struct nvmetcp_task_hdr task_hdr;
+ __le32 reserved[2]; /* HSI_COMMENT: Align to QREG */
+};
+
+struct e5_tdif_task_context {
+ __le32 reserved[16];
+};
+
+struct e5_rdif_task_context {
+ __le32 reserved[12];
+};
+
+struct ystorm_nvmetcp_task_st_ctx {
+ struct ystorm_nvmetcp_task_state state;
+ struct ystorm_nvmetcp_task_rxmit_opt rxmit_opt;
+ struct nvmetcp_task_hdr_aligned pdu_hdr;
+};
+
+struct mstorm_nvmetcp_task_st_ctx {
+ struct scsi_cached_sges data_desc;
+ struct scsi_sgl_params sgl_params;
+ __le32 rem_task_size;
+ __le32 data_buffer_offset;
+ u8 task_type;
+ struct nvmetcp_dif_flags dif_flags;
+ __le16 dif_task_icid;
+ struct regpair reserved0;
+ __le32 expected_itt;
+ __le32 reserved1;
+};
+
+struct ustorm_nvmetcp_task_st_ctx {
+ __le32 rem_rcv_len;
+ __le32 exp_data_transfer_len;
+ __le32 exp_data_sn;
+ struct regpair reserved0;
+ __le32 reg1_map;
+#define REG1_NUM_SGES_MASK 0xF
+#define REG1_NUM_SGES_SHIFT 0
+#define REG1_RESERVED1_MASK 0xFFFFFFF
+#define REG1_RESERVED1_SHIFT 4
+ u8 flags2;
+#define USTORM_NVMETCP_TASK_ST_CTX_AHS_EXIST_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_AHS_EXIST_SHIFT 0
+#define USTORM_NVMETCP_TASK_ST_CTX_RESERVED1_MASK 0x7F
+#define USTORM_NVMETCP_TASK_ST_CTX_RESERVED1_SHIFT 1
+ struct nvmetcp_dif_flags dif_flags;
+ __le16 reserved3;
+ __le16 tqe_opaque[2];
+ __le32 reserved5;
+ __le32 nvme_tcp_opaque_lo;
+ __le32 nvme_tcp_opaque_hi;
+ u8 task_type;
+ u8 error_flags;
+#define USTORM_NVMETCP_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0
+#define USTORM_NVMETCP_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
+#define USTORM_NVMETCP_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2
+#define USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP_SHIFT 3
+ u8 flags;
+#define USTORM_NVMETCP_TASK_ST_CTX_CQE_WRITE_MASK 0x3
+#define USTORM_NVMETCP_TASK_ST_CTX_CQE_WRITE_SHIFT 0
+#define USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
+#define USTORM_NVMETCP_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
+#define USTORM_NVMETCP_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
+#define USTORM_NVMETCP_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5
+#define USTORM_NVMETCP_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
+ u8 cq_rss_number;
+};
+
+struct e5_ystorm_nvmetcp_task_ag_ctx {
+ u8 reserved /* cdu_validation */;
+ u8 byte1 /* state_and_core_id */;
+ __le16 word0 /* icid */;
+ u8 flags0;
+ u8 flags1;
+ u8 flags2;
+ u8 flags3;
+ __le32 TTT;
+ u8 byte2;
+ u8 byte3;
+ u8 byte4;
+ u8 reserved7;
+};
+
+struct e5_mstorm_nvmetcp_task_ag_ctx {
+ u8 cdu_validation;
+ u8 byte1;
+ __le16 task_cid;
+ u8 flags0;
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_VALID_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_VALID_SHIFT 6
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
+ u8 flags1;
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1_MASK 0x3
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1_SHIFT 2
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF2_MASK 0x3
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF2_SHIFT 4
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+ u8 flags3;
+ __le32 reg0;
+ u8 byte2;
+ u8 byte3;
+ u8 byte4;
+ u8 reserved7;
+};
+
+struct e5_ustorm_nvmetcp_task_ag_ctx {
+ u8 reserved;
+ u8 state_and_core_id;
+ __le16 icid;
+ u8 flags0;
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
+ u8 flags1;
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED1_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED1_SHIFT 0
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_SHIFT 2
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3_SHIFT 4
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+ u8 flags2;
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3EN_SHIFT 3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
+ u8 flags3;
+ u8 flags4;
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED5_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED5_SHIFT 0
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED6_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED6_SHIFT 2
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED7_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED7_SHIFT 3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+ u8 byte2;
+ u8 byte3;
+ u8 reserved8;
+ __le32 dif_err_intervals;
+ __le32 dif_error_1st_interval;
+ __le32 rcv_cont_len;
+ __le32 exp_cont_len;
+ __le32 total_data_acked;
+ __le32 exp_data_acked;
+ __le16 word1;
+ __le16 next_tid;
+ __le32 hdr_residual_count;
+ __le32 exp_r2t_sn;
+};
+
+struct e5_nvmetcp_task_context {
+ struct ystorm_nvmetcp_task_st_ctx ystorm_st_context;
+ struct e5_ystorm_nvmetcp_task_ag_ctx ystorm_ag_context;
+ struct regpair ystorm_ag_padding[2];
+ struct e5_tdif_task_context tdif_context;
+ struct e5_mstorm_nvmetcp_task_ag_ctx mstorm_ag_context;
+ struct regpair mstorm_ag_padding[2];
+ struct e5_ustorm_nvmetcp_task_ag_ctx ustorm_ag_context;
+ struct regpair ustorm_ag_padding[2];
+ struct mstorm_nvmetcp_task_st_ctx mstorm_st_context;
+ struct regpair mstorm_st_padding[2];
+ struct ustorm_nvmetcp_task_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+ struct e5_rdif_task_context rdif_context;
+};
+
+#endif /* __NVMETCP_COMMON__*/
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 59ddf9af909e..a84063492c71 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef _QED_CHAIN_H
@@ -37,6 +11,7 @@
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
@@ -44,7 +19,7 @@ enum qed_chain_mode {
/* Each Page contains a next pointer at its end */
QED_CHAIN_MODE_NEXT_PTR,
- /* Chain is a single page (next ptr) is unrequired */
+ /* Chain is a single page, next ptr is not required */
QED_CHAIN_MODE_SINGLE,
/* Page pointers are located in a side list */
@@ -52,9 +27,9 @@ enum qed_chain_mode {
};
enum qed_chain_use_mode {
- QED_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */
- QED_CHAIN_USE_TO_CONSUME, /* Chain starts full */
- QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */
+ QED_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */
+ QED_CHAIN_USE_TO_CONSUME, /* Chain starts full */
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */
};
enum qed_chain_cnt_type {
@@ -66,196 +41,242 @@ enum qed_chain_cnt_type {
};
struct qed_chain_next {
- struct regpair next_phys;
- void *next_virt;
+ struct regpair next_phys;
+ void *next_virt;
};
struct qed_chain_pbl_u16 {
- u16 prod_page_idx;
- u16 cons_page_idx;
+ u16 prod_page_idx;
+ u16 cons_page_idx;
};
struct qed_chain_pbl_u32 {
- u32 prod_page_idx;
- u32 cons_page_idx;
-};
-
-struct qed_chain_ext_pbl {
- dma_addr_t p_pbl_phys;
- void *p_pbl_virt;
+ u32 prod_page_idx;
+ u32 cons_page_idx;
};
struct qed_chain_u16 {
- /* Cyclic index of next element to produce/consme */
- u16 prod_idx;
- u16 cons_idx;
+ /* Cyclic index of next element to produce/consume */
+ u16 prod_idx;
+ u16 cons_idx;
};
struct qed_chain_u32 {
- /* Cyclic index of next element to produce/consme */
- u32 prod_idx;
- u32 cons_idx;
+ /* Cyclic index of next element to produce/consume */
+ u32 prod_idx;
+ u32 cons_idx;
+};
+
+struct addr_tbl_entry {
+ void *virt_addr;
+ dma_addr_t dma_map;
};
struct qed_chain {
- /* fastpath portion of the chain - required for commands such
+ /* Fastpath portion of the chain - required for commands such
* as produce / consume.
*/
+
/* Point to next element to produce/consume */
- void *p_prod_elem;
- void *p_cons_elem;
+ void *p_prod_elem;
+ void *p_cons_elem;
/* Fastpath portions of the PBL [if exists] */
+
struct {
- /* Table for keeping the virtual addresses of the chain pages,
- * respectively to the physical addresses in the pbl table.
+ /* Table for keeping the virtual and physical addresses of the
+ * chain pages, respectively to the physical addresses
+ * in the pbl table.
*/
- void **pp_virt_addr_tbl;
+ struct addr_tbl_entry *pp_addr_tbl;
union {
- struct qed_chain_pbl_u16 u16;
- struct qed_chain_pbl_u32 u32;
- } c;
- } pbl;
+ struct qed_chain_pbl_u16 u16;
+ struct qed_chain_pbl_u32 u32;
+ } c;
+ } pbl;
union {
- struct qed_chain_u16 chain16;
- struct qed_chain_u32 chain32;
- } u;
+ struct qed_chain_u16 chain16;
+ struct qed_chain_u32 chain32;
+ } u;
/* Capacity counts only usable elements */
- u32 capacity;
- u32 page_cnt;
+ u32 capacity;
+ u32 page_cnt;
- enum qed_chain_mode mode;
+ enum qed_chain_mode mode;
/* Elements information for fast calculations */
- u16 elem_per_page;
- u16 elem_per_page_mask;
- u16 elem_size;
- u16 next_page_mask;
- u16 usable_per_page;
- u8 elem_unusable;
+ u16 elem_per_page;
+ u16 elem_per_page_mask;
+ u16 elem_size;
+ u16 next_page_mask;
+ u16 usable_per_page;
+ u8 elem_unusable;
- u8 cnt_type;
+ enum qed_chain_cnt_type cnt_type;
/* Slowpath of the chain - required for initialization and destruction,
* but isn't involved in regular functionality.
*/
+ u32 page_size;
+
/* Base address of a pre-allocated buffer for pbl */
struct {
- dma_addr_t p_phys_table;
- void *p_virt_table;
- } pbl_sp;
+ __le64 *table_virt;
+ dma_addr_t table_phys;
+ size_t table_size;
+ } pbl_sp;
/* Address of first page of the chain - the address is required
- * for fastpath operation [consume/produce] but only for the the SINGLE
+ * for fastpath operation [consume/produce] but only for the SINGLE
* flavour which isn't considered fastpath [== SPQ].
*/
- void *p_virt_addr;
- dma_addr_t p_phys_addr;
+ void *p_virt_addr;
+ dma_addr_t p_phys_addr;
/* Total number of elements [for entire chain] */
- u32 size;
+ u32 size;
+
+ enum qed_chain_use_mode intended_use;
+
+ bool b_external_pbl;
+};
- u8 intended_use;
+struct qed_chain_init_params {
+ enum qed_chain_mode mode;
+ enum qed_chain_use_mode intended_use;
+ enum qed_chain_cnt_type cnt_type;
- bool b_external_pbl;
+ u32 page_size;
+ u32 num_elems;
+ size_t elem_size;
+
+ void *ext_pbl_virt;
+ dma_addr_t ext_pbl_phys;
};
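/* Editor's sketch, not part of this patch: how a caller might fill the
 * new qed_chain_init_params aggregate in place of the old loose argument
 * list. All sizes below are hypothetical.
 */
static inline void example_fill_chain_params(struct qed_chain_init_params *p)
{
	memset(p, 0, sizeof(*p));
	p->mode = QED_CHAIN_MODE_PBL;
	p->intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
	p->cnt_type = QED_CHAIN_CNT_TYPE_U16;
	p->page_size = QED_CHAIN_PAGE_SIZE;	/* SZ_4K default */
	p->num_elems = 256;			/* hypothetical ring depth */
	p->elem_size = 64;			/* hypothetical element size */
	/* ext_pbl_virt/ext_pbl_phys stay zero unless an external PBL is used */
}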
-#define QED_CHAIN_PBL_ENTRY_SIZE (8)
-#define QED_CHAIN_PAGE_SIZE (0x1000)
-#define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size))
+#define QED_CHAIN_PAGE_SIZE SZ_4K
+
+#define ELEMS_PER_PAGE(elem_size, page_size) \
+ ((page_size) / (elem_size))
-#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
- (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \
- (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \
- (elem_size))) : 0)
+#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
+ (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \
+ (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / (elem_size))) : \
+ 0)
-#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
- ((u32)(ELEMS_PER_PAGE(elem_size) - \
- UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
+#define USABLE_ELEMS_PER_PAGE(elem_size, page_size, mode) \
+ ((u32)(ELEMS_PER_PAGE((elem_size), (page_size)) - \
+ UNUSABLE_ELEMS_PER_PAGE((elem_size), (mode))))
-#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
- DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
+#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, page_size, mode) \
+ DIV_ROUND_UP((elem_cnt), \
+ USABLE_ELEMS_PER_PAGE((elem_size), (page_size), (mode)))
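/* Worked example (editor's illustration, hypothetical sizes): with
 * elem_size = 64, page_size = SZ_4K and QED_CHAIN_MODE_NEXT_PTR,
 *   ELEMS_PER_PAGE(64, 4096)             = 64 elements
 *   UNUSABLE_ELEMS_PER_PAGE(64, mode)    = 1 + (sizeof(struct qed_chain_next) - 1) / 64 = 1
 *   USABLE_ELEMS_PER_PAGE(64, 4096, mode) = 64 - 1 = 63
 *   QED_CHAIN_PAGE_CNT(1000, 64, 4096, mode) = DIV_ROUND_UP(1000, 63) = 16 pages
 */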
-#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
-#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
+#define is_chain_u16(p) \
+ ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
+#define is_chain_u32(p) \
+ ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
/* Accessors */
-static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
+
+static inline u16 qed_chain_get_prod_idx(const struct qed_chain *chain)
+{
+ return chain->u.chain16.prod_idx;
+}
+
+static inline u16 qed_chain_get_cons_idx(const struct qed_chain *chain)
{
- return p_chain->u.chain16.prod_idx;
+ return chain->u.chain16.cons_idx;
}
-static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_prod_idx_u32(const struct qed_chain *chain)
{
- return p_chain->u.chain16.cons_idx;
+ return chain->u.chain32.prod_idx;
}
-static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_cons_idx_u32(const struct qed_chain *chain)
{
- return p_chain->u.chain32.cons_idx;
+ return chain->u.chain32.cons_idx;
}
-static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
+static inline u16 qed_chain_get_elem_used(const struct qed_chain *chain)
{
+ u32 prod = qed_chain_get_prod_idx(chain);
+ u32 cons = qed_chain_get_cons_idx(chain);
+ u16 elem_per_page = chain->elem_per_page;
u16 used;
- used = (u16) (((u32)0x10000 +
- (u32)p_chain->u.chain16.prod_idx) -
- (u32)p_chain->u.chain16.cons_idx);
- if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
- used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
- p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
+ if (prod < cons)
+ prod += (u32)U16_MAX + 1;
+
+ used = (u16)(prod - cons);
+ if (chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+ used -= (u16)(prod / elem_per_page - cons / elem_per_page);
- return (u16)(p_chain->capacity - used);
+ return used;
+}
+
+static inline u16 qed_chain_get_elem_left(const struct qed_chain *chain)
+{
+ return (u16)(chain->capacity - qed_chain_get_elem_used(chain));
}
-static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_elem_used_u32(const struct qed_chain *chain)
{
+ u64 prod = qed_chain_get_prod_idx_u32(chain);
+ u64 cons = qed_chain_get_cons_idx_u32(chain);
+ u16 elem_per_page = chain->elem_per_page;
u32 used;
- used = (u32) (((u64)0x100000000ULL +
- (u64)p_chain->u.chain32.prod_idx) -
- (u64)p_chain->u.chain32.cons_idx);
- if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
- used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
- p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
+ if (prod < cons)
+ prod += (u64)U32_MAX + 1;
- return p_chain->capacity - used;
+ used = (u32)(prod - cons);
+ if (chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+ used -= (u32)(prod / elem_per_page - cons / elem_per_page);
+
+ return used;
}
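/* Editor's sketch, not part of this patch: the wraparound handling above
 * with hypothetical index values, where the producer has wrapped past
 * zero while the consumer has not.
 */
static u32 example_elem_used_wrap(void)
{
	u64 prod = 2;		/* wrapped: 0xFFFFFFFF -> 0 -> 1 -> 2 */
	u64 cons = 0xFFFFFFFE;	/* not yet wrapped */

	if (prod < cons)
		prod += (u64)U32_MAX + 1;	/* prod becomes 0x100000002 */

	return (u32)(prod - cons);		/* 4 elements in use */
}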
-static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_elem_left_u32(const struct qed_chain *chain)
{
- return p_chain->usable_per_page;
+ return chain->capacity - qed_chain_get_elem_used_u32(chain);
}
-static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
+static inline u16 qed_chain_get_usable_per_page(const struct qed_chain *chain)
{
- return p_chain->elem_unusable;
+ return chain->usable_per_page;
}
-static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
+static inline u8 qed_chain_get_unusable_per_page(const struct qed_chain *chain)
{
- return p_chain->page_cnt;
+ return chain->elem_unusable;
}
-static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_page_cnt(const struct qed_chain *chain)
{
- return p_chain->pbl_sp.p_phys_table;
+ return chain->page_cnt;
+}
+
+static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain)
+{
+ return chain->pbl_sp.table_phys;
}
/**
- * @brief qed_chain_advance_page -
+ * qed_chain_advance_page(): Advance the next element across pages for a
+ * linked chain.
*
- * Advance the next element accros pages for a linked chain
+ * @p_chain: Chain.
+ * @p_next_elem: Pointer to the next element to advance.
+ * @idx_to_inc: Index to increment.
+ * @page_to_inc: Page index to increment.
*
- * @param p_chain
- * @param p_next_elem
- * @param idx_to_inc
- * @param page_to_inc
+ * Return: Void.
*/
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
@@ -287,7 +308,7 @@ qed_chain_advance_page(struct qed_chain *p_chain,
*(u32 *)page_to_inc = 0;
page_index = *(u32 *)page_to_inc;
}
- *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
+ *p_next_elem = p_chain->pbl.pp_addr_tbl[page_index].virt_addr;
}
}
@@ -316,12 +337,14 @@ qed_chain_advance_page(struct qed_chain *p_chain,
} while (0)
/**
- * @brief qed_chain_return_produced -
+ * qed_chain_return_produced(): A chain in which the driver "Produces"
+ * elements should use this API
+ * to indicate previous produced elements
+ * are now consumed.
*
- * A chain in which the driver "Produces" elements should use this API
- * to indicate previous produced elements are now consumed.
+ * @p_chain: Chain.
*
- * @param p_chain
+ * Return: Void.
*/
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
@@ -333,15 +356,15 @@ static inline void qed_chain_return_produced(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_produce -
- *
- * A chain in which the driver "Produces" elements should use this to get
- * a pointer to the next element which can be "Produced". It's driver
- * responsibility to validate that the chain has room for new element.
+ * qed_chain_produce(): A chain in which the driver "Produces"
+ * elements should use this to get a pointer to
+ * the next element which can be "Produced". It's driver
+ * responsibility to validate that the chain has room for
+ * new element.
*
- * @param p_chain
+ * @p_chain: Chain.
*
- * @return void*, a pointer to next element
+ * Return: void*, a pointer to the next element.
*/
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
@@ -375,14 +398,11 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_get_capacity -
+ * qed_chain_get_capacity(): Get the maximum number of BDs in chain.
*
- * Get the maximum number of BDs in chain
+ * @p_chain: Chain.
*
- * @param p_chain
- * @param num
- *
- * @return number of unusable BDs
+ * Return: number of usable BDs (the chain capacity).
*/
static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
{
@@ -390,12 +410,14 @@ static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_recycle_consumed -
+ * qed_chain_recycle_consumed(): Returns an element which was
+ * previously consumed;
+ * Increments producers so they could
+ * be written to FW.
*
- * Returns an element which was previously consumed;
- * Increments producers so they could be written to FW.
+ * @p_chain: Chain.
*
- * @param p_chain
+ * Return: Void.
*/
static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
{
@@ -407,14 +429,13 @@ static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_consume -
- *
- * A Chain in which the driver utilizes data written by a different source
- * (i.e., FW) should use this to access passed buffers.
+ * qed_chain_consume(): A Chain in which the driver utilizes data written
+ * by a different source (i.e., FW) should use this to
+ * access passed buffers.
*
- * @param p_chain
+ * @p_chain: Chain.
*
- * @return void*, a pointer to the next buffer written
+ * Return: void*, a pointer to the next buffer written.
*/
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
@@ -448,9 +469,11 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_reset - Resets the chain to its start state
+ * qed_chain_reset(): Resets the chain to its start state.
+ *
+ * @p_chain: pointer to a previously allocated chain.
*
- * @param p_chain pointer to a previously allocted chain
+ * Return: Void.
*/
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
@@ -499,125 +522,12 @@ static inline void qed_chain_reset(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_init - Initalizes a basic chain struct
- *
- * @param p_chain
- * @param p_virt_addr
- * @param p_phys_addr physical address of allocated buffer's beginning
- * @param page_cnt number of pages in the allocated buffer
- * @param elem_size size of each element in the chain
- * @param intended_use
- * @param mode
- */
-static inline void qed_chain_init_params(struct qed_chain *p_chain,
- u32 page_cnt,
- u8 elem_size,
- enum qed_chain_use_mode intended_use,
- enum qed_chain_mode mode,
- enum qed_chain_cnt_type cnt_type)
-{
- /* chain fixed parameters */
- p_chain->p_virt_addr = NULL;
- p_chain->p_phys_addr = 0;
- p_chain->elem_size = elem_size;
- p_chain->intended_use = (u8)intended_use;
- p_chain->mode = mode;
- p_chain->cnt_type = (u8)cnt_type;
-
- p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
- p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
- p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
- p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
- p_chain->next_page_mask = (p_chain->usable_per_page &
- p_chain->elem_per_page_mask);
-
- p_chain->page_cnt = page_cnt;
- p_chain->capacity = p_chain->usable_per_page * page_cnt;
- p_chain->size = p_chain->elem_per_page * page_cnt;
-
- p_chain->pbl_sp.p_phys_table = 0;
- p_chain->pbl_sp.p_virt_table = NULL;
- p_chain->pbl.pp_virt_addr_tbl = NULL;
-}
-
-/**
- * @brief qed_chain_init_mem -
- *
- * Initalizes a basic chain struct with its chain buffers
- *
- * @param p_chain
- * @param p_virt_addr virtual address of allocated buffer's beginning
- * @param p_phys_addr physical address of allocated buffer's beginning
- *
- */
-static inline void qed_chain_init_mem(struct qed_chain *p_chain,
- void *p_virt_addr, dma_addr_t p_phys_addr)
-{
- p_chain->p_virt_addr = p_virt_addr;
- p_chain->p_phys_addr = p_phys_addr;
-}
-
-/**
- * @brief qed_chain_init_pbl_mem -
- *
- * Initalizes a basic chain struct with its pbl buffers
- *
- * @param p_chain
- * @param p_virt_pbl pointer to a pre allocated side table which will hold
- * virtual page addresses.
- * @param p_phys_pbl pointer to a pre-allocated side table which will hold
- * physical page addresses.
- * @param pp_virt_addr_tbl
- * pointer to a pre-allocated side table which will hold
- * the virtual addresses of the chain pages.
- *
- */
-static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
- void *p_virt_pbl,
- dma_addr_t p_phys_pbl,
- void **pp_virt_addr_tbl)
-{
- p_chain->pbl_sp.p_phys_table = p_phys_pbl;
- p_chain->pbl_sp.p_virt_table = p_virt_pbl;
- p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
-}
-
-/**
- * @brief qed_chain_init_next_ptr_elem -
- *
- * Initalizes a next pointer element
- *
- * @param p_chain
- * @param p_virt_curr virtual address of a chain page of which the next
- * pointer element is initialized
- * @param p_virt_next virtual address of the next chain page
- * @param p_phys_next physical address of the next chain page
- *
- */
-static inline void
-qed_chain_init_next_ptr_elem(struct qed_chain *p_chain,
- void *p_virt_curr,
- void *p_virt_next, dma_addr_t p_phys_next)
-{
- struct qed_chain_next *p_next;
- u32 size;
-
- size = p_chain->elem_size * p_chain->usable_per_page;
- p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size);
-
- DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);
-
- p_next->next_virt = p_virt_next;
-}
-
-/**
- * @brief qed_chain_get_last_elem -
- *
- * Returns a pointer to the last element of the chain
+ * qed_chain_get_last_elem(): Returns a pointer to the last element of the
+ * chain.
*
- * @param p_chain
+ * @p_chain: Chain.
*
- * @return void*
+ * Return: void*.
*/
static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
{
@@ -644,7 +554,7 @@ static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
break;
case QED_CHAIN_MODE_PBL:
last_page_idx = p_chain->page_cnt - 1;
- p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
+ p_virt_addr = p_chain->pbl.pp_addr_tbl[last_page_idx].virt_addr;
break;
}
/* p_virt_addr points at this stage to the last page of the chain */
@@ -655,14 +565,48 @@ out:
}
/**
- * @brief qed_chain_set_prod - sets the prod to the given value
+ * qed_chain_set_prod(): Sets the producer index to the given value.
*
- * @param prod_idx
- * @param p_prod_elem
+ * @p_chain: Chain.
+ * @prod_idx: Producer index.
+ * @p_prod_elem: Producer element.
+ *
+ * Return: Void.
*/
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
u32 prod_idx, void *p_prod_elem)
{
+ if (p_chain->mode == QED_CHAIN_MODE_PBL) {
+ u32 cur_prod, page_mask, page_cnt, page_diff;
+
+ cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx :
+ p_chain->u.chain32.prod_idx;
+
+ /* Assume that number of elements in a page is power of 2 */
+ page_mask = ~p_chain->elem_per_page_mask;
+
+ /* Use "cur_prod - 1" and "prod_idx - 1" since producer index
+ * reaches the first element of next page before the page index
+ * is incremented. See qed_chain_produce().
+ * Index wrap around is not a problem because the difference
+ * between current and given producer indices is always
+ * positive and lower than the chain's capacity.
+ */
+ page_diff = (((cur_prod - 1) & page_mask) -
+ ((prod_idx - 1) & page_mask)) /
+ p_chain->elem_per_page;
+
+ page_cnt = qed_chain_get_page_cnt(p_chain);
+ if (is_chain_u16(p_chain))
+ p_chain->pbl.c.u16.prod_page_idx =
+ (p_chain->pbl.c.u16.prod_page_idx -
+ page_diff + page_cnt) % page_cnt;
+ else
+ p_chain->pbl.c.u32.prod_page_idx =
+ (p_chain->pbl.c.u32.prod_page_idx -
+ page_diff + page_cnt) % page_cnt;
+ }
+
if (is_chain_u16(p_chain))
p_chain->u.chain16.prod_idx = (u16) prod_idx;
else
@@ -671,9 +615,11 @@ static inline void qed_chain_set_prod(struct qed_chain *p_chain,
}
/**
- * @brief qed_chain_pbl_zero_mem - set chain memory to 0
+ * qed_chain_pbl_zero_mem(): Set chain memory to 0.
+ *
+ * @p_chain: Chain.
*
- * @param p_chain
+ * Return: Void.
*/
static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
{
@@ -685,8 +631,8 @@ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
page_cnt = qed_chain_get_page_cnt(p_chain);
for (i = 0; i < page_cnt; i++)
- memset(p_chain->pbl.pp_virt_addr_tbl[i], 0,
- QED_CHAIN_PAGE_SIZE);
+ memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0,
+ p_chain->page_size);
}
#endif
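/* Editor's sketch, not part of this patch: a typical produce-side guard.
 * qed_chain_produce() does not check for room itself, so the caller
 * validates via qed_chain_get_elem_left() first. 'chain' is hypothetical.
 */
static void *example_produce_one(struct qed_chain *chain)
{
	if (!qed_chain_get_elem_left(chain))
		return NULL;	/* ring full */

	return qed_chain_produce(chain);
}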
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 0eef0a2b1901..e1bf3219b4e6 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef _QED_ETH_IF_H
@@ -39,6 +13,10 @@
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_iov_if.h>
+/* 64 max queues * (1 rx + 4 tx-cos + 1 xdp) */
+#define QED_MIN_L2_CONS (2 + NUM_PHYS_TCS_4PORT_K2)
+#define QED_MAX_L2_CONS (64 * (QED_MIN_L2_CONS))
+
struct qed_queue_start_common_params {
/* Should always be relative to entity sending this. */
u8 vport_id;
@@ -49,6 +27,8 @@ struct qed_queue_start_common_params {
struct qed_sb_info *p_sb;
u8 sb_idx;
+
+ u8 tc;
};
struct qed_rxq_start_ret_params {
@@ -61,6 +41,39 @@ struct qed_txq_start_ret_params {
void *p_handle;
};
+enum qed_filter_config_mode {
+ QED_FILTER_CONFIG_MODE_DISABLE,
+ QED_FILTER_CONFIG_MODE_5_TUPLE,
+ QED_FILTER_CONFIG_MODE_L4_PORT,
+ QED_FILTER_CONFIG_MODE_IP_DEST,
+ QED_FILTER_CONFIG_MODE_IP_SRC,
+};
+
+struct qed_ntuple_filter_params {
+ /* Physically mapped address containing header of buffer to be used
+ * as filter.
+ */
+ dma_addr_t addr;
+
+ /* Length of header in bytes */
+ u16 length;
+
+ /* Relative queue-id to receive classified packet */
+#define QED_RFS_NTUPLE_QID_RSS ((u16)-1)
+ u16 qid;
+
+ /* Identifier can either be according to vport-id or vfid */
+ bool b_is_vf;
+ u8 vport_id;
+ u8 vf_id;
+
+ /* true iff this filter is to be added. Else to be removed */
+ bool b_is_add;
+
+ /* If flow needs to be dropped */
+ bool b_is_drop;
+};
+
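/* Editor's sketch, not part of this patch: filling the new aggregated
 * n-tuple filter request. All values are hypothetical; 'mapping' would
 * come from a prior DMA mapping of the header buffer.
 */
static void example_fill_ntuple(struct qed_ntuple_filter_params *p,
				dma_addr_t mapping)
{
	p->addr = mapping;
	p->length = 64;				/* hypothetical header length */
	p->qid = QED_RFS_NTUPLE_QID_RSS;	/* let RSS pick the queue */
	p->b_is_vf = false;
	p->vport_id = 0;
	p->b_is_add = true;
	p->b_is_drop = false;
}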
struct qed_dev_eth_info {
struct qed_dev_info common;
@@ -132,12 +145,6 @@ struct qed_filter_mcast_params {
unsigned char mac[64][ETH_ALEN];
};
-union qed_filter_type_params {
- enum qed_filter_rx_mode_type accept_flags;
- struct qed_filter_ucast_params ucast;
- struct qed_filter_mcast_params mcast;
-};
-
enum qed_filter_type {
QED_FILTER_TYPE_UCAST,
QED_FILTER_TYPE_MCAST,
@@ -145,11 +152,6 @@ enum qed_filter_type {
QED_MAX_FILTER_TYPES,
};
-struct qed_filter_params {
- enum qed_filter_type type;
- union qed_filter_type_params filter;
-};
-
struct qed_tunn_params {
u16 vxlan_port;
u8 update_vxlan_port;
@@ -301,8 +303,14 @@ struct qed_eth_ops {
int (*q_tx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle);
- int (*filter_config)(struct qed_dev *cdev,
- struct qed_filter_params *params);
+ int (*filter_config_rx_mode)(struct qed_dev *cdev,
+ enum qed_filter_rx_mode_type type);
+
+ int (*filter_config_ucast)(struct qed_dev *cdev,
+ struct qed_filter_ucast_params *params);
+
+ int (*filter_config_mcast)(struct qed_dev *cdev,
+ struct qed_filter_mcast_params *params);
int (*fastpath_stop)(struct qed_dev *cdev);
@@ -316,13 +324,14 @@ struct qed_eth_ops {
int (*tunn_config)(struct qed_dev *cdev,
struct qed_tunn_params *params);
- int (*ntuple_filter_config)(struct qed_dev *cdev, void *cookie,
- dma_addr_t mapping, u16 length,
- u16 vport_id, u16 rx_queue_id,
- bool add_filter);
+ int (*ntuple_filter_config)(struct qed_dev *cdev,
+ void *cookie,
+ struct qed_ntuple_filter_params *params);
int (*configure_arfs_searcher)(struct qed_dev *cdev,
- bool en_searcher);
+ enum qed_filter_config_mode mode);
+ int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle);
+ int (*req_bulletin_update_mac)(struct qed_dev *cdev, const u8 *mac);
};
const struct qed_eth_ops *qed_get_eth_ops(void);
diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h
index 1e015c50e6b8..90e3045b2dcb 100644
--- a/include/linux/qed/qed_fcoe_if.h
+++ b/include/linux/qed/qed_fcoe_if.h
@@ -1,3 +1,6 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright (c) 2019-2020 Marvell International Ltd. */
+
#ifndef _QED_FCOE_IF_H
#define _QED_FCOE_IF_H
#include <linux/types.h>
@@ -73,7 +76,7 @@ void qed_fcoe_set_pf_params(struct qed_dev *cdev,
* @fill_dev_info: fills FCoE specific information
* @param cdev
* @param info
- * @return 0 on sucesss, otherwise error value.
+ * @return 0 on success, otherwise error value.
* @register_ops: register FCoE operations
* @param cdev
* @param ops - specified using qed_iscsi_cb_ops
@@ -93,7 +96,7 @@ void qed_fcoe_set_pf_params(struct qed_dev *cdev,
* connection.
* @param p_doorbell - qed will fill the address of the
* doorbell.
- * return 0 on sucesss, otherwise error value.
+ * return 0 on success, otherwise error value.
* @release_conn: release a previously acquired fcoe connection
* @param cdev
* @param handle - the connection handle.
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index ef39c7f40ae6..6dc4943d8aec 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -1,44 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef _QED_IF_H
#define _QED_IF_H
+#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
-#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/compiler.h>
@@ -47,6 +21,11 @@
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/qed_chain.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <net/devlink.h>
+
+#define QED_TX_SWS_TIMER_DFLT 500
+#define QED_TWO_MSL_TIMER_DFLT 4000
enum dcbx_protocol_type {
DCBX_PROTOCOL_ISCSI,
@@ -159,6 +138,22 @@ struct qed_dcbx_get {
enum qed_nvm_images {
QED_NVM_IMAGE_ISCSI_CFG,
QED_NVM_IMAGE_FCOE_CFG,
+ QED_NVM_IMAGE_MDUMP,
+ QED_NVM_IMAGE_NVM_CFG1,
+ QED_NVM_IMAGE_DEFAULT_CFG,
+ QED_NVM_IMAGE_NVM_META,
+};
+
+struct qed_link_eee_params {
+ u32 tx_lpi_timer;
+#define QED_EEE_1G_ADV BIT(0)
+#define QED_EEE_10G_ADV BIT(1)
+
+ /* Capabilities are represented using QED_EEE_*_ADV values */
+ u8 adv_caps;
+ u8 lp_adv_caps;
+ bool enable;
+ bool tx_lpi_enable;
};
enum qed_led_mode {
@@ -167,13 +162,293 @@ enum qed_led_mode {
QED_LED_MODE_RESTORE
};
+struct qed_mfw_tlv_eth {
+ u16 lso_maxoff_size;
+ bool lso_maxoff_size_set;
+ u16 lso_minseg_size;
+ bool lso_minseg_size_set;
+ u8 prom_mode;
+ bool prom_mode_set;
+ u16 tx_descr_size;
+ bool tx_descr_size_set;
+ u16 rx_descr_size;
+ bool rx_descr_size_set;
+ u16 netq_count;
+ bool netq_count_set;
+ u32 tcp4_offloads;
+ bool tcp4_offloads_set;
+ u32 tcp6_offloads;
+ bool tcp6_offloads_set;
+ u16 tx_descr_qdepth;
+ bool tx_descr_qdepth_set;
+ u16 rx_descr_qdepth;
+ bool rx_descr_qdepth_set;
+ u8 iov_offload;
+#define QED_MFW_TLV_IOV_OFFLOAD_NONE (0)
+#define QED_MFW_TLV_IOV_OFFLOAD_MULTIQUEUE (1)
+#define QED_MFW_TLV_IOV_OFFLOAD_VEB (2)
+#define QED_MFW_TLV_IOV_OFFLOAD_VEPA (3)
+ bool iov_offload_set;
+ u8 txqs_empty;
+ bool txqs_empty_set;
+ u8 rxqs_empty;
+ bool rxqs_empty_set;
+ u8 num_txqs_full;
+ bool num_txqs_full_set;
+ u8 num_rxqs_full;
+ bool num_rxqs_full_set;
+};
+
+#define QED_MFW_TLV_TIME_SIZE 14
+struct qed_mfw_tlv_time {
+ bool b_set;
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 min;
+ u16 msec;
+ u16 usec;
+};
+
+struct qed_mfw_tlv_fcoe {
+ u8 scsi_timeout;
+ bool scsi_timeout_set;
+ u32 rt_tov;
+ bool rt_tov_set;
+ u32 ra_tov;
+ bool ra_tov_set;
+ u32 ed_tov;
+ bool ed_tov_set;
+ u32 cr_tov;
+ bool cr_tov_set;
+ u8 boot_type;
+ bool boot_type_set;
+ u8 npiv_state;
+ bool npiv_state_set;
+ u32 num_npiv_ids;
+ bool num_npiv_ids_set;
+ u8 switch_name[8];
+ bool switch_name_set;
+ u16 switch_portnum;
+ bool switch_portnum_set;
+ u8 switch_portid[3];
+ bool switch_portid_set;
+ u8 vendor_name[8];
+ bool vendor_name_set;
+ u8 switch_model[8];
+ bool switch_model_set;
+ u8 switch_fw_version[8];
+ bool switch_fw_version_set;
+ u8 qos_pri;
+ bool qos_pri_set;
+ u8 port_alias[3];
+ bool port_alias_set;
+ u8 port_state;
+#define QED_MFW_TLV_PORT_STATE_OFFLINE (0)
+#define QED_MFW_TLV_PORT_STATE_LOOP (1)
+#define QED_MFW_TLV_PORT_STATE_P2P (2)
+#define QED_MFW_TLV_PORT_STATE_FABRIC (3)
+ bool port_state_set;
+ u16 fip_tx_descr_size;
+ bool fip_tx_descr_size_set;
+ u16 fip_rx_descr_size;
+ bool fip_rx_descr_size_set;
+ u16 link_failures;
+ bool link_failures_set;
+ u8 fcoe_boot_progress;
+ bool fcoe_boot_progress_set;
+ u64 rx_bcast;
+ bool rx_bcast_set;
+ u64 tx_bcast;
+ bool tx_bcast_set;
+ u16 fcoe_txq_depth;
+ bool fcoe_txq_depth_set;
+ u16 fcoe_rxq_depth;
+ bool fcoe_rxq_depth_set;
+ u64 fcoe_rx_frames;
+ bool fcoe_rx_frames_set;
+ u64 fcoe_rx_bytes;
+ bool fcoe_rx_bytes_set;
+ u64 fcoe_tx_frames;
+ bool fcoe_tx_frames_set;
+ u64 fcoe_tx_bytes;
+ bool fcoe_tx_bytes_set;
+ u16 crc_count;
+ bool crc_count_set;
+ u32 crc_err_src_fcid[5];
+ bool crc_err_src_fcid_set[5];
+ struct qed_mfw_tlv_time crc_err[5];
+ u16 losync_err;
+ bool losync_err_set;
+ u16 losig_err;
+ bool losig_err_set;
+ u16 primtive_err;
+ bool primtive_err_set;
+ u16 disparity_err;
+ bool disparity_err_set;
+ u16 code_violation_err;
+ bool code_violation_err_set;
+ u32 flogi_param[4];
+ bool flogi_param_set[4];
+ struct qed_mfw_tlv_time flogi_tstamp;
+ u32 flogi_acc_param[4];
+ bool flogi_acc_param_set[4];
+ struct qed_mfw_tlv_time flogi_acc_tstamp;
+ u32 flogi_rjt;
+ bool flogi_rjt_set;
+ struct qed_mfw_tlv_time flogi_rjt_tstamp;
+ u32 fdiscs;
+ bool fdiscs_set;
+ u8 fdisc_acc;
+ bool fdisc_acc_set;
+ u8 fdisc_rjt;
+ bool fdisc_rjt_set;
+ u8 plogi;
+ bool plogi_set;
+ u8 plogi_acc;
+ bool plogi_acc_set;
+ u8 plogi_rjt;
+ bool plogi_rjt_set;
+ u32 plogi_dst_fcid[5];
+ bool plogi_dst_fcid_set[5];
+ struct qed_mfw_tlv_time plogi_tstamp[5];
+ u32 plogi_acc_src_fcid[5];
+ bool plogi_acc_src_fcid_set[5];
+ struct qed_mfw_tlv_time plogi_acc_tstamp[5];
+ u8 tx_plogos;
+ bool tx_plogos_set;
+ u8 plogo_acc;
+ bool plogo_acc_set;
+ u8 plogo_rjt;
+ bool plogo_rjt_set;
+ u32 plogo_src_fcid[5];
+ bool plogo_src_fcid_set[5];
+ struct qed_mfw_tlv_time plogo_tstamp[5];
+ u8 rx_logos;
+ bool rx_logos_set;
+ u8 tx_accs;
+ bool tx_accs_set;
+ u8 tx_prlis;
+ bool tx_prlis_set;
+ u8 rx_accs;
+ bool rx_accs_set;
+ u8 tx_abts;
+ bool tx_abts_set;
+ u8 rx_abts_acc;
+ bool rx_abts_acc_set;
+ u8 rx_abts_rjt;
+ bool rx_abts_rjt_set;
+ u32 abts_dst_fcid[5];
+ bool abts_dst_fcid_set[5];
+ struct qed_mfw_tlv_time abts_tstamp[5];
+ u8 rx_rscn;
+ bool rx_rscn_set;
+ u32 rx_rscn_nport[4];
+ bool rx_rscn_nport_set[4];
+ u8 tx_lun_rst;
+ bool tx_lun_rst_set;
+ u8 abort_task_sets;
+ bool abort_task_sets_set;
+ u8 tx_tprlos;
+ bool tx_tprlos_set;
+ u8 tx_nos;
+ bool tx_nos_set;
+ u8 rx_nos;
+ bool rx_nos_set;
+ u8 ols;
+ bool ols_set;
+ u8 lr;
+ bool lr_set;
+ u8 lrr;
+ bool lrr_set;
+ u8 tx_lip;
+ bool tx_lip_set;
+ u8 rx_lip;
+ bool rx_lip_set;
+ u8 eofa;
+ bool eofa_set;
+ u8 eofni;
+ bool eofni_set;
+ u8 scsi_chks;
+ bool scsi_chks_set;
+ u8 scsi_cond_met;
+ bool scsi_cond_met_set;
+ u8 scsi_busy;
+ bool scsi_busy_set;
+ u8 scsi_inter;
+ bool scsi_inter_set;
+ u8 scsi_inter_cond_met;
+ bool scsi_inter_cond_met_set;
+ u8 scsi_rsv_conflicts;
+ bool scsi_rsv_conflicts_set;
+ u8 scsi_tsk_full;
+ bool scsi_tsk_full_set;
+ u8 scsi_aca_active;
+ bool scsi_aca_active_set;
+ u8 scsi_tsk_abort;
+ bool scsi_tsk_abort_set;
+ u32 scsi_rx_chk[5];
+ bool scsi_rx_chk_set[5];
+ struct qed_mfw_tlv_time scsi_chk_tstamp[5];
+};
+
+struct qed_mfw_tlv_iscsi {
+ u8 target_llmnr;
+ bool target_llmnr_set;
+ u8 header_digest;
+ bool header_digest_set;
+ u8 data_digest;
+ bool data_digest_set;
+ u8 auth_method;
+#define QED_MFW_TLV_AUTH_METHOD_NONE (1)
+#define QED_MFW_TLV_AUTH_METHOD_CHAP (2)
+#define QED_MFW_TLV_AUTH_METHOD_MUTUAL_CHAP (3)
+ bool auth_method_set;
+ u16 boot_taget_portal;
+ bool boot_taget_portal_set;
+ u16 frame_size;
+ bool frame_size_set;
+ u16 tx_desc_size;
+ bool tx_desc_size_set;
+ u16 rx_desc_size;
+ bool rx_desc_size_set;
+ u8 boot_progress;
+ bool boot_progress_set;
+ u16 tx_desc_qdepth;
+ bool tx_desc_qdepth_set;
+ u16 rx_desc_qdepth;
+ bool rx_desc_qdepth_set;
+ u64 rx_frames;
+ bool rx_frames_set;
+ u64 rx_bytes;
+ bool rx_bytes_set;
+ u64 tx_frames;
+ bool tx_frames_set;
+ u64 tx_bytes;
+ bool tx_bytes_set;
+};
+
+enum qed_db_rec_width {
+ DB_REC_WIDTH_32B,
+ DB_REC_WIDTH_64B,
+};
+
+enum qed_db_rec_space {
+ DB_REC_KERNEL,
+ DB_REC_USER,
+};
+
#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
(void __iomem *)(reg_addr))
#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
-#define QED_COALESCE_MAX 0xFF
+#define DIRECT_REG_WR64(reg_addr, val) writeq((u64)val, \
+ (void __iomem *)(reg_addr))
+
+#define QED_COALESCE_MAX 0x1FF
#define QED_DEFAULT_RX_USECS 12
+#define QED_DEFAULT_TX_USECS 48
/* forward */
struct qed_dev;
@@ -228,19 +503,14 @@ struct qed_fcoe_pf_params {
u8 bdq_pbl_num_entries[2];
};
-/* Most of the the parameters below are described in the FW iSCSI / TCP HSI */
+/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
struct qed_iscsi_pf_params {
u64 glbl_q_params_addr;
- u64 bdq_pbl_base_addr[2];
- u32 max_cwnd;
+ u64 bdq_pbl_base_addr[3];
u16 cq_num_entries;
u16 cmdq_num_entries;
u32 two_msl_timer;
- u16 dup_ack_threshold;
u16 tx_sws_timer;
- u16 min_rto;
- u16 min_rto_rt;
- u16 max_rto;
/* The following parameters are used during HW-init
* and these parameters need to be passed as arguments
@@ -251,8 +521,8 @@ struct qed_iscsi_pf_params {
/* The following parameters are used during protocol-init */
u16 half_way_close_timeout;
- u16 bdq_xoff_threshold[2];
- u16 bdq_xon_threshold[2];
+ u16 bdq_xoff_threshold[3];
+ u16 bdq_xon_threshold[3];
u16 cmdq_xoff_threshold;
u16 cmdq_xon_threshold;
u16 rq_buffer_size;
@@ -268,10 +538,27 @@ struct qed_iscsi_pf_params {
u8 gl_cmd_pi;
u8 debug_mode;
u8 ll2_ooo_queue_id;
- u8 ooo_enable;
u8 is_target;
- u8 bdq_pbl_num_entries[2];
+ u8 is_soc_en;
+ u8 soc_num_of_blocks_log;
+ u8 bdq_pbl_num_entries[3];
+};
+
+struct qed_nvmetcp_pf_params {
+ u64 glbl_q_params_addr;
+ u16 cq_num_entries;
+ u16 num_cons;
+ u16 num_tasks;
+ u8 num_sq_pages_in_ring;
+ u8 num_r2tq_pages_in_ring;
+ u8 num_uhq_pages_in_ring;
+ u8 num_queues;
+ u8 gl_rq_pi;
+ u8 gl_cmd_pi;
+ u8 debug_mode;
+ u8 ll2_ooo_queue_id;
+ u16 min_rto;
};
struct qed_rdma_pf_params {
@@ -292,6 +579,7 @@ struct qed_pf_params {
struct qed_eth_pf_params eth_pf_params;
struct qed_fcoe_pf_params fcoe_pf_params;
struct qed_iscsi_pf_params iscsi_pf_params;
+ struct qed_nvmetcp_pf_params nvmetcp_pf_params;
struct qed_rdma_pf_params rdma_pf_params;
};
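/* Editor's sketch, not part of this patch: a protocol driver filling the
 * new NVMe-TCP PF parameters before slowpath start. All counts are
 * hypothetical.
 */
static void example_set_nvmetcp_params(struct qed_pf_params *pf)
{
	pf->nvmetcp_pf_params.num_cons = 64;
	pf->nvmetcp_pf_params.num_tasks = 64;
	pf->nvmetcp_pf_params.cq_num_entries = 128;
	pf->nvmetcp_pf_params.num_sq_pages_in_ring = 1;
	pf->nvmetcp_pf_params.num_r2tq_pages_in_ring = 1;
	pf->nvmetcp_pf_params.num_uhq_pages_in_ring = 1;
}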
@@ -303,16 +591,26 @@ enum qed_int_mode {
};
struct qed_sb_info {
- struct status_block *sb_virt;
- dma_addr_t sb_phys;
- u32 sb_ack; /* Last given ack */
- u16 igu_sb_id;
- void __iomem *igu_addr;
- u8 flags;
-#define QED_SB_INFO_INIT 0x1
-#define QED_SB_INFO_SETUP 0x2
+ struct status_block *sb_virt;
+ dma_addr_t sb_phys;
+ u32 sb_ack; /* Last given ack */
+ u16 igu_sb_id;
+ void __iomem *igu_addr;
+ u8 flags;
+#define QED_SB_INFO_INIT 0x1
+#define QED_SB_INFO_SETUP 0x2
+
+ struct qed_dev *cdev;
+};
- struct qed_dev *cdev;
+enum qed_hw_err_type {
+ QED_HW_ERR_FAN_FAIL,
+ QED_HW_ERR_MFW_RESP_FAIL,
+ QED_HW_ERR_HW_ATTN,
+ QED_HW_ERR_DMAE_FAIL,
+ QED_HW_ERR_RAMROD_FAIL,
+ QED_HW_ERR_FW_ASSERT,
+ QED_HW_ERR_LAST,
};
enum qed_dev_type {
@@ -327,7 +625,6 @@ struct qed_dev_info {
u8 num_hwfns;
u8 hw_mac[ETH_ALEN];
- bool is_mf_default;
/* FW version */
u16 fw_major;
@@ -347,12 +644,15 @@ struct qed_dev_info {
#define QED_MFW_VERSION_3_OFFSET 24
u32 flash_size;
- u8 mf_mode;
+ bool b_arfs_capable;
+ bool b_inter_pf_switch;
bool tx_switching;
bool rdma_supported;
u16 mtu;
bool wol_support;
+ bool smart_an;
+ bool esl;
/* MBI version */
u32 mbi_version;
@@ -382,61 +682,76 @@ enum qed_sb_type {
enum qed_protocol {
QED_PROTOCOL_ETH,
QED_PROTOCOL_ISCSI,
+ QED_PROTOCOL_NVMETCP = QED_PROTOCOL_ISCSI,
QED_PROTOCOL_FCOE,
};
-enum qed_link_mode_bits {
- QED_LM_FIBRE_BIT = BIT(0),
- QED_LM_Autoneg_BIT = BIT(1),
- QED_LM_Asym_Pause_BIT = BIT(2),
- QED_LM_Pause_BIT = BIT(3),
- QED_LM_1000baseT_Half_BIT = BIT(4),
- QED_LM_1000baseT_Full_BIT = BIT(5),
- QED_LM_10000baseKR_Full_BIT = BIT(6),
- QED_LM_25000baseKR_Full_BIT = BIT(7),
- QED_LM_40000baseLR4_Full_BIT = BIT(8),
- QED_LM_50000baseKR2_Full_BIT = BIT(9),
- QED_LM_100000baseKR4_Full_BIT = BIT(10),
- QED_LM_COUNT = 11
+enum qed_fec_mode {
+ QED_FEC_MODE_NONE = BIT(0),
+ QED_FEC_MODE_FIRECODE = BIT(1),
+ QED_FEC_MODE_RS = BIT(2),
+ QED_FEC_MODE_AUTO = BIT(3),
+ QED_FEC_MODE_UNSUPPORTED = BIT(4),
};
struct qed_link_params {
- bool link_up;
-
-#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0)
-#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1)
-#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2)
-#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3)
-#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4)
- u32 override_flags;
- bool autoneg;
- u32 adv_speeds;
- u32 forced_speed;
-#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0)
-#define QED_LINK_PAUSE_RX_ENABLE BIT(1)
-#define QED_LINK_PAUSE_TX_ENABLE BIT(2)
- u32 pause_config;
-#define QED_LINK_LOOPBACK_NONE BIT(0)
-#define QED_LINK_LOOPBACK_INT_PHY BIT(1)
-#define QED_LINK_LOOPBACK_EXT_PHY BIT(2)
-#define QED_LINK_LOOPBACK_EXT BIT(3)
-#define QED_LINK_LOOPBACK_MAC BIT(4)
- u32 loopback_mode;
+ bool link_up;
+
+ u32 override_flags;
+#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0)
+#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1)
+#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2)
+#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3)
+#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4)
+#define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5)
+#define QED_LINK_OVERRIDE_FEC_CONFIG BIT(6)
+
+ bool autoneg;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_speeds);
+ u32 forced_speed;
+
+ u32 pause_config;
+#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0)
+#define QED_LINK_PAUSE_RX_ENABLE BIT(1)
+#define QED_LINK_PAUSE_TX_ENABLE BIT(2)
+
+ u32 loopback_mode;
+#define QED_LINK_LOOPBACK_NONE BIT(0)
+#define QED_LINK_LOOPBACK_INT_PHY BIT(1)
+#define QED_LINK_LOOPBACK_EXT_PHY BIT(2)
+#define QED_LINK_LOOPBACK_EXT BIT(3)
+#define QED_LINK_LOOPBACK_MAC BIT(4)
+#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123 BIT(5)
+#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301 BIT(6)
+#define QED_LINK_LOOPBACK_PCS_AH_ONLY BIT(7)
+#define QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY BIT(8)
+#define QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY BIT(9)
+
+ struct qed_link_eee_params eee;
+ u32 fec;
};
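/* Editor's sketch, not part of this patch: requesting EEE via the new
 * override flag. The ethtool-style adv_speeds mask is left untouched
 * here; the timer value is hypothetical.
 */
static void example_enable_eee(struct qed_link_params *params)
{
	params->override_flags |= QED_LINK_OVERRIDE_EEE_CONFIG;
	params->eee.enable = true;
	params->eee.tx_lpi_enable = true;
	params->eee.tx_lpi_timer = 1000;	/* hypothetical */
	params->eee.adv_caps = QED_EEE_1G_ADV | QED_EEE_10G_ADV;
}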
struct qed_link_output {
- bool link_up;
-
- /* In QED_LM_* defs */
- u32 supported_caps;
- u32 advertised_caps;
- u32 lp_caps;
-
- u32 speed; /* In Mb/s */
- u8 duplex; /* In DUPLEX defs */
- u8 port; /* In PORT defs */
- bool autoneg;
- u32 pause_config;
+ bool link_up;
+
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_caps);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised_caps);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_caps);
+
+ u32 speed; /* In Mb/s */
+ u8 duplex; /* In DUPLEX defs */
+ u8 port; /* In PORT defs */
+ bool autoneg;
+ u32 pause_config;
+
+ /* EEE - capability & param */
+ bool eee_supported;
+ bool eee_active;
+ u8 sup_caps;
+ struct qed_link_eee_params eee;
+
+ u32 sup_fec;
+ u32 active_fec;
};
struct qed_probe_params {
@@ -444,6 +759,7 @@ struct qed_probe_params {
u32 dp_module;
u8 dp_level;
bool is_vf;
+ bool recov_in_prog;
};
#define QED_DRV_VER_STR_SIZE 12
@@ -466,56 +782,93 @@ struct qed_int_info {
u8 used_cnt;
};
+struct qed_generic_tlvs {
+#define QED_TLV_IP_CSUM BIT(0)
+#define QED_TLV_LSO BIT(1)
+ u16 feat_flags;
+#define QED_TLV_MAC_COUNT 3
+ u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN];
+};
+
+#define QED_I2C_DEV_ADDR_A0 0xA0
+#define QED_I2C_DEV_ADDR_A2 0xA2
+
+#define QED_NVM_SIGNATURE 0x12435687
+
+enum qed_nvm_flash_cmd {
+ QED_NVM_FLASH_CMD_FILE_DATA = 0x2,
+ QED_NVM_FLASH_CMD_FILE_START = 0x3,
+ QED_NVM_FLASH_CMD_NVM_CHANGE = 0x4,
+ QED_NVM_FLASH_CMD_NVM_CFG_ID = 0x5,
+ QED_NVM_FLASH_CMD_NVM_MAX,
+};
+
+struct qed_devlink {
+ struct qed_dev *cdev;
+ struct devlink_health_reporter *fw_reporter;
+};
+
+struct qed_sb_info_dbg {
+ u32 igu_prod;
+ u32 igu_cons;
+ u16 pi[PIS_PER_SB];
+};
+
struct qed_common_cb_ops {
void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc);
- void (*link_update)(void *dev,
- struct qed_link_output *link);
- void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
+ void (*link_update)(void *dev, struct qed_link_output *link);
+ void (*schedule_recovery_handler)(void *dev);
+ void (*schedule_hw_err_handler)(void *dev,
+ enum qed_hw_err_type err_type);
+ void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
+ void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data);
+ void (*get_protocol_tlv_data)(void *dev, void *data);
+ void (*bw_update)(void *dev);
};
struct qed_selftest_ops {
/**
- * @brief selftest_interrupt - Perform interrupt test
+ * selftest_interrupt(): Perform interrupt test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_interrupt)(struct qed_dev *cdev);
/**
- * @brief selftest_memory - Perform memory test
+ * selftest_memory(): Perform memory test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_memory)(struct qed_dev *cdev);
/**
- * @brief selftest_register - Perform register test
+ * selftest_register(): Perform register test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_register)(struct qed_dev *cdev);
/**
- * @brief selftest_clock - Perform clock test
+ * selftest_clock(): Perform clock test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_clock)(struct qed_dev *cdev);
/**
- * @brief selftest_nvram - Perform nvram test
+ * selftest_nvram(): Perform nvram test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_nvram) (struct qed_dev *cdev);
};
@@ -526,10 +879,9 @@ struct qed_common_ops {
struct qed_dev* (*probe)(struct pci_dev *dev,
struct qed_probe_params *params);
- void (*remove)(struct qed_dev *cdev);
+ void (*remove)(struct qed_dev *cdev);
- int (*set_power_state)(struct qed_dev *cdev,
- pci_power_t state);
+ int (*set_power_state)(struct qed_dev *cdev, pci_power_t state);
void (*set_name) (struct qed_dev *cdev, char name[]);
@@ -537,191 +889,325 @@ struct qed_common_ops {
* PF params required for the call before slowpath_start is
* documented within the qed_pf_params structure definition.
*/
- void (*update_pf_params)(struct qed_dev *cdev,
- struct qed_pf_params *params);
- int (*slowpath_start)(struct qed_dev *cdev,
- struct qed_slowpath_params *params);
+ void (*update_pf_params)(struct qed_dev *cdev,
+ struct qed_pf_params *params);
- int (*slowpath_stop)(struct qed_dev *cdev);
+ int (*slowpath_start)(struct qed_dev *cdev,
+ struct qed_slowpath_params *params);
+
+ int (*slowpath_stop)(struct qed_dev *cdev);
/* Requests to use `cnt' interrupts for fastpath.
* upon success, returns number of interrupts allocated for fastpath.
*/
- int (*set_fp_int)(struct qed_dev *cdev,
- u16 cnt);
+ int (*set_fp_int)(struct qed_dev *cdev, u16 cnt);
/* Fills `info' with pointers required for utilizing interrupts */
- int (*get_fp_int)(struct qed_dev *cdev,
- struct qed_int_info *info);
-
- u32 (*sb_init)(struct qed_dev *cdev,
- struct qed_sb_info *sb_info,
- void *sb_virt_addr,
- dma_addr_t sb_phy_addr,
- u16 sb_id,
- enum qed_sb_type type);
-
- u32 (*sb_release)(struct qed_dev *cdev,
- struct qed_sb_info *sb_info,
- u16 sb_id);
-
- void (*simd_handler_config)(struct qed_dev *cdev,
- void *token,
- int index,
- void (*handler)(void *));
-
- void (*simd_handler_clean)(struct qed_dev *cdev,
- int index);
- int (*dbg_grc)(struct qed_dev *cdev,
- void *buffer, u32 *num_dumped_bytes);
+ int (*get_fp_int)(struct qed_dev *cdev, struct qed_int_info *info);
+
+ u32 (*sb_init)(struct qed_dev *cdev,
+ struct qed_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr,
+ u16 sb_id,
+ enum qed_sb_type type);
+
+ u32 (*sb_release)(struct qed_dev *cdev,
+ struct qed_sb_info *sb_info,
+ u16 sb_id,
+ enum qed_sb_type type);
+
+ void (*simd_handler_config)(struct qed_dev *cdev,
+ void *token,
+ int index,
+ void (*handler)(void *));
+
+ void (*simd_handler_clean)(struct qed_dev *cdev, int index);
+
+ int (*dbg_grc)(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
int (*dbg_grc_size)(struct qed_dev *cdev);
- int (*dbg_all_data) (struct qed_dev *cdev, void *buffer);
+ int (*dbg_all_data)(struct qed_dev *cdev, void *buffer);
- int (*dbg_all_data_size) (struct qed_dev *cdev);
+ int (*dbg_all_data_size)(struct qed_dev *cdev);
+
+ int (*report_fatal_error)(struct devlink *devlink,
+ enum qed_hw_err_type err_type);
/**
- * @brief can_link_change - can the instance change the link or not
+ * can_link_change(): Can the instance change the link or not.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return true if link-change is allowed, false otherwise.
+ * Return: true if link-change is allowed, false otherwise.
*/
bool (*can_link_change)(struct qed_dev *cdev);
/**
- * @brief set_link - set links according to params
+ * set_link(): Set links according to params.
*
- * @param cdev
- * @param params - values used to override the default link configuration
+ * @cdev: Qed dev pointer.
+ * @params: values used to override the default link configuration.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*set_link)(struct qed_dev *cdev,
struct qed_link_params *params);
/**
- * @brief get_link - returns the current link state.
+ * get_link(): Returns the current link state.
+ *
+ * @cdev: Qed dev pointer.
+ * @if_link: structure to be filled with current link configuration.
*
- * @param cdev
- * @param if_link - structure to be filled with current link configuration.
+ * Return: Void.
*/
void (*get_link)(struct qed_dev *cdev,
struct qed_link_output *if_link);
/**
- * @brief - drains chip in case Tx completions fail to arrive due to pause.
+ * drain(): Drains chip in case Tx completions fail to arrive due to pause.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
+ *
+ * Return: Int.
*/
int (*drain)(struct qed_dev *cdev);
/**
- * @brief update_msglvl - update module debug level
+ * update_msglvl(): Update module debug level.
+ *
+ * @cdev: Qed dev pointer.
+ * @dp_module: Debug module.
+ * @dp_level: Debug level.
*
- * @param cdev
- * @param dp_module
- * @param dp_level
+ * Return: Void.
*/
void (*update_msglvl)(struct qed_dev *cdev,
u32 dp_module,
u8 dp_level);
int (*chain_alloc)(struct qed_dev *cdev,
- enum qed_chain_use_mode intended_use,
- enum qed_chain_mode mode,
- enum qed_chain_cnt_type cnt_type,
- u32 num_elems,
- size_t elem_size,
- struct qed_chain *p_chain,
- struct qed_chain_ext_pbl *ext_pbl);
+ struct qed_chain *chain,
+ struct qed_chain_init_params *params);
void (*chain_free)(struct qed_dev *cdev,
struct qed_chain *p_chain);
/**
- * @brief nvm_get_image - reads an entire image from nvram
+ * nvm_flash(): Flash nvm data.
*
- * @param cdev
- * @param type - type of the request nvram image
- * @param buf - preallocated buffer to fill with the image
- * @param len - length of the allocated buffer
+ * @cdev: Qed dev pointer.
+ * @name: file containing the data.
*
- * @return 0 on success, error otherwise
+ * Return: 0 on success, error otherwise.
*/
- int (*nvm_get_image)(struct qed_dev *cdev,
- enum qed_nvm_images type, u8 *buf, u16 len);
+ int (*nvm_flash)(struct qed_dev *cdev, const char *name);
/**
- * @brief get_coalesce - Get coalesce parameters in usec
+ * nvm_get_image(): Reads an entire image from nvram.
*
- * @param cdev
- * @param rx_coal - Rx coalesce value in usec
- * @param tx_coal - Tx coalesce value in usec
+ * @cdev: Qed dev pointer.
+ * @type: type of the requested nvram image.
+ * @buf: preallocated buffer to fill with the image.
+ * @len: length of the allocated buffer.
*
+ * Return: 0 on success, error otherwise.
*/
- void (*get_coalesce)(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal);
+ int (*nvm_get_image)(struct qed_dev *cdev,
+ enum qed_nvm_images type, u8 *buf, u16 len);
/**
- * @brief set_coalesce - Configure Rx coalesce value in usec
+ * set_coalesce(): Configure Rx and Tx coalesce values in usec.
*
- * @param cdev
- * @param rx_coal - Rx coalesce value in usec
- * @param tx_coal - Tx coalesce value in usec
- * @param qid - Queue index
- * @param sb_id - Status Block Id
+ * @cdev: Qed dev pointer.
+ * @rx_coal: Rx coalesce value in usec.
+ * @tx_coal: Tx coalesce value in usec.
+ * @handle: Handle.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
- int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
- u16 qid, u16 sb_id);
+ int (*set_coalesce)(struct qed_dev *cdev,
+ u16 rx_coal, u16 tx_coal, void *handle);
/**
- * @brief set_led - Configure LED mode
+ * set_led(): Configure LED mode.
*
- * @param cdev
- * @param mode - LED mode
+ * @cdev: Qed dev pointer.
+ * @mode: LED mode.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*set_led)(struct qed_dev *cdev,
enum qed_led_mode mode);
/**
- * @brief update_drv_state - API to inform the change in the driver state.
+ * attn_clr_enable(): Prevent attentions from being reasserted.
+ *
+ * @cdev: Qed dev pointer.
+ * @clr_enable: Clear enable.
+ *
+ * Return: Void.
+ */
+ void (*attn_clr_enable)(struct qed_dev *cdev, bool clr_enable);
+
+/**
+ * db_recovery_add(): Add doorbell information to the doorbell
+ * recovery mechanism.
+ *
+ * @cdev: Qed dev pointer.
+ * @db_addr: Doorbell address.
+ * @db_data: Address of where db_data is stored.
+ * @db_width: Doorbell is 32b or 64b.
+ * @db_space: Doorbell recovery addresses are user or kernel space.
+ *
+ * Return: Int.
+ */
+ int (*db_recovery_add)(struct qed_dev *cdev,
+ void __iomem *db_addr,
+ void *db_data,
+ enum qed_db_rec_width db_width,
+ enum qed_db_rec_space db_space);
+
+/**
+ * db_recovery_del(): Remove doorbell information from the doorbell
+ * recovery mechanism. db_data serves as key (db_addr is not unique).
+ *
+ * @cdev: Qed dev pointer.
+ * @db_addr: Doorbell address.
+ * @db_data: Address where db_data is stored. Serves as key for the
+ * entry to delete.
+ *
+ * Return: Int.
+ */
+ int (*db_recovery_del)(struct qed_dev *cdev,
+ void __iomem *db_addr, void *db_data);
+
+/**
+ * recovery_process(): Trigger a recovery process.
+ *
+ * @cdev: Qed dev pointer.
+ *
+ * Return: 0 on success, error otherwise.
+ */
+ int (*recovery_process)(struct qed_dev *cdev);
+
+/**
+ * recovery_prolog(): Execute the prolog operations of a recovery process.
+ *
+ * @cdev: Qed dev pointer.
+ *
+ * Return: 0 on success, error otherwise.
+ */
+ int (*recovery_prolog)(struct qed_dev *cdev);
+
+/**
+ * update_drv_state(): API to inform the change in the driver state.
*
- * @param cdev
- * @param active
+ * @cdev: Qed dev pointer.
+ * @active: Whether the driver is active.
 *
+ * Return: 0 on success, error otherwise.
*/
int (*update_drv_state)(struct qed_dev *cdev, bool active);
/**
- * @brief update_mac - API to inform the change in the mac address
+ * update_mac(): API to inform the change in the mac address.
*
- * @param cdev
- * @param mac
+ * @cdev: Qed dev pointer.
+ * @mac: New MAC address.
 *
+ * Return: 0 on success, error otherwise.
*/
- int (*update_mac)(struct qed_dev *cdev, u8 *mac);
+ int (*update_mac)(struct qed_dev *cdev, const u8 *mac);
/**
- * @brief update_mtu - API to inform the change in the mtu
+ * update_mtu(): API to inform the change in the mtu.
*
- * @param cdev
- * @param mtu
+ * @cdev: Qed dev pointer.
+ * @mtu: New MTU value.
 *
+ * Return: 0 on success, error otherwise.
*/
int (*update_mtu)(struct qed_dev *cdev, u16 mtu);
/**
- * @brief update_wol - update of changes in the WoL configuration
+ * update_wol(): Inform of changes in the WoL configuration.
*
- * @param cdev
- * @param enabled - true iff WoL should be enabled.
+ * @cdev: Qed dev pointer.
+ * @enabled: true iff WoL should be enabled.
+ *
+ * Return: 0 on success, error otherwise.
*/
int (*update_wol) (struct qed_dev *cdev, bool enabled);
+
+/**
+ * read_module_eeprom(): Read EEPROM.
+ *
+ * @cdev: Qed dev pointer.
+ * @buf: Buffer to read the EEPROM contents into.
+ * @dev_addr: PHY device memory region.
+ * @offset: Offset into eeprom contents to be read.
+ * @len: Buffer length, i.e., max bytes to be read.
+ *
+ * Return: 0 on success, error otherwise.
+ */
+ int (*read_module_eeprom)(struct qed_dev *cdev,
+ char *buf, u8 dev_addr, u32 offset, u32 len);
+
+/**
+ * get_affin_hwfn_idx(): Get affine HW function.
+ *
+ * @cdev: Qed dev pointer.
+ *
+ * Return: u8, index of the affine HW function.
+ */
+ u8 (*get_affin_hwfn_idx)(struct qed_dev *cdev);
+
+/**
+ * read_nvm_cfg(): Read NVM config attribute value.
+ *
+ * @cdev: Qed dev pointer.
+ * @buf: Buffer.
+ * @cmd: NVM CFG command id.
+ * @entity_id: Entity id.
+ *
+ * Return: 0 on success, error otherwise.
+ */
+ int (*read_nvm_cfg)(struct qed_dev *cdev, u8 **buf, u32 cmd,
+ u32 entity_id);
+/**
+ * read_nvm_cfg_len(): Read NVM config attribute value length.
+ *
+ * @cdev: Qed dev pointer.
+ * @cmd: NVM CFG command id.
+ *
+ * Return: config id length, 0 on error.
+ */
+ int (*read_nvm_cfg_len)(struct qed_dev *cdev, u32 cmd);
+
+/**
+ * set_grc_config(): Configure value for grc config id.
+ *
+ * @cdev: Qed dev pointer.
+ * @cfg_id: GRC config id.
+ * @val: GRC config value.
+ *
+ * Return: 0 on success, error otherwise.
+ */
+ int (*set_grc_config)(struct qed_dev *cdev, u32 cfg_id, u32 val);
+
+ struct devlink* (*devlink_register)(struct qed_dev *cdev);
+
+ void (*devlink_unregister)(struct devlink *devlink);
+
+ __printf(2, 3) void (*mfw_report)(struct qed_dev *cdev, char *fmt, ...);
+
+ int (*get_sb_info)(struct qed_dev *cdev, struct qed_sb_info *sb,
+ u16 qid, struct qed_sb_info_dbg *sb_dbg);
+
+ int (*get_esl_status)(struct qed_dev *cdev, bool *esl_active);
};
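As a rough illustration of how a protocol driver drives this ops table, here is a hedged sketch; the ops pointer and the opaque coalescing handle are assumed to have been obtained at probe time and are not defined by this header:

/* Hedged sketch only: 'ops' and 'handle' come from the probing
 * protocol driver; error handling is kept minimal.
 */
static int demo_common_ops(struct qed_dev *cdev,
			   const struct qed_common_ops *ops,
			   void *handle)
{
	int rc;

	/* set_coalesce() is now keyed by an opaque queue handle
	 * instead of the old qid/sb_id pair.
	 */
	rc = ops->set_coalesce(cdev, 64, 128, handle);
	if (rc)
		return rc;

	/* Inform the management FW that the driver is now active. */
	return ops->update_drv_state(cdev, true);
}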
#define MASK_FIELD(_name, _value) \
@@ -739,6 +1225,17 @@ struct qed_common_ops {
#define GET_FIELD(value, name) \
(((value) >> (name ## _SHIFT)) & name ## _MASK)
+#define GET_MFW_FIELD(name, field) \
+ (((name) & (field ## _MASK)) >> (field ## _OFFSET))
+
+#define SET_MFW_FIELD(name, field, value) \
+ do { \
+ (name) &= ~(field ## _MASK); \
+ (name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK));\
+ } while (0)
+
+#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
+
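Unlike GET_FIELD above, whose _MASK is defined at bit zero and applied after the shift, the MFW variants pair an in-place mask with an _OFFSET. A minimal sketch with invented FOO_STATE definitions (not real MFW fields):

/* Illustration only: FOO_STATE_* are made up for this example. */
#define FOO_STATE_MASK		0x00000f00
#define FOO_STATE_OFFSET	8

static inline u32 demo_mfw_field(void)
{
	u32 reg = 0;

	/* Clear the field, then OR in the value shifted to its offset. */
	SET_MFW_FIELD(reg, FOO_STATE, 0x5);	/* reg == 0x00000500 */

	/* Mask in place first, then shift down to bit zero. */
	return GET_MFW_FIELD(reg, FOO_STATE);	/* returns 0x5 */
}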
/* Debug print definitions */
#define DP_ERR(cdev, fmt, ...) \
do { \
@@ -825,6 +1322,7 @@ struct qed_eth_stats_common {
u64 rx_bcast_pkts;
u64 mftag_filter_discards;
u64 mac_filter_discards;
+ u64 gft_filter_drop;
u64 tx_ucast_bytes;
u64 tx_mcast_bytes;
u64 tx_bcast_bytes;
@@ -875,6 +1373,7 @@ struct qed_eth_stats_common {
u64 tx_mac_mc_packets;
u64 tx_mac_bc_packets;
u64 tx_mac_ctrl_frames;
+ u64 link_change_count;
};
struct qed_eth_stats_bb {
@@ -935,43 +1434,38 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
}
/* Let SB update */
- mmiowb();
return rc;
}
/**
+ * qed_sb_ack(): This function creates an update command for interrupts
+ * that is written to the IGU.
*
- * @brief This function creates an update command for interrupts that is
- * written to the IGU.
- *
- * @param sb_info - This is the structure allocated and
- * initialized per status block. Assumption is
- * that it was initialized using qed_sb_init
- * @param int_cmd - Enable/Disable/Nop
- * @param upd_flg - whether igu consumer should be
- * updated.
+ * @sb_info: This is the structure allocated and
+ * initialized per status block. Assumption is
+ * that it was initialized using qed_sb_init
+ * @int_cmd: Enable/Disable/Nop
+ * @upd_flg: Whether igu consumer should be updated.
*
- * @return inline void
+ * Return: Void.
*/
static inline void qed_sb_ack(struct qed_sb_info *sb_info,
enum igu_int_cmd int_cmd,
u8 upd_flg)
{
- struct igu_prod_cons_update igu_ack = { 0 };
+ u32 igu_ack;
- igu_ack.sb_id_and_flags =
- ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
- (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
- (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
- (IGU_SEG_ACCESS_REG <<
- IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+ igu_ack = ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+ (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+ (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+ (IGU_SEG_ACCESS_REG <<
+ IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
- DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags);
+ DIRECT_REG_WR(sb_info->igu_addr, igu_ack);
/* Both segments (interrupts & acks) are written to same place address;
* Need to guarantee all commands will be received (in-order) by HW.
*/
- mmiowb();
barrier();
}
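For context, a hedged sketch of a typical call site: a driver's interrupt path latching the firmware's latest status-block index and then acking it. The surrounding flow is illustrative only; IGU_INT_ENABLE comes from the common HSI definitions:

/* Hedged sketch, assuming sb_info was set up via qed_sb_init(). */
static void demo_ack_sb(struct qed_sb_info *sb_info)
{
	/* Latch the newest producer index written by the firmware... */
	(void)qed_sb_update_sb_idx(sb_info);

	/* ...then ack it and re-enable the interrupt with a single
	 * 32-bit IGU write (igu_ack replaces the old wrapper struct).
	 */
	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}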
diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h
index ac2e6a3199a3..8e31a28e51b9 100644
--- a/include/linux/qed/qed_iov_if.h
+++ b/include/linux/qed/qed_iov_if.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef _QED_IOV_IF_H
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
index 111e606a74c8..fbf7973ae9ba 100644
--- a/include/linux/qed/qed_iscsi_if.h
+++ b/include/linux/qed/qed_iscsi_if.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef _QED_ISCSI_IF_H
@@ -102,7 +76,6 @@ struct qed_iscsi_params_offload {
u32 ss_thresh;
u16 srtt;
u16 rtt_var;
- u32 ts_time;
u32 ts_recent;
u32 ts_recent_age;
u32 total_rt;
@@ -124,7 +97,6 @@ struct qed_iscsi_params_offload {
u16 mss;
u8 snd_wnd_scale;
u8 rcv_wnd_scale;
- u32 ts_ticks_per_second;
u16 da_timeout_value;
u8 ack_frequency;
};
@@ -161,7 +133,7 @@ struct qed_iscsi_cb_ops {
* @fill_dev_info: fills iSCSI specific information
* @param cdev
* @param info
- * @return 0 on sucesss, otherwise error value.
+ * @return 0 on success, otherwise error value.
* @register_ops: register iscsi operations
* @param cdev
* @param ops - specified using qed_iscsi_cb_ops
@@ -180,7 +152,7 @@ struct qed_iscsi_cb_ops {
* connection.
* @param p_doorbell - qed will fill the address of the
* doorbell.
- * @return 0 on sucesss, otherwise error value.
+ * @return 0 on success, otherwise error value.
* @release_conn: release a previously acquired iscsi connection
* @param cdev
* @param handle - the connection handle.
@@ -210,7 +182,7 @@ struct qed_iscsi_cb_ops {
* @param stats - pointer to struck that would be filled
* we stats
* @return 0 on success, error otherwise.
- * @change_mac Change MAC of interface
+ * @change_mac: Change MAC of interface
* @param cdev
* @param handle - the connection handle.
* @param mac - new MAC to configure.
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index dd7a3b86bb9e..5b67cd03276e 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef _QED_LL2_IF_H
@@ -38,20 +12,25 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/qed/qed_if.h>
enum qed_ll2_conn_type {
QED_LL2_TYPE_FCOE,
- QED_LL2_TYPE_ISCSI,
+ QED_LL2_TYPE_TCP_ULP,
QED_LL2_TYPE_TEST,
QED_LL2_TYPE_OOO,
QED_LL2_TYPE_RESERVED2,
QED_LL2_TYPE_ROCE,
QED_LL2_TYPE_IWARP,
QED_LL2_TYPE_RESERVED3,
+ MAX_QED_LL2_CONN_TYPE
+};
+
+enum qed_ll2_rx_conn_type {
+ QED_LL2_RX_TYPE_LEGACY,
+ QED_LL2_RX_TYPE_CTX,
MAX_QED_LL2_RX_CONN_TYPE
};
@@ -64,6 +43,7 @@ enum qed_ll2_roce_flavor_type {
enum qed_ll2_tx_dest {
QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */
QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */
+ QED_LL2_TX_DEST_DROP, /* Light L2 Drop the TX packet */
QED_LL2_TX_DEST_MAX
};
@@ -101,6 +81,7 @@ struct qed_ll2_comp_rx_data {
void *cookie;
dma_addr_t rx_buf_addr;
u16 parse_flags;
+ u16 err_flags;
u16 vlan;
bool b_last_packet;
u8 connection_handle;
@@ -114,7 +95,7 @@ struct qed_ll2_comp_rx_data {
u32 opaque_data_1;
/* GSI only */
- u32 gid_dst[4];
+ u32 src_qp;
u16 qp_id;
union {
@@ -149,15 +130,21 @@ void (*qed_ll2_release_tx_packet_cb)(void *cxt,
dma_addr_t first_frag_addr,
bool b_last_fragment, bool b_last_packet);
+typedef
+void (*qed_ll2_slowpath_cb)(void *cxt, u8 connection_handle,
+ u32 opaque_data_0, u32 opaque_data_1);
+
struct qed_ll2_cbs {
qed_ll2_complete_rx_packet_cb rx_comp_cb;
qed_ll2_release_rx_packet_cb rx_release_cb;
qed_ll2_complete_tx_packet_cb tx_comp_cb;
qed_ll2_release_tx_packet_cb tx_release_cb;
+ qed_ll2_slowpath_cb slowpath_cb;
void *cookie;
};
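A hedged sketch of a minimal callback table wired up with the new slowpath_cb; the empty stubs stand in for real Rx/Tx handling, and tx_release_cb reuses the Tx-completion stub since the two typedefs share a signature:

/* Placeholder handlers; a real driver recycles its buffers here. */
static void demo_rx_comp(void *cxt, struct qed_ll2_comp_rx_data *data) { }
static void demo_rx_release(void *cxt, u8 conn, void *cookie,
			    dma_addr_t buf, bool last_pkt) { }
static void demo_tx_comp(void *cxt, u8 conn, void *cookie,
			 dma_addr_t frag, bool last_frag, bool last_pkt) { }
static void demo_slowpath(void *cxt, u8 conn, u32 opq0, u32 opq1) { }

static const struct qed_ll2_cbs demo_cbs = {
	.rx_comp_cb	= demo_rx_comp,
	.rx_release_cb	= demo_rx_release,
	.tx_comp_cb	= demo_tx_comp,
	.tx_release_cb	= demo_tx_comp,	/* same signature as tx_comp */
	.slowpath_cb	= demo_slowpath,
	.cookie		= NULL,		/* handed back to every callback */
};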
struct qed_ll2_acquire_data_inputs {
+ enum qed_ll2_rx_conn_type rx_conn_type;
enum qed_ll2_conn_type conn_type;
u16 mtu;
u16 rx_num_desc;
@@ -170,6 +157,7 @@ struct qed_ll2_acquire_data_inputs {
enum qed_ll2_tx_dest tx_dest;
enum qed_ll2_error_handle ai_err_packet_too_big;
enum qed_ll2_error_handle ai_err_no_buf;
+ bool secondary_queue;
u8 gsi_enable;
};
@@ -194,6 +182,7 @@ struct qed_ll2_tx_pkt_info {
bool enable_ip_cksum;
bool enable_l4_cksum;
bool calc_ip_len;
+ bool remove_stag;
};
#define QED_LL2_UNUSED_HANDLE (0xff)
@@ -212,57 +201,64 @@ struct qed_ll2_params {
u8 ll2_mac_address[ETH_ALEN];
};
+enum qed_ll2_xmit_flags {
+ /* FIP discovery packet */
+ QED_LL2_XMIT_FLAGS_FIP_DISCOVERY
+};
+
struct qed_ll2_ops {
/**
- * @brief start - initializes ll2
+ * start(): Initializes ll2.
*
- * @param cdev
- * @param params - protocol driver configuration for the ll2.
+ * @cdev: Qed dev pointer.
+ * @params: Protocol driver configuration for the ll2.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params);
/**
- * @brief stop - stops the ll2
+ * stop(): Stops the ll2.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
int (*stop)(struct qed_dev *cdev);
/**
- * @brief start_xmit - transmits an skb over the ll2 interface
+ * start_xmit(): Transmits an skb over the ll2 interface.
*
- * @param cdev
- * @param skb
+ * @cdev: Qed dev pointer.
+ * @skb: SKB.
+ * @xmit_flags: Transmit options defined by the enum qed_ll2_xmit_flags.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
- int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb);
+ int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb,
+ unsigned long xmit_flags);
/**
- * @brief register_cb_ops - protocol driver register the callback for Rx/Tx
+ * register_cb_ops(): Protocol driver registers the callbacks for Rx/Tx
* packets. Should be called before `start'.
*
- * @param cdev
- * @param cookie - to be passed to the callback functions.
- * @param ops - the callback functions to register for Rx / Tx.
+ * @cdev: Qed dev pointer.
+ * @cookie: To be passed to the callback functions.
+ * @ops: The callback functions to register for Rx / Tx.
*
- * @return 0 on success, otherwise error value.
+ * Return: Void.
*/
void (*register_cb_ops)(struct qed_dev *cdev,
const struct qed_ll2_cb_ops *ops,
void *cookie);
/**
- * @brief get LL2 related statistics
+ * get_stats(): Get LL2 related statistics.
*
- * @param cdev
- * @param stats - pointer to struct that would be filled with stats
+ * @cdev: Qed dev pointer.
+ * @stats: Pointer to struct that will be filled with stats.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*get_stats)(struct qed_dev *cdev, struct qed_ll2_stats *stats);
};
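A hedged sketch of the widened start_xmit() signature in use, marking a frame as FIP discovery; the ll2 ops pointer is assumed to have been resolved by the protocol driver:

static int demo_fip_xmit(struct qed_dev *cdev,
			 const struct qed_ll2_ops *ll2,
			 struct sk_buff *skb)
{
	/* xmit_flags is a bitmap of enum qed_ll2_xmit_flags bits. */
	unsigned long xmit_flags = BIT(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY);

	return ll2->start_xmit(cdev, skb, xmit_flags);
}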
diff --git a/include/linux/qed/qed_nvmetcp_if.h b/include/linux/qed/qed_nvmetcp_if.h
new file mode 100644
index 000000000000..bbfbfba51f37
--- /dev/null
+++ b/include/linux/qed/qed_nvmetcp_if.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#ifndef _QED_NVMETCP_IF_H
+#define _QED_NVMETCP_IF_H
+#include <linux/types.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/nvmetcp_common.h>
+
+#define QED_NVMETCP_MAX_IO_SIZE 0x800000
+#define QED_NVMETCP_CMN_HDR_SIZE (sizeof(struct nvme_tcp_hdr))
+#define QED_NVMETCP_CMD_HDR_SIZE (sizeof(struct nvme_tcp_cmd_pdu))
+#define QED_NVMETCP_NON_IO_HDR_SIZE ((QED_NVMETCP_CMN_HDR_SIZE + 16))
+
+typedef int (*nvmetcp_event_cb_t) (void *context,
+ u8 fw_event_code, void *fw_handle);
+
+struct qed_dev_nvmetcp_info {
+ struct qed_dev_info common;
+ u8 port_id; /* Physical port */
+ u8 num_cqs;
+};
+
+#define MAX_TID_BLOCKS_NVMETCP (512)
+struct qed_nvmetcp_tid {
+ u32 size; /* In bytes per task */
+ u32 num_tids_per_block;
+ u8 *blocks[MAX_TID_BLOCKS_NVMETCP];
+};
+
+struct qed_nvmetcp_id_params {
+ u8 mac[ETH_ALEN];
+ u32 ip[4];
+ u16 port;
+};
+
+struct qed_nvmetcp_params_offload {
+ /* FW initializations */
+ dma_addr_t sq_pbl_addr;
+ dma_addr_t nvmetcp_cccid_itid_table_addr;
+ u16 nvmetcp_cccid_max_range;
+ u8 default_cq;
+
+ /* Networking and TCP stack initializations */
+ struct qed_nvmetcp_id_params src;
+ struct qed_nvmetcp_id_params dst;
+ u32 ka_timeout;
+ u32 ka_interval;
+ u32 max_rt_time;
+ u32 cwnd;
+ u16 mss;
+ u16 vlan_id;
+ bool timestamp_en;
+ bool delayed_ack_en;
+ bool tcp_keep_alive_en;
+ bool ecn_en;
+ u8 ip_version;
+ u8 ka_max_probe_cnt;
+ u8 ttl;
+ u8 tos_or_tc;
+ u8 rcv_wnd_scale;
+};
+
+struct qed_nvmetcp_params_update {
+ u32 max_io_size;
+ u32 max_recv_pdu_length;
+ u32 max_send_pdu_length;
+
+ /* Placeholder: pfv, cpda, hpda */
+
+ bool hdr_digest_en;
+ bool data_digest_en;
+};
+
+struct qed_nvmetcp_cb_ops {
+ struct qed_common_cb_ops common;
+};
+
+struct nvmetcp_sge {
+ struct regpair sge_addr; /* SGE address */
+ __le32 sge_len; /* SGE length */
+ __le32 reserved;
+};
+
+/* IO path HSI function SGL params */
+struct storage_sgl_task_params {
+ struct nvmetcp_sge *sgl;
+ struct regpair sgl_phys_addr;
+ u32 total_buffer_size;
+ u16 num_sges;
+ bool small_mid_sge;
+};
+
+/* IO path HSI function FW task context params */
+struct nvmetcp_task_params {
+ void *context; /* Output parameter - set/filled by the HSI function */
+ struct nvmetcp_wqe *sqe;
+ u32 tx_io_size; /* in bytes (Without DIF, if exists) */
+ u32 rx_io_size; /* in bytes (Without DIF, if exists) */
+ u16 conn_icid;
+ u16 itid;
+ struct regpair opq; /* qedn_task_ctx address */
+ u16 host_cccid;
+ u8 cq_rss_number;
+ bool send_write_incapsule;
+};
+
+/**
+ * struct qed_nvmetcp_ops - qed NVMeTCP operations.
+ * @common: common operations pointer
+ * @ll2: light L2 operations pointer
+ * @fill_dev_info: fills NVMeTCP specific information
+ * @param cdev
+ * @param info
+ * @return 0 on success, otherwise error value.
+ * @register_ops: register nvmetcp operations
+ * @param cdev
+ * @param ops - specified using qed_nvmetcp_cb_ops
+ * @param cookie - driver private
+ * @start: start nvmetcp in FW
+ * @param cdev
+ * @param tasks - qed will fill information about tasks
+ * @return 0 on success, otherwise error value.
+ * @stop: stop nvmetcp in FW
+ * @param cdev
+ * @return 0 on success, otherwise error value.
+ * @acquire_conn: acquire a new nvmetcp connection
+ * @param cdev
+ * @param handle - qed will fill handle that should be
+ * used henceforth as identifier of the
+ * connection.
+ * @param p_doorbell - qed will fill the address of the
+ * doorbell.
+ * @return 0 on success, otherwise error value.
+ * @release_conn: release a previously acquired nvmetcp connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @offload_conn: configures an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param conn_info - the configuration to use for the
+ * offload.
+ * @return 0 on success, otherwise error value.
+ * @update_conn: updates an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param conn_info - the configuration to use for the
+ * offload.
+ * @return 0 on success, otherwise error value.
+ * @destroy_conn: stops an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @clear_sq: clear all tasks in sq
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @add_src_tcp_port_filter: Add source tcp port filter
+ * @param cdev
+ * @param src_port
+ * @remove_src_tcp_port_filter: Remove source tcp port filter
+ * @param cdev
+ * @param src_port
+ * @add_dst_tcp_port_filter: Add destination tcp port filter
+ * @param cdev
+ * @param dest_port
+ * @remove_dst_tcp_port_filter: Remove destination tcp port filter
+ * @param cdev
+ * @param dest_port
+ * @clear_all_filters: Clear all filters.
+ * @param cdev
+ * @init_read_io: Init read IO.
+ * @task_params
+ * @cmd_pdu_header
+ * @nvme_cmd
+ * @sgl_task_params
+ * @init_write_io: Init write IO.
+ * @task_params
+ * @cmd_pdu_header
+ * @nvme_cmd
+ * @sgl_task_params
+ * @init_icreq_exchange: Exchange ICReq.
+ * @task_params
+ * @init_conn_req_pdu_hdr
+ * @tx_sgl_task_params
+ * @rx_sgl_task_params
+ * @init_task_cleanup: Init task cleanup.
+ * @task_params
+ */
+struct qed_nvmetcp_ops {
+ const struct qed_common_ops *common;
+
+ const struct qed_ll2_ops *ll2;
+
+ int (*fill_dev_info)(struct qed_dev *cdev,
+ struct qed_dev_nvmetcp_info *info);
+
+ void (*register_ops)(struct qed_dev *cdev,
+ struct qed_nvmetcp_cb_ops *ops, void *cookie);
+
+ int (*start)(struct qed_dev *cdev,
+ struct qed_nvmetcp_tid *tasks,
+ void *event_context, nvmetcp_event_cb_t async_event_cb);
+
+ int (*stop)(struct qed_dev *cdev);
+
+ int (*acquire_conn)(struct qed_dev *cdev,
+ u32 *handle,
+ u32 *fw_cid, void __iomem **p_doorbell);
+
+ int (*release_conn)(struct qed_dev *cdev, u32 handle);
+
+ int (*offload_conn)(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_nvmetcp_params_offload *conn_info);
+
+ int (*update_conn)(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_nvmetcp_params_update *conn_info);
+
+ int (*destroy_conn)(struct qed_dev *cdev, u32 handle, u8 abrt_conn);
+
+ int (*clear_sq)(struct qed_dev *cdev, u32 handle);
+
+ int (*add_src_tcp_port_filter)(struct qed_dev *cdev, u16 src_port);
+
+ void (*remove_src_tcp_port_filter)(struct qed_dev *cdev, u16 src_port);
+
+ int (*add_dst_tcp_port_filter)(struct qed_dev *cdev, u16 dest_port);
+
+ void (*remove_dst_tcp_port_filter)(struct qed_dev *cdev, u16 dest_port);
+
+ void (*clear_all_filters)(struct qed_dev *cdev);
+
+ void (*init_read_io)(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params);
+
+ void (*init_write_io)(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params);
+
+ void (*init_icreq_exchange)(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr,
+ struct storage_sgl_task_params *tx_sgl_task_params,
+ struct storage_sgl_task_params *rx_sgl_task_params);
+
+ void (*init_task_cleanup)(struct nvmetcp_task_params *task_params);
+};
+
+const struct qed_nvmetcp_ops *qed_get_nvmetcp_ops(void);
+void qed_put_nvmetcp_ops(void);
+#endif
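A hedged sketch of how a host driver might bind to these exported ops; the probe flow shown is illustrative only, not the qedn module's actual sequence:

static int demo_nvmetcp_probe(struct qed_dev *cdev)
{
	const struct qed_nvmetcp_ops *ops = qed_get_nvmetcp_ops();
	struct qed_dev_nvmetcp_info info;
	int rc;

	if (!ops)
		return -ENODEV;

	rc = ops->fill_dev_info(cdev, &info);
	if (rc)
		qed_put_nvmetcp_ops();	/* drop the module reference */

	return rc;
}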
diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
index 4dd72ba210f5..3b76c07fbcf8 100644
--- a/include/linux/qed/qed_rdma_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -1,34 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
+
#ifndef _QED_RDMA_IF_H
#define _QED_RDMA_IF_H
#include <linux/types.h>
@@ -39,15 +14,6 @@
#include <linux/qed/qed_ll2_if.h>
#include <linux/qed/rdma_common.h>
-enum qed_roce_ll2_tx_dest {
- /* Light L2 TX Destination to the Network */
- QED_ROCE_LL2_TX_DEST_NW,
-
- /* Light L2 TX Destination to the Loopback */
- QED_ROCE_LL2_TX_DEST_LB,
- QED_ROCE_LL2_TX_DEST_MAX
-};
-
#define QED_RDMA_MAX_CNQ_SIZE (0xFFFF)
/* rdma interface */
@@ -62,11 +28,17 @@ enum qed_roce_qp_state {
QED_ROCE_QP_STATE_SQE
};
+enum qed_rdma_qp_type {
+ QED_RDMA_QP_TYPE_RC,
+ QED_RDMA_QP_TYPE_XRC_INI,
+ QED_RDMA_QP_TYPE_XRC_TGT,
+ QED_RDMA_QP_TYPE_INVAL = 0xffff,
+};
+
enum qed_rdma_tid_type {
QED_RDMA_TID_REGISTERED_MR,
QED_RDMA_TID_FMR,
- QED_RDMA_TID_MW_TYPE1,
- QED_RDMA_TID_MW_TYPE2A
+ QED_RDMA_TID_MW
};
struct qed_rdma_events {
@@ -101,7 +73,6 @@ struct qed_rdma_device {
u64 max_mr_size;
u32 max_cqe;
u32 max_mw;
- u32 max_fmr;
u32 max_mr_mw_fmr_pbl;
u64 max_mr_mw_fmr_size;
u32 max_pd;
@@ -235,7 +206,7 @@ struct qed_rdma_start_in_params {
struct qed_rdma_add_user_out_params {
u16 dpi;
- u64 dpi_addr;
+ void __iomem *dpi_addr;
u64 dpi_phys_addr;
u32 dpi_size;
u16 wid_count;
@@ -271,16 +242,13 @@ struct qed_rdma_register_tid_in_params {
bool pbl_two_level;
u8 pbl_page_size_log;
u8 page_size_log;
- u32 fbo;
u64 length;
u64 vaddr;
- bool zbva;
bool phy_mr;
bool dma_mr;
bool dif_enabled;
u64 dif_error_addr;
- u64 dif_runt_addr;
};
struct qed_rdma_create_cq_in_params {
@@ -302,6 +270,12 @@ struct qed_rdma_create_srq_in_params {
u16 num_pages;
u16 pd_id;
u16 page_size;
+
+ /* XRC related only */
+ bool reserved_key_en;
+ bool is_xrc;
+ u32 cq_cid;
+ u16 xrcd_id;
};
struct qed_rdma_destroy_cq_in_params {
@@ -330,7 +304,12 @@ struct qed_rdma_create_qp_in_params {
u16 rq_num_pages;
u64 rq_pbl_ptr;
u16 srq_id;
+ u16 xrcd_id;
u8 stats_queue;
+ enum qed_rdma_qp_type qp_type;
+ u8 flags;
+#define QED_ROCE_EDPM_MODE_MASK 0x1
+#define QED_ROCE_EDPM_MODE_SHIFT 0
};
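A hedged sketch of filling the new qp_type and flags members, using the SET_FIELD helper from the common qed headers; the calling context is invented:

static void demo_fill_qp_params(struct qed_rdma_create_qp_in_params *in,
				bool edpm_mode)
{
	in->qp_type = QED_RDMA_QP_TYPE_RC;
	SET_FIELD(in->flags, QED_ROCE_EDPM_MODE, edpm_mode);
}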
struct qed_rdma_create_qp_out_params {
@@ -440,11 +419,13 @@ struct qed_rdma_create_srq_out_params {
struct qed_rdma_destroy_srq_in_params {
u16 srq_id;
+ bool is_xrc;
};
struct qed_rdma_modify_srq_in_params {
u32 wqe_limit;
u16 srq_id;
+ bool is_xrc;
};
struct qed_rdma_stats_out_params {
@@ -485,7 +466,9 @@ enum qed_iwarp_event_type {
QED_IWARP_EVENT_ACTIVE_MPA_REPLY,
QED_IWARP_EVENT_LOCAL_ACCESS_ERROR,
QED_IWARP_EVENT_REMOTE_OPERATION_ERROR,
- QED_IWARP_EVENT_TERMINATE_RECEIVED
+ QED_IWARP_EVENT_TERMINATE_RECEIVED,
+ QED_IWARP_EVENT_SRQ_LIMIT,
+ QED_IWARP_EVENT_SRQ_EMPTY,
};
enum qed_tcp_ip_version {
@@ -581,7 +564,7 @@ struct qed_roce_ll2_packet {
int n_seg;
struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE];
int roce_mode;
- enum qed_roce_ll2_tx_dest tx_dest;
+ enum qed_ll2_tx_dest tx_dest;
};
enum qed_rdma_type {
@@ -620,6 +603,8 @@ struct qed_rdma_ops {
int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt);
int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd);
void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd);
+ int (*rdma_alloc_xrcd)(void *rdma_cxt, u16 *xrcd);
+ void (*rdma_dealloc_xrcd)(void *rdma_cxt, u16 xrcd);
int (*rdma_create_cq)(void *rdma_cxt,
struct qed_rdma_create_cq_in_params *params,
u16 *icid);
@@ -646,6 +631,14 @@ struct qed_rdma_ops {
int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
void (*rdma_free_tid)(void *rdma_cxt, u32 itid);
+ int (*rdma_create_srq)(void *rdma_cxt,
+ struct qed_rdma_create_srq_in_params *iparams,
+ struct qed_rdma_create_srq_out_params *oparams);
+ int (*rdma_destroy_srq)(void *rdma_cxt,
+ struct qed_rdma_destroy_srq_in_params *iparams);
+ int (*rdma_modify_srq)(void *rdma_cxt,
+ struct qed_rdma_modify_srq_in_params *iparams);
+
int (*ll2_acquire_connection)(void *rdma_cxt,
struct qed_ll2_acquire_data *data);
@@ -669,7 +662,10 @@ struct qed_rdma_ops {
u8 connection_handle,
struct qed_ll2_stats *p_stats);
int (*ll2_set_mac_filter)(struct qed_dev *cdev,
- u8 *old_mac_address, u8 *new_mac_address);
+ u8 *old_mac_address,
+ const u8 *new_mac_address);
+
+ int (*iwarp_set_engine_affin)(struct qed_dev *cdev, bool b_reset);
int (*iwarp_connect)(void *rdma_cxt,
struct qed_iwarp_connect_in *iparams,
diff --git a/include/linux/qed/qede_rdma.h b/include/linux/qed/qede_rdma.h
index 9904617a9730..0d5564a59a59 100644
--- a/include/linux/qed/qede_rdma.h
+++ b/include/linux/qed/qede_rdma.h
@@ -1,34 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qedr NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
+
#ifndef QEDE_ROCE_H
#define QEDE_ROCE_H
@@ -45,7 +20,8 @@ enum qede_rdma_event {
QEDE_UP,
QEDE_DOWN,
QEDE_CHANGE_ADDR,
- QEDE_CLOSE
+ QEDE_CLOSE,
+ QEDE_CHANGE_MTU,
};
struct qede_rdma_event_work {
@@ -74,21 +50,24 @@ void qede_rdma_unregister_driver(struct qedr_driver *drv);
bool qede_rdma_supported(struct qede_dev *dev);
#if IS_ENABLED(CONFIG_QED_RDMA)
-int qede_rdma_dev_add(struct qede_dev *dev);
+int qede_rdma_dev_add(struct qede_dev *dev, bool recovery);
void qede_rdma_dev_event_open(struct qede_dev *dev);
void qede_rdma_dev_event_close(struct qede_dev *dev);
-void qede_rdma_dev_remove(struct qede_dev *dev);
+void qede_rdma_dev_remove(struct qede_dev *dev, bool recovery);
void qede_rdma_event_changeaddr(struct qede_dev *edr);
+void qede_rdma_event_change_mtu(struct qede_dev *edev);
#else
-static inline int qede_rdma_dev_add(struct qede_dev *dev)
+static inline int qede_rdma_dev_add(struct qede_dev *dev,
+ bool recovery)
{
return 0;
}
static inline void qede_rdma_dev_event_open(struct qede_dev *dev) {}
static inline void qede_rdma_dev_event_close(struct qede_dev *dev) {}
-static inline void qede_rdma_dev_remove(struct qede_dev *dev) {}
+static inline void qede_rdma_dev_remove(struct qede_dev *dev,
+ bool recovery) {}
static inline void qede_rdma_event_changeaddr(struct qede_dev *edr) {}
#endif
#endif
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index a9b3050f469c..6dfed163ab6c 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -1,59 +1,37 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __RDMA_COMMON__
#define __RDMA_COMMON__
+
/************************/
/* RDMA FW CONSTANTS */
/************************/
-#define RDMA_RESERVED_LKEY (0)
-#define RDMA_RING_PAGE_SIZE (0x1000)
+#define RDMA_RESERVED_LKEY (0)
+#define RDMA_RING_PAGE_SIZE (0x1000)
-#define RDMA_MAX_SGE_PER_SQ_WQE (4)
-#define RDMA_MAX_SGE_PER_RQ_WQE (4)
+#define RDMA_MAX_SGE_PER_SQ_WQE (4)
+#define RDMA_MAX_SGE_PER_RQ_WQE (4)
#define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000)
-#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50)
-#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20)
+#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50)
+#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20)
-#define RDMA_MAX_CQS (64 * 1024)
-#define RDMA_MAX_TIDS (128 * 1024 - 1)
-#define RDMA_MAX_PDS (64 * 1024)
+#define RDMA_MAX_CQS (64 * 1024)
+#define RDMA_MAX_TIDS (128 * 1024 - 1)
+#define RDMA_MAX_PDS (64 * 1024)
+#define RDMA_MAX_XRC_SRQS (1024)
+#define RDMA_MAX_SRQS (32 * 1024)
+#define RDMA_MAX_IRQ_ELEMS_IN_PAGE (128)
-#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
-#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
-#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
+#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
+#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
+#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
index fe6a33e45977..ccddd7a96b67 100644
--- a/include/linux/qed/roce_common.h
+++ b/include/linux/qed/roce_common.h
@@ -1,45 +1,25 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __ROCE_COMMON__
#define __ROCE_COMMON__
-#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256)
-#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288)
+/************************/
+/* ROCE FW CONSTANTS */
+/************************/
-#define ROCE_MAX_QPS (32 * 1024)
-#define ROCE_DCQCN_NP_MAX_QPS (64)
-#define ROCE_DCQCN_RP_MAX_QPS (64)
+#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256)
+#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288)
+#define ROCE_MAX_QPS (32 * 1024)
+#define ROCE_DCQCN_NP_MAX_QPS (64)
+#define ROCE_DCQCN_RP_MAX_QPS (64)
+#define ROCE_LKEY_MW_DIF_EN_BIT (28)
+
+/* Affiliated asynchronous events / errors enumeration */
enum roce_async_events_type {
ROCE_ASYNC_EVENT_NONE = 0,
ROCE_ASYNC_EVENT_COMM_EST = 1,
@@ -54,6 +34,9 @@ enum roce_async_events_type {
ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
ROCE_ASYNC_EVENT_SRQ_EMPTY,
ROCE_ASYNC_EVENT_DESTROY_QP_DONE,
+ ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR,
+ ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR,
+ ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR,
MAX_ROCE_ASYNC_EVENTS_TYPE
};
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h
index 08df82a096b6..91896e8793bf 100644
--- a/include/linux/qed/storage_common.h
+++ b/include/linux/qed/storage_common.h
@@ -1,82 +1,92 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __STORAGE_COMMON__
#define __STORAGE_COMMON__
-#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2)
-#define BDQ_NUM_RESOURCES (4)
-
-#define BDQ_ID_RQ (0)
-#define BDQ_ID_IMM_DATA (1)
-#define BDQ_NUM_IDS (2)
-
-#define SCSI_NUM_SGES_SLOW_SGL_THR 8
+/*********************/
+/* SCSI CONSTANTS */
+/*********************/
+
+#define SCSI_MAX_NUM_OF_CMDQS (NUM_OF_GLOBAL_QUEUES / 2)
+#define BDQ_NUM_RESOURCES (4)
+
+#define BDQ_ID_RQ (0)
+#define BDQ_ID_IMM_DATA (1)
+#define BDQ_ID_TQ (2)
+#define BDQ_NUM_IDS (3)
+
+#define SCSI_NUM_SGES_SLOW_SGL_THR 8
+
+#define BDQ_MAX_EXTERNAL_RING_SIZE BIT(15)
+
+/* SCSI op codes */
+#define SCSI_OPCODE_COMPARE_AND_WRITE (0x89)
+#define SCSI_OPCODE_READ_10 (0x28)
+#define SCSI_OPCODE_WRITE_6 (0x0A)
+#define SCSI_OPCODE_WRITE_10 (0x2A)
+#define SCSI_OPCODE_WRITE_12 (0xAA)
+#define SCSI_OPCODE_WRITE_16 (0x8A)
+#define SCSI_OPCODE_WRITE_AND_VERIFY_10 (0x2E)
+#define SCSI_OPCODE_WRITE_AND_VERIFY_12 (0xAE)
+#define SCSI_OPCODE_WRITE_AND_VERIFY_16 (0x8E)
+
+/* iSCSI Drv opaque */
+struct iscsi_drv_opaque {
+ __le16 reserved_zero[3];
+ __le16 opaque;
+};
-#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15)
+/* Scsi 2B/8B opaque union */
+union scsi_opaque {
+ struct regpair fcoe_opaque;
+ struct iscsi_drv_opaque iscsi_opaque;
+};
+/* SCSI buffer descriptor */
struct scsi_bd {
struct regpair address;
- struct regpair opaque;
+ union scsi_opaque opaque;
};
+/* Scsi Drv BDQ struct */
struct scsi_bdq_ram_drv_data {
__le16 external_producer;
__le16 reserved0[3];
};
+/* SCSI SGE entry */
struct scsi_sge {
struct regpair sge_addr;
__le32 sge_len;
__le32 reserved;
};
+/* Cached SGEs section */
struct scsi_cached_sges {
struct scsi_sge sge[4];
};
+/* Scsi Drv CMDQ struct */
struct scsi_drv_cmdq {
__le16 cmdq_cons;
__le16 reserved0;
__le32 reserved1;
};
+/* Common SCSI init params passed by driver to FW in function init ramrod */
struct scsi_init_func_params {
__le16 num_tasks;
u8 log_page_size;
+ u8 log_page_size_conn;
u8 debug_mode;
- u8 reserved2[12];
+ u8 reserved2[11];
};
+/* SCSI RQ/CQ/CMDQ firmware function init parameters */
struct scsi_init_func_queues {
struct regpair glbl_q_params_addr;
__le16 rq_buffer_size;
@@ -84,39 +94,45 @@ struct scsi_init_func_queues {
__le16 cmdq_num_entries;
u8 bdq_resource_id;
u8 q_validity;
-#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1
-#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0
-#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1
-#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1
-#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1
-#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2
-#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK 0x1F
-#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3
+#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1
+#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0
+#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1
+#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1
+#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1
+#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2
+#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_MASK 0x1
+#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_SHIFT 3
+#define SCSI_INIT_FUNC_QUEUES_SOC_EN_MASK 0x1
+#define SCSI_INIT_FUNC_QUEUES_SOC_EN_SHIFT 4
+#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_MASK 0x7
+#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_SHIFT 5
+ __le16 cq_cmdq_sb_num_arr[SCSI_MAX_NUM_OF_CMDQS];
u8 num_queues;
u8 queue_relative_offset;
u8 cq_sb_pi;
u8 cmdq_sb_pi;
- __le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS];
- __le16 reserved0;
u8 bdq_pbl_num_entries[BDQ_NUM_IDS];
+ u8 reserved1;
struct regpair bdq_pbl_base_address[BDQ_NUM_IDS];
__le16 bdq_xoff_threshold[BDQ_NUM_IDS];
- __le16 bdq_xon_threshold[BDQ_NUM_IDS];
__le16 cmdq_xoff_threshold;
+ __le16 bdq_xon_threshold[BDQ_NUM_IDS];
__le16 cmdq_xon_threshold;
- __le32 reserved1;
};
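A hedged sketch of flagging the new task queue (TQ) BDQ as valid via the q_validity bitfield, again with the common SET_FIELD helper:

static void demo_mark_tq_valid(struct scsi_init_func_queues *p_queues)
{
	SET_FIELD(p_queues->q_validity, SCSI_INIT_FUNC_QUEUES_TQ_VALID, 1);
}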
+/* Scsi Drv BDQ Data struct (2 BDQ IDs: 0 - RQ, 1 - Immediate Data) */
struct scsi_ram_per_bdq_resource_drv_data {
struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
};
+/* SCSI SGL types */
enum scsi_sgl_mode {
SCSI_TX_SLOW_SGL,
SCSI_FAST_SGL,
MAX_SCSI_SGL_MODE
};
+/* SCSI SGL parameters */
struct scsi_sgl_params {
struct regpair sgl_addr;
__le32 sgl_total_length;
@@ -126,10 +142,16 @@ struct scsi_sgl_params {
u8 reserved;
};
+/* SCSI terminate connection params */
struct scsi_terminate_extra_params {
__le16 unsolicited_cq_count;
__le16 cmdq_count;
u8 reserved[4];
};
+/* SCSI Task Queue Element */
+struct scsi_tqe {
+ __le16 itid;
+};
+
#endif /* __STORAGE_COMMON__ */
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index dbf7a43c3e1f..2b2c87d10e0a 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -1,40 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __TCP_COMMON__
#define __TCP_COMMON__
-#define TCP_INVALID_TIMEOUT_VAL -1
+/********************/
+/* TCP FW CONSTANTS */
+/********************/
+#define TCP_INVALID_TIMEOUT_VAL -1
+
+/* OOO opaque data received from LL2 */
struct ooo_opaque {
__le32 cid;
u8 drop_isle;
@@ -43,25 +22,29 @@ struct ooo_opaque {
u8 ooo_isle;
};
+/* tcp connect mode enum */
enum tcp_connect_mode {
TCP_CONNECT_ACTIVE,
TCP_CONNECT_PASSIVE,
MAX_TCP_CONNECT_MODE
};
+/* tcp function init parameters */
struct tcp_init_params {
__le32 two_msl_timer;
__le16 tx_sws_timer;
- u8 maxfinrt;
+ u8 max_fin_rt;
u8 reserved[9];
};
+/* tcp IPv4/IPv6 enum */
enum tcp_ip_version {
TCP_IPV4,
TCP_IPV6,
MAX_TCP_IP_VERSION
};
+/* tcp offload parameters */
struct tcp_offload_params {
__le16 local_mac_addr_lo;
__le16 local_mac_addr_mid;
@@ -70,24 +53,29 @@ struct tcp_offload_params {
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
__le16 vlan_id;
- u8 flags;
-#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0
-#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1
-#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2
-#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 3
-#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 4
-#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 5
-#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6
-#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT 7
+ __le16 flags;
+#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0
+#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1
+#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2
+#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_SHIFT 3
+#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_SHIFT 4
+#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 5
+#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 6
+#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 7
+#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 8
+#define TCP_OFFLOAD_PARAMS_RESERVED_MASK 0x7F
+#define TCP_OFFLOAD_PARAMS_RESERVED_SHIFT 9
u8 ip_version;
+ u8 reserved0[3];
__le32 remote_ip[4];
__le32 local_ip[4];
__le32 flow_label;
@@ -99,17 +87,21 @@ struct tcp_offload_params {
u8 rcv_wnd_scale;
u8 connect_mode;
__le16 srtt;
- __le32 cwnd;
__le32 ss_thresh;
- __le16 reserved1;
+ __le32 rcv_wnd;
+ __le32 cwnd;
u8 ka_max_probe_cnt;
u8 dup_ack_theshold;
+ __le16 reserved1;
+ __le32 ka_timeout;
+ __le32 ka_interval;
+ __le32 max_rt_time;
+ __le32 initial_rcv_wnd;
__le32 rcv_next;
__le32 snd_una;
__le32 snd_next;
__le32 snd_max;
__le32 snd_wnd;
- __le32 rcv_wnd;
__le32 snd_wl1;
__le32 ts_recent;
__le32 ts_recent_age;
@@ -122,16 +114,13 @@ struct tcp_offload_params {
u8 rt_cnt;
__le16 rtt_var;
__le16 fw_internal;
- __le32 ka_timeout;
- __le32 ka_interval;
- __le32 max_rt_time;
- __le32 initial_rcv_wnd;
u8 snd_wnd_scale;
u8 ack_frequency;
__le16 da_timeout_value;
- __le32 reserved3[2];
+ __le32 reserved3;
};
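Since flags is now a 16-bit little-endian word, a hedged sketch of the composition pattern: build the bitmap in CPU order with the common SET_FIELD helper and convert once at the end:

static void demo_fill_tcp_flags(struct tcp_offload_params *p,
				bool ts_en, bool ka_en)
{
	u16 flags = 0;

	SET_FIELD(flags, TCP_OFFLOAD_PARAMS_TS_EN, ts_en);
	SET_FIELD(flags, TCP_OFFLOAD_PARAMS_KA_EN, ka_en);

	p->flags = cpu_to_le16(flags);
}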
+/* tcp offload parameters, option 2 */
struct tcp_offload_params_opt2 {
__le16 local_mac_addr_lo;
__le16 local_mac_addr_mid;
@@ -140,16 +129,19 @@ struct tcp_offload_params_opt2 {
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
__le16 vlan_id;
- u8 flags;
-#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0
-#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1
-#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1
-#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2
-#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0x1F
-#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3
+ __le16 flags;
+#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0
+#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1
+#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2
+#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_MASK 0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_SHIFT 3
+#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0xFFF
+#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 4
u8 ip_version;
+ u8 reserved1[3];
__le32 remote_ip[4];
__le32 local_ip[4];
__le32 flow_label;
@@ -163,9 +155,16 @@ struct tcp_offload_params_opt2 {
__le16 syn_ip_payload_length;
__le32 syn_phy_addr_lo;
__le32 syn_phy_addr_hi;
- __le32 reserved1[22];
+ __le32 cwnd;
+ u8 ka_max_probe_cnt;
+ u8 reserved2[3];
+ __le32 ka_timeout;
+ __le32 ka_interval;
+ __le32 max_rt_time;
+ __le32 reserved3[16];
};
+/* tcp segment placement events */
enum tcp_seg_placement_event {
TCP_EVENT_ADD_PEN,
TCP_EVENT_ADD_NEW_ISLE,
@@ -177,40 +176,41 @@ enum tcp_seg_placement_event {
MAX_TCP_SEG_PLACEMENT_EVENT
};
+/* tcp update parameters */
struct tcp_update_params {
__le16 flags;
-#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0
-#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1
-#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2
-#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3
-#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4
-#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5
-#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6
-#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7
-#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8
-#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9
-#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10
-#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1
-#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11
-#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1
-#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12
-#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1
-#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13
-#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1
-#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14
-#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1
-#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15
+#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0
+#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1
+#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2
+#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3
+#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4
+#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5
+#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6
+#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7
+#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8
+#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9
+#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10
+#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1
+#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11
+#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12
+#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1
+#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13
+#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1
+#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14
+#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1
+#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15
__le16 remote_mac_addr_lo;
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
@@ -226,6 +226,7 @@ struct tcp_update_params {
u8 reserved1[7];
};
+/* tcp upload parameters */
struct tcp_upload_params {
__le32 rcv_next;
__le32 snd_una;
diff --git a/include/linux/qnx6_fs.h b/include/linux/qnx6_fs.h
index 26049eab9010..13373d437cf2 100644
--- a/include/linux/qnx6_fs.h
+++ b/include/linux/qnx6_fs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Name : qnx6_fs.h
* Author : Kai Bankett
diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h
deleted file mode 100644
index 3bdfa70bc642..000000000000
--- a/include/linux/quicklist.h
+++ /dev/null
@@ -1,93 +0,0 @@
-#ifndef LINUX_QUICKLIST_H
-#define LINUX_QUICKLIST_H
-/*
- * Fast allocations and disposal of pages. Pages must be in the condition
- * as needed after allocation when they are freed. Per cpu lists of pages
- * are kept that only contain node local pages.
- *
- * (C) 2007, SGI. Christoph Lameter <cl@linux.com>
- */
-#include <linux/kernel.h>
-#include <linux/gfp.h>
-#include <linux/percpu.h>
-
-#ifdef CONFIG_QUICKLIST
-
-struct quicklist {
- void *page;
- int nr_pages;
-};
-
-DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
-
-/*
- * The two key functions quicklist_alloc and quicklist_free are inline so
- * that they may be custom compiled for the platform.
- * Specifying a NULL ctor can remove constructor support. Specifying
- * a constant quicklist allows the determination of the exact address
- * in the per cpu area.
- *
- * The fast patch in quicklist_alloc touched only a per cpu cacheline and
- * the first cacheline of the page itself. There is minmal overhead involved.
- */
-static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *))
-{
- struct quicklist *q;
- void **p = NULL;
-
- q =&get_cpu_var(quicklist)[nr];
- p = q->page;
- if (likely(p)) {
- q->page = p[0];
- p[0] = NULL;
- q->nr_pages--;
- }
- put_cpu_var(quicklist);
- if (likely(p))
- return p;
-
- p = (void *)__get_free_page(flags | __GFP_ZERO);
- if (ctor && p)
- ctor(p);
- return p;
-}
-
-static inline void __quicklist_free(int nr, void (*dtor)(void *), void *p,
- struct page *page)
-{
- struct quicklist *q;
-
- q = &get_cpu_var(quicklist)[nr];
- *(void **)p = q->page;
- q->page = p;
- q->nr_pages++;
- put_cpu_var(quicklist);
-}
-
-static inline void quicklist_free(int nr, void (*dtor)(void *), void *pp)
-{
- __quicklist_free(nr, dtor, pp, virt_to_page(pp));
-}
-
-static inline void quicklist_free_page(int nr, void (*dtor)(void *),
- struct page *page)
-{
- __quicklist_free(nr, dtor, page_address(page), page);
-}
-
-void quicklist_trim(int nr, void (*dtor)(void *),
- unsigned long min_pages, unsigned long max_free);
-
-unsigned long quicklist_total_size(void);
-
-#else
-
-static inline unsigned long quicklist_total_size(void)
-{
- return 0;
-}
-
-#endif
-
-#endif /* LINUX_QUICKLIST_H */
-
diff --git a/include/linux/quota.h b/include/linux/quota.h
index bfd077ca6ac3..fd692b4a41d5 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -91,7 +91,7 @@ extern bool qid_valid(struct kqid qid);
*
* When there is no mapping defined for the user-namespace, type,
 * qid tuple, an invalid kqid is returned. Callers are expected to
- * test for and handle handle invalid kqids being returned.
+ * test for and handle invalid kqids being returned.
* Invalid kqids may be tested for using qid_valid().
*/
static inline struct kqid make_kqid(struct user_namespace *from,
@@ -223,12 +223,12 @@ struct mem_dqinfo {
struct quota_format_type *dqi_format;
int dqi_fmt_id; /* Id of the dqi_format - used when turning
* quotas on after remount RW */
- struct list_head dqi_dirty_list; /* List of dirty dquots */
- unsigned long dqi_flags;
- unsigned int dqi_bgrace;
- unsigned int dqi_igrace;
- qsize_t dqi_max_spc_limit;
- qsize_t dqi_max_ino_limit;
+ struct list_head dqi_dirty_list; /* List of dirty dquots [dq_list_lock] */
+ unsigned long dqi_flags; /* DFQ_ flags [dq_data_lock] */
+ unsigned int dqi_bgrace; /* Space grace time [dq_data_lock] */
+ unsigned int dqi_igrace; /* Inode grace time [dq_data_lock] */
+ qsize_t dqi_max_spc_limit; /* Maximum space limit [static] */
+ qsize_t dqi_max_ino_limit; /* Maximum inode limit [static] */
void *dqi_priv;
};
@@ -263,11 +263,10 @@ enum {
};
struct dqstats {
- int stat[_DQST_DQSTAT_LAST];
+ unsigned long stat[_DQST_DQSTAT_LAST];
struct percpu_counter counter[_DQST_DQSTAT_LAST];
};
-extern struct dqstats *dqstats_pcpu;
extern struct dqstats dqstats;
static inline void dqstats_inc(unsigned int type)
@@ -293,18 +292,18 @@ static inline void dqstats_dec(unsigned int type)
* clear them when it sees fit. */
struct dquot {
- struct hlist_node dq_hash; /* Hash list in memory */
- struct list_head dq_inuse; /* List of all quotas */
- struct list_head dq_free; /* Free list element */
- struct list_head dq_dirty; /* List of dirty dquots */
+ struct hlist_node dq_hash; /* Hash list in memory [dq_list_lock] */
+ struct list_head dq_inuse; /* List of all quotas [dq_list_lock] */
+ struct list_head dq_free; /* Free list element [dq_list_lock] */
+ struct list_head dq_dirty; /* List of dirty dquots [dq_list_lock] */
struct mutex dq_lock; /* dquot IO lock */
+ spinlock_t dq_dqb_lock; /* Lock protecting dq_dqb changes */
atomic_t dq_count; /* Use count */
- wait_queue_head_t dq_wait_unused; /* Wait queue for dquot to become unused */
struct super_block *dq_sb; /* superblock this applies to */
struct kqid dq_id; /* ID this applies to (uid, gid, projid) */
- loff_t dq_off; /* Offset of dquot on disk */
+ loff_t dq_off; /* Offset of dquot on disk [dq_lock, stable once set] */
unsigned long dq_flags; /* See DQ_* */
- struct mem_dqblk dq_dqb; /* Diskquota usage */
+ struct mem_dqblk dq_dqb; /* Diskquota usage [dq_dqb_lock] */
};
/* Operations which must be implemented by each quota format */
@@ -409,13 +408,7 @@ struct qc_type_state {
struct qc_state {
unsigned int s_incoredqs; /* Number of dquots in core */
- /*
- * Per quota type information. The array should really have
- * max(MAXQUOTAS, XQM_MAXQUOTAS) entries. BUILD_BUG_ON in
- * quota_getinfo() makes sure XQM_MAXQUOTAS is large enough. Once VFS
- * supports project quotas, this can be changed to MAXQUOTAS
- */
- struct qc_type_state s_state[XQM_MAXQUOTAS];
+ struct qc_type_state s_state[MAXQUOTAS]; /* Per quota type information */
};
/* Structure for communicating via ->set_info */
@@ -455,17 +448,18 @@ struct quota_format_type {
};
/**
- * Quota state flags - they actually come in two flavors - for users and groups.
+ * Quota state flags - they come in three flavors - for users, groups and projects.
*
* Actual typed flags layout:
- * USRQUOTA GRPQUOTA
- * DQUOT_USAGE_ENABLED 0x0001 0x0002
- * DQUOT_LIMITS_ENABLED 0x0004 0x0008
- * DQUOT_SUSPENDED 0x0010 0x0020
+ * USRQUOTA GRPQUOTA PRJQUOTA
+ * DQUOT_USAGE_ENABLED 0x0001 0x0002 0x0004
+ * DQUOT_LIMITS_ENABLED 0x0008 0x0010 0x0020
+ * DQUOT_SUSPENDED 0x0040 0x0080 0x0100
*
* Following bits are used for non-typed flags:
- * DQUOT_QUOTA_SYS_FILE 0x0040
- * DQUOT_NEGATIVE_USAGE 0x0080
+ * DQUOT_QUOTA_SYS_FILE 0x0200
+ * DQUOT_NEGATIVE_USAGE 0x0400
+ * DQUOT_NOLIST_DIRTY 0x0800
*/
enum {
_DQUOT_USAGE_ENABLED = 0, /* Track disk usage for users */
@@ -491,6 +485,9 @@ enum {
*/
#define DQUOT_NEGATIVE_USAGE (1 << (DQUOT_STATE_LAST + 1))
/* Allow negative quota usage */
+/* Do not track dirty dquots in a list */
+#define DQUOT_NOLIST_DIRTY (1 << (DQUOT_STATE_LAST + 2))
+
static inline unsigned int dquot_state_flag(unsigned int flags, int type)
{
return flags << type;
@@ -521,7 +518,7 @@ static inline void quota_send_warning(struct kqid qid, dev_t dev,
struct quota_info {
unsigned int flags; /* Flags for diskquotas on this device */
- struct mutex dqio_mutex; /* lock device while I/O in progress */
+ struct rw_semaphore dqio_sem; /* Lock quota file while I/O in progress */
struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */
struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */
const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */
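
The typed-flag layout documented above is produced by a plain left shift:
each base flag occupies the USRQUOTA column, and dquot_state_flag() moves
it into the column for the given type. A standalone userspace sketch
(constants local to this example):

	#include <stdio.h>

	#define DQUOT_USAGE_ENABLED	(1 << 0)	/* type 0 column */
	#define DQUOT_LIMITS_ENABLED	(1 << 3)
	#define DQUOT_SUSPENDED		(1 << 6)

	static unsigned int dquot_state_flag(unsigned int flags, int type)
	{
		return flags << type;	/* shift into USR/GRP/PRJ column */
	}

	int main(void)
	{
		/* GRPQUOTA is type 1: 0x0001 -> 0x0002, 0x0008 -> 0x0010 */
		printf("%#x\n", dquot_state_flag(DQUOT_USAGE_ENABLED |
						 DQUOT_LIMITS_ENABLED, 1));
		return 0;
	}
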
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index dda22f45fc1b..11a4becff3a9 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for diskquota-operations. When diskquota is configured these
* macros expand to the right source-code.
@@ -19,11 +20,12 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb)
}
 /* i_mutex must be held */
-static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
+static inline bool is_quota_modification(struct mnt_idmap *idmap,
+ struct inode *inode, struct iattr *ia)
{
- return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) ||
- (ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) ||
- (ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid));
+ return ((ia->ia_valid & ATTR_SIZE) ||
+ i_uid_needs_update(idmap, ia, inode) ||
+ i_gid_needs_update(idmap, ia, inode));
}
#if defined(CONFIG_QUOTA)
@@ -38,11 +40,6 @@ void __quota_error(struct super_block *sb, const char *func,
/*
* declaration of quota_function calls in kernel.
*/
-void inode_add_rsv_space(struct inode *inode, qsize_t number);
-void inode_claim_rsv_space(struct inode *inode, qsize_t number);
-void inode_sub_rsv_space(struct inode *inode, qsize_t number);
-void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
-
int dquot_initialize(struct inode *inode);
bool dquot_initialize_needed(struct inode *inode);
void dquot_drop(struct inode *inode);
@@ -55,6 +52,16 @@ static inline struct dquot *dqgrab(struct dquot *dquot)
atomic_inc(&dquot->dq_count);
return dquot;
}
+
+static inline bool dquot_is_busy(struct dquot *dquot)
+{
+ if (test_bit(DQ_MOD_B, &dquot->dq_flags))
+ return true;
+ if (atomic_read(&dquot->dq_count) > 1)
+ return true;
+ return false;
+}
+
void dqput(struct dquot *dquot);
int dquot_scan_active(struct super_block *sb,
int (*fn)(struct dquot *dquot, unsigned long priv),
@@ -88,7 +95,9 @@ int dquot_mark_dquot_dirty(struct dquot *dquot);
int dquot_file_open(struct inode *inode, struct file *file);
-int dquot_enable(struct inode *inode, int type, int format_id,
+int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
+ unsigned int flags);
+int dquot_load_quota_inode(struct inode *inode, int type, int format_id,
unsigned int flags);
int dquot_quota_on(struct super_block *sb, int type, int format_id,
const struct path *path);
@@ -107,7 +116,8 @@ int dquot_set_dqblk(struct super_block *sb, struct kqid id,
struct qc_dqblk *di);
int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
-int dquot_transfer(struct inode *inode, struct iattr *iattr);
+int dquot_transfer(struct mnt_idmap *idmap, struct inode *inode,
+ struct iattr *iattr);
static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type)
{
@@ -226,7 +236,8 @@ static inline void dquot_free_inode(struct inode *inode)
{
}
-static inline int dquot_transfer(struct inode *inode, struct iattr *iattr)
+static inline int dquot_transfer(struct mnt_idmap *idmap,
+ struct inode *inode, struct iattr *iattr)
{
return 0;
}
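
For context, here is how the reworked signatures fit together in a
filesystem's setattr path; this is a sketch with a hypothetical
my_setattr(), modeled on how in-tree filesystems such as ext2 call these
helpers, not code from this patch:

	static int my_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
			      struct iattr *ia)
	{
		struct inode *inode = d_inode(dentry);
		int err;

		/* Bring dquots in core before anything that charges them. */
		if (is_quota_modification(idmap, inode, ia)) {
			err = dquot_initialize(inode);
			if (err)
				return err;
		}

		/* Changing the owner transfers the inode's quota usage. */
		if (i_uid_needs_update(idmap, ia, inode) ||
		    i_gid_needs_update(idmap, ia, inode)) {
			err = dquot_transfer(idmap, inode, ia);
			if (err)
				return err;
		}

		return 0;	/* ...then the fs's own attribute update */
	}
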
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 3e5735064b71..eae67015ce51 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -1,62 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2001 Momchil Velikov
* Portions Copyright (C) 2001 Christoph Hellwig
* Copyright (C) 2006 Nick Piggin
* Copyright (C) 2012 Konstantin Khlebnikov
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H
#include <linux/bitops.h>
-#include <linux/bug.h>
-#include <linux/kernel.h>
+#include <linux/gfp_types.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/math.h>
+#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>
+#include <linux/xarray.h>
+#include <linux/local_lock.h>
+
+/* Keep unconverted code working */
+#define radix_tree_root xarray
+#define radix_tree_node xa_node
+
+struct radix_tree_preload {
+ local_lock_t lock;
+ unsigned nr;
+ /* nodes->parent points to next preallocated node */
+ struct radix_tree_node *nodes;
+};
+DECLARE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
/*
* The bottom two bits of the slot determine how the remaining bits in the
* slot are interpreted:
*
* 00 - data pointer
- * 01 - internal entry
- * 10 - exceptional entry
- * 11 - this bit combination is currently unused/reserved
+ * 10 - internal entry
+ * x1 - value entry
*
* The internal entry may be a pointer to the next level in the tree, a
* sibling entry, or an indicator that the entry in this slot has been moved
* to another location in the tree and the lookup should be restarted. While
* NULL fits the 'data pointer' pattern, it means that there is no entry in
* the tree for this index (no matter what level of the tree it is found at).
- * This means that you cannot store NULL in the tree as a value for the index.
+ * This means that storing a NULL entry in the tree is the same as deleting
+ * the entry from the tree.
*/
#define RADIX_TREE_ENTRY_MASK 3UL
-#define RADIX_TREE_INTERNAL_NODE 1UL
-
-/*
- * Most users of the radix tree store pointers but shmem/tmpfs stores swap
- * entries in the same tree. They are marked as exceptional entries to
- * distinguish them from pointers to struct page.
- * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it.
- */
-#define RADIX_TREE_EXCEPTIONAL_ENTRY 2
-#define RADIX_TREE_EXCEPTIONAL_SHIFT 2
+#define RADIX_TREE_INTERNAL_NODE 2UL
static inline bool radix_tree_is_internal_node(void *ptr)
{
@@ -66,71 +60,32 @@ static inline bool radix_tree_is_internal_node(void *ptr)
/*** radix-tree API starts here ***/
-#define RADIX_TREE_MAX_TAGS 3
-
-#ifndef RADIX_TREE_MAP_SHIFT
-#define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
-#endif
-
+#define RADIX_TREE_MAP_SHIFT XA_CHUNK_SHIFT
#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1)
-#define RADIX_TREE_TAG_LONGS \
- ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)
+#define RADIX_TREE_MAX_TAGS XA_MAX_MARKS
+#define RADIX_TREE_TAG_LONGS XA_MARK_LONGS
#define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
RADIX_TREE_MAP_SHIFT))
-/*
- * @count is the count of every non-NULL element in the ->slots array
- * whether that is an exceptional entry, a retry entry, a user pointer,
- * a sibling entry or a pointer to the next level of the tree.
- * @exceptional is the count of every element in ->slots which is
- * either radix_tree_exceptional_entry() or is a sibling entry for an
- * exceptional entry.
- */
-struct radix_tree_node {
- unsigned char shift; /* Bits remaining in each slot */
- unsigned char offset; /* Slot offset in parent */
- unsigned char count; /* Total entry count */
- unsigned char exceptional; /* Exceptional entry count */
- struct radix_tree_node *parent; /* Used when ascending tree */
- struct radix_tree_root *root; /* The tree we belong to */
- union {
- struct list_head private_list; /* For tree user */
- struct rcu_head rcu_head; /* Used when freeing node */
- };
- void __rcu *slots[RADIX_TREE_MAP_SIZE];
- unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
-};
-
-/* The top bits of gfp_mask are used to store the root tags and the IDR flag */
-#define ROOT_IS_IDR ((__force gfp_t)(1 << __GFP_BITS_SHIFT))
-#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT + 1)
+/* The IDR tag is stored in the low bits of xa_flags */
+#define ROOT_IS_IDR ((__force gfp_t)4)
+/* The top bits of xa_flags are used to store the root tags */
+#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT)
-struct radix_tree_root {
- gfp_t gfp_mask;
- struct radix_tree_node __rcu *rnode;
-};
-
-#define RADIX_TREE_INIT(mask) { \
- .gfp_mask = (mask), \
- .rnode = NULL, \
-}
+#define RADIX_TREE_INIT(name, mask) XARRAY_INIT(name, mask)
#define RADIX_TREE(name, mask) \
- struct radix_tree_root name = RADIX_TREE_INIT(mask)
+ struct radix_tree_root name = RADIX_TREE_INIT(name, mask)
-#define INIT_RADIX_TREE(root, mask) \
-do { \
- (root)->gfp_mask = (mask); \
- (root)->rnode = NULL; \
-} while (0)
+#define INIT_RADIX_TREE(root, mask) xa_init_flags(root, mask)
static inline bool radix_tree_empty(const struct radix_tree_root *root)
{
- return root->rnode == NULL;
+ return root->xa_head == NULL;
}
/**
@@ -140,7 +95,6 @@ static inline bool radix_tree_empty(const struct radix_tree_root *root)
* @next_index: one beyond the last index for this chunk
* @tags: bit-mask for tag-iterating
* @node: node that contains current slot
- * @shift: shift for the node that holds our slots
*
* This radix tree iterator works in terms of "chunks" of slots. A chunk is a
* subinterval of slots contained within one radix tree leaf node. It is
@@ -154,20 +108,8 @@ struct radix_tree_iter {
unsigned long next_index;
unsigned long tags;
struct radix_tree_node *node;
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
- unsigned int shift;
-#endif
};
-static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
-{
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
- return iter->shift;
-#else
- return 0;
-#endif
-}
-
/**
* Radix-tree synchronization
*
@@ -191,12 +133,11 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
* radix_tree_lookup_slot
* radix_tree_tag_get
* radix_tree_gang_lookup
- * radix_tree_gang_lookup_slot
* radix_tree_gang_lookup_tag
* radix_tree_gang_lookup_tag_slot
* radix_tree_tagged
*
- * The first 8 functions are able to be called locklessly, using RCU. The
+ * The first 7 functions are able to be called locklessly, using RCU. The
* caller must ensure calls to these functions are made within rcu_read_lock()
* regions. Other readers (lock-free or otherwise) and modifications may be
* running concurrently.
@@ -266,17 +207,6 @@ static inline int radix_tree_deref_retry(void *arg)
}
/**
- * radix_tree_exceptional_entry - radix_tree_deref_slot gave exceptional entry?
- * @arg: value returned by radix_tree_deref_slot
- * Returns: 0 if well-aligned pointer, non-0 if exceptional entry.
- */
-static inline int radix_tree_exceptional_entry(void *arg)
-{
- /* Not unlikely because radix_tree_exception often tested first */
- return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
-}
-
-/**
* radix_tree_exception - radix_tree_deref_slot returned either exception?
* @arg: value returned by radix_tree_deref_slot
* Returns: 0 if well-aligned pointer, non-0 if either kind of exception.
@@ -286,48 +216,28 @@ static inline int radix_tree_exception(void *arg)
return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
}
-int __radix_tree_create(struct radix_tree_root *, unsigned long index,
- unsigned order, struct radix_tree_node **nodep,
- void __rcu ***slotp);
-int __radix_tree_insert(struct radix_tree_root *, unsigned long index,
- unsigned order, void *);
-static inline int radix_tree_insert(struct radix_tree_root *root,
- unsigned long index, void *entry)
-{
- return __radix_tree_insert(root, index, 0, entry);
-}
+int radix_tree_insert(struct radix_tree_root *, unsigned long index,
+ void *);
void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index,
struct radix_tree_node **nodep, void __rcu ***slotp);
void *radix_tree_lookup(const struct radix_tree_root *, unsigned long);
void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *,
unsigned long index);
-typedef void (*radix_tree_update_node_t)(struct radix_tree_node *, void *);
void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *,
- void __rcu **slot, void *entry,
- radix_tree_update_node_t update_node, void *private);
+ void __rcu **slot, void *entry);
void radix_tree_iter_replace(struct radix_tree_root *,
const struct radix_tree_iter *, void __rcu **slot, void *entry);
void radix_tree_replace_slot(struct radix_tree_root *,
void __rcu **slot, void *entry);
-void __radix_tree_delete_node(struct radix_tree_root *,
- struct radix_tree_node *,
- radix_tree_update_node_t update_node,
- void *private);
void radix_tree_iter_delete(struct radix_tree_root *,
struct radix_tree_iter *iter, void __rcu **slot);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
-void radix_tree_clear_tags(struct radix_tree_root *, struct radix_tree_node *,
- void __rcu **slot);
unsigned int radix_tree_gang_lookup(const struct radix_tree_root *,
void **results, unsigned long first_index,
unsigned int max_items);
-unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *,
- void __rcu ***results, unsigned long *indices,
- unsigned long first_index, unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
-int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *,
unsigned long index, unsigned int tag);
@@ -335,8 +245,6 @@ void *radix_tree_tag_clear(struct radix_tree_root *,
unsigned long index, unsigned int tag);
int radix_tree_tag_get(const struct radix_tree_root *,
unsigned long index, unsigned int tag);
-void radix_tree_iter_tag_set(struct radix_tree_root *,
- const struct radix_tree_iter *iter, unsigned int tag);
void radix_tree_iter_tag_clear(struct radix_tree_root *,
const struct radix_tree_iter *iter, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *,
@@ -349,16 +257,12 @@ int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
static inline void radix_tree_preload_end(void)
{
- preempt_enable();
+ local_unlock(&radix_tree_preloads.lock);
}
-int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t);
-int radix_tree_split(struct radix_tree_root *, unsigned long index,
- unsigned new_order);
-int radix_tree_join(struct radix_tree_root *, unsigned long index,
- unsigned new_order, void *);
-void __rcu **idr_get_free(struct radix_tree_root *, struct radix_tree_iter *,
- gfp_t, int end);
+void __rcu **idr_get_free(struct radix_tree_root *root,
+ struct radix_tree_iter *iter, gfp_t gfp,
+ unsigned long max);
enum {
RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */
@@ -424,24 +328,6 @@ radix_tree_iter_lookup(const struct radix_tree_root *root,
}
/**
- * radix_tree_iter_find - find a present entry
- * @root: radix tree root
- * @iter: iterator state
- * @index: start location
- *
- * This function returns the slot containing the entry with the lowest index
- * which is at least @index. If @index is larger than any present entry, this
- * function returns NULL. The @iter is updated to describe the entry found.
- */
-static inline void __rcu **
-radix_tree_iter_find(const struct radix_tree_root *root,
- struct radix_tree_iter *iter, unsigned long index)
-{
- radix_tree_iter_init(iter, index);
- return radix_tree_next_chunk(root, iter, 0);
-}
-
-/**
* radix_tree_iter_retry - retry this chunk of the iteration
* @iter: iterator state
*
@@ -461,7 +347,7 @@ void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter)
static inline unsigned long
__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
{
- return iter->index + (slots << iter_shift(iter));
+ return iter->index + slots;
}
/**
@@ -486,26 +372,14 @@ void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot,
static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
- return (iter->next_index - iter->index) >> iter_shift(iter);
-}
-
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-void __rcu **__radix_tree_next_slot(void __rcu **slot,
- struct radix_tree_iter *iter, unsigned flags);
-#else
-/* Can't happen without sibling entries, but the compiler can't tell that */
-static inline void __rcu **__radix_tree_next_slot(void __rcu **slot,
- struct radix_tree_iter *iter, unsigned flags)
-{
- return slot;
+ return iter->next_index - iter->index;
}
-#endif
/**
* radix_tree_next_slot - find next slot in chunk
*
* @slot: pointer to current slot
- * @iter: pointer to interator state
+ * @iter: pointer to iterator state
* @flags: RADIX_TREE_ITER_*, should be constant
 * Returns: pointer to next slot, or NULL if there are no more left
*
@@ -559,8 +433,6 @@ static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
return NULL;
found:
- if (unlikely(radix_tree_is_internal_node(rcu_dereference_raw(*slot))))
- return __radix_tree_next_slot(slot, iter, flags);
return slot;
}
@@ -580,23 +452,6 @@ static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
slot = radix_tree_next_slot(slot, iter, 0))
/**
- * radix_tree_for_each_contig - iterate over contiguous slots
- *
- * @slot: the void** variable for pointer to slot
- * @root: the struct radix_tree_root pointer
- * @iter: the struct radix_tree_iter pointer
- * @start: iteration starting index
- *
- * @slot points to radix tree slot, @iter->index contains its index.
- */
-#define radix_tree_for_each_contig(slot, root, iter, start) \
- for (slot = radix_tree_iter_init(iter, start) ; \
- slot || (slot = radix_tree_next_chunk(root, iter, \
- RADIX_TREE_ITER_CONTIG)) ; \
- slot = radix_tree_next_slot(slot, iter, \
- RADIX_TREE_ITER_CONTIG))
-
-/**
* radix_tree_for_each_tagged - iterate over tagged slots
*
* @slot: the void** variable for pointer to slot
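
The two-bit slot encoding described above (00 data pointer, 10 internal
entry, x1 value entry) is ordinary pointer tagging. A self-contained
userspace sketch of the discrimination logic (names local to this
example; the kernel's xa_is_value()/xa_mk_value() play this role):

	#include <stdbool.h>
	#include <stdio.h>

	#define ENTRY_MASK	3UL
	#define INTERNAL_NODE	2UL	/* RADIX_TREE_INTERNAL_NODE above */

	static bool is_internal(void *p)
	{
		return ((unsigned long)p & ENTRY_MASK) == INTERNAL_NODE;
	}

	static bool is_value(void *p)	/* "x1": low bit set */
	{
		return (unsigned long)p & 1UL;
	}

	static void *mk_value(unsigned long v)	/* payload above the tag */
	{
		return (void *)((v << 1) | 1UL);
	}

	int main(void)
	{
		void *slot = mk_value(42);

		printf("value=%d internal=%d payload=%lu\n", is_value(slot),
		       is_internal(slot), (unsigned long)slot >> 1);
		return 0;
	}
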
diff --git a/include/linux/raid/detect.h b/include/linux/raid/detect.h
new file mode 100644
index 000000000000..1f029a71c3ef
--- /dev/null
+++ b/include/linux/raid/detect.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+void md_autodetect_dev(dev_t dev);
+
+#ifdef CONFIG_BLK_DEV_MD
+void md_run_setup(void);
+#else
+static inline void md_run_setup(void)
+{
+}
+#endif
diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h
deleted file mode 100644
index 358c04bfbe2a..000000000000
--- a/include/linux/raid/md_u.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- md_u.h : user <=> kernel API between Linux raidtools and RAID drivers
- Copyright (C) 1998 Ingo Molnar
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- You should have received a copy of the GNU General Public License
- (for example /usr/src/linux/COPYING); if not, write to the Free
- Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-#ifndef _MD_U_H
-#define _MD_U_H
-
-#include <uapi/linux/raid/md_u.h>
-
-extern int mdp_major;
-#endif
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 30f945329818..f29aaaf2eb21 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -1,13 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright 2003 H. Peter Anvin - All Rights Reserved
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Boston MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
- *
* ----------------------------------------------------------------------- */
#ifndef LINUX_RAID_RAID6_H
@@ -15,26 +10,19 @@
#ifdef __KERNEL__
-/* Set to 1 to use kernel-wide empty_zero_page */
-#define RAID6_USE_EMPTY_ZERO_PAGE 0
#include <linux/blkdev.h>
-/* We need a pre-zeroed page... if we don't want to use the kernel-provided
- one define it here */
-#if RAID6_USE_EMPTY_ZERO_PAGE
-# define raid6_empty_zero_page empty_zero_page
-#else
extern const char raid6_empty_zero_page[PAGE_SIZE];
-#endif
#else /* ! __KERNEL__ */
/* Used for testing in user space */
#include <errno.h>
#include <inttypes.h>
-#include <limits.h>
#include <stddef.h>
+#include <string.h>
#include <sys/mman.h>
+#include <sys/time.h>
#include <sys/types.h>
/* Not standard, but glibc defines it */
@@ -48,11 +36,16 @@ typedef uint64_t u64;
#ifndef PAGE_SIZE
# define PAGE_SIZE 4096
#endif
+#ifndef PAGE_SHIFT
+# define PAGE_SHIFT 12
+#endif
extern const char raid6_empty_zero_page[PAGE_SIZE];
#define __init
#define __exit
-#define __attribute_const__ __attribute__((const))
+#ifndef __attribute_const__
+# define __attribute_const__ __attribute__((const))
+#endif
#define noinline __attribute__((noinline))
#define preempt_enable()
@@ -61,12 +54,17 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
#define enable_kernel_altivec()
#define disable_kernel_altivec()
+#undef EXPORT_SYMBOL
#define EXPORT_SYMBOL(sym)
+#undef EXPORT_SYMBOL_GPL
#define EXPORT_SYMBOL_GPL(sym)
#define MODULE_LICENSE(licence)
#define MODULE_DESCRIPTION(desc)
#define subsys_initcall(x)
#define module_exit(x)
+
+#define IS_ENABLED(x) (x)
+#define CONFIG_RAID6_PQ_BENCHMARK 1
#endif /* __KERNEL__ */
/* Routine choices */
@@ -75,7 +73,7 @@ struct raid6_calls {
void (*xor_syndrome)(int, int, int, size_t, void **);
int (*valid)(void); /* Returns 1 if this routine set is usable */
const char *name; /* Name of this routine set */
- int prefer; /* Has special performance attribute */
+ int priority; /* Relative priority ranking if non-zero */
};
/* Selected algorithm */
@@ -105,8 +103,11 @@ extern const struct raid6_calls raid6_avx2x4;
extern const struct raid6_calls raid6_avx512x1;
extern const struct raid6_calls raid6_avx512x2;
extern const struct raid6_calls raid6_avx512x4;
-extern const struct raid6_calls raid6_tilegx8;
extern const struct raid6_calls raid6_s390vx8;
+extern const struct raid6_calls raid6_vpermxor1;
+extern const struct raid6_calls raid6_vpermxor2;
+extern const struct raid6_calls raid6_vpermxor4;
+extern const struct raid6_calls raid6_vpermxor8;
struct raid6_recov_calls {
void (*data2)(int, size_t, int, int, void **);
@@ -121,6 +122,7 @@ extern const struct raid6_recov_calls raid6_recov_ssse3;
extern const struct raid6_recov_calls raid6_recov_avx2;
extern const struct raid6_recov_calls raid6_recov_avx512;
extern const struct raid6_recov_calls raid6_recov_s390xc;
+extern const struct raid6_recov_calls raid6_recov_neon;
extern const struct raid6_calls raid6_neonx1;
extern const struct raid6_calls raid6_neonx2;
diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h
index 5a210959e3f8..51b811b62322 100644
--- a/include/linux/raid/xor.h
+++ b/include/linux/raid/xor.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _XOR_H
#define _XOR_H
@@ -10,13 +11,20 @@ struct xor_block_template {
struct xor_block_template *next;
const char *name;
int speed;
- void (*do_2)(unsigned long, unsigned long *, unsigned long *);
- void (*do_3)(unsigned long, unsigned long *, unsigned long *,
- unsigned long *);
- void (*do_4)(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *);
- void (*do_5)(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *, unsigned long *);
+ void (*do_2)(unsigned long, unsigned long * __restrict,
+ const unsigned long * __restrict);
+ void (*do_3)(unsigned long, unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict);
+ void (*do_4)(unsigned long, unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict);
+ void (*do_5)(unsigned long, unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict);
};
#endif
diff --git a/include/linux/raid_class.h b/include/linux/raid_class.h
index 31e1ff69efc8..6a9b177d5c41 100644
--- a/include/linux/raid_class.h
+++ b/include/linux/raid_class.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* raid_class.h - a generic raid visualisation class
*
* Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
- *
- * This file is licensed under GPLv2
*/
#include <linux/transport_class.h>
@@ -12,7 +11,7 @@ struct raid_template {
};
struct raid_function_template {
- void *cookie;
+ const void *cookie;
int (*is_raid)(struct device *);
void (*get_resync)(struct device *);
void (*get_state)(struct device *);
@@ -38,6 +37,7 @@ enum raid_level {
RAID_LEVEL_5,
RAID_LEVEL_50,
RAID_LEVEL_6,
+ RAID_LEVEL_JBOD,
};
struct raid_data {
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index ecc730977a5a..917528d102c4 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -1,10 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RAMFS_H
#define _LINUX_RAMFS_H
+#include <linux/fs_parser.h> // bleh...
+
struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir,
umode_t mode, dev_t dev);
-extern struct dentry *ramfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data);
+extern int ramfs_init_fs_context(struct fs_context *fc);
#ifdef CONFIG_MMU
static inline int
@@ -16,10 +18,8 @@ ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize);
#endif
+extern const struct fs_parameter_spec ramfs_fs_parameters[];
extern const struct file_operations ramfs_file_operations;
extern const struct vm_operations_struct generic_file_vm_ops;
-extern int __init init_ramfs_fs(void);
-
-int ramfs_fill_super(struct super_block *sb, void *data, int silent);
#endif
diff --git a/include/linux/random.h b/include/linux/random.h
index eafea6a09361..b0a940af4fff 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -1,54 +1,46 @@
-/*
- * include/linux/random.h
- *
- * Include file for the random number generator.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+
#ifndef _LINUX_RANDOM_H
#define _LINUX_RANDOM_H
+#include <linux/bug.h>
+#include <linux/kernel.h>
#include <linux/list.h>
-#include <linux/once.h>
#include <uapi/linux/random.h>
-struct random_ready_callback {
- struct list_head list;
- void (*func)(struct random_ready_callback *rdy);
- struct module *owner;
-};
+struct notifier_block;
-extern void add_device_randomness(const void *, unsigned int);
+void add_device_randomness(const void *buf, size_t len);
+void __init add_bootloader_randomness(const void *buf, size_t len);
+void add_input_randomness(unsigned int type, unsigned int code,
+ unsigned int value) __latent_entropy;
+void add_interrupt_randomness(int irq) __latent_entropy;
+void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after);
-#if defined(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) && !defined(__CHECKER__)
static inline void add_latent_entropy(void)
{
- add_device_randomness((const void *)&latent_entropy,
- sizeof(latent_entropy));
-}
+#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
#else
-static inline void add_latent_entropy(void) {}
+ add_device_randomness(NULL, 0);
#endif
+}
-extern void add_input_randomness(unsigned int type, unsigned int code,
- unsigned int value) __latent_entropy;
-extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
-
-extern void get_random_bytes(void *buf, int nbytes);
-extern int wait_for_random_bytes(void);
-extern int add_random_ready_callback(struct random_ready_callback *rdy);
-extern void del_random_ready_callback(struct random_ready_callback *rdy);
-extern void get_random_bytes_arch(void *buf, int nbytes);
-
-#ifndef MODULE
-extern const struct file_operations random_fops, urandom_fops;
+#if IS_ENABLED(CONFIG_VMGENID)
+void add_vmfork_randomness(const void *unique_vm_id, size_t len);
+int register_random_vmfork_notifier(struct notifier_block *nb);
+int unregister_random_vmfork_notifier(struct notifier_block *nb);
+#else
+static inline int register_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
+static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
#endif
+void get_random_bytes(void *buf, size_t len);
+u8 get_random_u8(void);
+u16 get_random_u16(void);
u32 get_random_u32(void);
u64 get_random_u64(void);
-static inline unsigned int get_random_int(void)
-{
- return get_random_u32();
-}
static inline unsigned long get_random_long(void)
{
#if BITS_PER_LONG == 64
@@ -58,143 +50,115 @@ static inline unsigned long get_random_long(void)
#endif
}
+u32 __get_random_u32_below(u32 ceil);
+
/*
- * On 64-bit architectures, protect against non-terminated C string overflows
- * by zeroing out the first byte of the canary; this leaves 56 bits of entropy.
+ * Returns a random integer in the interval [0, ceil), with uniform
+ * distribution, suitable for all uses. Fastest when ceil is a constant, but
+ * still fast for variable ceil as well.
*/
-#ifdef CONFIG_64BIT
-# ifdef __LITTLE_ENDIAN
-# define CANARY_MASK 0xffffffffffffff00UL
-# else /* big endian, 64 bits: */
-# define CANARY_MASK 0x00ffffffffffffffUL
-# endif
-#else /* 32 bits: */
-# define CANARY_MASK 0xffffffffUL
-#endif
+static inline u32 get_random_u32_below(u32 ceil)
+{
+ if (!__builtin_constant_p(ceil))
+ return __get_random_u32_below(ceil);
+
+ /*
+ * For the fast path, below, all operations on ceil are precomputed by
+ * the compiler, so this incurs no overhead for checking pow2, doing
+ * divisions, or branching based on integer size. The resultant
+ * algorithm does traditional reciprocal multiplication (typically
+ * optimized by the compiler into shifts and adds), rejecting samples
+ * whose lower half would indicate a range indivisible by ceil.
+ */
+ BUILD_BUG_ON_MSG(!ceil, "get_random_u32_below() must take ceil > 0");
+ if (ceil <= 1)
+ return 0;
+ for (;;) {
+ if (ceil <= 1U << 8) {
+ u32 mult = ceil * get_random_u8();
+ if (likely(is_power_of_2(ceil) || (u8)mult >= (1U << 8) % ceil))
+ return mult >> 8;
+ } else if (ceil <= 1U << 16) {
+ u32 mult = ceil * get_random_u16();
+ if (likely(is_power_of_2(ceil) || (u16)mult >= (1U << 16) % ceil))
+ return mult >> 16;
+ } else {
+ u64 mult = (u64)ceil * get_random_u32();
+ if (likely(is_power_of_2(ceil) || (u32)mult >= -ceil % ceil))
+ return mult >> 32;
+ }
+ }
+}
-static inline unsigned long get_random_canary(void)
+/*
+ * Returns a random integer in the interval (floor, U32_MAX], with uniform
+ * distribution, suitable for all uses. Fastest when floor is a constant, but
+ * still fast for variable floor as well.
+ */
+static inline u32 get_random_u32_above(u32 floor)
{
- unsigned long val = get_random_long();
+ BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && floor == U32_MAX,
+ "get_random_u32_above() must take floor < U32_MAX");
+ return floor + 1 + get_random_u32_below(U32_MAX - floor);
+}
- return val & CANARY_MASK;
+/*
+ * Returns a random integer in the interval [floor, ceil], with uniform
+ * distribution, suitable for all uses. Fastest when floor and ceil are
+ * constant, but still fast for variable floor and ceil as well.
+ */
+static inline u32 get_random_u32_inclusive(u32 floor, u32 ceil)
+{
+ BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && __builtin_constant_p(ceil) &&
+ (floor > ceil || ceil - floor == U32_MAX),
+ "get_random_u32_inclusive() must take floor <= ceil");
+ return floor + get_random_u32_below(ceil - floor + 1);
}
+void __init random_init_early(const char *command_line);
+void __init random_init(void);
+bool rng_is_initialized(void);
+int wait_for_random_bytes(void);
+int execute_with_initialized_rng(struct notifier_block *nb);
+
/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
* Returns the result of the call to wait_for_random_bytes. */
-static inline int get_random_bytes_wait(void *buf, int nbytes)
+static inline int get_random_bytes_wait(void *buf, size_t nbytes)
{
int ret = wait_for_random_bytes();
- if (unlikely(ret))
- return ret;
get_random_bytes(buf, nbytes);
- return 0;
+ return ret;
}
-#define declare_get_random_var_wait(var) \
- static inline int get_random_ ## var ## _wait(var *out) { \
+#define declare_get_random_var_wait(name, ret_type) \
+ static inline int get_random_ ## name ## _wait(ret_type *out) { \
int ret = wait_for_random_bytes(); \
if (unlikely(ret)) \
return ret; \
- *out = get_random_ ## var(); \
+ *out = get_random_ ## name(); \
return 0; \
}
-declare_get_random_var_wait(u32)
-declare_get_random_var_wait(u64)
-declare_get_random_var_wait(int)
-declare_get_random_var_wait(long)
+declare_get_random_var_wait(u8, u8)
+declare_get_random_var_wait(u16, u16)
+declare_get_random_var_wait(u32, u32)
+declare_get_random_var_wait(u64, u64)
+declare_get_random_var_wait(long, unsigned long)
#undef declare_get_random_var_wait
-unsigned long randomize_page(unsigned long start, unsigned long range);
-
-u32 prandom_u32(void);
-void prandom_bytes(void *buf, size_t nbytes);
-void prandom_seed(u32 seed);
-void prandom_reseed_late(void);
-
-struct rnd_state {
- __u32 s1, s2, s3, s4;
-};
-
-u32 prandom_u32_state(struct rnd_state *state);
-void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
-void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
-
-#define prandom_init_once(pcpu_state) \
- DO_ONCE(prandom_seed_full_state, (pcpu_state))
-
-/**
- * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
- * @ep_ro: right open interval endpoint
- *
- * Returns a pseudo-random number that is in interval [0, ep_ro). Note
- * that the result depends on PRNG being well distributed in [0, ~0U]
- * u32 space. Here we use maximally equidistributed combined Tausworthe
- * generator, that is, prandom_u32(). This is useful when requesting a
- * random index of an array containing ep_ro elements, for example.
- *
- * Returns: pseudo-random number in interval [0, ep_ro)
- */
-static inline u32 prandom_u32_max(u32 ep_ro)
-{
- return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
-}
-
/*
- * Handle minimum values for seeds
+ * This is designed to be standalone for just prandom
+ * users, but for now we include it from <linux/random.h>
+ * for legacy reasons.
*/
-static inline u32 __seed(u32 x, u32 m)
-{
- return (x < m) ? x + m : x;
-}
-
-/**
- * prandom_seed_state - set seed for prandom_u32_state().
- * @state: pointer to state structure to receive the seed.
- * @seed: arbitrary 64-bit value to use as a seed.
- */
-static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
-{
- u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
+#include <linux/prandom.h>
- state->s1 = __seed(i, 2U);
- state->s2 = __seed(i, 8U);
- state->s3 = __seed(i, 16U);
- state->s4 = __seed(i, 128U);
-}
-
-#ifdef CONFIG_ARCH_RANDOM
-# include <asm/archrandom.h>
-#else
-static inline bool arch_get_random_long(unsigned long *v)
-{
- return 0;
-}
-static inline bool arch_get_random_int(unsigned int *v)
-{
- return 0;
-}
-static inline bool arch_has_random(void)
-{
- return 0;
-}
-static inline bool arch_get_random_seed_long(unsigned long *v)
-{
- return 0;
-}
-static inline bool arch_get_random_seed_int(unsigned int *v)
-{
- return 0;
-}
-static inline bool arch_has_random_seed(void)
-{
- return 0;
-}
+#ifdef CONFIG_SMP
+int random_prepare_cpu(unsigned int cpu);
+int random_online_cpu(unsigned int cpu);
#endif
-/* Pseudo random number generator from numerical recipes. */
-static inline u32 next_pseudo_random32(u32 seed)
-{
- return seed * 1664525 + 1013904223;
-}
+#ifndef MODULE
+extern const struct file_operations random_fops, urandom_fops;
+#endif
#endif /* _LINUX_RANDOM_H */
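
The fast path of get_random_u32_below() above is multiply-high with
rejection of the biased residue. A userspace sketch of the 32-bit case
(rand32() is only a stand-in for get_random_u32(); ceil must be nonzero):

	#include <stdint.h>
	#include <stdlib.h>

	static uint32_t rand32(void)
	{
		return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
	}

	static uint32_t random_below(uint32_t ceil)	/* [0, ceil) */
	{
		for (;;) {
			uint64_t mult = (uint64_t)ceil * rand32();

			/* Unsigned -ceil % ceil == 2^32 % ceil: the number
			 * of leftover values that would bias the buckets.
			 * For a power-of-2 ceil this is 0, so nothing is
			 * ever rejected. */
			if ((uint32_t)mult >= -ceil % ceil)
				return mult >> 32;
		}
	}
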
diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h
new file mode 100644
index 000000000000..5d868505a94e
--- /dev/null
+++ b/include/linux/randomize_kstack.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LINUX_RANDOMIZE_KSTACK_H
+#define _LINUX_RANDOMIZE_KSTACK_H
+
+#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
+#include <linux/kernel.h>
+#include <linux/jump_label.h>
+#include <linux/percpu-defs.h>
+
+DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
+ randomize_kstack_offset);
+DECLARE_PER_CPU(u32, kstack_offset);
+
+/*
+ * Do not use this anywhere else in the kernel. This is used here because
+ * it provides an arch-agnostic way to grow the stack with correct
+ * alignment. Also, since this use is being explicitly masked to a max of
+ * 10 bits, stack-clash style attacks are unlikely. For more details see
+ * "VLAs" in Documentation/process/deprecated.rst
+ *
+ * The normal __builtin_alloca() is initialized with INIT_STACK_ALL (currently
+ * only with Clang and not GCC). Initializing the unused area on each syscall
+ * entry is expensive, and generating an implicit call to memset() may also be
+ * problematic (such as in noinstr functions). Therefore, if the compiler
+ * supports it (which it should if it initializes allocas), always use the
+ * "uninitialized" variant of the builtin.
+ */
+#if __has_builtin(__builtin_alloca_uninitialized)
+#define __kstack_alloca __builtin_alloca_uninitialized
+#else
+#define __kstack_alloca __builtin_alloca
+#endif
+
+/*
+ * Use, at most, 10 bits of entropy. We explicitly cap this to keep the
+ * "VLA" from being unbounded (see above). 10 bits leaves enough room for
+ * per-arch offset masks to reduce entropy (by removing higher bits, since
+ * high entropy may overly constrain usable stack space), and for
+ * compiler/arch-specific stack alignment to remove the lower bits.
+ */
+#define KSTACK_OFFSET_MAX(x) ((x) & 0x3FF)
+
+/**
+ * add_random_kstack_offset - Increase stack utilization by previously
+ * chosen random offset
+ *
+ * This should be used in the syscall entry path when interrupts and
+ * preempt are disabled, and after user registers have been stored to
+ * the stack. For testing the resulting entropy, please see:
+ * tools/testing/selftests/lkdtm/stack-entropy.sh
+ */
+#define add_random_kstack_offset() do { \
+ if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
+ &randomize_kstack_offset)) { \
+ u32 offset = raw_cpu_read(kstack_offset); \
+ u8 *ptr = __kstack_alloca(KSTACK_OFFSET_MAX(offset)); \
+ /* Keep allocation even after "ptr" loses scope. */ \
+ asm volatile("" :: "r"(ptr) : "memory"); \
+ } \
+} while (0)
+
+/**
+ * choose_random_kstack_offset - Choose the random offset for the next
+ * add_random_kstack_offset()
+ *
+ * This should only be used during syscall exit when interrupts and
+ * preempt are disabled. This position in the syscall flow was chosen to
+ * frustrate attacks from userspace attempting to learn the next offset:
+ * - Maximize the timing uncertainty visible from userspace: if the
+ * offset is chosen at syscall entry, userspace has much more control
+ * over the timing between choosing offsets. "How long will we be in
+ * kernel mode?" tends to be more difficult to predict than "how long
+ * will we be in user mode?"
+ * - Reduce the lifetime of the new offset sitting in memory during
+ * kernel mode execution. Exposure of "thread-local" memory content
+ * (e.g. current, percpu, etc) tends to be easier than arbitrary
+ * location memory exposure.
+ */
+#define choose_random_kstack_offset(rand) do { \
+ if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
+ &randomize_kstack_offset)) { \
+ u32 offset = raw_cpu_read(kstack_offset); \
+ offset ^= (rand); \
+ raw_cpu_write(kstack_offset, offset); \
+ } \
+} while (0)
+#else /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
+#define add_random_kstack_offset() do { } while (0)
+#define choose_random_kstack_offset(rand) do { } while (0)
+#endif /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
+
+#endif
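
The mechanism behind add_random_kstack_offset() is a bounded alloca that
the compiler must keep. A userspace sketch of the same idea (entry() and
do_work() are illustrative names; the kernel version runs at syscall
entry with the offset chosen at the previous syscall's exit):

	#include <alloca.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define OFFSET_MAX(x)	((x) & 0x3FF)	/* cap at 10 bits */

	static void do_work(void)
	{
		int probe;

		printf("frame at %p\n", (void *)&probe);  /* moves per call */
	}

	static void entry(uint32_t rnd)
	{
		uint8_t *pad = alloca(OFFSET_MAX(rnd));

		/* Keep the allocation live so it cannot be elided. */
		__asm__ volatile("" :: "r"(pad) : "memory");
		do_work();
	}

	int main(void)
	{
		for (int i = 0; i < 3; i++)
			entry((uint32_t)rand());
		return 0;
	}
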
diff --git a/include/linux/range.h b/include/linux/range.h
index bd184a5db791..7efb6a9b069b 100644
--- a/include/linux/range.h
+++ b/include/linux/range.h
@@ -1,11 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RANGE_H
#define _LINUX_RANGE_H
+#include <linux/types.h>
struct range {
u64 start;
u64 end;
};
+static inline u64 range_len(const struct range *range)
+{
+ return range->end - range->start + 1;
+}
+
+static inline bool range_contains(struct range *r1, struct range *r2)
+{
+ return r1->start <= r2->start && r1->end >= r2->end;
+}
+
int add_range(struct range *range, int az, int nr_range,
u64 start, u64 end);
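
Note that struct range is a closed interval: @end is the last valid
value, hence the +1 in range_len(). A quick userspace check (local
re-declarations for illustration only):

	#include <stdbool.h>
	#include <stdio.h>

	struct range { unsigned long long start, end; };

	static unsigned long long range_len(const struct range *r)
	{
		return r->end - r->start + 1;
	}

	static bool range_contains(const struct range *r1,
				   const struct range *r2)
	{
		return r1->start <= r2->start && r1->end >= r2->end;
	}

	int main(void)
	{
		struct range a = { 0x1000, 0x1fff };	/* 0x1000 bytes */
		struct range b = { 0x1800, 0x18ff };

		printf("len=%#llx contains=%d\n",
		       range_len(&a), range_contains(&a, &b));
		return 0;
	}
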
diff --git a/include/linux/ras.h b/include/linux/ras.h
index be5338a35d57..1f4048bf2674 100644
--- a/include/linux/ras.h
+++ b/include/linux/ras.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __RAS_H__
#define __RAS_H__
@@ -16,12 +17,7 @@ static inline int ras_add_daemon_trace(void) { return 0; }
#endif
#ifdef CONFIG_RAS_CEC
-void __init cec_init(void);
int __init parse_cec_param(char *str);
-int cec_add_elem(u64 pfn);
-#else
-static inline void __init cec_init(void) { }
-static inline int cec_add_elem(u64 pfn) { return -ENODEV; }
#endif
#ifdef CONFIG_RAS
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 56375edf2ed2..b17e0cd0a30c 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -1,41 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RATELIMIT_H
#define _LINUX_RATELIMIT_H
-#include <linux/param.h>
+#include <linux/ratelimit_types.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
-#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ)
-#define DEFAULT_RATELIMIT_BURST 10
-
-/* issue num suppressed message on exit */
-#define RATELIMIT_MSG_ON_RELEASE BIT(0)
-
-struct ratelimit_state {
- raw_spinlock_t lock; /* protect the state */
-
- int interval;
- int burst;
- int printed;
- int missed;
- unsigned long begin;
- unsigned long flags;
-};
-
-#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \
- .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
- .interval = interval_init, \
- .burst = burst_init, \
- }
-
-#define RATELIMIT_STATE_INIT_DISABLED \
- RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST)
-
-#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
- \
- struct ratelimit_state name = \
- RATELIMIT_STATE_INIT(name, interval_init, burst_init) \
-
static inline void ratelimit_state_init(struct ratelimit_state *rs,
int interval, int burst)
{
@@ -72,9 +42,6 @@ ratelimit_set_flags(struct ratelimit_state *rs, unsigned long flags)
extern struct ratelimit_state printk_ratelimit_state;
-extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
-#define __ratelimit(state) ___ratelimit(state, __func__)
-
#ifdef CONFIG_PRINTK
#define WARN_ON_RATELIMIT(condition, state) ({ \
diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h
new file mode 100644
index 000000000000..002266693e50
--- /dev/null
+++ b/include/linux/ratelimit_types.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_RATELIMIT_TYPES_H
+#define _LINUX_RATELIMIT_TYPES_H
+
+#include <linux/bits.h>
+#include <linux/param.h>
+#include <linux/spinlock_types_raw.h>
+
+#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ)
+#define DEFAULT_RATELIMIT_BURST 10
+
+/* issue the number of suppressed messages on exit */
+#define RATELIMIT_MSG_ON_RELEASE BIT(0)
+
+struct ratelimit_state {
+ raw_spinlock_t lock; /* protect the state */
+
+ int interval;
+ int burst;
+ int printed;
+ int missed;
+ unsigned long begin;
+ unsigned long flags;
+};
+
+#define RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, flags_init) { \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+ .interval = interval_init, \
+ .burst = burst_init, \
+ .flags = flags_init, \
+ }
+
+#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) \
+ RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, 0)
+
+#define RATELIMIT_STATE_INIT_DISABLED \
+ RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST)
+
+#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
+ \
+ struct ratelimit_state name = \
+ RATELIMIT_STATE_INIT(name, interval_init, burst_init) \
+
+extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
+#define __ratelimit(state) ___ratelimit(state, __func__)
+
+#endif /* _LINUX_RATELIMIT_TYPES_H */
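
A typical caller of the split-out interface, sketched (the handler and
message are illustrative): allow at most DEFAULT_RATELIMIT_BURST prints
per DEFAULT_RATELIMIT_INTERVAL, with the suppressed count accumulating
in the state's missed field:

	static DEFINE_RATELIMIT_STATE(my_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	static void my_handler(void)
	{
		if (__ratelimit(&my_rs))
			pr_warn("device reported a recoverable error\n");
	}
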
diff --git a/include/linux/rational.h b/include/linux/rational.h
index bfa6a2bcfb32..33f5f5fc3e36 100644
--- a/include/linux/rational.h
+++ b/include/linux/rational.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* rational fractions
*
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index e585018498d5..f7edca369eda 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
Red Black Trees
(C) 1999 Andrea Arcangeli <andrea@suse.de>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
linux/include/linux/rbtree.h
@@ -23,31 +11,20 @@
  I know it's not the cleanest way, but in C (not in C++) it's the way
  to get performance and genericity...
- See Documentation/rbtree.txt for documentation and samples.
+ See Documentation/core-api/rbtree.rst for documentation and samples.
*/
#ifndef _LINUX_RBTREE_H
#define _LINUX_RBTREE_H
-#include <linux/kernel.h>
+#include <linux/container_of.h>
+#include <linux/rbtree_types.h>
+
#include <linux/stddef.h>
#include <linux/rcupdate.h>
-struct rb_node {
- unsigned long __rb_parent_color;
- struct rb_node *rb_right;
- struct rb_node *rb_left;
-} __attribute__((aligned(sizeof(long))));
- /* The alignment might seem pointless, but allegedly CRIS needs it */
-
-struct rb_root {
- struct rb_node *rb_node;
-};
-
-
#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
-#define RB_ROOT (struct rb_root) { NULL, }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
#define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL)
@@ -125,4 +102,233 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent
typeof(*pos), field); 1; }); \
pos = n)
+/* Same as rb_first(), but O(1) */
+#define rb_first_cached(root) (root)->rb_leftmost
+
+static inline void rb_insert_color_cached(struct rb_node *node,
+ struct rb_root_cached *root,
+ bool leftmost)
+{
+ if (leftmost)
+ root->rb_leftmost = node;
+ rb_insert_color(node, &root->rb_root);
+}
+
+
+static inline struct rb_node *
+rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
+{
+ struct rb_node *leftmost = NULL;
+
+ if (root->rb_leftmost == node)
+ leftmost = root->rb_leftmost = rb_next(node);
+
+ rb_erase(node, &root->rb_root);
+
+ return leftmost;
+}
+
+static inline void rb_replace_node_cached(struct rb_node *victim,
+ struct rb_node *new,
+ struct rb_root_cached *root)
+{
+ if (root->rb_leftmost == victim)
+ root->rb_leftmost = new;
+ rb_replace_node(victim, new, &root->rb_root);
+}
+
+/*
+ * The below helper functions use 2 operators with 3 different
+ * calling conventions. The operators are related like:
+ *
+ * comp(a->key,b) < 0 := less(a,b)
+ * comp(a->key,b) > 0 := less(b,a)
+ * comp(a->key,b) == 0 := !less(a,b) && !less(b,a)
+ *
+ * If these operators define a partial order on the elements we make no
+ * guarantee on which of the elements matching the key is found. See
+ * rb_find().
+ *
+ * The reason for this is to allow the find() interface without requiring an
+ * on-stack dummy object, which might not be feasible due to object size.
+ */
+
+/**
+ * rb_add_cached() - insert @node into the leftmost cached tree @tree
+ * @node: node to insert
+ * @tree: leftmost cached tree to insert @node into
+ * @less: operator defining the (partial) node order
+ *
+ * Returns @node when it is the new leftmost, or NULL.
+ */
+static __always_inline struct rb_node *
+rb_add_cached(struct rb_node *node, struct rb_root_cached *tree,
+ bool (*less)(struct rb_node *, const struct rb_node *))
+{
+ struct rb_node **link = &tree->rb_root.rb_node;
+ struct rb_node *parent = NULL;
+ bool leftmost = true;
+
+ while (*link) {
+ parent = *link;
+ if (less(node, parent)) {
+ link = &parent->rb_left;
+ } else {
+ link = &parent->rb_right;
+ leftmost = false;
+ }
+ }
+
+ rb_link_node(node, parent, link);
+ rb_insert_color_cached(node, tree, leftmost);
+
+ return leftmost ? node : NULL;
+}
+
+/**
+ * rb_add() - insert @node into @tree
+ * @node: node to insert
+ * @tree: tree to insert @node into
+ * @less: operator defining the (partial) node order
+ */
+static __always_inline void
+rb_add(struct rb_node *node, struct rb_root *tree,
+ bool (*less)(struct rb_node *, const struct rb_node *))
+{
+ struct rb_node **link = &tree->rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*link) {
+ parent = *link;
+ if (less(node, parent))
+ link = &parent->rb_left;
+ else
+ link = &parent->rb_right;
+ }
+
+ rb_link_node(node, parent, link);
+ rb_insert_color(node, tree);
+}
+
+/**
+ * rb_find_add() - find equivalent @node in @tree, or add @node
+ * @node: node to look-for / insert
+ * @tree: tree to search / modify
+ * @cmp: operator defining the node order
+ *
+ * Returns the rb_node matching @node, or NULL when no match is found and @node
+ * is inserted.
+ */
+static __always_inline struct rb_node *
+rb_find_add(struct rb_node *node, struct rb_root *tree,
+ int (*cmp)(struct rb_node *, const struct rb_node *))
+{
+ struct rb_node **link = &tree->rb_node;
+ struct rb_node *parent = NULL;
+ int c;
+
+ while (*link) {
+ parent = *link;
+ c = cmp(node, parent);
+
+ if (c < 0)
+ link = &parent->rb_left;
+ else if (c > 0)
+ link = &parent->rb_right;
+ else
+ return parent;
+ }
+
+ rb_link_node(node, parent, link);
+ rb_insert_color(node, tree);
+ return NULL;
+}
+
+/**
+ * rb_find() - find @key in tree @tree
+ * @key: key to match
+ * @tree: tree to search
+ * @cmp: operator defining the node order
+ *
+ * Returns the rb_node matching @key or NULL.
+ */
+static __always_inline struct rb_node *
+rb_find(const void *key, const struct rb_root *tree,
+ int (*cmp)(const void *key, const struct rb_node *))
+{
+ struct rb_node *node = tree->rb_node;
+
+ while (node) {
+ int c = cmp(key, node);
+
+ if (c < 0)
+ node = node->rb_left;
+ else if (c > 0)
+ node = node->rb_right;
+ else
+ return node;
+ }
+
+ return NULL;
+}
+
+/**
+ * rb_find_first() - find the first @key in @tree
+ * @key: key to match
+ * @tree: tree to search
+ * @cmp: operator defining node order
+ *
+ * Returns the leftmost node matching @key, or NULL.
+ */
+static __always_inline struct rb_node *
+rb_find_first(const void *key, const struct rb_root *tree,
+ int (*cmp)(const void *key, const struct rb_node *))
+{
+ struct rb_node *node = tree->rb_node;
+ struct rb_node *match = NULL;
+
+ while (node) {
+ int c = cmp(key, node);
+
+ if (c <= 0) {
+ if (!c)
+ match = node;
+ node = node->rb_left;
+ } else if (c > 0) {
+ node = node->rb_right;
+ }
+ }
+
+ return match;
+}
+
+/**
+ * rb_next_match() - find the next @key in @tree
+ * @key: key to match
+ * @tree: tree to search
+ * @cmp: operator defining node order
+ *
+ * Returns the next node matching @key, or NULL.
+ */
+static __always_inline struct rb_node *
+rb_next_match(const void *key, struct rb_node *node,
+ int (*cmp)(const void *key, const struct rb_node *))
+{
+ node = rb_next(node);
+ if (node && cmp(key, node))
+ node = NULL;
+ return node;
+}
+
+/**
+ * rb_for_each() - iterates a subtree matching @key
+ * @node: iterator
+ * @key: key to match
+ * @tree: tree to search
+ * @cmp: operator defining node order
+ */
+#define rb_for_each(node, key, tree, cmp) \
+ for ((node) = rb_find_first((key), (tree), (cmp)); \
+ (node); (node) = rb_next_match((key), (node), (cmp)))
+
#endif /* _LINUX_RBTREE_H */
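A minimal usage sketch of the new rb_add()/rb_find() helpers, with hypothetical node, field, and function names; the less()/cmp() pair follows the comparator conventions documented above:

	#include <linux/rbtree.h>

	struct mynode {				/* hypothetical node type */
		struct rb_node node;
		unsigned long key;
	};

	static bool mynode_less(struct rb_node *a, const struct rb_node *b)
	{
		return rb_entry(a, struct mynode, node)->key <
		       rb_entry(b, struct mynode, node)->key;
	}

	static int mynode_cmp(const void *key, const struct rb_node *n)
	{
		unsigned long k = *(const unsigned long *)key;
		const struct mynode *m = rb_entry(n, struct mynode, node);

		return k < m->key ? -1 : (k > m->key ? 1 : 0);
	}

	static void mynode_demo(struct rb_root *root, struct mynode *new,
				unsigned long key)
	{
		struct rb_node *n;

		rb_add(&new->node, root, mynode_less);	/* descend, link, color */

		n = rb_find(&key, root, mynode_cmp);	/* lookup by bare key */
		if (n)					/* found: erase it again */
			rb_erase(n, root);
	}

Because rb_find() takes a bare key, lookups need no on-stack dummy node.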
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index 9702b6e183bc..7ee7ed5de722 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -1,22 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
Red Black Trees
(C) 1999 Andrea Arcangeli <andrea@suse.de>
(C) 2002 David Woodhouse <dwmw2@infradead.org>
(C) 2012 Michel Lespinasse <walken@google.com>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
linux/include/linux/rbtree_augmented.h
*/
@@ -26,13 +14,14 @@
#include <linux/compiler.h>
#include <linux/rbtree.h>
+#include <linux/rcupdate.h>
/*
* Please note - only struct rb_augment_callbacks and the prototypes for
* rb_insert_augmented() and rb_erase_augmented() are intended to be public.
* The rest are implementation details you are not expected to depend on.
*
- * See Documentation/rbtree.txt for documentation and samples.
+ * See Documentation/core-api/rbtree.rst for documentation and samples.
*/
struct rb_augment_callbacks {
@@ -43,13 +32,14 @@ struct rb_augment_callbacks {
extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
+
/*
* Fixup the rbtree and update the augmented information when rebalancing.
*
* On insertion, the user must update the augmented information on the path
* leading to the inserted node, then call rb_link_node() as usual and
- * rb_augment_inserted() instead of the usual rb_insert_color() call.
- * If rb_augment_inserted() rebalances the rbtree, it will callback into
+ * rb_insert_augmented() instead of the usual rb_insert_color() call.
+ * If rb_insert_augmented() rebalances the rbtree, it will callback into
* a user provided function to update the augmented information on the
* affected subtrees.
*/
@@ -60,41 +50,97 @@ rb_insert_augmented(struct rb_node *node, struct rb_root *root,
__rb_insert_augmented(node, root, augment->rotate);
}
-#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \
- rbtype, rbaugmented, rbcompute) \
+static inline void
+rb_insert_augmented_cached(struct rb_node *node,
+ struct rb_root_cached *root, bool newleft,
+ const struct rb_augment_callbacks *augment)
+{
+ if (newleft)
+ root->rb_leftmost = node;
+ rb_insert_augmented(node, &root->rb_root, augment);
+}
+
+/*
+ * Template for declaring augmented rbtree callbacks (generic case)
+ *
+ * RBSTATIC: 'static' or empty
+ * RBNAME: name of the rb_augment_callbacks structure
+ * RBSTRUCT: struct type of the tree nodes
+ * RBFIELD: name of struct rb_node field within RBSTRUCT
+ * RBAUGMENTED: name of field within RBSTRUCT holding data for subtree
+ * RBCOMPUTE: name of function that recomputes the RBAUGMENTED data
+ */
+
+#define RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \
+ RBSTRUCT, RBFIELD, RBAUGMENTED, RBCOMPUTE) \
static inline void \
-rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \
+RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop) \
{ \
while (rb != stop) { \
- rbstruct *node = rb_entry(rb, rbstruct, rbfield); \
- rbtype augmented = rbcompute(node); \
- if (node->rbaugmented == augmented) \
+ RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \
+ if (RBCOMPUTE(node, true)) \
break; \
- node->rbaugmented = augmented; \
- rb = rb_parent(&node->rbfield); \
+ rb = rb_parent(&node->RBFIELD); \
} \
} \
static inline void \
-rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \
+RBNAME ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \
{ \
- rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
- rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
- new->rbaugmented = old->rbaugmented; \
+ RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
+ RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
+ new->RBAUGMENTED = old->RBAUGMENTED; \
} \
static void \
-rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
+RBNAME ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
{ \
- rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
- rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
- new->rbaugmented = old->rbaugmented; \
- old->rbaugmented = rbcompute(old); \
+ RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
+ RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
+ new->RBAUGMENTED = old->RBAUGMENTED; \
+ RBCOMPUTE(old, false); \
} \
-rbstatic const struct rb_augment_callbacks rbname = { \
- .propagate = rbname ## _propagate, \
- .copy = rbname ## _copy, \
- .rotate = rbname ## _rotate \
+RBSTATIC const struct rb_augment_callbacks RBNAME = { \
+ .propagate = RBNAME ## _propagate, \
+ .copy = RBNAME ## _copy, \
+ .rotate = RBNAME ## _rotate \
};
+/*
+ * Template for declaring augmented rbtree callbacks,
+ * computing RBAUGMENTED scalar as max(RBCOMPUTE(node)) for all subtree nodes.
+ *
+ * RBSTATIC: 'static' or empty
+ * RBNAME: name of the rb_augment_callbacks structure
+ * RBSTRUCT: struct type of the tree nodes
+ * RBFIELD: name of struct rb_node field within RBSTRUCT
+ * RBTYPE: type of the RBAUGMENTED field
+ * RBAUGMENTED: name of RBTYPE field within RBSTRUCT holding data for subtree
+ * RBCOMPUTE: name of function that returns the per-node RBTYPE scalar
+ */
+
+#define RB_DECLARE_CALLBACKS_MAX(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, \
+ RBTYPE, RBAUGMENTED, RBCOMPUTE) \
+static inline bool RBNAME ## _compute_max(RBSTRUCT *node, bool exit) \
+{ \
+ RBSTRUCT *child; \
+ RBTYPE max = RBCOMPUTE(node); \
+ if (node->RBFIELD.rb_left) { \
+ child = rb_entry(node->RBFIELD.rb_left, RBSTRUCT, RBFIELD); \
+ if (child->RBAUGMENTED > max) \
+ max = child->RBAUGMENTED; \
+ } \
+ if (node->RBFIELD.rb_right) { \
+ child = rb_entry(node->RBFIELD.rb_right, RBSTRUCT, RBFIELD); \
+ if (child->RBAUGMENTED > max) \
+ max = child->RBAUGMENTED; \
+ } \
+ if (exit && node->RBAUGMENTED == max) \
+ return true; \
+ node->RBAUGMENTED = max; \
+ return false; \
+} \
+RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \
+ RBSTRUCT, RBFIELD, RBAUGMENTED, RBNAME ## _compute_max)
+
#define RB_RED 0
#define RB_BLACK 1
@@ -110,13 +156,13 @@ rbstatic const struct rb_augment_callbacks rbname = { \
static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
{
- rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
+ rb->__rb_parent_color = rb_color(rb) + (unsigned long)p;
}
static inline void rb_set_parent_color(struct rb_node *rb,
struct rb_node *p, int color)
{
- rb->__rb_parent_color = (unsigned long)p | color;
+ rb->__rb_parent_color = (unsigned long)p + color;
}
static inline void
@@ -237,14 +283,12 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
__rb_change_child(node, successor, tmp, root);
if (child2) {
- successor->__rb_parent_color = pc;
rb_set_parent_color(child2, parent, RB_BLACK);
rebalance = NULL;
} else {
- unsigned long pc2 = successor->__rb_parent_color;
- successor->__rb_parent_color = pc;
- rebalance = __rb_is_black(pc2) ? parent : NULL;
+ rebalance = rb_is_black(successor) ? parent : NULL;
}
+ successor->__rb_parent_color = pc;
tmp = successor;
}
@@ -261,4 +305,13 @@ rb_erase_augmented(struct rb_node *node, struct rb_root *root,
__rb_erase_color(rebalance, root, augment->rotate);
}
+static __always_inline void
+rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root,
+ const struct rb_augment_callbacks *augment)
+{
+ if (root->rb_leftmost == node)
+ root->rb_leftmost = rb_next(node);
+ rb_erase_augmented(node, &root->rb_root, augment);
+}
+
#endif /* _LINUX_RBTREE_AUGMENTED_H */
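As an illustration of the reworked template (all names hypothetical, modeled on the in-tree interval-tree usage), RB_DECLARE_CALLBACKS_MAX() now takes a per-node scalar function and generates the subtree-max propagation itself:

	struct itnode {
		struct rb_node rb;
		unsigned long start, last;	/* interval [start, last] */
		unsigned long subtree_last;	/* RBAUGMENTED: max(last) below */
	};

	static inline unsigned long itnode_last(struct itnode *n)
	{
		return n->last;
	}

	RB_DECLARE_CALLBACKS_MAX(static, it_cb, struct itnode, rb,
				 unsigned long, subtree_last, itnode_last)

	static void it_insert(struct itnode *n, struct rb_root *root)
	{
		struct rb_node **link = &root->rb_node, *parent = NULL;

		while (*link) {
			struct itnode *p = rb_entry(*link, struct itnode, rb);

			parent = *link;
			if (p->subtree_last < n->last)
				p->subtree_last = n->last; /* fix path down */
			link = n->start < p->start ? &parent->rb_left
						   : &parent->rb_right;
		}
		n->subtree_last = n->last;
		rb_link_node(&n->rb, parent, link);
		rb_insert_augmented(&n->rb, root, &it_cb);
	}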
diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h
index 4f3432c61d12..3d1a9e716b80 100644
--- a/include/linux/rbtree_latch.h
+++ b/include/linux/rbtree_latch.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Latched RB-trees
*
@@ -34,14 +35,15 @@
#include <linux/rbtree.h>
#include <linux/seqlock.h>
+#include <linux/rcupdate.h>
struct latch_tree_node {
struct rb_node node[2];
};
struct latch_tree_root {
- seqcount_t seq;
- struct rb_root tree[2];
+ seqcount_latch_t seq;
+ struct rb_root tree[2];
};
/**
@@ -204,7 +206,7 @@ latch_tree_find(void *key, struct latch_tree_root *root,
do {
seq = raw_read_seqcount_latch(&root->seq);
node = __lt_find(key, root, seq & 1, ops->comp);
- } while (read_seqcount_retry(&root->seq, seq));
+ } while (read_seqcount_latch_retry(&root->seq, seq));
return node;
}
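The read side remains a plain RCU lookup; the seqcount_latch retry happens entirely inside latch_tree_find(). A caller sketch (my_root, my_ops, and the key type are hypothetical; latch trees assume the looked-up data is freed only after a grace period, or never):

	static struct latch_tree_node *my_lookup(unsigned long key)
	{
		struct latch_tree_node *ltn;

		rcu_read_lock();
		ltn = latch_tree_find(&key, &my_root, &my_ops);
		/* use or copy out *ltn while still in the read section */
		rcu_read_unlock();

		return ltn;
	}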
diff --git a/include/linux/rbtree_types.h b/include/linux/rbtree_types.h
new file mode 100644
index 000000000000..45b6ecde3665
--- /dev/null
+++ b/include/linux/rbtree_types.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_RBTREE_TYPES_H
+#define _LINUX_RBTREE_TYPES_H
+
+struct rb_node {
+ unsigned long __rb_parent_color;
+ struct rb_node *rb_right;
+ struct rb_node *rb_left;
+} __attribute__((aligned(sizeof(long))));
+/* The alignment might seem pointless, but allegedly CRIS needs it */
+
+struct rb_root {
+ struct rb_node *rb_node;
+};
+
+/*
+ * Leftmost-cached rbtrees.
+ *
+ * We do not cache the rightmost node based on footprint
+ * size vs number of potential users that could benefit
+ * from O(1) rb_last(). Just not worth it, users that want
+ * this feature can always implement the logic explicitly.
+ * Furthermore, users that want to cache both pointers may
+ * find it a bit asymmetric, but that's ok.
+ */
+struct rb_root_cached {
+ struct rb_root rb_root;
+ struct rb_node *rb_leftmost;
+};
+
+#define RB_ROOT (struct rb_root) { NULL, }
+#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
+
+#endif
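With the types split out, declaring and probing a leftmost-cached tree is unchanged for users who include <linux/rbtree.h> (a minimal sketch):

	static struct rb_root_cached my_tree = RB_ROOT_CACHED;

	static struct rb_node *peek_min(void)
	{
		return rb_first_cached(&my_tree);  /* O(1): reads rb_leftmost */
	}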
diff --git a/include/linux/rcu_node_tree.h b/include/linux/rcu_node_tree.h
index 426cee67f0e2..78feb8ba7358 100644
--- a/include/linux/rcu_node_tree.h
+++ b/include/linux/rcu_node_tree.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* RCU node combining tree definitions. These are used to compute
* global attributes while avoiding common-case global contention. A key
@@ -11,28 +12,16 @@
* because the size of the TREE SRCU srcu_struct structure depends
* on these definitions.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright IBM Corporation, 2017
*
- * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ * Author: Paul E. McKenney <paulmck@linux.ibm.com>
*/
#ifndef __LINUX_RCU_NODE_TREE_H
#define __LINUX_RCU_NODE_TREE_H
+#include <linux/math.h>
+
/*
* Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
* CONFIG_RCU_FANOUT_LEAF.
diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
index c3ad00e63556..659d13a7ddaa 100644
--- a/include/linux/rcu_segcblist.h
+++ b/include/linux/rcu_segcblist.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* RCU segmented callback lists
*
@@ -5,34 +6,22 @@
* because the size of the TREE SRCU srcu_struct structure depends
* on these definitions.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright IBM Corporation, 2017
*
- * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
*/
#ifndef __INCLUDE_LINUX_RCU_SEGCBLIST_H
#define __INCLUDE_LINUX_RCU_SEGCBLIST_H
+#include <linux/types.h>
+#include <linux/atomic.h>
+
/* Simple unsegmented callback lists. */
struct rcu_cblist {
struct rcu_head *head;
struct rcu_head **tail;
long len;
- long len_lazy;
};
#define RCU_CBLIST_INITIALIZER(n) { .head = NULL, .tail = &n.head }
@@ -74,12 +63,157 @@ struct rcu_cblist {
#define RCU_NEXT_TAIL 3
#define RCU_CBLIST_NSEGS 4
+
+/*
+ * ==NOCB Offloading state machine==
+ *
+ *
+ * ----------------------------------------------------------------------------
+ * | SEGCBLIST_RCU_CORE |
+ * | |
+ * | Callbacks processed by rcu_core() from softirqs or local |
+ * | rcuc kthread, without holding nocb_lock. |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * ----------------------------------------------------------------------------
+ * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED |
+ * | |
+ * | Callbacks processed by rcu_core() from softirqs or local |
+ * | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads, |
+ * | allowing nocb_timer to be armed. |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * -----------------------------------
+ * | |
+ * v v
+ * --------------------------------------- ----------------------------------|
+ * | SEGCBLIST_RCU_CORE | | | SEGCBLIST_RCU_CORE | |
+ * | SEGCBLIST_LOCKING | | | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_OFFLOADED | | | SEGCBLIST_OFFLOADED | |
+ * | SEGCBLIST_KTHREAD_CB | | SEGCBLIST_KTHREAD_GP |
+ * | | | |
+ * | | | |
+ * | CB kthread woke up and | | GP kthread woke up and |
+ * | acknowledged SEGCBLIST_OFFLOADED. | | acknowledged SEGCBLIST_OFFLOADED|
+ * | Processes callbacks concurrently | | |
+ * | with rcu_core(), holding | | |
+ * | nocb_lock. | | |
+ * --------------------------------------- -----------------------------------
+ * | |
+ * -----------------------------------
+ * |
+ * v
+ * |--------------------------------------------------------------------------|
+ * | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_OFFLOADED | |
+ * | SEGCBLIST_KTHREAD_GP | |
+ * | SEGCBLIST_KTHREAD_CB |
+ * | |
+ * | Kthreads handle callbacks holding nocb_lock, local rcu_core() stops |
+ * | handling callbacks. Enable bypass queueing. |
+ * ----------------------------------------------------------------------------
+ */
+
+
+
+/*
+ * ==NOCB De-Offloading state machine==
+ *
+ *
+ * |--------------------------------------------------------------------------|
+ * | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_OFFLOADED | |
+ * | SEGCBLIST_KTHREAD_CB | |
+ * | SEGCBLIST_KTHREAD_GP |
+ * | |
+ * | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() |
+ * | ignores callbacks. Bypass enqueue is enabled. |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * |--------------------------------------------------------------------------|
+ * | SEGCBLIST_RCU_CORE | |
+ * | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_OFFLOADED | |
+ * | SEGCBLIST_KTHREAD_CB | |
+ * | SEGCBLIST_KTHREAD_GP |
+ * | |
+ * | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() |
+ * | handles callbacks concurrently. Bypass enqueue is enabled. |
+ * | Invoke RCU core so we make sure not to preempt it in the middle,          |
+ * | leaving some urgent work unattended within a jiffy.                       |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * |--------------------------------------------------------------------------|
+ * | SEGCBLIST_RCU_CORE | |
+ * | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_KTHREAD_CB | |
+ * | SEGCBLIST_KTHREAD_GP |
+ * | |
+ * | CB/GP kthreads and local rcu_core() handle callbacks concurrently |
+ * | holding nocb_lock. Wake up CB and GP kthreads if necessary. Disable |
+ * | bypass enqueue. |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * -----------------------------------
+ * | |
+ * v v
+ * ---------------------------------------------------------------------------|
+ * | | |
+ * | SEGCBLIST_RCU_CORE | | SEGCBLIST_RCU_CORE | |
+ * | SEGCBLIST_LOCKING | | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP |
+ * | | |
+ * | GP kthread woke up and | CB kthread woke up and |
+ * | acknowledged the fact that | acknowledged the fact that |
+ * | SEGCBLIST_OFFLOADED got cleared. | SEGCBLIST_OFFLOADED got cleared. |
+ * | | The CB kthread goes to sleep |
+ * | The callbacks from the target CPU    | until it is re-offloaded, if ever.|
+ * | will be ignored by the GP kthread    |                                  |
+ * | loop. | |
+ * ----------------------------------------------------------------------------
+ * | |
+ * -----------------------------------
+ * |
+ * v
+ * ----------------------------------------------------------------------------
+ * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING |
+ * | |
+ * | Callbacks processed by rcu_core() from softirqs or local |
+ * | rcuc kthread, while holding nocb_lock. Forbid nocb_timer to be armed. |
+ * | Flush pending nocb_timer. Flush nocb bypass callbacks. |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * ----------------------------------------------------------------------------
+ * | SEGCBLIST_RCU_CORE |
+ * | |
+ * | Callbacks processed by rcu_core() from softirqs or local |
+ * | rcuc kthread, without holding nocb_lock. |
+ * ----------------------------------------------------------------------------
+ */
+#define SEGCBLIST_ENABLED BIT(0)
+#define SEGCBLIST_RCU_CORE BIT(1)
+#define SEGCBLIST_LOCKING BIT(2)
+#define SEGCBLIST_KTHREAD_CB BIT(3)
+#define SEGCBLIST_KTHREAD_GP BIT(4)
+#define SEGCBLIST_OFFLOADED BIT(5)
+
struct rcu_segcblist {
struct rcu_head *head;
struct rcu_head **tails[RCU_CBLIST_NSEGS];
unsigned long gp_seq[RCU_CBLIST_NSEGS];
+#ifdef CONFIG_RCU_NOCB_CPU
+ atomic_long_t len;
+#else
long len;
- long len_lazy;
+#endif
+ long seglen[RCU_CBLIST_NSEGS];
+ u8 flags;
};
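The flags encode the states drawn in the diagrams above. A hypothetical predicate for the fully offloaded terminal state, shown only to illustrate how the bits combine (this is not the kernel's internal accessor):

	static inline bool segcblist_fully_offloaded(struct rcu_segcblist *rsclp)
	{
		u8 mask = SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED |
			  SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP;

		return (READ_ONCE(rsclp->flags) & mask) == mask;
	}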
#define RCU_SEGCBLIST_INITIALIZER(n) \
diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h
index ece7ed9a4a70..0027d4c8087c 100644
--- a/include/linux/rcu_sync.h
+++ b/include/linux/rcu_sync.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* RCU-based infrastructure for lightweight reader-writer locking
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright (c) 2015, Red Hat, Inc.
*
* Author: Oleg Nesterov <oleg@redhat.com>
@@ -26,62 +13,42 @@
#include <linux/wait.h>
#include <linux/rcupdate.h>
-enum rcu_sync_type { RCU_SYNC, RCU_SCHED_SYNC, RCU_BH_SYNC };
-
/* Structure to mediate between updaters and fastpath-using readers. */
struct rcu_sync {
int gp_state;
int gp_count;
wait_queue_head_t gp_wait;
- int cb_state;
struct rcu_head cb_head;
-
- enum rcu_sync_type gp_type;
};
-extern void rcu_sync_lockdep_assert(struct rcu_sync *);
-
/**
* rcu_sync_is_idle() - Are readers permitted to use their fastpaths?
* @rsp: Pointer to rcu_sync structure to use for synchronization
*
- * Returns true if readers are permitted to use their fastpaths.
- * Must be invoked within an RCU read-side critical section whose
- * flavor matches that of the rcu_sync struture.
+ * Returns true if readers are permitted to use their fastpaths. Must be
+ * invoked within some flavor of RCU read-side critical section.
*/
static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
{
-#ifdef CONFIG_PROVE_RCU
- rcu_sync_lockdep_assert(rsp);
-#endif
- return !rsp->gp_state; /* GP_IDLE */
+ RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
+ "suspicious rcu_sync_is_idle() usage");
+ return !READ_ONCE(rsp->gp_state); /* GP_IDLE */
}
-extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type);
+extern void rcu_sync_init(struct rcu_sync *);
extern void rcu_sync_enter_start(struct rcu_sync *);
extern void rcu_sync_enter(struct rcu_sync *);
extern void rcu_sync_exit(struct rcu_sync *);
extern void rcu_sync_dtor(struct rcu_sync *);
-#define __RCU_SYNC_INITIALIZER(name, type) { \
+#define __RCU_SYNC_INITIALIZER(name) { \
.gp_state = 0, \
.gp_count = 0, \
.gp_wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.gp_wait), \
- .cb_state = 0, \
- .gp_type = type, \
}
-#define __DEFINE_RCU_SYNC(name, type) \
- struct rcu_sync_struct name = __RCU_SYNC_INITIALIZER(name, type)
-
-#define DEFINE_RCU_SYNC(name) \
- __DEFINE_RCU_SYNC(name, RCU_SYNC)
-
-#define DEFINE_RCU_SCHED_SYNC(name) \
- __DEFINE_RCU_SYNC(name, RCU_SCHED_SYNC)
-
-#define DEFINE_RCU_BH_SYNC(name) \
- __DEFINE_RCU_SYNC(name, RCU_BH_SYNC)
+#define DEFINE_RCU_SYNC(name) \
+ struct rcu_sync name = __RCU_SYNC_INITIALIZER(name)
#endif /* _LINUX_RCU_SYNC_H_ */
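The single-flavor rcu_sync pairs a writer-side enter/exit with a reader-side fastpath test, as in percpu-rwsem; a sketch with hypothetical fast-path and slow-path helpers:

	static DEFINE_RCU_SYNC(my_sync);

	static void my_reader(void)
	{
		rcu_read_lock();
		if (rcu_sync_is_idle(&my_sync))
			do_fast_path();		/* hypothetical, e.g. per-CPU */
		else
			do_slow_path();		/* hypothetical shared path */
		rcu_read_unlock();
	}

	static void my_writer(void)
	{
		rcu_sync_enter(&my_sync);  /* force readers slow, wait a GP */
		/* ... exclusive update ... */
		rcu_sync_exit(&my_sync);   /* re-arm the reader fast path */
	}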
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index b1fd8bf85fdc..d29740be4833 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCULIST_H
#define _LINUX_RCULIST_H
@@ -10,15 +11,6 @@
#include <linux/rcupdate.h>
/*
- * Why is there no list_empty_rcu()? Because list_empty() serves this
- * purpose. The list_empty() function fetches the RCU-protected pointer
- * and compares it to the address of the list head, but neither dereferences
- * this pointer itself nor provides this pointer to the caller. Therefore,
- * it is not necessary to use rcu_dereference(), so that list_empty() can
- * be used anywhere you would want to use a list_empty_rcu().
- */
-
-/*
* INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
* @list: list to be initialized
*
@@ -39,6 +31,42 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
*/
#define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next)))
+/**
+ * list_tail_rcu - returns the prev pointer of the head of the list
+ * @head: the head of the list
+ *
+ * Note: This should only be used with the list header, and even then
+ * only if list_del() and similar primitives are not also used on the
+ * list header.
+ */
+#define list_tail_rcu(head) (*((struct list_head __rcu **)(&(head)->prev)))
+
+/*
+ * Check during list traversal that we are within an RCU reader
+ */
+
+#define check_arg_count_one(dummy)
+
+#ifdef CONFIG_PROVE_RCU_LIST
+#define __list_check_rcu(dummy, cond, extra...) \
+ ({ \
+ check_arg_count_one(extra); \
+ RCU_LOCKDEP_WARN(!(cond) && !rcu_read_lock_any_held(), \
+ "RCU-list traversed in non-reader section!"); \
+ })
+
+#define __list_check_srcu(cond) \
+ ({ \
+ RCU_LOCKDEP_WARN(!(cond), \
+ "RCU-list traversed without holding the required lock!");\
+ })
+#else
+#define __list_check_rcu(dummy, cond, extra...) \
+ ({ check_arg_count_one(extra); })
+
+#define __list_check_srcu(cond) ({ })
+#endif
+
/*
* Insert a new entry between two known consecutive entries.
*
@@ -154,7 +182,7 @@ static inline void hlist_del_init_rcu(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
__hlist_del(n);
- n->pprev = NULL;
+ WRITE_ONCE(n->pprev, NULL);
}
}
@@ -181,7 +209,7 @@ static inline void list_replace_rcu(struct list_head *old,
* @list: the RCU-protected list to splice
* @prev: points to the last element of the existing list
* @next: points to the first element of the existing list
- * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+ * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
*
* The list pointed to by @prev and @next can be RCU-read traversed
* concurrently with this function.
@@ -219,6 +247,8 @@ static inline void __list_splice_init_rcu(struct list_head *list,
*/
sync();
+ ASSERT_EXCLUSIVE_ACCESS(*first);
+ ASSERT_EXCLUSIVE_ACCESS(*last);
/*
* Readers are finished with the source list, so perform splice.
@@ -239,7 +269,7 @@ static inline void __list_splice_init_rcu(struct list_head *list,
* designed for stacks.
* @list: the RCU-protected list to splice
* @head: the place in the existing list to splice the first list into
- * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+ * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
*/
static inline void list_splice_init_rcu(struct list_head *list,
struct list_head *head,
@@ -254,7 +284,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
* list, designed for queues.
* @list: the RCU-protected list to splice
* @head: the place in the existing list to splice the first list into
- * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+ * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
*/
static inline void list_splice_tail_init_rcu(struct list_head *list,
struct list_head *head,
@@ -274,26 +304,34 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
#define list_entry_rcu(ptr, type, member) \
- container_of(lockless_dereference(ptr), type, member)
+ container_of(READ_ONCE(ptr), type, member)
-/**
+/*
* Where are list_empty_rcu() and list_first_entry_rcu()?
*
- * Implementing those functions following their counterparts list_empty() and
- * list_first_entry() is not advisable because they lead to subtle race
- * conditions as the following snippet shows:
+ * They do not exist because they would lead to subtle race conditions:
*
* if (!list_empty_rcu(mylist)) {
* struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
* do_something(bar);
* }
*
- * The list may not be empty when list_empty_rcu checks it, but it may be when
- * list_first_entry_rcu rereads the ->next pointer.
+ * The list might be non-empty when list_empty_rcu() checks it, but it
+ * might have become empty by the time that list_first_entry_rcu() rereads
+ * the ->next pointer, which would result in a SEGV.
*
- * Rereading the ->next pointer is not a problem for list_empty() and
- * list_first_entry() because they would be protected by a lock that blocks
- * writers.
+ * When not using RCU, it is OK for list_first_entry() to re-read that
+ * pointer because both functions should be protected by some lock that
+ * blocks writers.
+ *
+ * When using RCU, list_empty() uses READ_ONCE() to fetch the
+ * RCU-protected ->next pointer and then compares it to the address of the
+ * list head. However, it neither dereferences this pointer nor provides
+ * this pointer to its caller. Thus, READ_ONCE() suffices (that is,
+ * rcu_dereference() is not needed), which means that list_empty() can be
+ * used anywhere you would want to use list_empty_rcu(). Just don't
+ * expect anything useful to happen if you do a subsequent lockless
+ * call to list_first_entry_rcu()!!!
*
* See list_first_or_null_rcu for an alternative.
*/
@@ -342,14 +380,35 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_head within the struct.
+ * @cond: optional lockdep expression if called from non-RCU protection.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as list_add_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
-#define list_for_each_entry_rcu(pos, head, member) \
- for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
- &pos->member != (head); \
+#define list_for_each_entry_rcu(pos, head, member, cond...) \
+ for (__list_check_rcu(dummy, ## cond, 0), \
+ pos = list_entry_rcu((head)->next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_srcu - iterate over rcu list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ * @cond: lockdep expression for the lock required to traverse the list.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by srcu_read_lock().
+ * The lockdep expression srcu_read_lock_held() can be passed as the
+ * cond argument from read side.
+ */
+#define list_for_each_entry_srcu(pos, head, member, cond) \
+ for (__list_check_srcu(cond), \
+ pos = list_entry_rcu((head)->next, typeof(*pos), member); \
+ &pos->member != (head); \
pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
/**
@@ -358,16 +417,15 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*
- * This primitive may safely run concurrently with the _rcu list-mutation
- * primitives such as list_add_rcu(), but requires some implicit RCU
- * read-side guarding. One example is running within a special
- * exception-time environment where preemption is disabled and where
- * lockdep cannot be invoked (in which case updaters must use RCU-sched,
- * as in synchronize_sched(), call_rcu_sched(), and friends). Another
- * example is when items are added to the list, but never deleted.
+ * This primitive may safely run concurrently with the _rcu
+ * list-mutation primitives such as list_add_rcu(), but requires some
+ * implicit RCU read-side guarding. One example is running within a special
+ * exception-time environment where preemption is disabled and where lockdep
+ * cannot be invoked. Another example is when items are added to the list,
+ * but never deleted.
*/
#define list_entry_lockless(ptr, type, member) \
- container_of((typeof(ptr))lockless_dereference(ptr), type, member)
+ container_of((typeof(ptr))READ_ONCE(ptr), type, member)
/**
* list_for_each_entry_lockless - iterate over rcu list of given type
@@ -375,13 +433,12 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*
- * This primitive may safely run concurrently with the _rcu list-mutation
- * primitives such as list_add_rcu(), but requires some implicit RCU
- * read-side guarding. One example is running within a special
- * exception-time environment where preemption is disabled and where
- * lockdep cannot be invoked (in which case updaters must use RCU-sched,
- * as in synchronize_sched(), call_rcu_sched(), and friends). Another
- * example is when items are added to the list, but never deleted.
+ * This primitive may safely run concurrently with the _rcu
+ * list-mutation primitives such as list_add_rcu(), but requires some
+ * implicit RCU read-side guarding. One example is running within a special
+ * exception-time environment where preemption is disabled and where lockdep
+ * cannot be invoked. Another example is when items are added to the list,
+ * but never deleted.
*/
#define list_for_each_entry_lockless(pos, head, member) \
for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
@@ -395,7 +452,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
* @member: the name of the list_head within the struct.
*
* Continue to iterate over list of given type, continuing after
- * the current position.
+ * the current position which must have been in the list when the RCU read
+ * lock was taken.
+ * This would typically require either that you obtained the node from a
+ * previous walk of the list in the same RCU read-side critical section, or
+ * that you held some sort of non-RCU reference (such as a reference count)
+ * to keep the node alive *and* in the list.
+ *
+ * This iterator is similar to list_for_each_entry_from_rcu() except
+ * this starts after the given position and that one starts at the given
+ * position.
*/
#define list_for_each_entry_continue_rcu(pos, head, member) \
for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
@@ -403,6 +469,27 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
/**
+ * list_for_each_entry_from_rcu - iterate over a list from current point
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_node within the struct.
+ *
+ * Iterate over the tail of a list starting from a given position,
+ * which must have been in the list when the RCU read lock was taken.
+ * This would typically require either that you obtained the node from a
+ * previous walk of the list in the same RCU read-side critical section, or
+ * that you held some sort of non-RCU reference (such as a reference count)
+ * to keep the node alive *and* in the list.
+ *
+ * This iterator is similar to list_for_each_entry_continue_rcu() except
+ * this starts from the given position and that one starts from the position
+ * after the given position.
+ */
+#define list_for_each_entry_from_rcu(pos, head, member) \
+ for (; &(pos)->member != (head); \
+ pos = list_entry_rcu(pos->member.next, typeof(*(pos)), member))
+
+/**
* hlist_del_rcu - deletes entry from hash list without re-initialization
* @n: the element to delete from the hash list.
*
@@ -424,7 +511,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
static inline void hlist_del_rcu(struct hlist_node *n)
{
__hlist_del(n);
- n->pprev = LIST_POISON2;
+ WRITE_ONCE(n->pprev, LIST_POISON2);
}
/**
@@ -440,11 +527,32 @@ static inline void hlist_replace_rcu(struct hlist_node *old,
struct hlist_node *next = old->next;
new->next = next;
- new->pprev = old->pprev;
+ WRITE_ONCE(new->pprev, old->pprev);
rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
if (next)
- new->next->pprev = &new->next;
- old->pprev = LIST_POISON2;
+ WRITE_ONCE(new->next->pprev, &new->next);
+ WRITE_ONCE(old->pprev, LIST_POISON2);
+}
+
+/**
+ * hlists_swap_heads_rcu - swap the lists the hlist heads point to
+ * @left: The hlist head on the left
+ * @right: The hlist head on the right
+ *
+ * The lists start out as [@left ][node1 ... ] and
+ * [@right ][node2 ... ]
+ * The lists end up as [@left ][node2 ... ]
+ * [@right ][node1 ... ]
+ */
+static inline void hlists_swap_heads_rcu(struct hlist_head *left, struct hlist_head *right)
+{
+ struct hlist_node *node1 = left->first;
+ struct hlist_node *node2 = right->first;
+
+ rcu_assign_pointer(left->first, node2);
+ rcu_assign_pointer(right->first, node1);
+ WRITE_ONCE(node2->pprev, &left->first);
+ WRITE_ONCE(node1->pprev, &right->first);
}
/*
@@ -479,10 +587,10 @@ static inline void hlist_add_head_rcu(struct hlist_node *n,
struct hlist_node *first = h->first;
n->next = first;
- n->pprev = &h->first;
+ WRITE_ONCE(n->pprev, &h->first);
rcu_assign_pointer(hlist_first_rcu(h), n);
if (first)
- first->pprev = &n->next;
+ WRITE_ONCE(first->pprev, &n->next);
}
/**
@@ -515,7 +623,7 @@ static inline void hlist_add_tail_rcu(struct hlist_node *n,
if (last) {
n->next = last->next;
- n->pprev = &last->next;
+ WRITE_ONCE(n->pprev, &last->next);
rcu_assign_pointer(hlist_next_rcu(last), n);
} else {
hlist_add_head_rcu(n, h);
@@ -543,10 +651,10 @@ static inline void hlist_add_tail_rcu(struct hlist_node *n,
static inline void hlist_add_before_rcu(struct hlist_node *n,
struct hlist_node *next)
{
- n->pprev = next->pprev;
+ WRITE_ONCE(n->pprev, next->pprev);
n->next = next;
rcu_assign_pointer(hlist_pprev_rcu(n), n);
- next->pprev = &n->next;
+ WRITE_ONCE(next->pprev, &n->next);
}
/**
@@ -571,10 +679,10 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
struct hlist_node *prev)
{
n->next = prev->next;
- n->pprev = &prev->next;
+ WRITE_ONCE(n->pprev, &prev->next);
rcu_assign_pointer(hlist_next_rcu(prev), n);
if (n->next)
- n->next->pprev = &n->next;
+ WRITE_ONCE(n->next->pprev, &n->next);
}
#define __hlist_for_each_rcu(pos, head) \
@@ -587,13 +695,36 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
+ * @cond: optional lockdep expression if called from non-RCU protection.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as hlist_add_head_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
-#define hlist_for_each_entry_rcu(pos, head, member) \
- for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
+#define hlist_for_each_entry_rcu(pos, head, member, cond...) \
+ for (__list_check_rcu(dummy, ## cond, 0), \
+ pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
+ typeof(*(pos)), member); \
+ pos; \
+ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
+ &(pos)->member)), typeof(*(pos)), member))
+
+/**
+ * hlist_for_each_entry_srcu - iterate over rcu list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ * @cond: lockdep expression for the lock required to traverse the list.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_head_rcu()
+ * as long as the traversal is guarded by srcu_read_lock().
+ * The lockdep expression srcu_read_lock_held() can be passed as the
+ * cond argument from read side.
+ */
+#define hlist_for_each_entry_srcu(pos, head, member, cond) \
+ for (__list_check_srcu(cond), \
+ pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
typeof(*(pos)), member); \
pos; \
pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
@@ -613,10 +744,10 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
* not do any RCU debugging or tracing.
*/
#define hlist_for_each_entry_rcu_notrace(pos, head, member) \
- for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
+ for (pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_first_rcu(head)),\
typeof(*(pos)), member); \
pos; \
- pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
+ pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_next_rcu(\
&(pos)->member)), typeof(*(pos)), member))
/**
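A sketch of the new optional @cond argument (list head, lock, and element type hypothetical): update-side code that holds its own lock, rather than rcu_read_lock(), can now traverse without a false lockdep splat:

	static void my_reader(void)
	{
		struct myitem *p;

		rcu_read_lock();
		list_for_each_entry_rcu(p, &my_head, link)
			consume(p);			/* hypothetical */
		rcu_read_unlock();
	}

	static void my_updater(void)
	{
		struct myitem *p;

		mutex_lock(&my_lock);
		/* @cond: traversal is safe because we hold the update lock. */
		list_for_each_entry_rcu(p, &my_head, link,
					lockdep_is_held(&my_lock))
			adjust(p);			/* hypothetical */
		mutex_unlock(&my_lock);
	}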
diff --git a/include/linux/rculist_bl.h b/include/linux/rculist_bl.h
index 4f216c59e7db..0b952d06eb0b 100644
--- a/include/linux/rculist_bl.h
+++ b/include/linux/rculist_bl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCULIST_BL_H
#define _LINUX_RCULIST_BL_H
@@ -24,34 +25,6 @@ static inline struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h)
}
/**
- * hlist_bl_del_init_rcu - deletes entry from hash list with re-initialization
- * @n: the element to delete from the hash list.
- *
- * Note: hlist_bl_unhashed() on the node returns true after this. It is
- * useful for RCU based read lockfree traversal if the writer side
- * must know if the list entry is still hashed or already unhashed.
- *
- * In particular, it means that we can not poison the forward pointers
- * that may still be used for walking the hash list and we can only
- * zero the pprev pointer so list_unhashed() will return true after
- * this.
- *
- * The caller must take whatever precautions are necessary (such as
- * holding appropriate locks) to avoid racing with another
- * list-mutation primitive, such as hlist_bl_add_head_rcu() or
- * hlist_bl_del_rcu(), running on this same list. However, it is
- * perfectly legal to run concurrently with the _rcu list-traversal
- * primitives, such as hlist_bl_for_each_entry_rcu().
- */
-static inline void hlist_bl_del_init_rcu(struct hlist_bl_node *n)
-{
- if (!hlist_bl_unhashed(n)) {
- __hlist_bl_del(n);
- n->pprev = NULL;
- }
-}
-
-/**
* hlist_bl_del_rcu - deletes entry from hash list without re-initialization
* @n: the element to delete from the hash list.
*
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index a23a33153180..ba4c00dd8005 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCULIST_NULLS_H
#define _LINUX_RCULIST_NULLS_H
@@ -33,13 +34,21 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
{
if (!hlist_nulls_unhashed(n)) {
__hlist_nulls_del(n);
- n->pprev = NULL;
+ WRITE_ONCE(n->pprev, NULL);
}
}
+/**
+ * hlist_nulls_first_rcu - returns the first element of the hash list.
+ * @head: the head of the list.
+ */
#define hlist_nulls_first_rcu(head) \
(*((struct hlist_nulls_node __rcu __force **)&(head)->first))
+/**
+ * hlist_nulls_next_rcu - returns the element of the list after @node.
+ * @node: element of the list.
+ */
#define hlist_nulls_next_rcu(node) \
(*((struct hlist_nulls_node __rcu __force **)&(node)->next))
@@ -65,7 +74,7 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n)
{
__hlist_nulls_del(n);
- n->pprev = LIST_POISON2;
+ WRITE_ONCE(n->pprev, LIST_POISON2);
}
/**
@@ -93,10 +102,10 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
struct hlist_nulls_node *first = h->first;
n->next = first;
- n->pprev = &h->first;
+ WRITE_ONCE(n->pprev, &h->first);
rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
if (!is_a_nulls(first))
- first->pprev = &n->next;
+ WRITE_ONCE(first->pprev, &n->next);
}
/**
@@ -105,9 +114,8 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
* @h: the list to add to.
*
* Description:
- * Adds the specified element to the end of the specified hlist_nulls,
- * while permitting racing traversals. NOTE: tail insertion requires
- * list traversal.
+ * Adds the specified element to the specified hlist_nulls,
+ * while permitting racing traversals.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
@@ -120,12 +128,12 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
* list-traversal primitive must be guarded by rcu_read_lock().
*/
static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
- struct hlist_nulls_head *h)
+ struct hlist_nulls_head *h)
{
struct hlist_nulls_node *i, *last = NULL;
- for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
- i = hlist_nulls_next_rcu(i))
+ /* Note: write side code, so rcu accessors are not needed. */
+ for (i = h->first; !is_a_nulls(i); i = i->next)
last = i;
if (last) {
@@ -137,17 +145,24 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
}
}
+/* After this call, hlist_nulls_del() will work on @n. */
+static inline void hlist_nulls_add_fake(struct hlist_nulls_node *n)
+{
+ n->pprev = &n->next;
+ n->next = (struct hlist_nulls_node *)NULLS_MARKER(NULL);
+}
+
/**
* hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_nulls_node to use as a loop cursor.
- * @head: the head for your list.
+ * @head: the head of the list.
* @member: the name of the hlist_nulls_node within the struct.
*
* The barrier() is needed to make sure compiler doesn't cache first element [1],
* as this loop can be restarted [2]
- * [1] Documentation/atomic_ops.txt around line 114
- * [2] Documentation/RCU/rculist_nulls.txt around line 146
+ * [1] Documentation/memory-barriers.txt around line 1533
+ * [2] Documentation/RCU/rculist_nulls.rst around line 146
*/
#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
for (({barrier();}), \
@@ -161,7 +176,7 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
* iterate over list of given type safe against removal of list entry
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_nulls_node to use as a loop cursor.
- * @head: the head for your list.
+ * @head: the head of the list.
* @member: the name of the hlist_nulls_node within the struct.
*/
#define hlist_nulls_for_each_entry_safe(tpos, pos, head, member) \
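The nulls value is what makes the restart in [2] work: if a lookup races with an object moving to another chain, the terminating marker no longer matches the slot and the search retries. The canonical lookup pattern (table, slot, and object names hypothetical):

	struct myobj *obj;			/* hypothetical */
	struct hlist_nulls_node *node;

begin:
	rcu_read_lock();
	hlist_nulls_for_each_entry_rcu(obj, node, &table[slot], member)
		if (obj->key == key)
			goto found;
	obj = NULL;
	/* End of chain: verify it was *this* slot's nulls marker. */
	if (get_nulls_value(node) != slot) {
		rcu_read_unlock();
		goto begin;
	}
found:
	rcu_read_unlock();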
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index f816fc72b51e..dcd2cf1e8326 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -1,25 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Read-Copy Update mechanism for mutual exclusion
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright IBM Corporation, 2001
*
* Author: Dipankar Sarma <dipankar@in.ibm.com>
*
- * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * Based on the original work by Paul McKenney <paulmck@vnet.ibm.com>
* and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
* Papers:
* http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
@@ -42,32 +29,49 @@
#include <linux/lockdep.h>
#include <asm/processor.h>
#include <linux/cpumask.h>
+#include <linux/context_tracking_irq.h>
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
#define ulong2long(a) (*(long *)(&(a)))
+#define USHORT_CMP_GE(a, b) (USHRT_MAX / 2 >= (unsigned short)((a) - (b)))
+#define USHORT_CMP_LT(a, b) (USHRT_MAX / 2 < (unsigned short)((a) - (b)))
/* Exported common interfaces */
-
-#ifdef CONFIG_PREEMPT_RCU
void call_rcu(struct rcu_head *head, rcu_callback_t func);
-#else /* #ifdef CONFIG_PREEMPT_RCU */
-#define call_rcu call_rcu_sched
-#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-
-void call_rcu_bh(struct rcu_head *head, rcu_callback_t func);
-void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
-void synchronize_sched(void);
-void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
-void synchronize_rcu_tasks(void);
void rcu_barrier_tasks(void);
+void rcu_barrier_tasks_rude(void);
+void synchronize_rcu(void);
+
+struct rcu_gp_oldstate;
+unsigned long get_completed_synchronize_rcu(void);
+void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
+
+// Maximum number of unsigned long values corresponding to
+// not-yet-completed RCU grace periods.
+#define NUM_ACTIVE_RCU_POLL_OLDSTATE 2
+
+/**
+ * same_state_synchronize_rcu - Are two old-state values identical?
+ * @oldstate1: First old-state value.
+ * @oldstate2: Second old-state value.
+ *
+ * The two old-state values must have been obtained from either
+ * get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or
+ * get_completed_synchronize_rcu(). Returns @true if the two values are
+ * identical and @false otherwise. This allows structures whose lifetimes
+ * are tracked by old-state values to push these values to a list header,
+ * allowing those structures to be slightly smaller.
+ */
+static inline bool same_state_synchronize_rcu(unsigned long oldstate1, unsigned long oldstate2)
+{
+ return oldstate1 == oldstate2;
+}
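same_state_synchronize_rcu() composes with the polled grace-period API; a sketch of cookie-based deferred reuse (struct and field names hypothetical, the *_synchronize_rcu() calls are the real API):

	struct myobj {
		unsigned long rcu_cookie;
		/* ... */
	};

	static void myobj_init(struct myobj *p)
	{
		/* New objects count as "grace period already elapsed". */
		p->rcu_cookie = get_completed_synchronize_rcu();
	}

	static void myobj_retire(struct myobj *p)
	{
		p->rcu_cookie = start_poll_synchronize_rcu();
	}

	static bool myobj_reusable(struct myobj *p)
	{
		return poll_state_synchronize_rcu(p->rcu_cookie);
	}

Two objects whose cookies satisfy same_state_synchronize_rcu() can share a single piece of grace-period bookkeeping.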
#ifdef CONFIG_PREEMPT_RCU
void __rcu_read_lock(void);
void __rcu_read_unlock(void);
-void rcu_read_unlock_special(struct task_struct *t);
-void synchronize_rcu(void);
/*
* Defined as a macro as it is a very low level header included from
@@ -75,25 +79,26 @@ void synchronize_rcu(void);
* nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
* types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
*/
-#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
+#define rcu_preempt_depth() READ_ONCE(current->rcu_read_lock_nesting)
#else /* #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_TINY_RCU
+#define rcu_read_unlock_strict() do { } while (0)
+#else
+void rcu_read_unlock_strict(void);
+#endif
+
static inline void __rcu_read_lock(void)
{
- if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
- preempt_disable();
+ preempt_disable();
}
static inline void __rcu_read_unlock(void)
{
- if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
- preempt_enable();
-}
-
-static inline void synchronize_rcu(void)
-{
- synchronize_sched();
+ preempt_enable();
+ if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
+ rcu_read_unlock_strict();
}
static inline int rcu_preempt_depth(void)
@@ -103,13 +108,27 @@ static inline int rcu_preempt_depth(void)
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_RCU_LAZY
+void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func);
+#else
+static inline void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
+{
+ call_rcu(head, func);
+}
+#endif
+
/* Internal to kernel */
void rcu_init(void);
-void rcu_sched_qs(void);
-void rcu_bh_qs(void);
-void rcu_check_callbacks(int user);
+extern int rcu_scheduler_active;
+void rcu_sched_clock_irq(int user);
void rcu_report_dead(unsigned int cpu);
-void rcu_cpu_starting(unsigned int cpu);
+void rcutree_migrate_callbacks(int cpu);
+
+#ifdef CONFIG_TASKS_RCU_GENERIC
+void rcu_init_tasks_generic(void);
+#else
+static inline void rcu_init_tasks_generic(void) { }
+#endif
#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
@@ -119,29 +138,32 @@ static inline void rcu_sysrq_start(void) { }
static inline void rcu_sysrq_end(void) { }
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
-#ifdef CONFIG_NO_HZ_FULL
-void rcu_user_enter(void);
-void rcu_user_exit(void);
+#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
+void rcu_irq_work_resched(void);
#else
-static inline void rcu_user_enter(void) { }
-static inline void rcu_user_exit(void) { }
-#endif /* CONFIG_NO_HZ_FULL */
+static inline void rcu_irq_work_resched(void) { }
+#endif
#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
+int rcu_nocb_cpu_offload(int cpu);
+int rcu_nocb_cpu_deoffload(int cpu);
+void rcu_nocb_flush_deferred_wakeup(void);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static inline void rcu_init_nohz(void) { }
+static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; }
+static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; }
+static inline void rcu_nocb_flush_deferred_wakeup(void) { }
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
/**
* RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
* @a: Code that RCU needs to pay attention to.
*
- * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
- * in the inner idle loop, that is, between the rcu_idle_enter() and
- * the rcu_idle_exit() -- RCU will happily ignore any such read-side
- * critical sections. However, things like powertop need tracepoints
- * in the inner idle loop.
+ * RCU read-side critical sections are forbidden in the inner idle loop,
+ * that is, between the ct_idle_enter() and the ct_idle_exit() -- RCU
+ * will happily ignore any such read-side critical sections. However,
+ * things like powertop need tracepoints in the inner idle loop.
*
* This macro provides the way out: RCU_NONIDLE(do_something_with_RCU())
* will tell RCU that it needs to pay attention, invoke its argument
@@ -154,45 +176,104 @@ static inline void rcu_init_nohz(void) { }
*/
#define RCU_NONIDLE(a) \
do { \
- rcu_irq_enter_irqson(); \
+ ct_irq_enter_irqson(); \
do { a; } while (0); \
- rcu_irq_exit_irqson(); \
+ ct_irq_exit_irqson(); \
} while (0)
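For instance, a tracepoint fired from the inner idle loop would be wrapped as (tracepoint name hypothetical):

	RCU_NONIDLE(trace_my_idle_event(smp_processor_id()));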
/*
- * Note a voluntary context switch for RCU-tasks benefit. This is a
- * macro rather than an inline function to avoid #include hell.
+ * Note a quasi-voluntary context switch for RCU-tasks's benefit.
+ * This is a macro rather than an inline function to avoid #include hell.
*/
-#ifdef CONFIG_TASKS_RCU
-#define TASKS_RCU(x) x
-extern struct srcu_struct tasks_rcu_exit_srcu;
-#define rcu_note_voluntary_context_switch_lite(t) \
- do { \
- if (READ_ONCE((t)->rcu_tasks_holdout)) \
- WRITE_ONCE((t)->rcu_tasks_holdout, false); \
+#ifdef CONFIG_TASKS_RCU_GENERIC
+
+# ifdef CONFIG_TASKS_RCU
+# define rcu_tasks_classic_qs(t, preempt) \
+ do { \
+ if (!(preempt) && READ_ONCE((t)->rcu_tasks_holdout)) \
+ WRITE_ONCE((t)->rcu_tasks_holdout, false); \
} while (0)
-#define rcu_note_voluntary_context_switch(t) \
- do { \
- rcu_all_qs(); \
- rcu_note_voluntary_context_switch_lite(t); \
+void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
+void synchronize_rcu_tasks(void);
+# else
+# define rcu_tasks_classic_qs(t, preempt) do { } while (0)
+# define call_rcu_tasks call_rcu
+# define synchronize_rcu_tasks synchronize_rcu
+# endif
+
+# ifdef CONFIG_TASKS_TRACE_RCU
+// Bits for ->trc_reader_special.b.need_qs field.
+#define TRC_NEED_QS 0x1 // Task needs a quiescent state.
+#define TRC_NEED_QS_CHECKED 0x2 // Task has been checked for needing quiescent state.
+
+u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new);
+void rcu_tasks_trace_qs_blkd(struct task_struct *t);
+
+# define rcu_tasks_trace_qs(t) \
+ do { \
+ int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); \
+ \
+ if (likely(!READ_ONCE((t)->trc_reader_special.b.need_qs)) && \
+ likely(!___rttq_nesting)) { \
+ rcu_trc_cmpxchg_need_qs((t), 0, TRC_NEED_QS_CHECKED); \
+ } else if (___rttq_nesting && ___rttq_nesting != INT_MIN && \
+ !READ_ONCE((t)->trc_reader_special.b.blocked)) { \
+ rcu_tasks_trace_qs_blkd(t); \
+ } \
} while (0)
-#else /* #ifdef CONFIG_TASKS_RCU */
-#define TASKS_RCU(x) do { } while (0)
-#define rcu_note_voluntary_context_switch_lite(t) do { } while (0)
-#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
-#endif /* #else #ifdef CONFIG_TASKS_RCU */
+# else
+# define rcu_tasks_trace_qs(t) do { } while (0)
+# endif
+
+#define rcu_tasks_qs(t, preempt) \
+do { \
+ rcu_tasks_classic_qs((t), (preempt)); \
+ rcu_tasks_trace_qs(t); \
+} while (0)
+
+# ifdef CONFIG_TASKS_RUDE_RCU
+void call_rcu_tasks_rude(struct rcu_head *head, rcu_callback_t func);
+void synchronize_rcu_tasks_rude(void);
+# endif
+
+#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
+void exit_tasks_rcu_start(void);
+void exit_tasks_rcu_stop(void);
+void exit_tasks_rcu_finish(void);
+#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
+#define rcu_tasks_classic_qs(t, preempt) do { } while (0)
+#define rcu_tasks_qs(t, preempt) do { } while (0)
+#define rcu_note_voluntary_context_switch(t) do { } while (0)
+#define call_rcu_tasks call_rcu
+#define synchronize_rcu_tasks synchronize_rcu
+static inline void exit_tasks_rcu_start(void) { }
+static inline void exit_tasks_rcu_stop(void) { }
+static inline void exit_tasks_rcu_finish(void) { }
+#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
/**
- * cond_resched_rcu_qs - Report potential quiescent states to RCU
+ * rcu_trace_implies_rcu_gp - does an RCU Tasks Trace grace period imply an RCU grace period?
+ *
+ * As an accident of implementation, an RCU Tasks Trace grace period also
+ * acts as an RCU grace period. However, this could change at any time.
+ * Code relying on this accident must call this function to verify that
+ * this accident is still happening.
+ *
+ * You have been warned!
+ */
+static inline bool rcu_trace_implies_rcu_gp(void) { return true; }
+
+/**
+ * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU
*
* This macro resembles cond_resched(), except that it is defined to
* report potential quiescent states to RCU-tasks even if the cond_resched()
- * machinery were to be shut off, as some advocate for PREEMPT kernels.
+ * machinery were to be shut off, as some advocate for PREEMPTION kernels.
*/
-#define cond_resched_rcu_qs() \
+#define cond_resched_tasks_rcu_qs() \
do { \
- if (!cond_resched()) \
- rcu_note_voluntary_context_switch(current); \
+ rcu_tasks_qs(current, false); \
+ cond_resched(); \
} while (0)
/*
@@ -200,7 +281,7 @@ do { \
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
*/
-#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
+#if defined(CONFIG_TREE_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
@@ -209,10 +290,12 @@ do { \
#endif
/*
- * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
- * initialization and destruction of rcu_head on the stack. rcu_head structures
- * allocated dynamically in the heap or defined statically don't need any
- * initialization.
+ * The init_rcu_head_on_stack() and destroy_rcu_head_on_stack() calls
+ * are needed for dynamic initialization and destruction of rcu_head
+ * on the stack, and init_rcu_head()/destroy_rcu_head() are needed for
+ * dynamic initialization and destruction of statically allocated rcu_head
+ * structures. However, rcu_head structures allocated dynamically in the
+ * heap don't need any initialization.
*/
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head);
@@ -232,6 +315,11 @@ bool rcu_lockdep_current_cpu_online(void);
static inline bool rcu_lockdep_current_cpu_online(void) { return true; }
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
+extern struct lockdep_map rcu_lock_map;
+extern struct lockdep_map rcu_bh_lock_map;
+extern struct lockdep_map rcu_sched_lock_map;
+extern struct lockdep_map rcu_callback_map;
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void rcu_lock_acquire(struct lockdep_map *map)
@@ -241,17 +329,14 @@ static inline void rcu_lock_acquire(struct lockdep_map *map)
static inline void rcu_lock_release(struct lockdep_map *map)
{
- lock_release(map, 1, _THIS_IP_);
+ lock_release(map, _THIS_IP_);
}
-extern struct lockdep_map rcu_lock_map;
-extern struct lockdep_map rcu_bh_lock_map;
-extern struct lockdep_map rcu_sched_lock_map;
-extern struct lockdep_map rcu_callback_map;
int debug_lockdep_rcu_enabled(void);
int rcu_read_lock_held(void);
int rcu_read_lock_bh_held(void);
int rcu_read_lock_sched_held(void);
+int rcu_read_lock_any_held(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
@@ -272,6 +357,17 @@ static inline int rcu_read_lock_sched_held(void)
{
return !preemptible();
}
+
+static inline int rcu_read_lock_any_held(void)
+{
+ return !preemptible();
+}
+
+static inline int debug_lockdep_rcu_enabled(void)
+{
+ return 0;
+}
+
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
#ifdef CONFIG_PROVE_RCU
@@ -280,11 +376,18 @@ static inline int rcu_read_lock_sched_held(void)
* RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
* @c: condition to check
* @s: informative message
+ *
+ * This checks debug_lockdep_rcu_enabled() before checking (c) to
+ * prevent early boot splats due to lockdep not yet being initialized,
+ * and rechecks it after checking (c) to prevent false-positive splats
+ * due to races with lockdep being disabled. See commit 3066820034b5dd
+ * ("rcu: Reject RCU_LOCKDEP_WARN() false positives") for more detail.
*/
#define RCU_LOCKDEP_WARN(c, s) \
do { \
- static bool __section(.data.unlikely) __warned; \
- if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
+ static bool __section(".data.unlikely") __warned; \
+ if (debug_lockdep_rcu_enabled() && (c) && \
+ debug_lockdep_rcu_enabled() && !__warned) { \
__warned = true; \
lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
} \
@@ -303,7 +406,8 @@ static inline void rcu_preempt_sleep_check(void) { }
#define rcu_sleep_check() \
do { \
rcu_preempt_sleep_check(); \
- RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
"Illegal context switch in RCU-bh read-side critical section"); \
RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \
"Illegal context switch in RCU-sched read-side critical section"); \
@@ -311,7 +415,7 @@ static inline void rcu_preempt_sleep_check(void) { }
#else /* #ifdef CONFIG_PROVE_RCU */
-#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
+#define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c))
#define rcu_sleep_check() do { } while (0)
#endif /* #else #ifdef CONFIG_PROVE_RCU */
@@ -320,44 +424,59 @@ static inline void rcu_preempt_sleep_check(void) { }
* Helper functions for rcu_dereference_check(), rcu_dereference_protected()
* and rcu_assign_pointer(). Some of these could be folded into their
* callers, but they are left separate in order to ease introduction of
- * multiple flavors of pointers to match the multiple flavors of RCU
- * (e.g., __rcu_bh, * __rcu_sched, and __srcu), should this make sense in
- * the future.
+ * multiple pointer markings to match different RCU implementations
+ * (e.g., __srcu), should this make sense in the future.
*/
#ifdef __CHECKER__
-#define rcu_dereference_sparse(p, space) \
+#define rcu_check_sparse(p, space) \
((void)(((typeof(*p) space *)p) == p))
#else /* #ifdef __CHECKER__ */
-#define rcu_dereference_sparse(p, space)
+#define rcu_check_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */
-#define __rcu_access_pointer(p, space) \
+#define __unrcu_pointer(p, local) \
+({ \
+ typeof(*p) *local = (typeof(*p) *__force)(p); \
+ rcu_check_sparse(p, __rcu); \
+ ((typeof(*p) __force __kernel *)(local)); \
+})
+/**
+ * unrcu_pointer - mark a pointer as not being RCU protected
+ * @p: pointer needing to lose its __rcu property
+ *
+ * Converts @p from an __rcu pointer to a __kernel pointer.
+ * This allows an __rcu pointer to be used with xchg() and friends.
+ */
+#define unrcu_pointer(p) __unrcu_pointer(p, __UNIQUE_ID(rcu))
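
A sketch of the xchg() use case mentioned above; gp and struct foo are hypothetical:

struct foo;
static struct foo __rcu *gp;

static struct foo *foo_steal(void)
{
        /* Atomically claim the object; readers now see NULL. */
        return unrcu_pointer(xchg(&gp, NULL));
}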
+
+#define __rcu_access_pointer(p, local, space) \
({ \
- typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
- rcu_dereference_sparse(p, space); \
- ((typeof(*p) __force __kernel *)(_________p1)); \
+ typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \
+ rcu_check_sparse(p, space); \
+ ((typeof(*p) __force __kernel *)(local)); \
})
-#define __rcu_dereference_check(p, c, space) \
+#define __rcu_dereference_check(p, local, c, space) \
({ \
/* Dependency order vs. p above. */ \
- typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
+ typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \
RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
- rcu_dereference_sparse(p, space); \
- ((typeof(*p) __force __kernel *)(________p1)); \
+ rcu_check_sparse(p, space); \
+ ((typeof(*p) __force __kernel *)(local)); \
})
-#define __rcu_dereference_protected(p, c, space) \
+#define __rcu_dereference_protected(p, local, c, space) \
({ \
RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
- rcu_dereference_sparse(p, space); \
+ rcu_check_sparse(p, space); \
((typeof(*p) __force __kernel *)(p)); \
})
-#define rcu_dereference_raw(p) \
+#define __rcu_dereference_raw(p, local) \
({ \
/* Dependency order vs. p above. */ \
- typeof(p) ________p1 = lockless_dereference(p); \
- ((typeof(*p) __force __kernel *)(________p1)); \
+ typeof(p) local = READ_ONCE(p); \
+ ((typeof(*p) __force __kernel *)(local)); \
})
+#define rcu_dereference_raw(p) __rcu_dereference_raw(p, __UNIQUE_ID(rcu))
/**
* RCU_INITIALIZER() - statically initialize an RCU-protected global variable
@@ -397,14 +516,32 @@ static inline void rcu_preempt_sleep_check(void) { }
* other macros that it invokes.
*/
#define rcu_assign_pointer(p, v) \
-({ \
+do { \
uintptr_t _r_a_p__v = (uintptr_t)(v); \
+ rcu_check_sparse(p, __rcu); \
\
if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
else \
smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
- _r_a_p__v; \
+} while (0)
+
+/**
+ * rcu_replace_pointer() - replace an RCU pointer, returning its old value
+ * @rcu_ptr: RCU pointer, whose old value is returned
+ * @ptr: regular pointer
+ * @c: the lockdep conditions under which the dereference will take place
+ *
+ * Perform a replacement, where @rcu_ptr is an RCU-annotated
+ * pointer and @c is the lockdep argument that is passed to the
+ * rcu_dereference_protected() call used to read that pointer. The old
+ * value of @rcu_ptr is returned, and @rcu_ptr is set to @ptr.
+ */
+#define rcu_replace_pointer(rcu_ptr, ptr, c) \
+({ \
+ typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \
+ rcu_assign_pointer((rcu_ptr), (ptr)); \
+ __tmp; \
})
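
A sketch of a typical rcu_replace_pointer() update, assuming a hypothetical my_config_ptr protected by my_lock and a struct my_config that embeds an rcu_head:

struct my_config {
        struct rcu_head rh;
        int setting;
};

static DEFINE_SPINLOCK(my_lock);
static struct my_config __rcu *my_config_ptr;

static void my_config_update(struct my_config *newc)
{
        struct my_config *oldc;

        spin_lock(&my_lock);
        oldc = rcu_replace_pointer(my_config_ptr, newc,
                                   lockdep_is_held(&my_lock));
        spin_unlock(&my_lock);
        if (oldc)
                kvfree_rcu(oldc, rh);   /* Free old value after a grace period. */
}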
/**
@@ -412,21 +549,29 @@ static inline void rcu_preempt_sleep_check(void) { }
* @p: The pointer to read
*
* Return the value of the specified RCU-protected pointer, but omit the
- * smp_read_barrier_depends() and keep the READ_ONCE(). This is useful
- * when the value of this pointer is accessed, but the pointer is not
- * dereferenced, for example, when testing an RCU-protected pointer against
- * NULL. Although rcu_access_pointer() may also be used in cases where
- * update-side locks prevent the value of the pointer from changing, you
- * should instead use rcu_dereference_protected() for this use case.
+ * lockdep checks for being in an RCU read-side critical section. This is
+ * useful when the value of this pointer is accessed, but the pointer is
+ * not dereferenced, for example, when testing an RCU-protected pointer
+ * against NULL. Although rcu_access_pointer() may also be used in cases
+ * where update-side locks prevent the value of the pointer from changing,
+ * you should instead use rcu_dereference_protected() for this use case.
+ * Within an RCU read-side critical section, there is little reason to
+ * use rcu_access_pointer().
+ *
+ * It is usually best to test the rcu_access_pointer() return value
+ * directly in order to avoid accidental dereferences being introduced
+ * by later inattentive changes. In other words, assigning the
+ * rcu_access_pointer() return value to a local variable results in an
+ * accident waiting to happen.
*
* It is also permissible to use rcu_access_pointer() when read-side
- * access to the pointer was removed at least one grace period ago, as
- * is the case in the context of the RCU callback that is freeing up
- * the data, or after a synchronize_rcu() returns. This can be useful
- * when tearing down multi-linked structures after a grace period
- * has elapsed.
+ * access to the pointer was removed at least one grace period ago, as is
+ * the case in the context of the RCU callback that is freeing up the data,
+ * or after a synchronize_rcu() returns. This can be useful when tearing
+ * down multi-linked structures after a grace period has elapsed. However,
+ * rcu_dereference_protected() is normally preferred for this use case.
*/
-#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
+#define rcu_access_pointer(p) __rcu_access_pointer((p), __UNIQUE_ID(rcu), __rcu)
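
A sketch of the NULL-test use case, with the return value tested directly as recommended above; gp is hypothetical:

struct foo;
static struct foo __rcu *gp;

static bool foo_is_registered(void)
{
        /* Test the return value directly; do not store and dereference. */
        return rcu_access_pointer(gp) != NULL;
}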
/**
* rcu_dereference_check() - rcu_dereference with debug checking
@@ -462,17 +607,24 @@ static inline void rcu_preempt_sleep_check(void) { }
* annotated as __rcu.
*/
#define rcu_dereference_check(p, c) \
- __rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)
+ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
+ (c) || rcu_read_lock_held(), __rcu)
/**
* rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
- * This is the RCU-bh counterpart to rcu_dereference_check().
+ * This is the RCU-bh counterpart to rcu_dereference_check(). However,
+ * please note that starting in v5.0 kernels, vanilla RCU grace periods
+ * wait for local_bh_disable() regions of code in addition to regions of
+ * code demarked by rcu_read_lock() and rcu_read_unlock(). This means
+ * that synchronize_rcu(), call_rcu(), and friends all take not only
+ * rcu_read_lock() but also rcu_read_lock_bh() into account.
*/
#define rcu_dereference_bh_check(p, c) \
- __rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)
+ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
+ (c) || rcu_read_lock_bh_held(), __rcu)
/**
* rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
@@ -480,9 +632,15 @@ static inline void rcu_preempt_sleep_check(void) { }
* @c: The conditions under which the dereference will take place
*
* This is the RCU-sched counterpart to rcu_dereference_check().
+ * However, please note that starting in v5.0 kernels, vanilla RCU grace
+ * periods wait for preempt_disable() regions of code in addition to
+ * regions of code demarked by rcu_read_lock() and rcu_read_unlock().
+ * This means that synchronize_rcu(), call_rcu(), and friends all take not
+ * only rcu_read_lock() but also rcu_read_lock_sched() into account.
*/
#define rcu_dereference_sched_check(p, c) \
- __rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
+ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
+ (c) || rcu_read_lock_sched_held(), \
__rcu)
/*
@@ -492,7 +650,8 @@ static inline void rcu_preempt_sleep_check(void) { }
* The no-tracing version of rcu_dereference_raw() must not call
* rcu_read_lock_held().
*/
-#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
+#define rcu_dereference_raw_check(p) \
+ __rcu_dereference_check((p), __UNIQUE_ID(rcu), 1, __rcu)
/**
* rcu_dereference_protected() - fetch RCU pointer when updates prevented
@@ -500,19 +659,18 @@ static inline void rcu_preempt_sleep_check(void) { }
* @c: The conditions under which the dereference will take place
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the READ_ONCE(). This
- * is useful in cases where update-side locks prevent the value of the
- * pointer from changing. Please note that this primitive does -not-
- * prevent the compiler from repeating this reference or combining it
- * with other references, so it should not be used without protection
- * of appropriate locks.
+ * the READ_ONCE(). This is useful in cases where update-side locks
+ * prevent the value of the pointer from changing. Please note that this
+ * primitive does *not* prevent the compiler from repeating this reference
+ * or combining it with other references, so it should not be used without
+ * protection of appropriate locks.
*
* This function is only for update-side use. Using this function
* when protected only by rcu_read_lock() will result in infrequent
* but very ugly failures.
*/
#define rcu_dereference_protected(p, c) \
- __rcu_dereference_protected((p), (c), __rcu)
+ __rcu_dereference_protected((p), __UNIQUE_ID(rcu), (c), __rcu)
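
A sketch of update-side use under a lock, assuming hypothetical gp and my_lock; readers may run concurrently, hence the WRITE_ONCE():

struct foo {
        int datum;
};

static DEFINE_SPINLOCK(my_lock);
static struct foo __rcu *gp;

static void foo_set_datum(int v)
{
        struct foo *fp;

        spin_lock(&my_lock);
        fp = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
        if (fp)
                WRITE_ONCE(fp->datum, v);
        spin_unlock(&my_lock);
}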
/**
@@ -546,7 +704,7 @@ static inline void rcu_preempt_sleep_check(void) { }
* This is simply an identity function, but it documents where a pointer
* is handed off from RCU to some other synchronization mechanism, for
* example, reference counting or locking. In C11, it would map to
- * kill_dependency(). It could be used as follows:
+ * kill_dependency(). It could be used as follows::
*
* rcu_read_lock();
* p = rcu_dereference(gp);
@@ -572,6 +730,12 @@ static inline void rcu_preempt_sleep_check(void) { }
* sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
*
+ * In v5.0 and later kernels, synchronize_rcu() and call_rcu() also
+ * wait for regions of code with preemption disabled, including regions of
+ * code with interrupts or softirqs disabled. In pre-v5.0 kernels, which
+ * define synchronize_sched(), only code enclosed within rcu_read_lock()
+ * and rcu_read_unlock() is guaranteed to be waited for.
+ *
* Note, however, that RCU callbacks are permitted to run concurrently
* with new RCU read-side critical sections. One way that this can happen
* is via the following sequence of events: (1) CPU 0 enters an RCU
@@ -590,19 +754,19 @@ static inline void rcu_preempt_sleep_check(void) { }
*
* You can avoid reading and understanding the next paragraph by
* following this rule: don't put anything in an rcu_read_lock() RCU
- * read-side critical section that would block in a !PREEMPT kernel.
+ * read-side critical section that would block in a !PREEMPTION kernel.
* But if you want the full story, read on!
*
- * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
+ * In non-preemptible RCU implementations (pure TREE_RCU and TINY_RCU),
* it is illegal to block while in an RCU read-side critical section.
- * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT
+ * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPTION
* kernel builds, RCU read-side critical sections may be preempted,
* but explicit blocking is illegal. Finally, in preemptible RCU
* implementations in real-time (with -rt patchset) kernel builds, RCU
* read-side critical sections may be preempted and they may also block, but
* only when acquiring spinlocks that are subject to priority inheritance.
*/
-static inline void rcu_read_lock(void)
+static __always_inline void rcu_read_lock(void)
{
__rcu_read_lock();
__acquire(RCU);
@@ -624,35 +788,12 @@ static inline void rcu_read_lock(void)
/**
* rcu_read_unlock() - marks the end of an RCU read-side critical section.
*
- * In most situations, rcu_read_unlock() is immune from deadlock.
- * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
- * is responsible for deboosting, which it does via rt_mutex_unlock().
- * Unfortunately, this function acquires the scheduler's runqueue and
- * priority-inheritance spinlocks. This means that deadlock could result
- * if the caller of rcu_read_unlock() already holds one of these locks or
- * any lock that is ever acquired while holding them; or any lock which
- * can be taken from interrupt context because rcu_boost()->rt_mutex_lock()
- * does not disable irqs while taking ->wait_lock.
- *
- * That said, RCU readers are never priority boosted unless they were
- * preempted. Therefore, one way to avoid deadlock is to make sure
- * that preemption never happens within any RCU read-side critical
- * section whose outermost rcu_read_unlock() is called with one of
- * rt_mutex_unlock()'s locks held. Such preemption can be avoided in
- * a number of ways, for example, by invoking preempt_disable() before
- * critical section's outermost rcu_read_lock().
- *
- * Given that the set of locks acquired by rt_mutex_unlock() might change
- * at any time, a somewhat more future-proofed approach is to make sure
- * that that preemption never happens within any RCU read-side critical
- * section whose outermost rcu_read_unlock() is called with irqs disabled.
- * This approach relies on the fact that rt_mutex_unlock() currently only
- * acquires irq-disabled locks.
- *
- * The second of these two approaches is best in most situations,
- * however, the first approach can also be useful, at least to those
- * developers willing to keep abreast of the set of locks acquired by
- * rt_mutex_unlock().
+ * In almost all situations, rcu_read_unlock() is immune from deadlock.
+ * In recent kernels that have consolidated synchronize_sched() and
+ * synchronize_rcu_bh() into synchronize_rcu(), this deadlock immunity
+ * also extends to the scheduler's runqueue and priority-inheritance
+ * spinlocks, courtesy of the quiescent-state deferral that is carried
+ * out when rcu_read_unlock() is invoked with interrupts disabled.
*
* See rcu_read_lock() for more information.
*/
@@ -668,14 +809,11 @@ static inline void rcu_read_unlock(void)
/**
* rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
*
- * This is equivalent of rcu_read_lock(), but to be used when updates
- * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
- * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
- * softirq handler to be a quiescent state, a process in RCU read-side
- * critical section must be protected by disabling softirqs. Read-side
- * critical sections in interrupt context can use just rcu_read_lock(),
- * though this should at least be commented to avoid confusing people
- * reading the code.
+ * This is equivalent to rcu_read_lock(), but also disables softirqs.
+ * Note that anything else that disables softirqs can also serve as an RCU
+ * read-side critical section. However, please note that this equivalence
+ * applies only to v5.0 and later. Before v5.0, rcu_read_lock() and
+ * rcu_read_lock_bh() were unrelated.
*
* Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
* must occur in the same context, for example, it is illegal to invoke
@@ -691,8 +829,8 @@ static inline void rcu_read_lock_bh(void)
"rcu_read_lock_bh() used illegally while idle");
}
-/*
- * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
+/**
+ * rcu_read_unlock_bh() - marks the end of a softirq-only RCU critical section
*
* See rcu_read_lock_bh() for more information.
*/
@@ -708,10 +846,12 @@ static inline void rcu_read_unlock_bh(void)
/**
* rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
*
- * This is equivalent of rcu_read_lock(), but to be used when updates
- * are being done using call_rcu_sched() or synchronize_rcu_sched().
- * Read-side critical sections can also be introduced by anything that
- * disables preemption, including local_irq_disable() and friends.
+ * This is equivalent to rcu_read_lock(), but also disables preemption.
+ * Read-side critical sections can also be introduced by anything else that
+ * disables preemption, including local_irq_disable() and friends. However,
+ * please note that the equivalence to rcu_read_lock() applies only to
+ * v5.0 and later. Before v5.0, rcu_read_lock() and rcu_read_lock_sched()
+ * were unrelated.
*
* Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
* must occur in the same context, for example, it is illegal to invoke
@@ -734,10 +874,10 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
__acquire(RCU_SCHED);
}
-/*
- * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
+/**
+ * rcu_read_unlock_sched() - marks the end of a RCU-classic critical section
*
- * See rcu_read_lock_sched for more information.
+ * See rcu_read_lock_sched() for more information.
*/
static inline void rcu_read_unlock_sched(void)
{
@@ -757,18 +897,21 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
/**
* RCU_INIT_POINTER() - initialize an RCU protected pointer
+ * @p: The pointer to be initialized.
+ * @v: The value to initialize the pointer to.
*
* Initialize an RCU-protected pointer in special cases where readers
* do not need ordering constraints on the CPU or the compiler. These
* special cases are:
*
- * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
+ * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer *or*
* 2. The caller has taken whatever steps are required to prevent
- * RCU readers from concurrently accessing this pointer -or-
+ * RCU readers from concurrently accessing this pointer *or*
* 3. The referenced data structure has already been exposed to
- * readers either at compile time or via rcu_assign_pointer() -and-
- * a. You have not made -any- reader-visible changes to
- * this structure since then -or-
+ * readers either at compile time or via rcu_assign_pointer() *and*
+ *
+ * a. You have not made *any* reader-visible changes to
+ * this structure since then *or*
* b. It is OK for readers accessing this structure from its
* new location to see the old state of the structure. (For
* example, the changes were to statistical counters or to
@@ -784,7 +927,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* by a single external-to-structure RCU-protected pointer, then you may
* use RCU_INIT_POINTER() to initialize the internal RCU-protected
* pointers, but you must use rcu_assign_pointer() to initialize the
- * external-to-structure pointer -after- you have completely initialized
+ * external-to-structure pointer *after* you have completely initialized
* the reader-accessible portions of the linked structure.
*
* Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
@@ -792,12 +935,14 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
*/
#define RCU_INIT_POINTER(p, v) \
do { \
- rcu_dereference_sparse(p, __rcu); \
+ rcu_check_sparse(p, __rcu); \
WRITE_ONCE(p, RCU_INITIALIZER(v)); \
} while (0)
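
A sketch of special case 1 above (NULLing out the pointer), with hypothetical gp, my_lock, and a struct foo embedding an rcu_head:

struct foo {
        int datum;
        struct rcu_head rh;
};

static DEFINE_SPINLOCK(my_lock);
static struct foo __rcu *gp;

static void foo_unregister(void)
{
        struct foo *fp;

        spin_lock(&my_lock);
        fp = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
        RCU_INIT_POINTER(gp, NULL);     /* Special case 1: no ordering needed. */
        spin_unlock(&my_lock);
        if (fp)
                kfree_rcu(fp, rh);
}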
/**
* RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
+ * @p: The pointer to be initialized.
+ * @v: The value to initialize the pointer to.
*
* GCC-style initialization for an RCU-protected pointer in a structure field.
*/
@@ -806,23 +951,15 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
/*
* Does the specified offset indicate that the corresponding rcu_head
- * structure can be handled by kfree_rcu()?
+ * structure can be handled by kvfree_rcu()?
*/
-#define __is_kfree_rcu_offset(offset) ((offset) < 4096)
-
-/*
- * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
- */
-#define __kfree_rcu(head, offset) \
- do { \
- BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
- kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
- } while (0)
+#define __is_kvfree_rcu_offset(offset) ((offset) < 4096)
/**
* kfree_rcu() - kfree an object after a grace period.
- * @ptr: pointer to kfree
- * @rcu_head: the name of the struct rcu_head within the type of @ptr.
+ * @ptr: pointer to kfree for both single- and double-argument invocations.
+ * @rhf: the name of the struct rcu_head within the type of @ptr,
+ * but only for double-argument invocations.
*
* Many rcu callbacks functions just call kfree() on the base structure.
* These functions are trivial, but their size adds up, and furthermore
@@ -835,19 +972,70 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* Because the functions are not allowed in the low-order 4096 bytes of
* kernel virtual memory, offsets up to 4095 bytes can be accommodated.
* If the offset is larger than 4095 bytes, a compile-time error will
- * be generated in __kfree_rcu(). If this error is triggered, you can
+ * be generated in kvfree_rcu_arg_2(). If this error is triggered, you can
* either fall back to use of call_rcu() or rearrange the structure to
* position the rcu_head structure into the first 4096 bytes.
*
- * Note that the allowable offset might decrease in the future, for example,
- * to allow something like kmem_cache_free_rcu().
+ * The object to be freed can be allocated either by kmalloc() or
+ * kmem_cache_alloc().
+ *
+ * Note that the allowable offset might decrease in the future.
*
* The BUILD_BUG_ON check must not involve any function calls, hence the
* checks are done in macros here.
*/
-#define kfree_rcu(ptr, rcu_head) \
- __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
+#define kfree_rcu(ptr, rhf...) kvfree_rcu(ptr, ## rhf)
+/**
+ * kvfree_rcu() - kvfree an object after a grace period.
+ *
+ * This macro takes one or two arguments, depending on whether
+ * the object being freed is head-less or not. If the object
+ * embeds an rcu_head, the semantics are the same as before:
+ *
+ *     kvfree_rcu(ptr, rhf);
+ *
+ * where @ptr is the pointer to be freed by kvfree() and @rhf
+ * is the name of the rcu_head structure within the type of @ptr.
+ *
+ * In the head-less variant, only one argument is passed, namely
+ * the pointer that is to be freed after a grace period:
+ *
+ *     kvfree_rcu(ptr);
+ *
+ * where @ptr is the pointer to be freed by kvfree().
+ *
+ * Please note that the head-less way of freeing may be used only
+ * from a context that is allowed to sleep (that is, one satisfying
+ * the might_sleep() annotation). Otherwise, embed an rcu_head
+ * structure within the type of @ptr and use the two-argument form.
+ */
+#define kvfree_rcu(...) KVFREE_GET_MACRO(__VA_ARGS__, \
+ kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__)
+
+#define kvfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr)
+#define kfree_rcu_mightsleep(ptr) kvfree_rcu_mightsleep(ptr)
+
+#define KVFREE_GET_MACRO(_1, _2, NAME, ...) NAME
+#define kvfree_rcu_arg_2(ptr, rhf) \
+do { \
+ typeof (ptr) ___p = (ptr); \
+ \
+ if (___p) { \
+ BUILD_BUG_ON(!__is_kvfree_rcu_offset(offsetof(typeof(*(ptr)), rhf))); \
+ kvfree_call_rcu(&((___p)->rhf), (void *) (___p)); \
+ } \
+} while (0)
+
+#define kvfree_rcu_arg_1(ptr) \
+do { \
+ typeof(ptr) ___p = (ptr); \
+ \
+ if (___p) \
+ kvfree_call_rcu(NULL, (void *) (___p)); \
+} while (0)
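
A sketch of both invocation styles; struct blob and the release helpers are hypothetical:

struct blob {
        struct rcu_head rh;
        unsigned long payload[64];
};

static void blob_release(struct blob *b)
{
        kvfree_rcu(b, rh);              /* Usable from any context. */
}

static void name_release(char *name)
{
        kvfree_rcu_mightsleep(name);    /* Head-less: sleepable context only. */
}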
/*
* Place this after a lock-acquisition primitive to guarantee that
@@ -862,4 +1050,49 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
+/* Has the specified rcu_head structure been handed to call_rcu()? */
+
+/**
+ * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu()
+ * @rhp: The rcu_head structure to initialize.
+ *
+ * If you intend to invoke rcu_head_after_call_rcu() to test whether a
+ * given rcu_head structure has already been passed to call_rcu(), then
+ * you must also invoke this rcu_head_init() function on it just after
+ * allocating that structure. Calls to this function must not race with
+ * calls to call_rcu(), rcu_head_after_call_rcu(), or callback invocation.
+ */
+static inline void rcu_head_init(struct rcu_head *rhp)
+{
+ rhp->func = (rcu_callback_t)~0L;
+}
+
+/**
+ * rcu_head_after_call_rcu() - Has this rcu_head been passed to call_rcu()?
+ * @rhp: The rcu_head structure to test.
+ * @f: The function passed to call_rcu() along with @rhp.
+ *
+ * Returns @true if @rhp has been passed to call_rcu() with @f,
+ * and @false otherwise. Emits a warning in any other case, including
+ * the case where @rhp has already been invoked after a grace period.
+ * Calls to this function must not race with callback invocation. One way
+ * to avoid such races is to enclose the call to rcu_head_after_call_rcu()
+ * in an RCU read-side critical section that includes a read-side fetch
+ * of the pointer to the structure containing @rhp.
+ */
+static inline bool
+rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
+{
+ rcu_callback_t func = READ_ONCE(rhp->func);
+
+ if (func == f)
+ return true;
+ WARN_ON_ONCE(func != (rcu_callback_t)~0L);
+ return false;
+}
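
A sketch pairing rcu_head_init() with rcu_head_after_call_rcu(); struct foo and its helpers are hypothetical:

struct foo {
        struct rcu_head rh;
};

static void foo_reclaim(struct rcu_head *rhp)
{
        kfree(container_of(rhp, struct foo, rh));
}

static struct foo *foo_alloc(gfp_t gfp)
{
        struct foo *fp = kzalloc(sizeof(*fp), gfp);

        if (fp)
                rcu_head_init(&fp->rh); /* Required before the test below. */
        return fp;
}

static void foo_free_once(struct foo *fp)
{
        /* Hand off to call_rcu() only if that has not already been done. */
        if (!rcu_head_after_call_rcu(&fp->rh, foo_reclaim))
                call_rcu(&fp->rh, foo_reclaim);
}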
+
+/* kernel/ksysfs.c definitions */
+extern int rcu_expedited;
+extern int rcu_normal;
+
#endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h
new file mode 100644
index 000000000000..9bc8cbb33340
--- /dev/null
+++ b/include/linux/rcupdate_trace.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Read-Copy Update mechanism for mutual exclusion, adapted for tracing.
+ *
+ * Copyright (C) 2020 Paul E. McKenney.
+ */
+
+#ifndef __LINUX_RCUPDATE_TRACE_H
+#define __LINUX_RCUPDATE_TRACE_H
+
+#include <linux/sched.h>
+#include <linux/rcupdate.h>
+
+extern struct lockdep_map rcu_trace_lock_map;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+static inline int rcu_read_lock_trace_held(void)
+{
+ return lock_is_held(&rcu_trace_lock_map);
+}
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline int rcu_read_lock_trace_held(void)
+{
+ return 1;
+}
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+#ifdef CONFIG_TASKS_TRACE_RCU
+
+void rcu_read_unlock_trace_special(struct task_struct *t);
+
+/**
+ * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
+ *
+ * When synchronize_rcu_tasks_trace() is invoked by one task, then that
+ * task is guaranteed to block until all other tasks exit their read-side
+ * critical sections. Similarly, if call_rcu_tasks_trace() is invoked
+ * on one task while other tasks are within RCU read-side critical
+ * sections, invocation of the corresponding RCU callback is deferred
+ * until after all the other tasks exit their critical sections.
+ *
+ * For more details, please see the documentation for rcu_read_lock().
+ */
+static inline void rcu_read_lock_trace(void)
+{
+ struct task_struct *t = current;
+
+ WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
+ barrier();
+ if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
+ t->trc_reader_special.b.need_mb)
+ smp_mb(); // Pairs with update-side barriers
+ rcu_lock_acquire(&rcu_trace_lock_map);
+}
+
+/**
+ * rcu_read_unlock_trace - mark end of RCU-trace read-side critical section
+ *
+ * Pairs with a preceding call to rcu_read_lock_trace(), and nesting is
+ * allowed. Invoking a rcu_read_unlock_trace() when there is no matching
+ * rcu_read_lock_trace() is verboten, and will result in lockdep complaints.
+ *
+ * For more details, please see the documentation for rcu_read_unlock().
+ */
+static inline void rcu_read_unlock_trace(void)
+{
+ int nesting;
+ struct task_struct *t = current;
+
+ rcu_lock_release(&rcu_trace_lock_map);
+ nesting = READ_ONCE(t->trc_reader_nesting) - 1;
+ barrier(); // Critical section before disabling.
+ // Disable IPI-based setting of .need_qs.
+ WRITE_ONCE(t->trc_reader_nesting, INT_MIN + nesting);
+ if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
+ WRITE_ONCE(t->trc_reader_nesting, nesting);
+ return; // We assume shallow reader nesting.
+ }
+ WARN_ON_ONCE(nesting != 0);
+ rcu_read_unlock_trace_special(t);
+}
+
+void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
+void synchronize_rcu_tasks_trace(void);
+void rcu_barrier_tasks_trace(void);
+#else
+/*
+ * The BPF JIT forms these addresses even when it doesn't call these
+ * functions, so provide definitions that result in runtime errors.
+ */
+static inline void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) { BUG(); }
+static inline void rcu_read_lock_trace(void) { BUG(); }
+static inline void rcu_read_unlock_trace(void) { BUG(); }
+#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
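
A sketch of a Tasks Trace RCU reader, assuming a hypothetical foo_trace_ptr and a struct foo with a datum field:

struct foo {
        int datum;
};

static struct foo __rcu *foo_trace_ptr;

static int foo_peek_datum(void)
{
        struct foo *fp;
        int ret = -ENOENT;

        rcu_read_lock_trace();
        fp = rcu_dereference_check(foo_trace_ptr,
                                   rcu_read_lock_trace_held());
        if (fp)
                ret = READ_ONCE(fp->datum);
        rcu_read_unlock_trace();
        return ret;
}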
+
+#endif /* __LINUX_RCUPDATE_TRACE_H */
diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h
index e774b4f5f220..699b938358bf 100644
--- a/include/linux/rcupdate_wait.h
+++ b/include/linux/rcupdate_wait.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_RCUPDATE_WAIT_H
#define _LINUX_SCHED_RCUPDATE_WAIT_H
@@ -32,17 +33,19 @@ do { \
/**
* synchronize_rcu_mult - Wait concurrently for multiple grace periods
- * @...: List of call_rcu() functions for the flavors to wait on.
+ * @...: List of call_rcu() functions for different grace periods to wait on
*
- * This macro waits concurrently for multiple flavors of RCU grace periods.
- * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait
- * on concurrent RCU and RCU-bh grace periods. Waiting on a give SRCU
+ * This macro waits concurrently for multiple types of RCU grace periods.
+ * For example, synchronize_rcu_mult(call_rcu, call_rcu_tasks) would wait
+ * on concurrent RCU and RCU-tasks grace periods. Waiting on a given SRCU
* domain requires you to write a wrapper function for that SRCU domain's
- * call_srcu() function, supplying the corresponding srcu_struct.
+ * call_srcu() function, with this wrapper supplying the pointer to the
+ * corresponding srcu_struct.
*
- * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU
- * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called
- * is automatically a grace period.
+ * The first argument tells Tiny RCU's _wait_rcu_gp() not to
+ * bother waiting for RCU. The reason is that anywhere
+ * synchronize_rcu_mult() can be called is automatically already a
+ * full grace period.
*/
#define synchronize_rcu_mult(...) \
_wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
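
A sketch of the example given above, waiting concurrently for a vanilla RCU grace period and an RCU Tasks grace period rather than waiting for them serially:

static void foo_shutdown(void)
{
        /* One wait covering both grace-period types. */
        synchronize_rcu_mult(call_rcu, call_rcu_tasks);
}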
diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h
new file mode 100644
index 000000000000..2c8bfd0f1b6b
--- /dev/null
+++ b/include/linux/rcuref.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LINUX_RCUREF_H
+#define _LINUX_RCUREF_H
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/limits.h>
+#include <linux/lockdep.h>
+#include <linux/preempt.h>
+#include <linux/rcupdate.h>
+
+#define RCUREF_ONEREF 0x00000000U
+#define RCUREF_MAXREF 0x7FFFFFFFU
+#define RCUREF_SATURATED 0xA0000000U
+#define RCUREF_RELEASED 0xC0000000U
+#define RCUREF_DEAD 0xE0000000U
+#define RCUREF_NOREF 0xFFFFFFFFU
+
+/**
+ * rcuref_init - Initialize a rcuref reference count with the given reference count
+ * @ref: Pointer to the reference count
+ * @cnt: The initial reference count typically '1'
+ */
+static inline void rcuref_init(rcuref_t *ref, unsigned int cnt)
+{
+ atomic_set(&ref->refcnt, cnt - 1);
+}
+
+/**
+ * rcuref_read - Read the number of held reference counts of a rcuref
+ * @ref: Pointer to the reference count
+ *
+ * Return: The number of held references (0 ... N)
+ */
+static inline unsigned int rcuref_read(rcuref_t *ref)
+{
+ unsigned int c = atomic_read(&ref->refcnt);
+
+ /* Return 0 if within the DEAD zone. */
+ return c >= RCUREF_RELEASED ? 0 : c + 1;
+}
+
+extern __must_check bool rcuref_get_slowpath(rcuref_t *ref);
+
+/**
+ * rcuref_get - Acquire one reference on a rcuref reference count
+ * @ref: Pointer to the reference count
+ *
+ * Similar to atomic_inc_not_zero() but saturates at RCUREF_MAXREF.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See documentation in lib/rcuref.c
+ *
+ * Return:
+ * False if the attempt to acquire a reference failed. This happens
+ * when the last reference has been put already
+ *
+ * True if a reference was successfully acquired
+ */
+static inline __must_check bool rcuref_get(rcuref_t *ref)
+{
+ /*
+ * Unconditionally increase the reference count. The saturation and
+ * dead zones provide enough tolerance for this.
+ */
+ if (likely(!atomic_add_negative_relaxed(1, &ref->refcnt)))
+ return true;
+
+ /* Handle the cases inside the saturation and dead zones */
+ return rcuref_get_slowpath(ref);
+}
+
+extern __must_check bool rcuref_put_slowpath(rcuref_t *ref);
+
+/*
+ * Internal helper. Do not invoke directly.
+ */
+static __always_inline __must_check bool __rcuref_put(rcuref_t *ref)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(),
+ "suspicious rcuref_put_rcusafe() usage");
+ /*
+ * Unconditionally decrease the reference count. The saturation and
+ * dead zones provide enough tolerance for this.
+ */
+ if (likely(!atomic_add_negative_release(-1, &ref->refcnt)))
+ return false;
+
+ /*
+ * Handle the last reference drop and cases inside the saturation
+ * and dead zones.
+ */
+ return rcuref_put_slowpath(ref);
+}
+
+/**
+ * rcuref_put_rcusafe -- Release one reference for a rcuref reference count RCU safe
+ * @ref: Pointer to the reference count
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides an acquire ordering on success such that free()
+ * must come after.
+ *
+ * Can be invoked from contexts which guarantee that no grace period can
+ * happen which would free the object concurrently if the decrement drops
+ * the last reference and the slowpath races against a concurrent get() and
+ * put() pair. rcu_read_lock()'ed and atomic contexts qualify.
+ *
+ * Return:
+ * True if this was the last reference with no future references
+ * possible. This signals the caller that it can safely release the
+ * object which is protected by the reference counter.
+ *
+ * False if there are still active references or the put() raced
+ * with a concurrent get()/put() pair. Caller is not allowed to
+ * release the protected object.
+ */
+static inline __must_check bool rcuref_put_rcusafe(rcuref_t *ref)
+{
+ return __rcuref_put(ref);
+}
+
+/**
+ * rcuref_put -- Release one reference for a rcuref reference count
+ * @ref: Pointer to the reference count
+ *
+ * Can be invoked from any context.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides an acquire ordering on success such that free()
+ * must come after.
+ *
+ * Return:
+ *
+ * True if this was the last reference with no future references
+ * possible. This signals the caller that it can safely schedule the
+ * object, which is protected by the reference counter, for
+ * deconstruction.
+ *
+ * False if there are still active references or the put() raced
+ * with a concurrent get()/put() pair. Caller is not allowed to
+ * deconstruct the protected object.
+ */
+static inline __must_check bool rcuref_put(rcuref_t *ref)
+{
+ bool released;
+
+ preempt_disable();
+ released = __rcuref_put(ref);
+ preempt_enable();
+ return released;
+}
+
+#endif
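
A sketch of a typical rcuref lifecycle; struct obj and its helpers are hypothetical:

struct obj {
        rcuref_t ref;
        struct rcu_head rh;
};

static struct obj *obj_create(gfp_t gfp)
{
        struct obj *o = kzalloc(sizeof(*o), gfp);

        if (o)
                rcuref_init(&o->ref, 1);
        return o;
}

static bool obj_tryget(struct obj *o)
{
        return rcuref_get(&o->ref);     /* Fails once the last ref is gone. */
}

static void obj_put(struct obj *o)
{
        if (rcuref_put(&o->ref))        /* True only for the final put. */
                kfree_rcu(o, rh);
}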
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 5becbbccb998..7f17acf29dda 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright IBM Corporation, 2008
*
- * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ * Author: Paul E. McKenney <paulmck@linux.ibm.com>
*
* For detailed explanation of Read-Copy Update mechanism see -
* Documentation/RCU
@@ -25,20 +12,45 @@
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H
-#include <linux/ktime.h>
+#include <asm/param.h> /* for HZ */
+
+struct rcu_gp_oldstate {
+ unsigned long rgos_norm;
+};
+
+// Maximum number of rcu_gp_oldstate values corresponding to
+// not-yet-completed RCU grace periods.
+#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 2
-struct rcu_dynticks;
-static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
+/*
+ * Are the two oldstate values the same? See the Tree RCU version for
+ * docbook header.
+ */
+static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
+ struct rcu_gp_oldstate *rgosp2)
{
- return 0;
+ return rgosp1->rgos_norm == rgosp2->rgos_norm;
}
-/* Never flag non-existent other CPUs! */
-static inline bool rcu_eqs_special_set(int cpu) { return false; }
+unsigned long get_state_synchronize_rcu(void);
-static inline unsigned long get_state_synchronize_rcu(void)
+static inline void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
- return 0;
+ rgosp->rgos_norm = get_state_synchronize_rcu();
+}
+
+unsigned long start_poll_synchronize_rcu(void);
+
+static inline void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ rgosp->rgos_norm = start_poll_synchronize_rcu();
+}
+
+bool poll_state_synchronize_rcu(unsigned long oldstate);
+
+static inline bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ return poll_state_synchronize_rcu(rgosp->rgos_norm);
}
static inline void cond_synchronize_rcu(unsigned long oldstate)
@@ -46,59 +58,83 @@ static inline void cond_synchronize_rcu(unsigned long oldstate)
might_sleep();
}
-static inline unsigned long get_state_synchronize_sched(void)
+static inline void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
- return 0;
+ cond_synchronize_rcu(rgosp->rgos_norm);
}
-static inline void cond_synchronize_sched(unsigned long oldstate)
+static inline unsigned long start_poll_synchronize_rcu_expedited(void)
{
- might_sleep();
+ return start_poll_synchronize_rcu();
}
-extern void rcu_barrier_bh(void);
-extern void rcu_barrier_sched(void);
+static inline void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
+{
+ rgosp->rgos_norm = start_poll_synchronize_rcu_expedited();
+}
-static inline void synchronize_rcu_expedited(void)
+static inline void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
- synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */
+ cond_synchronize_rcu(oldstate);
}
-static inline void rcu_barrier(void)
+static inline void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
- rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */
+ cond_synchronize_rcu_expedited(rgosp->rgos_norm);
}
-static inline void synchronize_rcu_bh(void)
+extern void rcu_barrier(void);
+
+static inline void synchronize_rcu_expedited(void)
{
- synchronize_sched();
+ synchronize_rcu();
}
-static inline void synchronize_rcu_bh_expedited(void)
+/*
+ * Add one more declaration of kvfree() here. It is not
+ * straightforward to simply include <linux/mm.h>, where it is
+ * defined, because that include causes many compile errors.
+ */
+extern void kvfree(const void *addr);
+
+static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
- synchronize_sched();
+ if (head) {
+ call_rcu(head, (rcu_callback_t) ((void *) head - ptr));
+ return;
+ }
+
+ // kvfree_rcu(one_arg) call.
+ might_sleep();
+ synchronize_rcu();
+ kvfree(ptr);
}
-static inline void synchronize_sched_expedited(void)
+#ifdef CONFIG_KASAN_GENERIC
+void kvfree_call_rcu(struct rcu_head *head, void *ptr);
+#else
+static inline void kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
- synchronize_sched();
+ __kvfree_call_rcu(head, ptr);
}
+#endif
-static inline void kfree_call_rcu(struct rcu_head *head,
- rcu_callback_t func)
+void rcu_qs(void);
+
+static inline void rcu_softirq_qs(void)
{
- call_rcu(head, func);
+ rcu_qs();
}
#define rcu_note_context_switch(preempt) \
do { \
- rcu_sched_qs(); \
- rcu_note_voluntary_context_switch_lite(current); \
+ rcu_qs(); \
+ rcu_tasks_qs(current, (preempt)); \
} while (0)
-static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
+static inline int rcu_needs_cpu(void)
{
- *nextevt = KTIME_MAX;
return 0;
}
@@ -106,25 +142,23 @@ static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
* Take advantage of the fact that there is only one CPU, which
* allows us to ignore virtualization-based context switches.
*/
-static inline void rcu_virt_note_context_switch(int cpu) { }
+static inline void rcu_virt_note_context_switch(void) { }
static inline void rcu_cpu_stall_reset(void) { }
-static inline void rcu_idle_enter(void) { }
-static inline void rcu_idle_exit(void) { }
-static inline void rcu_irq_enter(void) { }
-static inline bool rcu_irq_enter_disabled(void) { return false; }
-static inline void rcu_irq_exit_irqson(void) { }
-static inline void rcu_irq_enter_irqson(void) { }
-static inline void rcu_irq_exit(void) { }
+static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
+static inline void rcu_irq_exit_check_preempt(void) { }
static inline void exit_rcu(void) { }
-
-#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU)
-extern int rcu_scheduler_active __read_mostly;
+static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
+{
+ return false;
+}
+static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
void rcu_scheduler_starting(void);
-#else /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
-static inline void rcu_scheduler_starting(void) { }
-#endif /* #else #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
static inline void rcu_end_inkernel_boot(void) { }
+static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
+static inline void rcu_momentary_dyntick_idle(void) { }
+static inline void kfree_rcu_scheduler_running(void) { }
+static inline bool rcu_gp_might_be_stalled(void) { return false; }
/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }
@@ -135,5 +169,6 @@ static inline void rcu_all_qs(void) { barrier(); }
#define rcutree_offline_cpu NULL
#define rcutree_dead_cpu NULL
#define rcutree_dying_cpu NULL
+static inline void rcu_cpu_starting(unsigned int cpu) { }
#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 37d6fd3b7ff8..56bccb5a8fde 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -1,26 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Read-Copy Update mechanism for mutual exclusion (tree-based version)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
* Copyright IBM Corporation, 2008
*
* Author: Dipankar Sarma <dipankar@in.ibm.com>
- * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm
+ * Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical algorithm
*
- * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
* and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
*
* For detailed explanation of Read-Copy Update mechanism see -
@@ -30,8 +17,9 @@
#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H
+void rcu_softirq_qs(void);
void rcu_note_context_switch(bool preempt);
-int rcu_needs_cpu(u64 basem, u64 *nextevt);
+int rcu_needs_cpu(void);
void rcu_cpu_stall_reset(void);
/*
@@ -39,61 +27,85 @@ void rcu_cpu_stall_reset(void);
* wrapper around rcu_note_context_switch(), which allows TINY_RCU
* to save a few bytes. The caller must have disabled interrupts.
*/
-static inline void rcu_virt_note_context_switch(int cpu)
+static inline void rcu_virt_note_context_switch(void)
{
rcu_note_context_switch(false);
}
-void synchronize_rcu_bh(void);
-void synchronize_sched_expedited(void);
void synchronize_rcu_expedited(void);
+void kvfree_call_rcu(struct rcu_head *head, void *ptr);
+
+void rcu_barrier(void);
+bool rcu_eqs_special_set(int cpu);
+void rcu_momentary_dyntick_idle(void);
+void kfree_rcu_scheduler_running(void);
+bool rcu_gp_might_be_stalled(void);
+
+struct rcu_gp_oldstate {
+ unsigned long rgos_norm;
+ unsigned long rgos_exp;
+};
-void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
+// Maximum number of rcu_gp_oldstate values corresponding to
+// not-yet-completed RCU grace periods.
+#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 4
/**
- * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
+ * same_state_synchronize_rcu_full - Are two old-state values identical?
+ * @rgosp1: First old-state value.
+ * @rgosp2: Second old-state value.
*
- * Wait for an RCU-bh grace period to elapse, but use a "big hammer"
- * approach to force the grace period to end quickly. This consumes
- * significant time on all CPUs and is unfriendly to real-time workloads,
- * so is thus not recommended for any sort of common-case code. In fact,
- * if you are using synchronize_rcu_bh_expedited() in a loop, please
- * restructure your code to batch your updates, and then use a single
- * synchronize_rcu_bh() instead.
+ * The two old-state values must have been obtained from either
+ * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
+ * or get_completed_synchronize_rcu_full(). Returns @true if the two
+ * values are identical and @false otherwise. This allows structures
+ * whose lifetimes are tracked by old-state values to push these values
+ * to a list header, allowing those structures to be slightly smaller.
*
- * Note that it is illegal to call this function while holding any lock
- * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
- * to call this function from a CPU-hotplug notifier. Failing to observe
- * these restriction will result in deadlock.
+ * Note that equality is judged on a bitwise basis, so that an
+ * @rcu_gp_oldstate structure with an already-completed state in one field
+ * will compare not-equal to a structure with an already-completed state
+ * in the other field. After all, the @rcu_gp_oldstate structure is opaque
+ * so how did such a situation come to pass in the first place?
*/
-static inline void synchronize_rcu_bh_expedited(void)
+static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
+ struct rcu_gp_oldstate *rgosp2)
{
- synchronize_sched_expedited();
+ return rgosp1->rgos_norm == rgosp2->rgos_norm && rgosp1->rgos_exp == rgosp2->rgos_exp;
}
-void rcu_barrier(void);
-void rcu_barrier_bh(void);
-void rcu_barrier_sched(void);
+unsigned long start_poll_synchronize_rcu_expedited(void);
+void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp);
+void cond_synchronize_rcu_expedited(unsigned long oldstate);
+void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp);
unsigned long get_state_synchronize_rcu(void);
+void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
+unsigned long start_poll_synchronize_rcu(void);
+void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
+bool poll_state_synchronize_rcu(unsigned long oldstate);
+bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
void cond_synchronize_rcu(unsigned long oldstate);
-unsigned long get_state_synchronize_sched(void);
-void cond_synchronize_sched(unsigned long oldstate);
+void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
+
+#ifdef CONFIG_PROVE_RCU
+void rcu_irq_exit_check_preempt(void);
+#else
+static inline void rcu_irq_exit_check_preempt(void) { }
+#endif
-void rcu_idle_enter(void);
-void rcu_idle_exit(void);
-void rcu_irq_enter(void);
-void rcu_irq_exit(void);
-void rcu_irq_enter_irqson(void);
-void rcu_irq_exit_irqson(void);
-bool rcu_irq_enter_disabled(void);
+struct task_struct;
+void rcu_preempt_deferred_qs(struct task_struct *t);
void exit_rcu(void);
void rcu_scheduler_starting(void);
-extern int rcu_scheduler_active __read_mostly;
+extern int rcu_scheduler_active;
void rcu_end_inkernel_boot(void);
+bool rcu_inkernel_boot_has_ended(void);
bool rcu_is_watching(void);
+#ifndef CONFIG_PREEMPTION
void rcu_all_qs(void);
+#endif
/* RCUtree hotplug events */
int rcutree_prepare_cpu(unsigned int cpu);
@@ -101,5 +113,6 @@ int rcutree_online_cpu(unsigned int cpu);
int rcutree_offline_cpu(unsigned int cpu);
int rcutree_dead_cpu(unsigned int cpu);
int rcutree_dying_cpu(unsigned int cpu);
+void rcu_cpu_starting(unsigned int cpu);
#endif /* __LINUX_RCUTREE_H */
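Editorial sketch, not part of the patch: the polling grace-period calls
declared above let an updater snapshot grace-period state and test for
completion later instead of blocking in synchronize_rcu(). A minimal
illustration, assuming a hypothetical retired-object list; only
get_state_synchronize_rcu() and poll_state_synchronize_rcu() come from
this API:

	static LIST_HEAD(retired_list);		/* hypothetical */

	struct cache_entry {
		struct list_head node;
		unsigned long rcu_cookie;	/* grace-period state */
	};

	static void cache_entry_retire(struct cache_entry *e)
	{
		/* Snapshot the current grace-period state. */
		e->rcu_cookie = get_state_synchronize_rcu();
		list_add(&e->node, &retired_list);
	}

	static void cache_reap_one(struct cache_entry *e)
	{
		/* Free only once a full grace period has elapsed. */
		if (poll_state_synchronize_rcu(e->rcu_cookie)) {
			list_del(&e->node);
			kfree(e);
		}
	}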
diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h
index a4ede51b3e7c..8052d34da782 100644
--- a/include/linux/rcuwait.h
+++ b/include/linux/rcuwait.h
@@ -1,23 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCUWAIT_H_
#define _LINUX_RCUWAIT_H_
#include <linux/rcupdate.h>
+#include <linux/sched/signal.h>
/*
* rcuwait provides a way of blocking and waking up a single
- * task in an rcu-safe manner; where it is forbidden to use
- * after exit_notify(). task_struct is not properly rcu protected,
- * unless dealing with rcu-aware lists, ie: find_task_by_*().
+ * task in an rcu-safe manner.
*
- * Alternatively we have task_rcu_dereference(), but the return
- * semantics have different implications which would break the
- * wakeup side. The only time @task is non-nil is when a user is
- * blocked (or checking if it needs to) on a condition, and reset
- * as soon as we know that the condition has succeeded and are
- * awoken.
+ * The only time @task is non-nil is when a user is blocked (or
+ * checking if it needs to be) on a condition, and it is reset as soon
+ * as we know that the condition has succeeded and we are awoken.
*/
struct rcuwait {
- struct task_struct *task;
+ struct task_struct __rcu *task;
};
#define __RCUWAIT_INITIALIZER(name) \
@@ -28,36 +25,52 @@ static inline void rcuwait_init(struct rcuwait *w)
w->task = NULL;
}
-extern void rcuwait_wake_up(struct rcuwait *w);
+/*
+ * Note: this provides no serialization and, just as with waitqueues,
+ * requires care when estimating whether or not the wait is active.
+ */
+static inline int rcuwait_active(struct rcuwait *w)
+{
+ return !!rcu_access_pointer(w->task);
+}
+
+extern int rcuwait_wake_up(struct rcuwait *w);
/*
* The caller is responsible for locking around rcuwait_wait_event(),
- * such that writes to @task are properly serialized.
+ * and [prepare_to/finish]_rcuwait() such that writes to @task are
+ * properly serialized.
*/
-#define rcuwait_wait_event(w, condition) \
+
+static inline void prepare_to_rcuwait(struct rcuwait *w)
+{
+ rcu_assign_pointer(w->task, current);
+}
+
+extern void finish_rcuwait(struct rcuwait *w);
+
+#define rcuwait_wait_event(w, condition, state) \
({ \
- /* \
- * Complain if we are called after do_exit()/exit_notify(), \
- * as we cannot rely on the rcu critical region for the \
- * wakeup side. \
- */ \
- WARN_ON(current->exit_state); \
- \
- rcu_assign_pointer((w)->task, current); \
+ int __ret = 0; \
+ prepare_to_rcuwait(w); \
for (;;) { \
/* \
* Implicit barrier (A) pairs with (B) in \
* rcuwait_wake_up(). \
*/ \
- set_current_state(TASK_UNINTERRUPTIBLE); \
+ set_current_state(state); \
if (condition) \
break; \
\
+ if (signal_pending_state(state, current)) { \
+ __ret = -EINTR; \
+ break; \
+ } \
+ \
schedule(); \
} \
- \
- WRITE_ONCE((w)->task, NULL); \
- __set_current_state(TASK_RUNNING); \
+ finish_rcuwait(w); \
+ __ret; \
})
#endif /* _LINUX_RCUWAIT_H_ */
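Editorial sketch, not part of the patch: one task blocks in
rcuwait_wait_event() until a condition holds, another sets the condition
and calls rcuwait_wake_up(). The names below are hypothetical, and the
caller is still responsible for the serialization around @task that the
comment above demands:

	static struct rcuwait my_wait;
	static bool done;

	static void waiter(void)
	{
		rcuwait_init(&my_wait);
		/* Returns 0; with TASK_INTERRUPTIBLE it may return -EINTR. */
		rcuwait_wait_event(&my_wait, READ_ONCE(done),
				   TASK_UNINTERRUPTIBLE);
	}

	static void waker(void)
	{
		WRITE_ONCE(done, true);
		rcuwait_wake_up(&my_wait);	/* no-op if nobody waits */
	}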
diff --git a/include/linux/rcuwait_api.h b/include/linux/rcuwait_api.h
new file mode 100644
index 000000000000..f962e28544dd
--- /dev/null
+++ b/include/linux/rcuwait_api.h
@@ -0,0 +1 @@
+#include <linux/rcuwait.h>
diff --git a/include/linux/reboot-mode.h b/include/linux/reboot-mode.h
index 75f7fe5c881f..4a2abb38d1d6 100644
--- a/include/linux/reboot-mode.h
+++ b/include/linux/reboot-mode.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __REBOOT_MODE_H__
#define __REBOOT_MODE_H__
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index a7ff409f386d..2b6bb593be5b 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_REBOOT_H
#define _LINUX_REBOOT_H
@@ -5,12 +6,16 @@
#include <linux/notifier.h>
#include <uapi/linux/reboot.h>
+struct device;
+struct sys_off_handler;
+
#define SYS_DOWN 0x0001 /* Notify of system down */
#define SYS_RESTART SYS_DOWN
#define SYS_HALT 0x0002 /* Notify of system halt */
#define SYS_POWER_OFF 0x0003 /* Notify of system power off */
enum reboot_mode {
+ REBOOT_UNDEFINED = -1,
REBOOT_COLD = 0,
REBOOT_WARM,
REBOOT_HARD,
@@ -18,6 +23,7 @@ enum reboot_mode {
REBOOT_GPIO,
};
extern enum reboot_mode reboot_mode;
+extern enum reboot_mode panic_reboot_mode;
enum reboot_type {
BOOT_TRIPLE = 't',
@@ -38,6 +44,8 @@ extern int reboot_force;
extern int register_reboot_notifier(struct notifier_block *);
extern int unregister_reboot_notifier(struct notifier_block *);
+extern int devm_register_reboot_notifier(struct device *, struct notifier_block *);
+
extern int register_restart_handler(struct notifier_block *);
extern int unregister_restart_handler(struct notifier_block *);
extern void do_kernel_restart(char *cmd);
@@ -55,6 +63,103 @@ extern void machine_shutdown(void);
struct pt_regs;
extern void machine_crash_shutdown(struct pt_regs *);
+void do_kernel_power_off(void);
+
+/*
+ * sys-off handler API.
+ */
+
+/*
+ * Standard sys-off priority levels. Users are expected to set priorities
+ * relative to the standard levels.
+ *
+ * SYS_OFF_PRIO_PLATFORM: Use this for platform-level handlers.
+ *
+ * SYS_OFF_PRIO_LOW: Use this for a handler of last resort.
+ *
+ * SYS_OFF_PRIO_DEFAULT: Use this for normal handlers.
+ *
+ * SYS_OFF_PRIO_HIGH: Use this for higher priority handlers.
+ *
+ * SYS_OFF_PRIO_FIRMWARE: Use this if the handler uses a firmware call.
+ */
+#define SYS_OFF_PRIO_PLATFORM -256
+#define SYS_OFF_PRIO_LOW -128
+#define SYS_OFF_PRIO_DEFAULT 0
+#define SYS_OFF_PRIO_HIGH 192
+#define SYS_OFF_PRIO_FIRMWARE 224
+
+enum sys_off_mode {
+ /**
+ * @SYS_OFF_MODE_POWER_OFF_PREPARE:
+ *
+ * Handlers prepare the system to be powered off. Handlers are
+ * allowed to sleep.
+ */
+ SYS_OFF_MODE_POWER_OFF_PREPARE,
+
+ /**
+ * @SYS_OFF_MODE_POWER_OFF:
+ *
+ * Handlers power off the system. Handlers must not sleep.
+ */
+ SYS_OFF_MODE_POWER_OFF,
+
+ /**
+ * @SYS_OFF_MODE_RESTART_PREPARE:
+ *
+ * Handlers prepare the system to be restarted. Handlers are
+ * allowed to sleep.
+ */
+ SYS_OFF_MODE_RESTART_PREPARE,
+
+ /**
+ * @SYS_OFF_MODE_RESTART:
+ *
+ * Handlers restart the system. Handlers must not sleep.
+ */
+ SYS_OFF_MODE_RESTART,
+};
+
+/**
+ * struct sys_off_data - sys-off callback argument
+ *
+ * @mode: Mode ID. Currently used only by the sys-off restart mode,
+ * see enum reboot_mode for the available modes.
+ * @cb_data: User's callback data.
+ * @cmd: Command string. Currently used only by the sys-off restart mode,
+ * NULL otherwise.
+ */
+struct sys_off_data {
+ int mode;
+ void *cb_data;
+ const char *cmd;
+};
+
+struct sys_off_handler *
+register_sys_off_handler(enum sys_off_mode mode,
+ int priority,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data);
+void unregister_sys_off_handler(struct sys_off_handler *handler);
+
+int devm_register_sys_off_handler(struct device *dev,
+ enum sys_off_mode mode,
+ int priority,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data);
+
+int devm_register_power_off_handler(struct device *dev,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data);
+
+int devm_register_restart_handler(struct device *dev,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data);
+
+int register_platform_power_off(void (*power_off)(void));
+void unregister_platform_power_off(void (*power_off)(void));
+
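Editorial sketch, not part of the patch: how a driver might use the
sys-off handler API declared above. The device, private struct, register
and regmap write are hypothetical, and the NOTIFY_DONE return value is an
assumption about a notifier-style return convention:

	struct pmic_priv {
		struct regmap *regmap;
	};

	static int pmic_power_off(struct sys_off_data *data)
	{
		struct pmic_priv *priv = data->cb_data;

		regmap_write(priv->regmap, PMIC_REG_POWER_OFF, 1);
		return NOTIFY_DONE;
	}

	/* in probe(): managed registration, unregistered on unbind */
	err = devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF,
					    SYS_OFF_PRIO_DEFAULT,
					    pmic_power_off, priv);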
/*
* Architecture-independent implementations of sys_reboot commands.
*/
@@ -63,15 +168,13 @@ extern void kernel_restart_prepare(char *cmd);
extern void kernel_restart(char *cmd);
extern void kernel_halt(void);
extern void kernel_power_off(void);
+extern bool kernel_can_power_off(void);
-extern int C_A_D; /* for sysctl */
void ctrl_alt_del(void);
-#define POWEROFF_CMD_PATH_LEN 256
-extern char poweroff_cmd[POWEROFF_CMD_PATH_LEN];
-
extern void orderly_poweroff(bool force);
extern void orderly_reboot(void);
+void hw_protection_shutdown(const char *reason, int ms_until_forced);
/*
* Emergency restart, callable from an interrupt handler.
diff --git a/include/linux/reciprocal_div.h b/include/linux/reciprocal_div.h
index 8c5a3fb6c6c5..585ce89c0f33 100644
--- a/include/linux/reciprocal_div.h
+++ b/include/linux/reciprocal_div.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RECIPROCAL_DIV_H
#define _LINUX_RECIPROCAL_DIV_H
@@ -24,6 +25,9 @@ struct reciprocal_value {
u8 sh1, sh2;
};
+/* "reciprocal_value" and "reciprocal_divide" together implement the basic
+ * version of the algorithm described in Figure 4.1 of the paper.
+ */
struct reciprocal_value reciprocal_value(u32 d);
static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R)
@@ -32,4 +36,69 @@ static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R)
return (t + ((a - t) >> R.sh1)) >> R.sh2;
}
+struct reciprocal_value_adv {
+ u32 m;
+ u8 sh, exp;
+ bool is_wide_m;
+};
+
+/* "reciprocal_value_adv" implements the advanced version of the algorithm
+ * described in Figure 4.2 of the paper, except when "divisor > (1U << 31)",
+ * where the ceil(log2(d)) result is 32, which would require a u128 divide on
+ * the host. That exception case can easily be handled before calling
+ * "reciprocal_value_adv".
+ *
+ * The advanced version requires more complex calculation to get the reciprocal
+ * multiplier and other control variables, but can then reduce the required
+ * emulation operations.
+ *
+ * It makes no sense to use this advanced version for host divide emulation;
+ * the extra complexity of calculating the multiplier etc. would completely
+ * negate our savings on emulation operations.
+ *
+ * However, it makes sense to use it for JIT divide code generation for which
+ * we are willing to trade host performance for that of the JITed code. As shown
+ * by the following pseudo code, the required emulation operations could go down
+ * from 6 (the basic version) to 3 or 4.
+ *
+ * To use the result of "reciprocal_value_adv", suppose we want to calculate
+ * n/d, the pseudo C code will be:
+ *
+ * struct reciprocal_value_adv rvalue;
+ * u8 pre_shift, exp;
+ *
+ * // handle exception case.
+ * if (d >= (1U << 31)) {
+ * result = n >= d;
+ * return;
+ * }
+ *
+ * rvalue = reciprocal_value_adv(d, 32);
+ * exp = rvalue.exp;
+ * if (rvalue.is_wide_m && !(d & 1)) {
+ * // floor(log2(d & (2^32 - d)))
+ * pre_shift = fls(d & -d) - 1;
+ * rvalue = reciprocal_value_adv(d >> pre_shift, 32 - pre_shift);
+ * } else {
+ * pre_shift = 0;
+ * }
+ *
+ * // code generation starts.
+ * if (imm == 1U << exp) {
+ * result = n >> exp;
+ * } else if (rvalue.is_wide_m) {
+ * // pre_shift must be zero when reached here.
+ * t = (n * rvalue.m) >> 32;
+ * result = n - t;
+ * result >>= 1;
+ * result += t;
+ * result >>= rvalue.sh - 1;
+ * } else {
+ * if (pre_shift)
+ * result = n >> pre_shift;
+ * result = ((u64)result * rvalue.m) >> 32;
+ * result >>= rvalue.sh;
+ * }
+ */
+struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec);
+
#endif /* _LINUX_RECIPROCAL_DIV_H */
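Editorial sketch, not part of the patch: the basic API above is typically
used by precomputing reciprocal_value() once for a fixed, nonzero divisor
and then replacing each runtime division with reciprocal_divide(). The
hash-bucket example below is hypothetical:

	static struct reciprocal_value bucket_recip;

	static void hash_init(u32 nr_buckets)
	{
		bucket_recip = reciprocal_value(nr_buckets);	/* once */
	}

	static u32 bucket_of(u32 hash, u32 nr_buckets)
	{
		u32 q = reciprocal_divide(hash, bucket_recip);

		return hash - q * nr_buckets;	/* hash % nr_buckets */
	}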
diff --git a/include/linux/ref_tracker.h b/include/linux/ref_tracker.h
new file mode 100644
index 000000000000..9ca353ab712b
--- /dev/null
+++ b/include/linux/ref_tracker.h
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#ifndef _LINUX_REF_TRACKER_H
+#define _LINUX_REF_TRACKER_H
+#include <linux/refcount.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/stackdepot.h>
+
+struct ref_tracker;
+
+struct ref_tracker_dir {
+#ifdef CONFIG_REF_TRACKER
+ spinlock_t lock;
+ unsigned int quarantine_avail;
+ refcount_t untracked;
+ refcount_t no_tracker;
+ bool dead;
+ struct list_head list; /* List of active trackers */
+ struct list_head quarantine; /* List of dead trackers */
+#endif
+};
+
+#ifdef CONFIG_REF_TRACKER
+static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
+ unsigned int quarantine_count)
+{
+ INIT_LIST_HEAD(&dir->list);
+ INIT_LIST_HEAD(&dir->quarantine);
+ spin_lock_init(&dir->lock);
+ dir->quarantine_avail = quarantine_count;
+ dir->dead = false;
+ refcount_set(&dir->untracked, 1);
+ refcount_set(&dir->no_tracker, 1);
+ stack_depot_init();
+}
+
+void ref_tracker_dir_exit(struct ref_tracker_dir *dir);
+
+void ref_tracker_dir_print(struct ref_tracker_dir *dir,
+ unsigned int display_limit);
+
+int ref_tracker_alloc(struct ref_tracker_dir *dir,
+ struct ref_tracker **trackerp, gfp_t gfp);
+
+int ref_tracker_free(struct ref_tracker_dir *dir,
+ struct ref_tracker **trackerp);
+
+#else /* CONFIG_REF_TRACKER */
+
+static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
+ unsigned int quarantine_count)
+{
+}
+
+static inline void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
+{
+}
+
+static inline void ref_tracker_dir_print(struct ref_tracker_dir *dir,
+ unsigned int display_limit)
+{
+}
+
+static inline int ref_tracker_alloc(struct ref_tracker_dir *dir,
+ struct ref_tracker **trackerp,
+ gfp_t gfp)
+{
+ return 0;
+}
+
+static inline int ref_tracker_free(struct ref_tracker_dir *dir,
+ struct ref_tracker **trackerp)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* _LINUX_REF_TRACKER_H */
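Editorial sketch, not part of the patch: the expected lifecycle of the
API above. A directory is initialized once, every reference allocates a
tracker, each release frees it, and the exit path reports leaks. All
names below are hypothetical:

	static struct ref_tracker_dir obj_dir;

	static void obj_init(void)
	{
		ref_tracker_dir_init(&obj_dir, 16);	/* quarantine 16 */
	}

	static int obj_hold(struct ref_tracker **trackerp)
	{
		return ref_tracker_alloc(&obj_dir, trackerp, GFP_KERNEL);
	}

	static void obj_put(struct ref_tracker **trackerp)
	{
		ref_tracker_free(&obj_dir, trackerp);
	}

	static void obj_exit(void)
	{
		ref_tracker_dir_print(&obj_dir, 16);	/* report leaks */
		ref_tracker_dir_exit(&obj_dir);
	}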
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 591792c8e5b0..a62fcca97486 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -1,16 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Variant of atomic_t specialized for reference counts.
+ *
+ * The interface matches the atomic_t interface (to aid in porting) but only
+ * provides the few functions one should use for reference counting.
+ *
+ * Saturation semantics
+ * ====================
+ *
+ * refcount_t differs from atomic_t in that the counter saturates at
+ * REFCOUNT_SATURATED and will not move once there. This avoids wrapping the
+ * counter and causing 'spurious' use-after-free issues. In order to avoid the
+ * cost associated with introducing cmpxchg() loops into all of the saturating
+ * operations, we temporarily allow the counter to take on an unchecked value
+ * and then explicitly set it to REFCOUNT_SATURATED on detecting that underflow
+ * or overflow has occurred. Although this is racy when multiple threads
+ * access the refcount concurrently, by placing REFCOUNT_SATURATED roughly
+ * equidistant from 0 and INT_MAX we minimise the scope for error:
+ *
+ * INT_MAX REFCOUNT_SATURATED UINT_MAX
+ * 0 (0x7fff_ffff) (0xc000_0000) (0xffff_ffff)
+ * +--------------------------------+----------------+----------------+
+ * <---------- bad value! ---------->
+ *
+ * (in a signed view of the world, the "bad value" range corresponds to
+ * a negative counter value).
+ *
+ * As an example, consider a refcount_inc() operation that causes the counter
+ * to overflow:
+ *
+ * int old = atomic_fetch_add_relaxed(1, &r->refs);
+ * // old is INT_MAX, refcount now INT_MIN (0x8000_0000)
+ * if (old < 0)
+ * atomic_set(&r->refs, REFCOUNT_SATURATED);
+ *
+ * If another thread also performs a refcount_inc() operation between the two
+ * atomic operations, then the count will continue to edge closer to 0. If it
+ * reaches a value of 1 before /any/ of the threads reset it to the saturated
+ * value, then a concurrent refcount_dec_and_test() may erroneously free the
+ * underlying object.
+ *
+ * Linux limits the maximum number of tasks to PID_MAX_LIMIT, which is currently
+ * 0x400000 (and can't easily be raised in the future beyond FUTEX_TID_MASK).
+ * With the current PID limit, if no batched refcounting operations are used and
+ * the attacker can't repeatedly trigger kernel oopses in the middle of refcount
+ * operations, this makes it impossible for a saturated refcount to leave the
+ * saturation range, even if it is possible for multiple uses of the same
+ * refcount to nest in the context of a single task:
+ *
+ * (UINT_MAX+1-REFCOUNT_SATURATED) / PID_MAX_LIMIT =
+ * 0x40000000 / 0x400000 = 0x100 = 256
+ *
+ * If hundreds of references are added/removed with a single refcounting
+ * operation, it may potentially be possible to leave the saturation range; but
+ * given the precise timing details involved with the round-robin scheduling of
+ * each thread manipulating the refcount and the need to hit the race multiple
+ * times in succession, there doesn't appear to be a practical avenue of attack
+ * even if using refcount_add() operations with larger increments.
+ *
+ * Memory ordering
+ * ===============
+ *
+ * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
+ * and provide only what is strictly required for refcounts.
+ *
+ * The increments are fully relaxed; these will not provide ordering. The
+ * rationale is that whatever is used to obtain the object we're increasing the
+ * reference count on will provide the ordering. For locked data structures,
+ * it's the lock acquire; for RCU/lockless data structures it's the dependent
+ * load.
+ *
+ * Do note that inc_not_zero() provides a control dependency which will order
+ * future stores against the inc; this ensures we'll never modify the object
+ * if we did not in fact acquire a reference.
+ *
+ * The decrements will provide release order, such that all the prior loads and
+ * stores will be issued before; they also provide a control dependency, which
+ * orders us against the subsequent free().
+ *
+ * The control dependency is against the load of the cmpxchg (ll/sc) that
+ * succeeded. This means the stores aren't fully ordered, but this is fine
+ * because the 1->0 transition indicates no concurrency.
+ *
+ * Note that the allocator is responsible for ordering things between free()
+ * and alloc().
+ *
+ * The decrements dec_and_test() and sub_and_test() also provide acquire
+ * ordering on success.
+ *
+ */
+
#ifndef _LINUX_REFCOUNT_H
#define _LINUX_REFCOUNT_H
#include <linux/atomic.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/limits.h>
+#include <linux/spinlock_types.h>
+
+struct mutex;
/**
- * refcount_t - variant of atomic_t specialized for reference counts
+ * typedef refcount_t - variant of atomic_t specialized for reference counts
* @refs: atomic_t counter field
*
- * The counter saturates at UINT_MAX and will not move once
+ * The counter saturates at REFCOUNT_SATURATED and will not move once
* there. This avoids wrapping the counter and causing 'spurious'
* use-after-free bugs.
*/
@@ -19,13 +113,25 @@ typedef struct refcount_struct {
} refcount_t;
#define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
+#define REFCOUNT_MAX INT_MAX
+#define REFCOUNT_SATURATED (INT_MIN / 2)
+
+enum refcount_saturation_type {
+ REFCOUNT_ADD_NOT_ZERO_OVF,
+ REFCOUNT_ADD_OVF,
+ REFCOUNT_ADD_UAF,
+ REFCOUNT_SUB_UAF,
+ REFCOUNT_DEC_LEAK,
+};
+
+void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t);
/**
* refcount_set - set a refcount's value
* @r: the refcount
* @n: value to which the refcount will be set
*/
-static inline void refcount_set(refcount_t *r, unsigned int n)
+static inline void refcount_set(refcount_t *r, int n)
{
atomic_set(&r->refs, n);
}
@@ -41,57 +147,223 @@ static inline unsigned int refcount_read(const refcount_t *r)
return atomic_read(&r->refs);
}
-#ifdef CONFIG_REFCOUNT_FULL
-extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r);
-extern void refcount_add(unsigned int i, refcount_t *r);
+static inline __must_check bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
+{
+ int old = refcount_read(r);
+
+ do {
+ if (!old)
+ break;
+ } while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));
+
+ if (oldp)
+ *oldp = old;
+
+ if (unlikely(old < 0 || old + i < 0))
+ refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);
+
+ return old;
+}
+
+/**
+ * refcount_add_not_zero - add a value to a refcount unless it is 0
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Will saturate at REFCOUNT_SATURATED and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ *
+ * Return: false if the passed refcount is 0, true otherwise
+ */
+static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
+{
+ return __refcount_add_not_zero(i, r, NULL);
+}
+
+static inline void __refcount_add(int i, refcount_t *r, int *oldp)
+{
+ int old = atomic_fetch_add_relaxed(i, &r->refs);
-extern __must_check bool refcount_inc_not_zero(refcount_t *r);
-extern void refcount_inc(refcount_t *r);
+ if (oldp)
+ *oldp = old;
-extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r);
+ if (unlikely(!old))
+ refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
+ else if (unlikely(old < 0 || old + i < 0))
+ refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
+}
-extern __must_check bool refcount_dec_and_test(refcount_t *r);
-extern void refcount_dec(refcount_t *r);
-#else
-static inline __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r)
+/**
+ * refcount_add - add a value to a refcount
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ */
+static inline void refcount_add(int i, refcount_t *r)
{
- return atomic_add_unless(&r->refs, i, 0);
+ __refcount_add(i, r, NULL);
}
-static inline void refcount_add(unsigned int i, refcount_t *r)
+static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp)
{
- atomic_add(i, &r->refs);
+ return __refcount_add_not_zero(1, r, oldp);
}
+/**
+ * refcount_inc_not_zero - increment a refcount unless it is 0
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
+ * and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Return: true if the increment was successful, false otherwise
+ */
static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
- return atomic_add_unless(&r->refs, 1, 0);
+ return __refcount_inc_not_zero(r, NULL);
}
+static inline void __refcount_inc(refcount_t *r, int *oldp)
+{
+ __refcount_add(1, r, oldp);
+}
+
+/**
+ * refcount_inc - increment a refcount
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller already has a
+ * reference on the object.
+ *
+ * Will WARN if the refcount is 0, as this represents a possible use-after-free
+ * condition.
+ */
static inline void refcount_inc(refcount_t *r)
{
- atomic_inc(&r->refs);
+ __refcount_inc(r, NULL);
+}
+
+static inline __must_check bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
+{
+ int old = atomic_fetch_sub_release(i, &r->refs);
+
+ if (oldp)
+ *oldp = old;
+
+ if (old == i) {
+ smp_acquire__after_ctrl_dep();
+ return true;
+ }
+
+ if (unlikely(old < 0 || old - i < 0))
+ refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
+
+ return false;
+}
+
+/**
+ * refcount_sub_and_test - subtract from a refcount and test if it is 0
+ * @i: amount to subtract from the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_dec_and_test(), but it will WARN, return false and
+ * ultimately leak on underflow and will fail to decrement when saturated
+ * at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides an acquire ordering on success such that free()
+ * must come after.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_dec(), or one of its variants, should instead be used to
+ * decrement a reference count.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
+ */
+static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
+{
+ return __refcount_sub_and_test(i, r, NULL);
}
-static inline __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp)
{
- return atomic_sub_and_test(i, &r->refs);
+ return __refcount_sub_and_test(1, r, oldp);
}
+/**
+ * refcount_dec_and_test - decrement a refcount and test if it is 0
+ * @r: the refcount
+ *
+ * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
+ * decrement when saturated at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides an acquire ordering on success such that free()
+ * must come after.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
+ */
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
- return atomic_dec_and_test(&r->refs);
+ return __refcount_dec_and_test(r, NULL);
+}
+
+static inline void __refcount_dec(refcount_t *r, int *oldp)
+{
+ int old = atomic_fetch_sub_release(1, &r->refs);
+
+ if (oldp)
+ *oldp = old;
+
+ if (unlikely(old <= 1))
+ refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
}
+/**
+ * refcount_dec - decrement a refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
+ * when saturated at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before.
+ */
static inline void refcount_dec(refcount_t *r)
{
- atomic_dec(&r->refs);
+ __refcount_dec(r, NULL);
}
-#endif /* CONFIG_REFCOUNT_FULL */
extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
-extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
-extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
-
+extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(lock);
+extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(lock);
+extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
+ spinlock_t *lock,
+ unsigned long *flags) __cond_acquires(lock);
#endif /* _LINUX_REFCOUNT_H */
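Editorial sketch, not part of the patch: the canonical get/put pattern
built from the primitives above. "struct my_obj" is hypothetical and is
assumed to start life with its refcount set to 1 via REFCOUNT_INIT() or
refcount_set():

	struct my_obj {
		refcount_t ref;
	};

	static struct my_obj *my_obj_get(struct my_obj *obj)
	{
		/* Lookup path: fail rather than revive a dying object. */
		if (!refcount_inc_not_zero(&obj->ref))
			return NULL;
		return obj;
	}

	static void my_obj_put(struct my_obj *obj)
	{
		/* Release before the test, acquire on the 1->0 transition. */
		if (refcount_dec_and_test(&obj->ref))
			kfree(obj);
	}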
diff --git a/include/linux/refcount_api.h b/include/linux/refcount_api.h
new file mode 100644
index 000000000000..5f032589f568
--- /dev/null
+++ b/include/linux/refcount_api.h
@@ -0,0 +1 @@
+#include <linux/refcount.h>
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 978abfbac617..c2b9cc5db824 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __LINUX_REGMAP_H
#define __LINUX_REGMAP_H
@@ -7,36 +8,58 @@
* Copyright 2011 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/list.h>
#include <linux/rbtree.h>
+#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/lockdep.h>
+#include <linux/iopoll.h>
+#include <linux/fwnode.h>
struct module;
+struct clk;
struct device;
+struct device_node;
+struct fsi_device;
struct i2c_client;
+struct i3c_device;
struct irq_domain;
+struct mdio_device;
+struct slim_device;
struct spi_device;
struct spmi_device;
struct regmap;
struct regmap_range_cfg;
struct regmap_field;
struct snd_ac97;
+struct sdw_slave;
+
+/*
+ * regmap_mdio address encoding. IEEE 802.3ae clause 45 addresses consist of a
+ * device address and a register address.
+ */
+#define REGMAP_MDIO_C45_DEVAD_SHIFT 16
+#define REGMAP_MDIO_C45_DEVAD_MASK GENMASK(20, 16)
+#define REGMAP_MDIO_C45_REGNUM_MASK GENMASK(15, 0)
+
+/*
+ * regmap.reg_shift indicates by how much we must shift registers prior to
+ * performing any operation. It's a signed value; positive numbers mean
+ * downshifting the register's address, while negative numbers mean upshifting.
+ */
+#define REGMAP_UPSHIFT(s) (-(s))
+#define REGMAP_DOWNSHIFT(s) (s)
/* An enum of all the supported cache types */
enum regcache_type {
REGCACHE_NONE,
REGCACHE_RBTREE,
- REGCACHE_COMPRESSED,
REGCACHE_FLAT,
+ REGCACHE_MAPLE,
};
/**
@@ -69,35 +92,12 @@ struct reg_sequence {
unsigned int delay_us;
};
-#define regmap_update_bits(map, reg, mask, val) \
- regmap_update_bits_base(map, reg, mask, val, NULL, false, false)
-#define regmap_update_bits_async(map, reg, mask, val)\
- regmap_update_bits_base(map, reg, mask, val, NULL, true, false)
-#define regmap_update_bits_check(map, reg, mask, val, change)\
- regmap_update_bits_base(map, reg, mask, val, change, false, false)
-#define regmap_update_bits_check_async(map, reg, mask, val, change)\
- regmap_update_bits_base(map, reg, mask, val, change, true, false)
-
-#define regmap_write_bits(map, reg, mask, val) \
- regmap_update_bits_base(map, reg, mask, val, NULL, false, true)
-
-#define regmap_field_write(field, val) \
- regmap_field_update_bits_base(field, ~0, val, NULL, false, false)
-#define regmap_field_force_write(field, val) \
- regmap_field_update_bits_base(field, ~0, val, NULL, false, true)
-#define regmap_field_update_bits(field, mask, val)\
- regmap_field_update_bits_base(field, mask, val, NULL, false, false)
-#define regmap_field_force_update_bits(field, mask, val) \
- regmap_field_update_bits_base(field, mask, val, NULL, false, true)
-
-#define regmap_fields_write(field, id, val) \
- regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, false)
-#define regmap_fields_force_write(field, id, val) \
- regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, true)
-#define regmap_fields_update_bits(field, id, mask, val)\
- regmap_fields_update_bits_base(field, id, mask, val, NULL, false, false)
-#define regmap_fields_force_update_bits(field, id, mask, val) \
- regmap_fields_update_bits_base(field, id, mask, val, NULL, false, true)
+#define REG_SEQ(_reg, _def, _delay_us) { \
+ .reg = _reg, \
+ .def = _def, \
+ .delay_us = _delay_us, \
+ }
+#define REG_SEQ0(_reg, _def) REG_SEQ(_reg, _def, 0)
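Editorial sketch, not part of the patch: a register sequence built with
the helpers above and played back with regmap_multi_reg_write() (declared
later in this header). The addresses, values and delays are hypothetical:

	static const struct reg_sequence codec_init_seq[] = {
		REG_SEQ0(0x00, 0x01),		/* soft reset, no delay */
		REG_SEQ(0x01, 0x80, 1000),	/* PLL on, wait 1 ms */
		REG_SEQ0(0x02, 0x3f),		/* unmute all channels */
	};

	/* e.g. in probe():
	 *	regmap_multi_reg_write(map, codec_init_seq,
	 *			       ARRAY_SIZE(codec_init_seq));
	 */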
/**
* regmap_read_poll_timeout - Poll until a condition is met or a timeout occurs
@@ -108,7 +108,7 @@ struct reg_sequence {
* @cond: Break condition (usually involving @val)
* @sleep_us: Maximum time to sleep between reads in us (0
* tight-loops). Should be less than ~20ms since usleep_range
- * is used (see Documentation/timers/timers-howto.txt).
+ * is used (see Documentation/timers/timers-howto.rst).
* @timeout_us: Timeout in us, 0 means never timeout
*
* Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read
@@ -120,23 +120,81 @@ struct reg_sequence {
*/
#define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \
({ \
- ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
- int pollret; \
- might_sleep_if(sleep_us); \
+ int __ret, __tmp; \
+ __tmp = read_poll_timeout(regmap_read, __ret, __ret || (cond), \
+ sleep_us, timeout_us, false, (map), (addr), &(val)); \
+ __ret ?: __tmp; \
+})
+
+/**
+ * regmap_read_poll_timeout_atomic - Poll until a condition is met or a timeout occurs
+ *
+ * @map: Regmap to read from
+ * @addr: Address to poll
+ * @val: Unsigned integer variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @delay_us: Time to udelay between reads in us (0 tight-loops).
+ * Should be less than ~10us since udelay is used
+ * (see Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read
+ * error return value in case of an error read. In the two former cases,
+ * the last read value at @addr is stored in @val.
+ *
+ * This is modelled after the readx_poll_timeout_atomic macros in linux/iopoll.h.
+ *
+ * Note: In general regmap cannot be used in atomic context. If you want to use
+ * this macro then first set up your regmap for atomic use (flat or no cache
+ * and MMIO regmap).
+ */
+#define regmap_read_poll_timeout_atomic(map, addr, val, cond, delay_us, timeout_us) \
+({ \
+ u64 __timeout_us = (timeout_us); \
+ unsigned long __delay_us = (delay_us); \
+ ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
+ int __ret; \
for (;;) { \
- pollret = regmap_read((map), (addr), &(val)); \
- if (pollret) \
+ __ret = regmap_read((map), (addr), &(val)); \
+ if (__ret) \
break; \
if (cond) \
break; \
- if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
- pollret = regmap_read((map), (addr), &(val)); \
+ if ((__timeout_us) && \
+ ktime_compare(ktime_get(), __timeout) > 0) { \
+ __ret = regmap_read((map), (addr), &(val)); \
break; \
} \
- if (sleep_us) \
- usleep_range((sleep_us >> 2) + 1, sleep_us); \
+ if (__delay_us) \
+ udelay(__delay_us); \
} \
- pollret ?: ((cond) ? 0 : -ETIMEDOUT); \
+ __ret ?: ((cond) ? 0 : -ETIMEDOUT); \
+})
+
+/**
+ * regmap_field_read_poll_timeout - Poll until a condition is met or timeout
+ *
+ * @field: Regmap field to read from
+ * @val: Unsigned integer variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0
+ * tight-loops). Should be less than ~20ms since usleep_range
+ * is used (see Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_field_read
+ * error return value in case of an error read. In the two former cases,
+ * the last read value of @field is stored in @val. Must not be called
+ * from atomic context if sleep_us or timeout_us are used.
+ *
+ * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
+ */
+#define regmap_field_read_poll_timeout(field, val, cond, sleep_us, timeout_us) \
+({ \
+ int __ret, __tmp; \
+ __tmp = read_poll_timeout(regmap_field_read, __ret, __ret || (cond), \
+ sleep_us, timeout_us, false, (field), &(val)); \
+ __ret ?: __tmp; \
})
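Editorial sketch, not part of the patch: a typical use of
regmap_read_poll_timeout(), spinning on a status register until a ready
bit is set. CHIP_REG_STATUS and CHIP_STATUS_READY are hypothetical:

	static int wait_for_ready(struct regmap *map)
	{
		unsigned int val;

		/* sleep up to 10 us between reads, time out after 100 ms */
		return regmap_read_poll_timeout(map, CHIP_REG_STATUS, val,
						val & CHIP_STATUS_READY,
						10, 100000);
	}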
#ifdef CONFIG_REGMAP
@@ -196,6 +254,11 @@ typedef void (*regmap_unlock)(void *);
* @reg_stride: The register address stride. Valid register addresses are a
* multiple of this value. If set to 0, a value of 1 will be
* used.
+ * @reg_shift: The number of bits to shift the register before performing any
+ * operations. Any positive number will be downshifted, and negative
+ * values will be upshifted
+ * @reg_base: Value to be added to every register address before performing any
+ * operation.
* @pad_bits: Number of bits of padding between register and value.
* @val_bits: Number of bits in a register value, mandatory.
*
@@ -220,6 +283,23 @@ typedef void (*regmap_unlock)(void *);
* field is NULL but precious_table (see below) is not, the
* check is performed on such table (a register is precious if
* it belongs to one of the ranges specified by precious_table).
+ * @writeable_noinc_reg: Optional callback returning true if the register
+ * supports multiple write operations without incrementing
+ * the register number. If this field is NULL but
+ * wr_noinc_table (see below) is not, the check is
+ * performed on such table (a register is no increment
+ * writeable if it belongs to one of the ranges specified
+ * by wr_noinc_table).
+ * @readable_noinc_reg: Optional callback returning true if the register
+ * supports multiple read operations without incrementing
+ * the register number. If this field is NULL but
+ * rd_noinc_table (see below) is not, the check is
+ * performed on such table (a register is no increment
+ * readable if it belongs to one of the ranges specified
+ * by rd_noinc_table).
+ * @disable_locking: This regmap is either protected by external means or
+ * is guaranteed not to be accessed from multiple threads.
+ * Don't use any locking mechanisms.
* @lock: Optional lock callback (overrides regmap's default lock
* function, based on spinlock or mutex).
* @unlock: As above for unlocking.
@@ -232,18 +312,33 @@ typedef void (*regmap_unlock)(void *);
* read operation on a bus such as SPI, I2C, etc. Most of the
* devices do not need this.
* @reg_write: Same as above for writing.
+ * @reg_update_bits: Optional callback that if filled will be used to perform
+ * all the update_bits (rmw) operations. Should only be provided
+ * if the function requires special locking and register
+ * handling and the operation cannot be represented as a simple
+ * update_bits operation on a bus such as SPI, I2C, etc.
+ * @read: Optional callback that if filled will be used to perform all the
+ * bulk reads from the registers. Data is returned in the buffer used
+ * to transmit data.
+ * @write: Same as above for writing.
+ * @max_raw_read: Max raw read size that can be used on the device.
+ * @max_raw_write: Max raw write size that can be used on the device.
* @fast_io: Register IO is fast. Use a spinlock instead of a mutex
* to perform locking. This field is ignored if custom lock/unlock
* functions are used (see fields lock/unlock of struct regmap_config).
* This field is a duplicate of a similar field in
* 'struct regmap_bus' and serves the exact same purpose.
* Use it only for "no-bus" cases.
+ * @io_port: Support IO port accessors. Makes sense only when MMIO vs. IO port
+ * access can be distinguished.
* @max_register: Optional, specifies the maximum valid register address.
* @wr_table: Optional, points to a struct regmap_access_table specifying
* valid ranges for write access.
* @rd_table: As above, for read access.
* @volatile_table: As above, for volatile registers.
* @precious_table: As above, for precious registers.
+ * @wr_noinc_table: As above, for no increment writeable registers.
+ * @rd_noinc_table: As above, for no increment readable registers.
* @reg_defaults: Power on reset values for registers (for use with
* register cache support).
* @num_reg_defaults: Number of elements in reg_defaults.
@@ -252,10 +347,20 @@ typedef void (*regmap_unlock)(void *);
* a read.
* @write_flag_mask: Mask to be set in the top bytes of the register when doing
* a write. If both read_flag_mask and write_flag_mask are
- * empty the regmap_bus default masks are used.
- * @use_single_rw: If set, converts the bulk read and write operations into
- * a series of single read and write operations. This is useful
- * for device that does not support bulk read and write.
+ * empty and zero_flag_mask is not set the regmap_bus default
+ * masks are used.
+ * @zero_flag_mask: If set, read_flag_mask and write_flag_mask are used even
+ * if they are both empty.
+ * @use_relaxed_mmio: If set, MMIO R/W operations will not use memory barriers.
+ * This can avoid load on devices which don't require strict
+ * orderings, but drivers should carefully add any explicit
+ * memory barriers when they may require them.
+ * @use_single_read: If set, converts the bulk read operation into a series of
+ * single read operations. This is useful for a device that
+ * does not support bulk read.
+ * @use_single_write: If set, converts the bulk write operation into a series of
+ * single write operations. This is useful for a device that
+ * does not support bulk write.
* @can_multi_write: If set, the device supports the multi write mode of bulk
* write operations, if clear multi write requests will be
* split into individual write operations
@@ -273,12 +378,20 @@ typedef void (*regmap_unlock)(void *);
*
* @ranges: Array of configuration entries for virtual address ranges.
* @num_ranges: Number of range configuration entries.
+ * @use_hwlock: Indicate if a hardware spinlock should be used.
+ * @use_raw_spinlock: Indicate if a raw spinlock should be used.
+ * @hwlock_id: Specify the hardware spinlock id.
+ * @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE,
+ * HWLOCK_IRQ or 0.
+ * @can_sleep: Optional, specifies whether regmap operations can sleep.
*/
struct regmap_config {
const char *name;
int reg_bits;
int reg_stride;
+ int reg_shift;
+ unsigned int reg_base;
int pad_bits;
int val_bits;
@@ -286,20 +399,35 @@ struct regmap_config {
bool (*readable_reg)(struct device *dev, unsigned int reg);
bool (*volatile_reg)(struct device *dev, unsigned int reg);
bool (*precious_reg)(struct device *dev, unsigned int reg);
+ bool (*writeable_noinc_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_noinc_reg)(struct device *dev, unsigned int reg);
+
+ bool disable_locking;
regmap_lock lock;
regmap_unlock unlock;
void *lock_arg;
int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+ int (*reg_update_bits)(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val);
+ /* Bulk read/write */
+ int (*read)(void *context, const void *reg_buf, size_t reg_size,
+ void *val_buf, size_t val_size);
+ int (*write)(void *context, const void *data, size_t count);
+ size_t max_raw_read;
+ size_t max_raw_write;
bool fast_io;
+ bool io_port;
unsigned int max_register;
const struct regmap_access_table *wr_table;
const struct regmap_access_table *rd_table;
const struct regmap_access_table *volatile_table;
const struct regmap_access_table *precious_table;
+ const struct regmap_access_table *wr_noinc_table;
+ const struct regmap_access_table *rd_noinc_table;
const struct reg_default *reg_defaults;
unsigned int num_reg_defaults;
enum regcache_type cache_type;
@@ -308,8 +436,11 @@ struct regmap_config {
unsigned long read_flag_mask;
unsigned long write_flag_mask;
+ bool zero_flag_mask;
- bool use_single_rw;
+ bool use_single_read;
+ bool use_single_write;
+ bool use_relaxed_mmio;
bool can_multi_write;
enum regmap_endian reg_format_endian;
@@ -317,6 +448,13 @@ struct regmap_config {
const struct regmap_range_cfg *ranges;
unsigned int num_ranges;
+
+ bool use_hwlock;
+ bool use_raw_spinlock;
+ unsigned int hwlock_id;
+ unsigned int hwlock_mode;
+
+ bool can_sleep;
};
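Editorial sketch, not part of the patch: a minimal regmap_config for a
hypothetical I2C chip with 8-bit registers and 8-bit values, using the
devm_regmap_init_i2c() wrapper defined further down in this header:

	static const struct regmap_config mychip_regmap_config = {
		.reg_bits = 8,
		.val_bits = 8,
		.max_register = 0x7f,
		.cache_type = REGCACHE_FLAT,
	};

	/* in probe():
	 *	struct regmap *map = devm_regmap_init_i2c(i2c,
	 *						  &mychip_regmap_config);
	 *	if (IS_ERR(map))
	 *		return PTR_ERR(map);
	 */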
/**
@@ -329,8 +467,8 @@ struct regmap_config {
* @range_max: Address of the highest register in virtual range.
*
* @selector_reg: Register with selector field.
- * @selector_mask: Bit shift for selector value.
- * @selector_shift: Bit mask for selector value.
+ * @selector_mask: Bit mask for selector value.
+ * @selector_shift: Bit shift for selector value.
*
* @window_start: Address of first (lowest) register in data window.
* @window_len: Number of registers in data window.
@@ -372,8 +510,12 @@ typedef int (*regmap_hw_read)(void *context,
void *val_buf, size_t val_size);
typedef int (*regmap_hw_reg_read)(void *context, unsigned int reg,
unsigned int *val);
+typedef int (*regmap_hw_reg_noinc_read)(void *context, unsigned int reg,
+ void *val, size_t val_count);
typedef int (*regmap_hw_reg_write)(void *context, unsigned int reg,
unsigned int val);
+typedef int (*regmap_hw_reg_noinc_write)(void *context, unsigned int reg,
+ const void *val, size_t val_count);
typedef int (*regmap_hw_reg_update_bits)(void *context, unsigned int reg,
unsigned int mask, unsigned int val);
typedef struct regmap_async *(*regmap_hw_async_alloc)(void);
@@ -387,6 +529,7 @@ typedef void (*regmap_hw_free_context)(void *context);
* to perform locking. This field is ignored if custom lock/unlock
* functions are used (see fields lock/unlock of
* struct regmap_config).
+ * @free_on_exit: kfree this on exit of regmap
* @write: Write operation.
* @gather_write: Write operation with split register/value, return -ENOTSUPP
* if not implemented on a given device.
@@ -394,6 +537,8 @@ typedef void (*regmap_hw_free_context)(void *context);
* must serialise with respect to non-async I/O.
* @reg_write: Write a single register value to the given register address. This
* write operation has to complete when returning from the function.
+ * @reg_write_noinc: Write multiple register values to the same register. This
+ * write operation has to complete when returning from the function.
* @reg_update_bits: Update bits operation to be used against volatile
* registers, intended for devices supporting some mechanism
* for setting/clearing bits without having to
@@ -416,13 +561,16 @@ typedef void (*regmap_hw_free_context)(void *context);
*/
struct regmap_bus {
bool fast_io;
+ bool free_on_exit;
regmap_hw_write write;
regmap_hw_gather_write gather_write;
regmap_hw_async_write async_write;
regmap_hw_reg_write reg_write;
+ regmap_hw_reg_noinc_write reg_noinc_write;
regmap_hw_reg_update_bits reg_update_bits;
regmap_hw_read read;
regmap_hw_reg_read reg_read;
+ regmap_hw_reg_noinc_read reg_noinc_read;
regmap_hw_free_context free_context;
regmap_hw_async_alloc async_alloc;
u8 read_flag_mask;
@@ -449,6 +597,18 @@ struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__regmap_init_mdio(struct mdio_device *mdio_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_sccb(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__regmap_init_spi(struct spi_device *dev,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -474,6 +634,22 @@ struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_spi_avmm(struct spi_device *spi,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__regmap_init_fsi(struct fsi_device *fsi_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__devm_regmap_init(struct device *dev,
const struct regmap_bus *bus,
@@ -485,6 +661,14 @@ struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__devm_regmap_init_mdio(struct mdio_device *mdio_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__devm_regmap_init_spi(struct spi_device *dev,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -511,6 +695,30 @@ struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_i3c(struct i3c_device *i3c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+struct regmap *__devm_regmap_init_fsi(struct fsi_device *fsi_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
/*
* Wrapper for regmap_init macros to include a unique lockdep key and name
@@ -566,6 +774,45 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
i2c, config)
/**
+ * regmap_init_mdio() - Initialise register map
+ *
+ * @mdio_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_mdio(mdio_dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_mdio, #config, \
+ mdio_dev, config)
+
+/**
+ * regmap_init_sccb() - Initialise register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_sccb(i2c, config) \
+ __regmap_lockdep_wrapper(__regmap_init_sccb, #config, \
+ i2c, config)
+
+/**
+ * regmap_init_slimbus() - Initialise register map
+ *
+ * @slimbus: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_slimbus(slimbus, config) \
+ __regmap_lockdep_wrapper(__regmap_init_slimbus, #config, \
+ slimbus, config)
+
+/**
* regmap_init_spi() - Initialise register map
*
* @dev: Device that will be interacted with
@@ -660,6 +907,59 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
/**
+ * regmap_init_sdw() - Initialise register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_sdw(sdw, config) \
+ __regmap_lockdep_wrapper(__regmap_init_sdw, #config, \
+ sdw, config)
+
+/**
+ * regmap_init_sdw_mbq() - Initialise register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_sdw_mbq(sdw, config) \
+ __regmap_lockdep_wrapper(__regmap_init_sdw_mbq, #config, \
+ sdw, config)
+
+/**
+ * regmap_init_spi_avmm() - Initialize register map for Intel SPI Slave
+ * to AVMM Bus Bridge
+ *
+ * @spi: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap.
+ */
+#define regmap_init_spi_avmm(spi, config) \
+ __regmap_lockdep_wrapper(__regmap_init_spi_avmm, #config, \
+ spi, config)
+
+/**
+ * regmap_init_fsi() - Initialise register map
+ *
+ * @fsi_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_fsi(fsi_dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_fsi, #config, fsi_dev, \
+ config)
+
+/**
* devm_regmap_init() - Initialise managed register map
*
* @dev: Device that will be interacted with
@@ -691,6 +991,34 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
i2c, config)
/**
+ * devm_regmap_init_mdio() - Initialise managed register map
+ *
+ * @mdio_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_mdio(mdio_dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_mdio, #config, \
+ mdio_dev, config)
+
+/**
+ * devm_regmap_init_sccb() - Initialise managed register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_sccb(i2c, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_sccb, #config, \
+ i2c, config)
+
+/**
* devm_regmap_init_spi() - Initialise register map
*
* @dev: Device that will be interacted with
@@ -789,6 +1117,93 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
__regmap_lockdep_wrapper(__devm_regmap_init_ac97, #config, \
ac97, config)
+/**
+ * devm_regmap_init_sdw() - Initialise managed register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_sdw(sdw, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_sdw, #config, \
+ sdw, config)
+
+/**
+ * devm_regmap_init_sdw_mbq() - Initialise managed register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_sdw_mbq(sdw, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_sdw_mbq, #config, \
+ sdw, config)
+
+/**
+ * devm_regmap_init_slimbus() - Initialise managed register map
+ *
+ * @slimbus: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_slimbus(slimbus, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_slimbus, #config, \
+ slimbus, config)
+
+/**
+ * devm_regmap_init_i3c() - Initialise managed register map
+ *
+ * @i3c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_i3c(i3c, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_i3c, #config, \
+ i3c, config)
+
+/**
+ * devm_regmap_init_spi_avmm() - Initialise managed register map for Intel
+ * SPI Slave to AVMM Bus Bridge
+ *
+ * @spi: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_spi_avmm(spi, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_spi_avmm, #config, \
+ spi, config)
+
+/**
+ * devm_regmap_init_fsi() - Initialise managed register map
+ *
+ * @fsi_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_fsi(fsi_dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_fsi, #config, \
+ fsi_dev, config)
+
+int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk);
+void regmap_mmio_detach_clk(struct regmap *map);
void regmap_exit(struct regmap *map);
int regmap_reinit_cache(struct regmap *map,
const struct regmap_config *config);
@@ -798,6 +1213,8 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val);
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val);
int regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len);
+int regmap_noinc_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len);
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
size_t val_count);
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
@@ -810,14 +1227,53 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val);
int regmap_raw_read(struct regmap *map, unsigned int reg,
void *val, size_t val_len);
+int regmap_noinc_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len);
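For illustration, the noinc variants transfer a block of data to or from a single, non-incrementing register (typically a hardware FIFO), unlike the bulk calls which step through consecutive registers. A minimal sketch, assuming a hypothetical FIFO_REG address:

#define FIFO_REG 0x10   /* hypothetical non-incrementing FIFO register */

static int xfer_fifo(struct regmap *map, const u8 *tx, u8 *rx, size_t len)
{
        int ret;

        /* Every byte goes to/comes from the same register address. */
        ret = regmap_noinc_write(map, FIFO_REG, tx, len);
        if (ret)
                return ret;

        return regmap_noinc_read(map, FIFO_REG, rx, len);
}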
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_count);
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change, bool async, bool force);
+
+static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_update_bits_base(map, reg, mask, val, NULL, false, false);
+}
+
+static inline int regmap_update_bits_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_update_bits_base(map, reg, mask, val, NULL, true, false);
+}
+
+static inline int regmap_update_bits_check(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ return regmap_update_bits_base(map, reg, mask, val,
+ change, false, false);
+}
+
+static inline int
+regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ return regmap_update_bits_base(map, reg, mask, val,
+ change, true, false);
+}
+
+static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_update_bits_base(map, reg, mask, val, NULL, false, true);
+}
+
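As a hedged sketch of how these wrappers differ: regmap_update_bits() performs a read-modify-write and skips the bus write when the masked value is already correct, while regmap_write_bits() passes force=true and always writes. Register and bit names below are illustrative:

#define CTRL_REG 0x00           /* hypothetical */
#define CTRL_EN  BIT(0)         /* hypothetical */

static int set_enable(struct regmap *map, bool en)
{
        /* Read-modify-write; no bus write if the bit already matches. */
        return regmap_update_bits(map, CTRL_REG, CTRL_EN, en ? CTRL_EN : 0);
}

static int kick_trigger(struct regmap *map)
{
        /* force=true: write even if the current value looks identical,
         * e.g. for self-clearing trigger bits. */
        return regmap_write_bits(map, CTRL_REG, CTRL_EN, CTRL_EN);
}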
int regmap_get_val_bytes(struct regmap *map);
int regmap_get_max_register(struct regmap *map);
int regmap_get_reg_stride(struct regmap *map);
+bool regmap_might_sleep(struct regmap *map);
int regmap_async_complete(struct regmap *map);
bool regmap_can_raw_write(struct regmap *map);
size_t regmap_get_raw_read_max(struct regmap *map);
@@ -850,6 +1306,21 @@ bool regmap_reg_in_ranges(unsigned int reg,
const struct regmap_range *ranges,
unsigned int nranges);
+static inline int regmap_set_bits(struct regmap *map,
+ unsigned int reg, unsigned int bits)
+{
+ return regmap_update_bits_base(map, reg, bits, bits,
+ NULL, false, false);
+}
+
+static inline int regmap_clear_bits(struct regmap *map,
+ unsigned int reg, unsigned int bits)
+{
+ return regmap_update_bits_base(map, reg, bits, 0, NULL, false, false);
+}
+
+int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits);
+
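A small sketch of the set/clear/test helpers (register and bit names are hypothetical); note that regmap_test_bits() returns 1 if all tested bits are set, 0 if at least one is clear, and a negative errno on read failure:

#define STATUS_REG  0x04        /* hypothetical */
#define IRQ_PENDING BIT(3)      /* hypothetical */

static int ack_if_pending(struct regmap *map)
{
        int ret = regmap_test_bits(map, STATUS_REG, IRQ_PENDING);

        if (ret < 0)
                return ret;     /* read failed */
        if (ret)                /* all tested bits were set */
                return regmap_clear_bits(map, STATUS_REG, IRQ_PENDING);

        return 0;
}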
/**
 * struct reg_field - Description of a register field
*
@@ -873,6 +1344,14 @@ struct reg_field {
.msb = _msb, \
}
+#define REG_FIELD_ID(_reg, _lsb, _msb, _size, _offset) { \
+ .reg = _reg, \
+ .lsb = _lsb, \
+ .msb = _msb, \
+ .id_size = _size, \
+ .id_offset = _offset, \
+ }
+
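For example, REG_FIELD_ID() describes a field that is replicated across _size instances spaced _offset registers apart; the plain REG_FIELD() form covers the single-instance case. A sketch with an invented layout:

/* Illustrative: a 4-bit gain field at bits [7:4] of register 0x20,
 * and the same field replicated for 8 channels, one register apart. */
static const struct reg_field gain_field = REG_FIELD(0x20, 4, 7);
static const struct reg_field chan_gain_field = REG_FIELD_ID(0x20, 4, 7, 8, 1);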
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
struct reg_field reg_field);
void regmap_field_free(struct regmap_field *field);
@@ -881,6 +1360,18 @@ struct regmap_field *devm_regmap_field_alloc(struct device *dev,
struct regmap *regmap, struct reg_field reg_field);
void devm_regmap_field_free(struct device *dev, struct regmap_field *field);
+int regmap_field_bulk_alloc(struct regmap *regmap,
+ struct regmap_field **rm_field,
+ const struct reg_field *reg_field,
+ int num_fields);
+void regmap_field_bulk_free(struct regmap_field *field);
+int devm_regmap_field_bulk_alloc(struct device *dev, struct regmap *regmap,
+ struct regmap_field **field,
+ const struct reg_field *reg_field,
+ int num_fields);
+void devm_regmap_field_bulk_free(struct device *dev,
+ struct regmap_field *field);
+
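The bulk allocators create one regmap_field handle per reg_field description in a single call, which drivers with many fields would otherwise do in a loop. A sketch under an assumed field layout:

/* Sketch: three invented fields sharing two registers. */
static const struct reg_field my_field_descs[] = {
        REG_FIELD(0x00, 0, 3),
        REG_FIELD(0x00, 4, 7),
        REG_FIELD(0x04, 0, 0),
};

static int my_alloc_fields(struct device *dev, struct regmap *map,
                           struct regmap_field *fields[3])
{
        return devm_regmap_field_bulk_alloc(dev, map, fields, my_field_descs,
                                            ARRAY_SIZE(my_field_descs));
}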
int regmap_field_read(struct regmap_field *field, unsigned int *val);
int regmap_field_update_bits_base(struct regmap_field *field,
unsigned int mask, unsigned int val,
@@ -891,60 +1382,233 @@ int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
unsigned int mask, unsigned int val,
bool *change, bool async, bool force);
+static inline int regmap_field_write(struct regmap_field *field,
+ unsigned int val)
+{
+ return regmap_field_update_bits_base(field, ~0, val,
+ NULL, false, false);
+}
+
+static inline int regmap_field_force_write(struct regmap_field *field,
+ unsigned int val)
+{
+ return regmap_field_update_bits_base(field, ~0, val, NULL, false, true);
+}
+
+static inline int regmap_field_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_field_update_bits_base(field, mask, val,
+ NULL, false, false);
+}
+
+static inline int regmap_field_set_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ return regmap_field_update_bits_base(field, bits, bits, NULL, false,
+ false);
+}
+
+static inline int regmap_field_clear_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ return regmap_field_update_bits_base(field, bits, 0, NULL, false,
+ false);
+}
+
+int regmap_field_test_bits(struct regmap_field *field, unsigned int bits);
+
+static inline int
+regmap_field_force_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_field_update_bits_base(field, mask, val,
+ NULL, false, true);
+}
+
+static inline int regmap_fields_write(struct regmap_field *field,
+ unsigned int id, unsigned int val)
+{
+ return regmap_fields_update_bits_base(field, id, ~0, val,
+ NULL, false, false);
+}
+
+static inline int regmap_fields_force_write(struct regmap_field *field,
+ unsigned int id, unsigned int val)
+{
+ return regmap_fields_update_bits_base(field, id, ~0, val,
+ NULL, false, true);
+}
+
+static inline int
+regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_fields_update_bits_base(field, id, mask, val,
+ NULL, false, false);
+}
+
+static inline int
+regmap_fields_force_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_fields_update_bits_base(field, id, mask, val,
+ NULL, false, true);
+}
+
+/**
+ * struct regmap_irq_type - IRQ type definitions.
+ *
+ * @type_reg_offset: Offset register for the irq type setting.
+ * @type_reg_mask: Mask of the type-setting bits updated in the type register.
+ * @type_rising_val: Register value to configure RISING type irq.
+ * @type_falling_val: Register value to configure FALLING type irq.
+ * @type_level_low_val: Register value to configure LEVEL_LOW type irq.
+ * @type_level_high_val: Register value to configure LEVEL_HIGH type irq.
+ * @types_supported: logical OR of IRQ_TYPE_* flags indicating supported types.
+ */
+struct regmap_irq_type {
+ unsigned int type_reg_offset;
+ unsigned int type_reg_mask;
+ unsigned int type_rising_val;
+ unsigned int type_falling_val;
+ unsigned int type_level_low_val;
+ unsigned int type_level_high_val;
+ unsigned int types_supported;
+};
+
/**
* struct regmap_irq - Description of an IRQ for the generic regmap irq_chip.
*
* @reg_offset: Offset of the status/mask register within the bank
* @mask: Mask used to flag/control the register.
- * @type_reg_offset: Offset register for the irq type setting.
- * @type_rising_mask: Mask bit to configure RISING type irq.
- * @type_falling_mask: Mask bit to configure FALLING type irq.
+ * @type: IRQ trigger type setting details if supported.
*/
struct regmap_irq {
unsigned int reg_offset;
unsigned int mask;
- unsigned int type_reg_offset;
- unsigned int type_rising_mask;
- unsigned int type_falling_mask;
+ struct regmap_irq_type type;
};
#define REGMAP_IRQ_REG(_irq, _off, _mask) \
[_irq] = { .reg_offset = (_off), .mask = (_mask) }
+#define REGMAP_IRQ_REG_LINE(_id, _reg_bits) \
+ [_id] = { \
+ .mask = BIT((_id) % (_reg_bits)), \
+ .reg_offset = (_id) / (_reg_bits), \
+ }
+
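REGMAP_IRQ_REG_LINE() derives both the register offset and the mask for linearly numbered interrupts packed _reg_bits per register. For example, with 8-bit registers irq 10 lands in register offset 1, bit 2:

/* Sketch: interrupts packed into 8-bit status/mask registers. */
static const struct regmap_irq my_line_irqs[] = {
        REGMAP_IRQ_REG_LINE(0, 8),      /* .reg_offset = 0, .mask = BIT(0) */
        REGMAP_IRQ_REG_LINE(10, 8),     /* .reg_offset = 1, .mask = BIT(2) */
};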
+#define REGMAP_IRQ_MAIN_REG_OFFSET(arr) \
+ { .num_regs = ARRAY_SIZE((arr)), .offset = &(arr)[0] }
+
+struct regmap_irq_sub_irq_map {
+ unsigned int num_regs;
+ unsigned int *offset;
+};
+
+struct regmap_irq_chip_data;
+
/**
* struct regmap_irq_chip - Description of a generic regmap irq_chip.
*
* @name: Descriptive name for IRQ controller.
*
+ * @main_status: Base main status register address. For chips which have
+ * interrupts arranged in separate sub-irq blocks with their own
+ * IRQ registers, and which have main IRQ registers indicating
+ * sub-irq blocks with unhandled interrupts. For such chips, fill
+ * in the sub-irq register information in status_base, mask_base
+ * and ack_base.
+ * @num_main_status_bits: Should be given for chips where the number of
+ * meaningful main status bits differs from num_regs.
+ * @sub_reg_offsets: arrays of mappings from main register bits to sub-irq
+ * registers. The first array describes the registers for the
+ * first main status bit, the second array those for the second
+ * bit, etc. Offsets are given relative to status_base. Should
+ * contain num_regs arrays. Can be provided for chips with a more
+ * complex mapping than 1st bit to 1st sub-reg, 2nd bit to 2nd
+ * sub-reg, ...
+ * When used with not_fixed_stride, each one-element array
+ * member contains the offset of that peripheral from the first
+ * peripheral.
+ * @num_main_regs: Number of 'main status' irq registers for chips which have
+ * main_status set.
+ *
* @status_base: Base status register address.
- * @mask_base: Base mask register address.
- * @mask_writeonly: Base mask register is write only.
- * @unmask_base: Base unmask register address. for chips who have
- * separate mask and unmask registers
+ * @mask_base: Base mask register address. Mask bits are set to 1 when an
+ * interrupt is masked, 0 when unmasked.
+ * @unmask_base: Base unmask register address. Unmask bits are set to 1 when
+ * an interrupt is unmasked and 0 when masked.
* @ack_base: Base ack address. If zero then the chip is clear on read.
* Using zero value is possible with @use_ack bit.
* @wake_base: Base address for wake enables. If zero unsupported.
- * @type_base: Base address for irq type. If zero unsupported.
+ * @type_base: Base address for irq type. If zero unsupported. Deprecated,
+ * use @config_base instead.
+ * @virt_reg_base: Base addresses for extra config regs. Deprecated, use
+ * @config_base instead.
+ * @config_base: Base address for IRQ type config regs. If null unsupported.
* @irq_reg_stride: Stride to use for chips where registers are not contiguous.
 * @init_ack_masked: Ack all masked interrupts once during initialization.
- * @mask_invert: Inverted mask register: cleared bits are masked out.
+ * @mask_unmask_non_inverted: Controls mask bit inversion for chips that set
+ * both @mask_base and @unmask_base. If false, mask and unmask bits are
+ * inverted (which is deprecated behavior); if true, bits will not be
+ * inverted and the registers keep their normal behavior. Note that if
+ * you use only one of @mask_base or @unmask_base, this flag has no
+ * effect and is unnecessary. Any new drivers that set both @mask_base
+ * and @unmask_base should set this to true to avoid relying on the
+ * deprecated behavior.
* @use_ack: Use @ack register even if it is zero.
* @ack_invert: Inverted ack register: cleared bits for ack.
+ * @clear_ack: Use this if the ack register must be written a second time
+ * with the inverse value to clear it (1s then 0s, or vice versa).
+ * @status_invert: Inverted status register: cleared bits are active interrupts.
* @wake_invert: Inverted wake register: cleared bits are wake enabled.
- * @type_invert: Invert the type flags.
+ * @type_in_mask: Use the mask registers for controlling irq type. Use this if
+ * the hardware provides separate bits for rising/falling edge
+ * or low/high level interrupts and they should be combined into
+ * a single logical interrupt. Use &struct regmap_irq_type data
+ * to define the mask bit for each irq type.
+ * @clear_on_unmask: For chips with interrupts cleared on read: read the status
+ * registers before unmasking interrupts to clear any bits
+ * set when they were masked.
* @runtime_pm: Hold a runtime PM lock on the device when accessing it.
+ * @not_fixed_stride: Used when chip peripherals are not laid out with fixed
+ * stride. Must be used with sub_reg_offsets containing the
+ * offsets to each peripheral. Deprecated; the same thing
+ * can be accomplished with a @get_irq_reg callback, without
+ * the need for a @sub_reg_offsets table.
+ * @no_status: No status register: all interrupts assumed generated by device.
*
* @num_regs: Number of registers in each control bank.
+ *
* @irqs: Descriptors for individual IRQs. Interrupt numbers are
* assigned based on the index in the array of the interrupt.
* @num_irqs: Number of descriptors.
- * @num_type_reg: Number of type registers.
- * @type_reg_stride: Stride to use for chips where type registers are not
- * contiguous.
+ *
+ * @num_type_reg: Number of type registers. Deprecated, use config registers
+ * instead.
+ * @num_virt_regs: Number of non-standard irq configuration registers.
+ * If zero unsupported. Deprecated, use config registers
+ * instead.
+ * @num_config_bases: Number of config base registers.
+ * @num_config_regs: Number of config registers for each config base register.
+ *
* @handle_pre_irq: Driver specific callback to handle interrupt from device
* before regmap_irq_handler process the interrupts.
* @handle_post_irq: Driver specific callback to handle interrupt from device
* after handling the interrupts in regmap_irq_handler().
+ * @handle_mask_sync: Callback used to handle IRQ mask syncs. The index will be
+ * in the range [0, num_regs)
+ * @set_type_virt: Driver specific callback to extend regmap_irq_set_type()
+ * and configure virt regs. Deprecated, use @set_type_config
+ * callback and config registers instead.
+ * @set_type_config: Callback used for configuring irq types.
+ * @get_irq_reg: Callback for mapping (base register, index) pairs to register
+ * addresses. The base register will be one of @status_base,
+ * @mask_base, etc., @main_status, or any of @config_base.
+ * The index will be in the range [0, num_main_regs) for the
+ * main status base, [0, num_type_settings) for any config
+ * register base, and [0, num_regs) for any other base.
+ * If unspecified then regmap_irq_get_irq_reg_linear() is used.
* @irq_drv_data: Driver specific IRQ data which is passed as parameter when
* driver specific pre/post interrupt handler is called.
*
@@ -955,21 +1619,32 @@ struct regmap_irq {
struct regmap_irq_chip {
const char *name;
+ unsigned int main_status;
+ unsigned int num_main_status_bits;
+ struct regmap_irq_sub_irq_map *sub_reg_offsets;
+ int num_main_regs;
+
unsigned int status_base;
unsigned int mask_base;
unsigned int unmask_base;
unsigned int ack_base;
unsigned int wake_base;
unsigned int type_base;
+ unsigned int *virt_reg_base;
+ const unsigned int *config_base;
unsigned int irq_reg_stride;
- bool mask_writeonly:1;
- bool init_ack_masked:1;
- bool mask_invert:1;
- bool use_ack:1;
- bool ack_invert:1;
- bool wake_invert:1;
- bool runtime_pm:1;
- bool type_invert:1;
+ unsigned int init_ack_masked:1;
+ unsigned int mask_unmask_non_inverted:1;
+ unsigned int use_ack:1;
+ unsigned int ack_invert:1;
+ unsigned int clear_ack:1;
+ unsigned int status_invert:1;
+ unsigned int wake_invert:1;
+ unsigned int type_in_mask:1;
+ unsigned int clear_on_unmask:1;
+ unsigned int runtime_pm:1;
+ unsigned int not_fixed_stride:1;
+ unsigned int no_status:1;
int num_regs;
@@ -977,24 +1652,51 @@ struct regmap_irq_chip {
int num_irqs;
int num_type_reg;
- unsigned int type_reg_stride;
+ int num_virt_regs;
+ int num_config_bases;
+ int num_config_regs;
int (*handle_pre_irq)(void *irq_drv_data);
int (*handle_post_irq)(void *irq_drv_data);
+ int (*handle_mask_sync)(struct regmap *map, int index,
+ unsigned int mask_buf_def,
+ unsigned int mask_buf, void *irq_drv_data);
+ int (*set_type_virt)(unsigned int **buf, unsigned int type,
+ unsigned long hwirq, int reg);
+ int (*set_type_config)(unsigned int **buf, unsigned int type,
+ const struct regmap_irq *irq_data, int idx,
+ void *irq_drv_data);
+ unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *data,
+ unsigned int base, int index);
void *irq_drv_data;
};
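A minimal sketch of wiring a chip description together (all register addresses and names here are invented, not taken from this header):

/* Sketch: addresses hypothetical; IRQF_ONESHOT comes from <linux/interrupt.h>. */
static const struct regmap_irq my_chip_irqs[] = {
        REGMAP_IRQ_REG_LINE(0, 8),
        REGMAP_IRQ_REG_LINE(1, 8),
};

static const struct regmap_irq_chip my_irq_chip = {
        .name        = "my-chip",
        .status_base = 0x10,
        .mask_base   = 0x14,
        .ack_base    = 0x18,
        .num_regs    = 1,
        .irqs        = my_chip_irqs,
        .num_irqs    = ARRAY_SIZE(my_chip_irqs),
};

static int my_add_irq_chip(struct device *dev, struct regmap *map, int irq)
{
        struct regmap_irq_chip_data *data;

        return devm_regmap_add_irq_chip(dev, map, irq, IRQF_ONESHOT, 0,
                                        &my_irq_chip, &data);
}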
-struct regmap_irq_chip_data;
+unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data,
+ unsigned int base, int index);
+int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type,
+ const struct regmap_irq *irq_data,
+ int idx, void *irq_drv_data);
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
int irq_base, const struct regmap_irq_chip *chip,
struct regmap_irq_chip_data **data);
+int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
+ struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data);
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data);
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
int irq_flags, int irq_base,
const struct regmap_irq_chip *chip,
struct regmap_irq_chip_data **data);
+int devm_regmap_add_irq_chip_fwnode(struct device *dev,
+ struct fwnode_handle *fwnode,
+ struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data);
void devm_regmap_del_irq_chip(struct device *dev, int irq,
struct regmap_irq_chip_data *data);
@@ -1039,6 +1741,13 @@ static inline int regmap_raw_write_async(struct regmap *map, unsigned int reg,
return -EINVAL;
}
+static inline int regmap_noinc_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_bulk_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_count)
{
@@ -1060,6 +1769,13 @@ static inline int regmap_raw_read(struct regmap *map, unsigned int reg,
return -EINVAL;
}
+static inline int regmap_noinc_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_bulk_read(struct regmap *map, unsigned int reg,
void *val, size_t val_count)
{
@@ -1075,6 +1791,27 @@ static inline int regmap_update_bits_base(struct regmap *map, unsigned int reg,
return -EINVAL;
}
+static inline int regmap_set_bits(struct regmap *map,
+ unsigned int reg, unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_clear_bits(struct regmap *map,
+ unsigned int reg, unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_test_bits(struct regmap *map,
+ unsigned int reg, unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_field_update_bits_base(struct regmap_field *field,
unsigned int mask, unsigned int val,
bool *change, bool async, bool force)
@@ -1092,6 +1829,124 @@ static inline int regmap_fields_update_bits_base(struct regmap_field *field,
return -EINVAL;
}
+static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_update_bits_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_update_bits_check(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int
+regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_write(struct regmap_field *field,
+ unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_force_write(struct regmap_field *field,
+ unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int
+regmap_field_force_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_set_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_clear_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_test_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_fields_write(struct regmap_field *field,
+ unsigned int id, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_fields_force_write(struct regmap_field *field,
+ unsigned int id, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int
+regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int
+regmap_fields_force_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_get_val_bytes(struct regmap *map)
{
WARN_ONCE(1, "regmap API is disabled");
@@ -1110,6 +1965,12 @@ static inline int regmap_get_reg_stride(struct regmap *map)
return -EINVAL;
}
+static inline bool regmap_might_sleep(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return true;
+}
+
static inline int regcache_sync(struct regmap *map)
{
WARN_ONCE(1, "regmap API is disabled");
diff --git a/include/linux/regset.h b/include/linux/regset.h
index 8e0c9febf495..9061266dd8de 100644
--- a/include/linux/regset.h
+++ b/include/linux/regset.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* User-mode machine state access
*
* Copyright (C) 2007 Red Hat, Inc. All rights reserved.
*
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License v.2.
- *
* Red Hat Author: Roland McGrath.
*/
@@ -20,6 +17,64 @@
struct task_struct;
struct user_regset;
+struct membuf {
+ void *p;
+ size_t left;
+};
+
+static inline int membuf_zero(struct membuf *s, size_t size)
+{
+ if (s->left) {
+ if (size > s->left)
+ size = s->left;
+ memset(s->p, 0, size);
+ s->p += size;
+ s->left -= size;
+ }
+ return s->left;
+}
+
+static inline int membuf_write(struct membuf *s, const void *v, size_t size)
+{
+ if (s->left) {
+ if (size > s->left)
+ size = s->left;
+ memcpy(s->p, v, size);
+ s->p += size;
+ s->left -= size;
+ }
+ return s->left;
+}
+
+static inline struct membuf membuf_at(const struct membuf *s, size_t offs)
+{
+ struct membuf n = *s;
+
+ if (offs > n.left)
+ offs = n.left;
+ n.p += offs;
+ n.left -= offs;
+
+ return n;
+}
+
+/* current s->p must be aligned for v; v must be a scalar */
+#define membuf_store(s, v) \
+({ \
+ struct membuf *__s = (s); \
+ if (__s->left) { \
+ typeof(v) __v = (v); \
+ size_t __size = sizeof(__v); \
+ if (unlikely(__size > __s->left)) { \
+ __size = __s->left; \
+ memcpy(__s->p, &__v, __size); \
+ } else { \
+ *(typeof(__v + 0) *)__s->p = __v; \
+ } \
+ __s->p += __size; \
+ __s->left -= __size; \
+ } \
+ __s->left;})
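As a sketch of how a ->regset_get() implementation (the user_regset_get2_fn type below) would use these helpers, with an invented register block standing in for real per-thread state:

/* Sketch: the register block is hypothetical; real users copy arch state. */
struct foo_regs { unsigned long r[16]; };

static int foo_regs_get(struct task_struct *target,
                        const struct user_regset *regset,
                        struct membuf to)
{
        struct foo_regs regs = { };     /* would be filled from target's state */

        membuf_write(&to, &regs, sizeof(regs));
        return membuf_zero(&to, to.left);       /* zero-fill any remainder */
}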
/**
* user_regset_active_fn - type of @active function in &struct user_regset
@@ -39,26 +94,9 @@ struct user_regset;
typedef int user_regset_active_fn(struct task_struct *target,
const struct user_regset *regset);
-/**
- * user_regset_get_fn - type of @get function in &struct user_regset
- * @target: thread being examined
- * @regset: regset being examined
- * @pos: offset into the regset data to access, in bytes
- * @count: amount of data to copy, in bytes
- * @kbuf: if not %NULL, a kernel-space pointer to copy into
- * @ubuf: if @kbuf is %NULL, a user-space pointer to copy into
- *
- * Fetch register values. Return %0 on success; -%EIO or -%ENODEV
- * are usual failure returns. The @pos and @count values are in
- * bytes, but must be properly aligned. If @kbuf is non-null, that
- * buffer is used and @ubuf is ignored. If @kbuf is %NULL, then
- * ubuf gives a userland pointer to access directly, and an -%EFAULT
- * return value is possible.
- */
-typedef int user_regset_get_fn(struct task_struct *target,
+typedef int user_regset_get2_fn(struct task_struct *target,
const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf);
+ struct membuf to);
/**
* user_regset_set_fn - type of @set function in &struct user_regset
@@ -122,14 +160,22 @@ typedef int user_regset_writeback_fn(struct task_struct *target,
* This is part of the state of an individual thread, not necessarily
* actual CPU registers per se. A register set consists of a number of
* similar slots, given by @n. Each slot is @size bytes, and aligned to
- * @align bytes (which is at least @size).
+ * @align bytes (which is at least @size). For dynamically-sized
+ * regsets, @n must contain the maximum possible number of slots for the
+ * regset.
*
- * These functions must be called only on the current thread or on a
- * thread that is in %TASK_STOPPED or %TASK_TRACED state, that we are
- * guaranteed will not be woken up and return to user mode, and that we
- * have called wait_task_inactive() on. (The target thread always might
- * wake up for SIGKILL while these functions are working, in which case
- * that thread's user_regset state might be scrambled.)
+ * For backward compatibility, the @get and @set methods must pad to, or
+ * accept, @n * @size bytes, even if the current regset size is smaller.
+ * The precise semantics of these operations depend on the regset being
+ * accessed.
+ *
+ * The functions to which &struct user_regset members point must be
+ * called only on the current thread or on a thread that is in
+ * %TASK_STOPPED or %TASK_TRACED state, that we are guaranteed will not
+ * be woken up and return to user mode, and that we have called
+ * wait_task_inactive() on. (The target thread always might wake up for
+ * SIGKILL while these functions are working, in which case that
+ * thread's user_regset state might be scrambled.)
*
* The @pos argument must be aligned according to @align; the @count
* argument must be a multiple of @size. These functions are not
@@ -152,7 +198,7 @@ typedef int user_regset_writeback_fn(struct task_struct *target,
* omitted when there is an @active function and it returns zero.
*/
struct user_regset {
- user_regset_get_fn *get;
+ user_regset_get2_fn *regset_get;
user_regset_set_fn *set;
user_regset_active_fn *active;
user_regset_writeback_fn *writeback;
@@ -204,44 +250,6 @@ struct user_regset_view {
*/
const struct user_regset_view *task_user_regset_view(struct task_struct *tsk);
-
-/*
- * These are helpers for writing regset get/set functions in arch code.
- * Because @start_pos and @end_pos are always compile-time constants,
- * these are inlined into very little code though they look large.
- *
- * Use one or more calls sequentially for each chunk of regset data stored
- * contiguously in memory. Call with constants for @start_pos and @end_pos,
- * giving the range of byte positions in the regset that data corresponds
- * to; @end_pos can be -1 if this chunk is at the end of the regset layout.
- * Each call updates the arguments to point past its chunk.
- */
-
-static inline int user_regset_copyout(unsigned int *pos, unsigned int *count,
- void **kbuf,
- void __user **ubuf, const void *data,
- const int start_pos, const int end_pos)
-{
- if (*count == 0)
- return 0;
- BUG_ON(*pos < start_pos);
- if (end_pos < 0 || *pos < end_pos) {
- unsigned int copy = (end_pos < 0 ? *count
- : min(*count, end_pos - *pos));
- data += *pos - start_pos;
- if (*kbuf) {
- memcpy(*kbuf, data, copy);
- *kbuf += copy;
- } else if (__copy_to_user(*ubuf, data, copy))
- return -EFAULT;
- else
- *ubuf += copy;
- *pos += copy;
- *count -= copy;
- }
- return 0;
-}
-
static inline int user_regset_copyin(unsigned int *pos, unsigned int *count,
const void **kbuf,
const void __user **ubuf, void *data,
@@ -267,44 +275,15 @@ static inline int user_regset_copyin(unsigned int *pos, unsigned int *count,
return 0;
}
-/*
- * These two parallel the two above, but for portions of a regset layout
- * that always read as all-zero or for which writes are ignored.
- */
-static inline int user_regset_copyout_zero(unsigned int *pos,
- unsigned int *count,
- void **kbuf, void __user **ubuf,
- const int start_pos,
- const int end_pos)
+static inline void user_regset_copyin_ignore(unsigned int *pos,
+ unsigned int *count,
+ const void **kbuf,
+ const void __user **ubuf,
+ const int start_pos,
+ const int end_pos)
{
if (*count == 0)
- return 0;
- BUG_ON(*pos < start_pos);
- if (end_pos < 0 || *pos < end_pos) {
- unsigned int copy = (end_pos < 0 ? *count
- : min(*count, end_pos - *pos));
- if (*kbuf) {
- memset(*kbuf, 0, copy);
- *kbuf += copy;
- } else if (__clear_user(*ubuf, copy))
- return -EFAULT;
- else
- *ubuf += copy;
- *pos += copy;
- *count -= copy;
- }
- return 0;
-}
-
-static inline int user_regset_copyin_ignore(unsigned int *pos,
- unsigned int *count,
- const void **kbuf,
- const void __user **ubuf,
- const int start_pos,
- const int end_pos)
-{
- if (*count == 0)
- return 0;
+ return;
BUG_ON(*pos < start_pos);
if (end_pos < 0 || *pos < end_pos) {
unsigned int copy = (end_pos < 0 ? *count
@@ -316,34 +295,21 @@ static inline int user_regset_copyin_ignore(unsigned int *pos,
*pos += copy;
*count -= copy;
}
- return 0;
}
-/**
- * copy_regset_to_user - fetch a thread's user_regset data into user memory
- * @target: thread to be examined
- * @view: &struct user_regset_view describing user thread machine state
- * @setno: index in @view->regsets
- * @offset: offset into the regset data, in bytes
- * @size: amount of data to copy, in bytes
- * @data: user-mode pointer to copy into
- */
-static inline int copy_regset_to_user(struct task_struct *target,
- const struct user_regset_view *view,
- unsigned int setno,
- unsigned int offset, unsigned int size,
- void __user *data)
-{
- const struct user_regset *regset = &view->regsets[setno];
-
- if (!regset->get)
- return -EOPNOTSUPP;
+extern int regset_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int size, void *data);
- if (!access_ok(VERIFY_WRITE, data, size))
- return -EFAULT;
+extern int regset_get_alloc(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int size,
+ void **data);
- return regset->get(target, regset, offset, size, NULL, data);
-}
+extern int copy_regset_to_user(struct task_struct *target,
+ const struct user_regset_view *view,
+ unsigned int setno, unsigned int offset,
+ unsigned int size, void __user *data);
/**
* copy_regset_from_user - store into thread's user_regset data from user memory
@@ -365,11 +331,10 @@ static inline int copy_regset_from_user(struct task_struct *target,
if (!regset->set)
return -EOPNOTSUPP;
- if (!access_ok(VERIFY_READ, data, size))
+ if (!access_ok(data, size))
return -EFAULT;
return regset->set(target, regset, offset, size, NULL, data);
}
-
#endif /* <linux/regset.h> */
diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h
deleted file mode 100644
index d8ecefaf63ca..000000000000
--- a/include/linux/regulator/ab8500.h
+++ /dev/null
@@ -1,325 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * License Terms: GNU General Public License v2
- *
- * Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
- * Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
- * Daniel Willerud <daniel.willerud@stericsson.com> for ST-Ericsson
- */
-
-#ifndef __LINUX_MFD_AB8500_REGULATOR_H
-#define __LINUX_MFD_AB8500_REGULATOR_H
-
-#include <linux/platform_device.h>
-
-/* AB8500 regulators */
-enum ab8500_regulator_id {
- AB8500_LDO_AUX1,
- AB8500_LDO_AUX2,
- AB8500_LDO_AUX3,
- AB8500_LDO_INTCORE,
- AB8500_LDO_TVOUT,
- AB8500_LDO_AUDIO,
- AB8500_LDO_ANAMIC1,
- AB8500_LDO_ANAMIC2,
- AB8500_LDO_DMIC,
- AB8500_LDO_ANA,
- AB8500_NUM_REGULATORS,
-};
-
-/* AB8505 regulators */
-enum ab8505_regulator_id {
- AB8505_LDO_AUX1,
- AB8505_LDO_AUX2,
- AB8505_LDO_AUX3,
- AB8505_LDO_AUX4,
- AB8505_LDO_AUX5,
- AB8505_LDO_AUX6,
- AB8505_LDO_INTCORE,
- AB8505_LDO_ADC,
- AB8505_LDO_USB,
- AB8505_LDO_AUDIO,
- AB8505_LDO_ANAMIC1,
- AB8505_LDO_ANAMIC2,
- AB8505_LDO_AUX8,
- AB8505_LDO_ANA,
- AB8505_SYSCLKREQ_2,
- AB8505_SYSCLKREQ_4,
- AB8505_NUM_REGULATORS,
-};
-
-/* AB9540 regulators */
-enum ab9540_regulator_id {
- AB9540_LDO_AUX1,
- AB9540_LDO_AUX2,
- AB9540_LDO_AUX3,
- AB9540_LDO_AUX4,
- AB9540_LDO_INTCORE,
- AB9540_LDO_TVOUT,
- AB9540_LDO_USB,
- AB9540_LDO_AUDIO,
- AB9540_LDO_ANAMIC1,
- AB9540_LDO_ANAMIC2,
- AB9540_LDO_DMIC,
- AB9540_LDO_ANA,
- AB9540_SYSCLKREQ_2,
- AB9540_SYSCLKREQ_4,
- AB9540_NUM_REGULATORS,
-};
-
-/* AB8540 regulators */
-enum ab8540_regulator_id {
- AB8540_LDO_AUX1,
- AB8540_LDO_AUX2,
- AB8540_LDO_AUX3,
- AB8540_LDO_AUX4,
- AB8540_LDO_AUX5,
- AB8540_LDO_AUX6,
- AB8540_LDO_INTCORE,
- AB8540_LDO_TVOUT,
- AB8540_LDO_AUDIO,
- AB8540_LDO_ANAMIC1,
- AB8540_LDO_ANAMIC2,
- AB8540_LDO_DMIC,
- AB8540_LDO_ANA,
- AB8540_LDO_SDIO,
- AB8540_SYSCLKREQ_2,
- AB8540_SYSCLKREQ_4,
- AB8540_NUM_REGULATORS,
-};
-
-/* AB8500, AB8505, and AB9540 register initialization */
-struct ab8500_regulator_reg_init {
- int id;
- u8 mask;
- u8 value;
-};
-
-#define INIT_REGULATOR_REGISTER(_id, _mask, _value) \
- { \
- .id = _id, \
- .mask = _mask, \
- .value = _value, \
- }
-
-/* AB8500 registers */
-enum ab8500_regulator_reg {
- AB8500_REGUREQUESTCTRL2,
- AB8500_REGUREQUESTCTRL3,
- AB8500_REGUREQUESTCTRL4,
- AB8500_REGUSYSCLKREQ1HPVALID1,
- AB8500_REGUSYSCLKREQ1HPVALID2,
- AB8500_REGUHWHPREQ1VALID1,
- AB8500_REGUHWHPREQ1VALID2,
- AB8500_REGUHWHPREQ2VALID1,
- AB8500_REGUHWHPREQ2VALID2,
- AB8500_REGUSWHPREQVALID1,
- AB8500_REGUSWHPREQVALID2,
- AB8500_REGUSYSCLKREQVALID1,
- AB8500_REGUSYSCLKREQVALID2,
- AB8500_REGUMISC1,
- AB8500_VAUDIOSUPPLY,
- AB8500_REGUCTRL1VAMIC,
- AB8500_VPLLVANAREGU,
- AB8500_VREFDDR,
- AB8500_EXTSUPPLYREGU,
- AB8500_VAUX12REGU,
- AB8500_VRF1VAUX3REGU,
- AB8500_VAUX1SEL,
- AB8500_VAUX2SEL,
- AB8500_VRF1VAUX3SEL,
- AB8500_REGUCTRL2SPARE,
- AB8500_REGUCTRLDISCH,
- AB8500_REGUCTRLDISCH2,
- AB8500_NUM_REGULATOR_REGISTERS,
-};
-
-/* AB8505 registers */
-enum ab8505_regulator_reg {
- AB8505_REGUREQUESTCTRL1,
- AB8505_REGUREQUESTCTRL2,
- AB8505_REGUREQUESTCTRL3,
- AB8505_REGUREQUESTCTRL4,
- AB8505_REGUSYSCLKREQ1HPVALID1,
- AB8505_REGUSYSCLKREQ1HPVALID2,
- AB8505_REGUHWHPREQ1VALID1,
- AB8505_REGUHWHPREQ1VALID2,
- AB8505_REGUHWHPREQ2VALID1,
- AB8505_REGUHWHPREQ2VALID2,
- AB8505_REGUSWHPREQVALID1,
- AB8505_REGUSWHPREQVALID2,
- AB8505_REGUSYSCLKREQVALID1,
- AB8505_REGUSYSCLKREQVALID2,
- AB8505_REGUVAUX4REQVALID,
- AB8505_REGUMISC1,
- AB8505_VAUDIOSUPPLY,
- AB8505_REGUCTRL1VAMIC,
- AB8505_VSMPSAREGU,
- AB8505_VSMPSBREGU,
- AB8505_VSAFEREGU, /* NOTE! PRCMU register */
- AB8505_VPLLVANAREGU,
- AB8505_EXTSUPPLYREGU,
- AB8505_VAUX12REGU,
- AB8505_VRF1VAUX3REGU,
- AB8505_VSMPSASEL1,
- AB8505_VSMPSASEL2,
- AB8505_VSMPSASEL3,
- AB8505_VSMPSBSEL1,
- AB8505_VSMPSBSEL2,
- AB8505_VSMPSBSEL3,
- AB8505_VSAFESEL1, /* NOTE! PRCMU register */
- AB8505_VSAFESEL2, /* NOTE! PRCMU register */
- AB8505_VSAFESEL3, /* NOTE! PRCMU register */
- AB8505_VAUX1SEL,
- AB8505_VAUX2SEL,
- AB8505_VRF1VAUX3SEL,
- AB8505_VAUX4REQCTRL,
- AB8505_VAUX4REGU,
- AB8505_VAUX4SEL,
- AB8505_REGUCTRLDISCH,
- AB8505_REGUCTRLDISCH2,
- AB8505_REGUCTRLDISCH3,
- AB8505_CTRLVAUX5,
- AB8505_CTRLVAUX6,
- AB8505_NUM_REGULATOR_REGISTERS,
-};
-
-/* AB9540 registers */
-enum ab9540_regulator_reg {
- AB9540_REGUREQUESTCTRL1,
- AB9540_REGUREQUESTCTRL2,
- AB9540_REGUREQUESTCTRL3,
- AB9540_REGUREQUESTCTRL4,
- AB9540_REGUSYSCLKREQ1HPVALID1,
- AB9540_REGUSYSCLKREQ1HPVALID2,
- AB9540_REGUHWHPREQ1VALID1,
- AB9540_REGUHWHPREQ1VALID2,
- AB9540_REGUHWHPREQ2VALID1,
- AB9540_REGUHWHPREQ2VALID2,
- AB9540_REGUSWHPREQVALID1,
- AB9540_REGUSWHPREQVALID2,
- AB9540_REGUSYSCLKREQVALID1,
- AB9540_REGUSYSCLKREQVALID2,
- AB9540_REGUVAUX4REQVALID,
- AB9540_REGUMISC1,
- AB9540_VAUDIOSUPPLY,
- AB9540_REGUCTRL1VAMIC,
- AB9540_VSMPS1REGU,
- AB9540_VSMPS2REGU,
- AB9540_VSMPS3REGU, /* NOTE! PRCMU register */
- AB9540_VPLLVANAREGU,
- AB9540_EXTSUPPLYREGU,
- AB9540_VAUX12REGU,
- AB9540_VRF1VAUX3REGU,
- AB9540_VSMPS1SEL1,
- AB9540_VSMPS1SEL2,
- AB9540_VSMPS1SEL3,
- AB9540_VSMPS2SEL1,
- AB9540_VSMPS2SEL2,
- AB9540_VSMPS2SEL3,
- AB9540_VSMPS3SEL1, /* NOTE! PRCMU register */
- AB9540_VSMPS3SEL2, /* NOTE! PRCMU register */
- AB9540_VAUX1SEL,
- AB9540_VAUX2SEL,
- AB9540_VRF1VAUX3SEL,
- AB9540_REGUCTRL2SPARE,
- AB9540_VAUX4REQCTRL,
- AB9540_VAUX4REGU,
- AB9540_VAUX4SEL,
- AB9540_REGUCTRLDISCH,
- AB9540_REGUCTRLDISCH2,
- AB9540_REGUCTRLDISCH3,
- AB9540_NUM_REGULATOR_REGISTERS,
-};
-
-/* AB8540 registers */
-enum ab8540_regulator_reg {
- AB8540_REGUREQUESTCTRL1,
- AB8540_REGUREQUESTCTRL2,
- AB8540_REGUREQUESTCTRL3,
- AB8540_REGUREQUESTCTRL4,
- AB8540_REGUSYSCLKREQ1HPVALID1,
- AB8540_REGUSYSCLKREQ1HPVALID2,
- AB8540_REGUHWHPREQ1VALID1,
- AB8540_REGUHWHPREQ1VALID2,
- AB8540_REGUHWHPREQ2VALID1,
- AB8540_REGUHWHPREQ2VALID2,
- AB8540_REGUSWHPREQVALID1,
- AB8540_REGUSWHPREQVALID2,
- AB8540_REGUSYSCLKREQVALID1,
- AB8540_REGUSYSCLKREQVALID2,
- AB8540_REGUVAUX4REQVALID,
- AB8540_REGUVAUX5REQVALID,
- AB8540_REGUVAUX6REQVALID,
- AB8540_REGUVCLKBREQVALID,
- AB8540_REGUVRF1REQVALID,
- AB8540_REGUMISC1,
- AB8540_VAUDIOSUPPLY,
- AB8540_REGUCTRL1VAMIC,
- AB8540_VHSIC,
- AB8540_VSDIO,
- AB8540_VSMPS1REGU,
- AB8540_VSMPS2REGU,
- AB8540_VSMPS3REGU,
- AB8540_VPLLVANAREGU,
- AB8540_EXTSUPPLYREGU,
- AB8540_VAUX12REGU,
- AB8540_VRF1VAUX3REGU,
- AB8540_VSMPS1SEL1,
- AB8540_VSMPS1SEL2,
- AB8540_VSMPS1SEL3,
- AB8540_VSMPS2SEL1,
- AB8540_VSMPS2SEL2,
- AB8540_VSMPS2SEL3,
- AB8540_VSMPS3SEL1,
- AB8540_VSMPS3SEL2,
- AB8540_VAUX1SEL,
- AB8540_VAUX2SEL,
- AB8540_VRF1VAUX3SEL,
- AB8540_REGUCTRL2SPARE,
- AB8540_VAUX4REQCTRL,
- AB8540_VAUX4REGU,
- AB8540_VAUX4SEL,
- AB8540_VAUX5REQCTRL,
- AB8540_VAUX5REGU,
- AB8540_VAUX5SEL,
- AB8540_VAUX6REQCTRL,
- AB8540_VAUX6REGU,
- AB8540_VAUX6SEL,
- AB8540_VCLKBREQCTRL,
- AB8540_VCLKBREGU,
- AB8540_VCLKBSEL,
- AB8540_VRF1REQCTRL,
- AB8540_REGUCTRLDISCH,
- AB8540_REGUCTRLDISCH2,
- AB8540_REGUCTRLDISCH3,
- AB8540_REGUCTRLDISCH4,
- AB8540_VSIMSYSCLKCTRL,
- AB8540_VANAVPLLSEL,
- AB8540_NUM_REGULATOR_REGISTERS,
-};
-
-/* AB8500 external regulators */
-struct ab8500_ext_regulator_cfg {
- bool hwreq; /* requires hw mode or high power mode */
-};
-
-enum ab8500_ext_regulator_id {
- AB8500_EXT_SUPPLY1,
- AB8500_EXT_SUPPLY2,
- AB8500_EXT_SUPPLY3,
- AB8500_NUM_EXT_REGULATORS,
-};
-
-/* AB8500 regulator platform data */
-struct ab8500_regulator_platform_data {
- int num_reg_init;
- struct ab8500_regulator_reg_init *reg_init;
- int num_regulator;
- struct regulator_init_data *regulator;
- int num_ext_regulator;
- struct regulator_init_data *ext_regulator;
-};
-
-#endif
diff --git a/include/linux/regulator/act8865.h b/include/linux/regulator/act8865.h
index 113d861a1e4c..d25e24f596c3 100644
--- a/include/linux/regulator/act8865.h
+++ b/include/linux/regulator/act8865.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* act8865.h -- Voltage regulation for active-semi act88xx PMUs
*
* Copyright (C) 2013 Atmel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_REGULATOR_ACT8865_H
diff --git a/include/linux/regulator/arizona-ldo1.h b/include/linux/regulator/arizona-ldo1.h
index c685f1277c63..1fe2c71fc699 100644
--- a/include/linux/regulator/arizona-ldo1.h
+++ b/include/linux/regulator/arizona-ldo1.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Platform data for Arizona LDO1 regulator
*
* Copyright 2017 Cirrus Logic
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef ARIZONA_LDO1_H
@@ -14,9 +11,6 @@
struct regulator_init_data;
struct arizona_ldo1_pdata {
- /** GPIO controlling LDOENA, if any */
- int ldoena;
-
/** Regulator configuration for LDO1 */
const struct regulator_init_data *init_data;
};
diff --git a/include/linux/regulator/arizona-micsupp.h b/include/linux/regulator/arizona-micsupp.h
index 616842619c00..cacb866d5bfb 100644
--- a/include/linux/regulator/arizona-micsupp.h
+++ b/include/linux/regulator/arizona-micsupp.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Platform data for Arizona micsupp regulator
*
* Copyright 2017 Cirrus Logic
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef ARIZONA_MICSUPP_H
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index df176d7c2b87..39b666b40ea6 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* consumer.h -- SoC Regulator consumer support.
*
@@ -5,10 +6,6 @@
*
* Author: Liam Girdwood <lrg@slimlogic.co.uk>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Regulator Consumer Interface.
*
* A Power Management Regulator framework for SoC based devices.
@@ -29,17 +26,18 @@
* but this drops rapidly to 60% when below 100mA. Regulator r has > 90%
* efficiency in IDLE mode at loads < 10mA. Thus regulator r will operate
* in normal mode for loads > 10mA and in IDLE mode for load <= 10mA.
- *
*/
#ifndef __LINUX_REGULATOR_CONSUMER_H_
#define __LINUX_REGULATOR_CONSUMER_H_
#include <linux/err.h>
+#include <linux/suspend.h>
struct device;
struct notifier_block;
struct regmap;
+struct regulator_dev;
/*
* Regulator operating modes.
@@ -80,6 +78,7 @@ struct regmap;
* These modes can be OR'ed together to make up a mask of valid register modes.
*/
+#define REGULATOR_MODE_INVALID 0x0
#define REGULATOR_MODE_FAST 0x1
#define REGULATOR_MODE_NORMAL 0x2
#define REGULATOR_MODE_IDLE 0x4
@@ -120,6 +119,16 @@ struct regmap;
#define REGULATOR_EVENT_PRE_DISABLE 0x400
#define REGULATOR_EVENT_ABORT_DISABLE 0x800
#define REGULATOR_EVENT_ENABLE 0x1000
+/*
+ * The following notifications should be emitted only if the detected
+ * condition is such that the HW is likely to still be working, but consumers
+ * should take recovery action to prevent the problem from escalating into an
+ * error.
+ */
+#define REGULATOR_EVENT_UNDER_VOLTAGE_WARN 0x2000
+#define REGULATOR_EVENT_OVER_CURRENT_WARN 0x4000
+#define REGULATOR_EVENT_OVER_VOLTAGE_WARN 0x8000
+#define REGULATOR_EVENT_OVER_TEMP_WARN 0x10000
+#define REGULATOR_EVENT_WARN_MASK 0x1E000
/*
* Regulator errors that can be queried using regulator_get_error_flags
@@ -139,6 +148,10 @@ struct regmap;
#define REGULATOR_ERROR_FAIL BIT(4)
#define REGULATOR_ERROR_OVER_TEMP BIT(5)
+#define REGULATOR_ERROR_UNDER_VOLTAGE_WARN BIT(6)
+#define REGULATOR_ERROR_OVER_CURRENT_WARN BIT(7)
+#define REGULATOR_ERROR_OVER_VOLTAGE_WARN BIT(8)
+#define REGULATOR_ERROR_OVER_TEMP_WARN BIT(9)
/**
* struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event
@@ -158,10 +171,13 @@ struct regulator;
/**
* struct regulator_bulk_data - Data used for bulk regulator operations.
*
- * @supply: The name of the supply. Initialised by the user before
- * using the bulk regulator APIs.
- * @consumer: The regulator consumer for the supply. This will be managed
- * by the bulk API.
+ * @supply: The name of the supply. Initialised by the user before
+ * using the bulk regulator APIs.
+ * @init_load_uA: After getting the regulator, regulator_set_load() will be
+ * called with this load. Initialised by the user before
+ * using the bulk regulator APIs.
+ * @consumer: The regulator consumer for the supply. This will be managed
+ * by the bulk API.
*
* The regulator APIs provide a series of regulator_bulk_() API calls as
* a convenience to consumers which require multiple supplies. This
@@ -169,6 +185,7 @@ struct regulator;
*/
struct regulator_bulk_data {
const char *supply;
+ int init_load_uA;
struct regulator *consumer;
/* private: Internal use */
@@ -190,6 +207,8 @@ struct regulator *__must_check regulator_get_optional(struct device *dev,
const char *id);
struct regulator *__must_check devm_regulator_get_optional(struct device *dev,
const char *id);
+int devm_regulator_get_enable(struct device *dev, const char *id);
+int devm_regulator_get_enable_optional(struct device *dev, const char *id);
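These helpers get a regulator, enable it, and arrange for it to be disabled and put automatically on driver detach; the consumer never handles a struct regulator directly. A probe-time sketch with an illustrative supply name:

static int my_probe(struct platform_device *pdev)
{
        int ret;

        /* "vdd" is an illustrative supply name. */
        ret = devm_regulator_get_enable(&pdev->dev, "vdd");
        if (ret)
                return dev_err_probe(&pdev->dev, ret,
                                     "failed to get/enable vdd\n");

        return 0;       /* supply stays on until driver detach */
}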
void regulator_put(struct regulator *regulator);
void devm_regulator_put(struct regulator *regulator);
@@ -209,17 +228,12 @@ void regulator_bulk_unregister_supply_alias(struct device *dev,
int devm_regulator_register_supply_alias(struct device *dev, const char *id,
struct device *alias_dev,
const char *alias_id);
-void devm_regulator_unregister_supply_alias(struct device *dev,
- const char *id);
int devm_regulator_bulk_register_supply_alias(struct device *dev,
const char *const *id,
struct device *alias_dev,
const char *const *alias_id,
int num_id);
-void devm_regulator_bulk_unregister_supply_alias(struct device *dev,
- const char *const *id,
- int num_id);
/* regulator output control and status */
int __must_check regulator_enable(struct regulator *regulator);
@@ -230,10 +244,21 @@ int regulator_disable_deferred(struct regulator *regulator, int ms);
int __must_check regulator_bulk_get(struct device *dev, int num_consumers,
struct regulator_bulk_data *consumers);
+int __must_check of_regulator_bulk_get_all(struct device *dev, struct device_node *np,
+ struct regulator_bulk_data **consumers);
int __must_check devm_regulator_bulk_get(struct device *dev, int num_consumers,
struct regulator_bulk_data *consumers);
+void devm_regulator_bulk_put(struct regulator_bulk_data *consumers);
+int __must_check devm_regulator_bulk_get_exclusive(struct device *dev, int num_consumers,
+ struct regulator_bulk_data *consumers);
+int __must_check devm_regulator_bulk_get_const(
+ struct device *dev, int num_consumers,
+ const struct regulator_bulk_data *in_consumers,
+ struct regulator_bulk_data **out_consumers);
int __must_check regulator_bulk_enable(int num_consumers,
struct regulator_bulk_data *consumers);
+int devm_regulator_bulk_get_enable(struct device *dev, int num_consumers,
+ const char * const *id);
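The bulk variant does the same for a whole set of supplies named by a string array. A sketch (supply names invented):

static const char * const my_supplies[] = { "vdda", "vddio" };

static int my_enable_supplies(struct device *dev)
{
        return devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(my_supplies),
                                              my_supplies);
}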
int regulator_bulk_disable(int num_consumers,
struct regulator_bulk_data *consumers);
int regulator_bulk_force_disable(int num_consumers,
@@ -280,10 +305,26 @@ int regulator_unregister_notifier(struct regulator *regulator,
void devm_regulator_unregister_notifier(struct regulator *regulator,
struct notifier_block *nb);
+/* regulator suspend */
+int regulator_suspend_enable(struct regulator_dev *rdev,
+ suspend_state_t state);
+int regulator_suspend_disable(struct regulator_dev *rdev,
+ suspend_state_t state);
+int regulator_set_suspend_voltage(struct regulator *regulator, int min_uV,
+ int max_uV, suspend_state_t state);
+
/* driver data - core doesn't touch */
void *regulator_get_drvdata(struct regulator *regulator);
void regulator_set_drvdata(struct regulator *regulator, void *data);
+/* misc helpers */
+
+void regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers,
+ const char *const *supply_names,
+ unsigned int num_supplies);
+
+bool regulator_is_equal(struct regulator *reg1, struct regulator *reg2);
+
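regulator_bulk_set_supply_names() fills in the .supply member of each array element from a string array before a bulk get, replacing open-coded assignment loops. A sketch with illustrative names:

static const char * const my_names[] = { "vcc", "vref" };

static int my_get_supplies(struct device *dev,
                           struct regulator_bulk_data consumers[2])
{
        regulator_bulk_set_supply_names(consumers, my_names,
                                        ARRAY_SIZE(my_names));
        return devm_regulator_bulk_get(dev, ARRAY_SIZE(my_names), consumers);
}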
#else
/*
@@ -317,6 +358,23 @@ regulator_get_exclusive(struct device *dev, const char *id)
}
static inline struct regulator *__must_check
+devm_regulator_get_exclusive(struct device *dev, const char *id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int devm_regulator_get_enable(struct device *dev, const char *id)
+{
+ return -ENODEV;
+}
+
+static inline int devm_regulator_get_enable_optional(struct device *dev,
+ const char *id)
+{
+ return -ENODEV;
+}
+
+static inline struct regulator *__must_check
regulator_get_optional(struct device *dev, const char *id)
{
return ERR_PTR(-ENODEV);
@@ -337,6 +395,10 @@ static inline void devm_regulator_put(struct regulator *regulator)
{
}
+static inline void devm_regulator_bulk_put(struct regulator_bulk_data *consumers)
+{
+}
+
static inline int regulator_register_supply_alias(struct device *dev,
const char *id,
struct device *alias_dev,
@@ -373,11 +435,6 @@ static inline int devm_regulator_register_supply_alias(struct device *dev,
return 0;
}
-static inline void devm_regulator_unregister_supply_alias(struct device *dev,
- const char *id)
-{
-}
-
static inline int devm_regulator_bulk_register_supply_alias(struct device *dev,
const char *const *id,
struct device *alias_dev,
@@ -387,11 +444,6 @@ static inline int devm_regulator_bulk_register_supply_alias(struct device *dev,
return 0;
}
-static inline void devm_regulator_bulk_unregister_supply_alias(
- struct device *dev, const char *const *id, int num_id)
-{
-}
-
static inline int regulator_enable(struct regulator *regulator)
{
return 0;
@@ -431,12 +483,25 @@ static inline int devm_regulator_bulk_get(struct device *dev, int num_consumers,
return 0;
}
+static inline int of_regulator_bulk_get_all(struct device *dev, struct device_node *np,
+ struct regulator_bulk_data **consumers)
+{
+ return 0;
+}
+
static inline int regulator_bulk_enable(int num_consumers,
struct regulator_bulk_data *consumers)
{
return 0;
}
+static inline int devm_regulator_bulk_get_enable(struct device *dev,
+ int num_consumers,
+ const char * const *id)
+{
+ return 0;
+}
+
static inline int regulator_bulk_disable(int num_consumers,
struct regulator_bulk_data *consumers)
{
@@ -471,12 +536,22 @@ static inline int regulator_get_voltage(struct regulator *regulator)
return -EINVAL;
}
+static inline int regulator_sync_voltage(struct regulator *regulator)
+{
+ return -EINVAL;
+}
+
static inline int regulator_is_supported_voltage(struct regulator *regulator,
int min_uV, int max_uV)
{
return 0;
}
+static inline unsigned int regulator_get_linear_step(struct regulator *regulator)
+{
+ return 0;
+}
+
static inline int regulator_set_current_limit(struct regulator *regulator,
int min_uA, int max_uA)
{
@@ -507,7 +582,7 @@ static inline int regulator_get_error_flags(struct regulator *regulator,
static inline int regulator_set_load(struct regulator *regulator, int load_uA)
{
- return REGULATOR_MODE_NORMAL;
+ return 0;
}
static inline int regulator_allow_bypass(struct regulator *regulator,
@@ -558,6 +633,25 @@ static inline int devm_regulator_unregister_notifier(struct regulator *regulator
return 0;
}
+static inline int regulator_suspend_enable(struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ return -EINVAL;
+}
+
+static inline int regulator_suspend_disable(struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ return -EINVAL;
+}
+
+static inline int regulator_set_suspend_voltage(struct regulator *regulator,
+ int min_uV, int max_uV,
+ suspend_state_t state)
+{
+ return -EINVAL;
+}
+
static inline void *regulator_get_drvdata(struct regulator *regulator)
{
return NULL;
@@ -578,6 +672,18 @@ static inline int regulator_list_voltage(struct regulator *regulator, unsigned s
return -EINVAL;
}
+static inline void
+regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers,
+ const char *const *supply_names,
+ unsigned int num_supplies)
+{
+}
+
+static inline bool
+regulator_is_equal(struct regulator *reg1, struct regulator *reg2)
+{
+ return false;
+}
#endif
static inline int regulator_set_voltage_triplet(struct regulator *regulator,
diff --git a/include/linux/regulator/coupler.h b/include/linux/regulator/coupler.h
new file mode 100644
index 000000000000..73291f280a23
--- /dev/null
+++ b/include/linux/regulator/coupler.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * coupler.h -- SoC Regulator support, coupler API.
+ *
+ * Regulator Coupler Interface.
+ */
+
+#ifndef __LINUX_REGULATOR_COUPLER_H_
+#define __LINUX_REGULATOR_COUPLER_H_
+
+#include <linux/kernel.h>
+#include <linux/suspend.h>
+
+struct regulator_coupler;
+struct regulator_dev;
+
+/**
+ * struct regulator_coupler - customized regulator's coupler
+ *
+ * A regulator coupler allows customizing the coupling algorithm.
+ *
+ * @list: couplers list entry
+ * @attach_regulator: Callback invoked on creation of a coupled regulator;
+ * couples are unresolved at this point. The callee should
+ * check whether it can handle the regulator and return 0 on
+ * success, -errno on failure, or 1 if the given regulator is
+ * not suitable for this coupler (the case of having multiple
+ * regulators in a system). This callback must be implemented.
+ * @detach_regulator: Callback invoked on destruction of a coupled regulator.
+ * This callback is optional and could be NULL.
+ * @balance_voltage: Callback invoked when voltage of a coupled regulator is
+ * changing. Called with all of the coupled rdevs held under
+ * the "consumer lock". The callee should perform voltage
+ * balancing, changing the voltage of the coupled regulators
+ * as needed. It's up to the coupler to verify the voltage
+ * before changing it in hardware, i.e. the coupler should
+ * check the consumers' min/max constraints, etc. This
+ * callback is optional and could be NULL, in which case a
+ * generic voltage balancer will be used.
+ */
+struct regulator_coupler {
+ struct list_head list;
+
+ int (*attach_regulator)(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev);
+ int (*detach_regulator)(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev);
+ int (*balance_voltage)(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev,
+ suspend_state_t state);
+};
+
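A minimal registration sketch (names hypothetical): a coupler is registered once at init, and its @attach_regulator callback decides which regulators it takes; returning 1 declines a regulator without treating it as an error:

static int my_attach(struct regulator_coupler *coupler,
                     struct regulator_dev *rdev)
{
        /* Return 1 to decline regulators this coupler does not handle. */
        return 1;
}

static struct regulator_coupler my_coupler = {
        .attach_regulator = my_attach,
};

static int __init my_coupler_init(void)
{
        return regulator_coupler_register(&my_coupler);
}
arch_initcall(my_coupler_init);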
+#ifdef CONFIG_REGULATOR
+int regulator_coupler_register(struct regulator_coupler *coupler);
+int regulator_check_consumers(struct regulator_dev *rdev,
+ int *min_uV, int *max_uV,
+ suspend_state_t state);
+int regulator_check_voltage(struct regulator_dev *rdev,
+ int *min_uV, int *max_uV);
+int regulator_get_voltage_rdev(struct regulator_dev *rdev);
+int regulator_set_voltage_rdev(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ suspend_state_t state);
+int regulator_do_balance_voltage(struct regulator_dev *rdev,
+ suspend_state_t state, bool skip_coupled);
+#else
+static inline int regulator_coupler_register(struct regulator_coupler *coupler)
+{
+ return 0;
+}
+static inline int regulator_check_consumers(struct regulator_dev *rdev,
+ int *min_uV, int *max_uV,
+ suspend_state_t state)
+{
+ return -EINVAL;
+}
+static inline int regulator_check_voltage(struct regulator_dev *rdev,
+ int *min_uV, int *max_uV)
+{
+ return -EINVAL;
+}
+static inline int regulator_get_voltage_rdev(struct regulator_dev *rdev)
+{
+ return -EINVAL;
+}
+static inline int regulator_set_voltage_rdev(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ suspend_state_t state)
+{
+ return -EINVAL;
+}
+static inline int regulator_do_balance_voltage(struct regulator_dev *rdev,
+ suspend_state_t state,
+ bool skip_coupled)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif
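
As a rough illustration of the coupler API introduced above, a minimal sketch of a custom coupler follows (not part of the patch); the "foo" naming is hypothetical and the balancing logic is deliberately simplistic:

#include <linux/init.h>
#include <linux/regulator/coupler.h>

static int foo_coupler_attach(struct regulator_coupler *coupler,
			      struct regulator_dev *rdev)
{
	/* Accept every regulator; return 1 instead to let another coupler try */
	return 0;
}

static int foo_coupler_balance(struct regulator_coupler *coupler,
			       struct regulator_dev *rdev,
			       suspend_state_t state)
{
	int min_uV = 0, max_uV = INT_MAX;
	int ret;

	/* Clamp the request to what the consumers currently allow */
	ret = regulator_check_consumers(rdev, &min_uV, &max_uV, state);
	if (ret < 0)
		return ret;

	/* Apply the balanced voltage to this rdev only */
	return regulator_set_voltage_rdev(rdev, min_uV, max_uV, state);
}

static struct regulator_coupler foo_coupler = {
	.attach_regulator = foo_coupler_attach,
	.balance_voltage = foo_coupler_balance,
};

static int __init foo_coupler_init(void)
{
	return regulator_coupler_register(&foo_coupler);
}
arch_initcall(foo_coupler_init);
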
diff --git a/include/linux/regulator/da9121.h b/include/linux/regulator/da9121.h
new file mode 100644
index 000000000000..62d9d257dc25
--- /dev/null
+++ b/include/linux/regulator/da9121.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * DA9121 Single-channel dual-phase 10A buck converter
+ * DA9130 Single-channel dual-phase 10A buck converter (Automotive)
+ * DA9217 Single-channel dual-phase 6A buck converter
+ * DA9122 Dual-channel single-phase 5A buck converter
+ * DA9131 Dual-channel single-phase 5A buck converter (Automotive)
+ * DA9220 Dual-channel single-phase 3A buck converter
+ * DA9132 Dual-channel single-phase 3A buck converter (Automotive)
+ *
+ * Copyright (C) 2020 Dialog Semiconductor
+ *
+ * Authors: Adam Ward, Dialog Semiconductor
+ */
+
+#ifndef __LINUX_REGULATOR_DA9121_H
+#define __LINUX_REGULATOR_DA9121_H
+
+#include <linux/regulator/machine.h>
+
+struct gpio_desc;
+
+enum {
+ DA9121_IDX_BUCK1,
+ DA9121_IDX_BUCK2,
+ DA9121_IDX_MAX
+};
+
+struct da9121_pdata {
+ int num_buck;
+ struct gpio_desc *gpiod_ren[DA9121_IDX_MAX];
+ struct device_node *reg_node[DA9121_IDX_MAX];
+ struct regulator_init_data *init_data[DA9121_IDX_MAX];
+};
+
+#endif
diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h
index 80cb40b7c88d..0d3c0f0ebc1a 100644
--- a/include/linux/regulator/da9211.h
+++ b/include/linux/regulator/da9211.h
@@ -1,17 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* da9211.h - Regulator device driver for DA9211/DA9212
- * /DA9213/DA9214/DA9215
+ * /DA9213/DA9223/DA9214/DA9224/DA9215/DA9225
* Copyright (C) 2015 Dialog Semiconductor Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_REGULATOR_DA9211_H
@@ -21,12 +12,17 @@
#define DA9211_MAX_REGULATORS 2
+struct gpio_desc;
+
enum da9211_chip_id {
DA9211,
DA9212,
DA9213,
+ DA9223,
DA9214,
+ DA9224,
DA9215,
+ DA9225,
};
struct da9211_pdata {
@@ -36,7 +32,7 @@ struct da9211_pdata {
* 2 : 2 phase 2 buck
*/
int num_buck;
- int gpio_ren[DA9211_MAX_REGULATORS];
+ struct gpio_desc *gpiod_ren[DA9211_MAX_REGULATORS];
struct device_node *reg_node[DA9211_MAX_REGULATORS];
struct regulator_init_data *init_data[DA9211_MAX_REGULATORS];
};
diff --git a/include/linux/regulator/db8500-prcmu.h b/include/linux/regulator/db8500-prcmu.h
index 612062313b68..f90df9ee703e 100644
--- a/include/linux/regulator/db8500-prcmu.h
+++ b/include/linux/regulator/db8500-prcmu.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) ST-Ericsson SA 2010
*
- * License Terms: GNU General Public License v2
- *
* Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
*
* Interface to power domain regulators on DB8500
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 94417b4226bd..d3b4a3d4514a 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* driver.h -- SoC Regulator driver support.
*
@@ -5,10 +6,6 @@
*
* Author: Liam Girdwood <lrg@slimlogic.co.uk>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Regulator Driver Interface.
*/
@@ -16,9 +13,12 @@
#define __LINUX_REGULATOR_DRIVER_H_
#include <linux/device.h>
+#include <linux/linear_range.h>
#include <linux/notifier.h>
#include <linux/regulator/consumer.h>
+#include <linux/ww_mutex.h>
+struct gpio_desc;
struct regmap;
struct regulator_dev;
struct regulator_config;
@@ -40,31 +40,22 @@ enum regulator_status {
REGULATOR_STATUS_UNDEFINED,
};
-/**
- * struct regulator_linear_range - specify linear voltage ranges
- *
- * Specify a range of voltages for regulator_map_linar_range() and
- * regulator_list_linear_range().
- *
- * @min_uV: Lowest voltage in range
- * @min_sel: Lowest selector for range
- * @max_sel: Highest selector for range
- * @uV_step: Step size
- */
-struct regulator_linear_range {
- unsigned int min_uV;
- unsigned int min_sel;
- unsigned int max_sel;
- unsigned int uV_step;
+enum regulator_detection_severity {
+ /* Hardware shuts down voltage outputs if the condition is detected */
+ REGULATOR_SEVERITY_PROT,
+ /* Hardware is probably damaged/inoperable */
+ REGULATOR_SEVERITY_ERR,
+ /* Hardware is still recoverable but recovery action must be taken */
+ REGULATOR_SEVERITY_WARN,
};
-/* Initialize struct regulator_linear_range */
+/* Initialize struct linear_range for regulators */
#define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \
{ \
- .min_uV = _min_uV, \
+ .min = _min_uV, \
.min_sel = _min_sel, \
.max_sel = _max_sel, \
- .uV_step = _step_uV, \
+ .step = _step_uV, \
}
/**
@@ -80,9 +71,12 @@ struct regulator_linear_range {
* @set_voltage_sel: Set the voltage for the regulator using the specified
* selector.
* @map_voltage: Convert a voltage into a selector
- * @get_voltage: Return the currently configured voltage for the regulator.
+ * @get_voltage: Return the currently configured voltage for the regulator;
+ * return -ENOTRECOVERABLE if regulator can't be read at
+ * bootup and hasn't been set yet.
* @get_voltage_sel: Return the currently configured voltage selector for the
- * regulator.
+ * regulator; return -ENOTRECOVERABLE if regulator can't
+ * be read at bootup and hasn't been set yet.
* @list_voltage: Return one of the supported voltages, in microvolts; zero
* if the selector indicates a voltage that is unusable on this system;
* or negative errno. Selectors range from zero to one less than
@@ -93,8 +87,31 @@ struct regulator_linear_range {
* @get_current_limit: Get the configured limit for a current-limited regulator.
* @set_input_current_limit: Configure an input limit.
*
- * @set_over_current_protection: Support capability of automatically shutting
- * down when detecting an over current event.
+ * @set_over_current_protection: Support enabling of and setting limits for over
+ * current situation detection. Detection can be configured for three
+ * levels of severity.
+ *
+ * - REGULATOR_SEVERITY_PROT should automatically shut down the regulator(s).
+ *
+ * - REGULATOR_SEVERITY_ERR should indicate that the over-current situation
+ * is caused by an unrecoverable error but HW does not perform
+ * automatic shut down.
+ *
+ * - REGULATOR_SEVERITY_WARN should indicate a situation where the hardware
+ * is still believed to be undamaged but a board-specific
+ * recovery action is needed. If lim_uA is 0 the limit should
+ * not be changed but the detection should just be
+ * enabled/disabled as requested.
+ *
+ * @set_over_voltage_protection: Support enabling of and setting limits for over
+ * voltage situation detection. Detection can be configured for the
+ * same severities as over current protection. Units of uV.
+ * @set_under_voltage_protection: Support enabling of and setting limits for
+ * under voltage situation detection. Detection can be configured for
+ * the same severities as over current protection. Units of uV.
+ * @set_thermal_protection: Support enabling of and setting limits for over
+ * temperature situation detection. Detection can be configured for
+ * the same severities as over current protection. Units of degrees
+ * Kelvin.
*
* @set_active_discharge: Set active discharge enable/disable of regulators.
*
@@ -132,7 +149,7 @@ struct regulator_linear_range {
* suspended.
* @set_suspend_mode: Set the operating mode for the regulator when the
* system is suspended.
- *
+ * @resume: Resume operation of suspended regulator.
* @set_pull_down: Configure the regulator to pull down when the regulator
* is disabled.
*
@@ -158,8 +175,15 @@ struct regulator_ops {
int (*get_current_limit) (struct regulator_dev *);
int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
- int (*set_over_current_protection) (struct regulator_dev *);
- int (*set_active_discharge) (struct regulator_dev *, bool enable);
+ int (*set_over_current_protection)(struct regulator_dev *, int lim_uA,
+ int severity, bool enable);
+ int (*set_over_voltage_protection)(struct regulator_dev *, int lim_uV,
+ int severity, bool enable);
+ int (*set_under_voltage_protection)(struct regulator_dev *, int lim_uV,
+ int severity, bool enable);
+ int (*set_thermal_protection)(struct regulator_dev *, int lim,
+ int severity, bool enable);
+ int (*set_active_discharge)(struct regulator_dev *, bool enable);
/* enable/disable regulator */
int (*enable) (struct regulator_dev *);
@@ -214,6 +238,8 @@ struct regulator_ops {
/* set regulator suspend operating mode (defined in consumer.h) */
int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode);
+ int (*resume)(struct regulator_dev *rdev);
+
int (*set_pull_down) (struct regulator_dev *);
};
@@ -236,6 +262,8 @@ enum regulator_type {
* @name: Identifying name for the regulator.
* @supply_name: Identifying the regulator supply
* @of_match: Name used to identify regulator in DT.
+ * @of_match_full_name: A flag to indicate that the of_match string, if
+ * present, should be matched against the node full_name.
* @regulators_node: Name of node containing regulator definitions in DT.
* @of_parse_cb: Optional callback called only if of_match is present.
* Will be called for each regulator parsed from DT, during
@@ -255,6 +283,7 @@ enum regulator_type {
* @continuous_voltage_range: Indicates if the regulator can set any
 * voltage within the constraints range.
* @n_voltages: Number of selectors available for ops.list_voltage().
+ * @n_current_limits: Number of selectors available for current limits
*
* @min_uV: Voltage given by the lowest selector (if linear mapping)
* @uV_step: Voltage increase with each selector (if linear mapping)
@@ -263,13 +292,26 @@ enum regulator_type {
* @ramp_delay: Time to settle down after voltage change (unit: uV/us)
* @min_dropout_uV: The minimum dropout voltage this regulator can handle
* @linear_ranges: A constant table of possible voltage ranges.
- * @n_linear_ranges: Number of entries in the @linear_ranges table.
+ * @linear_range_selectors: A constant table of voltage range selectors.
+ * If pickable ranges are used, each range must
+ * have a corresponding selector here.
+ * @n_linear_ranges: Number of entries in the @linear_ranges (and in
+ * linear_range_selectors if used) table(s).
* @volt_table: Voltage mapping table (if table based mapping)
+ * @curr_table: Current limit mapping table (if table based mapping)
*
- * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_
+ * @vsel_range_reg: Register for range selector when using pickable ranges
+ * and ``regulator_map_*_voltage_*_pickable`` functions.
+ * @vsel_range_mask: Mask for register bitfield used for range selector
+ * @vsel_reg: Register for selector when using ``regulator_map_*_voltage_*``
* @vsel_mask: Mask for register bitfield used for selector
- * @csel_reg: Register for TPS65218 LS3 current regulator
- * @csel_mask: Mask for TPS65218 LS3 current regulator
+ * @vsel_step: Specify the resolution of selector stepping when setting
+ * voltage. If 0, no stepping is done (the requested selector is
+ * set directly); if >0, the regulator API ramps the voltage
+ * up/down gradually, increasing/decreasing the selector by the
+ * specified step value each time.
+ * @csel_reg: Register for current limit selector using regmap set_current_limit
+ * @csel_mask: Mask for register bitfield used for current limit selector
* @apply_reg: Register for initiate voltage change on the output when
* using regulator_set_voltage_sel_regmap
* @apply_bit: Register bitfield used for initiate voltage change on the
@@ -301,15 +343,26 @@ enum regulator_type {
* @pull_down_val_on: Enabling value for control when using regmap
* set_pull_down
*
+ * @ramp_reg: Register for controlling the regulator ramp-rate.
+ * @ramp_mask: Bitmask for the ramp-rate control register.
+ * @ramp_delay_table: Table for mapping the regulator ramp-rate values. Values
+ * should be given in units of V/S (uV/uS). See
+ * regulator_set_ramp_delay_regmap().
+ * @n_ramp_values: Number of elements in @ramp_delay_table.
+ *
* @enable_time: Time taken for initial enable of regulator (in uS).
* @off_on_delay: guard time (in uS), before re-enabling a regulator
*
+ * @poll_enabled_time: The polling interval (in uS) to use while checking that
+ * the regulator was actually enabled. At most enable_time.
+ *
* @of_map_mode: Maps a hardware mode defined in a DeviceTree to a standard mode
*/
struct regulator_desc {
const char *name;
const char *supply_name;
const char *of_match;
+ bool of_match_full_name;
const char *regulators_node;
int (*of_parse_cb)(struct device_node *,
const struct regulator_desc *,
@@ -317,6 +370,7 @@ struct regulator_desc {
int id;
unsigned int continuous_voltage_range:1;
unsigned n_voltages;
+ unsigned int n_current_limits;
const struct regulator_ops *ops;
int irq;
enum regulator_type type;
@@ -329,13 +383,19 @@ struct regulator_desc {
unsigned int ramp_delay;
int min_dropout_uV;
- const struct regulator_linear_range *linear_ranges;
+ const struct linear_range *linear_ranges;
+ const unsigned int *linear_range_selectors;
+
int n_linear_ranges;
const unsigned int *volt_table;
+ const unsigned int *curr_table;
+ unsigned int vsel_range_reg;
+ unsigned int vsel_range_mask;
unsigned int vsel_reg;
unsigned int vsel_mask;
+ unsigned int vsel_step;
unsigned int csel_reg;
unsigned int csel_mask;
unsigned int apply_reg;
@@ -359,11 +419,17 @@ struct regulator_desc {
unsigned int pull_down_reg;
unsigned int pull_down_mask;
unsigned int pull_down_val_on;
+ unsigned int ramp_reg;
+ unsigned int ramp_mask;
+ const unsigned int *ramp_delay_table;
+ unsigned int n_ramp_values;
unsigned int enable_time;
unsigned int off_on_delay;
+ unsigned int poll_enabled_time;
+
unsigned int (*of_map_mode)(unsigned int mode);
};
@@ -381,12 +447,7 @@ struct regulator_desc {
* NULL).
* @regmap: regmap to use for core regmap helpers if dev_get_regmap() is
* insufficient.
- * @ena_gpio_initialized: GPIO controlling regulator enable was properly
- * initialized, meaning that >= 0 is a valid gpio
- * identifier and < 0 is a non existent gpio.
- * @ena_gpio: GPIO controlling regulator enable.
- * @ena_gpio_invert: Sense for GPIO enable control.
- * @ena_gpio_flags: Flags to use when calling gpio_request_one()
+ * @ena_gpiod: GPIO controlling regulator enable.
*/
struct regulator_config {
struct device *dev;
@@ -395,10 +456,146 @@ struct regulator_config {
struct device_node *of_node;
struct regmap *regmap;
- bool ena_gpio_initialized;
- int ena_gpio;
- unsigned int ena_gpio_invert:1;
- unsigned int ena_gpio_flags;
+ struct gpio_desc *ena_gpiod;
+};
+
+/**
+ * struct regulator_err_state - regulator error/notification status
+ *
+ * @rdev: Regulator which status the struct indicates.
+ * @notifs: Events which have occurred on the regulator.
+ * @errors: Errors which are active on the regulator.
+ * @possible_errs: Errors which can be signaled (by given IRQ).
+ */
+struct regulator_err_state {
+ struct regulator_dev *rdev;
+ unsigned long notifs;
+ unsigned long errors;
+ int possible_errs;
+};
+
+/**
+ * struct regulator_irq_data - regulator error/notification status data
+ *
+ * @states: Status structs for each of the associated regulators.
+ * @num_states: Amount of associated regulators.
+ * @data: Driver data pointer given at regulator_irq_desc.
+ * @opaque: Value storage for the IC driver. The core does not update this.
+ * ICs may want to store the status register value here at
+ * map_event and compare the contents in the 'renable' callback
+ * to see if new problems have been added to the status. If so,
+ * it may be desirable to return REGULATOR_ERROR_CLEARED rather
+ * than REGULATOR_ERROR_ON to allow the IRQ to fire again and to
+ * generate notifications for the new issues as well.
+ *
+ * This structure is passed to 'map_event' and 'renable' callbacks for
+ * reporting regulator status to core.
+ */
+struct regulator_irq_data {
+ struct regulator_err_state *states;
+ int num_states;
+ void *data;
+ long opaque;
+};
+
+/**
+ * struct regulator_irq_desc - notification sender for IRQ based events.
+ *
+ * @name: The visible name for the IRQ
+ * @fatal_cnt: If this IRQ is used to signal a HW-damaging condition, it may
+ * be best to shut down the regulator(s) or reboot the SoC if
+ * error handling keeps failing. If fatal_cnt is given, IRQ
+ * handling is aborted after failing fatal_cnt times and the
+ * die() callback (if populated) is called. If die() is not
+ * populated, a system poweroff is attempted in order to prevent
+ * any further damage.
+ * @reread_ms: The time to wait before attempting to re-read the status at
+ * the worker if reading the IC fails. An immediate re-read is
+ * done if the time is not specified.
+ * @irq_off_ms: The time the IRQ is kept disabled before re-evaluating the
+ * status, for devices which keep the IRQ disabled for the
+ * duration of the error. If this is not given, the IRQ is left
+ * enabled and renable is not called.
+ * @skip_off: If set to true the IRQ handler will attempt to check if any of
+ * the associated regulators are enabled prior to taking other
+ * actions. If no regulators are enabled and this is set to true
+ * a spurious IRQ is assumed and IRQ_NONE is returned.
+ * @high_prio: Boolean to indicate that high priority WQ should be used.
+ * @data: Driver private data pointer which will be passed as such to
+ * the renable, map_event and die callbacks in regulator_irq_data.
+ * @die: Protection callback. If IC status reading or recovery actions
+ * fail fatal_cnt times, this callback is called or the system is
+ * powered off. This callback should implement a final protection
+ * attempt, like disabling the regulator. If protection succeeds,
+ * die() may return 0. If anything else is returned, the core
+ * assumes that the final protection failed and attempts to
+ * perform a poweroff as a last resort.
+ * @map_event: Driver callback to map IRQ status into regulator devices with
+ * events / errors. NOTE: the callback MUST initialize both the
+ * errors and notifs for all rdevs which it signals as having
+ * active events, as the core does not clean the map data.
+ * REGULATOR_FAILED_RETRY can be returned to indicate that the
+ * status reading from the IC failed. If this is repeated for
+ * fatal_cnt times, the core will call the die() callback or
+ * power off the system as a last resort to protect the HW.
+ * @renable: Optional callback to check the status (if HW supports that)
+ * before re-enabling the IRQ. If implemented, this should clear
+ * the error flags so that errors fetched by
+ * regulator_get_error_flags() are updated. If the callback is
+ * not implemented then errors are assumed to be cleared and the
+ * IRQ is re-enabled. REGULATOR_FAILED_RETRY can be returned to
+ * indicate that the status reading from the IC failed. If this
+ * is repeated for 'fatal_cnt' times, the core will call the
+ * die() callback or, if die() is not populated, attempt to
+ * power off the system as a last resort to protect the HW.
+ * Returning zero indicates that the problem in HW has been
+ * solved and the IRQ will be re-enabled. Returning
+ * REGULATOR_ERROR_ON indicates that the error condition is
+ * still active and keeps the IRQ disabled. Please note that
+ * returning REGULATOR_ERROR_ON does not retrigger evaluating
+ * what events are active or resending notifications. If this is
+ * needed you probably want to return zero and allow the IRQ to
+ * retrigger, causing events to be re-evaluated and re-sent.
+ *
+ * This structure is used for registering regulator IRQ notification helper.
+ */
+struct regulator_irq_desc {
+ const char *name;
+ int fatal_cnt;
+ int reread_ms;
+ int irq_off_ms;
+ bool skip_off;
+ bool high_prio;
+ void *data;
+
+ int (*die)(struct regulator_irq_data *rid);
+ int (*map_event)(int irq, struct regulator_irq_data *rid,
+ unsigned long *dev_mask);
+ int (*renable)(struct regulator_irq_data *rid);
+};
+
+/*
+ * Return values for regulator IRQ helpers.
+ */
+enum {
+ REGULATOR_ERROR_CLEARED,
+ REGULATOR_FAILED_RETRY,
+ REGULATOR_ERROR_ON,
+};
+
+/*
+ * struct coupling_desc
+ *
+ * Describes the coupling of regulators. Each regulator should have
+ * at least a pointer to itself in the coupled_rdevs array.
+ * When a new coupled regulator is resolved, n_resolved is
+ * incremented.
+ */
+struct coupling_desc {
+ struct regulator_dev **coupled_rdevs;
+ struct regulator_coupler *coupler;
+ int n_resolved;
+ int n_coupled;
};
/*
@@ -424,8 +621,12 @@ struct regulator_dev {
/* lists we own */
struct list_head consumer_list; /* consumers we supply */
+ struct coupling_desc coupling_desc;
+
struct blocking_notifier_head notifier;
- struct mutex mutex; /* consumer lock */
+ struct ww_mutex mutex; /* consumer lock */
+ struct task_struct *mutex_owner;
+ int ref_cnt;
struct module *owner;
struct device dev;
struct regulation_constraints *constraints;
@@ -434,7 +635,6 @@ struct regulator_dev {
struct regmap *regmap;
struct delayed_work disable_work;
- int deferred_disables;
void *reg_data; /* regulator_dev data */
@@ -446,42 +646,99 @@ struct regulator_dev {
unsigned int is_switch:1;
/* time when this regulator was disabled last time */
- unsigned long last_off_jiffy;
+ ktime_t last_off;
+ int cached_err;
+ bool use_cached_err;
+ spinlock_t err_lock;
};
+/*
+ * Convert error flags to the corresponding notifications.
+ *
+ * Can be used by drivers which use the notification helpers to
+ * find out the correct notification flags based on the error flags.
+ * This lets drivers avoid storing both the supported notification
+ * and error flags, which may save a few bytes.
+ */
+static inline int regulator_err2notif(int err)
+{
+ switch (err) {
+ case REGULATOR_ERROR_UNDER_VOLTAGE:
+ return REGULATOR_EVENT_UNDER_VOLTAGE;
+ case REGULATOR_ERROR_OVER_CURRENT:
+ return REGULATOR_EVENT_OVER_CURRENT;
+ case REGULATOR_ERROR_REGULATION_OUT:
+ return REGULATOR_EVENT_REGULATION_OUT;
+ case REGULATOR_ERROR_FAIL:
+ return REGULATOR_EVENT_FAIL;
+ case REGULATOR_ERROR_OVER_TEMP:
+ return REGULATOR_EVENT_OVER_TEMP;
+ case REGULATOR_ERROR_UNDER_VOLTAGE_WARN:
+ return REGULATOR_EVENT_UNDER_VOLTAGE_WARN;
+ case REGULATOR_ERROR_OVER_CURRENT_WARN:
+ return REGULATOR_EVENT_OVER_CURRENT_WARN;
+ case REGULATOR_ERROR_OVER_VOLTAGE_WARN:
+ return REGULATOR_EVENT_OVER_VOLTAGE_WARN;
+ case REGULATOR_ERROR_OVER_TEMP_WARN:
+ return REGULATOR_EVENT_OVER_TEMP_WARN;
+ }
+ return 0;
+}
+
+
struct regulator_dev *
-regulator_register(const struct regulator_desc *regulator_desc,
+regulator_register(struct device *dev,
+ const struct regulator_desc *regulator_desc,
const struct regulator_config *config);
struct regulator_dev *
devm_regulator_register(struct device *dev,
const struct regulator_desc *regulator_desc,
const struct regulator_config *config);
void regulator_unregister(struct regulator_dev *rdev);
-void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev);
int regulator_notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data);
+void *devm_regulator_irq_helper(struct device *dev,
+ const struct regulator_irq_desc *d, int irq,
+ int irq_flags, int common_errs,
+ int *per_rdev_errs, struct regulator_dev **rdev,
+ int rdev_amount);
+void *regulator_irq_helper(struct device *dev,
+ const struct regulator_irq_desc *d, int irq,
+ int irq_flags, int common_errs, int *per_rdev_errs,
+ struct regulator_dev **rdev, int rdev_amount);
+void regulator_irq_helper_cancel(void **handle);
+int regulator_irq_map_event_simple(int irq, struct regulator_irq_data *rid,
+ unsigned long *dev_mask);
void *rdev_get_drvdata(struct regulator_dev *rdev);
struct device *rdev_get_dev(struct regulator_dev *rdev);
+struct regmap *rdev_get_regmap(struct regulator_dev *rdev);
int rdev_get_id(struct regulator_dev *rdev);
int regulator_mode_to_status(unsigned int);
int regulator_list_voltage_linear(struct regulator_dev *rdev,
unsigned int selector);
+int regulator_list_voltage_pickable_linear_range(struct regulator_dev *rdev,
+ unsigned int selector);
int regulator_list_voltage_linear_range(struct regulator_dev *rdev,
unsigned int selector);
int regulator_list_voltage_table(struct regulator_dev *rdev,
unsigned int selector);
int regulator_map_voltage_linear(struct regulator_dev *rdev,
int min_uV, int max_uV);
+int regulator_map_voltage_pickable_linear_range(struct regulator_dev *rdev,
+ int min_uV, int max_uV);
int regulator_map_voltage_linear_range(struct regulator_dev *rdev,
int min_uV, int max_uV);
int regulator_map_voltage_iterate(struct regulator_dev *rdev,
int min_uV, int max_uV);
int regulator_map_voltage_ascend(struct regulator_dev *rdev,
int min_uV, int max_uV);
+int regulator_get_voltage_sel_pickable_regmap(struct regulator_dev *rdev);
+int regulator_set_voltage_sel_pickable_regmap(struct regulator_dev *rdev,
+ unsigned int sel);
int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev);
int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel);
int regulator_is_enabled_regmap(struct regulator_dev *rdev);
@@ -497,6 +754,30 @@ int regulator_set_pull_down_regmap(struct regulator_dev *rdev);
int regulator_set_active_discharge_regmap(struct regulator_dev *rdev,
bool enable);
+int regulator_set_current_limit_regmap(struct regulator_dev *rdev,
+ int min_uA, int max_uA);
+int regulator_get_current_limit_regmap(struct regulator_dev *rdev);
void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data);
+int regulator_set_ramp_delay_regmap(struct regulator_dev *rdev, int ramp_delay);
+int regulator_sync_voltage_rdev(struct regulator_dev *rdev);
+
+/*
+ * Helper functions intended to be used by regulator drivers prior registering
+ * their regulators.
+ */
+int regulator_desc_list_voltage_linear_range(const struct regulator_desc *desc,
+ unsigned int selector);
+
+int regulator_desc_list_voltage_linear(const struct regulator_desc *desc,
+ unsigned int selector);
+
+#ifdef CONFIG_REGULATOR
+const char *rdev_get_name(struct regulator_dev *rdev);
+#else
+static inline const char *rdev_get_name(struct regulator_dev *rdev)
+{
+ return NULL;
+}
+#endif
#endif
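
Pulling the new driver.h fields together, here is a minimal, hypothetical descriptor sketch (not from any in-tree driver): it uses the linear_range-based REGULATOR_LINEAR_RANGE initializer, the regmap voltage-selector helpers, and the new ramp_reg/ramp_mask/ramp_delay_table plumbing. All register offsets, masks and values are invented for illustration:

#include <linux/regulator/driver.h>

/* Two linear ranges: 0.6-1.3875 V in 12.5 mV steps, then 1.4-2.975 V in 25 mV steps */
static const struct linear_range foo_buck_ranges[] = {
	REGULATOR_LINEAR_RANGE(600000, 0x00, 0x3f, 12500),
	REGULATOR_LINEAR_RANGE(1400000, 0x40, 0x7f, 25000),
};

/* Ramp rates in uV/uS, indexed by the value written under ramp_mask */
static const unsigned int foo_buck_ramp_table[] = {
	10000, 5000, 2500, 1250,
};

static const struct regulator_ops foo_buck_ops = {
	.list_voltage = regulator_list_voltage_linear_range,
	.map_voltage = regulator_map_voltage_linear_range,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.set_ramp_delay = regulator_set_ramp_delay_regmap,
	.enable = regulator_enable_regmap,
	.disable = regulator_disable_regmap,
	.is_enabled = regulator_is_enabled_regmap,
};

static const struct regulator_desc foo_buck_desc = {
	.name = "foo-buck1",
	.of_match = "buck1",
	.regulators_node = "regulators",
	.id = 0,
	.type = REGULATOR_VOLTAGE,
	.owner = THIS_MODULE,
	.n_voltages = 0x80,
	.linear_ranges = foo_buck_ranges,
	.n_linear_ranges = ARRAY_SIZE(foo_buck_ranges),
	.vsel_reg = 0x10,		/* hypothetical register layout */
	.vsel_mask = 0x7f,
	.enable_reg = 0x11,
	.enable_mask = BIT(0),
	.ramp_reg = 0x12,
	.ramp_mask = 0x03,
	.ramp_delay_table = foo_buck_ramp_table,
	.n_ramp_values = ARRAY_SIZE(foo_buck_ramp_table),
};
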
diff --git a/include/linux/regulator/fan53555.h b/include/linux/regulator/fan53555.h
index f13880e84d85..ce8df21863f0 100644
--- a/include/linux/regulator/fan53555.h
+++ b/include/linux/regulator/fan53555.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* fan53555.h - Fairchild Regulator FAN53555 Driver
*
* Copyright (C) 2012 Marvell Technology Ltd.
* Yunfan Zhang <yfzhang@marvell.com>
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __FAN53555_H__
diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h
index 48918be649d4..55319943fcc5 100644
--- a/include/linux/regulator/fixed.h
+++ b/include/linux/regulator/fixed.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* fixed.h
*
@@ -7,11 +8,6 @@
*
* Copyright (c) 2009 Nokia Corporation
* Roger Quadros <ext-roger.quadros@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
*/
#ifndef __REGULATOR_FIXED_H
@@ -24,17 +20,7 @@ struct regulator_init_data;
* @supply_name: Name of the regulator supply
* @input_supply: Name of the input regulator supply
* @microvolts: Output voltage of regulator
- * @gpio: GPIO to use for enable control
- * set to -EINVAL if not used
* @startup_delay: Start-up time in microseconds
- * @gpio_is_open_drain: Gpio pin is open drain or normal type.
- * If it is open drain type then HIGH will be set
- * through PULL-UP with setting gpio as input
- * and low will be set as gpio-output with driven
- * to low. For non-open-drain case, the gpio will
- * will be in output and drive to low/high accordingly.
- * @enable_high: Polarity of enable GPIO
- * 1 = Active high, 0 = Active low
* @enabled_at_boot: Whether regulator has been enabled at
* boot or not. 1 = Yes, 0 = No
* This is used to keep the regulator at
@@ -49,10 +35,8 @@ struct fixed_voltage_config {
const char *supply_name;
const char *input_supply;
int microvolts;
- int gpio;
unsigned startup_delay;
- unsigned gpio_is_open_drain:1;
- unsigned enable_high:1;
+ unsigned int off_on_delay;
unsigned enabled_at_boot:1;
struct regulator_init_data *init_data;
};
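
With the gpio/enable_high/gpio_is_open_drain fields gone, a board now hands the enable GPIO to the fixed regulator through a GPIO descriptor lookup table instead of platform data. A hedged board-code sketch; the device name, chip label and pin number are all hypothetical:

#include <linux/gpio/machine.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>

static struct regulator_init_data foo_vcc_init_data = {
	.constraints = {
		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
	},
};

static struct fixed_voltage_config foo_vcc_config = {
	.supply_name = "vcc-foo",
	.microvolts = 3300000,
	.startup_delay = 5000,	/* 5 ms */
	.enabled_at_boot = 1,
	.init_data = &foo_vcc_init_data,
};

/* The enable GPIO is now resolved by gpiolib, keyed on the device name */
static struct gpiod_lookup_table foo_vcc_gpio_table = {
	.dev_id = "reg-fixed-voltage.0",
	.table = {
		GPIO_LOOKUP("gpio-bank0", 5, NULL, GPIO_ACTIVE_HIGH),
		{ },
	},
};

/* Register from board init, before the regulator device probes: */
/* gpiod_add_lookup_table(&foo_vcc_gpio_table); */
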
diff --git a/include/linux/regulator/gpio-regulator.h b/include/linux/regulator/gpio-regulator.h
index 19fbd267406d..c223e50ff9f7 100644
--- a/include/linux/regulator/gpio-regulator.h
+++ b/include/linux/regulator/gpio-regulator.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* gpio-regulator.h
*
@@ -11,16 +12,13 @@
*
* Copyright (c) 2009 Nokia Corporation
* Roger Quadros <ext-roger.quadros@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
*/
#ifndef __REGULATOR_GPIO_H
#define __REGULATOR_GPIO_H
+#include <linux/gpio/consumer.h>
+
struct regulator_init_data;
enum regulator_type;
@@ -44,18 +42,15 @@ struct gpio_regulator_state {
/**
* struct gpio_regulator_config - config structure
* @supply_name: Name of the regulator supply
- * @enable_gpio: GPIO to use for enable control
- * set to -EINVAL if not used
- * @enable_high: Polarity of enable GPIO
- * 1 = Active high, 0 = Active low
+ * @input_supply: Name of the input regulator supply
* @enabled_at_boot: Whether regulator has been enabled at
* boot or not. 1 = Yes, 0 = No
* This is used to keep the regulator at
* the default state
* @startup_delay: Start-up time in microseconds
- * @gpios: Array containing the gpios needed to control
- * the setting of the regulator
- * @nr_gpios: Number of gpios
+ * @gflags: Array of GPIO configuration flags for initial
+ * states
+ * @ngpios: Number of GPIOs and configurations available
* @states: Array of gpio_regulator_state entries describing
* the gpio state for specific voltages
* @nr_states: Number of states available
@@ -68,14 +63,13 @@ struct gpio_regulator_state {
*/
struct gpio_regulator_config {
const char *supply_name;
+ const char *input_supply;
- int enable_gpio;
- unsigned enable_high:1;
unsigned enabled_at_boot:1;
unsigned startup_delay;
- struct gpio *gpios;
- int nr_gpios;
+ enum gpiod_flags *gflags;
+ int ngpios;
struct gpio_regulator_state *states;
int nr_states;
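
In the same spirit, a hedged sketch of the reworked gpio_regulator_config, where per-pin initial states are expressed as gpiod_flags and the pins themselves come from descriptor lookup rather than the removed gpios/nr_gpios array; all names and values are hypothetical:

#include <linux/kernel.h>
#include <linux/regulator/gpio-regulator.h>

/* Voltage for each combination of GPIO levels (bit 0 = first GPIO) */
static struct gpio_regulator_state foo_vmmc_states[] = {
	{ .value = 1800000, .gpios = 0x0 },
	{ .value = 3300000, .gpios = 0x1 },
};

/* Initial direction/level for each control GPIO */
static enum gpiod_flags foo_vmmc_gflags[] = {
	GPIOD_OUT_LOW,
};

static struct gpio_regulator_config foo_vmmc_config = {
	.supply_name = "vmmc",
	.enabled_at_boot = 0,
	.startup_delay = 2000,	/* 2 ms */
	.gflags = foo_vmmc_gflags,
	.ngpios = ARRAY_SIZE(foo_vmmc_gflags),
	.states = foo_vmmc_states,
	.nr_states = ARRAY_SIZE(foo_vmmc_states),
};
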
diff --git a/include/linux/regulator/lp3971.h b/include/linux/regulator/lp3971.h
index 61401649fe7d..0522e82d4716 100644
--- a/include/linux/regulator/lp3971.h
+++ b/include/linux/regulator/lp3971.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* National Semiconductors LP3971 PMIC chip client interface
*
@@ -5,20 +6,6 @@
* Author: Marek Szyprowski <m.szyprowski@samsung.com>
*
* Based on wm8400.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_REGULATOR_LP3971_H
diff --git a/include/linux/regulator/lp3972.h b/include/linux/regulator/lp3972.h
index 9bb7389b7a1e..160a3def317f 100644
--- a/include/linux/regulator/lp3972.h
+++ b/include/linux/regulator/lp3972.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* National Semiconductors LP3972 PMIC chip client interface
*
* Based on lp3971.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_REGULATOR_LP3972_H
diff --git a/include/linux/regulator/lp872x.h b/include/linux/regulator/lp872x.h
index 6029279f4eed..b62e45aa1dd3 100644
--- a/include/linux/regulator/lp872x.h
+++ b/include/linux/regulator/lp872x.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2012 Texas Instruments
*
* Author: Milo(Woogyom) Kim <milo.kim@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef __LP872X_REGULATOR_H__
@@ -14,7 +10,7 @@
#include <linux/regulator/machine.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#define LP872X_MAX_REGULATORS 9
@@ -44,11 +40,6 @@ enum lp872x_regulator_id {
LP872X_ID_MAX,
};
-enum lp872x_dvs_state {
- DVS_LOW = GPIOF_OUT_INIT_LOW,
- DVS_HIGH = GPIOF_OUT_INIT_HIGH,
-};
-
enum lp872x_dvs_sel {
SEL_V1,
SEL_V2,
@@ -56,14 +47,14 @@ enum lp872x_dvs_sel {
/**
* lp872x_dvs
- * @gpio : gpio pin number for dvs control
+ * @gpio : gpio descriptor for dvs control
* @vsel : dvs selector for buck v1 or buck v2 register
* @init_state : initial dvs pin state
*/
struct lp872x_dvs {
- int gpio;
+ struct gpio_desc *gpio;
enum lp872x_dvs_sel vsel;
- enum lp872x_dvs_state init_state;
+ enum gpiod_flags init_state;
};
/**
@@ -82,14 +73,14 @@ struct lp872x_regulator_data {
* @update_config : if LP872X_GENERAL_CFG register is updated, set true
* @regulator_data : platform regulator id and init data
* @dvs : dvs data for buck voltage control
- * @enable_gpio : gpio pin number for enable control
+ * @enable_gpio : gpio descriptor for enable control
*/
struct lp872x_platform_data {
u8 general_config;
bool update_config;
struct lp872x_regulator_data regulator_data[LP872X_MAX_REGULATORS];
struct lp872x_dvs *dvs;
- int enable_gpio;
+ struct gpio_desc *enable_gpio;
};
#endif
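
Since lp872x_dvs and lp872x_platform_data now carry GPIO descriptors rather than pin numbers, whoever builds the platform data must resolve the descriptors itself. A minimal, hypothetical sketch; the "dvs" and "enable" consumer names are invented:

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/lp872x.h>

static struct lp872x_dvs foo_dvs = {
	.vsel = SEL_V1,
	.init_state = GPIOD_OUT_LOW,
};

static int foo_setup_lp872x(struct device *dev,
			    struct lp872x_platform_data *pdata)
{
	/* Resolve the DVS control pin to a descriptor */
	foo_dvs.gpio = devm_gpiod_get(dev, "dvs", foo_dvs.init_state);
	if (IS_ERR(foo_dvs.gpio))
		return PTR_ERR(foo_dvs.gpio);

	pdata->dvs = &foo_dvs;

	/* The enable pin is optional on some boards */
	pdata->enable_gpio = devm_gpiod_get_optional(dev, "enable",
						     GPIOD_OUT_HIGH);
	if (IS_ERR(pdata->enable_gpio))
		return PTR_ERR(pdata->enable_gpio);

	return 0;
}
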
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 9cd4fef37203..621b7f4a3639 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* machine.h -- SoC Regulator support, machine/board driver API.
*
@@ -5,10 +6,6 @@
*
* Author: Liam Girdwood <lrg@slimlogic.co.uk>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Regulator Machine/Board Interface.
*/
@@ -42,6 +39,16 @@ struct regulator;
#define REGULATOR_CHANGE_DRMS 0x10
#define REGULATOR_CHANGE_BYPASS 0x20
+/*
+ * operations in suspend mode
+ * DO_NOTHING_IN_SUSPEND - the default value
+ * DISABLE_IN_SUSPEND - turn off regulator in suspend states
+ * ENABLE_IN_SUSPEND - keep regulator on in suspend states
+ */
+#define DO_NOTHING_IN_SUSPEND 0
+#define DISABLE_IN_SUSPEND 1
+#define ENABLE_IN_SUSPEND 2
+
/* Regulator active discharge flags */
enum regulator_active_discharge {
REGULATOR_ACTIVE_DISCHARGE_DEFAULT,
@@ -56,16 +63,32 @@ enum regulator_active_discharge {
* state. One of enabled or disabled must be set for the
* configuration to be applied.
*
- * @uV: Operating voltage during suspend.
+ * @uV: Default operating voltage during suspend; it can be adjusted
+ * within [min_uV, max_uV].
+ * @min_uV: Minimum suspend voltage that may be set.
+ * @max_uV: Maximum suspend voltage that may be set.
* @mode: Operating mode during suspend.
- * @enabled: Enabled during suspend.
- * @disabled: Disabled during suspend.
+ * @enabled: Operation during suspend, one of:
+ * - DO_NOTHING_IN_SUSPEND
+ * - DISABLE_IN_SUSPEND
+ * - ENABLE_IN_SUSPEND
+ * @changeable: Whether this state can be switched between enabled/disabled.
*/
struct regulator_state {
- int uV; /* suspend voltage */
- unsigned int mode; /* suspend regulator operating mode */
- int enabled; /* is regulator enabled in this suspend state */
- int disabled; /* is the regulator disabled in this suspend state */
+ int uV;
+ int min_uV;
+ int max_uV;
+ unsigned int mode;
+ int enabled;
+ bool changeable;
+};
+
+#define REGULATOR_NOTIF_LIMIT_DISABLE -1
+#define REGULATOR_NOTIF_LIMIT_ENABLE -2
+struct notification_limit {
+ int prot;
+ int err;
+ int warn;
};
/**
@@ -85,6 +108,13 @@ struct regulator_state {
* @ilim_uA: Maximum input current.
* @system_load: Load that isn't captured by any consumer requests.
*
+ * @over_curr_limits: Limits for acting on over current.
+ * @over_voltage_limits: Limits for acting on over voltage.
+ * @under_voltage_limits: Limits for acting on under voltage.
+ * @temp_limits: Limits for acting on over temperature.
+ *
+ * @max_spread: Max possible spread between coupled regulators
+ * @max_uV_step: Max possible step change in voltage
* @valid_modes_mask: Mask of modes which may be configured by consumers.
* @valid_ops_mask: Operations which may be performed by consumers.
*
@@ -99,6 +129,11 @@ struct regulator_state {
* @pull_down: Enable pull down when regulator is disabled.
* @over_current_protection: Auto disable on over current event.
*
+ * @over_current_detection: Configure over current limits.
+ * @over_voltage_detection: Configure over voltage limits.
+ * @under_voltage_detection: Configure under voltage limits.
+ * @over_temp_detection: Configure over temperature limits.
+ *
* @input_uV: Input voltage for regulator when supplied by another regulator.
*
* @state_disk: State for regulator when system is suspended in disk mode.
@@ -136,6 +171,12 @@ struct regulation_constraints {
int system_load;
+ /* used for coupled regulators */
+ u32 *max_spread;
+
+ /* used for changing voltage in steps */
+ int max_uV_step;
+
/* valid regulator operating modes for this machine */
unsigned int valid_modes_mask;
@@ -149,6 +190,10 @@ struct regulation_constraints {
struct regulator_state state_disk;
struct regulator_state state_mem;
struct regulator_state state_standby;
+ struct notification_limit over_curr_limits;
+ struct notification_limit over_voltage_limits;
+ struct notification_limit under_voltage_limits;
+ struct notification_limit temp_limits;
suspend_state_t initial_state; /* suspend state to set at init */
/* mode to set on startup */
@@ -170,6 +215,10 @@ struct regulation_constraints {
unsigned soft_start:1; /* ramp voltage slowly */
unsigned pull_down:1; /* pull down resistor when regulator off */
unsigned over_current_protection:1; /* auto disable on over current */
+ unsigned over_current_detection:1; /* notify on over current */
+ unsigned over_voltage_detection:1; /* notify on over voltage */
+ unsigned under_voltage_detection:1; /* notify on under voltage */
+ unsigned over_temp_detection:1; /* notify on over temperature */
};
/**
@@ -225,12 +274,12 @@ struct regulator_init_data {
#ifdef CONFIG_REGULATOR
void regulator_has_full_constraints(void);
-int regulator_suspend_prepare(suspend_state_t state);
-int regulator_suspend_finish(void);
#else
static inline void regulator_has_full_constraints(void)
{
}
+#endif
+
static inline int regulator_suspend_prepare(suspend_state_t state)
{
return 0;
@@ -239,6 +288,5 @@ static inline int regulator_suspend_finish(void)
{
return 0;
}
-#endif
#endif
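
Tying the machine.h additions together, here is a hedged constraints sketch using the new suspend-state fields and notification limits; every name, voltage and current value here is invented for illustration:

#include <linux/regulator/machine.h>

static struct regulator_init_data foo_vddcore_init = {
	.constraints = {
		.name = "vdd-core",
		.min_uV = 800000,
		.max_uV = 1200000,
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
				  REGULATOR_CHANGE_STATUS,
		/* Keep the rail up in suspend-to-mem, at a lowered voltage */
		.state_mem = {
			.uV = 900000,
			.min_uV = 850000,
			.max_uV = 950000,
			.enabled = ENABLE_IN_SUSPEND,
			.changeable = true,
		},
		/* Warn at 2 A, flag an error at 2.5 A, hard-protect at 3 A */
		.over_curr_limits = {
			.warn = 2000000,
			.err = 2500000,
			.prot = 3000000,
		},
		.over_current_detection = 1,
	},
};
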
diff --git a/include/linux/regulator/max1586.h b/include/linux/regulator/max1586.h
index cedd0febe882..969f4c948480 100644
--- a/include/linux/regulator/max1586.h
+++ b/include/linux/regulator/max1586.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* max1586.h -- Voltage regulation for the Maxim 1586
*
* Copyright (C) 2008 Robert Jarzmik
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef REGULATOR_MAX1586
diff --git a/include/linux/regulator/max8649.h b/include/linux/regulator/max8649.h
index 417d14ecd5cb..bc9b9c98c1ad 100644
--- a/include/linux/regulator/max8649.h
+++ b/include/linux/regulator/max8649.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Interface of Maxim max8649
*
* Copyright (C) 2009-2010 Marvell International Ltd.
* Haojian Zhuang <haojian.zhuang@marvell.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_REGULATOR_MAX8649_H
diff --git a/include/linux/regulator/max8660.h b/include/linux/regulator/max8660.h
index f8a6a4844864..e1b9f9020eed 100644
--- a/include/linux/regulator/max8660.h
+++ b/include/linux/regulator/max8660.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* max8660.h -- Voltage regulation for the Maxim 8660/8661
*
* Copyright (C) 2009 Wolfram Sang, Pengutronix e.K.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_REGULATOR_MAX8660_H
diff --git a/include/linux/regulator/max8952.h b/include/linux/regulator/max8952.h
index 4dbb63a1d4ab..8712c091abf0 100644
--- a/include/linux/regulator/max8952.h
+++ b/include/linux/regulator/max8952.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* max8952.h - Voltage regulation for the Maxim 8952
*
* Copyright (C) 2010 Samsung Electrnoics
* MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef REGULATOR_MAX8952
@@ -118,10 +105,6 @@ enum {
#define MAX8952_NUM_DVS_MODE 4
struct max8952_platform_data {
- int gpio_vid0;
- int gpio_vid1;
- int gpio_en;
-
u32 default_mode;
u32 dvs_mode[MAX8952_NUM_DVS_MODE]; /* MAX8952_DVS_MODEx_XXXXmV */
diff --git a/include/linux/regulator/max8973-regulator.h b/include/linux/regulator/max8973-regulator.h
index 2fcb9980262a..8313e7ed6aec 100644
--- a/include/linux/regulator/max8973-regulator.h
+++ b/include/linux/regulator/max8973-regulator.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* max8973-regulator.h -- MAXIM 8973 regulator
*
@@ -7,21 +8,6 @@
* Copyright (C) 2012 NVIDIA Corporation
* Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
*/
#ifndef __LINUX_REGULATOR_MAX8973_H
diff --git a/include/linux/regulator/mt6311.h b/include/linux/regulator/mt6311.h
index 8473259395b6..eb20c9d1ad46 100644
--- a/include/linux/regulator/mt6311.h
+++ b/include/linux/regulator/mt6311.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015 MediaTek Inc.
* Author: Henry Chen <henryc.chen@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_REGULATOR_MT6311_H
diff --git a/include/linux/regulator/mt6315-regulator.h b/include/linux/regulator/mt6315-regulator.h
new file mode 100644
index 000000000000..3b80d3f3910c
--- /dev/null
+++ b/include/linux/regulator/mt6315-regulator.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6315_H
+#define __LINUX_REGULATOR_MT6315_H
+
+#define MT6315_RP 3
+#define MT6315_PP 6
+#define MT6315_SP 7
+
+enum {
+ MT6315_VBUCK1 = 0,
+ MT6315_VBUCK2,
+ MT6315_VBUCK3,
+ MT6315_VBUCK4,
+ MT6315_VBUCK_MAX,
+};
+
+/* Register */
+#define MT6315_TOP2_ELR7 0x139
+#define MT6315_TOP_TMA_KEY 0x39F
+#define MT6315_TOP_TMA_KEY_H 0x3A0
+#define MT6315_BUCK_TOP_CON0 0x1440
+#define MT6315_BUCK_TOP_CON1 0x1443
+#define MT6315_BUCK_TOP_ELR0 0x1449
+#define MT6315_BUCK_TOP_ELR2 0x144B
+#define MT6315_BUCK_TOP_ELR4 0x144D
+#define MT6315_BUCK_TOP_ELR6 0x144F
+#define MT6315_VBUCK1_DBG0 0x1499
+#define MT6315_VBUCK1_DBG4 0x149D
+#define MT6315_VBUCK2_DBG0 0x1519
+#define MT6315_VBUCK2_DBG4 0x151D
+#define MT6315_VBUCK3_DBG0 0x1599
+#define MT6315_VBUCK3_DBG4 0x159D
+#define MT6315_VBUCK4_DBG0 0x1619
+#define MT6315_VBUCK4_DBG4 0x161D
+#define MT6315_BUCK_TOP_4PHASE_ANA_CON42 0x16B1
+
+#define PROTECTION_KEY_H 0x9C
+#define PROTECTION_KEY 0xEA
+
+#endif /* __LINUX_REGULATOR_MT6315_H */
diff --git a/include/linux/regulator/mt6323-regulator.h b/include/linux/regulator/mt6323-regulator.h
index 67011cd1ce55..c8103b8175b3 100644
--- a/include/linux/regulator/mt6323-regulator.h
+++ b/include/linux/regulator/mt6323-regulator.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: Chen Zhong <chen.zhong@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_REGULATOR_MT6323_H
diff --git a/include/linux/regulator/mt6331-regulator.h b/include/linux/regulator/mt6331-regulator.h
new file mode 100644
index 000000000000..2801a9879c14
--- /dev/null
+++ b/include/linux/regulator/mt6331-regulator.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __LINUX_REGULATOR_MT6331_H
+#define __LINUX_REGULATOR_MT6331_H
+
+enum {
+ /* BUCK */
+ MT6331_ID_VDVFS11 = 0,
+ MT6331_ID_VDVFS12,
+ MT6331_ID_VDVFS13,
+ MT6331_ID_VDVFS14,
+ MT6331_ID_VCORE2,
+ MT6331_ID_VIO18,
+ /* LDO */
+ MT6331_ID_VTCXO1,
+ MT6331_ID_VTCXO2,
+ MT6331_ID_AVDD32_AUD,
+ MT6331_ID_VAUXA32,
+ MT6331_ID_VCAMA,
+ MT6331_ID_VIO28,
+ MT6331_ID_VCAM_AF,
+ MT6331_ID_VMC,
+ MT6331_ID_VMCH,
+ MT6331_ID_VEMC33,
+ MT6331_ID_VGP1,
+ MT6331_ID_VSIM1,
+ MT6331_ID_VSIM2,
+ MT6331_ID_VMIPI,
+ MT6331_ID_VIBR,
+ MT6331_ID_VGP4,
+ MT6331_ID_VCAMD,
+ MT6331_ID_VUSB10,
+ MT6331_ID_VCAM_IO,
+ MT6331_ID_VSRAM_DVFS1,
+ MT6331_ID_VGP2,
+ MT6331_ID_VGP3,
+ MT6331_ID_VRTC,
+ MT6331_ID_VDIG18,
+ MT6331_ID_VREG_MAX
+};
+
+#endif /* __LINUX_REGULATOR_MT6331_H */
diff --git a/include/linux/regulator/mt6332-regulator.h b/include/linux/regulator/mt6332-regulator.h
new file mode 100644
index 000000000000..af5e3ed31029
--- /dev/null
+++ b/include/linux/regulator/mt6332-regulator.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __LINUX_REGULATOR_MT6332_H
+#define __LINUX_REGULATOR_MT6332_H
+
+enum {
+ /* BUCK */
+ MT6332_ID_VDRAM = 0,
+ MT6332_ID_VDVFS2,
+ MT6332_ID_VPA,
+ MT6332_ID_VRF1,
+ MT6332_ID_VRF2,
+ MT6332_ID_VSBST,
+ /* LDO */
+ MT6332_ID_VAUXB32,
+ MT6332_ID_VBIF28,
+ MT6332_ID_VDIG18,
+ MT6332_ID_VSRAM_DVFS2,
+ MT6332_ID_VUSB33,
+ MT6332_ID_VREG_MAX
+};
+
+#endif /* __LINUX_REGULATOR_MT6332_H */
diff --git a/include/linux/regulator/mt6357-regulator.h b/include/linux/regulator/mt6357-regulator.h
new file mode 100644
index 000000000000..238b1ee77ea6
--- /dev/null
+++ b/include/linux/regulator/mt6357-regulator.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6357_H
+#define __LINUX_REGULATOR_MT6357_H
+
+enum {
+ /* Bucks */
+ MT6357_ID_VCORE,
+ MT6357_ID_VMODEM,
+ MT6357_ID_VPA,
+ MT6357_ID_VPROC,
+ MT6357_ID_VS1,
+
+ /* LDOs */
+ MT6357_ID_VAUX18,
+ MT6357_ID_VAUD28,
+ MT6357_ID_VCAMA,
+ MT6357_ID_VCAMD,
+ MT6357_ID_VCAMIO,
+ MT6357_ID_VCN18,
+ MT6357_ID_VCN28,
+ MT6357_ID_VCN33_BT,
+ MT6357_ID_VCN33_WIFI,
+ MT6357_ID_VDRAM,
+ MT6357_ID_VEFUSE,
+ MT6357_ID_VEMC,
+ MT6357_ID_VFE28,
+ MT6357_ID_VIBR,
+ MT6357_ID_VIO18,
+ MT6357_ID_VIO28,
+ MT6357_ID_VLDO28,
+ MT6357_ID_VMC,
+ MT6357_ID_VMCH,
+ MT6357_ID_VRF12,
+ MT6357_ID_VRF18,
+ MT6357_ID_VSIM1,
+ MT6357_ID_VSIM2,
+ MT6357_ID_VSRAM_OTHERS,
+ MT6357_ID_VSRAM_PROC,
+ MT6357_ID_VUSB33,
+ MT6357_ID_VXO22,
+
+ MT6357_ID_RG_MAX,
+};
+
+#define MT6357_MAX_REGULATOR MT6357_ID_RG_MAX
+
+#endif /* __LINUX_REGULATOR_MT6357_H */
diff --git a/include/linux/regulator/mt6358-regulator.h b/include/linux/regulator/mt6358-regulator.h
new file mode 100644
index 000000000000..bdcf83cd719e
--- /dev/null
+++ b/include/linux/regulator/mt6358-regulator.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6358_H
+#define __LINUX_REGULATOR_MT6358_H
+
+enum {
+ MT6358_ID_VDRAM1 = 0,
+ MT6358_ID_VCORE,
+ MT6358_ID_VPA,
+ MT6358_ID_VPROC11,
+ MT6358_ID_VPROC12,
+ MT6358_ID_VGPU,
+ MT6358_ID_VS2,
+ MT6358_ID_VMODEM,
+ MT6358_ID_VS1,
+ MT6358_ID_VDRAM2 = 9,
+ MT6358_ID_VSIM1,
+ MT6358_ID_VIBR,
+ MT6358_ID_VRF12,
+ MT6358_ID_VIO18,
+ MT6358_ID_VUSB,
+ MT6358_ID_VCAMIO,
+ MT6358_ID_VCAMD,
+ MT6358_ID_VCN18,
+ MT6358_ID_VFE28,
+ MT6358_ID_VSRAM_PROC11,
+ MT6358_ID_VCN28,
+ MT6358_ID_VSRAM_OTHERS,
+ MT6358_ID_VSRAM_GPU,
+ MT6358_ID_VXO22,
+ MT6358_ID_VEFUSE,
+ MT6358_ID_VAUX18,
+ MT6358_ID_VMCH,
+ MT6358_ID_VBIF28,
+ MT6358_ID_VSRAM_PROC12,
+ MT6358_ID_VCAMA1,
+ MT6358_ID_VEMC,
+ MT6358_ID_VIO28,
+ MT6358_ID_VA12,
+ MT6358_ID_VRF18,
+ MT6358_ID_VCN33_BT,
+ MT6358_ID_VCN33_WIFI,
+ MT6358_ID_VCAMA2,
+ MT6358_ID_VMC,
+ MT6358_ID_VLDO28,
+ MT6358_ID_VAUD28,
+ MT6358_ID_VSIM2,
+ MT6358_ID_VCORE_SSHUB,
+ MT6358_ID_VSRAM_OTHERS_SSHUB,
+ MT6358_ID_RG_MAX,
+};
+
+enum {
+ MT6366_ID_VDRAM1 = 0,
+ MT6366_ID_VCORE,
+ MT6366_ID_VPA,
+ MT6366_ID_VPROC11,
+ MT6366_ID_VPROC12,
+ MT6366_ID_VGPU,
+ MT6366_ID_VS2,
+ MT6366_ID_VMODEM,
+ MT6366_ID_VS1,
+ MT6366_ID_VDRAM2,
+ MT6366_ID_VSIM1,
+ MT6366_ID_VIBR,
+ MT6366_ID_VRF12,
+ MT6366_ID_VIO18,
+ MT6366_ID_VUSB,
+ MT6366_ID_VCN18,
+ MT6366_ID_VFE28,
+ MT6366_ID_VSRAM_PROC11,
+ MT6366_ID_VCN28,
+ MT6366_ID_VSRAM_OTHERS,
+ MT6366_ID_VSRAM_GPU,
+ MT6366_ID_VXO22,
+ MT6366_ID_VEFUSE,
+ MT6366_ID_VAUX18,
+ MT6366_ID_VMCH,
+ MT6366_ID_VBIF28,
+ MT6366_ID_VSRAM_PROC12,
+ MT6366_ID_VEMC,
+ MT6366_ID_VIO28,
+ MT6366_ID_VA12,
+ MT6366_ID_VRF18,
+ MT6366_ID_VCN33_BT,
+ MT6366_ID_VCN33_WIFI,
+ MT6366_ID_VMC,
+ MT6366_ID_VAUD28,
+ MT6366_ID_VSIM2,
+ MT6366_ID_VCORE_SSHUB,
+ MT6366_ID_VSRAM_OTHERS_SSHUB,
+ MT6366_ID_RG_MAX,
+};
+
+#define MT6358_MAX_REGULATOR MT6358_ID_RG_MAX
+#define MT6366_MAX_REGULATOR MT6366_ID_RG_MAX
+
+#endif /* __LINUX_REGULATOR_MT6358_H */
diff --git a/include/linux/regulator/mt6359-regulator.h b/include/linux/regulator/mt6359-regulator.h
new file mode 100644
index 000000000000..6d6e5a58f482
--- /dev/null
+++ b/include/linux/regulator/mt6359-regulator.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6359_H
+#define __LINUX_REGULATOR_MT6359_H
+
+enum {
+ MT6359_ID_VS1 = 0,
+ MT6359_ID_VGPU11,
+ MT6359_ID_VMODEM,
+ MT6359_ID_VPU,
+ MT6359_ID_VCORE,
+ MT6359_ID_VS2,
+ MT6359_ID_VPA,
+ MT6359_ID_VPROC2,
+ MT6359_ID_VPROC1,
+ MT6359_ID_VCORE_SSHUB,
+ MT6359_ID_VGPU11_SSHUB = MT6359_ID_VCORE_SSHUB,
+ MT6359_ID_VAUD18 = 10,
+ MT6359_ID_VSIM1,
+ MT6359_ID_VIBR,
+ MT6359_ID_VRF12,
+ MT6359_ID_VUSB,
+ MT6359_ID_VSRAM_PROC2,
+ MT6359_ID_VIO18,
+ MT6359_ID_VCAMIO,
+ MT6359_ID_VCN18,
+ MT6359_ID_VFE28,
+ MT6359_ID_VCN13,
+ MT6359_ID_VCN33_1_BT,
+ MT6359_ID_VCN33_1_WIFI,
+ MT6359_ID_VAUX18,
+ MT6359_ID_VSRAM_OTHERS,
+ MT6359_ID_VEFUSE,
+ MT6359_ID_VXO22,
+ MT6359_ID_VRFCK,
+ MT6359_ID_VBIF28,
+ MT6359_ID_VIO28,
+ MT6359_ID_VEMC,
+ MT6359_ID_VCN33_2_BT,
+ MT6359_ID_VCN33_2_WIFI,
+ MT6359_ID_VA12,
+ MT6359_ID_VA09,
+ MT6359_ID_VRF18,
+ MT6359_ID_VSRAM_MD,
+ MT6359_ID_VUFS,
+ MT6359_ID_VM18,
+ MT6359_ID_VBBCK,
+ MT6359_ID_VSRAM_PROC1,
+ MT6359_ID_VSIM2,
+ MT6359_ID_VSRAM_OTHERS_SSHUB,
+ MT6359_ID_RG_MAX,
+};
+
+#define MT6359_MAX_REGULATOR MT6359_ID_RG_MAX
+
+#endif /* __LINUX_REGULATOR_MT6359_H */
diff --git a/include/linux/regulator/mt6380-regulator.h b/include/linux/regulator/mt6380-regulator.h
new file mode 100644
index 000000000000..9e90ce9b14e6
--- /dev/null
+++ b/include/linux/regulator/mt6380-regulator.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Chenglin Xu <chenglin.xu@mediatek.com>
+ */
+
+#ifndef __LINUX_REGULATOR_mt6380_H
+#define __LINUX_REGULATOR_mt6380_H
+
+enum {
+ MT6380_ID_VCPU = 0,
+ MT6380_ID_VCORE,
+ MT6380_ID_VRF,
+ MT6380_ID_VMLDO,
+ MT6380_ID_VALDO,
+ MT6380_ID_VPHYLDO,
+ MT6380_ID_VDDRLDO,
+ MT6380_ID_VTLDO,
+ MT6380_ID_RG_MAX,
+};
+
+#define MT6380_MAX_REGULATOR MT6380_ID_RG_MAX
+
+#endif /* __LINUX_REGULATOR_mt6380_H */
diff --git a/include/linux/regulator/mt6397-regulator.h b/include/linux/regulator/mt6397-regulator.h
index 30cc5963e265..99b266711b3e 100644
--- a/include/linux/regulator/mt6397-regulator.h
+++ b/include/linux/regulator/mt6397-regulator.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2014 MediaTek Inc.
* Author: Flora Fu <flora.fu@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_REGULATOR_MT6397_H
diff --git a/include/linux/regulator/of_regulator.h b/include/linux/regulator/of_regulator.h
index 763953f7e3b8..df7f154a2ed5 100644
--- a/include/linux/regulator/of_regulator.h
+++ b/include/linux/regulator/of_regulator.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* OpenFirmware regulator support routines
*
diff --git a/include/linux/regulator/pca9450.h b/include/linux/regulator/pca9450.h
new file mode 100644
index 000000000000..3c01c2bf84f5
--- /dev/null
+++ b/include/linux/regulator/pca9450.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright 2020 NXP. */
+
+#ifndef __LINUX_REG_PCA9450_H__
+#define __LINUX_REG_PCA9450_H__
+
+#include <linux/regmap.h>
+
+enum pca9450_chip_type {
+ PCA9450_TYPE_PCA9450A = 0,
+ PCA9450_TYPE_PCA9450BC,
+ PCA9450_TYPE_AMOUNT,
+};
+
+enum {
+ PCA9450_BUCK1 = 0,
+ PCA9450_BUCK2,
+ PCA9450_BUCK3,
+ PCA9450_BUCK4,
+ PCA9450_BUCK5,
+ PCA9450_BUCK6,
+ PCA9450_LDO1,
+ PCA9450_LDO2,
+ PCA9450_LDO3,
+ PCA9450_LDO4,
+ PCA9450_LDO5,
+ PCA9450_REGULATOR_CNT,
+};
+
+enum {
+ PCA9450_DVS_LEVEL_RUN = 0,
+ PCA9450_DVS_LEVEL_STANDBY,
+ PCA9450_DVS_LEVEL_MAX,
+};
+
+#define PCA9450_BUCK1_VOLTAGE_NUM 0x80
+#define PCA9450_BUCK2_VOLTAGE_NUM 0x80
+#define PCA9450_BUCK3_VOLTAGE_NUM 0x80
+#define PCA9450_BUCK4_VOLTAGE_NUM 0x80
+
+#define PCA9450_BUCK5_VOLTAGE_NUM 0x80
+#define PCA9450_BUCK6_VOLTAGE_NUM 0x80
+
+#define PCA9450_LDO1_VOLTAGE_NUM 0x08
+#define PCA9450_LDO2_VOLTAGE_NUM 0x08
+#define PCA9450_LDO3_VOLTAGE_NUM 0x20
+#define PCA9450_LDO4_VOLTAGE_NUM 0x20
+#define PCA9450_LDO5_VOLTAGE_NUM 0x10
+
+enum {
+ PCA9450_REG_DEV_ID = 0x00,
+ PCA9450_REG_INT1 = 0x01,
+ PCA9450_REG_INT1_MSK = 0x02,
+ PCA9450_REG_STATUS1 = 0x03,
+ PCA9450_REG_STATUS2 = 0x04,
+ PCA9450_REG_PWRON_STAT = 0x05,
+ PCA9450_REG_SWRST = 0x06,
+ PCA9450_REG_PWRCTRL = 0x07,
+ PCA9450_REG_RESET_CTRL = 0x08,
+ PCA9450_REG_CONFIG1 = 0x09,
+ PCA9450_REG_CONFIG2 = 0x0A,
+ PCA9450_REG_BUCK123_DVS = 0x0C,
+ PCA9450_REG_BUCK1OUT_LIMIT = 0x0D,
+ PCA9450_REG_BUCK2OUT_LIMIT = 0x0E,
+ PCA9450_REG_BUCK3OUT_LIMIT = 0x0F,
+ PCA9450_REG_BUCK1CTRL = 0x10,
+ PCA9450_REG_BUCK1OUT_DVS0 = 0x11,
+ PCA9450_REG_BUCK1OUT_DVS1 = 0x12,
+ PCA9450_REG_BUCK2CTRL = 0x13,
+ PCA9450_REG_BUCK2OUT_DVS0 = 0x14,
+ PCA9450_REG_BUCK2OUT_DVS1 = 0x15,
+ PCA9450_REG_BUCK3CTRL = 0x16,
+ PCA9450_REG_BUCK3OUT_DVS0 = 0x17,
+ PCA9450_REG_BUCK3OUT_DVS1 = 0x18,
+ PCA9450_REG_BUCK4CTRL = 0x19,
+ PCA9450_REG_BUCK4OUT = 0x1A,
+ PCA9450_REG_BUCK5CTRL = 0x1B,
+ PCA9450_REG_BUCK5OUT = 0x1C,
+ PCA9450_REG_BUCK6CTRL = 0x1D,
+ PCA9450_REG_BUCK6OUT = 0x1E,
+ PCA9450_REG_LDO_AD_CTRL = 0x20,
+ PCA9450_REG_LDO1CTRL = 0x21,
+ PCA9450_REG_LDO2CTRL = 0x22,
+ PCA9450_REG_LDO3CTRL = 0x23,
+ PCA9450_REG_LDO4CTRL = 0x24,
+ PCA9450_REG_LDO5CTRL_L = 0x25,
+ PCA9450_REG_LDO5CTRL_H = 0x26,
+ PCA9450_REG_LOADSW_CTRL = 0x2A,
+ PCA9450_REG_VRFLT1_STS = 0x2B,
+ PCA9450_REG_VRFLT2_STS = 0x2C,
+ PCA9450_REG_VRFLT1_MASK = 0x2D,
+ PCA9450_REG_VRFLT2_MASK = 0x2E,
+ PCA9450_MAX_REGISTER = 0x2F,
+};
+
+/* PCA9450 BUCK ENMODE bits */
+#define BUCK_ENMODE_OFF 0x00
+#define BUCK_ENMODE_ONREQ 0x01
+#define BUCK_ENMODE_ONREQ_STBYREQ 0x02
+#define BUCK_ENMODE_ON 0x03
+
+/* PCA9450_REG_BUCK1_CTRL bits */
+#define BUCK1_RAMP_MASK 0xC0
+#define BUCK1_RAMP_25MV 0x0
+#define BUCK1_RAMP_12P5MV 0x1
+#define BUCK1_RAMP_6P25MV 0x2
+#define BUCK1_RAMP_3P125MV 0x3
+#define BUCK1_DVS_CTRL 0x10
+#define BUCK1_AD 0x08
+#define BUCK1_FPWM 0x04
+#define BUCK1_ENMODE_MASK 0x03
+
+/* PCA9450_REG_BUCK2_CTRL bits */
+#define BUCK2_RAMP_MASK 0xC0
+#define BUCK2_RAMP_25MV 0x0
+#define BUCK2_RAMP_12P5MV 0x1
+#define BUCK2_RAMP_6P25MV 0x2
+#define BUCK2_RAMP_3P125MV 0x3
+#define BUCK2_DVS_CTRL 0x10
+#define BUCK2_AD 0x08
+#define BUCK2_FPWM 0x04
+#define BUCK2_ENMODE_MASK 0x03
+
+/* PCA9450_REG_BUCK3_CTRL bits */
+#define BUCK3_RAMP_MASK 0xC0
+#define BUCK3_RAMP_25MV 0x0
+#define BUCK3_RAMP_12P5MV 0x1
+#define BUCK3_RAMP_6P25MV 0x2
+#define BUCK3_RAMP_3P125MV 0x3
+#define BUCK3_DVS_CTRL 0x10
+#define BUCK3_AD 0x08
+#define BUCK3_FPWM 0x04
+#define BUCK3_ENMODE_MASK 0x03
+
+/* PCA9450_REG_BUCK4_CTRL bits */
+#define BUCK4_AD 0x08
+#define BUCK4_FPWM 0x04
+#define BUCK4_ENMODE_MASK 0x03
+
+/* PCA9450_REG_BUCK5_CTRL bits */
+#define BUCK5_AD 0x08
+#define BUCK5_FPWM 0x04
+#define BUCK5_ENMODE_MASK 0x03
+
+/* PCA9450_REG_BUCK6_CTRL bits */
+#define BUCK6_AD 0x08
+#define BUCK6_FPWM 0x04
+#define BUCK6_ENMODE_MASK 0x03
+
+/* PCA9450_REG_BUCK123_PRESET_EN bit */
+#define BUCK123_PRESET_EN 0x80
+
+/* PCA9450_BUCK1OUT_DVS0 bits */
+#define BUCK1OUT_DVS0_MASK 0x7F
+#define BUCK1OUT_DVS0_DEFAULT 0x14
+
+/* PCA9450_BUCK1OUT_DVS1 bits */
+#define BUCK1OUT_DVS1_MASK 0x7F
+#define BUCK1OUT_DVS1_DEFAULT 0x14
+
+/* PCA9450_BUCK2OUT_DVS0 bits */
+#define BUCK2OUT_DVS0_MASK 0x7F
+#define BUCK2OUT_DVS0_DEFAULT 0x14
+
+/* PCA9450_BUCK2OUT_DVS1 bits */
+#define BUCK2OUT_DVS1_MASK 0x7F
+#define BUCK2OUT_DVS1_DEFAULT 0x14
+
+/* PCA9450_BUCK3OUT_DVS0 bits */
+#define BUCK3OUT_DVS0_MASK 0x7F
+#define BUCK3OUT_DVS0_DEFAULT 0x14
+
+/* PCA9450_BUCK3OUT_DVS1 bits */
+#define BUCK3OUT_DVS1_MASK 0x7F
+#define BUCK3OUT_DVS1_DEFAULT 0x14
+
+/* PCA9450_REG_BUCK4OUT bits */
+#define BUCK4OUT_MASK 0x7F
+#define BUCK4OUT_DEFAULT 0x6C
+
+/* PCA9450_REG_BUCK5OUT bits */
+#define BUCK5OUT_MASK 0x7F
+#define BUCK5OUT_DEFAULT 0x30
+
+/* PCA9450_REG_BUCK6OUT bits */
+#define BUCK6OUT_MASK 0x7F
+#define BUCK6OUT_DEFAULT 0x14
+
+/* PCA9450_REG_LDO1_VOLT bits */
+#define LDO1_EN_MASK 0xC0
+#define LDO1OUT_MASK 0x07
+
+/* PCA9450_REG_LDO2_VOLT bits */
+#define LDO2_EN_MASK 0xC0
+#define LDO2OUT_MASK 0x07
+
+/* PCA9450_REG_LDO3_VOLT bits */
+#define LDO3_EN_MASK 0xC0
+#define LDO3OUT_MASK 0x0F
+
+/* PCA9450_REG_LDO4_VOLT bits */
+#define LDO4_EN_MASK 0xC0
+#define LDO4OUT_MASK 0x0F
+
+/* PCA9450_REG_LDO5_VOLT bits */
+#define LDO5L_EN_MASK 0xC0
+#define LDO5LOUT_MASK 0x0F
+
+#define LDO5H_EN_MASK 0xC0
+#define LDO5HOUT_MASK 0x0F
+
+/* PCA9450_REG_IRQ bits */
+#define IRQ_PWRON 0x80
+#define IRQ_WDOGB 0x40
+#define IRQ_RSVD 0x20
+#define IRQ_VR_FLT1 0x10
+#define IRQ_VR_FLT2 0x08
+#define IRQ_LOWVSYS 0x04
+#define IRQ_THERM_105 0x02
+#define IRQ_THERM_125 0x01
+
+/* PCA9450_REG_RESET_CTRL bits */
+#define WDOG_B_CFG_MASK 0xC0
+#define WDOG_B_CFG_NONE 0x00
+#define WDOG_B_CFG_WARM 0x40
+#define WDOG_B_CFG_COLD_LDO12 0x80
+#define WDOG_B_CFG_COLD 0xC0
+
+/* PCA9450_REG_CONFIG2 bits */
+#define I2C_LT_MASK 0x03
+#define I2C_LT_FORCE_DISABLE 0x00
+#define I2C_LT_ON_STANDBY_RUN 0x01
+#define I2C_LT_ON_RUN 0x02
+#define I2C_LT_FORCE_ENABLE 0x03
+
+#endif /* __LINUX_REG_PCA9450_H__ */
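
Since this header includes <linux/regmap.h>, the register and field definitions above are meant to be used through regmap accessors. A sketch, assuming @map is the PMIC's regmap; note that BUCK1_RAMP_MASK (0xC0) covers bits 7:6, so the two-bit ramp codes must be shifted into place:

#include <linux/regmap.h>
#include <linux/regulator/pca9450.h>

/* Sketch: select the slowest BUCK1 ramp rate (3.125 mV per step).
 * The ramp field occupies bits 7:6 of BUCK1CTRL, hence the shift. */
static int pca9450_buck1_slow_ramp(struct regmap *map)
{
        return regmap_update_bits(map, PCA9450_REG_BUCK1CTRL,
                                  BUCK1_RAMP_MASK,
                                  BUCK1_RAMP_3P125MV << 6);
}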
diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h
index e0ccf46f66cf..c964fe8ab698 100644
--- a/include/linux/regulator/pfuze100.h
+++ b/include/linux/regulator/pfuze100.h
@@ -1,19 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef __LINUX_REG_PFUZE100_H
#define __LINUX_REG_PFUZE100_H
@@ -33,7 +20,8 @@
#define PFUZE100_VGEN4 12
#define PFUZE100_VGEN5 13
#define PFUZE100_VGEN6 14
-#define PFUZE100_MAX_REGULATOR 15
+#define PFUZE100_COIN 15
+#define PFUZE100_MAX_REGULATOR 16
#define PFUZE200_SW1AB 0
#define PFUZE200_SW2 1
@@ -64,10 +52,15 @@
#define PFUZE3000_VLDO3 11
#define PFUZE3000_VLDO4 12
-struct regulator_init_data;
-
-struct pfuze_regulator_platform_data {
- struct regulator_init_data *init_data[PFUZE100_MAX_REGULATOR];
-};
+#define PFUZE3001_SW1 0
+#define PFUZE3001_SW2 1
+#define PFUZE3001_SW3 2
+#define PFUZE3001_VSNVS 3
+#define PFUZE3001_VLDO1 4
+#define PFUZE3001_VLDO2 5
+#define PFUZE3001_VCCSD 6
+#define PFUZE3001_V33 7
+#define PFUZE3001_VLDO3 8
+#define PFUZE3001_VLDO4 9
#endif /* __LINUX_REG_PFUZE100_H */
diff --git a/include/linux/regulator/tps51632-regulator.h b/include/linux/regulator/tps51632-regulator.h
index d00841e1a75a..1413d77c2fed 100644
--- a/include/linux/regulator/tps51632-regulator.h
+++ b/include/linux/regulator/tps51632-regulator.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* tps51632-regulator.h -- TPS51632 regulator
*
@@ -7,21 +8,6 @@
* Copyright (C) 2012 NVIDIA Corporation
* Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
*/
#ifndef __LINUX_REGULATOR_TPS51632_H
diff --git a/include/linux/regulator/tps62360.h b/include/linux/regulator/tps62360.h
index a4c49394c497..398e74a1d941 100644
--- a/include/linux/regulator/tps62360.h
+++ b/include/linux/regulator/tps62360.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* tps62360.h -- TI tps62360
*
@@ -6,21 +7,6 @@
* Copyright (C) 2012 NVIDIA Corporation
* Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
*/
#ifndef __LINUX_REGULATOR_TPS62360_H
@@ -33,10 +19,6 @@
* @en_discharge: Enable discharge the output capacitor via internal
* register.
* @en_internal_pulldn: internal pull down enable or not.
- * @vsel0_gpio: Gpio number for vsel0. It should be -1 if this is tied with
- * fixed logic.
- * @vsel1_gpio: Gpio number for vsel1. It should be -1 if this is tied with
- * fixed logic.
* @vsel0_def_state: Default state of vsel0. 1 if it is high else 0.
* @vsel1_def_state: Default state of vsel1. 1 if it is high else 0.
*/
@@ -44,8 +26,6 @@ struct tps62360_regulator_platform_data {
struct regulator_init_data *reg_init_data;
bool en_discharge;
bool en_internal_pulldn;
- int vsel0_gpio;
- int vsel1_gpio;
int vsel0_def_state;
int vsel1_def_state;
};
diff --git a/include/linux/regulator/tps6507x.h b/include/linux/regulator/tps6507x.h
index 4892f591bab1..58117575de8a 100644
--- a/include/linux/regulator/tps6507x.h
+++ b/include/linux/regulator/tps6507x.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* tps6507x.h -- Voltage regulation for the Texas Instruments TPS6507X
*
* Copyright (C) 2010 Texas Instruments, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef REGULATOR_TPS6507X
diff --git a/include/linux/regulator/userspace-consumer.h b/include/linux/regulator/userspace-consumer.h
index b4554ce9d4bb..2249ee697f8b 100644
--- a/include/linux/regulator/userspace-consumer.h
+++ b/include/linux/regulator/userspace-consumer.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __REGULATOR_PLATFORM_CONSUMER_H_
#define __REGULATOR_PLATFORM_CONSUMER_H_
@@ -20,6 +21,7 @@ struct regulator_userspace_consumer_data {
struct regulator_bulk_data *supplies;
bool init_on;
+ bool no_autoswitch;
};
#endif /* __REGULATOR_PLATFORM_CONSUMER_H_ */
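
The new no_autoswitch flag rides along in the existing platform data. A sketch of how a board file might populate it (all names and values are illustrative):

#include <linux/kernel.h>
#include <linux/regulator/userspace-consumer.h>

/* Illustrative platform data: one supply, enabled at init, with the
 * driver's automatic switching disabled via no_autoswitch. */
static struct regulator_bulk_data demo_supplies[] = {
        { .supply = "vext" },
};

static struct regulator_userspace_consumer_data demo_pdata = {
        .name          = "demo-supply",
        .num_supplies  = ARRAY_SIZE(demo_supplies),
        .supplies      = demo_supplies,
        .init_on       = true,
        .no_autoswitch = true,
};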
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 68c1448e56bb..72b876dd5cb8 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/include/linux/relay.h
*
@@ -61,11 +62,11 @@ struct rchan
size_t subbuf_size; /* sub-buffer size */
size_t n_subbufs; /* number of sub-buffers per buffer */
size_t alloc_size; /* total buffer size allocated */
- struct rchan_callbacks *cb; /* client callbacks */
+ const struct rchan_callbacks *cb; /* client callbacks */
struct kref kref; /* channel refcount */
void *private_data; /* for user-defined data */
size_t last_toobig; /* tried to log event > subbuf size */
- struct rchan_buf ** __percpu buf; /* per-cpu channel buffers */
+ struct rchan_buf * __percpu *buf; /* per-cpu channel buffers */
int is_global; /* One global buffer ? */
struct list_head list; /* for channel list */
struct dentry *parent; /* parent dentry passed to open */
@@ -88,6 +89,8 @@ struct rchan_callbacks
* The client should return 1 to continue logging, 0 to stop
* logging.
*
+ * This callback is optional.
+ *
* NOTE: subbuf_start will also be invoked when the buffer is
* created, so that the first sub-buffer can be initialized
* if necessary. In this case, prev_subbuf will be NULL.
@@ -101,25 +104,6 @@ struct rchan_callbacks
size_t prev_padding);
/*
- * buf_mapped - relay buffer mmap notification
- * @buf: the channel buffer
- * @filp: relay file pointer
- *
- * Called when a relay file is successfully mmapped
- */
- void (*buf_mapped)(struct rchan_buf *buf,
- struct file *filp);
-
- /*
- * buf_unmapped - relay buffer unmap notification
- * @buf: the channel buffer
- * @filp: relay file pointer
- *
- * Called when a relay file is successfully unmapped
- */
- void (*buf_unmapped)(struct rchan_buf *buf,
- struct file *filp);
- /*
* create_buf_file - create file to represent a relay channel buffer
* @filename: the name of the file to create
* @parent: the parent of the file to create
@@ -140,7 +124,9 @@ struct rchan_callbacks
* cause relay_open() to create a single global buffer rather
* than the default set of per-cpu buffers.
*
- * See Documentation/filesystems/relay.txt for more info.
+ * This callback is mandatory.
+ *
+ * See Documentation/filesystems/relay.rst for more info.
*/
struct dentry *(*create_buf_file)(const char *filename,
struct dentry *parent,
@@ -157,6 +143,8 @@ struct rchan_callbacks
* channel buffer.
*
* The callback should return 0 if successful, negative if not.
+ *
+ * This callback is mandatory.
*/
int (*remove_buf_file)(struct dentry *dentry);
};
@@ -169,7 +157,7 @@ struct rchan *relay_open(const char *base_filename,
struct dentry *parent,
size_t subbuf_size,
size_t n_subbufs,
- struct rchan_callbacks *cb,
+ const struct rchan_callbacks *cb,
void *private_data);
extern int relay_late_setup_files(struct rchan *chan,
const char *base_filename,
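
With the callback table now const and the mandatory/optional split spelled out above, a minimal relay client needs only create_buf_file and remove_buf_file; subbuf_start may stay NULL. A sketch backed by debugfs (names are illustrative):

#include <linux/relay.h>
#include <linux/debugfs.h>

static struct dentry *demo_create_buf_file(const char *filename,
                                           struct dentry *parent,
                                           umode_t mode,
                                           struct rchan_buf *buf,
                                           int *is_global)
{
        /* expose the per-cpu channel buffer as a debugfs file */
        return debugfs_create_file(filename, mode, parent, buf,
                                   &relay_file_operations);
}

static int demo_remove_buf_file(struct dentry *dentry)
{
        debugfs_remove(dentry);
        return 0;
}

static const struct rchan_callbacks demo_relay_cb = {
        .create_buf_file = demo_create_buf_file,
        .remove_buf_file = demo_remove_buf_file,
};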
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 81da49564ff4..fe8978eb69f1 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -38,6 +38,7 @@
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/virtio.h>
+#include <linux/cdev.h>
#include <linux/completion.h>
#include <linux/idr.h>
#include <linux/of.h>
@@ -73,7 +74,7 @@ struct resource_table {
u32 ver;
u32 num;
u32 reserved[2];
- u32 offset[0];
+ u32 offset[];
} __packed;
/**
@@ -87,7 +88,7 @@ struct resource_table {
*/
struct fw_rsc_hdr {
u32 type;
- u8 data[0];
+ u8 data[];
} __packed;
/**
@@ -100,7 +101,9 @@ struct fw_rsc_hdr {
* the remote processor will be writing logs.
* @RSC_VDEV: declare support for a virtio device, and serve as its
* virtio header.
- * @RSC_LAST: just keep this one at the end
+ * @RSC_LAST: just keep this one at the end of standard resources
+ * @RSC_VENDOR_START: start of the vendor specific resource types range
+ * @RSC_VENDOR_END: end of the vendor specific resource types range
*
* For more details regarding a specific resource type, please see its
* dedicated structure below.
@@ -111,11 +114,13 @@ struct fw_rsc_hdr {
* please update it as needed.
*/
enum fw_resource_type {
- RSC_CARVEOUT = 0,
- RSC_DEVMEM = 1,
- RSC_TRACE = 2,
- RSC_VDEV = 3,
- RSC_LAST = 4,
+ RSC_CARVEOUT = 0,
+ RSC_DEVMEM = 1,
+ RSC_TRACE = 2,
+ RSC_VDEV = 3,
+ RSC_LAST = 4,
+ RSC_VENDOR_START = 128,
+ RSC_VENDOR_END = 512,
};
#define FW_RSC_ADDR_ANY (-1)
@@ -238,7 +243,7 @@ struct fw_rsc_trace {
* @da: device address
* @align: the alignment between the consumer and producer parts of the vring
* @num: num of buffers supported by this vring (must be power of two)
- * @notifyid is a unique rproc-wide notify index for this vring. This notify
+ * @notifyid: a unique rproc-wide notify index for this vring. This notify
* index is used when kicking a remote processor, to let it know that this
* vring is triggered.
* @pa: physical address
@@ -261,18 +266,18 @@ struct fw_rsc_vdev_vring {
/**
* struct fw_rsc_vdev - virtio device header
* @id: virtio device id (as in virtio_ids.h)
- * @notifyid is a unique rproc-wide notify index for this vdev. This notify
+ * @notifyid: a unique rproc-wide notify index for this vdev. This notify
* index is used when kicking a remote processor, to let it know that the
 * status/features of this vdev have changed.
- * @dfeatures specifies the virtio device features supported by the firmware
- * @gfeatures is a place holder used by the host to write back the
+ * @dfeatures: specifies the virtio device features supported by the firmware
+ * @gfeatures: a placeholder used by the host to write back the
* negotiated features that are supported by both sides.
- * @config_len is the size of the virtio config space of this vdev. The config
+ * @config_len: the size of the virtio config space of this vdev. The config
* space lies in the resource table immediate after this vdev header.
- * @status is a place holder where the host will indicate its virtio progress.
- * @num_of_vrings indicates how many vrings are described in this vdev header
+ * @status: a placeholder where the host will indicate its virtio progress.
+ * @num_of_vrings: indicates how many vrings are described in this vdev header
* @reserved: reserved (must be zero)
- * @vring is an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'.
+ * @vring: an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'.
*
* This resource is a virtio device header: it provides information about
* the vdev, and is then used by the host and its peer remote processors
@@ -282,16 +287,17 @@ struct fw_rsc_vdev_vring {
* to statically allocate a vdev upon registration of the rproc (dynamic vdev
* allocation is not yet supported).
*
- * Note: unlike virtualization systems, the term 'host' here means
- * the Linux side which is running remoteproc to control the remote
- * processors. We use the name 'gfeatures' to comply with virtio's terms,
- * though there isn't really any virtualized guest OS here: it's the host
- * which is responsible for negotiating the final features.
- * Yeah, it's a bit confusing.
- *
- * Note: immediately following this structure is the virtio config space for
- * this vdev (which is specific to the vdev; for more info, read the virtio
- * spec). the size of the config space is specified by @config_len.
+ * Note:
+ * 1. unlike virtualization systems, the term 'host' here means
+ * the Linux side which is running remoteproc to control the remote
+ * processors. We use the name 'gfeatures' to comply with virtio's terms,
+ * though there isn't really any virtualized guest OS here: it's the host
+ * which is responsible for negotiating the final features.
+ * Yeah, it's a bit confusing.
+ *
+ * 2. immediately following this structure is the virtio config space for
+ * this vdev (which is specific to the vdev; for more info, read the virtio
+ * spec). The size of the config space is specified by @config_len.
*/
struct fw_rsc_vdev {
u32 id;
@@ -302,41 +308,101 @@ struct fw_rsc_vdev {
u8 status;
u8 num_of_vrings;
u8 reserved[2];
- struct fw_rsc_vdev_vring vring[0];
+ struct fw_rsc_vdev_vring vring[];
} __packed;
+struct rproc;
+
/**
* struct rproc_mem_entry - memory entry descriptor
* @va: virtual address
+ * @is_iomem: io memory
* @dma: dma address
* @len: length, in bytes
* @da: device address
+ * @release: release associated memory
* @priv: associated data
+ * @name: associated memory region name (optional)
* @node: list node
+ * @rsc_offset: offset in resource table
+ * @flags: iommu protection flags
+ * @of_resm_idx: reserved memory phandle index
+ * @alloc: specific memory allocator function
*/
struct rproc_mem_entry {
void *va;
+ bool is_iomem;
dma_addr_t dma;
- int len;
+ size_t len;
u32 da;
void *priv;
+ char name[32];
struct list_head node;
+ u32 rsc_offset;
+ u32 flags;
+ u32 of_resm_idx;
+ int (*alloc)(struct rproc *rproc, struct rproc_mem_entry *mem);
+ int (*release)(struct rproc *rproc, struct rproc_mem_entry *mem);
};
-struct rproc;
+struct firmware;
+
+/**
+ * enum rsc_handling_status - return status of rproc_ops handle_rsc hook
+ * @RSC_HANDLED: resource was handled
+ * @RSC_IGNORED: resource was ignored
+ */
+enum rsc_handling_status {
+ RSC_HANDLED = 0,
+ RSC_IGNORED = 1,
+};
/**
* struct rproc_ops - platform-specific device handlers
+ * @prepare: prepare device for code loading
+ * @unprepare: unprepare device after stop
* @start: power on the device and boot it
* @stop: power off the device
+ * @attach: attach to a device that is already powered up
+ * @detach: detach from a device, leaving it powered up
* @kick: kick a virtqueue (virtqueue id given as a parameter)
* @da_to_va: optional platform hook to perform address translations
+ * @parse_fw: parse firmware to extract information (e.g. resource table)
+ * @handle_rsc: optional platform hook to handle vendor resources. Should return
+ * RSC_HANDLED if resource was handled, RSC_IGNORED if not handled
+ * and a negative value on error
+ * @find_loaded_rsc_table: find the loaded resource table from firmware image
+ * @get_loaded_rsc_table: get resource table installed in memory
+ * by external entity
+ * @load: load firmware to memory, where the remote processor
+ * expects to find it
+ * @sanity_check: sanity check the fw image
+ * @get_boot_addr: get the boot address of the entry point specified in firmware
+ * @panic: optional callback to react to system panic, core will delay
+ * panic at least the returned number of milliseconds
+ * @coredump: collect firmware dump after the subsystem is shutdown
*/
struct rproc_ops {
+ int (*prepare)(struct rproc *rproc);
+ int (*unprepare)(struct rproc *rproc);
int (*start)(struct rproc *rproc);
int (*stop)(struct rproc *rproc);
+ int (*attach)(struct rproc *rproc);
+ int (*detach)(struct rproc *rproc);
void (*kick)(struct rproc *rproc, int vqid);
- void * (*da_to_va)(struct rproc *rproc, u64 da, int len);
+ void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len, bool *is_iomem);
+ int (*parse_fw)(struct rproc *rproc, const struct firmware *fw);
+ int (*handle_rsc)(struct rproc *rproc, u32 rsc_type, void *rsc,
+ int offset, int avail);
+ struct resource_table *(*find_loaded_rsc_table)(
+ struct rproc *rproc, const struct firmware *fw);
+ struct resource_table *(*get_loaded_rsc_table)(
+ struct rproc *rproc, size_t *size);
+ int (*load)(struct rproc *rproc, const struct firmware *fw);
+ int (*sanity_check)(struct rproc *rproc, const struct firmware *fw);
+ u64 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw);
+ unsigned long (*panic)(struct rproc *rproc);
+ void (*coredump)(struct rproc *rproc);
};
/**
@@ -347,6 +413,10 @@ struct rproc_ops {
* @RPROC_RUNNING: device is up and running
* @RPROC_CRASHED: device has crashed; need to start recovery
* @RPROC_DELETED: device is deleted
+ * @RPROC_ATTACHED: device has been booted by another entity and the core
+ * has attached to it
+ * @RPROC_DETACHED: device has been booted by another entity and waiting
+ * for the core to attach to it
* @RPROC_LAST: just keep this one at the end
*
* Please note that the values of these states are used as indices
@@ -361,14 +431,16 @@ enum rproc_state {
RPROC_RUNNING = 2,
RPROC_CRASHED = 3,
RPROC_DELETED = 4,
- RPROC_LAST = 5,
+ RPROC_ATTACHED = 5,
+ RPROC_DETACHED = 6,
+ RPROC_LAST = 7,
};
/**
* enum rproc_crash_type - remote processor crash types
* @RPROC_MMUFAULT: iommu fault
* @RPROC_WATCHDOG: watchdog bite
- * @RPROC_FATAL_ERROR fatal error
+ * @RPROC_FATAL_ERROR: fatal error
*
* Each element of the enum is used as an array index. So that, the value of
* the elements should be always something sane.
@@ -382,6 +454,56 @@ enum rproc_crash_type {
};
/**
+ * enum rproc_dump_mechanism - Coredump options for core
+ * @RPROC_COREDUMP_DISABLED: Don't perform any dump
+ * @RPROC_COREDUMP_ENABLED: Copy dump to separate buffer and carry on with
+ * recovery
+ * @RPROC_COREDUMP_INLINE: Read segments directly from device memory. Stall
+ * recovery until all segments are read
+ */
+enum rproc_dump_mechanism {
+ RPROC_COREDUMP_DISABLED,
+ RPROC_COREDUMP_ENABLED,
+ RPROC_COREDUMP_INLINE,
+};
+
+/**
+ * struct rproc_dump_segment - segment info from ELF header
+ * @node: list node related to the rproc segment list
+ * @da: device address of the segment
+ * @size: size of the segment
+ * @priv: private data associated with the dump_segment
+ * @dump: custom dump function to fill device memory segment associated
+ * with coredump
+ * @offset: offset of the segment
+ */
+struct rproc_dump_segment {
+ struct list_head node;
+
+ dma_addr_t da;
+ size_t size;
+
+ void *priv;
+ void (*dump)(struct rproc *rproc, struct rproc_dump_segment *segment,
+ void *dest, size_t offset, size_t size);
+ loff_t offset;
+};
+
+/**
+ * enum rproc_features - features supported
+ *
+ * @RPROC_FEAT_ATTACH_ON_RECOVERY: The remote processor does not need help
+ * from Linux to recover, such as firmware
+ * loading. Linux just needs to attach after
+ * recovery.
+ */
+enum rproc_features {
+ RPROC_FEAT_ATTACH_ON_RECOVERY,
+ RPROC_MAX_FEATURES,
+};
+
+/**
* struct rproc - represents a physical remote processor device
* @node: list node of this rproc object
* @domain: iommu domain
@@ -390,9 +512,9 @@ enum rproc_crash_type {
* @priv: private data which belongs to the platform-specific rproc module
* @ops: platform-specific start/stop rproc handlers
* @dev: virtual device for refcounting and common remoteproc behavior
- * @fw_ops: firmware-specific handlers
* @power: refcount of users who need this rproc powered up
* @state: state of the device
+ * @dump_conf: Currently selected coredump configuration
* @lock: lock which protects concurrent manipulations of the rproc
* @dbg_dir: debugfs directory of this rproc device
* @traces: list of trace buffers
@@ -406,57 +528,82 @@ enum rproc_crash_type {
* @index: index of this rproc device
* @crash_handler: workqueue for handling a crash
* @crash_cnt: crash counter
- * @crash_comp: completion used to sync crash handler and the rproc reload
 * @recovery_disabled: flag that states if recovery is disabled
* @max_notifyid: largest allocated notify id.
* @table_ptr: pointer to the resource table in effect
+ * @clean_table: copy of the resource table without modifications. Used
+ * when a remote processor is attached or detached from the core
* @cached_table: copy of the resource table
+ * @table_sz: size of @cached_table
* @has_iommu: flag to indicate if remote processor is behind an MMU
+ * @auto_boot: flag to indicate if remote processor should be auto-started
+ * @sysfs_read_only: flag to make remoteproc sysfs files read only
+ * @dump_segments: list of segments in the firmware
+ * @nb_vdev: number of vdev currently handled by rproc
+ * @elf_class: firmware ELF class
+ * @elf_machine: firmware ELF machine
+ * @cdev: character device of the rproc
+ * @cdev_put_on_release: flag to indicate if remoteproc should be shut down on @cdev release
+ * @features: indicate remoteproc features
*/
struct rproc {
struct list_head node;
struct iommu_domain *domain;
const char *name;
- char *firmware;
+ const char *firmware;
void *priv;
- const struct rproc_ops *ops;
+ struct rproc_ops *ops;
struct device dev;
- const struct rproc_fw_ops *fw_ops;
atomic_t power;
unsigned int state;
+ enum rproc_dump_mechanism dump_conf;
struct mutex lock;
struct dentry *dbg_dir;
struct list_head traces;
int num_traces;
struct list_head carveouts;
struct list_head mappings;
- u32 bootaddr;
+ u64 bootaddr;
struct list_head rvdevs;
struct list_head subdevs;
struct idr notifyids;
int index;
struct work_struct crash_handler;
unsigned int crash_cnt;
- struct completion crash_comp;
bool recovery_disabled;
int max_notifyid;
struct resource_table *table_ptr;
+ struct resource_table *clean_table;
struct resource_table *cached_table;
+ size_t table_sz;
bool has_iommu;
bool auto_boot;
+ bool sysfs_read_only;
+ struct list_head dump_segments;
+ int nb_vdev;
+ u8 elf_class;
+ u16 elf_machine;
+ struct cdev cdev;
+ bool cdev_put_on_release;
+ DECLARE_BITMAP(features, RPROC_MAX_FEATURES);
};
/**
* struct rproc_subdev - subdevice tied to a remoteproc
* @node: list node related to the rproc subdevs list
- * @probe: probe function, called as the rproc is started
- * @remove: remove function, called as the rproc is stopped
+ * @prepare: prepare function, called before the rproc is started
+ * @start: start function, called after the rproc has been started
+ * @stop: stop function, called before the rproc is stopped; the @crashed
+ * parameter indicates if this originates from a recovery
+ * @unprepare: unprepare function, called after the rproc has been stopped
*/
struct rproc_subdev {
struct list_head node;
- int (*probe)(struct rproc_subdev *subdev);
- void (*remove)(struct rproc_subdev *subdev);
+ int (*prepare)(struct rproc_subdev *subdev);
+ int (*start)(struct rproc_subdev *subdev);
+ void (*stop)(struct rproc_subdev *subdev, bool crashed);
+ void (*unprepare)(struct rproc_subdev *subdev);
};
/* we currently support only two vrings per rvdev */
@@ -466,8 +613,7 @@ struct rproc_subdev {
/**
* struct rproc_vring - remoteproc vring state
* @va: virtual address
- * @dma: dma address
- * @len: length, in bytes
+ * @num: vring size
* @da: device address
* @align: vring alignment
* @notifyid: rproc-specific unique vring index
@@ -476,8 +622,7 @@ struct rproc_subdev {
*/
struct rproc_vring {
void *va;
- dma_addr_t dma;
- int len;
+ int num;
u32 da;
u32 align;
int notifyid;
@@ -487,29 +632,31 @@ struct rproc_vring {
/**
* struct rproc_vdev - remoteproc state for a supported virtio device
- * @refcount: reference counter for the vdev and vring allocations
* @subdev: handle for registering the vdev as a rproc subdevice
+ * @pdev: remoteproc virtio platform device
* @id: virtio device id (as in virtio_ids.h)
* @node: list node
* @rproc: the rproc handle
- * @vdev: the virio device
* @vring: the vrings for this vdev
* @rsc_offset: offset of the vdev's resource entry
+ * @index: vdev position versus other vdev declared in resource table
*/
struct rproc_vdev {
- struct kref refcount;
struct rproc_subdev subdev;
+ struct platform_device *pdev;
unsigned int id;
struct list_head node;
struct rproc *rproc;
- struct virtio_device vdev;
struct rproc_vring vring[RVDEV_NUM_VRINGS];
u32 rsc_offset;
+ u32 index;
};
struct rproc *rproc_get_by_phandle(phandle phandle);
+struct rproc *rproc_get_by_child(struct device *dev);
+
struct rproc *rproc_alloc(struct device *dev, const char *name,
const struct rproc_ops *ops,
const char *firmware, int len);
@@ -517,27 +664,44 @@ void rproc_put(struct rproc *rproc);
int rproc_add(struct rproc *rproc);
int rproc_del(struct rproc *rproc);
void rproc_free(struct rproc *rproc);
+void rproc_resource_cleanup(struct rproc *rproc);
-int rproc_boot(struct rproc *rproc);
-void rproc_shutdown(struct rproc *rproc);
-void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type);
+struct rproc *devm_rproc_alloc(struct device *dev, const char *name,
+ const struct rproc_ops *ops,
+ const char *firmware, int len);
+int devm_rproc_add(struct device *dev, struct rproc *rproc);
-static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
-{
- return container_of(vdev, struct rproc_vdev, vdev);
-}
+void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem);
-static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev)
-{
- struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+struct rproc_mem_entry *
+rproc_mem_entry_init(struct device *dev,
+ void *va, dma_addr_t dma, size_t len, u32 da,
+ int (*alloc)(struct rproc *, struct rproc_mem_entry *),
+ int (*release)(struct rproc *, struct rproc_mem_entry *),
+ const char *name, ...);
- return rvdev->rproc;
-}
+struct rproc_mem_entry *
+rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len,
+ u32 da, const char *name, ...);
-void rproc_add_subdev(struct rproc *rproc,
- struct rproc_subdev *subdev,
- int (*probe)(struct rproc_subdev *subdev),
- void (*remove)(struct rproc_subdev *subdev));
+int rproc_boot(struct rproc *rproc);
+int rproc_shutdown(struct rproc *rproc);
+int rproc_detach(struct rproc *rproc);
+int rproc_set_firmware(struct rproc *rproc, const char *fw_name);
+void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type);
+void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem);
+void rproc_coredump_using_sections(struct rproc *rproc);
+int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size);
+int rproc_coredump_add_custom_segment(struct rproc *rproc,
+ dma_addr_t da, size_t size,
+ void (*dumpfn)(struct rproc *rproc,
+ struct rproc_dump_segment *segment,
+ void *dest, size_t offset,
+ size_t size),
+ void *priv);
+int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine);
+
+void rproc_add_subdev(struct rproc *rproc, struct rproc_subdev *subdev);
void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev);
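
The subdevice rework above moves the callbacks into struct rproc_subdev itself, so rproc_add_subdev() now takes only the rproc and the subdev. A sketch of the new registration pattern (the subdev and its callbacks are illustrative):

#include <linux/remoteproc.h>

static int demo_subdev_start(struct rproc_subdev *subdev)
{
        /* bring up resources that depend on the remote processor */
        return 0;
}

static void demo_subdev_stop(struct rproc_subdev *subdev, bool crashed)
{
        /* tear down; @crashed distinguishes crash recovery from a
         * clean shutdown */
}

static struct rproc_subdev demo_subdev = {
        .start = demo_subdev_start,
        .stop  = demo_subdev_stop,
};

/* after rproc_alloc()/devm_rproc_alloc():
 *      rproc_add_subdev(rproc, &demo_subdev);
 */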
diff --git a/include/linux/remoteproc/mtk_scp.h b/include/linux/remoteproc/mtk_scp.h
new file mode 100644
index 000000000000..7c2b7cc9fe6c
--- /dev/null
+++ b/include/linux/remoteproc/mtk_scp.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ */
+
+#ifndef _MTK_SCP_H
+#define _MTK_SCP_H
+
+#include <linux/platform_device.h>
+
+typedef void (*scp_ipi_handler_t) (void *data,
+ unsigned int len,
+ void *priv);
+struct mtk_scp;
+
+/**
+ * enum scp_ipi_id - the id of inter-processor interrupt
+ *
+ * @SCP_IPI_INIT: The interrupt from SCP notifies the kernel that
+ * SCP initialization has completed.
+ * SCP_IPI_INIT is sent from SCP when its firmware is
+ * loaded; the AP doesn't need to send the SCP_IPI_INIT
+ * command to SCP.
+ * For the other IPIs below, the AP should send a request
+ * to SCP to trigger the interrupt.
+ * @SCP_IPI_MAX: The maximum IPI number
+ */
+enum scp_ipi_id {
+ SCP_IPI_INIT = 0,
+ SCP_IPI_VDEC_H264,
+ SCP_IPI_VDEC_VP8,
+ SCP_IPI_VDEC_VP9,
+ SCP_IPI_VENC_H264,
+ SCP_IPI_VENC_VP8,
+ SCP_IPI_MDP_INIT,
+ SCP_IPI_MDP_DEINIT,
+ SCP_IPI_MDP_FRAME,
+ SCP_IPI_DIP,
+ SCP_IPI_ISP_CMD,
+ SCP_IPI_ISP_FRAME,
+ SCP_IPI_FD_CMD,
+ SCP_IPI_CROS_HOST_CMD,
+ SCP_IPI_VDEC_LAT,
+ SCP_IPI_VDEC_CORE,
+ SCP_IPI_NS_SERVICE = 0xFF,
+ SCP_IPI_MAX = 0x100,
+};
+
+struct mtk_scp *scp_get(struct platform_device *pdev);
+void scp_put(struct mtk_scp *scp);
+
+struct device *scp_get_device(struct mtk_scp *scp);
+struct rproc *scp_get_rproc(struct mtk_scp *scp);
+
+int scp_ipi_register(struct mtk_scp *scp, u32 id, scp_ipi_handler_t handler,
+ void *priv);
+void scp_ipi_unregister(struct mtk_scp *scp, u32 id);
+
+int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
+ unsigned int wait);
+
+unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp);
+unsigned int scp_get_venc_hw_capa(struct mtk_scp *scp);
+
+void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr);
+
+#endif /* _MTK_SCP_H */
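
A sketch of the client side of this interface: register a handler for an IPI id, then send a request. The payload is illustrative, scp_get() is assumed to return NULL on failure, and the wait argument is assumed to be a timeout in milliseconds (0 meaning don't wait):

#include <linux/remoteproc/mtk_scp.h>

static void demo_ipi_handler(void *data, unsigned int len, void *priv)
{
        /* runs when SCP replies on SCP_IPI_CROS_HOST_CMD */
}

static int demo_scp_setup(struct platform_device *pdev)
{
        struct mtk_scp *scp = scp_get(pdev);
        u32 cmd = 0;    /* illustrative payload */
        int ret;

        if (!scp)
                return -ENODEV;

        ret = scp_ipi_register(scp, SCP_IPI_CROS_HOST_CMD,
                               demo_ipi_handler, NULL);
        if (ret)
                return ret;

        return scp_ipi_send(scp, SCP_IPI_CROS_HOST_CMD, &cmd,
                            sizeof(cmd), 0);
}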
diff --git a/include/linux/remoteproc/pruss.h b/include/linux/remoteproc/pruss.h
new file mode 100644
index 000000000000..039b50d58df2
--- /dev/null
+++ b/include/linux/remoteproc/pruss.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * PRU-ICSS Subsystem user interfaces
+ *
+ * Copyright (C) 2015-2022 Texas Instruments Incorporated - http://www.ti.com
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#ifndef __LINUX_PRUSS_H
+#define __LINUX_PRUSS_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+#define PRU_RPROC_DRVNAME "pru-rproc"
+
+/**
+ * enum pruss_pru_id - PRU core identifiers
+ * @PRUSS_PRU0: PRU Core 0.
+ * @PRUSS_PRU1: PRU Core 1.
+ * @PRUSS_NUM_PRUS: Total number of PRU Cores available.
+ */
+enum pruss_pru_id {
+ PRUSS_PRU0 = 0,
+ PRUSS_PRU1,
+ PRUSS_NUM_PRUS,
+};
+
+/*
+ * enum pru_ctable_idx - Configurable Constant table index identifiers
+ */
+enum pru_ctable_idx {
+ PRU_C24 = 0,
+ PRU_C25,
+ PRU_C26,
+ PRU_C27,
+ PRU_C28,
+ PRU_C29,
+ PRU_C30,
+ PRU_C31,
+};
+
+struct device_node;
+struct rproc;
+
+#if IS_ENABLED(CONFIG_PRU_REMOTEPROC)
+
+struct rproc *pru_rproc_get(struct device_node *np, int index,
+ enum pruss_pru_id *pru_id);
+void pru_rproc_put(struct rproc *rproc);
+int pru_rproc_set_ctable(struct rproc *rproc, enum pru_ctable_idx c, u32 addr);
+
+#else
+
+static inline struct rproc *
+pru_rproc_get(struct device_node *np, int index, enum pruss_pru_id *pru_id)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void pru_rproc_put(struct rproc *rproc) { }
+
+static inline int pru_rproc_set_ctable(struct rproc *rproc,
+ enum pru_ctable_idx c, u32 addr)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_PRU_REMOTEPROC */
+
+static inline bool is_pru_rproc(struct device *dev)
+{
+ const char *drv_name = dev_driver_string(dev);
+
+ if (strncmp(drv_name, PRU_RPROC_DRVNAME, sizeof(PRU_RPROC_DRVNAME)))
+ return false;
+
+ return true;
+}
+
+#endif /* __LINUX_PRUSS_H */
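
A sketch of a PRU client using this interface: look up the PRU via a phandle, program a constant-table entry, and release it. The phandle index and C28 address are illustrative:

#include <linux/err.h>
#include <linux/remoteproc/pruss.h>

static int demo_pru_attach(struct device_node *np)
{
        enum pruss_pru_id pru_id;
        struct rproc *pru;
        int ret;

        pru = pru_rproc_get(np, 0, &pru_id);
        if (IS_ERR(pru))
                return PTR_ERR(pru);

        /* point C28 at a hypothetical shared region before use */
        ret = pru_rproc_set_ctable(pru, PRU_C28, 0x0100);

        pru_rproc_put(pru);
        return ret;
}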
diff --git a/include/linux/remoteproc/qcom_rproc.h b/include/linux/remoteproc/qcom_rproc.h
new file mode 100644
index 000000000000..82b211518136
--- /dev/null
+++ b/include/linux/remoteproc/qcom_rproc.h
@@ -0,0 +1,48 @@
+#ifndef __QCOM_RPROC_H__
+#define __QCOM_RPROC_H__
+
+struct notifier_block;
+
+/**
+ * enum qcom_ssr_notify_type - Startup/Shutdown events related to a remoteproc
+ * processor.
+ *
+ * @QCOM_SSR_BEFORE_POWERUP: Remoteproc about to start (prepare stage)
+ * @QCOM_SSR_AFTER_POWERUP: Remoteproc is running (start stage)
+ * @QCOM_SSR_BEFORE_SHUTDOWN: Remoteproc crashed or shutting down (stop stage)
+ * @QCOM_SSR_AFTER_SHUTDOWN: Remoteproc is down (unprepare stage)
+ */
+enum qcom_ssr_notify_type {
+ QCOM_SSR_BEFORE_POWERUP,
+ QCOM_SSR_AFTER_POWERUP,
+ QCOM_SSR_BEFORE_SHUTDOWN,
+ QCOM_SSR_AFTER_SHUTDOWN,
+};
+
+struct qcom_ssr_notify_data {
+ const char *name;
+ bool crashed;
+};
+
+#if IS_ENABLED(CONFIG_QCOM_RPROC_COMMON)
+
+void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb);
+int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb);
+
+#else
+
+static inline void *qcom_register_ssr_notifier(const char *name,
+ struct notifier_block *nb)
+{
+ return NULL;
+}
+
+static inline int qcom_unregister_ssr_notifier(void *notify,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+#endif
+
+#endif
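
A sketch of an SSR client watching one remoteproc for shutdown events; the subsystem name "mpss" is illustrative:

#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/remoteproc/qcom_rproc.h>

static int demo_ssr_notify(struct notifier_block *nb,
                           unsigned long action, void *data)
{
        struct qcom_ssr_notify_data *notify_data = data;

        if (action == QCOM_SSR_BEFORE_SHUTDOWN && notify_data->crashed)
                pr_info("%s crashed, quiescing clients\n",
                        notify_data->name);

        return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
        .notifier_call = demo_ssr_notify,
};

/* void *cookie = qcom_register_ssr_notifier("mpss", &demo_nb); */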
diff --git a/include/linux/remoteproc/st_slim_rproc.h b/include/linux/remoteproc/st_slim_rproc.h
index 4155556fa4b2..a01ba0b7a6f2 100644
--- a/include/linux/remoteproc/st_slim_rproc.h
+++ b/include/linux/remoteproc/st_slim_rproc.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* SLIM core rproc driver header
*
* Copyright (C) 2016 STMicroelectronics
*
* Author: Peter Griffin <peter.griffin@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _ST_REMOTEPROC_SLIM_H
#define _ST_REMOTEPROC_SLIM_H
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
new file mode 100644
index 000000000000..8334eeacfec5
--- /dev/null
+++ b/include/linux/resctrl.h
@@ -0,0 +1,267 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _RESCTRL_H
+#define _RESCTRL_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pid.h>
+
+#ifdef CONFIG_PROC_CPU_RESCTRL
+
+int proc_resctrl_show(struct seq_file *m,
+ struct pid_namespace *ns,
+ struct pid *pid,
+ struct task_struct *tsk);
+
+#endif
+
+/* max value for struct rdt_domain's mbps_val */
+#define MBA_MAX_MBPS U32_MAX
+
+/**
+ * enum resctrl_conf_type - The type of configuration.
+ * @CDP_NONE: No prioritisation, both code and data are controlled or monitored.
+ * @CDP_CODE: Configuration applies to instruction fetches.
+ * @CDP_DATA: Configuration applies to reads and writes.
+ */
+enum resctrl_conf_type {
+ CDP_NONE,
+ CDP_CODE,
+ CDP_DATA,
+};
+
+#define CDP_NUM_TYPES (CDP_DATA + 1)
+
+/*
+ * Event IDs, the values match those used to program IA32_QM_EVTSEL before
+ * reading IA32_QM_CTR on RDT systems.
+ */
+enum resctrl_event_id {
+ QOS_L3_OCCUP_EVENT_ID = 0x01,
+ QOS_L3_MBM_TOTAL_EVENT_ID = 0x02,
+ QOS_L3_MBM_LOCAL_EVENT_ID = 0x03,
+};
+
+/**
+ * struct resctrl_staged_config - parsed configuration to be applied
+ * @new_ctrl: new ctrl value to be loaded
+ * @have_new_ctrl: whether the user-provided new_ctrl is valid
+ */
+struct resctrl_staged_config {
+ u32 new_ctrl;
+ bool have_new_ctrl;
+};
+
+/**
+ * struct rdt_domain - group of CPUs sharing a resctrl resource
+ * @list: all instances of this resource
+ * @id: unique id for this instance
+ * @cpu_mask: which CPUs share this resource
+ * @rmid_busy_llc: bitmap of which limbo RMIDs are above threshold
+ * @mbm_total: saved state for MBM total bandwidth
+ * @mbm_local: saved state for MBM local bandwidth
+ * @mbm_over: worker to periodically read MBM h/w counters
+ * @cqm_limbo: worker to periodically read CQM h/w counters
+ * @mbm_work_cpu: worker CPU for MBM h/w counters
+ * @cqm_work_cpu: worker CPU for CQM h/w counters
+ * @plr: pseudo-locked region (if any) associated with domain
+ * @staged_config: parsed configuration to be applied
+ * @mbps_val: When mba_sc is enabled, this holds the array of user
+ * specified control values for mba_sc in MBps, indexed
+ * by closid
+ */
+struct rdt_domain {
+ struct list_head list;
+ int id;
+ struct cpumask cpu_mask;
+ unsigned long *rmid_busy_llc;
+ struct mbm_state *mbm_total;
+ struct mbm_state *mbm_local;
+ struct delayed_work mbm_over;
+ struct delayed_work cqm_limbo;
+ int mbm_work_cpu;
+ int cqm_work_cpu;
+ struct pseudo_lock_region *plr;
+ struct resctrl_staged_config staged_config[CDP_NUM_TYPES];
+ u32 *mbps_val;
+};
+
+/**
+ * struct resctrl_cache - Cache allocation related data
+ * @cbm_len: Length of the cache bit mask
+ * @min_cbm_bits: Minimum number of consecutive bits to be set.
+ * The value 0 means the architecture can support
+ * zero CBM.
+ * @shareable_bits: Bitmask of shareable resource with other
+ * executing entities
+ * @arch_has_sparse_bitmaps: True if a bitmap like f00f is valid.
+ * @arch_has_per_cpu_cfg: True if QOS_CFG register for this cache
+ * level has CPU scope.
+ */
+struct resctrl_cache {
+ unsigned int cbm_len;
+ unsigned int min_cbm_bits;
+ unsigned int shareable_bits;
+ bool arch_has_sparse_bitmaps;
+ bool arch_has_per_cpu_cfg;
+};
+
+/**
+ * enum membw_throttle_mode - System's memory bandwidth throttling mode
+ * @THREAD_THROTTLE_UNDEFINED: Not relevant to the system
+ * @THREAD_THROTTLE_MAX: Memory bandwidth is throttled at the core
+ * always using smallest bandwidth percentage
+ * assigned to threads, aka "max throttling"
+ * @THREAD_THROTTLE_PER_THREAD: Memory bandwidth is throttled at the thread
+ */
+enum membw_throttle_mode {
+ THREAD_THROTTLE_UNDEFINED = 0,
+ THREAD_THROTTLE_MAX,
+ THREAD_THROTTLE_PER_THREAD,
+};
+
+/**
+ * struct resctrl_membw - Memory bandwidth allocation related data
+ * @min_bw: Minimum memory bandwidth percentage user can request
+ * @bw_gran: Granularity at which the memory bandwidth is allocated
+ * @delay_linear: True if memory B/W delay is in linear scale
+ * @arch_needs_linear: True if we can't configure non-linear resources
+ * @throttle_mode: Bandwidth throttling mode when threads request
+ * different memory bandwidths
+ * @mba_sc: True if the MBA software controller (mba_sc) is enabled
+ * @mb_map: Mapping of memory B/W percentage to memory B/W delay
+ */
+struct resctrl_membw {
+ u32 min_bw;
+ u32 bw_gran;
+ u32 delay_linear;
+ bool arch_needs_linear;
+ enum membw_throttle_mode throttle_mode;
+ bool mba_sc;
+ u32 *mb_map;
+};
+
+struct rdt_parse_data;
+struct resctrl_schema;
+
+/**
+ * struct rdt_resource - attributes of a resctrl resource
+ * @rid: The index of the resource
+ * @alloc_capable: Is allocation available on this machine
+ * @mon_capable: Is monitor feature available on this machine
+ * @num_rmid: Number of RMIDs available
+ * @cache_level: Which cache level defines scope of this resource
+ * @cache: Cache allocation related data
+ * @membw: If the component has bandwidth controls, their properties.
+ * @domains: All domains for this resource
+ * @name: Name to use in "schemata" file.
+ * @data_width: Character width of data when displaying
+ * @default_ctrl: Specifies default cache cbm or memory B/W percent.
+ * @format_str: Per resource format string to show domain value
+ * @parse_ctrlval: Per resource function pointer to parse control values
+ * @evt_list: List of monitoring events
+ * @fflags: flags to choose base and info files
+ * @cdp_capable: Is the CDP feature available on this resource
+ */
+struct rdt_resource {
+ int rid;
+ bool alloc_capable;
+ bool mon_capable;
+ int num_rmid;
+ int cache_level;
+ struct resctrl_cache cache;
+ struct resctrl_membw membw;
+ struct list_head domains;
+ char *name;
+ int data_width;
+ u32 default_ctrl;
+ const char *format_str;
+ int (*parse_ctrlval)(struct rdt_parse_data *data,
+ struct resctrl_schema *s,
+ struct rdt_domain *d);
+ struct list_head evt_list;
+ unsigned long fflags;
+ bool cdp_capable;
+};
+
+/**
+ * struct resctrl_schema - configuration abilities of a resource presented to
+ * user-space
+ * @list: Member of resctrl_schema_all.
+ * @name: The name to use in the "schemata" file.
+ * @conf_type: Whether this schema is specific to code/data.
+ * @res: The resource structure exported by the architecture to describe
+ * the hardware that is configured by this schema.
+ * @num_closid: The number of closid that can be used with this schema. When
+ * features like CDP are enabled, this will be lower than the
+ * hardware supports for the resource.
+ */
+struct resctrl_schema {
+ struct list_head list;
+ char name[8];
+ enum resctrl_conf_type conf_type;
+ struct rdt_resource *res;
+ u32 num_closid;
+};
+
+/* The number of closid supported by this resource regardless of CDP */
+u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
+int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
+
+/*
+ * Update the ctrl_val and apply this config right now.
+ * Must be called on one of the domain's CPUs.
+ */
+int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
+ u32 closid, enum resctrl_conf_type t, u32 cfg_val);
+
+u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
+ u32 closid, enum resctrl_conf_type type);
+int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d);
+void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d);
+
+/**
+ * resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid
+ * for this resource and domain.
+ * @r: resource that the counter should be read from.
+ * @d: domain that the counter should be read from.
+ * @rmid: rmid of the counter to read.
+ * @eventid: eventid to read, e.g. L3 occupancy.
+ * @val: result of the counter read in bytes.
+ *
+ * Call from process context on a CPU that belongs to domain @d.
+ *
+ * Return:
+ * 0 on success, or -EIO, -EINVAL etc on error.
+ */
+int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
+ u32 rmid, enum resctrl_event_id eventid, u64 *val);
+
+/**
+ * resctrl_arch_reset_rmid() - Reset any private state associated with rmid
+ * and eventid.
+ * @r: The domain's resource.
+ * @d: The rmid's domain.
+ * @rmid: The rmid whose counter values should be reset.
+ * @eventid: The eventid whose counter values should be reset.
+ *
+ * This can be called from any CPU.
+ */
+void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
+ u32 rmid, enum resctrl_event_id eventid);
+
+/**
+ * resctrl_arch_reset_rmid_all() - Reset all private state associated with
+ * all rmids and eventids.
+ * @r: The resctrl resource.
+ * @d: The domain for which all architectural counter state will
+ * be cleared.
+ *
+ * This can be called from any CPU.
+ */
+void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d);
+
+extern unsigned int resctrl_rmid_realloc_threshold;
+extern unsigned int resctrl_rmid_realloc_limit;
+
+#endif /* _RESCTRL_H */
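
The read constraints documented above (process context, a CPU inside the domain) make resctrl_arch_rmid_read() a thin call for the filesystem code. A minimal sketch reading L3 occupancy for one RMID:

#include <linux/resctrl.h>

static int demo_read_llc_occupancy(struct rdt_resource *r,
                                   struct rdt_domain *d, u32 rmid,
                                   u64 *bytes)
{
        /* must run on a CPU in d->cpu_mask, per the kernel-doc above */
        return resctrl_arch_rmid_read(r, d, rmid, QOS_L3_OCCUP_EVENT_ID,
                                      bytes);
}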
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
deleted file mode 100644
index 156cfd330b66..000000000000
--- a/include/linux/reservation.h
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Header file for reservations for dma-buf and ttm
- *
- * Copyright(C) 2011 Linaro Limited. All rights reserved.
- * Copyright (C) 2012-2013 Canonical Ltd
- * Copyright (C) 2012 Texas Instruments
- *
- * Authors:
- * Rob Clark <robdclark@gmail.com>
- * Maarten Lankhorst <maarten.lankhorst@canonical.com>
- * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- *
- * Based on bo.c which bears the following copyright notice,
- * but is dual licensed:
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _LINUX_RESERVATION_H
-#define _LINUX_RESERVATION_H
-
-#include <linux/ww_mutex.h>
-#include <linux/dma-fence.h>
-#include <linux/slab.h>
-#include <linux/seqlock.h>
-#include <linux/rcupdate.h>
-
-extern struct ww_class reservation_ww_class;
-extern struct lock_class_key reservation_seqcount_class;
-extern const char reservation_seqcount_string[];
-
-/**
- * struct reservation_object_list - a list of shared fences
- * @rcu: for internal use
- * @shared_count: table of shared fences
- * @shared_max: for growing shared fence table
- * @shared: shared fence table
- */
-struct reservation_object_list {
- struct rcu_head rcu;
- u32 shared_count, shared_max;
- struct dma_fence __rcu *shared[];
-};
-
-/**
- * struct reservation_object - a reservation object manages fences for a buffer
- * @lock: update side lock
- * @seq: sequence count for managing RCU read-side synchronization
- * @fence_excl: the exclusive fence, if there is one currently
- * @fence: list of current shared fences
- * @staged: staged copy of shared fences for RCU updates
- */
-struct reservation_object {
- struct ww_mutex lock;
- seqcount_t seq;
-
- struct dma_fence __rcu *fence_excl;
- struct reservation_object_list __rcu *fence;
- struct reservation_object_list *staged;
-};
-
-#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base)
-#define reservation_object_assert_held(obj) \
- lockdep_assert_held(&(obj)->lock.base)
-
-/**
- * reservation_object_init - initialize a reservation object
- * @obj: the reservation object
- */
-static inline void
-reservation_object_init(struct reservation_object *obj)
-{
- ww_mutex_init(&obj->lock, &reservation_ww_class);
-
- __seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class);
- RCU_INIT_POINTER(obj->fence, NULL);
- RCU_INIT_POINTER(obj->fence_excl, NULL);
- obj->staged = NULL;
-}
-
-/**
- * reservation_object_fini - destroys a reservation object
- * @obj: the reservation object
- */
-static inline void
-reservation_object_fini(struct reservation_object *obj)
-{
- int i;
- struct reservation_object_list *fobj;
- struct dma_fence *excl;
-
- /*
- * This object should be dead and all references must have
- * been released to it, so no need to be protected with rcu.
- */
- excl = rcu_dereference_protected(obj->fence_excl, 1);
- if (excl)
- dma_fence_put(excl);
-
- fobj = rcu_dereference_protected(obj->fence, 1);
- if (fobj) {
- for (i = 0; i < fobj->shared_count; ++i)
- dma_fence_put(rcu_dereference_protected(fobj->shared[i], 1));
-
- kfree(fobj);
- }
- kfree(obj->staged);
-
- ww_mutex_destroy(&obj->lock);
-}
-
-/**
- * reservation_object_get_list - get the reservation object's
- * shared fence list, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the shared fence list. Does NOT take references to
- * the fence. The obj->lock must be held.
- */
-static inline struct reservation_object_list *
-reservation_object_get_list(struct reservation_object *obj)
-{
- return rcu_dereference_protected(obj->fence,
- reservation_object_held(obj));
-}
-
-/**
- * reservation_object_lock - lock the reservation object
- * @obj: the reservation object
- * @ctx: the locking context
- *
- * Locks the reservation object for exclusive access and modification. Note,
- * that the lock is only against other writers, readers will run concurrently
- * with a writer under RCU. The seqlock is used to notify readers if they
- * overlap with a writer.
- *
- * As the reservation object may be locked by multiple parties in an
- * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
- * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
- * object may be locked by itself by passing NULL as @ctx.
- */
-static inline int
-reservation_object_lock(struct reservation_object *obj,
- struct ww_acquire_ctx *ctx)
-{
- return ww_mutex_lock(&obj->lock, ctx);
-}
-
-/**
- * reservation_object_trylock - trylock the reservation object
- * @obj: the reservation object
- *
- * Tries to lock the reservation object for exclusive access and modification.
- * Note, that the lock is only against other writers, readers will run
- * concurrently with a writer under RCU. The seqlock is used to notify readers
- * if they overlap with a writer.
- *
- * Also note that since no context is provided, no deadlock protection is
- * possible.
- *
- * Returns true if the lock was acquired, false otherwise.
- */
-static inline bool __must_check
-reservation_object_trylock(struct reservation_object *obj)
-{
- return ww_mutex_trylock(&obj->lock);
-}
-
-/**
- * reservation_object_unlock - unlock the reservation object
- * @obj: the reservation object
- *
- * Unlocks the reservation object following exclusive access.
- */
-static inline void
-reservation_object_unlock(struct reservation_object *obj)
-{
- ww_mutex_unlock(&obj->lock);
-}
-
-/**
- * reservation_object_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the exclusive fence (if any). Does NOT take a
- * reference. The obj->lock must be held.
- *
- * RETURNS
- * The exclusive fence or NULL
- */
-static inline struct dma_fence *
-reservation_object_get_excl(struct reservation_object *obj)
-{
- return rcu_dereference_protected(obj->fence_excl,
- reservation_object_held(obj));
-}
-
-/**
- * reservation_object_get_excl_rcu - get the reservation object's
- * exclusive fence, without lock held.
- * @obj: the reservation object
- *
- * If there is an exclusive fence, this atomically increments it's
- * reference count and returns it.
- *
- * RETURNS
- * The exclusive fence or NULL if none
- */
-static inline struct dma_fence *
-reservation_object_get_excl_rcu(struct reservation_object *obj)
-{
- struct dma_fence *fence;
-
- if (!rcu_access_pointer(obj->fence_excl))
- return NULL;
-
- rcu_read_lock();
- fence = dma_fence_get_rcu_safe(&obj->fence_excl);
- rcu_read_unlock();
-
- return fence;
-}
-
-int reservation_object_reserve_shared(struct reservation_object *obj);
-void reservation_object_add_shared_fence(struct reservation_object *obj,
- struct dma_fence *fence);
-
-void reservation_object_add_excl_fence(struct reservation_object *obj,
- struct dma_fence *fence);
-
-int reservation_object_get_fences_rcu(struct reservation_object *obj,
- struct dma_fence **pfence_excl,
- unsigned *pshared_count,
- struct dma_fence ***pshared);
-
-long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
- bool wait_all, bool intr,
- unsigned long timeout);
-
-bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
- bool test_all);
-
-#endif /* _LINUX_RESERVATION_H */
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index db1fe6772ad5..0fa4f60e1186 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RESET_CONTROLLER_H_
#define _LINUX_RESET_CONTROLLER_H_
@@ -6,7 +7,7 @@
struct reset_controller_dev;
/**
- * struct reset_control_ops
+ * struct reset_control_ops - reset controller driver callbacks
*
* @reset: for self-deasserting resets, does all necessary
* things to reset the device
@@ -26,16 +27,43 @@ struct device_node;
struct of_phandle_args;
/**
+ * struct reset_control_lookup - represents a single lookup entry
+ *
+ * @list: internal list of all reset lookup entries
+ * @provider: name of the reset controller device controlling this reset line
+ * @index: ID of the reset controller in the reset controller device
+ * @dev_id: name of the device associated with this reset line
+ * @con_id: name of the reset line (can be NULL)
+ */
+struct reset_control_lookup {
+ struct list_head list;
+ const char *provider;
+ unsigned int index;
+ const char *dev_id;
+ const char *con_id;
+};
+
+#define RESET_LOOKUP(_provider, _index, _dev_id, _con_id) \
+ { \
+ .provider = _provider, \
+ .index = _index, \
+ .dev_id = _dev_id, \
+ .con_id = _con_id, \
+ }
+
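
For illustration, a board file could register such entries so a non-DT consumer
finds its reset line by name; a minimal sketch, with the provider, device and
line names invented:

    /* Hypothetical machine code; provider/dev_id/con_id are examples only. */
    static struct reset_control_lookup board_reset_lookups[] = {
        RESET_LOOKUP("10000000.reset-controller", 2, "foo-device", "bus"),
        RESET_LOOKUP("10000000.reset-controller", 3, "foo-device", NULL),
    };

    static void __init board_reset_init(void)
    {
        reset_controller_add_lookup(board_reset_lookups,
                                    ARRAY_SIZE(board_reset_lookups));
    }
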
+/**
* struct reset_controller_dev - reset controller entity that might
* provide multiple reset controls
* @ops: a pointer to device specific struct reset_control_ops
* @owner: kernel module of the reset controller driver
* @list: internal list of reset controller devices
* @reset_control_head: head of internal list of requested reset controls
+ * @dev: corresponding driver model device struct
* @of_node: corresponding device tree node as phandle target
* @of_reset_n_cells: number of cells in reset line specifiers
* @of_xlate: translation function to translate from specifier as found in the
- * device tree to id as given to the reset control ops
+ * device tree to id as given to the reset control ops, defaults
+ * to :c:func:`of_reset_simple_xlate`.
* @nr_resets: number of reset controls in this reset controller device
*/
struct reset_controller_dev {
@@ -43,6 +71,7 @@ struct reset_controller_dev {
struct module *owner;
struct list_head list;
struct list_head reset_control_head;
+ struct device *dev;
struct device_node *of_node;
int of_reset_n_cells;
int (*of_xlate)(struct reset_controller_dev *rcdev,
@@ -50,6 +79,7 @@ struct reset_controller_dev {
unsigned int nr_resets;
};
+#if IS_ENABLED(CONFIG_RESET_CONTROLLER)
int reset_controller_register(struct reset_controller_dev *rcdev);
void reset_controller_unregister(struct reset_controller_dev *rcdev);
@@ -57,4 +87,28 @@ struct device;
int devm_reset_controller_register(struct device *dev,
struct reset_controller_dev *rcdev);
+void reset_controller_add_lookup(struct reset_control_lookup *lookup,
+ unsigned int num_entries);
+#else
+static inline int reset_controller_register(struct reset_controller_dev *rcdev)
+{
+ return 0;
+}
+
+static inline void reset_controller_unregister(struct reset_controller_dev *rcdev)
+{
+}
+
+static inline int devm_reset_controller_register(struct device *dev,
+ struct reset_controller_dev *rcdev)
+{
+ return 0;
+}
+
+static inline void reset_controller_add_lookup(struct reset_control_lookup *lookup,
+ unsigned int num_entries)
+{
+}
+#endif
+
#endif
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 13d8681210d5..514ddf003efc 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -1,34 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RESET_H_
#define _LINUX_RESET_H_
-#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+struct device;
+struct device_node;
struct reset_control;
+/**
+ * struct reset_control_bulk_data - Data used for bulk reset control operations.
+ *
+ * @id: reset control consumer ID
+ * @rstc: struct reset_control * to store the associated reset control
+ *
+ * The reset APIs provide a series of reset_control_bulk_*() API calls as
+ * a convenience to consumers which require multiple reset controls.
+ * This structure is used to manage data for these calls.
+ */
+struct reset_control_bulk_data {
+ const char *id;
+ struct reset_control *rstc;
+};
+
#ifdef CONFIG_RESET_CONTROLLER
int reset_control_reset(struct reset_control *rstc);
+int reset_control_rearm(struct reset_control *rstc);
int reset_control_assert(struct reset_control *rstc);
int reset_control_deassert(struct reset_control *rstc);
int reset_control_status(struct reset_control *rstc);
+int reset_control_acquire(struct reset_control *rstc);
+void reset_control_release(struct reset_control *rstc);
+
+int reset_control_bulk_reset(int num_rstcs, struct reset_control_bulk_data *rstcs);
+int reset_control_bulk_assert(int num_rstcs, struct reset_control_bulk_data *rstcs);
+int reset_control_bulk_deassert(int num_rstcs, struct reset_control_bulk_data *rstcs);
+int reset_control_bulk_acquire(int num_rstcs, struct reset_control_bulk_data *rstcs);
+void reset_control_bulk_release(int num_rstcs, struct reset_control_bulk_data *rstcs);
struct reset_control *__of_reset_control_get(struct device_node *node,
const char *id, int index, bool shared,
- bool optional);
+ bool optional, bool acquired);
struct reset_control *__reset_control_get(struct device *dev, const char *id,
int index, bool shared,
- bool optional);
+ bool optional, bool acquired);
void reset_control_put(struct reset_control *rstc);
+int __reset_control_bulk_get(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs,
+ bool shared, bool optional, bool acquired);
+void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs);
+
+int __device_reset(struct device *dev, bool optional);
struct reset_control *__devm_reset_control_get(struct device *dev,
const char *id, int index, bool shared,
- bool optional);
+ bool optional, bool acquired);
+int __devm_reset_control_bulk_get(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs,
+ bool shared, bool optional, bool acquired);
-int __must_check device_reset(struct device *dev);
+struct reset_control *devm_reset_control_array_get(struct device *dev,
+ bool shared, bool optional);
+struct reset_control *of_reset_control_array_get(struct device_node *np,
+ bool shared, bool optional,
+ bool acquired);
-static inline int device_reset_optional(struct device *dev)
-{
- return device_reset(dev);
-}
+int reset_control_get_count(struct device *dev);
#else
@@ -37,6 +76,11 @@ static inline int reset_control_reset(struct reset_control *rstc)
return 0;
}
+static inline int reset_control_rearm(struct reset_control *rstc)
+{
+ return 0;
+}
+
static inline int reset_control_assert(struct reset_control *rstc)
{
return 0;
@@ -52,45 +96,128 @@ static inline int reset_control_status(struct reset_control *rstc)
return 0;
}
-static inline void reset_control_put(struct reset_control *rstc)
+static inline int reset_control_acquire(struct reset_control *rstc)
{
+ return 0;
}
-static inline int __must_check device_reset(struct device *dev)
+static inline void reset_control_release(struct reset_control *rstc)
{
- WARN_ON(1);
- return -ENOTSUPP;
}
-static inline int device_reset_optional(struct device *dev)
+static inline void reset_control_put(struct reset_control *rstc)
{
- return -ENOTSUPP;
+}
+
+static inline int __device_reset(struct device *dev, bool optional)
+{
+ return optional ? 0 : -ENOTSUPP;
}
static inline struct reset_control *__of_reset_control_get(
struct device_node *node,
const char *id, int index, bool shared,
- bool optional)
+ bool optional, bool acquired)
{
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
static inline struct reset_control *__reset_control_get(
struct device *dev, const char *id,
- int index, bool shared, bool optional)
+ int index, bool shared, bool optional,
+ bool acquired)
{
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
+static inline int
+reset_control_bulk_reset(int num_rstcs, struct reset_control_bulk_data *rstcs)
+{
+ return 0;
+}
+
+static inline int
+reset_control_bulk_assert(int num_rstcs, struct reset_control_bulk_data *rstcs)
+{
+ return 0;
+}
+
+static inline int
+reset_control_bulk_deassert(int num_rstcs, struct reset_control_bulk_data *rstcs)
+{
+ return 0;
+}
+
+static inline int
+reset_control_bulk_acquire(int num_rstcs, struct reset_control_bulk_data *rstcs)
+{
+ return 0;
+}
+
+static inline void
+reset_control_bulk_release(int num_rstcs, struct reset_control_bulk_data *rstcs)
+{
+}
+
+static inline int
+__reset_control_bulk_get(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs,
+ bool shared, bool optional, bool acquired)
+{
+ return optional ? 0 : -EOPNOTSUPP;
+}
+
+static inline void
+reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs)
+{
+}
+
static inline struct reset_control *__devm_reset_control_get(
struct device *dev, const char *id,
- int index, bool shared, bool optional)
+ int index, bool shared, bool optional,
+ bool acquired)
+{
+ return optional ? NULL : ERR_PTR(-ENOTSUPP);
+}
+
+static inline int
+__devm_reset_control_bulk_get(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs,
+ bool shared, bool optional, bool acquired)
+{
+ return optional ? 0 : -EOPNOTSUPP;
+}
+
+static inline struct reset_control *
+devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
{
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
+static inline struct reset_control *
+of_reset_control_array_get(struct device_node *np, bool shared, bool optional,
+ bool acquired)
+{
+ return optional ? NULL : ERR_PTR(-ENOTSUPP);
+}
+
+static inline int reset_control_get_count(struct device *dev)
+{
+ return -ENOENT;
+}
+
#endif /* CONFIG_RESET_CONTROLLER */
+static inline int __must_check device_reset(struct device *dev)
+{
+ return __device_reset(dev, false);
+}
+
+static inline int device_reset_optional(struct device *dev)
+{
+ return __device_reset(dev, true);
+}
+
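
A consumer sketch (driver name invented): a probe routine can pulse the whole
device reset with these wrappers, choosing whether a missing reset line is
fatal:

    /* Hypothetical consumer; assumes <linux/platform_device.h>. */
    static int foo_probe(struct platform_device *pdev)
    {
        int ret;

        /* Fails (e.g. -ENOTSUPP) if no usable reset line exists. */
        ret = device_reset(&pdev->dev);
        if (ret)
            return ret;

        /* Alternatively, silently succeed when no reset line exists. */
        return device_reset_optional(&pdev->dev);
    }
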
/**
* reset_control_get_exclusive - Lookup and obtain an exclusive reference
* to a reset controller.
@@ -98,10 +225,10 @@ static inline struct reset_control *__devm_reset_control_get(
* @id: reset line name
*
* Returns a struct reset_control or IS_ERR() condition containing errno.
- * If this function is called more then once for the same reset_control it will
+ * If this function is called more than once for the same reset_control it will
* return -EBUSY.
*
- * See reset_control_get_shared for details on shared references to
+ * See reset_control_get_shared() for details on shared references to
* reset-controls.
*
* Use of id names is optional.
@@ -109,10 +236,87 @@ static inline struct reset_control *__devm_reset_control_get(
static inline struct reset_control *
__must_check reset_control_get_exclusive(struct device *dev, const char *id)
{
-#ifndef CONFIG_RESET_CONTROLLER
- WARN_ON(1);
-#endif
- return __reset_control_get(dev, id, 0, false, false);
+ return __reset_control_get(dev, id, 0, false, false, true);
+}
+
+/**
+ * reset_control_bulk_get_exclusive - Lookup and obtain exclusive references to
+ * multiple reset controllers.
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Fills the rstcs array with pointers to exclusive reset controls and
+ * returns 0, or an IS_ERR() condition containing errno.
+ */
+static inline int __must_check
+reset_control_bulk_get_exclusive(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, true);
+}
+
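
A minimal bulk-consumer sketch (the reset line names are invented and
binding-specific): fill in only the IDs, let the helper populate the rstc
pointers, then operate on all lines at once:

    static int foo_init_resets(struct device *dev)
    {
        struct reset_control_bulk_data resets[] = {
            { .id = "bus"  },
            { .id = "core" },
        };
        int ret;

        ret = reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(resets),
                                               resets);
        if (ret)
            return ret;

        ret = reset_control_bulk_reset(ARRAY_SIZE(resets), resets);
        reset_control_bulk_put(ARRAY_SIZE(resets), resets);
        return ret;
    }
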
+/**
+ * reset_control_get_exclusive_released - Lookup and obtain a temporarily
+ * exclusive reference to a reset
+ * controller.
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ * reset-controls returned by this function must be acquired via
+ * reset_control_acquire() before they can be used and should be released
+ * via reset_control_release() afterwards.
+ *
+ * Use of id names is optional.
+ */
+static inline struct reset_control *
+__must_check reset_control_get_exclusive_released(struct device *dev,
+ const char *id)
+{
+ return __reset_control_get(dev, id, 0, false, false, false);
+}
+
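
A sketch of the acquire/release protocol these "released" variants imply
("host" is an invented line name; error handling trimmed):

    static int foo_trigger_reset(struct device *dev)
    {
        struct reset_control *rstc;
        int ret;

        rstc = reset_control_get_exclusive_released(dev, "host");
        if (IS_ERR(rstc))
            return PTR_ERR(rstc);

        ret = reset_control_acquire(rstc);  /* take temporary exclusive control */
        if (!ret) {
            ret = reset_control_reset(rstc);
            reset_control_release(rstc);    /* let another consumer acquire it */
        }
        reset_control_put(rstc);
        return ret;
    }
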
+/**
+ * reset_control_bulk_get_exclusive_released - Lookup and obtain temporarily
+ * exclusive references to multiple reset
+ * controllers.
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Fills the rstcs array with pointers to exclusive reset controls and
+ * returns 0, or an IS_ERR() condition containing errno.
+ * reset-controls returned by this function must be acquired via
+ * reset_control_bulk_acquire() before they can be used and should be released
+ * via reset_control_bulk_release() afterwards.
+ */
+static inline int __must_check
+reset_control_bulk_get_exclusive_released(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, false);
+}
+
+/**
+ * reset_control_bulk_get_optional_exclusive_released - Lookup and obtain optional
+ * temporarily exclusive references to multiple
+ * reset controllers.
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Optional variant of reset_control_bulk_get_exclusive_released(). If the
+ * requested reset is not specified in the device tree, this function returns 0
+ * instead of an error and the missing rstc is set to NULL.
+ *
+ * See reset_control_bulk_get_exclusive_released() for more information.
+ */
+static inline int __must_check
+reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, false);
}
/**
@@ -123,7 +327,7 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
*
* Returns a struct reset_control or IS_ERR() condition containing errno.
* This function is intended for use with reset-controls which are shared
- * between hardware-blocks.
+ * between hardware blocks.
*
* When a reset-control is shared, the behavior of reset_control_assert /
* deassert is changed, the reset-core will keep track of a deassert_count
@@ -140,19 +344,98 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
static inline struct reset_control *reset_control_get_shared(
struct device *dev, const char *id)
{
- return __reset_control_get(dev, id, 0, true, false);
+ return __reset_control_get(dev, id, 0, true, false, false);
+}
+
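
Sketch of the shared semantics (consumer invented): each sharer balances its
own deassert/assert calls, and the core only asserts the line once the overall
deassert_count drops back to zero:

    static int foo_enable(struct device *dev)
    {
        struct reset_control *rstc = reset_control_get_shared(dev, NULL);

        if (IS_ERR(rstc))
            return PTR_ERR(rstc);
        /* The line stays deasserted while any sharer still needs it. */
        return reset_control_deassert(rstc);
    }
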
+/**
+ * reset_control_bulk_get_shared - Lookup and obtain shared references to
+ * multiple reset controllers.
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Fills the rstcs array with pointers to shared reset controls and
+ * returns 0, or an IS_ERR() condition containing errno.
+ */
+static inline int __must_check
+reset_control_bulk_get_shared(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, false);
}
+/**
+ * reset_control_get_optional_exclusive - optional reset_control_get_exclusive()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Optional variant of reset_control_get_exclusive(). If the requested reset
+ * is not specified in the device tree, this function returns NULL instead of
+ * an error.
+ *
+ * See reset_control_get_exclusive() for more information.
+ */
static inline struct reset_control *reset_control_get_optional_exclusive(
struct device *dev, const char *id)
{
- return __reset_control_get(dev, id, 0, false, true);
+ return __reset_control_get(dev, id, 0, false, true, true);
+}
+
+/**
+ * reset_control_bulk_get_optional_exclusive - optional
+ * reset_control_bulk_get_exclusive()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Optional variant of reset_control_bulk_get_exclusive(). If any of the
+ * requested resets are not specified in the device tree, this function sets
+ * them to NULL instead of returning an error.
+ *
+ * See reset_control_bulk_get_exclusive() for more information.
+ */
+static inline int __must_check
+reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, true);
}
+/**
+ * reset_control_get_optional_shared - optional reset_control_get_shared()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Optional variant of reset_control_get_shared(). If the requested reset
+ * is not specified in the device tree, this function returns NULL instead of
+ * an error.
+ *
+ * See reset_control_get_shared() for more information.
+ */
static inline struct reset_control *reset_control_get_optional_shared(
struct device *dev, const char *id)
{
- return __reset_control_get(dev, id, 0, true, true);
+ return __reset_control_get(dev, id, 0, true, true, false);
+}
+
+/**
+ * reset_control_bulk_get_optional_shared - optional
+ * reset_control_bulk_get_shared()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Optional variant of reset_control_bulk_get_shared(). If the requested resets
+ * are not specified in the device tree, this function sets them to NULL
+ * instead of returning an error.
+ *
+ * See reset_control_bulk_get_shared() for more information.
+ */
+static inline int __must_check
+reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, true, true, false);
}
/**
@@ -168,11 +451,31 @@ static inline struct reset_control *reset_control_get_optional_shared(
static inline struct reset_control *of_reset_control_get_exclusive(
struct device_node *node, const char *id)
{
- return __of_reset_control_get(node, id, 0, false, false);
+ return __of_reset_control_get(node, id, 0, false, false, true);
}
/**
- * of_reset_control_get_shared - Lookup and obtain an shared reference
+ * of_reset_control_get_optional_exclusive - Lookup and obtain an optional exclusive
+ * reference to a reset controller.
+ * @node: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Optional variant of of_reset_control_get_exclusive(). If the requested reset
+ * is not specified in the device tree, this function returns NULL instead of
+ * an error.
+ *
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ *
+ * Use of id names is optional.
+ */
+static inline struct reset_control *of_reset_control_get_optional_exclusive(
+ struct device_node *node, const char *id)
+{
+ return __of_reset_control_get(node, id, 0, false, true, true);
+}
+
+/**
+ * of_reset_control_get_shared - Lookup and obtain a shared reference
* to a reset controller.
* @node: device to be reset by the controller
* @id: reset line name
@@ -193,7 +496,7 @@ static inline struct reset_control *of_reset_control_get_exclusive(
static inline struct reset_control *of_reset_control_get_shared(
struct device_node *node, const char *id)
{
- return __of_reset_control_get(node, id, 0, true, false);
+ return __of_reset_control_get(node, id, 0, true, false, false);
}
/**
@@ -210,11 +513,11 @@ static inline struct reset_control *of_reset_control_get_shared(
static inline struct reset_control *of_reset_control_get_exclusive_by_index(
struct device_node *node, int index)
{
- return __of_reset_control_get(node, NULL, index, false, false);
+ return __of_reset_control_get(node, NULL, index, false, false, true);
}
/**
- * of_reset_control_get_shared_by_index - Lookup and obtain an shared
+ * of_reset_control_get_shared_by_index - Lookup and obtain a shared
* reference to a reset controller
* by index.
* @node: device to be reset by the controller
@@ -238,7 +541,7 @@ static inline struct reset_control *of_reset_control_get_exclusive_by_index(
static inline struct reset_control *of_reset_control_get_shared_by_index(
struct device_node *node, int index)
{
- return __of_reset_control_get(node, NULL, index, true, false);
+ return __of_reset_control_get(node, NULL, index, true, false, false);
}
/**
@@ -257,10 +560,105 @@ static inline struct reset_control *
__must_check devm_reset_control_get_exclusive(struct device *dev,
const char *id)
{
-#ifndef CONFIG_RESET_CONTROLLER
- WARN_ON(1);
-#endif
- return __devm_reset_control_get(dev, id, 0, false, false);
+ return __devm_reset_control_get(dev, id, 0, false, false, true);
+}
+
+/**
+ * devm_reset_control_bulk_get_exclusive - resource managed
+ * reset_control_bulk_get_exclusive()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Managed reset_control_bulk_get_exclusive(). For reset controllers returned
+ * from this function, reset_control_put() is called automatically on driver
+ * detach.
+ *
+ * See reset_control_bulk_get_exclusive() for more information.
+ */
+static inline int __must_check
+devm_reset_control_bulk_get_exclusive(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, true);
+}
+
+/**
+ * devm_reset_control_get_exclusive_released - resource managed
+ * reset_control_get_exclusive_released()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get_exclusive_released(). For reset controllers
+ * returned from this function, reset_control_put() is called automatically on
+ * driver detach.
+ *
+ * See reset_control_get_exclusive_released() for more information.
+ */
+static inline struct reset_control *
+__must_check devm_reset_control_get_exclusive_released(struct device *dev,
+ const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, false, false, false);
+}
+
+/**
+ * devm_reset_control_bulk_get_exclusive_released - resource managed
+ * reset_control_bulk_get_exclusive_released()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Managed reset_control_bulk_get_exclusive_released(). For reset controllers
+ * returned from this function, reset_control_put() is called automatically on
+ * driver detach.
+ *
+ * See reset_control_bulk_get_exclusive_released() for more information.
+ */
+static inline int __must_check
+devm_reset_control_bulk_get_exclusive_released(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, false);
+}
+
+/**
+ * devm_reset_control_get_optional_exclusive_released - resource managed
+ * reset_control_get_optional_exclusive_released()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed-and-optional variant of reset_control_get_exclusive_released(). For
+ * reset controllers returned from this function, reset_control_put() is called
+ * automatically on driver detach.
+ *
+ * See reset_control_get_exclusive_released() for more information.
+ */
+static inline struct reset_control *
+__must_check devm_reset_control_get_optional_exclusive_released(struct device *dev,
+ const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, false, true, false);
+}
+
+/**
+ * devm_reset_control_bulk_get_optional_exclusive_released - resource managed
+ *				reset_control_bulk_get_optional_exclusive_released()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Managed reset_control_bulk_get_optional_exclusive_released(). For reset
+ * controllers returned from this function, reset_control_put() is called
+ * automatically on driver detach.
+ *
+ * See reset_control_bulk_get_optional_exclusive_released() for more information.
+ */
+static inline int __must_check
+devm_reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, false);
}
/**
@@ -275,19 +673,103 @@ __must_check devm_reset_control_get_exclusive(struct device *dev,
static inline struct reset_control *devm_reset_control_get_shared(
struct device *dev, const char *id)
{
- return __devm_reset_control_get(dev, id, 0, true, false);
+ return __devm_reset_control_get(dev, id, 0, true, false, false);
+}
+
+/**
+ * devm_reset_control_bulk_get_shared - resource managed
+ * reset_control_bulk_get_shared()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Managed reset_control_bulk_get_shared(). For reset controllers returned
+ * from this function, reset_control_put() is called automatically on driver
+ * detach.
+ *
+ * See reset_control_bulk_get_shared() for more information.
+ */
+static inline int __must_check
+devm_reset_control_bulk_get_shared(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, false);
}
+/**
+ * devm_reset_control_get_optional_exclusive - resource managed
+ * reset_control_get_optional_exclusive()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get_optional_exclusive(). For reset controllers
+ * returned from this function, reset_control_put() is called automatically on
+ * driver detach.
+ *
+ * See reset_control_get_optional_exclusive() for more information.
+ */
static inline struct reset_control *devm_reset_control_get_optional_exclusive(
struct device *dev, const char *id)
{
- return __devm_reset_control_get(dev, id, 0, false, true);
+ return __devm_reset_control_get(dev, id, 0, false, true, true);
+}
+
+/**
+ * devm_reset_control_bulk_get_optional_exclusive - resource managed
+ * reset_control_bulk_get_optional_exclusive()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Managed reset_control_bulk_get_optional_exclusive(). For reset controllers
+ * returned from this function, reset_control_put() is called automatically on
+ * driver detach.
+ *
+ * See reset_control_bulk_get_optional_exclusive() for more information.
+ */
+static inline int __must_check
+devm_reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, true);
}
+/**
+ * devm_reset_control_get_optional_shared - resource managed
+ * reset_control_get_optional_shared()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get_optional_shared(). For reset controllers returned
+ * from this function, reset_control_put() is called automatically on driver
+ * detach.
+ *
+ * See reset_control_get_optional_shared() for more information.
+ */
static inline struct reset_control *devm_reset_control_get_optional_shared(
struct device *dev, const char *id)
{
- return __devm_reset_control_get(dev, id, 0, true, true);
+ return __devm_reset_control_get(dev, id, 0, true, true, false);
+}
+
+/**
+ * devm_reset_control_bulk_get_optional_shared - resource managed
+ * reset_control_bulk_get_optional_shared()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Managed reset_control_bulk_get_optional_shared(). For reset controllers
+ * returned from this function, reset_control_put() is called automatically on
+ * driver detach.
+ *
+ * See reset_control_bulk_get_optional_shared() for more information.
+ */
+static inline int __must_check
+devm_reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, true, false);
}
/**
@@ -305,12 +787,12 @@ static inline struct reset_control *devm_reset_control_get_optional_shared(
static inline struct reset_control *
devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
{
- return __devm_reset_control_get(dev, NULL, index, false, false);
+ return __devm_reset_control_get(dev, NULL, index, false, false, true);
}
/**
* devm_reset_control_get_shared_by_index - resource managed
- * reset_control_get_shared
+ * reset_control_get_shared
* @dev: device to be reset by the controller
* @index: index of the reset controller
*
@@ -321,7 +803,7 @@ devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
static inline struct reset_control *
devm_reset_control_get_shared_by_index(struct device *dev, int index)
{
- return __devm_reset_control_get(dev, NULL, index, true, false);
+ return __devm_reset_control_get(dev, NULL, index, true, false, false);
}
/*
@@ -332,18 +814,6 @@ devm_reset_control_get_shared_by_index(struct device *dev, int index)
* These inline function calls will be removed once all consumers
* have been moved over to the new explicit API.
*/
-static inline struct reset_control *reset_control_get(
- struct device *dev, const char *id)
-{
- return reset_control_get_exclusive(dev, id);
-}
-
-static inline struct reset_control *reset_control_get_optional(
- struct device *dev, const char *id)
-{
- return reset_control_get_optional_exclusive(dev, id);
-}
-
static inline struct reset_control *of_reset_control_get(
struct device_node *node, const char *id)
{
@@ -374,4 +844,61 @@ static inline struct reset_control *devm_reset_control_get_by_index(
{
return devm_reset_control_get_exclusive_by_index(dev, index);
}
+
+/*
+ * APIs to manage a list of reset controllers
+ */
+static inline struct reset_control *
+devm_reset_control_array_get_exclusive(struct device *dev)
+{
+ return devm_reset_control_array_get(dev, false, false);
+}
+
+static inline struct reset_control *
+devm_reset_control_array_get_shared(struct device *dev)
+{
+ return devm_reset_control_array_get(dev, true, false);
+}
+
+static inline struct reset_control *
+devm_reset_control_array_get_optional_exclusive(struct device *dev)
+{
+ return devm_reset_control_array_get(dev, false, true);
+}
+
+static inline struct reset_control *
+devm_reset_control_array_get_optional_shared(struct device *dev)
+{
+ return devm_reset_control_array_get(dev, true, true);
+}
+
+static inline struct reset_control *
+of_reset_control_array_get_exclusive(struct device_node *node)
+{
+ return of_reset_control_array_get(node, false, false, true);
+}
+
+static inline struct reset_control *
+of_reset_control_array_get_exclusive_released(struct device_node *node)
+{
+ return of_reset_control_array_get(node, false, false, false);
+}
+
+static inline struct reset_control *
+of_reset_control_array_get_shared(struct device_node *node)
+{
+ return of_reset_control_array_get(node, true, false, true);
+}
+
+static inline struct reset_control *
+of_reset_control_array_get_optional_exclusive(struct device_node *node)
+{
+ return of_reset_control_array_get(node, false, true, true);
+}
+
+static inline struct reset_control *
+of_reset_control_array_get_optional_shared(struct device_node *node)
+{
+ return of_reset_control_array_get(node, true, true, true);
+}
#endif
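
The array APIs return one opaque handle covering every reset listed for the
device; an invented consumer sketch:

    static int foo_deassert_all(struct device *dev)
    {
        struct reset_control *rstcs;

        rstcs = devm_reset_control_array_get_exclusive(dev);
        if (IS_ERR(rstcs))
            return PTR_ERR(rstcs);

        /* Operates on all underlying reset lines behind the handle. */
        return reset_control_deassert(rstcs);
    }
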
diff --git a/include/linux/reset/bcm63xx_pmb.h b/include/linux/reset/bcm63xx_pmb.h
index bb4af7b5eb36..c77b6999518a 100644
--- a/include/linux/reset/bcm63xx_pmb.h
+++ b/include/linux/reset/bcm63xx_pmb.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Broadcom BCM63xx Processor Monitor Bus shared routines (SMP and reset)
*
* Copyright (C) 2015, Broadcom Corporation
* Author: Florian Fainelli <f.fainelli@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __BCM63XX_PMB_H
#define __BCM63XX_PMB_H
diff --git a/include/linux/reset/reset-simple.h b/include/linux/reset/reset-simple.h
new file mode 100644
index 000000000000..c3e44f45b0f7
--- /dev/null
+++ b/include/linux/reset/reset-simple.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Simple Reset Controller ops
+ *
+ * Based on Allwinner SoCs Reset Controller driver
+ *
+ * Copyright 2013 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ */
+
+#ifndef __RESET_SIMPLE_H__
+#define __RESET_SIMPLE_H__
+
+#include <linux/io.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+
+/**
+ * struct reset_simple_data - driver data for simple reset controllers
+ * @lock: spinlock to protect registers during read-modify-write cycles
+ * @membase: memory mapped I/O register range
+ * @rcdev: reset controller device base structure
+ * @active_low: if true, bits are cleared to assert the reset. Otherwise, bits
+ * are set to assert the reset. Note that this says nothing about
+ * the voltage level of the actual reset line.
+ * @status_active_low: if true, bits read back as cleared while the reset is
+ * asserted. Otherwise, bits read back as set while the
+ * reset is asserted.
+ * @reset_us:	Minimum delay in microseconds that needs to be observed
+ *		between an assert and a deassert to reset the
+ * device. If multiple consumers with different delay
+ * requirements are connected to this controller, it must
+ * be the largest minimum delay. 0 means that such a delay is
+ * unknown and the reset operation is unsupported.
+ */
+struct reset_simple_data {
+ spinlock_t lock;
+ void __iomem *membase;
+ struct reset_controller_dev rcdev;
+ bool active_low;
+ bool status_active_low;
+ unsigned int reset_us;
+};
+
+extern const struct reset_control_ops reset_simple_ops;
+
+#endif /* __RESET_SIMPLE_H__ */
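
A minimal sketch of a driver built on these ops, modeled loosely on existing
reset-simple users (the register layout, line count and foo_* names are
assumptions):

    static int foo_reset_probe(struct platform_device *pdev)
    {
        struct reset_simple_data *data;

        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
            return -ENOMEM;

        spin_lock_init(&data->lock);
        data->membase = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(data->membase))
            return PTR_ERR(data->membase);

        data->rcdev.owner = THIS_MODULE;
        data->rcdev.nr_resets = 32;          /* assumed: one bit per line */
        data->rcdev.ops = &reset_simple_ops;
        data->rcdev.of_node = pdev->dev.of_node;

        return devm_reset_controller_register(&pdev->dev, &data->rcdev);
    }
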
diff --git a/include/linux/reset/socfpga.h b/include/linux/reset/socfpga.h
new file mode 100644
index 000000000000..b11a2047c342
--- /dev/null
+++ b/include/linux/reset/socfpga.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_RESET_SOCFPGA_H__
+#define __LINUX_RESET_SOCFPGA_H__
+
+void __init socfpga_reset_init(void);
+
+#endif /* __LINUX_RESET_SOCFPGA_H__ */
diff --git a/include/linux/reset/sunxi.h b/include/linux/reset/sunxi.h
new file mode 100644
index 000000000000..1ad7fffb413e
--- /dev/null
+++ b/include/linux/reset/sunxi.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_RESET_SUNXI_H__
+#define __LINUX_RESET_SUNXI_H__
+
+void __init sun6i_reset_init(void);
+
+#endif /* __LINUX_RESET_SUNXI_H__ */
diff --git a/include/linux/resource.h b/include/linux/resource.h
index 277afdad6589..4fdbc0c3f315 100644
--- a/include/linux/resource.h
+++ b/include/linux/resource.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RESOURCE_H
#define _LINUX_RESOURCE_H
@@ -7,7 +8,5 @@
struct task_struct;
void getrusage(struct task_struct *p, int who, struct rusage *ru);
-int do_prlimit(struct task_struct *tsk, unsigned int resource,
- struct rlimit *new_rlim, struct rlimit *old_rlim);
#endif
diff --git a/include/linux/resource_ext.h b/include/linux/resource_ext.h
index e2bf63d881d4..ff0339df56af 100644
--- a/include/linux/resource_ext.h
+++ b/include/linux/resource_ext.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2015, Intel Corporation
* Author: Jiang Liu <jiang.liu@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef _LINUX_RESOURCE_EXT_H
#define _LINUX_RESOURCE_EXT_H
@@ -74,4 +66,16 @@ resource_list_destroy_entry(struct resource_entry *entry)
#define resource_list_for_each_entry_safe(entry, tmp, list) \
list_for_each_entry_safe((entry), (tmp), (list), node)
+static inline struct resource_entry *
+resource_list_first_type(struct list_head *list, unsigned long type)
+{
+ struct resource_entry *entry;
+
+ resource_list_for_each_entry(entry, list) {
+ if (resource_type(entry->res) == type)
+ return entry;
+ }
+ return NULL;
+}
+
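
For example, a host-bridge driver could use the new helper to pick out the
first window of a given type (the wrapper name is invented):

    static struct resource *foo_first_mem_window(struct list_head *resources)
    {
        struct resource_entry *entry;

        /* First IORESOURCE_MEM entry, or NULL if the list has none. */
        entry = resource_list_first_type(resources, IORESOURCE_MEM);
        return entry ? entry->res : NULL;
    }
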
#endif /* _LINUX_RESOURCE_EXT_H */
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
index 19df8422606c..980a65594412 100644
--- a/include/linux/restart_block.h
+++ b/include/linux/restart_block.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Common syscall restarting data
*/
@@ -6,23 +7,23 @@
#include <linux/compiler.h>
#include <linux/types.h>
+#include <linux/time64.h>
struct timespec;
-struct compat_timespec;
+struct old_timespec32;
struct pollfd;
enum timespec_type {
TT_NONE = 0,
TT_NATIVE = 1,
-#ifdef CONFIG_COMPAT
TT_COMPAT = 2,
-#endif
};
/*
* System call restart block.
*/
struct restart_block {
+ unsigned long arch_data;
long (*fn)(struct restart_block *);
union {
/* For futex_wait and futex_wait_requeue_pi */
@@ -39,10 +40,8 @@ struct restart_block {
clockid_t clockid;
enum timespec_type type;
union {
- struct timespec __user *rmtp;
-#ifdef CONFIG_COMPAT
- struct compat_timespec __user *compat_rmtp;
-#endif
+ struct __kernel_timespec __user *rmtp;
+ struct old_timespec32 __user *compat_rmtp;
};
u64 expires;
} nanosleep;
diff --git a/include/linux/resume_user_mode.h b/include/linux/resume_user_mode.h
new file mode 100644
index 000000000000..285189454449
--- /dev/null
+++ b/include/linux/resume_user_mode.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef LINUX_RESUME_USER_MODE_H
+#define LINUX_RESUME_USER_MODE_H
+
+#include <linux/sched.h>
+#include <linux/task_work.h>
+#include <linux/memcontrol.h>
+#include <linux/blk-cgroup.h>
+
+/**
+ * set_notify_resume - cause resume_user_mode_work() to be called
+ * @task: task that will call resume_user_mode_work()
+ *
+ * Calling this arranges that @task will call resume_user_mode_work()
+ * before returning to user mode. If it's already running in user mode,
+ * it will enter the kernel and call resume_user_mode_work() soon.
+ * If it's blocked, it will not be woken.
+ */
+static inline void set_notify_resume(struct task_struct *task)
+{
+ if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME))
+ kick_process(task);
+}
+
+
+/**
+ * resume_user_mode_work - Perform work before returning to user mode
+ * @regs: user-mode registers of @current task
+ *
+ * This is called when %TIF_NOTIFY_RESUME has been set. Now we are
+ * about to return to user mode, and the user state in @regs can be
+ * inspected or adjusted. The caller in arch code has cleared
+ * %TIF_NOTIFY_RESUME before the call. If the flag gets set again
+ * asynchronously, this will be called again before we return to
+ * user mode.
+ *
+ * Called without locks.
+ */
+static inline void resume_user_mode_work(struct pt_regs *regs)
+{
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ /*
+ * This barrier pairs with task_work_add()->set_notify_resume() after
+ * hlist_add_head(task->task_works);
+ */
+ smp_mb__after_atomic();
+ if (unlikely(task_work_pending(current)))
+ task_work_run();
+
+#ifdef CONFIG_KEYS_REQUEST_CACHE
+ if (unlikely(current->cached_requested_key)) {
+ key_put(current->cached_requested_key);
+ current->cached_requested_key = NULL;
+ }
+#endif
+
+ mem_cgroup_handle_over_high();
+ blkcg_maybe_throttle_current();
+
+ rseq_handle_notify_resume(NULL, regs);
+}
+
+#endif /* LINUX_RESUME_USER_MODE_H */
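
The intended call site is the architecture's exit-to-user path; roughly (flag
handling is arch-specific, so this is only a shape sketch with an invented
function name):

    static void foo_exit_to_user_mode_loop(struct pt_regs *regs,
                                           unsigned long thread_flags)
    {
        if (thread_flags & _TIF_NOTIFY_RESUME)
            resume_user_mode_work(regs);  /* clears TIF_NOTIFY_RESUME itself */
    }
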
diff --git a/include/linux/rethook.h b/include/linux/rethook.h
new file mode 100644
index 000000000000..c8ac1e5afcd1
--- /dev/null
+++ b/include/linux/rethook.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Return hooking with list-based shadow stack.
+ */
+#ifndef _LINUX_RETHOOK_H
+#define _LINUX_RETHOOK_H
+
+#include <linux/compiler.h>
+#include <linux/freelist.h>
+#include <linux/kallsyms.h>
+#include <linux/llist.h>
+#include <linux/rcupdate.h>
+#include <linux/refcount.h>
+
+struct rethook_node;
+
+typedef void (*rethook_handler_t) (struct rethook_node *, void *, struct pt_regs *);
+
+/**
+ * struct rethook - The rethook management data structure.
+ * @data: The user-defined data storage.
+ * @handler: The user-defined return hook handler.
+ * @pool: The pool of struct rethook_node.
+ * @ref: The reference counter.
+ * @rcu: The rcu_head for deferred freeing.
+ *
+ * Don't embed this in another data structure, because it is a self-destructive
+ * data structure that frees itself when all rethook_node entries are freed.
+ */
+struct rethook {
+ void *data;
+ rethook_handler_t handler;
+ struct freelist_head pool;
+ refcount_t ref;
+ struct rcu_head rcu;
+};
+
+/**
+ * struct rethook_node - The rethook shadow-stack entry node.
+ * @freelist: The freelist, linked to struct rethook::pool.
+ * @rcu: The rcu_head for deferred freeing.
+ * @llist: The llist, linked to a struct task_struct::rethooks.
+ * @rethook: The pointer to the struct rethook.
+ * @ret_addr: The storage for the real return address.
+ * @frame: The storage for the frame pointer.
+ *
+ * You can embed this to your extended data structure to store any data
+ * on each entry of the shadow stack.
+ */
+struct rethook_node {
+ union {
+ struct freelist_node freelist;
+ struct rcu_head rcu;
+ };
+ struct llist_node llist;
+ struct rethook *rethook;
+ unsigned long ret_addr;
+ unsigned long frame;
+};
+
+struct rethook *rethook_alloc(void *data, rethook_handler_t handler);
+void rethook_free(struct rethook *rh);
+void rethook_add_node(struct rethook *rh, struct rethook_node *node);
+struct rethook_node *rethook_try_get(struct rethook *rh);
+void rethook_recycle(struct rethook_node *node);
+void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount);
+unsigned long rethook_find_ret_addr(struct task_struct *tsk, unsigned long frame,
+ struct llist_node **cur);
+
+/* Arch dependent code must implement arch_* and trampoline code */
+void arch_rethook_prepare(struct rethook_node *node, struct pt_regs *regs, bool mcount);
+void arch_rethook_trampoline(void);
+
+/**
+ * is_rethook_trampoline() - Check whether the address is the rethook trampoline
+ * @addr: The address to be checked
+ *
+ * Return true if the @addr is the rethook trampoline address.
+ */
+static inline bool is_rethook_trampoline(unsigned long addr)
+{
+ return addr == (unsigned long)dereference_symbol_descriptor(arch_rethook_trampoline);
+}
+
+/* If the architecture needs to fixup the return address, implement it. */
+void arch_rethook_fixup_return(struct pt_regs *regs,
+ unsigned long correct_ret_addr);
+
+/* Generic trampoline handler, arch code must prepare asm stub */
+unsigned long rethook_trampoline_handler(struct pt_regs *regs,
+ unsigned long frame);
+
+#ifdef CONFIG_RETHOOK
+void rethook_flush_task(struct task_struct *tk);
+#else
+#define rethook_flush_task(tsk) do { } while (0)
+#endif
+
+#endif
+
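
A sketch of how a user embeds the node, as the struct comment suggests (all
foo_* names invented; error handling trimmed, and rethook_alloc() is assumed
to return NULL on failure):

    struct foo_rethook_node {
        struct rethook_node node;   /* must be part of each pool entry */
        u64 entry_time;             /* arbitrary per-entry payload */
    };

    static void foo_ret_handler(struct rethook_node *n, void *data,
                                struct pt_regs *regs)
    {
        struct foo_rethook_node *fn =
            container_of(n, struct foo_rethook_node, node);
        /* inspect fn->entry_time and regs here */
    }

    static struct rethook *foo_rethook_create(void)
    {
        struct rethook *rh = rethook_alloc(NULL, foo_ret_handler);
        int i;

        if (!rh)
            return NULL;
        for (i = 0; i < 16; i++) {  /* pre-fill the node pool */
            struct foo_rethook_node *fn = kzalloc(sizeof(*fn), GFP_KERNEL);

            if (fn)
                rethook_add_node(rh, &fn->node);
        }
        return rh;
    }
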
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index e6a0031d1b1f..373003ace639 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -66,7 +66,7 @@ struct rfkill_ops {
#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
/**
- * rfkill_alloc - allocate rfkill structure
+ * rfkill_alloc - Allocate rfkill structure
* @name: name of the struct -- the string is not copied internally
* @parent: device that has rf switch on it
* @type: type of the switch (RFKILL_TYPE_*)
@@ -112,7 +112,7 @@ void rfkill_pause_polling(struct rfkill *rfkill);
/**
* rfkill_resume_polling(struct rfkill *rfkill)
*
- * Pause polling -- say transmitter is off for other reasons.
+ * Resume polling
* NOTE: not necessary for suspend/resume -- in that case the
* core stops polling anyway
*/
@@ -130,7 +130,7 @@ void rfkill_resume_polling(struct rfkill *rfkill);
void rfkill_unregister(struct rfkill *rfkill);
/**
- * rfkill_destroy - free rfkill structure
+ * rfkill_destroy - Free rfkill structure
* @rfkill: rfkill structure to be destroyed
*
* Destroys the rfkill structure.
@@ -138,9 +138,20 @@ void rfkill_unregister(struct rfkill *rfkill);
void rfkill_destroy(struct rfkill *rfkill);
/**
+ * rfkill_set_hw_state_reason - Set the internal rfkill hardware block state
+ * with a reason
+ * @rfkill: pointer to the rfkill class to modify.
+ * @blocked: the current hardware block state to set
+ * @reason: one of &enum rfkill_hard_block_reasons
+ *
+ * Prefer to use rfkill_set_hw_state if you don't need any special reason.
+ */
+bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
+ bool blocked, unsigned long reason);
+/**
* rfkill_set_hw_state - Set the internal rfkill hardware block state
* @rfkill: pointer to the rfkill class to modify.
- * @state: the current hardware block state to set
+ * @blocked: the current hardware block state to set
*
* rfkill drivers that get events when the hard-blocked state changes
* use this function to notify the rfkill core (and through that also
@@ -156,12 +167,16 @@ void rfkill_destroy(struct rfkill *rfkill);
* should be blocked) so that drivers need not keep track of the soft
* block state -- which they might not be able to.
*/
-bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked);
+static inline bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
+{
+ return rfkill_set_hw_state_reason(rfkill, blocked,
+ RFKILL_HARD_BLOCK_SIGNAL);
+}
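
An invented driver sketch showing where the reason variant fits, e.g. an
interrupt that reports a hardware kill-switch change (the foo_* names and the
register layout are assumptions):

    struct foo_priv {
        struct rfkill *rfkill;
        void __iomem *regs;
    };

    static irqreturn_t foo_killswitch_irq(int irq, void *dev_id)
    {
        struct foo_priv *priv = dev_id;
        bool blocked = readl(priv->regs) & BIT(0);  /* assumed bit layout */

        rfkill_set_hw_state_reason(priv->rfkill, blocked,
                                   RFKILL_HARD_BLOCK_SIGNAL);
        return IRQ_HANDLED;
    }
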
/**
* rfkill_set_sw_state - Set the internal rfkill software block state
* @rfkill: pointer to the rfkill class to modify.
- * @state: the current software block state to set
+ * @blocked: the current software block state to set
*
* rfkill drivers that get events when the soft-blocked state changes
* (yes, some platforms directly act on input but allow changing again)
@@ -183,7 +198,7 @@ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked);
/**
* rfkill_init_sw_state - Initialize persistent software block state
* @rfkill: pointer to the rfkill class to modify.
- * @state: the current software block state to set
+ * @blocked: the current software block state to set
*
* rfkill drivers that preserve their software block state over power off
* use this function to notify the rfkill core (and through that also
@@ -208,17 +223,24 @@ void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked);
void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw);
/**
- * rfkill_blocked - query rfkill block
+ * rfkill_blocked - Query rfkill block state
*
* @rfkill: rfkill struct to query
*/
bool rfkill_blocked(struct rfkill *rfkill);
/**
- * rfkill_find_type - Helpper for finding rfkill type by name
+ * rfkill_soft_blocked - Query soft rfkill block state
+ *
+ * @rfkill: rfkill struct to query
+ */
+bool rfkill_soft_blocked(struct rfkill *rfkill);
+
+/**
+ * rfkill_find_type - Helper for finding rfkill type by name
* @name: the name of the type
*
- * Returns enum rfkill_type that conrresponds the name.
+ * Returns enum rfkill_type that corresponds to the name.
*/
enum rfkill_type rfkill_find_type(const char *name);
@@ -256,6 +278,13 @@ static inline void rfkill_destroy(struct rfkill *rfkill)
{
}
+static inline bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
+ bool blocked,
+ unsigned long reason)
+{
+ return blocked;
+}
+
static inline bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
return blocked;
@@ -279,6 +308,11 @@ static inline bool rfkill_blocked(struct rfkill *rfkill)
return false;
}
+static inline bool rfkill_soft_blocked(struct rfkill *rfkill)
+{
+ return false;
+}
+
static inline enum rfkill_type rfkill_find_type(const char *name)
{
return RFKILL_TYPE_ALL;
@@ -296,7 +330,7 @@ static inline enum rfkill_type rfkill_find_type(const char *name)
const char *rfkill_get_led_trigger_name(struct rfkill *rfkill);
/**
- * rfkill_set_led_trigger_name -- set the LED trigger name
+ * rfkill_set_led_trigger_name - Set the LED trigger name
* @rfkill: rfkill struct
* @name: LED trigger name
*
diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h
new file mode 100644
index 000000000000..57467cbf4c5b
--- /dev/null
+++ b/include/linux/rhashtable-types.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Resizable, Scalable, Concurrent Hash Table
+ *
+ * Simple structures that might be needed in include
+ * files.
+ */
+
+#ifndef _LINUX_RHASHTABLE_TYPES_H
+#define _LINUX_RHASHTABLE_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+struct rhash_head {
+ struct rhash_head __rcu *next;
+};
+
+struct rhlist_head {
+ struct rhash_head rhead;
+ struct rhlist_head __rcu *next;
+};
+
+struct bucket_table;
+
+/**
+ * struct rhashtable_compare_arg - Key for the function rhashtable_compare
+ * @ht: Hash table
+ * @key: Key to compare against
+ */
+struct rhashtable_compare_arg {
+ struct rhashtable *ht;
+ const void *key;
+};
+
+typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
+typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
+typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
+ const void *obj);
+
+/**
+ * struct rhashtable_params - Hash table construction parameters
+ * @nelem_hint: Hint on number of elements, should be 75% of desired size
+ * @key_len: Length of key
+ * @key_offset: Offset of key in struct to be hashed
+ * @head_offset: Offset of rhash_head in struct to be hashed
+ * @max_size: Maximum size while expanding
+ * @min_size: Minimum size while shrinking
+ * @automatic_shrinking: Enable automatic shrinking of tables
+ * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
+ * @obj_hashfn: Function to hash object
+ * @obj_cmpfn: Function to compare key with object
+ */
+struct rhashtable_params {
+ u16 nelem_hint;
+ u16 key_len;
+ u16 key_offset;
+ u16 head_offset;
+ unsigned int max_size;
+ u16 min_size;
+ bool automatic_shrinking;
+ rht_hashfn_t hashfn;
+ rht_obj_hashfn_t obj_hashfn;
+ rht_obj_cmpfn_t obj_cmpfn;
+};
+
+/**
+ * struct rhashtable - Hash table handle
+ * @tbl: Bucket table
+ * @key_len: Key length for hashfn
+ * @max_elems: Maximum number of elements in table
+ * @p: Configuration parameters
+ * @rhlist: True if this is an rhltable
+ * @run_work: Deferred worker to expand/shrink asynchronously
+ * @mutex: Mutex to protect current/future table swapping
+ * @lock: Spin lock to protect walker list
+ * @nelems: Number of elements in table
+ */
+struct rhashtable {
+ struct bucket_table __rcu *tbl;
+ unsigned int key_len;
+ unsigned int max_elems;
+ struct rhashtable_params p;
+ bool rhlist;
+ struct work_struct run_work;
+ struct mutex mutex;
+ spinlock_t lock;
+ atomic_t nelems;
+};
+
+/**
+ * struct rhltable - Hash table with duplicate objects in a list
+ * @ht: Underlying rhtable
+ */
+struct rhltable {
+ struct rhashtable ht;
+};
+
+/**
+ * struct rhashtable_walker - Hash table walker
+ * @list: List entry on list of walkers
+ * @tbl: The table that we were walking over
+ */
+struct rhashtable_walker {
+ struct list_head list;
+ struct bucket_table *tbl;
+};
+
+/**
+ * struct rhashtable_iter - Hash table iterator
+ * @ht: Table to iterate through
+ * @p: Current pointer
+ * @list: Current hash list pointer
+ * @walker: Associated rhashtable walker
+ * @slot: Current slot
+ * @skip: Number of entries to skip in slot
+ */
+struct rhashtable_iter {
+ struct rhashtable *ht;
+ struct rhash_head *p;
+ struct rhlist_head *list;
+ struct rhashtable_walker walker;
+ unsigned int slot;
+ unsigned int skip;
+ bool end_of_table;
+};
+
+int rhashtable_init(struct rhashtable *ht,
+ const struct rhashtable_params *params);
+int rhltable_init(struct rhltable *hlt,
+ const struct rhashtable_params *params);
+
+#endif /* _LINUX_RHASHTABLE_TYPES_H */
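
Typical setup with these types (the object layout is invented): the params
describe where the key and the rhash_head live inside the object, and are then
handed to rhashtable_init():

    struct foo_obj {
        u32 id;                 /* lookup key */
        struct rhash_head node; /* chain linkage used by the table */
    };

    static const struct rhashtable_params foo_params = {
        .key_len             = sizeof(u32),
        .key_offset          = offsetof(struct foo_obj, id),
        .head_offset         = offsetof(struct foo_obj, node),
        .automatic_shrinking = true,
    };

    static int foo_table_init(struct rhashtable *ht)
    {
        return rhashtable_init(ht, &foo_params);
    }
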
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 7d56a7ea2b2e..5b5357c0bd8c 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Resizable, Scalable, Concurrent Hash Table
*
@@ -17,37 +18,33 @@
#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H
-#include <linux/atomic.h>
-#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
-#include <linux/mutex.h>
#include <linux/rculist.h>
+#include <linux/bit_spinlock.h>
+#include <linux/rhashtable-types.h>
/*
+ * Objects in an rhashtable have an embedded struct rhash_head
+ * which is linked into a hash chain from the hash table - or one
+ * of two or more hash tables when the rhashtable is being resized.
 * The end of the chain is marked with a special nulls marker which has
- * the following format:
- *
- * +-------+-----------------------------------------------------+-+
- * | Base | Hash |1|
- * +-------+-----------------------------------------------------+-+
- *
- * Base (4 bits) : Reserved to distinguish between multiple tables.
- * Specified via &struct rhashtable_params.nulls_base.
- * Hash (27 bits): Full hash (unmasked) of first element added to bucket
- * 1 (1 bit) : Nulls marker (always set)
- *
- * The remaining bits of the next pointer remain unused for now.
+ * the least significant bit set but otherwise stores the address of
+ * the hash bucket. This allows us to be sure we've found the end
+ * of the right list.
+ * The value stored in the hash bucket has BIT(0) used as a lock bit.
+ * This bit must be atomically set before any changes are made to
+ * the chain. To avoid dereferencing this pointer without clearing
+ * the bit first, we use an opaque 'struct rhash_lock_head *' for the
+ * pointer stored in the bucket. This struct needs to be defined so
+ * that rcu_dereference() works on it, but it has no content so a
+ * cast is needed for it to be useful. This ensures it isn't
+ * used by mistake without clearing the lock bit first.
*/
-#define RHT_BASE_BITS 4
-#define RHT_HASH_BITS 27
-#define RHT_BASE_SHIFT RHT_HASH_BITS
-
-/* Base bits plus 1 bit for nulls marker */
-#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
+struct rhash_lock_head {};
/* Maximum chain length before rehash
*
@@ -64,23 +61,12 @@
*/
#define RHT_ELASTICITY 16u
-struct rhash_head {
- struct rhash_head __rcu *next;
-};
-
-struct rhlist_head {
- struct rhash_head rhead;
- struct rhlist_head __rcu *next;
-};
-
/**
* struct bucket_table - Table of hash buckets
* @size: Number of hash buckets
* @nest: Number of bits of first-level nested table.
* @rehash: Current bucket being rehashed
* @hash_rnd: Random seed to fold into hash
- * @locks_mask: Mask to apply before accessing locks[]
- * @locks: Array of spinlocks protecting individual buckets
* @walkers: List of active walkers
* @rcu: RCU structure for freeing the table
* @future_tbl: Table under construction during rehashing
@@ -90,143 +76,40 @@ struct rhlist_head {
struct bucket_table {
unsigned int size;
unsigned int nest;
- unsigned int rehash;
u32 hash_rnd;
- unsigned int locks_mask;
- spinlock_t *locks;
struct list_head walkers;
struct rcu_head rcu;
struct bucket_table __rcu *future_tbl;
- struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
-};
+ struct lockdep_map dep_map;
-/**
- * struct rhashtable_compare_arg - Key for the function rhashtable_compare
- * @ht: Hash table
- * @key: Key to compare against
- */
-struct rhashtable_compare_arg {
- struct rhashtable *ht;
- const void *key;
-};
-
-typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
-typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
-typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
- const void *obj);
-
-struct rhashtable;
-
-/**
- * struct rhashtable_params - Hash table construction parameters
- * @nelem_hint: Hint on number of elements, should be 75% of desired size
- * @key_len: Length of key
- * @key_offset: Offset of key in struct to be hashed
- * @head_offset: Offset of rhash_head in struct to be hashed
- * @max_size: Maximum size while expanding
- * @min_size: Minimum size while shrinking
- * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
- * @automatic_shrinking: Enable automatic shrinking of tables
- * @nulls_base: Base value to generate nulls marker
- * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
- * @obj_hashfn: Function to hash object
- * @obj_cmpfn: Function to compare key with object
- */
-struct rhashtable_params {
- u16 nelem_hint;
- u16 key_len;
- u16 key_offset;
- u16 head_offset;
- unsigned int max_size;
- u16 min_size;
- bool automatic_shrinking;
- u8 locks_mul;
- u32 nulls_base;
- rht_hashfn_t hashfn;
- rht_obj_hashfn_t obj_hashfn;
- rht_obj_cmpfn_t obj_cmpfn;
+ struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
-/**
- * struct rhashtable - Hash table handle
- * @tbl: Bucket table
- * @nelems: Number of elements in table
- * @key_len: Key length for hashfn
- * @p: Configuration parameters
- * @max_elems: Maximum number of elements in table
- * @rhlist: True if this is an rhltable
- * @run_work: Deferred worker to expand/shrink asynchronously
- * @mutex: Mutex to protect current/future table swapping
- * @lock: Spin lock to protect walker list
- */
-struct rhashtable {
- struct bucket_table __rcu *tbl;
- atomic_t nelems;
- unsigned int key_len;
- struct rhashtable_params p;
- unsigned int max_elems;
- bool rhlist;
- struct work_struct run_work;
- struct mutex mutex;
- spinlock_t lock;
-};
-
-/**
- * struct rhltable - Hash table with duplicate objects in a list
- * @ht: Underlying rhtable
- */
-struct rhltable {
- struct rhashtable ht;
-};
-
-/**
- * struct rhashtable_walker - Hash table walker
- * @list: List entry on list of walkers
- * @tbl: The table that we were walking over
+/*
+ * NULLS_MARKER() expects a hash value with the low
+ * bits most likely to be significant, and it discards
+ * the msb.
+ * We give it an address, in which the bottom bit is
+ * always 0, and the msb might be significant.
+ * So we shift the address down one bit to align with
+ * expectations and avoid losing a significant bit.
+ *
+ * We never store the NULLS_MARKER in the hash table
+ * itself as we need the lsb for locking.
+ * Instead we store NULL.
*/
-struct rhashtable_walker {
- struct list_head list;
- struct bucket_table *tbl;
-};
-
-/**
- * struct rhashtable_iter - Hash table iterator
- * @ht: Table to iterate through
- * @p: Current pointer
- * @list: Current hash list pointer
- * @walker: Associated rhashtable walker
- * @slot: Current slot
- * @skip: Number of entries to skip in slot
- */
-struct rhashtable_iter {
- struct rhashtable *ht;
- struct rhash_head *p;
- struct rhlist_head *list;
- struct rhashtable_walker walker;
- unsigned int slot;
- unsigned int skip;
-};
-
-static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
-{
- return NULLS_MARKER(ht->p.nulls_base + hash);
-}
-
-#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
- ((ptr) = (typeof(ptr)) rht_marker(ht, hash))
+#define RHT_NULLS_MARKER(ptr) \
+ ((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1))
+#define INIT_RHT_NULLS_HEAD(ptr) \
+ ((ptr) = NULL)
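
To make the encoding concrete, a small illustration (assuming the
NULLS_MARKER() definition from <linux/list_nulls.h>, which sets bit 0 and
shifts the value left by one): because the bucket address is pre-shifted
right, the resulting marker is simply the bucket's own address with the
nulls/lock bit set.

	/* Illustrative only, under the assumption stated above. */
	static inline bool rht_marker_is_bucket_addr(const void *bkt)
	{
		unsigned long m = (unsigned long)RHT_NULLS_MARKER(bkt);

		return m == ((unsigned long)bkt | 1UL);
	}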
static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
return ((unsigned long) ptr & 1);
}
-static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
-{
- return ((unsigned long) ptr) >> 1;
-}
-
static inline void *rht_obj(const struct rhashtable *ht,
const struct rhash_head *he)
{
@@ -236,37 +119,45 @@ static inline void *rht_obj(const struct rhashtable *ht,
static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
unsigned int hash)
{
- return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
+ return hash & (tbl->size - 1);
}
-static inline unsigned int rht_key_hashfn(
- struct rhashtable *ht, const struct bucket_table *tbl,
- const void *key, const struct rhashtable_params params)
+static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
+ const void *key, const struct rhashtable_params params,
+ unsigned int hash_rnd)
{
unsigned int hash;
/* params must be equal to ht->p if it isn't constant. */
if (!__builtin_constant_p(params.key_len))
- hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
+ hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
else if (params.key_len) {
unsigned int key_len = params.key_len;
if (params.hashfn)
- hash = params.hashfn(key, key_len, tbl->hash_rnd);
+ hash = params.hashfn(key, key_len, hash_rnd);
else if (key_len & (sizeof(u32) - 1))
- hash = jhash(key, key_len, tbl->hash_rnd);
+ hash = jhash(key, key_len, hash_rnd);
else
- hash = jhash2(key, key_len / sizeof(u32),
- tbl->hash_rnd);
+ hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
} else {
unsigned int key_len = ht->p.key_len;
if (params.hashfn)
- hash = params.hashfn(key, key_len, tbl->hash_rnd);
+ hash = params.hashfn(key, key_len, hash_rnd);
else
- hash = jhash(key, key_len, tbl->hash_rnd);
+ hash = jhash(key, key_len, hash_rnd);
}
+ return hash;
+}
+
+static inline unsigned int rht_key_hashfn(
+ struct rhashtable *ht, const struct bucket_table *tbl,
+ const void *key, const struct rhashtable_params params)
+{
+ unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);
+
return rht_bucket_index(tbl, hash);
}
@@ -332,25 +223,6 @@ static inline bool rht_grow_above_max(const struct rhashtable *ht,
return atomic_read(&ht->nelems) >= ht->max_elems;
}
-/* The bucket lock is selected based on the hash and protects mutations
- * on a group of hash buckets.
- *
- * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
- * a single lock always covers both buckets which may both contains
- * entries which link to the same bucket of the old table during resizing.
- * This allows to simplify the locking as locking the bucket in both
- * tables during resize always guarantee protection.
- *
- * IMPORTANT: When holding the bucket lock of both the old and new table
- * during expansions and shrinking, the old bucket lock must always be
- * acquired first.
- */
-static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
- unsigned int hash)
-{
- return &tbl->locks[hash & tbl->locks_mask];
-}
-
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
@@ -367,19 +239,21 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
}
#endif /* CONFIG_PROVE_LOCKING */
-int rhashtable_init(struct rhashtable *ht,
- const struct rhashtable_params *params);
-int rhltable_init(struct rhltable *hlt,
- const struct rhashtable_params *params);
-
void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
struct rhash_head *obj);
void rhashtable_walk_enter(struct rhashtable *ht,
struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
-int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
+int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);
+
+static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
+{
+ (void)rhashtable_walk_start_check(iter);
+}
+
void *rhashtable_walk_next(struct rhashtable_iter *iter);
+void *rhashtable_walk_peek(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
void rhashtable_free_and_destroy(struct rhashtable *ht,
@@ -387,11 +261,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
void *arg);
void rhashtable_destroy(struct rhashtable *ht);
-struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
- unsigned int hash);
-struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
- struct bucket_table *tbl,
- unsigned int hash);
+struct rhash_lock_head __rcu **rht_bucket_nested(
+ const struct bucket_table *tbl, unsigned int hash);
+struct rhash_lock_head __rcu **__rht_bucket_nested(
+ const struct bucket_table *tbl, unsigned int hash);
+struct rhash_lock_head __rcu **rht_bucket_nested_insert(
+ struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash);
#define rht_dereference(p, ht) \
rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -408,37 +283,145 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
#define rht_entry(tpos, pos, member) \
({ tpos = container_of(pos, typeof(*tpos), member); 1; })
-static inline struct rhash_head __rcu *const *rht_bucket(
+static inline struct rhash_lock_head __rcu *const *rht_bucket(
const struct bucket_table *tbl, unsigned int hash)
{
return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
&tbl->buckets[hash];
}
-static inline struct rhash_head __rcu **rht_bucket_var(
+static inline struct rhash_lock_head __rcu **rht_bucket_var(
struct bucket_table *tbl, unsigned int hash)
{
- return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
+ return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
&tbl->buckets[hash];
}
-static inline struct rhash_head __rcu **rht_bucket_insert(
+static inline struct rhash_lock_head __rcu **rht_bucket_insert(
struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
&tbl->buckets[hash];
}
+/*
+ * We lock a bucket by setting BIT(0) in the pointer - this is always
+ * zero in real pointers. The NULLS mark is never stored in the bucket,
+ * rather we store NULL if the bucket is empty.
+ * bit_spin_locks do not handle contention well, but the whole point
+ * of the hashtable design is to achieve minimum per-bucket contention.
+ * A nested hash table might not have a bucket pointer. In that case
+ * we cannot get a lock. For remove and replace the bucket cannot
+ * contain the object, so it doesn't need locking.
+ * For insert we allocate the bucket if this is the last bucket_table,
+ * and then take the lock.
+ * Sometimes we unlock a bucket by writing a new pointer there. In that
+ * case we don't need to unlock, but we do need to reset state such as
+ * local_bh. For that we have rht_assign_unlock(). As rcu_assign_pointer()
+ * provides the same release semantics that bit_spin_unlock() provides,
+ * this is safe.
+ * When we write to a bucket without unlocking, we use rht_assign_locked().
+ */
+
+static inline unsigned long rht_lock(struct bucket_table *tbl,
+ struct rhash_lock_head __rcu **bkt)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ bit_spin_lock(0, (unsigned long *)bkt);
+ lock_map_acquire(&tbl->dep_map);
+ return flags;
+}
+
+static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
+ struct rhash_lock_head __rcu **bucket,
+ unsigned int subclass)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ bit_spin_lock(0, (unsigned long *)bucket);
+ lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
+ return flags;
+}
+
+static inline void rht_unlock(struct bucket_table *tbl,
+ struct rhash_lock_head __rcu **bkt,
+ unsigned long flags)
+{
+ lock_map_release(&tbl->dep_map);
+ bit_spin_unlock(0, (unsigned long *)bkt);
+ local_irq_restore(flags);
+}
+
+static inline struct rhash_head *__rht_ptr(
+ struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt)
+{
+ return (struct rhash_head *)
+ ((unsigned long)p & ~BIT(0) ?:
+ (unsigned long)RHT_NULLS_MARKER(bkt));
+}
+
+/*
+ * Where 'bkt' is a bucket and might be locked:
+ * rht_ptr_rcu() dereferences that pointer and clears the lock bit.
+ * rht_ptr() dereferences in a context where the bucket is locked.
+ * rht_ptr_exclusive() dereferences in a context where exclusive
+ * access is guaranteed, such as when destroying the table.
+ */
+static inline struct rhash_head *rht_ptr_rcu(
+ struct rhash_lock_head __rcu *const *bkt)
+{
+ return __rht_ptr(rcu_dereference(*bkt), bkt);
+}
+
+static inline struct rhash_head *rht_ptr(
+ struct rhash_lock_head __rcu *const *bkt,
+ struct bucket_table *tbl,
+ unsigned int hash)
+{
+ return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
+}
+
+static inline struct rhash_head *rht_ptr_exclusive(
+ struct rhash_lock_head __rcu *const *bkt)
+{
+ return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);
+}
+
+static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
+ struct rhash_head *obj)
+{
+ if (rht_is_a_nulls(obj))
+ obj = NULL;
+ rcu_assign_pointer(*bkt, (void *)((unsigned long)obj | BIT(0)));
+}
+
+static inline void rht_assign_unlock(struct bucket_table *tbl,
+ struct rhash_lock_head __rcu **bkt,
+ struct rhash_head *obj,
+ unsigned long flags)
+{
+ if (rht_is_a_nulls(obj))
+ obj = NULL;
+ lock_map_release(&tbl->dep_map);
+ rcu_assign_pointer(*bkt, (void *)obj);
+ preempt_enable();
+ __release(bitlock);
+ local_irq_restore(flags);
+}
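
Taken together, the intended caller pattern looks roughly as follows (a
hedged sketch of the head-insertion case only; the real users are the
fast-path helpers later in this file):

	/* Sketch: lock the bucket, chain the new object in front of the
	 * current head, then publish it. rht_assign_unlock() stores the
	 * new head and drops the bit lock with one release operation.
	 */
	static void example_insert_head(struct rhashtable *ht,
					struct bucket_table *tbl,
					unsigned int hash,
					struct rhash_head *obj)
	{
		struct rhash_lock_head __rcu **bkt;
		unsigned long flags;

		bkt = rht_bucket_insert(ht, tbl, hash);
		if (!bkt)
			return;	/* nested bucket allocation failed */
		flags = rht_lock(tbl, bkt);
		RCU_INIT_POINTER(obj->next, rht_ptr(bkt, tbl, hash));
		rht_assign_unlock(tbl, bkt, obj, flags);
	}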
+
/**
- * rht_for_each_continue - continue iterating over hash chain
+ * rht_for_each_from - iterate over hash chain from given head
* @pos: the &struct rhash_head to use as a loop cursor.
- * @head: the previous &struct rhash_head to continue from
+ * @head: the &struct rhash_head to start from
* @tbl: the &struct bucket_table
* @hash: the hash value / bucket index
*/
-#define rht_for_each_continue(pos, head, tbl, hash) \
- for (pos = rht_dereference_bucket(head, tbl, hash); \
- !rht_is_a_nulls(pos); \
+#define rht_for_each_from(pos, head, tbl, hash) \
+ for (pos = head; \
+ !rht_is_a_nulls(pos); \
pos = rht_dereference_bucket((pos)->next, tbl, hash))
/**
@@ -448,19 +431,20 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
* @hash: the hash value / bucket index
*/
#define rht_for_each(pos, tbl, hash) \
- rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
+ rht_for_each_from(pos, rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
+ tbl, hash)
/**
- * rht_for_each_entry_continue - continue iterating over hash chain
+ * rht_for_each_entry_from - iterate over hash chain from given head
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct rhash_head to use as a loop cursor.
- * @head: the previous &struct rhash_head to continue from
+ * @head: the &struct rhash_head to start from
* @tbl: the &struct bucket_table
* @hash: the hash value / bucket index
* @member: name of the &struct rhash_head within the hashable struct.
*/
-#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
- for (pos = rht_dereference_bucket(head, tbl, hash); \
+#define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member) \
+ for (pos = head; \
(!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
pos = rht_dereference_bucket((pos)->next, tbl, hash))
@@ -473,8 +457,9 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
* @member: name of the &struct rhash_head within the hashable struct.
*/
#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
- rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash), \
- tbl, hash, member)
+ rht_for_each_entry_from(tpos, pos, \
+ rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
+ tbl, hash, member)
/**
* rht_for_each_entry_safe - safely iterate over hash chain of given type
@@ -489,7 +474,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
* remove the loop cursor from the list.
*/
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
- for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \
+ for (pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
next = !rht_is_a_nulls(pos) ? \
rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
(!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
@@ -498,9 +483,9 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
rht_dereference_bucket(pos->next, tbl, hash) : NULL)
/**
- * rht_for_each_rcu_continue - continue iterating over rcu hash chain
+ * rht_for_each_rcu_from - iterate over rcu hash chain from given head
* @pos: the &struct rhash_head to use as a loop cursor.
- * @head: the previous &struct rhash_head to continue from
+ * @head: the &struct rhash_head to start from
* @tbl: the &struct bucket_table
* @hash: the hash value / bucket index
*
@@ -508,9 +493,9 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
* the _rcu mutation primitives such as rhashtable_insert() as long as the
* traversal is guarded by rcu_read_lock().
*/
-#define rht_for_each_rcu_continue(pos, head, tbl, hash) \
+#define rht_for_each_rcu_from(pos, head, tbl, hash) \
for (({barrier(); }), \
- pos = rht_dereference_bucket_rcu(head, tbl, hash); \
+ pos = head; \
!rht_is_a_nulls(pos); \
pos = rcu_dereference_raw(pos->next))
@@ -524,14 +509,17 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
* the _rcu mutation primitives such as rhashtable_insert() as long as the
* traversal is guarded by rcu_read_lock().
*/
-#define rht_for_each_rcu(pos, tbl, hash) \
- rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
+#define rht_for_each_rcu(pos, tbl, hash) \
+ for (({barrier(); }), \
+ pos = rht_ptr_rcu(rht_bucket(tbl, hash)); \
+ !rht_is_a_nulls(pos); \
+ pos = rcu_dereference_raw(pos->next))
/**
- * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
+ * rht_for_each_entry_rcu_from - iterate over rcu hash chain from given head
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct rhash_head to use as a loop cursor.
- * @head: the previous &struct rhash_head to continue from
+ * @head: the &struct rhash_head to start from
* @tbl: the &struct bucket_table
* @hash: the hash value / bucket index
* @member: name of the &struct rhash_head within the hashable struct.
@@ -540,9 +528,9 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
* the _rcu mutation primitives such as rhashtable_insert() as long as the
* traversal is guarded by rcu_read_lock().
*/
-#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
+#define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \
for (({barrier(); }), \
- pos = rht_dereference_bucket_rcu(head, tbl, hash); \
+ pos = head; \
(!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
@@ -559,8 +547,9 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
* traversal is guarded by rcu_read_lock().
*/
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
- rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \
- tbl, hash, member)
+ rht_for_each_entry_rcu_from(tpos, pos, \
+ rht_ptr_rcu(rht_bucket(tbl, hash)), \
+ tbl, hash, member)
/**
* rhl_for_each_rcu - iterate over rcu hash table list
@@ -605,6 +594,7 @@ static inline struct rhash_head *__rhashtable_lookup(
.ht = ht,
.key = key,
};
+ struct rhash_lock_head __rcu *const *bkt;
struct bucket_table *tbl;
struct rhash_head *he;
unsigned int hash;
@@ -612,13 +602,19 @@ static inline struct rhash_head *__rhashtable_lookup(
tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
hash = rht_key_hashfn(ht, tbl, key, params);
- rht_for_each_rcu(he, tbl, hash) {
- if (params.obj_cmpfn ?
- params.obj_cmpfn(&arg, rht_obj(ht, he)) :
- rhashtable_compare(&arg, rht_obj(ht, he)))
- continue;
- return he;
- }
+ bkt = rht_bucket(tbl, hash);
+ do {
+ rht_for_each_rcu_from(he, rht_ptr_rcu(bkt), tbl, hash) {
+ if (params.obj_cmpfn ?
+ params.obj_cmpfn(&arg, rht_obj(ht, he)) :
+ rhashtable_compare(&arg, rht_obj(ht, he)))
+ continue;
+ return he;
+ }
+ /* An object might have been moved to a different hash chain
+ * while we walk along it - better check and retry.
+ */
+ } while (he != RHT_NULLS_MARKER(bkt));
/* Ensure we see any new tables. */
smp_rmb();
@@ -714,10 +710,11 @@ static inline void *__rhashtable_insert_fast(
.ht = ht,
.key = key,
};
+ struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct bucket_table *tbl;
struct rhash_head *head;
- spinlock_t *lock;
+ unsigned long flags;
unsigned int hash;
int elasticity;
void *data;
@@ -726,23 +723,22 @@ static inline void *__rhashtable_insert_fast(
tbl = rht_dereference_rcu(ht->tbl, ht);
hash = rht_head_hashfn(ht, tbl, obj, params);
- lock = rht_bucket_lock(tbl, hash);
- spin_lock_bh(lock);
+ elasticity = RHT_ELASTICITY;
+ bkt = rht_bucket_insert(ht, tbl, hash);
+ data = ERR_PTR(-ENOMEM);
+ if (!bkt)
+ goto out;
+ pprev = NULL;
+ flags = rht_lock(tbl, bkt);
- if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
+ if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
slow_path:
- spin_unlock_bh(lock);
+ rht_unlock(tbl, bkt, flags);
rcu_read_unlock();
return rhashtable_insert_slow(ht, key, obj);
}
- elasticity = RHT_ELASTICITY;
- pprev = rht_bucket_insert(ht, tbl, hash);
- data = ERR_PTR(-ENOMEM);
- if (!pprev)
- goto out;
-
- rht_for_each_continue(head, *pprev, tbl, hash) {
+ rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
struct rhlist_head *plist;
struct rhlist_head *list;
@@ -750,13 +746,15 @@ slow_path:
if (!key ||
(params.obj_cmpfn ?
params.obj_cmpfn(&arg, rht_obj(ht, head)) :
- rhashtable_compare(&arg, rht_obj(ht, head))))
+ rhashtable_compare(&arg, rht_obj(ht, head)))) {
+ pprev = &head->next;
continue;
+ }
data = rht_obj(ht, head);
if (!rhlist)
- goto out;
+ goto out_unlock;
list = container_of(obj, struct rhlist_head, rhead);
@@ -765,9 +763,13 @@ slow_path:
RCU_INIT_POINTER(list->next, plist);
head = rht_dereference_bucket(head->next, tbl, hash);
RCU_INIT_POINTER(list->rhead.next, head);
- rcu_assign_pointer(*pprev, obj);
-
- goto good;
+ if (pprev) {
+ rcu_assign_pointer(*pprev, obj);
+ rht_unlock(tbl, bkt, flags);
+ } else
+ rht_assign_unlock(tbl, bkt, obj, flags);
+ data = NULL;
+ goto out;
}
if (elasticity <= 0)
@@ -775,12 +777,13 @@ slow_path:
data = ERR_PTR(-E2BIG);
if (unlikely(rht_grow_above_max(ht, tbl)))
- goto out;
+ goto out_unlock;
if (unlikely(rht_grow_above_100(ht, tbl)))
goto slow_path;
- head = rht_dereference_bucket(*pprev, tbl, hash);
+ /* Inserting at head of list makes unlocking free. */
+ head = rht_ptr(bkt, tbl, hash);
RCU_INIT_POINTER(obj->next, head);
if (rhlist) {
@@ -790,20 +793,21 @@ slow_path:
RCU_INIT_POINTER(list->next, NULL);
}
- rcu_assign_pointer(*pprev, obj);
-
atomic_inc(&ht->nelems);
+ rht_assign_unlock(tbl, bkt, obj, flags);
+
if (rht_grow_above_75(ht, tbl))
schedule_work(&ht->run_work);
-good:
data = NULL;
-
out:
- spin_unlock_bh(lock);
rcu_read_unlock();
return data;
+
+out_unlock:
+ rht_unlock(tbl, bkt, flags);
+ goto out;
}
/**
@@ -812,15 +816,14 @@ out:
* @obj: pointer to hash head inside object
* @params: hash table parameters
*
- * Will take a per bucket spinlock to protect against mutual mutations
+ * Will take the per bucket bitlock to protect against mutual mutations
* on the same bucket. Multiple insertions may occur in parallel unless
- * they map to the same bucket lock.
+ * they map to the same bucket.
*
* It is safe to call this function from atomic context.
*
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
+ * Will trigger an automatic deferred table resizing if residency in the
+ * table grows beyond 70%.
*/
static inline int rhashtable_insert_fast(
struct rhashtable *ht, struct rhash_head *obj,
@@ -842,15 +845,14 @@ static inline int rhashtable_insert_fast(
* @list: pointer to hash list head inside object
* @params: hash table parameters
*
- * Will take a per bucket spinlock to protect against mutual mutations
+ * Will take the per bucket bitlock to protect against mutual mutations
* on the same bucket. Multiple insertions may occur in parallel unless
- * they map to the same bucket lock.
+ * they map to the same bucket.
*
* It is safe to call this function from atomic context.
*
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
+ * Will trigger an automatic deferred table resizing if residency in the
+ * table grows beyond 70%.
*/
static inline int rhltable_insert_key(
struct rhltable *hlt, const void *key, struct rhlist_head *list,
@@ -866,15 +868,14 @@ static inline int rhltable_insert_key(
* @list: pointer to hash list head inside object
* @params: hash table parameters
*
- * Will take a per bucket spinlock to protect against mutual mutations
+ * Will take the per bucket bitlock to protect against mutual mutations
* on the same bucket. Multiple insertions may occur in parallel unless
- * they map to the same bucket lock.
+ * they map to the same bucket.
*
* It is safe to call this function from atomic context.
*
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
+ * Will trigger an automatic deferred table resizing if residency in the
+ * table grows beyond 70%.
*/
static inline int rhltable_insert(
struct rhltable *hlt, struct rhlist_head *list,
@@ -893,20 +894,13 @@ static inline int rhltable_insert(
* @obj: pointer to hash head inside object
* @params: hash table parameters
*
- * Locks down the bucket chain in both the old and new table if a resize
- * is in progress to ensure that writers can't remove from the old table
- * and can't insert to the new table during the atomic operation of search
- * and insertion. Searches for duplicates in both the old and new table if
- * a resize is in progress.
- *
* This lookup function may only be used for fixed key hash table (key_len
* parameter set). It will BUG() if used inappropriately.
*
* It is safe to call this function from atomic context.
*
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
+ * Will trigger an automatic deferred table resizing if residency in the
+ * table grows beyond 70%.
*/
static inline int rhashtable_lookup_insert_fast(
struct rhashtable *ht, struct rhash_head *obj,
@@ -955,17 +949,10 @@ static inline void *rhashtable_lookup_get_insert_fast(
* @obj: pointer to hash head inside object
* @params: hash table parameters
*
- * Locks down the bucket chain in both the old and new table if a resize
- * is in progress to ensure that writers can't remove from the old table
- * and can't insert to the new table during the atomic operation of search
- * and insertion. Searches for duplicates in both the old and new table if
- * a resize is in progress.
- *
* Lookups may occur in parallel with hashtable mutations and resizing.
*
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
+ * Will trigger an automatic deferred table resizing if residency in the
+ * table grows beyond 70%.
*
* Returns zero on success.
*/
@@ -987,9 +974,9 @@ static inline int rhashtable_lookup_insert_key(
/**
* rhashtable_lookup_get_insert_key - lookup and insert object into hash table
* @ht: hash table
+ * @key: key
* @obj: pointer to hash head inside object
* @params: hash table parameters
- * @data: pointer to element data already in hashes
*
* Just like rhashtable_lookup_insert_key(), but this function returns the
* object if it exists, NULL if it does not and the insertion was successful,
@@ -1010,19 +997,21 @@ static inline int __rhashtable_remove_fast_one(
struct rhash_head *obj, const struct rhashtable_params params,
bool rhlist)
{
+ struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct rhash_head *he;
- spinlock_t * lock;
+ unsigned long flags;
unsigned int hash;
int err = -ENOENT;
hash = rht_head_hashfn(ht, tbl, obj, params);
- lock = rht_bucket_lock(tbl, hash);
-
- spin_lock_bh(lock);
+ bkt = rht_bucket_var(tbl, hash);
+ if (!bkt)
+ return -ENOENT;
+ pprev = NULL;
+ flags = rht_lock(tbl, bkt);
- pprev = rht_bucket_var(tbl, hash);
- rht_for_each_continue(he, *pprev, tbl, hash) {
+ rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
struct rhlist_head *list;
list = container_of(he, struct rhlist_head, rhead);
@@ -1062,12 +1051,17 @@ static inline int __rhashtable_remove_fast_one(
}
}
- rcu_assign_pointer(*pprev, obj);
- break;
+ if (pprev) {
+ rcu_assign_pointer(*pprev, obj);
+ rht_unlock(tbl, bkt, flags);
+ } else {
+ rht_assign_unlock(tbl, bkt, obj, flags);
+ }
+ goto unlocked;
}
- spin_unlock_bh(lock);
-
+ rht_unlock(tbl, bkt, flags);
+unlocked:
if (err > 0) {
atomic_dec(&ht->nelems);
if (unlikely(ht->p.automatic_shrinking &&
@@ -1116,8 +1110,8 @@ static inline int __rhashtable_remove_fast(
* walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
*
- * Will automatically shrink the table via rhashtable_expand() if the
- * shrink_decision function specified at rhashtable_init() returns true.
+ * Will automatically shrink the table if permitted when residency drops
+ * below 30%.
*
* Returns zero on success, -ENOENT if the entry could not be found.
*/
@@ -1138,8 +1132,8 @@ static inline int rhashtable_remove_fast(
* walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
*
- * Will automatically shrink the table via rhashtable_expand() if the
- * shrink_decision function specified at rhashtable_init() returns true.
+ * Will automatically shrink the table if permitted when residency drops
+ * below 30%.
*
* Returns zero on success, -ENOENT if the entry could not be found.
*/
@@ -1156,9 +1150,10 @@ static inline int __rhashtable_replace_fast(
struct rhash_head *obj_old, struct rhash_head *obj_new,
const struct rhashtable_params params)
{
+ struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct rhash_head *he;
- spinlock_t *lock;
+ unsigned long flags;
unsigned int hash;
int err = -ENOENT;
@@ -1169,25 +1164,33 @@ static inline int __rhashtable_replace_fast(
if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
return -EINVAL;
- lock = rht_bucket_lock(tbl, hash);
+ bkt = rht_bucket_var(tbl, hash);
+ if (!bkt)
+ return -ENOENT;
- spin_lock_bh(lock);
+ pprev = NULL;
+ flags = rht_lock(tbl, bkt);
- pprev = rht_bucket_var(tbl, hash);
- rht_for_each_continue(he, *pprev, tbl, hash) {
+ rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
if (he != obj_old) {
pprev = &he->next;
continue;
}
rcu_assign_pointer(obj_new->next, obj_old->next);
- rcu_assign_pointer(*pprev, obj_new);
+ if (pprev) {
+ rcu_assign_pointer(*pprev, obj_new);
+ rht_unlock(tbl, bkt, flags);
+ } else {
+ rht_assign_unlock(tbl, bkt, obj_new, flags);
+ }
err = 0;
- break;
+ goto unlocked;
}
- spin_unlock_bh(lock);
+ rht_unlock(tbl, bkt, flags);
+unlocked:
return err;
}
@@ -1232,14 +1235,6 @@ static inline int rhashtable_replace_fast(
return err;
}
-/* Obsolete function, do not use in new code. */
-static inline int rhashtable_walk_init(struct rhashtable *ht,
- struct rhashtable_iter *iter, gfp_t gfp)
-{
- rhashtable_walk_enter(ht, iter);
- return 0;
-}
-
/**
* rhltable_walk_enter - Initialise an iterator
* @hlt: Table to walk over
@@ -1255,8 +1250,9 @@ static inline int rhashtable_walk_init(struct rhashtable *ht,
* For a completely stable walk you should construct your own data
* structure outside the hash table.
*
- * This function may sleep so you must not call it from interrupt
- * context or with spin locks held.
+ * This function may be called from any process context, including
+ * non-preemptible context, but cannot be called from softirq or
+ * hardirq context.
*
* You must call rhashtable_walk_exit after this function returns.
*/
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index ee9b461af095..782e14f62201 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -1,21 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RING_BUFFER_H
#define _LINUX_RING_BUFFER_H
-#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
-struct ring_buffer;
+struct trace_buffer;
struct ring_buffer_iter;
/*
* Don't refer to this struct directly, use functions below.
*/
struct ring_buffer_event {
- kmemcheck_bitfield_begin(bitfield);
u32 type_len:5, time_delta:27;
- kmemcheck_bitfield_end(bitfield);
u32 array[];
};
@@ -36,10 +34,12 @@ struct ring_buffer_event {
* array[0] = time delta (28 .. 59)
* size = 8 bytes
*
- * @RINGBUF_TYPE_TIME_STAMP: Sync time stamp with external clock
- * array[0] = tv_nsec
- * array[1..2] = tv_sec
- * size = 16 bytes
+ * @RINGBUF_TYPE_TIME_STAMP: Absolute timestamp
+ * Same format as TIME_EXTEND except that the
+ * value is an absolute timestamp, not a delta
+ * event.time_delta contains bottom 27 bits
+ * array[0] = top (28 .. 59) bits
+ * size = 8 bytes
*
* <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
* Data record
@@ -56,16 +56,17 @@ enum ring_buffer_type {
RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
RINGBUF_TYPE_PADDING,
RINGBUF_TYPE_TIME_EXTEND,
- /* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */
RINGBUF_TYPE_TIME_STAMP,
};
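
Given the layout documented above, the absolute timestamp carried by a
TIME_STAMP event could be reassembled as below (an illustrative helper,
not part of the ring buffer API):

	/* Hypothetical: rebuild the 59-bit absolute timestamp from the
	 * 27 low bits in the header and the high bits in array[0].
	 */
	static inline u64 example_abs_time_stamp(const struct ring_buffer_event *e)
	{
		return ((u64)e->array[0] << 27) | e->time_delta;
	}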
unsigned ring_buffer_event_length(struct ring_buffer_event *event);
void *ring_buffer_event_data(struct ring_buffer_event *event);
+u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
+ struct ring_buffer_event *event);
/*
* ring_buffer_discard_commit will remove an event that has not
- * ben committed yet. If this is used, then ring_buffer_unlock_commit
+ * been committed yet. If this is used, then ring_buffer_unlock_commit
* must not be called on the discarded event. This function
* will try to remove the event from the ring buffer completely
* if another event has not been written after it.
@@ -77,13 +78,13 @@ void *ring_buffer_event_data(struct ring_buffer_event *event);
* else
* ring_buffer_unlock_commit(buffer, event);
*/
-void ring_buffer_discard_commit(struct ring_buffer *buffer,
+void ring_buffer_discard_commit(struct trace_buffer *buffer,
struct ring_buffer_event *event);
/*
* size is in bytes for each per CPU buffer.
*/
-struct ring_buffer *
+struct trace_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);
/*
@@ -97,96 +98,102 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
__ring_buffer_alloc((size), (flags), &__key); \
})
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
-int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
- struct file *filp, poll_table *poll_table);
-
+int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
+__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
+ struct file *filp, poll_table *poll_table, int full);
+void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);
#define RING_BUFFER_ALL_CPUS -1
-void ring_buffer_free(struct ring_buffer *buffer);
+void ring_buffer_free(struct trace_buffer *buffer);
-int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, int cpu);
+int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu);
-void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val);
+void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val);
-struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
+struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer,
unsigned long length);
-int ring_buffer_unlock_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event);
-int ring_buffer_write(struct ring_buffer *buffer,
+int ring_buffer_unlock_commit(struct trace_buffer *buffer);
+int ring_buffer_write(struct trace_buffer *buffer,
unsigned long length, void *data);
+void ring_buffer_nest_start(struct trace_buffer *buffer);
+void ring_buffer_nest_end(struct trace_buffer *buffer);
+
struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events);
struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events);
struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
+ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
void ring_buffer_read_prepare_sync(void);
void ring_buffer_read_start(struct ring_buffer_iter *iter);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
-struct ring_buffer_event *
-ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
+void ring_buffer_iter_advance(struct ring_buffer_iter *iter);
void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
+bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);
-unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
-void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
-void ring_buffer_reset(struct ring_buffer *buffer);
+void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
+void ring_buffer_reset_online_cpus(struct trace_buffer *buffer);
+void ring_buffer_reset(struct trace_buffer *buffer);
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
-int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
- struct ring_buffer *buffer_b, int cpu);
+int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
+ struct trace_buffer *buffer_b, int cpu);
#else
static inline int
-ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
- struct ring_buffer *buffer_b, int cpu)
+ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
+ struct trace_buffer *buffer_b, int cpu)
{
return -ENODEV;
}
#endif
-bool ring_buffer_empty(struct ring_buffer *buffer);
-bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
-
-void ring_buffer_record_disable(struct ring_buffer *buffer);
-void ring_buffer_record_enable(struct ring_buffer *buffer);
-void ring_buffer_record_off(struct ring_buffer *buffer);
-void ring_buffer_record_on(struct ring_buffer *buffer);
-int ring_buffer_record_is_on(struct ring_buffer *buffer);
-void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
-void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
-
-u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_entries(struct ring_buffer *buffer);
-unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
-unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu);
-
-u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
-void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
+bool ring_buffer_empty(struct trace_buffer *buffer);
+bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu);
+
+void ring_buffer_record_disable(struct trace_buffer *buffer);
+void ring_buffer_record_enable(struct trace_buffer *buffer);
+void ring_buffer_record_off(struct trace_buffer *buffer);
+void ring_buffer_record_on(struct trace_buffer *buffer);
+bool ring_buffer_record_is_on(struct trace_buffer *buffer);
+bool ring_buffer_record_is_set_on(struct trace_buffer *buffer);
+void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu);
+void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu);
+
+u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_entries(struct trace_buffer *buffer);
+unsigned long ring_buffer_overruns(struct trace_buffer *buffer);
+unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu);
+
+u64 ring_buffer_time_stamp(struct trace_buffer *buffer);
+void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
int cpu, u64 *ts);
-void ring_buffer_set_clock(struct ring_buffer *buffer,
+void ring_buffer_set_clock(struct trace_buffer *buffer,
u64 (*clock)(void));
+void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs);
+bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);
-size_t ring_buffer_page_len(void *page);
-
+size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu);
+size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);
-void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
-void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data);
-int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
+void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
+void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data);
+int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page,
size_t len, int cpu, int full);
struct trace_seq;
diff --git a/include/linux/rio.h b/include/linux/rio.h
index 37b95c4af99d..2cd637268b4f 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* RapidIO interconnect services
* (RapidIO Interconnect Specification, http://www.rapidio.org)
*
* Copyright 2005 MontaVista Software, Inc.
* Matt Porter <mporter@kernel.crashing.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef LINUX_RIO_H
@@ -104,7 +100,7 @@ struct rio_switch {
u32 port_ok;
struct rio_switch_ops *ops;
spinlock_t lock;
- struct rio_dev *nextdev[0];
+ struct rio_dev *nextdev[];
};
/**
@@ -205,7 +201,7 @@ struct rio_dev {
u8 hopcount;
struct rio_dev *prev;
atomic_t state;
- struct rio_switch rswitch[0]; /* RIO switch info */
+ struct rio_switch rswitch[]; /* RIO switch info */
};
#define rio_dev_g(n) list_entry(n, struct rio_dev, global_list)
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index 0834264fb7f2..e49c32b0f394 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* RapidIO driver services
*
* Copyright 2005 MontaVista Software, Inc.
* Matt Porter <mporter@kernel.crashing.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef LINUX_RIO_DRV_H
@@ -448,9 +444,6 @@ static inline void rio_set_drvdata(struct rio_dev *rdev, void *data)
/* Misc driver helpers */
extern u16 rio_local_get_device_id(struct rio_mport *port);
extern void rio_local_set_device_id(struct rio_mport *port, u16 did);
-extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from);
-extern struct rio_dev *rio_get_asm(u16 vid, u16 did, u16 asm_vid, u16 asm_did,
- struct rio_dev *from);
extern int rio_init_mports(void);
#endif /* LINUX_RIO_DRV_H */
diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h
index 334c576c151c..c7e2f21dd5c1 100644
--- a/include/linux/rio_ids.h
+++ b/include/linux/rio_ids.h
@@ -1,30 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* RapidIO devices
*
* Copyright 2005 MontaVista Software, Inc.
* Matt Porter <mporter@kernel.crashing.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef LINUX_RIO_IDS_H
#define LINUX_RIO_IDS_H
-#define RIO_VID_FREESCALE 0x0002
-#define RIO_DID_MPC8560 0x0003
-
-#define RIO_VID_TUNDRA 0x000d
-#define RIO_DID_TSI500 0x0500
-#define RIO_DID_TSI568 0x0568
-#define RIO_DID_TSI572 0x0572
-#define RIO_DID_TSI574 0x0574
-#define RIO_DID_TSI576 0x0578 /* Same ID as Tsi578 */
-#define RIO_DID_TSI577 0x0577
-#define RIO_DID_TSI578 0x0578
-
#define RIO_VID_IDT 0x0038
#define RIO_DID_IDT70K200 0x0310
#define RIO_DID_IDTCPS8 0x035c
@@ -37,7 +21,6 @@
#define RIO_DID_IDTCPS1616 0x0379
#define RIO_DID_IDTVPS1616 0x0377
#define RIO_DID_IDTSPS1616 0x0378
-#define RIO_DID_TSI721 0x80ab
#define RIO_DID_IDTRXS1632 0x80e5
#define RIO_DID_IDTRXS2448 0x80e6
diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h
index 40c04efe7409..e97594325486 100644
--- a/include/linux/rio_regs.h
+++ b/include/linux/rio_regs.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* RapidIO register definitions
*
* Copyright 2005 MontaVista Software, Inc.
* Matt Porter <mporter@kernel.crashing.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef LINUX_RIO_REGS_H
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 43ef2c30cb0f..b87d01660412 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
@@ -10,6 +11,8 @@
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/memremap.h>
/*
* The anon_vma heads a list of private "related" vmas, to scan if
@@ -38,12 +41,15 @@ struct anon_vma {
atomic_t refcount;
/*
- * Count of child anon_vmas and VMAs which points to this anon_vma.
+ * Count of child anon_vmas. Equals the count of all anon_vmas that
+ * have ->parent pointing to this one, including itself.
*
* This counter is used for making decision about reusing anon_vma
* instead of forking new one. See comments in function anon_vma_clone.
*/
- unsigned degree;
+ unsigned long num_children;
+ /* Count of VMAs whose ->anon_vma pointer points to this object. */
+ unsigned long num_active_vmas;
struct anon_vma *parent; /* Parent of this anon_vma */
@@ -55,7 +61,9 @@ struct anon_vma {
* is serialized by a system wide lock only visible to
* mm_take_all_locks() (mm_all_locks_mutex).
*/
- struct rb_root rb_root; /* Interval tree of private "related" vmas */
+
+ /* Interval tree of private "related" vmas */
+ struct rb_root_cached rb_root;
};
/*
@@ -74,7 +82,7 @@ struct anon_vma {
struct anon_vma_chain {
struct vm_area_struct *vma;
struct anon_vma *anon_vma;
- struct list_head same_vma; /* locked by mmap_sem & page_table_lock */
+ struct list_head same_vma; /* locked by mmap_lock & page_table_lock */
struct rb_node rb; /* locked by anon_vma->rwsem */
unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
@@ -83,17 +91,14 @@ struct anon_vma_chain {
};
enum ttu_flags {
- TTU_MIGRATION = 0x1, /* migration mode */
- TTU_MUNLOCK = 0x2, /* munlock mode */
-
TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */
TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */
- TTU_IGNORE_ACCESS = 0x10, /* don't age */
- TTU_IGNORE_HWPOISON = 0x20, /* corrupted page is recoverable */
+ TTU_SYNC = 0x10, /* avoid racy checks with PVMW_SYNC */
+ TTU_HWPOISON = 0x20, /* do convert pte to hwpoison entry */
TTU_BATCH_FLUSH = 0x40, /* Batch TLB flushes where possible
* and caller guarantees they will
* do a final flush if necessary */
- TTU_RMAP_LOCKED = 0x80 /* do not grab rmap lock:
+ TTU_RMAP_LOCKED = 0x80, /* do not grab rmap lock:
* caller holds it */
};
@@ -126,6 +131,11 @@ static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
down_read(&anon_vma->root->rwsem);
}
+static inline int anon_vma_trylock_read(struct anon_vma *anon_vma)
+{
+ return down_read_trylock(&anon_vma->root->rwsem);
+}
+
static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
up_read(&anon_vma->root->rwsem);
@@ -156,50 +166,222 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
unlink_anon_vmas(next);
}
-struct anon_vma *page_get_anon_vma(struct page *page);
+struct anon_vma *folio_get_anon_vma(struct folio *folio);
-/* bitflags for do_page_add_anon_rmap() */
-#define RMAP_EXCLUSIVE 0x01
-#define RMAP_COMPOUND 0x02
+/* RMAP flags, currently only relevant for some anon rmap operations. */
+typedef int __bitwise rmap_t;
+
+/*
+ * No special request: if the page is a subpage of a compound page, it is
+ * mapped via a PTE. The mapped (sub)page is possibly shared between processes.
+ */
+#define RMAP_NONE ((__force rmap_t)0)
+
+/* The (sub)page is exclusive to a single process. */
+#define RMAP_EXCLUSIVE ((__force rmap_t)BIT(0))
+
+/*
+ * The compound page is not mapped via PTEs, but instead via a single PMD and
+ * should be accounted accordingly.
+ */
+#define RMAP_COMPOUND ((__force rmap_t)BIT(1))
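
For illustration, a hedged sketch of how the typed flags read at a call
site ('page', 'vma' and 'address' are assumed to come from surrounding
fault-handling code):

	/* An anon page exclusive to this process, mapped via one PTE: */
	page_add_anon_rmap(page, vma, address, RMAP_EXCLUSIVE);

	/* A compound page mapped via a single PMD, possibly shared: */
	page_add_anon_rmap(page, vma, address, RMAP_COMPOUND);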
/*
* rmap interfaces called when adding or removing pte of page
*/
void page_move_anon_rmap(struct page *, struct vm_area_struct *);
void page_add_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long, bool);
-void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long, int);
+ unsigned long address, rmap_t flags);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long, bool);
-void page_add_file_rmap(struct page *, bool);
-void page_remove_rmap(struct page *, bool);
+ unsigned long address);
+void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
+ unsigned long address);
+void page_add_file_rmap(struct page *, struct vm_area_struct *,
+ bool compound);
+void page_remove_rmap(struct page *, struct vm_area_struct *,
+ bool compound);
void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long);
-void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long);
+ unsigned long address, rmap_t flags);
+void hugepage_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
+ unsigned long address);
+
+static inline void __page_dup_rmap(struct page *page, bool compound)
+{
+ if (compound) {
+ struct folio *folio = (struct folio *)page;
+
+ VM_BUG_ON_PAGE(compound && !PageHead(page), page);
+ atomic_inc(&folio->_entire_mapcount);
+ } else {
+ atomic_inc(&page->_mapcount);
+ }
+}
-static inline void page_dup_rmap(struct page *page, bool compound)
+static inline void page_dup_file_rmap(struct page *page, bool compound)
{
- atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
+ __page_dup_rmap(page, compound);
+}
+
+/**
+ * page_try_dup_anon_rmap - try duplicating a mapping of an already mapped
+ * anonymous page
+ * @page: the page to duplicate the mapping for
+ * @compound: the page is mapped as compound or as a small page
+ * @vma: the source vma
+ *
+ * The caller needs to hold the PT lock and the vma->vm_mm->write_protect_seq.
+ *
+ * Duplicating the mapping can only fail if the page may be pinned; device
+ * private pages cannot get pinned and consequently this function cannot fail.
+ *
+ * If duplicating the mapping succeeds, the page has to be mapped R/O into
+ * the parent and the child. It must *not* get mapped writable after this call.
+ *
+ * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise.
+ */
+static inline int page_try_dup_anon_rmap(struct page *page, bool compound,
+ struct vm_area_struct *vma)
+{
+ VM_BUG_ON_PAGE(!PageAnon(page), page);
+
+ /*
+ * No need to check+clear for already shared pages, including KSM
+ * pages.
+ */
+ if (!PageAnonExclusive(page))
+ goto dup;
+
+ /*
+ * If this page may have been pinned by the parent process,
+ * don't allow duplicating the mapping; instead require that, e.g.,
+ * the page be copied immediately for the child so that we always
+ * guarantee the pinned page won't be randomly replaced in the
+ * future on write faults.
+ */
+ if (likely(!is_device_private_page(page) &&
+ unlikely(page_needs_cow_for_dma(vma, page))))
+ return -EBUSY;
+
+ ClearPageAnonExclusive(page);
+ /*
+ * It's okay to share the anon page between both processes, mapping
+ * the page R/O into both processes.
+ */
+dup:
+ __page_dup_rmap(page, compound);
+ return 0;
+}
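
As an illustration of the rules above, a fork()-time caller might use this
helper roughly as follows. This is a minimal sketch, not the kernel's actual
copy_present_pte(); the helper name, reference handling and wrprotect
placement are simplified for illustration:

	/* Hedged sketch: honoring the R/O rule after a successful dup. */
	static int sketch_copy_anon_pte(struct vm_area_struct *src_vma,
					unsigned long addr, pte_t *src_pte,
					struct page *page)
	{
		get_page(page);		/* the child keeps its own reference */
		if (page_try_dup_anon_rmap(page, false, src_vma)) {
			put_page(page);
			return -EAGAIN;	/* maybe pinned: copy the page instead */
		}
		/* Success: the page must stay mapped R/O in parent and child. */
		ptep_set_wrprotect(src_vma->vm_mm, addr, src_pte);
		return 0;
	}
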
+
+/**
+ * page_try_share_anon_rmap - try marking an exclusive anonymous page possibly
+ * shared to prepare for KSM or temporary unmapping
+ * @page: the exclusive anonymous page to try marking possibly shared
+ *
+ * The caller needs to hold the PT lock and has to have the page table entry
+ * cleared/invalidated.
+ *
+ * This is similar to page_try_dup_anon_rmap(); however, it is not used during
+ * fork() to duplicate a mapping, but instead to prepare for KSM or for
+ * temporarily unmapping a page (swap, migration) via page_remove_rmap().
+ *
+ * Marking the page shared can only fail if the page may be pinned; device
+ * private pages cannot get pinned, so for them this function cannot fail.
+ *
+ * Returns 0 if marking the page possibly shared succeeded. Returns -EBUSY
+ * otherwise.
+ */
+static inline int page_try_share_anon_rmap(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageAnon(page) || !PageAnonExclusive(page), page);
+
+ /* device private pages cannot get pinned via GUP. */
+ if (unlikely(is_device_private_page(page))) {
+ ClearPageAnonExclusive(page);
+ return 0;
+ }
+
+ /*
+ * We have to make sure that when we clear PageAnonExclusive, that
+ * the page is not pinned and that concurrent GUP-fast won't succeed in
+ * concurrently pinning the page.
+ *
+ * Conceptually, PageAnonExclusive clearing consists of:
+ * (A1) Clear PTE
+ * (A2) Check if the page is pinned; back off if so.
+ * (A3) Clear PageAnonExclusive
+ * (A4) Restore PTE (optional, but certainly not writable)
+ *
+ * When clearing PageAnonExclusive, we cannot possibly map the page
+ * writable again, because anon pages that may be shared must never
+ * be writable. So in any case, if the PTE was writable it cannot
+ * be writable anymore afterwards and there would be a PTE change. Only
+ * if the PTE wasn't writable, there might not be a PTE change.
+ *
+ * Conceptually, GUP-fast pinning of an anon page consists of:
+ * (B1) Read the PTE
+ * (B2) FOLL_WRITE: check if the PTE is not writable; back off if so.
+ * (B3) Pin the mapped page
+ * (B4) Check if the PTE changed by re-reading it; back off if so.
+ * (B5) If the original PTE is not writable, check if
+ * PageAnonExclusive is not set; back off if so.
+ *
+ * If the PTE was writable, we only have to make sure that GUP-fast
+ * observes a PTE change and properly backs off.
+ *
+ * If the PTE was not writable, we have to make sure that GUP-fast either
+ * detects a (temporary) PTE change or that PageAnonExclusive is cleared
+ * and properly backs off.
+ *
+ * Consequently, when clearing PageAnonExclusive(), we have to make
+ * sure that (A1), (A2)/(A3) and (A4) happen in the right memory
+ * order. In GUP-fast pinning code, we have to make sure that (B3),(B4)
+ * and (B5) happen in the right memory order.
+ *
+ * We assume that there might not be a memory barrier after
+ * clearing/invalidating the PTE (A1) and before restoring the PTE (A4),
+ * so we use explicit ones here.
+ */
+
+ /* Paired with the memory barrier in try_grab_folio(). */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_mb();
+
+ if (unlikely(page_maybe_dma_pinned(page)))
+ return -EBUSY;
+ ClearPageAnonExclusive(page);
+
+ /*
+ * This is conceptually a smp_wmb() paired with the smp_rmb() in
+ * gup_must_unshare().
+ */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_mb__after_atomic();
+ return 0;
}
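
Read as caller-side code, steps (A1)-(A4) above amount to the following
sketch (a hypothetical helper; the real users are the migration and swap
unmap paths in mm/rmap.c):

	/* Hedged sketch of the clearing side, steps (A1)-(A4). */
	static bool sketch_unmap_exclusive(struct vm_area_struct *vma,
					   unsigned long addr, pte_t *ptep,
					   struct page *page)
	{
		pte_t pteval = ptep_clear_flush(vma, addr, ptep);	/* (A1) */

		if (page_try_share_anon_rmap(page)) {			/* (A2)+(A3) */
			set_pte_at(vma->vm_mm, addr, ptep, pteval);	/* (A4) */
			return false;
		}
		/* Install a migration/swap entry instead of the cleared PTE. */
		return true;
	}
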
/*
* Called from mm/vmscan.c to handle paging out
*/
-int page_referenced(struct page *, int is_locked,
+int folio_referenced(struct folio *, int is_locked,
struct mem_cgroup *memcg, unsigned long *vm_flags);
-bool try_to_unmap(struct page *, enum ttu_flags flags);
+void try_to_migrate(struct folio *folio, enum ttu_flags flags);
+void try_to_unmap(struct folio *, enum ttu_flags flags);
+
+int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end, struct page **pages,
+ void *arg);
/* Avoid racy checks */
#define PVMW_SYNC (1 << 0)
-/* Look for migarion entries rather than present PTEs */
+/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION (1 << 1)
struct page_vma_mapped_walk {
- struct page *page;
+ unsigned long pfn;
+ unsigned long nr_pages;
+ pgoff_t pgoff;
struct vm_area_struct *vma;
unsigned long address;
pmd_t *pmd;
@@ -208,9 +390,30 @@ struct page_vma_mapped_walk {
unsigned int flags;
};
+#define DEFINE_PAGE_VMA_WALK(name, _page, _vma, _address, _flags) \
+ struct page_vma_mapped_walk name = { \
+ .pfn = page_to_pfn(_page), \
+ .nr_pages = compound_nr(_page), \
+ .pgoff = page_to_pgoff(_page), \
+ .vma = _vma, \
+ .address = _address, \
+ .flags = _flags, \
+ }
+
+#define DEFINE_FOLIO_VMA_WALK(name, _folio, _vma, _address, _flags) \
+ struct page_vma_mapped_walk name = { \
+ .pfn = folio_pfn(_folio), \
+ .nr_pages = folio_nr_pages(_folio), \
+ .pgoff = folio_pgoff(_folio), \
+ .vma = _vma, \
+ .address = _address, \
+ .flags = _flags, \
+ }
+
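
A typical consumer pairs one of these initializers with
page_vma_mapped_walk(); the sketch below matches the ->rmap_one() signature
introduced further down and is illustrative only:

	static bool sketch_walk_one(struct folio *folio, struct vm_area_struct *vma,
				    unsigned long address, void *arg)
	{
		DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);

		while (page_vma_mapped_walk(&pvmw)) {
			/* pvmw.pte or pvmw.pmd now points at one mapping. */
		}
		return true;
	}
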
static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
- if (pvmw->pte)
+	/* HugeTLB ptes are set without pte_offset_map(), so never pte_unmap() them. */
+ if (pvmw->pte && !is_vm_hugetlb_page(pvmw->vma))
pte_unmap(pvmw->pte);
if (pvmw->ptl)
spin_unlock(pvmw->ptl);
@@ -229,27 +432,21 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
*
* returns the number of cleaned PTEs.
*/
-int page_mkclean(struct page *);
+int folio_mkclean(struct folio *);
-/*
- * called in munlock()/munmap() path to check for other vmas holding
- * the page mlocked.
- */
-void try_to_munlock(struct page *);
+int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
+ struct vm_area_struct *vma);
-void remove_migration_ptes(struct page *old, struct page *new, bool locked);
+void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
-/*
- * Called by memory-failure.c to kill processes.
- */
-struct anon_vma *page_lock_anon_vma_read(struct page *page);
-void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
/*
* rmap_walk_control: To control rmap traversing for specific needs
*
* arg: passed to rmap_one() and invalid_vma()
+ * try_lock: bail out if the rmap lock is contended
+ * contended: indicate the rmap traversal bailed out due to lock contention
* rmap_one: executed on each vma where page is mapped
* done: for checking traversing termination condition
* anon_lock: for getting anon_lock by optimized way rather than default
@@ -257,19 +454,24 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
*/
struct rmap_walk_control {
void *arg;
+ bool try_lock;
+ bool contended;
/*
* Return false if page table scanning in rmap_walk should be stopped.
* Otherwise, return true.
*/
- bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
+ bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, void *arg);
- int (*done)(struct page *page);
- struct anon_vma *(*anon_lock)(struct page *page);
+ int (*done)(struct folio *folio);
+ struct anon_vma *(*anon_lock)(struct folio *folio,
+ struct rmap_walk_control *rwc);
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};
-void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
-void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
+void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
+void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
+struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
+ struct rmap_walk_control *rwc);
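
A minimal sketch of wiring up the new try_lock/contended pair (assumed
usage; folio_referenced() is the kind of caller that benefits from bailing
out under contention):

	static void sketch_walk(struct folio *folio)
	{
		struct rmap_walk_control rwc = {
			.rmap_one = sketch_walk_one,	/* see the sketch above */
			.try_lock = true,		/* bail out on contention */
		};

		rmap_walk(folio, &rwc);
		if (rwc.contended)
			return;		/* traversal was cut short; retry later */
	}
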
#else /* !CONFIG_MMU */
@@ -277,7 +479,7 @@ void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
#define anon_vma_prepare(vma) (0)
#define anon_vma_link(vma) do {} while (0)
-static inline int page_referenced(struct page *page, int is_locked,
+static inline int folio_referenced(struct folio *folio, int is_locked,
struct mem_cgroup *memcg,
unsigned long *vm_flags)
{
@@ -285,14 +487,18 @@ static inline int page_referenced(struct page *page, int is_locked,
return 0;
}
-#define try_to_unmap(page, refs) false
+static inline void try_to_unmap(struct folio *folio, enum ttu_flags flags)
+{
+}
-static inline int page_mkclean(struct page *page)
+static inline int folio_mkclean(struct folio *folio)
{
return 0;
}
-
-
#endif /* CONFIG_MMU */
+static inline int page_mkclean(struct page *page)
+{
+ return folio_mkclean(page_folio(page));
+}
#endif /* _LINUX_RMAP_H */
diff --git a/include/linux/rmi.h b/include/linux/rmi.h
index 64125443f8a6..ab7eea01ab42 100644
--- a/include/linux/rmi.h
+++ b/include/linux/rmi.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2011-2016 Synaptics Incorporated
* Copyright (c) 2011 Unixphere
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef _RMI_H
@@ -105,15 +102,16 @@ struct rmi_2d_sensor_platform_data {
};
/**
- * struct rmi_f30_data - overrides defaults for a single F30 GPIOs/LED chip.
+ * struct rmi_gpio_data - overrides defaults for a single F30/F3A GPIOs/LED
+ * chip.
* @buttonpad - the touchpad is a buttonpad, so enable only the first actual
* button that is found.
- * @trackstick_buttons - Set when the function 30 is handling the physical
+ * @trackstick_buttons - Set when the function 30 or 3a is handling the physical
* buttons of the trackstick (as a PS/2 passthrough device).
- * @disable - the touchpad incorrectly reports F30 and it should be ignored.
+ * @disable - the touchpad incorrectly reports F30/F3A and it should be ignored.
* This is a special case which is due to misconfigured firmware.
*/
-struct rmi_f30_data {
+struct rmi_gpio_data {
bool buttonpad;
bool trackstick_buttons;
bool disable;
@@ -209,7 +207,7 @@ struct rmi_device_platform_data_spi {
*
* @reset_delay_ms - after issuing a reset command to the touch sensor, the
* driver waits a few milliseconds to give the firmware a chance to
- * to re-initialize. You can override the default wait period here.
+ * re-initialize. You can override the default wait period here.
* @irq: irq associated with the attn gpio line, or negative
*/
struct rmi_device_platform_data {
@@ -221,7 +219,7 @@ struct rmi_device_platform_data {
/* function handler pdata */
struct rmi_2d_sensor_platform_data sensor_pdata;
struct rmi_f01_power_management power_management;
- struct rmi_f30_data f30_data;
+ struct rmi_gpio_data gpio_data;
};
/**
@@ -354,6 +352,8 @@ struct rmi_driver_data {
struct mutex irq_mutex;
struct input_dev *input;
+ struct irq_domain *irqdomain;
+
u8 pdt_props;
u8 num_rx_electrodes;
diff --git a/include/linux/rndis.h b/include/linux/rndis.h
index 93c0a64aefa6..882587c2b15e 100644
--- a/include/linux/rndis.h
+++ b/include/linux/rndis.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Remote Network Driver Interface Specification (RNDIS)
* definitions of the magic numbers used by this protocol
diff --git a/include/linux/rodata_test.h b/include/linux/rodata_test.h
index 84766bcdd01f..635ad4dbd0ae 100644
--- a/include/linux/rodata_test.h
+++ b/include/linux/rodata_test.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* rodata_test.h: functional test for mark_rodata_ro function
*
* (C) Copyright 2008 Intel Corporation
* Author: Arjan van de Ven <arjan@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
*/
#ifndef _RODATA_TEST_H
diff --git a/include/linux/root_dev.h b/include/linux/root_dev.h
index ed241aad7c17..4e78651371ba 100644
--- a/include/linux/root_dev.h
+++ b/include/linux/root_dev.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ROOT_DEV_H_
#define _ROOT_DEV_H_
@@ -7,6 +8,7 @@
enum {
Root_NFS = MKDEV(UNNAMED_MAJOR, 255),
+ Root_CIFS = MKDEV(UNNAMED_MAJOR, 254),
Root_RAM0 = MKDEV(RAMDISK_MAJOR, 0),
Root_RAM1 = MKDEV(RAMDISK_MAJOR, 1),
Root_FD0 = MKDEV(FLOPPY_MAJOR, 0),
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index 10d6ae8bbb7d..523c98b96cb4 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -1,35 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
/*
* Remote processor messaging
*
* Copyright (C) 2011 Texas Instruments, Inc.
* Copyright (C) 2011 Google, Inc.
* All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Texas Instruments nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _LINUX_RPMSG_H
@@ -42,8 +17,8 @@
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/poll.h>
-
-#define RPMSG_ADDR_ANY 0xFFFFFFFF
+#include <linux/rpmsg/byteorder.h>
+#include <uapi/linux/rpmsg.h>
struct rpmsg_device;
struct rpmsg_endpoint;
@@ -66,20 +41,24 @@ struct rpmsg_channel_info {
 * rpmsg_device - device that belongs to the rpmsg bus
* @dev: the device struct
* @id: device id (used to match between rpmsg drivers and devices)
- * @driver_override: driver name to force a match
+ * @driver_override: driver name to force a match; do not set directly,
+ * because core frees it; use driver_set_override() to
+ * set or clear it.
* @src: local address
* @dst: destination address
* @ept: the rpmsg endpoint of this channel
* @announce: if set, rpmsg will announce the creation/removal of this channel
+ * @little_endian: True if transport is using little endian byte representation
*/
struct rpmsg_device {
struct device dev;
struct rpmsg_device_id id;
- char *driver_override;
+ const char *driver_override;
u32 src;
u32 dst;
struct rpmsg_endpoint *ept;
bool announce;
+ bool little_endian;
const struct rpmsg_device_ops *ops;
};
@@ -136,10 +115,61 @@ struct rpmsg_driver {
int (*callback)(struct rpmsg_device *, void *, int, void *, u32);
};
+static inline u16 rpmsg16_to_cpu(struct rpmsg_device *rpdev, __rpmsg16 val)
+{
+ if (!rpdev)
+ return __rpmsg16_to_cpu(rpmsg_is_little_endian(), val);
+ else
+ return __rpmsg16_to_cpu(rpdev->little_endian, val);
+}
+
+static inline __rpmsg16 cpu_to_rpmsg16(struct rpmsg_device *rpdev, u16 val)
+{
+ if (!rpdev)
+ return __cpu_to_rpmsg16(rpmsg_is_little_endian(), val);
+ else
+ return __cpu_to_rpmsg16(rpdev->little_endian, val);
+}
+
+static inline u32 rpmsg32_to_cpu(struct rpmsg_device *rpdev, __rpmsg32 val)
+{
+ if (!rpdev)
+ return __rpmsg32_to_cpu(rpmsg_is_little_endian(), val);
+ else
+ return __rpmsg32_to_cpu(rpdev->little_endian, val);
+}
+
+static inline __rpmsg32 cpu_to_rpmsg32(struct rpmsg_device *rpdev, u32 val)
+{
+ if (!rpdev)
+ return __cpu_to_rpmsg32(rpmsg_is_little_endian(), val);
+ else
+ return __cpu_to_rpmsg32(rpdev->little_endian, val);
+}
+
+static inline u64 rpmsg64_to_cpu(struct rpmsg_device *rpdev, __rpmsg64 val)
+{
+ if (!rpdev)
+ return __rpmsg64_to_cpu(rpmsg_is_little_endian(), val);
+ else
+ return __rpmsg64_to_cpu(rpdev->little_endian, val);
+}
+
+static inline __rpmsg64 cpu_to_rpmsg64(struct rpmsg_device *rpdev, u64 val)
+{
+ if (!rpdev)
+ return __cpu_to_rpmsg64(rpmsg_is_little_endian(), val);
+ else
+ return __cpu_to_rpmsg64(rpdev->little_endian, val);
+}
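
With these accessors a driver converts wire-format fields according to the
channel's endianness instead of assuming little endian. A hedged example;
the message layout and callback body are made up for illustration:

	struct sketch_msg {
		__rpmsg32 seq;
		__rpmsg16 len;
	} __packed;

	static int sketch_cb(struct rpmsg_device *rpdev, void *data, int count,
			     void *priv, u32 src)
	{
		struct sketch_msg *msg = data;
		u32 seq = rpmsg32_to_cpu(rpdev, msg->seq);
		u16 len = rpmsg16_to_cpu(rpdev, msg->len);

		dev_dbg(&rpdev->dev, "seq %u, payload %u bytes\n", seq, len);
		return 0;
	}
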
+
#if IS_ENABLED(CONFIG_RPMSG)
-int register_rpmsg_device(struct rpmsg_device *dev);
-void unregister_rpmsg_device(struct rpmsg_device *dev);
+int rpmsg_register_device_override(struct rpmsg_device *rpdev,
+ const char *driver_override);
+int rpmsg_register_device(struct rpmsg_device *rpdev);
+int rpmsg_unregister_device(struct device *parent,
+ struct rpmsg_channel_info *chinfo);
int __register_rpmsg_driver(struct rpmsg_driver *drv, struct module *owner);
void unregister_rpmsg_driver(struct rpmsg_driver *drv);
void rpmsg_destroy_ept(struct rpmsg_endpoint *);
@@ -157,20 +187,31 @@ int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
void *data, int len);
-unsigned int rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
+__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
+ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept);
+
#else
-static inline int register_rpmsg_device(struct rpmsg_device *dev)
+static inline int rpmsg_register_device_override(struct rpmsg_device *rpdev,
+ const char *driver_override)
+{
+ return -ENXIO;
+}
+
+static inline int rpmsg_register_device(struct rpmsg_device *rpdev)
{
return -ENXIO;
}
-static inline void unregister_rpmsg_device(struct rpmsg_device *dev)
+static inline int rpmsg_unregister_device(struct device *parent,
+ struct rpmsg_channel_info *chinfo)
{
/* This shouldn't be possible */
WARN_ON(1);
+
+ return -ENXIO;
}
static inline int __register_rpmsg_driver(struct rpmsg_driver *drv,
@@ -202,7 +243,7 @@ static inline struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev
/* This shouldn't be possible */
WARN_ON(1);
- return ERR_PTR(-ENXIO);
+ return NULL;
}
static inline int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len)
@@ -258,7 +299,7 @@ static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
return -ENXIO;
}
-static inline unsigned int rpmsg_poll(struct rpmsg_endpoint *ept,
+static inline __poll_t rpmsg_poll(struct rpmsg_endpoint *ept,
struct file *filp, poll_table *wait)
{
/* This shouldn't be possible */
@@ -267,6 +308,14 @@ static inline unsigned int rpmsg_poll(struct rpmsg_endpoint *ept,
return 0;
}
+static inline ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+}
+
#endif /* IS_ENABLED(CONFIG_RPMSG) */
/* use a macro to avoid include chaining to get THIS_MODULE */
diff --git a/include/linux/rpmsg/byteorder.h b/include/linux/rpmsg/byteorder.h
new file mode 100644
index 000000000000..c0f565dbad6d
--- /dev/null
+++ b/include/linux/rpmsg/byteorder.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Follows implementation found in linux/virtio_byteorder.h
+ */
+#ifndef _LINUX_RPMSG_BYTEORDER_H
+#define _LINUX_RPMSG_BYTEORDER_H
+#include <linux/types.h>
+#include <uapi/linux/rpmsg_types.h>
+
+static inline bool rpmsg_is_little_endian(void)
+{
+#ifdef __LITTLE_ENDIAN
+ return true;
+#else
+ return false;
+#endif
+}
+
+static inline u16 __rpmsg16_to_cpu(bool little_endian, __rpmsg16 val)
+{
+ if (little_endian)
+ return le16_to_cpu((__force __le16)val);
+ else
+ return be16_to_cpu((__force __be16)val);
+}
+
+static inline __rpmsg16 __cpu_to_rpmsg16(bool little_endian, u16 val)
+{
+ if (little_endian)
+ return (__force __rpmsg16)cpu_to_le16(val);
+ else
+ return (__force __rpmsg16)cpu_to_be16(val);
+}
+
+static inline u32 __rpmsg32_to_cpu(bool little_endian, __rpmsg32 val)
+{
+ if (little_endian)
+ return le32_to_cpu((__force __le32)val);
+ else
+ return be32_to_cpu((__force __be32)val);
+}
+
+static inline __rpmsg32 __cpu_to_rpmsg32(bool little_endian, u32 val)
+{
+ if (little_endian)
+ return (__force __rpmsg32)cpu_to_le32(val);
+ else
+ return (__force __rpmsg32)cpu_to_be32(val);
+}
+
+static inline u64 __rpmsg64_to_cpu(bool little_endian, __rpmsg64 val)
+{
+ if (little_endian)
+ return le64_to_cpu((__force __le64)val);
+ else
+ return be64_to_cpu((__force __be64)val);
+}
+
+static inline __rpmsg64 __cpu_to_rpmsg64(bool little_endian, u64 val)
+{
+ if (little_endian)
+ return (__force __rpmsg64)cpu_to_le64(val);
+ else
+ return (__force __rpmsg64)cpu_to_be64(val);
+}
+
+#endif /* _LINUX_RPMSG_BYTEORDER_H */
diff --git a/include/linux/rpmsg/mtk_rpmsg.h b/include/linux/rpmsg/mtk_rpmsg.h
new file mode 100644
index 000000000000..363b60178040
--- /dev/null
+++ b/include/linux/rpmsg/mtk_rpmsg.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC.
+ */
+
+#ifndef __LINUX_RPMSG_MTK_RPMSG_H
+#define __LINUX_RPMSG_MTK_RPMSG_H
+
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+
+typedef void (*ipi_handler_t)(void *data, unsigned int len, void *priv);
+
+/*
+ * struct mtk_rpmsg_info - IPI functions tied to the rpmsg device.
+ * @register_ipi: register IPI handler for an IPI id.
+ * @unregister_ipi: unregister IPI handler for a registered IPI id.
+ * @send_ipi: send IPI to an IPI id. wait is the timeout (in msecs) to wait
+ * until response, or 0 if there's no timeout.
+ * @ns_ipi_id: the IPI id used for name service, or -1 if name service isn't
+ * supported.
+ */
+struct mtk_rpmsg_info {
+ int (*register_ipi)(struct platform_device *pdev, u32 id,
+ ipi_handler_t handler, void *priv);
+ void (*unregister_ipi)(struct platform_device *pdev, u32 id);
+ int (*send_ipi)(struct platform_device *pdev, u32 id,
+ void *buf, unsigned int len, unsigned int wait);
+ int ns_ipi_id;
+};
+
+struct rproc_subdev *
+mtk_rpmsg_create_rproc_subdev(struct platform_device *pdev,
+ struct mtk_rpmsg_info *info);
+
+void mtk_rpmsg_destroy_rproc_subdev(struct rproc_subdev *subdev);
+
+#endif
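
A platform remoteproc driver fills struct mtk_rpmsg_info with its IPI
plumbing and registers the resulting subdevice. Everything named sketch_*
below is hypothetical; only the two mtk_rpmsg_* calls and
rproc_add_subdev() are real APIs:

	/* Hypothetical IPI hooks matching the ops signatures above. */
	static int sketch_register_ipi(struct platform_device *pdev, u32 id,
				       ipi_handler_t handler, void *priv);
	static void sketch_unregister_ipi(struct platform_device *pdev, u32 id);
	static int sketch_send_ipi(struct platform_device *pdev, u32 id,
				   void *buf, unsigned int len, unsigned int wait);

	static struct mtk_rpmsg_info sketch_info = {
		.register_ipi	= sketch_register_ipi,
		.unregister_ipi	= sketch_unregister_ipi,
		.send_ipi	= sketch_send_ipi,
		.ns_ipi_id	= -1,	/* no name-service IPI in this sketch */
	};

	static int sketch_probe(struct platform_device *pdev, struct rproc *rproc)
	{
		struct rproc_subdev *subdev;

		subdev = mtk_rpmsg_create_rproc_subdev(pdev, &sketch_info);
		if (!subdev)
			return -ENOMEM;
		rproc_add_subdev(rproc, subdev);
		return 0;
	}
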
diff --git a/include/linux/rpmsg/ns.h b/include/linux/rpmsg/ns.h
new file mode 100644
index 000000000000..a7804edd6d58
--- /dev/null
+++ b/include/linux/rpmsg/ns.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_RPMSG_NS_H
+#define _LINUX_RPMSG_NS_H
+
+#include <linux/mod_devicetable.h>
+#include <linux/rpmsg.h>
+#include <linux/rpmsg/byteorder.h>
+#include <linux/types.h>
+
+/**
+ * struct rpmsg_ns_msg - dynamic name service announcement message
+ * @name: name of remote service that is published
+ * @addr: address of remote service that is published
+ * @flags: indicates whether service is created or destroyed
+ *
+ * This message is sent across to publish a new service, or announce
+ * about its removal. When we receive these messages, an appropriate
+ * rpmsg channel (i.e device) is created/destroyed. In turn, the ->probe()
+ * or ->remove() handler of the appropriate rpmsg driver will be invoked
+ * (if/as-soon-as one is registered).
+ */
+struct rpmsg_ns_msg {
+ char name[RPMSG_NAME_SIZE];
+ __rpmsg32 addr;
+ __rpmsg32 flags;
+} __packed;
+
+/**
+ * enum rpmsg_ns_flags - dynamic name service announcement flags
+ *
+ * @RPMSG_NS_CREATE: a new remote service was just created
+ * @RPMSG_NS_DESTROY: a known remote service was just destroyed
+ */
+enum rpmsg_ns_flags {
+ RPMSG_NS_CREATE = 0,
+ RPMSG_NS_DESTROY = 1,
+};
+
+/* Address 53 is reserved for advertising remote services */
+#define RPMSG_NS_ADDR (53)
+
+int rpmsg_ns_register_device(struct rpmsg_device *rpdev);
+
+#endif
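
Putting the layout, the flags and the fixed address together, announcing a
service could look like this sketch (the channel name is illustrative;
rpmsg_sendto() is the existing send primitive):

	static int sketch_announce(struct rpmsg_device *rpdev, u32 addr)
	{
		struct rpmsg_ns_msg nsm = {
			.addr  = cpu_to_rpmsg32(rpdev, addr),
			.flags = cpu_to_rpmsg32(rpdev, RPMSG_NS_CREATE),
		};

		strscpy(nsm.name, "sketch-service", RPMSG_NAME_SIZE);
		return rpmsg_sendto(rpdev->ept, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
	}
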
diff --git a/include/linux/rpmsg/qcom_glink.h b/include/linux/rpmsg/qcom_glink.h
new file mode 100644
index 000000000000..bfbd48f435fa
--- /dev/null
+++ b/include/linux/rpmsg/qcom_glink.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_RPMSG_QCOM_GLINK_H
+#define _LINUX_RPMSG_QCOM_GLINK_H
+
+#include <linux/device.h>
+
+struct qcom_glink_smem;
+
+#if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK)
+void qcom_glink_ssr_notify(const char *ssr_name);
+#else
+static inline void qcom_glink_ssr_notify(const char *ssr_name) {}
+#endif
+
+#if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK_SMEM)
+
+struct qcom_glink_smem *qcom_glink_smem_register(struct device *parent,
+ struct device_node *node);
+void qcom_glink_smem_unregister(struct qcom_glink_smem *glink);
+
+#else
+
+static inline struct qcom_glink_smem *
+qcom_glink_smem_register(struct device *parent,
+ struct device_node *node)
+{
+ return NULL;
+}
+
+static inline void qcom_glink_smem_unregister(struct qcom_glink_smem *glink) {}
+#endif
+
+#endif
diff --git a/include/linux/rpmsg/qcom_smd.h b/include/linux/rpmsg/qcom_smd.h
index f27917e0a101..3379bf4e1cb1 100644
--- a/include/linux/rpmsg/qcom_smd.h
+++ b/include/linux/rpmsg/qcom_smd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RPMSG_QCOM_SMD_H
#define _LINUX_RPMSG_QCOM_SMD_H
@@ -10,7 +11,7 @@ struct qcom_smd_edge;
struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
struct device_node *node);
-int qcom_smd_unregister_edge(struct qcom_smd_edge *edge);
+void qcom_smd_unregister_edge(struct qcom_smd_edge *edge);
#else
@@ -21,9 +22,8 @@ qcom_smd_register_edge(struct device *parent,
return NULL;
}
-static inline int qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
+static inline void qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
{
- return 0;
}
#endif
diff --git a/include/linux/rslib.h b/include/linux/rslib.h
index 746580c1939c..238bb85243d3 100644
--- a/include/linux/rslib.h
+++ b/include/linux/rslib.h
@@ -1,28 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * include/linux/rslib.h
- *
- * Overview:
- * Generic Reed Solomon encoder / decoder library
+ * Generic Reed Solomon encoder / decoder library
*
* Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
*
* RS code lifted from reed solomon library written by Phil Karn
* Copyright 2002 Phil Karn, KA9Q
- *
- * $Id: rslib.h,v 1.4 2005/11/07 11:14:52 gleixner Exp $
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-
#ifndef _RSLIB_H_
#define _RSLIB_H_
#include <linux/list.h>
+#include <linux/types.h> /* for gfp_t */
+#include <linux/gfp.h> /* for GFP_KERNEL */
/**
- * struct rs_control - rs control structure
+ * struct rs_codec - rs codec data
*
* @mm: Bits per symbol
* @nn: Symbols per block (= (1<<mm)-1)
@@ -36,24 +29,34 @@
 * @gfpoly: The primitive generator polynomial
* @gffunc: Function to generate the field, if non-canonical representation
* @users: Users of this structure
- * @list: List entry for the rs control list
+ * @list: List entry for the rs codec list
*/
-struct rs_control {
- int mm;
- int nn;
+struct rs_codec {
+ int mm;
+ int nn;
uint16_t *alpha_to;
uint16_t *index_of;
uint16_t *genpoly;
- int nroots;
- int fcr;
- int prim;
- int iprim;
+ int nroots;
+ int fcr;
+ int prim;
+ int iprim;
int gfpoly;
int (*gffunc)(int);
int users;
struct list_head list;
};
+/**
+ * struct rs_control - rs control structure per instance
+ * @codec: The codec used for this instance
+ * @buffers: Internal scratch buffers used in calls to decode_rs()
+ */
+struct rs_control {
+ struct rs_codec *codec;
+ uint16_t buffers[];
+};
+
/* General purpose RS codec, 8-bit data width, symbol width 1-15 bit */
#ifdef CONFIG_REED_SOLOMON_ENC8
int encode_rs8(struct rs_control *rs, uint8_t *data, int len, uint16_t *par,
@@ -76,18 +79,37 @@ int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len,
uint16_t *corr);
#endif
-/* Create or get a matching rs control structure */
-struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim,
- int nroots);
+struct rs_control *init_rs_gfp(int symsize, int gfpoly, int fcr, int prim,
+ int nroots, gfp_t gfp);
+
+/**
+ * init_rs - Create a RS control struct and initialize it
+ * @symsize: the symbol size (number of bits)
+ * @gfpoly: the extended Galois field generator polynomial coefficients,
+ *	with the 0th coefficient in the low order bit. The polynomial
+ *	must be primitive.
+ * @fcr: the first consecutive root of the rs code generator polynomial
+ * in index form
+ * @prim: primitive element to generate polynomial roots
+ * @nroots: RS code generator polynomial degree (number of roots)
+ *
+ * Allocations use GFP_KERNEL.
+ */
+static inline struct rs_control *init_rs(int symsize, int gfpoly, int fcr,
+ int prim, int nroots)
+{
+ return init_rs_gfp(symsize, gfpoly, fcr, prim, nroots, GFP_KERNEL);
+}
+
struct rs_control *init_rs_non_canonical(int symsize, int (*func)(int),
- int fcr, int prim, int nroots);
+ int fcr, int prim, int nroots);
/* Release a rs control structure */
void free_rs(struct rs_control *rs);
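
End to end, a user allocates a control structure, encodes parity and frees
it again. A hedged sketch using RS(255,223)-style parameters over GF(2^8)
with the common primitive polynomial 0x11d:

	static int sketch_rs(uint8_t *data, int len)
	{
		uint16_t par[32] = {};
		struct rs_control *rsc;

		/* 8-bit symbols, gfpoly 0x11d, fcr 0, prim 1, 32 roots. */
		rsc = init_rs(8, 0x11d, 0, 1, 32);
		if (!rsc)
			return -ENOMEM;

		encode_rs8(rsc, data, len, par, 0);	/* parity lands in par[] */
		free_rs(rsc);
		return 0;
	}
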
/** modulo replacement for Galois field arithmetic
*
- * @rs: the rs control structure
+ * @rs: Pointer to the RS codec
* @x: the value to reduce
*
* where
@@ -97,7 +119,7 @@ void free_rs(struct rs_control *rs);
* Simple arithmetic modulo would return a wrong result for values
* >= 3 * rs->nn
*/
-static inline int rs_modnn(struct rs_control *rs, int x)
+static inline int rs_modnn(struct rs_codec *rs, int x)
{
while (x >= rs->nn) {
x -= rs->nn;
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 0a0f0d14a5fb..1fd9c6a21ebe 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Generic RTC interface.
* This version contains the part of the user interface to the Real Time Clock
@@ -33,18 +34,6 @@ static inline time64_t rtc_tm_sub(struct rtc_time *lhs, struct rtc_time *rhs)
return rtc_tm_to_time64(lhs) - rtc_tm_to_time64(rhs);
}
-static inline void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
-{
- rtc_time64_to_tm(time, tm);
-}
-
-static inline int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time)
-{
- *time = rtc_tm_to_time64(tm);
-
- return 0;
-}
-
#include <linux/device.h>
#include <linux/seq_file.h>
#include <linux/cdev.h>
@@ -66,45 +55,34 @@ extern struct class *rtc_class;
*
* The (current) exceptions are mostly filesystem hooks:
* - the proc() hook for procfs
- * - non-ioctl() chardev hooks: open(), release(), read_callback()
- *
- * REVISIT those periodic irq calls *do* have ops_lock when they're
- * issued through ioctl() ...
*/
struct rtc_class_ops {
- int (*open)(struct device *);
- void (*release)(struct device *);
int (*ioctl)(struct device *, unsigned int, unsigned long);
int (*read_time)(struct device *, struct rtc_time *);
int (*set_time)(struct device *, struct rtc_time *);
int (*read_alarm)(struct device *, struct rtc_wkalrm *);
int (*set_alarm)(struct device *, struct rtc_wkalrm *);
int (*proc)(struct device *, struct seq_file *);
- int (*set_mmss64)(struct device *, time64_t secs);
- int (*set_mmss)(struct device *, unsigned long secs);
- int (*read_callback)(struct device *, int data);
int (*alarm_irq_enable)(struct device *, unsigned int enabled);
int (*read_offset)(struct device *, long *offset);
int (*set_offset)(struct device *, long offset);
+ int (*param_get)(struct device *, struct rtc_param *param);
+ int (*param_set)(struct device *, struct rtc_param *param);
};
-#define RTC_DEVICE_NAME_SIZE 20
-typedef struct rtc_task {
- void (*func)(void *private_data);
- void *private_data;
-} rtc_task_t;
-
+struct rtc_device;
struct rtc_timer {
- struct rtc_task task;
struct timerqueue_node node;
ktime_t period;
+ void (*func)(struct rtc_device *rtc);
+ struct rtc_device *rtc;
int enabled;
};
-
/* flags */
#define RTC_DEV_BUSY 0
+#define RTC_NO_CDEV 1
struct rtc_device {
struct device dev;
@@ -123,8 +101,6 @@ struct rtc_device {
wait_queue_head_t irq_queue;
struct fasync_struct *async_queue;
- struct rtc_task *irq_task;
- spinlock_t irq_task_lock;
int irq_freq;
int max_user_freq;
@@ -134,16 +110,45 @@ struct rtc_device {
struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */
int pie_enabled;
struct work_struct irqwork;
- /* Some hardware can't support UIE mode */
- int uie_unsupported;
-
- bool registered;
- struct nvmem_config *nvmem_config;
- struct nvmem_device *nvmem;
- /* Old ABI support */
- bool nvram_old_abi;
- struct bin_attribute *nvram;
+ /*
+ * This offset specifies the update timing of the RTC.
+ *
+	 *   tsched     t1 write(t2.tv_sec - 1sec)	t2 RTC increments seconds
+ *
+ * The offset defines how tsched is computed so that the write to
+ * the RTC (t2.tv_sec - 1sec) is correct versus the time required
+ * for the transport of the write and the time which the RTC needs
+ * to increment seconds the first time after the write (t2).
+ *
+ * For direct accessible RTCs tsched ~= t1 because the write time
+ * is negligible. For RTCs behind slow busses the transport time is
+ * significant and has to be taken into account.
+ *
+ * The time between the write (t1) and the first increment after
+ * the write (t2) is RTC specific. For a MC146818 RTC it's 500ms,
+ * for many others it's exactly 1 second. Consult the datasheet.
+ *
+	 * The value of this offset is also used to calculate the value to
+	 * be written (t2.tv_sec - 1sec) at tsched.
+	 *
+	 * The default value is NSEC_PER_SEC plus a 10 msec default
+	 * transport time. The offset can be adjusted by drivers so the
+	 * calculation of the value to be written at tsched becomes
+	 * correct:
+ *
+ * newval = tsched + set_offset_nsec - NSEC_PER_SEC
+ * and (tsched + set_offset_nsec) % NSEC_PER_SEC == 0
+ */
+ unsigned long set_offset_nsec;
+
+ unsigned long features[BITS_TO_LONGS(RTC_FEATURE_CNT)];
+
+ time64_t range_min;
+ timeu64_t range_max;
+ time64_t start_secs;
+ time64_t offset_secs;
+ bool set_start_time;
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
struct work_struct uie_task;
@@ -158,23 +163,28 @@ struct rtc_device {
};
#define to_rtc_device(d) container_of(d, struct rtc_device, dev)
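
To make the set_offset_nsec rule above concrete: for t2 = 100.000000000s
and the default offset of 1.010s, tsched = 98.990s, (tsched +
set_offset_nsec) hits the 100s boundary exactly, and the value written at
tsched is 99 = t2.tv_sec - 1. A sketch of that arithmetic (hypothetical
helper, illustration only):

	/* Derive the seconds value to write at tsched, per the comment above. */
	static inline u64 sketch_rtc_write_val(u64 tsched_ns, unsigned long offset_ns)
	{
		/* Requires (tsched + set_offset_nsec) % NSEC_PER_SEC == 0. */
		return div_u64(tsched_ns + offset_ns, NSEC_PER_SEC) - 1;
	}
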
-extern struct rtc_device *rtc_device_register(const char *name,
- struct device *dev,
- const struct rtc_class_ops *ops,
- struct module *owner);
+#define rtc_lock(d) mutex_lock(&d->ops_lock)
+#define rtc_unlock(d) mutex_unlock(&d->ops_lock)
+
+/* useful timestamps */
+#define RTC_TIMESTAMP_BEGIN_0000 -62167219200ULL /* 0000-01-01 00:00:00 */
+#define RTC_TIMESTAMP_BEGIN_1900 -2208988800LL /* 1900-01-01 00:00:00 */
+#define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */
+#define RTC_TIMESTAMP_END_2063 2966371199LL /* 2063-12-31 23:59:59 */
+#define RTC_TIMESTAMP_END_2079 3471292799LL /* 2079-12-31 23:59:59 */
+#define RTC_TIMESTAMP_END_2099 4102444799LL /* 2099-12-31 23:59:59 */
+#define RTC_TIMESTAMP_END_2199 7258118399LL /* 2199-12-31 23:59:59 */
+#define RTC_TIMESTAMP_END_9999 253402300799LL /* 9999-12-31 23:59:59 */
+
extern struct rtc_device *devm_rtc_device_register(struct device *dev,
const char *name,
const struct rtc_class_ops *ops,
struct module *owner);
struct rtc_device *devm_rtc_allocate_device(struct device *dev);
-int __rtc_register_device(struct module *owner, struct rtc_device *rtc);
-extern void rtc_device_unregister(struct rtc_device *rtc);
-extern void devm_rtc_device_unregister(struct device *dev,
- struct rtc_device *rtc);
+int __devm_rtc_register_device(struct module *owner, struct rtc_device *rtc);
extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm);
extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm);
-extern int rtc_set_ntp_time(struct timespec64 now);
int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm);
extern int rtc_read_alarm(struct rtc_device *rtc,
struct rtc_wkalrm *alrm);
@@ -188,29 +198,20 @@ extern void rtc_update_irq(struct rtc_device *rtc,
extern struct rtc_device *rtc_class_open(const char *name);
extern void rtc_class_close(struct rtc_device *rtc);
-extern int rtc_irq_register(struct rtc_device *rtc,
- struct rtc_task *task);
-extern void rtc_irq_unregister(struct rtc_device *rtc,
- struct rtc_task *task);
-extern int rtc_irq_set_state(struct rtc_device *rtc,
- struct rtc_task *task, int enabled);
-extern int rtc_irq_set_freq(struct rtc_device *rtc,
- struct rtc_task *task, int freq);
+extern int rtc_irq_set_state(struct rtc_device *rtc, int enabled);
+extern int rtc_irq_set_freq(struct rtc_device *rtc, int freq);
extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled);
extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled);
extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc,
unsigned int enabled);
void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode);
-void rtc_aie_update_irq(void *private);
-void rtc_uie_update_irq(void *private);
+void rtc_aie_update_irq(struct rtc_device *rtc);
+void rtc_uie_update_irq(struct rtc_device *rtc);
enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer);
-int rtc_register(rtc_task_t *task);
-int rtc_unregister(rtc_task_t *task);
-int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg);
-
-void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data);
+void rtc_timer_init(struct rtc_timer *timer, void (*f)(struct rtc_device *r),
+ struct rtc_device *rtc);
int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer,
ktime_t expires, ktime_t period);
void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer);
@@ -223,8 +224,8 @@ static inline bool is_leap_year(unsigned int year)
return (!(year % 4) && (year % 100)) || !(year % 400);
}
-#define rtc_register_device(device) \
- __rtc_register_device(THIS_MODULE, device)
+#define devm_rtc_register_device(device) \
+ __devm_rtc_register_device(THIS_MODULE, device)
#ifdef CONFIG_RTC_HCTOSYS_DEVICE
extern int rtc_hctosys_ret;
@@ -232,4 +233,31 @@ extern int rtc_hctosys_ret;
#define rtc_hctosys_ret -ENODEV
#endif
+#ifdef CONFIG_RTC_NVMEM
+int devm_rtc_nvmem_register(struct rtc_device *rtc,
+ struct nvmem_config *nvmem_config);
+#else
+static inline int devm_rtc_nvmem_register(struct rtc_device *rtc,
+ struct nvmem_config *nvmem_config)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_RTC_INTF_SYSFS
+int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp);
+int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps);
+#else
+static inline
+int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp)
+{
+ return 0;
+}
+
+static inline
+int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps)
+{
+ return 0;
+}
+#endif
#endif /* _LINUX_RTC_H_ */
diff --git a/include/linux/rtc/ds1685.h b/include/linux/rtc/ds1685.h
index e6337a56d741..5a41c3bbcbe3 100644
--- a/include/linux/rtc/ds1685.h
+++ b/include/linux/rtc/ds1685.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Definitions for the registers, addresses, and platform data of the
* DS1685/DS1687-series RTC chips.
@@ -15,10 +16,6 @@
* DS17x85/DS17x87 3V/5V Real-Time Clocks, 19-5222, Rev 4/10.
* DS1689/DS1693 3V/5V Serialized Real-Time Clocks, Rev 112105.
* Application Note 90, Using the Multiplex Bus RTC Extended Features.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LINUX_RTC_DS1685_H_
@@ -45,16 +42,10 @@
struct ds1685_priv {
struct rtc_device *dev;
void __iomem *regs;
+ void __iomem *data;
u32 regstep;
- resource_size_t baseaddr;
- size_t size;
- spinlock_t lock;
- struct work_struct work;
int irq_num;
bool bcd_mode;
- bool no_irq;
- bool uie_unsupported;
- bool alloc_io_resources;
u8 (*read)(struct ds1685_priv *, int);
void (*write)(struct ds1685_priv *, int, u8);
void (*prepare_poweroff)(void);
@@ -79,12 +70,13 @@ struct ds1685_rtc_platform_data {
const bool bcd_mode;
const bool no_irq;
const bool uie_unsupported;
- const bool alloc_io_resources;
- u8 (*plat_read)(struct ds1685_priv *, int);
- void (*plat_write)(struct ds1685_priv *, int, u8);
void (*plat_prepare_poweroff)(void);
void (*plat_wake_alarm)(void);
void (*plat_post_ram_clear)(void);
+ enum {
+ ds1685_reg_direct,
+ ds1685_reg_indirect
+ } access_type;
};
diff --git a/include/linux/rtc/m48t59.h b/include/linux/rtc/m48t59.h
index 6fc961459b4a..9465d5405fe2 100644
--- a/include/linux/rtc/m48t59.h
+++ b/include/linux/rtc/m48t59.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/rtc/m48t59.h
*
@@ -6,10 +7,6 @@
* Copyright (c) 2007 Wind River Systems, Inc.
*
* Mark Zhan <rongkai.zhan@windriver.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LINUX_RTC_M48T59_H_
diff --git a/include/linux/rtc/rtc-omap.h b/include/linux/rtc/rtc-omap.h
new file mode 100644
index 000000000000..9f03a329e63f
--- /dev/null
+++ b/include/linux/rtc/rtc-omap.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_RTCOMAP_H_
+#define _LINUX_RTCOMAP_H_
+
+int omap_rtc_power_off_program(struct device *dev);
+#endif /* _LINUX_RTCOMAP_H_ */
diff --git a/include/linux/rtc/sirfsoc_rtciobrg.h b/include/linux/rtc/sirfsoc_rtciobrg.h
deleted file mode 100644
index aefd997262e4..000000000000
--- a/include/linux/rtc/sirfsoc_rtciobrg.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * RTC I/O Bridge interfaces for CSR SiRFprimaII
- * ARM access the registers of SYSRTC, GPSRTC and PWRC through this module
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- *
- * Licensed under GPLv2 or later.
- */
-#ifndef _SIRFSOC_RTC_IOBRG_H_
-#define _SIRFSOC_RTC_IOBRG_H_
-
-struct regmap_config;
-
-extern void sirfsoc_rtc_iobrg_besyncing(void);
-
-extern u32 sirfsoc_rtc_iobrg_readl(u32 addr);
-
-extern void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr);
-struct regmap *devm_regmap_init_iobg(struct device *dev,
- const struct regmap_config *config);
-
-#endif
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 44fd002f7cd5..7d049883a08a 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* RT Mutexes: blocking mutual exclusion locks with PI support
*
@@ -12,31 +13,49 @@
#ifndef __LINUX_RT_MUTEX_H
#define __LINUX_RT_MUTEX_H
+#include <linux/compiler.h>
#include <linux/linkage.h>
-#include <linux/rbtree.h>
-#include <linux/spinlock_types.h>
+#include <linux/rbtree_types.h>
+#include <linux/spinlock_types_raw.h>
extern int max_lock_depth; /* for sysctl */
+struct rt_mutex_base {
+ raw_spinlock_t wait_lock;
+ struct rb_root_cached waiters;
+ struct task_struct *owner;
+};
+
+#define __RT_MUTEX_BASE_INITIALIZER(rtbasename) \
+{ \
+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(rtbasename.wait_lock), \
+ .waiters = RB_ROOT_CACHED, \
+ .owner = NULL \
+}
+
+/**
+ * rt_mutex_base_is_locked - is the rtmutex locked
+ * @lock: the mutex to be queried
+ *
+ * Returns true if the mutex is locked, false if unlocked.
+ */
+static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock)
+{
+ return READ_ONCE(lock->owner) != NULL;
+}
+
+extern void rt_mutex_base_init(struct rt_mutex_base *rtb);
+
/**
* The rt_mutex structure
*
* @wait_lock: spinlock to protect the structure
- * @waiters: rbtree root to enqueue waiters in priority order
- * @waiters_leftmost: top waiter
+ * @waiters: rbtree root to enqueue waiters in priority order;
+ * caches top-waiter (leftmost node).
* @owner: the mutex owner
*/
struct rt_mutex {
- raw_spinlock_t wait_lock;
- struct rb_root waiters;
- struct rb_node *waiters_leftmost;
- struct task_struct *owner;
-#ifdef CONFIG_DEBUG_RT_MUTEXES
- int save_state;
- const char *name, *file;
- int line;
- void *magic;
-#endif
+ struct rt_mutex_base rtmutex;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
@@ -46,71 +65,56 @@ struct rt_mutex_waiter;
struct hrtimer_sleeper;
#ifdef CONFIG_DEBUG_RT_MUTEXES
- extern int rt_mutex_debug_check_no_locks_freed(const void *from,
- unsigned long len);
- extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task);
+extern void rt_mutex_debug_task_free(struct task_struct *tsk);
#else
- static inline int rt_mutex_debug_check_no_locks_freed(const void *from,
- unsigned long len)
- {
- return 0;
- }
-# define rt_mutex_debug_check_no_locks_held(task) do { } while (0)
+static inline void rt_mutex_debug_task_free(struct task_struct *tsk) { }
#endif
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
- , .name = #mutexname, .file = __FILE__, .line = __LINE__
-
-# define rt_mutex_init(mutex) \
+#define rt_mutex_init(mutex) \
do { \
static struct lock_class_key __key; \
__rt_mutex_init(mutex, __func__, &__key); \
} while (0)
- extern void rt_mutex_debug_task_free(struct task_struct *tsk);
-#else
-# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL, NULL)
-# define rt_mutex_debug_task_free(t) do { } while (0)
-#endif
-
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
- , .dep_map = { .name = #mutexname }
+#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
+ .dep_map = { \
+ .name = #mutexname, \
+ .wait_type_inner = LD_WAIT_SLEEP, \
+ }
#else
#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
#endif
-#define __RT_MUTEX_INITIALIZER(mutexname) \
- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
- , .waiters = RB_ROOT \
- , .owner = NULL \
- __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
- __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
+#define __RT_MUTEX_INITIALIZER(mutexname) \
+{ \
+ .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex), \
+ __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
+}
#define DEFINE_RT_MUTEX(mutexname) \
struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
-/**
- * rt_mutex_is_locked - is the mutex locked
- * @lock: the mutex to be queried
- *
- * Returns 1 if the mutex is locked, 0 if unlocked.
- */
-static inline int rt_mutex_is_locked(struct rt_mutex *lock)
-{
- return lock->owner != NULL;
-}
-
extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
-extern void rt_mutex_destroy(struct rt_mutex *lock);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
+extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock);
+#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
+#define rt_mutex_lock_nest_lock(lock, nest_lock) \
+ do { \
+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
+ _rt_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
+ } while (0)
+
+#else
extern void rt_mutex_lock(struct rt_mutex *lock);
-extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
-extern int rt_mutex_timed_lock(struct rt_mutex *lock,
- struct hrtimer_sleeper *timeout);
+#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
+#define rt_mutex_lock_nest_lock(lock, nest_lock) rt_mutex_lock(lock)
+#endif
+extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
+extern int rt_mutex_lock_killable(struct rt_mutex *lock);
extern int rt_mutex_trylock(struct rt_mutex *lock);
extern void rt_mutex_unlock(struct rt_mutex *lock);
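
A minimal sketch of the resulting locking API, using the static initializer
together with the blocking and non-blocking paths:

	static DEFINE_RT_MUTEX(sketch_lock);

	static void sketch_critical_section(void)
	{
		rt_mutex_lock(&sketch_lock);
		/* ... priority-inheritance protected critical section ... */
		rt_mutex_unlock(&sketch_lock);

		if (rt_mutex_trylock(&sketch_lock)) {
			/* got it without blocking */
			rt_mutex_unlock(&sketch_lock);
		}
	}
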
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index dea59c8eec54..3d6cf306cd55 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_RTNETLINK_H
#define __LINUX_RTNETLINK_H
@@ -5,23 +6,29 @@
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/wait.h>
+#include <linux/refcount.h>
#include <uapi/linux/rtnetlink.h>
extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid);
extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid,
- u32 group, struct nlmsghdr *nlh, gfp_t flags);
+ u32 group, const struct nlmsghdr *nlh, gfp_t flags);
extern void rtnl_set_sk_err(struct net *net, u32 group, int error);
extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics);
extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
u32 id, long expires, u32 error);
-void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
+void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, gfp_t flags,
+ u32 portid, const struct nlmsghdr *nlh);
+void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
+ gfp_t flags, int *new_nsid, int new_ifindex);
struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
unsigned change, u32 event,
- gfp_t flags);
+ gfp_t flags, int *new_nsid,
+ int new_ifindex, u32 portid,
+ const struct nlmsghdr *nlh);
void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev,
- gfp_t flags);
+ gfp_t flags, u32 portid, const struct nlmsghdr *nlh);
/* RTNL is used as a global lock for all changes to network configuration */
@@ -29,9 +36,12 @@ extern void rtnl_lock(void);
extern void rtnl_unlock(void);
extern int rtnl_trylock(void);
extern int rtnl_is_locked(void);
+extern int rtnl_lock_killable(void);
+extern bool refcount_dec_and_rtnl_lock(refcount_t *r);
extern wait_queue_head_t netdev_unregistering_wq;
-extern struct mutex net_mutex;
+extern struct rw_semaphore pernet_ops_rwsem;
+extern struct rw_semaphore net_rwsem;
#ifdef CONFIG_PROVE_LOCKING
extern bool lockdep_rtnl_is_held(void);
@@ -53,22 +63,11 @@ static inline bool lockdep_rtnl_is_held(void)
rcu_dereference_check(p, lockdep_rtnl_is_held())
/**
- * rcu_dereference_bh_rtnl - rcu_dereference_bh with debug checking
- * @p: The pointer to read, prior to dereference
- *
- * Do an rcu_dereference_bh(p), but check caller either holds rcu_read_lock_bh()
- * or RTNL. Note : Please prefer rtnl_dereference() or rcu_dereference_bh()
- */
-#define rcu_dereference_bh_rtnl(p) \
- rcu_dereference_bh_check(p, lockdep_rtnl_is_held())
-
-/**
* rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL
* @p: The pointer to read, prior to dereferencing
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
- * caller holds RTNL.
+ * the READ_ONCE(), because caller holds RTNL.
*/
#define rtnl_dereference(p) \
rcu_dereference_protected(p, lockdep_rtnl_is_held())
@@ -78,6 +77,11 @@ static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
return rtnl_dereference(dev->ingress_queue);
}
+static inline struct netdev_queue *dev_ingress_queue_rcu(struct net_device *dev)
+{
+ return rcu_dereference(dev->ingress_queue);
+}
+
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);
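
The two accessors above show the usual split between update-side and
read-side access; a sketch of the same pattern in open code:

	static void sketch_ingress(struct net_device *dev)
	{
		struct netdev_queue *q;

		rtnl_lock();
		q = rtnl_dereference(dev->ingress_queue);	/* RTNL held */
		rtnl_unlock();

		rcu_read_lock();
		q = rcu_dereference(dev->ingress_queue);	/* RCU reader */
		rcu_read_unlock();
		(void)q;
	}
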
#ifdef CONFIG_NET_INGRESS
@@ -88,19 +92,16 @@ void net_dec_ingress_queue(void);
#ifdef CONFIG_NET_EGRESS
void net_inc_egress_queue(void);
void net_dec_egress_queue(void);
+void netdev_xmit_skip_txqueue(bool skip);
#endif
void rtnetlink_init(void);
void __rtnl_unlock(void);
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail);
-#define ASSERT_RTNL() do { \
- if (unlikely(!rtnl_is_locked())) { \
- printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \
- __FILE__, __LINE__); \
- dump_stack(); \
- } \
-} while(0)
+#define ASSERT_RTNL() \
+ WARN_ONCE(!rtnl_is_locked(), \
+ "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__)
extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
@@ -126,4 +127,7 @@ extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
int (*vlan_fill)(struct sk_buff *skb,
struct net_device *dev,
u32 filter_mask));
+
+extern void rtnl_offload_xstats_notify(struct net_device *dev);
+
#endif /* __LINUX_RTNETLINK_H */
diff --git a/include/linux/mfd/rtsx_common.h b/include/linux/rtsx_common.h
index 443176ee1ab0..bf290ad14c57 100644
--- a/include/linux/mfd/rtsx_common.h
+++ b/include/linux/rtsx_common.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Driver for Realtek driver-based card reader
*
* Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- *
* Author:
* Wei WANG <wei_wang@realsil.com.cn>
*/
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/rtsx_pci.h
index 116816fb9110..534038d962e4 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- *
* Author:
* Wei WANG <wei_wang@realsil.com.cn>
*/
@@ -24,7 +12,7 @@
#include <linux/sched.h>
#include <linux/pci.h>
-#include <linux/mfd/rtsx_common.h>
+#include <linux/rtsx_common.h>
#define MAX_RW_REG_CNT 1024
@@ -36,12 +24,12 @@
#define CHECK_REG_CMD 2
#define RTSX_HDBAR 0x08
-#define SG_INT 0x04
-#define SG_END 0x02
-#define SG_VALID 0x01
-#define SG_NO_OP 0x00
-#define SG_TRANS_DATA (0x02 << 4)
-#define SG_LINK_DESC (0x03 << 4)
+#define RTSX_SG_INT 0x04
+#define RTSX_SG_END 0x02
+#define RTSX_SG_VALID 0x01
+#define RTSX_SG_NO_OP 0x00
+#define RTSX_SG_TRANS_DATA (0x02 << 4)
+#define RTSX_SG_LINK_DESC (0x03 << 4)
#define RTSX_HDBCTLR 0x0C
#define SDMA_MODE 0x00
#define ADMA_MODE (0x02 << 26)
@@ -94,6 +82,7 @@
#define MS_OC_INT_EN (1 << 23)
#define SD_OC_INT_EN (1 << 22)
+#define RTSX_DUM_REG 0x1C
/*
* macros for easy use
@@ -111,18 +100,6 @@
#define rtsx_pci_readb(pcr, reg) \
ioread8((pcr)->remap_addr + reg)
-#define rtsx_pci_read_config_byte(pcr, where, val) \
- pci_read_config_byte((pcr)->pci, where, val)
-
-#define rtsx_pci_write_config_byte(pcr, where, val) \
- pci_write_config_byte((pcr)->pci, where, val)
-
-#define rtsx_pci_read_config_dword(pcr, where, val) \
- pci_read_config_dword((pcr)->pci, where, val)
-
-#define rtsx_pci_write_config_dword(pcr, where, val) \
- pci_write_config_dword((pcr)->pci, where, val)
-
#define STATE_TRANS_NONE 0
#define STATE_TRANS_CMD 1
#define STATE_TRANS_BUF 2
@@ -203,6 +180,7 @@
#define SD_DDR_MODE 0x04
#define SD_30_MODE 0x08
#define SD_CLK_DIVIDE_MASK 0xC0
+#define SD_MODE_SELECT_MASK 0x0C
#define SD_CFG2 0xFDA1
#define SD_CALCULATE_CRC7 0x00
#define SD_NO_CALCULATE_CRC7 0x80
@@ -226,6 +204,7 @@
#define SD_RSP_TYPE_R6 0x01
#define SD_RSP_TYPE_R7 0x01
#define SD_CFG3 0xFDA2
+#define SD30_CLK_END_EN 0x10
#define SD_RSP_80CLK_TIMEOUT_EN 0x01
#define SD_STAT1 0xFDA3
@@ -309,6 +288,14 @@
#define SD_DATA_STATE 0xFDB6
#define SD_DATA_IDLE 0x80
+#define REG_SD_STOP_SDCLK_CFG 0xFDB8
+#define SD30_CLK_STOP_CFG_EN 0x04
+#define SD30_CLK_STOP_CFG1 0x02
+#define SD30_CLK_STOP_CFG0 0x01
+#define REG_PRE_RW_MODE 0xFD70
+#define EN_INFINITE_MODE 0x01
+#define REG_CRC_DUMMY_0 0xFD71
+#define CFG_SD_POW_AUTO_PD (1<<0)
#define SRCTL 0xFC13
@@ -334,6 +321,7 @@
#define DCM_DRP_RD_DATA_H 0xFC29
#define SD_VPCLK0_CTL 0xFC2A
#define SD_VPCLK1_CTL 0xFC2B
+#define PHASE_SELECT_MASK 0x1F
#define SD_DCMPS0_CTL 0xFC2C
#define SD_DCMPS1_CTL 0xFC2D
#define SD_VPTX_CTL SD_VPCLK0_CTL
@@ -433,6 +421,7 @@
#define CARD_CLK_EN 0xFD69
#define SD_CLK_EN 0x04
#define MS_CLK_EN 0x08
+#define SD40_CLK_EN 0x10
#define SDIO_CTRL 0xFD6B
#define CD_PAD_CTL 0xFD73
#define CD_DISABLE_MASK 0x07
@@ -452,8 +441,8 @@
#define FPDCTL 0xFC00
#define SSC_POWER_DOWN 0x01
#define SD_OC_POWER_DOWN 0x02
-#define ALL_POWER_DOWN 0x07
-#define OC_POWER_DOWN 0x06
+#define ALL_POWER_DOWN 0x03
+#define OC_POWER_DOWN 0x02
#define PDINFO 0xFC01
#define CLK_CTL 0xFC02
@@ -489,6 +478,9 @@
#define FPGA_PULL_CTL 0xFC1D
#define OLT_LED_CTL 0xFC1E
+#define LED_SHINE_MASK 0x08
+#define LED_SHINE_EN 0x08
+#define LED_SHINE_DISABLE 0x00
#define GPIO_CTL 0xFC1F
#define LDO_CTL 0xFC1E
@@ -510,7 +502,11 @@
#define BPP_LDO_ON 0x00
#define BPP_LDO_SUSPEND 0x02
#define BPP_LDO_OFF 0x03
+#define EFUSE_CTL 0xFC30
+#define EFUSE_ADD 0xFC31
#define SYS_VER 0xFC32
+#define EFUSE_DATAL 0xFC34
+#define EFUSE_DATAH 0xFC35
#define CARD_PULL_CTL1 0xFD60
#define CARD_PULL_CTL2 0xFD61
@@ -552,6 +548,9 @@
#define RBBC1 0xFE2F
#define RBDAT 0xFE30
#define RBCTL 0xFE34
+#define U_AUTO_DMA_EN_MASK 0x20
+#define U_AUTO_DMA_DISABLE 0x00
+#define RB_FLUSH 0x80
#define CFGADDR0 0xFE35
#define CFGADDR1 0xFE36
#define CFGDATA0 0xFE37
@@ -573,7 +572,15 @@
#define MSGTXDATA3 0xFE47
#define MSGTXCTL 0xFE48
#define LTR_CTL 0xFE4A
+#define LTR_TX_EN_MASK BIT(7)
+#define LTR_TX_EN_1 BIT(7)
+#define LTR_TX_EN_0 0
+#define LTR_LATENCY_MODE_MASK BIT(6)
+#define LTR_LATENCY_MODE_HW 0
+#define LTR_LATENCY_MODE_SW BIT(6)
#define OBFF_CFG 0xFE4C
+#define OBFF_EN_MASK 0x03
+#define OBFF_DISABLE 0x00
#define CDRESUMECTL 0xFE52
#define WAKE_SEL_CTL 0xFE54
@@ -583,11 +590,13 @@
#define ASPM_FORCE_CTL 0xFE57
#define FORCE_ASPM_CTL0 0x10
+#define FORCE_ASPM_CTL1 0x20
#define FORCE_ASPM_VAL_MASK 0x03
#define FORCE_ASPM_L1_EN 0x02
#define FORCE_ASPM_L0_EN 0x01
#define FORCE_ASPM_NO_ASPM 0x00
#define PM_CLK_FORCE_CTL 0xFE58
+#define CLK_PM_EN 0x01
#define FUNC_FORCE_CTL 0xFE59
#define FUNC_FORCE_UPME_XMT_DBG 0x02
#define PERST_GLITCH_WIDTH 0xFE5C
@@ -613,14 +622,27 @@
#define LDO_PWR_SEL 0xFE78
#define L1SUB_CONFIG1 0xFE8D
+#define AUX_CLK_ACTIVE_SEL_MASK 0x01
+#define MAC_CKSW_DONE 0x00
#define L1SUB_CONFIG2 0xFE8E
#define L1SUB_AUTO_CFG 0x02
#define L1SUB_CONFIG3 0xFE8F
+#define L1OFF_MBIAS2_EN_5250 BIT(7)
#define DUMMY_REG_RESET_0 0xFE90
+#define IC_VERSION_MASK 0x0F
+#define REG_VREF 0xFE97
+#define PWD_SUSPND_EN 0x10
+#define RTS5260_DMA_RST_CTL_0 0xFEBF
+#define RTS5260_DMA_RST 0x80
+#define RTS5260_ADMA3_RST 0x40
#define AUTOLOAD_CFG_BASE 0xFF00
+#define RELINK_TIME_MASK 0x01
#define PETXCFG 0xFF03
+#define FORCE_CLKREQ_DELINK_MASK BIT(7)
+#define FORCE_CLKREQ_LOW 0x80
+#define FORCE_CLKREQ_HIGH 0x00
#define PM_CTRL1 0xFF44
#define CD_RESUME_EN_MASK 0xF0
@@ -637,6 +659,28 @@
#define PM_WAKE_EN 0x01
#define PM_CTRL4 0xFF47
+/* FW config info register */
+#define RTS5261_FW_CFG_INFO0 0xFF50
+#define RTS5261_FW_EXPRESS_TEST_MASK (0x01 << 0)
+#define RTS5261_FW_EA_MODE_MASK (0x01 << 5)
+#define RTS5261_FW_CFG0 0xFF54
+#define RTS5261_FW_ENTER_EXPRESS (0x01 << 0)
+
+#define RTS5261_FW_CFG1 0xFF55
+#define RTS5261_SYS_CLK_SEL_MCU_CLK (0x01 << 7)
+#define RTS5261_CRC_CLK_SEL_MCU_CLK (0x01 << 6)
+#define RTS5261_FAKE_MCU_CLOCK_GATING (0x01 << 5)
+#define RTS5261_MCU_BUS_SEL_MASK (0x01 << 4)
+#define RTS5261_MCU_CLOCK_SEL_MASK (0x03 << 2)
+#define RTS5261_MCU_CLOCK_SEL_16M (0x01 << 2)
+#define RTS5261_MCU_CLOCK_GATING (0x01 << 1)
+#define RTS5261_DRIVER_ENABLE_FW (0x01 << 0)
+
+#define REG_CFG_OOBS_OFF_TIMER 0xFEA6
+#define REG_CFG_OOBS_ON_TIMER 0xFEA7
+#define REG_CFG_VCM_ON_TIMER 0xFEA8
+#define REG_CFG_OOBS_POLLING 0xFEA9
+
/* Memory mapping */
#define SRAM_BASE 0xE600
#define RBUF_BASE 0xF400
@@ -656,15 +700,31 @@
#define LDO_DV18_CFG 0xFF70
#define LDO_DV18_SR_MASK 0xC0
#define LDO_DV18_SR_DF 0x40
+#define DV331812_MASK 0x70
+#define DV331812_33 0x70
+#define DV331812_17 0x30
#define LDO_CONFIG2 0xFF71
#define LDO_D3318_MASK 0x07
#define LDO_D3318_33V 0x07
#define LDO_D3318_18V 0x02
+#define DV331812_VDD1 0x04
+#define DV331812_POWERON 0x08
+#define DV331812_POWEROFF 0x00
#define LDO_VCC_CFG0 0xFF72
#define LDO_VCC_LMTVTH_MASK 0x30
#define LDO_VCC_LMTVTH_2A 0x10
+/* RTS5260 */
+#define RTS5260_DVCC_TUNE_MASK 0x70
+#define RTS5260_DVCC_33 0x70
+
+/* RTS5261 */
+#define RTS5261_LDO1_CFG0 0xFF72
+#define RTS5261_LDO1_OCP_THD_MASK (0x07 << 5)
+#define RTS5261_LDO1_OCP_EN (0x01 << 4)
+#define RTS5261_LDO1_OCP_LMT_THD_MASK (0x03 << 2)
+#define RTS5261_LDO1_OCP_LMT_EN (0x01 << 1)
#define LDO_VCC_CFG1 0xFF73
#define LDO_VCC_REF_TUNE_MASK 0x30
@@ -673,6 +733,10 @@
#define LDO_VCC_1V8 0x04
#define LDO_VCC_3V3 0x07
#define LDO_VCC_LMT_EN 0x08
+/* RTS5260 */
+#define LDO_POW_SDVDD1_MASK 0x08
+#define LDO_POW_SDVDD1_ON 0x08
+#define LDO_POW_SDVDD1_OFF 0x00
#define LDO_VIO_CFG 0xFF75
#define LDO_VIO_SR_MASK 0xC0
@@ -700,6 +764,162 @@
#define SD_VIO_LDO_1V8 0x40
#define SD_VIO_LDO_3V3 0x70
+#define RTS5260_AUTOLOAD_CFG4 0xFF7F
+#define RTS5260_MIMO_DISABLE 0x8A
+/* RTS5261 */
+#define RTS5261_AUX_CLK_16M_EN (1 << 5)
+
+#define RTS5260_REG_GPIO_CTL0 0xFC1A
+#define RTS5260_REG_GPIO_MASK 0x01
+#define RTS5260_REG_GPIO_ON 0x01
+#define RTS5260_REG_GPIO_OFF 0x00
+
+#define PWR_GLOBAL_CTRL 0xF200
+#define PCIE_L1_2_EN 0x0C
+#define PCIE_L1_1_EN 0x0A
+#define PCIE_L1_0_EN 0x09
+#define PWR_FE_CTL 0xF201
+#define PCIE_L1_2_PD_FE_EN 0x0C
+#define PCIE_L1_1_PD_FE_EN 0x0A
+#define PCIE_L1_0_PD_FE_EN 0x09
+#define CFG_PCIE_APHY_OFF_0 0xF204
+#define CFG_PCIE_APHY_OFF_0_DEFAULT 0xBF
+#define CFG_PCIE_APHY_OFF_1 0xF205
+#define CFG_PCIE_APHY_OFF_1_DEFAULT 0xFF
+#define CFG_PCIE_APHY_OFF_2 0xF206
+#define CFG_PCIE_APHY_OFF_2_DEFAULT 0x01
+#define CFG_PCIE_APHY_OFF_3 0xF207
+#define CFG_PCIE_APHY_OFF_3_DEFAULT 0x00
+#define CFG_L1_0_PCIE_MAC_RET_VALUE 0xF20C
+#define CFG_L1_0_PCIE_DPHY_RET_VALUE 0xF20E
+#define CFG_L1_0_SYS_RET_VALUE 0xF210
+#define CFG_L1_0_CRC_MISC_RET_VALUE 0xF212
+#define CFG_L1_0_CRC_SD30_RET_VALUE 0xF214
+#define CFG_L1_0_CRC_SD40_RET_VALUE 0xF216
+#define CFG_LP_FPWM_VALUE 0xF219
+#define CFG_LP_FPWM_VALUE_DEFAULT 0x18
+#define PWC_CDR 0xF253
+#define PWC_CDR_DEFAULT 0x03
+#define CFG_L1_0_RET_VALUE_DEFAULT 0x1B
+#define CFG_L1_0_CRC_MISC_RET_VALUE_DEFAULT 0x0C
+
+/* OCPCTL */
+#define SD_DETECT_EN 0x08
+#define SD_OCP_INT_EN 0x04
+#define SD_OCP_INT_CLR 0x02
+#define SD_OC_CLR 0x01
+
+#define SDVIO_DETECT_EN (1 << 7)
+#define SDVIO_OCP_INT_EN (1 << 6)
+#define SDVIO_OCP_INT_CLR (1 << 5)
+#define SDVIO_OC_CLR (1 << 4)
+
+/* OCPSTAT */
+#define SD_OCP_DETECT 0x08
+#define SD_OC_NOW 0x04
+#define SD_OC_EVER 0x02
+
+#define SDVIO_OC_NOW (1 << 6)
+#define SDVIO_OC_EVER (1 << 5)
+
+#define REG_OCPCTL 0xFD6A
+#define REG_OCPSTAT 0xFD6E
+#define REG_OCPGLITCH 0xFD6C
+#define REG_OCPPARA1 0xFD6B
+#define REG_OCPPARA2 0xFD6D
+
+/* rts5260 DV3318 OCP-related registers */
+#define REG_DV3318_OCPCTL 0xFD89
+#define DV3318_OCP_TIME_MASK 0xF0
+#define DV3318_DETECT_EN 0x08
+#define DV3318_OCP_INT_EN 0x04
+#define DV3318_OCP_INT_CLR 0x02
+#define DV3318_OCP_CLR 0x01
+
+#define REG_DV3318_OCPSTAT 0xFD8A
+#define DV3318_OCP_GLITCH_TIME_MASK 0xF0
+#define DV3318_OCP_DETECT 0x08
+#define DV3318_OCP_NOW 0x04
+#define DV3318_OCP_EVER 0x02
+
+#define SD_OCP_GLITCH_MASK 0x0F
+
+/* OCPPARA1 */
+#define SDVIO_OCP_TIME_60 0x00
+#define SDVIO_OCP_TIME_100 0x10
+#define SDVIO_OCP_TIME_200 0x20
+#define SDVIO_OCP_TIME_400 0x30
+#define SDVIO_OCP_TIME_600 0x40
+#define SDVIO_OCP_TIME_800 0x50
+#define SDVIO_OCP_TIME_1100 0x60
+#define SDVIO_OCP_TIME_MASK 0x70
+
+#define SD_OCP_TIME_60 0x00
+#define SD_OCP_TIME_100 0x01
+#define SD_OCP_TIME_200 0x02
+#define SD_OCP_TIME_400 0x03
+#define SD_OCP_TIME_600 0x04
+#define SD_OCP_TIME_800 0x05
+#define SD_OCP_TIME_1100 0x06
+#define SD_OCP_TIME_MASK 0x07
+
+/* OCPPARA2 */
+#define SDVIO_OCP_THD_190 0x00
+#define SDVIO_OCP_THD_250 0x10
+#define SDVIO_OCP_THD_320 0x20
+#define SDVIO_OCP_THD_380 0x30
+#define SDVIO_OCP_THD_440 0x40
+#define SDVIO_OCP_THD_500 0x50
+#define SDVIO_OCP_THD_570 0x60
+#define SDVIO_OCP_THD_630 0x70
+#define SDVIO_OCP_THD_MASK 0x70
+
+#define SD_OCP_THD_450 0x00
+#define SD_OCP_THD_550 0x01
+#define SD_OCP_THD_650 0x02
+#define SD_OCP_THD_750 0x03
+#define SD_OCP_THD_850 0x04
+#define SD_OCP_THD_950 0x05
+#define SD_OCP_THD_1050 0x06
+#define SD_OCP_THD_1150 0x07
+#define SD_OCP_THD_MASK 0x07
+
+#define SDVIO_OCP_GLITCH_MASK 0xF0
+#define SDVIO_OCP_GLITCH_NONE 0x00
+#define SDVIO_OCP_GLITCH_50U 0x10
+#define SDVIO_OCP_GLITCH_100U 0x20
+#define SDVIO_OCP_GLITCH_200U 0x30
+#define SDVIO_OCP_GLITCH_600U 0x40
+#define SDVIO_OCP_GLITCH_800U 0x50
+#define SDVIO_OCP_GLITCH_1M 0x60
+#define SDVIO_OCP_GLITCH_2M 0x70
+#define SDVIO_OCP_GLITCH_3M 0x80
+#define SDVIO_OCP_GLITCH_4M 0x90
+#define SDVIO_OCP_GLITCH_5M 0xA0
+#define SDVIO_OCP_GLITCH_6M 0xB0
+#define SDVIO_OCP_GLITCH_7M 0xC0
+#define SDVIO_OCP_GLITCH_8M 0xD0
+#define SDVIO_OCP_GLITCH_9M 0xE0
+#define SDVIO_OCP_GLITCH_10M 0xF0
+
+#define SD_OCP_GLITCH_MASK 0x0F
+#define SD_OCP_GLITCH_NONE 0x00
+#define SD_OCP_GLITCH_50U 0x01
+#define SD_OCP_GLITCH_100U 0x02
+#define SD_OCP_GLITCH_200U 0x03
+#define SD_OCP_GLITCH_600U 0x04
+#define SD_OCP_GLITCH_800U 0x05
+#define SD_OCP_GLITCH_1M 0x06
+#define SD_OCP_GLITCH_2M 0x07
+#define SD_OCP_GLITCH_3M 0x08
+#define SD_OCP_GLITCH_4M 0x09
+#define SD_OCP_GLITCH_5M 0x0A
+#define SD_OCP_GLITCH_6M 0x0B
+#define SD_OCP_GLITCH_7M 0x0C
+#define SD_OCP_GLITCH_8M 0x0D
+#define SD_OCP_GLITCH_9M 0x0E
+#define SD_OCP_GLITCH_10M 0x0F
+
/* Phy register */
#define PHY_PCR 0x00
#define PHY_PCR_FORCE_CODE 0xB000
@@ -847,6 +1067,9 @@
#define PCR_SETTING_REG1 0x724
#define PCR_SETTING_REG2 0x814
#define PCR_SETTING_REG3 0x747
+#define PCR_SETTING_REG4 0x818
+#define PCR_SETTING_REG5 0x81C
+
#define rtsx_pci_init_cmd(pcr) ((pcr)->ci = 0)
@@ -875,15 +1098,86 @@ struct pcr_ops {
unsigned int (*cd_deglitch)(struct rtsx_pcr *pcr);
int (*conv_clk_and_div_n)(int clk, int dir);
void (*fetch_vendor_settings)(struct rtsx_pcr *pcr);
- void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state);
+ void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state, bool runtime);
+ void (*stop_cmd)(struct rtsx_pcr *pcr);
+
+ void (*set_aspm)(struct rtsx_pcr *pcr, bool enable);
+ void (*set_l1off_cfg_sub_d0)(struct rtsx_pcr *pcr, int active);
+ void (*enable_ocp)(struct rtsx_pcr *pcr);
+ void (*disable_ocp)(struct rtsx_pcr *pcr);
+ void (*init_ocp)(struct rtsx_pcr *pcr);
+ void (*process_ocp)(struct rtsx_pcr *pcr);
+ int (*get_ocpstat)(struct rtsx_pcr *pcr, u8 *val);
+ void (*clear_ocpstat)(struct rtsx_pcr *pcr);
};
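+/*
+ * Each supported IC supplies its own ops table; a hedged sketch (the
+ * rts5xxx_* names are hypothetical) of how a chip driver would wire up
+ * the new OCP callbacks:
+ *
+ *	static const struct pcr_ops rts5xxx_pcr_ops = {
+ *		.fetch_vendor_settings	= rts5xxx_fetch_vendor_settings,
+ *		.force_power_down	= rts5xxx_force_power_down,
+ *		.enable_ocp		= rts5xxx_enable_ocp,
+ *		.disable_ocp		= rts5xxx_disable_ocp,
+ *		.init_ocp		= rts5xxx_init_ocp,
+ *		.process_ocp		= rts5xxx_process_ocp,
+ *	};
+ */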
enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
+enum ASPM_MODE {ASPM_MODE_CFG, ASPM_MODE_REG};
+
+#define ASPM_L1_1_EN BIT(0)
+#define ASPM_L1_2_EN BIT(1)
+#define PM_L1_1_EN BIT(2)
+#define PM_L1_2_EN BIT(3)
+#define LTR_L1SS_PWR_GATE_EN BIT(4)
+#define L1_SNOOZE_TEST_EN BIT(5)
+#define LTR_L1SS_PWR_GATE_CHECK_CARD_EN BIT(6)
+
+/*
+ * struct rtsx_cr_option - card reader options
+ * @dev_flags: device flags
+ * @force_clkreq_0: force the CLKREQ# signal low
+ * @ltr_en: flag to enable LTR mode
+ * @ltr_enabled: flag indicating LTR mode is enabled in config space
+ * @ltr_active: LTR mode status
+ * @ltr_active_latency: LTR mode active latency
+ * @ltr_idle_latency: LTR mode idle latency
+ * @ltr_l1off_latency: LTR mode L1OFF latency
+ * @l1_snooze_delay: L1 snooze delay
+ * @ltr_l1off_sspwrgate: LTR L1OFF sspwrgate value
+ * @ltr_l1off_snooze_sspwrgate: LTR L1OFF snooze sspwrgate value
+ * @ocp_en: flag to enable OCP (over-current protection)
+ * @sd_400mA_ocp_thd: OCP threshold for the 400mA limit
+ * @sd_800mA_ocp_thd: OCP threshold for the 800mA limit
+ */
+struct rtsx_cr_option {
+ u32 dev_flags;
+ bool force_clkreq_0;
+ bool ltr_en;
+ bool ltr_enabled;
+ bool ltr_active;
+ u32 ltr_active_latency;
+ u32 ltr_idle_latency;
+ u32 ltr_l1off_latency;
+ u32 l1_snooze_delay;
+ u8 ltr_l1off_sspwrgate;
+ u8 ltr_l1off_snooze_sspwrgate;
+ bool ocp_en;
+ u8 sd_400mA_ocp_thd;
+ u8 sd_800mA_ocp_thd;
+};
+
+/*
+ * struct rtsx_hw_param - card reader hardware parameters
+ * @interrupt_en: indicates which interrupts are enabled
+ * @ocp_glitch: OCP glitch time
+ */
+struct rtsx_hw_param {
+ u32 interrupt_en;
+ u8 ocp_glitch;
+};
+
+#define rtsx_set_dev_flag(cr, flag) \
+ ((cr)->option.dev_flags |= (flag))
+#define rtsx_clear_dev_flag(cr, flag) \
+ ((cr)->option.dev_flags &= ~(flag))
+#define rtsx_check_dev_flag(cr, flag) \
+ ((cr)->option.dev_flags & (flag))
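+/*
+ * Typical usage, as a sketch (the call sites are hypothetical; the flag
+ * names come from the dev_flags bits defined above): a chip init path
+ * seeds the option flags and later code tests them.
+ *
+ *	rtsx_set_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN);
+ *	if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN))
+ *		rtsx_clear_dev_flag(pcr, L1_SNOOZE_TEST_EN);
+ */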
struct rtsx_pcr {
struct pci_dev *pci;
unsigned int id;
- int pcie_cap;
+ struct rtsx_cr_option option;
+ struct rtsx_hw_param hw_param;
/* pci resources */
unsigned long addr;
@@ -910,7 +1204,6 @@ struct rtsx_pcr {
unsigned int card_exist;
struct delayed_work carddet_work;
- struct delayed_work idle_work;
spinlock_t lock;
struct mutex pcr_mutex;
@@ -927,6 +1220,8 @@ struct rtsx_pcr {
#define EXTRA_CAPS_MMC_HSDDR (1 << 3)
#define EXTRA_CAPS_MMC_HS200 (1 << 4)
#define EXTRA_CAPS_MMC_8BIT (1 << 5)
+#define EXTRA_CAPS_NO_MMC (1 << 7)
+#define EXTRA_CAPS_SD_EXPRESS (1 << 8)
u32 extra_caps;
#define IC_VER_A 0
@@ -940,6 +1235,8 @@ struct rtsx_pcr {
u8 card_drive_sel;
#define ASPM_L1_EN 0x02
u8 aspm_en;
+ enum ASPM_MODE aspm_mode;
+ bool aspm_enabled;
#define PCR_MS_PMOS (1 << 0)
#define PCR_REVERSE_SOCKET (1 << 1)
@@ -962,13 +1259,26 @@ struct rtsx_pcr {
struct rtsx_slot *slots;
u8 dma_error_count;
+ u8 ocp_stat;
+ u8 ocp_stat2;
+ u8 rtd3_en;
};
+#define PID_524A 0x524A
+#define PID_5249 0x5249
+#define PID_5250 0x5250
+#define PID_525A 0x525A
+#define PID_5260 0x5260
+#define PID_5261 0x5261
+#define PID_5228 0x5228
+
#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid))
#define PCI_VID(pcr) ((pcr)->pci->vendor)
#define PCI_PID(pcr) ((pcr)->pci->device)
#define is_version(pcr, pid, ver) \
(CHK_PCI_PID(pcr, pid) && (pcr)->ic_version == (ver))
+#define is_version_higher_than(pcr, pid, ver) \
+ (CHK_PCI_PID(pcr, pid) && (pcr)->ic_version > (ver))
#define pcr_dbg(pcr, fmt, arg...) \
dev_dbg(&(pcr)->pci->dev, fmt, ##arg)
@@ -1020,18 +1330,6 @@ static inline u8 *rtsx_pci_get_cmd_data(struct rtsx_pcr *pcr)
return (u8 *)(pcr->host_cmds_ptr);
}
-static inline int rtsx_pci_update_cfg_byte(struct rtsx_pcr *pcr, int addr,
- u8 mask, u8 append)
-{
- int err;
- u8 val;
-
- err = pci_read_config_byte(pcr->pci, addr, &val);
- if (err < 0)
- return err;
- return pci_write_config_byte(pcr->pci, addr, (val & mask) | append);
-}
-
static inline void rtsx_pci_write_be32(struct rtsx_pcr *pcr, u16 reg, u32 val)
{
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg, 0xFF, val >> 24);
diff --git a/include/linux/mfd/rtsx_usb.h b/include/linux/rtsx_usb.h
index c446e4fd6b5c..3247ed8e9ff0 100644
--- a/include/linux/mfd/rtsx_usb.h
+++ b/include/linux/rtsx_usb.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Driver for Realtek RTS5139 USB card reader
*
* Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- *
* Author:
* Roger Tseng <rogerable@realtek.com>
*/
@@ -65,8 +54,6 @@ struct rtsx_ucr {
struct usb_device *pusb_dev;
struct usb_interface *pusb_intf;
struct usb_sg_request current_sg;
- unsigned char *iobuf;
- dma_addr_t iobuf_dma;
struct timer_list sg_timer;
struct mutex dev_mutex;
diff --git a/include/linux/rv.h b/include/linux/rv.h
new file mode 100644
index 000000000000..8883b41d88ec
--- /dev/null
+++ b/include/linux/rv.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Runtime Verification.
+ *
+ * For further information, see: kernel/trace/rv/rv.c.
+ */
+#ifndef _LINUX_RV_H
+#define _LINUX_RV_H
+
+#define MAX_DA_NAME_LEN 24
+
+#ifdef CONFIG_RV
+/*
+ * Deterministic automaton per-object variables.
+ */
+struct da_monitor {
+ bool monitoring;
+ unsigned int curr_state;
+};
+
+/*
+ * The number of per-task RV monitors is currently fixed at
+ * RV_PER_TASK_MONITORS. If more monitors ever prove justified, the
+ * count can be raised or a dynamic allocation method developed; so
+ * far, neither has been necessary.
+ */
+#define RV_PER_TASK_MONITORS 1
+#define RV_PER_TASK_MONITOR_INIT (RV_PER_TASK_MONITORS)
+
+/*
+ * Further monitor types are expected, so make this a union.
+ */
+union rv_task_monitor {
+ struct da_monitor da_mon;
+};
+
+#ifdef CONFIG_RV_REACTORS
+struct rv_reactor {
+ const char *name;
+ const char *description;
+ void (*react)(char *msg);
+};
+#endif
+
+struct rv_monitor {
+ const char *name;
+ const char *description;
+ bool enabled;
+ int (*enable)(void);
+ void (*disable)(void);
+ void (*reset)(void);
+#ifdef CONFIG_RV_REACTORS
+ void (*react)(char *msg);
+#endif
+};
+
+bool rv_monitoring_on(void);
+int rv_unregister_monitor(struct rv_monitor *monitor);
+int rv_register_monitor(struct rv_monitor *monitor);
+int rv_get_task_monitor_slot(void);
+void rv_put_task_monitor_slot(int slot);
+
+#ifdef CONFIG_RV_REACTORS
+bool rv_reacting_on(void);
+int rv_unregister_reactor(struct rv_reactor *reactor);
+int rv_register_reactor(struct rv_reactor *reactor);
+#endif /* CONFIG_RV_REACTORS */
+
+#endif /* CONFIG_RV */
+#endif /* _LINUX_RV_H */
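As a hedged illustration of the registration interface above (the monitor
name and callbacks below are invented, and real monitors are normally
generated from a deterministic-automaton description rather than written
by hand), a minimal monitor could be registered like this:

	#include <linux/init.h>
	#include <linux/rv.h>

	static int my_mon_enable(void)
	{
		/* hook up tracepoints / start monitoring here */
		return 0;
	}

	static void my_mon_disable(void)
	{
		/* tear down the hooks */
	}

	static void my_mon_reset(void)
	{
		/* clear per-object automaton state */
	}

	static struct rv_monitor my_monitor = {
		.name		= "my_monitor",
		.description	= "example runtime-verification monitor",
		.enable		= my_mon_enable,
		.disable	= my_mon_disable,
		.reset		= my_mon_reset,
	};

	static int __init my_monitor_init(void)
	{
		return rv_register_monitor(&my_monitor);
	}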
diff --git a/include/linux/rwbase_rt.h b/include/linux/rwbase_rt.h
new file mode 100644
index 000000000000..1d264dd08625
--- /dev/null
+++ b/include/linux/rwbase_rt.h
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef _LINUX_RWBASE_RT_H
+#define _LINUX_RWBASE_RT_H
+
+#include <linux/rtmutex.h>
+#include <linux/atomic.h>
+
+#define READER_BIAS (1U << 31)
+#define WRITER_BIAS (1U << 30)
+
+struct rwbase_rt {
+ atomic_t readers;
+ struct rt_mutex_base rtmutex;
+};
+
+#define __RWBASE_INITIALIZER(name) \
+{ \
+ .readers = ATOMIC_INIT(READER_BIAS), \
+ .rtmutex = __RT_MUTEX_BASE_INITIALIZER(name.rtmutex), \
+}
+
+#define init_rwbase_rt(rwbase) \
+ do { \
+ rt_mutex_base_init(&(rwbase)->rtmutex); \
+ atomic_set(&(rwbase)->readers, READER_BIAS); \
+ } while (0)
+
+
+static __always_inline bool rw_base_is_locked(struct rwbase_rt *rwb)
+{
+ return atomic_read(&rwb->readers) != READER_BIAS;
+}
+
+static __always_inline bool rw_base_is_contended(struct rwbase_rt *rwb)
+{
+ return atomic_read(&rwb->readers) > 0;
+}
+
+#endif /* _LINUX_RWBASE_RT_H */
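A quick sketch of the encoding above (the variable is invented, for
illustration only; in the kernel this type is consumed indirectly through
rwlock_t and rw_semaphore on PREEMPT_RT): a freshly initialized rwbase
holds READER_BIAS in ->readers, so rw_base_is_locked() reports false
until a reader or writer has adjusted the counter.

	static struct rwbase_rt demo = __RWBASE_INITIALIZER(demo);

	static bool demo_is_free(void)
	{
		/* readers == READER_BIAS right after init, i.e. unlocked */
		return !rw_base_is_locked(&demo);
	}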
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index bc2994ed66e1..c0ef596f340b 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -1,7 +1,7 @@
#ifndef __LINUX_RWLOCK_H
#define __LINUX_RWLOCK_H
-#ifndef __LINUX_SPINLOCK_H
+#ifndef __LINUX_INSIDE_SPINLOCK_H
# error "please don't include this file directly"
#endif
@@ -30,29 +30,20 @@ do { \
#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
-#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
extern int do_raw_read_trylock(rwlock_t *lock);
extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
-#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
extern int do_raw_write_trylock(rwlock_t *lock);
extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
#else
# define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
-# define do_raw_read_lock_flags(lock, flags) \
- do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
# define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
# define do_raw_write_lock(rwlock) do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
-# define do_raw_write_lock_flags(lock, flags) \
- do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
# define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
#endif
-#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock)
-#define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock)
-
/*
* Define the various rw_lock methods. Note we define these
* regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
@@ -64,6 +55,12 @@ do { \
#define write_lock(lock) _raw_write_lock(lock)
#define read_lock(lock) _raw_read_lock(lock)
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define write_lock_nested(lock, subclass) _raw_write_lock_nested(lock, subclass)
+#else
+#define write_lock_nested(lock, subclass) _raw_write_lock(lock)
+#endif
+
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define read_lock_irqsave(lock, flags) \
@@ -122,4 +119,11 @@ do { \
1 : ({ local_irq_restore(flags); 0; }); \
})
+#ifdef arch_rwlock_is_contended
+#define rwlock_is_contended(lock) \
+ arch_rwlock_is_contended(&(lock)->raw_lock)
+#else
+#define rwlock_is_contended(lock) ((void)(lock), 0)
+#endif /* arch_rwlock_is_contended */
+
#endif /* __LINUX_RWLOCK_H */
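A short, hedged sketch of the new write_lock_nested() annotation (the
function and locks are invented; include <linux/spinlock.h>, since this
header must not be included directly). Taking two rwlocks of the same
lock class in a fixed order needs the nested annotation so lockdep does
not report a false self-deadlock:

	#include <linux/spinlock.h>

	static void lock_pair(rwlock_t *a, rwlock_t *b)
	{
		write_lock(a);
		write_lock_nested(b, SINGLE_DEPTH_NESTING);
		/* ... both objects are now write-locked ... */
		write_unlock(b);
		write_unlock(a);
	}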
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index 5b9b84b20407..dceb0a59b692 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -17,6 +17,7 @@
void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock);
void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
@@ -157,8 +158,7 @@ static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
local_irq_save(flags);
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
- do_raw_read_lock_flags, &flags);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
return flags;
}
@@ -184,8 +184,7 @@ static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
local_irq_save(flags);
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
- do_raw_write_lock_flags, &flags);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
return flags;
}
@@ -211,18 +210,25 @@ static inline void __raw_write_lock(rwlock_t *lock)
LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
-#endif /* CONFIG_PREEMPT */
+static inline void __raw_write_lock_nested(rwlock_t *lock, int subclass)
+{
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
static inline void __raw_write_unlock(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
preempt_enable();
}
static inline void __raw_read_unlock(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
preempt_enable();
}
@@ -230,7 +236,7 @@ static inline void __raw_read_unlock(rwlock_t *lock)
static inline void
__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
local_irq_restore(flags);
preempt_enable();
@@ -238,7 +244,7 @@ __raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
static inline void __raw_read_unlock_irq(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
local_irq_enable();
preempt_enable();
@@ -246,7 +252,7 @@ static inline void __raw_read_unlock_irq(rwlock_t *lock)
static inline void __raw_read_unlock_bh(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
@@ -254,7 +260,7 @@ static inline void __raw_read_unlock_bh(rwlock_t *lock)
static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
unsigned long flags)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
local_irq_restore(flags);
preempt_enable();
@@ -262,7 +268,7 @@ static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
static inline void __raw_write_unlock_irq(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
local_irq_enable();
preempt_enable();
@@ -270,7 +276,7 @@ static inline void __raw_write_unlock_irq(rwlock_t *lock)
static inline void __raw_write_unlock_bh(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
new file mode 100644
index 000000000000..8544ff05e594
--- /dev/null
+++ b/include/linux/rwlock_rt.h
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_RWLOCK_RT_H
+#define __LINUX_RWLOCK_RT_H
+
+#ifndef __LINUX_SPINLOCK_RT_H
+#error Do not #include directly. Use <linux/spinlock.h>.
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
+ struct lock_class_key *key);
+#else
+static inline void __rt_rwlock_init(rwlock_t *rwlock, char *name,
+ struct lock_class_key *key)
+{
+}
+#endif
+
+#define rwlock_init(rwl) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ init_rwbase_rt(&(rwl)->rwbase); \
+ __rt_rwlock_init(rwl, #rwl, &__key); \
+} while (0)
+
+extern void rt_read_lock(rwlock_t *rwlock);
+extern int rt_read_trylock(rwlock_t *rwlock);
+extern void rt_read_unlock(rwlock_t *rwlock);
+extern void rt_write_lock(rwlock_t *rwlock);
+extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass);
+extern int rt_write_trylock(rwlock_t *rwlock);
+extern void rt_write_unlock(rwlock_t *rwlock);
+
+static __always_inline void read_lock(rwlock_t *rwlock)
+{
+ rt_read_lock(rwlock);
+}
+
+static __always_inline void read_lock_bh(rwlock_t *rwlock)
+{
+ local_bh_disable();
+ rt_read_lock(rwlock);
+}
+
+static __always_inline void read_lock_irq(rwlock_t *rwlock)
+{
+ rt_read_lock(rwlock);
+}
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ rt_read_lock(lock); \
+ flags = 0; \
+ } while (0)
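+/*
+ * Note: on PREEMPT_RT an rwlock is a sleeping lock, so the _irqsave
+ * variant cannot actually disable interrupts; @flags is kept only for
+ * API compatibility and, as above, is always set to 0.
+ */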
+
+#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
+
+static __always_inline void read_unlock(rwlock_t *rwlock)
+{
+ rt_read_unlock(rwlock);
+}
+
+static __always_inline void read_unlock_bh(rwlock_t *rwlock)
+{
+ rt_read_unlock(rwlock);
+ local_bh_enable();
+}
+
+static __always_inline void read_unlock_irq(rwlock_t *rwlock)
+{
+ rt_read_unlock(rwlock);
+}
+
+static __always_inline void read_unlock_irqrestore(rwlock_t *rwlock,
+ unsigned long flags)
+{
+ rt_read_unlock(rwlock);
+}
+
+static __always_inline void write_lock(rwlock_t *rwlock)
+{
+ rt_write_lock(rwlock);
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static __always_inline void write_lock_nested(rwlock_t *rwlock, int subclass)
+{
+ rt_write_lock_nested(rwlock, subclass);
+}
+#else
+#define write_lock_nested(lock, subclass) rt_write_lock(((void)(subclass), (lock)))
+#endif
+
+static __always_inline void write_lock_bh(rwlock_t *rwlock)
+{
+ local_bh_disable();
+ rt_write_lock(rwlock);
+}
+
+static __always_inline void write_lock_irq(rwlock_t *rwlock)
+{
+ rt_write_lock(rwlock);
+}
+
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ rt_write_lock(lock); \
+ flags = 0; \
+ } while (0)
+
+#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
+
+#define write_trylock_irqsave(lock, flags) \
+({ \
+ int __locked; \
+ \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __locked = write_trylock(lock); \
+ __locked; \
+})
+
+static __always_inline void write_unlock(rwlock_t *rwlock)
+{
+ rt_write_unlock(rwlock);
+}
+
+static __always_inline void write_unlock_bh(rwlock_t *rwlock)
+{
+ rt_write_unlock(rwlock);
+ local_bh_enable();
+}
+
+static __always_inline void write_unlock_irq(rwlock_t *rwlock)
+{
+ rt_write_unlock(rwlock);
+}
+
+static __always_inline void write_unlock_irqrestore(rwlock_t *rwlock,
+ unsigned long flags)
+{
+ rt_write_unlock(rwlock);
+}
+
+#define rwlock_is_contended(lock) (((void)(lock), 0))
+
+#endif /* __LINUX_RWLOCK_RT_H */
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index cc0072e93e36..1948442e7750 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -1,18 +1,29 @@
#ifndef __LINUX_RWLOCK_TYPES_H
#define __LINUX_RWLOCK_TYPES_H
+#if !defined(__LINUX_SPINLOCK_TYPES_H)
+# error "Do not include directly, include spinlock_types.h"
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_CONFIG, \
+ }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+#ifndef CONFIG_PREEMPT_RT
/*
- * include/linux/rwlock_types.h - generic rwlock type definitions
- * and initializers
+ * generic rwlock type definitions and initializers
*
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar
* Released under the General Public License (GPL).
*/
typedef struct {
arch_rwlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
- unsigned int break_lock;
-#endif
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned int magic, owner_cpu;
void *owner;
@@ -24,12 +35,6 @@ typedef struct {
#define RWLOCK_MAGIC 0xdeaf1eed
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
-#else
-# define RW_DEP_MAP_INIT(lockname)
-#endif
-
#ifdef CONFIG_DEBUG_SPINLOCK
#define __RW_LOCK_UNLOCKED(lockname) \
(rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
@@ -45,4 +50,29 @@ typedef struct {
#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
+#else /* !CONFIG_PREEMPT_RT */
+
+#include <linux/rwbase_rt.h>
+
+typedef struct {
+ struct rwbase_rt rwbase;
+ atomic_t readers;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} rwlock_t;
+
+#define __RWLOCK_RT_INITIALIZER(name) \
+{ \
+ .rwbase = __RWBASE_INITIALIZER(name), \
+ RW_DEP_MAP_INIT(name) \
+}
+
+#define __RW_LOCK_UNLOCKED(name) __RWLOCK_RT_INITIALIZER(name)
+
+#define DEFINE_RWLOCK(name) \
+ rwlock_t name = __RW_LOCK_UNLOCKED(name)
+
+#endif /* CONFIG_PREEMPT_RT */
+
#endif /* __LINUX_RWLOCK_TYPES_H */
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
deleted file mode 100644
index ae0528b834cd..000000000000
--- a/include/linux/rwsem-spinlock.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* rwsem-spinlock.h: fallback C implementation
- *
- * Copyright (c) 2001 David Howells (dhowells@redhat.com).
- * - Derived partially from ideas by Andrea Arcangeli <andrea@suse.de>
- * - Derived also from comments by Linus
- */
-
-#ifndef _LINUX_RWSEM_SPINLOCK_H
-#define _LINUX_RWSEM_SPINLOCK_H
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include linux/rwsem-spinlock.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-/*
- * the rw-semaphore definition
- * - if count is 0 then there are no active readers or writers
- * - if count is +ve then that is the number of active readers
- * - if count is -1 then there is one active writer
- * - if wait_list is not empty, then there are processes waiting for the semaphore
- */
-struct rw_semaphore {
- __s32 count;
- raw_spinlock_t wait_lock;
- struct list_head wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-};
-
-#define RWSEM_UNLOCKED_VALUE 0x00000000
-
-extern void __down_read(struct rw_semaphore *sem);
-extern int __down_read_trylock(struct rw_semaphore *sem);
-extern void __down_write(struct rw_semaphore *sem);
-extern int __must_check __down_write_killable(struct rw_semaphore *sem);
-extern int __down_write_trylock(struct rw_semaphore *sem);
-extern void __up_read(struct rw_semaphore *sem);
-extern void __up_write(struct rw_semaphore *sem);
-extern void __downgrade_write(struct rw_semaphore *sem);
-extern int rwsem_is_locked(struct rw_semaphore *sem);
-
-#endif /* __KERNEL__ */
-#endif /* _LINUX_RWSEM_SPINLOCK_H */
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index dd1d14250340..efa5c324369a 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* rwsem.h: R/W semaphores, public interface
*
* Written by David Howells (dhowells@redhat.com).
@@ -10,76 +11,90 @@
#include <linux/linkage.h>
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/err.h>
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_SLEEP, \
+ },
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
+#ifndef CONFIG_PREEMPT_RT
+
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif
-struct rw_semaphore;
-
-#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
-#include <linux/rwsem-spinlock.h> /* use a generic implementation */
-#define __RWSEM_INIT_COUNT(name) .count = RWSEM_UNLOCKED_VALUE
-#else
-/* All arch specific implementations share the same struct */
+/*
+ * For an uncontended rwsem, count and owner are the only fields a task
+ * needs to touch when acquiring the rwsem. So they are put next to each
+ * other to increase the chance that they will share the same cacheline.
+ *
+ * In a contended rwsem, the owner is likely the most frequently accessed
+ * field in the structure as the optimistic waiter that holds the osq lock
+ * will spin on owner. For an embedded rwsem, other hot fields in the
+ * containing structure should be moved further away from the rwsem to
+ * reduce the chance that they will share the same cacheline, causing a
+ * cacheline bouncing problem.
+ */
struct rw_semaphore {
atomic_long_t count;
- struct list_head wait_list;
- raw_spinlock_t wait_lock;
-#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
- struct optimistic_spin_queue osq; /* spinner MCS lock */
/*
- * Write owner. Used as a speculative check to see
- * if the owner is running on the cpu.
+	 * Write owner or one of the read owners, as well as flags
+	 * regarding the current state of the rwsem. Can be used as a
+	 * speculative check to see if the write owner is running on
+	 * the CPU.
*/
- struct task_struct *owner;
+ atomic_long_t owner;
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+ struct optimistic_spin_queue osq; /* spinner MCS lock */
+#endif
+ raw_spinlock_t wait_lock;
+ struct list_head wait_list;
+#ifdef CONFIG_DEBUG_RWSEMS
+ void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
};
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-/* Include the arch specific part */
-#include <asm/rwsem.h>
-
/* In all implementations count != 0 means locked */
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
return atomic_long_read(&sem->count) != 0;
}
-#define __RWSEM_INIT_COUNT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
-#endif
+#define RWSEM_UNLOCKED_VALUE 0L
+#define __RWSEM_COUNT_INIT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
/* Common initializer macros and functions */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#ifdef CONFIG_DEBUG_RWSEMS
+# define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname,
#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
+# define __RWSEM_DEBUG_INIT(lockname)
#endif
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
-#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL
+#define __RWSEM_OPT_INIT(lockname) .osq = OSQ_LOCK_UNLOCKED,
#else
#define __RWSEM_OPT_INIT(lockname)
#endif
#define __RWSEM_INITIALIZER(name) \
- { __RWSEM_INIT_COUNT(name), \
- .wait_list = LIST_HEAD_INIT((name).wait_list), \
- .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
+ { __RWSEM_COUNT_INIT(name), \
+ .owner = ATOMIC_LONG_INIT(0), \
__RWSEM_OPT_INIT(name) \
+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),\
+ .wait_list = LIST_HEAD_INIT((name).wait_list), \
+ __RWSEM_DEBUG_INIT(name) \
__RWSEM_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(name) \
@@ -97,7 +112,7 @@ do { \
/*
* This is the same regardless of which rwsem implementation that is being used.
- * It is just a heuristic meant to be called by somebody alreadying holding the
+ * It is just a heuristic meant to be called by somebody already holding the
* rwsem to see if somebody from an incompatible type is wanting access to the
* lock.
*/
@@ -106,10 +121,59 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
return !list_empty(&sem->wait_list);
}
+#else /* !CONFIG_PREEMPT_RT */
+
+#include <linux/rwbase_rt.h>
+
+struct rw_semaphore {
+ struct rwbase_rt rwbase;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#define __RWSEM_INITIALIZER(name) \
+ { \
+ .rwbase = __RWBASE_INITIALIZER(name), \
+ __RWSEM_DEP_MAP_INIT(name) \
+ }
+
+#define DECLARE_RWSEM(lockname) \
+ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
+
+extern void __init_rwsem(struct rw_semaphore *rwsem, const char *name,
+ struct lock_class_key *key);
+
+#define init_rwsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __init_rwsem((sem), #sem, &__key); \
+} while (0)
+
+static __always_inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return rw_base_is_locked(&sem->rwbase);
+}
+
+static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
+{
+ return rw_base_is_contended(&sem->rwbase);
+}
+
+#endif /* CONFIG_PREEMPT_RT */
+
+/*
+ * The functions below are the same for all rwsem implementations including
+ * the RT specific variant.
+ */
+
/*
* lock for reading
*/
extern void down_read(struct rw_semaphore *sem);
+extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
+extern int __must_check down_read_killable(struct rw_semaphore *sem);
/*
* trylock for reading -- returns 1 if successful, 0 if contention
@@ -154,9 +218,10 @@ extern void downgrade_write(struct rw_semaphore *sem);
* static then another method for expressing nested locking is
* the explicit definition of lock class keys and the use of
* lockdep_set_class() at lock initialization time.
- * See Documentation/locking/lockdep-design.txt for more details.)
+ * See Documentation/locking/lockdep-design.rst for more details.)
*/
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
+extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
@@ -165,7 +230,7 @@ extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *
do { \
typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
_down_write_nest_lock(sem, &(nest_lock)->dep_map); \
-} while (0);
+} while (0)
/*
* Take/release a lock when not the owner will release it.
@@ -177,6 +242,7 @@ extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
#else
# define down_read_nested(sem, subclass) down_read(sem)
+# define down_read_killable_nested(sem, subclass) down_read_killable(sem)
# define down_write_nest_lock(sem, nest_lock) down_write(sem)
# define down_write_nested(sem, subclass) down_write(sem)
# define down_write_killable_nested(sem, subclass) down_write_killable(sem)
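A brief usage sketch of the interface declared above (the protected
value is invented for illustration): reader and writer sides pair as
follows, with the killable variant propagating -EINTR on a fatal signal.

	static DECLARE_RWSEM(demo_rwsem);
	static int demo_value;		/* hypothetical shared state */

	static int demo_read(void)
	{
		int v;

		down_read(&demo_rwsem);
		v = demo_value;
		up_read(&demo_rwsem);
		return v;
	}

	static int demo_write(int v)
	{
		if (down_write_killable(&demo_rwsem))
			return -EINTR;
		demo_value = v;
		up_write(&demo_rwsem);
		return 0;
	}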
diff --git a/include/linux/rxrpc.h b/include/linux/rxrpc.h
deleted file mode 100644
index 7343f71783dc..000000000000
--- a/include/linux/rxrpc.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* AF_RXRPC parameters
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_RXRPC_H
-#define _LINUX_RXRPC_H
-
-#include <linux/in.h>
-#include <linux/in6.h>
-
-/*
- * RxRPC socket address
- */
-struct sockaddr_rxrpc {
- sa_family_t srx_family; /* address family */
- u16 srx_service; /* service desired */
- u16 transport_type; /* type of transport socket (SOCK_DGRAM) */
- u16 transport_len; /* length of transport address */
- union {
- sa_family_t family; /* transport address family */
- struct sockaddr_in sin; /* IPv4 transport address */
- struct sockaddr_in6 sin6; /* IPv6 transport address */
- } transport;
-};
-
-/*
- * RxRPC socket options
- */
-#define RXRPC_SECURITY_KEY 1 /* [clnt] set client security key */
-#define RXRPC_SECURITY_KEYRING 2 /* [srvr] set ring of server security keys */
-#define RXRPC_EXCLUSIVE_CONNECTION 3 /* Deprecated; use RXRPC_EXCLUSIVE_CALL instead */
-#define RXRPC_MIN_SECURITY_LEVEL 4 /* minimum security level */
-#define RXRPC_UPGRADEABLE_SERVICE 5 /* Upgrade service[0] -> service[1] */
-#define RXRPC_SUPPORTED_CMSG 6 /* Get highest supported control message type */
-
-/*
- * RxRPC control messages
- * - If neither abort or accept are specified, the message is a data message.
- * - terminal messages mean that a user call ID tag can be recycled
- * - s/r/- indicate whether these are applicable to sendmsg() and/or recvmsg()
- */
-enum rxrpc_cmsg_type {
- RXRPC_USER_CALL_ID = 1, /* sr: user call ID specifier */
- RXRPC_ABORT = 2, /* sr: abort request / notification [terminal] */
- RXRPC_ACK = 3, /* -r: [Service] RPC op final ACK received [terminal] */
- RXRPC_NET_ERROR = 5, /* -r: network error received [terminal] */
- RXRPC_BUSY = 6, /* -r: server busy received [terminal] */
- RXRPC_LOCAL_ERROR = 7, /* -r: local error generated [terminal] */
- RXRPC_NEW_CALL = 8, /* -r: [Service] new incoming call notification */
- RXRPC_ACCEPT = 9, /* s-: [Service] accept request */
- RXRPC_EXCLUSIVE_CALL = 10, /* s-: Call should be on exclusive connection */
- RXRPC_UPGRADE_SERVICE = 11, /* s-: Request service upgrade for client call */
- RXRPC_TX_LENGTH = 12, /* s-: Total length of Tx data */
- RXRPC__SUPPORTED
-};
-
-/*
- * RxRPC security levels
- */
-#define RXRPC_SECURITY_PLAIN 0 /* plain secure-checksummed packets only */
-#define RXRPC_SECURITY_AUTH 1 /* authenticated packets */
-#define RXRPC_SECURITY_ENCRYPT 2 /* encrypted packets */
-
-/*
- * RxRPC security indices
- */
-#define RXRPC_SECURITY_NONE 0 /* no security protocol */
-#define RXRPC_SECURITY_RXKAD 2 /* kaserver or kerberos 4 */
-#define RXRPC_SECURITY_RXGK 4 /* gssapi-based */
-#define RXRPC_SECURITY_RXK5 5 /* kerberos 5 */
-
-#endif /* _LINUX_RXRPC_H */
diff --git a/include/linux/s3c_adc_battery.h b/include/linux/s3c_adc_battery.h
deleted file mode 100644
index 99dadbffdd4f..000000000000
--- a/include/linux/s3c_adc_battery.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef _S3C_ADC_BATTERY_H
-#define _S3C_ADC_BATTERY_H
-
-struct s3c_adc_bat_thresh {
- int volt; /* mV */
- int cur; /* mA */
- int level; /* percent */
-};
-
-struct s3c_adc_bat_pdata {
- int (*init)(void);
- void (*exit)(void);
- void (*enable_charger)(void);
- void (*disable_charger)(void);
-
- int gpio_charge_finished;
- int gpio_inverted;
-
- const struct s3c_adc_bat_thresh *lut_noac;
- unsigned int lut_noac_cnt;
- const struct s3c_adc_bat_thresh *lut_acin;
- unsigned int lut_acin_cnt;
-
- const unsigned int volt_channel;
- const unsigned int current_channel;
- const unsigned int backup_volt_channel;
-
- const unsigned int volt_samples;
- const unsigned int current_samples;
- const unsigned int backup_volt_samples;
-
- const unsigned int volt_mult;
- const unsigned int current_mult;
- const unsigned int backup_volt_mult;
- const unsigned int internal_impedance;
-
- const unsigned int backup_volt_max;
- const unsigned int backup_volt_min;
-};
-
-#endif
diff --git a/include/linux/sa11x0-dma.h b/include/linux/sa11x0-dma.h
deleted file mode 100644
index 65839a58b8e5..000000000000
--- a/include/linux/sa11x0-dma.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * SA11x0 DMA Engine support
- *
- * Copyright (C) 2012 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __LINUX_SA11X0_DMA_H
-#define __LINUX_SA11X0_DMA_H
-
-struct dma_chan;
-
-#if defined(CONFIG_DMA_SA11X0) || defined(CONFIG_DMA_SA11X0_MODULE)
-bool sa11x0_dma_filter_fn(struct dma_chan *, void *);
-#else
-static inline bool sa11x0_dma_filter_fn(struct dma_chan *c, void *d)
-{
- return false;
-}
-#endif
-
-#endif
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index a1904aadbc45..d662cf136021 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -1,41 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Fast and scalable bitmaps.
*
* Copyright (C) 2016 Facebook
* Copyright (C) 2013-2014 Jens Axboe
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License v2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef __LINUX_SCALE_BITMAP_H
#define __LINUX_SCALE_BITMAP_H
-#include <linux/kernel.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/cache.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/minmax.h>
+#include <linux/percpu.h>
#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+struct seq_file;
/**
* struct sbitmap_word - Word in a &struct sbitmap.
*/
struct sbitmap_word {
/**
- * @word: The bitmap word itself.
+ * @word: word holding free bits
*/
unsigned long word;
/**
- * @depth: Number of bits being used in @word.
+ * @cleared: word holding cleared bits
*/
- unsigned long depth;
+ unsigned long cleared ____cacheline_aligned_in_smp;
} ____cacheline_aligned_in_smp;
/**
@@ -61,9 +61,22 @@ struct sbitmap {
unsigned int map_nr;
/**
+ * @round_robin: Allocate bits in strict round-robin order.
+ */
+ bool round_robin;
+
+ /**
* @map: Allocated bitmap.
*/
struct sbitmap_word *map;
+
+ /*
+ * @alloc_hint: Cache of last successfully allocated or freed bit.
+ *
+ * This is per-cpu, which allows multiple users to stick to different
+ * cachelines until the map is exhausted.
+ */
+ unsigned int __percpu *alloc_hint;
};
#define SBQ_WAIT_QUEUES 8
@@ -74,11 +87,6 @@ struct sbitmap {
*/
struct sbq_wait_state {
/**
- * @wait_cnt: Number of frees remaining before we wake up.
- */
- atomic_t wait_cnt;
-
- /**
* @wait: Wait queue.
*/
wait_queue_head_t wait;
@@ -99,14 +107,6 @@ struct sbitmap_queue {
*/
struct sbitmap sb;
- /*
- * @alloc_hint: Cache of last successfully allocated or freed bit.
- *
- * This is per-cpu, which allows multiple users to stick to different
- * cachelines until the map is exhausted.
- */
- unsigned int __percpu *alloc_hint;
-
/**
* @wake_batch: Number of bits which must be freed before we wake up any
* waiters.
@@ -123,10 +123,27 @@ struct sbitmap_queue {
*/
struct sbq_wait_state *ws;
+ /*
+ * @ws_active: count of currently active ws waitqueues
+ */
+ atomic_t ws_active;
+
/**
- * @round_robin: Allocate bits in strict round-robin order.
+ * @min_shallow_depth: The minimum shallow depth which may be passed to
+ * sbitmap_queue_get_shallow()
*/
- bool round_robin;
+ unsigned int min_shallow_depth;
+
+ /**
+	 * @completion_cnt: Number of cleared bits passed to the
+	 * wakeup function.
+ */
+ atomic_t completion_cnt;
+
+ /**
+ * @wakeup_cnt: Number of thread wake ups issued.
+ */
+ atomic_t wakeup_cnt;
};
/**
@@ -137,11 +154,24 @@ struct sbitmap_queue {
* given, a good default is chosen.
* @flags: Allocation flags.
* @node: Memory node to allocate on.
+ * @round_robin: If true, be stricter about allocation order; always allocate
+ * starting from the last allocated bit. This is less efficient
+ * than the default behavior (false).
+ * @alloc_hint: If true, apply percpu hint for where to start searching for
+ * a free bit.
*
* Return: Zero on success or negative errno on failure.
*/
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
- gfp_t flags, int node);
+ gfp_t flags, int node, bool round_robin, bool alloc_hint);
+
+/* sbitmap internal helper */
+static inline unsigned int __map_depth(const struct sbitmap *sb, int index)
+{
+ if (index == sb->map_nr - 1)
+ return sb->depth - (index << sb->shift);
+ return 1U << sb->shift;
+}
/**
* sbitmap_free() - Free memory used by a &struct sbitmap.
@@ -149,7 +179,8 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
*/
static inline void sbitmap_free(struct sbitmap *sb)
{
- kfree(sb->map);
+ free_percpu(sb->alloc_hint);
+ kvfree(sb->map);
sb->map = NULL;
}
@@ -166,20 +197,17 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth);
/**
* sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
* @sb: Bitmap to allocate from.
- * @alloc_hint: Hint for where to start searching for a free bit.
- * @round_robin: If true, be stricter about allocation order; always allocate
- * starting from the last allocated bit. This is less efficient
- * than the default behavior (false).
+ *
+ * This operation provides acquire barrier semantics if it succeeds.
*
* Return: Non-negative allocated bit number if successful, -1 otherwise.
*/
-int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);
+int sbitmap_get(struct sbitmap *sb);
/**
* sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
* limiting the depth used from each word.
* @sb: Bitmap to allocate from.
- * @alloc_hint: Hint for where to start searching for a free bit.
* @shallow_depth: The maximum number of bits to allocate from a single word.
*
* This rather specific operation allows for having multiple users with
@@ -191,8 +219,7 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);
*
* Return: Non-negative allocated bit number if successful, -1 otherwise.
*/
-int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
- unsigned long shallow_depth);
+int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth);
/**
* sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
@@ -202,19 +229,14 @@ int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
*/
bool sbitmap_any_bit_set(const struct sbitmap *sb);
-/**
- * sbitmap_any_bit_clear() - Check for an unset bit in a &struct
- * sbitmap.
- * @sb: Bitmap to check.
- *
- * Return: true if any bit in the bitmap is clear, false otherwise.
- */
-bool sbitmap_any_bit_clear(const struct sbitmap *sb);
+#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
+#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))
typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);
/**
- * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
+ * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
+ * @start: Where to start the iteration.
* @sb: Bitmap to iterate over.
* @fn: Callback. Should return true to continue or false to break early.
* @data: Pointer to pass to callback.
@@ -222,35 +244,63 @@ typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);
* This is inline even though it's non-trivial so that the function calls to the
* callback will hopefully get optimized away.
*/
-static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
- void *data)
+static inline void __sbitmap_for_each_set(struct sbitmap *sb,
+ unsigned int start,
+ sb_for_each_fn fn, void *data)
{
- unsigned int i;
-
- for (i = 0; i < sb->map_nr; i++) {
- struct sbitmap_word *word = &sb->map[i];
- unsigned int off, nr;
-
- if (!word->word)
- continue;
-
- nr = 0;
- off = i << sb->shift;
+ unsigned int index;
+ unsigned int nr;
+ unsigned int scanned = 0;
+
+ if (start >= sb->depth)
+ start = 0;
+ index = SB_NR_TO_INDEX(sb, start);
+ nr = SB_NR_TO_BIT(sb, start);
+
+ while (scanned < sb->depth) {
+ unsigned long word;
+ unsigned int depth = min_t(unsigned int,
+ __map_depth(sb, index) - nr,
+ sb->depth - scanned);
+
+ scanned += depth;
+ word = sb->map[index].word & ~sb->map[index].cleared;
+ if (!word)
+ goto next;
+
+ /*
+ * On the first iteration of the outer loop, we need to add the
+ * bit offset back to the size of the word for find_next_bit().
+ * On all other iterations, nr is zero, so this is a noop.
+ */
+ depth += nr;
while (1) {
- nr = find_next_bit(&word->word, word->depth, nr);
- if (nr >= word->depth)
+ nr = find_next_bit(&word, depth, nr);
+ if (nr >= depth)
break;
-
- if (!fn(sb, off + nr, data))
+ if (!fn(sb, (index << sb->shift) + nr, data))
return;
nr++;
}
+next:
+ nr = 0;
+ if (++index >= sb->map_nr)
+ index = 0;
}
}
-#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
-#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))
+/**
+ * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
+ * @sb: Bitmap to iterate over.
+ * @fn: Callback. Should return true to continue or false to break early.
+ * @data: Pointer to pass to callback.
+ */
+static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
+ void *data)
+{
+ __sbitmap_for_each_set(sb, 0, fn, data);
+}
static inline unsigned long *__sbitmap_word(struct sbitmap *sb,
unsigned int bitnr)
@@ -270,12 +320,53 @@ static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}
+/*
+ * This one is special, since it doesn't actually clear the bit, rather it
+ * sets the corresponding bit in the ->cleared mask instead. Paired with
+ * the caller doing sbitmap_deferred_clear() if a given index is full, which
+ * will clear the previously freed entries in the corresponding ->word.
+ */
+static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
+{
+ unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;
+
+ set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
+}
+
+/*
+ * Counterpart of sbitmap_get(); this one both defers the clear and
+ * updates the percpu allocation hint.
+ */
+static inline void sbitmap_put(struct sbitmap *sb, unsigned int bitnr)
+{
+ sbitmap_deferred_clear_bit(sb, bitnr);
+
+ if (likely(sb->alloc_hint && !sb->round_robin && bitnr < sb->depth))
+ *raw_cpu_ptr(sb->alloc_hint) = bitnr;
+}
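+/*
+ * A sketch of the intended pairing (use_resource() is a hypothetical
+ * consumer): allocate with sbitmap_get() and release with sbitmap_put(),
+ * which defers the actual clear via the ->cleared mask.
+ *
+ *	int nr = sbitmap_get(sb);
+ *
+ *	if (nr >= 0) {
+ *		use_resource(nr);	// hypothetical consumer
+ *		sbitmap_put(sb, nr);	// marks ->cleared, updates hint
+ *	}
+ */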
+
static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
{
return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}
-unsigned int sbitmap_weight(const struct sbitmap *sb);
+static inline int sbitmap_calculate_shift(unsigned int depth)
+{
+ int shift = ilog2(BITS_PER_LONG);
+
+ /*
+ * If the bitmap is small, shrink the number of bits per word so
+ * we spread over a few cachelines, at least. If less than 4
+ * bits, just forget about it, it's not going to work optimally
+ * anyway.
+ */
+ if (depth >= 4) {
+ while ((4U << shift) > depth)
+ shift--;
+ }
+
+ return shift;
+}
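+/*
+ * Worked example: with BITS_PER_LONG == 64 the shift starts at 6. For
+ * depth == 32 the loop steps 6 -> 5 -> 4 -> 3 (4U << 3 == 32 is no
+ * longer greater than depth), giving 8 bits per word spread over four
+ * words instead of one fully packed word.
+ */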
/**
* sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
@@ -286,6 +377,16 @@ unsigned int sbitmap_weight(const struct sbitmap *sb);
*/
void sbitmap_show(struct sbitmap *sb, struct seq_file *m);
+
+/**
+ * sbitmap_weight() - Return the number of set and not cleared bits in a
+ * &struct sbitmap.
+ * @sb: Bitmap to check.
+ *
+ * Return: The number of bits that are set and not cleared.
+ */
+unsigned int sbitmap_weight(const struct sbitmap *sb);
+
/**
* sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
* seq_file.
@@ -320,11 +421,21 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
{
kfree(sbq->ws);
- free_percpu(sbq->alloc_hint);
sbitmap_free(&sbq->sb);
}
/**
+ * sbitmap_queue_recalculate_wake_batch() - Recalculate wake batch
+ * @sbq: Bitmap queue to recalculate wake batch.
+ * @users: Number of users sharing the bitmap queue.
+ *
+ * Like sbitmap_queue_update_wake_batch(), this calculates the wake batch
+ * from the depth. This interface is for HCTX-shared tags or queue-shared tags.
+ */
+void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
+ unsigned int users);
+
+/**
* sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
* @sbq: Bitmap queue to resize.
* @depth: New number of bits to resize to.
@@ -345,17 +456,33 @@ void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);
int __sbitmap_queue_get(struct sbitmap_queue *sbq);
/**
- * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
+ * __sbitmap_queue_get_batch() - Try to allocate a batch of free bits
+ * @sbq: Bitmap queue to allocate from.
+ * @nr_tags: number of tags requested
+ * @offset: offset to add to returned bits
+ *
+ * Return: Mask of allocated tags, 0 if none are found. Each tag allocated is
+ * a bit in the mask returned, and the caller must add @offset to the value to
+ * get the absolute tag value.
+ */
+unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
+ unsigned int *offset);
+
+/**
+ * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
* sbitmap_queue, limiting the depth used from each word, with preemption
* already disabled.
* @sbq: Bitmap queue to allocate from.
* @shallow_depth: The maximum number of bits to allocate from a single word.
* See sbitmap_get_shallow().
*
+ * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
+ * initializing @sbq.
+ *
* Return: Non-negative allocated bit number if successful, -1 otherwise.
*/
-int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
- unsigned int shallow_depth);
+int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+ unsigned int shallow_depth);
/**
* sbitmap_queue_get() - Try to allocate a free bit from a &struct
@@ -378,27 +505,21 @@ static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
}
/**
- * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
- * sbitmap_queue, limiting the depth used from each word.
- * @sbq: Bitmap queue to allocate from.
- * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
- * sbitmap_queue_clear()).
- * @shallow_depth: The maximum number of bits to allocate from a single word.
- * See sbitmap_get_shallow().
+ * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
+ * minimum shallow depth that will be used.
+ * @sbq: Bitmap queue in question.
+ * @min_shallow_depth: The minimum shallow depth that will be passed to
+ * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
*
- * Return: Non-negative allocated bit number if successful, -1 otherwise.
+ * sbitmap_queue_clear() batches wakeups as an optimization. The batch size
+ * depends on the depth of the bitmap. Since the shallow allocation functions
+ * effectively operate with a different depth, the shallow depth must be taken
+ * into account when calculating the batch size. This function must be called
+ * with the minimum shallow depth that will be used. Failure to do so can result
+ * in missed wakeups.
*/
-static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
- unsigned int *cpu,
- unsigned int shallow_depth)
-{
- int nr;
-
- *cpu = get_cpu();
- nr = __sbitmap_queue_get_shallow(sbq, shallow_depth);
- put_cpu();
- return nr;
-}
+void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
+ unsigned int min_shallow_depth);
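
/*
 * Editorial sketch (assumed caller code, not from the patch): declaring the
 * smallest shallow depth right after init keeps the wake batch safe.
 * MY_MIN_SHALLOW is a hypothetical constant; it must never exceed any depth
 * later passed to sbitmap_queue_get_shallow().
 *
 *	sbitmap_queue_init_node(&sbq, depth, -1, false, GFP_KERNEL, node);
 *	sbitmap_queue_min_shallow_depth(&sbq, MY_MIN_SHALLOW);
 *	...
 *	nr = sbitmap_queue_get_shallow(&sbq, shallow_depth);
 */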
/**
* sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
@@ -410,6 +531,17 @@ static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
unsigned int cpu);
+/**
+ * sbitmap_queue_clear_batch() - Free a batch of allocated bits from a
+ * &struct sbitmap_queue.
+ * @sbq: Bitmap to free from.
+ * @offset: offset for each tag in array
+ * @tags: array of tags
+ * @nr_tags: number of tags in array
+ */
+void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
+ int *tags, int nr_tags);
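
/*
 * Editorial sketch: pairing the batch get/put interfaces. Names and the
 * batch size are illustrative. Bit i set in the returned mask means the
 * sbitmap bit (offset + i) now belongs to the caller. The clear side
 * subtracts its @offset from each array entry to recover the bit number;
 * since the array below already holds absolute bit numbers, that offset
 * is 0 here.
 */
static void batch_demo(struct sbitmap_queue *sbq)
{
	int tags[BITS_PER_LONG], nr = 0, i;
	unsigned int offset;
	unsigned long mask = __sbitmap_queue_get_batch(sbq, 4, &offset);

	if (!mask)
		return;		/* nothing free */

	for_each_set_bit(i, &mask, BITS_PER_LONG)
		tags[nr++] = offset + i;	/* absolute tag values */

	/* ... use the tags ... */
	sbitmap_queue_clear_batch(sbq, 0, tags, nr);
}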
+
static inline int sbq_index_inc(int index)
{
return (index + 1) & (SBQ_WAIT_QUEUES - 1);
@@ -446,6 +578,14 @@ static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);
/**
+ * sbitmap_queue_wake_up() - Wake up some of the waiters in one waitqueue
+ * on a &struct sbitmap_queue.
+ * @sbq: Bitmap queue to wake up.
+ * @nr: Number of bits cleared.
+ */
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr);
+
+/**
* sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
* seq_file.
* @sbq: Bitmap queue to show.
@@ -455,4 +595,45 @@ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);
*/
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);
+struct sbq_wait {
+ struct sbitmap_queue *sbq; /* if set, sbq_wait is accounted */
+ struct wait_queue_entry wait;
+};
+
+#define DEFINE_SBQ_WAIT(name) \
+ struct sbq_wait name = { \
+ .sbq = NULL, \
+ .wait = { \
+ .private = current, \
+ .func = autoremove_wake_function, \
+ .entry = LIST_HEAD_INIT((name).wait.entry), \
+ } \
+ }
+
+/*
+ * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
+ * internal state.
+ */
+void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
+ struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait, int state);
+
+/*
+ * Must be paired with sbitmap_prepare_to_wait().
+ */
+void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait);
+
+/*
+ * Wrapper around add_wait_queue(), which maintains some extra internal state.
+ */
+void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
+ struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait);
+
+/*
+ * Must be paired with sbitmap_add_wait_queue().
+ */
+void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait);
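
/*
 * Editorial sketch of the intended wait pattern (assumed caller code,
 * loosely modelled on blk-mq; the waitqueue is normally picked via
 * sbq_wait_ptr() with a per-user wait_index rather than hard-coded):
 */
static int wait_for_free_bit(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws = &sbq->ws[0];
	DEFINE_SBQ_WAIT(wait);
	int nr;

	for (;;) {
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
		nr = __sbitmap_queue_get(sbq);
		if (nr >= 0)
			break;
		io_schedule();
	}
	sbitmap_finish_wait(sbq, ws, &wait);
	return nr;
}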
+
#endif /* __LINUX_SCALE_BITMAP_H */
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 4b3286ac60c8..375a5e90d86a 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCATTERLIST_H
#define _LINUX_SCATTERLIST_H
@@ -8,9 +9,6 @@
#include <asm/io.h>
struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
unsigned long page_link;
unsigned int offset;
unsigned int length;
@@ -18,6 +16,9 @@ struct scatterlist {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
unsigned int dma_length;
#endif
+#ifdef CONFIG_PCI_P2PDMA
+ unsigned int dma_flags;
+#endif
};
/*
@@ -41,6 +42,12 @@ struct sg_table {
unsigned int orig_nents; /* original size of list */
};
+struct sg_append_table {
+ struct sg_table sgt; /* The scatter list table */
+ struct scatterlist *prv; /* last populated sge in the table */
+ unsigned int total_nents; /* Total entries in the table */
+};
+
/*
* Notes on SG table design.
*
@@ -57,17 +64,35 @@ struct sg_table {
*
*/
-#define SG_MAGIC 0x87654321
+#define SG_CHAIN 0x01UL
+#define SG_END 0x02UL
/*
* We overload the LSB of the page pointer to indicate whether it's
* a valid sg entry, or whether it points to the start of a new scatterlist.
* Those low bits are there for everyone! (thanks mason :-)
*/
-#define sg_is_chain(sg) ((sg)->page_link & 0x01)
-#define sg_is_last(sg) ((sg)->page_link & 0x02)
-#define sg_chain_ptr(sg) \
- ((struct scatterlist *) ((sg)->page_link & ~0x03))
+#define SG_PAGE_LINK_MASK (SG_CHAIN | SG_END)
+
+static inline unsigned int __sg_flags(struct scatterlist *sg)
+{
+ return sg->page_link & SG_PAGE_LINK_MASK;
+}
+
+static inline struct scatterlist *sg_chain_ptr(struct scatterlist *sg)
+{
+ return (struct scatterlist *)(sg->page_link & ~SG_PAGE_LINK_MASK);
+}
+
+static inline bool sg_is_chain(struct scatterlist *sg)
+{
+ return __sg_flags(sg) & SG_CHAIN;
+}
+
+static inline bool sg_is_last(struct scatterlist *sg)
+{
+ return __sg_flags(sg) & SG_END;
+}
/**
* sg_assign_page - Assign a given page to an SG entry
@@ -81,15 +106,14 @@ struct sg_table {
**/
static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
{
- unsigned long page_link = sg->page_link & 0x3;
+ unsigned long page_link = sg->page_link & (SG_CHAIN | SG_END);
/*
* In order for the low bit stealing approach to work, pages
* must be aligned at a 32-bit boundary as a minimum.
*/
- BUG_ON((unsigned long) page & 0x03);
+ BUG_ON((unsigned long)page & SG_PAGE_LINK_MASK);
#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
BUG_ON(sg_is_chain(sg));
#endif
sg->page_link = page_link | (unsigned long) page;
@@ -120,10 +144,9 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
static inline struct page *sg_page(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
BUG_ON(sg_is_chain(sg));
#endif
- return (struct page *)((sg)->page_link & ~0x3);
+ return (struct page *)((sg)->page_link & ~SG_PAGE_LINK_MASK);
}
/**
@@ -148,6 +171,36 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
#define for_each_sg(sglist, sg, nr, __i) \
for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
+/*
+ * Loop over each sg element in the given sg_table object.
+ */
+#define for_each_sgtable_sg(sgt, sg, i) \
+ for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i)
+
+/*
+ * Loop over each sg element in the given *DMA mapped* sg_table object.
+ * Please use sg_dma_address(sg) and sg_dma_len(sg) to extract the DMA
+ * address and length of each element.
+ */
+#define for_each_sgtable_dma_sg(sgt, sg, i) \
+ for_each_sg((sgt)->sgl, sg, (sgt)->nents, i)
+
+static inline void __sg_chain(struct scatterlist *chain_sg,
+ struct scatterlist *sgl)
+{
+ /*
+ * offset and length are unused for chain entry. Clear them.
+ */
+ chain_sg->offset = 0;
+ chain_sg->length = 0;
+
+ /*
+ * Set lowest bit to indicate a link pointer, and make sure to clear
+ * the termination bit if it happens to be set.
+ */
+ chain_sg->page_link = ((unsigned long) sgl | SG_CHAIN) & ~SG_END;
+}
+
/**
* sg_chain - Chain two sglists together
* @prv: First scatterlist
@@ -161,17 +214,7 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
struct scatterlist *sgl)
{
- /*
- * offset and length are unused for chain entry. Clear them.
- */
- prv[prv_nents - 1].offset = 0;
- prv[prv_nents - 1].length = 0;
-
- /*
- * Set lowest bit to indicate a link pointer, and make sure to clear
- * the termination bit if it happens to be set.
- */
- prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02;
+ __sg_chain(&prv[prv_nents - 1], sgl);
}
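
/*
 * Editorial sketch: chaining two fixed-size scatterlist arrays. The last
 * entry of the first array becomes the link, so only prv_nents - 1 entries
 * of the first array remain usable for data:
 *
 *	struct scatterlist first[8], second[8];
 *
 *	sg_init_table(first, 8);
 *	sg_init_table(second, 8);
 *	sg_chain(first, 8, second);	// first[7] now links to second[0]
 */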
/**
@@ -185,14 +228,11 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
**/
static inline void sg_mark_end(struct scatterlist *sg)
{
-#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
/*
* Set termination bit, clear potential chain bit
*/
- sg->page_link |= 0x02;
- sg->page_link &= ~0x01;
+ sg->page_link |= SG_END;
+ sg->page_link &= ~SG_CHAIN;
}
/**
@@ -205,13 +245,76 @@ static inline void sg_mark_end(struct scatterlist *sg)
**/
static inline void sg_unmark_end(struct scatterlist *sg)
{
-#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
- sg->page_link &= ~0x02;
+ sg->page_link &= ~SG_END;
+}
+
+/*
+ * CONFIG_PCI_P2PDMA depends on CONFIG_64BIT, which means there are 4 bytes
+ * of padding in struct scatterlist (assuming CONFIG_NEED_SG_DMA_LENGTH is
+ * also set). Use this padding for DMA flags bits to indicate when a specific
+ * dma address is a bus address.
+ */
+#ifdef CONFIG_PCI_P2PDMA
+
+#define SG_DMA_BUS_ADDRESS (1 << 0)
+
+/**
+ * sg_is_dma_bus_address - Return whether a given segment was marked
+ * as a bus address
+ * @sg: SG entry
+ *
+ * Description:
+ * Returns true if sg_dma_mark_bus_address() has been called on
+ * this segment.
+ **/
+static inline bool sg_is_dma_bus_address(struct scatterlist *sg)
+{
+ return sg->dma_flags & SG_DMA_BUS_ADDRESS;
+}
+
+/**
+ * sg_dma_mark_bus_address - Mark the scatterlist entry as a bus address
+ * @sg: SG entry
+ *
+ * Description:
+ * Marks the passed in sg entry to indicate that the dma_address is
+ * a bus address and doesn't need to be unmapped. This should only be
+ * used by dma_map_sg() implementations to mark bus addresses
+ * so they can be properly cleaned up in dma_unmap_sg().
+ **/
+static inline void sg_dma_mark_bus_address(struct scatterlist *sg)
+{
+ sg->dma_flags |= SG_DMA_BUS_ADDRESS;
}
/**
+ * sg_dma_unmark_bus_address - Unmark the scatterlist entry as a bus address
+ * @sg: SG entry
+ *
+ * Description:
+ * Clears the bus address mark.
+ **/
+static inline void sg_dma_unmark_bus_address(struct scatterlist *sg)
+{
+ sg->dma_flags &= ~SG_DMA_BUS_ADDRESS;
+}
+
+#else
+
+static inline bool sg_is_dma_bus_address(struct scatterlist *sg)
+{
+ return false;
+}
+static inline void sg_dma_mark_bus_address(struct scatterlist *sg)
+{
+}
+static inline void sg_dma_unmark_bus_address(struct scatterlist *sg)
+{
+}
+
+#endif
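
/*
 * Editorial sketch (assumed dma_map_sg()-style code, not from the patch):
 * how a mapping implementation might use the marker described above for a
 * P2PDMA segment whose address is already a bus address.
 */
static void map_one_p2p_segment(struct scatterlist *sg, dma_addr_t bus_addr)
{
	sg->dma_address = bus_addr;		/* no IOMMU mapping needed */
	sg_dma_len(sg) = sg->length;
	sg_dma_mark_bus_address(sg);		/* dma_unmap_sg() will skip it */
}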
+
+/**
* sg_phys - Return physical address of an sg entry
* @sg: SG entry
*
@@ -241,6 +344,18 @@ static inline void *sg_virt(struct scatterlist *sg)
return page_address(sg_page(sg)) + sg->offset;
}
+/**
+ * sg_init_marker - Initialize markers in sg table
+ * @sgl: The SG table
+ * @nents: Number of entries in table
+ *
+ * Marks the end of the table by setting the termination bit on the last
+ * entry; no other fields are touched.
+ **/
+static inline void sg_init_marker(struct scatterlist *sgl,
+ unsigned int nents)
+{
+ sg_mark_end(&sgl[nents - 1]);
+}
+
int sg_nents(struct scatterlist *sg);
int sg_nents_for_len(struct scatterlist *sg, u64 len);
struct scatterlist *sg_next(struct scatterlist *);
@@ -256,15 +371,63 @@ int sg_split(struct scatterlist *in, const int in_mapped_nents,
typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
-void __sg_free_table(struct sg_table *, unsigned int, bool, sg_free_fn *);
+void __sg_free_table(struct sg_table *, unsigned int, unsigned int,
+ sg_free_fn *, unsigned int);
void sg_free_table(struct sg_table *);
+void sg_free_append_table(struct sg_append_table *sgt);
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int,
- struct scatterlist *, gfp_t, sg_alloc_fn *);
+ struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *);
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
-int sg_alloc_table_from_pages(struct sg_table *sgt,
- struct page **pages, unsigned int n_pages,
- unsigned long offset, unsigned long size,
- gfp_t gfp_mask);
+int sg_alloc_append_table_from_pages(struct sg_append_table *sgt,
+ struct page **pages, unsigned int n_pages,
+ unsigned int offset, unsigned long size,
+ unsigned int max_segment,
+ unsigned int left_pages, gfp_t gfp_mask);
+int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
+ unsigned int n_pages, unsigned int offset,
+ unsigned long size,
+ unsigned int max_segment, gfp_t gfp_mask);
+
+/**
+ * sg_alloc_table_from_pages - Allocate and initialize an sg table from
+ * an array of pages
+ * @sgt: The sg table header to use
+ * @pages: Pointer to an array of page pointers
+ * @n_pages: Number of pages in the pages array
+ * @offset: Offset from start of the first page to the start of a buffer
+ * @size: Number of valid bytes in the buffer (after offset)
+ * @gfp_mask: GFP allocation mask
+ *
+ * Description:
+ *    Allocate and initialize an sg table from a list of pages. Contiguous
+ *    ranges of pages are squashed into a single scatterlist entry. A user
+ *    may provide an offset into the first page and the size of valid data
+ *    in the buffer specified by the page array. The returned sg table is
+ *    released by sg_free_table().
+ *
+ * Returns:
+ * 0 on success, negative error on failure
+ */
+static inline int sg_alloc_table_from_pages(struct sg_table *sgt,
+ struct page **pages,
+ unsigned int n_pages,
+ unsigned int offset,
+ unsigned long size, gfp_t gfp_mask)
+{
+ return sg_alloc_table_from_pages_segment(sgt, pages, n_pages, offset,
+ size, UINT_MAX, gfp_mask);
+}
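
/*
 * Editorial sketch: building a table from an array of pinned pages (names
 * are illustrative). Contiguous pages coalesce, so sgt->orig_nents may end
 * up smaller than n_pages.
 */
static int build_sgt(struct sg_table *sgt, struct page **pages,
		     unsigned int n_pages, unsigned long size)
{
	int ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 0, size,
					    GFP_KERNEL);
	if (ret)
		return ret;	/* negative errno */
	/* ... map and use the table ... */
	sg_free_table(sgt);
	return 0;
}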
+
+#ifdef CONFIG_SGL_ALLOC
+struct scatterlist *sgl_alloc_order(unsigned long long length,
+ unsigned int order, bool chainable,
+ gfp_t gfp, unsigned int *nent_p);
+struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
+ unsigned int *nent_p);
+void sgl_free_n_order(struct scatterlist *sgl, int nents, int order);
+void sgl_free_order(struct scatterlist *sgl, int order);
+void sgl_free(struct scatterlist *sgl);
+#endif /* CONFIG_SGL_ALLOC */
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
size_t buflen, off_t skip, bool to_buffer);
@@ -300,27 +463,29 @@ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
* Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit
 * is totally arbitrary; a setting of 2048 will get you at least 8MB I/Os.
*/
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-#define SG_MAX_SEGMENTS 2048
-#else
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define SG_MAX_SEGMENTS SG_CHUNK_SIZE
+#else
+#define SG_MAX_SEGMENTS 2048
#endif
#ifdef CONFIG_SG_POOL
-void sg_free_table_chained(struct sg_table *table, bool first_chunk);
+void sg_free_table_chained(struct sg_table *table,
+ unsigned nents_first_chunk);
int sg_alloc_table_chained(struct sg_table *table, int nents,
- struct scatterlist *first_chunk);
+ struct scatterlist *first_chunk,
+ unsigned nents_first_chunk);
#endif
/*
* sg page iterator
*
- * Iterates over sg entries page-by-page. On each successful iteration,
- * you can call sg_page_iter_page(@piter) and sg_page_iter_dma_address(@piter)
- * to get the current page and its dma address. @piter->sg will point to the
- * sg holding this page and @piter->sg_pgoffset to the page's page offset
- * within the sg. The iteration will stop either when a maximum number of sg
- * entries was reached or a terminating sg (sg_last(sg) == true) was reached.
+ * Iterates over sg entries page-by-page. On each successful iteration, you
+ * can call sg_page_iter_page(@piter) to get the current page.
+ * @piter->sg will point to the sg holding this page and @piter->sg_pgoffset to
+ * the page's page offset within the sg. The iteration will stop either when a
+ * maximum number of sg entries was reached or a terminating sg
+ * (sg_last(sg) == true) was reached.
*/
struct sg_page_iter {
struct scatterlist *sg; /* sg holding the page */
@@ -332,7 +497,19 @@ struct sg_page_iter {
* next step */
};
+/*
+ * sg page iterator for DMA addresses
+ *
+ * This is the same as sg_page_iter however you can call
+ * sg_page_iter_dma_address(@dma_iter) to get the page's DMA
+ * address. sg_page_iter_page() cannot be called on this iterator.
+ */
+struct sg_dma_page_iter {
+ struct sg_page_iter base;
+};
+
bool __sg_page_iter_next(struct sg_page_iter *piter);
+bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter);
void __sg_page_iter_start(struct sg_page_iter *piter,
struct scatterlist *sglist, unsigned int nents,
unsigned long pgoffset);
@@ -348,11 +525,13 @@ static inline struct page *sg_page_iter_page(struct sg_page_iter *piter)
/**
* sg_page_iter_dma_address - get the dma address of the current page held by
* the page iterator.
- * @piter: page iterator holding the page
+ * @dma_iter: page iterator holding the page
*/
-static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter)
+static inline dma_addr_t
+sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
{
- return sg_dma_address(piter->sg) + (piter->sg_pgoffset << PAGE_SHIFT);
+ return sg_dma_address(dma_iter->base.sg) +
+ (dma_iter->base.sg_pgoffset << PAGE_SHIFT);
}
/**
@@ -360,19 +539,66 @@ static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter)
* @sglist: sglist to iterate over
* @piter: page iterator to hold current page, sg, sg_pgoffset
* @nents: maximum number of sg entries to iterate over
- * @pgoffset: starting page offset
+ * @pgoffset: starting page offset (in pages)
+ *
+ * Callers may use sg_page_iter_page() to get each page pointer.
+ * Each iteration operates on one PAGE_SIZE unit.
*/
#define for_each_sg_page(sglist, piter, nents, pgoffset) \
for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
__sg_page_iter_next(piter);)
+/**
+ * for_each_sg_dma_page - iterate over the pages of the given sg list
+ * @sglist: sglist to iterate over
+ * @dma_iter: DMA page iterator to hold current page
+ * @dma_nents: maximum number of sg entries to iterate over; this is the
+ * value returned from dma_map_sg()
+ * @pgoffset: starting page offset (in pages)
+ *
+ * Callers may use sg_page_iter_dma_address() to get each page's DMA address.
+ * Each iteration operates on one PAGE_SIZE unit.
+ */
+#define for_each_sg_dma_page(sglist, dma_iter, dma_nents, pgoffset) \
+ for (__sg_page_iter_start(&(dma_iter)->base, sglist, dma_nents, \
+ pgoffset); \
+ __sg_page_iter_dma_next(dma_iter);)
+
+/**
+ * for_each_sgtable_page - iterate over all pages in the sg_table object
+ * @sgt: sg_table object to iterate over
+ * @piter: page iterator to hold current page
+ * @pgoffset: starting page offset (in pages)
+ *
+ * Iterates over all memory pages in the buffer described by a scatterlist
+ * stored in the given sg_table object.
+ * See also for_each_sg_page(). Each iteration operates on one PAGE_SIZE unit.
+ */
+#define for_each_sgtable_page(sgt, piter, pgoffset) \
+ for_each_sg_page((sgt)->sgl, piter, (sgt)->orig_nents, pgoffset)
+
+/**
+ * for_each_sgtable_dma_page - iterate over the DMA mapped sg_table object
+ * @sgt: sg_table object to iterate over
+ * @dma_iter: DMA page iterator to hold current page
+ * @pgoffset: starting page offset (in pages)
+ *
+ * Iterates over all DMA mapped pages in the buffer described by a
+ * scatterlist stored in the given sg_table object.
+ * See also for_each_sg_dma_page(). Each iteration operates on one PAGE_SIZE
+ * unit.
+ */
+#define for_each_sgtable_dma_page(sgt, dma_iter, pgoffset) \
+ for_each_sg_dma_page((sgt)->sgl, dma_iter, (sgt)->nents, pgoffset)
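
/*
 * Editorial sketch: walking the DMA addresses of a mapped table with the
 * DMA-only iterator (sg_page_iter_page() must not be called here):
 */
static void walk_dma_pages(struct sg_table *sgt)
{
	struct sg_dma_page_iter dma_iter;

	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
		dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);

		/* program one PAGE_SIZE chunk at 'addr' ... */
	}
}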
+
/*
* Mapping sg iterator
*
* Iterates over sg entries mapping page-by-page. On each successful
* iteration, @miter->page points to the mapped page and
* @miter->length bytes of data can be accessed at @miter->addr. As
- * long as an interation is enclosed between start and stop, the user
+ * long as an iteration is enclosed between start and stop, the user
* is free to choose control structure and when to stop.
*
* @miter->consumed is set to @miter->length on each iteration. It
diff --git a/include/linux/scc.h b/include/linux/scc.h
index c5a004962679..745eabd17c10 100644
--- a/include/linux/scc.h
+++ b/include/linux/scc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: scc.h,v 1.29 1997/04/02 14:56:45 jreuter Exp jreuter $ */
#ifndef _SCC_H
#define _SCC_H
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2ba9ec93423f..eed5d65b8d1f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H
@@ -13,29 +14,44 @@
#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/shm.h>
-#include <linux/kcov.h>
+#include <linux/kmsan_types.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
+#include <linux/irqflags.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
+#include <linux/refcount.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
+#include <linux/sched/types.h>
#include <linux/signal_types.h>
+#include <linux/syscall_user_dispatch.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
+#include <linux/posix-timers.h>
+#include <linux/rseq.h>
+#include <linux/seqlock.h>
+#include <linux/kcsan.h>
+#include <linux/rv.h>
+#include <linux/livepatch_sched.h>
+#include <asm/kmap_size.h>
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
+struct bpf_local_storage;
+struct bpf_run_ctx;
+struct capture_control;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
+struct io_uring_task;
struct mempolicy;
struct nameidata;
struct nsproxy;
@@ -45,6 +61,8 @@ struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
+struct root_domain;
+struct rq;
struct sched_attr;
struct sched_param;
struct seq_file;
@@ -52,6 +70,7 @@ struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
+struct user_event_mm;
/*
* Task state bitmask. NOTE! These bits are also
@@ -65,66 +84,95 @@ struct task_group;
*/
/* Used in tsk->state: */
-#define TASK_RUNNING 0
-#define TASK_INTERRUPTIBLE 1
-#define TASK_UNINTERRUPTIBLE 2
-#define __TASK_STOPPED 4
-#define __TASK_TRACED 8
+#define TASK_RUNNING 0x00000000
+#define TASK_INTERRUPTIBLE 0x00000001
+#define TASK_UNINTERRUPTIBLE 0x00000002
+#define __TASK_STOPPED 0x00000004
+#define __TASK_TRACED 0x00000008
/* Used in tsk->exit_state: */
-#define EXIT_DEAD 16
-#define EXIT_ZOMBIE 32
+#define EXIT_DEAD 0x00000010
+#define EXIT_ZOMBIE 0x00000020
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->state again: */
-#define TASK_DEAD 64
-#define TASK_WAKEKILL 128
-#define TASK_WAKING 256
-#define TASK_PARKED 512
-#define TASK_NOLOAD 1024
-#define TASK_NEW 2048
-#define TASK_STATE_MAX 4096
+#define TASK_PARKED 0x00000040
+#define TASK_DEAD 0x00000080
+#define TASK_WAKEKILL 0x00000100
+#define TASK_WAKING 0x00000200
+#define TASK_NOLOAD 0x00000400
+#define TASK_NEW 0x00000800
+#define TASK_RTLOCK_WAIT 0x00001000
+#define TASK_FREEZABLE 0x00002000
+#define __TASK_FREEZABLE_UNSAFE (0x00004000 * IS_ENABLED(CONFIG_LOCKDEP))
+#define TASK_FROZEN 0x00008000
+#define TASK_STATE_MAX 0x00010000
+
+#define TASK_ANY (TASK_STATE_MAX-1)
-#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"
+/*
+ * DO NOT ADD ANY NEW USERS!
+ */
+#define TASK_FREEZABLE_UNSAFE (TASK_FREEZABLE | __TASK_FREEZABLE_UNSAFE)
/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
-#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
+#define TASK_TRACED __TASK_TRACED
#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
-#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
/* get_task_state(): */
#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
- __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
+ __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
+ TASK_PARKED)
-#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
+#define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)
-#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
+#define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
+#define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
+#define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)
-#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
-
-#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
- (task->flags & PF_FROZEN) == 0 && \
- (task->state & TASK_NOLOAD) == 0)
+/*
+ * Special states are those that do not use the normal wait-loop pattern. See
+ * the comment with set_special_state().
+ */
+#define is_special_task_state(state) \
+ ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+# define debug_normal_state_change(state_value) \
+ do { \
+ WARN_ON_ONCE(is_special_task_state(state_value)); \
+ current->task_state_change = _THIS_IP_; \
+ } while (0)
+
+# define debug_special_state_change(state_value) \
+ do { \
+ WARN_ON_ONCE(!is_special_task_state(state_value)); \
+ current->task_state_change = _THIS_IP_; \
+ } while (0)
-#define __set_current_state(state_value) \
- do { \
- current->task_state_change = _THIS_IP_; \
- current->state = (state_value); \
+# define debug_rtlock_wait_set_state() \
+ do { \
+ current->saved_state_change = current->task_state_change;\
+ current->task_state_change = _THIS_IP_; \
} while (0)
-#define set_current_state(state_value) \
- do { \
- current->task_state_change = _THIS_IP_; \
- smp_store_mb(current->state, (state_value)); \
+
+# define debug_rtlock_wait_restore_state() \
+ do { \
+ current->task_state_change = current->saved_state_change;\
} while (0)
#else
+# define debug_normal_state_change(cond) do { } while (0)
+# define debug_special_state_change(cond) do { } while (0)
+# define debug_rtlock_wait_set_state() do { } while (0)
+# define debug_rtlock_wait_restore_state() do { } while (0)
+#endif
+
/*
* set_current_state() includes a barrier so that the write of current->state
* is correctly serialised wrt the caller's subsequent test of whether to
@@ -132,41 +180,118 @@ struct task_group;
*
* for (;;) {
* set_current_state(TASK_UNINTERRUPTIBLE);
- * if (!need_sleep)
- * break;
+ * if (CONDITION)
+ * break;
*
* schedule();
* }
* __set_current_state(TASK_RUNNING);
*
* If the caller does not need such serialisation (because, for instance, the
- * condition test and condition change and wakeup are under the same lock) then
+ * CONDITION test and condition change and wakeup are under the same lock) then
* use __set_current_state().
*
* The above is typically ordered against the wakeup, which does:
*
- * need_sleep = false;
- * wake_up_state(p, TASK_UNINTERRUPTIBLE);
+ * CONDITION = 1;
+ * wake_up_state(p, TASK_UNINTERRUPTIBLE);
*
- * Where wake_up_state() (and all other wakeup primitives) imply enough
- * barriers to order the store of the variable against wakeup.
+ * where wake_up_state()/try_to_wake_up() executes a full memory barrier before
+ * accessing p->state.
*
* Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
* once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
* TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
*
- * This is obviously fine, since they both store the exact same value.
+ * However, with slightly different timing the wakeup TASK_RUNNING store can
+ * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
+ * a problem either because that will result in one extra go around the loop
+ * and our @cond test will save the day.
*
* Also see the comments of try_to_wake_up().
*/
-#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
-#define set_current_state(state_value) smp_store_mb(current->state, (state_value))
-#endif
+#define __set_current_state(state_value) \
+ do { \
+ debug_normal_state_change((state_value)); \
+ WRITE_ONCE(current->__state, (state_value)); \
+ } while (0)
+
+#define set_current_state(state_value) \
+ do { \
+ debug_normal_state_change((state_value)); \
+ smp_store_mb(current->__state, (state_value)); \
+ } while (0)
-/* Task command name length: */
-#define TASK_COMM_LEN 16
+/*
+ * set_special_state() should be used for those states for which the blocking
+ * task cannot use the regular condition-based wait loop. In that case we must
+ * serialize against wakeups such that any possible in-flight TASK_RUNNING
+ * stores will not collide with our state change.
+ */
+#define set_special_state(state_value) \
+ do { \
+ unsigned long flags; /* may shadow */ \
+ \
+ raw_spin_lock_irqsave(&current->pi_lock, flags); \
+ debug_special_state_change((state_value)); \
+ WRITE_ONCE(current->__state, (state_value)); \
+ raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
+ } while (0)
+
+/*
+ * PREEMPT_RT specific variants for "sleeping" spin/rwlocks
+ *
+ * RT's spin/rwlock substitutions are state preserving. The state of the
+ * task when blocking on the lock is saved in task_struct::saved_state and
+ * restored after the lock has been acquired. These operations are
+ * serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT
+ * lock related wakeups while the task is blocked on the lock are
+ * redirected to operate on task_struct::saved_state to ensure that these
+ * are not dropped. On restore task_struct::saved_state is set to
+ * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail.
+ *
+ * The lock operation looks like this:
+ *
+ * current_save_and_set_rtlock_wait_state();
+ * for (;;) {
+ * if (try_lock())
+ * break;
+ * raw_spin_unlock_irq(&lock->wait_lock);
+ * schedule_rtlock();
+ * raw_spin_lock_irq(&lock->wait_lock);
+ * set_current_state(TASK_RTLOCK_WAIT);
+ * }
+ * current_restore_rtlock_saved_state();
+ */
+#define current_save_and_set_rtlock_wait_state() \
+ do { \
+ lockdep_assert_irqs_disabled(); \
+ raw_spin_lock(&current->pi_lock); \
+ current->saved_state = current->__state; \
+ debug_rtlock_wait_set_state(); \
+ WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \
+ raw_spin_unlock(&current->pi_lock); \
+ } while (0);
+
+#define current_restore_rtlock_saved_state() \
+ do { \
+ lockdep_assert_irqs_disabled(); \
+ raw_spin_lock(&current->pi_lock); \
+ debug_rtlock_wait_restore_state(); \
+ WRITE_ONCE(current->__state, current->saved_state); \
+ current->saved_state = TASK_RUNNING; \
+ raw_spin_unlock(&current->pi_lock); \
+ } while (0);
+
+#define get_current_state() READ_ONCE(current->__state)
-extern cpumask_var_t cpu_isolated_map;
+/*
+ * Define the task command name length as an enum, so that it is visible to
+ * BPF programs.
+ */
+enum {
+ TASK_COMM_LEN = 16,
+};
extern void scheduler_tick(void);
@@ -179,6 +304,10 @@ extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
+asmlinkage void preempt_schedule_irq(void);
+#ifdef CONFIG_PREEMPT_RT
+ extern void schedule_rtlock(void);
+#endif
extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
@@ -202,45 +331,46 @@ struct prev_cputime {
#endif
};
-/**
- * struct task_cputime - collected CPU time counts
- * @utime: time spent in user mode, in nanoseconds
- * @stime: time spent in kernel mode, in nanoseconds
- * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
- *
- * This structure groups together three kinds of CPU time that are tracked for
- * threads and thread groups. Most things considering CPU time want to group
- * these counts together and treat all three of them in parallel.
- */
-struct task_cputime {
- u64 utime;
- u64 stime;
- unsigned long long sum_exec_runtime;
-};
-
-/* Alternate field names when used on cache expirations: */
-#define virt_exp utime
-#define prof_exp stime
-#define sched_exp sum_exec_runtime
-
enum vtime_state {
/* Task is sleeping or running in a CPU with VTIME inactive: */
VTIME_INACTIVE = 0,
- /* Task runs in userspace in a CPU with VTIME active: */
- VTIME_USER,
+ /* Task is idle */
+ VTIME_IDLE,
/* Task runs in kernelspace in a CPU with VTIME active: */
VTIME_SYS,
+ /* Task runs in userspace in a CPU with VTIME active: */
+ VTIME_USER,
+ /* Task runs as guests in a CPU with VTIME active: */
+ VTIME_GUEST,
};
struct vtime {
seqcount_t seqcount;
unsigned long long starttime;
enum vtime_state state;
+ unsigned int cpu;
u64 utime;
u64 stime;
u64 gtime;
};
+/*
+ * Utilization clamp constraints.
+ * @UCLAMP_MIN: Minimum utilization
+ * @UCLAMP_MAX: Maximum utilization
+ * @UCLAMP_CNT: Utilization clamp constraints count
+ */
+enum uclamp_id {
+ UCLAMP_MIN = 0,
+ UCLAMP_MAX,
+ UCLAMP_CNT
+};
+
+#ifdef CONFIG_SMP
+extern struct root_domain def_root_domain;
+extern struct mutex sched_domains_mutex;
+#endif
+
struct sched_info {
#ifdef CONFIG_SCHED_INFO
/* Cumulative counters: */
@@ -272,48 +402,81 @@ struct sched_info {
# define SCHED_FIXEDPOINT_SHIFT 10
# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)
+/* Increase resolution of cpu_capacity calculations */
+# define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
+# define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
+
struct load_weight {
unsigned long weight;
u32 inv_weight;
};
+/**
+ * struct util_est - Estimation utilization of FAIR tasks
+ * @enqueued: instantaneous estimated utilization of a task/cpu
+ * @ewma: the Exponential Weighted Moving Average (EWMA)
+ * utilization of a task
+ *
+ * Support data structure to track an Exponential Weighted Moving Average
+ * (EWMA) of a FAIR task's utilization. New samples are added to the moving
+ * average each time a task completes an activation. The sample weight is
+ * chosen so that the EWMA will be relatively insensitive to transient
+ * changes in the task's workload.
+ *
+ * The enqueued attribute has a slightly different meaning for tasks and cpus:
+ * - task: the task's util_avg at last task dequeue time
+ * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
+ * Thus, the util_est.enqueued of a task represents the contribution on the
+ * estimated utilization of the CPU where that task is currently enqueued.
+ *
+ * Only for tasks do we track a moving average of the past instantaneous
+ * estimated utilization. This makes it possible to absorb sporadic drops in
+ * the utilization of an otherwise almost periodic task.
+ *
+ * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
+ * updates. When a task is dequeued, its util_est should not be updated if its
+ * util_avg has not been updated in the meantime.
+ * This information is mapped into the MSB of util_est.enqueued at dequeue
+ * time. Since the max value of util_est.enqueued for a task is 1024 (the
+ * PELT util_avg for a task), it is safe to use the MSB.
+ */
+struct util_est {
+ unsigned int enqueued;
+ unsigned int ewma;
+#define UTIL_EST_WEIGHT_SHIFT 2
+#define UTIL_AVG_UNCHANGED 0x80000000
+} __attribute__((__aligned__(sizeof(u64))));
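
/*
 * Editorial worked example: UTIL_EST_WEIGHT_SHIFT = 2 gives a sample weight
 * of 1/4 in the EWMA update performed in kernel/sched/fair.c:
 *
 *	ewma = ewma + (enqueued - ewma) / 4
 *	     = 0.75 * ewma + 0.25 * enqueued
 *
 * e.g. ewma = 512 and enqueued = 1024 yield a new ewma of 640.
 */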
+
/*
- * The load_avg/util_avg accumulates an infinite geometric series
- * (see __update_load_avg() in kernel/sched/fair.c).
+ * The load/runnable/util_avg accumulates an infinite geometric series
+ * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
*
* [load_avg definition]
*
* load_avg = runnable% * scale_load_down(load)
*
- * where runnable% is the time ratio that a sched_entity is runnable.
- * For cfs_rq, it is the aggregated load_avg of all runnable and
- * blocked sched_entities.
+ * [runnable_avg definition]
*
- * load_avg may also take frequency scaling into account:
- *
- * load_avg = runnable% * scale_load_down(load) * freq%
- *
- * where freq% is the CPU frequency normalized to the highest frequency.
+ * runnable_avg = runnable% * SCHED_CAPACITY_SCALE
*
* [util_avg definition]
*
* util_avg = running% * SCHED_CAPACITY_SCALE
*
- * where running% is the time ratio that a sched_entity is running on
- * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
- * and blocked sched_entities.
+ * where runnable% is the time ratio that a sched_entity is runnable and
+ * running% the time ratio that a sched_entity is running.
*
- * util_avg may also factor frequency scaling and CPU capacity scaling:
+ * For cfs_rq, they are the aggregated values of all runnable and blocked
+ * sched_entities.
*
- * util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
+ * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
+ * capacity scaling. The scaling is done through the rq_clock_pelt that is used
+ * for computing those signals (see update_rq_clock_pelt())
*
- * where freq% is the same as above, and capacity% is the CPU capacity
- * normalized to the greatest capacity (due to uarch differences, etc).
- *
- * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
- * themselves are in the range of [0, 1]. To do fixed point arithmetics,
- * we therefore scale them to as large a range as necessary. This is for
- * example reflected by util_avg's SCHED_CAPACITY_SCALE.
+ * N.B., the above ratios (runnable% and running%) themselves are in the
+ * range of [0, 1]. To do fixed point arithmetics, we therefore scale them
+ * to as large a range as necessary. This is for example reflected by
+ * util_avg's SCHED_CAPACITY_SCALE.
*
* [Overflow issue]
*
@@ -332,11 +495,14 @@ struct load_weight {
struct sched_avg {
u64 last_update_time;
u64 load_sum;
+ u64 runnable_sum;
u32 util_sum;
u32 period_contrib;
unsigned long load_avg;
+ unsigned long runnable_avg;
unsigned long util_avg;
-};
+ struct util_est util_est;
+} ____cacheline_aligned;
struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
@@ -353,6 +519,8 @@ struct sched_statistics {
u64 block_start;
u64 block_max;
+ s64 sum_block_runtime;
+
u64 exec_max;
u64 slice_max;
@@ -371,8 +539,12 @@ struct sched_statistics {
u64 nr_wakeups_affine_attempts;
u64 nr_wakeups_passive;
u64 nr_wakeups_idle;
+
+#ifdef CONFIG_SCHED_CORE
+ u64 core_forceidle_sum;
#endif
-};
+#endif /* CONFIG_SCHEDSTATS */
+} ____cacheline_aligned;
struct sched_entity {
/* For load-balancing: */
@@ -388,8 +560,6 @@ struct sched_entity {
u64 nr_migrations;
- struct sched_statistics statistics;
-
#ifdef CONFIG_FAIR_GROUP_SCHED
int depth;
struct sched_entity *parent;
@@ -397,6 +567,8 @@ struct sched_entity {
struct cfs_rq *cfs_rq;
/* rq "owned" by this entity/group: */
struct cfs_rq *my_q;
+ /* cached value of my_q->h_nr_running */
+ unsigned long runnable_weight;
#endif
#ifdef CONFIG_SMP
@@ -406,7 +578,7 @@ struct sched_entity {
* Put into separate cache line so it does not
* collide with read-mostly values above.
*/
- struct sched_avg avg ____cacheline_aligned_in_smp;
+ struct sched_avg avg;
#endif
};
@@ -426,7 +598,7 @@ struct sched_rt_entity {
/* rq "owned" by this entity/group: */
struct rt_rq *my_q;
#endif
-};
+} __randomize_layout;
struct sched_dl_entity {
struct rb_node rb_node;
@@ -444,7 +616,7 @@ struct sched_dl_entity {
/*
* Actual scheduling parameters. Initialized with the values above,
- * they are continously updated during task execution. Note that
+ * they are continuously updated during task execution. Note that
* the remaining runtime could be < 0 in case we are in overrun.
*/
s64 runtime; /* Remaining runtime for this instance */
@@ -458,10 +630,6 @@ struct sched_dl_entity {
* task has to wait for a replenishment to be performed at the
* next firing of dl_timer.
*
- * @dl_boosted tells if we are boosted due to DI. If so we are
- * outside bandwidth enforcement mechanism (but only until we
- * exit the critical section);
- *
* @dl_yielded tells if task gave up the CPU before consuming
* all its available runtime during the last job.
*
@@ -471,11 +639,14 @@ struct sched_dl_entity {
* has not been executed yet. This flag is useful to avoid race
* conditions between the inactive timer handler and the wakeup
* code.
+ *
+ * @dl_overrun tells if the task asked to be informed about runtime
+ * overruns.
*/
- int dl_throttled;
- int dl_boosted;
- int dl_yielded;
- int dl_non_contending;
+ unsigned int dl_throttled : 1;
+ unsigned int dl_yielded : 1;
+ unsigned int dl_non_contending : 1;
+ unsigned int dl_overrun : 1;
/*
* Bandwidth enforcement timer. Each -deadline task has its
@@ -491,16 +662,58 @@ struct sched_dl_entity {
* time.
*/
struct hrtimer inactive_timer;
+
+#ifdef CONFIG_RT_MUTEXES
+ /*
+ * Priority Inheritance. When a DEADLINE scheduling entity is boosted,
+ * pi_se points to the donor; otherwise it points to the dl_se it belongs
+ * to (the original one/itself).
+ */
+ struct sched_dl_entity *pi_se;
+#endif
+};
+
+#ifdef CONFIG_UCLAMP_TASK
+/* Number of utilization clamp buckets (shorter alias) */
+#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
+
+/*
+ * Utilization clamp for a scheduling entity
+ * @value: clamp value "assigned" to a se
+ * @bucket_id: bucket index corresponding to the "assigned" value
+ * @active: the se is currently refcounted in a rq's bucket
+ * @user_defined: the requested clamp value comes from user-space
+ *
+ * The bucket_id is the index of the clamp bucket matching the clamp value
+ * which is pre-computed and stored to avoid expensive integer divisions from
+ * the fast path.
+ *
+ * The active bit is set whenever a task has got an "effective" value assigned,
+ * which can be different from the clamp value "requested" from user-space.
+ * This makes it possible to know that a task is refcounted in the rq's
+ * bucket corresponding to the "effective" bucket_id.
+ *
+ * The user_defined bit is set whenever a task has got a task-specific clamp
+ * value requested from userspace, i.e. the system defaults apply to this task
+ * just as a restriction. This allows relaxing the default clamps when a less
+ * restrictive task-specific value has been requested, thus making it possible
+ * to implement a "nice" semantic. For example, a task running with a 20%
+ * default boost can still drop its own boosting to 0%.
+ */
+struct uclamp_se {
+ unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
+ unsigned int bucket_id : bits_per(UCLAMP_BUCKETS);
+ unsigned int active : 1;
+ unsigned int user_defined : 1;
};
+#endif /* CONFIG_UCLAMP_TASK */
union rcu_special {
struct {
u8 blocked;
u8 need_qs;
- u8 exp_need_qs;
-
- /* Otherwise the compiler can store garbage here: */
- u8 pad;
+ u8 exp_hint; /* Hint for performance. */
+ u8 need_mb; /* Readers need smp_mb(). */
} b; /* Bits. */
u32 s; /* Set of bits. */
};
@@ -516,6 +729,13 @@ struct wake_q_node {
struct wake_q_node *next;
};
+struct kmap_ctrl {
+#ifdef CONFIG_KMAP_LOCAL
+ int idx;
+ pte_t pteval[KM_MAX_IDX];
+#endif
+};
+
struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
@@ -524,25 +744,40 @@ struct task_struct {
*/
struct thread_info thread_info;
#endif
- /* -1 unrunnable, 0 runnable, >0 stopped: */
- volatile long state;
+ unsigned int __state;
+
+#ifdef CONFIG_PREEMPT_RT
+ /* saved state for "spinlock sleepers" */
+ unsigned int saved_state;
+#endif
+
+ /*
+ * This begins the randomizable portion of task_struct. Only
+ * scheduling-critical items should be added above here.
+ */
+ randomized_struct_fields_start
+
void *stack;
- atomic_t usage;
+ refcount_t usage;
/* Per task flags (PF_*), defined further below: */
unsigned int flags;
unsigned int ptrace;
#ifdef CONFIG_SMP
- struct llist_node wake_entry;
int on_cpu;
-#ifdef CONFIG_THREAD_INFO_IN_TASK
- /* Current CPU: */
- unsigned int cpu;
-#endif
+ struct __call_single_node wake_entry;
unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee;
+ /*
+ * recent_used_cpu is initially set as the last CPU used by a task
+ * that performs an affine wakeup of another task. Waker/wakee
+ * relationships can push tasks around a CPU, where each wakeup moves
+ * to the next one.
+ * Tracking a recently used CPU allows a quick search for a recently
+ * used CPU that may be idle.
+ */
+ int recent_used_cpu;
int wake_cpu;
#endif
int on_rq;
@@ -552,13 +787,35 @@ struct task_struct {
int normal_prio;
unsigned int rt_priority;
- const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
+ struct sched_dl_entity dl;
+ const struct sched_class *sched_class;
+
+#ifdef CONFIG_SCHED_CORE
+ struct rb_node core_node;
+ unsigned long core_cookie;
+ unsigned int core_occupation;
+#endif
+
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
#endif
- struct sched_dl_entity dl;
+
+#ifdef CONFIG_UCLAMP_TASK
+ /*
+ * Clamp values requested for a scheduling entity.
+ * Must be updated with task_rq_lock() held.
+ */
+ struct uclamp_se uclamp_req[UCLAMP_CNT];
+ /*
+ * Effective clamp values used for a scheduling entity.
+ * Must be updated with task_rq_lock() held.
+ */
+ struct uclamp_se uclamp[UCLAMP_CNT];
+#endif
+
+ struct sched_statistics stats;
#ifdef CONFIG_PREEMPT_NOTIFIERS
/* List of struct preempt_notifier: */
@@ -571,7 +828,14 @@ struct task_struct {
unsigned int policy;
int nr_cpus_allowed;
- cpumask_t cpus_allowed;
+ const cpumask_t *cpus_ptr;
+ cpumask_t *user_cpus_ptr;
+ cpumask_t cpus_mask;
+ void *migration_pending;
+#ifdef CONFIG_SMP
+ unsigned short migration_disabled;
+#endif
+ unsigned short migration_flags;
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
@@ -582,11 +846,21 @@ struct task_struct {
#ifdef CONFIG_TASKS_RCU
unsigned long rcu_tasks_nvcsw;
- bool rcu_tasks_holdout;
- struct list_head rcu_tasks_holdout_list;
+ u8 rcu_tasks_holdout;
+ u8 rcu_tasks_idx;
int rcu_tasks_idle_cpu;
+ struct list_head rcu_tasks_holdout_list;
#endif /* #ifdef CONFIG_TASKS_RCU */
+#ifdef CONFIG_TASKS_TRACE_RCU
+ int trc_reader_nesting;
+ int trc_ipi_to_cpu;
+ union rcu_special trc_reader_special;
+ struct list_head trc_holdout_list;
+ struct list_head trc_blkd_node;
+ int trc_blkd_cpu;
+#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
+
struct sched_info sched_info;
struct list_head tasks;
@@ -598,12 +872,6 @@ struct task_struct {
struct mm_struct *mm;
struct mm_struct *active_mm;
- /* Per-thread vma caching: */
- struct vmacache vmacache;
-
-#ifdef SPLIT_RSS_COUNTING
- struct task_rss_stat rss_stat;
-#endif
int exit_state;
int exit_code;
int exit_signal;
@@ -619,12 +887,27 @@ struct task_struct {
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
unsigned sched_migrated:1;
- unsigned sched_remote_wakeup:1;
+
/* Force alignment to the next boundary: */
unsigned :0;
/* Unserialized, strictly 'current' */
+ /*
+ * This field must not be in the scheduler word above due to wakelist
+ * queueing no longer being serialized by p->on_cpu. However:
+ *
+ * p->XXX = X; ttwu()
+ * schedule() if (p->on_rq && ..) // false
+ * smp_mb__after_spinlock(); if (smp_load_acquire(&p->on_cpu) && //true
+ * deactivate_task() ttwu_queue_wakelist())
+ * p->on_rq = 0; p->sched_remote_wakeup = Y;
+ *
+ * guarantees all stores of 'current' are visible before
+ * ->sched_remote_wakeup gets used, so it can be in this word.
+ */
+ unsigned sched_remote_wakeup:1;
+
/* Bit to tell LSMs we're in execve(): */
unsigned in_execve:1;
unsigned in_iowait:1;
@@ -632,10 +915,11 @@ struct task_struct {
unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
- unsigned memcg_may_oom:1;
-#ifndef CONFIG_SLOB
- unsigned memcg_kmem_skip_account:1;
+ unsigned in_user_fault:1;
#endif
+#ifdef CONFIG_LRU_GEN
+ /* whether the LRU algorithm may apply to this access */
+ unsigned in_lru_fault:1;
#endif
#ifdef CONFIG_COMPAT_BRK
unsigned brk_randomized:1;
@@ -643,6 +927,33 @@ struct task_struct {
#ifdef CONFIG_CGROUPS
/* disallow userland-initiated cgroup migration */
unsigned no_cgroup_migration:1;
+ /* task is frozen/stopped (used by the cgroup freezer) */
+ unsigned frozen:1;
+#endif
+#ifdef CONFIG_BLK_CGROUP
+ unsigned use_memdelay:1;
+#endif
+#ifdef CONFIG_PSI
+ /* Stalled due to lack of memory */
+ unsigned in_memstall:1;
+#endif
+#ifdef CONFIG_PAGE_OWNER
+ /* Used by page_owner=on to detect recursion in page tracking. */
+ unsigned in_page_owner:1;
+#endif
+#ifdef CONFIG_EVENTFD
+ /* Recursion prevention for eventfd_signal() */
+ unsigned in_eventfd:1;
+#endif
+#ifdef CONFIG_IOMMU_SVA
+ unsigned pasid_activated:1;
+#endif
+#ifdef CONFIG_CPU_SUP_INTEL
+ unsigned reported_split_lock:1;
+#endif
+#ifdef CONFIG_TASK_DELAY_ACCT
+ /* delay due to memory thrashing */
+ unsigned in_thrashing:1;
#endif
unsigned long atomic_flags; /* Flags requiring atomic access. */
@@ -652,7 +963,7 @@ struct task_struct {
pid_t pid;
pid_t tgid;
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
/* Canary value for the -fstack-protector GCC feature: */
unsigned long stack_canary;
#endif
@@ -685,7 +996,8 @@ struct task_struct {
struct list_head ptrace_entry;
/* PID/PID hash table linkage. */
- struct pid_link pids[PIDTYPE_MAX];
+ struct pid *thread_pid;
+ struct hlist_node pid_links[PIDTYPE_MAX];
struct list_head thread_group;
struct list_head thread_node;
@@ -697,6 +1009,9 @@ struct task_struct {
/* CLONE_CHILD_CLEARTID: */
int __user *clear_child_tid;
+ /* PF_KTHREAD | PF_IO_WORKER */
+ void *worker_private;
+
u64 utime;
u64 stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
@@ -720,15 +1035,17 @@ struct task_struct {
u64 start_time;
/* Boot based time in nsecs: */
- u64 real_start_time;
+ u64 start_boottime;
/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
unsigned long min_flt;
unsigned long maj_flt;
-#ifdef CONFIG_POSIX_TIMERS
- struct task_cputime cputime_expires;
- struct list_head cpu_timers[3];
+ /* Empty if CONFIG_POSIX_CPUTIMERS=n */
+ struct posix_cputimers posix_cputimers;
+
+#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
+ struct posix_cputimers_work posix_cputimers_work;
#endif
/* Process credentials: */
@@ -742,6 +1059,11 @@ struct task_struct {
/* Effective (overridable) subjective task credentials (COW): */
const struct cred __rcu *cred;
+#ifdef CONFIG_KEYS
+ /* Cached requested key. */
+ struct key *cached_requested_key;
+#endif
+
/*
* executable name, excluding path.
*
@@ -759,6 +1081,7 @@ struct task_struct {
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
unsigned long last_switch_count;
+ unsigned long last_switch_time;
#endif
/* Filesystem information: */
struct fs_struct *fs;
@@ -766,12 +1089,16 @@ struct task_struct {
/* Open file information: */
struct files_struct *files;
+#ifdef CONFIG_IO_URING
+ struct io_uring_task *io_uring;
+#endif
+
/* Namespaces: */
struct nsproxy *nsproxy;
/* Signal handlers: */
struct signal_struct *signal;
- struct sighand_struct *sighand;
+ struct sighand_struct __rcu *sighand;
sigset_t blocked;
sigset_t real_blocked;
/* Restored if set_restore_sigmask() was used: */
@@ -783,16 +1110,19 @@ struct task_struct {
struct callback_head *task_works;
- struct audit_context *audit_context;
+#ifdef CONFIG_AUDIT
#ifdef CONFIG_AUDITSYSCALL
+ struct audit_context *audit_context;
+#endif
kuid_t loginuid;
unsigned int sessionid;
#endif
struct seccomp seccomp;
+ struct syscall_user_dispatch syscall_dispatch;
/* Thread group tracking: */
- u32 parent_exec_id;
- u32 self_exec_id;
+ u64 parent_exec_id;
+ u64 self_exec_id;
/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
spinlock_t alloc_lock;
@@ -804,8 +1134,7 @@ struct task_struct {
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task: */
- struct rb_root pi_waiters;
- struct rb_node *pi_waiters_leftmost;
+ struct rb_root_cached pi_waiters;
/* Updated under owner's pi_lock and rq lock */
struct task_struct *pi_top_task;
/* Deadlock detection and priority inheritance handling: */
@@ -817,20 +1146,20 @@ struct task_struct {
struct mutex_waiter *blocked_on;
#endif
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ int non_block_count;
+#endif
+
#ifdef CONFIG_TRACE_IRQFLAGS
- unsigned int irq_events;
- unsigned long hardirq_enable_ip;
- unsigned long hardirq_disable_ip;
- unsigned int hardirq_enable_event;
- unsigned int hardirq_disable_event;
- int hardirqs_enabled;
- int hardirq_context;
- unsigned long softirq_disable_ip;
- unsigned long softirq_enable_ip;
- unsigned int softirq_disable_event;
- unsigned int softirq_enable_event;
+ struct irqtrace_events irqtrace;
+ unsigned int hardirq_threaded;
+ u64 hardirq_chain_key;
int softirqs_enabled;
int softirq_context;
+ int irq_config;
+#endif
+#ifdef CONFIG_PREEMPT_RT
+ int softirq_disable_cnt;
#endif
#ifdef CONFIG_LOCKDEP
@@ -839,10 +1168,9 @@ struct task_struct {
int lockdep_depth;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
- gfp_t lockdep_reclaim_gfp;
#endif
-#ifdef CONFIG_UBSAN
+#if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
unsigned int in_ubsan;
#endif
@@ -852,10 +1180,8 @@ struct task_struct {
/* Stacked block device info: */
struct bio_list *bio_list;
-#ifdef CONFIG_BLOCK
/* Stack plugging: */
struct blk_plug *plug;
-#endif
/* VM state: */
struct reclaim_state *reclaim_state;
@@ -864,11 +1190,18 @@ struct task_struct {
struct io_context *io_context;
+#ifdef CONFIG_COMPACTION
+ struct capture_control *capture_control;
+#endif
/* Ptrace state: */
unsigned long ptrace_message;
- siginfo_t *last_siginfo;
+ kernel_siginfo_t *last_siginfo;
struct task_io_accounting ioac;
+#ifdef CONFIG_PSI
+ /* Pressure stall state */
+ unsigned int psi_flags;
+#endif
#ifdef CONFIG_TASK_XACCT
/* Accumulated RSS usage: */
u64 acct_rss_mem1;
@@ -880,8 +1213,8 @@ struct task_struct {
#ifdef CONFIG_CPUSETS
/* Protected by ->alloc_lock: */
nodemask_t mems_allowed;
- /* Seqence number to catch updates: */
- seqcount_t mems_allowed_seq;
+ /* Sequence number to catch updates: */
+ seqcount_spinlock_t mems_allowed_seq;
int cpuset_mem_spread_rotor;
int cpuset_slab_spread_rotor;
#endif
@@ -891,8 +1224,9 @@ struct task_struct {
/* cg_list protected by css_set_lock and tsk->alloc_lock: */
struct list_head cg_list;
#endif
-#ifdef CONFIG_INTEL_RDT_A
- int closid;
+#ifdef CONFIG_X86_CPU_RESCTRL
+ u32 closid;
+ u32 rmid;
#endif
#ifdef CONFIG_FUTEX
struct robust_list_head __user *robust_list;
@@ -901,9 +1235,11 @@ struct task_struct {
#endif
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
+ struct mutex futex_exit_mutex;
+ unsigned int futex_state;
#endif
#ifdef CONFIG_PERF_EVENTS
- struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
+ struct perf_event_context *perf_event_ctxp;
struct mutex perf_event_mutex;
struct list_head perf_event_list;
#endif
@@ -928,8 +1264,15 @@ struct task_struct {
u64 last_sum_exec_runtime;
struct callback_head numa_work;
- struct list_head numa_entry;
- struct numa_group *numa_group;
+ /*
+ * This pointer is only modified for current in syscall and
+ * pagefault context (and for tasks being destroyed), so it can be read
+ * from any of the following contexts:
+ * - RCU read-side critical section
+ * - current->numa_group from everywhere
+ * - task's runqueue locked, task not running
+ */
+ struct numa_group __rcu *numa_group;
/*
* numa_faults is an array split into four regions:
@@ -959,9 +1302,26 @@ struct task_struct {
unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */
- struct tlbflush_unmap_batch tlb_ubc;
+#ifdef CONFIG_RSEQ
+ struct rseq __user *rseq;
+ u32 rseq_len;
+ u32 rseq_sig;
+ /*
+ * RmW on rseq_event_mask must be performed atomically
+ * with respect to preemption.
+ */
+ unsigned long rseq_event_mask;
+#endif
- struct rcu_head rcu;
+#ifdef CONFIG_SCHED_MM_CID
+ int mm_cid; /* Current cid in mm */
+ int last_mm_cid; /* Most recent cid in mm */
+ int migrate_from_cpu;
+ int mm_cid_active; /* Whether cid bitmap is active */
+ struct callback_head cid_work;
+#endif
+
+ struct tlbflush_unmap_batch tlb_ubc;
/* Cache last used pipe for splice(): */
struct pipe_inode_info *splice_pipe;
@@ -996,13 +1356,32 @@ struct task_struct {
u64 timer_slack_ns;
u64 default_timer_slack_ns;
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
unsigned int kasan_depth;
#endif
+#ifdef CONFIG_KCSAN
+ struct kcsan_ctx kcsan_ctx;
+#ifdef CONFIG_TRACE_IRQFLAGS
+ struct irqtrace_events kcsan_save_irqtrace;
+#endif
+#ifdef CONFIG_KCSAN_WEAK_MEMORY
+ int kcsan_stack_depth;
+#endif
+#endif
+
+#ifdef CONFIG_KMSAN
+ struct kmsan_ctx kmsan_ctx;
+#endif
+
+#if IS_ENABLED(CONFIG_KUNIT)
+ struct kunit *kunit_test;
+#endif
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack: */
int curr_ret_stack;
+ int curr_ret_depth;
/* Stack of return addresses for return function tracing: */
struct ftrace_ret_stack *ret_stack;
@@ -1021,16 +1400,15 @@ struct task_struct {
#endif
#ifdef CONFIG_TRACING
- /* State flags for use by tracers: */
- unsigned long trace;
-
/* Bitmask and counter of trace recursion: */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_KCOV
+ /* See kernel/kcov.c for more details. */
+
/* Coverage collection mode enabled for this task (0 if disabled): */
- enum kcov_mode kcov_mode;
+ unsigned int kcov_mode;
/* Size of the kcov_area: */
unsigned int kcov_size;
@@ -1040,6 +1418,15 @@ struct task_struct {
/* KCOV descriptor wired with this task or NULL: */
struct kcov *kcov;
+
+ /* KCOV common handle for remote coverage collection: */
+ u64 kcov_handle;
+
+ /* KCOV sequence number: */
+ int kcov_sequence;
+
+ /* Collect coverage from softirq context: */
+ unsigned int kcov_softirq;
#endif
#ifdef CONFIG_MEMCG
@@ -1049,6 +1436,13 @@ struct task_struct {
/* Number of pages to reclaim on returning to userland: */
unsigned int memcg_nr_pages_over_high;
+
+ /* Used by memcontrol for targeted memcg charge: */
+ struct mem_cgroup *active_memcg;
+#endif
+
+#ifdef CONFIG_BLK_CGROUP
+ struct gendisk *throttle_disk;
#endif
#ifdef CONFIG_UPROBES
@@ -1058,19 +1452,26 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
+ struct kmap_ctrl kmap_ctrl;
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
+# ifdef CONFIG_PREEMPT_RT
+ unsigned long saved_state_change;
+# endif
#endif
+ struct rcu_head rcu;
+ refcount_t rcu_users;
int pagefault_disabled;
#ifdef CONFIG_MMU
struct task_struct *oom_reaper_list;
+ struct timer_list oom_reaper_timer;
#endif
#ifdef CONFIG_VMAP_STACK
struct vm_struct *stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
/* A live task holds one reference: */
- atomic_t stack_refcount;
+ refcount_t stack_refcount;
#endif
#ifdef CONFIG_LIVEPATCH
int patch_state;
@@ -1079,6 +1480,66 @@ struct task_struct {
/* Used by LSM modules for access restriction: */
void *security;
#endif
+#ifdef CONFIG_BPF_SYSCALL
+ /* Used by BPF task local storage */
+ struct bpf_local_storage __rcu *bpf_storage;
+ /* Used for BPF run context */
+ struct bpf_run_ctx *bpf_ctx;
+#endif
+
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+ unsigned long lowest_stack;
+ unsigned long prev_lowest_stack;
+#endif
+
+#ifdef CONFIG_X86_MCE
+ void __user *mce_vaddr;
+ __u64 mce_kflags;
+ u64 mce_addr;
+ __u64 mce_ripv : 1,
+ mce_whole_page : 1,
+ __mce_reserved : 62;
+ struct callback_head mce_kill_me;
+ int mce_count;
+#endif
+
+#ifdef CONFIG_KRETPROBES
+ struct llist_head kretprobe_instances;
+#endif
+#ifdef CONFIG_RETHOOK
+ struct llist_head rethooks;
+#endif
+
+#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
+ /*
+ * If L1D flush is supported on mm context switch,
+ * this callback head is used to queue the kill work
+ * for tasks that are not running on SMT-disabled
+ * cores.
+ */
+ struct callback_head l1d_flush_kill;
+#endif
+
+#ifdef CONFIG_RV
+ /*
+ * Per-task RV monitor. Currently fixed at RV_PER_TASK_MONITORS
+ * entries. If we find justification for more monitors, we can
+ * think about adding more or developing a dynamic method. So
+ * far, none are justified.
+ */
+ union rv_task_monitor rv[RV_PER_TASK_MONITORS];
+#endif
+
+#ifdef CONFIG_USER_EVENTS
+ struct user_event_mm *user_event_mm;
+#endif
+
+ /*
+ * New fields for task_struct should be added above here, so that
+ * they are included in the randomized portion of task_struct.
+ */
+ randomized_struct_fields_end
+
/* CPU-specific state of this task: */
struct thread_struct thread;
@@ -1092,27 +1553,7 @@ struct task_struct {
static inline struct pid *task_pid(struct task_struct *task)
{
- return task->pids[PIDTYPE_PID].pid;
-}
-
-static inline struct pid *task_tgid(struct task_struct *task)
-{
- return task->group_leader->pids[PIDTYPE_PID].pid;
-}
-
-/*
- * Without tasklist or RCU lock it is not safe to dereference
- * the result of task_pgrp/task_session even if task == current,
- * we can race with another thread doing sys_setsid/sys_setpgid.
- */
-static inline struct pid *task_pgrp(struct task_struct *task)
-{
- return task->group_leader->pids[PIDTYPE_PGID].pid;
-}
-
-static inline struct pid *task_session(struct task_struct *task)
-{
- return task->group_leader->pids[PIDTYPE_SID].pid;
+ return task->thread_pid;
}
/*
@@ -1149,13 +1590,6 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk)
return tsk->tgid;
}
-extern pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
-
-static inline pid_t task_tgid_vnr(struct task_struct *tsk)
-{
- return pid_vnr(task_tgid(tsk));
-}
-
/**
* pid_alive - check that a task structure is not stale
* @p: Task structure to be checked.
@@ -1168,7 +1602,38 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
*/
static inline int pid_alive(const struct task_struct *p)
{
- return p->pids[PIDTYPE_PID].pid != NULL;
+ return p->thread_pid != NULL;
+}
+
+static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
+}
+
+static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
+}
+
+
+static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
+}
+
+static inline pid_t task_session_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
+}
+
+static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
+}
+
+static inline pid_t task_tgid_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
}
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
@@ -1188,31 +1653,53 @@ static inline pid_t task_ppid_nr(const struct task_struct *tsk)
return task_ppid_nr_ns(tsk, &init_pid_ns);
}
-static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+/* Obsolete, do not use: */
+static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
- return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
+ return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
-static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
+#define TASK_REPORT_IDLE (TASK_REPORT + 1)
+#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
+
+static inline unsigned int __task_state_index(unsigned int tsk_state,
+ unsigned int tsk_exit_state)
{
- return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
-}
+ unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT;
+ BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
-static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+ if (tsk_state == TASK_IDLE)
+ state = TASK_REPORT_IDLE;
+
+ /*
+ * We're lying here, but rather than expose a completely new task state
+ * to userspace, we can make this appear as if the task has gone through
+ * a regular rt_mutex_lock() call.
+ */
+ if (tsk_state == TASK_RTLOCK_WAIT)
+ state = TASK_UNINTERRUPTIBLE;
+
+ return fls(state);
+}
+
+static inline unsigned int task_state_index(struct task_struct *tsk)
{
- return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
+ return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state);
}
-static inline pid_t task_session_vnr(struct task_struct *tsk)
+static inline char task_index_to_char(unsigned int state)
{
- return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
+ static const char state_char[] = "RSDTtXZPI";
+
+ BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
+
+ return state_char[state];
}
-/* Obsolete, do not use: */
-static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+static inline char task_state_to_char(struct task_struct *tsk)
{
- return task_pgrp_nr_ns(tsk, &init_pid_ns);
+ return task_index_to_char(task_state_index(tsk));
}
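
Taken together, __task_state_index(), task_index_to_char() and task_state_to_char() produce the one-letter state that /proc/<pid>/stat and ps(1) display. A minimal sketch of a caller, assuming only the helpers defined above (the wrapper name is hypothetical):

#include <linux/sched.h>

/* Hypothetical wrapper: report a task's state the way ps(1) shows it:
 * 'R' running, 'S' sleeping, 'D' uninterruptible, 'T' stopped,
 * 't' traced, 'X' dead, 'Z' zombie, 'P' parked, 'I' idle.
 */
static char ps_state(struct task_struct *tsk)
{
	return task_state_to_char(tsk);
}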
/**
@@ -1234,10 +1721,11 @@ extern struct pid *cad_pid;
/*
* Per process flags
*/
+#define PF_VCPU 0x00000001 /* I'm a virtual CPU */
#define PF_IDLE 0x00000002 /* I am an IDLE thread */
#define PF_EXITING 0x00000004 /* Getting shut down */
-#define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */
-#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
+#define PF_POSTCOREDUMP 0x00000008 /* Coredumps should ignore this task */
+#define PF_IO_WORKER 0x00000010 /* Task is an IO worker */
#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
#define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */
@@ -1247,20 +1735,24 @@ extern struct pid *cad_pid;
#define PF_MEMALLOC 0x00000800 /* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */
-#define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */
+#define PF_USER_WORKER 0x00004000 /* Kernel thread cloned from userspace thread */
#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
-#define PF_FROZEN 0x00010000 /* Frozen for system suspend */
+#define PF__HOLE__00010000 0x00010000
#define PF_KSWAPD 0x00020000 /* I am kswapd */
#define PF_MEMALLOC_NOFS 0x00040000 /* All allocation requests will inherit GFP_NOFS */
#define PF_MEMALLOC_NOIO 0x00080000 /* All allocation requests will inherit GFP_NOIO */
-#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
+#define PF_LOCAL_THROTTLE 0x00100000 /* Throttle writes only against the bdi I write to,
+ * I am cleaning dirty pages from some other bdi. */
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
-#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
-#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
+#define PF__HOLE__00800000 0x00800000
+#define PF__HOLE__01000000 0x01000000
+#define PF__HOLE__02000000 0x02000000
+#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
-#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
-#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
+#define PF_MEMALLOC_PIN 0x10000000 /* Allocation context constrained to zones which allow long term pinning. */
+#define PF__HOLE__20000000 0x20000000
+#define PF__HOLE__40000000 0x40000000
#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */
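
These PF_* flags live in task->flags and are, by convention, modified only by the task itself, so plain loads suffice for simple checks. A small sketch, assuming only the definitions above:

#include <linux/sched.h>

/* Sketch: common PF_* checks on the current task. */
static bool running_in_kthread(void)
{
	return current->flags & PF_KTHREAD;
}

static bool task_is_exiting(void)
{
	return current->flags & PF_EXITING;
}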
/*
@@ -1291,7 +1783,7 @@ extern struct pid *cad_pid;
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
-static inline bool is_percpu_thread(void)
+static __always_inline bool is_percpu_thread(void)
{
#ifdef CONFIG_SMP
return (current->flags & PF_NO_SETAFFINITY) &&
@@ -1305,7 +1797,11 @@ static inline bool is_percpu_thread(void)
#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
-
+#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */
+#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/
+#define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */
+#define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */
+#define PFA_SPEC_SSB_NOEXEC 7 /* Speculative Store Bypass clear on execve() */
#define TASK_PFA_TEST(name, func) \
static inline bool task_##func(struct task_struct *p) \
@@ -1330,6 +1826,24 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
+TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
+TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
+TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
+
+TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
+TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
+TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
+
+TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+
+TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
+TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
+TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
+
+TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+
static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
@@ -1338,10 +1852,15 @@ current_restore_flags(unsigned long orig_flags, unsigned long flags)
}
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
-extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
+extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
+extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
+extern void release_user_cpus_ptr(struct task_struct *p);
+extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
+extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
+extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
#else
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -1352,10 +1871,21 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpuma
return -EINVAL;
return 0;
}
-#endif
+static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node)
+{
+ if (src->user_cpus_ptr)
+ return -EINVAL;
+ return 0;
+}
+static inline void release_user_cpus_ptr(struct task_struct *p)
+{
+ WARN_ON(p->user_cpus_ptr);
+}
-#ifndef cpu_relax_yield
-#define cpu_relax_yield() cpu_relax()
+static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
+{
+ return 0;
+}
#endif
extern int yield_to(struct task_struct *p, bool preempt);
@@ -1376,9 +1906,14 @@ static inline int task_nice(const struct task_struct *p)
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
+extern int available_idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
+extern void sched_set_fifo(struct task_struct *p);
+extern void sched_set_fifo_low(struct task_struct *p);
+extern void sched_set_normal(struct task_struct *p, int nice);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
+extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
/**
@@ -1387,7 +1922,7 @@ extern struct task_struct *idle_task(int cpu);
*
* Return: 1 if @p is an idle task. 0 otherwise.
*/
-static inline bool is_idle_task(const struct task_struct *p)
+static __always_inline bool is_idle_task(const struct task_struct *p)
{
return !!(p->flags & PF_IDLE);
}
@@ -1398,17 +1933,23 @@ extern void ia64_set_curr_task(int cpu, struct task_struct *p);
void yield(void);
union thread_union {
+#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
+ struct task_struct task;
+#endif
#ifndef CONFIG_THREAD_INFO_IN_TASK
struct thread_info thread_info;
#endif
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
+#ifndef CONFIG_THREAD_INFO_IN_TASK
+extern struct thread_info init_thread_info;
+#endif
+
+extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
+
#ifdef CONFIG_THREAD_INFO_IN_TASK
-static inline struct thread_info *task_thread_info(struct task_struct *task)
-{
- return &task->thread_info;
-}
+# define task_thread_info(task) (&(task)->thread_info)
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task) ((struct thread_info *)(task)->stack)
#endif
@@ -1427,6 +1968,11 @@ static inline struct thread_info *task_thread_info(struct task_struct *task)
extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
+/*
+ * find a task by its virtual pid and get a reference to the task struct
+ */
+extern struct task_struct *find_get_task_by_vpid(pid_t nr);
+
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
@@ -1444,14 +1990,26 @@ static inline void set_task_comm(struct task_struct *tsk, const char *from)
__set_task_comm(tsk, from, false);
}
-extern char *get_task_comm(char *to, struct task_struct *tsk);
+extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
+#define get_task_comm(buf, tsk) ({ \
+ BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
+ __get_task_comm(buf, sizeof(buf), tsk); \
+})
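
The BUILD_BUG_ON() in the macro means the destination must be a real array of TASK_COMM_LEN bytes, not a pointer. A sketch of typical use:

#include <linux/printk.h>
#include <linux/sched.h>

/* Sketch: snapshot the current task's name for a log message. */
static void log_current_comm(void)
{
	char comm[TASK_COMM_LEN];	/* size is checked at compile time */

	get_task_comm(comm, current);
	pr_info("called from task %s\n", comm);
}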
#ifdef CONFIG_SMP
-void scheduler_ipi(void);
-extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
+static __always_inline void scheduler_ipi(void)
+{
+ /*
+ * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
+ * TIF_NEED_RESCHED remotely (for the first time) will also send
+ * this IPI.
+ */
+ preempt_fold_need_resched();
+}
+extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
#else
static inline void scheduler_ipi(void) { }
-static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+static inline unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
{
return 1;
}
@@ -1471,6 +2029,12 @@ static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
clear_ti_thread_flag(task_thread_info(tsk), flag);
}
+static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
+ bool value)
+{
+ update_ti_thread_flag(task_thread_info(tsk), flag, value);
+}
+
static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
@@ -1506,31 +2070,92 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
* explicit rescheduling in places that are safe. The return
* value indicates whether a reschedule was done in fact.
* cond_resched_lock() will drop the spinlock before scheduling,
- * cond_resched_softirq() will enable bhs before scheduling.
*/
-#ifndef CONFIG_PREEMPT
-extern int _cond_resched(void);
-#else
-static inline int _cond_resched(void) { return 0; }
-#endif
+#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
+extern int __cond_resched(void);
+
+#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+
+void sched_dynamic_klp_enable(void);
+void sched_dynamic_klp_disable(void);
+
+DECLARE_STATIC_CALL(cond_resched, __cond_resched);
+
+static __always_inline int _cond_resched(void)
+{
+ return static_call_mod(cond_resched)();
+}
+
+#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+
+extern int dynamic_cond_resched(void);
+
+static __always_inline int _cond_resched(void)
+{
+ return dynamic_cond_resched();
+}
+
+#else /* !CONFIG_PREEMPTION */
+
+static inline int _cond_resched(void)
+{
+ klp_sched_try_switch();
+ return __cond_resched();
+}
+
+#endif /* PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
+
+#else /* CONFIG_PREEMPTION && !CONFIG_PREEMPT_DYNAMIC */
+
+static inline int _cond_resched(void)
+{
+ klp_sched_try_switch();
+ return 0;
+}
+
+#endif /* !CONFIG_PREEMPTION || CONFIG_PREEMPT_DYNAMIC */
#define cond_resched() ({ \
- ___might_sleep(__FILE__, __LINE__, 0); \
+ __might_resched(__FILE__, __LINE__, 0); \
_cond_resched(); \
})
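
Under CONFIG_PREEMPT_DYNAMIC the _cond_resched() call above may be patched down to a no-op at boot via the static call, so a loop written against the API is unchanged either way. A sketch, where the element type and per-item work are hypothetical:

#include <linux/list.h>
#include <linux/sched.h>

struct work_item {			/* hypothetical element type */
	struct list_head node;
};

static void process_all(struct list_head *items)
{
	struct work_item *it;

	list_for_each_entry(it, items, node) {
		/* ... per-item work (hypothetical) ... */
		cond_resched();		/* voluntary preemption point */
	}
}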
extern int __cond_resched_lock(spinlock_t *lock);
+extern int __cond_resched_rwlock_read(rwlock_t *lock);
+extern int __cond_resched_rwlock_write(rwlock_t *lock);
+
+#define MIGHT_RESCHED_RCU_SHIFT 8
+#define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
-#define cond_resched_lock(lock) ({ \
- ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
- __cond_resched_lock(lock); \
+#ifndef CONFIG_PREEMPT_RT
+/*
+ * Non RT kernels have an elevated preempt count due to the held lock,
+ * but are not allowed to be inside a RCU read side critical section
+ */
+# define PREEMPT_LOCK_RESCHED_OFFSETS PREEMPT_LOCK_OFFSET
+#else
+/*
+ * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in
+ * cond_resched*lock() has to take that into account because it checks for
+ * preempt_count() and rcu_preempt_depth().
+ */
+# define PREEMPT_LOCK_RESCHED_OFFSETS \
+ (PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
+#endif
+
+#define cond_resched_lock(lock) ({ \
+ __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
+ __cond_resched_lock(lock); \
})
-extern int __cond_resched_softirq(void);
+#define cond_resched_rwlock_read(lock) ({ \
+ __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
+ __cond_resched_rwlock_read(lock); \
+})
-#define cond_resched_softirq() ({ \
- ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
- __cond_resched_softirq(); \
+#define cond_resched_rwlock_write(lock) ({ \
+ __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
+ __cond_resched_rwlock_write(lock); \
})
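
The *_lock() variants drop the lock, reschedule if needed, and re-acquire it; a nonzero return tells the caller the lock was released in between, so any cached state must be revalidated. A sketch with a hypothetical lock and list:

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(scan_lock);	/* hypothetical */
static LIST_HEAD(scan_list);		/* hypothetical */

struct scan_entry {
	struct list_head node;
};

static void scan_all(void)
{
restart:
	spin_lock(&scan_lock);
	{
		struct scan_entry *e;

		list_for_each_entry(e, &scan_list, node) {
			/* Nonzero: the lock was dropped and re-taken, so
			 * the list may have changed under us; start over.
			 */
			if (cond_resched_lock(&scan_lock)) {
				spin_unlock(&scan_lock);
				goto restart;
			}
		}
	}
	spin_unlock(&scan_lock);
}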
static inline void cond_resched_rcu(void)
@@ -1542,20 +2167,78 @@ static inline void cond_resched_rcu(void)
#endif
}
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
+extern bool preempt_model_none(void);
+extern bool preempt_model_voluntary(void);
+extern bool preempt_model_full(void);
+
+#else
+
+static inline bool preempt_model_none(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_NONE);
+}
+static inline bool preempt_model_voluntary(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
+}
+static inline bool preempt_model_full(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT);
+}
+
+#endif
+
+static inline bool preempt_model_rt(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_RT);
+}
+
+/*
+ * Does the preemption model allow non-cooperative preemption?
+ *
+ * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
+ * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
+ * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
+ * PREEMPT_NONE model.
+ */
+static inline bool preempt_model_preemptible(void)
+{
+ return preempt_model_full() || preempt_model_rt();
+}
+
/*
* Does a critical section need to be broken due to another
- * task waiting?: (technically does not depend on CONFIG_PREEMPT,
+ * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
* but a general need for low latency)
*/
static inline int spin_needbreak(spinlock_t *lock)
{
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
return spin_is_contended(lock);
#else
return 0;
#endif
}
+/*
+ * Check if a rwlock is contended.
+ * Returns non-zero if there is another task waiting on the rwlock.
+ * Returns zero if the lock is not contended or the system / underlying
+ * rwlock implementation does not support contention detection.
+ * Technically does not depend on CONFIG_PREEMPTION, but a general need
+ * for low latency.
+ */
+static inline int rwlock_needbreak(rwlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+ return rwlock_is_contended(lock);
+#else
+ return 0;
+#endif
+}
+
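
spin_needbreak() lets a lock holder yield only when someone is actually contending, which matters mainly for latency on CONFIG_PREEMPTION kernels (elsewhere it compiles to 0). A sketch:

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/* Sketch: break a long critical section when the lock is contended. */
static void drain(spinlock_t *lock, struct list_head *queue)
{
	spin_lock(lock);
	while (!list_empty(queue)) {
		list_del(queue->next);	/* consume one entry */

		if (spin_needbreak(lock)) {
			spin_unlock(lock);
			cpu_relax();	/* give the waiter a chance */
			spin_lock(lock);
		}
	}
	spin_unlock(lock);
}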
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());
@@ -1568,11 +2251,7 @@ static __always_inline bool need_resched(void)
static inline unsigned int task_cpu(const struct task_struct *p)
{
-#ifdef CONFIG_THREAD_INFO_IN_TASK
- return p->cpu;
-#else
- return task_thread_info(p)->cpu;
-#endif
+ return READ_ONCE(task_thread_info(p)->cpu);
}
extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
@@ -1590,6 +2269,10 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
+extern bool sched_task_on_rq(struct task_struct *p);
+extern unsigned long get_wchan(struct task_struct *p);
+extern struct task_struct *cpu_curr_snapshot(int cpu);
+
/*
* In order to reduce various lock holder preemption latencies provide an
* interface to see if a vCPU is currently running or not.
@@ -1599,7 +2282,10 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
* running or not.
*/
#ifndef vcpu_is_preempted
-# define vcpu_is_preempted(cpu) false
+static inline bool vcpu_is_preempted(int cpu)
+{
+ return false;
+}
#endif
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
@@ -1609,4 +2295,153 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif
+#ifdef CONFIG_SMP
+static inline bool owner_on_cpu(struct task_struct *owner)
+{
+ /*
+ * Due to the lock holder preemption issue, skip spinning if the
+ * task is not on a CPU or its CPU is preempted.
+ */
+ return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
+}
+
+/* Returns effective CPU energy utilization, as seen by the scheduler */
+unsigned long sched_cpu_util(int cpu);
+#endif /* CONFIG_SMP */
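
owner_on_cpu() is the core of optimistic spinning as mutexes and rwsems use it: spinning is only worthwhile while the owner is actually executing. A hedged sketch (the owner-tracking lock type is hypothetical):

#include <linux/sched.h>

struct my_lock {			/* hypothetical lock with owner tracking */
	struct task_struct *owner;
};

static bool spin_on_owner(struct my_lock *lock, struct task_struct *owner)
{
	while (READ_ONCE(lock->owner) == owner) {
		if (!owner_on_cpu(owner) || need_resched())
			return false;	/* stop spinning, go sleep */
		cpu_relax();
	}
	return true;			/* owner changed: try to acquire */
}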
+
+#ifdef CONFIG_RSEQ
+
+/*
+ * Map the event mask on the user-space ABI enum rseq_cs_flags
+ * for direct mask checks.
+ */
+enum rseq_event_mask_bits {
+ RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
+ RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
+ RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
+};
+
+enum rseq_event_mask {
+ RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
+ RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
+ RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
+};
+
+static inline void rseq_set_notify_resume(struct task_struct *t)
+{
+ if (t->rseq)
+ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+}
+
+void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
+
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+ struct pt_regs *regs)
+{
+ if (current->rseq)
+ __rseq_handle_notify_resume(ksig, regs);
+}
+
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+ struct pt_regs *regs)
+{
+ preempt_disable();
+ __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
+ preempt_enable();
+ rseq_handle_notify_resume(ksig, regs);
+}
+
+/* rseq_preempt() requires preemption to be disabled. */
+static inline void rseq_preempt(struct task_struct *t)
+{
+ __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
+ rseq_set_notify_resume(t);
+}
+
+/* rseq_migrate() requires preemption to be disabled. */
+static inline void rseq_migrate(struct task_struct *t)
+{
+ __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
+ rseq_set_notify_resume(t);
+}
+
+/*
+ * If the parent process has a registered restartable sequences area, the
+ * child inherits it. Unregister rseq for a clone with CLONE_VM set.
+ */
+static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
+{
+ if (clone_flags & CLONE_VM) {
+ t->rseq = NULL;
+ t->rseq_len = 0;
+ t->rseq_sig = 0;
+ t->rseq_event_mask = 0;
+ } else {
+ t->rseq = current->rseq;
+ t->rseq_len = current->rseq_len;
+ t->rseq_sig = current->rseq_sig;
+ t->rseq_event_mask = current->rseq_event_mask;
+ }
+}
+
+static inline void rseq_execve(struct task_struct *t)
+{
+ t->rseq = NULL;
+ t->rseq_len = 0;
+ t->rseq_sig = 0;
+ t->rseq_event_mask = 0;
+}
+
+#else
+
+static inline void rseq_set_notify_resume(struct task_struct *t)
+{
+}
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+ struct pt_regs *regs)
+{
+}
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+ struct pt_regs *regs)
+{
+}
+static inline void rseq_preempt(struct task_struct *t)
+{
+}
+static inline void rseq_migrate(struct task_struct *t)
+{
+}
+static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
+{
+}
+static inline void rseq_execve(struct task_struct *t)
+{
+}
+
+#endif
+
+#ifdef CONFIG_DEBUG_RSEQ
+
+void rseq_syscall(struct pt_regs *regs);
+
+#else
+
+static inline void rseq_syscall(struct pt_regs *regs)
+{
+}
+
+#endif
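
For context on the fields and hooks above: user space opts in through the rseq(2) system call, after which the kernel consults rseq_event_mask on preemption, migration and signal delivery. A minimal user-space registration sketch (the signature constant is the application's choice and hypothetical here; error handling elided):

#include <linux/rseq.h>
#include <sys/syscall.h>
#include <unistd.h>

#define MY_RSEQ_SIG 0x53053053			/* hypothetical signature */

/* The area must stay valid for the lifetime of the thread. */
static __thread struct rseq rs __attribute__((aligned(32)));

static int register_rseq(void)
{
	/* rseq(2): rseq(rseq_abi, rseq_len, flags, sig) */
	return syscall(__NR_rseq, &rs, sizeof(rs), 0, MY_RSEQ_SIG);
}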
+
+#ifdef CONFIG_SCHED_CORE
+extern void sched_core_free(struct task_struct *tsk);
+extern void sched_core_fork(struct task_struct *p);
+extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
+ unsigned long uaddr);
+#else
+static inline void sched_core_free(struct task_struct *tsk) { }
+static inline void sched_core_fork(struct task_struct *p) { }
+#endif
+
+extern void sched_set_stop_task(int cpu, struct task_struct *stop);
+
#endif
diff --git a/include/linux/sched/affinity.h b/include/linux/sched/affinity.h
new file mode 100644
index 000000000000..227f5be81bcd
--- /dev/null
+++ b/include/linux/sched/affinity.h
@@ -0,0 +1 @@
+#include <linux/sched.h>
diff --git a/include/linux/sched/autogroup.h b/include/linux/sched/autogroup.h
index 55cd496df884..704391cc1d20 100644
--- a/include/linux/sched/autogroup.h
+++ b/include/linux/sched/autogroup.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_AUTOGROUP_H
#define _LINUX_SCHED_AUTOGROUP_H
diff --git a/include/linux/sched/clock.h b/include/linux/sched/clock.h
index a55600ffdf4b..ca008f7d3615 100644
--- a/include/linux/sched/clock.h
+++ b/include/linux/sched/clock.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_CLOCK_H
#define _LINUX_SCHED_CLOCK_H
@@ -44,7 +45,7 @@ static inline u64 cpu_clock(int cpu)
return sched_clock();
}
-static inline u64 local_clock(void)
+static __always_inline u64 local_clock(void)
{
return sched_clock();
}
@@ -78,10 +79,8 @@ static inline u64 cpu_clock(int cpu)
return sched_clock_cpu(cpu);
}
-static inline u64 local_clock(void)
-{
- return sched_clock_cpu(raw_smp_processor_id());
-}
+extern u64 local_clock(void);
+
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
diff --git a/include/linux/sched/cond_resched.h b/include/linux/sched/cond_resched.h
new file mode 100644
index 000000000000..227f5be81bcd
--- /dev/null
+++ b/include/linux/sched/cond_resched.h
@@ -0,0 +1 @@
+#include <linux/sched.h>
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index 98ae0d05aa32..0ee96ea7a0e9 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_COREDUMP_H
#define _LINUX_SCHED_COREDUMP_H
@@ -56,7 +57,8 @@ static inline int get_dumpable(struct mm_struct *mm)
#endif
/* leave room for more dump flags */
#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
-#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */
+#define MMF_VM_HUGEPAGE 17 /* set when mm is available for khugepaged */
/*
* This one-shot flag is dropped due to necessity of changing exe once again
* on NFS restore
@@ -69,9 +71,24 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */
#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
+#define MMF_OOM_REAP_QUEUED 25 /* mm was queued for oom_reaper */
+#define MMF_MULTIPROCESS 26 /* mm is shared between processes */
+/*
+ * MMF_HAS_PINNED: Whether this mm has pinned any pages. This may either be
+ * replaced in the future by mm.pinned_vm once it becomes stable, or grow into
+ * a counter on its own. We're aggressive on this bit for now: even if the
+ * pinned pages were unpinned later on, we'll still keep this bit set for the
+ * lifetime of this mm, just for simplicity.
+ */
+#define MMF_HAS_PINNED 27 /* FOLL_PIN has run, never cleared */
+
+#define MMF_HAS_MDWE 28
+#define MMF_HAS_MDWE_MASK (1 << MMF_HAS_MDWE)
+
#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
- MMF_DISABLE_THP_MASK)
+ MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK)
+#define MMF_VM_MERGE_ANY 29
#endif /* _LINUX_SCHED_COREDUMP_H */
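
The MMF_* constants are bit numbers within mm->flags, so they pair with the standard bitops; the *_MASK variants exist for building composite masks such as MMF_INIT_MASK. A sketch:

#include <linux/bitops.h>
#include <linux/mm_types.h>
#include <linux/sched/coredump.h>

/* Sketch: query and set MMF_* bits with the regular bitops. */
static bool mm_ever_pinned(struct mm_struct *mm)
{
	return test_bit(MMF_HAS_PINNED, &mm->flags);
}

static void mm_mark_multiprocess(struct mm_struct *mm)
{
	set_bit(MMF_MULTIPROCESS, &mm->flags);
}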
diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
index d2be2ccbb372..bdd31ab93bc5 100644
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_CPUFREQ_H
#define _LINUX_SCHED_CPUFREQ_H
@@ -7,13 +8,11 @@
* Interface between cpufreq drivers and the scheduler:
*/
-#define SCHED_CPUFREQ_RT (1U << 0)
-#define SCHED_CPUFREQ_DL (1U << 1)
-#define SCHED_CPUFREQ_IOWAIT (1U << 2)
-
-#define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
+#define SCHED_CPUFREQ_IOWAIT (1U << 0)
#ifdef CONFIG_CPU_FREQ
+struct cpufreq_policy;
+
struct update_util_data {
void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
};
@@ -22,6 +21,18 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
void (*func)(struct update_util_data *data, u64 time,
unsigned int flags));
void cpufreq_remove_update_util_hook(int cpu);
+bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy);
+
+static inline unsigned long map_util_freq(unsigned long util,
+ unsigned long freq, unsigned long cap)
+{
+ return freq * util / cap;
+}
+
+static inline unsigned long map_util_perf(unsigned long util)
+{
+ return util + (util >> 2);
+}
#endif /* CONFIG_CPU_FREQ */
#endif /* _LINUX_SCHED_CPUFREQ_H */
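
map_util_freq() scales utilization linearly into the frequency domain and map_util_perf() adds the customary 25% headroom (util + util/4), which is essentially what schedutil does when picking a target frequency. A worked sketch with illustrative numbers:

#include <linux/sched/cpufreq.h>

/* Sketch: with util = 512, cap = 1024 and max_freq = 2000000 kHz:
 *   map_util_perf(512)                 -> 512 + 128 = 640
 *   map_util_freq(640, 2000000, 1024)  -> 1250000 kHz
 */
static unsigned long pick_freq(unsigned long util, unsigned long max_freq,
			       unsigned long cap)
{
	return map_util_freq(map_util_perf(util), max_freq, cap);
}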
diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h
index 4c5b9735c1ae..5f8fd5b24a2e 100644
--- a/include/linux/sched/cputime.h
+++ b/include/linux/sched/cputime.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_CPUTIME_H
#define _LINUX_SCHED_CPUTIME_H
@@ -7,25 +8,17 @@
* cputime accounting APIs:
*/
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-#include <asm/cputime.h>
-
-#ifndef cputime_to_nsecs
-# define cputime_to_nsecs(__ct) \
- (cputime_to_usecs(__ct) * NSEC_PER_USEC)
-#endif
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-extern void task_cputime(struct task_struct *t,
+extern bool task_cputime(struct task_struct *t,
u64 *utime, u64 *stime);
extern u64 task_gtime(struct task_struct *t);
#else
-static inline void task_cputime(struct task_struct *t,
+static inline bool task_cputime(struct task_struct *t,
u64 *utime, u64 *stime)
{
*utime = t->utime;
*stime = t->stime;
+ return false;
}
static inline u64 task_gtime(struct task_struct *t)
@@ -53,14 +46,14 @@ static inline void task_cputime_scaled(struct task_struct *t,
extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
-
+extern void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
+ u64 *ut, u64 *st);
/*
* Thread group CPU time accounting.
*/
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
-void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
-
+void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples);
/*
* The following are functions that support scheduler-internal time accounting.
@@ -69,7 +62,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
*/
/**
- * get_running_cputimer - return &tsk->signal->cputimer if cputimer is running
+ * get_running_cputimer - return &tsk->signal->cputimer if cputimers are active
*
* @tsk: Pointer to target task.
*/
@@ -79,8 +72,11 @@ struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
{
struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
- /* Check if cputimer isn't running. This is accessed without locking. */
- if (!READ_ONCE(cputimer->running))
+ /*
+ * Check whether POSIX CPU timers are active. If not, the thread
+ * group accounting is not active either. Lockless check.
+ */
+ if (!READ_ONCE(tsk->signal->posix_cputimers.timers_active))
return NULL;
/*
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
index 975be862e083..7c83d4d5a971 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -1,7 +1,4 @@
-#ifndef _LINUX_SCHED_DEADLINE_H
-#define _LINUX_SCHED_DEADLINE_H
-
-#include <linux/sched.h>
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* SCHED_DEADLINE tasks has negative priorities, reflecting
@@ -9,6 +6,8 @@
* NORMAL/BATCH tasks.
*/
+#include <linux/sched.h>
+
#define MAX_DL_PRIO 0
static inline int dl_prio(int prio)
@@ -28,4 +27,10 @@ static inline bool dl_time_before(u64 a, u64 b)
return (s64)(a - b) < 0;
}
-#endif /* _LINUX_SCHED_DEADLINE_H */
+#ifdef CONFIG_SMP
+
+struct root_domain;
+extern void dl_add_task_root_domain(struct task_struct *p);
+extern void dl_clear_root_domain(struct root_domain *rd);
+
+#endif /* CONFIG_SMP */
diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h
index e0eaee54c5a4..b5035afa2396 100644
--- a/include/linux/sched/debug.h
+++ b/include/linux/sched/debug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_DEBUG_H
#define _LINUX_SCHED_DEBUG_H
@@ -6,13 +7,14 @@
*/
struct task_struct;
+struct pid_namespace;
extern void dump_cpu_task(int cpu);
/*
* Only dump TASK_* tasks. (0 for all tasks)
*/
-extern void show_state_filter(unsigned long state_filter);
+extern void show_state_filter(unsigned int state_filter);
static inline void show_state(void)
{
@@ -28,18 +30,20 @@ extern void show_regs(struct pt_regs *);
* task), SP is the stack pointer of the first frame that should be shown in the back
* trace (or NULL if the entire call-chain of the task should be shown).
*/
-extern void show_stack(struct task_struct *task, unsigned long *sp);
+extern void show_stack(struct task_struct *task, unsigned long *sp,
+ const char *loglvl);
extern void sched_show_task(struct task_struct *p);
#ifdef CONFIG_SCHED_DEBUG
struct seq_file;
-extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
+extern void proc_sched_show_task(struct task_struct *p,
+ struct pid_namespace *ns, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
#endif
/* Attach to any functions which should be ignored in wchan output. */
-#define __sched __attribute__((__section__(".sched.text")))
+#define __sched __section(".sched.text")
/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];
diff --git a/include/linux/sched/hotplug.h b/include/linux/sched/hotplug.h
index 752ac7e628d7..412cdaba33eb 100644
--- a/include/linux/sched/hotplug.h
+++ b/include/linux/sched/hotplug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_HOTPLUG_H
#define _LINUX_SCHED_HOTPLUG_H
@@ -10,8 +11,10 @@ extern int sched_cpu_activate(unsigned int cpu);
extern int sched_cpu_deactivate(unsigned int cpu);
#ifdef CONFIG_HOTPLUG_CPU
+extern int sched_cpu_wait_empty(unsigned int cpu);
extern int sched_cpu_dying(unsigned int cpu);
#else
+# define sched_cpu_wait_empty NULL
# define sched_cpu_dying NULL
#endif
diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h
index 5ca63ebad6b4..478084f9105e 100644
--- a/include/linux/sched/idle.h
+++ b/include/linux/sched/idle.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_IDLE_H
#define _LINUX_SCHED_IDLE_H
@@ -10,7 +11,11 @@ enum cpu_idle_type {
CPU_MAX_IDLE_TYPES
};
+#ifdef CONFIG_SMP
extern void wake_up_if_idle(int cpu);
+#else
+static inline void wake_up_if_idle(int cpu) { }
+#endif
/*
* Idle thread specific functions to determine the need_resched
@@ -18,12 +23,37 @@ extern void wake_up_if_idle(int cpu);
*/
#ifdef TIF_POLLING_NRFLAG
-static inline void __current_set_polling(void)
+#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
+
+static __always_inline void __current_set_polling(void)
{
- set_thread_flag(TIF_POLLING_NRFLAG);
+ arch_set_bit(TIF_POLLING_NRFLAG,
+ (unsigned long *)(&current_thread_info()->flags));
}
-static inline bool __must_check current_set_polling_and_test(void)
+static __always_inline void __current_clr_polling(void)
+{
+ arch_clear_bit(TIF_POLLING_NRFLAG,
+ (unsigned long *)(&current_thread_info()->flags));
+}
+
+#else
+
+static __always_inline void __current_set_polling(void)
+{
+ set_bit(TIF_POLLING_NRFLAG,
+ (unsigned long *)(&current_thread_info()->flags));
+}
+
+static __always_inline void __current_clr_polling(void)
+{
+ clear_bit(TIF_POLLING_NRFLAG,
+ (unsigned long *)(&current_thread_info()->flags));
+}
+
+#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H */
+
+static __always_inline bool __must_check current_set_polling_and_test(void)
{
__current_set_polling();
@@ -36,12 +66,7 @@ static inline bool __must_check current_set_polling_and_test(void)
return unlikely(tif_need_resched());
}
-static inline void __current_clr_polling(void)
-{
- clear_thread_flag(TIF_POLLING_NRFLAG);
-}
-
-static inline bool __must_check current_clr_polling_and_test(void)
+static __always_inline bool __must_check current_clr_polling_and_test(void)
{
__current_clr_polling();
@@ -68,7 +93,7 @@ static inline bool __must_check current_clr_polling_and_test(void)
}
#endif
-static inline void current_clr_polling(void)
+static __always_inline void current_clr_polling(void)
{
__current_clr_polling();
diff --git a/include/linux/sched/init.h b/include/linux/sched/init.h
index 127215045285..03542575fd33 100644
--- a/include/linux/sched/init.h
+++ b/include/linux/sched/init.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_INIT_H
#define _LINUX_SCHED_INIT_H
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
new file mode 100644
index 000000000000..fe1a46f30d24
--- /dev/null
+++ b/include/linux/sched/isolation.h
@@ -0,0 +1,73 @@
+#ifndef _LINUX_SCHED_ISOLATION_H
+#define _LINUX_SCHED_ISOLATION_H
+
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/tick.h>
+
+enum hk_type {
+ HK_TYPE_TIMER,
+ HK_TYPE_RCU,
+ HK_TYPE_MISC,
+ HK_TYPE_SCHED,
+ HK_TYPE_TICK,
+ HK_TYPE_DOMAIN,
+ HK_TYPE_WQ,
+ HK_TYPE_MANAGED_IRQ,
+ HK_TYPE_KTHREAD,
+ HK_TYPE_MAX
+};
+
+#ifdef CONFIG_CPU_ISOLATION
+DECLARE_STATIC_KEY_FALSE(housekeeping_overridden);
+extern int housekeeping_any_cpu(enum hk_type type);
+extern const struct cpumask *housekeeping_cpumask(enum hk_type type);
+extern bool housekeeping_enabled(enum hk_type type);
+extern void housekeeping_affine(struct task_struct *t, enum hk_type type);
+extern bool housekeeping_test_cpu(int cpu, enum hk_type type);
+extern void __init housekeeping_init(void);
+
+#else
+
+static inline int housekeeping_any_cpu(enum hk_type type)
+{
+ return smp_processor_id();
+}
+
+static inline const struct cpumask *housekeeping_cpumask(enum hk_type type)
+{
+ return cpu_possible_mask;
+}
+
+static inline bool housekeeping_enabled(enum hk_type type)
+{
+ return false;
+}
+
+static inline void housekeeping_affine(struct task_struct *t,
+ enum hk_type type) { }
+
+static inline bool housekeeping_test_cpu(int cpu, enum hk_type type)
+{
+ return true;
+}
+
+static inline void housekeeping_init(void) { }
+#endif /* CONFIG_CPU_ISOLATION */
+
+static inline bool housekeeping_cpu(int cpu, enum hk_type type)
+{
+#ifdef CONFIG_CPU_ISOLATION
+ if (static_branch_unlikely(&housekeeping_overridden))
+ return housekeeping_test_cpu(cpu, type);
+#endif
+ return true;
+}
+
+static inline bool cpu_is_isolated(int cpu)
+{
+ return !housekeeping_test_cpu(cpu, HK_TYPE_DOMAIN) ||
+ !housekeeping_test_cpu(cpu, HK_TYPE_TICK);
+}
+
+#endif /* _LINUX_SCHED_ISOLATION_H */
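
The housekeeping API is how subsystems keep deferrable work off isolated (isolcpus/nohz_full) CPUs: ask for the housekeeping mask of the relevant HK_TYPE_* and restrict placement to it. A sketch:

#include <linux/cpumask.h>
#include <linux/sched/isolation.h>

/* Sketch: pick an online CPU that is allowed to do misc housekeeping. */
static int pick_housekeeping_cpu(void)
{
	const struct cpumask *hk = housekeeping_cpumask(HK_TYPE_MISC);

	return cpumask_any_and(hk, cpu_online_mask);
}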
diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h
index 016afa0fb3bb..68876d0a7ef9 100644
--- a/include/linux/sched/jobctl.h
+++ b/include/linux/sched/jobctl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_JOBCTL_H
#define _LINUX_SCHED_JOBCTL_H
@@ -17,6 +18,11 @@ struct task_struct;
#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */
#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
+#define JOBCTL_TRAP_FREEZE_BIT 23 /* trap for cgroup freezer */
+#define JOBCTL_PTRACE_FROZEN_BIT 24 /* frozen for ptrace */
+
+#define JOBCTL_STOPPED_BIT 26 /* do_signal_stop() */
+#define JOBCTL_TRACED_BIT 27 /* ptrace_stop() */
#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT)
@@ -25,6 +31,11 @@ struct task_struct;
#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT)
#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT)
#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT)
+#define JOBCTL_TRAP_FREEZE (1UL << JOBCTL_TRAP_FREEZE_BIT)
+#define JOBCTL_PTRACE_FROZEN (1UL << JOBCTL_PTRACE_FROZEN_BIT)
+
+#define JOBCTL_STOPPED (1UL << JOBCTL_STOPPED_BIT)
+#define JOBCTL_TRACED (1UL << JOBCTL_TRACED_BIT)
#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h
index 4264bc6b2c27..83ec54b65e79 100644
--- a/include/linux/sched/loadavg.h
+++ b/include/linux/sched/loadavg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_LOADAVG_H
#define _LINUX_SCHED_LOADAVG_H
@@ -21,11 +22,27 @@ extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
#define EXP_5 2014 /* 1/exp(5sec/5min) */
#define EXP_15 2037 /* 1/exp(5sec/15min) */
-#define CALC_LOAD(load,exp,n) \
- load *= exp; \
- load += n*(FIXED_1-exp); \
- load >>= FSHIFT;
+/*
+ * a1 = a0 * e + a * (1 - e)
+ */
+static inline unsigned long
+calc_load(unsigned long load, unsigned long exp, unsigned long active)
+{
+ unsigned long newload;
+
+ newload = load * exp + active * (FIXED_1 - exp);
+ if (active >= load)
+ newload += FIXED_1-1;
+
+ return newload / FIXED_1;
+}
+
+extern unsigned long calc_load_n(unsigned long load, unsigned long exp,
+ unsigned long active, unsigned int n);
+
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
-extern void calc_global_load(unsigned long ticks);
+extern void calc_global_load(void);
#endif /* _LINUX_SCHED_LOADAVG_H */
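
calc_load() is the fixed-point exponentially weighted moving average behind /proc/loadavg, and LOAD_INT()/LOAD_FRAC() unpack the FSHIFT fixed-point result for display. A worked sketch of one 5-second tick of the 1-minute average (numbers illustrative):

#include <linux/printk.h>
#include <linux/sched/loadavg.h>

/* Sketch: FIXED_1 = 1 << FSHIFT = 2048, EXP_1 ~= FIXED_1/exp(5s/1min).
 * Starting from a 1-minute average of 10.00 with 12 runnable tasks,
 * one tick moves the average to roughly 10.16.
 */
static void loadavg_one_tick(void)
{
	unsigned long avg = 10 * FIXED_1;
	unsigned long active = 12 * FIXED_1;

	avg = calc_load(avg, EXP_1, active);
	pr_info("load: %lu.%02lu\n", LOAD_INT(avg), LOAD_FRAC(avg));
}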
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 2b24a6974847..8d89c8c4fac1 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H
@@ -6,11 +7,12 @@
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
+#include <linux/sync_core.h>
/*
* Routines for handling mm_structs
*/
-extern struct mm_struct * mm_alloc(void);
+extern struct mm_struct *mm_alloc(void);
/**
* mmgrab() - Pin a &struct mm_struct.
@@ -21,12 +23,12 @@ extern struct mm_struct * mm_alloc(void);
* will still exist later on and mmget_not_zero() has to be used before
* accessing it.
*
- * This is a preferred way to to pin @mm for a longer/unbounded amount
+ * This is a preferred way to pin @mm for a longer/unbounded amount
* of time.
*
* Use mmdrop() to release the reference acquired by mmgrab().
*
- * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
+ * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
* of &mm_struct.mm_count vs &mm_struct.mm_users.
*/
static inline void mmgrab(struct mm_struct *mm)
@@ -34,28 +36,81 @@ static inline void mmgrab(struct mm_struct *mm)
atomic_inc(&mm->mm_count);
}
-/* mmdrop drops the mm and the page tables */
-extern void __mmdrop(struct mm_struct *);
+static inline void smp_mb__after_mmgrab(void)
+{
+ smp_mb__after_atomic();
+}
+
+extern void __mmdrop(struct mm_struct *mm);
+
static inline void mmdrop(struct mm_struct *mm)
{
+ /*
+ * The implicit full barrier implied by atomic_dec_and_test() is
+ * required by the membarrier system call before returning to
+ * user-space, after storing to rq->curr.
+ */
if (unlikely(atomic_dec_and_test(&mm->mm_count)))
__mmdrop(mm);
}
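
mmgrab()/mmdrop() pin mm_count, i.e. the mm_struct object itself, whereas mmget()/mmput() pin mm_users, i.e. the address-space contents; the distinction is what lets the struct outlive its page tables. A sketch of the pinning pattern:

#include <linux/sched/mm.h>

/* Sketch: keep the mm_struct alive across a sleep without keeping its
 * address space populated (that would require mmget() instead).
 */
static void remember_mm(struct mm_struct *mm)
{
	mmgrab(mm);	/* pin the struct, not the mappings */
	/* ... stash mm, sleep, run deferred work ... */
	mmdrop(mm);	/* frees the mm if this was the last reference */
}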
-static inline void mmdrop_async_fn(struct work_struct *work)
+#ifdef CONFIG_PREEMPT_RT
+/*
+ * RCU callback for delayed mm drop. Not strictly RCU, but call_rcu() is
+ * by far the least expensive way to do that.
+ */
+static inline void __mmdrop_delayed(struct rcu_head *rhp)
{
- struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
+ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
+
__mmdrop(mm);
}
-static inline void mmdrop_async(struct mm_struct *mm)
+/*
+ * Invoked from finish_task_switch(). Delegates the heavy lifting on RT
+ * kernels via RCU.
+ */
+static inline void mmdrop_sched(struct mm_struct *mm)
+{
+ /* Provides a full memory barrier. See mmdrop() */
+ if (atomic_dec_and_test(&mm->mm_count))
+ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
+}
+#else
+static inline void mmdrop_sched(struct mm_struct *mm)
+{
+ mmdrop(mm);
+}
+#endif
+
+/* Helpers for lazy TLB mm refcounting */
+static inline void mmgrab_lazy_tlb(struct mm_struct *mm)
+{
+ if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
+ mmgrab(mm);
+}
+
+static inline void mmdrop_lazy_tlb(struct mm_struct *mm)
{
- if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
- INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
- schedule_work(&mm->async_put_work);
+ if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT)) {
+ mmdrop(mm);
+ } else {
+ /*
+ * mmdrop_lazy_tlb must provide a full memory barrier, see the
+ * membarrier comment in finish_task_switch(), which relies on this.
+ */
+ smp_mb();
}
}
+static inline void mmdrop_lazy_tlb_sched(struct mm_struct *mm)
+{
+ if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
+ mmdrop_sched(mm);
+ else
+ smp_mb(); /* see mmdrop_lazy_tlb() above */
+}
+
/**
* mmget() - Pin the address space associated with a &struct mm_struct.
* @mm: The address space to pin.
@@ -69,7 +124,7 @@ static inline void mmdrop_async(struct mm_struct *mm)
*
* Use mmput() to release the reference acquired by mmget().
*
- * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
+ * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
* of &mm_struct.mm_count vs &mm_struct.mm_users.
*/
static inline void mmget(struct mm_struct *mm)
@@ -88,7 +143,7 @@ extern void mmput(struct mm_struct *);
/* same as above but performs the slow path from the async context. Can
* be called from the atomic context as well
*/
-extern void mmput_async(struct mm_struct *);
+void mmput_async(struct mm_struct *);
#endif
/* Grab a reference to a task's mm, if it is not already going away */
@@ -99,8 +154,10 @@ extern struct mm_struct *get_task_mm(struct task_struct *task);
* succeeds.
*/
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
-/* Remove the current tasks stale references to the old mm_struct */
-extern void mm_release(struct task_struct *, struct mm_struct *);
+/* Remove the current task's stale references to the old mm_struct on exit() */
+extern void exit_mm_release(struct task_struct *, struct mm_struct *);
+/* Remove the current task's stale references to the old mm_struct on exec() */
+extern void exec_mm_release(struct task_struct *, struct mm_struct *);
#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
@@ -111,7 +168,16 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
#endif /* CONFIG_MEMCG */
#ifdef CONFIG_MMU
-extern void arch_pick_mmap_layout(struct mm_struct *mm);
+#ifndef arch_get_mmap_end
+#define arch_get_mmap_end(addr, len, flags) (TASK_SIZE)
+#endif
+
+#ifndef arch_get_mmap_base
+#define arch_get_mmap_base(addr, base) (base)
+#endif
+
+extern void arch_pick_mmap_layout(struct mm_struct *mm,
+ struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
unsigned long, unsigned long);
@@ -119,8 +185,18 @@ extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags);
+
+unsigned long
+generic_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+unsigned long
+generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
#else
-static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
+static inline void arch_pick_mmap_layout(struct mm_struct *mm,
+ struct rlimit *rlim_stack) {}
#endif
static inline bool in_vfork(struct task_struct *tsk)
@@ -143,7 +219,8 @@ static inline bool in_vfork(struct task_struct *tsk)
* another oom-unkillable task does this it should blame itself.
*/
rcu_read_lock();
- ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
+ ret = tsk->vfork_done &&
+ rcu_dereference(tsk->real_parent)->mm == tsk->mm;
rcu_read_unlock();
return ret;
@@ -153,20 +230,93 @@ static inline bool in_vfork(struct task_struct *tsk)
* Applies per-task gfp context to the given allocation flags.
* PF_MEMALLOC_NOIO implies GFP_NOIO
* PF_MEMALLOC_NOFS implies GFP_NOFS
+ * PF_MEMALLOC_PIN implies !GFP_MOVABLE
*/
static inline gfp_t current_gfp_context(gfp_t flags)
{
- /*
- * NOIO implies both NOIO and NOFS and it is a weaker context
- * so always make sure it makes precendence
- */
- if (unlikely(current->flags & PF_MEMALLOC_NOIO))
- flags &= ~(__GFP_IO | __GFP_FS);
- else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
- flags &= ~__GFP_FS;
+ unsigned int pflags = READ_ONCE(current->flags);
+
+ if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) {
+ /*
+ * NOIO implies both NOIO and NOFS and it is a weaker context
+ * so always make sure it makes precedence
+ */
+ if (pflags & PF_MEMALLOC_NOIO)
+ flags &= ~(__GFP_IO | __GFP_FS);
+ else if (pflags & PF_MEMALLOC_NOFS)
+ flags &= ~__GFP_FS;
+
+ if (pflags & PF_MEMALLOC_PIN)
+ flags &= ~__GFP_MOVABLE;
+ }
return flags;
}
+#ifdef CONFIG_LOCKDEP
+extern void __fs_reclaim_acquire(unsigned long ip);
+extern void __fs_reclaim_release(unsigned long ip);
+extern void fs_reclaim_acquire(gfp_t gfp_mask);
+extern void fs_reclaim_release(gfp_t gfp_mask);
+#else
+static inline void __fs_reclaim_acquire(unsigned long ip) { }
+static inline void __fs_reclaim_release(unsigned long ip) { }
+static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
+static inline void fs_reclaim_release(gfp_t gfp_mask) { }
+#endif
+
+/* Any memory-allocation retry loop should use
+ * memalloc_retry_wait(), and pass the flags for the most
+ * constrained allocation attempt that might have failed.
+ * This provides useful documentation of where loops are,
+ * and a central place to fine tune the waiting as the MM
+ * implementation changes.
+ */
+static inline void memalloc_retry_wait(gfp_t gfp_flags)
+{
+ /* We use io_schedule_timeout because waiting for memory
+ * typically included waiting for dirty pages to be
+ * written out, which requires IO.
+ */
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ gfp_flags = current_gfp_context(gfp_flags);
+ if (gfpflags_allow_blocking(gfp_flags) &&
+ !(gfp_flags & __GFP_NORETRY))
+ /* Probably waited already, no need for much more */
+ io_schedule_timeout(1);
+ else
+ /* Probably didn't wait, and has now released a lock,
+ * so now is a good time to wait
+ */
+ io_schedule_timeout(HZ/50);
+}
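A sketch of the retry-loop shape this helper is meant to document; the kzalloc() target is a hypothetical stand-in, and only memalloc_retry_wait() comes from the patch:

	struct foo *p;	/* hypothetical object */

	do {
		p = kzalloc(sizeof(*p), GFP_NOFS);
		if (!p)
			memalloc_retry_wait(GFP_NOFS);	/* most constrained flags used */
	} while (!p);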
+
+/**
+ * might_alloc - Mark possible allocation sites
+ * @gfp_mask: gfp_t flags that would be used to allocate
+ *
+ * Similar to might_sleep() and other annotations, this can be used in functions
+ * that might allocate, but often don't. Compiles to nothing without
+ * CONFIG_LOCKDEP. Includes a conditional might_sleep() if @gfp_mask allows blocking.
+ */
+static inline void might_alloc(gfp_t gfp_mask)
+{
+ fs_reclaim_acquire(gfp_mask);
+ fs_reclaim_release(gfp_mask);
+
+ might_sleep_if(gfpflags_allow_blocking(gfp_mask));
+}
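A sketch of the intended annotation pattern: a function that only allocates on its slow path can still assert the allocation context unconditionally. All names below are hypothetical:

	static void *lookup_or_create(struct cache *c, u32 key, gfp_t gfp_mask)
	{
		void *obj;

		might_alloc(gfp_mask);		/* checks fire even on the fast path */

		obj = cache_lookup(c, key);	/* fast path: no allocation */
		if (!obj)
			obj = kzalloc(c->obj_size, gfp_mask);
		return obj;
	}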
+
+/**
+ * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
+ *
+ * This function marks the beginning of the GFP_NOIO allocation scope.
+ * All further allocations will implicitly drop __GFP_IO flag and so
+ * they are safe for the IO critical section from the allocation recursion
+ * point of view. Use memalloc_noio_restore to end the scope with flags
+ * returned by this function.
+ *
+ * This function is safe to be used from any context.
+ */
static inline unsigned int memalloc_noio_save(void)
{
unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
@@ -174,11 +324,30 @@ static inline unsigned int memalloc_noio_save(void)
return flags;
}
+/**
+ * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
+ * @flags: Flags to restore.
+ *
+ * Ends the implicit GFP_NOIO scope started by memalloc_noio_save function.
+ * Always make sure that the given flags value is the return value from
+ * the pairing memalloc_noio_save() call.
+ */
static inline void memalloc_noio_restore(unsigned int flags)
{
current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
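The pairing the kernel-doc insists on, as a minimal sketch:

	unsigned int noio_flags = memalloc_noio_save();

	/* allocations here implicitly behave as GFP_NOIO */
	buf = kmalloc(len, GFP_KERNEL);

	memalloc_noio_restore(noio_flags);	/* must receive the saved value */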
+/**
+ * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
+ *
+ * This function marks the beginning of the GFP_NOFS allocation scope.
+ * All further allocations will implicitly drop __GFP_FS flag and so
+ * they are safe for the FS critical section from the allocation recursion
+ * point of view. Use memalloc_nofs_restore to end the scope with flags
+ * returned by this function.
+ *
+ * This function is safe to be used from any context.
+ */
static inline unsigned int memalloc_nofs_save(void)
{
unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
@@ -186,6 +355,14 @@ static inline unsigned int memalloc_nofs_save(void)
return flags;
}
+/**
+ * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
+ * @flags: Flags to restore.
+ *
+ * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save function.
+ * Always make sure that the given flags value is the return value from
+ * the pairing memalloc_nofs_save() call.
+ */
static inline void memalloc_nofs_restore(unsigned int flags)
{
current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
@@ -203,4 +380,107 @@ static inline void memalloc_noreclaim_restore(unsigned int flags)
current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}
+static inline unsigned int memalloc_pin_save(void)
+{
+ unsigned int flags = current->flags & PF_MEMALLOC_PIN;
+
+ current->flags |= PF_MEMALLOC_PIN;
+ return flags;
+}
+
+static inline void memalloc_pin_restore(unsigned int flags)
+{
+ current->flags = (current->flags & ~PF_MEMALLOC_PIN) | flags;
+}
+
+#ifdef CONFIG_MEMCG
+DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
+/**
+ * set_active_memcg - Starts the remote memcg charging scope.
+ * @memcg: memcg to charge.
+ *
+ * This function marks the beginning of the remote memcg charging scope. All the
+ * __GFP_ACCOUNT allocations till the end of the scope will be charged to the
+ * given memcg.
+ *
+ * NOTE: This function can nest. Users must save the return value and
+ * reset the previous value after their own charging scope is over.
+ */
+static inline struct mem_cgroup *
+set_active_memcg(struct mem_cgroup *memcg)
+{
+ struct mem_cgroup *old;
+
+ if (!in_task()) {
+ old = this_cpu_read(int_active_memcg);
+ this_cpu_write(int_active_memcg, memcg);
+ } else {
+ old = current->active_memcg;
+ current->active_memcg = memcg;
+ }
+
+ return old;
+}
+#else
+static inline struct mem_cgroup *
+set_active_memcg(struct mem_cgroup *memcg)
+{
+ return NULL;
+}
+#endif
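Because the scope nests, the documented discipline is save-then-restore; a sketch, with 'target' a hypothetical memcg pointer:

	struct mem_cgroup *old_memcg = set_active_memcg(target);

	/* __GFP_ACCOUNT allocations here are charged to 'target' */
	obj = kzalloc(size, GFP_KERNEL | __GFP_ACCOUNT);

	set_active_memcg(old_memcg);	/* restore the enclosing scope */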
+
+#ifdef CONFIG_MEMBARRIER
+enum {
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0),
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED = (1U << 1),
+ MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = (1U << 2),
+ MEMBARRIER_STATE_GLOBAL_EXPEDITED = (1U << 3),
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = (1U << 4),
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = (1U << 5),
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = (1U << 6),
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = (1U << 7),
+};
+
+enum {
+ MEMBARRIER_FLAG_SYNC_CORE = (1U << 0),
+ MEMBARRIER_FLAG_RSEQ = (1U << 1),
+};
+
+#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
+#include <asm/membarrier.h>
+#endif
+
+static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
+{
+ if (current->mm != mm)
+ return;
+ if (likely(!(atomic_read(&mm->membarrier_state) &
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
+ return;
+ sync_core_before_usermode();
+}
+
+extern void membarrier_exec_mmap(struct mm_struct *mm);
+
+extern void membarrier_update_current_mm(struct mm_struct *next_mm);
+
+#else
+#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
+static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
+ struct mm_struct *next,
+ struct task_struct *tsk)
+{
+}
+#endif
+static inline void membarrier_exec_mmap(struct mm_struct *mm)
+{
+}
+static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
+{
+}
+static inline void membarrier_update_current_mm(struct mm_struct *next_mm)
+{
+}
+#endif
+
#endif /* _LINUX_SCHED_MM_H */
diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
index 028d17b918a7..6d67e9a5af6b 100644
--- a/include/linux/sched/nohz.h
+++ b/include/linux/sched/nohz.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_NOHZ_H
#define _LINUX_SCHED_NOHZ_H
@@ -6,27 +7,19 @@
*/
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-extern void cpu_load_update_nohz_start(void);
-extern void cpu_load_update_nohz_stop(void);
-#else
-static inline void cpu_load_update_nohz_start(void) { }
-static inline void cpu_load_update_nohz_stop(void) { }
-#endif
-
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
-extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
-static inline void set_cpu_sd_state_idle(void) { }
#endif
#ifdef CONFIG_NO_HZ_COMMON
void calc_load_nohz_start(void);
+void calc_load_nohz_remote(struct rq *rq);
void calc_load_nohz_stop(void);
#else
static inline void calc_load_nohz_start(void) { }
+static inline void calc_load_nohz_remote(struct rq *rq) { }
static inline void calc_load_nohz_stop(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
@@ -36,8 +29,4 @@ extern void wake_up_nohz_cpu(int cpu);
static inline void wake_up_nohz_cpu(int cpu) { }
#endif
-#ifdef CONFIG_NO_HZ_FULL
-extern u64 scheduler_tick_max_deferment(void);
-#endif
-
#endif /* _LINUX_SCHED_NOHZ_H */
diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h
index 35d5fc77b4be..3988762efe15 100644
--- a/include/linux/sched/numa_balancing.h
+++ b/include/linux/sched/numa_balancing.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_NUMA_BALANCING_H
#define _LINUX_SCHED_NUMA_BALANCING_H
@@ -18,7 +19,7 @@
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
-extern void task_numa_free(struct task_struct *p);
+extern void task_numa_free(struct task_struct *p, bool final);
extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
int src_nid, int dst_cpu);
#else
@@ -33,7 +34,7 @@ static inline pid_t task_numa_group_id(struct task_struct *p)
static inline void set_numabalancing_state(bool enabled)
{
}
-static inline void task_numa_free(struct task_struct *p)
+static inline void task_numa_free(struct task_struct *p, bool final)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,
diff --git a/include/linux/sched/posix-timers.h b/include/linux/sched/posix-timers.h
new file mode 100644
index 000000000000..523a381d6c88
--- /dev/null
+++ b/include/linux/sched/posix-timers.h
@@ -0,0 +1 @@
+#include <linux/posix-timers.h>
diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
index 2cc450f6ec54..ab83d85e1183 100644
--- a/include/linux/sched/prio.h
+++ b/include/linux/sched/prio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_PRIO_H
#define _LINUX_SCHED_PRIO_H
@@ -10,16 +11,9 @@
* priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
* tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
* values are inverted: lower p->prio value means higher priority.
- *
- * The MAX_USER_RT_PRIO value allows the actual maximum
- * RT priority to be separate from the value exported to
- * user-space. This allows kernel threads to set their
- * priority to a value higher than any user task. Note:
- * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
*/
-#define MAX_USER_RT_PRIO 100
-#define MAX_RT_PRIO MAX_USER_RT_PRIO
+#define MAX_RT_PRIO 100
#define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
#define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
@@ -33,15 +27,6 @@
#define PRIO_TO_NICE(prio) ((prio) - DEFAULT_PRIO)
/*
- * 'User priority' is the nice value converted to something we
- * can work with better when scaling various scheduler parameters,
- * it's a [ 0 ... 39 ] range.
- */
-#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
-#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
-#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
-
-/*
* Convert nice value [19,-20] to rlimit style value [1,40].
*/
static inline long nice_to_rlimit(long nice)
diff --git a/include/linux/sched/rseq_api.h b/include/linux/sched/rseq_api.h
new file mode 100644
index 000000000000..cf2af72693e1
--- /dev/null
+++ b/include/linux/sched/rseq_api.h
@@ -0,0 +1 @@
+#include <linux/rseq.h>
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index f93329aba31a..994c25640e15 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_RT_H
#define _LINUX_SCHED_RT_H
@@ -17,6 +18,17 @@ static inline int rt_task(struct task_struct *p)
return rt_prio(p->prio);
}
+static inline bool task_is_realtime(struct task_struct *tsk)
+{
+ int policy = tsk->policy;
+
+ if (policy == SCHED_FIFO || policy == SCHED_RR)
+ return true;
+ if (policy == SCHED_DEADLINE)
+ return true;
+ return false;
+}
+
#ifdef CONFIG_RT_MUTEXES
/*
* Must hold either p->pi_lock or task_rq(p)->lock.
@@ -27,20 +39,12 @@ static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p)
}
extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task);
extern void rt_mutex_adjust_pi(struct task_struct *p);
-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-{
- return tsk->pi_blocked_on != NULL;
-}
#else
static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
return NULL;
}
# define rt_mutex_adjust_pi(p) do { } while (0)
-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-{
- return false;
-}
#endif
extern void normalize_rt_tasks(void);
diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h
new file mode 100644
index 000000000000..57bde66d95f7
--- /dev/null
+++ b/include/linux/sched/sd_flags.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sched-domains (multiprocessor balancing) flag declarations.
+ */
+
+#ifndef SD_FLAG
+# error "Incorrect import of SD flags definitions"
+#endif
+
+/*
+ * Hierarchical metaflags
+ *
+ * SHARED_CHILD: These flags are meant to be set from the base domain upwards.
+ * If a domain has this flag set, all of its children should have it set. This
+ * is usually because the flag describes some shared resource (all CPUs in that
+ * domain share the same resource), or because they are tied to a scheduling
+ * behaviour that we want to disable at some point in the hierarchy for
+ * scalability reasons.
+ *
+ * In those cases it doesn't make sense to have the flag set for a domain but
+ * not have it in (some of) its children: sched domains ALWAYS span their child
+ * domains, so operations done with parent domains will cover CPUs in the lower
+ * child domains.
+ *
+ *
+ * SHARED_PARENT: These flags are meant to be set from the highest domain
+ * downwards. If a domain has this flag set, all of its parents should have it
+ * set. This is usually for topology properties that start to appear above a
+ * certain level (e.g. domain starts spanning CPUs outside of the base CPU's
+ * socket).
+ */
+#define SDF_SHARED_CHILD 0x1
+#define SDF_SHARED_PARENT 0x2
+
+/*
+ * Behavioural metaflags
+ *
+ * NEEDS_GROUPS: These flags are only relevant if the domain they are set on has
+ * more than one group. This is usually for balancing flags (load balancing
+ * involves equalizing a metric between groups), or for flags describing some
+ * shared resource (which would be shared between groups).
+ */
+#define SDF_NEEDS_GROUPS 0x4
+
+/*
+ * Balance when about to become idle
+ *
+ * SHARED_CHILD: Set from the base domain up to cpuset.sched_relax_domain_level.
+ * NEEDS_GROUPS: Load balancing flag.
+ */
+SD_FLAG(SD_BALANCE_NEWIDLE, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+
+/*
+ * Balance on exec
+ *
+ * SHARED_CHILD: Set from the base domain up to the NUMA reclaim level.
+ * NEEDS_GROUPS: Load balancing flag.
+ */
+SD_FLAG(SD_BALANCE_EXEC, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+
+/*
+ * Balance on fork, clone
+ *
+ * SHARED_CHILD: Set from the base domain up to the NUMA reclaim level.
+ * NEEDS_GROUPS: Load balancing flag.
+ */
+SD_FLAG(SD_BALANCE_FORK, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+
+/*
+ * Balance on wakeup
+ *
+ * SHARED_CHILD: Set from the base domain up to cpuset.sched_relax_domain_level.
+ * NEEDS_GROUPS: Load balancing flag.
+ */
+SD_FLAG(SD_BALANCE_WAKE, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+
+/*
+ * Consider waking task on waking CPU.
+ *
+ * SHARED_CHILD: Set from the base domain up to the NUMA reclaim level.
+ */
+SD_FLAG(SD_WAKE_AFFINE, SDF_SHARED_CHILD)
+
+/*
+ * Domain members have different CPU capacities
+ *
+ * SHARED_PARENT: Set from the topmost domain down to the first domain where
+ * asymmetry is detected.
+ * NEEDS_GROUPS: Per-CPU capacity is asymmetric between groups.
+ */
+SD_FLAG(SD_ASYM_CPUCAPACITY, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
+
+/*
+ * Domain members have different CPU capacities spanning all unique CPU
+ * capacity values.
+ *
+ * SHARED_PARENT: Set from the topmost domain down to the first domain where
+ * all available CPU capacities are visible
+ * NEEDS_GROUPS: Per-CPU capacity is asymmetric between groups.
+ */
+SD_FLAG(SD_ASYM_CPUCAPACITY_FULL, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
+
+/*
+ * Domain members share CPU capacity (i.e. SMT)
+ *
+ * SHARED_CHILD: Set from the base domain up until spanned CPUs no longer share
+ * CPU capacity.
+ * NEEDS_GROUPS: Capacity is shared between groups.
+ */
+SD_FLAG(SD_SHARE_CPUCAPACITY, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+
+/*
+ * Domain members share CPU package resources (i.e. caches)
+ *
+ * SHARED_CHILD: Set from the base domain up until spanned CPUs no longer share
+ * the same cache(s).
+ * NEEDS_GROUPS: Caches are shared between groups.
+ */
+SD_FLAG(SD_SHARE_PKG_RESOURCES, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+
+/*
+ * Only a single load balancing instance
+ *
+ * SHARED_PARENT: Set for all NUMA levels above NODE. Could be set from a
+ * different level upwards, but it doesn't change that if a
+ * domain has this flag set, then all of its parents need to have
+ * it too (otherwise the serialization doesn't make sense).
+ * NEEDS_GROUPS: No point in preserving domain if it has a single group.
+ */
+SD_FLAG(SD_SERIALIZE, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
+
+/*
+ * Place busy tasks earlier in the domain
+ *
+ * SHARED_CHILD: Usually set on the SMT level. Technically could be set further
+ * up, but currently assumed to be set from the base domain
+ * upwards (see update_top_cache_domain()).
+ * NEEDS_GROUPS: Load balancing flag.
+ */
+SD_FLAG(SD_ASYM_PACKING, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+
+/*
+ * Prefer to place tasks in a sibling domain
+ *
+ * Set up until domains start spanning NUMA nodes. Close to being a SHARED_CHILD
+ * flag, but cleared below domains with SD_ASYM_CPUCAPACITY.
+ *
+ * NEEDS_GROUPS: Load balancing flag.
+ */
+SD_FLAG(SD_PREFER_SIBLING, SDF_NEEDS_GROUPS)
+
+/*
+ * sched_groups of this level overlap
+ *
+ * SHARED_PARENT: Set for all NUMA levels above NODE.
+ * NEEDS_GROUPS: Overlaps can only exist with more than one group.
+ */
+SD_FLAG(SD_OVERLAP, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
+
+/*
+ * Cross-node balancing
+ *
+ * SHARED_PARENT: Set for all NUMA levels above NODE.
+ * NEEDS_GROUPS: No point in preserving domain if it has a single group.
+ */
+SD_FLAG(SD_NUMA, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index c06d63b3a583..20099268fa25 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SIGNAL_H
#define _LINUX_SCHED_SIGNAL_H
@@ -7,16 +8,20 @@
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>
#include <linux/cred.h>
+#include <linux/refcount.h>
+#include <linux/posix-timers.h>
+#include <linux/mm_types.h>
+#include <asm/ptrace.h>
/*
* Types defining task->signal and task->sighand and APIs using them:
*/
struct sighand_struct {
- atomic_t count;
- struct k_sigaction action[_NSIG];
spinlock_t siglock;
+ refcount_t count;
wait_queue_head_t signalfd_wqh;
+ struct k_sigaction action[_NSIG];
};
/*
@@ -54,18 +59,28 @@ struct task_cputime_atomic {
/**
* struct thread_group_cputimer - thread group interval timer counts
* @cputime_atomic: atomic thread group interval timers.
- * @running: true when there are timers running and
- * @cputime_atomic receives updates.
- * @checking_timer: true when a thread in the group is in the
- * process of checking for thread group timers.
*
* This structure contains the version of task_cputime, above, that is
* used for thread group CPU timer calculations.
*/
struct thread_group_cputimer {
struct task_cputime_atomic cputime_atomic;
- bool running;
- bool checking_timer;
+};
+
+struct multiprocess_signals {
+ sigset_t signal;
+ struct hlist_node node;
+};
+
+struct core_thread {
+ struct task_struct *task;
+ struct core_thread *next;
+};
+
+struct core_state {
+ atomic_t nr_threads;
+ struct core_thread dumper;
+ struct completion startup;
};
/*
@@ -76,9 +91,10 @@ struct thread_group_cputimer {
* the locking of signal_struct.
*/
struct signal_struct {
- atomic_t sigcnt;
+ refcount_t sigcnt;
atomic_t live;
int nr_threads;
+ int quick_threads;
struct list_head thread_head;
wait_queue_head_t wait_chldexit; /* for wait4() */
@@ -89,20 +105,21 @@ struct signal_struct {
/* shared signal handling: */
struct sigpending shared_pending;
+ /* For collecting multiprocess signals during fork */
+ struct hlist_head multiprocess;
+
/* thread group exit support */
int group_exit_code;
- /* overloaded:
- * - notify group_exit_task when ->count is equal to notify_count
- * - everyone except group_exit_task is stopped during signal delivery
- * of fatal signals, group_exit_task processes the signal.
- */
+ /* notify group_exec_task when notify_count is less than or equal to 0 */
int notify_count;
- struct task_struct *group_exit_task;
+ struct task_struct *group_exec_task;
/* thread group stop support, overloads group_exit_code too */
int group_stop_count;
unsigned int flags; /* see SIGNAL_* flags below */
+ struct core_state *core_state; /* coredumping support */
+
/*
* PR_SET_CHILD_SUBREAPER marks a process, like a service
* manager, to re-parent orphan (double-forking) child processes
@@ -138,14 +155,12 @@ struct signal_struct {
*/
struct thread_group_cputimer cputimer;
- /* Earliest-expiration cache. */
- struct task_cputime cputime_expires;
-
- struct list_head cpu_timers[3];
-
#endif
+ /* Empty if CONFIG_POSIX_TIMERS=n */
+ struct posix_cputimers posix_cputimers;
- struct pid *leader_pid;
+ /* PID/PID hash table linkage. */
+ struct pid *pids[PIDTYPE_MAX];
#ifdef CONFIG_NO_HZ_FULL
atomic_t tick_dep_mask;
@@ -221,8 +236,16 @@ struct signal_struct {
struct mutex cred_guard_mutex; /* guard against foreign influences on
* credential calculations
- * (notably. ptrace) */
-};
+ * (notably ptrace)
+ * Deprecated; do not use in new code.
+ * Use exec_update_lock instead.
+ */
+ struct rw_semaphore exec_update_lock; /* Held while task_struct is
+ * being updated during exec,
+ * and may have inconsistent
+ * permissions.
+ */
+} __randomize_layout;
/*
* Bits in flags field of signal_struct.
@@ -230,7 +253,6 @@ struct signal_struct {
#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */
-#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */
/*
* Pending notifications to parent.
*/
@@ -246,31 +268,26 @@ struct signal_struct {
static inline void signal_set_stop_flags(struct signal_struct *sig,
unsigned int flags)
{
- WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
+ WARN_ON(sig->flags & SIGNAL_GROUP_EXIT);
sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}
-/* If true, all threads except ->group_exit_task have pending SIGKILL */
-static inline int signal_group_exit(const struct signal_struct *sig)
-{
- return (sig->flags & SIGNAL_GROUP_EXIT) ||
- (sig->group_exit_task != NULL);
-}
-
extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
-extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
+extern int dequeue_signal(struct task_struct *task, sigset_t *mask,
+ kernel_siginfo_t *info, enum pid_type *type);
-static inline int kernel_dequeue_signal(siginfo_t *info)
+static inline int kernel_dequeue_signal(void)
{
- struct task_struct *tsk = current;
- siginfo_t __info;
+ struct task_struct *task = current;
+ kernel_siginfo_t __info;
+ enum pid_type __type;
int ret;
- spin_lock_irq(&tsk->sighand->siglock);
- ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
- spin_unlock_irq(&tsk->sighand->siglock);
+ spin_lock_irq(&task->sighand->siglock);
+ ret = dequeue_signal(task, &task->blocked, &__info, &__type);
+ spin_unlock_irq(&task->sighand->siglock);
return ret;
}
@@ -278,42 +295,112 @@ static inline int kernel_dequeue_signal(siginfo_t *info)
static inline void kernel_signal_stop(void)
{
spin_lock_irq(&current->sighand->siglock);
- if (current->jobctl & JOBCTL_STOP_DEQUEUED)
- __set_current_state(TASK_STOPPED);
+ if (current->jobctl & JOBCTL_STOP_DEQUEUED) {
+ current->jobctl |= JOBCTL_STOPPED;
+ set_special_state(TASK_STOPPED);
+ }
spin_unlock_irq(&current->sighand->siglock);
schedule();
}
-extern int send_sig_info(int, struct siginfo *, struct task_struct *);
-extern int force_sigsegv(int, struct task_struct *);
-extern int force_sig_info(int, struct siginfo *, struct task_struct *);
-extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
-extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
-extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
- const struct cred *, u32);
+#ifdef __ia64__
+# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3
+#else
+# define ___ARCH_SI_IA64(_a1, _a2, _a3)
+#endif
+
+int force_sig_fault_to_task(int sig, int code, void __user *addr
+ ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
+ , struct task_struct *t);
+int force_sig_fault(int sig, int code, void __user *addr
+ ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr));
+int send_sig_fault(int sig, int code, void __user *addr
+ ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
+ , struct task_struct *t);
+
+int force_sig_mceerr(int code, void __user *, short);
+int send_sig_mceerr(int code, void __user *, short, struct task_struct *);
+
+int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
+int force_sig_pkuerr(void __user *addr, u32 pkey);
+int send_sig_perf(void __user *addr, u32 type, u64 sig_data);
+
+int force_sig_ptrace_errno_trap(int errno, void __user *addr);
+int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno);
+int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
+ struct task_struct *t);
+int force_sig_seccomp(int syscall, int reason, bool force_coredump);
+
+extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
+extern void force_sigsegv(int sig);
+extern int force_sig_info(struct kernel_siginfo *);
+extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
+extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
+extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *,
+ const struct cred *);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
-extern void force_sig(int, struct task_struct *);
+extern void force_sig(int);
+extern void force_fatal_sig(int);
+extern void force_exit_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
-extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
+extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
+static inline void clear_notify_signal(void)
+{
+ clear_thread_flag(TIF_NOTIFY_SIGNAL);
+ smp_mb__after_atomic();
+}
+
+/*
+ * Returns 'true' if kick_process() is needed to force a transition from
+ * user -> kernel to guarantee expedient run of TWA_SIGNAL based task_work.
+ */
+static inline bool __set_notify_signal(struct task_struct *task)
+{
+ return !test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) &&
+ !wake_up_state(task, TASK_INTERRUPTIBLE);
+}
+
+/*
+ * Called to break out of interruptible wait loops, and enter the
+ * exit_to_user_mode_loop().
+ */
+static inline void set_notify_signal(struct task_struct *task)
+{
+ if (__set_notify_signal(task))
+ kick_process(task);
+}
+
static inline int restart_syscall(void)
{
set_tsk_thread_flag(current, TIF_SIGPENDING);
return -ERESTARTNOINTR;
}
-static inline int signal_pending(struct task_struct *p)
+static inline int task_sigpending(struct task_struct *p)
{
return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}
+static inline int signal_pending(struct task_struct *p)
+{
+ /*
+ * TIF_NOTIFY_SIGNAL isn't really a signal, but it requires the same
+ * behavior in terms of ensuring that we break out of wait loops
+ * so that notify signal callbacks can be processed.
+ */
+ if (unlikely(test_tsk_thread_flag(p, TIF_NOTIFY_SIGNAL)))
+ return 1;
+ return task_sigpending(p);
+}
+
static inline int __fatal_signal_pending(struct task_struct *p)
{
return unlikely(sigismember(&p->pending.signal, SIGKILL));
@@ -321,10 +408,10 @@ static inline int __fatal_signal_pending(struct task_struct *p)
static inline int fatal_signal_pending(struct task_struct *p)
{
- return signal_pending(p) && __fatal_signal_pending(p);
+ return task_sigpending(p) && __fatal_signal_pending(p);
}
-static inline int signal_pending_state(long state, struct task_struct *p)
+static inline int signal_pending_state(unsigned int state, struct task_struct *p)
{
if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
return 0;
@@ -335,6 +422,20 @@ static inline int signal_pending_state(long state, struct task_struct *p)
}
/*
+ * This should only be used in fault handlers to decide whether we
+ * should stop the current fault routine to handle the signals
+ * instead, especially in the case where we have been interrupted by
+ * a VM_FAULT_RETRY.
+ */
+static inline bool fault_signal_pending(vm_fault_t fault_flags,
+ struct pt_regs *regs)
+{
+ return unlikely((fault_flags & VM_FAULT_RETRY) &&
+ (fatal_signal_pending(current) ||
+ (user_mode(regs) && signal_pending(current))));
+}
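A sketch of the call-site shape inside an architecture page-fault handler; details vary per arch, and this only shows where the check slots in:

	vm_fault_t fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs))
		return;		/* abandon the retry, let signal delivery run */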
+
+/*
* Reevaluate whether the task has signals pending delivery.
* Wake the task if so.
* This is required every time the blocked sigset_t changes.
@@ -342,18 +443,31 @@ static inline int signal_pending_state(long state, struct task_struct *p)
*/
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
+extern void calculate_sigpending(void);
extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
-static inline void signal_wake_up(struct task_struct *t, bool resume)
+static inline void signal_wake_up(struct task_struct *t, bool fatal)
{
- signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
+ unsigned int state = 0;
+ if (fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN)) {
+ t->jobctl &= ~(JOBCTL_STOPPED | JOBCTL_TRACED);
+ state = TASK_WAKEKILL | __TASK_TRACED;
+ }
+ signal_wake_up_state(t, state);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
- signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
+ unsigned int state = 0;
+ if (resume) {
+ t->jobctl &= ~JOBCTL_TRACED;
+ state = __TASK_TRACED;
+ }
+ signal_wake_up_state(t, state);
}
+void task_join_group_stop(struct task_struct *task);
+
#ifdef TIF_RESTORE_SIGMASK
/*
* Legacy restore_sigmask accessors. These are inefficient on
@@ -374,12 +488,21 @@ static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
static inline void set_restore_sigmask(void)
{
set_thread_flag(TIF_RESTORE_SIGMASK);
- WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}
+
+static inline void clear_tsk_restore_sigmask(struct task_struct *task)
+{
+ clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
+}
+
static inline void clear_restore_sigmask(void)
{
clear_thread_flag(TIF_RESTORE_SIGMASK);
}
+static inline bool test_tsk_restore_sigmask(struct task_struct *task)
+{
+ return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
+}
static inline bool test_restore_sigmask(void)
{
return test_thread_flag(TIF_RESTORE_SIGMASK);
@@ -395,7 +518,10 @@ static inline bool test_and_clear_restore_sigmask(void)
static inline void set_restore_sigmask(void)
{
current->restore_sigmask = true;
- WARN_ON(!test_thread_flag(TIF_SIGPENDING));
+}
+static inline void clear_tsk_restore_sigmask(struct task_struct *task)
+{
+ task->restore_sigmask = false;
}
static inline void clear_restore_sigmask(void)
{
@@ -405,6 +531,10 @@ static inline bool test_restore_sigmask(void)
{
return current->restore_sigmask;
}
+static inline bool test_tsk_restore_sigmask(struct task_struct *task)
+{
+ return task->restore_sigmask;
+}
static inline bool test_and_clear_restore_sigmask(void)
{
if (!current->restore_sigmask)
@@ -420,6 +550,16 @@ static inline void restore_saved_sigmask(void)
__set_current_blocked(&current->saved_sigmask);
}
+extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize);
+
+static inline void restore_saved_sigmask_unless(bool interrupted)
+{
+ if (interrupted)
+ WARN_ON(!signal_pending(current));
+ else
+ restore_saved_sigmask();
+}
+
static inline sigset_t *sigmask_to_save(void)
{
sigset_t *res = &current->blocked;
@@ -434,9 +574,19 @@ static inline int kill_cad_pid(int sig, int priv)
}
/* These can be the second arg to send_sig_info/send_group_sig_info. */
-#define SEND_SIG_NOINFO ((struct siginfo *) 0)
-#define SEND_SIG_PRIV ((struct siginfo *) 1)
-#define SEND_SIG_FORCED ((struct siginfo *) 2)
+#define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0)
+#define SEND_SIG_PRIV ((struct kernel_siginfo *) 1)
+
+static inline int __on_sig_stack(unsigned long sp)
+{
+#ifdef CONFIG_STACK_GROWSUP
+ return sp >= current->sas_ss_sp &&
+ sp - current->sas_ss_sp < current->sas_ss_size;
+#else
+ return sp > current->sas_ss_sp &&
+ sp - current->sas_ss_sp <= current->sas_ss_size;
+#endif
+}
/*
* True if we are on the alternate signal stack.
@@ -455,13 +605,7 @@ static inline int on_sig_stack(unsigned long sp)
if (current->sas_ss_flags & SS_AUTODISARM)
return 0;
-#ifdef CONFIG_STACK_GROWSUP
- return sp >= current->sas_ss_sp &&
- sp - current->sas_ss_sp < current->sas_ss_size;
-#else
- return sp > current->sas_ss_sp &&
- sp - current->sas_ss_sp <= current->sas_ss_size;
-#endif
+ return __on_sig_stack(sp);
}
static inline int sas_ss_flags(unsigned long sp)
@@ -527,25 +671,45 @@ extern bool current_is_single_threaded(void);
typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);
-static inline int get_nr_threads(struct task_struct *tsk)
+static inline
+struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
- return tsk->signal->nr_threads;
+ struct pid *pid;
+ if (type == PIDTYPE_PID)
+ pid = task_pid(task);
+ else
+ pid = task->signal->pids[type];
+ return pid;
}
-static inline bool thread_group_leader(struct task_struct *p)
+static inline struct pid *task_tgid(struct task_struct *task)
{
- return p->exit_signal >= 0;
+ return task->signal->pids[PIDTYPE_TGID];
}
-/* Do to the insanities of de_thread it is possible for a process
- * to have the pid of the thread group leader without actually being
- * the thread group leader. For iteration through the pids in proc
- * all we care about is that we have a task with the appropriate
- * pid, we don't actually care if we have the right task.
+/*
+ * Without tasklist or RCU lock it is not safe to dereference
+ * the result of task_pgrp/task_session even if task == current;
+ * we can race with another thread doing sys_setsid/sys_setpgid.
*/
-static inline bool has_group_leader_pid(struct task_struct *p)
+static inline struct pid *task_pgrp(struct task_struct *task)
+{
+ return task->signal->pids[PIDTYPE_PGID];
+}
+
+static inline struct pid *task_session(struct task_struct *task)
{
- return task_pid(p) == p->signal->leader_pid;
+ return task->signal->pids[PIDTYPE_SID];
+}
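Per the comment above, a caller not holding tasklist_lock needs RCU around the dereference; a minimal sketch:

	struct pid *pgrp;

	rcu_read_lock();
	pgrp = task_pgrp(task);		/* stable only while the read lock is held */
	/* ... use pgrp ... */
	rcu_read_unlock();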
+
+static inline int get_nr_threads(struct task_struct *task)
+{
+ return task->signal->nr_threads;
+}
+
+static inline bool thread_group_leader(struct task_struct *p)
+{
+ return p->exit_signal >= 0;
}
static inline
@@ -568,35 +732,43 @@ static inline int thread_group_empty(struct task_struct *p)
#define delay_group_leader(p) \
(thread_group_leader(p) && !thread_group_empty(p))
-extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
+extern bool thread_group_exited(struct pid *pid);
+
+extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
unsigned long *flags);
-static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+static inline struct sighand_struct *lock_task_sighand(struct task_struct *task,
unsigned long *flags)
{
struct sighand_struct *ret;
- ret = __lock_task_sighand(tsk, flags);
- (void)__cond_lock(&tsk->sighand->siglock, ret);
+ ret = __lock_task_sighand(task, flags);
+ (void)__cond_lock(&task->sighand->siglock, ret);
return ret;
}
-static inline void unlock_task_sighand(struct task_struct *tsk,
+static inline void unlock_task_sighand(struct task_struct *task,
unsigned long *flags)
{
- spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
+ spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}
-static inline unsigned long task_rlimit(const struct task_struct *tsk,
+#ifdef CONFIG_LOCKDEP
+extern void lockdep_assert_task_sighand_held(struct task_struct *task);
+#else
+static inline void lockdep_assert_task_sighand_held(struct task_struct *task) { }
+#endif
+
+static inline unsigned long task_rlimit(const struct task_struct *task,
unsigned int limit)
{
- return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
+ return READ_ONCE(task->signal->rlim[limit].rlim_cur);
}
-static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
+static inline unsigned long task_rlimit_max(const struct task_struct *task,
unsigned int limit)
{
- return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
+ return READ_ONCE(task->signal->rlim[limit].rlim_max);
}
static inline unsigned long rlimit(unsigned int limit)
diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
new file mode 100644
index 000000000000..59d3736c454c
--- /dev/null
+++ b/include/linux/sched/smt.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_SMT_H
+#define _LINUX_SCHED_SMT_H
+
+#include <linux/static_key.h>
+
+#ifdef CONFIG_SCHED_SMT
+extern struct static_key_false sched_smt_present;
+
+static __always_inline bool sched_smt_active(void)
+{
+ return static_branch_likely(&sched_smt_present);
+}
+#else
+static inline bool sched_smt_active(void) { return false; }
+#endif
+
+void arch_smt_update(void);
+
+#endif
diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h
index 141b74c53fad..0108a38bb64d 100644
--- a/include/linux/sched/stat.h
+++ b/include/linux/sched/stat.h
@@ -1,13 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_STAT_H
#define _LINUX_SCHED_STAT_H
#include <linux/percpu.h>
+#include <linux/kconfig.h>
/*
* Various counters maintained by the scheduler and fork(),
* exposed via /proc, sys.c or used by drivers via these APIs.
*
- * ( Note that all these values are aquired without locking,
+ * ( Note that all these values are acquired without locking,
* so they can only be relied on in narrow circumstances. )
*/
@@ -15,22 +17,14 @@ extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
-extern unsigned long nr_running(void);
+extern unsigned int nr_running(void);
extern bool single_task_running(void);
-extern unsigned long nr_iowait(void);
-extern unsigned long nr_iowait_cpu(int cpu);
-extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
+extern unsigned int nr_iowait(void);
+extern unsigned int nr_iowait_cpu(int cpu);
static inline int sched_info_on(void)
{
-#ifdef CONFIG_SCHEDSTATS
- return 1;
-#elif defined(CONFIG_TASK_DELAY_ACCT)
- extern int delayacct_on;
- return delayacct_on;
-#else
- return 0;
-#endif
+ return IS_ENABLED(CONFIG_SCHED_INFO);
}
#ifdef CONFIG_SCHEDSTATS
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 0f5ecd4d298e..5a64582b086b 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -1,85 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SYSCTL_H
#define _LINUX_SCHED_SYSCTL_H
#include <linux/types.h>
-struct ctl_table;
-
#ifdef CONFIG_DETECT_HUNG_TASK
-extern int sysctl_hung_task_check_count;
-extern unsigned int sysctl_hung_task_panic;
+/* used for hung_task and block/ */
extern unsigned long sysctl_hung_task_timeout_secs;
-extern int sysctl_hung_task_warnings;
-extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos);
#else
/* Avoid need for ifdefs elsewhere in the code */
enum { sysctl_hung_task_timeout_secs = 0 };
#endif
-extern unsigned int sysctl_sched_latency;
-extern unsigned int sysctl_sched_min_granularity;
-extern unsigned int sysctl_sched_wakeup_granularity;
-extern unsigned int sysctl_sched_child_runs_first;
-
enum sched_tunable_scaling {
SCHED_TUNABLESCALING_NONE,
SCHED_TUNABLESCALING_LOG,
SCHED_TUNABLESCALING_LINEAR,
SCHED_TUNABLESCALING_END,
};
-extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
-
-extern unsigned int sysctl_numa_balancing_scan_delay;
-extern unsigned int sysctl_numa_balancing_scan_period_min;
-extern unsigned int sysctl_numa_balancing_scan_period_max;
-extern unsigned int sysctl_numa_balancing_scan_size;
-
-#ifdef CONFIG_SCHED_DEBUG
-extern unsigned int sysctl_sched_migration_cost;
-extern unsigned int sysctl_sched_nr_migrate;
-extern unsigned int sysctl_sched_time_avg;
-
-int sched_proc_update_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *length,
- loff_t *ppos);
-#endif
-
-/*
- * control realtime throttling:
- *
- * /proc/sys/kernel/sched_rt_period_us
- * /proc/sys/kernel/sched_rt_runtime_us
- */
-extern unsigned int sysctl_sched_rt_period;
-extern int sysctl_sched_rt_runtime;
-#ifdef CONFIG_CFS_BANDWIDTH
-extern unsigned int sysctl_sched_cfs_bandwidth_slice;
-#endif
+#define NUMA_BALANCING_DISABLED 0x0
+#define NUMA_BALANCING_NORMAL 0x1
+#define NUMA_BALANCING_MEMORY_TIERING 0x2
-#ifdef CONFIG_SCHED_AUTOGROUP
-extern unsigned int sysctl_sched_autogroup_enabled;
+#ifdef CONFIG_NUMA_BALANCING
+extern int sysctl_numa_balancing_mode;
+#else
+#define sysctl_numa_balancing_mode 0
#endif
-extern int sysctl_sched_rr_timeslice;
-extern int sched_rr_timeslice;
-
-extern int sched_rr_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
-
-extern int sched_rt_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
-
-extern int sysctl_numa_balancing(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
-
-extern int sysctl_schedstats(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
-
#endif /* _LINUX_SCHED_SYSCTL_H */
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index c97e5f096927..537cbf9a2ade 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TASK_H
#define _LINUX_SCHED_TASK_H
@@ -7,10 +8,41 @@
*/
#include <linux/sched.h>
+#include <linux/uaccess.h>
struct task_struct;
struct rusage;
union thread_union;
+struct css_set;
+
+/* All the bits taken by the old clone syscall. */
+#define CLONE_LEGACY_FLAGS 0xffffffffULL
+
+struct kernel_clone_args {
+ u64 flags;
+ int __user *pidfd;
+ int __user *child_tid;
+ int __user *parent_tid;
+ const char *name;
+ int exit_signal;
+ u32 kthread:1;
+ u32 io_thread:1;
+ u32 user_worker:1;
+ u32 no_files:1;
+ u32 ignore_signals:1;
+ unsigned long stack;
+ unsigned long stack_size;
+ unsigned long tls;
+ pid_t *set_tid;
+ /* Number of elements in *set_tid */
+ size_t set_tid_size;
+ int cgroup;
+ int idle;
+ int (*fn)(void *);
+ void *fn_arg;
+ struct cgroup *cgrp;
+ struct css_set *cset;
+};
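A hedged sketch of how an in-kernel caller might fill this structure to spawn a kernel thread, modeled on the shape of kernel_thread()-style callers; the exact flag set and the fn/fn_arg values are illustrative, not a definitive API contract:

	struct kernel_clone_args args = {
		.flags		= CLONE_VM | CLONE_UNTRACED,
		.exit_signal	= 0,
		.kthread	= 1,
		.fn		= worker_fn,	/* hypothetical int (*)(void *) */
		.fn_arg		= worker_data,	/* hypothetical */
	};

	pid_t pid = kernel_clone(&args);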
/*
* This serializes "schedule()" and also protects
@@ -24,39 +56,28 @@ extern spinlock_t mmlist_lock;
extern union thread_union init_thread_union;
extern struct task_struct init_task;
-#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
-#endif /* #ifdef CONFIG_PROVE_RCU */
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
-extern void init_idle_bootup_task(struct task_struct *idle);
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
+extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
+extern void sched_post_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);
void __noreturn do_task_dead(void);
+void __noreturn make_task_dead(int signr);
+extern void mm_cache_init(void);
extern void proc_caches_init(void);
+extern void fork_init(void);
+
extern void release_task(struct task_struct * p);
-#ifdef CONFIG_HAVE_COPY_THREAD_TLS
-extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
- struct task_struct *, unsigned long);
-#else
-extern int copy_thread(unsigned long, unsigned long, unsigned long,
- struct task_struct *);
-
-/* Architectures that haven't opted into copy_thread_tls get the tls argument
- * via pt_regs, so ignore the tls argument passed via C. */
-static inline int copy_thread_tls(
- unsigned long clone_flags, unsigned long sp, unsigned long arg,
- struct task_struct *p, unsigned long tls)
-{
- return copy_thread(clone_flags, sp, arg, p);
-}
-#endif
+extern int copy_thread(struct task_struct *, const struct kernel_clone_args *);
+
extern void flush_thread(void);
#ifdef CONFIG_HAVE_EXIT_THREAD
@@ -66,16 +87,21 @@ static inline void exit_thread(struct task_struct *tsk)
{
}
#endif
-extern void do_group_exit(int);
+extern __noreturn void do_group_exit(int);
extern void exit_files(struct task_struct *);
-extern void exit_itimers(struct signal_struct *);
+extern void exit_itimers(struct task_struct *);
-extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
-extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
+extern pid_t kernel_clone(struct kernel_clone_args *kargs);
+struct task_struct *copy_process(struct pid *pid, int trace, int node,
+ struct kernel_clone_args *args);
+struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node);
struct task_struct *fork_idle(int);
-extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-extern long kernel_wait4(pid_t, int *, int, struct rusage *);
+extern pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name,
+ unsigned long flags);
+extern pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags);
+extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
+int kernel_wait(pid_t pid, int *stat);
extern void free_task(struct task_struct *tsk);
@@ -86,17 +112,30 @@ extern void sched_exec(void);
#define sched_exec() {}
#endif
-#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
+static inline struct task_struct *get_task_struct(struct task_struct *t)
+{
+ refcount_inc(&t->usage);
+ return t;
+}
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
{
- if (atomic_dec_and_test(&t->usage))
+ if (refcount_dec_and_test(&t->usage))
__put_task_struct(t);
}
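The refcount conversion keeps the usual get/put discipline; a minimal sketch:

	struct task_struct *t = get_task_struct(tsk);	/* takes a reference, returns tsk */

	/* t cannot be freed while the reference is held */

	put_task_struct(t);	/* frees on the final put */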
-struct task_struct *task_rcu_dereference(struct task_struct **ptask);
+static inline void put_task_struct_many(struct task_struct *t, int nr)
+{
+ if (refcount_sub_and_test(nr, &t->usage))
+ __put_task_struct(t);
+}
+
+void put_task_struct_rcu_user(struct task_struct *task);
+
+/* Free all architecture-specific resources held by a thread. */
+void release_thread(struct task_struct *dead_task);
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
@@ -104,6 +143,20 @@ extern int arch_task_struct_size __read_mostly;
# define arch_task_struct_size (sizeof(struct task_struct))
#endif
+#ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST
+/*
+ * If an architecture has not declared a thread_struct whitelist, we
+ * must assume something there may need to be copied to userspace.
+ */
+static inline void arch_thread_struct_whitelist(unsigned long *offset,
+ unsigned long *size)
+{
+ *offset = 0;
+ /* Handle dynamically sized thread_struct. */
+ *size = arch_task_struct_size - offsetof(struct task_struct, thread);
+}
+#endif
+
#ifdef CONFIG_VMAP_STACK
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
@@ -120,7 +173,7 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
* Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
* subscriptions and synchronises with wait4(). Also used in procfs. Also
* pins the final release of task.io_context. Also protects ->cpuset and
- * ->cgroup.subsys[]. And ->vfork_done.
+ * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
*
* Nests both inside and outside of read_lock(&tasklist_lock).
* It must not be nested with write_lock_irq(&tasklist_lock),
diff --git a/include/linux/sched/task_flags.h b/include/linux/sched/task_flags.h
new file mode 100644
index 000000000000..227f5be81bcd
--- /dev/null
+++ b/include/linux/sched/task_flags.h
@@ -0,0 +1 @@
+#include <linux/sched.h>
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index df6ea6665b31..f158b025c175 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TASK_STACK_H
#define _LINUX_SCHED_TASK_STACK_H
@@ -15,16 +16,20 @@
* try_get_task_stack() instead. task_stack_page will return a pointer
* that could get freed out from under you.
*/
-static inline void *task_stack_page(const struct task_struct *task)
+static __always_inline void *task_stack_page(const struct task_struct *task)
{
return task->stack;
}
#define setup_thread_stack(new,old) do { } while(0)
-static inline unsigned long *end_of_stack(const struct task_struct *task)
+static __always_inline unsigned long *end_of_stack(const struct task_struct *task)
{
+#ifdef CONFIG_STACK_GROWSUP
+ return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1;
+#else
return task->stack;
+#endif
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
@@ -60,7 +65,7 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline void *try_get_task_stack(struct task_struct *tsk)
{
- return atomic_inc_not_zero(&tsk->stack_refcount) ?
+ return refcount_inc_not_zero(&tsk->stack_refcount) ?
task_stack_page(tsk) : NULL;
}
@@ -74,10 +79,12 @@ static inline void *try_get_task_stack(struct task_struct *tsk)
static inline void put_task_stack(struct task_struct *tsk) {}
#endif
+void exit_task_stack_account(struct task_struct *tsk);
+
#define task_stack_end_corrupted(task) \
(*(end_of_stack(task)) != STACK_END_MAGIC)
-static inline int object_is_on_stack(void *obj)
+static inline int object_is_on_stack(const void *obj)
{
void *stack = task_stack_page(current);
diff --git a/include/linux/sched/thread_info_api.h b/include/linux/sched/thread_info_api.h
new file mode 100644
index 000000000000..2c60fbc16c08
--- /dev/null
+++ b/include/linux/sched/thread_info_api.h
@@ -0,0 +1 @@
+#include <linux/thread_info.h>
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 7d065abc7a47..816df6cc444e 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TOPOLOGY_H
#define _LINUX_SCHED_TOPOLOGY_H
@@ -10,27 +11,29 @@
*/
#ifdef CONFIG_SMP
-#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. */
-#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */
-#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */
-#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */
-#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */
-#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
-#define SD_ASYM_CPUCAPACITY 0x0040 /* Groups have different max cpu capacities */
-#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu capacity */
-#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */
-#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
-#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
-#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
-#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
-#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
-#define SD_NUMA 0x4000 /* cross-node balancing */
+/* Generate SD flag indexes */
+#define SD_FLAG(name, mflags) __##name,
+enum {
+ #include <linux/sched/sd_flags.h>
+ __SD_FLAG_CNT,
+};
+#undef SD_FLAG
+/* Generate SD flag bits */
+#define SD_FLAG(name, mflags) name = 1 << __##name,
+enum {
+ #include <linux/sched/sd_flags.h>
+};
+#undef SD_FLAG
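This is the classic x-macro pattern: sd_flags.h is a flat list of SD_FLAG(name, mflags) entries, and each inclusion expands them under a different SD_FLAG definition. Expanded by hand for the first entry in the list, the two enums above become (a sketch):

	/* first inclusion: per-flag indexes */
	enum { __SD_BALANCE_NEWIDLE, /* ... one per entry ... */ __SD_FLAG_CNT };

	/* second inclusion: one-hot flag values */
	enum { SD_BALANCE_NEWIDLE = 1 << __SD_BALANCE_NEWIDLE, /* ... */ };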
-/*
- * Increase resolution of cpu_capacity calculations
- */
-#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
-#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
+#ifdef CONFIG_SCHED_DEBUG
+
+struct sd_flag_debug {
+ unsigned int meta_flags;
+ char *name;
+};
+extern const struct sd_flag_debug sd_flag_debug[];
+
+#endif
#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
@@ -39,6 +42,13 @@ static inline int cpu_smt_flags(void)
}
#endif
+#ifdef CONFIG_SCHED_CLUSTER
+static inline int cpu_cluster_flags(void)
+{
+ return SD_SHARE_PKG_RESOURCES;
+}
+#endif
+
#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
@@ -71,24 +81,20 @@ struct sched_domain_shared {
atomic_t ref;
atomic_t nr_busy_cpus;
int has_idle_cores;
+ int nr_idle_scan;
};
struct sched_domain {
/* These fields must be setup */
- struct sched_domain *parent; /* top domain must be null terminated */
- struct sched_domain *child; /* bottom domain must be null terminated */
+ struct sched_domain __rcu *parent; /* top domain must be null terminated */
+ struct sched_domain __rcu *child; /* bottom domain must be null terminated */
struct sched_group *groups; /* the balancing groups of the domain */
unsigned long min_interval; /* Minimum balance interval ms */
unsigned long max_interval; /* Maximum balance interval ms */
unsigned int busy_factor; /* less balancing by factor if busy */
unsigned int imbalance_pct; /* No balance until over watermark */
unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */
- unsigned int busy_idx;
- unsigned int idle_idx;
- unsigned int newidle_idx;
- unsigned int wake_idx;
- unsigned int forkexec_idx;
- unsigned int smt_gain;
+ unsigned int imb_numa_nr; /* Nr running tasks that allows a NUMA imbalance */
int nohz_idle; /* NOHZ IDLE status */
int flags; /* See SD_* */
@@ -101,7 +107,7 @@ struct sched_domain {
/* idle_balance() stats */
u64 max_newidle_lb_cost;
- unsigned long next_decay_max_lb_cost;
+ unsigned long last_decay_max_lb_cost;
u64 avg_scan_cost; /* select_idle_sibling */
@@ -153,7 +159,7 @@ struct sched_domain {
* by attaching extra space to the end of the structure,
* depending on how many CPUs the kernel has booted up with)
*/
- unsigned long span[0];
+ unsigned long span[];
};
static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
@@ -161,6 +167,10 @@ static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
return to_cpumask(sd->span);
}
+extern void partition_sched_domains_locked(int ndoms_new,
+ cpumask_var_t doms_new[],
+ struct sched_domain_attr *dattr_new);
+
extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
struct sched_domain_attr *dattr_new);
@@ -176,10 +186,10 @@ typedef int (*sched_domain_flags_f)(void);
#define SDTL_OVERLAP 0x01
struct sd_data {
- struct sched_domain **__percpu sd;
- struct sched_domain_shared **__percpu sds;
- struct sched_group **__percpu sg;
- struct sched_group_capacity **__percpu sgc;
+ struct sched_domain *__percpu *sd;
+ struct sched_domain_shared *__percpu *sds;
+ struct sched_group *__percpu *sg;
+ struct sched_group_capacity *__percpu *sgc;
};
struct sched_domain_topology_level {
@@ -206,6 +216,12 @@ extern void set_sched_topology(struct sched_domain_topology_level *tl);
struct sched_domain_attr;
static inline void
+partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
+ struct sched_domain_attr *dattr_new)
+{
+}
+
+static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
struct sched_domain_attr *dattr_new)
{
@@ -218,6 +234,47 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
#endif /* !CONFIG_SMP */
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+extern void rebuild_sched_domains_energy(void);
+#else
+static inline void rebuild_sched_domains_energy(void)
+{
+}
+#endif
+
+#ifndef arch_scale_cpu_capacity
+/**
+ * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU.
+ * @cpu: the CPU in question.
+ *
+ * Return: the CPU scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
+ *
+ * max_perf(cpu)
+ * ----------------------------- * SCHED_CAPACITY_SCALE
+ * max(max_perf(c) : c \in CPUs)
+ */
+static __always_inline
+unsigned long arch_scale_cpu_capacity(int cpu)
+{
+ return SCHED_CAPACITY_SCALE;
+}
+#endif
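
For illustration only, a minimal sketch of the normalization described in the
kernel-doc above; compute_cpu_scale() and its arguments are hypothetical names,
not part of this header:

	#include <linux/math64.h>
	#include <linux/sched.h>	/* SCHED_CAPACITY_SCALE */

	/* Scale one CPU's max performance against the fastest CPU in the
	 * system, per the formula in the arch_scale_cpu_capacity() comment. */
	static unsigned long compute_cpu_scale(u64 cpu_max_perf,
					       u64 system_max_perf)
	{
		return div64_u64(cpu_max_perf * SCHED_CAPACITY_SCALE,
				 system_max_perf);
	}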
+
+#ifndef arch_scale_thermal_pressure
+static __always_inline
+unsigned long arch_scale_thermal_pressure(int cpu)
+{
+ return 0;
+}
+#endif
+
+#ifndef arch_update_thermal_pressure
+static __always_inline
+void arch_update_thermal_pressure(const struct cpumask *cpus,
+ unsigned long capped_frequency)
+{ }
+#endif
+
static inline int task_node(const struct task_struct *p)
{
return cpu_to_node(task_cpu(p));
diff --git a/include/linux/sched/types.h b/include/linux/sched/types.h
new file mode 100644
index 000000000000..3c3e049224ae
--- /dev/null
+++ b/include/linux/sched/types.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_TYPES_H
+#define _LINUX_SCHED_TYPES_H
+
+#include <linux/types.h>
+
+/**
+ * struct task_cputime - collected CPU time counts
+ * @stime: time spent in kernel mode, in nanoseconds
+ * @utime: time spent in user mode, in nanoseconds
+ * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
+ *
+ * This structure groups together three kinds of CPU time that are tracked for
+ * threads and thread groups. Most things considering CPU time want to group
+ * these counts together and treat all three of them in parallel.
+ */
+struct task_cputime {
+ u64 stime;
+ u64 utime;
+ unsigned long long sum_exec_runtime;
+};
+
+#endif
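
As a usage illustration (a hedged sketch, not part of the header), the three
counters are typically accumulated in parallel, e.g. when folding one thread's
counters into a thread-group total; task_cputime_add() is a made-up helper:

	/* Fold one thread's counters into a running group total. */
	static inline void task_cputime_add(struct task_cputime *sum,
					    const struct task_cputime *t)
	{
		sum->stime            += t->stime;
		sum->utime            += t->utime;
		sum->sum_exec_runtime += t->sum_exec_runtime;
	}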
diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h
index 5d5415e129d4..4cc52698e214 100644
--- a/include/linux/sched/user.h
+++ b/include/linux/sched/user.h
@@ -1,44 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_USER_H
#define _LINUX_SCHED_USER_H
#include <linux/uidgid.h>
#include <linux/atomic.h>
-
-struct key;
+#include <linux/percpu_counter.h>
+#include <linux/refcount.h>
+#include <linux/ratelimit.h>
/*
* Some day this will be a full-fledged user tracking system..
*/
struct user_struct {
- atomic_t __count; /* reference count */
- atomic_t processes; /* How many processes does this user have? */
- atomic_t sigpending; /* How many pending signals does this user have? */
-#ifdef CONFIG_FANOTIFY
- atomic_t fanotify_listeners;
-#endif
+ refcount_t __count; /* reference count */
#ifdef CONFIG_EPOLL
- atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
+ struct percpu_counter epoll_watches; /* The number of file descriptors currently watched */
#endif
-#ifdef CONFIG_POSIX_MQUEUE
- /* protected by mq_lock */
- unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
-#endif
- unsigned long locked_shm; /* How many pages of mlocked shm ? */
unsigned long unix_inflight; /* How many files in flight in unix sockets */
atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */
-#ifdef CONFIG_KEYS
- struct key *uid_keyring; /* UID specific keyring */
- struct key *session_keyring; /* UID's default session keyring */
-#endif
-
/* Hash table maintenance information */
struct hlist_node uidhash_node;
kuid_t uid;
-#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
+#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) || \
+ defined(CONFIG_NET) || defined(CONFIG_IO_URING) || \
+ defined(CONFIG_VFIO_PCI_ZDEV_KVM) || IS_ENABLED(CONFIG_IOMMUFD)
atomic_long_t locked_vm;
#endif
+#ifdef CONFIG_WATCH_QUEUE
+ atomic_t nr_watches; /* The number of watches this user currently has */
+#endif
+
+ /* Miscellaneous per-user rate limit */
+ struct ratelimit_state ratelimit;
};
extern int uids_sysfs_init(void);
@@ -53,7 +48,7 @@ extern struct user_struct root_user;
extern struct user_struct * alloc_uid(kuid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
- atomic_inc(&u->__count);
+ refcount_inc(&u->__count);
return u;
}
extern void free_uid(struct user_struct *);
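
For context, a minimal sketch of the refcount pattern this pair exposes; the
surrounding code is hypothetical and error handling is omitted:

	/* Take a reference while holding on to the user_struct, drop it
	 * when done. current_user() is from <linux/cred.h>. */
	struct user_struct *u = get_uid(current_user());
	/* ... charge a resource against u ... */
	free_uid(u);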
diff --git a/include/linux/sched/vhost_task.h b/include/linux/sched/vhost_task.h
new file mode 100644
index 000000000000..6123c10b99cf
--- /dev/null
+++ b/include/linux/sched/vhost_task.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VHOST_TASK_H
+#define _LINUX_VHOST_TASK_H
+
+#include <linux/completion.h>
+
+struct task_struct;
+
+struct vhost_task {
+ int (*fn)(void *data);
+ void *data;
+ struct completion exited;
+ unsigned long flags;
+ struct task_struct *task;
+};
+
+struct vhost_task *vhost_task_create(int (*fn)(void *), void *arg,
+ const char *name);
+void vhost_task_start(struct vhost_task *vtsk);
+void vhost_task_stop(struct vhost_task *vtsk);
+bool vhost_task_should_stop(struct vhost_task *vtsk);
+
+#endif
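
A hedged usage sketch of this API (my_worker, my_ctx and the vtsk field are
made-up names): the worker polls vhost_task_should_stop() while the owner
drives start/stop:

	static int my_worker(void *data)
	{
		struct my_ctx *ctx = data;	/* hypothetical per-device context */

		while (!vhost_task_should_stop(ctx->vtsk)) {
			/* ... process queued work ... */
		}
		return 0;
	}

	/* owner side */
	ctx->vtsk = vhost_task_create(my_worker, ctx, "vhost-demo");
	if (ctx->vtsk)
		vhost_task_start(ctx->vtsk);
	/* ... later ... */
	vhost_task_stop(ctx->vtsk);	/* waits on the exited completion */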
diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
index d03d8a9047dc..06cd8fb2f409 100644
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_WAKE_Q_H
#define _LINUX_SCHED_WAKE_Q_H
@@ -23,9 +24,13 @@
* called near the end of a function. Otherwise, the list can be
* re-initialized for later re-use by wake_q_init().
*
- * Note that this can cause spurious wakeups. schedule() callers
+ * NOTE that this can cause spurious wakeups. schedule() callers
* must ensure the call is done inside a loop, confirming that the
* wakeup condition has in fact occurred.
+ *
+ * NOTE that there is no guarantee the wakeup will happen any later than the
+ * wake_q_add() location. Therefore the task must be ready to be woken at the
+ * location of the wake_q_add().
*/
#include <linux/sched.h>
@@ -37,8 +42,11 @@ struct wake_q_head {
#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
-#define DEFINE_WAKE_Q(name) \
- struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
+#define WAKE_Q_HEAD_INITIALIZER(name) \
+ { WAKE_Q_TAIL, &name.first }
+
+#define DEFINE_WAKE_Q(name) \
+ struct wake_q_head name = WAKE_Q_HEAD_INITIALIZER(name)
static inline void wake_q_init(struct wake_q_head *head)
{
@@ -46,8 +54,13 @@ static inline void wake_q_init(struct wake_q_head *head)
head->lastp = &head->first;
}
-extern void wake_q_add(struct wake_q_head *head,
- struct task_struct *task);
+static inline bool wake_q_empty(struct wake_q_head *head)
+{
+ return head->first == WAKE_Q_TAIL;
+}
+
+extern void wake_q_add(struct wake_q_head *head, struct task_struct *task);
+extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task);
extern void wake_up_q(struct wake_q_head *head);
#endif /* _LINUX_SCHED_WAKE_Q_H */
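
For context, the canonical usage pattern of this API, sketched with a
hypothetical lock and waiter task: queue the wakeup under the lock, issue it
after the lock is dropped:

	DEFINE_WAKE_Q(wake_q);

	spin_lock(&lock);			/* 'lock' and 'task' are assumptions */
	/* ... mark the waiter runnable, detach it from the wait list ... */
	wake_q_add(&wake_q, task);
	spin_unlock(&lock);

	wake_up_q(&wake_q);		/* wakeups happen outside the lock */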
diff --git a/include/linux/sched/xacct.h b/include/linux/sched/xacct.h
index a28156a0d34a..c078f0a94cec 100644
--- a/include/linux/sched/xacct.h
+++ b/include/linux/sched/xacct.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_XACCT_H
#define _LINUX_SCHED_XACCT_H
diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h
index 411b52e424e1..cb41c5edb4d4 100644
--- a/include/linux/sched_clock.h
+++ b/include/linux/sched_clock.h
@@ -1,25 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* sched_clock.h: support for extending counters to full 64-bit ns counter
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef LINUX_SCHED_CLOCK
#define LINUX_SCHED_CLOCK
+#include <linux/types.h>
+
#ifdef CONFIG_GENERIC_SCHED_CLOCK
-extern void sched_clock_postinit(void);
+/**
+ * struct clock_read_data - data required to read from sched_clock()
+ *
+ * @epoch_ns: sched_clock() value at last update
+ * @epoch_cyc: Clock cycle value at last update.
+ * @sched_clock_mask: Bitmask for two's complement subtraction of non 64bit
+ * clocks.
+ * @read_sched_clock: Current clock source (or dummy source when suspended).
+ * @mult: Multiplier for scaled math conversion.
+ * @shift: Shift value for scaled math conversion.
+ *
+ * Care must be taken when updating this structure; it is read by
+ * some very hot code paths. It occupies <=40 bytes and, when combined
+ * with the seqcount used to synchronize access, comfortably fits into
+ * a 64 byte cache line.
+ */
+struct clock_read_data {
+ u64 epoch_ns;
+ u64 epoch_cyc;
+ u64 sched_clock_mask;
+ u64 (*read_sched_clock)(void);
+ u32 mult;
+ u32 shift;
+};
+
+extern struct clock_read_data *sched_clock_read_begin(unsigned int *seq);
+extern int sched_clock_read_retry(unsigned int seq);
+
+extern void generic_sched_clock_init(void);
extern void sched_clock_register(u64 (*read)(void), int bits,
unsigned long rate);
#else
-static inline void sched_clock_postinit(void) { }
+static inline void generic_sched_clock_init(void) { }
static inline void sched_clock_register(u64 (*read)(void), int bits,
unsigned long rate)
{
- ;
}
#endif
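
A sketch of how the read side is meant to be used (snapshot_sched_clock() is a
made-up name): retry with the seqcount until a consistent clock_read_data
snapshot is obtained, applying the scaled-math fields documented above:

	static u64 snapshot_sched_clock(void)
	{
		struct clock_read_data *rd;
		unsigned int seq;
		u64 cyc, ns;

		do {
			rd  = sched_clock_read_begin(&seq);
			cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
			      rd->sched_clock_mask;
			ns  = rd->epoch_ns + ((cyc * rd->mult) >> rd->shift);
		} while (sched_clock_read_retry(seq));

		return ns;
	}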
diff --git a/include/linux/scif.h b/include/linux/scif.h
deleted file mode 100644
index 49a35d6edc94..000000000000
--- a/include/linux/scif.h
+++ /dev/null
@@ -1,1339 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Intel SCIF driver.
- *
- */
-#ifndef __SCIF_H__
-#define __SCIF_H__
-
-#include <linux/types.h>
-#include <linux/poll.h>
-#include <linux/device.h>
-#include <linux/scif_ioctl.h>
-
-#define SCIF_ACCEPT_SYNC 1
-#define SCIF_SEND_BLOCK 1
-#define SCIF_RECV_BLOCK 1
-
-enum {
- SCIF_PROT_READ = (1 << 0),
- SCIF_PROT_WRITE = (1 << 1)
-};
-
-enum {
- SCIF_MAP_FIXED = 0x10,
- SCIF_MAP_KERNEL = 0x20,
-};
-
-enum {
- SCIF_FENCE_INIT_SELF = (1 << 0),
- SCIF_FENCE_INIT_PEER = (1 << 1),
- SCIF_SIGNAL_LOCAL = (1 << 4),
- SCIF_SIGNAL_REMOTE = (1 << 5)
-};
-
-enum {
- SCIF_RMA_USECPU = (1 << 0),
- SCIF_RMA_USECACHE = (1 << 1),
- SCIF_RMA_SYNC = (1 << 2),
- SCIF_RMA_ORDERED = (1 << 3)
-};
-
-/* End of SCIF Admin Reserved Ports */
-#define SCIF_ADMIN_PORT_END 1024
-
-/* End of SCIF Reserved Ports */
-#define SCIF_PORT_RSVD 1088
-
-typedef struct scif_endpt *scif_epd_t;
-typedef struct scif_pinned_pages *scif_pinned_pages_t;
-
-/**
- * struct scif_range - SCIF registered range used in kernel mode
- * @cookie: cookie used internally by SCIF
- * @nr_pages: number of pages of PAGE_SIZE
- * @prot_flags: R/W protection
- * @phys_addr: Array of bus addresses
- * @va: Array of kernel virtual addresses backed by the pages in the phys_addr
- * array. The va is populated only when called on the host for a remote
- * SCIF connection on MIC. This is required to support the use case of DMA
- * between MIC and another device which is not a SCIF node, e.g., an IB or
- * ethernet NIC.
- */
-struct scif_range {
- void *cookie;
- int nr_pages;
- int prot_flags;
- dma_addr_t *phys_addr;
- void __iomem **va;
-};
-
-/**
- * struct scif_pollepd - SCIF endpoint to be monitored via scif_poll
- * @epd: SCIF endpoint
- * @events: requested events
- * @revents: returned events
- */
-struct scif_pollepd {
- scif_epd_t epd;
- short events;
- short revents;
-};
-
-/**
- * scif_peer_dev - representation of a peer SCIF device
- *
- * Peer devices show up as PCIe devices for the mgmt node but not the cards.
- * The mgmt node discovers all the cards on the PCIe bus and informs the other
- * cards about their peers. Upon notification of a peer a node adds a peer
- * device to the peer bus to maintain symmetry in the way devices are
- * discovered across all nodes in the SCIF network.
- *
- * @dev: underlying device
- * @dnode: The destination node which this device will communicate with.
- */
-struct scif_peer_dev {
- struct device dev;
- u8 dnode;
-};
-
-/**
- * scif_client - representation of a SCIF client
- * @name: client name
- * @probe: client method called when a peer device is registered
- * @remove: client method called when a peer device is unregistered
- * @si: subsys_interface used internally for implementing SCIF clients
- */
-struct scif_client {
- const char *name;
- void (*probe)(struct scif_peer_dev *spdev);
- void (*remove)(struct scif_peer_dev *spdev);
- struct subsys_interface si;
-};
-
-#define SCIF_OPEN_FAILED ((scif_epd_t)-1)
-#define SCIF_REGISTER_FAILED ((off_t)-1)
-#define SCIF_MMAP_FAILED ((void *)-1)
-
-/**
- * scif_open() - Create an endpoint
- *
- * Return:
- * Upon successful completion, scif_open() returns an endpoint descriptor to
- * be used in subsequent SCIF functions calls to refer to that endpoint;
- * otherwise in user mode SCIF_OPEN_FAILED (that is ((scif_epd_t)-1)) is
- * returned and errno is set to indicate the error; in kernel mode a NULL
- * scif_epd_t is returned.
- *
- * Errors:
- * ENOMEM - Insufficient kernel memory was available
- */
-scif_epd_t scif_open(void);
-
-/**
- * scif_bind() - Bind an endpoint to a port
- * @epd: endpoint descriptor
- * @pn: port number
- *
- * scif_bind() binds endpoint epd to port pn, where pn is a port number on the
- * local node. If pn is zero, a port number greater than or equal to
- * SCIF_PORT_RSVD is assigned and returned. Each endpoint may be bound to
- * exactly one local port. Requests for ports below 1024 can only be bound by
- * system (root) processes or by processes executed by privileged users.
- *
- * Return:
- * Upon successful completion, scif_bind() returns the port number to which epd
- * is bound; otherwise in user mode -1 is returned and errno is set to
- * indicate the error; in kernel mode the negative of one of the following
- * errors is returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * EINVAL - the endpoint or the port is already bound
- * EISCONN - The endpoint is already connected
- * ENOSPC - No port number available for assignment
- * EACCES - The port requested is protected and the user is not the superuser
- */
-int scif_bind(scif_epd_t epd, u16 pn);
-
-/**
- * scif_listen() - Listen for connections on an endpoint
- * @epd: endpoint descriptor
- * @backlog: maximum pending connection requests
- *
- * scif_listen() marks the endpoint epd as a listening endpoint - that is, as
- * an endpoint that will be used to accept incoming connection requests. Once
- * so marked, the endpoint is said to be in the listening state and may not be
- * used as the endpoint of a connection.
- *
- * The endpoint, epd, must have been bound to a port.
- *
- * The backlog argument defines the maximum length to which the queue of
- * pending connections for epd may grow. If a connection request arrives when
- * the queue is full, the client may receive an error with an indication that
- * the connection was refused.
- *
- * Return:
- * Upon successful completion, scif_listen() returns 0; otherwise in user mode
- * -1 is returned and errno is set to indicate the error; in kernel mode the
- * negative of one of the following errors is returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * EINVAL - the endpoint is not bound to a port
- * EISCONN - The endpoint is already connected or listening
- */
-int scif_listen(scif_epd_t epd, int backlog);
-
-/**
- * scif_connect() - Initiate a connection on a port
- * @epd: endpoint descriptor
- * @dst: global id of port to which to connect
- *
- * The scif_connect() function requests the connection of endpoint epd to remote
- * port dst. If the connection is successful, a peer endpoint, bound to dst, is
- * created on node dst.node. On successful return, the connection is complete.
- *
- * If the endpoint epd has not already been bound to a port, scif_connect()
- * will bind it to an unused local port.
- *
- * A connection is terminated when an endpoint of the connection is closed,
- * either explicitly by scif_close(), or when a process that owns one of the
- * endpoints of the connection is terminated.
- *
- * In user space, scif_connect() supports an asynchronous connection mode
- * if the application has set the O_NONBLOCK flag on the endpoint via the
- * fcntl() system call. Setting this flag means the calling process will not
- * wait during scif_connect().
- *
- * Return:
- * Upon successful completion, scif_connect() returns the port ID to which the
- * endpoint, epd, is bound; otherwise in user mode -1 is returned and errno is
- * set to indicate the error; in kernel mode the negative of one of the
- * following errors is returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNREFUSED - The destination was not listening for connections or refused
- * the connection request
- * EINVAL - dst.port is not a valid port ID
- * EISCONN - The endpoint is already connected
- * ENOMEM - No buffer space is available
- * ENODEV - The destination node does not exist, or it is lost, or it existed
- * but is no longer in the network since it may have crashed
- * ENOSPC - No port number available for assignment
- * EOPNOTSUPP - The endpoint is listening and cannot be connected
- */
-int scif_connect(scif_epd_t epd, struct scif_port_id *dst);
-
-/**
- * scif_accept() - Accept a connection on an endpoint
- * @epd: endpoint descriptor
- * @peer: global id of port to which connected
- * @newepd: new connected endpoint descriptor
- * @flags: flags
- *
- * The scif_accept() call extracts the first connection request from the queue
- * of pending connections for the port on which epd is listening. scif_accept()
- * creates a new endpoint, bound to the same port as epd, and allocates a new
- * SCIF endpoint descriptor, returned in newepd, for the endpoint. The new
- * endpoint is connected to the endpoint through which the connection was
- * requested. epd is unaffected by this call, and remains in the listening
- * state.
- *
- * On successful return, peer holds the global port identifier (node id and
- * local port number) of the port which requested the connection.
- *
- * A connection is terminated when an endpoint of the connection is closed,
- * either explicitly by scif_close(), or when a process that owns one of the
- * endpoints of the connection is terminated.
- *
- * The number of connections that can (subsequently) be accepted on epd is only
- * limited by system resources (memory).
- *
- * The flags argument is formed by OR'ing together zero or more of the
- * following values.
- * SCIF_ACCEPT_SYNC - block until a connection request is presented. If
- * SCIF_ACCEPT_SYNC is not in flags, and no pending
- * connections are present on the queue, scif_accept()
- * fails with an EAGAIN error
- *
- * In user mode, the select() and poll() functions can be used to determine
- * when there is a connection request. In kernel mode, the scif_poll()
- * function may be used for this purpose. A readable event will be delivered
- * when a connection is requested.
- *
- * Return:
- * Upon successful completion, scif_accept() returns 0; otherwise in user mode
- * -1 is returned and errno is set to indicate the error; in kernel mode the
- * negative of one of the following errors is returned.
- *
- * Errors:
- * EAGAIN - SCIF_ACCEPT_SYNC is not set and no connections are present to be
- * accepted or SCIF_ACCEPT_SYNC is not set and remote node failed to complete
- * its connection request
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * EINTR - Interrupted function
- * EINVAL - epd is not a listening endpoint, or flags is invalid, or peer is
- * NULL, or newepd is NULL
- * ENODEV - The requesting node is lost, or existed but is no longer in the
- * network since it may have crashed
- * ENOMEM - Not enough space
- * ENOENT - Secondary part of epd registration failed
- */
-int scif_accept(scif_epd_t epd, struct scif_port_id *peer,
-		scif_epd_t *newepd, int flags);
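
For context on the API this patch deletes, a hedged sketch of a kernel-mode
connection handshake; SERVER_PORT and the destination node id are made up, and
error handling is omitted:

	#define SERVER_PORT 1100	/* >= SCIF_ADMIN_PORT_END, hypothetical */

	/* server side */
	scif_epd_t lep = scif_open();
	scif_bind(lep, SERVER_PORT);
	scif_listen(lep, 16);

	struct scif_port_id peer;
	scif_epd_t cep;
	scif_accept(lep, &peer, &cep, SCIF_ACCEPT_SYNC);	/* block for a request */

	/* client side */
	scif_epd_t ep = scif_open();
	struct scif_port_id dst = { .node = 0, .port = SERVER_PORT };
	scif_connect(ep, &dst);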
-
-/**
- * scif_close() - Close an endpoint
- * @epd: endpoint descriptor
- *
- * scif_close() closes an endpoint and performs necessary teardown of
- * facilities associated with that endpoint.
- *
- * If epd is a listening endpoint then it will no longer accept connection
- * requests on the port to which it is bound. Any pending connection requests
- * are rejected.
- *
- * If epd is a connected endpoint, then its peer endpoint is also closed. RMAs
- * which are in-process through epd or its peer endpoint will complete before
- * scif_close() returns. Registered windows of the local and peer endpoints are
- * released as if scif_unregister() was called against each window.
- *
- * Closing a SCIF endpoint does not affect local registered memory mapped by
- * a SCIF endpoint on a remote node. The local memory remains mapped by the peer
- * SCIF endpoint until explicitly removed by the peer calling munmap(..).
- *
- * If the peer endpoint's receive queue is not empty at the time that epd is
- * closed, then the peer endpoint can be passed as the endpoint parameter to
- * scif_recv() until the receive queue is empty.
- *
- * epd is freed and may no longer be accessed.
- *
- * Return:
- * Upon successful completion, scif_close() returns 0; otherwise in user mode
- * -1 is returned and errno is set to indicate the error; in kernel mode the
- * negative of one of the following errors is returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- */
-int scif_close(scif_epd_t epd);
-
-/**
- * scif_send() - Send a message
- * @epd: endpoint descriptor
- * @msg: message buffer address
- * @len: message length
- * @flags: blocking mode flags
- *
- * scif_send() sends data to the peer of endpoint epd. Up to len bytes of data
- * are copied from memory starting at address msg. On successful execution the
- * return value of scif_send() is the number of bytes that were sent, and is
- * zero if no bytes were sent because len was zero. scif_send() may be called
- * only when the endpoint is in a connected state.
- *
- * If a scif_send() call is non-blocking, then it sends only those bytes which
- * can be sent without waiting, up to a maximum of len bytes.
- *
- * If a scif_send() call is blocking, then it normally returns after sending
- * all len bytes. If a blocking call is interrupted or the connection is
- * reset, the call is considered successful if some bytes were sent or len is
- * zero, otherwise the call is considered unsuccessful.
- *
- * In user mode, the select() and poll() functions can be used to determine
- * when the send queue is not full. In kernel mode, the scif_poll() function
- * may be used for this purpose.
- *
- * It is recommended that scif_send()/scif_recv() only be used for short
- * control-type message communication between SCIF endpoints. The SCIF RMA
- * APIs are expected to provide better performance for transfer sizes of
- * 1024 bytes or longer for the current MIC hardware and software
- * implementation.
- *
- * scif_send() will block until the entire message is sent if SCIF_SEND_BLOCK
- * is passed as the flags argument.
- *
- * Return:
- * Upon successful completion, scif_send() returns the number of bytes sent;
- * otherwise in user mode -1 is returned and errno is set to indicate the
- * error; in kernel mode the negative of one of the following errors is
- * returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - flags is invalid, or len is negative
- * ENODEV - The remote node is lost, or existed but is no longer in the
- * network since it may have crashed
- * ENOMEM - Not enough space
- * ENOTCONN - The endpoint is not connected
- */
-int scif_send(scif_epd_t epd, void *msg, int len, int flags);
-
-/**
- * scif_recv() - Receive a message
- * @epd: endpoint descriptor
- * @msg: message buffer address
- * @len: message buffer length
- * @flags: blocking mode flags
- *
- * scif_recv() receives data from the peer of endpoint epd. Up to len bytes of
- * data are copied to memory starting at address msg. On successful execution
- * the return value of scif_recv() is the number of bytes that were received,
- * and is zero if no bytes were received because len was zero. scif_recv() may
- * be called only when the endpoint is in a connected state.
- *
- * If a scif_recv() call is non-blocking, then it receives only those bytes
- * which can be received without waiting, up to a maximum of len bytes.
- *
- * If a scif_recv() call is blocking, then it normally returns after receiving
- * all len bytes. If the blocking call was interrupted due to a disconnection,
- * subsequent calls to scif_recv() will copy all bytes received up to the point
- * of disconnection.
- *
- * In user mode, the select() and poll() functions can be used to determine
- * when data is available to be received. In kernel mode, the scif_poll()
- * function may be used for this purpose.
- *
- * It is recommended that scif_send()/scif_recv() only be used for short
- * control-type message communication between SCIF endpoints. The SCIF RMA
- * APIs are expected to provide better performance for transfer sizes of
- * 1024 bytes or longer for the current MIC hardware and software
- * implementation.
- *
- * scif_recv() will block until the entire message is received if
- * SCIF_RECV_BLOCK is passed as the flags argument.
- *
- * Return:
- * Upon successful completion, scif_recv() returns the number of bytes
- * received; otherwise in user mode -1 is returned and errno is set to
- * indicate the error; in kernel mode the negative of one of the following
- * errors is returned.
- *
- * Errors:
- * EAGAIN - The destination node is returning from a low power state
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - flags is invalid, or len is negative
- * ENODEV - The remote node is lost, or existed but is no longer in the
- * network since it may have crashed
- * ENOMEM - Not enough space
- * ENOTCONN - The endpoint is not connected
- */
-int scif_recv(scif_epd_t epd, void *msg, int len, int flags);
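
Continuing the sketch above, a short blocking message exchange over a
connected endpoint:

	char buf[8];

	scif_send(ep, "ping", 5, SCIF_SEND_BLOCK);	/* blocks until all 5 bytes are sent */
	scif_recv(ep, buf, 5, SCIF_RECV_BLOCK);		/* blocks until all 5 bytes arrive */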
-
-/**
- * scif_register() - Mark a memory region for remote access.
- * @epd: endpoint descriptor
- * @addr: starting virtual address
- * @len: length of range
- * @offset: offset of window
- * @prot_flags: read/write protection flags
- * @map_flags: mapping flags
- *
- * The scif_register() function opens a window, a range of whole pages of the
- * registered address space of the endpoint epd, starting at offset po and
- * continuing for len bytes. The value of po, further described below, is a
- * function of the parameters offset and len, and the value of map_flags. Each
- * page of the window represents the physical memory page which backs the
- * corresponding page of the range of virtual address pages starting at addr
- * and continuing for len bytes. addr and len are constrained to be multiples
- * of the page size. A successful scif_register() call returns po.
- *
- * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset
- * exactly, and offset is constrained to be a multiple of the page size. The
- * mapping established by scif_register() will not replace any existing
- * registration; an error is returned if any page within the range [offset,
- * offset + len - 1] intersects an existing window.
- *
- * When SCIF_MAP_FIXED is not set, the implementation uses offset in an
- * implementation-defined manner to arrive at po. The po value so chosen will
- * be an area of the registered address space that the implementation deems
- * suitable for a mapping of len bytes. An offset value of 0 is interpreted as
- * granting the implementation complete freedom in selecting po, subject to
- * constraints described below. A non-zero value of offset is taken to be a
- * suggestion of an offset near which the mapping should be placed. When the
- * implementation selects a value for po, it does not replace any extant
- * window. In all cases, po will be a multiple of the page size.
- *
- * The physical pages which are so represented by a window are available for
- * access in calls to mmap(), scif_readfrom(), scif_writeto(),
- * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the
- * physical pages represented by the window will not be reused by the memory
- * subsystem for any other purpose. Note that the same physical page may be
- * represented by multiple windows.
- *
- * Subsequent operations which change the memory pages to which virtual
- * addresses are mapped (such as mmap(), munmap()) have no effect on
- * existing windows.
- *
- * If the process will fork(), it is recommended that the registered
- * virtual address range be marked with MADV_DONTFORK. Doing so will prevent
- * problems due to copy-on-write semantics.
- *
- * The prot_flags argument is formed by OR'ing together one or more of the
- * following values.
- * SCIF_PROT_READ - allow read operations from the window
- * SCIF_PROT_WRITE - allow write operations to the window
- *
- * Return:
- * Upon successful completion, scif_register() returns the offset at which the
- * mapping was placed (po); otherwise in user mode SCIF_REGISTER_FAILED (that
- * is (off_t *)-1) is returned and errno is set to indicate the error; in
- * kernel mode the negative of one of the following errors is returned.
- *
- * Errors:
- * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags, and pages in the range
- * [offset, offset + len -1] are already registered
- * EAGAIN - The mapping could not be performed due to lack of resources
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - map_flags is invalid, or prot_flags is invalid, or SCIF_MAP_FIXED is
- * set in flags, and offset is not a multiple of the page size, or addr is not a
- * multiple of the page size, or len is not a multiple of the page size, or is
- * 0, or offset is negative
- * ENODEV - The remote node is lost, or existed but is no longer in the
- * network since it may have crashed
- * ENOMEM - Not enough space
- * ENOTCONN - The endpoint is not connected
- */
-off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
- int prot_flags, int map_flags);
-
-/**
- * scif_unregister() - Release a memory region marked for remote access.
- * @epd: endpoint descriptor
- * @offset: start of range to unregister
- * @len: length of range to unregister
- *
- * The scif_unregister() function closes those previously registered windows
- * which are entirely within the range [offset, offset + len - 1]. It is an
- * error to specify a range which intersects only a subrange of a window.
- *
- * On a successful return, pages within the window may no longer be specified
- * in calls to mmap(), scif_readfrom(), scif_writeto(), scif_vreadfrom(),
- * scif_vwriteto(), scif_get_pages, and scif_fence_signal(). The window,
- * however, continues to exist until all previous references against it are
- * removed. A window is referenced if there is a mapping to it created by
- * mmap(), or if scif_get_pages() was called against the window
- * (and the pages have not been returned via scif_put_pages()). A window is
- * also referenced while an RMA, in which some range of the window is a source
- * or destination, is in progress. Finally a window is referenced while some
- * offset in that window was specified to scif_fence_signal(), and the RMAs
- * marked by that call to scif_fence_signal() have not completed. While a
- * window is in this state, its registered address space pages are not
- * available for use in a new registered window.
- *
- * When all such references to the window have been removed, its references to
- * all the physical pages which it represents are removed. Similarly, the
- * registered address space pages of the window become available for
- * registration in a new window.
- *
- * Return:
- * Upon successful completion, scif_unregister() returns 0; otherwise in user
- * mode -1 is returned and errno is set to indicate the error; in kernel mode
- * the negative of one of the following errors is returned. In the event of an
- * error, no windows are unregistered.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - the range [offset, offset + len - 1] intersects a subrange of a
- * window, or offset is negative
- * ENODEV - The remote node is lost, or existed but is no longer in the
- * network since it may have crashed
- * ENOTCONN - The endpoint is not connected
- * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid for the
- * registered address space of epd
- */
-int scif_unregister(scif_epd_t epd, off_t offset, size_t len);
-
-/**
- * scif_readfrom() - Copy from a remote address space
- * @epd: endpoint descriptor
- * @loffset: offset in local registered address space to
- * which to copy
- * @len: length of range to copy
- * @roffset: offset in remote registered address space
- * from which to copy
- * @rma_flags: transfer mode flags
- *
- * scif_readfrom() copies len bytes from the remote registered address space of
- * the peer of endpoint epd, starting at the offset roffset to the local
- * registered address space of epd, starting at the offset loffset.
- *
- * Each of the specified ranges [loffset, loffset + len - 1] and [roffset,
- * roffset + len - 1] must be within some registered window or windows of the
- * local and remote nodes. A range may intersect multiple registered windows,
- * but only if those windows are contiguous in the registered address space.
- *
- * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
- * programmed read/writes. Otherwise the data is copied using DMA. If
- * rma_flags includes SCIF_RMA_SYNC, then scif_readfrom() will return after
- * the transfer is complete. Otherwise, the transfer may be performed
- * asynchronously. The order in which any two asynchronous RMA operations complete
- * is non-deterministic. The synchronization functions, scif_fence_mark()/
- * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
- * the completion of asynchronous RMA operations on the same endpoint.
- *
- * The DMA transfer of individual bytes is not guaranteed to complete in
- * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
- * cacheline or partial cacheline of the source range will become visible on
- * the destination node after all other transferred data in the source
- * range has become visible on the destination node.
- *
- * The optimal DMA performance will likely be realized if both
- * loffset and roffset are cacheline aligned (are a multiple of 64). Lower
- * performance will likely be realized if loffset and roffset are not
- * cacheline aligned but are separated by some multiple of 64. The lowest level
- * of performance is likely if loffset and roffset are not separated by a
- * multiple of 64.
- *
- * The rma_flags argument is formed by ORing together zero or more of the
- * following values.
- * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
- * engine.
- * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
- * transfer has completed. Passing this flag results in the
- * current implementation busy waiting and consuming CPU cycles
- * while the DMA transfer is in progress for best performance by
- * avoiding the interrupt latency.
- * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
- * the source range becomes visible on the destination node
- * after all other transferred data in the source range has
- * become visible on the destination
- *
- * Return:
- * Upon successful completion, scif_readfrom() returns 0; otherwise in user
- * mode -1 is returned and errno is set to indicate the error; in kernel mode
- * the negative of one of the following errors is returned.
- *
- * Errors:
- * EACCES - Attempt to write to a read-only range
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - rma_flags is invalid
- * ENODEV - The remote node is lost, or existed but is no longer in the
- * network since it may have crashed
- * ENOTCONN - The endpoint is not connected
- * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered
- * address space of epd, or the range [roffset, roffset + len - 1] is invalid
- * for the registered address space of the peer of epd, or loffset or roffset
- * is negative
- */
-int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len,
-		  off_t roffset, int rma_flags);
-
-/**
- * scif_writeto() - Copy to a remote address space
- * @epd: endpoint descriptor
- * @loffset: offset in local registered address space
- * from which to copy
- * @len: length of range to copy
- * @roffset: offset in remote registered address space to
- * which to copy
- * @rma_flags: transfer mode flags
- *
- * scif_writeto() copies len bytes from the local registered address space of
- * epd, starting at the offset loffset to the remote registered address space
- * of the peer of endpoint epd, starting at the offset roffset.
- *
- * Each of the specified ranges [loffset, loffset + len - 1] and [roffset,
- * roffset + len - 1] must be within some registered window or windows of the
- * local and remote nodes. A range may intersect multiple registered windows,
- * but only if those windows are contiguous in the registered address space.
- *
- * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
- * programmed read/writes. Otherwise the data is copied using DMA. If
- * rma_flags includes SCIF_RMA_SYNC, then scif_writeto() will return after
- * the transfer is complete. Otherwise, the transfer may be performed
- * asynchronously. The order in which any two asynchronous RMA operations complete
- * is non-deterministic. The synchronization functions, scif_fence_mark()/
- * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
- * the completion of asynchronous RMA operations on the same endpoint.
- *
- * The DMA transfer of individual bytes is not guaranteed to complete in
- * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
- * cacheline or partial cacheline of the source range will become visible on
- * the destination node after all other transferred data in the source
- * range has become visible on the destination node.
- *
- * The optimal DMA performance will likely be realized if both
- * loffset and roffset are cacheline aligned (are a multiple of 64). Lower
- * performance will likely be realized if loffset and roffset are not cacheline
- * aligned but are separated by some multiple of 64. The lowest level of
- * performance is likely if loffset and roffset are not separated by a multiple
- * of 64.
- *
- * The rma_flags argument is formed by ORing together zero or more of the
- * following values.
- * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
- * engine.
- * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
- * transfer has completed. Passing this flag results in the
- * current implementation busy waiting and consuming CPU cycles
- * while the DMA transfer is in progress for best performance by
- * avoiding the interrupt latency.
- * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
- * the source range becomes visible on the destination node
- * after all other transferred data in the source range has
- * become visible on the destination
- *
- * Return:
- * Upon successful completion, scif_writeto() returns 0; otherwise in user
- * mode -1 is returned and errno is set to indicate the error; in kernel mode
- * the negative of one of the following errors is returned.
- *
- * Errors:
- * EACCES - Attempt to write to a read-only range
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - rma_flags is invalid
- * ENODEV - The remote node is lost, or existed but is no longer in the
- * network since it may have crashed
- * ENOTCONN - The endpoint is not connected
- * ENXIO - The range [loffset, loffset + len - 1] is invalid for the registered
- * address space of epd, or the range [roffset, roffset + len - 1] is invalid
- * for the registered address space of the peer of epd, or loffset or roffset
- * is negative
- */
-int scif_writeto(scif_epd_t epd, off_t loffset, size_t len,
-		 off_t roffset, int rma_flags);
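
A hedged RMA sketch built on the two calls above (buf, roffset and the window
lifetime are assumptions): register a page-aligned local window, synchronously
push one page to the peer's registered space, then tear the window down:

	off_t lwin = scif_register(ep, buf, PAGE_SIZE, 0,
				   SCIF_PROT_READ | SCIF_PROT_WRITE, 0);

	/* SCIF_RMA_SYNC busy-waits until the DMA completes, see above */
	scif_writeto(ep, lwin, PAGE_SIZE, roffset, SCIF_RMA_SYNC);

	scif_unregister(ep, lwin, PAGE_SIZE);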
-
-/**
- * scif_vreadfrom() - Copy from a remote address space
- * @epd: endpoint descriptor
- * @addr: address to which to copy
- * @len: length of range to copy
- * @roffset: offset in remote registered address space
- * from which to copy
- * @rma_flags: transfer mode flags
- *
- * scif_vreadfrom() copies len bytes from the remote registered address
- * space of the peer of endpoint epd, starting at the offset roffset, to local
- * memory, starting at addr.
- *
- * The specified range [roffset, roffset + len - 1] must be within some
- * registered window or windows of the remote nodes. The range may
- * intersect multiple registered windows, but only if those windows are
- * contiguous in the registered address space.
- *
- * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
- * programmed read/writes. Otherwise the data is copied using DMA. If
- * rma_flags includes SCIF_RMA_SYNC, then scif_vreadfrom() will return after
- * the transfer is complete. Otherwise, the transfer may be performed
- * asynchronously. The order in which any two asynchronous RMA operations complete
- * is non-deterministic. The synchronization functions, scif_fence_mark()/
- * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
- * the completion of asynchronous RMA operations on the same endpoint.
- *
- * The DMA transfer of individual bytes is not guaranteed to complete in
- * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
- * cacheline or partial cacheline of the source range will become visible on
- * the destination node after all other transferred data in the source
- * range has become visible on the destination node.
- *
- * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back
- * the specified local memory range may remain in a pinned state even after
- * the specified transfer completes. This may reduce overhead if some or all of
- * the same virtual address range is referenced in a subsequent call of
- * scif_vreadfrom() or scif_vwriteto().
- *
- * The optimal DMA performance will likely be realized if both
- * addr and roffset are cacheline aligned (are a multiple of 64). Lower
- * performance will likely be realized if addr and roffset are not
- * cacheline aligned but are separated by some multiple of 64. The lowest level
- * of performance is likely if addr and roffset are not separated by a
- * multiple of 64.
- *
- * The rma_flags argument is formed by ORing together zero or more of the
- * following values.
- * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
- * engine.
- * SCIF_RMA_USECACHE - enable registration caching
- * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
- * transfer has completed. Passing this flag results in the
- * current implementation busy waiting and consuming CPU cycles
- * while the DMA transfer is in progress for best performance by
- * avoiding the interrupt latency.
- * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
- * the source range becomes visible on the destination node
- * after all other transferred data in the source range has
- * become visible on the destination
- *
- * Return:
- * Upon successful completion, scif_vreadfrom() returns 0; otherwise in user
- * mode -1 is returned and errno is set to indicate the error; in kernel mode
- * the negative of one of the following errors is returned.
- *
- * Errors:
- * EACCES - Attempt to write to a read-only range
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - rma_flags is invalid
- * ENODEV - The remote node is lost, or existed but is no longer in the
- * network since it may have crashed
- * ENOTCONN - The endpoint is not connected
- * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for the
- * registered address space of epd
- */
-int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len, off_t roffset,
- int rma_flags);
-
-/**
- * scif_vwriteto() - Copy to a remote address space
- * @epd: endpoint descriptor
- * @addr: address from which to copy
- * @len: length of range to copy
- * @roffset: offset in remote registered address space to
- * which to copy
- * @rma_flags: transfer mode flags
- *
- * scif_vwriteto() copies len bytes from the local memory, starting at addr, to
- * the remote registered address space of the peer of endpoint epd, starting at
- * the offset roffset.
- *
- * The specified range [roffset, roffset + len - 1] must be within some
- * registered window or windows of the remote nodes. The range may intersect
- * multiple registered windows, but only if those windows are contiguous in the
- * registered address space.
- *
- * If rma_flags includes SCIF_RMA_USECPU, then the data is copied using
- * programmed read/writes. Otherwise the data is copied using DMA. If
- * rma_flags includes SCIF_RMA_SYNC, then scif_vwriteto() will return after
- * the transfer is complete. Otherwise, the transfer may be performed
- * asynchronously. The order in which any two asynchronous RMA operations complete
- * is non-deterministic. The synchronization functions, scif_fence_mark()/
- * scif_fence_wait() and scif_fence_signal(), can be used to synchronize to
- * the completion of asynchronous RMA operations on the same endpoint.
- *
- * The DMA transfer of individual bytes is not guaranteed to complete in
- * address order. If rma_flags includes SCIF_RMA_ORDERED, then the last
- * cacheline or partial cacheline of the source range will become visible on
- * the destination node after all other transferred data in the source
- * range has become visible on the destination node.
- *
- * If rma_flags includes SCIF_RMA_USECACHE, then the physical pages which back
- * the specified local memory range may remain in a pinned state even after
- * the specified transfer completes. This may reduce overhead if some or all of
- * the same virtual address range is referenced in a subsequent call of
- * scif_vreadfrom() or scif_vwriteto().
- *
- * The optimal DMA performance will likely be realized if both
- * addr and offset are cacheline aligned (are a multiple of 64). Lower
- * performance will likely be realized if addr and offset are not cacheline
- * aligned but are separated by some multiple of 64. The lowest level of
- * performance is likely if addr and offset are not separated by a multiple of
- * 64.
- *
- * The rma_flags argument is formed by ORing together zero or more of the
- * following values.
- * SCIF_RMA_USECPU - perform the transfer using the CPU, otherwise use the DMA
- * engine.
- * SCIF_RMA_USECACHE - allow registration caching
- * SCIF_RMA_SYNC - perform the transfer synchronously, returning after the
- * transfer has completed. Passing this flag results in the
- * current implementation busy waiting and consuming CPU cycles
- * while the DMA transfer is in progress for best performance by
- * avoiding the interrupt latency.
- * SCIF_RMA_ORDERED - ensure that the last cacheline or partial cacheline of
- * the source range becomes visible on the destination node
- * after all other transferred data in the source range has
- * become visible on the destination
- *
- * Return:
- * Upon successful completion, scif_vwriteto() returns 0; otherwise in user
- * mode -1 is returned and errno is set to indicate the error; in kernel mode
- * the negative of one of the following errors is returned.
- *
- * Errors:
- * EACCES - Attempt to write to a read-only range
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - rma_flags is invalid
- * ENODEV - The remote node is lost, or existed but is no longer in the
- * network since it may have crashed
- * ENOTCONN - The endpoint is not connected
- * ENXIO - Offsets in the range [roffset, roffset + len - 1] are invalid for the
- * registered address space of epd
- */
-int scif_vwriteto(scif_epd_t epd, void *addr, size_t len, off_t roffset,
- int rma_flags);
-
-/**
- * scif_fence_mark() - Mark previously issued RMAs
- * @epd: endpoint descriptor
- * @flags: control flags
- * @mark: marked value returned as output.
- *
- * scif_fence_mark() returns after marking the current set of all uncompleted
- * RMAs initiated through the endpoint epd or the current set of all
- * uncompleted RMAs initiated through the peer of endpoint epd. The RMAs are
- * marked with a value returned at mark. The application may subsequently call
- * scif_fence_wait(), passing the value returned at mark, to await completion
- * of all RMAs so marked.
- *
- * The flags argument has exactly one of the following values.
- * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint
- * epd are marked
- * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer
- * of endpoint epd are marked
- *
- * Return:
- * Upon successful completion, scif_fence_mark() returns 0; otherwise in user
- * mode -1 is returned and errno is set to indicate the error; in kernel mode
- * the negative of one of the following errors is returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - flags is invalid
- * ENODEV - The remote node is lost, or existed but is no longer in the
- * network since it may have crashed
- * ENOTCONN - The endpoint is not connected
- * ENOMEM - Insufficient kernel memory was available
- */
-int scif_fence_mark(scif_epd_t epd, int flags, int *mark);
-
-/**
- * scif_fence_wait() - Wait for completion of marked RMAs
- * @epd: endpoint descriptor
- * @mark: mark request
- *
- * scif_fence_wait() returns after all RMAs marked with mark have completed.
- * The value passed in mark must have been obtained in a previous call to
- * scif_fence_mark().
- *
- * Return:
- * Upon successful completion, scif_fence_wait() returns 0; otherwise in user
- * mode -1 is returned and errno is set to indicate the error; in kernel mode
- * the negative of one of the following errors is returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * ENODEV - The remote node is lost, or existed but is no longer in the
- * network since it may have crashed
- * ENOTCONN - The endpoint is not connected
- * ENOMEM - Insufficient kernel memory was available
- */
-int scif_fence_wait(scif_epd_t epd, int mark);
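
A sketch of ordering asynchronous RMAs with the pair above (lwin and roffset
carry over from the earlier sketch): issue the transfers first, then mark the
outstanding set and wait on it:

	int mark;

	scif_writeto(ep, lwin, PAGE_SIZE, roffset, 0);	/* async: no SCIF_RMA_SYNC */
	scif_fence_mark(ep, SCIF_FENCE_INIT_SELF, &mark);
	scif_fence_wait(ep, mark);	/* returns once the marked RMAs complete */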
-
-/**
- * scif_fence_signal() - Request a memory update on completion of RMAs
- * @epd: endpoint descriptor
- * @loff: local offset
- * @lval: local value to write to loffset
- * @roff: remote offset
- * @rval: remote value to write to roffset
- * @flags: flags
- *
- * scif_fence_signal() returns after marking the current set of all uncompleted
- * RMAs initiated through the endpoint epd or marking the current set of all
- * uncompleted RMAs initiated through the peer of endpoint epd.
- *
- * If flags includes SCIF_SIGNAL_LOCAL, then on completion of the RMAs in the
- * marked set, lval is written to memory at the address corresponding to offset
- * loff in the local registered address space of epd. loff must be within a
- * registered window. If flags includes SCIF_SIGNAL_REMOTE, then on completion
- * of the RMAs in the marked set, rval is written to memory at the address
- * corresponding to offset roff in the remote registered address space of epd.
- * roff must be within a remote registered window of the peer of epd. Note
- * that any specified offset must be DWORD (4 byte / 32 bit) aligned.
- *
- * The flags argument is formed by OR'ing together the following.
- * Exactly one of the following values.
- * SCIF_FENCE_INIT_SELF - RMA operations initiated through endpoint
- * epd are marked
- * SCIF_FENCE_INIT_PEER - RMA operations initiated through the peer
- * of endpoint epd are marked
- * One or more of the following values.
- * SCIF_SIGNAL_LOCAL - On completion of the marked set of RMAs, write lval to
- * memory at the address corresponding to offset loff in the local
- * registered address space of epd.
- * SCIF_SIGNAL_REMOTE - On completion of the marked set of RMAs, write rval to
- * memory at the address corresponding to offset roff in the remote
- * registered address space of epd.
- *
- * Return:
- * Upon successful completion, scif_fence_signal() returns 0; otherwise in
- * user mode -1 is returned and errno is set to indicate the error; in kernel
- * mode the negative of one of the following errors is returned.
- *
- * Errors:
- * EBADF, ENOTTY - epd is not a valid endpoint descriptor
- * ECONNRESET - Connection reset by peer
- * EINVAL - flags is invalid, or loff or roff are not DWORD aligned
- * ENODEV - The remote node is lost, or existed but is no longer in the
- * network since it may have crashed
- * ENOTCONN - The endpoint is not connected
- * ENXIO - loff is invalid for the registered address space of epd, or roff is
- * invalid for the registered address space of the peer of epd
- */
-int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval, off_t roff,
- u64 rval, int flags);
-
-/**
- * scif_get_node_ids() - Return information about online nodes
- * @nodes: array in which to return online node IDs
- * @len: number of entries in the nodes array
- * @self: address to place the node ID of the local node
- *
- * scif_get_node_ids() fills in the nodes array with up to len node IDs of the
- * nodes in the SCIF network. If there is not enough space in nodes, as
- * indicated by the len parameter, only len node IDs are returned in nodes. The
- * return value of scif_get_node_ids() is the total number of nodes currently in
- * the SCIF network. By checking the return value against the len parameter,
- * the user may determine if enough space for nodes was allocated.
- *
- * The node ID of the local node is returned at self.
- *
- * Return:
- * Upon successful completion, scif_get_node_ids() returns the actual number of
- * online nodes in the SCIF network including 'self'; otherwise in user mode
- * -1 is returned and errno is set to indicate the error; in kernel mode no
- * errors are returned.
- */
-int scif_get_node_ids(u16 *nodes, int len, u16 *self);
-
-/**
- * scif_pin_pages() - Pin a set of pages
- * @addr: Virtual address of range to pin
- * @len: Length of range to pin
- * @prot_flags: Page protection flags
- * @map_flags: Page classification flags
- * @pinned_pages: Handle to pinned pages
- *
- * scif_pin_pages() pins (locks in physical memory) the physical pages which
- * back the range of virtual address pages starting at addr and continuing for
- * len bytes. addr and len are constrained to be multiples of the page size. A
- * successful scif_pin_pages() call returns a handle to pinned_pages which may
- * be used in subsequent calls to scif_register_pinned_pages().
- *
- * The pages will remain pinned as long as there is a reference against the
- * scif_pinned_pages_t value returned by scif_pin_pages() and until
- * scif_unpin_pages() is called, passing the scif_pinned_pages_t value. A
- * reference is added to a scif_pinned_pages_t value each time a window is
- * created by calling scif_register_pinned_pages() and passing the
- * scif_pinned_pages_t value. A reference is removed from a
- * scif_pinned_pages_t value each time such a window is deleted.
- *
- * Subsequent operations which change the memory pages to which virtual
- * addresses are mapped (such as mmap(), munmap()) have no effect on the
- * scif_pinned_pages_t value or windows created against it.
- *
- * If the process will fork(), it is recommended that the registered
- * virtual address range be marked with MADV_DONTFORK. Doing so will prevent
- * problems due to copy-on-write semantics.
- *
- * The prot_flags argument is formed by OR'ing together one or more of the
- * following values.
- * SCIF_PROT_READ - allow read operations against the pages
- * SCIF_PROT_WRITE - allow write operations against the pages
- * The map_flags argument can be set as SCIF_MAP_KERNEL to interpret addr as a
- * kernel space address. By default, addr is interpreted as a user space
- * address.
- *
- * Return:
- * Upon successful completion, scif_pin_pages() returns 0; otherwise the
- * negative of one of the following errors is returned.
- *
- * Errors:
- * EINVAL - prot_flags is invalid, or map_flags is invalid
- * ENOMEM - Not enough space
- */
-int scif_pin_pages(void *addr, size_t len, int prot_flags, int map_flags,
- scif_pinned_pages_t *pinned_pages);
-
-/**
- * scif_unpin_pages() - Unpin a set of pages
- * @pinned_pages: Handle to pinned pages to be unpinned
- *
- * scif_unpin_pages() prevents scif_register_pinned_pages() from registering new
- * windows against pinned_pages. The physical pages represented by pinned_pages
- * will remain pinned until all windows previously registered against
- * pinned_pages are deleted (the window is scif_unregister()'d and all
- * references to the window are removed; see scif_unregister()).
- *
- * pinned_pages must have been obtained from a previous call to scif_pin_pages().
- * After calling scif_unpin_pages(), it is an error to pass pinned_pages to
- * scif_register_pinned_pages().
- *
- * Return:
- * Upon successful completion, scif_unpin_pages() returns 0; otherwise the
- * negative of one of the following errors is returned.
- *
- * Errors:
- * EINVAL - pinned_pages is not valid
- */
-int scif_unpin_pages(scif_pinned_pages_t pinned_pages);
-
-/**
- * scif_register_pinned_pages() - Mark a memory region for remote access.
- * @epd: endpoint descriptor
- * @pinned_pages: Handle to pinned pages
- * @offset: Registered address space offset
- * @map_flags: Flags which control where pages are mapped
- *
- * The scif_register_pinned_pages() function opens a window, a range of whole
- * pages of the registered address space of the endpoint epd, starting at
- * offset po. The value of po, further described below, is a function of the
- * parameters offset and pinned_pages, and the value of map_flags. Each page of
- * the window represents a corresponding physical memory page of the range
- * represented by pinned_pages; the length of the window is the same as the
- * length of range represented by pinned_pages. A successful
- * scif_register_pinned_pages() call returns po as the return value.
- *
- * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset
- * exactly, and offset is constrained to be a multiple of the page size. The
- * mapping established by scif_register_pinned_pages() will not replace any
- * existing registration; an error is returned if any page of the new window
- * would intersect an existing window.
- *
- * When SCIF_MAP_FIXED is not set, the implementation uses offset in an
- * implementation-defined manner to arrive at po. The po so chosen will be an
- * area of the registered address space that the implementation deems suitable
- * for a mapping of the required size. An offset value of 0 is interpreted as
- * granting the implementation complete freedom in selecting po, subject to
- * constraints described below. A non-zero value of offset is taken to be a
- * suggestion of an offset near which the mapping should be placed. When the
- * implementation selects a value for po, it does not replace any extant
- * window. In all cases, po will be a multiple of the page size.
- *
- * The physical pages which are so represented by a window are available for
- * access in calls to scif_get_pages(), scif_readfrom(), scif_writeto(),
- * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the
- * physical pages represented by the window will not be reused by the memory
- * subsystem for any other purpose. Note that the same physical page may be
- * represented by multiple windows.
- *
- * Windows created by scif_register_pinned_pages() are unregistered by
- * scif_unregister().
- *
- * The map_flags argument can be set to SCIF_MAP_FIXED, in which case offset is
- * interpreted as a fixed offset.
- *
- * Return:
- * Upon successful completion, scif_register_pinned_pages() returns the offset
- * at which the mapping was placed (po); otherwise the negative of one of the
- * following errors is returned.
- *
- * Errors:
- * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags and pages in the new window
- * would intersect an existing window
- * EAGAIN - The mapping could not be performed due to lack of resources
- * ECONNRESET - Connection reset by peer
- * EINVAL - map_flags is invalid, or SCIF_MAP_FIXED is set in map_flags, and
- * offset is not a multiple of the page size, or offset is negative
- * ENODEV - The remote node is lost or has exited, but is not currently in the
- * network since it may have crashed
- * ENOMEM - Not enough space
- * ENOTCONN - The endpoint is not connected
- */
-off_t scif_register_pinned_pages(scif_epd_t epd,
- scif_pinned_pages_t pinned_pages,
- off_t offset, int map_flags);
-
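
A hedged sketch of the pin-and-register flow described above: pin a page-aligned kernel buffer, then expose it through a window at an implementation-chosen offset. The function name is illustrative and error handling is reduced to the minimum:

#include <linux/scif.h>

static off_t example_expose_buffer(scif_epd_t epd, void *buf, size_t len)
{
	scif_pinned_pages_t pages;
	off_t win;
	int err;

	/* buf and len are assumed to be multiples of the page size */
	err = scif_pin_pages(buf, len, SCIF_PROT_READ | SCIF_PROT_WRITE,
			     SCIF_MAP_KERNEL, &pages);
	if (err)
		return err;

	/* offset 0 without SCIF_MAP_FIXED: the implementation picks po */
	win = scif_register_pinned_pages(epd, pages, 0, 0);
	if (win < 0)
		scif_unpin_pages(pages);

	return win;
}
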
-/**
- * scif_get_pages() - Add references to remote registered pages
- * @epd: endpoint descriptor
- * @offset: remote registered offset
- * @len: length of range of pages
- * @pages: returned scif_range structure
- *
- * scif_get_pages() returns the addresses of the physical pages represented by
- * those pages of the registered address space of the peer of epd, starting at
- * offset and continuing for len bytes. offset and len are constrained to be
- * multiples of the page size.
- *
- * All of the pages in the specified range [offset, offset + len - 1] must be
- * within a single window of the registered address space of the peer of epd.
- *
- * The addresses are returned as a virtually contiguous array pointed to by the
- * phys_addr component of the scif_range structure whose address is returned in
- * pages. The nr_pages component of scif_range is the length of the array. The
- * prot_flags component of scif_range holds the protection flag value passed
- * when the pages were registered.
- *
- * Each physical page whose address is returned by scif_get_pages() remains
- * available and will not be released for reuse until the scif_range structure
- * is returned in a call to scif_put_pages(). The scif_range structure returned
- * by scif_get_pages() must be unmodified.
- *
- * It is an error to call scif_close() on an endpoint while a scif_range
- * structure obtained on that endpoint has not yet been returned via
- * scif_put_pages().
- *
- * Return:
- * Upon successful completion, scif_get_pages() returns 0; otherwise the
- * negative of one of the following errors is returned.
- * Errors:
- * ECONNRESET - Connection reset by peer.
- * EINVAL - offset is not a multiple of the page size, or offset is negative, or
- * len is not a multiple of the page size
- * ENODEV - The remote node is lost or has exited, but is not currently in the
- * network since it may have crashed
- * ENOTCONN - The endpoint is not connected
- * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid
- * for the registered address space of the peer epd
- */
-int scif_get_pages(scif_epd_t epd, off_t offset, size_t len,
- struct scif_range **pages);
-
-/**
- * scif_put_pages() - Remove references from remote registered pages
- * @pages: pages to be returned
- *
- * scif_put_pages() releases a scif_range structure previously obtained by
- * calling scif_get_pages(). The physical pages represented by pages may
- * be reused when the window which represented those pages is unregistered.
- * Therefore, those pages must not be accessed after calling scif_put_pages().
- *
- * Return:
- * Upon successful completion, scif_put_pages() returns 0; otherwise the
- * negative of one of the following errors is returned.
- * Errors:
- * EINVAL - pages does not point to a valid scif_range structure, or
- * the scif_range structure pointed to by pages was already returned
- * ENODEV - The remote node is lost or has exited, but is not currently in the
- * network since it may have crashed
- * ENOTCONN - The endpoint is not connected
- */
-int scif_put_pages(struct scif_range *pages);
-
-/**
- * scif_poll() - Wait for some event on an endpoint
- * @epds: Array of endpoint descriptors
- * @nepds: Length of epds
- * @timeout: Upper limit on time for which scif_poll() will block
- *
- * scif_poll() waits for one of a set of endpoints to become ready to perform
- * an I/O operation.
- *
- * The epds argument specifies the endpoint descriptors to be examined and the
- * events of interest for each endpoint descriptor. epds is a pointer to an
- * array with one member for each open endpoint descriptor of interest.
- *
- * The number of items in the epds array is specified in nepds. The epd field
- * of scif_pollepd is an endpoint descriptor of an open endpoint. The field
- * events is a bitmask specifying the events which the application is
- * interested in. The field revents is an output parameter, filled by the
- * kernel with the events that actually occurred. The bits returned in revents
- * can include any of those specified in events, or one of the values POLLERR,
- * POLLHUP, or POLLNVAL. (These three bits are meaningless in the events
- * field, and will be set in the revents field whenever the corresponding
- * condition is true.)
- *
- * If none of the events requested (and no error) has occurred for any of the
- * endpoint descriptors, then scif_poll() blocks until one of the events occurs.
- *
- * The timeout argument specifies an upper limit on the time for which
- * scif_poll() will block, in milliseconds. Specifying a negative value in
- * timeout means an infinite timeout.
- *
- * The following bits may be set in events and returned in revents.
- * POLLIN - Data may be received without blocking. For a connected
- * endpoint, this means that scif_recv() may be called without blocking. For a
- * listening endpoint, this means that scif_accept() may be called without
- * blocking.
- * POLLOUT - Data may be sent without blocking. For a connected endpoint, this
- * means that scif_send() may be called without blocking. POLLOUT may also be
- * used to block waiting for a non-blocking connect to complete. This bit value
- * has no meaning for a listening endpoint and is ignored if specified.
- *
- * The following bits are only returned in revents, and are ignored if set in
- * events.
- * POLLERR - An error occurred on the endpoint
- * POLLHUP - The connection to the peer endpoint was disconnected
- * POLLNVAL - The specified endpoint descriptor is invalid.
- *
- * Return:
- * Upon successful completion, scif_poll() returns a non-negative value. A
- * positive value indicates the total number of endpoint descriptors that have
- * been selected (that is, endpoint descriptors for which the revents member is
- * non-zero). A value of 0 indicates that the call timed out and no endpoint
- * descriptors have been selected. Otherwise in user mode -1 is returned and
- * errno is set to indicate the error; in kernel mode the negative of one of
- * the following errors is returned.
- *
- * Errors:
- * EINTR - A signal occurred before any requested event
- * EINVAL - The nepds argument is greater than {OPEN_MAX}
- * ENOMEM - There was no space to allocate file descriptor tables
- */
-int scif_poll(struct scif_pollepd *epds, unsigned int nepds, long timeout);
-
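
A minimal sketch of the polling flow just described: wait up to one second for an endpoint to become readable. Field names (epd, events, revents) follow the description above; the function name is a placeholder:

#include <linux/poll.h>
#include <linux/scif.h>

static int example_wait_readable(scif_epd_t epd)
{
	struct scif_pollepd pollepd = {
		.epd = epd,
		.events = POLLIN,
	};
	int ret = scif_poll(&pollepd, 1, 1000 /* ms */);

	if (ret > 0 && (pollepd.revents & POLLIN))
		return 0;	/* data may be received without blocking */
	return ret ? ret : -ETIME;	/* error, or timed out */
}
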
-/**
- * scif_client_register() - Register a SCIF client
- * @client: client to be registered
- *
- * scif_client_register() registers a SCIF client. The probe() method
- * of the client is called when SCIF peer devices come online and the
- * remove() method is called when the peer devices disappear.
- *
- * Return:
- * Upon successful completion, scif_client_register() returns a non-negative
- * value. Otherwise the return value is the same as subsys_interface_register()
- * in the kernel.
- */
-int scif_client_register(struct scif_client *client);
-
-/**
- * scif_client_unregister() - Unregister a SCIF client
- * @client: client to be unregistered
- *
- * scif_client_unregister() unregisters a SCIF client.
- *
- * Return:
- * None
- */
-void scif_client_unregister(struct scif_client *client);
-
-#endif /* __SCIF_H__ */
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
new file mode 100644
index 000000000000..0ce5746a4470
--- /dev/null
+++ b/include/linux/scmi_protocol.h
@@ -0,0 +1,968 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * SCMI Message Protocol driver header
+ *
+ * Copyright (C) 2018-2021 ARM Ltd.
+ */
+
+#ifndef _LINUX_SCMI_PROTOCOL_H
+#define _LINUX_SCMI_PROTOCOL_H
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+#define SCMI_MAX_STR_SIZE 64
+#define SCMI_SHORT_NAME_MAX_SIZE 16
+#define SCMI_MAX_NUM_RATES 16
+
+/**
+ * struct scmi_revision_info - version information structure
+ *
+ * @major_ver: Major ABI version. Change here implies risk of backward
+ * compatibility break.
+ * @minor_ver: Minor ABI version. Change here implies new feature addition,
+ * or compatible change in ABI.
+ * @num_protocols: Number of protocols that are implemented, excluding the
+ * base protocol.
+ * @num_agents: Number of agents in the system.
+ * @impl_ver: A vendor-specific implementation version.
+ * @vendor_id: A vendor identifier (NULL-terminated ASCII string)
+ * @sub_vendor_id: A sub-vendor identifier (NULL-terminated ASCII string)
+ */
+struct scmi_revision_info {
+ u16 major_ver;
+ u16 minor_ver;
+ u8 num_protocols;
+ u8 num_agents;
+ u32 impl_ver;
+ char vendor_id[SCMI_SHORT_NAME_MAX_SIZE];
+ char sub_vendor_id[SCMI_SHORT_NAME_MAX_SIZE];
+};
+
+struct scmi_clock_info {
+ char name[SCMI_MAX_STR_SIZE];
+ unsigned int enable_latency;
+ bool rate_discrete;
+ bool rate_changed_notifications;
+ bool rate_change_requested_notifications;
+ union {
+ struct {
+ int num_rates;
+ u64 rates[SCMI_MAX_NUM_RATES];
+ } list;
+ struct {
+ u64 min_rate;
+ u64 max_rate;
+ u64 step_size;
+ } range;
+ };
+};
+
+enum scmi_power_scale {
+ SCMI_POWER_BOGOWATTS,
+ SCMI_POWER_MILLIWATTS,
+ SCMI_POWER_MICROWATTS
+};
+
+struct scmi_handle;
+struct scmi_device;
+struct scmi_protocol_handle;
+
+/**
+ * struct scmi_clk_proto_ops - represents the various operations provided
+ * by SCMI Clock Protocol
+ *
+ * @count_get: get the count of clocks provided by SCMI
+ * @info_get: get the information of the specified clock
+ * @rate_get: request the current clock rate of a clock
+ * @rate_set: set the clock rate of a clock
+ * @enable: enables the specified clock
+ * @disable: disables the specified clock
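+ * @enable_atomic: enables the specified clock from atomic context
+ * @disable_atomic: disables the specified clock from atomic context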
+ */
+struct scmi_clk_proto_ops {
+ int (*count_get)(const struct scmi_protocol_handle *ph);
+
+ const struct scmi_clock_info __must_check *(*info_get)
+ (const struct scmi_protocol_handle *ph, u32 clk_id);
+ int (*rate_get)(const struct scmi_protocol_handle *ph, u32 clk_id,
+ u64 *rate);
+ int (*rate_set)(const struct scmi_protocol_handle *ph, u32 clk_id,
+ u64 rate);
+ int (*enable)(const struct scmi_protocol_handle *ph, u32 clk_id);
+ int (*disable)(const struct scmi_protocol_handle *ph, u32 clk_id);
+ int (*enable_atomic)(const struct scmi_protocol_handle *ph, u32 clk_id);
+ int (*disable_atomic)(const struct scmi_protocol_handle *ph,
+ u32 clk_id);
+};
+
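
As a hedged usage sketch (names are placeholders): enumerate the platform clocks through these operations. "clk_ops" and "ph" are assumed to have been obtained earlier via handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK, &ph):

static void example_list_clocks(const struct scmi_clk_proto_ops *clk_ops,
				const struct scmi_protocol_handle *ph)
{
	int i, num = clk_ops->count_get(ph);

	for (i = 0; i < num; i++) {
		const struct scmi_clock_info *info = clk_ops->info_get(ph, i);
		u64 rate;

		if (!info || clk_ops->rate_get(ph, i, &rate))
			continue;
		pr_info("clk %s: %llu Hz\n", info->name, rate);
	}
}
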
+/**
+ * struct scmi_perf_proto_ops - represents the various operations provided
+ * by SCMI Performance Protocol
+ *
+ * @limits_set: sets limits on the performance level of a domain
+ * @limits_get: gets limits on the performance level of a domain
+ * @level_set: sets the performance level of a domain
+ * @level_get: gets the performance level of a domain
+ * @device_domain_id: gets the scmi domain id for a given device
+ * @transition_latency_get: gets the DVFS transition latency for a given device
+ * @device_opps_add: adds all the OPPs for a given device
+ * @freq_set: sets the frequency for a given device using sustained frequency
+ * to sustained performance level mapping
+ * @freq_get: gets the frequency for a given device using sustained frequency
+ * to sustained performance level mapping
+ * @est_power_get: gets the estimated power cost for a given performance domain
+ * at a given frequency
+ * @fast_switch_possible: indicates if fast DVFS switching is possible or not
+ * for a given device
+ * @power_scale_get: returns the scale (bogoWatts, milliWatts or microWatts)
+ *	in which the power values are expressed
+ */
+struct scmi_perf_proto_ops {
+ int (*limits_set)(const struct scmi_protocol_handle *ph, u32 domain,
+ u32 max_perf, u32 min_perf);
+ int (*limits_get)(const struct scmi_protocol_handle *ph, u32 domain,
+ u32 *max_perf, u32 *min_perf);
+ int (*level_set)(const struct scmi_protocol_handle *ph, u32 domain,
+ u32 level, bool poll);
+ int (*level_get)(const struct scmi_protocol_handle *ph, u32 domain,
+ u32 *level, bool poll);
+ int (*device_domain_id)(struct device *dev);
+ int (*transition_latency_get)(const struct scmi_protocol_handle *ph,
+ struct device *dev);
+ int (*device_opps_add)(const struct scmi_protocol_handle *ph,
+ struct device *dev);
+ int (*freq_set)(const struct scmi_protocol_handle *ph, u32 domain,
+ unsigned long rate, bool poll);
+ int (*freq_get)(const struct scmi_protocol_handle *ph, u32 domain,
+ unsigned long *rate, bool poll);
+ int (*est_power_get)(const struct scmi_protocol_handle *ph, u32 domain,
+ unsigned long *rate, unsigned long *power);
+ bool (*fast_switch_possible)(const struct scmi_protocol_handle *ph,
+ struct device *dev);
+ enum scmi_power_scale (*power_scale_get)(const struct scmi_protocol_handle *ph);
+};
+
+/**
+ * struct scmi_power_proto_ops - represents the various operations provided
+ * by SCMI Power Protocol
+ *
+ * @num_domains_get: get the count of power domains provided by SCMI
+ * @name_get: gets the name of a power domain
+ * @state_set: sets the power state of a power domain
+ * @state_get: gets the power state of a power domain
+ */
+struct scmi_power_proto_ops {
+ int (*num_domains_get)(const struct scmi_protocol_handle *ph);
+ const char *(*name_get)(const struct scmi_protocol_handle *ph,
+ u32 domain);
+#define SCMI_POWER_STATE_TYPE_SHIFT 30
+#define SCMI_POWER_STATE_ID_MASK (BIT(28) - 1)
+#define SCMI_POWER_STATE_PARAM(type, id) \
+ ((((type) & BIT(0)) << SCMI_POWER_STATE_TYPE_SHIFT) | \
+ ((id) & SCMI_POWER_STATE_ID_MASK))
+#define SCMI_POWER_STATE_GENERIC_ON SCMI_POWER_STATE_PARAM(0, 0)
+#define SCMI_POWER_STATE_GENERIC_OFF SCMI_POWER_STATE_PARAM(1, 0)
+ int (*state_set)(const struct scmi_protocol_handle *ph, u32 domain,
+ u32 state);
+ int (*state_get)(const struct scmi_protocol_handle *ph, u32 domain,
+ u32 *state);
+};
+
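
A small sketch of the generic power states encoded by SCMI_POWER_STATE_PARAM(): power a domain off and back on. "pwr_ops" and "ph" are assumed to come from devm_protocol_get(..., SCMI_PROTOCOL_POWER, ...), and the function name is illustrative:

static int example_power_cycle(const struct scmi_power_proto_ops *pwr_ops,
			       const struct scmi_protocol_handle *ph,
			       u32 domain)
{
	int ret = pwr_ops->state_set(ph, domain, SCMI_POWER_STATE_GENERIC_OFF);

	if (ret)
		return ret;
	return pwr_ops->state_set(ph, domain, SCMI_POWER_STATE_GENERIC_ON);
}
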
+/**
+ * struct scmi_sensor_reading - represent a timestamped read
+ *
+ * Used by @reading_get_timestamped method.
+ *
+ * @value: The signed value sensor read.
+ * @timestamp: An unsigned timestamp for the sensor read, as provided by
+ * SCMI platform. Set to zero when not available.
+ */
+struct scmi_sensor_reading {
+ long long value;
+ unsigned long long timestamp;
+};
+
+/**
+ * struct scmi_range_attrs - specifies a sensor or axis values' range
+ * @min_range: The minimum value which can be represented by the sensor/axis.
+ * @max_range: The maximum value which can be represented by the sensor/axis.
+ */
+struct scmi_range_attrs {
+ long long min_range;
+ long long max_range;
+};
+
+/**
+ * struct scmi_sensor_axis_info - describes one sensor axis
+ * @id: The axis ID.
+ * @type: Axis type. Chosen amongst one of @enum scmi_sensor_class.
+ * @scale: Power-of-10 multiplier applied to the axis unit.
+ * @name: NULL-terminated string representing the axis name as advertised by
+ *	  the SCMI platform.
+ * @extended_attrs: Flag to indicate the presence of additional extended
+ *		    attributes for this axis.
+ * @resolution: Extended attribute representing the resolution of the axis.
+ *		Set to 0 if not reported by this axis.
+ * @exponent: Extended attribute representing the power-of-10 multiplier that
+ *	      is applied to the resolution field. Set to 0 if not reported by
+ *	      this axis.
+ * @attrs: Extended attributes representing the minimum and maximum values
+ *	   measurable by this axis. Set to 0 if not reported by this axis.
+ */
+struct scmi_sensor_axis_info {
+ unsigned int id;
+ unsigned int type;
+ int scale;
+ char name[SCMI_MAX_STR_SIZE];
+ bool extended_attrs;
+ unsigned int resolution;
+ int exponent;
+ struct scmi_range_attrs attrs;
+};
+
+/**
+ * struct scmi_sensor_intervals_info - describes number and type of available
+ * update intervals
+ * @segmented: Flag for segmented intervals' representation. When True there
+ * will be exactly 3 intervals in @desc, with each entry
+ * representing a member of a segment in this order:
+ * {lowest update interval, highest update interval, step size}
+ * @count: Number of intervals described in @desc.
+ * @desc: Array of @count interval descriptor bitmask represented as detailed in
+ * the SCMI specification: it can be accessed using the accompanying
+ * macros.
+ * @prealloc_pool: A minimal preallocated pool of desc entries used to avoid
+ *		   dynamic allocations smaller than 64 bytes for small @count
+ *		   values.
+ */
+struct scmi_sensor_intervals_info {
+ bool segmented;
+ unsigned int count;
+#define SCMI_SENS_INTVL_SEGMENT_LOW 0
+#define SCMI_SENS_INTVL_SEGMENT_HIGH 1
+#define SCMI_SENS_INTVL_SEGMENT_STEP 2
+ unsigned int *desc;
+#define SCMI_SENS_INTVL_GET_SECS(x) FIELD_GET(GENMASK(20, 5), (x))
+#define SCMI_SENS_INTVL_GET_EXP(x) \
+ ({ \
+ int __signed_exp = FIELD_GET(GENMASK(4, 0), (x)); \
+ \
+ if (__signed_exp & BIT(4)) \
+ __signed_exp |= GENMASK(31, 5); \
+ __signed_exp; \
+ })
+#define SCMI_MAX_PREALLOC_POOL 16
+ unsigned int prealloc_pool[SCMI_MAX_PREALLOC_POOL];
+};
+
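
For illustration, a sketch decoding one interval descriptor with the accessor macros above, assuming the seconds-times-power-of-10 encoding the macro names describe:

static void example_print_interval(unsigned int desc)
{
	unsigned int secs = SCMI_SENS_INTVL_GET_SECS(desc);
	int exp = SCMI_SENS_INTVL_GET_EXP(desc);

	/* update interval = secs x 10^exp seconds */
	pr_info("update interval: %u x 10^%d seconds\n", secs, exp);
}
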
+/**
+ * struct scmi_sensor_info - represents information related to one of the
+ * available sensors.
+ * @id: Sensor ID.
+ * @type: Sensor type. Chosen amongst one of @enum scmi_sensor_class.
+ * @scale: Power-of-10 multiplier applied to the sensor unit.
+ * @num_trip_points: Number of maximum configurable trip points.
+ * @async: Flag for asynchronous read support.
+ * @update: Flag for continuous update notification support.
+ * @timestamped: Flag for timestamped read support.
+ * @tstamp_scale: Power-of-10 multiplier applied to the sensor timestamps to
+ * represent it in seconds.
+ * @num_axis: Number of supported axis if any. Reported as 0 for scalar sensors.
+ * @axis: Pointer to an array of @num_axis descriptors.
+ * @intervals: Descriptor of available update intervals.
+ * @sensor_config: A bitmask reporting the current sensor configuration as
+ *		   detailed in the SCMI specification: it can be accessed and
+ * modified through the accompanying macros.
+ * @name: NULL-terminated string representing sensor name as advertised by
+ * SCMI platform.
+ * @extended_scalar_attrs: Flag to indicate the presence of additional extended
+ * attributes for this sensor.
+ * @sensor_power: Extended attribute representing the average power
+ * consumed by the sensor in microwatts (uW) when it is active.
+ * Reported here only for scalar sensors.
+ * Set to 0 if not reported by this sensor.
+ * @resolution: Extended attribute representing the resolution of the sensor.
+ * Reported here only for scalar sensors.
+ * Set to 0 if not reported by this sensor.
+ * @exponent: Extended attribute representing the power-of-10 multiplier that is
+ * applied to the resolution field.
+ * Reported here only for scalar sensors.
+ * Set to 0 if not reported by this sensor.
+ * @scalar_attrs: Extended attributes representing minimum and maximum
+ * measurable values by this sensor.
+ * Reported here only for scalar sensors.
+ * Set to 0 if not reported by this sensor.
+ */
+struct scmi_sensor_info {
+ unsigned int id;
+ unsigned int type;
+ int scale;
+ unsigned int num_trip_points;
+ bool async;
+ bool update;
+ bool timestamped;
+ int tstamp_scale;
+ unsigned int num_axis;
+ struct scmi_sensor_axis_info *axis;
+ struct scmi_sensor_intervals_info intervals;
+ unsigned int sensor_config;
+#define SCMI_SENS_CFG_UPDATE_SECS_MASK GENMASK(31, 16)
+#define SCMI_SENS_CFG_GET_UPDATE_SECS(x) \
+ FIELD_GET(SCMI_SENS_CFG_UPDATE_SECS_MASK, (x))
+
+#define SCMI_SENS_CFG_UPDATE_EXP_MASK GENMASK(15, 11)
+#define SCMI_SENS_CFG_GET_UPDATE_EXP(x) \
+ ({ \
+ int __signed_exp = \
+ FIELD_GET(SCMI_SENS_CFG_UPDATE_EXP_MASK, (x)); \
+ \
+ if (__signed_exp & BIT(4)) \
+ __signed_exp |= GENMASK(31, 5); \
+ __signed_exp; \
+ })
+
+#define SCMI_SENS_CFG_ROUND_MASK GENMASK(10, 9)
+#define SCMI_SENS_CFG_ROUND_AUTO 2
+#define SCMI_SENS_CFG_ROUND_UP 1
+#define SCMI_SENS_CFG_ROUND_DOWN 0
+
+#define SCMI_SENS_CFG_TSTAMP_ENABLED_MASK BIT(1)
+#define SCMI_SENS_CFG_TSTAMP_ENABLE 1
+#define SCMI_SENS_CFG_TSTAMP_DISABLE 0
+#define SCMI_SENS_CFG_IS_TSTAMP_ENABLED(x) \
+ FIELD_GET(SCMI_SENS_CFG_TSTAMP_ENABLED_MASK, (x))
+
+#define SCMI_SENS_CFG_SENSOR_ENABLED_MASK BIT(0)
+#define SCMI_SENS_CFG_SENSOR_ENABLE 1
+#define SCMI_SENS_CFG_SENSOR_DISABLE 0
+ char name[SCMI_MAX_STR_SIZE];
+#define SCMI_SENS_CFG_IS_ENABLED(x) FIELD_GET(BIT(0), (x))
+ bool extended_scalar_attrs;
+ unsigned int sensor_power;
+ unsigned int resolution;
+ int exponent;
+ struct scmi_range_attrs scalar_attrs;
+};
+
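
A hedged sketch showing how a @sensor_config word can be inspected with the accompanying macros defined inside the structure above:

static void example_dump_sensor_config(unsigned int cfg)
{
	pr_info("enabled=%lu tstamp=%lu update=%lu x 10^%d s\n",
		SCMI_SENS_CFG_IS_ENABLED(cfg),
		SCMI_SENS_CFG_IS_TSTAMP_ENABLED(cfg),
		SCMI_SENS_CFG_GET_UPDATE_SECS(cfg),
		(int)SCMI_SENS_CFG_GET_UPDATE_EXP(cfg));
}
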
+/*
+ * Partial list from Distributed Management Task Force (DMTF) specification:
+ * DSP0249 (Platform Level Data Model specification)
+ */
+enum scmi_sensor_class {
+ NONE = 0x0,
+ UNSPEC = 0x1,
+ TEMPERATURE_C = 0x2,
+ TEMPERATURE_F = 0x3,
+ TEMPERATURE_K = 0x4,
+ VOLTAGE = 0x5,
+ CURRENT = 0x6,
+ POWER = 0x7,
+ ENERGY = 0x8,
+ CHARGE = 0x9,
+ VOLTAMPERE = 0xA,
+ NITS = 0xB,
+ LUMENS = 0xC,
+ LUX = 0xD,
+ CANDELAS = 0xE,
+ KPA = 0xF,
+ PSI = 0x10,
+ NEWTON = 0x11,
+ CFM = 0x12,
+ RPM = 0x13,
+ HERTZ = 0x14,
+ SECS = 0x15,
+ MINS = 0x16,
+ HOURS = 0x17,
+ DAYS = 0x18,
+ WEEKS = 0x19,
+ MILS = 0x1A,
+ INCHES = 0x1B,
+ FEET = 0x1C,
+ CUBIC_INCHES = 0x1D,
+ CUBIC_FEET = 0x1E,
+ METERS = 0x1F,
+ CUBIC_CM = 0x20,
+ CUBIC_METERS = 0x21,
+ LITERS = 0x22,
+ FLUID_OUNCES = 0x23,
+ RADIANS = 0x24,
+ STERADIANS = 0x25,
+ REVOLUTIONS = 0x26,
+ CYCLES = 0x27,
+ GRAVITIES = 0x28,
+ OUNCES = 0x29,
+ POUNDS = 0x2A,
+ FOOT_POUNDS = 0x2B,
+ OUNCE_INCHES = 0x2C,
+ GAUSS = 0x2D,
+ GILBERTS = 0x2E,
+ HENRIES = 0x2F,
+ FARADS = 0x30,
+ OHMS = 0x31,
+ SIEMENS = 0x32,
+ MOLES = 0x33,
+ BECQUERELS = 0x34,
+ PPM = 0x35,
+ DECIBELS = 0x36,
+ DBA = 0x37,
+ DBC = 0x38,
+ GRAYS = 0x39,
+ SIEVERTS = 0x3A,
+ COLOR_TEMP_K = 0x3B,
+ BITS = 0x3C,
+ BYTES = 0x3D,
+ WORDS = 0x3E,
+ DWORDS = 0x3F,
+ QWORDS = 0x40,
+ PERCENTAGE = 0x41,
+ PASCALS = 0x42,
+ COUNTS = 0x43,
+ GRAMS = 0x44,
+ NEWTON_METERS = 0x45,
+ HITS = 0x46,
+ MISSES = 0x47,
+ RETRIES = 0x48,
+ OVERRUNS = 0x49,
+ UNDERRUNS = 0x4A,
+ COLLISIONS = 0x4B,
+ PACKETS = 0x4C,
+ MESSAGES = 0x4D,
+ CHARS = 0x4E,
+ ERRORS = 0x4F,
+ CORRECTED_ERRS = 0x50,
+ UNCORRECTABLE_ERRS = 0x51,
+ SQ_MILS = 0x52,
+ SQ_INCHES = 0x53,
+ SQ_FEET = 0x54,
+ SQ_CM = 0x55,
+ SQ_METERS = 0x56,
+ RADIANS_SEC = 0x57,
+ BPM = 0x58,
+ METERS_SEC_SQUARED = 0x59,
+ METERS_SEC = 0x5A,
+ CUBIC_METERS_SEC = 0x5B,
+ MM_MERCURY = 0x5C,
+ RADIANS_SEC_SQUARED = 0x5D,
+ OEM_UNIT = 0xFF
+};
+
+/**
+ * struct scmi_sensor_proto_ops - represents the various operations provided
+ * by SCMI Sensor Protocol
+ *
+ * @count_get: get the count of sensors provided by SCMI
+ * @info_get: get the information of the specified sensor
+ * @trip_point_config: selects and configures a trip-point of interest
+ * @reading_get: gets the current value of the sensor
+ * @reading_get_timestamped: gets the current value and timestamp, when
+ * available, of the sensor. (as of v3.0 spec)
+ *			     Supports multi-axis sensors when the sensor
+ *			     supports it and the @readings array of @count
+ *			     entries matches the sensor num_axis
+ * @config_get: Get sensor current configuration
+ * @config_set: Set sensor current configuration
+ */
+struct scmi_sensor_proto_ops {
+ int (*count_get)(const struct scmi_protocol_handle *ph);
+ const struct scmi_sensor_info __must_check *(*info_get)
+ (const struct scmi_protocol_handle *ph, u32 sensor_id);
+ int (*trip_point_config)(const struct scmi_protocol_handle *ph,
+ u32 sensor_id, u8 trip_id, u64 trip_value);
+ int (*reading_get)(const struct scmi_protocol_handle *ph, u32 sensor_id,
+ u64 *value);
+ int (*reading_get_timestamped)(const struct scmi_protocol_handle *ph,
+ u32 sensor_id, u8 count,
+ struct scmi_sensor_reading *readings);
+ int (*config_get)(const struct scmi_protocol_handle *ph,
+ u32 sensor_id, u32 *sensor_config);
+ int (*config_set)(const struct scmi_protocol_handle *ph,
+ u32 sensor_id, u32 sensor_config);
+};
+
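
As a usage sketch (the function name is a placeholder): fetch one timestamped reading from a scalar sensor. "sensor_ops" and "ph" are assumed to come from devm_protocol_get(..., SCMI_PROTOCOL_SENSOR, ...):

static int example_read_sensor(const struct scmi_sensor_proto_ops *sensor_ops,
			       const struct scmi_protocol_handle *ph,
			       u32 sensor_id)
{
	struct scmi_sensor_reading reading;
	int ret = sensor_ops->reading_get_timestamped(ph, sensor_id, 1,
						      &reading);

	if (!ret)
		pr_info("sensor %u: %lld @ %llu\n", sensor_id,
			reading.value, reading.timestamp);
	return ret;
}
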
+/**
+ * struct scmi_reset_proto_ops - represents the various operations provided
+ * by SCMI Reset Protocol
+ *
+ * @num_domains_get: get the count of reset domains provided by SCMI
+ * @name_get: gets the name of a reset domain
+ * @latency_get: gets the reset latency for the specified reset domain
+ * @reset: resets the specified reset domain
+ * @assert: explicitly assert reset signal of the specified reset domain
+ * @deassert: explicitly deassert reset signal of the specified reset domain
+ */
+struct scmi_reset_proto_ops {
+ int (*num_domains_get)(const struct scmi_protocol_handle *ph);
+ const char *(*name_get)(const struct scmi_protocol_handle *ph,
+ u32 domain);
+ int (*latency_get)(const struct scmi_protocol_handle *ph, u32 domain);
+ int (*reset)(const struct scmi_protocol_handle *ph, u32 domain);
+ int (*assert)(const struct scmi_protocol_handle *ph, u32 domain);
+ int (*deassert)(const struct scmi_protocol_handle *ph, u32 domain);
+};
+
+enum scmi_voltage_level_mode {
+ SCMI_VOLTAGE_LEVEL_SET_AUTO,
+ SCMI_VOLTAGE_LEVEL_SET_SYNC,
+};
+
+/**
+ * struct scmi_voltage_info - describe one available SCMI Voltage Domain
+ *
+ * @id: the domain ID as advertised by the platform
+ * @segmented: defines the layout of the entries of array @levels_uv.
+ * - when True the entries are to be interpreted as triplets,
+ * each defining a segment representing a range of equally
+ *		spaced voltages: <lowest_volts>, <highest_volt>, <step_uV>
+ * - when False the entries simply represent a single discrete
+ * supported voltage level
+ * @negative_volts_allowed: True if any of the entries of @levels_uv represent
+ * a negative voltage.
+ * @async_level_set: True when the voltage domain supports asynchronous level
+ * set commands.
+ * @name: name assigned to the Voltage Domain by platform
+ * @num_levels: number of total entries in @levels_uv.
+ * @levels_uv: array of entries describing the available voltage levels for
+ * this domain.
+ */
+struct scmi_voltage_info {
+ unsigned int id;
+ bool segmented;
+ bool negative_volts_allowed;
+ bool async_level_set;
+ char name[SCMI_MAX_STR_SIZE];
+ unsigned int num_levels;
+#define SCMI_VOLTAGE_SEGMENT_LOW 0
+#define SCMI_VOLTAGE_SEGMENT_HIGH 1
+#define SCMI_VOLTAGE_SEGMENT_STEP 2
+ int *levels_uv;
+};
+
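
A sketch walking @levels_uv in both layouts described above; for the segmented case it steps through the <low, high, step> triplets using the SCMI_VOLTAGE_SEGMENT_* indices:

static void example_dump_levels(const struct scmi_voltage_info *info)
{
	unsigned int i;

	if (!info->segmented) {
		/* discrete layout: one supported voltage level per entry */
		for (i = 0; i < info->num_levels; i++)
			pr_info("%s: %d uV\n", info->name, info->levels_uv[i]);
		return;
	}

	/* segmented layout: entries are <low, high, step> triplets */
	for (i = 0; i + SCMI_VOLTAGE_SEGMENT_STEP < info->num_levels; i += 3)
		pr_info("%s: %d..%d uV step %d uV\n", info->name,
			info->levels_uv[i + SCMI_VOLTAGE_SEGMENT_LOW],
			info->levels_uv[i + SCMI_VOLTAGE_SEGMENT_HIGH],
			info->levels_uv[i + SCMI_VOLTAGE_SEGMENT_STEP]);
}
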
+/**
+ * struct scmi_voltage_proto_ops - represents the various operations provided
+ * by SCMI Voltage Protocol
+ *
+ * @num_domains_get: get the count of voltage domains provided by SCMI
+ * @info_get: get the information of the specified domain
+ * @config_set: set the config for the specified domain
+ * @config_get: get the config of the specified domain
+ * @level_set: set the voltage level for the specified domain
+ * @level_get: get the voltage level of the specified domain
+ */
+struct scmi_voltage_proto_ops {
+ int (*num_domains_get)(const struct scmi_protocol_handle *ph);
+ const struct scmi_voltage_info __must_check *(*info_get)
+ (const struct scmi_protocol_handle *ph, u32 domain_id);
+ int (*config_set)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 config);
+#define SCMI_VOLTAGE_ARCH_STATE_OFF 0x0
+#define SCMI_VOLTAGE_ARCH_STATE_ON 0x7
+ int (*config_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 *config);
+ int (*level_set)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ enum scmi_voltage_level_mode mode, s32 volt_uV);
+ int (*level_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ s32 *volt_uV);
+};
+
+/**
+ * struct scmi_powercap_info - Describe one available Powercap domain
+ *
+ * @id: Domain ID as advertised by the platform.
+ * @notify_powercap_cap_change: CAP change notification support.
+ * @notify_powercap_measurement_change: MEASUREMENTS change notifications
+ * support.
+ * @async_powercap_cap_set: Asynchronous CAP set support.
+ * @powercap_cap_config: CAP configuration support.
+ * @powercap_monitoring: Monitoring (measurements) support.
+ * @powercap_pai_config: PAI configuration support.
+ * @powercap_scale_mw: Domain reports power data in milliwatt units.
+ * @powercap_scale_uw: Domain reports power data in microwatt units.
+ * Note that, when both @powercap_scale_mw and
+ * @powercap_scale_uw are set to false, the domain
+ * reports power data on an abstract linear scale.
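+ * @fastchannels: Flag to indicate the availability of fastchannels.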
+ * @name: name assigned to the Powercap Domain by platform.
+ * @min_pai: Minimum configurable PAI.
+ * @max_pai: Maximum configurable PAI.
+ * @pai_step: Step size between two consecutive PAI values.
+ * @min_power_cap: Minimum configurable CAP.
+ * @max_power_cap: Maximum configurable CAP.
+ * @power_cap_step: Step size between two consecutive CAP values.
+ * @sustainable_power: Maximum sustainable power consumption for this domain
+ * under normal conditions.
+ * @accuracy: The accuracy with which the power is measured and reported in
+ * integral multiples of 0.001 percent.
+ * @parent_id: Identifier of the containing parent power capping domain, or the
+ * value 0xFFFFFFFF if this powercap domain is a root domain not
+ * contained in any other domain.
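+ * @fc_info: Fastchannel support information, when available.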
+ */
+struct scmi_powercap_info {
+ unsigned int id;
+ bool notify_powercap_cap_change;
+ bool notify_powercap_measurement_change;
+ bool async_powercap_cap_set;
+ bool powercap_cap_config;
+ bool powercap_monitoring;
+ bool powercap_pai_config;
+ bool powercap_scale_mw;
+ bool powercap_scale_uw;
+ bool fastchannels;
+ char name[SCMI_MAX_STR_SIZE];
+ unsigned int min_pai;
+ unsigned int max_pai;
+ unsigned int pai_step;
+ unsigned int min_power_cap;
+ unsigned int max_power_cap;
+ unsigned int power_cap_step;
+ unsigned int sustainable_power;
+ unsigned int accuracy;
+#define SCMI_POWERCAP_ROOT_ZONE_ID 0xFFFFFFFFUL
+ unsigned int parent_id;
+ struct scmi_fc_info *fc_info;
+};
+
+/**
+ * struct scmi_powercap_proto_ops - represents the various operations provided
+ * by SCMI Powercap Protocol
+ *
+ * @num_domains_get: get the count of powercap domains provided by SCMI.
+ * @info_get: get the information for the specified domain.
+ * @cap_get: get the current CAP value for the specified domain.
+ * @cap_set: set the CAP value for the specified domain to the provided value;
+ * if the domain supports setting the CAP with an asynchronous command
+ *	     this request will trigger an asynchronous transfer; if
+ *	     @ignore_dresp is set to true, though, the call returns
+ *	     immediately without waiting for the related delayed response.
+ * @pai_get: get the current PAI value for the specified domain.
+ * @pai_set: set the PAI value for the specified domain to the provided value.
+ * @measurements_get: retrieve the current average power measurements for the
+ * specified domain and the related PAI upon which is
+ * calculated.
+ * @measurements_threshold_set: set the desired low and high power thresholds
+ * to be used when registering for notification
+ * of type POWERCAP_MEASUREMENTS_NOTIFY with this
+ * powercap domain.
+ * Note that this must be called at least once
+ * before registering any callback with the usual
+ *				 @scmi_notify_ops; moreover, if this method is
+ *				 called while measurement notifications are
+ *				 already enabled, it also transparently updates
+ *				 the power thresholds configured in the SCMI
+ *				 backend server.
+ * @measurements_threshold_get: get the currently configured low and high power
+ * thresholds used when registering callbacks for
+ * notification POWERCAP_MEASUREMENTS_NOTIFY.
+ */
+struct scmi_powercap_proto_ops {
+ int (*num_domains_get)(const struct scmi_protocol_handle *ph);
+ const struct scmi_powercap_info __must_check *(*info_get)
+ (const struct scmi_protocol_handle *ph, u32 domain_id);
+ int (*cap_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 *power_cap);
+ int (*cap_set)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 power_cap, bool ignore_dresp);
+ int (*pai_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 *pai);
+ int (*pai_set)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 pai);
+ int (*measurements_get)(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *average_power, u32 *pai);
+ int (*measurements_threshold_set)(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 power_thresh_low,
+ u32 power_thresh_high);
+ int (*measurements_threshold_get)(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *power_thresh_low,
+ u32 *power_thresh_high);
+};
+
+/**
+ * struct scmi_notify_ops - represents notifications' operations provided by
+ * SCMI core
+ * @devm_event_notifier_register: Managed registration of a notifier_block for
+ * the requested event
+ * @devm_event_notifier_unregister: Managed unregistration of a notifier_block
+ * for the requested event
+ * @event_notifier_register: Register a notifier_block for the requested event
+ * @event_notifier_unregister: Unregister a notifier_block for the requested
+ * event
+ *
+ * A user can register/unregister its own notifier_block against the wanted
+ * platform instance regarding the desired event identified by the
+ * tuple: (proto_id, evt_id, src_id) using the provided register/unregister
+ * interface where:
+ *
+ * @sdev: The scmi_device to use when calling the devres managed ops devm_
+ * @handle: The handle identifying the platform instance to use, when not
+ * calling the managed ops devm_
+ * @proto_id: The protocol ID as in SCMI Specification
+ * @evt_id: The message ID of the desired event as in SCMI Specification
+ * @src_id: A pointer to the desired source ID if different sources are
+ *	    possible for the protocol (like domain_id, sensor_id, etc.)
+ *
+ * @src_id can be provided as NULL if it simply does NOT make sense for
+ * the protocol at hand, OR if the user is explicitly interested in
+ * receiving notifications from ANY existent source associated to the
+ * specified proto_id / evt_id.
+ *
+ * Received notifications are finally delivered to the registered users,
+ * invoking the callback provided with the notifier_block *nb as follows:
+ *
+ * int user_cb(nb, evt_id, report)
+ *
+ * with:
+ *
+ * @nb: The notifier block provided by the user
+ * @evt_id: The message ID of the delivered event
+ * @report: A custom struct describing the specific event delivered
+ */
+struct scmi_notify_ops {
+ int (*devm_event_notifier_register)(struct scmi_device *sdev,
+ u8 proto_id, u8 evt_id,
+ const u32 *src_id,
+ struct notifier_block *nb);
+ int (*devm_event_notifier_unregister)(struct scmi_device *sdev,
+ u8 proto_id, u8 evt_id,
+ const u32 *src_id,
+ struct notifier_block *nb);
+ int (*event_notifier_register)(const struct scmi_handle *handle,
+ u8 proto_id, u8 evt_id,
+ const u32 *src_id,
+ struct notifier_block *nb);
+ int (*event_notifier_unregister)(const struct scmi_handle *handle,
+ u8 proto_id, u8 evt_id,
+ const u32 *src_id,
+ struct notifier_block *nb);
+};
+
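
A hedged sketch of the registration flow just described, using the SENSOR_UPDATE event as an example; "example_sensor_cb" and the other names are placeholders:

static int example_sensor_cb(struct notifier_block *nb,
			     unsigned long evt_id, void *report)
{
	struct scmi_sensor_update_report *r = report;

	pr_info("sensor %u: %u readings\n", r->sensor_id, r->readings_count);
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_sensor_cb,
};

static int example_register(struct scmi_device *sdev, u32 sensor_id)
{
	const struct scmi_handle *h = sdev->handle;

	/* (proto_id, evt_id, src_id) identifies the event source */
	return h->notify_ops->devm_event_notifier_register(sdev,
			SCMI_PROTOCOL_SENSOR, SCMI_EVENT_SENSOR_UPDATE,
			&sensor_id, &example_nb);
}
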
+/**
+ * struct scmi_handle - Handle returned to ARM SCMI clients for usage.
+ *
+ * @dev: pointer to the SCMI device
+ * @version: pointer to the structure containing SCMI version information
+ * @devm_protocol_acquire: devres managed method to get hold of a protocol,
+ * causing its initialization and related resource
+ * accounting
+ * @devm_protocol_get: devres managed method to acquire a protocol and get specific
+ * operations and a dedicated protocol handler
+ * @devm_protocol_put: devres managed method to release a protocol
+ * @is_transport_atomic: method to check if the underlying transport for this
+ * instance handle is configured to support atomic
+ * transactions for commands.
+ *			 Some users of the SCMI stack in the upper layers may
+ *			 want to know whether they can assume that SCMI
+ *			 command transactions associated to this handle will
+ *			 never sleep, and act accordingly.
+ * An optional atomic threshold value could be returned
+ * where configured.
+ * @notify_ops: pointer to set of notifications related operations
+ */
+struct scmi_handle {
+ struct device *dev;
+ struct scmi_revision_info *version;
+
+ int __must_check (*devm_protocol_acquire)(struct scmi_device *sdev,
+ u8 proto);
+ const void __must_check *
+ (*devm_protocol_get)(struct scmi_device *sdev, u8 proto,
+ struct scmi_protocol_handle **ph);
+ void (*devm_protocol_put)(struct scmi_device *sdev, u8 proto);
+ bool (*is_transport_atomic)(const struct scmi_handle *handle,
+ unsigned int *atomic_threshold);
+
+ const struct scmi_notify_ops *notify_ops;
+};
+
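
For illustration, a minimal sketch around @is_transport_atomic: decide whether polling-mode (non-sleeping) requests can be used on this handle. The function name is a placeholder:

static bool example_can_poll(const struct scmi_handle *handle)
{
	unsigned int threshold_us = 0;

	/* true when the underlying transport never sleeps on commands */
	return handle->is_transport_atomic(handle, &threshold_us);
}
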
+enum scmi_std_protocol {
+ SCMI_PROTOCOL_BASE = 0x10,
+ SCMI_PROTOCOL_POWER = 0x11,
+ SCMI_PROTOCOL_SYSTEM = 0x12,
+ SCMI_PROTOCOL_PERF = 0x13,
+ SCMI_PROTOCOL_CLOCK = 0x14,
+ SCMI_PROTOCOL_SENSOR = 0x15,
+ SCMI_PROTOCOL_RESET = 0x16,
+ SCMI_PROTOCOL_VOLTAGE = 0x17,
+ SCMI_PROTOCOL_POWERCAP = 0x18,
+};
+
+enum scmi_system_events {
+ SCMI_SYSTEM_SHUTDOWN,
+ SCMI_SYSTEM_COLDRESET,
+ SCMI_SYSTEM_WARMRESET,
+ SCMI_SYSTEM_POWERUP,
+ SCMI_SYSTEM_SUSPEND,
+ SCMI_SYSTEM_MAX
+};
+
+struct scmi_device {
+ u32 id;
+ u8 protocol_id;
+ const char *name;
+ struct device dev;
+ struct scmi_handle *handle;
+};
+
+#define to_scmi_dev(d) container_of(d, struct scmi_device, dev)
+
+struct scmi_device_id {
+ u8 protocol_id;
+ const char *name;
+};
+
+struct scmi_driver {
+ const char *name;
+ int (*probe)(struct scmi_device *sdev);
+ void (*remove)(struct scmi_device *sdev);
+ const struct scmi_device_id *id_table;
+
+ struct device_driver driver;
+};
+
+#define to_scmi_driver(d) container_of(d, struct scmi_driver, driver)
+
+#if IS_REACHABLE(CONFIG_ARM_SCMI_PROTOCOL)
+int scmi_driver_register(struct scmi_driver *driver,
+ struct module *owner, const char *mod_name);
+void scmi_driver_unregister(struct scmi_driver *driver);
+#else
+static inline int
+scmi_driver_register(struct scmi_driver *driver, struct module *owner,
+ const char *mod_name)
+{
+ return -EINVAL;
+}
+
+static inline void scmi_driver_unregister(struct scmi_driver *driver) {}
+#endif /* CONFIG_ARM_SCMI_PROTOCOL */
+
+#define scmi_register(driver) \
+ scmi_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+#define scmi_unregister(driver) \
+ scmi_driver_unregister(driver)
+
+/**
+ * module_scmi_driver() - Helper macro for registering a scmi driver
+ * @__scmi_driver: scmi_driver structure
+ *
+ * Helper macro for scmi drivers to set up proper module init / exit
+ * functions. Replaces module_init() and module_exit() and keeps people from
+ * printing pointless things to the kernel log when their driver is loaded.
+ */
+#define module_scmi_driver(__scmi_driver) \
+ module_driver(__scmi_driver, scmi_register, scmi_unregister)
+
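
Putting the pieces together, a hedged end-to-end client skeleton (all "example" names are placeholders): the probe() acquires the clock protocol operations through the devres managed method, and module_scmi_driver() provides the module boilerplate. Returning an ERR_PTR from devm_protocol_get on failure is assumed here:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/scmi_protocol.h>

static int example_probe(struct scmi_device *sdev)
{
	struct scmi_protocol_handle *ph;
	const struct scmi_clk_proto_ops *clk_ops;

	clk_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK,
						  &ph);
	if (IS_ERR(clk_ops))
		return PTR_ERR(clk_ops);

	dev_info(&sdev->dev, "%d clocks\n", clk_ops->count_get(ph));
	return 0;
}

static const struct scmi_device_id example_id_table[] = {
	{ SCMI_PROTOCOL_CLOCK, "example" },
	{ },
};

static struct scmi_driver example_driver = {
	.name		= "scmi-example",
	.probe		= example_probe,
	.id_table	= example_id_table,
};
module_scmi_driver(example_driver);
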
+/**
+ * module_scmi_protocol() - Helper macro for registering a scmi protocol
+ * @__scmi_protocol: scmi_protocol structure
+ *
+ * Helper macro for scmi drivers to set up proper module init / exit
+ * functions. Replaces module_init() and module_exit() and keeps people from
+ * printing pointless things to the kernel log when their driver is loaded.
+ */
+#define module_scmi_protocol(__scmi_protocol) \
+ module_driver(__scmi_protocol, \
+ scmi_protocol_register, scmi_protocol_unregister)
+
+struct scmi_protocol;
+int scmi_protocol_register(const struct scmi_protocol *proto);
+void scmi_protocol_unregister(const struct scmi_protocol *proto);
+
+/* SCMI Notification API - Custom Event Reports */
+enum scmi_notification_events {
+ SCMI_EVENT_POWER_STATE_CHANGED = 0x0,
+ SCMI_EVENT_CLOCK_RATE_CHANGED = 0x0,
+ SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED = 0x1,
+ SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED = 0x0,
+ SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED = 0x1,
+ SCMI_EVENT_SENSOR_TRIP_POINT_EVENT = 0x0,
+ SCMI_EVENT_SENSOR_UPDATE = 0x1,
+ SCMI_EVENT_RESET_ISSUED = 0x0,
+ SCMI_EVENT_BASE_ERROR_EVENT = 0x0,
+ SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER = 0x0,
+ SCMI_EVENT_POWERCAP_CAP_CHANGED = 0x0,
+ SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED = 0x1,
+};
+
+struct scmi_power_state_changed_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int power_state;
+};
+
+struct scmi_clock_rate_notif_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int clock_id;
+ unsigned long long rate;
+};
+
+struct scmi_system_power_state_notifier_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+#define SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(flags) ((flags) & BIT(0))
+ unsigned int flags;
+ unsigned int system_state;
+ unsigned int timeout;
+};
+
+struct scmi_perf_limits_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int range_max;
+ unsigned int range_min;
+};
+
+struct scmi_perf_level_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int performance_level;
+};
+
+struct scmi_sensor_trip_point_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int sensor_id;
+ unsigned int trip_point_desc;
+};
+
+struct scmi_sensor_update_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int sensor_id;
+ unsigned int readings_count;
+ struct scmi_sensor_reading readings[];
+};
+
+struct scmi_reset_issued_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int reset_state;
+};
+
+struct scmi_base_error_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ bool fatal;
+ unsigned int cmd_count;
+ unsigned long long reports[];
+};
+
+struct scmi_powercap_cap_changed_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int power_cap;
+ unsigned int pai;
+};
+
+struct scmi_powercap_meas_changed_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int power;
+};
+#endif /* _LINUX_SCMI_PROTOCOL_H */
diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h
index 327d65663dbf..d2176a56828a 100644
--- a/include/linux/scpi_protocol.h
+++ b/include/linux/scpi_protocol.h
@@ -1,20 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* SCPI Message Protocol driver header
*
* Copyright (C) 2014 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
+
+#ifndef _LINUX_SCPI_PROTOCOL_H
+#define _LINUX_SCPI_PROTOCOL_H
+
#include <linux/types.h>
struct scpi_opp {
@@ -58,6 +51,14 @@ struct scpi_sensor_info {
* OPP is an index to the list return by @dvfs_get_info
* @dvfs_get_info: returns the DVFS capabilities of the given power
* domain. It includes the OPP list and the latency information
+ * @device_domain_id: gets the scpi domain id for a given device
+ * @get_transition_latency: gets the DVFS transition latency for a given device
+ * @add_opps_to_device: adds all the OPPs for a given device
+ * @sensor_get_capability: get the list of capabilities for the sensors
+ * @sensor_get_info: get the information of the specified sensor
+ * @sensor_get_value: gets the current value of the sensor
+ * @device_get_power_state: gets the power state of a power domain
+ * @device_set_power_state: sets the power state of a power domain
*/
struct scpi_ops {
u32 (*get_version)(void);
@@ -82,3 +83,5 @@ struct scpi_ops *get_scpi_ops(void);
#else
static inline struct scpi_ops *get_scpi_ops(void) { return NULL; }
#endif
+
+#endif /* _LINUX_SCPI_PROTOCOL_H */
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
index f0f8bad54be9..eab7081392d5 100644
--- a/include/linux/screen_info.h
+++ b/include/linux/screen_info.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCREEN_INFO_H
#define _SCREEN_INFO_H
diff --git a/include/linux/scs.h b/include/linux/scs.h
new file mode 100644
index 000000000000..4ab5bdc898cf
--- /dev/null
+++ b/include/linux/scs.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Shadow Call Stack support.
+ *
+ * Copyright (C) 2019 Google LLC
+ */
+
+#ifndef _LINUX_SCS_H
+#define _LINUX_SCS_H
+
+#include <linux/gfp.h>
+#include <linux/poison.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+
+#define SCS_ORDER 0
+#define SCS_SIZE (PAGE_SIZE << SCS_ORDER)
+#define GFP_SCS (GFP_KERNEL | __GFP_ZERO)
+
+/* An illegal pointer value to mark the end of the shadow stack. */
+#define SCS_END_MAGIC (0x5f6UL + POISON_POINTER_DELTA)
+
+#define task_scs(tsk) (task_thread_info(tsk)->scs_base)
+#define task_scs_sp(tsk) (task_thread_info(tsk)->scs_sp)
+
+void *scs_alloc(int node);
+void scs_free(void *s);
+void scs_init(void);
+int scs_prepare(struct task_struct *tsk, int node);
+void scs_release(struct task_struct *tsk);
+
+static inline void scs_task_reset(struct task_struct *tsk)
+{
+ /*
+ * Reset the shadow stack to the base address in case the task
+ * is reused.
+ */
+ task_scs_sp(tsk) = task_scs(tsk);
+}
+
+static inline unsigned long *__scs_magic(void *s)
+{
+ return (unsigned long *)(s + SCS_SIZE) - 1;
+}
+
+static inline bool task_scs_end_corrupted(struct task_struct *tsk)
+{
+ unsigned long *magic = __scs_magic(task_scs(tsk));
+ unsigned long sz = task_scs_sp(tsk) - task_scs(tsk);
+
+ return sz >= SCS_SIZE - 1 || READ_ONCE_NOCHECK(*magic) != SCS_END_MAGIC;
+}
+
+DECLARE_STATIC_KEY_FALSE(dynamic_scs_enabled);
+
+static inline bool scs_is_dynamic(void)
+{
+ if (!IS_ENABLED(CONFIG_DYNAMIC_SCS))
+ return false;
+ return static_branch_likely(&dynamic_scs_enabled);
+}
+
+static inline bool scs_is_enabled(void)
+{
+ if (!IS_ENABLED(CONFIG_DYNAMIC_SCS))
+ return true;
+ return scs_is_dynamic();
+}
+
+#else /* CONFIG_SHADOW_CALL_STACK */
+
+static inline void *scs_alloc(int node) { return NULL; }
+static inline void scs_free(void *s) {}
+static inline void scs_init(void) {}
+static inline void scs_task_reset(struct task_struct *tsk) {}
+static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; }
+static inline void scs_release(struct task_struct *tsk) {}
+static inline bool task_scs_end_corrupted(struct task_struct *tsk) { return false; }
+static inline bool scs_is_enabled(void) { return false; }
+static inline bool scs_is_dynamic(void) { return false; }
+
+#endif /* CONFIG_SHADOW_CALL_STACK */
+
+#endif /* _LINUX_SCS_H */
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index 99e866487e2f..836a7e200f39 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* SCTP kernel reference Implementation
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
@@ -10,22 +11,6 @@
*
* Various protocol defined structures.
*
- * This SCTP implementation is free software;
- * you can redistribute it and/or modify it under the terms of
- * the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This SCTP implementation is distributed in the hope that it
- * will be useful, but WITHOUT ANY WARRANTY; without even the implied
- * ************************
- * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, see
- * <http://www.gnu.org/licenses/>.
- *
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <linux-sctp@vger.kernel.org>
@@ -102,13 +87,18 @@ enum sctp_cid {
/* AUTH Extension Section 4.1 */
SCTP_CID_AUTH = 0x0F,
+ /* sctp ndata 5.1. I-DATA */
+ SCTP_CID_I_DATA = 0x40,
+
/* PR-SCTP Sec 3.2 */
SCTP_CID_FWD_TSN = 0xC0,
/* Use hex, as defined in ADDIP sec. 3.1 */
SCTP_CID_ASCONF = 0xC1,
+ SCTP_CID_I_FWD_TSN = 0xC2,
SCTP_CID_ASCONF_ACK = 0x80,
SCTP_CID_RECONF = 0x82,
+ SCTP_CID_PAD = 0x84,
}; /* enum */
@@ -231,8 +221,8 @@ struct sctp_datahdr {
__be32 tsn;
__be16 stream;
__be16 ssn;
- __be32 ppid;
- __u8 payload[0];
+ __u32 ppid;
+ /* __u8 payload[]; */
};
struct sctp_data_chunk {
@@ -240,6 +230,23 @@ struct sctp_data_chunk {
struct sctp_datahdr data_hdr;
};
+struct sctp_idatahdr {
+ __be32 tsn;
+ __be16 stream;
+ __be16 reserved;
+ __be32 mid;
+ union {
+ __u32 ppid;
+ __be32 fsn;
+ };
+ __u8 payload[0];
+};
+
+struct sctp_idata_chunk {
+ struct sctp_chunkhdr chunk_hdr;
+ struct sctp_idatahdr data_hdr;
+};
+
 /* DATA Chunk Specific Flags */
enum {
SCTP_DATA_MIDDLE_FRAG = 0x00,
@@ -263,7 +270,7 @@ struct sctp_inithdr {
__be16 num_outbound_streams;
__be16 num_inbound_streams;
__be32 initial_tsn;
- __u8 params[0];
+ /* __u8 params[]; */
};
struct sctp_init_chunk {
@@ -273,87 +280,85 @@ struct sctp_init_chunk {
/* Section 3.3.2.1. IPv4 Address Parameter (5) */
-typedef struct sctp_ipv4addr_param {
+struct sctp_ipv4addr_param {
struct sctp_paramhdr param_hdr;
- struct in_addr addr;
-} sctp_ipv4addr_param_t;
+ struct in_addr addr;
+};
/* Section 3.3.2.1. IPv6 Address Parameter (6) */
-typedef struct sctp_ipv6addr_param {
+struct sctp_ipv6addr_param {
struct sctp_paramhdr param_hdr;
struct in6_addr addr;
-} sctp_ipv6addr_param_t;
+};
/* Section 3.3.2.1 Cookie Preservative (9) */
-typedef struct sctp_cookie_preserve_param {
+struct sctp_cookie_preserve_param {
struct sctp_paramhdr param_hdr;
- __be32 lifespan_increment;
-} sctp_cookie_preserve_param_t;
+ __be32 lifespan_increment;
+};
/* Section 3.3.2.1 Host Name Address (11) */
-typedef struct sctp_hostname_param {
+struct sctp_hostname_param {
struct sctp_paramhdr param_hdr;
- uint8_t hostname[0];
-} sctp_hostname_param_t;
+ uint8_t hostname[];
+};
/* Section 3.3.2.1 Supported Address Types (12) */
-typedef struct sctp_supported_addrs_param {
+struct sctp_supported_addrs_param {
struct sctp_paramhdr param_hdr;
- __be16 types[0];
-} sctp_supported_addrs_param_t;
-
-/* Appendix A. ECN Capable (32768) */
-typedef struct sctp_ecn_capable_param {
- struct sctp_paramhdr param_hdr;
-} sctp_ecn_capable_param_t;
+ __be16 types[];
+};
/* ADDIP Section 3.2.6 Adaptation Layer Indication */
-typedef struct sctp_adaptation_ind_param {
+struct sctp_adaptation_ind_param {
struct sctp_paramhdr param_hdr;
__be32 adaptation_ind;
-} sctp_adaptation_ind_param_t;
+};
/* ADDIP Section 4.2.7 Supported Extensions Parameter */
-typedef struct sctp_supported_ext_param {
+struct sctp_supported_ext_param {
struct sctp_paramhdr param_hdr;
- __u8 chunks[0];
-} sctp_supported_ext_param_t;
+ __u8 chunks[];
+};
/* AUTH Section 3.1 Random */
-typedef struct sctp_random_param {
+struct sctp_random_param {
struct sctp_paramhdr param_hdr;
- __u8 random_val[0];
-} sctp_random_param_t;
+ __u8 random_val[];
+};
/* AUTH Section 3.2 Chunk List */
-typedef struct sctp_chunks_param {
+struct sctp_chunks_param {
struct sctp_paramhdr param_hdr;
- __u8 chunks[0];
-} sctp_chunks_param_t;
+ __u8 chunks[];
+};
/* AUTH Section 3.3 HMAC Algorithm */
-typedef struct sctp_hmac_algo_param {
+struct sctp_hmac_algo_param {
struct sctp_paramhdr param_hdr;
- __be16 hmac_ids[0];
-} sctp_hmac_algo_param_t;
+ __be16 hmac_ids[];
+};
/* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2):
* The INIT ACK chunk is used to acknowledge the initiation of an SCTP
* association.
*/
-typedef struct sctp_init_chunk sctp_initack_chunk_t;
+struct sctp_initack_chunk {
+ struct sctp_chunkhdr chunk_hdr;
+ struct sctp_inithdr init_hdr;
+};
/* Section 3.3.3.1 State Cookie (7) */
-typedef struct sctp_cookie_param {
+struct sctp_cookie_param {
struct sctp_paramhdr p;
- __u8 body[0];
-} sctp_cookie_param_t;
+ __u8 body[];
+};
/* Section 3.3.3.1 Unrecognized Parameters (8) */
-typedef struct sctp_unrecognized_param {
+struct sctp_unrecognized_param {
struct sctp_paramhdr param_hdr;
struct sctp_paramhdr unrecognized;
-} sctp_unrecognized_param_t;
+};
@@ -365,30 +370,28 @@ typedef struct sctp_unrecognized_param {
* subsequences of DATA chunks as represented by their TSNs.
*/
-typedef struct sctp_gap_ack_block {
+struct sctp_gap_ack_block {
__be16 start;
__be16 end;
-} sctp_gap_ack_block_t;
-
-typedef __be32 sctp_dup_tsn_t;
+};
-typedef union {
- sctp_gap_ack_block_t gab;
- sctp_dup_tsn_t dup;
-} sctp_sack_variable_t;
+union sctp_sack_variable {
+ struct sctp_gap_ack_block gab;
+ __be32 dup;
+};
-typedef struct sctp_sackhdr {
+struct sctp_sackhdr {
__be32 cum_tsn_ack;
__be32 a_rwnd;
__be16 num_gap_ack_blocks;
__be16 num_dup_tsns;
- sctp_sack_variable_t variable[0];
-} sctp_sackhdr_t;
+ /* union sctp_sack_variable variable[]; */
+};
-typedef struct sctp_sack_chunk {
+struct sctp_sack_chunk {
struct sctp_chunkhdr chunk_hdr;
- sctp_sackhdr_t sack_hdr;
-} sctp_sack_chunk_t;
+ struct sctp_sackhdr sack_hdr;
+};
/* RFC 2960. Section 3.3.5 Heartbeat Request (HEARTBEAT) (4):
@@ -398,49 +401,55 @@ typedef struct sctp_sack_chunk {
* the present association.
*/
-typedef struct sctp_heartbeathdr {
+struct sctp_heartbeathdr {
struct sctp_paramhdr info;
-} sctp_heartbeathdr_t;
+};
-typedef struct sctp_heartbeat_chunk {
+struct sctp_heartbeat_chunk {
struct sctp_chunkhdr chunk_hdr;
- sctp_heartbeathdr_t hb_hdr;
-} sctp_heartbeat_chunk_t;
+ struct sctp_heartbeathdr hb_hdr;
+};
+
+
+/* A PAD chunk can be bundled with a HEARTBEAT chunk to probe the path MTU */
+struct sctp_pad_chunk {
+ struct sctp_chunkhdr uh;
+};
/* For the abort and shutdown ACK we must carry the init tag in the
* common header. Just the common header is all that is needed with a
* chunk descriptor.
*/
-typedef struct sctp_abort_chunk {
+struct sctp_abort_chunk {
struct sctp_chunkhdr uh;
-} sctp_abort_chunk_t;
+};
/* For the graceful shutdown we must carry the tag (in common header)
* and the highest consecutive acking value.
*/
-typedef struct sctp_shutdownhdr {
+struct sctp_shutdownhdr {
__be32 cum_tsn_ack;
-} sctp_shutdownhdr_t;
+};
-struct sctp_shutdown_chunk_t {
+struct sctp_shutdown_chunk {
struct sctp_chunkhdr chunk_hdr;
- sctp_shutdownhdr_t shutdown_hdr;
+ struct sctp_shutdownhdr shutdown_hdr;
};
/* RFC 2960. Section 3.3.10 Operation Error (ERROR) (9) */
-typedef struct sctp_errhdr {
+struct sctp_errhdr {
__be16 cause;
__be16 length;
- __u8 variable[0];
-} sctp_errhdr_t;
+ /* __u8 variable[]; */
+};
-typedef struct sctp_operr_chunk {
+struct sctp_operr_chunk {
struct sctp_chunkhdr chunk_hdr;
- sctp_errhdr_t err_hdr;
-} sctp_operr_chunk_t;
+ struct sctp_errhdr err_hdr;
+};
/* RFC 2960 3.3.10 - Operation Error
*
@@ -461,7 +470,7 @@ typedef struct sctp_operr_chunk {
* 9 No User Data
* 10 Cookie Received While Shutting Down
*/
-typedef enum {
+enum sctp_error {
SCTP_ERROR_NO_ERROR = cpu_to_be16(0x00),
SCTP_ERROR_INV_STRM = cpu_to_be16(0x01),
@@ -480,11 +489,13 @@ typedef enum {
* 11 Restart of an association with new addresses
* 12 User Initiated Abort
* 13 Protocol Violation
+ * 14 Restart of an Association with New Encapsulation Port
*/
SCTP_ERROR_RESTART = cpu_to_be16(0x0b),
SCTP_ERROR_USER_ABORT = cpu_to_be16(0x0c),
SCTP_ERROR_PROTO_VIOLATION = cpu_to_be16(0x0d),
+ SCTP_ERROR_NEW_ENCAP_PORT = cpu_to_be16(0x0e),
/* ADDIP Section 3.3 New Error Causes
*
@@ -516,33 +527,28 @@ typedef enum {
* 0x0105 Unsupported HMAC Identifier
*/
SCTP_ERROR_UNSUP_HMAC = cpu_to_be16(0x0105)
-} sctp_error_t;
+};
/* RFC 2960. Appendix A. Explicit Congestion Notification.
* Explicit Congestion Notification Echo (ECNE) (12)
*/
-typedef struct sctp_ecnehdr {
+struct sctp_ecnehdr {
__be32 lowest_tsn;
-} sctp_ecnehdr_t;
+};
-typedef struct sctp_ecne_chunk {
+struct sctp_ecne_chunk {
struct sctp_chunkhdr chunk_hdr;
- sctp_ecnehdr_t ence_hdr;
-} sctp_ecne_chunk_t;
+ struct sctp_ecnehdr ence_hdr;
+};
/* RFC 2960. Appendix A. Explicit Congestion Notification.
* Congestion Window Reduced (CWR) (13)
*/
-typedef struct sctp_cwrhdr {
+struct sctp_cwrhdr {
__be32 lowest_tsn;
-} sctp_cwrhdr_t;
-
-typedef struct sctp_cwr_chunk {
- struct sctp_chunkhdr chunk_hdr;
- sctp_cwrhdr_t cwr_hdr;
-} sctp_cwr_chunk_t;
+};
/* PR-SCTP
* 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN)
@@ -597,7 +603,7 @@ struct sctp_fwdtsn_skip {
struct sctp_fwdtsn_hdr {
__be32 new_cum_tsn;
- struct sctp_fwdtsn_skip skip[0];
+ /* struct sctp_fwdtsn_skip skip[]; */
};
struct sctp_fwdtsn_chunk {
@@ -605,6 +611,22 @@ struct sctp_fwdtsn_chunk {
struct sctp_fwdtsn_hdr fwdtsn_hdr;
};
+struct sctp_ifwdtsn_skip {
+ __be16 stream;
+ __u8 reserved;
+ __u8 flags;
+ __be32 mid;
+};
+
+struct sctp_ifwdtsn_hdr {
+ __be32 new_cum_tsn;
+ /* struct sctp_ifwdtsn_skip skip[]; */
+};
+
+struct sctp_ifwdtsn_chunk {
+ struct sctp_chunkhdr chunk_hdr;
+ struct sctp_ifwdtsn_hdr fwdtsn_hdr;
+};
/* ADDIP
* Section 3.1.1 Address Configuration Change Chunk (ASCONF)
@@ -638,20 +660,20 @@ struct sctp_fwdtsn_chunk {
* The ASCONF Parameter Response is used in the ASCONF-ACK to
* report status of ASCONF processing.
*/
-typedef struct sctp_addip_param {
- struct sctp_paramhdr param_hdr;
- __be32 crr_id;
-} sctp_addip_param_t;
+struct sctp_addip_param {
+ struct sctp_paramhdr param_hdr;
+ __be32 crr_id;
+};
-typedef struct sctp_addiphdr {
+struct sctp_addiphdr {
__be32 serial;
- __u8 params[0];
-} sctp_addiphdr_t;
+ /* __u8 params[]; */
+};
-typedef struct sctp_addip_chunk {
+struct sctp_addip_chunk {
struct sctp_chunkhdr chunk_hdr;
- sctp_addiphdr_t addip_hdr;
-} sctp_addip_chunk_t;
+ struct sctp_addiphdr addip_hdr;
+};
/* AUTH
* Section 4.1 Authentication Chunk (AUTH)
@@ -702,16 +724,16 @@ typedef struct sctp_addip_chunk {
* HMAC: n bytes (unsigned integer) This holds the result of the HMAC
* calculation.
*/
-typedef struct sctp_authhdr {
+struct sctp_authhdr {
__be16 shkey_id;
__be16 hmac_id;
- __u8 hmac[0];
-} sctp_authhdr_t;
+ /* __u8 hmac[]; */
+};
-typedef struct sctp_auth_chunk {
+struct sctp_auth_chunk {
struct sctp_chunkhdr chunk_hdr;
- sctp_authhdr_t auth_hdr;
-} sctp_auth_chunk_t;
+ struct sctp_authhdr auth_hdr;
+};
struct sctp_infox {
struct sctp_info *sctpinfo;
@@ -720,33 +742,33 @@ struct sctp_infox {
struct sctp_reconf_chunk {
struct sctp_chunkhdr chunk_hdr;
- __u8 params[0];
+ /* __u8 params[]; */
};
struct sctp_strreset_outreq {
struct sctp_paramhdr param_hdr;
- __u32 request_seq;
- __u32 response_seq;
- __u32 send_reset_at_tsn;
- __u16 list_of_streams[0];
+ __be32 request_seq;
+ __be32 response_seq;
+ __be32 send_reset_at_tsn;
+ __be16 list_of_streams[];
};
struct sctp_strreset_inreq {
struct sctp_paramhdr param_hdr;
- __u32 request_seq;
- __u16 list_of_streams[0];
+ __be32 request_seq;
+ __be16 list_of_streams[];
};
struct sctp_strreset_tsnreq {
struct sctp_paramhdr param_hdr;
- __u32 request_seq;
+ __be32 request_seq;
};
struct sctp_strreset_addstrm {
struct sctp_paramhdr param_hdr;
- __u32 request_seq;
- __u16 number_of_streams;
- __u16 reserved;
+ __be32 request_seq;
+ __be16 number_of_streams;
+ __be16 reserved;
};
enum {
@@ -761,16 +783,46 @@ enum {
struct sctp_strreset_resp {
struct sctp_paramhdr param_hdr;
- __u32 response_seq;
- __u32 result;
+ __be32 response_seq;
+ __be32 result;
};
struct sctp_strreset_resptsn {
struct sctp_paramhdr param_hdr;
- __u32 response_seq;
- __u32 result;
- __u32 senders_next_tsn;
- __u32 receivers_next_tsn;
+ __be32 response_seq;
+ __be32 result;
+ __be32 senders_next_tsn;
+ __be32 receivers_next_tsn;
};
+enum {
+ SCTP_DSCP_SET_MASK = 0x1,
+ SCTP_DSCP_VAL_MASK = 0xfc,
+ SCTP_FLOWLABEL_SET_MASK = 0x100000,
+ SCTP_FLOWLABEL_VAL_MASK = 0xfffff
+};
+
+/* UDP Encapsulation
+ * draft-tuexen-tsvwg-sctp-udp-encaps-cons-03.html#section-4-4
+ *
+ * The error cause indicating a "Restart of an Association with
+ * New Encapsulation Port"
+ *
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Cause Code = 14 | Cause Length = 8 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Current Encapsulation Port | New Encapsulation Port |
+ * +-------------------------------+-------------------------------+
+ */
+struct sctp_new_encap_port_hdr {
+ __be16 cur_port;
+ __be16 new_port;
+};
+
+/* Round an int up to the next multiple of 4. */
+#define SCTP_PAD4(s) (((s)+3)&~3)
+/* Truncate to the previous multiple of 4. */
+#define SCTP_TRUNC4(s) ((s)&~3)
+
#endif /* __LINUX_SCTP_H__ */
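Putting the additions together: an illustrative composer for the new error cause (the helper name and buffer handling are invented; the structures, the error code, and the macros are the ones defined in this header) that also shows the padding helpers at work, matching the "Cause Length = 8" in the diagram above:

static size_t put_new_encap_port_err(void *buf, __be16 cur, __be16 newp)
{
        struct sctp_errhdr *eh = buf;
        struct sctp_new_encap_port_hdr *ep = (void *)(eh + 1);

        eh->cause  = SCTP_ERROR_NEW_ENCAP_PORT;         /* cause code 14 */
        eh->length = htons(sizeof(*eh) + sizeof(*ep));  /* 4 + 4 = 8 */
        ep->cur_port = cur;
        ep->new_port = newp;

        /* error causes are 4-byte aligned: SCTP_PAD4(8) == 8,
         * whereas e.g. SCTP_PAD4(13) == 16 and SCTP_TRUNC4(13) == 12 */
        return SCTP_PAD4(sizeof(*eh) + sizeof(*ep));
}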
diff --git a/include/linux/scx200.h b/include/linux/scx200.h
index de466e11e271..652ec1a45f7c 100644
--- a/include/linux/scx200.h
+++ b/include/linux/scx200.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* linux/include/linux/scx200.h
Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
diff --git a/include/linux/scx200_gpio.h b/include/linux/scx200_gpio.h
index ece4e553e9ac..6386ddbb6b70 100644
--- a/include/linux/scx200_gpio.h
+++ b/include/linux/scx200_gpio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
u32 scx200_gpio_configure(unsigned index, u32 set, u32 clear);
extern unsigned scx200_gpio_base;
diff --git a/include/linux/sdb.h b/include/linux/sdb.h
deleted file mode 100644
index fbb76a46c8a5..000000000000
--- a/include/linux/sdb.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * This is the official version 1.1 of sdb.h
- */
-#ifndef __SDB_H__
-#define __SDB_H__
-#ifdef __KERNEL__
-#include <linux/types.h>
-#else
-#include <stdint.h>
-#endif
-
-/*
- * All structures are 64 bytes long and are expected
- * to live in an array, one for each interconnect.
- * Most fields of the structures are shared among the
- * various types, and most-specific fields are at the
- * beginning (for alignment reasons, and to keep the
- * magic number at the head of the interconnect record
- */
-
-/* Product, 40 bytes at offset 24, 8-byte aligned
- *
- * device_id is vendor-assigned; version is device-specific,
- * date is hex (e.g. 0x20120501), name is UTF-8, blank-filled
- * and not terminated with a 0 byte.
- */
-struct sdb_product {
- uint64_t vendor_id; /* 0x18..0x1f */
- uint32_t device_id; /* 0x20..0x23 */
- uint32_t version; /* 0x24..0x27 */
- uint32_t date; /* 0x28..0x2b */
- uint8_t name[19]; /* 0x2c..0x3e */
- uint8_t record_type; /* 0x3f */
-};
-
-/*
- * Component, 56 bytes at offset 8, 8-byte aligned
- *
- * The address range is first to last, inclusive
- * (for example 0x100000 - 0x10ffff)
- */
-struct sdb_component {
- uint64_t addr_first; /* 0x08..0x0f */
- uint64_t addr_last; /* 0x10..0x17 */
- struct sdb_product product; /* 0x18..0x3f */
-};
-
-/* Type of the SDB record */
-enum sdb_record_type {
- sdb_type_interconnect = 0x00,
- sdb_type_device = 0x01,
- sdb_type_bridge = 0x02,
- sdb_type_integration = 0x80,
- sdb_type_repo_url = 0x81,
- sdb_type_synthesis = 0x82,
- sdb_type_empty = 0xFF,
-};
-
-/* Type 0: interconnect (first of the array)
- *
- * sdb_records is the length of the table including this first
- * record, version is 1. The bus type is enumerated later.
- */
-#define SDB_MAGIC 0x5344422d /* "SDB-" */
-struct sdb_interconnect {
- uint32_t sdb_magic; /* 0x00-0x03 */
- uint16_t sdb_records; /* 0x04-0x05 */
- uint8_t sdb_version; /* 0x06 */
- uint8_t sdb_bus_type; /* 0x07 */
- struct sdb_component sdb_component; /* 0x08-0x3f */
-};
-
-/* Type 1: device
- *
- * class is 0 for "custom device", other values are
- * to be standardized; ABI version is for the driver,
- * bus-specific bits are defined by each bus (see below)
- */
-struct sdb_device {
- uint16_t abi_class; /* 0x00-0x01 */
- uint8_t abi_ver_major; /* 0x02 */
- uint8_t abi_ver_minor; /* 0x03 */
- uint32_t bus_specific; /* 0x04-0x07 */
- struct sdb_component sdb_component; /* 0x08-0x3f */
-};
-
-/* Type 2: bridge
- *
- * child is the address of the nested SDB table
- */
-struct sdb_bridge {
- uint64_t sdb_child; /* 0x00-0x07 */
- struct sdb_component sdb_component; /* 0x08-0x3f */
-};
-
-/* Type 0x80: integration
- *
- * all types with bit 7 set are meta-information, so
- * software can ignore the types it doesn't know. Here we
- * just provide product information for an aggregate device
- */
-struct sdb_integration {
- uint8_t reserved[24]; /* 0x00-0x17 */
- struct sdb_product product; /* 0x18-0x3f */
-};
-
-/* Type 0x81: Top module repository url
- *
- * again, an informative field that software can ignore
- */
-struct sdb_repo_url {
- uint8_t repo_url[63]; /* 0x00-0x3e */
- uint8_t record_type; /* 0x3f */
-};
-
-/* Type 0x82: Synthesis tool information
- *
- * this is an informative record
- */
-struct sdb_synthesis {
- uint8_t syn_name[16]; /* 0x00-0x0f */
- uint8_t commit_id[16]; /* 0x10-0x1f */
- uint8_t tool_name[8]; /* 0x20-0x27 */
- uint32_t tool_version; /* 0x28-0x2b */
- uint32_t date; /* 0x2c-0x2f */
- uint8_t user_name[15]; /* 0x30-0x3e */
- uint8_t record_type; /* 0x3f */
-};
-
-/* Type 0xff: empty
- *
- * this allows keeping empty slots during development,
- * so they can be filled later with minimal effort and
- * no misleading description is ever shipped -- hopefully.
- * It can also be used to pad a table to a desired length.
- */
-struct sdb_empty {
- uint8_t reserved[63]; /* 0x00-0x3e */
- uint8_t record_type; /* 0x3f */
-};
-
-/* The type of bus, for bus-specific flags */
-enum sdb_bus_type {
- sdb_wishbone = 0x00,
- sdb_data = 0x01,
-};
-
-#define SDB_WB_WIDTH_MASK 0x0f
-#define SDB_WB_ACCESS8 0x01
-#define SDB_WB_ACCESS16 0x02
-#define SDB_WB_ACCESS32 0x04
-#define SDB_WB_ACCESS64 0x08
-#define SDB_WB_LITTLE_ENDIAN 0x80
-
-#define SDB_DATA_READ 0x04
-#define SDB_DATA_WRITE 0x02
-#define SDB_DATA_EXEC 0x01
-
-#endif /* __SDB_H__ */
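For the record, the header deleted above described self-describing bus tables: an array of 64-byte records, each carrying its record_type in the last byte (offset 0x3f), headed by an interconnect record that holds the magic and the record count. A table walk under that layout would have looked roughly like this (an illustrative sketch using the structures above, assuming native endianness and no byte swapping):

static int sdb_count_devices(const void *table)
{
        const struct sdb_interconnect *ic = table;
        const uint8_t *rec = table;
        int i, n = 0;

        if (ic->sdb_magic != SDB_MAGIC)         /* "SDB-" */
                return -1;
        /* sdb_records counts the interconnect record itself, too */
        for (i = 0; i < ic->sdb_records; i++, rec += 64)
                if (rec[0x3f] == sdb_type_device)
                        n++;
        return n;
}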
diff --git a/include/linux/sdla.h b/include/linux/sdla.h
deleted file mode 100644
index fe7a967d7de4..000000000000
--- a/include/linux/sdla.h
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * INET An implementation of the TCP/IP protocol suite for the LINUX
- * operating system. INET is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * Global definitions for the Frame relay interface.
- *
- * Version: @(#)if_ifrad.h 0.20 13 Apr 96
- *
- * Author: Mike McLagan <mike.mclagan@linux.org>
- *
- * Changes:
- * 0.15 Mike McLagan Structure packing
- *
- * 0.20 Mike McLagan New flags for S508 buffer handling
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifndef SDLA_H
-#define SDLA_H
-
-#include <uapi/linux/sdla.h>
-
-
-/* important Z80 window addresses */
-#define SDLA_CONTROL_WND 0xE000
-
-#define SDLA_502_CMD_BUF 0xEF60
-#define SDLA_502_RCV_BUF 0xA900
-#define SDLA_502_TXN_AVAIL 0xFFF1
-#define SDLA_502_RCV_AVAIL 0xFFF2
-#define SDLA_502_EVENT_FLAGS 0xFFF3
-#define SDLA_502_MDM_STATUS 0xFFF4
-#define SDLA_502_IRQ_INTERFACE 0xFFFD
-#define SDLA_502_IRQ_PERMISSION 0xFFFE
-#define SDLA_502_DATA_OFS 0x0010
-
-#define SDLA_508_CMD_BUF 0xE000
-#define SDLA_508_TXBUF_INFO 0xF100
-#define SDLA_508_RXBUF_INFO 0xF120
-#define SDLA_508_EVENT_FLAGS 0xF003
-#define SDLA_508_MDM_STATUS 0xF004
-#define SDLA_508_IRQ_INTERFACE 0xF010
-#define SDLA_508_IRQ_PERMISSION 0xF011
-#define SDLA_508_TSE_OFFSET 0xF012
-
-/* Event flags */
-#define SDLA_EVENT_STATUS 0x01
-#define SDLA_EVENT_DLCI_STATUS 0x02
-#define SDLA_EVENT_BAD_DLCI 0x04
-#define SDLA_EVENT_LINK_DOWN 0x40
-
-/* IRQ Trigger flags */
-#define SDLA_INTR_RX 0x01
-#define SDLA_INTR_TX 0x02
-#define SDLA_INTR_MODEM 0x04
-#define SDLA_INTR_COMPLETE 0x08
-#define SDLA_INTR_STATUS 0x10
-#define SDLA_INTR_TIMER 0x20
-
-/* DLCI status bits */
-#define SDLA_DLCI_DELETED 0x01
-#define SDLA_DLCI_ACTIVE 0x02
-#define SDLA_DLCI_WAITING 0x04
-#define SDLA_DLCI_NEW 0x08
-#define SDLA_DLCI_INCLUDED 0x40
-
-/* valid command codes */
-#define SDLA_INFORMATION_WRITE 0x01
-#define SDLA_INFORMATION_READ 0x02
-#define SDLA_ISSUE_IN_CHANNEL_SIGNAL 0x03
-#define SDLA_SET_DLCI_CONFIGURATION 0x10
-#define SDLA_READ_DLCI_CONFIGURATION 0x11
-#define SDLA_DISABLE_COMMUNICATIONS 0x12
-#define SDLA_ENABLE_COMMUNICATIONS 0x13
-#define SDLA_READ_DLC_STATUS 0x14
-#define SDLA_READ_DLC_STATISTICS 0x15
-#define SDLA_FLUSH_DLC_STATISTICS 0x16
-#define SDLA_LIST_ACTIVE_DLCI 0x17
-#define SDLA_FLUSH_INFORMATION_BUFFERS 0x18
-#define SDLA_ADD_DLCI 0x20
-#define SDLA_DELETE_DLCI 0x21
-#define SDLA_ACTIVATE_DLCI 0x22
-#define SDLA_DEACTIVATE_DLCI 0x23
-#define SDLA_READ_MODEM_STATUS 0x30
-#define SDLA_SET_MODEM_STATUS 0x31
-#define SDLA_READ_COMMS_ERR_STATS 0x32
-#define SDLA_FLUSH_COMMS_ERR_STATS 0x33
-#define SDLA_READ_CODE_VERSION 0x40
-#define SDLA_SET_IRQ_TRIGGER 0x50
-#define SDLA_GET_IRQ_TRIGGER 0x51
-
-/* In channel signal types */
-#define SDLA_ICS_LINK_VERIFY 0x02
-#define SDLA_ICS_STATUS_ENQ 0x03
-
-/* modem status flags */
-#define SDLA_MODEM_DTR_HIGH 0x01
-#define SDLA_MODEM_RTS_HIGH 0x02
-#define SDLA_MODEM_DCD_HIGH 0x08
-#define SDLA_MODEM_CTS_HIGH 0x20
-
-/* used for RET_MODEM interpretation */
-#define SDLA_MODEM_DCD_LOW 0x01
-#define SDLA_MODEM_CTS_LOW 0x02
-
-/* return codes */
-#define SDLA_RET_OK 0x00
-#define SDLA_RET_COMMUNICATIONS 0x01
-#define SDLA_RET_CHANNEL_INACTIVE 0x02
-#define SDLA_RET_DLCI_INACTIVE 0x03
-#define SDLA_RET_DLCI_CONFIG 0x04
-#define SDLA_RET_BUF_TOO_BIG 0x05
-#define SDLA_RET_NO_DATA 0x05
-#define SDLA_RET_BUF_OVERSIZE 0x06
-#define SDLA_RET_CIR_OVERFLOW 0x07
-#define SDLA_RET_NO_BUFS 0x08
-#define SDLA_RET_TIMEOUT 0x0A
-#define SDLA_RET_MODEM 0x10
-#define SDLA_RET_CHANNEL_OFF 0x11
-#define SDLA_RET_CHANNEL_ON 0x12
-#define SDLA_RET_DLCI_STATUS 0x13
-#define SDLA_RET_DLCI_UNKNOWN 0x14
-#define SDLA_RET_COMMAND_INVALID 0x1F
-
-/* Configuration flags */
-#define SDLA_DIRECT_RECV 0x0080
-#define SDLA_TX_NO_EXCEPT 0x0020
-#define SDLA_NO_ICF_MSGS 0x1000
-#define SDLA_TX50_RX50 0x0000
-#define SDLA_TX70_RX30 0x2000
-#define SDLA_TX30_RX70 0x4000
-
-/* IRQ selection flags */
-#define SDLA_IRQ_RECEIVE 0x01
-#define SDLA_IRQ_TRANSMIT 0x02
-#define SDLA_IRQ_MODEM_STAT 0x04
-#define SDLA_IRQ_COMMAND 0x08
-#define SDLA_IRQ_CHANNEL 0x10
-#define SDLA_IRQ_TIMER 0x20
-
-/* definitions for PC memory mapping */
-#define SDLA_8K_WINDOW 0x01
-#define SDLA_S502_SEG_A 0x10
-#define SDLA_S502_SEG_C 0x20
-#define SDLA_S502_SEG_D 0x00
-#define SDLA_S502_SEG_E 0x30
-#define SDLA_S507_SEG_A 0x00
-#define SDLA_S507_SEG_B 0x40
-#define SDLA_S507_SEG_C 0x80
-#define SDLA_S507_SEG_E 0xC0
-#define SDLA_S508_SEG_A 0x00
-#define SDLA_S508_SEG_C 0x10
-#define SDLA_S508_SEG_D 0x08
-#define SDLA_S508_SEG_E 0x18
-
-/* SDLA adapter port constants */
-#define SDLA_IO_EXTENTS 0x04
-
-#define SDLA_REG_CONTROL 0x00
-#define SDLA_REG_PC_WINDOW 0x01 /* offset for PC window select latch */
-#define SDLA_REG_Z80_WINDOW 0x02 /* offset for Z80 window select latch */
-#define SDLA_REG_Z80_CONTROL 0x03 /* offset for Z80 control latch */
-
-#define SDLA_S502_STS 0x00 /* status reg for 502, 502E, 507 */
-#define SDLA_S508_GNRL 0x00 /* general purp. reg for 508 */
-#define SDLA_S508_STS 0x01 /* status reg for 508 */
-#define SDLA_S508_IDR 0x02 /* ID reg for 508 */
-
-/* control register flags */
-#define SDLA_S502A_START 0x00 /* start the CPU */
-#define SDLA_S502A_INTREQ 0x02
-#define SDLA_S502A_INTEN 0x04
-#define SDLA_S502A_HALT 0x08 /* halt the CPU */
-#define SDLA_S502A_NMI 0x10 /* issue an NMI to the CPU */
-
-#define SDLA_S502E_CPUEN 0x01
-#define SDLA_S502E_ENABLE 0x02
-#define SDLA_S502E_INTACK 0x04
-
-#define SDLA_S507_ENABLE 0x01
-#define SDLA_S507_IRQ3 0x00
-#define SDLA_S507_IRQ4 0x20
-#define SDLA_S507_IRQ5 0x40
-#define SDLA_S507_IRQ7 0x60
-#define SDLA_S507_IRQ10 0x80
-#define SDLA_S507_IRQ11 0xA0
-#define SDLA_S507_IRQ12 0xC0
-#define SDLA_S507_IRQ15 0xE0
-
-#define SDLA_HALT 0x00
-#define SDLA_CPUEN 0x02
-#define SDLA_MEMEN 0x04
-#define SDLA_S507_EPROMWR 0x08
-#define SDLA_S507_EPROMCLK 0x10
-#define SDLA_S508_INTRQ 0x08
-#define SDLA_S508_INTEN 0x10
-
-struct sdla_cmd {
- char opp_flag;
- char cmd;
- short length;
- char retval;
- short dlci;
- char flags;
- short rxlost_int;
- long rxlost_app;
- char reserve[2];
- char data[SDLA_MAX_DATA]; /* transfer data buffer */
-} __attribute__((packed));
-
-struct intr_info {
- char flags;
- short txlen;
- char irq;
- char flags2;
- short timeout;
-} __attribute__((packed));
-
-/* found in the 508's control window at RXBUF_INFO */
-struct buf_info {
- unsigned short rse_num;
- unsigned long rse_base;
- unsigned long rse_next;
- unsigned long buf_base;
- unsigned short reserved;
- unsigned long buf_top;
-} __attribute__((packed));
-
-/* structure pointed to by rse_base in RXBUF_INFO struct */
-struct buf_entry {
- char opp_flag;
- short length;
- short dlci;
- char flags;
- short timestamp;
- short reserved[2];
- long buf_addr;
-} __attribute__((packed));
-
-#endif
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index ecc296c137cd..175079552f68 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -1,13 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SECCOMP_H
#define _LINUX_SECCOMP_H
#include <uapi/linux/seccomp.h>
-#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC)
+#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
+ SECCOMP_FILTER_FLAG_LOG | \
+ SECCOMP_FILTER_FLAG_SPEC_ALLOW | \
+ SECCOMP_FILTER_FLAG_NEW_LISTENER | \
+ SECCOMP_FILTER_FLAG_TSYNC_ESRCH | \
+ SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV)
+
+/* sizeof() the first published struct seccomp_notif_addfd */
+#define SECCOMP_NOTIFY_ADDFD_SIZE_VER0 24
+#define SECCOMP_NOTIFY_ADDFD_SIZE_LATEST SECCOMP_NOTIFY_ADDFD_SIZE_VER0
#ifdef CONFIG_SECCOMP
#include <linux/thread_info.h>
+#include <linux/atomic.h>
#include <asm/seccomp.h>
struct seccomp_filter;
@@ -16,6 +27,7 @@ struct seccomp_filter;
*
* @mode: indicates one of the valid values above for controlled
* system calls available to a process.
+ * @filter_count: number of seccomp filters
* @filter: must always point to a valid seccomp-filter or NULL as it is
* accessed without locking during system call entry.
*
@@ -24,15 +36,16 @@ struct seccomp_filter;
*/
struct seccomp {
int mode;
+ atomic_t filter_count;
struct seccomp_filter *filter;
};
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
extern int __secure_computing(const struct seccomp_data *sd);
-static inline int secure_computing(const struct seccomp_data *sd)
+static inline int secure_computing(void)
{
- if (unlikely(test_thread_flag(TIF_SECCOMP)))
- return __secure_computing(sd);
+ if (unlikely(test_syscall_work(SECCOMP)))
+ return __secure_computing(NULL);
return 0;
}
#else
@@ -40,7 +53,7 @@ extern void secure_computing_strict(int this_syscall);
#endif
extern long prctl_get_seccomp(void);
-extern long prctl_set_seccomp(unsigned long, char __user *);
+extern long prctl_set_seccomp(unsigned long, void __user *);
static inline int seccomp_mode(struct seccomp *s)
{
@@ -53,9 +66,11 @@ static inline int seccomp_mode(struct seccomp *s)
struct seccomp { };
struct seccomp_filter { };
+struct seccomp_data;
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
-static inline int secure_computing(struct seccomp_data *sd) { return 0; }
+static inline int secure_computing(void) { return 0; }
+static inline int __secure_computing(const struct seccomp_data *sd) { return 0; }
#else
static inline void secure_computing_strict(int this_syscall) { return; }
#endif
@@ -77,10 +92,10 @@ static inline int seccomp_mode(struct seccomp *s)
#endif /* CONFIG_SECCOMP */
#ifdef CONFIG_SECCOMP_FILTER
-extern void put_seccomp_filter(struct task_struct *tsk);
+extern void seccomp_filter_release(struct task_struct *tsk);
extern void get_seccomp_filter(struct task_struct *tsk);
#else /* CONFIG_SECCOMP_FILTER */
-static inline void put_seccomp_filter(struct task_struct *tsk)
+static inline void seccomp_filter_release(struct task_struct *tsk)
{
return;
}
@@ -93,11 +108,26 @@ static inline void get_seccomp_filter(struct task_struct *tsk)
#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
extern long seccomp_get_filter(struct task_struct *task,
unsigned long filter_off, void __user *data);
+extern long seccomp_get_metadata(struct task_struct *task,
+ unsigned long filter_off, void __user *data);
#else
static inline long seccomp_get_filter(struct task_struct *task,
unsigned long n, void __user *data)
{
return -EINVAL;
}
+static inline long seccomp_get_metadata(struct task_struct *task,
+ unsigned long filter_off,
+ void __user *data)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_SECCOMP_FILTER && CONFIG_CHECKPOINT_RESTORE */
+
+#ifdef CONFIG_SECCOMP_CACHE_DEBUG
+struct seq_file;
+
+int proc_pid_seccomp_cache(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task);
+#endif
#endif /* _LINUX_SECCOMP_H */
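The flags gathered into SECCOMP_FILTER_FLAG_MASK above mirror the userspace seccomp(2) API. As an illustration, a userspace sketch (not part of this header; assumes a libc that defines SYS_seccomp) that installs an allow-everything filter across all threads of the caller with SECCOMP_FILTER_FLAG_TSYNC:

#include <stddef.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int install_allow_all_filter(void)
{
        struct sock_filter insns[] = {
                /* load the syscall number, then allow everything */
                BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                         offsetof(struct seccomp_data, nr)),
                BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
        };
        struct sock_fprog prog = {
                .len    = sizeof(insns) / sizeof(insns[0]),
                .filter = insns,
        };

        /* required unless the caller has CAP_SYS_ADMIN */
        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                return -1;
        /* with TSYNC, returns 0 or the TID of a thread that could
         * not be switched to the new filter */
        return syscall(SYS_seccomp, SECCOMP_SET_MODE_FILTER,
                       SECCOMP_FILTER_FLAG_TSYNC, &prog);
}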
diff --git a/include/linux/secretmem.h b/include/linux/secretmem.h
new file mode 100644
index 000000000000..988528b5da43
--- /dev/null
+++ b/include/linux/secretmem.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_SECRETMEM_H
+#define _LINUX_SECRETMEM_H
+
+#ifdef CONFIG_SECRETMEM
+
+extern const struct address_space_operations secretmem_aops;
+
+static inline bool page_is_secretmem(struct page *page)
+{
+ struct address_space *mapping;
+
+ /*
+ * Using page_mapping() is quite slow because of the actual call
+ * instruction and repeated compound_head(page) inside the
+ * page_mapping() function.
+ * We know that secretmem pages are not compound and LRU so we can
+ * save a couple of cycles here.
+ */
+ if (PageCompound(page) || !PageLRU(page))
+ return false;
+
+ mapping = (struct address_space *)
+ ((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
+
+ if (!mapping || mapping != page->mapping)
+ return false;
+
+ return mapping->a_ops == &secretmem_aops;
+}
+
+bool vma_is_secretmem(struct vm_area_struct *vma);
+bool secretmem_active(void);
+
+#else
+
+static inline bool vma_is_secretmem(struct vm_area_struct *vma)
+{
+ return false;
+}
+
+static inline bool page_is_secretmem(struct page *page)
+{
+ return false;
+}
+
+static inline bool secretmem_active(void)
+{
+ return false;
+}
+
+#endif /* CONFIG_SECRETMEM */
+
+#endif /* _LINUX_SECRETMEM_H */
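This new header backs the memfd_secret(2) system call: pages of a secret mapping are removed from the kernel's direct map, and page_is_secretmem()/secretmem_active() let the rest of the kernel detect them. A userspace sketch (assumes CONFIG_SECRETMEM, secretmem enabled at boot, and a libc that defines SYS_memfd_secret):

#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        size_t len = 4096;
        int fd = syscall(SYS_memfd_secret, 0); /* no flags */
        char *p;

        if (fd < 0)
                return 1;       /* no kernel support, or secretmem_active() is false */
        if (ftruncate(fd, len) < 0)
                return 1;
        p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
                return 1;
        strcpy(p, "secret");    /* these pages have no direct-map alias */
        munmap(p, len);
        close(fd);
        return 0;
}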
diff --git a/include/linux/securebits.h b/include/linux/securebits.h
index da1b33b33af7..656528673983 100644
--- a/include/linux/securebits.h
+++ b/include/linux/securebits.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SECUREBITS_H
#define _LINUX_SECUREBITS_H 1
diff --git a/include/linux/security.h b/include/linux/security.h
index b6ea1dc9cc9d..e2734e9e44d5 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -23,6 +23,7 @@
#ifndef __LINUX_SECURITY_H
#define __LINUX_SECURITY_H
+#include <linux/kernel_read_file.h>
#include <linux/key.h>
#include <linux/capability.h>
#include <linux/fs.h>
@@ -30,13 +31,12 @@
#include <linux/err.h>
#include <linux/string.h>
#include <linux/mm.h>
-#include <linux/fs.h>
+#include <linux/sockptr.h>
struct linux_binprm;
struct cred;
struct rlimit;
-struct siginfo;
-struct sem_array;
+struct kernel_siginfo;
struct sembuf;
struct kern_ipc_perm;
struct audit_context;
@@ -50,18 +50,25 @@ struct qstr;
struct iattr;
struct fown_struct;
struct file_operations;
-struct shmid_kernel;
struct msg_msg;
-struct msg_queue;
struct xattr;
+struct kernfs_node;
struct xfrm_sec_ctx;
struct mm_struct;
-
+struct fs_context;
+struct fs_parameter;
+enum fs_value_type;
+struct watch;
+struct watch_notification;
+
+/* Default (no) options for the capable function */
+#define CAP_OPT_NONE 0x0
/* If capable should audit the security request */
-#define SECURITY_CAP_NOAUDIT 0
-#define SECURITY_CAP_AUDIT 1
+#define CAP_OPT_NOAUDIT BIT(1)
+/* If capable is being called by a setid function */
+#define CAP_OPT_INSETID BIT(2)
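The opts bitmask folds the old audited/unaudited call pair into one function; an illustrative check (capability, namespace, and helper name arbitrary):

static bool example_may_admin_net(const struct cred *cred)
{
        /* an audit-free check, formerly security_capable_noaudit() */
        return security_capable(cred, &init_user_ns, CAP_NET_ADMIN,
                                CAP_OPT_NOAUDIT) == 0;
}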
-/* LSM Agnostic defines for sb_set_mnt_opts */
+/* LSM Agnostic defines for security_sb_set_mnt_opts() flags */
#define SECURITY_LSM_NATIVE_LABELS 1
struct ctl_table;
@@ -73,9 +80,68 @@ enum lsm_event {
LSM_POLICY_CHANGE,
};
+/*
+ * These are reasons that can be passed to the security_locked_down()
+ * LSM hook. Lockdown reasons that protect kernel integrity (i.e., the
+ * ability for userland to modify kernel code) are placed before
+ * LOCKDOWN_INTEGRITY_MAX. Lockdown reasons that protect kernel
+ * confidentiality (i.e., the ability for userland to extract
+ * information from the running kernel that would otherwise be
+ * restricted) are placed before LOCKDOWN_CONFIDENTIALITY_MAX.
+ *
+ * LSM authors should note that the semantics of any given lockdown
+ * reason are not guaranteed to be stable - the same reason may block
+ * one set of features in one kernel release, and a slightly different
+ * set of features in a later kernel release. LSMs that seek to expose
+ * lockdown policy at any level of granularity other than "none",
+ * "integrity" or "confidentiality" are responsible for either
+ * ensuring that they expose a consistent level of functionality to
+ * userland, or ensuring that userland is aware that this is
+ * potentially a moving target. It is easy to misuse this information
+ * in a way that could break userspace. Please be careful not to do
+ * so.
+ *
+ * If you add to this, remember to extend lockdown_reasons in
+ * security/lockdown/lockdown.c.
+ */
+enum lockdown_reason {
+ LOCKDOWN_NONE,
+ LOCKDOWN_MODULE_SIGNATURE,
+ LOCKDOWN_DEV_MEM,
+ LOCKDOWN_EFI_TEST,
+ LOCKDOWN_KEXEC,
+ LOCKDOWN_HIBERNATION,
+ LOCKDOWN_PCI_ACCESS,
+ LOCKDOWN_IOPORT,
+ LOCKDOWN_MSR,
+ LOCKDOWN_ACPI_TABLES,
+ LOCKDOWN_DEVICE_TREE,
+ LOCKDOWN_PCMCIA_CIS,
+ LOCKDOWN_TIOCSSERIAL,
+ LOCKDOWN_MODULE_PARAMETERS,
+ LOCKDOWN_MMIOTRACE,
+ LOCKDOWN_DEBUGFS,
+ LOCKDOWN_XMON_WR,
+ LOCKDOWN_BPF_WRITE_USER,
+ LOCKDOWN_DBG_WRITE_KERNEL,
+ LOCKDOWN_RTAS_ERROR_INJECTION,
+ LOCKDOWN_INTEGRITY_MAX,
+ LOCKDOWN_KCORE,
+ LOCKDOWN_KPROBES,
+ LOCKDOWN_BPF_READ_KERNEL,
+ LOCKDOWN_DBG_READ_KERNEL,
+ LOCKDOWN_PERF,
+ LOCKDOWN_TRACEFS,
+ LOCKDOWN_XMON_RW,
+ LOCKDOWN_XFRM_SECRET,
+ LOCKDOWN_CONFIDENTIALITY_MAX,
+};
+
+extern const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1];
+
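Code outside the LSM passes one of these reasons to security_locked_down(), declared later in this header, and aborts the privileged operation on a non-zero return. Schematically (a sketch, not a quote from any real driver):

static int example_open_devmem(void)
{
        int err;

        /* refused once the kernel is locked down for integrity */
        err = security_locked_down(LOCKDOWN_DEV_MEM);
        if (err)
                return err;     /* typically -EPERM */
        /* ... proceed with the privileged access ... */
        return 0;
}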
/* These functions are in security/commoncap.c */
extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
- int cap, int audit);
+ int cap, unsigned int opts);
extern int cap_settime(const struct timespec64 *ts, const struct timezone *tz);
extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode);
extern int cap_ptrace_traceme(struct task_struct *parent);
@@ -84,13 +150,16 @@ extern int cap_capset(struct cred *new, const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted);
-extern int cap_bprm_set_creds(struct linux_binprm *bprm);
-extern int cap_bprm_secureexec(struct linux_binprm *bprm);
-extern int cap_inode_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags);
-extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
-extern int cap_inode_need_killpriv(struct dentry *dentry);
-extern int cap_inode_killpriv(struct dentry *dentry);
+extern int cap_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file);
+int cap_inode_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags);
+int cap_inode_removexattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *name);
+int cap_inode_need_killpriv(struct dentry *dentry);
+int cap_inode_killpriv(struct mnt_idmap *idmap, struct dentry *dentry);
+int cap_inode_getsecurity(struct mnt_idmap *idmap,
+ struct inode *inode, const char *name, void **buffer,
+ bool alloc);
extern int cap_mmap_addr(unsigned long addr);
extern int cap_mmap_file(struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags);
@@ -107,13 +176,14 @@ struct sk_buff;
struct sock;
struct sockaddr;
struct socket;
-struct flowi;
+struct flowi_common;
struct dst_entry;
struct xfrm_selector;
struct xfrm_policy;
struct xfrm_state;
struct xfrm_user_sec_ctx;
struct seq_file;
+struct sctp_association;
#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
@@ -153,56 +223,52 @@ struct request_sock;
#ifdef CONFIG_MMU
extern int mmap_min_addr_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+ void *buffer, size_t *lenp, loff_t *ppos);
#endif
/* security_inode_init_security callback function to write xattrs */
typedef int (*initxattrs) (struct inode *inode,
const struct xattr *xattr_array, void *fs_data);
-#ifdef CONFIG_SECURITY
-struct security_mnt_opts {
- char **mnt_opts;
- int *mnt_opts_flags;
- int num_mnt_opts;
+/* Keep the kernel_load_data_id enum in sync with kernel_read_file_id */
+#define __data_id_enumify(ENUM, dummy) LOADING_ ## ENUM,
+#define __data_id_stringify(dummy, str) #str,
+
+enum kernel_load_data_id {
+ __kernel_read_file_id(__data_id_enumify)
};
-int call_lsm_notifier(enum lsm_event event, void *data);
-int register_lsm_notifier(struct notifier_block *nb);
-int unregister_lsm_notifier(struct notifier_block *nb);
+static const char * const kernel_load_data_str[] = {
+ __kernel_read_file_id(__data_id_stringify)
+};
-static inline void security_init_mnt_opts(struct security_mnt_opts *opts)
+static inline const char *kernel_load_data_id_str(enum kernel_load_data_id id)
{
- opts->mnt_opts = NULL;
- opts->mnt_opts_flags = NULL;
- opts->num_mnt_opts = 0;
-}
+ if ((unsigned)id >= LOADING_MAX_ID)
+ return kernel_load_data_str[LOADING_UNKNOWN];
-static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
-{
- int i;
- if (opts->mnt_opts)
- for (i = 0; i < opts->num_mnt_opts; i++)
- kfree(opts->mnt_opts[i]);
- kfree(opts->mnt_opts);
- opts->mnt_opts = NULL;
- kfree(opts->mnt_opts_flags);
- opts->mnt_opts_flags = NULL;
- opts->num_mnt_opts = 0;
+ return kernel_load_data_str[id];
}
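Both the enum and the string table above are generated from the same __kernel_read_file_id() X-macro in <linux/kernel_read_file.h>, so the two stay in sync by construction. Schematically (entries abbreviated here; see that header for the full list), an invocation such as

#define __kernel_read_file_id(id)       \
        id(UNKNOWN, unknown)            \
        id(FIRMWARE, firmware)          \
        /* ... more entries ... */      \
        id(MAX_ID, )

expands through __data_id_enumify into LOADING_UNKNOWN, LOADING_FIRMWARE, ..., LOADING_MAX_ID, and through __data_id_stringify into "unknown", "firmware", ... in matching order, which is exactly what kernel_load_data_id_str() indexes.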
+#ifdef CONFIG_SECURITY
+
+int call_blocking_lsm_notifier(enum lsm_event event, void *data);
+int register_blocking_lsm_notifier(struct notifier_block *nb);
+int unregister_blocking_lsm_notifier(struct notifier_block *nb);
+
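The renamed notifier entry points keep the usual notifier_block convention from <linux/notifier.h>; a registration sketch (callback and names hypothetical):

static int example_lsm_event(struct notifier_block *nb,
                             unsigned long event, void *data)
{
        if (event == LSM_POLICY_CHANGE) {
                /* ... react to a policy reload ... */
        }
        return NOTIFY_OK;
}

static struct notifier_block example_lsm_nb = {
        .notifier_call  = example_lsm_event,
};

/* in some initialisation path:
 *      register_blocking_lsm_notifier(&example_lsm_nb);
 */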
/* prototypes */
extern int security_init(void);
+extern int early_security_init(void);
/* Security operations */
-int security_binder_set_context_mgr(struct task_struct *mgr);
-int security_binder_transaction(struct task_struct *from,
- struct task_struct *to);
-int security_binder_transfer_binder(struct task_struct *from,
- struct task_struct *to);
-int security_binder_transfer_file(struct task_struct *from,
- struct task_struct *to, struct file *file);
+int security_binder_set_context_mgr(const struct cred *mgr);
+int security_binder_transaction(const struct cred *from,
+ const struct cred *to);
+int security_binder_transfer_binder(const struct cred *from,
+ const struct cred *to);
+int security_binder_transfer_file(const struct cred *from,
+ const struct cred *to, struct file *file);
int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
int security_ptrace_traceme(struct task_struct *parent);
int security_capget(struct task_struct *target,
@@ -213,31 +279,30 @@ int security_capset(struct cred *new, const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted);
-int security_capable(const struct cred *cred, struct user_namespace *ns,
- int cap);
-int security_capable_noaudit(const struct cred *cred, struct user_namespace *ns,
- int cap);
+int security_capable(const struct cred *cred,
+ struct user_namespace *ns,
+ int cap,
+ unsigned int opts);
int security_quotactl(int cmds, int type, int id, struct super_block *sb);
int security_quota_on(struct dentry *dentry);
int security_syslog(int type);
int security_settime64(const struct timespec64 *ts, const struct timezone *tz);
-static inline int security_settime(const struct timespec *ts, const struct timezone *tz)
-{
- struct timespec64 ts64 = timespec_to_timespec64(*ts);
-
- return security_settime64(&ts64, tz);
-}
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
-int security_bprm_set_creds(struct linux_binprm *bprm);
+int security_bprm_creds_for_exec(struct linux_binprm *bprm);
+int security_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file);
int security_bprm_check(struct linux_binprm *bprm);
void security_bprm_committing_creds(struct linux_binprm *bprm);
void security_bprm_committed_creds(struct linux_binprm *bprm);
-int security_bprm_secureexec(struct linux_binprm *bprm);
+int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc);
+int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param);
int security_sb_alloc(struct super_block *sb);
+void security_sb_delete(struct super_block *sb);
void security_sb_free(struct super_block *sb);
-int security_sb_copy_data(char *orig, char *copy);
-int security_sb_remount(struct super_block *sb, void *data);
-int security_sb_kern_mount(struct super_block *sb, int flags, void *data);
+void security_free_mnt_opts(void **mnt_opts);
+int security_sb_eat_lsm_opts(char *options, void **mnt_opts);
+int security_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts);
+int security_sb_remount(struct super_block *sb, void *mnt_opts);
+int security_sb_kern_mount(struct super_block *sb);
int security_sb_show_options(struct seq_file *m, struct super_block *sb);
int security_sb_statfs(struct dentry *dentry);
int security_sb_mount(const char *dev_name, const struct path *path,
@@ -245,30 +310,32 @@ int security_sb_mount(const char *dev_name, const struct path *path,
int security_sb_umount(struct vfsmount *mnt, int flags);
int security_sb_pivotroot(const struct path *old_path, const struct path *new_path);
int security_sb_set_mnt_opts(struct super_block *sb,
- struct security_mnt_opts *opts,
+ void *mnt_opts,
unsigned long kern_flags,
unsigned long *set_kern_flags);
int security_sb_clone_mnt_opts(const struct super_block *oldsb,
struct super_block *newsb,
unsigned long kern_flags,
unsigned long *set_kern_flags);
-int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts);
+int security_move_mount(const struct path *from_path, const struct path *to_path);
int security_dentry_init_security(struct dentry *dentry, int mode,
- const struct qstr *name, void **ctx,
- u32 *ctxlen);
+ const struct qstr *name,
+ const char **xattr_name, void **ctx,
+ u32 *ctxlen);
int security_dentry_create_files_as(struct dentry *dentry, int mode,
struct qstr *name,
const struct cred *old,
struct cred *new);
-
+int security_path_notify(const struct path *path, u64 mask,
+ unsigned int obj_type);
int security_inode_alloc(struct inode *inode);
void security_inode_free(struct inode *inode);
int security_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr,
initxattrs initxattrs, void *fs_data);
-int security_old_inode_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr, const char **name,
- void **value, size_t *len);
+int security_inode_init_security_anon(struct inode *inode,
+ const struct qstr *name,
+ const struct inode *context_inode);
int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
@@ -285,23 +352,37 @@ int security_inode_readlink(struct dentry *dentry);
int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
bool rcu);
int security_inode_permission(struct inode *inode, int mask);
-int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
+int security_inode_setattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct iattr *attr);
int security_inode_getattr(const struct path *path);
-int security_inode_setxattr(struct dentry *dentry, const char *name,
+int security_inode_setxattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
+int security_inode_set_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name,
+ struct posix_acl *kacl);
+int security_inode_get_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name);
+int security_inode_remove_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name);
void security_inode_post_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
int security_inode_getxattr(struct dentry *dentry, const char *name);
int security_inode_listxattr(struct dentry *dentry);
-int security_inode_removexattr(struct dentry *dentry, const char *name);
+int security_inode_removexattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *name);
int security_inode_need_killpriv(struct dentry *dentry);
-int security_inode_killpriv(struct dentry *dentry);
-int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc);
+int security_inode_killpriv(struct mnt_idmap *idmap, struct dentry *dentry);
+int security_inode_getsecurity(struct mnt_idmap *idmap,
+ struct inode *inode, const char *name,
+ void **buffer, bool alloc);
int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags);
int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size);
void security_inode_getsecid(struct inode *inode, u32 *secid);
int security_inode_copy_up(struct dentry *src, struct cred **new);
int security_inode_copy_up_xattr(const char *name);
+int security_kernfs_init_security(struct kernfs_node *kn_dir,
+ struct kernfs_node *kn);
int security_file_permission(struct file *file, int mask);
int security_file_alloc(struct file *file);
void security_file_free(struct file *file);
@@ -317,26 +398,36 @@ void security_file_set_fowner(struct file *file);
int security_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int sig);
int security_file_receive(struct file *file);
-int security_file_open(struct file *file, const struct cred *cred);
-int security_task_create(unsigned long clone_flags);
+int security_file_open(struct file *file);
+int security_file_truncate(struct file *file);
int security_task_alloc(struct task_struct *task, unsigned long clone_flags);
void security_task_free(struct task_struct *task);
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
void security_cred_free(struct cred *cred);
int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp);
void security_transfer_creds(struct cred *new, const struct cred *old);
+void security_cred_getsecid(const struct cred *c, u32 *secid);
int security_kernel_act_as(struct cred *new, u32 secid);
int security_kernel_create_files_as(struct cred *new, struct inode *inode);
int security_kernel_module_request(char *kmod_name);
-int security_kernel_read_file(struct file *file, enum kernel_read_file_id id);
+int security_kernel_load_data(enum kernel_load_data_id id, bool contents);
+int security_kernel_post_load_data(char *buf, loff_t size,
+ enum kernel_load_data_id id,
+ char *description);
+int security_kernel_read_file(struct file *file, enum kernel_read_file_id id,
+ bool contents);
int security_kernel_post_read_file(struct file *file, char *buf, loff_t size,
enum kernel_read_file_id id);
int security_task_fix_setuid(struct cred *new, const struct cred *old,
int flags);
+int security_task_fix_setgid(struct cred *new, const struct cred *old,
+ int flags);
+int security_task_fix_setgroups(struct cred *new, const struct cred *old);
int security_task_setpgid(struct task_struct *p, pid_t pgid);
int security_task_getpgid(struct task_struct *p);
int security_task_getsid(struct task_struct *p);
-void security_task_getsecid(struct task_struct *p, u32 *secid);
+void security_current_getsecid_subj(u32 *secid);
+void security_task_getsecid_obj(struct task_struct *p, u32 *secid);
int security_task_setnice(struct task_struct *p, int nice);
int security_task_setioprio(struct task_struct *p, int ioprio);
int security_task_getioprio(struct task_struct *p);
@@ -347,71 +438,68 @@ int security_task_setrlimit(struct task_struct *p, unsigned int resource,
int security_task_setscheduler(struct task_struct *p);
int security_task_getscheduler(struct task_struct *p);
int security_task_movememory(struct task_struct *p);
-int security_task_kill(struct task_struct *p, struct siginfo *info,
- int sig, u32 secid);
+int security_task_kill(struct task_struct *p, struct kernel_siginfo *info,
+ int sig, const struct cred *cred);
int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
void security_task_to_inode(struct task_struct *p, struct inode *inode);
+int security_create_user_ns(const struct cred *cred);
int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag);
void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid);
int security_msg_msg_alloc(struct msg_msg *msg);
void security_msg_msg_free(struct msg_msg *msg);
-int security_msg_queue_alloc(struct msg_queue *msq);
-void security_msg_queue_free(struct msg_queue *msq);
-int security_msg_queue_associate(struct msg_queue *msq, int msqflg);
-int security_msg_queue_msgctl(struct msg_queue *msq, int cmd);
-int security_msg_queue_msgsnd(struct msg_queue *msq,
+int security_msg_queue_alloc(struct kern_ipc_perm *msq);
+void security_msg_queue_free(struct kern_ipc_perm *msq);
+int security_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg);
+int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd);
+int security_msg_queue_msgsnd(struct kern_ipc_perm *msq,
struct msg_msg *msg, int msqflg);
-int security_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg,
+int security_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *msg,
struct task_struct *target, long type, int mode);
-int security_shm_alloc(struct shmid_kernel *shp);
-void security_shm_free(struct shmid_kernel *shp);
-int security_shm_associate(struct shmid_kernel *shp, int shmflg);
-int security_shm_shmctl(struct shmid_kernel *shp, int cmd);
-int security_shm_shmat(struct shmid_kernel *shp, char __user *shmaddr, int shmflg);
-int security_sem_alloc(struct sem_array *sma);
-void security_sem_free(struct sem_array *sma);
-int security_sem_associate(struct sem_array *sma, int semflg);
-int security_sem_semctl(struct sem_array *sma, int cmd);
-int security_sem_semop(struct sem_array *sma, struct sembuf *sops,
+int security_shm_alloc(struct kern_ipc_perm *shp);
+void security_shm_free(struct kern_ipc_perm *shp);
+int security_shm_associate(struct kern_ipc_perm *shp, int shmflg);
+int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd);
+int security_shm_shmat(struct kern_ipc_perm *shp, char __user *shmaddr, int shmflg);
+int security_sem_alloc(struct kern_ipc_perm *sma);
+void security_sem_free(struct kern_ipc_perm *sma);
+int security_sem_associate(struct kern_ipc_perm *sma, int semflg);
+int security_sem_semctl(struct kern_ipc_perm *sma, int cmd);
+int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops,
unsigned nsops, int alter);
void security_d_instantiate(struct dentry *dentry, struct inode *inode);
-int security_getprocattr(struct task_struct *p, char *name, char **value);
-int security_setprocattr(const char *name, void *value, size_t size);
+int security_getprocattr(struct task_struct *p, const char *lsm, const char *name,
+ char **value);
+int security_setprocattr(const char *lsm, const char *name, void *value,
+ size_t size);
int security_netlink_send(struct sock *sk, struct sk_buff *skb);
int security_ismaclabel(const char *name);
int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid);
void security_release_secctx(char *secdata, u32 seclen);
-
void security_inode_invalidate_secctx(struct inode *inode);
int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen);
int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen);
int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen);
+int security_locked_down(enum lockdown_reason what);
#else /* CONFIG_SECURITY */
-struct security_mnt_opts {
-};
-static inline int call_lsm_notifier(enum lsm_event event, void *data)
+static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data)
{
return 0;
}
-static inline int register_lsm_notifier(struct notifier_block *nb)
+static inline int register_blocking_lsm_notifier(struct notifier_block *nb)
{
return 0;
}
-static inline int unregister_lsm_notifier(struct notifier_block *nb)
+static inline int unregister_blocking_lsm_notifier(struct notifier_block *nb)
{
return 0;
}
-static inline void security_init_mnt_opts(struct security_mnt_opts *opts)
-{
-}
-
-static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
+static inline void security_free_mnt_opts(void **mnt_opts)
{
}
@@ -425,25 +513,30 @@ static inline int security_init(void)
return 0;
}
-static inline int security_binder_set_context_mgr(struct task_struct *mgr)
+static inline int early_security_init(void)
+{
+ return 0;
+}
+
+static inline int security_binder_set_context_mgr(const struct cred *mgr)
{
return 0;
}
-static inline int security_binder_transaction(struct task_struct *from,
- struct task_struct *to)
+static inline int security_binder_transaction(const struct cred *from,
+ const struct cred *to)
{
return 0;
}
-static inline int security_binder_transfer_binder(struct task_struct *from,
- struct task_struct *to)
+static inline int security_binder_transfer_binder(const struct cred *from,
+ const struct cred *to)
{
return 0;
}
-static inline int security_binder_transfer_file(struct task_struct *from,
- struct task_struct *to,
+static inline int security_binder_transfer_file(const struct cred *from,
+ const struct cred *to,
struct file *file)
{
return 0;
@@ -478,14 +571,11 @@ static inline int security_capset(struct cred *new,
}
static inline int security_capable(const struct cred *cred,
- struct user_namespace *ns, int cap)
+ struct user_namespace *ns,
+ int cap,
+ unsigned int opts)
{
- return cap_capable(cred, ns, cap, SECURITY_CAP_AUDIT);
-}
-
-static inline int security_capable_noaudit(const struct cred *cred,
- struct user_namespace *ns, int cap) {
- return cap_capable(cred, ns, cap, SECURITY_CAP_NOAUDIT);
+ return cap_capable(cred, ns, cap, opts);
}
static inline int security_quotactl(int cmds, int type, int id,
@@ -510,22 +600,20 @@ static inline int security_settime64(const struct timespec64 *ts,
return cap_settime(ts, tz);
}
-static inline int security_settime(const struct timespec *ts,
- const struct timezone *tz)
+static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
- struct timespec64 ts64 = timespec_to_timespec64(*ts);
-
- return cap_settime(&ts64, tz);
+ return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages));
}
-static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
+static inline int security_bprm_creds_for_exec(struct linux_binprm *bprm)
{
- return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages));
+ return 0;
}
-static inline int security_bprm_set_creds(struct linux_binprm *bprm)
+static inline int security_bprm_creds_from_file(struct linux_binprm *bprm,
+ struct file *file)
{
- return cap_bprm_set_creds(bprm);
+ return cap_bprm_creds_from_file(bprm, file);
}
static inline int security_bprm_check(struct linux_binprm *bprm)
@@ -541,9 +629,15 @@ static inline void security_bprm_committed_creds(struct linux_binprm *bprm)
{
}
-static inline int security_bprm_secureexec(struct linux_binprm *bprm)
+static inline int security_fs_context_dup(struct fs_context *fc,
+ struct fs_context *src_fc)
+{
+ return 0;
+}
+static inline int security_fs_context_parse_param(struct fs_context *fc,
+ struct fs_parameter *param)
{
- return cap_bprm_secureexec(bprm);
+ return -ENOPARAM;
}
static inline int security_sb_alloc(struct super_block *sb)
@@ -551,20 +645,32 @@ static inline int security_sb_alloc(struct super_block *sb)
return 0;
}
+static inline void security_sb_delete(struct super_block *sb)
+{ }
+
static inline void security_sb_free(struct super_block *sb)
{ }
-static inline int security_sb_copy_data(char *orig, char *copy)
+static inline int security_sb_eat_lsm_opts(char *options,
+ void **mnt_opts)
+{
+ return 0;
+}
+
+static inline int security_sb_remount(struct super_block *sb,
+ void *mnt_opts)
{
return 0;
}
-static inline int security_sb_remount(struct super_block *sb, void *data)
+static inline int security_sb_mnt_opts_compat(struct super_block *sb,
+ void *mnt_opts)
{
return 0;
}
-static inline int security_sb_kern_mount(struct super_block *sb, int flags, void *data)
+
+static inline int security_sb_kern_mount(struct super_block *sb)
{
return 0;
}
@@ -599,7 +705,7 @@ static inline int security_sb_pivotroot(const struct path *old_path,
}
static inline int security_sb_set_mnt_opts(struct super_block *sb,
- struct security_mnt_opts *opts,
+ void *mnt_opts,
unsigned long kern_flags,
unsigned long *set_kern_flags)
{
@@ -614,7 +720,14 @@ static inline int security_sb_clone_mnt_opts(const struct super_block *oldsb,
return 0;
}
-static inline int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts)
+static inline int security_move_mount(const struct path *from_path,
+ const struct path *to_path)
+{
+ return 0;
+}
+
+static inline int security_path_notify(const struct path *path, u64 mask,
+ unsigned int obj_type)
{
return 0;
}
@@ -630,6 +743,7 @@ static inline void security_inode_free(struct inode *inode)
static inline int security_dentry_init_security(struct dentry *dentry,
int mode,
const struct qstr *name,
+ const char **xattr_name,
void **ctx,
u32 *ctxlen)
{
@@ -654,13 +768,11 @@ static inline int security_inode_init_security(struct inode *inode,
return 0;
}
-static inline int security_old_inode_init_security(struct inode *inode,
- struct inode *dir,
- const struct qstr *qstr,
- const char **name,
- void **value, size_t *len)
+static inline int security_inode_init_security_anon(struct inode *inode,
+ const struct qstr *name,
+ const struct inode *context_inode)
{
- return -EOPNOTSUPP;
+ return 0;
}
static inline int security_inode_create(struct inode *dir,
@@ -736,8 +848,9 @@ static inline int security_inode_permission(struct inode *inode, int mask)
return 0;
}
-static inline int security_inode_setattr(struct dentry *dentry,
- struct iattr *attr)
+static inline int security_inode_setattr(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ struct iattr *attr)
{
return 0;
}
@@ -747,12 +860,35 @@ static inline int security_inode_getattr(const struct path *path)
return 0;
}
-static inline int security_inode_setxattr(struct dentry *dentry,
- const char *name, const void *value, size_t size, int flags)
+static inline int security_inode_setxattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags)
{
return cap_inode_setxattr(dentry, name, value, size, flags);
}
+static inline int security_inode_set_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *acl_name,
+ struct posix_acl *kacl)
+{
+ return 0;
+}
+
+static inline int security_inode_get_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ return 0;
+}
+
+static inline int security_inode_remove_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ return 0;
+}
+
static inline void security_inode_post_setxattr(struct dentry *dentry,
const char *name, const void *value, size_t size, int flags)
{ }
@@ -768,10 +904,11 @@ static inline int security_inode_listxattr(struct dentry *dentry)
return 0;
}
-static inline int security_inode_removexattr(struct dentry *dentry,
- const char *name)
+static inline int security_inode_removexattr(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *name)
{
- return cap_inode_removexattr(dentry, name);
+ return cap_inode_removexattr(idmap, dentry, name);
}
static inline int security_inode_need_killpriv(struct dentry *dentry)
@@ -779,14 +916,18 @@ static inline int security_inode_need_killpriv(struct dentry *dentry)
return cap_inode_need_killpriv(dentry);
}
-static inline int security_inode_killpriv(struct dentry *dentry)
+static inline int security_inode_killpriv(struct mnt_idmap *idmap,
+ struct dentry *dentry)
{
- return cap_inode_killpriv(dentry);
+ return cap_inode_killpriv(idmap, dentry);
}
-static inline int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc)
+static inline int security_inode_getsecurity(struct mnt_idmap *idmap,
+ struct inode *inode,
+ const char *name, void **buffer,
+ bool alloc)
{
- return -EOPNOTSUPP;
+ return cap_inode_getsecurity(idmap, inode, name, buffer, alloc);
}
static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
@@ -809,6 +950,12 @@ static inline int security_inode_copy_up(struct dentry *src, struct cred **new)
return 0;
}
+static inline int security_kernfs_init_security(struct kernfs_node *kn_dir,
+ struct kernfs_node *kn)
+{
+ return 0;
+}
+
static inline int security_inode_copy_up_xattr(const char *name)
{
return -EOPNOTSUPP;
@@ -879,13 +1026,12 @@ static inline int security_file_receive(struct file *file)
return 0;
}
-static inline int security_file_open(struct file *file,
- const struct cred *cred)
+static inline int security_file_open(struct file *file)
{
return 0;
}
-static inline int security_task_create(unsigned long clone_flags)
+static inline int security_file_truncate(struct file *file)
{
return 0;
}
@@ -919,6 +1065,11 @@ static inline void security_transfer_creds(struct cred *new,
{
}
+static inline void security_cred_getsecid(const struct cred *c, u32 *secid)
+{
+ *secid = 0;
+}
+
static inline int security_kernel_act_as(struct cred *cred, u32 secid)
{
return 0;
@@ -935,8 +1086,21 @@ static inline int security_kernel_module_request(char *kmod_name)
return 0;
}
+static inline int security_kernel_load_data(enum kernel_load_data_id id, bool contents)
+{
+ return 0;
+}
+
+static inline int security_kernel_post_load_data(char *buf, loff_t size,
+ enum kernel_load_data_id id,
+ char *description)
+{
+ return 0;
+}
+
static inline int security_kernel_read_file(struct file *file,
- enum kernel_read_file_id id)
+ enum kernel_read_file_id id,
+ bool contents)
{
return 0;
}
@@ -955,6 +1119,19 @@ static inline int security_task_fix_setuid(struct cred *new,
return cap_task_fix_setuid(new, old, flags);
}
+static inline int security_task_fix_setgid(struct cred *new,
+ const struct cred *old,
+ int flags)
+{
+ return 0;
+}
+
+static inline int security_task_fix_setgroups(struct cred *new,
+ const struct cred *old)
+{
+ return 0;
+}
+
static inline int security_task_setpgid(struct task_struct *p, pid_t pgid)
{
return 0;
@@ -970,7 +1147,12 @@ static inline int security_task_getsid(struct task_struct *p)
return 0;
}
-static inline void security_task_getsecid(struct task_struct *p, u32 *secid)
+static inline void security_current_getsecid_subj(u32 *secid)
+{
+ *secid = 0;
+}
+
+static inline void security_task_getsecid_obj(struct task_struct *p, u32 *secid)
{
*secid = 0;
}
@@ -1020,8 +1202,8 @@ static inline int security_task_movememory(struct task_struct *p)
}
static inline int security_task_kill(struct task_struct *p,
- struct siginfo *info, int sig,
- u32 secid)
+ struct kernel_siginfo *info, int sig,
+ const struct cred *cred)
{
return 0;
}
@@ -1037,6 +1219,11 @@ static inline int security_task_prctl(int option, unsigned long arg2,
static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
{ }
+static inline int security_create_user_ns(const struct cred *cred)
+{
+ return 0;
+}
+
static inline int security_ipc_permission(struct kern_ipc_perm *ipcp,
short flag)
{
@@ -1056,32 +1243,32 @@ static inline int security_msg_msg_alloc(struct msg_msg *msg)
static inline void security_msg_msg_free(struct msg_msg *msg)
{ }
-static inline int security_msg_queue_alloc(struct msg_queue *msq)
+static inline int security_msg_queue_alloc(struct kern_ipc_perm *msq)
{
return 0;
}
-static inline void security_msg_queue_free(struct msg_queue *msq)
+static inline void security_msg_queue_free(struct kern_ipc_perm *msq)
{ }
-static inline int security_msg_queue_associate(struct msg_queue *msq,
+static inline int security_msg_queue_associate(struct kern_ipc_perm *msq,
int msqflg)
{
return 0;
}
-static inline int security_msg_queue_msgctl(struct msg_queue *msq, int cmd)
+static inline int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd)
{
return 0;
}
-static inline int security_msg_queue_msgsnd(struct msg_queue *msq,
+static inline int security_msg_queue_msgsnd(struct kern_ipc_perm *msq,
struct msg_msg *msg, int msqflg)
{
return 0;
}
-static inline int security_msg_queue_msgrcv(struct msg_queue *msq,
+static inline int security_msg_queue_msgrcv(struct kern_ipc_perm *msq,
struct msg_msg *msg,
struct task_struct *target,
long type, int mode)
@@ -1089,65 +1276,68 @@ static inline int security_msg_queue_msgrcv(struct msg_queue *msq,
return 0;
}
-static inline int security_shm_alloc(struct shmid_kernel *shp)
+static inline int security_shm_alloc(struct kern_ipc_perm *shp)
{
return 0;
}
-static inline void security_shm_free(struct shmid_kernel *shp)
+static inline void security_shm_free(struct kern_ipc_perm *shp)
{ }
-static inline int security_shm_associate(struct shmid_kernel *shp,
+static inline int security_shm_associate(struct kern_ipc_perm *shp,
int shmflg)
{
return 0;
}
-static inline int security_shm_shmctl(struct shmid_kernel *shp, int cmd)
+static inline int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd)
{
return 0;
}
-static inline int security_shm_shmat(struct shmid_kernel *shp,
+static inline int security_shm_shmat(struct kern_ipc_perm *shp,
char __user *shmaddr, int shmflg)
{
return 0;
}
-static inline int security_sem_alloc(struct sem_array *sma)
+static inline int security_sem_alloc(struct kern_ipc_perm *sma)
{
return 0;
}
-static inline void security_sem_free(struct sem_array *sma)
+static inline void security_sem_free(struct kern_ipc_perm *sma)
{ }
-static inline int security_sem_associate(struct sem_array *sma, int semflg)
+static inline int security_sem_associate(struct kern_ipc_perm *sma, int semflg)
{
return 0;
}
-static inline int security_sem_semctl(struct sem_array *sma, int cmd)
+static inline int security_sem_semctl(struct kern_ipc_perm *sma, int cmd)
{
return 0;
}
-static inline int security_sem_semop(struct sem_array *sma,
+static inline int security_sem_semop(struct kern_ipc_perm *sma,
struct sembuf *sops, unsigned nsops,
int alter)
{
return 0;
}
-static inline void security_d_instantiate(struct dentry *dentry, struct inode *inode)
+static inline void security_d_instantiate(struct dentry *dentry,
+ struct inode *inode)
{ }
-static inline int security_getprocattr(struct task_struct *p, char *name, char **value)
+static inline int security_getprocattr(struct task_struct *p, const char *lsm,
+ const char *name, char **value)
{
return -EINVAL;
}
-static inline int security_setprocattr(char *name, void *value, size_t size)
+static inline int security_setprocattr(const char *lsm, char *name,
+ void *value, size_t size)
{
return -EINVAL;
}
@@ -1194,8 +1384,34 @@ static inline int security_inode_getsecctx(struct inode *inode, void **ctx, u32
{
return -EOPNOTSUPP;
}
+static inline int security_locked_down(enum lockdown_reason what)
+{
+ return 0;
+}
#endif /* CONFIG_SECURITY */
+#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
+int security_post_notification(const struct cred *w_cred,
+ const struct cred *cred,
+ struct watch_notification *n);
+#else
+static inline int security_post_notification(const struct cred *w_cred,
+ const struct cred *cred,
+ struct watch_notification *n)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_SECURITY) && defined(CONFIG_KEY_NOTIFICATIONS)
+int security_watch_key(struct key *key);
+#else
+static inline int security_watch_key(struct key *key)
+{
+ return 0;
+}
+#endif
+
#ifdef CONFIG_SECURITY_NETWORK
int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk);
@@ -1203,6 +1419,7 @@ int security_unix_may_send(struct socket *sock, struct socket *other);
int security_socket_create(int family, int type, int protocol, int kern);
int security_socket_post_create(struct socket *sock, int family,
int type, int protocol, int kern);
+int security_socket_socketpair(struct socket *socka, struct socket *sockb);
int security_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen);
int security_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen);
int security_socket_listen(struct socket *sock, int backlog);
@@ -1216,16 +1433,17 @@ int security_socket_getsockopt(struct socket *sock, int level, int optname);
int security_socket_setsockopt(struct socket *sock, int level, int optname);
int security_socket_shutdown(struct socket *sock, int how);
int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb);
-int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
- int __user *optlen, unsigned len);
+int security_socket_getpeersec_stream(struct socket *sock, sockptr_t optval,
+ sockptr_t optlen, unsigned int len);
int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid);
int security_sk_alloc(struct sock *sk, int family, gfp_t priority);
void security_sk_free(struct sock *sk);
void security_sk_clone(const struct sock *sk, struct sock *newsk);
-void security_sk_classify_flow(struct sock *sk, struct flowi *fl);
-void security_req_classify_flow(const struct request_sock *req, struct flowi *fl);
+void security_sk_classify_flow(struct sock *sk, struct flowi_common *flic);
+void security_req_classify_flow(const struct request_sock *req,
+ struct flowi_common *flic);
void security_sock_graft(struct sock *sk, struct socket *parent);
-int security_inet_conn_request(struct sock *sk,
+int security_inet_conn_request(const struct sock *sk,
struct sk_buff *skb, struct request_sock *req);
void security_inet_csk_clone(struct sock *newsk,
const struct request_sock *req);
@@ -1240,6 +1458,13 @@ int security_tun_dev_create(void);
int security_tun_dev_attach_queue(void *security);
int security_tun_dev_attach(struct sock *sk, void *security);
int security_tun_dev_open(void *security);
+int security_sctp_assoc_request(struct sctp_association *asoc, struct sk_buff *skb);
+int security_sctp_bind_connect(struct sock *sk, int optname,
+ struct sockaddr *address, int addrlen);
+void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk,
+ struct sock *newsk);
+int security_sctp_assoc_established(struct sctp_association *asoc,
+ struct sk_buff *skb);
#else /* CONFIG_SECURITY_NETWORK */
static inline int security_unix_stream_connect(struct sock *sock,
@@ -1269,6 +1494,12 @@ static inline int security_socket_post_create(struct socket *sock,
return 0;
}
+static inline int security_socket_socketpair(struct socket *socka,
+ struct socket *sockb)
+{
+ return 0;
+}
+
static inline int security_socket_bind(struct socket *sock,
struct sockaddr *address,
int addrlen)
@@ -1339,8 +1570,10 @@ static inline int security_sock_rcv_skb(struct sock *sk,
return 0;
}
-static inline int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
- int __user *optlen, unsigned len)
+static inline int security_socket_getpeersec_stream(struct socket *sock,
+ sockptr_t optval,
+ sockptr_t optlen,
+ unsigned int len)
{
return -ENOPROTOOPT;
}
@@ -1363,11 +1596,13 @@ static inline void security_sk_clone(const struct sock *sk, struct sock *newsk)
{
}
-static inline void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
+static inline void security_sk_classify_flow(struct sock *sk,
+ struct flowi_common *flic)
{
}
-static inline void security_req_classify_flow(const struct request_sock *req, struct flowi *fl)
+static inline void security_req_classify_flow(const struct request_sock *req,
+ struct flowi_common *flic)
{
}
@@ -1375,7 +1610,7 @@ static inline void security_sock_graft(struct sock *sk, struct socket *parent)
{
}
-static inline int security_inet_conn_request(struct sock *sk,
+static inline int security_inet_conn_request(const struct sock *sk,
struct sk_buff *skb, struct request_sock *req)
{
return 0;
@@ -1432,6 +1667,31 @@ static inline int security_tun_dev_open(void *security)
{
return 0;
}
+
+static inline int security_sctp_assoc_request(struct sctp_association *asoc,
+ struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline int security_sctp_bind_connect(struct sock *sk, int optname,
+ struct sockaddr *address,
+ int addrlen)
+{
+ return 0;
+}
+
+static inline void security_sctp_sk_clone(struct sctp_association *asoc,
+ struct sock *sk,
+ struct sock *newsk)
+{
+}
+
+static inline int security_sctp_assoc_established(struct sctp_association *asoc,
+ struct sk_buff *skb)
+{
+ return 0;
+}
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_INFINIBAND
@@ -1472,12 +1732,12 @@ int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
struct xfrm_sec_ctx *polsec, u32 secid);
int security_xfrm_state_delete(struct xfrm_state *x);
void security_xfrm_state_free(struct xfrm_state *x);
-int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
+int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid);
int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
struct xfrm_policy *xp,
- const struct flowi *fl);
+ const struct flowi_common *flic);
int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid);
-void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl);
+void security_skb_classify_flow(struct sk_buff *skb, struct flowi_common *flic);
#else /* CONFIG_SECURITY_NETWORK_XFRM */
@@ -1523,13 +1783,14 @@ static inline int security_xfrm_state_delete(struct xfrm_state *x)
return 0;
}
-static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
+static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid)
{
return 0;
}
static inline int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
- struct xfrm_policy *xp, const struct flowi *fl)
+ struct xfrm_policy *xp,
+ const struct flowi_common *flic)
{
return 1;
}
@@ -1539,7 +1800,8 @@ static inline int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
return 0;
}
-static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl)
+static inline void security_skb_classify_flow(struct sk_buff *skb,
+ struct flowi_common *flic)
{
}
@@ -1633,8 +1895,8 @@ static inline int security_path_chroot(const struct path *path)
int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags);
void security_key_free(struct key *key);
-int security_key_permission(key_ref_t key_ref,
- const struct cred *cred, unsigned perm);
+int security_key_permission(key_ref_t key_ref, const struct cred *cred,
+ enum key_need_perm need_perm);
int security_key_getsecurity(struct key *key, char **_buffer);
#else
@@ -1652,7 +1914,7 @@ static inline void security_key_free(struct key *key)
static inline int security_key_permission(key_ref_t key_ref,
const struct cred *cred,
- unsigned perm)
+ enum key_need_perm need_perm)
{
return 0;
}
@@ -1670,8 +1932,7 @@ static inline int security_key_getsecurity(struct key *key, char **_buffer)
#ifdef CONFIG_SECURITY
int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule);
int security_audit_rule_known(struct audit_krule *krule);
-int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule,
- struct audit_context *actx);
+int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule);
void security_audit_rule_free(void *lsmrule);
#else
@@ -1688,7 +1949,7 @@ static inline int security_audit_rule_known(struct audit_krule *krule)
}
static inline int security_audit_rule_match(u32 secid, u32 field, u32 op,
- void *lsmrule, struct audit_context *actx)
+ void *lsmrule)
{
return 0;
}
@@ -1741,28 +2002,111 @@ static inline void securityfs_remove(struct dentry *dentry)
#endif
+#ifdef CONFIG_BPF_SYSCALL
+union bpf_attr;
+struct bpf_map;
+struct bpf_prog;
+struct bpf_prog_aux;
#ifdef CONFIG_SECURITY
+extern int security_bpf(int cmd, union bpf_attr *attr, unsigned int size);
+extern int security_bpf_map(struct bpf_map *map, fmode_t fmode);
+extern int security_bpf_prog(struct bpf_prog *prog);
+extern int security_bpf_map_alloc(struct bpf_map *map);
+extern void security_bpf_map_free(struct bpf_map *map);
+extern int security_bpf_prog_alloc(struct bpf_prog_aux *aux);
+extern void security_bpf_prog_free(struct bpf_prog_aux *aux);
+#else
+static inline int security_bpf(int cmd, union bpf_attr *attr,
+ unsigned int size)
+{
+ return 0;
+}
-static inline char *alloc_secdata(void)
+static inline int security_bpf_map(struct bpf_map *map, fmode_t fmode)
{
- return (char *)get_zeroed_page(GFP_KERNEL);
+ return 0;
}
-static inline void free_secdata(void *secdata)
+static inline int security_bpf_prog(struct bpf_prog *prog)
{
- free_page((unsigned long)secdata);
+ return 0;
}
-#else
+static inline int security_bpf_map_alloc(struct bpf_map *map)
+{
+ return 0;
+}
+
+static inline void security_bpf_map_free(struct bpf_map *map)
+{ }
-static inline char *alloc_secdata(void)
+static inline int security_bpf_prog_alloc(struct bpf_prog_aux *aux)
{
- return (char *)1;
+ return 0;
}
-static inline void free_secdata(void *secdata)
+static inline void security_bpf_prog_free(struct bpf_prog_aux *aux)
{ }
#endif /* CONFIG_SECURITY */
+#endif /* CONFIG_BPF_SYSCALL */
-#endif /* ! __LINUX_SECURITY_H */
+#ifdef CONFIG_PERF_EVENTS
+struct perf_event_attr;
+struct perf_event;
+#ifdef CONFIG_SECURITY
+extern int security_perf_event_open(struct perf_event_attr *attr, int type);
+extern int security_perf_event_alloc(struct perf_event *event);
+extern void security_perf_event_free(struct perf_event *event);
+extern int security_perf_event_read(struct perf_event *event);
+extern int security_perf_event_write(struct perf_event *event);
+#else
+static inline int security_perf_event_open(struct perf_event_attr *attr,
+ int type)
+{
+ return 0;
+}
+
+static inline int security_perf_event_alloc(struct perf_event *event)
+{
+ return 0;
+}
+
+static inline void security_perf_event_free(struct perf_event *event)
+{
+}
+
+static inline int security_perf_event_read(struct perf_event *event)
+{
+ return 0;
+}
+
+static inline int security_perf_event_write(struct perf_event *event)
+{
+ return 0;
+}
+#endif /* CONFIG_SECURITY */
+#endif /* CONFIG_PERF_EVENTS */
+
+#ifdef CONFIG_IO_URING
+#ifdef CONFIG_SECURITY
+extern int security_uring_override_creds(const struct cred *new);
+extern int security_uring_sqpoll(void);
+extern int security_uring_cmd(struct io_uring_cmd *ioucmd);
+#else
+static inline int security_uring_override_creds(const struct cred *new)
+{
+ return 0;
+}
+static inline int security_uring_sqpoll(void)
+{
+ return 0;
+}
+static inline int security_uring_cmd(struct io_uring_cmd *ioucmd)
+{
+ return 0;
+}
+#endif /* CONFIG_SECURITY */
+#endif /* CONFIG_IO_URING */
+
+#endif /* ! __LINUX_SECURITY_H */
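For illustration — a minimal sketch, not part of this diff, of the stub pattern this header uses throughout: each hook is declared for real under CONFIG_SECURITY (or under the feature guards added above, e.g. CONFIG_BPF_SYSCALL, CONFIG_PERF_EVENTS, CONFIG_IO_URING) and becomes a no-op static inline otherwise, so callers never need #ifdefs. The hook name here is hypothetical.

#ifdef CONFIG_SECURITY
int security_example_hook(struct file *file);
#else
static inline int security_example_hook(struct file *file)
{
	return 0;	/* no LSM built in: permit by default */
}
#endif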
diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h
index 04b124fca51e..bbae1e52ab4f 100644
--- a/include/linux/sed-opal.h
+++ b/include/linux/sed-opal.h
@@ -1,25 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright © 2016 Intel Corporation
*
* Authors:
* Rafael Antognolli <rafael.antognolli@intel.com>
* Scott Bauer <scott.bauer@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef LINUX_OPAL_H
#define LINUX_OPAL_H
#include <uapi/linux/sed-opal.h>
-#include <linux/kernel.h>
+#include <linux/compiler_types.h>
+#include <linux/types.h>
struct opal_dev;
@@ -47,6 +40,13 @@ static inline bool is_sed_ioctl(unsigned int cmd)
case IOC_OPAL_ENABLE_DISABLE_MBR:
case IOC_OPAL_ERASE_LR:
case IOC_OPAL_SECURE_ERASE_LR:
+ case IOC_OPAL_PSID_REVERT_TPR:
+ case IOC_OPAL_MBR_DONE:
+ case IOC_OPAL_WRITE_SHADOW_MBR:
+ case IOC_OPAL_GENERIC_TABLE_RW:
+ case IOC_OPAL_GET_STATUS:
+ case IOC_OPAL_GET_LR_STATUS:
+ case IOC_OPAL_GET_GEOMETRY:
return true;
}
return false;
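A sketch of the intended caller side, not part of this diff: a block driver's ioctl handler routes any command that is_sed_ioctl() recognizes to sed_ioctl(), which is declared elsewhere in this header. The foo_* names and the opal_dev lookup are hypothetical.

static int foo_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct foo_dev *foo = bdev->bd_disk->private_data; /* hypothetical */

	if (is_sed_ioctl(cmd))
		return sed_ioctl(foo->opal_dev, cmd, (void __user *)arg);
	return -ENOTTY;
}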
diff --git a/include/linux/seg6.h b/include/linux/seg6.h
index 7a66d2b4c5a6..369066a33ab9 100644
--- a/include/linux/seg6.h
+++ b/include/linux/seg6.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SEG6_H
#define _LINUX_SEG6_H
diff --git a/include/linux/seg6_genl.h b/include/linux/seg6_genl.h
index d6c3fb4f3734..2f25a3cd7cb3 100644
--- a/include/linux/seg6_genl.h
+++ b/include/linux/seg6_genl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SEG6_GENL_H
#define _LINUX_SEG6_GENL_H
diff --git a/include/linux/seg6_hmac.h b/include/linux/seg6_hmac.h
index da437ebdc6cd..16e59595e870 100644
--- a/include/linux/seg6_hmac.h
+++ b/include/linux/seg6_hmac.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SEG6_HMAC_H
#define _LINUX_SEG6_HMAC_H
diff --git a/include/linux/seg6_iptunnel.h b/include/linux/seg6_iptunnel.h
index 5377cf6a5a02..d07df7fc9dee 100644
--- a/include/linux/seg6_iptunnel.h
+++ b/include/linux/seg6_iptunnel.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SEG6_IPTUNNEL_H
#define _LINUX_SEG6_IPTUNNEL_H
diff --git a/include/linux/seg6_local.h b/include/linux/seg6_local.h
new file mode 100644
index 000000000000..ee63e76fe0c7
--- /dev/null
+++ b/include/linux/seg6_local.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_SEG6_LOCAL_H
+#define _LINUX_SEG6_LOCAL_H
+
+#include <uapi/linux/seg6_local.h>
+
+#endif
diff --git a/include/linux/selection.h b/include/linux/selection.h
index 8e4624efdb6f..170ef28ff26b 100644
--- a/include/linux/selection.h
+++ b/include/linux/selection.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* selection.h
*
@@ -11,17 +12,20 @@
#include <linux/vt_buffer.h>
struct tty_struct;
-
-extern struct vc_data *sel_cons;
-struct tty_struct;
+struct vc_data;
extern void clear_selection(void);
-extern int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty);
+extern int set_selection_user(const struct tiocl_selection __user *sel,
+ struct tty_struct *tty);
+extern int set_selection_kernel(struct tiocl_selection *v,
+ struct tty_struct *tty);
extern int paste_selection(struct tty_struct *tty);
extern int sel_loadlut(char __user *p);
extern int mouse_reporting(void);
extern void mouse_report(struct tty_struct *tty, int butt, int mrx, int mry);
+bool vc_is_sel(struct vc_data *vc);
+
extern int console_blanked;
extern const unsigned char color_table[];
@@ -29,16 +33,24 @@ extern unsigned char default_red[];
extern unsigned char default_grn[];
extern unsigned char default_blu[];
-extern unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed);
-extern u16 screen_glyph(struct vc_data *vc, int offset);
+extern unsigned short *screen_pos(const struct vc_data *vc, int w_offset,
+ bool viewed);
+extern u16 screen_glyph(const struct vc_data *vc, int offset);
+extern u32 screen_glyph_unicode(const struct vc_data *vc, int offset);
extern void complement_pos(struct vc_data *vc, int offset);
-extern void invert_screen(struct vc_data *vc, int offset, int count, int shift);
+extern void invert_screen(struct vc_data *vc, int offset, int count, bool viewed);
-extern void getconsxy(struct vc_data *vc, unsigned char *p);
-extern void putconsxy(struct vc_data *vc, unsigned char *p);
+extern void getconsxy(const struct vc_data *vc, unsigned char xy[static 2]);
+extern void putconsxy(struct vc_data *vc, unsigned char xy[static const 2]);
-extern u16 vcs_scr_readw(struct vc_data *vc, const u16 *org);
+extern u16 vcs_scr_readw(const struct vc_data *vc, const u16 *org);
extern void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org);
extern void vcs_scr_updated(struct vc_data *vc);
+extern int vc_uniscr_check(struct vc_data *vc);
+extern void vc_uniscr_copy_line(const struct vc_data *vc, void *dest,
+ bool viewed,
+ unsigned int row, unsigned int col,
+ unsigned int nr);
+
#endif
diff --git a/include/linux/selinux.h b/include/linux/selinux.h
deleted file mode 100644
index 44f459612690..000000000000
--- a/include/linux/selinux.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * SELinux services exported to the rest of the kernel.
- *
- * Author: James Morris <jmorris@redhat.com>
- *
- * Copyright (C) 2005 Red Hat, Inc., James Morris <jmorris@redhat.com>
- * Copyright (C) 2006 Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
- * Copyright (C) 2006 IBM Corporation, Timothy R. Chavez <tinytim@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
- */
-#ifndef _LINUX_SELINUX_H
-#define _LINUX_SELINUX_H
-
-struct selinux_audit_rule;
-struct audit_context;
-struct kern_ipc_perm;
-
-#ifdef CONFIG_SECURITY_SELINUX
-
-/**
- * selinux_is_enabled - is SELinux enabled?
- */
-bool selinux_is_enabled(void);
-#else
-
-static inline bool selinux_is_enabled(void)
-{
- return false;
-}
-#endif /* CONFIG_SECURITY_SELINUX */
-
-#endif /* _LINUX_SELINUX_H */
diff --git a/include/linux/sem.h b/include/linux/sem.h
index be5cf2ea14ad..5608a500c43e 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -1,47 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SEM_H
#define _LINUX_SEM_H
-#include <linux/atomic.h>
-#include <linux/rcupdate.h>
-#include <linux/cache.h>
#include <uapi/linux/sem.h>
struct task_struct;
-
-/* One semaphore structure for each semaphore in the system. */
-struct sem {
- int semval; /* current value */
- /*
- * PID of the process that last modified the semaphore. For
- * Linux, specifically these are:
- * - semop
- * - semctl, via SETVAL and SETALL.
- * - at task exit when performing undo adjustments (see exit_sem).
- */
- int sempid;
- spinlock_t lock; /* spinlock for fine-grained semtimedop */
- struct list_head pending_alter; /* pending single-sop operations */
- /* that alter the semaphore */
- struct list_head pending_const; /* pending single-sop operations */
- /* that do not alter the semaphore*/
- time_t sem_otime; /* candidate for sem_otime */
-} ____cacheline_aligned_in_smp;
-
-/* One sem_array data structure for each set of semaphores in the system. */
-struct sem_array {
- struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */
- time_t sem_ctime; /* create/last semctl() time */
- struct list_head pending_alter; /* pending operations */
- /* that alter the array */
- struct list_head pending_const; /* pending complex operations */
- /* that do not alter semvals */
- struct list_head list_id; /* undo requests on this array */
- int sem_nsems; /* no. of semaphores in array */
- int complex_count; /* pending complex operations */
- unsigned int use_global_lock;/* >0: global lock required */
-
- struct sem sems[];
-};
+struct sem_undo_list;
#ifdef CONFIG_SYSVIPC
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index dc368b8ce215..04655faadc2d 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2008 Intel Corporation
* Author: Matthew Wilcox <willy@linux.intel.com>
*
- * Distributed under the terms of the GNU GPL, version 2
- *
- * Please see kernel/semaphore.c for documentation of these functions
+ * Please see kernel/locking/semaphore.c for documentation of these functions
*/
#ifndef __LINUX_SEMAPHORE_H
#define __LINUX_SEMAPHORE_H
@@ -26,8 +25,14 @@ struct semaphore {
.wait_list = LIST_HEAD_INIT((name).wait_list), \
}
-#define DEFINE_SEMAPHORE(name) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+/*
+ * Unlike mutexes, binary semaphores do not have an owner, so up() can
+ * be called in a different thread from the one which called down().
+ * It is also safe to call down_trylock() and up() from interrupt
+ * context.
+ */
+#define DEFINE_SEMAPHORE(_name, _n) \
+ struct semaphore _name = __SEMAPHORE_INITIALIZER(_name, _n)
static inline void sema_init(struct semaphore *sem, int val)
{
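A usage sketch for the new two-argument DEFINE_SEMAPHORE(), not part of this diff (foo_sem and foo_work are hypothetical); a count of 1 gives the binary semaphore described in the comment above:

static DEFINE_SEMAPHORE(foo_sem, 1);

static int foo_work(void)
{
	if (down_interruptible(&foo_sem))
		return -EINTR;
	/* ... serialized section; up() may run from another thread ... */
	up(&foo_sem);
	return 0;
}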
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
index fb7eb9ccb1cd..515d7fcb9634 100644
--- a/include/linux/seq_buf.h
+++ b/include/linux/seq_buf.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SEQ_BUF_H
#define _LINUX_SEQ_BUF_H
@@ -29,7 +30,7 @@ static inline void seq_buf_clear(struct seq_buf *s)
}
static inline void
-seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size)
+seq_buf_init(struct seq_buf *s, char *buf, unsigned int size)
{
s->buffer = buf;
s->size = size;
@@ -71,6 +72,31 @@ static inline unsigned int seq_buf_used(struct seq_buf *s)
}
/**
+ * seq_buf_terminate - Make sure buffer is nul terminated
+ * @s: the seq_buf descriptor to terminate.
+ *
+ * This makes sure that the buffer in @s is nul terminated and
+ * safe to read as a string.
+ *
+ * Note, if this is called when the buffer has overflowed, then
+ * the last byte of the buffer is zeroed, and the len will still
+ * point past it.
+ *
+ * After this function is called, s->buffer is safe to use
+ * in string operations.
+ */
+static inline void seq_buf_terminate(struct seq_buf *s)
+{
+ if (WARN_ON(s->size == 0))
+ return;
+
+ if (seq_buf_buffer_left(s))
+ s->buffer[s->len] = 0;
+ else
+ s->buffer[s->size - 1] = 0;
+}
+
+/**
* seq_buf_get_buf - get buffer to write arbitrary data to
* @s: the seq_buf handle
* @bufp: the beginning of the buffer is stored here
@@ -124,10 +150,15 @@ extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len);
extern int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
unsigned int len);
extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc);
+extern int seq_buf_hex_dump(struct seq_buf *s, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii);
#ifdef CONFIG_BINARY_PRINTF
extern int
seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary);
#endif
+void seq_buf_do_printk(struct seq_buf *s, const char *lvl);
+
#endif /* _LINUX_SEQ_BUF_H */
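A short usage sketch, not part of this diff: fill a seq_buf, then call the new seq_buf_terminate() before treating the buffer as a C string. seq_buf_printf() is declared elsewhere in this header; foo_report and buf are hypothetical.

static void foo_report(void)
{
	char buf[64];
	struct seq_buf s;

	seq_buf_init(&s, buf, sizeof(buf));
	seq_buf_printf(&s, "online cpus: %u", num_online_cpus());
	seq_buf_terminate(&s);	/* buf is now safely nul terminated */
	pr_info("%s\n", buf);
}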
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index e305b66a9fb9..bd023dd38ae6 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -1,8 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SEQ_FILE_H
#define _LINUX_SEQ_FILE_H
#include <linux/types.h>
#include <linux/string.h>
+#include <linux/string_helpers.h>
#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/cpumask.h>
@@ -20,7 +22,6 @@ struct seq_file {
size_t pad_until;
loff_t index;
loff_t read_pos;
- u64 version;
struct mutex lock;
const struct seq_operations *op;
int poll_event;
@@ -107,6 +108,7 @@ void seq_pad(struct seq_file *m, char c);
char *mangle_path(char *s, const char *p, const char *esc);
int seq_open(struct file *, const struct seq_operations *);
ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
+ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter);
loff_t seq_lseek(struct file *, loff_t, int);
int seq_release(struct inode *, struct file *);
int seq_write(struct seq_file *seq, const void *data, size_t len);
@@ -117,10 +119,38 @@ __printf(2, 3)
void seq_printf(struct seq_file *m, const char *fmt, ...);
void seq_putc(struct seq_file *m, char c);
void seq_puts(struct seq_file *m, const char *s);
+void seq_put_decimal_ull_width(struct seq_file *m, const char *delimiter,
+ unsigned long long num, unsigned int width);
void seq_put_decimal_ull(struct seq_file *m, const char *delimiter,
unsigned long long num);
void seq_put_decimal_ll(struct seq_file *m, const char *delimiter, long long num);
-void seq_escape(struct seq_file *m, const char *s, const char *esc);
+void seq_put_hex_ll(struct seq_file *m, const char *delimiter,
+ unsigned long long v, unsigned int width);
+
+void seq_escape_mem(struct seq_file *m, const char *src, size_t len,
+ unsigned int flags, const char *esc);
+
+static inline void seq_escape_str(struct seq_file *m, const char *src,
+ unsigned int flags, const char *esc)
+{
+ seq_escape_mem(m, src, strlen(src), flags, esc);
+}
+
+/**
+ * seq_escape - print string into buffer, escaping some characters
+ * @m: target buffer
+ * @s: NULL-terminated string
+ * @esc: set of characters that need escaping
+ *
+ * Puts the string into the buffer, replacing each occurrence of a
+ * character from @esc with its usual octal escape.
+ *
+ * Use seq_has_overflowed() to check for errors.
+ */
+static inline void seq_escape(struct seq_file *m, const char *s, const char *esc)
+{
+ seq_escape_str(m, s, ESCAPE_OCTAL, esc);
+}
void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type,
int rowsize, int groupsize, const void *buf, size_t len,
@@ -132,6 +162,7 @@ int seq_dentry(struct seq_file *, struct dentry *, const char *);
int seq_path_root(struct seq_file *m, const struct path *path,
const struct path *root, const char *esc);
+void *single_start(struct seq_file *, loff_t *);
int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
int single_release(struct inode *, struct file *);
@@ -139,6 +170,56 @@ void *__seq_open_private(struct file *, const struct seq_operations *, int);
int seq_open_private(struct file *, const struct seq_operations *, int);
int seq_release_private(struct inode *, struct file *);
+#ifdef CONFIG_BINARY_PRINTF
+void seq_bprintf(struct seq_file *m, const char *f, const u32 *binary);
+#endif
+
+#define DEFINE_SEQ_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ int ret = seq_open(file, &__name ## _sops); \
+ if (!ret && inode->i_private) { \
+ struct seq_file *seq_f = file->private_data; \
+ seq_f->private = inode->i_private; \
+ } \
+ return ret; \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = seq_release, \
+}
+
+#define DEFINE_SHOW_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+#define DEFINE_PROC_SHOW_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, pde_data(inode)); \
+} \
+ \
+static const struct proc_ops __name ## _proc_ops = { \
+ .proc_open = __name ## _open, \
+ .proc_read = seq_read, \
+ .proc_lseek = seq_lseek, \
+ .proc_release = single_release, \
+}
+
static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
{
#ifdef CONFIG_USER_NS
@@ -196,6 +277,10 @@ extern struct list_head *seq_list_start_head(struct list_head *head,
extern struct list_head *seq_list_next(void *v, struct list_head *head,
loff_t *ppos);
+extern struct list_head *seq_list_start_rcu(struct list_head *head, loff_t pos);
+extern struct list_head *seq_list_start_head_rcu(struct list_head *head, loff_t pos);
+extern struct list_head *seq_list_next_rcu(void *v, struct list_head *head, loff_t *ppos);
+
/*
* Helpers for iteration over hlist_head-s in seq_files
*/
@@ -220,4 +305,5 @@ extern struct hlist_node *seq_hlist_start_percpu(struct hlist_head __percpu *hea
extern struct hlist_node *seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head, int *cpu, loff_t *pos);
+void seq_file_init(void);
#endif
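A sketch of how the new DEFINE_SHOW_ATTRIBUTE() helper is meant to be used, not part of this diff; foo_stats, foo_hits and the debugfs wiring are hypothetical (needs <linux/debugfs.h> and <linux/seq_file.h>):

static unsigned long foo_hits;

static int foo_stats_show(struct seq_file *m, void *unused)
{
	seq_printf(m, "hits: %lu\n", foo_hits);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo_stats);	/* generates foo_stats_fops */

static int __init foo_init(void)
{
	debugfs_create_file("foo_stats", 0444, NULL, NULL, &foo_stats_fops);
	return 0;
}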
diff --git a/include/linux/seq_file_net.h b/include/linux/seq_file_net.h
index 32c89bbe24a2..79638395bc32 100644
--- a/include/linux/seq_file_net.h
+++ b/include/linux/seq_file_net.h
@@ -1,23 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SEQ_FILE_NET_H__
#define __SEQ_FILE_NET_H__
#include <linux/seq_file.h>
+#include <net/net_trackers.h>
struct net;
extern struct net init_net;
struct seq_net_private {
#ifdef CONFIG_NET_NS
- struct net *net;
+ struct net *net;
+ netns_tracker ns_tracker;
#endif
};
-int seq_open_net(struct inode *, struct file *,
- const struct seq_operations *, int);
-int single_open_net(struct inode *, struct file *file,
- int (*show)(struct seq_file *, void *));
-int seq_release_net(struct inode *, struct file *);
-int single_release_net(struct inode *, struct file *);
static inline struct net *seq_file_net(struct seq_file *seq)
{
#ifdef CONFIG_NET_NS
@@ -27,4 +24,17 @@ static inline struct net *seq_file_net(struct seq_file *seq)
#endif
}
+/*
+ * This one is needed for proc_create_net_single since net is stored directly
+ * in ->private, not inside a seq_net_private struct, so seq_file_net() can't
+ * be used.
+ */
+static inline struct net *seq_file_single_net(struct seq_file *seq)
+{
+#ifdef CONFIG_NET_NS
+ return (struct net *)seq->private;
+#else
+ return &init_net;
+#endif
+}
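A usage sketch, not part of this diff: a show callback registered via proc_create_net_single() recovers its network namespace with the helper above. The foo_show name and the printed field are hypothetical.

static int foo_show(struct seq_file *seq, void *v)
{
	struct net *net = seq_file_single_net(seq);

	seq_printf(seq, "%d\n", net->ifindex);	/* hypothetical use of net */
	return 0;
}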
+
#endif
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index ead97654c4e9..3926e9027947 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -1,48 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
+
/*
- * Reader/writer consistent mechanism without starving writers. This type of
- * lock for data where the reader wants a consistent set of information
- * and is willing to retry if the information changes. There are two types
- * of readers:
- * 1. Sequence readers which never block a writer but they may have to retry
- * if a writer is in progress by detecting change in sequence number.
- * Writers do not wait for a sequence reader.
- * 2. Locking readers which will wait if a writer or another locking reader
- * is in progress. A locking reader in progress will also block a writer
- * from going forward. Unlike the regular rwlock, the read lock here is
- * exclusive so that only one locking reader can get it.
- *
- * This is not as cache friendly as brlock. Also, this may not work well
- * for data that contains pointers, because any writer could
- * invalidate a pointer that a reader was following.
- *
- * Expected non-blocking reader usage:
- * do {
- * seq = read_seqbegin(&foo);
- * ...
- * } while (read_seqretry(&foo, seq));
+ * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
+ * lockless readers (read-only retry loops), and no writer starvation.
*
+ * See Documentation/locking/seqlock.rst
*
- * On non-SMP the spin locks disappear but the writer still needs
- * to increment the sequence variables because an interrupt routine could
- * change the state of the data.
- *
- * Based on x86_64 vsyscall gettimeofday
- * by Keith Owens and Andrea Arcangeli
+ * Copyrights:
+ * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
+ * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
*/
-#include <linux/spinlock.h>
-#include <linux/preempt.h>
-#include <linux/lockdep.h>
#include <linux/compiler.h>
+#include <linux/kcsan-checks.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <linux/preempt.h>
+#include <linux/spinlock.h>
+
#include <asm/processor.h>
/*
- * Version using sequence counter only.
- * This can be used when code has its own mutex protecting the
- * updating starting before the write_seqcount_begin() and ending
- * after the write_seqcount_end().
+ * The seqlock seqcount_t interface does not prescribe a precise sequence of
+ * read begin/retry/end. For readers, typically there is a call to
+ * read_seqcount_begin() and read_seqcount_retry(), however, there are more
+ * esoteric cases which do not follow this pattern.
+ *
+ * As a consequence, we take the following best-effort approach for raw usage
+ * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
+ * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
+ * atomics; if there is a matching read_seqcount_retry() call, no following
+ * memory operations are considered atomic. Usage of the seqlock_t interface
+ * is not affected.
+ */
+#define KCSAN_SEQLOCK_REGION_MAX 1000
+
+/*
+ * Sequence counters (seqcount_t)
+ *
+ * This is the raw counting mechanism, without any writer protection.
+ *
+ * Write side critical sections must be serialized and non-preemptible.
+ *
+ * If readers can be invoked from hardirq or softirq contexts,
+ * interrupts or bottom halves must also be respectively disabled before
+ * entering the write section.
+ *
+ * This mechanism can't be used if the protected data contains pointers,
+ * as the writer can invalidate a pointer that a reader is following.
+ *
+ * If the write serialization mechanism is one of the common kernel
+ * locking primitives, use a sequence counter with associated lock
+ * (seqcount_LOCKNAME_t) instead.
+ *
+ * If it's desired to automatically handle the sequence counter writer
+ * serialization and non-preemptibility requirements, use a sequential
+ * lock (seqlock_t) instead.
+ *
+ * See Documentation/locking/seqlock.rst
*/
typedef struct seqcount {
unsigned sequence;
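The "read-only retry loop" the rewritten comment above refers to looks like the following sketch (not part of this diff; foo_seqcount, foo_value and foo_read_value are hypothetical):

static seqcount_t foo_seqcount = SEQCNT_ZERO(foo_seqcount);
static u64 foo_value;

static u64 foo_read_value(void)
{
	unsigned seq;
	u64 val;

	do {
		seq = read_seqcount_begin(&foo_seqcount);
		val = foo_value;	/* copy out the protected data */
	} while (read_seqcount_retry(&foo_seqcount, seq));

	return val;
}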
@@ -62,13 +79,18 @@ static inline void __seqcount_init(seqcount_t *s, const char *name,
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define SEQCOUNT_DEP_MAP_INIT(lockname) \
- .dep_map = { .name = #lockname } \
-# define seqcount_init(s) \
- do { \
- static struct lock_class_key __key; \
- __seqcount_init((s), #s, &__key); \
+# define SEQCOUNT_DEP_MAP_INIT(lockname) \
+ .dep_map = { .name = #lockname }
+
+/**
+ * seqcount_init() - runtime initializer for seqcount_t
+ * @s: Pointer to the seqcount_t instance
+ */
+# define seqcount_init(s) \
+ do { \
+ static struct lock_class_key __key; \
+ __seqcount_init((s), #s, &__key); \
} while (0)
static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
@@ -78,7 +100,7 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
local_irq_save(flags);
seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
- seqcount_release(&l->dep_map, 1, _RET_IP_);
+ seqcount_release(&l->dep_map, _RET_IP_);
local_irq_restore(flags);
}
@@ -88,13 +110,207 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
# define seqcount_lockdep_reader_access(x)
#endif
-#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}
+/**
+ * SEQCNT_ZERO() - static initializer for seqcount_t
+ * @name: Name of the seqcount_t instance
+ */
+#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
+
+/*
+ * Sequence counters with associated locks (seqcount_LOCKNAME_t)
+ *
+ * A sequence counter which associates the lock used for writer
+ * serialization at initialization time. This enables lockdep to validate
+ * that the write side critical section is properly serialized.
+ *
+ * For associated locks which do not implicitly disable preemption,
+ * preemption protection is enforced in the write side function.
+ *
+ * Lockdep is never used in any of the raw write variants.
+ *
+ * See Documentation/locking/seqlock.rst
+ */
+
+/*
+ * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
+ * disable preemption. It can lead to higher latencies, and the write side
+ * sections will not be able to acquire locks which become sleeping locks
+ * (e.g. spinlock_t).
+ *
+ * To remain preemptible while avoiding a possible livelock caused by the
+ * reader preempting the writer, use a different technique: let the reader
+ * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
+ * case, acquire then release the associated LOCKNAME writer serialization
+ * lock. This will allow any possibly-preempted writer to make progress
+ * until the end of its writer serialization lock critical section.
+ *
+ * This lock-unlock technique must be implemented for all of PREEMPT_RT
+ * sleeping locks. See Documentation/locking/locktypes.rst
+ */
+#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
+#define __SEQ_LOCK(expr) expr
+#else
+#define __SEQ_LOCK(expr)
+#endif
+
+/*
+ * typedef seqcount_LOCKNAME_t - sequence counter with an associated LOCKNAME lock
+ * @seqcount: The real sequence counter
+ * @lock: Pointer to the associated lock
+ *
+ * A plain sequence counter with external writer synchronization by
+ * LOCKNAME @lock. The lock is associated to the sequence counter in the
+ * static initializer or init function. This enables lockdep to validate
+ * that the write side critical section is properly serialized.
+ *
+ * LOCKNAME: raw_spinlock, spinlock, rwlock or mutex
+ */
+
+/*
+ * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
+ * @s: Pointer to the seqcount_LOCKNAME_t instance
+ * @lock: Pointer to the associated lock
+ */
+
+#define seqcount_LOCKNAME_init(s, _lock, lockname) \
+ do { \
+ seqcount_##lockname##_t *____s = (s); \
+ seqcount_init(&____s->seqcount); \
+ __SEQ_LOCK(____s->lock = (_lock)); \
+ } while (0)
+
+#define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock)
+#define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock)
+#define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock)
+#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex)
+
+/*
+ * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
+ * seqprop_LOCKNAME_*() - Property accessors for seqcount_LOCKNAME_t
+ *
+ * @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t
+ * @locktype: LOCKNAME canonical C data type
+ * @preemptible: preemptibility of above locktype
+ * @lockmember: argument for lockdep_assert_held()
+ * @lockbase: associated lock release function (prefix only)
+ * @lock_acquire: associated lock acquisition function (full call)
+ */
+#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember, lockbase, lock_acquire) \
+typedef struct seqcount_##lockname { \
+ seqcount_t seqcount; \
+ __SEQ_LOCK(locktype *lock); \
+} seqcount_##lockname##_t; \
+ \
+static __always_inline seqcount_t * \
+__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
+{ \
+ return &s->seqcount; \
+} \
+ \
+static __always_inline unsigned \
+__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
+{ \
+ unsigned seq = READ_ONCE(s->seqcount.sequence); \
+ \
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
+ return seq; \
+ \
+ if (preemptible && unlikely(seq & 1)) { \
+ __SEQ_LOCK(lock_acquire); \
+ __SEQ_LOCK(lockbase##_unlock(s->lock)); \
+ \
+ /* \
+ * Re-read the sequence counter since the (possibly \
+ * preempted) writer made progress. \
+ */ \
+ seq = READ_ONCE(s->seqcount.sequence); \
+ } \
+ \
+ return seq; \
+} \
+ \
+static __always_inline bool \
+__seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \
+{ \
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
+ return preemptible; \
+ \
+ /* PREEMPT_RT relies on the above LOCK+UNLOCK */ \
+ return false; \
+} \
+ \
+static __always_inline void \
+__seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \
+{ \
+ __SEQ_LOCK(lockdep_assert_held(lockmember)); \
+}
+
+/*
+ * __seqprop() for seqcount_t
+ */
+
+static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
+{
+ return s;
+}
+
+static inline unsigned __seqprop_sequence(const seqcount_t *s)
+{
+ return READ_ONCE(s->sequence);
+}
+
+static inline bool __seqprop_preemptible(const seqcount_t *s)
+{
+ return false;
+}
+
+static inline void __seqprop_assert(const seqcount_t *s)
+{
+ lockdep_assert_preemption_disabled();
+}
+
+#define __SEQ_RT IS_ENABLED(CONFIG_PREEMPT_RT)
+
+SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock, raw_spin, raw_spin_lock(s->lock))
+SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, s->lock, spin, spin_lock(s->lock))
+SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, s->lock, read, read_lock(s->lock))
+SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex, mutex_lock(s->lock))
+
+/*
+ * SEQCOUNT_LOCKNAME_ZERO() - static initializer for seqcount_LOCKNAME_t
+ * @name: Name of the seqcount_LOCKNAME_t instance
+ * @lock: Pointer to the associated LOCKNAME
+ */
+
+#define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) { \
+ .seqcount = SEQCNT_ZERO(seq_name.seqcount), \
+ __SEQ_LOCK(.lock = (assoc_lock)) \
+}
+
+#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+#define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+#define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
+
+#define __seqprop_case(s, lockname, prop) \
+ seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s))
+
+#define __seqprop(s, prop) _Generic(*(s), \
+ seqcount_t: __seqprop_##prop((void *)(s)), \
+ __seqprop_case((s), raw_spinlock, prop), \
+ __seqprop_case((s), spinlock, prop), \
+ __seqprop_case((s), rwlock, prop), \
+ __seqprop_case((s), mutex, prop))
+#define seqprop_ptr(s) __seqprop(s, ptr)
+#define seqprop_sequence(s) __seqprop(s, sequence)
+#define seqprop_preemptible(s) __seqprop(s, preemptible)
+#define seqprop_assert(s) __seqprop(s, assert)
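With the seqprop dispatch above, one read/write API covers both plain seqcount_t and the new seqcount_LOCKNAME_t types. A write-side sketch, not part of this diff (foo_* names are hypothetical; write_seqcount_begin() and write_seqcount_end() are defined further down in this header):

static DEFINE_SPINLOCK(foo_lock);
static seqcount_spinlock_t foo_seq = SEQCNT_SPINLOCK_ZERO(foo_seq, &foo_lock);
static u64 foo_value;

static void foo_set(u64 v)
{
	spin_lock(&foo_lock);
	write_seqcount_begin(&foo_seq);	/* lockdep checks foo_lock is held */
	foo_value = v;
	write_seqcount_end(&foo_seq);
	spin_unlock(&foo_lock);
}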
/**
- * __read_seqcount_begin - begin a seq-read critical section (without barrier)
- * @s: pointer to seqcount_t
- * Returns: count to be passed to read_seqcount_retry
+ * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
* __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
* barrier. Callers should ensure that smp_rmb() or equivalent ordering is
@@ -103,93 +319,96 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
*
* Use carefully, only in critical code, and comment how the barrier is
* provided.
+ *
+ * Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned __read_seqcount_begin(const seqcount_t *s)
-{
- unsigned ret;
-
-repeat:
- ret = READ_ONCE(s->sequence);
- if (unlikely(ret & 1)) {
- cpu_relax();
- goto repeat;
- }
- return ret;
-}
+#define __read_seqcount_begin(s) \
+({ \
+ unsigned __seq; \
+ \
+ while ((__seq = seqprop_sequence(s)) & 1) \
+ cpu_relax(); \
+ \
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
+ __seq; \
+})
/**
- * raw_read_seqcount - Read the raw seqcount
- * @s: pointer to seqcount_t
- * Returns: count to be passed to read_seqcount_retry
+ * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
- * raw_read_seqcount opens a read critical section of the given
- * seqcount without any lockdep checking and without checking or
- * masking the LSB. Calling code is responsible for handling that.
+ * Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned raw_read_seqcount(const seqcount_t *s)
-{
- unsigned ret = READ_ONCE(s->sequence);
- smp_rmb();
- return ret;
-}
+#define raw_read_seqcount_begin(s) \
+({ \
+ unsigned _seq = __read_seqcount_begin(s); \
+ \
+ smp_rmb(); \
+ _seq; \
+})
/**
- * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
- * @s: pointer to seqcount_t
- * Returns: count to be passed to read_seqcount_retry
+ * read_seqcount_begin() - begin a seqcount_t read critical section
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
- * raw_read_seqcount_begin opens a read critical section of the given
- * seqcount, but without any lockdep checking. Validity of the critical
- * section is tested by checking read_seqcount_retry function.
+ * Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
-{
- unsigned ret = __read_seqcount_begin(s);
- smp_rmb();
- return ret;
-}
+#define read_seqcount_begin(s) \
+({ \
+ seqcount_lockdep_reader_access(seqprop_ptr(s)); \
+ raw_read_seqcount_begin(s); \
+})
/**
- * read_seqcount_begin - begin a seq-read critical section
- * @s: pointer to seqcount_t
- * Returns: count to be passed to read_seqcount_retry
+ * raw_read_seqcount() - read the raw seqcount_t counter value
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * raw_read_seqcount opens a read critical section of the given
+ * seqcount_t, without any lockdep checking, and without checking or
+ * masking the sequence counter LSB. Calling code is responsible for
+ * handling that.
*
- * read_seqcount_begin opens a read critical section of the given seqcount.
- * Validity of the critical section is tested by checking read_seqcount_retry
- * function.
+ * Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned read_seqcount_begin(const seqcount_t *s)
-{
- seqcount_lockdep_reader_access(s);
- return raw_read_seqcount_begin(s);
-}
+#define raw_read_seqcount(s) \
+({ \
+ unsigned __seq = seqprop_sequence(s); \
+ \
+ smp_rmb(); \
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
+ __seq; \
+})
/**
- * raw_seqcount_begin - begin a seq-read critical section
- * @s: pointer to seqcount_t
- * Returns: count to be passed to read_seqcount_retry
+ * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
+ * lockdep and w/o counter stabilization
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
- * raw_seqcount_begin opens a read critical section of the given seqcount.
- * Validity of the critical section is tested by checking read_seqcount_retry
- * function.
+ * raw_seqcount_begin opens a read critical section of the given
+ * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
+ * for the count to stabilize. If a writer is active when it begins, it
+ * will fail the read_seqcount_retry() at the end of the read critical
+ * section instead of stabilizing at the beginning of it.
*
- * Unlike read_seqcount_begin(), this function will not wait for the count
- * to stabilize. If a writer is active when we begin, we will fail the
- * read_seqcount_retry() instead of stabilizing at the beginning of the
- * critical section.
+ * Use this only in special kernel hot paths where the read section is
+ * small and has a high probability of success through other external
+ * means. It will save a single branching instruction.
+ *
+ * Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned raw_seqcount_begin(const seqcount_t *s)
-{
- unsigned ret = READ_ONCE(s->sequence);
- smp_rmb();
- return ret & ~1;
-}
+#define raw_seqcount_begin(s) \
+({ \
+ /* \
+ * If the counter is odd, let read_seqcount_retry() fail \
+ * by decrementing the counter. \
+ */ \
+ raw_read_seqcount(s) & ~1; \
+})
/**
- * __read_seqcount_retry - end a seq-read critical section (without barrier)
- * @s: pointer to seqcount_t
- * @start: count, from read_seqcount_begin
- * Returns: 1 if retry is required, else 0
+ * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ * @start: count, from read_seqcount_begin()
*
* __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
* barrier. Callers should ensure that smp_rmb() or equivalent ordering is
@@ -198,94 +417,287 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
*
* Use carefully, only in critical code, and comment how the barrier is
* provided.
+ *
+ * Return: true if a read section retry is required, else false
*/
-static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
+#define __read_seqcount_retry(s, start) \
+ do___read_seqcount_retry(seqprop_ptr(s), start)
+
+static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
{
- return unlikely(s->sequence != start);
+ kcsan_atomic_next(0);
+ return unlikely(READ_ONCE(s->sequence) != start);
}
/**
- * read_seqcount_retry - end a seq-read critical section
- * @s: pointer to seqcount_t
- * @start: count, from read_seqcount_begin
- * Returns: 1 if retry is required, else 0
+ * read_seqcount_retry() - end a seqcount_t read critical section
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ * @start: count, from read_seqcount_begin()
*
- * read_seqcount_retry closes a read critical section of the given seqcount.
- * If the critical section was invalid, it must be ignored (and typically
- * retried).
+ * read_seqcount_retry closes the read critical section of given
+ * seqcount_t. If the critical section was invalid, it must be ignored
+ * (and typically retried).
+ *
+ * Return: true if a read section retry is required, else false
*/
-static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
+#define read_seqcount_retry(s, start) \
+ do_read_seqcount_retry(seqprop_ptr(s), start)
+
+static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
{
smp_rmb();
- return __read_seqcount_retry(s, start);
+ return do___read_seqcount_retry(s, start);
}
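
As a sketch of the "barrier provided elsewhere" case that the comment above
asks callers to document (obj and its fields are hypothetical):

	do {
		seq = read_seqcount_begin(&obj->seq);
		a = obj->a;
		b = obj->b;
		smp_rmb();	/* orders the data loads before the count re-read */
	} while (__read_seqcount_retry(&obj->seq, seq));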
+/**
+ * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * Context: check write_seqcount_begin()
+ */
+#define raw_write_seqcount_begin(s) \
+do { \
+ if (seqprop_preemptible(s)) \
+ preempt_disable(); \
+ \
+ do_raw_write_seqcount_begin(seqprop_ptr(s)); \
+} while (0)
-
-static inline void raw_write_seqcount_begin(seqcount_t *s)
+static inline void do_raw_write_seqcount_begin(seqcount_t *s)
{
+ kcsan_nestable_atomic_begin();
s->sequence++;
smp_wmb();
}
-static inline void raw_write_seqcount_end(seqcount_t *s)
+/**
+ * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * Context: check write_seqcount_end()
+ */
+#define raw_write_seqcount_end(s) \
+do { \
+ do_raw_write_seqcount_end(seqprop_ptr(s)); \
+ \
+ if (seqprop_preemptible(s)) \
+ preempt_enable(); \
+} while (0)
+
+static inline void do_raw_write_seqcount_end(seqcount_t *s)
{
smp_wmb();
s->sequence++;
+ kcsan_nestable_atomic_end();
+}
+
+/**
+ * write_seqcount_begin_nested() - start a seqcount_t write section with
+ * custom lockdep nesting level
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ * @subclass: lockdep nesting level
+ *
+ * See Documentation/locking/lockdep-design.rst
+ * Context: check write_seqcount_begin()
+ */
+#define write_seqcount_begin_nested(s, subclass) \
+do { \
+ seqprop_assert(s); \
+ \
+ if (seqprop_preemptible(s)) \
+ preempt_disable(); \
+ \
+ do_write_seqcount_begin_nested(seqprop_ptr(s), subclass); \
+} while (0)
+
+static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
+{
+ do_raw_write_seqcount_begin(s);
+ seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}
/**
- * raw_write_seqcount_barrier - do a seq write barrier
- * @s: pointer to seqcount_t
+ * write_seqcount_begin() - start a seqcount_t write side critical section
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
- * This can be used to provide an ordering guarantee instead of the
- * usual consistency guarantee. It is one wmb cheaper, because we can
- * collapse the two back-to-back wmb()s.
+ * Context: sequence counter write side sections must be serialized and
+ * non-preemptible. Preemption will be automatically disabled if and
+ * only if the seqcount write serialization lock is associated, and
+ * preemptible. If readers can be invoked from hardirq or softirq
+ * context, interrupts or bottom halves must be respectively disabled.
+ */
+#define write_seqcount_begin(s) \
+do { \
+ seqprop_assert(s); \
+ \
+ if (seqprop_preemptible(s)) \
+ preempt_disable(); \
+ \
+ do_write_seqcount_begin(seqprop_ptr(s)); \
+} while (0)
+
+static inline void do_write_seqcount_begin(seqcount_t *s)
+{
+ do_write_seqcount_begin_nested(s, 0);
+}
+
+/**
+ * write_seqcount_end() - end a seqcount_t write side critical section
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
- * seqcount_t seq;
- * bool X = true, Y = false;
+ * Context: Preemption will be automatically re-enabled if and only if
+ * the seqcount write serialization lock is associated, and preemptible.
+ */
+#define write_seqcount_end(s) \
+do { \
+ do_write_seqcount_end(seqprop_ptr(s)); \
+ \
+ if (seqprop_preemptible(s)) \
+ preempt_enable(); \
+} while (0)
+
+static inline void do_write_seqcount_end(seqcount_t *s)
+{
+ seqcount_release(&s->dep_map, _RET_IP_);
+ do_raw_write_seqcount_end(s);
+}
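
A write-side sketch, assuming obj->seq was initialized as a
seqcount_spinlock_t associated with obj->lock (all names hypothetical):

	spin_lock(&obj->lock);			/* the associated serialization lock */
	write_seqcount_begin(&obj->seq);	/* lockdep asserts the lock is held */
	obj->a = new_a;
	obj->b = new_b;
	write_seqcount_end(&obj->seq);
	spin_unlock(&obj->lock);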
+
+/**
+ * raw_write_seqcount_barrier() - do a seqcount_t write barrier
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
- * void read(void)
- * {
- * bool x, y;
+ * This can be used to provide an ordering guarantee instead of the usual
+ * consistency guarantee. It is one wmb cheaper, because it can collapse
+ * the two back-to-back wmb()s.
+ *
+ * Note that writes surrounding the barrier should be declared atomic (e.g.
+ * via WRITE_ONCE): a) to ensure the writes become visible to other threads
+ * atomically, avoiding compiler optimizations; b) to document which writes are
+ * meant to propagate to the reader critical section. This is necessary because
+ * the writes before and after the barrier are not enclosed in a seq-writer
+ * critical section that would ensure readers are aware of ongoing writes::
+ *
+ * seqcount_t seq;
+ * bool X = true, Y = false;
*
- * do {
- * int s = read_seqcount_begin(&seq);
+ * void read(void)
+ * {
+ * bool x, y;
*
- * x = X; y = Y;
+ * do {
+ * int s = read_seqcount_begin(&seq);
*
- * } while (read_seqcount_retry(&seq, s));
+ * x = X; y = Y;
*
- * BUG_ON(!x && !y);
+ * } while (read_seqcount_retry(&seq, s));
+ *
+ * BUG_ON(!x && !y);
* }
*
* void write(void)
* {
- * Y = true;
+ * WRITE_ONCE(Y, true);
*
- * raw_write_seqcount_barrier(seq);
+ * raw_write_seqcount_barrier(seq);
*
- * X = false;
+ * WRITE_ONCE(X, false);
* }
*/
-static inline void raw_write_seqcount_barrier(seqcount_t *s)
+#define raw_write_seqcount_barrier(s) \
+ do_raw_write_seqcount_barrier(seqprop_ptr(s))
+
+static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
{
+ kcsan_nestable_atomic_begin();
s->sequence++;
smp_wmb();
s->sequence++;
+ kcsan_nestable_atomic_end();
}
-static inline int raw_read_seqcount_latch(seqcount_t *s)
+/**
+ * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
+ * side operations
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * After write_seqcount_invalidate, no seqcount_t read side operations
+ * will complete successfully and see data older than this.
+ */
+#define write_seqcount_invalidate(s) \
+ do_write_seqcount_invalidate(seqprop_ptr(s))
+
+static inline void do_write_seqcount_invalidate(seqcount_t *s)
{
- int seq = READ_ONCE(s->sequence);
- /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
- smp_read_barrier_depends();
- return seq;
+ smp_wmb();
+ kcsan_nestable_atomic_begin();
+ s->sequence += 2;
+ kcsan_nestable_atomic_end();
+}
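
One plausible use, sketched with a hypothetical unhash helper: after removing
an object from a lookup structure, force in-flight lockless readers to retry
rather than return stale data:

	spin_lock(&obj->lock);
	__unhash_object(obj);			/* hypothetical removal helper */
	write_seqcount_invalidate(&obj->seq);	/* in-flight readers now retry */
	spin_unlock(&obj->lock);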
+
+/*
+ * Latch sequence counters (seqcount_latch_t)
+ *
+ * A sequence counter variant where the counter even/odd value is used to
+ * switch between two copies of protected data. This allows the read path,
+ * typically NMIs, to safely interrupt the write side critical section.
+ *
+ * As the write sections are fully preemptible, no special handling for
+ * PREEMPT_RT is needed.
+ */
+typedef struct {
+ seqcount_t seqcount;
+} seqcount_latch_t;
+
+/**
+ * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t
+ * @seq_name: Name of the seqcount_latch_t instance
+ */
+#define SEQCNT_LATCH_ZERO(seq_name) { \
+ .seqcount = SEQCNT_ZERO(seq_name.seqcount), \
}
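
For example, a statically allocated latch (struct layout borrowed from the
raw_write_seqcount_latch() example further down):

	static struct {
		seqcount_latch_t	seq;
		struct data_struct	data[2];
	} latch = {
		.seq = SEQCNT_LATCH_ZERO(latch.seq),
	};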
/**
- * raw_write_seqcount_latch - redirect readers to even/odd copy
- * @s: pointer to seqcount_t
+ * seqcount_latch_init() - runtime initializer for seqcount_latch_t
+ * @s: Pointer to the seqcount_latch_t instance
+ */
+#define seqcount_latch_init(s) seqcount_init(&(s)->seqcount)
+
+/**
+ * raw_read_seqcount_latch() - pick even/odd latch data copy
+ * @s: Pointer to seqcount_latch_t
+ *
+ * See raw_write_seqcount_latch() for details and a full reader/writer
+ * usage example.
+ *
+ * Return: sequence counter raw value. Use the lowest bit as an index for
+ * picking which data copy to read. The full counter must then be checked
+ * with read_seqcount_latch_retry().
+ */
+static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
+{
+ /*
+ * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
+ * Due to the dependent load, a full smp_rmb() is not needed.
+ */
+ return READ_ONCE(s->seqcount.sequence);
+}
+
+/**
+ * read_seqcount_latch_retry() - end a seqcount_latch_t read section
+ * @s: Pointer to seqcount_latch_t
+ * @start: count, from raw_read_seqcount_latch()
+ *
+ * Return: true if a read section retry is required, else false
+ */
+static inline int
+read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
+{
+ return read_seqcount_retry(&s->seqcount, start);
+}
+
+/**
+ * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
+ * @s: Pointer to seqcount_latch_t
*
* The latch technique is a multiversion concurrency control method that allows
* queries during non-atomic modifications. If you can guarantee queries never
@@ -301,181 +713,243 @@ static inline int raw_read_seqcount_latch(seqcount_t *s)
* Very simply put: we first modify one copy and then the other. This ensures
* there is always one copy in a stable state, ready to give us an answer.
*
- * The basic form is a data structure like:
+ * The basic form is a data structure like::
*
- * struct latch_struct {
- * seqcount_t seq;
- * struct data_struct data[2];
- * };
+ * struct latch_struct {
+ * seqcount_latch_t seq;
+ * struct data_struct data[2];
+ * };
*
* Where a modification, which is assumed to be externally serialized, does the
- * following:
+ * following::
*
- * void latch_modify(struct latch_struct *latch, ...)
- * {
- * smp_wmb(); <- Ensure that the last data[1] update is visible
- * latch->seq++;
- * smp_wmb(); <- Ensure that the seqcount update is visible
+ * void latch_modify(struct latch_struct *latch, ...)
+ * {
+ * smp_wmb(); // Ensure that the last data[1] update is visible
+ * latch->seq.sequence++;
+ * smp_wmb(); // Ensure that the seqcount update is visible
*
- * modify(latch->data[0], ...);
+ * modify(latch->data[0], ...);
*
- * smp_wmb(); <- Ensure that the data[0] update is visible
- * latch->seq++;
- * smp_wmb(); <- Ensure that the seqcount update is visible
+ * smp_wmb(); // Ensure that the data[0] update is visible
+ * latch->seq.sequence++;
+ * smp_wmb(); // Ensure that the seqcount update is visible
*
- * modify(latch->data[1], ...);
- * }
+ * modify(latch->data[1], ...);
+ * }
*
- * The query will have a form like:
+ * The query will have a form like::
*
- * struct entry *latch_query(struct latch_struct *latch, ...)
- * {
- * struct entry *entry;
- * unsigned seq, idx;
+ * struct entry *latch_query(struct latch_struct *latch, ...)
+ * {
+ * struct entry *entry;
+ * unsigned seq, idx;
*
- * do {
- * seq = raw_read_seqcount_latch(&latch->seq);
+ * do {
+ * seq = raw_read_seqcount_latch(&latch->seq);
*
- * idx = seq & 0x01;
- * entry = data_query(latch->data[idx], ...);
+ * idx = seq & 0x01;
+ * entry = data_query(latch->data[idx], ...);
*
- * smp_rmb();
- * } while (seq != latch->seq);
+ * // This includes needed smp_rmb()
+ * } while (read_seqcount_latch_retry(&latch->seq, seq));
*
- * return entry;
- * }
+ * return entry;
+ * }
*
* So during the modification, queries are first redirected to data[1]. Then we
* modify data[0]. When that is complete, we redirect queries back to data[0]
* and we can modify data[1].
*
- * NOTE: The non-requirement for atomic modifications does _NOT_ include
- * the publishing of new entries in the case where data is a dynamic
- * data structure.
+ * NOTE:
+ *
+ * The non-requirement for atomic modifications does _NOT_ include
+ * the publishing of new entries in the case where data is a dynamic
+ * data structure.
+ *
+ * An iteration might start in data[0] and get suspended long enough
+ * to miss an entire modification sequence, once it resumes it might
+ * observe the new entry.
*
- * An iteration might start in data[0] and get suspended long enough
- * to miss an entire modification sequence, once it resumes it might
- * observe the new entry.
+ * NOTE2:
*
- * NOTE: When data is a dynamic data structure; one should use regular RCU
- * patterns to manage the lifetimes of the objects within.
+ * When data is a dynamic data structure; one should use regular RCU
+ * patterns to manage the lifetimes of the objects within.
*/
-static inline void raw_write_seqcount_latch(seqcount_t *s)
+static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
{
- smp_wmb(); /* prior stores before incrementing "sequence" */
- s->sequence++;
- smp_wmb(); /* increment "sequence" before following stores */
+ smp_wmb(); /* prior stores before incrementing "sequence" */
+ s->seqcount.sequence++;
+ smp_wmb(); /* increment "sequence" before following stores */
}
/*
- * Sequence counter only version assumes that callers are using their
- * own mutexing.
- */
-static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
-{
- raw_write_seqcount_begin(s);
- seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
-}
-
-static inline void write_seqcount_begin(seqcount_t *s)
-{
- write_seqcount_begin_nested(s, 0);
-}
-
-static inline void write_seqcount_end(seqcount_t *s)
-{
- seqcount_release(&s->dep_map, 1, _RET_IP_);
- raw_write_seqcount_end(s);
-}
-
-/**
- * write_seqcount_invalidate - invalidate in-progress read-side seq operations
- * @s: pointer to seqcount_t
+ * Sequential locks (seqlock_t)
+ *
+ * Sequence counters with an embedded spinlock for writer serialization
+ * and non-preemptibility.
*
- * After write_seqcount_invalidate, no read-side seq operations will complete
- * successfully and see data older than this.
+ * For more info, see:
+ * - Comments on top of seqcount_t
+ * - Documentation/locking/seqlock.rst
*/
-static inline void write_seqcount_invalidate(seqcount_t *s)
-{
- smp_wmb();
- s->sequence+=2;
-}
-
typedef struct {
- struct seqcount seqcount;
+ /*
+ * Make sure that readers don't starve writers on PREEMPT_RT: use
+ * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
+ */
+ seqcount_spinlock_t seqcount;
spinlock_t lock;
} seqlock_t;
-/*
- * These macros triggered gcc-3.x compile-time problems. We think these are
- * OK now. Be cautious.
- */
-#define __SEQLOCK_UNLOCKED(lockname) \
- { \
- .seqcount = SEQCNT_ZERO(lockname), \
- .lock = __SPIN_LOCK_UNLOCKED(lockname) \
+#define __SEQLOCK_UNLOCKED(lockname) \
+ { \
+ .seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
+ .lock = __SPIN_LOCK_UNLOCKED(lockname) \
}
-#define seqlock_init(x) \
- do { \
- seqcount_init(&(x)->seqcount); \
- spin_lock_init(&(x)->lock); \
+/**
+ * seqlock_init() - dynamic initializer for seqlock_t
+ * @sl: Pointer to the seqlock_t instance
+ */
+#define seqlock_init(sl) \
+ do { \
+ spin_lock_init(&(sl)->lock); \
+ seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock); \
} while (0)
-#define DEFINE_SEQLOCK(x) \
- seqlock_t x = __SEQLOCK_UNLOCKED(x)
+/**
+ * DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t
+ * @sl: Name of the seqlock_t instance
+ */
+#define DEFINE_SEQLOCK(sl) \
+ seqlock_t sl = __SEQLOCK_UNLOCKED(sl)
-/*
- * Read side functions for starting and finalizing a read side section.
+/**
+ * read_seqbegin() - start a seqlock_t read side critical section
+ * @sl: Pointer to seqlock_t
+ *
+ * Return: count, to be passed to read_seqretry()
*/
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
- return read_seqcount_begin(&sl->seqcount);
+ unsigned ret = read_seqcount_begin(&sl->seqcount);
+
+ kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */
+ kcsan_flat_atomic_begin();
+ return ret;
}
+/**
+ * read_seqretry() - end a seqlock_t read side section
+ * @sl: Pointer to seqlock_t
+ * @start: count, from read_seqbegin()
+ *
+ * read_seqretry closes the read side critical section of given seqlock_t.
+ * If the critical section was invalid, it must be ignored (and typically
+ * retried).
+ *
+ * Return: true if a read section retry is required, else false
+ */
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
+ /*
+ * Assume not nested: read_seqretry() may be called multiple times when
+ * completing read critical section.
+ */
+ kcsan_flat_atomic_end();
+
return read_seqcount_retry(&sl->seqcount, start);
}
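
Putting the seqlock_t pieces together with the write side defined below; a
self-contained sketch (foo_lock, foo_a and foo_b are invented):

	static DEFINE_SEQLOCK(foo_lock);
	static u64 foo_a, foo_b;

	/* writer: serialized and non-preemptible via the embedded spinlock */
	write_seqlock(&foo_lock);
	foo_a++;
	foo_b++;
	write_sequnlock(&foo_lock);

	/* lockless reader */
	u64 a, b;
	unsigned seq;

	do {
		seq = read_seqbegin(&foo_lock);
		a = foo_a;
		b = foo_b;
	} while (read_seqretry(&foo_lock, seq));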
/*
- * Lock out other writers and update the count.
- * Acts like a normal spin_lock/unlock.
- * Don't need preempt_disable() because that is in the spin_lock already.
+ * For all seqlock_t write side functions, use the internal
+ * do_write_seqcount_begin() instead of generic write_seqcount_begin().
+ * This way, no redundant lockdep_assert_held() checks are added.
+ */
+
+/**
+ * write_seqlock() - start a seqlock_t write side critical section
+ * @sl: Pointer to seqlock_t
+ *
+ * write_seqlock opens a write side critical section for the given
+ * seqlock_t. It also implicitly acquires the spinlock_t embedded inside
+ * that sequential lock. All seqlock_t write side sections are thus
+ * automatically serialized and non-preemptible.
+ *
+ * Context: if the seqlock_t read section, or other write side critical
+ * sections, can be invoked from hardirq or softirq contexts, use the
+ * _irqsave or _bh variants of this function instead.
*/
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
- write_seqcount_begin(&sl->seqcount);
+ do_write_seqcount_begin(&sl->seqcount.seqcount);
}
+/**
+ * write_sequnlock() - end a seqlock_t write side critical section
+ * @sl: Pointer to seqlock_t
+ *
+ * write_sequnlock closes the (serialized and non-preemptible) write side
+ * critical section of given seqlock_t.
+ */
static inline void write_sequnlock(seqlock_t *sl)
{
- write_seqcount_end(&sl->seqcount);
+ do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock(&sl->lock);
}
+/**
+ * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
+ * @sl: Pointer to seqlock_t
+ *
+ * _bh variant of write_seqlock(). Use only if the read side section, or
+ * other write side sections, can be invoked from softirq contexts.
+ */
static inline void write_seqlock_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
- write_seqcount_begin(&sl->seqcount);
+ do_write_seqcount_begin(&sl->seqcount.seqcount);
}
+/**
+ * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
+ * @sl: Pointer to seqlock_t
+ *
+ * write_sequnlock_bh closes the serialized, non-preemptible, and
+ * softirqs-disabled, seqlock_t write side critical section opened with
+ * write_seqlock_bh().
+ */
static inline void write_sequnlock_bh(seqlock_t *sl)
{
- write_seqcount_end(&sl->seqcount);
+ do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_bh(&sl->lock);
}
+/**
+ * write_seqlock_irq() - start a non-interruptible seqlock_t write section
+ * @sl: Pointer to seqlock_t
+ *
+ * _irq variant of write_seqlock(). Use only if the read side section, or
+ * other write sections, can be invoked from hardirq contexts.
+ */
static inline void write_seqlock_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
- write_seqcount_begin(&sl->seqcount);
+ do_write_seqcount_begin(&sl->seqcount.seqcount);
}
+/**
+ * write_sequnlock_irq() - end a non-interruptible seqlock_t write section
+ * @sl: Pointer to seqlock_t
+ *
+ * write_sequnlock_irq closes the serialized and non-interruptible
+ * seqlock_t write side section opened with write_seqlock_irq().
+ */
static inline void write_sequnlock_irq(seqlock_t *sl)
{
- write_seqcount_end(&sl->seqcount);
+ do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_irq(&sl->lock);
}
@@ -484,79 +958,112 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
unsigned long flags;
spin_lock_irqsave(&sl->lock, flags);
- write_seqcount_begin(&sl->seqcount);
+ do_write_seqcount_begin(&sl->seqcount.seqcount);
return flags;
}
+/**
+ * write_seqlock_irqsave() - start a non-interruptible seqlock_t write
+ * section
+ * @lock: Pointer to seqlock_t
+ * @flags: Stack-allocated storage for saving caller's local interrupt
+ * state, to be passed to write_sequnlock_irqrestore().
+ *
+ * _irqsave variant of write_seqlock(). Use it only if the read side
+ * section, or other write sections, can be invoked from hardirq context.
+ */
#define write_seqlock_irqsave(lock, flags) \
do { flags = __write_seqlock_irqsave(lock); } while (0)
+/**
+ * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
+ * section
+ * @sl: Pointer to seqlock_t
+ * @flags: Caller's saved interrupt state, from write_seqlock_irqsave()
+ *
+ * write_sequnlock_irqrestore closes the serialized and non-interruptible
+ * seqlock_t write section previously opened with write_seqlock_irqsave().
+ */
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
- write_seqcount_end(&sl->seqcount);
+ do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_irqrestore(&sl->lock, flags);
}
-/*
- * A locking reader exclusively locks out other writers and locking readers,
- * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
- * Don't need preempt_disable() because that is in the spin_lock already.
+/**
+ * read_seqlock_excl() - begin a seqlock_t locking reader section
+ * @sl: Pointer to seqlock_t
+ *
+ * read_seqlock_excl opens a seqlock_t locking reader critical section. A
+ * locking reader exclusively locks out *both* other writers *and* other
+ * locking readers, but it does not update the embedded sequence number.
+ *
+ * Locking readers act like a normal spin_lock()/spin_unlock().
+ *
+ * Context: if the seqlock_t write section, *or other read sections*, can
+ * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
+ * variant of this function instead.
+ *
+ * The opened read section must be closed with read_sequnlock_excl().
*/
static inline void read_seqlock_excl(seqlock_t *sl)
{
spin_lock(&sl->lock);
}
+/**
+ * read_sequnlock_excl() - end a seqlock_t locking reader critical section
+ * @sl: Pointer to seqlock_t
+ */
static inline void read_sequnlock_excl(seqlock_t *sl)
{
spin_unlock(&sl->lock);
}
/**
- * read_seqbegin_or_lock - begin a sequence number check or locking block
- * @lock: sequence lock
- * @seq : sequence number to be checked
+ * read_seqlock_excl_bh() - start a seqlock_t locking reader section with
+ * softirqs disabled
+ * @sl: Pointer to seqlock_t
*
- * First try it once optimistically without taking the lock. If that fails,
- * take the lock. The sequence number is also used as a marker for deciding
- * whether to be a reader (even) or writer (odd).
- * N.B. seq must be initialized to an even number to begin with.
+ * _bh variant of read_seqlock_excl(). Use this variant only if the
+ * seqlock_t write side section, *or other read sections*, can be invoked
+ * from softirq contexts.
*/
-static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
-{
- if (!(*seq & 1)) /* Even */
- *seq = read_seqbegin(lock);
- else /* Odd */
- read_seqlock_excl(lock);
-}
-
-static inline int need_seqretry(seqlock_t *lock, int seq)
-{
- return !(seq & 1) && read_seqretry(lock, seq);
-}
-
-static inline void done_seqretry(seqlock_t *lock, int seq)
-{
- if (seq & 1)
- read_sequnlock_excl(lock);
-}
-
static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
}
+/**
+ * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
+ * reader section
+ * @sl: Pointer to seqlock_t
+ */
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
spin_unlock_bh(&sl->lock);
}
+/**
+ * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
+ * reader section
+ * @sl: Pointer to seqlock_t
+ *
+ * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t
+ * write side section, *or other read sections*, can be invoked from a
+ * hardirq context.
+ */
static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
}
+/**
+ * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
+ * locking reader section
+ * @sl: Pointer to seqlock_t
+ */
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
spin_unlock_irq(&sl->lock);
@@ -570,15 +1077,117 @@ static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
return flags;
}
+/**
+ * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
+ * locking reader section
+ * @lock: Pointer to seqlock_t
+ * @flags: Stack-allocated storage for saving caller's local interrupt
+ * state, to be passed to read_sequnlock_excl_irqrestore().
+ *
+ * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t
+ * write side section, *or other read sections*, can be invoked from a
+ * hardirq context.
+ */
#define read_seqlock_excl_irqsave(lock, flags) \
do { flags = __read_seqlock_excl_irqsave(lock); } while (0)
+/**
+ * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
+ * locking reader section
+ * @sl: Pointer to seqlock_t
+ * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave()
+ */
static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
spin_unlock_irqrestore(&sl->lock, flags);
}
+/**
+ * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
+ * @lock: Pointer to seqlock_t
+ * @seq: Marker and return parameter. If the passed value is even, the
+ * reader will become a *lockless* seqlock_t reader as in read_seqbegin().
+ * If the passed value is odd, the reader will become a *locking* reader
+ * as in read_seqlock_excl(). In the first call to this function, the
+ * caller *must* initialize and pass an even value to @seq; this way, a
+ * lockless read can be optimistically tried first.
+ *
+ * read_seqbegin_or_lock is an API designed to optimistically try a normal
+ * lockless seqlock_t read section first. If an odd counter is found, the
+ * lockless read trial has failed, and the next read iteration transforms
+ * itself into a full seqlock_t locking reader.
+ *
+ * This is typically used to avoid starving seqlock_t lockless readers
+ * (too many retry loops) in the case of a sharp spike in write side
+ * activity.
+ *
+ * Context: if the seqlock_t write section, *or other read sections*, can
+ * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
+ * variant of this function instead.
+ *
+ * Check Documentation/locking/seqlock.rst for template example code.
+ *
+ * Return: the encountered sequence counter value, through the @seq
+ * parameter, which is overloaded as a return parameter. This returned
+ * value must be checked with need_seqretry(). If the read section needs to
+ * be retried, this returned value must also be passed as the @seq
+ * parameter of the next read_seqbegin_or_lock() iteration.
+ */
+static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
+{
+ if (!(*seq & 1)) /* Even */
+ *seq = read_seqbegin(lock);
+ else /* Odd */
+ read_seqlock_excl(lock);
+}
+
+/**
+ * need_seqretry() - validate seqlock_t "locking or lockless" read section
+ * @lock: Pointer to seqlock_t
+ * @seq: sequence count, from read_seqbegin_or_lock()
+ *
+ * Return: true if a read section retry is required, false otherwise
+ */
+static inline int need_seqretry(seqlock_t *lock, int seq)
+{
+ return !(seq & 1) && read_seqretry(lock, seq);
+}
+
+/**
+ * done_seqretry() - end seqlock_t "locking or lockless" reader section
+ * @lock: Pointer to seqlock_t
+ * @seq: count, from read_seqbegin_or_lock()
+ *
+ * done_seqretry finishes the seqlock_t read side critical section started
+ * with read_seqbegin_or_lock() and validated by need_seqretry().
+ */
+static inline void done_seqretry(seqlock_t *lock, int seq)
+{
+ if (seq & 1)
+ read_sequnlock_excl(lock);
+}
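
A sketch of the escalation pattern, modelled on in-tree callers such as
fs/dcache.c (foo_lock is hypothetical):

	int seq = 0;		/* even: first pass is lockless */

retry:
	read_seqbegin_or_lock(&foo_lock, &seq);

	/* ... read the protected data ... */

	if (need_seqretry(&foo_lock, seq)) {
		seq = 1;	/* odd: take the lock on the next pass */
		goto retry;
	}
	done_seqretry(&foo_lock, seq);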
+
+/**
+ * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
+ * a non-interruptible locking reader
+ * @lock: Pointer to seqlock_t
+ * @seq: Marker and return parameter. Check read_seqbegin_or_lock().
+ *
+ * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if
+ * the seqlock_t write section, *or other read sections*, can be invoked
+ * from hardirq context.
+ *
+ * Note: Interrupts will be disabled only for "locking reader" mode.
+ *
+ * Return:
+ *
+ * 1. The saved local interrupts state in case of a locking reader, to
+ * be passed to done_seqretry_irqrestore().
+ *
+ * 2. The encountered sequence counter value, returned through @seq
+ * overloaded as a return parameter. Check read_seqbegin_or_lock().
+ */
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
@@ -592,6 +1201,18 @@ read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
return flags;
}
+/**
+ * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
+ * non-interruptible locking reader section
+ * @lock: Pointer to seqlock_t
+ * @seq: Count, from read_seqbegin_or_lock_irqsave()
+ * @flags: Caller's saved local interrupt state in case of a locking
+ * reader, also from read_seqbegin_or_lock_irqsave()
+ *
+ * This is the _irqrestore variant of done_seqretry(). The read section
+ * must have been opened with read_seqbegin_or_lock_irqsave(), and validated
+ * by need_seqretry().
+ */
static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
diff --git a/include/linux/seqlock_api.h b/include/linux/seqlock_api.h
new file mode 100644
index 000000000000..be91e7d3b826
--- /dev/null
+++ b/include/linux/seqlock_api.h
@@ -0,0 +1 @@
+#include <linux/seqlock.h>
diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h
deleted file mode 100644
index c58c535d12a8..000000000000
--- a/include/linux/seqno-fence.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * seqno-fence, using a dma-buf to synchronize fencing
- *
- * Copyright (C) 2012 Texas Instruments
- * Copyright (C) 2012 Canonical Ltd
- * Authors:
- * Rob Clark <robdclark@gmail.com>
- * Maarten Lankhorst <maarten.lankhorst@canonical.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __LINUX_SEQNO_FENCE_H
-#define __LINUX_SEQNO_FENCE_H
-
-#include <linux/dma-fence.h>
-#include <linux/dma-buf.h>
-
-enum seqno_fence_condition {
- SEQNO_FENCE_WAIT_GEQUAL,
- SEQNO_FENCE_WAIT_NONZERO
-};
-
-struct seqno_fence {
- struct dma_fence base;
-
- const struct dma_fence_ops *ops;
- struct dma_buf *sync_buf;
- uint32_t seqno_ofs;
- enum seqno_fence_condition condition;
-};
-
-extern const struct dma_fence_ops seqno_fence_ops;
-
-/**
- * to_seqno_fence - cast a fence to a seqno_fence
- * @fence: fence to cast to a seqno_fence
- *
- * Returns NULL if the fence is not a seqno_fence,
- * or the seqno_fence otherwise.
- */
-static inline struct seqno_fence *
-to_seqno_fence(struct dma_fence *fence)
-{
- if (fence->ops != &seqno_fence_ops)
- return NULL;
- return container_of(fence, struct seqno_fence, base);
-}
-
-/**
- * seqno_fence_init - initialize a seqno fence
- * @fence: seqno_fence to initialize
- * @lock: pointer to spinlock to use for fence
- * @sync_buf: buffer containing the memory location to signal on
- * @context: the execution context this fence is a part of
- * @seqno_ofs: the offset within @sync_buf
- * @seqno: the sequence # to signal on
- * @cond: fence wait condition
- * @ops: the fence_ops for operations on this seqno fence
- *
- * This function initializes a struct seqno_fence with passed parameters,
- * and takes a reference on sync_buf which is released on fence destruction.
- *
- * A seqno_fence is a dma_fence which can complete in software when
- * enable_signaling is called, but it also completes when
- * (s32)((sync_buf)[seqno_ofs] - seqno) >= 0 is true
- *
- * The seqno_fence will take a refcount on the sync_buf until it's
- * destroyed, but actual lifetime of sync_buf may be longer if one of the
- * callers take a reference to it.
- *
- * Certain hardware have instructions to insert this type of wait condition
- * in the command stream, so no intervention from software would be needed.
- * This type of fence can be destroyed before completed, however a reference
- * on the sync_buf dma-buf can be taken. It is encouraged to re-use the same
- * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the
- * device's vm can be expensive.
- *
- * It is recommended for creators of seqno_fence to call dma_fence_signal()
- * before destruction. This will prevent possible issues from wraparound at
- * time of issue vs time of check, since users can check dma_fence_is_signaled()
- * before submitting instructions for the hardware to wait on the fence.
- * However, when ops.enable_signaling is not called, it doesn't have to be
- * done as soon as possible, just before there's any real danger of seqno
- * wraparound.
- */
-static inline void
-seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock,
- struct dma_buf *sync_buf, uint32_t context,
- uint32_t seqno_ofs, uint32_t seqno,
- enum seqno_fence_condition cond,
- const struct dma_fence_ops *ops)
-{
- BUG_ON(!fence || !sync_buf || !ops);
- BUG_ON(!ops->wait || !ops->enable_signaling ||
- !ops->get_driver_name || !ops->get_timeline_name);
-
- /*
- * ops is used in dma_fence_init for get_driver_name, so needs to be
- * initialized first
- */
- fence->ops = ops;
- dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
- get_dma_buf(sync_buf);
- fence->sync_buf = sync_buf;
- fence->seqno_ofs = seqno_ofs;
- fence->condition = cond;
-}
-
-#endif /* __LINUX_SEQNO_FENCE_H */
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index e69402d4a8ae..f5f97fa25e8a 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -1,20 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016-2017 Linaro Ltd., Rob Herring <robh@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _LINUX_SERDEV_H
#define _LINUX_SERDEV_H
#include <linux/types.h>
#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/uaccess.h>
#include <linux/termios.h>
#include <linux/delay.h>
@@ -27,8 +21,10 @@ struct serdev_device;
/**
* struct serdev_device_ops - Callback operations for a serdev device
- * @receive_buf: Function called with data received from device.
- * @write_wakeup: Function called when ready to transmit more data.
+ * @receive_buf: Function called with data received from device;
+ * returns number of bytes accepted; may sleep.
+ * @write_wakeup: Function called when ready to transmit more data; must
+ * not sleep.
*/
struct serdev_device_ops {
int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t);
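
To illustrate (a sketch, not part of this patch): a client driver supplies
both callbacks, reusing the stock serdev_device_write_wakeup() declared later
in this header; the demo_* names are invented:

	static int demo_receive_buf(struct serdev_device *serdev,
				    const unsigned char *data, size_t count)
	{
		/* May sleep; return how many bytes were consumed. */
		return count;
	}

	static const struct serdev_device_ops demo_serdev_ops = {
		.receive_buf	= demo_receive_buf,
		.write_wakeup	= serdev_device_write_wakeup,
	};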
@@ -76,6 +72,12 @@ static inline struct serdev_device_driver *to_serdev_device_driver(struct device
return container_of(d, struct serdev_device_driver, driver);
}
+enum serdev_parity {
+ SERDEV_PARITY_NONE,
+ SERDEV_PARITY_EVEN,
+ SERDEV_PARITY_ODD,
+};
+
/*
* serdev controller structures
*/
@@ -86,10 +88,12 @@ struct serdev_controller_ops {
int (*open)(struct serdev_controller *);
void (*close)(struct serdev_controller *);
void (*set_flow_control)(struct serdev_controller *, bool);
+ int (*set_parity)(struct serdev_controller *, enum serdev_parity);
unsigned int (*set_baudrate)(struct serdev_controller *, unsigned int);
void (*wait_until_sent)(struct serdev_controller *, long);
int (*get_tiocm)(struct serdev_controller *);
int (*set_tiocm)(struct serdev_controller *, unsigned int, unsigned int);
+ int (*break_ctl)(struct serdev_controller *ctrl, unsigned int break_state);
};
/**
@@ -184,7 +188,7 @@ static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
struct serdev_device *serdev = ctrl->serdev;
if (!serdev || !serdev->ops->receive_buf)
- return -EINVAL;
+ return 0;
return serdev->ops->receive_buf(serdev, data, count);
}
@@ -193,14 +197,16 @@ static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
int serdev_device_open(struct serdev_device *);
void serdev_device_close(struct serdev_device *);
+int devm_serdev_device_open(struct device *, struct serdev_device *);
unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int);
void serdev_device_set_flow_control(struct serdev_device *, bool);
int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t);
void serdev_device_wait_until_sent(struct serdev_device *, long);
int serdev_device_get_tiocm(struct serdev_device *);
int serdev_device_set_tiocm(struct serdev_device *, int, int);
+int serdev_device_break_ctl(struct serdev_device *serdev, int break_state);
void serdev_device_write_wakeup(struct serdev_device *);
-int serdev_device_write(struct serdev_device *, const unsigned char *, size_t, unsigned long);
+int serdev_device_write(struct serdev_device *, const unsigned char *, size_t, long);
void serdev_device_write_flush(struct serdev_device *);
int serdev_device_write_room(struct serdev_device *);
@@ -246,11 +252,15 @@ static inline int serdev_device_write_buf(struct serdev_device *serdev,
static inline void serdev_device_wait_until_sent(struct serdev_device *sdev, long timeout) {}
static inline int serdev_device_get_tiocm(struct serdev_device *serdev)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int serdev_device_set_tiocm(struct serdev_device *serdev, int set, int clear)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+static inline int serdev_device_break_ctl(struct serdev_device *serdev, int break_state)
+{
+ return -EOPNOTSUPP;
}
static inline int serdev_device_write(struct serdev_device *sdev, const unsigned char *buf,
size_t count, unsigned long timeout)
@@ -276,18 +286,10 @@ static inline bool serdev_device_get_cts(struct serdev_device *serdev)
static inline int serdev_device_wait_for_cts(struct serdev_device *serdev, bool state, int timeout_ms)
{
- unsigned long timeout;
bool signal;
- timeout = jiffies + msecs_to_jiffies(timeout_ms);
- while (time_is_after_jiffies(timeout)) {
- signal = serdev_device_get_cts(serdev);
- if (signal == state)
- return 0;
- usleep_range(1000, 2000);
- }
-
- return -ETIMEDOUT;
+ return readx_poll_timeout(serdev_device_get_cts, serdev, signal, signal == state,
+ 2000, timeout_ms * 1000);
}
static inline int serdev_device_set_rts(struct serdev_device *serdev, bool enable)
@@ -298,6 +300,9 @@ static inline int serdev_device_set_rts(struct serdev_device *serdev, bool enabl
return serdev_device_set_tiocm(serdev, 0, TIOCM_RTS);
}
+int serdev_device_set_parity(struct serdev_device *serdev,
+ enum serdev_parity parity);
+
/*
* serdev hooks into TTY core
*/
@@ -322,4 +327,18 @@ static inline int serdev_tty_port_unregister(struct tty_port *port)
}
#endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */
+struct acpi_resource;
+struct acpi_resource_uart_serialbus;
+
+#ifdef CONFIG_ACPI
+bool serdev_acpi_get_uart_resource(struct acpi_resource *ares,
+ struct acpi_resource_uart_serialbus **uart);
+#else
+static inline bool serdev_acpi_get_uart_resource(struct acpi_resource *ares,
+ struct acpi_resource_uart_serialbus **uart)
+{
+ return false;
+}
+#endif /* CONFIG_ACPI */
+
#endif /*_LINUX_SERDEV_H */
diff --git a/include/linux/serial.h b/include/linux/serial.h
index 0916107c77f9..bfda927dde15 100644
--- a/include/linux/serial.h
+++ b/include/linux/serial.h
@@ -9,9 +9,29 @@
#ifndef _LINUX_SERIAL_H
#define _LINUX_SERIAL_H
-#include <asm/page.h>
#include <uapi/linux/serial.h>
+#include <uapi/linux/serial_reg.h>
+#define UART_IER_ALL_INTR (UART_IER_MSI | \
+ UART_IER_RLSI | \
+ UART_IER_THRI | \
+ UART_IER_RDI)
+
+/* Helper for dealing with UART_LCR_WLEN* defines */
+#define UART_LCR_WLEN(x) ((x) - 5)
+
+/* FIFO and shifting register empty */
+#define UART_LSR_BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+static inline bool uart_lsr_tx_empty(u16 lsr)
+{
+ return (lsr & UART_LSR_BOTH_EMPTY) == UART_LSR_BOTH_EMPTY;
+}
+
+#define UART_MSR_STATUS_BITS (UART_MSR_DCD | \
+ UART_MSR_RI | \
+ UART_MSR_DSR | \
+ UART_MSR_CTS)
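
Schematic use of the new helpers (the lsr value would normally come from a
hardware read of UART_LSR; it is shown here as a plain variable):

	unsigned char lcr = UART_LCR_WLEN(8);	/* 8 - 5 == UART_LCR_WLEN8 */
	u16 lsr = 0;				/* normally read from UART_LSR */

	if (uart_lsr_tx_empty(lsr))
		;	/* FIFO and shift register have both drained */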
/*
* Counters of the input lines (CTS, DSR, RI, CD) interrupts
@@ -23,11 +43,6 @@ struct async_icount {
__u32 buf_overrun;
};
-/*
- * The size of the serial xmit buffer is 1 page, or 4096 bytes
- */
-#define SERIAL_XMIT_SIZE PAGE_SIZE
-
#include <linux/compiler.h>
#endif /* _LINUX_SERIAL_H */
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 61fbb440449c..6f78f302d272 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* linux/include/linux/serial_8250.h
*
* Copyright (C) 2004 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _LINUX_SERIAL_8250_H
#define _LINUX_SERIAL_8250_H
@@ -22,20 +18,21 @@ struct plat_serial8250_port {
unsigned long iobase; /* io base address */
void __iomem *membase; /* ioremap cookie or NULL */
resource_size_t mapbase; /* resource base */
+ unsigned int uartclk; /* UART clock rate */
unsigned int irq; /* interrupt number */
unsigned long irqflags; /* request_irq flags */
- unsigned int uartclk; /* UART clock rate */
void *private_data;
unsigned char regshift; /* register shift */
unsigned char iotype; /* UPIO_* */
unsigned char hub6;
- upf_t flags; /* UPF_* flags */
+ unsigned char has_sysrq; /* supports magic SysRq */
unsigned int type; /* If UPF_FIXED_TYPE */
+ upf_t flags; /* UPF_* flags */
unsigned int (*serial_in)(struct uart_port *, int);
void (*serial_out)(struct uart_port *, int, int);
void (*set_termios)(struct uart_port *,
struct ktermios *new,
- struct ktermios *old);
+ const struct ktermios *old);
void (*set_ldisc)(struct uart_port *,
struct ktermios *);
unsigned int (*get_mctrl)(struct uart_port *);
@@ -77,12 +74,15 @@ struct uart_8250_port;
struct uart_8250_ops {
int (*setup_irq)(struct uart_8250_port *);
void (*release_irq)(struct uart_8250_port *);
+ void (*setup_timer)(struct uart_8250_port *);
};
struct uart_8250_em485 {
- struct timer_list start_tx_timer; /* "rs485 start tx" timer */
- struct timer_list stop_tx_timer; /* "rs485 stop tx" timer */
- struct timer_list *active_timer; /* pointer to active timer */
+ struct hrtimer start_tx_timer; /* "rs485 start tx" timer */
+ struct hrtimer stop_tx_timer; /* "rs485 stop tx" timer */
+ struct hrtimer *active_timer; /* pointer to active timer */
+ struct uart_8250_port *port; /* for hrtimer callbacks */
+ unsigned int tx_stopped:1; /* tx is currently stopped */
};
/*
@@ -105,14 +105,13 @@ struct uart_8250_port {
unsigned char ier;
unsigned char lcr;
unsigned char mcr;
- unsigned char mcr_mask; /* mask of user bits */
- unsigned char mcr_force; /* mask of forced bits */
unsigned char cur_iotype; /* Running I/O type */
unsigned int rpm_tx_active;
unsigned char canary; /* non-zero during system sleep
* if no_console_suspend
*/
unsigned char probe;
+ struct mctrl_gpios *gpios;
#define UART_PROBE_RSA (1 << 0)
/*
@@ -121,7 +120,8 @@ struct uart_8250_port {
* be immediately processed.
*/
#define LSR_SAVE_FLAGS UART_LSR_BRK_ERROR_BITS
- unsigned char lsr_saved_flags;
+ u16 lsr_saved_flags;
+ u16 lsr_save_mask;
#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
unsigned char msr_saved_flags;
@@ -133,6 +133,12 @@ struct uart_8250_port {
void (*dl_write)(struct uart_8250_port *, int);
struct uart_8250_em485 *em485;
+ void (*rs485_start_tx)(struct uart_8250_port *);
+ void (*rs485_stop_tx)(struct uart_8250_port *);
+
+ /* Serial port overrun backoff */
+ struct delayed_work overrun_backoff;
+ u32 overrun_backoff_time_ms;
};
static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up)
@@ -140,28 +146,30 @@ static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up)
return container_of(up, struct uart_8250_port, port);
}
-int serial8250_register_8250_port(struct uart_8250_port *);
+int serial8250_register_8250_port(const struct uart_8250_port *);
void serial8250_unregister_port(int line);
void serial8250_suspend_port(int line);
void serial8250_resume_port(int line);
-extern int early_serial_setup(struct uart_port *port);
-
-extern int early_serial8250_setup(struct earlycon_device *device,
- const char *options);
-extern void serial8250_do_set_termios(struct uart_port *port,
- struct ktermios *termios, struct ktermios *old);
-extern void serial8250_do_set_ldisc(struct uart_port *port,
- struct ktermios *termios);
-extern unsigned int serial8250_do_get_mctrl(struct uart_port *port);
-extern int serial8250_do_startup(struct uart_port *port);
-extern void serial8250_do_shutdown(struct uart_port *port);
-extern void serial8250_do_pm(struct uart_port *port, unsigned int state,
- unsigned int oldstate);
-extern void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl);
-extern int fsl8250_handle_irq(struct uart_port *port);
+int early_serial_setup(struct uart_port *port);
+int early_serial8250_setup(struct earlycon_device *device, const char *options);
+
+void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk);
+void serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
+ const struct ktermios *old);
+void serial8250_do_set_ldisc(struct uart_port *port, struct ktermios *termios);
+unsigned int serial8250_do_get_mctrl(struct uart_port *port);
+int serial8250_do_startup(struct uart_port *port);
+void serial8250_do_shutdown(struct uart_port *port);
+void serial8250_do_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate);
+void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl);
+void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud,
+ unsigned int quot, unsigned int quot_frac);
+int fsl8250_handle_irq(struct uart_port *port);
int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
-unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr);
+u16 serial8250_rx_chars(struct uart_8250_port *up, u16 lsr);
+void serial8250_read_char(struct uart_8250_port *up, u16 lsr);
void serial8250_tx_chars(struct uart_8250_port *up);
unsigned int serial8250_modem_status(struct uart_8250_port *up);
void serial8250_init_port(struct uart_8250_port *up);
@@ -169,9 +177,14 @@ void serial8250_set_defaults(struct uart_8250_port *up);
void serial8250_console_write(struct uart_8250_port *up, const char *s,
unsigned int count);
int serial8250_console_setup(struct uart_port *port, char *options, bool probe);
+int serial8250_console_exit(struct uart_port *port);
-extern void serial8250_set_isa_configurator(void (*v)
- (int port, struct uart_port *up,
- u32 *capabilities));
+void serial8250_set_isa_configurator(void (*v)(int port, struct uart_port *up,
+ u32 *capabilities));
+
+#ifdef CONFIG_SERIAL_8250_RT288X
+unsigned int au_serial_in(struct uart_port *p, int offset);
+void au_serial_out(struct uart_port *p, int offset, int value);
+#endif
#endif
diff --git a/include/linux/serial_bcm63xx.h b/include/linux/serial_bcm63xx.h
index 570e964dc899..b5e48ef89736 100644
--- a/include/linux/serial_bcm63xx.h
+++ b/include/linux/serial_bcm63xx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SERIAL_BCM63XX_H
#define _LINUX_SERIAL_BCM63XX_H
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 1775500294bb..66ecec15a1bf 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -1,27 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* linux/drivers/char/serial_core.h
*
* Copyright (C) 2000 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef LINUX_SERIAL_CORE_H
#define LINUX_SERIAL_CORE_H
-
+#include <linux/bitops.h>
#include <linux/compiler.h>
+#include <linux/console.h>
#include <linux/interrupt.h>
#include <linux/circ_buf.h>
#include <linux/spinlock.h>
@@ -41,10 +29,346 @@
struct uart_port;
struct serial_struct;
struct device;
+struct gpio_desc;
-/*
+/**
+ * struct uart_ops -- interface between serial_core and the driver
+ *
* This structure describes all the operations that can be done on the
- * physical hardware. See Documentation/serial/driver for details.
+ * physical hardware.
+ *
+ * @tx_empty: ``unsigned int ()(struct uart_port *port)``
+ *
+ * This function tests whether the transmitter fifo and shifter for the
+ * @port is empty. If it is empty, this function should return
+ * %TIOCSER_TEMT, otherwise return 0. If the port does not support this
+ * operation, then it should return %TIOCSER_TEMT.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ * This call must not sleep
+ *
+ * @set_mctrl: ``void ()(struct uart_port *port, unsigned int mctrl)``
+ *
+ * This function sets the modem control lines for @port to the state
+ * described by @mctrl. The relevant bits of @mctrl are:
+ *
+ * - %TIOCM_RTS RTS signal.
+ * - %TIOCM_DTR DTR signal.
+ * - %TIOCM_OUT1 OUT1 signal.
+ * - %TIOCM_OUT2 OUT2 signal.
+ * - %TIOCM_LOOP Set the port into loopback mode.
+ *
+ * If the appropriate bit is set, the signal should be driven
+ * active. If the bit is clear, the signal should be driven
+ * inactive.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @get_mctrl: ``unsigned int ()(struct uart_port *port)``
+ *
+ * Returns the current state of modem control inputs of @port. The state
+ * of the outputs should not be returned, since the core keeps track of
+ * their state. The state information should include:
+ *
+ * - %TIOCM_CAR state of DCD signal
+ * - %TIOCM_CTS state of CTS signal
+ * - %TIOCM_DSR state of DSR signal
+ * - %TIOCM_RI state of RI signal
+ *
+ * The bit is set if the signal is currently driven active. If
+ * the port does not support CTS, DCD or DSR, the driver should
+ * indicate that the signal is permanently active. If RI is
+ * not available, the signal should not be indicated as active.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @stop_tx: ``void ()(struct uart_port *port)``
+ *
+ * Stop transmitting characters. This might be due to the CTS line
+ * becoming inactive or the tty layer indicating we want to stop
+ * transmission due to an %XOFF character.
+ *
+ * The driver should stop transmitting characters as soon as possible.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @start_tx: ``void ()(struct uart_port *port)``
+ *
+ * Start transmitting characters.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @throttle: ``void ()(struct uart_port *port)``
+ *
+ * Notify the serial driver that input buffers for the line discipline are
+ * close to full, and it should somehow signal that no more characters
+ * should be sent to the serial port.
+ * This will be called only if hardware assisted flow control is enabled.
+ *
+ * Locking: serialized with @unthrottle() and termios modification by the
+ * tty layer.
+ *
+ * @unthrottle: ``void ()(struct uart_port *port)``
+ *
+ * Notify the serial driver that characters can now be sent to the serial
+ * port without fear of overrunning the input buffers of the line
+ * disciplines.
+ *
+ * This will be called only if hardware assisted flow control is enabled.
+ *
+ * Locking: serialized with @throttle() and termios modification by the
+ * tty layer.
+ *
+ * @send_xchar: ``void ()(struct uart_port *port, char ch)``
+ *
+ * Transmit a high priority character, even if the port is stopped. This
+ * is used to implement XON/XOFF flow control and tcflow(). If the serial
+ * driver does not implement this function, the tty core will append the
+ * character to the circular buffer and then call start_tx() / stop_tx()
+ * to flush the data out.
+ *
+ * Do not transmit if @ch == '\0' (%__DISABLED_CHAR).
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @start_rx: ``void ()(struct uart_port *port)``
+ *
+ * Start receiving characters.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @stop_rx: ``void ()(struct uart_port *port)``
+ *
+ * Stop receiving characters; the @port is in the process of being closed.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @enable_ms: ``void ()(struct uart_port *port)``
+ *
+ * Enable the modem status interrupts.
+ *
+ * This method may be called multiple times. Modem status interrupts
+ * should be disabled when the @shutdown() method is called.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @break_ctl: ``void ()(struct uart_port *port, int ctl)``
+ *
+ * Control the transmission of a break signal. If @ctl is nonzero, the
+ * break signal should be transmitted. The signal should be terminated
+ * when another call is made with a zero @ctl.
+ *
+ * Locking: caller holds tty_port->mutex
+ *
+ * @startup: ``int ()(struct uart_port *port)``
+ *
+ * Grab any interrupt resources and initialise any low level driver state.
+ * Enable the port for reception. It should not activate RTS nor DTR;
+ * this will be done via a separate call to @set_mctrl().
+ *
+ * This method will only be called when the port is initially opened.
+ *
+ * Locking: port_sem taken.
+ * Interrupts: globally disabled.
+ *
+ * @shutdown: ``void ()(struct uart_port *port)``
+ *
+ * Disable the @port, disable any break condition that may be in effect,
+ * and free any interrupt resources. It should not disable RTS nor DTR;
+ * this will have already been done via a separate call to @set_mctrl().
+ *
+ * Drivers must not access @port->state once this call has completed.
+ *
+ * This method will only be called when there are no more users of this
+ * @port.
+ *
+ * Locking: port_sem taken.
+ * Interrupts: caller dependent.
+ *
+ * @flush_buffer: ``void ()(struct uart_port *port)``
+ *
+ * Flush any write buffers, reset any DMA state and stop any ongoing DMA
+ * transfers.
+ *
+ * This will be called whenever the @port->state->xmit circular buffer is
+ * cleared.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @set_termios: ``void ()(struct uart_port *port, struct ktermios *new,
+ * struct ktermios *old)``
+ *
+ * Change the @port parameters, including word length, parity, stop bits.
+ * Update @port->read_status_mask and @port->ignore_status_mask to
+ * indicate the types of events we are interested in receiving. Relevant
+ * ktermios::c_cflag bits are:
+ *
+ * - %CSIZE - word size
+ * - %CSTOPB - 2 stop bits
+ * - %PARENB - parity enable
+ * - %PARODD - odd parity (when %PARENB is in force)
+ * - %ADDRB - address bit (changed through uart_port::rs485_config()).
+ * - %CREAD - enable reception of characters (if not set, still receive
+ * characters from the port, but throw them away).
+ * - %CRTSCTS - if set, enable CTS status change reporting.
+ * - %CLOCAL - if not set, enable modem status change reporting.
+ *
+ * Relevant ktermios::c_iflag bits are:
+ *
+ * - %INPCK - enable frame and parity error events to be passed to the TTY
+ * layer.
+ * - %BRKINT / %PARMRK - both of these enable break events to be passed to
+ * the TTY layer.
+ * - %IGNPAR - ignore parity and framing errors.
+ * - %IGNBRK - ignore break errors. If %IGNPAR is also set, ignore overrun
+ * errors as well.
+ *
+ * The interaction of the ktermios::c_iflag bits is as follows (parity
+ * error given as an example):
+ *
+ * ============ ======= ======= =========================================
+ * Parity error INPCK IGNPAR
+ * ============ ======= ======= =========================================
+ * n/a 0 n/a character received, marked as %TTY_NORMAL
+ * None 1 n/a character received, marked as %TTY_NORMAL
+ * Yes 1 0 character received, marked as %TTY_PARITY
+ * Yes 1 1 character discarded
+ * ============ ======= ======= =========================================
+ *
+ *	Other flags may be used (e.g., xon/xoff characters) if your hardware
+ *	supports hardware-assisted "soft" flow control.
+ *
+ *	Locking: caller holds tty_port->mutex.
+ *	Interrupts: caller dependent.
+ *		This call must not sleep.
+ *
+ * @set_ldisc: ``void ()(struct uart_port *port, struct ktermios *termios)``
+ *
+ * Notifier for discipline change. See
+ * Documentation/driver-api/tty/tty_ldisc.rst.
+ *
+ *	Locking: caller holds tty_port->mutex.
+ *
+ * @pm: ``void ()(struct uart_port *port, unsigned int state,
+ * unsigned int oldstate)``
+ *
+ * Perform any power management related activities on the specified @port.
+ *	@state indicates the new state (defined by enum uart_pm_state);
+ *	@oldstate indicates the previous state.
+ *
+ * This function should not be used to grab any resources.
+ *
+ * This will be called when the @port is initially opened and finally
+ * closed, except when the @port is also the system console. This will
+ * occur even if %CONFIG_PM is not set.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @type: ``const char *()(struct uart_port *port)``
+ *
+ * Return a pointer to a string constant describing the specified @port,
+ * or return %NULL, in which case the string 'unknown' is substituted.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @release_port: ``void ()(struct uart_port *port)``
+ *
+ * Release any memory and IO region resources currently in use by the
+ * @port.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @request_port: ``int ()(struct uart_port *port)``
+ *
+ * Request any memory and IO region resources required by the port. If any
+ * fail, no resources should be registered when this function returns, and
+ * it should return -%EBUSY on failure.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @config_port: ``void ()(struct uart_port *port, int type)``
+ *
+ * Perform any autoconfiguration steps required for the @port. @type
+ * contains a bit mask of the required configuration. %UART_CONFIG_TYPE
+ * indicates that the port requires detection and identification.
+ * @port->type should be set to the type found, or %PORT_UNKNOWN if no
+ * port was detected.
+ *
+ * %UART_CONFIG_IRQ indicates autoconfiguration of the interrupt signal,
+ * which should be probed using standard kernel autoprobing techniques.
+ * This is not necessary on platforms where ports have interrupts
+ *	internally hard-wired (e.g., system-on-a-chip implementations).
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @verify_port: ``int ()(struct uart_port *port,
+ * struct serial_struct *serinfo)``
+ *
+ * Verify the new serial port information contained within @serinfo is
+ * suitable for this port type.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @ioctl: ``int ()(struct uart_port *port, unsigned int cmd,
+ * unsigned long arg)``
+ *
+ * Perform any port specific IOCTLs. IOCTL commands must be defined using
+ * the standard numbering system found in <asm/ioctl.h>.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @poll_init: ``int ()(struct uart_port *port)``
+ *
+ * Called by kgdb to perform the minimal hardware initialization needed to
+ * support @poll_put_char() and @poll_get_char(). Unlike @startup(), this
+ * should not request interrupts.
+ *
+ * Locking: %tty_mutex and tty_port->mutex taken.
+ * Interrupts: n/a.
+ *
+ * @poll_put_char: ``void ()(struct uart_port *port, unsigned char ch)``
+ *
+ * Called by kgdb to write a single character @ch directly to the serial
+ * @port. It can and should block until there is space in the TX FIFO.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *		This call must not sleep.
+ *
+ * @poll_get_char: ``int ()(struct uart_port *port)``
+ *
+ * Called by kgdb to read a single character directly from the serial
+ * port. If data is available, it should be returned; otherwise the
+ * function should return %NO_POLL_CHAR immediately.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *		This call must not sleep.
*/
struct uart_ops {
unsigned int (*tx_empty)(struct uart_port *);
@@ -56,32 +380,19 @@ struct uart_ops {
void (*unthrottle)(struct uart_port *);
void (*send_xchar)(struct uart_port *, char ch);
void (*stop_rx)(struct uart_port *);
+ void (*start_rx)(struct uart_port *);
void (*enable_ms)(struct uart_port *);
void (*break_ctl)(struct uart_port *, int ctl);
int (*startup)(struct uart_port *);
void (*shutdown)(struct uart_port *);
void (*flush_buffer)(struct uart_port *);
void (*set_termios)(struct uart_port *, struct ktermios *new,
- struct ktermios *old);
+ const struct ktermios *old);
void (*set_ldisc)(struct uart_port *, struct ktermios *);
void (*pm)(struct uart_port *, unsigned int state,
unsigned int oldstate);
-
- /*
- * Return a string describing the type of the port
- */
const char *(*type)(struct uart_port *);
-
- /*
- * Release IO and memory resources used by the port.
- * This includes iounmap if necessary.
- */
void (*release_port)(struct uart_port *);
-
- /*
- * Request IO and memory resources used by the port.
- * This includes iomapping the port if necessary.
- */
int (*request_port)(struct uart_port *);
void (*config_port)(struct uart_port *, int);
int (*verify_port)(struct uart_port *, struct serial_struct *);
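Taken together, the kernel-doc above defines the contract a low-level driver fills in. As a rough, hedged illustration (not part of this patch; every myuart_* name is a placeholder), a minimal operations table could look like:

static const struct uart_ops myuart_ops = {
	.tx_empty	= myuart_tx_empty,
	.set_mctrl	= myuart_set_mctrl,
	.get_mctrl	= myuart_get_mctrl,
	.stop_tx	= myuart_stop_tx,
	.start_tx	= myuart_start_tx,
	.stop_rx	= myuart_stop_rx,
	.break_ctl	= myuart_break_ctl,
	.startup	= myuart_startup,
	.shutdown	= myuart_shutdown,
	.set_termios	= myuart_set_termios,
	.type		= myuart_type,
	.release_port	= myuart_release_port,
	.request_port	= myuart_request_port,
	.config_port	= myuart_config_port,
	.verify_port	= myuart_verify_port,
};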
@@ -111,7 +422,7 @@ struct uart_icount {
__u32 buf_overrun;
};
-typedef unsigned int __bitwise upf_t;
+typedef u64 __bitwise upf_t;
typedef unsigned int __bitwise upstat_t;
struct uart_port {
@@ -122,11 +433,18 @@ struct uart_port {
void (*serial_out)(struct uart_port *, int, int);
void (*set_termios)(struct uart_port *,
struct ktermios *new,
- struct ktermios *old);
+ const struct ktermios *old);
void (*set_ldisc)(struct uart_port *,
struct ktermios *);
unsigned int (*get_mctrl)(struct uart_port *);
void (*set_mctrl)(struct uart_port *, unsigned int);
+ unsigned int (*get_divisor)(struct uart_port *,
+ unsigned int baud,
+ unsigned int *frac);
+ void (*set_divisor)(struct uart_port *,
+ unsigned int baud,
+ unsigned int quot,
+ unsigned int quot_frac);
int (*startup)(struct uart_port *port);
void (*shutdown)(struct uart_port *port);
void (*throttle)(struct uart_port *port);
@@ -136,7 +454,10 @@ struct uart_port {
unsigned int old);
void (*handle_break)(struct uart_port *);
int (*rs485_config)(struct uart_port *,
+ struct ktermios *termios,
struct serial_rs485 *rs485);
+ int (*iso7816_config)(struct uart_port *,
+ struct serial_iso7816 *iso7816);
unsigned int irq; /* irq number */
unsigned long irqflags; /* irq flags */
unsigned int uartclk; /* base uart clock */
@@ -144,7 +465,7 @@ struct uart_port {
unsigned char x_char; /* xon/xoff char */
unsigned char regshift; /* reg offset shift */
unsigned char iotype; /* io access style */
- unsigned char unused1;
+ unsigned char quirks; /* internal quirks */
#define UPIO_PORT (SERIAL_IO_PORT) /* 8b I/O port access */
#define UPIO_HUB6 (SERIAL_IO_HUB6) /* Hub6 ISA card */
@@ -155,16 +476,15 @@ struct uart_port {
#define UPIO_MEM32BE (SERIAL_IO_MEM32BE) /* 32b big endian */
#define UPIO_MEM16 (SERIAL_IO_MEM16) /* 16b little endian */
+ /* quirks must be updated while holding port mutex */
+#define UPQ_NO_TXEN_TEST BIT(0)
+
unsigned int read_status_mask; /* driver specific */
unsigned int ignore_status_mask; /* driver specific */
struct uart_state *state; /* pointer to parent state */
struct uart_icount icount; /* statistics */
struct console *cons; /* struct console, if any */
-#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(SUPPORT_SYSRQ)
- unsigned long sysrq; /* sysrq timeout */
-#endif
-
/* flags must be updated while holding port mutex */
upf_t flags;
@@ -174,8 +494,7 @@ struct uart_port {
* assigned from the serial_struct flags in uart_set_info()
* [for bit definitions in the UPF_CHANGE_MASK]
*
- * Bits [0..UPF_LAST_USER] are userspace defined/visible/changeable
- * except bit 15 (UPF_NO_TXEN_TEST) which is masked off.
+ * Bits [0..ASYNCB_LAST_USER] are userspace defined/visible/changeable
* The remaining bits are serial-core specific and not modifiable by
* userspace.
*/
@@ -192,26 +511,26 @@ struct uart_port {
#define UPF_SPD_SHI ((__force upf_t) ASYNC_SPD_SHI /* 12 */ )
#define UPF_LOW_LATENCY ((__force upf_t) ASYNC_LOW_LATENCY /* 13 */ )
#define UPF_BUGGY_UART ((__force upf_t) ASYNC_BUGGY_UART /* 14 */ )
-#define UPF_NO_TXEN_TEST ((__force upf_t) (1 << 15))
#define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ )
-#define UPF_NO_THRE_TEST ((__force upf_t) (1 << 19))
+#define UPF_NO_THRE_TEST ((__force upf_t) BIT_ULL(19))
/* Port has hardware-assisted h/w flow control */
-#define UPF_AUTO_CTS ((__force upf_t) (1 << 20))
-#define UPF_AUTO_RTS ((__force upf_t) (1 << 21))
+#define UPF_AUTO_CTS ((__force upf_t) BIT_ULL(20))
+#define UPF_AUTO_RTS ((__force upf_t) BIT_ULL(21))
#define UPF_HARD_FLOW ((__force upf_t) (UPF_AUTO_CTS | UPF_AUTO_RTS))
/* Port has hardware-assisted s/w flow control */
-#define UPF_SOFT_FLOW ((__force upf_t) (1 << 22))
-#define UPF_CONS_FLOW ((__force upf_t) (1 << 23))
-#define UPF_SHARE_IRQ ((__force upf_t) (1 << 24))
-#define UPF_EXAR_EFR ((__force upf_t) (1 << 25))
-#define UPF_BUG_THRE ((__force upf_t) (1 << 26))
+#define UPF_SOFT_FLOW ((__force upf_t) BIT_ULL(22))
+#define UPF_CONS_FLOW ((__force upf_t) BIT_ULL(23))
+#define UPF_SHARE_IRQ ((__force upf_t) BIT_ULL(24))
+#define UPF_EXAR_EFR ((__force upf_t) BIT_ULL(25))
+#define UPF_BUG_THRE ((__force upf_t) BIT_ULL(26))
/* The exact UART type is known and should not be probed. */
-#define UPF_FIXED_TYPE ((__force upf_t) (1 << 27))
-#define UPF_BOOT_AUTOCONF ((__force upf_t) (1 << 28))
-#define UPF_FIXED_PORT ((__force upf_t) (1 << 29))
-#define UPF_DEAD ((__force upf_t) (1 << 30))
-#define UPF_IOREMAP ((__force upf_t) (1 << 31))
+#define UPF_FIXED_TYPE ((__force upf_t) BIT_ULL(27))
+#define UPF_BOOT_AUTOCONF ((__force upf_t) BIT_ULL(28))
+#define UPF_FIXED_PORT ((__force upf_t) BIT_ULL(29))
+#define UPF_DEAD ((__force upf_t) BIT_ULL(30))
+#define UPF_IOREMAP ((__force upf_t) BIT_ULL(31))
+#define UPF_FULL_PROBE ((__force upf_t) BIT_ULL(32))
#define __UPF_CHANGE_MASK 0x17fff
#define UPF_CHANGE_MASK ((__force upf_t) __UPF_CHANGE_MASK)
@@ -232,10 +551,11 @@ struct uart_port {
#define UPSTAT_AUTORTS ((__force upstat_t) (1 << 2))
#define UPSTAT_AUTOCTS ((__force upstat_t) (1 << 3))
#define UPSTAT_AUTOXOFF ((__force upstat_t) (1 << 4))
+#define UPSTAT_SYNC_FIFO ((__force upstat_t) (1 << 5))
- int hw_stopped; /* sw-assisted CTS flow state */
+ bool hw_stopped; /* sw-assisted CTS flow state */
unsigned int mctrl; /* current modem ctrl settings */
- unsigned int timeout; /* character-based timeout */
+ unsigned int frame_time; /* frame timing in ns */
unsigned int type; /* port type */
const struct uart_ops *ops;
unsigned int custom_divisor;
@@ -244,14 +564,23 @@ struct uart_port {
resource_size_t mapbase; /* for ioremap */
resource_size_t mapsize;
struct device *dev; /* parent device */
+
+ unsigned long sysrq; /* sysrq timeout */
+ unsigned int sysrq_ch; /* char for sysrq */
+ unsigned char has_sysrq;
+ unsigned char sysrq_seq; /* index in sysrq_toggle_seq */
+
unsigned char hub6; /* this should be in the 8250 driver */
unsigned char suspended;
- unsigned char irq_wake;
- unsigned char unused[2];
+ unsigned char console_reinit;
const char *name; /* port name */
struct attribute_group *attr_group; /* port specific attributes */
const struct attribute_group **tty_groups; /* all attributes (serial core use only) */
struct serial_rs485 rs485;
+ struct serial_rs485 rs485_supported; /* Supported mask for serial_rs485 */
+ struct gpio_desc *rs485_term_gpio; /* enable RS485 bus termination */
+ struct gpio_desc *rs485_rx_during_tx_gpio; /* Output GPIO that sets the state of RS485 RX during TX */
+ struct serial_iso7816 iso7816;
void *private_data; /* generic platform data pointer */
};
@@ -297,6 +626,23 @@ struct uart_state {
/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256
+/**
+ * uart_xmit_advance - Advance xmit buffer and account Tx'ed chars
+ * @up: uart_port structure describing the port
+ * @chars: number of characters sent
+ *
+ * This function advances the tail of the circular xmit buffer by the number of
+ * @chars transmitted and handles accounting of transmitted bytes (into
+ * @up's icount.tx).
+ */
+static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars)
+{
+ struct circ_buf *xmit = &up->state->xmit;
+
+ xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1);
+ up->icount.tx += chars;
+}
+
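As a hedged sketch of intended use (MYUART_THR and MYUART_TX_FIFO_DEPTH are invented for illustration, not part of this patch), a driver's FIFO-fill path might pair each hardware write with a call to this helper:

static void myuart_tx_chars(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;
	unsigned int count = MYUART_TX_FIFO_DEPTH;	/* assumed FIFO depth */

	while (count-- && !uart_circ_empty(xmit)) {
		/* write one byte, then advance the tail and account for it */
		writel(xmit->buf[xmit->tail], port->membase + MYUART_THR);
		uart_xmit_advance(port, 1);
	}
}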
struct module;
struct tty_driver;
@@ -319,20 +665,113 @@ struct uart_driver {
void uart_write_wakeup(struct uart_port *port);
+#define __uart_port_tx(uport, ch, tx_ready, put_char, tx_done, for_test, \
+ for_post) \
+({ \
+ struct uart_port *__port = (uport); \
+ struct circ_buf *xmit = &__port->state->xmit; \
+ unsigned int pending; \
+ \
+ for (; (for_test) && (tx_ready); (for_post), __port->icount.tx++) { \
+ if (__port->x_char) { \
+ (ch) = __port->x_char; \
+ (put_char); \
+ __port->x_char = 0; \
+ continue; \
+ } \
+ \
+ if (uart_circ_empty(xmit) || uart_tx_stopped(__port)) \
+ break; \
+ \
+ (ch) = xmit->buf[xmit->tail]; \
+ (put_char); \
+ xmit->tail = (xmit->tail + 1) % UART_XMIT_SIZE; \
+ } \
+ \
+ (tx_done); \
+ \
+ pending = uart_circ_chars_pending(xmit); \
+ if (pending < WAKEUP_CHARS) { \
+ uart_write_wakeup(__port); \
+ \
+ if (pending == 0) \
+ __port->ops->stop_tx(__port); \
+ } \
+ \
+ pending; \
+})
+
+/**
+ * uart_port_tx_limited -- transmit helper for uart_port with count limiting
+ * @port: uart port
+ * @ch: variable to store a character to be written to the HW
+ * @count: a limit of characters to send
+ * @tx_ready: can HW accept more data function
+ * @put_char: function to write a character
+ * @tx_done: function to call after the loop is done
+ *
+ * This helper transmits characters from the xmit buffer to the hardware using
+ * @put_char(). It does so until @count characters are sent and while @tx_ready
+ * evaluates to true.
+ *
+ * Returns: the number of characters in the xmit buffer when done.
+ *
+ * The expressions in the macro parameters shall be designed as follows:
+ * * **tx_ready:** should evaluate to true if the HW can accept more data to
+ * be sent. This parameter can be %true, which means the HW is always ready.
+ * * **put_char:** shall write @ch to the device of @port.
+ * * **tx_done:** when the write loop is done, this can perform arbitrary
+ * action before potential invocation of ops->stop_tx() happens. If the
+ * driver does not need to do anything, use e.g. ({}).
+ *
+ * For all of them, @port->lock is held, interrupts are locally disabled and
+ * the expressions must not sleep.
+ */
+#define uart_port_tx_limited(port, ch, count, tx_ready, put_char, tx_done) ({ \
+ unsigned int __count = (count); \
+ __uart_port_tx(port, ch, tx_ready, put_char, tx_done, __count, \
+ __count--); \
+})
+
+/**
+ * uart_port_tx -- transmit helper for uart_port
+ * @port: uart port
+ * @ch: variable to store a character to be written to the HW
+ * @tx_ready: can HW accept more data function
+ * @put_char: function to write a character
+ *
+ * See uart_port_tx_limited() for more details.
+ */
+#define uart_port_tx(port, ch, tx_ready, put_char) \
+ __uart_port_tx(port, ch, tx_ready, put_char, ({}), true, ({}))
+
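A hedged usage sketch follows; the MYUART_* register names are assumptions. Because the helper is a macro, the tx_ready and put_char arguments are expressions expanded in place, so they may reference ch and the port directly:

static void myuart_handle_tx(struct uart_port *port)
{
	u8 ch;

	uart_port_tx(port, ch,
		readl(port->membase + MYUART_STAT) & MYUART_STAT_TX_READY,
		writel(ch, port->membase + MYUART_TX));
}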
/*
* Baud rate helpers.
*/
void uart_update_timeout(struct uart_port *port, unsigned int cflag,
unsigned int baud);
unsigned int uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old, unsigned int min,
+ const struct ktermios *old, unsigned int min,
unsigned int max);
unsigned int uart_get_divisor(struct uart_port *port, unsigned int baud);
+/*
+ * Calculates FIFO drain time.
+ */
+static inline unsigned long uart_fifo_timeout(struct uart_port *port)
+{
+ u64 fifo_timeout = (u64)READ_ONCE(port->frame_time) * port->fifosize;
+
+ /* Add .02 seconds of slop */
+ fifo_timeout += 20 * NSEC_PER_MSEC;
+
+ return max(nsecs_to_jiffies(fifo_timeout), 1UL);
+}
+
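As a sketch of how the drain time is meant to be consumed (the status register and empty bit below are assumptions), a driver can bound a busy-wait on the transmitter:

static void myuart_wait_for_xmitr(struct uart_port *port)
{
	unsigned long deadline = jiffies + uart_fifo_timeout(port);

	while (!(readl(port->membase + MYUART_STAT) & MYUART_STAT_TX_EMPTY)) {
		if (time_after(jiffies, deadline))
			break;		/* give up after one full drain period */
		cpu_relax();
	}
}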
/* Base timer interval for polling */
static inline int uart_poll_timeout(struct uart_port *port)
{
- int timeout = port->timeout;
+ int timeout = uart_fifo_timeout(port);
return timeout > 6 ? (timeout / 2 - 2) : 1;
}
@@ -343,15 +782,16 @@ static inline int uart_poll_timeout(struct uart_port *port)
struct earlycon_device {
struct console *con;
struct uart_port port;
- char options[16]; /* e.g., 115200n8 */
+ char options[32]; /* e.g., 115200n8 */
unsigned int baud;
};
struct earlycon_id {
- char name[16];
+ char name[15];
+	char	name_term;	/* In case the compiler didn't '\0'-terminate name */
char compatible[128];
int (*setup)(struct earlycon_device *, const char *options);
-} __aligned(32);
+};
extern const struct earlycon_id __earlycon_table[];
extern const struct earlycon_id __earlycon_table_end[];
@@ -363,38 +803,49 @@ extern const struct earlycon_id __earlycon_table_end[];
#endif
#define OF_EARLYCON_DECLARE(_name, compat, fn) \
- static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \
- EARLYCON_USED_OR_UNUSED __section(__earlycon_table) \
+ static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \
+ EARLYCON_USED_OR_UNUSED __section("__earlycon_table") \
+ __aligned(__alignof__(struct earlycon_id)) \
= { .name = __stringify(_name), \
.compatible = compat, \
- .setup = fn }
+ .setup = fn }
#define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn)
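For illustration, the usual registration pattern looks like the following sketch, in which every myuart_* identifier and the "acme,myuart" compatible string are hypothetical:

static void myuart_early_putc(struct uart_port *port, unsigned char c)
{
	/* assumed: spin until the (invented) TX-ready bit is set */
	while (!(readl(port->membase + MYUART_STAT) & MYUART_STAT_TX_READY))
		cpu_relax();
	writel(c, port->membase + MYUART_TX);
}

static void myuart_early_write(struct console *con, const char *s, unsigned int n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, myuart_early_putc);
}

static int __init myuart_early_setup(struct earlycon_device *device,
				     const char *options)
{
	device->con->write = myuart_early_write;
	return 0;
}
OF_EARLYCON_DECLARE(myuart, "acme,myuart", myuart_early_setup);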
-extern int of_setup_earlycon(const struct earlycon_id *match,
- unsigned long node,
- const char *options);
+int of_setup_earlycon(const struct earlycon_id *match, unsigned long node,
+ const char *options);
#ifdef CONFIG_SERIAL_EARLYCON
-extern bool earlycon_init_is_deferred __initdata;
+extern bool earlycon_acpi_spcr_enable __initdata;
int setup_earlycon(char *buf);
#else
-static const bool earlycon_init_is_deferred;
+static const bool earlycon_acpi_spcr_enable EARLYCON_USED_OR_UNUSED;
static inline int setup_earlycon(char *buf) { return 0; }
#endif
+/* Variant of uart_console_registered() when the console_list_lock is held. */
+static inline bool uart_console_registered_locked(struct uart_port *port)
+{
+ return uart_console(port) && console_is_registered_locked(port->cons);
+}
+
+static inline bool uart_console_registered(struct uart_port *port)
+{
+ return uart_console(port) && console_is_registered(port->cons);
+}
+
struct uart_port *uart_get_console(struct uart_port *ports, int nr,
struct console *c);
int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
char **options);
-void uart_parse_options(char *options, int *baud, int *parity, int *bits,
+void uart_parse_options(const char *options, int *baud, int *parity, int *bits,
int *flow);
int uart_set_options(struct uart_port *port, struct console *co, int baud,
int parity, int bits, int flow);
struct tty_driver *uart_console_device(struct console *co, int *index);
void uart_console_write(struct uart_port *port, const char *s,
unsigned int count,
- void (*putchar)(struct uart_port *, int));
+ void (*putchar)(struct uart_port *, unsigned char));
/*
* Port/driver registration/removal
@@ -403,7 +854,8 @@ int uart_register_driver(struct uart_driver *uart);
void uart_unregister_driver(struct uart_driver *uart);
int uart_add_one_port(struct uart_driver *reg, struct uart_port *port);
int uart_remove_one_port(struct uart_driver *reg, struct uart_port *port);
-int uart_match_port(struct uart_port *port1, struct uart_port *port2);
+bool uart_match_port(const struct uart_port *port1,
+ const struct uart_port *port2);
/*
* Power Management
@@ -423,7 +875,7 @@ int uart_resume_port(struct uart_driver *reg, struct uart_port *port);
static inline int uart_tx_stopped(struct uart_port *port)
{
struct tty_struct *tty = port->state->port.tty;
- if ((tty && tty->stopped) || port->hw_stopped)
+ if ((tty && tty->flow.stopped) || port->hw_stopped)
return 1;
return 0;
}
@@ -444,31 +896,112 @@ static inline bool uart_softcts_mode(struct uart_port *uport)
* The following are helper functions for the low level drivers.
*/
-extern void uart_handle_dcd_change(struct uart_port *uport,
- unsigned int status);
-extern void uart_handle_cts_change(struct uart_port *uport,
- unsigned int status);
+void uart_handle_dcd_change(struct uart_port *uport, bool active);
+void uart_handle_cts_change(struct uart_port *uport, bool active);
+
+void uart_insert_char(struct uart_port *port, unsigned int status,
+ unsigned int overrun, unsigned int ch, unsigned int flag);
+
+void uart_xchar_out(struct uart_port *uport, int offset);
-extern void uart_insert_char(struct uart_port *port, unsigned int status,
- unsigned int overrun, unsigned int ch, unsigned int flag);
+#ifdef CONFIG_MAGIC_SYSRQ_SERIAL
+#define SYSRQ_TIMEOUT (HZ * 5)
-#if defined(SUPPORT_SYSRQ) && defined(CONFIG_MAGIC_SYSRQ_SERIAL)
-static inline int
-uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
+bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch);
+
+static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
{
- if (port->sysrq) {
- if (ch && time_before(jiffies, port->sysrq)) {
+ if (!port->sysrq)
+ return 0;
+
+ if (ch && time_before(jiffies, port->sysrq)) {
+ if (sysrq_mask()) {
handle_sysrq(ch);
port->sysrq = 0;
return 1;
}
- port->sysrq = 0;
+ if (uart_try_toggle_sysrq(port, ch))
+ return 1;
}
+ port->sysrq = 0;
+
return 0;
}
-#else
-#define uart_handle_sysrq_char(port,ch) ({ (void)port; 0; })
-#endif
+
+static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+ if (!port->sysrq)
+ return 0;
+
+ if (ch && time_before(jiffies, port->sysrq)) {
+ if (sysrq_mask()) {
+ port->sysrq_ch = ch;
+ port->sysrq = 0;
+ return 1;
+ }
+ if (uart_try_toggle_sysrq(port, ch))
+ return 1;
+ }
+ port->sysrq = 0;
+
+ return 0;
+}
+
+static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
+{
+ int sysrq_ch;
+
+ if (!port->has_sysrq) {
+ spin_unlock(&port->lock);
+ return;
+ }
+
+ sysrq_ch = port->sysrq_ch;
+ port->sysrq_ch = 0;
+
+ spin_unlock(&port->lock);
+
+ if (sysrq_ch)
+ handle_sysrq(sysrq_ch);
+}
+
+static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
+ unsigned long flags)
+{
+ int sysrq_ch;
+
+ if (!port->has_sysrq) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ return;
+ }
+
+ sysrq_ch = port->sysrq_ch;
+ port->sysrq_ch = 0;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (sysrq_ch)
+ handle_sysrq(sysrq_ch);
+}
+#else /* CONFIG_MAGIC_SYSRQ_SERIAL */
+static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+ return 0;
+}
+static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+ return 0;
+}
+static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
+{
+ spin_unlock(&port->lock);
+}
+static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
+ unsigned long flags)
+{
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+#endif /* CONFIG_MAGIC_SYSRQ_SERIAL */
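The helpers above are designed to pair up as in this hedged sketch (the MYUART_* register names are assumptions): receive under port->lock, defer any sysrq character with uart_prepare_sysrq_char(), and let uart_unlock_and_check_sysrq() invoke it once the lock has been dropped:

static irqreturn_t myuart_rx_irq(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	unsigned int ch, status;

	spin_lock(&port->lock);

	while ((status = readl(port->membase + MYUART_STAT)) & MYUART_STAT_RX_READY) {
		ch = readl(port->membase + MYUART_RX);
		port->icount.rx++;

		if (uart_prepare_sysrq_char(port, ch))
			continue;	/* consumed as a deferred sysrq */

		uart_insert_char(port, status, MYUART_STAT_OVERRUN, ch, TTY_NORMAL);
	}

	tty_flip_buffer_push(&port->state->port);

	/* drops port->lock, then runs any sysrq recorded above */
	uart_unlock_and_check_sysrq(port);

	return IRQ_HANDLED;
}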
/*
* We do the SysRQ and SAK checking like this...
@@ -480,10 +1013,10 @@ static inline int uart_handle_break(struct uart_port *port)
if (port->handle_break)
port->handle_break(port);
-#ifdef SUPPORT_SYSRQ
- if (port->cons && port->cons->index == port->line) {
+#ifdef CONFIG_MAGIC_SYSRQ_SERIAL
+ if (port->has_sysrq && uart_console(port)) {
if (!port->sysrq) {
- port->sysrq = jiffies + HZ*5;
+ port->sysrq = jiffies + SYSRQ_TIMEOUT;
return 1;
}
port->sysrq = 0;
@@ -501,4 +1034,5 @@ static inline int uart_handle_break(struct uart_port *port)
(cflag) & CRTSCTS || \
!((cflag) & CLOCAL))
+int uart_get_rs485_mode(struct uart_port *port);
#endif /* LINUX_SERIAL_CORE_H */
diff --git a/include/linux/serial_max3100.h b/include/linux/serial_max3100.h
index 4976befb6aeb..befd55c08a7c 100644
--- a/include/linux/serial_max3100.h
+++ b/include/linux/serial_max3100.h
@@ -1,11 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
*
* Copyright (C) 2007 Christian Pellegrin
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
diff --git a/include/linux/serial_pnx8xxx.h b/include/linux/serial_pnx8xxx.h
deleted file mode 100644
index 79ad87b0be3e..000000000000
--- a/include/linux/serial_pnx8xxx.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Embedded Alley Solutions, source@embeddedalley.com.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _LINUX_SERIAL_PNX8XXX_H
-#define _LINUX_SERIAL_PNX8XXX_H
-
-#include <linux/serial_core.h>
-
-#define PNX8XXX_NR_PORTS 2
-
-struct pnx8xxx_port {
- struct uart_port port;
- struct timer_list timer;
- unsigned int old_status;
-};
-
-/* register offsets */
-#define PNX8XXX_LCR 0
-#define PNX8XXX_MCR 0x004
-#define PNX8XXX_BAUD 0x008
-#define PNX8XXX_CFG 0x00c
-#define PNX8XXX_FIFO 0x028
-#define PNX8XXX_ISTAT 0xfe0
-#define PNX8XXX_IEN 0xfe4
-#define PNX8XXX_ICLR 0xfe8
-#define PNX8XXX_ISET 0xfec
-#define PNX8XXX_PD 0xff4
-#define PNX8XXX_MID 0xffc
-
-#define PNX8XXX_UART_LCR_TXBREAK (1<<30)
-#define PNX8XXX_UART_LCR_PAREVN 0x10000000
-#define PNX8XXX_UART_LCR_PAREN 0x08000000
-#define PNX8XXX_UART_LCR_2STOPB 0x04000000
-#define PNX8XXX_UART_LCR_8BIT 0x01000000
-#define PNX8XXX_UART_LCR_TX_RST 0x00040000
-#define PNX8XXX_UART_LCR_RX_RST 0x00020000
-#define PNX8XXX_UART_LCR_RX_NEXT 0x00010000
-
-#define PNX8XXX_UART_MCR_SCR 0xFF000000
-#define PNX8XXX_UART_MCR_DCD 0x00800000
-#define PNX8XXX_UART_MCR_CTS 0x00100000
-#define PNX8XXX_UART_MCR_LOOP 0x00000010
-#define PNX8XXX_UART_MCR_RTS 0x00000002
-#define PNX8XXX_UART_MCR_DTR 0x00000001
-
-#define PNX8XXX_UART_INT_TX 0x00000080
-#define PNX8XXX_UART_INT_EMPTY 0x00000040
-#define PNX8XXX_UART_INT_RCVTO 0x00000020
-#define PNX8XXX_UART_INT_RX 0x00000010
-#define PNX8XXX_UART_INT_RXOVRN 0x00000008
-#define PNX8XXX_UART_INT_FRERR 0x00000004
-#define PNX8XXX_UART_INT_BREAK 0x00000002
-#define PNX8XXX_UART_INT_PARITY 0x00000001
-#define PNX8XXX_UART_INT_ALLRX 0x0000003F
-#define PNX8XXX_UART_INT_ALLTX 0x000000C0
-
-#define PNX8XXX_UART_FIFO_TXFIFO 0x001F0000
-#define PNX8XXX_UART_FIFO_TXFIFO_STA (0x1f<<16)
-#define PNX8XXX_UART_FIFO_RXBRK 0x00008000
-#define PNX8XXX_UART_FIFO_RXFE 0x00004000
-#define PNX8XXX_UART_FIFO_RXPAR 0x00002000
-#define PNX8XXX_UART_FIFO_RXFIFO 0x00001F00
-#define PNX8XXX_UART_FIFO_RBRTHR 0x000000FF
-
-#endif
diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h
index a7f004a3c177..1672cf0810ef 100644
--- a/include/linux/serial_s3c.h
+++ b/include/linux/serial_s3c.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Internal header file for Samsung S3C2410 serial ports (UART0-2)
*
@@ -10,21 +11,7 @@
* Internal header file for MX1ADS serial ports (UART1 & 2)
*
* Copyright (C) 2002 Shane Nay (shane@minirl.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
+ */
#ifndef __ASM_ARM_REGS_SERIAL_H
#define __ASM_ARM_REGS_SERIAL_H
@@ -96,7 +83,7 @@
#define S3C2410_UCON_RXIRQMODE (1<<0)
#define S3C2410_UCON_RXFIFO_TOI (1<<7)
#define S3C2443_UCON_RXERR_IRQEN (1<<6)
-#define S3C2443_UCON_LOOPBACK (1<<5)
+#define S3C2410_UCON_LOOPBACK (1<<5)
#define S3C2410_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
@@ -259,6 +246,25 @@
S5PV210_UFCON_TXTRIG4 | \
S5PV210_UFCON_RXTRIG4)
+#define APPLE_S5L_UCON_RXTO_ENA 9
+#define APPLE_S5L_UCON_RXTHRESH_ENA 12
+#define APPLE_S5L_UCON_TXTHRESH_ENA 13
+#define APPLE_S5L_UCON_RXTO_ENA_MSK (1 << APPLE_S5L_UCON_RXTO_ENA)
+#define APPLE_S5L_UCON_RXTHRESH_ENA_MSK (1 << APPLE_S5L_UCON_RXTHRESH_ENA)
+#define APPLE_S5L_UCON_TXTHRESH_ENA_MSK (1 << APPLE_S5L_UCON_TXTHRESH_ENA)
+
+#define APPLE_S5L_UCON_DEFAULT (S3C2410_UCON_TXIRQMODE | \
+ S3C2410_UCON_RXIRQMODE | \
+ S3C2410_UCON_RXFIFO_TOI)
+#define APPLE_S5L_UCON_MASK (APPLE_S5L_UCON_RXTO_ENA_MSK | \
+ APPLE_S5L_UCON_RXTHRESH_ENA_MSK | \
+ APPLE_S5L_UCON_TXTHRESH_ENA_MSK)
+
+#define APPLE_S5L_UTRSTAT_RXTHRESH (1<<4)
+#define APPLE_S5L_UTRSTAT_TXTHRESH (1<<5)
+#define APPLE_S5L_UTRSTAT_RXTO (1<<9)
+#define APPLE_S5L_UTRSTAT_ALL_FLAGS (0x3f0)
+
#ifndef __ASSEMBLY__
#include <linux/serial_core.h>
@@ -267,7 +273,7 @@
* serial port
*
* the pointer is setup by the machine specific initialisation from the
- * arch/arm/mach-s3c2410/ directory.
+ * arch/arm/mach-s3c/ directory.
*/
struct s3c2410_uartcfg {
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index e598eaef3962..1c89611e0e06 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SERIAL_SCI_H
#define __LINUX_SERIAL_SCI_H
@@ -35,6 +36,7 @@ enum {
SCIx_SH4_SCIF_FIFODATA_REGTYPE,
SCIx_SH7705_SCIF_REGTYPE,
SCIx_HSCIF_REGTYPE,
+ SCIx_RZ_SCIFA_REGTYPE,
SCIx_NR_REGTYPES,
};
diff --git a/include/linux/serio.h b/include/linux/serio.h
index 138a5efe863a..6c27d413da92 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 1999-2002 Vojtech Pavlik
-*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef _SERIO_H
#define _SERIO_H
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index e5140648f638..95ac8398ee72 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2017, Michael Ellerman, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation;
*/
#ifndef _LINUX_SET_MEMORY_H_
#define _LINUX_SET_MEMORY_H_
@@ -17,4 +14,68 @@ static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
#endif
+#ifndef set_memory_rox
+static inline int set_memory_rox(unsigned long addr, int numpages)
+{
+ int ret = set_memory_ro(addr, numpages);
+ if (ret)
+ return ret;
+ return set_memory_x(addr, numpages);
+}
+#endif
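A minimal, hypothetical caller (code_page is assumed to be a page-aligned buffer of generated code):

static int seal_generated_code(void *code_page)
{
	/* make the page read-only first, then executable */
	return set_memory_rox((unsigned long)code_page, 1);
}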
+
+#ifndef CONFIG_ARCH_HAS_SET_DIRECT_MAP
+static inline int set_direct_map_invalid_noflush(struct page *page)
+{
+ return 0;
+}
+static inline int set_direct_map_default_noflush(struct page *page)
+{
+ return 0;
+}
+
+static inline bool kernel_page_present(struct page *page)
+{
+ return true;
+}
+#else /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */
+/*
+ * Some architectures, e.g. arm64, can disable direct map modifications at
+ * boot time. Let them override this query.
+ */
+#ifndef can_set_direct_map
+static inline bool can_set_direct_map(void)
+{
+ return true;
+}
+#define can_set_direct_map can_set_direct_map
+#endif
+#endif /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */
+
+#ifdef CONFIG_X86_64
+int set_mce_nospec(unsigned long pfn);
+int clear_mce_nospec(unsigned long pfn);
+#else
+static inline int set_mce_nospec(unsigned long pfn)
+{
+ return 0;
+}
+static inline int clear_mce_nospec(unsigned long pfn)
+{
+ return 0;
+}
+#endif
+
+#ifndef CONFIG_ARCH_HAS_MEM_ENCRYPT
+static inline int set_memory_encrypted(unsigned long addr, int numpages)
+{
+ return 0;
+}
+
+static inline int set_memory_decrypted(unsigned long addr, int numpages)
+{
+ return 0;
+}
+#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
+
#endif /* _LINUX_SET_MEMORY_H_ */
diff --git a/include/linux/sfi.h b/include/linux/sfi.h
deleted file mode 100644
index e0e1597ef9e6..000000000000
--- a/include/linux/sfi.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/* sfi.h Simple Firmware Interface */
-
-/*
-
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
-
- Copyright(c) 2009 Intel Corporation. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- The full GNU General Public License is included in this distribution
- in the file called LICENSE.GPL.
-
- BSD LICENSE
-
- Copyright(c) 2009 Intel Corporation. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-*/
-
-#ifndef _LINUX_SFI_H
-#define _LINUX_SFI_H
-
-#include <linux/init.h>
-#include <linux/types.h>
-
-/* Table signatures reserved by the SFI specification */
-#define SFI_SIG_SYST "SYST"
-#define SFI_SIG_FREQ "FREQ"
-#define SFI_SIG_IDLE "IDLE"
-#define SFI_SIG_CPUS "CPUS"
-#define SFI_SIG_MTMR "MTMR"
-#define SFI_SIG_MRTC "MRTC"
-#define SFI_SIG_MMAP "MMAP"
-#define SFI_SIG_APIC "APIC"
-#define SFI_SIG_XSDT "XSDT"
-#define SFI_SIG_WAKE "WAKE"
-#define SFI_SIG_DEVS "DEVS"
-#define SFI_SIG_GPIO "GPIO"
-
-#define SFI_SIGNATURE_SIZE 4
-#define SFI_OEM_ID_SIZE 6
-#define SFI_OEM_TABLE_ID_SIZE 8
-
-#define SFI_NAME_LEN 16
-
-#define SFI_SYST_SEARCH_BEGIN 0x000E0000
-#define SFI_SYST_SEARCH_END 0x000FFFFF
-
-#define SFI_GET_NUM_ENTRIES(ptable, entry_type) \
- ((ptable->header.len - sizeof(struct sfi_table_header)) / \
- (sizeof(entry_type)))
-/*
- * Table structures must be byte-packed to match the SFI specification,
- * as they are provided by the BIOS.
- */
-struct sfi_table_header {
- char sig[SFI_SIGNATURE_SIZE];
- u32 len;
- u8 rev;
- u8 csum;
- char oem_id[SFI_OEM_ID_SIZE];
- char oem_table_id[SFI_OEM_TABLE_ID_SIZE];
-} __packed;
-
-struct sfi_table_simple {
- struct sfi_table_header header;
- u64 pentry[1];
-} __packed;
-
-/* Comply with UEFI spec 2.1 */
-struct sfi_mem_entry {
- u32 type;
- u64 phys_start;
- u64 virt_start;
- u64 pages;
- u64 attrib;
-} __packed;
-
-struct sfi_cpu_table_entry {
- u32 apic_id;
-} __packed;
-
-struct sfi_cstate_table_entry {
- u32 hint; /* MWAIT hint */
- u32 latency; /* latency in ms */
-} __packed;
-
-struct sfi_apic_table_entry {
- u64 phys_addr; /* phy base addr for APIC reg */
-} __packed;
-
-struct sfi_freq_table_entry {
- u32 freq_mhz; /* in MHZ */
- u32 latency; /* transition latency in ms */
- u32 ctrl_val; /* value to write to PERF_CTL */
-} __packed;
-
-struct sfi_wake_table_entry {
- u64 phys_addr; /* pointer to where the wake vector locates */
-} __packed;
-
-struct sfi_timer_table_entry {
- u64 phys_addr; /* phy base addr for the timer */
- u32 freq_hz; /* in HZ */
- u32 irq;
-} __packed;
-
-struct sfi_rtc_table_entry {
- u64 phys_addr; /* phy base addr for the RTC */
- u32 irq;
-} __packed;
-
-struct sfi_device_table_entry {
- u8 type; /* bus type, I2C, SPI or ...*/
-#define SFI_DEV_TYPE_SPI 0
-#define SFI_DEV_TYPE_I2C 1
-#define SFI_DEV_TYPE_UART 2
-#define SFI_DEV_TYPE_HSI 3
-#define SFI_DEV_TYPE_IPC 4
-#define SFI_DEV_TYPE_SD 5
-
- u8 host_num; /* attached to host 0, 1...*/
- u16 addr;
- u8 irq;
- u32 max_freq;
- char name[SFI_NAME_LEN];
-} __packed;
-
-struct sfi_gpio_table_entry {
- char controller_name[SFI_NAME_LEN];
- u16 pin_no;
- char pin_name[SFI_NAME_LEN];
-} __packed;
-
-typedef int (*sfi_table_handler) (struct sfi_table_header *table);
-
-#ifdef CONFIG_SFI
-extern void __init sfi_init(void);
-extern int __init sfi_platform_init(void);
-extern void __init sfi_init_late(void);
-extern int sfi_table_parse(char *signature, char *oem_id, char *oem_table_id,
- sfi_table_handler handler);
-
-extern int sfi_disabled;
-static inline void disable_sfi(void)
-{
- sfi_disabled = 1;
-}
-
-#else /* !CONFIG_SFI */
-
-static inline void sfi_init(void)
-{
-}
-
-static inline void sfi_init_late(void)
-{
-}
-
-#define sfi_disabled 0
-
-static inline int sfi_table_parse(char *signature, char *oem_id,
- char *oem_table_id,
- sfi_table_handler handler)
-{
- return -1;
-}
-
-#endif /* !CONFIG_SFI */
-
-#endif /*_LINUX_SFI_H*/
diff --git a/include/linux/sfi_acpi.h b/include/linux/sfi_acpi.h
deleted file mode 100644
index a6e555cbe05c..000000000000
--- a/include/linux/sfi_acpi.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/* sfi.h Simple Firmware Interface */
-
-/*
-
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
-
- Copyright(c) 2009 Intel Corporation. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- The full GNU General Public License is included in this distribution
- in the file called LICENSE.GPL.
-
- BSD LICENSE
-
- Copyright(c) 2009 Intel Corporation. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-*/
-
-#ifndef _LINUX_SFI_ACPI_H
-#define _LINUX_SFI_ACPI_H
-
-#include <linux/acpi.h>
-#include <linux/sfi.h>
-
-#ifdef CONFIG_SFI
-extern int sfi_acpi_table_parse(char *signature, char *oem_id,
- char *oem_table_id,
- int (*handler)(struct acpi_table_header *));
-
-static inline int __init acpi_sfi_table_parse(char *signature,
- int (*handler)(struct acpi_table_header *))
-{
- if (!acpi_table_parse(signature, handler))
- return 0;
-
- return sfi_acpi_table_parse(signature, NULL, NULL, handler);
-}
-#else /* !CONFIG_SFI */
-static inline int sfi_acpi_table_parse(char *signature, char *oem_id,
- char *oem_table_id,
- int (*handler)(struct acpi_table_header *))
-{
- return -1;
-}
-
-static inline int __init acpi_sfi_table_parse(char *signature,
- int (*handler)(struct acpi_table_header *))
-{
- return acpi_table_parse(signature, handler);
-}
-#endif /* !CONFIG_SFI */
-
-#endif /*_LINUX_SFI_ACPI_H*/
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
new file mode 100644
index 000000000000..ef06a195b3c2
--- /dev/null
+++ b/include/linux/sfp.h
@@ -0,0 +1,639 @@
+#ifndef LINUX_SFP_H
+#define LINUX_SFP_H
+
+#include <linux/phy.h>
+
+struct sfp_eeprom_base {
+ u8 phys_id;
+ u8 phys_ext_id;
+ u8 connector;
+#if defined __BIG_ENDIAN_BITFIELD
+ u8 e10g_base_er:1;
+ u8 e10g_base_lrm:1;
+ u8 e10g_base_lr:1;
+ u8 e10g_base_sr:1;
+ u8 if_1x_sx:1;
+ u8 if_1x_lx:1;
+ u8 if_1x_copper_active:1;
+ u8 if_1x_copper_passive:1;
+
+ u8 escon_mmf_1310_led:1;
+ u8 escon_smf_1310_laser:1;
+ u8 sonet_oc192_short_reach:1;
+ u8 sonet_reach_bit1:1;
+ u8 sonet_reach_bit2:1;
+ u8 sonet_oc48_long_reach:1;
+ u8 sonet_oc48_intermediate_reach:1;
+ u8 sonet_oc48_short_reach:1;
+
+ u8 unallocated_5_7:1;
+ u8 sonet_oc12_smf_long_reach:1;
+ u8 sonet_oc12_smf_intermediate_reach:1;
+ u8 sonet_oc12_short_reach:1;
+ u8 unallocated_5_3:1;
+ u8 sonet_oc3_smf_long_reach:1;
+ u8 sonet_oc3_smf_intermediate_reach:1;
+ u8 sonet_oc3_short_reach:1;
+
+ u8 e_base_px:1;
+ u8 e_base_bx10:1;
+ u8 e100_base_fx:1;
+ u8 e100_base_lx:1;
+ u8 e1000_base_t:1;
+ u8 e1000_base_cx:1;
+ u8 e1000_base_lx:1;
+ u8 e1000_base_sx:1;
+
+ u8 fc_ll_v:1;
+ u8 fc_ll_s:1;
+ u8 fc_ll_i:1;
+ u8 fc_ll_l:1;
+ u8 fc_ll_m:1;
+ u8 fc_tech_sa:1;
+ u8 fc_tech_lc:1;
+ u8 fc_tech_electrical_inter_enclosure:1;
+
+ u8 fc_tech_electrical_intra_enclosure:1;
+ u8 fc_tech_sn:1;
+ u8 fc_tech_sl:1;
+ u8 fc_tech_ll:1;
+ u8 sfp_ct_active:1;
+ u8 sfp_ct_passive:1;
+ u8 unallocated_8_1:1;
+ u8 unallocated_8_0:1;
+
+ u8 fc_media_tw:1;
+ u8 fc_media_tp:1;
+ u8 fc_media_mi:1;
+ u8 fc_media_tv:1;
+ u8 fc_media_m6:1;
+ u8 fc_media_m5:1;
+ u8 unallocated_9_1:1;
+ u8 fc_media_sm:1;
+
+ u8 fc_speed_1200:1;
+ u8 fc_speed_800:1;
+ u8 fc_speed_1600:1;
+ u8 fc_speed_400:1;
+ u8 fc_speed_3200:1;
+ u8 fc_speed_200:1;
+ u8 unallocated_10_1:1;
+ u8 fc_speed_100:1;
+#elif defined __LITTLE_ENDIAN_BITFIELD
+ u8 if_1x_copper_passive:1;
+ u8 if_1x_copper_active:1;
+ u8 if_1x_lx:1;
+ u8 if_1x_sx:1;
+ u8 e10g_base_sr:1;
+ u8 e10g_base_lr:1;
+ u8 e10g_base_lrm:1;
+ u8 e10g_base_er:1;
+
+ u8 sonet_oc3_short_reach:1;
+ u8 sonet_oc3_smf_intermediate_reach:1;
+ u8 sonet_oc3_smf_long_reach:1;
+ u8 unallocated_5_3:1;
+ u8 sonet_oc12_short_reach:1;
+ u8 sonet_oc12_smf_intermediate_reach:1;
+ u8 sonet_oc12_smf_long_reach:1;
+ u8 unallocated_5_7:1;
+
+ u8 sonet_oc48_short_reach:1;
+ u8 sonet_oc48_intermediate_reach:1;
+ u8 sonet_oc48_long_reach:1;
+ u8 sonet_reach_bit2:1;
+ u8 sonet_reach_bit1:1;
+ u8 sonet_oc192_short_reach:1;
+ u8 escon_smf_1310_laser:1;
+ u8 escon_mmf_1310_led:1;
+
+ u8 e1000_base_sx:1;
+ u8 e1000_base_lx:1;
+ u8 e1000_base_cx:1;
+ u8 e1000_base_t:1;
+ u8 e100_base_lx:1;
+ u8 e100_base_fx:1;
+ u8 e_base_bx10:1;
+ u8 e_base_px:1;
+
+ u8 fc_tech_electrical_inter_enclosure:1;
+ u8 fc_tech_lc:1;
+ u8 fc_tech_sa:1;
+ u8 fc_ll_m:1;
+ u8 fc_ll_l:1;
+ u8 fc_ll_i:1;
+ u8 fc_ll_s:1;
+ u8 fc_ll_v:1;
+
+ u8 unallocated_8_0:1;
+ u8 unallocated_8_1:1;
+ u8 sfp_ct_passive:1;
+ u8 sfp_ct_active:1;
+ u8 fc_tech_ll:1;
+ u8 fc_tech_sl:1;
+ u8 fc_tech_sn:1;
+ u8 fc_tech_electrical_intra_enclosure:1;
+
+ u8 fc_media_sm:1;
+ u8 unallocated_9_1:1;
+ u8 fc_media_m5:1;
+ u8 fc_media_m6:1;
+ u8 fc_media_tv:1;
+ u8 fc_media_mi:1;
+ u8 fc_media_tp:1;
+ u8 fc_media_tw:1;
+
+ u8 fc_speed_100:1;
+ u8 unallocated_10_1:1;
+ u8 fc_speed_200:1;
+ u8 fc_speed_3200:1;
+ u8 fc_speed_400:1;
+ u8 fc_speed_1600:1;
+ u8 fc_speed_800:1;
+ u8 fc_speed_1200:1;
+#else
+#error Unknown Endian
+#endif
+ u8 encoding;
+ u8 br_nominal;
+ u8 rate_id;
+ u8 link_len[6];
+ char vendor_name[16];
+ u8 extended_cc;
+ char vendor_oui[3];
+ char vendor_pn[16];
+ char vendor_rev[4];
+ union {
+ __be16 optical_wavelength;
+ __be16 cable_compliance;
+ struct {
+#if defined __BIG_ENDIAN_BITFIELD
+ u8 reserved60_2:6;
+ u8 fc_pi_4_app_h:1;
+ u8 sff8431_app_e:1;
+ u8 reserved61:8;
+#elif defined __LITTLE_ENDIAN_BITFIELD
+ u8 sff8431_app_e:1;
+ u8 fc_pi_4_app_h:1;
+ u8 reserved60_2:6;
+ u8 reserved61:8;
+#else
+#error Unknown Endian
+#endif
+ } __packed passive;
+ struct {
+#if defined __BIG_ENDIAN_BITFIELD
+ u8 reserved60_4:4;
+ u8 fc_pi_4_lim:1;
+ u8 sff8431_lim:1;
+ u8 fc_pi_4_app_h:1;
+ u8 sff8431_app_e:1;
+ u8 reserved61:8;
+#elif defined __LITTLE_ENDIAN_BITFIELD
+ u8 sff8431_app_e:1;
+ u8 fc_pi_4_app_h:1;
+ u8 sff8431_lim:1;
+ u8 fc_pi_4_lim:1;
+ u8 reserved60_4:4;
+ u8 reserved61:8;
+#else
+#error Unknown Endian
+#endif
+ } __packed active;
+ } __packed;
+ u8 reserved62;
+ u8 cc_base;
+} __packed;
+
+struct sfp_eeprom_ext {
+ __be16 options;
+ u8 br_max;
+ u8 br_min;
+ char vendor_sn[16];
+ char datecode[8];
+ u8 diagmon;
+ u8 enhopts;
+ u8 sff8472_compliance;
+ u8 cc_ext;
+} __packed;
+
+/**
+ * struct sfp_eeprom_id - raw SFP module identification information
+ * @base: base SFP module identification structure
+ * @ext: extended SFP module identification structure
+ *
+ * See the SFF-8472 specification and related documents for the definition
+ * of these structure members. The specification can be obtained from
+ * https://www.snia.org/technology-communities/sff/specifications
+ */
+struct sfp_eeprom_id {
+ struct sfp_eeprom_base base;
+ struct sfp_eeprom_ext ext;
+} __packed;
+
+struct sfp_diag {
+ __be16 temp_high_alarm;
+ __be16 temp_low_alarm;
+ __be16 temp_high_warn;
+ __be16 temp_low_warn;
+ __be16 volt_high_alarm;
+ __be16 volt_low_alarm;
+ __be16 volt_high_warn;
+ __be16 volt_low_warn;
+ __be16 bias_high_alarm;
+ __be16 bias_low_alarm;
+ __be16 bias_high_warn;
+ __be16 bias_low_warn;
+ __be16 txpwr_high_alarm;
+ __be16 txpwr_low_alarm;
+ __be16 txpwr_high_warn;
+ __be16 txpwr_low_warn;
+ __be16 rxpwr_high_alarm;
+ __be16 rxpwr_low_alarm;
+ __be16 rxpwr_high_warn;
+ __be16 rxpwr_low_warn;
+ __be16 laser_temp_high_alarm;
+ __be16 laser_temp_low_alarm;
+ __be16 laser_temp_high_warn;
+ __be16 laser_temp_low_warn;
+ __be16 tec_cur_high_alarm;
+ __be16 tec_cur_low_alarm;
+ __be16 tec_cur_high_warn;
+ __be16 tec_cur_low_warn;
+ __be32 cal_rxpwr4;
+ __be32 cal_rxpwr3;
+ __be32 cal_rxpwr2;
+ __be32 cal_rxpwr1;
+ __be32 cal_rxpwr0;
+ __be16 cal_txi_slope;
+ __be16 cal_txi_offset;
+ __be16 cal_txpwr_slope;
+ __be16 cal_txpwr_offset;
+ __be16 cal_t_slope;
+ __be16 cal_t_offset;
+ __be16 cal_v_slope;
+ __be16 cal_v_offset;
+} __packed;
+
+/* SFF8024 defined constants */
+enum {
+ SFF8024_ID_UNK = 0x00,
+ SFF8024_ID_SFF_8472 = 0x02,
+ SFF8024_ID_SFP = 0x03,
+ SFF8024_ID_DWDM_SFP = 0x0b,
+ SFF8024_ID_QSFP_8438 = 0x0c,
+ SFF8024_ID_QSFP_8436_8636 = 0x0d,
+ SFF8024_ID_QSFP28_8636 = 0x11,
+
+ SFF8024_ENCODING_UNSPEC = 0x00,
+ SFF8024_ENCODING_8B10B = 0x01,
+ SFF8024_ENCODING_4B5B = 0x02,
+ SFF8024_ENCODING_NRZ = 0x03,
+ SFF8024_ENCODING_8472_MANCHESTER= 0x04,
+ SFF8024_ENCODING_8472_SONET = 0x05,
+ SFF8024_ENCODING_8472_64B66B = 0x06,
+ SFF8024_ENCODING_8436_MANCHESTER= 0x06,
+ SFF8024_ENCODING_8436_SONET = 0x04,
+ SFF8024_ENCODING_8436_64B66B = 0x05,
+ SFF8024_ENCODING_256B257B = 0x07,
+ SFF8024_ENCODING_PAM4 = 0x08,
+
+ SFF8024_CONNECTOR_UNSPEC = 0x00,
+	/* codes 01-05 are not supportable on SFP, but some modules have a single SC */
+ SFF8024_CONNECTOR_SC = 0x01,
+ SFF8024_CONNECTOR_FIBERJACK = 0x06,
+ SFF8024_CONNECTOR_LC = 0x07,
+ SFF8024_CONNECTOR_MT_RJ = 0x08,
+ SFF8024_CONNECTOR_MU = 0x09,
+ SFF8024_CONNECTOR_SG = 0x0a,
+ SFF8024_CONNECTOR_OPTICAL_PIGTAIL= 0x0b,
+ SFF8024_CONNECTOR_MPO_1X12 = 0x0c,
+ SFF8024_CONNECTOR_MPO_2X16 = 0x0d,
+ SFF8024_CONNECTOR_HSSDC_II = 0x20,
+ SFF8024_CONNECTOR_COPPER_PIGTAIL= 0x21,
+ SFF8024_CONNECTOR_RJ45 = 0x22,
+ SFF8024_CONNECTOR_NOSEPARATE = 0x23,
+ SFF8024_CONNECTOR_MXC_2X16 = 0x24,
+
+ SFF8024_ECC_UNSPEC = 0x00,
+ SFF8024_ECC_100G_25GAUI_C2M_AOC = 0x01,
+ SFF8024_ECC_100GBASE_SR4_25GBASE_SR = 0x02,
+ SFF8024_ECC_100GBASE_LR4_25GBASE_LR = 0x03,
+ SFF8024_ECC_100GBASE_ER4_25GBASE_ER = 0x04,
+ SFF8024_ECC_100GBASE_SR10 = 0x05,
+ SFF8024_ECC_100GBASE_CR4 = 0x0b,
+ SFF8024_ECC_25GBASE_CR_S = 0x0c,
+ SFF8024_ECC_25GBASE_CR_N = 0x0d,
+ SFF8024_ECC_10GBASE_T_SFI = 0x16,
+ SFF8024_ECC_10GBASE_T_SR = 0x1c,
+ SFF8024_ECC_5GBASE_T = 0x1d,
+ SFF8024_ECC_2_5GBASE_T = 0x1e,
+};
+
+/* SFP EEPROM registers */
+enum {
+ SFP_PHYS_ID = 0,
+
+ SFP_PHYS_EXT_ID = 1,
+ SFP_PHYS_EXT_ID_SFP = 0x04,
+
+ SFP_CONNECTOR = 2,
+ SFP_COMPLIANCE = 3,
+ SFP_ENCODING = 11,
+ SFP_BR_NOMINAL = 12,
+ SFP_RATE_ID = 13,
+ SFP_LINK_LEN_SM_KM = 14,
+ SFP_LINK_LEN_SM_100M = 15,
+ SFP_LINK_LEN_50UM_OM2_10M = 16,
+ SFP_LINK_LEN_62_5UM_OM1_10M = 17,
+ SFP_LINK_LEN_COPPER_1M = 18,
+ SFP_LINK_LEN_50UM_OM4_10M = 18,
+ SFP_LINK_LEN_50UM_OM3_10M = 19,
+ SFP_VENDOR_NAME = 20,
+ SFP_VENDOR_OUI = 37,
+ SFP_VENDOR_PN = 40,
+ SFP_VENDOR_REV = 56,
+ SFP_OPTICAL_WAVELENGTH_MSB = 60,
+ SFP_OPTICAL_WAVELENGTH_LSB = 61,
+ SFP_CABLE_SPEC = 60,
+ SFP_CC_BASE = 63,
+
+ SFP_OPTIONS = 64, /* 2 bytes, MSB, LSB */
+ SFP_OPTIONS_HIGH_POWER_LEVEL = BIT(13),
+ SFP_OPTIONS_PAGING_A2 = BIT(12),
+ SFP_OPTIONS_RETIMER = BIT(11),
+ SFP_OPTIONS_COOLED_XCVR = BIT(10),
+ SFP_OPTIONS_POWER_DECL = BIT(9),
+ SFP_OPTIONS_RX_LINEAR_OUT = BIT(8),
+ SFP_OPTIONS_RX_DECISION_THRESH = BIT(7),
+ SFP_OPTIONS_TUNABLE_TX = BIT(6),
+ SFP_OPTIONS_RATE_SELECT = BIT(5),
+ SFP_OPTIONS_TX_DISABLE = BIT(4),
+ SFP_OPTIONS_TX_FAULT = BIT(3),
+ SFP_OPTIONS_LOS_INVERTED = BIT(2),
+ SFP_OPTIONS_LOS_NORMAL = BIT(1),
+
+ SFP_BR_MAX = 66,
+ SFP_BR_MIN = 67,
+ SFP_VENDOR_SN = 68,
+ SFP_DATECODE = 84,
+
+ SFP_DIAGMON = 92,
+ SFP_DIAGMON_DDM = BIT(6),
+ SFP_DIAGMON_INT_CAL = BIT(5),
+ SFP_DIAGMON_EXT_CAL = BIT(4),
+ SFP_DIAGMON_RXPWR_AVG = BIT(3),
+ SFP_DIAGMON_ADDRMODE = BIT(2),
+
+ SFP_ENHOPTS = 93,
+ SFP_ENHOPTS_ALARMWARN = BIT(7),
+ SFP_ENHOPTS_SOFT_TX_DISABLE = BIT(6),
+ SFP_ENHOPTS_SOFT_TX_FAULT = BIT(5),
+ SFP_ENHOPTS_SOFT_RX_LOS = BIT(4),
+ SFP_ENHOPTS_SOFT_RATE_SELECT = BIT(3),
+ SFP_ENHOPTS_APP_SELECT_SFF8079 = BIT(2),
+ SFP_ENHOPTS_SOFT_RATE_SFF8431 = BIT(1),
+
+ SFP_SFF8472_COMPLIANCE = 94,
+ SFP_SFF8472_COMPLIANCE_NONE = 0x00,
+ SFP_SFF8472_COMPLIANCE_REV9_3 = 0x01,
+ SFP_SFF8472_COMPLIANCE_REV9_5 = 0x02,
+ SFP_SFF8472_COMPLIANCE_REV10_2 = 0x03,
+ SFP_SFF8472_COMPLIANCE_REV10_4 = 0x04,
+ SFP_SFF8472_COMPLIANCE_REV11_0 = 0x05,
+ SFP_SFF8472_COMPLIANCE_REV11_3 = 0x06,
+ SFP_SFF8472_COMPLIANCE_REV11_4 = 0x07,
+ SFP_SFF8472_COMPLIANCE_REV12_0 = 0x08,
+
+ SFP_CC_EXT = 95,
+};
+
+/* SFP Diagnostics */
+enum {
+	/* Alarms and warnings are stored MSB at the lower address, then LSB */
+ SFP_TEMP_HIGH_ALARM = 0,
+ SFP_TEMP_LOW_ALARM = 2,
+ SFP_TEMP_HIGH_WARN = 4,
+ SFP_TEMP_LOW_WARN = 6,
+ SFP_VOLT_HIGH_ALARM = 8,
+ SFP_VOLT_LOW_ALARM = 10,
+ SFP_VOLT_HIGH_WARN = 12,
+ SFP_VOLT_LOW_WARN = 14,
+ SFP_BIAS_HIGH_ALARM = 16,
+ SFP_BIAS_LOW_ALARM = 18,
+ SFP_BIAS_HIGH_WARN = 20,
+ SFP_BIAS_LOW_WARN = 22,
+ SFP_TXPWR_HIGH_ALARM = 24,
+ SFP_TXPWR_LOW_ALARM = 26,
+ SFP_TXPWR_HIGH_WARN = 28,
+ SFP_TXPWR_LOW_WARN = 30,
+ SFP_RXPWR_HIGH_ALARM = 32,
+ SFP_RXPWR_LOW_ALARM = 34,
+ SFP_RXPWR_HIGH_WARN = 36,
+ SFP_RXPWR_LOW_WARN = 38,
+ SFP_LASER_TEMP_HIGH_ALARM = 40,
+ SFP_LASER_TEMP_LOW_ALARM = 42,
+ SFP_LASER_TEMP_HIGH_WARN = 44,
+ SFP_LASER_TEMP_LOW_WARN = 46,
+ SFP_TEC_CUR_HIGH_ALARM = 48,
+ SFP_TEC_CUR_LOW_ALARM = 50,
+ SFP_TEC_CUR_HIGH_WARN = 52,
+ SFP_TEC_CUR_LOW_WARN = 54,
+ SFP_CAL_RXPWR4 = 56,
+ SFP_CAL_RXPWR3 = 60,
+ SFP_CAL_RXPWR2 = 64,
+ SFP_CAL_RXPWR1 = 68,
+ SFP_CAL_RXPWR0 = 72,
+ SFP_CAL_TXI_SLOPE = 76,
+ SFP_CAL_TXI_OFFSET = 78,
+ SFP_CAL_TXPWR_SLOPE = 80,
+ SFP_CAL_TXPWR_OFFSET = 82,
+ SFP_CAL_T_SLOPE = 84,
+ SFP_CAL_T_OFFSET = 86,
+ SFP_CAL_V_SLOPE = 88,
+ SFP_CAL_V_OFFSET = 90,
+ SFP_CHKSUM = 95,
+
+ SFP_TEMP = 96,
+ SFP_VCC = 98,
+ SFP_TX_BIAS = 100,
+ SFP_TX_POWER = 102,
+ SFP_RX_POWER = 104,
+ SFP_LASER_TEMP = 106,
+ SFP_TEC_CUR = 108,
+
+ SFP_STATUS = 110,
+ SFP_STATUS_TX_DISABLE = BIT(7),
+ SFP_STATUS_TX_DISABLE_FORCE = BIT(6),
+ SFP_STATUS_TX_FAULT = BIT(2),
+ SFP_STATUS_RX_LOS = BIT(1),
+ SFP_ALARM0 = 112,
+ SFP_ALARM0_TEMP_HIGH = BIT(7),
+ SFP_ALARM0_TEMP_LOW = BIT(6),
+ SFP_ALARM0_VCC_HIGH = BIT(5),
+ SFP_ALARM0_VCC_LOW = BIT(4),
+ SFP_ALARM0_TX_BIAS_HIGH = BIT(3),
+ SFP_ALARM0_TX_BIAS_LOW = BIT(2),
+ SFP_ALARM0_TXPWR_HIGH = BIT(1),
+ SFP_ALARM0_TXPWR_LOW = BIT(0),
+
+ SFP_ALARM1 = 113,
+ SFP_ALARM1_RXPWR_HIGH = BIT(7),
+ SFP_ALARM1_RXPWR_LOW = BIT(6),
+
+ SFP_WARN0 = 116,
+ SFP_WARN0_TEMP_HIGH = BIT(7),
+ SFP_WARN0_TEMP_LOW = BIT(6),
+ SFP_WARN0_VCC_HIGH = BIT(5),
+ SFP_WARN0_VCC_LOW = BIT(4),
+ SFP_WARN0_TX_BIAS_HIGH = BIT(3),
+ SFP_WARN0_TX_BIAS_LOW = BIT(2),
+ SFP_WARN0_TXPWR_HIGH = BIT(1),
+ SFP_WARN0_TXPWR_LOW = BIT(0),
+
+ SFP_WARN1 = 117,
+ SFP_WARN1_RXPWR_HIGH = BIT(7),
+ SFP_WARN1_RXPWR_LOW = BIT(6),
+
+ SFP_EXT_STATUS = 118,
+ SFP_EXT_STATUS_PWRLVL_SELECT = BIT(0),
+
+ SFP_VSL = 120,
+ SFP_PAGE = 127,
+};
+
+struct fwnode_handle;
+struct ethtool_eeprom;
+struct ethtool_modinfo;
+struct sfp_bus;
+
+/**
+ * struct sfp_upstream_ops - upstream operations structure
+ * @attach: called when the sfp socket driver is bound to the upstream
+ * (mandatory).
+ * @detach: called when the sfp socket driver is unbound from the upstream
+ * (mandatory).
+ * @module_insert: called after a module has been detected to determine
+ * whether the module is supported for the upstream device.
+ * @module_remove: called after the module has been removed.
+ * @module_start: called after the PHY probe step.
+ * @module_stop: called before the PHY is removed.
+ * @link_down: called when the link is non-operational for whatever
+ * reason.
+ * @link_up: called when the link is operational.
+ * @connect_phy: called when an I2C accessible PHY has been detected
+ * on the module.
+ * @disconnect_phy: called when a module with an I2C accessible PHY has
+ * been removed.
+ */
+struct sfp_upstream_ops {
+ void (*attach)(void *priv, struct sfp_bus *bus);
+ void (*detach)(void *priv, struct sfp_bus *bus);
+ int (*module_insert)(void *priv, const struct sfp_eeprom_id *id);
+ void (*module_remove)(void *priv);
+ int (*module_start)(void *priv);
+ void (*module_stop)(void *priv);
+ void (*link_down)(void *priv);
+ void (*link_up)(void *priv);
+ int (*connect_phy)(void *priv, struct phy_device *);
+ void (*disconnect_phy)(void *priv);
+};
+
+#if IS_ENABLED(CONFIG_SFP)
+int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
+ unsigned long *support);
+bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
+void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
+ unsigned long *support, unsigned long *interfaces);
+phy_interface_t sfp_select_interface(struct sfp_bus *bus,
+ unsigned long *link_modes);
+
+int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo);
+int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
+ u8 *data);
+int sfp_get_module_eeprom_by_page(struct sfp_bus *bus,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack);
+void sfp_upstream_start(struct sfp_bus *bus);
+void sfp_upstream_stop(struct sfp_bus *bus);
+void sfp_bus_put(struct sfp_bus *bus);
+struct sfp_bus *sfp_bus_find_fwnode(const struct fwnode_handle *fwnode);
+int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
+ const struct sfp_upstream_ops *ops);
+void sfp_bus_del_upstream(struct sfp_bus *bus);
+#else
+static inline int sfp_parse_port(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id,
+ unsigned long *support)
+{
+ return PORT_OTHER;
+}
+
+static inline bool sfp_may_have_phy(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id)
+{
+ return false;
+}
+
+static inline void sfp_parse_support(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id,
+ unsigned long *support,
+ unsigned long *interfaces)
+{
+}
+
+static inline phy_interface_t sfp_select_interface(struct sfp_bus *bus,
+ unsigned long *link_modes)
+{
+ return PHY_INTERFACE_MODE_NA;
+}
+
+static inline int sfp_get_module_info(struct sfp_bus *bus,
+ struct ethtool_modinfo *modinfo)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int sfp_get_module_eeprom(struct sfp_bus *bus,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int sfp_get_module_eeprom_by_page(struct sfp_bus *bus,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void sfp_upstream_start(struct sfp_bus *bus)
+{
+}
+
+static inline void sfp_upstream_stop(struct sfp_bus *bus)
+{
+}
+
+static inline void sfp_bus_put(struct sfp_bus *bus)
+{
+}
+
+static inline struct sfp_bus *
+sfp_bus_find_fwnode(const struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
+ const struct sfp_upstream_ops *ops)
+{
+ return 0;
+}
+
+static inline void sfp_bus_del_upstream(struct sfp_bus *bus)
+{
+}
+#endif
+
+#endif
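
A hedged sketch of how an upstream driver might use the API above — the my_-prefixed
names are hypothetical and error handling is abbreviated:

static void my_sfp_attach(void *priv, struct sfp_bus *bus) { /* mandatory */ }
static void my_sfp_detach(void *priv, struct sfp_bus *bus) { /* mandatory */ }

static const struct sfp_upstream_ops my_sfp_ops = {
	.attach = my_sfp_attach,
	.detach = my_sfp_detach,
};

static int my_bind_sfp(void *priv, const struct fwnode_handle *fwnode)
{
	struct sfp_bus *bus = sfp_bus_find_fwnode(fwnode);
	int ret;

	if (IS_ERR(bus))
		return PTR_ERR(bus);

	ret = sfp_bus_add_upstream(bus, priv, &my_sfp_ops);
	sfp_bus_put(bus);	/* add_upstream holds its own reference */
	return ret;
}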
diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h
index 645896b81244..7bed5be886c6 100644
--- a/include/linux/sh_clk.h
+++ b/include/linux/sh_clk.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SH_CLOCK_H
#define __SH_CLOCK_H
diff --git a/include/linux/sh_dma.h b/include/linux/sh_dma.h
index 56b97eed28a4..9f79806085f5 100644
--- a/include/linux/sh_dma.h
+++ b/include/linux/sh_dma.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Header for the new SH dmaengine driver
*
* Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef SH_DMA_H
#define SH_DMA_H
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h
index f2e27e078362..6dfda97a6c1a 100644
--- a/include/linux/sh_eth.h
+++ b/include/linux/sh_eth.h
@@ -1,22 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_ETH_H__
#define __ASM_SH_ETH_H__
#include <linux/phy.h>
#include <linux/if_ether.h>
-enum {EDMAC_LITTLE_ENDIAN};
-
struct sh_eth_plat_data {
int phy;
int phy_irq;
- int edmac_endian;
phy_interface_t phy_interface;
void (*set_mdio_gate)(void *addr);
unsigned char mac_addr[ETH_ALEN];
unsigned no_ether_link:1;
unsigned ether_link_active_low:1;
- unsigned needs_init:1;
};
#endif
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
index 32383285da68..37ad81058d6a 100644
--- a/include/linux/sh_intc.h
+++ b/include/linux/sh_intc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SH_INTC_H
#define __SH_INTC_H
@@ -96,7 +97,10 @@ struct intc_hw_desc {
unsigned int nr_subgroups;
};
-#define _INTC_ARRAY(a) a, __same_type(a, NULL) ? 0 : sizeof(a)/sizeof(*a)
+#define _INTC_SIZEOF_OR_ZERO(a) (_Generic(a, \
+ typeof(NULL): 0, \
+ default: sizeof(a)))
+#define _INTC_ARRAY(a) a, _INTC_SIZEOF_OR_ZERO(a)/sizeof(*a)
#define INTC_HW_DESC(vectors, groups, mask_regs, \
prio_regs, sense_regs, ack_regs) \
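
The _Generic selection above keeps NULL usable as an "empty table": the typeof(NULL)
association yields a zero element count at compile time instead of applying
sizeof-based array arithmetic to a pointer. A hedged illustration of the two
expansions (my_vectors is hypothetical):

struct intc_vect my_vectors[4];

/* _INTC_ARRAY(my_vectors) -> my_vectors, sizeof(my_vectors)/sizeof(*my_vectors), i.e. 4 */
/* _INTC_ARRAY(NULL)       -> NULL, 0/sizeof(*NULL), i.e. 0 elements                     */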
diff --git a/include/linux/sh_timer.h b/include/linux/sh_timer.h
index 64638b058076..74fd5140bb7a 100644
--- a/include/linux/sh_timer.h
+++ b/include/linux/sh_timer.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SH_TIMER_H__
#define __SH_TIMER_H__
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index d927647e6350..6dfd05ef5c2d 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -1,4 +1,5 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* Dmaengine driver base library for DMA controllers, found on SH-based SoCs
*
* extracted from shdma.c and headers
@@ -7,10 +8,6 @@
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#ifndef SHDMA_BASE_H
diff --git a/include/linux/shm.h b/include/linux/shm.h
index 04e881829625..d8e69aed3d32 100644
--- a/include/linux/shm.h
+++ b/include/linux/shm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SHM_H_
#define _LINUX_SHM_H_
@@ -6,44 +7,7 @@
#include <uapi/linux/shm.h>
#include <asm/shmparam.h>
-struct shmid_kernel /* private to the kernel */
-{
- struct kern_ipc_perm shm_perm;
- struct file *shm_file;
- unsigned long shm_nattch;
- unsigned long shm_segsz;
- time_t shm_atim;
- time_t shm_dtim;
- time_t shm_ctim;
- pid_t shm_cprid;
- pid_t shm_lprid;
- struct user_struct *mlock_user;
-
- /* The task created the shm object. NULL if the task is dead. */
- struct task_struct *shm_creator;
- struct list_head shm_clist; /* list by creator */
-};
-
-/* shm_mode upper byte flags */
-#define SHM_DEST 01000 /* segment will be destroyed on last detach */
-#define SHM_LOCKED 02000 /* segment will not be swapped */
-#define SHM_HUGETLB 04000 /* segment will use huge TLB pages */
-#define SHM_NORESERVE 010000 /* don't check for reservations */
-
-/* Bits [26:31] are reserved */
-
-/*
- * When SHM_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
- * This gives us 6 bits, which is enough until someone invents 128 bit address
- * spaces.
- *
- * Assume these are all power of twos.
- * When 0 use the default page size.
- */
-#define SHM_HUGE_SHIFT 26
-#define SHM_HUGE_MASK 0x3f
-#define SHM_HUGE_2MB (21 << SHM_HUGE_SHIFT)
-#define SHM_HUGE_1GB (30 << SHM_HUGE_SHIFT)
+struct file;
#ifdef CONFIG_SYSVIPC
struct sysv_shm {
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index a7d6bd2a918f..9029abd29b1c 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SHMEM_FS_H
#define __SHMEM_FS_H
@@ -7,6 +8,8 @@
#include <linux/pagemap.h>
#include <linux/percpu_counter.h>
#include <linux/xattr.h>
+#include <linux/fs_parser.h>
+#include <linux/userfaultfd_k.h>
/* inode in-kernel data */
@@ -16,23 +19,36 @@ struct shmem_inode_info {
unsigned long flags;
unsigned long alloced; /* data pages alloced to file */
unsigned long swapped; /* subtotal assigned to swap */
+ pgoff_t fallocend; /* highest fallocate endindex */
struct list_head shrinklist; /* shrinkable hpage inodes */
struct list_head swaplist; /* chain of maybes on swap */
struct shared_policy policy; /* NUMA memory alloc policy */
struct simple_xattrs xattrs; /* list of xattrs */
+ atomic_t stop_eviction; /* hold when working on inode */
+ struct timespec64 i_crtime; /* file creation time */
+ unsigned int fsflags; /* flags for FS_IOC_[SG]ETFLAGS */
struct inode vfs_inode;
};
+#define SHMEM_FL_USER_VISIBLE FS_FL_USER_VISIBLE
+#define SHMEM_FL_USER_MODIFIABLE \
+ (FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL)
+#define SHMEM_FL_INHERITED (FS_NODUMP_FL | FS_NOATIME_FL)
+
struct shmem_sb_info {
unsigned long max_blocks; /* How many blocks are allowed */
struct percpu_counter used_blocks; /* How many are allocated */
unsigned long max_inodes; /* How many inodes are allowed */
unsigned long free_inodes; /* How many are left for allocation */
- spinlock_t stat_lock; /* Serialize shmem_sb_info changes */
+ raw_spinlock_t stat_lock; /* Serialize shmem_sb_info changes */
umode_t mode; /* Mount mode for root directory */
unsigned char huge; /* Whether to try for hugepages */
kuid_t uid; /* Mount uid for root directory */
kgid_t gid; /* Mount gid for root directory */
+ bool full_inums; /* If i_ino should be uint or ino_t */
+ bool noswap; /* ignores VM reclaim / swap requests */
+ ino_t next_ino; /* The next per-sb inode number to use */
+ ino_t __percpu *ino_batch; /* The next per-cpu inode number to use */
struct mempolicy *mpol; /* default memory policy for mappings */
spinlock_t shrinklist_lock; /* Protects shrinklist */
struct list_head shrinklist; /* List of shinkable inodes */
@@ -47,18 +63,25 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
/*
* Functions in mm/shmem.c called directly from elsewhere:
*/
-extern int shmem_init(void);
-extern int shmem_fill_super(struct super_block *sb, void *data, int silent);
+extern const struct fs_parameter_spec shmem_fs_parameters[];
+extern void shmem_init(void);
+extern int shmem_init_fs_context(struct fs_context *fc);
extern struct file *shmem_file_setup(const char *name,
loff_t size, unsigned long flags);
extern struct file *shmem_kernel_file_setup(const char *name, loff_t size,
unsigned long flags);
+extern struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt,
+ const char *name, loff_t size, unsigned long flags);
extern int shmem_zero_setup(struct vm_area_struct *);
extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
-extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
+extern int shmem_lock(struct file *file, int lock, struct ucounts *ucounts);
#ifdef CONFIG_SHMEM
-extern bool shmem_mapping(struct address_space *mapping);
+extern const struct address_space_operations shmem_aops;
+static inline bool shmem_mapping(struct address_space *mapping)
+{
+ return mapping->a_ops == &shmem_aops;
+}
#else
static inline bool shmem_mapping(struct address_space *mapping)
{
@@ -69,24 +92,40 @@ extern void shmem_unlock_mapping(struct address_space *mapping);
extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
-extern int shmem_unuse(swp_entry_t entry, struct page *page);
+int shmem_unuse(unsigned int type);
+extern bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
+ struct mm_struct *mm, unsigned long vm_flags);
+#ifdef CONFIG_SHMEM
extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
+#else
+static inline unsigned long shmem_swap_usage(struct vm_area_struct *vma)
+{
+ return 0;
+}
+#endif
extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
pgoff_t start, pgoff_t end);
-/* Flag allocation requirements to shmem_getpage */
+/* Flag allocation requirements to shmem_get_folio */
enum sgp_type {
SGP_READ, /* don't exceed i_size, don't allocate page */
+ SGP_NOALLOC, /* similar, but fail on hole or use fallocated page */
SGP_CACHE, /* don't exceed i_size, may allocate page */
- SGP_NOHUGE, /* like SGP_CACHE, but no huge pages */
- SGP_HUGE, /* like SGP_CACHE, huge pages preferred */
SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */
SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */
};
-extern int shmem_getpage(struct inode *inode, pgoff_t index,
- struct page **pagep, enum sgp_type sgp);
+int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
+ enum sgp_type sgp);
+struct folio *shmem_read_folio_gfp(struct address_space *mapping,
+ pgoff_t index, gfp_t gfp);
+
+static inline struct folio *shmem_read_folio(struct address_space *mapping,
+ pgoff_t index)
+{
+ return shmem_read_folio_gfp(mapping, index, mapping_gfp_mask(mapping));
+}
static inline struct page *shmem_read_mapping_page(
struct address_space *mapping, pgoff_t index)
@@ -104,42 +143,33 @@ static inline bool shmem_file(struct file *file)
return shmem_mapping(file->f_mapping);
}
-extern bool shmem_charge(struct inode *inode, long pages);
-extern void shmem_uncharge(struct inode *inode, long pages);
-
-#ifdef CONFIG_TMPFS
-
-extern int shmem_add_seals(struct file *file, unsigned int seals);
-extern int shmem_get_seals(struct file *file);
-extern long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
-
-#else
-
-static inline long shmem_fcntl(struct file *f, unsigned int c, unsigned long a)
+/*
+ * If fallocate(FALLOC_FL_KEEP_SIZE) has been used, there may be pages
+ * beyond i_size's notion of EOF, which fallocate has committed to reserving:
+ * which split_huge_page() must therefore not delete. This use of a single
+ * "fallocend" per inode errs on the side of not deleting a reservation when
+ * in doubt: there are plenty of cases when it preserves unreserved pages.
+ */
+static inline pgoff_t shmem_fallocend(struct inode *inode, pgoff_t eof)
{
- return -EINVAL;
+ return max(eof, SHMEM_I(inode)->fallocend);
}
-#endif
-
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
-extern bool shmem_huge_enabled(struct vm_area_struct *vma);
-#else
-static inline bool shmem_huge_enabled(struct vm_area_struct *vma)
-{
- return false;
-}
-#endif
+extern bool shmem_charge(struct inode *inode, long pages);
+extern void shmem_uncharge(struct inode *inode, long pages);
+#ifdef CONFIG_USERFAULTFD
#ifdef CONFIG_SHMEM
-extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+extern int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
unsigned long src_addr,
- struct page **pagep);
-#else
-#define shmem_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
- src_addr, pagep) ({ BUG(); 0; })
-#endif
+ uffd_flags_t flags,
+ struct folio **foliop);
+#else /* !CONFIG_SHMEM */
+#define shmem_mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, \
+ src_addr, flags, foliop) ({ BUG(); 0; })
+#endif /* CONFIG_SHMEM */
+#endif /* CONFIG_USERFAULTFD */
#endif
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 4fcacd915d45..7bde8e1c228a 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -1,9 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SHRINKER_H
#define _LINUX_SHRINKER_H
+#include <linux/atomic.h>
+#include <linux/types.h>
+
/*
* This struct is used to pass information from page reclaim to the shrinkers.
- * We consolidate the values for easier extention later.
+ * We consolidate the values for easier extension later.
*
* The 'gfpmask' refers to the allocation we are currently trying to
* fulfil.
@@ -11,6 +15,9 @@
struct shrink_control {
gfp_t gfp_mask;
+ /* current node being shrunk (for NUMA aware shrinkers) */
+ int nid;
+
/*
* How many objects scan_objects should scan and try to reclaim.
* This is reset before every call, so it is safe for callees
@@ -18,20 +25,27 @@ struct shrink_control {
*/
unsigned long nr_to_scan;
- /* current node being shrunk (for NUMA aware shrinkers) */
- int nid;
+ /*
+ * How many objects did scan_objects process?
+ * This defaults to nr_to_scan before every call, but the callee
+ * should track its actual progress.
+ */
+ unsigned long nr_scanned;
/* current memcg being shrunk (for memcg aware shrinkers) */
struct mem_cgroup *memcg;
};
#define SHRINK_STOP (~0UL)
+#define SHRINK_EMPTY (~0UL - 1)
/*
* A callback you can register to apply pressure to ageable caches.
*
* @count_objects should return the number of freeable items in the cache. If
- * there are no objects to free or the number of freeable items cannot be
- * determined, it should return 0. No deadlock checks should be done during the
+ * there are no objects to free, it should return SHRINK_EMPTY, while 0 is
+ * returned when the number of freeable items cannot be determined or the
+ * shrinker should skip this cache for now (e.g., their number is below the
+ * shrinkable limit). No deadlock checks should be done during the
* count callback - the shrinker relies on aggregating scan counts that couldn't
* be executed due to potential deadlocks to be run at a later call when the
* deadlock condition is no longer pending.
@@ -52,21 +66,63 @@ struct shrinker {
unsigned long (*scan_objects)(struct shrinker *,
struct shrink_control *sc);
- int seeks; /* seeks to recreate an obj */
long batch; /* reclaim batch size, 0 = default */
- unsigned long flags;
+ int seeks; /* seeks to recreate an obj */
+ unsigned flags;
/* These are for internal use */
struct list_head list;
+#ifdef CONFIG_MEMCG
+ /* ID in shrinker_idr */
+ int id;
+#endif
+#ifdef CONFIG_SHRINKER_DEBUG
+ int debugfs_id;
+ const char *name;
+ struct dentry *debugfs_entry;
+#endif
/* objs pending delete, per node */
atomic_long_t *nr_deferred;
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
/* Flags */
-#define SHRINKER_NUMA_AWARE (1 << 0)
-#define SHRINKER_MEMCG_AWARE (1 << 1)
+#define SHRINKER_REGISTERED (1 << 0)
+#define SHRINKER_NUMA_AWARE (1 << 1)
+#define SHRINKER_MEMCG_AWARE (1 << 2)
+/*
+ * This flag only makes sense when the shrinker is also MEMCG_AWARE for now;
+ * a non-MEMCG_AWARE shrinker should not have this flag set.
+ */
+#define SHRINKER_NONSLAB (1 << 3)
-extern int register_shrinker(struct shrinker *);
-extern void unregister_shrinker(struct shrinker *);
-#endif
+extern int __printf(2, 3) prealloc_shrinker(struct shrinker *shrinker,
+ const char *fmt, ...);
+extern void register_shrinker_prepared(struct shrinker *shrinker);
+extern int __printf(2, 3) register_shrinker(struct shrinker *shrinker,
+ const char *fmt, ...);
+extern void unregister_shrinker(struct shrinker *shrinker);
+extern void free_prealloced_shrinker(struct shrinker *shrinker);
+extern void synchronize_shrinkers(void);
+
+#ifdef CONFIG_SHRINKER_DEBUG
+extern int shrinker_debugfs_add(struct shrinker *shrinker);
+extern struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker);
+extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
+ const char *fmt, ...);
+#else /* CONFIG_SHRINKER_DEBUG */
+static inline int shrinker_debugfs_add(struct shrinker *shrinker)
+{
+ return 0;
+}
+static inline struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
+{
+ return NULL;
+}
+static inline __printf(2, 3)
+int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
+{
+ return 0;
+}
+#endif /* CONFIG_SHRINKER_DEBUG */
+#endif /* _LINUX_SHRINKER_H */
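
A hedged sketch of a minimal shrinker under this API — my_cache_count() and
my_cache_trim() are hypothetical helpers; a real user derives both from its own cache:

static unsigned long my_count(struct shrinker *s, struct shrink_control *sc)
{
	unsigned long n = my_cache_count();	/* hypothetical */

	return n ? n : SHRINK_EMPTY;		/* nothing cached at all */
}

static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
{
	/* hypothetical: returns the number of objects actually freed */
	return my_cache_trim(sc->nr_to_scan);
}

static struct shrinker my_shrinker = {
	.count_objects	= my_count,
	.scan_objects	= my_scan,
	.seeks		= DEFAULT_SEEKS,
};

/* in init code: err = register_shrinker(&my_shrinker, "my-cache"); */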
diff --git a/include/linux/signal.h b/include/linux/signal.h
index e2678b5dbb21..3b98e7a28538 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SIGNAL_H
#define _LINUX_SIGNAL_H
@@ -10,16 +11,46 @@ struct task_struct;
/* for sysctl */
extern int print_fatal_signals;
-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
+static inline void copy_siginfo(kernel_siginfo_t *to,
+ const kernel_siginfo_t *from)
{
- if (from->si_code < 0)
- memcpy(to, from, sizeof(*to));
- else
- /* _sigchld is currently the largest know union member */
- memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
+ memcpy(to, from, sizeof(*to));
+}
+
+static inline void clear_siginfo(kernel_siginfo_t *info)
+{
+ memset(info, 0, sizeof(*info));
+}
+
+#define SI_EXPANSION_SIZE (sizeof(struct siginfo) - sizeof(struct kernel_siginfo))
+
+static inline void copy_siginfo_to_external(siginfo_t *to,
+ const kernel_siginfo_t *from)
+{
+ memcpy(to, from, sizeof(*from));
+ memset(((char *)to) + sizeof(struct kernel_siginfo), 0,
+ SI_EXPANSION_SIZE);
}
-int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
+int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from);
+int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from);
+
+enum siginfo_layout {
+ SIL_KILL,
+ SIL_TIMER,
+ SIL_POLL,
+ SIL_FAULT,
+ SIL_FAULT_TRAPNO,
+ SIL_FAULT_MCEERR,
+ SIL_FAULT_BNDERR,
+ SIL_FAULT_PKUERR,
+ SIL_FAULT_PERF_EVENT,
+ SIL_CHLD,
+ SIL_RT,
+ SIL_SYS,
+};
+
+enum siginfo_layout siginfo_layout(unsigned sig, int si_code);
/*
* Define some primitives to manipulate sigset_t.
@@ -95,7 +126,6 @@ static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2)
#define sigmask(sig) (1UL << ((sig) - 1))
#ifndef __HAVE_ARCH_SIG_SETOPS
-#include <linux/string.h>
#define _SIG_SET_BINOP(name, op) \
static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \
@@ -108,9 +138,11 @@ static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \
b3 = b->sig[3]; b2 = b->sig[2]; \
r->sig[3] = op(a3, b3); \
r->sig[2] = op(a2, b2); \
+ fallthrough; \
case 2: \
a1 = a->sig[1]; b1 = b->sig[1]; \
r->sig[1] = op(a1, b1); \
+ fallthrough; \
case 1: \
a0 = a->sig[0]; b0 = b->sig[0]; \
r->sig[0] = op(a0, b0); \
@@ -140,7 +172,9 @@ static inline void name(sigset_t *set) \
switch (_NSIG_WORDS) { \
case 4: set->sig[3] = op(set->sig[3]); \
set->sig[2] = op(set->sig[2]); \
+ fallthrough; \
case 2: set->sig[1] = op(set->sig[1]); \
+ fallthrough; \
case 1: set->sig[0] = op(set->sig[0]); \
break; \
default: \
@@ -161,6 +195,7 @@ static inline void sigemptyset(sigset_t *set)
memset(set, 0, sizeof(sigset_t));
break;
case 2: set->sig[1] = 0;
+ fallthrough;
case 1: set->sig[0] = 0;
break;
}
@@ -173,6 +208,7 @@ static inline void sigfillset(sigset_t *set)
memset(set, -1, sizeof(sigset_t));
break;
case 2: set->sig[1] = -1;
+ fallthrough;
case 1: set->sig[0] = -1;
break;
}
@@ -203,6 +239,7 @@ static inline void siginitset(sigset_t *set, unsigned long mask)
memset(&set->sig[1], 0, sizeof(long)*(_NSIG_WORDS-1));
break;
case 2: set->sig[1] = 0;
+ break;
case 1: ;
}
}
@@ -215,6 +252,7 @@ static inline void siginitsetinv(sigset_t *set, unsigned long mask)
memset(&set->sig[1], -1, sizeof(long)*(_NSIG_WORDS-1));
break;
case 2: set->sig[1] = -1;
+ break;
case 1: ;
}
}
@@ -237,22 +275,28 @@ static inline int valid_signal(unsigned long sig)
struct timespec;
struct pt_regs;
+enum pid_type;
extern int next_signal(struct sigpending *pending, sigset_t *mask);
-extern int do_send_sig_info(int sig, struct siginfo *info,
- struct task_struct *p, bool group);
-extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p);
-extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *);
+extern int do_send_sig_info(int sig, struct kernel_siginfo *info,
+ struct task_struct *p, enum pid_type type);
+extern int group_send_sig_info(int sig, struct kernel_siginfo *info,
+ struct task_struct *p, enum pid_type type);
+extern int send_signal_locked(int sig, struct kernel_siginfo *info,
+ struct task_struct *p, enum pid_type type);
extern int sigprocmask(int, sigset_t *, sigset_t *);
extern void set_current_blocked(sigset_t *);
extern void __set_current_blocked(const sigset_t *);
extern int show_unhandled_signals;
-extern int get_signal(struct ksignal *ksig);
+extern bool get_signal(struct ksignal *ksig);
extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping);
extern void exit_signals(struct task_struct *tsk);
extern void kernel_sigaction(int, __sighandler_t);
+#define SIG_KTHREAD ((__force __sighandler_t)2)
+#define SIG_KTHREAD_KERNEL ((__force __sighandler_t)3)
+
static inline void allow_signal(int sig)
{
/*
@@ -260,7 +304,17 @@ static inline void allow_signal(int sig)
* know it'll be handled, so that they don't get converted to
* SIGKILL or just silently dropped.
*/
- kernel_sigaction(sig, (__force __sighandler_t)2);
+ kernel_sigaction(sig, SIG_KTHREAD);
+}
+
+static inline void allow_kernel_signal(int sig)
+{
+ /*
+ * Kernel threads handle their own signals. Let the signal code
+ * know signals sent by the kernel will be handled, so that they
+ * don't get silently dropped.
+ */
+ kernel_sigaction(sig, SIG_KTHREAD_KERNEL);
}
static inline void disallow_signal(int sig)
@@ -270,7 +324,7 @@ static inline void disallow_signal(int sig)
extern struct kmem_cache *sighand_cachep;
-int unhandled_signal(struct task_struct *tsk, int sig);
+extern bool unhandled_signal(struct task_struct *tsk, int sig);
/*
* In POSIX a signal is sent either to a specific thread (Linux task)
@@ -359,7 +413,7 @@ int unhandled_signal(struct task_struct *tsk, int sig);
#endif
#define siginmask(sig, mask) \
- ((sig) < SIGRTMIN && (rt_sigmask(sig) & (mask)))
+ ((sig) > 0 && (sig) < SIGRTMIN && (rt_sigmask(sig) & (mask)))
#define SIG_KERNEL_ONLY_MASK (\
rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP))
@@ -380,10 +434,18 @@ int unhandled_signal(struct task_struct *tsk, int sig);
rt_sigmask(SIGCONT) | rt_sigmask(SIGCHLD) | \
rt_sigmask(SIGWINCH) | rt_sigmask(SIGURG) )
+#define SIG_SPECIFIC_SICODES_MASK (\
+ rt_sigmask(SIGILL) | rt_sigmask(SIGFPE) | \
+ rt_sigmask(SIGSEGV) | rt_sigmask(SIGBUS) | \
+ rt_sigmask(SIGTRAP) | rt_sigmask(SIGCHLD) | \
+ rt_sigmask(SIGPOLL) | rt_sigmask(SIGSYS) | \
+ SIGEMT_MASK )
+
#define sig_kernel_only(sig) siginmask(sig, SIG_KERNEL_ONLY_MASK)
#define sig_kernel_coredump(sig) siginmask(sig, SIG_KERNEL_COREDUMP_MASK)
#define sig_kernel_ignore(sig) siginmask(sig, SIG_KERNEL_IGNORE_MASK)
#define sig_kernel_stop(sig) siginmask(sig, SIG_KERNEL_STOP_MASK)
+#define sig_specific_sicodes(sig) siginmask(sig, SIG_SPECIFIC_SICODES_MASK)
#define sig_fatal(t, signr) \
(!siginmask(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
@@ -394,19 +456,37 @@ void signals_init(void);
int restore_altstack(const stack_t __user *);
int __save_altstack(stack_t __user *, unsigned long);
-#define save_altstack_ex(uss, sp) do { \
+#define unsafe_save_altstack(uss, sp, label) do { \
stack_t __user *__uss = uss; \
struct task_struct *t = current; \
- put_user_ex((void __user *)t->sas_ss_sp, &__uss->ss_sp); \
- put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \
- put_user_ex(t->sas_ss_size, &__uss->ss_size); \
- if (t->sas_ss_flags & SS_AUTODISARM) \
- sas_ss_reset(t); \
+ unsafe_put_user((void __user *)t->sas_ss_sp, &__uss->ss_sp, label); \
+ unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \
+ unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \
} while (0);
+#ifdef CONFIG_DYNAMIC_SIGFRAME
+bool sigaltstack_size_valid(size_t ss_size);
+#else
+static inline bool sigaltstack_size_valid(size_t size) { return true; }
+#endif /* !CONFIG_DYNAMIC_SIGFRAME */
+
#ifdef CONFIG_PROC_FS
struct seq_file;
extern void render_sigset_t(struct seq_file *, const char *, sigset_t *);
#endif
+#ifndef arch_untagged_si_addr
+/*
+ * Given a fault address and a signal and si_code which correspond to the
+ * _sigfault union member, returns the address that must appear in si_addr if
+ * the signal handler does not have SA_EXPOSE_TAGBITS enabled in sa_flags.
+ */
+static inline void __user *arch_untagged_si_addr(void __user *addr,
+ unsigned long sig,
+ unsigned long si_code)
+{
+ return addr;
+}
+#endif
+
#endif /* _LINUX_SIGNAL_H */
diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h
index 16d862a3d8f3..a70b2bdbf4d9 100644
--- a/include/linux/signal_types.h
+++ b/include/linux/signal_types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SIGNAL_TYPES_H
#define _LINUX_SIGNAL_TYPES_H
@@ -8,6 +9,12 @@
#include <linux/list.h>
#include <uapi/linux/signal.h>
+typedef struct kernel_siginfo {
+ __SIGINFO;
+} kernel_siginfo_t;
+
+struct ucounts;
+
/*
* Real Time signals may be queued.
*/
@@ -15,8 +22,8 @@
struct sigqueue {
struct list_head list;
int flags;
- siginfo_t info;
- struct user_struct *user;
+ kernel_siginfo_t info;
+ struct ucounts *ucounts;
};
/* flags values. */
@@ -59,8 +66,23 @@ struct old_sigaction {
struct ksignal {
struct k_sigaction ka;
- siginfo_t info;
+ kernel_siginfo_t info;
int sig;
};
+/* Used to kill the race between sigaction and forced signals */
+#define SA_IMMUTABLE 0x00800000
+
+#ifndef __ARCH_UAPI_SA_FLAGS
+#ifdef SA_RESTORER
+#define __ARCH_UAPI_SA_FLAGS SA_RESTORER
+#else
+#define __ARCH_UAPI_SA_FLAGS 0
+#endif
+#endif
+
+#define UAPI_SA_FLAGS \
+ (SA_NOCLDSTOP | SA_NOCLDWAIT | SA_SIGINFO | SA_ONSTACK | SA_RESTART | \
+ SA_NODEFER | SA_RESETHAND | SA_EXPOSE_TAGBITS | __ARCH_UAPI_SA_FLAGS)
+
#endif /* _LINUX_SIGNAL_TYPES_H */
diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h
index 4985048640a7..9a47c380bda8 100644
--- a/include/linux/signalfd.h
+++ b/include/linux/signalfd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* include/linux/signalfd.h
*
diff --git a/include/linux/siox.h b/include/linux/siox.h
new file mode 100644
index 000000000000..6bfbda3f634c
--- /dev/null
+++ b/include/linux/siox.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Pengutronix, Uwe Kleine-König <kernel@pengutronix.de>
+ */
+
+#include <linux/device.h>
+
+#define to_siox_device(_dev) container_of((_dev), struct siox_device, dev)
+struct siox_device {
+ struct list_head node; /* node in smaster->devices */
+ struct siox_master *smaster;
+ struct device dev;
+
+ const char *type;
+ size_t inbytes;
+ size_t outbytes;
+ u8 statustype;
+
+ u8 status_read_clean;
+ u8 status_written;
+ u8 status_written_lastcycle;
+ bool connected;
+
+ /* statistics */
+ unsigned int watchdog_errors;
+ unsigned int status_errors;
+
+ struct kernfs_node *status_errors_kn;
+ struct kernfs_node *watchdog_kn;
+ struct kernfs_node *watchdog_errors_kn;
+ struct kernfs_node *connected_kn;
+};
+
+bool siox_device_synced(struct siox_device *sdevice);
+bool siox_device_connected(struct siox_device *sdevice);
+
+struct siox_driver {
+ int (*probe)(struct siox_device *sdevice);
+ void (*remove)(struct siox_device *sdevice);
+ void (*shutdown)(struct siox_device *sdevice);
+
+ /*
+	 * buf is big enough to hold sdev->inbytes - 1 bytes; the status byte
+	 * is handled by the framework.
+ */
+ int (*set_data)(struct siox_device *sdevice, u8 status, u8 buf[]);
+ /*
+	 * buf is big enough to hold sdev->outbytes - 1 bytes; the status byte
+	 * is handled by the framework.
+ */
+ int (*get_data)(struct siox_device *sdevice, const u8 buf[]);
+
+ struct device_driver driver;
+};
+
+static inline struct siox_driver *to_siox_driver(struct device_driver *driver)
+{
+ if (driver)
+ return container_of(driver, struct siox_driver, driver);
+ else
+ return NULL;
+}
+
+int __siox_driver_register(struct siox_driver *sdriver, struct module *owner);
+
+static inline int siox_driver_register(struct siox_driver *sdriver)
+{
+ return __siox_driver_register(sdriver, THIS_MODULE);
+}
+
+static inline void siox_driver_unregister(struct siox_driver *sdriver)
+{
+ return driver_unregister(&sdriver->driver);
+}
+
+/*
+ * module_siox_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit().
+ */
+#define module_siox_driver(__siox_driver) \
+ module_driver(__siox_driver, siox_driver_register, \
+ siox_driver_unregister)
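
A hedged skeleton of a client driver using the macro above (names are hypothetical):

static int my_probe(struct siox_device *sdevice)
{
	/* allocate state, inspect sdevice->inbytes/outbytes, etc. */
	return 0;
}

static struct siox_driver my_siox_driver = {
	.probe = my_probe,
	.driver = {
		.name = "my-siox-device",
	},
};
module_siox_driver(my_siox_driver);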
diff --git a/include/linux/siphash.h b/include/linux/siphash.h
index fa7a6b9cedbf..9153e77382e1 100644
--- a/include/linux/siphash.h
+++ b/include/linux/siphash.h
@@ -1,6 +1,5 @@
-/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- *
- * This file is provided under a dual BSD/GPLv2 license.
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright (C) 2016-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
* SipHash: a fast short-input PRF
* https://131002.net/siphash/
@@ -21,10 +20,15 @@ typedef struct {
u64 key[2];
} siphash_key_t;
+#define siphash_aligned_key_t siphash_key_t __aligned(16)
+
+static inline bool siphash_key_is_zero(const siphash_key_t *key)
+{
+ return !(key->key[0] | key->key[1]);
+}
+
u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
-#endif
u64 siphash_1u64(const u64 a, const siphash_key_t *key);
u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
@@ -77,10 +81,9 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
static inline u64 siphash(const void *data, size_t len,
const siphash_key_t *key)
{
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
return __siphash_unaligned(data, len, key);
-#endif
return ___siphash_aligned(data, len, key);
}
@@ -91,10 +94,8 @@ typedef struct {
u32 __hsiphash_aligned(const void *data, size_t len,
const hsiphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
const hsiphash_key_t *key);
-#endif
u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
@@ -130,11 +131,38 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
static inline u32 hsiphash(const void *data, size_t len,
const hsiphash_key_t *key)
{
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
return __hsiphash_unaligned(data, len, key);
-#endif
return ___hsiphash_aligned(data, len, key);
}
+/*
+ * These macros expose the raw SipHash and HalfSipHash permutations.
+ * Do not use them directly! If you think you have a use for them,
+ * be sure to CC the maintainer of this file explaining why.
+ */
+
+#define SIPHASH_PERMUTATION(a, b, c, d) ( \
+ (a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \
+ (c) += (d), (d) = rol64((d), 16), (d) ^= (c), \
+ (a) += (d), (d) = rol64((d), 21), (d) ^= (a), \
+ (c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32))
+
+#define SIPHASH_CONST_0 0x736f6d6570736575ULL
+#define SIPHASH_CONST_1 0x646f72616e646f6dULL
+#define SIPHASH_CONST_2 0x6c7967656e657261ULL
+#define SIPHASH_CONST_3 0x7465646279746573ULL
+
+#define HSIPHASH_PERMUTATION(a, b, c, d) ( \
+ (a) += (b), (b) = rol32((b), 5), (b) ^= (a), (a) = rol32((a), 16), \
+ (c) += (d), (d) = rol32((d), 8), (d) ^= (c), \
+ (a) += (d), (d) = rol32((d), 7), (d) ^= (a), \
+ (c) += (b), (b) = rol32((b), 13), (b) ^= (c), (c) = rol32((c), 16))
+
+#define HSIPHASH_CONST_0 0U
+#define HSIPHASH_CONST_1 0U
+#define HSIPHASH_CONST_2 0x6c796765U
+#define HSIPHASH_CONST_3 0x74656462U
+
#endif /* _LINUX_SIPHASH_H */
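
A hedged usage sketch: callers keep a secret key (typically filled once with random
bytes) and hash arbitrary buffers with it; the aligned/unaligned dispatch above is
internal. my_key and my_hash are hypothetical names:

static siphash_aligned_key_t my_key;	/* fill once, e.g. with get_random_bytes() */

static u64 my_hash(const void *buf, size_t len)
{
	return siphash(buf, len, &my_key);
}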
diff --git a/include/linux/sirfsoc_dma.h b/include/linux/sirfsoc_dma.h
deleted file mode 100644
index 29d959333d81..000000000000
--- a/include/linux/sirfsoc_dma.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _SIRFSOC_DMA_H_
-#define _SIRFSOC_DMA_H_
-
-bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id);
-
-#endif
diff --git a/include/linux/sizes.h b/include/linux/sizes.h
index ce3e8150c174..84aa448d8bb3 100644
--- a/include/linux/sizes.h
+++ b/include/linux/sizes.h
@@ -1,13 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/sizes.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_SIZES_H__
#define __LINUX_SIZES_H__
+#include <linux/const.h>
+
#define SZ_1 0x00000001
#define SZ_2 0x00000002
#define SZ_4 0x00000004
@@ -44,4 +43,12 @@
#define SZ_1G 0x40000000
#define SZ_2G 0x80000000
+#define SZ_4G _AC(0x100000000, ULL)
+#define SZ_8G _AC(0x200000000, ULL)
+#define SZ_16G _AC(0x400000000, ULL)
+#define SZ_32G _AC(0x800000000, ULL)
+
+#define SZ_1T _AC(0x10000000000, ULL)
+#define SZ_64T _AC(0x400000000000, ULL)
+
#endif /* __LINUX_SIZES_H__ */
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
index 35226cd4efb0..e2d45b7cb619 100644
--- a/include/linux/skb_array.h
+++ b/include/linux/skb_array.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Definitions for the 'struct skb_array' datastructure.
*
@@ -6,11 +7,6 @@
*
* Copyright (C) 2016 Red Hat, Inc.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
* Limited-size FIFO of skbs. Can be used more or less whenever
* sk_buff_head can be used, except you need to know the queue size in
* advance.
@@ -69,7 +65,12 @@ static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb
*/
static inline bool __skb_array_empty(struct skb_array *a)
{
- return !__ptr_ring_peek(&a->ring);
+ return __ptr_ring_empty(&a->ring);
+}
+
+static inline struct sk_buff *__skb_array_peek(struct skb_array *a)
+{
+ return __ptr_ring_peek(&a->ring);
}
static inline bool skb_array_empty(struct skb_array *a)
@@ -92,6 +93,11 @@ static inline bool skb_array_empty_any(struct skb_array *a)
return ptr_ring_empty_any(&a->ring);
}
+static inline struct sk_buff *__skb_array_consume(struct skb_array *a)
+{
+ return __ptr_ring_consume(&a->ring);
+}
+
static inline struct sk_buff *skb_array_consume(struct skb_array *a)
{
return ptr_ring_consume(&a->ring);
@@ -193,7 +199,8 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
}
static inline int skb_array_resize_multiple(struct skb_array **rings,
- int nrings, int size, gfp_t gfp)
+ int nrings, unsigned int size,
+ gfp_t gfp)
{
BUILD_BUG_ON(offsetof(struct skb_array, ring));
return ptr_ring_resize_multiple((struct ptr_ring **)rings,
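
For context, a hedged sketch of the basic produce/consume flow — the queue size must
be known up front, and error handling is abbreviated:

struct skb_array a;

if (skb_array_init(&a, 128, GFP_KERNEL))	/* fixed-size ring */
	return -ENOMEM;

if (skb_array_produce(&a, skb))			/* non-zero when the ring is full */
	kfree_skb(skb);

skb = skb_array_consume(&a);			/* NULL when empty */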
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index dbe29b6c9bd6..738776ab8838 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1,138 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Definitions for the 'struct sk_buff' memory handlers.
*
* Authors:
* Alan Cox, <gw4pts@gw4pts.ampr.org>
* Florian La Roche, <rzsfl@rz.uni-sb.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H
#include <linux/kernel.h>
-#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
+#include <linux/bvec.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>
+#include <linux/refcount.h>
#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
-#include <linux/net.h>
-#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
-#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
-#include <linux/sched.h>
-#include <linux/sched/clock.h>
#include <net/flow_dissector.h>
-#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
+#include <linux/llist.h>
#include <net/flow.h>
+#include <net/page_pool.h>
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <linux/netfilter/nf_conntrack_common.h>
+#endif
+#include <net/net_debug.h>
+#include <net/dropreason-core.h>
-/* The interface for checksum offload between the stack and networking drivers
+/**
+ * DOC: skb checksums
+ *
+ * The interface for checksum offload between the stack and networking drivers
* is as follows...
*
- * A. IP checksum related features
+ * IP checksum related features
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* Drivers advertise checksum offload capabilities in the features of a device.
- * From the stack's point of view these are capabilities offered by the driver,
- * a driver typically only advertises features that it is capable of offloading
+ * From the stack's point of view these are capabilities offered by the driver.
+ * A driver typically only advertises features that it is capable of offloading
* to its device.
*
- * The checksum related features are:
- *
- * NETIF_F_HW_CSUM - The driver (or its device) is able to compute one
- * IP (one's complement) checksum for any combination
- * of protocols or protocol layering. The checksum is
- * computed and set in a packet per the CHECKSUM_PARTIAL
- * interface (see below).
- *
- * NETIF_F_IP_CSUM - Driver (device) is only able to checksum plain
- * TCP or UDP packets over IPv4. These are specifically
- * unencapsulated packets of the form IPv4|TCP or
- * IPv4|UDP where the Protocol field in the IPv4 header
- * is TCP or UDP. The IPv4 header may contain IP options
- * This feature cannot be set in features for a device
- * with NETIF_F_HW_CSUM also set. This feature is being
- * DEPRECATED (see below).
- *
- * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
- * TCP or UDP packets over IPv6. These are specifically
- * unencapsulated packets of the form IPv6|TCP or
- * IPv4|UDP where the Next Header field in the IPv6
- * header is either TCP or UDP. IPv6 extension headers
- * are not supported with this feature. This feature
- * cannot be set in features for a device with
- * NETIF_F_HW_CSUM also set. This feature is being
- * DEPRECATED (see below).
- *
- * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload.
- * This flag is used only used to disable the RX checksum
- * feature for a device. The stack will accept receive
- * checksum indication in packets received on a device
- * regardless of whether NETIF_F_RXCSUM is set.
- *
- * B. Checksumming of received packets by device. Indication of checksum
- * verification is in set skb->ip_summed. Possible values are:
- *
- * CHECKSUM_NONE:
+ * .. flat-table:: Checksum related device features
+ * :widths: 1 10
+ *
+ * * - %NETIF_F_HW_CSUM
+ * - The driver (or its device) is able to compute one
+ * IP (one's complement) checksum for any combination
+ * of protocols or protocol layering. The checksum is
+ * computed and set in a packet per the CHECKSUM_PARTIAL
+ * interface (see below).
+ *
+ * * - %NETIF_F_IP_CSUM
+ * - Driver (device) is only able to checksum plain
+ * TCP or UDP packets over IPv4. These are specifically
+ * unencapsulated packets of the form IPv4|TCP or
+ * IPv4|UDP where the Protocol field in the IPv4 header
+ * is TCP or UDP. The IPv4 header may contain IP options.
+ * This feature cannot be set in features for a device
+ * with NETIF_F_HW_CSUM also set. This feature is being
+ * DEPRECATED (see below).
+ *
+ * * - %NETIF_F_IPV6_CSUM
+ * - Driver (device) is only able to checksum plain
+ * TCP or UDP packets over IPv6. These are specifically
+ * unencapsulated packets of the form IPv6|TCP or
+ * IPv6|UDP where the Next Header field in the IPv6
+ * header is either TCP or UDP. IPv6 extension headers
+ * are not supported with this feature. This feature
+ * cannot be set in features for a device with
+ * NETIF_F_HW_CSUM also set. This feature is being
+ * DEPRECATED (see below).
+ *
+ * * - %NETIF_F_RXCSUM
+ * - Driver (device) performs receive checksum offload.
+ * This flag is only used to disable the RX checksum
+ * feature for a device. The stack will accept receive
+ * checksum indication in packets received on a device
+ * regardless of whether NETIF_F_RXCSUM is set.
+ *
+ * Checksumming of received packets by device
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Indication of checksum verification is set in &sk_buff.ip_summed.
+ * Possible values are:
+ *
+ * - %CHECKSUM_NONE
*
* Device did not checksum this packet e.g. due to lack of capabilities.
* The packet contains full (though not verified) checksum in packet but
* not in skb->csum. Thus, skb->csum is undefined in this case.
*
- * CHECKSUM_UNNECESSARY:
+ * - %CHECKSUM_UNNECESSARY
*
* The hardware you're dealing with doesn't calculate the full checksum
- * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
- * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
- * if their checksums are okay. skb->csum is still undefined in this case
+ * (as in %CHECKSUM_COMPLETE), but it does parse headers and verify checksums
+ * for specific protocols. For such packets it will set %CHECKSUM_UNNECESSARY
+ * if their checksums are okay. &sk_buff.csum is still undefined in this case
* though. A driver or device must never modify the checksum field in the
* packet even if checksum is verified.
*
- * CHECKSUM_UNNECESSARY is applicable to following protocols:
- * TCP: IPv6 and IPv4.
- * UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
+ * %CHECKSUM_UNNECESSARY is applicable to following protocols:
+ *
+ * - TCP: IPv6 and IPv4.
+ * - UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
* zero UDP checksum for either IPv4 or IPv6, the networking stack
* may perform further validation in this case.
- * GRE: only if the checksum is present in the header.
- * SCTP: indicates the CRC in SCTP header has been validated.
- * FCOE: indicates the CRC in FC frame has been validated.
+ * - GRE: only if the checksum is present in the header.
+ * - SCTP: indicates the CRC in SCTP header has been validated.
+ * - FCOE: indicates the CRC in FC frame has been validated.
*
- * skb->csum_level indicates the number of consecutive checksums found in
- * the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
+ * &sk_buff.csum_level indicates the number of consecutive checksums found in
+ * the packet minus one that have been verified as %CHECKSUM_UNNECESSARY.
* For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
* and a device is able to verify the checksums for UDP (possibly zero),
- * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to
+ * GRE (checksum flag is set) and TCP, &sk_buff.csum_level would be set to
* two. If the device were only able to verify the UDP checksum and not
- * GRE, either because it doesn't support GRE checksum of because GRE
+ * GRE, either because it doesn't support GRE checksum or because GRE
* checksum is bad, skb->csum_level would be set to zero (TCP checksum is
* not considered in this case).
*
- * CHECKSUM_COMPLETE:
+ * - %CHECKSUM_COMPLETE
*
* This is the most generic way. The device supplied checksum of the _whole_
- * packet as seen by netif_rx() and fills out in skb->csum. Meaning, the
+ * packet as seen by netif_rx() and fills in &sk_buff.csum. This means the
* hardware doesn't need to parse L3/L4 headers to implement this.
*
* Notes:
+ *
* - Even if device supports only some protocols, but is able to produce
* skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
* - CHECKSUM_COMPLETE is not applicable to SCTP and FCoE protocols.
*
- * CHECKSUM_PARTIAL:
+ * - %CHECKSUM_PARTIAL
*
* A checksum is set up to be offloaded to a device as described in the
* output description for CHECKSUM_PARTIAL. This may occur on a packet
@@ -144,17 +156,21 @@
* packet that are after the checksum being offloaded are not considered to
* be verified.
*
- * C. Checksumming on transmit for non-GSO. The stack requests checksum offload
- * in the skb->ip_summed for a packet. Values are:
+ * Checksumming on transmit for non-GSO
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The stack requests checksum offload in the &sk_buff.ip_summed for a packet.
+ * Values are:
*
- * CHECKSUM_PARTIAL:
+ * - %CHECKSUM_PARTIAL
*
* The driver is required to checksum the packet as seen by hard_start_xmit()
- * from skb->csum_start up to the end, and to record/write the checksum at
- * offset skb->csum_start + skb->csum_offset. A driver may verify that the
+ * from &sk_buff.csum_start up to the end, and to record/write the checksum at
+ * offset &sk_buff.csum_start + &sk_buff.csum_offset.
+ * A driver may verify that the
* csum_start and csum_offset values are valid values given the length and
- * offset of the packet, however they should not attempt to validate that the
- * checksum refers to a legitimate transport layer checksum-- it is the
+ * offset of the packet, but it should not attempt to validate that the
+ * checksum refers to a legitimate transport layer checksum -- it is the
* purview of the stack to validate that csum_start and csum_offset are set
* correctly.
*
@@ -163,57 +179,68 @@
* checksum calculation to the device, or call skb_checksum_help (in the case
* that the device does not support offload for a particular checksum).
*
- * NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of
- * NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate
+ * %NETIF_F_IP_CSUM and %NETIF_F_IPV6_CSUM are being deprecated in favor of
+ * %NETIF_F_HW_CSUM. New devices should use %NETIF_F_HW_CSUM to indicate
* checksum offload capability.
- * skb_csum_hwoffload_help() can be called to resolve CHECKSUM_PARTIAL based
+ * skb_csum_hwoffload_help() can be called to resolve %CHECKSUM_PARTIAL based
* on network device checksumming capabilities: if a packet does not match
- * them, skb_checksum_help or skb_crc32c_help (depending on the value of
- * csum_not_inet, see item D.) is called to resolve the checksum.
+ * them, skb_checksum_help() or skb_crc32c_help() (depending on the value of
+ * &sk_buff.csum_not_inet, see :ref:`crc`)
+ * is called to resolve the checksum.
*
- * CHECKSUM_NONE:
+ * - %CHECKSUM_NONE
*
* The skb was already checksummed by the protocol, or a checksum is not
* required.
*
- * CHECKSUM_UNNECESSARY:
+ * - %CHECKSUM_UNNECESSARY
*
- * This has the same meaning on as CHECKSUM_NONE for checksum offload on
+ * This has the same meaning as CHECKSUM_NONE for checksum offload on
* output.
*
- * CHECKSUM_COMPLETE:
+ * - %CHECKSUM_COMPLETE
+ *
* Not used in checksum output. If a driver observes a packet with this value
- * set in skbuff, if should treat as CHECKSUM_NONE being set.
- *
- * D. Non-IP checksum (CRC) offloads
- *
- * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
- * offloading the SCTP CRC in a packet. To perform this offload the stack
- * will set set csum_start and csum_offset accordingly, set ip_summed to
- * CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in
- * the skbuff that the CHECKSUM_PARTIAL refers to CRC32c.
- * A driver that supports both IP checksum offload and SCTP CRC32c offload
- * must verify which offload is configured for a packet by testing the
- * value of skb->csum_not_inet; skb_crc32c_csum_help is provided to resolve
- * CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1.
- *
- * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
- * offloading the FCOE CRC in a packet. To perform this offload the stack
- * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
- * accordingly. Note the there is no indication in the skbuff that the
- * CHECKSUM_PARTIAL refers to an FCOE checksum, a driver that supports
- * both IP checksum offload and FCOE CRC offload must verify which offload
- * is configured for a packet presumably by inspecting packet headers.
- *
- * E. Checksumming on output with GSO.
- *
- * In the case of a GSO packet (skb_is_gso(skb) is true), checksum offload
+ * set in skbuff, it should treat the packet as if %CHECKSUM_NONE were set.
+ *
+ * .. _crc:
+ *
+ * Non-IP checksum (CRC) offloads
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * .. flat-table::
+ * :widths: 1 10
+ *
+ * * - %NETIF_F_SCTP_CRC
+ * - This feature indicates that a device is capable of
+ * offloading the SCTP CRC in a packet. To perform this offload the stack
+ * will set csum_start and csum_offset accordingly, set ip_summed to
+ * %CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication
+ * in the skbuff that the %CHECKSUM_PARTIAL refers to CRC32c.
+ * A driver that supports both IP checksum offload and SCTP CRC32c offload
+ * must verify which offload is configured for a packet by testing the
+ * value of &sk_buff.csum_not_inet; skb_crc32c_csum_help() is provided to
+ * resolve %CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1.
+ *
+ * * - %NETIF_F_FCOE_CRC
+ * - This feature indicates that a device is capable of offloading the FCOE
+ * CRC in a packet. To perform this offload the stack will set ip_summed
+ * to %CHECKSUM_PARTIAL and set csum_start and csum_offset
+ * accordingly. Note that there is no indication in the skbuff that the
+ * %CHECKSUM_PARTIAL refers to an FCOE checksum, so a driver that supports
+ * both IP checksum offload and FCOE CRC offload must verify which offload
+ * is configured for a packet, presumably by inspecting packet headers.
+ *
+ * Checksumming on output with GSO
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * In the case of a GSO packet (skb_is_gso() is true), checksum offload
* is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
- * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as
+ * gso_type is %SKB_GSO_TCPV4 or %SKB_GSO_TCPV6, TCP checksum offload as
* part of the GSO operation is implied. If a checksum is being offloaded
- * with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset
- * are set to refer to the outermost checksum being offload (two offloaded
- * checksums are possible with UDP encapsulation).
+ * with GSO then ip_summed is %CHECKSUM_PARTIAL, and both csum_start and
+ * csum_offset are set to refer to the outermost checksum being offloaded
+ * (two offloaded checksums are possible with UDP encapsulation).
*/
/* Don't change this without changing skb_csum_unnecessary! */
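
A hedged sketch of the transmit-side contract described above: a driver that cannot
offload a given CHECKSUM_PARTIAL packet resolves it in software before handing it to
hardware. my_hw_can_csum() is a hypothetical capability test:

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL && !my_hw_can_csum(skb)) {
		if (skb_checksum_help(skb))
			goto drop;	/* could not checksum in software */
	}
	/* ... queue skb, passing csum_start/csum_offset to the device ... */
	return NETDEV_TX_OK;
drop:
	kfree_skb(skb);
	return NETDEV_TX_OK;
}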
@@ -228,6 +255,14 @@
#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X) \
((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+/* For X bytes available in skb->head, what is the minimal
+ * allocation needed, knowing struct skb_shared_info needs
+ * to be aligned.
+ */
+#define SKB_HEAD_ALIGN(X) (SKB_DATA_ALIGN(X) + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
#define SKB_MAX_ORDER(X, ORDER) \
SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
@@ -238,21 +273,19 @@
SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+struct ahash_request;
struct net_device;
struct scatterlist;
struct pipe_inode_info;
struct iov_iter;
struct napi_struct;
-
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-struct nf_conntrack {
- atomic_t use;
-};
-#endif
+struct bpf_prog;
+union bpf_attr;
+struct skb_ext;
+struct ts_config;
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
- refcount_t use;
enum {
BRNF_PROTO_UNCHANGED,
BRNF_PROTO_8021Q,
@@ -261,6 +294,7 @@ struct nf_bridge_info {
u8 pkt_otherhost:1;
u8 in_prerouting:1;
u8 bridged_dnat:1;
+ u8 sabotage_in_done:1;
__u16 frag_max_size;
struct net_device *physindev;
@@ -280,10 +314,31 @@ struct nf_bridge_info {
};
#endif
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+/* Chain in tc_skb_ext will be used to share the tc chain with
+ * ovs recirc_id. It will be set to the current chain by tc
+ * and read by ovs to set the recirc_id.
+ */
+struct tc_skb_ext {
+ union {
+ u64 act_miss_cookie;
+ __u32 chain;
+ };
+ __u16 mru;
+ __u16 zone;
+ u8 post_ct:1;
+ u8 post_ct_snat:1;
+ u8 post_ct_dnat:1;
+ u8 act_miss:1; /* Set if act_miss_cookie is used */
+};
+#endif
+
struct sk_buff_head {
- /* These two members must be first. */
- struct sk_buff *next;
- struct sk_buff *prev;
+ /* These two members must be first to match sk_buff. */
+ struct_group_tagged(sk_buff_list, list,
+ struct sk_buff *next;
+ struct sk_buff *prev;
+ );
__u32 qlen;
spinlock_t lock;
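
Because next/prev sit first on both sides of the tagged group, the
head of a queue can itself be treated as a list node; a minimal sketch
equivalent to the in-tree skb_queue_empty():

	#include <linux/skbuff.h>

	static inline bool example_queue_empty(const struct sk_buff_head *list)
	{
		/* an empty ring points back at its own head */
		return list->next == (const struct sk_buff *)list;
	}
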
@@ -291,18 +346,12 @@ struct sk_buff_head {
struct sk_buff;
-/* To allow 64K frame to be packed as single skb without frag_list we
- * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
- * buffers which do not start on a page boundary.
- *
- * Since GRO uses frags we allocate at least 16 regardless of page
- * size.
- */
-#if (65536/PAGE_SIZE + 1) < 16
-#define MAX_SKB_FRAGS 16UL
-#else
-#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
+#ifndef CONFIG_MAX_SKB_FRAGS
+# define CONFIG_MAX_SKB_FRAGS 17
#endif
+
+#define MAX_SKB_FRAGS CONFIG_MAX_SKB_FRAGS
+
extern int sysctl_max_skb_frags;
/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
@@ -310,47 +359,95 @@ extern int sysctl_max_skb_frags;
*/
#define GSO_BY_FRAGS 0xFFFF
-typedef struct skb_frag_struct skb_frag_t;
-
-struct skb_frag_struct {
- struct {
- struct page *p;
- } page;
-#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
- __u32 page_offset;
- __u32 size;
-#else
- __u16 page_offset;
- __u16 size;
-#endif
-};
+typedef struct bio_vec skb_frag_t;
+/**
+ * skb_frag_size() - Returns the size of a skb fragment
+ * @frag: skb fragment
+ */
static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
- return frag->size;
+ return frag->bv_len;
}
+/**
+ * skb_frag_size_set() - Sets the size of a skb fragment
+ * @frag: skb fragment
+ * @size: size of fragment
+ */
static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
- frag->size = size;
+ frag->bv_len = size;
}
+/**
+ * skb_frag_size_add() - Increments the size of a skb fragment by @delta
+ * @frag: skb fragment
+ * @delta: value to add
+ */
static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
- frag->size += delta;
+ frag->bv_len += delta;
}
+/**
+ * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta
+ * @frag: skb fragment
+ * @delta: value to subtract
+ */
static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
- frag->size -= delta;
+ frag->bv_len -= delta;
+}
+
+/**
+ * skb_frag_must_loop - Test if @p is a high memory page
+ * @p: fragment's page
+ */
+static inline bool skb_frag_must_loop(struct page *p)
+{
+#if defined(CONFIG_HIGHMEM)
+ if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) || PageHighMem(p))
+ return true;
+#endif
+ return false;
}
+/**
+ * skb_frag_foreach_page - loop over pages in a fragment
+ *
+ * @f: skb frag to operate on
+ * @f_off: offset from start of f->bv_page
+ * @f_len: length from f_off to loop over
+ * @p: (temp var) current page
+ * @p_off: (temp var) offset from start of current page,
+ * non-zero only on the first page.
+ * @p_len: (temp var) length in current page,
+ * < PAGE_SIZE only on the first and last page.
+ * @copied: (temp var) length so far, excluding current p_len.
+ *
+ * A fragment can hold a compound page, in which case per-page
+ * operations, notably kmap_atomic, must be called for each
+ * regular page.
+ */
+#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied) \
+ for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT), \
+ p_off = (f_off) & (PAGE_SIZE - 1), \
+ p_len = skb_frag_must_loop(p) ? \
+ min_t(u32, f_len, PAGE_SIZE - p_off) : f_len, \
+ copied = 0; \
+ copied < f_len; \
+ copied += p_len, p++, p_off = 0, \
+ p_len = min_t(u32, f_len - copied, PAGE_SIZE))
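
A minimal usage sketch modelled on in-tree callers such as
__skb_datagram_iter() (skb_frag_off() and kmap_atomic() assumed
available here): copy out a fragment that may span several
highmem/compound pages, mapping each constituent page individually.

	#include <linux/highmem.h>
	#include <linux/skbuff.h>
	#include <linux/string.h>

	static void example_copy_from_frag(const skb_frag_t *frag, char *dst,
					   unsigned int off, unsigned int len)
	{
		struct page *p;
		u32 p_off, p_len, copied;

		skb_frag_foreach_page(frag, skb_frag_off(frag) + off, len,
				      p, p_off, p_len, copied) {
			void *vaddr = kmap_atomic(p);

			memcpy(dst + copied, vaddr + p_off, p_len);
			kunmap_atomic(vaddr);
		}
	}
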
+
#define HAVE_HW_TIME_STAMP
/**
* struct skb_shared_hwtstamps - hardware time stamps
- * @hwtstamp: hardware time stamp transformed into duration
- * since arbitrary point in time
+ * @hwtstamp: hardware time stamp transformed into a duration
+ * since an arbitrary point in time
+ * @netdev_data: address/cookie of the network device driver, used as
+ * a reference to the actual hardware time stamp
*
* Software time stamps generated by ktime_get_real() are stored in
* skb->tstamp.
@@ -362,7 +459,10 @@ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
* &skb_shared_info. Use skb_hwtstamps() to get a pointer.
*/
struct skb_shared_hwtstamps {
- ktime_t hwtstamp;
+ union {
+ ktime_t hwtstamp;
+ void *netdev_data;
+ };
};
/* Definitions for tx_flags in struct skb_shared_info */
@@ -376,26 +476,53 @@ enum {
/* device driver is going to provide hardware time stamp */
SKBTX_IN_PROGRESS = 1 << 2,
- /* device driver supports TX zero-copy buffers */
- SKBTX_DEV_ZEROCOPY = 1 << 3,
+ /* generate hardware time stamp based on cycles if supported */
+ SKBTX_HW_TSTAMP_USE_CYCLES = 1 << 3,
/* generate wifi status information (where possible) */
SKBTX_WIFI_STATUS = 1 << 4,
+ /* determine hardware time stamp based on time or cycles */
+ SKBTX_HW_TSTAMP_NETDEV = 1 << 5,
+
+ /* generate software time stamp when entering packet scheduling */
+ SKBTX_SCHED_TSTAMP = 1 << 6,
+};
+
+#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
+ SKBTX_SCHED_TSTAMP)
+#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | \
+ SKBTX_HW_TSTAMP_USE_CYCLES | \
+ SKBTX_ANY_SW_TSTAMP)
+
+/* Definitions for flags in struct skb_shared_info */
+enum {
+ /* use zcopy routines */
+ SKBFL_ZEROCOPY_ENABLE = BIT(0),
+
/* This indicates at least one fragment might be overwritten
* (as in vmsplice(), sendfile() ...)
* If we need to compute a TX checksum, we'll need to copy
* all frags to avoid a possibly bad checksum
*/
- SKBTX_SHARED_FRAG = 1 << 5,
+ SKBFL_SHARED_FRAG = BIT(1),
- /* generate software time stamp when entering packet scheduling */
- SKBTX_SCHED_TSTAMP = 1 << 6,
+ /* segment contains only zerocopy data and should not be
+ * charged to kernel memory.
+ */
+ SKBFL_PURE_ZEROCOPY = BIT(2),
+
+ SKBFL_DONT_ORPHAN = BIT(3),
+
+ /* page references are managed by the ubuf_info, so it's safe to
+ * use frags only up until ubuf_info is released
+ */
+ SKBFL_MANAGED_FRAG_REFS = BIT(4),
};
-#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
- SKBTX_SCHED_TSTAMP)
-#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
+#define SKBFL_ZEROCOPY_FRAG (SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG)
+#define SKBFL_ALL_ZEROCOPY (SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY | \
+ SKBFL_DONT_ORPHAN | SKBFL_MANAGED_FRAG_REFS)
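
A minimal sketch of how these bits are consumed (equivalent to the
in-tree skb_has_shared_frag() helper): a shared frag must be copied
before any software checksum pass may write to the data.

	#include <linux/skbuff.h>

	static inline bool example_has_shared_frag(const struct sk_buff *skb)
	{
		return skb_is_nonlinear(skb) &&
		       skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG;
	}
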
/*
* The callback notifies userspace to release buffers when skb DMA is done in
@@ -406,17 +533,48 @@ enum {
* The desc field is used to track userspace buffer index.
*/
struct ubuf_info {
- void (*callback)(struct ubuf_info *, bool zerocopy_success);
- void *ctx;
- unsigned long desc;
+ void (*callback)(struct sk_buff *, struct ubuf_info *,
+ bool zerocopy_success);
+ refcount_t refcnt;
+ u8 flags;
};
+struct ubuf_info_msgzc {
+ struct ubuf_info ubuf;
+
+ union {
+ struct {
+ unsigned long desc;
+ void *ctx;
+ };
+ struct {
+ u32 id;
+ u16 len;
+ u16 zerocopy:1;
+ u32 bytelen;
+ };
+ };
+
+ struct mmpin {
+ struct user_struct *user;
+ unsigned int num_pg;
+ } mmp;
+};
+
+#define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
+#define uarg_to_msgzc(ubuf_ptr) container_of((ubuf_ptr), struct ubuf_info_msgzc, \
+ ubuf)
+
+int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
+void mm_unaccount_pinned_pages(struct mmpin *mmp);
+
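
A minimal completion-callback sketch under the reworked layout (the
real msg_zerocopy callback does considerably more bookkeeping): the
generic ubuf_info is embedded in ubuf_info_msgzc, so the callback
recovers the container with uarg_to_msgzc() and releases the
pinned-page accounting on the final reference drop.

	#include <linux/skbuff.h>

	static void example_zerocopy_complete(struct sk_buff *skb,
					      struct ubuf_info *uarg,
					      bool zerocopy_success)
	{
		struct ubuf_info_msgzc *msgzc = uarg_to_msgzc(uarg);

		if (refcount_dec_and_test(&uarg->refcnt))
			mm_unaccount_pinned_pages(&msgzc->mmp);
	}
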
/* This data is invariant across clones and lives at
* the end of the header data, ie. at skb->end.
*/
struct skb_shared_info {
- unsigned short _unused;
- unsigned char nr_frags;
+ __u8 flags;
+ __u8 meta_len;
+ __u8 nr_frags;
__u8 tx_flags;
unsigned short gso_size;
/* Warning: this field is not always filled in (UFO)! */
@@ -425,12 +583,12 @@ struct skb_shared_info {
struct skb_shared_hwtstamps hwtstamps;
unsigned int gso_type;
u32 tskey;
- __be32 ip6_frag_id;
/*
* Warning: all fields before dataref are cleared in __alloc_skb()
*/
atomic_t dataref;
+ unsigned int xdp_frags_size;
/* Intermediate layers must ensure that destructor_arg
* remains valid until skb destructor */
@@ -440,16 +598,32 @@ struct skb_shared_info {
skb_frag_t frags[MAX_SKB_FRAGS];
};
-/* We divide dataref into two halves. The higher 16 bits hold references
- * to the payload part of skb->data. The lower 16 bits hold references to
- * the entire skb->data. A clone of a headerless skb holds the length of
- * the header in skb->hdr_len.
- *
- * All users must obey the rule that the skb->data reference count must be
- * greater than or equal to the payload reference count.
- *
- * Holding a reference to the payload part means that the user does not
- * care about modifications to the header part of skb->data.
+/**
+ * DOC: dataref and headerless skbs
+ *
+ * Transport layers send out clones of payload skbs they hold for
+ * retransmissions. To allow lower layers of the stack to prepend their headers
+ * we split &skb_shared_info.dataref into two halves.
+ * The lower 16 bits count the overall number of references.
+ * The higher 16 bits indicate how many of the references are payload-only.
+ * skb_header_cloned() checks if skb is allowed to add / write the headers.
+ *
+ * The creator of the skb (e.g. TCP) marks its skb as &sk_buff.nohdr
+ * (via __skb_header_release()). Any clone created from the marked skb will
+ * get &sk_buff.hdr_len populated with the available headroom.
+ * As long as it is the only clone in existence, it may modify the headroom
+ * at will. The sequence of calls inside the transport layer is::
+ *
+ * <alloc skb>
+ * skb_reserve()
+ * __skb_header_release()
+ * skb_clone()
+ * // send the clone down the stack
+ *
+ * This is not a very generic construct and it depends on the transport layers
+ * doing the right thing. In practice there's usually only one payload-only skb.
+ * Having multiple payload-only skbs with different lengths of hdr_len is not
+ * possible. The payload-only skbs should never leave their owner.
*/
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
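
A minimal sketch of how the two halves are consumed, modelled on the
in-tree skb_header_cloned():

	#include <linux/skbuff.h>

	static inline int example_header_cloned(const struct sk_buff *skb)
	{
		int dataref = atomic_read(&skb_shinfo(skb)->dataref);

		dataref = (dataref & SKB_DATAREF_MASK) -  /* all refs */
			  (dataref >> SKB_DATAREF_SHIFT); /* minus payload-only */
		return dataref != 1;	/* another full (header) ref exists */
	}
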
@@ -463,39 +637,44 @@ enum {
enum {
SKB_GSO_TCPV4 = 1 << 0,
- SKB_GSO_UDP = 1 << 1,
/* This indicates the skb is from an untrusted source. */
- SKB_GSO_DODGY = 1 << 2,
+ SKB_GSO_DODGY = 1 << 1,
/* This indicates the tcp segment has CWR set. */
- SKB_GSO_TCP_ECN = 1 << 3,
+ SKB_GSO_TCP_ECN = 1 << 2,
+
+ SKB_GSO_TCP_FIXEDID = 1 << 3,
+
+ SKB_GSO_TCPV6 = 1 << 4,
+
+ SKB_GSO_FCOE = 1 << 5,
- SKB_GSO_TCP_FIXEDID = 1 << 4,
+ SKB_GSO_GRE = 1 << 6,
- SKB_GSO_TCPV6 = 1 << 5,
+ SKB_GSO_GRE_CSUM = 1 << 7,
- SKB_GSO_FCOE = 1 << 6,
+ SKB_GSO_IPXIP4 = 1 << 8,
- SKB_GSO_GRE = 1 << 7,
+ SKB_GSO_IPXIP6 = 1 << 9,
- SKB_GSO_GRE_CSUM = 1 << 8,
+ SKB_GSO_UDP_TUNNEL = 1 << 10,
- SKB_GSO_IPXIP4 = 1 << 9,
+ SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
- SKB_GSO_IPXIP6 = 1 << 10,
+ SKB_GSO_PARTIAL = 1 << 12,
- SKB_GSO_UDP_TUNNEL = 1 << 11,
+ SKB_GSO_TUNNEL_REMCSUM = 1 << 13,
- SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12,
+ SKB_GSO_SCTP = 1 << 14,
- SKB_GSO_PARTIAL = 1 << 13,
+ SKB_GSO_ESP = 1 << 15,
- SKB_GSO_TUNNEL_REMCSUM = 1 << 14,
+ SKB_GSO_UDP = 1 << 16,
- SKB_GSO_SCTP = 1 << 15,
+ SKB_GSO_UDP_L4 = 1 << 17,
- SKB_GSO_ESP = 1 << 16,
+ SKB_GSO_FRAGLIST = 1 << 18,
};
#if BITS_PER_LONG > 32
@@ -508,14 +687,61 @@ typedef unsigned int sk_buff_data_t;
typedef unsigned char *sk_buff_data_t;
#endif
-/**
+/**
+ * DOC: Basic sk_buff geometry
+ *
+ * struct sk_buff itself is a metadata structure and does not hold any packet
+ * data. All the data is held in associated buffers.
+ *
+ * &sk_buff.head points to the main "head" buffer. The head buffer is divided
+ * into two parts:
+ *
+ * - data buffer, containing headers and sometimes payload;
+ * this is the part of the skb operated on by the common helpers
+ * such as skb_put() or skb_pull();
+ * - shared info (struct skb_shared_info) which holds an array of pointers
+ * to read-only data in the (page, offset, length) format.
+ *
+ * Optionally &skb_shared_info.frag_list may point to another skb.
+ *
+ * A basic diagram may look like this::
+ *
+ * ---------------
+ * | sk_buff |
+ * ---------------
+ * ,--------------------------- + head
+ * / ,----------------- + data
+ * / / ,----------- + tail
+ * | | | , + end
+ * | | | |
+ * v v v v
+ * -----------------------------------------------
+ * | headroom | data | tailroom | skb_shared_info |
+ * -----------------------------------------------
+ * + [page frag]
+ * + [page frag]
+ * + [page frag]
+ * + [page frag] ---------
+ * + frag_list --> | sk_buff |
+ * ---------
+ *
+ */
+
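
A minimal sketch tying the diagram to the classic helpers (the
NET_IP_ALIGN offset and the payload here are purely illustrative):
skb_reserve() opens headroom by moving data and tail forward, while
skb_put() consumes tailroom by moving only tail forward.

	#include <linux/skbuff.h>
	#include <linux/string.h>

	static void example_fill(struct sk_buff *skb, const void *payload,
				 unsigned int len)
	{
		skb_reserve(skb, NET_IP_ALIGN);		 /* headroom  = data - head */
		memcpy(skb_put(skb, len), payload, len); /* used area = tail - data */
	}
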
+/**
* struct sk_buff - socket buffer
* @next: Next buffer in list
* @prev: Previous buffer in list
* @tstamp: Time we arrived/left
+ * @skb_mstamp_ns: (aka @tstamp) earliest departure time; start point
+ * for retransmit timer
* @rbnode: RB tree node, alternative to next/prev for netem/tcp
+ * @list: queue head
+ * @ll_node: anchor in an llist (e.g. socket defer_list)
* @sk: Socket we are owned by
+ * @ip_defrag_offset: (aka @sk) alternate use of @sk, used in
+ * fragmentation management
* @dev: Device we arrived on/are leaving by
+ * @dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL
* @cb: Control buffer. Free for use by every layer. Put private vars here
* @_skb_refdst: destination entry (with norefcount bit)
* @sp: the security path, used for xfrm
@@ -534,22 +760,35 @@ typedef unsigned char *sk_buff_data_t;
* @pkt_type: Packet class
* @fclone: skbuff clone status
* @ipvs_property: skbuff is owned by ipvs
+ * @inner_protocol_type: whether the inner protocol is
+ * ENCAP_TYPE_ETHER or ENCAP_TYPE_IPPROTO
+ * @remcsum_offload: remote checksum offload is enabled
+ * @offload_fwd_mark: Packet was L2-forwarded in hardware
+ * @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
* @tc_skip_classify: do not classify packet. set by IFB device
* @tc_at_ingress: used within tc_classify to distinguish in/egress
- * @tc_redirected: packet was redirected by a tc action
- * @tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect
+ * @redirected: packet was redirected by packet classifier
+ * @from_ingress: packet was redirected from the ingress path
+ * @nf_skip_egress: packet shall skip nf egress - see netfilter_netdev.h
* @peeked: this packet has been seen already, so stats have been
* done for it, don't do them again
* @nf_trace: netfilter packet trace flag
* @protocol: Packet protocol from driver
* @destructor: Destruct function
+ * @tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue)
+ * @_sk_redir: socket redirection information for skmsg
* @_nfct: Associated connection, if any (with nfctinfo bits)
* @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
* @skb_iif: ifindex of device we arrived on
* @tc_index: Traffic control index
* @hash: the packet hash
* @queue_mapping: Queue mapping for multiqueue devices
- * @xmit_more: More SKBs are pending for this queue
+ * @head_frag: skb was allocated from page fragments,
+ * not allocated by kmalloc() or vmalloc().
+ * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
+ * @pp_recycle: mark the packet for recycling instead of freeing (implies
+ * page_pool support on driver)
+ * @active_extensions: active extensions (skb_ext_id types)
* @ndisc_nodetype: router type (from link layer)
* @ooo_okay: allow the mapping of a socket to a queue to be changed
* @l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -558,51 +797,79 @@ typedef unsigned char *sk_buff_data_t;
* @wifi_acked_valid: wifi_acked was set
* @wifi_acked: whether frame was acked on wifi or not
* @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
+ * @encapsulation: indicates the inner headers in the skbuff are valid
+ * @encap_hdr_csum: software checksum is needed
+ * @csum_valid: checksum is already valid
* @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL
+ * @csum_complete_sw: checksum was completed by software
+ * @csum_level: indicates the number of consecutive checksums found in
+ * the packet minus one that have been verified as
+ * CHECKSUM_UNNECESSARY (max 3)
* @dst_pending_confirm: need to confirm neighbour
- * @napi_id: id of the NAPI struct this skb came from
+ * @decrypted: Decrypted SKB
+ * @slow_gro: state present at GRO time, slower prepare step required
+ * @mono_delivery_time: When set, skb->tstamp has the
+ * delivery_time in mono clock base (i.e. EDT). Otherwise, the
+ * skb->tstamp has the (rcv) timestamp at ingress and
+ * delivery_time at egress.
+ * @napi_id: id of the NAPI struct this skb came from
+ * @sender_cpu: (aka @napi_id) source CPU in XPS
+ * @alloc_cpu: CPU which did the skb allocation.
* @secmark: security marking
* @mark: Generic packet mark
+ * @reserved_tailroom: (aka @mark) number of bytes of free space available
+ * at the tail of an sk_buff
+ * @vlan_all: vlan fields (proto & tci)
* @vlan_proto: vlan encapsulation protocol
* @vlan_tci: vlan tag control information
* @inner_protocol: Protocol (encapsulation)
+ * @inner_ipproto: (aka @inner_protocol) stores ipproto when
+ * skb->inner_protocol_type == ENCAP_TYPE_IPPROTO;
* @inner_transport_header: Inner transport layer header (encapsulation)
* @inner_network_header: Network layer header (encapsulation)
* @inner_mac_header: Link layer header (encapsulation)
* @transport_header: Transport layer header
* @network_header: Network layer header
* @mac_header: Link layer header
+ * @kcov_handle: KCOV remote handle for remote coverage collection
* @tail: Tail pointer
* @end: End pointer
* @head: Head of buffer
* @data: Data head pointer
* @truesize: Buffer size
* @users: User count - see {datagram,tcp}.c
+ * @extensions: allocated extensions, valid if active_extensions is nonzero
*/
struct sk_buff {
union {
struct {
- /* These two members must be first. */
+ /* These two members must be first to match sk_buff_head. */
struct sk_buff *next;
struct sk_buff *prev;
union {
- ktime_t tstamp;
- u64 skb_mstamp;
+ struct net_device *dev;
+ /* Some protocols might use this space to store information,
+ * while the device pointer would be NULL.
+ * The UDP receive path is one such user.
+ */
+ unsigned long dev_scratch;
};
};
- struct rb_node rbnode; /* used in netem & tcp stack */
+ struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */
+ struct list_head list;
+ struct llist_node ll_node;
};
- struct sock *sk;
union {
- struct net_device *dev;
- /* Some protocols might use this space to store information,
- * while device pointer would be NULL.
- * UDP receive path is one user.
- */
- unsigned long dev_scratch;
+ struct sock *sk;
+ int ip_defrag_offset;
+ };
+
+ union {
+ ktime_t tstamp;
+ u64 skb_mstamp_ns; /* earliest departure time */
};
/*
* This is the control buffer. It is free to use for every
@@ -612,17 +879,20 @@ struct sk_buff {
*/
char cb[48] __aligned(8);
- unsigned long _skb_refdst;
- void (*destructor)(struct sk_buff *skb);
-#ifdef CONFIG_XFRM
- struct sec_path *sp;
+ union {
+ struct {
+ unsigned long _skb_refdst;
+ void (*destructor)(struct sk_buff *skb);
+ };